Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 1
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/accessibility/speakup/serialio.c | 22
-rw-r--r--  drivers/accessibility/speakup/speakup_acntpc.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_apollo.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_audptr.c | 8
-rw-r--r--  drivers/accessibility/speakup/speakup_decext.c | 2
-rw-r--r--  drivers/accessibility/speakup/speakup_decpc.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_dectlk.c | 13
-rw-r--r--  drivers/accessibility/speakup/speakup_dtlk.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_keypc.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_ltlk.c | 2
-rw-r--r--  drivers/accessibility/speakup/speakup_soft.c | 4
-rw-r--r--  drivers/accessibility/speakup/speakup_spkout.c | 4
-rw-r--r--  drivers/accessibility/speakup/spk_priv.h | 4
-rw-r--r--  drivers/accessibility/speakup/spk_ttyio.c | 187
-rw-r--r--  drivers/accessibility/speakup/spk_types.h | 17
-rw-r--r--  drivers/accessibility/speakup/synth.c | 9
-rw-r--r--  drivers/accessibility/speakup/varhandlers.c | 1
-rw-r--r--  drivers/acpi/Kconfig | 17
-rw-r--r--  drivers/acpi/Makefile | 2
-rw-r--r--  drivers/acpi/ac.c | 23
-rw-r--r--  drivers/acpi/acpi_configfs.c | 7
-rw-r--r--  drivers/acpi/acpi_fpdt.c | 264
-rw-r--r--  drivers/acpi/acpi_pad.c | 24
-rw-r--r--  drivers/acpi/acpi_tad.c | 14
-rw-r--r--  drivers/acpi/acpi_video.c | 99
-rw-r--r--  drivers/acpi/acpica/acapps.h | 4
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/acconvert.h | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 2
-rw-r--r--  drivers/acpi/acpica/acevents.h | 2
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acobject.h | 3
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 4
-rw-r--r--  drivers/acpi/acpica/dbobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 4
-rw-r--r--  drivers/acpi/acpica/dsdebug.c | 4
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 5
-rw-r--r--  drivers/acpi/acpica/dswload.c | 4
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 4
-rw-r--r--  drivers/acpi/acpica/dswscope.c | 2
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 9
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 71
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evxface.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 4
-rw-r--r--  drivers/acpi/acpica/exconcat.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 2
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 2
-rw-r--r--  drivers/acpi/acpica/exfield.c | 2
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 4
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exprep.c | 2
-rw-r--r--  drivers/acpi/acpica/exregion.c | 2
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 6
-rw-r--r--  drivers/acpi/acpica/exserial.c | 2
-rw-r--r--  drivers/acpi/acpica/exstore.c | 6
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 2
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 2
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 2
-rw-r--r--  drivers/acpi/acpica/extrace.c | 2
-rw-r--r--  drivers/acpi/acpica/exutils.c | 2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 4
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 2
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 19
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psloop.c | 5
-rw-r--r--  drivers/acpi/acpica/psobject.c | 2
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/psparse.c | 4
-rw-r--r--  drivers/acpi/acpica/psscope.c | 2
-rw-r--r--  drivers/acpi/acpica/pstree.c | 2
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/pswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 2
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 2
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/utascii.c | 2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r--  drivers/acpi/acpica/utcache.c | 2
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 2
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 2
-rw-r--r--  drivers/acpi/acpica/uteval.c | 2
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 2
-rw-r--r--  drivers/acpi/acpica/utids.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 2
-rw-r--r--  drivers/acpi/acpica/utlock.c | 2
-rw-r--r--  drivers/acpi/acpica/utobject.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 2
-rw-r--r--  drivers/acpi/acpica/utprint.c | 2
-rw-r--r--  drivers/acpi/acpica/utstrsuppt.c | 4
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r--  drivers/acpi/apei/erst.c | 2
-rw-r--r--  drivers/acpi/apei/hest.c | 12
-rw-r--r--  drivers/acpi/arm64/iort.c | 14
-rw-r--r--  drivers/acpi/battery.c | 33
-rw-r--r--  drivers/acpi/bgrt.c | 20
-rw-r--r--  drivers/acpi/bus.c | 179
-rw-r--r--  drivers/acpi/button.c | 15
-rw-r--r--  drivers/acpi/cppc_acpi.c | 8
-rw-r--r--  drivers/acpi/device_pm.c | 20
-rw-r--r--  drivers/acpi/device_sysfs.c | 64
-rw-r--r--  drivers/acpi/dock.c | 26
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 75
-rw-r--r--  drivers/acpi/osl.c | 75
-rw-r--r--  drivers/acpi/pci_root.c | 40
-rw-r--r--  drivers/acpi/platform_profile.c | 178
-rw-r--r--  drivers/acpi/power.c | 53
-rw-r--r--  drivers/acpi/property.c | 73
-rw-r--r--  drivers/acpi/scan.c | 139
-rw-r--r--  drivers/acpi/sysfs.c | 7
-rw-r--r--  drivers/acpi/thermal.c | 182
-rw-r--r--  drivers/acpi/utils.c | 86
-rw-r--r--  drivers/acpi/x86/s2idle.c | 14
-rw-r--r--  drivers/amba/bus.c | 234
-rw-r--r--  drivers/android/binderfs.c | 6
-rw-r--r--  drivers/ata/ahci_brcm.c | 14
-rw-r--r--  drivers/ata/pata_icside.c | 21
-rw-r--r--  drivers/atm/idt77252.c | 13
-rw-r--r--  drivers/atm/idt77252.h | 2
-rw-r--r--  drivers/auxdisplay/Kconfig | 3
-rw-r--r--  drivers/auxdisplay/cfag12864b.c | 4
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c | 4
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 17
-rw-r--r--  drivers/auxdisplay/ks0108.c | 4
-rw-r--r--  drivers/base/Kconfig | 8
-rw-r--r--  drivers/base/Makefile | 1
-rw-r--r--  drivers/base/arch_numa.c | 484
-rw-r--r--  drivers/base/auxiliary.c | 13
-rw-r--r--  drivers/base/base.h | 5
-rw-r--r--  drivers/base/bus.c | 19
-rw-r--r--  drivers/base/core.c | 218
-rw-r--r--  drivers/base/dd.c | 9
-rw-r--r--  drivers/base/devtmpfs.c | 15
-rw-r--r--  drivers/base/init.c | 1
-rw-r--r--  drivers/base/isa.c | 2
-rw-r--r--  drivers/base/memory.c | 35
-rw-r--r--  drivers/base/node.c | 33
-rw-r--r--  drivers/base/platform.c | 15
-rw-r--r--  drivers/base/power/clock_ops.c | 223
-rw-r--r--  drivers/base/power/domain.c | 89
-rw-r--r--  drivers/base/power/domain_governor.c | 102
-rw-r--r--  drivers/base/power/main.c | 9
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/base/property.c | 15
-rw-r--r--  drivers/base/regmap/regcache.c | 2
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 9
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c | 10
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 4
-rw-r--r--  drivers/base/swnode.c | 294
-rw-r--r--  drivers/base/test/Makefile | 1
-rw-r--r--  drivers/block/Kconfig | 17
-rw-r--r--  drivers/block/Makefile | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 2
-rw-r--r--  drivers/block/brd.c | 8
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_int.h | 8
-rw-r--r--  drivers/block/drbd/drbd_main.c | 16
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.c | 18
-rw-r--r--  drivers/block/drbd/drbd_req.h | 12
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 5
-rw-r--r--  drivers/block/floppy.c | 30
-rw-r--r--  drivers/block/loop.c | 98
-rw-r--r--  drivers/block/loop.h | 1
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 17
-rw-r--r--  drivers/block/n64cart.c | 178
-rw-r--r--  drivers/block/nbd.c | 68
-rw-r--r--  drivers/block/null_blk/main.c | 2
-rw-r--r--  drivers/block/null_blk/zoned.c | 24
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/block/ps3vram.c | 2
-rw-r--r--  drivers/block/rbd.c | 19
-rw-r--r--  drivers/block/rnbd/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/README | 1
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 18
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 8
-rw-r--r--  drivers/block/rsxx/dev.c | 2
-rw-r--r--  drivers/block/rsxx/dma.c | 3
-rw-r--r--  drivers/block/skd_main.c | 3670
-rw-r--r--  drivers/block/skd_s1120.h | 322
-rw-r--r--  drivers/block/sx8.c | 4
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/block/virtio_blk.c | 13
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 36
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 20
-rw-r--r--  drivers/block/zram/zram_drv.c | 7
-rw-r--r--  drivers/bluetooth/btintel.c | 21
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 16
-rw-r--r--  drivers/bluetooth/btqca.c | 67
-rw-r--r--  drivers/bluetooth/btqca.h | 1
-rw-r--r--  drivers/bluetooth/btqcomsmd.c | 27
-rw-r--r--  drivers/bluetooth/btrtl.c | 43
-rw-r--r--  drivers/bluetooth/btusb.c | 313
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 1
-rw-r--r--  drivers/bluetooth/hci_h5.c | 7
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 41
-rw-r--r--  drivers/bluetooth/hci_qca.c | 33
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 4
-rw-r--r--  drivers/bus/Kconfig | 2
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 1
-rw-r--r--  drivers/bus/fsl-mc/Kconfig | 7
-rw-r--r--  drivers/bus/fsl-mc/Makefile | 3
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 33
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 113
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 49
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c | 597
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c | 2
-rw-r--r--  drivers/bus/mhi/core/init.c | 12
-rw-r--r--  drivers/bus/mhi/core/main.c | 194
-rw-r--r--  drivers/bus/mhi/pci_generic.c | 381
-rw-r--r--  drivers/bus/mvebu-mbus.c | 2
-rw-r--r--  drivers/bus/simple-pm-bus.c | 3
-rw-r--r--  drivers/bus/sunxi-rsb.c | 215
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/char/hw_random/ingenic-trng.c | 6
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 38
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 3
-rw-r--r--  drivers/char/hw_random/optee-rng.c | 3
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 5
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 24
-rw-r--r--  drivers/char/mem.c | 93
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 2
-rw-r--r--  drivers/char/random.c | 17
-rw-r--r--  drivers/char/tpm/Kconfig | 10
-rw-r--r--  drivers/char/tpm/Makefile | 2
-rw-r--r--  drivers/char/tpm/eventlog/tpm1.c | 1
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 2
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 1
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 179
-rw-r--r--  drivers/char/tpm/tpm.h | 4
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 50
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c | 790
-rw-r--r--  drivers/clk/Kconfig | 12
-rw-r--r--  drivers/clk/Makefile | 8
-rw-r--r--  drivers/clk/at91/at91rm9200.c | 3
-rw-r--r--  drivers/clk/at91/at91sam9260.c | 16
-rw-r--r--  drivers/clk/at91/at91sam9g45.c | 3
-rw-r--r--  drivers/clk/at91/at91sam9n12.c | 3
-rw-r--r--  drivers/clk/at91/at91sam9rl.c | 3
-rw-r--r--  drivers/clk/at91/at91sam9x5.c | 20
-rw-r--r--  drivers/clk/at91/sama5d2.c | 3
-rw-r--r--  drivers/clk/at91/sama5d3.c | 2
-rw-r--r--  drivers/clk/at91/sama5d4.c | 3
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 2
-rw-r--r--  drivers/clk/clk-ast2600.c | 37
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 15
-rw-r--r--  drivers/clk/clk-bd718x7.c | 12
-rw-r--r--  drivers/clk/clk-divider.c | 9
-rw-r--r--  drivers/clk/clk-efm32gg.c | 84
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 39
-rw-r--r--  drivers/clk/clk-fixed-mmio.c | 2
-rw-r--r--  drivers/clk/clk-k210.c | 1007
-rw-r--r--  drivers/clk/clk-npcm7xx.c | 108
-rw-r--r--  drivers/clk/clk-qoriq.c | 62
-rw-r--r--  drivers/clk/clk-si570.c | 16
-rw-r--r--  drivers/clk/clk-tango4.c | 85
-rw-r--r--  drivers/clk/clk-u300.c | 1199
-rw-r--r--  drivers/clk/clk-versaclock5.c | 64
-rw-r--r--  drivers/clk/clk-xgene.c | 5
-rw-r--r--  drivers/clk/clk.c | 24
-rw-r--r--  drivers/clk/imx/Kconfig | 2
-rw-r--r--  drivers/clk/imx/clk-imx31.c | 10
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx6sl.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 12
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 12
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 22
-rw-r--r--  drivers/clk/imx/clk-imx8qxp.c | 26
-rw-r--r--  drivers/clk/mediatek/Kconfig | 11
-rw-r--r--  drivers/clk/mediatek/clk-mux.c | 89
-rw-r--r--  drivers/clk/mediatek/clk-mux.h | 14
-rw-r--r--  drivers/clk/meson/axg.c | 3
-rw-r--r--  drivers/clk/meson/axg.h | 1
-rw-r--r--  drivers/clk/meson/clk-pll.c | 10
-rw-r--r--  drivers/clk/meson/meson8b.c | 45
-rw-r--r--  drivers/clk/mmp/clk-audio.c | 6
-rw-r--r--  drivers/clk/mstar/Kconfig | 9
-rw-r--r--  drivers/clk/mstar/Makefile | 6
-rw-r--r--  drivers/clk/mstar/clk-msc313-mpll.c | 155
-rw-r--r--  drivers/clk/mvebu/ap-cpu-clk.c | 2
-rw-r--r--  drivers/clk/qcom/Kconfig | 61
-rw-r--r--  drivers/clk/qcom/Makefile | 7
-rw-r--r--  drivers/clk/qcom/a7-pll.c | 100
-rw-r--r--  drivers/clk/qcom/apcs-sdx55.c | 149
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 209
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 4
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 9
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 57
-rw-r--r--  drivers/clk/qcom/clk-regmap.c | 1
-rw-r--r--  drivers/clk/qcom/clk-rpm.c | 63
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 49
-rw-r--r--  drivers/clk/qcom/gcc-ipq4019.c | 7
-rw-r--r--  drivers/clk/qcom/gcc-msm8998.c | 143
-rw-r--r--  drivers/clk/qcom/gcc-sc7180.c | 68
-rw-r--r--  drivers/clk/qcom/gcc-sc7280.c | 3603
-rw-r--r--  drivers/clk/qcom/gcc-sc8180x.c | 4629
-rw-r--r--  drivers/clk/qcom/gcc-sdm660.c | 7
-rw-r--r--  drivers/clk/qcom/gcc-sm8250.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-sm8350.c | 3890
-rw-r--r--  drivers/clk/qcom/gdsc.c | 10
-rw-r--r--  drivers/clk/qcom/gdsc.h | 3
-rw-r--r--  drivers/clk/qcom/gpucc-msm8998.c | 18
-rw-r--r--  drivers/clk/qcom/gpucc-sdm660.c | 349
-rw-r--r--  drivers/clk/qcom/lpass-gfm-sm8250.c | 8
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 16
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 29
-rw-r--r--  drivers/clk/qcom/mmcc-msm8998.c | 12
-rw-r--r--  drivers/clk/qcom/mmcc-sdm660.c | 2864
-rw-r--r--  drivers/clk/qcom/videocc-sm8250.c | 39
-rw-r--r--  drivers/clk/renesas/Kconfig | 5
-rw-r--r--  drivers/clk/renesas/Makefile | 1
-rw-r--r--  drivers/clk/renesas/r8a7796-cpg-mssr.c | 5
-rw-r--r--  drivers/clk/renesas/r8a77965-cpg-mssr.c | 5
-rw-r--r--  drivers/clk/renesas/r8a77990-cpg-mssr.c | 5
-rw-r--r--  drivers/clk/renesas/r8a77995-cpg-mssr.c | 5
-rw-r--r--  drivers/clk/renesas/r8a779a0-cpg-mssr.c | 67
-rw-r--r--  drivers/clk/renesas/rcar-cpg-lib.c | 270
-rw-r--r--  drivers/clk/renesas/rcar-cpg-lib.h | 33
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 267
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-half-divider.c | 2
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 6
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c | 6
-rw-r--r--  drivers/clk/rockchip/clk.c | 4
-rw-r--r--  drivers/clk/sifive/fu540-prci.h | 5
-rw-r--r--  drivers/clk/sifive/sifive-prci.c | 5
-rw-r--r--  drivers/clk/sirf/Makefile | 6
-rw-r--r--  drivers/clk/sirf/atlas6.h | 32
-rw-r--r--  drivers/clk/sirf/clk-atlas6.c | 150
-rw-r--r--  drivers/clk/sirf/clk-atlas7.c | 1682
-rw-r--r--  drivers/clk/sirf/clk-common.c | 1037
-rw-r--r--  drivers/clk/sirf/clk-prima2.c | 149
-rw-r--r--  drivers/clk/sirf/prima2.h | 26
-rw-r--r--  drivers/clk/socfpga/clk-agilex.c | 88
-rw-r--r--  drivers/clk/socfpga/clk-periph-s10.c | 53
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c | 3
-rw-r--r--  drivers/clk/socfpga/clk-pll-s10.c | 83
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 3
-rw-r--r--  drivers/clk/socfpga/stratix10-clk.h | 17
-rw-r--r--  drivers/clk/spear/spear1310_clock.c | 1
-rw-r--r--  drivers/clk/spear/spear1340_clock.c | 1
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 6
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 7
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 53
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 10
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 1150
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h616.h | 56
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-a10-ve.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 32
-rw-r--r--  drivers/clk/tegra/Kconfig | 3
-rw-r--r--  drivers/clk/tegra/Makefile | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra124-emc.c | 41
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 26
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 7
-rw-r--r--  drivers/clk/tegra/clk.h | 18
-rw-r--r--  drivers/clk/tegra/cvb.c | 1
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 3
-rw-r--r--  drivers/clk/ti/clockdomain.c | 2
-rw-r--r--  drivers/clk/ti/dpll.c | 2
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 20
-rw-r--r--  drivers/clk/ti/dpll44xx.c | 6
-rw-r--r--  drivers/clk/ti/gate.c | 2
-rw-r--r--  drivers/clk/versatile/clk-icst.c | 7
-rw-r--r--  drivers/clk/xilinx/Kconfig | 19
-rw-r--r--  drivers/clk/xilinx/Makefile | 2
-rw-r--r--  drivers/clk/xilinx/xlnx_vcu.c | 743
-rw-r--r--  drivers/clk/zte/Makefile | 4
-rw-r--r--  drivers/clk/zte/clk-zx296702.c | 741
-rw-r--r--  drivers/clk/zte/clk-zx296718.c | 1074
-rw-r--r--  drivers/clk/zte/clk.c | 446
-rw-r--r--  drivers/clk/zte/clk.h | 174
-rw-r--r--  drivers/clk/zynq/clkc.c | 73
-rw-r--r--  drivers/clk/zynq/pll.c | 12
-rw-r--r--  drivers/clk/zynqmp/divider.c | 1
-rw-r--r--  drivers/clocksource/Kconfig | 37
-rw-r--r--  drivers/clocksource/Makefile | 5
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 3
-rw-r--r--  drivers/clocksource/mxs_timer.c | 5
-rw-r--r--  drivers/clocksource/sh_cmt.c | 16
-rw-r--r--  drivers/clocksource/timer-atlas7.c | 281
-rw-r--r--  drivers/clocksource/timer-davinci.c | 5
-rw-r--r--  drivers/clocksource/timer-efm32.c | 278
-rw-r--r--  drivers/clocksource/timer-microchip-pit64b.c | 86
-rw-r--r--  drivers/clocksource/timer-prima2.c | 242
-rw-r--r--  drivers/clocksource/timer-tango-xtal.c | 57
-rw-r--r--  drivers/clocksource/timer-u300.c | 457
-rw-r--r--  drivers/counter/ti-eqep.c | 35
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 5
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 10
-rw-r--r--  drivers/cpufreq/Makefile | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 75
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 24
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/freq_table.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 61
-rw-r--r--  drivers/cpufreq/loongson1-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 9
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 50
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sfi-cpufreq.c | 127
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/tango-cpufreq.c | 38
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/tegra20-cpufreq.c | 45
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 3
-rw-r--r--  drivers/crypto/Kconfig | 36
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/allwinner/Kconfig | 9
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 196
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 52
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 8
-rw-r--r--  drivers/crypto/bcm/cipher.c | 6
-rw-r--r--  drivers/crypto/bcm/cipher.h | 4
-rw-r--r--  drivers/crypto/bcm/spu.c | 20
-rw-r--r--  drivers/crypto/bcm/spu2.c | 6
-rw-r--r--  drivers/crypto/bcm/spu2.h | 8
-rw-r--r--  drivers/crypto/bcm/spum.h | 22
-rw-r--r--  drivers/crypto/bcm/util.c | 4
-rw-r--r--  drivers/crypto/bcm/util.h | 26
-rw-r--r--  drivers/crypto/caam/debugfs.c | 4
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c | 8
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 1
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 1
-rw-r--r--  drivers/crypto/geode-aes.c | 2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h | 8
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 169
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 193
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 33
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 42
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 23
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 6
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 1
-rw-r--r--  drivers/crypto/keembay/Kconfig | 31
-rw-r--r--  drivers/crypto/keembay/Makefile | 3
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-hcu-core.c | 1264
-rw-r--r--  drivers/crypto/keembay/ocs-aes.c | 10
-rw-r--r--  drivers/crypto/keembay/ocs-hcu.c | 840
-rw-r--r--  drivers/crypto/keembay/ocs-hcu.h | 106
-rw-r--r--  drivers/crypto/marvell/Kconfig | 15
-rw-r--r--  drivers/crypto/marvell/Makefile | 1
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c | 10
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h | 35
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c | 34
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c | 59
-rw-r--r--  drivers/crypto/marvell/cesa/tdma.c | 52
-rw-r--r--  drivers/crypto/marvell/octeontx2/Makefile | 10
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 137
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h | 464
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 202
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h | 197
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 428
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 353
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 61
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 713
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 356
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 1415
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 162
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 29
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 1758
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h | 178
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 410
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 167
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 541
-rw-r--r--  drivers/crypto/mediatek/Makefile | 3
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 1271
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 586
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.h | 231
-rw-r--r--  drivers/crypto/mediatek/mtk-regs.h | 190
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 1353
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 1807
-rw-r--r--  drivers/crypto/picoxcell_crypto_regs.h | 115
-rw-r--r--  drivers/crypto/qat/Kconfig | 2
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 14
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 17
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 27
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 12
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 28
-rw-r--r--  drivers/crypto/sahara.c | 7
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 50
-rw-r--r--  drivers/crypto/talitos.h | 1
-rw-r--r--  drivers/crypto/vmx/aes.c | 1
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.h | 6
-rw-r--r--  drivers/crypto/vmx/vmx.c | 7
-rw-r--r--  drivers/cxl/Kconfig | 53
-rw-r--r--  drivers/cxl/Makefile | 7
-rw-r--r--  drivers/cxl/bus.c | 29
-rw-r--r--  drivers/cxl/cxl.h | 95
-rw-r--r--  drivers/cxl/mem.c | 1552
-rw-r--r--  drivers/cxl/pci.h | 31
-rw-r--r--  drivers/dax/bus.c | 24
-rw-r--r--  drivers/dax/bus.h | 2
-rw-r--r--  drivers/dax/device.c | 8
-rw-r--r--  drivers/dax/kmem.c | 7
-rw-r--r--  drivers/dax/pmem/compat.c | 3
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/devfreq/devfreq.c | 11
-rw-r--r--  drivers/devfreq/governor.h | 2
-rw-r--r--  drivers/devfreq/governor_passive.c | 44
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 2
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c | 4
-rw-r--r--  drivers/dma-buf/Kconfig | 8
-rw-r--r--  drivers/dma-buf/dma-buf.c | 141
-rw-r--r--  drivers/dma-buf/dma-fence.c | 70
-rw-r--r--  drivers/dma-buf/dma-heap.c | 14
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 25
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c | 25
-rw-r--r--  drivers/dma-buf/st-dma-fence.c | 7
-rw-r--r--  drivers/dma/Kconfig | 30
-rw-r--r--  drivers/dma/Makefile | 4
-rw-r--r--  drivers/dma/at_hdmac.c | 19
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 28
-rw-r--r--  drivers/dma/coh901318.c | 2808
-rw-r--r--  drivers/dma/coh901318.h | 141
-rw-r--r--  drivers/dma/coh901318_lli.c | 313
-rw-r--r--  drivers/dma/dma-jz4780.c | 14
-rw-r--r--  drivers/dma/dmaengine.c | 1
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 698
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 34
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 4
-rw-r--r--  drivers/dma/dw/core.c | 6
-rw-r--r--  drivers/dma/fsldma.c | 6
-rw-r--r--  drivers/dma/hsu/pci.c | 21
-rw-r--r--  drivers/dma/idxd/device.c | 23
-rw-r--r--  drivers/dma/idxd/dma.c | 6
-rw-r--r--  drivers/dma/idxd/idxd.h | 2
-rw-r--r--  drivers/dma/idxd/init.c | 16
-rw-r--r--  drivers/dma/idxd/irq.c | 122
-rw-r--r--  drivers/dma/idxd/sysfs.c | 4
-rw-r--r--  drivers/dma/imx-sdma.c | 46
-rw-r--r--  drivers/dma/lgm/Kconfig | 10
-rw-r--r--  drivers/dma/lgm/Makefile | 2
-rw-r--r--  drivers/dma/lgm/lgm-dma.c | 1739
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c | 1
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 4
-rw-r--r--  drivers/dma/mmp_pdma.c | 14
-rw-r--r--  drivers/dma/owl-dma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 35
-rw-r--r--  drivers/dma/qcom/gpi.c | 14
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 112
-rw-r--r--  drivers/dma/sirf-dma.c | 1170
-rw-r--r--  drivers/dma/ste_dma40.c | 2
-rw-r--r--  drivers/dma/stm32-mdma.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.c | 140
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 13
-rw-r--r--  drivers/dma/zx_dma.c | 941
-rw-r--r--  drivers/edac/Kconfig | 15
-rw-r--r--  drivers/edac/Makefile | 7
-rw-r--r--  drivers/edac/amd64_edac.c | 332
-rw-r--r--  drivers/edac/amd64_edac.h | 11
-rw-r--r--  drivers/edac/amd64_edac_dbg.c | 55
-rw-r--r--  drivers/edac/amd64_edac_inj.c | 235
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 2
-rw-r--r--  drivers/edac/xgene_edac.c | 2
-rw-r--r--  drivers/firewire/core-device.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/smc.c | 42
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 11
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 5
-rw-r--r--  drivers/firmware/google/coreboot_table.h | 2
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c | 4
-rw-r--r--  drivers/firmware/google/memconsole-coreboot.c | 4
-rw-r--r--  drivers/firmware/google/vpd.c | 4
-rw-r--r--  drivers/firmware/imx/Kconfig | 1
-rw-r--r--  drivers/firmware/qcom_scm.c | 16
-rw-r--r--  drivers/firmware/smccc/smccc.c | 6
-rw-r--r--  drivers/fpga/Kconfig | 11
-rw-r--r--  drivers/fpga/Makefile | 2
-rw-r--r--  drivers/fpga/dfl-fme-perf.c | 6
-rw-r--r--  drivers/fpga/dfl-n3000-nios.c | 588
-rw-r--r--  drivers/fpga/dfl-pci.c | 165
-rw-r--r--  drivers/fpga/dfl.c | 4
-rw-r--r--  drivers/fpga/dfl.h | 85
-rw-r--r--  drivers/fpga/fpga-bridge.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 52
-rw-r--r--  drivers/gpio/Makefile | 4
-rw-r--r--  drivers/gpio/TODO | 2
-rw-r--r--  drivers/gpio/gpio-aggregator.c | 40
-rw-r--r--  drivers/gpio/gpio-bd70528.c | 59
-rw-r--r--  drivers/gpio/gpio-bd71828.c | 39
-rw-r--r--  drivers/gpio/gpio-bd9571mwv.c | 35
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 232
-rw-r--r--  drivers/gpio/gpio-intel-mid.c | 414
-rw-r--r--  drivers/gpio/gpio-max77620.c | 2
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 5
-rw-r--r--  drivers/gpio/gpio-msic.c | 314
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 153
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 2
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 2
-rw-r--r--  drivers/gpio/gpio-rcar.c | 85
-rw-r--r--  drivers/gpio/gpio-sl28cpld.c | 4
-rw-r--r--  drivers/gpio/gpio-tegra.c | 263
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 2
-rw-r--r--  drivers/gpio/gpio-visconti.c | 218
-rw-r--r--  drivers/gpio/gpio-vx855.c | 2
-rw-r--r--  drivers/gpio/gpio-wcove.c | 65
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 369
-rw-r--r--  drivers/gpio/gpio-zx.c | 289
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 147
-rw-r--r--  drivers/gpio/gpiolib-of.c | 11
-rw-r--r--  drivers/gpio/gpiolib-of.h | 5
-rw-r--r--  drivers/gpio/gpiolib.c | 71
-rw-r--r--  drivers/gpu/drm/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 157
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 220
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c | 176
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 212
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 478
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 254
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 129
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 483
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 703
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 352
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 73
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dc_common.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dc_common.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 70
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_edid_parser.c | 80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_edid_parser.h | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 73
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 126
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 99
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 263
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 188
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 135
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_table.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 8
-rw-r--r--  drivers/gpu/drm/amd/include/amd_pcie.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h | 345
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h | 1300
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h | 35
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h | 41
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/renoir_ip_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 259
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 584
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v12_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 166
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 137
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 222
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 373
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 122
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 1307
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 229
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 93
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 4
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_cursor.c | 55
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 25
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c | 17
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 8
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c | 46
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 57
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/thc63lvd1024.c | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 67
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 81
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 33
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 7
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 125
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 130
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 10
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 650
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 33
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 16
-rw-r--r--  drivers/gpu/drm/drm_dsc.c | 30
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 8
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 110
-rw-r--r--  drivers/gpu/drm/drm_encoder.c | 113
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 46
-rw-r--r--  drivers/gpu/drm/drm_file.c | 76
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 31
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 141
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 44
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 25
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 2
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 51
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 51
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 4
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 59
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 179
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 66
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 8
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 11
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 48
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 37
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 15
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/gem.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/gma_device.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 20
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/intel_i2c.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c | 562
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 1017
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | 79
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 603
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 377
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 679
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h | 80
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 966
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_output.c | 74
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_output.h | 76
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_tmd_vid.c | 197
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_tpo_vid.c | 83
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 36
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 21
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 34
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_modes.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 12
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_reg.h | 14
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 805
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h | 38
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 104
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 10
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 20
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 61
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 24
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 30
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 926
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 24
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 80
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 324
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 806
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 292
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 4594
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 206
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 2785
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 692
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 404
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 178
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 74
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 1363
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 683
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 309
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 284
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 162
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 553
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 1406
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.h | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 143
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 213
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 69
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 209
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 33
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/dma_resv_utils.c | 17
-rw-r--r--  drivers/gpu/drm/i915/dma_resv_utils.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 132
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 113
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 104
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 112
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 44
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 76
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 79
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 54
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 171
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 635
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 127
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 93
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 167
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 59
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_stats.h | 60
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 3896
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 197
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 49
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 5418
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 167
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 41
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c (renamed from drivers/gpu/drm/i915/intel_region_lmem.c)4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.h (renamed from drivers/gpu/drm/i915/intel_region_lmem.h)2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c101
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c278
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c113
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c654
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c203
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c4741
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c173
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c4701
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c196
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c457
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c54
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c284
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c81
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h41
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c76
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c9
-rw-r--r--drivers/gpu/drm/i915/i915_active.c35
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c30
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c763
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h125
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c163
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c25
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c475
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h3
-rw-r--r--drivers/gpu/drm/i915/i915_mitigations.c146
-rw-r--r--drivers/gpu/drm/i915/i915_mitigations.h13
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c17
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c117
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h35
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h81
-rw-r--r--drivers/gpu/drm/i915/i915_request.c178
-rw-r--r--drivers/gpu/drm/i915/i915_request.h47
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c32
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h7
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h10
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c33
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c25
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h15
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c159
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c136
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h13
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c39
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1108
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h7
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c28
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c110
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c19
-rw-r--r--drivers/gpu/drm/imx/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c95
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c109
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c109
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c131
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c69
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c93
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c60
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h14
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c3
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/mediatek/Makefile5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c223
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c89
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c197
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c254
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c194
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c57
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c108
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.h28
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c503
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h100
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c88
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c50
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c195
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c113
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h49
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c139
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c58
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c90
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c87
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c24
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c21
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c31
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c15
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c21
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c18
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c242
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c106
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h192
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h80
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h73
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/fifo.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4594
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c365
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c)56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c)30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c)18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h8
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig120
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile19
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig10
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c1385
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig135
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c87
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c202
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1949
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.h456
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h72
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c229
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h347
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c57
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c157
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c153
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c73
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c59
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c41
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig20
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c665
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c3
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c870
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c39
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c59
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c223
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c24
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/r100.c27
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c1
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c33
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c98
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c42
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h11
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c123
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c9
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c9
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c9
-rw-r--r--drivers/gpu/drm/stm/ltdc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c46
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c109
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c26
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/falcon.c9
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c9
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tegra/vic.c35
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile2
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c114
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.h40
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c45
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c15
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c7
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c11
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c38
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h30
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c389
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h34
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c26
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c248
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c11
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_irq.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c81
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c54
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile6
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c)126
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c240
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h94
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c155
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c10
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c15
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c4
-rw-r--r--drivers/greybus/es2.c9
-rw-r--r--drivers/greybus/greybus_trace.h6
-rw-r--r--drivers/hid/Kconfig20
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c8
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h2
-rw-r--r--drivers/hid/hid-chicony.c55
-rw-r--r--drivers/hid/hid-core.c9
-rw-r--r--drivers/hid/hid-google-hammer.c85
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input.c14
-rw-r--r--drivers/hid/hid-ite.c12
-rw-r--r--drivers/hid/hid-lg-g15.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c12
-rw-r--r--drivers/hid/hid-logitech-hidpp.c248
-rw-r--r--drivers/hid/hid-multitouch.c17
-rw-r--r--drivers/hid/hid-playstation.c1351
-rw-r--r--drivers/hid/hid-quirks.c26
-rw-r--r--drivers/hid/hid-roccat-arvo.c6
-rw-r--r--drivers/hid/hid-sensor-custom.c143
-rw-r--r--drivers/hid/hid-sony.c20
-rw-r--r--drivers/hid/hid-uclogic-core.c2
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/hid-wiimote-core.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig47
-rw-r--r--drivers/hid/i2c-hid/Makefile6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c143
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c254
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c116
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of.c143
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h22
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c27
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c55
-rw-r--r--drivers/hid/wacom_sys.c42
-rw-r--r--drivers/hid/wacom_wac.c7
-rw-r--r--drivers/hid/wacom_wac.h2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c2
-rw-r--r--drivers/hv/channel.c4
-rw-r--r--drivers/hv/channel_mgmt.c77
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_fcopy.c36
-rw-r--r--drivers/hv/hv_kvp.c122
-rw-r--r--drivers/hv/hv_snapshot.c89
-rw-r--r--drivers/hv/hv_util.c222
-rw-r--r--drivers/hv/vmbus_drv.c66
-rw-r--r--drivers/hwmon/Kconfig34
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ab8500.c224
-rw-r--r--drivers/hwmon/abx500.c487
-rw-r--r--drivers/hwmon/abx500.h69
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/aht10.c348
-rw-r--r--drivers/hwmon/amd_energy.c4
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c4
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/k10temp.c3
-rw-r--r--drivers/hwmon/lm70.c20
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig4
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c2
-rw-r--r--drivers/hwmon/pmbus/lm25066.c5
-rw-r--r--drivers/hwmon/pmbus/max16601.c91
-rw-r--r--drivers/hwmon/pmbus/max31785.c13
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c12
-rw-r--r--drivers/hwmon/pwm-fan.c168
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/tps23861.c601
-rw-r--r--drivers/hwmon/w83627ehf.c2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c122
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c822
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c189
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h505
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c35
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/Kconfig44
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c55
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c3
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h6
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c254
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c2
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c5
-rw-r--r--drivers/i2c/busses/i2c-efm32.c469
-rw-r--r--drivers/i2c/busses/i2c-elektor.c7
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c19
-rw-r--r--drivers/i2c/busses/i2c-imx.c85
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c5
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c97
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c46
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c122
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c4
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c59
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c66
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sirf.c475
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c17
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1008
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c29
-rw-r--r--drivers/i2c/busses/i2c-zx2967.c602
-rw-r--r--drivers/i2c/i2c-core-acpi.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c46
-rw-r--r--drivers/i2c/i2c-slave-testunit.c12
-rw-r--r--drivers/i2c/i2c-stub.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c112
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c163
-rw-r--r--drivers/i3c/device.c5
-rw-r--r--drivers/i3c/master.c8
-rw-r--r--drivers/i3c/master/Kconfig9
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/dw-i3c-master.c5
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c1478
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c6
-rw-r--r--drivers/iio/accel/kxcjk-1013.c32
-rw-r--r--drivers/iio/adc/Kconfig11
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c30
-rw-r--r--drivers/iio/adc/ad7476.c6
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c3
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c95
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c3
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c279
-rw-r--r--drivers/iio/adc/qcom-vadc-common.h177
-rw-r--r--drivers/iio/adc/sc27xx_adc.c2
-rw-r--r--drivers/iio/adc/stm32-adc-core.c29
-rw-r--r--drivers/iio/adc/stm32-adc.c14
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c6
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c364
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c9
-rw-r--r--drivers/iio/adc/xilinx-xadc.h6
-rw-r--r--drivers/iio/chemical/bme680_core.c2
-rw-r--r--drivers/iio/chemical/pms7003.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c76
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.h15
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c31
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/dac/ad5766.c643
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/gyro/bmg160_core.c25
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c40
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/industrialio-core.c44
-rw-r--r--drivers/iio/inkern.c34
-rw-r--r--drivers/iio/light/apds9960.c8
-rw-r--r--drivers/iio/light/hid-sensor-als.c39
-rw-r--r--drivers/iio/light/tsl2583.c8
-rw-r--r--drivers/iio/light/vl6180.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c26
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c48
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c1049
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c43
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c46
-rw-r--r--drivers/iio/position/Kconfig16
-rw-r--r--drivers/iio/position/Makefile1
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c385
-rw-r--r--drivers/iio/pressure/ms5637.c77
-rw-r--r--drivers/iio/proximity/sx9310.c5
-rw-r--r--drivers/iio/temperature/mlx90632.c6
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c9
-rw-r--r--drivers/infiniband/core/cm.c8
-rw-r--r--drivers/infiniband/core/cma.c81
-rw-r--r--drivers/infiniband/core/cma_configfs.c16
-rw-r--r--drivers/infiniband/core/counters.c78
-rw-r--r--drivers/infiniband/core/device.c23
-rw-r--r--drivers/infiniband/core/iwpm_msg.c16
-rw-r--r--drivers/infiniband/core/iwpm_util.c6
-rw-r--r--drivers/infiniband/core/multicast.c1
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/restrack.c5
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c2
-rw-r--r--drivers/infiniband/core/sa_query.c26
-rw-r--r--drivers/infiniband/core/ucma.c135
-rw-r--r--drivers/infiniband/core/umem.c5
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c174
-rw-r--r--drivers/infiniband/core/user_mad.c17
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c117
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c49
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h25
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c33
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c46
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c8
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c16
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c4
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c2
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c14
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c5
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c12
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c47
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c8
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c8
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c10
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c116
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h84
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c791
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h141
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c30
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c458
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c49
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c331
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hmc.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c19
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c19
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c227
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c14
-rw-r--r--drivers/infiniband/hw/mlx5/main.c157
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h60
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c137
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c325
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c160
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h8
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c7
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h14
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c21
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c34
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h178
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c64
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c300
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h103
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c68
-rw-r--r--drivers/infiniband/sw/siw/siw.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c271
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c26
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c53
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c10
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c11
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c127
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c123
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c32
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c110
-rw-r--r--drivers/input/joydev.c7
-rw-r--r--drivers/input/joystick/Kconfig7
-rw-r--r--drivers/input/joystick/Makefile2
-rw-r--r--drivers/input/joystick/n64joy.c345
-rw-r--r--drivers/input/joystick/xpad.c18
-rw-r--r--drivers/input/keyboard/Kconfig6
-rw-r--r--drivers/input/keyboard/applespi.c23
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c79
-rw-r--r--drivers/input/keyboard/locomokbd.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c302
-rw-r--r--drivers/input/misc/ariel-pwrbutton.c6
-rw-r--r--drivers/input/misc/da7280.c3
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/synaptics.c7
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h6
-rw-r--r--drivers/input/serio/sa1111ps2.c4
-rw-r--r--drivers/input/serio/serport.c4
-rw-r--r--drivers/input/tablet/aiptek.c80
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/ads7846.c376
-rw-r--r--drivers/input/touchscreen/elants_i2c.c151
-rw-r--r--drivers/input/touchscreen/elo.c4
-rw-r--r--drivers/input/touchscreen/goodix.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c4
-rw-r--r--drivers/input/touchscreen/ili210x.c26
-rw-r--r--drivers/input/touchscreen/iqs5xx.c209
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c8
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c3
-rw-r--r--drivers/input/touchscreen/st1232.c53
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c1
-rw-r--r--drivers/input/touchscreen/sur40.c1
-rw-r--r--drivers/input/touchscreen/surface3_spi.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/input/touchscreen/zinitix.c4
-rw-r--r--drivers/interconnect/imx/imx.c3
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig41
-rw-r--r--drivers/interconnect/qcom/Makefile6
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c191
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h73
-rw-r--r--drivers/interconnect/qcom/msm8916.c241
-rw-r--r--drivers/interconnect/qcom/msm8939.c355
-rw-r--r--drivers/interconnect/qcom/qcs404.c242
-rw-r--r--drivers/interconnect/qcom/sdx55.c356
-rw-r--r--drivers/interconnect/qcom/sdx55.h70
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h29
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h47
-rw-r--r--drivers/iommu/amd/init.c113
-rw-r--r--drivers/iommu/amd/io_pgtable.c558
-rw-r--r--drivers/iommu/amd/iommu.c675
-rw-r--r--drivers/iommu/amd/iommu_v2.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c10
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c154
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h14
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c9
-rw-r--r--drivers/iommu/dma-iommu.c86
-rw-r--r--drivers/iommu/hyperv-iommu.c177
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cap_audit.c205
-rw-r--r--drivers/iommu/intel/cap_audit.h130
-rw-r--r--drivers/iommu/intel/dmar.c17
-rw-r--r--drivers/iommu/intel/iommu.c460
-rw-r--r--drivers/iommu/intel/irq_remapping.c10
-rw-r--r--drivers/iommu/intel/pasid.c18
-rw-r--r--drivers/iommu/intel/svm.c112
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c65
-rw-r--r--drivers/iommu/io-pgtable.c3
-rw-r--r--drivers/iommu/iommu.c54
-rw-r--r--drivers/iommu/iova.c39
-rw-r--r--drivers/iommu/ipmmu-vmsa.c53
-rw-r--r--drivers/iommu/msm_iommu.c10
-rw-r--r--drivers/iommu/mtk_iommu.c410
-rw-r--r--drivers/iommu/mtk_iommu.h12
-rw-r--r--drivers/iommu/tegra-gart.c7
-rw-r--r--drivers/ipack/ipack.c11
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-bcm2836.c4
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-ls-extirq.c2
-rw-r--r--drivers/irqchip/irq-mips-cpu.c7
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c180
-rw-r--r--drivers/irqchip/irq-sirfsoc.c134
-rw-r--r--drivers/irqchip/irq-sl28cpld.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c379
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c26
-rw-r--r--drivers/irqchip/irq-tango.c227
-rw-r--r--drivers/isdn/mISDN/Kconfig1
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile6
-rw-r--r--drivers/leds/blink/Kconfig20
-rw-r--r--drivers/leds/blink/Makefile2
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c888
-rw-r--r--drivers/leds/flash/Kconfig16
-rw-r--r--drivers/leds/flash/Makefile3
-rw-r--r--drivers/leds/flash/leds-rt8515.c397
-rw-r--r--drivers/leds/led-class.c3
-rw-r--r--drivers/leds/led-core.c20
-rw-r--r--drivers/leds/led-triggers.c10
-rw-r--r--drivers/leds/leds-apu.c11
-rw-r--r--drivers/leds/leds-ariel.c6
-rw-r--r--drivers/leds/leds-blinkm.c24
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-lm3530.c10
-rw-r--r--drivers/leds/leds-lm3533.c4
-rw-r--r--drivers/leds/leds-lm355x.c8
-rw-r--r--drivers/leds/leds-lm3642.c16
-rw-r--r--drivers/leds/leds-lp50xx.c83
-rw-r--r--drivers/leds/leds-max8997.c12
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/leds/leds-ss4200.c18
-rw-r--r--drivers/leds/leds-wm831x-status.c12
-rw-r--r--drivers/leds/leds.h6
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-tty.c183
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/lightnvm/core.c3
-rw-r--r--drivers/lightnvm/pblk-core.c5
-rw-r--r--drivers/lightnvm/pblk-gc.c3
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/lightnvm/pblk-recovery.c3
-rw-r--r--drivers/macintosh/adb-iop.c6
-rw-r--r--drivers/mailbox/arm_mhuv2.c30
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c8
-rw-r--r--drivers/mailbox/sprd-mailbox.c2
-rw-r--r--drivers/mailbox/tegra-hsp.c15
-rw-r--r--drivers/md/Kconfig3
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c12
-rw-r--r--drivers/md/bcache/btree.c21
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h36
-rw-r--r--drivers/md/bcache/journal.c4
-rw-r--r--drivers/md/bcache/request.c39
-rw-r--r--drivers/md/bcache/super.c79
-rw-r--r--drivers/md/bcache/sysfs.c29
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/bcache/writeback.h4
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c6
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-clone-target.c14
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c209
-rw-r--r--drivers/md/dm-dust.c2
-rw-r--r--drivers/md/dm-era-target.c93
-rw-r--r--drivers/md/dm-flakey.c6
-rw-r--r--drivers/md/dm-integrity.c224
-rw-r--r--drivers/md/dm-io.c4
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-log-writes.c10
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-snap.c24
-rw-r--r--drivers/md/dm-table.c414
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-writecache.c80
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm.c112
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md.c75
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h2
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c2
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c18
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c112
-rw-r--r--drivers/media/cec/core/cec-adap.c4
-rw-r--r--drivers/media/cec/core/cec-api.c2
-rw-r--r--drivers/media/cec/platform/Makefile1
-rw-r--r--drivers/media/common/videobuf2/Kconfig1
-rw-r--r--drivers/media/common/videobuf2/Makefile1
-rw-r--r--drivers/media/common/videobuf2/frame_vector.c223
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c11
-rw-r--r--drivers/media/common/videobuf2/videobuf2-memops.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c8
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/dvb-frontends/Kconfig11
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c1
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c1
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c1
-rw-r--r--drivers/media/dvb-frontends/mxl692.c1378
-rw-r--r--drivers/media/dvb-frontends/mxl692.h38
-rw-r--r--drivers/media/dvb-frontends/mxl692_defs.h548
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c1
-rw-r--r--drivers/media/i2c/Kconfig59
-rw-r--r--drivers/media/i2c/Makefile8
-rw-r--r--drivers/media/i2c/ccs-pll.c124
-rw-r--r--drivers/media/i2c/ccs-pll.h86
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c318
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c27
-rw-r--r--drivers/media/i2c/ccs/ccs-data.h2
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c29
-rw-r--r--drivers/media/i2c/ccs/ccs.h8
-rw-r--r--drivers/media/i2c/ccs/smiapp-reg-defs.h2
-rw-r--r--drivers/media/i2c/imx219.c23
-rw-r--r--drivers/media/i2c/imx258.c82
-rw-r--r--drivers/media/i2c/imx334.c1132
-rw-r--r--drivers/media/i2c/max9271.c5
-rw-r--r--drivers/media/i2c/max9286.c74
-rw-r--r--drivers/media/i2c/mt9m111.c17
-rw-r--r--drivers/media/i2c/mt9v111.c6
-rw-r--r--drivers/media/i2c/ov02a10.c2
-rw-r--r--drivers/media/i2c/ov5647.c1259
-rw-r--r--drivers/media/i2c/ov5648.c2624
-rw-r--r--drivers/media/i2c/ov5670.c3
-rw-r--r--drivers/media/i2c/ov5675.c6
-rw-r--r--drivers/media/i2c/ov6650.c28
-rw-r--r--drivers/media/i2c/ov8856.c4
-rw-r--r--drivers/media/i2c/ov8865.c2972
-rw-r--r--drivers/media/i2c/ov9640.c15
-rw-r--r--drivers/media/i2c/ov9640.h2
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/rdacm21.c623
-rw-r--r--drivers/media/i2c/st-mipid02.c21
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig21
-rw-r--r--drivers/media/pci/intel/ipu3/Makefile3
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c314
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h125
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c (renamed from drivers/media/pci/intel/ipu3/ipu3-cio2.c)56
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c25
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c16
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164.h2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c46
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c26
-rw-r--r--drivers/media/platform/Kconfig18
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/Makefile6
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c (renamed from drivers/staging/media/allegro-dvt/allegro-core.c)974
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c (renamed from drivers/staging/media/allegro-dvt/allegro-mail.c)21
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h (renamed from drivers/staging/media/allegro-dvt/allegro-mail.h)5
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c (renamed from drivers/staging/media/allegro-dvt/nal-h264.c)336
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.h (renamed from drivers/staging/media/allegro-dvt/nal-h264.h)0
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c824
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h350
-rw-r--r--drivers/media/platform/allegro-dvt/nal-rbsp.c310
-rw-r--r--drivers/media/platform/allegro-dvt/nal-rbsp.h61
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/aspeed-video.c6
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h1
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c46
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c44
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c17
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpif.c3
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c86
-rw-r--r--drivers/media/platform/davinci/vpif_display.h1
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c25
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h2
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c12
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h1
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c11
-rw-r--r--drivers/media/platform/meson/ge2d/ge2d.c1
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c12
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c3
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c74
-rw-r--r--drivers/media/platform/pxa_camera.c86
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss.c11
-rw-r--r--drivers/media/platform/qcom/venus/Makefile4
-rw-r--r--drivers/media/platform/qcom/venus/core.c49
-rw-r--r--drivers/media/platform/qcom/venus/core.h78
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c3
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c154
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c18
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h22
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c59
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs.h38
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c1317
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c65
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h67
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c319
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c326
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c80
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c48
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c129
-rw-r--r--drivers/media/platform/qcom/venus/venc.c202
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c138
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c2
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c5
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c42
-rw-r--r--drivers/media/platform/rcar_drif.c2
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/rcar_jpu.c6
-rw-r--r--drivers/media/platform/renesas-ceu.c58
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c36
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c34
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c11
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c1
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c87
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c9
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h1
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal-camerarx.c373
-rw-r--r--drivers/media/platform/ti-vpe/cal-video.c394
-rw-r--r--drivers/media/platform/ti-vpe/cal.c399
-rw-r--r--drivers/media/platform/ti-vpe/cal.h105
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/video-mux.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1.h20
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c10
-rw-r--r--drivers/media/radio/radio-isa.c9
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c4
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/img-ir/Kconfig1
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c2
-rw-r--r--drivers/media/rc/ir_toy.c1
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/mceusb.c11
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/serial_ir.c2
-rw-r--r--drivers/media/rc/sunxi-cir.c169
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c5
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c36
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.h7
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c8
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_ts.h1
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c38
-rw-r--r--drivers/media/tuners/it913x.c1
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c35
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c46
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c26
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/pwc/pwc-if.c22
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c179
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c552
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c11
-rw-r--r--drivers/media/usb/uvc/uvc_isight.c17
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvc_status.c44
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c62
-rw-r--r--drivers/media/usb/uvc/uvc_video.c162
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h99
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c49
-rw-r--r--drivers/media/v4l2-core/Makefile2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c180
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c321
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c42
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c15
-rw-r--r--drivers/memory/Kconfig17
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/dfl-emif.c207
-rw-r--r--drivers/memory/emif.c3
-rw-r--r--drivers/memory/mtk-smi.c44
-rw-r--r--drivers/memory/pl172.c4
-rw-r--r--drivers/memory/pl353-smc.c4
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c4
-rw-r--r--drivers/memory/tegra/Kconfig4
-rw-r--r--drivers/memory/tegra/mc.c7
-rw-r--r--drivers/memory/tegra/tegra124-emc.c368
-rw-r--r--drivers/memory/tegra/tegra124.c82
-rw-r--r--drivers/memory/tegra/tegra186-emc.c12
-rw-r--r--drivers/memory/tegra/tegra20-emc.c13
-rw-r--r--drivers/memory/tegra/tegra30-emc.c13
-rw-r--r--drivers/memory/ti-aemif.c8
-rw-r--r--drivers/memory/ti-emif-pm.c2
-rw-r--r--drivers/mfd/Kconfig22
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c43
-rw-r--r--drivers/mfd/acer-ec-a500.c202
-rw-r--r--drivers/mfd/altera-sysmgr.c3
-rw-r--r--drivers/mfd/arizona-core.c11
-rw-r--r--drivers/mfd/arizona-i2c.c11
-rw-r--r--drivers/mfd/arizona-spi.c138
-rw-r--r--drivers/mfd/arizona.h9
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/axp20x-rsb.c4
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bd9571mwv.c178
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/gateworks-gsc.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c28
-rw-r--r--drivers/mfd/intel-m10-bmc.c43
-rw-r--r--drivers/mfd/intel_msic.c425
-rw-r--r--drivers/mfd/iqs62x.c144
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/mcp-sa11x0.c3
-rw-r--r--drivers/mfd/mt6360-core.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c3
-rw-r--r--drivers/misc/Kconfig22
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/atmel_tclib.c200
-rw-r--r--drivers/misc/bcm-vk/Kconfig29
-rw-r--r--drivers/misc/bcm-vk/Makefile12
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h549
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_dev.c1652
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c1357
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.h163
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_sg.c275
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_sg.h61
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c339
-rw-r--r--drivers/misc/cardreader/rts5227.c5
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c16
-rw-r--r--drivers/misc/cxl/cxllib.c4
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c17
-rw-r--r--drivers/misc/fastrpc.c7
-rw-r--r--drivers/misc/habanalabs/Kconfig1
-rw-r--r--drivers/misc/habanalabs/common/Makefile10
-rw-r--r--drivers/misc/habanalabs/common/asid.c6
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c8
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c550
-rw-r--r--drivers/misc/habanalabs/common/context.c33
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c43
-rw-r--r--drivers/misc/habanalabs/common/device.c65
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c222
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h120
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c1
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c32
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c56
-rw-r--r--drivers/misc/habanalabs/common/memory.c683
-rw-r--r--drivers/misc/habanalabs/common/mmu/Makefile2
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c (renamed from drivers/misc/habanalabs/common/mmu.c)130
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c (renamed from drivers/misc/habanalabs/common/mmu_v1.c)16
-rw-r--r--drivers/misc/habanalabs/common/pci/Makefile (renamed from drivers/net/ethernet/aurora/Makefile)2
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c (renamed from drivers/misc/habanalabs/common/pci.c)75
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c673
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c21
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c184
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h1
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c11
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c5
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h14
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h28
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h4
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h5
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h27
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h5
-rw-r--r--drivers/misc/lkdtm/Makefile1
-rw-r--r--drivers/misc/mei/bus.c24
-rw-r--r--drivers/misc/mei/client.c291
-rw-r--r--drivers/misc/mei/client.h8
-rw-r--r--drivers/misc/mei/debugfs.c1
-rw-r--r--drivers/misc/mei/hbm.c165
-rw-r--r--drivers/misc/mei/hbm.h4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c10
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/hw.h61
-rw-r--r--drivers/misc/mei/init.c5
-rw-r--r--drivers/misc/mei/interrupt.c43
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c5
-rw-r--r--drivers/misc/ocxl/file.c3
-rw-r--r--drivers/misc/pci_endpoint_test.c1
-rw-r--r--drivers/misc/pti.c978
-rw-r--r--drivers/misc/pvpanic.c78
-rw-r--r--drivers/misc/sgi-xp/xpnet.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c19
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h2
-rw-r--r--drivers/mmc/core/Kconfig8
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/block.c13
-rw-r--r--drivers/mmc/core/core.c11
-rw-r--r--drivers/mmc/core/crypto.c48
-rw-r--r--drivers/mmc/core/crypto.h40
-rw-r--r--drivers/mmc/core/host.c45
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/mmc_test.c31
-rw-r--r--drivers/mmc/core/queue.c10
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sd.h2
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c6
-rw-r--r--drivers/mmc/host/Kconfig43
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/android-goldfish.c545
-rw-r--r--drivers/mmc/host/atmel-mci.c52
-rw-r--r--drivers/mmc/host/au1xmmc.c14
-rw-r--r--drivers/mmc/host/cavium.c5
-rw-r--r--drivers/mmc/host/cb710-mmc.c12
-rw-r--r--drivers/mmc/host/cqhci-core.c (renamed from drivers/mmc/host/cqhci.c)69
-rw-r--r--drivers/mmc/host/cqhci-crypto.c242
-rw-r--r--drivers/mmc/host/cqhci-crypto.h47
-rw-r--r--drivers/mmc/host/cqhci.h84
-rw-r--r--drivers/mmc/host/dw_mmc-zx.c234
-rw-r--r--drivers/mmc/host/dw_mmc-zx.h32
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c1
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c37
-rw-r--r--drivers/mmc/host/mmci.c74
-rw-r--r--drivers/mmc/host/mtk-sd.c18
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c7
-rw-r--r--drivers/mmc/host/omap_hsmmc.c18
-rw-r--r--drivers/mmc/host/owl-mmc.c9
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c16
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c91
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c5
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c3
-rw-r--r--drivers/mmc/host/sdhci-iproc.c18
-rw-r--r--drivers/mmc/host/sdhci-msm.c322
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c65
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed-test.c105
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c289
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c28
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c20
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h7
-rw-r--r--drivers/mmc/host/sdhci-sirf.c235
-rw-r--r--drivers/mmc/host/sdhci-sprd.c6
-rw-r--r--drivers/mmc/host/sdhci-xenon.c8
-rw-r--r--drivers/mmc/host/sdhci.c9
-rw-r--r--drivers/mmc/host/sdhci_am654.c28
-rw-r--r--drivers/mmc/host/sunxi-mmc.c30
-rw-r--r--drivers/mmc/host/tifm_sd.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c8
-rw-r--r--drivers/mmc/host/uniphier-sd.c14
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c9
-rw-r--r--drivers/mmc/host/wbsd.c35
-rw-r--r--drivers/most/core.c6
-rw-r--r--drivers/mtd/devices/phram.c6
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c2
-rw-r--r--drivers/mtd/maps/pci.c8
-rw-r--r--drivers/mtd/mtdswap.c1
-rw-r--r--drivers/mtd/nand/raw/Kconfig10
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/omap2.c15
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c4
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c727
-rw-r--r--drivers/mtd/nand/spi/core.c14
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/afs.c4
-rw-r--r--drivers/mtd/parsers/parser_imagetag.c4
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c170
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/core.c49
-rw-r--r--drivers/mtd/spi-nor/core.h2
-rw-r--r--drivers/mtd/spi-nor/sfdp.c5
-rw-r--r--drivers/mtd/spi-nor/sst.c52
-rw-r--r--drivers/mtd/ubi/eba.c1
-rw-r--r--drivers/mtd/ubi/io.c7
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arcnet/arc-rimi.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h6
-rw-r--r--drivers/net/arcnet/arcnet.c73
-rw-r--r--drivers/net/arcnet/com20020-isa.c4
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020_cs.c4
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arcnet/com90xx.c4
-rw-r--r--drivers/net/bareudp.c38
-rw-r--r--drivers/net/bonding/bond_3ad.c26
-rw-r--r--drivers/net/bonding/bond_main.c174
-rw-r--r--drivers/net/bonding/bond_options.c55
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/caif/caif_virtio.c8
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/can/Makefile7
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/c_can/c_can.c4
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c1338
-rw-r--r--drivers/net/can/dev/Makefile11
-rw-r--r--drivers/net/can/dev/bittiming.c261
-rw-r--r--drivers/net/can/dev/dev.c470
-rw-r--r--drivers/net/can/dev/length.c95
-rw-r--r--drivers/net/can/dev/netlink.c379
-rw-r--r--drivers/net/can/dev/rx-offload.c (renamed from drivers/net/can/rx-offload.c)5
-rw-r--r--drivers/net/can/dev/skb.c231
-rw-r--r--drivers/net/can/flexcan.c130
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c4
-rw-r--r--drivers/net/can/kvaser_pciefd.c6
-rw-r--r--drivers/net/can/m_can/Makefile4
-rw-r--r--drivers/net/can/m_can/m_can.c10
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c (renamed from drivers/net/can/m_can/tcan4x5x.c)148
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c135
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h57
-rw-r--r--drivers/net/can/mscan/mscan.c4
-rw-r--r--drivers/net/can/pch_can.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c4
-rw-r--r--drivers/net/can/rcar/Kconfig4
-rw-r--r--drivers/net/can/rcar/rcar_can.c4
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/sja1000/tscan1.c4
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/can/spi/hi311x.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c158
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h2
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/usb/esd_usb2.c4
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c2
-rw-r--r--drivers/net/can/usb/mcba_usb.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c8
-rw-r--r--drivers/net/can/usb/ucan.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c12
-rw-r--r--drivers/net/can/xilinx_can.c6
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c225
-rw-r--r--drivers/net/dsa/b53/b53_priv.h26
-rw-r--r--drivers/net/dsa/b53/b53_regs.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c64
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c12
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h1
-rw-r--r--drivers/net/dsa/dsa_loop.c74
-rw-r--r--drivers/net/dsa/hirschmann/Kconfig2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c452
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h23
-rw-r--r--drivers/net/dsa/lan9303-core.c12
-rw-r--r--drivers/net/dsa/lantiq_gswip.c139
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c108
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c98
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c29
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h8
-rw-r--r--drivers/net/dsa/mt7530.c169
-rw-r--r--drivers/net/dsa/mt7530.h20
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig13
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c633
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c65
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h187
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c73
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h24
-rw-r--r--drivers/net/dsa/ocelot/Kconfig2
-rw-r--r--drivers/net/dsa/ocelot/felix.c1152
-rw-r--r--drivers/net/dsa/ocelot/felix.h18
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c67
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c49
-rw-r--r--drivers/net/dsa/qca/ar9331.c165
-rw-r--r--drivers/net/dsa/qca8k.c40
-rw-r--r--drivers/net/dsa/realtek-smi-core.h12
-rw-r--r--drivers/net/dsa/rtl8366.c156
-rw-r--r--drivers/net/dsa/rtl8366rb.c276
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c9
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c363
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c2
-rw-r--r--drivers/net/dsa/xrs700x/Kconfig26
-rw-r--r--drivers/net/dsa/xrs700x/Makefile4
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c743
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.h42
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c147
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c164
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_reg.h208
-rw-r--r--drivers/net/ethernet/3com/3c509.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c39
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/aurora/Kconfig23
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1520
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h316
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c692
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.h96
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c190
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h14
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c89
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c114
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c499
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h344
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h59
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c36
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/broadcom/unimac.h68
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c13
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c19
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c108
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c41
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c54
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c152
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c156
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h17
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c93
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h9
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c61
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c64
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c16
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c552
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c101
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c165
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c330
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c342
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h54
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c204
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c189
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h10
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c15
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c471
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e100.c92
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c46
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c949
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c752
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c398
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c679
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h174
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c667
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c155
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c156
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c662
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c1059
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c184
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c107
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c48
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c42
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c84
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c626
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c127
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c587
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h163
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c272
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c166
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h84
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c220
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c261
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c130
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c432
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c652
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c139
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h614
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c181
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c236
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h135
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c551
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c155
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c93
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c984
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c370
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c499
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c1653
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c215
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c517
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c678
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c431
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c275
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c556
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c588
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c1619
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c1640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c1633
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h193
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h434
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c214
-rw-r--r--drivers/net/ethernet/micrel/Kconfig4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c114
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c385
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h25
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c751
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c885
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c175
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c602
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c19
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h295
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c331
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c4
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c83
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c49
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c29
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c134
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c35
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c196
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c533
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h37
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c6
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c30
-rw-r--r--drivers/net/ethernet/rocker/rocker.h6
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c63
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/rx.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c10
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c162
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c57
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c607
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h28
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c538
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.h34
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c91
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c12
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c8
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c18
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h29
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c94
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/geneve.c19
-rw-r--r--drivers/net/gtp.c38
-rw-r--r--drivers/net/hyperv/hyperv_net.h93
-rw-r--r--drivers/net/hyperv/netvsc.c89
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c14
-rw-r--r--drivers/net/hyperv/netvsc_drv.c56
-rw-r--r--drivers/net/hyperv/rndis_filter.c248
-rw-r--r--drivers/net/ifb.c7
-rw-r--r--drivers/net/ipa/Kconfig10
-rw-r--r--drivers/net/ipa/gsi.c486
-rw-r--r--drivers/net/ipa/gsi.h6
-rw-r--r--drivers/net/ipa/gsi_reg.h31
-rw-r--r--drivers/net/ipa/gsi_trans.h1
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_clock.c199
-rw-r--r--drivers/net/ipa/ipa_cmd.c77
-rw-r--r--drivers/net/ipa/ipa_cmd.h24
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c38
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c38
-rw-r--r--drivers/net/ipa/ipa_data.h26
-rw-r--r--drivers/net/ipa/ipa_endpoint.c127
-rw-r--r--drivers/net/ipa/ipa_main.c43
-rw-r--r--drivers/net/ipa/ipa_mem.c4
-rw-r--r--drivers/net/ipa/ipa_modem.c1
-rw-r--r--drivers/net/ipa/ipa_reg.h22
-rw-r--r--drivers/net/ipa/ipa_table.c16
-rw-r--r--drivers/net/ipa/ipa_table.h8
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/mdio-bitbang.c6
-rw-r--r--drivers/net/mdio/mdio-moxart.c4
-rw-r--r--drivers/net/mdio/of_mdio.c30
-rw-r--r--drivers/net/mhi/Makefile3
-rw-r--r--drivers/net/mhi/mhi.h40
-rw-r--r--drivers/net/mhi/net.c (renamed from drivers/net/mhi_net.c)199
-rw-r--r--drivers/net/mhi/proto_mbim.c293
-rw-r--r--drivers/net/netdevsim/dev.c40
-rw-r--r--drivers/net/netdevsim/fib.c678
-rw-r--r--drivers/net/netdevsim/netdev.c2
-rw-r--r--drivers/net/pcs/pcs-lynx.c36
-rw-r--r--drivers/net/phy/at803x.c85
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/broadcom.c282
-rw-r--r--drivers/net/phy/dp83822.c3
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/icplus.c387
-rw-r--r--drivers/net/phy/lxt.c1
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio_bus.c10
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/phy/mscc/Makefile1
-rw-r--r--drivers/net/phy/mscc/mscc.h28
-rw-r--r--drivers/net/phy/mscc/mscc_main.c608
-rw-r--r--drivers/net/phy/mscc/mscc_serdes.c650
-rw-r--r--drivers/net/phy/mscc/mscc_serdes.h31
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/phy_device.c70
-rw-r--r--drivers/net/phy/phylink.c4
-rw-r--r--drivers/net/phy/realtek.c132
-rw-r--r--drivers/net/phy/sfp-bus.c38
-rw-r--r--drivers/net/phy/sfp.c208
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/ppp/ppp_async.c11
-rw-r--r--drivers/net/ppp/ppp_generic.c12
-rw-r--r--drivers/net/ppp/ppp_synctty.c11
-rw-r--r--drivers/net/ppp/pptp.c8
-rw-r--r--drivers/net/tap.c19
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c27
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/cdc_ether.c13
-rw-r--r--drivers/net/usb/cdc_ncm.c35
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/pegasus.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c89
-rw-r--r--drivers/net/usb/r8152.c226
-rw-r--r--drivers/net/usb/r8153_ecm.c8
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/veth.c108
-rw-r--r--drivers/net/virtio_net.c31
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/vxlan.c31
-rw-r--r--drivers/net/wan/Kconfig1
-rw-r--r--drivers/net/wan/farsync.c12
-rw-r--r--drivers/net/wan/hdlc_ppp.c7
-rw-r--r--drivers/net/wan/hdlc_x25.c6
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireguard/device.c21
-rw-r--r--drivers/net/wireguard/device.h15
-rw-r--r--drivers/net/wireguard/peer.c28
-rw-r--r--drivers/net/wireguard/peer.h8
-rw-r--r--drivers/net/wireguard/queueing.c86
-rw-r--r--drivers/net/wireguard/queueing.h45
-rw-r--r--drivers/net/wireguard/receive.c16
-rw-r--r--drivers/net/wireguard/send.c31
-rw-r--r--drivers/net/wireguard/socket.c8
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c41
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c287
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c103
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c191
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c26
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c234
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h37
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c95
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/key.c41
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c40
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c11
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c94
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c281
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c190
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c317
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c283
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c160
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c130
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c182
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c188
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c74
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig5
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c210
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c192
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1605
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h683
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h132
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c1842
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h979
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c52
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c129
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c552
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c177
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c528
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c250
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c356
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c1516
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h333
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c1308
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h434
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h342
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c292
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h419
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c124
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c26
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/Kconfig2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h8
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c15
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c154
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c62
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c116
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c397
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c18623
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c36
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c17
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501.h2
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c9
-rw-r--r--drivers/net/xen-netback/rx.c9
-rw-r--r--drivers/net/xen-netback/xenbus.c4
-rw-r--r--drivers/net/xen-netfront.c18
-rw-r--r--drivers/nfc/Kconfig11
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/fdp/i2c.c2
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/pn533/pn533.c4
-rw-r--r--drivers/nfc/pn544/mei.c4
-rw-r--r--drivers/nfc/st-nci/se.c3
-rw-r--r--drivers/nfc/trf7970a.c2
-rw-r--r--drivers/nfc/virtual_ncidev.c215
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/epf/Kconfig6
-rw-r--r--drivers/ntb/hw/epf/Makefile1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c753
-rw-r--r--drivers/nvdimm/blk.c7
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/bus.c13
-rw-r--r--drivers/nvdimm/dimm.c7
-rw-r--r--drivers/nvdimm/dimm_devs.c18
-rw-r--r--drivers/nvdimm/namespace_devs.c10
-rw-r--r--drivers/nvdimm/pmem.c9
-rw-r--r--drivers/nvdimm/region.c4
-rw-r--r--drivers/nvme/host/core.c132
-rw-r--r--drivers/nvme/host/fabrics.c6
-rw-r--r--drivers/nvme/host/fc.c17
-rw-r--r--drivers/nvme/host/hwmon.c31
-rw-r--r--drivers/nvme/host/lightnvm.c7
-rw-r--r--drivers/nvme/host/multipath.c12
-rw-r--r--drivers/nvme/host/nvme.h26
-rw-r--r--drivers/nvme/host/pci.c157
-rw-r--r--drivers/nvme/host/rdma.c51
-rw-r--r--drivers/nvme/host/tcp.c85
-rw-r--r--drivers/nvme/host/trace.c53
-rw-r--r--drivers/nvme/host/zns.c11
-rw-r--r--drivers/nvme/target/admin-cmd.c118
-rw-r--r--drivers/nvme/target/configfs.c6
-rw-r--r--drivers/nvme/target/core.c37
-rw-r--r--drivers/nvme/target/fc.c83
-rw-r--r--drivers/nvme/target/fcloop.c9
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c23
-rw-r--r--drivers/nvme/target/io-cmd-file.c5
-rw-r--r--drivers/nvme/target/nvmet.h20
-rw-r--r--drivers/nvme/target/passthru.c12
-rw-r--r--drivers/nvme/target/rdma.c26
-rw-r--r--drivers/nvme/target/tcp.c62
-rw-r--r--drivers/nvme/target/trace.h9
-rw-r--r--drivers/nvmem/Kconfig8
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/nvmem/imx-iim.c7
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c7
-rw-r--r--drivers/nvmem/rmem.c97
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/of/device.c31
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/platform.c5
-rw-r--r--drivers/of/property.c59
-rw-r--r--drivers/of/unittest.c2
-rw-r--r--drivers/opp/core.c800
-rw-r--r--drivers/opp/of.c230
-rw-r--r--drivers/opp/opp.h19
-rw-r--r--drivers/oprofile/buffer_sync.c591
-rw-r--r--drivers/oprofile/buffer_sync.h22
-rw-r--r--drivers/oprofile/cpu_buffer.c465
-rw-r--r--drivers/oprofile/cpu_buffer.h121
-rw-r--r--drivers/oprofile/event_buffer.c209
-rw-r--r--drivers/oprofile/event_buffer.h40
-rw-r--r--drivers/oprofile/nmi_timer_int.c157
-rw-r--r--drivers/oprofile/oprof.c286
-rw-r--r--drivers/oprofile/oprof.h50
-rw-r--r--drivers/oprofile/oprofile_files.c201
-rw-r--r--drivers/oprofile/oprofile_perf.c328
-rw-r--r--drivers/oprofile/oprofile_stats.c84
-rw-r--r--drivers/oprofile/oprofile_stats.h33
-rw-r--r--drivers/oprofile/oprofilefs.c300
-rw-r--r--drivers/oprofile/timer_int.c122
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/controller/Kconfig35
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c3
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c60
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c86
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h11
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c53
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c70
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c22
-rw-r--r--drivers/pci/controller/pci-host-common.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c10
-rw-r--r--drivers/pci/controller/pci-xgene.c13
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c35
-rw-r--r--drivers/pci/controller/pcie-mediatek.c7
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c1138
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip.c12
-rw-r--r--drivers/pci/controller/pcie-tango.c341
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c1
-rw-r--r--drivers/pci/endpoint/functions/Kconfig13
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c2128
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c13
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c176
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c130
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c105
-rw-r--r--drivers/pci/hotplug/acpiphp.h3
-rw-r--r--drivers/pci/pci-bridge-emul.c11
-rw-r--r--drivers/pci/pci-sysfs.c11
-rw-r--r--drivers/pci/pci.c23
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig8
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer.c5
-rw-r--r--drivers/pci/pcie/aspm.c44
-rw-r--r--drivers/pci/pcie/bw_notification.c138
-rw-r--r--drivers/pci/pcie/err.c16
-rw-r--r--drivers/pci/pcie/portdrv.h6
-rw-r--r--drivers/pci/pcie/portdrv_pci.c4
-rw-r--r--drivers/pci/proc.c6
-rw-r--r--drivers/pci/search.c4
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pci/syscall.c10
-rw-r--r--drivers/pcmcia/cistpl.c4
-rw-r--r--drivers/pcmcia/sa1111_generic.c3
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/arm-cci.c7
-rw-r--r--drivers/perf/arm-cmn.c19
-rw-r--r--drivers/perf/arm_dmc620_pmu.c5
-rw-r--r--drivers/perf/arm_pmu.c7
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c8
-rw-r--r--drivers/perf/arm_spe_pmu.c23
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c10
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c6
-rw-r--r--drivers/perf/qcom_l3_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c5
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c18
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1
-rw-r--r--drivers/phy/ingenic/Makefile2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c23
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c10
-rw-r--r--drivers/phy/mediatek/Kconfig4
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c3
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c21
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c430
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h147
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c74
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c13
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c12
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c222
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c11
-rw-r--r--drivers/pinctrl/Kconfig38
-rw-r--r--drivers/pinctrl/Makefile5
-rw-r--r--drivers/pinctrl/actions/Kconfig3
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c2
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c117
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h4
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c13
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c137
-rw-r--r--drivers/pinctrl/pinctrl-at91.c3
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c774
-rw-r--r--drivers/pinctrl/pinctrl-coh901.h6
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c178
-rw-r--r--drivers/pinctrl/pinctrl-k210.c985
-rw-r--r--drivers/pinctrl/pinctrl-single.c1
-rw-r--r--drivers/pinctrl/pinctrl-st.c1
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c1111
-rw-r--r--drivers/pinctrl/pinmux.c2
-rw-r--r--drivers/pinctrl/qcom/Kconfig18
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c96
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c1624
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c1649
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c1
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c47
-rw-r--r--drivers/pinctrl/renesas/Kconfig5
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/core.c38
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c4460
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c16
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c22
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h2
-rw-r--r--drivers/pinctrl/sirf/Makefile7
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c1137
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c6157
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c1131
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c894
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.h116
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c2
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c56
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c548
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c7
-rw-r--r--drivers/pinctrl/visconti/pinctrl-common.c23
-rw-r--r--drivers/pinctrl/zte/Kconfig14
-rw-r--r--drivers/pinctrl/zte/Makefile3
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c445
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.h102
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx296718.c1024
-rw-r--r--drivers/platform/chrome/cros_ec.c33
-rw-r--r--drivers/platform/chrome/cros_ec.h4
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c6
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c12
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c317
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c28
-rw-r--r--drivers/platform/olpc/olpc-ec.c37
-rw-r--r--drivers/platform/surface/Kconfig65
-rw-r--r--drivers/platform/surface/Makefile4
-rw-r--r--drivers/platform/surface/aggregator/Kconfig68
-rw-r--r--drivers/platform/surface/aggregator/Makefile17
-rw-r--r--drivers/platform/surface/aggregator/bus.c415
-rw-r--r--drivers/platform/surface/aggregator/bus.h27
-rw-r--r--drivers/platform/surface/aggregator/controller.c2579
-rw-r--r--drivers/platform/surface/aggregator/controller.h285
-rw-r--r--drivers/platform/surface/aggregator/core.c839
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h205
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2074
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h190
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c228
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h154
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c1263
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h143
-rw-r--r--drivers/platform/surface/aggregator/trace.h632
-rw-r--r--drivers/platform/surface/surface3-wmi.c6
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c886
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c322
-rw-r--r--drivers/platform/surface/surface_gpe.c4
-rw-r--r--drivers/platform/surface/surface_hotplug.c282
-rw-r--r--drivers/platform/x86/Kconfig207
-rw-r--r--drivers/platform/x86/Makefile19
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/acerhdf.c3
-rw-r--r--drivers/platform/x86/amd-pmc.c16
-rw-r--r--drivers/platform/x86/asus-laptop.c6
-rw-r--r--drivers/platform/x86/dell/Kconfig207
-rw-r--r--drivers/platform/x86/dell/Makefile21
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c (renamed from drivers/platform/x86/alienware-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dcdbas.c (renamed from drivers/platform/x86/dcdbas.c)0
-rw-r--r--drivers/platform/x86/dell/dcdbas.h (renamed from drivers/platform/x86/dcdbas.h)0
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c (renamed from drivers/platform/x86/dell-laptop.c)0
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.c (renamed from drivers/platform/x86/dell-rbtn.c)0
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.h (renamed from drivers/platform/x86/dell-rbtn.h)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c (renamed from drivers/platform/x86/dell-smbios-base.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c (renamed from drivers/platform/x86/dell-smbios-smm.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c (renamed from drivers/platform/x86/dell-smbios-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h (renamed from drivers/platform/x86/dell-smbios.h)0
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c (renamed from drivers/platform/x86/dell-smo8800.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-aio.c (renamed from drivers/platform/x86/dell-wmi-aio.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.c (renamed from drivers/platform/x86/dell-wmi-descriptor.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.h (renamed from drivers/platform/x86/dell-wmi-descriptor.h)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-led.c (renamed from drivers/platform/x86/dell-wmi-led.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile (renamed from drivers/platform/x86/dell-wmi-sysman/Makefile)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c (renamed from drivers/platform/x86/dell-wmi-sysman/biosattr-interface.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h (renamed from drivers/platform/x86/dell-wmi-sysman/dell-wmi-sysman.h)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/enum-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/int-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c (renamed from drivers/platform/x86/dell-wmi-sysman/passwordattr-interface.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/string-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c (renamed from drivers/platform/x86/dell-wmi-sysman/sysman.c)6
-rw-r--r--drivers/platform/x86/dell/dell-wmi.c (renamed from drivers/platform/x86/dell-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c (renamed from drivers/platform/x86/dell_rbu.c)0
-rw-r--r--drivers/platform/x86/hp-wmi.c17
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c1455
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c1
-rw-r--r--drivers/platform/x86/intel-vbtn.c148
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c233
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c560
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c22
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c75
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c469
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c45
-rw-r--r--drivers/pnp/interface.c1
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c3
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c74
-rw-r--r--drivers/power/reset/atc260x-poweroff.c262
-rw-r--r--drivers/power/reset/linkstation-poweroff.c1
-rw-r--r--drivers/power/reset/zx-reboot.c86
-rw-r--r--drivers/power/supply/Kconfig27
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/ab8500_fg.c2
-rw-r--r--drivers/power/supply/acer_a500_battery.c297
-rw-r--r--drivers/power/supply/axp20x_usb_power.c2
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c6
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq256xx_charger.c1749
-rw-r--r--drivers/power/supply/bq25980_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/charger-manager.c8
-rw-r--r--drivers/power/supply/cpcap-battery.c217
-rw-r--r--drivers/power/supply/cpcap-charger.c262
-rw-r--r--drivers/power/supply/ds2760_battery.c2
-rw-r--r--drivers/power/supply/ds2780_battery.c8
-rw-r--r--drivers/power/supply/ingenic-battery.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c931
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max8903_charger.c360
-rw-r--r--drivers/power/supply/max8997_charger.c96
-rw-r--r--drivers/power/supply/power_supply_hwmon.c2
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/smb347-charger.c12
-rw-r--r--drivers/power/supply/wm97xx_battery.c45
-rw-r--r--drivers/power/supply/z2_battery.c46
-rw-r--r--drivers/powercap/Kconfig13
-rw-r--r--drivers/powercap/Makefile2
-rw-r--r--drivers/powercap/dtpm.c480
-rw-r--r--drivers/powercap/dtpm_cpu.c257
-rw-r--r--drivers/powercap/intel_rapl_common.c9
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/idt8a340_reg.h10
-rw-r--r--drivers/ptp/ptp_clockmatrix.c313
-rw-r--r--drivers/ptp/ptp_clockmatrix.h17
-rw-r--r--drivers/pwm/Kconfig10
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-iqs620a.c94
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c2
-rw-r--r--drivers/pwm/pwm-rockchip.c32
-rw-r--r--drivers/pwm/pwm-zx.c278
-rw-r--r--drivers/rapidio/rio.c2
-rw-r--r--drivers/regulator/Kconfig52
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/ab3100.c724
-rw-r--r--drivers/regulator/ab8500-ext.c422
-rw-r--r--drivers/regulator/ab8500.c116
-rw-r--r--drivers/regulator/atc260x-regulator.c539
-rw-r--r--drivers/regulator/axp20x-regulator.c7
-rw-r--r--drivers/regulator/bd70528-regulator.c11
-rw-r--r--drivers/regulator/bd71828-regulator.c13
-rw-r--r--drivers/regulator/bd718x7-regulator.c77
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c59
-rw-r--r--drivers/regulator/core.c62
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mt6315-regulator.c299
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c215
-rw-r--r--drivers/regulator/pca9450-regulator.c22
-rw-r--r--drivers/regulator/pf8x00-regulator.c286
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c728
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c36
-rw-r--r--drivers/regulator/rohm-regulator.c9
-rw-r--r--drivers/regulator/rt4831-regulator.c198
-rw-r--r--drivers/regulator/s5m8767.c15
-rw-r--r--drivers/remoteproc/Kconfig25
-rw-r--r--drivers/remoteproc/ingenic_rproc.c7
-rw-r--r--drivers/remoteproc/mtk_common.h7
-rw-r--r--drivers/remoteproc/mtk_scp.c82
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c63
-rw-r--r--drivers/remoteproc/qcom_wcnss.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c1
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c23
-rw-r--r--drivers/reset/Kconfig12
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c4
-rw-r--r--drivers/reset/hisilicon/reset-hi3660.c9
-rw-r--r--drivers/reset/reset-k210.c131
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c17
-rw-r--r--drivers/rtc/Kconfig50
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c12
-rw-r--r--drivers/rtc/rtc-ab3100.c254
-rw-r--r--drivers/rtc/rtc-abx80x.c39
-rw-r--r--drivers/rtc/rtc-ac100.c4
-rw-r--r--drivers/rtc/rtc-armada38x.c21
-rw-r--r--drivers/rtc/rtc-asm9260.c6
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c2
-rw-r--r--drivers/rtc/rtc-cmos.c25
-rw-r--r--drivers/rtc/rtc-coh901331.c290
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c5
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c6
-rw-r--r--drivers/rtc/rtc-ds3232.c7
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c29
-rw-r--r--drivers/rtc/rtc-m48t59.c22
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c7
-rw-r--r--drivers/rtc/rtc-mcp795.c5
-rw-r--r--drivers/rtc/rtc-meson.c2
-rw-r--r--drivers/rtc/rtc-mrst.c521
-rw-r--r--drivers/rtc/rtc-mv.c14
-rw-r--r--drivers/rtc/rtc-mxc.c5
-rw-r--r--drivers/rtc/rtc-mxc_v2.c7
-rw-r--r--drivers/rtc/rtc-opal.c27
-rw-r--r--drivers/rtc/rtc-pcf2123.c5
-rw-r--r--drivers/rtc/rtc-pcf2127.c46
-rw-r--r--drivers/rtc/rtc-pcf85063.c49
-rw-r--r--drivers/rtc/rtc-pcf85363.c10
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pl030.c4
-rw-r--r--drivers/rtc/rtc-pl031.c12
-rw-r--r--drivers/rtc/rtc-pm8xxx.c18
-rw-r--r--drivers/rtc/rtc-r7301.c5
-rw-r--r--drivers/rtc/rtc-rs5c372.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c23
-rw-r--r--drivers/rtc/rtc-rv3029c2.c22
-rw-r--r--drivers/rtc/rtc-rv3032.c13
-rw-r--r--drivers/rtc/rtc-rv8803.c13
-rw-r--r--drivers/rtc/rtc-rx6110.c4
-rw-r--r--drivers/rtc/rtc-rx8010.c21
-rw-r--r--drivers/rtc/rtc-rx8025.c5
-rw-r--r--drivers/rtc/rtc-rx8581.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-s3c.c17
-rw-r--r--drivers/rtc/rtc-s5m.c33
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-sirfsoc.c446
-rw-r--r--drivers/rtc/rtc-stm32.c4
-rw-r--r--drivers/rtc/rtc-tegra.c6
-rw-r--r--drivers/rtc/rtc-tps65910.c19
-rw-r--r--drivers/rtc/rtc-tx4939.c303
-rw-r--r--drivers/s390/block/dasd.c26
-rw-r--r--drivers/s390/block/dasd_devmap.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/sclp_early_core.c4
-rw-r--r--drivers/s390/char/sclp_tty.c1
-rw-r--r--drivers/s390/char/sclp_vt220.c1
-rw-r--r--drivers/s390/char/tape_3590.c4
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/css.c20
-rw-r--r--drivers/s390/cio/device.c39
-rw-r--r--drivers/s390/cio/qdio.h25
-rw-r--r--drivers/s390/cio/qdio_debug.c9
-rw-r--r--drivers/s390/cio/qdio_main.c209
-rw-r--r--drivers/s390/cio/qdio_setup.c19
-rw-r--r--drivers/s390/cio/qdio_thinint.c70
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c149
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h12
-rw-r--r--drivers/s390/crypto/zcrypt_api.c14
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c15
-rw-r--r--drivers/s390/net/qeth_core.h47
-rw-r--r--drivers/s390/net/qeth_core_main.c148
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_l2_main.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c94
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/sbus/char/display7seg.c4
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aha1542.c3
-rw-r--r--drivers/scsi/fdomain_isa.c3
-rw-r--r--drivers/scsi/g_NCR5380.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c3
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd_zbc.c43
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c9
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.h5
-rw-r--r--drivers/scsi/ufs/ufshcd.c1
-rw-r--r--drivers/sfi/Kconfig18
-rw-r--r--drivers/sfi/Makefile4
-rw-r--r--drivers/sfi/sfi_acpi.c214
-rw-r--r--drivers/sfi/sfi_core.c522
-rw-r--r--drivers/sfi/sfi_core.h81
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/virq-debugfs.c14
-rw-r--r--drivers/soc/Kconfig3
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c30
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c33
-rw-r--r--drivers/soc/atmel/soc.c240
-rw-r--r--drivers/soc/atmel/soc.h19
-rw-r--r--drivers/soc/bcm/Makefile2
-rw-r--r--drivers/soc/bcm/bcm63xx/Kconfig9
-rw-r--r--drivers/soc/bcm/bcm63xx/Makefile1
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm-pmb.c333
-rw-r--r--drivers/soc/bcm/brcmstb/common.c17
-rw-r--r--drivers/soc/canaan/Kconfig12
-rw-r--r--drivers/soc/canaan/Makefile3
-rw-r--r--drivers/soc/canaan/k210-sysctl.c78
-rw-r--r--drivers/soc/fsl/qe/qe_common.c20
-rw-r--r--drivers/soc/imx/Kconfig2
-rw-r--r--drivers/soc/imx/soc-imx8m.c84
-rw-r--r--drivers/soc/kendryte/Kconfig14
-rw-r--r--drivers/soc/kendryte/Makefile3
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c260
-rw-r--r--drivers/soc/litex/Kconfig15
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c119
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt8167-pm-domains.h86
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c32
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp.c)328
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c51
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c50
-rw-r--r--drivers/soc/qcom/ocmem.c8
-rw-r--r--drivers/soc/qcom/qcom_aoss.c1
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c24
-rw-r--r--drivers/soc/qcom/rpmpd.c28
-rw-r--r--drivers/soc/qcom/smem.c4
-rw-r--r--drivers/soc/qcom/socinfo.c105
-rw-r--r--drivers/soc/renesas/rcar-sysc.c37
-rw-r--r--drivers/soc/samsung/Kconfig12
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-asv.c57
-rw-r--r--drivers/soc/samsung/exynos-asv.h2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c71
-rw-r--r--drivers/soc/samsung/pm_domains.c97
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c27
-rw-r--r--drivers/soc/sunxi/sunxi_mbus.c5
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c31
-rw-r--r--drivers/soc/ti/k3-ringacc.c7
-rw-r--r--drivers/soc/ti/knav_dma.c1
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/omap_prm.c11
-rw-r--r--drivers/soc/ti/pm33xx.c5
-rw-r--r--drivers/soc/ti/pruss.c91
-rw-r--r--drivers/soc/xilinx/Kconfig17
-rw-r--r--drivers/soc/xilinx/Makefile1
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c628
-rw-r--r--drivers/soc/zte/Kconfig15
-rw-r--r--drivers/soc/zte/Makefile6
-rw-r--r--drivers/soc/zte/zx296718_pm_domains.c181
-rw-r--r--drivers/soc/zte/zx2967_pm_domains.c141
-rw-r--r--drivers/soc/zte/zx2967_pm_domains.h44
-rw-r--r--drivers/soundwire/bus.c179
-rw-r--r--drivers/soundwire/cadence_master.c31
-rw-r--r--drivers/soundwire/intel.c8
-rw-r--r--drivers/soundwire/intel_init.c3
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/sysfs_slave.c2
-rw-r--r--drivers/spi/Kconfig34
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel-quadspi.c1
-rw-r--r--drivers/spi/spi-altera.c29
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-au1550.c53
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835.c8
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c333
-rw-r--r--drivers/spi/spi-cadence.c6
-rw-r--r--drivers/spi/spi-clps711x.c2
-rw-r--r--drivers/spi/spi-dw-bt1.c2
-rw-r--r--drivers/spi/spi-efm32.c462
-rw-r--r--drivers/spi/spi-fsl-spi.c7
-rw-r--r--drivers/spi/spi-geni-qcom.c84
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c33
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-mem.c23
-rw-r--r--drivers/spi/spi-mpc52xx.c16
-rw-r--r--drivers/spi/spi-mt65xx.c72
-rw-r--r--drivers/spi/spi-orion.c55
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c29
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-qcom-qspi.c3
-rw-r--r--drivers/spi/spi-realtek-rtl.c209
-rw-r--r--drivers/spi/spi-rockchip.c2
-rw-r--r--drivers/spi/spi-rpc-if.c13
-rw-r--r--drivers/spi/spi-sh-msiof.c14
-rw-r--r--drivers/spi/spi-sirf.c1236
-rw-r--r--drivers/spi/spi-stm32.c150
-rw-r--r--drivers/spi/spi-synquacer.c4
-rw-r--r--drivers/spi/spi-tegra210-quad.c1410
-rw-r--r--drivers/spi/spi-txx9.c477
-rw-r--r--drivers/spi/spi.c72
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/spmi/spmi-pmic-arb.c5
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/board/Kconfig9
-rw-r--r--drivers/staging/clocking-wizard/TODO3
-rw-r--r--drivers/staging/comedi/comedi_fops.c8
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c284
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c289
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c3
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h2
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c115
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c4
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c149
-rw-r--r--drivers/staging/fwserial/fwserial.c2
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c42
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c10
-rw-r--r--drivers/staging/greybus/audio_helper.c2
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c4
-rw-r--r--drivers/staging/greybus/audio_module.c2
-rw-r--r--drivers/staging/greybus/audio_topology.c6
-rw-r--r--drivers/staging/greybus/hid.c6
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/power_supply.c2
-rw-r--r--drivers/staging/greybus/spilib.c4
-rw-r--r--drivers/staging/hikey9xx/Kconfig2
-rw-r--r--drivers/staging/hikey9xx/hi6421-spmi-pmic.c331
-rw-r--r--drivers/staging/hikey9xx/hi6421v600-regulator.c533
-rw-r--r--drivers/staging/hikey9xx/hisi-spmi-controller.c21
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml108
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml19
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.c81
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.yaml1
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/allegro-dvt/Kconfig16
-rw-r--r--drivers/staging/media/allegro-dvt/Makefile5
-rw-r--r--drivers/staging/media/allegro-dvt/TODO4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c44
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c2
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c1
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c6
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c4
-rw-r--r--drivers/staging/media/imx/Kconfig9
-rw-r--r--drivers/staging/media/imx/Makefile2
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c10
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c14
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c7
-rw-r--r--drivers/staging/media/imx/imx-media-of.c2
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c127
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c43
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c15
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c3
-rw-r--r--drivers/staging/media/omap4iss/iss.c1
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h1
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c49
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c2
-rw-r--r--drivers/staging/media/tegra-video/csi.c35
-rw-r--r--drivers/staging/media/tegra-video/csi.h14
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c340
-rw-r--r--drivers/staging/media/tegra-video/vi.c348
-rw-r--r--drivers/staging/media/tegra-video/vi.h23
-rw-r--r--drivers/staging/media/tegra-video/video.c18
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c2
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/sound.c8
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/mt7621-dma/Makefile2
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c (renamed from drivers/staging/mt7621-dma/mtk-hsdma.c)6
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi58
-rw-r--r--drivers/staging/nvec/nvec_power.c2
-rw-r--r--drivers/staging/nvec/nvec_ps2.c4
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/qlge/Kconfig1
-rw-r--r--drivers/staging/qlge/Makefile2
-rw-r--r--drivers/staging/qlge/TODO10
-rw-r--r--drivers/staging/qlge/qlge.h244
-rw-r--r--drivers/staging/qlge/qlge_dbg.c1650
-rw-r--r--drivers/staging/qlge/qlge_devlink.c163
-rw-r--r--drivers/staging/qlge/qlge_devlink.h9
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c239
-rw-r--r--drivers/staging/qlge/qlge_main.c1380
-rw-r--r--drivers/staging/qlge/qlge_mpi.c356
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c44
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h65
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c28
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c24
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c225
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h8
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h79
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h13
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_wifi_regd.h6
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c12
-rw-r--r--drivers/staging/sm750fb/sm750.c2
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c90
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/TODO2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c6
-rw-r--r--drivers/staging/vc04_services/interface/TODO4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c22
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c3
-rw-r--r--drivers/staging/vme/devices/vme_user.c16
-rw-r--r--drivers/staging/vt6655/baseband.c4
-rw-r--r--drivers/staging/vt6655/rxtx.h8
-rw-r--r--drivers/staging/vt6656/rf.c2
-rw-r--r--drivers/staging/vt6656/rxtx.h6
-rw-r--r--drivers/staging/wfx/bh.c1
-rw-r--r--drivers/staging/wfx/bh.h4
-rw-r--r--drivers/staging/wfx/bus.h3
-rw-r--r--drivers/staging/wfx/bus_sdio.c6
-rw-r--r--drivers/staging/wfx/bus_spi.c7
-rw-r--r--drivers/staging/wfx/data_rx.c5
-rw-r--r--drivers/staging/wfx/data_tx.c15
-rw-r--r--drivers/staging/wfx/data_tx.h4
-rw-r--r--drivers/staging/wfx/debug.c6
-rw-r--r--drivers/staging/wfx/fwio.c2
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h6
-rw-r--r--drivers/staging/wfx/hif_api_general.h9
-rw-r--r--drivers/staging/wfx/hif_tx.c4
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c5
-rw-r--r--drivers/staging/wfx/hwio.c3
-rw-r--r--drivers/staging/wfx/hwio.h2
-rw-r--r--drivers/staging/wfx/key.c2
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c7
-rw-r--r--drivers/staging/wfx/main.h3
-rw-r--r--drivers/staging/wfx/queue.c4
-rw-r--r--drivers/staging/wfx/queue.h3
-rw-r--r--drivers/staging/wfx/scan.h2
-rw-r--r--drivers/staging/wfx/sta.c6
-rw-r--r--drivers/staging/wfx/sta.h2
-rw-r--r--drivers/staging/wfx/traces.h3
-rw-r--r--drivers/staging/wfx/wfx.h3
-rw-r--r--drivers/staging/wimax/i2400m/fw.c17
-rw-r--r--drivers/staging/wimax/i2400m/netdev.c6
-rw-r--r--drivers/staging/wimax/i2400m/rx.c7
-rw-r--r--drivers/staging/wimax/i2400m/tx.c8
-rw-r--r--drivers/staging/wimax/i2400m/usb.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c28
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c3
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c9
-rw-r--r--drivers/target/target_core_pscsi.c5
-rw-r--r--drivers/target/target_core_xcopy.c119
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/tee/optee/call.c3
-rw-r--r--drivers/tee/optee/optee_msg.h158
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h103
-rw-r--r--drivers/tee/optee/optee_smc.h72
-rw-r--r--drivers/tee/optee/rpc.c70
-rw-r--r--drivers/thermal/Kconfig17
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/cpufreq_cooling.c71
-rw-r--r--drivers/thermal/da9062-thermal.c4
-rw-r--r--drivers/thermal/gov_power_allocator.c37
-rw-r--r--drivers/thermal/gov_step_wise.c14
-rw-r--r--drivers/thermal/intel/Kconfig4
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c6
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c6
-rw-r--r--drivers/thermal/intel/therm_throt.c718
-rw-r--r--drivers/thermal/intel/thermal_interrupt.h15
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c4
-rw-r--r--drivers/thermal/khadas_mcu_fan.c1
-rw-r--r--drivers/thermal/qcom/Kconfig11
-rw-r--r--drivers/thermal/qcom/Makefile1
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c623
-rw-r--r--drivers/thermal/tango_thermal.c126
-rw-r--r--drivers/thermal/thermal_core.c72
-rw-r--r--drivers/thermal/thermal_core.h7
-rw-r--r--drivers/thermal/thermal_helpers.c7
-rw-r--r--drivers/thermal/thermal_sysfs.c85
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c7
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c54
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c6
-rw-r--r--drivers/thermal/zx2967_thermal.c256
-rw-r--r--drivers/thunderbolt/acpi.c67
-rw-r--r--drivers/thunderbolt/cap.c2
-rw-r--r--drivers/thunderbolt/ctl.c51
-rw-r--r--drivers/thunderbolt/dma_port.c2
-rw-r--r--drivers/thunderbolt/dma_test.c5
-rw-r--r--drivers/thunderbolt/domain.c48
-rw-r--r--drivers/thunderbolt/eeprom.c33
-rw-r--r--drivers/thunderbolt/icm.c12
-rw-r--r--drivers/thunderbolt/lc.c35
-rw-r--r--drivers/thunderbolt/nhi.c39
-rw-r--r--drivers/thunderbolt/path.c2
-rw-r--r--drivers/thunderbolt/switch.c82
-rw-r--r--drivers/thunderbolt/tb.c54
-rw-r--r--drivers/thunderbolt/tb.h22
-rw-r--r--drivers/thunderbolt/tb_regs.h1
-rw-r--r--drivers/thunderbolt/tunnel.c12
-rw-r--r--drivers/thunderbolt/usb4.c11
-rw-r--r--drivers/thunderbolt/xdomain.c15
-rw-r--r--drivers/tty/Kconfig14
-rw-r--r--drivers/tty/Makefile5
-rw-r--r--drivers/tty/amiserial.c3
-rw-r--r--drivers/tty/hvc/hvcs.c5
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/mxser.c1
-rw-r--r--drivers/tty/n_gsm.c3
-rw-r--r--drivers/tty/n_hdlc.c60
-rw-r--r--drivers/tty/n_null.c3
-rw-r--r--drivers/tty/n_r3964.c10
-rw-r--r--drivers/tty/n_tracerouter.c233
-rw-r--r--drivers/tty/n_tracesink.c228
-rw-r--r--drivers/tty/n_tracesink.h26
-rw-r--r--drivers/tty/n_tty.c160
-rw-r--r--drivers/tty/pty.c16
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c11
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile3
-rw-r--r--drivers/tty/serial/amba-pl010.c4
-rw-r--r--drivers/tty/serial/amba-pl011.c3
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c43
-rw-r--r--drivers/tty/serial/efm32-uart.c852
-rw-r--r--drivers/tty/serial/fsl_lpuart.c4
-rw-r--r--drivers/tty/serial/icom.c4
-rw-r--r--drivers/tty/serial/ifx6x60.c1390
-rw-r--r--drivers/tty/serial/ifx6x60.h118
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/max3100.c3
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/mxs-auart.c45
-rw-r--r--drivers/tty/serial/owl-uart.c38
-rw-r--r--drivers/tty/serial/serial_core.c11
-rw-r--r--drivers/tty/serial/sifive.c1
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c1503
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h447
-rw-r--r--drivers/tty/serial/stm32-usart.c490
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/synclink_gt.c1
-rw-r--r--drivers/tty/tty_io.c278
-rw-r--r--drivers/tty/ttynull.c18
-rw-r--r--drivers/tty/vcc.c10
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped82
-rw-r--r--drivers/tty/vt/keyboard.c18
-rw-r--r--drivers/tty/vt/vt.c42
-rw-r--r--drivers/tty/vt/vt_ioctl.c154
-rw-r--r--drivers/uio/uio_pci_generic.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c12
-rw-r--r--drivers/usb/cdns3/Kconfig60
-rw-r--r--drivers/usb/cdns3/Makefile43
-rw-r--r--drivers/usb/cdns3/cdns3-debug.h (renamed from drivers/usb/cdns3/debug.h)0
-rw-r--r--drivers/usb/cdns3/cdns3-ep0.c (renamed from drivers/usb/cdns3/ep0.c)8
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c (renamed from drivers/usb/cdns3/gadget.c)34
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.h (renamed from drivers/usb/cdns3/gadget.h)0
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c24
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c315
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c1
-rw-r--r--drivers/usb/cdns3/cdns3-trace.c (renamed from drivers/usb/cdns3/trace.c)2
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h (renamed from drivers/usb/cdns3/trace.h)6
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h583
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c489
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2009
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h1601
-rw-r--r--drivers/usb/cdns3/cdnsp-mem.c1336
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c254
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c2438
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h830
-rw-r--r--drivers/usb/cdns3/core.c455
-rw-r--r--drivers/usb/cdns3/core.h65
-rw-r--r--drivers/usb/cdns3/drd.c224
-rw-r--r--drivers/usb/cdns3/drd.h94
-rw-r--r--drivers/usb/cdns3/gadget-export.h22
-rw-r--r--drivers/usb/cdns3/host-export.h18
-rw-r--r--drivers/usb/cdns3/host.c26
-rw-r--r--drivers/usb/chipidea/Kconfig3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c344
-rw-r--r--drivers/usb/chipidea/core.c10
-rw-r--r--drivers/usb/chipidea/host.c104
-rw-r--r--drivers/usb/class/cdc-acm.c10
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usblp.c40
-rw-r--r--drivers/usb/class/usbtmc.c85
-rw-r--r--drivers/usb/common/common.c26
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/gadget.c8
-rw-r--r--drivers/usb/dwc2/hcd.c15
-rw-r--r--drivers/usb/dwc2/hcd_intr.c14
-rw-r--r--drivers/usb/dwc2/params.c8
-rw-r--r--drivers/usb/dwc2/pci.c18
-rw-r--r--drivers/usb/dwc3/Kconfig10
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c85
-rw-r--r--drivers/usb/dwc3/core.h12
-rw-r--r--drivers/usb/dwc3/drd.c25
-rw-r--r--drivers/usb/dwc3/dwc3-haps.c8
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c363
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c9
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c69
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c71
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/gadget.c261
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/dwc3/ulpi.c38
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c114
-rw-r--r--drivers/usb/gadget/configfs.c43
-rw-r--r--drivers/usb/gadget/function/f_midi.c12
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_uac2.c69
-rw-r--r--drivers/usb/gadget/function/u_audio.c135
-rw-r--r--drivers/usb/gadget/function/u_ether.c42
-rw-r--r--drivers/usb/gadget/function/u_ether.h12
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h15
-rw-r--r--drivers/usb/gadget/function/u_serial.c8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig13
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c4
-rw-r--r--drivers/usb/gadget/legacy/ether.c4
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c3
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c5
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig11
-rw-r--r--drivers/usb/gadget/udc/bdc/Makefile2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h134
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.h21
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.h10
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c16
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.h10
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c128
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c8
-rw-r--r--drivers/usb/gadget/udc/core.c55
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c45
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c122
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c30
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c10
-rw-r--r--drivers/usb/host/Kconfig10
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-hcd.c12
-rw-r--r--drivers/usb/host/ehci-hub.c3
-rw-r--r--drivers/usb/host/ehci-tegra.c604
-rw-r--r--drivers/usb/host/ohci-sa1111.c4
-rw-r--r--drivers/usb/host/xhci-ext-caps.c3
-rw-r--r--drivers/usb/host/xhci-mem.c21
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c130
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-mtk.h15
-rw-r--r--drivers/usb/host/xhci-mvebu.c42
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-plat.c20
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-ring.c1144
-rw-r--r--drivers/usb/host/xhci-tegra.c7
-rw-r--r--drivers/usb/host/xhci.c126
-rw-r--r--drivers/usb/host/xhci.h41
-rw-r--r--drivers/usb/misc/usb251xb.c12
-rw-r--r--drivers/usb/misc/usb3503.c9
-rw-r--r--drivers/usb/misc/yurex.c3
-rw-r--r--drivers/usb/musb/jz4740.c18
-rw-r--r--drivers/usb/musb/musb_core.c31
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c7
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c103
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/serial/Kconfig9
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ark3116.c11
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/bus.c27
-rw-r--r--drivers/usb/serial/ch341.c4
-rw-r--r--drivers/usb/serial/cp210x.c219
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/f81232.c12
-rw-r--r--drivers/usb/serial/f81534.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.c27
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_ti.c12
-rw-r--r--drivers/usb/serial/iuu_phoenix.c24
-rw-r--r--drivers/usb/serial/keyspan.c6
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c6
-rw-r--r--drivers/usb/serial/kobil_sct.c6
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/metro-usb.c4
-rw-r--r--drivers/usb/serial/mos7720.c12
-rw-r--r--drivers/usb/serial/mos7840.c8
-rw-r--r--drivers/usb/serial/mxuport.c7
-rw-r--r--drivers/usb/serial/omninet.c6
-rw-r--r--drivers/usb/serial/opticon.c4
-rw-r--r--drivers/usb/serial/option.c12
-rw-r--r--drivers/usb/serial/oti6858.c6
-rw-r--r--drivers/usb/serial/pl2303.c12
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c4
-rw-r--r--drivers/usb/serial/ssu100.c4
-rw-r--r--drivers/usb/serial/symbolserial.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c9
-rw-r--r--drivers/usb/serial/usb-wwan.h2
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/xr_serial.c611
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/altmodes/Kconfig2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c17
-rw-r--r--drivers/usb/typec/class.c106
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h6
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c35
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1173
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/displayport.c32
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c56
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h3
-rw-r--r--drivers/usb/usbip/stub_main.c4
-rw-r--r--drivers/usb/usbip/usbip_common.h29
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/usb/usbip/vhci_rx.c2
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c1
-rw-r--r--drivers/vdpa/Kconfig1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h1
-rw-r--r--drivers/vdpa/mlx5/core/mr.c28
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c22
-rw-r--r--drivers/vdpa/vdpa.c503
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c98
-rw-r--r--drivers/vfio/pci/Kconfig12
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/vfio_pci.c12
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h2
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c24
-rw-r--r--drivers/vfio/platform/vfio_amba.c15
-rw-r--r--drivers/vfio/vfio.c5
-rw-r--r--drivers/vfio/vfio_iommu_type1.c564
-rw-r--r--drivers/vhost/net.c36
-rw-r--r--drivers/vhost/scsi.c9
-rw-r--r--drivers/vhost/vsock.c68
-rw-r--r--drivers/video/backlight/ktd253-backlight.c12
-rw-r--r--drivers/video/backlight/lms283gf05.c43
-rw-r--r--drivers/video/backlight/locomolcd.c3
-rw-r--r--drivers/video/backlight/qcom-wled.c2
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/video/console/vgacon.c19
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/acornfb.c34
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c20
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c15
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c4
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/cg6.c2
-rw-r--r--drivers/video/fbdev/cirrusfb.c20
-rw-r--r--drivers/video/fbdev/controlfb.c4
-rw-r--r--drivers/video/fbdev/core/fb_notify.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c25
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c4
-rw-r--r--drivers/video/fbdev/efifb.c3
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c4
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c10
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/leo.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c13
-rw-r--r--drivers/video/fbdev/neofb.c4
-rw-r--r--drivers/video/fbdev/nvidia/nv_setup.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c4
-rw-r--r--drivers/video/fbdev/p9100.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c8
-rw-r--r--drivers/video/fbdev/riva/fbdev.c9
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c28
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c3
-rw-r--r--drivers/video/fbdev/s3c-fb.c11
-rw-r--r--drivers/video/fbdev/sis/init.c33
-rw-r--r--drivers/video/fbdev/sis/oem310.h2
-rw-r--r--drivers/video/fbdev/sis/sis.h1
-rw-r--r--drivers/video/fbdev/sis/sis_main.c9
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c7
-rw-r--r--drivers/video/fbdev/udlfb.c1
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/via/lcd.c4
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/video/of_display_timing.c1
-rw-r--r--drivers/video/of_videomode.c6
-rw-r--r--drivers/virt/Kconfig2
-rw-r--r--drivers/virt/Makefile1
-rw-r--r--drivers/virt/acrn/Kconfig15
-rw-r--r--drivers/virt/acrn/Makefile3
-rw-r--r--drivers/virt/acrn/acrn_drv.h227
-rw-r--r--drivers/virt/acrn/hsm.c470
-rw-r--r--drivers/virt/acrn/hypercall.h254
-rw-r--r--drivers/virt/acrn/ioeventfd.c273
-rw-r--r--drivers/virt/acrn/ioreq.c657
-rw-r--r--drivers/virt/acrn/irqfd.c235
-rw-r--r--drivers/virt/acrn/mm.c306
-rw-r--r--drivers/virt/acrn/vm.c126
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c18
-rw-r--r--drivers/virtio/Kconfig9
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_input.c26
-rw-r--r--drivers/virtio/virtio_mem.c45
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci_common.h22
-rw-r--r--drivers/virtio/virtio_pci_modern.c506
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c599
-rw-r--r--drivers/virtio/virtio_vdpa.c3
-rw-r--r--drivers/vme/vme.c4
-rw-r--r--drivers/w1/masters/ds2490.c25
-rw-r--r--drivers/w1/slaves/w1_therm.c22
-rw-r--r--drivers/w1/w1.c39
-rw-r--r--drivers/watchdog/Kconfig74
-rw-r--r--drivers/watchdog/Makefile7
-rw-r--r--drivers/watchdog/atlas7_wdt.c221
-rw-r--r--drivers/watchdog/coh901327_wdt.c408
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/intel-mid_wdt.c8
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c533
-rw-r--r--drivers/watchdog/intel_scu_watchdog.h50
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/keembay_wdt.c286
-rw-r--r--drivers/watchdog/mei_wdt.c5
-rw-r--r--drivers/watchdog/mtk_wdt.c23
-rw-r--r--drivers/watchdog/pcwd.c7
-rw-r--r--drivers/watchdog/qcom-wdt.c13
-rw-r--r--drivers/watchdog/renesas_wdt.c33
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c216
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/tangox_wdt.c209
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c1
-rw-r--r--drivers/watchdog/zx2967_wdt.c279
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c78
-rw-r--r--drivers/xen/evtchn.c29
-rw-r--r--drivers/xen/gntdev.c37
-rw-r--r--drivers/xen/platform-pci.c8
-rw-r--r--drivers/xen/privcmd.c25
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--drivers/xen/xen-acpi-processor.c3
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c11
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c6
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c176
5372 files changed, 306183 insertions, 165896 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dcecc9f6e33f..62c753a73651 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -6,6 +6,7 @@ menu "Device Drivers"
source "drivers/amba/Kconfig"
source "drivers/eisa/Kconfig"
source "drivers/pci/Kconfig"
+source "drivers/cxl/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/rapidio/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index fd11b9ac4cc3..6fba7daba591 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -27,7 +27,7 @@ obj-y += idle/
obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
-obj-$(CONFIG_SFI) += sfi/
+
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
@@ -73,6 +73,7 @@ obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DAX) += dax/
+obj-$(CONFIG_CXL_BUS) += cxl/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c
index 403b01d66367..53580bdc5baa 100644
--- a/drivers/accessibility/speakup/serialio.c
+++ b/drivers/accessibility/speakup/serialio.c
@@ -27,11 +27,11 @@ static const struct old_serial_port *serstate;
static int timeouts;
static int spk_serial_out(struct spk_synth *in_synth, const char ch);
-static void spk_serial_send_xchar(char ch);
-static void spk_serial_tiocmset(unsigned int set, unsigned int clear);
-static unsigned char spk_serial_in(void);
-static unsigned char spk_serial_in_nowait(void);
-static void spk_serial_flush_buffer(void);
+static void spk_serial_send_xchar(struct spk_synth *in_synth, char ch);
+static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
+static unsigned char spk_serial_in(struct spk_synth *in_synth);
+static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth);
+static void spk_serial_flush_buffer(struct spk_synth *in_synth);
static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_serial_io_ops = {
@@ -150,7 +150,7 @@ static void start_serial_interrupt(int irq)
outb(1, speakup_info.port_tts + UART_FCR); /* Turn FIFO On */
}
-static void spk_serial_send_xchar(char ch)
+static void spk_serial_send_xchar(struct spk_synth *synth, char ch)
{
int timeout = SPK_XMITR_TIMEOUT;
@@ -162,7 +162,7 @@ static void spk_serial_send_xchar(char ch)
outb(ch, speakup_info.port_tts);
}
-static void spk_serial_tiocmset(unsigned int set, unsigned int clear)
+static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
int old = inb(speakup_info.port_tts + UART_MCR);
@@ -251,7 +251,7 @@ static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth)
return 1;
}
-static unsigned char spk_serial_in(void)
+static unsigned char spk_serial_in(struct spk_synth *in_synth)
{
int tmout = SPK_SERIAL_TIMEOUT;
@@ -265,7 +265,7 @@ static unsigned char spk_serial_in(void)
return inb_p(speakup_info.port_tts + UART_RX);
}
-static unsigned char spk_serial_in_nowait(void)
+static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth)
{
unsigned char lsr;
@@ -275,7 +275,7 @@ static unsigned char spk_serial_in_nowait(void)
return inb_p(speakup_info.port_tts + UART_RX);
}
-static void spk_serial_flush_buffer(void)
+static void spk_serial_flush_buffer(struct spk_synth *in_synth)
{
/* TODO: flush the UART 16550 buffer */
}
@@ -307,7 +307,7 @@ const char *spk_serial_synth_immediate(struct spk_synth *synth,
}
EXPORT_SYMBOL_GPL(spk_serial_synth_immediate);
-void spk_serial_release(void)
+void spk_serial_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts == 0)
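Taken together, the serialio.c hunks above thread the acting synth through every low-level I/O callback. As a minimal sketch of the resulting interface (the spk_types.h hunks are not reproduced here, so the field names below are inferred from the call sites and prototypes shown in this diff):

struct spk_io_ops {
	int (*synth_out)(struct spk_synth *synth, const char ch);
	int (*synth_out_unicode)(struct spk_synth *synth, u16 ch);
	void (*send_xchar)(struct spk_synth *synth, char ch);
	void (*tiocmset)(struct spk_synth *synth, unsigned int set, unsigned int clear);
	unsigned char (*synth_in)(struct spk_synth *synth);
	unsigned char (*synth_in_nowait)(struct spk_synth *synth);
	void (*flush_buffer)(struct spk_synth *synth);
	int (*wait_for_xmitr)(struct spk_synth *synth);
};

Passing the synth explicitly is what allows the later spk_ttyio.c hunks to drop their global state: each callback can reach its own device instead of a shared variable.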
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index c94328a5bd4a..c1ec087dca13 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -25,7 +25,7 @@
#define PROCSPEECH '\r'
static int synth_probe(struct spk_synth *synth);
-static void accent_release(void);
+static void accent_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -294,7 +294,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void accent_release(void)
+static void accent_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
index 0877b4044c28..cd63581b2e99 100644
--- a/drivers/accessibility/speakup/speakup_apollo.c
+++ b/drivers/accessibility/speakup/speakup_apollo.c
@@ -163,8 +163,8 @@ static void do_catch_up(struct spk_synth *synth)
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (!synth->io_ops->synth_out(synth, ch)) {
- synth->io_ops->tiocmset(0, UART_MCR_RTS);
- synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+ synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c
index e6a6a9665d8f..e89fd72579e6 100644
--- a/drivers/accessibility/speakup/speakup_audptr.c
+++ b/drivers/accessibility/speakup/speakup_audptr.c
@@ -119,8 +119,8 @@ static struct spk_synth synth_audptr = {
static void synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
- synth->io_ops->send_xchar(SYNTH_CLEAR);
+ synth->io_ops->flush_buffer(synth);
+ synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
synth->io_ops->synth_out(synth, PROCSPEECH);
}
@@ -130,11 +130,11 @@ static void synth_version(struct spk_synth *synth)
char synth_id[40] = "";
synth->synth_immediate(synth, "\x05[Q]");
- synth_id[test] = synth->io_ops->synth_in();
+ synth_id[test] = synth->io_ops->synth_in(synth);
if (synth_id[test] == 'A') {
do {
/* read version string from synth */
- synth_id[++test] = synth->io_ops->synth_in();
+ synth_id[++test] = synth->io_ops->synth_in(synth);
} while (synth_id[test] != '\n' && test < 32);
synth_id[++test] = 0x00;
}
diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
index 7408eb29cf38..092cfd08a9e1 100644
--- a/drivers/accessibility/speakup/speakup_decext.c
+++ b/drivers/accessibility/speakup/speakup_decext.c
@@ -218,7 +218,7 @@ static void do_catch_up(struct spk_synth *synth)
static void synth_flush(struct spk_synth *synth)
{
in_escape = 0;
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->synth_immediate(synth, "\033P;10z\033\\");
}
diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
index 96f24c848cc5..dec314dee214 100644
--- a/drivers/accessibility/speakup/speakup_decpc.c
+++ b/drivers/accessibility/speakup/speakup_decpc.c
@@ -125,7 +125,7 @@ enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC };
#define SYNTH_IO_EXTENT 8
static int synth_probe(struct spk_synth *synth);
-static void dtpc_release(void);
+static void dtpc_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -474,7 +474,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void dtpc_release(void)
+static void dtpc_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index ab6d61e80b1c..580ec796816b 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -78,6 +78,8 @@ static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute flush_time_attribute =
+ __ATTR(flush_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
@@ -99,6 +101,7 @@ static struct attribute *synth_attrs[] = {
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
+ &flush_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
@@ -118,6 +121,7 @@ static struct spk_synth synth_dectlk = {
.trigger = 50,
.jiffies = 50,
.full = 40000,
+ .flush_time = 4000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
@@ -200,18 +204,23 @@ static void do_catch_up(struct spk_synth *synth)
static u_char last = '\0';
unsigned long flags;
unsigned long jiff_max;
- unsigned long timeout = msecs_to_jiffies(4000);
+ unsigned long timeout;
DEFINE_WAIT(wait);
struct var_t *jiffy_delta;
struct var_t *delay_time;
+ struct var_t *flush_time;
int jiffy_delta_val;
int delay_time_val;
+ int timeout_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
+ flush_time = spk_get_var(FLUSH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
+ timeout_val = flush_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ timeout = msecs_to_jiffies(timeout_val);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
@@ -289,7 +298,7 @@ static void synth_flush(struct spk_synth *synth)
synth->io_ops->synth_out(synth, ']');
in_escape = 0;
is_flushing = 1;
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, SYNTH_CLEAR);
}
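The dectlk change above goes beyond the mechanical signature updates: it also promotes the hard-coded 4-second flush timeout to a regular speakup variable (default 4000 ms, exposed as the new flush_time attribute). Condensed from the hunk above, the timeout is now derived like this (illustrative extract, not a complete function):

	struct var_t *flush_time = spk_get_var(FLUSH);
	int timeout_val;

	spin_lock_irqsave(&speakup_info.spinlock, flags);
	timeout_val = flush_time->u.n.value;	/* 4000 ms by default */
	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
	timeout = msecs_to_jiffies(timeout_val);

so a user can tune how long do_catch_up() waits for the synthesizer to finish flushing.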
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index dbebed0eeeec..92838d3ae9eb 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -24,7 +24,7 @@
#define PROCSPEECH 0x00
static int synth_probe(struct spk_synth *synth);
-static void dtlk_release(void);
+static void dtlk_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -365,7 +365,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void dtlk_release(void)
+static void dtlk_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 414827e888fc..311f4aa0be22 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -24,7 +24,7 @@
#define SYNTH_CLEAR 0x03
static int synth_probe(struct spk_synth *synth);
-static void keynote_release(void);
+static void keynote_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -295,7 +295,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void keynote_release(void)
+static void keynote_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (synth_port)
diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c
index 3c59519a871f..3e59b387d0c4 100644
--- a/drivers/accessibility/speakup/speakup_ltlk.c
+++ b/drivers/accessibility/speakup/speakup_ltlk.c
@@ -132,7 +132,7 @@ static void synth_interrogate(struct spk_synth *synth)
synth->synth_immediate(synth, "\x18\x01?");
for (i = 0; i < 50; i++) {
- buf[i] = synth->io_ops->synth_in();
+ buf[i] = synth->io_ops->synth_in(synth);
if (i > 2 && buf[i] == 0x7f)
break;
}
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 9a7029539f35..c3f97c572fb6 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -24,7 +24,7 @@
#define CLEAR_SYNTH 0x18
static int softsynth_probe(struct spk_synth *synth);
-static void softsynth_release(void);
+static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth);
static unsigned char get_index(struct spk_synth *synth);
@@ -402,7 +402,7 @@ static int softsynth_probe(struct spk_synth *synth)
return 0;
}
-static void softsynth_release(void)
+static void softsynth_release(struct spk_synth *synth)
{
misc_deregister(&synth_device);
misc_deregister(&synthu_device);
diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c
index 6e933bf1de2e..bd3d8dc300ff 100644
--- a/drivers/accessibility/speakup/speakup_spkout.c
+++ b/drivers/accessibility/speakup/speakup_spkout.c
@@ -117,8 +117,8 @@ static struct spk_synth synth_spkout = {
static void synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
- synth->io_ops->send_xchar(SYNTH_CLEAR);
+ synth->io_ops->flush_buffer(synth);
+ synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
}
module_param_named(ser, synth_spkout.ser, int, 0444);
diff --git a/drivers/accessibility/speakup/spk_priv.h b/drivers/accessibility/speakup/spk_priv.h
index 0f4bcbe5ddb9..9da57ead17cb 100644
--- a/drivers/accessibility/speakup/spk_priv.h
+++ b/drivers/accessibility/speakup/spk_priv.h
@@ -34,8 +34,8 @@
const struct old_serial_port *spk_serial_init(int index);
void spk_stop_serial_interrupt(void);
-void spk_serial_release(void);
-void spk_ttyio_release(void);
+void spk_serial_release(struct spk_synth *synth);
+void spk_ttyio_release(struct spk_synth *synth);
void spk_ttyio_register_ldisc(void);
void spk_ttyio_unregister_ldisc(void);
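The spk_ttyio.c diff that follows removes the global spk_ttyio_synth and most uses of speakup_tty_mutex by making the tty and the synth point at each other: the ldisc data gains a synth back-pointer, and the synth stores its tty in synth->dev. A condensed sketch of the pattern, assembled from the hunks below (the function name and elided body are illustrative):

struct spk_ldisc_data {
	char buf;
	struct completion completion;
	bool buf_free;
	struct spk_synth *synth;	/* set once the ldisc is attached */
};

static unsigned char example_in(struct spk_synth *in_synth)
{
	struct tty_struct *tty = in_synth->dev;	/* stored at init time */
	struct spk_ldisc_data *ldisc_data = tty->disc_data;

	/* ... wait for and consume ldisc_data->buf, no global mutex ... */
	return ldisc_data->buf;
}

Because each callback now recovers its own tty from the synth it was handed, an unplugged device only affects that synth rather than a shared speakup_tty.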
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 6284aff434a1..9af1d4c124d3 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -12,14 +12,15 @@ struct spk_ldisc_data {
char buf;
struct completion completion;
bool buf_free;
+ struct spk_synth *synth;
};
-static struct spk_synth *spk_ttyio_synth;
-static struct tty_struct *speakup_tty;
-/* mutex to protect against speakup_tty disappearing from underneath us while
- * we are using it. this can happen when the device physically unplugged,
- * while in use. it also serialises access to speakup_tty.
+/*
+ * This lets spk_ttyio_ldisc_open detect whether the line discipline is
+ * being set on a speakup-driven device.
*/
+static struct tty_struct *speakup_tty;
+/* This mutex serializes access to the global speakup_tty variable */
static DEFINE_MUTEX(speakup_tty_mutex);
static int ser_to_dev(int ser, dev_t *dev_no)
@@ -67,22 +68,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
static void spk_ttyio_ldisc_close(struct tty_struct *tty)
{
- mutex_lock(&speakup_tty_mutex);
- kfree(speakup_tty->disc_data);
- speakup_tty = NULL;
- mutex_unlock(&speakup_tty_mutex);
+ kfree(tty->disc_data);
}
static int spk_ttyio_receive_buf2(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
+ struct spk_synth *synth = ldisc_data->synth;
- if (spk_ttyio_synth->read_buff_add) {
+ if (synth->read_buff_add) {
int i;
for (i = 0; i < count; i++)
- spk_ttyio_synth->read_buff_add(cp[i]);
+ synth->read_buff_add(cp[i]);
return count;
}
@@ -114,11 +113,11 @@ static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch);
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch);
-static void spk_ttyio_send_xchar(char ch);
-static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear);
-static unsigned char spk_ttyio_in(void);
-static unsigned char spk_ttyio_in_nowait(void);
-static void spk_ttyio_flush_buffer(void);
+static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch);
+static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
+static unsigned char spk_ttyio_in(struct spk_synth *in_synth);
+static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth);
+static void spk_ttyio_flush_buffer(struct spk_synth *in_synth);
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
@@ -152,7 +151,7 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
if (ret)
return ret;
- tty = tty_kopen(dev);
+ tty = tty_kopen_exclusive(dev);
if (IS_ERR(tty))
return PTR_ERR(tty);
@@ -187,13 +186,17 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
mutex_lock(&speakup_tty_mutex);
speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
- if (ret)
- speakup_tty = NULL;
+ speakup_tty = NULL;
mutex_unlock(&speakup_tty_mutex);
- if (!ret)
+ if (!ret) {
/* Success */
+ struct spk_ldisc_data *ldisc_data = tty->disc_data;
+
+ ldisc_data->synth = synth;
+ synth->dev = tty;
return 0;
+ }
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
@@ -221,29 +224,30 @@ void spk_ttyio_unregister_ldisc(void)
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
- mutex_lock(&speakup_tty_mutex);
- if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
- int ret = speakup_tty->ops->write(speakup_tty, &ch, 1);
-
- mutex_unlock(&speakup_tty_mutex);
- if (ret == 0)
- /* No room */
- return 0;
- if (ret < 0) {
- pr_warn("%s: I/O error, deactivating speakup\n",
- in_synth->long_name);
- /* No synth any more, so nobody will restart TTYs,
- * and we thus need to do it ourselves. Now that there
- * is no synth we can let application flood anyway
- */
- in_synth->alive = 0;
- speakup_start_ttys();
- return 0;
- }
+ struct tty_struct *tty = in_synth->dev;
+ int ret;
+
+ if (!in_synth->alive || !tty->ops->write)
+ return 0;
+
+ ret = tty->ops->write(tty, &ch, 1);
+
+ if (ret == 0)
+ /* No room */
+ return 0;
+
+ if (ret > 0)
+ /* Success */
return 1;
- }
- mutex_unlock(&speakup_tty_mutex);
+ pr_warn("%s: I/O error, deactivating speakup\n",
+ in_synth->long_name);
+ /* No synth any more, so nobody will restart TTYs,
+ * and we thus need to do it ourselves. Now that there
+ * is no synth we can let application flood anyway
+ */
+ in_synth->alive = 0;
+ speakup_start_ttys();
return 0;
}
@@ -264,47 +268,20 @@ static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch)
return ret;
}
-static int check_tty(struct tty_struct *tty)
-{
- if (!tty) {
- pr_warn("%s: I/O error, deactivating speakup\n",
- spk_ttyio_synth->long_name);
- /* No synth any more, so nobody will restart TTYs, and we thus
- * need to do it ourselves. Now that there is no synth we can
- * let application flood anyway
- */
- spk_ttyio_synth->alive = 0;
- speakup_start_ttys();
- return 1;
- }
-
- return 0;
-}
-
-static void spk_ttyio_send_xchar(char ch)
+static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->send_xchar)
- speakup_tty->ops->send_xchar(speakup_tty, ch);
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->send_xchar)
+ tty->ops->send_xchar(tty, ch);
}
-static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
+static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->tiocmset)
- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->tiocmset)
+ tty->ops->tiocmset(tty, set, clear);
}
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
@@ -312,9 +289,10 @@ static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
return 1;
}
-static unsigned char ttyio_in(int timeout)
+static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
{
- struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
+ struct tty_struct *tty = in_synth->dev;
+ struct spk_ldisc_data *ldisc_data = tty->disc_data;
char rv;
if (!timeout) {
@@ -334,35 +312,29 @@ static unsigned char ttyio_in(int timeout)
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
- tty_schedule_flip(speakup_tty->port);
+ tty_schedule_flip(tty->port);
return rv;
}
-static unsigned char spk_ttyio_in(void)
+static unsigned char spk_ttyio_in(struct spk_synth *in_synth)
{
- return ttyio_in(SPK_SYNTH_TIMEOUT);
+ return ttyio_in(in_synth, SPK_SYNTH_TIMEOUT);
}
-static unsigned char spk_ttyio_in_nowait(void)
+static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth)
{
- u8 rv = ttyio_in(0);
+ u8 rv = ttyio_in(in_synth, 0);
return (rv == 0xff) ? 0 : rv;
}
-static void spk_ttyio_flush_buffer(void)
+static void spk_ttyio_flush_buffer(struct spk_synth *in_synth)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->flush_buffer)
- speakup_tty->ops->flush_buffer(speakup_tty);
-
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->flush_buffer)
+ tty->ops->flush_buffer(tty);
}
int spk_ttyio_synth_probe(struct spk_synth *synth)
@@ -373,37 +345,38 @@ int spk_ttyio_synth_probe(struct spk_synth *synth)
return rv;
synth->alive = 1;
- spk_ttyio_synth = synth;
return 0;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe);
-void spk_ttyio_release(void)
+void spk_ttyio_release(struct spk_synth *in_synth)
{
- if (!speakup_tty)
- return;
+ struct tty_struct *tty = in_synth->dev;
- tty_lock(speakup_tty);
+ tty_lock(tty);
- if (speakup_tty->ops->close)
- speakup_tty->ops->close(speakup_tty, NULL);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+
+ tty_ldisc_flush(tty);
+ tty_unlock(tty);
+ tty_kclose(tty);
- tty_ldisc_flush(speakup_tty);
- tty_unlock(speakup_tty);
- tty_kclose(speakup_tty);
+ in_synth->dev = NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
-const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff)
+const char *spk_ttyio_synth_immediate(struct spk_synth *in_synth, const char *buff)
{
+ struct tty_struct *tty = in_synth->dev;
u_char ch;
while ((ch = *buff)) {
if (ch == '\n')
- ch = synth->procspeech;
- if (tty_write_room(speakup_tty) < 1 ||
- !synth->io_ops->synth_out(synth, ch))
+ ch = in_synth->procspeech;
+ if (tty_write_room(tty) < 1 ||
+ !in_synth->io_ops->synth_out(in_synth, ch))
return buff;
buff++;
}
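
The spk_ttyio.c changes above replace module-global state (spk_ttyio_synth, and the mutex-guarded lookups of speakup_tty in every I/O callback) with per-synth state: the ldisc data keeps a back-pointer to the synth, the synth keeps its tty in synth->dev, and each callback derives the tty from its synth argument. A minimal sketch of the pattern, using hypothetical my_* names rather than the real speakup types:

	#include <linux/tty.h>

	/* Sketch only: per-device state threaded through callbacks. */
	struct my_synth {
		int alive;
		struct tty_struct *dev;	/* stashed when the ldisc is attached */
	};

	static int my_synth_out(struct my_synth *synth, unsigned char ch)
	{
		struct tty_struct *tty = synth->dev;

		if (!synth->alive || !tty->ops->write)
			return 0;
		/* a positive return means the tty driver accepted the byte */
		return tty->ops->write(tty, &ch, 1) > 0;
	}

With the device reachable from the synth itself, no lock is needed merely to find it, and the mutex shrinks to guarding the one remaining global, speakup_tty.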
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index 91fca3033a45..6a96ad94bc3f 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -48,7 +48,7 @@ enum var_id_t {
ATTRIB_BLEEP, BLEEPS,
RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
DIRECT, PAUSE,
- CAPS_START, CAPS_STOP, CHARTAB, INFLECTION,
+ CAPS_START, CAPS_STOP, CHARTAB, INFLECTION, FLUSH,
MAXVARS
};
@@ -157,11 +157,11 @@ struct spk_synth;
struct spk_io_ops {
int (*synth_out)(struct spk_synth *synth, const char ch);
int (*synth_out_unicode)(struct spk_synth *synth, u16 ch);
- void (*send_xchar)(char ch);
- void (*tiocmset)(unsigned int set, unsigned int clear);
- unsigned char (*synth_in)(void);
- unsigned char (*synth_in_nowait)(void);
- void (*flush_buffer)(void);
+ void (*send_xchar)(struct spk_synth *synth, char ch);
+ void (*tiocmset)(struct spk_synth *synth, unsigned int set, unsigned int clear);
+ unsigned char (*synth_in)(struct spk_synth *synth);
+ unsigned char (*synth_in_nowait)(struct spk_synth *synth);
+ void (*flush_buffer)(struct spk_synth *synth);
int (*wait_for_xmitr)(struct spk_synth *synth);
};
@@ -178,6 +178,7 @@ struct spk_synth {
int trigger;
int jiffies;
int full;
+ int flush_time;
int ser;
char *dev_name;
short flags;
@@ -188,7 +189,7 @@ struct spk_synth {
int *default_vol;
struct spk_io_ops *io_ops;
int (*probe)(struct spk_synth *synth);
- void (*release)(void);
+ void (*release)(struct spk_synth *synth);
const char *(*synth_immediate)(struct spk_synth *synth,
const char *buff);
void (*catch_up)(struct spk_synth *synth);
@@ -200,6 +201,8 @@ struct spk_synth {
struct synth_indexing indexing;
int alive;
struct attribute_group attributes;
+
+ void *dev;
};
/*
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index ac47dbac7207..2b8699673bac 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -137,14 +137,14 @@ EXPORT_SYMBOL_GPL(spk_do_catch_up_unicode);
void spk_synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, synth->clear);
}
EXPORT_SYMBOL_GPL(spk_synth_flush);
unsigned char spk_synth_get_index(struct spk_synth *synth)
{
- return synth->io_ops->synth_in_nowait();
+ return synth->io_ops->synth_in_nowait(synth);
}
EXPORT_SYMBOL_GPL(spk_synth_get_index);
@@ -348,6 +348,7 @@ struct var_t synth_time_vars[] = {
{ TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } },
{ JIFFY, .u.n = {NULL, 50, 20, 200, 0, 0, NULL } },
{ FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } },
+ { FLUSH, .u.n = {NULL, 4000, 100, 4000, 0, 0, NULL } },
V_LAST_VAR
};
@@ -408,6 +409,8 @@ static int do_synth_init(struct spk_synth *in_synth)
synth_time_vars[2].u.n.default_val = synth->jiffies;
synth_time_vars[3].u.n.value =
synth_time_vars[3].u.n.default_val = synth->full;
+ synth_time_vars[4].u.n.value =
+ synth_time_vars[4].u.n.default_val = synth->flush_time;
synth_printf("%s", synth->init);
for (var = synth->vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
@@ -440,7 +443,7 @@ void synth_release(void)
sysfs_remove_group(speakup_kobj, &synth->attributes);
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
- synth->release();
+ synth->release(synth);
synth = NULL;
}
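
For reference, here is an annotated copy of the new FLUSH entry; the field meanings follow the initializer order of speakup's struct num_var_t as I read it (format string, default, low bound, high bound, offset, multiplier, out string), so treat the labels as an interpretation rather than new code:

	{ FLUSH, .u.n = { NULL,		/* no synth format string */
			  4000,		/* default flush timeout */
			  100,		/* lowest accepted value */
			  4000,		/* highest accepted value */
			  0, 0, NULL } },

As the do_synth_init() hunk above shows, the default is then overridden per synth from the new flush_time field, and the varhandlers.c hunk below exposes it as the flush_time variable.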
diff --git a/drivers/accessibility/speakup/varhandlers.c b/drivers/accessibility/speakup/varhandlers.c
index d7f6bec7ff06..067c0da97dcb 100644
--- a/drivers/accessibility/speakup/varhandlers.c
+++ b/drivers/accessibility/speakup/varhandlers.c
@@ -23,6 +23,7 @@ static struct st_var_header var_headers[] = {
{ "trigger_time", TRIGGER, VAR_TIME, NULL, NULL },
{ "jiffy_delta", JIFFY, VAR_TIME, NULL, NULL },
{ "full_time", FULL, VAR_TIME, NULL, NULL },
+ { "flush_time", FLUSH, VAR_TIME, NULL, NULL },
{ "spell_delay", SPELL_DELAY, VAR_NUM, &spk_spell_delay, NULL },
{ "bleeps", BLEEPS, VAR_NUM, &spk_bleeps, NULL },
{ "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &spk_attrib_bleep, NULL },
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..eedec61e3476 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -87,6 +87,14 @@ config ACPI_SPCR_TABLE
This table provides information about the configuration of the
earlycon console.
+config ACPI_FPDT
+ bool "ACPI Firmware Performance Data Table (FPDT) support"
+ depends on X86_64
+ help
+ Enable support for the Firmware Performance Data Table (FPDT).
+ This table provides information on the timing of the system
+ boot, S3 suspend and S3 resume firmware code paths.
+
config ACPI_LPIT
bool
depends on X86_64
@@ -326,6 +334,9 @@ config ACPI_THERMAL
To compile this driver as a module, choose M here:
the module will be called thermal.
+config ACPI_PLATFORM_PROFILE
+ tristate
+
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
default ""
@@ -395,9 +406,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +419,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 076894a3330f..700b41adf2db 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -57,6 +57,7 @@ acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
+obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 46a64e9fa716..b41180330cc1 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -6,6 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: AC: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,8 +20,6 @@
#include <linux/acpi.h>
#include <acpi/battery.h>
-#define PREFIX "ACPI: "
-
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
@@ -28,9 +28,6 @@
#define ACPI_AC_STATUS_ONLINE 0x01
#define ACPI_AC_STATUS_UNKNOWN 0xFF
-#define _COMPONENT ACPI_AC_COMPONENT
-ACPI_MODULE_NAME("ac");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
@@ -102,8 +99,9 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Error reading AC Adapter state"));
+ acpi_handle_info(ac->device->handle,
+ "Error reading AC Adapter state: %s\n",
+ acpi_format_exception(status));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
@@ -153,8 +151,8 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
switch (event) {
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
fallthrough;
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
@@ -278,9 +276,8 @@ static int acpi_ac_add(struct acpi_device *device)
goto end;
}
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- ac->state ? "on-line" : "off-line");
+ pr_info("%s [%s] (%s)\n", acpi_device_name(device),
+ acpi_device_bid(device), ac->state ? "on-line" : "off-line");
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
@@ -348,7 +345,7 @@ static int __init acpi_ac_init(void)
for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
acpi_ac_blacklist[i].hrv)) {
- pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
+ pr_info("found native %s PMIC, not loading\n",
acpi_ac_blacklist[i].hid);
return -ENODEV;
}
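
The ac.c conversion above rests on the pr_fmt() hook: when a file defines pr_fmt before pulling in the printk machinery, every pr_info()/pr_warn()/pr_debug() in that file has the prefix pasted in at compile time, which is what makes dropping the hand-rolled PREFIX safe. A stand-alone sketch (hypothetical module, not part of the patch):

	#define pr_fmt(fmt) "ACPI: AC: " fmt

	#include <linux/printk.h>

	static void example(void)
	{
		/* Compiles to printk(KERN_INFO "ACPI: AC: adapter %s\n", ...) */
		pr_info("adapter %s\n", "on-line");
	}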
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index cf91f49101ea..3a14859dbb75 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -268,7 +268,12 @@ static int __init acpi_configfs_init(void)
acpi_table_group = configfs_register_default_group(root, "table",
&acpi_tables_type);
- return PTR_ERR_OR_ZERO(acpi_table_group);
+ if (IS_ERR(acpi_table_group)) {
+ configfs_unregister_subsystem(&acpi_configfs);
+ return PTR_ERR(acpi_table_group);
+ }
+
+ return 0;
}
module_init(acpi_configfs_init);
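
The acpi_configfs.c fix replaces PTR_ERR_OR_ZERO(), which silently left the configfs subsystem registered when the default group failed to register, with an explicit unwind. The same shape is often written with goto labels once more than one step needs undoing; a hypothetical sketch:

	#include <linux/init.h>

	int register_first(void);	/* hypothetical helpers */
	void unregister_first(void);
	int register_second(void);

	static int __init example_init(void)
	{
		int err;

		err = register_first();
		if (err)
			return err;

		err = register_second();
		if (err)
			goto err_unregister_first;

		return 0;

	err_unregister_first:
		unregister_first();
		return err;
	}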
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
new file mode 100644
index 000000000000..a89a806a7a2a
--- /dev/null
+++ b/drivers/acpi/acpi_fpdt.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * FPDT support for exporting boot and suspend/resume performance data
+ *
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "ACPI FPDT: " fmt
+
+#include <linux/acpi.h>
+
+/*
+ * FPDT contains an ACPI table header and a number of fpdt_subtable_entries.
+ * Each fpdt_subtable_entry points to a subtable: FBPT or S3PT.
+ * Each FPDT subtable (FBPT/S3PT) is composed of an fpdt_subtable_header
+ * and a number of FPDT performance records.
+ * Each FPDT performance record is composed of an fpdt_record_header and
+ * performance data fields for the boot, suspend or resume phase.
+ */
+enum fpdt_subtable_type {
+ SUBTABLE_FBPT,
+ SUBTABLE_S3PT,
+};
+
+struct fpdt_subtable_entry {
+ u16 type; /* refer to enum fpdt_subtable_type */
+ u8 length;
+ u8 revision;
+ u32 reserved;
+ u64 address; /* physical address of the S3PT/FBPT table */
+};
+
+struct fpdt_subtable_header {
+ u32 signature;
+ u32 length;
+};
+
+enum fpdt_record_type {
+ RECORD_S3_RESUME,
+ RECORD_S3_SUSPEND,
+ RECORD_BOOT,
+};
+
+struct fpdt_record_header {
+ u16 type; /* refer to enum fpdt_record_type */
+ u8 length;
+ u8 revision;
+};
+
+struct resume_performance_record {
+ struct fpdt_record_header header;
+ u32 resume_count;
+ u64 resume_prev;
+ u64 resume_avg;
+} __attribute__((packed));
+
+struct boot_performance_record {
+ struct fpdt_record_header header;
+ u32 reserved;
+ u64 firmware_start;
+ u64 bootloader_load;
+ u64 bootloader_launch;
+ u64 exitbootservice_start;
+ u64 exitbootservice_end;
+} __attribute__((packed));
+
+struct suspend_performance_record {
+ struct fpdt_record_header header;
+ u64 suspend_start;
+ u64 suspend_end;
+} __attribute__((packed));
+
+static struct resume_performance_record *record_resume;
+static struct suspend_performance_record *record_suspend;
+static struct boot_performance_record *record_boot;
+
+#define FPDT_ATTR(phase, name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%llu\n", record_##phase->name); \
+} \
+static struct kobj_attribute name##_attr = \
+__ATTR(name##_ns, 0444, name##_show, NULL)
+
+FPDT_ATTR(resume, resume_prev);
+FPDT_ATTR(resume, resume_avg);
+FPDT_ATTR(suspend, suspend_start);
+FPDT_ATTR(suspend, suspend_end);
+FPDT_ATTR(boot, firmware_start);
+FPDT_ATTR(boot, bootloader_load);
+FPDT_ATTR(boot, bootloader_launch);
+FPDT_ATTR(boot, exitbootservice_start);
+FPDT_ATTR(boot, exitbootservice_end);
+
+static ssize_t resume_count_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", record_resume->resume_count);
+}
+
+static struct kobj_attribute resume_count_attr =
+__ATTR_RO(resume_count);
+
+static struct attribute *resume_attrs[] = {
+ &resume_count_attr.attr,
+ &resume_prev_attr.attr,
+ &resume_avg_attr.attr,
+ NULL
+};
+
+static const struct attribute_group resume_attr_group = {
+ .attrs = resume_attrs,
+ .name = "resume",
+};
+
+static struct attribute *suspend_attrs[] = {
+ &suspend_start_attr.attr,
+ &suspend_end_attr.attr,
+ NULL
+};
+
+static const struct attribute_group suspend_attr_group = {
+ .attrs = suspend_attrs,
+ .name = "suspend",
+};
+
+static struct attribute *boot_attrs[] = {
+ &firmware_start_attr.attr,
+ &bootloader_load_attr.attr,
+ &bootloader_launch_attr.attr,
+ &exitbootservice_start_attr.attr,
+ &exitbootservice_end_attr.attr,
+ NULL
+};
+
+static const struct attribute_group boot_attr_group = {
+ .attrs = boot_attrs,
+ .name = "boot",
+};
+
+static struct kobject *fpdt_kobj;
+
+static int fpdt_process_subtable(u64 address, u32 subtable_type)
+{
+ struct fpdt_subtable_header *subtable_header;
+ struct fpdt_record_header *record_header;
+ char *signature = (subtable_type == SUBTABLE_FBPT ? "FBPT" : "S3PT");
+ u32 length, offset;
+ int result;
+
+ subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
+ if (!subtable_header)
+ return -ENOMEM;
+
+	if (strncmp((char *)&subtable_header->signature, signature, 4)) {
+		pr_info(FW_BUG "subtable signature and type mismatch!\n");
+		/* Do not leak the header mapping on the error path */
+		acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header));
+		return -EINVAL;
+	}
+
+ length = subtable_header->length;
+ acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header));
+
+ subtable_header = acpi_os_map_memory(address, length);
+ if (!subtable_header)
+ return -ENOMEM;
+
+ offset = sizeof(*subtable_header);
+ while (offset < length) {
+ record_header = (void *)subtable_header + offset;
+ offset += record_header->length;
+
+ switch (record_header->type) {
+ case RECORD_S3_RESUME:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+ return -EINVAL;
+ }
+ if (record_resume) {
+ pr_err("Duplicate resume performance record found.\n");
+ continue;
+ }
+ record_resume = (struct resume_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ if (result)
+ return result;
+ break;
+ case RECORD_S3_SUSPEND:
+ if (subtable_type != SUBTABLE_S3PT) {
+				pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+ continue;
+ }
+ if (record_suspend) {
+ pr_err("Duplicate suspend performance record found.\n");
+ continue;
+ }
+ record_suspend = (struct suspend_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ if (result)
+ return result;
+ break;
+ case RECORD_BOOT:
+ if (subtable_type != SUBTABLE_FBPT) {
+				pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+ return -EINVAL;
+ }
+ if (record_boot) {
+ pr_err("Duplicate boot performance record found.\n");
+ continue;
+ }
+ record_boot = (struct boot_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ if (result)
+ return result;
+ break;
+
+ default:
+ pr_err(FW_BUG "Invalid record %d found.\n", record_header->type);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int __init acpi_init_fpdt(void)
+{
+ acpi_status status;
+ struct acpi_table_header *header;
+ struct fpdt_subtable_entry *subtable;
+ u32 offset = sizeof(*header);
+
+ status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ if (!fpdt_kobj)
+ return -ENOMEM;
+
+ while (offset < header->length) {
+ subtable = (void *)header + offset;
+ switch (subtable->type) {
+ case SUBTABLE_FBPT:
+ case SUBTABLE_S3PT:
+ fpdt_process_subtable(subtable->address,
+ subtable->type);
+ break;
+ default:
+ pr_info(FW_BUG "Invalid subtable type %d found.\n",
+ subtable->type);
+ break;
+ }
+ offset += sizeof(*subtable);
+ }
+ return 0;
+}
+
+fs_initcall(acpi_init_fpdt);
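
Given the kobject layout built above (fpdt_kobj created under acpi_kobj, the named attribute groups, and the _ns suffix added by FPDT_ATTR), the records surface under /sys/firmware/acpi/fpdt/{boot,suspend,resume}/. A minimal hypothetical userspace reader:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long ns;
		FILE *f = fopen("/sys/firmware/acpi/fpdt/boot/firmware_start_ns", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &ns) == 1)
			printf("firmware_start: %llu ns\n", ns);
		fclose(f);
		return 0;
	}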
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b8745ce48a47..b84ab722feb4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -261,7 +261,7 @@ static uint32_t acpi_pad_idle_cpus_num(void)
return ps_tsk_num;
}
-static ssize_t acpi_pad_rrtime_store(struct device *dev,
+static ssize_t rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -275,16 +275,14 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_rrtime_show(struct device *dev,
+static ssize_t rrtime_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
-static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
- acpi_pad_rrtime_show,
- acpi_pad_rrtime_store);
+static DEVICE_ATTR_RW(rrtime);
-static ssize_t acpi_pad_idlepct_store(struct device *dev,
+static ssize_t idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -298,16 +296,14 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_idlepct_show(struct device *dev,
+static ssize_t idlepct_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
-static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
- acpi_pad_idlepct_show,
- acpi_pad_idlepct_store);
+static DEVICE_ATTR_RW(idlepct);
-static ssize_t acpi_pad_idlecpus_store(struct device *dev,
+static ssize_t idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -319,16 +315,14 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_idlecpus_show(struct device *dev,
+static ssize_t idlecpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(false, buf,
to_cpumask(pad_busy_cpus_bits));
}
-static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
- acpi_pad_idlecpus_show,
- acpi_pad_idlecpus_store);
+static DEVICE_ATTR_RW(idlecpus);
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
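
DEVICE_ATTR_RW(name) is shorthand for DEVICE_ATTR(name, 0644, name_show, name_store), which is why the handlers had to be renamed to the name_show()/name_store() convention first. One caveat: the macro hard-codes mode 0644, so it is a drop-in replacement for acpi_pad's S_IRUGO | S_IWUSR attributes, but the acpi_tad conversion further down also widens the old S_IRUSR | S_IWUSR (0600) attributes to world-readable. A hypothetical minimal example:

	#include <linux/device.h>
	#include <linux/kernel.h>

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		return count;	/* accept and ignore, for illustration */
	}
	static DEVICE_ATTR_RW(foo);	/* creates dev_attr_foo, mode 0644 */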
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 7d45cce0c3c1..e9b8e8305e23 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -237,7 +237,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
rt.tz, rt.daylight);
}
-static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
+static DEVICE_ATTR_RW(time);
static struct attribute *acpi_tad_time_attrs[] = {
&dev_attr_time.attr,
@@ -446,7 +446,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store);
+static DEVICE_ATTR_RW(ac_alarm);
static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -462,7 +462,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store);
+static DEVICE_ATTR_RW(ac_policy);
static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -478,7 +478,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store);
+static DEVICE_ATTR_RW(ac_status);
static struct attribute *acpi_tad_attrs[] = {
&dev_attr_caps.attr,
@@ -505,7 +505,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store);
+static DEVICE_ATTR_RW(dc_alarm);
static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -521,7 +521,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store);
+static DEVICE_ATTR_RW(dc_policy);
static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -537,7 +537,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store);
+static DEVICE_ATTR_RW(dc_status);
static struct attribute *acpi_tad_dc_attrs[] = {
&dev_attr_dc_alarm.attr,
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a322a7bd286b..2ea1781290cc 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -7,6 +7,8 @@
* Copyright (C) 2006 Thomas Tuttle <linux-kernel@ttuttle.net>
*/
+#define pr_fmt(fmt) "ACPI: video: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -26,16 +28,11 @@
#include <acpi/video.h>
#include <linux/uaccess.h>
-#define PREFIX "ACPI: "
-
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define MAX_NAME_LEN 20
-#define _COMPONENT ACPI_VIDEO_COMPONENT
-ACPI_MODULE_NAME("video");
-
MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
@@ -326,11 +323,11 @@ acpi_video_device_lcd_query_levels(acpi_handle handle,
*levels = NULL;
status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return status;
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _BCL data\n");
+ acpi_handle_info(handle, "Invalid _BCL data\n");
status = -EFAULT;
goto err;
}
@@ -354,7 +351,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
status = acpi_execute_simple_method(device->dev->handle,
"_BCM", level);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
+ acpi_handle_info(device->dev->handle, "_BCM evaluation failed\n");
return -EIO;
}
@@ -368,7 +365,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
return 0;
}
- ACPI_ERROR((AE_INFO, "Current brightness invalid"));
+ acpi_handle_info(device->dev->handle, "Current brightness invalid\n");
return -EINVAL;
}
@@ -622,9 +619,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
* BQC returned an invalid level.
* Stop using it.
*/
- ACPI_WARNING((AE_INFO,
- "%s returned an invalid level",
- buf));
+ acpi_handle_info(device->dev->handle,
+					 "%s returned an invalid level\n", buf);
device->cap._BQC = device->cap._BCQ = 0;
} else {
/*
@@ -635,7 +631,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
* ACPI video backlight still works w/ buggy _BQC.
* http://bugzilla.kernel.org/show_bug.cgi?id=12233
*/
- ACPI_WARNING((AE_INFO, "Evaluating %s failed", buf));
+ acpi_handle_info(device->dev->handle,
+					 "%s evaluation failed\n", buf);
device->cap._BQC = device->cap._BCQ = 0;
}
}
@@ -675,7 +672,7 @@ acpi_video_device_EDID(struct acpi_video_device *device,
if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
- printk(KERN_ERR PREFIX "Invalid _DDC data\n");
+ acpi_handle_info(device->dev->handle, "Invalid _DDC data\n");
status = -EFAULT;
kfree(obj);
}
@@ -827,10 +824,9 @@ int acpi_video_get_levels(struct acpi_device *device,
int result = 0;
u32 value;
- if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle,
- &obj))) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
- "LCD brightness level\n"));
+ if (ACPI_FAILURE(acpi_video_device_lcd_query_levels(device->handle, &obj))) {
+ acpi_handle_debug(device->handle,
+ "Could not query available LCD brightness level\n");
result = -ENODEV;
goto out;
}
@@ -842,7 +838,6 @@ int acpi_video_get_levels(struct acpi_device *device,
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
- printk(KERN_ERR "can't allocate memory\n");
result = -ENOMEM;
goto out;
}
@@ -863,7 +858,7 @@ int acpi_video_get_levels(struct acpi_device *device,
for (i = 0; i < obj->package.count; i++) {
o = (union acpi_object *)&obj->package.elements[i];
if (o->type != ACPI_TYPE_INTEGER) {
- printk(KERN_ERR PREFIX "Invalid data\n");
+ acpi_handle_info(device->handle, "Invalid data\n");
continue;
}
value = (u32) o->integer.value;
@@ -900,7 +895,8 @@ int acpi_video_get_levels(struct acpi_device *device,
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > ACPI_VIDEO_FIRST_LEVEL)
- ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package"));
+ acpi_handle_info(device->handle,
+				 "Too many duplicates in _BCL package\n");
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[ACPI_VIDEO_FIRST_LEVEL]) {
@@ -910,8 +906,8 @@ int acpi_video_get_levels(struct acpi_device *device,
sizeof(br->levels[ACPI_VIDEO_FIRST_LEVEL]),
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
- ACPI_ERROR((AE_INFO,
- "Found unordered _BCL package"));
+ acpi_handle_info(device->handle,
+				 "Found unordered _BCL package\n");
br->count = count;
*dev_br = br;
@@ -989,9 +985,9 @@ set_level:
if (result)
goto out_free_levels;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n",
- br->count - ACPI_VIDEO_FIRST_LEVEL));
+ acpi_handle_debug(device->dev->handle, "found %d brightness levels\n",
+ br->count - ACPI_VIDEO_FIRST_LEVEL);
+
return 0;
out_free_levels:
@@ -1023,7 +1019,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_has_method(device->dev->handle, "_BQC")) {
device->cap._BQC = 1;
} else if (acpi_has_method(device->dev->handle, "_BCQ")) {
- printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
+ acpi_handle_info(device->dev->handle,
+ "_BCQ is used instead of _BQC\n");
device->cap._BCQ = 1;
}
@@ -1083,8 +1080,7 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
/* Does this device support video switching? */
if (video->cap._DOS || video->cap._DOD) {
if (!video->cap._DOS) {
- printk(KERN_WARNING FW_BUG
- "ACPI(%s) defines _DOD but not _DOS\n",
+ pr_info(FW_BUG "ACPI(%s) defines _DOD but not _DOS\n",
acpi_device_bid(video->device));
}
video->flags.multihead = 1;
@@ -1272,7 +1268,8 @@ acpi_video_device_bind(struct acpi_video_bus *video,
ids = &video->attached_array[i];
if (device->device_id == (ids->value.int_val & 0xffff)) {
ids->bind_info = device;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
+ acpi_handle_debug(video->device->handle, "%s: %d\n",
+ __func__, i);
}
}
}
@@ -1324,20 +1321,22 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
return AE_NOT_EXIST;
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
- if (!ACPI_SUCCESS(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_info(video->device->handle,
+ "_DOD evaluation failed: %s\n",
+ acpi_format_exception(status));
return status;
}
dod = buffer.pointer;
if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
- ACPI_EXCEPTION((AE_INFO, status, "Invalid _DOD data"));
+ acpi_handle_info(video->device->handle, "Invalid _DOD data\n");
status = -EFAULT;
goto out;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
- dod->package.count));
+ acpi_handle_debug(video->device->handle, "Found %d video heads in _DOD\n",
+ dod->package.count);
active_list = kcalloc(1 + dod->package.count,
sizeof(struct acpi_video_enumerated_device),
@@ -1352,15 +1351,18 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
obj = &dod->package.elements[i];
if (obj->type != ACPI_TYPE_INTEGER) {
- printk(KERN_ERR PREFIX
- "Invalid _DOD data in element %d\n", i);
+ acpi_handle_info(video->device->handle,
+ "Invalid _DOD data in element %d\n", i);
continue;
}
active_list[count].value.int_val = obj->integer.value;
active_list[count].bind_info = NULL;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
- (int)obj->integer.value));
+
+ acpi_handle_debug(video->device->handle,
+ "_DOD element[%d] = %d\n", i,
+ (int)obj->integer.value);
+
count++;
}
@@ -1451,7 +1453,8 @@ acpi_video_switch_brightness(struct work_struct *work)
out:
if (result)
- printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
+ acpi_handle_info(device->dev->handle,
+ "Failed to switch brightness\n");
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
@@ -1601,8 +1604,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
@@ -1675,8 +1678,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
keycode = KEY_DISPLAY_OFF;
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
break;
}
@@ -1812,11 +1814,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
&device->cooling_dev->device.kobj,
"thermal_cooling");
if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ pr_info("sysfs link creation failed\n");
+
result = sysfs_create_link(&device->cooling_dev->device.kobj,
&device->dev->dev.kobj, "device");
if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ pr_info("Reverse sysfs link creation failed\n");
}
static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
@@ -2030,7 +2033,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
- printk(KERN_WARNING FW_BUG
+ pr_info(FW_BUG
"Duplicate ACPI video bus devices for the"
" same VGA controller, please try module "
"parameter \"video.allow_duplicates=1\""
@@ -2073,7 +2076,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
- printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
+ pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
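
The acpi_video.c changes follow the same pattern as ac.c: the ACPICA-style ACPI_ERROR()/ACPI_DEBUG_PRINT() wrappers give way to pr_*() and to the acpi_handle_* helpers, which prefix each message with the ACPI namespace path of the handle so the affected device stays identifiable. A hedged sketch of the idiom:

	#include <linux/acpi.h>

	static void report_failure(struct acpi_device *adev, acpi_status status)
	{
		/* Logs something like "ACPI: \_SB.PCI0.GFX0: _XYZ evaluation
		 * failed: ..." (_XYZ is a placeholder method name) */
		acpi_handle_info(adev->handle, "_XYZ evaluation failed: %s\n",
				 acpi_format_exception(status));
	}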
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 173447d50acf..725e2f65cdca 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2021 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 94e18bb76556..be3826f46f88 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index cf85d66da6e7..53b41c7a6119 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index f8a3abdfe250..3ccc7b2a76f1 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 7ba6e308f146..3170a24fe505 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 79f292687bd6..82a75964343b 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 2fee91f57b21..d41b810e367c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6ab92e28330d..810de0b4c125 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index a6d896cda2a5..816a16e1fc4c 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index f83b98fa13ac..be57436182a1 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 168904ba3086..93bd2d19c156 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40f6a3c33a15..199aabac3790 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 9f0219a8cb98..9db5ae0f79ea 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -284,6 +284,7 @@ struct acpi_object_addr_handler {
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
void *context;
+ acpi_mutex context_mutex;
acpi_adr_space_setup setup;
union acpi_operand_object *region_list; /* Regions using this handler */
union acpi_operand_object *next;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8825394be9ab..c3f12ee9fc6f 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index bc00b85c0a8f..8e40e5909458 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 57ea2276790f..15cf904f0751 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 6de8a1650d3d..0cb975a3e01d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 4c900c108f3f..e3beb096c46d 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 734624facda3..e2d0046799a2 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 7c89b470ec81..be6de7149e67 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1d541bbac4a3..d6b088c5001f 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index e5234e001acf..a9d91a3c2994 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index f5fba14461a6..fd813c5d3952 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2952856b8a67..b8a48923064f 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -473,7 +473,7 @@ char *acpi_db_get_next_token(char *string,
/* Remove any spaces at the beginning, ignore blank lines */
- while (*string && isspace(*string)) {
+ while (*string && isspace((int)*string)) {
string++;
}
@@ -571,7 +571,7 @@ char *acpi_db_get_next_token(char *string,
/* Find end of token */
- while (*string && !isspace(*string)) {
+ while (*string && !isspace((int)*string)) {
string++;
}
break;
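
The isspace() casts above silence compiler and lint complaints about passing a plain char, and it is worth recalling why the argument type matters: the ctype functions take an int whose value must be representable as an unsigned char (or be EOF), so on platforms where char is signed, a byte with the high bit set becomes a negative argument and the behavior is undefined. The classic portable idiom casts through unsigned char instead; a hypothetical standalone version:

	#include <ctype.h>

	static char *skip_spaces_portable(char *s)
	{
		while (*s && isspace((unsigned char)*s))
			s++;
		return s;
	}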
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index 4b4c530a0654..95ab91b35f29 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -47,7 +47,7 @@ acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state)
/* Ignore control codes, they are not errors */
- if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(status)) {
return;
}
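
Judging by the expression it replaces here and in dsdebug.c below, ACPI_CNTL_EXCEPTION(status) is a named form of the open-coded control-code test, presumably defined along these lines:

	#define ACPI_CNTL_EXCEPTION(status) \
		(((status) & AE_CODE_MASK) == AE_CODE_CONTROL)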
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index ad17f62e51d9..6630d6536fb0 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4b5b6e859f62..a152f03135cd 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -62,7 +62,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
}
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case AML_IF_OP:
/*
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 63bc5f19fb82..b9b03d629930 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -100,7 +100,7 @@ acpi_ds_dump_method_stack(acpi_status status,
/* Ignore control codes, they are not errors */
- if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(status)) {
return_VOID;
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index fa768b3a989e..a16817767969 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 9be2a309424c..ba6f882e83bc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index cf67caff878a..8e011e59b9b4 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c0a14a6a2c20..3c0c31157e7e 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index d9c26e720cb7..639635291ab7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index d869568d55c2..e642d65bcc66 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 1d4f8c81028c..41ba7773fd10 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -598,8 +598,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
break;
}
- /* Fall through */
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case AML_INT_EVAL_SUBTREE_OP:
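
ACPI_FALLTHROUGH replaces both the "/* Fall through */" comments and the "/*lint -fallthrough */" annotations throughout the dispatcher, giving the code a single marker that compilers running -Wimplicit-fallthrough can recognize. Its definition is platform-dependent and not shown in this patch; a plausible shape, labeled as an assumption:

	/* Assumed definition, not taken from this patch: */
	#if defined(__GNUC__) && (__GNUC__ >= 7)
	#define ACPI_FALLTHROUGH __attribute__((__fallthrough__))
	#else
	#define ACPI_FALLTHROUGH do {} while (0)
	#endif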
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 27069325b6de..a377638e44f9 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -224,7 +224,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
break;
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index edadbe146506..3625952c3957 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -214,7 +214,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
break;
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9c397642fed7..9c123af08bc1 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 809a0c0536b5..fbe2ba05c82a 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9efca54c51ac..35385148fedb 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 0ced84ae13e4..de4eea606ccd 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 06b9c8dd11c9..c5a06882bdf6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index f5298be4273a..e5f8245c2d93 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 6d82d30d8f7b..b0724d6e6e80 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 738873e876ca..2e74308d7725 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 5884eba047f7..c0cd7147a5a3 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
/* Init handler obj */
+ status =
+ acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+
handler_obj->address_space.space_id = (u8)space_id;
handler_obj->address_space.handler_flags = flags;
handler_obj->address_space.region_list = NULL;
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index ce1eda6beb84..f14ebcd610ab 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index a8a4c8c9b9ef..4ef43c8ef5e7 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -112,6 +112,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
struct acpi_connection_info *context;
+ acpi_mutex context_mutex;
+ u8 context_locked;
acpi_physical_address address;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -136,6 +138,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
}
context = handler_desc->address_space.context;
+ context_mutex = handler_desc->address_space.context_mutex;
+ context_locked = FALSE;
/*
* It may be the case that the region has never been initialized.
@@ -204,6 +208,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler = handler_desc->address_space.handler;
address = (region_obj->region.address + region_offset);
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+ ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * For handlers other than the default (supplied) handlers, we must
+ * exit the interpreter because the handler *might* block -- we don't
+ * know what it will do, so we can't hold the lock on the interpreter.
+ */
+ acpi_ex_exit_interpreter();
+ }
+
/*
* Special handling for generic_serial_bus and general_purpose_io:
* There are three extra parameters that must be passed to the
@@ -212,48 +233,39 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* 2) Length of the above buffer
* 3) Actual access length from the access_as() op
*
+ * Since we pass these extra parameters via the context, which is
+ * shared between threads, we must lock the context to avoid these
+ * parameters being changed from another thread before the handler
+ * has completed running.
+ *
* In addition, for general_purpose_io, the Address and bit_width fields
* are defined as follows:
* 1) Address is the pin number index of the field (bit offset from
* the previous Connection)
* 2) bit_width is the actual bit length of the field (number of pins)
*/
- if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS ||
+ region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
context && field_obj) {
- /* Get the Connection (resource_template) buffer */
+ status =
+ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ goto re_enter_interpreter;
+ }
- context->connection = field_obj->field.resource_buffer;
- context->length = field_obj->field.resource_length;
- context->access_length = field_obj->field.access_length;
- }
- if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
- context && field_obj) {
+ context_locked = TRUE;
/* Get the Connection (resource_template) buffer */
context->connection = field_obj->field.resource_buffer;
context->length = field_obj->field.resource_length;
context->access_length = field_obj->field.access_length;
- address = field_obj->field.pin_number_index;
- bit_width = field_obj->field.bit_length;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
- &region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(region_obj->region.
- space_id)));
- if (!(handler_desc->address_space.handler_flags &
- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
- /*
- * For handlers other than the default (supplied) handlers, we must
- * exit the interpreter because the handler *might* block -- we don't
- * know what it will do, so we can't hold the lock on the interpreter.
- */
- acpi_ex_exit_interpreter();
+ if (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) {
+ address = field_obj->field.pin_number_index;
+ bit_width = field_obj->field.bit_length;
+ }
}
/* Call the handler */
@@ -261,6 +273,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function, address, bit_width, value, context,
region_obj2->extra.region_context);
+ if (context_locked) {
+ acpi_os_release_mutex(context_mutex);
+ }
+
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
@@ -277,6 +293,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
}
}
+re_enter_interpreter:
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 89be3ccdad53..984c172453bf 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index e4e012297eee..ff5cf5b0705a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 1a15b0087379..5445a361c621 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3be60673e461..a6d53cf86450 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index da97fd0c6b51..b1ff0a8f9c14 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
+ acpi_os_release_mutex(handler_obj->address_space.
+ context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}
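The context mutex added here has a three-part lifecycle: it is created when an address space handler is installed (evhandler.c above), held across the parameter hand-off and the handler call for generic_serial_bus and GPIO regions (evregion.c), and released when the handler is removed (evxfregn.c). A standalone pthreads model of the pattern being protected; every name below is invented for the demo, the kernel code uses the acpi_os_*_mutex interfaces:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct connection_info {
        const char *connection;         /* stands in for the resource buffer */
        unsigned short length;
};

static struct connection_info shared_context;
static pthread_mutex_t context_mutex = PTHREAD_MUTEX_INITIALIZER;

static void handler(const struct connection_info *ctx)
{
        /* Must see exactly the parameters its caller wrote, never a mix
         * of two in-flight requests. */
        printf("%s len=%u\n", ctx->connection, ctx->length);
}

static void *dispatch(void *arg)
{
        const char *name = arg;

        pthread_mutex_lock(&context_mutex);
        shared_context.connection = name;
        shared_context.length = (unsigned short)strlen(name);
        handler(&shared_context);       /* called with the lock still held */
        pthread_mutex_unlock(&context_mutex);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, dispatch, "gsbus-region");
        pthread_create(&b, NULL, dispatch, "gpio-region");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}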
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 43711412722f..2d220d470c60 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 68efd704e2dc..0cd9b3738e76 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 50c7aad2e86d..6b7498371eb0 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index a17482428b46..80b52ad55775 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index a5223dcaee70..6a01e38b7d5a 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 47a4d9a40d6b..2aea44ecc37d 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 3323a2ba6a31..32f03ee81785 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ade35ff1c7ba..bdc7a30d1217 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -434,7 +434,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
* region_field case and write the datum to the Operation Region
*/
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_LOCAL_REGION_FIELD:
/*
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 717e3998fd77..ad19f914641b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 9ff247cba571..6237ae8284b1 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 74f8b0d0452b..5283603d078d 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index a46d685a3ffc..b639e930d642 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 03241d18ac1d..10323ab186da 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index c8d0d75fc450..140aae009690 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 55d0fa056fe7..2cf9f37a0ba8 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 4a0f03157e08..d8c55dde191b 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 4914dbc44517..82b713a9a193 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 3e4018678c09..d80b76455c50 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 912a078c60a4..fa6a96242835 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 4d1b22971d58..cbe2c88b1dc2 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -198,7 +198,7 @@ acpi_ex_resolve_operands(u16 opcode,
target_op = AML_DEBUG_OP;
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_REFCLASS_ARG:
case ACPI_REFCLASS_LOCAL:
@@ -264,7 +264,7 @@ acpi_ex_resolve_operands(u16 opcode,
* Else not a string - fall through to the normal Reference
* case below
*/
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ARGI_REFERENCE: /* References: */
case ARGI_INTEGER_REF:
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 760bc7cef55a..8e8d95f7947b 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 3adc0a29d890..12f4210ea085 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -96,7 +96,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
return_ACPI_STATUS(AE_OK);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
@@ -422,7 +422,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
break;
}
- /* Fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_EVENT:
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 8c34f4e2ab8f..08469d37e73e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc66696080a5..a82628683329 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f329b01672bb..1281c07112de 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index 832a47885b99..8846f483fb02 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8fefa6feac2f..4d41a866f633 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 9b9aac27ff7e..96f55f079988 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index d9be5d0545d4..803402aefaeb 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index b13a4ed5bc63..0770aa176cd5 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -167,7 +167,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
return (AE_BAD_PARAMETER);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_GPE_ENABLE:
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 317ae870336b..14baa13bf848 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 07473ddfa9a9..63deadde9f48 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b2ca7dfd3fc9..e15badf4077a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 134dbfadcd15..fb27aaad0dee 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index a4b66f4b2714..89b12afed564 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 6bbc7d350a16..c8a2747005c5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index c86c82939ebb..597d0eed23c1 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 994f0b556c60..2f66f3ed1810 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index b691fe20e384..d3dc6761bcdd 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index e16f6a0c2c3f..4db81f8ba29b 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 9ba17891edb6..7d77956ed790 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 7e74a765e785..778f80e624be 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 167a1c2495ab..e4e5f32da7dc 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 1875b1cba202..6742b50836f7 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 90db2d85e7f5..499067daa22c 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d2c8d8279e7a..14b71b41e845 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -495,8 +495,9 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
- char *dest;
+ union acpi_operand_object *new_string;
char *source;
+ char *dest;
ACPI_FUNCTION_NAME(ns_repair_HID);
@@ -517,6 +518,13 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
return_ACPI_STATUS(AE_OK);
}
+ /* It is simplest to always create a new string object */
+
+ new_string = acpi_ut_create_string_object(return_object->string.length);
+ if (!new_string) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
/*
* Remove a leading asterisk if present. For some unknown reason, there
* are many machines in the field that contain IDs like this.
@@ -526,7 +534,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
source = return_object->string.pointer;
if (*source == '*') {
source++;
- return_object->string.length--;
+ new_string->string.length--;
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
"%s: Removed invalid leading asterisk\n",
@@ -541,11 +549,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
* "NNNN####" where N is an uppercase letter or decimal digit, and
* # is a hex digit.
*/
- for (dest = return_object->string.pointer; *source; dest++, source++) {
+ for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)toupper((int)*source);
}
- return_object->string.pointer[return_object->string.length] = 0;
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_string;
return_ACPI_STATUS(AE_OK);
}
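The repair now builds a fresh string object rather than shortening the returned one in place, so the original return object is left untouched and simply dereferenced at the end. A userspace model of the transform; repair_hid() is an illustrative name only:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy into a new buffer, dropping a leading '*' and forcing uppercase,
 * instead of editing the source string in place. */
static char *repair_hid(const char *src)
{
        char *dst = malloc(strlen(src) + 1);
        char *out = dst;

        if (!dst)
                return NULL;
        if (*src == '*')
                src++;                  /* remove invalid leading asterisk */
        while (*src)
                *out++ = (char)toupper((unsigned char)*src++);
        *out = '\0';
        return dst;
}

int main(void)
{
        char *fixed = repair_hid("*pnp0c0a");

        printf("%s\n", fixed);          /* prints PNP0C0A */
        free(fixed);
        return 0;
}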
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e66abdab8f31..83d0f276da4d 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index b7f3e8603ad8..915c2433463d 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 0e6aba81605b..03487546da5a 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 3b40db4ad9f3..b9ff535aa02e 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 3cf0687b9915..4b51dd939f29 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -264,8 +264,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
ACPI_TO_POINTER
(TRUE));
if (ACPI_FAILURE(status)
- && ((status & AE_CODE_MASK) !=
- AE_CODE_CONTROL)) {
+ && !ACPI_CNTL_EXCEPTION(status)) {
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 2480c26c5171..e4420cd6d281 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 28af49263ebf..3e80eb1a5f35 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index ab9327f6a63c..476b00a121f3 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index bd3caf735be3..7eb7a81619a3 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -383,7 +383,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
default:
status = callback_status;
- if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(callback_status)) {
status = AE_OK;
}
break;
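This hunk and the psloop.c one above both lean on the new ACPI_CNTL_EXCEPTION() helper, which simply names the open-coded class test it replaces. Its assumed shape, expected to live in acexcep.h:

/* True for control exceptions (the AE_CTRL_* codes), i.e. status values
 * whose class bits equal AE_CODE_CONTROL. */
#define ACPI_CNTL_EXCEPTION(status) \
        (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)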
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index fceb311995e9..3f2eada44942 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index c8aef0694864..ffb2a7bfc6d7 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 00efae2f95ba..e6596051d548 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 0fe3adf6b0e5..7018a789debc 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 1bbfc8def388..fd0f28c7af1e 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 523b1e9b98d4..ebbca109edcb 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 907edc5edba7..5174abfa8af9 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 56d81e490a5c..2c2c2b1f5a28 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0bb15add2245..8d1e5b572493 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 0b3494ad9a70..254823d494a2 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dfe1ac3ae34a..4b9b329a5a92 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 7490429ddbf6..e6f51fedaf1a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index bcba993d4dac..38623049b962 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 0edc6ef5d46d..9fec3df6c3ba 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 99fa48722cf6..7001f4b113f1 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 303ab51b4fcf..796fd9b33a7d 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index d78656d960e8..e1b55575d5fb 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index f2ec427f4e29..8ab90f78825b 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 1b03a2747401..814145019f95 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 41bdd0278dd8..d9877153f400 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 0c8cb0612414..09245945f319 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index ed9aedf604a1..bcd3871079d7 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 4c0d4e434196..624a26794d55 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -112,7 +112,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
gpe_block);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 8180d1a458f5..d2503920c620 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e6dcbdc3fc6e..59a48371a7bc 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 0e02f12513dc..b1e94c094f9a 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 3e68864ef242..08e9f316cbde 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index fdbc397c038d..7b606a1e6986 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 46be549539e7..923dd15e7a16 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index bbec04c291d2..84a210b49e3a 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 0a01c08dad8a..7b8e8bf1e824 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index dd277f7e9f10..a6f87a88c30e 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 681c11f4af4e..e37d612e8db5 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 2d91003fcf26..199982a6fb16 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -104,7 +104,7 @@ acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
* 1) Runtime: terminate with no error, per the ACPI spec
* 2) Compiler: return an error
*/
- if (!isdigit(*string)) {
+ if (!isdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
status = AE_BAD_DECIMAL_CONSTANT;
#endif
@@ -158,7 +158,7 @@ acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
* 1) Runtime: terminate with no error, per the ACPI spec
* 2) Compiler: return an error
*/
- if (!isxdigit(*string)) {
+ if (!isxdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
status = AE_BAD_HEX_CONSTANT;
#endif
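The (int) casts quiet char-to-int promotion warnings from static checkers. A stricter, fully portable form routes the value through unsigned char, since passing a negative plain char to the ctype functions is undefined; the helper below is illustrative, not part of the patch:

#include <ctype.h>

static inline int is_ascii_digit(char c)
{
        /* ctype functions require a value representable as unsigned char
         * (or EOF); cast through unsigned char where char may be signed. */
        return isdigit((unsigned char)c);
}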
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index d366be431a84..2ce85fcfeb5b 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index b8039954b0d1..090e44b6b6c7 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index ca7c9f0144ef..3285c1a92e40 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 653e3bb20036..91016366de1d 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2e0b0fcad960..b9597216d021 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -688,7 +688,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
break;
if (erst_timedout(&timeout, SPIN_UNIT))
return -EIO;
- };
+ }
rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
if (rc)
return rc;
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 6e980fe16772..f220bb00e91b 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -49,6 +49,12 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1,
};
+static inline bool is_generic_error(struct acpi_hest_header *hest_hdr)
+{
+ return hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
+ hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
+}
+
static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
{
u16 hest_type = hest_hdr->type;
@@ -141,8 +147,7 @@ static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void
{
int *count = data;
- if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
- hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ if (is_generic_error(hest_hdr))
(*count)++;
return 0;
}
@@ -153,8 +158,7 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
struct ghes_arr *ghes_arr = data;
int rc, i;
- if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR &&
- hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ if (!is_generic_error(hest_hdr))
return 0;
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4eac6d7e9fb..2494138a6905 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
ncomp = (struct acpi_iort_named_component *)node->node_data;
+ if (!ncomp->memory_address_limit) {
+ pr_warn(FW_BUG "Named component missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
@@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
rc = (struct acpi_iort_root_complex *)node->node_data;
+ if (!rc->memory_address_limit) {
+ pr_warn(FW_BUG "Root complex missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
@@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
- dev->coherent_dma_mask = mask;
- *dev->dma_mask = mask;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;
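Aside (not part of the patch): the switch to min() above matters when a driver has already narrowed its DMA mask before iort_dma_setup() runs. A hedged sketch of the failure mode, with made-up mask widths:

	u64 mask = DMA_BIT_MASK(48);	/* limit described by the IORT node */

	/* The driver earlier succeeded with dma_set_mask(dev, DMA_BIT_MASK(32)). */
	*dev->dma_mask = mask;				/* old code: silently widens 32 -> 48 bits */
	*dev->dma_mask = min(*dev->dma_mask, mask);	/* new code: keeps the stricter 32-bit mask */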
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 08ee1c7b12e0..b822f77afba6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -8,7 +8,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) "ACPI: battery: " fmt
#include <linux/async.h>
#include <linux/delay.h>
@@ -29,8 +29,6 @@
#include <acpi/battery.h>
-#define PREFIX "ACPI: "
-
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
@@ -44,10 +42,6 @@
#define ACPI_BATTERY_STATE_CHARGING 0x2
#define ACPI_BATTERY_STATE_CRITICAL 0x4
-#define _COMPONENT ACPI_BATTERY_COMPONENT
-
-ACPI_MODULE_NAME("battery");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
@@ -466,7 +460,8 @@ static int extract_package(struct acpi_battery *battery,
static int acpi_battery_get_status(struct acpi_battery *battery)
{
if (acpi_bus_get_status(battery->device)) {
- ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
+ acpi_handle_info(battery->device->handle,
+ "_STA evaluation failed\n");
return -ENODEV;
}
return 0;
@@ -535,8 +530,10 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s",
- use_bix ? "_BIX":"_BIF"));
+ acpi_handle_info(battery->device->handle,
+ "%s evaluation failed: %s\n",
+ use_bix ?"_BIX":"_BIF",
+ acpi_format_exception(status));
} else {
result = extract_battery_info(use_bix,
battery,
@@ -573,7 +570,9 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
+ acpi_handle_info(battery->device->handle,
+ "_BST evaluation failed: %s",
+ acpi_format_exception(status));
return -ENODEV;
}
@@ -590,7 +589,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n");
+ pr_warn_once(FW_BUG "(dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
@@ -625,7 +624,9 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm));
+ acpi_handle_debug(battery->device->handle, "Alarm set to %d\n",
+ battery->alarm);
+
return 0;
}
@@ -1201,8 +1202,7 @@ static int acpi_battery_add(struct acpi_device *device)
if (result)
goto fail;
- pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ pr_info("Slot [%s] (battery %s)\n", acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
@@ -1282,8 +1282,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
if (battery_check_pmic) {
for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
- pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
- ": found native %s PMIC, not loading\n",
+ pr_info("found native %s PMIC, not loading\n",
acpi_battery_blacklist[i]);
return;
}
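For readers unfamiliar with the pr_fmt() convention driving this cleanup: defining pr_fmt() before the includes makes every pr_*() call in the file carry the prefix, which is what lets the explicit PREFIX pasting go away. Simplified sketch of the mechanism (the real macro in include/linux/printk.h has more plumbing):

	#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

	/* So, with the definition added to battery.c above: */
	pr_info("Slot [%s]\n", "BAT0");
	/* expands to printk(KERN_INFO "ACPI: battery: " "Slot [%s]\n", "BAT0") */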
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 251f961c28cc..19bb7f870204 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -15,40 +15,40 @@
static void *bgrt_image;
static struct kobject *bgrt_kobj;
-static ssize_t show_version(struct device *dev,
+static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
}
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR_RO(version);
-static ssize_t show_status(struct device *dev,
+static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_type(struct device *dev,
+static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
}
-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+static DEVICE_ATTR_RO(type);
-static ssize_t show_xoffset(struct device *dev,
+static ssize_t xoffset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
}
-static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
+static DEVICE_ATTR_RO(xoffset);
-static ssize_t show_yoffset(struct device *dev,
+static ssize_t yoffset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
}
-static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+static DEVICE_ATTR_RO(yoffset);
static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
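The show_*() to *_show() renames in the bgrt.c hunk are not cosmetic: DEVICE_ATTR_RO(name) derives the callback name from the attribute name. Sketch of the macro (simplified from include/linux/device.h):

	#define DEVICE_ATTR_RO(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
	/* __ATTR_RO(_name) wires up .show = _name##_show with mode 0444,
	 * which is why show_version had to become version_show first. */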
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1682f8b454a2..be7da23fad76 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -5,6 +5,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -31,9 +33,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("bus");
-
struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
@@ -47,8 +46,7 @@ static inline int set_copy_dsdt(const struct dmi_system_id *id)
#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
- printk(KERN_NOTICE "%s detected - "
- "force copy of DSDT to local memory\n", id->ident);
+ pr_notice("%s detected - force copy of DSDT to local memory\n", id->ident);
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
@@ -116,13 +114,11 @@ int acpi_bus_get_status(struct acpi_device *device)
acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
- "functional but not present;\n",
- device->pnp.bus_id, (u32)sta));
+ pr_debug("Device [%s] status [%08x]: functional but not present\n",
+ device->pnp.bus_id, (u32)sta);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
- device->pnp.bus_id, (u32)sta));
+ pr_debug("Device [%s] status [%08x]\n", device->pnp.bus_id, (u32)sta);
return 0;
}
EXPORT_SYMBOL(acpi_bus_get_status);
@@ -281,10 +277,16 @@ bool osc_sb_apei_support_acked;
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
+/*
+ * ACPI 6.4 Operating System Capabilities for USB.
+ */
+bool osc_sb_native_usb4_support_confirmed;
+EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
-static void acpi_bus_osc_support(void)
+static void acpi_bus_osc_negotiate_platform_control(void)
{
- u32 capbuf[2];
+ u32 capbuf[2], *capbuf_ret;
struct acpi_osc_context context = {
.uuid_str = sb_uuid_str,
.rev = 1,
@@ -317,21 +319,109 @@ static void acpi_bus_osc_support(void)
if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
+ if (IS_ENABLED(CONFIG_USB4))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_NATIVE_USB4_SUPPORT;
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
- u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD) {
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- }
+
+ if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return;
+
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length <= OSC_SUPPORT_DWORD) {
kfree(context.ret.pointer);
+ return;
+ }
+
+ /*
+ * Now run _OSC again with query flag clear and with the caps
+ * supported by both the OS and the platform.
+ */
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+ kfree(context.ret.pointer);
+
+ if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return;
+
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
}
- /* do we need to check other returned cap? Sounds no */
+
+ kfree(context.ret.pointer);
+}
+
+/*
+ * Native control of USB4 capabilities. If any of the tunneling bits is
+ * set it means OS is in control and we use software based connection
+ * manager.
+ */
+u32 osc_sb_native_usb4_control;
+EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);
+
+static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
+{
+ printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
+ (bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_XDOMAIN) ? '+' : '-');
+}
+
+static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
+static void acpi_bus_osc_negotiate_usb_control(void)
+{
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = sb_usb_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+ acpi_handle handle;
+ acpi_status status;
+ u32 control;
+
+ if (!osc_sb_native_usb4_support_confirmed)
+ return;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return;
+
+ control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
+ OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
+
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = 0;
+ capbuf[OSC_CONTROL_DWORD] = control;
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (context.ret.length != sizeof(capbuf)) {
+ printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
+ goto out_free;
+ }
+
+ osc_sb_native_usb4_control =
+ control & ((u32 *)context.ret.pointer)[OSC_CONTROL_DWORD];
+
+ acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
+ acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
+ osc_sb_native_usb4_control);
+
+out_free:
+ kfree(context.ret.pointer);
}
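How a consumer might use the negotiated bits (hypothetical sketch, not from this patch; the USB4 code in drivers/thunderbolt is the intended user):

	static bool use_software_connection_manager(void)
	{
		/* Any granted tunneling bit means the OS owns tunnel setup. */
		return osc_sb_native_usb4_control &
		       (OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
			OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN);
	}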
/* --------------------------------------------------------------------------
@@ -915,9 +1005,9 @@ static int acpi_device_probe(struct device *dev)
return ret;
acpi_dev->driver = acpi_drv;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Driver [%s] successfully bound to device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
+
+ pr_debug("Driver [%s] successfully bound to device [%s]\n",
+ acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
@@ -931,8 +1021,9 @@ static int acpi_device_probe(struct device *dev)
}
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
+ pr_debug("Found driver [%s] for device [%s]\n", acpi_drv->name,
+ acpi_dev->pnp.bus_id);
+
get_device(dev);
return 0;
}
@@ -995,15 +1086,15 @@ static int __init acpi_bus_init_irq(void)
message = "platform specific model";
break;
default:
- printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n");
+ pr_info("Unknown interrupt routing model\n");
return -ENODEV;
}
- printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
+ pr_info("Using %s for interrupt routing\n", message);
status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
+ pr_info("_PIC evaluation failed: %s\n", acpi_format_exception(status));
return -ENODEV;
}
@@ -1027,7 +1118,7 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return;
- printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
+ pr_info("Core revision %08x\n", ACPI_CA_VERSION);
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
@@ -1048,15 +1139,13 @@ void __init acpi_early_init(void)
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to reallocate ACPI tables\n");
+ pr_err("Unable to reallocate ACPI tables\n");
goto error0;
}
status = acpi_initialize_subsystem();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to initialize the ACPI Interpreter\n");
+ pr_err("Unable to initialize the ACPI Interpreter\n");
goto error0;
}
@@ -1102,7 +1191,7 @@ void __init acpi_subsystem_init(void)
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
+ pr_err("Unable to enable ACPI\n");
disable_acpi();
} else {
/*
@@ -1131,8 +1220,7 @@ static int __init acpi_bus_init(void)
status = acpi_load_tables();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
+ pr_err("Unable to load the System Description Tables\n");
goto error1;
}
@@ -1150,14 +1238,13 @@ static int __init acpi_bus_init(void)
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to start the ACPI Interpreter\n");
+ pr_err("Unable to start the ACPI Interpreter\n");
goto error1;
}
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+ pr_err("Unable to initialize ACPI objects\n");
goto error1;
}
@@ -1168,7 +1255,8 @@ static int __init acpi_bus_init(void)
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
*/
- acpi_bus_osc_support();
+ acpi_bus_osc_negotiate_platform_control();
+ acpi_bus_osc_negotiate_usb_control();
/*
* _PDC control method may load dynamic SSDT tables,
@@ -1186,7 +1274,7 @@ static int __init acpi_bus_init(void)
*/
acpi_ec_dsdt_probe();
- printk(KERN_INFO PREFIX "Interpreter enabled\n");
+ pr_info("Interpreter enabled\n");
/* Initialize sleep structures */
acpi_sleep_init();
@@ -1205,8 +1293,7 @@ static int __init acpi_bus_init(void)
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
&acpi_bus_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to register for device notifications\n");
+ pr_err("Unable to register for system notifications\n");
goto error1;
}
@@ -1233,13 +1320,13 @@ static int __init acpi_init(void)
int result;
if (acpi_disabled) {
- printk(KERN_INFO PREFIX "Interpreter disabled.\n");
+ pr_info("Interpreter disabled.\n");
return -ENODEV;
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
if (!acpi_kobj) {
- printk(KERN_WARNING "%s: kset create error\n", __func__);
+ pr_debug("%s: kset create error\n", __func__);
acpi_kobj = NULL;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0d93a5ef4d07..85e5e0328a2e 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -21,8 +21,6 @@
#include <linux/dmi.h>
#include <acpi/button.h>
-#define PREFIX "ACPI: "
-
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
@@ -54,9 +52,6 @@ static const char * const lid_init_state_str[] = {
[ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
};
-#define _COMPONENT ACPI_BUTTON_COMPONENT
-ACPI_MODULE_NAME("button");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Button Driver");
MODULE_LICENSE("GPL");
@@ -285,7 +280,7 @@ static int acpi_button_add_fs(struct acpi_device *device)
return 0;
if (acpi_button_dir || acpi_lid_dir) {
- printk(KERN_ERR PREFIX "More than one Lid device found!\n");
+ pr_info("More than one Lid device found!\n");
return -EEXIST;
}
@@ -434,8 +429,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
}
@@ -523,7 +518,7 @@ static int acpi_button_add(struct acpi_device *device)
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
input->open = acpi_lid_input_open;
} else {
- printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
+ pr_info("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
@@ -567,7 +562,7 @@ static int acpi_button_add(struct acpi_device *device)
}
device_init_wakeup(&device->dev, true);
- printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
+ pr_info("%s [%s]\n", name, acpi_device_bid(device));
return 0;
err_remove_fs:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 75aaf94ae0a9..69057fcd2c04 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -233,8 +233,8 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ pcc_ss_data->pcc_comm_addr;
unsigned int time_delta;
/*
@@ -934,7 +934,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
int ret_val = 0;
- void __iomem *vaddr = 0;
+ void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -979,7 +979,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
- void __iomem *vaddr = 0;
+ void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
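Background on the two cppc_acpi.c fixes above: __iomem is a sparse annotation, so assigning an ioremap()ed pointer to a plain pointer, or initializing an __iomem pointer with 0 instead of NULL, gets flagged when building with make C=1. General sketch of the mechanics (not patch-specific; phys_base and len are placeholders):

	void __iomem *regs = ioremap(phys_base, len);
	u32 val = readl(regs);		/* MMIO accessors expect __iomem pointers */
	/* u32 *p = regs; */		/* sparse: incorrect type in assignment */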
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3586434d0ded..096153761ebc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -10,6 +10,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -20,9 +22,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_POWER_COMPONENT
-ACPI_MODULE_NAME("device_pm");
-
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
@@ -130,8 +129,8 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
*state = result;
out:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
- device->pnp.bus_id, acpi_power_state_string(*state)));
+ dev_dbg(&device->dev, "Device power state is %s\n",
+ acpi_power_state_string(*state));
return 0;
}
@@ -174,9 +173,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
/* There is a special case for D0 addressed below. */
if (state > ACPI_STATE_D0 && state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
- device->pnp.bus_id,
- acpi_power_state_string(state)));
+ dev_dbg(&device->dev, "Device already in %s\n",
+ acpi_power_state_string(state));
return 0;
}
@@ -276,10 +274,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to %s\n",
- device->pnp.bus_id,
- acpi_power_state_string(target_state)));
+ dev_dbg(&device->dev, "Power state changed to %s\n",
+ acpi_power_state_string(target_state));
}
return result;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 96869f1538b9..da4ff2a8b06a 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
@@ -333,11 +325,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+static DEVICE_ATTR_RO(modalias);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -366,8 +358,8 @@ static ssize_t power_state_show(struct device *dev,
static DEVICE_ATTR_RO(power_state);
static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
- const char *buf, size_t count)
+eject_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
@@ -395,28 +387,28 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+static DEVICE_ATTR_WO(eject);
static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+hid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+static DEVICE_ATTR_RO(hid);
-static ssize_t acpi_device_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+static DEVICE_ATTR_RO(uid);
-static ssize_t acpi_device_adr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t adr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -425,16 +417,16 @@ static ssize_t acpi_device_adr_show(struct device *dev,
else
return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+static DEVICE_ATTR_RO(adr);
-static ssize_t acpi_device_path_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t path_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return acpi_object_path(acpi_dev->handle, buf);
}
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+static DEVICE_ATTR_RO(path);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
@@ -463,8 +455,8 @@ static ssize_t description_show(struct device *dev,
static DEVICE_ATTR_RO(description);
static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
+sun_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
@@ -475,11 +467,11 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", sun);
}
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static DEVICE_ATTR_RO(sun);
static ssize_t
-acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
+hrv_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long hrv;
@@ -490,7 +482,7 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", hrv);
}
-static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+static DEVICE_ATTR_RO(hrv);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 24e076f44d23..0937ceab052e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -484,7 +484,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
/*
* show_docked - read method for "docked" file in sysfs
*/
-static ssize_t show_docked(struct device *dev,
+static ssize_t docked_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
@@ -493,25 +493,25 @@ static ssize_t show_docked(struct device *dev,
acpi_bus_get_device(dock_station->handle, &adev);
return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
-static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+static DEVICE_ATTR_RO(docked);
/*
* show_flags - read method for flags file in sysfs
*/
-static ssize_t show_flags(struct device *dev,
+static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
-static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR_RO(flags);
/*
* write_undock - write method for "undock" file in sysfs
*/
-static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
@@ -525,13 +525,13 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
acpi_scan_lock_release();
return ret ? ret: count;
}
-static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+static DEVICE_ATTR_WO(undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
-static ssize_t show_dock_uid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
@@ -542,10 +542,10 @@ static ssize_t show_dock_uid(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
-static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static DEVICE_ATTR_RO(uid);
-static ssize_t show_dock_type(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
@@ -561,7 +561,7 @@ static ssize_t show_dock_type(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
-static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+static DEVICE_ATTR_RO(type);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cb229e24c563..e6a5d997241c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
- char bus_id[15];
+ const char *bus_id;
unsigned int instance_no;
struct list_head node;
};
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b11b08a60684..8c5dde628405 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2269,40 +2269,24 @@ static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
- struct nfit_set_info_map {
- u64 region_offset;
- u32 serial_number;
- u32 pad;
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u32 pad;
};
struct nfit_set_info2 {
- struct nfit_set_info_map2 {
- u64 region_offset;
- u32 serial_number;
- u16 vendor_id;
- u16 manufacturing_date;
- u8 manufacturing_location;
- u8 reserved[31];
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u16 vendor_id;
+ u16 manufacturing_date;
+ u8 manufacturing_location;
+ u8 reserved[31];
};
-static size_t sizeof_nfit_set_info(int num_mappings)
-{
- return sizeof(struct nfit_set_info)
- + num_mappings * sizeof(struct nfit_set_info_map);
-}
-
-static size_t sizeof_nfit_set_info2(int num_mappings)
-{
- return sizeof(struct nfit_set_info2)
- + num_mappings * sizeof(struct nfit_set_info_map2);
-}
-
static int cmp_map_compat(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
return memcmp(&map0->region_offset, &map1->region_offset,
sizeof(u64));
@@ -2310,8 +2294,8 @@ static int cmp_map_compat(const void *m0, const void *m1)
static int cmp_map(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2322,8 +2306,8 @@ static int cmp_map(const void *m0, const void *m1)
static int cmp_map2(const void *m0, const void *m1)
{
- const struct nfit_set_info_map2 *map0 = m0;
- const struct nfit_set_info_map2 *map1 = m1;
+ const struct nfit_set_info2 *map0 = m0;
+ const struct nfit_set_info2 *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2361,22 +2345,22 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
return -ENOMEM;
import_guid(&nd_set->type_guid, spa->range_guid);
- info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
+ info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
+ info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
if (!info2)
return -ENOMEM;
for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
- struct nfit_set_info_map *map = &info->mapping[i];
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
- spa->range_index, i);
+ struct nfit_set_info *map = &info[i];
+ struct nfit_set_info2 *map2 = &info2[i];
+ struct acpi_nfit_memory_map *memdev =
+ memdev_from_spa(acpi_desc, spa->range_index, i);
struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
if (!memdev || !nfit_mem->dcr) {
@@ -2395,23 +2379,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
/* v1.1 namespaces */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map, NULL);
- nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map, NULL);
+ nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* v1.2 namespaces */
- sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
- cmp_map2, NULL);
- nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
+ sort(info2, nr, sizeof(*info2), cmp_map2, NULL);
+ nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0);
/* support v1.1 namespaces created with the wrong sort order */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map_compat, NULL);
- nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map_compat, NULL);
+ nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* record the result of the sort for the mapping position */
for (i = 0; i < nr; i++) {
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
+ struct nfit_set_info2 *map2 = &info2[i];
int j;
for (j = 0; j < nr; j++) {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 0418febc5cf2..327e1b4eb6b0 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -9,6 +9,8 @@
* Author: Matthew Wilcox <willy@linux.intel.com>
*/
+#define pr_fmt(fmt) "ACPI: OSL: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,6 +39,7 @@
#include "acpica/acnamesp.h"
#include "internal.h"
+/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
@@ -327,7 +330,7 @@ void __iomem __ref
acpi_size pg_sz;
if (phys > ULONG_MAX) {
- printk(KERN_ERR PREFIX "Cannot map memory that high\n");
+ pr_err("Cannot map memory that high: 0x%llx\n", phys);
return NULL;
}
@@ -528,13 +531,12 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
*new_val = NULL;
if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
- printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
- acpi_os_name);
+ pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
*new_val = acpi_os_name;
}
if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
- printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
+ pr_info("Overriding _REV return value to 5\n");
*new_val = (char *)5;
}
@@ -575,15 +577,14 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
return AE_ALREADY_ACQUIRED;
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
- printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
- gsi);
+ pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
return AE_OK;
}
acpi_irq_handler = handler;
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
- printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ pr_err("SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
@@ -1071,7 +1072,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
if (type == OSL_DEBUGGER_MAIN_THREAD) {
ret = acpi_debugger_create_thread(function, context);
if (ret) {
- pr_err("Call to kthread_create() failed.\n");
+ pr_err("Kernel thread creation failed\n");
status = AE_ERROR;
}
goto out_thread;
@@ -1121,8 +1122,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
*/
ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
- printk(KERN_ERR PREFIX
- "Call to queue_work() failed.\n");
+ pr_err("Unable to queue work\n");
status = AE_ERROR;
}
err_workqueue:
@@ -1165,9 +1165,9 @@ acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
struct acpi_hp_work *hpw;
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling hotplug event (%p, %u) for deferred execution.\n",
- adev, src));
+ acpi_handle_debug(adev->handle,
+ "Scheduling hotplug event %u for deferred handling\n",
+ src);
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
if (!hpw)
@@ -1355,7 +1355,7 @@ acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
case ACPI_SIGNAL_FATAL:
- printk(KERN_ERR PREFIX "Fatal opcode executed\n");
+ pr_err("Fatal opcode executed\n");
break;
case ACPI_SIGNAL_BREAKPOINT:
/*
@@ -1407,7 +1407,7 @@ __setup("acpi_os_name=", acpi_os_name_setup);
static int __init acpi_no_auto_serialize_setup(char *str)
{
acpi_gbl_auto_serialize_methods = FALSE;
- pr_info("ACPI: auto-serialization disabled\n");
+ pr_info("Auto-serialization disabled\n");
return 1;
}
@@ -1458,38 +1458,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
int acpi_check_resource_conflict(const struct resource *res)
{
acpi_adr_space_type space_id;
- acpi_size length;
- u8 warn = 0;
- int clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
- if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
- return 0;
if (res->flags & IORESOURCE_IO)
space_id = ACPI_ADR_SPACE_SYSTEM_IO;
- else
+ else if (res->flags & IORESOURCE_MEM)
space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ else
+ return 0;
+
+ if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
+ return 0;
+
+ pr_info("Resource conflict; ACPI support missing from driver?\n");
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
+ return -EBUSY;
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
+ pr_notice("Resource conflict: System may be unstable or behave erratically\n");
- length = resource_size(res);
- if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
- warn = 1;
- clash = acpi_check_address_range(space_id, res->start, length, warn);
-
- if (clash) {
- if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
- if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
- printk(KERN_NOTICE "ACPI: This conflict may"
- " cause random problems and system"
- " instability\n");
- printk(KERN_INFO "ACPI: If an ACPI driver is available"
- " for this device, you should use it instead of"
- " the native driver\n");
- }
- if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
- return -EBUSY;
- }
return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
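Caller's view of the rewritten helper (hypothetical sketch; the foo_* names are illustrative, not from the tree):

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);
		int ret;

		if (!res)
			return -ENODEV;

		ret = acpi_check_resource_conflict(res);
		if (ret)	/* -EBUSY only when acpi_enforce_resources=strict */
			return ret;

		return 0;	/* no conflict (or lax/no mode): claim the region */
	}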
@@ -1722,7 +1712,7 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
static int __init acpi_no_static_ssdt_setup(char *s)
{
acpi_gbl_disable_ssdt_table_install = TRUE;
- pr_info("ACPI: static SSDT installation disabled\n");
+ pr_info("Static SSDT installation disabled\n");
return 0;
}
@@ -1731,8 +1721,7 @@ early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
static int __init acpi_disable_return_repair(char *s)
{
- printk(KERN_NOTICE PREFIX
- "ACPI: Predefined validation mechanism disabled\n");
+ pr_notice("Predefined validation mechanism disabled\n");
acpi_gbl_disable_auto_repair = TRUE;
return 1;
@@ -1758,7 +1747,7 @@ acpi_status __init acpi_os_initialize(void)
void *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
- pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
+ pr_debug("%s: Reset register mapping %s\n", __func__,
rv ? "successful" : "failed");
}
acpi_os_initialized = true;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0bf072cef6cf..dcd593766a64 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -56,8 +56,6 @@ static struct acpi_scan_handler pci_root_handler = {
},
};
-static DEFINE_MUTEX(osc_lock);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle: the ACPI CA node in question.
@@ -223,12 +221,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
{
- acpi_status status;
-
- mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags, NULL);
- mutex_unlock(&osc_lock);
- return status;
+ return acpi_pci_query_osc(root, flags, NULL);
}
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
@@ -353,10 +346,10 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
* _OSC bits the BIOS has granted control of, but its contents are meaningless
* on failure.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
+static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
struct acpi_pci_root *root;
- acpi_status status = AE_OK;
+ acpi_status status;
u32 ctrl, capbuf[3];
if (!mask)
@@ -370,18 +363,16 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if (!root)
return AE_NOT_EXIST;
- mutex_lock(&osc_lock);
-
*mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
if ((root->osc_control_set & ctrl) == ctrl)
- goto out;
+ return AE_OK;
/* Need to check the available controls bits before requesting them. */
while (*mask) {
status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
- goto out;
+ return status;
if (ctrl == *mask)
break;
decode_osc_control(root, "platform does not support",
@@ -392,21 +383,19 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if ((ctrl & req) != req) {
decode_osc_control(root, "not requesting control; platform does not support",
req & ~(ctrl));
- status = AE_SUPPORT;
- goto out;
+ return AE_SUPPORT;
}
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
capbuf[OSC_CONTROL_DWORD] = ctrl;
status = acpi_pci_run_osc(handle, capbuf, mask);
- if (ACPI_SUCCESS(status))
- root->osc_control_set = *mask;
-out:
- mutex_unlock(&osc_lock);
- return status;
+ if (ACPI_FAILURE(status))
+ return status;
+
+ root->osc_control_set = *mask;
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_pci_osc_control_set);
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
bool is_pcie)
@@ -452,9 +441,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if ((status == AE_NOT_FOUND) && !is_pcie)
return;
- dev_info(&device->dev, "_OSC failed (%s)%s\n",
- acpi_format_exception(status),
- pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
+ dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
+ acpi_format_exception(status));
return;
}
@@ -510,7 +498,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
} else {
decode_osc_control(root, "OS requested", requested);
decode_osc_control(root, "platform willing to grant", control);
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
acpi_format_exception(status));
/*
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
new file mode 100644
index 000000000000..dd2fbf38e414
--- /dev/null
+++ b/drivers/acpi/platform_profile.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Platform profile sysfs interface */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/platform_profile.h>
+#include <linux/sysfs.h>
+
+static struct platform_profile_handler *cur_profile;
+static DEFINE_MUTEX(profile_lock);
+
+static const char * const profile_names[] = {
+ [PLATFORM_PROFILE_LOW_POWER] = "low-power",
+ [PLATFORM_PROFILE_COOL] = "cool",
+ [PLATFORM_PROFILE_QUIET] = "quiet",
+ [PLATFORM_PROFILE_BALANCED] = "balanced",
+ [PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
+ [PLATFORM_PROFILE_PERFORMANCE] = "performance",
+};
+static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
+
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ int err, i;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ if (len == 0)
+ len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
+ else
+ len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+ mutex_unlock(&profile_lock);
+ return len;
+}
+
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ int err;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ err = cur_profile->profile_get(cur_profile, &profile);
+ mutex_unlock(&profile_lock);
+ if (err)
+ return err;
+
+ /* Check that profile is a valid index */
+ if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
+ return -EIO;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err, i;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ /* Scan for a matching profile */
+ i = sysfs_match_string(profile_names, buf);
+ if (i < 0) {
+ mutex_unlock(&profile_lock);
+ return -EINVAL;
+ }
+
+ /* Check that platform supports this profile choice */
+ if (!test_bit(i, cur_profile->choices)) {
+ mutex_unlock(&profile_lock);
+ return -EOPNOTSUPP;
+ }
+
+ err = cur_profile->profile_set(cur_profile, i);
+ mutex_unlock(&profile_lock);
+ if (err)
+ return err;
+ return count;
+}
+
+static DEVICE_ATTR_RO(platform_profile_choices);
+static DEVICE_ATTR_RW(platform_profile);
+
+static struct attribute *platform_profile_attrs[] = {
+ &dev_attr_platform_profile_choices.attr,
+ &dev_attr_platform_profile.attr,
+ NULL
+};
+
+static const struct attribute_group platform_profile_group = {
+ .attrs = platform_profile_attrs
+};
+
+void platform_profile_notify(void)
+{
+ if (!cur_profile)
+ return;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+}
+EXPORT_SYMBOL_GPL(platform_profile_notify);
+
+int platform_profile_register(struct platform_profile_handler *pprof)
+{
+ int err;
+
+ mutex_lock(&profile_lock);
+ /* We can only have one active profile */
+ if (cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -EEXIST;
+ }
+
+ /* Sanity check that the profile handler fields are set */
+ if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
+ !pprof->profile_set || !pprof->profile_get) {
+ mutex_unlock(&profile_lock);
+ return -EINVAL;
+ }
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err) {
+ mutex_unlock(&profile_lock);
+ return err;
+ }
+
+ cur_profile = pprof;
+ mutex_unlock(&profile_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_profile_register);
+
+int platform_profile_remove(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+
+ mutex_lock(&profile_lock);
+ cur_profile = NULL;
+ mutex_unlock(&profile_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_profile_remove);
+
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 189a0d4c6d06..9b608b55d2b2 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -21,6 +21,8 @@
* may be shared by multiple devices.
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,8 +34,6 @@
#include "sleep.h"
#include "internal.h"
-#define _COMPONENT ACPI_POWER_COMPONENT
-ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
#define ACPI_POWER_DEVICE_NAME "Power Resource"
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
@@ -181,9 +181,6 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
{
acpi_status status = AE_OK;
unsigned long long sta = 0;
- char node_name[5];
- struct acpi_buffer buffer = { sizeof(node_name), node_name };
-
if (!handle || !state)
return -EINVAL;
@@ -195,11 +192,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
*state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
ACPI_POWER_RESOURCE_STATE_OFF;
- acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
- node_name,
- *state ? "on" : "off"));
+ acpi_handle_debug(handle, "Power resource is %s\n",
+ *state ? "on" : "off");
return 0;
}
@@ -229,8 +223,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
break;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
- cur_state ? "on" : "off"));
+ pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
*state = cur_state;
return 0;
@@ -357,8 +350,7 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
- resource->name));
+ pr_debug("Power resource [%s] turned on\n", resource->name);
/*
* If there are other dependents on this power resource we need to
@@ -383,9 +375,7 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
int result = 0;
if (resource->ref_count++) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] already on\n",
- resource->name));
+ pr_debug("Power resource [%s] already on\n", resource->name);
} else {
result = __acpi_power_on(resource);
if (result)
@@ -413,8 +403,8 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n",
- resource->name));
+ pr_debug("Power resource [%s] turned off\n", resource->name);
+
return 0;
}
@@ -423,16 +413,12 @@ static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
int result = 0;
if (!resource->ref_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] already off\n",
- resource->name));
+ pr_debug("Power resource [%s] already off\n", resource->name);
return 0;
}
if (--resource->ref_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] still in use\n",
- resource->name));
+ pr_debug("Power resource [%s] still in use\n", resource->name);
} else {
result = __acpi_power_off(resource);
if (result)
@@ -672,7 +658,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
if (ACPI_SUCCESS(status)) {
return 0;
} else if (status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX "_DSW execution failed\n");
+ acpi_handle_info(dev->handle, "_DSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
@@ -680,7 +666,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/* Execute _PSW */
status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- printk(KERN_ERR PREFIX "_PSW execution failed\n");
+ acpi_handle_info(dev->handle, "_PSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
@@ -886,15 +872,16 @@ static void acpi_release_power_resource(struct device *dev)
kfree(resource);
}
-static ssize_t acpi_power_in_use_show(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
+static ssize_t resource_in_use_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
struct acpi_power_resource *resource;
resource = to_power_resource(to_acpi_device(dev));
return sprintf(buf, "%u\n", !!resource->ref_count);
}
-static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
+static DEVICE_ATTR_RO(resource_in_use);
static void acpi_power_sysfs_remove(struct acpi_device *device)
{
@@ -960,8 +947,8 @@ int acpi_add_power_resource(acpi_handle handle)
if (result)
goto err;
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
- acpi_device_bid(device), state ? "on" : "off");
+ pr_info("%s [%s] (%s)\n", acpi_device_name(device),
+ acpi_device_bid(device), state ? "on" : "off");
device->flags.match_driver = true;
result = acpi_device_add(device, acpi_release_power_resource);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 24e87b630573..e312ebaed8db 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -564,7 +564,7 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode,
/**
* acpi_data_get_property_array - return an ACPI array property with given name
- * @adev: ACPI data object to get the property from
+ * @data: ACPI data object to get the property from
* @name: Name of the property
* @type: Expected type of array elements
* @obj: Location to store a pointer to the property value (if not NULL)
@@ -787,9 +787,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const union acpi_object *obj;
int ret;
- if (!val)
- return -EINVAL;
-
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
if (ret)
@@ -799,28 +796,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
- *(u8 *)val = obj->integer.value;
+
+ if (val)
+ *(u8 *)val = obj->integer.value;
+
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
- *(u16 *)val = obj->integer.value;
+
+ if (val)
+ *(u16 *)val = obj->integer.value;
+
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
- *(u32 *)val = obj->integer.value;
+
+ if (val)
+ *(u32 *)val = obj->integer.value;
+
break;
default:
- *(u64 *)val = obj->integer.value;
+ if (val)
+ *(u64 *)val = obj->integer.value;
+
break;
}
+
+ if (!val)
+ return 1;
} else if (proptype == DEV_PROP_STRING) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
- *(char **)val = obj->string.pointer;
+ if (val)
+ *(char **)val = obj->string.pointer;
return 1;
} else {
@@ -829,20 +841,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val)
-{
- int ret;
-
- if (!adev)
- return -EINVAL;
-
- ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
- if (ret < 0 || proptype != ACPI_TYPE_STRING)
- return ret;
- return 0;
-}
-
static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
size_t nval)
{
@@ -928,10 +926,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
const union acpi_object *items;
int ret;
- if (val && nval == 1) {
+ if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
- if (ret >= 0)
+ /*
+ * The overflow error means that the property is there and it is
+ * single-value, but its type does not match, so return.
+ */
+ if (ret >= 0 || ret == -EOVERFLOW)
return ret;
+
+ /*
+ * Reading this property as a single-value one failed, but its
+ * value may still be represented as one-element array, so
+ * continue.
+ */
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
@@ -973,12 +981,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval)
-{
- return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL;
-}
-
/**
* acpi_node_prop_read - retrieve the value of an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
@@ -991,9 +993,9 @@ int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval)
+static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
@@ -1210,8 +1212,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
- * @fwnode: Endpoint firmware node pointing to a remote device
- * @endpoint: Firmware node of remote endpoint is filled here if not %NULL
+ * @__fwnode: Endpoint firmware node pointing to a remote device
*
* Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
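One practical consequence of the property.c rework above: acpi_data_prop_read_single() now tolerates a NULL val, validating the property and reporting success (or the element count on the array path) instead of returning -EINVAL, which is what lets fwnode users size a buffer before reading. Hedged usage sketch (the "reg" property name is illustrative):

	int n = fwnode_property_read_u32_array(fwnode, "reg", NULL, 0);
	if (n > 0) {
		u32 *vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);

		if (vals)
			fwnode_property_read_u32_array(fwnode, "reg", vals, n);
	}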
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 80b668c80073..a184529d8fa4 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -19,8 +19,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("scan");
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
@@ -265,8 +263,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return error;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Hot-removing device %s...\n", dev_name(&device->dev)));
+ acpi_handle_debug(handle, "Ejecting\n");
acpi_bus_trim(device);
@@ -486,6 +483,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -577,27 +575,31 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context)
mutex_unlock(&acpi_device_del_lock);
}
-static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
- void (*callback)(void *))
+static struct acpi_device *handle_to_device(acpi_handle handle,
+ void (*callback)(void *))
{
+ struct acpi_device *adev = NULL;
acpi_status status;
- if (!device)
- return -EINVAL;
-
status = acpi_get_data_full(handle, acpi_scan_drop_device,
- (void **)device, callback);
- if (ACPI_FAILURE(status) || !*device) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
- handle));
- return -ENODEV;
+ (void **)&adev, callback);
+ if (ACPI_FAILURE(status) || !adev) {
+ acpi_handle_debug(handle, "No context!\n");
+ return NULL;
}
- return 0;
+ return adev;
}
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
{
- return acpi_get_device_data(handle, device, NULL);
+ if (!device)
+ return -EINVAL;
+
+ *device = handle_to_device(handle, NULL);
+ if (!*device)
+ return -ENODEV;
+
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_get_device);
@@ -609,10 +611,7 @@ static void get_acpi_device(void *dev)
struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
{
- struct acpi_device *adev = NULL;
-
- acpi_get_device_data(handle, &adev, get_acpi_device);
- return adev;
+ return handle_to_device(handle, get_acpi_device);
}
void acpi_bus_put_acpi_device(struct acpi_device *adev)
@@ -620,12 +619,23 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
put_device(&adev->dev);
}
+static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
+{
+ struct acpi_device_bus_id *acpi_device_bus_id;
+
+ /* Find suitable bus_id and instance number in acpi_bus_id_list. */
+ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+ if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
+ return acpi_device_bus_id;
+ }
+ return NULL;
+}
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
+ struct acpi_device_bus_id *acpi_device_bus_id;
int result;
- struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
- int found = 0;
if (device->handle) {
acpi_status status;
@@ -651,31 +661,26 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
- new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
- if (!new_bus_id) {
- pr_err(PREFIX "Memory allocation error\n");
- result = -ENOMEM;
- goto err_detach;
- }
-
mutex_lock(&acpi_device_lock);
- /*
- * Find suitable bus_id and instance number in acpi_bus_id_list
- * If failed, create one and link it into acpi_bus_id_list
- */
- list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
- if (!strcmp(acpi_device_bus_id->bus_id,
- acpi_device_hid(device))) {
- acpi_device_bus_id->instance_no++;
- found = 1;
- kfree(new_bus_id);
- break;
+
+ acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
+ if (acpi_device_bus_id) {
+ acpi_device_bus_id->instance_no++;
+ } else {
+ acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
+ GFP_KERNEL);
+ if (!acpi_device_bus_id) {
+ result = -ENOMEM;
+ goto err_unlock;
}
- }
- if (!found) {
- acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
- acpi_device_bus_id->instance_no = 0;
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+ kfree(acpi_device_bus_id);
+ result = -ENOMEM;
+ goto err_unlock;
+ }
+
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
@@ -685,10 +690,12 @@ int acpi_device_add(struct acpi_device *device,
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
+
mutex_unlock(&acpi_device_lock);
if (device->parent)
device->dev.parent = &device->parent->dev;
+
device->dev.bus = &acpi_bus_type;
device->dev.release = release;
result = device_add(&device->dev);
@@ -704,15 +711,19 @@ int acpi_device_add(struct acpi_device *device,
return 0;
- err:
+err:
mutex_lock(&acpi_device_lock);
+
if (device->parent)
list_del(&device->node);
+
list_del(&device->wakeup_list);
+
+err_unlock:
mutex_unlock(&acpi_device_lock);
- err_detach:
acpi_detach_data(device->handle, acpi_scan_drop_device);
+
return result;
}
@@ -815,7 +826,8 @@ static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
+ acpi_handle_info(handle, "_PRW evaluation failed: %s\n",
+ acpi_format_exception(status));
return err;
}
@@ -920,7 +932,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
- dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
+		dev_err(&device->dev, "Unable to extract wakeup power resources\n");
return;
}
@@ -1156,8 +1168,7 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
if (acpi_has_method(handle, "_BCM") &&
acpi_has_method(handle, "_BCL")) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
- "support\n"));
+ acpi_handle_debug(handle, "Found generic backlight support\n");
*cap |= ACPI_VIDEO_BACKLIGHT;
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
@@ -1648,17 +1659,15 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_handle handle, int type,
unsigned long long sta)
{
- int result;
- struct acpi_device *device;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device_info *info = NULL;
+ struct acpi_device *device;
+ int result;
if (handle != ACPI_ROOT_OBJECT && type == ACPI_BUS_TYPE_DEVICE)
acpi_get_object_info(handle, &info);
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
if (!device) {
- printk(KERN_ERR PREFIX "Memory allocation error\n");
kfree(info);
return -ENOMEM;
}
@@ -1685,11 +1694,11 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_power_add_remove_device(device, true);
acpi_device_add_finalize(device);
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
- dev_name(&device->dev), (char *) buffer.pointer,
- device->parent ? dev_name(&device->parent->dev) : "(null)"));
- kfree(buffer.pointer);
+
+ acpi_handle_debug(handle, "Added as %s, parent %s\n",
+ dev_name(&device->dev), device->parent ?
+ dev_name(&device->parent->dev) : "(null)");
+
*child = device;
return 0;
}
@@ -2108,12 +2117,12 @@ void acpi_walk_dep_device_list(acpi_handle handle)
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
acpi_bus_get_device(dep->consumer, &adev);
- if (!adev)
- continue;
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev, true);
+ if (adev) {
+ adev->dep_unmet--;
+ if (!adev->dep_unmet)
+ acpi_bus_attach(adev, true);
+ }
list_del(&dep->node);
kfree(dep);
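
The bus_id handling above relies on the kstrdup_const()/kfree_const() pair: a string that already lives in .rodata is not duplicated, so the matching free must be const-aware as well. A schematic of the pattern, with made-up names:

#include <linux/slab.h>
#include <linux/string.h>

struct tagged_obj {
	const char *name;
};

static int tagged_obj_set_name(struct tagged_obj *obj, const char *name)
{
	/* No copy is made when @name points into .rodata. */
	obj->name = kstrdup_const(name, GFP_KERNEL);
	return obj->name ? 0 : -ENOMEM;
}

static void tagged_obj_release(struct tagged_obj *obj)
{
	kfree_const(obj->name);	/* frees only if a copy was really made */
}
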
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index a5cc4f3bb1e3..8baf7644a0d0 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -52,19 +52,12 @@ static const struct acpi_dlayer acpi_debug_layers[] = {
ACPI_DEBUG_INIT(ACPI_COMPILER),
ACPI_DEBUG_INIT(ACPI_TOOLS),
- ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 12c0ece746f0..95105db642b9 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -13,6 +13,8 @@
* concepts of 'multiple limiters', upper/lower limits, etc.
*/
+#define pr_fmt(fmt) "ACPI: thermal: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
@@ -29,8 +31,6 @@
#include <linux/uaccess.h>
#include <linux/units.h>
-#define PREFIX "ACPI: "
-
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
@@ -43,9 +43,6 @@
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
-#define _COMPONENT ACPI_THERMAL_COMPONENT
-ACPI_MODULE_NAME("thermal");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
@@ -174,6 +171,8 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
@@ -195,8 +194,9 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
return -ENODEV;
tz->temperature = tmp;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n",
- tz->temperature));
+
+ acpi_handle_debug(tz->device->handle, "Temperature is %lu dK\n",
+ tz->temperature);
return 0;
}
@@ -214,8 +214,8 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
return -ENODEV;
tz->polling_frequency = tmp;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n",
- tz->polling_frequency));
+ acpi_handle_debug(tz->device->handle, "Polling frequency is %lu dS\n",
+ tz->polling_frequency);
return 0;
}
@@ -252,12 +252,12 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
 * 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
* We need to re-bind the cooling devices of a thermal zone when this occurs.
*/
-#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \
do { \
if (flags != ACPI_TRIPS_INIT) \
- ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
+ acpi_handle_info(tz->device->handle, \
"ACPI thermal trip point %s changed\n" \
- "Please send acpidump to linux-acpi@vger.kernel.org", str)); \
+ "Please report to linux-acpi@vger.kernel.org\n", str); \
} while (0)
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
@@ -281,17 +281,17 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
*/
if (ACPI_FAILURE(status)) {
tz->trips.critical.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "No critical threshold\n"));
+ acpi_handle_debug(tz->device->handle,
+ "No critical threshold\n");
} else if (tmp <= 2732) {
- pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+ pr_info(FW_BUG "Invalid critical threshold (%llu)\n",
tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ acpi_handle_debug(tz->device->handle,
"Found critical threshold [%lu]\n",
- tz->trips.critical.temperature));
+ tz->trips.critical.temperature);
}
if (tz->trips.critical.flags.valid == 1) {
if (crt == -1) {
@@ -303,8 +303,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* Allow override critical threshold
*/
if (crt_k > tz->trips.critical.temperature)
- pr_warn(PREFIX "Critical threshold %d C\n",
- crt);
+ pr_info("Critical threshold %d C\n", crt);
+
tz->trips.critical.temperature = crt_k;
}
}
@@ -316,14 +316,14 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
"_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "No hot threshold\n"));
+ acpi_handle_debug(tz->device->handle,
+ "No hot threshold\n");
} else {
tz->trips.hot.temperature = tmp;
tz->trips.hot.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found hot threshold [%lu]\n",
- tz->trips.hot.temperature));
+ acpi_handle_debug(tz->device->handle,
+ "Found hot threshold [%lu]\n",
+ tz->trips.hot.temperature);
}
}
@@ -376,7 +376,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "Invalid passive threshold\n");
+ acpi_handle_info(tz->device->handle,
+ "Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
}
else
@@ -386,12 +387,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
sizeof(struct acpi_handle_list));
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
@@ -438,8 +439,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "Invalid active%d threshold\n",
- i);
+ acpi_handle_info(tz->device->handle,
+ "Invalid active%d threshold\n", i);
tz->trips.active[i].flags.valid = 0;
}
else
@@ -449,12 +450,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
sizeof(struct acpi_handle_list));
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
if (valid != tz->trips.active[i].flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
if (!tz->trips.active[i].flags.valid)
break;
@@ -467,7 +468,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (ACPI_SUCCESS(status)
&& memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
@@ -495,14 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -677,27 +670,24 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
return 0;
}
-
-static int thermal_notify(struct thermal_zone_device *thermal, int trip,
- enum thermal_trip_type trip_type)
+static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
{
- u8 type = 0;
struct acpi_thermal *tz = thermal->devdata;
- if (trip_type == THERMAL_TRIP_CRITICAL)
- type = ACPI_THERMAL_NOTIFY_CRITICAL;
- else if (trip_type == THERMAL_TRIP_HOT)
- type = ACPI_THERMAL_NOTIFY_HOT;
- else
- return 0;
-
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
- dev_name(&tz->device->dev), type, 1);
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_HOT, 1);
+}
+
+static void acpi_thermal_zone_device_critical(struct thermal_zone_device *thermal)
+{
+ struct acpi_thermal *tz = thermal->devdata;
- if (trip_type == THERMAL_TRIP_CRITICAL && nocrt)
- return 1;
+ acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_CRITICAL, 1);
- return 0;
+ thermal_zone_device_critical(thermal);
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
@@ -767,25 +757,6 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
}
}
- for (i = 0; i < tz->devices.count; i++) {
- handle = tz->devices.handles[i];
- status = acpi_bus_get_device(handle, &dev);
- if (ACPI_SUCCESS(status) && (dev == device)) {
- if (bind)
- result = thermal_zone_bind_cooling_device
- (thermal, THERMAL_TRIPS_NONE,
- cdev, THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- else
- result = thermal_zone_unbind_cooling_device
- (thermal, THERMAL_TRIPS_NONE,
- cdev);
- if (result)
- goto failed;
- }
- }
-
failed:
return result;
}
@@ -812,7 +783,8 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.get_trip_temp = thermal_get_trip_temp,
.get_crit_temp = thermal_get_crit_temp,
.get_trend = thermal_get_trend,
- .notify = thermal_notify,
+ .hot = acpi_thermal_zone_device_hot,
+ .critical = acpi_thermal_zone_device_critical,
};
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
@@ -900,6 +872,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@@ -910,23 +888,23 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
}
@@ -1020,7 +998,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
static int acpi_thermal_add(struct acpi_device *device)
@@ -1052,9 +1048,11 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
- pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+ pr_info("%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
goto end;
@@ -1117,7 +1115,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
@@ -1126,24 +1124,24 @@ static int acpi_thermal_resume(struct device *dev)
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
- pr_notice(PREFIX "%s detected: "
- "disabling all active thermal trip points\n", d->ident);
+ pr_notice("%s detected: disabling all active thermal trip points\n",
+ d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
- pr_notice(PREFIX "%s detected: "
- "disabling all critical thermal trip point actions.\n", d->ident);
+ pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
+ d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
if (tzp == 0) {
- pr_notice(PREFIX "%s detected: "
- "enabling thermal zone polling\n", d->ident);
+ pr_notice("%s detected: enabling thermal zone polling\n",
+ d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
@@ -1151,8 +1149,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
static int thermal_psv(const struct dmi_system_id *d) {
if (psv == 0) {
- pr_notice(PREFIX "%s detected: "
- "disabling all passive thermal trip points\n", d->ident);
+ pr_notice("%s detected: disabling all passive thermal trip points\n",
+ d->ident);
psv = -1;
}
return 0;
@@ -1205,7 +1203,7 @@ static int __init acpi_thermal_init(void)
dmi_check_system(thermal_dmi_table);
if (off) {
- pr_notice(PREFIX "thermal control disabled\n");
+ pr_notice("thermal control disabled\n");
return -ENODEV;
}
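
The refcount scheme in acpi_thermal_check_fn() above generalizes to any work item that should be throttled rather than blindly serialized: with the counter initialized to 3, one instance runs, one more may wait on the mutex, and any further instances bail out immediately. A sketch under those assumptions, with hypothetical names:

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>

struct throttled_ctx {
	struct work_struct work;
	struct mutex lock;
	refcount_t count;	/* refcount_set(&count, 3) at init time */
};

static void expensive_update(struct throttled_ctx *ctx) { }	/* stand-in */

static void throttled_work_fn(struct work_struct *work)
{
	struct throttled_ctx *ctx = container_of(work, struct throttled_ctx, work);

	/* count == 1 means one runner plus one waiter already exist: bail. */
	if (!refcount_dec_not_one(&ctx->count))
		return;

	mutex_lock(&ctx->lock);
	expensive_update(ctx);
	refcount_inc(&ctx->count);
	mutex_unlock(&ctx->lock);
}
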
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index d5411a166685..682edd913b3b 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -6,6 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: utils: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,24 +20,12 @@
#include "internal.h"
#include "sleep.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("utils");
-
/* --------------------------------------------------------------------------
Object Evaluation Helpers
-------------------------------------------------------------------------- */
-static void
-acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
+static void acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
{
-#ifdef ACPI_DEBUG_OUTPUT
- char prefix[80] = {'\0'};
- struct acpi_buffer buffer = {sizeof(prefix), prefix};
- acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
- (char *) prefix, p, acpi_format_exception(s)));
-#else
- return;
-#endif
+ acpi_handle_debug(h, "Evaluate [%s]: %s\n", p, acpi_format_exception(s));
}
acpi_status
@@ -53,25 +43,24 @@ acpi_extract_package(union acpi_object *package,
if (!package || (package->type != ACPI_TYPE_PACKAGE)
|| (package->package.count < 1)) {
- printk(KERN_WARNING PREFIX "Invalid package argument\n");
+ pr_debug("Invalid package argument\n");
return AE_BAD_PARAMETER;
}
if (!format || !format->pointer || (format->length < 1)) {
- printk(KERN_WARNING PREFIX "Invalid format argument\n");
+ pr_debug("Invalid format argument\n");
return AE_BAD_PARAMETER;
}
if (!buffer) {
- printk(KERN_WARNING PREFIX "Invalid buffer argument\n");
+ pr_debug("Invalid buffer argument\n");
return AE_BAD_PARAMETER;
}
format_count = (format->length / sizeof(char)) - 1;
if (format_count > package->package.count) {
- printk(KERN_WARNING PREFIX "Format specifies more objects [%d]"
- " than exist in package [%d].\n",
- format_count, package->package.count);
+ pr_debug("Format specifies more objects [%d] than present [%d]\n",
+ format_count, package->package.count);
return AE_BAD_DATA;
}
@@ -99,10 +88,8 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(char *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d]: got number, expecting"
- " [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d]: got number, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
@@ -123,10 +110,8 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(u8 *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d] got string/buffer,"
- " expecting [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d] got string/buffer, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
@@ -137,19 +122,15 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(void *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d] got reference,"
- " expecting [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d] got reference, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
case ACPI_TYPE_PACKAGE:
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found unsupported element at index=%d\n",
- i));
+ pr_debug("Unsupported element at index=%d\n", i);
/* TBD: handle nested packages... */
return AE_SUPPORT;
}
@@ -289,7 +270,7 @@ acpi_evaluate_integer(acpi_handle handle,
*data = element.integer.value;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
+ acpi_handle_debug(handle, "Return value [%llu]\n", *data);
return AE_OK;
}
@@ -363,8 +344,7 @@ acpi_evaluate_reference(acpi_handle handle,
/* Get the acpi_handle. */
list->handles[i] = element->reference.handle;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
- list->handles[i]));
+ acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
end:
@@ -843,12 +823,13 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
EXPORT_SYMBOL(acpi_dev_present);
/**
- * acpi_dev_get_first_match_dev - Return the first match of ACPI device
+ * acpi_dev_get_next_match_dev - Return the next match of ACPI device
+ * @adev: Pointer to the previous acpi_device matching this @hid, @uid and @hrv
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
- * Return the first match of ACPI device if a matching device was present
+ * Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
 * The caller is responsible for calling put_device() on the returned device.
@@ -856,8 +837,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* See additional information in acpi_dev_present() as well.
*/
struct acpi_device *
-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv)
{
+ struct device *start = adev ? &adev->dev : NULL;
struct acpi_dev_match_info match = {};
struct device *dev;
@@ -865,9 +847,29 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
return dev ? to_acpi_device(dev) : NULL;
}
+EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
+
+/**
+ * acpi_dev_get_first_match_dev - Return the first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return the first match of ACPI device if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * The caller is responsible for calling put_device() on the returned device.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+{
+ return acpi_dev_get_next_match_dev(NULL, hid, uid, hrv);
+}
EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
/*
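
With acpi_dev_get_next_match_dev() exported, every device matching a HID can be walked by feeding each result back in as the starting point. Each call returns a referenced device, so the previous reference is dropped only after it has served as the cursor. An illustrative sketch, not an API from this patch:

static int demo_count_matches(const char *hid)
{
	struct acpi_device *adev, *next;
	int n = 0;

	adev = acpi_dev_get_first_match_dev(hid, NULL, -1);
	while (adev) {
		n++;
		next = acpi_dev_get_next_match_dev(adev, hid, NULL, -1);
		acpi_dev_put(adev);	/* safe: no longer needed as cursor */
		adev = next;
	}
	return n;
}
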
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 25fea34b544c..2b69536cdccb 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info_amd info = { };
- if (package->type == ACPI_TYPE_INTEGER) {
- switch (i) {
- case 0:
- info.revision = package->integer.value;
- break;
- case 1:
- info.count = package->integer.value;
- break;
- }
- } else if (package->type == ACPI_TYPE_PACKAGE) {
+ if (package->type == ACPI_TYPE_PACKAGE) {
lpi_constraints_table = kcalloc(package->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
for (k = 0; k < info_obj->package.count; ++k) {
union acpi_object *obj = &info_obj->package.elements[k];
- union acpi_object *obj_new;
list = &lpi_constraints_table[lpi_constraints_table_size];
list->min_dstate = -1;
- obj_new = &obj[k];
switch (k) {
case 0:
dev_info.enabled = obj->integer.value;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index ecc304149067..939ca220bf78 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -56,31 +56,28 @@ amba_lookup(const struct amba_id *table, struct amba_device *dev)
return NULL;
}
-static int amba_match(struct device *dev, struct device_driver *drv)
+static int amba_get_enable_pclk(struct amba_device *pcdev)
{
- struct amba_device *pcdev = to_amba_device(dev);
- struct amba_driver *pcdrv = to_amba_driver(drv);
+ int ret;
- /* When driver_override is set, only bind to the matching driver */
- if (pcdev->driver_override)
- return !strcmp(pcdev->driver_override, drv->name);
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
- return amba_lookup(pcdrv->id_table, pcdev) != NULL;
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
+
+ return ret;
}
-static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
+static void amba_put_disable_pclk(struct amba_device *pcdev)
{
- struct amba_device *pcdev = to_amba_device(dev);
- int retval = 0;
-
- retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
- if (retval)
- return retval;
-
- retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
- return retval;
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
}
+
static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
@@ -152,102 +149,29 @@ static struct attribute *amba_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(amba_dev);
-#ifdef CONFIG_PM
-/*
- * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
- * enable/disable the bus clock at runtime PM suspend/resume as this
- * does not result in loss of context.
- */
-static int amba_pm_runtime_suspend(struct device *dev)
+static int amba_match(struct device *dev, struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
- int ret = pm_generic_runtime_suspend(dev);
+ struct amba_driver *pcdrv = to_amba_driver(drv);
- if (ret == 0 && dev->driver) {
- if (pm_runtime_is_irq_safe(dev))
- clk_disable(pcdev->pclk);
- else
- clk_disable_unprepare(pcdev->pclk);
- }
+ /* When driver_override is set, only bind to the matching driver */
+ if (pcdev->driver_override)
+ return !strcmp(pcdev->driver_override, drv->name);
- return ret;
+ return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-static int amba_pm_runtime_resume(struct device *dev)
+static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct amba_device *pcdev = to_amba_device(dev);
- int ret;
-
- if (dev->driver) {
- if (pm_runtime_is_irq_safe(dev))
- ret = clk_enable(pcdev->pclk);
- else
- ret = clk_prepare_enable(pcdev->pclk);
- /* Failure is probably fatal to the system, but... */
- if (ret)
- return ret;
- }
-
- return pm_generic_runtime_resume(dev);
-}
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops amba_pm = {
- .suspend = pm_generic_suspend,
- .resume = pm_generic_resume,
- .freeze = pm_generic_freeze,
- .thaw = pm_generic_thaw,
- .poweroff = pm_generic_poweroff,
- .restore = pm_generic_restore,
- SET_RUNTIME_PM_OPS(
- amba_pm_runtime_suspend,
- amba_pm_runtime_resume,
- NULL
- )
-};
-
-/*
- * Primecells are part of the Advanced Microcontroller Bus Architecture,
- * so we call the bus "amba".
- * DMA configuration for the platform and AMBA buses is the same, so we
- * reuse the platform's DMA config routine here.
- */
-struct bus_type amba_bustype = {
- .name = "amba",
- .dev_groups = amba_dev_groups,
- .match = amba_match,
- .uevent = amba_uevent,
- .dma_configure = platform_dma_configure,
- .pm = &amba_pm,
-};
-EXPORT_SYMBOL_GPL(amba_bustype);
-
-static int __init amba_init(void)
-{
- return bus_register(&amba_bustype);
-}
-
-postcore_initcall(amba_init);
-
-static int amba_get_enable_pclk(struct amba_device *pcdev)
-{
- int ret;
-
- pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
- if (IS_ERR(pcdev->pclk))
- return PTR_ERR(pcdev->pclk);
-
- ret = clk_prepare_enable(pcdev->pclk);
- if (ret)
- clk_put(pcdev->pclk);
+ int retval = 0;
- return ret;
-}
+ retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+ if (retval)
+ return retval;
-static void amba_put_disable_pclk(struct amba_device *pcdev)
-{
- clk_disable_unprepare(pcdev->pclk);
- clk_put(pcdev->pclk);
+ retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
+ return retval;
}
/*
@@ -299,10 +223,10 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret;
pm_runtime_get_sync(dev);
- ret = drv->remove(pcdev);
+ if (drv->remove)
+ drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
@@ -313,15 +237,101 @@ static int amba_remove(struct device *dev)
amba_put_disable_pclk(pcdev);
dev_pm_domain_detach(dev, true);
- return ret;
+ return 0;
}
static void amba_shutdown(struct device *dev)
{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- drv->shutdown(to_amba_device(dev));
+ struct amba_driver *drv;
+
+ if (!dev->driver)
+ return;
+
+ drv = to_amba_driver(dev->driver);
+ if (drv->shutdown)
+ drv->shutdown(to_amba_device(dev));
+}
+
+#ifdef CONFIG_PM
+/*
+ * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
+ * enable/disable the bus clock at runtime PM suspend/resume as this
+ * does not result in loss of context.
+ */
+static int amba_pm_runtime_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret = pm_generic_runtime_suspend(dev);
+
+ if (ret == 0 && dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ clk_disable(pcdev->pclk);
+ else
+ clk_disable_unprepare(pcdev->pclk);
+ }
+
+ return ret;
+}
+
+static int amba_pm_runtime_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ if (dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ ret = clk_enable(pcdev->pclk);
+ else
+ ret = clk_prepare_enable(pcdev->pclk);
+ /* Failure is probably fatal to the system, but... */
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops amba_pm = {
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+ SET_RUNTIME_PM_OPS(
+ amba_pm_runtime_suspend,
+ amba_pm_runtime_resume,
+ NULL
+ )
+};
+
+/*
+ * Primecells are part of the Advanced Microcontroller Bus Architecture,
+ * so we call the bus "amba".
+ * DMA configuration for the platform and AMBA buses is the same, so we
+ * reuse the platform's DMA config routine here.
+ */
+struct bus_type amba_bustype = {
+ .name = "amba",
+ .dev_groups = amba_dev_groups,
+ .match = amba_match,
+ .uevent = amba_uevent,
+ .probe = amba_probe,
+ .remove = amba_remove,
+ .shutdown = amba_shutdown,
+ .dma_configure = platform_dma_configure,
+ .pm = &amba_pm,
+};
+EXPORT_SYMBOL_GPL(amba_bustype);
+
+static int __init amba_init(void)
+{
+ return bus_register(&amba_bustype);
}
+postcore_initcall(amba_init);
+
/**
* amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
@@ -332,12 +342,10 @@ static void amba_shutdown(struct device *dev)
*/
int amba_driver_register(struct amba_driver *drv)
{
- drv->drv.bus = &amba_bustype;
+ if (!drv->probe)
+ return -EINVAL;
-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
- SETFN(probe);
- SETFN(remove);
- SETFN(shutdown);
+ drv->drv.bus = &amba_bustype;
return driver_register(&drv->drv);
}
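
After this refactoring the bus core supplies the probe/remove/shutdown wrappers, so a driver only has to provide .probe (amba_driver_register() now rejects drivers without one) and may omit .remove and .shutdown entirely. A minimal, hypothetical driver showing the shape:

#include <linux/amba/bus.h>
#include <linux/module.h>

static const struct amba_id demo_ids[] = {
	{ .id = 0x00041011, .mask = 0x000fffff },	/* example periphid */
	{ 0, 0 },
};

static int demo_probe(struct amba_device *adev, const struct amba_id *id)
{
	dev_info(&adev->dev, "bound, periphid %08x\n", adev->periphid);
	return 0;
}

static struct amba_driver demo_driver = {
	.drv		= { .name = "amba-demo" },
	.id_table	= demo_ids,
	.probe		= demo_probe,
	/* .remove and .shutdown are now optional */
};
module_amba_driver(demo_driver);
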
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7b4f154f07e6..e80ba93c62a9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -355,7 +355,8 @@ static inline bool is_binderfs_control_device(const struct dentry *dentry)
return info->control_dentry == dentry;
}
-static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int binderfs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -363,7 +364,8 @@ static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
is_binderfs_control_device(new_dentry))
return -EPERM;
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ return simple_rename(&init_user_ns, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
}
static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 49f7acbfcf01..5b32df5d33ad 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -377,6 +377,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
if (ret)
return ret;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
@@ -406,6 +410,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+ ahci_platform_disable_regulators(hpriv);
+out_disable_clks:
ahci_platform_disable_clks(hpriv);
return ret;
}
@@ -490,6 +496,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (ret)
goto out_reset;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
/* Must be first so as to configure endianness including that
* of the standard AHCI register space.
*/
@@ -499,7 +509,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
if (!priv->port_mask) {
ret = -ENODEV;
- goto out_disable_clks;
+ goto out_disable_regulators;
}
/* Must be done before ahci_platform_enable_phys() */
@@ -524,6 +534,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+out_disable_regulators:
+ ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
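
The regulator hunks above follow the usual probe unwind discipline: each newly acquired resource gets a matching goto label, and the labels release in reverse order of acquisition so a failure at any stage undoes exactly the steps already taken. The same structure in miniature, with stand-in helpers:

static int enable_clks(void) { return 0; }		/* stand-ins */
static void disable_clks(void) { }
static int enable_regulators(void) { return 0; }
static void disable_regulators(void) { }
static int start_controller(void) { return -EIO; }

static int demo_probe(void)
{
	int ret;

	ret = enable_clks();
	if (ret)
		return ret;

	ret = enable_regulators();
	if (ret)
		goto out_disable_clks;

	ret = start_controller();
	if (ret)
		goto out_disable_regulators;

	return 0;

out_disable_regulators:
	disable_regulators();
out_disable_clks:
	disable_clks();
	return ret;
}
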
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 08543aeb0093..498383cb6e29 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -202,14 +202,19 @@ static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev
* Choose the IOMD cycle timing which ensure that the interface
* satisfies the measured active, recovery and cycle times.
*/
- if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
- iomd_type = 'D', cycle = 187;
- else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
- iomd_type = 'C', cycle = 250;
- else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
- iomd_type = 'B', cycle = 437;
- else
- iomd_type = 'A', cycle = 562;
+ if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) {
+ iomd_type = 'D';
+ cycle = 187;
+ } else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) {
+ iomd_type = 'C';
+ cycle = 250;
+ } else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) {
+ iomd_type = 'B';
+ cycle = 437;
+ } else {
+ iomd_type = 'A';
+ cycle = 562;
+ }
ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
t.active, t.recover, t.cycle, iomd_type);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..0c13cac903de 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
@@ -3743,16 +3743,7 @@ static int __init idt77252_init(void)
struct sk_buff *skb;
printk("%s: at %p\n", __func__, idt77252_init);
-
- if (sizeof(skb->cb) < sizeof(struct atm_skb_data) +
- sizeof(struct idt77252_skb_prv)) {
- printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n",
- __func__, (unsigned long) sizeof(skb->cb),
- (unsigned long) sizeof(struct atm_skb_data) +
- sizeof(struct idt77252_skb_prv));
- return -EIO;
- }
-
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct idt77252_skb_prv) + sizeof(struct atm_skb_data));
return pci_register_driver(&idt77252_driver);
}
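
Converting the runtime skb->cb size test into BUILD_BUG_ON() moves the invariant to compile time: a private area that no longer fits fails the build instead of failing driver registration at boot. The same idiom in miniature, with an assumed structure:

#include <linux/bug.h>
#include <linux/skbuff.h>

struct demo_skb_prv {
	u64 cookie;
	u32 pool;
} __packed;

static int __init demo_init(void)
{
	/* Compile-time failure if the private data outgrows skb->cb. */
	BUILD_BUG_ON(sizeof(struct demo_skb_prv) >
		     sizeof_field(struct sk_buff, cb));
	return 0;
}
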
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 9339197d701c..b059d31364dd 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -789,7 +789,7 @@ struct idt77252_skb_prv {
struct scqe tbd; /* Transmit Buffer Descriptor */
dma_addr_t paddr; /* DMA handle */
u32 pool; /* sb_pool handle */
-};
+} __packed;
#define IDT77252_PRV_TBD(skb) \
(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->tbd)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index a2b59b84bb88..1509cb74705a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -507,6 +507,3 @@ config PANEL
depends on PARPORT
select AUXDISPLAY
select PARPORT_PANEL
-
-config CHARLCD
- tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 7eebae7e322c..fd430e6866a1 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -5,7 +5,7 @@
* Description: cfag12864b LCD driver
* Depends: ks0108
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -376,5 +376,5 @@ module_init(cfag12864b_init);
module_exit(cfag12864b_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("cfag12864b LCD driver");
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 2002291ab338..d66821adf453 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -5,7 +5,7 @@
* Description: cfag12864b LCD framebuffer driver
* Depends: cfag12864b
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -171,5 +171,5 @@ module_init(cfag12864bfb_init);
module_exit(cfag12864bfb_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("cfag12864b LCD framebuffer driver");
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index d951d54b26f5..1e69cc6d21a0 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- schedule_delayed_work(&fbdev->work,
- msecs_to_jiffies(HZ / fbdev->refresh_rate));
+ schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
}
/*
@@ -402,11 +401,6 @@ static int ht16k33_probe(struct i2c_client *client,
return -EIO;
}
- if (client->irq <= 0) {
- dev_err(&client->dev, "No IRQ specified\n");
- return -EINVAL;
- }
-
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -459,9 +453,12 @@ static int ht16k33_probe(struct i2c_client *client,
if (err)
goto err_fbdev_info;
- err = ht16k33_keypad_probe(client, &priv->keypad);
- if (err)
- goto err_fbdev_unregister;
+ /* Keypad */
+ if (client->irq > 0) {
+ err = ht16k33_keypad_probe(client, &priv->keypad);
+ if (err)
+ goto err_fbdev_unregister;
+ }
/* Backlight */
memset(&bl_props, 0, sizeof(struct backlight_properties));
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index abfe3fa9e6f4..03c95ad4216c 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -5,7 +5,7 @@
* Description: ks0108 LCD Controller driver
* Depends: parport
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -182,6 +182,6 @@ module_init(ks0108_init);
module_exit(ks0108_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("ks0108 LCD Controller driver");
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 040be48ce046..ffcbe2bc460e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -161,7 +161,7 @@ config HMEM_REPORTING
default n
depends on NUMA
help
- Enable reporting for heterogenous memory access attributes under
+ Enable reporting for heterogeneous memory access attributes under
their non-uniform memory nodes.
source "drivers/base/test/Kconfig"
@@ -213,4 +213,10 @@ config GENERIC_ARCH_TOPOLOGY
appropriate scaling, sysfs interface for reading capacity values at
runtime.
+config GENERIC_ARCH_NUMA
+ bool
+ help
+ Enable support for generic NUMA implementation. Currently, RISC-V
+ and ARM64 use it.
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5e7bf9669a81..8b93a7f291ec 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
+obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
obj-y += test/
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
new file mode 100644
index 000000000000..4cc4e117727d
--- /dev/null
+++ b/drivers/base/arch_numa.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NUMA support, based on the x86 implementation.
+ *
+ * Copyright (C) 2015 Cavium Inc.
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
+ */
+
+#define pr_fmt(fmt) "NUMA: " fmt
+
+#include <linux/acpi.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <asm/sections.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+nodemask_t numa_nodes_parsed __initdata;
+static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
+bool numa_off;
+
+static __init int numa_parse_early_param(char *opt)
+{
+ if (!opt)
+ return -EINVAL;
+ if (str_has_prefix(opt, "off"))
+ numa_off = true;
+
+ return 0;
+}
+early_param("numa", numa_parse_early_param);
+
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const struct cpumask *cpumask_of_node(int node)
+{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ if (WARN_ON(node < 0 || node >= nr_node_ids))
+ return cpu_none_mask;
+
+ if (WARN_ON(node_to_cpumask_map[node] == NULL))
+ return cpu_online_mask;
+
+ return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+#endif
+
+static void numa_update_cpu(unsigned int cpu, bool remove)
+{
+ int nid = cpu_to_node(cpu);
+
+ if (nid == NUMA_NO_NODE)
+ return;
+
+ if (remove)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+ else
+ cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, false);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, true);
+}
+
+void numa_clear_node(unsigned int cpu)
+{
+ numa_remove_cpu(cpu);
+ set_cpu_numa_node(cpu, NUMA_NO_NODE);
+}
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: cpumask_of_node() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+ int node;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
+
+ /* allocate and clear the mapping */
+ for (node = 0; node < nr_node_ids; node++) {
+ alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+ cpumask_clear(node_to_cpumask_map[node]);
+ }
+
+ /* cpumask_of_node() will now work */
+ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
+}
+
+/*
+ * Set the cpu to node and mem mapping
+ */
+void numa_store_cpu_info(unsigned int cpu)
+{
+ set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
+}
+
+void __init early_map_cpu_to_node(unsigned int cpu, int nid)
+{
+ /* fallback to node 0 */
+ if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
+ nid = 0;
+
+ cpu_to_node_map[cpu] = nid;
+
+ /*
+ * We should set the numa node of cpu0 as soon as possible, because it
+ * has already been set up online before. cpu_to_node(0) will soon be
+ * called.
+ */
+ if (!cpu)
+ set_cpu_numa_node(cpu, nid);
+}
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+ return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+ size_t align)
+{
+ int nid = early_cpu_to_node(cpu);
+
+ return memblock_alloc_try_nid(size, align,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+ memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ pcpu_fc_alloc, pcpu_fc_free);
+ if (rc < 0)
+ panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/**
+ * numa_add_memblk() - Set node id to memblk
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ int ret;
+
+ ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
+ if (ret < 0) {
+ pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
+ start, (end - 1), nid);
+ return ret;
+ }
+
+ node_set(nid, numa_nodes_parsed);
+ return ret;
+}
+
+/*
+ * Initialize NODE_DATA for a node on the local memory
+ */
+static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
+{
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+
+ if (start_pfn >= end_pfn)
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
+
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
+ nd = __va(nd_pa);
+
+ /* report and initialize */
+ pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
+ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != nid)
+ pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+ memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+ NODE_DATA(nid)->node_id = nid;
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+/*
+ * numa_free_distance
+ *
+ * The current table is freed.
+ */
+void __init numa_free_distance(void)
+{
+ size_t size;
+
+ if (!numa_distance)
+ return;
+
+ size = numa_distance_cnt * numa_distance_cnt *
+ sizeof(numa_distance[0]);
+
+ memblock_free(__pa(numa_distance), size);
+ numa_distance_cnt = 0;
+ numa_distance = NULL;
+}
+
+/*
+ * Create a new NUMA distance table.
+ */
+static int __init numa_alloc_distance(void)
+{
+ size_t size;
+ u64 phys;
+ int i, j;
+
+ size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
+ phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
+ size, PAGE_SIZE);
+ if (WARN_ON(!phys))
+ return -ENOMEM;
+
+ memblock_reserve(phys, size);
+
+ numa_distance = __va(phys);
+ numa_distance_cnt = nr_node_ids;
+
+ /* fill with the default distances */
+ for (i = 0; i < numa_distance_cnt; i++)
+ for (j = 0; j < numa_distance_cnt; j++)
+ numa_distance[i * numa_distance_cnt + j] = i == j ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+
+ pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
+
+ return 0;
+}
+
+/**
+ * numa_set_distance() - Set inter node NUMA distance from node to node.
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance.
+ * If the distance table doesn't exist, a warning is printed.
+ *
+ * If @from or @to is higher than the highest known node or lower than zero,
+ * or if @distance doesn't make sense, the call is ignored.
+ */
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if (!numa_distance) {
+ pr_warn_once("Warning: distance table not allocated yet\n");
+ return;
+ }
+
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+ from < 0 || to < 0) {
+		pr_warn_once("Warning: node ids are out of bounds, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ if ((u8)distance != distance ||
+ (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ numa_distance[from * numa_distance_cnt + to] = distance;
+}
+
+/*
+ * Return NUMA distance @from to @to
+ */
+int __node_distance(int from, int to)
+{
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+ return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
+
+static int __init numa_register_nodes(void)
+{
+ int nid;
+ struct memblock_region *mblk;
+
+ /* Check that valid nid is set to memblks */
+ for_each_mem_region(mblk) {
+ int mblk_nid = memblock_get_region_node(mblk);
+ phys_addr_t start = mblk->base;
+ phys_addr_t end = mblk->base + mblk->size - 1;
+
+ if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
+ pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
+ mblk_nid, &start, &end);
+ return -EINVAL;
+ }
+ }
+
+ /* Finally register nodes. */
+ for_each_node_mask(nid, numa_nodes_parsed) {
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ setup_node_data(nid, start_pfn, end_pfn);
+ node_set_online(nid);
+ }
+
+	/* Set node_possible_map to the nodes actually parsed. */
+ node_possible_map = numa_nodes_parsed;
+
+ return 0;
+}
+
+static int __init numa_init(int (*init_func)(void))
+{
+ int ret;
+
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+
+ ret = numa_alloc_distance();
+ if (ret < 0)
+ return ret;
+
+ ret = init_func();
+ if (ret < 0)
+ goto out_free_distance;
+
+ if (nodes_empty(numa_nodes_parsed)) {
+ pr_info("No NUMA configuration found\n");
+ ret = -EINVAL;
+ goto out_free_distance;
+ }
+
+ ret = numa_register_nodes();
+ if (ret < 0)
+ goto out_free_distance;
+
+ setup_node_to_cpumask_map();
+
+ return 0;
+out_free_distance:
+ numa_free_distance();
+ return ret;
+}
+
+/**
+ * dummy_numa_init() - Fallback dummy NUMA init
+ *
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
+ *
+ * Must online at least one node (node 0) and add memory blocks that cover all
+ * allowed memory. It is unlikely that this function fails.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int __init dummy_numa_init(void)
+{
+ phys_addr_t start = memblock_start_of_DRAM();
+ phys_addr_t end = memblock_end_of_DRAM() - 1;
+ int ret;
+
+ if (numa_off)
+ pr_info("NUMA disabled\n"); /* Forced off on command line. */
+ pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
+
+ ret = numa_add_memblk(0, start, end + 1);
+ if (ret) {
+ pr_err("NUMA init failed\n");
+ return ret;
+ }
+
+ numa_off = true;
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+static int __init arch_acpi_numa_init(void)
+{
+ int ret;
+
+ ret = acpi_numa_init();
+ if (ret) {
+ pr_info("Failed to initialise from firmware\n");
+ return ret;
+ }
+
+ return srat_disabled() ? -EINVAL : 0;
+}
+#else
+static int __init arch_acpi_numa_init(void)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * arch_numa_init() - Initialize NUMA
+ *
+ * Try each configured NUMA initialization method until one succeeds. The
+ * last fallback is a dummy single-node configuration encompassing all memory.
+ */
+void __init arch_numa_init(void)
+{
+ if (!numa_off) {
+ if (!acpi_disabled && !numa_init(arch_acpi_numa_init))
+ return;
+ if (acpi_disabled && !numa_init(of_numa_init))
+ return;
+ }
+
+ numa_init(dummy_numa_init);
+}
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 8336535f1e11..d8b314e7d0fd 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -15,6 +15,7 @@
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/auxiliary_bus.h>
+#include "base.h"
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
@@ -260,19 +261,11 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
-static int __init auxiliary_bus_init(void)
+void __init auxiliary_bus_init(void)
{
- return bus_register(&auxiliary_bus_type);
+ WARN_ON(bus_register(&auxiliary_bus_type));
}
-static void __exit auxiliary_bus_exit(void)
-{
- bus_unregister(&auxiliary_bus_type);
-}
-
-module_init(auxiliary_bus_init);
-module_exit(auxiliary_bus_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Auxiliary Bus");
MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>");
diff --git a/drivers/base/base.h b/drivers/base/base.h
index f5600a83124f..52b3d7b75c27 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -119,6 +119,11 @@ static inline int hypervisor_init(void) { return 0; }
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
extern void container_dev_init(void);
+#ifdef CONFIG_AUXILIARY_BUS
+extern void auxiliary_bus_init(void);
+#else
+static inline void auxiliary_bus_init(void) { }
+#endif
struct kobject *virtual_device_parent(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a9c23ecebc7c..36d0c654ea61 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -633,7 +633,7 @@ int bus_add_driver(struct device_driver *drv)
error = driver_add_groups(drv, bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
+ printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
__func__, drv->name);
}
@@ -729,23 +729,6 @@ int device_reprobe(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_reprobe);
-/**
- * find_bus - locate bus by name.
- * @name: name of bus.
- *
- * Call kset_find_obj() to iterate over list of buses to
- * find a bus by name. Return bus if found.
- *
- * Note that kset_find_obj increments bus' reference count.
- */
-#if 0
-struct bus_type *find_bus(char *name)
-{
- struct kobject *k = kset_find_obj(bus_kset, name);
- return k ? to_bus(k) : NULL;
-}
-#endif /* 0 */
-
static int bus_add_groups(struct bus_type *bus,
const struct attribute_group **groups)
{
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..f29839382f81 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -28,6 +28,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sysfs.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "power/power.h"
@@ -148,6 +149,21 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
fwnode_links_purge_consumers(fwnode);
}
+static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+
+ /* Don't purge consumer links of an added child */
+ if (fwnode->dev)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_links_purge_consumers(fwnode);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ fw_devlink_purge_absent_suppliers(child);
+}
+
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
@@ -208,6 +224,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -221,7 +247,12 @@ int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -229,7 +260,8 @@ int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
if (link->consumer == target)
@@ -302,7 +334,8 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@@ -456,7 +489,9 @@ static int devlink_add_symlinks(struct device *dev,
struct device *con = link->consumer;
char *buf;
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
@@ -470,12 +505,12 @@ static int devlink_add_symlinks(struct device *dev,
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
@@ -483,7 +518,7 @@ static int devlink_add_symlinks(struct device *dev,
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +541,9 @@ static void devlink_remove_symlinks(struct device *dev,
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
@@ -514,9 +551,9 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
@@ -546,7 +583,8 @@ postcore_initcall(devlink_class_init);
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
- DL_FLAG_SYNC_STATE_ONLY)
+ DL_FLAG_SYNC_STATE_ONLY | \
+ DL_FLAG_INFERRED)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -615,7 +653,7 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_SYNC_STATE_ONLY &&
- flags != DL_FLAG_SYNC_STATE_ONLY) ||
+ (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -671,6 +709,10 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
+ if (link->flags & DL_FLAG_INFERRED &&
+ !(flags & DL_FLAG_INFERRED))
+ link->flags &= ~DL_FLAG_INFERRED;
+
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
@@ -737,8 +779,9 @@ struct device_link *device_link_add(struct device *consumer,
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
- dev_set_name(&link->link_dev, "%s--%s",
- dev_name(supplier), dev_name(consumer));
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);
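With the naming above, a link between a hypothetical platform supplier "ff000000.bus" and i2c consumer "1-004d" (device names invented) would produce:

	link device:        platform:ff000000.bus--i2c:1-004d
	under the supplier: consumer:i2c:1-004d
	under the consumer: supplier:platform:ff000000.bus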
@@ -929,6 +972,10 @@ int device_links_check_suppliers(struct device *dev)
mutex_lock(&fwnode_link_lock);
if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
!fw_devlink_is_permissive()) {
+ dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
+ list_first_entry(&dev->fwnode->suppliers,
+ struct fwnode_link,
+ c_hook)->supplier);
mutex_unlock(&fwnode_link_lock);
return -EPROBE_DEFER;
}
@@ -943,6 +990,8 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
+ dev_dbg(dev, "probe deferral - supplier %s not ready\n",
+ dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
@@ -1121,12 +1170,22 @@ void device_links_driver_bound(struct device *dev)
LIST_HEAD(sync_list);
/*
- * If a device probes successfully, it's expected to have created all
+ * If a device binds successfully, it's expected to have created all
* the device links it needs to or make new device links as it needs
- * them. So, it no longer needs to wait on any suppliers.
+ * them. So, fw_devlink no longer needs to create device links to any
+ * of the device's suppliers.
+ *
+ * Also, if a child firmware node of this bound device is not added as
+ * a device by now, assume it is never going to be added and make sure
+ * other devices don't defer probe indefinitely by waiting for such a
+ * child device.
*/
- if (dev->fwnode && dev->fwnode->dev == dev)
+ if (dev->fwnode && dev->fwnode->dev == dev) {
+ struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
+ fwnode_for_each_available_child_node(dev->fwnode, child)
+ fw_devlink_purge_absent_suppliers(child);
+ }
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
@@ -1437,7 +1496,14 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
-static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
+ DL_FLAG_SYNC_STATE_ONLY)
+#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
+ DL_FLAG_AUTOPROBE_CONSUMER)
+#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
+ DL_FLAG_PM_RUNTIME)
+
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
@@ -1446,17 +1512,23 @@ static int __init fw_devlink_setup(char *arg)
if (strcmp(arg, "off") == 0) {
fw_devlink_flags = 0;
} else if (strcmp(arg, "permissive") == 0) {
- fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
} else if (strcmp(arg, "on") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
} else if (strcmp(arg, "rpm") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
- DL_FLAG_PM_RUNTIME;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
}
return 0;
}
early_param("fw_devlink", fw_devlink_setup);
+static bool fw_devlink_strict;
+static int __init fw_devlink_strict_setup(char *arg)
+{
+ return strtobool(arg, &fw_devlink_strict);
+}
+early_param("fw_devlink.strict", fw_devlink_strict_setup);
+
u32 fw_devlink_get_flags(void)
{
return fw_devlink_flags;
@@ -1464,7 +1536,12 @@ u32 fw_devlink_get_flags(void)
static bool fw_devlink_is_permissive(void)
{
- return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+ return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
+}
+
+bool fw_devlink_is_strict(void)
+{
+ return fw_devlink_strict && !fw_devlink_is_permissive();
}
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
@@ -1487,6 +1564,53 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
}
/**
+ * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
+ * @con: Device to check dependencies for.
+ * @sup: Device to check against.
+ *
+ * Check if @sup depends on @con or any device dependent on it (its child or
+ * its consumer, etc.). When such a cyclic dependency is found, convert all
+ * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
+ * This is the equivalent of doing fw_devlink=permissive just between the
+ * devices in the cycle. We need to do this because, at this point, fw_devlink
+ * can't tell which of these dependencies is not a real dependency.
+ *
+ * Return 1 if a cycle is found. Otherwise, return 0.
+ */
+static int fw_devlink_relax_cycle(struct device *con, void *sup)
+{
+ struct device_link *link;
+ int ret;
+
+ if (con == sup)
+ return 1;
+
+ ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &con->links.consumers, s_node) {
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
+
+ if (!fw_devlink_relax_cycle(link->consumer, sup))
+ continue;
+
+ ret = 1;
+
+ if (!(link->flags & DL_FLAG_INFERRED))
+ continue;
+
+ pm_runtime_drop_link(link);
+ link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
+ dev_dbg(link->consumer, "Relaxing link with %s\n",
+ dev_name(link->supplier));
+ }
+ return ret;
+}
+
+/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con - Consumer device for the device link
* @sup_handle - fwnode handle of supplier
@@ -1514,15 +1638,39 @@ static int fw_devlink_create_devlink(struct device *con,
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
+ * If the supplier's driver is one of those that never actually
+ * bind to their device through the driver core, don't wait on
+ * this supplier device indefinitely.
+ */
+ if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
+ sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
* If this fails, it is due to cycles in device links. Just
* give up on this link and treat it as invalid.
*/
- if (!device_link_add(con, sup_dev, flags))
+ if (!device_link_add(con, sup_dev, flags) &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ dev_info(con, "Fixing up cyclic dependency with %s\n",
+ dev_name(sup_dev));
+ device_links_write_lock();
+ fw_devlink_relax_cycle(con, sup_dev);
+ device_links_write_unlock();
+ device_link_add(con, sup_dev,
+ FW_DEVLINK_FLAGS_PERMISSIVE);
ret = -EINVAL;
+ }
goto out;
}
+ /* Supplier that's already initialized without a struct device. */
+ if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
+ return -EINVAL;
+
/*
* DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
* cycles. So cycle detection isn't necessary and shouldn't be
@@ -1611,7 +1759,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
con_dev = NULL;
} else {
own_link = false;
- dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+ dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
}
}
@@ -1666,7 +1814,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
if (own_link)
dl_flags = fw_devlink_get_flags();
else
- dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+ dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
@@ -1808,9 +1956,7 @@ const char *dev_driver_string(const struct device *dev)
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
+ return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
@@ -2585,6 +2731,11 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ dev->dma_coherent = dma_default_coherent;
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -4414,6 +4565,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4589,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 2f32f38a11ed..9179825ff646 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -371,13 +371,6 @@ static void driver_bound(struct device *dev)
device_pm_check_callbacks(dev);
/*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
- device_pm_move_to_tail(dev);
-
- /*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
*/
@@ -619,6 +612,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index eac184e6d657..653c8c6ac7a7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -162,7 +162,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
+ err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -212,7 +212,8 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
+ err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ dev->devt);
if (!err) {
struct iattr newattrs;
@@ -221,7 +222,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
@@ -242,7 +243,8 @@ static int dev_rmdir(const char *name)
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(d_inode(parent.dentry), dentry);
+ err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ dentry);
else
err = -EPERM;
} else {
@@ -328,9 +330,10 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
- err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
+ err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
diff --git a/drivers/base/init.c b/drivers/base/init.c
index 908e6520e804..a9f57c22fb9e 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
*/
of_core_init();
platform_bus_init();
+ auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
container_dev_init();
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 2772f5d1948a..aa4737667026 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -51,7 +51,7 @@ static int isa_bus_remove(struct device *dev)
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->remove)
- return isa_driver->remove(dev, to_isa_dev(dev)->id);
+ isa_driver->remove(dev, to_isa_dev(dev)->id);
return 0;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index eef4ffb6122c..f35298425575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -35,7 +35,7 @@ static const char *const online_type_to_str[] = {
[MMOP_ONLINE_MOVABLE] = "online_movable",
};
-int memhp_online_type_from_str(const char *str)
+int mhp_online_type_from_str(const char *str)
{
int i;
@@ -253,7 +253,7 @@ static int memory_subsys_offline(struct device *dev)
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- const int online_type = memhp_online_type_from_str(buf);
+ const int online_type = mhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
int ret;
@@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
}
/*
- * phys_device is a bad name for this. What I really want
- * is a way to differentiate between memory ranges that
- * are part of physical devices that constitute
- * a complete removable unit or fru.
- * i.e. do these ranges belong to the same physical device,
- * s.t. if I offline all of these sections I can then
- * remove the physical device?
+ * Legacy interface that we cannot remove: s390x exposes the storage increment
+ * covered by a memory block, allowing for identifying which memory blocks
+ * comprise a storage increment. Since a memory block spans complete
+ * storage increments nowadays, this interface is basically unused. Other
+ * architectures never exposed a value other than 0.
*/
static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
- return sysfs_emit(buf, "%d\n", mem->phys_device);
+ return sysfs_emit(buf, "%d\n",
+ arch_get_memory_phys_device(start_pfn));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -387,19 +387,19 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[memhp_default_online_type]);
+ online_type_to_str[mhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- const int online_type = memhp_online_type_from_str(buf);
+ const int online_type = mhp_online_type_from_str(buf);
if (online_type < 0)
return -EINVAL;
- memhp_default_online_type = online_type;
+ mhp_default_online_type = online_type;
return count;
}
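The string parsing above serves the memory-hotplug sysfs interface; a typical invocation (the value must be one of the online_type_to_str entries):

	echo online_movable > /sys/devices/system/memory/auto_online_blocks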
@@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
-/*
- * Note that phys_device is optional. It is here to allow for
- * differentiation between which *physical* devices each
- * section belongs to...
- */
+/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
return 0;
@@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory)
static int init_memory_block(unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
- unsigned long start_pfn;
int ret = 0;
mem = find_memory_block_by_id(block_id);
@@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- start_pfn = section_nr_to_pfn(mem->start_section_nr);
- mem->phys_device = arch_get_memory_phys_device(start_pfn);
mem->nid = NUMA_NO_NODE;
ret = register_memory(mem);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 04f71c7bc3f8..f449dbb2c746 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -372,14 +372,19 @@ static ssize_t node_read_meminfo(struct device *dev,
struct pglist_data *pgdat = NODE_DATA(nid);
struct sysinfo i;
unsigned long sreclaimable, sunreclaimable;
+ unsigned long swapcached = 0;
si_meminfo_node(&i, nid);
sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
+#ifdef CONFIG_SWAP
+ swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
+#endif
len = sysfs_emit_at(buf, len,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
+ "Node %d SwapCached: %8lu kB\n"
"Node %d Active: %8lu kB\n"
"Node %d Inactive: %8lu kB\n"
"Node %d Active(anon): %8lu kB\n"
@@ -391,6 +396,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
+ nid, K(swapcached),
nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
node_page_state(pgdat, NR_ACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
@@ -461,16 +467,11 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
,
- nid, K(node_page_state(pgdat, NR_ANON_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
- HPAGE_PMD_NR)
+ nid, K(node_page_state(pgdat, NR_ANON_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
+ nid, K(node_page_state(pgdat, NR_FILE_THPS)),
+ nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
);
len += hugetlb_report_node_meminfo(buf, len, nid);
@@ -519,10 +520,14 @@ static ssize_t node_read_vmstat(struct device *dev,
sum_zone_numa_state(nid, i));
#endif
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- len += sysfs_emit_at(buf, len, "%s %lu\n",
- node_stat_name(i),
- node_page_state_pages(pgdat, i));
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+ unsigned long pages = node_page_state_pages(pgdat, i);
+
+ if (vmstat_item_print_in_thp(i))
+ pages /= HPAGE_PMD_NR;
+ len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
+ pages);
+ }
return len;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 95fd1549f87d..6e1f8e0b661c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
return -ERANGE;
nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
if (nvec < minvec)
return -ENOSPC;
@@ -571,7 +573,7 @@ static void platform_device_release(struct device *dev)
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
- of_device_node_put(&pa->pdev.dev);
+ of_node_put(pa->pdev.dev.of_node);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
@@ -1461,13 +1463,16 @@ static int platform_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- int ret = 0;
- if (drv->remove)
- ret = drv->remove(dev);
+ if (drv->remove) {
+ int ret = drv->remove(dev);
+
+ if (ret)
+ dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
+ }
dev_pm_domain_detach(_dev, true);
- return ret;
+ return 0;
}
static void platform_shutdown(struct device *_dev)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ced6863a16a5..84d5acb6301b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -23,6 +23,7 @@
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
@@ -32,9 +33,113 @@ struct pm_clock_entry {
char *con_id;
struct clk *clk;
enum pce_status status;
+ bool enabled_when_prepared;
};
/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clk_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against concurrent use of clock_op_might_sleep and the
+ * PM clock entry list by pm_clk_suspend()/pm_clk_resume(), which may or may not
+ * happen in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non-atomic context. If clock_op_might_sleep == 0, these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+ pr_err("%s: atomic context with clock_ops_might_sleep = %d",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+ * There was a possibility for psd->clock_op_might_sleep
+ * to become 0 above. Keep the mutex only if not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
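The calling pattern these two helpers expect, as a minimal sketch given a struct pm_subsys_data *psd (pm_clk_suspend()/pm_clk_resume() below follow it):

	unsigned long flags;
	int ret;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;	/* atomic context, but clock ops might sleep */
	/* ... walk psd->clock_list, enabling or disabling clocks ... */
	pm_clk_op_unlock(psd, &flags);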
+
+/**
* pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce
{
int ret;
- if (ce->status < PCE_STATUS_ERROR) {
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
- if (!ret)
- ce->status = PCE_STATUS_ENABLED;
- else
- dev_err(dev, "%s: failed to enable clk %p, error %d\n",
- __func__, ce->clk, ret);
+ break;
+ default:
+ return;
}
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
}
/**
@@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
+ ce->status = PCE_STATUS_ACQUIRED;
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
} else {
- if (clk_prepare(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- dev_err(dev, "clk_prepare() failed\n");
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev,
- "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
- }
+ ce->status = PCE_STATUS_PREPARED;
}
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
static int __pm_clk_add(struct device *dev, const char *con_id,
@@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
pm_clk_acquire(dev, ce);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
return 0;
}
@@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (!ce)
return;
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED) {
- clk_unprepare(ce->clk);
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
clk_put(ce->clk);
- }
+ break;
+ default:
+ break;
}
kfree(ce->con_id);
@@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
if (!psd)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
@@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
if (!psd || !clk)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object, and set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
+ if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
}
EXPORT_SYMBOL_GPL(pm_clk_init);
@@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev)
INIT_LIST_HEAD(&list);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
@@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
+ ce->status = PCE_STATUS_PREPARED;
+ }
}
}
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a14eedacb92..78c310d3179d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -297,6 +297,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
return state;
}
+static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *parent,
+ unsigned int pstate)
+{
+ if (!parent->set_performance_state)
+ return pstate;
+
+ return dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ pstate);
+}
+
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
@@ -311,13 +323,8 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
list_for_each_entry(link, &genpd->child_links, child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
/* Find parent's performance state */
- ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- parent->opp_table,
- state);
+ ret = genpd_xlate_performance_state(genpd, parent, state);
if (unlikely(ret < 0))
goto err;
@@ -339,9 +346,11 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
goto err;
}
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+ }
genpd->performance_state = state;
return 0;
@@ -352,9 +361,6 @@ err:
child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
genpd_lock_nested(parent, depth + 1);
parent_state = link->prev_performance_state;
@@ -399,9 +405,6 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (!genpd)
return -ENODEV;
- if (unlikely(!genpd->set_performance_state))
- return -EINVAL;
-
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
@@ -423,6 +426,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+/**
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
+ *
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ *
+ *
+ * Allow devices to inform the PM framework of their next wakeup. It's assumed
+ * that the users guarantee the genpd won't be detached while this routine is
+ * running, and that @dev isn't runtime suspended (RPM_SUSPENDED).
+ *
+ * Devices are also expected to update next_wakeup once their use case ends,
+ * but they may not always know when that happens, so a stale @next is ignored
+ * when powering off the domain.
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->next_wakeup = next;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
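A minimal usage sketch (driver and function name hypothetical; assumes @dev is attached to a genpd and not runtime suspended):

	static void foo_enter_idle(struct device *dev)
	{
		/* The next interrupt is known to fire in roughly 2 ms. */
		dev_pm_genpd_set_next_wakeup(dev, ktime_add_us(ktime_get(), 2000));
	}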
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -934,8 +966,7 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!pm_runtime_is_irq_safe(dev) ||
- (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
@@ -1465,6 +1496,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ gpd_data->next_wakeup = KTIME_MAX;
spin_lock_irq(&dev->power.lock);
@@ -2164,6 +2196,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
cp->node = of_node_get(np);
cp->data = data;
cp->xlate = xlate;
+ fwnode_dev_initialized(&np->fwnode, true);
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
@@ -2353,6 +2386,7 @@ void of_genpd_del_provider(struct device_node *np)
}
}
+ fwnode_dev_initialized(&cp->node->fwnode, false);
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);
@@ -2463,7 +2497,7 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
out:
mutex_unlock(&gpd_list_lock);
- return ret;
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
@@ -2952,7 +2986,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
- seq_puts(s, p);
+ seq_printf(s, "%-25s ", p);
+}
+
+static void perf_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ seq_put_decimal_ull(s, "", gpd_data->performance_state);
}
static int genpd_summary_one(struct seq_file *s,
@@ -2980,7 +3022,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-15s ", genpd->name, state);
+ seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@@ -2988,6 +3030,8 @@ static int genpd_summary_one(struct seq_file *s,
* Also genpd->name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (list_is_first(&link->parent_node, &genpd->parent_links))
+ seq_printf(s, "\n%48s", " ");
seq_printf(s, "%s", link->child->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
@@ -3002,6 +3046,7 @@ static int genpd_summary_one(struct seq_file *s,
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
+ perf_status_str(s, pm_data->dev);
kfree(kobj_path);
}
@@ -3017,9 +3062,9 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status children\n");
+ seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 490ed7deb99a..c6c218758f0b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev)
return td->cached_suspend_ok;
}
+static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
+{
+ ktime_t domain_wakeup = KTIME_MAX;
+ ktime_t next_wakeup;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
+ return;
+
+ /*
+ * Devices that have a predictable wakeup pattern may specify
+ * their next wakeup. Find the next wakeup across all the devices
+ * attached to this domain and across all its sub-domains.
+ * A component's next wakeup may have become stale by the time we
+ * read it here; stale values are ignored so that the domain can
+ * still enter its optimal idle state.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ next_wakeup = to_gpd_data(pdd)->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ next_wakeup = link->child->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ genpd->next_wakeup = domain_wakeup;
+}
+
+static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
+ unsigned int state, ktime_t now)
+{
+ ktime_t domain_wakeup = genpd->next_wakeup;
+ s64 idle_time_ns, min_sleep_ns;
+
+ min_sleep_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].residency_ns;
+
+ idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+
+ return idle_time_ns >= min_sleep_ns;
+}
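Worked example: with power_off_latency_ns = 500000 (0.5 ms) and residency_ns = 2000000 (2 ms), min_sleep_ns is 2500000; a next wakeup 3 ms away permits the state, while one 1 ms away does not.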
+
static bool __default_power_down_ok(struct dev_pm_domain *pd,
unsigned int state)
{
@@ -201,16 +250,41 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
}
/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
+ * _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ int state_idx = genpd->state_count - 1;
struct gpd_link *link;
+ /*
+ * Find the next wakeup from the devices that can determine their own,
+ * doing so for every device down the hierarchy, to learn when the
+ * domain would wake up. It is not worthwhile to sleep if a state's
+ * residency requirement cannot be met.
+ */
+ update_domain_next_wakeup(genpd, now);
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
+ /* Find the deepest domain idle state that the next wakeup still allows */
+ while (state_idx >= 0) {
+ if (next_wakeup_allows_state(genpd, state_idx, now)) {
+ genpd->max_off_time_changed = true;
+ break;
+ }
+ state_idx--;
+ }
+
+ if (state_idx < 0) {
+ state_idx = 0;
+ genpd->cached_power_down_ok = false;
+ goto done;
+ }
+ }
+
if (!genpd->max_off_time_changed) {
genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
@@ -228,21 +302,30 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
genpd->cached_power_down_ok = true;
- genpd->state_idx = genpd->state_count - 1;
- /* Find a state to power down to, starting from the deepest. */
- while (!__default_power_down_ok(pd, genpd->state_idx)) {
- if (genpd->state_idx == 0) {
+ /*
+ * Find a state to power down to, starting from the state
+ * determined by the next wakeup.
+ */
+ while (!__default_power_down_ok(pd, state_idx)) {
+ if (state_idx == 0) {
genpd->cached_power_down_ok = false;
break;
}
- genpd->state_idx--;
+ state_idx--;
}
+done:
+ genpd->state_idx = state_idx;
genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ return _default_power_down_ok(pd, ktime_get());
+}
+
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
@@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
s64 idle_duration_ns;
int cpu, i;
/* Validate dev PM QoS constraints. */
- if (!default_power_down_ok(pd))
+ if (!_default_power_down_ok(pd, now))
return false;
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
@@ -280,7 +364,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
}
/* The minimum idle duration is from now - until the next wakeup. */
- idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
return false;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 46793276598d..f893c3c5af07 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -16,6 +16,7 @@
*/
#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- pr_err("Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -1897,8 +1898,8 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- pr_info("Device %s not prepared for power transition: code %d\n",
- dev_name(dev), error);
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
put_device(dev);
break;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bfda153b1a41..a46a7e30881b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* suspending the device when both its runtime PM status is %RPM_ACTIVE and its
* runtime PM usage counter is not zero.
*
- * The caller is resposible for decrementing the runtime PM usage counter of
+ * The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 35b95c6ac0c6..1421e9548857 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -837,9 +837,15 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
+ *
+ * For fwnode types that don't implement the .device_is_available()
+ * operation, this function returns true.
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
+ if (!fwnode_has_op(fwnode, device_is_available))
+ return true;
+
return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
@@ -1209,7 +1215,14 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
best_ep_id = fwnode_ep.id;
}
- return best_ep;
+ if (best_ep)
+ return best_ep;
+
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port,
+ endpoint, flags);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7f4b3b62492c..f2469d3435ca 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -68,7 +68,7 @@ static int regcache_hw_init(struct regmap *map)
map->cache_bypass = cache_bypass;
if (ret == 0) {
map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
+ map->cache_free = true;
} else {
kfree(tmp_buf);
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 8ce30650b97c..fe3ac26b66ad 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -15,11 +15,11 @@ static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int va
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int ret;
- ret = sdw_write(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
if (ret < 0)
return ret;
- return sdw_write(slave, reg, val & 0xff);
+ return sdw_write_no_pm(slave, reg, val & 0xff);
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
@@ -29,11 +29,11 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *va
int read0;
int read1;
- read0 = sdw_read(slave, reg);
+ read0 = sdw_read_no_pm(slave, reg);
if (read0 < 0)
return read0;
- read1 = sdw_read(slave, SDW_SDCA_MBQ_CTL(reg));
+ read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
if (read1 < 0)
return read1;
@@ -98,4 +98,4 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index c83be26434e7..966de8a136d9 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -13,7 +13,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- return sdw_write(slave, reg, val);
+ return sdw_write_no_pm(slave, reg, val);
}
static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
@@ -22,7 +22,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read;
- read = sdw_read(slave, reg);
+ read = sdw_read_no_pm(slave, reg);
if (read < 0)
return read;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4a4b2008fbc2..37179a8b1ceb 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -24,6 +24,7 @@ struct swnode {
struct swnode *parent;
unsigned int allocated:1;
+ unsigned int managed:1;
};
static DEFINE_IDA(swnode_root_ids);
@@ -48,6 +49,19 @@ EXPORT_SYMBOL_GPL(is_software_node);
struct swnode, fwnode) : NULL; \
})
+static inline struct swnode *dev_to_swnode(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (!fwnode)
+ return NULL;
+
+ if (!is_software_node(fwnode))
+ fwnode = fwnode->secondary;
+
+ return to_swnode(fwnode);
+}
+
static struct swnode *
software_node_to_swnode(const struct software_node *node)
{
@@ -443,14 +457,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
- (c && list_is_last(&c->entry, &p->children)))
+ (c && list_is_last(&c->entry, &p->children))) {
+ fwnode_handle_put(child);
return NULL;
+ }
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
- return &c->fwnode;
+
+ fwnode_handle_put(child);
+ return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *
@@ -536,6 +554,115 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
return 0;
}
+static struct fwnode_handle *
+swnode_graph_find_next_port(const struct fwnode_handle *parent,
+ struct fwnode_handle *port)
+{
+ struct fwnode_handle *old = port;
+
+ while ((port = software_node_get_next_child(parent, old))) {
+ /*
+ * fwnode ports have naming style "port@", so we search for any
+ * children that follow that convention.
+ */
+ if (!strncmp(to_swnode(port)->node->name, "port@",
+ strlen("port@")))
+ return port;
+ old = port;
+ }
+
+ return NULL;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ struct fwnode_handle *parent;
+ struct fwnode_handle *port;
+
+ if (!swnode)
+ return NULL;
+
+ if (endpoint) {
+ port = software_node_get_parent(endpoint);
+ parent = software_node_get_parent(port);
+ } else {
+ parent = software_node_get_named_child_node(fwnode, "ports");
+ if (!parent)
+ parent = software_node_get(&swnode->fwnode);
+
+ port = swnode_graph_find_next_port(parent, NULL);
+ }
+
+ for (; port; port = swnode_graph_find_next_port(parent, port)) {
+ endpoint = software_node_get_next_child(port, endpoint);
+ if (endpoint) {
+ fwnode_handle_put(port);
+ break;
+ }
+ }
+
+ fwnode_handle_put(parent);
+
+ return endpoint;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_ref_args *ref;
+ const struct property_entry *prop;
+
+ if (!swnode)
+ return NULL;
+
+ prop = property_entry_get(swnode->node->properties, "remote-endpoint");
+ if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
+ return NULL;
+
+ ref = prop->pointer;
+
+ return software_node_get(software_node_fwnode(ref[0].node));
+}
+
+static struct fwnode_handle *
+software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ swnode = swnode->parent;
+ if (swnode && !strcmp(swnode->node->name, "ports"))
+ swnode = swnode->parent;
+
+ return swnode ? software_node_get(&swnode->fwnode) : NULL;
+}
+
+static int
+software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const char *parent_name = swnode->parent->node->name;
+ int ret;
+
+ if (strlen("port@") >= strlen(parent_name) ||
+ strncmp(parent_name, "port@", strlen("port@")))
+ return -EINVAL;
+
+	/* Ports have naming style "port@n", so we need to select the n */
+ ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
+ if (ret)
+ return ret;
+
+ endpoint->id = swnode->id;
+ endpoint->local_fwnode = fwnode;
+
+ return 0;
+}
+
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
@@ -547,7 +674,11 @@ static const struct fwnode_operations software_node_ops = {
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
- .get_reference_args = software_node_get_reference_args
+ .get_reference_args = software_node_get_reference_args,
+ .graph_get_next_endpoint = software_node_graph_get_next_endpoint,
+ .graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
+ .graph_get_port_parent = software_node_graph_get_port_parent,
+ .graph_parse_endpoint = software_node_graph_parse_endpoint,
};
/* -------------------------------------------------------------------------- */
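To make the conventions assumed by the new graph operations concrete, here is an illustrative (invented) node layout they can walk: ports must be children named "port@<n>", any child of a port is treated as an endpoint, and "remote-endpoint" must be a non-inline reference property:

	/* All names hypothetical; isp_ep is the remote endpoint's
	 * software_node, assumed to be defined and registered elsewhere. */
	static const struct software_node cam = { .name = "cam" };
	static const struct software_node cam_port0 = {
		.name = "port@0",
		.parent = &cam,
	};
	static const struct property_entry cam_ep_props[] = {
		PROPERTY_ENTRY_REF("remote-endpoint", &isp_ep),
		{ }
	};
	static const struct software_node cam_ep = {
		.name = "endpoint",
		.parent = &cam_port0,
		.properties = cam_ep_props,
	};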
@@ -688,7 +819,11 @@ out_err:
* software_node_register_nodes - Register an array of software nodes
* @nodes: Zero terminated array of software nodes to be registered
*
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it is, either outside
+ * of this function or by ordering the array such that parent comes before
+ * child.
*/
int software_node_register_nodes(const struct software_node *nodes)
{
@@ -696,14 +831,23 @@ int software_node_register_nodes(const struct software_node *nodes)
int i;
for (i = 0; nodes[i].name; i++) {
- ret = software_node_register(&nodes[i]);
- if (ret) {
- software_node_unregister_nodes(nodes);
- return ret;
+ const struct software_node *parent = nodes[i].parent;
+
+ if (parent && !software_node_to_swnode(parent)) {
+ ret = -EINVAL;
+ goto err_unregister_nodes;
}
+
+ ret = software_node_register(&nodes[i]);
+ if (ret)
+ goto err_unregister_nodes;
}
return 0;
+
+err_unregister_nodes:
+ software_node_unregister_nodes(nodes);
+ return ret;
}
EXPORT_SYMBOL_GPL(software_node_register_nodes);
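A hedged sketch of the ordering this now enforces (names invented). The zero-terminated array lists the parent before the child; registration fails with -EINVAL otherwise, and the reversed unregistration below removes the child first:

	/* Hypothetical pair; &nodes[0] is a valid address constant here. */
	static const struct software_node nodes[] = {
		{ .name = "parent" },
		{ .name = "child", .parent = &nodes[0] },
		{ }	/* zero terminator */
	};

	int ret = software_node_register_nodes(nodes);
	/* ... */
	software_node_unregister_nodes(nodes);	/* unwinds child first */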
@@ -711,18 +855,23 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* software_node_unregister_nodes - Unregister an array of software nodes
* @nodes: Zero terminated array of software nodes to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
*
- * NOTE: Be careful using this call if the nodes had parent pointers set up in
- * them before registering. If so, it is wiser to remove the nodes
- * individually, in the correct order (child before parent) instead of relying
- * on the sequential order of the list of nodes in the array.
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- int i;
+ unsigned int i = 0;
+
+ while (nodes[i].name)
+ i++;
- for (i = 0; nodes[i].name; i++)
+ while (i--)
software_node_unregister(&nodes[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
@@ -757,16 +906,23 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. The array will be unwound in
+ * reverse order (i.e. last entry first), so if any members of the array are
+ * children of another member, the children must appear later in the list so
+ * that they are unregistered first.
*/
-void software_node_unregister_node_group(const struct software_node **node_group)
+void software_node_unregister_node_group(
+ const struct software_node **node_group)
{
- unsigned int i;
+ unsigned int i = 0;
if (!node_group)
return;
- for (i = 0; node_group[i]; i++)
+ while (node_group[i])
+ i++;
+
+ while (i--)
software_node_unregister(node_group[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
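Similarly, a small invented example of a NULL-terminated group laid out so that the last-entry-first unwind removes children before parents:

	/* Hypothetical nodes, assumed registered as a group elsewhere. */
	static const struct software_node parent = { .name = "parent" };
	static const struct software_node child = {
		.name = "child",
		.parent = &parent,
	};
	static const struct software_node *group[] = {
		&parent,
		&child,		/* unregistered first by the reverse walk */
		NULL
	};

	software_node_unregister_node_group(group);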
@@ -843,22 +999,99 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
+/**
+ * device_add_software_node - Assign software node to a device
+ * @dev: The device the software node is meant for.
+ * @swnode: The software node.
+ *
+ * This function will register @swnode and make it the secondary firmware node
+ * pointer of @dev. If @dev has no primary node, then @swnode will become the primary
+ * node.
+ */
+int device_add_software_node(struct device *dev, const struct software_node *swnode)
+{
+ int ret;
+
+ /* Only one software node per device. */
+ if (dev_to_swnode(dev))
+ return -EBUSY;
+
+ ret = software_node_register(swnode);
+ if (ret)
+ return ret;
+
+ set_secondary_fwnode(dev, software_node_fwnode(swnode));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_software_node);
+
+/**
+ * device_remove_software_node - Remove device's software node
+ * @dev: The device with the software node.
+ *
+ * This function will unregister the software node of @dev.
+ */
+void device_remove_software_node(struct device *dev)
+{
+ struct swnode *swnode;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ software_node_notify(dev, KOBJ_REMOVE);
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(device_remove_software_node);
+
+/**
+ * device_create_managed_software_node - Create a software node for a device
+ * @dev: The device the software node is assigned to.
+ * @properties: Device properties for the software node.
+ * @parent: Parent of the software node.
+ *
+ * Creates a software node as a managed resource for @dev, which means the
+ * lifetime of the newly created software node is tied to the lifetime of @dev.
+ * Software nodes created with this function should not be reused or shared
+ * because of that. The function takes a deep copy of @properties for the
+ * software node.
+ *
+ * Since the new software node is assigned directly to @dev, and since it should
+ * not be shared, it is not returned to the caller. The function returns 0 on
+ * success, and errno in case of an error.
+ */
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent)
+{
+ struct fwnode_handle *p = software_node_fwnode(parent);
+ struct fwnode_handle *fwnode;
+
+ if (parent && !p)
+ return -EINVAL;
+
+ fwnode = fwnode_create_software_node(properties, p);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_create_managed_software_node);
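A brief, hypothetical probe-time usage sketch (property name and value invented): the managed node is released together with @dev, so the caller has nothing to unwind:

	static const struct property_entry props[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 400000),
		{ }	/* terminator */
	};
	int err;

	/* Deep-copies props; the node must not be shared between devices. */
	err = device_create_managed_software_node(dev, props, NULL);
	if (err)
		return err;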
+
int software_node_notify(struct device *dev, unsigned long action)
{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
struct swnode *swnode;
int ret;
- if (!fwnode)
- return 0;
-
- if (!is_software_node(fwnode))
- fwnode = fwnode->secondary;
- if (!is_software_node(fwnode))
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
return 0;
- swnode = to_swnode(fwnode);
-
switch (action) {
case KOBJ_ADD:
ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
@@ -878,6 +1111,11 @@ int software_node_notify(struct device *dev, unsigned long action)
sysfs_remove_link(&swnode->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "software_node");
kobject_put(&swnode->kobj);
+
+ if (swnode->managed) {
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+ }
break;
default:
break;
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 3ca56367c84b..2f15fae8625f 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 262326973ee0..fd236158f32d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -66,6 +66,12 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
+config N64CART
+ bool "N64 cart support"
+ depends on MACH_NINTENDO64
+ help
+ Support for the N64 cart.
+
config CDROM
tristate
select BLK_SCSI_REQUEST
@@ -267,16 +273,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_SKD
- tristate "STEC S1120 Block Driver"
- depends on PCI
- depends on 64BIT
- help
- Saying Y or M here will enable support for the
- STEC, Inc. S1120 PCIe SSD.
-
-	  Use device /dev/skd$N and /dev/skd$Np$M.
-
config BLK_DEV_SX8
tristate "Promise SATA SX8 support"
depends on PCI
@@ -445,6 +441,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3170859e01d..e3e3f1c79a82 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,12 +17,12 @@ obj-$(CONFIG_PS3_DISK) += ps3disk.o
obj-$(CONFIG_PS3_VRAM) += ps3vram.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
+obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
-obj-$(CONFIG_BLK_DEV_SKD) += skd.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -43,5 +43,4 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
-skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ac720bdcd983..ecd77897a761 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1046,7 +1046,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
__blk_mq_end_request(rq, err);
- /* cf. http://lkml.org/lkml/2006/10/31/28 */
+ /* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
if (!fastfail)
blk_mq_run_hw_queues(q, true);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c43a6ab4b1f3..18bf99906662 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -284,15 +284,11 @@ out:
static blk_qc_t brd_submit_bio(struct bio *bio)
{
- struct brd_device *brd = bio->bi_disk->private_data;
+ struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
+ sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
- sector_t sector;
struct bvec_iter iter;
- sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
- goto io_error;
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 7227fc7ab8ed..72cf7603d51f 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,7 +138,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_drbd(GFP_NOIO);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index df53dca5d02c..c1f816f896a8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,7 +976,7 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_drbd(GFP_NOIO);
+ struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
struct page *page;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8f879e5c2f67..7d9cc433b758 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1422,8 +1422,6 @@ extern mempool_t drbd_md_io_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
extern struct bio_set drbd_md_io_bio_set;
-/* to allocate from that set */
-extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
/* And a bio_set for cloning */
extern struct bio_set drbd_io_bio_set;
@@ -1449,7 +1447,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
-extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
+extern void __drbd_make_request(struct drbd_device *, struct bio *);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1579,8 +1577,8 @@ static inline void drbd_submit_bio_noacct(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_disk) {
- drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
+ if (!bio->bi_bdev) {
+ drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1c8c18b2a25f..25cd8a2f729d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -138,19 +138,6 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-struct bio *bio_alloc_drbd(gfp_t gfp_mask)
-{
- struct bio *bio;
-
- if (!bioset_initialized(&drbd_md_io_bio_set))
- return bio_alloc(gfp_mask, 1);
-
- bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
- if (!bio)
- return NULL;
- return bio;
-}
-
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real functions sparse works.
@@ -2288,7 +2275,6 @@ static void do_retry(struct work_struct *ws)
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
- unsigned long start_jif = req->start_jif;
bool expected;
expected =
@@ -2323,7 +2309,7 @@ static void do_retry(struct work_struct *ws)
/* We are not just doing submit_bio_noacct(),
* as we want to keep the start_time information. */
inc_ap_bio(device);
- __drbd_make_request(device, bio, start_jif);
+ __drbd_make_request(device, bio);
}
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 09c86ef3f0fd..c3f09a122f20 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -111,8 +111,10 @@ static struct page *page_chain_tail(struct page *page, int *len)
{
struct page *tmp;
int i = 1;
- while ((tmp = page_chain_next(page)))
- ++i, page = tmp;
+ while ((tmp = page_chain_next(page))) {
+ ++i;
+ page = tmp;
+ }
if (len)
*len = i;
return page;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 330f851cb8f0..9398c2c2cb2d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,7 +30,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- drbd_req_make_private_bio(req, bio_src);
+ req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
+
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
@@ -1188,7 +1191,7 @@ static void drbd_queue_write(struct drbd_device *device, struct drbd_request *re
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
static struct drbd_request *
-drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
+drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
@@ -1416,9 +1419,9 @@ out:
complete_master_bio(device, &m);
}
-void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
+void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{
- struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
+ struct drbd_request *req = drbd_request_prepare(device, bio);
if (IS_ERR_OR_NULL(req))
return;
drbd_send_and_submit(device, req);
@@ -1595,20 +1598,17 @@ void do_submit(struct work_struct *ws)
blk_qc_t drbd_submit_bio(struct bio *bio)
{
- struct drbd_device *device = bio->bi_disk->private_data;
- unsigned long start_jif;
+ struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
blk_queue_split(&bio);
- start_jif = jiffies;
-
/*
* what we "blindly" assume:
*/
D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(device);
- __drbd_make_request(device, bio, start_jif);
+ __drbd_make_request(device, bio);
return BLK_QC_T_NONE;
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 55bb0f8721fa..511f39a08de4 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -256,18 +256,6 @@ enum drbd_req_state_bits {
#define MR_WRITE 1
#define MR_READ 2
-static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
-{
- struct bio *bio;
- bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
-
- req->private_bio = bio;
-
- bio->bi_private = req;
- bio->bi_end_io = drbd_request_endio;
- bio->bi_next = NULL;
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_iter.bi_size, or similar. But that would be too ugly. */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 02044ab7f767..64563bfdf0da 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1523,8 +1523,11 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
- drbd_req_make_private_bio(req, req->master_bio);
+ req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+ &drbd_io_bio_set);
bio_set_dev(req->private_bio, device->ldev->backing_bdev);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
return 0;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index dfe1dfc901cc..0b71292d9d5a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4121,23 +4121,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
- if (!(mode & FMODE_NDELAY)) {
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- drive_state[drive].last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
- &drive_state[drive].flags);
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
- if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
- goto out;
- }
- res = -EROFS;
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ drive_state[drive].last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
+
+ res = -EROFS;
+
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
+ goto out;
+
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e5ff328f0917..a370cde3ddd4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -663,7 +663,7 @@ static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+ return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}
static int loop_validate_file(struct file *file, struct block_device *bdev)
@@ -704,7 +704,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
int error;
bool partscan;
- error = mutex_lock_killable(&loop_ctl_mutex);
+ error = mutex_lock_killable(&lo->lo_mutex);
if (error)
return error;
error = -ENXIO;
@@ -743,9 +743,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
- * We must drop file reference outside of loop_ctl_mutex as dropping
+ * We must drop file reference outside of lo_mutex as dropping
* the file ref can take bd_mutex which creates circular locking
* dependency.
*/
@@ -755,7 +755,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return 0;
out_err:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (file)
fput(file);
return error;
@@ -1092,7 +1092,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
goto out_putf;
}
- error = mutex_lock_killable(&loop_ctl_mutex);
+ error = mutex_lock_killable(&lo->lo_mutex);
if (error)
goto out_bdev;
@@ -1171,7 +1171,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
if (!(mode & FMODE_EXCL))
@@ -1179,7 +1179,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
return 0;
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
out_bdev:
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
@@ -1200,7 +1200,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
bool partscan = false;
int lo_number;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
err = -ENXIO;
goto out_unlock;
@@ -1212,6 +1212,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
goto out_unlock;
}
+ if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
+ blk_queue_write_cache(lo->lo_queue, false, false);
+
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);
@@ -1253,7 +1256,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo_number = lo->lo_number;
loop_unprepare_queue(lo);
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan) {
/*
* bd_mutex has been held already in release path, so don't
@@ -1284,18 +1287,17 @@ out_unlock:
* protects us from all the other places trying to change the 'lo'
* device.
*/
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
lo->lo_state = Lo_unbound;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
- * Need not hold loop_ctl_mutex to fput backing file.
- * Calling fput holding loop_ctl_mutex triggers a circular
- * lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before loop_ctl_mutex.
+	 * We need not hold lo_mutex to fput the backing file. Calling fput while
+	 * holding lo_mutex triggers a possible circular lock dependency warning,
+	 * as fput can take bd_mutex, which is usually taken before lo_mutex.
*/
if (filp)
fput(filp);
@@ -1306,11 +1308,11 @@ static int loop_clr_fd(struct loop_device *lo)
{
int err;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return -ENXIO;
}
/*
@@ -1325,11 +1327,11 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return 0;
}
lo->lo_state = Lo_rundown;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return __loop_clr_fd(lo, false);
}
@@ -1344,7 +1346,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
bool partscan = false;
bool size_changed = false;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_encrypt_key_size &&
@@ -1411,7 +1413,7 @@ out_unfreeze:
partscan = true;
}
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
@@ -1425,11 +1427,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
struct kstat stat;
int ret;
- ret = mutex_lock_killable(&loop_ctl_mutex);
+ ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
return ret;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return -ENXIO;
}
@@ -1448,10 +1450,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
lo->lo_encrypt_key_size);
}
- /* Drop loop_ctl_mutex while we call into the filesystem. */
+ /* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
path_get(&path);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
@@ -1637,7 +1639,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
{
int err;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
switch (cmd) {
@@ -1653,7 +1655,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return err;
}
@@ -1879,27 +1881,33 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
struct loop_device *lo;
int err;
+ /*
+	 * Take loop_ctl_mutex to protect the lo pointer from a race with
+	 * loop_control_ioctl(LOOP_CTL_REMOVE); however, to reduce contention,
+	 * release it prior to updating lo->lo_refcnt.
+ */
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
return err;
lo = bdev->bd_disk->private_data;
if (!lo) {
- err = -ENXIO;
- goto out;
+ mutex_unlock(&loop_ctl_mutex);
+ return -ENXIO;
}
-
- atomic_inc(&lo->lo_refcnt);
-out:
+ err = mutex_lock_killable(&lo->lo_mutex);
mutex_unlock(&loop_ctl_mutex);
- return err;
+ if (err)
+ return err;
+ atomic_inc(&lo->lo_refcnt);
+ mutex_unlock(&lo->lo_mutex);
+ return 0;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
{
- struct loop_device *lo;
+ struct loop_device *lo = disk->private_data;
- mutex_lock(&loop_ctl_mutex);
- lo = disk->private_data;
+ mutex_lock(&lo->lo_mutex);
if (atomic_dec_return(&lo->lo_refcnt))
goto out_unlock;
@@ -1907,7 +1915,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
if (lo->lo_state != Lo_bound)
goto out_unlock;
lo->lo_state = Lo_rundown;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
@@ -1924,7 +1932,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
}
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
}
static const struct block_device_operations lo_fops = {
@@ -1963,10 +1971,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return 0;
}
@@ -2152,6 +2160,7 @@ static int loop_add(struct loop_device **l, int i)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
atomic_set(&lo->lo_refcnt, 0);
+ mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
@@ -2182,6 +2191,7 @@ static void loop_remove(struct loop_device *lo)
blk_cleanup_queue(lo->lo_queue);
blk_mq_free_tag_set(&lo->tag_set);
put_disk(lo->lo_disk);
+ mutex_destroy(&lo->lo_mutex);
kfree(lo);
}
@@ -2261,15 +2271,21 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
+ ret = mutex_lock_killable(&lo->lo_mutex);
+ if (ret)
+ break;
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
+ mutex_unlock(&lo->lo_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
+ mutex_unlock(&lo->lo_mutex);
break;
}
lo->lo_disk->private_data = NULL;
+ mutex_unlock(&lo->lo_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index af75a5ee4094..a3c04f310672 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -62,6 +62,7 @@ struct loop_device {
struct request_queue *lo_queue;
struct blk_mq_tag_set tag_set;
struct gendisk *lo_disk;
+ struct mutex lo_mutex;
};
struct loop_cmd {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 53ac59d19ae5..3be0dbc674bd 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1015,7 +1015,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
- blk_execute_rq(rq->q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
@@ -3924,23 +3924,18 @@ static DEFINE_HANDLER(7);
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
- int pos;
unsigned short pcie_dev_ctrl;
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pos) {
- pci_read_config_word(pdev,
- pos + PCI_EXP_DEVCTL,
- &pcie_dev_ctrl);
- if (pcie_dev_ctrl & (1 << 11) ||
- pcie_dev_ctrl & (1 << 4)) {
+ if (pci_is_pcie(pdev)) {
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
+ if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
+ pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
dev_info(&dd->pdev->dev,
"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
pdev->vendor, pdev->device);
pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
PCI_EXP_DEVCTL_RELAX_EN);
- pci_write_config_word(pdev,
- pos + PCI_EXP_DEVCTL,
+ pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
pcie_dev_ctrl);
}
}
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
new file mode 100644
index 000000000000..47bdf324e962
--- /dev/null
+++ b/drivers/block/n64cart.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the N64 cart.
+ *
+ * Copyright (c) 2021 Lauri Kasanen
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+enum {
+ PI_DRAM_REG = 0,
+ PI_CART_REG,
+ PI_READ_REG,
+ PI_WRITE_REG,
+ PI_STATUS_REG,
+};
+
+#define PI_STATUS_DMA_BUSY (1 << 0)
+#define PI_STATUS_IO_BUSY (1 << 1)
+
+#define CART_DOMAIN 0x10000000
+#define CART_MAX 0x1FFFFFFF
+
+#define MIN_ALIGNMENT 8
+
+static u32 __iomem *reg_base;
+
+static unsigned int start;
+module_param(start, uint, 0);
+MODULE_PARM_DESC(start, "Start address of the cart block data");
+
+static unsigned int size;
+module_param(size, uint, 0);
+MODULE_PARM_DESC(size, "Size of the cart block data, in bytes");
+
+static void n64cart_write_reg(const u8 reg, const u32 value)
+{
+ writel(value, reg_base + reg);
+}
+
+static u32 n64cart_read_reg(const u8 reg)
+{
+ return readl(reg_base + reg);
+}
+
+static void n64cart_wait_dma(void)
+{
+ while (n64cart_read_reg(PI_STATUS_REG) &
+ (PI_STATUS_DMA_BUSY | PI_STATUS_IO_BUSY))
+ cpu_relax();
+}
+
+/*
+ * Process a single bvec of a bio.
+ */
+static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
+{
+ dma_addr_t dma_addr;
+ const u32 bstart = pos + start;
+
+ /* Alignment check */
+ WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) ||
+ (bv->bv_len & (MIN_ALIGNMENT - 1)));
+
+ dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(dev, dma_addr))
+ return false;
+
+ n64cart_wait_dma();
+
+ n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+ n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
+ n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
+
+ n64cart_wait_dma();
+
+ dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);
+ return true;
+}
+
+static blk_qc_t n64cart_submit_bio(struct bio *bio)
+{
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ struct device *dev = bio->bi_disk->private_data;
+ u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ if (!n64cart_do_bvec(dev, &bvec, pos))
+ goto io_error;
+ pos += bvec.bv_len;
+ }
+
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
+io_error:
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
+}
+
+static const struct block_device_operations n64cart_fops = {
+ .owner = THIS_MODULE,
+ .submit_bio = n64cart_submit_bio,
+};
+
+/*
+ * The target device is embedded and RAM-constrained. We save RAM
+ * by initializing in __init code that gets dropped late in boot.
+ * For the same reason there is no module or unloading support.
+ */
+static int __init n64cart_probe(struct platform_device *pdev)
+{
+ struct gendisk *disk;
+
+ if (!start || !size) {
+ pr_err("start or size not specified\n");
+ return -ENODEV;
+ }
+
+ if (size & 4095) {
+ pr_err("size must be a multiple of 4K\n");
+ return -ENODEV;
+ }
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (!reg_base)
+ return -EINVAL;
+
+ disk = alloc_disk(0);
+ if (!disk)
+ return -ENOMEM;
+
+ disk->queue = blk_alloc_queue(NUMA_NO_NODE);
+ if (!disk->queue)
+ return -ENOMEM;
+
+ disk->first_minor = 0;
+ disk->flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT;
+ disk->fops = &n64cart_fops;
+ disk->private_data = &pdev->dev;
+ strcpy(disk->disk_name, "n64cart");
+
+ set_capacity(disk, size >> SECTOR_SHIFT);
+ set_disk_ro(disk, 1);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ blk_queue_physical_block_size(disk->queue, 4096);
+ blk_queue_logical_block_size(disk->queue, 4096);
+
+ add_disk(disk);
+
+ pr_info("n64cart: %u kb disk\n", size / 1024);
+
+ return 0;
+}
+
+static struct platform_driver n64cart_driver = {
+ .driver = {
+ .name = "n64cart",
+ },
+};
+
+static int __init n64cart_init(void)
+{
+ return platform_driver_probe(&n64cart_driver, n64cart_probe);
+}
+
+module_init(n64cart_init);
+
+MODULE_AUTHOR("Lauri Kasanen <cand@gmx.com>");
+MODULE_DESCRIPTION("Driver for the N64 cart");
+MODULE_LICENSE("GPL");
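Since the driver is built-in only and the probe fails unless both parameters are given, the cart data window is typically described on the kernel command line. The values below are placeholders, not real cart offsets:

	n64cart.start=0x400000 n64cart.size=0x1000000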
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6727358e147d..4ff71b579cfc 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -78,8 +78,7 @@ struct link_dead_args {
#define NBD_RT_HAS_PID_FILE 3
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
-#define NBD_RT_DESTROY_ON_DISCONNECT 6
-#define NBD_RT_DISCONNECT_ON_CLOSE 7
+#define NBD_RT_DISCONNECT_ON_CLOSE 6
#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1
@@ -1022,6 +1021,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@@ -1060,10 +1065,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
@@ -1521,17 +1528,7 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
return 0;
}
-static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nbd_dbg_tasks_show, inode->i_private);
-}
-
-static const struct file_operations nbd_dbg_tasks_ops = {
- .open = nbd_dbg_tasks_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
@@ -1556,17 +1553,7 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
return 0;
}
-static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nbd_dbg_flags_show, inode->i_private);
-}
-
-static const struct file_operations nbd_dbg_flags_ops = {
- .open = nbd_dbg_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
@@ -1584,11 +1571,11 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
}
config->dbg_dir = dir;
- debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
- debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
return 0;
}
@@ -1916,12 +1903,21 @@ again:
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags);
- set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
- put_dev = true;
+ /*
+ * We have 1 ref to keep the device around, and then 1
+ * ref for our current operation here, which will be
+ * inherited by the config. If we already have
+ * DESTROY_ON_DISCONNECT set then we know we don't have
+ * that extra ref already held so we don't need the
+ * put_dev.
+ */
+ if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
+ put_dev = true;
} else {
- clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+ if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
+ refcount_inc(&nbd->refs);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
@@ -2092,15 +2088,13 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags))
+ if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
put_dev = true;
- set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} else {
- if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags))
+ if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
refcount_inc(&nbd->refs);
- clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 5357c3a4a36f..d6c821d48090 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1420,7 +1420,7 @@ static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = bio->bi_disk->private_data;
+ struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 148b871f263b..bfcab1c782b5 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -6,7 +6,10 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+ return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
@@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
return -EINVAL;
}
- zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
- dev_capacity_sects = MB_TO_SECTS(dev->size);
- dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
- dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
- if (dev_capacity_sects & (dev->zone_size_sects - 1))
- dev->nr_zones++;
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+ dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+ >> ilog2(dev->zone_size_sects);
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
GFP_KERNEL | __GFP_ZERO);
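A worked example with made-up numbers, showing the new rounding is equivalent to the old increment-on-remainder logic, given the power-of-two zone size the driver requires:

	/* Hypothetical: dev->size = 600 MB, dev->zone_size = 256 MB. */
	dev_capacity_sects = mb_to_sects(600);		/* 1228800 sectors */
	dev->zone_size_sects = mb_to_sects(256);	/* 524288 sectors */
	dev->nr_zones = round_up(1228800, 524288) >> ilog2(524288);
	/* round_up() gives 1572864; shifting right by 19 yields 3 zones. */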
@@ -146,10 +148,6 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
sector += dev->zone_size_sects;
}
- q->limits.zoned = BLK_ZONED_HM;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
return 0;
}
@@ -158,6 +156,10 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
+ blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+
if (queue_is_mq(q)) {
int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index a7af4f27b7c3..897acda20ac8 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
- blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
+ blk_execute_rq(disk->gd, rq, 0);
blk_put_request(rq);
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b8bb8ec7538d..fc4b0f1aa86d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -722,7 +722,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
+ blk_execute_rq(pd->bdev->bd_disk, rq, 0);
if (scsi_req(rq)->result)
ret = -EIO;
out:
@@ -2374,7 +2374,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
blk_queue_split(&bio);
- pd = bio->bi_disk->queue->queuedata;
+ pd = bio->bi_bdev->bd_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@@ -2418,7 +2418,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
split = bio;
}
- pkt_make_request_write(bio->bi_disk->queue, split);
+ pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b71d28372ef3..1d738999fb69 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -581,7 +581,7 @@ out:
static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
- struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
+ struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 59cfe71d0b3a..bbb88eb009e0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -692,29 +692,10 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
put_device(&rbd_dev->dev);
}
-static int rbd_set_read_only(struct block_device *bdev, bool ro)
-{
- struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
-
- /*
- * Both images mapped read-only and snapshots can't be marked
- * read-write.
- */
- if (!ro) {
- if (rbd_is_ro(rbd_dev))
- return -EROFS;
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- }
-
- return 0;
-}
-
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
- .set_read_only = rbd_set_read_only,
};
/*
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 96e3f9fe8241..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
kfree(iu);
}
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
} else {
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index b8e44331e494..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
- mutex_lock(&sess_dev->sess->lock);
- rnbd_srv_destroy_dev_session_sysfs(sess_dev);
- mutex_unlock(&sess_dev->sess->lock);
+ struct rnbd_srv_session *sess = sess_dev->sess;
+
sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index edacefff6e35..9a28322a8cd8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -122,7 +122,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
- struct rsxx_cardinfo *card = bio->bi_disk->private_data;
+ struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
blk_status_t st = BLK_STS_IOERR;
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 1914f5488b22..0574f4495755 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -944,8 +944,7 @@ failed_dma_setup:
ctrl->done_wq = NULL;
}
- if (ctrl->trackers)
- vfree(ctrl->trackers);
+ vfree(ctrl->trackers);
if (ctrl->status.buf)
dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
deleted file mode 100644
index a962b4551bed..000000000000
--- a/drivers/block/skd_main.c
+++ /dev/null
@@ -1,3670 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
- * was acquired by Western Digital in 2012.
- *
- * Copyright 2012 sTec, Inc.
- * Copyright (c) 2017 Western Digital Corporation or its affiliates.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <linux/err.h>
-#include <linux/aer.h>
-#include <linux/wait.h>
-#include <linux/stringify.h>
-#include <scsi/scsi.h>
-#include <scsi/sg.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-
-#include "skd_s1120.h"
-
-static int skd_dbg_level;
-static int skd_isr_comp_limit = 4;
-
-#define SKD_ASSERT(expr) \
- do { \
- if (unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- # expr, __FILE__, __func__, __LINE__); \
- } \
- } while (0)
-
-#define DRV_NAME "skd"
-#define PFX DRV_NAME ": "
-
-MODULE_LICENSE("GPL");
-
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
-
-#define PCI_VENDOR_ID_STEC 0x1B39
-#define PCI_DEVICE_ID_S1120 0x0001
-
-#define SKD_FUA_NV (1 << 1)
-#define SKD_MINORS_PER_DEVICE 16
-
-#define SKD_MAX_QUEUE_DEPTH 200u
-
-#define SKD_PAUSE_TIMEOUT (5 * 1000)
-
-#define SKD_N_FITMSG_BYTES (512u)
-#define SKD_MAX_REQ_PER_MSG 14
-
-#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
-
-/* SG elements are 32 bytes, so we can make this 4096 and still be under the
- * 128KB limit. That allows 4096*4K = 16M xfer size
- */
-#define SKD_N_SG_PER_REQ_DEFAULT 256u
-
-#define SKD_N_COMPLETION_ENTRY 256u
-#define SKD_N_READ_CAP_BYTES (8u)
-
-#define SKD_N_INTERNAL_BYTES (512u)
-
-#define SKD_SKCOMP_SIZE \
- ((sizeof(struct fit_completion_entry_v1) + \
- sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
-
-/* 5 bits of uniqifier, 0xF800 */
-#define SKD_ID_TABLE_MASK (3u << 8u)
-#define SKD_ID_RW_REQUEST (0u << 8u)
-#define SKD_ID_INTERNAL (1u << 8u)
-#define SKD_ID_FIT_MSG (3u << 8u)
-#define SKD_ID_SLOT_MASK 0x00FFu
-#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
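The masks above split a 16-bit request id into a table field (bits 8-9) and a slot field (bits 0-7). A minimal sketch of the decode, mirroring the defines above (host-side, illustrative):

/* Minimal sketch of the request-id layout defined above (illustrative). */
#include <stdio.h>
#include <stdint.h>

#define ID_TABLE_MASK  (3u << 8)   /* mirrors SKD_ID_TABLE_MASK */
#define ID_SLOT_MASK   0x00FFu     /* mirrors SKD_ID_SLOT_MASK  */
#define ID_INTERNAL    (1u << 8)   /* mirrors SKD_ID_INTERNAL   */

int main(void)
{
	uint16_t id = (uint16_t)(ID_INTERNAL | 0x2A);  /* slot 42, internal table */

	printf("table=%#x slot=%u\n", id & ID_TABLE_MASK, id & ID_SLOT_MASK);
	return 0;	/* prints: table=0x100 slot=42 */
}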
-
-#define SKD_N_MAX_SECTORS 2048u
-
-#define SKD_MAX_RETRIES 2u
-
-#define SKD_TIMER_SECONDS(seconds) (seconds)
-#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
-
-#define INQ_STD_NBYTES 36
-
-enum skd_drvr_state {
- SKD_DRVR_STATE_LOAD,
- SKD_DRVR_STATE_IDLE,
- SKD_DRVR_STATE_BUSY,
- SKD_DRVR_STATE_STARTING,
- SKD_DRVR_STATE_ONLINE,
- SKD_DRVR_STATE_PAUSING,
- SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_RESTARTING,
- SKD_DRVR_STATE_RESUMING,
- SKD_DRVR_STATE_STOPPING,
- SKD_DRVR_STATE_FAULT,
- SKD_DRVR_STATE_DISAPPEARED,
- SKD_DRVR_STATE_PROTOCOL_MISMATCH,
- SKD_DRVR_STATE_BUSY_ERASE,
- SKD_DRVR_STATE_BUSY_SANITIZE,
- SKD_DRVR_STATE_BUSY_IMMINENT,
- SKD_DRVR_STATE_WAIT_BOOT,
- SKD_DRVR_STATE_SYNCING,
-};
-
-#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
-#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
-#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
-#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
-#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
-#define SKD_START_WAIT_SECONDS 90u
-
-enum skd_req_state {
- SKD_REQ_STATE_IDLE,
- SKD_REQ_STATE_SETUP,
- SKD_REQ_STATE_BUSY,
- SKD_REQ_STATE_COMPLETED,
- SKD_REQ_STATE_TIMEOUT,
-};
-
-enum skd_check_status_action {
- SKD_CHECK_STATUS_REPORT_GOOD,
- SKD_CHECK_STATUS_REPORT_SMART_ALERT,
- SKD_CHECK_STATUS_REQUEUE_REQUEST,
- SKD_CHECK_STATUS_REPORT_ERROR,
- SKD_CHECK_STATUS_BUSY_IMMINENT,
-};
-
-struct skd_msg_buf {
- struct fit_msg_hdr fmh;
- struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
-};
-
-struct skd_fitmsg_context {
- u32 id;
-
- u32 length;
-
- struct skd_msg_buf *msg_buf;
- dma_addr_t mb_dma_address;
-};
-
-struct skd_request_context {
- enum skd_req_state state;
-
- u16 id;
- u32 fitmsg_id;
-
- u8 flush_cmd;
-
- enum dma_data_direction data_dir;
- struct scatterlist *sg;
- u32 n_sg;
- u32 sg_byte_count;
-
- struct fit_sg_descriptor *sksg_list;
- dma_addr_t sksg_dma_address;
-
- struct fit_completion_entry_v1 completion;
-
- struct fit_comp_error_info err_info;
- int retries;
-
- blk_status_t status;
-};
-
-struct skd_special_context {
- struct skd_request_context req;
-
- void *data_buf;
- dma_addr_t db_dma_address;
-
- struct skd_msg_buf *msg_buf;
- dma_addr_t mb_dma_address;
-};
-
-typedef enum skd_irq_type {
- SKD_IRQ_LEGACY,
- SKD_IRQ_MSI,
- SKD_IRQ_MSIX
-} skd_irq_type_t;
-
-#define SKD_MAX_BARS 2
-
-struct skd_device {
- void __iomem *mem_map[SKD_MAX_BARS];
- resource_size_t mem_phys[SKD_MAX_BARS];
- u32 mem_size[SKD_MAX_BARS];
-
- struct skd_msix_entry *msix_entries;
-
- struct pci_dev *pdev;
- int pcie_error_reporting_is_enabled;
-
- spinlock_t lock;
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct request_queue *queue;
- struct skd_fitmsg_context *skmsg;
- struct device *class_dev;
- int gendisk_on;
- int sync_done;
-
- u32 devno;
- u32 major;
- char isr_name[30];
-
- enum skd_drvr_state state;
- u32 drive_state;
-
- u32 cur_max_queue_depth;
- u32 queue_low_water_mark;
- u32 dev_max_queue_depth;
-
- u32 num_fitmsg_context;
- u32 num_req_context;
-
- struct skd_fitmsg_context *skmsg_table;
-
- struct skd_special_context internal_skspcl;
- u32 read_cap_blocksize;
- u32 read_cap_last_lba;
- int read_cap_is_valid;
- int inquiry_is_valid;
- u8 inq_serial_num[13]; /*12 chars plus null term */
-
- u8 skcomp_cycle;
- u32 skcomp_ix;
- struct kmem_cache *msgbuf_cache;
- struct kmem_cache *sglist_cache;
- struct kmem_cache *databuf_cache;
- struct fit_completion_entry_v1 *skcomp_table;
- struct fit_comp_error_info *skerr_table;
- dma_addr_t cq_dma_address;
-
- wait_queue_head_t waitq;
-
- struct timer_list timer;
- u32 timer_countdown;
- u32 timer_substate;
-
- int sgs_per_request;
- u32 last_mtd;
-
- u32 proto_ver;
-
- int dbg_level;
- u32 connect_time_stamp;
- int connect_retries;
-#define SKD_MAX_CONNECT_RETRIES 16
- u32 drive_jiffies;
-
- u32 timo_slot;
-
- struct work_struct start_queue;
- struct work_struct completion_worker;
-};
-
-#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
-#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
-#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
-
-static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
-{
- u32 val = readl(skdev->mem_map[1] + offset);
-
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
- return val;
-}
-
-static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
- u32 offset)
-{
- writel(val, skdev->mem_map[1] + offset);
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
-}
-
-static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
- u32 offset)
-{
- writeq(val, skdev->mem_map[1] + offset);
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
- val);
-}
-
-
-#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
-static int skd_isr_type = SKD_IRQ_DEFAULT;
-
-module_param(skd_isr_type, int, 0444);
-MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
- " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
-
-#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
-static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
-
-module_param(skd_max_req_per_msg, int, 0444);
-MODULE_PARM_DESC(skd_max_req_per_msg,
- "Maximum SCSI requests packed in a single message."
- " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
-
-#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
-#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
-static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
-
-module_param(skd_max_queue_depth, int, 0444);
-MODULE_PARM_DESC(skd_max_queue_depth,
- "Maximum SCSI requests issued to s1120."
- " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
-
-static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
-module_param(skd_sgs_per_request, int, 0444);
-MODULE_PARM_DESC(skd_sgs_per_request,
- "Maximum SG elements per block request."
- " (1-4096, default==256)");
-
-static int skd_max_pass_thru = 1;
-module_param(skd_max_pass_thru, int, 0444);
-MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time. IGNORED");
-
-module_param(skd_dbg_level, int, 0444);
-MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
-
-module_param(skd_isr_comp_limit, int, 0444);
-MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
-
-/* Major device number dynamically assigned. */
-static u32 skd_major;
-
-static void skd_destruct(struct skd_device *skdev);
-static const struct block_device_operations skd_blockdev_ops;
-static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg);
-static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl);
-static bool skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
-static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
-
-static void skd_restart_device(struct skd_device *skdev);
-static int skd_quiesce_dev(struct skd_device *skdev);
-static int skd_unquiesce_dev(struct skd_device *skdev);
-static void skd_disable_interrupts(struct skd_device *skdev);
-static void skd_isr_fwstate(struct skd_device *skdev);
-static void skd_recover_requests(struct skd_device *skdev);
-static void skd_soft_reset(struct skd_device *skdev);
-
-const char *skd_drive_state_to_str(int state);
-const char *skd_skdev_state_to_str(enum skd_drvr_state state);
-static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event);
-
-/*
- *****************************************************************************
- * READ/WRITE REQUESTS
- *****************************************************************************
- */
-static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
-{
- int *count = data;
-
- (*count)++;
- return true;
-}
-
-static int skd_in_flight(struct skd_device *skdev)
-{
- int count = 0;
-
- blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
-
- return count;
-}
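The callback must advance the counter through the pointer, not the pointer itself, since the iterator passes the same opaque data cookie to every invocation. A host-side model of the iterate-and-count pattern (the callback shape loosely mirrors a blk_mq_tagset_busy_iter() callback; the array stands in for the tag set):

/* Host-side model of the busy-iterator counting pattern (illustrative). */
#include <stdio.h>
#include <stdbool.h>

static bool inc_in_flight(int rq_tag, void *data)
{
	int *count = data;

	(void)rq_tag;
	(*count)++;	/* bump the counter, not the pointer */
	return true;	/* keep iterating */
}

int main(void)
{
	int busy_tags[] = { 3, 7, 11 };
	int count = 0;

	for (unsigned i = 0; i < sizeof(busy_tags) / sizeof(busy_tags[0]); i++)
		if (!inc_in_flight(busy_tags[i], &count))
			break;
	printf("in flight: %d\n", count);	/* prints 3 */
	return 0;
}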
-
-static void
-skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
- int data_dir, unsigned lba,
- unsigned count)
-{
- if (data_dir == READ)
- scsi_req->cdb[0] = READ_10;
- else
- scsi_req->cdb[0] = WRITE_10;
-
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
- scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
- scsi_req->cdb[4] = (lba & 0xff00) >> 8;
- scsi_req->cdb[5] = (lba & 0xff);
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = (count & 0xff00) >> 8;
- scsi_req->cdb[8] = count & 0xff;
- scsi_req->cdb[9] = 0;
-}
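The shifts above store the LBA big-endian in CDB bytes 2-5 and the transfer length in bytes 7-8, which is the standard SCSI READ(10)/WRITE(10) layout. A self-contained re-packing that can be checked byte for byte (illustrative; 0x28 and 0x2A are the standard READ(10)/WRITE(10) opcodes):

/* Illustrative re-packing of a READ(10)/WRITE(10) CDB, matching the shifts above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void prep_rw10(uint8_t cdb[10], uint8_t opcode, uint32_t lba, uint16_t count)
{
	memset(cdb, 0, 10);
	cdb[0] = opcode;		/* 0x28 READ(10), 0x2A WRITE(10) */
	cdb[2] = (uint8_t)(lba >> 24);	/* LBA, big-endian */
	cdb[3] = (uint8_t)(lba >> 16);
	cdb[4] = (uint8_t)(lba >> 8);
	cdb[5] = (uint8_t)lba;
	cdb[7] = (uint8_t)(count >> 8);	/* transfer length, big-endian */
	cdb[8] = (uint8_t)count;
}

int main(void)
{
	uint8_t cdb[10];

	prep_rw10(cdb, 0x28, 0x12345678, 0x0100);
	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);   /* 28 00 12 34 56 78 00 01 00 00 */
	printf("\n");
	return 0;
}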
-
-static void
-skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq)
-{
- skreq->flush_cmd = 1;
-
- scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = 0;
- scsi_req->cdb[3] = 0;
- scsi_req->cdb[4] = 0;
- scsi_req->cdb[5] = 0;
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = 0;
- scsi_req->cdb[8] = 0;
- scsi_req->cdb[9] = 0;
-}
-
-/*
- * Return true if and only if all pending requests should be failed.
- */
-static bool skd_fail_all(struct request_queue *q)
-{
- struct skd_device *skdev = q->queuedata;
-
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
-
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* While starting, we haven't started the queue, so we
- * shouldn't get here... but requests may already be
- * waiting for us because we reported dev/skd0 early.
- * They'll wait forever if the connect never completes.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- return false;
-
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- return true;
- }
-}
-
-static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *mqd)
-{
- struct request *const req = mqd->rq;
- struct request_queue *const q = req->q;
- struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg;
- struct fit_msg_hdr *fmh;
- const u32 tag = blk_mq_unique_tag(req);
- struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
- struct skd_scsi_request *scsi_req;
- unsigned long flags = 0;
- const u32 lba = blk_rq_pos(req);
- const u32 count = blk_rq_sectors(req);
- const int data_dir = rq_data_dir(req);
-
- if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
- return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
-
- if (!(req->rq_flags & RQF_DONTPREP)) {
- skreq->retries = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
-
- blk_mq_start_request(req);
-
- WARN_ONCE(tag >= skd_max_queue_depth, "%#x >= %#x (nr_requests = %lu)\n",
- tag, skd_max_queue_depth, q->nr_requests);
-
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
-
- dev_dbg(&skdev->pdev->dev,
- "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
- lba, count, count, data_dir);
-
- skreq->id = tag + SKD_ID_RW_REQUEST;
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
-
- skreq->fitmsg_id = 0;
-
- skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
- dev_dbg(&skdev->pdev->dev, "error Out\n");
- skreq->status = BLK_STS_RESOURCE;
- blk_mq_complete_request(req);
- return BLK_STS_OK;
- }
-
- dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
- skreq->n_sg *
- sizeof(struct fit_sg_descriptor),
- DMA_TO_DEVICE);
-
- /* Either a FIT msg is in progress or we have to start one. */
- if (skd_max_req_per_msg == 1) {
- skmsg = NULL;
- } else {
- spin_lock_irqsave(&skdev->lock, flags);
- skmsg = skdev->skmsg;
- }
- if (!skmsg) {
- skmsg = &skdev->skmsg_table[tag];
- skdev->skmsg = skmsg;
-
- /* Initialize the FIT msg header */
- fmh = &skmsg->msg_buf->fmh;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- } else {
- fmh = &skmsg->msg_buf->fmh;
- }
-
- skreq->fitmsg_id = skmsg->id;
-
- scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
- memset(scsi_req, 0, sizeof(*scsi_req));
-
- scsi_req->hdr.tag = skreq->id;
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skreq->sksg_dma_address);
-
- if (req_op(req) == REQ_OP_FLUSH) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
-
- if (req->cmd_flags & REQ_FUA)
- scsi_req->cdb[1] |= SKD_FUA_NV;
-
- scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
-
- /* Complete resource allocations. */
- skreq->state = SKD_REQ_STATE_BUSY;
-
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
-
- dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
- skd_in_flight(skdev));
-
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skd_max_req_per_msg == 1) {
- skd_send_fitmsg(skdev, skmsg);
- } else {
- if (mqd->last ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skdev->skmsg = NULL;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
-
- return BLK_STS_OK;
-}
-
-static enum blk_eh_timer_return skd_timed_out(struct request *req,
- bool reserved)
-{
- struct skd_device *skdev = req->q->queuedata;
-
- dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
- blk_mq_unique_tag(req));
-
- return BLK_EH_RESET_TIMER;
-}
-
-static void skd_complete_rq(struct request *req)
-{
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
-
- blk_mq_end_request(req, skreq->status);
-}
-
-static bool skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- struct request *req = blk_mq_rq_from_pdu(skreq);
- struct scatterlist *sgl = &skreq->sg[0], *sg;
- int n_sg;
- int i;
-
- skreq->sg_byte_count = 0;
-
- WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
- skreq->data_dir != DMA_FROM_DEVICE);
-
- n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
- if (n_sg <= 0)
- return false;
-
- /*
- * Map scatterlist to PCI bus addresses.
- * Note PCI might change the number of entries.
- */
- n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
- if (n_sg <= 0)
- return false;
-
- SKD_ASSERT(n_sg <= skdev->sgs_per_request);
-
- skreq->n_sg = n_sg;
-
- for_each_sg(sgl, sg, n_sg, i) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(sg);
- uint64_t dma_addr = sg_dma_address(sg);
-
- sgd->control = FIT_SGD_CONTROL_NOT_LAST;
- sgd->byte_count = cnt;
- skreq->sg_byte_count += cnt;
- sgd->host_side_addr = dma_addr;
- sgd->dev_side_addr = 0;
- }
-
- skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
- skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
-
- if (unlikely(skdev->dbg_level > 1)) {
- dev_dbg(&skdev->pdev->dev,
- "skreq=%x sksg_list=%p sksg_dma=%pad\n",
- skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
- for (i = 0; i < n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-
- dev_dbg(&skdev->pdev->dev,
- " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- return true;
-}
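skd_preop_sg_list() above turns a scatterlist into a chain of FIT SG descriptors: every entry carries an address/length pair and is flagged NOT_LAST, except the final one, which gets the LAST flag and a cleared next pointer. A host-side model of the chain layout (illustrative; the control values 0/1 are hypothetical stand-ins for the FIT_SGD_CONTROL_* constants, and the DMA addresses are made up):

/* Host-side model of the FIT SG descriptor chain built above (illustrative). */
#include <stdio.h>
#include <stdint.h>

struct sg_desc {
	uint32_t control;	/* 0 = not last, 1 = last (hypothetical values) */
	uint32_t byte_count;
	uint64_t host_addr;
	uint64_t next_desc;	/* chain pointer; 0 terminates */
};

int main(void)
{
	struct sg_desc list[3];
	uint64_t list_base = 0x1000;	/* pretend DMA address of list[] */
	uint32_t lens[3] = { 4096, 4096, 512 };
	uint32_t total = 0;

	for (int i = 0; i < 3; i++) {
		list[i].control    = 0;			/* NOT_LAST */
		list[i].byte_count = lens[i];
		list[i].host_addr  = 0x100000 + 0x1000 * i;
		list[i].next_desc  = list_base + (i + 1) * sizeof(struct sg_desc);
		total += lens[i];
	}
	list[2].control   = 1;				/* LAST */
	list[2].next_desc = 0;

	printf("chain of 3 descriptors, %u bytes total\n", total);  /* 8704 */
	return 0;
}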
-
-static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- /*
- * restore the next ptr for the next IO request so we
- * don't have to set it every time.
- */
- skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
- skreq->sksg_dma_address +
- ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
- skreq->data_dir);
-}
-
-/*
- *****************************************************************************
- * TIMER
- *****************************************************************************
- */
-
-static void skd_timer_tick_not_online(struct skd_device *skdev);
-
-static void skd_start_queue(struct work_struct *work)
-{
- struct skd_device *skdev = container_of(work, typeof(*skdev),
- start_queue);
-
- /*
- * Although it is safe to call blk_start_queue() from interrupt
- * context, blk_mq_start_hw_queues() must not be called from
- * interrupt context.
- */
- blk_mq_start_hw_queues(skdev->queue);
-}
-
-static void skd_timer_tick(struct timer_list *t)
-{
- struct skd_device *skdev = from_timer(skdev, t, timer);
- unsigned long reqflags;
- u32 state;
-
- if (skdev->state == SKD_DRVR_STATE_FAULT)
- /* The driver has declared fault, and we want it to
- * stay that way until driver is reloaded.
- */
- return;
-
- spin_lock_irqsave(&skdev->lock, reqflags);
-
- state = SKD_READL(skdev, FIT_STATUS);
- state &= FIT_SR_DRIVE_STATE_MASK;
- if (state != skdev->drive_state)
- skd_isr_fwstate(skdev);
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE)
- skd_timer_tick_not_online(skdev);
-
- mod_timer(&skdev->timer, (jiffies + HZ));
-
- spin_unlock_irqrestore(&skdev->lock, reqflags);
-}
-
-static void skd_timer_tick_not_online(struct skd_device *skdev)
-{
- switch (skdev->state) {
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- break;
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- dev_dbg(&skdev->pdev->dev,
- "drive busy sanitize[%x], driver[%x]\n",
- skdev->drive_state, skdev->state);
- /* If we've been in sanitize for 3 seconds, we figure we're not
- * going to get any more completions, so recover the requests now
- */
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_recover_requests(skdev);
- break;
-
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
- skdev->state, skdev->timer_countdown);
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "busy[%x], timedout=%d, restarting device.",
- skdev->state, skdev->timer_countdown);
- skd_restart_device(skdev);
- break;
-
- case SKD_DRVR_STATE_WAIT_BOOT:
- case SKD_DRVR_STATE_STARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * recover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
-
- dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
- skdev->drive_state);
-
- /* start the queue so we can respond with errors to requests */
- /* wake up anyone waiting for startup to complete */
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_ONLINE:
- /* shouldn't get here. */
- break;
-
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- break;
-
- case SKD_DRVR_STATE_RESTARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * recover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
- dev_err(&skdev->pdev->dev,
- "DriveFault Reconnect Timeout (%x)\n",
- skdev->drive_state);
-
- /*
- * Recovering does two things:
- * 1. completes IO with error
- * 2. reclaims dma resources
- * When is it safe to recover requests?
- * - if the drive state is faulted
- * - if the state is still soft reset after our timeout
- * - if the drive registers are dead (state = FF)
- * If it is "unsafe", we still need to recover, so we will
- * disable pci bus mastering and disable our interrupts.
- */
-
- if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
- (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
- (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
- /* It never came out of soft reset. Try to
- * recover the requests and then let them
- * fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev);
- else {
- dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
- skdev->drive_state);
- pci_disable_device(skdev->pdev);
- skd_disable_interrupts(skdev);
- skd_recover_requests(skdev);
- }
-
- /* start the queue so we can respond with errors to requests */
- /* wake up anyone waiting for startup to complete */
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_RESUMING:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- break;
- }
-}
-
-static int skd_start_timer(struct skd_device *skdev)
-{
- int rc;
-
- timer_setup(&skdev->timer, skd_timer_tick, 0);
-
- rc = mod_timer(&skdev->timer, (jiffies + HZ));
- if (rc)
- dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
- return rc;
-}
-
-static void skd_kill_timer(struct skd_device *skdev)
-{
- del_timer_sync(&skdev->timer);
-}
-
-/*
- *****************************************************************************
- * INTERNAL REQUESTS -- generated by driver itself
- *****************************************************************************
- */
-
-static int skd_format_internal_skspcl(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct fit_msg_hdr *fmh;
- uint64_t dma_address;
- struct skd_scsi_request *scsi;
-
- fmh = &skspcl->msg_buf->fmh;
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
-
- scsi = &skspcl->msg_buf->scsi[0];
- memset(scsi, 0, sizeof(*scsi));
- dma_address = skspcl->req.sksg_dma_address;
- scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
- skspcl->req.n_sg = 1;
- sgd->control = FIT_SGD_CONTROL_LAST;
- sgd->byte_count = 0;
- sgd->host_side_addr = skspcl->db_dma_address;
- sgd->dev_side_addr = 0;
- sgd->next_desc_ptr = 0LL;
-
- return 1;
-}
-
-#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
-
-static void skd_send_internal_skspcl(struct skd_device *skdev,
- struct skd_special_context *skspcl,
- u8 opcode)
-{
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct skd_scsi_request *scsi;
- unsigned char *buf = skspcl->data_buf;
- int i;
-
- if (skspcl->req.state != SKD_REQ_STATE_IDLE)
- /*
- * A refresh is already in progress.
- * Just wait for it to finish.
- */
- return;
-
- skspcl->req.state = SKD_REQ_STATE_BUSY;
-
- scsi = &skspcl->msg_buf->scsi[0];
- scsi->hdr.tag = skspcl->req.id;
-
- memset(scsi->cdb, 0, sizeof(scsi->cdb));
-
- switch (opcode) {
- case TEST_UNIT_READY:
- scsi->cdb[0] = TEST_UNIT_READY;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
-
- case READ_CAPACITY:
- scsi->cdb[0] = READ_CAPACITY;
- sgd->byte_count = SKD_N_READ_CAP_BYTES;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
-
- case INQUIRY:
- scsi->cdb[0] = INQUIRY;
- scsi->cdb[1] = 0x01; /* evpd */
- scsi->cdb[2] = 0x80; /* serial number page */
- scsi->cdb[4] = 0x10;
- sgd->byte_count = 16;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
-
- case SYNCHRONIZE_CACHE:
- scsi->cdb[0] = SYNCHRONIZE_CACHE;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
-
- case WRITE_BUFFER:
- scsi->cdb[0] = WRITE_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- /* fill incrementing byte pattern */
- for (i = 0; i < sgd->byte_count; i++)
- buf[i] = i & 0xFF;
- break;
-
- case READ_BUFFER:
- scsi->cdb[0] = READ_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- memset(skspcl->data_buf, 0, sgd->byte_count);
- break;
-
- default:
- SKD_ASSERT("Don't know what to send");
- return;
-
- }
- skd_send_special_fitmsg(skdev, skspcl);
-}
-
-static void skd_refresh_device_data(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
-
- skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
-}
-
-static int skd_chk_read_buf(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- unsigned char *buf = skspcl->data_buf;
- int i;
-
- /* check for incrementing byte pattern */
- for (i = 0; i < WR_BUF_SIZE; i++)
- if (buf[i] != (i & 0xFF))
- return 1;
-
- return 0;
-}
-
-static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
- u8 code, u8 qual, u8 fruc)
-{
- /* If the check condition is of special interest, log a message */
- if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
- && (code == 0x04) && (qual == 0x06)) {
- dev_err(&skdev->pdev->dev,
- "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- key, code, qual, fruc);
- }
-}
-
-static void skd_complete_internal(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- u8 *buf = skspcl->data_buf;
- u8 status;
- int i;
- struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
-
- lockdep_assert_held(&skdev->lock);
-
- SKD_ASSERT(skspcl == &skdev->internal_skspcl);
-
- dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
-
- dma_sync_single_for_cpu(&skdev->pdev->dev,
- skspcl->db_dma_address,
- skspcl->req.sksg_list[0].byte_count,
- DMA_BIDIRECTIONAL);
-
- skspcl->req.completion = *skcomp;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- status = skspcl->req.completion.status;
-
- skd_log_check_status(skdev, status, skerr->key, skerr->code,
- skerr->qual, skerr->fruc);
-
- switch (scsi->cdb[0]) {
- case TEST_UNIT_READY:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR))
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "TUR failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** TUR failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case WRITE_BUFFER:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "write buffer failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** write buffer failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case READ_BUFFER:
- if (status == SAM_STAT_GOOD) {
- if (skd_chk_read_buf(skdev, skspcl) == 0)
- skd_send_internal_skspcl(skdev, skspcl,
- READ_CAPACITY);
- else {
- dev_err(&skdev->pdev->dev,
- "*** W/R Buffer mismatch %d ***\n",
- skdev->connect_retries);
- if (skdev->connect_retries <
- SKD_MAX_CONNECT_RETRIES) {
- skdev->connect_retries++;
- skd_soft_reset(skdev);
- } else {
- dev_err(&skdev->pdev->dev,
- "W/R Buffer Connect Error\n");
- return;
- }
- }
-
- } else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** read buffer failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case READ_CAPACITY:
- skdev->read_cap_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->read_cap_last_lba =
- (buf[0] << 24) | (buf[1] << 16) |
- (buf[2] << 8) | buf[3];
- skdev->read_cap_blocksize =
- (buf[4] << 24) | (buf[5] << 16) |
- (buf[6] << 8) | buf[7];
-
- dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
-
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
-
- skdev->read_cap_is_valid = 1;
-
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR)) {
- skdev->read_cap_last_lba = ~0;
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else {
- dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case INQUIRY:
- skdev->inquiry_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->inquiry_is_valid = 1;
-
- for (i = 0; i < 12; i++)
- skdev->inq_serial_num[i] = buf[i + 4];
- skdev->inq_serial_num[12] = 0;
- }
-
- if (skd_unquiesce_dev(skdev) < 0)
- dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
- /* connection is complete */
- skdev->connect_retries = 0;
- break;
-
- case SYNCHRONIZE_CACHE:
- if (status == SAM_STAT_GOOD)
- skdev->sync_done = 1;
- else
- skdev->sync_done = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- default:
- SKD_ASSERT("we didn't send this");
- }
-}
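The READ_CAPACITY branch above assembles two big-endian 32-bit fields byte by byte: the last LBA from buf[0..3] and the block size from buf[4..7], which is the READ CAPACITY(10) payload format. An equivalent host-side decode (illustrative; the sample payload is made up):

/* Illustrative decode of a READ CAPACITY(10) payload, as done byte-wise above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* 8-byte payload: last LBA = 0x003FFFFF, block size = 512 */
	uint8_t buf[8] = { 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x02, 0x00 };
	uint32_t last_lba  = get_be32(&buf[0]);
	uint32_t block_len = get_be32(&buf[4]);

	printf("capacity: %llu sectors of %u bytes\n",
	       (unsigned long long)last_lba + 1, block_len);
	return 0;
}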
-
-/*
- *****************************************************************************
- * FIT MESSAGES
- *****************************************************************************
- */
-
-static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg)
-{
- u64 qcmd;
-
- dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
- &skmsg->mb_dma_address, skd_in_flight(skdev));
- dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
-
- qcmd = skmsg->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL;
-
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skmsg->msg_buf;
- int i;
- for (i = 0; i < skmsg->length; i += 8) {
- dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
- &bp[i]);
- if (i == 0)
- i = 64 - 8;
- }
- }
-
- if (skmsg->length > 256)
- qcmd |= FIT_QCMD_MSGSIZE_512;
- else if (skmsg->length > 128)
- qcmd |= FIT_QCMD_MSGSIZE_256;
- else if (skmsg->length > 64)
- qcmd |= FIT_QCMD_MSGSIZE_128;
- else
- /*
- * This makes no sense because the FIT msg header is
- * 64 bytes. If the msg is only 64 bytes long it has
- * no payload.
- */
- qcmd |= FIT_QCMD_MSGSIZE_64;
-
- dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
- skmsg->length, DMA_TO_DEVICE);
-
- /* Make sure skd_msg_buf is written before the doorbell is triggered. */
- smp_wmb();
-
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-}
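The queue-command word built above packs the message's DMA address together with a queue id and a size class derived from the message length; the scheme relies on the buffer's alignment leaving the low address bits free for flags. A sketch of the encoding under stated assumptions (the QCMD_* flag values here are hypothetical; the real FIT_QCMD_* constants live in skd_s1120.h):

/* Sketch of the FIT queue-command encoding above; flag values are hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define QCMD_QID_NORMAL   0x1ull	/* hypothetical low-bit flag values */
#define QCMD_MSGSIZE_64   0x0ull
#define QCMD_MSGSIZE_128  0x2ull
#define QCMD_MSGSIZE_256  0x4ull
#define QCMD_MSGSIZE_512  0x6ull

static uint64_t encode_qcmd(uint64_t dma_addr, uint32_t length)
{
	/* dma_addr is assumed aligned, so its low bits are zero */
	uint64_t qcmd = dma_addr | QCMD_QID_NORMAL;

	if (length > 256)
		qcmd |= QCMD_MSGSIZE_512;
	else if (length > 128)
		qcmd |= QCMD_MSGSIZE_256;
	else if (length > 64)
		qcmd |= QCMD_MSGSIZE_128;
	else
		qcmd |= QCMD_MSGSIZE_64;
	return qcmd;
}

int main(void)
{
	printf("qcmd=%#llx\n", (unsigned long long)encode_qcmd(0x10000, 200));
	return 0;	/* prints qcmd=0x10005 */
}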
-
-static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- u64 qcmd;
-
- WARN_ON_ONCE(skspcl->req.n_sg != 1);
-
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skspcl->msg_buf;
- int i;
-
- for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
- &bp[i]);
- if (i == 0)
- i = 64 - 8;
- }
-
- dev_dbg(&skdev->pdev->dev,
- "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- &skspcl->req.sksg_dma_address);
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct fit_sg_descriptor *sgd =
- &skspcl->req.sksg_list[i];
-
- dev_dbg(&skdev->pdev->dev,
- " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- /*
- * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
- * and one 64-byte SSDI command.
- */
- qcmd = skspcl->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
-
- dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
- SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
- dma_sync_single_for_device(&skdev->pdev->dev,
- skspcl->req.sksg_dma_address,
- 1 * sizeof(struct fit_sg_descriptor),
- DMA_TO_DEVICE);
- dma_sync_single_for_device(&skdev->pdev->dev,
- skspcl->db_dma_address,
- skspcl->req.sksg_list[0].byte_count,
- DMA_BIDIRECTIONAL);
-
- /* Make sure skd_msg_buf is written before the doorbell is triggered. */
- smp_wmb();
-
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-}
-
-/*
- *****************************************************************************
- * COMPLETION QUEUE
- *****************************************************************************
- */
-
-static void skd_complete_other(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr);
-
-struct sns_info {
- u8 type;
- u8 stat;
- u8 key;
- u8 asc;
- u8 ascq;
- u8 mask;
- enum skd_check_status_action action;
-};
-
-static struct sns_info skd_chkstat_table[] = {
- /* Good */
- { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
- SKD_CHECK_STATUS_REPORT_GOOD },
-
- /* Smart alerts */
- { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
-
- /* Retry (with limits) */
- { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
-
- /* Busy (or about to be) */
- { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
- SKD_CHECK_STATUS_BUSY_IMMINENT },
-};
-
-/*
- * Look up status and sense data to decide how to handle the error
- * from the device.
- * mask says which fields must match e.g., mask=0x18 means check
- * type and stat, ignore key, asc, ascq.
- */
-
-static enum skd_check_status_action
-skd_check_status(struct skd_device *skdev,
- u8 cmp_status, struct fit_comp_error_info *skerr)
-{
- int i;
-
- dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
-
- dev_dbg(&skdev->pdev->dev,
- "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
-
- /* Does the info match an entry in the good category? */
- for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
- struct sns_info *sns = &skd_chkstat_table[i];
-
- if (sns->mask & 0x10)
- if (skerr->type != sns->type)
- continue;
-
- if (sns->mask & 0x08)
- if (cmp_status != sns->stat)
- continue;
-
- if (sns->mask & 0x04)
- if (skerr->key != sns->key)
- continue;
-
- if (sns->mask & 0x02)
- if (skerr->code != sns->asc)
- continue;
-
- if (sns->mask & 0x01)
- if (skerr->qual != sns->ascq)
- continue;
-
- if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- dev_err(&skdev->pdev->dev,
- "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
- skerr->key, skerr->code, skerr->qual);
- }
- return sns->action;
- }
-
- /* No other match, so nonzero status means error,
- * zero status means good
- */
- if (cmp_status) {
- dev_dbg(&skdev->pdev->dev, "status check: error\n");
- return SKD_CHECK_STATUS_REPORT_ERROR;
- }
-
- dev_dbg(&skdev->pdev->dev, "status check good default\n");
- return SKD_CHECK_STATUS_REPORT_GOOD;
-}
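The lookup above only compares the fields each table entry's mask selects; mask=0x18, for example, compares type and stat and ignores key/asc/ascq. A host-side model of one match step, using the same bit assignments as the code above (illustrative):

/* Host-side model of the masked sense-table match used above (illustrative). */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct sns { uint8_t type, stat, key, asc, ascq, mask; };

static bool sns_matches(const struct sns *e, uint8_t type, uint8_t stat,
			uint8_t key, uint8_t asc, uint8_t ascq)
{
	if ((e->mask & 0x10) && type != e->type)  return false;
	if ((e->mask & 0x08) && stat != e->stat)  return false;
	if ((e->mask & 0x04) && key  != e->key)   return false;
	if ((e->mask & 0x02) && asc  != e->asc)   return false;
	if ((e->mask & 0x01) && ascq != e->ascq)  return false;
	return true;
}

int main(void)
{
	/* mask 0x18: only type and stat are significant */
	struct sns e = { 0x70, 0x02, 0, 0, 0, 0x18 };

	printf("%d\n", sns_matches(&e, 0x70, 0x02, 0x0B, 0x44, 0x00)); /* 1 */
	printf("%d\n", sns_matches(&e, 0x71, 0x02, 0x00, 0x00, 0x00)); /* 0 */
	return 0;
}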
-
-static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq,
- struct request *req)
-{
- u8 cmp_status = skreq->completion.status;
-
- switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
- case SKD_CHECK_STATUS_REPORT_GOOD:
- case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skreq->status = BLK_STS_OK;
- if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
- break;
-
- case SKD_CHECK_STATUS_BUSY_IMMINENT:
- skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_mq_requeue_request(req, true);
- dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
- skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
- skdev->timer_countdown = SKD_TIMER_MINUTES(20);
- skd_quiesce_dev(skdev);
- break;
-
- case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if (++skreq->retries < SKD_MAX_RETRIES) {
- skd_log_skreq(skdev, skreq, "retry");
- blk_mq_requeue_request(req, true);
- break;
- }
- fallthrough;
-
- case SKD_CHECK_STATUS_REPORT_ERROR:
- default:
- skreq->status = BLK_STS_IOERR;
- if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
- break;
- }
-}
-
-static void skd_release_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- /*
- * Reclaim the skd_request_context
- */
- skreq->state = SKD_REQ_STATE_IDLE;
-}
-
-static int skd_isr_completion_posted(struct skd_device *skdev,
- int limit, int *enqueued)
-{
- struct fit_completion_entry_v1 *skcmp;
- struct fit_comp_error_info *skerr;
- u16 req_id;
- u32 tag;
- u16 hwq = 0;
- struct request *rq;
- struct skd_request_context *skreq;
- u16 cmp_cntxt;
- u8 cmp_status;
- u8 cmp_cycle;
- u32 cmp_bytes;
- int rc = 0;
- int processed = 0;
-
- lockdep_assert_held(&skdev->lock);
-
- for (;;) {
- SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
-
- skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
- cmp_cycle = skcmp->cycle;
- cmp_cntxt = skcmp->tag;
- cmp_status = skcmp->status;
- cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
-
- skerr = &skdev->skerr_table[skdev->skcomp_ix];
-
- dev_dbg(&skdev->pdev->dev,
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
- skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
- cmp_cntxt, cmp_status, skd_in_flight(skdev),
- cmp_bytes, skdev->proto_ver);
-
- if (cmp_cycle != skdev->skcomp_cycle) {
- dev_dbg(&skdev->pdev->dev, "end of completions\n");
- break;
- }
- /*
- * Update the completion queue head index and possibly
- * the completion cycle count. 8-bit wrap-around.
- */
- skdev->skcomp_ix++;
- if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle++;
- }
-
- /*
- * The command context is a unique 32-bit ID. The low order
- * bits help locate the request. The request is usually an
- * r/w request (see skd_mq_queue_rq() above) or a special request.
- */
- req_id = cmp_cntxt;
- tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
-
- /* Is this other than a r/w request? */
- if (tag >= skdev->num_req_context) {
- /*
- * This is not a completion for a r/w request.
- */
- WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
- tag));
- skd_complete_other(skdev, skcmp, skerr);
- continue;
- }
-
- rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
- if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
- tag))
- continue;
- skreq = blk_mq_rq_to_pdu(rq);
-
- /*
- * Make sure the request ID for the slot matches.
- */
- if (skreq->id != req_id) {
- dev_err(&skdev->pdev->dev,
- "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- req_id, skreq->id, cmp_cntxt);
-
- continue;
- }
-
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
-
- skreq->completion = *skcmp;
- if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
- skreq->err_info = *skerr;
- skd_log_check_status(skdev, cmp_status, skerr->key,
- skerr->code, skerr->qual,
- skerr->fruc);
- }
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- skd_release_skreq(skdev, skreq);
-
- /*
- * Capture the outcome and post it back to the native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD)) {
- skreq->status = BLK_STS_OK;
- if (likely(!blk_should_fake_timeout(rq->q)))
- blk_mq_complete_request(rq);
- } else {
- skd_resolve_req_exception(skdev, skreq, rq);
- }
-
- /* skd_isr_comp_limit equal to zero means no limit */
- if (limit) {
- if (++processed >= limit) {
- rc = 1;
- break;
- }
- }
- }
-
- if (skdev->state == SKD_DRVR_STATE_PAUSING &&
- skd_in_flight(skdev) == 0) {
- skdev->state = SKD_DRVR_STATE_PAUSED;
- wake_up_interruptible(&skdev->waitq);
- }
-
- return rc;
-}
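The completion ring above uses a cycle value rather than a producer index: the consumer walks entries while the entry's cycle matches its expected value, and bumps the expected value (8-bit wrap) each time the index wraps around the ring. A host-side model of that consumer loop (illustrative; the entry layout is a stand-in for fit_completion_entry_v1):

/* Host-side model of the cycle-bit completion-queue consumer above (illustrative). */
#include <stdio.h>
#include <stdint.h>

#define N_ENTRY 8

struct cqe { uint8_t cycle; uint16_t tag; };

int main(void)
{
	struct cqe cq[N_ENTRY] = { { 0 } };
	unsigned ix = 0;
	uint8_t cycle = 1;	/* initial value, as in skd_reset_skcomp() */

	/* pretend the device posted three completions in cycle 1 */
	for (int i = 0; i < 3; i++)
		cq[i] = (struct cqe){ .cycle = 1, .tag = (uint16_t)(0x100 + i) };

	while (cq[ix].cycle == cycle) {	/* stop at the first stale entry */
		printf("completed tag %#x\n", cq[ix].tag);
		if (++ix >= N_ENTRY) {	/* wrapped: expect the next cycle value */
			ix = 0;
			cycle++;	/* 8-bit wrap-around, as in the driver */
		}
	}
	return 0;
}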
-
-static void skd_complete_other(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr)
-{
- u32 req_id = 0;
- u32 req_table;
- u32 req_slot;
- struct skd_special_context *skspcl;
-
- lockdep_assert_held(&skdev->lock);
-
- req_id = skcomp->tag;
- req_table = req_id & SKD_ID_TABLE_MASK;
- req_slot = req_id & SKD_ID_SLOT_MASK;
-
- dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
- req_id, req_slot);
-
- /*
- * Based on the request id, determine how to dispatch this completion.
- * This switch/case finds the good cases and forwards the
- * completion entry. Errors are reported below the switch.
- */
- switch (req_table) {
- case SKD_ID_RW_REQUEST:
- /*
- * The caller, skd_isr_completion_posted() above,
- * handles r/w requests. The only way we get here
- * is if the req_slot is out of bounds.
- */
- break;
-
- case SKD_ID_INTERNAL:
- if (req_slot == 0) {
- skspcl = &skdev->internal_skspcl;
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_internal(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
-
- case SKD_ID_FIT_MSG:
- /*
- * These ids should never appear in a completion record.
- */
- break;
-
- default:
- /*
- * These ids should never appear anywhere.
- */
- break;
- }
-
- /*
- * If we get here it is a bad or stale id.
- */
-}
-
-static void skd_reset_skcomp(struct skd_device *skdev)
-{
- memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
-
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle = 1;
-}
-
-/*
- *****************************************************************************
- * INTERRUPTS
- *****************************************************************************
- */
-static void skd_completion_worker(struct work_struct *work)
-{
- struct skd_device *skdev =
- container_of(work, struct skd_device, completion_worker);
- unsigned long flags;
- int flush_enqueued = 0;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- /*
- * Pass in limit=0, which means no limit:
- * process everything in the compq.
- */
- skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- schedule_work(&skdev->start_queue);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
-static void skd_isr_msg_from_dev(struct skd_device *skdev);
-
-static irqreturn_t
-skd_isr(int irq, void *ptr)
-{
- struct skd_device *skdev = ptr;
- u32 intstat;
- u32 ack;
- int rc = 0;
- int deferred = 0;
- int flush_enqueued = 0;
-
- spin_lock(&skdev->lock);
-
- for (;;) {
- intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
-
- ack = FIT_INT_DEF_MASK;
- ack &= intstat;
-
- dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
- ack);
-
- /* As long as there is an interrupt pending on the device, keep
- * running the loop. When none remain, get out, but if we've never
- * done any processing, call the completion handler?
- */
- if (ack == 0) {
- /* No interrupts on device, but run the completion
- * processor anyway?
- */
- if (rc == 0)
- if (likely (skdev->state
- == SKD_DRVR_STATE_ONLINE))
- deferred = 1;
- break;
- }
-
- rc = IRQ_HANDLED;
-
- SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
-
- if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
- (skdev->state != SKD_DRVR_STATE_STOPPING))) {
- if (intstat & FIT_ISH_COMPLETION_POSTED) {
- /*
- * If we have already deferred completion
- * processing, don't bother running it again
- */
- if (deferred == 0)
- deferred =
- skd_isr_completion_posted(skdev,
- skd_isr_comp_limit, &flush_enqueued);
- }
-
- if (intstat & FIT_ISH_FW_STATE_CHANGE) {
- skd_isr_fwstate(skdev);
- if (skdev->state == SKD_DRVR_STATE_FAULT ||
- skdev->state ==
- SKD_DRVR_STATE_DISAPPEARED) {
- spin_unlock(&skdev->lock);
- return rc;
- }
- }
-
- if (intstat & FIT_ISH_MSG_FROM_DEV)
- skd_isr_msg_from_dev(skdev);
- }
- }
-
- if (unlikely(flush_enqueued))
- schedule_work(&skdev->start_queue);
-
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- spin_unlock(&skdev->lock);
-
- return rc;
-}
-
-static void skd_drive_fault(struct skd_device *skdev)
-{
- skdev->state = SKD_DRVR_STATE_FAULT;
- dev_err(&skdev->pdev->dev, "Drive FAULT\n");
-}
-
-static void skd_drive_disappeared(struct skd_device *skdev)
-{
- skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
-}
-
-static void skd_isr_fwstate(struct skd_device *skdev)
-{
- u32 sense;
- u32 state;
- u32 mtd;
- int prev_driver_state = skdev->state;
-
- sense = SKD_READL(skdev, FIT_STATUS);
- state = sense & FIT_SR_DRIVE_STATE_MASK;
-
- dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
-
- skdev->drive_state = state;
-
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_INIT:
- if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
- skd_disable_interrupts(skdev);
- break;
- }
- if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev);
- if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
- skdev->timer_countdown = SKD_STARTING_TIMO;
- skdev->state = SKD_DRVR_STATE_STARTING;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_SR_DRIVE_ONLINE:
- skdev->cur_max_queue_depth = skd_max_queue_depth;
- if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
- skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
-
- skdev->queue_low_water_mark =
- skdev->cur_max_queue_depth * 2 / 3 + 1;
- if (skdev->queue_low_water_mark < 1)
- skdev->queue_low_water_mark = 1;
- dev_info(&skdev->pdev->dev,
- "Queue depth limit=%d dev=%d lowat=%d\n",
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth,
- skdev->queue_low_water_mark);
-
- skd_refresh_device_data(skdev);
- break;
-
- case FIT_SR_DRIVE_BUSY:
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- skd_quiesce_dev(skdev);
- break;
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- /* set the timer for 3 seconds; we'll abort any unfinished
- * commands after it expires
- */
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- schedule_work(&skdev->start_queue);
- break;
- case FIT_SR_DRIVE_BUSY_ERASE:
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_OFFLINE:
- skdev->state = SKD_DRVR_STATE_IDLE;
- break;
- case FIT_SR_DRIVE_SOFT_RESET:
- switch (skdev->state) {
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- /* Expected by a caller of skd_soft_reset() */
- break;
- default:
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- break;
- }
- break;
- case FIT_SR_DRIVE_FW_BOOTING:
- dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
-
- case FIT_SR_DRIVE_DEGRADED:
- case FIT_SR_PCIE_LINK_DOWN:
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- break;
-
- case FIT_SR_DRIVE_FAULT:
- skd_drive_fault(skdev);
- skd_recover_requests(skdev);
- schedule_work(&skdev->start_queue);
- break;
-
- /* PCIe bus returned all Fs? */
- case 0xFF:
- dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
- sense);
- skd_drive_disappeared(skdev);
- skd_recover_requests(skdev);
- schedule_work(&skdev->start_queue);
- break;
- default:
- /*
- * Unknown FW state. Wait for a state we recognize.
- */
- break;
- }
- dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
-}
-
-static bool skd_recover_request(struct request *req, void *data, bool reserved)
-{
- struct skd_device *const skdev = data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
-
- if (skreq->state != SKD_REQ_STATE_BUSY)
- return true;
-
- skd_log_skreq(skdev, skreq, "recover");
-
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->status = BLK_STS_IOERR;
- blk_mq_complete_request(req);
- return true;
-}
-
-static void skd_recover_requests(struct skd_device *skdev)
-{
- blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
-}
-
-static void skd_isr_msg_from_dev(struct skd_device *skdev)
-{
- u32 mfd;
- u32 mtd;
- u32 data;
-
- mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
-
- dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
- skdev->last_mtd);
-
- /* ignore any mtd that is an ack for something we didn't send */
- if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
- return;
-
- switch (FIT_MXD_TYPE(mfd)) {
- case FIT_MTD_FITFW_INIT:
- skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
-
- if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- dev_err(&skdev->pdev->dev, "protocol mismatch\n");
- dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
- skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
- dev_err(&skdev->pdev->dev, " please upgrade driver\n");
- skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_GET_CMDQ_DEPTH:
- skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
- SKD_N_COMPLETION_ENTRY);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_SET_COMPQ_DEPTH:
- SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_SET_COMPQ_ADDR:
- skd_reset_skcomp(skdev);
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_HOST_ID:
- /* hardware interface overflows in y2106 */
- skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
- data = skdev->connect_time_stamp & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
- skdev->drive_jiffies = FIT_MXD_DATA(mfd);
- data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
- skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
- mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
-
- dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
- skdev->connect_time_stamp, skdev->drive_jiffies);
- break;
-
- case FIT_MTD_ARM_QUEUE:
- skdev->last_mtd = 0;
- /*
- * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
- */
- break;
-
- default:
- break;
- }
-}
-
-static void skd_disable_interrupts(struct skd_device *skdev)
-{
- u32 sense;
-
- sense = SKD_READL(skdev, FIT_CONTROL);
- sense &= ~FIT_CR_ENABLE_INTERRUPTS;
- SKD_WRITEL(skdev, sense, FIT_CONTROL);
- dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
-
- /* Note that all 1s are written. A 1-bit means
- * disable, a 0-bit means enable.
- */
- SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
-}
-
-static void skd_enable_interrupts(struct skd_device *skdev)
-{
- u32 val;
-
- /* unmask interrupts first */
- val = FIT_ISH_FW_STATE_CHANGE +
- FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
-
- /* Note that the complement of the mask is written. A 1-bit means
- * disable, a 0-bit means enable. */
- SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
-
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= FIT_CR_ENABLE_INTERRUPTS;
- dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
-}
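Since the interrupt mask register is active-low (a 1-bit disables, a 0-bit enables), enabling a set of sources means writing the complement of their OR. A tiny check of that inversion (illustrative; the ISH_* bit positions here are hypothetical stand-ins for the FIT_ISH_* constants):

/* Illustrative check of the active-low interrupt mask written above. */
#include <stdio.h>
#include <stdint.h>

#define ISH_FW_STATE_CHANGE    0x1u	/* hypothetical bit values */
#define ISH_COMPLETION_POSTED  0x2u
#define ISH_MSG_FROM_DEV       0x4u

int main(void)
{
	uint32_t enabled = ISH_FW_STATE_CHANGE | ISH_COMPLETION_POSTED |
			   ISH_MSG_FROM_DEV;
	uint32_t mask = ~enabled;	/* 1-bit disables, 0-bit enables */

	printf("mask=%#x (bits 0-2 clear, all others set)\n", mask);
	return 0;	/* prints mask=0xfffffff8 */
}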
-
-/*
- *****************************************************************************
- * START, STOP, RESTART, QUIESCE, UNQUIESCE
- *****************************************************************************
- */
-
-static void skd_soft_reset(struct skd_device *skdev)
-{
- u32 val;
-
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= (FIT_CR_SOFT_RESET);
- dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
-}
-
-static void skd_start_device(struct skd_device *skdev)
-{
- unsigned long flags;
- u32 sense;
- u32 state;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
-
- sense = SKD_READL(skdev, FIT_STATUS);
-
- dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
-
- state = sense & FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
-
- skdev->state = SKD_DRVR_STATE_STARTING;
- skdev->timer_countdown = SKD_STARTING_TIMO;
-
- skd_enable_interrupts(skdev);
-
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_OFFLINE:
- dev_err(&skdev->pdev->dev, "Drive offline...\n");
- break;
-
- case FIT_SR_DRIVE_FW_BOOTING:
- dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
-
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_BUSY_ERASE:
- dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_INIT:
- case FIT_SR_DRIVE_ONLINE:
- skd_soft_reset(skdev);
- break;
-
- case FIT_SR_DRIVE_BUSY:
- dev_err(&skdev->pdev->dev, "Drive Busy...\n");
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_SOFT_RESET:
- dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
- break;
-
- case FIT_SR_DRIVE_FAULT:
- /* Fault state is bad...soft reset won't do it...
- * Hard reset, maybe, but does it work on device?
- * For now, just fault so the system doesn't hang.
- */
- skd_drive_fault(skdev);
- /* start the queue so we can respond with errors to requests */
- dev_dbg(&skdev->pdev->dev, "starting queue\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case 0xFF:
- /* Most likely the device isn't there or isn't responding
- * to the BAR1 addresses. */
- skd_drive_disappeared(skdev);
- /* start the queue so we can respond with errors to requests */
- dev_dbg(&skdev->pdev->dev,
- "starting queue to error-out reqs\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- default:
- dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
- skdev->drive_state);
- break;
- }
-
- state = SKD_READL(skdev, FIT_CONTROL);
- dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_HW_VERSION);
- dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
-static void skd_stop_device(struct skd_device *skdev)
-{
- unsigned long flags;
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- u32 dev_state;
- int i;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
- goto stop_out;
- }
-
- if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
- goto stop_out;
- }
-
- skdev->state = SKD_DRVR_STATE_SYNCING;
- skdev->sync_done = 0;
-
- skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- wait_event_interruptible_timeout(skdev->waitq,
- (skdev->sync_done), (10 * HZ));
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- switch (skdev->sync_done) {
- case 0:
- dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
- break;
- case 1:
- dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
- break;
- default:
- dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
- }
-
-stop_out:
- skdev->state = SKD_DRVR_STATE_STOPPING;
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- skd_kill_timer(skdev);
-
- spin_lock_irqsave(&skdev->lock, flags);
- skd_disable_interrupts(skdev);
-
- /* ensure all ints on device are cleared */
- /* soft reset the device to unload with a clean slate */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
- SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- /* poll every 100ms, 1 second timeout */
- for (i = 0; i < 10; i++) {
- dev_state =
- SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
- if (dev_state == FIT_SR_DRIVE_INIT)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100));
- }
-
- if (dev_state != FIT_SR_DRIVE_INIT)
- dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
- dev_state);
-}
-
-/* assume spinlock is held */
-static void skd_restart_device(struct skd_device *skdev)
-{
- u32 state;
-
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
-
- state = SKD_READL(skdev, FIT_STATUS);
-
- dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
-
- state &= FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
-
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- skdev->timer_countdown = SKD_RESTARTING_TIMO;
-
- skd_soft_reset(skdev);
-}
-
-/* assume spinlock is held */
-static int skd_quiesce_dev(struct skd_device *skdev)
-{
- int rc = 0;
-
- switch (skdev->state) {
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- dev_dbg(&skdev->pdev->dev, "stopping queue\n");
- blk_mq_stop_hw_queues(skdev->queue);
- break;
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_RESUMING:
- default:
- rc = -EINVAL;
- dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
- skdev->state);
- }
- return rc;
-}
-
-/* assume spinlock is held */
-static int skd_unquiesce_dev(struct skd_device *skdev)
-{
- int prev_driver_state = skdev->state;
-
- skd_log_skdev(skdev, "unquiesce");
- if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
- return 0;
- }
- if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
-		/*
-		 * If there has been a state change to something other
-		 * than ONLINE, rely on the next controller state change
-		 * to come back online and restart the queue.
-		 * The BUSY state means the driver is ready to continue
-		 * normal processing but is waiting for the controller
-		 * to become available.
-		 */
- skdev->state = SKD_DRVR_STATE_BUSY;
- dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
- return 0;
- }
-
-	/*
-	 * The drive has just come online; the driver is either in
-	 * startup, paused performing a task, or busy waiting for
-	 * hardware.
-	 */
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- skdev->state = SKD_DRVR_STATE_ONLINE;
- dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- dev_dbg(&skdev->pdev->dev,
- "**** device ONLINE...starting block queue\n");
- dev_dbg(&skdev->pdev->dev, "starting queue\n");
- dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = 1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- dev_dbg(&skdev->pdev->dev,
- "**** driver state %d, not implemented\n",
- skdev->state);
- return -EBUSY;
- }
- return 0;
-}
-
-/*
- *****************************************************************************
- * PCIe MSI/MSI-X INTERRUPT HANDLERS
- *****************************************************************************
- */
-
-static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
- skd_isr_fwstate(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- int flush_enqueued = 0;
- int deferred;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
- deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
- &flush_enqueued);
- if (flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
- skd_isr_msg_from_dev(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-/*
- *****************************************************************************
- * PCIe MSI/MSI-X SETUP
- *****************************************************************************
- */
-
-struct skd_msix_entry {
- char isr_name[30];
-};
-
-struct skd_init_msix_entry {
- const char *name;
- irq_handler_t handler;
-};
-
-#define SKD_MAX_MSIX_COUNT 13
-#define SKD_MIN_MSIX_COUNT 7
-#define SKD_BASE_MSIX_IRQ 4
-
-static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
- { "(DMA 0)", skd_reserved_isr },
- { "(DMA 1)", skd_reserved_isr },
- { "(DMA 2)", skd_reserved_isr },
- { "(DMA 3)", skd_reserved_isr },
- { "(State Change)", skd_statec_isr },
- { "(COMPL_Q)", skd_comp_q },
- { "(MSG)", skd_msg_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Queue Full 0)", skd_qfull_isr },
- { "(Queue Full 1)", skd_qfull_isr },
- { "(Queue Full 2)", skd_qfull_isr },
- { "(Queue Full 3)", skd_qfull_isr },
-};
-
-static int skd_acquire_msix(struct skd_device *skdev)
-{
- int i, rc;
- struct pci_dev *pdev = skdev->pdev;
-
- rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
- PCI_IRQ_MSIX);
- if (rc < 0) {
- dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
- goto out;
- }
-
- skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
- sizeof(struct skd_msix_entry), GFP_KERNEL);
- if (!skdev->msix_entries) {
- rc = -ENOMEM;
- dev_err(&skdev->pdev->dev, "msix table allocation error\n");
- goto out;
- }
-
- /* Enable MSI-X vectors for the base queue */
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
- struct skd_msix_entry *qentry = &skdev->msix_entries[i];
-
- snprintf(qentry->isr_name, sizeof(qentry->isr_name),
- "%s%d-msix %s", DRV_NAME, skdev->devno,
- msix_entries[i].name);
-
- rc = devm_request_irq(&skdev->pdev->dev,
- pci_irq_vector(skdev->pdev, i),
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
- if (rc) {
- dev_err(&skdev->pdev->dev,
- "Unable to register(%d) MSI-X handler %d: %s\n",
- rc, i, qentry->isr_name);
- goto msix_out;
- }
- }
-
- dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
- SKD_MAX_MSIX_COUNT);
- return 0;
-
-msix_out:
- while (--i >= 0)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
-out:
- kfree(skdev->msix_entries);
- skdev->msix_entries = NULL;
- return rc;
-}
-
-static int skd_acquire_irq(struct skd_device *skdev)
-{
- struct pci_dev *pdev = skdev->pdev;
- unsigned int irq_flag = PCI_IRQ_LEGACY;
- int rc;
-
- if (skd_isr_type == SKD_IRQ_MSIX) {
- rc = skd_acquire_msix(skdev);
- if (!rc)
- return 0;
-
- dev_err(&skdev->pdev->dev,
- "failed to enable MSI-X, re-trying with MSI %d\n", rc);
- }
-
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
- skdev->devno);
-
- if (skd_isr_type != SKD_IRQ_LEGACY)
- irq_flag |= PCI_IRQ_MSI;
- rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
- if (rc < 0) {
- dev_err(&skdev->pdev->dev,
- "failed to allocate the MSI interrupt %d\n", rc);
- return rc;
- }
-
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- pdev->msi_enabled ? 0 : IRQF_SHARED,
- skdev->isr_name, skdev);
- if (rc) {
- pci_free_irq_vectors(pdev);
- dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
- rc);
- return rc;
- }
-
- return 0;
-}
-
-static void skd_release_irq(struct skd_device *skdev)
-{
- struct pci_dev *pdev = skdev->pdev;
-
- if (skdev->msix_entries) {
- int i;
-
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
- skdev);
- }
-
- kfree(skdev->msix_entries);
- skdev->msix_entries = NULL;
- } else {
- devm_free_irq(&pdev->dev, pdev->irq, skdev);
- }
-
- pci_free_irq_vectors(pdev);
-}
-
-/*
- *****************************************************************************
- * CONSTRUCT
- *****************************************************************************
- */
-
-static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
- dma_addr_t *dma_handle, gfp_t gfp,
- enum dma_data_direction dir)
-{
- struct device *dev = &skdev->pdev->dev;
- void *buf;
-
- buf = kmem_cache_alloc(s, gfp);
- if (!buf)
- return NULL;
- *dma_handle = dma_map_single(dev, buf,
- kmem_cache_size(s), dir);
- if (dma_mapping_error(dev, *dma_handle)) {
- kmem_cache_free(s, buf);
- buf = NULL;
- }
- return buf;
-}
-
-static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
- void *vaddr, dma_addr_t dma_handle,
- enum dma_data_direction dir)
-{
- if (!vaddr)
- return;
-
- dma_unmap_single(&skdev->pdev->dev, dma_handle,
- kmem_cache_size(s), dir);
- kmem_cache_free(s, vaddr);
-}
-
-static int skd_cons_skcomp(struct skd_device *skdev)
-{
- int rc = 0;
- struct fit_completion_entry_v1 *skcomp;
-
- dev_dbg(&skdev->pdev->dev,
-		"comp dma_alloc, total bytes %zd entries %d\n",
- SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
-
- skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address, GFP_KERNEL);
-
- if (skcomp == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skdev->skcomp_table = skcomp;
- skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
- sizeof(*skcomp) *
- SKD_N_COMPLETION_ENTRY);
-
-err_out:
- return rc;
-}
-
-static int skd_cons_skmsg(struct skd_device *skdev)
-{
- int rc = 0;
- u32 i;
-
- dev_dbg(&skdev->pdev->dev,
-		"skmsg_table kcalloc, struct %zu, count %u total %zu\n",
- sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
-
- skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context),
- GFP_KERNEL);
- if (skdev->skmsg_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
-
- skmsg = &skdev->skmsg_table[i];
-
- skmsg->id = i + SKD_ID_FIT_MSG;
-
- skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
- SKD_N_FITMSG_BYTES,
- &skmsg->mb_dma_address,
- GFP_KERNEL);
- if (skmsg->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
- (FIT_QCMD_ALIGN - 1),
- "not aligned: msg_buf %p mb_dma_address %pad\n",
- skmsg->msg_buf, &skmsg->mb_dma_address);
- }
-
-err_out:
- return rc;
-}
-
-static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
- u32 n_sg,
- dma_addr_t *ret_dma_addr)
-{
- struct fit_sg_descriptor *sg_list;
-
- sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
- GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
-
- if (sg_list != NULL) {
- uint64_t dma_address = *ret_dma_addr;
- u32 i;
-
- for (i = 0; i < n_sg - 1; i++) {
- uint64_t ndp_off;
- ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
-
- sg_list[i].next_desc_ptr = dma_address + ndp_off;
- }
- sg_list[i].next_desc_ptr = 0LL;
- }
-
- return sg_list;
-}
-
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- dma_addr_t dma_addr)
-{
- if (WARN_ON_ONCE(!sg_list))
- return;
-
- skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
- DMA_TO_DEVICE);
-}
-
-static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct skd_device *skdev = set->driver_data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->sg = (void *)(skreq + 1);
- sg_init_table(skreq->sg, skd_sgs_per_request);
- skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
- &skreq->sksg_dma_address);
-
- return skreq->sksg_list ? 0 : -ENOMEM;
-}
-
-static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx)
-{
- struct skd_device *skdev = set->driver_data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-
- skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
-}
-
-static int skd_cons_sksb(struct skd_device *skdev)
-{
- int rc = 0;
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->internal_skspcl;
-
- skspcl->req.id = 0 + SKD_ID_INTERNAL;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
- &skspcl->db_dma_address,
- GFP_DMA | __GFP_ZERO,
- DMA_BIDIRECTIONAL);
- if (skspcl->data_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
- &skspcl->mb_dma_address,
- GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
- &skspcl->req.sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- if (!skd_format_internal_skspcl(skdev)) {
- rc = -EINVAL;
- goto err_out;
- }
-
-err_out:
- return rc;
-}
-
-static const struct blk_mq_ops skd_mq_ops = {
- .queue_rq = skd_mq_queue_rq,
- .complete = skd_complete_rq,
- .timeout = skd_timed_out,
- .init_request = skd_init_request,
- .exit_request = skd_exit_request,
-};
-
-static int skd_cons_disk(struct skd_device *skdev)
-{
- int rc = 0;
- struct gendisk *disk;
- struct request_queue *q;
- unsigned long flags;
-
- disk = alloc_disk(SKD_MINORS_PER_DEVICE);
- if (!disk) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skdev->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
-
- disk->major = skdev->major;
- disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
- disk->fops = &skd_blockdev_ops;
- disk->private_data = skdev;
-
- memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
- skdev->tag_set.ops = &skd_mq_ops;
- skdev->tag_set.nr_hw_queues = 1;
- skdev->tag_set.queue_depth = skd_max_queue_depth;
- skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
- skdev->sgs_per_request * sizeof(struct scatterlist);
- skdev->tag_set.numa_node = NUMA_NO_NODE;
- skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
- skdev->tag_set.driver_data = skdev;
- rc = blk_mq_alloc_tag_set(&skdev->tag_set);
- if (rc)
- goto err_out;
- q = blk_mq_init_queue(&skdev->tag_set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(&skdev->tag_set);
- rc = PTR_ERR(q);
- goto err_out;
- }
- q->queuedata = skdev;
-
- skdev->queue = q;
- disk->queue = q;
-
- blk_queue_write_cache(q, true, true);
- blk_queue_max_segments(q, skdev->sgs_per_request);
- blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
-
- /* set optimal I/O size to 8KB */
- blk_queue_io_opt(q, 8192);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-
- blk_queue_rq_timeout(q, 8 * HZ);
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "stopping queue\n");
- blk_mq_stop_hw_queues(skdev->queue);
- spin_unlock_irqrestore(&skdev->lock, flags);
-
-err_out:
- return rc;
-}
-
-#define SKD_N_DEV_TABLE 16u
-static u32 skd_next_devno;
-
-static struct skd_device *skd_construct(struct pci_dev *pdev)
-{
- struct skd_device *skdev;
- int blk_major = skd_major;
- size_t size;
- int rc;
-
- skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
-
- if (!skdev) {
- dev_err(&pdev->dev, "memory alloc failure\n");
- return NULL;
- }
-
- skdev->state = SKD_DRVR_STATE_LOAD;
- skdev->pdev = pdev;
- skdev->devno = skd_next_devno++;
- skdev->major = blk_major;
- skdev->dev_max_queue_depth = 0;
-
- skdev->num_req_context = skd_max_queue_depth;
- skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->cur_max_queue_depth = 1;
- skdev->queue_low_water_mark = 1;
- skdev->proto_ver = 99;
- skdev->sgs_per_request = skd_sgs_per_request;
- skdev->dbg_level = skd_dbg_level;
-
- spin_lock_init(&skdev->lock);
-
- INIT_WORK(&skdev->start_queue, skd_start_queue);
- INIT_WORK(&skdev->completion_worker, skd_completion_worker);
-
- size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
- skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->msgbuf_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
- "skd-msgbuf: %d < %zd\n",
- kmem_cache_size(skdev->msgbuf_cache), size);
- size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
- skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->sglist_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
- "skd-sglist: %d < %zd\n",
- kmem_cache_size(skdev->sglist_cache), size);
- size = SKD_N_INTERNAL_BYTES;
- skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->databuf_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
- "skd-databuf: %d < %zd\n",
- kmem_cache_size(skdev->databuf_cache), size);
-
- dev_dbg(&skdev->pdev->dev, "skcomp\n");
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "skmsg\n");
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "sksb\n");
- rc = skd_cons_sksb(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "disk\n");
- rc = skd_cons_disk(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "VICTORY\n");
- return skdev;
-
-err_out:
- dev_dbg(&skdev->pdev->dev, "construct failed\n");
- skd_destruct(skdev);
- return NULL;
-}
-
-/*
- *****************************************************************************
- * DESTRUCT (FREE)
- *****************************************************************************
- */
-
-static void skd_free_skcomp(struct skd_device *skdev)
-{
- if (skdev->skcomp_table)
- dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- skdev->skcomp_table, skdev->cq_dma_address);
-
- skdev->skcomp_table = NULL;
- skdev->cq_dma_address = 0;
-}
-
-static void skd_free_skmsg(struct skd_device *skdev)
-{
- u32 i;
-
- if (skdev->skmsg_table == NULL)
- return;
-
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
-
- skmsg = &skdev->skmsg_table[i];
-
- if (skmsg->msg_buf != NULL) {
- dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
- skmsg->msg_buf,
- skmsg->mb_dma_address);
- }
- skmsg->msg_buf = NULL;
- skmsg->mb_dma_address = 0;
- }
-
- kfree(skdev->skmsg_table);
- skdev->skmsg_table = NULL;
-}
-
-static void skd_free_sksb(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
-
- skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
- skspcl->db_dma_address, DMA_BIDIRECTIONAL);
-
- skspcl->data_buf = NULL;
- skspcl->db_dma_address = 0;
-
- skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
- skspcl->mb_dma_address, DMA_TO_DEVICE);
-
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
-
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
-
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
-}
-
-static void skd_free_disk(struct skd_device *skdev)
-{
- struct gendisk *disk = skdev->disk;
-
- if (disk && (disk->flags & GENHD_FL_UP))
- del_gendisk(disk);
-
- if (skdev->queue) {
- blk_cleanup_queue(skdev->queue);
- skdev->queue = NULL;
- if (disk)
- disk->queue = NULL;
- }
-
- if (skdev->tag_set.tags)
- blk_mq_free_tag_set(&skdev->tag_set);
-
- put_disk(disk);
- skdev->disk = NULL;
-}
-
-static void skd_destruct(struct skd_device *skdev)
-{
- if (skdev == NULL)
- return;
-
- cancel_work_sync(&skdev->start_queue);
-
- dev_dbg(&skdev->pdev->dev, "disk\n");
- skd_free_disk(skdev);
-
- dev_dbg(&skdev->pdev->dev, "sksb\n");
- skd_free_sksb(skdev);
-
- dev_dbg(&skdev->pdev->dev, "skmsg\n");
- skd_free_skmsg(skdev);
-
- dev_dbg(&skdev->pdev->dev, "skcomp\n");
- skd_free_skcomp(skdev);
-
- kmem_cache_destroy(skdev->databuf_cache);
- kmem_cache_destroy(skdev->sglist_cache);
- kmem_cache_destroy(skdev->msgbuf_cache);
-
- dev_dbg(&skdev->pdev->dev, "skdev\n");
- kfree(skdev);
-}
-
-/*
- *****************************************************************************
- * BLOCK DEVICE (BDEV) GLUE
- *****************************************************************************
- */
-
-static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct skd_device *skdev;
- u64 capacity;
-
- skdev = bdev->bd_disk->private_data;
-
- dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
- bdev->bd_disk->disk_name, current->comm);
-
- if (skdev->read_cap_is_valid) {
- capacity = get_capacity(skdev->disk);
- geo->heads = 64;
- geo->sectors = 255;
- geo->cylinders = (capacity) / (255 * 64);
-
- return 0;
- }
- return -EIO;
-}
-
-static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
-{
- dev_dbg(&skdev->pdev->dev, "add_disk\n");
- device_add_disk(parent, skdev->disk, NULL);
- return 0;
-}
-
-static const struct block_device_operations skd_blockdev_ops = {
- .owner = THIS_MODULE,
- .getgeo = skd_bdev_getgeo,
-};
-
-/*
- *****************************************************************************
- * PCIe DRIVER GLUE
- *****************************************************************************
- */
-
-static const struct pci_device_id skd_pci_tbl[] = {
- { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { 0 } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
-
-static char *skd_pci_info(struct skd_device *skdev, char *str)
-{
- int pcie_reg;
-
- strcpy(str, "PCIe (");
- pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
-
- if (pcie_reg) {
-
- char lwstr[6];
- uint16_t pcie_lstat, lspeed, lwidth;
-
- pcie_reg += 0x12;
- pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
- lspeed = pcie_lstat & (0xF);
- lwidth = (pcie_lstat & 0x3F0) >> 4;
-
- if (lspeed == 1)
- strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
- strcat(str, "5.0GT/s ");
- else
- strcat(str, "<unknown> ");
- snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
- strcat(str, lwstr);
- }
- return str;
-}
-
-static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int i;
- int rc = 0;
- char pci_str[32];
- struct skd_device *skdev;
-
-	dev_dbg(&pdev->dev, "vendor=%04x device=%04x\n", pdev->vendor,
- pdev->device);
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
-
- if (!skd_major) {
- rc = register_blkdev(0, DRV_NAME);
- if (rc < 0)
- goto err_out_regions;
- BUG_ON(!rc);
- skd_major = rc;
- }
-
- skdev = skd_construct(pdev);
- if (skdev == NULL) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- skd_pci_info(skdev, pci_str);
- dev_info(&pdev->dev, "%s 64bit\n", pci_str);
-
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "bad enable of PCIe error reporting rc=%d\n", rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
-
- pci_set_drvdata(pdev, skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++) {
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- dev_err(&pdev->dev,
- "Unable to map adapter memory!\n");
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
- skdev->mem_size[i]);
- }
-
- rc = skd_acquire_irq(skdev);
- if (rc) {
- dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
- goto err_out_iounmap;
- }
-
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
-
- init_waitqueue_head(&skdev->waitq);
-
- skd_start_device(skdev);
-
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (skdev->gendisk_on),
- (SKD_START_WAIT_SECONDS * HZ));
- if (skdev->gendisk_on > 0) {
- /* device came on-line after reset */
- skd_bdev_attach(&pdev->dev, skdev);
- rc = 0;
- } else {
-		/*
-		 * We timed out; something is wrong with the device,
-		 * so don't add the disk structure.
-		 */
- dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
- rc);
-		/* if no other error occurred, report the timeout as -ENXIO */
- if (!rc)
- rc = -ENXIO;
- goto err_out_timer;
- }
-
- return rc;
-
-err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
-err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- skd_destruct(skdev);
-
-err_out_regions:
- pci_release_regions(pdev);
-
-err_out:
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- return rc;
-}
-
-static void skd_pci_remove(struct pci_dev *pdev)
-{
- int i;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return;
- }
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- skd_destruct(skdev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-}
-
-static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int i;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return -EIO;
- }
-
- skd_stop_device(skdev);
-
- skd_release_irq(skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- pci_release_regions(pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int skd_pci_resume(struct pci_dev *pdev)
-{
- int i;
- int rc = 0;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return -1;
- }
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
-
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "bad enable of PCIe error reporting rc=%d\n", rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
-
- for (i = 0; i < SKD_MAX_BARS; i++) {
-
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- dev_err(&pdev->dev, "Unable to map adapter memory!\n");
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
- skdev->mem_size[i]);
- }
- rc = skd_acquire_irq(skdev);
- if (rc) {
- dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
- goto err_out_iounmap;
- }
-
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
-
- init_waitqueue_head(&skdev->waitq);
-
- skd_start_device(skdev);
-
- return rc;
-
-err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
-err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
-err_out_regions:
- pci_release_regions(pdev);
-
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void skd_pci_shutdown(struct pci_dev *pdev)
-{
- struct skd_device *skdev;
-
- dev_err(&pdev->dev, "%s called\n", __func__);
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return;
- }
-
- dev_err(&pdev->dev, "calling stop\n");
- skd_stop_device(skdev);
-}
-
-static struct pci_driver skd_driver = {
- .name = DRV_NAME,
- .id_table = skd_pci_tbl,
- .probe = skd_pci_probe,
- .remove = skd_pci_remove,
- .suspend = skd_pci_suspend,
- .resume = skd_pci_resume,
- .shutdown = skd_pci_shutdown,
-};
-
-/*
- *****************************************************************************
- * LOGGING SUPPORT
- *****************************************************************************
- */
-
-const char *skd_drive_state_to_str(int state)
-{
- switch (state) {
- case FIT_SR_DRIVE_OFFLINE:
- return "OFFLINE";
- case FIT_SR_DRIVE_INIT:
- return "INIT";
- case FIT_SR_DRIVE_ONLINE:
- return "ONLINE";
- case FIT_SR_DRIVE_BUSY:
- return "BUSY";
- case FIT_SR_DRIVE_FAULT:
- return "FAULT";
- case FIT_SR_DRIVE_DEGRADED:
- return "DEGRADED";
- case FIT_SR_PCIE_LINK_DOWN:
-		return "LINK_DOWN";
- case FIT_SR_DRIVE_SOFT_RESET:
- return "SOFT_RESET";
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- return "NEED_FW";
- case FIT_SR_DRIVE_INIT_FAULT:
- return "INIT_FAULT";
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case FIT_SR_DRIVE_BUSY_ERASE:
- return "BUSY_ERASE";
- case FIT_SR_DRIVE_FW_BOOTING:
- return "FW_BOOTING";
- default:
- return "???";
- }
-}
-
-const char *skd_skdev_state_to_str(enum skd_drvr_state state)
-{
- switch (state) {
- case SKD_DRVR_STATE_LOAD:
- return "LOAD";
- case SKD_DRVR_STATE_IDLE:
- return "IDLE";
- case SKD_DRVR_STATE_BUSY:
- return "BUSY";
- case SKD_DRVR_STATE_STARTING:
- return "STARTING";
- case SKD_DRVR_STATE_ONLINE:
- return "ONLINE";
- case SKD_DRVR_STATE_PAUSING:
- return "PAUSING";
- case SKD_DRVR_STATE_PAUSED:
- return "PAUSED";
- case SKD_DRVR_STATE_RESTARTING:
- return "RESTARTING";
- case SKD_DRVR_STATE_RESUMING:
- return "RESUMING";
- case SKD_DRVR_STATE_STOPPING:
- return "STOPPING";
- case SKD_DRVR_STATE_SYNCING:
- return "SYNCING";
- case SKD_DRVR_STATE_FAULT:
- return "FAULT";
- case SKD_DRVR_STATE_DISAPPEARED:
- return "DISAPPEARED";
- case SKD_DRVR_STATE_BUSY_ERASE:
- return "BUSY_ERASE";
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- return "BUSY_IMMINENT";
- case SKD_DRVR_STATE_WAIT_BOOT:
- return "WAIT_BOOT";
-
- default:
- return "???";
- }
-}
-
-static const char *skd_skreq_state_to_str(enum skd_req_state state)
-{
- switch (state) {
- case SKD_REQ_STATE_IDLE:
- return "IDLE";
- case SKD_REQ_STATE_SETUP:
- return "SETUP";
- case SKD_REQ_STATE_BUSY:
- return "BUSY";
- case SKD_REQ_STATE_COMPLETED:
- return "COMPLETED";
- case SKD_REQ_STATE_TIMEOUT:
- return "TIMEOUT";
- default:
- return "???";
- }
-}
-
-static void skd_log_skdev(struct skd_device *skdev, const char *event)
-{
- dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
- dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
- skd_in_flight(skdev), skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
- skdev->skcomp_cycle, skdev->skcomp_ix);
-}
-
-static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event)
-{
- struct request *req = blk_mq_rq_from_pdu(skreq);
- u32 lba = blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
- dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
- skreq->fitmsg_id);
- dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
- skreq->data_dir, skreq->n_sg);
-
- dev_dbg(&skdev->pdev->dev,
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
- count, count, (int)rq_data_dir(req));
-}
-
-/*
- *****************************************************************************
- * MODULE GLUE
- *****************************************************************************
- */
-
-static int __init skd_init(void)
-{
- BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
- BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
- BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
- BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
- BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
- BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
- BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
- BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
-
- switch (skd_isr_type) {
- case SKD_IRQ_LEGACY:
- case SKD_IRQ_MSI:
- case SKD_IRQ_MSIX:
- break;
- default:
- pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
- skd_isr_type, SKD_IRQ_DEFAULT);
- skd_isr_type = SKD_IRQ_DEFAULT;
- }
-
- if (skd_max_queue_depth < 1 ||
- skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
- pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
- skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
- skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
- }
-
- if (skd_max_req_per_msg < 1 ||
- skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
- pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
- skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
- skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
- }
-
- if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
-		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
- skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
- skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
- }
-
- if (skd_dbg_level < 0 || skd_dbg_level > 2) {
- pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
- skd_dbg_level, 0);
- skd_dbg_level = 0;
- }
-
- if (skd_isr_comp_limit < 0) {
- pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
- skd_isr_comp_limit, 0);
- skd_isr_comp_limit = 0;
- }
-
- return pci_register_driver(&skd_driver);
-}
-
-static void __exit skd_exit(void)
-{
- pci_unregister_driver(&skd_driver);
-
- if (skd_major)
- unregister_blkdev(skd_major, DRV_NAME);
-}
-
-module_init(skd_init);
-module_exit(skd_exit);
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
deleted file mode 100644
index c30bb98c7cd2..000000000000
--- a/drivers/block/skd_s1120.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 STEC, Inc.
- * Copyright (c) 2017 Western Digital Corporation or its affiliates.
- */
-
-
-#ifndef SKD_S1120_H
-#define SKD_S1120_H
-
-/*
- * Q-channel, 64-bit r/w
- */
-#define FIT_Q_COMMAND 0x400u
-#define FIT_QCMD_QID_MASK (0x3 << 1)
-#define FIT_QCMD_QID0 (0x0 << 1)
-#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
-#define FIT_QCMD_QID1 (0x1 << 1)
-#define FIT_QCMD_QID2 (0x2 << 1)
-#define FIT_QCMD_QID3 (0x3 << 1)
-#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
-#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
-#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
-#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
-#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
-#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_ALIGN L1_CACHE_BYTES
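
The low bits of a queue doorbell write encode the queue id and message size alongside the (FIT_QCMD_ALIGN-aligned) DMA address of the FIT message. A minimal sketch of ringing the doorbell, assuming SKD_WRITEQ is the 64-bit counterpart of the SKD_WRITEL accessor used throughout the removed driver:

	u64 qcmd = skmsg->mb_dma_address;	/* must be FIT_QCMD_ALIGN-aligned */

	qcmd |= FIT_QCMD_QID_NORMAL;		/* queue 0 */
	qcmd |= FIT_QCMD_MSGSIZE_64;		/* 64-byte message units */
	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
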
-
-/*
- * Control, 32-bit r/w
- */
-#define FIT_CONTROL 0x500u
-#define FIT_CR_HARD_RESET (1u << 0u)
-#define FIT_CR_SOFT_RESET (1u << 1u)
-#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
-#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
-
-/*
- * Status, 32-bit, r/o
- */
-#define FIT_STATUS 0x510u
-#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
-#define FIT_SR_SIGNATURE (0xFF << 8)
-#define FIT_SR_PIO_DMA (1 << 16)
-#define FIT_SR_DRIVE_OFFLINE 0x00
-#define FIT_SR_DRIVE_INIT 0x01
-/* #define FIT_SR_DRIVE_READY 0x02 */
-#define FIT_SR_DRIVE_ONLINE 0x03
-#define FIT_SR_DRIVE_BUSY 0x04
-#define FIT_SR_DRIVE_FAULT 0x05
-#define FIT_SR_DRIVE_DEGRADED 0x06
-#define FIT_SR_PCIE_LINK_DOWN 0x07
-#define FIT_SR_DRIVE_SOFT_RESET 0x08
-#define FIT_SR_DRIVE_INIT_FAULT 0x09
-#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A
-#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
-#define FIT_SR_DRIVE_FW_BOOTING 0x0C
-#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
-#define FIT_SR_DEVICE_MISSING 0xFF
-#define FIT_SR__RESERVED 0xFFFFFF00u
-
-/*
- * FIT_STATUS - Status register data definition
- */
-#define FIT_SR_STATE_MASK (0xFF << 0)
-#define FIT_SR_SIGNATURE (0xFF << 8)
-#define FIT_SR_PIO_DMA (1 << 16)
-
-/*
- * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
- */
-#define FIT_INT_STATUS_HOST 0x520u
-#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
-#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
-#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
-#define FIT_ISH_UNDEFINED_3 (1u << 3u)
-#define FIT_ISH_UNDEFINED_4 (1u << 4u)
-#define FIT_ISH_Q0_FULL (1u << 5u)
-#define FIT_ISH_Q1_FULL (1u << 6u)
-#define FIT_ISH_Q2_FULL (1u << 7u)
-#define FIT_ISH_Q3_FULL (1u << 8u)
-#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
-#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
-
-#define FIT_INT_DEF_MASK \
- (FIT_ISH_FW_STATE_CHANGE | \
- FIT_ISH_COMPLETION_POSTED | \
- FIT_ISH_MSG_FROM_DEV | \
- FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL | \
- FIT_ISH_QCMD_FIFO_OVERRUN | \
- FIT_ISH_BAD_EXP_ROM_READ)
-
-#define FIT_INT_QUEUE_FULL \
- (FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL)
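
Given the w1c semantics noted above, a handler acknowledges only the bits it has actually serviced: writing a 1 clears that bit while leaving other pending sources intact. A sketch using the removed driver's register accessors:

	u32 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

	if (intstat & FIT_ISH_COMPLETION_POSTED)
		/* write-1-to-clear: only this bit is acknowledged */
		SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED,
			   FIT_INT_STATUS_HOST);
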
-
-#define MSI_MSG_NWL_ERROR_0 0x00000000
-#define MSI_MSG_NWL_ERROR_1 0x00000001
-#define MSI_MSG_NWL_ERROR_2 0x00000002
-#define MSI_MSG_NWL_ERROR_3 0x00000003
-#define MSI_MSG_STATE_CHANGE 0x00000004
-#define MSI_MSG_COMPLETION_POSTED 0x00000005
-#define MSI_MSG_MSG_FROM_DEV 0x00000006
-#define MSI_MSG_RESERVED_0 0x00000007
-#define MSI_MSG_RESERVED_1 0x00000008
-#define MSI_MSG_QUEUE_0_FULL 0x00000009
-#define MSI_MSG_QUEUE_1_FULL 0x0000000A
-#define MSI_MSG_QUEUE_2_FULL 0x0000000B
-#define MSI_MSG_QUEUE_3_FULL 0x0000000C
-
-#define FIT_INT_RESERVED_MASK \
- (FIT_ISH_UNDEFINED_3 | \
- FIT_ISH_UNDEFINED_4)
-
-/*
- * Interrupt mask, 32-bit r/w
- * Bit definitions are the same as FIT_INT_STATUS_HOST
- */
-#define FIT_INT_MASK_HOST 0x528u
-
-/*
- * Message to device, 32-bit r/w
- */
-#define FIT_MSG_TO_DEVICE 0x540u
-
-/*
- * Message from device, 32-bit, r/o
- */
-#define FIT_MSG_FROM_DEVICE 0x548u
-
-/*
- * 32-bit messages to/from device, composition/extraction macros
- */
-#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
- ((((TYPE) & 0xFFu) << 24u) | \
- (((PARAM) & 0xFFu) << 16u) | \
- (((DATA) & 0xFFFFu) << 0u))
-#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
-#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
-#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
-
-/*
- * Types of messages to/from device
- */
-#define FIT_MTD_FITFW_INIT 0x01u
-#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
-#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
-#define FIT_MTD_SET_COMPQ_ADDR 0x04u
-#define FIT_MTD_ARM_QUEUE 0x05u
-#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
-#define FIT_MFD_SMART_EXCEEDED 0x10u
-#define FIT_MFD_POWER_DOWN 0x11u
-#define FIT_MFD_OFFLINE 0x12u
-#define FIT_MFD_ONLINE 0x13u
-#define FIT_MFD_FW_RESTARTING 0x14u
-#define FIT_MFD_PM_ACTIVE 0x15u
-#define FIT_MFD_PM_STANDBY 0x16u
-#define FIT_MFD_PM_SLEEP 0x17u
-#define FIT_MFD_CMD_PROGRESS 0x18u
-
-#define FIT_MTD_DEBUG 0xFEu
-#define FIT_MFD_DEBUG 0xFFu
-
-#define FIT_MFD_MASK (0xFFu)
-#define FIT_MFD_DATA_MASK (0xFFu)
-#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
-#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
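
Composing and decoding these 32-bit messages with the FIT_MXD_* macros looks like the following sketch (mirroring how the removed driver armed a command queue; queue_low_water_mark comes from struct skd_device):

	u32 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0,
			       skdev->queue_low_water_mark);

	SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);

	/* and in the other direction, decoding a device message: */
	u32 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	u8 type = FIT_MXD_TYPE(mfd);	/* e.g. FIT_MFD_ONLINE */
	u16 data = FIT_MXD_DATA(mfd);
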
-
-/*
- * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
- * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
- * (was Response buffer in docs)
- */
-#define FIT_MSG_TO_DEVICE_ARG 0x580u
-
-/*
- * Hardware (ASIC) version, 32-bit r/o
- */
-#define FIT_HW_VERSION 0x588u
-
-/*
- * Scatter/gather list descriptor.
- * 32-bytes and must be aligned on a 32-byte boundary.
- * All fields are in little endian order.
- */
-struct fit_sg_descriptor {
- uint32_t control;
- uint32_t byte_count;
- uint64_t host_side_addr;
- uint64_t dev_side_addr;
- uint64_t next_desc_ptr;
-};
-
-#define FIT_SGD_CONTROL_NOT_LAST 0x000u
-#define FIT_SGD_CONTROL_LAST 0x40Eu
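
Chaining descriptors, as the removed driver's skd_cons_sg_list() set up above, means every element but the last points at its successor and carries FIT_SGD_CONTROL_NOT_LAST; the final element carries FIT_SGD_CONTROL_LAST and a null next pointer. A sketch with made-up addresses and lengths:

	sgd[0].control        = FIT_SGD_CONTROL_NOT_LAST;
	sgd[0].byte_count     = 4096;
	sgd[0].host_side_addr = buf0_dma;	/* placeholder DMA address */
	sgd[0].dev_side_addr  = 0;
	sgd[0].next_desc_ptr  = sgd_dma + sizeof(struct fit_sg_descriptor);

	sgd[1].control        = FIT_SGD_CONTROL_LAST;
	sgd[1].byte_count     = 512;
	sgd[1].host_side_addr = buf1_dma;	/* placeholder DMA address */
	sgd[1].dev_side_addr  = 0;
	sgd[1].next_desc_ptr  = 0;
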
-
-/*
- * Header at the beginning of a FIT message. The header
- * is followed by SSDI requests each 64 bytes.
- * A FIT message can be up to 512 bytes long and must start
- * on a 64-byte boundary.
- */
-struct fit_msg_hdr {
- uint8_t protocol_id;
- uint8_t num_protocol_cmds_coalesced;
- uint8_t _reserved[62];
-};
-
-#define FIT_PROTOCOL_ID_FIT 1
-#define FIT_PROTOCOL_ID_SSDI 2
-#define FIT_PROTOCOL_ID_SOFIT 3
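
Filling in this layout, roughly as the removed driver's skd_format_internal_skspcl() did (sketch only; struct skd_msg_buf places the header at offset 0 with 64-byte request slots after it, per the BUILD_BUG_ON checks in skd_init()):

	struct skd_msg_buf *mb = skspcl->msg_buf;

	memset(mb, 0, SKD_N_FITMSG_BYTES);
	mb->fmh.protocol_id = FIT_PROTOCOL_ID_SOFIT;
	mb->fmh.num_protocol_cmds_coalesced = 1;
	/* the first 64-byte SSDI request immediately follows the header */
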
-
-
-#define FIT_PROTOCOL_MINOR_VER(mtd_val) (((mtd_val) >> 16) & 0xF)
-#define FIT_PROTOCOL_MAJOR_VER(mtd_val) (((mtd_val) >> 20) & 0xF)
-
-/*
- * Format of a completion entry. The completion queue is circular
- * and must have at least as many entries as the maximum number
- * of commands that may be issued to the device.
- *
- * There are no head/tail pointers. The cycle value is used to
- * infer the presence of new completion records.
- * Initially the cycle in all entries is 0, the index is 0, and
- * the cycle value to expect is 1. When completions are added
- * their cycle values are set to 1. When the index wraps the
- * cycle value to expect is incremented.
- *
- * Command_context is opaque and taken verbatim from the SSDI command.
- * All other fields are big endian.
- */
-#define FIT_PROTOCOL_VERSION_0 0
-
-/*
- * Protocol major version 1 completion entry.
- * The major protocol version is found in bits
- * 20-23 of the FIT_MTD_FITFW_INIT response.
- */
-struct fit_completion_entry_v1 {
- __be32 num_returned_bytes;
- uint16_t tag;
- uint8_t status; /* SCSI status */
- uint8_t cycle;
-};
-#define FIT_PROTOCOL_VERSION_1 1
-#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
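
A minimal consumer loop for the cycle scheme described above (illustrative only; the real logic lived in skd_isr_completion_posted(), with ix and expect_cycle kept as skcomp_ix/skcomp_cycle in struct skd_device):

	while (skcomp[ix].cycle == expect_cycle) {
		/* ... handle skcomp[ix]: tag, SCSI status, returned bytes ... */
		if (++ix >= SKD_N_COMPLETION_ENTRY) {
			ix = 0;
			expect_cycle++;	/* wrapped: expect the next cycle value */
		}
	}
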
-
-struct fit_comp_error_info {
- uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */
- uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
- uint8_t reserved0; /* 01: Obsolete field */
- uint8_t key:4; /* 02: Bits0-3 indicate the sense key. */
- uint8_t reserved2:1; /* 02: Reserved bit. */
- uint8_t bad_length:1; /* 02: Incorrect Length Indicator */
- uint8_t end_medium:1; /* 02: End of Medium */
- uint8_t file_mark:1; /* 02: Filemark */
- uint8_t info[4]; /* 03: */
- uint8_t reserved1; /* 07: Additional Sense Length */
- uint8_t cmd_spec[4]; /* 08: Command Specific Information */
- uint8_t code; /* 0C: Additional Sense Code */
- uint8_t qual; /* 0D: Additional Sense Code Qualifier */
- uint8_t fruc; /* 0E: Field Replaceable Unit Code */
- uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */
- uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */
- uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
- uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
- uint16_t uec; /* 14: Additional Sense Bytes */
- uint64_t per __packed; /* 16: Additional Sense Bytes */
- uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
-};
-
-
-/* Task management constants */
-#define SOFT_TASK_SIMPLE 0x00
-#define SOFT_TASK_HEAD_OF_QUEUE 0x01
-#define SOFT_TASK_ORDERED 0x02
-
-/* Version zero has the last 32 bits reserved;
- * version one uses the last 32 bits as sg_list_len_bytes.
- */
-struct skd_command_header {
- __be64 sg_list_dma_address;
- uint16_t tag;
- uint8_t attribute;
- uint8_t add_cdb_len; /* In 32 bit words */
- __be32 sg_list_len_bytes;
-};
-
-struct skd_scsi_request {
- struct skd_command_header hdr;
- unsigned char cdb[16];
-/* unsigned char _reserved[16]; */
-};
-
-struct driver_inquiry_data {
- uint8_t peripheral_device_type:5;
- uint8_t qualifier:3;
- uint8_t page_code;
- __be16 page_length;
- __be16 pcie_bus_number;
- uint8_t pcie_device_number;
- uint8_t pcie_function_number;
- uint8_t pcie_link_speed;
- uint8_t pcie_link_lanes;
- __be16 pcie_vendor_id;
- __be16 pcie_device_id;
- __be16 pcie_subsystem_vendor_id;
- __be16 pcie_subsystem_device_id;
- uint8_t reserved1[2];
- uint8_t reserved2[3];
- uint8_t driver_version_length;
- uint8_t driver_version[0x14];
-};
-
-#endif /* SKD_S1120_H */
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 4478eb7efee0..2cdf2771f8e8 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -539,7 +539,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
@@ -578,7 +578,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2b95d7b33b91..982732dbe82e 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -521,7 +521,7 @@ static int mm_check_plugged(struct cardinfo *card)
static blk_qc_t mm_submit_bio(struct bio *bio)
{
- struct cardinfo *card = bio->bi_disk->private_data;
+ struct cardinfo *card = bio->bi_bdev->bd_disk->private_data;
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 145606dc52db..b9fa3ef5b57c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -320,7 +320,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
- blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_put_request(req);
@@ -705,6 +705,7 @@ static int virtblk_probe(struct virtio_device *vdev)
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
+ unsigned int queue_depth;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -756,16 +757,18 @@ static int virtblk_probe(struct virtio_device *vdev)
}
/* Default queue sizing is to fill the ring. */
- if (!virtblk_queue_depth) {
- virtblk_queue_depth = vblk->vqs[0].vq->num_free;
+ if (likely(!virtblk_queue_depth)) {
+ queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- virtblk_queue_depth /= 2;
+ queue_depth /= 2;
+ } else {
+ queue_depth = virtblk_queue_depth;
}
memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
vblk->tag_set.ops = &virtio_mq_ops;
- vblk->tag_set.queue_depth = virtblk_queue_depth;
+ vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 9ebf53903d7b..1cdf09ff67b6 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -794,8 +794,13 @@ again:
pages[i]->persistent_gnt = persistent_gnt;
} else {
if (gnttab_page_cache_get(&ring->free_pages,
- &pages[i]->page))
- goto out_of_memory;
+ &pages[i]->page)) {
+ gnttab_page_cache_put(&ring->free_pages,
+ pages_to_gnt,
+ segs_to_map);
+ ret = -ENOMEM;
+ goto out;
+ }
addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page;
pages[i]->persistent_gnt = NULL;
@@ -811,10 +816,8 @@ again:
break;
}
- if (segs_to_map) {
+ if (segs_to_map)
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
- BUG_ON(ret);
- }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
@@ -830,7 +833,7 @@ again:
gnttab_page_cache_put(&ring->free_pages,
&pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ ret |= !ret;
goto next;
}
pages[seg_idx]->handle = map[new_map_idx].handle;
@@ -882,17 +885,18 @@ next:
}
segs_to_map = 0;
last_map = map_until;
- if (map_until != num)
+ if (!ret && map_until != num)
goto again;
- return ret;
-
-out_of_memory:
- pr_alert("%s: out of memory\n", __func__);
- gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
- for (i = last_map; i < num; i++)
+out:
+ for (i = last_map; i < num; i++) {
+ /* Don't zap current batch's valid persistent grants. */
+		if (i >= last_map + segs_to_map)
+ pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
- return -ENOMEM;
+ }
+
+ return ret;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
@@ -1322,9 +1326,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
-
- int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
- bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
if (unlikely(bio == NULL))
goto fail_put_bio;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 9860d4842f36..c2aaf690352c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -245,7 +245,7 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (req_prod - rsp_prod > size)
goto fail;
- err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0)
goto fail;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5265975b3fba..e1c6798889f4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e2933cb7a82a..a711a2e2a794 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -530,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
return len;
out:
- if (bitmap)
- kvfree(bitmap);
+ kvfree(bitmap);
if (bdev)
blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
@@ -1082,7 +1081,7 @@ static ssize_t mm_stat_show(struct device *dev,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
- pool_stats.pages_compacted,
+ atomic_long_read(&pool_stats.pages_compacted),
(u64)atomic64_read(&zram->stats.huge_pages),
(u64)atomic64_read(&zram->stats.huge_pages_since));
up_read(&zram->init_lock);
@@ -1596,7 +1595,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
{
- struct zram *zram = bio->bi_disk->private_data;
+ struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 41ff2071d7ef..88ce5f0ffc4b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
tlv = (struct intel_tlv *)skb->data;
switch (tlv->type) {
case INTEL_TLV_CNVI_TOP:
- version->cnvi_top =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvi_top = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVR_TOP:
- version->cnvr_top =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvr_top = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVI_BT:
- version->cnvi_bt =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvi_bt = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVR_BT:
- version->cnvr_bt =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvr_bt = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_DEV_REV_ID:
- version->dev_rev_id =
- __le16_to_cpu(get_unaligned_le16(tlv->val));
+ version->dev_rev_id = get_unaligned_le16(tlv->val);
break;
case INTEL_TLV_IMAGE_TYPE:
version->img_type = tlv->val[0];
break;
case INTEL_TLV_TIME_STAMP:
- version->timestamp =
- __le16_to_cpu(get_unaligned_le16(tlv->val));
+ version->timestamp = get_unaligned_le16(tlv->val);
break;
case INTEL_TLV_BUILD_TYPE:
version->build_type = tlv->val[0];
break;
case INTEL_TLV_BUILD_NUM:
- version->build_num =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->build_num = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_SECURE_BOOT:
version->secure_boot = tlv->val[0];
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 5f9f02795631..9872ef18f9fe 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -442,15 +442,15 @@ static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
}
switch ((&pkts[i])->lsize) {
- case 1:
- dlen = skb->data[(&pkts[i])->loff];
- break;
- case 2:
- dlen = get_unaligned_le16(skb->data +
+ case 1:
+ dlen = skb->data[(&pkts[i])->loff];
+ break;
+ case 2:
+ dlen = get_unaligned_le16(skb->data +
(&pkts[i])->loff);
- break;
- default:
- goto err_kfree_skb;
+ break;
+ default:
+ goto err_kfree_skb;
}
pad_size = skb->len - (&pkts[i])->hlen - dlen;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index f85a55add9be..25114f0d1319 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -94,6 +94,53 @@ out:
}
EXPORT_SYMBOL_GPL(qca_read_soc_version);
+static int qca_read_fw_build_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+ int build_lbl_len, err = 0;
+
+ bt_dev_dbg(hdev, "QCA read fw build info");
+
+ cmd = EDL_GET_BUILD_INFO_CMD;
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+ &cmd, 0, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Reading QCA fw build info failed (%d)",
+ err);
+ return err;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl) {
+ bt_dev_err(hdev, "QCA read fw build info with no header");
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_GET_BUILD_INFO_CMD) {
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+	build_lbl_len = edl->data[0];
+	if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+		memcpy(build_label, edl->data + 1, build_lbl_len);
+		*(build_label + build_lbl_len) = '\0';
+		/* Only report a build label that was actually received */
+		hci_set_fw_info(hdev, "%s", build_label);
+	}
+
+out:
+ kfree_skb(skb);
+ return err;
+}
+
static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
+ * VsMsftOpCode.
+ */
+ switch (soc_type) {
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ hci_set_msft_opcode(hdev, 0xFD70);
+ break;
+ default:
+ break;
+ }
+
/* Perform HCI reset */
err = qca_send_reset(hdev);
if (err < 0) {
@@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ if (soc_type == QCA_WCN3991) {
+ /* get fw build info */
+ err = qca_read_fw_build_info(hdev);
+ if (err < 0)
+ return err;
+ }
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
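
The build-info event parsed above carries a length-prefixed label: edl->data[0] holds the label length and the label bytes follow, without NUL termination on the wire. A sketch of that framing as the code consumes it:

	/* EDL build-info payload:
	 *   data[0]    -> label length N
	 *   data[1..N] -> label bytes (no trailing NUL on the wire)
	 */
	u8 n = edl->data[0];
	char label[QCA_FW_BUILD_VER_LEN];

	if (n <= sizeof(label) - 1) {
		memcpy(label, &edl->data[1], n);
		label[n] = '\0';	/* terminate before printing */
	}
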
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e73b8f8775bd..b19add7675a4 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -11,6 +11,7 @@
#define EDL_PATCH_CMD_LEN (1)
#define EDL_PATCH_VER_REQ_CMD (0x19)
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
+#define EDL_GET_BUILD_INFO_CMD (0x20)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 98d53764871f..2acb719e596f 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
btqcomsmd_cmd_callback, btq);
- if (IS_ERR(btq->cmd_channel))
- return PTR_ERR(btq->cmd_channel);
+ if (IS_ERR(btq->cmd_channel)) {
+ ret = PTR_ERR(btq->cmd_channel);
+ goto destroy_acl_channel;
+ }
hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
+ if (!hdev) {
+ ret = -ENOMEM;
+ goto destroy_cmd_channel;
+ }
hci_set_drvdata(hdev, btq);
btq->hdev = hdev;
@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev);
- if (ret < 0) {
- hci_free_dev(hdev);
- return ret;
- }
+ if (ret < 0)
+ goto hci_free_dev;
platform_set_drvdata(pdev, btq);
return 0;
+
+hci_free_dev:
+ hci_free_dev(hdev);
+destroy_cmd_channel:
+ rpmsg_destroy_ept(btq->cmd_channel);
+destroy_acl_channel:
+ rpmsg_destroy_ept(btq->acl_channel);
+
+ return ret;
}
static int btqcomsmd_remove(struct platform_device *pdev)
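
The probe rework above adopts the standard kernel unwind idiom: each failure jumps to a label that releases everything acquired so far, in reverse order, so no path leaks a channel or device. The shape of the idiom, with hypothetical resource helpers:

	/* Hedged sketch: reverse-order unwinding with goto labels. */
	a = acquire_a();		/* hypothetical helpers */
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = acquire_b();
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto release_a;		/* undo only what succeeded */
	}

	return 0;

release_a:
	release_a_res(a);
	return ret;
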
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index a4f7cace66b0..e7fe5fb22753 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -38,6 +38,19 @@
.hci_ver = (hciv), \
.hci_bus = (bus)
+enum btrtl_chip_id {
+ CHIP_ID_8723A,
+ CHIP_ID_8723B,
+ CHIP_ID_8821A,
+ CHIP_ID_8761A,
+ CHIP_ID_8822B = 8,
+ CHIP_ID_8723D,
+ CHIP_ID_8821C,
+ CHIP_ID_8822C = 13,
+ CHIP_ID_8761B,
+ CHIP_ID_8852A = 18,
+};
+
struct id_table {
__u16 match_flags;
__u16 lmp_subver;
@@ -58,6 +71,7 @@ struct btrtl_device_info {
u8 *cfg_data;
int cfg_len;
bool drop_fw;
+ int project_id;
};
static const struct id_table ic_id_table[] = {
@@ -307,8 +321,10 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Find project_id in table */
for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
- if (project_id == project_id_to_lmp_subver[i].id)
+ if (project_id == project_id_to_lmp_subver[i].id) {
+ btrtl_dev->project_id = project_id;
break;
+ }
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
@@ -658,6 +674,12 @@ out_free:
}
}
+ /* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0
+ * for VsMsftOpCode.
+ */
+ if (lmp_subver == RTL_ROM_LMP_8822B)
+ hci_set_msft_opcode(hdev, 0xFCF0);
+
return btrtl_dev;
err_free:
@@ -708,13 +730,28 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
ret = btrtl_download_firmware(hdev, btrtl_dev);
- btrtl_free(btrtl_dev);
-
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ /* Enable central-peripheral role (able to create new connections with
+ * an existing connection in slave role).
+ */
+	/* Enable WBS support for the specific Realtek devices. */
+ switch (btrtl_dev->project_id) {
+ case CHIP_ID_8822C:
+ case CHIP_ID_8852A:
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ break;
+ default:
+ rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
+		rtl_dev_dbg(hdev, "WBS support not enabled.");
+ break;
+ }
+
+ btrtl_free(btrtl_dev);
return ret;
}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
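
Note the ordering in the final hunk: btrtl_free() moves below the project_id switch because the quirk decision reads btrtl_dev; freeing first would turn that read into a use-after-free. The general rule, sketched with generic names:

	/* Keep the free after the last access to the object. */
	ret = download_firmware(dev);	/* hypothetical helpers */
	apply_quirks(dev->project_id);	/* last read of dev */
	free_device(dev);		/* only now safe to free */
	return ret;
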
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 03b83aa91277..52683fd22e05 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -368,6 +368,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEWGEN |
BTUSB_WIDEBAND_SPEECH},
+ { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_NEWGEN |
+ BTUSB_WIDEBAND_SPEECH},
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -506,7 +508,6 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_HW_RESET_ACTIVE 12
#define BTUSB_TX_WAIT_VND_EVT 13
#define BTUSB_WAKEUP_DISABLE 14
-#define BTUSB_USE_ALT1_FOR_WBS 15
struct btusb_data {
struct hci_dev *hdev;
@@ -1736,15 +1737,12 @@ static void btusb_work(struct work_struct *work)
new_alts = data->sco_num;
}
} else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
- /* Check if Alt 6 is supported for Transparent audio */
- if (btusb_find_altsetting(data, 6)) {
- data->usb_alt6_packet_flow = true;
- new_alts = 6;
- } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
- new_alts = 1;
- } else {
- bt_dev_err(hdev, "Device does not support ALT setting 6");
- }
+ /* Bluetooth USB spec recommends alt 6 (63 bytes), but
+ * many adapters do not support it. Alt 1 appears to
+ * work for all adapters that do not have alt 6, and
+ * which work with WBS at all.
+ */
+ new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -1903,7 +1901,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
le16_to_cpu(rp->lmp_subver) == 0x1012 &&
le16_to_cpu(rp->hci_rev) == 0x0810 &&
le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) {
- bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n");
+ bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues");
pm_runtime_allow(&data->udev->dev);
@@ -1911,7 +1909,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
if (ret >= 0)
msleep(200);
else
- bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n");
+ bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround");
pm_runtime_forbid(&data->udev->dev);
@@ -2924,7 +2922,10 @@ finish:
* extension are using 0xFC1E for VsMsftOpCode.
*/
switch (ver.hw_variant) {
+ case 0x11: /* JfP */
case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* CcP */
hci_set_msft_opcode(hdev, 0xFC1E);
break;
}
@@ -3127,6 +3128,12 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define HCI_WMT_MAX_EVENT_SIZE 64
+/* Definitions for the mt79xx ROM patch download */
+#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
+#define MTK_FW_ROM_PATCH_GD_SIZE 64
+#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
+#define MTK_SEC_MAP_COMMON_SIZE 12
+#define MTK_SEC_MAP_NEED_SEND_SIZE 52
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
@@ -3138,6 +3145,7 @@ enum {
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_PROGRESS,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
@@ -3153,7 +3161,7 @@ struct btmtk_wmt_hdr {
struct btmtk_hci_wmt_cmd {
struct btmtk_wmt_hdr hdr;
- u8 data[256];
+ u8 data[];
} __packed;
struct btmtk_hci_wmt_evt {
@@ -3182,6 +3190,40 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
+struct btmtk_patch_header {
+ u8 datetime[16];
+ u8 platform[4];
+ __le16 hwver;
+ __le16 swver;
+ __le32 magicnum;
+} __packed;
+
+struct btmtk_global_desc {
+ __le32 patch_ver;
+ __le32 sub_sys;
+ __le32 feature_opt;
+ __le32 section_num;
+} __packed;
+
+struct btmtk_section_map {
+ __le32 sectype;
+ __le32 secoffset;
+ __le32 secsize;
+ union {
+ __le32 u4SecSpec[13];
+ struct {
+ __le32 dlAddr;
+ __le32 dlsize;
+ __le32 seckeyidx;
+ __le32 alignlen;
+ __le32 sectype;
+ __le32 dlmodecrctype;
+ __le32 crc;
+ __le32 reserved[6];
+ } bin_info_spec;
+ };
+} __packed;
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
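
The packed structs added above describe the mt79xx patch image that the new download path walks: a 32-byte patch header, a 64-byte global descriptor, then section_num 64-byte section maps. A hedged sketch of locating section map i (using le32_to_cpu() for the little-endian image fields):

	/* Walk an mt79xx patch image laid out per the structs above. */
	const u8 *base = fw->data;
	const struct btmtk_global_desc *gd =
		(const void *)(base + MTK_FW_ROM_PATCH_HEADER_SIZE);
	u32 nsec = le32_to_cpu(gd->section_num);
	u32 i = 0;	/* section index, 0 <= i < nsec */
	const struct btmtk_section_map *sec =
		(const void *)(base + MTK_FW_ROM_PATCH_HEADER_SIZE +
			       MTK_FW_ROM_PATCH_GD_SIZE +
			       (size_t)i * MTK_FW_ROM_PATCH_SEC_MAP_SIZE);
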
@@ -3199,7 +3241,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
- goto err_out;
+ return;
}
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
@@ -3217,13 +3259,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
- if (!data->evt_skb)
- goto err_out;
+ if (!data->evt_skb) {
+ kfree_skb(skb);
+ return;
+ }
}
err = hci_recv_frame(hdev, skb);
- if (err < 0)
- goto err_free_skb;
+ if (err < 0) {
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+ return;
+ }
if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
&data->flags)) {
@@ -3232,11 +3279,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
-err_out:
- return;
-err_free_skb:
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */
@@ -3252,7 +3294,7 @@ err_free_skb:
* to generate the event. Otherwise, the WMT event cannot return from
* the device successfully.
*/
- udelay(100);
+ udelay(500);
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -3327,7 +3369,7 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct btmtk_hci_wmt_cmd wc;
+ struct btmtk_hci_wmt_cmd *wc;
struct btmtk_wmt_hdr *hdr;
int err;
@@ -3341,20 +3383,24 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
if (hlen > 255)
return -EINVAL;
- hdr = (struct btmtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ goto err_free_wc;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -3371,13 +3417,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -3405,6 +3452,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
else
status = BTMTK_WMT_ON_UNDONE;
break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
}
if (wmt_params->status)
@@ -3413,6 +3468,119 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
err_free_skb:
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
+ return err;
+}
+
+static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct btmtk_global_desc *globaldesc = NULL;
+ struct btmtk_section_map *sectionmap;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ const u8 *fw_bin_ptr;
+ int err, dlen, i, status;
+ u8 flag, first_block, retry;
+ u32 section_num, dl_size, section_offset;
+ u8 cmd[64];
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_bin_ptr = fw_ptr;
+ globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
+ section_num = globaldesc->section_num;
+
+ for (i = 0; i < section_num; i++) {
+ first_block = 1;
+ fw_ptr = fw_bin_ptr;
+ sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
+
+ section_offset = sectionmap->secoffset;
+ dl_size = sectionmap->bin_info_spec.dlsize;
+
+ if (dl_size > 0) {
+ retry = 20;
+ while (retry > 0) {
+ cmd[0] = 0; /* 0 means legacy dl mode. */
+ memcpy(cmd + 1,
+ fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
+ MTK_SEC_MAP_COMMON_SIZE,
+ MTK_SEC_MAP_NEED_SEND_SIZE + 1);
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = &status;
+ wmt_params.flag = 0;
+ wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
+ wmt_params.data = &cmd;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ if (status == BTMTK_WMT_PATCH_UNDONE) {
+ break;
+ } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
+ msleep(100);
+ retry--;
+ } else if (status == BTMTK_WMT_PATCH_DONE) {
+ goto next_section;
+ } else {
+ bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
+ status);
+ goto err_release_fw;
+ }
+ }
+
+ fw_ptr += section_offset;
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (dl_size > 0) {
+ dlen = min_t(int, 250, dl_size);
+ if (first_block == 1) {
+ flag = 1;
+ first_block = 0;
+ } else if (dl_size - dlen <= 0) {
+ flag = 3;
+ } else {
+ flag = 2;
+ }
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ dl_size -= dlen;
+ fw_ptr += dlen;
+ }
+ }
+next_section:
+ continue;
+ }
+	/* Wait a few moments for the firmware activation to complete */
+ usleep_range(100000, 120000);
+
+err_release_fw:
+ release_firmware(fw);
return err;
}
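
The loop above streams each section in chunks of at most 250 bytes, framing them for the controller: flag 1 marks the first chunk of a section, 2 a continuation, and 3 the final chunk. The flag selection in isolation:

	/* First/continue/last framing for chunked WMT patch downloads. */
	dlen = min_t(int, 250, dl_size);
	if (first_block)
		flag = 1;		/* first chunk of the section */
	else if (dl_size - dlen <= 0)
		flag = 3;		/* last chunk */
	else
		flag = 2;		/* continuation */
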
@@ -3465,7 +3633,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
- /* Tell deivice the position in sequence */
+ /* Tell device the position in sequence */
if (fw_size - dlen <= 0)
flag = 3;
else if (fw_size < fw->size - 30)
@@ -3555,9 +3723,9 @@ err_free_buf:
return err;
}
-static int btusb_mtk_id_get(struct btusb_data *data, u32 *id)
+static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
{
- return btusb_mtk_reg_read(data, 0x80000008, id);
+ return btusb_mtk_reg_read(data, reg, id);
}
static int btusb_mtk_setup(struct hci_dev *hdev)
@@ -3571,16 +3739,31 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
const char *fwname;
int err, status;
u32 dev_id;
+ char fw_bin_name[64];
+ u32 fw_version;
u8 param;
calltime = ktime_get();
- err = btusb_mtk_id_get(data, &dev_id);
+ err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
+ if (!dev_id) {
+ err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+ err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+ }
+
switch (dev_id) {
case 0x7663:
fwname = FIRMWARE_MT7663;
@@ -3588,8 +3771,28 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7961:
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
+ err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+ goto done;
default:
- bt_dev_err(hdev, "Unsupported support hardware variant (%08x)",
+ bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
dev_id);
return -ENODEV;
}
@@ -3665,6 +3868,7 @@ ignore_func_on:
}
kfree_skb(skb);
+done:
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
@@ -3725,7 +3929,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) {
- bt_dev_err(hdev, "%s: No memory\n", __func__);
+ bt_dev_err(hdev, "%s: No memory", __func__);
return -ENOMEM;
}
@@ -3734,7 +3938,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
ret = btusb_send_frame(hdev, skb);
if (ret) {
- bt_dev_err(hdev, "%s: configuration failed\n", __func__);
+ bt_dev_err(hdev, "%s: configuration failed", __func__);
kfree_skb(skb);
return ret;
}
@@ -4069,6 +4273,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
info = &qca_devices_table[i];
}
if (!info) {
+		/* If the rom_version is not matched in the qca_devices_table
+		 * and the high ROM version bits are not zero, assume this chip
+		 * does not need the rampatch and nvm loaded.
+		 */
+ if (ver_rom & ~0xffffU)
+ return 0;
+
bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
return -ENODEV;
}
@@ -4264,6 +4475,20 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
return !device_may_wakeup(&data->udev->dev);
}
+static int btusb_shutdown_qca(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -4523,6 +4748,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_WCN6855) {
data->setup_on_usb = btusb_setup_qca;
+ hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
hdev->cmd_timeout = btusb_qca_cmd_timeout;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
@@ -4548,10 +4774,6 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
- if (btusb_find_altsetting(data, 1))
- set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
- else
- bt_dev_err(hdev, "Device does not support ALT setting 1");
}
if (!reset)
@@ -4627,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = NULL;
}
- if (enable_autosuspend)
- usb_enable_autosuspend(data->udev);
+ if (!enable_autosuspend)
+ usb_disable_autosuspend(data->udev);
err = hci_register_dev(hdev);
if (err < 0)
@@ -4688,6 +4910,9 @@ static void btusb_disconnect(struct usb_interface *intf)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
+
+ if (!enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8ea5ca8d71d6..3764ceb6fa0d 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
{ BCM_RECV_NULL, .recv = hci_recv_diag },
{ BCM_RECV_TYPE49, .recv = hci_recv_diag },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 7be16a7f653b..27e96681d583 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5)
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
+ /* Enable controller to do both LE scan and BR/EDR inquiry
+ * simultaneously.
+ */
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
+
out_free:
btrtl_free(btrtl_dev);
@@ -1022,6 +1027,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
.data = (const void *)&rtl_vnd },
{ .compatible = "realtek,rtl8723bs-bt",
.data = (const void *)&rtl_vnd },
+ { .compatible = "realtek,rtl8723ds-bt",
+ .data = (const void *)&rtl_vnd },
#endif
{ },
};
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f83d67eafc9f..637c5b8c2aa1 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
goto no_schedule;
- if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
- set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
goto no_schedule;
- }
BT_DBG("");
@@ -174,10 +173,10 @@ restart:
kfree_skb(skb);
}
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
goto restart;
- clear_bit(HCI_UART_SENDING, &hu->tx_state);
wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
}
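
Both hci_ldisc hunks close the same lost-wakeup race: the producer must publish TX_WAKEUP before testing SENDING, and the consumer must clear SENDING before re-testing TX_WAKEUP; with the old order, a wakeup arriving between the two operations was silently dropped and the queue stalled. The safe ordering in miniature:

	/* producer: publish work first, then try to claim the sender role */
	set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
	if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
		return 0;		/* active sender will re-check */

	/* consumer, at the end of a send pass: drop the claim first */
	clear_bit(HCI_UART_SENDING, &hu->tx_state);
	if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
		goto restart;		/* then re-check for queued work */
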
@@ -802,7 +801,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
* We don't provide read/write/poll interface for user space.
*/
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr)
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset)
{
return 0;
}
@@ -819,29 +819,28 @@ static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
return 0;
}
+static struct tty_ldisc_ops hci_uart_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_hci",
+ .open = hci_uart_tty_open,
+ .close = hci_uart_tty_close,
+ .read = hci_uart_tty_read,
+ .write = hci_uart_tty_write,
+ .ioctl = hci_uart_tty_ioctl,
+ .compat_ioctl = hci_uart_tty_ioctl,
+ .poll = hci_uart_tty_poll,
+ .receive_buf = hci_uart_tty_receive,
+ .write_wakeup = hci_uart_tty_wakeup,
+};
+
static int __init hci_uart_init(void)
{
- static struct tty_ldisc_ops hci_uart_ldisc;
int err;
BT_INFO("HCI UART driver ver %s", VERSION);
/* Register the tty discipline */
-
- memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
- hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
- hci_uart_ldisc.name = "n_hci";
- hci_uart_ldisc.open = hci_uart_tty_open;
- hci_uart_ldisc.close = hci_uart_tty_close;
- hci_uart_ldisc.read = hci_uart_tty_read;
- hci_uart_ldisc.write = hci_uart_tty_write;
- hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
- hci_uart_ldisc.compat_ioctl = hci_uart_tty_ioctl;
- hci_uart_ldisc.poll = hci_uart_tty_poll;
- hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
- hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
- hci_uart_ldisc.owner = THIS_MODULE;
-
err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4a963682c702..de36af63e182 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -50,7 +50,8 @@
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000
-#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000)
+#define IBS_DISABLE_SSR_TIMEOUT_MS \
+ (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
#define FW_DOWNLOAD_TIMEOUT_MS 3000
/* susclk rate */
@@ -76,7 +77,8 @@ enum qca_flags {
QCA_MEMDUMP_COLLECTION,
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED,
- QCA_BT_OFF
+ QCA_BT_OFF,
+ QCA_ROM_FW
};
enum qca_capabilities {
@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
dump_size = __le32_to_cpu(dump->dump_size);
if (!(dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size");
+ kfree(qca_memdump);
kfree_skb(skb);
+ qca->qca_memdump = NULL;
mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret)
return ret;
+ clear_bit(QCA_ROM_FW, &qca->flags);
/* Patch downloading has to be done without IBS mode */
set_bit(QCA_IBS_DISABLED, &qca->flags);
@@ -1718,12 +1723,14 @@ retry:
hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
+ set_bit(QCA_ROM_FW, &qca->flags);
ret = 0;
} else if (ret == -EAGAIN) {
/*
* Userspace firmware loader will return -EAGAIN in case no
* patch/nvm-config is found, so run with original fw/config.
*/
+ set_bit(QCA_ROM_FW, &qca->flags);
ret = 0;
}
@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
set_bit(QCA_SUSPENDING, &qca->flags);
- if (test_bit(QCA_BT_OFF, &qca->flags))
+	/* If the BT SoC is running with the default firmware then it does
+	 * not support in-band sleep
+	 */
+ if (test_bit(QCA_ROM_FW, &qca->flags))
+ return 0;
+
+	/* During SSR after memory dump collection, the controller will be
+	 * powered off and then powered on. If the controller is powered off
+	 * during SSR then we should wait until SSR is completed.
+	 */
+ if (test_bit(QCA_BT_OFF, &qca->flags) &&
+ !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
return 0;
- if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
+ if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
+ test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
IBS_DISABLE_SSR_TIMEOUT_MS :
FW_DOWNLOAD_TIMEOUT_MS;
/* QCA_IBS_DISABLED flag is set to true, During FW download
* and during memory dump collection. It is reset to false,
- * After FW download complete and after memory dump collections.
+	 * after the FW download completes.
*/
wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
}
}
- /* After memory dump collection, Controller is powered off.*/
- if (test_bit(QCA_BT_OFF, &qca->flags))
- return 0;
-
cancel_work_sync(&qca->ws_awake_device);
cancel_work_sync(&qca->ws_awake_rx);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index ef96ad06fa54..9e03402ef1b3 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
kfree_skb(skb);
}
- } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
- clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
}
/* ------- Interface to HCI layer ------ */
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0c262c2aeaf2..e7f7eee6ee9a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -80,7 +80,7 @@ config MOXTET
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
- depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC && !C6X)
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
depends on HAS_IOMEM
select INDIRECT_PIO if ARM64
help
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 845b6c43fef8..2344d560b144 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
index c23c77c9b705..b1fd55901c50 100644
--- a/drivers/bus/fsl-mc/Kconfig
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -14,3 +14,10 @@ config FSL_MC_BUS
architecture. The fsl-mc bus driver handles discovery of
DPAA2 objects (which are represented as Linux devices) and
binding objects to drivers.
+
+config FSL_MC_UAPI_SUPPORT
+ bool "Management Complex (MC) userspace support"
+ depends on FSL_MC_BUS
+ help
+ Provides userspace support for interrogating, creating, destroying or
+ configuring DPAA2 objects exported by the Management Complex.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
index 3c518c7e8374..4ae292a30e53 100644
--- a/drivers/bus/fsl-mc/Makefile
+++ b/drivers/bus/fsl-mc/Makefile
@@ -16,3 +16,6 @@ mc-bus-driver-objs := fsl-mc-bus.o \
fsl-mc-allocator.o \
fsl-mc-msi.o \
dpmcp.o
+
+# MC userspace support
+obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 68488a7ad0d6..e3e2ae41c22b 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -237,8 +237,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* populated before they can get allocation requests from probe callbacks
* of the device drivers for the non-allocatable devices.
*/
-static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
- bool alloc_interrupts)
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
{
int num_child_objects;
int dprc_get_obj_failures;
@@ -458,8 +458,9 @@ out:
/*
* Disable and clear interrupt for a given DPRC object
*/
-static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+int disable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
struct fsl_mc_io *mc_io = mc_dev->mc_io;
@@ -496,9 +497,18 @@ static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 0;
+
return 0;
}
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ return mc_bus->irq_enabled;
+}
+
static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
{
int error;
@@ -525,8 +535,9 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
return 0;
}
-static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+int enable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/*
@@ -554,6 +565,8 @@ static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 1;
+
return 0;
}
@@ -603,6 +616,7 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
struct irq_domain *mc_msi_domain;
bool mc_io_created = false;
bool msi_domain_set = false;
+ bool uapi_created = false;
u16 major_ver, minor_ver;
size_t region_size;
int error;
@@ -635,6 +649,11 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
return error;
mc_io_created = true;
+ } else {
+ error = fsl_mc_uapi_create_device_file(mc_bus);
+ if (error < 0)
+ return -EPROBE_DEFER;
+ uapi_created = true;
}
mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
@@ -692,6 +711,9 @@ error_cleanup_msi_domain:
mc_dev->mc_io = NULL;
}
+ if (uapi_created)
+ fsl_mc_uapi_remove_device_file(mc_bus);
+
return error;
}
EXPORT_SYMBOL_GPL(dprc_setup);
@@ -763,6 +785,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
int dprc_cleanup(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/* this function should be called only for DPRCs, it
@@ -793,6 +816,8 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
fsl_destroy_mc_io(mc_dev->mc_io);
mc_dev->mc_io = NULL;
+ } else {
+ fsl_mc_uapi_remove_device_file(mc_bus);
}
return 0;
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index b8e6acdf932e..380ad1fdb745 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -41,7 +41,7 @@ struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
- void *fsl_mc_regs;
+ void __iomem *fsl_mc_regs;
};
/**
@@ -208,12 +208,108 @@ static struct attribute *fsl_mc_dev_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_dev);
+static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ struct fsl_mc_bus *root_mc_bus;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+	dprc_scan_objects(root_mc_dev, false);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+
+exit:
+ return 0;
+}
+
+static ssize_t rescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ unsigned long val;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ enable_dprc_irq(root_mc_dev);
+ else
+ disable_dprc_irq(root_mc_dev);
+
+exit:
+ return 0;
+}
+
+static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
+exit:
+ return 0;
+}
+
+static ssize_t autorescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
+
+ return count;
+}
+
+static ssize_t autorescan_show(struct bus_type *bus, char *buf)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
+ return strlen(buf);
+}
+
+static BUS_ATTR_RW(autorescan);
+
+static struct attribute *fsl_mc_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ &bus_attr_autorescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_bus);
+
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
.dev_groups = fsl_mc_dev_groups,
+ .bus_groups = fsl_mc_bus_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -292,6 +388,11 @@ struct device_type fsl_mc_bus_dpdmai_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+struct device_type fsl_mc_bus_dpdbg_type = {
+ .name = "fsl_mc_bus_dpdbg"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -313,6 +414,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpaiop_type, "dpaiop" },
{ &fsl_mc_bus_dpci_type, "dpci" },
{ &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { &fsl_mc_bus_dpdbg_type, "dpdbg" },
{ NULL, NULL }
};
int i;
@@ -840,6 +942,15 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * We know that the device has an endpoint because we verified by
+ * interrogating the firmware. This is the case when the device was not
+ * yet discovered by the fsl-mc bus, thus the lookup returned NULL.
+ * Differentiate this case by returning EPROBE_DEFER.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPROBE_DEFER);
+
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
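
Returning ERR_PTR(-EPROBE_DEFER) lets a consumer distinguish "endpoint exists in firmware but is not on the bus yet" from a plain lookup failure. A hedged sketch of a hypothetical caller:

	/* Defer probing until the endpoint device appears on the bus. */
	struct fsl_mc_device *ep = fsl_mc_get_endpoint(mc_dev);

	if (IS_ERR(ep))
		return PTR_ERR(ep);	/* -EPROBE_DEFER => probe retried later */
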
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index c932387641fa..1958fa065360 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -10,6 +10,8 @@
#include <linux/fsl/mc.h>
#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
/*
* Data Path Management Complex (DPMNG) General API
@@ -543,6 +545,22 @@ struct fsl_mc_resource_pool {
};
/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
* struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
* @mc_dev: fsl-mc device for the bus device itself.
* @resource_pools: array of resource pools (one pool per resource type)
@@ -551,6 +569,7 @@ struct fsl_mc_resource_pool {
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
* @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
@@ -558,6 +577,8 @@ struct fsl_mc_bus {
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
};
#define to_fsl_mc_bus(_mc_dev) \
@@ -574,6 +595,9 @@ int __init dprc_driver_init(void);
void dprc_driver_exit(void);
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_allocator_driver_exit(void);
@@ -612,4 +636,29 @@ void fsl_mc_get_root_dprc(struct device *dev,
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev);
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_UAPI_SUPPORT
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus);
+
+#else
+
+static inline int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ return 0;
+}
+
+static inline void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+}
+
+#endif
+
+int disable_dprc_irq(struct fsl_mc_device *mc_dev);
+int enable_dprc_irq(struct fsl_mc_device *mc_dev);
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
new file mode 100644
index 000000000000..9c4c1395fcdb
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Complex (MC) userspace support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "fsl-mc-private.h"
+
+struct uapi_priv_data {
+ struct fsl_mc_uapi *uapi;
+ struct fsl_mc_io *mc_io;
+};
+
+struct fsl_mc_cmd_desc {
+ u16 cmdid_value;
+ u16 cmdid_mask;
+ int size;
+ bool token;
+ int flags;
+};
+
+#define FSL_MC_CHECK_MODULE_ID BIT(0)
+#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
+
+enum fsl_mc_cmd_index {
+ DPDBG_DUMP = 0,
+ DPDBG_SET,
+ DPRC_GET_CONTAINER_ID,
+ DPRC_CREATE_CONT,
+ DPRC_DESTROY_CONT,
+ DPRC_ASSIGN,
+ DPRC_UNASSIGN,
+ DPRC_GET_OBJ_COUNT,
+ DPRC_GET_OBJ,
+ DPRC_GET_RES_COUNT,
+ DPRC_GET_RES_IDS,
+ DPRC_SET_OBJ_LABEL,
+ DPRC_SET_LOCKED,
+ DPRC_CONNECT,
+ DPRC_DISCONNECT,
+ DPRC_GET_POOL,
+ DPRC_GET_POOL_COUNT,
+ DPRC_GET_CONNECTION,
+ DPCI_GET_LINK_STATE,
+ DPCI_GET_PEER_ATTR,
+ DPAIOP_GET_SL_VERSION,
+ DPAIOP_GET_STATE,
+ DPMNG_GET_VERSION,
+ DPSECI_GET_TX_QUEUE,
+ DPMAC_GET_COUNTER,
+ DPMAC_GET_MAC_ADDR,
+ DPNI_SET_PRIM_MAC,
+ DPNI_GET_PRIM_MAC,
+ DPNI_GET_STATISTICS,
+ DPNI_GET_LINK_STATE,
+ DPNI_GET_MAX_FRAME_LENGTH,
+ DPSW_GET_TAILDROP,
+ DPSW_SET_TAILDROP,
+ DPSW_IF_GET_COUNTER,
+ DPSW_IF_GET_MAX_FRAME_LENGTH,
+ DPDMUX_GET_COUNTER,
+ DPDMUX_IF_GET_MAX_FRAME_LENGTH,
+ GET_ATTR,
+ GET_IRQ_MASK,
+ GET_IRQ_STATUS,
+ CLOSE,
+ OPEN,
+ GET_API_VERSION,
+ DESTROY,
+ CREATE,
+};
+
+static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ [DPDBG_DUMP] = {
+ .cmdid_value = 0x1300,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPDBG_SET] = {
+ .cmdid_value = 0x1400,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPRC_GET_CONTAINER_ID] = {
+ .cmdid_value = 0x8300,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPRC_CREATE_CONT] = {
+ .cmdid_value = 0x1510,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DESTROY_CONT] = {
+ .cmdid_value = 0x1520,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_ASSIGN] = {
+ .cmdid_value = 0x1570,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_UNASSIGN] = {
+ .cmdid_value = 0x1580,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_OBJ_COUNT] = {
+ .cmdid_value = 0x1590,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ },
+ [DPRC_GET_OBJ] = {
+ .cmdid_value = 0x15A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_RES_COUNT] = {
+ .cmdid_value = 0x15B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+ [DPRC_GET_RES_IDS] = {
+ .cmdid_value = 0x15C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ },
+ [DPRC_SET_OBJ_LABEL] = {
+ .cmdid_value = 0x1610,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 48,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_SET_LOCKED] = {
+ .cmdid_value = 0x16B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_CONNECT] = {
+ .cmdid_value = 0x1670,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 56,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DISCONNECT] = {
+ .cmdid_value = 0x1680,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_POOL] = {
+ .cmdid_value = 0x1690,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_POOL_COUNT] = {
+ .cmdid_value = 0x16A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPRC_GET_CONNECTION] = {
+ .cmdid_value = 0x16C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+
+ [DPCI_GET_LINK_STATE] = {
+ .cmdid_value = 0x0E10,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPCI_GET_PEER_ATTR] = {
+ .cmdid_value = 0x0E20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_SL_VERSION] = {
+ .cmdid_value = 0x2820,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_STATE] = {
+ .cmdid_value = 0x2830,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPMNG_GET_VERSION] = {
+ .cmdid_value = 0x8310,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPSECI_GET_TX_QUEUE] = {
+ .cmdid_value = 0x1970,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPMAC_GET_COUNTER] = {
+ .cmdid_value = 0x0c40,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 9,
+ },
+ [DPMAC_GET_MAC_ADDR] = {
+ .cmdid_value = 0x0c50,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_SET_PRIM_MAC] = {
+ .cmdid_value = 0x2240,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPNI_GET_PRIM_MAC] = {
+ .cmdid_value = 0x2250,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_STATISTICS] = {
+ .cmdid_value = 0x25D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPNI_GET_LINK_STATE] = {
+ .cmdid_value = 0x2150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x2170,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPSW_GET_TAILDROP] = {
+ .cmdid_value = 0x0A80,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPSW_SET_TAILDROP] = {
+ .cmdid_value = 0x0A90,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 24,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPSW_IF_GET_COUNTER] = {
+ .cmdid_value = 0x0340,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPSW_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0450,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPDMUX_GET_COUNTER] = {
+ .cmdid_value = 0x0b20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0a20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [GET_ATTR] = {
+ .cmdid_value = 0x0040,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [GET_IRQ_MASK] = {
+ .cmdid_value = 0x0150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [GET_IRQ_STATUS] = {
+ .cmdid_value = 0x0160,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [CLOSE] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+
+ /* Common commands amongst all types of objects. Must be checked last. */
+ [OPEN] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [GET_API_VERSION] = {
+ .cmdid_value = 0xA000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 8,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [DESTROY] = {
+ .cmdid_value = 0x9800,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [CREATE] = {
+ .cmdid_value = 0x9000,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 64,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+};
+
+#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
+
+#define FSL_MC_MAX_MODULE_ID 0x10
+
+static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_command *mc_cmd)
+{
+ struct fsl_mc_cmd_desc *desc = NULL;
+ int mc_cmd_max_size, i;
+ bool token_provided;
+ u16 cmdid, module_id;
+ char *mc_cmd_end;
+ char sum = 0;
+
+ /* Check if this is an accepted MC command */
+ cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
+ for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
+ desc = &fsl_mc_accepted_cmds[i];
+ if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
+ break;
+ }
+ if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
+ return -EACCES;
+ }
+
+ /* Check if the size of the command is honored. Anything beyond the
+ * last valid byte of the command should be zeroed.
+ */
+ mc_cmd_max_size = sizeof(*mc_cmd);
+ mc_cmd_end = ((char *)mc_cmd) + desc->size;
+ for (i = desc->size; i < mc_cmd_max_size; i++)
+ sum |= *mc_cmd_end++;
+ if (sum) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
+ cmdid, desc->size);
+ return -EACCES;
+ }
+
+ /* Some MC commands request a token to be passed so that object
+ * identification is possible. Check if the token passed in the command
+ * is as expected.
+ */
+ token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
+ if (token_provided != desc->token) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
+ cmdid, mc_cmd_hdr_read_token(mc_cmd));
+ return -EACCES;
+ }
+
+ /* If needed, check if the module ID passed is valid */
+ if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
+ /* The module ID is represented by bits [4:9] from the cmdid */
+ module_id = (cmdid & GENMASK(9, 4)) >> 4;
+ if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
+ cmdid, module_id);
+ return -EACCES;
+ }
+ }
+
+ /* Some commands alter how hardware resources are managed. For these
+ * commands, check for CAP_NET_ADMIN.
+ */
+ if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
+ if (!capable(CAP_NET_ADMIN)) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
+ cmdid);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
+ struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_command mc_cmd;
+ int error;
+
+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ error = fsl_mc_command_check(mc_dev, &mc_cmd);
+ if (error)
+ return error;
+
+ error = mc_send_command(mc_io, &mc_cmd);
+ if (error)
+ return error;
+
+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
+{
+ struct fsl_mc_device *root_mc_device;
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_io *dynamic_mc_io;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
+ mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (!mc_uapi->local_instance_in_use) {
+ priv_data->mc_io = mc_uapi->static_mc_io;
+ mc_uapi->local_instance_in_use = 1;
+ } else {
+ error = fsl_mc_portal_allocate(root_mc_device, 0,
+ &dynamic_mc_io);
+ if (error) {
+ dev_dbg(&root_mc_device->dev,
+ "Could not allocate MC portal\n");
+ goto error_portal_allocate;
+ }
+
+ priv_data->mc_io = dynamic_mc_io;
+ }
+ priv_data->uapi = mc_uapi;
+ filep->private_data = priv_data;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+
+error_portal_allocate:
+ mutex_unlock(&mc_uapi->mutex);
+ kfree(priv_data);
+
+ return error;
+}
+
+static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
+{
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_io *mc_io;
+
+ priv_data = filep->private_data;
+ mc_uapi = priv_data->uapi;
+ mc_io = priv_data->mc_io;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (mc_io == mc_uapi->static_mc_io)
+ mc_uapi->local_instance_in_use = 0;
+ else
+ fsl_mc_portal_free(mc_io);
+
+ kfree(filep->private_data);
+ filep->private_data = NULL;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+}
+
+static long fsl_mc_uapi_dev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uapi_priv_data *priv_data = file->private_data;
+ struct fsl_mc_device *root_mc_device;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ switch (cmd) {
+ case FSL_MC_SEND_MC_COMMAND:
+ error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
+ break;
+ default:
+ dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+static const struct file_operations fsl_mc_uapi_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_mc_uapi_dev_open,
+ .release = fsl_mc_uapi_dev_release,
+ .unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
+};
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
+ struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
+ int error;
+
+ mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
+ mc_uapi->misc.name = dev_name(&mc_dev->dev);
+ mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
+
+ error = misc_register(&mc_uapi->misc);
+ if (error)
+ return error;
+
+ mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
+
+ mutex_init(&mc_uapi->mutex);
+
+ return 0;
+}
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+ misc_deregister(&mc_bus->uapi_misc.misc);
+}
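
From userspace the new device file is driven by a single ioctl that round-trips one struct fsl_mc_command through the kernel checks above. A hedged sketch of a caller; the "/dev/dprc.1" node name and the command contents are illustrative, and FSL_MC_SEND_MC_COMMAND comes from the uapi header:

	/* Hypothetical userspace user of the fsl-mc uapi. */
	struct fsl_mc_command cmd = { 0 };
	int fd = open("/dev/dprc.1", O_RDWR);

	if (fd < 0)
		return -1;
	/* fill cmd.header (cmdid, token) and cmd.params[] here ... */
	if (ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd) < 0)
		perror("mc command");
	/* on success the firmware response is written back into cmd */
	close(fd);
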
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 85a0225db522..b291b35e3884 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -35,7 +35,7 @@ static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
return (enum mc_cmd_status)hdr->status;
}
-static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
{
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index f0697f433c2f..be4eebb0971b 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -151,12 +151,17 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
+ /* if controller driver has set irq_flags, use it */
+ if (mhi_cntrl->irq_flags)
+ irq_flags = mhi_cntrl->irq_flags;
+
/* Setup BHI_INTVEC IRQ */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
mhi_intvec_threaded_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ irq_flags,
"bhi", mhi_cntrl);
if (ret)
return ret;
@@ -174,7 +179,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ irq_flags,
"mhi", mhi_event);
if (ret) {
dev_err(dev, "Error requesting irq:%d for ev:%d\n",
@@ -552,6 +557,9 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
tre_ring = &mhi_chan->tre_ring;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ if (!chan_ctxt->rbase) /* Already uninitialized */
+ return;
+
mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
tre_ring->pre_aligned, tre_ring->dma_handle);
vfree(buf_ring->base);
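
The irq_flags field lets a controller driver replace the historical IRQF_SHARED | IRQF_NO_SUSPEND default when its interrupt line needs different semantics; leaving the field zero preserves the old behavior. A sketch of a controller opting in (the flag choice is illustrative):

	/* In a hypothetical controller driver, before mhi registration: */
	mhi_cntrl->irq_flags = IRQF_SHARED;	/* e.g. drop IRQF_NO_SUSPEND */
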
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index d34d7e90e38d..4e0131b94056 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -111,7 +111,14 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
dma_addr_t db;
db = ring->iommu_base + (ring->wp - ring->base);
+
+ /*
+ * Writes to the new ring element must be visible to the hardware
+ * before letting h/w know there is new element to fetch.
+ */
+ dma_wmb();
*ring->ctxt_wp = db;
+
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
ring->db_addr, db);
}
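
The dma_wmb() added above orders the ring-element writes against the doorbell: the device must never observe the new write pointer before the descriptor it points at is visible in memory. The canonical publish-then-doorbell pattern, with hypothetical helpers:

	fill_ring_element(ring->wp);	/* 1. write the new descriptor      */
	dma_wmb();			/* 2. make it visible to the device */
	*ring->ctxt_wp = db;		/* 3. only then publish the new WP  */
	ring_doorbell(db);		/* 4. tell the hardware to fetch it */
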
@@ -135,6 +142,19 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->reset) {
+ mhi_cntrl->reset(mhi_cntrl);
+ return;
+ }
+
+ /* Generic MHI SoC reset */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+}
+EXPORT_SYMBOL_GPL(mhi_soc_reset);
+
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
@@ -260,6 +280,18 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -947,118 +979,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
return (tmp == ring->rp);
}
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_buf_info buf_info = { };
+ unsigned long flags;
int ret;
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
- return -EINVAL;
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return -EIO;
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto exit_unlock;
}
- /* we're in M3 or transitioning to M3 */
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+ goto exit_unlock;
+
+ /* trigger M3 exit if necessary */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
- /* Toggle wake to exit out of M2 */
+ /* Assert dev_wake (to exit/prevent M1/M2) */
mhi_cntrl->wake_toggle(mhi_cntrl);
- buf_info.v_addr = skb->data;
- buf_info.cb_buf = skb;
- buf_info.len = len;
-
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
+ if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ ret = -EIO;
+ goto exit_unlock;
}
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- return 0;
+exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
- int ret;
-
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
- return -EINVAL;
-
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- dev_err(dev, "MHI is not in activate state, PM state: %s\n",
- to_mhi_pm_state_str(mhi_cntrl->pm_state));
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- return -EIO;
- }
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
- }
-
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
@@ -1112,57 +1114,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_ring *tre_ring;
struct mhi_buf_info buf_info = { };
- unsigned long flags;
- int ret;
-
- /*
- * this check here only as a guard, it's always
- * possible mhi can enter error while executing rest of function,
- * which is not fatal so we do not need to hold pm_lock
- */
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
- return -EIO;
-
- tre_ring = &mhi_chan->tre_ring;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret))
- return ret;
-
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- unsigned long flags;
-
- read_lock_irqsave(&mhi_chan->lock, flags);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_irqrestore(&mhi_chan->lock, flags);
- }
-
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
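
[The consolidation above routes mhi_queue_skb(), mhi_queue_dma() and mhi_queue_buf() through a single mhi_queue() that takes pm_lock irqsave, checks ring fullness under the lock, generates the TRE before triggering an M3 exit, and rings the doorbell only while doorbell access is valid (returning -EIO otherwise); together with the dma_wmb() added to mhi_ring_chan_db(), TRE contents are guaranteed visible to the device before the write pointer moves. The client-facing contract is unchanged; a minimal sketch of a TX submission (demo_send is illustrative):

#include <linux/dma-direction.h>
#include <linux/mhi.h>

static int demo_send(struct mhi_device *mhi_dev, void *buf, size_t len)
{
        /* MHI_EOT: complete the transfer in one TRE, one xfer event */
        int ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);

        /*
         * -ENOMEM: TRE ring full, retry after an xfer-done callback.
         * -EIO:    PM error state or doorbell not accessible.
         */
        return ret;
}
]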
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index f5bee76ea061..20673a4b4a3c 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -8,13 +8,21 @@
* Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
*/
+#include <linux/aer.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#define MHI_PCI_DEFAULT_BAR_NUM 0
+#define MHI_POST_RESET_DELAY_MS 500
+
+#define HEALTH_CHECK_PERIOD (HZ * 2)
+
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
@@ -76,6 +84,36 @@ struct mhi_pci_dev_info {
.offload_channel = false, \
}
+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ }
+
#define MHI_EVENT_CONFIG_DATA(ev_ring) \
{ \
.num_elements = 128, \
@@ -91,8 +129,8 @@ struct mhi_pci_dev_info {
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
{ \
- .num_elements = 128, \
- .irq_moderation_ms = 5, \
+ .num_elements = 2048, \
+ .irq_moderation_ms = 1, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
@@ -104,27 +142,31 @@ struct mhi_pci_dev_info {
}
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
- MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
- MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};
-static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
/* first ring is control+data ring */
MHI_EVENT_CONFIG_CTRL(0),
+ /* DIAG dedicated event ring */
+ MHI_EVENT_CONFIG_DATA(1),
/* Hardware channels request dedicated hardware event rings */
- MHI_EVENT_CONFIG_HW_DATA(1, 100),
- MHI_EVENT_CONFIG_HW_DATA(2, 101)
+ MHI_EVENT_CONFIG_HW_DATA(2, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 101)
};
-static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
- .timeout_ms = 5000,
+ .timeout_ms = 8000,
.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
.ch_cfg = modem_qcom_v1_mhi_channels,
.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
@@ -147,6 +189,18 @@ static const struct pci_device_id mhi_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+enum mhi_pci_device_status {
+ MHI_PCI_DEV_STARTED,
+};
+
+struct mhi_pci_device {
+ struct mhi_controller mhi_cntrl;
+ struct pci_saved_state *pci_state;
+ struct work_struct recovery_work;
+ struct timer_list health_check_timer;
+ unsigned long status;
+};
+
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr, u32 *out)
{
@@ -163,7 +217,31 @@ static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
/* Nothing to do for now */
+ switch (cb) {
+ case MHI_CB_FATAL_ERROR:
+ case MHI_CB_SYS_ERROR:
+ dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ u16 vendor = 0;
+
+ if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ return false;
+
+ if (vendor == (u16) ~0 || vendor == 0)
+ return false;
+
+ return true;
}
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
@@ -227,8 +305,12 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
}
if (nr_vectors < mhi_cntrl->nr_irqs) {
- dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n",
- nr_vectors, mhi_cntrl_config->num_events);
+ dev_warn(&pdev->dev, "using shared MSI\n");
+
+ /* Patch msi vectors, use only one (shared) */
+ for (i = 0; i < mhi_cntrl_config->num_events; i++)
+ mhi_cntrl_config->event_cfg[i].irq = 0;
+ mhi_cntrl->nr_irqs = 1;
}
irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
@@ -257,20 +339,89 @@ static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
/* no PM for now */
}
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+ struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+ recovery_work);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ dev_warn(&pdev->dev, "device recovery started\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* Check if we can recover without full reset */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ if (!mhi_pci_is_alive(mhi_cntrl))
+ goto err_try_reset;
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err)
+ goto err_try_reset;
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err)
+ goto err_unprepare;
+
+ dev_dbg(&pdev->dev, "Recovery completed\n");
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ return;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+ if (pci_reset_function(pdev))
+ dev_err(&pdev->dev, "Recovery failed\n");
+}
+
+static void health_check(struct timer_list *t)
+{
+ struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ return;
+ }
+
+ /* reschedule in two seconds */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
const struct mhi_controller_config *mhi_cntrl_config;
+ struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
int err;
dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
- mhi_cntrl = mhi_alloc_controller();
- if (!mhi_cntrl)
+ /* mhi_pdev.mhi_cntrl must be zero-initialized */
+ mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+ if (!mhi_pdev)
return -ENOMEM;
+ INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl_config = info->config;
+ mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
@@ -285,17 +436,23 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
- goto err_release;
+ return err;
err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
if (err)
- goto err_release;
+ return err;
+
+ pci_set_drvdata(pdev, mhi_pdev);
+
+ /* Keep the PCI config space saved at hand for restore after a sudden PCI error */
+ pci_save_state(pdev);
+ mhi_pdev->pci_state = pci_store_saved_state(pdev);
- pci_set_drvdata(pdev, mhi_cntrl);
+ pci_enable_pcie_error_reporting(pdev);
err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
if (err)
- goto err_release;
+ return err;
/* MHI bus does not power up the controller by default */
err = mhi_prepare_for_power_up(mhi_cntrl);
@@ -310,33 +467,209 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_unprepare;
}
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+ /* start health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return 0;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
mhi_unregister_controller(mhi_cntrl);
-err_release:
- mhi_free_controller(mhi_cntrl);
return err;
}
static void mhi_pci_remove(struct pci_dev *pdev)
{
- struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
- mhi_power_down(mhi_cntrl, true);
- mhi_unprepare_after_power_down(mhi_cntrl);
mhi_unregister_controller(mhi_cntrl);
- mhi_free_controller(mhi_cntrl);
}
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_info(&pdev->dev, "reset\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* cause internal device reset */
+ mhi_soc_reset(mhi_cntrl);
+
+ /* Wait to be sure the device reset has completed */
+ msleep(MHI_POST_RESET_DELAY_MS);
+}
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ /* Restore initial known working PCI state */
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ /* Is the device status available? */
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(&pdev->dev, "reset failed\n");
+ return;
+ }
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ return;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ return;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ } else {
+ /* Nothing to do */
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+ dev_err(&pdev->dev, "PCI slot reset done\n");
+
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+ .error_detected = mhi_pci_error_detected,
+ .slot_reset = mhi_pci_slot_reset,
+ .resume = mhi_pci_io_resume,
+ .reset_prepare = mhi_pci_reset_prepare,
+ .reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ /* Transition to M3 state */
+ mhi_pm_suspend(mhi_cntrl);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_set_master(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_recovery;
+
+ /* Exit M3, transition to M0 state */
+ err = mhi_pm_resume(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+ goto err_recovery;
+ }
+
+ /* Resume health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ return 0;
+
+err_recovery:
+ /* The device may have lost power or crashed; try recovering it */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return err;
+}
+
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+};
+
static struct pci_driver mhi_pci_driver = {
.name = "mhi-pci-generic",
.id_table = mhi_pci_id_table,
.probe = mhi_pci_probe,
- .remove = mhi_pci_remove
+ .remove = mhi_pci_remove,
+ .err_handler = &mhi_pci_err_handler,
+ .driver.pm = &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);
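
[A pattern note on the recovery machinery above: health_check() runs in timer (softirq) context and must not sleep, so on a dead device it only queues recovery_work on system_long_wq, where the sleeping calls (mhi_power_down(), PCI state restore, msleep()) are legal. The same split in a generic, self-contained sketch; all demo_* names are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct timer_list poll_timer;
        struct work_struct recovery_work;
};

static bool demo_device_alive(struct demo_dev *d)
{
        return true;    /* placeholder liveness probe (e.g. config read) */
}

static void demo_recovery(struct work_struct *work)
{
        struct demo_dev *d = container_of(work, struct demo_dev,
                                          recovery_work);

        /* process context: sleeping recovery steps go here */
        mod_timer(&d->poll_timer, jiffies + 2 * HZ);    /* resume polling */
}

static void demo_poll(struct timer_list *t)
{
        struct demo_dev *d = from_timer(d, t, poll_timer);

        if (!demo_device_alive(d)) {
                queue_work(system_long_wq, &d->recovery_work);
                return;         /* recovery rearms the timer */
        }
        mod_timer(&d->poll_timer, jiffies + 2 * HZ);
}

static void demo_start(struct demo_dev *d)
{
        INIT_WORK(&d->recovery_work, demo_recovery);
        timer_setup(&d->poll_timer, demo_poll, 0);
        mod_timer(&d->poll_timer, jiffies + 2 * HZ);
}
]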
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 2519ceede64b..dd9e7343a5e3 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1111,7 +1111,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
if (!mbus->sdramwins_base) {
- iounmap(mbus_state.mbuswins_base);
+ iounmap(mbus->mbuswins_base);
return -ENOMEM;
}
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index c5eb46cbf388..01a3d0cd08ed 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -16,6 +16,7 @@
static int simple_pm_bus_probe(struct platform_device *pdev)
{
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -23,7 +24,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
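
[The simple-pm-bus change forwards the bus device's platform data, interpreted as an of_dev_auxdata lookup table, to of_platform_populate(); that lets board code pin fixed device names or platform data onto particular DT children. A hedged example of such a table, with an invented compatible string, unit address and name:

#include <linux/of_platform.h>

static struct of_dev_auxdata demo_auxdata[] = {
        /* match compatible + unit address, force device name, no pdata */
        OF_DEV_AUXDATA("vendor,demo-uart", 0x12340000, "demo-uart.0", NULL),
        { /* sentinel */ }
};

A board file would register demo_auxdata as the platform data of the simple-pm-bus platform device, which probe() now retrieves via dev_get_platdata().]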
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1bb00a959c67..d46db132d085 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -45,6 +45,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -126,6 +128,7 @@ struct sunxi_rsb {
struct completion complete;
struct mutex lock;
unsigned int status;
+ u32 clk_freq;
};
/* bus / slave device related functions */
@@ -170,7 +173,9 @@ static int sunxi_rsb_device_remove(struct device *dev)
{
const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
- return drv->remove(to_sunxi_rsb_device(dev));
+ drv->remove(to_sunxi_rsb_device(dev));
+
+ return 0;
}
static struct bus_type sunxi_rsb_bus = {
@@ -335,6 +340,10 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -350,6 +359,9 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -377,6 +389,10 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -387,6 +403,9 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -614,11 +633,100 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
return 0;
}
-static const struct of_device_id sunxi_rsb_of_match_table[] = {
- { .compatible = "allwinner,sun8i-a23-rsb" },
- {}
-};
-MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ unsigned long p_clk_freq;
+ u32 clk_delay, reg;
+ int clk_div, ret;
+
+ ret = clk_prepare_enable(rsb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(rsb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ /* reset the controller */
+ writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+ readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+ !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+ /*
+ * Clock frequency and delay calculation code is from
+ * Allwinner U-boot sources.
+ *
+ * From A83 user manual:
+ * bus clock frequency = parent clock frequency / (2 * (divider + 1))
+ */
+ p_clk_freq = clk_get_rate(rsb->clk);
+ clk_div = p_clk_freq / rsb->clk_freq / 2;
+ if (!clk_div)
+ clk_div = 1;
+ else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+ clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+ clk_delay = clk_div >> 1;
+ if (!clk_delay)
+ clk_delay = 1;
+
+ dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+ writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+ rsb->regs + RSB_CCR);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(rsb->clk);
+
+ return ret;
+}
+
+static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
+{
+ /* Keep the clock and PM reference counts consistent. */
+ if (pm_runtime_status_suspended(rsb->dev))
+ pm_runtime_resume(rsb->dev);
+ reset_control_assert(rsb->rstc);
+ clk_disable_unprepare(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rsb->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_runtime_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ sunxi_rsb_hw_exit(rsb);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return sunxi_rsb_hw_init(rsb);
+}
static int sunxi_rsb_probe(struct platform_device *pdev)
{
@@ -626,10 +734,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct resource *r;
struct sunxi_rsb *rsb;
- unsigned long p_clk_freq;
- u32 clk_delay, clk_freq = 3000000;
- int clk_div, irq, ret;
- u32 reg;
+ u32 clk_freq = 3000000;
+ int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
if (clk_freq > RSB_MAX_FREQ) {
@@ -644,6 +750,7 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return -ENOMEM;
rsb->dev = dev;
+ rsb->clk_freq = clk_freq;
platform_set_drvdata(pdev, rsb);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rsb->regs = devm_ioremap_resource(dev, r);
@@ -661,79 +768,41 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_prepare_enable(rsb->clk);
- if (ret) {
- dev_err(dev, "failed to enable clk: %d\n", ret);
- return ret;
- }
-
- p_clk_freq = clk_get_rate(rsb->clk);
-
rsb->rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rsb->rstc)) {
ret = PTR_ERR(rsb->rstc);
dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- goto err_clk_disable;
- }
-
- ret = reset_control_deassert(rsb->rstc);
- if (ret) {
- dev_err(dev, "failed to deassert reset line: %d\n", ret);
- goto err_clk_disable;
+ return ret;
}
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
- /* reset the controller */
- writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
- readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
- !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
-
- /*
- * Clock frequency and delay calculation code is from
- * Allwinner U-boot sources.
- *
- * From A83 user manual:
- * bus clock frequency = parent clock frequency / (2 * (divider + 1))
- */
- clk_div = p_clk_freq / clk_freq / 2;
- if (!clk_div)
- clk_div = 1;
- else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
- clk_div = RSB_CCR_MAX_CLK_DIV + 1;
-
- clk_delay = clk_div >> 1;
- if (!clk_delay)
- clk_delay = 1;
-
- dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
- writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
- rsb->regs + RSB_CCR);
-
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
if (ret) {
dev_err(dev, "can't register interrupt handler irq %d: %d\n",
irq, ret);
- goto err_reset_assert;
+ return ret;
}
+ ret = sunxi_rsb_hw_init(rsb);
+ if (ret)
+ return ret;
+
/* initialize all devices on the bus into RSB mode */
ret = sunxi_rsb_init_device_mode(rsb);
if (ret)
dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
of_rsb_register_devices(rsb);
return 0;
-
-err_reset_assert:
- reset_control_assert(rsb->rstc);
-
-err_clk_disable:
- clk_disable_unprepare(rsb->clk);
-
- return ret;
}
static int sunxi_rsb_remove(struct platform_device *pdev)
@@ -741,18 +810,40 @@ static int sunxi_rsb_remove(struct platform_device *pdev)
struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
- reset_control_assert(rsb->rstc);
- clk_disable_unprepare(rsb->clk);
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
return 0;
}
+static void sunxi_rsb_shutdown(struct platform_device *pdev)
+{
+ struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
+}
+
+static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
+ sunxi_rsb_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sunxi_rsb_suspend, sunxi_rsb_resume)
+};
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a23-rsb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
.remove = sunxi_rsb_remove,
+ .shutdown = sunxi_rsb_shutdown,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
+ .pm = &sunxi_rsb_dev_pm_ops,
},
};
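
[The sunxi-rsb rework is the stock runtime-PM autosuspend recipe: hardware is brought up first (sunxi_rsb_hw_init()), then the device is marked active, given an autosuspend delay, and runtime PM is enabled; each transfer brackets register access with a get/put pair so the clock can be gated after a second of idleness. Condensed into a generic sketch with illustrative demo_* names:

#include <linux/pm_runtime.h>

static void demo_enable_rpm(struct device *dev)
{
        pm_runtime_set_active(dev);                     /* hw already up */
        pm_runtime_set_autosuspend_delay(dev, 1000);    /* 1 s idle */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}

static int demo_xfer(struct device *dev)
{
        int ret = pm_runtime_resume_and_get(dev);

        if (ret)
                return ret;

        /* ... touch the hardware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
]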
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8f0e52a71493..90ad34c6ef8e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2214,7 +2214,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(q, cdi->disk, rq, 0);
+ blk_execute_rq(cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;
diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
index 954a8411d67d..0eb80f786f4d 100644
--- a/drivers/char/hw_random/ingenic-trng.c
+++ b/drivers/char/hw_random/ingenic-trng.c
@@ -113,13 +113,17 @@ static int ingenic_trng_probe(struct platform_device *pdev)
ret = hwrng_register(&trng->rng);
if (ret) {
dev_err(&pdev->dev, "Failed to register hwrng\n");
- return ret;
+ goto err_unprepare_clk;
}
platform_set_drvdata(pdev, trng);
dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
return 0;
+
+err_unprepare_clk:
+ clk_disable_unprepare(trng->clk);
+ return ret;
}
static int ingenic_trng_remove(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 01583faf9893..a43743887db1 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -28,7 +28,6 @@
#define RNG_CTRL_OFFSET 0x00
#define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF
#define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001
-#define RNG_CTRL_RNG_RBGEN_DISABLE 0x00000000
#define RNG_SOFT_RESET_OFFSET 0x04
#define RNG_SOFT_RESET 0x00000001
@@ -54,15 +53,24 @@ struct iproc_rng200_dev {
#define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng)
-static void iproc_rng200_restart(void __iomem *rng_base)
+static void iproc_rng200_enable_set(void __iomem *rng_base, bool enable)
{
- uint32_t val;
+ u32 val;
- /* Disable RBG */
val = ioread32(rng_base + RNG_CTRL_OFFSET);
val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+
+ if (enable)
+ val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+
iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+}
+
+static void iproc_rng200_restart(void __iomem *rng_base)
+{
+ uint32_t val;
+
+ iproc_rng200_enable_set(rng_base, false);
/* Clear all interrupt status */
iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET);
@@ -84,11 +92,7 @@ static void iproc_rng200_restart(void __iomem *rng_base)
val &= ~RBG_SOFT_RESET;
iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
- /* Enable RBG */
- val = ioread32(rng_base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_ENABLE;
- iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(rng_base, true);
}
static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
@@ -155,13 +159,8 @@ static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
static int iproc_rng200_init(struct hwrng *rng)
{
struct iproc_rng200_dev *priv = to_rng_priv(rng);
- uint32_t val;
- /* Setup RNG. */
- val = ioread32(priv->base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_ENABLE;
- iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(priv->base, true);
return 0;
}
@@ -169,13 +168,8 @@ static int iproc_rng200_init(struct hwrng *rng)
static void iproc_rng200_cleanup(struct hwrng *rng)
{
struct iproc_rng200_dev *priv = to_rng_priv(rng);
- uint32_t val;
- /* Disable RNG hardware */
- val = ioread32(priv->base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_DISABLE;
- iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(priv->base, false);
}
static int iproc_rng200_probe(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index b0ded41eb865..67947a19aa22 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -69,11 +69,10 @@ out_clk:
return ret;
}
-static int nmk_rng_remove(struct amba_device *dev)
+static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
clk_disable(rng_clk);
- return 0;
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index a99d82949981..135a82590923 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -243,7 +243,7 @@ static int optee_rng_probe(struct device *dev)
if (err)
goto out_sess;
- err = hwrng_register(&pvt_data.optee_rng);
+ err = devm_hwrng_register(dev, &pvt_data.optee_rng);
if (err) {
dev_err(dev, "hwrng registration failed (%d)\n", err);
goto out_sess;
@@ -263,7 +263,6 @@ out_ctx:
static int optee_rng_remove(struct device *dev)
{
- hwrng_unregister(&pvt_data.optee_rng);
tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
tee_client_close_context(pvt_data.ctx);
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index e262445fed5f..8ea1fc831eb7 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
*/
if (retval > 0)
usleep_range(period_us,
- period_us + min(1, period_us / 100));
+ period_us + max(1, period_us / 100));
*(u32 *)data = readl(priv->io_base);
retval += sizeof(u32);
@@ -169,7 +169,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->present = 1;
complete(&priv->completion);
- err = hwrng_register(&priv->rng_ops);
+ err = devm_hwrng_register(&pdev->dev, &priv->rng_ops);
if (err) {
dev_err(&pdev->dev, "problem registering\n");
return err;
@@ -185,7 +185,6 @@ static int timeriomem_rng_remove(struct platform_device *pdev)
{
struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&priv->rng_ops);
hrtimer_cancel(&priv->timer);
return 0;
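
[The timeriomem fix above deserves the arithmetic spelled out: with min(1, period_us / 100) the upper bound of usleep_range() exceeds the lower one by at most 1 µs, and by 0 whenever period_us < 100, so the scheduler gets essentially no coalescing slack. max(1, period_us / 100) restores the intended ~1% window: for period_us = 5000 the old range was [5000, 5001] µs, the fixed one [5000, 5050] µs.]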
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 382b28f1cf2f..49b8f22fdcf0 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -137,7 +137,7 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
{
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
u8 rq_sa, netf_rq_lun, msg_len;
- union i2c_smbus_data data;
+ struct i2c_client *temp_client;
u8 msg[MAX_MSG_LEN];
ssize_t ret;
@@ -160,21 +160,21 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
}
/*
- * subtract rq_sa and netf_rq_lun from the length of the msg passed to
- * i2c_smbus_xfer
+ * subtract rq_sa and netf_rq_lun from the length of the msg. Fill the
+ * temporary client. Note that its use is an exception for IPMI.
*/
msg_len = msg[IPMB_MSG_LEN_IDX] - SMBUS_MSG_HEADER_LENGTH;
- if (msg_len > I2C_SMBUS_BLOCK_MAX)
- msg_len = I2C_SMBUS_BLOCK_MAX;
+ temp_client = kmemdup(ipmb_dev->client, sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->addr = rq_sa;
- data.block[0] = msg_len;
- memcpy(&data.block[1], msg + SMBUS_MSG_IDX_OFFSET, msg_len);
- ret = i2c_smbus_xfer(ipmb_dev->client->adapter, rq_sa,
- ipmb_dev->client->flags,
- I2C_SMBUS_WRITE, netf_rq_lun,
- I2C_SMBUS_BLOCK_DATA, &data);
+ ret = i2c_smbus_write_block_data(temp_client, netf_rq_lun, msg_len,
+ msg + SMBUS_MSG_IDX_OFFSET);
+ kfree(temp_client);
- return ret ? : count;
+ return ret < 0 ? ret : count;
}
static __poll_t ipmb_poll(struct file *file, poll_table *wait)
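
[The ipmb_write() rework swaps a raw i2c_smbus_xfer() for the i2c_smbus_write_block_data() helper, run on a kmemdup()'d copy of the client whose address is patched to the request target; the explicit I2C_SMBUS_BLOCK_MAX clamp can go because the helper truncates over-long blocks internally. The idiom in isolation, with illustrative names:

#include <linux/i2c.h>
#include <linux/slab.h>

/* Write a block to a peer address via an existing client's adapter. */
static int demo_block_write(const struct i2c_client *client, u8 peer_addr,
                            u8 command, u8 len, const u8 *data)
{
        struct i2c_client *tmp;
        int ret;

        tmp = kmemdup(client, sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        tmp->addr = peer_addr;  /* same adapter and flags, new target */
        ret = i2c_smbus_write_block_data(tmp, command, len, data);
        kfree(tmp);

        return ret;
}
]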
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 94c2b556cf97..869b9f5e8e03 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,9 +31,6 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
-#include <linux/pseudo_fs.h>
-#include <uapi/linux/magic.h>
-#include <linux/mount.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
@@ -294,13 +291,6 @@ static int uncached_access(struct file *file, phys_addr_t addr)
* attribute aliases.
*/
return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#elif defined(CONFIG_MIPS)
- {
- extern int __uncached_access(struct file *file,
- unsigned long addr);
-
- return __uncached_access(file, addr);
- }
#else
/*
* Accessing memory above the top the kernel knows about or through a
@@ -836,42 +826,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
-static struct inode *devmem_inode;
-
-#ifdef CONFIG_IO_STRICT_DEVMEM
-void revoke_devmem(struct resource *res)
-{
- /* pairs with smp_store_release() in devmem_init_inode() */
- struct inode *inode = smp_load_acquire(&devmem_inode);
-
- /*
- * Check that the initialization has completed. Losing the race
- * is ok because it means drivers are claiming resources before
- * the fs_initcall level of init and prevent /dev/mem from
- * establishing mappings.
- */
- if (!inode)
- return;
-
- /*
- * The expectation is that the driver has successfully marked
- * the resource busy by this point, so devmem_is_allowed()
- * should start returning false, however for performance this
- * does not iterate the entire resource range.
- */
- if (devmem_is_allowed(PHYS_PFN(res->start)) &&
- devmem_is_allowed(PHYS_PFN(res->end))) {
- /*
- * *cringe* iomem=relaxed says "go ahead, what's the
- * worst that can happen?"
- */
- return;
- }
-
- unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
-}
-#endif
-
static int open_port(struct inode *inode, struct file *filp)
{
int rc;
@@ -891,8 +845,7 @@ static int open_port(struct inode *inode, struct file *filp)
* revocations when drivers want to take over a /dev/mem mapped
* range.
*/
- inode->i_mapping = devmem_inode->i_mapping;
- filp->f_mapping = inode->i_mapping;
+ filp->f_mapping = iomem_get_mapping();
return 0;
}
@@ -1024,48 +977,6 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class;
-static int devmem_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type devmem_fs_type = {
- .name = "devmem",
- .owner = THIS_MODULE,
- .init_fs_context = devmem_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-static int devmem_init_inode(void)
-{
- static struct vfsmount *devmem_vfs_mount;
- static int devmem_fs_cnt;
- struct inode *inode;
- int rc;
-
- rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
- if (rc < 0) {
- pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
- return rc;
- }
-
- inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
- simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
- return rc;
- }
-
- /*
- * Publish /dev/mem initialized.
- * Pairs with smp_load_acquire() in revoke_devmem().
- */
- smp_store_release(&devmem_inode, inode);
-
- return 0;
-}
-
static int __init chr_dev_init(void)
{
int minor;
@@ -1087,8 +998,6 @@ static int __init chr_dev_init(void)
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
- if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
- continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index e342daa73d1b..2be8d9a8eec5 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2494,8 +2494,6 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
__FILE__, __LINE__, tty->driver->name, port->count);
- port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
retval = -EBUSY;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5f3b8ac9d97b..0fe9e200e4c8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1261,8 +1261,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
cycles_t cycles = random_get_entropy();
__u32 c_high, j_high;
__u64 ip;
- unsigned long seed;
- int credit = 0;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1298,23 +1296,12 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->last = now;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
-
- /*
- * If we have architectural seed generator, produce a seed and
- * add it to the pool. For the sake of paranoia don't let the
- * architectural seed generator dominate the input from the
- * interrupt noise.
- */
- if (arch_get_random_seed_long(&seed)) {
- __mix_pool_bytes(r, &seed, sizeof(seed));
- credit = 1;
- }
spin_unlock(&r->lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, credit + 1);
+ credit_entropy_bits(r, 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1972,7 +1959,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, NULL);
+ crng_reseed(&primary_crng, &input_pool);
crng_global_init_time = jiffies - 1;
return 0;
default:
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a18c314da211..4308f9ca7a43 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -86,6 +86,16 @@ config TCG_TIS_SYNQUACER
To compile this driver as a module, choose M here;
the module will be called tpm_tis_synquacer.
+config TCG_TIS_I2C_CR50
+ tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
+ depends on I2C
+ select TCG_CR50
+ help
+ This is a driver for the Google cr50 I2C TPM interface which is a
+ custom microcontroller and requires a custom i2c protocol interface
+ to handle the limitations of the hardware. To compile this driver
+ as a module, choose M here; the module will be called tcg_tis_i2c_cr50.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 84db4fb3a9c9..66d39ea6bd10 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
tpm_tis_spi-y := tpm_tis_spi_main.o
tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 2c96977ad080..8aa9057601d6 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -210,6 +210,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index ddaeceb7e109..19e23fcc6bc8 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -278,6 +278,8 @@ static void tpm_devs_release(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+ dump_stack();
+
/* release the master device reference */
put_device(&chip->dev);
}
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 1784530b8387..c08cbb306636 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -20,7 +20,6 @@
#include "tpm-dev.h"
static struct workqueue_struct *tpm_dev_wq;
-static DEFINE_MUTEX(tpm_dev_wq_lock);
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
u8 *buf, size_t bufsiz)
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index e2ff0b273a0f..63f03cfb8e6a 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -337,11 +337,190 @@ static const struct attribute_group tpm2_dev_group = {
.attrs = tpm2_dev_attrs,
};
+struct tpm_pcr_attr {
+ int alg_id;
+ int pcr;
+ struct device_attribute attr;
+};
+
+#define to_tpm_pcr_attr(a) container_of(a, struct tpm_pcr_attr, attr)
+
+static ssize_t pcr_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_pcr_attr *ha = to_tpm_pcr_attr(attr);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm_digest digest;
+ int i;
+ int digest_size = 0;
+ int rc;
+ char *str = buf;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ if (ha->alg_id == chip->allocated_banks[i].alg_id)
+ digest_size = chip->allocated_banks[i].digest_size;
+ /* should never happen */
+ if (!digest_size)
+ return -EINVAL;
+
+ digest.alg_id = ha->alg_id;
+ rc = tpm_pcr_read(chip, ha->pcr, &digest);
+ if (rc)
+ return rc;
+ for (i = 0; i < digest_size; i++)
+ str += sprintf(str, "%02X", digest.digest[i]);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+/*
+ * The following set of defines represents all the magic to build
+ * the per hash attribute groups for displaying each bank of PCRs.
+ * The only slight problem with this approach is that every PCR is
+ * hard-coded to be present, so you don't know if a PCR is missing
+ * until a cat of the file returns -EINVAL.
+ *
+ * Also note you must ignore checkpatch warnings in this macro
+ * code. This is deep macro magic that checkpatch.pl doesn't
+ * understand.
+ */
+
+/* Note, this must match TPM2_PLATFORM_PCR which is fixed at 24. */
+#define _TPM_HELPER(_alg, _hash, F) \
+ F(_alg, _hash, 0) \
+ F(_alg, _hash, 1) \
+ F(_alg, _hash, 2) \
+ F(_alg, _hash, 3) \
+ F(_alg, _hash, 4) \
+ F(_alg, _hash, 5) \
+ F(_alg, _hash, 6) \
+ F(_alg, _hash, 7) \
+ F(_alg, _hash, 8) \
+ F(_alg, _hash, 9) \
+ F(_alg, _hash, 10) \
+ F(_alg, _hash, 11) \
+ F(_alg, _hash, 12) \
+ F(_alg, _hash, 13) \
+ F(_alg, _hash, 14) \
+ F(_alg, _hash, 15) \
+ F(_alg, _hash, 16) \
+ F(_alg, _hash, 17) \
+ F(_alg, _hash, 18) \
+ F(_alg, _hash, 19) \
+ F(_alg, _hash, 20) \
+ F(_alg, _hash, 21) \
+ F(_alg, _hash, 22) \
+ F(_alg, _hash, 23)
+
+/* ignore checkpatch warning about trailing ; in macro. */
+#define PCR_ATTR(_alg, _hash, _pcr) \
+ static struct tpm_pcr_attr dev_attr_pcr_##_hash##_##_pcr = { \
+ .alg_id = _alg, \
+ .pcr = _pcr, \
+ .attr = { \
+ .attr = { \
+ .name = __stringify(_pcr), \
+ .mode = 0444 \
+ }, \
+ .show = pcr_value_show \
+ } \
+ };
+
+#define PCR_ATTRS(_alg, _hash) \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR)
+
+/* ignore checkpatch warning about trailing , in macro. */
+#define PCR_ATTR_VAL(_alg, _hash, _pcr) \
+ &dev_attr_pcr_##_hash##_##_pcr.attr.attr,
+
+#define PCR_ATTR_GROUP_ARRAY(_alg, _hash) \
+ static struct attribute *pcr_group_attrs_##_hash[] = { \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR_VAL) \
+ NULL \
+ }
+
+#define PCR_ATTR_GROUP(_alg, _hash) \
+ static struct attribute_group pcr_group_##_hash = { \
+ .name = "pcr-" __stringify(_hash), \
+ .attrs = pcr_group_attrs_##_hash \
+ }
+
+#define PCR_ATTR_BUILD(_alg, _hash) \
+ PCR_ATTRS(_alg, _hash) \
+ PCR_ATTR_GROUP_ARRAY(_alg, _hash); \
+ PCR_ATTR_GROUP(_alg, _hash)
+/*
+ * End of macro structure to build an attribute group containing 24
+ * PCR value files for each supported hash algorithm
+ */
+
+/*
+ * The next set of macros implements the cleverness for each hash to
+ * build a static attribute group called pcr_group_<hash> which can be
+ * added to chip->groups[].
+ *
+ * The first argument is the TPM algorithm id and the second is the
+ * hash used as both the suffix and the group name. Note: the group
+ * name is a directory in the top level tpm class with the name
+ * pcr-<hash>, so it must not clash with any other names already
+ * in the sysfs directory.
+ */
+PCR_ATTR_BUILD(TPM_ALG_SHA1, sha1);
+PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256);
+PCR_ATTR_BUILD(TPM_ALG_SHA384, sha384);
+PCR_ATTR_BUILD(TPM_ALG_SHA512, sha512);
+PCR_ATTR_BUILD(TPM_ALG_SM3_256, sm3);
+
+
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ int i;
+
WARN_ON(chip->groups_cnt != 0);
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
else
chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
+
+ /* add one group for each bank hash */
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ switch (chip->allocated_banks[i].alg_id) {
+ case TPM_ALG_SHA1:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha1;
+ break;
+ case TPM_ALG_SHA256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha256;
+ break;
+ case TPM_ALG_SHA384:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha384;
+ break;
+ case TPM_ALG_SHA512:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha512;
+ break;
+ case TPM_ALG_SM3_256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sm3;
+ break;
+ default:
+ /*
+ * If this triggers, send a patch to add both a
+ * PCR_ATTR_BUILD() macro above for the
+ * missing algorithm as well as an additional
+ * case in this switch statement.
+ */
+ dev_err(&chip->dev,
+ "TPM with unsupported bank algorithm 0x%04x",
+ chip->allocated_banks[i].alg_id);
+ break;
+ }
+ }
+
+ /*
+ * This will only trigger if someone has added an additional
+ * hash to the tpm_algorithms enum without incrementing
+ * TPM_MAX_HASHES.
+ */
+ WARN_ON(chip->groups_cnt > TPM_MAX_HASHES + 1);
}
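
[To make the macro tower concrete: PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256) expands, with 22 of the 24 attribute definitions elided here, to roughly the following, which is why each allocated bank surfaces as a sysfs directory pcr-sha256/ holding read-only files 0 through 23:

static struct tpm_pcr_attr dev_attr_pcr_sha256_0 = {
        .alg_id = TPM_ALG_SHA256,
        .pcr = 0,
        .attr = {
                .attr = { .name = "0", .mode = 0444 },
                .show = pcr_value_show,
        },
};
/* ... dev_attr_pcr_sha256_1 through dev_attr_pcr_sha256_23 ... */

static struct attribute *pcr_group_attrs_sha256[] = {
        &dev_attr_pcr_sha256_0.attr.attr,
        /* ... the remaining 23 entries ... */
        NULL
};

static struct attribute_group pcr_group_sha256 = {
        .name = "pcr-sha256",
        .attrs = pcr_group_attrs_sha256,
};
]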
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 947d1db0a5cc..283f78211c3a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -164,8 +164,6 @@ extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
- size_t min_rsp_body_length, const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm_auto_startup(struct tpm_chip *chip);
@@ -194,8 +192,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
-__must_check int tpm_try_get_ops(struct tpm_chip *chip);
-void tpm_put_ops(struct tpm_chip *chip);
struct tpm_chip *tpm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b2dab941cb7f..40018a73b3cb 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -358,7 +358,7 @@ static struct attribute *ppi_attrs[] = {
&dev_attr_tcg_operations.attr,
&dev_attr_vs_operations.attr, NULL,
};
-static struct attribute_group ppi_attr_grp = {
+static const struct attribute_group ppi_attr_grp = {
.name = "ppi",
.attrs = ppi_attrs
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 92c51c6cfd1b..431919d5f48a 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
if (rc < 0)
return false;
- if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
+ | TPM_ACCESS_REQUEST_USE)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
priv->locality = l;
return true;
@@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
-static bool locality_inactive(struct tpm_chip *chip, int l)
-{
- struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- int rc;
- u8 access;
-
- rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
- if (rc < 0)
- return false;
-
- if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
- == TPM_ACCESS_VALID)
- return true;
-
- return false;
-}
-
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- unsigned long stop, timeout;
- long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
- stop = jiffies + chip->timeout_a;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -1;
-
- rc = wait_event_interruptible_timeout(priv->int_queue,
- (locality_inactive(chip, l)),
- timeout);
-
- if (rc > 0)
- return 0;
-
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- if (locality_inactive(chip, l))
- return 0;
- tpm_msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- }
- return -1;
+ return 0;
}
static int request_locality(struct tpm_chip *chip, int l)
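
[Two behavioral points in the tpm_tis_core hunks above: check_locality() now folds TPM_ACCESS_REQUEST_USE into the mask, so a locality only counts as granted once the TPM has cleared the pending request bit; for example, access = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_REQUEST_USE satisfied the old test but fails the new one. And release_locality() becomes fire-and-forget: it writes TPM_ACCESS_ACTIVE_LOCALITY to request the release and returns 0 immediately, dropping the locality_inactive() poll loop and its interrupt wait.]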
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
new file mode 100644
index 000000000000..ec9a65e7887d
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Based on Infineon TPM driver by Peter Huewe.
+ *
+ * cr50 is a firmware for H1 secure modules that requires special
+ * handling for the I2C interface.
+ *
+ * - Use an interrupt for transaction status instead of hardcoded delays.
+ * - Must use write+wait+read read protocol.
+ * - All 4 bytes of status register must be read/written at once.
+ * - Burst count max is 63 bytes, and burst count behaves slightly differently
+ * from other I2C TPMs.
+ * - When reading from FIFO the full burstcnt must be read instead of just
+ * reading header and determining the remainder.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+
+#define TPM_CR50_MAX_BUFSIZE 64
+#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
+#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
+#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
+#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+
+#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
+#define TPM_I2C_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_I2C_DID_VID(l) (0x0006 | ((l) << 4))
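+
+/*
+ * Illustrative register map derived from the macros above: the locality
+ * index occupies the upper nibble, so locality 0 yields ACCESS = 0x00,
+ * STS = 0x01, DATA_FIFO = 0x05 and DID_VID = 0x06, while locality 1
+ * would yield 0x10, 0x11, 0x15 and 0x16.
+ */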
+
+/**
+ * struct tpm_i2c_cr50_priv_data - Driver private data.
+ * @irq: Irq number used for this chip.
+ * If irq <= 0, then a fixed timeout is used instead of waiting for irq.
+ * @tpm_ready: Struct used by irq handler to signal R/W readiness.
+ * @buf: Buffer used for i2c writes, with i2c address prepended to content.
+ *
+ * Private driver struct used by kernel threads and interrupt context.
+ */
+struct tpm_i2c_cr50_priv_data {
+ int irq;
+ struct completion tpm_ready;
+ u8 buf[TPM_CR50_MAX_BUFSIZE];
+};
+
+/**
+ * tpm_cr50_i2c_int_handler() - cr50 interrupt handler.
+ * @dummy: Unused parameter.
+ * @tpm_info: TPM chip information.
+ *
+ * The cr50 interrupt handler signals waiting threads that the
+ * interrupt has been asserted. It does not do any interrupt triggered
+ * processing but is instead used to avoid fixed delays.
+ *
+ * Return:
+ * IRQ_HANDLED signifies irq was handled by this device.
+ */
+static irqreturn_t tpm_cr50_i2c_int_handler(int dummy, void *tpm_info)
+{
+ struct tpm_chip *chip = tpm_info;
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ complete(&priv->tpm_ready);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_cr50_i2c_wait_tpm_ready() - Wait for tpm to signal ready.
+ * @chip: A TPM chip.
+ *
+ * Wait for completion interrupt if available, otherwise use a fixed
+ * delay for the TPM to be ready.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* Use a safe fixed delay if interrupt is not supported */
+ if (priv->irq <= 0) {
+ msleep(TPM_CR50_TIMEOUT_NOIRQ_MS);
+ return 0;
+ }
+
+ /* Wait for interrupt to indicate TPM is ready to respond */
+ if (!wait_for_completion_timeout(&priv->tpm_ready,
+ msecs_to_jiffies(chip->timeout_a))) {
+ dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_enable_tpm_irq() - Enable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_enable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0) {
+ reinit_completion(&priv->tpm_ready);
+ enable_irq(priv->irq);
+ }
+}
+
+/**
+ * tpm_cr50_i2c_disable_tpm_irq() - Disable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_disable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0)
+ disable_irq(priv->irq);
+}
+
+/**
+ * tpm_cr50_i2c_transfer_message() - Transfer a message over i2c.
+ * @dev: Device information.
+ * @adapter: I2C adapter.
+ * @msg: Message to transfer.
+ *
+ * Call unlocked i2c transfer routine with the provided parameters and
+ * retry in case of bus errors.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_transfer_message(struct device *dev,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msg)
+{
+ unsigned int try;
+ int rc;
+
+ for (try = 0; try < TPM_CR50_I2C_MAX_RETRIES; try++) {
+ rc = __i2c_transfer(adapter, msg, 1);
+ if (rc == 1)
+ return 0; /* Successfully transferred the message */
+ if (try)
+ dev_warn(dev, "i2c transfer failed (attempt %d/%d): %d\n",
+ try + 1, TPM_CR50_I2C_MAX_RETRIES, rc);
+ usleep_range(TPM_CR50_I2C_RETRY_DELAY_LO, TPM_CR50_I2C_RETRY_DELAY_HI);
+ }
+
+ /* No i2c message transferred */
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_i2c_read() - Read from TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to read from.
+ * @buffer: Read destination, provided by caller.
+ * @len: Number of bytes to read.
+ *
+ * Sends the register address byte to the TPM, waits until the TPM is
+ * ready via interrupt signal or timeout expiration, then reads 'len'
+ * bytes of response data from the TPM into the provided 'buffer'.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg_reg_addr = {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg_response = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ int rc;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send the register address byte to the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_reg_addr);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready with response data */
+ rc = tpm_cr50_i2c_wait_tpm_ready(chip);
+ if (rc < 0)
+ goto out;
+
+ /* Read response data from the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_response);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_write() - Write to TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to write to.
+ * @buffer: Data to write.
+ * @len: Number of bytes to write.
+ *
+ * The provided address is prepended to the data in 'buffer'; the
+ * combined address+data is sent to the TPM, then the function waits
+ * for the TPM to indicate it is done writing.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
+ size_t len)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .len = len + 1,
+ .buf = priv->buf
+ };
+ int rc;
+
+ if (len > TPM_CR50_MAX_BUFSIZE - 1)
+ return -EINVAL;
+
+ /* Prepend the 'register address' to the buffer */
+ priv->buf[0] = addr;
+ memcpy(priv->buf + 1, buffer, len);
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send write request buffer with address */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready, ignore timeout */
+ tpm_cr50_i2c_wait_tpm_ready(chip);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_check_locality(struct tpm_chip *chip)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
+ u8 buf;
+ int rc;
+
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ if ((buf & mask) == mask)
+ return 0;
+
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_release_locality() - Release TPM locality.
+ * @chip: A TPM chip.
+ * @force: Flag to force release if set.
+ */
+static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
+ u8 addr = TPM_I2C_ACCESS(0);
+ u8 buf;
+
+ if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
+ return;
+
+ if (force || (buf & mask) == mask) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ }
+}
+
+/**
+ * tpm_cr50_request_locality() - Request TPM locality 0.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_request_locality(struct tpm_chip *chip)
+{
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+ unsigned long stop;
+ int rc;
+
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_status() - Read cr50 tis status.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be read.
+ *
+ * Return:
+ * TPM status byte.
+ */
+static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
+{
+ u8 buf[4];
+
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ return 0;
+
+ return buf[0];
+}
+
+/**
+ * tpm_cr50_i2c_tis_set_ready() - Set status register to ready.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be written.
+ */
+static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
+{
+ u8 buf[4] = { TPM_STS_COMMAND_READY };
+
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+}
+
+/**
+ * tpm_cr50_i2c_get_burst_and_status() - Get burst count and status.
+ * @chip: A TPM chip.
+ * @mask: Status mask.
+ * @burst: Return value for burst.
+ * @status: Return value for status.
+ *
+ * cr50 keeps the burst count in the two bytes following the status
+ * byte (buf[1] and buf[2], little endian), and all 4 bytes must be
+ * read.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
+ size_t *burst, u32 *status)
+{
+ unsigned long stop;
+ u8 buf[4];
+
+ *status = 0;
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_b;
+
+ do {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ continue;
+ }
+
+ *status = *buf;
+ *burst = le16_to_cpup((__le16 *)(buf + 1));
+
+ if ((*status & mask) == mask &&
+ *burst > 0 && *burst <= TPM_CR50_MAX_BUFSIZE - 1)
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ dev_err(&chip->dev, "Timeout reading burst and status\n");
+ return -ETIMEDOUT;
+}
+
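+/*
+ * Worked example (illustrative values): a status read returning
+ * buf = { 0x90, 0x28, 0x00, 0x00 } decodes to status = 0x90
+ * (TPM_STS_VALID | TPM_STS_DATA_AVAIL) and a little-endian burst
+ * count of 0x0028 = 40 bytes taken from buf[1] and buf[2].
+ */
+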
+/**
+ * tpm_cr50_i2c_tis_recv() - TPM reception callback.
+ * @chip: A TPM chip.
+ * @buf: Reception buffer.
+ * @buf_len: Buffer length to read.
+ *
+ * Return:
+ * - >= 0: Number of read bytes.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
+{
+ u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
+ size_t burstcnt, cur, len, expected;
+ u8 addr = TPM_I2C_DATA_FIFO(0);
+ u32 status;
+ int rc;
+
+ if (buf_len < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev,
+ "Unexpected burstcnt: %zu (max=%zu, min=%d)\n",
+ burstcnt, buf_len, TPM_HEADER_SIZE);
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Read first chunk of burstcnt bytes */
+ rc = tpm_cr50_i2c_read(chip, addr, buf, burstcnt);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read of first chunk failed\n");
+ goto out_err;
+ }
+
+ /* Determine expected data in the return buffer */
+ expected = be32_to_cpup((__be32 *)(buf + 2));
+ if (expected > buf_len) {
+ dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ goto out_err;
+ }
+
+ /* Now read the rest of the data */
+ cur = burstcnt;
+ while (cur < expected) {
+ /* Read updated burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ len = min_t(size_t, burstcnt, expected - cur);
+ rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read failed\n");
+ goto out_err;
+ }
+
+ cur += len;
+ }
+
+ /* Ensure TPM is done reading data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Data still available\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ tpm_cr50_release_locality(chip, false);
+ return cur;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_tis_send() - TPM transmission callback.
+ * @chip: A TPM chip.
+ * @buf: Buffer to send.
+ * @len: Buffer length.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ size_t burstcnt, limit, sent = 0;
+ u8 tpm_go[4] = { TPM_STS_GO };
+ unsigned long stop;
+ u32 status;
+ int rc;
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Wait until TPM is ready for a command */
+ stop = jiffies + chip->timeout_b;
+ while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
+ if (time_after(jiffies, stop)) {
+ rc = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ tpm_cr50_i2c_tis_set_ready(chip);
+ }
+
+ while (len > 0) {
+ u8 mask = TPM_STS_VALID;
+
+ /* Wait for data if this is not the first chunk */
+ if (sent > 0)
+ mask |= TPM_STS_DATA_EXPECT;
+
+ /* Read burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ /*
+ * Use burstcnt - 1 to account for the address byte
+ * that is inserted by tpm_cr50_i2c_write()
+ */
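+ /* e.g. a burstcnt of 63 allows at most 62 payload bytes per chunk */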
+ limit = min_t(size_t, burstcnt - 1, len);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Write failed\n");
+ goto out_err;
+ }
+
+ sent += limit;
+ len -= limit;
+ }
+
+ /* Ensure TPM is not expecting more data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_EXPECT) {
+ dev_err(&chip->dev, "Data still expected\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Start the TPM command */
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ sizeof(tpm_go));
+ if (rc < 0) {
+ dev_err(&chip->dev, "Start command failed\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_req_canceled() - Callback to notify a request cancel.
+ * @chip: A TPM chip.
+ * @status: Last TPM status value read by the caller.
+ *
+ * Return:
+ * True if command is ready, False otherwise.
+ */
+static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static const struct tpm_class_ops cr50_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = &tpm_cr50_i2c_tis_status,
+ .recv = &tpm_cr50_i2c_tis_recv,
+ .send = &tpm_cr50_i2c_tis_send,
+ .cancel = &tpm_cr50_i2c_tis_set_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = &tpm_cr50_i2c_req_canceled,
+};
+
+static const struct i2c_device_id cr50_i2c_table[] = {
+ {"cr50_i2c", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cr50_i2c_table);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cr50_i2c_acpi_id[] = {
+ { "GOOG0005", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cr50_i2c_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cr50_i2c_match[] = {
+ { .compatible = "google,cr50", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
+#endif
+
+/**
+ * tpm_cr50_i2c_probe() - Driver probe function.
+ * @client: I2C client information.
+ * @id: I2C device id.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tpm_i2c_cr50_priv_data *priv;
+ struct device *dev = &client->dev;
+ struct tpm_chip *chip;
+ u32 vendor;
+ u8 buf[4];
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &cr50_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* cr50 is a TPM 2.0 chip */
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+ init_completion(&priv->tpm_ready);
+
+ if (client->irq > 0) {
+ rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev->driver->name, chip);
+ if (rc < 0) {
+ dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
+ return rc;
+ }
+
+ disable_irq(client->irq);
+ priv->irq = client->irq;
+ } else {
+ dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
+ TPM_CR50_TIMEOUT_NOIRQ_MS);
+ }
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0) {
+ dev_err(dev, "Could not request locality\n");
+ return rc;
+ }
+
+ /* Read four bytes from DID_VID register */
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ if (rc < 0) {
+ dev_err(dev, "Could not read vendor id\n");
+ tpm_cr50_release_locality(chip, true);
+ return rc;
+ }
+
+ vendor = le32_to_cpup((__le32 *)buf);
+ if (vendor != TPM_CR50_I2C_DID_VID) {
+ dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
+ tpm_cr50_release_locality(chip, true);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "cr50 TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ client->addr, client->irq, vendor >> 16);
+
+ return tpm_chip_register(chip);
+}
+
+/**
+ * tpm_cr50_i2c_remove() - Driver remove function.
+ * @client: I2C client information.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ if (!chip) {
+ dev_err(dev, "Could not get client data at remove\n");
+ return -ENODEV;
+ }
+
+ tpm_chip_unregister(chip);
+ tpm_cr50_release_locality(chip, true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver cr50_i2c_driver = {
+ .id_table = cr50_i2c_table,
+ .probe = tpm_cr50_i2c_probe,
+ .remove = tpm_cr50_i2c_remove,
+ .driver = {
+ .name = "cr50_i2c",
+ .pm = &cr50_i2c_pm,
+ .acpi_match_table = ACPI_PTR(cr50_i2c_acpi_id),
+ .of_match_table = of_match_ptr(of_cr50_i2c_match),
+ },
+};
+
+module_i2c_driver(cr50_i2c_driver);
+
+MODULE_DESCRIPTION("cr50 TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 85856cff506c..a588d56502d4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -247,7 +247,8 @@ config CLK_TWL6040
config COMMON_CLK_AXI_CLKGEN
tristate "AXI clkgen driver"
- depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on HAS_IOMEM || COMPILE_TEST
+ depends on OF
help
Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
FPGAs. It is commonly used in Analog Devices' reference designs.
@@ -368,6 +369,13 @@ config COMMON_CLK_FIXED_MMIO
help
Support for Memory Mapped IO Fixed clocks
+config COMMON_CLK_K210
+ bool "Clock driver for the Canaan Kendryte K210 SoC"
+ depends on OF && RISCV && SOC_CANAAN
+ default SOC_CANAAN
+ help
+ Support for the Canaan Kendryte K210 RISC-V SoC clocks.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -379,6 +387,7 @@ source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
+source "drivers/clk/mstar/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
@@ -392,6 +401,7 @@ source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
source "drivers/clk/x86/Kconfig"
+source "drivers/clk/xilinx/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
endif
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index dbdc590e7de3..b22ae4f81e0b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
-obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
@@ -37,6 +36,7 @@ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
+obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
@@ -63,9 +63,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
-obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
-obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
@@ -95,6 +93,7 @@ obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
+obj-y += mstar/
obj-y += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
obj-$(CONFIG_COMMON_CLK_NXP) += nxp/
@@ -105,7 +104,6 @@ obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
-obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_ARCH_AGILEX) += socfpga/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
@@ -122,6 +120,6 @@ obj-y += versatile/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
-obj-$(CONFIG_ARCH_ZX) += zte/
+obj-y += xilinx/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 0fad1009f315..428a6f4b9ebc 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -215,5 +215,4 @@ err_free:
* deferring properly. Once this is fixed, this can be switched to a platform
* driver.
*/
-CLK_OF_DECLARE_DRIVER(at91rm9200_pmc, "atmel,at91rm9200-pmc",
- at91rm9200_pmc_setup);
+CLK_OF_DECLARE(at91rm9200_pmc, "atmel,at91rm9200-pmc", at91rm9200_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index ceb5495f723a..b29843bea278 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -491,26 +491,26 @@ static void __init at91sam9260_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9260_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9260_pmc, "atmel,at91sam9260-pmc",
- at91sam9260_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9260_pmc, "atmel,at91sam9260-pmc", at91sam9260_pmc_setup);
static void __init at91sam9261_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9261_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9261_pmc, "atmel,at91sam9261-pmc",
- at91sam9261_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9261_pmc, "atmel,at91sam9261-pmc", at91sam9261_pmc_setup);
static void __init at91sam9263_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9263_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9263_pmc, "atmel,at91sam9263-pmc",
- at91sam9263_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9263_pmc, "atmel,at91sam9263-pmc", at91sam9263_pmc_setup);
static void __init at91sam9g20_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9g20_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g20_pmc, "atmel,at91sam9g20-pmc",
- at91sam9g20_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g20_pmc, "atmel,at91sam9g20-pmc", at91sam9g20_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 0214333dedd3..15da0dfe3ef2 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -228,5 +228,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(at91sam9g45_pmc, "atmel,at91sam9g45-pmc",
- at91sam9g45_pmc_setup);
+CLK_OF_DECLARE(at91sam9g45_pmc, "atmel,at91sam9g45-pmc", at91sam9g45_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index f9db5316a7f1..7fe435f4b46b 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -255,5 +255,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(at91sam9n12_pmc, "atmel,at91sam9n12-pmc",
- at91sam9n12_pmc_setup);
+CLK_OF_DECLARE(at91sam9n12_pmc, "atmel,at91sam9n12-pmc", at91sam9n12_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 66736e03cfef..ecbabf5162bd 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -186,4 +186,5 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
err_free:
kfree(at91sam9rl_pmc);
}
-CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 79b9d3667228..5cce48c64ea2 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -302,33 +302,33 @@ static void __init at91sam9g15_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g15_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g15_pmc, "atmel,at91sam9g15-pmc",
- at91sam9g15_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g15_pmc, "atmel,at91sam9g15-pmc", at91sam9g15_pmc_setup);
static void __init at91sam9g25_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g25_periphck, false);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g25_pmc, "atmel,at91sam9g25-pmc",
- at91sam9g25_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g25_pmc, "atmel,at91sam9g25-pmc", at91sam9g25_pmc_setup);
static void __init at91sam9g35_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g35_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g35_pmc, "atmel,at91sam9g35-pmc",
- at91sam9g35_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g35_pmc, "atmel,at91sam9g35-pmc", at91sam9g35_pmc_setup);
static void __init at91sam9x25_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9x25_periphck, false);
}
-CLK_OF_DECLARE_DRIVER(at91sam9x25_pmc, "atmel,at91sam9x25-pmc",
- at91sam9x25_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9x25_pmc, "atmel,at91sam9x25-pmc", at91sam9x25_pmc_setup);
static void __init at91sam9x35_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9x35_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9x35_pmc, "atmel,at91sam9x35-pmc",
- at91sam9x35_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9x35_pmc, "atmel,at91sam9x35-pmc", at91sam9x35_pmc_setup);
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 9a5cbc7cd55a..3d1f78176c3e 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -372,4 +372,5 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
err_free:
kfree(sama5d2_pmc);
}
-CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
+
+CLK_OF_DECLARE(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 87009ee8effc..d376257807d2 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -255,4 +255,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
+CLK_OF_DECLARE(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 57fff790188b..5cbaac68da44 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -286,4 +286,5 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
err_free:
kfree(sama5d4_pmc);
}
-CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
+
+CLK_OF_DECLARE(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 274441e2ddb2..33da30f99c79 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -704,7 +704,7 @@ static const struct clk_ops iproc_clk_ops = {
.set_rate = iproc_clk_set_rate,
};
-/**
+/*
* Some PLLs require the PLL SW override bit to be set before changes can be
* applied to the PLL
*/
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 177368cac6dd..a55b37fc2c8b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -17,7 +17,8 @@
#define ASPEED_G6_NUM_CLKS 71
-#define ASPEED_G6_SILICON_REV 0x004
+#define ASPEED_G6_SILICON_REV 0x014
+#define CHIP_REVISION_ID GENMASK(23, 16)
#define ASPEED_G6_RESET_CTRL 0x040
#define ASPEED_G6_RESET_CTRL2 0x050
@@ -190,18 +191,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
unsigned int mult, div;
+ u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
- if (val & BIT(20)) {
- /* Pass through mode */
- mult = div = 1;
+ if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
+ if (val & BIT(24)) {
+ /* Pass through mode */
+ mult = div = 1;
+ } else {
+ /* F = 25 MHz * [(m + 1) / (n + 1)] / (p + 1) */
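+ /* e.g. m = 79, n = 1, p = 0 -> 25 MHz * 80 / 2 / 1 = 1000 MHz (illustrative values) */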
+ u32 m = val & 0x1fff;
+ u32 n = (val >> 13) & 0x3f;
+ u32 p = (val >> 19) & 0xf;
+
+ mult = (m + 1);
+ div = (n + 1) * (p + 1);
+ }
} else {
- /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
- u32 m = (val >> 5) & 0x3f;
- u32 od = (val >> 4) & 0x1;
- u32 n = val & 0xf;
+ if (val & BIT(20)) {
+ /* Pass through mode */
+ mult = div = 1;
+ } else {
+ /* F = 25 MHz * (2 - od) * [(m + 2) / (n + 1)] */
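+ /* e.g. m = 30, od = 0, n = 1 -> 25 MHz * 2 * 32 / 2 = 800 MHz (illustrative values) */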
+ u32 m = (val >> 5) & 0x3f;
+ u32 od = (val >> 4) & 0x1;
+ u32 n = val & 0xf;
- mult = (2 - od) * (m + 2);
- div = n + 1;
+ mult = (2 - od) * (m + 2);
+ div = n + 1;
+ }
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index ad86e031ba3e..ac6ff736ac8f 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -108,6 +108,13 @@ static uint32_t axi_clkgen_lookup_lock(unsigned int m)
return 0x1f1f00fa;
}
+static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
+ .fpfd_min = 10000,
+ .fpfd_max = 450000,
+ .fvco_min = 800000,
+ .fvco_max = 1600000,
+};
+
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
.fpfd_max = 300000,
@@ -503,7 +510,6 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
- struct resource *mem;
unsigned int i;
int ret;
@@ -515,8 +521,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
+ axi_clkgen->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
@@ -561,6 +566,10 @@ static int axi_clkgen_remove(struct platform_device *pdev)
static const struct of_device_id axi_clkgen_ids[] = {
{
+ .compatible = "adi,zynqmp-axi-clkgen-2.00.a",
+ .data = &axi_clkgen_zynqmp_default_limits,
+ },
+ {
.compatible = "adi,axi-clkgen-2.00.a",
.data = &axi_clkgen_zynq_default_limits,
},
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index b52e8d6f660c..17d90e09f1c0 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -31,12 +31,12 @@ struct bd718xx_clk {
u8 reg;
u8 mask;
struct platform_device *pdev;
- struct rohm_regmap_dev *mfd;
+ struct regmap *regmap;
};
static int bd71837_clk_set(struct bd718xx_clk *c, unsigned int status)
{
- return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status);
+ return regmap_update_bits(c->regmap, c->reg, c->mask, status);
}
static void bd71837_clk_disable(struct clk_hw *hw)
@@ -62,7 +62,7 @@ static int bd71837_clk_is_enabled(struct clk_hw *hw)
int rval;
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
- rval = regmap_read(c->mfd->regmap, c->reg, &enabled);
+ rval = regmap_read(c->regmap, c->reg, &enabled);
if (rval)
return rval;
@@ -82,7 +82,6 @@ static int bd71837_clk_probe(struct platform_device *pdev)
int rval = -ENOMEM;
const char *parent_clk;
struct device *parent = pdev->dev.parent;
- struct rohm_regmap_dev *mfd = dev_get_drvdata(parent);
struct clk_init_data init = {
.name = "bd718xx-32k-out",
.ops = &bd71837_clk_ops,
@@ -93,6 +92,10 @@ static int bd71837_clk_probe(struct platform_device *pdev)
if (!c)
return -ENOMEM;
+ c->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!c->regmap)
+ return -ENODEV;
+
init.num_parents = 1;
parent_clk = of_clk_get_parent_name(parent->of_node, 0);
@@ -119,7 +122,6 @@ static int bd71837_clk_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unknown clk chip\n");
return -EINVAL;
}
- c->mfd = mfd;
c->pdev = pdev;
c->hw.init = &init;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c499799693cc..344997203f0e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -494,8 +494,13 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
else
init.ops = &clk_divider_ops;
init.flags = flags;
- init.parent_names = (parent_name ? &parent_name: NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.parent_hws = parent_hw ? &parent_hw : NULL;
+ init.parent_data = parent_data;
+ if (parent_name || parent_hw || parent_data)
+ init.num_parents = 1;
+ else
+ init.num_parents = 0;
/* struct clk_divider assignments */
div->reg = reg;
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
deleted file mode 100644
index 85beaacb4088..000000000000
--- a/drivers/clk/clk-efm32gg.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- */
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-
-#include <dt-bindings/clock/efm32-cmu.h>
-
-#define CMU_HFPERCLKEN0 0x44
-#define CMU_MAX_CLKS 37
-
-static struct clk_hw_onecell_data *clk_data;
-
-static void __init efm32gg_cmu_init(struct device_node *np)
-{
- int i;
- void __iomem *base;
- struct clk_hw **hws;
-
- clk_data = kzalloc(struct_size(clk_data, hws, CMU_MAX_CLKS),
- GFP_KERNEL);
-
- if (!clk_data)
- return;
-
- hws = clk_data->hws;
-
- for (i = 0; i < CMU_MAX_CLKS; ++i)
- hws[i] = ERR_PTR(-ENOENT);
-
- base = of_iomap(np, 0);
- if (!base) {
- pr_warn("Failed to map address range for efm32gg,cmu node\n");
- return;
- }
-
- hws[clk_HFXO] = clk_hw_register_fixed_rate(NULL, "HFXO", NULL, 0,
- 48000000);
-
- hws[clk_HFPERCLKUSART0] = clk_hw_register_gate(NULL, "HFPERCLK.USART0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
- hws[clk_HFPERCLKUSART1] = clk_hw_register_gate(NULL, "HFPERCLK.USART1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 1, 0, NULL);
- hws[clk_HFPERCLKUSART2] = clk_hw_register_gate(NULL, "HFPERCLK.USART2",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 2, 0, NULL);
- hws[clk_HFPERCLKUART0] = clk_hw_register_gate(NULL, "HFPERCLK.UART0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 3, 0, NULL);
- hws[clk_HFPERCLKUART1] = clk_hw_register_gate(NULL, "HFPERCLK.UART1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 4, 0, NULL);
- hws[clk_HFPERCLKTIMER0] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 5, 0, NULL);
- hws[clk_HFPERCLKTIMER1] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 6, 0, NULL);
- hws[clk_HFPERCLKTIMER2] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER2",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 7, 0, NULL);
- hws[clk_HFPERCLKTIMER3] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER3",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 8, 0, NULL);
- hws[clk_HFPERCLKACMP0] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 9, 0, NULL);
- hws[clk_HFPERCLKACMP1] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 10, 0, NULL);
- hws[clk_HFPERCLKI2C0] = clk_hw_register_gate(NULL, "HFPERCLK.I2C0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 11, 0, NULL);
- hws[clk_HFPERCLKI2C1] = clk_hw_register_gate(NULL, "HFPERCLK.I2C1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 12, 0, NULL);
- hws[clk_HFPERCLKGPIO] = clk_hw_register_gate(NULL, "HFPERCLK.GPIO",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 13, 0, NULL);
- hws[clk_HFPERCLKVCMP] = clk_hw_register_gate(NULL, "HFPERCLK.VCMP",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 14, 0, NULL);
- hws[clk_HFPERCLKPRS] = clk_hw_register_gate(NULL, "HFPERCLK.PRS",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 15, 0, NULL);
- hws[clk_HFPERCLKADC0] = clk_hw_register_gate(NULL, "HFPERCLK.ADC0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 16, 0, NULL);
- hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
-
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
-}
-CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 910e6e74ae90..4f7bf3929d6d 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -64,10 +64,16 @@ const struct clk_ops clk_fixed_factor_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
+{
+ clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+}
+
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
const char *name, const char *parent_name, int index,
- unsigned long flags, unsigned int mult, unsigned int div)
+ unsigned long flags, unsigned int mult, unsigned int div,
+ bool devm)
{
struct clk_fixed_factor *fix;
struct clk_init_data init = { };
@@ -75,7 +81,15 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
struct clk_hw *hw;
int ret;
- fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ /* You can't use devm without a dev */
+ if (devm && !dev)
+ return ERR_PTR(-EINVAL);
+
+ if (devm)
+ fix = devres_alloc(devm_clk_hw_register_fixed_factor_release,
+ sizeof(*fix), GFP_KERNEL);
+ else
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
if (!fix)
return ERR_PTR(-ENOMEM);
@@ -99,9 +113,13 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
else
ret = of_clk_hw_register(np, hw);
if (ret) {
- kfree(fix);
+ if (devm)
+ devres_free(fix);
+ else
+ kfree(fix);
hw = ERR_PTR(ret);
- }
+ } else if (devm) {
+ devres_add(dev, fix);
+ }
return hw;
}
@@ -111,7 +129,7 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
- flags, mult, div);
+ flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
@@ -153,6 +171,15 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
+
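+/*
+ * Illustrative usage from a hypothetical driver probe (names are
+ * examples only); the registration is undone automatically when the
+ * device is unbound:
+ *
+ * hw = devm_clk_hw_register_fixed_factor(&pdev->dev, "foo_div2",
+ * "foo_parent", 0, 1, 2);
+ * if (IS_ERR(hw))
+ * return PTR_ERR(hw);
+ */
+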
#ifdef CONFIG_OF
static const struct of_device_id set_rate_parent_matches[] = {
{ .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
@@ -185,7 +212,7 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
flags |= CLK_SET_RATE_PARENT;
hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
- flags, mult, div);
+ flags, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 51f26619b6a2..5225d17d6b3f 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -55,7 +55,7 @@ static void __init of_fixed_mmio_clk_setup(struct device_node *node)
}
CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
-/**
+/*
* This is not executed when of_fixed_mmio_clk_setup succeeded.
*/
static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
new file mode 100644
index 000000000000..6c84abf5b2e3
--- /dev/null
+++ b/drivers/clk/clk-k210.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) "k210-clk: " fmt
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <soc/canaan/k210-sysctl.h>
+
+#include <dt-bindings/clock/k210-clk.h>
+
+struct k210_sysclk;
+
+struct k210_clk {
+ int id;
+ struct k210_sysclk *ksc;
+ struct clk_hw hw;
+};
+
+struct k210_clk_cfg {
+ const char *name;
+ u8 gate_reg;
+ u8 gate_bit;
+ u8 div_reg;
+ u8 div_shift;
+ u8 div_width;
+ u8 div_type;
+ u8 mux_reg;
+ u8 mux_bit;
+};
+
+enum k210_clk_div_type {
+ K210_DIV_NONE,
+ K210_DIV_ONE_BASED,
+ K210_DIV_DOUBLE_ONE_BASED,
+ K210_DIV_POWER_OF_TWO,
+};
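+
+/*
+ * Illustrative decoding of the divider types (see k210_clk_get_rate()
+ * below): a raw field value of 3 divides the parent rate by 4 with
+ * K210_DIV_ONE_BASED and by 8 with K210_DIV_DOUBLE_ONE_BASED;
+ * K210_DIV_POWER_OF_TWO is assumed to shift the parent rate down by a
+ * power of two derived from the field value.
+ */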
+
+#define K210_GATE(_reg, _bit) \
+ .gate_reg = (_reg), \
+ .gate_bit = (_bit)
+
+#define K210_DIV(_reg, _shift, _width, _type) \
+ .div_reg = (_reg), \
+ .div_shift = (_shift), \
+ .div_width = (_width), \
+ .div_type = (_type)
+
+#define K210_MUX(_reg, _bit) \
+ .mux_reg = (_reg), \
+ .mux_bit = (_bit)
+
+static struct k210_clk_cfg k210_clk_cfgs[K210_NUM_CLKS] = {
+ /* Gated clocks, no mux, no divider */
+ [K210_CLK_CPU] = {
+ .name = "cpu",
+ K210_GATE(K210_SYSCTL_EN_CENT, 0)
+ },
+ [K210_CLK_DMA] = {
+ .name = "dma",
+ K210_GATE(K210_SYSCTL_EN_PERI, 1)
+ },
+ [K210_CLK_FFT] = {
+ .name = "fft",
+ K210_GATE(K210_SYSCTL_EN_PERI, 4)
+ },
+ [K210_CLK_GPIO] = {
+ .name = "gpio",
+ K210_GATE(K210_SYSCTL_EN_PERI, 5)
+ },
+ [K210_CLK_UART1] = {
+ .name = "uart1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 16)
+ },
+ [K210_CLK_UART2] = {
+ .name = "uart2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 17)
+ },
+ [K210_CLK_UART3] = {
+ .name = "uart3",
+ K210_GATE(K210_SYSCTL_EN_PERI, 18)
+ },
+ [K210_CLK_FPIOA] = {
+ .name = "fpioa",
+ K210_GATE(K210_SYSCTL_EN_PERI, 20)
+ },
+ [K210_CLK_SHA] = {
+ .name = "sha",
+ K210_GATE(K210_SYSCTL_EN_PERI, 26)
+ },
+ [K210_CLK_AES] = {
+ .name = "aes",
+ K210_GATE(K210_SYSCTL_EN_PERI, 19)
+ },
+ [K210_CLK_OTP] = {
+ .name = "otp",
+ K210_GATE(K210_SYSCTL_EN_PERI, 27)
+ },
+ [K210_CLK_RTC] = {
+ .name = "rtc",
+ K210_GATE(K210_SYSCTL_EN_PERI, 29)
+ },
+
+ /* Gated divider clocks */
+ [K210_CLK_SRAM0] = {
+ .name = "sram0",
+ K210_GATE(K210_SYSCTL_EN_CENT, 1),
+ K210_DIV(K210_SYSCTL_THR0, 0, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_SRAM1] = {
+ .name = "sram1",
+ K210_GATE(K210_SYSCTL_EN_CENT, 2),
+ K210_DIV(K210_SYSCTL_THR0, 4, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_ROM] = {
+ .name = "rom",
+ K210_GATE(K210_SYSCTL_EN_PERI, 0),
+ K210_DIV(K210_SYSCTL_THR0, 16, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_DVP] = {
+ .name = "dvp",
+ K210_GATE(K210_SYSCTL_EN_PERI, 3),
+ K210_DIV(K210_SYSCTL_THR0, 12, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB0] = {
+ .name = "apb0",
+ K210_GATE(K210_SYSCTL_EN_CENT, 3),
+ K210_DIV(K210_SYSCTL_SEL0, 3, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB1] = {
+ .name = "apb1",
+ K210_GATE(K210_SYSCTL_EN_CENT, 4),
+ K210_DIV(K210_SYSCTL_SEL0, 6, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB2] = {
+ .name = "apb2",
+ K210_GATE(K210_SYSCTL_EN_CENT, 5),
+ K210_DIV(K210_SYSCTL_SEL0, 9, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_AI] = {
+ .name = "ai",
+ K210_GATE(K210_SYSCTL_EN_PERI, 2),
+ K210_DIV(K210_SYSCTL_THR0, 8, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_SPI0] = {
+ .name = "spi0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 6),
+ K210_DIV(K210_SYSCTL_THR1, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_SPI1] = {
+ .name = "spi1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 7),
+ K210_DIV(K210_SYSCTL_THR1, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_SPI2] = {
+ .name = "spi2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 8),
+ K210_DIV(K210_SYSCTL_THR1, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C0] = {
+ .name = "i2c0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 13),
+ K210_DIV(K210_SYSCTL_THR5, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C1] = {
+ .name = "i2c1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 14),
+ K210_DIV(K210_SYSCTL_THR5, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C2] = {
+ .name = "i2c2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 15),
+ K210_DIV(K210_SYSCTL_THR5, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_WDT0] = {
+ .name = "wdt0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 24),
+ K210_DIV(K210_SYSCTL_THR6, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_WDT1] = {
+ .name = "wdt1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 25),
+ K210_DIV(K210_SYSCTL_THR6, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S0] = {
+ .name = "i2s0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 10),
+ K210_DIV(K210_SYSCTL_THR3, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S1] = {
+ .name = "i2s1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 11),
+ K210_DIV(K210_SYSCTL_THR3, 16, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S2] = {
+ .name = "i2s2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 12),
+ K210_DIV(K210_SYSCTL_THR4, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+
+ /* Divider clocks, no gate, no mux */
+ [K210_CLK_I2S0_M] = {
+ .name = "i2s0_m",
+ K210_DIV(K210_SYSCTL_THR4, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S1_M] = {
+ .name = "i2s1_m",
+ K210_DIV(K210_SYSCTL_THR4, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S2_M] = {
+ .name = "i2s2_m",
+ K210_DIV(K210_SYSCTL_THR4, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+
+ /* Muxed gated divider clocks */
+ [K210_CLK_SPI3] = {
+ .name = "spi3",
+ K210_GATE(K210_SYSCTL_EN_PERI, 9),
+ K210_DIV(K210_SYSCTL_THR1, 24, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 12)
+ },
+ [K210_CLK_TIMER0] = {
+ .name = "timer0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 21),
+ K210_DIV(K210_SYSCTL_THR2, 0, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 13)
+ },
+ [K210_CLK_TIMER1] = {
+ .name = "timer1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 22),
+ K210_DIV(K210_SYSCTL_THR2, 8, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 14)
+ },
+ [K210_CLK_TIMER2] = {
+ .name = "timer2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 23),
+ K210_DIV(K210_SYSCTL_THR2, 16, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 15)
+ },
+};
+
+/*
+ * PLL control register bits.
+ */
+#define K210_PLL_CLKR GENMASK(3, 0)
+#define K210_PLL_CLKF GENMASK(9, 4)
+#define K210_PLL_CLKOD GENMASK(13, 10)
+#define K210_PLL_BWADJ GENMASK(19, 14)
+#define K210_PLL_RESET (1 << 20)
+#define K210_PLL_PWRD (1 << 21)
+#define K210_PLL_INTFB (1 << 22)
+#define K210_PLL_BYPASS (1 << 23)
+#define K210_PLL_TEST (1 << 24)
+#define K210_PLL_EN (1 << 25)
+#define K210_PLL_SEL GENMASK(27, 26) /* PLL2 only */
+
+/*
+ * PLL lock register bits.
+ */
+#define K210_PLL_LOCK 0
+#define K210_PLL_CLEAR_SLIP 2
+#define K210_PLL_TEST_OUT 3
+
+/*
+ * Clock selector register bits.
+ */
+#define K210_ACLK_SEL BIT(0)
+#define K210_ACLK_DIV GENMASK(2, 1)
+
+/*
+ * PLLs.
+ */
+enum k210_pll_id {
+ K210_PLL0, K210_PLL1, K210_PLL2, K210_PLL_NUM
+};
+
+struct k210_pll {
+ enum k210_pll_id id;
+ struct k210_sysclk *ksc;
+ void __iomem *base;
+ void __iomem *reg;
+ void __iomem *lock;
+ u8 lock_shift;
+ u8 lock_width;
+ struct clk_hw hw;
+};
+#define to_k210_pll(_hw) container_of(_hw, struct k210_pll, hw)
+
+/*
+ * PLLs configuration: by default PLL0 runs at 780 MHz and PLL1 at 299 MHz.
+ * The first 2 SRAM banks depend on ACLK/CPU clock which is by default PLL0
+ * rate divided by 2. Set PLL1 to 390 MHz so that the third SRAM bank has the
+ * same clock as the first 2.
+ */
+struct k210_pll_cfg {
+ u32 reg;
+ u8 lock_shift;
+ u8 lock_width;
+ u32 r;
+ u32 f;
+ u32 od;
+ u32 bwadj;
+};
+
+static struct k210_pll_cfg k210_plls_cfg[] = {
+ { K210_SYSCTL_PLL0, 0, 2, 0, 59, 1, 59 }, /* 780 MHz */
+ { K210_SYSCTL_PLL1, 8, 1, 0, 59, 3, 59 }, /* 390 MHz */
+ { K210_SYSCTL_PLL2, 16, 1, 0, 22, 1, 22 }, /* 299 MHz */
+};
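+
+/*
+ * Worked example, assuming the usual 26 MHz in0 oscillator: with
+ * r = 0, f = 59, od = 1, the rate formula in k210_pll_get_rate() gives
+ * 26 MHz * (59 + 1) / ((0 + 1) * (1 + 1)) = 780 MHz for PLL0, and the
+ * PLL1 and PLL2 entries decode to 390 MHz and 299 MHz the same way.
+ */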
+
+/**
+ * struct k210_sysclk - sysclk driver data
+ * @regs: system controller registers start address
+ * @clk_lock: clock setting spinlock
+ * @plls: SoC PLLs descriptors
+ * @aclk: ACLK clock
+ * @clks: All other clocks
+ */
+struct k210_sysclk {
+ void __iomem *regs;
+ spinlock_t clk_lock;
+ struct k210_pll plls[K210_PLL_NUM];
+ struct clk_hw aclk;
+ struct k210_clk clks[K210_NUM_CLKS];
+};
+
+#define to_k210_sysclk(_hw) container_of(_hw, struct k210_sysclk, aclk)
+
+/*
+ * Set ACLK parent selector: 0 for IN0, 1 for PLL0.
+ */
+static void k210_aclk_set_selector(void __iomem *regs, u8 sel)
+{
+ u32 reg = readl(regs + K210_SYSCTL_SEL0);
+
+ if (sel)
+ reg |= K210_ACLK_SEL;
+ else
+ reg &= ~K210_ACLK_SEL;
+ writel(reg, regs + K210_SYSCTL_SEL0);
+}
+
+static void k210_init_pll(void __iomem *regs, enum k210_pll_id pllid,
+ struct k210_pll *pll)
+{
+ pll->id = pllid;
+ pll->reg = regs + k210_plls_cfg[pllid].reg;
+ pll->lock = regs + K210_SYSCTL_PLL_LOCK;
+ pll->lock_shift = k210_plls_cfg[pllid].lock_shift;
+ pll->lock_width = k210_plls_cfg[pllid].lock_width;
+}
+
+static void k210_pll_wait_for_lock(struct k210_pll *pll)
+{
+ u32 reg, mask = GENMASK(pll->lock_shift + pll->lock_width - 1,
+ pll->lock_shift);
+
+ while (true) {
+ reg = readl(pll->lock);
+ if ((reg & mask) == mask)
+ break;
+
+ reg |= BIT(pll->lock_shift + K210_PLL_CLEAR_SLIP);
+ writel(reg, pll->lock);
+ }
+}
+
+static bool k210_pll_hw_is_enabled(struct k210_pll *pll)
+{
+ u32 reg = readl(pll->reg);
+ u32 mask = K210_PLL_PWRD | K210_PLL_EN;
+
+ if (reg & K210_PLL_RESET)
+ return false;
+
+ return (reg & mask) == mask;
+}
+
+static void k210_pll_enable_hw(void __iomem *regs, struct k210_pll *pll)
+{
+ struct k210_pll_cfg *pll_cfg = &k210_plls_cfg[pll->id];
+ u32 reg;
+
+ if (k210_pll_hw_is_enabled(pll))
+ return;
+
+ /*
+ * For PLL0, we need to re-parent ACLK to IN0 to keep the CPU cores and
+ * SRAM running.
+ */
+ if (pll->id == K210_PLL0)
+ k210_aclk_set_selector(regs, 0);
+
+ /* Set PLL factors */
+ reg = readl(pll->reg);
+ reg &= ~GENMASK(19, 0);
+ reg |= FIELD_PREP(K210_PLL_CLKR, pll_cfg->r);
+ reg |= FIELD_PREP(K210_PLL_CLKF, pll_cfg->f);
+ reg |= FIELD_PREP(K210_PLL_CLKOD, pll_cfg->od);
+ reg |= FIELD_PREP(K210_PLL_BWADJ, pll_cfg->bwadj);
+ reg |= K210_PLL_PWRD;
+ writel(reg, pll->reg);
+
+ /*
+ * Reset the PLL: ensure reset is low before asserting it.
+ * The magic NOPs come from the Kendryte reference SDK.
+ */
+ reg &= ~K210_PLL_RESET;
+ writel(reg, pll->reg);
+ reg |= K210_PLL_RESET;
+ writel(reg, pll->reg);
+ nop();
+ nop();
+ reg &= ~K210_PLL_RESET;
+ writel(reg, pll->reg);
+
+ k210_pll_wait_for_lock(pll);
+
+ reg &= ~K210_PLL_BYPASS;
+ reg |= K210_PLL_EN;
+ writel(reg, pll->reg);
+
+ if (pll->id == K210_PLL0)
+ k210_aclk_set_selector(regs, 1);
+}
+
+static int k210_pll_enable(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ k210_pll_enable_hw(ksc->regs, pll);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static void k210_pll_disable(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+ u32 reg;
+
+ /*
+ * Bypassing before powering off is important so child clocks do not
+ * stop working. This is especially important for pll0, the indirect
+ * parent of the cpu clock.
+ */
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(pll->reg);
+ reg |= K210_PLL_BYPASS;
+ writel(reg, pll->reg);
+
+ reg &= ~K210_PLL_PWRD;
+ reg &= ~K210_PLL_EN;
+ writel(reg, pll->reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+}
+
+static int k210_pll_is_enabled(struct clk_hw *hw)
+{
+ return k210_pll_hw_is_enabled(to_k210_pll(hw));
+}
+
+static unsigned long k210_pll_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ u32 reg = readl(pll->reg);
+ u32 r, f, od;
+
+ if (reg & K210_PLL_BYPASS)
+ return parent_rate;
+
+ if (!(reg & K210_PLL_PWRD))
+ return 0;
+
+ r = FIELD_GET(K210_PLL_CLKR, reg) + 1;
+ f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
+ od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
+
+ return (u64)parent_rate * f / (r * od);
+}
+
+static const struct clk_ops k210_pll_ops = {
+ .enable = k210_pll_enable,
+ .disable = k210_pll_disable,
+ .is_enabled = k210_pll_is_enabled,
+ .recalc_rate = k210_pll_get_rate,
+};
+
+static int k210_pll2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ reg = readl(pll->reg);
+ reg &= ~K210_PLL_SEL;
+ reg |= FIELD_PREP(K210_PLL_SEL, index);
+ writel(reg, pll->reg);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_pll2_get_parent(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ u32 reg = readl(pll->reg);
+
+ return FIELD_GET(K210_PLL_SEL, reg);
+}
+
+static const struct clk_ops k210_pll2_ops = {
+ .enable = k210_pll_enable,
+ .disable = k210_pll_disable,
+ .is_enabled = k210_pll_is_enabled,
+ .recalc_rate = k210_pll_get_rate,
+ .set_parent = k210_pll2_set_parent,
+ .get_parent = k210_pll2_get_parent,
+};
+
+static int __init k210_register_pll(struct device_node *np,
+ struct k210_sysclk *ksc,
+ enum k210_pll_id pllid, const char *name,
+ int num_parents, const struct clk_ops *ops)
+{
+ struct k210_pll *pll = &ksc->plls[pllid];
+ struct clk_init_data init = {};
+ const struct clk_parent_data parent_data[] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw },
+ { .hw = &ksc->plls[K210_PLL1].hw },
+ };
+
+ init.name = name;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+ init.ops = ops;
+
+ pll->hw.init = &init;
+ pll->ksc = ksc;
+
+ return of_clk_hw_register(np, &pll->hw);
+}
+
+static int __init k210_register_plls(struct device_node *np,
+ struct k210_sysclk *ksc)
+{
+ int i, ret;
+
+ for (i = 0; i < K210_PLL_NUM; i++)
+ k210_init_pll(ksc->regs, i, &ksc->plls[i]);
+
+ /* PLL0 and PLL1 only have IN0 as parent */
+ ret = k210_register_pll(np, ksc, K210_PLL0, "pll0", 1, &k210_pll_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL0 failed\n", np);
+ return ret;
+ }
+ ret = k210_register_pll(np, ksc, K210_PLL1, "pll1", 1, &k210_pll_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL1 failed\n", np);
+ return ret;
+ }
+
+ /* PLL2 has IN0, PLL0 and PLL1 as parents */
+ ret = k210_register_pll(np, ksc, K210_PLL2, "pll2", 3, &k210_pll2_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL2 failed\n", np);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k210_aclk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ k210_aclk_set_selector(ksc->regs, index);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_aclk_get_parent(struct clk_hw *hw)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ u32 sel;
+
+ sel = readl(ksc->regs + K210_SYSCTL_SEL0) & K210_ACLK_SEL;
+
+ return sel ? 1 : 0;
+}
+
+static unsigned long k210_aclk_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ u32 reg = readl(ksc->regs + K210_SYSCTL_SEL0);
+ unsigned int shift;
+
+ if (!(reg & K210_ACLK_SEL))
+ return parent_rate;
+
+ shift = FIELD_GET(K210_ACLK_DIV, reg);
+
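+ /* The divider is 2^(shift + 1) */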
+ return parent_rate / (2UL << shift);
+}
+
+static const struct clk_ops k210_aclk_ops = {
+ .set_parent = k210_aclk_set_parent,
+ .get_parent = k210_aclk_get_parent,
+ .recalc_rate = k210_aclk_get_rate,
+};
+
+/*
+ * ACLK has IN0 and PLL0 as parents.
+ */
+static int __init k210_register_aclk(struct device_node *np,
+ struct k210_sysclk *ksc)
+{
+ struct clk_init_data init = {};
+ const struct clk_parent_data parent_data[] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw },
+ };
+ int ret;
+
+ init.name = "aclk";
+ init.parent_data = parent_data;
+ init.num_parents = 2;
+ init.ops = &k210_aclk_ops;
+ ksc->aclk.init = &init;
+
+ ret = of_clk_hw_register(np, &ksc->aclk);
+ if (ret) {
+ pr_err("%pOFP: register aclk failed\n", np);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define to_k210_clk(_hw) container_of(_hw, struct k210_clk, hw)
+
+static int k210_clk_enable(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
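+ /* Clocks without a gate register are always running */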
+ if (!cfg->gate_reg)
+ return 0;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->gate_reg);
+ reg |= BIT(cfg->gate_bit);
+ writel(reg, ksc->regs + cfg->gate_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static void k210_clk_disable(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
+ if (!cfg->gate_reg)
+ return;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->gate_reg);
+ reg &= ~BIT(cfg->gate_bit);
+ writel(reg, ksc->regs + cfg->gate_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+}
+
+static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->mux_reg);
+ if (index)
+ reg |= BIT(cfg->mux_bit);
+ else
+ reg &= ~BIT(cfg->mux_bit);
+ writel(reg, ksc->regs + cfg->mux_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_clk_get_parent(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg, idx;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->mux_reg);
+ idx = (reg & BIT(cfg->mux_bit)) ? 1 : 0;
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return idx;
+}
+
+static unsigned long k210_clk_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ u32 reg, div_val;
+
+ if (!cfg->div_reg)
+ return parent_rate;
+
+ reg = readl(ksc->regs + cfg->div_reg);
+ div_val = (reg >> cfg->div_shift) & GENMASK(cfg->div_width - 1, 0);
+
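+ /* The divider encoding depends on the clock type */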
+ switch (cfg->div_type) {
+ case K210_DIV_ONE_BASED:
+ return parent_rate / (div_val + 1);
+ case K210_DIV_DOUBLE_ONE_BASED:
+ return parent_rate / ((div_val + 1) * 2);
+ case K210_DIV_POWER_OF_TWO:
+ return parent_rate / (2UL << div_val);
+ case K210_DIV_NONE:
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops k210_clk_mux_ops = {
+ .enable = k210_clk_enable,
+ .disable = k210_clk_disable,
+ .set_parent = k210_clk_set_parent,
+ .get_parent = k210_clk_get_parent,
+ .recalc_rate = k210_clk_get_rate,
+};
+
+static const struct clk_ops k210_clk_ops = {
+ .enable = k210_clk_enable,
+ .disable = k210_clk_disable,
+ .recalc_rate = k210_clk_get_rate,
+};
+
+static void __init k210_register_clk(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ const struct clk_parent_data *parent_data,
+ int num_parents, unsigned long flags)
+{
+ struct k210_clk *kclk = &ksc->clks[id];
+ struct clk_init_data init = {};
+ int ret;
+
+ init.name = k210_clk_cfgs[id].name;
+ init.flags = flags;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+ if (num_parents > 1)
+ init.ops = &k210_clk_mux_ops;
+ else
+ init.ops = &k210_clk_ops;
+
+ kclk->id = id;
+ kclk->ksc = ksc;
+ kclk->hw.init = &init;
+
+ ret = of_clk_hw_register(np, &kclk->hw);
+ if (ret) {
+ pr_err("%pOFP: register clock %s failed\n",
+ np, k210_clk_cfgs[id].name);
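+ /* Flag the failure for the final check in k210_clk_init() */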
+ kclk->id = -1;
+ }
+}
+
+/*
+ * All muxed clocks have IN0 and PLL0 as parents.
+ */
+static inline void __init k210_register_mux_clk(struct device_node *np,
+ struct k210_sysclk *ksc, int id)
+{
+ const struct clk_parent_data parent_data[2] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw }
+ };
+
+ k210_register_clk(np, ksc, id, parent_data, 2, 0);
+}
+
+static inline void __init k210_register_in0_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id)
+{
+ const struct clk_parent_data parent_data = {
+ /* .index = 0 for in0 */
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, 0);
+}
+
+static inline void __init k210_register_pll_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ enum k210_pll_id pllid,
+ unsigned long flags)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->plls[pllid].hw,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, flags);
+}
+
+static inline void __init k210_register_aclk_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ unsigned long flags)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->aclk,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, flags);
+}
+
+static inline void __init k210_register_clk_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ int parent_id)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->clks[parent_id].hw,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, 0);
+}
+
+static struct clk_hw *k210_clk_hw_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct k210_sysclk *ksc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= K210_NUM_CLKS)
+ return ERR_PTR(-EINVAL);
+
+ return &ksc->clks[idx].hw;
+}
+
+static void __init k210_clk_init(struct device_node *np)
+{
+ struct device_node *sysctl_np;
+ struct k210_sysclk *ksc;
+ int i, ret;
+
+ ksc = kzalloc(sizeof(*ksc), GFP_KERNEL);
+ if (!ksc)
+ return;
+
+ spin_lock_init(&ksc->clk_lock);
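+ /* The registers belong to the parent sysctl node */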
+ sysctl_np = of_get_parent(np);
+ ksc->regs = of_iomap(sysctl_np, 0);
+ of_node_put(sysctl_np);
+ if (!ksc->regs) {
+ pr_err("%pOFP: failed to map registers\n", np);
+ return;
+ }
+
+ ret = k210_register_plls(np, ksc);
+ if (ret)
+ return;
+
+ ret = k210_register_aclk(np, ksc);
+ if (ret)
+ return;
+
+ /*
+ * Critical clocks: there are no consumers of the SRAM clocks,
+ * including the AI clock for the third SRAM bank. The CPU clock
+ * is referenced only by the uarths serial device, so it would be
+ * disabled if the serial console were disabled to switch to another
+ * console. Mark all these clocks as critical so that they are never
+ * disabled by the core clock management.
+ */
+ k210_register_aclk_child(np, ksc, K210_CLK_CPU, CLK_IS_CRITICAL);
+ k210_register_aclk_child(np, ksc, K210_CLK_SRAM0, CLK_IS_CRITICAL);
+ k210_register_aclk_child(np, ksc, K210_CLK_SRAM1, CLK_IS_CRITICAL);
+ k210_register_pll_child(np, ksc, K210_CLK_AI, K210_PLL1,
+ CLK_IS_CRITICAL);
+
+ /* Clocks with aclk as source */
+ k210_register_aclk_child(np, ksc, K210_CLK_DMA, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_FFT, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_ROM, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_DVP, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB0, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB1, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB2, 0);
+
+ /* Clocks with PLL0 as source */
+ k210_register_pll_child(np, ksc, K210_CLK_SPI0, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_SPI1, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_SPI2, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C0, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C1, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C2, K210_PLL0, 0);
+
+ /* Clocks with PLL2 as source */
+ k210_register_pll_child(np, ksc, K210_CLK_I2S0, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S1, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S2, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S0_M, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S1_M, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S2_M, K210_PLL2, 0);
+
+ /* Clocks with IN0 as source */
+ k210_register_in0_child(np, ksc, K210_CLK_WDT0);
+ k210_register_in0_child(np, ksc, K210_CLK_WDT1);
+ k210_register_in0_child(np, ksc, K210_CLK_RTC);
+
+ /* Clocks with APB0 as source */
+ k210_register_clk_child(np, ksc, K210_CLK_GPIO, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART1, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART2, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART3, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_FPIOA, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_SHA, K210_CLK_APB0);
+
+ /* Clocks with APB1 as source */
+ k210_register_clk_child(np, ksc, K210_CLK_AES, K210_CLK_APB1);
+ k210_register_clk_child(np, ksc, K210_CLK_OTP, K210_CLK_APB1);
+
+ /* Mux clocks with in0 or pll0 as source */
+ k210_register_mux_clk(np, ksc, K210_CLK_SPI3);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER0);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER1);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER2);
+
+ /* Check for registration errors */
+ for (i = 0; i < K210_NUM_CLKS; i++) {
+ if (ksc->clks[i].id != i)
+ return;
+ }
+
+ ret = of_clk_add_hw_provider(np, k210_clk_hw_onecell_get, ksc);
+ if (ret) {
+ pr_err("%pOFP: add clock provider failed %d\n", np, ret);
+ return;
+ }
+
+ pr_info("%pOFP: CPU running at %lu MHz\n",
+ np, clk_hw_get_rate(&ksc->clks[K210_CLK_CPU].hw) / 1000000);
+}
+
+CLK_OF_DECLARE(k210_clk, "canaan,k210-clk", k210_clk_init);
+
+/*
+ * Enable PLL1 to be able to use the AI SRAM.
+ */
+void __init k210_clk_early_init(void __iomem *regs)
+{
+ struct k210_pll pll1;
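+ /* A stack-local PLL descriptor suffices: only its register pointers are used */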
+
+ /* Make sure ACLK selector is set to PLL0 */
+ k210_aclk_set_selector(regs, 1);
+
+ /* Start up PLL1 to enable the aisram bank for general memory use */
+ k210_init_pll(regs, K210_PLL1, &pll1);
+ k210_pll_enable_hw(regs, &pll1);
+}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 27a86b7a34db..e677bb5a784b 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -361,13 +361,6 @@ static const struct npcm7xx_clk_mux_data npcm7xx_muxes[] __initconst = {
dvcssel_mux_parents, ARRAY_SIZE(dvcssel_mux_parents), 0, -1},
};
-/* fixed ratio dividers (no register): */
-static const struct npcm7xx_clk_div_fixed_data npcm7xx_divs_fx[] __initconst = {
- { 1, 2, NPCM7XX_CLK_S_MC, NPCM7XX_CLK_S_MC_MUX, 0, NPCM7XX_CLK_MC},
- { 1, 2, NPCM7XX_CLK_S_PLL1_DIV2, NPCM7XX_CLK_S_PLL1, 0, -1},
- { 1, 2, NPCM7XX_CLK_S_PLL2_DIV2, NPCM7XX_CLK_S_PLL2, 0, -1},
-};
-
/* configurable dividers: */
static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
{NPCM7XX_CLKDIV1, 28, 3, NPCM7XX_CLK_S_ADC,
@@ -435,107 +428,6 @@ static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
};
-static const struct npcm7xx_clk_gate_data npcm7xx_gates[] __initconst = {
- {NPCM7XX_CLKEN1, 31, "smb1-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 30, "smb0-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 29, "smb7-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 28, "smb6-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 27, "adc-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 26, "wdt-gate", NPCM7XX_CLK_S_TIMER, 0},
- {NPCM7XX_CLKEN1, 25, "usbdev3-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 24, "usbdev6-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 23, "usbdev5-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 22, "usbdev4-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 21, "emc2-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 20, "timer5_9-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 19, "timer0_4-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 18, "pwmm0-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN1, 17, "huart-gate", NPCM7XX_CLK_S_UART, 0},
- {NPCM7XX_CLKEN1, 16, "smb5-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 15, "smb4-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 14, "smb3-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 13, "smb2-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 12, "mc-gate", NPCM7XX_CLK_S_MC, 0},
- {NPCM7XX_CLKEN1, 11, "uart01-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 10, "aes-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 9, "peci-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN1, 8, "usbdev2-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 7, "uart23-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 6, "emc1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 5, "usbdev1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 4, "shm-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 3 is reserved */
- {NPCM7XX_CLKEN1, 2, "kcs-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 1, "spi3-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 0, "spi0-gate", NPCM7XX_CLK_S_AHB, 0},
-
- {NPCM7XX_CLKEN2, 31, "cp-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 30, "tock-gate", NPCM7XX_CLK_S_TOCK, 0},
- /* bit 29 is reserved */
- {NPCM7XX_CLKEN2, 28, "gmac1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 27, "usbif-gate", NPCM7XX_CLK_S_USBIF, 0},
- {NPCM7XX_CLKEN2, 26, "usbhost-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 25, "gmac2-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 24 is reserved */
- {NPCM7XX_CLKEN2, 23, "pspi2-gate", NPCM7XX_CLK_S_APB5, 0},
- {NPCM7XX_CLKEN2, 22, "pspi1-gate", NPCM7XX_CLK_S_APB5, 0},
- {NPCM7XX_CLKEN2, 21, "3des-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 20 is reserved */
- {NPCM7XX_CLKEN2, 19, "siox2-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN2, 18, "siox1-gate", NPCM7XX_CLK_S_APB3, 0},
- /* bit 17 is reserved */
- {NPCM7XX_CLKEN2, 16, "fuse-gate", NPCM7XX_CLK_S_APB4, 0},
- /* bit 15 is reserved */
- {NPCM7XX_CLKEN2, 14, "vcd-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 13, "ece-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 12, "vdma-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 11, "ahbpcibrg-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 10, "gfxsys-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN2, 9, "sdhc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 8, "mmc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 7, "mft7-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 6, "mft6-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 5, "mft5-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 4, "mft4-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 3, "mft3-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 2, "mft2-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 1, "mft1-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 0, "mft0-gate", NPCM7XX_CLK_S_APB4, 0},
-
- {NPCM7XX_CLKEN3, 31, "gpiom7-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 30, "gpiom6-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 29, "gpiom5-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 28, "gpiom4-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 27, "gpiom3-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 26, "gpiom2-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 25, "gpiom1-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 24, "gpiom0-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 23, "espi-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 22, "smb11-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 21, "smb10-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 20, "smb9-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 19, "smb8-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 18, "smb15-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 17, "rng-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 16, "timer10_14-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 15, "pcirc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 14, "sececc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 13, "sha-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 12, "smb14-gate", NPCM7XX_CLK_S_APB2, 0},
- /* bit 11 is reserved */
- /* bit 10 is reserved */
- {NPCM7XX_CLKEN3, 9, "pcimbx-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 8 is reserved */
- {NPCM7XX_CLKEN3, 7, "usbdev9-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 6, "usbdev8-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 5, "usbdev7-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 4, "usbdev0-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 3, "smb13-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 2, "spix-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 1, "smb12-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 0, "pwmm1-gate", NPCM7XX_CLK_S_APB3, 0},
-};
-
static DEFINE_SPINLOCK(npcm7xx_clk_lock);
static void __init npcm7xx_clk_init(struct device_node *clk_np)
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 70aa521e7e7f..88898b97a443 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Freescale Semiconductor, Inc.
+ * Copyright 2021 NXP
*
* clock driver for Freescale QorIQ SoCs.
*/
@@ -564,7 +565,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, 1, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -580,7 +583,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, 1, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -591,7 +596,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1),
},
{
.compat = "fsl,ls1028a-clockgen",
@@ -605,7 +611,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -620,7 +627,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -635,7 +643,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -649,7 +658,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -660,7 +670,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
},
{
.compat = "fsl,ls2080a-clockgen",
@@ -670,7 +680,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x37,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -681,7 +693,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
- .pll_mask = 0x37,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -694,7 +708,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p3041-clockgen",
@@ -706,7 +721,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p4080-clockgen",
@@ -718,7 +734,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
- .pll_mask = 0x1f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGA_PLL3) | BIT(CGA_PLL4),
},
{
.compat = "fsl,p5020-clockgen",
@@ -730,7 +748,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p5040-clockgen",
@@ -742,7 +761,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x0f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3),
},
{
.compat = "fsl,t1023-clockgen",
@@ -757,7 +777,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
.flags = CG_PLL_8BIT,
},
{
@@ -770,7 +790,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -786,7 +807,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -802,7 +824,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{},
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 34b25609f55f..eea50121718a 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2010, 2011 Ericsson AB.
* Copyright (C) 2011 Guenter Roeck.
- * Copyright (C) 2011 - 2013 Xilinx Inc.
+ * Copyright (C) 2011 - 2021 Xilinx Inc.
*
* Author: Guenter Roeck <guenter.roeck@ericsson.com>
* Sören Brinkmann <soren.brinkmann@xilinx.com>
@@ -123,14 +123,18 @@ static int si570_get_divs(struct clk_si570 *data, u64 *rfreq,
* si570_get_defaults() - Get default values
* @data: Driver data structure
* @fout: Factory frequency output
+ * @skip_recall: If true, don't recall NVM into RAM
* Returns 0 on success, negative errno otherwise.
*/
-static int si570_get_defaults(struct clk_si570 *data, u64 fout)
+static int si570_get_defaults(struct clk_si570 *data, u64 fout,
+ bool skip_recall)
{
int err;
u64 fdco;
- regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_RECALL);
+ if (!skip_recall)
+ regmap_write(data->regmap, SI570_REG_CONTROL,
+ SI570_CNTRL_RECALL);
err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div);
if (err)
@@ -400,6 +404,7 @@ static int si570_probe(struct i2c_client *client,
struct clk_si570 *data;
struct clk_init_data init;
u32 initial_fout, factory_fout, stability;
+ bool skip_recall;
int err;
enum clk_si570_variant variant = id->driver_data;
@@ -441,6 +446,9 @@ static int si570_probe(struct i2c_client *client,
return err;
}
+ skip_recall = of_property_read_bool(client->dev.of_node,
+ "silabs,skip-recall");
+
data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
@@ -448,7 +456,7 @@ static int si570_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, data);
- err = si570_get_defaults(data, factory_fout);
+ err = si570_get_defaults(data, factory_fout, skip_recall);
if (err)
return err;
diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c
deleted file mode 100644
index fe12a43f7a40..000000000000
--- a/drivers/clk/clk-tango4.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#define CLK_COUNT 4 /* cpu_clk, sys_clk, usb_clk, sdio_clk */
-static struct clk *clks[CLK_COUNT];
-static struct clk_onecell_data clk_data = { clks, CLK_COUNT };
-
-#define SYSCLK_DIV 0x20
-#define CPUCLK_DIV 0x24
-#define DIV_BYPASS BIT(23)
-
-/*** CLKGEN_PLL ***/
-#define extract_pll_n(val) ((val >> 0) & ((1u << 7) - 1))
-#define extract_pll_k(val) ((val >> 13) & ((1u << 3) - 1))
-#define extract_pll_m(val) ((val >> 16) & ((1u << 3) - 1))
-#define extract_pll_isel(val) ((val >> 24) & ((1u << 3) - 1))
-
-static void __init make_pll(int idx, const char *parent, void __iomem *base)
-{
- char name[8];
- u32 val, mul, div;
-
- sprintf(name, "pll%d", idx);
- val = readl(base + idx * 8);
- mul = extract_pll_n(val) + 1;
- div = (extract_pll_m(val) + 1) << extract_pll_k(val);
- clk_register_fixed_factor(NULL, name, parent, 0, mul, div);
- if (extract_pll_isel(val) != 1)
- panic("%s: input not set to XTAL_IN\n", name);
-}
-
-static void __init make_cd(int idx, void __iomem *base)
-{
- char name[8];
- u32 val, mul, div;
-
- sprintf(name, "cd%d", idx);
- val = readl(base + idx * 8);
- mul = 1 << 27;
- div = (2 << 27) + val;
- clk_register_fixed_factor(NULL, name, "pll2", 0, mul, div);
- if (val > 0xf0000000)
- panic("%s: unsupported divider %x\n", name, val);
-}
-
-static void __init tango4_clkgen_setup(struct device_node *np)
-{
- struct clk **pp = clk_data.clks;
- void __iomem *base = of_iomap(np, 0);
- const char *parent = of_clk_get_parent_name(np, 0);
-
- if (!base)
- panic("%pOFn: invalid address\n", np);
-
- if (readl(base + CPUCLK_DIV) & DIV_BYPASS)
- panic("%pOFn: unsupported cpuclk setup\n", np);
-
- if (readl(base + SYSCLK_DIV) & DIV_BYPASS)
- panic("%pOFn: unsupported sysclk setup\n", np);
-
- writel(0x100, base + CPUCLK_DIV); /* disable frequency ramping */
-
- make_pll(0, parent, base);
- make_pll(1, parent, base);
- make_pll(2, parent, base);
- make_cd(2, base + 0x80);
- make_cd(6, base + 0x80);
-
- pp[0] = clk_register_divider(NULL, "cpu_clk", "pll0", 0,
- base + CPUCLK_DIV, 8, 8, CLK_DIVIDER_ONE_BASED, NULL);
- pp[1] = clk_register_fixed_factor(NULL, "sys_clk", "pll1", 0, 1, 4);
- pp[2] = clk_register_fixed_factor(NULL, "usb_clk", "cd2", 0, 1, 2);
- pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2);
-
- if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3]))
- panic("%pOFn: clk registration failed\n", np);
-
- if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data))
- panic("%pOFn: clk provider registration failed\n", np);
-}
-CLK_OF_DECLARE(tango4_clkgen, "sigma,tango4-clkgen", tango4_clkgen_setup);
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
deleted file mode 100644
index e228c07c4c6e..000000000000
--- a/drivers/clk/clk-u300.c
+++ /dev/null
@@ -1,1199 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * U300 clock implementation
- * Copyright (C) 2007-2012 ST-Ericsson AB
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/clkdev.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/spinlock.h>
-#include <linux/of.h>
-#include <linux/platform_data/clk-u300.h>
-
-/* APP side SYSCON registers */
-/* CLK Control Register 16bit (R/W) */
-#define U300_SYSCON_CCR (0x0000)
-#define U300_SYSCON_CCR_I2S1_USE_VCXO (0x0040)
-#define U300_SYSCON_CCR_I2S0_USE_VCXO (0x0020)
-#define U300_SYSCON_CCR_TURN_VCXO_ON (0x0008)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK (0x0007)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER (0x04)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW (0x03)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE (0x02)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH (0x01)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST (0x00)
-/* CLK Status Register 16bit (R/W) */
-#define U300_SYSCON_CSR (0x0004)
-#define U300_SYSCON_CSR_PLL208_LOCK_IND (0x0002)
-#define U300_SYSCON_CSR_PLL13_LOCK_IND (0x0001)
-/* Reset lines for SLOW devices 16bit (R/W) */
-#define U300_SYSCON_RSR (0x0014)
-#define U300_SYSCON_RSR_PPM_RESET_EN (0x0200)
-#define U300_SYSCON_RSR_ACC_TMR_RESET_EN (0x0100)
-#define U300_SYSCON_RSR_APP_TMR_RESET_EN (0x0080)
-#define U300_SYSCON_RSR_RTC_RESET_EN (0x0040)
-#define U300_SYSCON_RSR_KEYPAD_RESET_EN (0x0020)
-#define U300_SYSCON_RSR_GPIO_RESET_EN (0x0010)
-#define U300_SYSCON_RSR_EH_RESET_EN (0x0008)
-#define U300_SYSCON_RSR_BTR_RESET_EN (0x0004)
-#define U300_SYSCON_RSR_UART_RESET_EN (0x0002)
-#define U300_SYSCON_RSR_SLOW_BRIDGE_RESET_EN (0x0001)
-/* Reset lines for FAST devices 16bit (R/W) */
-#define U300_SYSCON_RFR (0x0018)
-#define U300_SYSCON_RFR_UART1_RESET_ENABLE (0x0080)
-#define U300_SYSCON_RFR_SPI_RESET_ENABLE (0x0040)
-#define U300_SYSCON_RFR_MMC_RESET_ENABLE (0x0020)
-#define U300_SYSCON_RFR_PCM_I2S1_RESET_ENABLE (0x0010)
-#define U300_SYSCON_RFR_PCM_I2S0_RESET_ENABLE (0x0008)
-#define U300_SYSCON_RFR_I2C1_RESET_ENABLE (0x0004)
-#define U300_SYSCON_RFR_I2C0_RESET_ENABLE (0x0002)
-#define U300_SYSCON_RFR_FAST_BRIDGE_RESET_ENABLE (0x0001)
-/* Reset lines for the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_RRR (0x001c)
-#define U300_SYSCON_RRR_CDS_RESET_EN (0x4000)
-#define U300_SYSCON_RRR_ISP_RESET_EN (0x2000)
-#define U300_SYSCON_RRR_INTCON_RESET_EN (0x1000)
-#define U300_SYSCON_RRR_MSPRO_RESET_EN (0x0800)
-#define U300_SYSCON_RRR_XGAM_RESET_EN (0x0100)
-#define U300_SYSCON_RRR_XGAM_VC_SYNC_RESET_EN (0x0080)
-#define U300_SYSCON_RRR_NANDIF_RESET_EN (0x0040)
-#define U300_SYSCON_RRR_EMIF_RESET_EN (0x0020)
-#define U300_SYSCON_RRR_DMAC_RESET_EN (0x0010)
-#define U300_SYSCON_RRR_CPU_RESET_EN (0x0008)
-#define U300_SYSCON_RRR_APEX_RESET_EN (0x0004)
-#define U300_SYSCON_RRR_AHB_RESET_EN (0x0002)
-#define U300_SYSCON_RRR_AAIF_RESET_EN (0x0001)
-/* Clock enable for SLOW peripherals 16bit (R/W) */
-#define U300_SYSCON_CESR (0x0020)
-#define U300_SYSCON_CESR_PPM_CLK_EN (0x0200)
-#define U300_SYSCON_CESR_ACC_TMR_CLK_EN (0x0100)
-#define U300_SYSCON_CESR_APP_TMR_CLK_EN (0x0080)
-#define U300_SYSCON_CESR_KEYPAD_CLK_EN (0x0040)
-#define U300_SYSCON_CESR_GPIO_CLK_EN (0x0010)
-#define U300_SYSCON_CESR_EH_CLK_EN (0x0008)
-#define U300_SYSCON_CESR_BTR_CLK_EN (0x0004)
-#define U300_SYSCON_CESR_UART_CLK_EN (0x0002)
-#define U300_SYSCON_CESR_SLOW_BRIDGE_CLK_EN (0x0001)
-/* Clock enable for FAST peripherals 16bit (R/W) */
-#define U300_SYSCON_CEFR (0x0024)
-#define U300_SYSCON_CEFR_UART1_CLK_EN (0x0200)
-#define U300_SYSCON_CEFR_I2S1_CORE_CLK_EN (0x0100)
-#define U300_SYSCON_CEFR_I2S0_CORE_CLK_EN (0x0080)
-#define U300_SYSCON_CEFR_SPI_CLK_EN (0x0040)
-#define U300_SYSCON_CEFR_MMC_CLK_EN (0x0020)
-#define U300_SYSCON_CEFR_I2S1_CLK_EN (0x0010)
-#define U300_SYSCON_CEFR_I2S0_CLK_EN (0x0008)
-#define U300_SYSCON_CEFR_I2C1_CLK_EN (0x0004)
-#define U300_SYSCON_CEFR_I2C0_CLK_EN (0x0002)
-#define U300_SYSCON_CEFR_FAST_BRIDGE_CLK_EN (0x0001)
-/* Clock enable for the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_CERR (0x0028)
-#define U300_SYSCON_CERR_CDS_CLK_EN (0x2000)
-#define U300_SYSCON_CERR_ISP_CLK_EN (0x1000)
-#define U300_SYSCON_CERR_MSPRO_CLK_EN (0x0800)
-#define U300_SYSCON_CERR_AHB_SUBSYS_BRIDGE_CLK_EN (0x0400)
-#define U300_SYSCON_CERR_SEMI_CLK_EN (0x0200)
-#define U300_SYSCON_CERR_XGAM_CLK_EN (0x0100)
-#define U300_SYSCON_CERR_VIDEO_ENC_CLK_EN (0x0080)
-#define U300_SYSCON_CERR_NANDIF_CLK_EN (0x0040)
-#define U300_SYSCON_CERR_EMIF_CLK_EN (0x0020)
-#define U300_SYSCON_CERR_DMAC_CLK_EN (0x0010)
-#define U300_SYSCON_CERR_CPU_CLK_EN (0x0008)
-#define U300_SYSCON_CERR_APEX_CLK_EN (0x0004)
-#define U300_SYSCON_CERR_AHB_CLK_EN (0x0002)
-#define U300_SYSCON_CERR_AAIF_CLK_EN (0x0001)
-/* Single block clock enable 16bit (-/W) */
-#define U300_SYSCON_SBCER (0x002c)
-#define U300_SYSCON_SBCER_PPM_CLK_EN (0x0009)
-#define U300_SYSCON_SBCER_ACC_TMR_CLK_EN (0x0008)
-#define U300_SYSCON_SBCER_APP_TMR_CLK_EN (0x0007)
-#define U300_SYSCON_SBCER_KEYPAD_CLK_EN (0x0006)
-#define U300_SYSCON_SBCER_GPIO_CLK_EN (0x0004)
-#define U300_SYSCON_SBCER_EH_CLK_EN (0x0003)
-#define U300_SYSCON_SBCER_BTR_CLK_EN (0x0002)
-#define U300_SYSCON_SBCER_UART_CLK_EN (0x0001)
-#define U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN (0x0000)
-#define U300_SYSCON_SBCER_UART1_CLK_EN (0x0019)
-#define U300_SYSCON_SBCER_I2S1_CORE_CLK_EN (0x0018)
-#define U300_SYSCON_SBCER_I2S0_CORE_CLK_EN (0x0017)
-#define U300_SYSCON_SBCER_SPI_CLK_EN (0x0016)
-#define U300_SYSCON_SBCER_MMC_CLK_EN (0x0015)
-#define U300_SYSCON_SBCER_I2S1_CLK_EN (0x0014)
-#define U300_SYSCON_SBCER_I2S0_CLK_EN (0x0013)
-#define U300_SYSCON_SBCER_I2C1_CLK_EN (0x0012)
-#define U300_SYSCON_SBCER_I2C0_CLK_EN (0x0011)
-#define U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN (0x0010)
-#define U300_SYSCON_SBCER_CDS_CLK_EN (0x002D)
-#define U300_SYSCON_SBCER_ISP_CLK_EN (0x002C)
-#define U300_SYSCON_SBCER_MSPRO_CLK_EN (0x002B)
-#define U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN (0x002A)
-#define U300_SYSCON_SBCER_SEMI_CLK_EN (0x0029)
-#define U300_SYSCON_SBCER_XGAM_CLK_EN (0x0028)
-#define U300_SYSCON_SBCER_VIDEO_ENC_CLK_EN (0x0027)
-#define U300_SYSCON_SBCER_NANDIF_CLK_EN (0x0026)
-#define U300_SYSCON_SBCER_EMIF_CLK_EN (0x0025)
-#define U300_SYSCON_SBCER_DMAC_CLK_EN (0x0024)
-#define U300_SYSCON_SBCER_CPU_CLK_EN (0x0023)
-#define U300_SYSCON_SBCER_APEX_CLK_EN (0x0022)
-#define U300_SYSCON_SBCER_AHB_CLK_EN (0x0021)
-#define U300_SYSCON_SBCER_AAIF_CLK_EN (0x0020)
-/* Single block clock disable 16bit (-/W) */
-#define U300_SYSCON_SBCDR (0x0030)
-/* Same values as above for SBCER */
-/* Clock force SLOW peripherals 16bit (R/W) */
-#define U300_SYSCON_CFSR (0x003c)
-#define U300_SYSCON_CFSR_PPM_CLK_FORCE_EN (0x0200)
-#define U300_SYSCON_CFSR_ACC_TMR_CLK_FORCE_EN (0x0100)
-#define U300_SYSCON_CFSR_APP_TMR_CLK_FORCE_EN (0x0080)
-#define U300_SYSCON_CFSR_KEYPAD_CLK_FORCE_EN (0x0020)
-#define U300_SYSCON_CFSR_GPIO_CLK_FORCE_EN (0x0010)
-#define U300_SYSCON_CFSR_EH_CLK_FORCE_EN (0x0008)
-#define U300_SYSCON_CFSR_BTR_CLK_FORCE_EN (0x0004)
-#define U300_SYSCON_CFSR_UART_CLK_FORCE_EN (0x0002)
-#define U300_SYSCON_CFSR_SLOW_BRIDGE_CLK_FORCE_EN (0x0001)
-/* Clock force FAST peripherals 16bit (R/W) */
-#define U300_SYSCON_CFFR (0x40)
-/* Values not defined. Define if you want to use them. */
-/* Clock force the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_CFRR (0x44)
-#define U300_SYSCON_CFRR_CDS_CLK_FORCE_EN (0x2000)
-#define U300_SYSCON_CFRR_ISP_CLK_FORCE_EN (0x1000)
-#define U300_SYSCON_CFRR_MSPRO_CLK_FORCE_EN (0x0800)
-#define U300_SYSCON_CFRR_AHB_SUBSYS_BRIDGE_CLK_FORCE_EN (0x0400)
-#define U300_SYSCON_CFRR_SEMI_CLK_FORCE_EN (0x0200)
-#define U300_SYSCON_CFRR_XGAM_CLK_FORCE_EN (0x0100)
-#define U300_SYSCON_CFRR_VIDEO_ENC_CLK_FORCE_EN (0x0080)
-#define U300_SYSCON_CFRR_NANDIF_CLK_FORCE_EN (0x0040)
-#define U300_SYSCON_CFRR_EMIF_CLK_FORCE_EN (0x0020)
-#define U300_SYSCON_CFRR_DMAC_CLK_FORCE_EN (0x0010)
-#define U300_SYSCON_CFRR_CPU_CLK_FORCE_EN (0x0008)
-#define U300_SYSCON_CFRR_APEX_CLK_FORCE_EN (0x0004)
-#define U300_SYSCON_CFRR_AHB_CLK_FORCE_EN (0x0002)
-#define U300_SYSCON_CFRR_AAIF_CLK_FORCE_EN (0x0001)
-/* PLL208 Frequency Control 16bit (R/W) */
-#define U300_SYSCON_PFCR (0x48)
-#define U300_SYSCON_PFCR_DPLL_MULT_NUM (0x000F)
-/* Power Management Control 16bit (R/W) */
-#define U300_SYSCON_PMCR (0x50)
-#define U300_SYSCON_PMCR_DCON_ENABLE (0x0002)
-#define U300_SYSCON_PMCR_PWR_MGNT_ENABLE (0x0001)
-/* Reset Out 16bit (R/W) */
-#define U300_SYSCON_RCR (0x6c)
-#define U300_SYSCON_RCR_RESOUT0_RST_N_DISABLE (0x0001)
-/* EMIF Slew Rate Control 16bit (R/W) */
-#define U300_SYSCON_SRCLR (0x70)
-#define U300_SYSCON_SRCLR_MASK (0x03FF)
-#define U300_SYSCON_SRCLR_VALUE (0x03FF)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_5_B (0x0200)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_5_A (0x0100)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_4_B (0x0080)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_4_A (0x0040)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_3_B (0x0020)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_3_A (0x0010)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_2_B (0x0008)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_2_A (0x0004)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_1_B (0x0002)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_1_A (0x0001)
-/* EMIF Clock Control Register 16bit (R/W) */
-#define U300_SYSCON_ECCR (0x0078)
-#define U300_SYSCON_ECCR_MASK (0x000F)
-#define U300_SYSCON_ECCR_EMIF_1_STATIC_CLK_EN_N_DISABLE (0x0008)
-#define U300_SYSCON_ECCR_EMIF_1_RET_OUT_CLK_EN_N_DISABLE (0x0004)
-#define U300_SYSCON_ECCR_EMIF_MEMCLK_RET_EN_N_DISABLE (0x0002)
-#define U300_SYSCON_ECCR_EMIF_SDRCLK_RET_EN_N_DISABLE (0x0001)
-/* MMC/MSPRO frequency divider register 0 16bit (R/W) */
-#define U300_SYSCON_MMF0R (0x90)
-#define U300_SYSCON_MMF0R_MASK (0x00FF)
-#define U300_SYSCON_MMF0R_FREQ_0_HIGH_MASK (0x00F0)
-#define U300_SYSCON_MMF0R_FREQ_0_LOW_MASK (0x000F)
-/* MMC/MSPRO frequency divider register 1 16bit (R/W) */
-#define U300_SYSCON_MMF1R (0x94)
-#define U300_SYSCON_MMF1R_MASK (0x00FF)
-#define U300_SYSCON_MMF1R_FREQ_1_HIGH_MASK (0x00F0)
-#define U300_SYSCON_MMF1R_FREQ_1_LOW_MASK (0x000F)
-/* Clock control for the MMC and MSPRO blocks 16bit (R/W) */
-#define U300_SYSCON_MMCR (0x9C)
-#define U300_SYSCON_MMCR_MASK (0x0003)
-#define U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE (0x0002)
-#define U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE (0x0001)
-/* SYS_0_CLK_CONTROL first clock control 16bit (R/W) */
-#define U300_SYSCON_S0CCR (0x120)
-#define U300_SYSCON_S0CCR_FIELD_MASK (0x43FF)
-#define U300_SYSCON_S0CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S0CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S0CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S0CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S0CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SYS_1_CLK_CONTROL second clock control 16 bit (R/W) */
-#define U300_SYSCON_S1CCR (0x124)
-#define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF)
-#define U300_SYSCON_S1CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S1CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S1CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S1CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S1CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SYS_2_CLK_CONTROL third clock control 16 bit (R/W) */
-#define U300_SYSCON_S2CCR (0x128)
-#define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF)
-#define U300_SYSCON_S2CCR_CLK_STEAL (0x8000)
-#define U300_SYSCON_S2CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S2CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S2CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S2CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S2CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SC_PLL_IRQ_CONTROL 16bit (R/W) */
-#define U300_SYSCON_PICR (0x0130)
-#define U300_SYSCON_PICR_MASK (0x00FF)
-#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_LOW_ENABLE (0x0080)
-#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_HIGH_ENABLE (0x0040)
-#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_LOW_ENABLE (0x0020)
-#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_HIGH_ENABLE (0x0010)
-#define U300_SYSCON_PICR_IRQMASK_PLL13_UNLOCK_ENABLE (0x0008)
-#define U300_SYSCON_PICR_IRQMASK_PLL13_LOCK_ENABLE (0x0004)
-#define U300_SYSCON_PICR_IRQMASK_PLL208_UNLOCK_ENABLE (0x0002)
-#define U300_SYSCON_PICR_IRQMASK_PLL208_LOCK_ENABLE (0x0001)
-/* SC_PLL_IRQ_STATUS 16 bit (R/-) */
-#define U300_SYSCON_PISR (0x0134)
-#define U300_SYSCON_PISR_MASK (0x000F)
-#define U300_SYSCON_PISR_PLL13_UNLOCK_IND (0x0008)
-#define U300_SYSCON_PISR_PLL13_LOCK_IND (0x0004)
-#define U300_SYSCON_PISR_PLL208_UNLOCK_IND (0x0002)
-#define U300_SYSCON_PISR_PLL208_LOCK_IND (0x0001)
-/* SC_PLL_IRQ_CLEAR 16 bit (-/W) */
-#define U300_SYSCON_PICLR (0x0138)
-#define U300_SYSCON_PICLR_MASK (0x000F)
-#define U300_SYSCON_PICLR_RWMASK (0x0000)
-#define U300_SYSCON_PICLR_PLL13_UNLOCK_SC (0x0008)
-#define U300_SYSCON_PICLR_PLL13_LOCK_SC (0x0004)
-#define U300_SYSCON_PICLR_PLL208_UNLOCK_SC (0x0002)
-#define U300_SYSCON_PICLR_PLL208_LOCK_SC (0x0001)
-/* Clock activity observability register 0 */
-#define U300_SYSCON_C0OAR (0x140)
-#define U300_SYSCON_C0OAR_MASK (0xFFFF)
-#define U300_SYSCON_C0OAR_VALUE (0xFFFF)
-#define U300_SYSCON_C0OAR_BT_H_CLK (0x8000)
-#define U300_SYSCON_C0OAR_ASPB_P_CLK (0x4000)
-#define U300_SYSCON_C0OAR_APP_SEMI_H_CLK (0x2000)
-#define U300_SYSCON_C0OAR_APP_SEMI_CLK (0x1000)
-#define U300_SYSCON_C0OAR_APP_MMC_MSPRO_CLK (0x0800)
-#define U300_SYSCON_C0OAR_APP_I2S1_CLK (0x0400)
-#define U300_SYSCON_C0OAR_APP_I2S0_CLK (0x0200)
-#define U300_SYSCON_C0OAR_APP_CPU_CLK (0x0100)
-#define U300_SYSCON_C0OAR_APP_52_CLK (0x0080)
-#define U300_SYSCON_C0OAR_APP_208_CLK (0x0040)
-#define U300_SYSCON_C0OAR_APP_104_CLK (0x0020)
-#define U300_SYSCON_C0OAR_APEX_CLK (0x0010)
-#define U300_SYSCON_C0OAR_AHPB_M_H_CLK (0x0008)
-#define U300_SYSCON_C0OAR_AHB_CLK (0x0004)
-#define U300_SYSCON_C0OAR_AFPB_P_CLK (0x0002)
-#define U300_SYSCON_C0OAR_AAIF_CLK (0x0001)
-/* Clock activity observability register 1 */
-#define U300_SYSCON_C1OAR (0x144)
-#define U300_SYSCON_C1OAR_MASK (0x3FFE)
-#define U300_SYSCON_C1OAR_VALUE (0x3FFE)
-#define U300_SYSCON_C1OAR_NFIF_F_CLK (0x2000)
-#define U300_SYSCON_C1OAR_MSPRO_CLK (0x1000)
-#define U300_SYSCON_C1OAR_MMC_P_CLK (0x0800)
-#define U300_SYSCON_C1OAR_MMC_CLK (0x0400)
-#define U300_SYSCON_C1OAR_KP_P_CLK (0x0200)
-#define U300_SYSCON_C1OAR_I2C1_P_CLK (0x0100)
-#define U300_SYSCON_C1OAR_I2C0_P_CLK (0x0080)
-#define U300_SYSCON_C1OAR_GPIO_CLK (0x0040)
-#define U300_SYSCON_C1OAR_EMIF_MPMC_CLK (0x0020)
-#define U300_SYSCON_C1OAR_EMIF_H_CLK (0x0010)
-#define U300_SYSCON_C1OAR_EVHIST_CLK (0x0008)
-#define U300_SYSCON_C1OAR_PPM_CLK (0x0004)
-#define U300_SYSCON_C1OAR_DMA_CLK (0x0002)
-/* Clock activity observability register 2 */
-#define U300_SYSCON_C2OAR (0x148)
-#define U300_SYSCON_C2OAR_MASK (0x0FFF)
-#define U300_SYSCON_C2OAR_VALUE (0x0FFF)
-#define U300_SYSCON_C2OAR_XGAM_CDI_CLK (0x0800)
-#define U300_SYSCON_C2OAR_XGAM_CLK (0x0400)
-#define U300_SYSCON_C2OAR_VC_H_CLK (0x0200)
-#define U300_SYSCON_C2OAR_VC_CLK (0x0100)
-#define U300_SYSCON_C2OAR_UA_P_CLK (0x0080)
-#define U300_SYSCON_C2OAR_TMR1_CLK (0x0040)
-#define U300_SYSCON_C2OAR_TMR0_CLK (0x0020)
-#define U300_SYSCON_C2OAR_SPI_P_CLK (0x0010)
-#define U300_SYSCON_C2OAR_PCM_I2S1_CORE_CLK (0x0008)
-#define U300_SYSCON_C2OAR_PCM_I2S1_CLK (0x0004)
-#define U300_SYSCON_C2OAR_PCM_I2S0_CORE_CLK (0x0002)
-#define U300_SYSCON_C2OAR_PCM_I2S0_CLK (0x0001)
-
-
-/*
- * The clocking hierarchy currently looks like this.
- * NOTE: the idea is NOT to show how the clocks are routed on the chip!
- * The idea is to show dependencies, so a clock higher up in the
- * hierarchy has to be on in order for another clock to be on. Now,
- * both CPU and DMA can actually be on top of the hierarchy, and that
- * is not modeled currently. Instead we have the backbone AMBA bus on
- * top. This bus cannot be programmed in any way but conceptually it
- * needs to be active for the bridges and devices to transport data.
- *
- * Please be aware that a few clocks are hw controlled, which means that
- * the hw itself can turn on/off or change the rate of the clock when
- * needed!
- *
- * AMBA bus
- * |
- * +- CPU
- * +- FSMC NANDIF NAND Flash interface
- * +- SEMI Shared Memory interface
- * +- ISP Image Signal Processor (U335 only)
- * +- CDS (U335 only)
- * +- DMA Direct Memory Access Controller
- * +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
- * +- APEX
- * +- VIDEO_ENC AVE2/3 Video Encoder
- * +- XGAM Graphics Accelerator Controller
- * +- AHB
- * |
- * +- ahb:0 AHB Bridge
- * | |
- * | +- ahb:1 INTCON Interrupt controller
- * | +- ahb:3 MSPRO Memory Stick Pro controller
- * | +- ahb:4 EMIF External Memory interface
- * |
- * +- fast:0 FAST bridge
- * | |
- * | +- fast:1 MMCSD MMC/SD card reader controller
- * | +- fast:2 I2S0 PCM I2S channel 0 controller
- * | +- fast:3 I2S1 PCM I2S channel 1 controller
- * | +- fast:4 I2C0 I2C channel 0 controller
- * | +- fast:5 I2C1 I2C channel 1 controller
- * | +- fast:6 SPI SPI controller
- * | +- fast:7 UART1 Secondary UART (U335 only)
- * |
- * +- slow:0 SLOW bridge
- * |
- * +- slow:1 SYSCON (not possible to control)
- * +- slow:2 WDOG Watchdog
- * +- slow:3 UART0 primary UART
- * +- slow:4 TIMER_APP Application timer - used in Linux
- * +- slow:5 KEYPAD controller
- * +- slow:6 GPIO controller
- * +- slow:7 RTC controller
- * +- slow:8 BT Bus Tracer (not used currently)
- * +- slow:9 EH Event Handler (not used currently)
- * +- slow:a TIMER_ACC Access style timer (not used currently)
- * +- slow:b PPM (U335 only, what is that?)
- */
-
-/* Global syscon virtual base */
-static void __iomem *syscon_vbase;
-
-/**
- * struct clk_syscon - U300 syscon clock
- * @hw: corresponding clock hardware entry
- * @hw_ctrld: whether this clock is hardware controlled (for refcount etc)
- * and does not need any magic pokes to be enabled/disabled
- * @reset: state holder, whether this block's reset line is asserted or not
- * @res_reg: reset line enable/disable flag register
- * @res_bit: bit for resetting or taking this consumer out of reset
- * @en_reg: clock line enable/disable flag register
- * @en_bit: bit for enabling/disabling this consumer clock line
- * @clk_val: magic value to poke in the register to enable/disable
- * this one clock
- */
-struct clk_syscon {
- struct clk_hw hw;
- bool hw_ctrld;
- bool reset;
- void __iomem *res_reg;
- u8 res_bit;
- void __iomem *en_reg;
- u8 en_bit;
- u16 clk_val;
-};
-
-#define to_syscon(_hw) container_of(_hw, struct clk_syscon, hw)
-
-static DEFINE_SPINLOCK(syscon_resetreg_lock);
-
-/*
- * Reset control functions. We remember if a block has been
- * taken out of reset and don't remove the reset assertion again
- * and vice versa. Currently we only remove resets so the
- * enablement function is defined out.
- */
-static void syscon_block_reset_enable(struct clk_syscon *sclk)
-{
- unsigned long iflags;
- u16 val;
-
- /* Not all blocks support resetting */
- if (!sclk->res_reg)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(sclk->res_reg);
- val |= BIT(sclk->res_bit);
- writew(val, sclk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- sclk->reset = true;
-}
-
-static void syscon_block_reset_disable(struct clk_syscon *sclk)
-{
- unsigned long iflags;
- u16 val;
-
- /* Not all blocks support resetting */
- if (!sclk->res_reg)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(sclk->res_reg);
- val &= ~BIT(sclk->res_bit);
- writew(val, sclk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- sclk->reset = false;
-}
-
-static int syscon_clk_prepare(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* If the block is in reset, bring it out */
- if (sclk->reset)
- syscon_block_reset_disable(sclk);
- return 0;
-}
-
-static void syscon_clk_unprepare(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Please don't force the console into reset */
- if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
- return;
- /* When unpreparing, force block into reset */
- if (!sclk->reset)
- syscon_block_reset_enable(sclk);
-}
-
-static int syscon_clk_enable(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Don't touch the hardware controlled clocks */
- if (sclk->hw_ctrld)
- return 0;
- /* These cannot be controlled */
- if (sclk->clk_val == 0xFFFFU)
- return 0;
-
- writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCER);
- return 0;
-}
-
-static void syscon_clk_disable(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Don't touch the hardware controlled clocks */
- if (sclk->hw_ctrld)
- return;
- if (sclk->clk_val == 0xFFFFU)
- return;
- /* Please don't disable the console port */
- if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
- return;
-
- writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCDR);
-}
-
-static int syscon_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 val;
-
- /* If no enable register defined, it's always-on */
- if (!sclk->en_reg)
- return 1;
-
- val = readw(sclk->en_reg);
- val &= BIT(sclk->en_bit);
-
- return val ? 1 : 0;
-}
-
-static u16 syscon_get_perf(void)
-{
- u16 val;
-
- val = readw(syscon_vbase + U300_SYSCON_CCR);
- val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- return val;
-}
-
-static unsigned long
-syscon_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 perf = syscon_get_perf();
-
- switch (sclk->clk_val) {
- case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
- case U300_SYSCON_SBCER_I2C0_CLK_EN:
- case U300_SYSCON_SBCER_I2C1_CLK_EN:
- case U300_SYSCON_SBCER_MMC_CLK_EN:
- case U300_SYSCON_SBCER_SPI_CLK_EN:
- /* The FAST clocks have one progression */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- default:
- return parent_rate; /* 26 MHz */
- }
- case U300_SYSCON_SBCER_DMAC_CLK_EN:
- case U300_SYSCON_SBCER_NANDIF_CLK_EN:
- case U300_SYSCON_SBCER_XGAM_CLK_EN:
- /* AMBA interconnect peripherals */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 6500000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 26000000;
- default:
- return parent_rate; /* 52 MHz */
- }
- case U300_SYSCON_SBCER_SEMI_CLK_EN:
- case U300_SYSCON_SBCER_EMIF_CLK_EN:
- /* EMIF speeds */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- default:
- return 104000000;
- }
- case U300_SYSCON_SBCER_CPU_CLK_EN:
- /* And the fast CPU clock */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- return 104000000;
- default:
- return parent_rate; /* 208 MHz */
- }
- default:
- /*
- * The SLOW clocks and default just inherit the rate of
- * their parent (typically PLL13 13 MHz).
- */
- return parent_rate;
- }
-}
-
-static long
-syscon_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
- return *prate;
- /* We really only support setting the rate of the CPU clock */
- if (rate <= 13000000)
- return 13000000;
- if (rate <= 52000000)
- return 52000000;
- if (rate <= 104000000)
- return 104000000;
- return 208000000;
-}
-
-static int syscon_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 val;
-
- /* We only support setting the rate of the CPU clock */
- if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
- return -EINVAL;
- switch (rate) {
- case 13000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
- break;
- case 52000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
- break;
- case 104000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
- break;
- case 208000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
- break;
- default:
- return -EINVAL;
- }
- val |= readw(syscon_vbase + U300_SYSCON_CCR) &
- ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- writew(val, syscon_vbase + U300_SYSCON_CCR);
- return 0;
-}
-
-static const struct clk_ops syscon_clk_ops = {
- .prepare = syscon_clk_prepare,
- .unprepare = syscon_clk_unprepare,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .is_enabled = syscon_clk_is_enabled,
- .recalc_rate = syscon_clk_recalc_rate,
- .round_rate = syscon_clk_round_rate,
- .set_rate = syscon_clk_set_rate,
-};
-
-static struct clk_hw * __init
-syscon_clk_register(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- bool hw_ctrld,
- void __iomem *res_reg, u8 res_bit,
- void __iomem *en_reg, u8 en_bit,
- u16 clk_val)
-{
- struct clk_hw *hw;
- struct clk_syscon *sclk;
- struct clk_init_data init;
- int ret;
-
- sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
- if (!sclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &syscon_clk_ops;
- init.flags = flags;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- sclk->hw.init = &init;
- sclk->hw_ctrld = hw_ctrld;
- /* Assume the block is in reset at registration */
- sclk->reset = true;
- sclk->res_reg = res_reg;
- sclk->res_bit = res_bit;
- sclk->en_reg = en_reg;
- sclk->en_bit = en_bit;
- sclk->clk_val = clk_val;
-
- hw = &sclk->hw;
- ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(sclk);
- hw = ERR_PTR(ret);
- }
-
- return hw;
-}
-
-#define U300_CLK_TYPE_SLOW 0
-#define U300_CLK_TYPE_FAST 1
-#define U300_CLK_TYPE_REST 2
-
-/**
- * struct u300_clock - defines the bits and pieces for a certain clock
- * @type: the clock type, slow fast or rest
- * @id: the bit in the slow/fast/rest register for this clock
- * @hw_ctrld: whether the clock is hardware controlled
- * @clk_val: a value to poke in the one-write enable/disable registers
- */
-struct u300_clock {
- u8 type;
- u8 id;
- bool hw_ctrld;
- u16 clk_val;
-};
-
-static struct u300_clock const u300_clk_lookup[] __initconst = {
- {
- .type = U300_CLK_TYPE_REST,
- .id = 3,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_CPU_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 4,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_DMAC_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 5,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_EMIF_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 6,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_NANDIF_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 8,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_XGAM_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 9,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_SEMI_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 10,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 12,
- .hw_ctrld = false,
- /* INTCON: cannot be enabled, just taken out of reset */
- .clk_val = 0xFFFFU,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 0,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 1,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_I2C0_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 2,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_I2C1_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 5,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_MMC_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 6,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_SPI_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 0,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 1,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_UART_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 4,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_GPIO_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 6,
- .hw_ctrld = true,
- /* No clock enable register bit */
- .clk_val = 0xFFFFU,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 7,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_APP_TMR_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 8,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_ACC_TMR_CLK_EN,
- },
-};
-
-static void __init of_u300_syscon_clk_init(struct device_node *np)
-{
- struct clk_hw *hw = ERR_PTR(-EINVAL);
- const char *clk_name = np->name;
- const char *parent_name;
- void __iomem *res_reg;
- void __iomem *en_reg;
- u32 clk_type;
- u32 clk_id;
- int i;
-
- if (of_property_read_u32(np, "clock-type", &clk_type)) {
- pr_err("%s: syscon clock \"%s\" missing clock-type property\n",
- __func__, clk_name);
- return;
- }
- if (of_property_read_u32(np, "clock-id", &clk_id)) {
- pr_err("%s: syscon clock \"%s\" missing clock-id property\n",
- __func__, clk_name);
- return;
- }
- parent_name = of_clk_get_parent_name(np, 0);
-
- switch (clk_type) {
- case U300_CLK_TYPE_SLOW:
- res_reg = syscon_vbase + U300_SYSCON_RSR;
- en_reg = syscon_vbase + U300_SYSCON_CESR;
- break;
- case U300_CLK_TYPE_FAST:
- res_reg = syscon_vbase + U300_SYSCON_RFR;
- en_reg = syscon_vbase + U300_SYSCON_CEFR;
- break;
- case U300_CLK_TYPE_REST:
- res_reg = syscon_vbase + U300_SYSCON_RRR;
- en_reg = syscon_vbase + U300_SYSCON_CERR;
- break;
- default:
- pr_err("unknown clock type %x specified\n", clk_type);
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(u300_clk_lookup); i++) {
- const struct u300_clock *u3clk = &u300_clk_lookup[i];
-
- if (u3clk->type == clk_type && u3clk->id == clk_id)
- hw = syscon_clk_register(NULL, clk_name, parent_name,
- 0, u3clk->hw_ctrld,
- res_reg, u3clk->id,
- en_reg, u3clk->id,
- u3clk->clk_val);
- }
-
- if (!IS_ERR(hw)) {
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-
- /*
-		 * A few system clocks - the device tree does not
-		 * represent clocks without a corresponding device node,
-		 * so for now we add these three clocks here.
- */
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 5)
- clk_hw_register_clkdev(hw, NULL, "pl172");
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 9)
- clk_hw_register_clkdev(hw, NULL, "semi");
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 12)
- clk_hw_register_clkdev(hw, NULL, "intcon");
- }
-}
-
-/**
- * struct clk_mclk - U300 MCLK clock (MMC/SD clock)
- * @hw: corresponding clock hardware entry
- * @is_mspro: if this is the memory stick clock rather than MMC/SD
- */
-struct clk_mclk {
- struct clk_hw hw;
- bool is_mspro;
-};
-
-#define to_mclk(_hw) container_of(_hw, struct clk_mclk, hw)
-
-static int mclk_clk_prepare(struct clk_hw *hw)
-{
- struct clk_mclk *mclk = to_mclk(hw);
- u16 val;
-
- /* The MMC and MSPRO clocks need some special set-up */
- if (!mclk->is_mspro) {
- /* Set default MMC clock divisor to 18.9 MHz */
- writew(0x0054U, syscon_vbase + U300_SYSCON_MMF0R);
- val = readw(syscon_vbase + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Disable MSPRO frequency */
- val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_MMCR);
- } else {
- val = readw(syscon_vbase + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Enable MSPRO frequency */
- val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_MMCR);
- }
-
- return 0;
-}
-
-static unsigned long
-mclk_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u16 perf = syscon_get_perf();
-
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- /*
- * Here, the 208 MHz PLL gets shut down and the always
- * on 13 MHz PLL used for RTC etc kicks into use
- * instead.
- */
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- {
- /*
- * This clock is under program control. The register is
- * divided in two nybbles, bit 7-4 gives cycles-1 to count
- * high, bit 3-0 gives cycles-1 to count low. Distribute
- * these with no more than 1 cycle difference between
- * low and high and add low and high to get the actual
- * divisor. The base PLL is 208 MHz. Writing 0x00 will
- * divide by 1 and 1 so the highest frequency possible
- * is 104 MHz.
- *
- * e.g. 0x54 =>
- * f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
- */
- u16 val = readw(syscon_vbase + U300_SYSCON_MMF0R) &
- U300_SYSCON_MMF0R_MASK;
- switch (val) {
- case 0x0054:
- return 18900000;
- case 0x0044:
- return 20800000;
- case 0x0043:
- return 23100000;
- case 0x0033:
- return 26000000;
- case 0x0032:
- return 29700000;
- case 0x0022:
- return 34700000;
- case 0x0021:
- return 41600000;
- case 0x0011:
- return 52000000;
- case 0x0000:
- return 104000000;
- default:
- break;
- }
- }
- default:
- break;
- }
- return parent_rate;
-}
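To make the divisor encoding concrete, here is a minimal standalone sketch of the MMF0R math described in the comment above (the helper name is hypothetical; the 208 MHz base and the nybble semantics come from that comment):

	/* Hypothetical helper mirroring the MMF0R nybble math above. */
	static unsigned long u300_mmf0r_to_rate(u16 val)
	{
		unsigned int high = ((val >> 4) & 0xf) + 1;	/* cycles counted high */
		unsigned int low = (val & 0xf) + 1;		/* cycles counted low */

		return 208000000UL / (high + low);
	}

	/* u300_mmf0r_to_rate(0x54) = 208 MHz / (6 + 5) = 18.9 MHz, matching the table. */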
-
-static long
-mclk_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- if (rate <= 18900000)
- return 18900000;
- if (rate <= 20800000)
- return 20800000;
- if (rate <= 23100000)
- return 23100000;
- if (rate <= 26000000)
- return 26000000;
- if (rate <= 29700000)
- return 29700000;
- if (rate <= 34700000)
- return 34700000;
- if (rate <= 41600000)
- return 41600000;
- /* Highest rate */
- return 52000000;
-}
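For illustration, how a few requests travel through the ladder above (an observation about the code, not text from the driver):

	/*
	 * Rounding examples:
	 *   25 MHz request -> 26 MHz (next supported step up)
	 *   60 MHz request -> 52 MHz (the ladder saturates here; 104 MHz is
	 *   handled by mclk_clk_set_rate() below but is never returned by
	 *   round_rate, so the clk framework will not select it)
	 */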
-
-static int mclk_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- u16 val;
- u16 reg;
-
- switch (rate) {
- case 18900000:
- val = 0x0054;
- break;
- case 20800000:
- val = 0x0044;
- break;
- case 23100000:
- val = 0x0043;
- break;
- case 26000000:
- val = 0x0033;
- break;
- case 29700000:
- val = 0x0032;
- break;
- case 34700000:
- val = 0x0022;
- break;
- case 41600000:
- val = 0x0021;
- break;
- case 52000000:
- val = 0x0011;
- break;
- case 104000000:
- val = 0x0000;
- break;
- default:
- return -EINVAL;
- }
-
- reg = readw(syscon_vbase + U300_SYSCON_MMF0R) &
- ~U300_SYSCON_MMF0R_MASK;
- writew(reg | val, syscon_vbase + U300_SYSCON_MMF0R);
- return 0;
-}
-
-static const struct clk_ops mclk_ops = {
- .prepare = mclk_clk_prepare,
- .recalc_rate = mclk_clk_recalc_rate,
- .round_rate = mclk_clk_round_rate,
- .set_rate = mclk_clk_set_rate,
-};
-
-static struct clk_hw * __init
-mclk_clk_register(struct device *dev, const char *name,
- const char *parent_name, bool is_mspro)
-{
- struct clk_hw *hw;
- struct clk_mclk *mclk;
- struct clk_init_data init;
- int ret;
-
- mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
- if (!mclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = "mclk";
- init.ops = &mclk_ops;
- init.flags = 0;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- mclk->hw.init = &init;
- mclk->is_mspro = is_mspro;
-
- hw = &mclk->hw;
- ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(mclk);
- hw = ERR_PTR(ret);
- }
-
- return hw;
-}
-
-static void __init of_u300_syscon_mclk_init(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *clk_name = np->name;
- const char *parent_name;
-
- parent_name = of_clk_get_parent_name(np, 0);
- hw = mclk_clk_register(NULL, clk_name, parent_name, false);
- if (!IS_ERR(hw))
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
-static const struct of_device_id u300_clk_match[] __initconst = {
- {
- .compatible = "fixed-clock",
- .data = of_fixed_clk_setup,
- },
- {
- .compatible = "fixed-factor-clock",
- .data = of_fixed_factor_clk_setup,
- },
- {
- .compatible = "stericsson,u300-syscon-clk",
- .data = of_u300_syscon_clk_init,
- },
- {
- .compatible = "stericsson,u300-syscon-mclk",
- .data = of_u300_syscon_mclk_init,
- },
- {}
-};
-
-
-void __init u300_clk_init(void __iomem *base)
-{
- u16 val;
-
- syscon_vbase = base;
-
- /* Set system to run at PLL208, max performance, a known state. */
- val = readw(syscon_vbase + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- writew(val, syscon_vbase + U300_SYSCON_CCR);
- /* Wait for the PLL208 to lock if not locked in yet */
- while (!(readw(syscon_vbase + U300_SYSCON_CSR) &
- U300_SYSCON_CSR_PLL208_LOCK_IND));
-
- /* Power management enable */
- val = readw(syscon_vbase + U300_SYSCON_PMCR);
- val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_PMCR);
-
- of_clk_init(u300_clk_match);
-}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 43db67337bc0..344cd6c61188 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -759,6 +759,63 @@ static int vc5_update_power(struct device_node *np_output,
return 0;
}
+static int vc5_map_cap_value(u32 femtofarads)
+{
+ int mapped_value;
+
+ /*
+ * The datasheet explicitly states 9000 - 25000 with 0.5pF
+ * steps, but the Programmer's guide shows the steps are 0.430pF.
+	 * After getting feedback from Renesas, the 0.5pF steps were the
+	 * goal, but 0.430pF steps are what the hardware actually implements.
+	 * Because of this, the actual range goes up to 22760 instead of 25000.
+ */
+ if (femtofarads < 9000 || femtofarads > 22760)
+ return -EINVAL;
+
+ /*
+ * The Programmer's guide shows XTAL[5:0] but in reality,
+ * XTAL[0] and XTAL[1] are both LSB which makes the math
+	 * strange. With clarification from Renesas, setting the
+ * values should be simpler by ignoring XTAL[0]
+ */
+ mapped_value = DIV_ROUND_CLOSEST(femtofarads - 9000, 430);
+
+ /*
+ * Since the calculation ignores XTAL[0], there is one
+ * special case where mapped_value = 32. In reality, this means
+ * the real mapped value should be 111111b. In other cases,
+ * the mapped_value needs to be shifted 1 to the left.
+ */
+ if (mapped_value > 31)
+ mapped_value = 0x3f;
+ else
+ mapped_value <<= 1;
+
+ return mapped_value;
+}
+
+static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data *vc5)
+{
+ u32 value;
+ int mapped_value;
+
+ if (!of_property_read_u32(node, "idt,xtal-load-femtofarads", &value)) {
+ mapped_value = vc5_map_cap_value(value);
+ if (mapped_value < 0)
+ return mapped_value;
+
+ /*
+ * The mapped_value is really the high 6 bits of
+ * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
+ * shift the value 2 places.
+ */
+ regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03, mapped_value << 2);
+ regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03, mapped_value << 2);
+ }
+
+ return 0;
+}
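A few worked values tie the mapping and the register write together (assuming the two functions above; the input values are illustrative):

	/*
	 * vc5_map_cap_value(9000)  -> 0 steps             -> 0x00 (9 pF minimum)
	 * vc5_map_cap_value(9430)  -> 1 step, shifted     -> 0x02
	 * vc5_map_cap_value(22760) -> 32 steps, saturated -> 0x3f
	 * vc5_map_cap_value(25000) -> -EINVAL (past the real 22760 fF limit)
	 *
	 * The result is then written as mapped_value << 2 into bits [7:2] of
	 * both load-capacitance registers (mask ~0x03 leaves bits [1:0]
	 * alone), e.g. 0x3f becomes 0xfc.
	 */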
+
static int vc5_update_slew(struct device_node *np_output,
struct vc5_out_data *clk_out)
{
@@ -884,6 +941,13 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EINVAL;
}
+ /* Configure Optional Loading Capacitance for external XTAL */
+ if (!(vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)) {
+ ret = vc5_update_cap_load(client->dev.of_node, vc5);
+ if (ret)
+ goto err_clk_register;
+ }
+
init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node);
init.ops = &vc5_mux_ops;
init.flags = 0;
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 3fd53057c01f..857217cbcef8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -206,17 +206,16 @@ static void xgene_pcppllclk_init(struct device_node *np)
* @hw: handle between common and hardware-specific interfaces
* @reg: register containing the fractional scale multiplier (scaler)
* @shift: shift to the unit bit field
+ * @mask: mask to the unit bit field
* @denom: 1/denominator unit
* @lock: register lock
- * Flags:
- * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
+ * @flags: XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
* from the register plus one. For example,
* 0 for (0 + 1) / denom,
* 1 for (1 + 1) / denom and etc.
* If this flag is set, it is
* 0 for (denom - 0) / denom,
* 1 for (denom - 1) / denom and etc.
- *
*/
struct xgene_clk_pmd {
struct clk_hw hw;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8c1d04db990d..5052541a0986 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1164,6 +1164,27 @@ int clk_enable(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_enable);
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return clk && !(clk->core->ops->enable && clk->core->ops->disable);
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
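A sketch of the intended use from a runtime-PM path (hypothetical consumer code, not part of this patch; struct my_dev and its clk member are assumed):

	static int my_dev_runtime_suspend(struct device *dev)
	{
		struct my_dev *md = dev_get_drvdata(dev);

		/*
		 * For clocks gated by prepare/unprepare, clk_disable() alone
		 * changes nothing material, so drop the prepare count too.
		 * The resume path must mirror this with clk_prepare_enable()
		 * or clk_enable() respectively to keep the counts balanced.
		 */
		if (clk_is_enabled_when_prepared(md->clk))
			clk_disable_unprepare(md->clk);
		else
			clk_disable(md->clk);

		return 0;
	}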
+
static int clk_core_prepare_enable(struct clk_core *core)
{
int ret;
@@ -4555,6 +4576,8 @@ int of_clk_add_provider(struct device_node *np,
if (ret < 0)
of_clk_del_provider(np);
+ fwnode_dev_initialized(&np->fwnode, true);
+
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
@@ -4672,6 +4695,7 @@ void of_clk_del_provider(struct device_node *np)
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
+ fwnode_dev_initialized(&np->fwnode, false);
of_node_put(cp->node);
kfree(cp);
break;
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3061896503f3..47d9ec3abd2f 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -6,8 +6,6 @@ config MXC_CLK
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 7b13fb57d842..c44e18c6f63f 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -51,16 +51,6 @@ enum mx31_clks {
static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
-static struct clk ** const uart_clks[] __initconst = {
- &clk[ipg],
- &clk[uart1_gate],
- &clk[uart2_gate],
- &clk[uart3_gate],
- &clk[uart4_gate],
- &clk[uart5_gate],
- NULL
-};
-
static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
{
clk[dummy] = imx_clk_fixed("dummy", 0);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b2ff187cedab..521d6136d22c 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -338,10 +338,10 @@ static void init_ldb_clks(struct device_node *np, void __iomem *ccm_base)
of_assigned_ldb_sels(np, &sel[0][3], &sel[1][3]);
for (i = 0; i < 2; i++) {
- /* Warn if a glitch might have been introduced already */
+ /* Print a notice if a glitch might have been introduced already */
if (sel[i][0] != 3) {
- pr_warn("ccm: ldb_di%d_sel already changed from reset value: %d\n",
- i, sel[i][0]);
+ pr_notice("ccm: possible glitch: ldb_di%d_sel already changed from reset value: %d\n",
+ i, sel[i][0]);
}
if (sel[i][0] == sel[i][3])
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 2f9361946a0e..29eab05c9068 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -6,6 +6,7 @@
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/imx.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 7c905861af5d..6a01eec36dd0 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -288,6 +288,11 @@ static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", "
static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m",
"sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", };
+static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -410,6 +415,13 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
+ hws[IMX8MM_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
+ hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
+ hws[IMX8MM_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base)))
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 3c21db942d5b..324c5fd0aa04 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -281,6 +281,11 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
"sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
"video_pll1_out", "osc_32k", };
+static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "dummy", "dummy", "gpu_pll_out", "dummy",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -405,6 +410,13 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MN_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
+ hws[IMX8MN_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
+ hws[IMX8MN_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
+ hws[IMX8MN_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base))) {
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 779ea69e639c..4dd4ae9d022b 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -270,6 +270,14 @@ static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "os
static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
"sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", };
+static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy", "dummy", "ckil",
+ "audio_pll1_out_monitor", "audio_pll2_out_monitor",
+ "video_pll1_out_monitor", "gpu_pll_out_monitor",
+ "vpu_pll_out_monitor", "arm_pll_out_monitor",
+ "sys_pll1_out_monitor", "sys_pll2_out_monitor",
+ "sys_pll3_out_monitor", "dram_pll_out_monitor",
+ "video_pll2_out_monitor", };
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -399,6 +407,20 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
+ hws[IMX8MQ_CLK_MON_AUDIO_PLL1_DIV] = imx_clk_hw_divider("audio_pll1_out_monitor", "audio_pll1_bypass", base + 0x78, 0, 3);
+ hws[IMX8MQ_CLK_MON_AUDIO_PLL2_DIV] = imx_clk_hw_divider("audio_pll2_out_monitor", "audio_pll2_bypass", base + 0x78, 4, 3);
+ hws[IMX8MQ_CLK_MON_VIDEO_PLL1_DIV] = imx_clk_hw_divider("video_pll1_out_monitor", "video_pll1_bypass", base + 0x78, 8, 3);
+ hws[IMX8MQ_CLK_MON_GPU_PLL_DIV] = imx_clk_hw_divider("gpu_pll_out_monitor", "gpu_pll_bypass", base + 0x78, 12, 3);
+ hws[IMX8MQ_CLK_MON_VPU_PLL_DIV] = imx_clk_hw_divider("vpu_pll_out_monitor", "vpu_pll_bypass", base + 0x78, 16, 3);
+ hws[IMX8MQ_CLK_MON_ARM_PLL_DIV] = imx_clk_hw_divider("arm_pll_out_monitor", "arm_pll_bypass", base + 0x78, 20, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL1_DIV] = imx_clk_hw_divider("sys_pll1_out_monitor", "sys1_pll_out", base + 0x7c, 0, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL2_DIV] = imx_clk_hw_divider("sys_pll2_out_monitor", "sys2_pll_out", base + 0x7c, 4, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL3_DIV] = imx_clk_hw_divider("sys_pll3_out_monitor", "sys3_pll_out", base + 0x7c, 8, 3);
+ hws[IMX8MQ_CLK_MON_DRAM_PLL_DIV] = imx_clk_hw_divider("dram_pll_out_monitor", "dram_pll_out", base + 0x7c, 12, 3);
+ hws[IMX8MQ_CLK_MON_VIDEO_PLL2_DIV] = imx_clk_hw_divider("video_pll2_out_monitor", "video2_pll_out", base + 0x7c, 16, 3);
+ hws[IMX8MQ_CLK_MON_SEL] = imx_clk_hw_mux("pllout_monitor_sel", base + 0x74, 0, 4, pllout_monitor_sels, ARRAY_SIZE(pllout_monitor_sels));
+ hws[IMX8MQ_CLK_MON_CLK2_OUT] = imx_clk_hw_gate("pllout_monitor_clk2", "pllout_monitor_sel", base + 0x74, 4);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base)))
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 5b3d4ede7c7c..fbf1170c09ed 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -17,6 +17,14 @@
#include <dt-bindings/clock/imx8-clock.h>
#include <dt-bindings/firmware/imx/rsrc.h>
+static const char *dc0_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "dc0_pll0_clk",
+ "dc0_pll1_clk",
+ "dc0_bypass0_clk",
+};
+
static int imx8qxp_clk_probe(struct platform_device *pdev)
{
struct device_node *ccm_node = pdev->dev.of_node;
@@ -115,12 +123,26 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC, clk_cells);
/* Display controller SS */
- clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
- clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
+ clks[IMX_DC0_DISP0_CLK] = imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_DC0_DISP1_CLK] = imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
+ clks[IMX_DC0_PLL0_CLK] = imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL, clk_cells);
+ clks[IMX_DC0_PLL1_CLK] = imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL, clk_cells);
+ clks[IMX_DC0_BYPASS0_CLK] = imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_DC0_BYPASS1_CLK] = imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS, clk_cells);
/* MIPI-LVDS SS */
+ clks[IMX_MIPI0_LVDS_PIXEL_CLK] = imx_clk_scu("mipi0_lvds_pixel_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_LVDS_BYPASS_CLK] = imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_MIPI0_LVDS_PHY_CLK] = imx_clk_scu("mipi0_lvds_phy_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3, clk_cells);
clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_PWM0_CLK] = imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_MIPI1_LVDS_PIXEL_CLK] = imx_clk_scu("mipi1_lvds_pixel_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_LVDS_BYPASS_CLK] = imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_MIPI1_LVDS_PHY_CLK] = imx_clk_scu("mipi1_lvds_phy_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3, clk_cells);
+ clks[IMX_MIPI1_I2C0_CLK] = imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_I2C1_CLK] = imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_PWM0_CLK] = imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
/* MIPI CSI SS */
clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER, clk_cells);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index ce8475098b31..886e2d9fced5 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -426,66 +426,77 @@ config COMMON_CLK_MT8183
config COMMON_CLK_MT8183_AUDIOSYS
bool "Clock driver for MediaTek MT8183 audiosys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 audiosys clocks.
config COMMON_CLK_MT8183_CAMSYS
bool "Clock driver for MediaTek MT8183 camsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 camsys clocks.
config COMMON_CLK_MT8183_IMGSYS
bool "Clock driver for MediaTek MT8183 imgsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 imgsys clocks.
config COMMON_CLK_MT8183_IPU_CORE0
bool "Clock driver for MediaTek MT8183 ipu_core0"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core0 clocks.
config COMMON_CLK_MT8183_IPU_CORE1
bool "Clock driver for MediaTek MT8183 ipu_core1"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core1 clocks.
config COMMON_CLK_MT8183_IPU_ADL
bool "Clock driver for MediaTek MT8183 ipu_adl"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_adl clocks.
config COMMON_CLK_MT8183_IPU_CONN
bool "Clock driver for MediaTek MT8183 ipu_conn"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_conn clocks.
config COMMON_CLK_MT8183_MFGCFG
bool "Clock driver for MediaTek MT8183 mfgcfg"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mfgcfg clocks.
config COMMON_CLK_MT8183_MMSYS
bool "Clock driver for MediaTek MT8183 mmsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mmsys clocks.
config COMMON_CLK_MT8183_VDECSYS
bool "Clock driver for MediaTek MT8183 vdecsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vdecsys clocks.
config COMMON_CLK_MT8183_VENCSYS
bool "Clock driver for MediaTek MT8183 vencsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vencsys clocks.
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index dcc1352bf13c..b0c61709bacc 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -17,29 +17,36 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
-static int mtk_clk_mux_enable(struct clk_hw *hw)
+static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = BIT(mux->data->gate_shift);
-
- return regmap_update_bits(mux->regmap, mux->data->mux_ofs,
- mask, ~mask);
-}
+ unsigned long flags = 0;
-static void mtk_clk_mux_disable(struct clk_hw *hw)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = BIT(mux->data->gate_shift);
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
- regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask);
-}
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ /*
+	 * If the parent was changed while the clock was disabled, the new
+	 * selection has not taken effect yet. Set the update bit to make
+	 * sure the mux gets updated.
+ */
+ if (mux->reparent && mux->data->upd_shift >= 0) {
+ regmap_write(mux->regmap, mux->data->upd_ofs,
+ BIT(mux->data->upd_shift));
+ mux->reparent = false;
+ }
-static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
- return regmap_write(mux->regmap, mux->data->clr_ofs,
- BIT(mux->data->gate_shift));
+ return 0;
}
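In register terms, the enable path above is two one-shot writes taken under the same lock as set_parent (a summary of the code; offsets come from the per-mux data):

	/*
	 * 1. BIT(gate_shift) -> clr_ofs: clears the gate bit, ungating the
	 *    clock (the set/clr registers are write-one-to-act).
	 * 2. If a set_parent ran while the clock was gated (mux->reparent),
	 *    BIT(upd_shift) -> upd_ofs makes the hardware latch the parent
	 *    that was selected earlier through set_ofs/clr_ofs.
	 */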
static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
@@ -72,28 +79,6 @@ static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
return val;
}
-static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = GENMASK(mux->data->mux_width - 1, 0);
- unsigned long flags = 0;
-
- if (mux->lock)
- spin_lock_irqsave(mux->lock, flags);
- else
- __acquire(mux->lock);
-
- regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask,
- index << mux->data->mux_shift);
-
- if (mux->lock)
- spin_unlock_irqrestore(mux->lock, flags);
- else
- __release(mux->lock);
-
- return 0;
-}
-
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -116,9 +101,11 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
regmap_write(mux->regmap, mux->data->set_ofs,
index << mux->data->mux_shift);
- if (mux->data->upd_shift >= 0)
+ if (mux->data->upd_shift >= 0) {
regmap_write(mux->regmap, mux->data->upd_ofs,
BIT(mux->data->upd_shift));
+ mux->reparent = true;
+ }
}
if (mux->lock)
@@ -129,25 +116,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
-const struct clk_ops mtk_mux_ops = {
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_lock,
-};
-
-const struct clk_ops mtk_mux_clr_set_upd_ops = {
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_setclr_lock,
-};
-
-const struct clk_ops mtk_mux_gate_ops = {
- .enable = mtk_clk_mux_enable,
- .disable = mtk_clk_mux_disable,
- .is_enabled = mtk_clk_mux_is_enabled,
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_lock,
-};
-
-const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
+static const struct clk_ops mtk_mux_ops = {
.enable = mtk_clk_mux_enable_setclr,
.disable = mtk_clk_mux_disable_setclr,
.is_enabled = mtk_clk_mux_is_enabled,
@@ -171,7 +140,7 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
init.flags = mux->flags | CLK_SET_RATE_PARENT;
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
- init.ops = mux->ops;
+ init.ops = &mtk_mux_ops;
clk_mux->regmap = regmap;
clk_mux->data = mux;
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 8e2f927dd2ff..f1946161ade1 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -14,6 +14,7 @@ struct mtk_clk_mux {
struct regmap *regmap;
const struct mtk_mux *data;
spinlock_t *lock;
+ bool reparent;
};
struct mtk_mux {
@@ -32,19 +33,12 @@ struct mtk_mux {
u8 gate_shift;
s8 upd_shift;
- const struct clk_ops *ops;
-
signed char num_parents;
};
-extern const struct clk_ops mtk_mux_ops;
-extern const struct clk_ops mtk_mux_clr_set_upd_ops;
-extern const struct clk_ops mtk_mux_gate_ops;
-extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
-
#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags, _ops) { \
+ _gate, _upd_ofs, _upd, _flags) { \
.id = _id, \
.name = _name, \
.mux_ofs = _mux_ofs, \
@@ -58,7 +52,6 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = _flags, \
- .ops = &_ops, \
}
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
@@ -66,8 +59,7 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_gate, _upd_ofs, _upd, _flags) \
GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags, \
- mtk_mux_gate_clr_set_upd_ops)
+			_gate, _upd_ofs, _upd, _flags)
#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 0e44695b8772..2ad3801398dc 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -1879,7 +1879,6 @@ static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
-static MESON_GATE(axg_mipi_enable, HHI_MIPI_CNTL0, 29);
/* Always On (AO) domain gates */
@@ -1974,7 +1973,6 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_PCIE_REF] = &axg_pcie_ref.hw,
[CLKID_PCIE_CML_EN0] = &axg_pcie_cml_en0.hw,
[CLKID_PCIE_CML_EN1] = &axg_pcie_cml_en1.hw,
- [CLKID_MIPI_ENABLE] = &axg_mipi_enable.hw,
[CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
[CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
[CLKID_GEN_CLK] = &axg_gen_clk.hw,
@@ -2115,7 +2113,6 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_pcie_ref,
&axg_pcie_cml_en0,
&axg_pcie_cml_en1,
- &axg_mipi_enable,
&axg_gen_clk_sel,
&axg_gen_clk_div,
&axg_gen_clk,
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index 481b307ea3cb..23ea87964af2 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -16,7 +16,6 @@
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
-#define HHI_MIPI_CNTL0 0x00
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index b17a13e9337c..49f27fe53213 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -365,13 +365,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- unsigned int enabled, m, n, frac = 0, ret;
+ unsigned int enabled, m, n, frac = 0;
unsigned long old_rate;
+ int ret;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
- old_rate = rate;
+ old_rate = clk_hw_get_rate(hw);
ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
if (ret)
@@ -393,7 +394,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (!enabled)
return 0;
- if (meson_clk_pll_enable(hw)) {
+ ret = meson_clk_pll_enable(hw);
+ if (ret) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
/*
@@ -405,7 +407,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
meson_clk_pll_set_rate(hw, old_rate, parent_rate);
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 862f0756b50f..a844d35b553a 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -52,15 +52,6 @@ static const struct pll_params_table sys_pll_params_table[] = {
{ /* sentinel */ },
};
-static struct clk_fixed_rate meson8b_xtal = {
- .fixed_rate = 24000000,
- .hw.init = &(struct clk_init_data){
- .name = "xtal",
- .num_parents = 0,
- .ops = &clk_fixed_rate_ops,
- },
-};
-
static struct clk_regmap meson8b_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -2715,7 +2706,6 @@ static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
static struct clk_hw_onecell_data meson8_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -2922,7 +2912,6 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -3140,7 +3129,6 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -3725,36 +3713,19 @@ static struct meson8b_nb_data meson8b_cpu_nb_data = {
.nb.notifier_call = meson8b_cpu_clk_notifier_cb,
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
const char *notifier_clk_name;
struct clk *notifier_clk;
- void __iomem *clk_base;
struct regmap *map;
int i, ret;
map = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(map)) {
- pr_info("failed to get HHI regmap - Trying obsolete regs\n");
-
- /* Generic clocks, PLLs and some of the reset-bits */
- clk_base = of_iomap(np, 1);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return;
- }
-
- map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config);
- if (IS_ERR(map))
- return;
+		pr_err("failed to get HHI regmap\n");
+ return;
}
rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
@@ -3778,16 +3749,10 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
meson8b_clk_regmaps[i]->map = map;
/*
- * always skip CLKID_UNUSED and also skip XTAL if the .dtb provides the
- * XTAL clock as input.
+ * register all clks and start with the first used ID (which is
+ * CLKID_PLL_FIXED)
*/
- if (!IS_ERR(of_clk_get_by_name(np, "xtal")))
- i = CLKID_PLL_FIXED;
- else
- i = CLKID_XTAL;
-
- /* register all clks */
- for (; i < CLK_NR_CLKS; i++) {
+ for (i = CLKID_PLL_FIXED; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
if (!clk_hw_onecell_data->hws[i])
continue;
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index eea69d498bd2..7aa7f4a9564f 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
diff --git a/drivers/clk/mstar/Kconfig b/drivers/clk/mstar/Kconfig
new file mode 100644
index 000000000000..de37e1bce2d2
--- /dev/null
+++ b/drivers/clk/mstar/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MSTAR_MSC313_MPLL
+ bool "MStar MPLL driver"
+ depends on ARCH_MSTARV7 || COMPILE_TEST
+ default ARCH_MSTARV7
+ select REGMAP_MMIO
+ help
+ Support for the MPLL PLL and dividers block present on
+ MStar/Sigmastar SoCs.
diff --git a/drivers/clk/mstar/Makefile b/drivers/clk/mstar/Makefile
new file mode 100644
index 000000000000..f8dcd25ede1d
--- /dev/null
+++ b/drivers/clk/mstar/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for mstar specific clk
+#
+
+obj-$(CONFIG_MSTAR_MSC313_MPLL) += clk-msc313-mpll.o
diff --git a/drivers/clk/mstar/clk-msc313-mpll.c b/drivers/clk/mstar/clk-msc313-mpll.c
new file mode 100644
index 000000000000..61beb4e87525
--- /dev/null
+++ b/drivers/clk/mstar/clk-msc313-mpll.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MStar MSC313 MPLL driver
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#define REG_CONFIG1 0x8
+#define REG_CONFIG2 0xc
+
+static const struct regmap_config msc313_mpll_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 4,
+};
+
+static const struct reg_field config1_loop_div_first = REG_FIELD(REG_CONFIG1, 8, 9);
+static const struct reg_field config1_input_div_first = REG_FIELD(REG_CONFIG1, 4, 5);
+static const struct reg_field config2_output_div_first = REG_FIELD(REG_CONFIG2, 12, 13);
+static const struct reg_field config2_loop_div_second = REG_FIELD(REG_CONFIG2, 0, 7);
+
+static const unsigned int output_dividers[] = {
+ 2, 3, 4, 5, 6, 7, 10
+};
+
+#define NUMOUTPUTS (ARRAY_SIZE(output_dividers) + 1)
+
+struct msc313_mpll {
+ struct clk_hw clk_hw;
+ struct regmap_field *input_div;
+ struct regmap_field *loop_div_first;
+ struct regmap_field *loop_div_second;
+ struct regmap_field *output_div;
+ struct clk_hw_onecell_data *clk_data;
+};
+
+#define to_mpll(_hw) container_of(_hw, struct msc313_mpll, clk_hw)
+
+static unsigned long msc313_mpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msc313_mpll *mpll = to_mpll(hw);
+ unsigned int input_div, output_div, loop_first, loop_second;
+ unsigned long output_rate;
+
+ regmap_field_read(mpll->input_div, &input_div);
+ regmap_field_read(mpll->output_div, &output_div);
+ regmap_field_read(mpll->loop_div_first, &loop_first);
+ regmap_field_read(mpll->loop_div_second, &loop_second);
+
+ output_rate = parent_rate / (1 << input_div);
+ output_rate *= (1 << loop_first) * max(loop_second, 1U);
+ output_rate /= max(output_div, 1U);
+
+ return output_rate;
+}
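A worked example of the rate formula above, using hypothetical register field values (the real ones depend on boot-time PLL configuration):

	/*
	 * parent_rate = 24 MHz, input_div = 1, loop_first = 2,
	 * loop_second = 9, output_div = 1:
	 *
	 *	24 MHz / (1 << 1)     = 12 MHz
	 *	12 MHz * (1 << 2) * 9 = 432 MHz
	 *	432 MHz / 1           = 432 MHz
	 */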
+
+static const struct clk_ops msc313_mpll_ops = {
+ .recalc_rate = msc313_mpll_recalc_rate,
+};
+
+static const struct clk_parent_data mpll_parent = {
+ .index = 0,
+};
+
+static int msc313_mpll_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct msc313_mpll *mpll;
+ struct clk_init_data clk_init = { };
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ char *outputname;
+ struct clk_hw *divhw;
+ int ret, i;
+
+ mpll = devm_kzalloc(dev, sizeof(*mpll), GFP_KERNEL);
+ if (!mpll)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &msc313_mpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ mpll->input_div = devm_regmap_field_alloc(dev, regmap, config1_input_div_first);
+ if (IS_ERR(mpll->input_div))
+ return PTR_ERR(mpll->input_div);
+ mpll->output_div = devm_regmap_field_alloc(dev, regmap, config2_output_div_first);
+ if (IS_ERR(mpll->output_div))
+ return PTR_ERR(mpll->output_div);
+ mpll->loop_div_first = devm_regmap_field_alloc(dev, regmap, config1_loop_div_first);
+ if (IS_ERR(mpll->loop_div_first))
+ return PTR_ERR(mpll->loop_div_first);
+ mpll->loop_div_second = devm_regmap_field_alloc(dev, regmap, config2_loop_div_second);
+ if (IS_ERR(mpll->loop_div_second))
+ return PTR_ERR(mpll->loop_div_second);
+
+ mpll->clk_data = devm_kzalloc(dev, struct_size(mpll->clk_data, hws,
+ ARRAY_SIZE(output_dividers)), GFP_KERNEL);
+ if (!mpll->clk_data)
+ return -ENOMEM;
+
+ clk_init.name = dev_name(dev);
+ clk_init.ops = &msc313_mpll_ops;
+ clk_init.parent_data = &mpll_parent;
+ clk_init.num_parents = 1;
+ mpll->clk_hw.init = &clk_init;
+
+ ret = devm_clk_hw_register(dev, &mpll->clk_hw);
+ if (ret)
+ return ret;
+
+ mpll->clk_data->num = NUMOUTPUTS;
+ mpll->clk_data->hws[0] = &mpll->clk_hw;
+
+ for (i = 0; i < ARRAY_SIZE(output_dividers); i++) {
+ outputname = devm_kasprintf(dev, GFP_KERNEL, "%s_div_%u",
+ clk_init.name, output_dividers[i]);
+ if (!outputname)
+ return -ENOMEM;
+ divhw = devm_clk_hw_register_fixed_factor(dev, outputname,
+ clk_init.name, 0, 1, output_dividers[i]);
+ if (IS_ERR(divhw))
+ return PTR_ERR(divhw);
+ mpll->clk_data->hws[i + 1] = divhw;
+ }
+
+ platform_set_drvdata(pdev, mpll);
+
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ mpll->clk_data);
+}
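Reading the hws[] layout built above: cell 0 of the resulting onecell provider is the MPLL itself, and cells 1..7 are the fixed-factor outputs (divider values from output_dividers[]; the actual clock names also carry the dev_name() prefix):

	/*
	 * of_clk_hw_onecell_get() index map:
	 *   0        -> the MPLL itself
	 *   N (1..7) -> MPLL / output_dividers[N - 1], i.e. /2 /3 /4 /5 /6 /7 /10
	 */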
+
+static const struct of_device_id msc313_mpll_of_match[] = {
+ { .compatible = "mstar,msc313-mpll", },
+ {}
+};
+
+static struct platform_driver msc313_mpll_driver = {
+ .driver = {
+ .name = "mstar-msc313-mpll",
+ .of_match_table = msc313_mpll_of_match,
+ },
+ .probe = msc313_mpll_probe,
+};
+builtin_platform_driver(msc313_mpll_driver);
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index b4259b60dcfd..08ba59ec3fb1 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -30,7 +30,7 @@
#define APN806_MAX_DIVIDER 32
-/**
+/*
* struct cpu_dfs_regs: CPU DFS register mapping
* @divider_reg: full integer ratio from PLL frequency to CPU clock frequency
* @force_reg: request to force new ratio regardless of relation to other clocks
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d32bb12cd8d0..45646b867cdb 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -28,6 +28,14 @@ config QCOM_A53PLL
Say Y if you want to support higher CPU frequencies on MSM8916
devices.
+config QCOM_A7PLL
+ tristate "SDX55 A7 PLL"
+ help
+ Support for the A7 PLL on SDX55 devices. It provides the CPU with
+ frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on SDX55
+ devices.
+
config QCOM_CLK_APCS_MSM8916
tristate "MSM8916 APCS Clock Controller"
depends on QCOM_APCS_IPC || COMPILE_TEST
@@ -46,6 +54,15 @@ config QCOM_CLK_APCC_MSM8996
Say Y if you want to support CPU clock scaling using CPUfreq
drivers for dynamic power management.
+config QCOM_CLK_APCS_SDX55
+ tristate "SDX55 APCS Clock Controller"
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ help
+	  Support for the APCS Clock Controller on the SDX55 platform. The
+	  APCS manages the mux and divider which feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as SDX55.
+
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
depends on MFD_QCOM_RPM
@@ -317,6 +334,24 @@ config SC_GCC_7180
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
+config SC_GCC_7280
+ tristate "SC7280 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC7280 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, PCIe etc.
+
+config SC_GCC_8180X
+ tristate "SC8180X Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC8180X devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
config SC_LPASS_CORECC_7180
tristate "SC7180 LPASS Core Clock Controller"
select SC_GCC_7180
@@ -366,6 +401,24 @@ config SDM_GCC_660
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
+config SDM_MMCC_660
+ tristate "SDM660 Multimedia Clock Controller"
+ select SDM_GCC_660
+ select QCOM_GDSC
+ help
+ Support for the multimedia clock controller on SDM660 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config SDM_GPUCC_660
+ tristate "SDM660 Graphics Clock Controller"
+ select SDM_GCC_660
+ select QCOM_GDSC
+ help
+ Support for the graphics clock controller on SDM630/636/660 devices.
+ Say Y if you want to support graphics controller devices and
+	  functionality such as 3D graphics.
+
config QCS_TURING_404
tristate "QCS404 Turing Clock Controller"
help
@@ -454,6 +507,14 @@ config SM_GCC_8250
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8350
+ tristate "SM8350 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8350 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9e5e0e3cb7b4..c8291312e723 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -44,8 +44,10 @@ obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_A7PLL) += a7-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_APCC_MSM8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_CLK_APCS_SDX55) += apcs-sdx55.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
@@ -55,6 +57,8 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
+obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
+obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
@@ -62,6 +66,8 @@ obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
+obj-$(CONFIG_SDM_MMCC_660) += mmcc-sdm660.o
+obj-$(CONFIG_SDM_GPUCC_660) += gpucc-sdm660.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
@@ -70,6 +76,7 @@ obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
+obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
new file mode 100644
index 000000000000..e171d3caf2cf
--- /dev/null
+++ b/drivers/clk/qcom/a7-pll.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm A7 PLL driver
+ *
+ * Copyright (c) 2020, Linaro Limited
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-alpha-pll.h"
+
+#define LUCID_PLL_OFF_L_VAL 0x04
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll a7pll = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "a7pll",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config a7pll_config = {
+ .l = 0x39,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x2261,
+ .config_ctl_hi1_val = 0x029A699C,
+ .user_ctl_val = 0x1,
+ .user_ctl_hi_val = 0x805,
+};
+
+static const struct regmap_config a7pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int qcom_a7pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ void __iomem *base;
+ u32 l_val;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &a7pll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Configure PLL only if the l_val is zero */
+ regmap_read(regmap, a7pll.offset + LUCID_PLL_OFF_L_VAL, &l_val);
+ if (!l_val)
+ clk_lucid_pll_configure(&a7pll, regmap, &a7pll_config);
+
+ ret = devm_clk_register_regmap(dev, &a7pll.clkr);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &a7pll.clkr.hw);
+}
+
+static const struct of_device_id qcom_a7pll_match_table[] = {
+ { .compatible = "qcom,sdx55-a7pll" },
+ { }
+};
+
+static struct platform_driver qcom_a7pll_driver = {
+ .probe = qcom_a7pll_probe,
+ .driver = {
+ .name = "qcom-a7pll",
+ .of_match_table = qcom_a7pll_match_table,
+ },
+};
+module_platform_driver(qcom_a7pll_driver);
+
+MODULE_DESCRIPTION("Qualcomm A7 PLL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
new file mode 100644
index 000000000000..d0edabebf9c2
--- /dev/null
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDX55 APCS clock controller driver
+ *
+ * Copyright (c) 2020, Linaro Limited
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+static const u32 apcs_mux_clk_parent_map[] = { 0, 1, 5 };
+
+static const struct clk_parent_data pdata[] = {
+ { .fw_name = "ref" },
+ { .fw_name = "aux" },
+ { .fw_name = "pll" },
+};
+
+/*
+ * We use the notifier function for switching to a temporary safe configuration
+ * (mux and divider) while the A7 PLL is reconfigured.
+ */
+static int a7cc_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ int ret = 0;
+ struct clk_regmap_mux_div *md = container_of(nb,
+ struct clk_regmap_mux_div,
+ clk_nb);
+ if (event == PRE_RATE_CHANGE)
+		/* Set the mux and divider to a safe frequency (400 MHz) */
+ ret = mux_div_set_src_div(md, 1, 2);
+
+ return notifier_from_errno(ret);
+}
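So a clk_set_rate() on the A7 PLL first parks the CPU on a stable parent before the PLL drops lock, roughly as follows (a reading of the flow above; raw source 1 maps to the "aux" entry of pdata[] via apcs_mux_clk_parent_map):

	/*
	 * clk_set_rate(a7pll, new_rate)
	 *   -> PRE_RATE_CHANGE notifier: mux_div_set_src_div(md, 1, 2),
	 *      parking the CPU on "aux" at the safe ~400 MHz noted above;
	 *   -> the PLL reconfigures and relocks;
	 *   -> a later rate request on the mux moves the CPU back to "pll".
	 */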
+
+static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct device *cpu_dev;
+ struct clk_regmap_mux_div *a7cc;
+ struct regmap *regmap;
+ struct clk_init_data init = { };
+ int ret;
+
+ regmap = dev_get_regmap(parent, NULL);
+ if (!regmap) {
+ dev_err_probe(dev, -ENODEV, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ a7cc = devm_kzalloc(dev, sizeof(*a7cc), GFP_KERNEL);
+ if (!a7cc)
+ return -ENOMEM;
+
+ init.name = "a7mux";
+ init.parent_data = pdata;
+ init.num_parents = ARRAY_SIZE(pdata);
+ init.ops = &clk_regmap_mux_div_ops;
+
+ a7cc->clkr.hw.init = &init;
+ a7cc->clkr.regmap = regmap;
+ a7cc->reg_offset = 0x8;
+ a7cc->hid_width = 5;
+ a7cc->hid_shift = 0;
+ a7cc->src_width = 3;
+ a7cc->src_shift = 8;
+ a7cc->parent_map = apcs_mux_clk_parent_map;
+
+ a7cc->pclk = devm_clk_get(parent, "pll");
+ if (IS_ERR(a7cc->pclk)) {
+ ret = PTR_ERR(a7cc->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err_probe(dev, ret, "Failed to get PLL clk\n");
+ return ret;
+ }
+
+ a7cc->clk_nb.notifier_call = a7cc_notifier_cb;
+ ret = clk_notifier_register(a7cc->pclk, &a7cc->clk_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register clock notifier\n");
+ return ret;
+ }
+
+ ret = devm_clk_register_regmap(dev, &a7cc->clkr);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register regmap clock\n");
+ goto err;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &a7cc->clkr.hw);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to add clock provider\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, a7cc);
+
+ /*
+ * Attach the power domain to the CPU device. Since there is no
+ * dedicated driver for CPUs and the SDX55 platform lacks a
+ * hardware-specific CPUFreq driver, there is no better place to
+ * do this, so do it here.
+ */
+ cpu_dev = get_cpu_device(0);
+ dev_pm_domain_attach(cpu_dev, true);
+
+ return 0;
+
+err:
+ clk_notifier_unregister(a7cc->pclk, &a7cc->clk_nb);
+ return ret;
+}
+
+static int qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
+{
+ struct device *cpu_dev = get_cpu_device(0);
+ struct clk_regmap_mux_div *a7cc = platform_get_drvdata(pdev);
+
+ clk_notifier_unregister(a7cc->pclk, &a7cc->clk_nb);
+ dev_pm_domain_detach(cpu_dev, true);
+
+ return 0;
+}
+
+static struct platform_driver qcom_apcs_sdx55_clk_driver = {
+ .probe = qcom_apcs_sdx55_clk_probe,
+ .remove = qcom_apcs_sdx55_clk_remove,
+ .driver = {
+ .name = "qcom-sdx55-acps-clk",
+ },
+};
+module_platform_driver(qcom_apcs_sdx55_clk_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SDX55 APCS clock driver");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 21c357c26ec4..c6eb99169ddc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -156,6 +156,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
/* LUCID PLL specific settings and offsets */
#define LUCID_PCAL_DONE BIT(27)
+/* LUCID 5LPE PLL specific settings and offsets */
+#define LUCID_5LPE_PCAL_DONE BIT(11)
+#define LUCID_5LPE_ALPHA_PLL_ACK_LATCH BIT(13)
+#define LUCID_5LPE_PLL_LATCH_INPUT BIT(14)
+#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
+
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
@@ -777,15 +783,15 @@ static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
struct regmap *regmap)
{
- u32 mode_regval, opmode_regval;
+ u32 mode_val, opmode_val;
int ret;
- ret = regmap_read(regmap, PLL_MODE(pll), &mode_regval);
- ret |= regmap_read(regmap, PLL_OPMODE(pll), &opmode_regval);
+ ret = regmap_read(regmap, PLL_MODE(pll), &mode_val);
+ ret |= regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
if (ret)
return 0;
- return ((opmode_regval & PLL_RUN) && (mode_regval & PLL_OUTCTRL));
+ return ((opmode_val & PLL_RUN) && (mode_val & PLL_OUTCTRL));
}
static int clk_trion_pll_is_enabled(struct clk_hw *hw)
@@ -1445,12 +1451,12 @@ EXPORT_SYMBOL_GPL(clk_trion_pll_configure);
static int __alpha_pll_trion_prepare(struct clk_hw *hw, u32 pcal_done)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- u32 regval;
+ u32 val;
int ret;
/* Return early if calibration is not needed. */
- regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
- if (regval & pcal_done)
+ regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &val);
+ if (val & pcal_done)
return 0;
/* On/off to calibrate */
@@ -1471,12 +1477,12 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
return __alpha_pll_trion_prepare(hw, LUCID_PCAL_DONE);
}
-static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long prate)
+static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u32 latch_bit, u32 latch_ack)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long rrate;
- u32 regval, l, alpha_width = pll_alpha_width(pll);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
u64 a;
int ret;
@@ -1490,22 +1496,20 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
/* Latch the PLL input */
- ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
- PLL_UPDATE, PLL_UPDATE);
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), latch_bit, latch_bit);
if (ret)
return ret;
/* Wait for 2 reference cycles before checking the ACK bit. */
udelay(1);
- regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
- if (!(regval & ALPHA_PLL_ACK_LATCH)) {
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (!(val & latch_ack)) {
pr_err("Lucid PLL latch failed. Output may be unstable!\n");
return -EINVAL;
}
/* Return the latch input to 0 */
- ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
- PLL_UPDATE, 0);
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), latch_bit, 0);
if (ret)
return ret;
@@ -1520,6 +1524,12 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __alpha_pll_trion_set_rate(hw, rate, prate, PLL_UPDATE, ALPHA_PLL_ACK_LATCH);
+}
+
const struct clk_ops clk_alpha_pll_trion_ops = {
.prepare = alpha_pll_trion_prepare,
.enable = clk_trion_pll_enable,
@@ -1600,3 +1610,170 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+
+static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ /* Check if the PLL is already enabled */
+ ret = trion_pll_is_enabled(pll, pll->clkr.regmap);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ regmap_write(pll->clkr.regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ return regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+}
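+
+/*
+ * Editor's note: in FSM (voting) mode the hardware sequences the PLL on
+ * and off by itself, so the enable path above only sets the vote bit via
+ * clk_enable_regmap() and waits for lock; the manual RESET_N/OPMODE
+ * sequence is used for non-FSM PLLs only.
+ */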
+
+static void alpha_pll_lucid_5lpe_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(pll->clkr.regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+/*
+ * The Lucid 5LPE PLL requires a power-on self-calibration, which happens
+ * when the PLL comes out of reset. Calibrate here in case it has not
+ * completed.
+ */
+static int alpha_pll_lucid_5lpe_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *p;
+ u32 val = 0;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (val & LUCID_5LPE_PCAL_DONE)
+ return 0;
+
+ p = clk_hw_get_parent(hw);
+ if (!p)
+ return -EINVAL;
+
+ ret = alpha_pll_lucid_5lpe_enable(hw);
+ if (ret)
+ return ret;
+
+ alpha_pll_lucid_5lpe_disable(hw);
+
+ return 0;
+}
+
+static int alpha_pll_lucid_5lpe_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __alpha_pll_trion_set_rate(hw, rate, prate,
+ LUCID_5LPE_PLL_LATCH_INPUT,
+ LUCID_5LPE_ALPHA_PLL_ACK_LATCH);
+}
+
+static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+ u32 mask;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN)
+ return 0;
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ /* Reset val so a missing table entry falls back to divide-by-1 */
+ for (i = 0, val = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ mask = GENMASK(pll->width + pll->post_div_shift - 1, pll->post_div_shift);
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ mask, val << pll->post_div_shift);
+}
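+
+/*
+ * Worked example (editor's addition, with made-up numbers): for a post
+ * divider with post_div_shift = 8 and width = 4, the mask computed above
+ * is GENMASK(11, 8); a 1200 MHz parent and a 600 MHz request give
+ * div = 2, and whatever 'val' the post_div_table maps to div 2 is then
+ * written into bits [11:8] of PLL_USER_CTL.
+ */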
+
+const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
+ .prepare = alpha_pll_lucid_5lpe_prepare,
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 0ea30d2f3da1..6943e933be0f 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -144,6 +144,10 @@ extern const struct clk_ops clk_alpha_pll_lucid_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
extern const struct clk_ops clk_alpha_pll_agera_ops;
+extern const struct clk_ops clk_alpha_pll_lucid_5lpe_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 86d2b8b90173..99efcc7f8d88 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -153,6 +153,15 @@ struct clk_rcg2 {
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+struct clk_rcg2_gfx3d {
+ u8 div;
+ struct clk_rcg2 rcg;
+ struct clk_hw **hws;
+};
+
+#define to_clk_rcg2_gfx3d(_hw) \
+ container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
+
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_edp_pixel_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 59a5a0f261f3..42f13a2d1cc1 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -728,40 +728,51 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_rate_request parent_req = { };
- struct clk_hw *p2, *p8, *p9, *xo;
- unsigned long p9_rate;
+ struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
+ struct clk_hw *xo, *p0, *p1, *p2;
+ unsigned long request, p0_rate;
int ret;
+ p0 = cgfx->hws[0];
+ p1 = cgfx->hws[1];
+ p2 = cgfx->hws[2];
+ /*
+ * This function ping-pongs the RCG between PLLs: it requires at
+ * least one fixed PLL and two variable ones, otherwise it cannot
+ * work correctly.
+ */
+ if (WARN_ON(!p0 || !p1 || !p2))
+ return -EINVAL;
+
xo = clk_hw_get_parent_by_index(hw, 0);
if (req->rate == clk_hw_get_rate(xo)) {
req->best_parent_hw = xo;
return 0;
}
- p9 = clk_hw_get_parent_by_index(hw, 2);
- p2 = clk_hw_get_parent_by_index(hw, 3);
- p8 = clk_hw_get_parent_by_index(hw, 4);
+ request = req->rate;
+ if (cgfx->div > 1)
+ parent_req.rate = request = request * cgfx->div;
- /* PLL9 is a fixed rate PLL */
- p9_rate = clk_hw_get_rate(p9);
+ /* This has to be a fixed rate PLL */
+ p0_rate = clk_hw_get_rate(p0);
- parent_req.rate = req->rate = min(req->rate, p9_rate);
- if (req->rate == p9_rate) {
- req->rate = req->best_parent_rate = p9_rate;
- req->best_parent_hw = p9;
+ if (request == p0_rate) {
+ req->rate = req->best_parent_rate = p0_rate;
+ req->best_parent_hw = p0;
return 0;
}
- if (req->best_parent_hw == p9) {
+ if (req->best_parent_hw == p0) {
/* Are we going back to a previously used rate? */
- if (clk_hw_get_rate(p8) == req->rate)
- req->best_parent_hw = p8;
- else
+ if (clk_hw_get_rate(p2) == request)
req->best_parent_hw = p2;
- } else if (req->best_parent_hw == p8) {
- req->best_parent_hw = p2;
+ else
+ req->best_parent_hw = p1;
+ } else if (req->best_parent_hw == p2) {
+ req->best_parent_hw = p1;
} else {
- req->best_parent_hw = p8;
+ req->best_parent_hw = p2;
}
ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
@@ -769,6 +780,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
return ret;
req->rate = req->best_parent_rate = parent_req.rate;
+ if (cgfx->div > 1)
+ req->rate /= cgfx->div;
return 0;
}
@@ -776,12 +789,16 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index)
{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
+ struct clk_rcg2 *rcg = &cgfx->rcg;
u32 cfg;
int ret;
- /* Just mux it, we don't use the division or m/n hardware */
cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ /* On some targets, the GFX3D RCG may need to divide the PLL frequency */
+ if (cgfx->div > 1)
+ cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;
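+
+ /*
+ * Worked example (editor's note): RCG source dividers are stored
+ * as (2 * divisor - 1) so that half-integer steps can be encoded;
+ * cgfx->div == 2 therefore writes 3 into the CFG divider field,
+ * i.e. a plain divide-by-2.
+ */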
+
ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index ce80db27ccf2..92ac4e0d7dbe 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -87,6 +87,7 @@ EXPORT_SYMBOL_GPL(clk_disable_regmap);
/**
* devm_clk_register_regmap - register a clk_regmap clock
*
+ * @dev: reference to the caller's device
* @rclk: clk to operate on
*
* Clocks that use regmap for their register I/O should register their
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index f71d228fd6bd..a18811c38018 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -73,62 +73,6 @@
}, \
}
-#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
- static struct clk_rpm _platform##_##_active; \
- static struct clk_rpm _platform##_##_name = { \
- .rpm_clk_id = (r_id), \
- .active_only = true, \
- .peer = &_platform##_##_active, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_name, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
- }, \
- }; \
- static struct clk_rpm _platform##_##_active = { \
- .rpm_clk_id = (r_id), \
- .peer = &_platform##_##_name, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_active, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
- }, \
- }
-
-#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
- static struct clk_rpm _platform##_##_active; \
- static struct clk_rpm _platform##_##_name = { \
- .rpm_clk_id = (r_id), \
- .peer = &_platform##_##_active, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_name, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
- }, \
- }; \
- static struct clk_rpm _platform##_##_active = { \
- .rpm_clk_id = (r_id), \
- .active_only = true, \
- .peer = &_platform##_##_name, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_active, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
- }, \
- }
-
#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
struct rpm_cc;
@@ -450,13 +394,6 @@ static const struct clk_ops clk_rpm_ops = {
.recalc_rate = clk_rpm_recalc_rate,
};
-static const struct clk_ops clk_rpm_branch_ops = {
- .prepare = clk_rpm_prepare,
- .unprepare = clk_rpm_unprepare,
- .round_rate = clk_rpm_round_rate,
- .recalc_rate = clk_rpm_recalc_rate,
-};
-
/* MSM8660/APQ8060 */
DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 6a2a13c5058e..91dc390a583b 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -348,6 +348,10 @@ DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk3, rf_clk3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
DEFINE_CLK_RPMH_BCM(sdm845, ce, "CE0");
@@ -431,6 +435,26 @@ static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
.num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
};
+static struct clk_hw *sc8180x_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sc8180x_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sc8180x_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sc8180x_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sc8180x_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sc8180x_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sc8180x_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
+ .clks = sc8180x_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
+};
+
DEFINE_CLK_RPMH_VRM(sm8250, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2);
static struct clk_hw *sm8250_rpmh_clocks[] = {
@@ -486,6 +510,27 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
.num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
};
+static struct clk_hw *sc7280_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_PKA_CLK] = &sm8350_pka.hw,
+ [RPMH_HWKM_CLK] = &sm8350_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc7280 = {
+ .clks = sc7280_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc7280_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -570,11 +615,13 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index ef5137fd50f3..8abad4032de7 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -1276,16 +1276,15 @@ static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_fepll *pll = to_clk_fepll(hw);
const struct freq_tbl *f;
u32 mask;
- int ret;
f = qcom_find_freq(pll->freq_tbl, rate);
if (!f)
return -EINVAL;
mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift;
- ret = regmap_update_bits(pll->cdiv.clkr.regmap,
- pll->cdiv.reg, mask,
- f->pre_div << pll->cdiv.shift);
+ regmap_update_bits(pll->cdiv.clkr.regmap,
+ pll->cdiv.reg, mask,
+ f->pre_div << pll->cdiv.shift);
/*
* There is no status bit which can be checked for successful CPU
* divider update operation so using delay for the same.
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 9d7016bcd680..050c91af888e 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
.name = "gpll0",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll0_out_even = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_main = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_test = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
.name = "gpll1",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll1_out_even = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_main = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_test = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll2 = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
.name = "gpll2",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll2_out_even = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_main = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_test = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
.name = "gpll3",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll3_out_even = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_main = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_test = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll4 = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
.name = "gpll4",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll4_out_even = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_main = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_test = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
@@ -1341,6 +1341,22 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
+static struct clk_branch gcc_mmss_gpll0_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_main",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_mss_gpll0_div_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
@@ -2065,6 +2081,12 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ /*
+ * The GPU IOMMU depends on this clock, and the hypervisor
+ * will crash the SoC if it goes down, due to secure
+ * context protection.
+ */
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -2144,6 +2166,25 @@ static struct clk_branch gcc_hmss_trig_clk = {
},
};
+static struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
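+
+/*
+ * Editor's note: in the qcom F() macro the third argument is the real
+ * divider (stored by the macro as 2 * div - 1), so with GPLL0 nominally
+ * at 600 MHz the rows above come out as 600 / 2 = 300 MHz and
+ * 600 / 1 = 600 MHz.
+ */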
+
+static struct clk_rcg2 hmss_gpll0_clk_src = {
+ .cmd_rcgr = 0x4805c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_gpll0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "hmss_gpll0_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_names_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.halt_reg = 0x9004,
.halt_check = BRANCH_HALT,
@@ -2944,6 +2985,8 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
[GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
+ [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index d82d725ac231..88e896abb663 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
@@ -934,19 +919,6 @@ static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
},
};
-static struct clk_branch gcc_camera_xo_clk = {
- .halt_reg = 0xb02c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb02c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ce1_ahb_clk = {
.halt_reg = 0x4100c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1111,19 +1083,6 @@ static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
},
};
-static struct clk_branch gcc_disp_xo_clk = {
- .halt_reg = 0xb030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb030,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_disp_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@@ -2174,19 +2133,6 @@ static struct clk_branch gcc_video_throttle_axi_clk = {
},
};
-static struct clk_branch gcc_video_xo_clk = {
- .halt_reg = 0xb028,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb028,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_video_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_mss_cfg_ahb_clk = {
.halt_reg = 0x8a000,
.halt_check = BRANCH_HALT,
@@ -2317,10 +2263,8 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
- [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
@@ -2333,7 +2277,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
- [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@@ -2429,7 +2372,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
[GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
[GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
- [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
[GPLL6] = &gpll6.clkr,
@@ -2519,12 +2461,16 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
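+ /*
+ * Editor's note: the offsets added above are the enable registers
+ * of the branch clocks removed from this file (0x0b008 camera AHB,
+ * 0x0b02c camera XO, 0x0b028 video XO, 0x0b030 disp XO); setting
+ * BIT(0) keeps them permanently enabled instead of exposing them
+ * as gateable clocks.
+ */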
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
new file mode 100644
index 000000000000..22736c16ed16
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -0,0 +1,3603 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL0_OUT_ODD,
+ P_GCC_GPLL10_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_GCC_MSS_GPLL0_MAIN_DIV_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
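+
+/*
+ * Editor's note: each clk_div_table entry pairs a raw register value with
+ * the divisor it selects, so { 0x1, 2 } programs field value 1 to divide
+ * the 600 MHz gcc_gpll0 by 2, giving 300 MHz on gcc_gpll0_out_even.
+ */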
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_gcc_gpll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll10 = {
+ .offset = 0x1e000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_main_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_main_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "pcie_1_pipe_clk", .name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL10_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll10.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_MSS_GPLL0_MAIN_DIV_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_mss_gpll0_main_div_clk_src.clkr.hw },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x8d054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0xf060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x9e060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x4800c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
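+
+/*
+ * Worked example (editor's addition): rows with a non-zero M/N pair use
+ * rate = (parent / div) * M / N. With gcc_gpll0_out_even at 300 MHz,
+ * F(7372800, ..., 1, 384, 15625) is 300000000 * 384 / 15625 = 7372800,
+ * and the 2.5 row is simply 300 MHz / 2.5 = 120 MHz.
+ */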
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL10_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL10_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x7500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x7502c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
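+/*
+ * CLK_OPS_PARENT_ENABLE keeps the selected parent (here e.g. GPLL9 for
+ * the top rate) enabled around rate and parent changes, which these
+ * operations require on this RCG.
+ */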
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
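+/* The mock UTMI clocks run straight off the 19.2 MHz XO (TCXO). */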
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x9e020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb30_sec_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9e038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9e064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sec_ctrl_clk_src[] = {
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
+ .cmd_rcgr = 0x3d02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sec_ctrl_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sec_ctrl_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
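+/*
+ * Post-dividers sitting behind the RCGs above. clk_regmap_div_ro_ops
+ * treat the divider as read-only: the rate is recalculated from whatever
+ * value is already in the register, and rate requests propagate to the
+ * parent via CLK_SET_RATE_PARENT.
+ */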
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x48024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9e050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
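+/*
+ * The *_clkref_en branches only gate the reference clock supplied to the
+ * respective PHY, so no parent is modelled.
+ */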
+static struct clk_branch gcc_pcie_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_edp_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_edp_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
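+/*
+ * Branches whose enable_reg (0x52000/0x52008) differs from halt_reg are
+ * toggled through shared vote registers rather than their own enable bit.
+ * hwcg_reg/hwcg_bit, where present, enable hardware clock gating for the
+ * branch.
+ */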
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x6b080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x8d084,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x90010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_center_sf_axi_clk = {
+ .halt_reg = 0x8d088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_center_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x9e080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x2601c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2601c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x9e07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
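+/*
+ * clk_branch2_aon_ops provide no disable hook: once enabled, these
+ * always-on branches are never gated by software.
+ */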
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_clk = {
+ .halt_reg = 0x8d080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
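+/*
+ * Source branches handing GPLL0 (or its even divider) to other clock
+ * controllers, here the display CC and the gcc_gpu_* equivalents below;
+ * they have no halt bit, hence BRANCH_HALT_DELAY (wait a fixed delay
+ * instead of polling).
+ */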
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_core_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x90018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1872c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x1885c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x75004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x75024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the AHB clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x48178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_throttle_pcie_ahb_clk = {
+ .halt_reg = 0x9001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_throttle_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_titan_nrt_throttle_core_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_titan_nrt_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_titan_rt_throttle_core_clk = {
+ .halt_reg = 0x26018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_titan_rt_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
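+/*
+ * The UFS symbol clocks are generated by the PHY, so their status cannot
+ * be polled reliably; BRANCH_HALT_DELAY merely inserts a delay instead of
+ * checking the halt bit.
+ */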
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x9e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x9e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x9e018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xf05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_lpass_clk = {
+ .halt_reg = 0x47020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_lpass_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_offline_axi_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_offline_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_mss_q6ss_boot_clk_src = {
+ .reg = 0x8a2a4,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6ss_boot_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x9e054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x9e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x9e05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9e05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_mvp_throttle_core_clk = {
+ .halt_reg = 0x28010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_mvp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_ahb_clk = {
+ .halt_reg = 0x9d154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_ahb_bdg_mst_clk = {
+ .halt_reg = 0x9d158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_ahb_bdg_mst_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_rscp_clk = {
+ .halt_reg = 0x9d16c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d16c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_rscp_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
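+/*
+ * GDSCs (globally distributed switch controllers) are the SoC's
+ * switchable power domains. The VOTABLE flag marks domains shared
+ * with other masters, where disabling only drops this CPU's vote.
+ */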
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "gcc_pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0x9e004,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d05c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
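+/*
+ * Lookup table for clk_regmap registration; the indices are the GCC_*
+ * IDs from the dt-bindings header, so devicetree consumers resolve
+ * straight to these descriptors.
+ */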
+static struct clk_regmap *gcc_sc7280_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL0_OUT_ODD] = &gcc_gpll0_out_odd.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL10] = &gcc_gpll10.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_THROTTLE_CORE_CLK] = &gcc_pcie_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
+ [GCC_TITAN_NRT_THROTTLE_CORE_CLK] =
+ &gcc_titan_nrt_throttle_core_clk.clkr,
+ [GCC_TITAN_RT_THROTTLE_CORE_CLK] = &gcc_titan_rt_throttle_core_clk.clkr,
+ [GCC_UFS_1_CLKREF_EN] = &gcc_ufs_1_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] =
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] =
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] =
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_MVP_THROTTLE_CORE_CLK] =
+ &gcc_video_mvp_throttle_core_clk.clkr,
+ [GCC_CFG_NOC_LPASS_CLK] = &gcc_cfg_noc_lpass_clk.clkr,
+ [GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC] = &gcc_mss_gpll0_main_div_clk_src.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_OFFLINE_AXI_CLK] = &gcc_mss_offline_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_Q6SS_BOOT_CLK_SRC] = &gcc_mss_q6ss_boot_clk_src.clkr,
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK] =
+ &gcc_aggre_noc_pcie_center_sf_axi_clk.clkr,
+ [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr,
+ [GCC_EDP_CLKREF_EN] = &gcc_edp_clkref_en.clkr,
+ [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
+ [GCC_WPSS_AHB_CLK] = &gcc_wpss_ahb_clk.clkr,
+ [GCC_WPSS_AHB_BDG_MST_CLK] = &gcc_wpss_ahb_bdg_mst_clk.clkr,
+ [GCC_WPSS_RSCP_CLK] = &gcc_wpss_rscp_clk.clkr,
+};
+
+static struct gdsc *gcc_sc7280_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+};
+
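+/*
+ * Offsets of the block control reset (BCR) registers, exposed through
+ * the reset controller framework.
+ */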
+static const struct qcom_reset_map gcc_sc7280_resets[] = {
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC1_BCR] = { 0x75000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x9e000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
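+/*
+ * QUP serial-engine RCGs with hardware dynamic frequency switching
+ * (DFS) support; probe hands this list to qcom_cc_register_rcg_dfs().
+ */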
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+};
+
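+/* 32-bit registers on a 4-byte stride, spanning the whole GCC block */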
+static const struct regmap_config gcc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f128,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc7280_desc = {
+ .config = &gcc_sc7280_regmap_config,
+ .clks = gcc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc7280_clocks),
+ .resets = gcc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc7280_resets),
+ .gdscs = gcc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc7280_gdscs),
+};
+
+static const struct of_device_id gcc_sc7280_match_table[] = {
+ { .compatible = "qcom,gcc-sc7280" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc7280_match_table);
+
+static int gcc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep these clocks always on:
+ * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK,
+ * GCC_DISP_XO_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK,
+ * GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x2701c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sc7280_desc, regmap);
+}
+
+static struct platform_driver gcc_sc7280_driver = {
+ .probe = gcc_sc7280_probe,
+ .driver = {
+ .name = "gcc-sc7280",
+ .of_match_table = gcc_sc7280_match_table,
+ },
+};
+
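+/*
+ * Register at subsys_initcall so the GCC clocks are available before
+ * ordinary device initcalls run.
+ */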
+static int __init gcc_sc7280_init(void)
+{
+ return platform_driver_register(&gcc_sc7280_driver);
+}
+subsys_initcall(gcc_sc7280_init);
+
+static void __exit gcc_sc7280_exit(void)
+{
+ platform_driver_unregister(&gcc_sc7280_driver);
+}
+module_exit(gcc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
new file mode 100644
index 000000000000..90525ae1bb3a
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc8180x.c
@@ -0,0 +1,4629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc8180x.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_AUD_REF_CLK,
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL2_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL5_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
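+/* Operating VCO range of the Trion PLLs: 249.6 MHz to 2 GHz */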
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_trion_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
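+/*
+ * Each parent_map entry pairs a P_* source with its mux select value;
+ * the corresponding gcc_parents_N array lists the same parents, in the
+ * same order, as clk_parent_data.
+ */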
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_1[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo", },
+ { .fw_name = "sleep_clk", },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_MAIN, 2 },
+ { P_GPLL5_OUT_MAIN, 3 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .name = "gpll2" },
+ { .name = "gpll5" },
+ { .hw = &gpll1.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo", },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_6[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .name = "gppl9" },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_8[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .name = "aud_ref_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
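+/* Each F() entry is (rate, parent source, pre-divider, M, N) */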
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0xbe004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0xbf004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_npu_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(403000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ F(533000000, P_GPLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_npu_axi_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_npu_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0x9d02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3_aux_clk_src = {
+ .cmd_rcgr = 0xa302c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_1_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_1_core_clk_src = {
+ .cmd_rcgr = 0x4a00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_1_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_1_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
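+/*
+ * Frequency table shared by all QUP serial-engine RCGs, covering the
+ * common I2C/SPI/UART rates from 7.3728 MHz up to 128 MHz.
+ */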
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17998,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_2_axi_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_axi_clk_src = {
+ .cmd_rcgr = 0xa2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_ice_core_clk_src = {
+ .cmd_rcgr = 0xa2060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_2_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa2094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_unipro_core_clk_src = {
+ .cmd_rcgr = 0xa2078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x75060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x1001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
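+
+/*
+ * Branch (gate) clocks follow. A branch that names its RCG in parent_hws
+ * and sets CLK_SET_RATE_PARENT forwards clk_set_rate() requests up to the
+ * RCG; branches without a parent simply gate a source that is not modelled
+ * here.
+ */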
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x750c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
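+
+/*
+ * The *_hw_ctl branches below toggle BIT(1) (the hardware-control bit) of
+ * the same CBCR as their parent branch, delegating gating to hardware.
+ * They use clk_branch_simple_ops, which does not poll the halt bit, since
+ * the halt status is not reliable while the clock is hardware controlled.
+ */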
+
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x750c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x750c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750c0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_aggre_ufs_card_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_aggre_ufs_phy_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
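+
+/*
+ * Branches with enable_reg 0x52004, 0x5200c or 0x52014 are switched through
+ * shared branch-enable vote registers rather than their own CBCR enable bit.
+ * BRANCH_HALT_VOTED polls the halt bit only when enabling; on disable the
+ * clock may legitimately keep running because another master still votes
+ * for it.
+ */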
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0xb034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x10078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
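+
+/*
+ * The CLK_IS_CRITICAL flag above (also used on gcc_sys_noc_cpuss_ahb_clk
+ * below) makes the clk core enable the branch at registration and hold a
+ * permanent enable count, so it is never gated by the unused-clock sweep.
+ */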
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_ptp_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_rgmii_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0xbe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0xbf000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbf000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
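+
+/*
+ * The GPLL0 feed-through branches below have no halt_reg to poll;
+ * BRANCH_HALT_DELAY makes clk-branch wait a fixed delay after toggling
+ * the enable vote instead of checking a status bit.
+ */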
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_out_even.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_at_clk = {
+ .halt_reg = 0x4d010,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_npu_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_out_even.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_trig_clk = {
+ .halt_reg = 0x4d00c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_refgen_clk = {
+ .halt_reg = 0x6f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_phy_refgen_clk = {
+ .halt_reg = 0x6f034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie2_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_phy_refgen_clk = {
+ .halt_reg = 0x6f038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie3_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
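+
+/*
+ * PCIe pipe clocks are sourced from the PCIe PHY, which may be powered
+ * down when the branch is toggled, so BRANCH_HALT_SKIP suppresses the
+ * halt-bit check entirely.
+ */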
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_1_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x9d020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_2_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x9d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_aux_clk = {
+ .halt_reg = 0xa3020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_3_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_cfg_ahb_clk = {
+ .halt_reg = 0xa301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa301c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_mstr_axi_clk = {
+ .halt_reg = 0xa3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_pipe_clk = {
+ .halt_reg = 0xa3024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_axi_clk = {
+ .halt_reg = 0xa3014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa3014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_q2a_axi_clk = {
+ .halt_reg = 0xa3010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pdm2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_1_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_1_core_clk = {
+ .halt_reg = 0x4a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qspi_1_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qspi_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17994,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc4_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the SYS NOC clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_tsif_ref_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_ahb_clk = {
+ .halt_reg = 0xa2014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_axi_clk = {
+ .halt_reg = 0xa2010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_ice_core_clk = {
+ .halt_reg = 0xa205c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa205c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_phy_aux_clk = {
+ .halt_reg = 0xa2090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_rx_symbol_0_clk = {
+ .halt_reg = 0xa201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_rx_symbol_1_clk = {
+ .halt_reg = 0xa20ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa20ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_tx_symbol_0_clk = {
+ .halt_reg = 0xa2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_unipro_core_clk = {
+ .halt_reg = 0xa2058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_ice_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x75090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x75090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75090,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_phy_aux_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750ac,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_unipro_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_ice_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_phy_aux_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770ac,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x770ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_unipro_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0xa605c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axic_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axic_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
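+/*
+ * GDSCs (globally distributed switch controllers) implement the power
+ * domains exposed by this clock controller; each is controlled through
+ * the GDSCR register at the offset given below.
+ */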
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_2_gdsc = {
+ .gdscr = 0x9d004,
+ .pd = {
+ .name = "pcie_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_card_2_gdsc = {
+ .gdscr = 0xa2004,
+ .pd = {
+ .name = "ufs_card_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_3_gdsc = {
+ .gdscr = 0xa3004,
+ .pd = {
+ .name = "pcie_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_mp_gdsc = {
+ .gdscr = 0xa6004,
+ .pd = {
+ .name = "usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
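+/* Map the DT clock binding indices to the regmap clocks defined above */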
+static struct clk_regmap *gcc_sc8180x_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_AXI_CLK_SRC] = &gcc_npu_axi_clk_src.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
+ [GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
+ [GCC_PCIE3_PHY_REFGEN_CLK] = &gcc_pcie3_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_CLKREF_CLK] = &gcc_pcie_2_clkref_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK] = &gcc_pcie_3_aux_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK_SRC] = &gcc_pcie_3_aux_clk_src.clkr,
+ [GCC_PCIE_3_CFG_AHB_CLK] = &gcc_pcie_3_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3_CLKREF_CLK] = &gcc_pcie_3_clkref_clk.clkr,
+ [GCC_PCIE_3_MSTR_AXI_CLK] = &gcc_pcie_3_mstr_axi_clk.clkr,
+ [GCC_PCIE_3_PIPE_CLK] = &gcc_pcie_3_pipe_clk.clkr,
+ [GCC_PCIE_3_SLV_AXI_CLK] = &gcc_pcie_3_slv_axi_clk.clkr,
+ [GCC_PCIE_3_SLV_Q2A_AXI_CLK] = &gcc_pcie_3_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_1_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_1_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_1_CORE_CLK] = &gcc_qspi_1_core_clk.clkr,
+ [GCC_QSPI_1_CORE_CLK_SRC] = &gcc_qspi_1_core_clk_src.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_2_AHB_CLK] = &gcc_ufs_card_2_ahb_clk.clkr,
+ [GCC_UFS_CARD_2_AXI_CLK] = &gcc_ufs_card_2_axi_clk.clkr,
+ [GCC_UFS_CARD_2_AXI_CLK_SRC] = &gcc_ufs_card_2_axi_clk_src.clkr,
+ [GCC_UFS_CARD_2_ICE_CORE_CLK] = &gcc_ufs_card_2_ice_core_clk.clkr,
+ [GCC_UFS_CARD_2_ICE_CORE_CLK_SRC] = &gcc_ufs_card_2_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_2_PHY_AUX_CLK] = &gcc_ufs_card_2_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_2_PHY_AUX_CLK_SRC] = &gcc_ufs_card_2_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_2_RX_SYMBOL_0_CLK] = &gcc_ufs_card_2_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_2_RX_SYMBOL_1_CLK] = &gcc_ufs_card_2_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_2_TX_SYMBOL_0_CLK] = &gcc_ufs_card_2_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_2_UNIPRO_CORE_CLK] = &gcc_ufs_card_2_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_2_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_VIDEO_AXIC_CLK] = &gcc_video_axic_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL7] = &gpll7.clkr,
+};
+
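+/*
+ * Block reset (BCR) register offsets, indexed by the DT reset binding;
+ * entries with a second field also name the reset bit within the register.
+ */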
+static const struct qcom_reset_map gcc_sc8180x_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_NPU_BCR] = { 0x4d000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_2_BCR] = { 0x9d000 },
+ [GCC_PCIE_2_PHY_BCR] = { 0xa701c },
+ [GCC_PCIE_3_BCR] = { 0xa3000 },
+ [GCC_PCIE_3_PHY_BCR] = { 0xa801c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QSPI_1_BCR] = { 0x4a000 },
+ [GCC_QSPI_BCR] = { 0x24008 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_5_BCR] = { 0x12010 },
+ [GCC_QUSB2PHY_MP0_BCR] = { 0x12008 },
+ [GCC_QUSB2PHY_MP1_BCR] = { 0x1200c },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_PRIM_SP1_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_SP0_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_PRIM_SP1_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_2_BCR] = { 0xa2000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_MP_BCR] = { 0xa6000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXIC_CLK_BCR] = { 0xb02c, 2 },
+ [GCC_VIDEO_AXI0_CLK_BCR] = { 0xb024, 2 },
+ [GCC_VIDEO_AXI1_CLK_BCR] = { 0xb028, 2 },
+};
+
+static struct gdsc *gcc_sc8180x_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_2_GDSC] = &pcie_2_gdsc,
+ [PCIE_3_GDSC] = &pcie_3_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_CARD_2_GDSC] = &ufs_card_2_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_MP_GDSC] = &usb30_mp_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+};
+
+static const struct regmap_config gcc_sc8180x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc0004,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc8180x_desc = {
+ .config = &gcc_sc8180x_regmap_config,
+ .clks = gcc_sc8180x_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc8180x_clocks),
+ .resets = gcc_sc8180x_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc8180x_resets),
+ .gdscs = gcc_sc8180x_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc8180x_gdscs),
+};
+
+static const struct of_device_id gcc_sc8180x_match_table[] = {
+ { .compatible = "qcom,gcc-sc8180x" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc8180x_match_table);
+
+static int gcc_sc8180x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc8180x_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Enable the following always-on clocks:
+ * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
+ * GCC_VIDEO_XO_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_XO_CLK,
+ * GCC_CPUSS_GNOC_CLK, GCC_CPUSS_DVM_BUS_CLK, GCC_NPU_CFG_AHB_CLK and
+ * GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0xb004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb040, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb044, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb048, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x48190, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x4d004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ /* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ return qcom_cc_really_probe(pdev, &gcc_sc8180x_desc, regmap);
+}
+
+static struct platform_driver gcc_sc8180x_driver = {
+ .probe = gcc_sc8180x_probe,
+ .driver = {
+ .name = "gcc-sc8180x",
+ .of_match_table = gcc_sc8180x_match_table,
+ },
+};
+
+static int __init gcc_sc8180x_init(void)
+{
+ return platform_driver_register(&gcc_sc8180x_driver);
+}
+core_initcall(gcc_sc8180x_init);
+
+static void __exit gcc_sc8180x_exit(void)
+{
+ platform_driver_unregister(&gcc_sc8180x_driver);
+}
+module_exit(gcc_sc8180x_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC8180x driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 31258795e7b8..6394257ca8c0 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -1571,6 +1571,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
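+ /*
+ * Register accesses to the GPU clock controller depend
+ * on this clock, so it must never be gated.
+ */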
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1684,6 +1685,12 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_noc_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ /*
+ * Any access to mmss depends on this clock.
+ * Gating this clock has been shown to crash the system
+ * when mmssnoc_axi_rpm_clk is initialized in rpmcc.
+ */
+ .flags = CLK_IS_CRITICAL,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 6cb6617b8d88..ab594a0f0c40 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
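+ /* floor ops round down, so the card never clocks above the requested rate */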
},
};
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
new file mode 100644
index 000000000000..1c23b9f84900
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -0,0 +1,3890 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
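+/* Logical identifiers for every PLL, XO, sleep and PHY clock parent used below */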
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_CARD_RX_SYMBOL_0_CLK,
+ P_UFS_CARD_RX_SYMBOL_1_CLK,
+ P_UFS_CARD_TX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_5lpe_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
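+/*
+ * Each parent_map table pairs a logical parent (P_*) with its hardware mux
+ * select value; the clk_parent_data array of the same number lists the
+ * matching parents by fw_name or clk_hw pointer.
+ */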
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "pcie_0_pipe_clk", },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_UFS_CARD_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "ufs_card_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_UFS_CARD_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "ufs_card_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_CARD_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "ufs_card_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .fw_name = "usb3_uni_phy_sec_gcc_usb30_pipe_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+ { .fw_name = "bi_tcxo" },
+};
+
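+/*
+ * PIPE and symbol clock muxes: each selects between the clock recovered by
+ * the external PHY and bi_tcxo, so the source can be parked on a running
+ * clock while the PHY is powered down.
+ */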
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x8d054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_0_clk_src = {
+ .reg = 0x75058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_1_clk_src = {
+ .reg = 0x750c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_tx_symbol_0_clk_src = {
+ .reg = 0x75048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0xf060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = 3,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x10060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = 3,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
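+/*
+ * RCG frequency tables: each F(rate, parent, pre-divider, M, N) entry
+ * describes one supported rate of the clock source below.
+ */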
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
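+/*
+ * QUP serial-engine clock sources; the clk_init_data of each SE is kept
+ * in a separately named struct rather than a compound literal.
+ */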
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e4d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
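+
+/*
+ * A freq_tbl entry F(f, src, pre_div, m, n) expands to
+ * { f, src, 2 * pre_div - 1, m, n } (see clk-rcg.h). The 400 kHz row
+ * above therefore works out as 19.2 MHz (TCXO) / 12 * (1 / 4) =
+ * 400000 Hz, and rows with m = n = 0 bypass the M/N counter and use
+ * the pre-divider alone.
+ */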
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
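+
+/*
+ * clk_rcg2_floor_ops round a requested rate down to the nearest table
+ * entry instead of up, so the SD card is never clocked faster than the
+ * rate the core asked for.
+ */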
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7506c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x750a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
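+
+/*
+ * Half-integer pre-dividers are exact here because F() stores
+ * 2 * pre_div - 1 (4.5 encodes as 8). Assuming the usual 600 MHz GPLL0
+ * (300 MHz on its even output) for this family, 600 MHz / 4.5 =
+ * 133.33 MHz and 300 MHz / 4.5 = 66.67 MHz, matching the rows above.
+ */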
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x10050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
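+
+/*
+ * Both mock_utmi postdivs are a 4-bit divider field at bit 0 of their
+ * register. clk_regmap_div_ro_ops only ever read the field, leaving
+ * whatever value firmware programmed, and with the default divider
+ * encoding a raw field value v means divide-by-(v + 1).
+ */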
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x6b080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
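+
+/*
+ * BRANCH_HALT_SKIP means the CLK_OFF bit in halt_reg is never polled:
+ * this branch is clocked from outside GCC, so its status bit cannot
+ * be trusted. Enabling still works by setting BIT(12) in the shared
+ * vote register at 0x52000 rather than in a per-branch CBCR.
+ */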
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x8d084,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x9000c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9000c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
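+
+/*
+ * For BRANCH_HALT_VOTED branches the halt bit is only polled when
+ * enabling; on disable the vote is simply dropped, since another
+ * master's vote may legitimately keep the clock running. hwcg_reg /
+ * hwcg_bit additionally let the hardware gate the clock on its own
+ * while the block is idle.
+ */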
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
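+
+/*
+ * Each *_hw_ctl_clk is the same CBCR as the branch above it: BIT(0)
+ * is the plain software enable, while BIT(1) appears to hand gating
+ * control to the UFS hardware, which is why the pair shares halt_reg
+ * and enable_reg.
+ */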
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x10080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x10080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x8d080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
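+
+/*
+ * BRANCH_HALT_DELAY polls nothing at all: after toggling the vote bit
+ * the driver just waits a fixed delay, because whether this branch is
+ * actually ticking depends on its parent, as the comment above notes.
+ */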
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x23278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x23270,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e26c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e39c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e5fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_throttle_pcie_ahb_clk = {
+ .halt_reg = 0x9044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_throttle_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk__force_mem_core_on = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk__force_mem_core_on",
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
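+
+/*
+ * BIT(14) of the same 0xf010 CBCR as the master branch is presumably
+ * the FORCE_MEM_CORE_ON control: setting it keeps the USB controller's
+ * core memories powered while the clock is gated, so their contents
+ * are retained. clk_branch_simple_ops just toggle the bit without any
+ * halt polling.
+ */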
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk__force_mem_core_on = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk__force_mem_core_on",
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xf05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_en = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Driven by an external parent clock, so skip the halt check */
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x28010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Driven by an external parent clock, so skip the halt check */
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x28018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
+ .gdscr = 0x7d06c,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm8350_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_EN] = &gcc_pcie_1_clkref_en.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
+ [GCC_UFS_1_CLKREF_EN] = &gcc_ufs_1_clkref_en.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_card_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK__FORCE_MEM_CORE_ON] =
+ &gcc_usb30_prim_master_clk__force_mem_core_on.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK__FORCE_MEM_CORE_ON] =
+ &gcc_usb30_sec_master_clk__force_mem_core_on.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8350_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8350_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 },
+ [GCC_VIDEO_BCR] = { 0x28000 },
+};
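+
+/*
+ * Editorial note: most entries above carry only a BCR register offset;
+ * the two *_CLK_ARES entries also name bit 2, which (per the
+ * qcom_reset_map layout in reset.h) selects the ARES bit inside the
+ * branch's own CBCR rather than a dedicated block reset register.
+ */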
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
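+
+/*
+ * Editorial note: DEFINE_RCG_DFS() marks these QUP serial-engine RCGs for
+ * Dynamic Frequency Switching; qcom_cc_register_rcg_dfs() in probe then
+ * switches them to the DFS ops, which read back the firmware-programmed
+ * performance levels instead of using a driver-supplied freq_tbl.
+ */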
+
+static const struct regmap_config gcc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9c100,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8350_desc = {
+ .config = &gcc_sm8350_regmap_config,
+ .clks = gcc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8350_clocks),
+ .resets = gcc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8350_resets),
+ .gdscs = gcc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8350_gdscs),
+};
+
+static const struct of_device_id gcc_sm8350_match_table[] = {
+ { .compatible = "qcom,gcc-sm8350" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8350_match_table);
+
+static int gcc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to map gcc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ /*
+ * Keep the following critical clocks always on:
+ * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK,
+ * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK,
+ * GCC_VIDEO_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x26018, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x2701c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28020, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* Set FORCE_MEM_CORE_ON for the UFS PHY ICE core clock */
+ regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8350_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8350_driver = {
+ .probe = gcc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-gcc",
+ .of_match_table = gcc_sm8350_match_table,
+ },
+};
+
+static int __init gcc_sm8350_init(void)
+{
+ return platform_driver_register(&gcc_sm8350_driver);
+}
+subsys_initcall(gcc_sm8350_init);
+
+static void __exit gcc_sm8350_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8350_driver);
+}
+module_exit(gcc_sm8350_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8350 Driver");
+MODULE_LICENSE("GPL v2");
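+
+/*
+ * Editorial sketch (illustrative, not part of this driver): roughly how
+ * the halt_check modes used above are consumed. The real logic lives in
+ * drivers/clk/qcom/clk-branch.c; the helper name and the BIT(31) CLK_OFF
+ * position are assumptions based on that file, and the voted variants
+ * differ only in where the enable vote is cast.
+ */
+#if 0
+static bool branch_halt_settled(const struct clk_branch *br, u32 cbcr)
+{
+ switch (br->halt_check) {
+ case BRANCH_HALT: /* poll CBCR until the CLK_OFF bit clears */
+ return !(cbcr & BIT(31));
+ case BRANCH_HALT_DELAY: /* external parent: wait a fixed delay */
+ udelay(10);
+ return true;
+ default: /* BRANCH_HALT_SKIP: nothing to poll */
+ return true;
+ }
+}
+#endif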
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index af26e0695b86..51ed640e527b 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -183,7 +183,10 @@ static inline int gdsc_assert_reset(struct gdsc *sc)
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
int i;
- u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+ u32 mask = RETAIN_MEM;
+
+ if (!(sc->flags & NO_RET_PERIPH))
+ mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
@@ -192,7 +195,10 @@ static inline void gdsc_force_mem_on(struct gdsc *sc)
static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
int i;
- u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+ u32 mask = RETAIN_MEM;
+
+ if (!(sc->flags & NO_RET_PERIPH))
+ mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
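
Editorial note (not part of the patch): the gdsc.h hunk below widens the
flags field to u16 because the new NO_RET_PERIPH flag is BIT(8); with it
set, gdsc_force_mem_on()/gdsc_clear_mem_on() toggle only RETAIN_MEM and
leave the peripheral-retention bit alone. A hedged usage sketch, modeled
on the gpucc hunks later in this series (names hypothetical):

    static struct gdsc example_gx_gdsc = {
        .gdscr = 0x1094,
        .cxcs = (unsigned int []){ 0x1098 },
        .cxc_count = 1,
        .pd = { .name = "example_gx" },
        .pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
        /* retain core memory across collapse, but not the peripheral */
        .flags = CLAMP_IO | NO_RET_PERIPH,
    };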
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index bd537438c793..5bb396b344d1 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -42,7 +42,7 @@ struct gdsc {
#define PWRSTS_ON BIT(2)
#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
- const u8 flags;
+ const u16 flags;
#define VOTABLE BIT(0)
#define CLAMP_IO BIT(1)
#define HW_CTRL BIT(2)
@@ -51,6 +51,7 @@ struct gdsc {
#define POLL_CFG_GDSCR BIT(5)
#define ALWAYS_ON BIT(6)
#define RETAIN_FF_ENABLE BIT(7)
+#define NO_RET_PERIPH BIT(8)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index 9b3923af02a1..fedfffaf0a8d 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -50,6 +50,11 @@ static struct clk_branch gpucc_cxo_clk = {
},
};
+static struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+ { 125000000, 1000000000, 1 },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -61,11 +66,13 @@ static const struct clk_div_table post_div_table_fabia_even[] = {
static struct clk_alpha_pll gpupll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "gpupll0",
.parent_hws = (const struct clk_hw *[]){ &gpucc_cxo_clk.clkr.hw },
.num_parents = 1,
- .ops = &clk_alpha_pll_fixed_fabia_ops,
+ .ops = &clk_alpha_pll_fabia_ops,
},
};
@@ -80,6 +87,7 @@ static struct clk_alpha_pll_postdiv gpupll0_out_even = {
.name = "gpupll0_out_even",
.parent_hws = (const struct clk_hw *[]){ &gpupll0.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
@@ -253,12 +261,16 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x1094,
.clamp_io_ctrl = 0x130,
+ .resets = (unsigned int []){ GPU_GX_BCR },
+ .reset_count = 1,
+ .cxcs = (unsigned int []){ 0x1098 },
+ .cxc_count = 1,
.pd = {
.name = "gpu_gx",
},
.parent = &gpu_cx_gdsc.pd,
- .pwrsts = PWRSTS_OFF_ON,
- .flags = CLAMP_IO | AON_RESET,
+ .pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
+ .flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
};
static struct clk_regmap *gpucc_msm8998_clocks[] = {
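
Editorial note: the msm8998 hunks above let the GPU PLL actually change
rate: gpupll0 gains a VCO table and moves from the fixed-rate Fabia ops to
clk_alpha_pll_fabia_ops, while CLK_SET_RATE_PARENT on gpupll0_out_even lets
a rate request on the GFX3D branch walk up to the PLL. A hedged
consumer-side illustration (device and clock name assumed):

    static int example_set_gpu_rate(struct device *dev)
    {
        struct clk *gfx3d = devm_clk_get(dev, "gfx3d");

        if (IS_ERR(gfx3d))
            return PTR_ERR(gfx3d);
        /* propagates through gpupll0_out_even up to gpupll0 */
        return clk_set_rate(gfx3d, 670000000);
    }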
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
new file mode 100644
index 000000000000..1ebcceb3a50d
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm660.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_GPU_XO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_PLL0_PLL_OUT_MAIN,
+ P_GPU_PLL1_PLL_OUT_MAIN,
+};
+
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static struct pll_vco gpu_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+static struct clk_alpha_pll gpu_pll0_pll_out_main = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_pll0_pll_out_main",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpucc_cxo_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll gpu_pll1_pll_out_main = {
+ .offset = 0x40,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_pll1_pll_out_main",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpucc_cxo_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static const struct parent_map gpucc_parent_map_1[] = {
+ { P_GPU_XO, 0 },
+ { P_GPU_PLL0_PLL_OUT_MAIN, 1 },
+ { P_GPU_PLL1_PLL_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_1[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .hw = &gpu_pll0_pll_out_main.clkr.hw },
+ { .hw = &gpu_pll1_pll_out_main.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+};
+
+static struct clk_rcg2_gfx3d gfx3d_clk_src = {
+ .div = 2,
+ .rcg = {
+ .cmd_rcgr = 0x1070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gpucc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_gfx3d_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ },
+ },
+ .hws = (struct clk_hw*[]){
+ &gpucc_cxo_clk.clkr.hw,
+ &gpu_pll0_pll_out_main.clkr.hw,
+ &gpu_pll1_pll_out_main.clkr.hw,
+ }
+};
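+
+/*
+ * Editorial note: clk_gfx3d_ops ping-pongs this RCG between the two GPU
+ * PLLs listed in .hws above, reprogramming whichever PLL is idle before
+ * switching the mux, so a GPU rate change never runs from a relocking
+ * PLL; .div = 2 models the fixed divider between the RCG output and the
+ * GPU core clock (inferred from the field, not spelled out in this patch).
+ */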
+
+static struct clk_branch gpucc_gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.rcg.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct parent_map gpucc_parent_map_0[] = {
+ { P_GPU_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk", .name = "gcc_gpu_gpll0_div_clk" },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_GPU_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_data = gpucc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_GPU_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_data = gpucc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpucc_rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbbmtimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &rbbmtimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbcpr_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &rbcpr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x1004,
+ .gds_hw_ctrl = 0x1008,
+ .pd = {
+ .name = "gpu_cx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x1094,
+ .clamp_io_ctrl = 0x130,
+ .resets = (unsigned int []){ GPU_GX_BCR },
+ .reset_count = 1,
+ .cxcs = (unsigned int []){ 0x1098 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .parent = &gpu_cx_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
+ .flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
+};
+
+static struct gdsc *gpucc_sdm660_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpucc_sdm660_resets[] = {
+ [GPU_CX_BCR] = { 0x1000 },
+ [RBCPR_BCR] = { 0x1050 },
+ [GPU_GX_BCR] = { 0x1090 },
+ [SPDM_BCR] = { 0x10e0 },
+};
+
+static struct clk_regmap *gpucc_sdm660_clocks[] = {
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+ [GPU_PLL0_PLL] = &gpu_pll0_pll_out_main.clkr,
+ [GPU_PLL1_PLL] = &gpu_pll1_pll_out_main.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.rcg.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [GPUCC_RBCPR_CLK] = &gpucc_rbcpr_clk.clkr,
+ [GPUCC_GFX3D_CLK] = &gpucc_gfx3d_clk.clkr,
+ [GPUCC_RBBMTIMER_CLK] = &gpucc_rbbmtimer_clk.clkr,
+};
+
+static const struct regmap_config gpucc_660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9034,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_sdm660_desc = {
+ .config = &gpucc_660_regmap_config,
+ .clks = gpucc_sdm660_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_sdm660_clocks),
+ .resets = gpucc_sdm660_resets,
+ .num_resets = ARRAY_SIZE(gpucc_sdm660_resets),
+ .gdscs = gpucc_sdm660_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_sdm660_gdscs),
+};
+
+static const struct of_device_id gpucc_sdm660_match_table[] = {
+ { .compatible = "qcom,gpucc-sdm660" },
+ { .compatible = "qcom,gpucc-sdm630" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_sdm660_match_table);
+
+static int gpucc_sdm660_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config gpu_pll_config = {
+ .config_ctl_val = 0x4001055b,
+ .alpha = 0xaaaaab00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+ };
+
+ regmap = qcom_cc_map(pdev, &gpucc_sdm660_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* 800 MHz configuration for GPU PLL0 */
+ gpu_pll_config.l = 0x29;
+ gpu_pll_config.alpha_hi = 0xaa;
+ clk_alpha_pll_configure(&gpu_pll0_pll_out_main, regmap, &gpu_pll_config);
+
+ /* 740 MHz configuration for GPU PLL1 */
+ gpu_pll_config.l = 0x26;
+ gpu_pll_config.alpha_hi = 0x8a;
+ clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap, &gpu_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gpucc_sdm660_desc, regmap);
+}
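+
+/*
+ * Editorial check of the two configurations above (XO = 19.2 MHz; the
+ * fraction is alpha_hi:alpha over 2^40, assuming the default 40-bit
+ * alpha width):
+ *   GPU PLL0: L = 0x29 = 41, frac = 0xaa_aaaaab00 / 2^40 ~= 0.6667,
+ *             19.2 MHz * 41.6667 ~= 800 MHz
+ *   GPU PLL1: L = 0x26 = 38, frac = 0x8a_aaaaab00 / 2^40 ~= 0.5417,
+ *             19.2 MHz * 38.5417 ~= 740 MHz
+ */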
+
+static struct platform_driver gpucc_sdm660_driver = {
+ .probe = gpucc_sdm660_probe,
+ .driver = {
+ .name = "gpucc-sdm660",
+ .of_match_table = gpucc_sdm660_match_table,
+ },
+};
+module_platform_driver(gpucc_sdm660_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDM630/SDM660 GPUCC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
index d366c7c2abc7..f5e31e692b9b 100644
--- a/drivers/clk/qcom/lpass-gfm-sm8250.c
+++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
@@ -33,14 +33,13 @@ struct clk_gfm {
void __iomem *gfm_mux;
};
-#define GFM_MASK BIT(1)
#define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
static u8 clk_gfm_get_parent(struct clk_hw *hw)
{
struct clk_gfm *clk = to_clk_gfm(hw);
- return readl(clk->gfm_mux) & GFM_MASK;
+ return readl(clk->gfm_mux) & clk->mux_mask;
}
static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
@@ -51,9 +50,10 @@ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
val = readl(clk->gfm_mux);
if (index)
- val |= GFM_MASK;
+ val |= clk->mux_mask;
else
- val &= ~GFM_MASK;
+ val &= ~clk->mux_mask;
+
writel(val, clk->gfm_mux);
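
Editorial note: dropping the hard-coded GFM_MASK in favour of a
per-instance mux_mask lets each glitch-free mux live on its own register
bit instead of always BIT(1). A hedged sketch of an instance, assuming the
mux_mask field this patch adds to struct clk_gfm (name and bit position
hypothetical):

    static struct clk_gfm example_gfm = {
        .mux_mask = BIT(1), /* this particular mux selects on bit 1 */
    };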
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 015426262d08..a1552b6771bc 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -74,22 +74,6 @@ static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"dsi1pll",
};
-static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_GPLL0, 5 },
- { P_MMPLL2, 3 }
-};
-
-static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "mmpll1_vote",
- "mmss_gpll0_vote",
- "mmpll2",
-};
-
static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
{ P_XO, 0 },
{ P_MMPLL0, 1 },
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 3b3aac07fb2d..24843e4f2599 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -528,16 +528,23 @@ static struct clk_rcg2 maxi_clk_src = {
},
};
-static struct clk_rcg2 gfx3d_clk_src = {
- .cmd_rcgr = 0x4000,
- .hid_width = 5,
- .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gfx3d_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
- .num_parents = 6,
- .ops = &clk_gfx3d_ops,
- .flags = CLK_SET_RATE_PARENT,
+static struct clk_rcg2_gfx3d gfx3d_clk_src = {
+ .rcg = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = 6,
+ .ops = &clk_gfx3d_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+ .hws = (struct clk_hw*[]) {
+ &mmpll9.clkr.hw,
+ &mmpll2.clkr.hw,
+ &mmpll8.clkr.hw
},
};
@@ -3089,7 +3096,7 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[AHB_CLK_SRC] = &ahb_clk_src.clkr,
[AXI_CLK_SRC] = &axi_clk_src.clkr,
[MAXI_CLK_SRC] = &maxi_clk_src.clkr,
- [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.rcg.clkr,
[RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
[ISENSE_CLK_SRC] = &isense_clk_src.clkr,
[RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index dd68983fe22e..467dadccde02 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -1211,6 +1211,8 @@ static struct clk_rcg2 vfe1_clk_src = {
static struct clk_branch misc_ahb_clk = {
.halt_reg = 0x328,
+ .hwcg_reg = 0x328,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x328,
.enable_mask = BIT(0),
@@ -1241,6 +1243,8 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_ahb_clk = {
.halt_reg = 0x1030,
+ .hwcg_reg = 0x1030,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x1030,
.enable_mask = BIT(0),
@@ -1315,6 +1319,8 @@ static struct clk_branch video_subcore1_clk = {
static struct clk_branch mdss_ahb_clk = {
.halt_reg = 0x2308,
+ .hwcg_reg = 0x2308,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x2308,
.enable_mask = BIT(0),
@@ -2496,6 +2502,8 @@ static struct clk_branch mnoc_ahb_clk = {
static struct clk_branch bimc_smmu_ahb_clk = {
.halt_reg = 0xe004,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xe004,
.enable_mask = BIT(0),
@@ -2511,6 +2519,8 @@ static struct clk_branch bimc_smmu_ahb_clk = {
static struct clk_branch bimc_smmu_axi_clk = {
.halt_reg = 0xe008,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xe008,
.enable_mask = BIT(0),
@@ -2653,7 +2663,7 @@ static struct gdsc bimc_smmu_gdsc = {
.name = "bimc_smmu",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL,
+ .flags = HW_CTRL | ALWAYS_ON,
};
static struct clk_regmap *mmcc_msm8998_clocks[] = {
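
Editorial note: the mmcc-msm8998 hunks above wire up hwcg_reg/hwcg_bit on
several AHB/AXI branches so hardware clock gating is honoured, and keep
bimc_smmu_gdsc ALWAYS_ON. A hedged sketch of how the framework detects
hardware-gated mode (the real helper is clk_branch_in_hwcg_mode() in
clk-branch.c; the exact body here is assumed):

    static bool example_in_hwcg_mode(const struct clk_branch *br)
    {
        u32 val;

        if (!br->hwcg_reg)
            return false;
        regmap_read(br->clkr.regmap, br->hwcg_reg, &val);
        return !!(val & BIT(br->hwcg_bit));
    }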
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
new file mode 100644
index 000000000000..941993bc610d
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -0,0 +1,2864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Martin Botka <martin.botka@somainline.org>
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_DSI0PLL_BYTE,
+ P_DSI0PLL,
+ P_DSI1PLL_BYTE,
+ P_DSI1PLL,
+ P_GPLL0,
+ P_GPLL0_DIV,
+ P_MMPLL0,
+ P_MMPLL10,
+ P_MMPLL3,
+ P_MMPLL4,
+ P_MMPLL5,
+ P_MMPLL6,
+ P_MMPLL7,
+ P_MMPLL8,
+ P_SLEEP_CLK,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV,
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+/* Votable PLL */
+static struct clk_alpha_pll mmpll0 = {
+ .offset = 0xc000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x1f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll mmpll6 = {
+ .offset = 0xf0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x1f0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* APSS-controlled PLLs */
+static struct pll_vco vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+static struct pll_vco mmpll3_vco[] = {
+ { 750000000, 1500000000, 1 },
+};
+
+static const struct alpha_pll_config mmpll10_config = {
+ .l = 0x1e,
+ .config_ctl_val = 0x00004289,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll10 = {
+ .offset = 0x190,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll3_config = {
+ .l = 0x2e,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x1 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll3 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll3_vco,
+ .num_vco = ARRAY_SIZE(mmpll3_vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll4_config = {
+ .l = 0x28,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll4 = {
+ .offset = 0x50,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll5_config = {
+ .l = 0x2a,
+ .config_ctl_val = 0x4001055b,
+ .alpha_hi = 0xf8,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll5 = {
+ .offset = 0xa0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll7_config = {
+ .l = 0x32,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll7 = {
+ .offset = 0x140,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll8_config = {
+ .l = 0x30,
+ .alpha_hi = 0x70,
+ .alpha_en_mask = BIT(24),
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll8 = {
+ .offset = 0x1c0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dsibyte[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL4, 1 },
+ { P_MMPLL7, 2 },
+ { P_MMPLL10, 3 },
+ { P_SLEEP_CLK, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL7, 2 },
+ { P_MMPLL10, 3 },
+ { P_SLEEP_CLK, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dplink_dpvco_map[] = {
+ { P_XO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dplink_dpvco[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dp_link_2x_clk_divsel_five" },
+ { .fw_name = "dp_vco_divided_clk_src_mux" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_MMPLL7, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll5.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+ { .fw_name = "dsi1pll" },
+};
+
+static const struct parent_map mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_MMPLL6, 5 },
+ { P_GPLL0, 6 },
+};
+
+static const struct clk_parent_data mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .hw = &mmpll6.clkr.hw },
+ { .fw_name = "gpll0" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_GPLL0, 5 },
+ { P_MMPLL6, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll6.clkr.hw },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL8, 2 },
+ { P_MMPLL3, 3 },
+ { P_MMPLL6, 4 },
+ { P_GPLL0, 5 },
+ { P_MMPLL7, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .hw = &mmpll6.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll7.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
+ F(80800000, P_MMPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ahb_clk_src",
+ .parent_data = mmcc_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(24000, P_XO, 16, 1, 50),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(12000000, P_GPLL0_DIV, 10, 2, 5),
+ F(13043478, P_GPLL0_DIV, 1, 1, 23),
+ F(24000000, P_GPLL0_DIV, 1, 2, 25),
+ F(50000000, P_GPLL0_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(37500000, P_GPLL0_DIV, 8, 0, 0),
+ F(50000000, P_GPLL0_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(256000000, P_MMPLL4, 3, 0, 0),
+ F(384000000, P_MMPLL4, 2, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ F(540000000, P_MMPLL6, 2, 0, 0),
+ F(576000000, P_MMPLL10, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6_map,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_MMPLL8, 3, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL8, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csiphy_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csiphy_clk_src = {
+ .cmd_rcgr = 0x3800,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_aux_clk_src = {
+ .cmd_rcgr = 0x2260,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_aux_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_crypto_clk_src[] = {
+ F(101250000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ F(168750000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ F(337500000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .freq_tbl = ftbl_dp_crypto_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_crypto_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_gtc_clk_src[] = {
+ F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
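+
+/*
+ * Fractional pre-dividers such as 7.5 above work because the qcom F()
+ * macro stores the divider as (2 * pre_div - 1), so half-integer steps
+ * still yield an integer register value; 300 MHz / 7.5 = 40 MHz.
+ */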
+
+static struct clk_rcg2 dp_gtc_clk_src = {
+ .cmd_rcgr = 0x2280,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_gtc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_gtc_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_link_clk_src[] = {
+ F(162000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ F(270000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ F(540000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ { }
+};
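+
+/*
+ * 162/270/540 MHz appear to correspond to the RBR/HBR/HBR2 DisplayPort
+ * link clocks (link rate / 10 for 1.62/2.7/5.4 Gbps), taken from the
+ * DP PHY PLL output through a fixed div-2.
+ */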
+
+static struct clk_rcg2 dp_link_clk_src = {
+ .cmd_rcgr = 0x2200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .freq_tbl = ftbl_dp_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_link_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2240,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_pixel_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_dp_ops,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(66666667, P_GPLL0_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0, 4.5, 0, 0),
+ F(219428571, P_MMPLL4, 3.5, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mclk0_clk_src[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(8000000, P_GPLL0_DIV, 1, 2, 75),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16666667, P_GPLL0_DIV, 2, 1, 9),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_MMPLL10, 1, 1, 24),
+ F(33333333, P_GPLL0_DIV, 1, 1, 9),
+ F(48000000, P_GPLL0, 1, 2, 25),
+ F(66666667, P_GPLL0, 1, 1, 9),
+ { }
+};
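+
+/*
+ * The mclk rates lean heavily on the 8-bit M/N counter; e.g. the
+ * 24 MHz entry is MMPLL10 (assumed to run at 576 MHz, cf. the cpp
+ * table above) * 1/24.
+ */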
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(150000000, P_GPLL0_DIV, 2, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(275000000, P_MMPLL5, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5, 2.5, 0, 0),
+ F(412500000, P_MMPLL5, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_rot_clk_src[] = {
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(275000000, P_MMPLL5, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5, 2.5, 0, 0),
+ F(412500000, P_MMPLL5, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rot_clk_src = {
+ .cmd_rcgr = 0x21a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rot_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(256000000, P_MMPLL4, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ F(540000000, P_MMPLL6, 2, 0, 0),
+ F(576000000, P_MMPLL10, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+ F(133333333, P_GPLL0, 4.5, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(441600000, P_MMPLL3, 2, 0, 0),
+ F(518400000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_core_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ahb_clk = {
+ .halt_reg = 0x348c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x348c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x348c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_clk = {
+ .halt_reg = 0x3344,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_axi_clk = {
+ .halt_reg = 0x36c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_vbif_ahb_clk = {
+ .halt_reg = 0x36c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy0_clk = {
+ .halt_reg = 0x3740,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3740,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy1_clk = {
+ .halt_reg = 0x3744,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy2_clk = {
+ .halt_reg = 0x3748,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid0_clk = {
+ .halt_reg = 0x3730,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3730,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy0_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid1_clk = {
+ .halt_reg = 0x3734,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3734,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy1_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid2_clk = {
+ .halt_reg = 0x3738,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3738,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy2_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid3_clk = {
+ .halt_reg = 0x373c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x373c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp0_clk = {
+ .halt_reg = 0x3444,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_gp0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp1_clk = {
+ .halt_reg = 0x3474,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_camss_axi_clk = {
+ .halt_reg = 0x3c3c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c3c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_camss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_ahb_clk = {
+ .halt_reg = 0x3668,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3668,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_stream_clk = {
+ .halt_reg = 0x3720,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3720,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_ahb_clk = {
+ .halt_reg = 0x3678,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3678,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_stream_clk = {
+ .halt_reg = 0x3724,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3724,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_axi_clk = {
+ .halt_reg = 0x36bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch csiphy_ahb2crif_clk = {
+ .halt_reg = 0x374c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x374c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x374c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "csiphy_ahb2crif_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(323200000, P_MMPLL0, 2.5, 0, 0),
+ F(406000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+/* This RCG is read-only (RO) to Linux */
+static struct clk_rcg2 axi_clk_src = {
+ .cmd_rcgr = 0xd000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "axi_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_mdss_axi_clk = {
+ .halt_reg = 0x246c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x246c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x246c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div mdss_byte0_intf_div_clk = {
+ .reg = 0x237c,
+ .shift = 0,
+ .width = 2,
+ /*
+ * NOTE: The divider op does not work for div-3 (divider field value
+ * 0x2, given the div = field + 1 mapping). The current assumption is
+ * that div-3 is not a recommended setting for this divider.
+ */
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_intf_div_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_intf_clk = {
+ .halt_reg = 0x2374,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2374,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdss_byte0_intf_div_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div mdss_byte1_intf_div_clk = {
+ .reg = 0x2380,
+ .shift = 0,
+ .width = 2,
+ /*
+ * NOTE: The divider op does not work for div-3 (divider field value
+ * 0x2, given the div = field + 1 mapping). The current assumption is
+ * that div-3 is not a recommended setting for this divider.
+ */
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_intf_div_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_intf_clk = {
+ .halt_reg = 0x2378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2378,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdss_byte1_intf_div_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_aux_clk = {
+ .halt_reg = 0x2364,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2364,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_crypto_clk = {
+ .halt_reg = 0x235c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x235c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_gtc_clk = {
+ .halt_reg = 0x2368,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2368,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_gtc_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_gtc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_link_clk = {
+ .halt_reg = 0x2354,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2354,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Reset state of MDSS_DP_LINK_INTF_DIV is 0x3 (div-4) */
+static struct clk_branch mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2358,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2358,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_pixel_clk = {
+ .halt_reg = 0x2360,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2360,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_pixel_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_dp_ahb_clk = {
+ .halt_reg = 0x230c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_dp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_rot_clk = {
+ .halt_reg = 0x2350,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2350,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_rot_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rot_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mnoc_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch misc_ahb_clk = {
+ .halt_reg = 0x328,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x328,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "misc_ahb_clk",
+ /*
+ * This dependency must be enabled before the
+ * branch itself is enabled.
+ */
+ .parent_hws = (const struct clk_hw *[]){ &mnoc_ahb_clk.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch misc_cxo_clk = {
+ .halt_reg = 0x324,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "misc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch snoc_dvm_axi_clk = {
+ .halt_reg = 0xe040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "snoc_dvm_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_ahb_clk = {
+ .halt_reg = 0x1030,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_axi_clk = {
+ .halt_reg = 0x1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_video_axi_clk = {
+ .halt_reg = 0x118c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x118c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x118c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_core_clk = {
+ .halt_reg = 0x1028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_core_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_subcore0_clk = {
+ .halt_reg = 0x1048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x1040,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .parent = &venus_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+ .cxcs = (unsigned int []){ 0x2040 },
+ .cxc_count = 1,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x34a0,
+ .pd = {
+ .name = "camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe0_gdsc = {
+ .gdscr = 0x3664,
+ .pd = {
+ .name = "camss_vfe0",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .pd = {
+ .name = "camss_vfe1",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .pd = {
+ .name = "camss_cpp",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+/* This GDSC seems to hang the whole multimedia subsystem.
+static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+*/
+
+static struct clk_regmap *mmcc_660_clocks[] = {
+ [AHB_CLK_SRC] = &ahb_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [CSIPHY_CLK_SRC] = &csiphy_clk_src.clkr,
+ [DP_AUX_CLK_SRC] = &dp_aux_clk_src.clkr,
+ [DP_CRYPTO_CLK_SRC] = &dp_crypto_clk_src.clkr,
+ [DP_GTC_CLK_SRC] = &dp_gtc_clk_src.clkr,
+ [DP_LINK_CLK_SRC] = &dp_link_clk_src.clkr,
+ [DP_PIXEL_CLK_SRC] = &dp_pixel_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [MMPLL0_PLL] = &mmpll0.clkr,
+ [MMPLL10_PLL] = &mmpll10.clkr,
+ [MMPLL3_PLL] = &mmpll3.clkr,
+ [MMPLL4_PLL] = &mmpll4.clkr,
+ [MMPLL5_PLL] = &mmpll5.clkr,
+ [MMPLL6_PLL] = &mmpll6.clkr,
+ [MMPLL7_PLL] = &mmpll7.clkr,
+ [MMPLL8_PLL] = &mmpll8.clkr,
+ [BIMC_SMMU_AHB_CLK] = &bimc_smmu_ahb_clk.clkr,
+ [BIMC_SMMU_AXI_CLK] = &bimc_smmu_axi_clk.clkr,
+ [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+ [CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
+ [CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
+ [CAMSS_CPHY_CSID0_CLK] = &camss_cphy_csid0_clk.clkr,
+ [CAMSS_CPHY_CSID1_CLK] = &camss_cphy_csid1_clk.clkr,
+ [CAMSS_CPHY_CSID2_CLK] = &camss_cphy_csid2_clk.clkr,
+ [CAMSS_CPHY_CSID3_CLK] = &camss_cphy_csid3_clk.clkr,
+ [CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
+ [CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
+ [CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
+ [CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_CSIPHY0_CLK] = &camss_csiphy0_clk.clkr,
+ [CAMSS_CSIPHY1_CLK] = &camss_csiphy1_clk.clkr,
+ [CAMSS_CSIPHY2_CLK] = &camss_csiphy2_clk.clkr,
+ [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+ [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
+ [CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
+ [CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
+ [CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
+ [CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
+ [CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
+ [CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
+ [CAMSS_VFE_VBIF_AHB_CLK] = &camss_vfe_vbif_ahb_clk.clkr,
+ [CAMSS_VFE_VBIF_AXI_CLK] = &camss_vfe_vbif_axi_clk.clkr,
+ [CSIPHY_AHB2CRIF_CLK] = &csiphy_ahb2crif_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE0_INTF_CLK] = &mdss_byte0_intf_clk.clkr,
+ [MDSS_BYTE0_INTF_DIV_CLK] = &mdss_byte0_intf_div_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_BYTE1_INTF_CLK] = &mdss_byte1_intf_clk.clkr,
+ [MDSS_DP_AUX_CLK] = &mdss_dp_aux_clk.clkr,
+ [MDSS_DP_CRYPTO_CLK] = &mdss_dp_crypto_clk.clkr,
+ [MDSS_DP_GTC_CLK] = &mdss_dp_gtc_clk.clkr,
+ [MDSS_DP_LINK_CLK] = &mdss_dp_link_clk.clkr,
+ [MDSS_DP_LINK_INTF_CLK] = &mdss_dp_link_intf_clk.clkr,
+ [MDSS_DP_PIXEL_CLK] = &mdss_dp_pixel_clk.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [MDSS_HDMI_DP_AHB_CLK] = &mdss_hdmi_dp_ahb_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_ROT_CLK] = &mdss_rot_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MISC_AHB_CLK] = &misc_ahb_clk.clkr,
+ [MISC_CXO_CLK] = &misc_cxo_clk.clkr,
+ [MNOC_AHB_CLK] = &mnoc_ahb_clk.clkr,
+ [SNOC_DVM_AXI_CLK] = &snoc_dvm_axi_clk.clkr,
+ [THROTTLE_CAMSS_AXI_CLK] = &throttle_camss_axi_clk.clkr,
+ [THROTTLE_MDSS_AXI_CLK] = &throttle_mdss_axi_clk.clkr,
+ [THROTTLE_VIDEO_AXI_CLK] = &throttle_video_axi_clk.clkr,
+ [VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
+ [VIDEO_AXI_CLK] = &video_axi_clk.clkr,
+ [VIDEO_CORE_CLK] = &video_core_clk.clkr,
+ [VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [ROT_CLK_SRC] = &rot_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [MDSS_BYTE1_INTF_DIV_CLK] = &mdss_byte1_intf_div_clk.clkr,
+ [AXI_CLK_SRC] = &axi_clk_src.clkr,
+};
+
+static struct gdsc *mmcc_sdm660_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [CAMSS_VFE0_GDSC] = &camss_vfe0_gdsc,
+ [CAMSS_VFE1_GDSC] = &camss_vfe1_gdsc,
+ [CAMSS_CPP_GDSC] = &camss_cpp_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_660_resets[] = {
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+};
+
+static const struct regmap_config mmcc_660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc mmcc_660_desc = {
+ .config = &mmcc_660_regmap_config,
+ .clks = mmcc_660_clocks,
+ .num_clks = ARRAY_SIZE(mmcc_660_clocks),
+ .resets = mmcc_660_resets,
+ .num_resets = ARRAY_SIZE(mmcc_660_resets),
+ .gdscs = mmcc_sdm660_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_sdm660_gdscs),
+};
+
+static const struct of_device_id mmcc_660_match_table[] = {
+ { .compatible = "qcom,mmcc-sdm660" },
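+ /* Non-NULL .data marks the SDM630 variant (checked in probe) */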
+ { .compatible = "qcom,mmcc-sdm630", .data = (void *)1UL },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_660_match_table);
+
+static void sdm630_clock_override(void)
+{
+ /* SDM630 has only one DSI */
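+ /* NULL entries are skipped by qcom_cc_really_probe() at registration */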
+ mmcc_660_desc.clks[BYTE1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_CLK] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_INTF_DIV_CLK] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_INTF_CLK] = NULL;
+ mmcc_660_desc.clks[ESC1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_ESC1_CLK] = NULL;
+ mmcc_660_desc.clks[PCLK1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_PCLK1_CLK] = NULL;
+}
+
+static int mmcc_660_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct regmap *regmap;
+ bool is_sdm630;
+
+ id = of_match_device(mmcc_660_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+ is_sdm630 = !!(id->data);
+
+ regmap = qcom_cc_map(pdev, &mmcc_660_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (is_sdm630)
+ sdm630_clock_override();
+
+ clk_alpha_pll_configure(&mmpll3, regmap, &mmpll3_config);
+ clk_alpha_pll_configure(&mmpll4, regmap, &mmpll4_config);
+ clk_alpha_pll_configure(&mmpll5, regmap, &mmpll5_config);
+ clk_alpha_pll_configure(&mmpll7, regmap, &mmpll7_config);
+ clk_alpha_pll_configure(&mmpll8, regmap, &mmpll8_config);
+ clk_alpha_pll_configure(&mmpll10, regmap, &mmpll10_config);
+
+ return qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
+}
+
+static struct platform_driver mmcc_660_driver = {
+ .probe = mmcc_660_probe,
+ .driver = {
+ .name = "mmcc-sdm660",
+ .of_match_table = mmcc_660_match_table,
+ },
+};
+module_platform_driver(mmcc_660_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDM630/SDM660 MMCC driver");
+MODULE_LICENSE("GPL v2");
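
The override above relies on the registration loop tolerating empty slots: NULLing an entry in mmcc_660_clocks[] is enough to keep that clock from being registered on SDM630. A minimal sketch of the pattern, assuming qcom_cc_really_probe() walks desc->clks and skips NULL slots (register_one() is a hypothetical stand-in for the real regmap-clock registration):

#include <stddef.h>

struct clk_regmap;				/* opaque for this sketch */

int register_one(struct clk_regmap *clk);	/* hypothetical helper */

static int register_clocks(struct clk_regmap **clks, size_t num)
{
	size_t i;
	int ret;

	for (i = 0; i < num; i++) {
		if (!clks[i])		/* slot cleared by sdm630_clock_override() */
			continue;
		ret = register_one(clks[i]);
		if (ret)
			return ret;
	}
	return 0;
}

This is also why the probe body calls sdm630_clock_override() before qcom_cc_really_probe(): the slots are cleared first, then the descriptor is handed over.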
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 2797c61f5938..b0efadc19634 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -169,6 +169,21 @@ static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
},
};
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0xd54,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
.reg = 0xcf4,
.shift = 0,
@@ -202,6 +217,24 @@ static struct clk_branch video_cc_mvs0c_clk = {
},
};
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0xd34,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xd34,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch video_cc_mvs1_div2_clk = {
.halt_reg = 0xdf4,
.halt_check = BRANCH_HALT_VOTED,
@@ -245,6 +278,7 @@ static struct gdsc mvs0c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs1c_gdsc = {
@@ -254,6 +288,7 @@ static struct gdsc mvs1c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs0_gdsc = {
@@ -263,6 +298,7 @@ static struct gdsc mvs0_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs1_gdsc = {
@@ -272,10 +308,13 @@ static struct gdsc mvs1_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct clk_regmap *video_cc_sm8250_clocks[] = {
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
[VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
[VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
[VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
[VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
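
Three pieces cooperate in this hunk: video_cc_mvs0_clk (the branch), video_cc_mvs0_div_clk_src (a read-only divider), and the existing video_cc_mvs0_clk_src RCG. Because both new clocks carry CLK_SET_RATE_PARENT, a rate request on the leaf branch is forwarded through the divider up to the RCG, which performs the actual frequency change; the "mmcx" supply on the GDSCs additionally ties the power domains to the MMCX rail. A consumer-side sketch under those assumptions (the device, consumer name, and rate are illustrative, not from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int venus_enable_core_clk(struct device *dev)
{
	struct clk *mvs0;
	int ret;

	mvs0 = devm_clk_get(dev, "vcodec0_core");	/* illustrative name */
	if (IS_ERR(mvs0))
		return PTR_ERR(mvs0);

	/* Forwarded through the read-only divider to video_cc_mvs0_clk_src */
	ret = clk_set_rate(mvs0, 720000000);
	if (ret)
		return ret;

	return clk_prepare_enable(mvs0);
}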
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 18915d668a30..607e64a17d72 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -148,6 +148,7 @@ config CLK_R8A77995
config CLK_R8A779A0
bool "R-Car V3U clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
config CLK_R9A06G032
@@ -162,12 +163,16 @@ config CLK_SH73A0
# Family
+config CLK_RCAR_CPG_LIB
+ bool "CPG/MSSR library functions" if COMPILE_TEST
+
config CLK_RCAR_GEN2_CPG
bool "R-Car Gen2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_GEN3_CPG
bool "R-Car Gen3 and RZ/G2 CPG clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_USB2_CLOCK_SEL
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index c803912ef2ce..ef0d2bba92bf 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
+obj-$(CONFIG_CLK_RCAR_CPG_LIB) += rcar-cpg-lib.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 2cd6e3876fbd..41593c126faf 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -128,6 +128,11 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("fdp1-0", 119, R8A7796_CLK_S0D1),
+ DEF_MOD("tmu4", 121, R8A7796_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A7796_CLK_CP),
DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
DEF_MOD("scif3", 204, R8A7796_CLK_S3D4),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 2b55a06ac5cf..46a157732759 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -123,6 +123,11 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("fdp1-0", 119, R8A77965_CLK_S0D1),
+ DEF_MOD("tmu4", 121, R8A77965_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A77965_CLK_CP),
DEF_MOD("scif5", 202, R8A77965_CLK_S3D4),
DEF_MOD("scif4", 203, R8A77965_CLK_S3D4),
DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 2b97ab61d044..2d172f80b34c 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -124,6 +124,11 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77990_CLK_S0D6C),
+ DEF_MOD("tmu3", 122, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A77990_CLK_CP),
DEF_MOD("scif5", 202, R8A77990_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A77990_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A77990_CLK_S3D4C),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 5b4691117b47..9cfd00cf4e69 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -111,6 +111,11 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77995_CLK_S1D4C),
+ DEF_MOD("tmu3", 122, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A77995_CLK_CP),
DEF_MOD("scif5", 202, R8A77995_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A77995_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A77995_CLK_S3D4C),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index aa5389b04d74..f23fe9d5e5e1 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -25,6 +25,7 @@
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+#include "rcar-cpg-lib.h"
#include "renesas-cpg-mssr.h"
enum rcar_r8a779a0_clk_types {
@@ -32,6 +33,7 @@ enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_PLL1,
CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
CLK_TYPE_R8A779A0_PLL5,
+ CLK_TYPE_R8A779A0_SD,
CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
};
@@ -69,7 +71,6 @@ enum clk_ids {
CLK_PLL5_DIV2,
CLK_PLL5_DIV4,
CLK_S1,
- CLK_S2,
CLK_S3,
CLK_SDSRC,
CLK_RPCSRC,
@@ -83,6 +84,9 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
+
#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
(_parent0) << 16 | (_parent1), \
@@ -114,6 +118,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
@@ -137,7 +142,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("icu", R8A779A0_CLK_ICU, CLK_PLL5_DIV4, 2, 1),
DEF_FIXED("icud2", R8A779A0_CLK_ICUD2, CLK_PLL5_DIV4, 4, 1),
DEF_FIXED("vcbus", R8A779A0_CLK_VCBUS, CLK_PLL5_DIV4, 1, 1),
- DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_MAIN, 2, 1),
+ DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_SD("sd0", R8A779A0_CLK_SD0, CLK_SDSRC, 0x870),
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
@@ -148,14 +156,42 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("avb0", 211, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb1", 212, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb2", 213, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2),
DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
DEF_MOD("csi43", 402, R8A779A0_CLK_CSI0),
+ DEF_MOD("fcpvd0", 508, R8A779A0_CLK_S3D1),
+ DEF_MOD("fcpvd1", 509, R8A779A0_CLK_S3D1),
+ DEF_MOD("hscif0", 514, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif1", 515, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif2", 516, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif3", 517, R8A779A0_CLK_S1D2),
+ DEF_MOD("i2c0", 518, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c1", 519, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c2", 520, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c3", 521, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c4", 522, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c5", 523, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c6", 524, R8A779A0_CLK_S1D4),
+ DEF_MOD("msi0", 618, R8A779A0_CLK_MSO),
+ DEF_MOD("msi1", 619, R8A779A0_CLK_MSO),
+ DEF_MOD("msi2", 620, R8A779A0_CLK_MSO),
+ DEF_MOD("msi3", 621, R8A779A0_CLK_MSO),
+ DEF_MOD("msi4", 622, R8A779A0_CLK_MSO),
+ DEF_MOD("msi5", 623, R8A779A0_CLK_MSO),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
+ DEF_MOD("sdhi0", 706, R8A779A0_CLK_SD0),
+ DEF_MOD("sydm1", 709, R8A779A0_CLK_S1D2),
+ DEF_MOD("sydm2", 710, R8A779A0_CLK_S1D2),
DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
@@ -188,10 +224,19 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vin35", 827, R8A779A0_CLK_S1D1),
DEF_MOD("vin36", 828, R8A779A0_CLK_S1D1),
DEF_MOD("vin37", 829, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspd0", 830, R8A779A0_CLK_S3D1),
+ DEF_MOD("vspd1", 831, R8A779A0_CLK_S3D1),
+ DEF_MOD("rwdt", 907, R8A779A0_CLK_R),
+ DEF_MOD("pfc0", 915, R8A779A0_CLK_CP),
+ DEF_MOD("pfc1", 916, R8A779A0_CLK_CP),
+ DEF_MOD("pfc2", 917, R8A779A0_CLK_CP),
+ DEF_MOD("pfc3", 918, R8A779A0_CLK_CP),
+ DEF_MOD("vspx0", 1028, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx1", 1029, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx2", 1030, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
};
-static spinlock_t cpg_lock;
-
static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
@@ -230,6 +275,12 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll5_div;
break;
+ case CLK_TYPE_R8A779A0_SD:
+ return cpg_sd_clk_register(core->name, base, core->offset,
+ __clk_get_name(parent), notifiers,
+ false);
+
case CLK_TYPE_R8A779A0_MDSEL:
/*
* Clock selectable between two parents and two fixed dividers
@@ -261,6 +312,10 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
__clk_get_name(parent), 0, mult, div);
}
+static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(907), /* RWDT */
+};
+
/*
* CPG Clock Data
*/
@@ -311,6 +366,10 @@ const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
.num_mod_clks = ARRAY_SIZE(r8a779a0_mod_clks),
.num_hw_mod_clks = 15 * 32,
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a779a0_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a779a0_crit_mod_clks),
+
/* Callbacks */
.init = r8a779a0_cpg_mssr_init,
.cpg_clk_register = rcar_r8a779a0_cpg_clk_register,
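
DEF_SD() is a thin wrapper around DEF_BASE() that tags a core clock with CLK_TYPE_R8A779A0_SD, which is what routes it to the shared cpg_sd_clk_register() helper inside rcar_r8a779a0_cpg_clk_register(). Roughly, the sd0 entry expands to an initializer like the following (field names follow struct cpg_core_clk in renesas-cpg-mssr.h; the exact layout may differ):

/* DEF_SD("sd0", R8A779A0_CLK_SD0, CLK_SDSRC, 0x870), approximately: */
{
	.name	= "sd0",
	.id	= R8A779A0_CLK_SD0,
	.type	= CLK_TYPE_R8A779A0_SD,
	.parent	= CLK_SDSRC,
	.offset	= 0x870,	/* SD0CKCR */
}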
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
new file mode 100644
index 000000000000..7e7e5d1341d5
--- /dev/null
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen3 Clock Pulse Generator Library
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on clk-rcar-gen3.c
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "rcar-cpg-lib.h"
+
+spinlock_t cpg_lock;
+
+void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&cpg_lock, flags);
+ val = readl(reg);
+ val &= ~clear;
+ val |= set;
+ writel(val, reg);
+ spin_unlock_irqrestore(&cpg_lock, flags);
+}
+
+static int cpg_simple_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpg_simple_notifier *csn =
+ container_of(nb, struct cpg_simple_notifier, nb);
+
+ switch (action) {
+ case PM_EVENT_SUSPEND:
+ csn->saved = readl(csn->reg);
+ return NOTIFY_OK;
+
+ case PM_EVENT_RESUME:
+ writel(csn->saved, csn->reg);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn)
+{
+ csn->nb.notifier_call = cpg_simple_notifier_call;
+ raw_notifier_chain_register(notifiers, &csn->nb);
+}
+
+/*
+ * SDn Clock
+ */
+#define CPG_SD_STP_HCK BIT(9)
+#define CPG_SD_STP_CK BIT(8)
+
+#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
+#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
+
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
+{ \
+ .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
+ ((sd_srcfc) << 2) | \
+ ((sd_fc) << 0), \
+ .div = (sd_div), \
+}
+
+struct sd_div_table {
+ u32 val;
+ unsigned int div;
+};
+
+struct sd_clock {
+ struct clk_hw hw;
+ const struct sd_div_table *div_table;
+ struct cpg_simple_notifier csn;
+ unsigned int div_num;
+ unsigned int cur_div_idx;
+};
+
+/* SDn divider
+ * sd_srcfc sd_fc div
+ * stp_hck (div) (div) = sd_srcfc x sd_fc
+ *---------------------------------------------------------
+ * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
+ * 0 1 (2) 1 (4) 8 : SDR50
+ * 1 2 (4) 1 (4) 16 : HS / SDR25
+ * 1 3 (8) 1 (4) 32 : NS / SDR12
+ * 1 4 (16) 1 (4) 64
+ * 0 0 (1) 0 (2) 2
+ * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
+ * 1 2 (4) 0 (2) 8
+ * 1 3 (8) 0 (2) 16
+ * 1 4 (16) 0 (2) 32
+ *
+ * NOTE: There is a quirk option to ignore the first row of the dividers
+ * table when searching for suitable settings. This is because HS400 on
+ * early ES versions of H3 and M3-W requires a specific setting to work.
+ */
+static const struct sd_div_table cpg_sd_div_table[] = {
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
+};
+
+#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
+
+static int cpg_sd_clock_enable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
+ clock->div_table[clock->cur_div_idx].val &
+ CPG_SD_STP_MASK);
+
+ return 0;
+}
+
+static void cpg_sd_clock_disable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
+}
+
+static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
+}
+
+static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[clock->cur_div_idx].div);
+}
+
+static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long calc_rate, diff;
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++) {
+ calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
+ clock->div_table[i].div);
+ if (calc_rate < req->min_rate || calc_rate > req->max_rate)
+ continue;
+
+ diff = calc_rate > req->rate ? calc_rate - req->rate
+ : req->rate - calc_rate;
+ if (diff < diff_min) {
+ best_rate = calc_rate;
+ diff_min = diff;
+ }
+ }
+
+ if (best_rate == ULONG_MAX)
+ return -EINVAL;
+
+ req->rate = best_rate;
+ return 0;
+}
+
+static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ clock->cur_div_idx = i;
+
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
+ clock->div_table[i].val &
+ (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
+
+ return 0;
+}
+
+static const struct clk_ops cpg_sd_clock_ops = {
+ .enable = cpg_sd_clock_enable,
+ .disable = cpg_sd_clock_disable,
+ .is_enabled = cpg_sd_clock_is_enabled,
+ .recalc_rate = cpg_sd_clock_recalc_rate,
+ .determine_rate = cpg_sd_clock_determine_rate,
+ .set_rate = cpg_sd_clock_set_rate,
+};
+
+struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
+ struct raw_notifier_head *notifiers, bool skip_first)
+{
+ struct clk_init_data init;
+ struct sd_clock *clock;
+ struct clk *clk;
+ u32 val;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_sd_clock_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->csn.reg = base + offset;
+ clock->hw.init = &init;
+ clock->div_table = cpg_sd_div_table;
+ clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+
+ if (skip_first) {
+ clock->div_table++;
+ clock->div_num--;
+ }
+
+ val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
+ val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
+ writel(val, clock->csn.reg);
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk))
+ goto free_clock;
+
+ cpg_simple_notifier_register(notifiers, &clock->csn);
+ return clk;
+
+free_clock:
+ kfree(clock);
+ return clk;
+}
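
cpg_sd_clock_determine_rate() and cpg_sd_clock_set_rate() together amount to a nearest-match search over the fixed divider table. A standalone sketch of the same selection (userspace, illustrative 800 MHz parent; divider values copied from cpg_sd_div_table in table order, min/max clamping omitted):

#include <stdio.h>

static const unsigned int sd_divs[] = { 4, 8, 16, 32, 64, 2, 4, 8, 16, 32 };

static unsigned long pick_rate(unsigned long parent, unsigned long want)
{
	unsigned long best = 0, best_diff = (unsigned long)-1;
	unsigned int i;

	for (i = 0; i < sizeof(sd_divs) / sizeof(sd_divs[0]); i++) {
		/* DIV_ROUND_CLOSEST() equivalent */
		unsigned long rate = (parent + sd_divs[i] / 2) / sd_divs[i];
		unsigned long diff = rate > want ? rate - want : want - rate;

		if (diff < best_diff) {
			best = rate;
			best_diff = diff;
		}
	}
	return best;
}

int main(void)
{
	/* 800 MHz SDSRC, requesting SDR104's 200 MHz -> the first div=4 row wins */
	printf("%lu\n", pick_rate(800000000UL, 200000000UL));
	return 0;
}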
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
new file mode 100644
index 000000000000..d00c91b116ca
--- /dev/null
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen3 Clock Pulse Generator Library
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on clk-rcar-gen3.c
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#ifndef __CLK_RENESAS_RCAR_CPG_LIB_H__
+#define __CLK_RENESAS_RCAR_CPG_LIB_H__
+
+extern spinlock_t cpg_lock;
+
+struct cpg_simple_notifier {
+ struct notifier_block nb;
+ void __iomem *reg;
+ u32 saved;
+};
+
+void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn);
+
+void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set);
+
+struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
+ struct raw_notifier_head *notifiers, bool skip_first);
+
+#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 063b61151488..17826599e9dd 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -23,6 +23,7 @@
#include <linux/sys_soc.h>
#include "renesas-cpg-mssr.h"
+#include "rcar-cpg-lib.h"
#include "rcar-gen3-cpg.h"
#define CPG_PLL0CR 0x00d8
@@ -31,52 +32,6 @@
#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
-static spinlock_t cpg_lock;
-
-static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
-{
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&cpg_lock, flags);
- val = readl(reg);
- val &= ~clear;
- val |= set;
- writel(val, reg);
- spin_unlock_irqrestore(&cpg_lock, flags);
-};
-
-struct cpg_simple_notifier {
- struct notifier_block nb;
- void __iomem *reg;
- u32 saved;
-};
-
-static int cpg_simple_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct cpg_simple_notifier *csn =
- container_of(nb, struct cpg_simple_notifier, nb);
-
- switch (action) {
- case PM_EVENT_SUSPEND:
- csn->saved = readl(csn->reg);
- return NOTIFY_OK;
-
- case PM_EVENT_RESUME:
- writel(csn->saved, csn->reg);
- return NOTIFY_OK;
- }
- return NOTIFY_DONE;
-}
-
-static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
- struct cpg_simple_notifier *csn)
-{
- csn->nb.notifier_call = cpg_simple_notifier_call;
- raw_notifier_chain_register(notifiers, &csn->nb);
-}
-
/*
* Z Clock & Z2 Clock
*
@@ -215,217 +170,6 @@ static struct clk * __init cpg_z_clk_register(const char *name,
return clk;
}
-/*
- * SDn Clock
- */
-#define CPG_SD_STP_HCK BIT(9)
-#define CPG_SD_STP_CK BIT(8)
-
-#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
-#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
-{ \
- .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((sd_srcfc) << 2) | \
- ((sd_fc) << 0), \
- .div = (sd_div), \
-}
-
-struct sd_div_table {
- u32 val;
- unsigned int div;
-};
-
-struct sd_clock {
- struct clk_hw hw;
- const struct sd_div_table *div_table;
- struct cpg_simple_notifier csn;
- unsigned int div_num;
- unsigned int cur_div_idx;
-};
-
-/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck (div) (div) = sd_srcfc x sd_fc
- *---------------------------------------------------------
- * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 1 (2) 1 (4) 8 : SDR50
- * 1 2 (4) 1 (4) 16 : HS / SDR25
- * 1 3 (8) 1 (4) 32 : NS / SDR12
- * 1 4 (16) 1 (4) 64
- * 0 0 (1) 0 (2) 2
- * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 2 (4) 0 (2) 8
- * 1 3 (8) 0 (2) 16
- * 1 4 (16) 0 (2) 32
- *
- * NOTE: There is a quirk option to ignore the first row of the dividers
- * table when searching for suitable settings. This is because HS400 on
- * early ES versions of H3 and M3-W requires a specific setting to work.
- */
-static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
-};
-
-#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
-
-static int cpg_sd_clock_enable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
- clock->div_table[clock->cur_div_idx].val &
- CPG_SD_STP_MASK);
-
- return 0;
-}
-
-static void cpg_sd_clock_disable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
-}
-
-static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
-}
-
-static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[clock->cur_div_idx].div);
-}
-
-static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned long calc_rate, diff;
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
- clock->div_table[i].div);
- if (calc_rate < req->min_rate || calc_rate > req->max_rate)
- continue;
-
- diff = calc_rate > req->rate ? calc_rate - req->rate
- : req->rate - calc_rate;
- if (diff < diff_min) {
- best_rate = calc_rate;
- diff_min = diff;
- }
- }
-
- if (best_rate == ULONG_MAX)
- return -EINVAL;
-
- req->rate = best_rate;
- return 0;
-}
-
-static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++)
- if (rate == DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[i].div))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- clock->cur_div_idx = i;
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
- clock->div_table[i].val &
- (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
-
- return 0;
-}
-
-static const struct clk_ops cpg_sd_clock_ops = {
- .enable = cpg_sd_clock_enable,
- .disable = cpg_sd_clock_disable,
- .is_enabled = cpg_sd_clock_is_enabled,
- .recalc_rate = cpg_sd_clock_recalc_rate,
- .determine_rate = cpg_sd_clock_determine_rate,
- .set_rate = cpg_sd_clock_set_rate,
-};
-
-static u32 cpg_quirks __initdata;
-
-#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
-#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
-
-static struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers)
-{
- struct clk_init_data init;
- struct sd_clock *clock;
- struct clk *clk;
- u32 val;
-
- clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- clock->csn.reg = base + offset;
- clock->hw.init = &init;
- clock->div_table = cpg_sd_div_table;
- clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
-
- if (cpg_quirks & SD_SKIP_FIRST) {
- clock->div_table++;
- clock->div_num--;
- }
-
- val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
- val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
-
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
- goto free_clock;
-
- cpg_simple_notifier_register(notifiers, &clock->csn);
- return clk;
-
-free_clock:
- kfree(clock);
- return clk;
-}
-
struct rpc_clock {
struct clk_divider div;
struct clk_gate gate;
@@ -518,6 +262,12 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
+static u32 cpg_quirks __initdata;
+
+#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
+#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
+#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
+
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
@@ -613,7 +363,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_SD:
return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers);
+ __clk_get_name(parent), notifiers,
+ cpg_quirks & SD_SKIP_FIRST);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
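
With the SD logic now in the library, the quirk bits stay private to rcar-gen3-cpg.c and only a bool crosses the interface. The bits themselves are still latched once at init time from the SoC revision; a sketch of that pattern, assuming the usual soc_device_match() flow (the cpg_quirks_match table contents are elided here):

static void __init cpg_latch_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(cpg_quirks_match);
	if (attr)
		cpg_quirks = (uintptr_t)attr->data;
}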
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1c3215dc4877..bffbc3d2faf5 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -136,8 +136,8 @@ static const u16 srstclr_for_v3u[] = {
* @control_regs: Pointer to control registers array
* @reset_regs: Pointer to reset registers array
* @reset_clear_regs: Pointer to reset clearing registers array
- * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
- * @smstpcr_saved[].val: Saved values of SMSTPCR[]
+ * @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
+ * [].val: Saved values of SMSTPCR[]
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 0dc478a19451..fa9027fb1920 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -51,10 +51,6 @@
*/
struct rockchip_cpuclk {
struct clk_hw hw;
-
- struct clk_mux cpu_mux;
- const struct clk_ops *cpu_mux_ops;
-
struct clk *alt_parent;
void __iomem *reg_base;
struct notifier_block clk_nb;
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index ccd5c270c213..64f7faad2148 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -145,7 +145,7 @@ static const struct clk_ops clk_half_divider_ops = {
.set_rate = clk_half_divider_set_rate,
};
-/**
+/*
* Register a clock branch.
* Most clock branches have a form like
*
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 4c6c9167ef50..fe937bcdb487 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -97,7 +97,7 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
return ret;
}
-/**
+/*
* PLL used in RK3036
*/
@@ -358,7 +358,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.init = rockchip_rk3036_pll_init,
};
-/**
+/*
* PLL used in RK3066, RK3188 and RK3288
*/
@@ -577,7 +577,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.init = rockchip_rk3066_pll_init,
};
-/**
+/*
* PLL used in RK3399
*/
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 55443349439b..9a0dab9448db 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -474,7 +474,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
RK3368_CLKGATE_CON(4), 5, GFLAGS),
- COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
+ COMPOSITE_NOGATE(SCLK_VIP_OUT, "sclk_vip_out", mux_vip_out_p, 0,
RK3368_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS),
COMPOSITE_NODIV(SCLK_EDP_24M, "sclk_edp_24m", mux_edp_24m_p, 0,
@@ -818,8 +818,8 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* pclk_vio gates
* pclk_vio comes from the exactly same source as hclk_vio
*/
- GATE(0, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
- GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_DPHYRX, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 11, GFLAGS),
+ GATE(PCLK_DPHYTX0, "pclk_dphytx0", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 10, GFLAGS),
/* pclk_pd_pmu gates */
GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 336481bc6cc7..049e5e0b64f6 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -24,7 +24,7 @@
#include <linux/rational.h>
#include "clk.h"
-/**
+/*
* Register a clock branch.
* Most clock branches have a form like
*
@@ -170,7 +170,7 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-/**
+/*
 * The fractional divider requires the denominator to be at least 20 times
 * the numerator to generate a precise clock frequency.
*/
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index c8271efa7bdc..c220677dc010 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -13,9 +13,4 @@
extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
-static const struct prci_clk_desc prci_clk_fu540 = {
- .clks = __prci_init_clocks_fu540,
- .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
-};
-
#endif /* __SIFIVE_CLK_FU540_PRCI_H */
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index c78b042750e2..1490b01ce629 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -12,6 +12,11 @@
#include "fu540-prci.h"
#include "fu740-prci.h"
+static const struct prci_clk_desc prci_clk_fu540 = {
+ .clks = __prci_init_clocks_fu540,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
+};
+
/*
* Private functions
*/
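
Moving prci_clk_fu540 from fu540-prci.h into sifive-prci.c means the descriptor is defined once, next to its only user, instead of every includer of the header getting its own internal-linkage copy. The failure mode being avoided, sketched (the warning name assumes a W=1 build):

/* header (before): each .c file that includes this gets a private copy */
static const struct prci_clk_desc prci_clk_fu540 = {
	.clks = __prci_init_clocks_fu540,
	.num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
};
/* ...so any includer that never references the symbol can trip
 * -Wunused-const-variable, and ARRAY_SIZE() forces the array definition
 * to be visible in the header as well. Defining the descriptor in the
 * .c file removes both problems. */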
diff --git a/drivers/clk/sirf/Makefile b/drivers/clk/sirf/Makefile
deleted file mode 100644
index 0ff61f87cddb..000000000000
--- a/drivers/clk/sirf/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for sirf specific clk
-#
-
-obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o clk-atlas7.o
diff --git a/drivers/clk/sirf/atlas6.h b/drivers/clk/sirf/atlas6.h
deleted file mode 100644
index cb871e30a175..000000000000
--- a/drivers/clk/sirf/atlas6.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0020
-#define SIRFSOC_CLKC_CPU_CFG 0x0024
-#define SIRFSOC_CLKC_MEM_CFG 0x0028
-#define SIRFSOC_CLKC_MEMDIV_CFG 0x002C
-#define SIRFSOC_CLKC_SYS_CFG 0x0030
-#define SIRFSOC_CLKC_IO_CFG 0x0034
-#define SIRFSOC_CLKC_DSP_CFG 0x0038
-#define SIRFSOC_CLKC_GFX_CFG 0x003c
-#define SIRFSOC_CLKC_MM_CFG 0x0040
-#define SIRFSOC_CLKC_GFX2D_CFG 0x0040
-#define SIRFSOC_CLKC_LCD_CFG 0x0044
-#define SIRFSOC_CLKC_MMC01_CFG 0x0048
-#define SIRFSOC_CLKC_MMC23_CFG 0x004C
-#define SIRFSOC_CLKC_MMC45_CFG 0x0050
-#define SIRFSOC_CLKC_NAND_CFG 0x0054
-#define SIRFSOC_CLKC_NANDDIV_CFG 0x0058
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0080
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0084
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0088
-#define SIRFSOC_CLKC_PLL1_CFG1 0x008c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0090
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0094
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0098
-#define SIRFSOC_CLKC_PLL2_CFG2 0x009c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x00A0
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
deleted file mode 100644
index b95483bb6a5e..000000000000
--- a/drivers/clk/sirf/clk-atlas6.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFatlasVI
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#include "atlas6.h"
-#include "clk-common.c"
-
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC01_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC23_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC45_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
-static const struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_nand = {
- .regofs = SIRFSOC_CLKC_NAND_CFG,
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
-enum atlas6_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, gfx2d, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, cphif, maxclk,
-};
-
-static __initdata struct clk_hw *atlas6_clk_hw_array[maxclk] = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_gfx2d.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
- &clk_cphif.hw,
-};
-
-static struct clk *atlas6_clks[maxclk];
-
-static void __init atlas6_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- /* These are always available (RTC and 26MHz OSC)*/
- atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
- atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
- 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
- BUG_ON(IS_ERR(atlas6_clks[i]));
- }
- clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
- clk_register_clkdev(atlas6_clks[io], NULL, "io");
- clk_register_clkdev(atlas6_clks[mem], NULL, "mem");
- clk_register_clkdev(atlas6_clks[mem], NULL, "osc");
-
- clk_data.clks = atlas6_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(atlas6_clk, "sirf,atlas6-clkc", atlas6_clk_init);
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
deleted file mode 100644
index 3f57fefd13bb..000000000000
--- a/drivers/clk/sirf/clk-atlas7.c
+++ /dev/null
@@ -1,1682 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFAtlas7
- *
- * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/reset-controller.h>
-#include <linux/slab.h>
-
-#define SIRFSOC_CLKC_MEMPLL_AB_FREQ 0x0000
-#define SIRFSOC_CLKC_MEMPLL_AB_SSC 0x0004
-#define SIRFSOC_CLKC_MEMPLL_AB_CTRL0 0x0008
-#define SIRFSOC_CLKC_MEMPLL_AB_CTRL1 0x000c
-#define SIRFSOC_CLKC_MEMPLL_AB_STATUS 0x0010
-#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_ADDR 0x0014
-#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_DATA 0x0018
-
-#define SIRFSOC_CLKC_CPUPLL_AB_FREQ 0x001c
-#define SIRFSOC_CLKC_CPUPLL_AB_SSC 0x0020
-#define SIRFSOC_CLKC_CPUPLL_AB_CTRL0 0x0024
-#define SIRFSOC_CLKC_CPUPLL_AB_CTRL1 0x0028
-#define SIRFSOC_CLKC_CPUPLL_AB_STATUS 0x002c
-
-#define SIRFSOC_CLKC_SYS0PLL_AB_FREQ 0x0030
-#define SIRFSOC_CLKC_SYS0PLL_AB_SSC 0x0034
-#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL0 0x0038
-#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL1 0x003c
-#define SIRFSOC_CLKC_SYS0PLL_AB_STATUS 0x0040
-
-#define SIRFSOC_CLKC_SYS1PLL_AB_FREQ 0x0044
-#define SIRFSOC_CLKC_SYS1PLL_AB_SSC 0x0048
-#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL0 0x004c
-#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL1 0x0050
-#define SIRFSOC_CLKC_SYS1PLL_AB_STATUS 0x0054
-
-#define SIRFSOC_CLKC_SYS2PLL_AB_FREQ 0x0058
-#define SIRFSOC_CLKC_SYS2PLL_AB_SSC 0x005c
-#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL0 0x0060
-#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL1 0x0064
-#define SIRFSOC_CLKC_SYS2PLL_AB_STATUS 0x0068
-
-#define SIRFSOC_CLKC_SYS3PLL_AB_FREQ 0x006c
-#define SIRFSOC_CLKC_SYS3PLL_AB_SSC 0x0070
-#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL0 0x0074
-#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL1 0x0078
-#define SIRFSOC_CLKC_SYS3PLL_AB_STATUS 0x007c
-
-#define SIRFSOC_ABPLL_CTRL0_SSEN 0x00001000
-#define SIRFSOC_ABPLL_CTRL0_BYPASS 0x00000010
-#define SIRFSOC_ABPLL_CTRL0_RESET 0x00000001
-
-#define SIRFSOC_CLKC_AUDIO_DTO_INC 0x0088
-#define SIRFSOC_CLKC_DISP0_DTO_INC 0x008c
-#define SIRFSOC_CLKC_DISP1_DTO_INC 0x0090
-
-#define SIRFSOC_CLKC_AUDIO_DTO_SRC 0x0094
-#define SIRFSOC_CLKC_AUDIO_DTO_ENA 0x0098
-#define SIRFSOC_CLKC_AUDIO_DTO_DROFF 0x009c
-
-#define SIRFSOC_CLKC_DISP0_DTO_SRC 0x00a0
-#define SIRFSOC_CLKC_DISP0_DTO_ENA 0x00a4
-#define SIRFSOC_CLKC_DISP0_DTO_DROFF 0x00a8
-
-#define SIRFSOC_CLKC_DISP1_DTO_SRC 0x00ac
-#define SIRFSOC_CLKC_DISP1_DTO_ENA 0x00b0
-#define SIRFSOC_CLKC_DISP1_DTO_DROFF 0x00b4
-
-#define SIRFSOC_CLKC_I2S_CLK_SEL 0x00b8
-#define SIRFSOC_CLKC_I2S_SEL_STAT 0x00bc
-
-#define SIRFSOC_CLKC_USBPHY_CLKDIV_CFG 0x00c0
-#define SIRFSOC_CLKC_USBPHY_CLKDIV_ENA 0x00c4
-#define SIRFSOC_CLKC_USBPHY_CLK_SEL 0x00c8
-#define SIRFSOC_CLKC_USBPHY_CLK_SEL_STAT 0x00cc
-
-#define SIRFSOC_CLKC_BTSS_CLKDIV_CFG 0x00d0
-#define SIRFSOC_CLKC_BTSS_CLKDIV_ENA 0x00d4
-#define SIRFSOC_CLKC_BTSS_CLK_SEL 0x00d8
-#define SIRFSOC_CLKC_BTSS_CLK_SEL_STAT 0x00dc
-
-#define SIRFSOC_CLKC_RGMII_CLKDIV_CFG 0x00e0
-#define SIRFSOC_CLKC_RGMII_CLKDIV_ENA 0x00e4
-#define SIRFSOC_CLKC_RGMII_CLK_SEL 0x00e8
-#define SIRFSOC_CLKC_RGMII_CLK_SEL_STAT 0x00ec
-
-#define SIRFSOC_CLKC_CPU_CLKDIV_CFG 0x00f0
-#define SIRFSOC_CLKC_CPU_CLKDIV_ENA 0x00f4
-#define SIRFSOC_CLKC_CPU_CLK_SEL 0x00f8
-#define SIRFSOC_CLKC_CPU_CLK_SEL_STAT 0x00fc
-
-#define SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG 0x0100
-#define SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA 0x0104
-#define SIRFSOC_CLKC_SDPHY01_CLK_SEL 0x0108
-#define SIRFSOC_CLKC_SDPHY01_CLK_SEL_STAT 0x010c
-
-#define SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG 0x0110
-#define SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA 0x0114
-#define SIRFSOC_CLKC_SDPHY23_CLK_SEL 0x0118
-#define SIRFSOC_CLKC_SDPHY23_CLK_SEL_STAT 0x011c
-
-#define SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG 0x0120
-#define SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA 0x0124
-#define SIRFSOC_CLKC_SDPHY45_CLK_SEL 0x0128
-#define SIRFSOC_CLKC_SDPHY45_CLK_SEL_STAT 0x012c
-
-#define SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG 0x0130
-#define SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA 0x0134
-#define SIRFSOC_CLKC_SDPHY67_CLK_SEL 0x0138
-#define SIRFSOC_CLKC_SDPHY67_CLK_SEL_STAT 0x013c
-
-#define SIRFSOC_CLKC_CAN_CLKDIV_CFG 0x0140
-#define SIRFSOC_CLKC_CAN_CLKDIV_ENA 0x0144
-#define SIRFSOC_CLKC_CAN_CLK_SEL 0x0148
-#define SIRFSOC_CLKC_CAN_CLK_SEL_STAT 0x014c
-
-#define SIRFSOC_CLKC_DEINT_CLKDIV_CFG 0x0150
-#define SIRFSOC_CLKC_DEINT_CLKDIV_ENA 0x0154
-#define SIRFSOC_CLKC_DEINT_CLK_SEL 0x0158
-#define SIRFSOC_CLKC_DEINT_CLK_SEL_STAT 0x015c
-
-#define SIRFSOC_CLKC_NAND_CLKDIV_CFG 0x0160
-#define SIRFSOC_CLKC_NAND_CLKDIV_ENA 0x0164
-#define SIRFSOC_CLKC_NAND_CLK_SEL 0x0168
-#define SIRFSOC_CLKC_NAND_CLK_SEL_STAT 0x016c
-
-#define SIRFSOC_CLKC_DISP0_CLKDIV_CFG 0x0170
-#define SIRFSOC_CLKC_DISP0_CLKDIV_ENA 0x0174
-#define SIRFSOC_CLKC_DISP0_CLK_SEL 0x0178
-#define SIRFSOC_CLKC_DISP0_CLK_SEL_STAT 0x017c
-
-#define SIRFSOC_CLKC_DISP1_CLKDIV_CFG 0x0180
-#define SIRFSOC_CLKC_DISP1_CLKDIV_ENA 0x0184
-#define SIRFSOC_CLKC_DISP1_CLK_SEL 0x0188
-#define SIRFSOC_CLKC_DISP1_CLK_SEL_STAT 0x018c
-
-#define SIRFSOC_CLKC_GPU_CLKDIV_CFG 0x0190
-#define SIRFSOC_CLKC_GPU_CLKDIV_ENA 0x0194
-#define SIRFSOC_CLKC_GPU_CLK_SEL 0x0198
-#define SIRFSOC_CLKC_GPU_CLK_SEL_STAT 0x019c
-
-#define SIRFSOC_CLKC_GNSS_CLKDIV_CFG 0x01a0
-#define SIRFSOC_CLKC_GNSS_CLKDIV_ENA 0x01a4
-#define SIRFSOC_CLKC_GNSS_CLK_SEL 0x01a8
-#define SIRFSOC_CLKC_GNSS_CLK_SEL_STAT 0x01ac
-
-#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG0 0x01b0
-#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG1 0x01b4
-#define SIRFSOC_CLKC_SHARED_DIVIDER_ENA 0x01b8
-
-#define SIRFSOC_CLKC_SYS_CLK_SEL 0x01bc
-#define SIRFSOC_CLKC_SYS_CLK_SEL_STAT 0x01c0
-#define SIRFSOC_CLKC_IO_CLK_SEL 0x01c4
-#define SIRFSOC_CLKC_IO_CLK_SEL_STAT 0x01c8
-#define SIRFSOC_CLKC_G2D_CLK_SEL 0x01cc
-#define SIRFSOC_CLKC_G2D_CLK_SEL_STAT 0x01d0
-#define SIRFSOC_CLKC_JPENC_CLK_SEL 0x01d4
-#define SIRFSOC_CLKC_JPENC_CLK_SEL_STAT 0x01d8
-#define SIRFSOC_CLKC_VDEC_CLK_SEL 0x01dc
-#define SIRFSOC_CLKC_VDEC_CLK_SEL_STAT 0x01e0
-#define SIRFSOC_CLKC_GMAC_CLK_SEL 0x01e4
-#define SIRFSOC_CLKC_GMAC_CLK_SEL_STAT 0x01e8
-#define SIRFSOC_CLKC_USB_CLK_SEL 0x01ec
-#define SIRFSOC_CLKC_USB_CLK_SEL_STAT 0x01f0
-#define SIRFSOC_CLKC_KAS_CLK_SEL 0x01f4
-#define SIRFSOC_CLKC_KAS_CLK_SEL_STAT 0x01f8
-#define SIRFSOC_CLKC_SEC_CLK_SEL 0x01fc
-#define SIRFSOC_CLKC_SEC_CLK_SEL_STAT 0x0200
-#define SIRFSOC_CLKC_SDR_CLK_SEL 0x0204
-#define SIRFSOC_CLKC_SDR_CLK_SEL_STAT 0x0208
-#define SIRFSOC_CLKC_VIP_CLK_SEL 0x020c
-#define SIRFSOC_CLKC_VIP_CLK_SEL_STAT 0x0210
-#define SIRFSOC_CLKC_NOCD_CLK_SEL 0x0214
-#define SIRFSOC_CLKC_NOCD_CLK_SEL_STAT 0x0218
-#define SIRFSOC_CLKC_NOCR_CLK_SEL 0x021c
-#define SIRFSOC_CLKC_NOCR_CLK_SEL_STAT 0x0220
-#define SIRFSOC_CLKC_TPIU_CLK_SEL 0x0224
-#define SIRFSOC_CLKC_TPIU_CLK_SEL_STAT 0x0228
-
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_SET 0x022c
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_CLR 0x0230
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_STAT 0x0234
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_SET 0x0238
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_CLR 0x023c
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_STAT 0x0240
-
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_SET 0x0244
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_CLR 0x0248
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_STAT 0x024c
-
-#define SIRFSOC_CLKC_RSTC_A7_SW_RST 0x0308
-
-#define SIRFSOC_CLKC_LEAF_CLK_EN1_SET 0x04a0
-#define SIRFSOC_CLKC_LEAF_CLK_EN2_SET 0x04b8
-#define SIRFSOC_CLKC_LEAF_CLK_EN3_SET 0x04d0
-#define SIRFSOC_CLKC_LEAF_CLK_EN4_SET 0x04e8
-#define SIRFSOC_CLKC_LEAF_CLK_EN5_SET 0x0500
-#define SIRFSOC_CLKC_LEAF_CLK_EN6_SET 0x0518
-#define SIRFSOC_CLKC_LEAF_CLK_EN7_SET 0x0530
-#define SIRFSOC_CLKC_LEAF_CLK_EN8_SET 0x0548
-
-#define SIRFSOC_NOC_CLK_IDLEREQ_SET 0x02D0
-#define SIRFSOC_NOC_CLK_IDLEREQ_CLR 0x02D4
-#define SIRFSOC_NOC_CLK_SLVRDY_SET 0x02E8
-#define SIRFSOC_NOC_CLK_SLVRDY_CLR 0x02EC
-#define SIRFSOC_NOC_CLK_IDLE_STATUS 0x02F4
-
-struct clk_pll {
- struct clk_hw hw;
- u16 regofs; /* register offset */
-};
-#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw)
-
-struct clk_dto {
- struct clk_hw hw;
- u16 inc_offset; /* dto increment offset */
- u16 src_offset; /* dto src offset */
-};
-#define to_dtoclk(_hw) container_of(_hw, struct clk_dto, hw)
-
-enum clk_unit_type {
- CLK_UNIT_NOC_OTHER,
- CLK_UNIT_NOC_CLOCK,
- CLK_UNIT_NOC_SOCKET,
-};
-
-struct clk_unit {
- struct clk_hw hw;
- u16 regofs;
- u16 bit;
- u32 type;
- u8 idle_bit;
- spinlock_t *lock;
-};
-#define to_unitclk(_hw) container_of(_hw, struct clk_unit, hw)
-
-struct atlas7_div_init_data {
- const char *div_name;
- const char *parent_name;
- const char *gate_name;
- unsigned long flags;
- u8 divider_flags;
- u8 gate_flags;
- u32 div_offset;
- u8 shift;
- u8 width;
- u32 gate_offset;
- u8 gate_bit;
- spinlock_t *lock;
-};
-
-struct atlas7_mux_init_data {
- const char *mux_name;
- const char * const *parent_names;
- u8 parent_num;
- unsigned long flags;
- u8 mux_flags;
- u32 mux_offset;
- u8 shift;
- u8 width;
-};
-
-struct atlas7_unit_init_data {
- u32 index;
- const char *unit_name;
- const char *parent_name;
- unsigned long flags;
- u32 regofs;
- u8 bit;
- u32 type;
- u8 idle_bit;
- spinlock_t *lock;
-};
-
-struct atlas7_reset_desc {
- const char *name;
- u32 clk_ofs;
- u8 clk_bit;
- u32 rst_ofs;
- u8 rst_bit;
- spinlock_t *lock;
-};
-
-static void __iomem *sirfsoc_clk_vbase;
-static struct clk_onecell_data clk_data;
-
-static const struct clk_div_table pll_div_table[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 8 },
- { .val = 4, .div = 16 },
- { .val = 5, .div = 32 },
-};
-
-static DEFINE_SPINLOCK(cpupll_ctrl1_lock);
-static DEFINE_SPINLOCK(mempll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys0pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys1pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys2pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys3pll_ctrl1_lock);
-static DEFINE_SPINLOCK(usbphy_div_lock);
-static DEFINE_SPINLOCK(btss_div_lock);
-static DEFINE_SPINLOCK(rgmii_div_lock);
-static DEFINE_SPINLOCK(cpu_div_lock);
-static DEFINE_SPINLOCK(sdphy01_div_lock);
-static DEFINE_SPINLOCK(sdphy23_div_lock);
-static DEFINE_SPINLOCK(sdphy45_div_lock);
-static DEFINE_SPINLOCK(sdphy67_div_lock);
-static DEFINE_SPINLOCK(can_div_lock);
-static DEFINE_SPINLOCK(deint_div_lock);
-static DEFINE_SPINLOCK(nand_div_lock);
-static DEFINE_SPINLOCK(disp0_div_lock);
-static DEFINE_SPINLOCK(disp1_div_lock);
-static DEFINE_SPINLOCK(gpu_div_lock);
-static DEFINE_SPINLOCK(gnss_div_lock);
-/* gate register shared */
-static DEFINE_SPINLOCK(share_div_lock);
-static DEFINE_SPINLOCK(root0_gate_lock);
-static DEFINE_SPINLOCK(root1_gate_lock);
-static DEFINE_SPINLOCK(leaf0_gate_lock);
-static DEFINE_SPINLOCK(leaf1_gate_lock);
-static DEFINE_SPINLOCK(leaf2_gate_lock);
-static DEFINE_SPINLOCK(leaf3_gate_lock);
-static DEFINE_SPINLOCK(leaf4_gate_lock);
-static DEFINE_SPINLOCK(leaf5_gate_lock);
-static DEFINE_SPINLOCK(leaf6_gate_lock);
-static DEFINE_SPINLOCK(leaf7_gate_lock);
-static DEFINE_SPINLOCK(leaf8_gate_lock);
-
-static inline unsigned long clkc_readl(unsigned reg)
-{
- return readl(sirfsoc_clk_vbase + reg);
-}
-
-static inline void clkc_writel(u32 val, unsigned reg)
-{
- writel(val, sirfsoc_clk_vbase + reg);
-}
-
-/*
-* ABPLL
-* integer mode: Fvco = Fin * 2 * NF / NR
-* Spread Spectrum mode: Fvco = Fin * SSN / NR
-* SSN = 2^24 / (256 * ((ssdiv >> ssdepth) << ssdepth) + (ssmod << ssdepth))
-*/
-static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long fin = parent_rate;
- struct clk_pll *clk = to_pllclk(hw);
- u64 rate;
- u32 regctrl0 = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_CTRL0 -
- SIRFSOC_CLKC_MEMPLL_AB_FREQ);
- u32 regfreq = clkc_readl(clk->regofs);
- u32 regssc = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_SSC -
- SIRFSOC_CLKC_MEMPLL_AB_FREQ);
- u32 nr = (regfreq >> 16 & (BIT(3) - 1)) + 1;
- u32 nf = (regfreq & (BIT(9) - 1)) + 1;
- u32 ssdiv = regssc >> 8 & (BIT(12) - 1);
- u32 ssdepth = regssc >> 20 & (BIT(2) - 1);
- u32 ssmod = regssc & (BIT(8) - 1);
-
- if (regctrl0 & SIRFSOC_ABPLL_CTRL0_BYPASS)
- return fin;
-
- if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) {
- rate = fin;
- rate *= 1 << 24;
- do_div(rate, nr);
- do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth)
- + (ssmod << ssdepth)));
- } else {
- rate = 2 * fin;
- rate *= nf;
- do_div(rate, nr);
- }
- return rate;
-}
-
-static const struct clk_ops ab_pll_ops = {
- .recalc_rate = pll_clk_recalc_rate,
-};
-
-static const char * const pll_clk_parents[] = {
- "xin",
-};
-
-static const struct clk_init_data clk_cpupll_init = {
- .name = "cpupll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_cpupll = {
- .regofs = SIRFSOC_CLKC_CPUPLL_AB_FREQ,
- .hw = {
- .init = &clk_cpupll_init,
- },
-};
-
-static const struct clk_init_data clk_mempll_init = {
- .name = "mempll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_mempll = {
- .regofs = SIRFSOC_CLKC_MEMPLL_AB_FREQ,
- .hw = {
- .init = &clk_mempll_init,
- },
-};
-
-static const struct clk_init_data clk_sys0pll_init = {
- .name = "sys0pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys0pll = {
- .regofs = SIRFSOC_CLKC_SYS0PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys0pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys1pll_init = {
- .name = "sys1pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys1pll = {
- .regofs = SIRFSOC_CLKC_SYS1PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys1pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys2pll_init = {
- .name = "sys2pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys2pll = {
- .regofs = SIRFSOC_CLKC_SYS2PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys2pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys3pll_init = {
- .name = "sys3pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys3pll = {
- .regofs = SIRFSOC_CLKC_SYS3PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys3pll_init,
- },
-};
-
-/*
- * DTO in clkc, default enable double resolution mode
- * double resolution mode:fout = fin * finc / 2^29
- * normal mode:fout = fin * finc / 2^28
- */
-#define DTO_RESL_DOUBLE (1ULL << 29)
-#define DTO_RESL_NORMAL (1ULL << 28)
-
-static int dto_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_dto *clk = to_dtoclk(hw);
- int reg;
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- return !!(clkc_readl(reg) & BIT(0));
-}
-
-static int dto_clk_enable(struct clk_hw *hw)
-{
- u32 val, reg;
- struct clk_dto *clk = to_dtoclk(hw);
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- val = clkc_readl(reg) | BIT(0);
- clkc_writel(val, reg);
- return 0;
-}
-
-static void dto_clk_disable(struct clk_hw *hw)
-{
- u32 val, reg;
- struct clk_dto *clk = to_dtoclk(hw);
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- val = clkc_readl(reg) & ~BIT(0);
- clkc_writel(val, reg);
-}
-
-static unsigned long dto_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u64 rate = parent_rate;
- struct clk_dto *clk = to_dtoclk(hw);
- u32 finc = clkc_readl(clk->inc_offset);
- u32 droff = clkc_readl(clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
-
- rate *= finc;
- if (droff & BIT(0))
- /* Double resolution off */
- do_div(rate, DTO_RESL_NORMAL);
- else
- do_div(rate, DTO_RESL_DOUBLE);
-
- return rate;
-}
-
-static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u64 dividend = rate * DTO_RESL_DOUBLE;
-
- do_div(dividend, *parent_rate);
- dividend *= *parent_rate;
- do_div(dividend, DTO_RESL_DOUBLE);
-
- return dividend;
-}
-
-static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- u64 dividend = rate * DTO_RESL_DOUBLE;
- struct clk_dto *clk = to_dtoclk(hw);
-
- do_div(dividend, parent_rate);
- clkc_writel(0, clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
- clkc_writel(dividend, clk->inc_offset);
-
- return 0;
-}
-
-static u8 dto_clk_get_parent(struct clk_hw *hw)
-{
- struct clk_dto *clk = to_dtoclk(hw);
-
- return clkc_readl(clk->src_offset);
-}
-
-/*
- * dto need CLK_SET_PARENT_GATE
- */
-static int dto_clk_set_parent(struct clk_hw *hw, u8 index)
-{
- struct clk_dto *clk = to_dtoclk(hw);
-
- clkc_writel(index, clk->src_offset);
- return 0;
-}
-
-static const struct clk_ops dto_ops = {
- .is_enabled = dto_clk_is_enabled,
- .enable = dto_clk_enable,
- .disable = dto_clk_disable,
- .recalc_rate = dto_clk_recalc_rate,
- .round_rate = dto_clk_round_rate,
- .set_rate = dto_clk_set_rate,
- .get_parent = dto_clk_get_parent,
- .set_parent = dto_clk_set_parent,
-};
-
-/* the DTO parents are the syspll vco clk1 outputs (sys0/1/3) */
-static const char * const audiodto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_audiodto_init = {
- .name = "audio_dto",
- .ops = &dto_ops,
- .parent_names = audiodto_clk_parents,
- .num_parents = ARRAY_SIZE(audiodto_clk_parents),
-};
-
-static struct clk_dto clk_audio_dto = {
- .inc_offset = SIRFSOC_CLKC_AUDIO_DTO_INC,
- .src_offset = SIRFSOC_CLKC_AUDIO_DTO_SRC,
- .hw = {
- .init = &clk_audiodto_init,
- },
-};
-
-static const char * const disp0dto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_disp0dto_init = {
- .name = "disp0_dto",
- .ops = &dto_ops,
- .parent_names = disp0dto_clk_parents,
- .num_parents = ARRAY_SIZE(disp0dto_clk_parents),
-};
-
-static struct clk_dto clk_disp0_dto = {
- .inc_offset = SIRFSOC_CLKC_DISP0_DTO_INC,
- .src_offset = SIRFSOC_CLKC_DISP0_DTO_SRC,
- .hw = {
- .init = &clk_disp0dto_init,
- },
-};
-
-static const char * const disp1dto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_disp1dto_init = {
- .name = "disp1_dto",
- .ops = &dto_ops,
- .parent_names = disp1dto_clk_parents,
- .num_parents = ARRAY_SIZE(disp1dto_clk_parents),
-};
-
-static struct clk_dto clk_disp1_dto = {
- .inc_offset = SIRFSOC_CLKC_DISP1_DTO_INC,
- .src_offset = SIRFSOC_CLKC_DISP1_DTO_SRC,
- .hw = {
- .init = &clk_disp1dto_init,
- },
-};
-
-static struct atlas7_div_init_data divider_list[] __initdata = {
- /* div_name, parent_name, gate_name, clk_flag, divider_flag, gate_flag, div_offset, shift, width, gate_offset, bit_enable, lock */
- { "sys0pll_qa1", "sys0pll_fixdiv", "sys0pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 0, &usbphy_div_lock },
- { "sys1pll_qa1", "sys1pll_fixdiv", "sys1pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 4, &usbphy_div_lock },
- { "sys2pll_qa1", "sys2pll_fixdiv", "sys2pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 8, &usbphy_div_lock },
- { "sys3pll_qa1", "sys3pll_fixdiv", "sys3pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 12, &usbphy_div_lock },
- { "sys0pll_qa2", "sys0pll_fixdiv", "sys0pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 0, &btss_div_lock },
- { "sys1pll_qa2", "sys1pll_fixdiv", "sys1pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 4, &btss_div_lock },
- { "sys2pll_qa2", "sys2pll_fixdiv", "sys2pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 8, &btss_div_lock },
- { "sys3pll_qa2", "sys3pll_fixdiv", "sys3pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 12, &btss_div_lock },
- { "sys0pll_qa3", "sys0pll_fixdiv", "sys0pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 0, &rgmii_div_lock },
- { "sys1pll_qa3", "sys1pll_fixdiv", "sys1pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 4, &rgmii_div_lock },
- { "sys2pll_qa3", "sys2pll_fixdiv", "sys2pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 8, &rgmii_div_lock },
- { "sys3pll_qa3", "sys3pll_fixdiv", "sys3pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 12, &rgmii_div_lock },
- { "sys0pll_qa4", "sys0pll_fixdiv", "sys0pll_a4", 0, 0, 0, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 0, &cpu_div_lock },
- { "sys1pll_qa4", "sys1pll_fixdiv", "sys1pll_a4", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 4, &cpu_div_lock },
- { "sys0pll_qa5", "sys0pll_fixdiv", "sys0pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 0, &sdphy01_div_lock },
- { "sys1pll_qa5", "sys1pll_fixdiv", "sys1pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 4, &sdphy01_div_lock },
- { "sys2pll_qa5", "sys2pll_fixdiv", "sys2pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 8, &sdphy01_div_lock },
- { "sys3pll_qa5", "sys3pll_fixdiv", "sys3pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 12, &sdphy01_div_lock },
- { "sys0pll_qa6", "sys0pll_fixdiv", "sys0pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 0, &sdphy23_div_lock },
- { "sys1pll_qa6", "sys1pll_fixdiv", "sys1pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 4, &sdphy23_div_lock },
- { "sys2pll_qa6", "sys2pll_fixdiv", "sys2pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 8, &sdphy23_div_lock },
- { "sys3pll_qa6", "sys3pll_fixdiv", "sys3pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 12, &sdphy23_div_lock },
- { "sys0pll_qa7", "sys0pll_fixdiv", "sys0pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 0, &sdphy45_div_lock },
- { "sys1pll_qa7", "sys1pll_fixdiv", "sys1pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 4, &sdphy45_div_lock },
- { "sys2pll_qa7", "sys2pll_fixdiv", "sys2pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 8, &sdphy45_div_lock },
- { "sys3pll_qa7", "sys3pll_fixdiv", "sys3pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 12, &sdphy45_div_lock },
- { "sys0pll_qa8", "sys0pll_fixdiv", "sys0pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 0, &sdphy67_div_lock },
- { "sys1pll_qa8", "sys1pll_fixdiv", "sys1pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 4, &sdphy67_div_lock },
- { "sys2pll_qa8", "sys2pll_fixdiv", "sys2pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 8, &sdphy67_div_lock },
- { "sys3pll_qa8", "sys3pll_fixdiv", "sys3pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 12, &sdphy67_div_lock },
- { "sys0pll_qa9", "sys0pll_fixdiv", "sys0pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 0, &can_div_lock },
- { "sys1pll_qa9", "sys1pll_fixdiv", "sys1pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 4, &can_div_lock },
- { "sys2pll_qa9", "sys2pll_fixdiv", "sys2pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 8, &can_div_lock },
- { "sys3pll_qa9", "sys3pll_fixdiv", "sys3pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 12, &can_div_lock },
- { "sys0pll_qa10", "sys0pll_fixdiv", "sys0pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 0, &deint_div_lock },
- { "sys1pll_qa10", "sys1pll_fixdiv", "sys1pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 4, &deint_div_lock },
- { "sys2pll_qa10", "sys2pll_fixdiv", "sys2pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 8, &deint_div_lock },
- { "sys3pll_qa10", "sys3pll_fixdiv", "sys3pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 12, &deint_div_lock },
- { "sys0pll_qa11", "sys0pll_fixdiv", "sys0pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 0, &nand_div_lock },
- { "sys1pll_qa11", "sys1pll_fixdiv", "sys1pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 4, &nand_div_lock },
- { "sys2pll_qa11", "sys2pll_fixdiv", "sys2pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 8, &nand_div_lock },
- { "sys3pll_qa11", "sys3pll_fixdiv", "sys3pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 12, &nand_div_lock },
- { "sys0pll_qa12", "sys0pll_fixdiv", "sys0pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 0, &disp0_div_lock },
- { "sys1pll_qa12", "sys1pll_fixdiv", "sys1pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 4, &disp0_div_lock },
- { "sys2pll_qa12", "sys2pll_fixdiv", "sys2pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 8, &disp0_div_lock },
- { "sys3pll_qa12", "sys3pll_fixdiv", "sys3pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 12, &disp0_div_lock },
- { "sys0pll_qa13", "sys0pll_fixdiv", "sys0pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 0, &disp1_div_lock },
- { "sys1pll_qa13", "sys1pll_fixdiv", "sys1pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 4, &disp1_div_lock },
- { "sys2pll_qa13", "sys2pll_fixdiv", "sys2pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 8, &disp1_div_lock },
- { "sys3pll_qa13", "sys3pll_fixdiv", "sys3pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 12, &disp1_div_lock },
- { "sys0pll_qa14", "sys0pll_fixdiv", "sys0pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 0, &gpu_div_lock },
- { "sys1pll_qa14", "sys1pll_fixdiv", "sys1pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 4, &gpu_div_lock },
- { "sys2pll_qa14", "sys2pll_fixdiv", "sys2pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 8, &gpu_div_lock },
- { "sys3pll_qa14", "sys3pll_fixdiv", "sys3pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 12, &gpu_div_lock },
- { "sys0pll_qa15", "sys0pll_fixdiv", "sys0pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 0, &gnss_div_lock },
- { "sys1pll_qa15", "sys1pll_fixdiv", "sys1pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 4, &gnss_div_lock },
- { "sys2pll_qa15", "sys2pll_fixdiv", "sys2pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 8, &gnss_div_lock },
- { "sys3pll_qa15", "sys3pll_fixdiv", "sys3pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 12, &gnss_div_lock },
- { "sys1pll_qa18", "sys1pll_fixdiv", "sys1pll_a18", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 24, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 12, &share_div_lock },
- { "sys1pll_qa19", "sys1pll_fixdiv", "sys1pll_a19", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 16, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 8, &share_div_lock },
- { "sys1pll_qa20", "sys1pll_fixdiv", "sys1pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 4, &share_div_lock },
- { "sys2pll_qa20", "sys2pll_fixdiv", "sys2pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 0, &share_div_lock },
- { "sys1pll_qa17", "sys1pll_fixdiv", "sys1pll_a17", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 20, &share_div_lock },
- { "sys0pll_qa20", "sys0pll_fixdiv", "sys0pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 16, &share_div_lock },
-};
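-/*
- * Each row above is registered twice at init time: a 6-bit divider at
- * (div_offset, shift) and a gate at (gate_offset, bit_enable) on the
- * divider output; rows sharing one CFG/ENA register pair share a lock.
- */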
-
-static const char * const i2s_clk_parents[] = {
- "xin",
- "xinw",
- "audio_dto",
- /* "pwm_i2s01" */
-};
-
-static const char * const usbphy_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a1",
- "sys1pll_a1",
- "sys2pll_a1",
- "sys3pll_a1",
-};
-
-static const char * const btss_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a2",
- "sys1pll_a2",
- "sys2pll_a2",
- "sys3pll_a2",
-};
-
-static const char * const rgmii_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a3",
- "sys1pll_a3",
- "sys2pll_a3",
- "sys3pll_a3",
-};
-
-static const char * const cpu_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a4",
- "sys1pll_a4",
- "cpupll_clk1",
-};
-
-static const char * const sdphy01_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a5",
- "sys1pll_a5",
- "sys2pll_a5",
- "sys3pll_a5",
-};
-
-static const char * const sdphy23_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a6",
- "sys1pll_a6",
- "sys2pll_a6",
- "sys3pll_a6",
-};
-
-static const char * const sdphy45_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a7",
- "sys1pll_a7",
- "sys2pll_a7",
- "sys3pll_a7",
-};
-
-static const char * const sdphy67_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a8",
- "sys1pll_a8",
- "sys2pll_a8",
- "sys3pll_a8",
-};
-
-static const char * const can_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a9",
- "sys1pll_a9",
- "sys2pll_a9",
- "sys3pll_a9",
-};
-
-static const char * const deint_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a10",
- "sys1pll_a10",
- "sys2pll_a10",
- "sys3pll_a10",
-};
-
-static const char * const nand_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a11",
- "sys1pll_a11",
- "sys2pll_a11",
- "sys3pll_a11",
-};
-
-static const char * const disp0_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a12",
- "sys1pll_a12",
- "sys2pll_a12",
- "sys3pll_a12",
- "disp0_dto",
-};
-
-static const char * const disp1_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a13",
- "sys1pll_a13",
- "sys2pll_a13",
- "sys3pll_a13",
- "disp1_dto",
-};
-
-static const char * const gpu_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a14",
- "sys1pll_a14",
- "sys2pll_a14",
- "sys3pll_a14",
-};
-
-static const char * const gnss_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a15",
- "sys1pll_a15",
- "sys2pll_a15",
- "sys3pll_a15",
-};
-
-static const char * const sys_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const io_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const g2d_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const jpenc_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const vdec_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const gmac_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const usb_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const kas_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const sec_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const sdr_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const vip_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const nocd_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const nocr_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const tpiu_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static struct atlas7_mux_init_data mux_list[] __initdata = {
- /* mux_name, parent_names, parent_num, flags, mux_flags, mux_offset, shift, width */
- { "i2s_mux", i2s_clk_parents, ARRAY_SIZE(i2s_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 2 },
- { "usbphy_mux", usbphy_clk_parents, ARRAY_SIZE(usbphy_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 3 },
- { "btss_mux", btss_clk_parents, ARRAY_SIZE(btss_clk_parents), 0, 0, SIRFSOC_CLKC_BTSS_CLK_SEL, 0, 3 },
- { "rgmii_mux", rgmii_clk_parents, ARRAY_SIZE(rgmii_clk_parents), 0, 0, SIRFSOC_CLKC_RGMII_CLK_SEL, 0, 3 },
- { "cpu_mux", cpu_clk_parents, ARRAY_SIZE(cpu_clk_parents), 0, 0, SIRFSOC_CLKC_CPU_CLK_SEL, 0, 3 },
- { "sdphy01_mux", sdphy01_clk_parents, ARRAY_SIZE(sdphy01_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY01_CLK_SEL, 0, 3 },
- { "sdphy23_mux", sdphy23_clk_parents, ARRAY_SIZE(sdphy23_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY23_CLK_SEL, 0, 3 },
- { "sdphy45_mux", sdphy45_clk_parents, ARRAY_SIZE(sdphy45_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY45_CLK_SEL, 0, 3 },
- { "sdphy67_mux", sdphy67_clk_parents, ARRAY_SIZE(sdphy67_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY67_CLK_SEL, 0, 3 },
- { "can_mux", can_clk_parents, ARRAY_SIZE(can_clk_parents), 0, 0, SIRFSOC_CLKC_CAN_CLK_SEL, 0, 3 },
- { "deint_mux", deint_clk_parents, ARRAY_SIZE(deint_clk_parents), 0, 0, SIRFSOC_CLKC_DEINT_CLK_SEL, 0, 3 },
- { "nand_mux", nand_clk_parents, ARRAY_SIZE(nand_clk_parents), 0, 0, SIRFSOC_CLKC_NAND_CLK_SEL, 0, 3 },
- { "disp0_mux", disp0_clk_parents, ARRAY_SIZE(disp0_clk_parents), 0, 0, SIRFSOC_CLKC_DISP0_CLK_SEL, 0, 3 },
- { "disp1_mux", disp1_clk_parents, ARRAY_SIZE(disp1_clk_parents), 0, 0, SIRFSOC_CLKC_DISP1_CLK_SEL, 0, 3 },
- { "gpu_mux", gpu_clk_parents, ARRAY_SIZE(gpu_clk_parents), 0, 0, SIRFSOC_CLKC_GPU_CLK_SEL, 0, 3 },
- { "gnss_mux", gnss_clk_parents, ARRAY_SIZE(gnss_clk_parents), 0, 0, SIRFSOC_CLKC_GNSS_CLK_SEL, 0, 3 },
- { "sys_mux", sys_clk_parents, ARRAY_SIZE(sys_clk_parents), 0, 0, SIRFSOC_CLKC_SYS_CLK_SEL, 0, 3 },
- { "io_mux", io_clk_parents, ARRAY_SIZE(io_clk_parents), 0, 0, SIRFSOC_CLKC_IO_CLK_SEL, 0, 3 },
- { "g2d_mux", g2d_clk_parents, ARRAY_SIZE(g2d_clk_parents), 0, 0, SIRFSOC_CLKC_G2D_CLK_SEL, 0, 3 },
- { "jpenc_mux", jpenc_clk_parents, ARRAY_SIZE(jpenc_clk_parents), 0, 0, SIRFSOC_CLKC_JPENC_CLK_SEL, 0, 3 },
- { "vdec_mux", vdec_clk_parents, ARRAY_SIZE(vdec_clk_parents), 0, 0, SIRFSOC_CLKC_VDEC_CLK_SEL, 0, 3 },
- { "gmac_mux", gmac_clk_parents, ARRAY_SIZE(gmac_clk_parents), 0, 0, SIRFSOC_CLKC_GMAC_CLK_SEL, 0, 3 },
- { "usb_mux", usb_clk_parents, ARRAY_SIZE(usb_clk_parents), 0, 0, SIRFSOC_CLKC_USB_CLK_SEL, 0, 3 },
- { "kas_mux", kas_clk_parents, ARRAY_SIZE(kas_clk_parents), 0, 0, SIRFSOC_CLKC_KAS_CLK_SEL, 0, 3 },
- { "sec_mux", sec_clk_parents, ARRAY_SIZE(sec_clk_parents), 0, 0, SIRFSOC_CLKC_SEC_CLK_SEL, 0, 3 },
- { "sdr_mux", sdr_clk_parents, ARRAY_SIZE(sdr_clk_parents), 0, 0, SIRFSOC_CLKC_SDR_CLK_SEL, 0, 3 },
- { "vip_mux", vip_clk_parents, ARRAY_SIZE(vip_clk_parents), 0, 0, SIRFSOC_CLKC_VIP_CLK_SEL, 0, 3 },
- { "nocd_mux", nocd_clk_parents, ARRAY_SIZE(nocd_clk_parents), 0, 0, SIRFSOC_CLKC_NOCD_CLK_SEL, 0, 3 },
- { "nocr_mux", nocr_clk_parents, ARRAY_SIZE(nocr_clk_parents), 0, 0, SIRFSOC_CLKC_NOCR_CLK_SEL, 0, 3 },
- { "tpiu_mux", tpiu_clk_parents, ARRAY_SIZE(tpiu_clk_parents), 0, 0, SIRFSOC_CLKC_TPIU_CLK_SEL, 0, 3 },
-};
-
-/* new units must be added at the tail of the list */
-static struct atlas7_unit_init_data unit_list[] __initdata = {
- /* index, unit_name, parent_name, flags, regofs, bit, type, idle_bit, lock */
- { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, 0, 0, &root0_gate_lock },
- { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, 0, 0, &root0_gate_lock },
- { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, 0, 0, &root0_gate_lock },
- { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, 0, 0, &root0_gate_lock },
- { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, 0, 0, &root0_gate_lock },
- { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, 0, 0, &root0_gate_lock },
- { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, 0, 0, &root0_gate_lock },
- { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, 0, 0, &root0_gate_lock },
- { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, 0, 0, &root0_gate_lock },
- { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, 0, 0, &root0_gate_lock },
- { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, 0, 0, &root0_gate_lock },
- { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, 0, 0, &root0_gate_lock },
- { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, 0, 0, &root0_gate_lock },
- { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, 0, 0, &root0_gate_lock },
- { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, 0, 0, &root0_gate_lock },
- { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, 0, 0, &root0_gate_lock },
- { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, 0, 0, &root0_gate_lock },
- { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, 0, 0, &root0_gate_lock },
- { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, 0, 0, &root0_gate_lock },
- { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, 0, 0, &root0_gate_lock },
- { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, 0, 0, &root0_gate_lock },
- { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, 0, 0, &root0_gate_lock },
- { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, 0, 0, &root0_gate_lock },
- { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, 0, 0, &root1_gate_lock },
- { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, 0, 0, &root1_gate_lock },
- { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, 0, 0, &root1_gate_lock },
- { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, 0, 0, &root1_gate_lock },
- { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, 0, 0, &root1_gate_lock },
- { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, 0, 0, &root1_gate_lock },
- { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, 0, 0, &root1_gate_lock },
- { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, 0, 0, &root1_gate_lock },
- { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, 0, 0, &root1_gate_lock },
- { 32, "audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, 0, 0, &root1_gate_lock },
- { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, 0, 0, &root1_gate_lock },
- { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, 0, 0, &root1_gate_lock },
- { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, 0, 0, &root1_gate_lock },
- { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, 0, 0, &root1_gate_lock },
- { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, 0, 0, &root1_gate_lock },
- { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, 0, 0, &root1_gate_lock },
- { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, 0, 0, &root1_gate_lock },
- { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, 0, 0, &root1_gate_lock },
- { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, 0, 0, &root1_gate_lock },
- { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, 0, 0, &root1_gate_lock },
- { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, 0, 0, &root1_gate_lock },
- { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, 0, 0, &root1_gate_lock },
- { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, 0, 0, &root1_gate_lock },
- { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, CLK_UNIT_NOC_CLOCK, 4, &leaf1_gate_lock },
- { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, 0, 0, &leaf1_gate_lock },
- { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, 0, 0, &leaf1_gate_lock },
- { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, 0, 0, &leaf1_gate_lock },
- { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, 0, 0, &leaf1_gate_lock },
- { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, 0, 0, &leaf1_gate_lock },
- { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, 0, 0, &leaf1_gate_lock },
- { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, CLK_UNIT_NOC_SOCKET, 7, &leaf1_gate_lock },
- { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, CLK_UNIT_NOC_SOCKET, 8, &leaf1_gate_lock },
- { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
- { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, 0, 0, &leaf1_gate_lock },
- { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, CLK_UNIT_NOC_SOCKET, 4, &leaf1_gate_lock },
- { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, CLK_UNIT_NOC_SOCKET, 5, &leaf1_gate_lock },
- { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, CLK_UNIT_NOC_SOCKET, 6, &leaf1_gate_lock },
- { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, CLK_UNIT_NOC_SOCKET, 1, &leaf1_gate_lock },
- { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, CLK_UNIT_NOC_SOCKET, 2, &leaf1_gate_lock },
- { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, CLK_UNIT_NOC_SOCKET, 0, &leaf1_gate_lock },
- { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
- { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, 0, 0, &leaf1_gate_lock },
- { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, 0, 0, &leaf1_gate_lock },
- { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, CLK_UNIT_NOC_CLOCK, 20, &leaf2_gate_lock },
- { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, 0, 0, &leaf2_gate_lock },
- { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, 0, 0, &leaf2_gate_lock },
- { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, 0, 0, &leaf2_gate_lock },
- { 70, "sdio23_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, 0, 0, &leaf2_gate_lock },
- { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, 0, 0, &leaf2_gate_lock },
- { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, 0, 0, &leaf2_gate_lock },
- { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, 0, 0, &leaf2_gate_lock },
- { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, CLK_UNIT_NOC_CLOCK, 21, &leaf2_gate_lock },
- { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, 0, 0, &leaf2_gate_lock },
- { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, 0, 0, &leaf2_gate_lock },
- { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, 0, 0, &leaf2_gate_lock },
- { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, CLK_UNIT_NOC_CLOCK, 22, &leaf2_gate_lock },
- { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, CLK_UNIT_NOC_CLOCK, 18, &leaf2_gate_lock },
- { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, CLK_UNIT_NOC_CLOCK, 23, &leaf2_gate_lock },
- { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, CLK_UNIT_NOC_CLOCK, 19, &leaf2_gate_lock },
- { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, CLK_UNIT_NOC_CLOCK, 17, &leaf2_gate_lock },
- { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, 0, 0, &leaf2_gate_lock },
- { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, 0, 0, &leaf2_gate_lock },
- { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, 0, 0, &leaf2_gate_lock },
- { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, 0, 0, &leaf3_gate_lock },
- { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, CLK_UNIT_NOC_CLOCK, 10, &leaf3_gate_lock },
- { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, CLK_UNIT_NOC_SOCKET, 14, &leaf3_gate_lock },
- { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, CLK_UNIT_NOC_SOCKET, 11, &leaf3_gate_lock },
- { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, CLK_UNIT_NOC_SOCKET, 13, &leaf3_gate_lock },
- { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, CLK_UNIT_NOC_SOCKET, 15, &leaf3_gate_lock },
- { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, CLK_UNIT_NOC_SOCKET, 16, &leaf3_gate_lock },
- { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, CLK_UNIT_NOC_SOCKET, 17, &leaf3_gate_lock },
- { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, CLK_UNIT_NOC_SOCKET, 18, &leaf3_gate_lock },
- { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, CLK_UNIT_NOC_SOCKET, 12, &leaf3_gate_lock },
- { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, 0, 0, &leaf3_gate_lock },
- { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, CLK_UNIT_NOC_CLOCK, 7, &leaf3_gate_lock },
- { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, CLK_UNIT_NOC_CLOCK, 9, &leaf3_gate_lock },
- { 99, "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, CLK_UNIT_NOC_CLOCK, 8, &leaf3_gate_lock },
- { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, 0, 0, &leaf3_gate_lock },
- { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, 0, 0, &leaf3_gate_lock },
- { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, CLK_UNIT_NOC_CLOCK, 3, &leaf4_gate_lock },
- { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, CLK_UNIT_NOC_CLOCK, 1, &leaf4_gate_lock },
- { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, CLK_UNIT_NOC_CLOCK, 12, &leaf4_gate_lock },
- { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, CLK_UNIT_NOC_SOCKET, 21, &leaf4_gate_lock },
- { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, CLK_UNIT_NOC_SOCKET, 20, &leaf4_gate_lock },
- { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, CLK_UNIT_NOC_SOCKET, 19, &leaf4_gate_lock },
- { 108, "nand_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, 0, 0, &leaf4_gate_lock },
- { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, 0, 0, &leaf4_gate_lock },
- { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, CLK_UNIT_NOC_CLOCK, 13, &leaf4_gate_lock },
- { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, 0, 0, &leaf4_gate_lock },
- { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, CLK_UNIT_NOC_CLOCK, 14, &leaf4_gate_lock },
- { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, CLK_UNIT_NOC_CLOCK, 15, &leaf4_gate_lock },
- { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, CLK_UNIT_NOC_CLOCK, 16, &leaf4_gate_lock },
- { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, 0, 0, &leaf4_gate_lock },
- { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, 0, 0, &leaf4_gate_lock },
- { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, 0, 0, &leaf4_gate_lock },
- { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, 0, 0, &leaf5_gate_lock },
- { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, 0, 0, &leaf5_gate_lock },
- { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, 0, 0, &leaf5_gate_lock },
- { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, 0, 0, &leaf5_gate_lock },
- { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, CLK_UNIT_NOC_SOCKET, 9, &leaf6_gate_lock },
- { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, CLK_UNIT_NOC_SOCKET, 10, &leaf6_gate_lock },
- { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, 0, 0, &leaf6_gate_lock },
- { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, 0, 0, &leaf6_gate_lock },
- { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, CLK_UNIT_NOC_CLOCK, 0, &leaf7_gate_lock },
- { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, CLK_UNIT_NOC_CLOCK, 11, &leaf7_gate_lock },
- { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, 0, 0, &leaf7_gate_lock },
- { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, 0, 0, &leaf8_gate_lock },
- { 130, "dmac4_io", "a7ca_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, 0, 0, &leaf8_gate_lock },
- { 131, "uart6_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, 0, 0, &leaf8_gate_lock },
- { 132, "usp3_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, 0, 0, &leaf8_gate_lock },
- { 133, "a7ca_io", "noc_btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, 0, 0, &leaf8_gate_lock },
- { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, 0, 0, &leaf8_gate_lock },
- { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, 0, 0, &leaf8_gate_lock },
- { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, 0, 0, &root1_gate_lock },
- { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, 0, 0, &leaf8_gate_lock },
- { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, 0, 0, &leaf0_gate_lock },
- { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, 0, 0, &leaf0_gate_lock },
- { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, 0, 0, &leaf0_gate_lock },
- { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, 0, 0, &leaf0_gate_lock },
-};
-
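-/*
- * The unit index above is also the DT clock specifier handed back by
- * of_clk_src_onecell_get(); mux clocks follow the units in this array
- * at ARRAY_SIZE(unit_list) + mux position.
- */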
-static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)];
-
-static int unit_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_unit *clk = to_unitclk(hw);
- u32 reg;
-
- reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_STAT - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
-
- return !!(clkc_readl(reg) & BIT(clk->bit));
-}
-
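-/*
- * Units on the NoC need a handshake around gating: enable clears the
- * idle request (NoC clock) or sets slave-ready (NoC socket); disable
- * asserts the idle request and polls the idle status for up to ~1 ms
- * (100 x 10 us) before clearing the gate bit.
- */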
-static int unit_clk_enable(struct clk_hw *hw)
-{
- u32 reg;
- struct clk_unit *clk = to_unitclk(hw);
- unsigned long flags;
-
- reg = clk->regofs;
-
- spin_lock_irqsave(clk->lock, flags);
- clkc_writel(BIT(clk->bit), reg);
- if (clk->type == CLK_UNIT_NOC_CLOCK)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
- else if (clk->type == CLK_UNIT_NOC_SOCKET)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_SET);
-
- spin_unlock_irqrestore(clk->lock, flags);
- return 0;
-}
-
-static void unit_clk_disable(struct clk_hw *hw)
-{
- u32 reg;
- u32 i = 0;
- struct clk_unit *clk = to_unitclk(hw);
- unsigned long flags;
-
- reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_CLR - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
- spin_lock_irqsave(clk->lock, flags);
- if (clk->type == CLK_UNIT_NOC_CLOCK) {
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_SET);
- while (!(clkc_readl(SIRFSOC_NOC_CLK_IDLE_STATUS) &
- BIT(clk->idle_bit)) && (i++ < 100)) {
- cpu_relax();
- udelay(10);
- }
-
- /* on timeout, the i++ in the loop condition leaves i at 101 */
- if (i > 100) {
- pr_err("unit NoC clock disconnect error: timeout\n");
- /* once timed out, undo the idle request via CLR */
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
- goto err;
- }
-
- } else if (clk->type == CLK_UNIT_NOC_SOCKET)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_CLR);
-
- clkc_writel(BIT(clk->bit), reg);
-err:
- spin_unlock_irqrestore(clk->lock, flags);
-}
-
-static const struct clk_ops unit_clk_ops = {
- .is_enabled = unit_clk_is_enabled,
- .enable = unit_clk_enable,
- .disable = unit_clk_disable,
-};
-
-static struct clk * __init
-atlas7_unit_clk_register(struct device *dev, const char *name,
- const char * const parent_name, unsigned long flags,
- u32 regofs, u8 bit, u32 type, u8 idle_bit, spinlock_t *lock)
-{
- struct clk *clk;
- struct clk_unit *unit;
- struct clk_init_data init;
-
- unit = kzalloc(sizeof(*unit), GFP_KERNEL);
- if (!unit)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.ops = &unit_clk_ops;
- init.flags = flags;
-
- unit->hw.init = &init;
- unit->regofs = regofs;
- unit->bit = bit;
-
- unit->type = type;
- unit->idle_bit = idle_bit;
- unit->lock = lock;
-
- clk = clk_register(dev, &unit->hw);
- if (IS_ERR(clk))
- kfree(unit);
-
- return clk;
-}
-
-static struct atlas7_reset_desc atlas7_reset_unit[] = {
- { "PWM", 0x0244, 0, 0x0320, 0, &leaf0_gate_lock }, /* 0-5 */
- { "THCGUM", 0x0244, 3, 0x0320, 1, &leaf0_gate_lock },
- { "CVD", 0x04A0, 0, 0x032C, 0, &leaf1_gate_lock },
- { "TIMER", 0x04A0, 1, 0x032C, 1, &leaf1_gate_lock },
- { "PULSEC", 0x04A0, 2, 0x032C, 2, &leaf1_gate_lock },
- { "TSC", 0x04A0, 3, 0x032C, 3, &leaf1_gate_lock },
- { "IOCTOP", 0x04A0, 4, 0x032C, 4, &leaf1_gate_lock }, /* 6-10 */
- { "RSC", 0x04A0, 5, 0x032C, 5, &leaf1_gate_lock },
- { "DVM", 0x04A0, 6, 0x032C, 6, &leaf1_gate_lock },
- { "LVDS", 0x04A0, 7, 0x032C, 7, &leaf1_gate_lock },
- { "KAS", 0x04A0, 8, 0x032C, 8, &leaf1_gate_lock },
- { "AC97", 0x04A0, 9, 0x032C, 9, &leaf1_gate_lock }, /* 11-15 */
- { "USP0", 0x04A0, 10, 0x032C, 10, &leaf1_gate_lock },
- { "USP1", 0x04A0, 11, 0x032C, 11, &leaf1_gate_lock },
- { "USP2", 0x04A0, 12, 0x032C, 12, &leaf1_gate_lock },
- { "DMAC2", 0x04A0, 13, 0x032C, 13, &leaf1_gate_lock },
- { "DMAC3", 0x04A0, 14, 0x032C, 14, &leaf1_gate_lock }, /* 16-20 */
- { "AUDIO", 0x04A0, 15, 0x032C, 15, &leaf1_gate_lock },
- { "I2S1", 0x04A0, 17, 0x032C, 16, &leaf1_gate_lock },
- { "PMU_AUDIO", 0x04A0, 22, 0x032C, 17, &leaf1_gate_lock },
- { "THAUDMSCM", 0x04A0, 23, 0x032C, 18, &leaf1_gate_lock },
- { "SYS2PCI", 0x04B8, 0, 0x0338, 0, &leaf2_gate_lock }, /* 21-25 */
- { "PCIARB", 0x04B8, 1, 0x0338, 1, &leaf2_gate_lock },
- { "PCICOPY", 0x04B8, 2, 0x0338, 2, &leaf2_gate_lock },
- { "ROM", 0x04B8, 3, 0x0338, 3, &leaf2_gate_lock },
- { "SDIO23", 0x04B8, 4, 0x0338, 4, &leaf2_gate_lock },
- { "SDIO45", 0x04B8, 5, 0x0338, 5, &leaf2_gate_lock }, /* 26-30 */
- { "SDIO67", 0x04B8, 6, 0x0338, 6, &leaf2_gate_lock },
- { "VIP1", 0x04B8, 7, 0x0338, 7, &leaf2_gate_lock },
- { "VPP0", 0x04B8, 11, 0x0338, 8, &leaf2_gate_lock },
- { "LCD0", 0x04B8, 12, 0x0338, 9, &leaf2_gate_lock },
- { "VPP1", 0x04B8, 13, 0x0338, 10, &leaf2_gate_lock }, /* 31-35 */
- { "LCD1", 0x04B8, 14, 0x0338, 11, &leaf2_gate_lock },
- { "DCU", 0x04B8, 15, 0x0338, 12, &leaf2_gate_lock },
- { "GPIO", 0x04B8, 18, 0x0338, 13, &leaf2_gate_lock },
- { "DAPA_VDIFM", 0x04B8, 17, 0x0338, 15, &leaf2_gate_lock },
- { "THVDIFM", 0x04B8, 19, 0x0338, 16, &leaf2_gate_lock }, /* 36-40 */
- { "RGMII", 0x04D0, 0, 0x0344, 0, &leaf3_gate_lock },
- { "GMAC", 0x04D0, 1, 0x0344, 1, &leaf3_gate_lock },
- { "UART1", 0x04D0, 2, 0x0344, 2, &leaf3_gate_lock },
- { "DMAC0", 0x04D0, 3, 0x0344, 3, &leaf3_gate_lock },
- { "UART0", 0x04D0, 4, 0x0344, 4, &leaf3_gate_lock }, /* 41-45 */
- { "UART2", 0x04D0, 5, 0x0344, 5, &leaf3_gate_lock },
- { "UART3", 0x04D0, 6, 0x0344, 6, &leaf3_gate_lock },
- { "UART4", 0x04D0, 7, 0x0344, 7, &leaf3_gate_lock },
- { "UART5", 0x04D0, 8, 0x0344, 8, &leaf3_gate_lock },
- { "SPI1", 0x04D0, 9, 0x0344, 9, &leaf3_gate_lock }, /* 46-50 */
- { "GNSS_SYS_M0", 0x04D0, 10, 0x0344, 10, &leaf3_gate_lock },
- { "CANBUS1", 0x04D0, 12, 0x0344, 11, &leaf3_gate_lock },
- { "CCSEC", 0x04D0, 15, 0x0344, 12, &leaf3_gate_lock },
- { "CCPUB", 0x04D0, 16, 0x0344, 13, &leaf3_gate_lock },
- { "DAPA_GNSSM", 0x04D0, 13, 0x0344, 14, &leaf3_gate_lock }, /* 51-55 */
- { "THGNSSM", 0x04D0, 14, 0x0344, 15, &leaf3_gate_lock },
- { "VDEC", 0x04E8, 0, 0x0350, 0, &leaf4_gate_lock },
- { "JPENC", 0x04E8, 1, 0x0350, 1, &leaf4_gate_lock },
- { "G2D", 0x04E8, 2, 0x0350, 2, &leaf4_gate_lock },
- { "I2C0", 0x04E8, 3, 0x0350, 3, &leaf4_gate_lock }, /* 56-60 */
- { "I2C1", 0x04E8, 4, 0x0350, 4, &leaf4_gate_lock },
- { "GPIO0", 0x04E8, 5, 0x0350, 5, &leaf4_gate_lock },
- { "NAND", 0x04E8, 6, 0x0350, 6, &leaf4_gate_lock },
- { "SDIO01", 0x04E8, 7, 0x0350, 7, &leaf4_gate_lock },
- { "SYS2PCI2", 0x04E8, 8, 0x0350, 8, &leaf4_gate_lock }, /* 61-65 */
- { "USB0", 0x04E8, 11, 0x0350, 9, &leaf4_gate_lock },
- { "USB1", 0x04E8, 12, 0x0350, 10, &leaf4_gate_lock },
- { "THMEDIAM", 0x04E8, 15, 0x0350, 11, &leaf4_gate_lock },
- { "MEMC_DDRPHY", 0x0500, 0, 0x035C, 0, &leaf5_gate_lock },
- { "MEMC_UPCTL", 0x0500, 0, 0x035C, 1, &leaf5_gate_lock }, /* 66-70 */
- { "DAPA_MEM", 0x0500, 1, 0x035C, 2, &leaf5_gate_lock },
- { "MEMC_MEMDIV", 0x0500, 0, 0x035C, 3, &leaf5_gate_lock },
- { "THDDRM", 0x0500, 3, 0x035C, 4, &leaf5_gate_lock },
- { "CORESIGHT", 0x0518, 3, 0x0368, 13, &leaf6_gate_lock },
- { "THCPUM", 0x0518, 4, 0x0368, 17, &leaf6_gate_lock }, /* 71-75 */
- { "GRAPHIC", 0x0530, 0, 0x0374, 0, &leaf7_gate_lock },
- { "VSS_SDR", 0x0530, 1, 0x0374, 1, &leaf7_gate_lock },
- { "THGPUM", 0x0530, 2, 0x0374, 2, &leaf7_gate_lock },
- { "DMAC4", 0x0548, 2, 0x0380, 1, &leaf8_gate_lock },
- { "UART6", 0x0548, 3, 0x0380, 2, &leaf8_gate_lock }, /* 76- */
- { "USP3", 0x0548, 4, 0x0380, 3, &leaf8_gate_lock },
- { "THBTM", 0x0548, 5, 0x0380, 5, &leaf8_gate_lock },
- { "A7CA", 0x0548, 1, 0x0380, 0, &leaf8_gate_lock },
- { "A7CA_APB", 0x0548, 5, 0x0380, 4, &leaf8_gate_lock },
-};
-
-static int atlas7_reset_module(struct reset_controller_dev *rcdev,
- unsigned long reset_idx)
-{
- struct atlas7_reset_desc *reset = &atlas7_reset_unit[reset_idx];
- unsigned long flags;
-
- /*
- * HW-suggested unit reset sequence:
- * assert sw reset (0)
- * enable the clock (sw clk_en = 1) if it was disabled before the reset
- * delay 16 clocks
- * disable the clock (sw clk_en = 0)
- * de-assert sw reset (1)
- * after this sequence, whether to restore the clock is decided by SW
- */
-
- spin_lock_irqsave(reset->lock, flags);
- /* branch on whether the clock is currently enabled */
- if (clkc_readl(reset->clk_ofs + 8) & (1 << reset->clk_bit)) {
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
- udelay(2);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
- /* restore clock enable */
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
- } else {
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
- udelay(2);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
- }
- spin_unlock_irqrestore(reset->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops atlas7_rst_ops = {
- .reset = atlas7_reset_module,
-};
-
-static struct reset_controller_dev atlas7_rst_ctlr = {
- .ops = &atlas7_rst_ops,
- .owner = THIS_MODULE,
- .of_reset_n_cells = 1,
-};
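-/*
- * Consumers reach these resets through the common reset framework: a
- * one-cell "resets" phandle (e.g. resets = <&car 29>;, index chosen for
- * illustration) selects a row of atlas7_reset_unit[], and a driver-side
- * device_reset(dev) ends up in atlas7_reset_module() above.
- */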
-
-static void __init atlas7_clk_init(struct device_node *np)
-{
- struct clk *clk;
- struct atlas7_div_init_data *div;
- struct atlas7_mux_init_data *mux;
- struct atlas7_unit_init_data *unit;
- int i;
- int ret;
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- of_node_put(np);
-
- clk = clk_register(NULL, &clk_cpupll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_mempll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys0pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys1pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys2pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys3pll.hw);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "cpupll_div1", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "cpupll_div2", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "cpupll_div3", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "mempll_div1", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "mempll_div2", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "mempll_div3", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "sys0pll_div1", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys0pll_div2", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys0pll_div3", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys0pll_fixdiv", "sys0pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys1pll_div1", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys1pll_div2", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys1pll_div3", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys1pll_fixdiv", "sys1pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys2pll_div1", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys2pll_div2", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys2pll_div3", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys2pll_fixdiv", "sys2pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys3pll_div1", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys3pll_div2", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys3pll_div3", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys3pll_fixdiv", "sys3pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "xinw_fixdiv_btslow", "xinw",
- CLK_SET_RATE_PARENT, 1, 4);
-
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk1", "cpupll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 12, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk2", "cpupll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 13, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk3", "cpupll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 14, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "mempll_clk1", "mempll_div1",
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 12, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "mempll_clk2", "mempll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 13, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "mempll_clk3", "mempll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 14, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys0pll_clk1", "sys0pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 12, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys0pll_clk2", "sys0pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 13, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys0pll_clk3", "sys0pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 14, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys1pll_clk1", "sys1pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 12, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys1pll_clk2", "sys1pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 13, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys1pll_clk3", "sys1pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 14, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys2pll_clk1", "sys2pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 12, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys2pll_clk2", "sys2pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 13, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys2pll_clk3", "sys2pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 14, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys3pll_clk1", "sys3pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 12, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys3pll_clk2", "sys3pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 13, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys3pll_clk3", "sys3pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 14, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_audio_dto.hw);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_disp0_dto.hw);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_disp1_dto.hw);
- BUG_ON(!clk);
-
- for (i = 0; i < ARRAY_SIZE(divider_list); i++) {
- div = &divider_list[i];
- clk = clk_register_divider(NULL, div->div_name,
- div->parent_name, div->divider_flags, sirfsoc_clk_vbase + div->div_offset,
- div->shift, div->width, 0, div->lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, div->gate_name, div->div_name,
- div->gate_flags, sirfsoc_clk_vbase + div->gate_offset,
- div->gate_bit, 0, div->lock);
- BUG_ON(!clk);
- }
- /* ignore selector status register check */
- for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
- mux = &mux_list[i];
- clk = clk_register_mux(NULL, mux->mux_name, mux->parent_names,
- mux->parent_num, mux->flags,
- sirfsoc_clk_vbase + mux->mux_offset,
- mux->shift, mux->width,
- mux->mux_flags, NULL);
- atlas7_clks[ARRAY_SIZE(unit_list) + i] = clk;
- BUG_ON(!clk);
- }
-
- for (i = 0; i < ARRAY_SIZE(unit_list); i++) {
- unit = &unit_list[i];
- atlas7_clks[i] = atlas7_unit_clk_register(NULL, unit->unit_name, unit->parent_name,
- unit->flags, unit->regofs, unit->bit, unit->type, unit->idle_bit, unit->lock);
- BUG_ON(!atlas7_clks[i]);
- }
-
- clk_data.clks = atlas7_clks;
- clk_data.clk_num = ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list);
-
- ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- BUG_ON(ret);
-
- atlas7_rst_ctlr.of_node = np;
- atlas7_rst_ctlr.nr_resets = ARRAY_SIZE(atlas7_reset_unit);
- reset_controller_register(&atlas7_rst_ctlr);
-}
-CLK_OF_DECLARE(atlas7_clk, "sirf,atlas7-car", atlas7_clk_init);
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
deleted file mode 100644
index dcf4e25a0216..000000000000
--- a/drivers/clk/sirf/clk-common.c
+++ /dev/null
@@ -1,1037 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * common clock module for all SiRF SoCs
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/clk.h>
-
-#define KHZ 1000
-#define MHZ (KHZ * KHZ)
-
-static void __iomem *sirfsoc_clk_vbase;
-static void __iomem *sirfsoc_rsc_vbase;
-static struct clk_onecell_data clk_data;
-
-/*
- * SiRFprimaII clock controller
- * - 2 oscillators: osc (26 MHz), rtc (32.768 kHz)
- * - 3 standard configurable plls: pll1, pll2 & pll3
- * - 2 exclusive plls: usb phy pll and sata phy pll
- * - 8 clock domains: cpu/cpudiv, mem/memdiv, sys/io, dsp, graphic, multimedia,
- * display and sdphy.
- * Each clock domain can select its own clock source from five clock sources,
- * X_XIN, X_XINW, PLL1, PLL2 and PLL3. The domain clock is used as the source
- * clock of the group clock.
- * - dsp domain: gps, mf
- * - io domain: dmac, nand, audio, uart, i2c, spi, usp, pwm, pulse
- * - sys domain: security
- */
-
-struct clk_pll {
- struct clk_hw hw;
- unsigned short regofs; /* register offset */
-};
-
-#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw)
-
-struct clk_dmn {
- struct clk_hw hw;
- signed char enable_bit; /* enable bit: 0 ~ 63 */
- unsigned short regofs; /* register offset */
-};
-
-#define to_dmnclk(_hw) container_of(_hw, struct clk_dmn, hw)
-
-struct clk_std {
- struct clk_hw hw;
- signed char enable_bit; /* enable bit: 0 ~ 63 */
-};
-
-#define to_stdclk(_hw) container_of(_hw, struct clk_std, hw)
-
-static int std_clk_is_enabled(struct clk_hw *hw);
-static int std_clk_enable(struct clk_hw *hw);
-static void std_clk_disable(struct clk_hw *hw);
-
-static inline unsigned long clkc_readl(unsigned reg)
-{
- return readl(sirfsoc_clk_vbase + reg);
-}
-
-static inline void clkc_writel(u32 val, unsigned reg)
-{
- writel(val, sirfsoc_clk_vbase + reg);
-}
-
-/*
- * std pll
- */
-
-static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long fin = parent_rate;
- struct clk_pll *clk = to_pllclk(hw);
- u32 regcfg2 = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 -
- SIRFSOC_CLKC_PLL1_CFG0;
-
- if (clkc_readl(regcfg2) & BIT(2)) {
- /* pll bypass mode */
- return fin;
- } else {
- /* fout = fin * nf / nr / od */
- u32 cfg0 = clkc_readl(clk->regofs);
- u32 nf = (cfg0 & (BIT(13) - 1)) + 1;
- u32 nr = ((cfg0 >> 13) & (BIT(6) - 1)) + 1;
- u32 od = ((cfg0 >> 19) & (BIT(4) - 1)) + 1;
- WARN_ON(fin % MHZ);
- return fin / MHZ * nf / nr / od * MHZ;
- }
-}
-
-static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long fin, nf, nr, od;
- u64 dividend;
-
- /*
- * fout = fin * nf / (nr * od);
- * set od = 1, nr = fin/MHz, so fout = nf * MHz
- */
- rate = rate - rate % MHZ;
-
- nf = rate / MHZ;
- if (nf > BIT(13))
- nf = BIT(13);
- if (nf < 1)
- nf = 1;
-
- fin = *parent_rate;
-
- nr = fin / MHZ;
- if (nr > BIT(6))
- nr = BIT(6);
- od = 1;
-
- dividend = (u64)fin * nf;
- do_div(dividend, nr * od);
-
- return (long)dividend;
-}
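-/*
- * Rough example (illustrative numbers): with fin = 26 MHz the code uses
- * nr = 26 and od = 1, so a request for 600 MHz picks nf = 600 and
- * returns 26 MHz * 600 / 26 = 600 MHz exactly.
- */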
-
-static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_pll *clk = to_pllclk(hw);
- unsigned long fin, nf, nr, od, reg;
-
- /*
- * fout = fin * nf / (nr * od);
- * set od = 1, nr = fin/MHz, so fout = nf * MHz
- */
-
- nf = rate / MHZ;
- if (unlikely((rate % MHZ) || nf > BIT(13) || nf < 1))
- return -EINVAL;
-
- fin = parent_rate;
- BUG_ON(fin < MHZ);
-
- nr = fin / MHZ;
- BUG_ON((fin % MHZ) || nr > BIT(6));
-
- od = 1;
-
- reg = (nf - 1) | ((nr - 1) << 13) | ((od - 1) << 19);
- clkc_writel(reg, clk->regofs);
-
- reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG1 - SIRFSOC_CLKC_PLL1_CFG0;
- clkc_writel((nf >> 1) - 1, reg);
-
- reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 - SIRFSOC_CLKC_PLL1_CFG0;
- while (!(clkc_readl(reg) & BIT(6)))
- cpu_relax();
-
- return 0;
-}
-
-static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
-	/*
-	 * The SiRF SoC has no CPU clock control,
-	 * so bypass to its parent PLL.
-	 */
- struct clk_hw *parent_clk = clk_hw_get_parent(hw);
- struct clk_hw *pll_parent_clk = clk_hw_get_parent(parent_clk);
- unsigned long pll_parent_rate = clk_hw_get_rate(pll_parent_clk);
- return pll_clk_round_rate(parent_clk, rate, &pll_parent_rate);
-}
-
-static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
-	/*
-	 * The SiRF SoC has no CPU clock control,
-	 * so return the parent PLL rate.
-	 */
- struct clk_hw *parent_clk = clk_hw_get_parent(hw);
- return clk_hw_get_rate(parent_clk);
-}
-
-static const struct clk_ops std_pll_ops = {
- .recalc_rate = pll_clk_recalc_rate,
- .round_rate = pll_clk_round_rate,
- .set_rate = pll_clk_set_rate,
-};
-
-static const char * const pll_clk_parents[] = {
- "osc",
-};
-
-static const struct clk_init_data clk_pll1_init = {
- .name = "pll1",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static const struct clk_init_data clk_pll2_init = {
- .name = "pll2",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static const struct clk_init_data clk_pll3_init = {
- .name = "pll3",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_pll1 = {
- .regofs = SIRFSOC_CLKC_PLL1_CFG0,
- .hw = {
- .init = &clk_pll1_init,
- },
-};
-
-static struct clk_pll clk_pll2 = {
- .regofs = SIRFSOC_CLKC_PLL2_CFG0,
- .hw = {
- .init = &clk_pll2_init,
- },
-};
-
-static struct clk_pll clk_pll3 = {
- .regofs = SIRFSOC_CLKC_PLL3_CFG0,
- .hw = {
- .init = &clk_pll3_init,
- },
-};
-
-/*
- * USB uses a dedicated PLL
- */
-
-static int usb_pll_clk_enable(struct clk_hw *hw)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- reg &= ~(SIRFSOC_USBPHY_PLL_POWERDOWN | SIRFSOC_USBPHY_PLL_BYPASS);
- writel(reg, sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- while (!(readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL) &
- SIRFSOC_USBPHY_PLL_LOCK))
- cpu_relax();
-
- return 0;
-}
-
-static void usb_pll_clk_disable(struct clk_hw *clk)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- reg |= (SIRFSOC_USBPHY_PLL_POWERDOWN | SIRFSOC_USBPHY_PLL_BYPASS);
- writel(reg, sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
-}
-
-static unsigned long usb_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- return (reg & SIRFSOC_USBPHY_PLL_BYPASS) ? parent_rate : 48*MHZ;
-}
-
-static const struct clk_ops usb_pll_ops = {
- .enable = usb_pll_clk_enable,
- .disable = usb_pll_clk_disable,
- .recalc_rate = usb_pll_clk_recalc_rate,
-};
-
-static const struct clk_init_data clk_usb_pll_init = {
- .name = "usb_pll",
- .ops = &usb_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_hw usb_pll_clk_hw = {
- .init = &clk_usb_pll_init,
-};
-
-/*
- * clock domains - cpu, mem, sys/io, dsp, gfx
- */
-
-static const char * const dmn_clk_parents[] = {
- "rtc",
- "osc",
- "pll1",
- "pll2",
- "pll3",
-};
-
-static u8 dmn_clk_get_parent(struct clk_hw *hw)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- u32 cfg = clkc_readl(clk->regofs);
- const char *name = clk_hw_get_name(hw);
-
- /* parent of io domain can only be pll3 */
- if (strcmp(name, "io") == 0)
- return 4;
-
- WARN_ON((cfg & (BIT(3) - 1)) > 4);
-
- return cfg & (BIT(3) - 1);
-}
-
-static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- u32 cfg = clkc_readl(clk->regofs);
- const char *name = clk_hw_get_name(hw);
-
- /* parent of io domain can only be pll3 */
- if (strcmp(name, "io") == 0)
- return -EINVAL;
-
- cfg &= ~(BIT(3) - 1);
- clkc_writel(cfg | parent, clk->regofs);
- /* BIT(3) - switching status: 1 - busy, 0 - done */
- while (clkc_readl(clk->regofs) & BIT(3))
- cpu_relax();
-
- return 0;
-}
-
-static unsigned long dmn_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-
-{
- unsigned long fin = parent_rate;
- struct clk_dmn *clk = to_dmnclk(hw);
-
- u32 cfg = clkc_readl(clk->regofs);
-
- if (cfg & BIT(24)) {
- /* fcd bypass mode */
- return fin;
- } else {
- /*
- * wait count: bit[19:16], hold count: bit[23:20]
- */
- u32 wait = (cfg >> 16) & (BIT(4) - 1);
- u32 hold = (cfg >> 20) & (BIT(4) - 1);
-
- return fin / (wait + hold + 2);
- }
-}
-
-static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long fin;
- unsigned ratio, wait, hold;
- const char *name = clk_hw_get_name(hw);
- unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
-
- fin = *parent_rate;
- ratio = fin / rate;
-
- if (ratio < 2)
- ratio = 2;
- if (ratio > BIT(bits + 1))
- ratio = BIT(bits + 1);
-
- wait = (ratio >> 1) - 1;
- hold = ratio - wait - 2;
-
- return fin / (wait + hold + 2);
-}
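
The wait/hold split reconstructs the requested ratio; a sketch with assumed numbers:

	/*
	 * Dividing an 800 MHz domain parent down to 200 MHz:
	 *   ratio = 800 / 200 = 4
	 *   wait  = (4 >> 1) - 1 = 1
	 *   hold  = 4 - 1 - 2    = 1
	 *   fout  = fin / (wait + hold + 2) = 800 / 4 = 200 MHz
	 */
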
-
-static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- unsigned long fin;
- unsigned ratio, wait, hold, reg;
- const char *name = clk_hw_get_name(hw);
- unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
-
- fin = parent_rate;
- ratio = fin / rate;
-
- if (unlikely(ratio < 2 || ratio > BIT(bits + 1)))
- return -EINVAL;
-
- WARN_ON(fin % rate);
-
- wait = (ratio >> 1) - 1;
- hold = ratio - wait - 2;
-
- reg = clkc_readl(clk->regofs);
- reg &= ~(((BIT(bits) - 1) << 16) | ((BIT(bits) - 1) << 20));
- reg |= (wait << 16) | (hold << 20) | BIT(25);
- clkc_writel(reg, clk->regofs);
-
-	/* wait for the FCD change to take effect */
- while (clkc_readl(clk->regofs) & BIT(25))
- cpu_relax();
-
- return 0;
-}
-
-static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- int ret1, ret2;
- struct clk *cur_parent;
-
- if (rate == clk_get_rate(clk_pll1.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
- return ret1;
- }
-
- if (rate == clk_get_rate(clk_pll2.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
- return ret1;
- }
-
- if (rate == clk_get_rate(clk_pll3.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll3.hw.clk);
- return ret1;
- }
-
- cur_parent = clk_get_parent(hw->clk);
-
-	/* switch to a temporary pll before setting the parent pll's rate */
- if (cur_parent == clk_pll1.hw.clk) {
- ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
- BUG_ON(ret1);
- }
-
- ret2 = clk_set_rate(clk_pll1.hw.clk, rate);
-
- ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
-
- return ret2 ? ret2 : ret1;
-}
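
A sketch of what this buys a consumer such as cpufreq (rate assumed, helper name hypothetical): a clk_set_rate() on "cpu" that matches no PLL transparently re-tunes pll1 while the CPU temporarily runs from pll2.

	/* Sketch only; relies on the "cpu" clkdev entry registered by the
	 * SoC init code (see prima2_clk_init() later in this diff). */
	static int cpu_rate_example(void)
	{
		struct clk *cpu = clk_get_sys("cpu", NULL);

		if (IS_ERR(cpu))
			return PTR_ERR(cpu);
		/* parks cpu on pll2, retunes pll1 to 702 MHz, switches back */
		return clk_set_rate(cpu, 702000000);
	}
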
-
-static const struct clk_ops msi_ops = {
- .set_rate = dmn_clk_set_rate,
- .round_rate = dmn_clk_round_rate,
- .recalc_rate = dmn_clk_recalc_rate,
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
-};
-
-static const struct clk_init_data clk_mem_init = {
- .name = "mem",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_mem = {
- .regofs = SIRFSOC_CLKC_MEM_CFG,
- .hw = {
- .init = &clk_mem_init,
- },
-};
-
-static const struct clk_init_data clk_sys_init = {
- .name = "sys",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
- .flags = CLK_SET_RATE_GATE,
-};
-
-static struct clk_dmn clk_sys = {
- .regofs = SIRFSOC_CLKC_SYS_CFG,
- .hw = {
- .init = &clk_sys_init,
- },
-};
-
-static const struct clk_init_data clk_io_init = {
- .name = "io",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_io = {
- .regofs = SIRFSOC_CLKC_IO_CFG,
- .hw = {
- .init = &clk_io_init,
- },
-};
-
-static const struct clk_ops cpu_ops = {
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
- .set_rate = cpu_clk_set_rate,
- .round_rate = cpu_clk_round_rate,
- .recalc_rate = cpu_clk_recalc_rate,
-};
-
-static const struct clk_init_data clk_cpu_init = {
- .name = "cpu",
- .ops = &cpu_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
- .flags = CLK_SET_RATE_PARENT,
-};
-
-static struct clk_dmn clk_cpu = {
- .regofs = SIRFSOC_CLKC_CPU_CFG,
- .hw = {
- .init = &clk_cpu_init,
- },
-};
-
-static const struct clk_ops dmn_ops = {
- .is_enabled = std_clk_is_enabled,
- .enable = std_clk_enable,
- .disable = std_clk_disable,
- .set_rate = dmn_clk_set_rate,
- .round_rate = dmn_clk_round_rate,
- .recalc_rate = dmn_clk_recalc_rate,
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
-};
-
-/* dsp, gfx, mm, lcd and vpp domain */
-
-static const struct clk_init_data clk_dsp_init = {
- .name = "dsp",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_dsp = {
- .regofs = SIRFSOC_CLKC_DSP_CFG,
- .enable_bit = 0,
- .hw = {
- .init = &clk_dsp_init,
- },
-};
-
-static const struct clk_init_data clk_gfx_init = {
- .name = "gfx",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_gfx = {
- .regofs = SIRFSOC_CLKC_GFX_CFG,
- .enable_bit = 8,
- .hw = {
- .init = &clk_gfx_init,
- },
-};
-
-static const struct clk_init_data clk_mm_init = {
- .name = "mm",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_mm = {
- .regofs = SIRFSOC_CLKC_MM_CFG,
- .enable_bit = 9,
- .hw = {
- .init = &clk_mm_init,
- },
-};
-
-/*
- * on atlas6, gfx2d reuses the enable bit of prima2's clk_mm
- */
-#define clk_gfx2d clk_mm
-
-static const struct clk_init_data clk_lcd_init = {
- .name = "lcd",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_lcd = {
- .regofs = SIRFSOC_CLKC_LCD_CFG,
- .enable_bit = 10,
- .hw = {
- .init = &clk_lcd_init,
- },
-};
-
-static const struct clk_init_data clk_vpp_init = {
- .name = "vpp",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_vpp = {
- .regofs = SIRFSOC_CLKC_LCD_CFG,
- .enable_bit = 11,
- .hw = {
- .init = &clk_vpp_init,
- },
-};
-
-static const struct clk_init_data clk_mmc01_init = {
- .name = "mmc01",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static const struct clk_init_data clk_mmc23_init = {
- .name = "mmc23",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static const struct clk_init_data clk_mmc45_init = {
- .name = "mmc45",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-/*
- * peripheral controllers in io domain
- */
-
-static int std_clk_is_enabled(struct clk_hw *hw)
-{
- u32 reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- return !!(clkc_readl(reg) & BIT(bit));
-}
-
-static int std_clk_enable(struct clk_hw *hw)
-{
- u32 val, reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- BUG_ON(clk->enable_bit < 0 || clk->enable_bit > 63);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- val = clkc_readl(reg) | BIT(bit);
- clkc_writel(val, reg);
- return 0;
-}
-
-static void std_clk_disable(struct clk_hw *hw)
-{
- u32 val, reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- BUG_ON(clk->enable_bit < 0 || clk->enable_bit > 63);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- val = clkc_readl(reg) & ~BIT(bit);
- clkc_writel(val, reg);
-}
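
The enable_bit encoding spans the two 32-bit enable registers; a worked example using the clk_uart0 definition below:

	/*
	 * enable_bit = 36 (uart0):
	 *   bit = 36 % 32 = 4
	 *   reg = 36 / 32 = 1  ->  SIRFSOC_CLKC_CLK_EN0 + 1 * 4 = 0x0004 (CLK_EN1)
	 * so uart0 is gated by bit 4 of CLK_EN1.
	 */
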
-
-static const char * const std_clk_io_parents[] = {
- "io",
-};
-
-static const struct clk_ops ios_ops = {
- .is_enabled = std_clk_is_enabled,
- .enable = std_clk_enable,
- .disable = std_clk_disable,
-};
-
-static const struct clk_init_data clk_cphif_init = {
- .name = "cphif",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_cphif = {
- .enable_bit = 20,
- .hw = {
- .init = &clk_cphif_init,
- },
-};
-
-static const struct clk_init_data clk_dmac0_init = {
- .name = "dmac0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_dmac0 = {
- .enable_bit = 32,
- .hw = {
- .init = &clk_dmac0_init,
- },
-};
-
-static const struct clk_init_data clk_dmac1_init = {
- .name = "dmac1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_dmac1 = {
- .enable_bit = 33,
- .hw = {
- .init = &clk_dmac1_init,
- },
-};
-
-static const struct clk_init_data clk_audio_init = {
- .name = "audio",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_audio = {
- .enable_bit = 35,
- .hw = {
- .init = &clk_audio_init,
- },
-};
-
-static const struct clk_init_data clk_uart0_init = {
- .name = "uart0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart0 = {
- .enable_bit = 36,
- .hw = {
- .init = &clk_uart0_init,
- },
-};
-
-static const struct clk_init_data clk_uart1_init = {
- .name = "uart1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart1 = {
- .enable_bit = 37,
- .hw = {
- .init = &clk_uart1_init,
- },
-};
-
-static const struct clk_init_data clk_uart2_init = {
- .name = "uart2",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart2 = {
- .enable_bit = 38,
- .hw = {
- .init = &clk_uart2_init,
- },
-};
-
-static const struct clk_init_data clk_usp0_init = {
- .name = "usp0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp0 = {
- .enable_bit = 39,
- .hw = {
- .init = &clk_usp0_init,
- },
-};
-
-static const struct clk_init_data clk_usp1_init = {
- .name = "usp1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp1 = {
- .enable_bit = 40,
- .hw = {
- .init = &clk_usp1_init,
- },
-};
-
-static const struct clk_init_data clk_usp2_init = {
- .name = "usp2",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp2 = {
- .enable_bit = 41,
- .hw = {
- .init = &clk_usp2_init,
- },
-};
-
-static const struct clk_init_data clk_vip_init = {
- .name = "vip",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_vip = {
- .enable_bit = 42,
- .hw = {
- .init = &clk_vip_init,
- },
-};
-
-static const struct clk_init_data clk_spi0_init = {
- .name = "spi0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_spi0 = {
- .enable_bit = 43,
- .hw = {
- .init = &clk_spi0_init,
- },
-};
-
-static const struct clk_init_data clk_spi1_init = {
- .name = "spi1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_spi1 = {
- .enable_bit = 44,
- .hw = {
- .init = &clk_spi1_init,
- },
-};
-
-static const struct clk_init_data clk_tsc_init = {
- .name = "tsc",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_tsc = {
- .enable_bit = 45,
- .hw = {
- .init = &clk_tsc_init,
- },
-};
-
-static const struct clk_init_data clk_i2c0_init = {
- .name = "i2c0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_i2c0 = {
- .enable_bit = 46,
- .hw = {
- .init = &clk_i2c0_init,
- },
-};
-
-static const struct clk_init_data clk_i2c1_init = {
- .name = "i2c1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_i2c1 = {
- .enable_bit = 47,
- .hw = {
- .init = &clk_i2c1_init,
- },
-};
-
-static const struct clk_init_data clk_pwmc_init = {
- .name = "pwmc",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_pwmc = {
- .enable_bit = 48,
- .hw = {
- .init = &clk_pwmc_init,
- },
-};
-
-static const struct clk_init_data clk_efuse_init = {
- .name = "efuse",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_efuse = {
- .enable_bit = 49,
- .hw = {
- .init = &clk_efuse_init,
- },
-};
-
-static const struct clk_init_data clk_pulse_init = {
- .name = "pulse",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_pulse = {
- .enable_bit = 50,
- .hw = {
- .init = &clk_pulse_init,
- },
-};
-
-static const char * const std_clk_dsp_parents[] = {
- "dsp",
-};
-
-static const struct clk_init_data clk_gps_init = {
- .name = "gps",
- .ops = &ios_ops,
- .parent_names = std_clk_dsp_parents,
- .num_parents = ARRAY_SIZE(std_clk_dsp_parents),
-};
-
-static struct clk_std clk_gps = {
- .enable_bit = 1,
- .hw = {
- .init = &clk_gps_init,
- },
-};
-
-static const struct clk_init_data clk_mf_init = {
- .name = "mf",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_mf = {
- .enable_bit = 2,
- .hw = {
- .init = &clk_mf_init,
- },
-};
-
-static const char * const std_clk_sys_parents[] = {
- "sys",
-};
-
-static const struct clk_init_data clk_security_init = {
- .name = "security",
- .ops = &ios_ops,
- .parent_names = std_clk_sys_parents,
- .num_parents = ARRAY_SIZE(std_clk_sys_parents),
-};
-
-static struct clk_std clk_security = {
- .enable_bit = 19,
- .hw = {
- .init = &clk_security_init,
- },
-};
-
-static const char * const std_clk_usb_parents[] = {
- "usb_pll",
-};
-
-static const struct clk_init_data clk_usb0_init = {
- .name = "usb0",
- .ops = &ios_ops,
- .parent_names = std_clk_usb_parents,
- .num_parents = ARRAY_SIZE(std_clk_usb_parents),
-};
-
-static struct clk_std clk_usb0 = {
- .enable_bit = 16,
- .hw = {
- .init = &clk_usb0_init,
- },
-};
-
-static const struct clk_init_data clk_usb1_init = {
- .name = "usb1",
- .ops = &ios_ops,
- .parent_names = std_clk_usb_parents,
- .num_parents = ARRAY_SIZE(std_clk_usb_parents),
-};
-
-static struct clk_std clk_usb1 = {
- .enable_bit = 17,
- .hw = {
- .init = &clk_usb1_init,
- },
-};
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
deleted file mode 100644
index d17b345f4d2d..000000000000
--- a/drivers/clk/sirf/clk-prima2.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFprimaII
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#include "prima2.h"
-#include "clk-common.c"
-
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
-static const struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_nand = {
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
-enum prima2_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, cphif, maxclk,
-};
-
-static __initdata struct clk_hw *prima2_clk_hw_array[maxclk] = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_mm.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
- &clk_cphif.hw,
-};
-
-static struct clk *prima2_clks[maxclk];
-
-static void __init prima2_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
-	/* These are always available (RTC and 26 MHz OSC) */
- prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
- prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
- 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(IS_ERR(prima2_clks[i]));
- }
- clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
- clk_register_clkdev(prima2_clks[io], NULL, "io");
- clk_register_clkdev(prima2_clks[mem], NULL, "mem");
- clk_register_clkdev(prima2_clks[mem], NULL, "osc");
-
- clk_data.clks = prima2_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(prima2_clk, "sirf,prima2-clkc", prima2_clk_init);
diff --git a/drivers/clk/sirf/prima2.h b/drivers/clk/sirf/prima2.h
deleted file mode 100644
index 2fb56941795d..000000000000
--- a/drivers/clk/sirf/prima2.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0014
-#define SIRFSOC_CLKC_CPU_CFG 0x0018
-#define SIRFSOC_CLKC_MEM_CFG 0x001c
-#define SIRFSOC_CLKC_SYS_CFG 0x0020
-#define SIRFSOC_CLKC_IO_CFG 0x0024
-#define SIRFSOC_CLKC_DSP_CFG 0x0028
-#define SIRFSOC_CLKC_GFX_CFG 0x002c
-#define SIRFSOC_CLKC_MM_CFG 0x0030
-#define SIRFSOC_CLKC_LCD_CFG 0x0034
-#define SIRFSOC_CLKC_MMC_CFG 0x0038
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
-#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
-#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index bb3e80928ebe..7689bdd0a914 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -196,6 +196,17 @@ static const struct stratix10_pll_clock agilex_pll_clks[] = {
0, 0x9c},
};
+static const struct n5x_perip_c_clock n5x_main_perip_c_clks[] = {
+ { AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x54, 0},
+ { AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x54, 8},
+ { AGILEX_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, 0x54, 16},
+ { AGILEX_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, 0x54, 24},
+ { AGILEX_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, 0xA8, 0},
+ { AGILEX_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, 0xA8, 8},
+ { AGILEX_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, 0xA8, 16},
+ { AGILEX_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, 0xA8, 24},
+};
+
static const struct stratix10_perip_c_clock agilex_main_perip_c_clks[] = {
{ AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x58},
{ AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x5C},
@@ -289,6 +300,25 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
10, 0, 0, 0, 0, 0, 4},
};
+static int n5x_clk_register_c_perip(const struct n5x_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = n5x_register_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
int nums, struct stratix10_clock_data *data)
{
@@ -367,6 +397,26 @@ static int agilex_clk_register_pll(const struct stratix10_pll_clock *clks,
return 0;
}
+static int n5x_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = n5x_register_pll(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_device *pdev,
int nr_clks)
{
@@ -401,7 +451,7 @@ static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_de
return clk_data;
}
-static int agilex_clkmgr_probe(struct platform_device *pdev)
+static int agilex_clkmgr_init(struct platform_device *pdev)
{
struct stratix10_clock_data *clk_data;
@@ -423,9 +473,43 @@ static int agilex_clkmgr_probe(struct platform_device *pdev)
return 0;
}
+static int n5x_clkmgr_init(struct platform_device *pdev)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_agilex_clk_init(pdev, AGILEX_NUM_CLKS);
+ if (IS_ERR(clk_data))
+ return PTR_ERR(clk_data);
+
+ n5x_clk_register_pll(agilex_pll_clks, ARRAY_SIZE(agilex_pll_clks), clk_data);
+
+ n5x_clk_register_c_perip(n5x_main_perip_c_clks,
+ ARRAY_SIZE(n5x_main_perip_c_clks), clk_data);
+
+ agilex_clk_register_cnt_perip(agilex_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex_main_perip_cnt_clks),
+ clk_data);
+
+ agilex_clk_register_gate(agilex_gate_clks, ARRAY_SIZE(agilex_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static int agilex_clkmgr_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *init_func);
+
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+ return probe_func(pdev);
+}
+
static const struct of_device_id agilex_clkmgr_match_table[] = {
{ .compatible = "intel,agilex-clkmgr",
- .data = agilex_clkmgr_probe },
+ .data = agilex_clkmgr_init },
+ { .compatible = "intel,easic-n5x-clkmgr",
+ .data = n5x_clkmgr_init },
{ }
};
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 397b77b89b16..0ff2b9d24035 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -15,6 +15,21 @@
#define to_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+static unsigned long n5x_clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div;
+ unsigned long shift = socfpgaclk->shift;
+ u32 val;
+
+ val = readl(socfpgaclk->hw.reg);
+ val &= (0x1f << shift);
+ div = (val >> shift) + 1;
+
+ return parent_rate / div;
+}
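
The divider is a 5-bit field at a per-clock shift; a sketch with assumed register contents:

	/*
	 * shift = 16, readl() returns 0x00030000:
	 *   field = (0x00030000 >> 16) & 0x1f = 3  ->  div = 3 + 1 = 4
	 *   rate  = parent_rate / 4
	 */
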
+
static unsigned long clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -63,6 +78,11 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
return parent;
}
+static const struct clk_ops n5x_peri_c_clk_ops = {
+ .recalc_rate = n5x_clk_peri_c_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
static const struct clk_ops peri_c_clk_ops = {
.recalc_rate = clk_peri_c_clk_recalc_rate,
.get_parent = clk_periclk_get_parent,
@@ -107,6 +127,39 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
return clk;
}
+struct clk *n5x_register_periph(const struct n5x_perip_c_clock *clks,
+ void __iomem *regbase)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ periph_clk->hw.reg = regbase + clks->offset;
+ periph_clk->shift = clks->shift;
+
+ init.name = name;
+ init.ops = &n5x_peri_c_clk_ops;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = parent_name ? &parent_name : NULL;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
+
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks,
void __iomem *regbase)
{
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index db54f7d806a0..3338f054fe98 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -73,7 +73,6 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFGPA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
- int rc;
int i = 0;
of_property_read_u32(node, "reg", &reg);
@@ -108,7 +107,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
kfree(pll_clk);
return NULL;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 4e268953b7da..f6f66e08e1f4 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -27,10 +27,37 @@
#define SWCTRLBTCLKSEL_MASK 0x200
#define SWCTRLBTCLKSEL_SHIFT 9
+#define SOCFPGA_N5X_PLLDIV_FDIV_MASK GENMASK(16, 8)
+#define SOCFPGA_N5X_PLLDIV_FDIV_SHIFT 8
+#define SOCFPGA_N5X_PLLDIV_RDIV_MASK GENMASK(5, 0)
+#define SOCFPGA_N5X_PLLDIV_QDIV_MASK GENMASK(26, 24)
+#define SOCFPGA_N5X_PLLDIV_QDIV_SHIFT 24
+
#define SOCFPGA_BOOT_CLK "boot_clk"
#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+static unsigned long n5x_clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long fdiv, reg, rdiv, qdiv;
+ u32 power = 1;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg + 0x8);
+ fdiv = (reg & SOCFPGA_N5X_PLLDIV_FDIV_MASK) >> SOCFPGA_N5X_PLLDIV_FDIV_SHIFT;
+ rdiv = (reg & SOCFPGA_N5X_PLLDIV_RDIV_MASK);
+ qdiv = (reg & SOCFPGA_N5X_PLLDIV_QDIV_MASK) >> SOCFPGA_N5X_PLLDIV_QDIV_SHIFT;
+
+ while (qdiv) {
+ power *= 2;
+ qdiv--;
+ }
+
+ return ((parent_rate * 2 * (fdiv + 1)) / ((rdiv + 1) * power));
+}
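
Plugging assumed values into the formula above, rate = parent * 2 * (fdiv + 1) / ((rdiv + 1) * 2^qdiv):

	/*
	 * Assumed 25 MHz reference, fdiv = 79, rdiv = 0, qdiv = 1:
	 *   rate = 25 MHz * 2 * 80 / (1 * 2) = 2000 MHz
	 */
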
+
static unsigned long agilex_clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -123,6 +150,25 @@ static int clk_pll_prepare(struct clk_hw *hwclk)
return 0;
}
+static int n5x_clk_pll_prepare(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 reg;
+
+ /* Bring PLL out of reset */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+ reg |= SOCFPGA_PLL_RESET_MASK;
+ writel(reg, socfpgaclk->hw.reg + 0x4);
+
+ return 0;
+}
+
+static const struct clk_ops n5x_clk_pll_ops = {
+ .recalc_rate = n5x_clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = n5x_clk_pll_prepare,
+};
+
static const struct clk_ops agilex_clk_pll_ops = {
.recalc_rate = agilex_clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
@@ -214,3 +260,40 @@ struct clk *agilex_register_pll(const struct stratix10_pll_clock *clks,
}
return clk;
}
+
+struct clk *n5x_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &n5x_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index e5fb786843f3..3cf99df7d005 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -80,7 +80,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
- int rc;
of_property_read_u32(node, "reg", &reg);
@@ -111,7 +110,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
kfree(pll_clk);
return NULL;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index f9d5d724c694..420deed677ce 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -30,6 +30,17 @@ struct stratix10_perip_c_clock {
unsigned long offset;
};
+struct n5x_perip_c_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ unsigned long shift;
+};
+
struct stratix10_perip_cnt_clock {
unsigned int id;
const char *name;
@@ -64,8 +75,12 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
struct clk *agilex_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
+struct clk *n5x_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg);
struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
- void __iomem *);
+ void __iomem *reg);
+struct clk *n5x_register_periph(const struct n5x_perip_c_clock *clks,
+ void __iomem *reg);
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
void __iomem *);
struct clk *s10_register_gate(const struct stratix10_gate_clock *,
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 591248c9a88e..8c8974866789 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9163bbb46411..c0dc94355c87 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index f1adc858b590..dd6062e043e0 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -172,10 +172,10 @@ static const struct clkgen_quadfs_data st_fs660c32_D = {
* ST quad channel frequency synthesizer block
*
* @hw: handle between common and hardware-specific interfaces.
- * @ndiv: regmap field for the ndiv control.
* @regs_base: base address of the configuration registers.
* @lock: spinlock.
- *
+ * @data: local driver data
+ * @ndiv: regmap field for the ndiv control.
*/
struct st_clk_quadfs_pll {
struct clk_hw hw;
@@ -426,7 +426,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
* parent - fixed parent. No clk_set_parent support
*/
-/**
+/*
* struct st_clk_quadfs_fsynth - One clock output from a four channel digital
* frequency synthesizer (fsynth) block.
*
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index c3952f2c42ba..119c5b33080c 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -130,12 +130,11 @@ static struct clkgen_pll_data st_pll4600c28_418_a9 = {
* parent - fixed parent. No clk_set_parent support
*/
-/**
+/*
* PLL clock that is integrated in the ClockGenA instances on the STiH415
* and STiH416.
*
* @hw: handle between common and hardware-specific interfaces.
- * @type: PLL instance type.
* @regs_base: base of the PLL configuration register(s).
*
*/
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index ce5f5847d5d3..cd46d8853876 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -32,8 +32,13 @@ config SUN50I_H6_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_H616_CCU
+ bool "Support for the Allwinner H616 CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN50I_H6_R_CCU
- bool "Support for the Allwinner H6 PRCM CCU"
+ bool "Support for the Allwinner H6 and H616 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 3eb5cff40eac..96c324306d97 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN50I_A100_CCU) += ccu-sun50i-a100.o
obj-$(CONFIG_SUN50I_A100_R_CCU) += ccu-sun50i-a100-r.o
obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
+obj-$(CONFIG_SUN50I_H616_CCU) += ccu-sun50i-h616.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 50f8d1bc7046..f8909a7ed553 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -91,6 +91,8 @@ static SUNXI_CCU_GATE(r_apb2_uart_clk, "r-apb2-uart", "r-apb2",
0x18c, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb2_i2c_clk, "r-apb2-i2c", "r-apb2",
0x19c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb2_rsb_clk, "r-apb2-rsb", "r-apb2",
+ 0x1bc, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1",
0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1",
@@ -130,12 +132,23 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&r_apb1_pwm_clk.common,
&r_apb2_uart_clk.common,
&r_apb2_i2c_clk.common,
+ &r_apb2_rsb_clk.common,
&r_apb1_ir_clk.common,
&r_apb1_w1_clk.common,
&ir_clk.common,
&w1_clk.common,
};
+static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
+ &r_apb1_clk.common,
+ &r_apb2_clk.common,
+ &r_apb1_twd_clk.common,
+ &r_apb2_i2c_clk.common,
+ &r_apb2_rsb_clk.common,
+ &r_apb1_ir_clk.common,
+ &ir_clk.common,
+};
+
static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
@@ -147,6 +160,7 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
[CLK_R_APB1_PWM] = &r_apb1_pwm_clk.common.hw,
[CLK_R_APB2_UART] = &r_apb2_uart_clk.common.hw,
[CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
+ [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
[CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
[CLK_R_APB1_W1] = &r_apb1_w1_clk.common.hw,
[CLK_IR] = &ir_clk.common.hw,
@@ -155,16 +169,38 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.num = CLK_NUMBER,
};
+static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = {
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_APB2] = &r_apb2_clk.common.hw,
+ [CLK_R_APB1_TWD] = &r_apb1_twd_clk.common.hw,
+ [CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
+ [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
+ [CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB1_PWM] = { 0x13c, BIT(16) },
[RST_R_APB2_UART] = { 0x18c, BIT(16) },
[RST_R_APB2_I2C] = { 0x19c, BIT(16) },
+ [RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
[RST_R_APB1_IR] = { 0x1cc, BIT(16) },
[RST_R_APB1_W1] = { 0x1ec, BIT(16) },
};
+static struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
+ [RST_R_APB1_TWD] = { 0x12c, BIT(16) },
+ [RST_R_APB2_I2C] = { 0x19c, BIT(16) },
+ [RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
+ [RST_R_APB1_IR] = { 0x1cc, BIT(16) },
+};
+
static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
.ccu_clks = sun50i_h6_r_ccu_clks,
.num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
@@ -175,6 +211,16 @@ static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_h6_r_ccu_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
+ .ccu_clks = sun50i_h616_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h616_r_ccu_clks),
+
+ .hw_clks = &sun50i_h616_r_hw_clks,
+
+ .resets = sun50i_h616_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h616_r_ccu_resets),
+};
+
static void __init sunxi_r_ccu_init(struct device_node *node,
const struct sunxi_ccu_desc *desc)
{
@@ -195,3 +241,10 @@ static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
}
CLK_OF_DECLARE(sun50i_h6_r_ccu, "allwinner,sun50i-h6-r-ccu",
sun50i_h6_r_ccu_setup);
+
+static void __init sun50i_h616_r_ccu_setup(struct device_node *node)
+{
+ sunxi_r_ccu_init(node, &sun50i_h616_r_ccu_desc);
+}
+CLK_OF_DECLARE(sun50i_h616_r_ccu, "allwinner,sun50i-h616-r-ccu",
+ sun50i_h616_r_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
index 782117dc0b28..7e290b840803 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
@@ -14,6 +14,6 @@
#define CLK_R_APB2 3
-#define CLK_NUMBER (CLK_W1 + 1)
+#define CLK_NUMBER (CLK_R_APB2_RSB + 1)
#endif /* _CCU_SUN50I_H6_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index f2497d0a4683..bff446b78290 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -237,7 +237,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
psi_ahb1_ahb2_parents,
0x510,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -246,19 +246,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
"psi-ahb1-ahb2",
"pll-periph0" };
static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -682,7 +682,7 @@ static struct ccu_mux hdmi_cec_clk = {
.common = {
.reg = 0xb10,
- .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
hdmi_cec_parents,
&ccu_mux_ops,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
new file mode 100644
index 000000000000..225307305880
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -0,0 +1,1150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Arm Ltd.
+ * Based on the H6 CCU driver, which is:
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-h616.h"
+
+/*
+ * The CPU PLL is actually an NP clock, with P being /1, /2 or /4. However,
+ * P should only be used for output frequencies lower than 288 MHz.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ *
+ * The M factor is present in the register's description, but not in the
+ * frequency formula; it is documented as "M is only used for backdoor
+ * testing", so it is not modelled and is forced to 0.
+ */
+#define SUN50I_H616_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
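
With P and M pinned as described, the modelled rate reduces to 24 MHz * N, where N is the 8-bit factor at bit 8 with a minimum of 12 (per _SUNXI_CCU_MULT_MIN(8, 8, 12)); a sketch assuming the common sunxi-ng encoding of N = field + 1:

	/*
	 * PLL_CPUX reg bits [15:8] = 41 (0x29)  ->  N = 42
	 *   rate = 24 MHz * 42 = 1008 MHz
	 */
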
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN50I_H616_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT("pll-ddr0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_DDR1_REG 0x018
+static struct ccu_nkmp pll_ddr1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x018,
+ .hw.init = CLK_HW_INIT("pll-ddr1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_PERIPH0_REG 0x020
+static struct ccu_nkmp pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x020,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_PERIPH1_REG 0x028
+static struct ccu_nkmp pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * For the video PLLs, the output divider is described as "used for testing"
+ * in the user manual, so it is not modelled and is forced to 0.
+ */
+#define SUN50I_H616_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x040,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video0", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x048,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video1", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VIDEO2_REG 0x050
+static struct ccu_nm pll_video2_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x050,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video2", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT("pll-ve", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_DE_REG 0x060
+static struct ccu_nkmp pll_de_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x060,
+ .hw.init = CLK_HW_INIT("pll-de", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * TODO: Determine SDM settings for the audio PLL. The manual suggests
+ * PLL_FACTOR_N=16, PLL_POST_DIV_P=2, OUTPUT_DIV=2, pattern=0xe000c49b
+ * for 24.576 MHz, and PLL_FACTOR_N=22, PLL_POST_DIV_P=3, OUTPUT_DIV=2,
+ * pattern=0xe001288c for 22.5792 MHz.
+ * This clashes with our fixed PLL_POST_DIV_P.
+ */
+#define SUN50I_H616_PLL_AUDIO_REG 0x078
+static struct ccu_nm pll_audio_hs_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x078,
+ .hw.init = CLK_HW_INIT("pll-audio-hs", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const cpux_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-cpux", "pll-periph0" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 3, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x500, 0, 2, 0);
+static SUNXI_CCU_M(cpux_apb_clk, "cpux-apb", "cpux", 0x500, 8, 2, 0);
+
+static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents,
+ 0x510,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
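
For these MP clocks the rate works out to parent / (M + 1) / 2^P (the usual sunxi-ng ccu_mp encoding), so the 2-bit M field caps the linear divider at /4; a sketch with assumed values:

	/*
	 * apb2 fed from a 600 MHz pll-periph0 (assumed), reg 0x524:
	 *   M (bits [1:0]) = 2  ->  /3
	 *   P (bits [9:8]) = 1  ->  /2
	 *   rate = 600 MHz / 3 / 2 = 100 MHz
	 */
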
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x540,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const de_parents[] = { "pll-de", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
+ 0x60c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
+ de_parents,
+ 0x620,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
+ 0x62c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(g2d_clk, "g2d", de_parents, 0x630,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_g2d_clk, "bus-g2d", "psi-ahb1-ahb2",
+ 0x63c, BIT(0), 0);
+
+static const char * const gpu0_parents[] = { "pll-gpu", "gpu1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu0_clk, "gpu0", gpu0_parents, 0x670,
+ 0, 2, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(gpu1_clk, "gpu1", "pll-periph0-2x", 0x674,
+ 0, 2, /* M */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
+ 0x67c, BIT(0), 0);
+
+static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "psi-ahb1-ahb2",
+ 0x68c, BIT(0), 0);
+
+static const char * const ve_parents[] = { "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "psi-ahb1-ahb2",
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "psi-ahb1-ahb2",
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
+ 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
+static struct ccu_div dram_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x800,
+ .hw.init = CLK_HW_INIT_PARENTS("dram",
+ dram_parents,
+ &ccu_div_ops,
+ CLK_IS_CRITICAL),
+ },
+};
+
+static SUNXI_CCU_GATE(mbus_dma_clk, "mbus-dma", "mbus",
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE(mbus_ve_clk, "mbus-ve", "mbus",
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE(mbus_ce_clk, "mbus-ce", "mbus",
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE(mbus_ts_clk, "mbus-ts", "mbus",
+ 0x804, BIT(3), 0);
+static SUNXI_CCU_GATE(mbus_nand_clk, "mbus-nand", "mbus",
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE(mbus_g2d_clk, "mbus-g2d", "mbus",
+ 0x804, BIT(10), 0);
+
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "psi-ahb1-ahb2",
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const char * const nand_spi_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", nand_spi_parents, 0x810,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", nand_spi_parents, 0x814,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
+
+static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb3", 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x90c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2", 0x90c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_uart5_clk, "bus-uart5", "apb2", 0x90c, BIT(5), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", 0x91c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_i2c4_clk, "bus-i2c4", "apb2", 0x91c, BIT(4), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", nand_spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", nand_spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb3", 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb3", 0x96c, BIT(1), 0);
+
+static SUNXI_CCU_GATE(emac_25m_clk, "emac-25m", "ahb3", 0x970,
+ BIT(31) | BIT(30), 0);
+
+static SUNXI_CCU_GATE(bus_emac0_clk, "bus-emac0", "ahb3", 0x97c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_emac1_clk, "bus-emac1", "ahb3", 0x97c, BIT(1), 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x9b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb3", 0x9bc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", 0x9fc, BIT(0), 0);
+
+static const char * const audio_parents[] = { "pll-audio-1x", "pll-audio-2x",
+ "pll-audio-4x", "pll-audio-hs" };
+static struct ccu_div spdif_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa20,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
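
A note on the divider above, since the same pattern repeats for dmic and
audio-hub below: with CLK_DIVIDER_POWER_OF_TWO the 2-bit field at bit 8 holds
an exponent rather than a linear divisor, so the register values map to

    0 -> /1,  1 -> /2,  2 -> /4,  3 -> /8

of whichever audio PLL tap the mux selects.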
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", 0xa2c, BIT(0), 0);
+
+static struct ccu_div dmic_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa40,
+ .hw.init = CLK_HW_INIT_PARENTS("dmic",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_dmic_clk, "bus-dmic", "apb1", 0xa4c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_1x_clk, "audio-codec-1x",
+ audio_parents, 0xa50,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_4x_clk, "audio-codec-4x",
+ audio_parents, 0xa54,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_audio_codec_clk, "bus-audio-codec", "apb1", 0xa5c,
+ BIT(0), 0);
+
+static struct ccu_div audio_hub_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa60,
+ .hw.init = CLK_HW_INIT_PARENTS("audio-hub",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_audio_hub_clk, "bus-audio-hub", "apb1", 0xa6c, BIT(0), 0);
+
+/*
+ * Each of the four USB 2.0 ports has OHCI 12M clock source selection bits.
+ * We force them all to 0 (12 MHz divided down from 48 MHz).
+ */
+#define SUN50I_H616_USB0_CLK_REG 0xa70
+#define SUN50I_H616_USB1_CLK_REG 0xa74
+#define SUN50I_H616_USB2_CLK_REG 0xa78
+#define SUN50I_H616_USB3_CLK_REG 0xa7c
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", 0xa70, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0xa70, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc12M", 0xa74, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", 0xa74, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc12M", 0xa78, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M", 0xa78, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc12M", 0xa7c, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc24M", 0xa7c, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb3", 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb3", 0xa8c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb3", 0xa8c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb3", 0xa8c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb3", 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb3", 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb3", 0xa8c, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb3", 0xa8c, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE(bus_keyadc_clk, "bus-keyadc", "apb1", 0xa9c, BIT(0), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video0-4x",
+ "pll-video2", "pll-video2-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, 0xb00,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0xb04, BIT(31), 0);
+
+static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
+static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
+ { .index = 1, .div = 36621 },
+};
+
+#define SUN50I_H616_HDMI_CEC_CLK_REG 0xb10
+static struct ccu_mux hdmi_cec_clk = {
+ .enable = BIT(31) | BIT(30),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .fixed_predivs = hdmi_cec_predivs,
+ .n_predivs = ARRAY_SIZE(hdmi_cec_predivs),
+ },
+
+ .common = {
+ .reg = 0xb10,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
+ hdmi_cec_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
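
For reference, and assuming pll-periph0 runs at the 600 MHz usual for this
family (so pll-periph0-2x is 1.2 GHz), the fixed prediv above lands the second
CEC parent at roughly the 32 kHz the block expects:

    1200000000 Hz / 36621 ~= 32768.1 Hz

close enough to the osc32k rate of the first parent.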
+
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb3", 0xb1c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb3",
+ 0xb5c, BIT(0), 0);
+
+static const char * const tcon_tv_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
+ tcon_tv_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1",
+ tcon_tv_parents, 0xb84,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
+ 0xb9c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon_tv1_clk, "bus-tcon-tv1", "ahb3",
+ 0xb9c, BIT(1), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(tve0_clk, "tve0",
+ tcon_tv_parents, 0xbb0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_tve_top_clk, "bus-tve-top", "ahb3",
+ 0xbbc, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tve0_clk, "bus-tve0", "ahb3",
+ 0xbbc, BIT(1), 0);
+
+static const char * const hdcp_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdcp_clk, "hdcp", hdcp_parents, 0xc40,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_hdcp_clk, "bus-hdcp", "ahb3", 0xc4c, BIT(0), 0);
+
+/* Fixed factor clocks */
+static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0);
+
+static const struct clk_hw *clk_parent_pll_audio[] = {
+ &pll_audio_hs_clk.common.hw
+};
+
+/*
+ * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
+ * rates can be set exactly in conjunction with sigma-delta modulation.
+ */
+static CLK_FIXED_FACTOR_HWS(pll_audio_1x_clk, "pll-audio-1x",
+ clk_parent_pll_audio,
+ 96, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
+ clk_parent_pll_audio,
+ 48, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x",
+ clk_parent_pll_audio,
+ 24, 1, CLK_SET_RATE_PARENT);
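
A quick sanity check on the factors, taking the fixed divider of 24 from the
comment above: the 4x tap is pll-audio-hs divided by exactly that 24, and the
2x and 1x taps add a further /2 and /4, which is where the 48 (= 24 * 2) and
96 (= 24 * 4) divisors come from.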
+
+static const struct clk_hw *pll_periph0_parents[] = {
+ &pll_periph0_clk.common.hw
+};
+
+static CLK_FIXED_FACTOR_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_parents,
+ 1, 2, 0);
+
+static const struct clk_hw *pll_periph1_parents[] = {
+ &pll_periph1_clk.common.hw
+};
+
+static CLK_FIXED_FACTOR_HWS(pll_periph1_2x_clk, "pll-periph1-2x",
+ pll_periph1_parents,
+ 1, 2, 0);
+
+static CLK_FIXED_FACTOR_HW(pll_video0_4x_clk, "pll-video0-4x",
+ &pll_video0_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HW(pll_video1_4x_clk, "pll-video1-4x",
+ &pll_video1_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HW(pll_video2_4x_clk, "pll-video2-4x",
+ &pll_video2_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_h616_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_ddr1_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_clk.common,
+ &pll_video1_clk.common,
+ &pll_video2_clk.common,
+ &pll_ve_clk.common,
+ &pll_de_clk.common,
+ &pll_audio_hs_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb1_ahb2_clk.common,
+ &ahb3_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &deinterlace_clk.common,
+ &bus_deinterlace_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &gpu0_clk.common,
+ &bus_gpu_clk.common,
+ &gpu1_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_psi_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_ts_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_g2d_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_i2c4_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &emac_25m_clk.common,
+ &bus_emac0_clk.common,
+ &bus_emac1_clk.common,
+ &ts_clk.common,
+ &bus_ts_clk.common,
+ &bus_ths_clk.common,
+ &spdif_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_codec_1x_clk.common,
+ &audio_codec_4x_clk.common,
+ &bus_audio_codec_clk.common,
+ &audio_hub_clk.common,
+ &bus_audio_hub_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_phy0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_phy1_clk.common,
+ &usb_ohci2_clk.common,
+ &usb_phy2_clk.common,
+ &usb_ohci3_clk.common,
+ &usb_phy3_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ohci2_clk.common,
+ &bus_ohci3_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ehci2_clk.common,
+ &bus_ehci3_clk.common,
+ &bus_otg_clk.common,
+ &bus_keyadc_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_tcon_top_clk.common,
+ &tcon_tv0_clk.common,
+ &tcon_tv1_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &bus_tcon_tv1_clk.common,
+ &tve0_clk.common,
+ &bus_tve_top_clk.common,
+ &bus_tve0_clk.common,
+ &hdcp_clk.common,
+ &bus_hdcp_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
+ .hws = {
+ [CLK_OSC12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.hw,
+ [CLK_PLL_VIDEO2] = &pll_video2_clk.common.hw,
+ [CLK_PLL_VIDEO2_4X] = &pll_video2_4x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_AUDIO_HS] = &pll_audio_hs_clk.common.hw,
+ [CLK_PLL_AUDIO_1X] = &pll_audio_1x_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB1_AHB2] = &psi_ahb1_ahb2_clk.common.hw,
+ [CLK_AHB3] = &ahb3_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_GPU0] = &gpu0_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_GPU1] = &gpu1_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PSI] = &bus_psi_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TS] = &mbus_ts_clk.common.hw,
+ [CLK_MBUS_NAND] = &mbus_nand_clk.common.hw,
+ [CLK_MBUS_G2D] = &mbus_g2d_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_EMAC_25M] = &emac_25m_clk.common.hw,
+ [CLK_BUS_EMAC0] = &bus_emac0_clk.common.hw,
+ [CLK_BUS_EMAC1] = &bus_emac1_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_CODEC_1X] = &audio_codec_1x_clk.common.hw,
+ [CLK_AUDIO_CODEC_4X] = &audio_codec_4x_clk.common.hw,
+ [CLK_BUS_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_AUDIO_HUB] = &audio_hub_clk.common.hw,
+ [CLK_BUS_AUDIO_HUB] = &bus_audio_hub_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
+ [CLK_USB_PHY2] = &usb_phy2_clk.common.hw,
+ [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
+ [CLK_USB_PHY3] = &usb_phy3_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw,
+ [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw,
+ [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_KEYADC] = &bus_keyadc_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
+ [CLK_TVE0] = &tve0_clk.common.hw,
+ [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw,
+ [CLK_BUS_TVE0] = &bus_tve0_clk.common.hw,
+ [CLK_HDCP] = &hdcp_clk.common.hw,
+ [CLK_BUS_HDCP] = &bus_hdcp_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_h616_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DEINTERLACE] = { 0x62c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PSI] = { 0x79c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_IOMMU] = { 0x7bc, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_I2C4] = { 0x91c, BIT(20) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC0] = { 0x97c, BIT(16) },
+ [RST_BUS_EMAC1] = { 0x97c, BIT(17) },
+ [RST_BUS_TS] = { 0x9bc, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO_CODEC] = { 0xa5c, BIT(16) },
+ [RST_BUS_AUDIO_HUB] = { 0xa6c, BIT(16) },
+
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_USB_PHY2] = { 0xa78, BIT(30) },
+ [RST_USB_PHY3] = { 0xa7c, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_OHCI2] = { 0xa8c, BIT(18) },
+ [RST_BUS_OHCI3] = { 0xa8c, BIT(19) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_EHCI2] = { 0xa8c, BIT(22) },
+ [RST_BUS_EHCI3] = { 0xa8c, BIT(23) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_KEYADC] = { 0xa9c, BIT(16) },
+
+ [RST_BUS_HDMI] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
+ [RST_BUS_TVE0] = { 0xbbc, BIT(17) },
+ [RST_BUS_HDCP] = { 0xc4c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_h616_ccu_desc = {
+ .ccu_clks = sun50i_h616_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h616_ccu_clks),
+
+ .hw_clks = &sun50i_h616_hw_clks,
+
+ .resets = sun50i_h616_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h616_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN50I_H616_PLL_CPUX_REG,
+ SUN50I_H616_PLL_DDR0_REG,
+ SUN50I_H616_PLL_DDR1_REG,
+ SUN50I_H616_PLL_PERIPH0_REG,
+ SUN50I_H616_PLL_PERIPH1_REG,
+ SUN50I_H616_PLL_GPU_REG,
+ SUN50I_H616_PLL_VIDEO0_REG,
+ SUN50I_H616_PLL_VIDEO1_REG,
+ SUN50I_H616_PLL_VIDEO2_REG,
+ SUN50I_H616_PLL_VE_REG,
+ SUN50I_H616_PLL_DE_REG,
+ SUN50I_H616_PLL_AUDIO_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN50I_H616_PLL_VIDEO0_REG,
+ SUN50I_H616_PLL_VIDEO1_REG,
+ SUN50I_H616_PLL_VIDEO2_REG,
+};
+
+static const u32 usb2_clk_regs[] = {
+ SUN50I_H616_USB0_CLK_REG,
+ SUN50I_H616_USB1_CLK_REG,
+ SUN50I_H616_USB2_CLK_REG,
+ SUN50I_H616_USB3_CLK_REG,
+};
+
+static void __init sun50i_h616_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%pOF: Could not map clock registers\n", node);
+ return;
+ }
+
+ /* Enable the lock bits and the output enable bits on all PLLs */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(29) | BIT(27);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+ * See the comment before pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /*
+ * Force the OHCI 12M clock sources to 00 (12 MHz divided down from 48 MHz).
+ *
+ * This clock mux is still mysterious; the code merely forces it onto a
+ * known-valid clock parent.
+ */
+ for (i = 0; i < ARRAY_SIZE(usb2_clk_regs); i++) {
+ val = readl(reg + usb2_clk_regs[i]);
+ val &= ~GENMASK(25, 24);
+ writel(val, reg + usb2_clk_regs[i]);
+ }
+
+ /*
+ * Force the post-divider of pll-audio to 12 and the output divider
+ * of it to 2, so 24576000 and 22579200 rates can be set exactly.
+ * (The register encodes the post-divider as value + 1, hence the 11
+ * written below.)
+ */
+ val = readl(reg + SUN50I_H616_PLL_AUDIO_REG);
+ val &= ~(GENMASK(21, 16) | BIT(0));
+ writel(val | (11 << 16) | BIT(0), reg + SUN50I_H616_PLL_AUDIO_REG);
+
+ /*
+ * The first clock parent (osc32k) is unusable for CEC. But since there
+ * is no good way to force a parent switch (both parents end up running
+ * at the same frequency), just select the second parent here.
+ */
+ val = readl(reg + SUN50I_H616_HDMI_CEC_CLK_REG);
+ val |= BIT(24);
+ writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
+
+ i = sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
+ if (i)
+ pr_err("%pOF: probing clocks failed: %d\n", node, i);
+}
+
+CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
+ sun50i_h616_ccu_setup);
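
With the provider registered through CLK_OF_DECLARE above, peripheral drivers
pick up these clocks via the generic clk API. A minimal, hypothetical consumer
sketch; the "bus" name is illustrative and would have to match a clock-names
entry in the consumer's DT node:

	struct clk *bus_clk;

	bus_clk = devm_clk_get(dev, "bus");	/* resolved against clock-names */
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	return clk_prepare_enable(bus_clk);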
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
new file mode 100644
index 000000000000..dd671b413f22
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Arm Ltd.
+ */
+
+#ifndef _CCU_SUN50I_H616_H_
+#define _CCU_SUN50I_H616_H_
+
+#include <dt-bindings/clock/sun50i-h616-ccu.h>
+#include <dt-bindings/reset/sun50i-h616-ccu.h>
+
+#define CLK_OSC12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_DDR0 2
+#define CLK_PLL_DDR1 3
+
+/* PLL_PERIPH0 exported for PRCM */
+
+#define CLK_PLL_PERIPH0_2X 5
+#define CLK_PLL_PERIPH1 6
+#define CLK_PLL_PERIPH1_2X 7
+#define CLK_PLL_GPU 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO0_4X 10
+#define CLK_PLL_VIDEO1 11
+#define CLK_PLL_VIDEO1_4X 12
+#define CLK_PLL_VIDEO2 13
+#define CLK_PLL_VIDEO2_4X 14
+#define CLK_PLL_VE 15
+#define CLK_PLL_DE 16
+#define CLK_PLL_AUDIO_HS 17
+#define CLK_PLL_AUDIO_1X 18
+#define CLK_PLL_AUDIO_2X 19
+#define CLK_PLL_AUDIO_4X 20
+
+/* CPUX clock exported for DVFS */
+
+#define CLK_AXI 22
+#define CLK_CPUX_APB 23
+#define CLK_PSI_AHB1_AHB2 24
+#define CLK_AHB3 25
+
+/* APB1 clock exported for PIO */
+
+#define CLK_APB2 27
+#define CLK_MBUS 28
+
+/* All module clocks and bus gates are exported except DRAM */
+
+#define CLK_DRAM 49
+
+#define CLK_BUS_DRAM 56
+
+#define CLK_NUMBER (CLK_BUS_HDCP + 1)
+
+#endif /* _CCU_SUN50I_H616_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index fa4ecb915590..9d3a76604d94 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
rate = *parent_rate / p / m;
} else {
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index cb5daa4b37db..65810937a13a 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -20,7 +20,7 @@ static DEFINE_SPINLOCK(ve_lock);
#define SUN4I_VE_DIVIDER_WIDTH 3
#define SUN4I_VE_RESET 0
-/**
+/*
* sunxi_ve_reset... - reset bit in ve clk registers handling
*/
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 0cca91e075a5..f9d715ec9908 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -14,7 +14,7 @@
#include "clk-factors.h"
-/**
+/*
* sun4i_a10_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
* MOD0 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index e1aa1fbac48a..5fe7049ea693 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -23,7 +23,7 @@ static DEFINE_SPINLOCK(clk_lock);
/* Maximum number of parents our clocks have */
#define SUNXI_MAX_PARENTS 5
-/**
+/*
* sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
@@ -71,7 +71,7 @@ static void sun4i_get_pll1_factors(struct factors_request *req)
req->n = div / 4;
}
-/**
+/*
* sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
* PLL1 rate is calculated as follows
* rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
@@ -147,7 +147,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req)
}
}
-/**
+/*
* sun8i_a23_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1);
@@ -191,7 +191,7 @@ static void sun8i_a23_get_pll1_factors(struct factors_request *req)
req->n = div / 4 - 1;
}
-/**
+/*
* sun4i_get_pll5_factors() - calculates n, k factors for PLL5
* PLL5 rate is calculated as follows
* rate = parent_rate * n * (k + 1)
@@ -218,7 +218,7 @@ static void sun4i_get_pll5_factors(struct factors_request *req)
req->n = DIV_ROUND_UP(div, (req->k + 1));
}
-/**
+/*
* sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
* PLL6x2 rate is calculated as follows
* rate = parent_rate * (n + 1) * (k + 1)
@@ -240,7 +240,7 @@ static void sun6i_a31_get_pll6_factors(struct factors_request *req)
req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
}
-/**
+/*
* sun5i_a13_get_ahb_factors() - calculates m, p factors for AHB
* AHB rate is calculated as follows
* rate = parent_rate >> p
@@ -276,7 +276,7 @@ static void sun5i_a13_get_ahb_factors(struct factors_request *req)
#define SUN6I_AHB1_PARENT_PLL6 3
-/**
+/*
* sun6i_a31_get_ahb_factors() - calculates m, p factors for AHB
* AHB rate is calculated as follows
* rate = parent_rate >> p
@@ -320,7 +320,7 @@ static void sun6i_get_ahb1_factors(struct factors_request *req)
req->m = calcm - 1;
}
-/**
+/*
* sun6i_ahb1_recalc() - calculates AHB clock rate from m, p factors and
* parent index
*/
@@ -336,7 +336,7 @@ static void sun6i_ahb1_recalc(struct factors_request *req)
req->rate >>= req->p;
}
-/**
+/*
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -375,7 +375,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
-/**
+/*
* sun7i_a20_get_out_factors() - calculates m, p factors for CLK_OUT_A/B
* CLK_OUT rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -408,7 +408,7 @@ static void sun7i_a20_get_out_factors(struct factors_request *req)
req->p = calcp;
}
-/**
+/*
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
@@ -625,7 +625,7 @@ CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
sun7i_out_clk_setup);
-/**
+/*
* sunxi_mux_clk_setup() - Setup function for muxes
*/
@@ -717,7 +717,7 @@ CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
sun8i_ahb2_clk_setup);
-/**
+/*
* sunxi_divider_clk_setup() - Setup function for simple divider clocks
*/
@@ -853,7 +853,7 @@ CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
-/**
+/*
* sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
*/
@@ -863,7 +863,7 @@ struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
};
-/**
+/*
* sunxi_divs_clk_setup() helper data
*/
@@ -929,7 +929,7 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
}
};
-/**
+/*
* sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
*
* These clocks look something like this
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index deaa4605824c..90df619dc087 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -7,3 +7,6 @@ config TEGRA_CLK_DFLL
depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
+
+config TEGRA124_CLK_EMC
+ bool
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index eec2313fd37e..7b1816856eb5 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
-obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o
+obj-$(CONFIG_TEGRA124_CLK_EMC) += clk-tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
index 745f9faa98d8..bdf6f4a51617 100644
--- a/drivers/clk/tegra/clk-tegra124-emc.c
+++ b/drivers/clk/tegra/clk-tegra124-emc.c
@@ -11,7 +11,9 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/tegra.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -21,7 +23,6 @@
#include <linux/string.h>
#include <soc/tegra/fuse.h>
-#include <soc/tegra/emc.h>
#include "clk.h"
@@ -80,6 +81,9 @@ struct tegra_clk_emc {
int num_timings;
struct emc_timing *timings;
spinlock_t *lock;
+
+ tegra124_emc_prepare_timing_change_cb *prepare_timing_change;
+ tegra124_emc_complete_timing_change_cb *complete_timing_change;
};
/* Common clock framework callback implementations */
@@ -176,6 +180,9 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
if (tegra->emc)
return tegra->emc;
+ if (!tegra->prepare_timing_change || !tegra->complete_timing_change)
+ return NULL;
+
if (!tegra->emc_node)
return NULL;
@@ -241,7 +248,7 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
div = timing->parent_rate / (timing->rate / 2) - 2;
- err = tegra_emc_prepare_timing_change(emc, timing->rate);
+ err = tegra->prepare_timing_change(emc, timing->rate);
if (err)
return err;
@@ -259,7 +266,7 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
spin_unlock_irqrestore(tegra->lock, flags);
- tegra_emc_complete_timing_change(emc, timing->rate);
+ tegra->complete_timing_change(emc, timing->rate);
clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
clk_disable_unprepare(tegra->prev_parent);
@@ -473,8 +480,8 @@ static const struct clk_ops tegra_clk_emc_ops = {
.get_parent = emc_get_parent,
};
-struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
- spinlock_t *lock)
+struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock)
{
struct tegra_clk_emc *tegra;
struct clk_init_data init;
@@ -538,3 +545,27 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
return clk;
};
+
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+ struct clk *clk = __clk_lookup("emc");
+ struct tegra_clk_emc *tegra;
+ struct clk_hw *hw;
+
+ if (clk) {
+ hw = __clk_get_hw(clk);
+ tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+ tegra->prepare_timing_change = prep_cb;
+ tegra->complete_timing_change = complete_cb;
+ }
+}
+EXPORT_SYMBOL_GPL(tegra124_clk_set_emc_callbacks);
+
+bool tegra124_clk_emc_driver_available(struct clk_hw *hw)
+{
+ struct tegra_clk_emc *tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+ return tegra->prepare_timing_change && tegra->complete_timing_change;
+}
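
For context, a hypothetical sketch of the other half of this handshake: the
EMC driver, once probed, hands its timing-change hooks to the clock driver.
The names below are illustrative rather than the actual EMC driver's; the
signatures follow the tegra124_emc_*_timing_change_cb typedefs used above:

	static int my_emc_prepare(struct tegra_emc *emc, unsigned long rate)
	{
		/* stall memory traffic and stage shadow registers for @rate */
		return 0;
	}

	static void my_emc_complete(struct tegra_emc *emc, unsigned long rate)
	{
		/* latch the new timing set and resume memory traffic */
	}

	/* from the EMC driver's probe(): */
	tegra124_clk_set_emc_callbacks(my_emc_prepare, my_emc_complete);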
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e931319dcc9d..934520aab6e3 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1500,6 +1500,26 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
writel(plld_base, clk_base + PLLD_BASE);
}
+static struct clk *tegra124_clk_src_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ clk = of_clk_src_onecell_get(clkspec, data);
+ if (IS_ERR(clk))
+ return clk;
+
+ hw = __clk_get_hw(clk);
+
+ if (clkspec->args[0] == TEGRA124_CLK_EMC) {
+ if (!tegra124_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return clk;
+}
+
/**
* tegra124_132_clock_init_post - clock initialization postamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
@@ -1516,10 +1536,10 @@ static void __init tegra124_132_clock_init_post(struct device_node *np)
&pll_x_params);
tegra_init_special_resets(1, tegra124_reset_assert,
tegra124_reset_deassert);
- tegra_add_of_provider(np, of_clk_src_onecell_get);
+ tegra_add_of_provider(np, tegra124_clk_src_onecell_get);
- clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
- &emc_lock);
+ clks[TEGRA124_CLK_EMC] = tegra124_clk_register_emc(clk_base, np,
+ &emc_lock);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 37244a7e68c2..16dbf83d2f62 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1104,12 +1104,9 @@ static void tegra30_cpu_out_of_reset(u32 cpu)
static void tegra30_enable_cpu_clock(u32 cpu)
{
- unsigned int reg;
-
writel(CPU_CLOCK(cpu),
clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
- reg = readl(clk_base +
- TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ readl(clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
}
static void tegra30_disable_cpu_clock(u32 cpu)
@@ -1256,6 +1253,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 6b565f6b5f66..c3e36b5dcc75 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -881,16 +881,22 @@ void tegra_super_clk_gen5_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-#ifdef CONFIG_TEGRA124_EMC
-struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
- spinlock_t *lock);
+#ifdef CONFIG_TEGRA124_CLK_EMC
+struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock);
+bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw);
#else
-static inline struct clk *tegra_clk_register_emc(void __iomem *base,
- struct device_node *np,
- spinlock_t *lock)
+static inline struct clk *
+tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock)
{
return NULL;
}
+
+static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
+{
+ return false;
+}
#endif
void tegra114_clock_tune_cpu_trimmers_high(void);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 21115c4e5d3a..a7fdc7622913 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -86,6 +86,7 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
* @dev: the struct device * for which the OPP table is built
* @tables: array of CVB tables
* @count: size of the previously mentioned array
+ * @align: regulator step and offset alignment parameters
* @process_id: process id of the HW module
* @speedo_id: speedo id of the HW module
* @speedo_value: speedo value of the HW module
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 87ece6cd4226..dfaa4d1f0b64 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -269,8 +269,9 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/**
* omap2_dpll_round_rate - round a target rate for an OMAP DPLL
- * @clk: struct clk * for a DPLL
+ * @hw: struct clk_hw containing the struct clk * for a DPLL
* @target_rate: desired DPLL clock rate
+ * @parent_rate: clock rate of the DPLL parent
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 700b7f44f671..74831b2752b3 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -97,7 +97,7 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw)
/**
* omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
- * @clk: OMAP clock struct ptr to use
+ * @hw: Pointer to the clk_hw_omap that wraps the OMAP clock struct to use
*
* Convert a clockdomain name stored in a struct clk 'clk' into a
* clockdomain pointer, and save it into the struct clk. Intended to be
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 247510e306e2..d6f1ac5b53e1 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -151,7 +151,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
/**
* _register_dpll - low level registration of a DPLL clock
- * @hw: hardware clock definition for the clock
+ * @user: pointer to the hardware clock definition for the clock
* @node: device node for the clock
*
* Finalizes DPLL registration process. In case a failure (clk-ref or
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 2490026948b4..6097b099a5df 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -125,7 +125,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
return f;
}
-/*
+/**
* _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
* @clk: pointer to a DPLL struct clk
*
@@ -168,7 +168,7 @@ done:
return r;
}
-/*
+/**
* _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
* @clk: pointer to a DPLL struct clk
*
@@ -204,7 +204,7 @@ static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
return r;
}
-/*
+/**
* _omap3_noncore_dpll_stop - instruct a DPLL to stop
* @clk: pointer to a DPLL struct clk
*
@@ -291,7 +291,7 @@ static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
*sd_div = sd;
}
-/*
+/**
* _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
* @clk: struct clk * of DPLL to set
* @freqsel: FREQSEL value to set
@@ -406,7 +406,8 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
/**
* omap3_dpll_recalc - recalculate DPLL rate
- * @clk: DPLL struct clk
+ * @hw: struct clk_hw containing the DPLL struct clk
+ * @parent_rate: clock rate of the DPLL parent
*
* Recalculate and propagate the DPLL rate.
*/
@@ -421,7 +422,7 @@ unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
/**
* omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
- * @clk: pointer to a DPLL struct clk
+ * @hw: struct clk_hw containing the pointer to a DPLL struct clk
*
* Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
* The choice of modes depends on the DPLL's programmed rate: if it is
@@ -470,7 +471,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
/**
* omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
- * @clk: pointer to a DPLL struct clk
+ * @hw: struct clk_hw containing the pointer to a DPLL struct clk
*
* Instructs a non-CORE DPLL to enter low-power stop. This function is
* intended for use in struct clkops. No return value.
@@ -745,7 +746,8 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
/**
* omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
+ * @hw: pointer to struct clk_hw
+ * @parent_rate: clock rate of the DPLL parent
*
* Using parent clock DPLL data, look up DPLL state. If locked, set our
* rate to the dpll_clk * 2; otherwise, just use dpll_clk.
@@ -913,7 +915,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
* omap3_dpll4_set_rate - set rate for omap3 per-dpll
* @hw: clock to change
* @rate: target rate for clock
- * @parent_rate: rate of the parent clock
+ * @parent_rate: clock rate of the DPLL parent
*
* Check if the current SoC supports the per-dpll reprogram operation
* or not, and then do the rate change if supported. Returns -EINVAL
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 89c3ed1a24b8..3fc2cab69a3f 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -102,7 +102,8 @@ static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
/**
* omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to compute the rate for
+ * @hw: pointer to the clock to compute the rate for
+ * @parent_rate: clock rate of the DPLL parent
*
* Compute the output rate for the OMAP4 DPLL represented by @clk.
* Takes the REGM4XEN bit into consideration, which is needed for the
@@ -134,8 +135,9 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
/**
* omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to round a rate for
+ * @hw: struct clk_hw containing the struct clk * of the DPLL to round a rate for
* @target_rate: the desired rate of the DPLL
+ * @parent_rate: clock rate of the DPLL parent
*
* Compute the rate that would be programmed into the DPLL hardware
* for @clk if set_rate() were to be provided with the rate
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 42389558418c..b1d0fdb40a75 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -55,7 +55,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
/**
* omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
* from HSDivider PWRDN problem Implements Errata ID: i556.
- * @clk: DPLL output struct clk
+ * @hw: DPLL output struct clk_hw
*
* 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
* dpll4_m5_ck & dpll4_m6_ck dividers gets loaded with reset
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 692be2fd9261..fdd6aa3cb1fc 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -36,8 +36,9 @@
/**
* struct clk_icst - ICST VCO clock wrapper
* @hw: corresponding clock hardware entry
- * @vcoreg: VCO register address
- * @lockreg: VCO lock register address
+ * @map: register map
+ * @vcoreg_off: VCO register offset
+ * @lockreg_off: VCO lock register offset
* @params: parameters for this ICST instance
* @rate: current rate
* @ctype: the type of control register for the ICST
@@ -428,7 +429,7 @@ static const struct icst_params icst307_params = {
.idx2s = icst307_idx2s,
};
-/**
+/*
* The core modules on the Integrator/AP and Integrator/CP have
* especially crippled ICST525 control.
*/
diff --git a/drivers/clk/xilinx/Kconfig b/drivers/clk/xilinx/Kconfig
new file mode 100644
index 000000000000..5224114176ed
--- /dev/null
+++ b/drivers/clk/xilinx/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config XILINX_VCU
+ tristate "Xilinx VCU logicoreIP Init"
+ depends on HAS_IOMEM && COMMON_CLK
+ select REGMAP_MMIO
+ help
+ Provides the driver to enable and disable the isolation between the
+ processing system and the programmable logic part by using the
+ logicoreIP register set. This driver also configures the frequencies
+ based on the clock information from the logicoreIP register set.
+
+ If you say yes here you get support for the logicoreIP.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xlnx_vcu.
+
+ If unsure, say N.
+
diff --git a/drivers/clk/xilinx/Makefile b/drivers/clk/xilinx/Makefile
new file mode 100644
index 000000000000..dee8fd51e303
--- /dev/null
+++ b/drivers/clk/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
new file mode 100644
index 000000000000..d66b1315114e
--- /dev/null
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/xlnx-vcu.h>
+
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET BIT(0)
+#define VCU_PLL_CTRL_POR_IN BIT(1)
+#define VCU_PLL_CTRL_PWR_POR BIT(2)
+#define VCU_PLL_CTRL_BYPASS BIT(3)
+#define VCU_PLL_CTRL_FBDIV GENMASK(14, 8)
+#define VCU_PLL_CTRL_CLKOUTDIV GENMASK(18, 16)
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES GENMASK(3, 0)
+#define VCU_PLL_CFG_CP GENMASK(8, 5)
+#define VCU_PLL_CFG_LFHF GENMASK(12, 10)
+#define VCU_PLL_CFG_LOCK_CNT GENMASK(22, 13)
+#define VCU_PLL_CFG_LOCK_DLY GENMASK(31, 25)
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS BIT(0)
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ * @pll: handle for the VCU PLL
+ * @pll_post: handle for the VCU PLL post divider
+ * @clk_data: clocks provided by the vcu clock provider
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ struct regmap *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+ struct clk_hw *pll;
+ struct clk_hw *pll_post;
+ struct clk_hw_onecell_data *clk_data;
+};
+
+static struct regmap_config vcu_settings_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for the lock window size
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+/**
+ * xvcu_read - Read from the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ *
+ * Return: the 32-bit value read from the specified VCU register
+ */
+static inline u32 xvcu_read(void __iomem *iomem, u32 offset)
+{
+ return ioread32(iomem + offset);
+}
+
+/**
+ * xvcu_write - Write to the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @value: Value to write
+ */
+static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
+{
+ iowrite32(value, iomem + offset);
+}
+
+#define to_vcu_pll(_hw) container_of(_hw, struct vcu_pll, hw)
+
+struct vcu_pll {
+ struct clk_hw hw;
+ void __iomem *reg_base;
+ unsigned long fvco_min;
+ unsigned long fvco_max;
+};
+
+static int xvcu_pll_wait_for_lock(struct vcu_pll *pll)
+{
+ void __iomem *base = pll->reg_base;
+ unsigned long timeout;
+ u32 lock_status;
+
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ lock_status = xvcu_read(base, VCU_PLL_STATUS);
+ if (lock_status & VCU_PLL_STATUS_LOCK_STATUS)
+ return 0;
+ } while (!time_after(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
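
The open-coded poll above could equally be expressed with the
readl_poll_timeout() helper from <linux/iopoll.h>; a sketch of the same
2-second wait, shown purely as an alternative:

	u32 status;

	return readl_poll_timeout(pll->reg_base + VCU_PLL_STATUS, status,
				  status & VCU_PLL_STATUS_LOCK_STATUS,
				  0, 2000 * USEC_PER_MSEC);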
+
+static struct clk_hw *xvcu_register_pll_post(struct device *dev,
+ const char *name,
+ const struct clk_hw *parent_hw,
+ void __iomem *reg_base)
+{
+ u32 div;
+ u32 vcu_pll_ctrl;
+
+ /*
+ * The output divider of the PLL must be set to 1/2 to meet the
+ * timing in the design.
+ */
+ vcu_pll_ctrl = xvcu_read(reg_base, VCU_PLL_CTRL);
+ div = FIELD_GET(VCU_PLL_CTRL_CLKOUTDIV, vcu_pll_ctrl);
+ if (div != 1)
+ return ERR_PTR(-EINVAL);
+
+ return clk_hw_register_fixed_factor(dev, "vcu_pll_post",
+ clk_hw_get_name(parent_hw),
+ CLK_SET_RATE_PARENT, 1, 2);
+}
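
In other words, a CLKOUTDIV register value of 1 is the only supported setting
here and corresponds to the required /2, which the function then models as an
immutable fixed factor: with the PLL locked at, say, 2.0 GHz, vcu_pll_post
comes out at 1.0 GHz and all the leaf mux/div/gate clocks divide down from
that.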
+
+static const struct xvcu_pll_cfg *xvcu_find_cfg(int div)
+{
+ const struct xvcu_pll_cfg *cfg = NULL;
+ unsigned int i;
+
+ /* Walk the whole table so the last entry (fbdiv 125) is reachable too */
+ for (i = 0; i < ARRAY_SIZE(xvcu_pll_cfg); i++)
+ if (xvcu_pll_cfg[i].fbdiv == div)
+ cfg = &xvcu_pll_cfg[i];
+
+ return cfg;
+}
+
+static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
+{
+ void __iomem *base = pll->reg_base;
+ const struct xvcu_pll_cfg *cfg = NULL;
+ u32 vcu_pll_ctrl;
+ u32 cfg_val;
+
+ cfg = xvcu_find_cfg(div);
+ if (!cfg)
+ return -EINVAL;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_FBDIV;
+ vcu_pll_ctrl |= FIELD_PREP(VCU_PLL_CTRL_FBDIV, cfg->fbdiv);
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ cfg_val = FIELD_PREP(VCU_PLL_CFG_RES, cfg->res) |
+ FIELD_PREP(VCU_PLL_CFG_CP, cfg->cp) |
+ FIELD_PREP(VCU_PLL_CFG_LFHF, cfg->lfhf) |
+ FIELD_PREP(VCU_PLL_CFG_LOCK_CNT, cfg->lock_cnt) |
+ FIELD_PREP(VCU_PLL_CFG_LOCK_DLY, cfg->lock_dly);
+ xvcu_write(base, VCU_PLL_CFG, cfg_val);
+
+ return 0;
+}
+
+static long xvcu_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ unsigned int feedback_div;
+
+ rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+
+ feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
+
+ return *parent_rate * feedback_div;
+}
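
A worked pass through round_rate, assuming a 33.333 MHz pll_ref purely for
illustration: a request for 2 GHz survives the VCO clamp (1.5-3 GHz), then

    feedback_div = DIV_ROUND_CLOSEST_ULL(2000000000, 33333333) = 60

which sits inside the 25..125 window, so the rate is rounded to
33333333 * 60 ~= 2.0 GHz.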
+
+static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ unsigned int div;
+ u32 vcu_pll_ctrl;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ div = FIELD_GET(VCU_PLL_CTRL_FBDIV, vcu_pll_ctrl);
+
+ return div * parent_rate;
+}
+
+static int xvcu_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+
+ return xvcu_pll_set_div(pll, rate / parent_rate);
+}
+
+static int xvcu_pll_enable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ u32 vcu_pll_ctrl;
+ int ret;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl |= VCU_PLL_CTRL_BYPASS;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_POR_IN;
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_PWR_POR;
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_RESET;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ ret = xvcu_pll_wait_for_lock(pll);
+ if (ret) {
+ pr_err("VCU PLL is not locked\n");
+ goto err;
+ }
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_BYPASS;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+err:
+ return ret;
+}
+
+static void xvcu_pll_disable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ u32 vcu_pll_ctrl;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl |= VCU_PLL_CTRL_POR_IN;
+ vcu_pll_ctrl |= VCU_PLL_CTRL_PWR_POR;
+ vcu_pll_ctrl |= VCU_PLL_CTRL_RESET;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+}
+
+static const struct clk_ops vcu_pll_ops = {
+ .enable = xvcu_pll_enable,
+ .disable = xvcu_pll_disable,
+ .round_rate = xvcu_pll_round_rate,
+ .recalc_rate = xvcu_pll_recalc_rate,
+ .set_rate = xvcu_pll_set_rate,
+};
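
Consumers never call these ops directly: a clk_set_rate() on the PLL (or on a child registered with CLK_SET_RATE_PARENT) goes through .round_rate and then .set_rate, and clk_prepare_enable() ends up in .enable. A hypothetical consumer-side sketch (device, lookup name, and rate are illustrative):

#include <linux/clk.h>

static int demo_consumer(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "venc_core_clk");	/* hypothetical con-id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 666000000);	/* -> .round_rate, .set_rate */
	if (ret)
		return ret;

	return clk_prepare_enable(clk);		/* -> .enable, up the tree */
}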
+
+static struct clk_hw *xvcu_register_pll(struct device *dev,
+ void __iomem *reg_base,
+ const char *name, const char *parent,
+ unsigned long flags)
+{
+ struct vcu_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init = {};
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent;
+ init.ops = &vcu_pll_ops;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll = devm_kmalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &init;
+ pll->reg_base = reg_base;
+ pll->fvco_min = FVCO_MIN;
+ pll->fvco_max = FVCO_MAX;
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk_hw_set_rate_range(hw, pll->fvco_min, pll->fvco_max);
+
+ return hw;
+}
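
clk_hw_set_rate_range() publishes the VCO limits to the clk core, and the clamp inside xvcu_pll_round_rate() keeps the driver's answers inside that advertised window — the core refuses set_rate results that violate a registered range, so the two clamps together turn out-of-range requests into in-range ones instead of failures. In sketch form:

/*
 * clk_hw_set_rate_range(hw, fvco_min, fvco_max)
 *   -> the core rejects any computed rate outside [fvco_min, fvco_max];
 * xvcu_pll_round_rate() clamps its result into the same window
 *   -> so the rate it computes is always acceptable to the core.
 */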
+
+static struct clk_hw *xvcu_clk_hw_register_leaf(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ u8 num_parents,
+ void __iomem *reg)
+{
+ u8 mux_flags = CLK_MUX_ROUND_CLOSEST;
+ u8 divider_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
+ struct clk_hw *mux = NULL;
+ struct clk_hw *divider = NULL;
+ struct clk_hw *gate = NULL;
+ char *name_mux;
+ char *name_div;
+ int err;
+ /* Protect register shared by clocks */
+ spinlock_t *lock;
+
+ lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL);
+ if (!lock)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_init(lock);
+
+ name_mux = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_mux");
+ if (!name_mux)
+ return ERR_PTR(-ENOMEM);
+ mux = clk_hw_register_mux_parent_data(dev, name_mux,
+ parent_data, num_parents,
+ CLK_SET_RATE_PARENT,
+ reg, 0, 1, mux_flags, lock);
+ if (IS_ERR(mux))
+ return mux;
+
+ name_div = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_div");
+ if (!name_div) {
+ err = -ENOMEM;
+ goto unregister_mux;
+ }
+ divider = clk_hw_register_divider_parent_hw(dev, name_div, mux,
+ CLK_SET_RATE_PARENT,
+ reg, 4, 6, divider_flags,
+ lock);
+ if (IS_ERR(divider)) {
+ err = PTR_ERR(divider);
+ goto unregister_mux;
+ }
+
+ gate = clk_hw_register_gate_parent_hw(dev, name, divider,
+ CLK_SET_RATE_PARENT, reg, 12, 0,
+ lock);
+ if (IS_ERR(gate)) {
+ err = PTR_ERR(gate);
+ goto unregister_divider;
+ }
+
+ return gate;
+
+unregister_divider:
+ clk_hw_unregister_divider(divider);
+unregister_mux:
+ clk_hw_unregister_mux(mux);
+
+ return ERR_PTR(err);
+}
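
Each leaf control register packs all three components, which is why the mux, divider, and gate share one spinlock. The bit positions passed to the register_* calls above (mux select at bit 0, a one-based 6-bit divider at bits 9:4, gate enable at bit 12) could be written out as masks — hypothetical names, not in the patch:

#include <linux/bits.h>

#define VCU_LEAF_CTRL_SRCSEL	BIT(0)		/* mux: pll_ref vs. vcu_pll_post */
#define VCU_LEAF_CTRL_DIV	GENMASK(9, 4)	/* one-based divider value */
#define VCU_LEAF_CTRL_EN	BIT(12)		/* gate enable */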
+
+static void xvcu_clk_hw_unregister_leaf(struct clk_hw *hw)
+{
+ struct clk_hw *gate = hw;
+ struct clk_hw *divider;
+ struct clk_hw *mux;
+
+ if (!gate)
+ return;
+
+ divider = clk_hw_get_parent(gate);
+ clk_hw_unregister_gate(gate);
+ if (!divider)
+ return;
+
+ mux = clk_hw_get_parent(divider);
+ clk_hw_unregister_divider(divider);
+ if (!mux)
+ return;
+
+ clk_hw_unregister_mux(mux);
+}
+
+static int xvcu_register_clock_provider(struct xvcu_device *xvcu)
+{
+ struct device *dev = xvcu->dev;
+ struct clk_parent_data parent_data[2] = { 0 };
+ struct clk_hw_onecell_data *data;
+ struct clk_hw **hws;
+ struct clk_hw *hw;
+ void __iomem *reg_base = xvcu->vcu_slcr_ba;
+
+ data = devm_kzalloc(dev, struct_size(data, hws, CLK_XVCU_NUM_CLOCKS), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->num = CLK_XVCU_NUM_CLOCKS;
+ hws = data->hws;
+
+ xvcu->clk_data = data;
+
+ hw = xvcu_register_pll(dev, reg_base,
+ "vcu_pll", __clk_get_name(xvcu->pll_ref),
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ xvcu->pll = hw;
+
+ hw = xvcu_register_pll_post(dev, "vcu_pll_post", xvcu->pll, reg_base);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ xvcu->pll_post = hw;
+
+ parent_data[0].fw_name = "pll_ref";
+ parent_data[1].hw = xvcu->pll_post;
+
+ hws[CLK_XVCU_ENC_CORE] =
+ xvcu_clk_hw_register_leaf(dev, "venc_core_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_ENC_CORE_CTRL);
+ hws[CLK_XVCU_ENC_MCU] =
+ xvcu_clk_hw_register_leaf(dev, "venc_mcu_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_ENC_MCU_CTRL);
+ hws[CLK_XVCU_DEC_CORE] =
+ xvcu_clk_hw_register_leaf(dev, "vdec_core_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_DEC_CORE_CTRL);
+ hws[CLK_XVCU_DEC_MCU] =
+ xvcu_clk_hw_register_leaf(dev, "vdec_mcu_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_DEC_MCU_CTRL);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
+}
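
Registering with of_clk_hw_onecell_get lets device tree consumers index hws[] directly by the CLK_XVCU_* constants (e.g. a hypothetical clocks = <&xvcu CLK_XVCU_ENC_CORE> phandle). The provider callback is essentially a bounds-checked array lookup — a simplified sketch of what the core helper does:

#include <linux/clk-provider.h>
#include <linux/of.h>

static struct clk_hw *demo_onecell_get(struct of_phandle_args *clkspec,
				       void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num)	/* index comes straight from the DT cell */
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];
}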
+
+static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
+{
+ struct clk_hw_onecell_data *data = xvcu->clk_data;
+ struct clk_hw **hws = data->hws;
+
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_DEC_MCU]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_DEC_MCU]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_DEC_CORE]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_DEC_CORE]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_MCU]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
+
+ if (!IS_ERR_OR_NULL(xvcu->pll_post))
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
+}
+
+/**
+ * xvcu_probe - Probe for the logicoreIP and initialize the PLL
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xvcu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xvcu_device *xvcu;
+ void __iomem *regs;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ xvcu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_info(&pdev->dev,
+ "could not find xlnx,vcu-settings: trying direct register access\n");
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ devm_regmap_init_mmio(&pdev->dev, regs,
+ &vcu_settings_regmap_config);
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(xvcu->logicore_reg_ba);
+ }
+ }
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+
+ ret = xvcu_register_clock_provider(xvcu);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register clock provider\n");
+ goto error_clk_provider;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ return 0;
+
+error_clk_provider:
+ xvcu_unregister_clock_provider(xvcu);
+ clk_disable_unprepare(xvcu->aclk);
+ return ret;
+}
+
+/**
+ * xvcu_remove - Re-assert gasket isolation and disable the clock
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xvcu_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+ xvcu_unregister_clock_provider(xvcu);
+
+ /* Add the Gasket isolation and put the VCU in reset. */
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
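
remove() mirrors probe() in reverse, and the explicit unregistration is needed because only some of the objects are devm-managed:

/*
 * Teardown order in xvcu_remove():
 *   1. xvcu_unregister_clock_provider() - the leaf and PLL-post clk_hws were
 *      created with non-devm clk_hw_register_*() (the PLL itself and the
 *      provider registration are devm-managed and clean up automatically);
 *   2. regmap_write(..., VCU_GASKET_INIT, 0) - re-isolate the gasket and
 *      put the VCU back in reset;
 *   3. clk_disable_unprepare(aclk) - drop the bus clock last.
 */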
+
+static const struct of_device_id xvcu_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
+
+static struct platform_driver xvcu_driver = {
+ .driver = {
+ .name = "xilinx-vcu",
+ .of_match_table = xvcu_of_id_table,
+ },
+ .probe = xvcu_probe,
+ .remove = xvcu_remove,
+};
+
+module_platform_driver(xvcu_driver);
+
+MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU init Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile
deleted file mode 100644
index f130643b695d..000000000000
--- a/drivers/clk/zte/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y := clk.o
-obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o
-obj-$(CONFIG_ARCH_ZX) += clk-zx296718.o
diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c
deleted file mode 100644
index e846f2a34feb..000000000000
--- a/drivers/clk/zte/clk-zx296702.c
+++ /dev/null
@@ -1,741 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <dt-bindings/clock/zx296702-clock.h>
-#include "clk.h"
-
-static DEFINE_SPINLOCK(reg_lock);
-
-static void __iomem *topcrm_base;
-static void __iomem *lsp0crpm_base;
-static void __iomem *lsp1crpm_base;
-
-static struct clk *topclk[ZX296702_TOPCLK_END];
-static struct clk *lsp0clk[ZX296702_LSP0CLK_END];
-static struct clk *lsp1clk[ZX296702_LSP1CLK_END];
-
-static struct clk_onecell_data topclk_data;
-static struct clk_onecell_data lsp0clk_data;
-static struct clk_onecell_data lsp1clk_data;
-
-#define CLK_MUX (topcrm_base + 0x04)
-#define CLK_DIV (topcrm_base + 0x08)
-#define CLK_EN0 (topcrm_base + 0x0c)
-#define CLK_EN1 (topcrm_base + 0x10)
-#define VOU_LOCAL_CLKEN (topcrm_base + 0x68)
-#define VOU_LOCAL_CLKSEL (topcrm_base + 0x70)
-#define VOU_LOCAL_DIV2_SET (topcrm_base + 0x74)
-#define CLK_MUX1 (topcrm_base + 0x8c)
-
-#define CLK_SDMMC1 (lsp0crpm_base + 0x0c)
-#define CLK_GPIO (lsp0crpm_base + 0x2c)
-#define CLK_SPDIF0 (lsp0crpm_base + 0x10)
-#define SPDIF0_DIV (lsp0crpm_base + 0x14)
-#define CLK_I2S0 (lsp0crpm_base + 0x18)
-#define I2S0_DIV (lsp0crpm_base + 0x1c)
-#define CLK_I2S1 (lsp0crpm_base + 0x20)
-#define I2S1_DIV (lsp0crpm_base + 0x24)
-#define CLK_I2S2 (lsp0crpm_base + 0x34)
-#define I2S2_DIV (lsp0crpm_base + 0x38)
-
-#define CLK_UART0 (lsp1crpm_base + 0x20)
-#define CLK_UART1 (lsp1crpm_base + 0x24)
-#define CLK_SDMMC0 (lsp1crpm_base + 0x2c)
-#define CLK_SPDIF1 (lsp1crpm_base + 0x30)
-#define SPDIF1_DIV (lsp1crpm_base + 0x34)
-
-static const struct zx_pll_config pll_a9_config[] = {
- { .rate = 700000000, .cfg0 = 0x800405d1, .cfg1 = 0x04555555 },
- { .rate = 800000000, .cfg0 = 0x80040691, .cfg1 = 0x04aaaaaa },
- { .rate = 900000000, .cfg0 = 0x80040791, .cfg1 = 0x04000000 },
- { .rate = 1000000000, .cfg0 = 0x80040851, .cfg1 = 0x04555555 },
- { .rate = 1100000000, .cfg0 = 0x80040911, .cfg1 = 0x04aaaaaa },
- { .rate = 1200000000, .cfg0 = 0x80040a11, .cfg1 = 0x04000000 },
-};
-
-static const struct clk_div_table main_hlk_div[] = {
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { /* sentinel */ }
-};
-
-static const struct clk_div_table a9_as1_aclk_divider[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { /* sentinel */ }
-};
-
-static const struct clk_div_table sec_wclk_divider[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { .val = 5, .div = 6, },
- { .val = 7, .div = 8, },
- { /* sentinel */ }
-};
-
-static const char * const matrix_aclk_sel[] = {
- "pll_mm0_198M",
- "osc",
- "clk_148M5",
- "pll_lsp_104M",
-};
-
-static const char * const a9_wclk_sel[] = {
- "pll_a9",
- "osc",
- "clk_500",
- "clk_250",
-};
-
-static const char * const a9_as1_aclk_sel[] = {
- "clk_250",
- "osc",
- "pll_mm0_396M",
- "pll_mac_333M",
-};
-
-static const char * const a9_trace_clkin_sel[] = {
- "clk_74M25",
- "pll_mm1_108M",
- "clk_125",
- "clk_148M5",
-};
-
-static const char * const decppu_aclk_sel[] = {
- "clk_250",
- "pll_mm0_198M",
- "pll_lsp_104M",
- "pll_audio_294M912",
-};
-
-static const char * const vou_main_wclk_sel[] = {
- "clk_148M5",
- "clk_74M25",
- "clk_27",
- "pll_mm1_54M",
-};
-
-static const char * const vou_scaler_wclk_sel[] = {
- "clk_250",
- "pll_mac_333M",
- "pll_audio_294M912",
- "pll_mm0_198M",
-};
-
-static const char * const r2d_wclk_sel[] = {
- "pll_audio_294M912",
- "pll_mac_333M",
- "pll_a9_350M",
- "pll_mm0_396M",
-};
-
-static const char * const ddr_wclk_sel[] = {
- "pll_mac_333M",
- "pll_ddr_266M",
- "pll_audio_294M912",
- "pll_mm0_198M",
-};
-
-static const char * const nand_wclk_sel[] = {
- "pll_lsp_104M",
- "osc",
-};
-
-static const char * const lsp_26_wclk_sel[] = {
- "pll_lsp_26M",
- "osc",
-};
-
-static const char * const vl0_sel[] = {
- "vou_main_channel_div",
- "vou_aux_channel_div",
-};
-
-static const char * const hdmi_sel[] = {
- "vou_main_channel_wclk",
- "vou_aux_channel_wclk",
-};
-
-static const char * const sdmmc0_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const sdmmc1_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static const char * const uart_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const spdif0_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static const char * const spdif1_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const i2s_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static inline struct clk *zx_divtbl(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width,
- const struct clk_div_table *table)
-{
- return clk_register_divider_table(NULL, name, parent, 0, reg, shift,
- width, 0, table, &reg_lock);
-}
-
-static inline struct clk *zx_div(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width)
-{
- return clk_register_divider(NULL, name, parent, 0,
- reg, shift, width, 0, &reg_lock);
-}
-
-static inline struct clk *zx_mux(const char *name, const char * const *parents,
- int num_parents, void __iomem *reg, u8 shift, u8 width)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- 0, reg, shift, width, 0, &reg_lock);
-}
-
-static inline struct clk *zx_gate(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_register_gate(NULL, name, parent, CLK_IGNORE_UNUSED,
- reg, shift, CLK_SET_RATE_PARENT, &reg_lock);
-}
-
-static void __init zx296702_top_clocks_init(struct device_node *np)
-{
- struct clk **clk = topclk;
- int i;
-
- topcrm_base = of_iomap(np, 0);
- WARN_ON(!topcrm_base);
-
- clk[ZX296702_OSC] =
- clk_register_fixed_rate(NULL, "osc", NULL, 0, 30000000);
- clk[ZX296702_PLL_A9] =
- clk_register_zx_pll("pll_a9", "osc", 0, topcrm_base
- + 0x01c, pll_a9_config,
- ARRAY_SIZE(pll_a9_config), &reg_lock);
-
- /* TODO: pll_a9_350M look like changeble follow a9 pll */
- clk[ZX296702_PLL_A9_350M] =
- clk_register_fixed_rate(NULL, "pll_a9_350M", "osc", 0,
- 350000000);
- clk[ZX296702_PLL_MAC_1000M] =
- clk_register_fixed_rate(NULL, "pll_mac_1000M", "osc", 0,
- 1000000000);
- clk[ZX296702_PLL_MAC_333M] =
- clk_register_fixed_rate(NULL, "pll_mac_333M", "osc", 0,
- 333000000);
- clk[ZX296702_PLL_MM0_1188M] =
- clk_register_fixed_rate(NULL, "pll_mm0_1188M", "osc", 0,
- 1188000000);
- clk[ZX296702_PLL_MM0_396M] =
- clk_register_fixed_rate(NULL, "pll_mm0_396M", "osc", 0,
- 396000000);
- clk[ZX296702_PLL_MM0_198M] =
- clk_register_fixed_rate(NULL, "pll_mm0_198M", "osc", 0,
- 198000000);
- clk[ZX296702_PLL_MM1_108M] =
- clk_register_fixed_rate(NULL, "pll_mm1_108M", "osc", 0,
- 108000000);
- clk[ZX296702_PLL_MM1_72M] =
- clk_register_fixed_rate(NULL, "pll_mm1_72M", "osc", 0,
- 72000000);
- clk[ZX296702_PLL_MM1_54M] =
- clk_register_fixed_rate(NULL, "pll_mm1_54M", "osc", 0,
- 54000000);
- clk[ZX296702_PLL_LSP_104M] =
- clk_register_fixed_rate(NULL, "pll_lsp_104M", "osc", 0,
- 104000000);
- clk[ZX296702_PLL_LSP_26M] =
- clk_register_fixed_rate(NULL, "pll_lsp_26M", "osc", 0,
- 26000000);
- clk[ZX296702_PLL_DDR_266M] =
- clk_register_fixed_rate(NULL, "pll_ddr_266M", "osc", 0,
- 266000000);
- clk[ZX296702_PLL_AUDIO_294M912] =
- clk_register_fixed_rate(NULL, "pll_audio_294M912", "osc", 0,
- 294912000);
-
- /* bus clock */
- clk[ZX296702_MATRIX_ACLK] =
- zx_mux("matrix_aclk", matrix_aclk_sel,
- ARRAY_SIZE(matrix_aclk_sel), CLK_MUX, 2, 2);
- clk[ZX296702_MAIN_HCLK] =
- zx_divtbl("main_hclk", "matrix_aclk", CLK_DIV, 0, 2,
- main_hlk_div);
- clk[ZX296702_MAIN_PCLK] =
- zx_divtbl("main_pclk", "matrix_aclk", CLK_DIV, 2, 2,
- main_hlk_div);
-
- /* cpu clock */
- clk[ZX296702_CLK_500] =
- clk_register_fixed_factor(NULL, "clk_500", "pll_mac_1000M", 0,
- 1, 2);
- clk[ZX296702_CLK_250] =
- clk_register_fixed_factor(NULL, "clk_250", "pll_mac_1000M", 0,
- 1, 4);
- clk[ZX296702_CLK_125] =
- clk_register_fixed_factor(NULL, "clk_125", "clk_250", 0, 1, 2);
- clk[ZX296702_CLK_148M5] =
- clk_register_fixed_factor(NULL, "clk_148M5", "pll_mm0_1188M", 0,
- 1, 8);
- clk[ZX296702_CLK_74M25] =
- clk_register_fixed_factor(NULL, "clk_74M25", "pll_mm0_1188M", 0,
- 1, 16);
- clk[ZX296702_A9_WCLK] =
- zx_mux("a9_wclk", a9_wclk_sel, ARRAY_SIZE(a9_wclk_sel), CLK_MUX,
- 0, 2);
- clk[ZX296702_A9_AS1_ACLK_MUX] =
- zx_mux("a9_as1_aclk_mux", a9_as1_aclk_sel,
- ARRAY_SIZE(a9_as1_aclk_sel), CLK_MUX, 4, 2);
- clk[ZX296702_A9_TRACE_CLKIN_MUX] =
- zx_mux("a9_trace_clkin_mux", a9_trace_clkin_sel,
- ARRAY_SIZE(a9_trace_clkin_sel), CLK_MUX1, 0, 2);
- clk[ZX296702_A9_AS1_ACLK_DIV] =
- zx_divtbl("a9_as1_aclk_div", "a9_as1_aclk_mux", CLK_DIV, 4, 2,
- a9_as1_aclk_divider);
-
- /* multi-media clock */
- clk[ZX296702_CLK_2] =
- clk_register_fixed_factor(NULL, "clk_2", "pll_mm1_72M", 0,
- 1, 36);
- clk[ZX296702_CLK_27] =
- clk_register_fixed_factor(NULL, "clk_27", "pll_mm1_54M", 0,
- 1, 2);
- clk[ZX296702_DECPPU_ACLK_MUX] =
- zx_mux("decppu_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 6, 2);
- clk[ZX296702_PPU_ACLK_MUX] =
- zx_mux("ppu_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 8, 2);
- clk[ZX296702_MALI400_ACLK_MUX] =
- zx_mux("mali400_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 12, 2);
- clk[ZX296702_VOU_ACLK_MUX] =
- zx_mux("vou_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 10, 2);
- clk[ZX296702_VOU_MAIN_WCLK_MUX] =
- zx_mux("vou_main_wclk_mux", vou_main_wclk_sel,
- ARRAY_SIZE(vou_main_wclk_sel), CLK_MUX, 14, 2);
- clk[ZX296702_VOU_AUX_WCLK_MUX] =
- zx_mux("vou_aux_wclk_mux", vou_main_wclk_sel,
- ARRAY_SIZE(vou_main_wclk_sel), CLK_MUX, 16, 2);
- clk[ZX296702_VOU_SCALER_WCLK_MUX] =
- zx_mux("vou_scaler_wclk_mux", vou_scaler_wclk_sel,
- ARRAY_SIZE(vou_scaler_wclk_sel), CLK_MUX,
- 18, 2);
- clk[ZX296702_R2D_ACLK_MUX] =
- zx_mux("r2d_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 20, 2);
- clk[ZX296702_R2D_WCLK_MUX] =
- zx_mux("r2d_wclk_mux", r2d_wclk_sel,
- ARRAY_SIZE(r2d_wclk_sel), CLK_MUX, 22, 2);
-
- /* other clock */
- clk[ZX296702_CLK_50] =
- clk_register_fixed_factor(NULL, "clk_50", "pll_mac_1000M",
- 0, 1, 20);
- clk[ZX296702_CLK_25] =
- clk_register_fixed_factor(NULL, "clk_25", "pll_mac_1000M",
- 0, 1, 40);
- clk[ZX296702_CLK_12] =
- clk_register_fixed_factor(NULL, "clk_12", "pll_mm1_72M",
- 0, 1, 6);
- clk[ZX296702_CLK_16M384] =
- clk_register_fixed_factor(NULL, "clk_16M384",
- "pll_audio_294M912", 0, 1, 18);
- clk[ZX296702_CLK_32K768] =
- clk_register_fixed_factor(NULL, "clk_32K768", "clk_16M384",
- 0, 1, 500);
- clk[ZX296702_SEC_WCLK_DIV] =
- zx_divtbl("sec_wclk_div", "pll_lsp_104M", CLK_DIV, 6, 3,
- sec_wclk_divider);
- clk[ZX296702_DDR_WCLK_MUX] =
- zx_mux("ddr_wclk_mux", ddr_wclk_sel,
- ARRAY_SIZE(ddr_wclk_sel), CLK_MUX, 24, 2);
- clk[ZX296702_NAND_WCLK_MUX] =
- zx_mux("nand_wclk_mux", nand_wclk_sel,
- ARRAY_SIZE(nand_wclk_sel), CLK_MUX, 24, 2);
- clk[ZX296702_LSP_26_WCLK_MUX] =
- zx_mux("lsp_26_wclk_mux", lsp_26_wclk_sel,
- ARRAY_SIZE(lsp_26_wclk_sel), CLK_MUX, 27, 1);
-
- /* gates */
- clk[ZX296702_A9_AS0_ACLK] =
- zx_gate("a9_as0_aclk", "matrix_aclk", CLK_EN0, 0);
- clk[ZX296702_A9_AS1_ACLK] =
- zx_gate("a9_as1_aclk", "a9_as1_aclk_div", CLK_EN0, 1);
- clk[ZX296702_A9_TRACE_CLKIN] =
- zx_gate("a9_trace_clkin", "a9_trace_clkin_mux", CLK_EN0, 2);
- clk[ZX296702_DECPPU_AXI_M_ACLK] =
- zx_gate("decppu_axi_m_aclk", "decppu_aclk_mux", CLK_EN0, 3);
- clk[ZX296702_DECPPU_AHB_S_HCLK] =
- zx_gate("decppu_ahb_s_hclk", "main_hclk", CLK_EN0, 4);
- clk[ZX296702_PPU_AXI_M_ACLK] =
- zx_gate("ppu_axi_m_aclk", "ppu_aclk_mux", CLK_EN0, 5);
- clk[ZX296702_PPU_AHB_S_HCLK] =
- zx_gate("ppu_ahb_s_hclk", "main_hclk", CLK_EN0, 6);
- clk[ZX296702_VOU_AXI_M_ACLK] =
- zx_gate("vou_axi_m_aclk", "vou_aclk_mux", CLK_EN0, 7);
- clk[ZX296702_VOU_APB_PCLK] =
- zx_gate("vou_apb_pclk", "main_pclk", CLK_EN0, 8);
- clk[ZX296702_VOU_MAIN_CHANNEL_WCLK] =
- zx_gate("vou_main_channel_wclk", "vou_main_wclk_mux",
- CLK_EN0, 9);
- clk[ZX296702_VOU_AUX_CHANNEL_WCLK] =
- zx_gate("vou_aux_channel_wclk", "vou_aux_wclk_mux",
- CLK_EN0, 10);
- clk[ZX296702_VOU_HDMI_OSCLK_CEC] =
- zx_gate("vou_hdmi_osclk_cec", "clk_2", CLK_EN0, 11);
- clk[ZX296702_VOU_SCALER_WCLK] =
- zx_gate("vou_scaler_wclk", "vou_scaler_wclk_mux", CLK_EN0, 12);
- clk[ZX296702_MALI400_AXI_M_ACLK] =
- zx_gate("mali400_axi_m_aclk", "mali400_aclk_mux", CLK_EN0, 13);
- clk[ZX296702_MALI400_APB_PCLK] =
- zx_gate("mali400_apb_pclk", "main_pclk", CLK_EN0, 14);
- clk[ZX296702_R2D_WCLK] =
- zx_gate("r2d_wclk", "r2d_wclk_mux", CLK_EN0, 15);
- clk[ZX296702_R2D_AXI_M_ACLK] =
- zx_gate("r2d_axi_m_aclk", "r2d_aclk_mux", CLK_EN0, 16);
- clk[ZX296702_R2D_AHB_HCLK] =
- zx_gate("r2d_ahb_hclk", "main_hclk", CLK_EN0, 17);
- clk[ZX296702_DDR3_AXI_S0_ACLK] =
- zx_gate("ddr3_axi_s0_aclk", "matrix_aclk", CLK_EN0, 18);
- clk[ZX296702_DDR3_APB_PCLK] =
- zx_gate("ddr3_apb_pclk", "main_pclk", CLK_EN0, 19);
- clk[ZX296702_DDR3_WCLK] =
- zx_gate("ddr3_wclk", "ddr_wclk_mux", CLK_EN0, 20);
- clk[ZX296702_USB20_0_AHB_HCLK] =
- zx_gate("usb20_0_ahb_hclk", "main_hclk", CLK_EN0, 21);
- clk[ZX296702_USB20_0_EXTREFCLK] =
- zx_gate("usb20_0_extrefclk", "clk_12", CLK_EN0, 22);
- clk[ZX296702_USB20_1_AHB_HCLK] =
- zx_gate("usb20_1_ahb_hclk", "main_hclk", CLK_EN0, 23);
- clk[ZX296702_USB20_1_EXTREFCLK] =
- zx_gate("usb20_1_extrefclk", "clk_12", CLK_EN0, 24);
- clk[ZX296702_USB20_2_AHB_HCLK] =
- zx_gate("usb20_2_ahb_hclk", "main_hclk", CLK_EN0, 25);
- clk[ZX296702_USB20_2_EXTREFCLK] =
- zx_gate("usb20_2_extrefclk", "clk_12", CLK_EN0, 26);
- clk[ZX296702_GMAC_AXI_M_ACLK] =
- zx_gate("gmac_axi_m_aclk", "matrix_aclk", CLK_EN0, 27);
- clk[ZX296702_GMAC_APB_PCLK] =
- zx_gate("gmac_apb_pclk", "main_pclk", CLK_EN0, 28);
- clk[ZX296702_GMAC_125_CLKIN] =
- zx_gate("gmac_125_clkin", "clk_125", CLK_EN0, 29);
- clk[ZX296702_GMAC_RMII_CLKIN] =
- zx_gate("gmac_rmii_clkin", "clk_50", CLK_EN0, 30);
- clk[ZX296702_GMAC_25M_CLK] =
- zx_gate("gmac_25M_clk", "clk_25", CLK_EN0, 31);
- clk[ZX296702_NANDFLASH_AHB_HCLK] =
- zx_gate("nandflash_ahb_hclk", "main_hclk", CLK_EN1, 0);
- clk[ZX296702_NANDFLASH_WCLK] =
- zx_gate("nandflash_wclk", "nand_wclk_mux", CLK_EN1, 1);
- clk[ZX296702_LSP0_APB_PCLK] =
- zx_gate("lsp0_apb_pclk", "main_pclk", CLK_EN1, 2);
- clk[ZX296702_LSP0_AHB_HCLK] =
- zx_gate("lsp0_ahb_hclk", "main_hclk", CLK_EN1, 3);
- clk[ZX296702_LSP0_26M_WCLK] =
- zx_gate("lsp0_26M_wclk", "lsp_26_wclk_mux", CLK_EN1, 4);
- clk[ZX296702_LSP0_104M_WCLK] =
- zx_gate("lsp0_104M_wclk", "pll_lsp_104M", CLK_EN1, 5);
- clk[ZX296702_LSP0_16M384_WCLK] =
- zx_gate("lsp0_16M384_wclk", "clk_16M384", CLK_EN1, 6);
- clk[ZX296702_LSP1_APB_PCLK] =
- zx_gate("lsp1_apb_pclk", "main_pclk", CLK_EN1, 7);
- /* FIXME: wclk enable bit is bit8. We hack it as reserved 31 for
- * UART does not work after parent clk is disabled/enabled */
- clk[ZX296702_LSP1_26M_WCLK] =
- zx_gate("lsp1_26M_wclk", "lsp_26_wclk_mux", CLK_EN1, 31);
- clk[ZX296702_LSP1_104M_WCLK] =
- zx_gate("lsp1_104M_wclk", "pll_lsp_104M", CLK_EN1, 9);
- clk[ZX296702_LSP1_32K_CLK] =
- zx_gate("lsp1_32K_clk", "clk_32K768", CLK_EN1, 10);
- clk[ZX296702_AON_HCLK] =
- zx_gate("aon_hclk", "main_hclk", CLK_EN1, 11);
- clk[ZX296702_SYS_CTRL_PCLK] =
- zx_gate("sys_ctrl_pclk", "main_pclk", CLK_EN1, 12);
- clk[ZX296702_DMA_PCLK] =
- zx_gate("dma_pclk", "main_pclk", CLK_EN1, 13);
- clk[ZX296702_DMA_ACLK] =
- zx_gate("dma_aclk", "matrix_aclk", CLK_EN1, 14);
- clk[ZX296702_SEC_HCLK] =
- zx_gate("sec_hclk", "main_hclk", CLK_EN1, 15);
- clk[ZX296702_AES_WCLK] =
- zx_gate("aes_wclk", "sec_wclk_div", CLK_EN1, 16);
- clk[ZX296702_DES_WCLK] =
- zx_gate("des_wclk", "sec_wclk_div", CLK_EN1, 17);
- clk[ZX296702_IRAM_ACLK] =
- zx_gate("iram_aclk", "matrix_aclk", CLK_EN1, 18);
- clk[ZX296702_IROM_ACLK] =
- zx_gate("irom_aclk", "matrix_aclk", CLK_EN1, 19);
- clk[ZX296702_BOOT_CTRL_HCLK] =
- zx_gate("boot_ctrl_hclk", "main_hclk", CLK_EN1, 20);
- clk[ZX296702_EFUSE_CLK_30] =
- zx_gate("efuse_clk_30", "osc", CLK_EN1, 21);
-
- /* TODO: add VOU Local clocks */
- clk[ZX296702_VOU_MAIN_CHANNEL_DIV] =
- zx_div("vou_main_channel_div", "vou_main_channel_wclk",
- VOU_LOCAL_DIV2_SET, 1, 1);
- clk[ZX296702_VOU_AUX_CHANNEL_DIV] =
- zx_div("vou_aux_channel_div", "vou_aux_channel_wclk",
- VOU_LOCAL_DIV2_SET, 0, 1);
- clk[ZX296702_VOU_TV_ENC_HD_DIV] =
- zx_div("vou_tv_enc_hd_div", "vou_tv_enc_hd_mux",
- VOU_LOCAL_DIV2_SET, 3, 1);
- clk[ZX296702_VOU_TV_ENC_SD_DIV] =
- zx_div("vou_tv_enc_sd_div", "vou_tv_enc_sd_mux",
- VOU_LOCAL_DIV2_SET, 2, 1);
- clk[ZX296702_VL0_MUX] =
- zx_mux("vl0_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 8, 1);
- clk[ZX296702_VL1_MUX] =
- zx_mux("vl1_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 9, 1);
- clk[ZX296702_VL2_MUX] =
- zx_mux("vl2_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 10, 1);
- clk[ZX296702_GL0_MUX] =
- zx_mux("gl0_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 5, 1);
- clk[ZX296702_GL1_MUX] =
- zx_mux("gl1_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 6, 1);
- clk[ZX296702_GL2_MUX] =
- zx_mux("gl2_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 7, 1);
- clk[ZX296702_WB_MUX] =
- zx_mux("wb_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 11, 1);
- clk[ZX296702_HDMI_MUX] =
- zx_mux("hdmi_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 4, 1);
- clk[ZX296702_VOU_TV_ENC_HD_MUX] =
- zx_mux("vou_tv_enc_hd_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 3, 1);
- clk[ZX296702_VOU_TV_ENC_SD_MUX] =
- zx_mux("vou_tv_enc_sd_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 2, 1);
- clk[ZX296702_VL0_CLK] =
- zx_gate("vl0_clk", "vl0_mux", VOU_LOCAL_CLKEN, 8);
- clk[ZX296702_VL1_CLK] =
- zx_gate("vl1_clk", "vl1_mux", VOU_LOCAL_CLKEN, 9);
- clk[ZX296702_VL2_CLK] =
- zx_gate("vl2_clk", "vl2_mux", VOU_LOCAL_CLKEN, 10);
- clk[ZX296702_GL0_CLK] =
- zx_gate("gl0_clk", "gl0_mux", VOU_LOCAL_CLKEN, 5);
- clk[ZX296702_GL1_CLK] =
- zx_gate("gl1_clk", "gl1_mux", VOU_LOCAL_CLKEN, 6);
- clk[ZX296702_GL2_CLK] =
- zx_gate("gl2_clk", "gl2_mux", VOU_LOCAL_CLKEN, 7);
- clk[ZX296702_WB_CLK] =
- zx_gate("wb_clk", "wb_mux", VOU_LOCAL_CLKEN, 11);
- clk[ZX296702_CL_CLK] =
- zx_gate("cl_clk", "vou_main_channel_div", VOU_LOCAL_CLKEN, 12);
- clk[ZX296702_MAIN_MIX_CLK] =
- zx_gate("main_mix_clk", "vou_main_channel_div",
- VOU_LOCAL_CLKEN, 4);
- clk[ZX296702_AUX_MIX_CLK] =
- zx_gate("aux_mix_clk", "vou_aux_channel_div",
- VOU_LOCAL_CLKEN, 3);
- clk[ZX296702_HDMI_CLK] =
- zx_gate("hdmi_clk", "hdmi_mux", VOU_LOCAL_CLKEN, 2);
- clk[ZX296702_VOU_TV_ENC_HD_DAC_CLK] =
- zx_gate("vou_tv_enc_hd_dac_clk", "vou_tv_enc_hd_div",
- VOU_LOCAL_CLKEN, 1);
- clk[ZX296702_VOU_TV_ENC_SD_DAC_CLK] =
- zx_gate("vou_tv_enc_sd_dac_clk", "vou_tv_enc_sd_div",
- VOU_LOCAL_CLKEN, 0);
-
- /* CA9 PERIPHCLK = a9_wclk / 2 */
- clk[ZX296702_A9_PERIPHCLK] =
- clk_register_fixed_factor(NULL, "a9_periphclk", "a9_wclk",
- 0, 1, 2);
-
- for (i = 0; i < ARRAY_SIZE(topclk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- topclk_data.clks = topclk;
- topclk_data.clk_num = ARRAY_SIZE(topclk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &topclk_data);
-}
-CLK_OF_DECLARE(zx296702_top_clk, "zte,zx296702-topcrm-clk",
- zx296702_top_clocks_init);
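
Note the registration-time difference between the removed ZTE drivers and the new Xilinx VCU driver above: these use CLK_OF_DECLARE, which runs the init function from of_clk_init() during early boot, before the driver model is up — so no devm_* resources and no probe deferral. The pattern, as a sketch with a hypothetical compatible string:

static void __init demo_clocks_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (WARN_ON(!base))
		return;
	/* ...register clks against base, then of_clk_add_provider(np, ...)... */
}
CLK_OF_DECLARE(demo_clk, "vendor,demo-clk", demo_clocks_init);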
-
-static void __init zx296702_lsp0_clocks_init(struct device_node *np)
-{
- struct clk **clk = lsp0clk;
- int i;
-
- lsp0crpm_base = of_iomap(np, 0);
- WARN_ON(!lsp0crpm_base);
-
- /* SDMMC1 */
- clk[ZX296702_SDMMC1_WCLK_MUX] =
- zx_mux("sdmmc1_wclk_mux", sdmmc1_wclk_sel,
- ARRAY_SIZE(sdmmc1_wclk_sel), CLK_SDMMC1, 4, 1);
- clk[ZX296702_SDMMC1_WCLK_DIV] =
- zx_div("sdmmc1_wclk_div", "sdmmc1_wclk_mux", CLK_SDMMC1, 12, 4);
- clk[ZX296702_SDMMC1_WCLK] =
- zx_gate("sdmmc1_wclk", "sdmmc1_wclk_div", CLK_SDMMC1, 1);
- clk[ZX296702_SDMMC1_PCLK] =
- zx_gate("sdmmc1_pclk", "lsp0_apb_pclk", CLK_SDMMC1, 0);
-
- clk[ZX296702_GPIO_CLK] =
- zx_gate("gpio_clk", "lsp0_apb_pclk", CLK_GPIO, 0);
-
- /* SPDIF */
- clk[ZX296702_SPDIF0_WCLK_MUX] =
- zx_mux("spdif0_wclk_mux", spdif0_wclk_sel,
- ARRAY_SIZE(spdif0_wclk_sel), CLK_SPDIF0, 4, 1);
- clk[ZX296702_SPDIF0_WCLK] =
- zx_gate("spdif0_wclk", "spdif0_wclk_mux", CLK_SPDIF0, 1);
- clk[ZX296702_SPDIF0_PCLK] =
- zx_gate("spdif0_pclk", "lsp0_apb_pclk", CLK_SPDIF0, 0);
-
- clk[ZX296702_SPDIF0_DIV] =
- clk_register_zx_audio("spdif0_div", "spdif0_wclk", 0,
- SPDIF0_DIV);
-
- /* I2S */
- clk[ZX296702_I2S0_WCLK_MUX] =
- zx_mux("i2s0_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S0, 4, 1);
- clk[ZX296702_I2S0_WCLK] =
- zx_gate("i2s0_wclk", "i2s0_wclk_mux", CLK_I2S0, 1);
- clk[ZX296702_I2S0_PCLK] =
- zx_gate("i2s0_pclk", "lsp0_apb_pclk", CLK_I2S0, 0);
-
- clk[ZX296702_I2S0_DIV] =
- clk_register_zx_audio("i2s0_div", "i2s0_wclk", 0, I2S0_DIV);
-
- clk[ZX296702_I2S1_WCLK_MUX] =
- zx_mux("i2s1_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S1, 4, 1);
- clk[ZX296702_I2S1_WCLK] =
- zx_gate("i2s1_wclk", "i2s1_wclk_mux", CLK_I2S1, 1);
- clk[ZX296702_I2S1_PCLK] =
- zx_gate("i2s1_pclk", "lsp0_apb_pclk", CLK_I2S1, 0);
-
- clk[ZX296702_I2S1_DIV] =
- clk_register_zx_audio("i2s1_div", "i2s1_wclk", 0, I2S1_DIV);
-
- clk[ZX296702_I2S2_WCLK_MUX] =
- zx_mux("i2s2_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S2, 4, 1);
- clk[ZX296702_I2S2_WCLK] =
- zx_gate("i2s2_wclk", "i2s2_wclk_mux", CLK_I2S2, 1);
- clk[ZX296702_I2S2_PCLK] =
- zx_gate("i2s2_pclk", "lsp0_apb_pclk", CLK_I2S2, 0);
-
- clk[ZX296702_I2S2_DIV] =
- clk_register_zx_audio("i2s2_div", "i2s2_wclk", 0, I2S2_DIV);
-
- for (i = 0; i < ARRAY_SIZE(lsp0clk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- lsp0clk_data.clks = lsp0clk;
- lsp0clk_data.clk_num = ARRAY_SIZE(lsp0clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &lsp0clk_data);
-}
-CLK_OF_DECLARE(zx296702_lsp0_clk, "zte,zx296702-lsp0crpm-clk",
- zx296702_lsp0_clocks_init);
-
-static void __init zx296702_lsp1_clocks_init(struct device_node *np)
-{
- struct clk **clk = lsp1clk;
- int i;
-
- lsp1crpm_base = of_iomap(np, 0);
- WARN_ON(!lsp1crpm_base);
-
- /* UART0 */
- clk[ZX296702_UART0_WCLK_MUX] =
- zx_mux("uart0_wclk_mux", uart_wclk_sel,
- ARRAY_SIZE(uart_wclk_sel), CLK_UART0, 4, 1);
- /* FIXME: uart wclk enable bit is bit1 in. We hack it as reserved 31 for
- * UART does not work after parent clk is disabled/enabled */
- clk[ZX296702_UART0_WCLK] =
- zx_gate("uart0_wclk", "uart0_wclk_mux", CLK_UART0, 31);
- clk[ZX296702_UART0_PCLK] =
- zx_gate("uart0_pclk", "lsp1_apb_pclk", CLK_UART0, 0);
-
- /* UART1 */
- clk[ZX296702_UART1_WCLK_MUX] =
- zx_mux("uart1_wclk_mux", uart_wclk_sel,
- ARRAY_SIZE(uart_wclk_sel), CLK_UART1, 4, 1);
- clk[ZX296702_UART1_WCLK] =
- zx_gate("uart1_wclk", "uart1_wclk_mux", CLK_UART1, 1);
- clk[ZX296702_UART1_PCLK] =
- zx_gate("uart1_pclk", "lsp1_apb_pclk", CLK_UART1, 0);
-
- /* SDMMC0 */
- clk[ZX296702_SDMMC0_WCLK_MUX] =
- zx_mux("sdmmc0_wclk_mux", sdmmc0_wclk_sel,
- ARRAY_SIZE(sdmmc0_wclk_sel), CLK_SDMMC0, 4, 1);
- clk[ZX296702_SDMMC0_WCLK_DIV] =
- zx_div("sdmmc0_wclk_div", "sdmmc0_wclk_mux", CLK_SDMMC0, 12, 4);
- clk[ZX296702_SDMMC0_WCLK] =
- zx_gate("sdmmc0_wclk", "sdmmc0_wclk_div", CLK_SDMMC0, 1);
- clk[ZX296702_SDMMC0_PCLK] =
- zx_gate("sdmmc0_pclk", "lsp1_apb_pclk", CLK_SDMMC0, 0);
-
- clk[ZX296702_SPDIF1_WCLK_MUX] =
- zx_mux("spdif1_wclk_mux", spdif1_wclk_sel,
- ARRAY_SIZE(spdif1_wclk_sel), CLK_SPDIF1, 4, 1);
- clk[ZX296702_SPDIF1_WCLK] =
- zx_gate("spdif1_wclk", "spdif1_wclk_mux", CLK_SPDIF1, 1);
- clk[ZX296702_SPDIF1_PCLK] =
- zx_gate("spdif1_pclk", "lsp1_apb_pclk", CLK_SPDIF1, 0);
-
- clk[ZX296702_SPDIF1_DIV] =
- clk_register_zx_audio("spdif1_div", "spdif1_wclk", 0,
- SPDIF1_DIV);
-
- for (i = 0; i < ARRAY_SIZE(lsp1clk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- lsp1clk_data.clks = lsp1clk;
- lsp1clk_data.clk_num = ARRAY_SIZE(lsp1clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &lsp1clk_data);
-}
-CLK_OF_DECLARE(zx296702_lsp1_clk, "zte,zx296702-lsp1crpm-clk",
- zx296702_lsp1_clocks_init);
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
deleted file mode 100644
index dd7045bc48c1..000000000000
--- a/drivers/clk/zte/clk-zx296718.c
+++ /dev/null
@@ -1,1074 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 - 2016 ZTE Corporation.
- * Copyright (C) 2016 Linaro Ltd.
- */
-#include <linux/clk-provider.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/clock/zx296718-clock.h>
-#include "clk.h"
-
-/* TOP CRM */
-#define TOP_CLK_MUX0 0x04
-#define TOP_CLK_MUX1 0x08
-#define TOP_CLK_MUX2 0x0c
-#define TOP_CLK_MUX3 0x10
-#define TOP_CLK_MUX4 0x14
-#define TOP_CLK_MUX5 0x18
-#define TOP_CLK_MUX6 0x1c
-#define TOP_CLK_MUX7 0x20
-#define TOP_CLK_MUX9 0x28
-
-
-#define TOP_CLK_GATE0 0x34
-#define TOP_CLK_GATE1 0x38
-#define TOP_CLK_GATE2 0x3c
-#define TOP_CLK_GATE3 0x40
-#define TOP_CLK_GATE4 0x44
-#define TOP_CLK_GATE5 0x48
-#define TOP_CLK_GATE6 0x4c
-
-#define TOP_CLK_DIV0 0x58
-
-#define PLL_CPU_REG 0x80
-#define PLL_VGA_REG 0xb0
-#define PLL_DDR_REG 0xa0
-
-/* LSP0 CRM */
-#define LSP0_TIMER3_CLK 0x4
-#define LSP0_TIMER4_CLK 0x8
-#define LSP0_TIMER5_CLK 0xc
-#define LSP0_UART3_CLK 0x10
-#define LSP0_UART1_CLK 0x14
-#define LSP0_UART2_CLK 0x18
-#define LSP0_SPIFC0_CLK 0x1c
-#define LSP0_I2C4_CLK 0x20
-#define LSP0_I2C5_CLK 0x24
-#define LSP0_SSP0_CLK 0x28
-#define LSP0_SSP1_CLK 0x2c
-#define LSP0_USIM0_CLK 0x30
-#define LSP0_GPIO_CLK 0x34
-#define LSP0_I2C3_CLK 0x38
-
-/* LSP1 CRM */
-#define LSP1_UART4_CLK 0x08
-#define LSP1_UART5_CLK 0x0c
-#define LSP1_PWM_CLK 0x10
-#define LSP1_I2C2_CLK 0x14
-#define LSP1_SSP2_CLK 0x1c
-#define LSP1_SSP3_CLK 0x20
-#define LSP1_SSP4_CLK 0x24
-#define LSP1_USIM1_CLK 0x28
-
-/* audio lsp */
-#define AUDIO_I2S0_DIV_CFG1 0x10
-#define AUDIO_I2S0_DIV_CFG2 0x14
-#define AUDIO_I2S0_CLK 0x18
-#define AUDIO_I2S1_DIV_CFG1 0x20
-#define AUDIO_I2S1_DIV_CFG2 0x24
-#define AUDIO_I2S1_CLK 0x28
-#define AUDIO_I2S2_DIV_CFG1 0x30
-#define AUDIO_I2S2_DIV_CFG2 0x34
-#define AUDIO_I2S2_CLK 0x38
-#define AUDIO_I2S3_DIV_CFG1 0x40
-#define AUDIO_I2S3_DIV_CFG2 0x44
-#define AUDIO_I2S3_CLK 0x48
-#define AUDIO_I2C0_CLK 0x50
-#define AUDIO_SPDIF0_DIV_CFG1 0x60
-#define AUDIO_SPDIF0_DIV_CFG2 0x64
-#define AUDIO_SPDIF0_CLK 0x68
-#define AUDIO_SPDIF1_DIV_CFG1 0x70
-#define AUDIO_SPDIF1_DIV_CFG2 0x74
-#define AUDIO_SPDIF1_CLK 0x78
-#define AUDIO_TIMER_CLK 0x80
-#define AUDIO_TDM_CLK 0x90
-#define AUDIO_TS_CLK 0xa0
-
-static DEFINE_SPINLOCK(clk_lock);
-
-static const struct zx_pll_config pll_cpu_table[] = {
- PLL_RATE(1312000000, 0x00103621, 0x04aaaaaa),
- PLL_RATE(1407000000, 0x00103a21, 0x04aaaaaa),
- PLL_RATE(1503000000, 0x00103e21, 0x04aaaaaa),
- PLL_RATE(1600000000, 0x00104221, 0x04aaaaaa),
-};
-
-static const struct zx_pll_config pll_vga_table[] = {
- PLL_RATE(36000000, 0x00102464, 0x04000000), /* 800x600@56 */
- PLL_RATE(40000000, 0x00102864, 0x04000000), /* 800x600@60 */
- PLL_RATE(49500000, 0x00103164, 0x04800000), /* 800x600@75 */
- PLL_RATE(50000000, 0x00103264, 0x04000000), /* 800x600@72 */
- PLL_RATE(56250000, 0x00103864, 0x04400000), /* 800x600@85 */
- PLL_RATE(65000000, 0x00104164, 0x04000000), /* 1024x768@60 */
- PLL_RATE(74375000, 0x00104a64, 0x04600000), /* 1280x720@60 */
- PLL_RATE(75000000, 0x00104b64, 0x04800000), /* 1024x768@70 */
- PLL_RATE(78750000, 0x00104e64, 0x04c00000), /* 1024x768@75 */
- PLL_RATE(85500000, 0x00105564, 0x04800000), /* 1360x768@60 */
- PLL_RATE(106500000, 0x00106a64, 0x04800000), /* 1440x900@60 */
- PLL_RATE(108000000, 0x00106c64, 0x04000000), /* 1280x1024@60 */
- PLL_RATE(110000000, 0x00106e64, 0x04000000), /* 1024x768@85 */
- PLL_RATE(135000000, 0x00105a44, 0x04000000), /* 1280x1024@75 */
- PLL_RATE(136750000, 0x00104462, 0x04600000), /* 1440x900@75 */
- PLL_RATE(148500000, 0x00104a62, 0x04400000), /* 1920x1080@60 */
- PLL_RATE(157000000, 0x00104e62, 0x04800000), /* 1440x900@85 */
- PLL_RATE(157500000, 0x00104e62, 0x04c00000), /* 1280x1024@85 */
- PLL_RATE(162000000, 0x00105162, 0x04000000), /* 1600x1200@60 */
- PLL_RATE(193250000, 0x00106062, 0x04a00000), /* 1920x1200@60 */
-};
-
-PNAME(osc) = {
- "osc24m",
- "osc32k",
-};
-
-PNAME(dbg_wclk_p) = {
- "clk334m",
- "clk466m",
- "clk396m",
- "clk250m",
-};
-
-PNAME(a72_coreclk_p) = {
- "osc24m",
- "pll_mm0_1188m",
- "pll_mm1_1296m",
- "clk1000m",
- "clk648m",
- "clk1600m",
- "pll_audio_1800m",
- "pll_vga_1800m",
-};
-
-PNAME(cpu_periclk_p) = {
- "osc24m",
- "clk500m",
- "clk594m",
- "clk466m",
- "clk294m",
- "clk334m",
- "clk250m",
- "clk125m",
-};
-
-PNAME(a53_coreclk_p) = {
- "osc24m",
- "clk1000m",
- "pll_mm0_1188m",
- "clk648m",
- "clk500m",
- "clk800m",
- "clk1600m",
- "pll_audio_1800m",
-};
-
-PNAME(sec_wclk_p) = {
- "osc24m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk198m",
- "clk148m5",
- "clk99m",
-};
-
-PNAME(sd_nand_wclk_p) = {
- "osc24m",
- "clk49m5",
- "clk99m",
- "clk198m",
- "clk167m",
- "clk148m5",
- "clk125m",
- "clk216m",
-};
-
-PNAME(emmc_wclk_p) = {
- "osc24m",
- "clk198m",
- "clk99m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk148m5",
-};
-
-PNAME(clk32_p) = {
- "osc32k",
- "clk32k768",
-};
-
-PNAME(usb_ref24m_p) = {
- "osc32k",
- "clk32k768",
-};
-
-PNAME(sys_noc_alck_p) = {
- "osc24m",
- "clk250m",
- "clk198m",
- "clk148m5",
- "clk108m",
- "clk54m",
- "clk216m",
- "clk240m",
-};
-
-PNAME(vde_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk480m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(vce_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(hde_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(gpu_aclk_p) = {
- "clk334m",
- "clk648m",
- "clk594m",
- "clk500m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(sappu_aclk_p) = {
- "clk396m",
- "clk500m",
- "clk250m",
- "clk148m5",
-};
-
-PNAME(sappu_wclk_p) = {
- "clk198m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk148m5",
- "clk125m",
- "clk99m",
-};
-
-PNAME(vou_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(vou_main_wclk_p) = {
- "clk108m",
- "clk594m",
- "clk297m",
- "clk148m5",
- "clk74m25",
- "clk54m",
- "clk27m",
- "clk_vga",
-};
-
-PNAME(vou_aux_wclk_p) = {
- "clk108m",
- "clk148m5",
- "clk74m25",
- "clk54m",
- "clk27m",
- "clk_vga",
- "clk54m_mm0",
- "clk"
-};
-
-PNAME(vou_ppu_wclk_p) = {
- "clk334m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
- "clk99m",
-};
-
-PNAME(vga_i2c_wclk_p) = {
- "osc24m",
- "clk99m",
-};
-
-PNAME(viu_m0_aclk_p) = {
- "clk334m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
- "osc24m",
-};
-
-PNAME(viu_m1_aclk_p) = {
- "clk198m",
- "clk250m",
- "clk297m",
- "clk125m",
- "clk396m",
- "clk334m",
- "clk148m5",
- "osc24m",
-};
-
-PNAME(viu_clk_p) = {
- "clk198m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk396m",
- "clk125m",
- "clk99m",
- "clk148m5",
-};
-
-PNAME(viu_jpeg_clk_p) = {
- "clk334m",
- "clk480m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
-};
-
-PNAME(ts_sys_clk_p) = {
- "clk192m",
- "clk167m",
- "clk125m",
- "clk99m",
-};
-
-PNAME(wdt_ares_p) = {
- "osc24m",
- "clk32k"
-};
-
-static struct clk_zx_pll zx296718_pll_clk[] = {
- ZX296718_PLL("pll_cpu", "osc24m", PLL_CPU_REG, pll_cpu_table),
- ZX296718_PLL("pll_vga", "osc24m", PLL_VGA_REG, pll_vga_table),
-};
-
-static struct zx_clk_fixed_factor top_ffactor_clk[] = {
- FFACTOR(0, "clk4m", "osc24m", 1, 6, 0),
- FFACTOR(0, "clk2m", "osc24m", 1, 12, 0),
- /* pll cpu */
- FFACTOR(0, "clk1600m", "pll_cpu", 1, 1, CLK_SET_RATE_PARENT),
- FFACTOR(0, "clk800m", "pll_cpu", 1, 2, CLK_SET_RATE_PARENT),
- /* pll mac */
- FFACTOR(0, "clk25m", "pll_mac", 1, 40, 0),
- FFACTOR(0, "clk125m", "pll_mac", 1, 8, 0),
- FFACTOR(0, "clk250m", "pll_mac", 1, 4, 0),
- FFACTOR(0, "clk50m", "pll_mac", 1, 20, 0),
- FFACTOR(0, "clk500m", "pll_mac", 1, 2, 0),
- FFACTOR(0, "clk1000m", "pll_mac", 1, 1, 0),
- FFACTOR(0, "clk334m", "pll_mac", 1, 3, 0),
- FFACTOR(0, "clk167m", "pll_mac", 1, 6, 0),
- /* pll mm */
- FFACTOR(0, "clk54m_mm0", "pll_mm0", 1, 22, 0),
- FFACTOR(0, "clk74m25", "pll_mm0", 1, 16, 0),
- FFACTOR(0, "clk148m5", "pll_mm0", 1, 8, 0),
- FFACTOR(0, "clk297m", "pll_mm0", 1, 4, 0),
- FFACTOR(0, "clk594m", "pll_mm0", 1, 2, 0),
- FFACTOR(0, "pll_mm0_1188m", "pll_mm0", 1, 1, 0),
- FFACTOR(0, "clk396m", "pll_mm0", 1, 3, 0),
- FFACTOR(0, "clk198m", "pll_mm0", 1, 6, 0),
- FFACTOR(0, "clk99m", "pll_mm0", 1, 12, 0),
- FFACTOR(0, "clk49m5", "pll_mm0", 1, 24, 0),
- /* pll mm */
- FFACTOR(0, "clk324m", "pll_mm1", 1, 4, 0),
- FFACTOR(0, "clk648m", "pll_mm1", 1, 2, 0),
- FFACTOR(0, "pll_mm1_1296m", "pll_mm1", 1, 1, 0),
- FFACTOR(0, "clk216m", "pll_mm1", 1, 6, 0),
- FFACTOR(0, "clk432m", "pll_mm1", 1, 3, 0),
- FFACTOR(0, "clk108m", "pll_mm1", 1, 12, 0),
- FFACTOR(0, "clk72m", "pll_mm1", 1, 18, 0),
- FFACTOR(0, "clk27m", "pll_mm1", 1, 48, 0),
- FFACTOR(0, "clk54m", "pll_mm1", 1, 24, 0),
- /* vga */
- FFACTOR(0, "pll_vga_1800m", "pll_vga", 1, 1, 0),
- FFACTOR(0, "clk_vga", "pll_vga", 1, 1, CLK_SET_RATE_PARENT),
- /* pll ddr */
- FFACTOR(0, "clk466m", "pll_ddr", 1, 2, 0),
-
- /* pll audio */
- FFACTOR(0, "pll_audio_1800m", "pll_audio", 1, 1, 0),
- FFACTOR(0, "clk32k768", "pll_audio", 1, 27000, 0),
- FFACTOR(0, "clk16m384", "pll_audio", 1, 54, 0),
- FFACTOR(0, "clk294m", "pll_audio", 1, 3, 0),
-
- /* pll hsic*/
- FFACTOR(0, "clk240m", "pll_hsic", 1, 4, 0),
- FFACTOR(0, "clk480m", "pll_hsic", 1, 2, 0),
- FFACTOR(0, "clk192m", "pll_hsic", 1, 5, 0),
- FFACTOR(0, "clk_pll_24m", "pll_hsic", 1, 40, 0),
- FFACTOR(0, "emmc_mux_div2", "emmc_mux", 1, 2, CLK_SET_RATE_PARENT),
-};
-
-static const struct clk_div_table noc_div_table[] = {
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
-};
-static struct zx_clk_div top_div_clk[] = {
- DIV_T(0, "sys_noc_hclk", "sys_noc_aclk", TOP_CLK_DIV0, 0, 2, 0, noc_div_table),
- DIV_T(0, "sys_noc_pclk", "sys_noc_aclk", TOP_CLK_DIV0, 4, 2, 0, noc_div_table),
-};
-
-static struct zx_clk_mux top_mux_clk[] = {
- MUX(0, "dbg_mux", dbg_wclk_p, TOP_CLK_MUX0, 12, 2),
- MUX(0, "a72_mux", a72_coreclk_p, TOP_CLK_MUX0, 8, 3),
- MUX(0, "cpu_peri_mux", cpu_periclk_p, TOP_CLK_MUX0, 4, 3),
- MUX_F(0, "a53_mux", a53_coreclk_p, TOP_CLK_MUX0, 0, 3, CLK_SET_RATE_PARENT, 0),
- MUX(0, "sys_noc_aclk", sys_noc_alck_p, TOP_CLK_MUX1, 0, 3),
- MUX(0, "sec_mux", sec_wclk_p, TOP_CLK_MUX2, 16, 3),
- MUX(0, "sd1_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 12, 3),
- MUX(0, "sd0_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 8, 3),
- MUX(0, "emmc_mux", emmc_wclk_p, TOP_CLK_MUX2, 4, 3),
- MUX(0, "nand_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 0, 3),
- MUX(0, "usb_ref24m_mux", usb_ref24m_p, TOP_CLK_MUX9, 16, 1),
- MUX(0, "clk32k", clk32_p, TOP_CLK_MUX9, 12, 1),
- MUX_F(0, "wdt_mux", wdt_ares_p, TOP_CLK_MUX9, 8, 1, CLK_SET_RATE_PARENT, 0),
- MUX(0, "timer_mux", osc, TOP_CLK_MUX9, 4, 1),
- MUX(0, "vde_mux", vde_aclk_p, TOP_CLK_MUX4, 0, 3),
- MUX(0, "vce_mux", vce_aclk_p, TOP_CLK_MUX4, 4, 3),
- MUX(0, "hde_mux", hde_aclk_p, TOP_CLK_MUX4, 8, 3),
- MUX(0, "gpu_mux", gpu_aclk_p, TOP_CLK_MUX5, 0, 3),
- MUX(0, "sappu_a_mux", sappu_aclk_p, TOP_CLK_MUX5, 4, 2),
- MUX(0, "sappu_w_mux", sappu_wclk_p, TOP_CLK_MUX5, 8, 3),
- MUX(0, "vou_a_mux", vou_aclk_p, TOP_CLK_MUX7, 0, 3),
- MUX_F(0, "vou_main_w_mux", vou_main_wclk_p, TOP_CLK_MUX7, 4, 3, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "vou_aux_w_mux", vou_aux_wclk_p, TOP_CLK_MUX7, 8, 3, CLK_SET_RATE_PARENT, 0),
- MUX(0, "vou_ppu_w_mux", vou_ppu_wclk_p, TOP_CLK_MUX7, 12, 3),
- MUX(0, "vga_i2c_mux", vga_i2c_wclk_p, TOP_CLK_MUX7, 16, 1),
- MUX(0, "viu_m0_a_mux", viu_m0_aclk_p, TOP_CLK_MUX6, 0, 3),
- MUX(0, "viu_m1_a_mux", viu_m1_aclk_p, TOP_CLK_MUX6, 4, 3),
- MUX(0, "viu_w_mux", viu_clk_p, TOP_CLK_MUX6, 8, 3),
- MUX(0, "viu_jpeg_w_mux", viu_jpeg_clk_p, TOP_CLK_MUX6, 12, 3),
- MUX(0, "ts_sys_mux", ts_sys_clk_p, TOP_CLK_MUX6, 16, 2),
-};
-
-static struct zx_clk_gate top_gate_clk[] = {
- GATE(CPU_DBG_GATE, "dbg_wclk", "dbg_mux", TOP_CLK_GATE0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(A72_GATE, "a72_coreclk", "a72_mux", TOP_CLK_GATE0, 3, CLK_SET_RATE_PARENT, 0),
- GATE(CPU_PERI_GATE, "cpu_peri", "cpu_peri_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
- GATE(A53_GATE, "a53_coreclk", "a53_mux", TOP_CLK_GATE0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(SD1_WCLK, "sd1_wclk", "sd1_mux", TOP_CLK_GATE1, 13, CLK_SET_RATE_PARENT, 0),
- GATE(SD0_WCLK, "sd0_wclk", "sd0_mux", TOP_CLK_GATE1, 9, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_WCLK, "emmc_wclk", "emmc_mux_div2", TOP_CLK_GATE0, 5, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_NAND_AXI, "emmc_nand_aclk", "sys_noc_aclk", TOP_CLK_GATE1, 4, CLK_SET_RATE_PARENT, 0),
- GATE(NAND_WCLK, "nand_wclk", "nand_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_NAND_AHB, "emmc_nand_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(0, "lsp1_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 31, 0, 0),
- GATE(LSP1_148M5, "lsp1_148m5", "clk148m5", TOP_CLK_GATE2, 30, 0, 0),
- GATE(LSP1_99M, "lsp1_99m", "clk99m", TOP_CLK_GATE2, 29, 0, 0),
- GATE(LSP1_24M, "lsp1_24m", "osc24m", TOP_CLK_GATE2, 28, 0, 0),
- GATE(LSP0_74M25, "lsp0_74m25", "clk74m25", TOP_CLK_GATE2, 25, 0, 0),
- GATE(0, "lsp0_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 24, 0, 0),
- GATE(LSP0_32K, "lsp0_32k", "osc32k", TOP_CLK_GATE2, 23, 0, 0),
- GATE(LSP0_148M5, "lsp0_148m5", "clk148m5", TOP_CLK_GATE2, 22, 0, 0),
- GATE(LSP0_99M, "lsp0_99m", "clk99m", TOP_CLK_GATE2, 21, 0, 0),
- GATE(LSP0_24M, "lsp0_24m", "osc24m", TOP_CLK_GATE2, 20, 0, 0),
- GATE(AUDIO_99M, "audio_99m", "clk99m", TOP_CLK_GATE5, 27, 0, 0),
- GATE(AUDIO_24M, "audio_24m", "osc24m", TOP_CLK_GATE5, 28, 0, 0),
- GATE(AUDIO_16M384, "audio_16m384", "clk16m384", TOP_CLK_GATE5, 29, 0, 0),
- GATE(AUDIO_32K, "audio_32k", "clk32k", TOP_CLK_GATE5, 30, 0, 0),
- GATE(WDT_WCLK, "wdt_wclk", "wdt_mux", TOP_CLK_GATE6, 9, CLK_SET_RATE_PARENT, 0),
- GATE(TIMER_WCLK, "timer_wclk", "timer_mux", TOP_CLK_GATE6, 5, CLK_SET_RATE_PARENT, 0),
- GATE(VDE_ACLK, "vde_aclk", "vde_mux", TOP_CLK_GATE3, 0, CLK_SET_RATE_PARENT, 0),
- GATE(VCE_ACLK, "vce_aclk", "vce_mux", TOP_CLK_GATE3, 4, CLK_SET_RATE_PARENT, 0),
- GATE(HDE_ACLK, "hde_aclk", "hde_mux", TOP_CLK_GATE3, 8, CLK_SET_RATE_PARENT, 0),
- GATE(GPU_ACLK, "gpu_aclk", "gpu_mux", TOP_CLK_GATE3, 16, CLK_SET_RATE_PARENT, 0),
- GATE(SAPPU_ACLK, "sappu_aclk", "sappu_a_mux", TOP_CLK_GATE3, 20, CLK_SET_RATE_PARENT, 0),
- GATE(SAPPU_WCLK, "sappu_wclk", "sappu_w_mux", TOP_CLK_GATE3, 22, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_ACLK, "vou_aclk", "vou_a_mux", TOP_CLK_GATE4, 16, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_MAIN_WCLK, "vou_main_wclk", "vou_main_w_mux", TOP_CLK_GATE4, 18, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_AUX_WCLK, "vou_aux_wclk", "vou_aux_w_mux", TOP_CLK_GATE4, 19, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_PPU_WCLK, "vou_ppu_wclk", "vou_ppu_w_mux", TOP_CLK_GATE4, 20, CLK_SET_RATE_PARENT, 0),
- GATE(MIPI_CFG_CLK, "mipi_cfg_clk", "osc24m", TOP_CLK_GATE4, 21, 0, 0),
- GATE(VGA_I2C_WCLK, "vga_i2c_wclk", "vga_i2c_mux", TOP_CLK_GATE4, 23, CLK_SET_RATE_PARENT, 0),
- GATE(MIPI_REF_CLK, "mipi_ref_clk", "clk27m", TOP_CLK_GATE4, 24, 0, 0),
- GATE(HDMI_OSC_CEC, "hdmi_osc_cec", "clk2m", TOP_CLK_GATE4, 22, 0, 0),
- GATE(HDMI_OSC_CLK, "hdmi_osc_clk", "clk240m", TOP_CLK_GATE4, 25, 0, 0),
- GATE(HDMI_XCLK, "hdmi_xclk", "osc24m", TOP_CLK_GATE4, 26, 0, 0),
- GATE(VIU_M0_ACLK, "viu_m0_aclk", "viu_m0_a_mux", TOP_CLK_GATE4, 0, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_M1_ACLK, "viu_m1_aclk", "viu_m1_a_mux", TOP_CLK_GATE4, 1, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_WCLK, "viu_wclk", "viu_w_mux", TOP_CLK_GATE4, 2, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_JPEG_WCLK, "viu_jpeg_wclk", "viu_jpeg_w_mux", TOP_CLK_GATE4, 3, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_CFG_CLK, "viu_cfg_clk", "osc24m", TOP_CLK_GATE4, 6, 0, 0),
- GATE(TS_SYS_WCLK, "ts_sys_wclk", "ts_sys_mux", TOP_CLK_GATE5, 2, CLK_SET_RATE_PARENT, 0),
- GATE(TS_SYS_108M, "ts_sys_108m", "clk108m", TOP_CLK_GATE5, 3, 0, 0),
- GATE(USB20_HCLK, "usb20_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 12, 0, 0),
- GATE(USB20_PHY_CLK, "usb20_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 13, 0, 0),
- GATE(USB21_HCLK, "usb21_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 14, 0, 0),
- GATE(USB21_PHY_CLK, "usb21_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 15, 0, 0),
- GATE(GMAC_RMIICLK, "gmac_rmii_clk", "clk50m", TOP_CLK_GATE2, 3, 0, 0),
- GATE(GMAC_PCLK, "gmac_pclk", "clk198m", TOP_CLK_GATE2, 1, 0, 0),
- GATE(GMAC_ACLK, "gmac_aclk", "clk49m5", TOP_CLK_GATE2, 0, 0, 0),
- GATE(GMAC_RFCLK, "gmac_refclk", "clk25m", TOP_CLK_GATE2, 4, 0, 0),
- GATE(SD1_AHB, "sd1_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 12, 0, 0),
- GATE(SD0_AHB, "sd0_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 8, 0, 0),
- GATE(TEMPSENSOR_GATE, "tempsensor_gate", "clk4m", TOP_CLK_GATE5, 31, 0, 0),
-};
-
-static struct clk_hw_onecell_data top_hw_onecell_data = {
- .num = TOP_NR_CLKS,
- .hws = {
- [TOP_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init top_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
- zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
- name = zx296718_pll_clk[i].hw.init->name;
- ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
- if (top_ffactor_clk[i].id)
- top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
- &top_ffactor_clk[i].factor.hw;
-
- name = top_ffactor_clk[i].factor.hw.init->name;
- ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
- if (top_mux_clk[i].id)
- top_hw_onecell_data.hws[top_mux_clk[i].id] =
- &top_mux_clk[i].mux.hw;
-
- top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = top_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
- if (top_gate_clk[i].id)
- top_hw_onecell_data.hws[top_gate_clk[i].id] =
- &top_gate_clk[i].gate.hw;
-
- top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = top_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
- if (top_div_clk[i].id)
- top_hw_onecell_data.hws[top_div_clk[i].id] =
- &top_div_clk[i].div.hw;
-
- top_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = top_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &top_hw_onecell_data);
- if (ret) {
- pr_err("failed to register top clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct clk_div_table common_even_div_table[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { .val = 5, .div = 6, },
- { .val = 7, .div = 8, },
- { .val = 9, .div = 10, },
- { .val = 11, .div = 12, },
- { .val = 13, .div = 14, },
- { .val = 15, .div = 16, },
-};
-
-static const struct clk_div_table common_div_table[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 2, .div = 3, },
- { .val = 3, .div = 4, },
- { .val = 4, .div = 5, },
- { .val = 5, .div = 6, },
- { .val = 6, .div = 7, },
- { .val = 7, .div = 8, },
- { .val = 8, .div = 9, },
- { .val = 9, .div = 10, },
- { .val = 10, .div = 11, },
- { .val = 11, .div = 12, },
- { .val = 12, .div = 13, },
- { .val = 13, .div = 14, },
- { .val = 14, .div = 15, },
- { .val = 15, .div = 16, },
-};
-
-PNAME(lsp0_wclk_common_p) = {
- "lsp0_24m",
- "lsp0_99m",
-};
-
-PNAME(lsp0_wclk_timer3_p) = {
- "timer3_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_timer4_p) = {
- "timer4_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_timer5_p) = {
- "timer5_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_spifc0_p) = {
- "lsp0_148m5",
- "lsp0_24m",
- "lsp0_99m",
- "lsp0_74m25"
-};
-
-PNAME(lsp0_wclk_ssp_p) = {
- "lsp0_148m5",
- "lsp0_99m",
- "lsp0_24m",
-};
-
-static struct zx_clk_mux lsp0_mux_clk[] = {
- MUX(0, "timer3_wclk_mux", lsp0_wclk_timer3_p, LSP0_TIMER3_CLK, 4, 1),
- MUX(0, "timer4_wclk_mux", lsp0_wclk_timer4_p, LSP0_TIMER4_CLK, 4, 1),
- MUX(0, "timer5_wclk_mux", lsp0_wclk_timer5_p, LSP0_TIMER5_CLK, 4, 1),
- MUX(0, "uart3_wclk_mux", lsp0_wclk_common_p, LSP0_UART3_CLK, 4, 1),
- MUX(0, "uart1_wclk_mux", lsp0_wclk_common_p, LSP0_UART1_CLK, 4, 1),
- MUX(0, "uart2_wclk_mux", lsp0_wclk_common_p, LSP0_UART2_CLK, 4, 1),
- MUX(0, "spifc0_wclk_mux", lsp0_wclk_spifc0_p, LSP0_SPIFC0_CLK, 4, 2),
- MUX(0, "i2c4_wclk_mux", lsp0_wclk_common_p, LSP0_I2C4_CLK, 4, 1),
- MUX(0, "i2c5_wclk_mux", lsp0_wclk_common_p, LSP0_I2C5_CLK, 4, 1),
- MUX(0, "ssp0_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP0_CLK, 4, 1),
- MUX(0, "ssp1_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP1_CLK, 4, 1),
- MUX(0, "i2c3_wclk_mux", lsp0_wclk_common_p, LSP0_I2C3_CLK, 4, 1),
-};
-
-static struct zx_clk_gate lsp0_gate_clk[] = {
- GATE(LSP0_TIMER3_WCLK, "timer3_wclk", "timer3_wclk_mux", LSP0_TIMER3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_TIMER4_WCLK, "timer4_wclk", "timer4_wclk_mux", LSP0_TIMER4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_TIMER5_WCLK, "timer5_wclk", "timer5_wclk_mux", LSP0_TIMER5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART3_WCLK, "uart3_wclk", "uart3_wclk_mux", LSP0_UART3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART1_WCLK, "uart1_wclk", "uart1_wclk_mux", LSP0_UART1_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART2_WCLK, "uart2_wclk", "uart2_wclk_mux", LSP0_UART2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SPIFC0_WCLK, "spifc0_wclk", "spifc0_wclk_mux", LSP0_SPIFC0_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C4_WCLK, "i2c4_wclk", "i2c4_wclk_mux", LSP0_I2C4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C5_WCLK, "i2c5_wclk", "i2c5_wclk_mux", LSP0_I2C5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SSP0_WCLK, "ssp0_wclk", "ssp0_div", LSP0_SSP0_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SSP1_WCLK, "ssp1_wclk", "ssp1_div", LSP0_SSP1_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C3_WCLK, "i2c3_wclk", "i2c3_wclk_mux", LSP0_I2C3_CLK, 1, CLK_SET_RATE_PARENT, 0),
-};
-
-static struct zx_clk_div lsp0_div_clk[] = {
- DIV_T(0, "timer3_div", "lsp0_24m", LSP0_TIMER3_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "timer4_div", "lsp0_24m", LSP0_TIMER4_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "timer5_div", "lsp0_24m", LSP0_TIMER5_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "ssp0_div", "ssp0_wclk_mux", LSP0_SSP0_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "ssp1_div", "ssp1_wclk_mux", LSP0_SSP1_CLK, 12, 4, 0, common_even_div_table),
-};
-
-static struct clk_hw_onecell_data lsp0_hw_onecell_data = {
- .num = LSP0_NR_CLKS,
- .hws = {
- [LSP0_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init lsp0_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_mux_clk); i++) {
- if (lsp0_mux_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_mux_clk[i].id] =
- &lsp0_mux_clk[i].mux.hw;
-
- lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = lsp0_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
- if (lsp0_gate_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_gate_clk[i].id] =
- &lsp0_gate_clk[i].gate.hw;
-
- lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = lsp0_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
- if (lsp0_div_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_div_clk[i].id] =
- &lsp0_div_clk[i].div.hw;
-
- lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = lsp0_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &lsp0_hw_onecell_data);
- if (ret) {
- pr_err("failed to register lsp0 clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-PNAME(lsp1_wclk_common_p) = {
- "lsp1_24m",
- "lsp1_99m",
-};
-
-PNAME(lsp1_wclk_ssp_p) = {
- "lsp1_148m5",
- "lsp1_99m",
- "lsp1_24m",
-};
-
-static struct zx_clk_mux lsp1_mux_clk[] = {
- MUX(0, "uart4_wclk_mux", lsp1_wclk_common_p, LSP1_UART4_CLK, 4, 1),
- MUX(0, "uart5_wclk_mux", lsp1_wclk_common_p, LSP1_UART5_CLK, 4, 1),
- MUX(0, "pwm_wclk_mux", lsp1_wclk_common_p, LSP1_PWM_CLK, 4, 1),
- MUX(0, "i2c2_wclk_mux", lsp1_wclk_common_p, LSP1_I2C2_CLK, 4, 1),
- MUX(0, "ssp2_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP2_CLK, 4, 2),
- MUX(0, "ssp3_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP3_CLK, 4, 2),
- MUX(0, "ssp4_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP4_CLK, 4, 2),
- MUX(0, "usim1_wclk_mux", lsp1_wclk_common_p, LSP1_USIM1_CLK, 4, 1),
-};
-
-static struct zx_clk_div lsp1_div_clk[] = {
- DIV_T(0, "pwm_div", "pwm_wclk_mux", LSP1_PWM_CLK, 12, 4, CLK_SET_RATE_PARENT, common_div_table),
- DIV_T(0, "ssp2_div", "ssp2_wclk_mux", LSP1_SSP2_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
- DIV_T(0, "ssp3_div", "ssp3_wclk_mux", LSP1_SSP3_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
- DIV_T(0, "ssp4_div", "ssp4_wclk_mux", LSP1_SSP4_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
-};
-
-static struct zx_clk_gate lsp1_gate_clk[] = {
- GATE(LSP1_UART4_WCLK, "lsp1_uart4_wclk", "uart4_wclk_mux", LSP1_UART4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_UART5_WCLK, "lsp1_uart5_wclk", "uart5_wclk_mux", LSP1_UART5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_PWM_WCLK, "lsp1_pwm_wclk", "pwm_div", LSP1_PWM_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_PWM_PCLK, "lsp1_pwm_pclk", "lsp1_pclk", LSP1_PWM_CLK, 0, 0, 0),
- GATE(LSP1_I2C2_WCLK, "lsp1_i2c2_wclk", "i2c2_wclk_mux", LSP1_I2C2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP2_WCLK, "lsp1_ssp2_wclk", "ssp2_div", LSP1_SSP2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP3_WCLK, "lsp1_ssp3_wclk", "ssp3_div", LSP1_SSP3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP4_WCLK, "lsp1_ssp4_wclk", "ssp4_div", LSP1_SSP4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_USIM1_WCLK, "lsp1_usim1_wclk", "usim1_wclk_mux", LSP1_USIM1_CLK, 1, CLK_SET_RATE_PARENT, 0),
-};
-
-static struct clk_hw_onecell_data lsp1_hw_onecell_data = {
- .num = LSP1_NR_CLKS,
- .hws = {
- [LSP1_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init lsp1_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_mux_clk); i++) {
- if (lsp1_mux_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_mux_clk[i].id] =
-				&lsp1_mux_clk[i].mux.hw;
-
- lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = lsp1_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
- if (lsp1_gate_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_gate_clk[i].id] =
- &lsp1_gate_clk[i].gate.hw;
-
- lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = lsp1_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
- if (lsp1_div_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_div_clk[i].id] =
- &lsp1_div_clk[i].div.hw;
-
- lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = lsp1_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &lsp1_hw_onecell_data);
- if (ret) {
- pr_err("failed to register lsp1 clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-PNAME(audio_wclk_common_p) = {
- "audio_99m",
- "audio_24m",
-};
-
-PNAME(audio_timer_p) = {
- "audio_24m",
- "audio_32k",
-};
-
-static struct zx_clk_mux audio_mux_clk[] = {
- MUX(I2S0_WCLK_MUX, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1),
- MUX(I2S1_WCLK_MUX, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1),
- MUX(I2S2_WCLK_MUX, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1),
- MUX(I2S3_WCLK_MUX, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1),
- MUX(0, "i2c0_wclk_mux", audio_wclk_common_p, AUDIO_I2C0_CLK, 0, 1),
- MUX(0, "spdif0_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF0_CLK, 0, 1),
- MUX(0, "spdif1_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF1_CLK, 0, 1),
- MUX(0, "timer_wclk_mux", audio_timer_p, AUDIO_TIMER_CLK, 0, 1),
-};
-
-static struct clk_zx_audio_divider audio_adiv_clk[] = {
- AUDIO_DIV(0, "i2s0_wclk_div", "i2s0_wclk_mux", AUDIO_I2S0_DIV_CFG1),
- AUDIO_DIV(0, "i2s1_wclk_div", "i2s1_wclk_mux", AUDIO_I2S1_DIV_CFG1),
- AUDIO_DIV(0, "i2s2_wclk_div", "i2s2_wclk_mux", AUDIO_I2S2_DIV_CFG1),
- AUDIO_DIV(0, "i2s3_wclk_div", "i2s3_wclk_mux", AUDIO_I2S3_DIV_CFG1),
- AUDIO_DIV(0, "spdif0_wclk_div", "spdif0_wclk_mux", AUDIO_SPDIF0_DIV_CFG1),
- AUDIO_DIV(0, "spdif1_wclk_div", "spdif1_wclk_mux", AUDIO_SPDIF1_DIV_CFG1),
-};
-
-static struct zx_clk_div audio_div_clk[] = {
- DIV_T(0, "tdm_wclk_div", "audio_16m384", AUDIO_TDM_CLK, 8, 4, 0, common_div_table),
-};
-
-static struct zx_clk_gate audio_gate_clk[] = {
- GATE(AUDIO_I2S0_WCLK, "i2s0_wclk", "i2s0_wclk_div", AUDIO_I2S0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S1_WCLK, "i2s1_wclk", "i2s1_wclk_div", AUDIO_I2S1_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S2_WCLK, "i2s2_wclk", "i2s2_wclk_div", AUDIO_I2S2_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S3_WCLK, "i2s3_wclk", "i2s3_wclk_div", AUDIO_I2S3_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S0_PCLK, "i2s0_pclk", "clk49m5", AUDIO_I2S0_CLK, 8, 0, 0),
- GATE(AUDIO_I2S1_PCLK, "i2s1_pclk", "clk49m5", AUDIO_I2S1_CLK, 8, 0, 0),
- GATE(AUDIO_I2S2_PCLK, "i2s2_pclk", "clk49m5", AUDIO_I2S2_CLK, 8, 0, 0),
- GATE(AUDIO_I2S3_PCLK, "i2s3_pclk", "clk49m5", AUDIO_I2S3_CLK, 8, 0, 0),
- GATE(AUDIO_I2C0_WCLK, "i2c0_wclk", "i2c0_wclk_mux", AUDIO_I2C0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_SPDIF0_WCLK, "spdif0_wclk", "spdif0_wclk_div", AUDIO_SPDIF0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_SPDIF1_WCLK, "spdif1_wclk", "spdif1_wclk_div", AUDIO_SPDIF1_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_TDM_WCLK, "tdm_wclk", "tdm_wclk_div", AUDIO_TDM_CLK, 17, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_TS_PCLK, "tempsensor_pclk", "clk49m5", AUDIO_TS_CLK, 1, 0, 0),
-};
-
-static struct clk_hw_onecell_data audio_hw_onecell_data = {
- .num = AUDIO_NR_CLKS,
- .hws = {
- [AUDIO_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init audio_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map audio clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_mux_clk); i++) {
- if (audio_mux_clk[i].id)
- audio_hw_onecell_data.hws[audio_mux_clk[i].id] =
- &audio_mux_clk[i].mux.hw;
-
- audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = audio_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
- if (audio_adiv_clk[i].id)
- audio_hw_onecell_data.hws[audio_adiv_clk[i].id] =
- &audio_adiv_clk[i].hw;
-
- audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
- name = audio_adiv_clk[i].hw.init->name;
- ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
- if (audio_div_clk[i].id)
- audio_hw_onecell_data.hws[audio_div_clk[i].id] =
- &audio_div_clk[i].div.hw;
-
- audio_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = audio_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
- if (audio_gate_clk[i].id)
- audio_hw_onecell_data.hws[audio_gate_clk[i].id] =
- &audio_gate_clk[i].gate.hw;
-
- audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = audio_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &audio_hw_onecell_data);
- if (ret) {
- pr_err("failed to register audio clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id zx_clkc_match_table[] = {
- { .compatible = "zte,zx296718-topcrm", .data = &top_clocks_init },
- { .compatible = "zte,zx296718-lsp0crm", .data = &lsp0_clocks_init },
- { .compatible = "zte,zx296718-lsp1crm", .data = &lsp1_clocks_init },
- { .compatible = "zte,zx296718-audiocrm", .data = &audio_clocks_init },
- { }
-};
-
-static int zx_clkc_probe(struct platform_device *pdev)
-{
- int (*init_fn)(struct device_node *np);
- struct device_node *np = pdev->dev.of_node;
-
- init_fn = of_device_get_match_data(&pdev->dev);
- if (!init_fn) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
-
- return init_fn(np);
-}
-
-static struct platform_driver zx_clk_driver = {
- .probe = zx_clkc_probe,
- .driver = {
- .name = "zx296718-clkc",
- .of_match_table = zx_clkc_match_table,
- },
-};
-
-static int __init zx_clk_init(void)
-{
- return platform_driver_register(&zx_clk_driver);
-}
-core_initcall(zx_clk_init);
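
Each of the deleted *_clocks_init() functions above repeats one pattern: map the CRM register block, rebase the register offset stored in each static table entry, register every clk_hw, and expose the table through a onecell provider. A minimal sketch of that pattern, assuming a hypothetical example_gate_clk table and example_hw_onecell_data shaped like the ones above (names are illustrative, not from any in-tree driver):

static int __init example_clocks_init(struct device_node *np)
{
	void __iomem *base;
	int i, ret;

	base = of_iomap(np, 0);
	if (!base)
		return -ENXIO;

	for (i = 0; i < ARRAY_SIZE(example_gate_clk); i++) {
		/* Table entries hold offsets; rebase them onto the mapped block. */
		example_gate_clk[i].gate.reg += (uintptr_t)base;
		ret = clk_hw_register(NULL, &example_gate_clk[i].gate.hw);
		if (ret)
			pr_warn("clk %s failed to register: %d\n",
				example_gate_clk[i].gate.hw.init->name, ret);
	}

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				      &example_hw_onecell_data);
}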
diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c
deleted file mode 100644
index 8bda6d41ad3a..000000000000
--- a/drivers/clk/zte/clk.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/gcd.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <asm/div64.h>
-
-#include "clk.h"
-
-#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw)
-#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw)
-
-#define CFG0_CFG1_OFFSET 4
-#define LOCK_FLAG 30
-#define POWER_DOWN 31
-
-static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate)
-{
- const struct zx_pll_config *config = zx_pll->lookup_table;
- int i;
-
- for (i = 0; i < zx_pll->count; i++) {
- if (config[i].rate > rate)
- return i > 0 ? i - 1 : 0;
-
- if (config[i].rate == rate)
- return i;
- }
-
- return i - 1;
-}
-
-static int hw_to_idx(struct clk_zx_pll *zx_pll)
-{
- const struct zx_pll_config *config = zx_pll->lookup_table;
- u32 hw_cfg0, hw_cfg1;
- int i;
-
- hw_cfg0 = readl_relaxed(zx_pll->reg_base);
- hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
-	/* Mask out the lock flag so cfg0 can match the lookup table */
- hw_cfg0 &= ~BIT(zx_pll->lock_bit);
-
- /* Check availability of pd_bit */
- if (zx_pll->pd_bit < 32)
- hw_cfg0 |= BIT(zx_pll->pd_bit);
-
- for (i = 0; i < zx_pll->count; i++) {
- if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1)
- return i;
- }
-
- return -EINVAL;
-}
-
-static unsigned long zx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- int idx;
-
- idx = hw_to_idx(zx_pll);
- if (unlikely(idx == -EINVAL))
- return 0;
-
- return zx_pll->lookup_table[idx].rate;
-}
-
-static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- int idx;
-
- idx = rate_to_idx(zx_pll, rate);
-
- return zx_pll->lookup_table[idx].rate;
-}
-
-static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
-	/* Assume the CPU is not currently clocked by this PLL */
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- const struct zx_pll_config *config;
- int idx;
-
- idx = rate_to_idx(zx_pll, rate);
- config = &zx_pll->lookup_table[idx];
-
- writel_relaxed(config->cfg0, zx_pll->reg_base);
- writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
- return 0;
-}
-
-static int zx_pll_enable(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- /* If pd_bit is not available, simply return success. */
- if (zx_pll->pd_bit > 31)
- return 0;
-
- reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg & ~BIT(zx_pll->pd_bit), zx_pll->reg_base);
-
- return readl_relaxed_poll_timeout(zx_pll->reg_base, reg,
- reg & BIT(zx_pll->lock_bit), 0, 100);
-}
-
-static void zx_pll_disable(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- if (zx_pll->pd_bit > 31)
- return;
-
- reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg | BIT(zx_pll->pd_bit), zx_pll->reg_base);
-}
-
-static int zx_pll_is_enabled(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_pll->reg_base);
-
- return !(reg & BIT(zx_pll->pd_bit));
-}
-
-const struct clk_ops zx_pll_ops = {
- .recalc_rate = zx_pll_recalc_rate,
- .round_rate = zx_pll_round_rate,
- .set_rate = zx_pll_set_rate,
- .enable = zx_pll_enable,
- .disable = zx_pll_disable,
- .is_enabled = zx_pll_is_enabled,
-};
-EXPORT_SYMBOL(zx_pll_ops);
-
-struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg_base,
- const struct zx_pll_config *lookup_table,
- int count, spinlock_t *lock)
-{
- struct clk_zx_pll *zx_pll;
- struct clk *clk;
- struct clk_init_data init;
-
- zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL);
- if (!zx_pll)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &zx_pll_ops;
- init.flags = flags;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
-
- zx_pll->reg_base = reg_base;
- zx_pll->lookup_table = lookup_table;
- zx_pll->count = count;
- zx_pll->lock_bit = LOCK_FLAG;
- zx_pll->pd_bit = POWER_DOWN;
- zx_pll->lock = lock;
- zx_pll->hw.init = &init;
-
- clk = clk_register(NULL, &zx_pll->hw);
- if (IS_ERR(clk))
- kfree(zx_pll);
-
- return clk;
-}
-
-#define BPAR 1000000
-static u32 calc_reg(u32 parent_rate, u32 rate)
-{
- u32 sel, integ, fra_div, tmp;
- u64 tmp64 = (u64)parent_rate * BPAR;
-
- do_div(tmp64, rate);
- integ = (u32)tmp64 / BPAR;
- integ = integ >> 1;
-
- tmp = (u32)tmp64 % BPAR;
- sel = tmp / BPAR;
-
- tmp = tmp % BPAR;
- fra_div = tmp * 0xff / BPAR;
- tmp = (sel << 24) | (integ << 16) | (0xff << 8) | fra_div;
-
-	/*
-	 * Set the I2S integer divider to 1. This bit is reserved for
-	 * SPDIF and does no harm there.
-	 */
- tmp |= BIT(28);
- return tmp;
-}
-
-static u32 calc_rate(u32 reg, u32 parent_rate)
-{
- u32 sel, integ, fra_div, tmp;
- u64 tmp64 = (u64)parent_rate * BPAR;
-
- tmp = reg;
- sel = (tmp >> 24) & BIT(0);
- integ = (tmp >> 16) & 0xff;
- fra_div = tmp & 0xff;
-
- tmp = fra_div * BPAR;
- tmp = tmp / 0xff;
- tmp += sel * BPAR;
- tmp += 2 * integ * BPAR;
- do_div(tmp64, tmp);
-
- return (u32)tmp64;
-}
-
-static unsigned long zx_audio_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- return calc_rate(reg, parent_rate);
-}
-
-static long zx_audio_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- u32 reg;
-
- if (rate * 2 > *prate)
- return -EINVAL;
-
- reg = calc_reg(*prate, rate);
- return calc_rate(reg, *prate);
-}
-
-static int zx_audio_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = calc_reg(parent_rate, rate);
- writel_relaxed(reg, zx_audio->reg_base);
-
- return 0;
-}
-
-#define ZX_AUDIO_EN BIT(25)
-static int zx_audio_enable(struct clk_hw *hw)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- writel_relaxed(reg & ~ZX_AUDIO_EN, zx_audio->reg_base);
- return 0;
-}
-
-static void zx_audio_disable(struct clk_hw *hw)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- writel_relaxed(reg | ZX_AUDIO_EN, zx_audio->reg_base);
-}
-
-static const struct clk_ops zx_audio_ops = {
- .recalc_rate = zx_audio_recalc_rate,
- .round_rate = zx_audio_round_rate,
- .set_rate = zx_audio_set_rate,
- .enable = zx_audio_enable,
- .disable = zx_audio_disable,
-};
-
-struct clk *clk_register_zx_audio(const char *name,
- const char * const parent_name,
- unsigned long flags,
- void __iomem *reg_base)
-{
- struct clk_zx_audio *zx_audio;
- struct clk *clk;
- struct clk_init_data init;
-
- zx_audio = kzalloc(sizeof(*zx_audio), GFP_KERNEL);
- if (!zx_audio)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &zx_audio_ops;
- init.flags = flags;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
-
- zx_audio->reg_base = reg_base;
- zx_audio->hw.init = &init;
-
- clk = clk_register(NULL, &zx_audio->hw);
- if (IS_ERR(clk))
- kfree(zx_audio);
-
- return clk;
-}
-
-#define CLK_AUDIO_DIV_FRAC BIT(0)
-#define CLK_AUDIO_DIV_INT BIT(1)
-#define CLK_AUDIO_DIV_UNCOMMON BIT(1)
-
-#define CLK_AUDIO_DIV_FRAC_NSHIFT 16
-#define CLK_AUDIO_DIV_INT_FRAC_RE BIT(16)
-#define CLK_AUDIO_DIV_INT_FRAC_MAX (0xffff)
-#define CLK_AUDIO_DIV_INT_FRAC_MIN (0x2)
-#define CLK_AUDIO_DIV_INT_INT_SHIFT 24
-#define CLK_AUDIO_DIV_INT_INT_WIDTH 4
-
-struct zx_clk_audio_div_table {
- unsigned long rate;
- unsigned int int_reg;
- unsigned int frac_reg;
-};
-
-#define to_clk_zx_audio_div(_hw) container_of(_hw, struct clk_zx_audio_divider, hw)
-
-static unsigned long audio_calc_rate(struct clk_zx_audio_divider *audio_div,
- u32 reg_frac, u32 reg_int,
- unsigned long parent_rate)
-{
- unsigned long rate, m, n;
-
- m = reg_frac & 0xffff;
- n = (reg_frac >> 16) & 0xffff;
-
- m = (reg_int & 0xffff) * n + m;
- rate = (parent_rate * n) / m;
-
- return rate;
-}
-
-static void audio_calc_reg(struct clk_zx_audio_divider *audio_div,
- struct zx_clk_audio_div_table *div_table,
- unsigned long rate, unsigned long parent_rate)
-{
- unsigned int reg_int, reg_frac;
- unsigned long m, n, div;
-
- reg_int = parent_rate / rate;
-
- if (reg_int > CLK_AUDIO_DIV_INT_FRAC_MAX)
- reg_int = CLK_AUDIO_DIV_INT_FRAC_MAX;
- else if (reg_int < CLK_AUDIO_DIV_INT_FRAC_MIN)
- reg_int = 0;
- m = parent_rate - rate * reg_int;
- n = rate;
-
- div = gcd(m, n);
- m = m / div;
- n = n / div;
-
- if ((m >> 16) || (n >> 16)) {
- if (m > n) {
- n = n * 0xffff / m;
- m = 0xffff;
- } else {
- m = m * 0xffff / n;
- n = 0xffff;
- }
- }
- reg_frac = m | (n << 16);
-
- div_table->rate = parent_rate * n / (reg_int * n + m);
- div_table->int_reg = reg_int;
- div_table->frac_reg = reg_frac;
-}
-
-static unsigned long zx_audio_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- u32 reg_frac, reg_int;
-
- reg_frac = readl_relaxed(zx_audio_div->reg_base);
- reg_int = readl_relaxed(zx_audio_div->reg_base + 0x4);
-
- return audio_calc_rate(zx_audio_div, reg_frac, reg_int, parent_rate);
-}
-
-static long zx_audio_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- struct zx_clk_audio_div_table divt;
-
- audio_calc_reg(zx_audio_div, &divt, rate, *prate);
-
- return audio_calc_rate(zx_audio_div, divt.frac_reg, divt.int_reg, *prate);
-}
-
-static int zx_audio_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- struct zx_clk_audio_div_table divt;
- unsigned int val;
-
- audio_calc_reg(zx_audio_div, &divt, rate, parent_rate);
- if (divt.rate != rate)
-		pr_debug("actual rate is %lu\n", divt.rate);
-
- writel_relaxed(divt.frac_reg, zx_audio_div->reg_base);
-
- val = readl_relaxed(zx_audio_div->reg_base + 0x4);
- val &= ~0xffff;
- val |= divt.int_reg | CLK_AUDIO_DIV_INT_FRAC_RE;
- writel_relaxed(val, zx_audio_div->reg_base + 0x4);
-
- mdelay(1);
-
- val = readl_relaxed(zx_audio_div->reg_base + 0x4);
- val &= ~CLK_AUDIO_DIV_INT_FRAC_RE;
- writel_relaxed(val, zx_audio_div->reg_base + 0x4);
-
- return 0;
-}
-
-const struct clk_ops zx_audio_div_ops = {
- .recalc_rate = zx_audio_div_recalc_rate,
- .round_rate = zx_audio_div_round_rate,
- .set_rate = zx_audio_div_set_rate,
-};
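
The deleted audio divider computes its achievable rate as parent * n / (int * n + m), after reducing the fractional pair m/n by their gcd and clamping each to 16 bits. A hypothetical standalone check of that arithmetic (plain C, not driver code; the example values are made up, and the 64-bit multiply avoids the overflow a 32-bit unsigned long would hit):

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long parent = 49152000, rate = 11289600; /* example values */
	unsigned long reg_int = parent / rate;		/* integer part */
	unsigned long m = parent - rate * reg_int;	/* remainder */
	unsigned long n = rate;
	unsigned long div = gcd_ul(m, n);

	m /= div;
	n /= div;
	/* Mirrors audio_calc_rate(): prints actual=11289600 for these inputs. */
	printf("m=%lu n=%lu actual=%llu\n", m, n,
	       (unsigned long long)parent * n / (reg_int * n + m));
	return 0;
}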
diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h
deleted file mode 100644
index aeaf2a380ba6..000000000000
--- a/drivers/clk/zte/clk.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2015 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#ifndef __ZTE_CLK_H
-#define __ZTE_CLK_H
-#include <linux/clk-provider.h>
-#include <linux/spinlock.h>
-
-#define PNAME(x) static const char *x[]
-
-struct zx_pll_config {
- unsigned long rate;
- u32 cfg0;
- u32 cfg1;
-};
-
-struct clk_zx_pll {
- struct clk_hw hw;
- void __iomem *reg_base;
-	const struct zx_pll_config *lookup_table; /* ordered by ascending rate */
- int count;
- spinlock_t *lock;
- u8 pd_bit; /* power down bit */
- u8 lock_bit; /* pll lock flag bit */
-};
-
-#define PLL_RATE(_rate, _cfg0, _cfg1) \
-{ \
- .rate = _rate, \
- .cfg0 = _cfg0, \
- .cfg1 = _cfg1, \
-}
-
-#define ZX_PLL(_name, _parent, _reg, _table, _pd, _lock) \
-{ \
- .reg_base = (void __iomem *) _reg, \
- .lookup_table = _table, \
- .count = ARRAY_SIZE(_table), \
- .pd_bit = _pd, \
- .lock_bit = _lock, \
- .hw.init = CLK_HW_INIT(_name, _parent, &zx_pll_ops, \
- CLK_GET_RATE_NOCACHE), \
-}
-
-/*
- * The pd_bit is not available on ZX296718, so let's pass something
- * bigger than 31, e.g. 0xff, to indicate that.
- */
-#define ZX296718_PLL(_name, _parent, _reg, _table) \
-ZX_PLL(_name, _parent, _reg, _table, 0xff, 30)
-
-struct zx_clk_gate {
- struct clk_gate gate;
- u16 id;
-};
-
-#define GATE(_id, _name, _parent, _reg, _bit, _flag, _gflags) \
-{ \
- .gate = { \
- .reg = (void __iomem *) _reg, \
- .bit_idx = (_bit), \
- .flags = _gflags, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_gate_ops, \
- _flag | CLK_IGNORE_UNUSED), \
- }, \
- .id = _id, \
-}
-
-struct zx_clk_fixed_factor {
- struct clk_fixed_factor factor;
- u16 id;
-};
-
-#define FFACTOR(_id, _name, _parent, _mult, _div, _flag) \
-{ \
- .factor = { \
- .div = _div, \
- .mult = _mult, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_fixed_factor_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-struct zx_clk_mux {
- struct clk_mux mux;
- u16 id;
-};
-
-#define MUX_F(_id, _name, _parent, _reg, _shift, _width, _flag, _mflag) \
-{ \
- .mux = { \
- .reg = (void __iomem *) _reg, \
- .mask = BIT(_width) - 1, \
- .shift = _shift, \
- .flags = _mflag, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parent, \
- &clk_mux_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-#define MUX(_id, _name, _parent, _reg, _shift, _width) \
-MUX_F(_id, _name, _parent, _reg, _shift, _width, 0, 0)
-
-struct zx_clk_div {
- struct clk_divider div;
- u16 id;
-};
-
-#define DIV_T(_id, _name, _parent, _reg, _shift, _width, _flag, _table) \
-{ \
- .div = { \
- .reg = (void __iomem *) _reg, \
- .shift = _shift, \
- .width = _width, \
- .flags = 0, \
- .table = _table, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_divider_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-struct clk_zx_audio_divider {
- struct clk_hw hw;
- void __iomem *reg_base;
- unsigned int rate_count;
- spinlock_t *lock;
- u16 id;
-};
-
-#define AUDIO_DIV(_id, _name, _parent, _reg) \
-{ \
- .reg_base = (void __iomem *) _reg, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &zx_audio_div_ops, \
- 0), \
- .id = _id, \
-}
-
-struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg_base,
- const struct zx_pll_config *lookup_table, int count, spinlock_t *lock);
-
-struct clk_zx_audio {
- struct clk_hw hw;
- void __iomem *reg_base;
-};
-
-struct clk *clk_register_zx_audio(const char *name,
- const char * const parent_name,
- unsigned long flags, void __iomem *reg_base);
-
-extern const struct clk_ops zx_pll_ops;
-extern const struct clk_ops zx_audio_div_ops;
-
-#endif
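
The header above assembles each clock table entry from macros over small composite structs (a generic clk_mux/clk_gate/clk_divider plus a provider id, where id 0 means the clock is not exported). A hypothetical use of those macros, with made-up parents, register offset, and id, and assuming the driver's usual clk_lock spinlock is in scope:

PNAME(example_p) = {
	"osc24m",
	"pll_div",
};

static struct zx_clk_mux example_mux_clk[] = {
	/* id 0: internal clock, not exposed through the onecell provider */
	MUX(0, "example_mux", example_p, 0x24, 4, 1),
};

static struct zx_clk_gate example_gate_clk[] = {
	/* EXAMPLE_WCLK is a hypothetical dt-binding id; enable bit 1 */
	GATE(EXAMPLE_WCLK, "example_wclk", "example_mux", 0x24, 1,
	     CLK_SET_RATE_PARENT, 0),
};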
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index ffbb9008c1c9..204b83d911b9 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -103,7 +103,6 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
const char **parents, int enable)
{
- struct clk *clk;
u32 enable_reg;
char *mux_name;
char *div0_name;
@@ -131,15 +130,15 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
if (!div1_name)
goto err_div1_name;
- clk = clk_register_mux(NULL, mux_name, parents, 4,
+ clk_register_mux(NULL, mux_name, parents, 4,
CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
fclk_lock);
- clk = clk_register_divider(NULL, div0_name, mux_name,
+ clk_register_divider(NULL, div0_name, mux_name,
0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, fclk_lock);
- clk = clk_register_divider(NULL, div1_name, div0_name,
+ clk_register_divider(NULL, div1_name, div0_name,
CLK_SET_RATE_PARENT, fclk_ctrl_reg, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
fclk_lock);
@@ -176,7 +175,6 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
const char *clk_name1, void __iomem *clk_ctrl,
const char **parents, unsigned int two_gates)
{
- struct clk *clk;
char *mux_name;
char *div_name;
spinlock_t *lock;
@@ -189,10 +187,10 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name0);
div_name = kasprintf(GFP_KERNEL, "%s_div", clk_name0);
- clk = clk_register_mux(NULL, mux_name, parents, 4,
+ clk_register_mux(NULL, mux_name, parents, 4,
CLK_SET_RATE_NO_REPARENT, clk_ctrl, 4, 2, 0, lock);
- clk = clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
+ clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, lock);
clks[clk0] = clk_register_gate(NULL, clk_name0, div_name,
@@ -217,7 +215,6 @@ static void __init zynq_clk_setup(struct device_node *np)
int i;
u32 tmp;
int ret;
- struct clk *clk;
char *clk_name;
unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
@@ -257,19 +254,19 @@ static void __init zynq_clk_setup(struct device_node *np)
ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, 0, tmp);
/* PLLs */
- clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
+ clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
SLCR_PLL_STATUS, 0, &armpll_lock);
clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);
- clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
+ clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
SLCR_PLL_STATUS, 1, &ddrpll_lock);
clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);
- clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
+ clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
SLCR_PLL_STATUS, 2, &iopll_lock);
clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
@@ -277,10 +274,10 @@ static void __init zynq_clk_setup(struct device_node *np)
/* CPU clocks */
tmp = readl(SLCR_621_TRUE) & 1;
- clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
+ clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
&armclk_lock);
- clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
+ clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);
@@ -288,20 +285,20 @@ static void __init zynq_clk_setup(struct device_node *np)
"cpu_div", CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
SLCR_ARM_CLK_CTRL, 24, 0, &armclk_lock);
- clk = clk_register_fixed_factor(NULL, "cpu_3or2x_div", "cpu_div", 0,
+ clk_register_fixed_factor(NULL, "cpu_3or2x_div", "cpu_div", 0,
1, 2);
clks[cpu_3or2x] = clk_register_gate(NULL, clk_output_name[cpu_3or2x],
"cpu_3or2x_div", CLK_IGNORE_UNUSED,
SLCR_ARM_CLK_CTRL, 25, 0, &armclk_lock);
- clk = clk_register_fixed_factor(NULL, "cpu_2x_div", "cpu_div", 0, 1,
+ clk_register_fixed_factor(NULL, "cpu_2x_div", "cpu_div", 0, 1,
2 + tmp);
clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
26, 0, &armclk_lock);
clk_prepare_enable(clks[cpu_2x]);
- clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
+ clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
4 + 2 * tmp);
clks[cpu_1x] = clk_register_gate(NULL, clk_output_name[cpu_1x],
"cpu_1x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 27,
@@ -324,23 +321,23 @@ static void __init zynq_clk_setup(struct device_node *np)
&swdtclk_lock);
/* DDR clocks */
- clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
+ clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
SLCR_DDR_CLK_CTRL, 26, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
clks[ddr2x] = clk_register_gate(NULL, clk_output_name[ddr2x],
"ddr2x_div", 0, SLCR_DDR_CLK_CTRL, 1, 0, &ddrclk_lock);
clk_prepare_enable(clks[ddr2x]);
- clk = clk_register_divider(NULL, "ddr3x_div", "ddrpll", 0,
+ clk_register_divider(NULL, "ddr3x_div", "ddrpll", 0,
SLCR_DDR_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
clks[ddr3x] = clk_register_gate(NULL, clk_output_name[ddr3x],
"ddr3x_div", 0, SLCR_DDR_CLK_CTRL, 0, 0, &ddrclk_lock);
clk_prepare_enable(clks[ddr3x]);
- clk = clk_register_divider(NULL, "dci_div0", "ddrpll", 0,
+ clk_register_divider(NULL, "dci_div0", "ddrpll", 0,
SLCR_DCI_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dciclk_lock);
- clk = clk_register_divider(NULL, "dci_div1", "dci_div0",
+ clk_register_divider(NULL, "dci_div1", "dci_div0",
CLK_SET_RATE_PARENT, SLCR_DCI_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&dciclk_lock);
@@ -385,17 +382,17 @@ static void __init zynq_clk_setup(struct device_node *np)
gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
+ clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
&gem0clk_lock);
- clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
+ clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
- clk = clk_register_divider(NULL, "gem0_div1", "gem0_div0",
+ clk_register_divider(NULL, "gem0_div1", "gem0_div0",
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
- clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+ clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SLCR_GEM0_CLK_CTRL, 6, 1, 0,
&gem0clk_lock);
@@ -410,17 +407,17 @@ static void __init zynq_clk_setup(struct device_node *np)
gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
+ clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
&gem1clk_lock);
- clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
+ clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
- clk = clk_register_divider(NULL, "gem1_div1", "gem1_div0",
+ clk_register_divider(NULL, "gem1_div1", "gem1_div0",
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
- clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+ clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SLCR_GEM1_CLK_CTRL, 6, 1, 0,
&gem1clk_lock);
@@ -442,27 +439,27 @@ static void __init zynq_clk_setup(struct device_node *np)
can_mio_mux_parents[i] = dummy_nm;
}
kfree(clk_name);
- clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ clk_register_mux(NULL, "can_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
&canclk_lock);
- clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
+ clk_register_divider(NULL, "can_div0", "can_mux", 0,
SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
- clk = clk_register_divider(NULL, "can_div1", "can_div0",
+ clk_register_divider(NULL, "can_div1", "can_div0",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&canclk_lock);
- clk = clk_register_gate(NULL, "can0_gate", "can_div1",
+ clk_register_gate(NULL, "can0_gate", "can_div1",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 0, 0,
&canclk_lock);
- clk = clk_register_gate(NULL, "can1_gate", "can_div1",
+ clk_register_gate(NULL, "can1_gate", "can_div1",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
&canclk_lock);
- clk = clk_register_mux(NULL, "can0_mio_mux",
+ clk_register_mux(NULL, "can0_mio_mux",
can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
&canmioclk_lock);
- clk = clk_register_mux(NULL, "can1_mio_mux",
+ clk_register_mux(NULL, "can1_mio_mux",
can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
0, &canmioclk_lock);
@@ -482,13 +479,13 @@ static void __init zynq_clk_setup(struct device_node *np)
dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
+ clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
&dbgclk_lock);
- clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
+ clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
- clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
+ clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
&dbgclk_lock);
clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index dcb2037a9596..54f4184de89a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -173,12 +173,12 @@ static const struct clk_ops zynq_pll_ops = {
/**
* clk_register_zynq_pll() - Register PLL with the clock framework
- * @name PLL name
- * @parent Parent clock name
- * @pll_ctrl Pointer to PLL control register
- * @pll_status Pointer to PLL status register
- * @lock_index Bit index to this PLL's lock status bit in @pll_status
- * @lock Register lock
+ * @name: PLL name
+ * @parent: Parent clock name
+ * @pll_ctrl: Pointer to PLL control register
+ * @pll_status: Pointer to PLL status register
+ * @lock_index: Bit index to this PLL's lock status bit in @pll_status
+ * @lock: Register lock
* Returns handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 66da02b83d39..e9bf7958b821 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -35,6 +35,7 @@
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ * @max_div: maximum supported divisor (fetched from firmware)
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 14c7c4712478..39aa21d01e05 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -79,6 +79,7 @@ config IXP4XX_TIMER
bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select CLKSRC_MMIO
+ select TIMER_OF if OF
help
Enables support for the Intel XScale IXP4xx SoC timer.
@@ -197,12 +198,6 @@ config CLPS711X_TIMER
help
Enables support for the Cirrus Logic PS711 timer.
-config ATLAS7_TIMER
- bool "Atlas7 timer driver" if COMPILE_TEST
- select CLKSRC_MMIO
- help
- Enables support for the Atlas7 timer.
-
config MXS_TIMER
bool "MXS timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -210,19 +205,6 @@ config MXS_TIMER
help
Enables support for the MXS timer.
-config PRIMA2_TIMER
- bool "Prima2 timer driver" if COMPILE_TEST
- select CLKSRC_MMIO
- help
- Enables support for the Prima2 timer.
-
-config U300_TIMER
- bool "U300 timer driver" if COMPILE_TEST
- depends on ARM
- select CLKSRC_MMIO
- help
- Enables support for the U300 timer.
-
config NSPIRE_TIMER
bool "NSpire timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -242,15 +224,6 @@ config INTEGRATOR_AP_TIMER
help
Enables support for the Integrator-AP timer.
-config CLKSRC_EFM32
- bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
- depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
- select CLKSRC_MMIO
- default ARCH_EFM32
- help
- Support to use the timers of EFM32 SoCs as clock source and clock
- event device.
-
config CLKSRC_LPC32XX
bool "Clocksource for LPC32XX" if COMPILE_TEST
depends on HAS_IOMEM
@@ -567,14 +540,6 @@ config CLKSRC_MIPS_GIC
select CLOCKSOURCE_WATCHDOG
select TIMER_OF
-config CLKSRC_TANGO_XTAL
- bool "Clocksource for Tango SoC" if COMPILE_TEST
- depends on ARM
- select TIMER_OF
- select CLKSRC_MMIO
- help
- This enables the clocksource for Tango SoC.
-
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
depends on HAS_IOMEM
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 3c75cbbf8533..c17ee32a7151 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -30,11 +30,8 @@ obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += timer-orion.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
-obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += timer-pxa.o
-obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
-obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
@@ -43,7 +40,6 @@ obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
-obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_STM32_LP) += timer-stm32-lp.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
@@ -73,7 +69,6 @@ obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
-obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_TIMER_IMX_SYS_CTR) += timer-imx-sysctr.o
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ba04cb381cd3..269a691bd2c4 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -426,6 +426,9 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
+ if (hv_root_partition)
+ return false;
+
hv_read_reference_counter = read_hv_clock_tsc;
phys_addr = virt_to_phys(hv_get_tsc_page());
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index bc96a4cbf26c..e52e12d27d2a 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -131,10 +131,7 @@ static void mxs_irq_clear(char *state)
/* Clear pending interrupt */
timrot_irq_acknowledge();
-
-#ifdef DEBUG
- pr_info("%s: changing mode to %s\n", __func__, state)
-#endif /* DEBUG */
+ pr_debug("%s: changing mode to %s\n", __func__, state);
}
static int mxs_shutdown(struct clock_event_device *evt)
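
The mxs change above drops an open-coded #ifdef DEBUG block (which, as removed, even lacked a trailing semicolon, so it could not have compiled with DEBUG set) in favor of pr_debug(), which already compiles to nothing unless DEBUG is defined or dynamic debug enables it at runtime. The convention, in a hypothetical file:

#define DEBUG			/* before printk.h, or build with -DDEBUG */
#include <linux/printk.h>

static void example(void)
{
	/* Emitted only on DEBUG builds or when enabled via dynamic debug. */
	pr_debug("changing mode\n");
}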
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index e258230d432c..c98f8851fd68 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -235,6 +235,8 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
+#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */
+
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
@@ -853,6 +855,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
unsigned int hwidx, bool clockevent,
bool clocksource, struct sh_cmt_device *cmt)
{
+ u32 value;
int ret;
/* Skip unused channels. */
@@ -882,6 +885,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
ch->ioctrl = ch->iostart + 0x10;
ch->timer_bit = 0;
+
+ /* Enable the clock supply to the channel */
+ value = ioread32(cmt->mapbase + CMCLKE);
+ value |= BIT(hwidx);
+ iowrite32(value, cmt->mapbase + CMCLKE);
break;
}
@@ -1014,12 +1022,10 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
else
cmt->rate = clk_get_rate(cmt->clk) / 8;
- clk_disable(cmt->clk);
-
/* Map the memory resource(s). */
ret = sh_cmt_map_memory(cmt);
if (ret < 0)
- goto err_clk_unprepare;
+ goto err_clk_disable;
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
@@ -1047,6 +1053,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
+ clk_disable(cmt->clk);
+
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1054,6 +1062,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
err_unmap:
kfree(cmt->channels);
iounmap(cmt->mapbase);
+err_clk_disable:
+ clk_disable(cmt->clk);
err_clk_unprepare:
clk_unprepare(cmt->clk);
err_clk_put:
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
deleted file mode 100644
index c21c91c2bc56..000000000000
--- a/drivers/clocksource/timer-atlas7.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * System timer for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/cpu.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/sched_clock.h>
-
-#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
-#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004
-#define SIRFSOC_TIMER_MATCH_0 0x0018
-#define SIRFSOC_TIMER_MATCH_1 0x001c
-#define SIRFSOC_TIMER_COUNTER_0 0x0048
-#define SIRFSOC_TIMER_COUNTER_1 0x004c
-#define SIRFSOC_TIMER_INTR_STATUS 0x0060
-#define SIRFSOC_TIMER_WATCHDOG_EN 0x0064
-#define SIRFSOC_TIMER_64COUNTER_CTRL 0x0068
-#define SIRFSOC_TIMER_64COUNTER_LO 0x006c
-#define SIRFSOC_TIMER_64COUNTER_HI 0x0070
-#define SIRFSOC_TIMER_64COUNTER_LOAD_LO 0x0074
-#define SIRFSOC_TIMER_64COUNTER_LOAD_HI 0x0078
-#define SIRFSOC_TIMER_64COUNTER_RLATCHED_LO 0x007c
-#define SIRFSOC_TIMER_64COUNTER_RLATCHED_HI 0x0080
-
-#define SIRFSOC_TIMER_REG_CNT 6
-
-static unsigned long atlas7_timer_rate;
-
-static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
- SIRFSOC_TIMER_WATCHDOG_EN,
- SIRFSOC_TIMER_32COUNTER_0_CTRL,
- SIRFSOC_TIMER_32COUNTER_1_CTRL,
- SIRFSOC_TIMER_64COUNTER_CTRL,
- SIRFSOC_TIMER_64COUNTER_RLATCHED_LO,
- SIRFSOC_TIMER_64COUNTER_RLATCHED_HI,
-};
-
-static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
-
-static void __iomem *sirfsoc_timer_base;
-
-/* disable count and interrupt */
-static inline void sirfsoc_timer_count_disable(int idx)
-{
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) & ~0x7,
- sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
-}
-
-/* enable count and interrupt */
-static inline void sirfsoc_timer_count_enable(int idx)
-{
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) | 0x3,
- sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
-}
-
-/* timer interrupt handler */
-static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *ce = dev_id;
- int cpu = smp_processor_id();
-
- /* clear timer interrupt */
- writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
-
- if (clockevent_state_oneshot(ce))
- sirfsoc_timer_count_disable(cpu);
-
- ce->event_handler(ce);
-
- return IRQ_HANDLED;
-}
-
-/* read 64-bit timer counter */
-static u64 sirfsoc_timer_read(struct clocksource *cs)
-{
- u64 cycles;
-
- writel_relaxed((readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(0)) & ~BIT(1), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
-
- cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_HI);
- cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_LO);
-
- return cycles;
-}
-
-static int sirfsoc_timer_set_next_event(unsigned long delta,
- struct clock_event_device *ce)
-{
- int cpu = smp_processor_id();
-
- /* disable timer first, then modify the related registers */
- sirfsoc_timer_count_disable(cpu);
-
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0 +
- 4 * cpu);
- writel_relaxed(delta, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0 +
- 4 * cpu);
-
- /* enable the tick */
- sirfsoc_timer_count_enable(cpu);
-
- return 0;
-}
-
-/* Oneshot is enabled in set_next_event */
-static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
-{
- sirfsoc_timer_count_disable(smp_processor_id());
- return 0;
-}
-
-static void sirfsoc_clocksource_suspend(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
- sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-}
-
-static void sirfsoc_clocksource_resume(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
- writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
- sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
- sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
-
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
-}
-
-static struct clock_event_device __percpu *sirfsoc_clockevent;
-
-static struct clocksource sirfsoc_clocksource = {
- .name = "sirfsoc_clocksource",
- .rating = 200,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .read = sirfsoc_timer_read,
- .suspend = sirfsoc_clocksource_suspend,
- .resume = sirfsoc_clocksource_resume,
-};
-
-static unsigned int sirfsoc_timer_irq, sirfsoc_timer1_irq;
-
-static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
-{
- struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
- unsigned int irq;
- const char *name;
-
- if (cpu == 0) {
- irq = sirfsoc_timer_irq;
- name = "sirfsoc_timer0";
- } else {
- irq = sirfsoc_timer1_irq;
- name = "sirfsoc_timer1";
- }
-
- ce->irq = irq;
- ce->name = "local_timer";
- ce->features = CLOCK_EVT_FEAT_ONESHOT;
- ce->rating = 200;
- ce->set_state_shutdown = sirfsoc_timer_shutdown;
- ce->set_state_oneshot = sirfsoc_timer_shutdown;
- ce->tick_resume = sirfsoc_timer_shutdown;
- ce->set_next_event = sirfsoc_timer_set_next_event;
- clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
- ce->max_delta_ns = clockevent_delta2ns(-2, ce);
- ce->max_delta_ticks = (unsigned long)-2;
- ce->min_delta_ns = clockevent_delta2ns(2, ce);
- ce->min_delta_ticks = 2;
- ce->cpumask = cpumask_of(cpu);
-
- BUG_ON(request_irq(ce->irq, sirfsoc_timer_interrupt,
- IRQF_TIMER | IRQF_NOBALANCING, name, ce));
- irq_force_affinity(ce->irq, cpumask_of(cpu));
-
- clockevents_register_device(ce);
- return 0;
-}
-
-static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
-{
- struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
-
- sirfsoc_timer_count_disable(1);
-
- if (cpu == 0)
- free_irq(sirfsoc_timer_irq, ce);
- else
- free_irq(sirfsoc_timer1_irq, ce);
- return 0;
-}
-
-static int __init sirfsoc_clockevent_init(void)
-{
- sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
- BUG_ON(!sirfsoc_clockevent);
-
- /* Install and invoke hotplug callbacks */
- return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
- "clockevents/marco:starting",
- sirfsoc_local_timer_starting_cpu,
- sirfsoc_local_timer_dying_cpu);
-}
-
-/* initialize the kernel jiffy timer source */
-static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
-{
- struct clk *clk;
-
- clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
-
- BUG_ON(clk_prepare_enable(clk));
-
- atlas7_timer_rate = clk_get_rate(clk);
-
- /* timer dividers: 0, not divided */
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
-
- /* Initialize timer counters to 0 */
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_1);
-
- /* Clear all interrupts */
- writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
-
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
-
- return sirfsoc_clockevent_init();
-}
-
-static int __init sirfsoc_of_timer_init(struct device_node *np)
-{
- sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base) {
- pr_err("unable to map timer cpu registers\n");
- return -ENXIO;
- }
-
- sirfsoc_timer_irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq) {
- pr_err("No irq passed for timer0 via DT\n");
- return -EINVAL;
- }
-
- sirfsoc_timer1_irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq) {
- pr_err("No irq passed for timer1 via DT\n");
- return -EINVAL;
- }
-
- return sirfsoc_atlas7_timer_init(np);
-}
-TIMER_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
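
Early timers such as the driver deleted above register through TIMER_OF_DECLARE() rather than a platform driver, so they can come up before the device model is available. The skeleton, with hypothetical names and compatible string:

static int __init example_timer_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base) {
		pr_err("unable to map timer registers\n");
		return -ENXIO;
	}

	/* ... set up the clocksource and clockevents against base ... */
	return 0;
}
TIMER_OF_DECLARE(example_timer, "vendor,example-timer", example_timer_init);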
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index bb4eee31ae08..9996c0542520 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -7,6 +7,8 @@
* (with tiny parts adopted from code by Kevin Hilman <khilman@baylibre.com>)
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -17,9 +19,6 @@
#include <clocksource/timer-davinci.h>
-#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
#define DAVINCI_TIMER_REG_PRD12 0x18
diff --git a/drivers/clocksource/timer-efm32.c b/drivers/clocksource/timer-efm32.c
deleted file mode 100644
index 441a4b916841..000000000000
--- a/drivers/clocksource/timer-efm32.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/clk.h>
-
-#define TIMERn_CTRL 0x00
-#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24)
-#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10)
-#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16)
-#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0)
-#define TIMERn_CTRL_OSMEN 0x00000010
-#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0)
-#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0)
-#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1)
-
-#define TIMERn_CMD 0x04
-#define TIMERn_CMD_START 0x00000001
-#define TIMERn_CMD_STOP 0x00000002
-
-#define TIMERn_IEN 0x0c
-#define TIMERn_IF 0x10
-#define TIMERn_IFS 0x14
-#define TIMERn_IFC 0x18
-#define TIMERn_IRQ_UF 0x00000002
-
-#define TIMERn_TOP 0x1c
-#define TIMERn_CNT 0x24
-
-struct efm32_clock_event_ddata {
- struct clock_event_device evtdev;
- void __iomem *base;
- unsigned periodic_top;
-};
-
-static int efm32_clock_event_shutdown(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- return 0;
-}
-
-static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_OSMEN |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- return 0;
-}
-
-static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
- return 0;
-}
-
-static int efm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(evt, ddata->base + TIMERn_CNT);
- writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
-
- return 0;
-}
-
-static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
-{
- struct efm32_clock_event_ddata *ddata = dev_id;
-
- writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
-
- ddata->evtdev.event_handler(&ddata->evtdev);
-
- return IRQ_HANDLED;
-}
-
-static struct efm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "efm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = efm32_clock_event_shutdown,
- .set_state_periodic = efm32_clock_event_set_periodic,
- .set_state_oneshot = efm32_clock_event_set_oneshot,
- .set_next_event = efm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
-
-static int __init efm32_clocksource_init(struct device_node *np)
-{
- struct clk *clk;
- void __iomem *base;
- unsigned long rate;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clocksource (%d)\n", ret);
- goto err_clk_get;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clocksource (%d)\n",
- ret);
- goto err_clk_enable;
- }
- rate = clk_get_rate(clk);
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -EADDRNOTAVAIL;
- pr_err("failed to map registers for clocksource\n");
- goto err_iomap;
- }
-
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
- writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
-
- ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
- DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
- clocksource_mmio_readl_up);
- if (ret) {
- pr_err("failed to init clocksource (%d)\n", ret);
- goto err_clocksource_init;
- }
-
- return 0;
-
-err_clocksource_init:
-
- iounmap(base);
-err_iomap:
-
- clk_disable_unprepare(clk);
-err_clk_enable:
-
- clk_put(clk);
-err_clk_get:
-
- return ret;
-}
-
-static int __init efm32_clockevent_init(struct device_node *np)
-{
- struct clk *clk;
- void __iomem *base;
- unsigned long rate;
- int irq;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
- rate = clk_get_rate(clk);
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -EADDRNOTAVAIL;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -ENOENT;
- pr_err("failed to get irq for clockevent\n");
- goto err_get_irq;
- }
-
- writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
-
- clock_event_ddata.base = base;
- clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
-
- clockevents_config_and_register(&clock_event_ddata.evtdev,
- DIV_ROUND_CLOSEST(rate, 1024),
- 0xf, 0xffff);
-
- ret = request_irq(irq, efm32_clock_event_handler, IRQF_TIMER,
- "efm32 clockevent", &clock_event_ddata);
- if (ret) {
- pr_err("Failed setup irq\n");
- goto err_setup_irq;
- }
-
- return 0;
-
-err_setup_irq:
-err_get_irq:
-
- iounmap(base);
-err_iomap:
-
- clk_disable_unprepare(clk);
-err_clk_enable:
-
- clk_put(clk);
-err_clk_get:
-
- return ret;
-}
-
-/*
- * This function asserts that we have exactly one clocksource and one
- * clock_event_device in the end.
- */
-static int __init efm32_timer_init(struct device_node *np)
-{
- static int has_clocksource, has_clockevent;
- int ret = 0;
-
- if (!has_clocksource) {
- ret = efm32_clocksource_init(np);
- if (!ret) {
- has_clocksource = 1;
- return 0;
- }
- }
-
- if (!has_clockevent) {
- ret = efm32_clockevent_init(np);
- if (!ret) {
- has_clockevent = 1;
- return 0;
- }
- }
-
- return ret;
-}
-TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
-TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
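
The removed efm32 driver's init hook ran once per matching device-tree
node, using static flags so the first successful call claimed the
clocksource role and the next the clockevent. A minimal userspace sketch
of that dispatch, with stubbed init functions standing in for the real
hardware setup (init_clocksource() and init_clockevent() are
illustrative names, not kernel APIs):

    /* Model of the one-clocksource/one-clockevent dispatch: the OF init
     * hook is invoked once per matching node; static flags route the
     * first success to the clocksource, the second to the clockevent.
     */
    #include <stdio.h>

    static int init_clocksource(void) { return 0; }  /* stub: 0 == ok */
    static int init_clockevent(void)  { return 0; }

    static int timer_init(void)
    {
        static int has_clocksource, has_clockevent;
        int ret = 0;

        if (!has_clocksource) {
            ret = init_clocksource();
            if (!ret) {
                has_clocksource = 1;
                return 0;
            }
        }
        if (!has_clockevent) {
            ret = init_clockevent();
            if (!ret) {
                has_clockevent = 1;
                return 0;
            }
        }
        return ret;
    }

    int main(void)
    {
        /* Two matching DT nodes -> two calls, two distinct roles. */
        printf("first node:  %d\n", timer_init());
        printf("second node: %d\n", timer_init());
        return 0;
    }
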
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index 59e11ca8ee73..ab623b25a47b 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -71,10 +71,24 @@ struct mchp_pit64b_clkevt {
struct clock_event_device clkevt;
};
-#define to_mchp_pit64b_timer(x) \
+#define clkevt_to_mchp_pit64b_timer(x) \
((struct mchp_pit64b_timer *)container_of(x,\
struct mchp_pit64b_clkevt, clkevt))
+/**
+ * struct mchp_pit64b_clksrc - PIT64B clocksource data structure
+ * @timer: PIT64B timer
+ * @clksrc: clocksource
+ */
+struct mchp_pit64b_clksrc {
+ struct mchp_pit64b_timer timer;
+ struct clocksource clksrc;
+};
+
+#define clksrc_to_mchp_pit64b_timer(x) \
+ ((struct mchp_pit64b_timer *)container_of(x,\
+ struct mchp_pit64b_clksrc, clksrc))
+
/* Base address for clocksource timer. */
static void __iomem *mchp_pit64b_cs_base;
/* Default cycles for clockevent timer. */
@@ -116,6 +130,36 @@ static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
}
+static void mchp_pit64b_suspend(struct mchp_pit64b_timer *timer)
+{
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer->gclk);
+ clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_resume(struct mchp_pit64b_timer *timer)
+{
+ clk_prepare_enable(timer->pclk);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_prepare_enable(timer->gclk);
+}
+
+static void mchp_pit64b_clksrc_suspend(struct clocksource *cs)
+{
+ struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
+
+ mchp_pit64b_suspend(timer);
+}
+
+static void mchp_pit64b_clksrc_resume(struct clocksource *cs)
+{
+ struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
+
+ mchp_pit64b_resume(timer);
+ mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+}
+
static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
@@ -128,7 +172,7 @@ static u64 mchp_pit64b_sched_read_clk(void)
static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
@@ -137,7 +181,7 @@ static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
MCHP_PIT64B_IER_PERIOD);
@@ -148,7 +192,7 @@ static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
MCHP_PIT64B_IER_PERIOD);
@@ -158,21 +202,16 @@ static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
- if (timer->mode & MCHP_PIT64B_MR_SGCLK)
- clk_disable_unprepare(timer->gclk);
- clk_disable_unprepare(timer->pclk);
+ mchp_pit64b_suspend(timer);
}
static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- clk_prepare_enable(timer->pclk);
- if (timer->mode & MCHP_PIT64B_MR_SGCLK)
- clk_prepare_enable(timer->gclk);
+ mchp_pit64b_resume(timer);
}
static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
@@ -296,20 +335,37 @@ done:
static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
u32 clk_rate)
{
+ struct mchp_pit64b_clksrc *cs;
int ret;
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
mchp_pit64b_cs_base = timer->base;
- ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
- 210, 64, mchp_pit64b_clksrc_read);
+ cs->timer.base = timer->base;
+ cs->timer.pclk = timer->pclk;
+ cs->timer.gclk = timer->gclk;
+ cs->timer.mode = timer->mode;
+ cs->clksrc.name = MCHP_PIT64B_NAME;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(64);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.rating = 210;
+ cs->clksrc.read = mchp_pit64b_clksrc_read;
+ cs->clksrc.suspend = mchp_pit64b_clksrc_suspend;
+ cs->clksrc.resume = mchp_pit64b_clksrc_resume;
+
+ ret = clocksource_register_hz(&cs->clksrc, clk_rate);
if (ret) {
pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
/* Stop timer. */
writel_relaxed(MCHP_PIT64B_CR_SWRST,
timer->base + MCHP_PIT64B_CR);
+ kfree(cs);
return ret;
}
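
The new clksrc_to_mchp_pit64b_timer() macro walks from the embedded
struct clocksource back to the wrapper allocated in
mchp_pit64b_init_clksrc(), which is what lets the suspend/resume
callbacks reach the timer's clocks. A self-contained sketch of the same
container_of() pattern with simplified stand-in types (struct
clksrc_wrapper and clksrc_to_timer() are illustrative names, not the
driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int mode; };
    struct clocksource { const char *name; };

    /* Timer embedded next to the clocksource, as in the patch. */
    struct clksrc_wrapper {
        struct timer timer;
        struct clocksource clksrc;
    };

    static struct timer *clksrc_to_timer(struct clocksource *cs)
    {
        return &container_of(cs, struct clksrc_wrapper, clksrc)->timer;
    }

    int main(void)
    {
        struct clksrc_wrapper w = {
            .timer  = { .mode = 42 },
            .clksrc = { .name = "pit64b" },
        };

        /* A suspend callback would receive only &w.clksrc ... */
        printf("mode = %d\n", clksrc_to_timer(&w.clksrc)->mode); /* 42 */
        return 0;
    }
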
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
deleted file mode 100644
index c5d469342a9d..000000000000
--- a/drivers/clocksource/timer-prima2.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * System timer for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/sched_clock.h>
-
-#define PRIMA2_CLOCK_FREQ 1000000
-
-#define SIRFSOC_TIMER_COUNTER_LO 0x0000
-#define SIRFSOC_TIMER_COUNTER_HI 0x0004
-#define SIRFSOC_TIMER_MATCH_0 0x0008
-#define SIRFSOC_TIMER_MATCH_1 0x000C
-#define SIRFSOC_TIMER_MATCH_2 0x0010
-#define SIRFSOC_TIMER_MATCH_3 0x0014
-#define SIRFSOC_TIMER_MATCH_4 0x0018
-#define SIRFSOC_TIMER_MATCH_5 0x001C
-#define SIRFSOC_TIMER_STATUS 0x0020
-#define SIRFSOC_TIMER_INT_EN 0x0024
-#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
-#define SIRFSOC_TIMER_DIV 0x002C
-#define SIRFSOC_TIMER_LATCH 0x0030
-#define SIRFSOC_TIMER_LATCHED_LO 0x0034
-#define SIRFSOC_TIMER_LATCHED_HI 0x0038
-
-#define SIRFSOC_TIMER_WDT_INDEX 5
-
-#define SIRFSOC_TIMER_LATCH_BIT BIT(0)
-
-#define SIRFSOC_TIMER_REG_CNT 11
-
-static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
- SIRFSOC_TIMER_MATCH_0, SIRFSOC_TIMER_MATCH_1, SIRFSOC_TIMER_MATCH_2,
- SIRFSOC_TIMER_MATCH_3, SIRFSOC_TIMER_MATCH_4, SIRFSOC_TIMER_MATCH_5,
- SIRFSOC_TIMER_INT_EN, SIRFSOC_TIMER_WATCHDOG_EN, SIRFSOC_TIMER_DIV,
- SIRFSOC_TIMER_LATCHED_LO, SIRFSOC_TIMER_LATCHED_HI,
-};
-
-static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
-
-static void __iomem *sirfsoc_timer_base;
-
-/* timer0 interrupt handler */
-static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *ce = dev_id;
-
- WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) &
- BIT(0)));
-
- /* clear timer0 interrupt */
- writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
-
- ce->event_handler(ce);
-
- return IRQ_HANDLED;
-}
-
-/* read 64-bit timer counter */
-static u64 notrace sirfsoc_timer_read(struct clocksource *cs)
-{
- u64 cycles;
-
- /* latch the 64-bit timer counter */
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI);
- cycles = (cycles << 32) |
- readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
-
- return cycles;
-}
-
-static int sirfsoc_timer_set_next_event(unsigned long delta,
- struct clock_event_device *ce)
-{
- unsigned long now, next;
-
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
- next = now + delta;
- writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0);
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
-
- return next - now > delta ? -ETIME : 0;
-}
-
-static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
-{
- u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-
- writel_relaxed(val & ~BIT(0),
- sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- return 0;
-}
-
-static int sirfsoc_timer_set_oneshot(struct clock_event_device *evt)
-{
- u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-
- writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- return 0;
-}
-
-static void sirfsoc_clocksource_suspend(struct clocksource *cs)
-{
- int i;
-
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
- sirfsoc_timer_reg_val[i] =
- readl_relaxed(sirfsoc_timer_base +
- sirfsoc_timer_reg_list[i]);
-}
-
-static void sirfsoc_clocksource_resume(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
- writel_relaxed(sirfsoc_timer_reg_val[i],
- sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
- sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
- sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
-}
-
-static struct clock_event_device sirfsoc_clockevent = {
- .name = "sirfsoc_clockevent",
- .rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = sirfsoc_timer_shutdown,
- .set_state_oneshot = sirfsoc_timer_set_oneshot,
- .set_next_event = sirfsoc_timer_set_next_event,
-};
-
-static struct clocksource sirfsoc_clocksource = {
- .name = "sirfsoc_clocksource",
- .rating = 200,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .read = sirfsoc_timer_read,
- .suspend = sirfsoc_clocksource_suspend,
- .resume = sirfsoc_clocksource_resume,
-};
-
-/* Overwrite weak default sched_clock with more precise one */
-static u64 notrace sirfsoc_read_sched_clock(void)
-{
- return sirfsoc_timer_read(NULL);
-}
-
-static void __init sirfsoc_clockevent_init(void)
-{
- sirfsoc_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sirfsoc_clockevent, PRIMA2_CLOCK_FREQ,
- 2, -2);
-}
-
-/* initialize the kernel jiffy timer source */
-static int __init sirfsoc_prima2_timer_init(struct device_node *np)
-{
- unsigned long rate;
- unsigned int irq;
- struct clk *clk;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_err("Failed to get clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Failed to enable clock\n");
- return ret;
- }
-
- rate = clk_get_rate(clk);
-
- if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
- pr_err("Invalid clock rate\n");
- return -EINVAL;
- }
-
- sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base) {
- pr_err("unable to map timer cpu registers\n");
- return -ENXIO;
- }
-
- irq = irq_of_parse_and_map(np, 0);
-
- writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
- sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
- writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
-
- ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
- if (ret) {
- pr_err("Failed to register clocksource\n");
- return ret;
- }
-
- sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
-
- ret = request_irq(irq, sirfsoc_timer_interrupt, IRQF_TIMER,
- "sirfsoc_timer0", &sirfsoc_clockevent);
- if (ret) {
- pr_err("Failed to setup irq\n");
- return ret;
- }
-
- sirfsoc_clockevent_init();
-
- return 0;
-}
-TIMER_OF_DECLARE(sirfsoc_prima2_timer,
- "sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-tango-xtal.c b/drivers/clocksource/timer-tango-xtal.c
deleted file mode 100644
index 3f94e454ef99..000000000000
--- a/drivers/clocksource/timer-tango-xtal.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/clocksource.h>
-#include <linux/sched_clock.h>
-#include <linux/of_address.h>
-#include <linux/printk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-static void __iomem *xtal_in_cnt;
-static struct delay_timer delay_timer;
-
-static unsigned long notrace read_xtal_counter(void)
-{
- return readl_relaxed(xtal_in_cnt);
-}
-
-static u64 notrace read_sched_clock(void)
-{
- return read_xtal_counter();
-}
-
-static int __init tango_clocksource_init(struct device_node *np)
-{
- struct clk *clk;
- int xtal_freq, ret;
-
- xtal_in_cnt = of_iomap(np, 0);
- if (xtal_in_cnt == NULL) {
- pr_err("%pOF: invalid address\n", np);
- return -ENXIO;
- }
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_err("%pOF: invalid clock\n", np);
- return PTR_ERR(clk);
- }
-
- xtal_freq = clk_get_rate(clk);
- delay_timer.freq = xtal_freq;
- delay_timer.read_current_timer = read_xtal_counter;
-
- ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
- 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("%pOF: registration failed\n", np);
- return ret;
- }
-
- sched_clock_register(read_sched_clock, 32, xtal_freq);
- register_current_timer_delay(&delay_timer);
-
- return 0;
-}
-
-TIMER_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
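
Besides the clocksource, the deleted tango driver registered a struct
delay_timer, which turns udelay() into a poll on the free-running
counter instead of a calibrated busy loop. A rough userspace sketch of
that idea, assuming clock_gettime() as a stand-in counter read and an
illustrative tick frequency:

    #include <stdint.h>
    #include <time.h>

    static uint64_t read_counter(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static void delay_us(unsigned int us)
    {
        const uint64_t freq = 1000000000ULL; /* assumed ticks/second */
        uint64_t end = read_counter() + (uint64_t)us * (freq / 1000000ULL);

        while (read_counter() < end)
            ;   /* spin on the counter, no calibration needed */
    }

    int main(void)
    {
        delay_us(100);
        return 0;
    }
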
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
deleted file mode 100644
index 37cba8dfd45f..000000000000
--- a/drivers/clocksource/timer-u300.c
+++ /dev/null
@@ -1,457 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * Timer COH 901 328, runs the OS timer interrupt.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/* Generic stuff */
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-/*
- * APP side special timer registers
- * This timer contains four timers which can fire an interrupt each.
- * OS (operating system) timer @ 32768 Hz
- * DD (device driver) timer @ 1 kHz
- * GP1 (general purpose 1) timer @ 1 MHz
- * GP2 (general purpose 2) timer @ 1 MHz
- */
-
-/* Reset OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_ROST (0x0000)
-#define U300_TIMER_APP_ROST_TIMER_RESET (0x00000000)
-/* Enable OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_EOST (0x0004)
-#define U300_TIMER_APP_EOST_TIMER_ENABLE (0x00000000)
-/* Disable OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_DOST (0x0008)
-#define U300_TIMER_APP_DOST_TIMER_DISABLE (0x00000000)
-/* OS Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SOSTM (0x000c)
-#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT (0x00000001)
-/* OS Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_OSTS (0x0010)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_OSTS_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_OSTS_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND (0x00000080)
-/* OS Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_OSTCC (0x0014)
-/* OS Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_OSTTC (0x0018)
-/* OS Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_OSTIE (0x001c)
-#define U300_TIMER_APP_OSTIE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_OSTIE_IRQ_ENABLE (0x00000001)
-/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_OSTIA (0x0020)
-#define U300_TIMER_APP_OSTIA_IRQ_ACK (0x00000080)
-
-/* Reset DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_RDDT (0x0040)
-#define U300_TIMER_APP_RDDT_TIMER_RESET (0x00000000)
-/* Enable DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_EDDT (0x0044)
-#define U300_TIMER_APP_EDDT_TIMER_ENABLE (0x00000000)
-/* Disable DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_DDDT (0x0048)
-#define U300_TIMER_APP_DDDT_TIMER_DISABLE (0x00000000)
-/* DD Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SDDTM (0x004c)
-#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT (0x00000001)
-/* DD Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_DDTS (0x0050)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_DDTS_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_DDTS_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND (0x00000080)
-/* DD Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_DDTCC (0x0054)
-/* DD Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_DDTTC (0x0058)
-/* DD Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_DDTIE (0x005c)
-#define U300_TIMER_APP_DDTIE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_DDTIE_IRQ_ENABLE (0x00000001)
-/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_DDTIA (0x0060)
-#define U300_TIMER_APP_DDTIA_IRQ_ACK (0x00000080)
-
-/* Reset GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_RGPT1 (0x0080)
-#define U300_TIMER_APP_RGPT1_TIMER_RESET (0x00000000)
-/* Enable GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_EGPT1 (0x0084)
-#define U300_TIMER_APP_EGPT1_TIMER_ENABLE (0x00000000)
-/* Disable GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_DGPT1 (0x0088)
-#define U300_TIMER_APP_DGPT1_TIMER_DISABLE (0x00000000)
-/* GP1 Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SGPT1M (0x008c)
-#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT (0x00000001)
-/* GP1 Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT1S (0x0090)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_GPT1S_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_GPT1S_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND (0x00000080)
-/* GP1 Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT1CC (0x0094)
-/* GP1 Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_GPT1TC (0x0098)
-/* GP1 Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT1IE (0x009c)
-#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE (0x00000001)
-/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT1IA (0x00a0)
-#define U300_TIMER_APP_GPT1IA_IRQ_ACK (0x00000080)
-
-/* Reset GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_RGPT2 (0x00c0)
-#define U300_TIMER_APP_RGPT2_TIMER_RESET (0x00000000)
-/* Enable GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_EGPT2 (0x00c4)
-#define U300_TIMER_APP_EGPT2_TIMER_ENABLE (0x00000000)
-/* Disable GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_DGPT2 (0x00c8)
-#define U300_TIMER_APP_DGPT2_TIMER_DISABLE (0x00000000)
-/* GP2 Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SGPT2M (0x00cc)
-#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT (0x00000001)
-/* GP2 Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT2S (0x00d0)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_GPT2S_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_GPT2S_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND (0x00000080)
-/* GP2 Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT2CC (0x00d4)
-/* GP2 Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_GPT2TC (0x00d8)
-/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT2IE (0x00dc)
-#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE (0x00000001)
-/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT2IA (0x00e0)
-#define U300_TIMER_APP_GPT2IA_IRQ_ACK (0x00000080)
-
-/* Clock request control register - all four timers */
-#define U300_TIMER_APP_CRC (0x100)
-#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE (0x00000001)
-
-static void __iomem *u300_timer_base;
-
-struct u300_clockevent_data {
- struct clock_event_device cevd;
- unsigned ticks_per_jiffy;
-};
-
-static int u300_shutdown(struct clock_event_device *evt)
-{
- /* Disable interrupts on GP1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- return 0;
-}
-
-/*
- * If we have oneshot timer active, the oneshot scheduling function
- * u300_set_next_event() is called immediately after.
- */
-static int u300_set_oneshot(struct clock_event_device *evt)
-{
- /* Just return; here? */
- /*
- * The actual event will be programmed by the next event hook,
- * so we just set a dummy value somewhere at the end of the
- * universe here.
- */
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Expire far in the future, u300_set_next_event() will be
- * called soon...
- */
- writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
- /* We run one shot per tick here! */
- writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable interrupts for this timer */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Enable timer */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-static int u300_set_periodic(struct clock_event_device *evt)
-{
- struct u300_clockevent_data *cevdata =
- container_of(evt, struct u300_clockevent_data, cevd);
-
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Set the periodic mode to a certain number of ticks per
- * jiffy.
- */
- writel(cevdata->ticks_per_jiffy,
- u300_timer_base + U300_TIMER_APP_GPT1TC);
- /*
- * Set continuous mode, so the timer keeps triggering
- * interrupts.
- */
- writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable timer interrupts */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Then enable the OS timer again */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-/*
- * The app timer in one shot mode obviously has to be reprogrammed
- * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
- * the interrupt disable + timer disable commands with a reset command,
- * it will fail miserably. Apparently (and I found this the hard way)
- * the timer is very sensitive to the instruction order, though you don't
- * get that impression from the data sheet.
- */
-static int u300_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-
-{
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /* Reset the General Purpose timer 1. */
- writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT1);
- /* IRQ in n * cycles */
- writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
- /*
-	 * We run one shot per tick here! (This is necessary to reconfigure;
-	 * the timer will tilt if you don't!)
- */
- writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable timer interrupts */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Then enable the OS timer again */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-static struct u300_clockevent_data u300_clockevent_data = {
- /* Use general purpose timer 1 as clock event */
- .cevd = {
- .name = "GPT1",
- /* Reasonably fast and accurate clock event */
- .rating = 300,
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = u300_set_next_event,
- .set_state_shutdown = u300_shutdown,
- .set_state_periodic = u300_set_periodic,
- .set_state_oneshot = u300_set_oneshot,
- },
-};
-
-/* Clock event timer interrupt handler */
-static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &u300_clockevent_data.cevd;
- /* ACK/Clear timer IRQ for the APP GPT1 Timer */
-
- writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
- u300_timer_base + U300_TIMER_APP_GPT1IA);
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-/*
- * Override the global weak sched_clock symbol with this
- * local implementation which uses the clocksource to get some
- * better resolution when scheduling the kernel. We accept that
- * this wraps around for now, since it is just a relative time
- * stamp. (Inspired by OMAP implementation.)
- */
-
-static u64 notrace u300_read_sched_clock(void)
-{
- return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
-}
-
-static unsigned long u300_read_current_timer(void)
-{
- return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
-}
-
-static struct delay_timer u300_delay_timer;
-
-/*
- * This sets up the system timers, clock source and clock event.
- */
-static int __init u300_timer_init_of(struct device_node *np)
-{
- unsigned int irq;
- struct clk *clk;
- unsigned long rate;
- int ret;
-
- u300_timer_base = of_iomap(np, 0);
- if (!u300_timer_base) {
- pr_err("could not ioremap system timer\n");
- return -ENXIO;
- }
-
- /* Get the IRQ for the GP1 timer */
- irq = irq_of_parse_and_map(np, 2);
- if (!irq) {
- pr_err("no IRQ for system timer\n");
- return -EINVAL;
- }
-
- pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
-
- /* Clock the interrupt controller */
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- rate = clk_get_rate(clk);
-
- u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
-
- sched_clock_register(u300_read_sched_clock, 32, rate);
-
- u300_delay_timer.read_current_timer = &u300_read_current_timer;
- u300_delay_timer.freq = rate;
- register_current_timer_delay(&u300_delay_timer);
-
- /*
- * Disable the "OS" and "DD" timers - these are designed for Symbian!
- * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
- */
- writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
- u300_timer_base + U300_TIMER_APP_CRC);
- writel(U300_TIMER_APP_ROST_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_ROST);
- writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DOST);
- writel(U300_TIMER_APP_RDDT_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RDDT);
- writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DDDT);
-
- /* Reset the General Purpose timer 1. */
- writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT1);
-
- /* Set up the IRQ handler */
- ret = request_irq(irq, u300_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "U300 Timer Tick", NULL);
- if (ret)
- return ret;
-
- /* Reset the General Purpose timer 2 */
- writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT2);
- /* Set this timer to run around forever */
- writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
- /* Set continuous mode so it wraps around */
- writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
- u300_timer_base + U300_TIMER_APP_SGPT2M);
- /* Disable timer interrupts */
- writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT2IE);
- /* Then enable the GP2 timer to use as a free running us counter */
- writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT2);
-
- /* Use general purpose timer 2 as clock source */
- ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
- "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("timer: failed to initialize U300 clock source\n");
- return ret;
- }
-
- /* Configure and register the clockevent */
- clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
- 1, 0xffffffff);
-
- /*
- * TODO: init and register the rest of the timers too, they can be
- * used by hrtimers!
- */
- return 0;
-}
-
-TIMER_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
- u300_timer_init_of);
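
The comment above u300_set_next_event() insists the one-shot timer be
reprogrammed in exactly one order. Modeled as a sequence of register
writes it looks like the sketch below; reg_write() is a printing stub
and the register names and values are illustrative, not the real
encodings:

    #include <stdio.h>

    static void reg_write(const char *reg, unsigned long val)
    {
        printf("%-6s <- 0x%08lx\n", reg, val);
    }

    static void set_next_event(unsigned long cycles)
    {
        reg_write("GPT1IE", 0);      /* 1. disable interrupts   */
        reg_write("DGPT1",  0);      /* 2. disable the timer    */
        reg_write("RGPT1",  0);      /* 3. reset it             */
        reg_write("GPT1TC", cycles); /* 4. IRQ in n cycles      */
        reg_write("SGPT1M", 1);      /* 5. one-shot mode        */
        reg_write("GPT1IE", 1);      /* 6. re-enable interrupts */
        reg_write("EGPT1",  0);      /* 7. re-enable the timer  */
    }

    int main(void)
    {
        set_next_event(32768);
        return 0;
    }
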
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index a60aee1a1a29..65df9ef5b5bc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
@@ -302,11 +272,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.write = ti_eqep_position_ceiling_write,
},
{
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
- {
.name = "enable",
.read = ti_eqep_position_enable_read,
.write = ti_eqep_position_enable_write,
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1f73fa75b1a0..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -289,11 +289,6 @@ config ARM_STI_CPUFREQ
this config option if you wish to add CPUFreq support for STi based
SoCs.
-config ARM_TANGO_CPUFREQ
- bool
- depends on CPUFREQ_DT && ARCH_TANGO
- default y
-
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA && CPUFREQ_DT
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 399526289320..92701a18bdd9 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -62,16 +62,6 @@ config X86_ACPI_CPUFREQ_CPB
By enabling this option the acpi_cpufreq driver provides the old
entry in addition to the new boost ones, for compatibility reasons.
-config X86_SFI_CPUFREQ
- tristate "SFI Performance-States driver"
- depends on X86_INTEL_MID && SFI
- help
- This adds a CPUFreq driver for some Silvermont based Intel Atom
- architectures like Z34xx and Z35xx which enumerate processor
- performance states through SFI.
-
- If in doubt, say N.
-
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
depends on MELAN
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f1b7e3dd6e5d..27d3bd7ea9d4 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
-obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
@@ -79,7 +78,6 @@ obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
-obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1e4fbb002a31..d1bbc16fba4b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -628,16 +629,53 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ int ret;
+
+ if (acpi_pstate_strict)
+ return 0;
+
+ ret = cppc_get_perf_caps(cpu, &perf_caps);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+
+ highest_perf = perf_caps.highest_perf;
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ return 0;
+ }
+
+ if (highest_perf < nominal_perf) {
+ pr_debug("CPU%d: nominal performance above highest\n", cpu);
+ return 0;
+ }
+
+ return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int i;
- unsigned int valid_states = 0;
- unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *freq_table;
+ struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
+ unsigned int cpu = policy->cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
unsigned int result = 0;
- struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
- struct acpi_processor_performance *perf;
- struct cpufreq_frequency_table *freq_table;
+ u64 max_boost_ratio;
+ unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -785,6 +823,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+ unsigned int freq = freq_table[0].frequency;
+
+ /*
+	 * Because the loop above sorts the freq_table entries in
+	 * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+ * use it to set cpuinfo.max_freq.
+ */
+ policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ } else {
+ /*
+ * If the maximum "boost" frequency is unknown, ask the arch
+ * scale-invariance code to use the "nominal" performance for
+ * CPU utilization scaling so as to prevent the schedutil
+ * governor from selecting inadequate CPU frequencies.
+ */
+ arch_set_max_freq_ratio(true);
+ }
+
policy->freq_table = freq_table;
perf->state = 0;
@@ -858,8 +918,9 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
policy->cpu);
+ unsigned int freq = policy->freq_table[0].frequency;
- if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+ if (perf->states[0].core_frequency * 1000 != freq)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
}
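
The boost handling added to acpi-cpufreq is fixed-point arithmetic:
get_max_boost_ratio() returns highest_perf/nominal_perf scaled by
2^SCHED_CAPACITY_SHIFT, and the init path multiplies the nominal
frequency (the first, largest table entry) by that ratio and shifts
back. A worked example with made-up CPPC numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        uint64_t highest_perf = 36;         /* e.g. 3.6 GHz max boost  */
        uint64_t nominal_perf = 24;         /* e.g. 2.4 GHz nominal    */
        unsigned int nominal_khz = 2400000; /* freq_table[0].frequency */

        /* ratio = 36 * 1024 / 24 = 1536, i.e. 1.5 with 10 fractional bits */
        uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
        unsigned int max_khz = nominal_khz * ratio >> SCHED_CAPACITY_SHIFT;

        printf("ratio = %llu/1024, cpuinfo.max_freq = %u kHz\n",
               (unsigned long long)ratio, max_khz); /* 3600000 kHz */
        return 0;
    }
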
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 3e31e5d28b79..4153150e20db 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -597,6 +597,16 @@ unmap_base:
return ret;
}
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -732,21 +742,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
brcm_avs_driver.driver_data = pdev;
- return cpufreq_register_driver(&brcm_avs_driver);
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
- if (ret)
- return ret;
+ WARN_ON(ret);
- priv = platform_get_drvdata(pdev);
- iounmap(priv->base);
- iounmap(priv->avs_intr_base);
+ brcm_avs_prepare_uninit(pdev);
return 0;
}
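
The brcmstb change factors the iounmap() teardown into
brcm_avs_prepare_uninit() so the probe-failure path and remove() share
one unwind helper and cannot drift apart. A compact model of that
register-then-unwind shape, with stubs in place of the real cpufreq and
iounmap calls:

    #include <stdio.h>

    static void prepare_uninit(void) { puts("unmap register ranges"); }
    static int  register_driver(int fail) { return fail ? -1 : 0; }

    static int probe(int fail)
    {
        int ret = register_driver(fail);

        if (ret)
            prepare_uninit();   /* failure path uses the helper ... */
        return ret;
    }

    static void driver_remove(void)
    {
        prepare_uninit();       /* ... and so does normal teardown. */
    }

    int main(void)
    {
        if (probe(1))
            puts("probe failed, mappings released");
        if (!probe(0))
            driver_remove();
        return 0;
    }
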
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bd2db0188cbb..3ba2f716fe97 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -141,8 +141,6 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "st,stih410", },
{ .compatible = "st,stih418", },
- { .compatible = "sigma,tango4", },
-
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ad4234518ef6..b1e1bdc63b01 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -175,7 +175,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d0a3525ce27f..1d1b563cea4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2101,7 +2101,7 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
* @cpu: Target CPU.
* @min_perf: Minimum (required) performance level (units of @capacity).
- * @target_perf: Terget (desired) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
* @capacity: Capacity of the target CPU.
*
* Carry out a fast performance level switch of @cpu without sleeping.
@@ -2810,8 +2810,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret)
goto err_boost_unreg;
- if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
- list_empty(&cpufreq_policy_list)) {
+ if (unlikely(list_empty(&cpufreq_policy_list))) {
/* if all ->init() calls failed, unregister */
ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 91f477a6cbc4..9e97f60f8199 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -95,7 +95,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver davinci_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f839dc9852c0..d3f756f7b5a0 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -52,7 +52,13 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
}
policy->min = policy->cpuinfo.min_freq = min_freq;
- policy->max = policy->cpuinfo.max_freq = max_freq;
+ policy->max = max_freq;
+ /*
+ * If the driver has set its own cpuinfo.max_freq above max_freq, leave
+ * it as is.
+ */
+ if (policy->cpuinfo.max_freq < max_freq)
+ policy->max = policy->cpuinfo.max_freq = max_freq;
if (policy->min == ~0)
return -EINVAL;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1a660466dd75..5175ae3cac44 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -829,13 +819,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
int *current_max)
{
u64 cap;
- rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
- WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(cpu->hwp_cap_cached, cap);
if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
@@ -924,7 +914,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
}
value &= ~GENMASK_ULL(31, 0);
- min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
+ min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
@@ -1223,7 +1213,7 @@ static void update_qos_request(enum freq_qos_req_type type)
continue;
if (hwp_active)
- intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
else
turbo_max = cpu->pstate.turbo_pstate;
@@ -1724,21 +1714,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
- intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu->pstate.turbo_pstate = phy_max;
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
}
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -1760,6 +1751,7 @@ static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
u32 max_limit = (hwp_req & 0xff00) >> 8;
u32 min_limit = (hwp_req & 0xff);
u32 boost_level1;
@@ -1786,14 +1778,14 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
cpu->hwp_boost_min = min_limit;
	/* level at halfway mark between min and guaranteed */
- boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+ boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
if (cpu->hwp_boost_min < boost_level1)
cpu->hwp_boost_min = boost_level1;
- else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
- cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
- else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
- max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
+ max_limit != HWP_GUARANTEED_PERF(hwp_cap))
cpu->hwp_boost_min = max_limit;
else
return;
@@ -2217,7 +2209,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* rather than pure ratios.
*/
if (hwp_active) {
- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
} else {
max_state = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
@@ -2332,7 +2324,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
if (hwp_active) {
int max_state, turbo_max;
- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
max_freq = max_state * cpu->pstate.scaling;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2506,7 +2498,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
* driver call was via the normal or fast switch path. Various graphs
* output from the intel_pstate_tracer.py utility that include core_busy
* (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
- * so we use 10 to indicate the the normal path through the driver, and
+ * so we use 10 to indicate the normal path through the driver, and
* 90 to indicate the fast switch path through the driver.
* The scaled_busy field is not used, and is set to 0.
*/
@@ -2536,7 +2528,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
u32 desired, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
@@ -2560,7 +2552,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
-static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
@@ -2582,10 +2574,10 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
fast_switch);
} else if (target_pstate != old_pstate) {
- intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+ intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
cpu->pstate.current_pstate = target_pstate;
@@ -2653,12 +2645,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
unsigned long capacity)
{
struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
update_turbo_state();
- cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
- cpu->pstate.turbo_pstate;
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -2682,7 +2675,7 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
- intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+ intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
cpu->pstate.current_pstate = target_pstate;
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
@@ -2718,7 +2711,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (hwp_active) {
u64 value;
- intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index 86f612593e49..fb72d709db56 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -116,7 +116,7 @@ static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver ls1x_cpufreq_driver = {
.name = "cpufreq-ls1x",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ls1x_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 022e3e966e71..f2e491b25b07 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -463,7 +463,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver mtk_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 3694bb030df3..e035ee216b0f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -144,7 +144,7 @@ static int omap_cpu_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 73621bc11976..4f20c6a9108d 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,8 +439,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
- .flags = CPUFREQ_PM_NO_WARN |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9ed5341dc515..d3c23447b892 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -32,6 +32,7 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
+ struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
};
@@ -54,7 +55,7 @@ static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
if (IS_ERR(opp))
return PTR_ERR(opp);
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
}
@@ -280,6 +281,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
+ struct resource *res;
void __iomem *base;
struct qcom_cpufreq_data *data;
int ret, index;
@@ -303,18 +305,33 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
index = args.args[0];
- base = devm_platform_ioremap_resource(pdev, index);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (!res) {
+ dev_err(dev, "failed to get mem resource %d\n", index);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "failed to request resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+ ret = PTR_ERR(base);
+ goto release_region;
+ }
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
- goto error;
+ goto unmap_base;
}
data->soc_data = of_device_get_match_data(&pdev->dev);
data->base = base;
+ data->res = res;
/* HW should be in enabled state to proceed */
if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
@@ -347,9 +364,19 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ }
+
return 0;
error:
- devm_iounmap(dev, base);
+ kfree(data);
+unmap_base:
+ iounmap(data->base);
+release_region:
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -357,12 +384,15 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
- struct platform_device *pdev = cpufreq_get_driver_data();
+ struct resource *res = data->res;
+ void __iomem *base = data->base;
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
kfree(policy->freq_table);
- devm_iounmap(&pdev->dev, data->base);
+ kfree(data);
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
@@ -374,7 +404,7 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
};
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
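The qcom-cpufreq-hw rework above drops devm_-managed mappings, whose lifetime is tied to the platform device, in favour of explicit request/ioremap with matching teardown in the exit path, so per-policy resources are actually released across repeated init/exit cycles. A minimal sketch of the pairing (example_* names are hypothetical; note that ioremap() reports failure with NULL, so the sketch checks for that):

#include <linux/io.h>
#include <linux/ioport.h>

/* Sketch: explicit MMIO lifetime that an init/exit pair can balance. */
static void __iomem *example_map(struct resource *res)
{
	void __iomem *base;

	if (!request_mem_region(res->start, resource_size(res), res->name))
		return NULL;				/* region already claimed */

	base = ioremap(res->start, resource_size(res));	/* NULL on failure */
	if (!base)
		release_mem_region(res->start, resource_size(res));

	return base;
}

static void example_unmap(struct resource *res, void __iomem *base)
{
	iounmap(base);
	release_mem_region(res->start, resource_size(res));
}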
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 37efc0dc3f91..7380c32b238e 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -420,7 +420,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
#endif
static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.target = s3c_cpufreq_target,
.get = cpufreq_generic_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index bed496cf8d24..69786e5bbf05 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -574,7 +574,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
}
static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 5c075ef6adc0..252b9fc26124 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -186,7 +186,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index d9d04d935b3a..1a83c8678a63 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -310,7 +310,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
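As the sa1110 comment above says, __refdata tells modpost that a non-init object intentionally references __init code, silencing the section-mismatch warning. Schematically (my_* names hypothetical):

#include <linux/cpufreq.h>
#include <linux/init.h>

static int __init my_cpu_init(struct cpufreq_policy *policy)
{
	return 0;	/* only ever called during boot-time registration */
}

/* Persists after init, so modpost would warn about the .init reference
 * without the __refdata annotation. */
static struct cpufreq_driver my_driver __refdata = {
	.init = my_cpu_init,
};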
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 491a0a24fb1e..5bd03b59887f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -217,7 +217,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index e5140ad63db8..d6a698a1b5d1 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -191,7 +191,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
deleted file mode 100644
index 45cfdf67cf03..000000000000
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SFI Performance States Driver
- *
- * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
- * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sfi.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-
-#include <asm/msr.h>
-
-static struct cpufreq_frequency_table *freq_table;
-static struct sfi_freq_table_entry *sfi_cpufreq_array;
-static int num_freq_table_entries;
-
-static int sfi_parse_freq(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_freq_table_entry *pentry;
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_freq_table_entry);
- if (num_freq_table_entries <= 1) {
- pr_err("No p-states discovered\n");
- return -ENODEV;
- }
-
- pentry = (struct sfi_freq_table_entry *)sb->pentry;
- totallen = num_freq_table_entries * sizeof(*pentry);
-
- sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
- if (!sfi_cpufreq_array)
- return -ENOMEM;
-
- return 0;
-}
-
-static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
-{
- unsigned int next_perf_state = 0; /* Index into perf table */
- u32 lo, hi;
-
- next_perf_state = policy->freq_table[index].driver_data;
-
- rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
- lo = (lo & ~INTEL_PERF_CTL_MASK) |
- ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
- INTEL_PERF_CTL_MASK);
- wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
-
- return 0;
-}
-
-static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
- policy->cpuinfo.transition_latency = 100000; /* 100us */
- policy->freq_table = freq_table;
-
- return 0;
-}
-
-static struct cpufreq_driver sfi_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = sfi_cpufreq_target,
- .init = sfi_cpufreq_cpu_init,
- .name = "sfi-cpufreq",
- .attr = cpufreq_generic_attr,
-};
-
-static int __init sfi_cpufreq_init(void)
-{
- int ret, i;
-
- /* parse the freq table from SFI */
- ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
- if (ret)
- return ret;
-
- freq_table = kcalloc(num_freq_table_entries + 1, sizeof(*freq_table),
- GFP_KERNEL);
- if (!freq_table) {
- ret = -ENOMEM;
- goto err_free_array;
- }
-
- for (i = 0; i < num_freq_table_entries; i++) {
- freq_table[i].driver_data = i;
- freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
- }
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- ret = cpufreq_register_driver(&sfi_cpufreq_driver);
- if (ret)
- goto err_free_tbl;
-
- return ret;
-
-err_free_tbl:
- kfree(freq_table);
-err_free_array:
- kfree(sfi_cpufreq_array);
- return ret;
-}
-late_initcall(sfi_cpufreq_init);
-
-static void __exit sfi_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&sfi_cpufreq_driver);
- kfree(freq_table);
- kfree(sfi_cpufreq_array);
-}
-module_exit(sfi_cpufreq_exit);
-
-MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
-MODULE_DESCRIPTION("SFI Performance-States Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 73bd8dc47074..7d0d62a06bf3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -160,7 +160,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c
deleted file mode 100644
index 89a7f860bfe8..000000000000
--- a/drivers/cpufreq/tango-cpufreq.c
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-
-static const struct of_device_id machines[] __initconst = {
- { .compatible = "sigma,tango4" },
- { /* sentinel */ }
-};
-
-static int __init tango_cpufreq_init(void)
-{
- struct device *cpu_dev = get_cpu_device(0);
- unsigned long max_freq;
- struct clk *cpu_clk;
- void *res;
-
- if (!of_match_node(machines, of_root))
- return -ENODEV;
-
- cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk))
- return -ENODEV;
-
- max_freq = clk_get_rate(cpu_clk);
-
- dev_pm_opp_add(cpu_dev, max_freq / 1, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 2, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 3, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 5, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 9, 0);
-
- res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0);
-
- return PTR_ERR_OR_ZERO(res);
-}
-device_initcall(tango_cpufreq_init);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index e566ea298b59..5d1943e787b0 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -117,7 +117,7 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = tegra186_cpufreq_get,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 6a67f36f3b80..a9620e4489ae 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -272,8 +272,7 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8c893043953e..e8db3d75be25 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,6 +32,16 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
+static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+{
+ dev_pm_opp_put_supported_hw(opp_table);
+}
+
+static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
+{
+ platform_device_unregister(cpufreq_dt);
+}
+
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
@@ -68,42 +78,31 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
return err;
}
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_put_supported_hw,
+ opp_table);
+ if (err)
+ return err;
+
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
err = PTR_ERR_OR_ZERO(cpufreq_dt);
if (err) {
dev_err(&pdev->dev,
"failed to create cpufreq-dt device: %d\n", err);
- goto err_put_supported_hw;
+ return err;
}
- platform_set_drvdata(pdev, cpufreq_dt);
-
- return 0;
-
-err_put_supported_hw:
- dev_pm_opp_put_supported_hw(opp_table);
-
- return err;
-}
-
-static int tegra20_cpufreq_remove(struct platform_device *pdev)
-{
- struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
-
- cpufreq_dt = platform_get_drvdata(pdev);
- platform_device_unregister(cpufreq_dt);
-
- opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
- dev_pm_opp_put_supported_hw(opp_table);
- dev_pm_opp_put_opp_table(opp_table);
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_dt_unregister,
+ cpufreq_dt);
+ if (err)
+ return err;
return 0;
}
static struct platform_driver tegra20_cpufreq_driver = {
.probe = tegra20_cpufreq_probe,
- .remove = tegra20_cpufreq_remove,
.driver = {
.name = "tegra20-cpufreq",
},
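The tegra20 conversion replaces an explicit .remove() with devm_add_action_or_reset(), which queues a teardown callback that devres runs, in reverse registration order, on probe failure or unbind; if the registration itself fails, the callback is invoked immediately. A minimal sketch of the idiom (the foo_* helpers are hypothetical stand-ins):

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical resource helpers standing in for the real ones. */
static void *foo_enable(struct platform_device *pdev) { return pdev; }
static void foo_disable(void *data) { }

static void foo_undo(void *data)
{
	foo_disable(data);
}

static int foo_probe(struct platform_device *pdev)
{
	void *handle = foo_enable(pdev);
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* On allocation failure this calls foo_undo(handle) before returning. */
	err = devm_add_action_or_reset(&pdev->dev, foo_undo, handle);
	if (err)
		return err;

	return 0;	/* no .remove needed for this resource */
}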
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index f711d8eaea6a..51dfa9ae6cf5 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -486,8 +486,7 @@ static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ve_spc_cpufreq_set_target,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bbd51703e738..9a4c275a1335 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -348,7 +348,7 @@ config CRYPTO_DEV_PPC4XX
config HW_RANDOM_PPC4XX
bool "PowerPC 4xx generic true random number generator support"
- depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+ depends on CRYPTO_DEV_PPC4XX && HW_RANDOM=y
default y
help
This option provides the kernel-side support for the TRNG hardware
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
+ select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
@@ -403,24 +404,6 @@ config CRYPTO_DEV_OMAP_DES
endif # CRYPTO_DEV_OMAP
-config CRYPTO_DEV_PICOXCELL
- tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
- depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK
- select CRYPTO_AEAD
- select CRYPTO_AES
- select CRYPTO_AUTHENC
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_SEQIV
- help
- This option enables support for the hardware offload engines in the
- Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
- and for 3gpp Layer 2 ciphering support.
-
- Saying m here will build a module named picoxcell_crypto.
-
config CRYPTO_DEV_SAHARA
tristate "Support for SAHARA crypto accelerator"
depends on ARCH_MXC && OF
@@ -772,21 +755,6 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
-config CRYPTO_DEV_MEDIATEK
- tristate "MediaTek's EIP97 Cryptographic Engine driver"
- depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
- select CRYPTO_LIB_AES
- select CRYPTO_AEAD
- select CRYPTO_SKCIPHER
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- select CRYPTO_HMAC
- help
- This driver allows you to utilize the hardware crypto accelerator
- EIP97 which can be found on the MT7623 MT2701, MT8521p, etc ....
- Select this if you want to use it for AES/SHA1/SHA2 algorithms.
-
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fff9a70348e1..fa22cb19e242 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
-obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
@@ -31,7 +30,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
-obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 180c8a9db819..856fb2045656 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -32,6 +32,15 @@ config CRYPTO_DEV_SUN4I_SS_PRNG
Select this option if you want to provide kernel-side support for
the Pseudo-Random Number Generator found in the Security System.
+config CRYPTO_DEV_SUN4I_SS_DEBUG
+ bool "Enable sun4i-ss stats"
+ depends on CRYPTO_DEV_SUN4I_SS
+ depends on DEBUG_FS
+ help
+ Say y to enable sun4i-ss debug stats.
+ This will create /sys/kernel/debug/sun4i-ss/stats for displaying
+ the number of requests per algorithm.
+
config CRYPTO_DEV_SUN8I_CE
tristate "Support for Allwinner Crypto Engine cryptographic offloader"
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index b72de8939497..c2e6f5ed1d79 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
u32 mode = ctx->mode;
+ void *backup_iv = NULL;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
u32 tx_cnt = 0;
@@ -30,9 +31,13 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
unsigned long flags;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
if (!areq->cryptlen)
return 0;
@@ -42,52 +47,77 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
return -EINVAL;
}
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ algt->stat_opti++;
+ algt->stat_bytes += areq->cryptlen;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen / 4;
oleft = areq->cryptlen / 4;
oi = 0;
oo = 0;
do {
- todo = min(rx_cnt, ileft);
- todo = min_t(size_t, todo, (mi.length - oi) / 4);
- if (todo) {
- ileft -= todo;
- writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
- oi += todo * 4;
- }
- if (oi == mi.length) {
- sg_miter_next(&mi);
- oi = 0;
+ if (ileft) {
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
+ if (todo) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+ oi += todo * 4;
+ }
+ if (oi == mi.length) {
+ pi += mi.length;
+ oi = 0;
+ }
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
todo = min(tx_cnt, oleft);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
@@ -96,33 +126,41 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oo += todo * 4;
}
if (oo == mo.length) {
- sg_miter_next(&mo);
oo = 0;
+ po += mo.length;
}
+ sg_miter_stop(&mo);
} while (oleft);
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
return err;
}
-
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
int err;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ algt->stat_fb++;
+ }
skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
@@ -161,13 +199,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ void *backup_iv = NULL;
struct sg_mapping_iter mi, mo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
unsigned int oi, oo; /* offset for in and out */
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
- bool need_fallback;
+ bool need_fallback = false;
if (!areq->cryptlen)
return 0;
@@ -186,12 +227,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we can use the SS optimized function
*/
while (in_sg && no_chunk == 1) {
- if (in_sg->length % 4)
+ if ((in_sg->length | in_sg->offset) & 3u)
no_chunk = 0;
in_sg = sg_next(in_sg);
}
while (out_sg && no_chunk == 1) {
- if (out_sg->length % 4)
+ if ((out_sg->length | out_sg->offset) & 3u)
no_chunk = 0;
out_sg = sg_next(out_sg);
}
@@ -202,30 +243,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (need_fallback)
return sun4i_ss_cipher_poll_fallback(areq);
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt->stat_req++;
+ algt->stat_bytes += areq->cryptlen;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen;
oleft = areq->cryptlen;
oi = 0;
@@ -233,8 +275,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
-
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/*
* todo is the number of consecutive 4byte word that we
* can read from current SG
@@ -256,52 +306,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min(rx_cnt * 4 - ob, ileft);
todo = min_t(size_t, todo, mi.length - oi);
- memcpy(buf + ob, mi.addr + oi, todo);
+ memcpy(ss->buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
ob += todo;
if (!(ob % 4)) {
- writesl(ss->base + SS_RXFIFO, buf,
+ writesl(ss->base + SS_RXFIFO, ss->buf,
ob / 4);
ob = 0;
}
}
if (oi == mi.length) {
- sg_miter_next(&mi);
+ pi += mi.length;
oi = 0;
}
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev,
- "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
- mode,
- oi, mi.length, ileft, areq->cryptlen, rx_cnt,
- oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
if (!tx_cnt)
continue;
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/* todo in 4bytes word */
todo = min(tx_cnt, oleft / 4);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
+
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
oo += todo * 4;
if (oo == mo.length) {
- sg_miter_next(&mo);
+ po += mo.length;
oo = 0;
}
} else {
- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
-
/*
* read obl bytes in bufo, we read at maximum for
* emptying the device
*/
- readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+ readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
obl = tx_cnt * 4;
obo = 0;
do {
@@ -313,28 +368,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min_t(size_t,
mo.length - oo, obl - obo);
- memcpy(mo.addr + oo, bufo + obo, todo);
+ memcpy(mo.addr + oo, ss->bufo + obo, todo);
oleft -= todo;
obo += todo;
oo += todo;
if (oo == mo.length) {
+ po += mo.length;
sg_miter_next(&mo);
oo = 0;
}
} while (obo < obl);
/* bufo must be fully used here */
}
+ sg_miter_stop(&mo);
}
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
@@ -503,7 +561,6 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
sizeof(struct sun4i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
-
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
@@ -590,5 +647,4 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
-
}
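A recurring pattern in the sun4i-ss hunks above: instead of keeping one atomic sg_miter mapping alive for the whole transfer, each pass restarts the iterator, skips the bytes already consumed, and stops it again before looping. Condensed into a self-contained helper (hypothetical, not the driver's actual loop):

#include <linux/scatterlist.h>
#include <linux/string.h>

static int copy_from_sg_chunked(struct scatterlist *sgl, u8 *dst, size_t len)
{
	struct sg_mapping_iter mi;
	size_t done = 0;

	while (done < len) {
		size_t chunk;

		sg_miter_start(&mi, sgl, sg_nents(sgl),
			       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
		if (done)
			sg_miter_skip(&mi, done);
		if (!sg_miter_next(&mi) || !mi.addr) {
			sg_miter_stop(&mi);
			return -EINVAL;
		}

		chunk = min_t(size_t, mi.length, len - done);
		memcpy(dst + done, mi.addr, chunk);
		done += chunk;

		sg_miter_stop(&mi);	/* drop the atomic mapping each pass */
	}

	return 0;
}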
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index a2b67f7f8a81..709905ec4680 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
#include <linux/crypto.h>
+#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -234,6 +235,51 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
+static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu opti=%lu fallback=%lu tsize=%lu\n",
+ ss_algs[i].alg.crypto.base.cra_driver_name,
+ ss_algs[i].alg.crypto.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_opti, ss_algs[i].stat_fb,
+ ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
+ ss_algs[i].alg.rng.base.cra_driver_name,
+ ss_algs[i].alg.rng.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu\n",
+ ss_algs[i].alg.hash.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].stat_req);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun4i_ss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun4i_ss_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* Power management strategy: The device is suspended unless a TFM exists for
* one of the algorithms proposed by this driver.
@@ -454,6 +500,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
break;
}
}
+
+ /* Ignore errors from debugfs */
+ ss->dbgfs_dir = debugfs_create_dir("sun4i-ss", NULL);
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444, ss->dbgfs_dir, ss,
+ &sun4i_ss_debugfs_fops);
+
return 0;
error_alg:
i--;
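Side note on the debugfs plumbing added above: the open/read/llseek/release boilerplate can also be generated by DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>; a minimal sketch (my_stats is a hypothetical name):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_stats_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "reqs=0\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_stats);	/* defines my_stats_open and my_stats_fops */

/* Then: debugfs_create_file("stats", 0444, parent_dir, NULL, &my_stats_fops); */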
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index 1dff48558f53..c1b4585e9bbc 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -191,8 +191,10 @@ static int sun4i_hash(struct ahash_request *areq)
u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
struct sun4i_ss_ctx *ss = tfmctx->ss;
+ struct sun4i_ss_alg_template *algt;
struct scatterlist *in_sg = areq->src;
struct sg_mapping_iter mi;
int in_r, err = 0;
@@ -398,6 +400,10 @@ static int sun4i_hash(struct ahash_request *areq)
*/
hash_final:
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ algt->stat_req++;
+ }
/* write the remaining words of the wait buffer */
if (op->len) {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 729aafdbea84..443160a114bb 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "sun4i-ss.h"
int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
@@ -32,6 +33,11 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
if (err < 0)
return err;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+ }
+
spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 5c291e4a6857..0fee6f4e2d90 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -148,10 +148,14 @@ struct sun4i_ss_ctx {
struct reset_control *reset;
struct device *dev;
struct resource *res;
+ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
spinlock_t slock; /* control the use of the device */
#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
u32 seed[SS_SEED_LEN / BITS_PER_LONG];
#endif
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
};
struct sun4i_ss_alg_template {
@@ -163,6 +167,10 @@ struct sun4i_ss_alg_template {
struct rng_alg rng;
} alg;
struct sun4i_ss_ctx *ss;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_bytes;
+ unsigned long stat_opti;
};
struct sun4i_tfm_ctx {
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 30390a7324b2..851b149f7170 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,7 @@
/* ================= Device Structure ================== */
-struct device_private iproc_priv;
+struct bcm_device_private iproc_priv;
/* ==================== Parameters ===================== */
@@ -471,10 +471,8 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
-#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
struct skcipher_request *req = skcipher_request_cast(areq);
-#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -996,13 +994,11 @@ static int ahash_req_done(struct iproc_reqctx_s *rctx)
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
struct iproc_ctx_s *ctx = rctx->ctx;
-#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-#endif
/*
* Save hash to use as input to next op if incremental. Might be copying
* too much, but that's easier than figuring out actual digest size here
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 0ad5892b445d..71281a3bdbdc 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -420,7 +420,7 @@ struct spu_hw {
u32 num_chan;
};
-struct device_private {
+struct bcm_device_private {
struct platform_device *pdev;
struct spu_hw spu;
@@ -467,6 +467,6 @@ struct device_private {
struct mbox_chan **mbox;
};
-extern struct device_private iproc_priv;
+extern struct bcm_device_private iproc_priv;
#endif
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index fe126f95c702..007abf92cc05 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -41,7 +41,7 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
packet_log("SPU Message header %p len: %u\n", buf, buf_len);
/* ========== Decode MH ========== */
- packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" MH 0x%08x\n", be32_to_cpup((__be32 *)ptr));
if (spuh->mh.flags & MH_SCTX_PRES)
packet_log(" SCTX present\n");
if (spuh->mh.flags & MH_BDESC_PRES)
@@ -273,22 +273,21 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/* ========== Decode BDESC ========== */
if (spuh->mh.flags & MH_BDESC_PRES) {
-#ifdef DEBUG
struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;
-#endif
- packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+
+ packet_log(" BDESC[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetMAC:%u LengthMAC:%u\n",
be16_to_cpu(bdesc->offset_mac),
be16_to_cpu(bdesc->length_mac));
ptr += sizeof(u32);
- packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" BDESC[1] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
be16_to_cpu(bdesc->offset_crypto),
be16_to_cpu(bdesc->length_crypto));
ptr += sizeof(u32);
- packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" BDESC[2] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetICV:%u OffsetIV:%u\n",
be16_to_cpu(bdesc->offset_icv),
be16_to_cpu(bdesc->offset_iv));
@@ -297,10 +296,9 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/* ========== Decode BD ========== */
if (spuh->mh.flags & MH_BD_PRES) {
-#ifdef DEBUG
struct BD_HEADER *bd = (struct BD_HEADER *)ptr;
-#endif
- packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+
+ packet_log(" BD[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" Size:%ubytes PrevLength:%u\n",
be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
ptr += 4;
@@ -1056,9 +1054,9 @@ void spum_request_pad(u8 *pad_start,
/* add the size at the end as required per alg */
if (auth_alg == HASH_ALG_MD5)
- *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+ *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
else /* SHA1, SHA2-224, SHA2-256 */
- *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
ptr += sizeof(u64);
}
}
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index c860ffb0b4c3..2db35b5ccaa2 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -964,7 +964,6 @@ u32 spu2_create_request(u8 *spu_hdr,
unsigned int cipher_offset = aead_parms->assoc_size +
aead_parms->aad_pad_len + aead_parms->iv_len;
-#ifdef DEBUG
/* total size of the data following OMD (without STAT word padding) */
unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
aead_parms->iv_len,
@@ -973,7 +972,6 @@ u32 spu2_create_request(u8 *spu_hdr,
aead_parms->aad_pad_len,
aead_parms->data_pad_len,
hash_parms->pad_len);
-#endif
unsigned int assoc_size = aead_parms->assoc_size;
if (req_opts->is_aead &&
@@ -1263,9 +1261,9 @@ void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
/* add the size at the end as required per alg */
if (auth_alg == HASH_ALG_MD5)
- *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+ *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
else /* SHA1, SHA2-224, SHA2-256 */
- *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
ptr += sizeof(u64);
}
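The __le64/__be64 conversions in these bcm hunks move the byte-order decision into the pointer type, so sparse can verify every store; roughly:

#include <linux/types.h>

/* Sketch: endian-annotated stores; sparse flags any u64/__le64 mixups.
 * For fields that may be unaligned, put_unaligned_le64()/be64() is the
 * usual alternative. */
static void put_len64(u8 *ptr, u64 bits, bool le)
{
	if (le)
		*(__le64 *)ptr = cpu_to_le64(bits);
	else
		*(__be64 *)ptr = cpu_to_be64(bits);
}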
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
index 6e666bfb3cfc..a76d4e054466 100644
--- a/drivers/crypto/bcm/spu2.h
+++ b/drivers/crypto/bcm/spu2.h
@@ -73,10 +73,10 @@ enum spu2_ret_md_opts {
/* Fixed Metadata format */
struct SPU2_FMD {
- u64 ctrl0;
- u64 ctrl1;
- u64 ctrl2;
- u64 ctrl3;
+ __le64 ctrl0;
+ __le64 ctrl1;
+ __le64 ctrl2;
+ __le64 ctrl3;
};
#define FMD_SIZE sizeof(struct SPU2_FMD)
diff --git a/drivers/crypto/bcm/spum.h b/drivers/crypto/bcm/spum.h
index 6116ad1dd26e..f062f75808de 100644
--- a/drivers/crypto/bcm/spum.h
+++ b/drivers/crypto/bcm/spum.h
@@ -69,18 +69,18 @@
/* Buffer Descriptor Header [BDESC]. SPU in big-endian mode. */
struct BDESC_HEADER {
- u16 offset_mac; /* word 0 [31-16] */
- u16 length_mac; /* word 0 [15-0] */
- u16 offset_crypto; /* word 1 [31-16] */
- u16 length_crypto; /* word 1 [15-0] */
- u16 offset_icv; /* word 2 [31-16] */
- u16 offset_iv; /* word 2 [15-0] */
+ __be16 offset_mac; /* word 0 [31-16] */
+ __be16 length_mac; /* word 0 [15-0] */
+ __be16 offset_crypto; /* word 1 [31-16] */
+ __be16 length_crypto; /* word 1 [15-0] */
+ __be16 offset_icv; /* word 2 [31-16] */
+ __be16 offset_iv; /* word 2 [15-0] */
};
/* Buffer Data Header [BD]. SPU in big-endian mode. */
struct BD_HEADER {
- u16 size;
- u16 prev_length;
+ __be16 size;
+ __be16 prev_length;
};
/* Command Context Header. SPU-M in big endian mode. */
@@ -144,13 +144,13 @@ struct MHEADER {
/* Generic Mode Security Context Structure [SCTX] */
struct SCTX {
/* word 0: protocol flags */
- u32 proto_flags;
+ __be32 proto_flags;
/* word 1: cipher flags */
- u32 cipher_flags;
+ __be32 cipher_flags;
/* word 2: Extended cipher flags */
- u32 ecf;
+ __be32 ecf;
};
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 2b304fc78059..c4669a96eaec 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -268,6 +268,7 @@ do_shash_err:
return rc;
}
+#ifdef DEBUG
/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
@@ -289,6 +290,7 @@ void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
if (debug_logging_sleep)
msleep(debug_logging_sleep);
}
+#endif
/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
@@ -348,7 +350,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
- struct device_private *ipriv;
+ struct bcm_device_private *ipriv;
char *buf;
ssize_t ret, out_offset, out_count;
int i;
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
index a89b2b9c1f52..61c256384816 100644
--- a/drivers/crypto/bcm/util.h
+++ b/drivers/crypto/bcm/util.h
@@ -58,12 +58,26 @@ void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len);
#else /* !DEBUG_ON */
-#define flow_log(...) do {} while (0)
-#define flow_dump(msg, var, var_len) do {} while (0)
-#define packet_log(...) do {} while (0)
-#define packet_dump(msg, var, var_len) do {} while (0)
-
-#define dump_sg(sg, skip, len) do {} while (0)
+static inline void flow_log(const char *format, ...)
+{
+}
+
+static inline void flow_dump(const char *msg, const void *var, size_t var_len)
+{
+}
+
+static inline void packet_log(const char *format, ...)
+{
+}
+
+static inline void packet_dump(const char *msg, const void *var, size_t var_len)
+{
+}
+
+static inline void dump_sg(struct scatterlist *sg, unsigned int skip,
+ unsigned int len)
+{
+}
#endif /* DEBUG_ON */
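Swapping the empty do {} while (0) macros for empty static inline functions, as done above, keeps every call site type-checked (and its arguments evaluated) even when DEBUG_ON is off, while still compiling away to nothing. A minimal illustration (trace_log is hypothetical):

#include <linux/printk.h>

#ifdef DEBUG_ON
#define trace_log(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#else
/* The compiler still checks argument types here, then inlines to nothing. */
static inline void trace_log(const char *fmt, ...)
{
}
#endif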
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 8ebf18398166..806bb20d2aa1 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -19,8 +19,8 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#ifdef CONFIG_CAAM_QI
/*
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index f016448e43bb..112b12a32542 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -233,10 +233,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
- curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
- c_size + CPT_NEXT_CHUNK_PTR_SIZE,
- &curr->dma_addr,
- GFP_KERNEL);
+ curr->head = dma_alloc_coherent(&pdev->dev,
+ c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr,
+ GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
i, queue->nchunks);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 476113e12489..cb9b4c4e371e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -128,6 +128,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
default: return 0;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cdfee501fbd9..78833491f534 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -921,7 +921,7 @@ static int cc_cipher_process(struct skcipher_request *req,
return crypto_skcipher_decrypt(subreq);
}
- /* The IV we are handed may be allocted from the stack so
+ /* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
req_ctx->iv = kmemdup(iv, ivsize, flags);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 5f1d4602eb8f..f49579aa1452 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -23,7 +23,6 @@
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
-#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index f4f18bfc2247..4ee010f39912 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/io.h>
@@ -434,3 +435,4 @@ module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
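MODULE_IMPORT_NS(CRYPTO_INTERNAL) above is the consumer half of the kernel's symbol-namespace mechanism: the provider tags its exports, and any module that uses them without importing the namespace gets a modpost warning and unresolved symbols at load time. Schematically (example_fn and EXAMPLE_NS are hypothetical):

#include <linux/module.h>

/* Provider module */
void example_fn(void) { }
EXPORT_SYMBOL_NS_GPL(example_fn, EXAMPLE_NS);

/* Consumer module: required before example_fn() can be linked against. */
MODULE_IMPORT_NS(EXAMPLE_NS);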
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index f69252b24671..181c109b19f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -14,8 +14,7 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3,
- HPRE_CLUSTERS_NUM,
+ HPRE_CLUSTER3
};
enum hpre_ctrl_dbgfs_file {
@@ -36,7 +35,10 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
+#define HPRE_CLUSTERS_NUM_V3 1
+#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
int index;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e5c991913f09..e7a2c70eb9cf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "hpre.h"
#define HPRE_QUEUE_NUM_V2 1024
@@ -29,13 +30,14 @@
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
+#define HPRE_RSA_ENB BIT(0)
+#define HPRE_ECC_ENB BIT(1)
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE 0x003fffff
-#define HPRE_RAS_ECC_1BIT_TH 0x30140c
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -45,7 +47,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE 0x1
+#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
@@ -73,7 +75,8 @@
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
-#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_CLUSTER_CORE_MASK_V2 0xf
+#define HPRE_CLUSTER_CORE_MASK_V3 0xff
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
@@ -86,6 +89,11 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
+#define HPRE_CLUSTERS_NUM(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
+#define HPRE_CLUSTER_CORE_MASK(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
+ HPRE_CLUSTER_CORE_MASK_V2)
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -129,7 +137,11 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
- { /* sentinel */ }
+ { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
+ { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
+ {
+ /* sentinel */
+ }
};
static const u64 hpre_cluster_offsets[] = {
@@ -178,6 +190,19 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
"invalid_req_cnt"
};
+static const struct kernel_param_ops hpre_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means hpre only registers to crypto,
+ * uacce_mode = 1 means hpre registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
@@ -214,6 +239,30 @@ struct hisi_qp *hpre_create_qp(void)
return NULL;
}
+static void hpre_pasid_enable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
+static void hpre_pasid_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -238,8 +287,40 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
+static int hpre_set_cluster(struct hisi_qm *qm)
+{
+ u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ u32 val = 0;
+ int ret, i;
+
+ for (i = 0; i < clusters_num; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+ /* clusters initiating */
+ writel(cluster_core_mask,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & cluster_core_mask) ==
+ cluster_core_mask),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d int st status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
/*
- * For Hi1620, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -257,9 +338,8 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
- int ret, i;
u32 val;
+ int ret;
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
@@ -270,11 +350,15 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
- writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ if (qm->ver >= QM_HW_V3)
+ writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
+ HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ else
+ writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+
writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
- writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
@@ -291,37 +375,29 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
return -ETIMEDOUT;
}
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- offset = i * HPRE_CLSTR_ADDR_INTRVL;
+ ret = hpre_set_cluster(qm);
+ if (ret)
+ return -ETIMEDOUT;
- /* clusters initiating */
- writel(HPRE_CLUSTER_CORE_MASK,
- HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
- writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
- HPRE_CORE_INI_STATUS), val,
- ((val & HPRE_CLUSTER_CORE_MASK) ==
- HPRE_CLUSTER_CORE_MASK),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_err(dev,
- "cluster %d int st status timeout!\n", i);
- return -ETIMEDOUT;
- }
- }
+ /* This setting is only needed by Kunpeng 920. */
+ if (qm->ver == QM_HW_V2) {
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
- ret = hpre_cfg_by_dsm(qm);
- if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ disable_flr_of_bme(qm);
- disable_flr_of_bme(qm);
+ /* Enable data buffer pasid */
+ if (qm->use_sva)
+ hpre_pasid_enable(qm);
+ }
return ret;
}
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
unsigned long offset;
int i;
@@ -330,7 +406,7 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
/* clear clusterX/cluster_ctrl */
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}
@@ -629,13 +705,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i, ret;
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret < 0)
return -EINVAL;
@@ -734,6 +811,11 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return -EINVAL;
}
+ if (pdev->revision >= QM_HW_V3)
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ else
+ qm->algs = "rsa\ndh\n";
+ qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
@@ -799,6 +881,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.fe = 0,
.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
HPRE_OOO_ECC_2BIT_ERR,
+ .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
.msi_wr_port = HPRE_WR_MSI_PORT,
.acpi_rst = "HRST",
}
@@ -872,6 +955,14 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_qm_start;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_with_alg_register;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -904,20 +995,24 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &hpre_devices);
hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ ret = hisi_qm_sriov_disable(pdev, true);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
}
}
+
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
if (qm->fun_type == QM_HW_PF) {
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
+ hisi_qm_dev_err_uninit(qm);
}
- hpre_debugfs_exit(qm);
- hisi_qm_stop(qm, QM_NORMAL);
- hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f21ccae0e8ea..13cb4216561a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -54,6 +54,8 @@
#define QM_SQ_PRIORITY_SHIFT 0
#define QM_SQ_ORDERS_SHIFT 4
#define QM_SQ_TYPE_SHIFT 8
+#define QM_QC_PASID_ENABLE 0x1
+#define QM_QC_PASID_ENABLE_SHIFT 7
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
@@ -120,7 +122,7 @@
#define QM_CQC_VFT_VALID (1ULL << 28)
#define QM_SQC_VFT_BASE_SHIFT_V2 28
-#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
+#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
@@ -147,7 +149,6 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
-#define QM_DEV_RESET_FLAG 0
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
@@ -185,6 +186,10 @@
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_EQ_DEPTH (1024 * 2)
+#define QM_DRIVER_REMOVING 0
+#define QM_RST_SCHED 1
+#define QM_RESETTING 2
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -619,6 +624,9 @@ static void qm_cq_head_update(struct hisi_qp *qp)
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+ return;
+
if (qp->event_cb) {
qp->event_cb(qp);
return;
@@ -717,7 +725,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
else
- dev_err(&qm->pdev->dev, "unknown error type %d\n",
+ dev_err(&qm->pdev->dev, "unknown error type %u\n",
type);
if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
@@ -1121,7 +1129,7 @@ static int dump_show(struct hisi_qm *qm, void *info,
dev_info(dev, "%s DUMP\n", info_name);
for (i = 0; i < info_size; i += BYTE_PER_DW) {
- pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
info_buf[i], info_buf[i + 1UL],
info_buf[i + 2UL], info_buf[i + 3UL]);
}
@@ -1154,7 +1162,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1200,7 +1208,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1279,7 +1287,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, q_id);
if (ret || *q_id >= qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
return -EINVAL;
}
@@ -1604,7 +1612,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
+ u32 error_status, tmp, val;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -1615,9 +1623,13 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status == QM_DB_RANDOM_INVALID) {
+ val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
+ /* CE errors are recoverable and do not require a reset */
+ if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_ini->err_info.nfe,
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
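The recoverability check above does not compare against a single error code; it relies on a bitmask containment identity. A minimal illustration (the helper name is hypothetical):

/*
 * (err | mask) == mask holds iff every bit set in err is also set in mask.
 * E.g. mask = 0x6: err = 0x2 gives 0x2 | 0x6 == 0x6 (contained, recoverable),
 * while err = 0x8 gives 0x8 | 0x6 == 0xe != 0x6 (reset needed).
 */
static inline bool err_within_mask(u32 err, u32 mask)
{
        return (err | mask) == mask;
}

qm_dev_err_handle() further down applies the same identity with dev_ce_mask.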
@@ -1685,6 +1697,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qp->is_in_kernel = true;
qm->qp_in_used++;
atomic_set(&qp->qp_status.flags, QP_INIT);
@@ -1747,12 +1760,6 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
if (!sqc)
return -ENOMEM;
- sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, sqc_dma)) {
- kfree(sqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1765,6 +1772,17 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc->cq_num = cpu_to_le16(qp_id);
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+ QM_QC_PASID_ENABLE_SHIFT);
+
+ sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, sqc_dma)) {
+ kfree(sqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
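This hunk (like the matching cqc, eqc and aeqc hunks below) moves dma_map_single() after the last CPU write to the context structure: for streaming DMA, the buffer belongs to the device between map and unmap, so CPU stores made inside that window are not guaranteed to be visible to the device. A minimal sketch of the corrected ordering, with fill_fields() and send_to_device() as illustrative placeholders:

buf = kzalloc(len, GFP_KERNEL);
if (!buf)
        return -ENOMEM;
fill_fields(buf);                     /* complete every CPU write first */
dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma)) {
        kfree(buf);
        return -ENOMEM;
}
send_to_device(dma);                  /* device reads a coherent view */
dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
kfree(buf);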
@@ -1784,12 +1802,6 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
if (!cqc)
return -ENOMEM;
- cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cqc_dma)) {
- kfree(cqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1802,6 +1814,16 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
+
+ cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cqc_dma)) {
+ kfree(cqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -1865,6 +1887,28 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
+ * qp_stop_fail_cb() - call the saved request callback for pending requests.
+ * @qp: the qp which failed to stop.
+ *
+ * The request callback must be invoked whether the task completed or not.
+ */
+static void qp_stop_fail_cb(struct hisi_qp *qp)
+{
+ int qp_used = atomic_read(&qp->qp_status.used);
+ u16 cur_tail = qp->qp_status.sq_tail;
+ u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ struct hisi_qm *qm = qp->qm;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < qp_used; i++) {
+ pos = (i + cur_head) % QM_Q_DEPTH;
+ qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
+ atomic_dec(&qp->qp_status.used);
+ }
+}
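A worked example of the head computation above, with illustrative numbers: for QM_Q_DEPTH = 1024, sq_tail = 5 and qp_used = 8, cur_head = (5 + 1024 - 8) % 1024 = 1021, so the loop replays positions 1021, 1022, 1023, 0, 1, 2, 3, 4 -- the ring is walked from the oldest outstanding SQE up to, but not including, the tail, wrapping at the queue depth.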
+
+/**
* qm_drain_qp() - Drain a qp.
* @qp: The qp we want to drain.
*
@@ -1959,6 +2003,9 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
+ if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
+ qp_stop_fail_cb(qp);
+
dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
@@ -2065,6 +2112,7 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
qp->uacce_q = q;
qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg;
+ qp->is_in_kernel = false;
return 0;
}
@@ -2206,7 +2254,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);
- if (uacce->flags & UACCE_DEV_SVA) {
+ if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@@ -2248,17 +2296,15 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
*/
static int qm_frozen(struct hisi_qm *qm)
{
- down_write(&qm->qps_lock);
-
- if (qm->is_frozen) {
- up_write(&qm->qps_lock);
+ if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
return 0;
- }
+
+ down_write(&qm->qps_lock);
if (!qm->qp_in_used) {
qm->qp_in_used = qm->qp_num;
- qm->is_frozen = true;
up_write(&qm->qps_lock);
+ set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
return 0;
}
@@ -2311,6 +2357,10 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
msleep(WAIT_PERIOD);
}
+ while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
+ test_bit(QM_RESETTING, &qm->misc_ctl))
+ msleep(WAIT_PERIOD);
+
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
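The QM_DRIVER_REMOVING / QM_RST_SCHED / QM_RESETTING bits introduced in this patch fold what used to be separate flags (is_frozen, reset_flag) into one word updated with atomic bitops. A minimal sketch of the pattern with hypothetical names (test_bit() and test_and_set_bit() are the real kernel primitives from <linux/bitops.h>):

enum { DEMO_REMOVING, DEMO_RST_SCHED, DEMO_RESETTING };

static bool demo_try_schedule_reset(unsigned long *ctl)
{
        if (test_bit(DEMO_REMOVING, ctl))
                return false;   /* device is being torn down, don't reset */
        /* atomically claim the "reset scheduled" slot; false if already set */
        return !test_and_set_bit(DEMO_RST_SCHED, ctl);
}

This mirrors the qm_abnormal_irq() change below, which only schedules rst_work when the removing bit is clear and the scheduled bit was not already set.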
@@ -2439,7 +2489,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
mutex_init(&qm->mailbox_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
- qm->is_frozen = false;
+ qm->misc_ctl = 0;
}
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
@@ -2558,15 +2608,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
dma_addr_t eqc_dma;
int ret;
- eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); //todo
+ eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
if (!eqc)
return -ENOMEM;
- eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, eqc_dma)) {
- kfree(eqc);
- return -ENOMEM;
- }
eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
@@ -2574,6 +2618,13 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, eqc_dma)) {
+ kfree(eqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -2591,6 +2642,11 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
if (!aeqc)
return -ENOMEM;
+
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, aeqc_dma)) {
@@ -2598,10 +2654,6 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
- aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2677,7 +2729,7 @@ int hisi_qm_start(struct hisi_qm *qm)
return -EPERM;
}
- dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
+ dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
@@ -3112,7 +3164,7 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
mutex_unlock(&qm_list->lock);
if (ret)
- pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
node, alg_type, qp_num);
err:
@@ -3248,7 +3300,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, 0);
+ return hisi_qm_sriov_disable(pdev, false);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
}
@@ -3269,12 +3321,19 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
- if (!qm->err_ini->log_dev_hw_err) {
- dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->log_dev_hw_err)
+ qm->err_ini->log_dev_hw_err(qm, err_sts);
+
+ /* CE errors are recoverable and do not require a reset */
+ if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
+ qm->err_ini->err_info.dev_ce_mask) {
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm,
+ err_sts);
+
+ return ACC_ERR_RECOVERED;
}
- qm->err_ini->log_dev_hw_err(qm, err_sts);
return ACC_ERR_NEED_RESET;
}
@@ -3313,7 +3372,7 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -3465,7 +3524,7 @@ static int qm_reset_prepare_ready(struct hisi_qm *qm)
int delay = 0;
/* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return -EBUSY;
@@ -3489,6 +3548,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop VFs!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
}
@@ -3496,9 +3556,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+
return 0;
}
@@ -3736,7 +3799,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
hisi_qm_dev_err_init(qm);
qm_restart_done(qm);
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return 0;
}
@@ -3749,18 +3812,23 @@ static int qm_controller_reset(struct hisi_qm *qm)
pci_info(pdev, "Controller resetting...\n");
ret = qm_controller_reset_prepare(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
+ }
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
ret = qm_controller_reset_done(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
+ }
pci_info(pdev, "Controller reset complete\n");
@@ -3867,8 +3935,6 @@ static bool qm_flr_reset_complete(struct pci_dev *pdev)
return false;
}
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
-
return true;
}
@@ -3912,6 +3978,8 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
flr_done:
if (qm_flr_reset_complete(pdev))
pci_info(pdev, "FLR reset complete\n");
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
@@ -3922,7 +3990,9 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
ret = qm_process_dev_error(qm);
- if (ret == ACC_ERR_NEED_RESET)
+ if (ret == ACC_ERR_NEED_RESET &&
+ !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
+ !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
schedule_work(&qm->rst_work);
return IRQ_HANDLED;
@@ -3934,21 +4004,20 @@ static int qm_irq_register(struct hisi_qm *qm)
int ret;
ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_irq, 0, qm->dev_name, qm);
if (ret)
return ret;
if (qm->ver != QM_HW_V1) {
ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_aeq_irq, 0, qm->dev_name, qm);
if (ret)
goto err_aeq_irq;
if (qm->fun_type == QM_HW_PF) {
ret = request_irq(pci_irq_vector(pdev,
QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
+ qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
goto err_abonormal_irq;
}
@@ -4004,6 +4073,9 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
int flag = 0;
int ret = 0;
+ /*
+ * HW V2 does not support using uacce SVA mode and the hardware
+ * crypto algorithms at the same time.
+ */
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return 0;
mutex_lock(&qm_list->lock);
if (list_empty(&qm_list->list))
@@ -4035,6 +4107,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
*/
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return;
+
mutex_lock(&qm_list->lock);
list_del(&qm->list);
mutex_unlock(&qm_list->lock);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 8624d1288afe..54967c6b9c78 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -85,6 +85,11 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means the driver only registers to crypto, 1 means it registers to both crypto and uacce"
+
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
@@ -168,6 +173,7 @@ struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
+ u32 dev_ce_mask;
u32 ce;
u32 nfe;
u32 fe;
@@ -225,7 +231,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
struct hisi_qm_err_status err_status;
- unsigned long reset_flag;
+ unsigned long misc_ctl; /* driver removing and reset sched */
struct rw_semaphore qps_lock;
struct idr qp_idr;
@@ -249,6 +255,7 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t phys_size;
struct uacce_device *uacce;
+ int mode;
};
struct hisi_qp_status {
@@ -282,6 +289,7 @@ struct hisi_qp {
struct hisi_qm *qm;
bool is_resetting;
+ bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
};
@@ -299,7 +307,7 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
if (!pdev) {
q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
+ pr_info("No device found currently, suppose queue number is %u\n",
q_num);
} else {
if (pdev->revision == QM_HW_V1)
@@ -333,6 +341,27 @@ static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
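Assuming the standard module_param_cb() plumbing that the hpre/sec/zip files hook into these helpers, a driver built this way can be loaded with, for example, modprobe hisi_sec2 uacce_mode=1; with the 0444 permission used there, the active value is then readable from /sys/module/hisi_sec2/parameters/uacce_mode. Values other than 0 and 1 are rejected by mode_set() before param_set_int() runs.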
+
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b35c1c2271a3..dc68ba76f65e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "sec.h"
@@ -74,6 +75,16 @@
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24)
+#define SEC_USER1_ENABLE_DATA_SSV BIT(16)
+#define SEC_USER1_WB_CONTEXT_SSV BIT(8)
+#define SEC_USER1_WB_DATA_SSV BIT(0)
+#define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \
+ SEC_USER1_ENABLE_DATA_SSV | \
+ SEC_USER1_WB_CONTEXT_SSV | \
+ SEC_USER1_WB_DATA_SSV)
+#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
+#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
@@ -233,6 +244,18 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+static const struct kernel_param_ops sec_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means sec only registers to crypto,
+ * uacce_mode = 1 means sec registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -299,7 +322,11 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
- reg |= SEC_USER1_SMMU_NORMAL;
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
@@ -725,6 +752,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
.msi_wr_port = BIT(0),
.acpi_rst = "SRST",
}
@@ -758,6 +786,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
+ qm->algs = "cipher\ndigest\naead\n";
+ qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -885,6 +915,14 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_stop;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_alg_unregister;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -912,7 +950,7 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &sec_devices);
hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
sec_debugfs_exit(qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 4fb5a32bf830..02c445722445 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -66,6 +66,7 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
+#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
@@ -211,6 +212,19 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
+static const struct kernel_param_ops zip_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means zip only registers to crypto,
+ * uacce_mode = 1 means zip registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
@@ -279,7 +293,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (qm->use_sva) {
+ if (qm->use_sva && qm->ver == QM_HW_V2) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -314,7 +328,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_CE_ENABLE,
+ qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
@@ -714,6 +729,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
.msi_wr_port = HZIP_WR_PORT,
.acpi_rst = "ZRST",
}
@@ -752,6 +768,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->algs = "zlib\ngzip";
+ qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -887,7 +904,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_qm_alg_unregister(qm, &zip_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 2e1562108a85..6364583b88b2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1166,11 +1166,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
dev = &plf_pdev->dev;
irq = platform_get_irq_byname(plf_pdev, irq_name);
- if (irq < 0) {
- dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
- irq_name, irq);
+ if (irq < 0)
return irq;
- }
} else {
return -ENXIO;
}
@@ -1999,3 +1996,4 @@ MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 50fb6d90a2e0..bc60b5802256 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -13,6 +13,7 @@
#include <crypto/sha3.h>
#include <crypto/skcipher.h>
#include <crypto/sm3.h>
+#include <crypto/internal/cipher.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index f2e17b0c4fa0..00cf8f028cb9 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -38,3 +38,34 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
Provides OCS version of cts(cbc(aes)) and cts(cbc(sm4)).
Intel does not recommend use of CTS mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU
+ tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF || COMPILE_TEST
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
+ Control Unit (HCU) hardware acceleration for use with Crypto API.
+
+ Provides OCS HCU hardware acceleration of sha256, sha384, sha512, and
+ sm3, as well as the HMAC variant of these algorithms.
+
+ Say Y or M if you're building for the Intel Keem Bay SoC. If compiled
+ as a module, the module will be called keembay-ocs-hcu.
+
+ If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ bool "Enable sha224 and hmac(sha224) support in Intel Keem Bay OCS HCU"
+ depends on CRYPTO_DEV_KEEMBAY_OCS_HCU
+ help
+ Enables support for the sha224 and hmac(sha224) algorithms in the
+ Intel Keem Bay OCS HCU driver. Intel recommends against using these
+ algorithms.
+
+ Provides OCS HCU hardware acceleration of sha224 and hmac(sha224).
+
+ If unsure, say N.
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile
index f21e2c4ab3b3..aea03d4432c4 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/keembay/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
+keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
new file mode 100644
index 000000000000..c4b97b4160e9
--- /dev/null
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+
+#include "ocs-hcu.h"
+
+#define DRV_NAME "keembay-ocs-hcu"
+
+/* Flag marking a final request. */
+#define REQ_FINAL BIT(0)
+/* Flag marking a HMAC request. */
+#define REQ_FLAGS_HMAC BIT(1)
+/* Flag set when HW HMAC is being used. */
+#define REQ_FLAGS_HMAC_HW BIT(2)
+/* Flag set when SW HMAC is being used. */
+#define REQ_FLAGS_HMAC_SW BIT(3)
+
+/**
+ * struct ocs_hcu_ctx - OCS HCU transform context.
+ * @engine_ctx: Crypto Engine context.
+ * @hcu_dev: The OCS HCU device used by the transformation.
+ * @key: The key (used only for HMAC transformations).
+ * @key_len: The length of the key.
+ * @is_sm3_tfm: Whether or not this is an SM3 transformation.
+ * @is_hmac_tfm: Whether or not this is a HMAC transformation.
+ */
+struct ocs_hcu_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct ocs_hcu_dev *hcu_dev;
+ u8 key[SHA512_BLOCK_SIZE];
+ size_t key_len;
+ bool is_sm3_tfm;
+ bool is_hmac_tfm;
+};
+
+/**
+ * struct ocs_hcu_rctx - Context for the request.
+ * @hcu_dev: OCS HCU device to be used to service the request.
+ * @flags: Flags tracking request status.
+ * @algo: Algorithm to use for the request.
+ * @blk_sz: Block size of the transformation / request.
+ * @dig_sz: Digest size of the transformation / request.
+ * @dma_list: OCS DMA linked list.
+ * @hash_ctx: OCS HCU hashing context.
+ * @buffer: Buffer storing a partial block of data and SW HMAC
+ * artifacts (ipad, opad, etc.).
+ * @buf_cnt: Number of bytes currently stored in the buffer.
+ * @buf_dma_addr: The DMA address of @buffer (when mapped).
+ * @buf_dma_count: The number of bytes in @buffer currently DMA-mapped.
+ * @sg: Head of the scatterlist entries containing data.
+ * @sg_data_total: Total data in the SG list at any time.
+ * @sg_data_offset: Offset into the data of the current individual SG node.
+ * @sg_dma_nents: Number of sg entries mapped in dma_list.
+ */
+struct ocs_hcu_rctx {
+ struct ocs_hcu_dev *hcu_dev;
+ u32 flags;
+ enum ocs_hcu_algo algo;
+ size_t blk_sz;
+ size_t dig_sz;
+ struct ocs_hcu_dma_list *dma_list;
+ struct ocs_hcu_hash_ctx hash_ctx;
+ /*
+ * Buffer is double the block size because we need space for SW HMAC
+ * artifacts, i.e:
+ * - ipad (1 block) + a possible partial block of data.
+ * - opad (1 block) + digest of H(k ^ ipad || m)
+ */
+ u8 buffer[2 * SHA512_BLOCK_SIZE];
+ size_t buf_cnt;
+ dma_addr_t buf_dma_addr;
+ size_t buf_dma_count;
+ struct scatterlist *sg;
+ unsigned int sg_data_total;
+ unsigned int sg_data_offset;
+ unsigned int sg_dma_nents;
+};
+
+/**
+ * struct ocs_hcu_drv - Driver data
+ * @dev_list: The list of HCU devices.
+ * @lock: The lock protecting dev_list.
+ */
+struct ocs_hcu_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+static struct ocs_hcu_drv ocs_hcu = {
+ .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
+};
+
+/*
+ * Return the total amount of data in the request; that is: the data in the
+ * request buffer + the data in the sg list.
+ */
+static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
+{
+ return rctx->sg_data_total + rctx->buf_cnt;
+}
+
+/* Move remaining content of scatter-gather list to context buffer. */
+static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
+{
+ size_t count;
+
+ if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
+ WARN(1, "%s: sg data does not fit in buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ while (rctx->sg_data_total) {
+ if (!rctx->sg) {
+ WARN(1, "%s: unexpected NULL sg\n", __func__);
+ return -EINVAL;
+ }
+ /*
+ * If current sg has been fully processed, skip to the next
+ * one.
+ */
+ if (rctx->sg_data_offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ rctx->sg_data_offset = 0;
+ continue;
+ }
+ /*
+ * Determine the maximum data available to copy from the node.
+ * This is the minimum of the length left in the sg node and the
+ * total data remaining in the request.
+ */
+ count = min(rctx->sg->length - rctx->sg_data_offset,
+ rctx->sg_data_total);
+ /* Copy from scatter-list entry to context buffer. */
+ scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
+ rctx->sg, rctx->sg_data_offset,
+ count, 0);
+
+ rctx->sg_data_offset += count;
+ rctx->sg_data_total -= count;
+ rctx->buf_cnt += count;
+ }
+
+ return 0;
+}
+
+static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ /* If the HCU device for the request was previously set, return it. */
+ if (tctx->hcu_dev)
+ return tctx->hcu_dev;
+
+ /*
+ * Otherwise, get the first HCU device available (there should be one
+ * and only one device).
+ */
+ spin_lock_bh(&ocs_hcu.lock);
+ tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
+ struct ocs_hcu_dev,
+ list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return tctx->hcu_dev;
+}
+
+/* Free OCS DMA linked list and DMA-able context buffer. */
+static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
+ struct ocs_hcu_rctx *rctx)
+{
+ struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
+ struct device *dev = hcu_dev->dev;
+
+ /* Unmap rctx->buffer (if mapped). */
+ if (rctx->buf_dma_count) {
+ dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
+ DMA_TO_DEVICE);
+ rctx->buf_dma_count = 0;
+ }
+
+ /* Unmap req->src (if mapped). */
+ if (rctx->sg_dma_nents) {
+ dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ rctx->sg_dma_nents = 0;
+ }
+
+ /* Free dma_list (if allocated). */
+ if (rctx->dma_list) {
+ ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
+ rctx->dma_list = NULL;
+ }
+}
+
+/*
+ * Prepare for DMA operation:
+ * - DMA-map request context buffer (if needed)
+ * - DMA-map SG list (only the entries to be processed, see note below)
+ * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
+ * process + context buffer (if not empty)).
+ * - Add DMA-mapped request context buffer to OCS HCU DMA list.
+ * - Add SG entries to DMA list.
+ *
+ * Note: if this is a final request, we process all the data in the SG list,
+ * otherwise we can only process up to the maximum amount of block-aligned data
+ * (the remainder will be put into the context buffer and processed in the next
+ * request).
+ */
+static int kmb_ocs_dma_prepare(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct device *dev = rctx->hcu_dev->dev;
+ unsigned int remainder = 0;
+ unsigned int total;
+ size_t nents;
+ size_t count;
+ int rc;
+ int i;
+
+ /* This function should be called only when there is data to process. */
+ total = kmb_get_total_data(rctx);
+ if (!total)
+ return -EINVAL;
+
+ /*
+ * If this is not a final DMA (terminated DMA), the data passed to the
+ * HCU must be aligned to the block size; compute the remainder data to
+ * be processed in the next request.
+ */
+ if (!(rctx->flags & REQ_FINAL))
+ remainder = total % rctx->blk_sz;
+
+ /* Determine the number of scatter gather list entries to process. */
+ nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+
+ /* If there are entries to process, map them. */
+ if (nents) {
+ rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
+ DMA_TO_DEVICE);
+ if (!rctx->sg_dma_nents) {
+ dev_err(dev, "Failed to MAP SG\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ /*
+ * The value returned by dma_map_sg() can be < nents; so update
+ * nents accordingly.
+ */
+ nents = rctx->sg_dma_nents;
+ }
+
+ /*
+ * If the context buffer is not empty, map it and add an extra DMA
+ * entry for it.
+ */
+ if (rctx->buf_cnt) {
+ rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
+ rctx->buf_cnt,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
+ dev_err(dev, "Failed to map request context buffer\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ rctx->buf_dma_count = rctx->buf_cnt;
+ /* Increase number of dma entries. */
+ nents++;
+ }
+
+ /* Allocate OCS HCU DMA list. */
+ rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
+ if (!rctx->dma_list) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Add request context buffer (if previously DMA-mapped) */
+ if (rctx->buf_dma_count) {
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
+ rctx->buf_dma_addr,
+ rctx->buf_dma_count);
+ if (rc)
+ goto cleanup;
+ }
+
+ /* Add the SG nodes to be processed to the DMA linked list. */
+ for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
+ /*
+ * The number of bytes to add to the list entry is the minimum
+ * between:
+ * - The DMA length of the SG entry.
+ * - The data left to be processed.
+ */
+ count = min(rctx->sg_data_total - remainder,
+ sg_dma_len(rctx->sg) - rctx->sg_data_offset);
+ /*
+ * Do not create a zero-length DMA descriptor; this guards against
+ * zero-length SG nodes.
+ */
+ if (count == 0)
+ continue;
+ /* Add sg to HCU DMA list. */
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
+ rctx->dma_list,
+ rctx->sg->dma_address,
+ count);
+ if (rc)
+ goto cleanup;
+
+ /* Update amount of data remaining in SG list. */
+ rctx->sg_data_total -= count;
+
+ /*
+ * If remaining data is equal to remainder (note: 'less than'
+ * case should never happen in practice), we are done: update
+ * offset and exit the loop.
+ */
+ if (rctx->sg_data_total <= remainder) {
+ WARN_ON(rctx->sg_data_total < remainder);
+ rctx->sg_data_offset += count;
+ break;
+ }
+
+ /*
+ * If we get here, it is because we need to process the next sg in
+ * the list; set the offset within the sg to 0.
+ */
+ rctx->sg_data_offset = 0;
+ }
+
+ return 0;
+cleanup:
+ dev_err(dev, "Failed to prepare DMA.\n");
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ return rc;
+}
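A worked example of the remainder split above, with illustrative numbers: for a non-final request with blk_sz = 64 and 150 bytes of total data, remainder = 150 % 64 = 22, so the 128 block-aligned bytes are DMA-mapped and processed now, while the trailing 22 bytes are left in the SG list to be flushed into the context buffer and consumed by the next request.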
+
+static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Clear buffer of any data. */
+ memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
+}
+
+static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
+}
+
+static int prepare_ipad(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int i;
+
+ WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
+ WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
+ "%s: HMAC_SW flag is not set\n", __func__);
+ /*
+ * Key length must be equal to block size. If key is shorter,
+ * we pad it with zeros (note: the key cannot be longer, since
+ * longer keys are hashed by kmb_ocs_hcu_setkey()).
+ */
+ if (ctx->key_len > rctx->blk_sz) {
+ WARN(1, "%s: Invalid key length in tfm context\n", __func__);
+ return -EINVAL;
+ }
+ memzero_explicit(&ctx->key[ctx->key_len],
+ rctx->blk_sz - ctx->key_len);
+ ctx->key_len = rctx->blk_sz;
+ /*
+ * Prepare IPAD for HMAC. Only done for first block.
+ * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
+ * k ^ ipad will be first hashed block.
+ * k ^ opad will be calculated in the final request.
+ * Only needed if not using HW HMAC.
+ */
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+ rctx->buf_cnt = rctx->blk_sz;
+
+ return 0;
+}
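prepare_ipad() and the OPAD step in kmb_ocs_hcu_do_one_request() together implement the classic two-pass HMAC construction. A minimal, self-contained sketch of that construction, where hash2() is a placeholder for the OCS digest operation (it is not a real driver call) and digests the concatenation of its two input buffers:

/* HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)), k zero-padded to blk_sz */
static void hash2(const u8 *a, size_t a_len, const u8 *b, size_t b_len, u8 *out);

static void hmac_sketch(const u8 *key, size_t blk_sz, size_t dig_sz,
                        const u8 *msg, size_t msg_len, u8 *out)
{
        u8 ipad[SHA512_BLOCK_SIZE];
        u8 opad[SHA512_BLOCK_SIZE + SHA512_DIGEST_SIZE];
        size_t i;

        for (i = 0; i < blk_sz; i++) {
                ipad[i] = key[i] ^ HMAC_IPAD_VALUE;     /* 0x36 */
                opad[i] = key[i] ^ HMAC_OPAD_VALUE;     /* 0x5c */
        }
        /* inner digest H((k ^ ipad) || m), stored right after k ^ opad */
        hash2(ipad, blk_sz, msg, msg_len, &opad[blk_sz]);
        /* outer digest H((k ^ opad) || inner_digest) */
        hash2(opad, blk_sz + dig_sz, NULL, 0, out);
}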
+
+static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+ int rc;
+ int i;
+
+ if (!hcu_dev) {
+ rc = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * If hardware HMAC flag is set, perform HMAC in hardware.
+ *
+ * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_HW) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
+ rctx->dma_list, req->result, rctx->dig_sz);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* Handle update request case. */
+ if (!(rctx->flags & REQ_FINAL)) {
+ /* Update should always have input data. */
+ if (!kmb_get_total_data(rctx))
+ return -EINVAL;
+
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ /*
+ * Reset request buffer count (data in the buffer was just
+ * processed).
+ */
+ rctx->buf_cnt = 0;
+ /*
+ * Move remaining sg data into the request buffer, so that it
+ * will be processed during the next request.
+ *
+ * NOTE: we have remaining data if the total returned by
+ * kmb_get_total_data() was not a multiple of the block size.
+ */
+ rc = flush_sg_to_ocs_buffer(rctx);
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* If we get here, this is a final request. */
+
+ /* If there is data to process, use finup. */
+ if (kmb_get_total_data(rctx)) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list,
+ req->result, rctx->dig_sz);
+ /* Free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ } else { /* Otherwise (if we have no data), use final. */
+ rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /*
+ * If we are finalizing a SW HMAC request, we just computed the result
+ * of: H(k ^ ipad || m).
+ *
+ * We now need to complete the HMAC calculation with the OPAD step,
+ * that is, we need to compute H(k ^ opad || digest), where digest is
+ * the digest we just obtained, i.e., H(k ^ ipad || m).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_SW) {
+ /*
+ * Compute k ^ opad and store it in the request buffer (which
+ * is not used anymore at this point).
+ * Note: key has been padded / hashed already (so keylen ==
+ * blksz).
+ */
+ WARN_ON(tctx->key_len != rctx->blk_sz);
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
+ /* Now append the digest to the rest of the buffer. */
+ for (i = 0; (i < rctx->dig_sz); i++)
+ rctx->buffer[rctx->blk_sz + i] = req->result[i];
+
+ /* Now hash the buffer to obtain the final HMAC. */
+ rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
+ rctx->blk_sz + rctx->dig_sz, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /* Perform secure clean-up. */
+ kmb_ocs_hcu_secure_cleanup(req);
+done:
+ crypto_finalize_hash_request(hcu_dev->engine, req, 0);
+
+ return 0;
+
+error:
+ kmb_ocs_hcu_secure_cleanup(req);
+ return rc;
+}
+
+static int kmb_ocs_hcu_init(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ /* Initialize entire request context to zero. */
+ memset(rctx, 0, sizeof(*rctx));
+
+ rctx->hcu_dev = hcu_dev;
+ rctx->dig_sz = crypto_ahash_digestsize(tfm);
+
+ switch (rctx->dig_sz) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ rctx->blk_sz = SHA224_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA224;
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ rctx->blk_sz = SHA256_BLOCK_SIZE;
+ /*
+ * SHA256 and SM3 have the same digest size: use info from tfm
+ * context to find out which one we should use.
+ */
+ rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
+ OCS_HCU_ALGO_SHA256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->blk_sz = SHA384_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->blk_sz = SHA512_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Initialize intermediate data. */
+ ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
+
+ /* If this is a HMAC request, set the HMAC flag. */
+ if (ctx->is_hmac_tfm)
+ rctx->flags |= REQ_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_update(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ int rc;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ /*
+ * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
+ * HMAC does not support context switching (it can therefore only be
+ * used with finup() or digest()).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * If remaining sg_data fits into ctx buffer, just copy it there; we'll
+ * process it at the next update() or final().
+ */
+ if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
+ return flush_sg_to_ocs_buffer(rctx);
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
+
+/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
+static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int rc;
+
+ rctx->flags |= REQ_FINAL;
+
+ /*
+ * If this is a HMAC request and, so far, we didn't have to switch to
+ * SW HMAC, check if we can use HW HMAC.
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ /*
+ * If we are here, it means we never processed any data so far,
+ * so we can use HW HMAC, but only if there is some data to
+ * process (since OCS HW MAC does not support zero-length
+ * messages) and the key length is supported by the hardware
+ * (OCS HCU HW only supports length <= 64); if HW HMAC cannot
+ * be used, fall back to SW-assisted HMAC.
+ */
+ if (kmb_get_total_data(rctx) &&
+ ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
+ rctx->flags |= REQ_FLAGS_HMAC_HW;
+ } else {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
+
+static int kmb_ocs_hcu_final(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ rctx->sg_data_total = 0;
+ rctx->sg_data_offset = 0;
+ rctx->sg = NULL;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_finup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_digest(struct ahash_request *req)
+{
+ int rc = 0;
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ rc = kmb_ocs_hcu_init(req);
+ if (rc)
+ return rc;
+
+ rc = kmb_ocs_hcu_finup(req);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ size_t blk_sz = crypto_ahash_blocksize(tfm);
+ struct crypto_ahash *ahash_tfm;
+ struct ahash_request *req;
+ struct crypto_wait wait;
+ struct scatterlist sg;
+ const char *alg_name;
+ int rc;
+
+ /*
+ * Key length must be equal to block size:
+ * - If key is shorter, we are done for now (the key will be padded
+ * later on); this is to maximize the use of HW HMAC (which works
+ * only for keys <= 64 bytes).
+ * - If key is longer, we hash it.
+ */
+ if (keylen <= blk_sz) {
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+ return 0;
+ }
+
+ switch (digestsize) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ alg_name = "sha224-keembay-ocs";
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
+ "sha256-keembay-ocs";
+ break;
+ case SHA384_DIGEST_SIZE:
+ alg_name = "sha384-keembay-ocs";
+ break;
+ case SHA512_DIGEST_SIZE:
+ alg_name = "sha512-keembay-ocs";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ rc = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ sg_init_one(&sg, key, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+ rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (rc == 0)
+ ctx->key_len = digestsize;
+
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+
+ return rc;
+}
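A worked example of the key rule above (RFC 2104), with illustrative numbers: for hmac(sha256) the block size is 64 bytes, so a 100-byte key does not fit and is replaced here by its 32-byte sha256 digest; prepare_ipad() later zero-pads that digest back up to the 64-byte block size before the XOR steps.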
+
+/* Set request size and initialize tfm context. */
+static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ocs_hcu_rctx));
+
+ /* Init context to 0. */
+ memzero_explicit(ctx, sizeof(*ctx));
+ /* Set engine ops. */
+ ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
+}
+
+static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+/* Function called when 'tfm' is de-initialized. */
+static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Clear the key. */
+ memzero_explicit(ctx->key, sizeof(ctx->key));
+}
+
+static struct ahash_alg ocs_hcu_algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sm3_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "hmac-sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+};
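With cra_priority set to 255, these transforms take precedence over the generic software hashes registered under the same cra_name. For reference, a minimal sketch of how a kernel-side user would drive one of them through the standard ahash API (error handling trimmed; data and data_len are hypothetical):

	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 out[SHA256_DIGEST_SIZE];

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, data_len);
	ahash_request_set_crypt(req, &sg, out, data_len);
	crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);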
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_hcu_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-hcu",
+ },
+ {}
+};
+
+static int kmb_ocs_hcu_remove(struct platform_device *pdev)
+{
+ struct ocs_hcu_dev *hcu_dev;
+ int rc;
+
+ hcu_dev = platform_get_drvdata(pdev);
+ if (!hcu_dev)
+ return -ENODEV;
+
+ crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+
+ rc = crypto_engine_exit(hcu_dev->engine);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_hcu_dev *hcu_dev;
+ struct resource *hcu_mem;
+ int rc;
+
+ hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
+ if (!hcu_dev)
+ return -ENOMEM;
+
+ hcu_dev->dev = dev;
+
+ platform_set_drvdata(pdev, hcu_dev);
+ rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
+ if (rc)
+ return rc;
+
+ /* Get the memory address and remap. */
+ hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!hcu_mem) {
+ dev_err(dev, "Could not retrieve io mem resource.\n");
+ return -ENODEV;
+ }
+
+ hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+ if (IS_ERR(hcu_dev->io_base)) {
+ dev_err(dev, "Could not io-remap mem resource.\n");
+ return PTR_ERR(hcu_dev->io_base);
+ }
+
+ init_completion(&hcu_dev->irq_done);
+
+ /* Get and request IRQ. */
+ hcu_dev->irq = platform_get_irq(pdev, 0);
+ if (hcu_dev->irq < 0)
+ return hcu_dev->irq;
+
+ rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
+ ocs_hcu_irq_handler, NULL, 0,
+ "keembay-ocs-hcu", hcu_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ.\n");
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&hcu_dev->list);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ /* Initialize crypto engine */
+	hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
+	if (!hcu_dev->engine) {
+		rc = -ENOMEM;
+		goto list_del;
+	}
+
+ rc = crypto_engine_start(hcu_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start engine.\n");
+ goto cleanup;
+ }
+
+ /* Security infrastructure guarantees OCS clock is enabled. */
+
+ rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ if (rc) {
+ dev_err(dev, "Could not register algorithms.\n");
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ crypto_engine_exit(hcu_dev->engine);
+list_del:
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_hcu_driver = {
+ .probe = kmb_ocs_hcu_probe,
+ .remove = kmb_ocs_hcu_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_hcu_of_match,
+ },
+};
+
+module_platform_driver(kmb_ocs_hcu_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-aes.c b/drivers/crypto/keembay/ocs-aes.c
index cc286adb1c4a..be9f32fc8f42 100644
--- a/drivers/crypto/keembay/ocs-aes.c
+++ b/drivers/crypto/keembay/ocs-aes.c
@@ -958,14 +958,14 @@ int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
ocs_aes_write_last_data_blk_len(aes_dev, src_size);
/* Write ciphertext bit length */
- bit_len = src_size * 8;
+ bit_len = (u64)src_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
val = bit_len >> 32;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
/* Write aad bit length */
- bit_len = aad_size * 8;
+ bit_len = (u64)aad_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
val = bit_len >> 32;
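The new (u64) casts matter because src_size and aad_size are 32-bit: without them the multiplication is performed in 32 bits and wraps before being widened into bit_len. A quick illustration of the overflow being fixed:

	u32 src_size = 0x20000000;	/* 512 MiB */
	u64 bad  = src_size * 8;	/* 32-bit multiply wraps to 0 */
	u64 good = (u64)src_size * 8;	/* 0x100000000, as intended */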
@@ -1080,15 +1080,15 @@ static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
/*
* q is the octet length of Q.
* q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
- * q - 1 == iv[0]
+ * q - 1 == iv[0] & 0x7;
*/
b0[0] |= iv[0] & 0x7;
/*
* Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
* and must be copied to b0[1]..b0[15-q].
- * q == iv[0] + 1
+ * q == (iv[0] & 0x7) + 1
*/
- q = iv[0] + 1;
+ q = (iv[0] & 0x7) + 1;
for (i = 1; i <= 15 - q; i++)
b0[i] = iv[i];
/*
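Worked example of the fix: CCM encodes q - 1 in the low three bits of the flags octet iv[0], whose upper bits carry unrelated flags. If a caller passes iv[0] = 0x0A, the old code computed q = 0x0A + 1 = 11, outside the valid set {2, ..., 8}, so too few nonce bytes were copied; with the mask, q = (0x0A & 0x7) + 1 = 3 and the nonce is correctly taken from iv[1]..iv[12].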
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
new file mode 100644
index 000000000000..81eecacf603a
--- /dev/null
+++ b/drivers/crypto/keembay/ocs-hcu.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <crypto/sha2.h>
+
+#include "ocs-hcu.h"
+
+/* Registers. */
+#define OCS_HCU_MODE 0x00
+#define OCS_HCU_CHAIN 0x04
+#define OCS_HCU_OPERATION 0x08
+#define OCS_HCU_KEY_0 0x0C
+#define OCS_HCU_ISR 0x50
+#define OCS_HCU_IER 0x54
+#define OCS_HCU_STATUS 0x58
+#define OCS_HCU_MSG_LEN_LO 0x60
+#define OCS_HCU_MSG_LEN_HI 0x64
+#define OCS_HCU_KEY_BYTE_ORDER_CFG 0x80
+#define OCS_HCU_DMA_SRC_ADDR 0x400
+#define OCS_HCU_DMA_SRC_SIZE 0x408
+#define OCS_HCU_DMA_DST_SIZE 0x40C
+#define OCS_HCU_DMA_DMA_MODE 0x410
+#define OCS_HCU_DMA_NEXT_SRC_DESCR 0x418
+#define OCS_HCU_DMA_MSI_ISR 0x480
+#define OCS_HCU_DMA_MSI_IER 0x484
+#define OCS_HCU_DMA_MSI_MASK 0x488
+
+/* Register bit definitions. */
+#define HCU_MODE_ALGO_SHIFT 16
+#define HCU_MODE_HMAC_SHIFT 22
+
+#define HCU_STATUS_BUSY BIT(0)
+
+#define HCU_BYTE_ORDER_SWAP BIT(0)
+
+#define HCU_IRQ_HASH_DONE BIT(2)
+#define HCU_IRQ_HASH_ERR_MASK (BIT(3) | BIT(1) | BIT(0))
+
+#define HCU_DMA_IRQ_SRC_DONE BIT(0)
+#define HCU_DMA_IRQ_SAI_ERR BIT(2)
+#define HCU_DMA_IRQ_BAD_COMP_ERR BIT(3)
+#define HCU_DMA_IRQ_INBUF_RD_ERR BIT(4)
+#define HCU_DMA_IRQ_INBUF_WD_ERR BIT(5)
+#define HCU_DMA_IRQ_OUTBUF_WR_ERR BIT(6)
+#define HCU_DMA_IRQ_OUTBUF_RD_ERR BIT(7)
+#define HCU_DMA_IRQ_CRD_ERR BIT(8)
+#define HCU_DMA_IRQ_ERR_MASK (HCU_DMA_IRQ_SAI_ERR | \
+ HCU_DMA_IRQ_BAD_COMP_ERR | \
+ HCU_DMA_IRQ_INBUF_RD_ERR | \
+ HCU_DMA_IRQ_INBUF_WD_ERR | \
+ HCU_DMA_IRQ_OUTBUF_WR_ERR | \
+ HCU_DMA_IRQ_OUTBUF_RD_ERR | \
+ HCU_DMA_IRQ_CRD_ERR)
+
+#define HCU_DMA_SNOOP_MASK (0x7 << 28)
+#define HCU_DMA_SRC_LL_EN BIT(25)
+#define HCU_DMA_EN BIT(31)
+
+#define OCS_HCU_ENDIANNESS_VALUE 0x2A
+
+#define HCU_DMA_MSI_UNMASK BIT(0)
+#define HCU_DMA_MSI_DISABLE 0
+#define HCU_IRQ_DISABLE 0
+
+#define OCS_HCU_START BIT(0)
+#define OCS_HCU_TERMINATE BIT(1)
+
+#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
+
+#define OCS_HCU_HW_KEY_LEN_U32 (OCS_HCU_HW_KEY_LEN / sizeof(u32))
+
+#define HCU_DATA_WRITE_ENDIANNESS_OFFSET 26
+
+#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3 (SHA256_DIGEST_SIZE / sizeof(u32))
+#define OCS_HCU_NUM_CHAINS_SHA384_512 (SHA512_DIGEST_SIZE / sizeof(u32))
+
+/*
+ * While polling on a busy HCU, wait maximum 200us between one check and the
+ * other.
+ */
+#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US 200
+/* Wait on a busy HCU for maximum 1 second. */
+#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
+
+/**
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
+ * @src_addr: Source address of the data.
+ * @src_len: Length of data to be fetched.
+ * @nxt_desc: Next descriptor to fetch.
+ * @ll_flags:  Flags (freeze/terminate) for the DMA engine.
+ */
+struct ocs_hcu_dma_entry {
+ u32 src_addr;
+ u32 src_len;
+ u32 nxt_desc;
+ u32 ll_flags;
+};
+
+/**
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
+ * @head: The head of the list (points to the array backing the list).
+ * @tail: The current tail of the list; NULL if the list is empty.
+ * @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
+ * array).
+ * @max_nents: Maximum number of entries in the list (i.e., number of elements
+ * in the backing array).
+ *
+ * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
+ * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
+ * backing the list is allocated with dma_alloc_coherent() and pointed to by
+ * @head.
+ */
+struct ocs_hcu_dma_list {
+ struct ocs_hcu_dma_entry *head;
+ struct ocs_hcu_dma_entry *tail;
+ dma_addr_t dma_addr;
+ size_t max_nents;
+};
+
+static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+ return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
+ case OCS_HCU_ALGO_SHA384:
+ case OCS_HCU_ALGO_SHA512:
+ return OCS_HCU_NUM_CHAINS_SHA384_512;
+ default:
+ return 0;
+	}
+}
+
+static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ return SHA224_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+		/* SM3 shares the same digest size. */
+ return SHA256_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA384:
+ return SHA384_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA512:
+ return SHA512_DIGEST_SIZE;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
+ * @hcu_dev: OCS HCU device to wait for.
+ *
+ * Return: 0 if the device is free, -ETIMEDOUT if it is busy and the internal
+ *	   timeout has expired.
+ */
+static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
+{
+ long val;
+
+ return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
+ !(val & HCU_STATUS_BUSY),
+ OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
+ OCS_HCU_WAIT_BUSY_TIMEOUT_US);
+}
+
+static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
+ hcu_dev->irq_err = false;
+ /* Enable error and HCU done interrupts. */
+ writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
+ hcu_dev->io_base + OCS_HCU_IER);
+}
+
+static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ hcu_dev->irq_err = false;
+ /* Only operating on DMA source completion and error interrupts. */
+ writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
+ hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+ /* Unmask */
+ writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
+}
+
+static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
+{
+ writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
+ writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+}
+
+static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
+{
+ int rc;
+
+ rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
+ if (rc)
+ goto exit;
+
+ if (hcu_dev->irq_err) {
+ /* Unset flag and return error. */
+ hcu_dev->irq_err = false;
+ rc = -EIO;
+ goto exit;
+ }
+
+exit:
+ ocs_hcu_irq_dis(hcu_dev);
+
+ return rc;
+}
+
+/**
+ * ocs_hcu_get_intermediate_data() - Get intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: Where to store the intermediate data.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to save the current hashing process state in order to
+ * continue it in the future.
+ *
+ * Note: once all data has been processed, the intermediate data actually
+ * contains the hashing result. So this function is also used to retrieve the
+ * final result of a hashing process.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain;
+ int rc;
+ int i;
+
+	/* The destination for the intermediate data must be provided. */
+ if (!data)
+ return -EINVAL;
+
+ chain = (u32 *)data->digest;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_set_intermediate_data() - Set intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: The intermediate data to be set.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to continue a previous hashing process.
+ */
+static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain = (u32 *)data->digest;
+ int i;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+}
+
+static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
+ enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
+{
+ u32 *chain;
+ int rc;
+ int i;
+
+ if (!dgst)
+ return -EINVAL;
+
+ /* Length of the output buffer must match the algo digest size. */
+ if (dgst_len != ocs_hcu_digest_size(algo))
+ return -EINVAL;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ chain = (u32 *)dgst;
+ for (i = 0; i < dgst_len / sizeof(u32); i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hw_cfg() - Configure the HCU hardware.
+ * @hcu_dev: The HCU device to configure.
+ * @algo: The algorithm to be used by the HCU device.
+ * @use_hmac: Whether or not HW HMAC should be used.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ bool use_hmac)
+{
+ u32 cfg;
+ int rc;
+
+ if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
+ algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
+ algo != OCS_HCU_ALGO_SM3)
+ return -EINVAL;
+
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Ensure interrupts are disabled. */
+ ocs_hcu_irq_dis(hcu_dev);
+
+ /* Configure endianness, hashing algorithm and HW HMAC (if needed) */
+ cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
+ cfg |= algo << HCU_MODE_ALGO_SHIFT;
+ if (use_hmac)
+ cfg |= BIT(HCU_MODE_HMAC_SHIFT);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device whose key registers should be cleared.
+ */
+static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
+{
+ int reg_off;
+
+ /* Clear OCS_HCU_KEY_[0..15] */
+ for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
+ writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
+}
+
+/**
+ * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device the key should be written to.
+ * @key: The key to be written.
+ * @len: The size of the key to write. It must not exceed OCS_HCU_HW_KEY_LEN.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
+{
+ u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
+ int i;
+
+ if (len > OCS_HCU_HW_KEY_LEN)
+ return -EINVAL;
+
+ /* Copy key into temporary u32 array. */
+ memcpy(key_u32, key, len);
+
+ /*
+ * Hardware requires all the bytes of the HW Key vector to be
+ * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
+ */
+ memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
+
+ /*
+ * OCS hardware expects the MSB of the key to be written at the highest
+	 * address of the HCU Key vector; in other words, the key must be
+ * written in reverse order.
+ *
+ * Therefore, we first enable byte swapping for the HCU key vector;
+	 * so that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
+	 * are
+ * swapped:
+ * 3 <---> 0, 2 <---> 1.
+ */
+ writel(HCU_BYTE_ORDER_SWAP,
+ hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
+ /*
+ * And then we write the 32-bit words composing the key starting from
+ * the end of the key.
+ */
+ for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
+ writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
+ hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
+
+ memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
+
+ return 0;
+}
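Concretely, with OCS_HCU_HW_KEY_LEN_U32 = 16, the loop's first iteration (i = 0) writes key_u32[15] to OCS_HCU_KEY_0 and its last (i = 15) writes key_u32[0] to OCS_HCU_KEY_0 + 0x3C; combined with the per-word byte swap, the key ends up stored back to front, with its most significant byte at the highest register address.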
+
+/**
+ * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA.
+ * @hcu_dev: The OCS HCU device to use.
+ * @dma_list: The OCS DMA list mapping the data to hash.
+ * @finalize: Whether or not this is the last hashing operation and therefore
+ *	      the final hash should be computed even if the data is not
+ * block-aligned.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_dma_list *dma_list,
+ bool finalize)
+{
+ u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
+ int rc;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ /*
+ * For final requests we use HCU_DONE IRQ to be notified when all input
+ * data has been processed by the HCU; however, we cannot do so for
+ * non-final requests, because we don't get a HCU_DONE IRQ when we
+ * don't terminate the operation.
+ *
+ * Therefore, for non-final requests, we use the DMA IRQ, which
+	 * triggers when the DMA has finished feeding all the input data to the
+ * HCU, but the HCU may still be processing it. This is fine, since we
+ * will wait for the HCU processing to be completed when we try to read
+ * intermediate results, in ocs_hcu_get_intermediate_data().
+ */
+ if (finalize)
+ ocs_hcu_done_irq_en(hcu_dev);
+ else
+ ocs_hcu_dma_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+ writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
+
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ if (finalize)
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents)
+{
+ struct ocs_hcu_dma_list *dma_list;
+
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
+ return NULL;
+
+	/* Allocate the array backing the DMA list. */
+ dma_list->head = dma_alloc_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * max_nents,
+ &dma_list->dma_addr, GFP_KERNEL);
+ if (!dma_list->head) {
+ kfree(dma_list);
+ return NULL;
+ }
+ dma_list->max_nents = max_nents;
+ dma_list->tail = NULL;
+
+ return dma_list;
+}
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list)
+{
+ if (!dma_list)
+ return;
+
+ dma_free_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * dma_list->max_nents,
+ dma_list->head, dma_list->dma_addr);
+
+ kfree(dma_list);
+}
+
+/* Add a new DMA entry at the end of the OCS DMA list. */
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len)
+{
+ struct device *dev = hcu_dev->dev;
+ struct ocs_hcu_dma_entry *old_tail;
+ struct ocs_hcu_dma_entry *new_tail;
+
+ if (!len)
+ return 0;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ if (addr & ~OCS_HCU_DMA_BIT_MASK) {
+ dev_err(dev,
+ "Unexpected error: Invalid DMA address for OCS HCU\n");
+ return -EINVAL;
+ }
+
+ old_tail = dma_list->tail;
+ new_tail = old_tail ? old_tail + 1 : dma_list->head;
+
+ /* Check if list is full. */
+ if (new_tail - dma_list->head >= dma_list->max_nents)
+ return -ENOMEM;
+
+ /*
+ * If there was an old tail (i.e., this is not the first element we are
+ * adding), un-terminate the old tail and make it point to the new one.
+ */
+ if (old_tail) {
+ old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
+ /*
+ * The old tail 'nxt_desc' must point to the DMA address of the
+ * new tail.
+ */
+ old_tail->nxt_desc = dma_list->dma_addr +
+ sizeof(*dma_list->tail) * (new_tail -
+ dma_list->head);
+ }
+
+ new_tail->src_addr = (u32)addr;
+ new_tail->src_len = (u32)len;
+ new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+ new_tail->nxt_desc = 0;
+
+ /* Update list tail with new tail. */
+ dma_list->tail = new_tail;
+
+ return 0;
+}
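Putting the list helpers and the hashing entry points below together, a caller is expected to follow roughly this sequence (a sketch only: DMA-mapping of the source data, block alignment of intermediate updates, and all error handling are omitted, and buf_dma, buf_len and nents are hypothetical):

	struct ocs_hcu_dma_list *dl;
	struct ocs_hcu_hash_ctx ctx;
	u8 dgst[SHA256_DIGEST_SIZE];

	ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);

	dl = ocs_hcu_dma_list_alloc(hcu_dev, nents);
	ocs_hcu_dma_list_add_tail(hcu_dev, dl, buf_dma, buf_len);

	ocs_hcu_hash_update(hcu_dev, &ctx, dl);	/* zero or more times */
	ocs_hcu_hash_final(hcu_dev, &ctx, dgst, sizeof(dgst));

	ocs_hcu_dma_list_free(hcu_dev, dl);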
+
+/**
+ * ocs_hcu_hash_init() - Initialize hash operation context.
+ * @ctx: The context to initialize.
+ * @algo: The hashing algorithm to use.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->algo = algo;
+ ctx->idata.msg_len_lo = 0;
+ ctx->idata.msg_len_hi = 0;
+ /* No need to set idata.digest to 0. */
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
+ if (rc)
+ return rc;
+
+ /* Update idata and return. */
+ return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+}
+
+/**
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hash_final() - Finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /*
+ * Enable HCU interrupts, so that HCU_DONE will be triggered once the
+ * final hash is computed.
+ */
+ ocs_hcu_done_irq_en(hcu_dev);
+ reinit_completion(&hcu_dev->irq_done);
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_digest() - Compute hash digest.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use.
+ * @data: The input data to process.
+ * @data_len: The length of @data.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len)
+{
+ struct device *dev = hcu_dev->dev;
+ dma_addr_t dma_handle;
+ u32 reg;
+ int rc;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
+ if (rc)
+ return rc;
+
+ dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_handle))
+ return -EIO;
+
+ reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
+
+ ocs_hcu_done_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+
+ writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
+ writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+ writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+
+	/* Unmap the input buffer even if waiting failed. */
+	dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
+
+	if (rc)
+		return rc;
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hmac() - Compute HMAC.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use with HMAC.
+ * @key: The key to use.
+ * @key_len: The length of @key.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed HMAC.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+	/* Ensure the key is non-NULL and non-empty. */
+ if (!key || key_len == 0)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_write_key(hcu_dev, key, key_len);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+
+ /* Clear HW key before processing return code. */
+ ocs_hcu_clear_key(hcu_dev);
+
+ if (rc)
+ return rc;
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_hcu_dev *hcu_dev = dev_id;
+ u32 hcu_irq;
+ u32 dma_irq;
+
+ /* Read and clear the HCU interrupt. */
+ hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
+ writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
+
+ /* Read and clear the HCU DMA interrupt. */
+ dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+
+ /* Check for errors. */
+ if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
+ hcu_dev->irq_err = true;
+ goto complete;
+ }
+
+ /* Check for DONE IRQs. */
+ if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
+ goto complete;
+
+ return IRQ_NONE;
+
+complete:
+ complete(&hcu_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-hcu.h b/drivers/crypto/keembay/ocs-hcu.h
new file mode 100644
index 000000000000..fbbbb92a0592
--- /dev/null
+++ b/drivers/crypto/keembay/ocs-hcu.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+
+#ifndef _CRYPTO_OCS_HCU_H
+#define _CRYPTO_OCS_HCU_H
+
+#define OCS_HCU_DMA_BIT_MASK DMA_BIT_MASK(32)
+
+#define OCS_HCU_HW_KEY_LEN 64
+
+struct ocs_hcu_dma_list;
+
+enum ocs_hcu_algo {
+ OCS_HCU_ALGO_SHA256 = 2,
+ OCS_HCU_ALGO_SHA224 = 3,
+ OCS_HCU_ALGO_SHA384 = 4,
+ OCS_HCU_ALGO_SHA512 = 5,
+ OCS_HCU_ALGO_SM3 = 6,
+};
+
+/**
+ * struct ocs_hcu_dev - OCS HCU device context.
+ * @list: List of device contexts.
+ * @dev: OCS HCU device.
+ * @io_base: Base address of OCS HCU registers.
+ * @engine: Crypto engine for the device.
+ * @irq: IRQ number.
+ * @irq_done: Completion for IRQ.
+ * @irq_err: Flag indicating an IRQ error has happened.
+ */
+struct ocs_hcu_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *io_base;
+ struct crypto_engine *engine;
+ int irq;
+ struct completion irq_done;
+ bool irq_err;
+};
+
+/**
+ * struct ocs_hcu_idata - Intermediate data generated by the HCU.
+ * @msg_len_lo: Length of data the HCU has operated on in bits, low 32b.
+ * @msg_len_hi: Length of data the HCU has operated on in bits, high 32b.
+ * @digest: The digest read from the HCU. If the HCU is terminated, it will
+ * contain the actual hash digest. Otherwise it is the intermediate
+ * state.
+ */
+struct ocs_hcu_idata {
+ u32 msg_len_lo;
+ u32 msg_len_hi;
+ u8 digest[SHA512_DIGEST_SIZE];
+};
+
+/**
+ * struct ocs_hcu_hash_ctx - Context for OCS HCU hashing operation.
+ * @algo: The hashing algorithm being used.
+ * @idata: The current intermediate data.
+ */
+struct ocs_hcu_hash_ctx {
+ enum ocs_hcu_algo algo;
+ struct ocs_hcu_idata idata;
+};
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id);
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents);
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len);
+
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo);
+
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len);
+
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+#endif /* _CRYPTO_OCS_HCU_H */
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 13063384f958..9125199f1702 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -35,3 +35,18 @@ config CRYPTO_DEV_OCTEONTX_CPT
To compile this driver as module, choose M here:
the modules will be called octeontx-cpt and octeontx-cptvf
+
+config CRYPTO_DEV_OCTEONTX2_CPT
+ tristate "Marvell OcteonTX2 CPT driver"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ depends on NET_VENDOR_MARVELL
+ select OCTEONTX2_MBOX
+ select CRYPTO_DEV_MARVELL
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ help
+	  This driver allows you to utilize the Marvell Cryptographic
+	  Accelerator Unit (CPT) found in the OcteonTX2 series of processors.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index 6c6a1519b0f1..39db6d9c0aaf 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2/
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 06211858bf2e..f14aac532f53 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
engine->pool = of_gen_pool_get(cesa->dev->of_node,
"marvell,crypto-srams", idx);
if (engine->pool) {
- engine->sram = gen_pool_dma_alloc(engine->pool,
- cesa->sram_size,
- &engine->sram_dma);
- if (engine->sram)
+ engine->sram_pool = gen_pool_dma_alloc(engine->pool,
+ cesa->sram_size,
+ &engine->sram_dma);
+ if (engine->sram_pool)
return 0;
engine->pool = NULL;
@@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_engine *engine = &cesa->engines[idx];
if (engine->pool)
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
cesa->sram_size);
else
dma_unmap_resource(cesa->dev, engine->sram_dma,
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index fabfaaccca87..c1007f2ba79c 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
__le32 byte_cnt;
union {
__le32 src;
- dma_addr_t src_dma;
+ u32 src_dma;
};
union {
__le32 dst;
- dma_addr_t dst_dma;
+ u32 dst_dma;
};
__le32 next_dma;
@@ -428,6 +428,7 @@ struct mv_cesa_dev {
* @id: engine id
* @regs: engine registers
* @sram: SRAM memory region
+ * @sram_pool: SRAM memory region from pool
* @sram_dma: DMA address of the SRAM memory region
* @lock: engine lock
* @req: current crypto request
@@ -448,7 +449,10 @@ struct mv_cesa_dev {
struct mv_cesa_engine {
int id;
void __iomem *regs;
- void __iomem *sram;
+ union {
+ void __iomem *sram;
+ void *sram_pool;
+ };
dma_addr_t sram_dma;
spinlock_t lock;
struct crypto_async_request *req;
@@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags);
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram);
+
+static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ true);
+}
+
+static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ false);
+}
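These two wrappers exist purely so that call sites name the copy direction instead of passing a bare boolean; the conversions in cipher.c and hash.c below then read as mv_cesa_sg_copy_to_sram()/mv_cesa_sg_copy_from_sram() where they previously used sg_pcopy_to_buffer()/sg_pcopy_from_buffer().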
+
/* Algorithm definitions */
extern struct ahash_alg mv_md5_alg;
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b4a6ff9dd6d5..b739d3b873dc 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
- len = sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- len, sreq->offset);
+ len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET, len,
+ sreq->offset);
sreq->size = len;
mv_cesa_set_crypt_op_len(&sreq->op, len);
/* FIXME: only update enc_len field */
if (!sreq->skip_ctx) {
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
sreq->skip_ctx = true;
- } else {
+ } else if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
+ else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
- }
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
@@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
- len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- sreq->size, sreq->offset);
+ len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
+ CESA_SA_DATA_SRAM_OFFSET, sreq->size,
+ sreq->offset);
sreq->offset += len;
if (sreq->offset < req->cryptlen)
@@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
basereq = &creq->base;
memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
- } else {
+ } else if (engine->pool)
+ memcpy(skreq->iv,
+ engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ else
memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
- }
}
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 8cf9fd518d86..c72b0672fc71 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
int i;
mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
+ else
+ memcpy_toio(engine->sram, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
@@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
engine->regs + CESA_IVDIG(i));
}
- if (creq->cache_ptr)
- memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, creq->cache_ptr);
+ if (creq->cache_ptr) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ else
+ memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ }
len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
@@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
}
if (len - creq->cache_ptr)
- sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET +
- creq->cache_ptr,
- len - creq->cache_ptr,
- sreq->offset);
+ sreq->offset += mv_cesa_sg_copy_to_sram(
+ engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
+ len - creq->cache_ptr, sreq->offset);
op = &creq->op_tmpl;
@@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
len &= CESA_HASH_BLOCK_SIZE_MSK;
new_cache_ptr = 64 - trailerlen;
- memcpy_fromio(creq->cache,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET + len,
- new_cache_ptr);
+ if (engine->pool)
+ memcpy(creq->cache,
+ engine->sram_pool +
+ CESA_SA_DATA_SRAM_OFFSET + len,
+ new_cache_ptr);
+ else
+ memcpy_fromio(creq->cache,
+ engine->sram +
+ CESA_SA_DATA_SRAM_OFFSET +
+ len,
+ new_cache_ptr);
} else {
i = mv_cesa_ahash_pad_req(creq, creq->cache);
len += i;
- memcpy_toio(engine->sram + len +
- CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, i);
+ if (engine->pool)
+ memcpy(engine->sram_pool + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
+ else
+ memcpy_toio(engine->sram + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
@@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
/* FIXME: only update enc_len field */
- memcpy_toio(engine->sram, op, sizeof(*op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, op, sizeof(*op));
+ else
+ memcpy_toio(engine->sram, op, sizeof(*op));
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 5d9c48fb72b2..f0b5537038c2 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -177,7 +177,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
/*
* Save the last request in error to engine->req, so that the core
- * knows which request was fautly
+ * knows which request was faulty
*/
if (res) {
spin_lock_bh(&engine->lock);
@@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
return 0;
}
+
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_sram)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ if (!sg_miter_skip(&miter, skip))
+ return 0;
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_sram) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + sram_off + offset,
+ miter.addr, len);
+ else
+ memcpy_toio(engine->sram + sram_off + offset,
+ miter.addr, len);
+ } else {
+ if (engine->pool)
+ memcpy(miter.addr,
+ engine->sram_pool + sram_off + offset,
+ len);
+ else
+ memcpy_fromio(miter.addr,
+ engine->sram + sram_off + offset,
+ len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+
+ return offset;
+}
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..b9c6201019e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
+
+octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
+octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
new file mode 100644
index 000000000000..3518fac29834
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_COMMON_H
+#define __OTX2_CPT_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include "otx2_cpt_hw_types.h"
+#include "rvu.h"
+#include "mbox.h"
+
+#define OTX2_CPT_MAX_VFS_NUM 128
+#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
+ (((blk) << 20) | ((slot) << 12) | (offs))
+#define OTX2_CPT_RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
+#define OTX2_CPT_NAME_LENGTH 64
+#define OTX2_CPT_DMA_MINALIGN 128
+
+#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
+
+enum otx2_cpt_eng_type {
+ OTX2_CPT_AE_TYPES = 1,
+ OTX2_CPT_SE_TYPES = 2,
+ OTX2_CPT_IE_TYPES = 3,
+ OTX2_CPT_MAX_ENG_TYPES,
+};
+
+/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
+#define MBOX_MSG_GET_CAPS 0xBFD
+#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
+
+/*
+ * Message request and response to get the number of the engine group
+ * to which a given type of engines (SE, AE, IE) is attached.
+ * These messages are only used between CPT PF <=> CPT VF.
+ */
+struct otx2_cpt_egrp_num_msg {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+};
+
+struct otx2_cpt_egrp_num_rsp {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+ u8 eng_grp_num;
+};
+
+/*
+ * Message request and response to get kernel crypto limits.
+ * These messages are only used between CPT PF <-> CPT VF.
+ */
+struct otx2_cpt_kvf_limits_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_kvf_limits_rsp {
+ struct mbox_msghdr hdr;
+ u8 kvf_limits;
+};
+
+/* CPT HW capabilities */
+union otx2_cpt_eng_caps {
+ u64 u;
+ struct {
+ u64 reserved_0_4:5;
+ u64 mul:1;
+ u64 sha1_sha2:1;
+ u64 chacha20:1;
+ u64 zuc_snow3g:1;
+ u64 sha3:1;
+ u64 aes:1;
+ u64 kasumi:1;
+ u64 des:1;
+ u64 crc:1;
+ u64 reserved_14_63:50;
+ };
+};
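A consumer on the VF side can then test per-engine-type capability bits out of the otx2_cpt_caps_rsp message defined below; a hypothetical check, assuming rsp points at an already-populated response:

	if (rsp->eng_caps[OTX2_CPT_SE_TYPES].sha1_sha2)
		pr_debug("SE engines implement SHA1/SHA2\n");
	if (rsp->eng_caps[OTX2_CPT_AE_TYPES].mul)
		pr_debug("AE engines implement multiplication\n");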
+
+/*
+ * Message request and response to get HW capabilities for each
+ * engine type (SE, IE, AE).
+ * These messages are only used between CPT PF <=> CPT VF.
+ */
+struct otx2_cpt_caps_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_caps_rsp {
+ struct mbox_msghdr hdr;
+ u16 cpt_pf_drv_version;
+ u8 cpt_revision;
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+};
+
+static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs, u64 val)
+{
+ writeq_relaxed(val, reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs)
+{
+ return readq_relaxed(reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
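For example, writing a (hypothetical) value val to OTX2_CPT_LF_CTL (offset 0x10) of slot 0 in block 1 composes the address (1 << 20) | (0 << 12) | 0x10 = 0x100010:

	otx2_cpt_write64(reg_base, 1, 0, OTX2_CPT_LF_CTL, val);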
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ struct pci_dev *pdev);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
+ struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val);
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+struct otx2_cptlfs_info;
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
new file mode 100644
index 000000000000..ecafc42f37a2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_HW_TYPES_H
+#define __OTX2_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
+#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+
+/* Mailbox interrupts offset */
+#define OTX2_CPT_PF_MBOX_INT 6
+#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+
+/* Maximum supported microcode groups */
+#define OTX2_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX2_CPT_INST_SIZE 64
+/*
+ * CPT VF MSIX vectors and their offsets
+ */
+#define OTX2_CPT_VF_MSIX_VECTORS 1
+#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+
+/* CPT LF MSIX vectors */
+#define OTX2_CPT_LF_MSIX_VECTORS 2
+
+/* OcteonTX2 CPT PF registers */
+#define OTX2_CPT_PF_CONSTANTS (0x0)
+#define OTX2_CPT_PF_RESET (0x100)
+#define OTX2_CPT_PF_DIAG (0x120)
+#define OTX2_CPT_PF_BIST_STATUS (0x160)
+#define OTX2_CPT_PF_ECC0_CTL (0x200)
+#define OTX2_CPT_PF_ECC0_FLIP (0x210)
+#define OTX2_CPT_PF_ECC0_INT (0x220)
+#define OTX2_CPT_PF_ECC0_INT_W1S (0x230)
+#define OTX2_CPT_PF_ECC0_ENA_W1S (0x240)
+#define OTX2_CPT_PF_ECC0_ENA_W1C (0x250)
+#define OTX2_CPT_PF_MBOX_INTX(b) (0x400 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_INT_W1SX(b) (0x420 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1CX(b) (0x440 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1SX(b) (0x460 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INT (0x500)
+#define OTX2_CPT_PF_EXEC_INT_W1S (0x520)
+#define OTX2_CPT_PF_EXEC_ENA_W1C (0x540)
+#define OTX2_CPT_PF_EXEC_ENA_W1S (0x560)
+#define OTX2_CPT_PF_GX_EN(b) (0x600 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INFO (0x700)
+#define OTX2_CPT_PF_EXEC_BUSY (0x800)
+#define OTX2_CPT_PF_EXEC_INFO0 (0x900)
+#define OTX2_CPT_PF_EXEC_INFO1 (0x910)
+#define OTX2_CPT_PF_INST_REQ_PC (0x10000)
+#define OTX2_CPT_PF_INST_LATENCY_PC (0x10020)
+#define OTX2_CPT_PF_RD_REQ_PC (0x10040)
+#define OTX2_CPT_PF_RD_LATENCY_PC (0x10060)
+#define OTX2_CPT_PF_RD_UC_PC (0x10080)
+#define OTX2_CPT_PF_ACTIVE_CYCLES_PC (0x10100)
+#define OTX2_CPT_PF_EXE_CTL (0x4000000)
+#define OTX2_CPT_PF_EXE_STATUS (0x4000008)
+#define OTX2_CPT_PF_EXE_CLK (0x4000010)
+#define OTX2_CPT_PF_EXE_DBG_CTL (0x4000018)
+#define OTX2_CPT_PF_EXE_DBG_DATA (0x4000020)
+#define OTX2_CPT_PF_EXE_BIST_STATUS (0x4000028)
+#define OTX2_CPT_PF_EXE_REQ_TIMER (0x4000030)
+#define OTX2_CPT_PF_EXE_MEM_CTL (0x4000038)
+#define OTX2_CPT_PF_EXE_PERF_CTL (0x4001000)
+#define OTX2_CPT_PF_EXE_DBG_CNTX(b) (0x4001100 | (b) << 3)
+#define OTX2_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180)
+#define OTX2_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200 | (b) << 3)
+#define OTX2_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240 | (b) << 3)
+#define OTX2_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000 | (b) << 3)
+#define OTX2_CPT_PF_QX_CTL(b) (0x8000000 | (b) << 20)
+#define OTX2_CPT_PF_QX_GMCTL(b) (0x8000020 | (b) << 20)
+#define OTX2_CPT_PF_QX_CTL2(b) (0x8000100 | (b) << 20)
+#define OTX2_CPT_PF_VFX_MBOXX(b, c) (0x8001000 | (b) << 20 | \
+ (c) << 8)
+
+/* OcteonTX2 CPT LF registers */
+#define OTX2_CPT_LF_CTL (0x10)
+#define OTX2_CPT_LF_DONE_WAIT (0x30)
+#define OTX2_CPT_LF_INPROG (0x40)
+#define OTX2_CPT_LF_DONE (0x50)
+#define OTX2_CPT_LF_DONE_ACK (0x60)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1S (0x90)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1C (0xa0)
+#define OTX2_CPT_LF_MISC_INT (0xb0)
+#define OTX2_CPT_LF_MISC_INT_W1S (0xc0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S (0xd0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C (0xe0)
+#define OTX2_CPT_LF_Q_BASE (0xf0)
+#define OTX2_CPT_LF_Q_SIZE (0x100)
+#define OTX2_CPT_LF_Q_INST_PTR (0x110)
+#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
+#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
+/* LMT LF registers */
+#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
+#define OTX2_CPT_LMT_LF_LMTLINEX(a) (OTX2_CPT_LMT_LFBASE | 0x000 | \
+ (a) << 12)
+/* RVU VF registers */
+#define OTX2_RVU_VF_INT (0x20)
+#define OTX2_RVU_VF_INT_W1S (0x28)
+#define OTX2_RVU_VF_INT_ENA_W1S (0x30)
+#define OTX2_RVU_VF_INT_ENA_W1C (0x38)
+
+/*
+ * Enumeration otx2_cpt_ucode_comp_code_e
+ *
+ * Enumerates ucode completion codes.
+ */
+enum otx2_cpt_ucode_comp_code_e {
+ OTX2_CPT_UCC_SUCCESS = 0x00,
+ OTX2_CPT_UCC_INVALID_OPCODE = 0x01,
+
+ /* Scatter gather */
+ OTX2_CPT_UCC_SG_WRITE_LENGTH = 0x02,
+ OTX2_CPT_UCC_SG_LIST = 0x03,
+	OTX2_CPT_UCC_SG_NOT_SUPPORTED = 0x04,
+};
+
+/*
+ * Enumeration otx2_cpt_comp_e
+ *
+ * OcteonTX2 CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx2_cpt_comp_e {
+ OTX2_CPT_COMP_E_NOTDONE = 0x00,
+ OTX2_CPT_COMP_E_GOOD = 0x01,
+ OTX2_CPT_COMP_E_FAULT = 0x02,
+ OTX2_CPT_COMP_E_HWERR = 0x04,
+ OTX2_CPT_COMP_E_INSTERR = 0x05,
+ OTX2_CPT_COMP_E_LAST_ENTRY = 0x06
+};
+
+/*
+ * Enumeration otx2_cpt_vf_int_vec_e
+ *
+ * OcteonTX2 CPT VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_vf_int_vec_e {
+ OTX2_CPT_VF_INT_VEC_E_MBOX = 0x00
+};
+
+/*
+ * Enumeration otx2_cpt_lf_int_vec_e
+ *
+ * OcteonTX2 CPT LF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_lf_int_vec_e {
+ OTX2_CPT_LF_INT_VEC_E_MISC = 0x00,
+ OTX2_CPT_LF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure otx2_cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ *    incremented, and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ *  res_addr [127:64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ *  CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ *  submits work to SSO.
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ *  work-queue entry that CPT submits to the SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ *  ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ *  ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx2_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+ /* Word 0 */
+ u64 nixtxl:3;
+ u64 doneint:1;
+ u64 nixtx_addr:60;
+ /* Word 1 */
+ u64 res_addr;
+ /* Word 2 */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_175:4;
+ u64 rvu_pf_func:16;
+ /* Word 3 */
+ u64 qord:1;
+		u64 reserved_193_194:2;
+ u64 wq_ptr:61;
+ /* Word 4 */
+ u64 ei0;
+ /* Word 5 */
+ u64 ei1;
+ /* Word 6 */
+ u64 ei2;
+ /* Word 7 */
+ u64 ei3;
+ } s;
+};
+
+/*
+ * Structure otx2_cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ *    Once the core observes a nonzero [COMPCODE] value in this case, the
+ *    CPT coprocessor will have also completed L2/DRAM write operations.
+ *  uc_compcode:8 [15:8] Microcode completion code for the associated
+ *    instruction, as enumerated by otx2_cpt_ucode_comp_code_e above.
+ * Word 1
+ * reserved
+ *
+ */
+union otx2_cpt_res_s {
+ u64 u[2];
+
+ struct {
+ u64 compcode:8;
+ u64 uc_compcode:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+ u64 reserved_64_127;
+ } s;
+};
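+
+/*
+ * Illustrative only (a hedged sketch, not part of the hardware definition):
+ * the polling pattern described above, assuming software zeroed the result
+ * memory before ringing the doorbell and that the usual kernel READ_ONCE()
+ * and cpu_relax() helpers are visible here. The helper name is hypothetical.
+ */
+static inline int otx2_cpt_res_poll_example(union otx2_cpt_res_s *res)
+{
+	/* Wait until hardware replaces the initial 0x0 completion code */
+	while ((READ_ONCE(res->u[0]) & 0xFF) == OTX2_CPT_COMP_E_NOTDONE)
+		cpu_relax();
+
+	/* A nonzero [COMPCODE] implies L2/DRAM writes have also completed */
+	return (res->u[0] & 0xFF) == OTX2_CPT_COMP_E_GOOD ? 0 : -EIO;
+}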
+
+/*
+ * Register (RVU_PF_BAR0) cpt#_af_constants1
+ *
+ * CPT AF Constants Register
+ * This register contains implementation-related parameters of CPT.
+ */
+union otx2_cptx_af_constants1 {
+ u64 u;
+ struct otx2_cptx_af_constants1_s {
+ u64 se:16;
+ u64 ie:16;
+ u64 ae:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int
+ *
+ * This register contains the per-queue miscellaneous interrupts.
+ *
+ */
+union otx2_cptx_lf_misc_int {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int_ena_w1s
+ *
+ * This register sets interrupt enable bits.
+ *
+ */
+union otx2_cptx_lf_misc_int_ena_w1s {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_ena_w1s_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_ctl
+ *
+ * This register configures the queue.
+ *
+ * When the queue is not execution-quiescent (see CPT_LF_INPROG[EENA,INFLIGHT]),
+ * software must only write this register with [ENA]=0.
+ */
+union otx2_cptx_lf_ctl {
+ u64 u;
+ struct otx2_cptx_lf_ctl_s {
+ u64 ena:1;
+ u64 fc_ena:1;
+ u64 fc_up_crossing:1;
+ u64 reserved_3:1;
+ u64 fc_hyst_bits:4;
+ u64 reserved_8_63:56;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done_wait
+ *
+ * This register specifies the per-queue interrupt coalescing settings.
+ */
+union otx2_cptx_lf_done_wait {
+ u64 u;
+ struct otx2_cptx_lf_done_wait_s {
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done
+ *
+ * This register contains the per-queue instruction done count.
+ */
+union otx2_cptx_lf_done {
+ u64 u;
+ struct otx2_cptx_lf_done_s {
+ u64 done:20;
+ u64 reserved_20_63:44;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_inprog
+ *
+ * This register contains the per-queue count of in-flight instructions.
+ *
+ */
+union otx2_cptx_lf_inprog {
+ u64 u;
+ struct otx2_cptx_lf_inprog_s {
+ u64 inflight:9;
+ u64 reserved_9_15:7;
+ u64 eena:1;
+ u64 grp_drp:1;
+ u64 reserved_18_30:13;
+ u64 grb_partial:1;
+ u64 grb_cnt:8;
+ u64 gwb_cnt:8;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_base
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_BASE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_base {
+ u64 u;
+ struct otx2_cptx_lf_q_base_s {
+ u64 fault:1;
+ u64 reserved_1_6:6;
+ u64 addr:46;
+ u64 reserved_53_63:11;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_size
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_SIZE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_size {
+ u64 u;
+ struct otx2_cptx_lf_q_size_s {
+ u64 size_div40:15;
+ u64 reserved_15_63:49;
+ } s;
+};
+
+/*
+ * RVU_PF_BAR0 - cpt_af_lf_ctl
+ *
+ * This register configures queues. It should be written only when the
+ * queue is execution-quiescent (see CPT_LF_INPROG[INFLIGHT]).
+ */
+union otx2_cptx_af_lf_ctrl {
+ u64 u;
+ struct otx2_cptx_af_lf_ctrl_s {
+ u64 pri:1;
+ u64 reserved_1_8:8;
+ u64 pf_func_inst:1;
+ u64 cont_err:1;
+ u64 reserved_11_15:5;
+ u64 nixtx_en:1;
+ u64 reserved_17_47:31;
+ u64 grp:8;
+ u64 reserved_56_63:8;
+ } s;
+};
+
+#endif /* __OTX2_CPT_HW_TYPES_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
new file mode 100644
index 000000000000..51cb6404ded7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ int ret;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ struct mbox_msghdr *req;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct ready_msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_READY;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 0;
+ reg_msg->reg_offset = reg;
+ reg_msg->ret_val = val;
+
+ return 0;
+}
+
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 1;
+ reg_msg->reg_offset = reg;
+ reg_msg->val = val;
+
+ return 0;
+}
+
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
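+
+/*
+ * Illustrative only: the add_*() helpers above merely queue mailbox
+ * messages; a batch of register accesses can be flushed with a single
+ * send. A hedged sketch with a hypothetical helper name:
+ */
+static int otx2_cpt_write_two_af_regs_example(struct otx2_mbox *mbox,
+					      struct pci_dev *pdev,
+					      u64 reg1, u64 val1,
+					      u64 reg2, u64 val2)
+{
+	int ret;
+
+	ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg1, val1);
+	if (ret)
+		return ret;
+
+	ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg2, val2);
+	if (ret)
+		return ret;
+
+	/* Flush both queued write requests in one mailbox round trip */
+	return otx2_cpt_send_af_reg_requests(mbox, pdev);
+}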
+
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_attach *req;
+ int ret;
+
+ req = (struct rsrc_attach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->cptlfs = lfs->lfs_num;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (!lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_detach *req;
+ int ret;
+
+ req = (struct rsrc_detach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct mbox_msghdr *req;
+ int ret, i;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msix_offset_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->id = MBOX_MSG_MSIX_OFFSET;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (lfs->lf[i].msix_offset == MSIX_VECTOR_INVALID) {
+ dev_err(&pdev->dev,
+ "Invalid msix offset %d for LF %d\n",
+ lfs->lf[i].msix_offset, i);
+ return -EINVAL;
+ }
+ }
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
new file mode 100644
index 000000000000..dbb1ee746f4c
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_REQMGR_H
+#define __OTX2_CPT_REQMGR_H
+
+#include "otx2_cpt_common.h"
+
+/* Completion code size and initial value */
+#define OTX2_CPT_COMPLETION_CODE_SIZE 8
+#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
+/*
+ * The maximum total number of SG buffers is 100; we divide it equally
+ * between input and output
+ */
+#define OTX2_CPT_MAX_SG_IN_CNT 50
+#define OTX2_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX2_CPT_DMA_MODE_DIRECT 0
+#define OTX2_CPT_DMA_MODE_SG 1
+
+/* Context source CPTR or DPTR */
+#define OTX2_CPT_FROM_CPTR 0
+#define OTX2_CPT_FROM_DPTR 1
+
+#define OTX2_CPT_MAX_REQ_SIZE 65535
+
+union otx2_cpt_opcode {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx2_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx2_cpt_opcode opcode;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx2_cpt_iq_cmd_word0 {
+ u64 u;
+ struct {
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
+ } s;
+};
+
+union otx2_cpt_iq_cmd_word3 {
+ u64 u;
+ struct {
+ u64 cptr:61;
+ u64 grp:3;
+ } s;
+};
+
+struct otx2_cpt_iq_command {
+ union otx2_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx2_cpt_iq_cmd_word3 cptr;
+};
+
+struct otx2_cpt_pending_entry {
+ void *completion_addr; /* Completion address */
+ void *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx2_cpt_pending_queue {
+ struct otx2_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
+
+struct otx2_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx2_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved_6_31:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved_6_31:26;
+#endif
+ } s;
+};
+
+struct otx2_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+	struct otx2_cptvf_request req; /* Request information (core specific) */
+	union otx2_cpt_ctrl_info ctrl; /* User control information */
+ struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
+ struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 in_cnt; /* Number of input buffers */
+ u8 out_cnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+	u8 is_enc;	/* Whether the request is an encryption request */
+	u8 is_trunc_hmac; /* Whether truncated HMAC is used */
+};
+
+struct otx2_cpt_inst_info {
+ struct otx2_cpt_pending_entry *pentry;
+ struct otx2_cpt_req_info *req;
+ struct pci_dev *pdev;
+ void *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+struct otx2_cpt_sglist_component {
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
+};
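+
+/*
+ * Illustrative only: filling one scatter-gather component from a buffer
+ * descriptor. The hardware expects big-endian lengths and pointers, hence
+ * the cpu_to_be*() conversions. The helper name is hypothetical.
+ */
+static inline void otx2_cpt_sg_comp_fill_example(
+				struct otx2_cpt_sglist_component *sg,
+				const struct otx2_cpt_buf_ptr *buf)
+{
+	sg->len0 = cpu_to_be16(buf->size);
+	sg->ptr0 = cpu_to_be64(buf->dma_addr);
+}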
+
+static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
+ struct otx2_cpt_inst_info *info)
+{
+ struct otx2_cpt_req_info *req;
+ int i;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->out_cnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->in_cnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kfree(info);
+}
+
+struct otx2_cptlf_wqe;
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num);
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+
+#endif /* __OTX2_CPT_REQMGR_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
new file mode 100644
index 000000000000..823a4571fd67
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+#include "rvu_reg.h"
+
+#define CPT_TIMER_HOLD 0x03F
+#define CPT_COUNT_HOLD 32
+
+static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
+ int time_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.time_wait = time_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.num_wait = num_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
+ int time_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
+}
+
+static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
+}
+
+static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.pri = pri ? 1 : 0;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
+ int eng_grps_mask)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.grp = eng_grps_mask;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
+ int eng_grp_mask, int pri)
+{
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ ret = cptlf_set_pri(&lfs->lf[slot], pri);
+ if (ret)
+ return ret;
+
+ ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+
+ /* Set instruction queues base addresses */
+ otx2_cptlf_set_iqueues_base_addr(lfs);
+
+ /* Set instruction queues sizes */
+ otx2_cptlf_set_iqueues_size(lfs);
+
+ /* Set done interrupts time wait */
+ cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
+
+ /* Set done interrupts num wait */
+ cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
+
+ /* Enable instruction queues */
+ otx2_cptlf_enable_iqueues(lfs);
+}
+
+static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+}
+
+static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
+{
+ union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
+ u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
+ OTX2_CPT_LF_MISC_INT_ENA_W1C;
+ int slot;
+
+ irq_misc.s.fault = 0x1;
+ irq_misc.s.hwerr = 0x1;
+ irq_misc.s.irde = 0x1;
+ irq_misc.s.nqerr = 0x1;
+ irq_misc.s.nwrp = 0x1;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+ irq_misc.u);
+}
+
+static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ /* Enable done interrupts */
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
+ /* Enable Misc interrupts */
+ cptlf_set_misc_intrs(lfs, true);
+}
+
+static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
+ cptlf_set_misc_intrs(lfs, false);
+}
+
+static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_done irq_cnt;
+
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE);
+ return irq_cnt.s.done;
+}
+
+static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
+{
+ union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
+ struct otx2_cptlf_info *lf = arg;
+ struct device *dev;
+
+ dev = &lf->lfs->pdev->dev;
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT);
+ irq_misc_ack.u = 0x0;
+
+ if (irq_misc.s.fault) {
+ dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.fault = 0x1;
+
+ } else if (irq_misc.s.hwerr) {
+		dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.hwerr = 0x1;
+
+ } else if (irq_misc.s.nwrp) {
+ dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.nwrp = 0x1;
+
+ } else if (irq_misc.s.irde) {
+ dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
+ irq_misc_ack.s.irde = 0x1;
+
+ } else if (irq_misc.s.nqerr) {
+ dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
+ irq_misc_ack.s.nqerr = 0x1;
+
+ } else {
+ dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Acknowledge interrupts */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+ struct otx2_cptlf_info *lf = arg;
+ int irq_cnt;
+
+ /* Read the number of completed requests */
+ irq_cnt = cptlf_read_done_cnt(lf);
+ if (irq_cnt) {
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ /* Acknowledge the number of completed requests */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_ACK, irq_cnt);
+
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+ if (unlikely(!lf->wqe)) {
+ dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
+ lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Schedule processing of completed requests */
+ tasklet_hi_schedule(&lf->wqe->work);
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, offs, vector;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ if (!lfs->lf[i].is_irq_reg[offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[offs] = false;
+ }
+ }
+ cptlf_disable_intrs(lfs);
+}
+
+static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
+					int lf_num, int irq_offset,
+					irq_handler_t handler)
+{
+ int ret, vector;
+
+ vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
+ irq_offset);
+ ret = request_irq(vector, handler, 0,
+ lfs->lf[lf_num].irq_name[irq_offset],
+ &lfs->lf[lf_num]);
+ if (ret)
+ return ret;
+
+ lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
+
+ return ret;
+}
+
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int irq_offs, ret, i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
+						   cptlf_misc_intr_handler);
+ if (ret)
+ goto free_irq;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
+ i);
+		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
+						   cptlf_done_intr_handler);
+ if (ret)
+ goto free_irq;
+ }
+ cptlf_enable_intrs(lfs);
+ return 0;
+
+free_irq:
+ otx2_cptlf_unregister_interrupts(lfs);
+ return ret;
+}
+
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ int slot, offs;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
+ irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lfs->lf[slot].msix_offset +
+ offs), NULL);
+ free_cpumask_var(lfs->lf[slot].affinity_mask);
+ }
+}
+
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_info *lf = lfs->lf;
+ int slot, offs, ret;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
+ dev_err(&lfs->pdev->dev,
+				"cpumask allocation failed for LF %d\n", slot);
+ ret = -ENOMEM;
+ goto free_affinity_mask;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(slot,
+ dev_to_node(&lfs->pdev->dev)),
+ lf[slot].affinity_mask);
+
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lf[slot].msix_offset + offs),
+ lf[slot].affinity_mask);
+ if (ret)
+ goto free_affinity_mask;
+ }
+ }
+ return 0;
+
+free_affinity_mask:
+ otx2_cptlf_free_irqs_affinity(lfs);
+ return ret;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ int lfs_num)
+{
+ int slot, ret;
+
+ if (!lfs->pdev || !lfs->reg_base)
+ return -EINVAL;
+
+ lfs->lfs_num = lfs_num;
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lfs->lf[slot].lfs = lfs;
+ lfs->lf[slot].slot = slot;
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ OTX2_CPT_LMT_LF_LMTLINEX(0));
+ lfs->lf[slot].ioreg = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_NQX(0));
+ }
+ /* Send request to attach LFs */
+ ret = otx2_cpt_attach_rscrs_msg(lfs);
+ if (ret)
+ goto clear_lfs_num;
+
+ ret = otx2_cpt_alloc_instruction_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating instruction queues failed\n");
+ goto detach_rsrcs;
+ }
+ cptlf_hw_init(lfs);
+ /*
+	 * Allow each LF to execute requests destined to any of the 8 engine
+ * groups and set queue priority of each LF to high
+ */
+ ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
+ if (ret)
+ goto free_iq;
+
+ return 0;
+
+free_iq:
+ otx2_cpt_free_instruction_queues(lfs);
+ cptlf_hw_cleanup(lfs);
+detach_rsrcs:
+ otx2_cpt_detach_rsrcs_msg(lfs);
+clear_lfs_num:
+ lfs->lfs_num = 0;
+ return ret;
+}
+
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ lfs->lfs_num = 0;
+ /* Cleanup LFs hardware side */
+ cptlf_hw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
new file mode 100644
index 000000000000..314e97354100
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+#ifndef __OTX2_CPTLF_H
+#define __OTX2_CPTLF_H
+
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <mbox.h>
+#include <rvu.h>
+#include "otx2_cpt_common.h"
+#include "otx2_cpt_reqmgr.h"
+
+/*
+ * User-requested length of CPT instruction and pending queues, in CPT_INST_S msgs
+ */
+#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200
+
+/*
+ * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
+ * messages.
+ */
+#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS / 40)
+
+/*
+ * CPT instruction and pending queues length in CPT_INST_S messages
+ */
+#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
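+
+/*
+ * Worked example with the values above: a user-requested length of 8200
+ * messages gives OTX2_CPT_SIZE_DIV40 = 8200 / 40 = 205, so the usable
+ * queue length is (205 - 1) * 40 = 8160 CPT_INST_S messages.
+ */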
+
+/* CPT instruction queue length in bytes */
+#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
+ OTX2_CPT_INST_SIZE)
+
+/* CPT instruction group queue length in bytes */
+#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
+
+/* CPT FC length in bytes */
+#define OTX2_CPT_Q_FC_LEN 128
+
+/* CPT instruction queue alignment */
+#define OTX2_CPT_INST_Q_ALIGNMENT 128
+
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF
+
+/* Maximum LFs supported in OcteonTX2 for CPT */
+#define OTX2_CPT_MAX_LFS_NUM 64
+
+/* Queue priority */
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+#define OTX2_CPT_QUEUE_LOW_PRIO 0x0
+
+enum otx2_cptlf_state {
+ OTX2_CPTLF_IN_RESET,
+ OTX2_CPTLF_STARTED,
+};
+
+struct otx2_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+struct otx2_cptlfs_info;
+struct otx2_cptlf_wqe {
+ struct tasklet_struct work;
+ struct otx2_cptlfs_info *lfs;
+ u8 lf_num;
+};
+
+struct otx2_cptlf_info {
+ struct otx2_cptlfs_info *lfs; /* Ptr to cptlfs_info struct */
+ void __iomem *lmtline; /* Address of LMTLINE */
+ void __iomem *ioreg; /* LMTLINE send register */
+ int msix_offset; /* MSI-X interrupts offset */
+ cpumask_var_t affinity_mask; /* IRQs affinity mask */
+	u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32]; /* Interrupt names */
+ u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS]; /* Is interrupt registered */
+ u8 slot; /* Slot number of this LF */
+
+ struct otx2_cpt_inst_queue iqueue;/* Instruction queue */
+ struct otx2_cpt_pending_queue pqueue; /* Pending queue */
+ struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
+};
+
+struct otx2_cptlfs_info {
+ /* Registers start address of VF/PF LFs are attached to */
+ void __iomem *reg_base;
+ struct pci_dev *pdev; /* Device LFs are attached to */
+ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
+ struct otx2_mbox *mbox;
+ u8 are_lfs_attached; /* Whether CPT LFs are attached */
+ u8 lfs_num; /* Number of CPT LFs */
+ u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kvf_limits; /* Kernel crypto limits */
+ atomic_t state; /* LF's state. started/reset */
+};
+
+static inline void otx2_cpt_free_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ if (iq->real_vaddr)
+ dma_free_coherent(&lfs->pdev->dev,
+ iq->size,
+ iq->real_vaddr,
+ iq->real_dma_addr);
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+ }
+}
+
+static inline int otx2_cpt_alloc_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int ret = 0, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ iq->size = OTX2_CPT_INST_QLEN_BYTES +
+ OTX2_CPT_Q_FC_LEN +
+ OTX2_CPT_INST_GRP_QLEN_BYTES +
+ OTX2_CPT_INST_Q_ALIGNMENT;
+ iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr,
+ OTX2_CPT_INST_Q_ALIGNMENT);
+ }
+ return 0;
+
+error:
+ otx2_cpt_free_instruction_queues(lfs);
+ return ret;
+}
+
+static inline void otx2_cptlf_set_iqueues_base_addr(
+ struct otx2_cptlfs_info *lfs)
+{
+ union otx2_cptx_lf_q_base lf_q_base;
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_Q_BASE, lf_q_base.u);
+ }
+}
+
+static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
+
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
+}
+
+static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
+ union otx2_cptx_lf_inprog lf_inprog;
+ int timeout = 20;
+
+ /* Disable instructions enqueuing */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+
+ /* Wait for instruction queue to become empty */
+ do {
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_INPROG);
+ if (!lf_inprog.s.inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ dev_err(&lf->lfs->pdev->dev,
+ "Error LF %d is still busy.\n", lf->slot);
+ break;
+ }
+
+ } while (1);
+
+ /*
+ * Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ lf_inprog.s.eena = 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_ctl lf_ctl;
+
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL);
+
+ /* Set iqueue's enqueuing */
+ lf_ctl.s.ena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_enq(lf, true);
+}
+
+static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_inprog lf_inprog;
+
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG);
+
+ /* Set iqueue's execution */
+ lf_inprog.s.eena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, true);
+}
+
+static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, false);
+}
+
+static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
+ otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
+ }
+}
+
+static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
+ struct otx2_cpt_iq_command *iq_cmd,
+ u64 comp_baddr)
+{
+ cptinst->u[0] = 0x0;
+ cptinst->s.doneint = true;
+ cptinst->s.res_addr = comp_baddr;
+ cptinst->u[2] = 0x0;
+ cptinst->u[3] = 0x0;
+ cptinst->s.ei0 = iq_cmd->cmd.u;
+ cptinst->s.ei1 = iq_cmd->dptr;
+ cptinst->s.ei2 = iq_cmd->rptr;
+ cptinst->s.ei3 = iq_cmd->cptr.u;
+}
+
+/*
+ * On the OcteonTX2 platform, the parameter insts_num is used as a count of
+ * instructions to be enqueued. The valid values for insts_num are:
+ * 1 - 1 CPT instruction will be enqueued during LMTST operation
+ * 2 - 2 CPT instructions will be enqueued during LMTST operation
+ */
+static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
+ u32 insts_num, struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ long ret;
+
+ /*
+	 * Make sure the memory areas pointed to by CPT_INST_S
+	 * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+
+ /*
+ * LDEOR initiates atomic transfer to I/O device
+ * The following will cause the LMTST to fail (the LDEOR
+ * returns zero):
+ * - No stores have been performed to the LMTLINE since it was
+ * last invalidated.
+ * - The bytes which have been stored to LMTLINE since it was
+ * last invalidated form a pattern that is non-contiguous, does
+ * not start at byte 0, or does not end on a 8-byte boundary.
+		 *   not start at byte 0, or does not end on an 8-byte boundary
+		 *   (i.e. comprises a formation of other than 1–16 8-byte
+		 *   words).
+ * These rules are designed such that an operating system
+ * context switch or hypervisor guest switch need have no
+ * knowledge of the LMTST operations; the switch code does not
+ * need to store to LMTCANCEL. Also note as LMTLINE data cannot
+ * be read, there is no information leakage between processes.
+ */
+ ret = otx2_lmt_flush(lf->ioreg);
+
+ } while (!ret);
+}
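+
+/*
+ * Illustrative only (hypothetical helper name): the expected single
+ * instruction enqueue sequence using the helpers above: build the
+ * instruction from the command words, then submit it over the LMTLINE.
+ */
+static inline void otx2_cpt_enqueue_one_example(struct otx2_cptlf_info *lf,
+					struct otx2_cpt_iq_command *iq_cmd,
+					u64 comp_baddr)
+{
+	union otx2_cpt_inst_s inst;
+
+	otx2_cpt_fill_inst(&inst, iq_cmd, comp_baddr);
+	otx2_cpt_send_cmd(&inst, 1, lf);
+}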
+
+static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
+{
+ return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
+ int lfs_num);
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPTLF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
new file mode 100644
index 000000000000..8c899ad531a5
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_H
+#define __OTX2_CPTPF_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptpf_dev;
+struct otx2_cptvf_info {
+ struct otx2_cptpf_dev *cptpf; /* PF pointer this VF belongs to */
+ struct work_struct vfpf_mbox_work;
+ struct pci_dev *vf_dev;
+ int vf_id;
+ int intr_idx;
+};
+
+struct cptpf_flr_work {
+ struct work_struct work;
+ struct otx2_cptpf_dev *pf;
+};
+
+struct otx2_cptpf_dev {
+ void __iomem *reg_base; /* CPT PF registers start address */
+ void __iomem *afpf_mbox_base; /* PF-AF mbox start address */
+ void __iomem *vfpf_mbox_base; /* VF-PF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
+ struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ /* HW capabilities for each engine type */
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+ bool is_eng_caps_discovered;
+
+ /* AF <=> PF mbox */
+ struct otx2_mbox afpf_mbox;
+ struct work_struct afpf_mbox_work;
+ struct workqueue_struct *afpf_mbox_wq;
+
+ /* VF <=> PF mbox */
+ struct otx2_mbox vfpf_mbox;
+ struct workqueue_struct *vfpf_mbox_wq;
+
+ struct workqueue_struct *flr_wq;
+ struct cptpf_flr_work *flr_work;
+
+ u8 pf_id; /* RVU PF number */
+ u8 max_vfs; /* Maximum number of VFs supported by CPT */
+ u8 enabled_vfs; /* Number of enabled VFs */
+ u8 kvf_limits; /* Kernel crypto limits */
+};
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+
+#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
new file mode 100644
index 000000000000..5277e04badd9
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/firmware.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+#define OTX2_CPT_DRV_NAME "octeontx2-cpt"
+#define OTX2_CPT_DRV_STRING "Marvell OcteonTX2 CPT Physical Function Driver"
+
+static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int ena_bits;
+
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
+
+ /* Enable VF interrupts for VFs from 0 to 63 */
+ ena_bits = ((num_vfs - 1) % 64);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (num_vfs > 64) {
+ /* Enable VF interrupts for VFs from 64 to 127 */
+ ena_bits = num_vfs - 64 - 1;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF-PF interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ /* Enable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
+}
+
+static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_flr_wq_handler(struct work_struct *work)
+{
+ struct cptpf_flr_work *flr_work;
+ struct otx2_cptpf_dev *pf;
+ struct mbox_msghdr *req;
+ struct otx2_mbox *mbox;
+ int vf, reg = 0;
+
+ flr_work = container_of(work, struct cptpf_flr_work, work);
+ pf = flr_work->pf;
+ mbox = &pf->afpf_mbox;
+
+ vf = flr_work - pf->flr_work;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req)
+ return;
+
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->id = MBOX_MSG_VF_FLR;
+ req->pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
+{
+ int reg, dev, vf, start_vf, num_reg = 1;
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
+}
+
+static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, vector;
+
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ /* Register VF-PF mailbox interrupt handler */
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR0 irq\n");
+ goto free_mbox0_irq;
+ }
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
+ "CPTVFPF Mbox1", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox1 irq\n");
+ goto free_flr0_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR1 irq\n");
+ goto free_mbox1_irq;
+ }
+ }
+ cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_enable_vf_flr_intrs(cptpf);
+
+ return 0;
+
+free_mbox1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+free_flr0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+free_mbox0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+ return ret;
+}
+
+static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ kfree(pf->flr_work);
+}
+
+static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ int vf;
+
+ cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
+ if (!cptpf->flr_wq)
+ return -ENOMEM;
+
+ cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
+ GFP_KERNEL);
+ if (!cptpf->flr_work)
+ goto destroy_wq;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ cptpf->flr_work[vf].pf = cptpf;
+ INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
+ }
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(cptpf->flr_wq);
+ return -ENOMEM;
+}
+
+static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ u64 vfpf_mbox_base;
+ int err, i;
+
+ cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->vfpf_mbox_wq)
+ return -ENOMEM;
+
+ /* Map VF-PF mailbox memory */
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+ if (!vfpf_mbox_base) {
+ dev_err(dev, "VF-PF mailbox address not configured\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
+ MBOX_SIZE * cptpf->max_vfs);
+ if (!cptpf->vfpf_mbox_base) {
+ dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
+ num_vfs);
+ if (err)
+ goto free_wqe;
+
+ for (i = 0; i < num_vfs; i++) {
+ cptpf->vf[i].vf_id = i;
+ cptpf->vf[i].cptpf = cptpf;
+ cptpf->vf[i].intr_idx = i % 64;
+ INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
+ otx2_cptpf_vfpf_mbox_handler);
+ }
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->vfpf_mbox);
+}
+
+static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ /* Disable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
+ 0x1ULL);
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+}
+
+static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ /* Register AF-PF mailbox interrupt handler */
+ ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
+ "CPTAFPF Mbox", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFAF mbox irq\n");
+ return ret;
+ }
+ /* Clear interrupt if any, to avoid spurious interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+ /* Enable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
+ 0x1ULL);
+
+ ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret) {
+ dev_warn(dev,
+ "AF not responding to mailbox, deferring probe\n");
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
+{
+ int err;
+
+ cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->afpf_mbox_wq)
+ return -ENOMEM;
+
+ err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ if (err)
+ goto error;
+
+ INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ return 0;
+
+error:
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
+}
+
+static ssize_t kvf_limits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->kvf_limits);
+}
+
+static ssize_t kvf_limits_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ int lfs_num;
+
+	if (kstrtoint(buf, 0, &lfs_num)) {
+		dev_err(dev, "lfs count must be in range [1 - %d]\n",
+			num_online_cpus());
+		return -EINVAL;
+	}
+ if (lfs_num < 1 || lfs_num > num_online_cpus()) {
+ dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
+ lfs_num, num_online_cpus());
+ return -EINVAL;
+ }
+ cptpf->kvf_limits = lfs_num;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(kvf_limits);
+static struct attribute *cptpf_attrs[] = {
+ &dev_attr_kvf_limits.attr,
+ NULL
+};
+
+static const struct attribute_group cptpf_sysfs_group = {
+ .attrs = cptpf_attrs,
+};
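+
+/*
+ * Usage example (the sysfs path shown is illustrative): once the PF is
+ * probed, the kernel crypto LF limit can be set from user space, e.g.
+ *   echo 2 > /sys/bus/pci/devices/<DOMAIN:BUS:DEV.FN>/kvf_limits
+ */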
+
+static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
+{
+ u64 rev;
+
+ rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /*
+	 * Check if AF has set up the revision for the RVUM block; otherwise
+	 * the driver probe should be deferred until the AF driver comes up
+ */
+ if (!rev) {
+ dev_warn(&cptpf->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int timeout = 10, ret;
+ u64 reg = 0;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, 0x1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, &reg);
+ if (ret)
+ return ret;
+
+ if (!((reg >> 63) & 0x1))
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+ } while (1);
+
+ return ret;
+}
+
+static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
+{
+ union otx2_cptx_af_constants1 af_cnsts1 = {0};
+ int ret = 0;
+
+ /* Reset the CPT PF device */
+ ret = cptpf_device_reset(cptpf);
+ if (ret)
+ return ret;
+
+ /* Get number of SE, IE and AE engines */
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
+ cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
+ cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
+
+ /* Disable all cores */
+ ret = otx2_cpt_disable_all_cores(cptpf);
+
+ return ret;
+}
+
+static int cptpf_sriov_disable(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(pdev);
+
+ if (!num_vfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf_flr_wq_destroy(cptpf);
+ cptpf_vfpf_mbox_destroy(cptpf);
+ module_put(THIS_MODULE);
+ cptpf->enabled_vfs = 0;
+
+ return 0;
+}
+
+static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int ret;
+
+ /* Initialize VF<=>PF mailbox */
+ ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = cptpf_flr_wq_init(cptpf, num_vfs);
+ if (ret)
+ goto destroy_mbox;
+ /* Register VF<=>PF mailbox interrupt */
+ ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
+ if (ret)
+ goto destroy_flr;
+
+ /* Get CPT HW capabilities using LOAD_FVC operation. */
+ ret = otx2_cpt_discover_eng_capabilities(cptpf);
+ if (ret)
+ goto disable_intr;
+
+ ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
+ if (ret)
+ goto disable_intr;
+
+ cptpf->enabled_vfs = num_vfs;
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ goto disable_intr;
+
+ dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
+
+ try_module_get(THIS_MODULE);
+ return num_vfs;
+
+disable_intr:
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf->enabled_vfs = 0;
+destroy_flr:
+ cptpf_flr_wq_destroy(cptpf);
+destroy_mbox:
+ cptpf_vfpf_mbox_destroy(cptpf);
+ return ret;
+}
+
+static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	if (num_vfs > 0)
+		return cptpf_sriov_enable(pdev, num_vfs);
+
+	return cptpf_sriov_disable(pdev);
+}
+
+static int otx2_cptpf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptpf_dev *cptpf;
+ int err;
+
+ cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
+ if (!cptpf)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map PF's configuration registers */
+ err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPT_DRV_NAME);
+ if (err) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptpf);
+ cptpf->pdev = pdev;
+
+ cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ /* Check if AF driver is up, otherwise defer probe */
+ err = cpt_is_pf_usable(cptpf);
+ if (err)
+ goto clear_drvdata;
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptpf->afpf_mbox_base) {
+		dev_err(dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto clear_drvdata;
+ }
+ err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for %d msix vectors failed\n",
+ RVU_PF_INT_VEC_CNT);
+ goto clear_drvdata;
+ }
+ /* Initialize AF-PF mailbox */
+ err = cptpf_afpf_mbox_init(cptpf);
+ if (err)
+ goto clear_drvdata;
+ /* Register mailbox interrupt */
+ err = cptpf_register_afpf_mbox_intr(cptpf);
+ if (err)
+ goto destroy_afpf_mbox;
+
+ cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
+ /* Initialize engine groups */
+ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
+ if (err)
+ goto unregister_intr;
+
+ err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
+ if (err)
+ goto cleanup_eng_grps;
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+unregister_intr:
+ cptpf_disable_afpf_mbox_intr(cptpf);
+destroy_afpf_mbox:
+ cptpf_afpf_mbox_destroy(cptpf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void otx2_cptpf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+
+ if (!cptpf)
+ return;
+
+ cptpf_sriov_disable(pdev);
+ /* Delete sysfs entry created for kernel VF limits */
+ sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
+ /* Cleanup engine groups */
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+ /* Disable AF-PF mailbox interrupt */
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ /* Destroy AF-PF mbox */
+ cptpf_afpf_mbox_destroy(cptpf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cpt_pci_driver = {
+ .name = OTX2_CPT_DRV_NAME,
+ .id_table = otx2_cpt_id_table,
+ .probe = otx2_cptpf_probe,
+ .remove = otx2_cptpf_remove,
+ .sriov_configure = otx2_cptpf_sriov_configure
+};
+
+module_pci_driver(otx2_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
new file mode 100644
index 000000000000..186f1c1190c1
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+/*
+ * CPT PF driver version; it is incremented by 1 for every feature
+ * addition in CPT mailbox messages.
+ */
+#define OTX2_CPT_PF_DRV_VERSION 0x1
+
+static int forward_to_af(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct mbox_msghdr *msg;
+ int ret;
+
+ msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
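+	/* Copy only the payload; the msg header fields are set below */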
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_caps_rsp *rsp;
+
+ rsp = (struct otx2_cpt_caps_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
+ sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_CAPS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
+ rsp->cpt_revision = cptpf->pdev->revision;
+ memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
+
+ return 0;
+}
+
+static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_egrp_num_msg *grp_req;
+ struct otx2_cpt_egrp_num_rsp *rsp;
+
+ grp_req = (struct otx2_cpt_egrp_num_msg *)req;
+ rsp = (struct otx2_cpt_egrp_num_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->eng_type = grp_req->eng_type;
+ rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ grp_req->eng_type);
+
+ return 0;
+}
+
+static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_kvf_limits_rsp *rsp;
+
+ rsp = (struct otx2_cpt_kvf_limits_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->kvf_limits = cptpf->kvf_limits;
+
+ return 0;
+}
+
+static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ int err = 0;
+
+	/* Check if the msg is valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ switch (req->id) {
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ err = handle_msg_get_eng_grp_num(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_CAPS:
+ err = handle_msg_get_caps(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ err = handle_msg_kvf_limits(cptpf, vf, req);
+ break;
+ default:
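+		/* Requests the PF cannot handle itself are forwarded to the AF */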
+ err = forward_to_af(cptpf, vf, req, size);
+ break;
+ }
+ return err;
+
+inval_msg:
+ otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
+ otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
+ return err;
+}
+
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_cptvf_info *vf;
+ int i, vf_idx;
+ u64 intr;
+
+ /*
+ * Check which VF has raised an interrupt and schedule
+ * corresponding work queue to process the messages
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
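+		/* Each RVU_PF_VFPF_MBOX_INTX register covers up to 64 VFs */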
+ for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
+ vf = &cptpf->vf[vf_idx];
+ if (intr & (1ULL << vf->intr_idx)) {
+ queue_work(cptpf->vfpf_mbox_wq,
+ &vf->vfpf_mbox_work);
+ /* Clear the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
+ 0, RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_cptvf_info *vf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i, err;
+
+ vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
+ cptpf = vf->cptpf;
+ mbox = &cptpf->vfpf_mbox;
+ /* sync with mbox memory region */
+ smp_rmb();
+ mdev = &mbox->dev[vf->vf_id];
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ err = cptpf_handle_vf_req(cptpf, vf, msg,
+ msg->next_msgoff - offset);
+ /*
+		 * Behave like the AF: drop the msg if there is
+		 * no memory; mailbox timeouts are also handled here
+ */
+ if (err == -ENOMEM || err == -EIO)
+ break;
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
+ 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
+ if (msg->rc) {
+ dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_rd_wr->is_write)
+ *rsp_rd_wr->ret_val = rsp_rd_wr->val;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 0;
+ break;
+	default:
+		dev_err(dev, "Unsupported msg %d received.\n", msg->id);
+ break;
+ }
+}
+
+static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
+ int vf_id, int size)
+{
+ struct otx2_mbox *vfpf_mbox;
+ struct mbox_msghdr *fwd;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ vfpf_mbox = &cptpf->vfpf_mbox;
+ vf_id--;
+ if (vf_id >= cptpf->enabled_vfs) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, cptpf->enabled_vfs);
+ return;
+ }
+ if (msg->id == MBOX_MSG_VF_FLR)
+ return;
+
+ fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
+ if (!fwd) {
+ dev_err(&cptpf->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ return;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+}
+
+/* Handle mailbox messages received from AF */
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox *afpf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, vf_id, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
+ afpf_mbox = &cptpf->afpf_mbox;
+ mdev = &afpf_mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
+ offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
+ offset);
+ vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
+ RVU_PFVF_FUNC_MASK;
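+		/*
+		 * A non-zero FUNC field in pcifunc means the AF response is
+		 * destined for a VF; everything else is handled by the PF.
+		 */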
+ if (vf_id > 0)
+ forward_to_vf(cptpf, msg, vf_id,
+ msg->next_msgoff - offset);
+ else
+ process_afpf_mbox_msg(cptpf, msg);
+
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(afpf_mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
new file mode 100644
index 000000000000..1dc3ba298139
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cpt_reqmgr.h"
+#include "rvu_reg.h"
+
+#define CSR_DELAY 30
+
+#define LOADFVC_RLEN 8
+#define LOADFVC_MAJOR_OP 0x01
+#define LOADFVC_MINOR_OP 0x08
+
+struct fw_info_t {
+ struct list_head ucodes;
+};
+
+static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx2\n",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d\n",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+	return eng_grp->ucode[1].type != 0;
+}
+
+static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ str = "IE";
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX2_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX2_CPT_IE_TYPES):
+ str = "IE";
+ break;
+
+ case (1 << OTX2_CPT_AE_TYPES):
+ str = "AE";
+ break;
+
+ case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
+ str = "SE+IPSEC";
+ break;
+ }
+ return str;
+}
+
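+/*
+ * Derive the ucode type bitmask from the image header: the version string
+ * must carry the "ocpt-<revision>" prefix and an engine type tag ("se-",
+ * "ie-" or "ae") that matches the header's version number field.
+ */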
+static int get_ucode_type(struct device *dev,
+ struct otx2_cpt_ucode_hdr *ucode_hdr,
+ int *ucode_type)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
+ char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ struct pci_dev *pdev = cptpf->pdev;
+ int i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
+ return -EINVAL;
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
+ nn == OTX2_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
+ nn == OTX2_CPT_IE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_IE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX2_CPT_AE_UC_TYPE)
+ val |= 1 << OTX2_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
+ dma_addr_t dma_addr)
+{
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_UCODE_BASE(eng),
+ (u64)dma_addr);
+}
+
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_engs_rsvd *engs;
+ dma_addr_t dma_addr;
+ int i, bit, ret;
+
+ /* Set PF number for microcode fetches */
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_PF_FUNC,
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ dma_addr = engs->ucode->dma;
+
+ /*
+ * Set UCODE_BASE only for the cores which are not used,
+		 * other cores should already have a valid UCODE_BASE set
+ */
+ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
+ if (!eng_grp->g->eng_ref_cnt[bit]) {
+ ret = __write_ucode_base(cptpf, bit, dma_addr);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int i, timeout = 10;
+ int busy, ret;
+ u64 reg = 0;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Detach the cores from group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & (1ull << eng_grp->idx)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << eng_grp->idx);
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!eng_grp->g->eng_ref_cnt[i]) {
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ u64 reg = 0;
+ int i, ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Attach the cores to the group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (!(reg & (1ull << eng_grp->idx))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << eng_grp->idx;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable the cores */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1);
+ if (ret)
+ return ret;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+
+ return ret;
+}
+
+static int load_fw(struct device *dev, struct fw_info_t *fw_info,
+ char *filename)
+{
+ struct otx2_cpt_ucode_hdr *ucode_hdr;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int ucode_type, ucode_size;
+ int ret;
+
+ uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
+ if (!uc_info)
+ return -ENOMEM;
+
+ ret = request_firmware(&uc_info->fw, filename, dev);
+ if (ret)
+ goto free_uc_info;
+
+ ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ if (ret)
+ goto release_fw;
+
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size) {
+		dev_err(dev, "Ucode %s has invalid size\n", filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ set_ucode_filename(&uc_info->ucode, filename);
+ memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_num = ucode_hdr->ver_num;
+ uc_info->ucode.type = ucode_type;
+ uc_info->ucode.size = ucode_size;
+ list_add_tail(&uc_info->list, &fw_info->ucodes);
+
+ return 0;
+
+release_fw:
+ release_firmware(uc_info->fw);
+free_uc_info:
+ kfree(uc_info);
+ return ret;
+}
+
+static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr, *temp;
+
+ if (!fw_info)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
+ list_del(&curr->list);
+ release_firmware(curr->fw);
+ kfree(curr);
+ }
+}
+
+static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
+ int ucode_type)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ return curr;
+ }
+ return NULL;
+}
+
+static void print_uc_info(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d\n", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->fw->data);
+ }
+}
+
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+{
+ char filename[OTX2_CPT_NAME_LENGTH];
+ char eng_type[8] = {0};
+ int ret, e, i;
+
+ INIT_LIST_HEAD(&fw_info->ucodes);
+
+ for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
+ strcpy(eng_type, get_eng_type_str(e));
+ for (i = 0; i < strlen(eng_type); i++)
+ eng_type[i] = tolower(eng_type[i]);
+
+ snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
+ pdev->revision, eng_type);
+ /* Request firmware for each engine type */
+ ret = load_fw(&pdev->dev, fw_info, filename);
+ if (ret)
+ goto release_fw;
+ }
+ print_uc_info(fw_info);
+ return 0;
+
+release_fw:
+ cpt_ucode_release_fw(fw_info);
+ return ret;
+}
+
+static struct otx2_cpt_engs_rsvd *find_engines_by_type(
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx2_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail->ie_cnt += val;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int release_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs)
+{
+ struct otx2_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail_cnt = grp->g->avail.ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+ dev_err(dev,
+			"Not enough %s engines: available %d < requested %d\n",
+ get_eng_type_str(req_eng->type),
+ avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs, int ucodes_cnt)
+{
+ int i, ret = 0;
+
+	/* Check that the requested numbers of engines are available */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
+ ucode->va = NULL;
+ ucode->dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx2_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
+ GFP_KERNEL);
+ if (!ucode->va)
+ return -ENOMEM;
+
+ memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
+ ucode->size);
+
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ cpu_to_be64s(&((u64 *)ucode->va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ cpu_to_be16s(&((u16 *)ucode->va)[i]);
+ return 0;
+}
+
+static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ /* Point microcode to each core of the group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Attach the cores to the group and enable them */
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ /* Disable all engines used by this group */
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ /* Clear UCODE_BASE register for each engine used by this group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
+ struct otx2_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
+{
+ struct otx2_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
+ struct otx2_cpt_engines *engs, int engs_cnt)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirror_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+ /*
+ * If mirrored group has this type of engines attached then
+ * there are 3 scenarios possible:
+ * 1) mirrored_engs.count == engs[i].count then all engines
+ * from mirrored engine group will be shared with this engine
+ * group
+ * 2) mirrored_engs.count > engs[i].count then only a subset of
+ * engines from mirrored engine group will be shared with this
+ * engine group
+ * 3) mirrored_engs.count < engs[i].count then all engines
+ * from mirrored engine group will be shared with this group
+	 * and additional engines will be reserved for exclusive use
+ * by this engine group
+ */
+ engs[i].count -= mirrored_engs->count;
+ }
+}
+
+static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ struct otx2_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type &&
+ eng_grps->grp[i].ucode[1].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
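+/*
+ * Build the engine bitmap for each engine type in the group: reserve unused
+ * engines from the type's slice of the global engine space and, when the
+ * group mirrors another one, share the mirrored group's engines as well.
+ */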
+static int eng_grp_update_masks(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx2_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int delete_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ if (!eng_grp->is_enabled)
+ return 0;
+
+ if (eng_grp->mirror.ref_count)
+ return -EINVAL;
+
+	/* Remove engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
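+/*
+ * Point each engine slot at the ucode it should run: the mirrored group's
+ * first ucode when mirroring is enabled, this group's own ucode(s) otherwise.
+ */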
+static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+
+ if (eng_grp->engs[1].type) {
+ if (is_2nd_ucode_used(eng_grp))
+ eng_grp->engs[1].ucode = &eng_grp->ucode[1];
+ else
+ eng_grp->engs[1].ucode = ucode;
+ }
+}
+
+static int create_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grps *eng_grps,
+ struct otx2_cpt_engines *engs, int ucodes_cnt,
+ void *ucode_data[], int is_print)
+{
+ struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
+ struct otx2_cpt_eng_grp_info *eng_grp;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int i, ret = 0;
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+		dev_err(dev, "All engine groups are already in use\n");
+ return -ENOSPC;
+ }
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = uc_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ uc_info->fw->data);
+ if (ret)
+ goto unload_ucode;
+ }
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
+ }
+ ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
+ if (ret)
+ goto unload_ucode;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+
+ if (!is_print)
+ return 0;
+
+ if (mirrored_eng_grp)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d\n",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(eng_grp))
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[1].ver_str);
+
+ return 0;
+
+release_engs:
+ release_engines(dev, eng_grp);
+unload_ucode:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+ return ret;
+}
+
+static void delete_engine_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+}
+
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
+{
+ int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ struct otx2_cpt_eng_grp_info *grp;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ if (!grp->is_enabled)
+ continue;
+
+ if (eng_type == OTX2_CPT_SE_TYPES) {
+ if (eng_grp_has_eng_type(grp, eng_type) &&
+ !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
+ eng_grp_num = i;
+ break;
+ }
+ } else {
+ if (eng_grp_has_eng_type(grp, eng_type)) {
+ eng_grp_num = i;
+ break;
+ }
+ }
+ }
+ return eng_grp_num;
+}
+
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ /*
+	 * We don't create engine groups if they were already
+	 * created (when the user enabled VFs for the first time)
+ */
+ if (eng_grps->is_grps_created)
+ return 0;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Create engine group with SE engines for kernel
+ * crypto functionality (symmetric crypto)
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto release_fw;
+
+ /*
+ * Create engine group with SE+IE engines for IPSec.
+ * All SE engines will be shared with engine group 0.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+
+ if (uc_info[1] == NULL) {
+		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+ engs[1].type = OTX2_CPT_IE_TYPES;
+ engs[1].count = eng_grps->avail.max_ie_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ /*
+ * Create engine group with AE engines for asymmetric
+ * crypto functionality.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ eng_grps->is_grps_created = true;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int i, ret, busy, total_cores;
+ int timeout = 10;
+ u64 reg = 0;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ /* Disengage the cores from groups */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), 0x0);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.eng_ref_cnt[i] = 0;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret)
+ return ret;
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
+
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j;
+
+ delete_engine_grps(pdev, eng_grps);
+ /* Release memory */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+}
+
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j, ret;
+
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ie_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(&pdev->dev,
+			"Number of engines %d exceeds max supported %d\n",
+ eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto cleanup_eng_grps;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto cleanup_eng_grps;
+ }
+ }
+ }
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
+
+static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto release_fw;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_IE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+/*
+ * Get CPT HW capabilities using LOAD_FVC operation.
+ */
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_opcode opcode;
+ union otx2_cpt_res_s *result;
+ union otx2_cpt_inst_s inst;
+ dma_addr_t rptr_baddr;
+ struct pci_dev *pdev;
+ u32 len, compl_rlen;
+ int ret, etype;
+ void *rptr;
+
+ /*
+	 * We don't rediscover capabilities if this was already done
+	 * (when the user enabled VFs for the first time)
+ */
+ if (cptpf->is_eng_caps_discovered)
+ return 0;
+
+ pdev = cptpf->pdev;
+ /*
+ * Create engine groups for each type to submit LOAD_FVC op and
+ * get engine's capabilities.
+ */
+ ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
+ if (ret)
+ goto delete_grps;
+
+ lfs->pdev = pdev;
+ lfs->reg_base = cptpf->reg_base;
+ lfs->mbox = &cptpf->afpf_mbox;
+ ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret)
+ goto delete_grps;
+
+ compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
+ len = compl_rlen + LOADFVC_RLEN;
+
+ result = kzalloc(len, GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto lf_cleanup;
+ }
+ rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
+ dev_err(&pdev->dev, "DMA mapping failed\n");
+ ret = -EFAULT;
+ goto free_result;
+ }
+ rptr = (u8 *)result + compl_rlen;
+
+ /* Fill in the command */
+ opcode.s.major = LOADFVC_MAJOR_OP;
+ opcode.s.minor = LOADFVC_MINOR_OP;
+
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = 0;
+ iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.cptr.u = 0;
+
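+	/*
+	 * Submit one LOAD_FVC instruction per engine type, busy-wait on the
+	 * completion code, then read back the reported engine capabilities.
+	 */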
+ for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+ iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ etype);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+
+ while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
+ cpu_relax();
+
+ cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
+ }
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ cptpf->is_eng_caps_discovered = true;
+
+free_result:
+ kfree(result);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+delete_grps:
+ delete_engine_grps(pdev, &cptpf->eng_grps);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
new file mode 100644
index 000000000000..6b0d432de0af
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_UCODE_H
+#define __OTX2_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+
+/*
+ * On OcteonTX2 platform IPSec ucode can use both IE and SE engines therefore
+ * IE and SE engines can be attached to the same engine group.
+ */
+#define OTX2_CPT_MAX_ETYPES_PER_GRP 2
+
+/* CPT ucode signature size */
+#define OTX2_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX2_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX2 platform */
+#define OTX2_CPT_MAX_ENGINES 128
+
+#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+
+/* Microcode types */
+enum otx2_cpt_ucode_type {
+ OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX2_CPT_SE_UC_TYPE1 = 20,/* SE-MAIN - combination of 21 and 22 */
+ OTX2_CPT_SE_UC_TYPE2 = 21,/* Fast Path IPSec + AirCrypto */
+ OTX2_CPT_SE_UC_TYPE3 = 22,/*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec + AirCrypto + Kasumi
+ */
+ OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
+ OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
+ OTX2_CPT_IE_UC_TYPE3 = 32, /*
+ * Hash + HMAC + FlexiCrypto + RNG +
+				    * Full Feature IPSec
+ */
+};
+
+struct otx2_cpt_bitmap {
+ unsigned long bits[OTX2_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx2_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx2_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx2_cpt_ucode_hdr {
+ struct otx2_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ __be32 code_length;
+ u32 padding[3];
+};
+
+struct otx2_cpt_ucode {
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable
+ * format
+ */
+ struct otx2_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX2_CPT_NAME_LENGTH];/* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE, IE, AE or SE+IE */
+};
+
+struct otx2_cpt_uc_info_t {
+ struct list_head list;
+ struct otx2_cpt_ucode ucode;/* microcode information */
+ const struct firmware *fw;
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx2_cpt_engs_available {
+ int max_se_cnt;
+ int max_ie_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ie_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx2_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx2_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx2_cpt_mirror_info {
+ int is_ena; /*
+ * is mirroring enabled, it is set only for engine
+ * group which mirrors another engine group
+ */
+ int idx; /*
+ * index of engine group which is mirrored by this
+ * group, set only for engine group which mirrors
+ * another group
+ */
+ int ref_count; /*
+ * number of times this engine group is mirrored by
+ * other groups, this is set only for engine group
+ * which is mirrored by other group(s)
+ */
+};
+
+struct otx2_cpt_eng_grp_info {
+ struct otx2_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ /* engines attached */
+ struct otx2_cpt_engs_rsvd engs[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* ucodes information */
+ struct otx2_cpt_ucode ucode[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* engine group mirroring information */
+ struct otx2_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+ bool is_enabled; /*
+ * is engine group enabled, engine group is enabled
+ * when it has engines attached and ucode loaded
+ */
+};
+
+struct otx2_cpt_eng_grps {
+ struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
+ struct otx2_cpt_engs_available avail;
+ void *obj; /* device specific data */
+ int engs_num; /* total number of engines supported */
+ u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
+	bool is_grps_created; /* true if engine groups were already created */
+};
+
+struct otx2_cptpf_dev;
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
new file mode 100644
index 000000000000..4f0a169fddbd
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTVF_H
+#define __OTX2_CPTVF_H
+
+#include "mbox.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptvf_dev {
+ void __iomem *reg_base; /* Register start address */
+ void __iomem *pfvf_mbox_base; /* PF-VF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this VF */
+ u8 vf_id; /* Virtual function index */
+
+ /* PF <=> VF mbox */
+ struct otx2_mbox pfvf_mbox;
+ struct work_struct pfvf_mbox_work;
+ struct workqueue_struct *pfvf_mbox_wq;
+};
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
new file mode 100644
index 000000000000..a72723455df7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -0,0 +1,1758 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/xts.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx2_cptvf.h"
+#include "otx2_cptvf_algs.h"
+#include "otx2_cpt_reqmgr.h"
+
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
+
+/* Truncated SHA digest size */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+ /*
+	 * On the OcteonTX2 platform a CPT instruction queue is bound to
+	 * each local function (LF); LFs can be attached to a PF or a VF,
+	 * so we always use the first device. We get maximum performance
+	 * if one CPT queue is available per cpu, otherwise CPT queues
+	 * need to be shared between cpus.
+ */
+ if (*cpu_num >= se_devices.desc[0].num_queues)
+ *cpu_num %= se_devices.desc[0].num_queues;
+ *pdev = se_devices.desc[0].dev;
+
+ put_cpu();
+
+ return 0;
+}
+
+static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
+{
+ struct otx2_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx2_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ if (inst_info) {
+ cpt_req = inst_info->req;
+ if (!status) {
+ /*
+ * When selected cipher is NULL we need to manually
+ * verify whether calculated hmac value matches
+ * received hmac value
+ */
+ if (cpt_req->req_type ==
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx2_cpt_req_info *req_info;
+ struct otx2_cpt_req_ctx *rctx;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx2_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
+
+static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (inst_info) {
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ u32 len, sg_len;
+ u8 *ptr;
+
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ sg_len = outp_sg->length - offset;
+ len = (nbytes < sg_len) ? nbytes : sg_len;
+ ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
+
+static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
+ req->src == req->dst) {
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX2_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+	/*
+	 * Store the packet data offset information in the control word,
+	 * which occupies the first 8 bytes of the input.
+	 */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
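+/*
+ * The input (gather) list built for the hardware therefore starts as:
+ *
+ *   [ CONTROL WORD (8B) | FC CONTEXT (cipher + hmac state) | ... ]
+ *
+ * create_input_list() below appends the plaintext/ciphertext
+ * scatterlist entries after these two components.
+ */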
+
+static inline int create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+	/*
+	 * Output buffer processing:
+	 * the AES encryption/decryption output is received in the
+	 * following layout:
+	 *
+	 * |--------- IV ---------|--- ENCRYPTED/DECRYPTED DATA ---|
+	 * | 16 bytes for AES CBC |   requested enc/dec data len   |
+	 */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
+ skcipher_request_set_callback(&rctx->sk_fbk_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
+ crypto_skcipher_decrypt(&rctx->sk_fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
+ return skcipher_do_fallback(req, enc);
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = otx2_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+	/*
+	 * We perform an asynchronous send; once the request completes,
+	 * the driver notifies the caller through the registered
+	 * callback functions.
+	 */
+ status = otx2_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ ctx->enc_align_len = 1;
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX2_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+ ctx->enc_align_len = 8;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
+ ctx->enc_align_len = 16;
+ else
+ ctx->enc_align_len = 1;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
+}
+
+static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
+}
+
+static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ memset(ctx, 0, sizeof(*ctx));
+	/*
+	 * Additional memory for a skcipher_request is allocated since
+	 * the cryptd daemon uses this memory for its request_ctx
+	 * information.
+	 */
+ crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return cpt_skcipher_fallback_init(ctx, alg);
+}
+
+static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_skcipher(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+ struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+	/*
+	 * When the selected cipher is NULL we use the HMAC opcode instead
+	 * of the FLEXICRYPTO opcode, therefore we don't need the HASH
+	 * algorithms to calculate ipad and opad.
+	 */
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
+		const char *hash_name = NULL;
+
+		switch (ctx->mac_type) {
+		case OTX2_CPT_SHA1:
+			hash_name = "sha1";
+			break;
+		case OTX2_CPT_SHA256:
+			hash_name = "sha256";
+			break;
+		case OTX2_CPT_SHA384:
+			hash_name = "sha384";
+			break;
+		case OTX2_CPT_SHA512:
+			hash_name = "sha512";
+			break;
+		}
+		if (hash_name) {
+			ctx->hashalg = crypto_alloc_shash(hash_name, 0,
+							  CRYPTO_ALG_ASYNC);
+			if (IS_ERR(ctx->hashalg))
+				return PTR_ERR(ctx->hashalg);
+		}
+ }
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ case OTX2_CPT_AES_ECB:
+ ctx->enc_align_len = 16;
+ break;
+ case OTX2_CPT_DES3_CBC:
+ case OTX2_CPT_DES3_ECB:
+ ctx->enc_align_len = 8;
+ break;
+ case OTX2_CPT_AES_GCM:
+ case OTX2_CPT_CIPHER_NULL:
+ ctx->enc_align_len = 1;
+ break;
+ }
+ crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+
+ return cpt_aead_fallback_init(ctx, alg);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
+}
+
+static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_aead(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (crypto_rfc4106_check_authsize(authsize))
+ return -EINVAL;
+
+ tfm->authsize = authsize;
+ /* Set authsize for fallback case */
+ if (ctx->fbk_cipher)
+ ctx->fbk_cipher->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->is_trunc_hmac = true;
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx2_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ cpu_to_be32_array(buf, buf, len / 4);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *src = buf;
+ int i = 0;
+
+	for (i = 0; i < len / 8; i++, src++)
+ cpu_to_be64s(src);
+}
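+/*
+ * The shash state is exported in host byte order; the helpers above
+ * convert it to the big-endian layout expected in the flexicrypto
+ * context (32-bit words for SHA1/SHA256, 64-bit words for
+ * SHA384/SHA512).
+ */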
+
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
+ switch (mac_type) {
+ case OTX2_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA384:
+ case OTX2_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+	/*
+	 * The partial hash state computed by the software algorithm is
+	 * exported for both IPAD and OPAD.
+	 */
+
+	/* IPAD calculation */
+	ret = crypto_shash_init(&ctx->sdesc->shash);
+	if (ret)
+		goto calc_fail;
+	ret = crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+	if (ret)
+		goto calc_fail;
+	ret = crypto_shash_export(&ctx->sdesc->shash, ipad);
+	if (ret)
+		goto calc_fail;
+	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+	if (ret)
+		goto calc_fail;
+
+	/* OPAD calculation */
+	ret = crypto_shash_init(&ctx->sdesc->shash);
+	if (ret)
+		goto calc_fail;
+	ret = crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+	if (ret)
+		goto calc_fail;
+	ret = crypto_shash_export(&ctx->sdesc->shash, opad);
+	if (ret)
+		goto calc_fail;
+	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+	if (ret)
+		goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
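+/*
+ * aead_hmac_init() performs the standard HMAC key precomputation,
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): only the partial
+ * hash states after absorbing one block of K ^ ipad and K ^ opad are
+ * kept, so the inner and outer hashes can be resumed per request
+ * without re-deriving the pads.
+ */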
+
+static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+	/*
+	 * For AES GCM we expect to get the encryption key (16, 24 or
+	 * 32 bytes) followed by the salt (4 bytes).
+	 */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
+}
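+/*
+ * rfc4106(gcm(aes)) keys carry a trailing 4-byte salt (RFC 4106),
+ * which is kept together with the key in ctx->key and is later
+ * programmed as the implicit part of the nonce in
+ * create_aead_ctx_hdr().
+ */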
+
+static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
+ if (ctx->mac_type == OTX2_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX2_CPT_AES_GCM:
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ cpu_to_be64s(&rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+	/*
+	 * Store the packet data offset information in the control word,
+	 * which occupies the first 8 bytes of the input.
+	 */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+}
+
+static inline int create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+	u32 argcnt = 0;
+	int status;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static inline void create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+}
+
+static inline int create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+	/*
+	 * If source and destination are different,
+	 * copy the payload to the destination.
+	 */
+	if (req->src != req->dst) {
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ kfree(ptr);
+ }
+
+ if (enc) {
+		/*
+		 * In an encryption scenario the HMAC needs to be
+		 * appended after the payload.
+		 */
+ dst = req->dst;
+ offset = inputlen;
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst)
+ return -ENOENT;
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+		/*
+		 * In a decryption scenario the HMAC calculated over the
+		 * received payload needs to be compared with the
+		 * received HMAC.
+		 */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len)
+ return -EINVAL;
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->out_cnt = argcnt;
+ return 0;
+
+error_free:
+ kfree(ptr);
+ return status;
+}
+
+static int aead_do_fallback(struct aead_request *req, bool is_enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+		/* Hand the request over to the fallback tfm */
+ aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
+ aead_request_set_callback(&rctx->fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
+ crypto_aead_decrypt(&rctx->fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ req_info->callback = otx2_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX2_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
+ break;
+
+ case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
+ create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (!req_info->req.param2 ||
+ (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
+ (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
+ return aead_do_fallback(req, enc);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+	/*
+	 * We perform an asynchronous send; once the request completes,
+	 * the driver notifies the caller through the registered
+	 * callback functions.
+	 */
+ return otx2_cpt_do_request(pdev, req_info, cpu_num);
+}
+
+static int otx2_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx2_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
+static struct skcipher_alg otx2_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_xts_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx2_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+} };
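+/*
+ * cra_priority 4001 is well above the software implementations
+ * (which typically register with priorities in the low hundreds),
+ * so these offloaded variants win algorithm selection by default.
+ */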
+
+static struct aead_alg otx2_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_gcm_aes_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx2_cpt_aead_gcm_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
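+	/*
+	 * skciphers are registered only when CONFIG_DM_CRYPT is not
+	 * enabled; the AEADs below are registered unconditionally.
+	 */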
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &=
+ ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
+ otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx2_cpt_aeads,
+ ARRAY_SIZE(otx2_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+	/* Mirror the DM_CRYPT guard used at registration time */
+	if (!IS_ENABLED(CONFIG_DM_CRYPT))
+		crypto_unregister_skciphers(otx2_cpt_skciphers,
+					    ARRAY_SIZE(otx2_cpt_skciphers));
+	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
+}
+
+static int compare_func(const void *lptr, const void *rptr)
+{
+	const struct cpt_device_desc *ldesc = lptr;
+	const struct cpt_device_desc *rdesc = rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = lptr;
+ struct cpt_device_desc *rdesc = rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
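+/*
+ * compare_func()/swap_func() keep the device table sorted by PCI
+ * devfn, presumably so that device selection is stable regardless of
+ * probe order.
+ */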
+
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ count = atomic_read(&se_devices.count);
+ if (count >= OTX2_CPT_MAX_LFS_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
+	if (atomic_read(&se_devices.count) == num_devices &&
+	    !is_crypto_registered) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+
+unlock:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++) {
+ if (pdev == dev_tbl->desc[i].dev) {
+ for (j = i; j < count-1; j++)
+ dev_tbl->desc[j] = dev_tbl->desc[j+1];
+ dev_found = true;
+ break;
+ }
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
+ goto unlock;
+ }
+ if (atomic_dec_and_test(&se_devices.count)) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+
+unlock:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
new file mode 100644
index 000000000000..f04184bd1744
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_ALGS_H
+#define __OTX2_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <crypto/aead.h>
+#include "otx2_cpt_common.h"
+
+#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX2_CPT_MAX_KEY_SIZE (OTX2_CPT_MAX_ENC_KEY_SIZE + \
+ OTX2_CPT_MAX_HASH_KEY_SIZE)
+
+enum otx2_cpt_request_type {
+ OTX2_CPT_ENC_DEC_REQ = 0x1,
+ OTX2_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX2_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx2_cpt_major_opcodes {
+ OTX2_CPT_MAJOR_OP_MISC = 0x01,
+ OTX2_CPT_MAJOR_OP_FC = 0x33,
+ OTX2_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx2_cpt_cipher_type {
+ OTX2_CPT_CIPHER_NULL = 0x0,
+ OTX2_CPT_DES3_CBC = 0x1,
+ OTX2_CPT_DES3_ECB = 0x2,
+ OTX2_CPT_AES_CBC = 0x3,
+ OTX2_CPT_AES_ECB = 0x4,
+ OTX2_CPT_AES_CFB = 0x5,
+ OTX2_CPT_AES_CTR = 0x6,
+ OTX2_CPT_AES_GCM = 0x7,
+ OTX2_CPT_AES_XTS = 0x8
+};
+
+enum otx2_cpt_mac_type {
+ OTX2_CPT_MAC_NULL = 0x0,
+ OTX2_CPT_MD5 = 0x1,
+ OTX2_CPT_SHA1 = 0x2,
+ OTX2_CPT_SHA224 = 0x3,
+ OTX2_CPT_SHA256 = 0x4,
+ OTX2_CPT_SHA384 = 0x5,
+ OTX2_CPT_SHA512 = 0x6,
+ OTX2_CPT_GMAC = 0x7
+};
+
+enum otx2_cpt_aes_key_len {
+ OTX2_CPT_AES_128_BIT = 0x1,
+ OTX2_CPT_AES_192_BIT = 0x2,
+ OTX2_CPT_AES_256_BIT = 0x3
+};
+
+union otx2_cpt_encr_ctrl {
+ u64 u;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved_59:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved_49_51:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved_32_39:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved_32_39:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved_49_51:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved_59:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx2_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx2_cpt_fc_enc_ctx {
+ union otx2_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
+union otx2_cpt_fc_hmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx2_cpt_fc_ctx {
+ struct otx2_cpt_fc_enc_ctx enc;
+ union otx2_cpt_fc_hmac_ctx hmac;
+};
+
+struct otx2_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX2_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+ u8 enc_align_len;
+ struct crypto_skcipher *fbk_cipher;
+};
+
+union otx2_cpt_offset_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
+struct otx2_cpt_req_ctx {
+ struct otx2_cpt_req_info cpt_req;
+ union otx2_cpt_offset_ctrl ctrl_word;
+ struct otx2_cpt_fc_ctx fctx;
+ union {
+ struct skcipher_request sk_fbk_req;
+ struct aead_request fbk_req;
+ };
+};
+
+struct otx2_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx2_cpt_aead_ctx {
+ u8 key[OTX2_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx2_cpt_sdesc *sdesc;
+ struct crypto_aead *fbk_cipher;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+ u8 enc_align_len;
+};
+
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices);
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod);
+
+#endif /* __OTX2_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
new file mode 100644
index 000000000000..47f378731024
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cptvf_algs.h"
+#include <rvu_reg.h>
+
+#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
+
+static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+
+ /* Enable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Disable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+}
+
+static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
+{
+ int ret, irq;
+ int num_vec;
+
+ num_vec = pci_msix_vec_count(cptvf->pdev);
+ if (num_vec <= 0)
+ return -EINVAL;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cptvf->pdev->dev,
+ "Request for %d msix vectors failed\n", num_vec);
+ return ret;
+ }
+ irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
+ /* Register VF<=>PF mailbox interrupt handler */
+ ret = devm_request_irq(&cptvf->pdev->dev, irq,
+ otx2_cptvf_pfvf_mbox_intr, 0,
+ "CPTPFVF Mbox", cptvf);
+ if (ret)
+ return ret;
+ /* Enable PF-VF mailbox interrupts */
+ cptvf_enable_pfvf_mbox_intrs(cptvf);
+
+ ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
+ if (ret) {
+ dev_warn(&cptvf->pdev->dev,
+ "PF not responding to mailbox, deferring probe\n");
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
+{
+ int ret;
+
+ cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptvf->pfvf_mbox_wq)
+ return -ENOMEM;
+
+ ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
+ cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ if (ret)
+ goto free_wqe;
+
+ INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ return ret;
+}
+
+static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
+{
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
+}
+
+static void cptlf_work_handler(unsigned long data)
+{
+ otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
+}
+
+static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].wqe)
+ continue;
+
+ tasklet_kill(&lfs->lf[i].wqe->work);
+ kfree(lfs->lf[i].wqe);
+ lfs->lf[i].wqe = NULL;
+ }
+}
+
+static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_wqe *wqe;
+ int i, ret = 0;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
+ if (!wqe) {
+ ret = -ENOMEM;
+ goto cleanup_tasklet;
+ }
+
+ tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
+ wqe->lfs = lfs;
+ wqe->lf_num = i;
+ lfs->lf[i].wqe = wqe;
+ }
+ return 0;
+
+cleanup_tasklet:
+ cleanup_tasklet_work(lfs);
+ return ret;
+}
+
+static void free_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ kfree(lfs->lf[i].pqueue.head);
+ lfs->lf[i].pqueue.head = NULL;
+ }
+}
+
+static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int size, ret, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
+ size = lfs->lf[i].pqueue.qlen *
+ sizeof(struct otx2_cpt_pending_entry);
+
+ lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
+ if (!lfs->lf[i].pqueue.head) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize spin lock */
+ spin_lock_init(&lfs->lf[i].pqueue.lock);
+ }
+ return 0;
+
+error:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ cleanup_tasklet_work(lfs);
+ free_pending_queues(lfs);
+}
+
+static int lf_sw_init(struct otx2_cptlfs_info *lfs)
+{
+ int ret;
+
+ ret = alloc_pending_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating pending queues failed\n");
+ return ret;
+ }
+ ret = init_tasklet_work(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Tasklet work init failed\n");
+ goto pending_queues_free;
+ }
+ return 0;
+
+pending_queues_free:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
+
+ /* Remove interrupts affinity */
+ otx2_cptlf_free_irqs_affinity(lfs);
+ /* Disable instruction queue */
+ otx2_cptlf_disable_iqueues(lfs);
+ /* Unregister crypto algorithms */
+ otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
+ /* Unregister LFs interrupts */
+ otx2_cptlf_unregister_interrupts(lfs);
+ /* Cleanup LFs software side */
+ lf_sw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
+
+static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct device *dev = &cptvf->pdev->dev;
+ int ret, lfs_num;
+ u8 eng_grp_msk;
+
+ /* Get engine group number for symmetric crypto */
+ cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
+ if (ret)
+ return ret;
+
+ if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev, "Engine group for kernel crypto not available\n");
+		return -ENOENT;
+ }
+ eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
+ if (ret)
+ return ret;
+
+ lfs->reg_base = cptvf->reg_base;
+ lfs->pdev = cptvf->pdev;
+ lfs->mbox = &cptvf->pfvf_mbox;
+
+ lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
+ num_online_cpus();
+ ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
+ lfs_num);
+ if (ret)
+ return ret;
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Initialize LFs software side */
+ ret = lf_sw_init(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register LFs interrupts */
+ ret = otx2_cptlf_register_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ /* Set interrupts affinity */
+ ret = otx2_cptlf_set_irqs_affinity(lfs);
+ if (ret)
+ goto unregister_intr;
+
+ atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
+ /* Register crypto algorithms */
+ ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
+ if (ret) {
+ dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
+ goto disable_irqs;
+ }
+ return 0;
+
+disable_irqs:
+ otx2_cptlf_free_irqs_affinity(lfs);
+unregister_intr:
+ otx2_cptlf_unregister_interrupts(lfs);
+cleanup_lf_sw:
+ lf_sw_cleanup(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+
+ return ret;
+}
+
+static int otx2_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptvf_dev *cptvf;
+ int ret;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map VF's configuration registers */
+ ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPTVF_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENODEV;
+ goto clear_drvdata;
+ }
+ /* Initialize PF<=>VF mailbox */
+ ret = cptvf_pfvf_mbox_init(cptvf);
+ if (ret)
+ goto clear_drvdata;
+
+ /* Register interrupts */
+ ret = cptvf_register_interrupts(cptvf);
+ if (ret)
+ goto destroy_pfvf_mbox;
+
+ /* Initialize CPT LFs */
+ ret = cptvf_lf_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
+ return 0;
+
+unregister_interrupts:
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+destroy_pfvf_mbox:
+ cptvf_pfvf_mbox_destroy(cptvf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void otx2_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT VF device.\n");
+ return;
+ }
+ cptvf_lf_shutdown(&cptvf->lfs);
+ /* Disable PF-VF mailbox interrupt */
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ /* Destroy PF-VF mbox */
+ cptvf_pfvf_mbox_destroy(cptvf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cptvf_pci_driver = {
+ .name = OTX2_CPTVF_DRV_NAME,
+ .id_table = otx2_cptvf_id_table,
+ .probe = otx2_cptvf_probe,
+ .remove = otx2_cptvf_remove,
+};
+
+module_pci_driver(otx2_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
new file mode 100644
index 000000000000..5d73b711cba6
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include <rvu_reg.h>
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptvf_dev *cptvf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule the workqueue function to process the mbox messages */
+ queue_work(cptvf->pfvf_mbox_wq, &cptvf->pfvf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT, 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
+ struct mbox_msghdr *msg)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct otx2_cpt_kvf_limits_rsp *rsp_limits;
+ struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct cpt_rd_wr_reg_msg *rsp_reg;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptvf->vf_id = ((msg->pcifunc >> RVU_PFVF_FUNC_SHIFT)
+ & RVU_PFVF_FUNC_MASK) - 1;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ /* Check if resources were successfully attached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ /* Check if resources were successfully detached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_reg = (struct cpt_rd_wr_reg_msg *) msg;
+ if (msg->rc) {
+ dev_err(&cptvf->pdev->dev,
+ "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_reg->reg_offset, rsp_reg->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_reg->is_write)
+ *rsp_reg->ret_val = rsp_reg->val;
+ break;
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
+ cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
+ cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+}
+
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptvf_dev *cptvf;
+ struct otx2_mbox *pfvf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, i;
+
+ /* Sync with the mbox memory region before reading it */
+ smp_rmb();
+
+ cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
+ pfvf_mbox = &cptvf->pfvf_mbox;
+ mdev = &pfvf_mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
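+ /* Response messages are laid out back to back after the mbox header */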
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + pfvf_mbox->rx_start +
+ offset);
+ process_pfvf_mbox_mbox_msg(cptvf, msg);
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(pfvf_mbox, 0);
+}
+
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx2_cpt_egrp_num_msg *req;
+
+ req = (struct otx2_cpt_egrp_num_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_egrp_num_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->eng_type = eng_type;
+
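+ /* The PF's response is processed by otx2_cptvf_pfvf_mbox_handler() */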
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_kvf_limits_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_KVF_LIMITS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
new file mode 100644
index 000000000000..d5c1c1b7c7e4
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cptvf.h"
+#include "otx2_cpt_common.h"
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout, in microseconds, when waiting for a free pending entry */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+/* Default command timeout in seconds */
+#define CPT_COMMAND_TIMEOUT 4
+#define CPT_TIME_IN_RESET_COUNT 5
+
+static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->in_cnt);
+ for (i = 0; i < req->in_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+ pr_debug("Scatter list size %d\n", req->out_cnt);
+ for (i = 0; i < req->out_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
+ struct otx2_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx2_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
+
+static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
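+ /* Each SG component packs up to four buffer lengths and pointers */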
+ components = buf_count / 4;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ fallthrough;
+ case 2:
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ fallthrough;
+ case 1:
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
+
+static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return NULL;
+ }
+
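+ /* Round gather/scatter counts up to whole 4-entry SG components */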
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
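+ /* SG list header: scatter count, gather count, reserved fields; big-endian */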
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ struct otx2_cpt_pending_queue *pqueue,
+ struct otx2_cptlf_info *lf)
+{
+ struct otx2_cptvf_request *cpt_req = &req->req;
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx2_cpt_inst_info *info = NULL;
+ union otx2_cpt_res_s *result = NULL;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ if (unlikely(!otx2_cptlf_started(lf->lfs)))
+ return -ENODEV;
+
+ info = info_create(pdev, req, gfp);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Setting up cpt inst info failed");
+ return -ENOMEM;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = info->completion_addr;
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
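+ /*
+ * Poll for up to CPT_PENTRY_TIMEOUT us, in CPT_PENTRY_STEP steps,
+ * for a pending entry to become free.
+ */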
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ goto destroy_info;
+ }
+
+ /*
+ * Check if we are close to filling the entire pending queue; if so,
+ * tell the sender to stop/sleep by returning -EBUSY. We do this only
+ * for contexts which can sleep (GFP_KERNEL).
+ */
+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
+ pentry->resume_sender = true;
+ } else {
+ pentry->resume_sender = false;
+ }
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = 0;
+ iq_cmd.cptr.u = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);
+
+ /* Print debug info if enabled */
+ otx2_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ otx2_cpt_send_cmd(&cptinst, 1, lf);
+
+ /*
+ * The pending queue entry is allocated and the CPT instruction is
+ * submitted to the instruction queue within the same critical
+ * section, so the order of requests is identical in both the
+ * pending and instruction queues.
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+destroy_info:
+ spin_unlock_bh(&pqueue->lock);
+ otx2_cpt_info_destroy(pdev, info);
+ return ret;
+}
+
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+
+ return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
+ &lfs->lf[cpu_num]);
+}
+
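+/*
+ * Process the completion code of a finished instruction. Returns 1 if the
+ * request is still pending, 0 when processing has finished and *res_code
+ * holds the result.
+ */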
+static int cpt_process_ccode(struct pci_dev *pdev,
+ union otx2_cpt_res_s *cpt_status,
+ struct otx2_cpt_inst_info *info,
+ u32 *res_code)
+{
+ u8 uc_ccode = cpt_status->s.uc_compcode;
+ u8 ccode = cpt_status->s.compcode;
+
+ switch (ccode) {
+ case OTX2_CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_INSTERR:
+ dev_err(&pdev->dev,
+ "Request failed with instruction error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_NOTDONE:
+ /* Check for timeout */
+ if (time_after_eq(jiffies, info->time_in +
+ CPT_COMMAND_TIMEOUT * HZ)) {
+ dev_warn(&pdev->dev,
+ "Request timed out 0x%p\n", info->req);
+ } else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
+ info->time_in = jiffies;
+ info->extra_time++;
+ }
+ return 1;
+
+ case OTX2_CPT_COMP_E_GOOD:
+ /*
+ * Check the microcode completion code; it is only valid
+ * when the completion code is CPT_COMP_E::GOOD.
+ */
+ if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
+ /*
+ * If the requested HMAC is truncated and the ucode returns an
+ * s/g write length error, report success: the ucode writes as
+ * many bytes of the calculated HMAC as fit in the gather
+ * buffer, and it flags an s/g write length error when the
+ * gather buffer is smaller than the full HMAC size.
+ */
+ if (info->req->is_trunc_hmac &&
+ uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ cpt_status->s.uc_compcode);
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+ }
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "Request returned invalid status %d\n", ccode);
+ break;
+ }
+ return 0;
+}
+
+static inline void process_pending_queue(struct pci_dev *pdev,
+ struct otx2_cpt_pending_queue *pqueue)
+{
+ struct otx2_cpt_pending_entry *resume_pentry = NULL;
+ void (*callback)(int status, void *arg, void *req);
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_res_s *cpt_status = NULL;
+ struct otx2_cpt_inst_info *info = NULL;
+ struct otx2_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ info = pentry->info;
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
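+ /* Head request still in flight; keep it at the queue front */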
+ if (cpt_process_ccode(pdev, cpt_status, info, &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ info->pdev = pdev;
+
+process_pentry:
+ /*
+ * Check if we should inform the sending side to resume. We do
+ * this CPT_IQ_RESUME_MARGIN elements before the pending queue
+ * becomes empty.
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * -EINPROGRESS indicates to the sending side that it
+ * can resume sending requests.
+ */
+ callback(-EINPROGRESS, areq, info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * Call the callback after the current pending entry has been
+ * processed; skip it if the callback pointer is NULL.
+ */
+ if (callback)
+ callback(res_code, areq, info);
+ }
+}
+
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
+{
+ process_pending_queue(wqe->lfs->pdev,
+ &wqe->lfs->lf[wqe->lf_num].pqueue);
+}
+
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ return cptvf->lfs.kcrypto_eng_grp_num;
+}
diff --git a/drivers/crypto/mediatek/Makefile b/drivers/crypto/mediatek/Makefile
deleted file mode 100644
index 196a4653974e..000000000000
--- a/drivers/crypto/mediatek/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o
-mtk-crypto-objs:= mtk-platform.o mtk-aes.o mtk-sha.o
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
deleted file mode 100644
index 7323066724c3..000000000000
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ /dev/null
@@ -1,1271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- *
- * Driver for EIP97 AES acceleration.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- *
- * Some ideas are from atmel-aes.c drivers.
- */
-
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/internal/skcipher.h>
-#include "mtk-platform.h"
-
-#define AES_QUEUE_SIZE 512
-#define AES_BUF_ORDER 2
-#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
- & ~(AES_BLOCK_SIZE - 1))
-#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
- AES_BLOCK_SIZE * 2)
-#define AES_MAX_CT_SIZE 6
-
-#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
-
-/* AES-CBC/ECB/CTR/OFB/CFB command token */
-#define AES_CMD0 cpu_to_le32(0x05000000)
-#define AES_CMD1 cpu_to_le32(0x2d060000)
-#define AES_CMD2 cpu_to_le32(0xe4a63806)
-/* AES-GCM command token */
-#define AES_GCM_CMD0 cpu_to_le32(0x0b000000)
-#define AES_GCM_CMD1 cpu_to_le32(0xa0800000)
-#define AES_GCM_CMD2 cpu_to_le32(0x25000010)
-#define AES_GCM_CMD3 cpu_to_le32(0x0f020000)
-#define AES_GCM_CMD4 cpu_to_le32(0x21e60000)
-#define AES_GCM_CMD5 cpu_to_le32(0x40e60000)
-#define AES_GCM_CMD6 cpu_to_le32(0xd0070000)
-
-/* AES transform information word 0 fields */
-#define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0)
-#define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0)
-#define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0)
-#define AES_TFM_GCM_IN cpu_to_le32(0xf << 0)
-#define AES_TFM_SIZE(x) cpu_to_le32((x) << 8)
-#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
-#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
-#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
-#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
-#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
-/* AES transform information word 1 fields */
-#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
-#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
-#define AES_TFM_OFB cpu_to_le32(0x4 << 0)
-#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0)
-#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */
-#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */
-#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */
-#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
-#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
-#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
-
-/* AES flags */
-#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0)
-#define AES_FLAGS_ECB BIT(0)
-#define AES_FLAGS_CBC BIT(1)
-#define AES_FLAGS_CTR BIT(2)
-#define AES_FLAGS_OFB BIT(3)
-#define AES_FLAGS_CFB128 BIT(4)
-#define AES_FLAGS_GCM BIT(5)
-#define AES_FLAGS_ENCRYPT BIT(6)
-#define AES_FLAGS_BUSY BIT(7)
-
-#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
-
-/**
- * mtk_aes_info - hardware information of AES
- * @cmd: command token, hardware instruction
- * @tfm: transform state of cipher algorithm.
- * @state: contains keys and initial vectors.
- *
- * Memory layout of GCM buffer:
- * /-----------\
- * | AES KEY | 128/196/256 bits
- * |-----------|
- * | HASH KEY | a string 128 zero bits encrypted using the block cipher
- * |-----------|
- * | IVs | 4 * 4 bytes
- * \-----------/
- *
- * The engine requires all these info to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
- */
-struct mtk_aes_info {
- __le32 cmd[AES_MAX_CT_SIZE];
- __le32 tfm[2];
- __le32 state[AES_MAX_STATE_BUF_SIZE];
-};
-
-struct mtk_aes_reqctx {
- u64 mode;
-};
-
-struct mtk_aes_base_ctx {
- struct mtk_cryp *cryp;
- u32 keylen;
- __le32 key[12];
- __le32 keymode;
-
- mtk_aes_fn start;
-
- struct mtk_aes_info info;
- dma_addr_t ct_dma;
- dma_addr_t tfm_dma;
-
- __le32 ct_hdr;
- u32 ct_size;
-};
-
-struct mtk_aes_ctx {
- struct mtk_aes_base_ctx base;
-};
-
-struct mtk_aes_ctr_ctx {
- struct mtk_aes_base_ctx base;
-
- __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
- size_t offset;
- struct scatterlist src[2];
- struct scatterlist dst[2];
-};
-
-struct mtk_aes_gcm_ctx {
- struct mtk_aes_base_ctx base;
-
- u32 authsize;
- size_t textlen;
-};
-
-struct mtk_aes_drv {
- struct list_head dev_list;
- /* Device list lock */
- spinlock_t lock;
-};
-
-static struct mtk_aes_drv mtk_aes = {
- .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
-};
-
-static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
-{
- return readl_relaxed(cryp->base + offset);
-}
-
-static inline void mtk_aes_write(struct mtk_cryp *cryp,
- u32 offset, u32 value)
-{
- writel_relaxed(value, cryp->base + offset);
-}
-
-static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
-{
- struct mtk_cryp *cryp = NULL;
- struct mtk_cryp *tmp;
-
- spin_lock_bh(&mtk_aes.lock);
- if (!ctx->cryp) {
- list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
- cryp = tmp;
- break;
- }
- ctx->cryp = cryp;
- } else {
- cryp = ctx->cryp;
- }
- spin_unlock_bh(&mtk_aes.lock);
-
- return cryp;
-}
-
-static inline size_t mtk_aes_padlen(size_t len)
-{
- len &= AES_BLOCK_SIZE - 1;
- return len ? AES_BLOCK_SIZE - len : 0;
-}
-
-static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
- struct mtk_aes_dma *dma)
-{
- int nents;
-
- if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
- return false;
-
- for (nents = 0; sg; sg = sg_next(sg), ++nents) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return false;
-
- if (len <= sg->length) {
- if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
- return false;
-
- dma->nents = nents + 1;
- dma->remainder = sg->length - len;
- sg->length = len;
- return true;
- }
-
- if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
- return false;
-
- len -= sg->length;
- }
-
- return false;
-}
-
-static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
- const struct mtk_aes_reqctx *rctx)
-{
- /* Clear all but persistent flags and set request flags. */
- aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
-}
-
-static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
-{
- struct scatterlist *sg = dma->sg;
- int nents = dma->nents;
-
- if (!dma->remainder)
- return;
-
- while (--nents > 0 && sg)
- sg = sg_next(sg);
-
- if (!sg)
- return;
-
- sg->length += dma->remainder;
-}
-
-static inline int mtk_aes_complete(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes,
- int err)
-{
- aes->flags &= ~AES_FLAGS_BUSY;
- aes->areq->complete(aes->areq, err);
- /* Handle new request */
- tasklet_schedule(&aes->queue_task);
- return err;
-}
-
-/*
- * Write descriptors for processing. This will configure the engine, load
- * the transform information and then start the packet processing.
- */
-static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_ring *ring = cryp->ring[aes->id];
- struct mtk_desc *cmd = NULL, *res = NULL;
- struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
- u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
- int nents;
-
- /* Write command descriptors */
- for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
- cmd = ring->cmd_next;
- cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
- cmd->buf = cpu_to_le32(sg_dma_address(ssg));
-
- if (nents == 0) {
- cmd->hdr |= MTK_DESC_FIRST |
- MTK_DESC_CT_LEN(aes->ctx->ct_size);
- cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
- cmd->ct_hdr = aes->ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
- }
-
- /* Shift ring buffer and check boundary */
- if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
- ring->cmd_next = ring->cmd_base;
- }
- cmd->hdr |= MTK_DESC_LAST;
-
- /* Prepare result descriptors */
- for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
- res = ring->res_next;
- res->hdr = MTK_DESC_BUF_LEN(dsg->length);
- res->buf = cpu_to_le32(sg_dma_address(dsg));
-
- if (nents == 0)
- res->hdr |= MTK_DESC_FIRST;
-
- /* Shift ring buffer and check boundary */
- if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
- ring->res_next = ring->res_base;
- }
- res->hdr |= MTK_DESC_LAST;
-
- /* Pointer to current result descriptor */
- ring->res_prev = res;
-
- /* Prepare enough space for authenticated tag */
- if (aes->flags & AES_FLAGS_GCM)
- le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);
-
- /*
- * Make sure that all changes to the DMA ring are done before we
- * start engine.
- */
- wmb();
- /* Start DMA transfer */
- mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
- mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));
-
- return -EINPROGRESS;
-}
-
-static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
-
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
- DMA_TO_DEVICE);
-
- if (aes->src.sg == aes->dst.sg) {
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_BIDIRECTIONAL);
-
- if (aes->src.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->src);
- } else {
- dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
- DMA_FROM_DEVICE);
-
- if (aes->dst.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->dst);
-
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_TO_DEVICE);
-
- if (aes->src.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->src);
- }
-
- if (aes->dst.sg == &aes->aligned_sg)
- sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
- aes->buf, aes->total);
-}
-
-static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_info *info = &ctx->info;
-
- ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
- goto exit;
-
- ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
-
- if (aes->src.sg == aes->dst.sg) {
- aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
- aes->src.nents,
- DMA_BIDIRECTIONAL);
- aes->dst.sg_len = aes->src.sg_len;
- if (unlikely(!aes->src.sg_len))
- goto sg_map_err;
- } else {
- aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
- aes->src.nents, DMA_TO_DEVICE);
- if (unlikely(!aes->src.sg_len))
- goto sg_map_err;
-
- aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
- aes->dst.nents, DMA_FROM_DEVICE);
- if (unlikely(!aes->dst.sg_len)) {
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_TO_DEVICE);
- goto sg_map_err;
- }
- }
-
- return mtk_aes_xmit(cryp, aes);
-
-sg_map_err:
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
-exit:
- return mtk_aes_complete(cryp, aes, -EINVAL);
-}
-
-/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
-static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- size_t len)
-{
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_info *info = &ctx->info;
- u32 cnt = 0;
-
- ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
- info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
- info->cmd[cnt++] = AES_CMD1;
-
- info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
- if (aes->flags & AES_FLAGS_ENCRYPT)
- info->tfm[0] |= AES_TFM_BASIC_OUT;
- else
- info->tfm[0] |= AES_TFM_BASIC_IN;
-
- switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
- case AES_FLAGS_CBC:
- info->tfm[1] = AES_TFM_CBC;
- break;
- case AES_FLAGS_ECB:
- info->tfm[1] = AES_TFM_ECB;
- goto ecb;
- case AES_FLAGS_CTR:
- info->tfm[1] = AES_TFM_CTR_LOAD;
- goto ctr;
- case AES_FLAGS_OFB:
- info->tfm[1] = AES_TFM_OFB;
- break;
- case AES_FLAGS_CFB128:
- info->tfm[1] = AES_TFM_CFB128;
- break;
- default:
- /* Should not happen... */
- return;
- }
-
- memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
-ctr:
- le32_add_cpu(&info->tfm[0],
- le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
- info->tfm[1] |= AES_TFM_FULL_IV;
- info->cmd[cnt++] = AES_CMD2;
-ecb:
- ctx->ct_size = cnt;
-}
-
-static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- struct scatterlist *src, struct scatterlist *dst,
- size_t len)
-{
- size_t padlen = 0;
- bool src_aligned, dst_aligned;
-
- aes->total = len;
- aes->src.sg = src;
- aes->dst.sg = dst;
- aes->real_dst = dst;
-
- src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
- if (src == dst)
- dst_aligned = src_aligned;
- else
- dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
-
- if (!src_aligned || !dst_aligned) {
- padlen = mtk_aes_padlen(len);
-
- if (len + padlen > AES_BUF_SIZE)
- return mtk_aes_complete(cryp, aes, -ENOMEM);
-
- if (!src_aligned) {
- sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
- aes->src.sg = &aes->aligned_sg;
- aes->src.nents = 1;
- aes->src.remainder = 0;
- }
-
- if (!dst_aligned) {
- aes->dst.sg = &aes->aligned_sg;
- aes->dst.nents = 1;
- aes->dst.remainder = 0;
- }
-
- sg_init_table(&aes->aligned_sg, 1);
- sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
- }
-
- mtk_aes_info_init(cryp, aes, len + padlen);
-
- return mtk_aes_map(cryp, aes);
-}
-
-static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct crypto_async_request *new_areq)
-{
- struct mtk_aes_rec *aes = cryp->aes[id];
- struct crypto_async_request *areq, *backlog;
- struct mtk_aes_base_ctx *ctx;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&aes->lock, flags);
- if (new_areq)
- ret = crypto_enqueue_request(&aes->queue, new_areq);
- if (aes->flags & AES_FLAGS_BUSY) {
- spin_unlock_irqrestore(&aes->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&aes->queue);
- areq = crypto_dequeue_request(&aes->queue);
- if (areq)
- aes->flags |= AES_FLAGS_BUSY;
- spin_unlock_irqrestore(&aes->lock, flags);
-
- if (!areq)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- ctx = crypto_tfm_ctx(areq->tfm);
- /* Write key into state buffer */
- memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));
-
- aes->areq = areq;
- aes->ctx = ctx;
-
- return ctx->start(cryp, aes);
-}
-
-static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes)
-{
- return mtk_aes_complete(cryp, aes, 0);
-}
-
-static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
-
- mtk_aes_set_mode(aes, rctx);
- aes->resume = mtk_aes_transfer_complete;
-
- return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
-}
-
-static inline struct mtk_aes_ctr_ctx *
-mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct mtk_aes_ctr_ctx, base);
-}
-
-static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct scatterlist *src, *dst;
- u32 start, end, ctr, blocks;
- size_t datalen;
- bool fragmented = false;
-
- /* Check for transfer completion. */
- cctx->offset += aes->total;
- if (cctx->offset >= req->cryptlen)
- return mtk_aes_transfer_complete(cryp, aes);
-
- /* Compute data length. */
- datalen = req->cryptlen - cctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
- ctr = be32_to_cpu(cctx->iv[3]);
-
- /* Check 32bit counter overflow. */
- start = ctr;
- end = start + blocks - 1;
- if (end < start) {
- ctr = 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
-
- /* Jump to offset. */
- src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
- dst = ((req->src == req->dst) ? src :
- scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
-
- /* Write IVs into transform state buffer. */
- memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);
-
- if (unlikely(fragmented)) {
- /*
- * Increment the counter manually to cope with the hardware
- * counter overflow.
- */
- cctx->iv[3] = cpu_to_be32(ctr);
- crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
- }
-
- return mtk_aes_dma(cryp, aes, src, dst, datalen);
-}
-
-static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
-
- mtk_aes_set_mode(aes, rctx);
-
- memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
- cctx->offset = 0;
- aes->total = 0;
- aes->resume = mtk_aes_ctr_transfer;
-
- return mtk_aes_ctr_transfer(cryp, aes);
-}
-
-/* Check and set the AES key to transform state buffer */
-static int mtk_aes_setkey(struct crypto_skcipher *tfm,
- const u8 *key, u32 keylen)
-{
- struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->keymode = AES_TFM_128BITS;
- break;
- case AES_KEYSIZE_192:
- ctx->keymode = AES_TFM_192BITS;
- break;
- case AES_KEYSIZE_256:
- ctx->keymode = AES_TFM_256BITS;
- break;
-
- default:
- return -EINVAL;
- }
-
- ctx->keylen = SIZE_IN_WORDS(keylen);
- memcpy(ctx->key, key, keylen);
-
- return 0;
-}
-
-static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct mtk_aes_reqctx *rctx;
- struct mtk_cryp *cryp;
-
- cryp = mtk_aes_find_dev(ctx);
- if (!cryp)
- return -ENODEV;
-
- rctx = skcipher_request_ctx(req);
- rctx->mode = mode;
-
- return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
- &req->base);
-}
-
-static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
-}
-
-static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ECB);
-}
-
-static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
-}
-
-static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CBC);
-}
-
-static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
-}
-
-static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CTR);
-}
-
-static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
-}
-
-static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_OFB);
-}
-
-static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
-}
-
-static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CFB128);
-}
-
-static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
-{
- struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_start;
- return 0;
-}
-
-static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
-{
- struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_ctr_start;
- return 0;
-}
-
-static struct skcipher_alg aes_algs[] = {
-{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cbc_encrypt,
- .decrypt = mtk_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .init = mtk_aes_init_tfm,
-},
-{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ecb_encrypt,
- .decrypt = mtk_aes_ecb_decrypt,
- .init = mtk_aes_init_tfm,
-},
-{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ctr_encrypt,
- .decrypt = mtk_aes_ctr_decrypt,
- .init = mtk_aes_ctr_init_tfm,
-},
-{
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "ofb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ofb_encrypt,
- .decrypt = mtk_aes_ofb_decrypt,
-},
-{
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cfb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cfb_encrypt,
- .decrypt = mtk_aes_cfb_decrypt,
-},
-};
-
-static inline struct mtk_aes_gcm_ctx *
-mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct mtk_aes_gcm_ctx, base);
-}
-
-/*
- * Engine will verify and compare tag automatically, so we just need
- * to check returned status which stored in the result descriptor.
- */
-static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes)
-{
- __le32 status = cryp->ring[aes->id]->res_prev->ct;
-
- return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
- -EBADMSG : 0);
-}
-
-/* Initialize transform information of GCM mode */
-static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes,
- size_t len)
-{
- struct aead_request *req = aead_request_cast(aes->areq);
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct mtk_aes_info *info = &ctx->info;
- u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- u32 cnt = 0;
-
- ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
-
- info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
- info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
- info->cmd[cnt++] = AES_GCM_CMD2;
- info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
-
- if (aes->flags & AES_FLAGS_ENCRYPT) {
- info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
- info->tfm[0] = AES_TFM_GCM_OUT;
- } else {
- info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
- info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
- info->tfm[0] = AES_TFM_GCM_IN;
- }
- ctx->ct_size = cnt;
-
- info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
- ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
- ctx->keymode;
- info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
- AES_TFM_ENC_HASH;
-
- memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
- req->iv, ivsize);
-}
-
-static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- struct scatterlist *src, struct scatterlist *dst,
- size_t len)
-{
- bool src_aligned, dst_aligned;
-
- aes->src.sg = src;
- aes->dst.sg = dst;
- aes->real_dst = dst;
-
- src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
- if (src == dst)
- dst_aligned = src_aligned;
- else
- dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
-
- if (!src_aligned || !dst_aligned) {
- if (aes->total > AES_BUF_SIZE)
- return mtk_aes_complete(cryp, aes, -ENOMEM);
-
- if (!src_aligned) {
- sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
- aes->src.sg = &aes->aligned_sg;
- aes->src.nents = 1;
- aes->src.remainder = 0;
- }
-
- if (!dst_aligned) {
- aes->dst.sg = &aes->aligned_sg;
- aes->dst.nents = 1;
- aes->dst.remainder = 0;
- }
-
- sg_init_table(&aes->aligned_sg, 1);
- sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
- }
-
- mtk_aes_gcm_info_init(cryp, aes, len);
-
- return mtk_aes_map(cryp, aes);
-}
-
-/* Todo: GMAC */
-static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
- struct aead_request *req = aead_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
- u32 len = req->assoclen + req->cryptlen;
-
- mtk_aes_set_mode(aes, rctx);
-
- if (aes->flags & AES_FLAGS_ENCRYPT) {
- u32 tag[4];
-
- aes->resume = mtk_aes_transfer_complete;
- /* Compute total process length. */
- aes->total = len + gctx->authsize;
- /* Hardware will append authenticated tag to output buffer */
- scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
- } else {
- aes->resume = mtk_aes_gcm_tag_verify;
- aes->total = len;
- }
-
- return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
-}
-
-static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
- struct mtk_cryp *cryp;
- bool enc = !!(mode & AES_FLAGS_ENCRYPT);
-
- cryp = mtk_aes_find_dev(ctx);
- if (!cryp)
- return -ENODEV;
-
- /* Compute text length. */
- gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
-
- /* Empty messages are not supported yet */
- if (!gctx->textlen && !req->assoclen)
- return -EINVAL;
-
- rctx->mode = AES_FLAGS_GCM | mode;
-
- return mtk_aes_handle_queue(cryp, enc, &req->base);
-}
-
-/*
- * Because of the hardware limitation, we need to pre-calculate key(H)
- * for the GHASH operation. The result of the encryption operation
- * need to be stored in the transform state buffer.
- */
-static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
- u32 keylen)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- union {
- u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
- u8 x8[AES_BLOCK_SIZE];
- } hash = {};
- struct crypto_aes_ctx aes_ctx;
- int err;
- int i;
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->keymode = AES_TFM_128BITS;
- break;
- case AES_KEYSIZE_192:
- ctx->keymode = AES_TFM_192BITS;
- break;
- case AES_KEYSIZE_256:
- ctx->keymode = AES_TFM_256BITS;
- break;
-
- default:
- return -EINVAL;
- }
-
- ctx->keylen = SIZE_IN_WORDS(keylen);
-
- err = aes_expandkey(&aes_ctx, key, keylen);
- if (err)
- return err;
-
- aes_encrypt(&aes_ctx, hash.x8, hash.x8);
- memzero_explicit(&aes_ctx, sizeof(aes_ctx));
-
- memcpy(ctx->key, key, keylen);
-
- /* Why do we need to do this? */
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- hash.x32[i] = swab32(hash.x32[i]);
-
- memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
-
- return 0;
-}
-
-static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
- u32 authsize)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
-
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 8:
- case 12:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- gctx->authsize = authsize;
- return 0;
-}
-
-static int mtk_aes_gcm_encrypt(struct aead_request *req)
-{
- return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
-}
-
-static int mtk_aes_gcm_decrypt(struct aead_request *req)
-{
- return mtk_aes_gcm_crypt(req, 0);
-}
-
-static int mtk_aes_gcm_init(struct crypto_aead *aead)
-{
- struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
-
- crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_gcm_start;
- return 0;
-}
-
-static struct aead_alg aes_gcm_alg = {
- .setkey = mtk_aes_gcm_setkey,
- .setauthsize = mtk_aes_gcm_setauthsize,
- .encrypt = mtk_aes_gcm_encrypt,
- .decrypt = mtk_aes_gcm_decrypt,
- .init = mtk_aes_gcm_init,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
-
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
-};
-
-static void mtk_aes_queue_task(unsigned long data)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
-
- mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
-}
-
-static void mtk_aes_done_task(unsigned long data)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
- struct mtk_cryp *cryp = aes->cryp;
-
- mtk_aes_unmap(cryp, aes);
- aes->resume(cryp, aes);
-}
-
-static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
- struct mtk_cryp *cryp = aes->cryp;
- u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
-
- mtk_aes_write(cryp, RDR_STAT(aes->id), val);
-
- if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(aes->id),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&aes->done_task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-/*
- * The purpose of creating encryption and decryption records is
- * to process outbound/inbound data in parallel, it can improve
- * performance in most use cases, such as IPSec VPN, especially
- * under heavy network traffic.
- */
-static int mtk_aes_record_init(struct mtk_cryp *cryp)
-{
- struct mtk_aes_rec **aes = cryp->aes;
- int i, err = -ENOMEM;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
- if (!aes[i])
- goto err_cleanup;
-
- aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
- AES_BUF_ORDER);
- if (!aes[i]->buf)
- goto err_cleanup;
-
- aes[i]->cryp = cryp;
-
- spin_lock_init(&aes[i]->lock);
- crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
-
- tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
- (unsigned long)aes[i]);
- tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
- (unsigned long)aes[i]);
- }
-
- /* Link to ring0 and ring1 respectively */
- aes[0]->id = MTK_RING0;
- aes[1]->id = MTK_RING1;
-
- return 0;
-
-err_cleanup:
- for (; i--; ) {
- free_page((unsigned long)aes[i]->buf);
- kfree(aes[i]);
- }
-
- return err;
-}
-
-static void mtk_aes_record_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->aes[i]->done_task);
- tasklet_kill(&cryp->aes[i]->queue_task);
-
- free_page((unsigned long)cryp->aes[i]->buf);
- kfree(cryp->aes[i]);
- }
-}
-
-static void mtk_aes_unregister_algs(void)
-{
- int i;
-
- crypto_unregister_aead(&aes_gcm_alg);
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_skcipher(&aes_algs[i]);
-}
-
-static int mtk_aes_register_algs(void)
-{
- int err, i;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_skcipher(&aes_algs[i]);
- if (err)
- goto err_aes_algs;
- }
-
- err = crypto_register_aead(&aes_gcm_alg);
- if (err)
- goto err_aes_algs;
-
- return 0;
-
-err_aes_algs:
- for (; i--; )
- crypto_unregister_skcipher(&aes_algs[i]);
-
- return err;
-}
-
-int mtk_cipher_alg_register(struct mtk_cryp *cryp)
-{
- int ret;
-
- INIT_LIST_HEAD(&cryp->aes_list);
-
- /* Initialize two cipher records */
- ret = mtk_aes_record_init(cryp);
- if (ret)
- goto err_record;
-
- ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
- 0, "mtk-aes", cryp->aes[0]);
- if (ret) {
- dev_err(cryp->dev, "unable to request AES irq.\n");
- goto err_res;
- }
-
- ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
- 0, "mtk-aes", cryp->aes[1]);
- if (ret) {
- dev_err(cryp->dev, "unable to request AES irq.\n");
- goto err_res;
- }
-
- /* Enable ring0 and ring1 interrupt */
- mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
- mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
-
- spin_lock(&mtk_aes.lock);
- list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
- spin_unlock(&mtk_aes.lock);
-
- ret = mtk_aes_register_algs();
- if (ret)
- goto err_algs;
-
- return 0;
-
-err_algs:
- spin_lock(&mtk_aes.lock);
- list_del(&cryp->aes_list);
- spin_unlock(&mtk_aes.lock);
-err_res:
- mtk_aes_record_free(cryp);
-err_record:
-
- dev_err(cryp->dev, "mtk-aes initialization failed.\n");
- return ret;
-}
-
-void mtk_cipher_alg_release(struct mtk_cryp *cryp)
-{
- spin_lock(&mtk_aes.lock);
- list_del(&cryp->aes_list);
- spin_unlock(&mtk_aes.lock);
-
- mtk_aes_unregister_algs();
- mtk_aes_record_free(cryp);
-}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
deleted file mode 100644
index 9d878620e5c9..000000000000
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for EIP97 cryptographic accelerator.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include "mtk-platform.h"
-
-#define MTK_BURST_SIZE_MSK GENMASK(7, 4)
-#define MTK_BURST_SIZE(x) ((x) << 4)
-#define MTK_DESC_SIZE(x) ((x) << 0)
-#define MTK_DESC_OFFSET(x) ((x) << 16)
-#define MTK_DESC_FETCH_SIZE(x) ((x) << 0)
-#define MTK_DESC_FETCH_THRESH(x) ((x) << 16)
-#define MTK_DESC_OVL_IRQ_EN BIT(25)
-#define MTK_DESC_ATP_PRESENT BIT(30)
-
-#define MTK_DFSE_IDLE GENMASK(3, 0)
-#define MTK_DFSE_THR_CTRL_EN BIT(30)
-#define MTK_DFSE_THR_CTRL_RESET BIT(31)
-#define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0))
-#define MTK_DFSE_MIN_DATA(x) ((x) << 0)
-#define MTK_DFSE_MAX_DATA(x) ((x) << 8)
-#define MTK_DFE_MIN_CTRL(x) ((x) << 16)
-#define MTK_DFE_MAX_CTRL(x) ((x) << 24)
-
-#define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8)
-#define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12)
-#define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0)
-#define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4)
-#define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0))
-#define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
-#define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0))
-#define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
-#define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0))
-
-#define MTK_PE_TK_LOC_AVL BIT(2)
-#define MTK_PE_PROC_HELD BIT(14)
-#define MTK_PE_TK_TIMEOUT_EN BIT(22)
-#define MTK_PE_INPUT_DMA_ERR BIT(0)
-#define MTK_PE_OUTPUT_DMA_ERR BIT(1)
-#define MTK_PE_PKT_PORC_ERR BIT(2)
-#define MTK_PE_PKT_TIMEOUT BIT(3)
-#define MTK_PE_FATAL_ERR BIT(14)
-#define MTK_PE_INPUT_DMA_ERR_EN BIT(16)
-#define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17)
-#define MTK_PE_PKT_PORC_ERR_EN BIT(18)
-#define MTK_PE_PKT_TIMEOUT_EN BIT(19)
-#define MTK_PE_FATAL_ERR_EN BIT(30)
-#define MTK_PE_INT_OUT_EN BIT(31)
-
-#define MTK_HIA_SIGNATURE ((u16)0x35ca)
-#define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0))
-#define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0))
-#define MTK_CDR_STAT_CLR GENMASK(4, 0)
-#define MTK_RDR_STAT_CLR GENMASK(7, 0)
-
-#define MTK_AIC_INT_MSK GENMASK(5, 0)
-#define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20))
-#define MTK_AIC_VER11 0x011036c9
-#define MTK_AIC_VER12 0x012036c9
-#define MTK_AIC_G_CLR GENMASK(30, 20)
-
-/**
- * EIP97 is an integrated security subsystem that accelerates cryptographic
- * functions and protocols in order to offload the host processor.
- * Some important hardware modules are briefly introduced below:
- *
- * Host Interface Adapter(HIA) - the main interface between the host
- * system and the hardware subsystem. It is responsible for attaching
- * the processing engine to the specific host bus interface and provides
- * a standardized software view for offloading tasks to the engine.
- *
- * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many
- * CDs the host has prepared in the CDR. It monitors the fill level of its
- * CD-FIFO and, if there's sufficient space for the next block of
- * descriptors, fires off a DMA request to fetch a block of CDs.
- *
- * Data Fetch Engine(DFE) - responsible for parsing the CD and
- * setting up the required control and packet data DMA transfers from
- * system memory to the processing engine.
- *
- * Result Descriptor Ring Manager(RDR Manager) - same as the CDR Manager,
- * but its target is result descriptors. Moreover, it also handles RD
- * updates under control of the DSE. For each packet data segment
- * processed, the DSE triggers the RDR Manager to write the updated RD.
- * If triggered to update, the RDR Manager sets up a DMA operation to
- * copy the RD from the DSE to the correct location in the RDR.
- *
- * Data Store Engine(DSE) - responsible for parsing the prepared RD
- * and setting up the required control and packet data DMA transfers from
- * the processing engine to system memory.
- *
- * Advanced Interrupt Controllers(AICs) - receive interrupt request signals
- * from various sources and combine them into one interrupt output.
- * The AICs are used as follows:
- * - one for the HIA global and processing engine interrupts,
- * - the others for the descriptor ring interrupts.
- */
-
-/* Cryptographic engine capabilities */
-struct mtk_sys_cap {
- /* host interface adapter */
- u32 hia_ver;
- u32 hia_opt;
- /* packet engine */
- u32 pkt_eng_opt;
- /* global hardware */
- u32 hw_opt;
-};
-
-static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
-{
- /* Assign rings to DFE/DSE thread and enable it */
- writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
- writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
-}
-
-static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
- struct mtk_sys_cap *cap)
-{
- u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
- u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
- u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
- u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
- u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);
-
- writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
- MTK_DFSE_MAX_DATA(ipbuf) |
- MTK_DFE_MIN_CTRL(itbuf - 1) |
- MTK_DFE_MAX_CTRL(itbuf),
- cryp->base + DFE_CFG);
-
- writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
- MTK_DFSE_MAX_DATA(opbuf),
- cryp->base + DSE_CFG);
-
- writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
- MTK_IN_BUF_MAX_THRESH(ipbuf),
- cryp->base + PE_IN_DBUF_THRESH);
-
- writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
- MTK_IN_BUF_MAX_THRESH(itbuf),
- cryp->base + PE_IN_TBUF_THRESH);
-
- writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
- MTK_OUT_BUF_MAX_THRESH(opbuf),
- cryp->base + PE_OUT_DBUF_THRESH);
-
- writel(0, cryp->base + PE_OUT_TBUF_THRESH);
- writel(0, cryp->base + PE_OUT_BUF_CTRL);
-}
-
-static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
-{
-	u32 val;
-
-	/* Check for completion of all DMA transfers */
-	val = readl(cryp->base + DFE_THR_STAT);
-	if (MTK_DFSE_RING_ID(val) != MTK_DFSE_IDLE)
-		return -EBUSY;
-
-	val = readl(cryp->base + DSE_THR_STAT);
-	if (MTK_DFSE_RING_ID(val) != MTK_DFSE_IDLE)
-		return -EBUSY;
-
-	/* Take DFE/DSE thread out of reset */
-	writel(0, cryp->base + DFE_THR_CTRL);
-	writel(0, cryp->base + DSE_THR_CTRL);
-
-	return 0;
-}
-
-static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
-{
- /* Reset DSE/DFE and correct system priorities for all rings. */
- writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
- writel(0, cryp->base + DFE_PRIO_0);
- writel(0, cryp->base + DFE_PRIO_1);
- writel(0, cryp->base + DFE_PRIO_2);
- writel(0, cryp->base + DFE_PRIO_3);
-
- writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
- writel(0, cryp->base + DSE_PRIO_0);
- writel(0, cryp->base + DSE_PRIO_1);
- writel(0, cryp->base + DSE_PRIO_2);
- writel(0, cryp->base + DSE_PRIO_3);
-
- return mtk_dfe_dse_state_check(cryp);
-}
-
-static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
- int i, struct mtk_sys_cap *cap)
-{
-	/* Number of full descriptors that fit in the FIFO, minus one */
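-	/*
-	 * Example (hypothetical capability value): a CMD FIFO size field
-	 * of 6 means a 64-word FIFO, so count = 64 / MTK_DESC_SZ - 1 = 9
-	 * full descriptors, fetched as 9 * MTK_DESC_OFF = 72 words.
-	 */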
- u32 count =
- ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;
-
- /* Temporarily disable external triggering */
- writel(0, cryp->base + CDR_CFG(i));
-
- /* Clear CDR count */
- writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
- writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));
-
- writel(0, cryp->base + CDR_PREP_PNTR(i));
- writel(0, cryp->base + CDR_PROC_PNTR(i));
- writel(0, cryp->base + CDR_DMA_CFG(i));
-
- /* Configure CDR host address space */
- writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
- writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));
-
- writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));
-
- /* Clear and disable all CDR interrupts */
- writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));
-
-	/*
-	 * Set the command descriptor offset and indicate that an
-	 * additional token is present in the descriptor.
-	 */
- writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
- MTK_DESC_OFFSET(MTK_DESC_OFF) |
- MTK_DESC_ATP_PRESENT,
- cryp->base + CDR_DESC_SIZE(i));
-
- writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
- MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
- cryp->base + CDR_CFG(i));
-}
-
-static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
- int i, struct mtk_sys_cap *cap)
-{
- u32 rndup = 2;
- u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;
-
- /* Temporarily disable external triggering */
- writel(0, cryp->base + RDR_CFG(i));
-
- /* Clear RDR count */
- writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
- writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));
-
- writel(0, cryp->base + RDR_PREP_PNTR(i));
- writel(0, cryp->base + RDR_PROC_PNTR(i));
- writel(0, cryp->base + RDR_DMA_CFG(i));
-
- /* Configure RDR host address space */
- writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
- writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));
-
- writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
- writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));
-
-	/*
-	 * The RDR manager generates update interrupts on a per-completed-
-	 * packet basis; the rd_proc_thresh_irq interrupt fires when
-	 * proc_pkt_count for the RDR exceeds the packet threshold.
-	 */
- writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
- cryp->base + RDR_THRESH(i));
-
- /*
- * Configure a threshold and time-out value for the processed
- * result descriptors (or complete packets) that are written to
- * the RDR.
- */
- writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
- cryp->base + RDR_DESC_SIZE(i));
-
- /*
- * Configure HIA fetch size and fetch threshold that are used to
- * fetch blocks of multiple descriptors.
- */
- writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
- MTK_DESC_FETCH_THRESH(count * rndup) |
- MTK_DESC_OVL_IRQ_EN,
- cryp->base + RDR_CFG(i));
-}
-
-static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
-{
- struct mtk_sys_cap cap;
- int i, err;
- u32 val;
-
- cap.hia_ver = readl(cryp->base + HIA_VERSION);
- cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
- cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);
-
-	if ((u16)cap.hia_ver != MTK_HIA_SIGNATURE)
-		return -EINVAL;
-
- /* Configure endianness conversion method for master (DMA) interface */
- writel(0, cryp->base + EIP97_MST_CTRL);
-
- /* Set HIA burst size */
- val = readl(cryp->base + HIA_MST_CTRL);
- val &= ~MTK_BURST_SIZE_MSK;
- val |= MTK_BURST_SIZE(5);
- writel(val, cryp->base + HIA_MST_CTRL);
-
- err = mtk_dfe_dse_reset(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
- return err;
- }
-
- mtk_dfe_dse_buf_setup(cryp, &cap);
-
- /* Enable the 4 rings for the packet engines. */
- mtk_desc_ring_link(cryp, 0xf);
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- mtk_cmd_desc_ring_setup(cryp, i, &cap);
- mtk_res_desc_ring_setup(cryp, i, &cap);
- }
-
- writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
- cryp->base + PE_TOKEN_CTRL_STAT);
-
- /* Clear all pending interrupts */
- writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
- writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
- MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
- MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
- MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
- MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
- MTK_PE_INT_OUT_EN,
- cryp->base + PE_INTERRUPT_CTRL_STAT);
-
- return 0;
-}
-
-static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
-{
- u32 val;
-
- if (hw == MTK_RING_MAX)
- val = readl(cryp->base + AIC_G_VERSION);
- else
- val = readl(cryp->base + AIC_VERSION(hw));
-
- val &= MTK_AIC_VER_MSK;
- if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
- return -ENXIO;
-
- if (hw == MTK_RING_MAX)
- val = readl(cryp->base + AIC_G_OPTIONS);
- else
- val = readl(cryp->base + AIC_OPTIONS(hw));
-
- val &= MTK_AIC_INT_MSK;
- if (!val || val > 32)
- return -ENXIO;
-
- return 0;
-}
-
-static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
-{
- int err;
-
- err = mtk_aic_cap_check(cryp, hw);
- if (err)
- return err;
-
- /* Disable all interrupts and set initial configuration */
- if (hw == MTK_RING_MAX) {
- writel(0, cryp->base + AIC_G_ENABLE_CTRL);
- writel(0, cryp->base + AIC_G_POL_CTRL);
- writel(0, cryp->base + AIC_G_TYPE_CTRL);
- writel(0, cryp->base + AIC_G_ENABLE_SET);
- } else {
- writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
- writel(0, cryp->base + AIC_POL_CTRL(hw));
- writel(0, cryp->base + AIC_TYPE_CTRL(hw));
- writel(0, cryp->base + AIC_ENABLE_SET(hw));
- }
-
- return 0;
-}
-
-static int mtk_accelerator_init(struct mtk_cryp *cryp)
-{
- int i, err;
-
- /* Initialize advanced interrupt controller(AIC) */
- for (i = 0; i < MTK_IRQ_NUM; i++) {
- err = mtk_aic_init(cryp, i);
- if (err) {
- dev_err(cryp->dev, "Failed to initialize AIC.\n");
- return err;
- }
- }
-
- /* Initialize packet engine */
- err = mtk_packet_engine_setup(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to configure packet engine.\n");
- return err;
- }
-
- return 0;
-}
-
-static void mtk_desc_dma_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- cryp->ring[i]->res_base,
- cryp->ring[i]->res_dma);
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- cryp->ring[i]->cmd_base,
- cryp->ring[i]->cmd_dma);
- kfree(cryp->ring[i]);
- }
-}
-
-static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
-{
- struct mtk_ring **ring = cryp->ring;
- int i;
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
- if (!ring[i])
- goto err_cleanup;
-
- ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->cmd_dma,
- GFP_KERNEL);
- if (!ring[i]->cmd_base)
- goto err_cleanup;
-
- ring[i]->res_base = dma_alloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->res_dma,
- GFP_KERNEL);
- if (!ring[i]->res_base)
- goto err_cleanup;
-
- ring[i]->cmd_next = ring[i]->cmd_base;
- ring[i]->res_next = ring[i]->res_base;
- }
- return 0;
-
-err_cleanup:
- do {
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- ring[i]->res_base, ring[i]->res_dma);
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- ring[i]->cmd_base, ring[i]->cmd_dma);
- kfree(ring[i]);
- } while (i--);
- return -ENOMEM;
-}
-
-static int mtk_crypto_probe(struct platform_device *pdev)
-{
- struct mtk_cryp *cryp;
- int i, err;
-
- cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
- if (!cryp)
- return -ENOMEM;
-
- cryp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(cryp->base))
- return PTR_ERR(cryp->base);
-
- for (i = 0; i < MTK_IRQ_NUM; i++) {
- cryp->irq[i] = platform_get_irq(pdev, i);
- if (cryp->irq[i] < 0)
- return cryp->irq[i];
- }
-
-	cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
-	if (IS_ERR(cryp->clk_cryp))
-		return PTR_ERR(cryp->clk_cryp);
-
- cryp->dev = &pdev->dev;
- pm_runtime_enable(cryp->dev);
- pm_runtime_get_sync(cryp->dev);
-
- err = clk_prepare_enable(cryp->clk_cryp);
- if (err)
- goto err_clk_cryp;
-
- /* Allocate four command/result descriptor rings */
- err = mtk_desc_ring_alloc(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
- goto err_resource;
- }
-
- /* Initialize hardware modules */
- err = mtk_accelerator_init(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
- goto err_engine;
- }
-
- err = mtk_cipher_alg_register(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
- goto err_cipher;
- }
-
- err = mtk_hash_alg_register(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to register hash algorithm.\n");
- goto err_hash;
- }
-
- platform_set_drvdata(pdev, cryp);
- return 0;
-
-err_hash:
- mtk_cipher_alg_release(cryp);
-err_cipher:
- mtk_dfe_dse_reset(cryp);
-err_engine:
- mtk_desc_dma_free(cryp);
-err_resource:
- clk_disable_unprepare(cryp->clk_cryp);
-err_clk_cryp:
- pm_runtime_put_sync(cryp->dev);
- pm_runtime_disable(cryp->dev);
-
- return err;
-}
-
-static int mtk_crypto_remove(struct platform_device *pdev)
-{
- struct mtk_cryp *cryp = platform_get_drvdata(pdev);
-
- mtk_hash_alg_release(cryp);
- mtk_cipher_alg_release(cryp);
- mtk_desc_dma_free(cryp);
-
- clk_disable_unprepare(cryp->clk_cryp);
-
- pm_runtime_put_sync(cryp->dev);
- pm_runtime_disable(cryp->dev);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static const struct of_device_id of_crypto_id[] = {
- { .compatible = "mediatek,eip97-crypto" },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_crypto_id);
-
-static struct platform_driver mtk_crypto_driver = {
- .probe = mtk_crypto_probe,
- .remove = mtk_crypto_remove,
- .driver = {
- .name = "mtk-crypto",
- .of_match_table = of_crypto_id,
- },
-};
-module_platform_driver(mtk_crypto_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
-MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
deleted file mode 100644
index 47920c51abac..000000000000
--- a/drivers/crypto/mediatek/mtk-platform.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for EIP97 cryptographic accelerator.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#ifndef __MTK_PLATFORM_H_
-#define __MTK_PLATFORM_H_
-
-#include <crypto/algapi.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
-#include <linux/crypto.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include "mtk-regs.h"
-
-#define MTK_RDR_PROC_THRESH BIT(0)
-#define MTK_RDR_PROC_MODE BIT(23)
-#define MTK_CNT_RST BIT(31)
-#define MTK_IRQ_RDR0 BIT(1)
-#define MTK_IRQ_RDR1 BIT(3)
-#define MTK_IRQ_RDR2 BIT(5)
-#define MTK_IRQ_RDR3 BIT(7)
-
-#define SIZE_IN_WORDS(x) ((x) >> 2)
-
-/**
- * Ring 0/1 are used by AES encrypt and decrypt.
- * Ring 2/3 are used by SHA.
- */
-enum {
- MTK_RING0,
- MTK_RING1,
- MTK_RING2,
- MTK_RING3,
- MTK_RING_MAX
-};
-
-#define MTK_REC_NUM (MTK_RING_MAX / 2)
-#define MTK_IRQ_NUM 5
-
-/**
- * struct mtk_desc - DMA descriptor
- * @hdr: the descriptor control header
- * @buf: DMA address of input buffer segment
- * @ct: DMA address of the command token that controls operation flow
- * @ct_hdr: the command token control header
- * @tag: the user-defined field
- * @tfm: DMA address of transform state
- * @bound: padding that aligns descriptors to the offset boundary
- *
- * Structure passed to the crypto engine to describe where source
- * data needs to be fetched and how it needs to be processed.
- */
-struct mtk_desc {
- __le32 hdr;
- __le32 buf;
- __le32 ct;
- __le32 ct_hdr;
- __le32 tag;
- __le32 tfm;
- __le32 bound[2];
-};
-
-#define MTK_DESC_NUM 512
-#define MTK_DESC_OFF SIZE_IN_WORDS(sizeof(struct mtk_desc))
-#define MTK_DESC_SZ (MTK_DESC_OFF - 2)
-#define MTK_DESC_RING_SZ ((sizeof(struct mtk_desc) * MTK_DESC_NUM))
-#define MTK_DESC_CNT(x) ((MTK_DESC_OFF * (x)) << 2)
-#define MTK_DESC_LAST cpu_to_le32(BIT(22))
-#define MTK_DESC_FIRST cpu_to_le32(BIT(23))
-#define MTK_DESC_BUF_LEN(x) cpu_to_le32(x)
-#define MTK_DESC_CT_LEN(x) cpu_to_le32((x) << 24)
-
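-/*
- * Worked example: struct mtk_desc is eight 32-bit words (32 bytes), so
- * MTK_DESC_OFF is 8 words and MTK_DESC_SZ is 6 words, i.e. the
- * descriptor minus the two alignment words in @bound.
- */
-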
-/**
- * struct mtk_ring - Descriptor ring
- * @cmd_base: pointer to command descriptor ring base
- * @cmd_next: pointer to the next command descriptor
- * @cmd_dma: DMA address of command descriptor ring
- * @res_base: pointer to result descriptor ring base
- * @res_next: pointer to the next result descriptor
- * @res_prev: pointer to the previous result descriptor
- * @res_dma: DMA address of result descriptor ring
- *
- * A descriptor ring is a circular buffer that is used to manage
- * one or more descriptors. There are two types of descriptor rings:
- * the command descriptor ring and the result descriptor ring.
- */
-struct mtk_ring {
- struct mtk_desc *cmd_base;
- struct mtk_desc *cmd_next;
- dma_addr_t cmd_dma;
- struct mtk_desc *res_base;
- struct mtk_desc *res_next;
- struct mtk_desc *res_prev;
- dma_addr_t res_dma;
-};
-
-/**
- * struct mtk_aes_dma - Structure that holds sg list info
- * @sg: pointer to scatter-gather list
- * @nents: number of entries in the sg list
- * @remainder: remainder of sg list
- * @sg_len: number of entries in the sg mapped list
- */
-struct mtk_aes_dma {
- struct scatterlist *sg;
- int nents;
- u32 remainder;
- u32 sg_len;
-};
-
-struct mtk_aes_base_ctx;
-struct mtk_aes_rec;
-struct mtk_cryp;
-
-typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
-
-/**
- * struct mtk_aes_rec - AES operation record
- * @cryp: pointer to Cryptographic device
- * @queue: crypto request queue
- * @areq: pointer to async request
- * @done_task: tasklet used in the AES interrupt handler
- * @queue_task: tasklet used to dequeue requests
- * @ctx: pointer to current context
- * @src: the structure that holds source sg list info
- * @dst: the structure that holds destination sg list info
- * @aligned_sg: scatterlist used for alignment
- * @real_dst: pointer to the destination sg list
- * @resume: pointer to resume function
- * @total: request buffer length
- * @buf: pointer to page buffer
- * @id: id of the ring currently in use
- * @flags: flags describing the AES operation state
- * @lock: the async queue lock
- *
- * Structure used to record AES execution state.
- */
-struct mtk_aes_rec {
- struct mtk_cryp *cryp;
- struct crypto_queue queue;
- struct crypto_async_request *areq;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
- struct mtk_aes_base_ctx *ctx;
- struct mtk_aes_dma src;
- struct mtk_aes_dma dst;
-
- struct scatterlist aligned_sg;
- struct scatterlist *real_dst;
-
- mtk_aes_fn resume;
-
- size_t total;
- void *buf;
-
- u8 id;
- unsigned long flags;
- /* queue lock */
- spinlock_t lock;
-};
-
-/**
- * struct mtk_sha_rec - SHA operation record
- * @cryp: pointer to Cryptographic device
- * @queue: crypto request queue
- * @req: pointer to ahash request
- * @done_task: tasklet used in the SHA interrupt handler
- * @queue_task: tasklet used to dequeue requests
- * @id: id of the ring currently in use
- * @flags: flags describing the SHA operation state
- * @lock: the async queue lock
- *
- * Structure used to record SHA execution state.
- */
-struct mtk_sha_rec {
- struct mtk_cryp *cryp;
- struct crypto_queue queue;
- struct ahash_request *req;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
-
- u8 id;
- unsigned long flags;
- /* queue lock */
- spinlock_t lock;
-};
-
-/**
- * struct mtk_cryp - Cryptographic device
- * @base: pointer to mapped register I/O base
- * @dev: pointer to device
- * @clk_cryp: pointer to crypto clock
- * @irq: global system and rings IRQ
- * @ring: pointer to descriptor rings
- * @aes: pointer to operation record of AES
- * @sha: pointer to operation record of SHA
- * @aes_list: device list of AES
- * @sha_list: device list of SHA
- * @rec: used to select the record for a tfm in round-robin fashion
- *
- * Structure storing cryptographic device information.
- */
-struct mtk_cryp {
- void __iomem *base;
- struct device *dev;
- struct clk *clk_cryp;
- int irq[MTK_IRQ_NUM];
-
- struct mtk_ring *ring[MTK_RING_MAX];
- struct mtk_aes_rec *aes[MTK_REC_NUM];
- struct mtk_sha_rec *sha[MTK_REC_NUM];
-
- struct list_head aes_list;
- struct list_head sha_list;
-
- bool rec;
-};
-
-int mtk_cipher_alg_register(struct mtk_cryp *cryp);
-void mtk_cipher_alg_release(struct mtk_cryp *cryp);
-int mtk_hash_alg_register(struct mtk_cryp *cryp);
-void mtk_hash_alg_release(struct mtk_cryp *cryp);
-
-#endif /* __MTK_PLATFORM_H_ */
diff --git a/drivers/crypto/mediatek/mtk-regs.h b/drivers/crypto/mediatek/mtk-regs.h
deleted file mode 100644
index d3defda7a750..000000000000
--- a/drivers/crypto/mediatek/mtk-regs.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Support for MediaTek cryptographic accelerator.
- *
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#ifndef __MTK_REGS_H__
-#define __MTK_REGS_H__
-
-/* HIA, Command Descriptor Ring Manager */
-#define CDR_BASE_ADDR_LO(x) (0x0 + ((x) << 12))
-#define CDR_BASE_ADDR_HI(x) (0x4 + ((x) << 12))
-#define CDR_DATA_BASE_ADDR_LO(x) (0x8 + ((x) << 12))
-#define CDR_DATA_BASE_ADDR_HI(x) (0xC + ((x) << 12))
-#define CDR_ACD_BASE_ADDR_LO(x) (0x10 + ((x) << 12))
-#define CDR_ACD_BASE_ADDR_HI(x) (0x14 + ((x) << 12))
-#define CDR_RING_SIZE(x) (0x18 + ((x) << 12))
-#define CDR_DESC_SIZE(x) (0x1C + ((x) << 12))
-#define CDR_CFG(x) (0x20 + ((x) << 12))
-#define CDR_DMA_CFG(x) (0x24 + ((x) << 12))
-#define CDR_THRESH(x) (0x28 + ((x) << 12))
-#define CDR_PREP_COUNT(x) (0x2C + ((x) << 12))
-#define CDR_PROC_COUNT(x) (0x30 + ((x) << 12))
-#define CDR_PREP_PNTR(x) (0x34 + ((x) << 12))
-#define CDR_PROC_PNTR(x) (0x38 + ((x) << 12))
-#define CDR_STAT(x) (0x3C + ((x) << 12))
-
-/* HIA, Result Descriptor Ring Manager */
-#define RDR_BASE_ADDR_LO(x) (0x800 + ((x) << 12))
-#define RDR_BASE_ADDR_HI(x) (0x804 + ((x) << 12))
-#define RDR_DATA_BASE_ADDR_LO(x) (0x808 + ((x) << 12))
-#define RDR_DATA_BASE_ADDR_HI(x) (0x80C + ((x) << 12))
-#define RDR_ACD_BASE_ADDR_LO(x) (0x810 + ((x) << 12))
-#define RDR_ACD_BASE_ADDR_HI(x) (0x814 + ((x) << 12))
-#define RDR_RING_SIZE(x) (0x818 + ((x) << 12))
-#define RDR_DESC_SIZE(x) (0x81C + ((x) << 12))
-#define RDR_CFG(x) (0x820 + ((x) << 12))
-#define RDR_DMA_CFG(x) (0x824 + ((x) << 12))
-#define RDR_THRESH(x) (0x828 + ((x) << 12))
-#define RDR_PREP_COUNT(x) (0x82C + ((x) << 12))
-#define RDR_PROC_COUNT(x) (0x830 + ((x) << 12))
-#define RDR_PREP_PNTR(x) (0x834 + ((x) << 12))
-#define RDR_PROC_PNTR(x) (0x838 + ((x) << 12))
-#define RDR_STAT(x) (0x83C + ((x) << 12))
-
-/* HIA, Ring AIC */
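-/* Ring AIC banks are laid out top-down: ring x sits at 0xE000 - (x << 12) */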
-#define AIC_POL_CTRL(x) (0xE000 - ((x) << 12))
-#define AIC_TYPE_CTRL(x) (0xE004 - ((x) << 12))
-#define AIC_ENABLE_CTRL(x) (0xE008 - ((x) << 12))
-#define AIC_RAW_STAL(x) (0xE00C - ((x) << 12))
-#define AIC_ENABLE_SET(x) (0xE00C - ((x) << 12))
-#define AIC_ENABLED_STAT(x) (0xE010 - ((x) << 12))
-#define AIC_ACK(x) (0xE010 - ((x) << 12))
-#define AIC_ENABLE_CLR(x) (0xE014 - ((x) << 12))
-#define AIC_OPTIONS(x) (0xE018 - ((x) << 12))
-#define AIC_VERSION(x) (0xE01C - ((x) << 12))
-
-/* HIA, Global AIC */
-#define AIC_G_POL_CTRL 0xF800
-#define AIC_G_TYPE_CTRL 0xF804
-#define AIC_G_ENABLE_CTRL 0xF808
-#define AIC_G_RAW_STAT 0xF80C
-#define AIC_G_ENABLE_SET 0xF80C
-#define AIC_G_ENABLED_STAT 0xF810
-#define AIC_G_ACK 0xF810
-#define AIC_G_ENABLE_CLR 0xF814
-#define AIC_G_OPTIONS 0xF818
-#define AIC_G_VERSION 0xF81C
-
-/* HIA, Data Fetch Engine */
-#define DFE_CFG 0xF000
-#define DFE_PRIO_0 0xF010
-#define DFE_PRIO_1 0xF014
-#define DFE_PRIO_2 0xF018
-#define DFE_PRIO_3 0xF01C
-
-/* HIA, Data Fetch Engine access monitoring for CDR */
-#define DFE_RING_REGION_LO(x) (0xF080 + ((x) << 3))
-#define DFE_RING_REGION_HI(x) (0xF084 + ((x) << 3))
-
-/* HIA, Data Fetch Engine thread control and status for thread */
-#define DFE_THR_CTRL 0xF200
-#define DFE_THR_STAT 0xF204
-#define DFE_THR_DESC_CTRL 0xF208
-#define DFE_THR_DESC_DPTR_LO 0xF210
-#define DFE_THR_DESC_DPTR_HI 0xF214
-#define DFE_THR_DESC_ACDPTR_LO 0xF218
-#define DFE_THR_DESC_ACDPTR_HI 0xF21C
-
-/* HIA, Data Store Engine */
-#define DSE_CFG 0xF400
-#define DSE_PRIO_0 0xF410
-#define DSE_PRIO_1 0xF414
-#define DSE_PRIO_2 0xF418
-#define DSE_PRIO_3 0xF41C
-
-/* HIA, Data Store Engine access monitoring for RDR */
-#define DSE_RING_REGION_LO(x) (0xF480 + ((x) << 3))
-#define DSE_RING_REGION_HI(x) (0xF484 + ((x) << 3))
-
-/* HIA, Data Store Engine thread control and status for thread */
-#define DSE_THR_CTRL 0xF600
-#define DSE_THR_STAT 0xF604
-#define DSE_THR_DESC_CTRL 0xF608
-#define DSE_THR_DESC_DPTR_LO 0xF610
-#define DSE_THR_DESC_DPTR_HI 0xF614
-#define DSE_THR_DESC_S_DPTR_LO 0xF618
-#define DSE_THR_DESC_S_DPTR_HI 0xF61C
-#define DSE_THR_ERROR_STAT 0xF620
-
-/* HIA Global */
-#define HIA_MST_CTRL 0xFFF4
-#define HIA_OPTIONS 0xFFF8
-#define HIA_VERSION 0xFFFC
-
-/* Processing Engine Input Side, Processing Engine */
-#define PE_IN_DBUF_THRESH 0x10000
-#define PE_IN_TBUF_THRESH 0x10100
-
-/* Packet Engine Configuration / Status Registers */
-#define PE_TOKEN_CTRL_STAT 0x11000
-#define PE_FUNCTION_EN 0x11004
-#define PE_CONTEXT_CTRL 0x11008
-#define PE_INTERRUPT_CTRL_STAT 0x11010
-#define PE_CONTEXT_STAT 0x1100C
-#define PE_OUT_TRANS_CTRL_STAT 0x11018
-#define PE_OUT_BUF_CTRL 0x1101C
-
-/* Packet Engine PRNG Registers */
-#define PE_PRNG_STAT 0x11040
-#define PE_PRNG_CTRL 0x11044
-#define PE_PRNG_SEED_L 0x11048
-#define PE_PRNG_SEED_H 0x1104C
-#define PE_PRNG_KEY_0_L 0x11050
-#define PE_PRNG_KEY_0_H 0x11054
-#define PE_PRNG_KEY_1_L 0x11058
-#define PE_PRNG_KEY_1_H 0x1105C
-#define PE_PRNG_RES_0 0x11060
-#define PE_PRNG_RES_1 0x11064
-#define PE_PRNG_RES_2 0x11068
-#define PE_PRNG_RES_3 0x1106C
-#define PE_PRNG_LFSR_L 0x11070
-#define PE_PRNG_LFSR_H 0x11074
-
-/* Packet Engine AIC */
-#define PE_EIP96_AIC_POL_CTRL 0x113C0
-#define PE_EIP96_AIC_TYPE_CTRL 0x113C4
-#define PE_EIP96_AIC_ENABLE_CTRL 0x113C8
-#define PE_EIP96_AIC_RAW_STAT 0x113CC
-#define PE_EIP96_AIC_ENABLE_SET 0x113CC
-#define PE_EIP96_AIC_ENABLED_STAT 0x113D0
-#define PE_EIP96_AIC_ACK 0x113D0
-#define PE_EIP96_AIC_ENABLE_CLR 0x113D4
-#define PE_EIP96_AIC_OPTIONS 0x113D8
-#define PE_EIP96_AIC_VERSION 0x113DC
-
-/* Packet Engine Options & Version Registers */
-#define PE_EIP96_OPTIONS 0x113F8
-#define PE_EIP96_VERSION 0x113FC
-
-/* Processing Engine Output Side */
-#define PE_OUT_DBUF_THRESH 0x11C00
-#define PE_OUT_TBUF_THRESH 0x11D00
-
-/* Processing Engine Local AIC */
-#define PE_AIC_POL_CTRL 0x11F00
-#define PE_AIC_TYPE_CTRL 0x11F04
-#define PE_AIC_ENABLE_CTRL 0x11F08
-#define PE_AIC_RAW_STAT 0x11F0C
-#define PE_AIC_ENABLE_SET 0x11F0C
-#define PE_AIC_ENABLED_STAT 0x11F10
-#define PE_AIC_ENABLE_CLR 0x11F14
-#define PE_AIC_OPTIONS 0x11F18
-#define PE_AIC_VERSION 0x11F1C
-
-/* Processing Engine General Configuration and Version */
-#define PE_IN_FLIGHT 0x11FF0
-#define PE_OPTIONS 0x11FF8
-#define PE_VERSION 0x11FFC
-
-/* EIP-97 - Global */
-#define EIP97_CLOCK_STATE 0x1FFE4
-#define EIP97_FORCE_CLOCK_ON 0x1FFE8
-#define EIP97_FORCE_CLOCK_OFF 0x1FFEC
-#define EIP97_MST_CTRL 0x1FFF4
-#define EIP97_OPTIONS 0x1FFF8
-#define EIP97_VERSION 0x1FFFC
-#endif /* __MTK_REGS_H__ */
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
deleted file mode 100644
index f55aacdafbef..000000000000
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ /dev/null
@@ -1,1353 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- *
- * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- *
- * Some ideas are from atmel-sha.c and omap-sham.c drivers.
- */
-
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include "mtk-platform.h"
-
-#define SHA_ALIGN_MSK (sizeof(u32) - 1)
-#define SHA_QUEUE_SIZE 512
-#define SHA_BUF_SIZE ((u32)PAGE_SIZE)
-
-#define SHA_OP_UPDATE 1
-#define SHA_OP_FINAL 2
-
-#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
-#define SHA_MAX_DIGEST_BUF_SIZE 32
-
-/* SHA command token */
-#define SHA_CT_SIZE 5
-#define SHA_CT_CTRL_HDR cpu_to_le32(0x02220000)
-#define SHA_CMD0 cpu_to_le32(0x03020000)
-#define SHA_CMD1 cpu_to_le32(0x21060000)
-#define SHA_CMD2 cpu_to_le32(0xe0e63802)
-
-/* SHA transform information */
-#define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
-#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
-#define SHA_TFM_START cpu_to_le32(0x1 << 4)
-#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
-#define SHA_TFM_HASH_STORE cpu_to_le32(0x1 << 19)
-#define SHA_TFM_SHA1 cpu_to_le32(0x2 << 23)
-#define SHA_TFM_SHA256 cpu_to_le32(0x3 << 23)
-#define SHA_TFM_SHA224 cpu_to_le32(0x4 << 23)
-#define SHA_TFM_SHA512 cpu_to_le32(0x5 << 23)
-#define SHA_TFM_SHA384 cpu_to_le32(0x6 << 23)
-#define SHA_TFM_DIGEST(x) cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
-
-/* SHA flags */
-#define SHA_FLAGS_BUSY BIT(0)
-#define SHA_FLAGS_FINAL BIT(1)
-#define SHA_FLAGS_FINUP BIT(2)
-#define SHA_FLAGS_SG BIT(3)
-#define SHA_FLAGS_ALGO_MSK GENMASK(8, 4)
-#define SHA_FLAGS_SHA1 BIT(4)
-#define SHA_FLAGS_SHA224 BIT(5)
-#define SHA_FLAGS_SHA256 BIT(6)
-#define SHA_FLAGS_SHA384 BIT(7)
-#define SHA_FLAGS_SHA512 BIT(8)
-#define SHA_FLAGS_HMAC BIT(9)
-#define SHA_FLAGS_PAD BIT(10)
-
-/**
- * mtk_sha_info - hardware information of SHA
- * @ctrl: transform control words
- * @cmd: command token, hardware instructions
- * @tfm: transform state of the hash algorithm
- * @digest: buffer holding the (intermediate) hash digest
- */
-struct mtk_sha_info {
- __le32 ctrl[2];
- __le32 cmd[3];
- __le32 tfm[2];
- __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
-};
-
-struct mtk_sha_reqctx {
- struct mtk_sha_info info;
- unsigned long flags;
- unsigned long op;
-
- u64 digcnt;
- size_t bufcnt;
- dma_addr_t dma_addr;
-
- __le32 ct_hdr;
- u32 ct_size;
- dma_addr_t ct_dma;
- dma_addr_t tfm_dma;
-
- /* Walk state */
- struct scatterlist *sg;
- u32 offset; /* Offset in current sg */
- u32 total; /* Total request */
- size_t ds;
- size_t bs;
-
- u8 *buffer;
-};
-
-struct mtk_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
- u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
-};
-
-struct mtk_sha_ctx {
- struct mtk_cryp *cryp;
- unsigned long flags;
- u8 id;
- u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
-
- struct mtk_sha_hmac_ctx base[];
-};
-
-struct mtk_sha_drv {
- struct list_head dev_list;
- /* Device list lock */
- spinlock_t lock;
-};
-
-static struct mtk_sha_drv mtk_sha = {
- .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
-};
-
-static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct ahash_request *req);
-
-static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
-{
- return readl_relaxed(cryp->base + offset);
-}
-
-static inline void mtk_sha_write(struct mtk_cryp *cryp,
- u32 offset, u32 value)
-{
- writel_relaxed(value, cryp->base + offset);
-}
-
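-/*
- * Advance the command and result ring pointers in lockstep, wrapping
- * both back to the ring base once MTK_DESC_NUM descriptors are used.
- */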
-static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
- struct mtk_desc **cmd_curr,
- struct mtk_desc **res_curr,
- int *count)
-{
- *cmd_curr = ring->cmd_next++;
- *res_curr = ring->res_next++;
- (*count)++;
-
- if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
- ring->cmd_next = ring->cmd_base;
- ring->res_next = ring->res_base;
- }
-}
-
-static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
-{
- struct mtk_cryp *cryp = NULL;
- struct mtk_cryp *tmp;
-
- spin_lock_bh(&mtk_sha.lock);
- if (!tctx->cryp) {
- list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
- cryp = tmp;
- break;
- }
- tctx->cryp = cryp;
- } else {
- cryp = tctx->cryp;
- }
-
-	/*
-	 * Assign a record id to the tfm in round-robin fashion; this
-	 * binds the tfm to the corresponding descriptor rings.
-	 */
- tctx->id = cryp->rec;
- cryp->rec = !cryp->rec;
-
- spin_unlock_bh(&mtk_sha.lock);
-
- return cryp;
-}
-
-static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
-{
- size_t count;
-
- while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
- count = min(ctx->sg->length - ctx->offset, ctx->total);
- count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
-
- if (count <= 0) {
-			/*
-			 * Check if count <= 0 because the buffer is full or
-			 * because the sg length is 0. In the latter case,
-			 * check if there is another sg in the list; a 0 length
-			 * sg doesn't necessarily mean the end of the sg list.
-			 */
- if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
- ctx->sg = sg_next(ctx->sg);
- continue;
- } else {
- break;
- }
- }
-
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
- ctx->offset, count, 0);
-
- ctx->bufcnt += count;
- ctx->offset += count;
- ctx->total -= count;
-
- if (ctx->offset == ctx->sg->length) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- else
- ctx->total = 0;
- }
- }
-
- return 0;
-}
-
-/*
- * The purpose of this padding is to ensure that the padded message is a
- * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
- * The bit "1" is appended at the end of the message, followed by
- * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
- * 128-bit block (SHA384/SHA512) equal to the message length in bits is
- * appended.
- *
- * For SHA1/SHA224/SHA256, padlen is calculated as follows:
- * - if message length < 56 bytes then padlen = 56 - message length
- * - else padlen = 64 + 56 - message length
- *
- * For SHA384/SHA512, padlen is calculated as follows:
- * - if message length < 112 bytes then padlen = 112 - message length
- * - else padlen = 128 + 112 - message length
- */
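-/*
- * Worked example (SHA-256, 20-byte message, empty buffer): index = 20,
- * padlen = 56 - 20 = 36, so the padded block is 20 + 36 + 8 = 64 bytes:
- * the message, a 0x80 byte, 35 zero bytes, then the 64-bit big-endian
- * bit length (20 * 8 = 160).
- */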
-static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
-{
- u32 index, padlen;
- __be64 bits[2];
- u64 size = ctx->digcnt;
-
- size += ctx->bufcnt;
- size += len;
-
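-	/* Split the 128-bit big-endian bit count: bits[0] holds the upper 64 bits */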
- bits[1] = cpu_to_be64(size << 3);
- bits[0] = cpu_to_be64(size >> 61);
-
- switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
- case SHA_FLAGS_SHA384:
- case SHA_FLAGS_SHA512:
- index = ctx->bufcnt & 0x7f;
- padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
- ctx->bufcnt += padlen + 16;
- ctx->flags |= SHA_FLAGS_PAD;
- break;
-
- default:
- index = ctx->bufcnt & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
- ctx->bufcnt += padlen + 8;
- ctx->flags |= SHA_FLAGS_PAD;
- break;
- }
-}
-
-/* Initialize basic transform information of SHA */
-static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
-{
- struct mtk_sha_info *info = &ctx->info;
-
- ctx->ct_hdr = SHA_CT_CTRL_HDR;
- ctx->ct_size = SHA_CT_SIZE;
-
- info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
-
- switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
- case SHA_FLAGS_SHA1:
- info->tfm[0] |= SHA_TFM_SHA1;
- break;
- case SHA_FLAGS_SHA224:
- info->tfm[0] |= SHA_TFM_SHA224;
- break;
- case SHA_FLAGS_SHA256:
- info->tfm[0] |= SHA_TFM_SHA256;
- break;
- case SHA_FLAGS_SHA384:
- info->tfm[0] |= SHA_TFM_SHA384;
- break;
- case SHA_FLAGS_SHA512:
- info->tfm[0] |= SHA_TFM_SHA512;
- break;
-
- default:
- /* Should not happen... */
- return;
- }
-
- info->tfm[1] = SHA_TFM_HASH_STORE;
- info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
- info->ctrl[1] = info->tfm[1];
-
- info->cmd[0] = SHA_CMD0;
- info->cmd[1] = SHA_CMD1;
- info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
-}
-
-/*
- * Update input data length field of transform information and
- * map it to DMA region.
- */
-static int mtk_sha_info_update(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- size_t len1, size_t len2)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- struct mtk_sha_info *info = &ctx->info;
-
- ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
- ctx->ct_hdr |= cpu_to_le32(len1 + len2);
- info->cmd[0] &= ~SHA_DATA_LEN_MSK;
- info->cmd[0] |= cpu_to_le32(len1 + len2);
-
- /* Setting SHA_TFM_START only for the first iteration */
- if (ctx->digcnt)
- info->ctrl[0] &= ~SHA_TFM_START;
-
- ctx->digcnt += len1;
-
- ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
- dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
- return -EINVAL;
- }
-
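-	/* The transform words follow the ctrl and cmd words in mtk_sha_info */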
- ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
-
- return 0;
-}
-
-/*
- * Because of a hardware limitation, we must pre-calculate the inner
- * and outer digests that need to be processed first by the engine,
- * then apply the resulting digest to the input message. These complex
- * hashing procedures limit HMAC performance, so we fall back to a
- * software shash for the final step.
- */
-static int mtk_sha_finish_hmac(struct ahash_request *req)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- SHASH_DESC_ON_STACK(shash, bctx->shash);
-
- shash->tfm = bctx->shash;
-
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
- crypto_shash_finup(shash, req->result, ctx->ds, req->result);
-}
-
-/* Initialize request context */
-static int mtk_sha_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->flags = 0;
- ctx->ds = crypto_ahash_digestsize(tfm);
-
- switch (ctx->ds) {
- case SHA1_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA1;
- ctx->bs = SHA1_BLOCK_SIZE;
- break;
- case SHA224_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA224;
- ctx->bs = SHA224_BLOCK_SIZE;
- break;
- case SHA256_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA256;
- ctx->bs = SHA256_BLOCK_SIZE;
- break;
- case SHA384_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA384;
- ctx->bs = SHA384_BLOCK_SIZE;
- break;
- case SHA512_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA512;
- ctx->bs = SHA512_BLOCK_SIZE;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->bufcnt = 0;
- ctx->digcnt = 0;
- ctx->buffer = tctx->buf;
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- memcpy(ctx->buffer, bctx->ipad, ctx->bs);
- ctx->bufcnt = ctx->bs;
- ctx->flags |= SHA_FLAGS_HMAC;
- }
-
- return 0;
-}
-
-static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
- dma_addr_t addr1, size_t len1,
- dma_addr_t addr2, size_t len2)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd, *res;
- int err, count = 0;
-
- err = mtk_sha_info_update(cryp, sha, len1, len2);
- if (err)
- return err;
-
- /* Fill in the command/result descriptors */
- mtk_sha_ring_shift(ring, &cmd, &res, &count);
-
- res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
- cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
- MTK_DESC_CT_LEN(ctx->ct_size);
- cmd->buf = cpu_to_le32(addr1);
- cmd->ct = cpu_to_le32(ctx->ct_dma);
- cmd->ct_hdr = ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(ctx->tfm_dma);
-
- if (len2) {
- mtk_sha_ring_shift(ring, &cmd, &res, &count);
-
- res->hdr = MTK_DESC_BUF_LEN(len2);
- cmd->hdr = MTK_DESC_BUF_LEN(len2);
- cmd->buf = cpu_to_le32(addr2);
- }
-
- cmd->hdr |= MTK_DESC_LAST;
- res->hdr |= MTK_DESC_LAST;
-
-	/*
-	 * Make sure that all changes to the DMA ring are done before we
-	 * start the engine.
-	 */
- wmb();
- /* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
-
- return -EINPROGRESS;
-}
-
-static int mtk_sha_dma_map(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- struct mtk_sha_reqctx *ctx,
- size_t count)
-{
- ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
- dev_err(cryp->dev, "dma map error\n");
- return -EINVAL;
- }
-
- ctx->flags &= ~SHA_FLAGS_SG;
-
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
-}
-
-static int mtk_sha_update_slow(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- size_t count;
- u32 final;
-
- mtk_sha_append_sg(ctx);
-
- final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
-
- dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
-
- if (final) {
- sha->flags |= SHA_FLAGS_FINAL;
- mtk_sha_fill_padding(ctx, 0);
- }
-
- if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- return mtk_sha_dma_map(cryp, sha, ctx, count);
- }
- return 0;
-}
-
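-/*
- * Fast path: feed aligned scatterlist entries straight to the engine;
- * fall back to mtk_sha_update_slow() (copy through the page buffer)
- * when data is already buffered or the sg entries are misaligned.
- */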
-static int mtk_sha_update_start(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- u32 len, final, tail;
- struct scatterlist *sg;
-
- if (!ctx->total)
- return 0;
-
- if (ctx->bufcnt || ctx->offset)
- return mtk_sha_update_slow(cryp, sha);
-
- sg = ctx->sg;
-
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return mtk_sha_update_slow(cryp, sha);
-
- if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
- /* size is not ctx->bs aligned */
- return mtk_sha_update_slow(cryp, sha);
-
- len = min(ctx->total, sg->length);
-
- if (sg_is_last(sg)) {
- if (!(ctx->flags & SHA_FLAGS_FINUP)) {
-			/* a non-final sg must be ctx->bs aligned */
- tail = len & (ctx->bs - 1);
- len -= tail;
- }
- }
-
- ctx->total -= len;
- ctx->offset = len; /* offset where to start slow */
-
- final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
-
- /* Add padding */
- if (final) {
- size_t count;
-
- tail = len & (ctx->bs - 1);
- len -= tail;
- ctx->total += tail;
- ctx->offset = len; /* offset where to start slow */
-
- sg = ctx->sg;
- mtk_sha_append_sg(ctx);
- mtk_sha_fill_padding(ctx, len);
-
- ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
- dev_err(cryp->dev, "dma map bytes error\n");
- return -EINVAL;
- }
-
- sha->flags |= SHA_FLAGS_FINAL;
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- if (len == 0) {
- ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
- count, 0, 0);
-
- } else {
- ctx->sg = sg;
- if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
- dev_err(cryp->dev, "dma_map_sg error\n");
- return -EINVAL;
- }
-
- ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
- len, ctx->dma_addr, count);
- }
- }
-
- if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
- dev_err(cryp->dev, "dma_map_sg error\n");
- return -EINVAL;
- }
-
- ctx->flags |= SHA_FLAGS_SG;
-
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
- len, 0, 0);
-}
-
-static int mtk_sha_final_req(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- size_t count;
-
- mtk_sha_fill_padding(ctx, 0);
-
- sha->flags |= SHA_FLAGS_FINAL;
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- return mtk_sha_dma_map(cryp, sha, ctx, count);
-}
-
-/* Copy ready hash (+ finalize hmac) */
-static int mtk_sha_finish(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- __le32 *digest = ctx->info.digest;
- u32 *result = (u32 *)req->result;
- int i;
-
- /* Get the hash from the digest buffer */
- for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
- result[i] = le32_to_cpu(digest[i]);
-
- if (ctx->flags & SHA_FLAGS_HMAC)
- return mtk_sha_finish_hmac(req);
-
- return 0;
-}
-
-static void mtk_sha_finish_req(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- int err)
-{
- if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
- err = mtk_sha_finish(sha->req);
-
- sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
-
- sha->req->base.complete(&sha->req->base, err);
-
- /* Handle new request */
- tasklet_schedule(&sha->queue_task);
-}
-
-static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct ahash_request *req)
-{
- struct mtk_sha_rec *sha = cryp->sha[id];
- struct crypto_async_request *async_req, *backlog;
- struct mtk_sha_reqctx *ctx;
- unsigned long flags;
- int err = 0, ret = 0;
-
- spin_lock_irqsave(&sha->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&sha->queue, req);
-
- if (SHA_FLAGS_BUSY & sha->flags) {
- spin_unlock_irqrestore(&sha->lock, flags);
- return ret;
- }
-
- backlog = crypto_get_backlog(&sha->queue);
- async_req = crypto_dequeue_request(&sha->queue);
- if (async_req)
- sha->flags |= SHA_FLAGS_BUSY;
- spin_unlock_irqrestore(&sha->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- ctx = ahash_request_ctx(req);
-
- sha->req = req;
-
- mtk_sha_info_init(ctx);
-
- if (ctx->op == SHA_OP_UPDATE) {
- err = mtk_sha_update_start(cryp, sha);
- if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
- /* No final() after finup() */
- err = mtk_sha_final_req(cryp, sha);
- } else if (ctx->op == SHA_OP_FINAL) {
- err = mtk_sha_final_req(cryp, sha);
- }
-
- if (unlikely(err != -EINPROGRESS))
- /* Task will not finish it, so do it here */
- mtk_sha_finish_req(cryp, sha, err);
-
- return ret;
-}
-
-static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-
- ctx->op = op;
-
- return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
-}
-
-static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
-
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
- DMA_BIDIRECTIONAL);
-
- if (ctx->flags & SHA_FLAGS_SG) {
- dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
- if (ctx->sg->length == ctx->offset) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- }
- if (ctx->flags & SHA_FLAGS_PAD) {
- dma_unmap_single(cryp->dev, ctx->dma_addr,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- }
- } else
- dma_unmap_single(cryp->dev, ctx->dma_addr,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
-}
-
-static void mtk_sha_complete(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- int err = 0;
-
- err = mtk_sha_update_start(cryp, sha);
- if (err != -EINPROGRESS)
- mtk_sha_finish_req(cryp, sha, err);
-}
-
-static int mtk_sha_update(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->total = req->nbytes;
- ctx->sg = req->src;
- ctx->offset = 0;
-
- if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
- !(ctx->flags & SHA_FLAGS_FINUP))
- return mtk_sha_append_sg(ctx);
-
- return mtk_sha_enqueue(req, SHA_OP_UPDATE);
-}
-
-static int mtk_sha_final(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->flags |= SHA_FLAGS_FINUP;
-
- if (ctx->flags & SHA_FLAGS_PAD)
- return mtk_sha_finish(req);
-
- return mtk_sha_enqueue(req, SHA_OP_FINAL);
-}
-
-static int mtk_sha_finup(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- int err1, err2;
-
- ctx->flags |= SHA_FLAGS_FINUP;
-
- err1 = mtk_sha_update(req);
- if (err1 == -EINPROGRESS ||
- (err1 == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
- return err1;
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed
- */
- err2 = mtk_sha_final(req);
-
- return err1 ?: err2;
-}
-
-static int mtk_sha_digest(struct ahash_request *req)
-{
- return mtk_sha_init(req) ?: mtk_sha_finup(req);
-}
-
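-/*
- * Standard RFC 2104 HMAC key preparation: a key longer than the block
- * size is first hashed down to the digest size, then the key is
- * zero-padded to the block size and XORed with the ipad/opad constants.
- */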
-static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
- u32 keylen)
-{
- struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
- size_t bs = crypto_shash_blocksize(bctx->shash);
- size_t ds = crypto_shash_digestsize(bctx->shash);
- int err, i;
-
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
- bctx->ipad);
- if (err)
- return err;
- keylen = ds;
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return 0;
-}
-
-static int mtk_sha_export(struct ahash_request *req, void *out)
-{
- const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
-}
-
-static int mtk_sha_import(struct ahash_request *req, const void *in)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- memcpy(ctx, in, sizeof(*ctx));
- return 0;
-}
-
-static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
- const char *alg_base)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
- struct mtk_cryp *cryp = NULL;
-
- cryp = mtk_sha_find_dev(tctx);
- if (!cryp)
- return -ENODEV;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mtk_sha_reqctx));
-
- if (alg_base) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- pr_err("base driver %s could not be loaded.\n",
- alg_base);
-
- return PTR_ERR(bctx->shash);
- }
- }
- return 0;
-}
-
-static int mtk_sha_cra_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, NULL);
-}
-
-static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha1");
-}
-
-static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha224");
-}
-
-static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha256");
-}
-
-static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha384");
-}
-
-static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha512");
-}
-
-static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static struct ahash_alg algs_sha1_sha224_sha256[] = {
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "mtk-sha1",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha224",
- .cra_driver_name = "mtk-sha224",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "mtk-sha256",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "mtk-hmac-sha1",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha1_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "mtk-hmac-sha224",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha224_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "mtk-hmac-sha256",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha256_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-};
-
-static struct ahash_alg algs_sha384_sha512[] = {
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha384",
- .cra_driver_name = "mtk-sha384",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha512",
- .cra_driver_name = "mtk-sha512",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "mtk-hmac-sha384",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha384_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "mtk-hmac-sha512",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha512_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-};
-
-static void mtk_sha_queue_task(unsigned long data)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
-
- mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
-}
-
-static void mtk_sha_done_task(unsigned long data)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
- struct mtk_cryp *cryp = sha->cryp;
-
- mtk_sha_unmap(cryp, sha);
- mtk_sha_complete(cryp, sha);
-}
-
-static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
- struct mtk_cryp *cryp = sha->cryp;
- u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
-
- mtk_sha_write(cryp, RDR_STAT(sha->id), val);
-
- if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(sha->id),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&sha->done_task);
- } else {
-		dev_warn(cryp->dev, "SHA interrupt with no active request.\n");
- }
- return IRQ_HANDLED;
-}
-
-/*
- * Two SHA records are used to gain extra performance. The scheme is
- * similar to mtk_aes_record_init().
- */
-static int mtk_sha_record_init(struct mtk_cryp *cryp)
-{
- struct mtk_sha_rec **sha = cryp->sha;
- int i, err = -ENOMEM;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
- if (!sha[i])
- goto err_cleanup;
-
- sha[i]->cryp = cryp;
-
- spin_lock_init(&sha[i]->lock);
- crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
-
- tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
- (unsigned long)sha[i]);
- tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
- (unsigned long)sha[i]);
- }
-
- /* Link to ring2 and ring3 respectively */
- sha[0]->id = MTK_RING2;
- sha[1]->id = MTK_RING3;
-
- cryp->rec = 1;
-
- return 0;
-
-err_cleanup:
- for (; i--; )
- kfree(sha[i]);
- return err;
-}
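
The per-record tasklets above give each hardware ring its own queue and completion path. For illustration only, a selector that alternates requests between the two records might look like the following minimal sketch; this helper is hypothetical, and the real selection logic lives in mtk_sha_handle_queue(), which is outside this hunk:

/* Hypothetical sketch: alternate requests between the two SHA records. */
static struct mtk_sha_rec *mtk_sha_pick_record(struct mtk_cryp *cryp)
{
        cryp->rec = !cryp->rec;         /* toggle 0 <-> 1 */
        return cryp->sha[cryp->rec];    /* record i drives ring MTK_RING2 + i */
}
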
-
-static void mtk_sha_record_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->sha[i]->done_task);
- tasklet_kill(&cryp->sha[i]->queue_task);
-
- kfree(cryp->sha[i]);
- }
-}
-
-static void mtk_sha_unregister_algs(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
- crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
-
- for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
- crypto_unregister_ahash(&algs_sha384_sha512[i]);
-}
-
-static int mtk_sha_register_algs(void)
-{
- int err, i;
-
- for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
- err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
- if (err)
- goto err_sha_224_256_algs;
- }
-
- for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
- err = crypto_register_ahash(&algs_sha384_sha512[i]);
- if (err)
- goto err_sha_384_512_algs;
- }
-
- return 0;
-
-err_sha_384_512_algs:
- for (; i--; )
- crypto_unregister_ahash(&algs_sha384_sha512[i]);
- i = ARRAY_SIZE(algs_sha1_sha224_sha256);
-err_sha_224_256_algs:
- for (; i--; )
- crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
-
- return err;
-}
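
Note the unwinding scheme above: a failure in the second loop first unregisters any sha384/sha512 hashes registered so far, then resets i to the full size of the first array so that falling through into err_sha_224_256_algs unregisters every sha1/sha224/sha256 hash as well.
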
-
-int mtk_hash_alg_register(struct mtk_cryp *cryp)
-{
- int err;
-
- INIT_LIST_HEAD(&cryp->sha_list);
-
- /* Initialize two hash records */
- err = mtk_sha_record_init(cryp);
- if (err)
- goto err_record;
-
- err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
- 0, "mtk-sha", cryp->sha[0]);
- if (err) {
- dev_err(cryp->dev, "unable to request sha irq0.\n");
- goto err_res;
- }
-
- err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
- 0, "mtk-sha", cryp->sha[1]);
- if (err) {
- dev_err(cryp->dev, "unable to request sha irq1.\n");
- goto err_res;
- }
-
- /* Enable ring2 and ring3 interrupt for hash */
- mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
- mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
-
- spin_lock(&mtk_sha.lock);
- list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
- spin_unlock(&mtk_sha.lock);
-
- err = mtk_sha_register_algs();
- if (err)
- goto err_algs;
-
- return 0;
-
-err_algs:
- spin_lock(&mtk_sha.lock);
- list_del(&cryp->sha_list);
- spin_unlock(&mtk_sha.lock);
-err_res:
- mtk_sha_record_free(cryp);
-err_record:
-
- dev_err(cryp->dev, "mtk-sha initialization failed.\n");
- return err;
-}
-
-void mtk_hash_alg_release(struct mtk_cryp *cryp)
-{
- spin_lock(&mtk_sha.lock);
- list_del(&cryp->sha_list);
- spin_unlock(&mtk_sha.lock);
-
- mtk_sha_unregister_algs();
- mtk_sha_record_free(cryp);
-}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
deleted file mode 100644
index 84f9c16d984c..000000000000
--- a/drivers/crypto/picoxcell_crypto.c
+++ /dev/null
@@ -1,1807 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
- */
-#include <crypto/internal/aead.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/authenc.h>
-#include <crypto/internal/des.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/rtnetlink.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-
-#include "picoxcell_crypto_regs.h"
-
-/*
- * The threshold for the number of entries in the CMD FIFO available before
- * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
- * number of interrupts raised to the CPU.
- */
-#define CMD0_IRQ_THRESHOLD 1
-
-/*
- * The timeout period (in jiffies) for a PDU. When the number of PDUs in
- * flight is greater than the STAT_IRQ_THRESHOLD, or is 0, the timer is
- * disabled. When there are packets in flight but fewer than the threshold,
- * we enable the timer; at expiry we attempt to remove any processed packets
- * from the queue and, if packets are still left, schedule the timer again.
- */
-#define PACKET_TIMEOUT 1
-
-/* The priority to register each algorithm with. */
-#define SPACC_CRYPTO_ALG_PRIORITY 10000
-
-#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
-#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
-#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
-#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
-#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
-#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
-#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
-#define SPACC_CRYPTO_L2_MAX_CTXS 128
-#define SPACC_CRYPTO_L2_FIFO_SZ 128
-
-#define MAX_DDT_LEN 16
-
-/* DDT format. This must match the hardware DDT format exactly. */
-struct spacc_ddt {
- dma_addr_t p;
- u32 len;
-};
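
For example, a payload that DMA-maps to two segments is handed to the engine as a zero-terminated DDT list (built later by spacc_sg_to_ddt()); with MAX_DDT_LEN of 16 there is room for at most 15 payload entries plus the terminator. A sketch with made-up addresses:

/* Illustrative only: a two-segment payload as the engine would see it. */
static const struct spacc_ddt example_ddt[3] = {
        { .p = 0x80100000, .len = 4096 },   /* first mapped segment */
        { .p = 0x80200000, .len = 512 },    /* second mapped segment */
        { .p = 0, .len = 0 },               /* list terminator */
};
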
-
-/*
- * Asynchronous crypto request structure.
- *
- * This structure defines a request that is either queued for processing or
- * being processed.
- */
-struct spacc_req {
- struct list_head list;
- struct spacc_engine *engine;
- struct crypto_async_request *req;
- int result;
- bool is_encrypt;
- unsigned ctx_id;
- dma_addr_t src_addr, dst_addr;
- struct spacc_ddt *src_ddt, *dst_ddt;
- void (*complete)(struct spacc_req *req);
- struct skcipher_request fallback_req; // keep at the end
-};
-
-struct spacc_aead {
- unsigned long ctrl_default;
- unsigned long type;
- struct aead_alg alg;
- struct spacc_engine *engine;
- struct list_head entry;
- int key_offs;
- int iv_offs;
-};
-
-struct spacc_engine {
- void __iomem *regs;
- struct list_head pending;
- int next_ctx;
- spinlock_t hw_lock;
- int in_flight;
- struct list_head completed;
- struct list_head in_progress;
- struct tasklet_struct complete;
- unsigned long fifo_sz;
- void __iomem *cipher_ctx_base;
- void __iomem *hash_key_base;
- struct spacc_alg *algs;
- unsigned num_algs;
- struct list_head registered_algs;
- struct spacc_aead *aeads;
- unsigned num_aeads;
- struct list_head registered_aeads;
- size_t cipher_pg_sz;
- size_t hash_pg_sz;
- const char *name;
- struct clk *clk;
- struct device *dev;
- unsigned max_ctxs;
- struct timer_list packet_timeout;
- unsigned stat_irq_thresh;
- struct dma_pool *req_pool;
-};
-
-/* Algorithm type mask. */
-#define SPACC_CRYPTO_ALG_MASK 0x7
-
-/* SPACC definition of a crypto algorithm. */
-struct spacc_alg {
- unsigned long ctrl_default;
- unsigned long type;
- struct skcipher_alg alg;
- struct spacc_engine *engine;
- struct list_head entry;
- int key_offs;
- int iv_offs;
-};
-
-/* Generic context structure for any algorithm type. */
-struct spacc_generic_ctx {
- struct spacc_engine *engine;
- int flags;
- int key_offs;
- int iv_offs;
-};
-
-/* Block cipher context. */
-struct spacc_ablk_ctx {
- struct spacc_generic_ctx generic;
- u8 key[AES_MAX_KEY_SIZE];
- u8 key_len;
- /*
- * The fallback cipher. If the operation can't be done in hardware,
- * fallback to a software version.
- */
- struct crypto_skcipher *sw_cipher;
-};
-
-/* AEAD cipher context. */
-struct spacc_aead_ctx {
- struct spacc_generic_ctx generic;
- u8 cipher_key[AES_MAX_KEY_SIZE];
- u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
- u8 cipher_key_len;
- u8 hash_key_len;
- struct crypto_aead *sw_cipher;
-};
-
-static int spacc_ablk_submit(struct spacc_req *req);
-
-static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
-{
- return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
-}
-
-static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
-{
- return container_of(alg, struct spacc_aead, alg);
-}
-
-static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
-{
- u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
-
- return fifo_stat & SPA_FIFO_CMD_FULL;
-}
-
-/*
- * Given a cipher context and a context index, get the base address of the
- * context page.
- *
- * Returns the address of the context page where the key/context may
- * be written.
- */
-static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
- unsigned indx,
- bool is_cipher_ctx)
-{
- return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
- (indx * ctx->engine->cipher_pg_sz) :
- ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
-}
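
As a worked example of the address arithmetic, assuming the IPsec engine's 64-byte pages (SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ and SPACC_CRYPTO_IPSEC_HASH_PG_SZ):

/*
 * indx == 3, cipher context -> cipher_ctx_base + 3 * 64 = base + 0xc0
 * indx == 3, hash key       -> hash_key_base   + 3 * 64 = base + 0xc0
 */
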
-
-/* The context pages can only be written with 32-bit accesses. */
-static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
- unsigned count)
-{
- const u32 *src32 = (const u32 *) src;
-
- while (count--)
- writel(*src32++, dst++);
-}
-
-static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
- void __iomem *page_addr, const u8 *key,
- size_t key_len, const u8 *iv, size_t iv_len)
-{
- void __iomem *key_ptr = page_addr + ctx->key_offs;
- void __iomem *iv_ptr = page_addr + ctx->iv_offs;
-
- memcpy_toio32(key_ptr, key, key_len / 4);
- memcpy_toio32(iv_ptr, iv, iv_len / 4);
-}
-
-/*
- * Load a context into the engines context memory.
- *
- * Returns the index of the context page where the context was loaded.
- */
-static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
- const u8 *ciph_key, size_t ciph_len,
- const u8 *iv, size_t ivlen, const u8 *hash_key,
- size_t hash_len)
-{
- unsigned indx = ctx->engine->next_ctx++;
- void __iomem *ciph_page_addr, *hash_page_addr;
-
- ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
- hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
-
- ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
- spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
- ivlen);
- writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
- (1 << SPA_KEY_SZ_CIPHER_OFFSET),
- ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
-
- if (hash_key) {
- memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
- writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
- ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
- }
-
- return indx;
-}
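
The "next_ctx &= fifo_sz - 1" step above is a cheap modulo that relies on fifo_sz being a power of two, which both defined FIFO sizes (32 and 128) are. A sketch of the wraparound, assuming the IPsec engine's FIFO size of 32:

/* 31 & 31 == 31, 32 & 31 == 0: index 32 wraps back to context page 0. */
unsigned next_ctx = 32;
next_ctx &= 32 - 1;     /* next_ctx is now 0 */
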
-
-static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
-{
- ddt->p = phys;
- ddt->len = len;
-}
-
-/*
- * Take a crypto request and scatterlists for the data and turn them into DDTs
- * for passing to the crypto engines. This also DMA maps the data so that the
- * crypto engines can DMA to/from them.
- */
-static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
- struct scatterlist *payload,
- unsigned nbytes,
- enum dma_data_direction dir,
- dma_addr_t *ddt_phys)
-{
- unsigned mapped_ents;
- struct scatterlist *cur;
- struct spacc_ddt *ddt;
- int i;
- int nents;
-
- nents = sg_nents_for_len(payload, nbytes);
- if (nents < 0) {
-		dev_err(engine->dev, "Invalid number of SG entries.\n");
- return NULL;
- }
- mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
-
- if (mapped_ents + 1 > MAX_DDT_LEN)
- goto out;
-
- ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
- if (!ddt)
- goto out;
-
- for_each_sg(payload, cur, mapped_ents, i)
- ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
- ddt_set(&ddt[mapped_ents], 0, 0);
-
- return ddt;
-
-out:
- dma_unmap_sg(engine->dev, payload, nents, dir);
- return NULL;
-}
-
-static int spacc_aead_make_ddts(struct aead_request *areq)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- struct spacc_req *req = aead_request_ctx(areq);
- struct spacc_engine *engine = req->engine;
- struct spacc_ddt *src_ddt, *dst_ddt;
- unsigned total;
- int src_nents, dst_nents;
- struct scatterlist *cur;
- int i, dst_ents, src_ents;
-
- total = areq->assoclen + areq->cryptlen;
- if (req->is_encrypt)
- total += crypto_aead_authsize(aead);
-
- src_nents = sg_nents_for_len(areq->src, total);
- if (src_nents < 0) {
-		dev_err(engine->dev, "Invalid number of src SG entries.\n");
- return src_nents;
- }
- if (src_nents + 1 > MAX_DDT_LEN)
- return -E2BIG;
-
- dst_nents = 0;
- if (areq->src != areq->dst) {
- dst_nents = sg_nents_for_len(areq->dst, total);
- if (dst_nents < 0) {
-			dev_err(engine->dev, "Invalid number of dst SG entries.\n");
- return dst_nents;
- }
-		if (dst_nents + 1 > MAX_DDT_LEN)
-			return -E2BIG;
- }
-
- src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
- if (!src_ddt)
- goto err;
-
- dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
- if (!dst_ddt)
- goto err_free_src;
-
- req->src_ddt = src_ddt;
- req->dst_ddt = dst_ddt;
-
- if (dst_nents) {
- src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
- DMA_TO_DEVICE);
- if (!src_ents)
- goto err_free_dst;
-
- dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
- DMA_FROM_DEVICE);
-
- if (!dst_ents) {
- dma_unmap_sg(engine->dev, areq->src, src_nents,
- DMA_TO_DEVICE);
- goto err_free_dst;
- }
- } else {
- src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (!src_ents)
- goto err_free_dst;
- dst_ents = src_ents;
- }
-
- /*
-	 * Now fill in the DDT entries for the source and destination and
-	 * terminate each list with a NULL entry.
- */
- for_each_sg(areq->src, cur, src_ents, i)
- ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
-
- /* For decryption we need to skip the associated data. */
- total = req->is_encrypt ? 0 : areq->assoclen;
- for_each_sg(areq->dst, cur, dst_ents, i) {
- unsigned len = sg_dma_len(cur);
-
- if (len <= total) {
- total -= len;
- continue;
- }
-
- ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
- }
-
- ddt_set(src_ddt, 0, 0);
- ddt_set(dst_ddt, 0, 0);
-
- return 0;
-
-err_free_dst:
- dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
-err_free_src:
- dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
-err:
- return -ENOMEM;
-}
-
-static void spacc_aead_free_ddts(struct spacc_req *req)
-{
- struct aead_request *areq = container_of(req->req, struct aead_request,
- base);
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- unsigned total = areq->assoclen + areq->cryptlen +
- (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
- struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
- struct spacc_engine *engine = aead_ctx->generic.engine;
- int nents = sg_nents_for_len(areq->src, total);
-
-	/* sg_nents_for_len() should not fail: it succeeded during sg mapping. */
- if (unlikely(nents < 0)) {
-		dev_err(engine->dev, "Invalid number of src SG entries.\n");
- return;
- }
-
- if (areq->src != areq->dst) {
- dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
- nents = sg_nents_for_len(areq->dst, total);
- if (unlikely(nents < 0)) {
-			dev_err(engine->dev, "Invalid number of dst SG entries.\n");
- return;
- }
- dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
- } else
- dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
-
- dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
- dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
-}
-
-static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
- dma_addr_t ddt_addr, struct scatterlist *payload,
- unsigned nbytes, enum dma_data_direction dir)
-{
- int nents = sg_nents_for_len(payload, nbytes);
-
- if (nents < 0) {
-		dev_err(req->engine->dev, "Invalid number of SG entries.\n");
- return;
- }
-
- dma_unmap_sg(req->engine->dev, payload, nents, dir);
- dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
-}
-
-static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_authenc_keys keys;
- int err;
-
- crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- if (err)
- return err;
-
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- goto badkey;
-
- if (keys.enckeylen > AES_MAX_KEY_SIZE)
- goto badkey;
-
- if (keys.authkeylen > sizeof(ctx->hash_ctx))
- goto badkey;
-
- memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
- ctx->cipher_key_len = keys.enckeylen;
-
- memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
- ctx->hash_key_len = keys.authkeylen;
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
-
-badkey:
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
-}
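
The key blob handed to setkey here uses the standard authenc() encoding that crypto_authenc_extractkeys() parses: roughly, an rtattr header carrying the encryption key length, followed by the authentication key and then the encryption key.

/*
 * authenc() key blob layout (approximate), as split by
 * crypto_authenc_extractkeys():
 *
 *   struct rtattr                    rta_type = CRYPTO_AUTHENC_KEYA_PARAM
 *   struct crypto_authenc_key_param  __be32 enckeylen
 *   u8 authkey[keylen - enckeylen - headers]
 *   u8 enckey[enckeylen]
 */
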
-
-static int spacc_aead_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
-{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
-
- return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
-}
-
-/*
- * Check if an AEAD request requires a fallback operation. Some requests
- * can't be completed in hardware because certain key sizes are unsupported
- * there; those requests must be completed in software instead.
- */
-static int spacc_aead_need_fallback(struct aead_request *aead_req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
-
- /*
-	 * If we have an unsupported key length, then we need to do a
-	 * software fallback.
- */
- if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES &&
- ctx->cipher_key_len != AES_KEYSIZE_128 &&
- ctx->cipher_key_len != AES_KEYSIZE_256)
- return 1;
-
- return 0;
-}
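
Concretely, given the check above, only the AES key sizes the IPsec engine supports stay in hardware:

/*
 * AES-128 (16-byte key) -> hardware
 * AES-192 (24-byte key) -> software fallback
 * AES-256 (32-byte key) -> hardware
 */
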
-
-static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
- bool is_encrypt)
-{
- struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
- struct aead_request *subreq = aead_request_ctx(req);
-
- aead_request_set_tfm(subreq, ctx->sw_cipher);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
- aead_request_set_ad(subreq, req->assoclen);
-
- return is_encrypt ? crypto_aead_encrypt(subreq) :
- crypto_aead_decrypt(subreq);
-}
-
-static void spacc_aead_complete(struct spacc_req *req)
-{
- spacc_aead_free_ddts(req);
- req->req->complete(req->req, req->result);
-}
-
-static int spacc_aead_submit(struct spacc_req *req)
-{
- struct aead_request *aead_req =
- container_of(req->req, struct aead_request, base);
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- unsigned int authsize = crypto_aead_authsize(aead);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl, proc_len, assoc_len;
-
- req->result = -EINPROGRESS;
- req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
- ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
- ctx->hash_ctx, ctx->hash_key_len);
-
- /* Set the source and destination DDT pointers. */
- writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
- writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
- writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
-
- assoc_len = aead_req->assoclen;
- proc_len = aead_req->cryptlen + assoc_len;
-
- /*
- * If we are decrypting, we need to take the length of the ICV out of
- * the processing length.
- */
- if (!req->is_encrypt)
- proc_len -= authsize;
-
- writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
- writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
- writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
- writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
- writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
-
- ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
- (1 << SPA_CTRL_ICV_APPEND);
- if (req->is_encrypt)
- ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
- else
- ctrl |= (1 << SPA_CTRL_KEY_EXP);
-
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
-
- return -EINPROGRESS;
-}
-
-static int spacc_req_submit(struct spacc_req *req);
-
-static void spacc_push(struct spacc_engine *engine)
-{
- struct spacc_req *req;
-
- while (!list_empty(&engine->pending) &&
- engine->in_flight + 1 <= engine->fifo_sz) {
-
- ++engine->in_flight;
- req = list_first_entry(&engine->pending, struct spacc_req,
- list);
- list_move_tail(&req->list, &engine->in_progress);
-
- req->result = spacc_req_submit(req);
- }
-}
-
-/*
- * Setup an AEAD request for processing. This will configure the engine, load
- * the context and then start the packet processing.
- */
-static int spacc_aead_setup(struct aead_request *req,
- unsigned alg_type, bool is_encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_engine *engine = to_spacc_aead(alg)->engine;
- struct spacc_req *dev_req = aead_request_ctx(req);
- int err;
- unsigned long flags;
-
- dev_req->req = &req->base;
- dev_req->is_encrypt = is_encrypt;
- dev_req->result = -EBUSY;
- dev_req->engine = engine;
- dev_req->complete = spacc_aead_complete;
-
- if (unlikely(spacc_aead_need_fallback(req) ||
- ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
- return spacc_aead_do_fallback(req, alg_type, is_encrypt);
-
- if (err)
- goto out;
-
- err = -EINPROGRESS;
- spin_lock_irqsave(&engine->hw_lock, flags);
- if (unlikely(spacc_fifo_cmd_full(engine)) ||
- engine->in_flight + 1 > engine->fifo_sz) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- err = -EBUSY;
- spin_unlock_irqrestore(&engine->hw_lock, flags);
- goto out_free_ddts;
- }
- list_add_tail(&dev_req->list, &engine->pending);
- } else {
- list_add_tail(&dev_req->list, &engine->pending);
- spacc_push(engine);
- }
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- goto out;
-
-out_free_ddts:
- spacc_aead_free_ddts(dev_req);
-out:
- return err;
-}
-
-static int spacc_aead_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
-
- return spacc_aead_setup(req, alg->type, 1);
-}
-
-static int spacc_aead_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
-
- return spacc_aead_setup(req, alg->type, 0);
-}
-
-/*
- * Initialise a new AEAD context. This is responsible for allocating the
- * fallback cipher and initialising the context.
- */
-static int spacc_aead_cra_init(struct crypto_aead *tfm)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_alg *alg = crypto_aead_alg(tfm);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_engine *engine = spacc_alg->engine;
-
- ctx->generic.flags = spacc_alg->type;
- ctx->generic.engine = engine;
- ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher))
- return PTR_ERR(ctx->sw_cipher);
- ctx->generic.key_offs = spacc_alg->key_offs;
- ctx->generic.iv_offs = spacc_alg->iv_offs;
-
- crypto_aead_set_reqsize(
- tfm,
- max(sizeof(struct spacc_req),
- sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->sw_cipher)));
-
- return 0;
-}
-
-/*
- * Destructor for an AEAD context. This is called when the transform is freed
- * and must free the fallback cipher.
- */
-static void spacc_aead_cra_exit(struct crypto_aead *tfm)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
- crypto_free_aead(ctx->sw_cipher);
-}
-
-/*
- * Set the DES key for a block cipher transform. This also performs weak key
- * checking if the transform has requested it.
- */
-static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- err = verify_skcipher_des_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
- return 0;
-}
-
-/*
- * Set the 3DES key for a block cipher transform. This also performs weak key
- * checking if the transform has requested it.
- */
-static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- err = verify_skcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
- return 0;
-}
-
-/*
- * Set the key for an AES block cipher. Some key lengths are not supported in
- * hardware so this must also check whether a fallback is needed.
- */
-static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = 0;
-
- if (len > AES_MAX_KEY_SIZE)
- return -EINVAL;
-
- /*
-	 * The IPSec engine only supports 128- and 256-bit AES keys. If we get
-	 * a request for any other size (i.e. 192 bits) then we need to do a
-	 * software fallback.
- */
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
- if (!ctx->sw_cipher)
- return -EINVAL;
-
- /*
- * Set the fallback transform to use the same request flags as
- * the hardware transform.
- */
- crypto_skcipher_clear_flags(ctx->sw_cipher,
- CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
- cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
- if (err)
- goto sw_setkey_failed;
- }
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
-sw_setkey_failed:
- return err;
-}
-
-static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = 0;
-
- if (len > AES_MAX_KEY_SIZE) {
- err = -EINVAL;
- goto out;
- }
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
-out:
- return err;
-}
-
-static int spacc_ablk_need_fallback(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
- struct spacc_ablk_ctx *ctx;
-
- ctx = crypto_skcipher_ctx(tfm);
-
- return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES &&
- ctx->key_len != AES_KEYSIZE_128 &&
- ctx->key_len != AES_KEYSIZE_256;
-}
-
-static void spacc_ablk_complete(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
-
- if (ablk_req->src != ablk_req->dst) {
- spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
- ablk_req->cryptlen, DMA_TO_DEVICE);
- spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->cryptlen, DMA_FROM_DEVICE);
- } else
- spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->cryptlen, DMA_BIDIRECTIONAL);
-
- req->req->complete(req->req, req->result);
-}
-
-static int spacc_ablk_submit(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl;
-
- req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
- ctx->key_len, ablk_req->iv, alg->ivsize,
- NULL, 0);
-
- writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
- writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
- writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
-
- writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
- writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
- writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
- writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
-
- ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
- (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
- (1 << SPA_CTRL_KEY_EXP));
-
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
-
- return -EINPROGRESS;
-}
-
-static int spacc_ablk_do_fallback(struct skcipher_request *req,
- unsigned alg_type, bool is_encrypt)
-{
- struct crypto_tfm *old_tfm =
- crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- struct spacc_req *dev_req = skcipher_request_ctx(req);
- int err;
-
- /*
-	 * Route the request through the software fallback transform, using
-	 * the fallback subrequest embedded at the end of struct spacc_req.
- */
- skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
- skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
- req->base.complete, req->base.data);
- skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
- req->cryptlen, req->iv);
- err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
- crypto_skcipher_decrypt(&dev_req->fallback_req);
-
- return err;
-}
-
-static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
- bool is_encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
- struct spacc_req *dev_req = skcipher_request_ctx(req);
- unsigned long flags;
- int err = -ENOMEM;
-
- dev_req->req = &req->base;
- dev_req->is_encrypt = is_encrypt;
- dev_req->engine = engine;
- dev_req->complete = spacc_ablk_complete;
- dev_req->result = -EINPROGRESS;
-
- if (unlikely(spacc_ablk_need_fallback(dev_req)))
- return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
-
- /*
-	 * Create the DDTs for the engine. If source and destination are the
-	 * same then we can optimize by reusing a single bidirectional DDT.
- */
- if (req->src != req->dst) {
- dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
- req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
- if (!dev_req->src_ddt)
- goto out;
-
- dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
- if (!dev_req->dst_ddt)
- goto out_free_src;
- } else {
- dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
- if (!dev_req->dst_ddt)
- goto out;
-
- dev_req->src_ddt = NULL;
- dev_req->src_addr = dev_req->dst_addr;
- }
-
- err = -EINPROGRESS;
- spin_lock_irqsave(&engine->hw_lock, flags);
- /*
- * Check if the engine will accept the operation now. If it won't then
- * we either stick it on the end of a pending list if we can backlog,
- * or bailout with an error if not.
- */
- if (unlikely(spacc_fifo_cmd_full(engine)) ||
- engine->in_flight + 1 > engine->fifo_sz) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- err = -EBUSY;
- spin_unlock_irqrestore(&engine->hw_lock, flags);
- goto out_free_ddts;
- }
- list_add_tail(&dev_req->list, &engine->pending);
- } else {
- list_add_tail(&dev_req->list, &engine->pending);
- spacc_push(engine);
- }
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- goto out;
-
-out_free_ddts:
- spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
- req->cryptlen, req->src == req->dst ?
- DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
-out_free_src:
- if (req->src != req->dst)
- spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
- req->src, req->cryptlen, DMA_TO_DEVICE);
-out:
- return err;
-}
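
Callers of the resulting skcipher see the usual asynchronous contract: -EINPROGRESS when the request was queued, and -EBUSY only when CRYPTO_TFM_REQ_MAY_BACKLOG was not set and the engine is full. A minimal synchronous caller, sketched against the generic crypto API; "cbc(aes)" is resolved by priority, so with SPACC_CRYPTO_ALG_PRIORITY of 10000 it would normally land on cbc-aes-picoxcell:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch: encrypt one AES block with CBC, waiting for async completion. */
static int example_cbc_aes_encrypt(struct scatterlist *src,
                                   struct scatterlist *dst,
                                   const u8 *key, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_skcipher(tfm);
                return -ENOMEM;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
        if (!err) {
                skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE, iv);
                /* Turns -EINPROGRESS/-EBUSY into the final completion code. */
                err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
        }

        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
        return err;
}
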
-
-static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- struct spacc_engine *engine = spacc_alg->engine;
-
- ctx->generic.flags = spacc_alg->type;
- ctx->generic.engine = engine;
- if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher)) {
- dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->base.cra_name);
- return PTR_ERR(ctx->sw_cipher);
- }
- crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
- crypto_skcipher_reqsize(ctx->sw_cipher));
- } else {
- /* take the size without the fallback skcipher_request at the end */
- crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
- fallback_req));
- }
-
- ctx->generic.key_offs = spacc_alg->key_offs;
- ctx->generic.iv_offs = spacc_alg->iv_offs;
-
- return 0;
-}
-
-static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->sw_cipher);
-}
-
-static int spacc_ablk_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
-
- return spacc_ablk_setup(req, spacc_alg->type, 1);
-}
-
-static int spacc_ablk_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
-
- return spacc_ablk_setup(req, spacc_alg->type, 0);
-}
-
-static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
-{
- return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
- SPA_FIFO_STAT_EMPTY;
-}
-
-static void spacc_process_done(struct spacc_engine *engine)
-{
- struct spacc_req *req;
- unsigned long flags;
-
- spin_lock_irqsave(&engine->hw_lock, flags);
-
- while (!spacc_fifo_stat_empty(engine)) {
- req = list_first_entry(&engine->in_progress, struct spacc_req,
- list);
- list_move_tail(&req->list, &engine->completed);
- --engine->in_flight;
-
- /* POP the status register. */
- writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
- req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
- SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
-
- /*
- * Convert the SPAcc error status into the standard POSIX error
- * codes.
- */
- if (unlikely(req->result)) {
- switch (req->result) {
- case SPA_STATUS_ICV_FAIL:
- req->result = -EBADMSG;
- break;
-
- case SPA_STATUS_MEMORY_ERROR:
- dev_warn(engine->dev,
- "memory error triggered\n");
- req->result = -EFAULT;
- break;
-
- case SPA_STATUS_BLOCK_ERROR:
- dev_warn(engine->dev,
- "block error triggered\n");
- req->result = -EIO;
- break;
- }
- }
- }
-
- tasklet_schedule(&engine->complete);
-
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-}
-
-static irqreturn_t spacc_spacc_irq(int irq, void *dev)
-{
- struct spacc_engine *engine = (struct spacc_engine *)dev;
- u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
-
- writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
- spacc_process_done(engine);
-
- return IRQ_HANDLED;
-}
-
-static void spacc_packet_timeout(struct timer_list *t)
-{
- struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
-
- spacc_process_done(engine);
-}
-
-static int spacc_req_submit(struct spacc_req *req)
-{
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
-
- if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
- return spacc_aead_submit(req);
- else
- return spacc_ablk_submit(req);
-}
-
-static void spacc_spacc_complete(unsigned long data)
-{
- struct spacc_engine *engine = (struct spacc_engine *)data;
- struct spacc_req *req, *tmp;
- unsigned long flags;
- LIST_HEAD(completed);
-
- spin_lock_irqsave(&engine->hw_lock, flags);
-
- list_splice_init(&engine->completed, &completed);
- spacc_push(engine);
- if (engine->in_flight)
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- list_for_each_entry_safe(req, tmp, &completed, list) {
- list_del(&req->list);
- req->complete(req);
- }
-}
-
-#ifdef CONFIG_PM
-static int spacc_suspend(struct device *dev)
-{
- struct spacc_engine *engine = dev_get_drvdata(dev);
-
- /*
- * We only support standby mode. All we have to do is gate the clock to
- * the spacc. The hardware will preserve state until we turn it back
- * on again.
- */
- clk_disable(engine->clk);
-
- return 0;
-}
-
-static int spacc_resume(struct device *dev)
-{
- struct spacc_engine *engine = dev_get_drvdata(dev);
-
- return clk_enable(engine->clk);
-}
-
-static const struct dev_pm_ops spacc_pm_ops = {
- .suspend = spacc_suspend,
- .resume = spacc_resume,
-};
-#endif /* CONFIG_PM */
-
-static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
-{
- return dev ? dev_get_drvdata(dev) : NULL;
-}
-
-static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct spacc_engine *engine = spacc_dev_to_engine(dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
-}
-
-static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct spacc_engine *engine = spacc_dev_to_engine(dev);
- unsigned long thresh;
-
- if (kstrtoul(buf, 0, &thresh))
- return -EINVAL;
-
- thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
-
- engine->stat_irq_thresh = thresh;
- writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
- engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
-
- return len;
-}
-static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
- spacc_stat_irq_thresh_store);
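
As a worked example of the clamp above, assuming the IPsec engine (FIFO size 32), valid thresholds are 1..31:

/*
 * clamp(0UL,  1UL, 31UL) ->  1   (a write of "0" is raised to 1)
 * clamp(16UL, 1UL, 31UL) -> 16
 * clamp(64UL, 1UL, 31UL) -> 31   (a write of "64" is lowered to 31)
 */
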
-
-static struct spacc_alg ipsec_engine_algs[] = {
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
- .alg = {
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
- .alg = {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3-ede-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3-ede-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
-};
-
-static struct spacc_aead ipsec_engine_aeads[] = {
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA |
- SPA_CTRL_HASH_MODE_HMAC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA256 |
- SPA_CTRL_HASH_MODE_HMAC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA256 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha256),"
- "cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
-};
-
-static struct spacc_alg l2_engine_algs[] = {
- {
- .key_offs = 0,
- .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
- .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
- SPA_CTRL_CIPH_MODE_F8,
- .alg = {
- .base.cra_name = "f8(kasumi)",
- .base.cra_driver_name = "f8-kasumi-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = 8,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_kasumi_f8_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = 16,
- .max_keysize = 16,
- .ivsize = 8,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id spacc_of_id_table[] = {
- { .compatible = "picochip,spacc-ipsec" },
- { .compatible = "picochip,spacc-l2" },
- {}
-};
-MODULE_DEVICE_TABLE(of, spacc_of_id_table);
-#endif /* CONFIG_OF */
-
-static void spacc_tasklet_kill(void *data)
-{
- tasklet_kill(data);
-}
-
-static int spacc_probe(struct platform_device *pdev)
-{
- int i, err, ret;
- struct resource *irq;
- struct device_node *np = pdev->dev.of_node;
- struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
- GFP_KERNEL);
- if (!engine)
- return -ENOMEM;
-
- if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
- engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
- engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
- engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
- engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
- engine->algs = ipsec_engine_algs;
- engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
- engine->aeads = ipsec_engine_aeads;
- engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
- } else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
- engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
- engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
- engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
- engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
- engine->algs = l2_engine_algs;
- engine->num_algs = ARRAY_SIZE(l2_engine_algs);
- } else {
- return -EINVAL;
- }
-
- engine->name = dev_name(&pdev->dev);
-
- engine->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(engine->regs))
- return PTR_ERR(engine->regs);
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
-		dev_err(&pdev->dev, "no IRQ resource for engine\n");
- return -ENXIO;
- }
-
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
-
- ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
- &engine->complete);
- if (ret)
- return ret;
-
- if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
- engine->name, engine)) {
- dev_err(engine->dev, "failed to request IRQ\n");
- return -EBUSY;
- }
-
- engine->dev = &pdev->dev;
- engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
- engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
-
- engine->req_pool = dmam_pool_create(engine->name, engine->dev,
- MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
- if (!engine->req_pool)
- return -ENOMEM;
-
- spin_lock_init(&engine->hw_lock);
-
- engine->clk = clk_get(&pdev->dev, "ref");
- if (IS_ERR(engine->clk)) {
- dev_info(&pdev->dev, "clk unavailable\n");
- return PTR_ERR(engine->clk);
- }
-
- if (clk_prepare_enable(engine->clk)) {
- dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- ret = -EIO;
- goto err_clk_put;
- }
-
- /*
- * Use an IRQ threshold of 50% as a default. This seems to be a
-	 * reasonable trade-off of latency against throughput but can be
- * changed at runtime.
- */
- engine->stat_irq_thresh = (engine->fifo_sz / 2);
-
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
- /*
- * Configure the interrupts. We only use the STAT_CNT interrupt as we
- * only submit a new packet for processing when we complete another in
- * the queue. This minimizes time spent in the interrupt handler.
- */
- writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
- engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
- writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
- engine->regs + SPA_IRQ_EN_REG_OFFSET);
-
- timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
-
- INIT_LIST_HEAD(&engine->pending);
- INIT_LIST_HEAD(&engine->completed);
- INIT_LIST_HEAD(&engine->in_progress);
- engine->in_flight = 0;
-
- platform_set_drvdata(pdev, engine);
-
- ret = -EINVAL;
- INIT_LIST_HEAD(&engine->registered_algs);
- for (i = 0; i < engine->num_algs; ++i) {
- engine->algs[i].engine = engine;
- err = crypto_register_skcipher(&engine->algs[i].alg);
- if (!err) {
- list_add_tail(&engine->algs[i].entry,
- &engine->registered_algs);
- ret = 0;
- }
- if (err)
- dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->algs[i].alg.base.cra_name);
- else
- dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->algs[i].alg.base.cra_name);
- }
-
- INIT_LIST_HEAD(&engine->registered_aeads);
- for (i = 0; i < engine->num_aeads; ++i) {
- engine->aeads[i].engine = engine;
- err = crypto_register_aead(&engine->aeads[i].alg);
- if (!err) {
- list_add_tail(&engine->aeads[i].entry,
- &engine->registered_aeads);
- ret = 0;
- }
- if (err)
- dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->aeads[i].alg.base.cra_name);
- else
- dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->aeads[i].alg.base.cra_name);
- }
-
- if (!ret)
- return 0;
-
- del_timer_sync(&engine->packet_timeout);
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
-err_clk_disable:
- clk_disable_unprepare(engine->clk);
-err_clk_put:
- clk_put(engine->clk);
-
- return ret;
-}
-
-static int spacc_remove(struct platform_device *pdev)
-{
- struct spacc_aead *aead, *an;
- struct spacc_alg *alg, *next;
- struct spacc_engine *engine = platform_get_drvdata(pdev);
-
- del_timer_sync(&engine->packet_timeout);
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
-
- list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
- list_del(&aead->entry);
- crypto_unregister_aead(&aead->alg);
- }
-
- list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
- list_del(&alg->entry);
- crypto_unregister_skcipher(&alg->alg);
- }
-
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
-
- return 0;
-}
-
-static struct platform_driver spacc_driver = {
- .probe = spacc_probe,
- .remove = spacc_remove,
- .driver = {
- .name = "picochip,spacc",
-#ifdef CONFIG_PM
- .pm = &spacc_pm_ops,
-#endif /* CONFIG_PM */
- .of_match_table = of_match_ptr(spacc_of_id_table),
- },
-};
-
-module_platform_driver(spacc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
deleted file mode 100644
index b870a50238ba..000000000000
--- a/drivers/crypto/picoxcell_crypto_regs.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2010 Picochip Ltd., Jamie Iles
- */
-#ifndef __PICOXCELL_CRYPTO_REGS_H__
-#define __PICOXCELL_CRYPTO_REGS_H__
-
-#define SPA_STATUS_OK 0
-#define SPA_STATUS_ICV_FAIL 1
-#define SPA_STATUS_MEMORY_ERROR 2
-#define SPA_STATUS_BLOCK_ERROR 3
-
-#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16
-#define SPA_IRQ_STAT_STAT_MASK (1 << 4)
-#define SPA_FIFO_STAT_STAT_OFFSET 16
-#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET)
-#define SPA_STATUS_RES_CODE_OFFSET 24
-#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET)
-#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8
-#define SPA_KEY_SZ_CIPHER_OFFSET 31
-
-#define SPA_IRQ_EN_REG_OFFSET 0x00000000
-#define SPA_IRQ_STAT_REG_OFFSET 0x00000004
-#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008
-#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C
-#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010
-#define SPA_SRC_PTR_REG_OFFSET 0x00000020
-#define SPA_DST_PTR_REG_OFFSET 0x00000024
-#define SPA_OFFSET_REG_OFFSET 0x00000028
-#define SPA_AAD_LEN_REG_OFFSET 0x0000002C
-#define SPA_PROC_LEN_REG_OFFSET 0x00000030
-#define SPA_ICV_LEN_REG_OFFSET 0x00000034
-#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038
-#define SPA_SW_CTRL_REG_OFFSET 0x0000003C
-#define SPA_CTRL_REG_OFFSET 0x00000040
-#define SPA_AUX_INFO_REG_OFFSET 0x0000004C
-#define SPA_STAT_POP_REG_OFFSET 0x00000050
-#define SPA_STATUS_REG_OFFSET 0x00000054
-#define SPA_KEY_SZ_REG_OFFSET 0x00000100
-#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000
-#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000
-#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000
-
-#define SPA_IRQ_EN_REG_RESET 0x00000000
-#define SPA_IRQ_CTRL_REG_RESET 0x00000000
-#define SPA_FIFO_STAT_REG_RESET 0x00000000
-#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000
-#define SPA_SRC_PTR_REG_RESET 0x00000000
-#define SPA_DST_PTR_REG_RESET 0x00000000
-#define SPA_OFFSET_REG_RESET 0x00000000
-#define SPA_AAD_LEN_REG_RESET 0x00000000
-#define SPA_PROC_LEN_REG_RESET 0x00000000
-#define SPA_ICV_LEN_REG_RESET 0x00000000
-#define SPA_ICV_OFFSET_REG_RESET 0x00000000
-#define SPA_SW_CTRL_REG_RESET 0x00000000
-#define SPA_CTRL_REG_RESET 0x00000000
-#define SPA_AUX_INFO_REG_RESET 0x00000000
-#define SPA_STAT_POP_REG_RESET 0x00000000
-#define SPA_STATUS_REG_RESET 0x00000000
-#define SPA_KEY_SZ_REG_RESET 0x00000000
-
-#define SPA_CTRL_HASH_ALG_IDX 4
-#define SPA_CTRL_CIPH_MODE_IDX 8
-#define SPA_CTRL_HASH_MODE_IDX 12
-#define SPA_CTRL_CTX_IDX 16
-#define SPA_CTRL_ENCRYPT_IDX 24
-#define SPA_CTRL_AAD_COPY 25
-#define SPA_CTRL_ICV_PT 26
-#define SPA_CTRL_ICV_ENC 27
-#define SPA_CTRL_ICV_APPEND 28
-#define SPA_CTRL_KEY_EXP 29
-
-#define SPA_KEY_SZ_CXT_IDX 8
-#define SPA_KEY_SZ_CIPHER_IDX 31
-
-#define SPA_IRQ_EN_CMD0_EN (1 << 0)
-#define SPA_IRQ_EN_STAT_EN (1 << 4)
-#define SPA_IRQ_EN_GLBL_EN (1 << 31)
-
-#define SPA_CTRL_CIPH_ALG_NULL 0x00
-#define SPA_CTRL_CIPH_ALG_DES 0x01
-#define SPA_CTRL_CIPH_ALG_AES 0x02
-#define SPA_CTRL_CIPH_ALG_RC4 0x03
-#define SPA_CTRL_CIPH_ALG_MULTI2 0x04
-#define SPA_CTRL_CIPH_ALG_KASUMI 0x05
-
-#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX)
-
-#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX)
-
-#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX)
-#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX)
-#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX)
-
-#define SPA_FIFO_STAT_EMPTY (1 << 31)
-#define SPA_FIFO_CMD_FULL (1 << 7)
-
-#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 846a3d90b41a..77783feb62b2 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -11,7 +11,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
- select CRYPTO_AES
+ select CRYPTO_LIB_AES
select FW_LOADER
config CRYPTO_DEV_QAT_DH895xCC
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 344bfae45bff..6a9be01fdf33 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -19,7 +19,7 @@ static struct adf_fw_config adf_4xxx_fw_config[] = {
};
/* Worker thread to service arbiter mappings */
-static u32 thrd_to_arb_map[] = {
+static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
@@ -119,17 +119,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_1;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- unsigned long ae_mask = hw_device->ae_mask;
- int i;
-
- for_each_clear_bit(i, &ae_mask, ADF_4XXX_MAX_ACCELENGINES)
- thrd_to_arb_map[i] = 0;
-
- *arb_map_config = thrd_to_arb_map;
+ return thrd_to_arb_map;
}
static void get_arb_info(struct arb_info *arb_info)
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index eb45f1b1ae3e..f5990d042c9a 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -7,8 +7,8 @@
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_6_me_sku[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA
};
@@ -101,18 +101,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_6_me_sku;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index babdffbcb846..cadcf12884c8 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -7,13 +7,8 @@
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_8_me_sku[] = {
- 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
- 0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
-};
-
-static const u32 thrd_to_arb_map_10_me_sku[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
@@ -108,21 +103,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
-{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_2:
- *arb_map_config = thrd_to_arb_map_8_me_sku;
- break;
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_10_me_sku;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+static const u32 *adf_get_arbiter_mapping(void)
+{
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index c46a5805b294..5527344546e5 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -168,8 +168,7 @@ struct adf_hw_device_data {
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
- void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
- const u32 **cfg);
+ const u32 *(*get_arb_mapping)(void);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index eb9b3be9d8eb..96b437bfe3de 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -464,3 +464,4 @@ MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 9f5240d9488b..64e4596a24f4 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -19,6 +19,7 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ unsigned long ae_mask = hw_data->ae_mask;
u32 arb_off, wt_off, arb_cfg;
const u32 *thd_2_arb_cfg;
struct arb_info info;
@@ -35,12 +36,9 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
/* Map worker threads to service arbiters */
- hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+ thd_2_arb_cfg = hw_data->get_arb_mapping();
- if (!thd_2_arb_cfg)
- return -EFAULT;
-
- for (i = 0; i < hw_data->num_engines; i++)
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
return 0;
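The refactor above collapses the per-SKU arbiter tables into a single table sized for the maximum engine count, and skips fused-off engines via the accelerator-engine mask instead of zeroing table entries. A minimal standalone sketch of what for_each_set_bit() amounts to here; the mask value and table contents are illustrative only, not taken from any real SKU:

#include <stdio.h>

int main(void)
{
	/* Hypothetical: bits 0-5 set, i.e. six accel engines present */
	unsigned long ae_mask = 0x3f;
	const unsigned int thrd_to_arb_map[8] = {
		0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA,
		0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA,
	};
	int i;

	/* Equivalent of for_each_set_bit(i, &ae_mask, 8) */
	for (i = 0; i < 8; i++) {
		if (!(ae_mask & (1UL << i)))
			continue; /* engine absent: no arbiter CSR write */
		printf("WT2SAM[%d] = %#x\n", i, thrd_to_arb_map[i]);
	}
	return 0;
}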
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5a7030acdc33..888c1e047295 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
+#include <linux/nospec.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
@@ -246,6 +247,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
return -EFAULT;
}
+ ring_num = array_index_nospec(ring_num, num_rings_per_bank);
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 1205186ad51e..e69e5907f595 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -62,8 +62,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
seq_printf(sfile, "head %x, tail %x, empty: %d\n",
head, tail, (empty & 1 << ring->ring_number)
>> ring->ring_number);
- seq_printf(sfile, "ring size %d, msg size %d\n",
- ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+ seq_printf(sfile, "ring size %lld, msg size %d\n",
+ (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
seq_puts(sfile, "----------- Ring data ------------\n");
return 0;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 31c7a206a629..ff78c73c47e3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 2c863d25327a..b0b78445418b 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -321,13 +321,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(struct qat_dh_input_params),
+ sizeof(qat_req->in.dh.in.b),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(struct qat_dh_output_params),
+ sizeof(qat_req->out.dh.r),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -716,13 +716,13 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(struct qat_rsa_input_params),
+ sizeof(qat_req->in.rsa.enc.m),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(struct qat_rsa_output_params),
+ sizeof(qat_req->out.rsa.enc.c),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -864,13 +864,13 @@ static int qat_rsa_dec(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(struct qat_rsa_input_params),
+ sizeof(qat_req->in.rsa.dec.c),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(struct qat_rsa_output_params),
+ sizeof(qat_req->out.rsa.dec.m),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1e83d9397b11..7dd7cd6c3ef8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,14 +7,8 @@
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_sku4[] = {
- 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
- 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000
-};
-
-static const u32 thrd_to_arb_map_sku6[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
@@ -127,23 +121,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_1:
- *arb_map_config = thrd_to_arb_map_sku4;
- break;
-
- case DEV_SKU_2:
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_sku6;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8b5be29cb4dc..457084b344c1 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1350,12 +1350,6 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
crypto_unregister_ahash(&sha_v4_algs[i]);
}
-static const struct platform_device_id sahara_platform_ids[] = {
- { .name = "sahara-imx27" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
-
static const struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
@@ -1540,7 +1534,6 @@ static struct platform_driver sahara_driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
},
- .id_table = sahara_platform_ids,
};
module_platform_driver(sahara_driver);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2670c30332fa..2a4793176c71 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -1229,7 +1229,7 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
cr = stm32_cryp_read(cryp, CRYP_CR);
stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->last_ctr);
stm32_cryp_write(cryp, CRYP_CR, cr);
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 4fd85f31630a..25c9f825b8b5 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1093,11 +1093,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
unsigned int offset, int datalen, int elen,
- struct talitos_ptr *link_tbl_ptr)
+ struct talitos_ptr *link_tbl_ptr, int align)
{
int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
int cryptlen = datalen + elen;
+ int padding = ALIGN(cryptlen, align) - cryptlen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@@ -1121,7 +1122,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
offset += datalen;
}
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, len, 0);
+ sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1144,10 +1145,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
unsigned int offset, int tbl_off, int elen,
- bool force)
+ bool force, int align)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ int aligned_len = ALIGN(len, align);
if (!src) {
to_talitos_ptr(ptr, 0, 0, is_sec1);
@@ -1155,22 +1157,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1 && !force) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
- &edesc->link_tbl[tbl_off]);
+ &edesc->link_tbl[tbl_off], align);
if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed*/
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@@ -1182,7 +1184,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
- tbl_off, 0, false);
+ tbl_off, 0, false, 1);
}
/*
@@ -1251,7 +1253,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
sg_count, areq->assoclen, tbl_off, elen,
- false);
+ false, 1);
if (ret > 1) {
tbl_off += ret;
@@ -1271,7 +1273,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = 0;
ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
sg_count, areq->assoclen, tbl_off, elen,
- is_ipsec_esp && !encrypt);
+ is_ipsec_esp && !encrypt, 1);
tbl_off += ret;
if (!encrypt && is_ipsec_esp) {
@@ -1577,6 +1579,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
+ (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
/* first DWORD empty */
@@ -1597,8 +1601,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/*
* cipher in
*/
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[3], sg_count, 0, 0);
+ sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
+ sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
if (sg_count > 1)
sync_needed = true;
@@ -2763,6 +2767,22 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-talitos",
+ .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CTR,
+ },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -3178,6 +3198,12 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
+ if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
+ DESC_TYPE(t_alg->algt.desc_hdr_template) !=
+ DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
+ devm_kfree(dev, t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 1469b956948a..32825119e880 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 2bc5d4e1adf4..d05c02baebcf 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -14,6 +14,7 @@
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "aesp8-ppc.h"
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 01774a4d26a2..5764d4438388 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -7,6 +7,12 @@ struct aes_key {
int rounds;
};
+extern struct shash_alg p8_ghash_alg;
+extern struct crypto_alg p8_aes_alg;
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;
+
int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
struct aes_key *key);
int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 3e0335fb406c..a40d08e75fc0 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -17,11 +17,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-extern struct shash_alg p8_ghash_alg;
-extern struct crypto_alg p8_aes_alg;
-extern struct skcipher_alg p8_aes_cbc_alg;
-extern struct skcipher_alg p8_aes_ctr_alg;
-extern struct skcipher_alg p8_aes_xts_alg;
+#include "aesp8-ppc.h"
static int __init p8_init(void)
{
@@ -78,3 +74,4 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
"support on Power 8");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
new file mode 100644
index 000000000000..97dc4d751651
--- /dev/null
+++ b/drivers/cxl/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig CXL_BUS
+ tristate "CXL (Compute Express Link) Devices Support"
+ depends on PCI
+ help
+ CXL is a bus that is electrically compatible with PCI Express, but
+ layers three protocols on that signalling (CXL.io, CXL.cache, and
+ CXL.mem). The CXL.cache protocol allows devices to hold cachelines
+ locally, the CXL.mem protocol allows devices to be fully coherent
+ memory targets, the CXL.io protocol is equivalent to PCI Express.
+ Say 'y' to enable support for the configuration and management of
+ devices supporting these protocols.
+
+if CXL_BUS
+
+config CXL_MEM
+ tristate "CXL.mem: Memory Devices"
+ help
+ The CXL.mem protocol allows a device to act as a provider of
+ "System RAM" and/or "Persistent Memory" that is fully coherent
+ as if the memory were attached to a typical CPU memory
+ controller.
+
+ Say 'y/m' to enable a driver (named "cxl_mem.ko" when built as
+ a module) that will attach to CXL.mem devices for
+ configuration, provisioning, and health monitoring. This
+ driver is required for dynamic provisioning of CXL.mem
+ attached memory which is a prerequisite for persistent memory
+ support. Typically volatile memory is mapped by platform
+ firmware and included in the platform memory map, but in some
+ cases the OS is responsible for mapping that memory. See
+ Chapter 2.3 Type 3 CXL Device in the CXL 2.0 specification.
+
+ If unsure say 'm'.
+
+config CXL_MEM_RAW_COMMANDS
+ bool "RAW Command Interface for Memory Devices"
+ depends on CXL_MEM
+ help
+ Enable CXL RAW command interface.
+
+ The CXL driver ioctl interface may assign a kernel ioctl command
+ number for each specification defined opcode. At any given point in
+ time the number of opcodes that the specification defines and a device
+ may implement may exceed the kernel's set of associated ioctl function
+ numbers. The mismatch is either by omission (the specification is too
+ new) or by design. When prototyping new hardware, or developing or
+ debugging the driver, it is useful to be able to submit any possible command to
+ the hardware, even commands that may crash the kernel due to their
+ potential impact on memory currently in use by the kernel.
+
+ If developing CXL hardware or the driver say Y, otherwise say N.
+endif
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
new file mode 100644
index 000000000000..a314a1891f4d
--- /dev/null
+++ b/drivers/cxl/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_bus.o
+obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
+cxl_bus-y := bus.o
+cxl_mem-y := mem.o
diff --git a/drivers/cxl/bus.c b/drivers/cxl/bus.c
new file mode 100644
index 000000000000..58f74796d525
--- /dev/null
+++ b/drivers/cxl/bus.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <linux/module.h>
+
+/**
+ * DOC: cxl bus
+ *
+ * The CXL bus provides a namespace for control devices and a rendezvous
+ * point for cross-device interleave coordination.
+ */
+struct bus_type cxl_bus_type = {
+ .name = "cxl",
+};
+EXPORT_SYMBOL_GPL(cxl_bus_type);
+
+static __init int cxl_bus_init(void)
+{
+ return bus_register(&cxl_bus_type);
+}
+
+static void cxl_bus_exit(void)
+{
+ bus_unregister(&cxl_bus_type);
+}
+
+module_init(cxl_bus_init);
+module_exit(cxl_bus_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
new file mode 100644
index 000000000000..6f14838c2d25
--- /dev/null
+++ b/drivers/cxl/cxl.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_H__
+#define __CXL_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
+#define CXLDEV_CAP_ARRAY_OFFSET 0x0
+#define CXLDEV_CAP_ARRAY_CAP_ID 0
+#define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
+/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
+#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
+/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
+#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
+#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
+#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
+#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
+
+/* CXL 2.0 8.2.8.4 Mailbox Registers */
+#define CXLDEV_MBOX_CAPS_OFFSET 0x00
+#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
+#define CXLDEV_MBOX_CTRL_OFFSET 0x04
+#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
+#define CXLDEV_MBOX_CMD_OFFSET 0x08
+#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
+#define CXLDEV_MBOX_STATUS_OFFSET 0x10
+#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
+#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
+#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
+
+/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
+#define CXLMDEV_STATUS_OFFSET 0x0
+#define CXLMDEV_DEV_FATAL BIT(0)
+#define CXLMDEV_FW_HALT BIT(1)
+#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
+#define CXLMDEV_MS_NOT_READY 0
+#define CXLMDEV_MS_READY 1
+#define CXLMDEV_MS_ERROR 2
+#define CXLMDEV_MS_DISABLED 3
+#define CXLMDEV_READY(status) \
+ (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
+ CXLMDEV_MS_READY)
+#define CXLMDEV_MBOX_IF_READY BIT(4)
+#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
+#define CXLMDEV_RESET_NEEDED_NOT 0
+#define CXLMDEV_RESET_NEEDED_COLD 1
+#define CXLMDEV_RESET_NEEDED_WARM 2
+#define CXLMDEV_RESET_NEEDED_HOT 3
+#define CXLMDEV_RESET_NEEDED_CXL 4
+#define CXLMDEV_RESET_NEEDED(status) \
+ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
+ CXLMDEV_RESET_NEEDED_NOT)
+
+struct cxl_memdev;
+/**
+ * struct cxl_mem - A CXL memory device
+ * @pdev: The PCI device associated with this CXL device.
+ * @regs: IO mappings to the device's MMIO
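+ * @cxlmd: The cxl_memdev (char dev) associated with this device.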
+ * @status_regs: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox_regs: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev_regs: CXL 2.0 8.2.8.5 Memory Device Registers
+ * @payload_size: Size of space for payload
+ * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: Mutex to synchronize mailbox access.
+ * @firmware_version: Firmware version for the memory device.
+ * @enabled_cmds: Hardware commands found enabled in CEL.
+ * @pmem_range: Persistent memory capacity information.
+ * @ram_range: Volatile memory capacity information.
+ */
+struct cxl_mem {
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct cxl_memdev *cxlmd;
+
+ void __iomem *status_regs;
+ void __iomem *mbox_regs;
+ void __iomem *memdev_regs;
+
+ size_t payload_size;
+ struct mutex mbox_mutex; /* Protects device mailbox and firmware */
+ char firmware_version[0x10];
+ unsigned long *enabled_cmds;
+
+ struct range pmem_range;
+ struct range ram_range;
+};
+
+extern struct bus_type cxl_bus_type;
+#endif /* __CXL_H__ */
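The GENMASK()/FIELD_GET() pairs above carve named bitfields out of 64-bit registers. A minimal standalone sketch of the capability-array decode with a made-up raw value; the kernel code uses FIELD_GET() from <linux/bitfield.h> rather than these open-coded shifts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical raw CXLDEV_CAP_ARRAY value: cap ID 0, 3 capabilities */
	uint64_t cap_array = 3ULL << 32;

	uint16_t cap_id = cap_array & 0xffff;        /* GENMASK_ULL(15, 0) */
	uint16_t count = (cap_array >> 32) & 0xffff; /* GENMASK_ULL(47, 32) */

	printf("capability array: id %u, %u capabilities\n", cap_id, count);
	return 0;
}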
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
new file mode 100644
index 000000000000..244cb7d89678
--- /dev/null
+++ b/drivers/cxl/mem.c
@@ -0,0 +1,1552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <uapi/linux/cxl_mem.h>
+#include <linux/security.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "pci.h"
+#include "cxl.h"
+
+/**
+ * DOC: cxl mem
+ *
+ * This implements a CXL memory device ("type-3") as it is defined by the
+ * Compute Express Link specification.
+ *
+ * The driver has several responsibilities, mainly:
+ * - Create the memX device and register on the CXL bus.
+ * - Enumerate the device's register interface and map the registers.
+ * - Probe the device attributes to establish a sysfs interface.
+ * - Provide an IOCTL interface to userspace to communicate with the device for
+ * things like firmware update.
+ * - Support management of interleave sets.
+ * - Handle and manage error conditions.
+ */
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+#define cxl_doorbell_busy(cxlm) \
+ (readl((cxlm)->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET) & \
+ CXLDEV_MBOX_CTRL_DOORBELL)
+
+/*
+ * CXL 2.0 - 8.2.8.4 mailbox timeout. Note: despite the _MS suffix, the
+ * value is in jiffies (2 seconds), matching its use with time_after().
+ */
+#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
+
+enum opcode {
+ CXL_MBOX_OP_INVALID = 0x0000,
+ CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
+ CXL_MBOX_OP_GET_FW_INFO = 0x0200,
+ CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+ CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
+ CXL_MBOX_OP_GET_LOG = 0x0401,
+ CXL_MBOX_OP_IDENTIFY = 0x4000,
+ CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
+ CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
+ CXL_MBOX_OP_GET_LSA = 0x4102,
+ CXL_MBOX_OP_SET_LSA = 0x4103,
+ CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200,
+ CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204,
+ CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
+ CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+ CXL_MBOX_OP_MAX = 0x10000
+};
+
+/**
+ * struct mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ * the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ * (output) Number of bytes generated by the device. For fixed size
+ * outputs commands this is always expected to be deterministic. For
+ * variable sized output commands, it tells the exact number of bytes
+ * written.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described
+ * in the Command Register section of CXL 2.0 8.2.8.4. @payload_in and
+ * @payload_out are written to, and read from the Command Payload Registers
+ * defined in CXL 2.0 8.2.8.4.8.
+ */
+struct mbox_cmd {
+ u16 opcode;
+ void *payload_in;
+ void *payload_out;
+ size_t size_in;
+ size_t size_out;
+ u16 return_code;
+#define CXL_MBOX_SUCCESS 0
+};
+
+/**
+ * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
+ * @dev: driver core device object
+ * @cdev: char dev core object for ioctl operations
+ * @cxlm: pointer to the parent device driver data
+ * @ops_active: active user of @cxlm in ops handlers
+ * @ops_dead: completion when all @cxlm ops users have exited
+ * @id: id number of this memdev instance.
+ */
+struct cxl_memdev {
+ struct device dev;
+ struct cdev cdev;
+ struct cxl_mem *cxlm;
+ struct percpu_ref ops_active;
+ struct completion ops_dead;
+ int id;
+};
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+static struct dentry *cxl_debugfs;
+static bool cxl_raw_allow_all;
+
+enum {
+ CEL_UUID,
+ VENDOR_DEBUG_UUID,
+};
+
+/* See CXL 2.0 Table 170. Get Log Input Payload */
+static const uuid_t log_uuid[] = {
+ [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
+ 0xb1, 0x62, 0x3b, 0x3f, 0x17),
+ [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
+ 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
+};
+
+/**
+ * struct cxl_mem_command - Driver representation of a memory device command
+ * @info: Command information as it exists for the UAPI
+ * @opcode: The actual bits used for the mailbox protocol
+ * @flags: Set of flags affecting driver behavior.
+ *
+ * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
+ * will be enabled by the driver regardless of what hardware may have
+ * advertised.
+ *
+ * The cxl_mem_command is the driver's internal representation of commands that
+ * are supported by the driver. Some of these commands may not be supported by
+ * the hardware. The driver will use @info to validate the fields passed in by
+ * the user and then submit the @opcode to the hardware.
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_command {
+ struct cxl_command_info info;
+ enum opcode opcode;
+ u32 flags;
+#define CXL_CMD_FLAG_NONE 0
+#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
+};
+
+#define CXL_CMD(_id, sin, sout, _flags) \
+ [CXL_MEM_COMMAND_ID_##_id] = { \
+ .info = { \
+ .id = CXL_MEM_COMMAND_ID_##_id, \
+ .size_in = sin, \
+ .size_out = sout, \
+ }, \
+ .opcode = CXL_MBOX_OP_##_id, \
+ .flags = _flags, \
+ }
+
+/*
+ * This table defines the supported mailbox commands for the driver. Each entry
+ * embeds the UAPI &struct cxl_command_info. Non-negative values as parameters in the
+ * table will be validated against the user's input. For example, if size_in is
+ * 0, and the user passed in 1, it is an error.
+ */
+static struct cxl_mem_command mem_commands[] = {
+ CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
+#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
+ CXL_CMD(RAW, ~0, ~0, 0),
+#endif
+ CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
+ CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
+ CXL_CMD(GET_LSA, 0x8, ~0, 0),
+ CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
+ CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+};
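+
+/*
+ * For reference, the IDENTIFY entry above is produced mechanically by
+ * CXL_CMD() and expands to:
+ *
+ *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
+ *		.info = {
+ *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
+ *			.size_in = 0,
+ *			.size_out = 0x43,
+ *		},
+ *		.opcode = CXL_MBOX_OP_IDENTIFY,
+ *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
+ *	},
+ */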
+
+/*
+ * Commands that RAW doesn't permit. The rationale for each:
+ *
+ * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
+ * coordination of transaction timeout values at the root bridge level.
+ *
+ * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
+ * and needs to be coordinated with HDM updates.
+ *
+ * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
+ * driver, and any writes from userspace invalidate those contents.
+ *
+ * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
+ * to the device after it is marked clean; userspace cannot make that
+ * assertion.
+ *
+ * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
+ * is kept up to date with patrol notifications and error management.
+ */
+static u16 cxl_disabled_raw_commands[] = {
+ CXL_MBOX_OP_ACTIVATE_FW,
+ CXL_MBOX_OP_SET_PARTITION_INFO,
+ CXL_MBOX_OP_SET_LSA,
+ CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+ CXL_MBOX_OP_SCAN_MEDIA,
+ CXL_MBOX_OP_GET_SCAN_MEDIA,
+};
+
+/*
+ * Command sets that RAW doesn't permit. All opcodes in this set are
+ * disabled because they pass plain text security payloads over the
+ * user/kernel boundary. This functionality is intended to be wrapped
+ * behind the keys ABI which allows for encrypted payloads in the UAPI
+ */
+static u8 security_command_sets[] = {
+ 0x44, /* Sanitize */
+ 0x45, /* Persistent Memory Data-at-rest Security */
+ 0x46, /* Security Passthrough */
+};
+
+#define cxl_for_each_cmd(cmd) \
+ for ((cmd) = &mem_commands[0]; \
+ ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++)
+
+#define cxl_cmd_count ARRAY_SIZE(mem_commands)
+
+static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
+{
+ const unsigned long start = jiffies;
+ unsigned long end = start;
+
+ while (cxl_doorbell_busy(cxlm)) {
+ end = jiffies;
+
+ if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
+ /* Check again in case preempted before timeout test */
+ if (!cxl_doorbell_busy(cxlm))
+ break;
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
+ jiffies_to_msecs(end) - jiffies_to_msecs(start));
+ return 0;
+}
+
+static bool cxl_is_security_command(u16 opcode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
+ if (security_command_sets[i] == (opcode >> 8))
+ return true;
+ return false;
+}
+
+static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
+ struct mbox_cmd *mbox_cmd)
+{
+ struct device *dev = &cxlm->pdev->dev;
+
+ dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
+ mbox_cmd->opcode, mbox_cmd->size_in);
+}
+
+/**
+ * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
+ * @cxlm: The CXL memory device to communicate with.
+ * @mbox_cmd: Command to send to the memory device.
+ *
+ * Context: Any context. Expects mbox_mutex to be held.
+ * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
+ * Caller should check the return code in @mbox_cmd to make sure it
+ * succeeded.
+ *
+ * This is a generic form of the CXL mailbox send command: it uses only the
+ * registers defined by the mailbox capability ID (CXL 2.0 8.2.8.4). Memory
+ * devices, and perhaps other types of CXL devices may have further information
+ * available upon error conditions. Driver facilities wishing to send mailbox
+ * commands should use the wrapper command.
+ *
+ * The CXL spec allows for up to two mailboxes. The intention is for the primary
+ * mailbox to be OS controlled and the secondary mailbox to be used by system
+ * firmware. This allows the OS and firmware to communicate with the device and
+ * not need to coordinate with each other. The driver only uses the primary
+ * mailbox.
+ */
+static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
+ struct mbox_cmd *mbox_cmd)
+{
+ void __iomem *payload = cxlm->mbox_regs + CXLDEV_MBOX_PAYLOAD_OFFSET;
+ u64 cmd_reg, status_reg;
+ size_t out_len;
+ int rc;
+
+ lockdep_assert_held(&cxlm->mbox_mutex);
+
+ /*
+ * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
+ * 1. Caller reads MB Control Register to verify doorbell is clear
+ * 2. Caller writes Command Register
+ * 3. Caller writes Command Payload Registers if input payload is non-empty
+ * 4. Caller writes MB Control Register to set doorbell
+ * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured
+ * 6. Caller reads MB Status Register to fetch Return code
+ * 7. If command successful, Caller reads Command Register to get Payload Length
+ * 8. If output payload is non-empty, host reads Command Payload Registers
+ *
+ * Hardware is free to do whatever it wants before the doorbell is rung,
+ * and isn't allowed to change anything after it clears the doorbell. As
+ * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
+ * also happen in any order (though some orders might not make sense).
+ */
+
+ /* #1 */
+ if (cxl_doorbell_busy(cxlm)) {
+ dev_err_ratelimited(&cxlm->pdev->dev,
+ "Mailbox re-busy after acquiring\n");
+ return -EBUSY;
+ }
+
+ cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
+ mbox_cmd->opcode);
+ if (mbox_cmd->size_in) {
+ if (WARN_ON(!mbox_cmd->payload_in))
+ return -EINVAL;
+
+ cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
+ mbox_cmd->size_in);
+ memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
+ }
+
+ /* #2, #3 */
+ writeq(cmd_reg, cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+
+ /* #4 */
+ dev_dbg(&cxlm->pdev->dev, "Sending command\n");
+ writel(CXLDEV_MBOX_CTRL_DOORBELL,
+ cxlm->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET);
+
+ /* #5 */
+ rc = cxl_mem_wait_for_doorbell(cxlm);
+ if (rc == -ETIMEDOUT) {
+ cxl_mem_mbox_timeout(cxlm, mbox_cmd);
+ return rc;
+ }
+
+ /* #6 */
+ status_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_STATUS_OFFSET);
+ mbox_cmd->return_code =
+ FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
+
+ if (mbox_cmd->return_code != 0) {
+ dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
+ return 0;
+ }
+
+ /* #7 */
+ cmd_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+ out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
+
+ /* #8 */
+ if (out_len && mbox_cmd->payload_out) {
+ /*
+ * Sanitize the copy. If hardware misbehaves, out_len per the
+ * spec can actually be greater than the max allowed size (21
+ * bits available but a spec-defined 1M max). The caller also may
+ * have requested less data than the hardware supplied even
+ * within spec.
+ */
+ size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
+
+ memcpy_fromio(mbox_cmd->payload_out, payload, n);
+ mbox_cmd->size_out = n;
+ } else {
+ mbox_cmd->size_out = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
+ * @cxlm: The memory device to gain access to.
+ *
+ * Context: Any context. Takes the mbox_mutex.
+ * Return: 0 if exclusive access was acquired.
+ */
+static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ u64 md_status;
+ int rc;
+
+ mutex_lock_io(&cxlm->mbox_mutex);
+
+ /*
+ * XXX: There is some amount of ambiguity in the 2.0 version of the spec
+ * around the Mailbox Interface Ready bit (8.2.8.5.1.1). The purpose of
+ * the bit is to allow firmware running on the device to notify the
+ * driver that it's ready to receive commands. It is unclear whether the
+ * bit needs to be read before each mailbox transaction, i.e. whether the
+ * firmware can switch it on and off as needed. Second, there is no
+ * defined timeout for mailbox ready, like there is for the doorbell
+ * interface.
+ *
+ * Assumptions:
+ * 1. The firmware might toggle the Mailbox Interface Ready bit, so check
+ * it for every command.
+ *
+ * 2. If the doorbell is clear, the firmware should have first set the
+ * Mailbox Interface Ready bit. Therefore, waiting for the doorbell
+ * to be ready is sufficient.
+ */
+ rc = cxl_mem_wait_for_doorbell(cxlm);
+ if (rc) {
+ dev_warn(dev, "Mailbox interface not ready\n");
+ goto out;
+ }
+
+ md_status = readq(cxlm->memdev_regs + CXLMDEV_STATUS_OFFSET);
+ if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
+ dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Hardware shouldn't allow a ready status but also have failure bits
+ * set. Spit out an error; this warrants a bug report.
+ */
+ rc = -EFAULT;
+ if (md_status & CXLMDEV_DEV_FATAL) {
+ dev_err(dev, "mbox: reported ready, but fatal\n");
+ goto out;
+ }
+ if (md_status & CXLMDEV_FW_HALT) {
+ dev_err(dev, "mbox: reported ready, but halted\n");
+ goto out;
+ }
+ if (CXLMDEV_RESET_NEEDED(md_status)) {
+ dev_err(dev, "mbox: reported ready, but reset needed\n");
+ goto out;
+ }
+
+ /* with lock held */
+ return 0;
+
+out:
+ mutex_unlock(&cxlm->mbox_mutex);
+ return rc;
+}
+
+/**
+ * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Context: Any context. Expects mbox_mutex to be held.
+ */
+static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
+{
+ mutex_unlock(&cxlm->mbox_mutex);
+}
+
+/**
+ * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
+ * @cxlm: The CXL memory device to communicate with.
+ * @cmd: The validated command.
+ * @in_payload: Pointer to userspace's input payload.
+ * @out_payload: Pointer to userspace's output payload.
+ * @size_out: (Input) Max payload size to copy out.
+ * (Output) Payload size hardware generated.
+ * @retval: Hardware generated return code from the operation.
+ *
+ * Return:
+ * * %0 - Mailbox transaction succeeded. This implies the mailbox
+ * protocol completed successfully, not that the operation itself
+ * was successful.
+ * * %-ENOMEM - Couldn't allocate a bounce buffer.
+ * * %-EFAULT - Something happened with copy_to/from_user.
+ * * %-EINTR - Mailbox acquisition interrupted.
+ * * %-EXXX - Transaction level failures.
+ *
+ * Creates the appropriate mailbox command and dispatches it on behalf of a
+ * userspace request. The input and output payloads are copied to and
+ * from userspace.
+ *
+ * See cxl_send_cmd().
+ */
+static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+ const struct cxl_mem_command *cmd,
+ u64 in_payload, u64 out_payload,
+ s32 *size_out, u32 *retval)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ struct mbox_cmd mbox_cmd = {
+ .opcode = cmd->opcode,
+ .size_in = cmd->info.size_in,
+ .size_out = cmd->info.size_out,
+ };
+ int rc;
+
+ if (cmd->info.size_out) {
+ mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
+ if (!mbox_cmd.payload_out)
+ return -ENOMEM;
+ }
+
+ if (cmd->info.size_in) {
+ mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+ cmd->info.size_in);
+ if (IS_ERR(mbox_cmd.payload_in)) {
+ kvfree(mbox_cmd.payload_out);
+ return PTR_ERR(mbox_cmd.payload_in);
+ }
+ }
+
+ rc = cxl_mem_mbox_get(cxlm);
+ if (rc)
+ goto out;
+
+ dev_dbg(dev,
+ "Submitting %s command for user\n"
+ "\topcode: %x\n"
+ "\tsize: %ub\n",
+ cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
+ cmd->info.size_in);
+
+ dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
+ "raw command path used\n");
+
+ rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
+ cxl_mem_mbox_put(cxlm);
+ if (rc)
+ goto out;
+
+ /*
+ * @size_out contains the max size that's allowed to be written back out
+ * to userspace. While the device may have produced more output than
+ * this, the excess is ignored.
+ */
+ if (mbox_cmd.size_out) {
+ dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
+ "Invalid return size\n");
+ if (copy_to_user(u64_to_user_ptr(out_payload),
+ mbox_cmd.payload_out, mbox_cmd.size_out)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+
+ *size_out = mbox_cmd.size_out;
+ *retval = mbox_cmd.return_code;
+
+out:
+ kvfree(mbox_cmd.payload_in);
+ kvfree(mbox_cmd.payload_out);
+ return rc;
+}
+
+static bool cxl_mem_raw_command_allowed(u16 opcode)
+{
+ int i;
+
+ if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
+ return false;
+
+ if (security_locked_down(LOCKDOWN_NONE))
+ return false;
+
+ if (cxl_raw_allow_all)
+ return true;
+
+ if (cxl_is_security_command(opcode))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
+ if (cxl_disabled_raw_commands[i] == opcode)
+ return false;
+
+ return true;
+}
+
+/**
+ * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
+ * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @send_cmd: &struct cxl_send_command copied in from userspace.
+ * @out_cmd: Sanitized and populated &struct cxl_mem_command.
+ *
+ * Return:
+ * * %0 - @out_cmd is ready to send.
+ * * %-ENOTTY - Invalid command specified.
+ * * %-EINVAL - Reserved fields or invalid values were used.
+ * * %-ENOMEM - Input or output buffer wasn't sized properly.
+ * * %-EPERM - Attempted to use a protected command.
+ *
+ * The result of this command is a fully validated command in @out_cmd that is
+ * safe to send to the hardware.
+ *
+ * See handle_mailbox_cmd_from_user()
+ */
+static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+ const struct cxl_send_command *send_cmd,
+ struct cxl_mem_command *out_cmd)
+{
+ const struct cxl_command_info *info;
+ struct cxl_mem_command *c;
+
+ if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
+ return -ENOTTY;
+
+ /*
+ * The user can never specify an input payload larger than what hardware
+ * supports, but output can be arbitrarily large (simply write out as
+ * much data as the hardware provides).
+ */
+ if (send_cmd->in.size > cxlm->payload_size)
+ return -EINVAL;
+
+ /*
+ * Checks are bypassed for raw commands but a WARN/taint will occur
+ * later in the callchain
+ */
+ if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
+ const struct cxl_mem_command temp = {
+ .info = {
+ .id = CXL_MEM_COMMAND_ID_RAW,
+ .flags = 0,
+ .size_in = send_cmd->in.size,
+ .size_out = send_cmd->out.size,
+ },
+ .opcode = send_cmd->raw.opcode
+ };
+
+ if (send_cmd->raw.rsvd)
+ return -EINVAL;
+
+ /*
+ * Unlike supported commands, the output size of RAW commands
+ * gets passed along without further checking, so it must be
+ * validated here.
+ */
+ if (send_cmd->out.size > cxlm->payload_size)
+ return -EINVAL;
+
+ if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
+ return -EPERM;
+
+ memcpy(out_cmd, &temp, sizeof(temp));
+
+ return 0;
+ }
+
+ if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
+ return -EINVAL;
+
+ if (send_cmd->rsvd)
+ return -EINVAL;
+
+ if (send_cmd->in.rsvd || send_cmd->out.rsvd)
+ return -EINVAL;
+
+ /* Convert user's command into the internal representation */
+ c = &mem_commands[send_cmd->id];
+ info = &c->info;
+
+ /* Check that the command is enabled for hardware */
+ if (!test_bit(info->id, cxlm->enabled_cmds))
+ return -ENOTTY;
+
+ /* Check the input buffer is the expected size */
+ if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
+ return -ENOMEM;
+
+ /* Check the output buffer is at least large enough */
+ if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
+ return -ENOMEM;
+
+ memcpy(out_cmd, c, sizeof(*c));
+ out_cmd->info.size_in = send_cmd->in.size;
+ /*
+ * XXX: out_cmd->info.size_out will be controlled by the driver, and the
+ * specified number of bytes @send_cmd->out.size will be copied back out
+ * to userspace.
+ */
+
+ return 0;
+}
+
+static int cxl_query_cmd(struct cxl_memdev *cxlmd,
+ struct cxl_mem_query_commands __user *q)
+{
+ struct device *dev = &cxlmd->dev;
+ struct cxl_mem_command *cmd;
+ u32 n_commands;
+ int j = 0;
+
+ dev_dbg(dev, "Query IOCTL\n");
+
+ if (get_user(n_commands, &q->n_commands))
+ return -EFAULT;
+
+ /* Returns the total number of commands if 0 elements are requested. */
+ if (n_commands == 0)
+ return put_user(cxl_cmd_count, &q->n_commands);
+
+ /*
+ * otherwise, return min(n_commands, total commands) cxl_command_info
+ * structures.
+ */
+ cxl_for_each_cmd(cmd) {
+ const struct cxl_command_info *info = &cmd->info;
+
+ if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
+ return -EFAULT;
+
+ if (j == n_commands)
+ break;
+ }
+
+ return 0;
+}
+
+static int cxl_send_cmd(struct cxl_memdev *cxlmd,
+ struct cxl_send_command __user *s)
+{
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct device *dev = &cxlmd->dev;
+ struct cxl_send_command send;
+ struct cxl_mem_command c;
+ int rc;
+
+ dev_dbg(dev, "Send IOCTL\n");
+
+ if (copy_from_user(&send, s, sizeof(send)))
+ return -EFAULT;
+
+ rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+ if (rc)
+ return rc;
+
+ /* Prepare to handle a full payload for variable sized output */
+ if (c.info.size_out < 0)
+ c.info.size_out = cxlm->payload_size;
+
+ rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+ send.out.payload, &send.out.size,
+ &send.retval);
+ if (rc)
+ return rc;
+
+ if (copy_to_user(s, &send, sizeof(send)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case CXL_MEM_QUERY_COMMANDS:
+ return cxl_query_cmd(cxlmd, (void __user *)arg);
+ case CXL_MEM_SEND_COMMAND:
+ return cxl_send_cmd(cxlmd, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cxl_memdev *cxlmd;
+ struct inode *inode;
+ int rc = -ENOTTY;
+
+ inode = file_inode(file);
+ cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ if (!percpu_ref_tryget_live(&cxlmd->ops_active))
+ return -ENXIO;
+
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+
+ percpu_ref_put(&cxlmd->ops_active);
+
+ return rc;
+}
+
+static const struct file_operations cxl_memdev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cxl_memdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
+{
+ struct cxl_mem_command *c;
+
+ cxl_for_each_cmd(c)
+ if (c->opcode == opcode)
+ return c;
+
+ return NULL;
+}
+
+/**
+ * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
+ * @cxlm: The CXL memory device to communicate with.
+ * @opcode: Opcode for the mailbox command.
+ * @in: The input payload for the mailbox command.
+ * @in_size: The length of the input payload.
+ * @out: Caller allocated buffer for the output.
+ * @out_size: Expected size of output.
+ *
+ * Context: Any context. Will acquire and release mbox_mutex.
+ * Return:
+ *  * %0	- Success.
+ * * %-E2BIG - Payload is too large for hardware.
+ * * %-EBUSY - Couldn't acquire exclusive mailbox access.
+ * * %-EFAULT - Hardware error occurred.
+ * * %-ENXIO - Command completed, but device reported an error.
+ * * %-EIO - Unexpected output size.
+ *
+ * A mailbox command may execute successfully even though the device itself
+ * reports an error. While this distinction can be useful for commands from
+ * userspace, the kernel will only be able to use results when both succeed.
+ *
+ * See __cxl_mem_mbox_send_cmd()
+ */
+static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
+ void *in, size_t in_size,
+ void *out, size_t out_size)
+{
+ const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+ struct mbox_cmd mbox_cmd = {
+ .opcode = opcode,
+ .payload_in = in,
+ .size_in = in_size,
+ .size_out = out_size,
+ .payload_out = out,
+ };
+ int rc;
+
+ if (out_size > cxlm->payload_size)
+ return -E2BIG;
+
+ rc = cxl_mem_mbox_get(cxlm);
+ if (rc)
+ return rc;
+
+ rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
+ cxl_mem_mbox_put(cxlm);
+ if (rc)
+ return rc;
+
+ /* TODO: Map return code to proper kernel style errno */
+ if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
+ return -ENXIO;
+
+ /*
+ * Variable sized commands can't be validated and so it's up to the
+ * caller to do that if they wish.
+ */
+ if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * cxl_mem_setup_regs() - Setup necessary MMIO.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Return: 0 if all necessary registers were mapped.
+ *
+ * A memory device is required by spec to implement a certain set of MMIO
+ * regions. The purpose of this function is to enumerate and map those
+ * registers.
+ */
+static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ int cap, cap_count;
+ u64 cap_array;
+
+ cap_array = readq(cxlm->regs + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return -ENODEV;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 offset;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(cxlm->regs + cap * 0x10));
+ offset = readl(cxlm->regs + cap * 0x10 + 0x4);
+ register_block = cxlm->regs + offset;
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+ cxlm->status_regs = register_block;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ cxlm->mbox_regs = register_block;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ cxlm->memdev_regs = register_block;
+ break;
+ default:
+ dev_dbg(dev, "Unknown cap ID: %d (0x%x)\n", cap_id, offset);
+ break;
+ }
+ }
+
+ if (!cxlm->status_regs || !cxlm->mbox_regs || !cxlm->memdev_regs) {
+ dev_err(dev, "registers not found: %s%s%s\n",
+ !cxlm->status_regs ? "status " : "",
+ !cxlm->mbox_regs ? "mbox " : "",
+ !cxlm->memdev_regs ? "memdev" : "");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
+{
+ const int cap = readl(cxlm->mbox_regs + CXLDEV_MBOX_CAPS_OFFSET);
+
+ cxlm->payload_size =
+ 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
+
+ /*
+ * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
+ *
+ * If the size is too small, mandatory commands will not work and so
+ * there's no point in going forward. If the size is too large, there's
+	 * no harm in soft limiting it.
+ */
+ cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
+ if (cxlm->payload_size < 256) {
+ dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
+ cxlm->payload_size);
+ return -ENXIO;
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
+ cxlm->payload_size);
+
+ return 0;
+}
+
+static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
+ u32 reg_hi)
+{
+ struct device *dev = &pdev->dev;
+ struct cxl_mem *cxlm;
+ void __iomem *regs;
+ u64 offset;
+ u8 bar;
+ int rc;
+
+ cxlm = devm_kzalloc(&pdev->dev, sizeof(*cxlm), GFP_KERNEL);
+ if (!cxlm) {
+ dev_err(dev, "No memory available\n");
+ return NULL;
+ }
+
+	offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
+ bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
+
+ /* Basic sanity check that BAR is big enough */
+ if (pci_resource_len(pdev, bar) < offset) {
+ dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
+ &pdev->resource[bar], (unsigned long long)offset);
+ return NULL;
+ }
+
+ rc = pcim_iomap_regions(pdev, BIT(bar), pci_name(pdev));
+ if (rc) {
+ dev_err(dev, "failed to map registers\n");
+ return NULL;
+ }
+ regs = pcim_iomap_table(pdev)[bar];
+
+ mutex_init(&cxlm->mbox_mutex);
+ cxlm->pdev = pdev;
+ cxlm->regs = regs + offset;
+ cxlm->enabled_cmds =
+ devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
+ sizeof(unsigned long),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!cxlm->enabled_cmds) {
+ dev_err(dev, "No memory available for bitmap\n");
+ return NULL;
+ }
+
+ dev_dbg(dev, "Mapped CXL Memory Device resource\n");
+ return cxlm;
+}
+
+static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
+{
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ return 0;
+
+ while (pos) {
+ u16 vendor, id;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
+ if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos,
+ PCI_EXT_CAP_ID_DVSEC);
+ }
+
+ return 0;
+}
+
+static struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+ return container_of(dev, struct cxl_memdev, dev);
+}
+
+static void cxl_memdev_release(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ percpu_ref_exit(&cxlmd->ops_active);
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+ kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid)
+{
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sprintf(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->ram_range);
+
+ return sprintf(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+ __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->pmem_range);
+
+ return sprintf(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+ __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_payload_max.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+ &dev_attr_pmem_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+ &dev_attr_ram_size.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+ .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+ .name = "ram",
+ .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+ .name = "pmem",
+ .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+ &cxl_memdev_attribute_group,
+ &cxl_memdev_ram_attribute_group,
+ &cxl_memdev_pmem_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+ .name = "cxl_memdev",
+ .release = cxl_memdev_release,
+ .devnode = cxl_memdev_devnode,
+ .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxlmdev_unregister(void *_cxlmd)
+{
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+
+ percpu_ref_kill(&cxlmd->ops_active);
+ cdev_device_del(&cxlmd->cdev, dev);
+ wait_for_completion(&cxlmd->ops_dead);
+ cxlmd->cxlm = NULL;
+ put_device(dev);
+}
+
+static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(ref, typeof(*cxlmd), ops_active);
+
+ complete(&cxlmd->ops_dead);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+ if (!cxlmd)
+ return -ENOMEM;
+ init_completion(&cxlmd->ops_dead);
+
+ /*
+	 * @cxlm is deallocated when the driver unbinds, so operations
+	 * that use it need to hold a live reference.
+ */
+ cxlmd->cxlm = cxlm;
+ rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
+ GFP_KERNEL);
+ if (rc)
+ goto err_ref;
+
+	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1,
+			     GFP_KERNEL);
+ if (rc < 0)
+ goto err_id;
+ cxlmd->id = rc;
+
+ dev = &cxlmd->dev;
+ device_initialize(dev);
+ dev->parent = &pdev->dev;
+ dev->bus = &cxl_bus_type;
+ dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+ dev->type = &cxl_memdev_type;
+ dev_set_name(dev, "mem%d", cxlmd->id);
+
+ cdev = &cxlmd->cdev;
+ cdev_init(cdev, &cxl_memdev_fops);
+
+ rc = cdev_device_add(cdev, dev);
+ if (rc)
+ goto err_add;
+
+ return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+
+err_add:
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+err_id:
+ /*
+ * Theoretically userspace could have already entered the fops,
+ * so flush ops_active.
+ */
+ percpu_ref_kill(&cxlmd->ops_active);
+ wait_for_completion(&cxlmd->ops_dead);
+ percpu_ref_exit(&cxlmd->ops_active);
+err_ref:
+ kfree(cxlmd);
+
+ return rc;
+}
+
+static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+{
+ u32 remaining = size;
+ u32 offset = 0;
+
+ while (remaining) {
+ u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+ struct cxl_mbox_get_log {
+ uuid_t uuid;
+ __le32 offset;
+ __le32 length;
+ } __packed log = {
+ .uuid = *uuid,
+ .offset = cpu_to_le32(offset),
+ .length = cpu_to_le32(xfer_size)
+ };
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
+ sizeof(log), out, xfer_size);
+ if (rc < 0)
+ return rc;
+
+ out += xfer_size;
+ remaining -= xfer_size;
+ offset += xfer_size;
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_walk_cel() - Walk through the Command Effects Log.
+ * @cxlm: Device.
+ * @size: Length of the Command Effects Log.
+ * @cel: CEL
+ *
+ * Iterate over each entry in the CEL and determine if the driver supports the
+ * command. If so, the command is enabled for the device and can be used later.
+ */
+static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+{
+ struct cel_entry {
+ __le16 opcode;
+ __le16 effect;
+	} __packed *cel_entry;
+ const int cel_entries = size / sizeof(*cel_entry);
+ int i;
+
+ cel_entry = (struct cel_entry *)cel;
+
+ for (i = 0; i < cel_entries; i++) {
+ u16 opcode = le16_to_cpu(cel_entry[i].opcode);
+ struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+
+ if (!cmd) {
+ dev_dbg(&cxlm->pdev->dev,
+ "Opcode 0x%04x unsupported by driver", opcode);
+ continue;
+ }
+
+ set_bit(cmd->info.id, cxlm->enabled_cmds);
+ }
+}
+
+struct cxl_mbox_get_supported_logs {
+ __le16 entries;
+ u8 rsvd[6];
+ struct gsl_entry {
+ uuid_t uuid;
+ __le32 size;
+ } __packed entry[];
+} __packed;
+
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_get_supported_logs *ret;
+ int rc;
+
+ ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
+ 0, ret, cxlm->payload_size);
+ if (rc < 0) {
+ kvfree(ret);
+ return ERR_PTR(rc);
+ }
+
+ return ret;
+}
+
+/**
+ * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlm: The device.
+ *
+ * Return: 0 if enumeration completed successfully.
+ *
+ * CXL devices have optional support for certain commands. This function will
+ * determine the set of supported commands for the hardware and update the
+ * enabled_cmds bitmap in the @cxlm.
+ */
+static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_get_supported_logs *gsl;
+ struct device *dev = &cxlm->pdev->dev;
+ struct cxl_mem_command *cmd;
+ int i, rc;
+
+ gsl = cxl_get_gsl(cxlm);
+ if (IS_ERR(gsl))
+ return PTR_ERR(gsl);
+
+ rc = -ENOENT;
+ for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
+ u32 size = le32_to_cpu(gsl->entry[i].size);
+ uuid_t uuid = gsl->entry[i].uuid;
+ u8 *log;
+
+ dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
+
+ if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
+ continue;
+
+ log = kvmalloc(size, GFP_KERNEL);
+ if (!log) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = cxl_xfer_log(cxlm, &uuid, size, log);
+ if (rc) {
+ kvfree(log);
+ goto out;
+ }
+
+ cxl_walk_cel(cxlm, size, log);
+ kvfree(log);
+
+ /* In case CEL was bogus, enable some default commands. */
+ cxl_for_each_cmd(cmd)
+ if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
+ set_bit(cmd->info.id, cxlm->enabled_cmds);
+
+ /* Found the required CEL */
+ rc = 0;
+ }
+
+out:
+ kvfree(gsl);
+ return rc;
+}
+
+/**
+ * cxl_mem_identify() - Send the IDENTIFY command to the device.
+ * @cxlm: The device to identify.
+ *
+ * Return: 0 if identify was executed successfully.
+ *
+ * This will dispatch the identify command to the device and on success populate
+ * structures to be exported to sysfs.
+ */
+static int cxl_mem_identify(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_identify {
+ char fw_revision[0x10];
+ __le64 total_capacity;
+ __le64 volatile_capacity;
+ __le64 persistent_capacity;
+ __le64 partition_align;
+ __le16 info_event_log_size;
+ __le16 warning_event_log_size;
+ __le16 failure_event_log_size;
+ __le16 fatal_event_log_size;
+ __le32 lsa_size;
+ u8 poison_list_max_mer[3];
+ __le16 inject_poison_limit;
+ u8 poison_caps;
+ u8 qos_telemetry_caps;
+ } __packed id;
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+ sizeof(id));
+ if (rc < 0)
+ return rc;
+
+ /*
+ * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
+	 * For now, only the capacities are exported in sysfs.
+ */
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+
+ cxlm->pmem_range.start = 0;
+ cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+
+ memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+
+ return 0;
+}
+
+static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cxl_mem *cxlm = NULL;
+ u32 regloc_size, regblocks;
+ int rc, regloc, i;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_OFFSET);
+ if (!regloc) {
+ dev_err(dev, "register location dvsec not found\n");
+ return -ENXIO;
+ }
+
+ /* Get the size of the Register Locator DVSEC */
+ pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
+ regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
+
+ regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
+ regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
+
+ for (i = 0; i < regblocks; i++, regloc += 8) {
+ u32 reg_lo, reg_hi;
+ u8 reg_type;
+
+		/* reg_lo carries the BIR and block type alongside address bits */
+ pci_read_config_dword(pdev, regloc, &reg_lo);
+ pci_read_config_dword(pdev, regloc + 4, &reg_hi);
+
+ reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
+
+ if (reg_type == CXL_REGLOC_RBI_MEMDEV) {
+ cxlm = cxl_mem_create(pdev, reg_lo, reg_hi);
+ break;
+ }
+ }
+
+ if (!cxlm)
+ return -ENODEV;
+
+ rc = cxl_mem_setup_regs(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_setup_mailbox(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_enumerate_cmds(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_identify(cxlm);
+ if (rc)
+ return rc;
+
+ return cxl_mem_add_memdev(cxlm);
+}
+
+static const struct pci_device_id cxl_mem_pci_tbl[] = {
+ /* PCI class code for CXL.mem Type-3 Devices */
+ { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
+ { /* terminate list */ },
+};
+MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
+
+static struct pci_driver cxl_mem_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxl_mem_pci_tbl,
+ .probe = cxl_mem_probe,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static __init int cxl_mem_init(void)
+{
+ struct dentry *mbox_debugfs;
+ dev_t devt;
+ int rc;
+
+ rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+ if (rc)
+ return rc;
+
+ cxl_mem_major = MAJOR(devt);
+
+ rc = pci_register_driver(&cxl_mem_driver);
+ if (rc) {
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
+ CXL_MEM_MAX_DEVS);
+ return rc;
+ }
+
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+ mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
+ &cxl_raw_allow_all);
+
+ return 0;
+}
+
+static __exit void cxl_mem_exit(void)
+{
+ debugfs_remove_recursive(cxl_debugfs);
+ pci_unregister_driver(&cxl_mem_driver);
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(cxl_mem_init);
+module_exit(cxl_mem_exit);
+MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
new file mode 100644
index 000000000000..af3ec078cf6c
--- /dev/null
+++ b/drivers/cxl/pci.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#ifndef __CXL_PCI_H__
+#define __CXL_PCI_H__
+
+#define CXL_MEMORY_PROGIF 0x10
+
+/*
+ * See section 8.1 Configuration Space Registers in the CXL 2.0
+ * Specification
+ */
+#define PCI_DVSEC_HEADER1_LENGTH_MASK GENMASK(31, 20)
+#define PCI_DVSEC_VENDOR_ID_CXL 0x1E98
+#define PCI_DVSEC_ID_CXL 0x0
+
+#define PCI_DVSEC_ID_CXL_REGLOC_OFFSET 0x8
+#define PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET 0xC
+
+/* BAR Indicator Register (BIR) */
+#define CXL_REGLOC_BIR_MASK GENMASK(2, 0)
+
+/* Register Block Identifier (RBI) */
+#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
+#define CXL_REGLOC_RBI_EMPTY 0
+#define CXL_REGLOC_RBI_COMPONENT 1
+#define CXL_REGLOC_RBI_VIRT 2
+#define CXL_REGLOC_RBI_MEMDEV 3
+
+#define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
+
+#endif /* __CXL_PCI_H__ */
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 737b207c9e30..452e85ae87a8 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -179,7 +179,10 @@ static int dax_bus_remove(struct device *dev)
struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
struct dev_dax *dev_dax = to_dev_dax(dev);
- return dax_drv->remove(dev_dax);
+ if (dax_drv->remove)
+ dax_drv->remove(dev_dax);
+
+ return 0;
}
static struct bus_type dax_bus_type = {
@@ -1038,7 +1041,7 @@ static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
unsigned long long addr = 0;
char *start, *end, *str;
- ssize_t rc = EINVAL;
+ ssize_t rc = -EINVAL;
str = kstrdup(opt, GFP_KERNEL);
if (!str)
@@ -1392,6 +1395,13 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
struct device_driver *drv = &dax_drv->drv;
int rc = 0;
+ /*
+ * dax_bus_probe() calls dax_drv->probe() unconditionally.
+ * So better be safe than sorry and ensure it is provided.
+ */
+ if (!dax_drv->probe)
+ return -EINVAL;
+
INIT_LIST_HEAD(&dax_drv->ids);
drv->owner = module;
drv->name = mod_name;
@@ -1409,7 +1419,15 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
mutex_unlock(&dax_bus_lock);
if (rc)
return rc;
- return driver_register(drv);
+
+ rc = driver_register(drv);
+ if (rc && dax_drv->match_always) {
+ mutex_lock(&dax_bus_lock);
+ match_always_count -= dax_drv->match_always;
+ mutex_unlock(&dax_bus_lock);
+ }
+
+ return rc;
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 72b92f95509f..1e946ad7780a 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -39,7 +39,7 @@ struct dax_device_driver {
struct list_head ids;
int match_always;
int (*probe)(struct dev_dax *dev);
- int (*remove)(struct dev_dax *dev);
+ void (*remove)(struct dev_dax *dev);
};
int __dax_driver_register(struct dax_device_driver *dax_drv,
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 5da2980bb16b..db92573c94e8 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -452,15 +452,9 @@ int dev_dax_probe(struct dev_dax *dev_dax)
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
-static int dev_dax_remove(struct dev_dax *dev_dax)
-{
- /* all probe actions are unwound by devm */
- return 0;
-}
-
static struct dax_device_driver device_dax_driver = {
.probe = dev_dax_probe,
- .remove = dev_dax_remove,
+ /* all probe actions are unwound by devm, so .remove isn't necessary */
.match_always = 1,
};
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 403ec42472d1..ac231cc36359 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -136,7 +136,7 @@ err_res_name:
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
+static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
struct device *dev = &dev_dax->dev;
@@ -176,11 +176,9 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
kfree(data);
dev_set_drvdata(dev, NULL);
}
-
- return 0;
}
#else
-static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
+static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
/*
* Without hotremove purposely leak the request_mem_region() for the
@@ -190,7 +188,6 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
* request_mem_region().
*/
any_hotremove_failed = true;
- return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
index 863c114fd88c..d81dc35fd65d 100644
--- a/drivers/dax/pmem/compat.c
+++ b/drivers/dax/pmem/compat.c
@@ -41,10 +41,9 @@ static int dax_pmem_compat_release(struct device *dev, void *data)
return 0;
}
-static int dax_pmem_compat_remove(struct device *dev)
+static void dax_pmem_compat_remove(struct device *dev)
{
device_for_each_child(dev, NULL, dax_pmem_compat_release);
- return 0;
}
static struct nd_device_driver dax_pmem_compat_driver = {
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index cadbd0a1a1ef..5fa6ae9dbc8b 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -480,7 +480,7 @@ static void dax_free_inode(struct inode *inode)
kfree(dax_dev->host);
dax_dev->host = NULL;
if (inode->i_rdev)
- ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+ ida_simple_remove(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 6aa10de792b3..bf3047896e41 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -757,6 +757,9 @@ static void devfreq_dev_release(struct device *dev)
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
+ if (devfreq->opp_table)
+ dev_pm_opp_put_opp_table(devfreq->opp_table);
+
mutex_destroy(&devfreq->lock);
kfree(devfreq);
}
@@ -844,6 +847,10 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
+ devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
+ if (IS_ERR(devfreq->opp_table))
+ devfreq->opp_table = NULL;
+
atomic_set(&devfreq->suspend_count, 0);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
@@ -893,13 +900,13 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_devfreq;
devfreq->nb_min.notifier_call = qos_min_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min,
DEV_PM_QOS_MIN_FREQUENCY);
if (err)
goto err_devfreq;
devfreq->nb_max.notifier_call = qos_max_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max,
DEV_PM_QOS_MAX_FREQUENCY);
if (err)
goto err_devfreq;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index 2a52f97b542d..70f44b3ca42e 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -40,7 +40,7 @@
/*
* Definition of governor attribute flags except for common sysfs attributes
* - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interal sysfs attribute
+ * : Indicate polling_interval sysfs attribute
* - DEVFREQ_GOV_ATTR_TIMER
* : Indicate timer sysfs attribute
*/
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 63332e4a65ae..b094132bd20b 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -19,18 +19,16 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
unsigned long child_freq = ULONG_MAX;
- struct dev_pm_opp *opp;
- int i, count, ret = 0;
+ struct dev_pm_opp *opp, *p_opp;
+ int i, count;
/*
* If the devfreq device with passive governor has the specific method
* to determine the next frequency, should use the get_target_freq()
* of struct devfreq_passive_data.
*/
- if (p_data->get_target_freq) {
- ret = p_data->get_target_freq(devfreq, freq);
- goto out;
- }
+ if (p_data->get_target_freq)
+ return p_data->get_target_freq(devfreq, freq);
/*
* If the parent and passive devfreq device uses the OPP table,
@@ -56,26 +54,35 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
* list of parent device. Because in this case, *freq is temporary
* value which is decided by ondemand governor.
*/
- opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- goto out;
- }
+ if (devfreq->opp_table && parent_devfreq->opp_table) {
+ p_opp = devfreq_recommended_opp(parent_devfreq->dev.parent,
+ freq, 0);
+ if (IS_ERR(p_opp))
+ return PTR_ERR(p_opp);
+
+ opp = dev_pm_opp_xlate_required_opp(parent_devfreq->opp_table,
+ devfreq->opp_table, p_opp);
+ dev_pm_opp_put(p_opp);
- dev_pm_opp_put(opp);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *freq = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+ }
/*
- * Get the OPP table's index of decided freqeuncy by governor
+ * Get the OPP table's index of decided frequency by governor
* of parent device.
*/
for (i = 0; i < parent_devfreq->profile->max_state; i++)
if (parent_devfreq->profile->freq_table[i] == *freq)
break;
- if (i == parent_devfreq->profile->max_state) {
- ret = -EINVAL;
- goto out;
- }
+ if (i == parent_devfreq->profile->max_state)
+ return -EINVAL;
/* Get the suitable frequency by using index of parent device. */
if (i < devfreq->profile->max_state) {
@@ -88,8 +95,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
/* Return the suitable frequency for passive device. */
*freq = child_freq;
-out:
- return ret;
+ return 0;
}
static int devfreq_passive_notifier_call(struct notifier_block *nb,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e912166a993..9e9d3b4c6d48 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -400,7 +400,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
default:
ret = -EINVAL;
goto err_edev;
- };
+ }
no_pmu:
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 117cad7968ab..ce83f883ca65 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -647,7 +647,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
return PTR_ERR(opp);
}
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
@@ -849,7 +849,7 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = dev_pm_opp_of_add_table(&pdev->dev);
+ err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
goto put_hw;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
This is marked experimental because we don't yet have a consistent
execution context and memory management between drivers.
+config DMABUF_DEBUG
+ bool "DMA-BUF debug checks"
+ default y if DMA_API_DEBUG
+ help
+ This option enables additional checks for DMA-BUF importers and
+	  exporters. Specifically, it validates that importers do not peek at the
+ underlying struct page when they import a buffer.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e63684d4cd90..f264b70c383e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -480,7 +493,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -496,9 +509,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. If ops are
+ * missing or allocation of the struct dma_buf fails, a negative error is
+ * returned.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
@@ -584,7 +598,7 @@ err_module:
EXPORT_SYMBOL_GPL(dma_buf_export);
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -608,10 +622,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -652,9 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
}
EXPORT_SYMBOL_GPL(dma_buf_put);
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+	/*
+	 * To catch abuse of the underlying struct page by importers, mix
+	 * up the bits, but take care to preserve the low SG_ bits so as
+	 * not to corrupt the sgt. The mixing is undone in __unmap_dma_buf
+	 * before passing the sgt back to the exporter.
+	 */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+}
+
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
+				      enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+ if (!IS_ERR_OR_NULL(sg_table))
+ mangle_sg_table(sg_table);
+
+ return sg_table;
+}
+
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @importer_ops: [in] importer operations for the attachment
@@ -663,6 +704,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -721,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
goto err_unlock;
}
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
@@ -768,13 +812,24 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ /* uses XOR, hence this unmangles */
+ mangle_sg_table(sg_table);
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -785,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
@@ -805,9 +860,15 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
* dma_buf_pin - Lock down the DMA-buf
- *
* @attach: [in] attachment which should be pinned
*
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
* Returns:
* 0 on success, negative error code on failure.
*/
@@ -816,6 +877,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->pin)
@@ -826,14 +889,19 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_pin);
/**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
* @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
@@ -894,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
}
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ sg_table = __map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -957,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ __unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
@@ -1001,15 +1069,15 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
+ *
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
* void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -1061,11 +1129,12 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* shootdowns would increase the complexity quite a bit.
*
* Interface::
+ *
* int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
* unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
@@ -1098,6 +1167,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency; callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1108,6 +1182,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1141,6 +1217,8 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
@@ -1199,7 +1277,10 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
*
* Returns 0 on success, or a negative errno code otherwise.
*/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 7475e09b0680..d64fc03929be 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -312,22 +312,25 @@ void __dma_fence_might_wait(void)
/**
- * dma_fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_timestamp_locked - signal completion of a fence
* @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
- * only be effective the first time.
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
*
- * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
- * held.
+ * Unlike dma_fence_signal_timestamp(), this function must be called with
+ * &dma_fence.lock held.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
-int dma_fence_signal_locked(struct dma_fence *fence)
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp)
{
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
@@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence)
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
- fence->timestamp = ktime_get();
+ fence->timestamp = timestamp;
set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
@@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence)
return 0;
}
+EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
+
+/**
+ * dma_fence_signal_timestamp - signal completion of a fence
+ * @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!fence)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_timestamp_locked(fence, timestamp);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal_timestamp);
+
+/**
+ * dma_fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_locked(struct dma_fence *fence)
+{
+ return dma_fence_signal_timestamp_locked(fence, ktime_get());
+}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
spin_lock_irqsave(fence->lock, flags);
- ret = dma_fence_signal_locked(fence);
+ ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_end_signalling(tmp);
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index afd22c9dbdcf..6b5db954569f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
unsigned int fd_flags,
unsigned int heap_flags)
{
+ struct dma_buf *dmabuf;
+ int fd;
+
/*
* Allocations from all heaps have to begin
* and end on page boundaries.
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
if (!len)
return -EINVAL;
- return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, fd_flags);
+ if (fd < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ }
+ return fd;
}
static int dma_heap_open(struct inode *inode, struct file *file)
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 3c4e34301172..5d64eccd21d6 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
buffer->vaddr = NULL;
}
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
@@ -268,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
.release = cma_heap_dma_buf_release,
};
-static int cma_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;
@@ -286,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -345,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
-
- return ret;
+ return dmabuf;
free_pages:
kfree(buffer->pages);
@@ -362,7 +357,7 @@ free_cma:
free_buffer:
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops cma_heap_ops = {
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 17e0e9a68baf..29e49ac17251 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
return NULL;
}
-static int system_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap,
* Avoid trying to allocate memory if the process
* has been killed by SIGKILL
*/
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
goto free_buffer;
+ }
page = alloc_largest_available(size_remaining, max_order);
if (!page)
@@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
- return ret;
+ return dmabuf;
free_pages:
for_each_sgtable_sg(table, sg, i) {
@@ -418,7 +413,7 @@ free_buffer:
__free_pages(page, compound_order(page));
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops system_heap_ops = {
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index e593064341c8..c8a12d7ad71a 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -471,8 +471,11 @@ static int thread_signal_callback(void *arg)
dma_fence_signal(f1);
smp_store_mb(cb.seen, false);
- if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
- miss++, cb.seen = true;
+ if (!f2 ||
+ dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
+ miss++;
+ cb.seen = true;
+ }
if (!t->before)
dma_fence_signal(f1);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d242c7632621..0c2827fd8c19 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,13 +124,6 @@ config BCM_SBA_RAID
has the capability to offload memcpy, xor and pq computation
for raid5/6.
-config COH901318
- bool "ST-Ericsson COH901318 DMA support"
- select DMA_ENGINE
- depends on ARCH_U300 || COMPILE_TEST
- help
- Enable support for ST-Ericsson COH 901 318 DMA.
-
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
depends on ARCH_BCM2835
@@ -179,6 +172,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
tristate "Synopsys DesignWare AXI DMA support"
depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -378,14 +372,14 @@ config MILBEAUT_XDMAC
XDMAC device.
config MMP_PDMA
- bool "MMP PDMA support"
+ tristate "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
config MMP_TDMA
- bool "MMP Two-Channel DMA support"
+ tristate "MMP Two-Channel DMA support"
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select GENERIC_ALLOCATOR
@@ -519,13 +513,6 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
-config SIRF_DMA
- tristate "CSR SiRFprimaII/SiRFmarco DMA support"
- depends on ARCH_SIRF
- select DMA_ENGINE
- help
- Enable support for the CSR SiRFprimaII DMA engine.
-
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
@@ -710,15 +697,6 @@ config XILINX_ZYNQMP_DPDMA
driver provides the dmaengine required by the DisplayPort subsystem
display driver.
-config ZX_DMA
- tristate "ZTE ZX DMA support"
- depends on ARCH_ZX || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support the DMA engine for ZTE ZX family platform devices.
-
-
# driver files
source "drivers/dma/bestcomm/Kconfig"
@@ -740,6 +718,8 @@ source "drivers/dma/ti/Kconfig"
source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+source "drivers/dma/lgm/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 948a8da05f8b..aa69094e3547 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
-obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
@@ -65,7 +64,6 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
@@ -79,9 +77,9 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
+obj-$(CONFIG_INTEL_LDMA) += lgm/
obj-y += mediatek/
obj-y += qcom/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7eaee5b705b1..30ae36124b1d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -54,6 +54,25 @@ module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
+/**
+ * struct at_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @cap_mask: dma_capability flags supported by the platform
+ */
+struct at_dma_platform_data {
+ unsigned int nr_channels;
+ dma_cap_mask_t cap_mask;
+};
+
+/**
+ * struct at_dma_slave - Controller-specific information about a slave
+ * @dma_dev: required DMA master device
+ * @cfg: Platform-specific initializer for the CFG register
+ */
+struct at_dma_slave {
+ struct device *dma_dev;
+ u32 cfg;
+};
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 80fc2fe8c77e..4d1ebc040031 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -7,8 +7,6 @@
#ifndef AT_HDMAC_REGS_H
#define AT_HDMAC_REGS_H
-#include <linux/platform_data/dma-atmel.h>
-
#define AT_DMA_MAX_NR_CHANNELS 8
@@ -148,7 +146,31 @@
#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
/* Bitfields in CFG */
-/* are in at_hdmac.h */
+#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
+
+#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
+#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
+#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mode */
+#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mode */
+#define ATC_SRC_H2SEL_SW (0x0 << 9)
+#define ATC_SRC_H2SEL_HW (0x1 << 9)
+#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
+#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mode */
+#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mode */
+#define ATC_DST_H2SEL_SW (0x0 << 13)
+#define ATC_DST_H2SEL_HW (0x1 << 13)
+#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
+#define ATC_SOD (0x1 << 16) /* Stop On Done */
+#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
+#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
+#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
+#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
+#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
+#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
+#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
+#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
+#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
+#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
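Handshaking identifiers wider than four bits split between the _PER and _PER_MSB fields, which is why ATC_PER_MSB() extracts bits 4-5 separately. A brief editor's sketch of composing a CFG word (the interface id 0x13 is hypothetical):

	u32 cfg = ATC_SRC_PER(0x13) | ATC_SRC_PER_MSB(0x13) |	/* src request 0x13 */
		  ATC_SRC_H2SEL_HW |				/* hardware handshaking */
		  ATC_FIFOCFG_HALFFIFO;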
/* Bitfields in SPIP */
#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
deleted file mode 100644
index 95b9b2f5358e..000000000000
--- a/drivers/dma/coh901318.c
+++ /dev/null
@@ -1,2808 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/dma/coh901318.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * DMA driver for COH 901 318
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h> /* printk() */
-#include <linux/fs.h> /* everything... */
-#include <linux/scatterlist.h>
-#include <linux/slab.h> /* kmalloc() */
-#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/irqreturn.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/platform_data/dma-coh901318.h>
-#include <linux/of_dma.h>
-
-#include "coh901318.h"
-#include "dmaengine.h"
-
-#define COH901318_MOD32_MASK (0x1F)
-#define COH901318_WORD_MASK (0xFFFFFFFF)
-/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
-#define COH901318_INT_STATUS1 (0x0000)
-#define COH901318_INT_STATUS2 (0x0004)
-/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_TC_INT_STATUS1 (0x0008)
-#define COH901318_TC_INT_STATUS2 (0x000C)
-/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_TC_INT_CLEAR1 (0x0010)
-#define COH901318_TC_INT_CLEAR2 (0x0014)
-/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
-#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
-/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
-#define COH901318_BE_INT_STATUS1 (0x0020)
-#define COH901318_BE_INT_STATUS2 (0x0024)
-/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_BE_INT_CLEAR1 (0x0028)
-#define COH901318_BE_INT_CLEAR2 (0x002C)
-/* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
-#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
-
-/*
- * CX_CFG - Channel Configuration Registers 32bit (R/W)
- */
-#define COH901318_CX_CFG (0x0100)
-#define COH901318_CX_CFG_SPACING (0x04)
-/* Channel enable activates the dma job */
-#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
-#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
-/* Request Mode */
-#define COH901318_CX_CFG_RM_MASK (0x00000006)
-#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
-#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
-#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
-/* Linked channel request field. RM must == 11 */
-#define COH901318_CX_CFG_LCRF_SHIFT 3
-#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
-#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
-/* Terminal Counter Interrupt Request Mask */
-#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
-#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
-/* Bus Error interrupt Mask */
-#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
-#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
-
-/*
- * CX_STAT - Channel Status Registers 32bit (R/-)
- */
-#define COH901318_CX_STAT (0x0200)
-#define COH901318_CX_STAT_SPACING (0x04)
-#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
-#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
-#define COH901318_CX_STAT_ACTIVE (0x00000002)
-#define COH901318_CX_STAT_ENABLED (0x00000001)
-
-/*
- * CX_CTRL - Channel Control Registers 32bit (R/W)
- */
-#define COH901318_CX_CTRL (0x0400)
-#define COH901318_CX_CTRL_SPACING (0x10)
-/* Transfer Count Enable */
-#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
-#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
-/* Transfer Count Value 0 - 4095 */
-#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
-/* Burst count */
-#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
-#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
-/* Source bus size */
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
-/* Source address increment */
-#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
-#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
-/* Destination Bus Size */
-#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
-/* Destination address increment */
-#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
-#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
-/* Master Mode (Master2 is only connected to MSL) */
-#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
-#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
-/* Terminal Count flag to PER enable */
-#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
-#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
-/* Terminal Count flags to CPU enable */
-#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
-#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
-/* Hand shake to peripheral */
-#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
-#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
-#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
-#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
-/* DMA mode */
-#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
-#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
-/* Primary Request Data Destination */
-#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
-#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
-#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
-
-/*
- * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
- */
-#define COH901318_CX_SRC_ADDR (0x0404)
-#define COH901318_CX_SRC_ADDR_SPACING (0x10)
-
-/*
- * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
- */
-#define COH901318_CX_DST_ADDR (0x0408)
-#define COH901318_CX_DST_ADDR_SPACING (0x10)
-
-/*
- * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
- */
-#define COH901318_CX_LNK_ADDR (0x040C)
-#define COH901318_CX_LNK_ADDR_SPACING (0x10)
-#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
-
-/**
- * struct coh901318_params - parameters for DMAC configuration
- * @config: DMA config register
- * @ctrl_lli_last: DMA control register for the last lli in the list
- * @ctrl_lli: DMA control register for an lli
- * @ctrl_lli_chained: DMA control register for a chained lli
- */
-struct coh901318_params {
- u32 config;
- u32 ctrl_lli_last;
- u32 ctrl_lli;
- u32 ctrl_lli_chained;
-};
-
-/**
- * struct coh_dma_channel - dma channel base
- * @name: ascii name of dma channel
- * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptors
- * @priority_high: channel priority; 0 means low priority, otherwise high.
- * @param: configuration parameters
- */
-struct coh_dma_channel {
- const char name[32];
- const int number;
- const int desc_nbr_max;
- const int priority_high;
- const struct coh901318_params param;
-};
-
-/**
- * struct powersave - DMA power save structure
- * @lock: lock protecting data in this struct
- * @started_channels: bit mask indicating active dma channels
- */
-struct powersave {
- spinlock_t lock;
- u64 started_channels;
-};
-
-/* Lists all dma slave channels.
- * Syntax is [A1, B1, A2, B2, ..., -1, -1]:
- * each pair selects all channels from A to B inclusive; the end of
- * the list is marked with -1, -1 (a walking sketch follows the
- * arrays below).
- */
-static int dma_slave_channels[] = {
- U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
- U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
-
-/* points out all dma memcpy channels. */
-static int dma_memcpy_channels[] = {
- U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
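Each consecutive pair in these arrays is an inclusive channel range, terminated by the -1, -1 sentinel. A hypothetical helper (editor's sketch, not part of the driver) that walks such a list:

	static void for_each_listed_channel(const int *pairs, void (*fn)(int ch))
	{
		int i, ch;

		for (i = 0; pairs[i] != -1; i += 2)
			for (ch = pairs[i]; ch <= pairs[i + 1]; ch++)
				fn(ch);
	}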
-
-#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
- COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
- COH901318_CX_CFG_LCR_DISABLE | \
- COH901318_CX_CFG_TC_IRQ_ENABLE | \
- COH901318_CX_CFG_BE_IRQ_ENABLE)
-#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_DISABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_DISABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_ENABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-
-static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
- {
- .number = U300_DMA_MSL_TX_0,
- .name = "MSL TX 0",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_TX_1,
- .name = "MSL TX 1",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_2,
- .name = "MSL TX 2",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .desc_nbr_max = 10,
- },
- {
- .number = U300_DMA_MSL_TX_3,
- .name = "MSL TX 3",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_4,
- .name = "MSL TX 4",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_5,
- .name = "MSL TX 5",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_TX_6,
- .name = "MSL TX 6",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_RX_0,
- .name = "MSL RX 0",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_RX_1,
- .name = "MSL RX 1",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_2,
- .name = "MSL RX 2",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_3,
- .name = "MSL RX 3",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_4,
- .name = "MSL RX 4",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_5,
- .name = "MSL RX 5",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_6,
- .name = "MSL RX 6",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_MMCSD_RX_TX,
- .name = "MMCSD RX TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
-
- },
- {
- .number = U300_DMA_MSPRO_TX,
- .name = "MSPRO TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSPRO_RX,
- .name = "MSPRO RX",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_UART0_TX,
- .name = "UART0 TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_UART0_RX,
- .name = "UART0 RX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_APEX_TX,
- .name = "APEX TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_APEX_RX,
- .name = "APEX RX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_PCM_I2S0_TX,
- .name = "PCM I2S0 TX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_PCM_I2S0_RX,
- .name = "PCM I2S0 RX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_PCM_I2S1_TX,
- .name = "PCM I2S1 TX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_PCM_I2S1_RX,
- .name = "PCM I2S1 RX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_XGAM_CDI,
- .name = "XGAM CDI",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_XGAM_PDI,
- .name = "XGAM PDI",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_SPI_TX,
- .name = "SPI TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_SPI_RX,
- .name = "SPI RX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
-
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_0,
- .name = "GENERAL 00",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_1,
- .name = "GENERAL 01",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_2,
- .name = "GENERAL 02",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_3,
- .name = "GENERAL 03",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_4,
- .name = "GENERAL 04",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_5,
- .name = "GENERAL 05",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_6,
- .name = "GENERAL 06",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_7,
- .name = "GENERAL 07",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_8,
- .name = "GENERAL 08",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_UART1_TX,
- .name = "UART1 TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_UART1_RX,
- .name = "UART1 RX",
- .priority_high = 0,
- }
-};
-
-#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
-
-#ifdef VERBOSE_DEBUG
-#define COH_DBG(x) ({ if (1) x; 0; })
-#else
-#define COH_DBG(x) ({ if (0) x; 0; })
-#endif
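The COH_DBG() wrapper relies on a GCC statement expression: with VERBOSE_DEBUG unset, its argument is still parsed and type-checked behind if (0) but compiles away as dead code, and the trailing 0 gives the expression a harmless value.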
-
-struct coh901318_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
- struct scatterlist *sg;
- unsigned int sg_len;
- struct coh901318_lli *lli;
- enum dma_transfer_direction dir;
- unsigned long flags;
- u32 head_config;
- u32 head_ctrl;
-};
-
-struct coh901318_base {
- struct device *dev;
- void __iomem *virtbase;
- unsigned int irq;
- struct coh901318_pool pool;
- struct powersave pm;
- struct dma_device dma_slave;
- struct dma_device dma_memcpy;
- struct coh901318_chan *chans;
-};
-
-struct coh901318_chan {
- spinlock_t lock;
- int allocated;
- int id;
- int stopped;
-
- struct work_struct free_work;
- struct dma_chan chan;
-
- struct tasklet_struct tasklet;
-
- struct list_head active;
- struct list_head queue;
- struct list_head free;
-
- unsigned long nbr_active_done;
- unsigned long busy;
-
- struct dma_slave_config config;
- u32 addr;
- u32 ctrl;
-
- struct coh901318_base *base;
-};
-
-static void coh901318_list_print(struct coh901318_chan *cohc,
- struct coh901318_lli *lli)
-{
- struct coh901318_lli *l = lli;
- int i = 0;
-
- while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad"
- ", dst %pad, link %pad virt_link_addr 0x%p\n",
- i, l, l->control, &l->src_addr, &l->dst_addr,
- &l->link_addr, l->virt_link_addr);
- i++;
- l = l->virt_link_addr;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
-
-static struct coh901318_base *debugfs_dma_base;
-static struct dentry *dma_dentry;
-
-static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *f_pos)
-{
- u64 started_channels = debugfs_dma_base->pm.started_channels;
- int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
- char *dev_buf;
- char *tmp;
- int ret;
- int i;
-
- dev_buf = kmalloc(4*1024, GFP_KERNEL);
- if (dev_buf == NULL)
- return -ENOMEM;
- tmp = dev_buf;
-
- tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
-
- for (i = 0; i < U300_DMA_CHANNELS; i++) {
- if (started_channels & (1ULL << i))
- tmp += sprintf(tmp, "channel %d\n", i);
- }
-
- tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
-
- ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
- tmp - dev_buf);
- kfree(dev_buf);
- return ret;
-}
-
-static const struct file_operations coh901318_debugfs_status_operations = {
- .open = simple_open,
- .read = coh901318_debugfs_read,
- .llseek = default_llseek,
-};
-
-
-static int __init init_coh901318_debugfs(void)
-{
-
- dma_dentry = debugfs_create_dir("dma", NULL);
-
- debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
- &coh901318_debugfs_status_operations);
- return 0;
-}
-
-static void __exit exit_coh901318_debugfs(void)
-{
- debugfs_remove_recursive(dma_dentry);
-}
-
-module_init(init_coh901318_debugfs);
-module_exit(exit_coh901318_debugfs);
-#else
-
-#define COH901318_DEBUGFS_ASSIGN(x, y)
-
-#endif /* CONFIG_DEBUG_FS */
-
-static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct coh901318_chan, chan);
-}
-
-static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config,
- enum dma_transfer_direction direction);
-
-static inline const struct coh901318_params *
-cohc_chan_param(struct coh901318_chan *cohc)
-{
- return &chan_config[cohc->id].param;
-}
-
-static inline const struct coh_dma_channel *
-cohc_chan_conf(struct coh901318_chan *cohc)
-{
- return &chan_config[cohc->id];
-}
-
-static void enable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- pm->started_channels &= ~(1ULL << cohc->id);
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
-static void disable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- pm->started_channels |= (1ULL << cohc->id);
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
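Both helpers simply flip this channel's bit in the shared started_channels mask under pm->lock. A hypothetical query over the same mask (editor's sketch, not in the driver) would look like:

	static bool coh901318_any_channel_busy(struct powersave *pm)
	{
		unsigned long flags;
		bool busy;

		spin_lock_irqsave(&pm->lock, flags);
		busy = pm->started_channels != 0;
		spin_unlock_irqrestore(&pm->lock, flags);

		return busy;
	}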
-
-static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(control,
- virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
- return 0;
-}
-
-static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(conf,
- virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- return 0;
-}
-
-
-static int coh901318_start(struct coh901318_chan *cohc)
-{
- u32 val;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- disable_powersave(cohc);
-
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Enable channel */
- val |= COH901318_CX_CFG_CH_ENABLE;
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- return 0;
-}
-
-static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *lli)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- BUG_ON(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*channel) &
- COH901318_CX_STAT_ACTIVE);
-
- writel(lli->src_addr,
- virtbase + COH901318_CX_SRC_ADDR +
- COH901318_CX_SRC_ADDR_SPACING * channel);
-
- writel(lli->dst_addr, virtbase +
- COH901318_CX_DST_ADDR +
- COH901318_CX_DST_ADDR_SPACING * channel);
-
- writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
- COH901318_CX_LNK_ADDR_SPACING * channel);
-
- writel(lli->control, virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
-
- return 0;
-}
-
-static struct coh901318_desc *
-coh901318_desc_get(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *desc;
-
- if (list_empty(&cohc->free)) {
- /* alloc a new desc because we're out of free ones
- * TODO: alloc a pile of descs instead of just one,
- * to avoid many small allocations.
- */
- desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
- if (desc == NULL)
- goto out;
- INIT_LIST_HEAD(&desc->node);
- dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
- } else {
- /* Reuse an old desc. */
- desc = list_first_entry(&cohc->free,
- struct coh901318_desc,
- node);
- list_del(&desc->node);
- /* Reinitialize enough of the fields for safe reuse */
- desc->sg = NULL;
- desc->sg_len = 0;
- desc->desc.callback = NULL;
- desc->desc.callback_param = NULL;
- }
-
- out:
- return desc;
-}
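The TODO above could be addressed by refilling the free list in batches; a hypothetical sketch (editor's illustration; locking of cohc->free is elided here and would follow the rest of the driver):

	static int coh901318_desc_refill(struct coh901318_chan *cohc, int n)
	{
		struct coh901318_desc *desc;
		int i;

		for (i = 0; i < n; i++) {
			desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
			if (!desc)
				return -ENOMEM;
			INIT_LIST_HEAD(&desc->node);
			dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
			list_add_tail(&desc->node, &cohc->free);
		}
		return 0;
	}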
-
-static void
-coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
-{
- list_add_tail(&cohd->node, &cohc->free);
-}
-
-/* call with irq lock held */
-static void
-coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->active);
-}
-
-static struct coh901318_desc *
-coh901318_first_active_get(struct coh901318_chan *cohc)
-{
- return list_first_entry_or_null(&cohc->active, struct coh901318_desc,
- node);
-}
-
-static void
-coh901318_desc_remove(struct coh901318_desc *cohd)
-{
- list_del(&cohd->node);
-}
-
-static void
-coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->queue);
-}
-
-static struct coh901318_desc *
-coh901318_first_queued(struct coh901318_chan *cohc)
-{
- return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
- node);
-}
-
-static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
-{
- struct coh901318_lli *lli = in_lli;
- u32 bytes = 0;
-
- while (lli) {
- bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
- lli = lli->virt_link_addr;
- }
- return bytes;
-}
-
-/*
- * Get the number of bytes left to transfer on this channel.
- * For an exact figure the channel should be stopped first, but
- * the function can also be called on a running channel for a
- * rough estimate.
- */
-static u32 coh901318_get_bytes_left(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_desc *cohd;
- struct list_head *pos;
- unsigned long flags;
- u32 left = 0;
- int i = 0;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /*
- * If there are many queued jobs, we iterate and add the
- * size of them all. We take a special look on the first
- * job though, since it is probably active.
- */
- list_for_each(pos, &cohc->active) {
- /*
- * The first job in the list will be working on the
- * hardware. The job can be stopped but still active,
- * so that the transfer counter is somewhere inside
- * the buffer.
- */
- cohd = list_entry(pos, struct coh901318_desc, node);
-
- if (i == 0) {
- struct coh901318_lli *lli;
- dma_addr_t ladd;
-
- /* Read current transfer count value */
- left = readl(cohc->base->virtbase +
- COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * cohc->id) &
- COH901318_CX_CTRL_TC_VALUE_MASK;
-
- /* See if the transfer is linked... */
- ladd = readl(cohc->base->virtbase +
- COH901318_CX_LNK_ADDR +
- COH901318_CX_LNK_ADDR_SPACING *
- cohc->id) &
- ~COH901318_CX_LNK_LINK_IMMEDIATE;
- /* Single transaction */
- if (!ladd)
- continue;
-
- /*
- * Linked transaction, follow the lli, find the
- * currently processing lli, and proceed to the next
- */
- lli = cohd->lli;
- while (lli && lli->link_addr != ladd)
- lli = lli->virt_link_addr;
-
- if (lli)
- lli = lli->virt_link_addr;
-
- /*
- * Follow remaining lli links around to count the total
- * number of bytes left
- */
- left += coh901318_get_bytes_in_lli(lli);
- } else {
- left += coh901318_get_bytes_in_lli(cohd->lli);
- }
- i++;
- }
-
- /* Also count bytes in the queued jobs */
- list_for_each(pos, &cohc->queue) {
- cohd = list_entry(pos, struct coh901318_desc, node);
- left += coh901318_get_bytes_in_lli(cohd->lli);
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return left;
-}
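As the comment above notes, an exact residue requires the channel to be stopped first. A hypothetical caller (editor's sketch) combining this with the pause/resume helpers defined below:

	static u32 example_exact_residue(struct dma_chan *chan)
	{
		u32 left;

		coh901318_pause(chan);
		left = coh901318_get_bytes_left(chan);
		coh901318_resume(chan);

		return left;
	}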
-
-/*
- * Pauses a transfer without losing data. Enables power save.
- * Use this function in conjunction with coh901318_resume.
- */
-static int coh901318_pause(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable channel in HW */
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Stopping infinite transfer */
- if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
- (val & COH901318_CX_CFG_CH_ENABLE))
- cohc->stopped = 1;
-
-
- val &= ~COH901318_CX_CFG_CH_ENABLE;
- /* Enable twice, HW bug work around */
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Spin-wait for it to actually go inactive */
- while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
- channel) & COH901318_CX_STAT_ACTIVE)
- cpu_relax();
-
- /* Check if we stopped an active job */
- if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
- cohc->stopped = 1;
-
- enable_powersave(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
- return 0;
-}
-
-/*
- * Resumes a transfer that has been paused via coh901318_pause().
- * Power save is handled.
- */
-static int coh901318_resume(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- disable_powersave(cohc);
-
- if (cohc->stopped) {
- /* Enable channel in HW */
- val = readl(cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- val |= COH901318_CX_CFG_CH_ENABLE;
-
- writel(val, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
-
- cohc->stopped = 0;
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
- return 0;
-}
-
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned long ch_nr = (unsigned long) chan_id;
-
- if (ch_nr == to_coh901318_chan(chan)->id)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(coh901318_filter_id);
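Since coh901318_filter_id() is exported, a client can pin a specific channel by number through the generic dmaengine request path; for example (editor's sketch, using a channel id from the table above):

	static struct dma_chan *example_request_uart0_tx(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, coh901318_filter_id,
					   (void *)U300_DMA_UART0_TX);
	}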
-
-struct coh901318_filter_args {
- struct coh901318_base *base;
- unsigned int ch_nr;
-};
-
-static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data)
-{
- struct coh901318_filter_args *args = data;
-
- if (&args->base->dma_slave == chan->device &&
- args->ch_nr == to_coh901318_chan(chan)->id)
- return true;
-
- return false;
-}
-
-static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct coh901318_filter_args args = {
- .base = ofdma->of_dma_data,
- .ch_nr = dma_spec->args[0],
- };
- dma_cap_mask_t cap;
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- return dma_request_channel(cap, coh901318_filter_base_and_id, &args);
-}
-/*
- * DMA channel allocation
- */
-static int coh901318_config(struct coh901318_chan *cohc,
- struct coh901318_params *param)
-{
- const struct coh901318_params *p;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- if (param)
- p = param;
- else
- p = cohc_chan_param(cohc);
-
- /* Clear any pending BE or TC interrupt */
- if (channel < 32) {
- writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (channel - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (channel - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- coh901318_set_conf(cohc, p->config);
- coh901318_set_ctrl(cohc, p->ctrl_lli_last);
-
- return 0;
-}
-
-/* Caller must hold the channel lock.
- * Starts the first queued job, if any.
- * TODO: start all queued jobs in one go
- *
- * Returns the descriptor if a queued job was started, otherwise NULL
- * (in particular, NULL is returned when the queue is empty).
- */
-static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *cohd;
-
- /*
- * start queued jobs, if any
- * TODO: transmit all queued jobs in one go
- */
- cohd = coh901318_first_queued(cohc);
-
- if (cohd != NULL) {
- /* Remove from queue */
- coh901318_desc_remove(cohd);
- /* initiate DMA job */
- cohc->busy = 1;
-
- coh901318_desc_submit(cohc, cohd);
-
- /* Program the transaction head */
- coh901318_set_conf(cohc, cohd->head_config);
- coh901318_set_ctrl(cohc, cohd->head_ctrl);
- coh901318_prep_linked_list(cohc, cohd->lli);
-
- /* start dma job on this channel */
- coh901318_start(cohc);
-
- }
-
- return cohd;
-}
-
-/*
- * This tasklet is called from the interrupt handler to
- * handle each descriptor (DMA job) that is sent to a channel.
- */
-static void dma_tasklet(struct tasklet_struct *t)
-{
- struct coh901318_chan *cohc = from_tasklet(cohc, t, tasklet);
- struct coh901318_desc *cohd_fin;
- unsigned long flags;
- struct dmaengine_desc_callback cb;
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->nbr_active_done);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* get first active descriptor entry from list */
- cohd_fin = coh901318_first_active_get(cohc);
-
- if (cohd_fin == NULL)
- goto err;
-
- /* locate callback to client */
- dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
-
- /* sign this job as completed on the channel */
- dma_cookie_complete(&cohd_fin->desc);
-
- /* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd_fin);
- coh901318_desc_free(cohc, cohd_fin);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- /* Call the callback when we're done */
- dmaengine_desc_callback_invoke(&cb, NULL);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /*
-	 * If another interrupt fired while this tasklet was being
-	 * scheduled, we do not get called twice. This nbr_active_done
-	 * counter therefore keeps track of the number of IRQs expected
-	 * to be handled for this channel; if more than one IRQ remains
-	 * to be acked, we simply schedule this tasklet again.
- */
- cohc->nbr_active_done--;
- if (cohc->nbr_active_done) {
- dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
- "came in while we were scheduling this tasklet\n");
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return;
-
- err:
- spin_unlock_irqrestore(&cohc->lock, flags);
- dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
-}
-
-
-/* called from interrupt context */
-static void dma_tc_handle(struct coh901318_chan *cohc)
-{
- /*
- * If the channel is not allocated, then we shouldn't have
- * any TC interrupts on it.
- */
- if (!cohc->allocated) {
- dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
- "unallocated channel\n");
- return;
- }
-
- /*
-	 * When we reach this point, at least one queue item
-	 * should have been moved over from cohc->queue to
-	 * cohc->active and run to completion; that is why we
-	 * are getting a terminal count interrupt.
-	 * If you get this BUG() the most probable cause is that
-	 * the individual nodes in the lli chain have IRQ enabled,
-	 * so check your platform config for lli chain ctrl.
- */
- BUG_ON(list_empty(&cohc->active));
-
- cohc->nbr_active_done++;
-
- /*
-	 * This attempts to take a job from cohc->queue, put it
- * into cohc->active and start it.
- */
- if (coh901318_queue_start(cohc) == NULL)
- cohc->busy = 0;
-
- /*
-	 * This tasklet will remove items from cohc->active
-	 * and thus terminate them.
- */
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
-}
-
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- u32 status1;
- u32 status2;
- int i;
- int ch;
- struct coh901318_base *base = dev_id;
- struct coh901318_chan *cohc;
- void __iomem *virtbase = base->virtbase;
-
- status1 = readl(virtbase + COH901318_INT_STATUS1);
- status2 = readl(virtbase + COH901318_INT_STATUS2);
-
- if (unlikely(status1 == 0 && status2 == 0)) {
- dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
- return IRQ_HANDLED;
- }
-
-	/* TODO: consider handling the IRQ in a tasklet here to
-	 * minimize interrupt latency */
-
- /* Check the first 32 DMA channels for IRQ */
- while (status1) {
- /* Find first bit set, return as a number. */
- i = ffs(status1) - 1;
- ch = i;
-
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status1 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- BUG_ON(1);
- /* Clear BE interrupt */
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS1))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
-
-				/* Must clear the TC interrupt before calling
-				 * dma_tc_handle
-				 * in case tc_handle initiates a new dma job
-				 */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- /* Check the remaining 32 DMA channels for IRQ */
- while (status2) {
- /* Find first bit set, return as a number. */
- i = ffs(status2) - 1;
- ch = i + 32;
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status2 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- /* Clear BE interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS2))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
- BUG_ON(1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
-				/* Must clear the TC interrupt before calling
-				 * dma_tc_handle
-				 * in case tc_handle initiates a new dma job
-				 */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- return IRQ_HANDLED;
-}
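Both loops above use the classic ffs() idiom for walking a status word: pick the lowest set bit, service it, clear it, repeat. Stripped of the hardware specifics, the pattern is just this (a generic sketch, not driver code):

#include <linux/bitops.h>
#include <linux/types.h>

static void example_walk_status_bits(u32 status)
{
	while (status) {
		int i = ffs(status) - 1;	/* lowest set bit, zero-based */

		status &= ~(1 << i);		/* mask it off */
		/* ... service channel i here ... */
	}
}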
-
-static int coh901318_terminate_all(struct dma_chan *chan)
-{
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_desc *cohd;
- void __iomem *virtbase = cohc->base->virtbase;
-
- /* The remainder of this function terminates the transfer */
- coh901318_pause(chan);
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Clear any pending BE or TC interrupt */
- if (cohc->id < 32) {
- writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- enable_powersave(cohc);
-
- while ((cohd = coh901318_first_active_get(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
- }
-
- while ((cohd = coh901318_first_queued(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
- }
-
-
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return 0;
-}
-
-static int coh901318_alloc_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- unsigned long flags;
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
- __func__, cohc->id);
-
- if (chan->client_count > 1)
- return -EBUSY;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- coh901318_config(cohc, NULL);
-
- cohc->allocated = 1;
- dma_cookie_init(chan);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return 1;
-}
-
-static void
-coh901318_free_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable HW */
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING*channel);
-
- cohc->allocated = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- coh901318_terminate_all(chan);
-}
-
-
-static dma_cookie_t
-coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
- desc);
- struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&cohc->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- coh901318_desc_queue(cohc, cohd);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return cookie;
-}
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t size, unsigned long flags)
-{
- struct coh901318_lli *lli;
- struct coh901318_desc *cohd;
- unsigned long flg;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int lli_len;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
- int ret;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src %pad dest %pad size %zu\n",
- __func__, cohc->id, &src, &dest, size);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- lli_len++;
-
- lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
-
- if (lli == NULL)
- goto err;
-
- ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, lli, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- if (ret)
- goto err;
-
- COH_DBG(coh901318_list_print(cohc, lli));
-
- /* Pick a descriptor to handle this transfer */
- cohd = coh901318_desc_get(cohc);
- cohd->lli = lli;
- cohd->flags = flags;
- cohd->desc.tx_submit = coh901318_tx_submit;
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err:
- spin_unlock_irqrestore(&cohc->lock, flg);
- return NULL;
-}
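The lli_len arithmetic above is a hand-rolled round-up division: one lli per full MAX_DMA_PACKET_SIZE chunk, plus one for any remainder. A sketch of the equivalent DIV_ROUND_UP form, assuming coh901318.h is included for the constant:

#include <linux/kernel.h>

/* With MAX_DMA_PACKET_SIZE_SHIFT == 11 (2048-byte packets), a
 * size of 5000 bytes is 2 full packets plus 904 bytes => 3 lli:s.
 */
static int example_lli_count(size_t size)
{
	return DIV_ROUND_UP(size, MAX_DMA_PACKET_SIZE);
}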
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *lli;
- struct coh901318_desc *cohd;
- const struct coh901318_params *params;
- struct scatterlist *sg;
- int len = 0;
- int size;
- int i;
- u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
- u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
- u32 config;
- unsigned long flg;
- int ret;
-
- if (!sgl)
- goto out;
- if (sg_dma_len(sgl) == 0)
- goto out;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
- __func__, sg_len, direction);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- params = cohc_chan_param(cohc);
- config = params->config;
- /*
- * Add runtime-specific control on top, make
- * sure the bits you set per peripheral channel are
- * cleared in the default config from the platform.
- */
- ctrl_chained |= cohc->ctrl;
- ctrl_last |= cohc->ctrl;
- ctrl |= cohc->ctrl;
-
- if (direction == DMA_MEM_TO_DEV) {
- u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
-
- config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
- ctrl_chained |= tx_flags;
- ctrl_last |= tx_flags;
- ctrl |= tx_flags;
- } else if (direction == DMA_DEV_TO_MEM) {
- u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
-
- config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
- ctrl_chained |= rx_flags;
- ctrl_last |= rx_flags;
- ctrl |= rx_flags;
- } else
- goto err_direction;
-
-	/* The dma only supports transmitting packets up to
-	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
-	 * dma elements required to send the entire sg list.
-	 */
- for_each_sg(sgl, sg, sg_len, i) {
- unsigned int factor;
- size = sg_dma_len(sg);
-
- if (size <= MAX_DMA_PACKET_SIZE) {
- len++;
- continue;
- }
-
- factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- factor++;
-
- len += factor;
- }
-
- pr_debug("Allocate %d lli:s for this transfer\n", len);
- lli = coh901318_lli_alloc(&cohc->base->pool, len);
-
- if (lli == NULL)
- goto err_dma_alloc;
-
- coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
-
- /* initiate allocated lli list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc->addr,
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- if (ret)
- goto err_lli_fill;
-
-
- COH_DBG(coh901318_list_print(cohc, lli));
-
- /* Pick a descriptor to handle this transfer */
- cohd = coh901318_desc_get(cohc);
- cohd->head_config = config;
- /*
- * Set the default head ctrl for the channel to the one from the
- * lli, things may have changed due to odd buffer alignment
- * etc.
- */
- cohd->head_ctrl = lli->control;
- cohd->dir = direction;
- cohd->flags = flags;
- cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->lli = lli;
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err_lli_fill:
- err_dma_alloc:
- err_direction:
- spin_unlock_irqrestore(&cohc->lock, flg);
- out:
- return NULL;
-}
-
-static enum dma_status
-coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- enum dma_status ret;
-
- ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
- return ret;
-
- dma_set_residue(txstate, coh901318_get_bytes_left(chan));
-
- if (ret == DMA_IN_PROGRESS && cohc->stopped)
- ret = DMA_PAUSED;
-
- return ret;
-}
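A client reads this status and residue back through the standard dmaengine_tx_status() wrapper; the value set with dma_set_residue() above arrives in struct dma_tx_state. A minimal sketch, with the cookie assumed to come from an earlier dmaengine_submit():

#include <linux/dmaengine.h>

static void example_query_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("paused with %u bytes left\n", state.residue);
	else if (status == DMA_IN_PROGRESS)
		pr_info("running with %u bytes left\n", state.residue);
}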
-
-static void
-coh901318_issue_pending(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /*
- * Busy means that pending jobs are already being processed,
- * and then there is no point in starting the queue: the
- * terminal count interrupt on the channel will take the next
- * job on the queue and execute it anyway.
- */
- if (!cohc->busy)
- coh901318_queue_start(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
-
-/*
- * Here we wrap in the runtime dma control interface
- */
-struct burst_table {
- int burst_8bit;
- int burst_16bit;
- int burst_32bit;
- u32 reg;
-};
-
-static const struct burst_table burst_sizes[] = {
- {
- .burst_8bit = 64,
- .burst_16bit = 32,
- .burst_32bit = 16,
- .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
- },
- {
- .burst_8bit = 48,
- .burst_16bit = 24,
- .burst_32bit = 12,
- .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
- },
- {
- .burst_8bit = 32,
- .burst_16bit = 16,
- .burst_32bit = 8,
- .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
- },
- {
- .burst_8bit = 16,
- .burst_16bit = 8,
- .burst_32bit = 4,
- .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
- },
- {
- .burst_8bit = 8,
- .burst_16bit = 4,
- .burst_32bit = 2,
- .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
- },
- {
- .burst_8bit = 4,
- .burst_16bit = 2,
- .burst_32bit = 1,
- .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
- },
- {
- .burst_8bit = 2,
- .burst_16bit = 1,
- .burst_32bit = 0,
- .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
- },
- {
- .burst_8bit = 1,
- .burst_16bit = 0,
- .burst_32bit = 0,
- .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
- },
-};
-
-static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config,
- enum dma_transfer_direction direction)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- dma_addr_t addr;
- enum dma_slave_buswidth addr_width;
- u32 maxburst;
- u32 ctrl = 0;
- int i = 0;
-
- /* We only support mem to per or per to mem transfers */
- if (direction == DMA_DEV_TO_MEM) {
- addr = config->src_addr;
- addr_width = config->src_addr_width;
- maxburst = config->src_maxburst;
- } else if (direction == DMA_MEM_TO_DEV) {
- addr = config->dst_addr;
- addr_width = config->dst_addr_width;
- maxburst = config->dst_maxburst;
- } else {
- dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
- return -EINVAL;
- }
-
- dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
- addr_width);
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_8bit <= maxburst)
- break;
- i++;
- }
-
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_16bit <= maxburst)
- break;
- i++;
- }
-
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- /* Direction doesn't matter here, it's 32/32 bits */
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_32bit <= maxburst)
- break;
- i++;
- }
-
- break;
- default:
- dev_err(COHC_2_DEV(cohc),
- "bad runtimeconfig: alien address width\n");
- return -EINVAL;
- }
-
- ctrl |= burst_sizes[i].reg;
- dev_dbg(COHC_2_DEV(cohc),
- "selected burst size %d bytes for address width %d bytes, maxburst %d\n",
- burst_sizes[i].burst_8bit, addr_width, maxburst);
-
- cohc->addr = addr;
- cohc->ctrl = ctrl;
-
- return 0;
-}
-
-static int coh901318_dma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
-
- memcpy(&cohc->config, config, sizeof(*config));
-
- return 0;
-}
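The addr_width and maxburst values consumed by the burst table lookup above enter through this slave-config path, from a peripheral driver calling the generic dmaengine_slave_config(). A minimal sketch of such a caller (FIFO address and sizes are illustrative): with a 16-bit bus width and src_maxburst = 24, the scan above settles on the 48-byte table entry, the first whose burst_16bit value does not exceed 24.

#include <linux/dmaengine.h>

static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 24,	/* selects the 48-byte burst entry */
	};

	/* Stored in cohc->config and applied when a transfer is prepared */
	return dmaengine_slave_config(chan, &cfg);
}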
-
-static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
- struct coh901318_base *base)
-{
- int chans_i;
- int i = 0;
- struct coh901318_chan *cohc;
-
- INIT_LIST_HEAD(&dma->channels);
-
- for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
- for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
- cohc = &base->chans[i];
-
- cohc->base = base;
- cohc->chan.device = dma;
- cohc->id = i;
-
- /* TODO: do we really need this lock if only one
- * client is connected to each channel?
- */
-
- spin_lock_init(&cohc->lock);
-
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
- INIT_LIST_HEAD(&cohc->free);
- INIT_LIST_HEAD(&cohc->active);
- INIT_LIST_HEAD(&cohc->queue);
-
- tasklet_setup(&cohc->tasklet, dma_tasklet);
-
- list_add_tail(&cohc->chan.device_node,
- &dma->channels);
- }
- }
-}
-
-static int __init coh901318_probe(struct platform_device *pdev)
-{
- int err = 0;
- struct coh901318_base *base;
- int irq;
- struct resource *io;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -ENODEV;
-
- /* Map DMA controller registers to virtual memory */
- if (devm_request_mem_region(&pdev->dev,
- io->start,
- resource_size(io),
- pdev->dev.driver->name) == NULL)
- return -ENOMEM;
-
- base = devm_kzalloc(&pdev->dev,
- ALIGN(sizeof(struct coh901318_base), 4) +
- U300_DMA_CHANNELS *
- sizeof(struct coh901318_chan),
- GFP_KERNEL);
- if (!base)
- return -ENOMEM;
-
- base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
-
- base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
- if (!base->virtbase)
- return -ENOMEM;
-
- base->dev = &pdev->dev;
- spin_lock_init(&base->pm.lock);
- base->pm.started_channels = 0;
-
- COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
- "coh901318", base);
- if (err)
- return err;
-
- base->irq = irq;
-
- err = coh901318_pool_create(&base->pool, &pdev->dev,
- sizeof(struct coh901318_lli),
- 32);
- if (err)
- return err;
-
- /* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, dma_slave_channels,
- base);
-
- dma_cap_zero(base->dma_slave.cap_mask);
- dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
-
- base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_tx_status = coh901318_tx_status;
- base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_config = coh901318_dma_slave_config;
- base->dma_slave.device_pause = coh901318_pause;
- base->dma_slave.device_resume = coh901318_resume;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
- base->dma_slave.dev = &pdev->dev;
-
- err = dma_async_device_register(&base->dma_slave);
-
- if (err)
- goto err_register_slave;
-
- /* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
- base);
-
- dma_cap_zero(base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_tx_status = coh901318_tx_status;
- base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_config = coh901318_dma_slave_config;
- base->dma_memcpy.device_pause = coh901318_pause;
- base->dma_memcpy.device_resume = coh901318_resume;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
- base->dma_memcpy.dev = &pdev->dev;
- /*
-	 * This controller can only access addresses at even 32-bit
-	 * boundaries, i.e. aligned to 2^2 bytes
- */
- base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
- err = dma_async_device_register(&base->dma_memcpy);
-
- if (err)
- goto err_register_memcpy;
-
- err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate,
- base);
- if (err)
- goto err_register_of_dma;
-
- platform_set_drvdata(pdev, base);
- dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n",
- base->virtbase);
-
- return err;
-
- err_register_of_dma:
- dma_async_device_unregister(&base->dma_memcpy);
- err_register_memcpy:
- dma_async_device_unregister(&base->dma_slave);
- err_register_slave:
- coh901318_pool_destroy(&base->pool);
- return err;
-}
-static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
-{
- int chans_i;
- int i = 0;
- struct coh901318_chan *cohc;
-
- for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
- for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
- cohc = &base->chans[i];
-
- tasklet_kill(&cohc->tasklet);
- }
- }
-
-}
-
-static int coh901318_remove(struct platform_device *pdev)
-{
- struct coh901318_base *base = platform_get_drvdata(pdev);
-
- devm_free_irq(&pdev->dev, base->irq, base);
-
- coh901318_base_remove(base, dma_slave_channels);
- coh901318_base_remove(base, dma_memcpy_channels);
-
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&base->dma_memcpy);
- dma_async_device_unregister(&base->dma_slave);
- coh901318_pool_destroy(&base->pool);
- return 0;
-}
-
-static const struct of_device_id coh901318_dt_match[] = {
- { .compatible = "stericsson,coh901318" },
- {},
-};
-
-static struct platform_driver coh901318_driver = {
- .remove = coh901318_remove,
- .driver = {
- .name = "coh901318",
- .of_match_table = coh901318_dt_match,
- },
-};
-
-static int __init coh901318_init(void)
-{
- return platform_driver_probe(&coh901318_driver, coh901318_probe);
-}
-subsys_initcall(coh901318_init);
-
-static void __exit coh901318_exit(void)
-{
- platform_driver_unregister(&coh901318_driver);
-}
-module_exit(coh901318_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Per Friden");
diff --git a/drivers/dma/coh901318.h b/drivers/dma/coh901318.h
deleted file mode 100644
index bbf533600558..000000000000
--- a/drivers/dma/coh901318.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2013 ST-Ericsson
- * DMA driver for COH 901 318
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#ifndef COH901318_H
-#define COH901318_H
-
-#define MAX_DMA_PACKET_SIZE_SHIFT 11
-#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
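With MAX_DMA_PACKET_SIZE_SHIFT at 11, a single lli carries at most 2^11 = 2048 bytes; an N-byte transfer therefore needs DIV_ROUND_UP(N, 2048) list items, e.g. 32 items for a 65535-byte buffer.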
-
-struct device;
-
-struct coh901318_pool {
- spinlock_t lock;
- struct dma_pool *dmapool;
- struct device *dev;
-
-#ifdef CONFIG_DEBUG_FS
- int debugfs_pool_counter;
-#endif
-};
-
-/**
- * struct coh901318_lli - linked list item for DMAC
- * @control: control settings for DMAC
- * @src_addr: transfer source address
- * @dst_addr: transfer destination address
- * @link_addr: physical address to next lli
- * @virt_link_addr: virtual address of next lli (only used by pool_free)
- * @phy_this: physical address of current lli (only used by pool_free)
- */
-struct coh901318_lli {
- u32 control;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- dma_addr_t link_addr;
-
- void *virt_link_addr;
- dma_addr_t phy_this;
-};
-
-/**
- * coh901318_pool_create() - Creates a dma pool for lli:s
- * @pool: pool handle
- * @dev: dma device
- * @lli_nbr: number of lli:s in the pool
- * @align: address alignment of lli:s
- * returns 0 on success, otherwise non-zero
- */
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t lli_nbr, size_t align);
-
-/**
- * coh901318_pool_destroy() - Destroys the dma pool
- * @pool: pool handle
- * returns 0 on success, otherwise non-zero
- */
-int coh901318_pool_destroy(struct coh901318_pool *pool);
-
-/**
- * coh901318_lli_alloc() - Allocates a linked list
- *
- * @pool: pool handle
- * @len: length of the list
- * return: non-NULL on success, otherwise NULL
- */
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool,
- unsigned int len);
-
-/**
- * coh901318_lli_free() - Returns the linked list items to the pool
- * @pool: pool handle
- * @lli: reference to lli pointer to be freed
- */
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli);
-
-/**
- * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
- * @pool: pool handle
- * @lli: allocated lli
- * @src: src address
- * @size: transfer size
- * @dst: destination address
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t src, unsigned int size,
- dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
-
-/**
- * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @buf: transfer buffer
- * @size: transfer size
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_transfer_direction dir);
-
-/**
- * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @sg: scatter gather list
- * @nents: number of entries in sg
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl: ctrl of middle lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * @ctrl_irq_mask: ctrl mask for CPU interrupt
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sg, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained,
- u32 ctrl, u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-
-#endif /* COH901318_H */
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
deleted file mode 100644
index 6b6c2fd0865a..000000000000
--- a/drivers/dma/coh901318_lli.c
+++ /dev/null
@@ -1,313 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/dma/coh901318_lli.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * Support functions for handling lli for dma
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#include <linux/spinlock.h>
-#include <linux/memory.h>
-#include <linux/gfp.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
-
-#include "coh901318.h"
-
-#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
-#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
-#else
-#define DEBUGFS_POOL_COUNTER_RESET(pool)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
-#endif
-
-static struct coh901318_lli *
-coh901318_lli_next(struct coh901318_lli *data)
-{
- if (data == NULL || data->link_addr == 0)
- return NULL;
-
- return (struct coh901318_lli *) data->virt_link_addr;
-}
-
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t size, size_t align)
-{
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
-
- DEBUGFS_POOL_COUNTER_RESET(pool);
- return 0;
-}
-
-int coh901318_pool_destroy(struct coh901318_pool *pool)
-{
-
- dma_pool_destroy(pool->dmapool);
- return 0;
-}
-
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
-{
- int i;
- struct coh901318_lli *head;
- struct coh901318_lli *lli;
- struct coh901318_lli *lli_prev;
- dma_addr_t phy;
-
- if (len == 0)
- return NULL;
-
- spin_lock(&pool->lock);
-
- head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (head == NULL)
- goto err;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
-
- lli = head;
- lli->phy_this = phy;
- lli->link_addr = 0x00000000;
- lli->virt_link_addr = NULL;
-
- for (i = 1; i < len; i++) {
- lli_prev = lli;
-
- lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (lli == NULL)
- goto err_clean_up;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
- lli->phy_this = phy;
- lli->link_addr = 0x00000000;
- lli->virt_link_addr = NULL;
-
- lli_prev->link_addr = phy;
- lli_prev->virt_link_addr = lli;
- }
-
- spin_unlock(&pool->lock);
-
- return head;
-
- err:
- spin_unlock(&pool->lock);
- return NULL;
-
- err_clean_up:
- lli_prev->link_addr = 0x00000000U;
- spin_unlock(&pool->lock);
- coh901318_lli_free(pool, &head);
- return NULL;
-}
-
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli)
-{
- struct coh901318_lli *l;
- struct coh901318_lli *next;
-
- if (lli == NULL)
- return;
-
- l = *lli;
-
- if (l == NULL)
- return;
-
- spin_lock(&pool->lock);
-
- while (l->link_addr) {
- next = l->virt_link_addr;
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
- l = next;
- }
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
-
- spin_unlock(&pool->lock);
- *lli = NULL;
-}
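Putting the header's API together, the lli lifecycle is: allocate a chain sized for the transfer, fill it, hand it to the hardware, and return it to the pool when done. A minimal sketch using only the declarations from coh901318.h (pool assumed already created, hardware hand-off elided):

#include <linux/kernel.h>
#include "coh901318.h"

static int example_lli_lifecycle(struct coh901318_pool *pool,
				 dma_addr_t src, dma_addr_t dst,
				 unsigned int size,
				 u32 ctrl_chained, u32 ctrl_last)
{
	struct coh901318_lli *lli;
	int ret;

	lli = coh901318_lli_alloc(pool,
				  DIV_ROUND_UP(size, MAX_DMA_PACKET_SIZE));
	if (!lli)
		return -ENOMEM;

	ret = coh901318_lli_fill_memcpy(pool, lli, src, size, dst,
					ctrl_chained, ctrl_last);
	if (ret) {
		coh901318_lli_free(pool, &lli);
		return ret;
	}

	/* ... point the channel at the chain and run the job ... */

	coh901318_lli_free(pool, &lli);		/* after completion */
	return 0;
}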
-
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t source, unsigned int size,
- dma_addr_t destination, u32 ctrl_chained,
- u32 ctrl_eom)
-{
- int s = size;
- dma_addr_t src = source;
- dma_addr_t dst = destination;
-
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- while (lli->link_addr) {
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- s -= MAX_DMA_PACKET_SIZE;
- lli = coh901318_lli_next(lli);
-
- src += MAX_DMA_PACKET_SIZE;
- dst += MAX_DMA_PACKET_SIZE;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- return 0;
-}
-
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_transfer_direction dir)
-{
- int s = size;
- dma_addr_t src;
- dma_addr_t dst;
-
-
- if (dir == DMA_MEM_TO_DEV) {
- src = buf;
- dst = dev_addr;
-
- } else if (dir == DMA_DEV_TO_MEM) {
-
- src = dev_addr;
- dst = buf;
- } else {
- return -EINVAL;
- }
-
- while (lli->link_addr) {
- size_t block_size = MAX_DMA_PACKET_SIZE;
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
-
- /* If we are on the next-to-final block and there will
- * be less than half a DMA packet left for the last
- * block, then we want to make this block a little
- * smaller to balance the sizes. This is meant to
- * avoid too small transfers if the buffer size is
- * (MAX_DMA_PACKET_SIZE*N + 1) */
- if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
- block_size = MAX_DMA_PACKET_SIZE/2;
-
- s -= block_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- lli = coh901318_lli_next(lli);
-
- if (dir == DMA_MEM_TO_DEV)
- src += block_size;
- else if (dir == DMA_DEV_TO_MEM)
- dst += block_size;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- return 0;
-}
-
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sgl, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
- u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask)
-{
- int i;
- struct scatterlist *sg;
- u32 ctrl_sg;
- dma_addr_t src = 0;
- dma_addr_t dst = 0;
- u32 bytes_to_transfer;
- u32 elem_size;
-
- if (lli == NULL)
- goto err;
-
- spin_lock(&pool->lock);
-
- if (dir == DMA_MEM_TO_DEV)
- dst = dev_addr;
- else if (dir == DMA_DEV_TO_MEM)
- src = dev_addr;
- else
- goto err;
-
- for_each_sg(sgl, sg, nents, i) {
- if (sg_is_chain(sg)) {
-		if (sg_is_chain(sg)) {
-			/* sg continues to the next sg-element; don't
- * send ctrl_finish until the last
- * sg-element in the chain
- */
- ctrl_sg = ctrl_chained;
- } else if (i == nents - 1)
- ctrl_sg = ctrl_last;
- else
- ctrl_sg = ctrl ? ctrl : ctrl_last;
-
-
- if (dir == DMA_MEM_TO_DEV)
-			/* take the source address from this sg-element */
- src = sg_dma_address(sg);
- else
-			/* take the destination address from this sg-element */
- dst = sg_dma_address(sg);
-
- bytes_to_transfer = sg_dma_len(sg);
-
- while (bytes_to_transfer) {
- u32 val;
-
- if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
- elem_size = MAX_DMA_PACKET_SIZE;
- val = ctrl_chained;
- } else {
- elem_size = bytes_to_transfer;
- val = ctrl_sg;
- }
-
- lli->control = val | elem_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- if (dir == DMA_DEV_TO_MEM)
- dst += elem_size;
- else
- src += elem_size;
-
- BUG_ON(lli->link_addr & 3);
-
- bytes_to_transfer -= elem_size;
- lli = coh901318_lli_next(lli);
- }
-
- }
- spin_unlock(&pool->lock);
-
- return 0;
- err:
- spin_unlock(&pool->lock);
- return -EINVAL;
-}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 612d353648cf..ebee94dbd630 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1004,6 +1004,18 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
JZ_SOC_DATA_BREAK_LINKS,
};
+static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
+ .nb_channels = 5,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
+ .nb_channels = 5,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 6,
@@ -1031,6 +1043,8 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+ { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+ { .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 962cbb5e5f7f..fe6a460c4373 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
mutex_lock(&dma_list_mutex);
- list_del(&chan->device_node);
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index e164f3295f5d..d9e4ac3edb4e 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -12,15 +12,20 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "dw-axi-dmac.h"
@@ -195,43 +200,56 @@ static inline const char *axi_chan_name(struct axi_dma_chan *chan)
return dma_chan_name(&chan->vc.chan);
}
-static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
+static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
- struct dw_axi_dma *dw = chan->chip->dw;
struct axi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
+ if (!desc->hw_desc) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return desc;
+}
+
+static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+ dma_addr_t *addr)
+{
+ struct axi_dma_lli *lli;
dma_addr_t phys;
- desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
- if (unlikely(!desc)) {
+ lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
+ if (unlikely(!lli)) {
dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
axi_chan_name(chan));
return NULL;
}
atomic_inc(&chan->descs_allocated);
- INIT_LIST_HEAD(&desc->xfer_list);
- desc->vd.tx.phys = phys;
- desc->chan = chan;
+ *addr = phys;
- return desc;
+ return lli;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- struct dw_axi_dma *dw = chan->chip->dw;
- struct axi_dma_desc *child, *_next;
- unsigned int descs_put = 0;
+ int count = atomic_read(&chan->descs_allocated);
+ struct axi_dma_hw_desc *hw_desc;
+ int descs_put;
- list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
- list_del(&child->xfer_list);
- dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
- descs_put++;
+ for (descs_put = 0; descs_put < count; descs_put++) {
+ hw_desc = &desc->hw_desc[descs_put];
+ dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
}
- dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
- descs_put++;
-
+ kfree(desc->hw_desc);
+ kfree(desc);
atomic_sub(descs_put, &chan->descs_allocated);
dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
axi_chan_name(chan), descs_put,
@@ -248,19 +266,41 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
- enum dma_status ret;
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ u32 completed_length;
+ unsigned long flags;
+ u32 completed_blocks;
+ size_t bytes = 0;
+ u32 length;
+ u32 len;
- ret = dma_cookie_status(dchan, cookie, txstate);
+ status = dma_cookie_status(dchan, cookie, txstate);
+ if (status == DMA_COMPLETE || !txstate)
+ return status;
- if (chan->is_paused && ret == DMA_IN_PROGRESS)
- ret = DMA_PAUSED;
+ spin_lock_irqsave(&chan->vc.lock, flags);
- return ret;
+ vdesc = vchan_find_desc(&chan->vc, cookie);
+ if (vdesc) {
+ length = vd_to_axi_desc(vdesc)->length;
+ completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
+ len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
+ completed_length = completed_blocks * len;
+ bytes = length - completed_length;
+ } else {
+ bytes = vd_to_axi_desc(vdesc)->length;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ dma_set_residue(txstate, bytes);
+
+ return status;
}
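The residue arithmetic here assumes equal-sized blocks: with illustrative numbers, a descriptor of total length 4096 split into four 1024-byte blocks (hw_desc[0].len == 1024) reports 4096 - 3 * 1024 = 1024 bytes left once three blocks have completed.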
-static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.llp = cpu_to_le64(adr);
+ desc->lli->llp = cpu_to_le64(adr);
}
static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
@@ -268,6 +308,29 @@ static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
axi_chan_iowrite64(chan, CH_LLP, adr);
}
+static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
+{
+ u32 offset = DMAC_APB_BYTE_WR_CH_EN;
+ u32 reg_width, val;
+
+ if (!chan->chip->apb_regs) {
+ dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
+ return;
+ }
+
+ reg_width = __ffs(chan->config.dst_addr_width);
+ if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
+ offset = DMAC_APB_HALFWORD_WR_CH_EN;
+
+ val = ioread32(chan->chip->apb_regs + offset);
+
+ if (set)
+ val |= BIT(chan->id);
+ else
+ val &= ~BIT(chan->id);
+
+ iowrite32(val, chan->chip->apb_regs + offset);
+}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
@@ -293,9 +356,26 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
priority << CH_CFG_H_PRIORITY_POS |
DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+ switch (chan->direction) {
+ case DMA_MEM_TO_DEV:
+ dw_axi_dma_set_byte_halfword(chan, true);
+ reg |= (chan->config.device_fc ?
+ DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
+ DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
+ << CH_CFG_H_TT_FC_POS;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg |= (chan->config.device_fc ?
+ DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
+ DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
+ << CH_CFG_H_TT_FC_POS;
+ break;
+ default:
+ break;
+ }
axi_chan_iowrite32(chan, CH_CFG_H, reg);
- write_chan_llp(chan, first->vd.tx.phys | lms);
+ write_chan_llp(chan, first->hw_desc[0].llp | lms);
irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
axi_chan_irq_sig_set(chan, irq_mask);
@@ -333,6 +413,13 @@ static void dma_chan_issue_pending(struct dma_chan *dchan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
+static void dw_axi_dma_synchronize(struct dma_chan *dchan)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+ vchan_synchronize(&chan->vc);
+}
+
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
@@ -344,6 +431,15 @@ static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
return -EBUSY;
}
+ /* LLI address must be aligned to a 64-byte boundary */
+ chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->chip->dev,
+ sizeof(struct axi_dma_lli),
+ 64, 0);
+ if (!chan->desc_pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
pm_runtime_get(chan->chip->dev);
@@ -365,6 +461,8 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
vchan_free_chan_resources(&chan->vc);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
dev_vdbg(dchan2dev(dchan),
"%s: free resources, descriptor still allocated: %u\n",
axi_chan_name(chan), atomic_read(&chan->descs_allocated));
@@ -372,73 +470,398 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
+ u32 handshake_num, bool set)
+{
+ unsigned long start = 0;
+ unsigned long reg_value;
+ unsigned long reg_mask;
+ unsigned long reg_set;
+ unsigned long mask;
+ unsigned long val;
+
+ if (!chip->apb_regs) {
+ dev_dbg(chip->dev, "apb_regs not initialized\n");
+ return;
+ }
+
+ /*
+ * An unused DMA channel has a default value of 0x3F.
+	 * Lock the DMA channel by assigning a handshake number to the channel.
+	 * Unlock the DMA channel by assigning 0x3F to the channel.
+ */
+ if (set) {
+ reg_set = UNUSED_CHANNEL;
+ val = handshake_num;
+ } else {
+ reg_set = handshake_num;
+ val = UNUSED_CHANNEL;
+ }
+
+ reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+
+ for_each_set_clump8(start, reg_mask, &reg_value, 64) {
+ if (reg_mask == reg_set) {
+ mask = GENMASK_ULL(start + 7, start);
+ reg_value &= ~mask;
+ reg_value |= rol64(val, start);
+ lo_hi_writeq(reg_value,
+ chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+ break;
+ }
+ }
+}
+
/*
* If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
* as 1, it understands that the current block is the final block in the
* transfer and completes the DMA transfer operation at the end of current
* block transfer.
*/
-static void set_desc_last(struct axi_dma_desc *desc)
+static void set_desc_last(struct axi_dma_hw_desc *desc)
{
u32 val;
- val = le32_to_cpu(desc->lli.ctl_hi);
+ val = le32_to_cpu(desc->lli->ctl_hi);
val |= CH_CTL_H_LLI_LAST;
- desc->lli.ctl_hi = cpu_to_le32(val);
+ desc->lli->ctl_hi = cpu_to_le32(val);
}
-static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.sar = cpu_to_le64(adr);
+ desc->lli->sar = cpu_to_le64(adr);
}
-static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.dar = cpu_to_le64(adr);
+ desc->lli->dar = cpu_to_le64(adr);
}
-static void set_desc_src_master(struct axi_dma_desc *desc)
+static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
u32 val;
/* Select AXI0 for source master */
- val = le32_to_cpu(desc->lli.ctl_lo);
+ val = le32_to_cpu(desc->lli->ctl_lo);
val &= ~CH_CTL_L_SRC_MAST;
- desc->lli.ctl_lo = cpu_to_le32(val);
+ desc->lli->ctl_lo = cpu_to_le32(val);
}
-static void set_desc_dest_master(struct axi_dma_desc *desc)
+static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
+ struct axi_dma_desc *desc)
{
u32 val;
	/* Select AXI1 for destination master if available */
- val = le32_to_cpu(desc->lli.ctl_lo);
+ val = le32_to_cpu(hw_desc->lli->ctl_lo);
if (desc->chan->chip->dw->hdata->nr_masters > 1)
val |= CH_CTL_L_DST_MAST;
else
val &= ~CH_CTL_L_DST_MAST;
- desc->lli.ctl_lo = cpu_to_le32(val);
+ hw_desc->lli->ctl_lo = cpu_to_le32(val);
+}
+
+static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
+ struct axi_dma_hw_desc *hw_desc,
+ dma_addr_t mem_addr, size_t len)
+{
+ unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
+ unsigned int reg_width;
+ unsigned int mem_width;
+ dma_addr_t device_addr;
+ size_t axi_block_ts;
+ size_t block_ts;
+ u32 ctllo, ctlhi;
+ u32 burst_len;
+
+ axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+ mem_width = __ffs(data_width | mem_addr | len);
+ if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
+ mem_width = DWAXIDMAC_TRANS_WIDTH_32;
+
+ if (!IS_ALIGNED(mem_addr, 4)) {
+ dev_err(chan->chip->dev, "invalid buffer alignment\n");
+ return -EINVAL;
+ }
+
+ switch (chan->direction) {
+ case DMA_MEM_TO_DEV:
+ reg_width = __ffs(chan->config.dst_addr_width);
+ device_addr = chan->config.dst_addr;
+ ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
+ mem_width << CH_CTL_L_SRC_WIDTH_POS |
+ DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
+ DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
+ block_ts = len >> mem_width;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg_width = __ffs(chan->config.src_addr_width);
+ device_addr = chan->config.src_addr;
+ ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
+ mem_width << CH_CTL_L_DST_WIDTH_POS |
+ DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
+ DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
+ block_ts = len >> reg_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (block_ts > axi_block_ts)
+ return -EINVAL;
+
+ hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
+ if (unlikely(!hw_desc->lli))
+ return -ENOMEM;
+
+ ctlhi = CH_CTL_H_LLI_VALID;
+
+ if (chan->chip->dw->hdata->restrict_axi_burst_len) {
+ burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
+ ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
+ burst_len << CH_CTL_H_ARLEN_POS |
+ burst_len << CH_CTL_H_AWLEN_POS;
+ }
+
+ hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ write_desc_sar(hw_desc, mem_addr);
+ write_desc_dar(hw_desc, device_addr);
+ } else {
+ write_desc_sar(hw_desc, device_addr);
+ write_desc_dar(hw_desc, mem_addr);
+ }
+
+ hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
+
+ ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
+ DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
+ hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
+
+ set_desc_src_master(hw_desc);
+
+ hw_desc->len = len;
+ return 0;
+}
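As a worked example of the block_ts math above (numbers illustrative): a 4096-byte MEM_TO_DEV segment on a 32-bit memory bus gives mem_width = 2, so block_ts = 4096 >> 2 = 1024 transfers, which must fit within the per-channel block_size limit read from hdata.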
+
+static size_t calculate_block_len(struct axi_dma_chan *chan,
+ dma_addr_t dma_addr, size_t buf_len,
+ enum dma_transfer_direction direction)
+{
+ u32 data_width, reg_width, mem_width;
+ size_t axi_block_ts, block_len;
+
+ axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ data_width = BIT(chan->chip->dw->hdata->m_data_width);
+ mem_width = __ffs(data_width | dma_addr | buf_len);
+ if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
+ mem_width = DWAXIDMAC_TRANS_WIDTH_32;
+
+ block_len = axi_block_ts << mem_width;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg_width = __ffs(chan->config.src_addr_width);
+ block_len = axi_block_ts << reg_width;
+ break;
+ default:
+ block_len = 0;
+ }
+
+ return block_len;
+}
+
+static struct dma_async_tx_descriptor *
+dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ dma_addr_t src_addr = dma_addr;
+ u32 num_periods, num_segments;
+ size_t axi_block_len;
+ u32 total_segments;
+ u32 segment_len;
+ unsigned int i;
+ int status;
+ u64 llp = 0;
+ u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+ num_periods = buf_len / period_len;
+
+ axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
+ if (axi_block_len == 0)
+ return NULL;
+
+ num_segments = DIV_ROUND_UP(period_len, axi_block_len);
+ segment_len = DIV_ROUND_UP(period_len, num_segments);
+
+ total_segments = num_periods * num_segments;
+
+ desc = axi_desc_alloc(total_segments);
+ if (unlikely(!desc))
+ goto err_desc_get;
+
+ chan->direction = direction;
+ desc->chan = chan;
+ chan->cyclic = true;
+ desc->length = 0;
+ desc->period_len = period_len;
+
+ for (i = 0; i < total_segments; i++) {
+ hw_desc = &desc->hw_desc[i];
+
+ status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
+ segment_len);
+ if (status < 0)
+ goto err_desc_get;
+
+ desc->length += hw_desc->len;
+		/* Set end-of-link on each linked descriptor, so that the
+		 * cyclic callback function can be triggered during the interrupt.
+ */
+ set_desc_last(hw_desc);
+
+ src_addr += segment_len;
+ }
+
+ llp = desc->hw_desc[0].llp;
+
+ /* Managed transfer list */
+ do {
+ hw_desc = &desc->hw_desc[--total_segments];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (total_segments);
+
+ dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+err_desc_get:
+ if (desc)
+ axi_desc_put(desc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ u32 num_segments, segment_len;
+ unsigned int loop = 0;
+ struct scatterlist *sg;
+ size_t axi_block_len;
+ u32 len, num_sgs = 0;
+ unsigned int i;
+ dma_addr_t mem;
+ int status;
+ u64 llp = 0;
+ u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
+ return NULL;
+
+ mem = sg_dma_address(sgl);
+ len = sg_dma_len(sgl);
+
+ axi_block_len = calculate_block_len(chan, mem, len, direction);
+ if (axi_block_len == 0)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
+
+ desc = axi_desc_alloc(num_sgs);
+ if (unlikely(!desc))
+ goto err_desc_get;
+
+ desc->chan = chan;
+ desc->length = 0;
+ chan->direction = direction;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
+ segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
+
+ do {
+ hw_desc = &desc->hw_desc[loop++];
+ status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
+ if (status < 0)
+ goto err_desc_get;
+
+ desc->length += hw_desc->len;
+ len -= segment_len;
+ mem += segment_len;
+ } while (len >= segment_len);
+ }
+
+ /* Set end-of-link to the last link descriptor of list */
+ set_desc_last(&desc->hw_desc[num_sgs - 1]);
+
+ /* Managed transfer list */
+ do {
+ hw_desc = &desc->hw_desc[--num_sgs];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (num_sgs);
+
+ dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+err_desc_get:
+ if (desc)
+ axi_desc_put(desc);
+
+ return NULL;
}
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
dma_addr_t src_adr, size_t len, unsigned long flags)
{
- struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
size_t block_ts, max_block_ts, xfer_len;
- u32 xfer_width, reg;
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ u32 xfer_width, reg, num;
+ u64 llp = 0;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
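+ /*
+ * block_ts is expressed in transfer-width units, so one hardware
+ * block moves up to max_block_ts << xfer_width bytes; reserve one
+ * hw descriptor per block.
+ */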
+ xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
+ num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
+ desc = axi_desc_alloc(num);
+ if (unlikely(!desc))
+ goto err_desc_get;
+ desc->chan = chan;
+ num = 0;
+ desc->length = 0;
while (len) {
xfer_len = len;
+ hw_desc = &desc->hw_desc[num];
/*
* Take care for the alignment.
* Actually source and destination widths can be different, but
@@ -457,13 +880,13 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
xfer_len = max_block_ts << xfer_width;
}
- desc = axi_desc_get(chan);
- if (unlikely(!desc))
+ hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
+ if (unlikely(!hw_desc->lli))
goto err_desc_get;
- write_desc_sar(desc, src_adr);
- write_desc_dar(desc, dst_adr);
- desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
+ write_desc_sar(hw_desc, src_adr);
+ write_desc_dar(hw_desc, dst_adr);
+ hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
reg = CH_CTL_H_LLI_VALID;
if (chan->chip->dw->hdata->restrict_axi_burst_len) {
@@ -474,7 +897,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
CH_CTL_H_AWLEN_EN |
burst_len << CH_CTL_H_AWLEN_POS);
}
- desc->lli.ctl_hi = cpu_to_le32(reg);
+ hw_desc->lli->ctl_hi = cpu_to_le32(reg);
reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
@@ -482,62 +905,68 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
xfer_width << CH_CTL_L_SRC_WIDTH_POS |
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
- desc->lli.ctl_lo = cpu_to_le32(reg);
+ hw_desc->lli->ctl_lo = cpu_to_le32(reg);
- set_desc_src_master(desc);
- set_desc_dest_master(desc);
-
- /* Manage transfer list (xfer_list) */
- if (!first) {
- first = desc;
- } else {
- list_add_tail(&desc->xfer_list, &first->xfer_list);
- write_desc_llp(prev, desc->vd.tx.phys | lms);
- }
- prev = desc;
+ set_desc_src_master(hw_desc);
+ set_desc_dest_master(hw_desc, desc);
+ hw_desc->len = xfer_len;
+ desc->length += hw_desc->len;
/* update the length and addresses for the next loop cycle */
len -= xfer_len;
dst_adr += xfer_len;
src_adr += xfer_len;
+ num++;
}
- /* Total len of src/dest sg == 0, so no descriptor were allocated */
- if (unlikely(!first))
- return NULL;
-
/* Set end-of-link to the last link descriptor of list */
- set_desc_last(desc);
+ set_desc_last(&desc->hw_desc[num - 1]);
+ /*
+ * Chain the descriptors in reverse so that each LLP points to the
+ * next block; the last LLP stays zero to terminate the list.
+ */
+ do {
+ hw_desc = &desc->hw_desc[--num];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (num);
- return vchan_tx_prep(&chan->vc, &first->vd, flags);
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
err_desc_get:
- if (first)
- axi_desc_put(first);
+ if (desc)
+ axi_desc_put(desc);
return NULL;
}
+static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+ memcpy(&chan->config, config, sizeof(*config));
+
+ return 0;
+}
+
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
- struct axi_dma_desc *desc)
+ struct axi_dma_hw_desc *desc)
{
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
- le64_to_cpu(desc->lli.sar),
- le64_to_cpu(desc->lli.dar),
- le64_to_cpu(desc->lli.llp),
- le32_to_cpu(desc->lli.block_ts_lo),
- le32_to_cpu(desc->lli.ctl_hi),
- le32_to_cpu(desc->lli.ctl_lo));
+ le64_to_cpu(desc->lli->sar),
+ le64_to_cpu(desc->lli->dar),
+ le64_to_cpu(desc->lli->llp),
+ le32_to_cpu(desc->lli->block_ts_lo),
+ le32_to_cpu(desc->lli->ctl_hi),
+ le32_to_cpu(desc->lli->ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_desc *desc_head)
{
- struct axi_dma_desc *desc;
+ int count = atomic_read(&chan->descs_allocated);
+ int i;
- axi_chan_dump_lli(chan, desc_head);
- list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
- axi_chan_dump_lli(chan, desc);
+ for (i = 0; i < count; i++)
+ axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
@@ -570,8 +999,13 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
+ int count = atomic_read(&chan->descs_allocated);
+ struct axi_dma_hw_desc *hw_desc;
+ struct axi_dma_desc *desc;
struct virt_dma_desc *vd;
unsigned long flags;
+ u64 llp;
+ int i;
spin_lock_irqsave(&chan->vc.lock, flags);
if (unlikely(axi_chan_is_hw_enable(chan))) {
@@ -582,12 +1016,34 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
- /* Remove the completed descriptor from issued list before completing */
- list_del(&vd->node);
- vchan_cookie_complete(vd);
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
+ if (chan->cyclic) {
+ desc = vd_to_axi_desc(vd);
+ if (desc) {
+ llp = lo_hi_readq(chan->chan_regs + CH_LLP);
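+ /*
+ * Find the hw descriptor that the channel's LLP register points
+ * at, re-validate its LLI so the ring can be reused, and record
+ * it as the most recently completed block.
+ */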
+ for (i = 0; i < count; i++) {
+ hw_desc = &desc->hw_desc[i];
+ if (hw_desc->llp == llp) {
+ axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
+ hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
+ desc->completed_blocks = i;
+
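+ /*
+ * A period may span several equally sized blocks; invoke the
+ * cyclic callback only when a full period has completed.
+ */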
+ if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
+ vchan_cyclic_callback(vd);
+ break;
+ }
+ }
+
+ axi_chan_enable(chan);
+ }
+ } else {
+ /* Remove the completed descriptor from issued list before completing */
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+
+ /* Submit queued descriptors after processing the completed ones */
+ axi_chan_start_first_queued(chan);
+ }
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
@@ -627,15 +1083,31 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
unsigned long flags;
+ u32 val;
+ int ret;
LIST_HEAD(head);
- spin_lock_irqsave(&chan->vc.lock, flags);
-
axi_chan_disable(chan);
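+ /* Wait for the hardware to report the channel as actually disabled */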
+ ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
+ !(val & chan_active), 1000, 10000);
+ if (ret == -ETIMEDOUT)
+ dev_warn(dchan2dev(dchan),
+ "%s failed to stop\n", axi_chan_name(chan));
+
+ if (chan->direction != DMA_MEM_TO_MEM)
+ dw_axi_dma_set_hw_channel(chan->chip,
+ chan->hw_handshake_num, false);
+ if (chan->direction == DMA_MEM_TO_DEV)
+ dw_axi_dma_set_byte_halfword(chan, false);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
vchan_get_all_descriptors(&chan->vc, &head);
+ chan->cyclic = false;
spin_unlock_irqrestore(&chan->vc.lock, flags);
vchan_dma_desc_free_list(&chan->vc, &head);
@@ -746,6 +1218,22 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
return axi_dma_resume(chip);
}
+static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_axi_dma *dw = ofdma->of_dma_data;
+ struct axi_dma_chan *chan;
+ struct dma_chan *dchan;
+
+ dchan = dma_get_any_slave_channel(&dw->dma);
+ if (!dchan)
+ return NULL;
+
+ chan = dchan_to_axi_dma_chan(dchan);
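+ /* The first DT cell selects the hardware handshake interface */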
+ chan->hw_handshake_num = dma_spec->args[0];
+ return dchan;
+}
+
static int parse_device_properties(struct axi_dma_chip *chip)
{
struct device *dev = chip->dev;
@@ -816,6 +1304,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
static int dw_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
struct resource *mem;
struct dw_axi_dma *dw;
@@ -848,6 +1337,12 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
+ if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+ chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(chip->apb_regs))
+ return PTR_ERR(chip->apb_regs);
+ }
+
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
if (IS_ERR(chip->core_clk))
return PTR_ERR(chip->core_clk);
@@ -870,13 +1365,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Lli address must be aligned to a 64-byte boundary */
- dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
- sizeof(struct axi_dma_desc), 64, 0);
- if (!dw->desc_pool) {
- dev_err(chip->dev, "No memory for descriptors dma pool\n");
- return -ENOMEM;
- }
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
@@ -893,13 +1381,16 @@ static int dw_probe(struct platform_device *pdev)
/* Set capabilities */
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
- dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dw->dma.dev = chip->dev;
dw->dma.device_tx_status = dma_chan_tx_status;
@@ -912,7 +1403,18 @@ static int dw_probe(struct platform_device *pdev)
dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
+ dw->dma.device_synchronize = dw_axi_dma_synchronize;
+ dw->dma.device_config = dw_axi_dma_chan_slave_config;
+ dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
+ dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
+ /*
+ * The Synopsys DesignWare AXI DMA datasheet states that the
+ * maximum number of supported blocks is 1024 and the device
+ * register width is 4 bytes, so constrain the maximum segment
+ * size to 1024 * 4.
+ */
+ dw->dma.dev->dma_parms = &dw->dma_parms;
+ dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
platform_set_drvdata(pdev, chip);
pm_runtime_enable(chip->dev);
@@ -935,6 +1437,13 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
+ /* Register with OF helpers for DMA lookups */
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ dw_axi_dma_of_xlate, dw);
+ if (ret < 0)
+ dev_warn(&pdev->dev,
+ "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
+
dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
dw->hdata->nr_channels);
@@ -968,6 +1477,8 @@ static int dw_remove(struct platform_device *pdev)
devm_free_irq(chip->dev, chip->irq, chip);
+ of_dma_controller_free(chip->dev->of_node);
+
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
@@ -983,6 +1494,7 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,axi-dma-1.01a" },
+ { .compatible = "intel,kmb-axi-dma" },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 18b6014cf9b4..b69897887c76 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -37,10 +37,16 @@ struct axi_dma_chan {
struct axi_dma_chip *chip;
void __iomem *chan_regs;
u8 id;
+ u8 hw_handshake_num;
atomic_t descs_allocated;
+ struct dma_pool *desc_pool;
struct virt_dma_chan vc;
+ struct axi_dma_desc *desc;
+ struct dma_slave_config config;
+ enum dma_transfer_direction direction;
+ bool cyclic;
/* these other elements are all protected by vc.lock */
bool is_paused;
};
@@ -48,7 +54,7 @@ struct axi_dma_chan {
struct dw_axi_dma {
struct dma_device dma;
struct dw_axi_dma_hcfg *hdata;
- struct dma_pool *desc_pool;
+ struct device_dma_parameters dma_parms;
/* channels */
struct axi_dma_chan *chan;
@@ -58,6 +64,7 @@ struct axi_dma_chip {
struct device *dev;
int irq;
void __iomem *regs;
+ void __iomem *apb_regs;
struct clk *core_clk;
struct clk *cfgr_clk;
struct dw_axi_dma *dw;
@@ -80,12 +87,20 @@ struct __packed axi_dma_lli {
__le32 reserved_hi;
};
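+
+/* One hardware LLI, its bus address, and the byte count it transfers */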
+struct axi_dma_hw_desc {
+ struct axi_dma_lli *lli;
+ dma_addr_t llp;
+ u32 len;
+};
+
struct axi_dma_desc {
- struct axi_dma_lli lli;
+ struct axi_dma_hw_desc *hw_desc;
struct virt_dma_desc vd;
struct axi_dma_chan *chan;
- struct list_head xfer_list;
+ u32 completed_blocks;
+ u32 length;
+ u32 period_len;
};
static inline struct device *dchan2dev(struct dma_chan *dchan)
@@ -157,6 +172,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define CH_INTSIGNAL_ENA 0x090 /* R/W Chan Interrupt Signal Enable */
#define CH_INTCLEAR 0x098 /* W Chan Interrupt Clear */
+/* These Apb registers are used by Intel KeemBay SoC */
+#define DMAC_APB_CFG 0x000 /* DMAC Apb Configuration Register */
+#define DMAC_APB_STAT 0x004 /* DMAC Apb Status Register */
+#define DMAC_APB_DEBUG_STAT_0 0x008 /* DMAC Apb Debug Status Register 0 */
+#define DMAC_APB_DEBUG_STAT_1 0x00C /* DMAC Apb Debug Status Register 1 */
+#define DMAC_APB_HW_HS_SEL_0 0x010 /* DMAC Apb HW HS register 0 */
+#define DMAC_APB_HW_HS_SEL_1 0x014 /* DMAC Apb HW HS register 1 */
+#define DMAC_APB_LPI 0x018 /* DMAC Apb Low Power Interface Reg */
+#define DMAC_APB_BYTE_WR_CH_EN 0x01C /* DMAC Apb Byte Write Enable */
+#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
+
+#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
+#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
#define DMAC_EN_POS 0
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
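+		/* Publish the chunk on the list only after its burst allocation succeeds */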
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 19a23767533a..7ab83fe601ed 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- pm_runtime_get_sync(dw->dma.dev);
-
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
@@ -1003,7 +1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* We need controller-specific data to set up slave transfers.
*/
if (chan->private && !dw_dma_filter(chan, chan->private)) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
return -EINVAL;
}
@@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
if (!dw->in_use)
do_dw_dma_off(dw);
- pm_runtime_put_sync_suspend(dw->dma.dev);
-
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0feb323bae1e..f8459cc5315d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
+ unsigned int i;
int err;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
return 0;
out_free_fdev:
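+	/* Tear down any channels that were initialized before the failure */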
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (fdev->chan[i])
+ fsl_dma_chan_remove(fdev->chan[i]);
+ }
irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
out_free:
@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
}
+ irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
kfree(fdev);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 07cc7320a614..9045a6f7f589 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -26,22 +26,12 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
- struct pci_dev *pdev = to_pci_dev(chip->dev);
u32 dmaisr;
u32 status;
unsigned short i;
int ret = 0;
int err;
- /*
- * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
- * to have different numbers, is shared between HSU DMA and UART IPs.
- * Thus on such SoCs we are expecting that IRQ handler is called in
- * UART driver only.
- */
- if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
- return IRQ_HANDLED;
-
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
if (dmaisr & 0x1) {
@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_register_irq;
+ /*
+ * On Intel Tangier B0 and Anniedale the interrupt line, despite
+ * being numbered differently, is shared between the HSU DMA and
+ * UART IPs. On such SoCs the IRQ is expected to be handled by the
+ * UART driver only. Instead of handling the spurious interrupt
+ * from HSU DMA here, wasting CPU time and delaying HSU UART
+ * interrupt handling, disable the interrupt entirely.
+ */
+ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+ disable_irq_nosync(chip->irq);
+
pci_set_drvdata(pdev, chip);
return 0;
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 95f94a3ed6be..84a6ea60ecf0 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
return false;
}
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
* all commands will be done via interrupt completion.
*/
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ return -ENXIO;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
IDXD_CMDSTS_ACTIVE)
cpu_relax();
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return 0;
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ *status = IDXD_CMDSTS_HW_ERR;
+ return;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = cmd_code;
cmd.operand = operand;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 8ed2773d8285..a15e50126434 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev;
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
@@ -205,5 +206,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+ struct dma_chan *chan = &wq->dma_chan;
+
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ list_del(&chan->device_node);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 5a50e91c71bf..81a0e65fd316 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2c051e07c34c..085a0c3b62c6 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -26,12 +26,16 @@ MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+static bool sva = true;
+module_param(sva, bool, 0644);
+MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+
#define DRV_NAME "idxd"
bool support_enqcmd;
static struct idr idxd_idrs[IDXD_TYPE_MAX];
-static struct mutex idxd_idr_lock;
+static DEFINE_MUTEX(idxd_idr_lock);
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
@@ -335,15 +339,20 @@ static int idxd_probe(struct idxd_device *idxd)
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
- idxd_device_init_reset(idxd);
+ rc = idxd_device_init_reset(idxd);
+ if (rc < 0)
+ return rc;
+
dev_dbg(dev, "IDXD reset complete\n");
- if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+ if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
rc = idxd_enable_system_pasid(idxd);
if (rc < 0)
dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ } else if (!sva) {
+ dev_warn(dev, "User forced SVA off via module param.\n");
}
idxd_read_caps(idxd);
@@ -544,7 +553,6 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)
idr_init(&idxd_idrs[i]);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 593a2f6ed16c..a60ca11a5784 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
return IRQ_WAKE_THREAD;
}
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
- u32 cause, val = 0;
+ u32 val = 0;
int i;
bool err = false;
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
for (i = 0; i < 4; i++)
@@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
val);
if (!err)
- goto out;
+ return 0;
/*
* This case should rarely happen and typically is due to software
@@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
+ return -ENXIO;
}
}
- out:
+ return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ int rc;
+ u32 cause;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
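+ /*
+ * Re-read INTCAUSE until it reads back zero: new interrupt causes
+ * may be raised while earlier ones are being processed.
+ */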
+ while (cause) {
+ rc = process_misc_interrupts(idxd, cause);
+ if (rc < 0)
+ break;
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ }
+
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
/*
* Completion address can be bad as well. Check fault address match for descriptor
* and completion address.
*/
- if ((u64)desc->hw == fault_addr ||
- (u64)desc->completion == fault_addr) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+ if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+ struct idxd_device *idxd = desc->wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
return true;
}
return false;
}
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
- if (desc->completion->status) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
- return true;
- }
-
- return false;
+ idxd_dma_complete_txd(desc, reason);
+ idxd_free_desc(desc->wq, desc);
}
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
@@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ enum idxd_complete_type reason;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
goto out;
- llist_for_each_entry_safe(desc, t, head, llnode) {
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
- if (completed) {
- idxd_free_desc(desc->wq, desc);
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- break;
} else {
spin_lock_irqsave(&irq_entry->list_lock, flags);
list_add_tail(&desc->list,
@@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
{
- struct list_head *node, *next;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ LIST_HEAD(flist);
+ struct idxd_desc *desc, *n;
+ enum idxd_complete_type reason;
*processed = 0;
- spin_lock_irqsave(&irq_entry->list_lock, flags);
- if (list_empty(&irq_entry->work_list))
- goto out;
-
- list_for_each_safe(node, next, &irq_entry->work_list) {
- struct idxd_desc *desc =
- container_of(node, struct idxd_desc, list);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
+ /*
+ * This lock protects the list from corruption caused by accesses
+ * made outside of the IRQ handler thread.
+ */
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
+ if (list_empty(&irq_entry->work_list)) {
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ return 0;
+ }
- if (completed) {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+ if (desc->completion->status) {
list_del(&desc->list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- idxd_free_desc(desc->wq, desc);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- return queued;
+ list_add_tail(&desc->list, &flist);
} else {
queued++;
}
- spin_lock_irqsave(&irq_entry->list_lock, flags);
}
- out:
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+
+ list_for_each_entry(desc, &flist, list) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
+ }
+
return queued;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 266423a2cabc..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
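+	/* Unwind only the drivers at indices 0..i-1, which registered successfully */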
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 41ba21eea7c8..d5590c08db51 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1952,8 +1952,6 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
static int sdma_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(sdma_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *spba_bus;
const char *fw_name;
@@ -1961,17 +1959,9 @@ static int sdma_probe(struct platform_device *pdev)
int irq;
struct resource *iores;
struct resource spba_res;
- struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
- const struct sdma_driver_data *drvdata = NULL;
-
- drvdata = of_id->data;
- if (!drvdata) {
- dev_err(&pdev->dev, "unable to find driver data\n");
- return -EINVAL;
- }
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
@@ -1984,7 +1974,7 @@ static int sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
- sdma->drvdata = drvdata;
+ sdma->drvdata = of_device_get_match_data(sdma->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -2063,8 +2053,6 @@ static int sdma_probe(struct platform_device *pdev)
if (sdma->drvdata->script_addrs)
sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
- if (pdata && pdata->script_addrs)
- sdma_add_scripts(sdma, pdata->script_addrs);
sdma->dma_device.dev = &pdev->dev;
@@ -2110,30 +2098,18 @@ static int sdma_probe(struct platform_device *pdev)
}
/*
- * Kick off firmware loading as the very last step:
- * attempt to load firmware only if we're not on the error path, because
- * the firmware callback requires a fully functional and allocated sdma
- * instance.
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for device tree
+ * probe; otherwise probing fails.
*/
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
} else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret) {
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- } else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
}
return 0;
diff --git a/drivers/dma/lgm/Kconfig b/drivers/dma/lgm/Kconfig
new file mode 100644
index 000000000000..9194330ed0f2
--- /dev/null
+++ b/drivers/dma/lgm/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTEL_LDMA
+ bool "Lightning Mountain centralized DMA controllers"
+ depends on X86 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for Intel Lightning Mountain SOC DMA controllers.
+ These controllers provide DMA capabilities for a variety of on-chip
+ devices such as HSNAND and GSWIP (Gigabit Switch IP).
diff --git a/drivers/dma/lgm/Makefile b/drivers/dma/lgm/Makefile
new file mode 100644
index 000000000000..f318a8eff464
--- /dev/null
+++ b/drivers/dma/lgm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INTEL_LDMA) += lgm-dma.o
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
new file mode 100644
index 000000000000..efe8bd3a0e2a
--- /dev/null
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lightning Mountain centralized DMA controller driver
+ *
+ * Copyright (c) 2016 - 2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define DRIVER_NAME "lgm-dma"
+
+#define DMA_ID 0x0008
+#define DMA_ID_REV GENMASK(7, 0)
+#define DMA_ID_PNR GENMASK(19, 16)
+#define DMA_ID_CHNR GENMASK(26, 20)
+#define DMA_ID_DW_128B BIT(27)
+#define DMA_ID_AW_36B BIT(28)
+#define DMA_VER32 0x32
+#define DMA_VER31 0x31
+#define DMA_VER22 0x0A
+
+#define DMA_CTRL 0x0010
+#define DMA_CTRL_RST BIT(0)
+#define DMA_CTRL_DSRAM_PATH BIT(1)
+#define DMA_CTRL_DBURST_WR BIT(3)
+#define DMA_CTRL_VLD_DF_ACK BIT(4)
+#define DMA_CTRL_CH_FL BIT(6)
+#define DMA_CTRL_DS_FOD BIT(7)
+#define DMA_CTRL_DRB BIT(8)
+#define DMA_CTRL_ENBE BIT(9)
+#define DMA_CTRL_DESC_TMOUT_CNT_V31 GENMASK(27, 16)
+#define DMA_CTRL_DESC_TMOUT_EN_V31 BIT(30)
+#define DMA_CTRL_PKTARB BIT(31)
+
+#define DMA_CPOLL 0x0014
+#define DMA_CPOLL_CNT GENMASK(15, 4)
+#define DMA_CPOLL_EN BIT(31)
+
+#define DMA_CS 0x0018
+#define DMA_CS_MASK GENMASK(5, 0)
+
+#define DMA_CCTRL 0x001C
+#define DMA_CCTRL_ON BIT(0)
+#define DMA_CCTRL_RST BIT(1)
+#define DMA_CCTRL_CH_POLL_EN BIT(2)
+#define DMA_CCTRL_CH_ABC BIT(3) /* Adaptive Burst Chop */
+#define DMA_CDBA_MSB GENMASK(7, 4)
+#define DMA_CCTRL_DIR_TX BIT(8)
+#define DMA_CCTRL_CLASS GENMASK(11, 9)
+#define DMA_CCTRL_CLASSH GENMASK(19, 18)
+#define DMA_CCTRL_WR_NP_EN BIT(21)
+#define DMA_CCTRL_PDEN BIT(23)
+#define DMA_MAX_CLASS (SZ_32 - 1)
+
+#define DMA_CDBA 0x0020
+#define DMA_CDLEN 0x0024
+#define DMA_CIS 0x0028
+#define DMA_CIE 0x002C
+#define DMA_CI_EOP BIT(1)
+#define DMA_CI_DUR BIT(2)
+#define DMA_CI_DESCPT BIT(3)
+#define DMA_CI_CHOFF BIT(4)
+#define DMA_CI_RDERR BIT(5)
+#define DMA_CI_ALL \
+ (DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
+
+#define DMA_PS 0x0040
+#define DMA_PCTRL 0x0044
+#define DMA_PCTRL_RXBL16 BIT(0)
+#define DMA_PCTRL_TXBL16 BIT(1)
+#define DMA_PCTRL_RXBL GENMASK(3, 2)
+#define DMA_PCTRL_RXBL_8 3
+#define DMA_PCTRL_TXBL GENMASK(5, 4)
+#define DMA_PCTRL_TXBL_8 3
+#define DMA_PCTRL_PDEN BIT(6)
+#define DMA_PCTRL_RXBL32 BIT(7)
+#define DMA_PCTRL_RXENDI GENMASK(9, 8)
+#define DMA_PCTRL_TXENDI GENMASK(11, 10)
+#define DMA_PCTRL_TXBL32 BIT(15)
+#define DMA_PCTRL_MEM_FLUSH BIT(16)
+
+#define DMA_IRNEN1 0x00E8
+#define DMA_IRNCR1 0x00EC
+#define DMA_IRNEN 0x00F4
+#define DMA_IRNCR 0x00F8
+#define DMA_C_DP_TICK 0x100
+#define DMA_C_DP_TICK_TIKNARB GENMASK(15, 0)
+#define DMA_C_DP_TICK_TIKARB GENMASK(31, 16)
+
+#define DMA_C_HDRM 0x110
+/*
+ * If header mode is set in the DMA descriptor:
+ * - with bit 30 cleared, HDR_LEN must be configured according to the
+ *   channel's requirements;
+ * - with bit 30 set (checksum with header mode), HDR_LEN need not be
+ *   configured, and checksum offload is enabled for the switch.
+ * If header mode is not set in the DMA descriptor, this register
+ * setting doesn't matter.
+ */
+#define DMA_C_HDRM_HDR_SUM BIT(30)
+
+#define DMA_C_BOFF 0x120
+#define DMA_C_BOFF_BOF_LEN GENMASK(7, 0)
+#define DMA_C_BOFF_EN BIT(31)
+
+#define DMA_ORRC 0x190
+#define DMA_ORRC_ORRCNT GENMASK(8, 4)
+#define DMA_ORRC_EN BIT(31)
+
+#define DMA_C_ENDIAN 0x200
+#define DMA_C_END_DATAENDI GENMASK(1, 0)
+#define DMA_C_END_DE_EN BIT(7)
+#define DMA_C_END_DESENDI GENMASK(9, 8)
+#define DMA_C_END_DES_EN BIT(16)
+
+/* DMA controller capability */
+#define DMA_ADDR_36BIT BIT(0)
+#define DMA_DATA_128BIT BIT(1)
+#define DMA_CHAN_FLOW_CTL BIT(2)
+#define DMA_DESC_FOD BIT(3)
+#define DMA_DESC_IN_SRAM BIT(4)
+#define DMA_EN_BYTE_EN BIT(5)
+#define DMA_DBURST_WR BIT(6)
+#define DMA_VALID_DESC_FETCH_ACK BIT(7)
+#define DMA_DFT_DRB BIT(8)
+
+#define DMA_ORRC_MAX_CNT (SZ_32 - 1)
+#define DMA_DFT_POLL_CNT SZ_4
+#define DMA_DFT_BURST_V22 SZ_2
+#define DMA_BURSTL_8DW SZ_8
+#define DMA_BURSTL_16DW SZ_16
+#define DMA_BURSTL_32DW SZ_32
+#define DMA_DFT_BURST DMA_BURSTL_16DW
+#define DMA_MAX_DESC_NUM (SZ_8K - 1)
+#define DMA_CHAN_BOFF_MAX (SZ_256 - 1)
+#define DMA_DFT_ENDIAN 0
+
+#define DMA_DFT_DESC_TCNT 50
+#define DMA_HDR_LEN_MAX (SZ_16K - 1)
+
+/* DMA flags */
+#define DMA_TX_CH BIT(0)
+#define DMA_RX_CH BIT(1)
+#define DEVICE_ALLOC_DESC BIT(2)
+#define CHAN_IN_USE BIT(3)
+#define DMA_HW_DESC BIT(4)
+
+/* Descriptor fields */
+#define DESC_DATA_LEN GENMASK(15, 0)
+#define DESC_BYTE_OFF GENMASK(25, 23)
+#define DESC_EOP BIT(28)
+#define DESC_SOP BIT(29)
+#define DESC_C BIT(30)
+#define DESC_OWN BIT(31)
+
+#define DMA_CHAN_RST 1
+#define DMA_MAX_SIZE (BIT(16) - 1)
+#define MAX_LOWER_CHANS 32
+#define MASK_LOWER_CHANS GENMASK(4, 0)
+#define DMA_OWN 1
+#define HIGH_4_BITS GENMASK(3, 0)
+#define DMA_DFT_DESC_NUM 1
+#define DMA_PKT_DROP_DIS 0
+
+enum ldma_chan_on_off {
+ DMA_CH_OFF = 0,
+ DMA_CH_ON = 1,
+};
+
+enum {
+ DMA_TYPE_TX = 0,
+ DMA_TYPE_RX,
+ DMA_TYPE_MCPY,
+};
+
+struct ldma_dev;
+struct ldma_port;
+
+struct ldma_chan {
+ struct virt_dma_chan vchan;
+ struct ldma_port *port; /* back pointer */
+ char name[8]; /* Channel name */
+ int nr; /* Channel id in hardware */
+ u32 flags; /* central way or channel based way */
+ enum ldma_chan_on_off onoff;
+ dma_addr_t desc_phys;
+ void *desc_base; /* Virtual address */
+ u32 desc_cnt; /* Number of descriptors */
+ int rst;
+ u32 hdrm_len;
+ bool hdrm_csum;
+ u32 boff_len;
+ u32 data_endian;
+ u32 desc_endian;
+ bool pden;
+ bool desc_rx_np;
+ bool data_endian_en;
+ bool desc_endian_en;
+ bool abc_en;
+ bool desc_init;
+ struct dma_pool *desc_pool; /* Descriptors pool */
+ u32 desc_num;
+ struct dw2_desc_sw *ds;
+ struct work_struct work;
+ struct dma_slave_config config;
+};
+
+struct ldma_port {
+ struct ldma_dev *ldev; /* back pointer */
+ u32 portid;
+ u32 rxbl;
+ u32 txbl;
+ u32 rxendi;
+ u32 txendi;
+ u32 pkt_drop;
+};
+
+/* Instance specific data */
+struct ldma_inst_data {
+ bool desc_in_sram;
+ bool chan_fc;
+ bool desc_fod; /* Fetch On Demand */
+ bool valid_desc_fetch_ack;
+ u32 orrc; /* Outstanding read count */
+ const char *name;
+ u32 type;
+};
+
+struct ldma_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rst;
+ struct clk *core_clk;
+ struct dma_device dma_dev;
+ u32 ver;
+ int irq;
+ struct ldma_port *ports;
+ struct ldma_chan *chans; /* channel list on this DMA or port */
+ spinlock_t dev_lock; /* Controller register exclusive */
+ u32 chan_nrs;
+ u32 port_nrs;
+ u32 channels_mask;
+ u32 flags;
+ u32 pollcnt;
+ const struct ldma_inst_data *inst;
+ struct workqueue_struct *wq;
+};
+
+struct dw2_desc {
+ u32 field;
+ u32 addr;
+} __packed __aligned(8);
+
+struct dw2_desc_sw {
+ struct virt_dma_desc vdesc;
+ struct ldma_chan *chan;
+ dma_addr_t desc_phys;
+ size_t desc_cnt;
+ size_t size;
+ struct dw2_desc *desc_hw;
+};
+
+static inline void
+ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
+{
+ u32 old_val, new_val;
+
+ old_val = readl(d->base + ofs);
+ new_val = (old_val & ~mask) | (val & mask);
+
+ if (new_val != old_val)
+ writel(new_val, d->base + ofs);
+}
+
+static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ldma_chan, vchan.chan);
+}
+
+static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
+{
+ return container_of(dma_dev, struct ldma_dev, dma_dev);
+}
+
+static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct dw2_desc_sw, vdesc);
+}
+
+static inline bool ldma_chan_tx(struct ldma_chan *c)
+{
+ return !!(c->flags & DMA_TX_CH);
+}
+
+static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
+{
+ return !!(c->flags & DMA_HW_DESC);
+}
+
+static void ldma_dev_reset(struct ldma_dev *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_PKTARB;
+ u32 val = enable ? DMA_CTRL_PKTARB : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_DSRAM_PATH;
+ u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_TX)
+ return;
+
+ mask = DMA_CTRL_CH_FL;
+ val = enable ? DMA_CTRL_CH_FL : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_global_polling_enable(struct ldma_dev *d)
+{
+ unsigned long flags;
+ u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
+ u32 val = DMA_CPOLL_EN;
+
+ val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CPOLL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type == DMA_TYPE_MCPY)
+ return;
+
+ mask = DMA_CTRL_DS_FOD;
+ val = enable ? DMA_CTRL_DS_FOD : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_ENBE;
+ u32 val = enable ? DMA_CTRL_ENBE : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_orrc_cfg(struct ldma_dev *d)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 mask;
+
+ if (d->inst->type == DMA_TYPE_RX)
+ return;
+
+ mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
+ if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
+ val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_ORRC);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
+{
+ u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31;
+ unsigned long flags;
+ u32 val;
+
+ if (enable)
+ val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);
+ else
+ val = 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
+ return;
+
+ mask = DMA_CTRL_DBURST_WR;
+ val = enable ? DMA_CTRL_DBURST_WR : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_TX)
+ return;
+
+ mask = DMA_CTRL_VLD_DF_ACK;
+ val = enable ? DMA_CTRL_VLD_DF_ACK : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_DRB;
+ u32 val = enable ? DMA_CTRL_DRB : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static int ldma_dev_cfg(struct ldma_dev *d)
+{
+ bool enable;
+
+ ldma_dev_pkt_arb_cfg(d, true);
+ ldma_dev_global_polling_enable(d);
+
+ enable = !!(d->flags & DMA_DFT_DRB);
+ ldma_dev_drb_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_EN_BYTE_EN);
+ ldma_dev_byte_enable_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
+ ldma_dev_chan_flow_ctl_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DESC_FOD);
+ ldma_dev_desc_fetch_on_demand_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DESC_IN_SRAM);
+ ldma_dev_sram_desc_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DBURST_WR);
+ ldma_dev_dburst_wr_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
+ ldma_dev_vld_fetch_ack_cfg(d, enable);
+
+ if (d->ver > DMA_VER22) {
+ ldma_dev_orrc_cfg(d);
+ ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
+ }
+
+ dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
+ d->inst->name, readl(d->base + DMA_CTRL));
+
+ return 0;
+}
+
+static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 class_low, class_high;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ reg = readl(d->base + DMA_CCTRL);
+ /* Read from hardware */
+ if (reg & DMA_CCTRL_DIR_TX)
+ c->flags |= DMA_TX_CH;
+ else
+ c->flags |= DMA_RX_CH;
+
+ /* Keep the class value unchanged */
+ class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
+ class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
+ val &= ~DMA_CCTRL_CLASS;
+ val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
+ val &= ~DMA_CCTRL_CLASSH;
+ val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
+ writel(val, d->base + DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ return 0;
+}
+
+static void ldma_chan_irq_init(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 enofs, crofs;
+ u32 cn_bit;
+
+ if (c->nr < MAX_LOWER_CHANS) {
+ enofs = DMA_IRNEN;
+ crofs = DMA_IRNCR;
+ } else {
+ enofs = DMA_IRNEN1;
+ crofs = DMA_IRNCR1;
+ }
+
+ cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+
+ /* Disable all channel interrupts and clear any pending status */
+ writel(0, d->base + DMA_CIE);
+ writel(DMA_CI_ALL, d->base + DMA_CIS);
+
+ ldma_update_bits(d, cn_bit, 0, enofs);
+ writel(cn_bit, d->base + crofs);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 class_val;
+
+ if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
+ return;
+
+ /* 3 bits low */
+ class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
+ /* 2 bits high */
+ class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
+ DMA_CCTRL);
+}
+
+static int ldma_chan_on(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ /* Don't allow the channel to be turned on before its descriptors are configured */
+ if (WARN_ON(!c->desc_init))
+ return -EINVAL;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ c->onoff = DMA_CH_ON;
+
+ return 0;
+}
+
+static int ldma_chan_off(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
+ !(val & DMA_CCTRL_ON), 0, 10000);
+ if (ret)
+ return ret;
+
+ c->onoff = DMA_CH_OFF;
+
+ return 0;
+}
+
+static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
+ int desc_num)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ writel(lower_32_bits(desc_base), d->base + DMA_CDBA);
+
+ /* Higher 4 bits of 36 bit addressing */
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;
+
+ ldma_update_bits(d, DMA_CDBA_MSB,
+ FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
+ }
+ writel(desc_num, d->base + DMA_CDLEN);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ c->desc_init = true;
+}
+
+static struct dma_async_tx_descriptor *
+ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ struct dma_async_tx_descriptor *tx;
+ struct dw2_desc_sw *ds;
+
+ if (!desc_num) {
+ dev_err(d->dev, "Channel %d must allocate descriptor first\n",
+ c->nr);
+ return NULL;
+ }
+
+ if (desc_num > DMA_MAX_DESC_NUM) {
+ dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
+ c->nr, desc_num);
+ return NULL;
+ }
+
+ ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
+
+ c->flags |= DMA_HW_DESC;
+ c->desc_cnt = desc_num;
+ c->desc_phys = desc_base;
+
+ ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+ if (!ds)
+ return NULL;
+
+ tx = &ds->vdesc.tx;
+ dma_async_tx_descriptor_init(tx, chan);
+
+ return tx;
+}
+
+static int ldma_chan_reset(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = ldma_chan_off(c);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
+ !(val & DMA_CCTRL_RST), 0, 10000);
+ if (ret)
+ return ret;
+
+ c->rst = 1;
+ c->desc_init = false;
+
+ return 0;
+}
+
+static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
+ u32 val;
+
+ if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
+ val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN;
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_BOFF);
+}
+
+static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
+ u32 endian_type)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
+ u32 val;
+
+ if (enable)
+ val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type);
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
+ u32 endian_type)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
+ u32 val;
+
+ if (enable)
+ val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+ /* NB: with checksum disabled, a valid header length must be provided */
+ if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
+ return;
+
+ mask = DMA_C_HDRM_HDR_SUM;
+ val = DMA_C_HDRM_HDR_SUM;
+
+ if (!csum && hdr_len)
+ val = hdr_len;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_HDRM);
+}
+
+static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+ /* Only valid for RX channel */
+ if (ldma_chan_tx(c))
+ return;
+
+ mask = DMA_CCTRL_WR_NP_EN;
+ val = enable ? DMA_CCTRL_WR_NP_EN : 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+ if (d->ver < DMA_VER32 || ldma_chan_tx(c))
+ return;
+
+ mask = DMA_CCTRL_CH_ABC;
+ val = enable ? DMA_CCTRL_CH_ABC : 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static int ldma_port_cfg(struct ldma_port *p)
+{
+ unsigned long flags;
+ struct ldma_dev *d;
+ u32 reg;
+
+ d = p->ldev;
+ reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
+ reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);
+
+ if (d->ver == DMA_VER22) {
+ reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
+ reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
+ } else {
+ reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);
+
+ if (p->txbl == DMA_BURSTL_32DW)
+ reg |= DMA_PCTRL_TXBL32;
+ else if (p->txbl == DMA_BURSTL_16DW)
+ reg |= DMA_PCTRL_TXBL16;
+ else
+ reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);
+
+ if (p->rxbl == DMA_BURSTL_32DW)
+ reg |= DMA_PCTRL_RXBL32;
+ else if (p->rxbl == DMA_BURSTL_16DW)
+ reg |= DMA_PCTRL_RXBL16;
+ else
+ reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
+ }
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ writel(p->portid, d->base + DMA_PS);
+ writel(reg, d->base + DMA_PCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ reg = readl(d->base + DMA_PCTRL); /* read back */
+ dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);
+
+ return 0;
+}
+
+static int ldma_chan_cfg(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 reg;
+
+ reg = c->pden ? DMA_CCTRL_PDEN : 0;
+ reg |= c->onoff ? DMA_CCTRL_ON : 0;
+ reg |= c->rst ? DMA_CCTRL_RST : 0;
+
+ ldma_chan_cctrl_cfg(c, reg);
+ ldma_chan_irq_init(c);
+
+ if (d->ver <= DMA_VER22)
+ return 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_chan_set_class(c, c->nr);
+ ldma_chan_byte_offset_cfg(c, c->boff_len);
+ ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
+ ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
+ ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
+ ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
+ ldma_chan_abc_cfg(c, c->abc_en);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ if (ldma_chan_is_hw_desc(c))
+ ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
+
+ return 0;
+}
+
+static void ldma_dev_init(struct ldma_dev *d)
+{
+ unsigned long ch_mask = (unsigned long)d->channels_mask;
+ struct ldma_port *p;
+ struct ldma_chan *c;
+ int i;
+ u32 j;
+
+ spin_lock_init(&d->dev_lock);
+ ldma_dev_reset(d);
+ ldma_dev_cfg(d);
+
+ /* DMA port initialization */
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ ldma_port_cfg(p);
+ }
+
+ /* DMA channel initialization */
+ for_each_set_bit(j, &ch_mask, d->chan_nrs) {
+ c = &d->chans[j];
+ ldma_chan_cfg(c);
+ }
+}
+
+static int ldma_cfg_init(struct ldma_dev *d)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(d->dev);
+ struct ldma_port *p;
+ int i;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
+ d->flags |= DMA_EN_BYTE_EN;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
+ d->flags |= DMA_DBURST_WR;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
+ d->flags |= DMA_DFT_DRB;
+
+ if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
+ &d->pollcnt))
+ d->pollcnt = DMA_DFT_POLL_CNT;
+
+ if (d->inst->chan_fc)
+ d->flags |= DMA_CHAN_FLOW_CTL;
+
+ if (d->inst->desc_fod)
+ d->flags |= DMA_DESC_FOD;
+
+ if (d->inst->desc_in_sram)
+ d->flags |= DMA_DESC_IN_SRAM;
+
+ if (d->inst->valid_desc_fetch_ack)
+ d->flags |= DMA_VALID_DESC_FETCH_ACK;
+
+ if (d->ver > DMA_VER22) {
+ if (!d->port_nrs)
+ return -EINVAL;
+
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ p->rxendi = DMA_DFT_ENDIAN;
+ p->txendi = DMA_DFT_ENDIAN;
+ p->rxbl = DMA_DFT_BURST;
+ p->txbl = DMA_DFT_BURST;
+ p->pkt_drop = DMA_PKT_DROP_DIS;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
+{
+ struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
+ struct ldma_chan *c = ds->chan;
+
+ dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
+ kfree(ds);
+}
+
+static struct dw2_desc_sw *
+dma_alloc_desc_resource(int num, struct ldma_chan *c)
+{
+ struct device *dev = c->vchan.chan.device->dev;
+ struct dw2_desc_sw *ds;
+
+ if (num > c->desc_num) {
+ dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
+ return NULL;
+ }
+
+ ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+ if (!ds)
+ return NULL;
+
+ ds->chan = c;
+ ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
+ &ds->desc_phys);
+ if (!ds->desc_hw) {
+ dev_dbg(dev, "out of memory for link descriptor\n");
+ kfree(ds);
+ return NULL;
+ }
+ ds->desc_cnt = num;
+
+ return ds;
+}
+
+static void ldma_chan_irq_en(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ writel(c->nr, d->base + DMA_CS);
+ writel(DMA_CI_EOP, d->base + DMA_CIE);
+ writel(BIT(c->nr), d->base + DMA_IRNEN);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_issue_pending(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ if (d->ver == DMA_VER22) {
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ if (vchan_issue_pending(&c->vchan)) {
+ struct virt_dma_desc *vdesc;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&c->vchan);
+ if (!vdesc) {
+ c->ds = NULL;
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ return;
+ }
+ list_del(&vdesc->node);
+ c->ds = to_lgm_dma_desc(vdesc);
+ ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
+ ldma_chan_irq_en(c);
+ }
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ }
+ ldma_chan_on(c);
+}
+
+static void ldma_synchronize(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ /*
+ * Cancel any pending work; in that case the
+ * descriptor resource needs to be freed here.
+ */
+ cancel_work_sync(&c->work);
+ vchan_synchronize(&c->vchan);
+ if (c->ds)
+ dma_free_desc_resource(&c->ds->vdesc);
+}
+
+static int ldma_terminate_all(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ vchan_get_all_descriptors(&c->vchan, &head);
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ vchan_dma_desc_free_list(&c->vchan, &head);
+
+ return ldma_chan_reset(c);
+}
+
+static int ldma_resume_chan(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ ldma_chan_on(c);
+
+ return 0;
+}
+
+static int ldma_pause_chan(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ return ldma_chan_off(c);
+}
+
+static enum dma_status
+ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ enum dma_status status = DMA_COMPLETE;
+
+ if (d->ver == DMA_VER22)
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ return status;
+}
+
+static void dma_chan_irq(int irq, void *data)
+{
+ struct ldma_chan *c = data;
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 stat;
+
+ /* Disable channel interrupts */
+ writel(c->nr, d->base + DMA_CS);
+ stat = readl(d->base + DMA_CIS);
+ if (!stat)
+ return;
+
+ writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
+ writel(stat, d->base + DMA_CIS);
+ queue_work(d->wq, &c->work);
+}
+
+static irqreturn_t dma_interrupt(int irq, void *dev_id)
+{
+ struct ldma_dev *d = dev_id;
+ struct ldma_chan *c;
+ unsigned long irncr;
+ u32 cid;
+
+ irncr = readl(d->base + DMA_IRNCR);
+ if (!irncr) {
+ dev_err(d->dev, "dummy interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(cid, &irncr, d->chan_nrs) {
+ /* Mask */
+ writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
+ /* Ack */
+ writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
+
+ c = &d->chans[cid];
+ dma_chan_irq(irq, c);
+ }
+
+ return IRQ_HANDLED;
+}
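
A standalone sketch of the mask-then-ack pattern in dma_interrupt() above: each pending channel bit is first masked out of the enable word and then written back to the status word to acknowledge it. Plain variables stand in for the IRNEN/IRNCR registers.

    #include <stdio.h>

    int main(void)
    {
            unsigned long irncr = 0x0b;     /* channels 0, 1 and 3 pending */
            unsigned long irnen = 0xff;     /* all channels enabled */

            while (irncr) {
                    int cid = __builtin_ctzl(irncr);

                    irnen &= ~(1UL << cid); /* mask, like the IRNEN write */
                    irncr &= ~(1UL << cid); /* ack, like the IRNCR write */
                    printf("service channel %d\n", cid);
            }
            printf("irnen now 0x%02lx\n", irnen);   /* prints 0xf4 */
            return 0;
    }
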
+
+static void prep_slave_burst_len(struct ldma_chan *c)
+{
+ struct ldma_port *p = c->port;
+ struct dma_slave_config *cfg = &c->config;
+
+ if (cfg->dst_maxburst)
+ cfg->src_maxburst = cfg->dst_maxburst;
+
+ /* TX and RX have the same burst length */
+ p->txbl = ilog2(cfg->src_maxburst);
+ p->rxbl = p->txbl;
+}
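
The ilog2() conversion in prep_slave_burst_len() maps a power-of-two maxburst to the hardware burst-length code. The user-space sketch below uses __builtin_ctz() as a stand-in for the kernel's ilog2() on power-of-two inputs.

    #include <stdio.h>

    /* stands in for the kernel's ilog2() on power-of-two inputs */
    static unsigned int ilog2_pow2(unsigned int v)
    {
            return __builtin_ctz(v);        /* 8 -> 3, 16 -> 4, 32 -> 5 */
    }

    int main(void)
    {
            unsigned int bursts[] = { 2, 4, 8, 16, 32 };

            for (unsigned int i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
                    printf("maxburst %2u -> txbl %u\n",
                           bursts[i], ilog2_pow2(bursts[i]));
            return 0;
    }
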
+
+static struct dma_async_tx_descriptor *
+ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ size_t len, avail, total = 0;
+ struct dw2_desc *hw_ds;
+ struct dw2_desc_sw *ds;
+ struct scatterlist *sg;
+ int num = sglen, i;
+ dma_addr_t addr;
+
+ if (!sgl)
+ return NULL;
+
+ if (d->ver > DMA_VER22)
+ return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = dma_alloc_desc_resource(num, c);
+ if (!ds)
+ return NULL;
+
+ c->ds = ds;
+
+ num = 0;
+ /* SOP and EOP have to be handled carefully */
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ hw_ds = &ds->desc_hw[num];
+ switch (sglen) {
+ case 1:
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+ break;
+ default:
+ if (num == 0) {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+ } else if (num == (sglen - 1)) {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+ } else {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+ }
+ break;
+ }
+ /* Only 32-bit addresses are supported */
+ hw_ds->addr = (u32)addr;
+
+ hw_ds->field &= ~DESC_DATA_LEN;
+ hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
+
+ hw_ds->field &= ~DESC_C;
+ hw_ds->field |= FIELD_PREP(DESC_C, 0);
+
+ hw_ds->field &= ~DESC_BYTE_OFF;
+ hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
+
+ /* Ensure data ready before ownership change */
+ wmb();
+ hw_ds->field &= ~DESC_OWN;
+ hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
+
+ /* Ensure ownership changed before moving forward */
+ wmb();
+ num++;
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->size = total;
+ prep_slave_burst_len(c);
+
+ return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
+}
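
A standalone model of the SOP/EOP rule ldma_prep_slave_sg() implements: the first descriptor of a frame carries SOP, the last carries EOP, and a single-descriptor frame carries both. The flag values here are illustrative, not the hardware layout.

    #include <stdio.h>

    #define DESC_SOP 0x1    /* illustrative flag values */
    #define DESC_EOP 0x2

    int main(void)
    {
            int n = 4;      /* descriptors in one frame */

            for (int i = 0; i < n; i++) {
                    unsigned int field = 0;

                    if (i == 0)
                            field |= DESC_SOP;
                    if (i == n - 1)
                            field |= DESC_EOP;
                    printf("desc %d:%s%s\n", i,
                           field & DESC_SOP ? " SOP" : "",
                           field & DESC_EOP ? " EOP" : "");
            }
            return 0;
    }
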
+
+static int
+ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ memcpy(&c->config, cfg, sizeof(c->config));
+
+ return 0;
+}
+
+static int ldma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ struct device *dev = c->vchan.chan.device->dev;
+ size_t desc_sz;
+
+ if (d->ver > DMA_VER22) {
+ c->flags |= CHAN_IN_USE;
+ return 0;
+ }
+
+ if (c->desc_pool)
+ return c->desc_num;
+
+ desc_sz = c->desc_num * sizeof(struct dw2_desc);
+ c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
+ __alignof__(struct dw2_desc), 0);
+
+ if (!c->desc_pool) {
+ dev_err(dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return c->desc_num;
+}
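
The dma_pool created here is paired with the per-descriptor allocations in dma_alloc_desc_resource() and torn down in ldma_free_chan_resources(). A hedged kernel-style fragment of that lifecycle (not a standalone program; my_pool, my_dev, size and align are illustrative):

    struct dma_pool *pool;
    dma_addr_t phys;
    void *virt;

    pool = dma_pool_create("my_pool", my_dev, size, align, 0);
    if (!pool)
            return -ENOMEM;

    virt = dma_pool_zalloc(pool, GFP_ATOMIC, &phys);    /* per descriptor */
    if (virt)
            dma_pool_free(pool, virt, phys);            /* on completion */

    dma_pool_destroy(pool);                             /* on teardown */
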
+
+static void ldma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+
+ if (d->ver == DMA_VER22) {
+ dma_pool_destroy(c->desc_pool);
+ c->desc_pool = NULL;
+ vchan_free_chan_resources(to_virt_chan(chan));
+ ldma_chan_reset(c);
+ } else {
+ c->flags &= ~CHAN_IN_USE;
+ }
+}
+
+static void dma_work(struct work_struct *work)
+{
+ struct ldma_chan *c = container_of(work, struct ldma_chan, work);
+ struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
+ struct virt_dma_chan *vc = &c->vchan;
+ struct dmaengine_desc_callback cb;
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ dmaengine_desc_get_callback(tx, &cb);
+ dma_cookie_complete(tx);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ dmaengine_desc_get_callback(tx, &cb);
+ dma_cookie_complete(tx);
+ list_del(&vd->node);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ vchan_vdesc_fini(vd);
+ }
+ c->ds = NULL;
+}
+
+static void
+update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+ if (ldma_chan_tx(c))
+ p->txbl = ilog2(burst);
+ else
+ p->rxbl = ilog2(burst);
+}
+
+static void
+update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+ if (ldma_chan_tx(c))
+ p->txbl = burst;
+ else
+ p->rxbl = burst;
+}
+
+static int
+update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
+{
+ struct ldma_dev *d = ofdma->of_dma_data;
+ u32 chan_id = spec->args[0];
+ u32 port_id = spec->args[1];
+ u32 burst = spec->args[2];
+ struct ldma_port *p;
+ struct ldma_chan *c;
+
+ if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
+ return 0;
+
+ p = &d->ports[port_id];
+ c = &d->chans[chan_id];
+ c->port = p;
+
+ if (d->ver == DMA_VER22)
+ update_burst_len_v22(c, p, burst);
+ else
+ update_burst_len_v3X(c, p, burst);
+
+ ldma_port_cfg(p);
+
+ return 1;
+}
+
+static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
+ struct of_dma *ofdma)
+{
+ struct ldma_dev *d = ofdma->of_dma_data;
+ u32 chan_id = spec->args[0];
+ int ret;
+
+ if (!spec->args_count)
+ return NULL;
+
+ /* If args_count is 1, the driver uses default settings */
+ if (spec->args_count > 1) {
+ ret = update_client_configs(ofdma, spec);
+ if (!ret)
+ return NULL;
+ }
+
+ return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
+}
+
+static void ldma_dma_init_v22(int i, struct ldma_dev *d)
+{
+ struct ldma_chan *c;
+
+ c = &d->chans[i];
+ c->nr = i; /* Real channel number */
+ c->rst = DMA_CHAN_RST;
+ c->desc_num = DMA_DFT_DESC_NUM;
+ snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
+ INIT_WORK(&c->work, dma_work);
+ c->vchan.desc_free = dma_free_desc_resource;
+ vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
+{
+ struct ldma_chan *c;
+
+ c = &d->chans[i];
+ c->data_endian = DMA_DFT_ENDIAN;
+ c->desc_endian = DMA_DFT_ENDIAN;
+ c->data_endian_en = false;
+ c->desc_endian_en = false;
+ c->desc_rx_np = false;
+ c->flags |= DEVICE_ALLOC_DESC;
+ c->onoff = DMA_CH_OFF;
+ c->rst = DMA_CHAN_RST;
+ c->abc_en = true;
+ c->hdrm_csum = false;
+ c->boff_len = 0;
+ c->nr = i;
+ c->vchan.desc_free = dma_free_desc_resource;
+ vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
+{
+ int ret;
+
+ ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
+ if (ret < 0) {
+ dev_err(d->dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ d->irq = platform_get_irq(pdev, 0);
+ if (d->irq < 0)
+ return d->irq;
+
+ ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
+ DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI);
+ if (!d->wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ldma_clk_disable(void *data)
+{
+ struct ldma_dev *d = data;
+
+ clk_disable_unprepare(d->core_clk);
+ reset_control_assert(d->rst);
+}
+
+static const struct ldma_inst_data dma0 = {
+ .name = "dma0",
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = false,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data dma2tx = {
+ .name = "dma2tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma1rx = {
+ .name = "dma1rx",
+ .type = DMA_TYPE_RX,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data dma1tx = {
+ .name = "dma1tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma0tx = {
+ .name = "dma0tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma3 = {
+ .name = "dma3",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data toe_dma30 = {
+ .name = "toe_dma30",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data toe_dma31 = {
+ .name = "toe_dma31",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct of_device_id intel_ldma_match[] = {
+ { .compatible = "intel,lgm-cdma", .data = &dma0},
+ { .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
+ { .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
+ { .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
+ { .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
+ { .compatible = "intel,lgm-dma3", .data = &dma3},
+ { .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
+ { .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
+ {}
+};
+
+static int intel_ldma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dma_device *dma_dev;
+ unsigned long ch_mask;
+ struct ldma_chan *c;
+ struct ldma_port *p;
+ struct ldma_dev *d;
+ u32 id, bitn = 32, j;
+ int i, ret;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ /* Link controller to platform device */
+ d->dev = &pdev->dev;
+
+ d->inst = device_get_match_data(dev);
+ if (!d->inst) {
+ dev_err(dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ d->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ /* Power up and reset the DMA engine; some instances may be always on */
+ d->core_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(d->core_clk))
+ return PTR_ERR(d->core_clk);
+ clk_prepare_enable(d->core_clk);
+
+ d->rst = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(d->rst))
+ return PTR_ERR(d->rst);
+ reset_control_deassert(d->rst);
+
+ ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
+ if (ret) {
+ dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
+ return ret;
+ }
+
+ id = readl(d->base + DMA_ID);
+ d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
+ d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
+ d->ver = FIELD_GET(DMA_ID_REV, id);
+
+ if (id & DMA_ID_AW_36B)
+ d->flags |= DMA_ADDR_36BIT;
+
+ if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
+ bitn = 36;
+
+ if (id & DMA_ID_DW_128B)
+ d->flags |= DMA_DATA_128BIT;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
+ if (ret) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return ret;
+ }
+
+ if (d->ver == DMA_VER22) {
+ ret = ldma_init_v22(d, pdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
+ if (ret < 0)
+ d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
+
+ dma_dev = &d->dma_dev;
+
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* Channel initializations */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Port initialization */
+ d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
+ if (!d->ports)
+ return -ENOMEM;
+
+ /* Channel initialization */
+ d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
+ if (!d->chans)
+ return -ENOMEM;
+
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ p->portid = i;
+ p->ldev = d;
+ }
+
+ ret = ldma_cfg_init(d);
+ if (ret)
+ return ret;
+
+ dma_dev->dev = &pdev->dev;
+
+ ch_mask = (unsigned long)d->channels_mask;
+ for_each_set_bit(j, &ch_mask, d->chan_nrs) {
+ if (d->ver == DMA_VER22)
+ ldma_dma_init_v22(j, d);
+ else
+ ldma_dma_init_v3X(j, d);
+ }
+
+ dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = ldma_free_chan_resources;
+ dma_dev->device_terminate_all = ldma_terminate_all;
+ dma_dev->device_issue_pending = ldma_issue_pending;
+ dma_dev->device_tx_status = ldma_tx_status;
+ dma_dev->device_resume = ldma_resume_chan;
+ dma_dev->device_pause = ldma_pause_chan;
+ dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
+
+ if (d->ver == DMA_VER22) {
+ dma_dev->device_config = ldma_slave_config;
+ dma_dev->device_synchronize = ldma_synchronize;
+ dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_DEV_TO_MEM);
+ dma_dev->residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ }
+
+ platform_set_drvdata(pdev, d);
+
+ ldma_dev_init(d);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register slave DMA engine device\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
+ if (ret) {
+ dev_err(dev, "Failed to register of DMA controller\n");
+ dma_async_device_unregister(dma_dev);
+ return ret;
+ }
+
+ dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver,
+ d->port_nrs, d->chan_nrs);
+
+ return 0;
+}
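
When the optional "dma-channel-mask" property is absent, intel_ldma_probe() falls back to enabling every channel via GENMASK(chan_nrs - 1, 0). A standalone check of that fallback, with the GENMASK() macro reimplemented for user space:

    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            unsigned int chan_nrs = 16;

            printf("mask = 0x%08x\n", GENMASK(chan_nrs - 1, 0)); /* 0x0000ffff */
            return 0;
    }
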
+
+static struct platform_driver intel_ldma_driver = {
+ .probe = intel_ldma_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = intel_ldma_match,
+ },
+};
+
+/*
+ * Register this driver as a device_initcall to make sure initialization
+ * happens before its DMA clients, some of which are platform specific,
+ * and to provide registered DMA channels and DMA capabilities to clients
+ * before their initialization.
+ */
+static int __init intel_ldma_init(void)
+{
+ return platform_driver_register(&intel_ldma_driver);
+}
+
+device_initcall(intel_ldma_init);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 584c931e807a..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
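
The milbeaut fix above restores the usual goto-unwind ordering: each label releases exactly one resource, in reverse order of acquisition. A standalone illustration of the pattern (the resource names are invented for the demo):

    #include <stdio.h>

    static int step(const char *what, int fail)
    {
            printf("acquire %s\n", what);
            return fail ? -1 : 0;
    }

    int main(void)
    {
            if (step("clock", 0))
                    goto out;
            if (step("dma device", 0))
                    goto disable_clock;
            if (step("of controller", 1))   /* simulate a late failure */
                    goto unregister_dmac;
            return 0;

    unregister_dmac:
            printf("release dma device\n");
    disable_clock:
            printf("release clock\n");
    out:
            return 1;
    }
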
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b84303be8edf..89f1814ff27a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
-#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -1148,19 +1147,6 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-
- if (chan->device->dev->driver != &mmp_pdma_driver.driver)
- return false;
-
- c->drcmr = *(unsigned int *)param;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
-
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 9fede32641e9..1f0bbaed4643 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1080,8 +1080,9 @@ static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
}
static const struct of_device_id owl_dma_match[] = {
- { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+ { .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+ { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, owl_dma_match);
@@ -1245,6 +1246,7 @@ static int owl_dma_remove(struct platform_device *pdev)
owl_dma_free(od);
clk_disable_unprepare(od->clk);
+ dma_pool_destroy(od->lli_pool);
return 0;
}
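
The owl-dma change above plugs a pool leak on driver removal. A hedged kernel-style fragment of the pairing it restores (struct owl_like_dev is illustrative, not a real type); dma_pool_destroy() is NULL-safe, so it may be called unconditionally on the remove path:

    static int my_remove(struct platform_device *pdev)
    {
            struct owl_like_dev *od = platform_get_drvdata(pdev);

            clk_disable_unprepare(od->clk);
            dma_pool_destroy(od->lli_pool); /* NULL-safe */
            return 0;
    }
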
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bc0f66af0f11..fd8d2bc3be9f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3195,7 +3195,7 @@ probe_err2:
return ret;
}
-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
@@ -3235,7 +3235,6 @@ static int pl330_remove(struct amba_device *adev)
if (pl330->rstc)
reset_control_assert(pl330->rstc);
- return 0;
}
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5773d474d8f..c8a77b428b52 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
@@ -1274,13 +1270,13 @@ static int bam_dma_probe(struct platform_device *pdev)
dev_err(bdev->dev, "num-ees unspecified in dt\n");
}
- bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
- if (IS_ERR(bdev->bamclk)) {
- if (!bdev->controlled_remotely)
- return PTR_ERR(bdev->bamclk);
+ if (bdev->controlled_remotely)
+ bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
+ else
+ bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
- bdev->bamclk = NULL;
- }
+ if (IS_ERR(bdev->bamclk))
+ return PTR_ERR(bdev->bamclk);
ret = clk_prepare_enable(bdev->bamclk);
if (ret) {
@@ -1354,7 +1350,7 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_dma;
- if (bdev->controlled_remotely) {
+ if (!bdev->bamclk) {
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1442,10 +1438,10 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
{
struct bam_device *bdev = dev_get_drvdata(dev);
- if (!bdev->controlled_remotely)
+ if (bdev->bamclk) {
pm_runtime_force_suspend(dev);
-
- clk_unprepare(bdev->bamclk);
+ clk_unprepare(bdev->bamclk);
+ }
return 0;
}
@@ -1455,12 +1451,13 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
struct bam_device *bdev = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare(bdev->bamclk);
- if (ret)
- return ret;
+ if (bdev->bamclk) {
+ ret = clk_prepare(bdev->bamclk);
+ if (ret)
+ return ret;
- if (!bdev->controlled_remotely)
pm_runtime_force_resume(dev);
+ }
return 0;
}
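
The bam_dma rework keys all later clock handling off a NULL bamclk rather than the controlled_remotely flag. A hedged kernel-style fragment of the devm_clk_get_optional() idiom it relies on (clk and dev are illustrative): the optional getter returns NULL, not an error, when the clock is simply absent.

    clk = devm_clk_get_optional(dev, "bam_clk");
    if (IS_ERR(clk))
            return PTR_ERR(clk);            /* real error, e.g. -EPROBE_DEFER */
    if (clk)
            clk_prepare_enable(clk);        /* absent clock leaves clk == NULL */
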
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index d2334f535de2..57f5ee4235c7 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
gpi_write_reg(gpii, addr, val);
}
-static inline void
+static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
void __iomem *addr = gpii->regs + offset;
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
len = 1 << bit;
ring->alloc_size = (len + (len - 1));
dev_dbg(gpii->gpi_dev->dev,
- "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
elements, el_size, (elements * el_size), len,
ring->alloc_size);
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
ring->alloc_size,
&ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned) {
- dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
ring->alloc_size);
return -ENOMEM;
}
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
smp_wmb();
dev_dbg(gpii->gpi_dev->dev,
- "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
- ring->dma_handle, ring->phys_addr, ring->len,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
ring->el_size, ring->elements);
return 0;
@@ -1700,7 +1700,7 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
- };
+ }
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
return ret;
error_start_chan:
- for (i = i - 1; i >= 0; i++) {
+ for (i = i - 1; i >= 0; i--) {
gpi_stop_chan(&gpii->gchan[i]);
gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
}
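
The gpi fix above corrects an unwind loop that incremented instead of decremented its index. A standalone sketch of the intended reverse teardown after a failure at index failed_at:

    #include <stdio.h>

    int main(void)
    {
            int failed_at = 3;      /* channel 3 failed to start */

            /* tear down channels 2, 1, 0 - counting down, not up */
            for (int i = failed_at - 1; i >= 0; i--)
                    printf("undo channel %d\n", i);
            return 0;
    }
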
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index a57705356e8b..d530c1bf11d9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -189,7 +189,8 @@ struct rcar_dmac_chan {
* struct rcar_dmac - R-Car Gen2 DMA Controller
* @engine: base DMA engine object
* @dev: the hardware device
- * @iomem: remapped I/O memory base
+ * @dmac_base: remapped base register block
+ * @chan_base: remapped channel register block (optional)
* @n_channels: number of available channels
* @channels: array of DMAC channels
* @channels_mask: bitfield of which DMA channels are managed by this driver
@@ -198,7 +199,8 @@ struct rcar_dmac_chan {
struct rcar_dmac {
struct dma_device engine;
struct device *dev;
- void __iomem *iomem;
+ void __iomem *dmac_base;
+ void __iomem *chan_base;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -209,6 +211,10 @@ struct rcar_dmac {
#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+#define for_each_rcar_dmac_chan(i, dmac, chan) \
+ for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \
+ if (!((dmac)->channels_mask & BIT(i))) continue; else
+
/*
* struct rcar_dmac_of_data - This driver's OF data
* @chan_offset_base: DMAC channels base offset
@@ -230,7 +236,7 @@ struct rcar_dmac_of_data {
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
#define RCAR_DMAOR_AE (1 << 2)
#define RCAR_DMAOR_DME (1 << 0)
-#define RCAR_DMACHCLR 0x0080
+#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */
#define RCAR_DMADPSEC 0x00a0
#define RCAR_DMASAR 0x0000
@@ -293,6 +299,9 @@ struct rcar_dmac_of_data {
#define RCAR_DMAFIXDAR 0x0014
#define RCAR_DMAFIXDPBASE 0x0060
+/* For R-Car V3U */
+#define RCAR_V3U_DMACHCLR 0x0100
+
/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
@@ -303,17 +312,17 @@ struct rcar_dmac_of_data {
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
if (reg == RCAR_DMAOR)
- writew(data, dmac->iomem + reg);
+ writew(data, dmac->dmac_base + reg);
else
- writel(data, dmac->iomem + reg);
+ writel(data, dmac->dmac_base + reg);
}
static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
if (reg == RCAR_DMAOR)
- return readw(dmac->iomem + reg);
+ return readw(dmac->dmac_base + reg);
else
- return readl(dmac->iomem + reg);
+ return readl(dmac->dmac_base + reg);
}
static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
@@ -332,6 +341,28 @@ static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
writel(data, chan->iomem + reg);
}
+static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
+ struct rcar_dmac_chan *chan)
+{
+ if (dmac->chan_base)
+ rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ else
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
+}
+
+static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
+{
+ struct rcar_dmac_chan *chan;
+ unsigned int i;
+
+ if (dmac->chan_base) {
+ for_each_rcar_dmac_chan(i, dmac, chan)
+ rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ } else {
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ }
+}
+
/* -----------------------------------------------------------------------------
* Initialization and configuration
*/
@@ -447,7 +478,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ rcar_dmac_chan_clear_all(dmac);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
@@ -817,15 +848,11 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
+ struct rcar_dmac_chan *chan;
unsigned int i;
/* Stop all channels. */
- for (i = 0; i < dmac->n_channels; ++i) {
- struct rcar_dmac_chan *chan = &dmac->channels[i];
-
- if (!(dmac->channels_mask & BIT(i)))
- continue;
-
+ for_each_rcar_dmac_chan(i, dmac, chan) {
/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1566,7 +1593,7 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
* because channel is already stopped in error case.
* We need to clear register and check DE bit as recovery.
*/
- rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
+ rcar_dmac_chan_clear(dmac, chan);
rcar_dmac_chcr_de_barrier(chan);
reinit = true;
goto spin_lock_end;
@@ -1732,9 +1759,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
*/
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
- struct rcar_dmac_chan *rchan,
- const struct rcar_dmac_of_data *data,
- unsigned int index)
+ struct rcar_dmac_chan *rchan)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
struct dma_chan *chan = &rchan->chan;
@@ -1742,9 +1767,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
char *irqname;
int ret;
- rchan->index = index;
- rchan->iomem = dmac->iomem + data->chan_offset_base +
- data->chan_offset_stride * index;
rchan->mid_rid = -EINVAL;
spin_lock_init(&rchan->lock);
@@ -1756,13 +1778,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
INIT_LIST_HEAD(&rchan->desc.wait);
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ sprintf(pdev_irqname, "ch%u", rchan->index);
rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (rchan->irq < 0)
return -ENODEV;
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
- dev_name(dmac->dev), index);
+ dev_name(dmac->dev), rchan->index);
if (!irqname)
return -ENOMEM;
@@ -1828,9 +1850,11 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+ const struct rcar_dmac_of_data *data;
+ struct rcar_dmac_chan *chan;
struct dma_device *engine;
+ void __iomem *chan_base;
struct rcar_dmac *dmac;
- const struct rcar_dmac_of_data *data;
unsigned int i;
int ret;
@@ -1868,9 +1892,24 @@ static int rcar_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dmac->iomem))
- return PTR_ERR(dmac->iomem);
+ dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->dmac_base))
+ return PTR_ERR(dmac->dmac_base);
+
+ if (!data->chan_offset_base) {
+ dmac->chan_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->chan_base))
+ return PTR_ERR(dmac->chan_base);
+
+ chan_base = dmac->chan_base;
+ } else {
+ chan_base = dmac->dmac_base + data->chan_offset_base;
+ }
+
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ chan->index = i;
+ chan->iomem = chan_base + i * data->chan_offset_stride;
+ }
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
@@ -1916,11 +1955,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);
- for (i = 0; i < dmac->n_channels; ++i) {
- if (!(dmac->channels_mask & BIT(i)))
- continue;
-
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ ret = rcar_dmac_chan_probe(dmac, chan);
if (ret < 0)
goto error;
}
@@ -1968,14 +2004,22 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
}
static const struct rcar_dmac_of_data rcar_dmac_data = {
- .chan_offset_base = 0x8000,
- .chan_offset_stride = 0x80,
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
+static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
+ .chan_offset_base = 0x0,
+ .chan_offset_stride = 0x1000,
};
static const struct of_device_id rcar_dmac_of_ids[] = {
{
.compatible = "renesas,rcar-dmac",
.data = &rcar_dmac_data,
+ }, {
+ .compatible = "renesas,dmac-r8a779a0",
+ .data = &rcar_v3u_dmac_data,
},
{ /* Sentinel */ }
};
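
The new for_each_rcar_dmac_chan() helper uses the if/else-continue idiom so the macro remains safe as a single statement at every call site. A standalone demonstration of the same trick with an invented channel mask:

    #include <stdio.h>

    #define for_each_masked(i, n, mask) \
            for ((i) = 0; (i) < (n); (i)++) \
                    if (!((mask) & (1u << (i)))) continue; else

    int main(void)
    {
            unsigned int i;

            for_each_masked(i, 8, 0x15)     /* bits 0, 2 and 4 set */
                    printf("channel %u\n", i);
            return 0;
    }
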
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
deleted file mode 100644
index a5c2843384fd..000000000000
--- a/drivers/dma/sirf-dma.c
+++ /dev/null
@@ -1,1170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DMA controller driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-#include <linux/sirfsoc_dma.h>
-
-#include "dmaengine.h"
-
-#define SIRFSOC_DMA_VER_A7V1 1
-#define SIRFSOC_DMA_VER_A7V2 2
-#define SIRFSOC_DMA_VER_A6 4
-
-#define SIRFSOC_DMA_DESCRIPTORS 16
-#define SIRFSOC_DMA_CHANNELS 16
-#define SIRFSOC_DMA_TABLE_NUM 256
-
-#define SIRFSOC_DMA_CH_ADDR 0x00
-#define SIRFSOC_DMA_CH_XLEN 0x04
-#define SIRFSOC_DMA_CH_YLEN 0x08
-#define SIRFSOC_DMA_CH_CTRL 0x0C
-
-#define SIRFSOC_DMA_WIDTH_0 0x100
-#define SIRFSOC_DMA_CH_VALID 0x140
-#define SIRFSOC_DMA_CH_INT 0x144
-#define SIRFSOC_DMA_INT_EN 0x148
-#define SIRFSOC_DMA_INT_EN_CLR 0x14C
-#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
-#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x154
-#define SIRFSOC_DMA_WIDTH_ATLAS7 0x10
-#define SIRFSOC_DMA_VALID_ATLAS7 0x14
-#define SIRFSOC_DMA_INT_ATLAS7 0x18
-#define SIRFSOC_DMA_INT_EN_ATLAS7 0x1c
-#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7 0x20
-#define SIRFSOC_DMA_CUR_DATA_ADDR 0x34
-#define SIRFSOC_DMA_MUL_ATLAS7 0x38
-#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7 0x158
-#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7 0x15C
-#define SIRFSOC_DMA_IOBG_SCMD_EN 0x800
-#define SIRFSOC_DMA_EARLY_RESP_SET 0x818
-#define SIRFSOC_DMA_EARLY_RESP_CLR 0x81C
-
-#define SIRFSOC_DMA_MODE_CTRL_BIT 4
-#define SIRFSOC_DMA_DIR_CTRL_BIT 5
-#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7 2
-#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7 3
-#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7 4
-#define SIRFSOC_DMA_TAB_NUM_ATLAS7 7
-#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7 5
-#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 25
-#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT 32
-
-#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7 BIT(0)
-#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7 BIT(1)
-#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7 BIT(2)
-#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7 BIT(3)
-#define SIRFSOC_DMA_INT_INV_INT_ATLAS7 BIT(4)
-#define SIRFSOC_DMA_INT_END_INT_ATLAS7 BIT(5)
-#define SIRFSOC_DMA_INT_ALL_ATLAS7 0x3F
-
-/* xlen and dma_width register is in 4 bytes boundary */
-#define SIRFSOC_DMA_WORD_LEN 4
-#define SIRFSOC_DMA_XLEN_MAX_V1 0x800
-#define SIRFSOC_DMA_XLEN_MAX_V2 0x1000
-
-struct sirfsoc_dma_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
-
- /* SiRFprimaII 2D-DMA parameters */
-
- int xlen; /* DMA xlen */
- int ylen; /* DMA ylen */
- int width; /* DMA width */
- int dir;
- bool cyclic; /* is loop DMA? */
- bool chain; /* is chain DMA? */
- u32 addr; /* DMA buffer address */
- u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
-};
-
-struct sirfsoc_dma_chan {
- struct dma_chan chan;
- struct list_head free;
- struct list_head prepared;
- struct list_head queued;
- struct list_head active;
- struct list_head completed;
- unsigned long happened_cyclic;
- unsigned long completed_cyclic;
-
- /* Lock for this structure */
- spinlock_t lock;
-
- int mode;
-};
-
-struct sirfsoc_dma_regs {
- u32 ctrl[SIRFSOC_DMA_CHANNELS];
- u32 interrupt_en;
-};
-
-struct sirfsoc_dma {
- struct dma_device dma;
- struct tasklet_struct tasklet;
- struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
- void __iomem *base;
- int irq;
- struct clk *clk;
- int type;
- void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base);
- struct sirfsoc_dma_regs regs_save;
-};
-
-struct sirfsoc_dmadata {
- void (*exec)(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base);
- int type;
-};
-
-enum sirfsoc_dma_chain_flag {
- SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
- SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
- SIRFSOC_DMA_CHAIN_LOOP = 0x03,
- SIRFSOC_DMA_CHAIN_END = 0x04
-};
-
-#define DRV_NAME "sirfsoc_dma"
-
-static int sirfsoc_dma_runtime_suspend(struct device *dev);
-
-/* Convert struct dma_chan to struct sirfsoc_dma_chan */
-static inline
-struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct sirfsoc_dma_chan, chan);
-}
-
-/* Convert struct dma_chan to struct sirfsoc_dma */
-static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
- return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
-}
-
-static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- if (sdesc->chain) {
- /* DMA v2 HW chain mode */
- writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
- (sdesc->chain <<
- SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
- (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
- base + SIRFSOC_DMA_CH_CTRL);
- } else {
- /* DMA v2 legacy mode */
- writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
- writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)),
- base + SIRFSOC_DMA_MUL_ATLAS7);
- writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
- (sdesc->chain <<
- SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
- 0x3, base + SIRFSOC_DMA_CH_CTRL);
- }
- writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
- (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
- SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
- base + SIRFSOC_DMA_INT_EN_ATLAS7);
- writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic)
- writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
-}
-
-static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
- writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
- writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
- (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
- base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
- writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
- (1 << cid), base + SIRFSOC_DMA_INT_EN);
- writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic) {
- writel((1 << cid) | 1 << (cid + 16) |
- readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
- base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
- }
-
-}
-
-static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
- writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
- (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
- base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
- writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
- (1 << cid), base + SIRFSOC_DMA_INT_EN);
- writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic) {
- writel((1 << cid) | 1 << (cid + 16) |
- readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
- base + SIRFSOC_DMA_CH_LOOP_CTRL);
- }
-
-}
-
-/* Execute all queued DMA descriptors */
-static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- struct sirfsoc_dma_desc *sdesc = NULL;
- void __iomem *base;
-
- /*
- * lock has been held by functions calling this, so we don't hold
- * lock again
- */
- base = sdma->base;
- sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
- node);
- /* Move the first queued descriptor to active list */
- list_move_tail(&sdesc->node, &schan->active);
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2)
- cid = 0;
-
- /* Start the DMA transfer */
- sdma->exec_desc(sdesc, cid, schan->mode, base);
-
- if (sdesc->cyclic)
- schan->happened_cyclic = schan->completed_cyclic = 0;
-}
-
-/* Interrupt handler */
-static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
-{
- struct sirfsoc_dma *sdma = data;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc = NULL;
- u32 is;
- bool chain;
- int ch;
- void __iomem *reg;
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A6:
- case SIRFSOC_DMA_VER_A7V1:
- is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
- reg = sdma->base + SIRFSOC_DMA_CH_INT;
- while ((ch = fls(is) - 1) >= 0) {
- is &= ~(1 << ch);
- writel_relaxed(1 << ch, reg);
- schan = &sdma->channels[ch];
- spin_lock(&schan->lock);
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
- if (!sdesc->cyclic) {
- /* Execute queued descriptors */
- list_splice_tail_init(&schan->active,
- &schan->completed);
- dma_cookie_complete(&sdesc->desc);
- if (!list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
- } else
- schan->happened_cyclic++;
- spin_unlock(&schan->lock);
- }
- break;
-
- case SIRFSOC_DMA_VER_A7V2:
- is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);
-
- reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
- writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
- schan = &sdma->channels[0];
- spin_lock(&schan->lock);
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
- if (!sdesc->cyclic) {
- chain = sdesc->chain;
- if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
- (!chain &&
- (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
- /* Execute queued descriptors */
- list_splice_tail_init(&schan->active,
- &schan->completed);
- dma_cookie_complete(&sdesc->desc);
- if (!list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
- }
- } else if (sdesc->cyclic && (is &
- SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
- schan->happened_cyclic++;
-
- spin_unlock(&schan->lock);
- break;
-
- default:
- break;
- }
-
- /* Schedule tasklet */
- tasklet_schedule(&sdma->tasklet);
-
- return IRQ_HANDLED;
-}
-
-/* process completed descriptors */
-static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
-{
- dma_cookie_t last_cookie = 0;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc;
- struct dma_async_tx_descriptor *desc;
- unsigned long flags;
- unsigned long happened_cyclic;
- LIST_HEAD(list);
- int i;
-
- for (i = 0; i < sdma->dma.chancnt; i++) {
- schan = &sdma->channels[i];
-
- /* Get all completed descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- if (!list_empty(&schan->completed)) {
- list_splice_tail_init(&schan->completed, &list);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Execute callbacks and run dependencies */
- list_for_each_entry(sdesc, &list, node) {
- desc = &sdesc->desc;
-
- dmaengine_desc_get_callback_invoke(desc, NULL);
- last_cookie = desc->cookie;
- dma_run_dependencies(desc);
- }
-
- /* Free descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- list_splice_tail_init(&list, &schan->free);
- schan->chan.completed_cookie = last_cookie;
- spin_unlock_irqrestore(&schan->lock, flags);
- } else {
- if (list_empty(&schan->active)) {
- spin_unlock_irqrestore(&schan->lock, flags);
- continue;
- }
-
- /* for cyclic channel, desc is always in active list */
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
-
- /* cyclic DMA */
- happened_cyclic = schan->happened_cyclic;
- spin_unlock_irqrestore(&schan->lock, flags);
-
- desc = &sdesc->desc;
- while (happened_cyclic != schan->completed_cyclic) {
- dmaengine_desc_get_callback_invoke(desc, NULL);
- schan->completed_cyclic++;
- }
- }
- }
-}
-
-/* DMA Tasklet */
-static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
-{
- struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);
-
- sirfsoc_dma_process_completed(sdma);
-}
-
-/* Submit descriptor to hardware */
-static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- dma_cookie_t cookie;
-
- sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Move descriptor to queue */
- list_move_tail(&sdesc->node, &schan->queued);
-
- cookie = dma_cookie_assign(txd);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return cookie;
-}
-
-static int sirfsoc_dma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
-
- if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
- return -EINVAL;
-
- spin_lock_irqsave(&schan->lock, flags);
- schan->mode = (config->src_maxburst == 4 ? 1 : 0);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
- writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
- sdma->base + SIRFSOC_DMA_INT_ATLAS7);
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) &
- ~((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- break;
- default:
- break;
- }
-
- list_splice_tail_init(&schan->active, &schan->free);
- list_splice_tail_init(&schan->queued, &schan->free);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) &
- ~((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- break;
-
- default:
- break;
- }
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0x10001,
- sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) |
- ((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- break;
-
- default:
- break;
- }
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-/* Alloc channel resources */
-static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- LIST_HEAD(descs);
- int i;
-
- pm_runtime_get_sync(sdma->dma.dev);
-
- /* Alloc descriptors for this channel */
- for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
- sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
- if (!sdesc) {
- dev_notice(sdma->dma.dev, "Memory allocation error. "
- "Allocated only %u descriptors\n", i);
- break;
- }
-
- dma_async_tx_descriptor_init(&sdesc->desc, chan);
- sdesc->desc.flags = DMA_CTRL_ACK;
- sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
-
- list_add_tail(&sdesc->node, &descs);
- }
-
- /* Return error only if no descriptors were allocated */
- if (i == 0)
- return -ENOMEM;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- list_splice_tail_init(&descs, &schan->free);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return i;
-}
-
-/* Free channel resources */
-static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_desc *sdesc, *tmp;
- unsigned long flags;
- LIST_HEAD(descs);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Channel must be idle */
- BUG_ON(!list_empty(&schan->prepared));
- BUG_ON(!list_empty(&schan->queued));
- BUG_ON(!list_empty(&schan->active));
- BUG_ON(!list_empty(&schan->completed));
-
- /* Move data */
- list_splice_tail_init(&schan->free, &descs);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Free descriptors */
- list_for_each_entry_safe(sdesc, tmp, &descs, node)
- kfree(sdesc);
-
- pm_runtime_put(sdma->dma.dev);
-}
-
-/* Send pending descriptor to hardware */
-static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- if (list_empty(&schan->active) && !list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-}
-
-/* Check request completion status */
-static enum dma_status
-sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
- enum dma_status ret;
- struct sirfsoc_dma_desc *sdesc;
- int cid = schan->chan.chan_id;
- unsigned long dma_pos;
- unsigned long dma_request_bytes;
- unsigned long residue;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- if (list_empty(&schan->active)) {
- ret = dma_cookie_status(chan, cookie, txstate);
- dma_set_residue(txstate, 0);
- spin_unlock_irqrestore(&schan->lock, flags);
- return ret;
- }
- sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
- if (sdesc->cyclic)
- dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
- (sdesc->width * SIRFSOC_DMA_WORD_LEN);
- else
- dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;
-
- ret = dma_cookie_status(chan, cookie, txstate);
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2)
- cid = 0;
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
- } else {
- dma_pos = readl_relaxed(
- sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
- }
-
- residue = dma_request_bytes - (dma_pos - sdesc->addr);
- dma_set_residue(txstate, residue);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return ret;
-}
-
-static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
- int ret;
-
- if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
- ret = -EINVAL;
- goto err_dir;
- }
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc) {
- /* try to free completed descriptors */
- sirfsoc_dma_process_completed(sdma);
- ret = 0;
- goto no_desc;
- }
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
-
- /*
- * Number of chunks in a frame can only be 1 for prima2
- * and ylen (number of frame - 1) must be at least 0
- */
- if ((xt->frame_size == 1) && (xt->numf > 0)) {
- sdesc->cyclic = 0;
- sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
- sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
- SIRFSOC_DMA_WORD_LEN;
- sdesc->ylen = xt->numf - 1;
- if (xt->dir == DMA_MEM_TO_DEV) {
- sdesc->addr = xt->src_start;
- sdesc->dir = 1;
- } else {
- sdesc->addr = xt->dst_start;
- sdesc->dir = 0;
- }
-
- list_add_tail(&sdesc->node, &schan->prepared);
- } else {
- pr_err("sirfsoc DMA Invalid xfer\n");
- ret = -EINVAL;
- goto err_xfer;
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-err_xfer:
- spin_unlock_irqrestore(&schan->lock, iflags);
-no_desc:
-err_dir:
- return ERR_PTR(ret);
-}
-
-static struct dma_async_tx_descriptor *
-sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
- size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
-
- /*
- * we only support cycle transfer with 2 period
- * If the X-length is set to 0, it would be the loop mode.
- * The DMA address keeps increasing until reaching the end of a loop
- * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
- * the DMA address goes back to the beginning of this area.
- * In loop mode, the DMA data region is divided into two parts, BUFA
- * and BUFB. DMA controller generates interrupts twice in each loop:
- * when the DMA address reaches the end of BUFA or the end of the
- * BUFB
- */
- if (buf_len != 2 * period_len)
- return ERR_PTR(-EINVAL);
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc)
- return NULL;
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
- sdesc->addr = addr;
- sdesc->cyclic = 1;
- sdesc->xlen = 0;
- sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
- sdesc->width = 1;
- list_add_tail(&sdesc->node, &schan->prepared);
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-}
-
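The two-period constraint and the register values derived from it can be modeled directly. A small sketch, with hypothetical names standing in for the driver's fields: buf_len must be exactly two periods, xlen = 0 selects loop mode, and ylen encodes the loop area in words minus one.

#include <stdio.h>
#include <stddef.h>

#define WORD_LEN 4 /* bytes per DMA word, as above */

/* Model of the loop-mode programming: two periods (BUFA + BUFB) only. */
static int program_cyclic(size_t buf_len, size_t period_len,
			  unsigned *xlen, unsigned *ylen, unsigned *width)
{
	if (buf_len != 2 * period_len)
		return -1; /* only two-period cyclic transfers supported */

	*xlen = 0;                      /* 0 => loop mode */
	*ylen = buf_len / WORD_LEN - 1; /* loop area size in words, minus 1 */
	*width = 1;
	return 0;
}

int main(void)
{
	unsigned xlen, ylen, width;

	if (program_cyclic(4096, 2048, &xlen, &ylen, &width) == 0)
		printf("xlen=%u ylen=%u width=%u\n", xlen, ylen, width);
	return 0;
}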
-/*
- * The DMA controller consists of 16 independent DMA channels.
- * Each channel is allocated to a different function.
- */
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned int ch_nr = (unsigned int) chan_id;
-
- if (ch_nr == chan->chan_id +
- chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(sirfsoc_dma_filter_id);
-
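The filter encodes a flat, global channel numbering across controllers: a requester asking for global id N matches channel (N mod CHANNELS) on controller (N / CHANNELS). A sketch of the mapping (illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define CHANNELS 16 /* models SIRFSOC_DMA_CHANNELS: channels per controller */

/* Global channel number -> (controller, channel) match, as in the filter. */
static bool filter_id(unsigned dev_id, unsigned chan_id, unsigned wanted)
{
	return wanted == chan_id + dev_id * CHANNELS;
}

int main(void)
{
	/* global id 21 is channel 5 on the second controller (dev_id 1) */
	printf("%d\n", filter_id(1, 5, 21)); /* 1 */
	printf("%d\n", filter_id(0, 5, 21)); /* 0 */
	return 0;
}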
-#define SIRFSOC_DMA_BUSWIDTHS \
- (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
- BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct sirfsoc_dma *sdma = ofdma->of_dma_data;
- unsigned int request = dma_spec->args[0];
-
- if (request >= SIRFSOC_DMA_CHANNELS)
- return NULL;
-
- return dma_get_slave_channel(&sdma->channels[request].chan);
-}
-
-static int sirfsoc_dma_probe(struct platform_device *op)
-{
- struct device_node *dn = op->dev.of_node;
- struct device *dev = &op->dev;
- struct dma_device *dma;
- struct sirfsoc_dma *sdma;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dmadata *data;
- struct resource res;
- ulong regs_start, regs_size;
- u32 id;
- int ret, i;
-
- sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
- if (!sdma)
- return -ENOMEM;
-
- data = (struct sirfsoc_dmadata *)
- (of_match_device(op->dev.driver->of_match_table,
- &op->dev)->data);
- sdma->exec_desc = data->exec;
- sdma->type = data->type;
-
- if (of_property_read_u32(dn, "cell-index", &id)) {
-		dev_err(dev, "Failed to get DMAC index\n");
- return -ENODEV;
- }
-
- sdma->irq = irq_of_parse_and_map(dn, 0);
- if (!sdma->irq) {
- dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
- }
-
- sdma->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(sdma->clk)) {
- dev_err(dev, "failed to get a clock.\n");
- return PTR_ERR(sdma->clk);
- }
-
- ret = of_address_to_resource(dn, 0, &res);
- if (ret) {
- dev_err(dev, "Error parsing memory region!\n");
- goto irq_dispose;
- }
-
- regs_start = res.start;
- regs_size = resource_size(&res);
-
- sdma->base = devm_ioremap(dev, regs_start, regs_size);
- if (!sdma->base) {
- dev_err(dev, "Error mapping memory region!\n");
- ret = -ENOMEM;
- goto irq_dispose;
- }
-
- ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
- if (ret) {
- dev_err(dev, "Error requesting IRQ!\n");
- ret = -EINVAL;
- goto irq_dispose;
- }
-
- dma = &sdma->dma;
- dma->dev = dev;
-
- dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
- dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
- dma->device_issue_pending = sirfsoc_dma_issue_pending;
- dma->device_config = sirfsoc_dma_slave_config;
- dma->device_pause = sirfsoc_dma_pause_chan;
- dma->device_resume = sirfsoc_dma_resume_chan;
- dma->device_terminate_all = sirfsoc_dma_terminate_all;
- dma->device_tx_status = sirfsoc_dma_tx_status;
- dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
- dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
- dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
- dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
- dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-
- INIT_LIST_HEAD(&dma->channels);
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_CYCLIC, dma->cap_mask);
- dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
- schan = &sdma->channels[i];
-
- schan->chan.device = dma;
- dma_cookie_init(&schan->chan);
-
- INIT_LIST_HEAD(&schan->free);
- INIT_LIST_HEAD(&schan->prepared);
- INIT_LIST_HEAD(&schan->queued);
- INIT_LIST_HEAD(&schan->active);
- INIT_LIST_HEAD(&schan->completed);
-
- spin_lock_init(&schan->lock);
- list_add_tail(&schan->chan.device_node, &dma->channels);
- }
-
- tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);
-
- /* Register DMA engine */
- dev_set_drvdata(dev, sdma);
-
- ret = dma_async_device_register(dma);
- if (ret)
- goto free_irq;
-
- /* Device-tree DMA controller registration */
- ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
- if (ret) {
- dev_err(dev, "failed to register DMA controller\n");
- goto unreg_dma_dev;
- }
-
- pm_runtime_enable(&op->dev);
- dev_info(dev, "initialized SIRFSOC DMAC driver\n");
-
- return 0;
-
-unreg_dma_dev:
- dma_async_device_unregister(dma);
-free_irq:
- free_irq(sdma->irq, sdma);
-irq_dispose:
- irq_dispose_mapping(sdma->irq);
- return ret;
-}
-
-static int sirfsoc_dma_remove(struct platform_device *op)
-{
- struct device *dev = &op->dev;
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
-
- of_dma_controller_free(op->dev.of_node);
- dma_async_device_unregister(&sdma->dma);
- free_irq(sdma->irq, sdma);
- tasklet_kill(&sdma->tasklet);
- irq_dispose_mapping(sdma->irq);
- pm_runtime_disable(&op->dev);
- if (!pm_runtime_status_suspended(&op->dev))
- sirfsoc_dma_runtime_suspend(&op->dev);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
-
- clk_disable_unprepare(sdma->clk);
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(sdma->clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_chan *schan;
- int ch;
- int ret;
- int count;
- u32 int_offset;
-
- /*
-	 * If we were runtime-suspended before, resume to enable the clock
-	 * before accessing registers.
- */
- if (pm_runtime_status_suspended(dev)) {
- ret = sirfsoc_dma_runtime_resume(dev);
- if (ret < 0)
- return ret;
- }
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- count = 1;
- int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
- } else {
- count = SIRFSOC_DMA_CHANNELS;
- int_offset = SIRFSOC_DMA_INT_EN;
- }
-
- /*
-	 * The DMA controller loses all register contents while suspended,
-	 * so we need to save the registers of active channels.
- */
- for (ch = 0; ch < count; ch++) {
- schan = &sdma->channels[ch];
- if (list_empty(&schan->active))
- continue;
- save->ctrl[ch] = readl_relaxed(sdma->base +
- ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
- }
- save->interrupt_en = readl_relaxed(sdma->base + int_offset);
-
- /* Disable clock */
- sirfsoc_dma_runtime_suspend(dev);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
- struct sirfsoc_dma_chan *schan;
- int ch;
- int ret;
- int count;
- u32 int_offset;
- u32 width_offset;
-
- /* Enable clock before accessing register */
- ret = sirfsoc_dma_runtime_resume(dev);
- if (ret < 0)
- return ret;
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- count = 1;
- int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
- width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
- } else {
- count = SIRFSOC_DMA_CHANNELS;
- int_offset = SIRFSOC_DMA_INT_EN;
- width_offset = SIRFSOC_DMA_WIDTH_0;
- }
-
- writel_relaxed(save->interrupt_en, sdma->base + int_offset);
- for (ch = 0; ch < count; ch++) {
- schan = &sdma->channels[ch];
- if (list_empty(&schan->active))
- continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
- writel_relaxed(sdesc->width,
- sdma->base + width_offset + ch * 4);
- writel_relaxed(sdesc->xlen,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(save->ctrl[ch],
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- writel_relaxed(sdesc->addr,
- sdma->base + SIRFSOC_DMA_CH_ADDR);
- } else {
- writel_relaxed(sdesc->addr >> 2,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
-
- }
- }
-
- /* if we were runtime-suspended before, suspend again */
- if (pm_runtime_status_suspended(dev))
- sirfsoc_dma_runtime_suspend(dev);
-
- return 0;
-}
-
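Note the asymmetric address handling on restore: except on A7V2, the channel address register is word-addressed, so the driver writes addr >> 2 here and shifts the readback left by 2 when computing the position in tx_status earlier. A tiny sketch of that byte/word conversion, with made-up names:

#include <stdint.h>
#include <stdio.h>

/*
 * On the older controllers the per-channel address register holds a
 * word address: software stores byte_addr >> 2 and multiplies the
 * readback by 4 (<< 2) to recover a byte address.
 */
static uint32_t to_reg(uint32_t byte_addr) { return byte_addr >> 2; }
static uint32_t from_reg(uint32_t reg_val) { return reg_val << 2; }

int main(void)
{
	uint32_t addr = 0x40001000;

	printf("reg=0x%x back=0x%x\n", to_reg(addr), from_reg(to_reg(addr)));
	return 0;
}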
-static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
- SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
- .exec = sirfsoc_dma_execute_hw_a6,
- .type = SIRFSOC_DMA_VER_A6,
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
- .exec = sirfsoc_dma_execute_hw_a7v1,
- .type = SIRFSOC_DMA_VER_A7V1,
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
- .exec = sirfsoc_dma_execute_hw_a7v2,
- .type = SIRFSOC_DMA_VER_A7V2,
-};
-
-static const struct of_device_id sirfsoc_dma_match[] = {
- { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
- { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
- { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);
-
-static struct platform_driver sirfsoc_dma_driver = {
- .probe = sirfsoc_dma_probe,
- .remove = sirfsoc_dma_remove,
- .driver = {
- .name = DRV_NAME,
- .pm = &sirfsoc_dma_pm_ops,
- .of_match_table = sirfsoc_dma_match,
- },
-};
-
-static __init int sirfsoc_dma_init(void)
-{
- return platform_driver_register(&sirfsoc_dma_driver);
-}
-
-static void __exit sirfsoc_dma_exit(void)
-{
- platform_driver_unregister(&sirfsoc_dma_driver);
-}
-
-subsys_initcall(sirfsoc_dma_init);
-module_exit(sirfsoc_dma_exit);
-
-MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
-MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
-MODULE_DESCRIPTION("SIRFSOC DMA control driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 4256e55bbf25..265d7c07b348 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -78,7 +78,7 @@ static int dma40_memcpy_channels[] = {
DB8500_DMA_MEMCPY_EV_5,
};
-/* Default configuration for physcial memcpy */
+/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
.dir = DMA_MEM_TO_MEM,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index e4637ec786d3..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 87157cbae1b8..96ad21869ba7 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -121,6 +121,11 @@ struct udma_oes_offsets {
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
#define UDMA_FLAG_TDTYPE BIT(2)
+#define UDMA_FLAG_BURST_SIZE BIT(3)
+#define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
+ UDMA_FLAG_PDMA_BURST | \
+ UDMA_FLAG_TDTYPE | \
+ UDMA_FLAG_BURST_SIZE)
struct udma_match_data {
enum k3_dma_type type;
@@ -128,6 +133,7 @@ struct udma_match_data {
bool enable_memcpy_support;
u32 flags;
u32 statictr_z_mask;
+ u8 burst_size[3];
};
struct udma_soc_data {
@@ -436,6 +442,18 @@ static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
}
}
+static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
+{
+ int i;
+
+ for (i = 0; i < tpl_map->levels; i++) {
+ if (chan_id >= tpl_map->start_idx[i])
+ return i;
+ }
+
+ return 0;
+}
+
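The TPL map lookup added here scans start_idx[] from entry 0 and returns the first level whose start index the channel id reaches; the result later indexes the per-level burst_size[] table. A standalone model, with illustrative values (the real start indices come from the driver's resource setup):

#include <stdio.h>

/* Model of the TPL lookup: start_idx[] holds, per level, the first
 * channel id belonging to that level; the first match wins. */
struct tpl_map {
	int levels;
	int start_idx[3];
};

static int tpl_index(const struct tpl_map *m, int chan_id)
{
	for (int i = 0; i < m->levels; i++)
		if (chan_id >= m->start_idx[i])
			return i;
	return 0;
}

int main(void)
{
	/* illustrative: level 0 starts at channel 8, level 1 at channel 0 */
	struct tpl_map m = { .levels = 2, .start_idx = { 8, 0 } };

	printf("%d %d\n", tpl_index(&m, 10), tpl_index(&m, 3)); /* 0 1 */
	return 0;
}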
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
@@ -1811,13 +1829,21 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct udma_rchan *rchan = uc->rchan;
- int ret = 0;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
/* Non synchronized - mem to mem type of transfer */
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
+
req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
@@ -1825,6 +1851,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = ud->atype;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret) {
@@ -1839,6 +1869,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_rx.rxcq_qnum = tc_ring;
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_rx.rx_atype = ud->atype;
+ if (burst_size) {
+ req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_rx.rx_burst_size = burst_size;
+ }
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret)
@@ -1854,12 +1888,24 @@ static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct udma_bchan *bchan = uc->bchan;
- int ret = 0;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
+
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
req_tx.index = bchan->id;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1877,7 +1923,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -1918,7 +1964,7 @@ static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
- int ret = 0;
+ int ret;
req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
@@ -1951,7 +1997,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -2028,7 +2074,7 @@ static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_rchan *rchan = uc->rchan;
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- int ret = 0;
+ int ret;
req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
@@ -2048,7 +2094,7 @@ static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
- int ret = 0;
+ int ret;
req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
@@ -2401,7 +2447,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n");
uc->use_dma_pool = false;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_res_free;
}
uc->use_dma_pool = true;
@@ -4167,6 +4214,11 @@ static struct udma_match_data am654_main_data = {
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am654_mcu_data = {
@@ -4174,38 +4226,63 @@ static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data j721e_main_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
+ },
};
static struct udma_match_data j721e_mcu_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
.enable_memcpy_support = true, /* Supported via bchan */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am64_pktdma_data = {
.type = DMA_TYPE_PKTDMA,
.psil_base = 0x1000,
.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
};
static const struct of_device_id udma_of_match[] = {
@@ -4305,6 +4382,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = ud->rchan_cnt;
break;
case DMA_TYPE_PKTDMA:
cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
@@ -4698,9 +4776,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
ud->tchan_tpl.levels = 1;
}
- ud->tchan_tpl.levels = ud->tchan_tpl.levels;
- ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
- ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
@@ -5045,6 +5123,34 @@ static void udma_dbg_summary_show(struct seq_file *s,
}
#endif /* CONFIG_DEBUG_FS */
+static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
+{
+ const struct udma_match_data *match_data = ud->match_data;
+ u8 tpl;
+
+ if (!match_data->enable_memcpy_support)
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ /* Get the highest TPL level the device supports for memcpy */
+ if (ud->bchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
+ else if (ud->tchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
+ else
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ switch (match_data->burst_size[tpl]) {
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
+ return DMAENGINE_ALIGN_256_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
+ return DMAENGINE_ALIGN_128_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
+ fallthrough;
+ default:
+ return DMAENGINE_ALIGN_64_BYTES;
+ }
+}
+
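udma_get_copy_align() folds the per-level burst size back into a dmaengine alignment. A userspace model of that switch, using stand-in enum values since the actual TI-SCI encodings and dmaengine constants live in kernel headers:

#include <stdio.h>

/* Illustrative stand-ins for the TI-SCI burst-size encodings; the
 * kernel's actual values differ. */
enum burst { BURST_64 = 1, BURST_128 = 2, BURST_256 = 3 };

static int copy_align_bytes(enum burst b)
{
	switch (b) {
	case BURST_256: return 256;
	case BURST_128: return 128;
	case BURST_64:  /* fall through to the 64-byte default */
	default:        return 64;
	}
}

int main(void)
{
	printf("%d\n", copy_align_bytes(BURST_128)); /* 128 */
	return 0;
}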
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -5201,7 +5307,6 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
DESC_METADATA_ENGINE;
if (ud->match_data->enable_memcpy_support &&
@@ -5283,6 +5388,9 @@ static int udma_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
+ /* Configure the copy_align to the maximum burst size the device supports */
+ ud->ddev.copy_align = udma_get_copy_align(ud);
+
ret = dma_async_device_register(&ud->ddev);
if (ret) {
dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..3aded7861fef 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -800,7 +800,7 @@ xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *desc;
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
deleted file mode 100644
index b057582b2fac..000000000000
--- a/drivers/dma/zx_dma.c
+++ /dev/null
@@ -1,941 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2015 Linaro.
- */
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-
-#include "virt-dma.h"
-
-#define DRIVER_NAME "zx-dma"
-#define DMA_ALIGN 4
-#define DMA_MAX_SIZE (0x10000 - 512)
-#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
-
-#define REG_ZX_SRC_ADDR 0x00
-#define REG_ZX_DST_ADDR 0x04
-#define REG_ZX_TX_X_COUNT 0x08
-#define REG_ZX_TX_ZY_COUNT 0x0c
-#define REG_ZX_SRC_ZY_STEP 0x10
-#define REG_ZX_DST_ZY_STEP 0x14
-#define REG_ZX_LLI_ADDR 0x1c
-#define REG_ZX_CTRL 0x20
-#define REG_ZX_TC_IRQ 0x800
-#define REG_ZX_SRC_ERR_IRQ 0x804
-#define REG_ZX_DST_ERR_IRQ 0x808
-#define REG_ZX_CFG_ERR_IRQ 0x80c
-#define REG_ZX_TC_IRQ_RAW 0x810
-#define REG_ZX_SRC_ERR_IRQ_RAW 0x814
-#define REG_ZX_DST_ERR_IRQ_RAW 0x818
-#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c
-#define REG_ZX_STATUS 0x820
-#define REG_ZX_DMA_GRP_PRIO 0x824
-#define REG_ZX_DMA_ARB 0x828
-
-#define ZX_FORCE_CLOSE BIT(31)
-#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13)
-#define ZX_MAX_BURST_LEN 16
-#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9)
-#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6)
-#define ZX_IRQ_ENABLE_ALL (3 << 4)
-#define ZX_DST_FIFO_MODE BIT(3)
-#define ZX_SRC_FIFO_MODE BIT(2)
-#define ZX_SOFT_REQ BIT(1)
-#define ZX_CH_ENABLE BIT(0)
-
-#define ZX_DMA_BUSWIDTHS \
- (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
- BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-enum zx_dma_burst_width {
- ZX_DMA_WIDTH_8BIT = 0,
- ZX_DMA_WIDTH_16BIT = 1,
- ZX_DMA_WIDTH_32BIT = 2,
- ZX_DMA_WIDTH_64BIT = 3,
-};
-
-struct zx_desc_hw {
- u32 saddr;
- u32 daddr;
- u32 src_x;
- u32 src_zy;
- u32 src_zy_step;
- u32 dst_zy_step;
- u32 reserved1;
- u32 lli;
- u32 ctr;
-	u32 reserved[7];	/* pad to the hardware register region size */
-} __aligned(32);
-
-struct zx_dma_desc_sw {
- struct virt_dma_desc vd;
- dma_addr_t desc_hw_lli;
- size_t desc_num;
- size_t size;
- struct zx_desc_hw *desc_hw;
-};
-
-struct zx_dma_phy;
-
-struct zx_dma_chan {
- struct dma_slave_config slave_cfg;
- int id; /* Request phy chan id */
- u32 ccfg;
- u32 cyclic;
- struct virt_dma_chan vc;
- struct zx_dma_phy *phy;
- struct list_head node;
- dma_addr_t dev_addr;
- enum dma_status status;
-};
-
-struct zx_dma_phy {
- u32 idx;
- void __iomem *base;
- struct zx_dma_chan *vchan;
- struct zx_dma_desc_sw *ds_run;
- struct zx_dma_desc_sw *ds_done;
-};
-
-struct zx_dma_dev {
- struct dma_device slave;
- void __iomem *base;
- spinlock_t lock; /* lock for ch and phy */
- struct list_head chan_pending;
- struct zx_dma_phy *phy;
- struct zx_dma_chan *chans;
- struct clk *clk;
- struct dma_pool *pool;
- u32 dma_channels;
- u32 dma_requests;
- int irq;
-};
-
-#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
-
-static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct zx_dma_chan, vc.chan);
-}
-
-static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
-{
- u32 val = 0;
-
- val = readl_relaxed(phy->base + REG_ZX_CTRL);
- val &= ~ZX_CH_ENABLE;
- val |= ZX_FORCE_CLOSE;
- writel_relaxed(val, phy->base + REG_ZX_CTRL);
-
- val = 0x1 << phy->idx;
- writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
-{
- writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
- writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
- writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
- writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
- writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
- writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
- writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
- writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
-}
-
-static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
-{
- return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
-}
-
-static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
-{
- return readl_relaxed(d->base + REG_ZX_STATUS);
-}
-
-static void zx_dma_init_state(struct zx_dma_dev *d)
-{
- /* set same priority */
- writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
- /* clear all irq */
- writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static int zx_dma_start_txd(struct zx_dma_chan *c)
-{
- struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
- struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-
- if (!c->phy)
- return -EAGAIN;
-
- if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
- return -EAGAIN;
-
- if (vd) {
- struct zx_dma_desc_sw *ds =
- container_of(vd, struct zx_dma_desc_sw, vd);
- /*
- * fetch and remove request from vc->desc_issued
- * so vc->desc_issued only contains desc pending
- */
- list_del(&ds->vd.node);
- c->phy->ds_run = ds;
- c->phy->ds_done = NULL;
- /* start dma */
- zx_dma_set_desc(c->phy, ds->desc_hw);
- return 0;
- }
- c->phy->ds_done = NULL;
- c->phy->ds_run = NULL;
- return -EAGAIN;
-}
-
-static void zx_dma_task(struct zx_dma_dev *d)
-{
- struct zx_dma_phy *p;
- struct zx_dma_chan *c, *cn;
- unsigned pch, pch_alloc = 0;
- unsigned long flags;
-
- /* check new dma request of running channel in vc->desc_issued */
- list_for_each_entry_safe(c, cn, &d->slave.channels,
- vc.chan.device_node) {
- spin_lock_irqsave(&c->vc.lock, flags);
- p = c->phy;
- if (p && p->ds_done && zx_dma_start_txd(c)) {
- /* No current txd associated with this channel */
- dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
- /* Mark this channel free */
- c->phy = NULL;
- p->vchan = NULL;
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- }
-
- /* check new channel request in d->chan_pending */
- spin_lock_irqsave(&d->lock, flags);
- while (!list_empty(&d->chan_pending)) {
- c = list_first_entry(&d->chan_pending,
- struct zx_dma_chan, node);
- p = &d->phy[c->id];
- if (!p->vchan) {
- /* remove from d->chan_pending */
- list_del_init(&c->node);
- pch_alloc |= 1 << c->id;
- /* Mark this channel allocated */
- p->vchan = c;
- c->phy = p;
- } else {
- dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
- }
- }
- spin_unlock_irqrestore(&d->lock, flags);
-
- for (pch = 0; pch < d->dma_channels; pch++) {
- if (pch_alloc & (1 << pch)) {
- p = &d->phy[pch];
- c = p->vchan;
- if (c) {
- spin_lock_irqsave(&c->vc.lock, flags);
- zx_dma_start_txd(c);
- spin_unlock_irqrestore(&c->vc.lock, flags);
- }
- }
- }
-}
-
-static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
-{
- struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
- struct zx_dma_phy *p;
- struct zx_dma_chan *c;
- u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
- u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
- u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
- u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
- u32 i, irq_chan = 0, task = 0;
-
- while (tc) {
- i = __ffs(tc);
- tc &= ~BIT(i);
- p = &d->phy[i];
- c = p->vchan;
- if (c) {
- spin_lock(&c->vc.lock);
- if (c->cyclic) {
- vchan_cyclic_callback(&p->ds_run->vd);
- } else {
- vchan_cookie_complete(&p->ds_run->vd);
- p->ds_done = p->ds_run;
- task = 1;
- }
- spin_unlock(&c->vc.lock);
- irq_chan |= BIT(i);
- }
- }
-
- if (serr || derr || cfg)
- dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
- serr, derr, cfg);
-
- writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-
- if (task)
- zx_dma_task(d);
- return IRQ_HANDLED;
-}
-
-static void zx_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- unsigned long flags;
-
- spin_lock_irqsave(&d->lock, flags);
- list_del_init(&c->node);
- spin_unlock_irqrestore(&d->lock, flags);
-
- vchan_free_chan_resources(&c->vc);
- c->ccfg = 0;
-}
-
-static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_phy *p;
- struct virt_dma_desc *vd;
- unsigned long flags;
- enum dma_status ret;
- size_t bytes = 0;
-
- ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_COMPLETE || !state)
- return ret;
-
- spin_lock_irqsave(&c->vc.lock, flags);
- p = c->phy;
- ret = c->status;
-
- /*
- * If the cookie is on our issue queue, then the residue is
- * its total size.
- */
- vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
- bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
- } else if ((!p) || (!p->ds_run)) {
- bytes = 0;
- } else {
- struct zx_dma_desc_sw *ds = p->ds_run;
- u32 clli = 0, index = 0;
-
- bytes = 0;
- clli = zx_dma_get_curr_lli(p);
- index = (clli - ds->desc_hw_lli) /
- sizeof(struct zx_desc_hw) + 1;
- for (; index < ds->desc_num; index++) {
- bytes += ds->desc_hw[index].src_x;
- /* end of lli */
- if (!ds->desc_hw[index].lli)
- break;
- }
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- dma_set_residue(state, bytes);
- return ret;
-}
-
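The interesting branch above walks the hardware linked list: starting one entry past where the current-LLI register points, it sums the remaining entry sizes until the terminating entry. A self-contained model of that walk, with illustrative addresses:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Model of the LLI walk: sum the sizes of the entries the hardware has
 * not reached yet, stopping at the terminator (lli == 0). */
struct hw_desc {
	uint32_t src_x; /* bytes transferred by this entry */
	uint32_t lli;   /* bus address of next entry, 0 terminates */
};

static uint32_t remaining(const struct hw_desc *d, size_t num,
			  uint32_t lli_base, uint32_t curr_lli)
{
	size_t index = (curr_lli - lli_base) / sizeof(*d) + 1;
	uint32_t bytes = 0;

	for (; index < num; index++) {
		bytes += d[index].src_x;
		if (!d[index].lli) /* end of link */
			break;
	}
	return bytes;
}

int main(void)
{
	struct hw_desc d[3] = {
		{ 4096, 0x100 + 1 * sizeof(struct hw_desc) },
		{ 4096, 0x100 + 2 * sizeof(struct hw_desc) },
		{ 2048, 0 },
	};

	/* register points at entry 0 => entries 1..2 remain */
	printf("%u\n", remaining(d, 3, 0x100, 0x100)); /* 6144 */
	return 0;
}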
-static void zx_dma_issue_pending(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- unsigned long flags;
- int issue = 0;
-
- spin_lock_irqsave(&c->vc.lock, flags);
- /* add request to vc->desc_issued */
- if (vchan_issue_pending(&c->vc)) {
- spin_lock(&d->lock);
- if (!c->phy && list_empty(&c->node)) {
- /* if new channel, add chan_pending */
- list_add_tail(&c->node, &d->chan_pending);
- issue = 1;
- dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
- }
- spin_unlock(&d->lock);
- } else {
- dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
-
- if (issue)
- zx_dma_task(d);
-}
-
-static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
- dma_addr_t src, size_t len, u32 num, u32 ccfg)
-{
- if ((num + 1) < ds->desc_num)
- ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
- sizeof(struct zx_desc_hw);
- ds->desc_hw[num].saddr = src;
- ds->desc_hw[num].daddr = dst;
- ds->desc_hw[num].src_x = len;
- ds->desc_hw[num].ctr = ccfg;
-}
-
-static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
- struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
-
- if (num > lli_limit) {
- dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
- &c->vc, num, lli_limit);
- return NULL;
- }
-
- ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
- if (!ds)
- return NULL;
-
- ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
- if (!ds->desc_hw) {
- dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
- kfree(ds);
- return NULL;
- }
- ds->desc_num = num;
- return ds;
-}
-
-static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
-{
- switch (width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- return ffs(width) - 1;
- default:
- return ZX_DMA_WIDTH_32BIT;
- }
-}
-
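The width encoding is just log2 of the byte count, which is what ffs(width) - 1 computes for power-of-two widths. A sketch (ffs() here is the POSIX userspace one, not the kernel helper):

#include <stdio.h>
#include <strings.h> /* ffs() */

/* Bus width in bytes -> register encoding: 1->0, 2->1, 4->2, 8->3;
 * anything else falls back to the 32-bit encoding (2). */
static int burst_width(int bytes)
{
	switch (bytes) {
	case 1: case 2: case 4: case 8:
		return ffs(bytes) - 1;
	default:
		return 2;
	}
}

int main(void)
{
	for (int w = 1; w <= 8; w <<= 1)
		printf("%d -> %d\n", w, burst_width(w));
	return 0;
}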
-static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
-{
- struct dma_slave_config *cfg = &c->slave_cfg;
- enum zx_dma_burst_width src_width;
- enum zx_dma_burst_width dst_width;
- u32 maxburst = 0;
-
- switch (dir) {
- case DMA_MEM_TO_MEM:
- c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
- | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
- | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
- | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
- break;
- case DMA_MEM_TO_DEV:
- c->dev_addr = cfg->dst_addr;
-		/* The dst len is calculated from the src width, len and dst
-		 * width. We need to make sure the dst len does not exceed the
-		 * maximum length. A trailing single transaction that does not
-		 * fill a full burst also requires identical src/dst data
-		 * widths.
- */
- dst_width = zx_dma_burst_width(cfg->dst_addr_width);
- maxburst = cfg->dst_maxburst;
- maxburst = maxburst < ZX_MAX_BURST_LEN ?
- maxburst : ZX_MAX_BURST_LEN;
- c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
- | ZX_SRC_BURST_LEN(maxburst - 1)
- | ZX_SRC_BURST_WIDTH(dst_width)
- | ZX_DST_BURST_WIDTH(dst_width);
- break;
- case DMA_DEV_TO_MEM:
- c->dev_addr = cfg->src_addr;
- src_width = zx_dma_burst_width(cfg->src_addr_width);
- maxburst = cfg->src_maxburst;
- maxburst = maxburst < ZX_MAX_BURST_LEN ?
- maxburst : ZX_MAX_BURST_LEN;
- c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
- | ZX_SRC_BURST_LEN(maxburst - 1)
- | ZX_SRC_BURST_WIDTH(src_width)
- | ZX_DST_BURST_WIDTH(src_width);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- size_t copy = 0;
- int num = 0;
-
- if (!len)
- return NULL;
-
- if (zx_pre_config(c, DMA_MEM_TO_MEM))
- return NULL;
-
- num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
-
- ds = zx_alloc_desc_resource(num, chan);
- if (!ds)
- return NULL;
-
- ds->size = len;
- num = 0;
-
- do {
- copy = min_t(size_t, len, DMA_MAX_SIZE);
- zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
-
- src += copy;
- dst += copy;
- len -= copy;
- } while (len);
-
- c->cyclic = 0;
- ds->desc_hw[num - 1].lli = 0; /* end of link */
- ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
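A memcpy request is split into DMA_MAX_SIZE-sized chunks, one hardware descriptor each, and only the last descriptor enables the completion interrupt. The descriptor count is a one-line computation, shown here standalone:

#include <stdio.h>
#include <stddef.h>

#define DMA_MAX_SIZE (0x10000 - 512) /* per-descriptor hardware limit */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t len = 200000;
	size_t num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	/* 200000 bytes at 65024 bytes/descriptor -> 4 descriptors */
	printf("%zu bytes -> %zu descriptors\n", len, num);
	return 0;
}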
-static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
- enum dma_transfer_direction dir, unsigned long flags, void *context)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- size_t len, avail, total = 0;
- struct scatterlist *sg;
- dma_addr_t addr, src = 0, dst = 0;
- int num = sglen, i;
-
- if (!sgl)
- return NULL;
-
- if (zx_pre_config(c, dir))
- return NULL;
-
- for_each_sg(sgl, sg, sglen, i) {
- avail = sg_dma_len(sg);
- if (avail > DMA_MAX_SIZE)
- num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
- }
-
- ds = zx_alloc_desc_resource(num, chan);
- if (!ds)
- return NULL;
-
- c->cyclic = 0;
- num = 0;
- for_each_sg(sgl, sg, sglen, i) {
- addr = sg_dma_address(sg);
- avail = sg_dma_len(sg);
- total += avail;
-
- do {
- len = min_t(size_t, avail, DMA_MAX_SIZE);
-
- if (dir == DMA_MEM_TO_DEV) {
- src = addr;
- dst = c->dev_addr;
- } else if (dir == DMA_DEV_TO_MEM) {
- src = c->dev_addr;
- dst = addr;
- }
-
- zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
-
- addr += len;
- avail -= len;
- } while (avail);
- }
-
- ds->desc_hw[num - 1].lli = 0; /* end of link */
- ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
- ds->size = total;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction dir,
- unsigned long flags)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- dma_addr_t src = 0, dst = 0;
- int num_periods = buf_len / period_len;
- int buf = 0, num = 0;
-
- if (period_len > DMA_MAX_SIZE) {
- dev_err(chan->device->dev, "maximum period size exceeded\n");
- return NULL;
- }
-
- if (zx_pre_config(c, dir))
- return NULL;
-
- ds = zx_alloc_desc_resource(num_periods, chan);
- if (!ds)
- return NULL;
- c->cyclic = 1;
-
- while (buf < buf_len) {
- if (dir == DMA_MEM_TO_DEV) {
- src = dma_addr;
- dst = c->dev_addr;
- } else if (dir == DMA_DEV_TO_MEM) {
- src = c->dev_addr;
- dst = dma_addr;
- }
- zx_dma_fill_desc(ds, dst, src, period_len, num++,
- c->ccfg | ZX_IRQ_ENABLE_ALL);
- dma_addr += period_len;
- buf += period_len;
- }
-
- ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
- ds->size = buf_len;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
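Unlike the memcpy path, the cyclic path never terminates the chain: the last entry's lli points back at the first, so the hardware loops forever and every entry raises a period interrupt. A model of that ring construction, with an illustrative base address:

#include <stdint.h>
#include <stdio.h>

/* One entry per period; the last entry links back to the first, so the
 * chain has no terminator in cyclic mode. */
struct hw_desc {
	uint32_t lli; /* next entry; never 0 in cyclic mode */
};

int main(void)
{
	enum { PERIODS = 4 };
	struct hw_desc d[PERIODS];
	uint32_t base = 0x2000; /* illustrative bus address of d[0] */

	for (int i = 0; i < PERIODS; i++)
		d[i].lli = base + ((i + 1) % PERIODS) * sizeof(*d);

	printf("last -> 0x%x (== base)\n", d[PERIODS - 1].lli);
	return 0;
}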
-static int zx_dma_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
-
- if (!cfg)
- return -EINVAL;
-
- memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
-
- return 0;
-}
-
-static int zx_dma_terminate_all(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- struct zx_dma_phy *p = c->phy;
- unsigned long flags;
- LIST_HEAD(head);
-
- dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
- /* Clear the tx descriptor lists */
- spin_lock_irqsave(&c->vc.lock, flags);
- vchan_get_all_descriptors(&c->vc, &head);
- if (p) {
- /* vchan is assigned to a pchan - stop the channel */
- zx_dma_terminate_chan(p, d);
- c->phy = NULL;
- p->vchan = NULL;
- p->ds_run = NULL;
- p->ds_done = NULL;
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- vchan_dma_desc_free_list(&c->vc, &head);
-
- return 0;
-}
-
-static int zx_dma_transfer_pause(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- u32 val = 0;
-
- val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
- val &= ~ZX_CH_ENABLE;
- writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
- return 0;
-}
-
-static int zx_dma_transfer_resume(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- u32 val = 0;
-
- val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
- val |= ZX_CH_ENABLE;
- writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
- return 0;
-}
-
-static void zx_dma_free_desc(struct virt_dma_desc *vd)
-{
- struct zx_dma_desc_sw *ds =
- container_of(vd, struct zx_dma_desc_sw, vd);
- struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
-
- dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
- kfree(ds);
-}
-
-static const struct of_device_id zx6702_dma_dt_ids[] = {
- { .compatible = "zte,zx296702-dma", },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
-
-static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct zx_dma_dev *d = ofdma->of_dma_data;
- unsigned int request = dma_spec->args[0];
- struct dma_chan *chan;
- struct zx_dma_chan *c;
-
- if (request >= d->dma_requests)
- return NULL;
-
- chan = dma_get_any_slave_channel(&d->slave);
- if (!chan) {
-		dev_err(d->slave.dev, "failed to get a channel in %s\n", __func__);
- return NULL;
- }
- c = to_zx_chan(chan);
- c->id = request;
- dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
- c->id, &c->vc);
- return chan;
-}
-
-static int zx_dma_probe(struct platform_device *op)
-{
- struct zx_dma_dev *d;
- int i, ret = 0;
-
- d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->base = devm_platform_ioremap_resource(op, 0);
- if (IS_ERR(d->base))
- return PTR_ERR(d->base);
-
- of_property_read_u32((&op->dev)->of_node,
- "dma-channels", &d->dma_channels);
- of_property_read_u32((&op->dev)->of_node,
- "dma-requests", &d->dma_requests);
- if (!d->dma_requests || !d->dma_channels)
- return -EINVAL;
-
- d->clk = devm_clk_get(&op->dev, NULL);
- if (IS_ERR(d->clk)) {
- dev_err(&op->dev, "no dma clk\n");
- return PTR_ERR(d->clk);
- }
-
- d->irq = platform_get_irq(op, 0);
- ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
- 0, DRIVER_NAME, d);
- if (ret)
- return ret;
-
- /* A DMA memory pool for LLIs, align on 32-byte boundary */
- d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
- LLI_BLOCK_SIZE, 32, 0);
- if (!d->pool)
- return -ENOMEM;
-
- /* init phy channel */
- d->phy = devm_kcalloc(&op->dev,
- d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
- if (!d->phy)
- return -ENOMEM;
-
- for (i = 0; i < d->dma_channels; i++) {
- struct zx_dma_phy *p = &d->phy[i];
-
- p->idx = i;
- p->base = d->base + i * 0x40;
- }
-
- INIT_LIST_HEAD(&d->slave.channels);
- dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
- dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
- dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
- d->slave.dev = &op->dev;
- d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
- d->slave.device_tx_status = zx_dma_tx_status;
- d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
- d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
- d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
- d->slave.device_issue_pending = zx_dma_issue_pending;
- d->slave.device_config = zx_dma_config;
- d->slave.device_terminate_all = zx_dma_terminate_all;
- d->slave.device_pause = zx_dma_transfer_pause;
- d->slave.device_resume = zx_dma_transfer_resume;
- d->slave.copy_align = DMA_ALIGN;
- d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
- d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
- d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
- | BIT(DMA_DEV_TO_MEM);
- d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
- /* init virtual channel */
- d->chans = devm_kcalloc(&op->dev,
- d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
- if (!d->chans)
- return -ENOMEM;
-
- for (i = 0; i < d->dma_requests; i++) {
- struct zx_dma_chan *c = &d->chans[i];
-
- c->status = DMA_IN_PROGRESS;
- INIT_LIST_HEAD(&c->node);
- c->vc.desc_free = zx_dma_free_desc;
- vchan_init(&c->vc, &d->slave);
- }
-
- /* Enable clock before accessing registers */
- ret = clk_prepare_enable(d->clk);
- if (ret < 0) {
- dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
- goto zx_dma_out;
- }
-
- zx_dma_init_state(d);
-
- spin_lock_init(&d->lock);
- INIT_LIST_HEAD(&d->chan_pending);
- platform_set_drvdata(op, d);
-
- ret = dma_async_device_register(&d->slave);
- if (ret)
- goto clk_dis;
-
- ret = of_dma_controller_register((&op->dev)->of_node,
- zx_of_dma_simple_xlate, d);
- if (ret)
- goto of_dma_register_fail;
-
- dev_info(&op->dev, "initialized\n");
- return 0;
-
-of_dma_register_fail:
- dma_async_device_unregister(&d->slave);
-clk_dis:
- clk_disable_unprepare(d->clk);
-zx_dma_out:
- return ret;
-}
-
-static int zx_dma_remove(struct platform_device *op)
-{
- struct zx_dma_chan *c, *cn;
- struct zx_dma_dev *d = platform_get_drvdata(op);
-
-	/* explicitly free the irq */
- devm_free_irq(&op->dev, d->irq, d);
-
- dma_async_device_unregister(&d->slave);
- of_dma_controller_free((&op->dev)->of_node);
-
- list_for_each_entry_safe(c, cn, &d->slave.channels,
- vc.chan.device_node) {
- list_del(&c->vc.chan.device_node);
- }
- clk_disable_unprepare(d->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int zx_dma_suspend_dev(struct device *dev)
-{
- struct zx_dma_dev *d = dev_get_drvdata(dev);
- u32 stat = 0;
-
- stat = zx_dma_get_chan_stat(d);
- if (stat) {
- dev_warn(d->slave.dev,
-			 "channel(s) 0x%x still running, fail to suspend\n", stat);
- return -1;
- }
- clk_disable_unprepare(d->clk);
- return 0;
-}
-
-static int zx_dma_resume_dev(struct device *dev)
-{
- struct zx_dma_dev *d = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = clk_prepare_enable(d->clk);
- if (ret < 0) {
- dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
- return ret;
- }
- zx_dma_init_state(d);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
-
-static struct platform_driver zx_pdma_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .pm = &zx_dma_pmops,
- .of_match_table = zx6702_dma_dt_ids,
- },
- .probe = zx_dma_probe,
- .remove = zx_dma_remove,
-};
-
-module_platform_driver(zx_pdma_driver);
-
-MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
-MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 81c42664f21b..27d0c4cdc58d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -81,14 +81,13 @@ config EDAC_AMD64
Support for error detection and correction of DRAM ECC errors on
the AMD64 families (>= K8) of memory controllers.
-config EDAC_AMD64_ERROR_INJECTION
- bool "Sysfs HW Error injection facilities"
- depends on EDAC_AMD64
- help
- Recent Opterons (Family 10h and later) provide for Memory Error
- Injection into the ECC detection circuits. The amd64_edac module
- allows the operator/user to inject Uncorrectable and Correctable
- errors into DRAM.
+ When EDAC_DEBUG is enabled, hardware error injection facilities
+ through sysfs are available:
+
+ AMD CPUs up to and excluding family 0x17 provide for Memory
+ Error Injection into the ECC detection circuits. The amd64_edac
+ module allows the operator/user to inject Uncorrectable and
+ Correctable errors into DRAM.
When enabled, in each of the respective memory controller directories
(/sys/devices/system/edac/mc/mcX), there are 3 input files:
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 464d3d8d850a..2d1641a27a28 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -44,12 +44,7 @@ obj-$(CONFIG_EDAC_IE31200) += ie31200_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
-
-amd64_edac_mod-y := amd64_edac.o
-amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
-amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
-
-obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f7087ddddb90..9fa4dfc6ebee 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -500,8 +500,8 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
* complete 32-bit values despite the fact that the bitfields in the DHAR
* only represent bits 31-24 of the base and offset values.
*/
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size)
+static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size)
{
struct amd64_pvt *pvt = mci->pvt_info;
@@ -554,7 +554,292 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 0;
}
-EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+#ifdef CONFIG_EDAC_DEBUG
+#define EDAC_DCT_ATTR_SHOW(reg) \
+static ssize_t reg##_show(struct device *dev, \
+ struct device_attribute *mattr, char *data) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct amd64_pvt *pvt = mci->pvt_info; \
+ \
+ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
+}
+
+EDAC_DCT_ATTR_SHOW(dhar);
+EDAC_DCT_ATTR_SHOW(dbam0);
+EDAC_DCT_ATTR_SHOW(top_mem);
+EDAC_DCT_ATTR_SHOW(top_mem2);
+
+static ssize_t hole_show(struct device *dev, struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+
+ u64 hole_base = 0;
+ u64 hole_offset = 0;
+ u64 hole_size = 0;
+
+ get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+ return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+ hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
+static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
+static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
+static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
+static DEVICE_ATTR(dram_hole, S_IRUGO, hole_show, NULL);
+
+static struct attribute *dbg_attrs[] = {
+ &dev_attr_dhar.attr,
+ &dev_attr_dbam.attr,
+ &dev_attr_topmem.attr,
+ &dev_attr_topmem2.attr,
+ &dev_attr_dram_hole.attr,
+ NULL
+};
+
+static const struct attribute_group dbg_group = {
+ .attrs = dbg_attrs,
+};
+
+static ssize_t inject_section_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
+/*
+ * store error injection section value which refers to one of 4 16-byte sections
+ * within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t inject_section_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 3) {
+ amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.section = (u32) value;
+ return count;
+}
+
+static ssize_t inject_word_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
+/*
+ * store error injection word value which refers to one of 9 16-bit words of the
+ * 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t inject_word_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 8) {
+ amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.word = (u32) value;
+ return count;
+}
+
+static ssize_t inject_ecc_vector_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
+/*
+ * store 16 bit error injection vector which enables injecting errors to the
+ * corresponding bit within the error injection word above. When used during a
+ * DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t inject_ecc_vector_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 16, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & 0xFFFF0000) {
+ amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.bit_map = (u32) value;
+ return count;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t inject_read_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t inject_write_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 section, word_bits, tmp;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
+
+ pr_notice_once("Don't forget to decrease MCE polling interval in\n"
+ "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
+ "so that you can get the error report faster.\n");
+
+ on_each_cpu(disable_caches, NULL, 1);
+
+	/* Issue 'word' and 'bit' along with the WRITE request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ retry:
+ /* wait until injection happens */
+ amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
+ if (tmp & F10_NB_ARR_ECC_WR_REQ) {
+ cpu_relax();
+ goto retry;
+ }
+
+ on_each_cpu(enable_caches, NULL, 1);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * Keep inj_attrs[] below in sync when adding new members.
+ */
+
+static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
+ inject_section_show, inject_section_store);
+static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
+ inject_word_show, inject_word_store);
+static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
+ inject_ecc_vector_show, inject_ecc_vector_store);
+static DEVICE_ATTR(inject_write, S_IWUSR,
+ NULL, inject_write_store);
+static DEVICE_ATTR(inject_read, S_IWUSR,
+ NULL, inject_read_store);
+
+static struct attribute *inj_attrs[] = {
+ &dev_attr_inject_section.attr,
+ &dev_attr_inject_word.attr,
+ &dev_attr_inject_ecc_vector.attr,
+ &dev_attr_inject_write.attr,
+ &dev_attr_inject_read.attr,
+ NULL
+};
+
+static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ /* Families which have this injection hardware */
+ if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group inj_group = {
+ .attrs = inj_attrs,
+ .is_visible = inj_is_visible,
+};
+#endif /* CONFIG_EDAC_DEBUG */
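For orientation, here is a minimal user-space sketch of driving the injection attributes above; the mc0 sysfs path and the staged values are assumptions for illustration, not part of this patch:

/*
 * Hypothetical user-space sketch: stage and fire one DRAM ECC write
 * injection through the sysfs files created above. The mc0 path and
 * all values are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

static void wr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/edac/mc/mc0/%s", attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	wr("inject_section", "0");	/* 16-byte section 0 of the cacheline */
	wr("inject_word", "0");		/* first 16-bit word in that section */
	wr("inject_ecc_vector", "1");	/* hex bitmask: flip bit 0 of the word */
	wr("inject_write", "1");	/* trigger the staged DRAM ECC write */
	return 0;
}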
/*
* Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
@@ -593,8 +878,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
dram_base = get_dram_base(pvt, pvt->mc_node_id);
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
+ ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
if (!ret) {
if ((sys_addr >= (1ULL << 32)) &&
(sys_addr < ((1ULL << 32) + hole_size))) {
@@ -2665,7 +2949,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
if (pvt->umc) {
pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F0) {
- amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
+ edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
return -ENODEV;
}
@@ -2674,7 +2958,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
pci_dev_put(pvt->F0);
pvt->F0 = NULL;
- amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
@@ -2691,7 +2975,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
- amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
+ edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
return -ENODEV;
}
@@ -2701,7 +2985,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
@@ -3244,8 +3528,7 @@ static bool ecc_enabled(struct amd64_pvt *pvt)
MSR_IA32_MCG_CTL, nid);
}
- amd64_info("Node %d: DRAM ECC %s.\n",
- nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en)
return false;
@@ -3342,10 +3625,13 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F15_M60H_CPUS];
pvt->ops = &family_types[F15_M60H_CPUS].ops;
break;
+ /* Richland is a client-only part */
+ } else if (pvt->model == 0x13) {
+ return NULL;
+ } else {
+ fam_type = &family_types[F15_CPUS];
+ pvt->ops = &family_types[F15_CPUS].ops;
}
-
- fam_type = &family_types[F15_CPUS];
- pvt->ops = &family_types[F15_CPUS].ops;
break;
case 0x16:
@@ -3402,20 +3688,13 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
return NULL;
}
- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (pvt->fam == 0xf ?
- (pvt->ext_model >= K8_REV_F ? "revF or later "
- : "revE or earlier ")
- : ""), pvt->mc_node_id);
return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
- &amd64_edac_dbg_group,
-#endif
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
- &amd64_edac_inj_group,
+ &dbg_group,
+ &inj_group,
#endif
NULL
};
@@ -3539,6 +3818,7 @@ static int probe_one_instance(unsigned int nid)
pvt->mc_node_id = nid;
pvt->F3 = F3;
+ ret = -ENODEV;
fam_type = per_family_init(pvt);
if (!fam_type)
goto err_enable;
@@ -3579,6 +3859,12 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
+ (pvt->fam == 0xf ?
+ (pvt->ext_model >= K8_REV_F ? "revF or later "
+ : "revE or earlier ")
+ : ""), pvt->mc_node_id);
+
dump_misc_regs(pvt);
return ret;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 52b5d03eeba0..85aa820bc165 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -462,14 +462,6 @@ struct ecc_settings {
} flags;
};
-#ifdef CONFIG_EDAC_DEBUG
-extern const struct attribute_group amd64_edac_dbg_group;
-#endif
-
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
-extern const struct attribute_group amd64_edac_inj_group;
-#endif
-
/*
* Each of the PCI Device IDs types have their own set of hardware accessor
* functions and per device encoding/decoding logic.
@@ -501,9 +493,6 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
#define amd64_write_pci_cfg(pdev, offset, val) \
__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size);
-
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/* Injection helpers */
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
deleted file mode 100644
index 393be3351493..000000000000
--- a/drivers/edac/amd64_edac_dbg.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "amd64_edac.h"
-
-#define EDAC_DCT_ATTR_SHOW(reg) \
-static ssize_t amd64_##reg##_show(struct device *dev, \
- struct device_attribute *mattr, \
- char *data) \
-{ \
- struct mem_ctl_info *mci = to_mci(dev); \
- struct amd64_pvt *pvt = mci->pvt_info; \
- return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
-}
-
-EDAC_DCT_ATTR_SHOW(dhar);
-EDAC_DCT_ATTR_SHOW(dbam0);
-EDAC_DCT_ATTR_SHOW(top_mem);
-EDAC_DCT_ATTR_SHOW(top_mem2);
-
-static ssize_t amd64_hole_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
-
- u64 hole_base = 0;
- u64 hole_offset = 0;
- u64 hole_size = 0;
-
- amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
-
- return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
- hole_size);
-}
-
-/*
- * update NUM_DBG_ATTRS in case you add new members
- */
-static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL);
-static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL);
-static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL);
-static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL);
-static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL);
-
-static struct attribute *amd64_edac_dbg_attrs[] = {
- &dev_attr_dhar.attr,
- &dev_attr_dbam.attr,
- &dev_attr_topmem.attr,
- &dev_attr_topmem2.attr,
- &dev_attr_dram_hole.attr,
- NULL
-};
-
-const struct attribute_group amd64_edac_dbg_group = {
- .attrs = amd64_edac_dbg_attrs,
-};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
deleted file mode 100644
index d96d6116f0fb..000000000000
--- a/drivers/edac/amd64_edac_inj.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "amd64_edac.h"
-
-static ssize_t amd64_inject_section_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.section);
-}
-
-/*
- * store error injection section value which refers to one of 4 16-byte sections
- * within a 64-byte cacheline
- *
- * range: 0..3
- */
-static ssize_t amd64_inject_section_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- if (value > 3) {
- amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.section = (u32) value;
- return count;
-}
-
-static ssize_t amd64_inject_word_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.word);
-}
-
-/*
- * store error injection word value which refers to one of 9 16-bit word of the
- * 16-byte (128-bit + ECC bits) section
- *
- * range: 0..8
- */
-static ssize_t amd64_inject_word_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- if (value > 8) {
- amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.word = (u32) value;
- return count;
-}
-
-static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
-}
-
-/*
- * store 16 bit error injection vector which enables injecting errors to the
- * corresponding bit within the error injection word above. When used during a
- * DRAM ECC read, it holds the contents of the of the DRAM ECC bits.
- */
-static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 16, &value);
- if (ret < 0)
- return ret;
-
- if (value & 0xFFFF0000) {
- amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.bit_map = (u32) value;
- return count;
-}
-
-/*
- * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
- * fields needed by the injection registers and read the NB Array Data Port.
- */
-static ssize_t amd64_inject_read_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- u32 section, word_bits;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
-
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
-
- /* Issue 'word' and 'bit' along with the READ request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
-}
-
-/*
- * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
- * fields needed by the injection registers.
- */
-static ssize_t amd64_inject_write_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- u32 section, word_bits, tmp;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
-
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
-
- pr_notice_once("Don't forget to decrease MCE polling interval in\n"
- "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
- "so that you can get the error report faster.\n");
-
- on_each_cpu(disable_caches, NULL, 1);
-
- /* Issue 'word' and 'bit' along with the READ request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- retry:
- /* wait until injection happens */
- amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
- if (tmp & F10_NB_ARR_ECC_WR_REQ) {
- cpu_relax();
- goto retry;
- }
-
- on_each_cpu(enable_caches, NULL, 1);
-
- edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
-}
-
-/*
- * update NUM_INJ_ATTRS in case you add new members
- */
-
-static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
- amd64_inject_section_show, amd64_inject_section_store);
-static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
- amd64_inject_word_show, amd64_inject_word_store);
-static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
- amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IWUSR,
- NULL, amd64_inject_write_store);
-static DEVICE_ATTR(inject_read, S_IWUSR,
- NULL, amd64_inject_read_store);
-
-static struct attribute *amd64_edac_inj_attrs[] = {
- &dev_attr_inject_section.attr,
- &dev_attr_inject_word.attr,
- &dev_attr_inject_ecc_vector.attr,
- &dev_attr_inject_write.attr,
- &dev_attr_inject_read.attr,
- NULL
-};
-
-static umode_t amd64_edac_inj_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- struct amd64_pvt *pvt = mci->pvt_info;
-
- if (pvt->fam < 0x10)
- return 0;
- return attr->mode;
-}
-
-const struct attribute_group amd64_edac_inj_group = {
- .attrs = amd64_edac_inj_attrs,
- .is_visible = amd64_edac_inj_is_visible,
-};
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 677095769182..6793f6d799e7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1058,7 +1058,7 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
/* Initialize strings */
mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->ctl_name = ppc4xx_edac_match->compatible,
+ mci->ctl_name = ppc4xx_edac_match->compatible;
mci->dev_name = np->full_name;
/* Initialize callbacks */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 1d2c27a00a4a..2ccd1db5e98f 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1916,7 +1916,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
int i;
for (i = 0; i < 3; i++) {
- irq = platform_get_irq(pdev, i);
+ irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
rc = -EINVAL;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 80db43a22069..68216988391f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -192,7 +192,9 @@ static int fw_unit_remove(struct device *dev)
struct fw_driver *driver =
container_of(dev->driver, struct fw_driver, driver);
- return driver->remove(fw_unit(dev)), 0;
+ driver->remove(fw_unit(dev));
+
+ return 0;
}
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 5392e1fc6b4e..cacdf1589b10 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -848,8 +848,6 @@ static int scmi_remove(struct platform_device *pdev)
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
- scmi_notification_exit(&info->handle);
-
mutex_lock(&scmi_list_mutex);
if (info->users)
ret = -EBUSY;
@@ -860,6 +858,8 @@ static int scmi_remove(struct platform_device *pdev)
if (ret)
return ret;
+ scmi_notification_exit(&info->handle);
+
/* Safe to free channels since no more users */
ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 82a82a5dc86a..fcbe2677f84b 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -9,9 +9,11 @@
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include "common.h"
@@ -23,6 +25,8 @@
* @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area
* @func_id: smc/hvc call function id
+ * @irq: optional; used when the platform signals message completion by interrupt
+ * @tx_complete: optional; used only when @irq is valid
*/
struct scmi_smc {
@@ -30,8 +34,19 @@ struct scmi_smc {
struct scmi_shared_mem __iomem *shmem;
struct mutex shmem_lock;
u32 func_id;
+ int irq;
+ struct completion tx_complete;
};
+static irqreturn_t smc_msg_done_isr(int irq, void *data)
+{
+ struct scmi_smc *scmi_info = data;
+
+ complete(&scmi_info->tx_complete);
+
+ return IRQ_HANDLED;
+}
+
static bool smc_chan_available(struct device *dev, int idx)
{
struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
@@ -51,7 +66,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct resource res;
struct device_node *np;
u32 func_id;
- int ret;
+ int ret, irq;
if (!tx)
return -ENODEV;
@@ -79,6 +94,24 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (ret < 0)
return ret;
+ /*
+ * If there is an interrupt named "a2p", then the service and
+ * completion of a message are signaled by an interrupt rather than
+ * by the return of the SMC call.
+ */
+ irq = of_irq_get_byname(cdev->of_node, "a2p");
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, smc_msg_done_isr,
+ IRQF_NO_SUSPEND,
+ dev_name(dev), scmi_info);
+ if (ret) {
+ dev_err(dev, "failed to setup SCMI smc irq\n");
+ return ret;
+ }
+ init_completion(&scmi_info->tx_complete);
+ scmi_info->irq = irq;
+ }
+
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
mutex_init(&scmi_info->shmem_lock);
@@ -110,7 +143,14 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
shmem_tx_prepare(scmi_info->shmem, xfer);
+ if (scmi_info->irq)
+ reinit_completion(&scmi_info->tx_complete);
+
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (scmi_info->irq)
+ wait_for_completion(&scmi_info->tx_complete);
+
scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
mutex_unlock(&scmi_info->shmem_lock);
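Condensed from the hunks above, the optional-IRQ completion pattern in isolation (a sketch, not the driver's exact code):

/*
 * Sketch of the "a2p" completion handshake: when the platform signals
 * message completion by interrupt, the SMC return alone no longer
 * implies the message was served, so the ISR completes tx_complete.
 */
if (scmi_info->irq)
	reinit_completion(&scmi_info->tx_complete);

arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);

if (scmi_info->irq)
	wait_for_completion(&scmi_info->tx_complete);
/* without the IRQ, the synchronous SMC return implies completion */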
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 34f53d898acb..e1926483ae2f 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -3,8 +3,9 @@
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
@@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
- entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
- entry_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
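On the consumer side, the zero-length case now surfaces as a boolean; a hedged sketch (the property names are made up for illustration):

#include <linux/device.h>
#include <linux/property.h>

/*
 * Hypothetical consumer: "some-flag" and "some-blob" are illustrative
 * names, not real Apple property keys.
 */
static int example_read_props(struct device *dev)
{
	u8 blob[16];

	/* zero-length properties are now booleans */
	if (device_property_present(dev, "some-flag"))
		dev_info(dev, "boolean property is set\n");

	/* non-empty properties remain u8 arrays */
	return device_property_read_u8_array(dev, "some-blob", blob,
					     sizeof(blob));
}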
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 8a94388e38b3..c23466e05e60 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -38,6 +38,8 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable LTO
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 22ece1ad68a8..b69d63143e0d 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -61,10 +61,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- efi_info("EFI_RNG_PROTOCOL unavailable, KASLR will be disabled\n");
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
efi_nokaslr = true;
} else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed (0x%lx), KASLR will be disabled\n",
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
status);
efi_nokaslr = true;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b50a6c67d9bd..cde0a2ef507d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -672,7 +672,7 @@ typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
union efi_tcg2_protocol {
struct {
void *get_capability;
- efi_status_t (__efiapi *get_event_log)(efi_handle_t,
+ efi_status_t (__efiapi *get_event_log)(efi_tcg2_protocol_t *,
efi_tcg2_event_log_format,
efi_physical_addr_t *,
efi_physical_addr_t *,
@@ -849,4 +849,13 @@ void efi_handle_post_ebs_state(void);
enum efi_secureboot_mode efi_get_secureboot(void);
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+void efi_enable_reset_attack_mitigation(void);
+#else
+static inline void
+efi_enable_reset_attack_mitigation(void) { }
+#endif
+
+void efi_retrieve_tpm2_eventlog(void);
+
#endif
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 0205987a4fd4..dc83ea118c67 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -46,14 +46,13 @@ static int coreboot_bus_probe(struct device *dev)
static int coreboot_bus_remove(struct device *dev)
{
- int ret = 0;
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(dev->driver);
if (driver->remove)
- ret = driver->remove(device);
+ driver->remove(device);
- return ret;
+ return 0;
}
static struct bus_type coreboot_bus_type = {
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 7b7b4a6eedda..beb778674acd 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -72,7 +72,7 @@ struct coreboot_device {
/* A driver for handling devices described in coreboot tables. */
struct coreboot_driver {
int (*probe)(struct coreboot_device *);
- int (*remove)(struct coreboot_device *);
+ void (*remove)(struct coreboot_device *);
struct device_driver drv;
u32 tag;
};
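With remove() returning void, a driver against the updated interface looks like this (a skeleton sketch; the tag and callbacks are placeholders):

/* Hypothetical skeleton for the new void remove() signature. */
static int example_probe(struct coreboot_device *dev)
{
	return 0;			/* bind to the device */
}

static void example_remove(struct coreboot_device *dev)
{
	/* teardown only; remove() can no longer report failure */
}

static struct coreboot_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.drv	= {
		.name = "example",
	},
	.tag	= 0x99,			/* placeholder coreboot table tag */
};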
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 916f26adc595..c6dcc1ef93ac 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -72,13 +72,11 @@ static int framebuffer_probe(struct coreboot_device *dev)
return PTR_ERR_OR_ZERO(pdev);
}
-static int framebuffer_remove(struct coreboot_device *dev)
+static void framebuffer_remove(struct coreboot_device *dev)
{
struct platform_device *pdev = dev_get_drvdata(&dev->dev);
platform_device_unregister(pdev);
-
- return 0;
}
static struct coreboot_driver framebuffer_driver = {
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index d17e4d6ac9bc..74b5286518ee 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -91,11 +91,9 @@ static int memconsole_probe(struct coreboot_device *dev)
return memconsole_sysfs_init();
}
-static int memconsole_remove(struct coreboot_device *dev)
+static void memconsole_remove(struct coreboot_device *dev)
{
memconsole_exit();
-
- return 0;
}
static struct coreboot_driver memconsole_driver = {
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index d23c5c69ab52..ee6e08c0592b 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -298,14 +298,12 @@ static int vpd_probe(struct coreboot_device *dev)
return 0;
}
-static int vpd_remove(struct coreboot_device *dev)
+static void vpd_remove(struct coreboot_device *dev)
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);
kobject_put(vpd_kobj);
-
- return 0;
}
static struct coreboot_driver vpd_driver = {
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 1d2e5b85d7ca..c027d99f2a59 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -13,6 +13,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 7be48c1bec96..f57779fc7ee9 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -965,8 +965,11 @@ EXPORT_SYMBOL(qcom_scm_ice_available);
* qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
* @index: the keyslot to invalidate
*
- * The UFSHCI standard defines a standard way to do this, but it doesn't work on
- * these SoCs; only this SCM call does.
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
@@ -995,10 +998,13 @@ EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
* units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
*
* Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
- * can then be used to encrypt/decrypt UFS I/O requests inline.
+ * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
*
- * The UFSHCI standard defines a standard way to do this, but it doesn't work on
- * these SoCs; only this SCM call does.
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
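A hedged usage sketch for the invalidate helper documented above (the keyslot index and error handling are illustrative):

#include <linux/device.h>
#include <linux/qcom_scm.h>

/* Illustrative only: evict one inline-crypto keyslot. */
static int example_evict_ice_slot(struct device *dev, u32 slot)
{
	int err = qcom_scm_ice_invalidate_key(slot);

	if (err)
		dev_err(dev, "ICE keyslot %u invalidate failed: %d\n",
			slot, err);
	return err;
}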
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 00c88b809c0c..d52bfc5ed5e4 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -5,16 +5,22 @@
#define pr_fmt(fmt) "smccc: " fmt
+#include <linux/cache.h>
#include <linux/init.h>
#include <linux/arm-smccc.h>
+#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+bool __ro_after_init smccc_trng_available = false;
+
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
{
smccc_version = version;
smccc_conduit = conduit;
+
+ smccc_trng_available = smccc_probe_trng();
}
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 5645226ca3ce..5ff9438b7b46 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -192,6 +192,17 @@ config FPGA_DFL_AFU
to the FPGA infrastructure via a Port. There may be more than one
Port/AFU per DFL based FPGA device.
+config FPGA_DFL_NIOS_INTEL_PAC_N3000
+ tristate "FPGA DFL NIOS Driver for Intel PAC N3000"
+ depends on FPGA_DFL
+ select REGMAP
+ help
+ This is the driver for the N3000 Nios private feature on Intel
+ PAC (Programmable Acceleration Card) N3000. It communicates
+ with the embedded Nios processor to configure the retimers on
+ the card. It also instantiates the SPI master (spi-altera) for
+ the card's BMC (Board Management Controller).
+
config FPGA_DFL_PCI
tristate "FPGA DFL PCIe Device Driver"
depends on PCI && FPGA_DFL
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index d8e21dfc6778..18dc9885883a 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -44,5 +44,7 @@ dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
+obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o
+
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 531266287eee..4299145ef347 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -192,7 +192,7 @@ static struct attribute *fme_perf_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group fme_perf_cpumask_group = {
+static const struct attribute_group fme_perf_cpumask_group = {
.attrs = fme_perf_cpumask_attrs,
};
@@ -225,7 +225,7 @@ static struct attribute *fme_perf_format_attrs[] = {
NULL,
};
-static struct attribute_group fme_perf_format_group = {
+static const struct attribute_group fme_perf_format_group = {
.name = "format",
.attrs = fme_perf_format_attrs,
};
@@ -239,7 +239,7 @@ static struct attribute *fme_perf_events_attrs_empty[] = {
NULL,
};
-static struct attribute_group fme_perf_events_group = {
+static const struct attribute_group fme_perf_events_group = {
.name = "events",
.attrs = fme_perf_events_attrs_empty,
};
diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c
new file mode 100644
index 000000000000..7a95366f6516
--- /dev/null
+++ b/drivers/fpga/dfl-n3000-nios.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFL device driver for Nios private feature on Intel PAC (Programmable
+ * Acceleration Card) N3000
+ *
+ * Copyright (C) 2019-2020 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stddef.h>
+#include <linux/spi/altera.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/*
+ * N3000 Nios private feature registers, named NIOS_SPI_XX in the spec.
+ * NS is the abbreviation of NIOS_SPI.
+ */
+#define N3000_NS_PARAM 0x8
+#define N3000_NS_PARAM_SHIFT_MODE_MSK BIT_ULL(1)
+#define N3000_NS_PARAM_SHIFT_MODE_MSB 0
+#define N3000_NS_PARAM_SHIFT_MODE_LSB 1
+#define N3000_NS_PARAM_DATA_WIDTH GENMASK_ULL(7, 2)
+#define N3000_NS_PARAM_NUM_CS GENMASK_ULL(13, 8)
+#define N3000_NS_PARAM_CLK_POL BIT_ULL(14)
+#define N3000_NS_PARAM_CLK_PHASE BIT_ULL(15)
+#define N3000_NS_PARAM_PERIPHERAL_ID GENMASK_ULL(47, 32)
+
+#define N3000_NS_CTRL 0x10
+#define N3000_NS_CTRL_WR_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_CTRL_ADDR GENMASK_ULL(44, 32)
+#define N3000_NS_CTRL_CMD_MSK GENMASK_ULL(63, 62)
+#define N3000_NS_CTRL_CMD_NOP 0
+#define N3000_NS_CTRL_CMD_RD 1
+#define N3000_NS_CTRL_CMD_WR 2
+
+#define N3000_NS_STAT 0x18
+#define N3000_NS_STAT_RD_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_STAT_RW_VAL BIT_ULL(32)
+
+/* Nios handshake registers, indirect access */
+#define N3000_NIOS_INIT 0x1000
+#define N3000_NIOS_INIT_DONE BIT(0)
+#define N3000_NIOS_INIT_START BIT(1)
+/* Mode for retimer A, link 0, the same below */
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK GENMASK(9, 8)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK GENMASK(11, 10)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK GENMASK(13, 12)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK GENMASK(15, 14)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK GENMASK(17, 16)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK GENMASK(19, 18)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK GENMASK(21, 20)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK GENMASK(23, 22)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO 0x0
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR 0x1
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS 0x2
+
+#define N3000_NIOS_FW_VERSION 0x1004
+#define N3000_NIOS_FW_VERSION_PATCH GENMASK(23, 20)
+#define N3000_NIOS_FW_VERSION_MINOR GENMASK(27, 24)
+#define N3000_NIOS_FW_VERSION_MAJOR GENMASK(31, 28)
+
+/* The retimers used on the Intel PAC N3000 are Parkvale, abbreviated PKVL */
+#define N3000_NIOS_PKVL_A_MODE_STS 0x1020
+#define N3000_NIOS_PKVL_B_MODE_STS 0x1024
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_MSK GENMASK(15, 8)
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_OK 0x0
+#define N3000_NIOS_PKVL_MODE_STS_ID_MSK GENMASK(7, 0)
+/* When GROUP MASK field == GROUP_OK */
+#define N3000_NIOS_PKVL_MODE_ID_RESET 0x0
+#define N3000_NIOS_PKVL_MODE_ID_4X10G 0x1
+#define N3000_NIOS_PKVL_MODE_ID_4X25G 0x2
+#define N3000_NIOS_PKVL_MODE_ID_2X25G 0x3
+#define N3000_NIOS_PKVL_MODE_ID_2X25G_2X10G 0x4
+#define N3000_NIOS_PKVL_MODE_ID_1X25G 0x5
+
+#define N3000_NIOS_REGBUS_RETRY_COUNT 10000 /* loop count */
+
+#define N3000_NIOS_INIT_TIMEOUT 10000000 /* usec */
+#define N3000_NIOS_INIT_TIME_INTV 100000 /* usec */
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL \
+ (N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK)
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS))
+
+struct n3000_nios {
+ void __iomem *base;
+ struct regmap *regmap;
+ struct device *dev;
+ struct platform_device *altera_spi;
+};
+
+static ssize_t nios_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%x.%x.%x\n",
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MINOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_PATCH, val));
+}
+static DEVICE_ATTR_RO(nios_fw_version);
+
+#define IS_MODE_STATUS_OK(mode_stat) \
+ (FIELD_GET(N3000_NIOS_PKVL_MODE_STS_GROUP_MSK, (mode_stat)) == \
+ N3000_NIOS_PKVL_MODE_STS_GROUP_OK)
+
+#define IS_RETIMER_FEC_SUPPORTED(retimer_mode) \
+ ((retimer_mode) != N3000_NIOS_PKVL_MODE_ID_RESET && \
+ (retimer_mode) != N3000_NIOS_PKVL_MODE_ID_4X10G)
+
+static int get_retimer_mode(struct n3000_nios *nn, unsigned int mode_stat_reg,
+ unsigned int *retimer_mode)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, mode_stat_reg, &val);
+ if (ret)
+ return ret;
+
+ if (!IS_MODE_STATUS_OK(val))
+ return -EFAULT;
+
+ *retimer_mode = FIELD_GET(N3000_NIOS_PKVL_MODE_STS_ID_MSK, val);
+
+ return 0;
+}
+
+static ssize_t retimer_A_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_A_mode);
+
+static ssize_t retimer_B_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_B_mode);
+
+static ssize_t fec_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int val, retimer_a_mode, retimer_b_mode, fec_modes;
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ int ret;
+
+ /* FEC mode setting is not supported in early FW versions */
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) < 3)
+ return sysfs_emit(buf, "not supported\n");
+
+ /* If no 25G links, FEC mode setting is not supported either */
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &retimer_a_mode);
+ if (ret)
+ return ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &retimer_b_mode);
+ if (ret)
+ return ret;
+
+ if (!IS_RETIMER_FEC_SUPPORTED(retimer_a_mode) &&
+ !IS_RETIMER_FEC_SUPPORTED(retimer_b_mode))
+ return sysfs_emit(buf, "not supported\n");
+
+ /* get the valid FEC mode for 25G links */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * FEC mode should always be the same for all links, since that is
+ * how we configure them.
+ */
+ fec_modes = (val & N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL);
+ if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL)
+ return sysfs_emit(buf, "no\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL)
+ return sysfs_emit(buf, "kr\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL)
+ return sysfs_emit(buf, "rs\n");
+
+ return -EFAULT;
+}
+static DEVICE_ATTR_RO(fec_mode);
+
+static struct attribute *n3000_nios_attrs[] = {
+ &dev_attr_nios_fw_version.attr,
+ &dev_attr_retimer_A_mode.attr,
+ &dev_attr_retimer_B_mode.attr,
+ &dev_attr_fec_mode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(n3000_nios);
+
+static int n3000_nios_init_done_check(struct n3000_nios *nn)
+{
+ unsigned int val, state_a, state_b;
+ struct device *dev = nn->dev;
+ int ret, ret2;
+
+ /*
+ * The SPI master is shared with the Nios core inside the FPGA. The
+ * Nios core uses it for some one-time initialization after power up
+ * and then releases control to the OS. The driver needs to poll
+ * INIT_DONE to learn when it may take control.
+ *
+ * Note that Nios firmware version 3.0.0 introduced INIT_START, so
+ * the driver must trigger START first and then check INIT_DONE.
+ */
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If the Nios version register is entirely uninitialized (== 0x0), the
+ * Nios firmware is missing. The host can then take control of the SPI
+ * master safely, but the Nios initialization work has not been done. To
+ * restore the card, a new Nios firmware must be reprogrammed via the BMC
+ * chip on the SPI bus, so the driver doesn't error out; it continues to
+ * create the SPI controller device and spi_board_info for the BMC.
+ */
+ if (val == 0) {
+ dev_err(dev, "Nios version reg = 0x%x, skip INIT_DONE check, but the retimer may be uninitialized\n",
+ val);
+ return 0;
+ }
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) >= 3) {
+ /* read NIOS_INIT to check if retimer initialization is done */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+ /* check if retimers are initialized already */
+ if (val & (N3000_NIOS_INIT_DONE | N3000_NIOS_INIT_START))
+ goto nios_init_done;
+
+ /* configure the FEC mode (always Reed-Solomon, see below) */
+ val = N3000_NIOS_INIT_START;
+
+ /*
+ * When the retimer is to be set to 10G mode there is no FEC mode
+ * setting, so the Nios firmware ignores the REQ_FEC_MODE field in
+ * that case. We still fill in the FEC mode field because the host
+ * cannot query the retimer working mode until the Nios init is
+ * done.
+ *
+ * For now the driver doesn't support switching the retimer FEC
+ * mode on user request; it is always set to Reed-Solomon FEC.
+ *
+ * The driver will set the same FEC mode for all links.
+ */
+ val |= N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL;
+
+ ret = regmap_write(nn->regmap, N3000_NIOS_INIT, val);
+ if (ret)
+ return ret;
+ }
+
+nios_init_done:
+ /* poll on NIOS_INIT_DONE */
+ ret = regmap_read_poll_timeout(nn->regmap, N3000_NIOS_INIT, val,
+ val & N3000_NIOS_INIT_DONE,
+ N3000_NIOS_INIT_TIME_INTV,
+ N3000_NIOS_INIT_TIMEOUT);
+ if (ret)
+ dev_err(dev, "NIOS_INIT_DONE %s\n",
+ (ret == -ETIMEDOUT) ? "timed out" : "check error");
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_A_MODE_STS, &state_a);
+ if (ret2)
+ return ret2;
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_B_MODE_STS, &state_b);
+ if (ret2)
+ return ret2;
+
+ if (!ret) {
+ /*
+ * After INIT_DONE is detected, it still needs to check if the
+ * Nios firmware reports any error during the retimer
+ * configuration.
+ */
+ if (IS_MODE_STATUS_OK(state_a) && IS_MODE_STATUS_OK(state_b))
+ return 0;
+
+ /*
+ * If the retimer configuration failed, the Nios firmware still
+ * releases the SPI controller so the host can communicate with
+ * the BMC. That makes it possible to reprogram a new Nios
+ * firmware and restore the card, so the driver doesn't error
+ * out; it continues to create the SPI controller device and
+ * spi_board_info for the BMC.
+ */
+ dev_err(dev, "NIOS_INIT_DONE OK, but err on retimer init\n");
+ }
+
+ dev_err(nn->dev, "PKVL_A_MODE_STS 0x%x\n", state_a);
+ dev_err(nn->dev, "PKVL_B_MODE_STS 0x%x\n", state_b);
+
+ return ret;
+}
+
+static struct spi_board_info m10_n3000_info = {
+ .modalias = "m10-n3000",
+ .max_speed_hz = 12500000,
+ .bus_num = 0,
+ .chip_select = 0,
+};
+
+static int create_altera_spi_controller(struct n3000_nios *nn)
+{
+ struct altera_spi_platform_data pdata = { 0 };
+ struct platform_device_info pdevinfo = { 0 };
+ void __iomem *base = nn->base;
+ u64 v;
+
+ v = readq(base + N3000_NS_PARAM);
+
+ pdata.mode_bits = SPI_CS_HIGH;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_POL, v))
+ pdata.mode_bits |= SPI_CPOL;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_PHASE, v))
+ pdata.mode_bits |= SPI_CPHA;
+
+ pdata.num_chipselect = FIELD_GET(N3000_NS_PARAM_NUM_CS, v);
+ pdata.bits_per_word_mask =
+ SPI_BPW_RANGE_MASK(1, FIELD_GET(N3000_NS_PARAM_DATA_WIDTH, v));
+
+ pdata.num_devices = 1;
+ pdata.devices = &m10_n3000_info;
+
+ dev_dbg(nn->dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
+ pdata.num_chipselect, pdata.bits_per_word_mask,
+ pdata.mode_bits);
+
+ pdevinfo.name = "subdev_spi_altera";
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.parent = nn->dev;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+
+ nn->altera_spi = platform_device_register_full(&pdevinfo);
+ return PTR_ERR_OR_ZERO(nn->altera_spi);
+}
+
+static void destroy_altera_spi_controller(struct n3000_nios *nn)
+{
+ platform_device_unregister(nn->altera_spi);
+}
+
+static int n3000_nios_poll_stat_timeout(void __iomem *base, u64 *v)
+{
+ int loops;
+
+ /*
+ * We don't use a time-based timeout here, for performance.
+ *
+ * The regbus read/write is on the critical path of Intel PAC N3000
+ * image programming, and time-based timeout checking would add too
+ * much overhead. The state usually changes within 1 or 2 loops on
+ * the test server; the 10000-iteration cap is for safety.
+ */
+ for (loops = N3000_NIOS_REGBUS_RETRY_COUNT; loops > 0 ; loops--) {
+ *v = readq(base + N3000_NS_STAT);
+ if (*v & N3000_NS_STAT_RW_VAL)
+ break;
+ cpu_relax();
+ }
+
+ return (loops > 0) ? 0 : -ETIMEDOUT;
+}
+
+static int n3000_nios_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_WR) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg) |
+ FIELD_PREP(N3000_NS_CTRL_WR_DATA, val);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to write reg 0x%x val 0x%x: %d\n",
+ reg, val, ret);
+
+ return ret;
+}
+
+static int n3000_nios_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_RD) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to read reg 0x%x: %d\n", reg, ret);
+ else
+ *val = FIELD_GET(N3000_NS_STAT_RD_DATA, v);
+
+ return ret;
+}
+
+static const struct regmap_config n3000_nios_regbus_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+
+ .reg_write = n3000_nios_reg_write,
+ .reg_read = n3000_nios_reg_read,
+};
+
+static int n3000_nios_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct n3000_nios *nn;
+ int ret;
+
+ nn = devm_kzalloc(dev, sizeof(*nn), GFP_KERNEL);
+ if (!nn)
+ return -ENOMEM;
+
+ dev_set_drvdata(&ddev->dev, nn);
+
+ nn->dev = dev;
+
+ nn->base = devm_ioremap_resource(&ddev->dev, &ddev->mmio_res);
+ if (IS_ERR(nn->base))
+ return PTR_ERR(nn->base);
+
+ nn->regmap = devm_regmap_init(dev, NULL, nn, &n3000_nios_regbus_cfg);
+ if (IS_ERR(nn->regmap))
+ return PTR_ERR(nn->regmap);
+
+ ret = n3000_nios_init_done_check(nn);
+ if (ret)
+ return ret;
+
+ ret = create_altera_spi_controller(nn);
+ if (ret)
+ dev_err(dev, "altera spi controller create failed: %d\n", ret);
+
+ return ret;
+}
+
+static void n3000_nios_remove(struct dfl_device *ddev)
+{
+ struct n3000_nios *nn = dev_get_drvdata(&ddev->dev);
+
+ destroy_altera_spi_controller(nn);
+}
+
+#define FME_FEATURE_ID_N3000_NIOS 0xd
+
+static const struct dfl_device_id n3000_nios_ids[] = {
+ { FME_ID, FME_FEATURE_ID_N3000_NIOS },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, n3000_nios_ids);
+
+static struct dfl_driver n3000_nios_driver = {
+ .drv = {
+ .name = "dfl-n3000-nios",
+ .dev_groups = n3000_nios_groups,
+ },
+ .id_table = n3000_nios_ids,
+ .probe = n3000_nios_probe,
+ .remove = n3000_nios_remove,
+};
+
+module_dfl_driver(n3000_nios_driver);
+
+MODULE_DESCRIPTION("Driver for Nios private feature on Intel PAC N3000");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index a2203d03c9e2..04e47e266f26 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -27,6 +27,14 @@
#define DRV_VERSION "0.8"
#define DRV_NAME "dfl-pci"
+#define PCI_VSEC_ID_INTEL_DFLS 0x43
+
+#define PCI_VNDR_DFLS_CNT 0x8
+#define PCI_VNDR_DFLS_RES 0xc
+
+#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
+#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
+
struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
@@ -119,49 +127,94 @@ static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
return table;
}
-/* enumerate feature devices under pci device */
-static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
- struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
- int port_num, bar, i, nvec, ret = 0;
- struct dfl_fpga_enum_info *info;
- struct dfl_fpga_cdev *cdev;
+ u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
+ int dfl_res_off, i, bars, voff = 0;
resource_size_t start, len;
- void __iomem *base;
- int *irq_table;
- u32 offset;
- u64 v;
- /* allocate enumeration info via pci_dev */
- info = dfl_fpga_enum_info_alloc(&pcidev->dev);
- if (!info)
- return -ENOMEM;
+ while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
+ vndr_hdr = 0;
+ pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);
- /* add irq info for enumeration if the device support irq */
- nvec = cci_pci_alloc_irq(pcidev);
- if (nvec < 0) {
- dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
- ret = nvec;
- goto enum_info_free_exit;
- } else if (nvec) {
- irq_table = cci_pci_create_irq_table(pcidev, nvec);
- if (!irq_table) {
- ret = -ENOMEM;
- goto irq_free_exit;
+ if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
+ pcidev->vendor == PCI_VENDOR_ID_INTEL)
+ break;
+ }
+
+ if (!voff) {
+ dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
+ return -ENODEV;
+ }
+
+ dfl_cnt = 0;
+ pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
+ if (dfl_cnt > PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
+ __func__, dfl_cnt, PCI_STD_NUM_BARS);
+ return -EINVAL;
+ }
+
+ dfl_res_off = voff + PCI_VNDR_DFLS_RES;
+ if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
+ dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
+ dfl_res = GENMASK(31, 0);
+ pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);
+
+ bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
+ if (bir >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s bad bir number %d\n",
+ __func__, bir);
+ return -EINVAL;
}
- ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
- kfree(irq_table);
- if (ret)
- goto irq_free_exit;
+ if (bars & BIT(bir)) {
+ dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
+ __func__, bir);
+ return -EINVAL;
+ }
+
+ bars |= BIT(bir);
+
+ len = pci_resource_len(pcidev, bir);
+ offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
+ if (offset >= len) {
+ dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
+ __func__, offset, &len);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);
+
+ len -= offset;
+
+ start = pci_resource_start(pcidev, bir) + offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
}
- /* start to find Device Feature List in Bar 0 */
+ return 0;
+}
+
+/* default method of finding dfls starting at offset 0 of bar 0 */
+static int find_dfls_by_default(struct pci_dev *pcidev,
+ struct dfl_fpga_enum_info *info)
+{
+ int port_num, bar, i, ret = 0;
+ resource_size_t start, len;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+ /* start to find Device Feature List from Bar 0 */
base = cci_pci_ioremap_bar0(pcidev);
- if (!base) {
- ret = -ENOMEM;
- goto irq_free_exit;
- }
+ if (!base)
+ return -ENOMEM;
/*
* PF device has FME and Ports/AFUs, and VF device only has one
@@ -208,12 +261,54 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
dfl_fpga_enum_info_add_dfl(info, start, len);
} else {
ret = -ENODEV;
- goto irq_free_exit;
}
/* release I/O mappings for next step enumeration */
pcim_iounmap_regions(pcidev, BIT(0));
+ return ret;
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ int nvec, ret = 0;
+ int *irq_table;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* add irq info for enumeration if the device supports interrupts */
+ nvec = cci_pci_alloc_irq(pcidev);
+ if (nvec < 0) {
+ dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+ ret = nvec;
+ goto enum_info_free_exit;
+ } else if (nvec) {
+ irq_table = cci_pci_create_irq_table(pcidev, nvec);
+ if (!irq_table) {
+ ret = -ENOMEM;
+ goto irq_free_exit;
+ }
+
+ ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+ kfree(irq_table);
+ if (ret)
+ goto irq_free_exit;
+ }
+
+ ret = find_dfls_by_vsec(pcidev, info);
+ if (ret == -ENODEV)
+ ret = find_dfls_by_default(pcidev, info);
+
+ if (ret)
+ goto irq_free_exit;
+
/* start enumeration with prepared enumeration information */
cdev = dfl_fpga_feature_devs_enumerate(info);
if (IS_ERR(cdev)) {
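Decoding one DFLS resource dword from the VSEC, condensed from find_dfls_by_vsec() above (a sketch using the masks defined in this patch):

/*
 * Sketch: each PCI_VNDR_DFLS_RES dword packs the BAR index into bits
 * 2:0 and the (8-byte aligned) DFL start offset into bits 31:3.
 */
u32 dfl_res, bir, offset;
resource_size_t start, len;

pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;	/* which BAR holds the DFL */
offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;	/* byte offset within it */
start = pci_resource_start(pcidev, bir) + offset;
len = pci_resource_len(pcidev, bir) - offset;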
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b450870b75ed..511b20ff35a3 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -10,6 +10,7 @@
* Wu Hao <hao.wu@intel.com>
* Xiao Guangrong <guangrong.xiao@linux.intel.com>
*/
+#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -298,8 +299,7 @@ static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct dfl_device *ddev = to_dfl_dev(dev);
- /* The type has 4 valid bits and feature_id has 12 valid bits */
- return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
+ return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
ddev->type, ddev->feature_id);
}
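With the widened format string, both fields print as four hex digits; e.g. for the FME Nios feature added in this series (type FME_ID == 0, feature 0xd) — a quick illustration:

char buf[32];

/* "%04X" pads both fields; FME_ID (0), feature 0xd: */
snprintf(buf, sizeof(buf), "MODALIAS=dfl:t%04Xf%04X", 0, 0xd);
/* buf == "MODALIAS=dfl:t0000f000D" */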
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5dc758f655b7..2b82c96ba56c 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
@@ -516,88 +517,4 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg);
-/**
- * enum dfl_id_type - define the DFL FIU types
- */
-enum dfl_id_type {
- FME_ID,
- PORT_ID,
- DFL_ID_MAX,
-};
-
-/**
- * struct dfl_device_id - dfl device identifier
- * @type: contains 4 bits DFL FIU type of the device. See enum dfl_id_type.
- * @feature_id: contains 12 bits feature identifier local to its DFL FIU type.
- * @driver_data: driver specific data.
- */
-struct dfl_device_id {
- u8 type;
- u16 feature_id;
- unsigned long driver_data;
-};
-
-/**
- * struct dfl_device - represent an dfl device on dfl bus
- *
- * @dev: generic device interface.
- * @id: id of the dfl device.
- * @type: type of DFL FIU of the device. See enum dfl_id_type.
- * @feature_id: 16 bits feature identifier local to its DFL FIU type.
- * @mmio_res: mmio resource of this dfl device.
- * @irqs: list of Linux IRQ numbers of this dfl device.
- * @num_irqs: number of IRQs supported by this dfl device.
- * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
- * @id_entry: matched id entry in dfl driver's id table.
- */
-struct dfl_device {
- struct device dev;
- int id;
- u8 type;
- u16 feature_id;
- struct resource mmio_res;
- int *irqs;
- unsigned int num_irqs;
- struct dfl_fpga_cdev *cdev;
- const struct dfl_device_id *id_entry;
-};
-
-/**
- * struct dfl_driver - represent an dfl device driver
- *
- * @drv: driver model structure.
- * @id_table: pointer to table of device IDs the driver is interested in.
- * { } member terminated.
- * @probe: mandatory callback for device binding.
- * @remove: callback for device unbinding.
- */
-struct dfl_driver {
- struct device_driver drv;
- const struct dfl_device_id *id_table;
-
- int (*probe)(struct dfl_device *dfl_dev);
- void (*remove)(struct dfl_device *dfl_dev);
-};
-
-#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
-#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
-
-/*
- * use a macro to avoid include chaining to get THIS_MODULE.
- */
-#define dfl_driver_register(drv) \
- __dfl_driver_register(drv, THIS_MODULE)
-int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
-void dfl_driver_unregister(struct dfl_driver *dfl_drv);
-
-/*
- * module_dfl_driver() - Helper macro for drivers that don't do
- * anything special in module init/exit. This eliminates a lot of
- * boilerplate. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit().
- */
-#define module_dfl_driver(__dfl_driver) \
- module_driver(__dfl_driver, dfl_driver_register, \
- dfl_driver_unregister)
-
#endif /* __FPGA_DFL_H */
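With the block above relocated to the public <linux/dfl.h> (together with enum dfl_id_type and struct dfl_device), drivers outside drivers/fpga can bind to DFL features without reaching into subsystem internals. A hedged sketch of a minimal consumer of the moved API — the feature ID 0x23 and all names are invented, and the MODULE_DEVICE_TABLE(dfl, ...) support is assumed to come from the same series:

#include <linux/dfl.h>
#include <linux/module.h>

static int example_probe(struct dfl_device *ddev)
{
	dev_info(&ddev->dev, "bound to feature 0x%x\n", ddev->feature_id);
	return 0;
}

static const struct dfl_device_id example_ids[] = {
	{ FME_ID, 0x23 },	/* hypothetical FME sub-feature */
	{ }
};
MODULE_DEVICE_TABLE(dfl, example_ids);

static struct dfl_driver example_driver = {
	.drv		= { .name = "dfl-example" },
	.id_table	= example_ids,
	.probe		= example_probe,
};
module_dfl_driver(example_driver);

MODULE_LICENSE("GPL");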
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 2deccacc3aa7..e9266b2a357f 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -17,7 +17,7 @@ static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
-static spinlock_t bridge_list_lock;
+static DEFINE_SPINLOCK(bridge_list_lock);
/**
* fpga_bridge_enable - Enable transactions on the bridge
@@ -479,8 +479,6 @@ static void fpga_bridge_dev_release(struct device *dev)
static int __init fpga_bridge_dev_init(void)
{
- spin_lock_init(&bridge_list_lock);
-
fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
if (IS_ERR(fpga_bridge_class))
return PTR_ERR(fpga_bridge_class);
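DEFINE_SPINLOCK() initializes the lock at compile time, so bridge_list_lock is valid before fpga_bridge_dev_init() ever runs and the explicit spin_lock_init() call can simply be dropped. The two initialization styles side by side, as a sketch:

#include <linux/init.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(static_lock);	/* usable from the first instruction */

static spinlock_t runtime_lock;		/* invalid until initialized */

static int __init example_init(void)
{
	spin_lock_init(&runtime_lock);	/* must run before any first use */
	return 0;
}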
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c70f46e80a3b..e3607ec4c2e8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -441,8 +441,9 @@ config GPIO_MXC
select GENERIC_IRQ_CHIP
config GPIO_MXS
- def_bool y
+ bool "Freescale MXS GPIO support" if COMPILE_TEST
depends on ARCH_MXS || COMPILE_TEST
+ default y if ARCH_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -486,11 +487,11 @@ config GPIO_PXA
Say yes here to support the PXA GPIO device
config GPIO_RCAR
- tristate "Renesas R-Car GPIO"
+ tristate "Renesas R-Car and RZ/G GPIO support"
depends on ARCH_RENESAS || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
- Say yes here to support GPIO on Renesas R-Car SoCs.
+ Say yes here to support GPIO on Renesas R-Car or RZ/G SoCs.
config GPIO_RDA
bool "RDA Micro GPIO controller support"
@@ -521,7 +522,8 @@ config GPIO_SAMA5D2_PIOBU
config GPIO_SIFIVE
bool "SiFive GPIO support"
- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO
@@ -593,10 +595,12 @@ config GPIO_TB10X
select OF_GPIO
config GPIO_TEGRA
- bool "NVIDIA Tegra GPIO support"
+ tristate "NVIDIA Tegra GPIO support"
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
@@ -644,6 +648,16 @@ config GPIO_VF610
help
Say yes here to support Vybrid vf610 GPIOs.
+config GPIO_VISCONTI
+ tristate "Toshiba Visconti GPIO support"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support GPIO on Toshiba Visconti.
+
config GPIO_VR41XX
tristate "NEC VR4100 series General-purpose I/O Uint support"
depends on CPU_VR41XX
@@ -666,7 +680,7 @@ config GPIO_WCD934X
tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
depends on MFD_WCD934X && OF_GPIO
help
- This driver is to supprot GPIO block found on the Qualcomm Technologies
+ This driver is to support GPIO block found on the Qualcomm Technologies
Inc WCD9340/WCD9341 Audio Codec.
config GPIO_XGENE
@@ -690,6 +704,8 @@ config GPIO_XGENE_SB
config GPIO_XILINX
tristate "Xilinx GPIO support"
+ select GPIOLIB_IRQCHIP
+ depends on OF_GPIO
help
Say yes here to support the Xilinx FPGA GPIO device
@@ -727,13 +743,6 @@ config GPIO_ZYNQ
help
Say yes here to support Xilinx Zynq GPIO controller.
-config GPIO_ZX
- bool "ZTE ZX GPIO support"
- depends on ARCH_ZX || COMPILE_TEST
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support the GPIO device on ZTE ZX SoCs.
-
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
depends on MACH_LOONGSON32
@@ -1249,13 +1258,6 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
-config GPIO_MSIC
- bool "Intel MSIC mixed signal gpio support"
- depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
- help
- Enable support for GPIO on intel MSIC controllers found in
- intel MID devices
-
config GPIO_PALMAS
bool "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1451,13 +1453,6 @@ config GPIO_BT8XX
If unsure, say N.
-config GPIO_INTEL_MID
- bool "Intel MID GPIO support"
- depends on X86_INTEL_MID
- select GPIOLIB_IRQCHIP
- help
- Say Y here to support Intel MID GPIO.
-
config GPIO_MERRIFIELD
tristate "Intel Merrifield GPIO support"
depends on X86_INTEL_MID
@@ -1633,8 +1628,7 @@ config GPIO_MOCKUP
select IRQ_SIM
help
This enables GPIO Testing driver, which provides a way to test GPIO
- subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
- must be selected for this test.
+ subsystem through sysfs (or char device) and debugfs.
Users can exercise it through the script in
tools/testing/selftests/gpio/gpio-mockup.sh; see that script
for usage.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 35e3b6026665..c58a90a3c3b1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,7 +67,6 @@ obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
-obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
@@ -103,7 +102,6 @@ obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
-obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
@@ -164,6 +162,7 @@ obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
+obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
@@ -180,5 +179,4 @@ obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
obj-$(CONFIG_GPIO_XRA1403) += gpio-xra1403.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
-obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 0229fa79499e..b8b1473a5b1e 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -101,7 +101,7 @@ for a few GPIOs. Those should stay where they are.
At the same time it makes sense to get rid of code duplication in existing or
new coming drivers. For example, gpio-ml-ioh should be incorporated into
-gpio-pch. In similar way gpio-intel-mid into gpio-pxa.
+gpio-pch.
Generic MMIO GPIO
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index dfd8a4876a27..08171431bb8f 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -62,34 +62,6 @@ static char *get_arg(char **args)
return start;
}
-static bool isrange(const char *s)
-{
- size_t n;
-
- if (IS_ERR_OR_NULL(s))
- return false;
-
- while (1) {
- n = strspn(s, "0123456789");
- if (!n)
- return false;
-
- s += n;
-
- switch (*s++) {
- case '\0':
- return true;
-
- case '-':
- case ',':
- break;
-
- default:
- return false;
- }
- }
-}
-
static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
int hwnum, unsigned int *n)
{
@@ -100,8 +72,7 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
if (!lookups)
return -ENOMEM;
- lookups->table[*n] =
- (struct gpiod_lookup)GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+ lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
(*n)++;
memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
@@ -112,10 +83,10 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
static int aggr_parse(struct gpio_aggregator *aggr)
{
+ char *name, *offsets, *p;
char *args = aggr->args;
unsigned long *bitmap;
unsigned int i, n = 0;
- char *name, *offsets;
int error = 0;
bitmap = bitmap_alloc(ARCH_NR_GPIOS, GFP_KERNEL);
@@ -130,7 +101,8 @@ static int aggr_parse(struct gpio_aggregator *aggr)
goto free_bitmap;
}
- if (!isrange(offsets)) {
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
/* Named GPIO line */
error = aggr_add_gpio(aggr, name, U16_MAX, &n);
if (error)
@@ -271,7 +243,7 @@ static DRIVER_ATTR_WO(delete_device);
static struct attribute *gpio_aggregator_attrs[] = {
&driver_attr_new_device.attr,
&driver_attr_delete_device.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);
@@ -545,7 +517,7 @@ static const struct of_device_id gpio_aggregator_dt_ids[] = {
* Add GPIO-operated devices controlled from userspace below,
* or use "driver_override" in sysfs
*/
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
#endif
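get_options() replaces the hand-rolled isrange(): called with nints == 0 it only validates the string, stores the number of parseable integers (ranges like "3-5" included) in ints[0], and returns a pointer just past what it consumed. The aggregator therefore treats the argument as a named line when nothing parsed (error == 0) or trailing characters remain (*p). That check in isolation, assuming the nints == 0 counting behavior this series adds to lib/cmdline.c:

#include <linux/kernel.h>	/* get_options() */
#include <linux/types.h>

/* True if @s is purely a list/range of unsigned ints, e.g. "1,3-5". */
static bool is_numeric_range(const char *s)
{
	int count;
	char *end = get_options(s, 0, &count);	/* validate + count only */

	return count > 0 && *end == '\0';
}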
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 45b3da8da336..397a50d6bc65 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -12,7 +12,8 @@
#define GPIO_OUT_REG(offset) (BD70528_REG_GPIO1_OUT + (offset) * 2)
struct bd70528_gpio {
- struct rohm_regmap_dev chip;
+ struct regmap *regmap;
+ struct device *dev;
struct gpio_chip gpio;
};
@@ -35,11 +36,11 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
val = BD70528_DEBOUNCE_50MS;
break;
default:
- dev_err(bdgpio->chip.dev,
+ dev_err(bdgpio->dev,
"Invalid debounce value %u\n", debounce);
return -EINVAL;
}
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_IN_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_IN_REG(offset),
BD70528_DEBOUNCE_MASK, val);
}
@@ -49,9 +50,9 @@ static int bd70528_get_direction(struct gpio_chip *chip, unsigned int offset)
int val, ret;
/* Do we need to do something to IRQs here? */
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset), &val);
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
if (ret) {
- dev_err(bdgpio->chip.dev, "Could not read gpio direction\n");
+ dev_err(bdgpio->dev, "Could not read gpio direction\n");
return ret;
}
if (val & BD70528_GPIO_OUT_EN_MASK)
@@ -67,13 +68,13 @@ static int bd70528_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD70528_GPIO_DRIVE_MASK,
BD70528_GPIO_OPEN_DRAIN);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD70528_GPIO_DRIVE_MASK,
BD70528_GPIO_PUSH_PULL);
@@ -93,7 +94,7 @@ static int bd70528_direction_input(struct gpio_chip *chip, unsigned int offset)
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
/* Do we need to do something to IRQs here? */
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_EN_MASK,
BD70528_GPIO_OUT_DISABLE);
}
@@ -105,10 +106,10 @@ static void bd70528_gpio_set(struct gpio_chip *chip, unsigned int offset,
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
u8 val = (value) ? BD70528_GPIO_OUT_HI : BD70528_GPIO_OUT_LO;
- ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_MASK, val);
if (ret)
- dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+ dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
}
static int bd70528_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -117,7 +118,7 @@ static int bd70528_direction_output(struct gpio_chip *chip, unsigned int offset,
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
bd70528_gpio_set(chip, offset, value);
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_EN_MASK,
BD70528_GPIO_OUT_ENABLE);
}
@@ -129,11 +130,11 @@ static int bd70528_gpio_get_o(struct bd70528_gpio *bdgpio, unsigned int offset)
int ret;
unsigned int val;
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset), &val);
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
if (!ret)
ret = !!(val & BD70528_GPIO_OUT_MASK);
else
- dev_err(bdgpio->chip.dev, "GPIO (out) state read failed\n");
+ dev_err(bdgpio->dev, "GPIO (out) state read failed\n");
return ret;
}
@@ -143,12 +144,12 @@ static int bd70528_gpio_get_i(struct bd70528_gpio *bdgpio, unsigned int offset)
unsigned int val;
int ret;
- ret = regmap_read(bdgpio->chip.regmap, BD70528_REG_GPIO_STATE, &val);
+ ret = regmap_read(bdgpio->regmap, BD70528_REG_GPIO_STATE, &val);
if (!ret)
ret = !(val & GPIO_IN_STATE_MASK(offset));
else
- dev_err(bdgpio->chip.dev, "GPIO (in) state read failed\n");
+ dev_err(bdgpio->dev, "GPIO (in) state read failed\n");
return ret;
}
@@ -173,29 +174,22 @@ static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
else if (ret == GPIO_LINE_DIRECTION_IN)
ret = bd70528_gpio_get_i(bdgpio, offset);
else
- dev_err(bdgpio->chip.dev, "failed to read GPIO direction\n");
+ dev_err(bdgpio->dev, "failed to read GPIO direction\n");
return ret;
}
static int bd70528_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct bd70528_gpio *bdgpio;
- struct rohm_regmap_dev *bd70528;
int ret;
- bd70528 = dev_get_drvdata(pdev->dev.parent);
- if (!bd70528) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
- GFP_KERNEL);
+ bdgpio = devm_kzalloc(dev, sizeof(*bdgpio), GFP_KERNEL);
if (!bdgpio)
return -ENOMEM;
- bdgpio->chip.dev = &pdev->dev;
- bdgpio->gpio.parent = pdev->dev.parent;
+ bdgpio->dev = dev;
+ bdgpio->gpio.parent = dev->parent;
bdgpio->gpio.label = "bd70528-gpio";
bdgpio->gpio.owner = THIS_MODULE;
bdgpio->gpio.get_direction = bd70528_get_direction;
@@ -208,14 +202,15 @@ static int bd70528_probe(struct platform_device *pdev)
bdgpio->gpio.ngpio = 4;
bdgpio->gpio.base = -1;
#ifdef CONFIG_OF_GPIO
- bdgpio->gpio.of_node = pdev->dev.parent->of_node;
+ bdgpio->gpio.of_node = dev->parent->of_node;
#endif
- bdgpio->chip.regmap = bd70528->regmap;
+ bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!bdgpio->regmap)
+ return -ENODEV;
- ret = devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
- bdgpio);
+ ret = devm_gpiochip_add_data(dev, &bdgpio->gpio, bdgpio);
if (ret)
- dev_err(&pdev->dev, "gpio_init: Failed to add bd70528-gpio\n");
+ dev_err(dev, "gpio_init: Failed to add bd70528-gpio\n");
return ret;
}
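Both ROHM conversions in this patch follow the same pattern: instead of dereferencing the parent MFD's private struct rohm_regmap_dev, the GPIO cell asks the driver core for the regmap registered on its parent and fails cleanly if none is published. The shape of that lookup, as a standalone sketch:

#include <linux/platform_device.h>
#include <linux/regmap.h>

static int child_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	/* The MFD core attached the regmap to the parent device. */
	regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!regmap)
		return -ENODEV;	/* no regmap published on the parent */

	platform_set_drvdata(pdev, regmap);
	return 0;
}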
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 3dbbc638e9a9..c8e382b53f2f 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -11,7 +11,8 @@
#define HALL_GPIO_OFFSET 3
struct bd71828_gpio {
- struct rohm_regmap_dev chip;
+ struct regmap *regmap;
+ struct device *dev;
struct gpio_chip gpio;
};
@@ -29,10 +30,10 @@ static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
if (offset == HALL_GPIO_OFFSET)
return;
- ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD71828_GPIO_OUT_MASK, val);
if (ret)
- dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+ dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
}
static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -42,10 +43,10 @@ static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
if (offset == HALL_GPIO_OFFSET)
- ret = regmap_read(bdgpio->chip.regmap, BD71828_REG_IO_STAT,
+ ret = regmap_read(bdgpio->regmap, BD71828_REG_IO_STAT,
&val);
else
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset),
&val);
if (!ret)
ret = (val & BD71828_GPIO_OUT_MASK);
@@ -63,12 +64,12 @@ static int bd71828_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD71828_GPIO_DRIVE_MASK,
BD71828_GPIO_OPEN_DRAIN);
case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD71828_GPIO_DRIVE_MASK,
BD71828_GPIO_PUSH_PULL);
@@ -96,22 +97,15 @@ static int bd71828_get_direction(struct gpio_chip *chip, unsigned int offset)
static int bd71828_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct bd71828_gpio *bdgpio;
- struct rohm_regmap_dev *bd71828;
- bd71828 = dev_get_drvdata(pdev->dev.parent);
- if (!bd71828) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
- GFP_KERNEL);
+ bdgpio = devm_kzalloc(dev, sizeof(*bdgpio), GFP_KERNEL);
if (!bdgpio)
return -ENOMEM;
- bdgpio->chip.dev = &pdev->dev;
- bdgpio->gpio.parent = pdev->dev.parent;
+ bdgpio->dev = dev;
+ bdgpio->gpio.parent = dev->parent;
bdgpio->gpio.label = "bd71828-gpio";
bdgpio->gpio.owner = THIS_MODULE;
bdgpio->gpio.get_direction = bd71828_get_direction;
@@ -127,11 +121,12 @@ static int bd71828_probe(struct platform_device *pdev)
* "gpio-reserved-ranges" and exclude them from control
*/
bdgpio->gpio.ngpio = 4;
- bdgpio->gpio.of_node = pdev->dev.parent->of_node;
- bdgpio->chip.regmap = bd71828->regmap;
+ bdgpio->gpio.of_node = dev->parent->of_node;
+ bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!bdgpio->regmap)
+ return -ENODEV;
- return devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
- bdgpio);
+ return devm_gpiochip_add_data(dev, &bdgpio->gpio, bdgpio);
}
static struct platform_driver bd71828_gpio = {
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index c0abc9c6851b..df6102b57734 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -1,31 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * ROHM BD9571MWV-M GPIO driver
+ * ROHM BD9571MWV-M and BD9574MWF-M GPIO driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65086 driver
*
* NOTE: Interrupts are not supported yet.
*/
#include <linux/gpio/driver.h>
+#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/bd9571mwv.h>
struct bd9571mwv_gpio {
+ struct regmap *regmap;
struct gpio_chip chip;
- struct bd9571mwv *bd;
};
static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
@@ -34,7 +27,7 @@ static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
int ret, val;
- ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_DIR, &val);
+ ret = regmap_read(gpio->regmap, BD9571MWV_GPIO_DIR, &val);
if (ret < 0)
return ret;
if (val & BIT(offset))
@@ -48,8 +41,7 @@ static int bd9571mwv_gpio_direction_input(struct gpio_chip *chip,
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
- BIT(offset), 0);
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_DIR, BIT(offset), 0);
return 0;
}
@@ -60,9 +52,9 @@ static int bd9571mwv_gpio_direction_output(struct gpio_chip *chip,
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
/* Set the initial value */
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
BIT(offset), value ? BIT(offset) : 0);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_DIR,
BIT(offset), BIT(offset));
return 0;
@@ -73,7 +65,7 @@ static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
int ret, val;
- ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_IN, &val);
+ ret = regmap_read(gpio->regmap, BD9571MWV_GPIO_IN, &val);
if (ret < 0)
return ret;
@@ -85,7 +77,7 @@ static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
BIT(offset), value ? BIT(offset) : 0);
}
@@ -113,9 +105,9 @@ static int bd9571mwv_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio);
- gpio->bd = dev_get_drvdata(pdev->dev.parent);
+ gpio->regmap = dev_get_regmap(pdev->dev.parent, NULL);
gpio->chip = template_chip;
- gpio->chip.parent = gpio->bd->dev;
+ gpio->chip.parent = pdev->dev.parent;
ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (ret < 0) {
@@ -127,7 +119,8 @@ static int bd9571mwv_gpio_probe(struct platform_device *pdev)
}
static const struct platform_device_id bd9571mwv_gpio_id_table[] = {
- { "bd9571mwv-gpio", },
+ { "bd9571mwv-gpio", ROHM_CHIP_TYPE_BD9571 },
+ { "bd9574mwf-gpio", ROHM_CHIP_TYPE_BD9574 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, bd9571mwv_gpio_id_table);
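Adding a second id-table entry with a ROHM_CHIP_TYPE_* value in driver_data lets one driver serve both PMICs; probe can recover the matched variant from the id entry. A sketch with the probe body reduced to just that lookup:

#include <linux/mfd/rohm-generic.h>
#include <linux/platform_device.h>

static int variant_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	enum rohm_chip_type chip = id->driver_data;

	/* e.g. gate BD9574-only setup on chip == ROHM_CHIP_TYPE_BD9574 */
	dev_info(&pdev->dev, "ROHM chip type %d\n", chip);
	return 0;
}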
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 226da8df6f10..ef148b26b587 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -25,83 +25,89 @@
/* Maximum value for gpio line identifiers */
#define EP93XX_GPIO_LINE_MAX 63
+/* Number of GPIO chips in EP93XX */
+#define EP93XX_GPIO_CHIP_NUM 8
+
/* Maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ 23
+#define EP93XX_GPIO_A_IRQ_BASE 64
+#define EP93XX_GPIO_B_IRQ_BASE 72
/*
* Static mapping of GPIO bank F IRQS:
* F0..F7 (16..24) to irq 80..87.
*/
#define EP93XX_GPIO_F_IRQ_BASE 80
-struct ep93xx_gpio {
- void __iomem *base;
- struct gpio_chip gc[8];
+struct ep93xx_gpio_irq_chip {
+ struct irq_chip ic;
+ u8 irq_offset;
+ u8 int_unmasked;
+ u8 int_enabled;
+ u8 int_type1;
+ u8 int_type2;
+ u8 int_debounce;
};
-/*************************************************************************
- * Interrupt handling for EP93xx on-chip GPIOs
- *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
-{
- BUG_ON(port > 2);
+struct ep93xx_gpio_chip {
+ struct gpio_chip gc;
+ struct ep93xx_gpio_irq_chip *eic;
+};
- writeb_relaxed(0, epg->base + int_en_register_offset[port]);
+struct ep93xx_gpio {
+ void __iomem *base;
+ struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
+};
- writeb_relaxed(gpio_int_type2[port],
- epg->base + int_type2_register_offset[port]);
+#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
- writeb_relaxed(gpio_int_type1[port],
- epg->base + int_type1_register_offset[port]);
+static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
+{
+ struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
- writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
- epg->base + int_en_register_offset[port]);
+ return egc->eic;
}
-static int ep93xx_gpio_port(struct gpio_chip *gc)
+/*************************************************************************
+ * Interrupt handling for EP93xx on-chip GPIOs
+ *************************************************************************/
+#define EP93XX_INT_TYPE1_OFFSET 0x00
+#define EP93XX_INT_TYPE2_OFFSET 0x04
+#define EP93XX_INT_EOI_OFFSET 0x08
+#define EP93XX_INT_EN_OFFSET 0x0c
+#define EP93XX_INT_STATUS_OFFSET 0x10
+#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
+#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
+
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
+ struct ep93xx_gpio_irq_chip *eic)
{
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = 0;
+ writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
- while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
- port++;
+ writeb_relaxed(eic->int_type2,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
- /* This should not happen but is there as a last safeguard */
- if (port == ARRAY_SIZE(epg->gc)) {
- pr_crit("can't find the GPIO port\n");
- return 0;
- }
+ writeb_relaxed(eic->int_type1,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
- return port;
+ writeb_relaxed(eic->int_unmasked & eic->int_enabled,
+ epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
if (enable)
- gpio_int_debounce[port] |= port_mask;
+ eic->int_debounce |= port_mask;
else
- gpio_int_debounce[port] &= ~port_mask;
+ eic->int_debounce &= ~port_mask;
- writeb(gpio_int_debounce[port],
- epg->base + int_debounce_register_offset[port]);
+ writeb(eic->int_debounce,
+ epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
}
static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
@@ -122,12 +128,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
*/
stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
offset));
stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
offset));
chained_irq_exit(irqchip, desc);
@@ -153,52 +159,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_type2 ^= port_mask; /* switch edge direction */
+ ep93xx_gpio_update_int_params(epg, eic);
}
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+ eic->int_type2 ^= port_mask; /* switch edge direction */
- gpio_int_unmasked[port] &= ~port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~port_mask;
+ ep93xx_gpio_update_int_params(epg, eic);
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] |= BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked |= BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
/*
@@ -209,8 +215,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int offset = d->irq & 7;
int port_mask = BIT(offset);
irq_flow_handler_t handler;
@@ -219,32 +225,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
- gpio_int_type1[port] |= port_mask;
+ eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
if (gc->get(gc, offset))
- gpio_int_type2[port] &= ~port_mask; /* falling */
+ eic->int_type2 &= ~port_mask; /* falling */
else
- gpio_int_type2[port] |= port_mask; /* rising */
+ eic->int_type2 |= port_mask; /* rising */
handler = handle_edge_irq;
break;
default:
@@ -253,22 +259,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
irq_set_handler_locked(d, handler);
- gpio_int_enabled[port] |= port_mask;
+ eic->int_enabled |= port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ ep93xx_gpio_update_int_params(epg, eic);
return 0;
}
-static struct irq_chip ep93xx_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = ep93xx_gpio_irq_ack,
- .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
- .irq_mask = ep93xx_gpio_irq_mask,
- .irq_unmask = ep93xx_gpio_irq_unmask,
- .irq_set_type = ep93xx_gpio_irq_type,
-};
-
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
@@ -276,17 +273,19 @@ struct ep93xx_gpio_bank {
const char *label;
int data;
int dir;
+ int irq;
int base;
bool has_irq;
bool has_hierarchical_irq;
unsigned int irq_base;
};
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
+ .irq = _irq, \
.base = _base, \
.has_irq = _has_irq, \
.has_hierarchical_irq = _has_hier, \
@@ -295,16 +294,16 @@ struct ep93xx_gpio_bank {
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
/* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, EP93XX_GPIO_A_IRQ_BASE),
/* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, EP93XX_GPIO_B_IRQ_BASE),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
/* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, EP93XX_GPIO_F_IRQ_BASE),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
};
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -321,18 +320,23 @@ static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
return 0;
}
-static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
+static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
{
- return EP93XX_GPIO_F_IRQ_BASE + offset;
+ ic->irq_ack = ep93xx_gpio_irq_ack;
+ ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
+ ic->irq_mask = ep93xx_gpio_irq_mask;
+ ic->irq_unmask = ep93xx_gpio_irq_unmask;
+ ic->irq_set_type = ep93xx_gpio_irq_type;
}
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
struct ep93xx_gpio_bank *bank)
{
void __iomem *data = epg->base + bank->data;
void __iomem *dir = epg->base + bank->dir;
+ struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
int err;
@@ -346,8 +350,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
+ struct irq_chip *ic;
+
gc->set_config = ep93xx_gpio_set_config;
- girq->chip = &ep93xx_gpio_irq_chip;
+ egc->eic = devm_kcalloc(dev, 1,
+ sizeof(*egc->eic),
+ GFP_KERNEL);
+ if (!egc->eic)
+ return -ENOMEM;
+ egc->eic->irq_offset = bank->irq;
+ ic = &egc->eic->ic;
+ ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
+ if (!ic->name)
+ return -ENOMEM;
+ ep93xx_init_irq_chip(dev, ic);
+ girq->chip = ic;
}
if (bank->has_irq) {
@@ -355,7 +372,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
girq->parent_handler = ep93xx_gpio_ab_irq_handler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, 1,
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
@@ -373,29 +390,28 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
/*
* FIXME: convert this to use hierarchical IRQ support!
- * this requires fixing the root irqchip to be hierarchial.
+ * this requires fixing the root irqchip to be hierarchical.
*/
girq->parent_handler = ep93xx_gpio_f_irq_handler;
girq->num_parents = 8;
- girq->parents = devm_kcalloc(dev, 8,
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
/* Pick resources 1..8 for these IRQs */
- for (i = 1; i <= 8; i++)
- girq->parents[i - 1] = platform_get_irq(pdev, i);
- for (i = 0; i < 8; i++) {
- gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
+ for (i = 0; i < girq->num_parents; i++) {
+ girq->parents[i] = platform_get_irq(pdev, i + 1);
+ gpio_irq = bank->irq_base + i;
irq_set_chip_data(gpio_irq, &epg->gc[5]);
irq_set_chip_and_handler(gpio_irq,
- &ep93xx_gpio_irq_chip,
+ girq->chip,
handle_level_irq);
irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- gc->to_irq = ep93xx_gpio_f_to_irq;
+ girq->first = bank->irq_base;
}
return devm_gpiochip_add_data(dev, gc, epg);
@@ -415,7 +431,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
return PTR_ERR(epg->base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *gc = &epg->gc[i];
+ struct ep93xx_gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
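The rework above drops the global gpio_int_*[port] arrays and the ep93xx_gpio_port() reverse search: each bank now embeds its own irqchip state, reachable from any gpio_chip pointer in O(1). The core trick in isolation — embedding the gpio_chip lets container_of() map it back to the wrapper with no table walk and no failure path:

#include <linux/gpio/driver.h>

struct bank_chip {
	struct gpio_chip gc;	/* the embedded member handed to gpiolib */
	u8 irq_offset;		/* per-bank interrupt register block offset */
};

static inline struct bank_chip *to_bank_chip(struct gpio_chip *gc)
{
	return container_of(gc, struct bank_chip, gc);
}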
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
deleted file mode 100644
index 86a10c808ef6..000000000000
--- a/drivers/gpio/gpio-intel-mid.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel MID GPIO driver
- *
- * Copyright (c) 2008-2014,2016 Intel Corporation.
- */
-
-/* Supports:
- * Moorestown platform Langwell chip.
- * Medfield platform Penwell chip.
- * Clovertrail platform Cloverview chip.
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-
-#define INTEL_MID_IRQ_TYPE_EDGE (1 << 0)
-#define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1)
-
-/*
- * Langwell chip has 64 pins and thus there are 2 32bit registers to control
- * each feature, while Penwell chip has 96 pins for each block, and need 3 32bit
- * registers to control them, so we only define the order here instead of a
- * structure, to get a bit offset for a pin (use GPDR as an example):
- *
- * nreg = ngpio / 32;
- * reg = offset / 32;
- * bit = offset % 32;
- * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
- *
- * so the bit of reg_addr is to control pin offset's GPDR feature
-*/
-
-enum GPIO_REG {
- GPLR = 0, /* pin level read-only */
- GPDR, /* pin direction */
- GPSR, /* pin set */
- GPCR, /* pin clear */
- GRER, /* rising edge detect */
- GFER, /* falling edge detect */
- GEDR, /* edge detect result */
- GAFR, /* alt function */
-};
-
-/* intel_mid gpio driver data */
-struct intel_mid_gpio_ddata {
- u16 ngpio; /* number of gpio pins */
- u32 chip_irq_type; /* chip interrupt type */
-};
-
-struct intel_mid_gpio {
- struct gpio_chip chip;
- void __iomem *reg_base;
- spinlock_t lock;
- struct pci_dev *pdev;
-};
-
-static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 32;
-
- return priv->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 16;
-
- return priv->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
- u32 value = readl(gafr);
- int shift = (offset % 16) << 1, af = (value >> shift) & 3;
-
- if (af) {
- value &= ~(3 << shift);
- writel(value, gafr);
- }
- return 0;
-}
-
-static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gplr = gpio_reg(chip, offset, GPLR);
-
- return !!(readl(gplr) & BIT(offset % 32));
-}
-
-static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- void __iomem *gpsr, *gpcr;
-
- if (value) {
- gpsr = gpio_reg(chip, offset, GPSR);
- writel(BIT(offset % 32), gpsr);
- } else {
- gpcr = gpio_reg(chip, offset, GPCR);
- writel(BIT(offset % 32), gpcr);
- }
-}
-
-static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- u32 value;
- unsigned long flags;
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- value = readl(gpdr);
- value &= ~BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static int intel_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- unsigned long flags;
-
- intel_gpio_set(chip, offset, value);
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- value = readl(gpdr);
- value |= BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static int intel_mid_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_mid_gpio *priv = gpiochip_get_data(gc);
- u32 gpio = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
- void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
-
- if (gpio >= priv->chip.ngpio)
- return -EINVAL;
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (type & IRQ_TYPE_EDGE_RISING)
- value = readl(grer) | BIT(gpio % 32);
- else
- value = readl(grer) & (~BIT(gpio % 32));
- writel(value, grer);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = readl(gfer) | BIT(gpio % 32);
- else
- value = readl(gfer) & (~BIT(gpio % 32));
- writel(value, gfer);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static void intel_mid_irq_unmask(struct irq_data *d)
-{
-}
-
-static void intel_mid_irq_mask(struct irq_data *d)
-{
-}
-
-static struct irq_chip intel_mid_irqchip = {
- .name = "INTEL_MID-GPIO",
- .irq_mask = intel_mid_irq_mask,
- .irq_unmask = intel_mid_irq_unmask,
- .irq_set_type = intel_mid_irq_type,
-};
-
-static const struct intel_mid_gpio_ddata gpio_lincroft = {
- .ngpio = 64,
-};
-
-static const struct intel_mid_gpio_ddata gpio_penwell_aon = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct intel_mid_gpio_ddata gpio_penwell_core = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct intel_mid_gpio_ddata gpio_cloverview_aon = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE | INTEL_MID_IRQ_TYPE_LEVEL,
-};
-
-static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct pci_device_id intel_gpio_ids[] = {
- {
- /* Lincroft */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
- .driver_data = (kernel_ulong_t)&gpio_lincroft,
- },
- {
- /* Penwell AON */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
- .driver_data = (kernel_ulong_t)&gpio_penwell_aon,
- },
- {
- /* Penwell Core */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
- .driver_data = (kernel_ulong_t)&gpio_penwell_core,
- },
- {
- /* Cloverview Aon */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
- .driver_data = (kernel_ulong_t)&gpio_cloverview_aon,
- },
- {
- /* Cloverview Core */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
- .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
- },
- { }
-};
-
-static void intel_mid_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct intel_mid_gpio *priv = gpiochip_get_data(gc);
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- u32 base, gpio, mask;
- unsigned long pending;
- void __iomem *gedr;
-
- /* check GPIO controller to check which pin triggered the interrupt */
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- gedr = gpio_reg(&priv->chip, base, GEDR);
- while ((pending = readl(gedr))) {
- gpio = __ffs(pending);
- mask = BIT(gpio);
- /* Clear before handling so we can't lose an edge */
- writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(gc->irq.domain,
- base + gpio));
- }
- }
-
- chip->irq_eoi(data);
-}
-
-static int intel_mid_irq_init_hw(struct gpio_chip *chip)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *reg;
- unsigned base;
-
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- /* Clear the rising-edge detect register */
- reg = gpio_reg(&priv->chip, base, GRER);
- writel(0, reg);
- /* Clear the falling-edge detect register */
- reg = gpio_reg(&priv->chip, base, GFER);
- writel(0, reg);
- /* Clear the edge detect status register */
- reg = gpio_reg(&priv->chip, base, GEDR);
- writel(~0, reg);
- }
-
- return 0;
-}
-
-static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
-{
- int err = pm_schedule_suspend(dev, 500);
- return err ?: -EBUSY;
-}
-
-static const struct dev_pm_ops intel_gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, NULL, intel_gpio_runtime_idle)
-};
-
-static int intel_gpio_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- void __iomem *base;
- struct intel_mid_gpio *priv;
- u32 gpio_base;
- u32 irq_base;
- int retval;
- struct gpio_irq_chip *girq;
- struct intel_mid_gpio_ddata *ddata =
- (struct intel_mid_gpio_ddata *)id->driver_data;
-
- retval = pcim_enable_device(pdev);
- if (retval)
- return retval;
-
- retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
- if (retval) {
- dev_err(&pdev->dev, "I/O memory mapping error\n");
- return retval;
- }
-
- base = pcim_iomap_table(pdev)[1];
-
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
-
- /* release the IO mapping, since we already get the info from bar1 */
- pcim_iounmap_regions(pdev, 1 << 1);
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->reg_base = pcim_iomap_table(pdev)[0];
- priv->chip.label = dev_name(&pdev->dev);
- priv->chip.parent = &pdev->dev;
- priv->chip.request = intel_gpio_request;
- priv->chip.direction_input = intel_gpio_direction_input;
- priv->chip.direction_output = intel_gpio_direction_output;
- priv->chip.get = intel_gpio_get;
- priv->chip.set = intel_gpio_set;
- priv->chip.base = gpio_base;
- priv->chip.ngpio = ddata->ngpio;
- priv->chip.can_sleep = false;
- priv->pdev = pdev;
-
- spin_lock_init(&priv->lock);
-
- girq = &priv->chip.irq;
- girq->chip = &intel_mid_irqchip;
- girq->init_hw = intel_mid_irq_init_hw;
- girq->parent_handler = intel_mid_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = pdev->irq;
- girq->first = irq_base;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- pci_set_drvdata(pdev, priv);
-
- retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
- if (retval) {
- dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
- return retval;
- }
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static struct pci_driver intel_gpio_driver = {
- .name = "intel_mid_gpio",
- .id_table = intel_gpio_ids,
- .probe = intel_gpio_probe,
- .driver = {
- .pm = &intel_gpio_pm_ops,
- },
-};
-
-builtin_pci_driver(intel_gpio_driver);
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 7c0a9ef0b500..82b3a913005d 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -325,7 +325,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_edge_irq;
- girq->init_hw = max77620_gpio_irq_init_hw,
+ girq->init_hw = max77620_gpio_irq_init_hw;
girq->threaded = true;
platform_set_drvdata(pdev, mgpio);
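The one-character max77620 fix swaps a stray comma for a semicolon. Behavior was actually unchanged, because the comma operator fused the two assignments into a single expression statement and both still executed; the construct merely hid intent and invited breakage. A standalone illustration of why the original compiled silently:

/* Userspace sketch of the comma-operator pitfall fixed above. */
#include <stdio.h>

int main(void)
{
	int a, b;

	a = 1,		/* comma operator: evaluate left, then right */
	b = 2;		/* still one statement; both assignments happen */

	printf("%d %d\n", a, b);	/* prints "1 2" */
	return 0;
}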
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 706687fab634..22f3ce218f5d 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -194,6 +194,11 @@ static int mrfld_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
{
u32 debounce;
+ if ((pinconf_to_config_param(config) == PIN_CONFIG_BIAS_DISABLE) ||
+ (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_UP) ||
+ (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_DOWN))
+ return gpiochip_generic_config(chip, offset, config);
+
if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
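The merrifield hunk forwards bias parameters to the pin controller sitting behind the GPIO via gpiochip_generic_config() while keeping debounce local. The same dispatch sketched as a switch — equivalent to the chained tests in the hunk, with the debounce arm stubbed out:

#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>

static int set_config_sketch(struct gpio_chip *chip, unsigned int offset,
			     unsigned long config)
{
	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		/* bias lives in the backing pin controller */
		return gpiochip_generic_config(chip, offset, config);
	case PIN_CONFIG_INPUT_DEBOUNCE:
		return 0;	/* handled by the driver itself */
	default:
		return -ENOTSUPP;
	}
}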
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
deleted file mode 100644
index 7e3c96e4ab2c..000000000000
--- a/drivers/gpio/gpio-msic.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Medfield MSIC GPIO driver>
- * Copyright (c) 2011, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- * Based on intel_pmic_gpio.c
- */
-
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/* the offset for the mapping of global gpio pin to irq */
-#define MSIC_GPIO_IRQ_OFFSET 0x100
-
-#define MSIC_GPIO_DIR_IN 0
-#define MSIC_GPIO_DIR_OUT BIT(5)
-#define MSIC_GPIO_TRIG_FALL BIT(1)
-#define MSIC_GPIO_TRIG_RISE BIT(2)
-
-/* masks for msic gpio output GPIOxxxxCTLO registers */
-#define MSIC_GPIO_DIR_MASK BIT(5)
-#define MSIC_GPIO_DRV_MASK BIT(4)
-#define MSIC_GPIO_REN_MASK BIT(3)
-#define MSIC_GPIO_RVAL_MASK (BIT(2) | BIT(1))
-#define MSIC_GPIO_DOUT_MASK BIT(0)
-
-/* masks for msic gpio input GPIOxxxxCTLI registers */
-#define MSIC_GPIO_GLBYP_MASK BIT(5)
-#define MSIC_GPIO_DBNC_MASK (BIT(4) | BIT(3))
-#define MSIC_GPIO_INTCNT_MASK (BIT(2) | BIT(1))
-#define MSIC_GPIO_DIN_MASK BIT(0)
-
-#define MSIC_NUM_GPIO 24
-
-struct msic_gpio {
- struct platform_device *pdev;
- struct mutex buslock;
- struct gpio_chip chip;
- int irq;
- unsigned irq_base;
- unsigned long trig_change_mask;
- unsigned trig_type;
-};
-
-/*
- * MSIC has 24 gpios, 16 low voltage (1.2-1.8v) and 8 high voltage (3v).
- * Both the high and low voltage gpios are divided in two banks.
- * GPIOs are numbered with GPIO0LV0 as gpio_base in the following order:
- * GPIO0LV0..GPIO0LV7: low voltage, bank 0, gpio_base
- * GPIO1LV0..GPIO1LV7: low voltage, bank 1, gpio_base + 8
- * GPIO0HV0..GPIO0HV3: high voltage, bank 0, gpio_base + 16
- * GPIO1HV0..GPIO1HV3: high voltage, bank 1, gpio_base + 20
- */
-
-static int msic_gpio_to_ireg(unsigned offset)
-{
- if (offset >= MSIC_NUM_GPIO)
- return -EINVAL;
-
- if (offset < 8)
- return INTEL_MSIC_GPIO0LV0CTLI - offset;
- if (offset < 16)
- return INTEL_MSIC_GPIO1LV0CTLI - offset + 8;
- if (offset < 20)
- return INTEL_MSIC_GPIO0HV0CTLI - offset + 16;
-
- return INTEL_MSIC_GPIO1HV0CTLI - offset + 20;
-}
-
-static int msic_gpio_to_oreg(unsigned offset)
-{
- if (offset >= MSIC_NUM_GPIO)
- return -EINVAL;
-
- if (offset < 8)
- return INTEL_MSIC_GPIO0LV0CTLO - offset;
- if (offset < 16)
- return INTEL_MSIC_GPIO1LV0CTLO - offset + 8;
- if (offset < 20)
- return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
-
- return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
-}
-
-static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- int reg;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return reg;
-
- return intel_msic_reg_update(reg, MSIC_GPIO_DIR_IN, MSIC_GPIO_DIR_MASK);
-}
-
-static int msic_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- int reg;
- unsigned mask;
-
- value = (!!value) | MSIC_GPIO_DIR_OUT;
- mask = MSIC_GPIO_DIR_MASK | MSIC_GPIO_DOUT_MASK;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return reg;
-
- return intel_msic_reg_update(reg, value, mask);
-}
-
-static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- u8 r;
- int ret;
- int reg;
-
- reg = msic_gpio_to_ireg(offset);
- if (reg < 0)
- return reg;
-
- ret = intel_msic_reg_read(reg, &r);
- if (ret < 0)
- return ret;
-
- return !!(r & MSIC_GPIO_DIN_MASK);
-}
-
-static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- int reg;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return;
-
- intel_msic_reg_update(reg, !!value , MSIC_GPIO_DOUT_MASK);
-}
-
-/*
- * This is called from genirq with mg->buslock locked and
- * irq_desc->lock held. We can not access the scu bus here, so we
- * store the change and update in the bus_sync_unlock() function below
- */
-static int msic_irq_type(struct irq_data *data, unsigned type)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- u32 gpio = data->irq - mg->irq_base;
-
- if (gpio >= mg->chip.ngpio)
- return -EINVAL;
-
- /* mark for which gpio the trigger changed, protected by buslock */
- mg->trig_change_mask |= (1 << gpio);
- mg->trig_type = type;
-
- return 0;
-}
-
-static int msic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct msic_gpio *mg = gpiochip_get_data(chip);
- return mg->irq_base + offset;
-}
-
-static void msic_bus_lock(struct irq_data *data)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- mutex_lock(&mg->buslock);
-}
-
-static void msic_bus_sync_unlock(struct irq_data *data)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- int offset;
- int reg;
- u8 trig = 0;
-
- /* We can only get one change at a time as the buslock covers the
- entire transaction. The irq_desc->lock is dropped before we are
- called but that is fine */
- if (mg->trig_change_mask) {
- offset = __ffs(mg->trig_change_mask);
-
- reg = msic_gpio_to_ireg(offset);
- if (reg < 0)
- goto out;
-
- if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
- trig |= MSIC_GPIO_TRIG_RISE;
- if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
- trig |= MSIC_GPIO_TRIG_FALL;
-
- intel_msic_reg_update(reg, trig, MSIC_GPIO_INTCNT_MASK);
- mg->trig_change_mask = 0;
- }
-out:
- mutex_unlock(&mg->buslock);
-}
-
-/* Firmware does all the masking and unmasking for us, no masking here. */
-static void msic_irq_unmask(struct irq_data *data) { }
-
-static void msic_irq_mask(struct irq_data *data) { }
-
-static struct irq_chip msic_irqchip = {
- .name = "MSIC-GPIO",
- .irq_mask = msic_irq_mask,
- .irq_unmask = msic_irq_unmask,
- .irq_set_type = msic_irq_type,
- .irq_bus_lock = msic_bus_lock,
- .irq_bus_sync_unlock = msic_bus_sync_unlock,
-};
-
-static void msic_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
- unsigned long pending;
- int i;
- int bitnr;
- u8 pin;
-
- for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
- intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
- pending = pin;
-
- for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
- generic_handle_irq(mg->irq_base + i * BITS_PER_BYTE + bitnr);
- }
- chip->irq_eoi(data);
-}
-
-static int platform_msic_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct intel_msic_gpio_pdata *pdata = dev_get_platdata(dev);
- struct msic_gpio *mg;
- int irq = platform_get_irq(pdev, 0);
- int retval;
- int i;
-
- if (irq < 0) {
- dev_err(dev, "no IRQ line: %d\n", irq);
- return irq;
- }
-
- if (!pdata || !pdata->gpio_base) {
- dev_err(dev, "incorrect or missing platform data\n");
- return -EINVAL;
- }
-
- mg = kzalloc(sizeof(*mg), GFP_KERNEL);
- if (!mg)
- return -ENOMEM;
-
- dev_set_drvdata(dev, mg);
-
- mg->pdev = pdev;
- mg->irq = irq;
- mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
- mg->chip.label = "msic_gpio";
- mg->chip.direction_input = msic_gpio_direction_input;
- mg->chip.direction_output = msic_gpio_direction_output;
- mg->chip.get = msic_gpio_get;
- mg->chip.set = msic_gpio_set;
- mg->chip.to_irq = msic_gpio_to_irq;
- mg->chip.base = pdata->gpio_base;
- mg->chip.ngpio = MSIC_NUM_GPIO;
- mg->chip.can_sleep = true;
- mg->chip.parent = dev;
-
- mutex_init(&mg->buslock);
-
- retval = gpiochip_add_data(&mg->chip, mg);
- if (retval) {
- dev_err(dev, "Adding MSIC gpio chip failed\n");
- goto err;
- }
-
- for (i = 0; i < mg->chip.ngpio; i++) {
- irq_set_chip_data(i + mg->irq_base, mg);
- irq_set_chip_and_handler(i + mg->irq_base,
- &msic_irqchip,
- handle_simple_irq);
- }
- irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg);
-
- return 0;
-err:
- kfree(mg);
- return retval;
-}
-
-static struct platform_driver platform_msic_gpio_driver = {
- .driver = {
- .name = "msic_gpio",
- },
- .probe = platform_msic_gpio_probe,
-};
-
-static int __init platform_msic_gpio_init(void)
-{
- return platform_driver_register(&platform_msic_gpio_driver);
-}
-subsys_initcall(platform_msic_gpio_init);
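The driver removed above is a textbook instance of the genirq slow-bus pattern: irq_set_type() runs with the descriptor lock held and may not sleep, so it only records the requested trigger, and the actual bus write is deferred to irq_bus_sync_unlock() once that lock has been dropped. A minimal standalone sketch of the pattern, with a hypothetical foo_chip standing in for msic_gpio:

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/mutex.h>

/* "foo_chip" and its fields are hypothetical, not part of this patch. */
struct foo_chip {
	struct mutex buslock;		/* serializes slow-bus access */
	u32 trig_change_mask;		/* lines whose trigger changed */
	unsigned int trig_type;		/* requested IRQ_TYPE_* flags */
};

static void foo_bus_lock(struct irq_data *d)
{
	struct foo_chip *fc = irq_data_get_irq_chip_data(d);

	mutex_lock(&fc->buslock);	/* may sleep: desc->lock not held here */
}

static int foo_set_type(struct irq_data *d, unsigned int type)
{
	struct foo_chip *fc = irq_data_get_irq_chip_data(d);

	/* desc->lock is held: record the change, touch no hardware */
	fc->trig_change_mask |= BIT(d->hwirq);
	fc->trig_type = type;
	return 0;
}

static void foo_bus_sync_unlock(struct irq_data *d)
{
	struct foo_chip *fc = irq_data_get_irq_chip_data(d);

	if (fc->trig_change_mask) {
		/* the slow (sleeping) bus write goes here */
		fc->trig_change_mask = 0;
	}
	mutex_unlock(&fc->buslock);
}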
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 672681a976f5..8f429d9f3661 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -70,7 +70,12 @@
*/
#define PWM_BLINK_ON_DURATION_OFF 0x0
#define PWM_BLINK_OFF_DURATION_OFF 0x4
+#define PWM_BLINK_COUNTER_B_OFF 0x8
+/* Armada 8k variant gpios register offsets */
+#define AP80X_GPIO0_OFF_A8K 0x1040
+#define CP11X_GPIO0_OFF_A8K 0x100
+#define CP11X_GPIO1_OFF_A8K 0x140
/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
@@ -93,6 +98,7 @@
struct mvebu_pwm {
struct regmap *regs;
+ u32 offset;
unsigned long clk_rate;
struct gpio_desc *gpiod;
struct pwm_chip chip;
@@ -283,12 +289,12 @@ mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val)
*/
static unsigned int mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
{
- return PWM_BLINK_ON_DURATION_OFF;
+ return mvpwm->offset + PWM_BLINK_ON_DURATION_OFF;
}
static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
{
- return PWM_BLINK_OFF_DURATION_OFF;
+ return mvpwm->offset + PWM_BLINK_OFF_DURATION_OFF;
}
/*
@@ -667,29 +673,21 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
- do_div(val, mvpwm->clk_rate);
- if (val > UINT_MAX)
- state->duty_cycle = UINT_MAX;
- else if (val)
- state->duty_cycle = val;
+ /* Hardware treats zero as 2^32. See mvebu_pwm_apply(). */
+ if (u > 0)
+ val = u;
else
- state->duty_cycle = 1;
+ val = UINT_MAX + 1ULL;
+ state->duty_cycle = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC,
+ mvpwm->clk_rate);
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
- do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
- state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
+ /* period = on + off duration */
+ if (u > 0)
+ val += u;
+ else
+ val += UINT_MAX + 1ULL;
+ state->period = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC, mvpwm->clk_rate);
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
@@ -711,19 +709,27 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
- if (val > UINT_MAX)
+ if (val > UINT_MAX + 1ULL)
return -EINVAL;
- if (val)
+ /*
+ * Zero on/off values don't work as expected. Experimentation shows
+ * that a zero value is treated as 2^32. This behavior is not documented.
+ */
+ if (val == UINT_MAX + 1ULL)
+ on = 0;
+ else if (val)
on = val;
else
on = 1;
- val = (unsigned long long) mvpwm->clk_rate *
- (state->period - state->duty_cycle);
+ val = (unsigned long long) mvpwm->clk_rate * state->period;
do_div(val, NSEC_PER_SEC);
- if (val > UINT_MAX)
+ val -= on;
+ if (val > UINT_MAX + 1ULL)
return -EINVAL;
- if (val)
+ if (val == UINT_MAX + 1ULL)
+ off = 0;
+ else if (val)
off = val;
else
off = 1;
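The conversion above has to cope with a hardware quirk: the 32-bit on/off counters treat a programmed value of 0 as 2^32 ticks. A standalone sketch of the resulting encode/decode, with illustrative names (the driver open-codes this inline, and mvebu_pwm_get_state() applies the inverse when reading state back):

#include <stdint.h>

/* Returns the register value for "ticks" clock cycles, or -1 if too big. */
static int64_t encode_ticks(uint64_t ticks)
{
	if (ticks > (1ULL << 32))
		return -1;		/* cannot be represented */
	if (ticks == (1ULL << 32))
		return 0;		/* hardware reads 0 as 2^32 */
	return ticks ? ticks : 1;	/* avoid 0, which would mean 2^32 */
}

/* Inverse used when reading state back: 0 decodes to 2^32 ticks. */
static uint64_t decode_ticks(uint32_t reg)
{
	return reg ? reg : (1ULL << 32);
}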
@@ -781,51 +787,80 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
void __iomem *base;
+ u32 offset;
u32 set;
- if (!of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio"))
- return 0;
-
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
- if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ if (of_device_is_compatible(mvchip->chip.of_node,
+ "marvell,armada-370-gpio")) {
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ return 0;
+ offset = 0;
+ } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
return 0;
+ }
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
- /*
- * Use set A for lines of GPIO chip with id 0, B for GPIO chip
- * with id 1. Don't allow further GPIO chips to be used for PWM.
- */
- if (id == 0)
- set = 0;
- else if (id == 1)
- set = U32_MAX;
- else
- return -EINVAL;
- regmap_write(mvchip->regs,
- GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set);
-
mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
if (!mvpwm)
return -ENOMEM;
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
+ mvpwm->offset = offset;
+
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ mvpwm->regs = mvchip->regs;
+
+ switch (mvchip->offset) {
+ case AP80X_GPIO0_OFF_A8K:
+ case CP11X_GPIO0_OFF_A8K:
+ /* Blink counter A */
+ set = 0;
+ break;
+ case CP11X_GPIO1_OFF_A8K:
+ /* Blink counter B */
+ set = U32_MAX;
+ mvpwm->offset += PWM_BLINK_COUNTER_B_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ base = devm_platform_ioremap_resource_byname(pdev, "pwm");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = devm_platform_ioremap_resource_byname(pdev, "pwm");
- if (IS_ERR(base))
- return PTR_ERR(base);
+ mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvpwm->regs))
+ return PTR_ERR(mvpwm->regs);
- mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
- &mvebu_gpio_regmap_config);
- if (IS_ERR(mvpwm->regs))
- return PTR_ERR(mvpwm->regs);
+ /*
+ * Use set A for lines of GPIO chip with id 0, B for GPIO chip
+ * with id 1. Don't allow further GPIO chips to be used for PWM.
+ */
+ if (id == 0)
+ set = 0;
+ else if (id == 1)
+ set = U32_MAX;
+ else
+ return -EINVAL;
+ }
+
+ regmap_write(mvchip->regs,
+ GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set);
mvpwm->clk_rate = clk_get_rate(mvchip->clk);
if (!mvpwm->clk_rate) {
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 825b362eb4b7..5ea09fd01544 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -73,6 +73,7 @@
static const struct i2c_device_id pca953x_id[] = {
{ "pca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
+ { "pca9506", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9536", 4 | PCA953X_TYPE, },
@@ -1236,6 +1237,7 @@ static int pca953x_resume(struct device *dev)
static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9505", .data = OF_953X(40, PCA_INT), },
+ { .compatible = "nxp,pca9506", .data = OF_953X(40, PCA_INT), },
{ .compatible = "nxp,pca9534", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "nxp,pca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9536", .data = OF_953X( 4, 0), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a2a8d155c75e..b7568ee33696 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
* reset state. Otherwise it flags pins to be driven low.
*/
gpio->out = ~n_latch;
- gpio->status = gpio->out;
+ gpio->status = gpio->read(gpio->client);
/* Enable irqchip if we have an interrupt */
if (client->irq) {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0b572dbc4a36..e7092d5fe700 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -35,6 +35,8 @@ struct gpio_rcar_bank_info {
struct gpio_rcar_info {
bool has_outdtsel;
bool has_both_edge_trigger;
+ bool has_always_in;
+ bool has_inen;
};
struct gpio_rcar_priv {
@@ -62,6 +64,7 @@ struct gpio_rcar_priv {
#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
#define OUTDTSEL 0x40 /* Output Data Select Register */
#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
+#define INEN 0x50 /* General Input Enable Register */
#define RCAR_MAX_GPIO_PER_BANK 32
@@ -302,9 +305,11 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
u32 bit = BIT(offset);
- /* testing on r8a7790 shows that INDT does not show correct pin state
- * when configured as output, so use OUTDT in case of output pins */
- if (gpio_rcar_read(p, INOUTSEL) & bit)
+ /*
+ * Before R-Car Gen3, INDT does not show correct pin state when
+ * configured as output, so use OUTDT in case of output pins
+ */
+ if (!p->info.has_always_in && (gpio_rcar_read(p, INOUTSEL) & bit))
return !!(gpio_rcar_read(p, OUTDT) & bit);
else
return !!(gpio_rcar_read(p, INDT) & bit);
@@ -324,6 +329,11 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return 0;
+ if (p->info.has_always_in) {
+ bits[0] = gpio_rcar_read(p, INDT) & bankmask;
+ return 0;
+ }
+
spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
@@ -383,41 +393,35 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
.has_outdtsel = false,
.has_both_edge_trigger = false,
+ .has_always_in = false,
+ .has_inen = false,
};
static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
.has_outdtsel = true,
.has_both_edge_trigger = true,
+ .has_always_in = false,
+ .has_inen = false,
+};
+
+static const struct gpio_rcar_info gpio_rcar_info_gen3 = {
+ .has_outdtsel = true,
+ .has_both_edge_trigger = true,
+ .has_always_in = true,
+ .has_inen = false,
+};
+
+static const struct gpio_rcar_info gpio_rcar_info_v3u = {
+ .has_outdtsel = true,
+ .has_both_edge_trigger = true,
+ .has_always_in = true,
+ .has_inen = true,
};
static const struct of_device_id gpio_rcar_of_table[] = {
{
- .compatible = "renesas,gpio-r8a7743",
- /* RZ/G1 GPIO is identical to R-Car Gen2. */
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7790",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7791",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7792",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7793",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7794",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7795",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7796",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
+ .compatible = "renesas,gpio-r8a779a0",
+ .data = &gpio_rcar_info_v3u,
}, {
.compatible = "renesas,rcar-gen1-gpio",
.data = &gpio_rcar_info_gen1,
@@ -426,8 +430,7 @@ static const struct of_device_id gpio_rcar_of_table[] = {
.data = &gpio_rcar_info_gen2,
}, {
.compatible = "renesas,rcar-gen3-gpio",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
+ .data = &gpio_rcar_info_gen3,
}, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
@@ -460,6 +463,17 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
return 0;
}
+static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
+{
+ u32 mask = GENMASK(p->gpio_chip.ngpio - 1, 0);
+
+ /* Select "Input Enable" in INEN */
+ if (p->gpio_chip.valid_mask)
+ mask &= p->gpio_chip.valid_mask[0];
+ if (mask)
+ gpio_rcar_write(p, INEN, gpio_rcar_read(p, INEN) | mask);
+}
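gpio_rcar_enable_inputs() builds an all-lines mask with GENMASK() and then filters it through the chip's valid_mask so reserved lines stay input-disabled. Worked through with illustrative numbers, assuming a 10-line bank where lines 3 and 4 are marked invalid:

#include <linux/bits.h>

u32 all   = GENMASK(9, 0);			/* 0x3ff: all ten lines */
u32 valid = GENMASK(9, 0) & ~GENMASK(4, 3);	/* 0x3e7: lines 3, 4 reserved */
u32 inen  = all & valid;			/* 0x3e7 OR'ed into INEN */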
+
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
@@ -549,6 +563,12 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err1;
}
+ if (p->info.has_inen) {
+ pm_runtime_get_sync(p->dev);
+ gpio_rcar_enable_inputs(p);
+ pm_runtime_put(p->dev);
+ }
+
dev_info(dev, "driving %d GPIOs\n", npins);
return 0;
@@ -624,6 +644,9 @@ static int gpio_rcar_resume(struct device *dev)
}
}
+ if (p->info.has_inen)
+ gpio_rcar_enable_inputs(p);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP*/
diff --git a/drivers/gpio/gpio-sl28cpld.c b/drivers/gpio/gpio-sl28cpld.c
index 889b8f5622c2..52404736ac86 100644
--- a/drivers/gpio/gpio-sl28cpld.c
+++ b/drivers/gpio/gpio-sl28cpld.c
@@ -65,13 +65,13 @@ static int sl28cpld_gpio_irq_init(struct platform_device *pdev,
if (!irq_chip)
return -ENOMEM;
- irq_chip->name = "sl28cpld-gpio-irq",
+ irq_chip->name = "sl28cpld-gpio-irq";
irq_chip->irqs = sl28cpld_gpio_irqs;
irq_chip->num_irqs = ARRAY_SIZE(sl28cpld_gpio_irqs);
irq_chip->num_regs = 1;
irq_chip->status_base = base + GPIO_REG_IP;
irq_chip->mask_base = base + GPIO_REG_IE;
- irq_chip->mask_invert = true,
+ irq_chip->mask_invert = true;
irq_chip->ack_base = base + GPIO_REG_IP;
ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
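The two one-character fixes above replace commas that should have been semicolons. The broken version still compiled because C's comma operator turns consecutive assignments into a single expression statement; the same class of fix appears again below for gpio-tegra186, gpio-vx855 and gpio-wcove. An illustrative snippet, unrelated to the patch itself:

#include <stdio.h>

int main(void)
{
	int a, b;

	a = 1,		/* comma operator: still one statement ... */
	b = 2;		/* ... with this assignment as its next operand */
	printf("%d %d\n", a, b);	/* prints "1 2": same behavior, so the bug is latent */
	return 0;
}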
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index e19ebff6018c..0025f613d9b3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -60,7 +60,6 @@ struct tegra_gpio_info;
struct tegra_gpio_bank {
unsigned int bank;
- unsigned int irq;
/*
* IRQ-core code uses raw locking, and thus, nested locking also
@@ -81,7 +80,6 @@ struct tegra_gpio_bank {
u32 dbc_enb[4];
#endif
u32 dbc_cnt[4];
- struct tegra_gpio_info *tgi;
};
struct tegra_gpio_soc_config {
@@ -93,12 +91,12 @@ struct tegra_gpio_soc_config {
struct tegra_gpio_info {
struct device *dev;
void __iomem *regs;
- struct irq_domain *irq_domain;
struct tegra_gpio_bank *bank_info;
const struct tegra_gpio_soc_config *soc;
struct gpio_chip gc;
struct irq_chip ic;
u32 bank_count;
+ unsigned int *irqs;
};
static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
@@ -274,17 +272,10 @@ static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return tegra_gpio_set_debounce(chip, offset, debounce);
}
-static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
-
- return irq_find_mapping(tgi->irq_domain, offset);
-}
-
static void tegra_gpio_irq_ack(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
@@ -292,8 +283,8 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
static void tegra_gpio_irq_mask(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
@@ -301,8 +292,8 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
static void tegra_gpio_irq_unmask(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
@@ -311,11 +302,14 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int gpio = d->hwirq, port = GPIO_PORT(gpio), lvl_type;
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ struct tegra_gpio_bank *bank;
unsigned long flags;
- u32 val;
int ret;
+ u32 val;
+
+ bank = &tgi->bank_info[GPIO_BANK(d->hwirq)];
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -367,13 +361,16 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
irq_set_handler_locked(d, handle_edge_irq);
- return 0;
+ if (d->parent_data)
+ ret = irq_chip_set_type_parent(d, type);
+
+ return ret;
}
static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_irq_mask(d);
@@ -382,13 +379,25 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
- unsigned int port, pin, gpio;
+ struct tegra_gpio_info *tgi = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain = tgi->gc.irq.domain;
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct tegra_gpio_bank *bank = NULL;
+ unsigned int port, pin, gpio, i;
bool unmasked = false;
- u32 lvl;
unsigned long sta;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
- struct tegra_gpio_info *tgi = bank->tgi;
+ u32 lvl;
+
+ for (i = 0; i < tgi->bank_count; i++) {
+ if (tgi->irqs[i] == irq) {
+ bank = &tgi->bank_info[i];
+ break;
+ }
+ }
+
+ if (WARN_ON(bank == NULL))
+ return;
chained_irq_enter(chip, desc);
@@ -411,14 +420,47 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
- generic_handle_irq(irq_find_mapping(tgi->irq_domain,
- gpio + pin));
+ irq = irq_find_mapping(domain, gpio + pin);
+ if (WARN_ON(irq == 0))
+ continue;
+
+ generic_handle_irq(irq);
}
}
if (!unmasked)
chained_irq_exit(chip, desc);
+}
+
+static int tegra_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int hwirq,
+ unsigned int type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = chip->irq.child_offset_to_irq(chip, hwirq);
+ *parent_type = type;
+
+ return 0;
+}
+
+static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = 0;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
}
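With these two callbacks the gpiolib core can allocate interrupts hierarchically: child_to_parent_hwirq() translates a GPIO number into the parent domain's hwirq, and the fwspec built above is then handed to the parent domain's ->alloc(). Roughly, for a hypothetical child that maps to parent hwirq 40 requested as level-high (the exact meaning of each cell depends on the parent binding):

/* Illustration only; the numbers and the parent_fwnode variable are
 * hypothetical. This is the shape of what the populate callback above
 * produces for the parent domain. */
struct irq_fwspec spec = {
	.fwnode      = parent_fwnode,
	.param_count = 3,
	.param       = { 0, 40, IRQ_TYPE_LEVEL_HIGH },
};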
#ifdef CONFIG_PM_SLEEP
@@ -497,19 +539,31 @@ static int tegra_gpio_suspend(struct device *dev)
static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ struct tegra_gpio_bank *bank;
unsigned int gpio = d->hwirq;
u32 port, bit, mask;
int err;
- err = irq_set_irq_wake(bank->irq, enable);
- if (err)
- return err;
+ bank = &tgi->bank_info[GPIO_BANK(d->hwirq)];
port = GPIO_PORT(gpio);
bit = GPIO_BIT(gpio);
mask = BIT(bit);
+ err = irq_set_irq_wake(tgi->irqs[bank->bank], enable);
+ if (err)
+ return err;
+
+ if (d->parent_data) {
+ err = irq_chip_set_wake_parent(d, enable);
+ if (err) {
+ irq_set_irq_wake(tgi->irqs[bank->bank], !enable);
+ return err;
+ }
+ }
+
if (enable)
bank->wake_enb[port] |= mask;
else
@@ -519,6 +573,35 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
}
#endif
+static int tegra_gpio_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
+static int tegra_gpio_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ tegra_gpio_enable(tgi, d->hwirq);
+
+ return gpiochip_reqres_irq(chip, d->hwirq);
+}
+
+static void tegra_gpio_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ gpiochip_relres_irq(chip, d->hwirq);
+ tegra_gpio_enable(tgi, d->hwirq);
+}
+
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -526,7 +609,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
{
- struct tegra_gpio_info *tgi = s->private;
+ struct tegra_gpio_info *tgi = dev_get_drvdata(s->private);
unsigned int i, j;
for (i = 0; i < tgi->bank_count; i++) {
@@ -548,12 +631,10 @@ static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(tegra_dbg_gpio);
-
static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
- debugfs_create_file("tegra_gpio", 0444, NULL, tgi,
- &tegra_dbg_gpio_fops);
+ debugfs_create_devm_seqfile(tgi->dev, "tegra_gpio", NULL,
+ tegra_dbg_gpio_show);
}
#else
@@ -568,14 +649,18 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-static struct lock_class_key gpio_lock_class;
-static struct lock_class_key gpio_request_class;
+static const struct of_device_id tegra_pmc_of_match[] = {
+ { .compatible = "nvidia,tegra210-pmc", },
+ { /* sentinel */ },
+};
static int tegra_gpio_probe(struct platform_device *pdev)
{
- struct tegra_gpio_info *tgi;
struct tegra_gpio_bank *bank;
- unsigned int gpio, i, j;
+ struct tegra_gpio_info *tgi;
+ struct gpio_irq_chip *irq;
+ struct device_node *np;
+ unsigned int i, j;
int ret;
tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
@@ -604,7 +689,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_output = tegra_gpio_direction_output;
tgi->gc.set = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
- tgi->gc.to_irq = tegra_gpio_to_irq;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
tgi->gc.parent = &pdev->dev;
@@ -619,6 +703,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake;
#endif
+ tgi->ic.irq_request_resources = tegra_gpio_irq_request_resources;
+ tgi->ic.irq_release_resources = tegra_gpio_irq_release_resources;
platform_set_drvdata(pdev, tgi);
@@ -630,11 +716,10 @@ static int tegra_gpio_probe(struct platform_device *pdev)
if (!tgi->bank_info)
return -ENOMEM;
- tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
- tgi->gc.ngpio,
- &irq_domain_simple_ops, NULL);
- if (!tgi->irq_domain)
- return -ENODEV;
+ tgi->irqs = devm_kcalloc(&pdev->dev, tgi->bank_count,
+ sizeof(*tgi->irqs), GFP_KERNEL);
+ if (!tgi->irqs)
+ return -ENOMEM;
for (i = 0; i < tgi->bank_count; i++) {
ret = platform_get_irq(pdev, i);
@@ -643,8 +728,36 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[i];
bank->bank = i;
- bank->irq = ret;
- bank->tgi = tgi;
+
+ tgi->irqs[i] = ret;
+
+ for (j = 0; j < 4; j++) {
+ raw_spin_lock_init(&bank->lvl_lock[j]);
+ spin_lock_init(&bank->dbc_lock[j]);
+ }
+ }
+
+ irq = &tgi->gc.irq;
+ irq->chip = &tgi->ic;
+ irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq;
+ irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec;
+ irq->handler = handle_simple_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = tegra_gpio_irq_handler;
+ irq->parent_handler_data = tgi;
+ irq->num_parents = tgi->bank_count;
+ irq->parents = tgi->irqs;
+
+ np = of_find_matching_node(NULL, tegra_pmc_of_match);
+ if (np) {
+ irq->parent_domain = irq_find_host(np);
+ of_node_put(np);
+
+ if (!irq->parent_domain)
+ return -EPROBE_DEFER;
+
+ tgi->ic.irq_set_affinity = tegra_gpio_irq_set_affinity;
}
tgi->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -660,33 +773,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
}
ret = devm_gpiochip_add_data(&pdev->dev, &tgi->gc, tgi);
- if (ret < 0) {
- irq_domain_remove(tgi->irq_domain);
+ if (ret < 0)
return ret;
- }
-
- for (gpio = 0; gpio < tgi->gc.ngpio; gpio++) {
- int irq = irq_create_mapping(tgi->irq_domain, gpio);
- /* No validity check; all Tegra GPIOs are valid IRQs */
-
- bank = &tgi->bank_info[GPIO_BANK(gpio)];
-
- irq_set_chip_data(irq, bank);
- irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
- irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
- }
-
- for (i = 0; i < tgi->bank_count; i++) {
- bank = &tgi->bank_info[i];
-
- irq_set_chained_handler_and_data(bank->irq,
- tegra_gpio_irq_handler, bank);
-
- for (j = 0; j < 4; j++) {
- raw_spin_lock_init(&bank->lvl_lock[j]);
- spin_lock_init(&bank->dbc_lock[j]);
- }
- }
tegra_gpio_debuginit(tgi);
@@ -715,18 +803,21 @@ static const struct of_device_id tegra_gpio_of_match[] = {
{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_gpio_of_match);
static struct platform_driver tegra_gpio_driver = {
- .driver = {
- .name = "tegra-gpio",
- .pm = &tegra_gpio_pm_ops,
+ .driver = {
+ .name = "tegra-gpio",
+ .pm = &tegra_gpio_pm_ops,
.of_match_table = tegra_gpio_of_match,
},
- .probe = tegra_gpio_probe,
+ .probe = tegra_gpio_probe,
};
-
-static int __init tegra_gpio_init(void)
-{
- return platform_driver_register(&tegra_gpio_driver);
-}
-subsys_initcall(tegra_gpio_init);
+module_platform_driver(tegra_gpio_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra GPIO controller driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Erik Gilling <konkers@google.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 286e0b1f46e4..1bd9e44df718 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -657,7 +657,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.get_direction = tegra186_gpio_get_direction;
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
- gpio->gpio.get = tegra186_gpio_get,
+ gpio->gpio.get = tegra186_gpio_get;
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
new file mode 100644
index 000000000000..0e3d19828eb1
--- /dev/null
+++ b/drivers/gpio/gpio-visconti.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toshiba Visconti GPIO Support
+ *
+ * (C) Copyright 2020 Toshiba Electronic Devices & Storage Corporation
+ * (C) Copyright 2020 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/* register offset */
+#define GPIO_DIR 0x00
+#define GPIO_IDATA 0x08
+#define GPIO_ODATA 0x10
+#define GPIO_OSET 0x18
+#define GPIO_OCLR 0x20
+#define GPIO_INTMODE 0x30
+
+#define BASE_HW_IRQ 24
+
+struct visconti_gpio {
+ void __iomem *base;
+ spinlock_t lock; /* protect gpio register */
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+};
+
+static int visconti_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct visconti_gpio *priv = gpiochip_get_data(gc);
+ u32 offset = irqd_to_hwirq(d);
+ u32 bit = BIT(offset);
+ u32 intc_type = IRQ_TYPE_EDGE_RISING;
+ u32 intmode, odata;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ odata = readl(priv->base + GPIO_ODATA);
+ intmode = readl(priv->base + GPIO_INTMODE);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ odata &= ~bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ odata |= bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ intmode |= bit;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ intc_type = IRQ_TYPE_LEVEL_HIGH;
+ odata &= ~bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ intc_type = IRQ_TYPE_LEVEL_HIGH;
+ odata |= bit;
+ intmode &= ~bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ writel(odata, priv->base + GPIO_ODATA);
+ writel(intmode, priv->base + GPIO_INTMODE);
+ irq_set_irq_type(offset, intc_type);
+
+ ret = irq_chip_set_type_parent(d, type);
+err:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
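The controller can only present rising-edge or level-high conditions upstream, so falling-edge and level-low requests are implemented by inverting the pin sense through ODATA. The switch above reduces to this table:

/*
 * Trigger translation implemented above:
 *
 *   requested type          ODATA bit   INTMODE bit   type sent upstream
 *   IRQ_TYPE_EDGE_RISING        0           0         IRQ_TYPE_EDGE_RISING
 *   IRQ_TYPE_EDGE_FALLING       1           0         IRQ_TYPE_EDGE_RISING
 *   IRQ_TYPE_EDGE_BOTH          -           1         IRQ_TYPE_EDGE_RISING
 *   IRQ_TYPE_LEVEL_HIGH         0           0         IRQ_TYPE_LEVEL_HIGH
 *   IRQ_TYPE_LEVEL_LOW          1           0         IRQ_TYPE_LEVEL_HIGH
 */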
+
+static int visconti_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ /* Interrupts 0..15 are mapped to interrupts 24..39 on the GIC */
+ if (child < 16) {
+ /* All these interrupts are level high in the CPU */
+ *parent_type = IRQ_TYPE_LEVEL_HIGH;
+ *parent = child + BASE_HW_IRQ;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = 0;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
+}
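Worked through for one line: a consumer requesting GPIO 3 as IRQ_TYPE_EDGE_FALLING gets parent hwirq 3 + BASE_HW_IRQ = 27, and the parent fwspec becomes { 0, 27, IRQ_TYPE_LEVEL_HIGH }, since visconti_gpio_child_to_parent_hwirq() reports every child as level-high to the GIC; the falling-edge polarity itself is produced via ODATA in visconti_gpio_irq_set_type(), as shown above.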
+
+static int visconti_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_gpio *priv;
+ struct irq_chip *irq_chip;
+ struct gpio_irq_chip *girq;
+ struct irq_domain *parent;
+ struct device_node *irq_parent;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ irq_parent = of_irq_find_parent(dev->of_node);
+ if (!irq_parent) {
+ dev_err(dev, "No IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "No IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ fwnode = of_node_to_fwnode(irq_parent);
+ of_node_put(irq_parent);
+
+ ret = bgpio_init(&priv->gpio_chip, dev, 4,
+ priv->base + GPIO_IDATA,
+ priv->base + GPIO_OSET,
+ priv->base + GPIO_OCLR,
+ priv->base + GPIO_DIR,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ irq_chip = &priv->irq_chip;
+ irq_chip->name = dev_name(dev);
+ irq_chip->irq_mask = irq_chip_mask_parent;
+ irq_chip->irq_unmask = irq_chip_unmask_parent;
+ irq_chip->irq_eoi = irq_chip_eoi_parent;
+ irq_chip->irq_set_type = visconti_gpio_irq_set_type;
+ irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+
+ girq = &priv->gpio_chip.irq;
+ girq->chip = irq_chip;
+ girq->fwnode = fwnode;
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ if (ret) {
+ dev_err(dev, "failed to add GPIO chip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+}
+
+static const struct of_device_id visconti_gpio_of_match[] = {
+ { .compatible = "toshiba,gpio-tmpv7708", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, visconti_gpio_of_match);
+
+static struct platform_driver visconti_gpio_driver = {
+ .probe = visconti_gpio_probe,
+ .driver = {
+ .name = "visconti_gpio",
+ .of_match_table = of_match_ptr(visconti_gpio_of_match),
+ }
+};
+module_platform_driver(visconti_gpio_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
+MODULE_DESCRIPTION("Toshiba Visconti GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 3bf397b8dfbc..69713fd5485b 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -216,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
- c->set_config = vx855gpio_set_config,
+ c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index b5fbba5a783a..a19eeef6cf1e 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -73,6 +73,8 @@
enum ctrl_register {
CTRL_IN,
CTRL_OUT,
+ IRQ_STATUS,
+ IRQ_MASK,
};
/*
@@ -112,22 +114,29 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
return reg;
}
-static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+static inline int to_ireg(int gpio, enum ctrl_register type, unsigned int *mask)
{
- unsigned int reg, mask;
+ unsigned int reg = type == IRQ_STATUS ? IRQ_STATUS_BASE : IRQ_MASK_BASE;
if (gpio < GROUP0_NR_IRQS) {
- reg = IRQ_MASK_BASE;
- mask = BIT(gpio % GROUP0_NR_IRQS);
+ reg += 0;
+ *mask = BIT(gpio);
} else {
- reg = IRQ_MASK_BASE + 1;
- mask = BIT((gpio - GROUP0_NR_IRQS) % GROUP1_NR_IRQS);
+ reg += 1;
+ *mask = BIT(gpio - GROUP0_NR_IRQS);
}
+ return reg;
+}
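to_ireg() folds the old open-coded group selection into one helper: the status and mask registers come in pairs, one per IRQ group. Worked through, assuming GROUP0_NR_IRQS is 7 as defined earlier in this file:

/*
 *   to_ireg(2, IRQ_MASK, &m)  -> IRQ_MASK_BASE,     m = BIT(2)
 *   to_ireg(9, IRQ_MASK, &m)  -> IRQ_MASK_BASE + 1, m = BIT(9 - 7) = BIT(2)
 */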
+
+static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+{
+ unsigned int mask, reg = to_ireg(gpio, IRQ_MASK, &mask);
+
if (wg->set_irq_mask)
- regmap_update_bits(wg->regmap, reg, mask, mask);
+ regmap_set_bits(wg->regmap, reg, mask);
else
- regmap_update_bits(wg->regmap, reg, mask, 0);
+ regmap_clear_bits(wg->regmap, reg, mask);
}
static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
@@ -207,9 +216,9 @@ static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
return;
if (value)
- regmap_update_bits(wg->regmap, reg, 1, 1);
+ regmap_set_bits(wg->regmap, reg, 1);
else
- regmap_update_bits(wg->regmap, reg, 1, 0);
+ regmap_clear_bits(wg->regmap, reg, 1);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
@@ -324,7 +333,8 @@ static struct irq_chip wcove_irqchip = {
static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
{
struct wcove_gpio *wg = (struct wcove_gpio *)data;
- unsigned int pending, virq, gpio, mask, offset;
+ unsigned int virq, gpio;
+ unsigned long pending;
u8 p[2];
if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
@@ -339,15 +349,12 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
/* Iterate until no interrupt is pending */
while (pending) {
/* One iteration is for all pending bits */
- for_each_set_bit(gpio, (const unsigned long *)&pending,
- WCOVE_GPIO_NUM) {
- offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
- mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
- BIT(gpio);
+ for_each_set_bit(gpio, &pending, WCOVE_GPIO_NUM) {
+ unsigned int mask, reg = to_ireg(gpio, IRQ_STATUS, &mask);
+
virq = irq_find_mapping(wg->chip.irq.domain, gpio);
handle_nested_irq(virq);
- regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
- mask, mask);
+ regmap_set_bits(wg->regmap, reg, mask);
}
/* Next iteration */
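pending is now declared unsigned long because the bitmap iterators walk unsigned long words; the old cast from a 32-bit variable made find_first_bit() load a full word and could read past the variable's storage on 64-bit machines. Minimal correct usage, as a sketch:

#include <linux/bitops.h>

static void walk_pending(u8 lo, u8 hi)
{
	unsigned long pending = lo | ((unsigned long)hi << BITS_PER_BYTE);
	unsigned int bit;

	for_each_set_bit(bit, &pending, 16)
		;	/* handle_nested_irq(...) per bit in the real driver */
}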
@@ -367,30 +374,26 @@ static void wcove_gpio_dbg_show(struct seq_file *s,
{
unsigned int ctlo, ctli, irq_mask, irq_status;
struct wcove_gpio *wg = gpiochip_get_data(chip);
- int gpio, offset, group, ret = 0;
+ int gpio, mask, ret = 0;
for (gpio = 0; gpio < WCOVE_GPIO_NUM; gpio++) {
- group = gpio < GROUP0_NR_IRQS ? 0 : 1;
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &ctli);
- ret += regmap_read(wg->regmap, IRQ_MASK_BASE + group,
- &irq_mask);
- ret += regmap_read(wg->regmap, IRQ_STATUS_BASE + group,
- &irq_status);
+ ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_MASK, &mask), &irq_mask);
+ ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_STATUS, &mask), &irq_status);
if (ret) {
pr_err("Failed to read registers: ctrl out/in or irq status/mask\n");
break;
}
- offset = gpio % 8;
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
ctli & 0x1 ? "hi" : "lo",
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
- irq_mask & BIT(offset) ? "mask " : "unmask",
- irq_status & BIT(offset) ? "pending" : " ");
+ irq_mask & mask ? "mask " : "unmask",
+ irq_status & mask ? "pending" : " ");
}
}
@@ -434,7 +437,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
wg->chip.set = wcove_gpio_set;
- wg->chip.set_config = wcove_gpio_set_config,
+ wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
wg->chip.can_sleep = true;
@@ -473,14 +476,12 @@ static int wcove_gpio_probe(struct platform_device *pdev)
}
/* Enable GPIO0 interrupts */
- ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE, GPIO_IRQ0_MASK,
- 0x00);
+ ret = regmap_clear_bits(wg->regmap, IRQ_MASK_BASE + 0, GPIO_IRQ0_MASK);
if (ret)
return ret;
/* Enable GPIO1 interrupts */
- ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE + 1, GPIO_IRQ1_MASK,
- 0x00);
+ ret = regmap_clear_bits(wg->regmap, IRQ_MASK_BASE + 1, GPIO_IRQ1_MASK);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index be539381fd82..b411d3156e0b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -10,10 +10,13 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -22,6 +25,11 @@
#define XGPIO_CHANNEL_OFFSET 0x8
+#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
+#define XGPIO_GIER_IE BIT(31)
+#define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */
+#define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */
+
/* Read/Write access to the GPIO registers */
#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
# define xgpio_readreg(offset) readl(offset)
@@ -36,9 +44,15 @@
* @gc: GPIO chip
* @regs: register block
* @gpio_width: GPIO width for every channel
- * @gpio_state: GPIO state shadow register
+ * @gpio_state: GPIO write state shadow register
+ * @gpio_last_irq_read: GPIO read state register from last interrupt
* @gpio_dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
+ * @irq: IRQ used by GPIO device
+ * @irqchip: IRQ chip
+ * @irq_enable: GPIO IRQ enable/disable bitfield
+ * @irq_rising_edge: GPIO IRQ rising edge enable/disable bitfield
+ * @irq_falling_edge: GPIO IRQ falling edge enable/disable bitfield
* @clk: clock resource for this driver
*/
struct xgpio_instance {
@@ -46,8 +60,14 @@ struct xgpio_instance {
void __iomem *regs;
unsigned int gpio_width[2];
u32 gpio_state[2];
+ u32 gpio_last_irq_read[2];
u32 gpio_dir[2];
- spinlock_t gpio_lock[2];
+ spinlock_t gpio_lock; /* For serializing operations */
+ int irq;
+ struct irq_chip irqchip;
+ u32 irq_enable[2];
+ u32 irq_rising_edge[2];
+ u32 irq_falling_edge[2];
struct clk *clk;
};
@@ -113,7 +133,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
if (val)
@@ -124,7 +144,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -144,7 +164,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
int index = xgpio_index(chip, 0);
int offset, i;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signals */
for (i = 0; i < gc->ngpio; i++) {
@@ -155,9 +175,9 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
index * XGPIO_CHANNEL_OFFSET,
chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
}
if (__test_and_clear_bit(i, mask)) {
offset = xgpio_offset(chip, i);
@@ -171,7 +191,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -190,14 +210,14 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
chip->gpio_dir[index] |= BIT(offset);
xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -221,7 +241,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
if (val)
@@ -236,7 +256,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -259,6 +279,39 @@ static void xgpio_save_regs(struct xgpio_instance *chip)
chip->gpio_dir[1]);
}
+static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->parent);
+ /*
+ * If the device is already active pm_runtime_get() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
+
+static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pm_runtime_put(chip->parent);
+}
+
+static int __maybe_unused xgpio_suspend(struct device *dev)
+{
+ struct xgpio_instance *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
+
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
/**
* xgpio_remove - Remove method for the GPIO device.
* @pdev: pointer to the platform device
@@ -271,12 +324,224 @@ static int xgpio_remove(struct platform_device *pdev)
{
struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(gpio->clk);
return 0;
}
/**
+ * xgpio_irq_ack - Acknowledge a child GPIO interrupt.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ * This currently does nothing, but irq_ack is unconditionally called by
+ * handle_edge_irq and therefore must be defined.
+ */
+static void xgpio_irq_ack(struct irq_data *irq_data)
+{
+}
+
+static int __maybe_unused xgpio_resume(struct device *dev)
+{
+ struct xgpio_instance *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
+
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ return clk_enable(gpio->clk);
+}
+
+static const struct dev_pm_ops xgpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
+ xgpio_runtime_resume, NULL)
+};
+
+/**
+ * xgpio_irq_mask - Write the specified signal of the GPIO device.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ */
+static void xgpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable[index] &= ~BIT(offset);
+
+ if (!chip->irq_enable[index]) {
+ /* Disable per channel interrupt */
+ u32 temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+
+ temp &= ~BIT(index);
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
+ }
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_irq_unmask - Write the specified signal of the GPIO device.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ */
+static void xgpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+ u32 old_enable = chip->irq_enable[index];
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable[index] |= BIT(offset);
+
+ if (!old_enable) {
+ /* Clear any existing per-channel interrupts */
+ u32 val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET) &
+ BIT(index);
+
+ if (val)
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, val);
+
+ /* Update GPIO IRQ read data before enabling the interrupt */
+ val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
+ index * XGPIO_CHANNEL_OFFSET);
+ chip->gpio_last_irq_read[index] = val;
+
+ /* Enable per channel interrupt */
+ val = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ val |= BIT(index);
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
+ }
+
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_set_irq_type - Write the specified signal of the GPIO device.
+ * @irq_data: Per IRQ and chip data passed down to chip functions
+ * @type: Interrupt type that is to be set for the gpio pin
+ *
+ * Return:
+ * 0 if interrupt type is supported otherwise -EINVAL
+ */
+static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+
+ /*
+ * The Xilinx GPIO hardware provides a single interrupt status
+ * indication for any state change in a given GPIO channel (bank).
+ * Therefore, only rising edge or falling edge triggers are
+ * supported.
+ */
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ chip->irq_rising_edge[index] |= BIT(offset);
+ chip->irq_falling_edge[index] |= BIT(offset);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_rising_edge[index] |= BIT(offset);
+ chip->irq_falling_edge[index] &= ~BIT(offset);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_rising_edge[index] &= ~BIT(offset);
+ chip->irq_falling_edge[index] |= BIT(offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_handler_locked(irq_data, handle_edge_irq);
+ return 0;
+}
+
+/**
+ * xgpio_irqhandler - GPIO interrupt service routine
+ * @desc: Pointer to interrupt description
+ */
+static void xgpio_irqhandler(struct irq_desc *desc)
+{
+ struct xgpio_instance *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ u32 num_channels = chip->gpio_width[1] ? 2 : 1;
+ u32 offset = 0, index;
+ u32 status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
+
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
+
+ chained_irq_enter(irqchip, desc);
+ for (index = 0; index < num_channels; index++) {
+ if ((status & BIT(index))) {
+ unsigned long rising_events, falling_events, all_events;
+ unsigned long flags;
+ u32 data, bit;
+ unsigned int irq;
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+ data = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
+ index * XGPIO_CHANNEL_OFFSET);
+ rising_events = data &
+ ~chip->gpio_last_irq_read[index] &
+ chip->irq_enable[index] &
+ chip->irq_rising_edge[index];
+ falling_events = ~data &
+ chip->gpio_last_irq_read[index] &
+ chip->irq_enable[index] &
+ chip->irq_falling_edge[index];
+ dev_dbg(chip->gc.parent,
+ "IRQ chan %u rising 0x%lx falling 0x%lx\n",
+ index, rising_events, falling_events);
+ all_events = rising_events | falling_events;
+ chip->gpio_last_irq_read[index] = data;
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ for_each_set_bit(bit, &all_events, 32) {
+ irq = irq_find_mapping(chip->gc.irq.domain,
+ offset + bit);
+ generic_handle_irq(irq);
+ }
+ }
+ offset += chip->gpio_width[index];
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
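Because the IP core only latches "something changed in this channel" per bank, per-line edges are reconstructed in software by diffing the current data word against the snapshot kept in gpio_last_irq_read[]. The core of that derivation, as a standalone sketch with illustrative names:

#include <stdint.h>

struct edge_state {
	uint32_t last;		/* data word captured at the previous IRQ */
	uint32_t enabled;	/* lines with their interrupt unmasked */
	uint32_t rising;	/* lines armed for rising edges */
	uint32_t falling;	/* lines armed for falling edges */
};

/* Returns a bitmask of lines that should fire, updating the snapshot. */
static uint32_t derive_events(struct edge_state *s, uint32_t data)
{
	uint32_t rise = data & ~s->last & s->enabled & s->rising;
	uint32_t fall = ~data & s->last & s->enabled & s->falling;

	s->last = data;		/* the next IRQ diffs against this snapshot */
	return rise | fall;
}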
+
+/**
* xgpio_of_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
@@ -289,7 +554,10 @@ static int xgpio_probe(struct platform_device *pdev)
struct xgpio_instance *chip;
int status = 0;
struct device_node *np = pdev->dev.of_node;
- u32 is_dual;
+ u32 is_dual = 0;
+ u32 cells = 2;
+ struct gpio_irq_chip *girq;
+ u32 temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -305,6 +573,15 @@ static int xgpio_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
chip->gpio_dir[0] = 0xFFFFFFFF;
+ /* Update cells with gpio-cells value */
+ if (of_property_read_u32(np, "#gpio-cells", &cells))
+ dev_dbg(&pdev->dev, "Missing gpio-cells property\n");
+
+ if (cells != 2) {
+ dev_err(&pdev->dev, "#gpio-cells mismatch\n");
+ return -EINVAL;
+ }
+
/*
* Check device node and parent device node for device width
* and assume default width of 32
@@ -312,7 +589,10 @@ static int xgpio_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
chip->gpio_width[0] = 32;
- spin_lock_init(&chip->gpio_lock[0]);
+ if (chip->gpio_width[0] > 32)
+ return -EINVAL;
+
+ spin_lock_init(&chip->gpio_lock);
if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
is_dual = 0;
@@ -336,7 +616,8 @@ static int xgpio_probe(struct platform_device *pdev)
&chip->gpio_width[1]))
chip->gpio_width[1] = 32;
- spin_lock_init(&chip->gpio_lock[1]);
+ if (chip->gpio_width[1] > 32)
+ return -EINVAL;
}
chip->gc.base = -1;
@@ -344,8 +625,11 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
+ chip->gc.of_gpio_n_cells = cells;
chip->gc.get = xgpio_get;
chip->gc.set = xgpio_set;
+ chip->gc.request = xgpio_request;
+ chip->gc.free = xgpio_free;
chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(&pdev->dev);
@@ -357,28 +641,68 @@ static int xgpio_probe(struct platform_device *pdev)
}
chip->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(chip->clk)) {
- if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
- dev_dbg(&pdev->dev, "Input clock not found\n");
- return PTR_ERR(chip->clk);
- }
+ if (IS_ERR(chip->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n");
status = clk_prepare_enable(chip->clk);
if (status < 0) {
dev_err(&pdev->dev, "Failed to prepare clk\n");
return status;
}
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
xgpio_save_regs(chip);
+ chip->irq = platform_get_irq_optional(pdev, 0);
+ if (chip->irq <= 0)
+ goto skip_irq;
+
+ chip->irqchip.name = "gpio-xilinx";
+ chip->irqchip.irq_ack = xgpio_irq_ack;
+ chip->irqchip.irq_mask = xgpio_irq_mask;
+ chip->irqchip.irq_unmask = xgpio_irq_unmask;
+ chip->irqchip.irq_set_type = xgpio_set_irq_type;
+
+ /* Disable per-channel interrupts */
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, 0);
+ /* Clear any existing per-channel interrupts */
+ temp = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, temp);
+ /* Enable global interrupts */
+ xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
+
+ girq = &chip->gc.irq;
+ girq->chip = &chip->irqchip;
+ girq->parent_handler = xgpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ status = -ENOMEM;
+ goto err_pm_put;
+ }
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+skip_irq:
status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
dev_err(&pdev->dev, "failed to add GPIO chip\n");
- clk_disable_unprepare(chip->clk);
- return status;
+ goto err_pm_put;
}
+ pm_runtime_put(&pdev->dev);
return 0;
+
+err_pm_put:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ clk_disable_unprepare(chip->clk);
+ return status;
}
static const struct of_device_id xgpio_of_match[] = {
@@ -394,6 +718,7 @@ static struct platform_driver xgpio_plat_driver = {
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
+ .pm = &xgpio_dev_pm_ops,
},
};
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
deleted file mode 100644
index 64bfb722756a..000000000000
--- a/drivers/gpio/gpio-zx.c
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ZTE ZX296702 GPIO driver
- *
- * Author: Jun Nie <jun.nie@linaro.org>
- *
- * Copyright (C) 2015 Linaro Ltd.
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#define ZX_GPIO_DIR 0x00
-#define ZX_GPIO_IVE 0x04
-#define ZX_GPIO_IV 0x08
-#define ZX_GPIO_IEP 0x0C
-#define ZX_GPIO_IEN 0x10
-#define ZX_GPIO_DI 0x14
-#define ZX_GPIO_DO1 0x18
-#define ZX_GPIO_DO0 0x1C
-#define ZX_GPIO_DO 0x20
-
-#define ZX_GPIO_IM 0x28
-#define ZX_GPIO_IE 0x2C
-
-#define ZX_GPIO_MIS 0x30
-#define ZX_GPIO_IC 0x34
-
-#define ZX_GPIO_NR 16
-
-struct zx_gpio {
- raw_spinlock_t lock;
-
- void __iomem *base;
- struct gpio_chip gc;
-};
-
-static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
- unsigned long flags;
- u16 gpiodir;
-
- if (offset >= gc->ngpio)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
- gpiodir &= ~BIT(offset);
- writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
- unsigned long flags;
- u16 gpiodir;
-
- if (offset >= gc->ngpio)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
- gpiodir |= BIT(offset);
- writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
-
- if (value)
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
- else
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static int zx_get_value(struct gpio_chip *gc, unsigned offset)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
-
- return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
-}
-
-static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
-
- if (value)
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
- else
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
-}
-
-static int zx_irq_type(struct irq_data *d, unsigned trigger)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- int offset = irqd_to_hwirq(d);
- unsigned long flags;
- u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
- u16 bit = BIT(offset);
-
- if (offset < 0 || offset >= ZX_GPIO_NR)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
-
- gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
- gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
- gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
- gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
-
- if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- gpiois |= bit;
- if (trigger & IRQ_TYPE_LEVEL_HIGH)
- gpioiev |= bit;
- else
- gpioiev &= ~bit;
- } else
- gpiois &= ~bit;
-
- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
- gpioi_epos |= bit;
- gpioi_eneg |= bit;
- } else {
- if (trigger & IRQ_TYPE_EDGE_RISING) {
- gpioi_epos |= bit;
- gpioi_eneg &= ~bit;
- } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
- gpioi_eneg |= bit;
- gpioi_epos &= ~bit;
- }
- }
-
- writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
- writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
- writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
- writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static void zx_irq_handler(struct irq_desc *desc)
-{
- unsigned long pending;
- int offset;
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
-
- chained_irq_enter(irqchip, desc);
-
- pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
- writew_relaxed(pending, chip->base + ZX_GPIO_IC);
- if (pending) {
- for_each_set_bit(offset, &pending, ZX_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irq.domain,
- offset));
- }
-
- chained_irq_exit(irqchip, desc);
-}
-
-static void zx_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
- u16 gpioie;
-
- raw_spin_lock(&chip->lock);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- raw_spin_unlock(&chip->lock);
-}
-
-static void zx_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
- u16 gpioie;
-
- raw_spin_lock(&chip->lock);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- raw_spin_unlock(&chip->lock);
-}
-
-static struct irq_chip zx_irqchip = {
- .name = "zx-gpio",
- .irq_mask = zx_irq_mask,
- .irq_unmask = zx_irq_unmask,
- .irq_set_type = zx_irq_type,
-};
-
-static int zx_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct zx_gpio *chip;
- struct gpio_irq_chip *girq;
- int irq, id, ret;
-
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(chip->base))
- return PTR_ERR(chip->base);
-
- id = of_alias_get_id(dev->of_node, "gpio");
-
- raw_spin_lock_init(&chip->lock);
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
- chip->gc.direction_input = zx_direction_input;
- chip->gc.direction_output = zx_direction_output;
- chip->gc.get = zx_get_value;
- chip->gc.set = zx_set_value;
- chip->gc.base = ZX_GPIO_NR * id;
- chip->gc.ngpio = ZX_GPIO_NR;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
-
- /*
- * irq_chip support
- */
- writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
- writew_relaxed(0, chip->base + ZX_GPIO_IE);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- girq = &chip->gc.irq;
- girq->chip = &zx_irqchip;
- girq->parent_handler = zx_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- ret = gpiochip_add_data(&chip->gc, chip);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, chip);
- dev_info(dev, "ZX GPIO chip registered\n");
-
- return 0;
-}
-
-static const struct of_device_id zx_gpio_match[] = {
- {
- .compatible = "zte,zx296702-gpio",
- },
- { },
-};
-
-static struct platform_driver zx_gpio_driver = {
- .probe = zx_gpio_probe,
- .driver = {
- .name = "zx_gpio",
- .of_match_table = of_match_ptr(zx_gpio_match),
- },
-};
-builtin_platform_driver(zx_gpio_driver)
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 12b679ca552c..1631727bf0da 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -776,6 +776,8 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -1979,6 +1981,21 @@ struct gpio_chardev_data {
#endif
};
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ struct gpio_device *gdev = cdev->gdev;
+ struct gpiochip_info chipinfo;
+
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
+ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2010,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
return abiv;
}
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ /* this doubles as a range check on line_offset */
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
+ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.line_offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2082,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ __u32 offset;
+
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= cdev->gdev->ngpio)
+ return -EINVAL;
+
+ if (!test_and_clear_bit(offset, cdev->watched_lines))
+ return -EBUSY;
+
+ return 0;
+}
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
@@ -2037,80 +2105,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
+ if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strscpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- strscpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
+ return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (lineinfo_ensure_abi_version(cdev, 1))
- return -EPERM;
-
- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
- clear_bit(lineinfo.line_offset, cdev->watched_lines);
- return -EFAULT;
- }
-
- return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get_v1(cdev, ip,
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2131,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- if (offset >= cdev->gdev->ngpio)
- return -EINVAL;
-
- if (!test_and_clear_bit(offset, cdev->watched_lines))
- return -EBUSY;
-
- return 0;
+ return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
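
With GPIO_GET_CHIPINFO_IOCTL handling now isolated in chipinfo_get(), the user-facing contract is unchanged. A hedged userspace sketch exercising it (error handling kept minimal; /dev/gpiochip0 is just an example path; GPIO_GET_CHIPINFO_IOCTL and struct gpiochip_info come from the uapi header <linux/gpio.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiochip_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
		printf("%s (%s): %u lines\n", info.name, info.label, info.lines);
	close(fd);
	return 0;
}
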
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b4a71119a4b0..baf0153b7bca 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1039,3 +1039,14 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(chip->of_node);
}
+
+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
+{
+ /* If the gpiochip has an assigned OF node this takes precedence */
+ if (gc->of_node)
+ gdev->dev.of_node = gc->of_node;
+ else
+ gc->of_node = gdev->dev.of_node;
+ if (gdev->dev.of_node)
+ gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index ed26664f1537..8af2bc899aab 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
int of_gpio_get_count(struct device *dev, const char *con_id);
bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
#else
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
@@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
return false;
}
+static inline void of_gpio_dev_init(struct gpio_chip *gc,
+ struct gpio_device *gdev)
+{
+}
#endif /* CONFIG_OF_GPIO */
extern struct notifier_block gpio_of_notifier;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b02cc2abd3b6..adf55db080d8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -56,8 +56,10 @@
static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
+static int gpio_bus_match(struct device *dev, struct device_driver *drv);
static struct bus_type gpio_bus_type = {
.name = "gpio",
+ .match = gpio_bus_match,
};
/*
@@ -590,20 +592,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->dev.of_node = gc->parent->of_node;
}
-#ifdef CONFIG_OF_GPIO
- /* If the gpiochip has an assigned OF node this takes precedence */
- if (gc->of_node)
- gdev->dev.of_node = gc->of_node;
- else
- gc->of_node = gdev->dev.of_node;
-#endif
+ of_gpio_dev_init(gc, gdev);
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+ goto err_free_ida;
+
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
@@ -617,7 +617,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
- goto err_free_ida;
+ goto err_free_dev_name;
}
if (gc->ngpio == 0) {
@@ -768,6 +768,8 @@ err_free_label:
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
@@ -1489,6 +1491,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
type = IRQ_TYPE_NONE;
}
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
@@ -2548,7 +2553,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
- int first, j, ret;
+ int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
@@ -3460,6 +3465,10 @@ EXPORT_SYMBOL_GPL(gpiod_add_lookup_table);
*/
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
{
+ /* Nothing to remove */
+ if (!table)
+ return;
+
mutex_lock(&gpio_lookup_lock);
list_del(&table->list);
@@ -4202,6 +4211,41 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
+
+static int gpio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ /*
+ * Only match if the fwnode doesn't already have a proper struct device
+ * created for it.
+ */
+ if (dev->fwnode && dev->fwnode->dev != dev)
+ return 0;
+ return 1;
+}
+
+static int gpio_stub_drv_probe(struct device *dev)
+{
+ /*
+ * The DT node of some GPIO chips have a "compatible" property, but
+ * never have a struct device added and probed by a driver to register
+ * the GPIO chip with gpiolib. In such cases, fw_devlink=on will cause
+ * the consumers of the GPIO chip to get probe deferred forever because
+ * they will be waiting for a device associated with the GPIO chip
+ * firmware node to get added and bound to a driver.
+ *
+ * To allow these consumers to probe, we associate the struct
+ * gpio_device of the GPIO chip with the firmware node and then simply
+ * bind it to this stub driver.
+ */
+ return 0;
+}
+
+static struct device_driver gpio_stub_drv = {
+ .name = "gpio_stub_drv",
+ .bus = &gpio_bus_type,
+ .probe = gpio_stub_drv_probe,
+};
+
static int __init gpiolib_dev_init(void)
{
int ret;
@@ -4213,9 +4257,16 @@ static int __init gpiolib_dev_init(void)
return ret;
}
+ ret = driver_register(&gpio_stub_drv);
+ if (ret < 0) {
+ pr_err("gpiolib: could not register GPIO stub driver\n");
+ bus_unregister(&gpio_bus_type);
+ return ret;
+ }
+
ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
+ driver_unregister(&gpio_stub_drv);
bus_unregister(&gpio_bus_type);
return ret;
}
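
gpiolib_dev_init() above follows the usual register/unwind ordering: every successful registration gets a matching cleanup, invoked in reverse order when a later step fails, and the failing step's return code is what propagates out. A self-contained sketch of the same shape (all foo_* names are illustrative):

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>

static struct bus_type foo_bus_type = { .name = "foo" };
static struct device_driver foo_stub_drv = {
	.name = "foo_stub",
	.bus = &foo_bus_type,
};
static dev_t foo_devt;

static int __init foo_init(void)
{
	int ret;

	ret = bus_register(&foo_bus_type);
	if (ret < 0)
		return ret;

	ret = driver_register(&foo_stub_drv);	/* keep ret: it is the value we return */
	if (ret < 0)
		goto err_bus;

	ret = alloc_chrdev_region(&foo_devt, 0, 256, "foo");
	if (ret < 0)
		goto err_drv;

	return 0;

err_drv:
	driver_unregister(&foo_stub_drv);
err_bus:
	bus_unregister(&foo_bus_type);
	return ret;
}
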
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0973f408d75f..e392a90ca687 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -15,6 +15,9 @@ menuconfig DRM
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
+# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
+# device and dmabuf fd. Let's make sure that is available for our userspace.
+ select KCMP
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -214,10 +217,6 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
-config DRM_VM
- bool
- depends on DRM && MMU
-
config DRM_SCHED
tristate
depends on DRM
@@ -391,7 +390,6 @@ source "drivers/gpu/drm/xlnx/Kconfig"
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
- select DRM_VM
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
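
The comment above refers to Mesa's os_same_file_description(), which on Linux reduces to a kcmp(2) call. A hedged userspace sketch (KCMP_FILE and SYS_kcmp are the real uapi symbols; the wrapper is illustrative):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

static int same_file_description(int fd1, int fd2)
{
	pid_t pid = getpid();

	/* kcmp() returns 0 when both fds refer to the same open file description */
	return syscall(SYS_kcmp, pid, pid, KCMP_FILE,
		       (unsigned long)fd1, (unsigned long)fd2) == 0;
}
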
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fefaff4c832d..926adef289db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_memory.o drm_drv.o \
+ drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -20,9 +20,9 @@ drm-y := drm_auth.o drm_cache.o \
drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
drm_managed.o drm_vblank_work.o
-drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
+drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \
+ drm_memory.o drm_scatter.o drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
-drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6bf6cfaea3f1..13ebb1f71e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
- amdgpu_fw_attestation.o
+ amdgpu_fw_attestation.o amdgpu_securedisplay.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
# add DF block
amdgpu-y += \
@@ -97,6 +97,7 @@ amdgpu-y += \
tonga_ih.o \
cz_ih.o \
vega10_ih.o \
+ vega20_ih.o \
navi10_ih.o
# add PSP block
@@ -170,7 +171,8 @@ amdgpu-y += \
# add SMUIO block
amdgpu-y += \
smuio_v9_0.o \
- smuio_v11_0.o
+ smuio_v11_0.o \
+ smuio_v11_0_6.o
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5993dd0fdd8e..b6879d97c9c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
@@ -89,6 +88,7 @@
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -286,7 +287,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -578,7 +579,8 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
- AMD_RESET_METHOD_BACO
+ AMD_RESET_METHOD_BACO,
+ AMD_RESET_METHOD_PCI,
};
/*
@@ -608,7 +610,6 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
- void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic*/
@@ -891,6 +892,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vline0_irq;
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
@@ -921,6 +923,9 @@ struct amdgpu_device {
/* nbio */
struct amdgpu_nbio nbio;
+ /* hdp */
+ struct amdgpu_hdp hdp;
+
/* smuio */
struct amdgpu_smuio smuio;
@@ -1003,6 +1008,12 @@ struct amdgpu_device {
bool in_suspend;
bool in_hibernate;
+ /*
+ * The combined flag in_poweroff_reboot_com is used to identify the poweroff
+ * and reboot paths in the s0i3 system-wide suspend.
+ */
+ bool in_poweroff_reboot_com;
+
atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
struct rw_semaphore reset_sem;
@@ -1202,8 +1213,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
-#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_flush_hdp(adev, r) \
+ ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+ ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r)))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1222,6 +1235,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job* job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index db96d69eb45e..c5343a5eecbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
-#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init();
amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
- ret = -ENOENT;
-#endif
kfd_initialized = !ret;
return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
return adev->have_atomics_support;
}
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
- return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
- return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
- return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
- return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf)
-{
- return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
- struct drm_device *ddev,
- const struct kgd2kfd_shared_resources *gpu_resources)
-{
- return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
- return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
- return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
- return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ea391ca7f2f1..a81d9cacf9b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
KGD_ENGINE_MAX
};
-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
- struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit);
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+ return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+ return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+ return 0;
+}
+#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
-
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
-
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
struct dma_buf *dmabuf,
uint64_t va, void *vm,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
/* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
- struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+ return -ENOENT;
+}
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+ unsigned int asic_type, bool vf)
+{
+ return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
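
The hunks above replace the old #ifndef CONFIG_HSA_AMD stub bodies in the .c file with static inline fallbacks in the header, selected via IS_ENABLED(). A minimal sketch of the idiom (CONFIG_FOO and foo_init() are placeholders): callers compile unchanged whether the option is on or off, and the disabled branch costs nothing at runtime.

#include <linux/errno.h>
#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)
int foo_init(void);
#else
static inline int foo_init(void)
{
	return -ENOENT;	/* same failure code the real kgd2kfd_init() stub uses */
}
#endif
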
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4763bab7a4d0..62aa1a6f64ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -23,7 +23,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 50016bf9c427..fad3b91f74f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -24,7 +24,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
-#include "navi10_enum.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2d991da2cead..ac0a432a9bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -26,6 +26,7 @@
#include <linux/sched/task.h>
#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
@@ -453,7 +454,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
- unsigned long bo_size = bo->tbo.mem.size;
+ unsigned long bo_size = bo->tbo.base.size;
if (!va) {
pr_err("Invalid VA when adding BO to VM\n");
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = 1;
- bp.domain = alloc_domain;
- bp.flags = alloc_flags;
- bp.type = bo_type;
- bp.resv = NULL;
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
- domain_string(alloc_domain), ret);
+ domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
@@ -1281,7 +1277,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
@@ -1402,7 +1398,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
mutex_lock(&mem->lock);
domain = mem->domain;
- bo_size = bo->tbo.mem.size;
+ bo_size = bo->tbo.base.size;
pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
mem->va,
@@ -1506,7 +1502,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdkfd_process_info *process_info =
((struct amdgpu_vm *)vm)->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry;
struct bo_vm_reservation_context ctx;
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 306077884a67..6107ac91db25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
};
union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v11.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v11.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
- case 12:
- mem_channel_number = igp_info->v12.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v12.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v21.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 6333cada1e09..cfb1a9a04477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -155,7 +155,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
- if (!adev->asic_funcs->read_bios_from_rom)
+ if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
@@ -291,7 +291,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -304,7 +304,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -348,7 +348,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
- return amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 594a0108e90f..3e240b952e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
return 0;
error_free:
- if (info)
- kvfree(info);
+ kvfree(info);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a6667a2ca0db..0a25fecf488a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
/**
@@ -1427,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
struct dma_fence *fence;
spin_lock(&sched->job_list_lock);
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
fence = sched->ops->run_job(s_job);
dma_fence_put(fence);
}
@@ -1459,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
no_preempt:
spin_lock(&sched->job_list_lock);
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
sched->ops->free_job(s_job);
continue;
}
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
+ amdgpu_securedisplay_debugfs_init(adev);
+
amdgpu_fw_attestation_debugfs_init(adev);
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1cb7d73f7317..6447cd6ca5a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -930,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+ return pci_reset_function(adev->pdev);
+}
+
/*
* GPU doorbell aperture helpers function.
*/
@@ -1106,8 +1117,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
*/
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
- u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+ int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
unsigned i;
@@ -1138,6 +1148,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (!res)
return 0;
+ /* Limit the BAR size to what is available */
+ rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+ rbar_size);
+
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
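
For reference on the new clamp: resizable-BAR sizes are encoded as an index n meaning 2^n MB, and pci_rebar_get_possible_sizes() returns a bitmask with bit n set for each supported index, so fls(mask) - 1 yields the largest supported index. A small sketch of the arithmetic (clamp_rebar_size() is illustrative, not a kernel helper): with mask 0x700 (indices 8-10, i.e. 256 MB to 1 GB) a 4 GB request (index 12) is capped at index 10.

#include <linux/bitops.h>
#include <linux/minmax.h>

static int clamp_rebar_size(u32 possible_sizes, int wanted)
{
	int largest = fls(possible_sizes) - 1;	/* highest set bit = largest supported index */

	return min(largest, wanted);
}
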
@@ -1423,24 +1437,22 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(dev->pdev, PCI_D0);
- amdgpu_device_load_pci_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ r = pci_enable_device(pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
- drm_kms_helper_poll_enable(dev);
} else {
pr_info("switched off\n");
- drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
- amdgpu_device_cache_pci_state(dev->pdev);
+ amdgpu_device_cache_pci_state(pdev);
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3cold);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1703,8 +1715,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev_to_drm(adev);
- const char *pci_address_name = pci_name(ddev->pdev);
+ const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -2548,11 +2559,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2667,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+ if (adev->in_poweroff_reboot_com ||
+ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
@@ -3034,7 +3046,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
- DRM_INFO("Display Core has been requested via kernel parameter "
+ DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}
@@ -3117,7 +3129,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ if (amdgpu_sriov_vf(adev))
+ adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+ msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+ else if (amdgpu_passthrough(adev))
adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3397,7 +3412,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->ddev.pdev);
+ pci_enable_pcie_error_reporting(adev->pdev);
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
@@ -3720,14 +3735,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
r = amdgpu_device_ip_suspend_phase1(adev);
- amdgpu_amdkfd_suspend(adev, !fbcon);
+ amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+ if (adev->in_poweroff_reboot_com ||
+ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3804,7 +3820,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev, !fbcon);
+ r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
return r;
@@ -4155,8 +4171,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
continue;
spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&ring->sched.pending_list,
+ struct drm_sched_job, list);
spin_unlock(&ring->sched.job_list_lock);
if (job)
return true;
@@ -4206,6 +4222,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
break;
default:
goto disabled;
@@ -4455,6 +4473,46 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
up_write(&adev->reset_sem);
}
+/*
+ * Lock a list of amdgpu devices in a hive safely. If this is not a hive
+ * with multiple nodes, it behaves like amdgpu_device_lock_adev.
+ *
+ * Unlocking won't require a roll back.
+ */
+static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
+ return -ENODEV;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (!amdgpu_device_lock_adev(tmp_adev, hive))
+ goto roll_back;
+ }
+ } else if (!amdgpu_device_lock_adev(adev, hive))
+ return -EAGAIN;
+
+ return 0;
+roll_back:
+ if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
+ /*
+ * If the lock iteration broke off in the middle of a hive, there
+ * may be a race, or a hive device may have locked up independently.
+ * We may or may not be in trouble, so roll back the locks taken so
+ * far and print a warning.
+ */
+ dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
+ list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+ }
+ return -EAGAIN;
+}
+
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
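
amdgpu_device_lock_hive_adev() above uses the take-all-or-roll-back idiom: walk the list taking each node's lock, and on failure walk back from the failing node releasing what was taken. A generic sketch (struct node and node_lock()/node_unlock() are illustrative placeholders):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>

struct node {
	struct list_head link;
};

static bool node_lock(struct node *n);
static void node_unlock(struct node *n);

static int lock_all(struct list_head *head)
{
	struct node *n;

	list_for_each_entry(n, head, link) {
		if (!node_lock(n))
			goto roll_back;
	}
	return 0;

roll_back:
	/* resumes from the failing node and walks back toward the head */
	list_for_each_entry_continue_reverse(n, head, link)
		node_unlock(n);
	return -EAGAIN;
}
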
@@ -4568,20 +4626,36 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
amdgpu_put_xgmi_hive(hive);
+ if (job)
+ drm_sched_increase_karma(&job->base);
return 0;
}
mutex_lock(&hive->hive_lock);
}
/*
+ * Lock the device before we try to operate on the linked list.
+ * If we didn't get the device lock, don't touch the linked list since
+ * others may be iterating it.
+ */
+ r = amdgpu_device_lock_hive_adev(adev, hive);
+ if (r) {
+ dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
+ job ? job->base.id : -1);
+
+ /* even though we skipped this reset, we still need to mark the job guilty */
+ if (job)
+ drm_sched_increase_karma(&job->base);
+ goto skip_recovery;
+ }
+
+ /*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!hive)
- return -ENODEV;
if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
device_list_handle = &hive->device_list;
@@ -4592,13 +4666,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
- dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
- r = 0;
- goto skip_recovery;
- }
-
/*
* Try to put the audio codec into suspend state
* before gpu reset started.
@@ -4736,7 +4803,7 @@ skip_recovery:
amdgpu_put_xgmi_hive(hive);
}
- if (r)
+ if (r && r != -EAGAIN)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
@@ -4786,7 +4853,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
} else {
- if (speed_cap == PCIE_SPEED_16_0GT)
+ if (speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (speed_cap == PCIE_SPEED_16_0GT)
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4806,7 +4879,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
} else {
- if (platform_speed_cap == PCIE_SPEED_16_0GT)
+ if (platform_speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (platform_speed_cap == PCIE_SPEED_16_0GT)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4950,8 +5029,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
/* Fatal error, prepare for slot reset */
- case pci_channel_io_frozen:
- /*
+ case pci_channel_io_frozen:
+ /*
* Cancel and wait for all TDRs in progress if failing to
* set adev->in_gpu_reset in amdgpu_device_lock_adev
*
@@ -5042,7 +5121,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
goto out;
}
- adev->in_pci_err_recovery = true;
+ adev->in_pci_err_recovery = true;
r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
adev->in_pci_err_recovery = false;
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f764803c53a4..48cb33e5b382 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e42175e1acf1..47e0b48dc26f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
/**
* amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (attach->dev->driver == adev->dev->driver)
return 0;
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0)
+ goto out;
+
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
- return r;
+ goto out;
/*
* We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
- return r;
+ goto out;
bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
return 0;
+
+out:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
}
/**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
/**
@@ -269,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
- bo->tbo.num_pages);
+ bo->tbo.ttm->num_pages);
if (IS_ERR(sgt))
return sgt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72efd579ec5e..4575192d9b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
uint amdgpu_force_long_training;
int amdgpu_job_hang_limit;
int amdgpu_lbpw = -1;
@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
* DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
*/
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
/**
@@ -1085,6 +1089,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -1092,6 +1098,7 @@ static const struct pci_device_id pciidlist[] = {
/* Sienna_Cichlid */
{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1204,7 +1211,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ddev->pdev = pdev;
pci_set_drvdata(pdev, ddev);
ret = amdgpu_driver_load_kms(adev, ent->driver_data);
@@ -1264,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
+ adev->in_poweroff_reboot_com = true;
amdgpu_device_ip_suspend(adev);
+ adev->in_poweroff_reboot_com = false;
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -1306,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_suspend(drm_dev, true);
+ adev->in_poweroff_reboot_com = true;
+ r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_poweroff_reboot_com = false;
+ return r;
}
static int amdgpu_pmops_restore(struct device *dev)
@@ -1342,11 +1355,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
adev->in_runpm = true;
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
- if (ret)
+ if (ret) {
+ adev->in_runpm = false;
return ret;
+ }
if (amdgpu_device_supports_atpx(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
@@ -1399,7 +1413,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
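A quick arithmetic check of the new pp_feature_mask default: clearing bit 14 (OverDrive) and bit 19 (GFX DCS) from an all-ones mask yields exactly 0xfff7bfff. A standalone sketch in plain userspace C, no amdgpu headers assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0xffffffffu;	/* all power-play features enabled */

	mask &= ~(UINT32_C(1) << 14);	/* OverDrive disabled by default */
	mask &= ~(UINT32_C(1) << 19);	/* GFX DCS disabled by default */

	printf("0x%08x\n", mask);	/* prints 0xfff7bfff */
	return 0;
}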
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686..51cd49c6f38f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+ vga_switcheroo_client_fb_set(adev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573..8d1ad294cb02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
uint16_t AttFwIdV2; /* V2 FW ID field */
uint32_t AttFWVersion; /* FW Version */
uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */
- uint16_t AttSource; /* FW source indicator */
- uint16_t RecordValid; /* Indicates whether the record is a valid entry */
- uint8_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
- uint8_t Reserved;
+ uint8_t AttSource; /* FW source indicator */
+ uint8_t RecordValid; /* Indicates whether the record is a valid entry */
+ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
} FW_ATT_RECORD;
static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0a1fee1f5f6..b443907afcea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
-retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in reserved area 0x%LX\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address >= AMDGPU_GMC_HOLE_START &&
args->va_address < AMDGPU_GMC_HOLE_END) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
args->va_address, AMDGPU_GMC_HOLE_START,
AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (args->va_address + args->map_size > vm_size) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
args->va_address + args->map_size, vm_size);
return -EINVAL;
}
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(dev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cd2c676a2797..8e0a6c62322e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
}
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
- int pipe, int queue)
+ struct amdgpu_ring *ring)
{
- bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
- int cond;
- /* Policy: alternate between normal and high priority */
- cond = multipipe_policy ? pipe : queue;
-
- return ((cond % 2) != 0);
+ /* Policy: use 1st queue as high priority compute queue if we
+ * have more than one compute queue.
+ */
+ if (adev->gfx.num_compute_rings > 1 &&
+ ring == &adev->gfx.compute_ring[0])
+ return true;
+ return false;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b5a8f4642cc..72dbcd2bc6a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
- int pipe, int queue);
+ struct amdgpu_ring *ring);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6e679db5e46f..fe1a39ffda72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+ if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 000000000000..43caf9f8cc11
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+
+struct amdgpu_hdp_funcs {
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+ const struct amdgpu_hdp_funcs *funcs;
+};
+
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e..bca4dddd5a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 024d0a563a65..7645223ea0ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
ring->funcs->emit_mem_sync(ring);
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, true);
+
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
ring->current_ctx = fence_ctx;
if (vm && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
+
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, false);
+
amdgpu_ring_commit(ring);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dcd9b4a8e20b..dc852af4f3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -205,3 +205,48 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
+{
+ /* wptr/rptr are in bytes! */
+ u32 ring_index = ih->rptr >> 2;
+ uint32_t dw[8];
+
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+ dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+ dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+ dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+ dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+ entry->client_id = dw[0] & 0xff;
+ entry->src_id = (dw[0] >> 8) & 0xff;
+ entry->ring_id = (dw[0] >> 16) & 0xff;
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
+ entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+ entry->timestamp_src = dw[2] >> 31;
+ entry->pasid = dw[3] & 0xffff;
+ entry->pasid_src = dw[3] >> 31;
+ entry->src_data[0] = dw[4];
+ entry->src_data[1] = dw[5];
+ entry->src_data[2] = dw[6];
+ entry->src_data[3] = dw[7];
+
+ /* wptr/rptr are in bytes! */
+ ih->rptr += 32;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3c9cfe7eecff..6ed4a85fc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -30,6 +30,18 @@
struct amdgpu_device;
struct amdgpu_iv_entry;
+struct amdgpu_ih_regs {
+ uint32_t ih_rb_base;
+ uint32_t ih_rb_base_hi;
+ uint32_t ih_rb_cntl;
+ uint32_t ih_rb_wptr;
+ uint32_t ih_rb_rptr;
+ uint32_t ih_doorbell_rptr;
+ uint32_t ih_rb_wptr_addr_lo;
+ uint32_t ih_rb_wptr_addr_hi;
+ uint32_t psp_reg_id;
+};
+
/*
* R6xx+ IH ring
*/
@@ -53,6 +65,7 @@ struct amdgpu_ih_ring {
bool enabled;
unsigned rptr;
atomic_t lock;
+ struct amdgpu_ih_regs ih_regs;
};
/* provided by the ih block */
@@ -75,5 +88,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- } else if (adev->irq.virq[src_id]) {
+ } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+ adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
} else if (!adev->irq.client[client_id].sources) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ff..ff48101bab55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
}
/* Signal all jobs already scheduled to HW */
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624..3c37cf1ae8b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
flags |= AMD_IS_PX;
parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
*/
r = amdgpu_device_init(adev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -199,7 +199,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
/* only need to skip on ATPX */
@@ -735,10 +735,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!dev_info)
return -ENOMEM;
- dev_info->device_id = dev->pdev->device;
+ dev_info->device_id = adev->pdev->device;
dev_info->chip_rev = adev->rev_id;
dev_info->external_rev = adev->external_rev_id;
- dev_info->pci_rev = dev->pdev->revision;
+ dev_info->pci_rev = adev->pdev->revision;
dev_info->family = adev->family;
dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index e62cc0e1a5ad..7c11bce4514b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -57,7 +57,6 @@ struct amdgpu_nbio_funcs {
u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
u32 (*get_rev_id)(struct amdgpu_device *adev);
void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
u32 (*get_memsize)(struct amdgpu_device *adev);
void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size);
@@ -89,6 +88,7 @@ struct amdgpu_nbio_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
+ void (*program_aspm)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25ec4d57333f..4b29b8205442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r)
return r;
@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -911,10 +911,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_flags = bo->tbo.mem.placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
+ if ((mem_type == TTM_PL_VRAM) &&
+ (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+ !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+ return -EINVAL;
+
ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
@@ -930,7 +936,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -983,6 +988,7 @@ error:
*/
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 79120ec41396..9ac37569823f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..839917eb7bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
#include "psp_v12_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -249,7 +250,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
- int timeout = 2000;
+ int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -282,7 +283,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
ras_intr = amdgpu_ras_intr_triggered();
if (ras_intr)
break;
- msleep(1);
+ usleep_range(10, 100);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
@@ -563,7 +564,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1316,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1329,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1468,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1481,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free dtm shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -1642,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
// RAP end
+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for securedisplay ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_securedisplay_ucode_size,
+ psp->securedisplay_context.securedisplay_shared_mc_addr,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (ret)
+ goto failed;
+
+ psp->securedisplay_context.securedisplay_initialized = true;
+ psp->securedisplay_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+ kfree(cmd);
+ return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+ int ret;
+ struct securedisplay_cmd *securedisplay_cmd;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+ !psp->adev->psp.ta_securedisplay_start_addr) {
+ dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->securedisplay_context.securedisplay_initialized) {
+ ret = psp_securedisplay_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_securedisplay_load(psp);
+ if (ret)
+ return ret;
+
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (ret) {
+ psp_securedisplay_unload(psp);
+
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ }
+
+ return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return 0;
+
+ ret = psp_securedisplay_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ /* free securedisplay shared memory */
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ return -EINVAL;
+
+ mutex_lock(&psp->securedisplay_context.mutex);
+
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+ mutex_unlock(&psp->securedisplay_context.mutex);
+
+ return ret;
+}
+/* SECUREDISPLAY end */
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -2116,6 +2296,11 @@ skip_memalloc:
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
return 0;
@@ -2166,6 +2351,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
@@ -2230,6 +2416,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate rap ta\n");
return ret;
}
+ ret = psp_securedisplay_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate securedisplay ta\n");
+ return ret;
+ }
}
ret = psp_asd_unload(psp);
@@ -2313,6 +2504,11 @@ static int psp_resume(void *handle)
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
mutex_unlock(&adev->firmware.mutex);
@@ -2589,11 +2785,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
@@ -2620,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_rap_start_addr = ucode_start_addr;
break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
+ psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->ta_securedisplay_start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
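The psp_cmd_submit_buf() timeout change above keeps the total wait budget in the same range while polling far more often: the old loop allowed 2000 iterations at 1 ms each (about 2 s), the new one 20000 iterations at 10-100 us each (roughly 0.2-2 s), so completions are noticed much sooner. A standalone check of that arithmetic, with the sleep bounds taken from the usleep_range() call:

#include <stdio.h>

int main(void)
{
	double old_budget = 2000 * 1e-3;	/* 2000 polls x 1 ms = 2.0 s */
	double new_lo = 20000 * 10e-6;		/* 20000 polls x 10 us = 0.2 s */
	double new_hi = 20000 * 100e-6;		/* 20000 polls x 100 us = 2.0 s */

	printf("old: %.1f s, new: %.1f-%.1f s\n", old_budget, new_lo, new_hi);
	return 0;
}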
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index da250bc1ac57..cb50ba445f8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,6 +30,7 @@
#include "ta_xgmi_if.h"
#include "ta_ras_if.h"
#include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
@@ -40,6 +41,7 @@
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
+#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000
#define PSP_FW_NAME_LEN 0x24
@@ -171,6 +173,15 @@ struct psp_rap_context {
struct mutex mutex;
};
+struct psp_securedisplay_context {
+ bool securedisplay_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *securedisplay_shared_bo;
+ uint64_t securedisplay_shared_mc_addr;
+ void *securedisplay_shared_buf;
+ struct mutex mutex;
+};
+
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
@@ -298,12 +309,17 @@ struct psp_context
uint32_t ta_rap_ucode_size;
uint8_t *ta_rap_start_addr;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_ucode_size;
+ uint8_t *ta_securedisplay_start_addr;
+
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
struct psp_rap_context rap_context;
+ struct psp_securedisplay_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
};
@@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..1fb2a91ad30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
dev_warn(adev->dev, "Failed to allow XGMI power down");
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "Failed to allow df cstate");
return ret;
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 1a612f51ecd9..b644c78475fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
unsigned int irq_type, unsigned int hw_prio)
{
- int r, i;
+ int r;
int sched_hw_submission = amdgpu_sched_hw_submission;
u32 *num_sched;
u32 hw_ip;
@@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = DRM_SCHED_PRIORITY_NORMAL;
- mutex_init(&ring->priority_mutex);
+ ring->hw_prio = hw_prio;
if (!ring->no_scheduler) {
hw_ip = ring->funcs->type;
@@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
&ring->sched;
}
- for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
- atomic_set(&ring->num_jobs[i], 0);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7112137689db..56acec1075ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -197,6 +197,7 @@ struct amdgpu_ring_funcs {
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
void (*emit_mem_sync)(struct amdgpu_ring *ring);
+ void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};
struct amdgpu_ring {
@@ -242,11 +243,7 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
-
- atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
- struct mutex priority_mutex;
- /* protected by priority_mutex */
- int priority;
+ int hw_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
new file mode 100644
index 000000000000..834440ab9ff7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_securedisplay.h"
+
+/**
+ * DOC: AMDGPU SECUREDISPLAY debugfs test interface
+ *
+ * Usage:
+ * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test
+ *
+ * opcode:
+ * 1: Query whether the TA is responding; used only for validation purposes
+ * 2: Send Region of Interest and CRC value to I2C. (uint32)phy_id is
+ * sent to determine which DIO scratch register should be used to get
+ * the ROI, and i2c_buf is received as the output.
+ *
+ * See the header file ta_securedisplay_if.h for more detail.
+ *
+ */
+
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status)
+{
+ switch (status) {
+ case TA_SECUREDISPLAY_STATUS__SUCCESS:
+ break;
+ case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE:
+ dev_err(psp->adev->dev, "Secure display: Generic Failure.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
+ dev_err(psp->adev->dev, "Secure display: Invalid Parameter.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
+ dev_err(psp->adev->dev, "Secure display: Null Pointer.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
+ break;
+ default:
+ dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
+ }
+}
+
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id)
+{
+ *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+ memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
+ (*cmd)->cmd_id = command_id;
+}
+
+static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct securedisplay_cmd *securedisplay_cmd;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t phy_id;
+ uint32_t op;
+ int i;
+ char str[64];
+ char i2c_output[256];
+ int ret;
+
+ if (*pos || size > sizeof(str) - 1)
+ return -EINVAL;
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return -EFAULT;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ if (size < 3)
+ sscanf(str, "%u ", &op);
+ else
+ sscanf(str, "%u %u", &op, &phy_id);
+
+ switch (op) {
+ case 1:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS)
+ dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ else
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ break;
+ case 2:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ memset(i2c_output, 0, sizeof(i2c_output));
+ for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
+ sprintf(i2c_output + strlen(i2c_output), " 0x%X",
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is: %s\n", i2c_output);
+ } else {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ }
+ break;
+ default:
+ dev_err(adev->dev, "Invalid input: %s\n", str);
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_securedisplay_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ if (!adev->psp.securedisplay_context.securedisplay_initialized)
+ return;
+
+ debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
+ adev, &amdgpu_securedisplay_debugfs_ops);
+#endif
+}
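As a usage illustration for the DOC block above, the securedisplay_test node can be driven from userspace with an ordinary write. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and the GPU is DRI minor 0 (both paths are assumptions; adjust for the actual device):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: <debugfs_dir>/dri/xxx/securedisplay_test with xxx = 0. */
	const char *node = "/sys/kernel/debug/dri/0/securedisplay_test";
	const char *cmd = "1";	/* opcode 1: query whether the TA responds */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;	/* results are reported through dmesg */
}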
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
new file mode 100644
index 000000000000..fe98574748f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_SECUREDISPLAY_H
+#define _AMDGPU_SECUREDISPLAY_H
+
+#include "amdgpu.h"
+#include "ta_secureDisplay_if.h"
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status);
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 6752d8b13118..792d20261846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
__entry->type = bo->tbo.mem.mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4d8f19ab1014..9fd2157b133a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
@@ -637,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+ atomic64_add(bo->base.size, &adev->num_bytes_moved);
amdgpu_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -918,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
goto release_sg;
/* convert SG to linear array of pages and dma addresses */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -1265,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
ttm->sg = sgt;
}
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address,
- ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
@@ -2124,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return r;
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
while (num_pages) {
@@ -2154,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
while (num_pages) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 0e43b46d3ab5..46449e70348b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_dtm_ucode_version;
uint32_t ta_dtm_offset_bytes;
uint32_t ta_dtm_size_bytes;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_offset_bytes;
+ uint32_t ta_securedisplay_size_bytes;
};
enum ta_fw_type {
@@ -132,6 +135,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_HDCP,
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
+ TA_FW_TYPE_PSP_SECUREDISPLAY,
};
struct ta_fw_bin_desc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..e2ed4689118a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1170,7 +1170,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
@@ -1202,7 +1202,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e4..ea6a62f67e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc..99b82f3c2617 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr;
+ void *msg = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
+ msg = amdgpu_bo_kptr(bo);
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
if (fence)
*fence = dma_fence_get(f);
@@ -536,7 +538,7 @@ err_free:
err:
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
return r;
}
@@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2d51b7694d1f..5da04d45b637 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+ int ret;
- amdgpu_virt_read_pf2vf_data(adev);
+ ret = amdgpu_virt_read_pf2vf_data(adev);
+ if (ret)
+ goto out;
amdgpu_virt_write_vf2pf_data(adev);
+out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
@@ -571,8 +575,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
DRM_INFO("clean up the vf2pf work item\n");
- flush_delayed_work(&adev->virt.vf2pf_work);
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0768c8686983..ad91c0c3c423 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+ ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ &vm->lru_bulk_move);
if (bo->shadow)
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+ &bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d2de2a720a3d..c89b66bb70e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -473,6 +473,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
for (i = 0; pages_left >= pages_per_node; ++i) {
unsigned long pages = rounddown_pow_of_two(pages_left);
+ /* Limit maximum size to 2GB due to SG table limitations */
+ pages = min(pages, (2UL << (30 - PAGE_SHIFT)));
+
r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
pages_per_node, 0,
place->fpfn, lpfn,
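The 2 GiB cap above is easy to verify numerically: with 4 KiB pages, PAGE_SHIFT is 12, so 2UL << (30 - 12) is 524288 pages, which times 4096 bytes is exactly 2 GiB. A standalone check; hard-coding PAGE_SHIFT is an assumption, since the kernel value is architecture-dependent:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long pages = 2UL << (30 - PAGE_SHIFT);

	/* 524288 pages << 12 = 2147483648 bytes = 2 GiB */
	printf("%lu pages = %lu bytes\n", pages, pages << PAGE_SHIFT);
	return 0;
}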
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 541ef6be390f..659b385b27b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
- struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+ struct amdgpu_hive_info *hive = NULL;
int ret;
if (!adev->gmc.xgmi.hive_id)
@@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
mutex_lock(&xgmi_mutex);
- if (!list_empty(&xgmi_hive_list)) {
- list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
- if (hive->hive_id == adev->gmc.xgmi.hive_id)
- goto pro_end;
- }
+ list_for_each_entry(hive, &xgmi_hive_list, node) {
+ if (hive->hive_id == adev->gmc.xgmi.hive_id)
+ goto pro_end;
}
hive = kzalloc(sizeof(*hive), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 921a69abda55..5b90efd6f6d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -27,7 +27,6 @@
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_default.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 66c183ddd43e..7b1b18350bf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -26,7 +26,6 @@
#include "athub/athub_2_1_0_offset.h"
#include "athub/athub_2_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 13737b317f7c..4d6832cc7fb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
@@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
- return r;
-}
-
-/**
- * cik_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
-{
- int r;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = cik_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index da37f8a900af..307c01301c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -194,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not-overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
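
The recovery path re-reads mmIH_RB_WPTR so it does not act on a stale shadow copy whose overflow bit was already handled, then resumes the read pointer 16 entries past wptr, wrapped back into the ring. The wrap arithmetic, assuming a hypothetical 4096-entry ring:

    #include <stdio.h>

    int main(void)
    {
        /* assumption: a 4096-entry ring, ptr_mask = ring size - 1 */
        unsigned int ptr_mask = 4096 - 1;
        unsigned int wptr = 4090;

        /* resume 16 entries past wptr, wrapped back into the ring */
        unsigned int rptr = (wptr + 16) & ptr_mask;

        printf("rptr resumes at %u\n", rptr);  /* 10 */
        return 0;
    }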
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index ffcc64ec6473..9810af712cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -294,7 +294,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[21] = {
+ } common_modes[] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -312,13 +312,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1600, 1200},
{1920, 1080},
{1920, 1200},
+ {2560, 1440},
{4096, 3112},
{3656, 2664},
{3840, 2160},
{4096, 2160},
};
- for (i = 0; i < 21; i++) {
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
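
Switching from the hard-coded bound to ARRAY_SIZE keeps the loop in sync with the initializer, so adding {2560, 1440} cannot silently fall outside the iteration. The macro is just a sizeof ratio:

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        static const struct { int w, h; } common_modes[] = {
            {  640,  480 }, { 1920, 1080 }, { 2560, 1440 }, { 3840, 2160 },
        };
        unsigned int i;

        /* the bound tracks the initializer automatically */
        for (i = 0; i < ARRAY_SIZE(common_modes); i++)
            printf("%dx%d\n", common_modes[i].w, common_modes[i].h);
        return 0;
    }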
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba1086784525..45d1172b7bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -38,7 +38,6 @@
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
@@ -71,6 +70,11 @@
#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006
+#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1
+
#define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55
#define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0
#define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0
@@ -99,6 +103,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@@ -115,6 +123,9 @@
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh 0x0000FFFFL
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
@@ -160,6 +171,9 @@
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3237,7 +3251,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3324,6 +3338,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -3768,9 +3783,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
- case CHIP_VANGOGH:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- break;
default:
break;
}
@@ -4480,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
@@ -4926,8 +4937,15 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
- uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ uint32_t tcc_disable;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
+ } else {
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ }
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
@@ -5691,7 +5709,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -5769,7 +5787,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -5846,7 +5864,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6215,7 +6233,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -6523,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -7192,6 +7209,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (adev->asic_type == CHIP_SIENNA_CICHLID)
gfx_v10_3_program_pbb_mode(adev);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ gfx_v10_3_set_power_brake_sequence(adev);
+
return r;
}
@@ -7377,8 +7397,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ break;
+ default:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ break;
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@@ -7812,6 +7840,20 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data);
+
+ /*
+ * CGPG enablement requires programming the hysteresis register
+ * RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG with the desired CGPG hysteresis
+ * value in refclk counts. Note that the RLC FW has been modified to take
+ * 16 bits from RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits.
+ *
+ * The RLC team recommends setting RLC_PG_DELAY_3 to 200us (0x4E20)
+ * as the starting point for CGPG enablement.
+ */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) {
+ data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+ WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+ }
}
static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
@@ -7873,6 +7915,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
break;
case CHIP_VANGOGH:
gfx_v10_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -9169,6 +9212,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+ (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+ (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+ (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+ (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+ (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+ (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+ (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+ (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
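
The Vangogh branch above reads the same GOLDEN_TSC counter at different register offsets; either way the 64-bit value is assembled from two 32-bit halves (the driver holds gpu_clock_mutex so the pair is read consistently). A standalone sketch with hypothetical register reads:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical values standing in for the GOLDEN_TSC_COUNT register pair */
    static uint32_t read_lower(void) { return 0xdeadbeef; }
    static uint32_t read_upper(void) { return 0x00000002; }

    int main(void)
    {
        uint64_t clock = (uint64_t)read_lower() |
                         ((uint64_t)read_upper() << 32);

        printf("clock = 0x%016llx\n", (unsigned long long)clock);
        return 0;
    }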
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 37639214cbbb..84d2eaa38101 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
@@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
@@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
+
+/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f
+static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+ val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
+ break;
+ case 1:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
+ break;
+ case 2:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
+ break;
+ case 3:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff
+static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+ /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
+ * the number of gfx waves. Setting 5 bits makes sure gfx only gets
+ * around 25% of GPU resources.
+ */
+ val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+ amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get the best QoS for high priority compute jobs.
+ *
+ * amdgpu controls only the 1st ME (0-3 CS pipes).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
+ }
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
- 7, /* gfx_v8_0_emit_mem_sync_compute */
+ 7 + /* gfx_v8_0_emit_mem_sync_compute */
+ 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
+ .emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
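
The "around 25%" in the comment falls out of the register width: with a 7-bit multiplier, full throughput is 0x7f, and the limit value 0x1f is 31/127 of that:

    #include <stdio.h>

    int main(void)
    {
        /* assumption: a 7-bit multiplier, so full throughput is 0x7f */
        unsigned int full = 0x7f, limited = 0x1f;

        printf("gfx share when limited: %.1f%%\n",
               100.0 * limited / full);  /* ~24.4%, i.e. "around 25%" */
        return 0;
    }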
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5f4805e4d04a..65db88bb6cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,7 +38,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
-#include "hdp/hdp_4_0_offset.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
@@ -53,6 +52,7 @@
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
@@ -2228,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
return amdgpu_ring_init(adev, ring, 1024,
@@ -3391,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev,
- ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6671,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
}
+static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+ /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+ val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
+ break;
+ case 1:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
+ break;
+ case 2:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
+ break;
+ case 3:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+ /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
+ * the number of gfx waves. Setting 5 bits makes sure gfx only gets
+ * around 25% of GPU resources.
+ */
+ val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
+ val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get the best QoS for high priority compute jobs.
+ *
+ * amdgpu controls only the 1st ME (0-3 CS pipes).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
+ }
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6760,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6776,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .emit_wave_limit = gfx_v9_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
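
The emit_frame_size arithmetic above assumes each emitted register write costs 5 dwords: one write for mmSPI_WCL_PIPE_PERCENT_GFX, plus one per CS register for the 3 pipes other than the ring's own (num_pipe_per_mec is 4), hence the "+ 5" and "+ 15". As a check:

    #include <stdio.h>

    int main(void)
    {
        /* assumption: one emitted register write is 5 dwords on this ring */
        unsigned int wreg_dw = 5;
        unsigned int gfx_regs = 1;      /* mmSPI_WCL_PIPE_PERCENT_GFX */
        unsigned int cs_regs = 4 - 1;   /* 4 pipes per MEC, minus our own */

        /* matches the "+ 5" and "+ 15" added to emit_frame_size above */
        printf("gfx: %u dwords, cs: %u dwords\n",
               wreg_dw * gfx_regs, wreg_dw * cs_regs);
        return 0;
    }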
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5648c48be77f..3b7c6c31fce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -27,8 +27,6 @@
#include "gmc_v10_0.h"
#include "umc_v8_7.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -312,7 +310,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
@@ -995,7 +993,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
int r;
bool value;
- u32 tmp;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1014,15 +1011,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
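
The gmc code no longer pokes HDP registers directly; it dispatches through adev->hdp.funcs, so each HDP generation supplies its own flush/init callbacks. A toy rendition of that ops-table pattern (names are illustrative, not the driver's):

    #include <stdio.h>

    /* illustrative ops table, loosely modeled on amdgpu_hdp_funcs */
    struct hdp_funcs {
        void (*init_registers)(void);
        void (*flush_hdp)(void);
    };

    static void v5_init(void)  { puts("hdp v5.0: init registers"); }
    static void v5_flush(void) { puts("hdp v5.0: flush"); }

    static const struct hdp_funcs hdp_v5_funcs = {
        .init_registers = v5_init,
        .flush_hdp      = v5_flush,
    };

    int main(void)
    {
        /* callers dispatch through the table, not IP-specific registers */
        const struct hdp_funcs *funcs = &hdp_v5_funcs;

        funcs->init_registers();
        funcs->flush_hdp();
        return 0;
    }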
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e22268f9dba7..3686e777c76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -31,8 +31,6 @@
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -241,60 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = {
};
static const char *mmhub_client_ids_arcturus[][2] = {
+ [0][0] = "DBGU1",
+ [1][0] = "XDP",
[2][0] = "MP1",
- [3][0] = "MP0",
- [10][0] = "UTCL2",
- [13][0] = "OSS",
[14][0] = "HDP",
- [15][0] = "SDMA0",
- [32+15][0] = "SDMA1",
- [64+15][0] = "SDMA2",
- [96+15][0] = "SDMA3",
- [128+15][0] = "SDMA4",
- [160+11][0] = "JPEG",
- [160+12][0] = "VCN",
- [160+13][0] = "VCNU",
- [160+15][0] = "SDMA5",
- [192+10][0] = "UTCL2",
- [192+11][0] = "JPEG1",
- [192+12][0] = "VCN1",
- [192+13][0] = "VCN1U",
- [192+15][0] = "SDMA6",
- [224+15][0] = "SDMA7",
+ [171][0] = "JPEG",
+ [172][0] = "VCN",
+ [173][0] = "VCNU",
+ [203][0] = "JPEG1",
+ [204][0] = "VCN1",
+ [205][0] = "VCN1U",
+ [256][0] = "SDMA0",
+ [257][0] = "SDMA1",
+ [258][0] = "SDMA2",
+ [259][0] = "SDMA3",
+ [260][0] = "SDMA4",
+ [261][0] = "SDMA5",
+ [262][0] = "SDMA6",
+ [263][0] = "SDMA7",
+ [384][0] = "OSS",
[0][1] = "DBGU1",
[1][1] = "XDP",
[2][1] = "MP1",
- [3][1] = "MP0",
- [13][1] = "OSS",
[14][1] = "HDP",
- [15][1] = "SDMA0",
- [32+15][1] = "SDMA1",
- [64+15][1] = "SDMA2",
- [96+15][1] = "SDMA3",
- [128+15][1] = "SDMA4",
- [160+11][1] = "JPEG",
- [160+12][1] = "VCN",
- [160+13][1] = "VCNU",
- [160+15][1] = "SDMA5",
- [192+11][1] = "JPEG1",
- [192+12][1] = "VCN1",
- [192+13][1] = "VCN1U",
- [192+15][1] = "SDMA6",
- [224+15][1] = "SDMA7",
-};
-
-static const u32 golden_settings_vega10_hdp[] =
-{
- 0xf64, 0x0fffffff, 0x00000000,
- 0xf65, 0x0fffffff, 0x00000000,
- 0xf66, 0x0fffffff, 0x00000000,
- 0xf67, 0x0fffffff, 0x00000000,
- 0xf68, 0x0fffffff, 0x00000000,
- 0xf6a, 0x0fffffff, 0x00000000,
- 0xf6b, 0x0fffffff, 0x00000000,
- 0xf6c, 0x0fffffff, 0x00000000,
- 0xf6d, 0x0fffffff, 0x00000000,
- 0xf6e, 0x0fffffff, 0x00000000,
+ [171][1] = "JPEG",
+ [172][1] = "VCN",
+ [173][1] = "VCNU",
+ [203][1] = "JPEG1",
+ [204][1] = "VCN1",
+ [205][1] = "VCN1U",
+ [256][1] = "SDMA0",
+ [257][1] = "SDMA1",
+ [258][1] = "SDMA2",
+ [259][1] = "SDMA3",
+ [260][1] = "SDMA4",
+ [261][1] = "SDMA5",
+ [262][1] = "SDMA6",
+ [263][1] = "SDMA7",
+ [384][1] = "OSS",
};
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
@@ -1571,7 +1553,6 @@ static int gmc_v9_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
int r, i;
- u32 tmp;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1583,31 +1564,13 @@ static int gmc_v9_0_hw_init(void *handle)
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
-
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, true);
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
- WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
- break;
- default:
- break;
- }
-
- WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
-
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+ adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
new file mode 100644
index 000000000000..e46621fed5b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v4_0.h"
+#include "amdgpu_ras.h"
+
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL 0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ else
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
+
+static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back the HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
+static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
+ adev->asic_type == CHIP_RAVEN) {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ else
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ } else {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+ else
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+ }
+}
+
+static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_HDP_LS */
+ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+ if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+}
+
+static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+}
+
+const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
+ .flush_hdp = hdp_v4_0_flush_hdp,
+ .invalidate_hdp = hdp_v4_0_invalidate_hdp,
+ .reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
+ .update_clock_gating = hdp_v4_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v4_0_get_clockgating_state,
+ .init_registers = hdp_v4_0_init_registers,
+};
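
hdp_v4_0_update_clock_gating follows the common def/data idiom: read the register once, compute the new value, and only issue the (comparatively slow) MMIO write when something actually changed. A standalone sketch, with a hypothetical LS enable bit and a plain variable standing in for the register:

    #include <stdint.h>
    #include <stdio.h>

    #define LS_ENABLE_MASK 0x1u  /* hypothetical bit, for illustration */

    static uint32_t reg;  /* stands in for an MMIO register */

    static void wreg(uint32_t v) { reg = v; puts("register written"); }

    static void update_ls(int enable)
    {
        uint32_t def, data;

        def = data = reg;  /* read once, keep the original to compare */
        if (enable)
            data |= LS_ENABLE_MASK;
        else
            data &= ~LS_ENABLE_MASK;

        if (def != data)   /* skip the slow MMIO write when unchanged */
            wreg(data);
    }

    int main(void)
    {
        update_ls(1);  /* writes */
        update_ls(1);  /* no-op: bit already set */
        return 0;
    }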
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
new file mode 100644
index 000000000000..d1e6399e8c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V4_0_H__
+#define __HDP_V4_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
new file mode 100644
index 000000000000..7a15e669b68d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v5_0.h"
+
+#include "hdp/hdp_5_0_0_offset.h"
+#include "hdp/hdp_5_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ } else {
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+ }
+}
+
+static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch,
+ * force on the IPH & RC clocks */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ IPH_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* HDP 5.0 doesn't support dynamic power mode switching;
+ * disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, enable);
+ /* RC should not use shutdown mode, fall back to DS */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ }
+
+ /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* restore IPH & RC clock override after clock/power mode changing */
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+}
+
+static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_0_update_mem_power_gating(adev, enable);
+ hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
+}
+
+static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
+ tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
+ WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+}
+
+const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
+ .flush_hdp = hdp_v5_0_flush_hdp,
+ .invalidate_hdp = hdp_v5_0_invalidate_hdp,
+ .update_clock_gating = hdp_v5_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_0_get_clockgating_state,
+ .init_registers = hdp_v5_0_init_registers,
+};
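
The REG_SET_FIELD calls used throughout hdp_v5_0 are read-modify-write on named bitfields: clear the field's mask, then OR in the shifted value. A simplified standalone rendition (the kernel's real macro also takes the register name when pasting the SHIFT/MASK tokens):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field layout, mimicking the generated SHIFT/MASK headers */
    #define FIELD_RB_ENABLE__SHIFT 0
    #define FIELD_RB_ENABLE_MASK   0x00000001u

    #define REG_SET_FIELD(orig, field, val)                              \
        (((orig) & ~FIELD_##field##_MASK) |                              \
         (((uint32_t)(val) << FIELD_##field##__SHIFT) & FIELD_##field##_MASK))

    int main(void)
    {
        uint32_t tmp = 0xffffff00;

        tmp = REG_SET_FIELD(tmp, RB_ENABLE, 1);
        printf("0x%08x\n", tmp);  /* 0xffffff01: field set, rest kept */
        return 0;
    }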
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
new file mode 100644
index 000000000000..2d5ec2b419f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V5_0_H__
+#define __HDP_V5_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 37d8b6ca4dab..cc957471f31e 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not-overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 985e454463e1..7f30629f21a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
return r;
}
- memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+ memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..ab9be5ad5a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data, def1, data1;
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
- data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
- data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}
@@ -525,17 +523,44 @@ static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
- data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
- else
- data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ uint32_t def, data, def1, data1, def2, data2;
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ } else {
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ }
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}
static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data, data1;
+ int data, data1, data2, data3;
if (amdgpu_sriov_vf(adev))
*flags = 0;
- data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
- data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
/* AMD_CG_SUPPORT_MC_MGCG */
- if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
- !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ }
/* AMD_CG_SUPPORT_MC_LS */
- if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+ && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+ && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
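
With the CG enable bits replaced by soft-override bits, mmhub_v2_3_get_clockgating now reports a feature as active only when every relevant override bit is clear across all the registers involved, hence the !(data & (...)) chains. The test pattern in miniature, with hypothetical masks:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical override bits, mirroring the DAGB0/MM_ATC masks above */
    #define OVR_WRREQ 0x1u
    #define OVR_WRRET 0x2u
    #define OVR_RDREQ 0x4u
    #define ALL_OVR   (OVR_WRREQ | OVR_WRRET | OVR_RDREQ)

    int main(void)
    {
        uint32_t data = 0;  /* all soft overrides cleared */

        /* gating is reported only when every override bit is clear */
        if (!(data & ALL_OVR))
            puts("AMD_CG_SUPPORT_MC_MGCG reported");
        return 0;
    }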
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 7767ccca526b..3ee481557fc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index dd5c1e6ce009..48e588d3c409 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 7ba229e43799..f4e4040bbd25 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -40,6 +40,53 @@
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
+ * navi10_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (NAVI10).
+ */
+static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
+
+/**
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
*
* @adev: amdgpu_device pointer
@@ -82,133 +129,66 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
}
/**
- * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Enable the interrupt ring buffer (NAVI10).
+ * Toggle the interrupt ring buffer (NAVI10).
*/
-static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- adev->irq.ih.enabled = true;
+ ih_regs = &ih->ih_regs;
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
- }
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ WREG32(ih_regs->ih_rb_cntl, tmp);
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
+ return 0;
}
/**
- * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
*
* @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
*
- * Disable the interrupt ring buffer (NAVI10).
+ * Toggle all the available interrupt ring buffers (NAVI10).
*/
-static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
- }
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
}
+ return 0;
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
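
The hunk above folds three near-identical enable/disable paths into navi10_ih_toggle_ring_interrupts and drives it from an array of ring pointers, skipping rings whose ring_size is zero. Below is a minimal, compilable userspace sketch of that consolidation pattern; the types, names, and side effects are illustrative stand-ins, not the kernel's API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for struct amdgpu_ih_ring. */
    struct ih_ring {
        const char *name;
        unsigned int ring_size;   /* zero means the ring was never allocated */
        bool enabled;
    };

    /* Models navi10_ih_toggle_ring_interrupts: one ring, one code path. */
    static int toggle_ring(struct ih_ring *ih, bool enable)
    {
        ih->enabled = enable;
        printf("%s %s\n", ih->name, enable ? "enabled" : "disabled");
        return 0;
    }

    /* Models navi10_ih_toggle_interrupts: iterate, skip unallocated rings. */
    static int toggle_all(struct ih_ring **rings, size_t n, bool enable)
    {
        for (size_t i = 0; i < n; i++) {
            if (rings[i]->ring_size) {
                int r = toggle_ring(rings[i], enable);
                if (r)
                    return r;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct ih_ring ih  = { "ih",  4096, false };
        struct ih_ring ih1 = { "ih1", 4096, false };
        struct ih_ring ih2 = { "ih2", 0,    false };   /* skipped */
        struct ih_ring *rings[] = { &ih, &ih1, &ih2 };

        return toggle_all(rings, sizeof(rings) / sizeof(rings[0]), true);
    }
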
@@ -253,22 +233,49 @@ static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
return ih_doorbell_rtpr;
}
-static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+/**
+ * navi10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (NAVI10).
+ */
+static int navi10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
- /* Reroute to IH ring 1 for VMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
- /* Reroute IH ring 1 for UMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = navi10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));
+
+ return 0;
}
/**
@@ -284,36 +291,21 @@ static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
*/
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_chicken;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
u32 tmp;
+ int ret;
+ int i;
/* disable irqs */
- navi10_ih_disable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
- if (adev->irq.ih1.ring_size)
- navi10_ih_reroute_ih(adev);
-
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
- if (ih->use_bus_addr) {
+ if (ih[0]->use_bus_addr) {
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -334,77 +326,17 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- navi10_ih_doorbell_rptr(ih));
-
- adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
- ih->doorbell_index);
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = navi10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- navi10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- navi10_ih_doorbell_rptr(ih));
}
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -418,10 +350,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- navi10_ih_enable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
/* enable wptr force update for self int */
force_update_wptr_for_self_int(adev, 0, 8, true);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -435,7 +372,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
force_update_wptr_for_self_int(adev, 0, 8, false);
- navi10_ih_disable_interrupts(adev);
+ navi10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -455,23 +392,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev)
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -486,68 +416,14 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
- * navi10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void navi10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* navi10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -557,22 +433,15 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
+ ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -591,6 +460,8 @@ static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -598,12 +469,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -685,23 +553,8 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
- if (adev->asic_type < CHIP_NAVI10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index =
- (adev->doorbell_index.ih + 1) << 1;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index =
- (adev->doorbell_index.ih + 2) << 1;
- }
+ /* initialize the ih control register offsets */
+ navi10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -717,6 +570,7 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -848,7 +702,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
.get_wptr = navi10_ih_get_wptr,
- .decode_iv = navi10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = navi10_ih_set_rptr
};
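
Taken together, the navi10_ih.c changes cache each ring's register offsets in ih->ih_regs once, at sw_init time via navi10_ih_init_register_offset, so helpers such as get_wptr, set_rptr, and irq_rearm can index the cached offsets instead of switching on which ring they were handed; the hand-rolled navi10_ih_decode_iv likewise gives way to the shared amdgpu_ih_decode_iv_helper. A small compilable sketch of the offset-caching idea, with invented offsets and simplified types:

    #include <stdint.h>
    #include <stdio.h>

    /* Per-ring register offsets, filled in once (models struct amdgpu_ih_regs). */
    struct ih_regs {
        uint32_t rb_cntl;
        uint32_t rb_rptr;
        uint32_t rb_wptr;
    };

    struct ih_ring {
        struct ih_regs regs;
    };

    /* One-time init; the offsets here are invented, not real NAVI10 values. */
    static void init_register_offset(struct ih_ring *ih, int ring1)
    {
        ih->regs.rb_cntl = ring1 ? 0x00c0 : 0x0080;
        ih->regs.rb_rptr = ring1 ? 0x00c4 : 0x0084;
        ih->regs.rb_wptr = ring1 ? 0x00c8 : 0x0088;
    }

    /* Later helpers read the cached offset - no "which ring is this" branching. */
    static uint32_t wptr_offset(const struct ih_ring *ih)
    {
        return ih->regs.rb_wptr;
    }

    int main(void)
    {
        struct ih_ring ih0, ih1;

        init_register_offset(&ih0, 0);
        init_register_offset(&ih1, 1);
        printf("ring0 wptr @0x%04x, ring1 wptr @0x%04x\n",
               (unsigned)wptr_offset(&ih0), (unsigned)wptr_offset(&ih1));
        return 0;
    }
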
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b5c3db16c2b0..05ddec7ba7e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -34,6 +34,14 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2
@@ -80,15 +88,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
@@ -359,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v2_3_program_ltr(adev);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -366,7 +470,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
.get_rev_id = nbio_v2_3_get_rev_id,
.mc_access_enable = nbio_v2_3_mc_access_enable,
- .hdp_flush = nbio_v2_3_hdp_flush,
.get_memsize = nbio_v2_3_get_memsize,
.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
@@ -380,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.init_registers = nbio_v2_3_init_registers,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
.enable_aspm = nbio_v2_3_enable_aspm,
+ .program_aspm = nbio_v2_3_program_aspm,
};
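
program_ltr and program_aspm above lean on one idiom throughout: read the register once into both def and data, modify data, and write back only when def != data, so registers that already hold the target value never see a redundant (and on PCIE paths, comparatively expensive) write. A standalone sketch of the idiom with the register access mocked out:

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_EN_MASK 0x00000001u

    static uint32_t mock_reg;                 /* stands in for a PCIE register */

    static uint32_t rreg32(void)       { return mock_reg; }
    static void     wreg32(uint32_t v) { mock_reg = v; puts("register written"); }

    static void enable_feature(void)
    {
        uint32_t def, data;

        def = data = rreg32();            /* one read, original kept in def */
        data |= FEATURE_EN_MASK;
        if (def != data)                  /* write back only on change */
            wreg32(data);
    }

    int main(void)
    {
        enable_feature();                 /* writes: bit was clear */
        enable_feature();                 /* silent: already enabled */
        return 0;
    }
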
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index d2f1fe55d388..83ea063388fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -29,6 +29,15 @@
#include "nbio/nbio_6_1_sh_mask.h"
#include "nbio/nbio_6_1_smn.h"
#include "vega10_enum.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -50,18 +59,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0,
- mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
- 0);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
-}
-
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
@@ -266,7 +263,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
.get_rev_id = nbio_v6_1_get_rev_id,
.mc_access_enable = nbio_v6_1_mc_access_enable,
- .hdp_flush = nbio_v6_1_hdp_flush,
.get_memsize = nbio_v6_1_get_memsize,
.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
@@ -277,4 +273,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
+ .remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index ae685813c419..3c00666a13e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -292,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
.get_rev_id = nbio_v7_0_get_rev_id,
.mc_access_enable = nbio_v7_0_mc_access_enable,
- .hdp_flush = nbio_v7_0_hdp_flush,
.get_memsize = nbio_v7_0_get_memsize,
.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index aa36022670f9..598ce0e93627 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -56,15 +56,6 @@ static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}
-static void nbio_v7_2_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
@@ -325,7 +316,6 @@ const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
.get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset,
.get_rev_id = nbio_v7_2_get_rev_id,
.mc_access_enable = nbio_v7_2_mc_access_enable,
- .hdp_flush = nbio_v7_2_hdp_flush,
.get_memsize = nbio_v7_2_get_memsize,
.sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index eadc9526d33f..4bc1d1434065 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -82,15 +82,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -541,7 +532,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
.get_rev_id = nbio_v7_4_get_rev_id,
.mc_access_enable = nbio_v7_4_mc_access_enable,
- .hdp_flush = nbio_v7_4_hdp_flush,
.get_memsize = nbio_v7_4_get_memsize,
.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
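
Each nbio_v*.c file above drops its private hdp_flush copy; HDP flushing now goes through the common adev->hdp.funcs table (hdp_v4_0_funcs / hdp_v5_0_funcs, wired up in soc15.c and nv.c later in this patch), as the psp_v11_0.c hunk's switch to adev->hdp.funcs->flush_hdp shows. A minimal sketch of replacing duplicated per-IP code with one shared ops table; all names here are illustrative:

    #include <stdio.h>

    struct soc;

    /* One shared ops table per HDP generation (models struct amdgpu_hdp_funcs). */
    struct hdp_funcs {
        void (*flush_hdp)(struct soc *soc);
    };

    struct soc {
        const struct hdp_funcs *hdp_funcs;
    };

    static void hdp_v4_0_flush(struct soc *soc) { (void)soc; puts("HDP 4.0 flush"); }
    static void hdp_v5_0_flush(struct soc *soc) { (void)soc; puts("HDP 5.0 flush"); }

    static const struct hdp_funcs hdp_v4_0 = { .flush_hdp = hdp_v4_0_flush };
    static const struct hdp_funcs hdp_v5_0 = { .flush_hdp = hdp_v5_0_flush };

    /* Callers flush through the table; NBIO code carries no private copy. */
    static void flush(struct soc *soc) { soc->hdp_funcs->flush_hdp(soc); }

    int main(void)
    {
        struct soc vega = { .hdp_funcs = &hdp_v4_0 };  /* Vega-class parts */
        struct soc navi = { .hdp_funcs = &hdp_v5_0 };  /* Navi-class parts */

        flush(&vega);
        flush(&navi);
        return 0;
    }
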
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6bee3677394a..160fa5f59805 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -38,9 +38,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
-#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"
#include "soc15.h"
@@ -50,6 +47,7 @@
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -62,6 +60,8 @@
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -203,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
+ u32 rom_index_offset, rom_data_offset;
if (bios == NULL)
return false;
@@ -215,11 +216,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
+ rom_index_offset =
+ adev->smuio.funcs->get_rom_index_offset(adev);
+ rom_data_offset =
+ adev->smuio.funcs->get_rom_data_offset(adev);
+
/* set rom index to 0 */
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+ dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
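
nv_read_bios_from_rom now asks the SMUIO ops for the ROM index/data register offsets rather than hard-coding SOC15_REG_OFFSET(SMUIO, ...), so one reader works for both the smuio v11.0 and v11.0.6 register maps. A sketch of an indexed ROM read behind such callbacks; the mock data register auto-advances the index the way the real ROM_DATA port does, and every name and offset here is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mock ROM port: the index register selects a dword, and each read of
     * the data register returns it and advances the index. */
    static const uint32_t rom[4] = { 0xaa55aa55, 0x11111111, 0x22222222, 0x33333333 };
    static uint32_t rom_index;

    static void     wreg32(uint32_t off, uint32_t v) { (void)off; rom_index = v; }
    static uint32_t rreg32(uint32_t off) { (void)off; return rom[rom_index++ & 3]; }

    /* Per-generation offsets come from an ops table, not hard-coded macros. */
    struct smuio_funcs {
        uint32_t (*get_rom_index_offset)(void);
        uint32_t (*get_rom_data_offset)(void);
    };

    static uint32_t v11_index(void) { return 0x0100; }   /* invented offsets */
    static uint32_t v11_data(void)  { return 0x0104; }
    static const struct smuio_funcs smuio_v11 = { v11_index, v11_data };

    int main(void)
    {
        const struct smuio_funcs *f = &smuio_v11;
        uint32_t buf[4];

        wreg32(f->get_rom_index_offset(), 0);     /* set rom index to 0 */
        for (int i = 0; i < 4; i++)               /* stream out the image */
            buf[i] = rreg32(f->get_rom_data_offset());

        printf("first dword: 0x%08x\n", (unsigned)buf[0]);
        return 0;
    }
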
@@ -336,6 +342,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+static int nv_asic_mode2_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ ret = amdgpu_dpm_mode2_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode2 reset failed\n");
+
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return ret;
+}
+
static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
@@ -352,7 +390,9 @@ nv_asic_reset_method(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
- amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+ amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
@@ -360,6 +400,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
amdgpu_reset_method);
switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ return AMD_RESET_METHOD_MODE2;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -377,7 +419,16 @@ static int nv_asic_reset(struct amdgpu_device *adev)
int ret = 0;
struct smu_context *smu = &adev->smu;
- if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ /* skip reset on vangogh for now */
+ if (adev->asic_type == CHIP_VANGOGH)
+ return 0;
+
+ switch (nv_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ ret = amdgpu_device_pci_reset(adev);
+ break;
+ case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
ret = smu_baco_enter(smu);
@@ -386,9 +437,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)
ret = smu_baco_exit(smu);
if (ret)
return ret;
- } else {
+ break;
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ ret = nv_asic_mode2_reset(adev);
+ break;
+ default:
dev_info(adev->dev, "MODE1 reset\n");
ret = nv_asic_mode1_reset(adev);
+ break;
}
return ret;
@@ -423,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
static void nv_program_aspm(struct amdgpu_device *adev)
{
-
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->program_aspm))
+ adev->nbio.funcs->program_aspm(adev);
+
}
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -514,6 +574,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ else
+ adev->smuio.funcs = &smuio_v11_0_funcs;
if (adev->asic_type == CHIP_SIENNA_CICHLID)
adev->gmc.xgmi.supported = true;
@@ -669,22 +735,6 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
return adev->nbio.funcs->get_rev_id(adev);
}
-static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void nv_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- } else {
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
- }
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -768,10 +818,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
* The ASPM function is not fully enabled and verified on
* Navi yet. Temporarily skip this until ASPM enabled.
*/
-#if 0
- if (adev->nbio.funcs->enable_aspm)
+ if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->enable_aspm))
adev->nbio.funcs->enable_aspm(adev, !enter);
-#endif
return 0;
}
@@ -788,8 +838,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.set_uvd_clocks = &nv_set_uvd_clocks,
.set_vce_clocks = &nv_set_vce_clocks,
.get_config_memsize = &nv_get_config_memsize,
- .flush_hdp = &nv_flush_hdp,
- .invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
@@ -1080,120 +1128,6 @@ static int nv_common_soft_reset(void *handle)
return 0;
}
-static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl, hdp_clk_cntl1;
- uint32_t hdp_mem_pwr_cntl;
-
- if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
- AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)))
- return;
-
- hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
-
- /* Before doing clock/power mode switch,
- * forced on IPH & RC clock */
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- IPH_MEM_CLK_SOFT_OVERRIDE, 1);
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- RC_MEM_CLK_SOFT_OVERRIDE, 1);
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-
- /* HDP 5.0 doesn't support dynamic power mode switch,
- * disable clock and power gating before any changing */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_SD_EN, 0);
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* only one clock gating mode (LS/DS/SD) can be enabled */
- if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, enable);
- /* RC should not use shut down mode, fallback to ds */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- }
-
- /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
- * be set for SRAM LS/DS/SD */
- if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 1);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 1);
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* restore IPH & RC clock override after clock/power mode changing */
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
-}
-
-static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
- return;
-
- hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-
- if (enable) {
- hdp_clk_cntl &=
- ~(uint32_t)
- (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
- } else {
- hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-}
-
static int nv_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1213,9 +1147,9 @@ static int nv_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE);
- nv_update_hdp_clock_gating(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ adev->smuio.funcs->update_rom_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1234,31 +1168,15 @@ static int nv_common_set_powergating_state(void *handle,
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- uint32_t tmp;
if (amdgpu_sriov_vf(adev))
*flags = 0;
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_MGCG */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
- *flags |= AMD_CG_SUPPORT_HDP_MGCG;
-
- /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
- if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_DS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_SD;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
+
+ adev->smuio.funcs->get_clock_gating_state(adev, flags);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index d65a5339d354..3ba7bdfde65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index d7f92634eba2..4b1cc5e9ee92 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
adev->psp.ta_dtm_ucode_version =
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size =
@@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.ta_dtm_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+
+ adev->psp.ta_securedisplay_ucode_version =
+ le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
+ adev->psp.ta_securedisplay_ucode_size =
+ le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
+ adev->psp.ta_securedisplay_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
}
return 0;
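
The psp_v10_0 hunk extends TA microcode parsing: each trust application (hdcp, dtm, and now securedisplay) is carved out of the firmware blob by a little-endian offset/size pair taken from the TA header, and ta_fw_version is assigned only after all sections have been parsed. A toy sketch of that offset/size slicing; the header layout is invented for illustration and is not the real ta_firmware_header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented header: each TA is described by an (offset, size) pair. */
    struct ta_hdr {
        uint32_t dtm_off, dtm_size;
        uint32_t securedisplay_off, securedisplay_size;
    };

    int main(void)
    {
        uint8_t blob[64] = { 0 };
        const struct ta_hdr hdr = {
            .dtm_off = 16,           .dtm_size = 8,
            .securedisplay_off = 24, .securedisplay_size = 8,
        };

        memcpy(blob + hdr.dtm_off, "DTM-BIN", 8);            /* fake payloads */
        memcpy(blob + hdr.securedisplay_off, "SD--BIN", 8);

        /* Slice each TA out of the blob by its header-described window. */
        const char *dtm = (const char *)blob + hdr.dtm_off;
        const char *sd  = (const char *)blob + hdr.securedisplay_off;

        printf("dtm '%s' (%u bytes), securedisplay '%s' (%u bytes)\n",
               dtm, (unsigned)hdr.dtm_size, sd, (unsigned)hdr.securedisplay_size);
        return 0;
    }
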
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd4248c93c49..c325d6f53a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -392,37 +392,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v11_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -430,11 +399,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- if ((!amdgpu_sriov_vf(adev)) &&
- !(adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
- psp_v11_0_reroute_ih(psp);
-
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -726,7 +690,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
vfree(buf);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ce56e93c6886..c8c22c1d1e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -46,7 +46,6 @@
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b208b81005bb..d345e324837d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -32,7 +32,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f1ba36a094da..690a5090475a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -119,15 +119,7 @@ static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
- break;
- }
+ release_firmware(adev->sdma.instance[0].fw);
memset((void *)adev->sdma.instance, 0,
sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
@@ -185,23 +177,10 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH) {
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- } else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
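
sdma_v5_2 now requests the firmware image once, for instance 0, and copies that loaded context into the remaining instances, since these ASICs ship a single shared SDMA image; teardown mirrors this by releasing only instance 0's blob before wiping the whole array. A compilable sketch of the load-once/share pattern with illustrative types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NUM_INSTANCES 4

    struct sdma_instance {
        char *fw;              /* every instance points at the same blob */
        unsigned int fw_version;
    };

    int main(void)
    {
        struct sdma_instance inst[NUM_INSTANCES];

        /* Load the image once, for instance 0... */
        inst[0].fw = strdup("sdma-firmware-blob");
        inst[0].fw_version = 42;

        /* ...then share the loaded context with the remaining instances. */
        for (int i = 1; i < NUM_INSTANCES; i++)
            memcpy(&inst[i], &inst[0], sizeof(inst[0]));

        printf("instance 3: version %u (%s)\n", inst[3].fw_version, inst[3].fw);

        /* Teardown releases the single blob, then wipes the whole array. */
        free(inst[0].fw);
        memset(inst, 0, sizeof(inst));
        return 0;
    }
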
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3cf0589bfea5..6b5cf7882a12 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
u32 i;
int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
/* set mclk/sclk to bypass */
si_set_clk_bypass_mode(adev);
@@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
}
udelay(1);
}
-
- return r;
-}
-
-static int si_asic_reset(struct amdgpu_device *adev)
-{
- int r;
-
- dev_info(adev->dev, "PCI CONFIG reset\n");
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = si_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
@@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev)
static enum amd_reset_method
si_asic_reset_method(struct amdgpu_device *adev)
{
- if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
- amdgpu_reset_method != -1)
+ if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
+ return amdgpu_reset_method;
+ else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
+ amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
- amdgpu_reset_method);
+ amdgpu_reset_method);
return AMD_RESET_METHOD_LEGACY;
}
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (si_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ r = amdgpu_device_pci_reset(adev);
+ break;
+ default:
+ dev_info(adev->dev, "PCI CONFIG reset\n");
+ r = si_gpu_pci_config_reset(adev);
+ break;
+ }
+
+ return r;
+}
+
static u32 si_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
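
si.c is reshaped to match nv.c and soc15.c: the reset-method query honors a supported module-parameter override (warning on unsupported ones), and the reset entry point dispatches through a switch, so adding a method such as PCI reset becomes one more case rather than another if/else ladder. A sketch of that dispatch shape with invented handlers:

    #include <stdio.h>

    enum reset_method { RESET_AUTO = -1, RESET_LEGACY, RESET_PCI };

    /* Stands in for the amdgpu_reset_method module parameter. */
    static int reset_override = RESET_PCI;

    static enum reset_method pick_reset_method(void)
    {
        if (reset_override == RESET_PCI)
            return RESET_PCI;             /* honor a supported override */
        if (reset_override != RESET_AUTO)
            fprintf(stderr, "override %d unsupported, using AUTO\n",
                    reset_override);
        return RESET_LEGACY;              /* this ASIC's default */
    }

    static int do_pci_reset(void)    { puts("PCI reset");        return 0; }
    static int do_legacy_reset(void) { puts("PCI CONFIG reset"); return 0; }

    static int asic_reset(void)
    {
        switch (pick_reset_method()) {
        case RESET_PCI:
            return do_pci_reset();
        default:
            return do_legacy_reset();
        }
    }

    int main(void)
    {
        return asic_reset();
    }
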
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
new file mode 100644
index 000000000000..3a18dbb55c32
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v11_0_6.h"
+#include "smuio/smuio_11_0_6_offset.h"
+#include "smuio/smuio_11_0_6_sh_mask.h"
+
+static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+}
+
+static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+}
+
+static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 def, data;
+
+ /* enabling/disabling ROM CG is not supported on APUs */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+ else
+ data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+ if (def != data)
+ WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+ u32 data;
+
+ /* CGTT_ROM_CLK_CTRL0 is not available on APUs */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+ if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = {
+ .get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset,
+ .update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating,
+ .get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state,
+};
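
smuio_v11_0_6_update_rom_clock_gating gates the VBIOS ROM clock by flipping two SOFT_OVERRIDE bits in CGTT_ROM_CLK_CTRL0: clearing them lets the clock gate (CG enabled), setting them forces the clock on, and the matching query reports AMD_CG_SUPPORT_ROM_MGCG only when the override is clear. A self-contained sketch of that mask flip, with the register modeled as a plain variable:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SOFT_OVERRIDE0 0x1u
    #define SOFT_OVERRIDE1 0x2u

    /* Models CGTT_ROM_CLK_CTRL0; overrides set = clock forced on (CG off). */
    static uint32_t cgtt_rom_clk_ctrl0 = SOFT_OVERRIDE0 | SOFT_OVERRIDE1;

    static void update_rom_clock_gating(bool enable)
    {
        uint32_t def, data;

        def = data = cgtt_rom_clk_ctrl0;
        if (enable)                       /* clear overrides: clock may gate */
            data &= ~(SOFT_OVERRIDE0 | SOFT_OVERRIDE1);
        else                              /* set overrides: clock forced on */
            data |= SOFT_OVERRIDE0 | SOFT_OVERRIDE1;
        if (def != data)
            cgtt_rom_clk_ctrl0 = data;
    }

    int main(void)
    {
        update_rom_clock_gating(true);
        printf("ROM MGCG %s\n",
               (cgtt_rom_clk_ctrl0 & SOFT_OVERRIDE0) ? "off" : "active");
        return 0;
    }
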
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
new file mode 100644
index 000000000000..3c3f4ab0bc9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V11_0_6_H__
+#define __SMUIO_V11_0_6_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs;
+
+#endif /* __SMUIO_V11_0_6_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8a23636ecc27..1221aa6b40a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -40,8 +40,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
@@ -59,7 +57,9 @@
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
#include "vega10_ih.h"
+#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -83,14 +83,6 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
-/* for Vega20 register name change */
-#define mmHDP_MEM_POWER_CTRL 0x00d4
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
-#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-
/*
* Indirect registers accessor
*/
@@ -241,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
+ if (adev->asic_type == CHIP_RENOIR)
+ return 10000;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
@@ -487,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
- amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
@@ -532,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
return 0;
switch (soc15_asic_reset_method(adev)) {
- case AMD_RESET_METHOD_BACO:
- dev_info(adev->dev, "BACO reset\n");
- return soc15_asic_baco_reset(adev);
- case AMD_RESET_METHOD_MODE2:
- dev_info(adev->dev, "MODE2 reset\n");
- return amdgpu_dpm_mode2_reset(adev);
- default:
- dev_info(adev->dev, "MODE1 reset\n");
- return soc15_asic_mode1_reset(adev);
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ return amdgpu_device_pci_reset(adev);
+ case AMD_RESET_METHOD_BACO:
+ dev_info(adev->dev, "BACO reset\n");
+ return soc15_asic_baco_reset(adev);
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+ dev_info(adev->dev, "MODE1 reset\n");
+ return soc15_asic_mode1_reset(adev);
}
}
@@ -699,6 +697,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v6_1_funcs;
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v4_0_funcs;
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df.funcs = &df_v3_6_funcs;
@@ -729,12 +728,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
}
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
} else {
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
@@ -787,9 +786,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
} else {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
}
@@ -834,35 +833,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
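
With adev->hdp.funcs wired up above, HDP is reached through its own per-IP function table instead of the asic-level callbacks removed below. A minimal sketch of the call pattern, using only a callback this patch itself exercises:

    /* sketch: common code can now call HDP ops without knowing the asic */
    if (adev->hdp.funcs && adev->hdp.funcs->update_clock_gating)
            adev->hdp.funcs->update_clock_gating(adev, true);
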
-static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void soc15_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
return true;
}
-static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
-{
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
- return;
- /*read back hdp ras counter to reset it to 0 */
- RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
-}
-
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -1011,8 +987,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega10_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
@@ -1034,9 +1008,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
- .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1239,7 +1210,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x1636)
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
@@ -1293,9 +1265,8 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- if (adev->asic_funcs &&
- adev->asic_funcs->reset_hdp_ras_error_count)
- adev->asic_funcs->reset_hdp_ras_error_count(adev);
+ if (adev->hdp.funcs->reset_ras_error_count)
+ adev->hdp.funcs->reset_ras_error_count(adev);
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
@@ -1421,41 +1392,6 @@ static int soc15_common_soft_reset(void *handle)
return 0;
}
-static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
-{
- uint32_t def, data;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_RENOIR) {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
- else
- data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
- } else {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
- else
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
- }
-}
-
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
@@ -1516,7 +1452,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1533,7 +1469,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1541,7 +1477,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1560,10 +1496,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_LS */
- data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
- if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 000000000000..5039375bb1d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+	/* Query whether the TA is responding; used only for validation purposes */
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+ /* Send region of Interest and CRC value to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+ /* Maximum Command ID */
+ TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status returned in the shared buffer status field
+ */
+enum ta_securedisplay_status {
+ TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00, /* Success */
+ TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01, /* Generic Failure */
+ TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02, /* Invalid Parameter */
+	TA_SECUREDISPLAY_STATUS__NULL_POINTER            = 0x03,  /* Null Pointer */
+	TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR         = 0x04,  /* Fail to Write to I2C */
+	TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR  = 0x05,  /* Fail to Read DIO Scratch Register */
+	TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR          = 0x06,  /* Fail to Read CRC */
+
+	TA_SECUREDISPLAY_STATUS__MAX                     = 0x7FFFFFFF,  /* Maximum Value for status */
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading corresponding DIO Scratch register for ROI
+ */
+enum ta_securedisplay_max_phy {
+ TA_SECUREDISPLAY_PHY0 = 0,
+ TA_SECUREDISPLAY_PHY1 = 1,
+ TA_SECUREDISPLAY_PHY2 = 2,
+ TA_SECUREDISPLAY_PHY3 = 3,
+ TA_SECUREDISPLAY_MAX_PHY = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined return value of 0xAB, used only to validate that
+ * communication with the Secure Display TA is functional.
+ * This value is used to check whether the TA is responding successfully
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+ /* This is a value to validate if TA is loaded successfully */
+ TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ * I2C Buffer size which contains 8 bytes of ROI (X start, X end, Y start, Y end)
+ * and 6 bytes of CRC (R, G, B) and 1 byte for the physical ID
+ */
+enum ta_securedisplay_buffer_size {
+ /* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+};
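
As a worked illustration of the 15-byte layout this enum documents, a hypothetical packer; the 16-bit field widths and little-endian byte order here are assumptions for the sketch, not taken from the TA interface:

    /* hypothetical sketch, not part of the patch: pack the I2C payload
     * (8 bytes of ROI + 6 bytes of CRC + 1 byte of phy id = 15 bytes) */
    static void pack_securedisplay_i2c_buf(uint8_t buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE],
                                           const uint16_t roi[4], /* x_start, x_end, y_start, y_end */
                                           const uint16_t crc[3], /* R, G, B */
                                           uint8_t phy_id)
    {
            int i;

            for (i = 0; i < 4; i++) {          /* 8 bytes of ROI */
                    buf[2 * i]     = roi[i] & 0xff;
                    buf[2 * i + 1] = roi[i] >> 8;
            }
            for (i = 0; i < 3; i++) {          /* 6 bytes of CRC */
                    buf[8 + 2 * i]     = crc[i] & 0xff;
                    buf[8 + 2 * i + 1] = crc[i] >> 8;
            }
            buf[14] = phy_id;                  /* 1 byte of phy id */
    }
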
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ * Physical ID to determine which DIO scratch register should be used to get ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+ uint32_t phy_id; /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ * Input buffer
+ */
+union ta_securedisplay_cmd_input {
+ /* send ROI and CRC input buffer format */
+ struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ * Output buffer format for querying whether the TA is responding; used only for validation purposes
+ */
+struct ta_securedisplay_query_ta_output {
+ /* return value from TA when it is queried for validation purpose only */
+ uint32_t query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ * Output buffer format for the send ROI CRC command, which passes back the I2C
+ * buffer created inside the TA and written to I2C; used only for validation purposes
+ */
+struct ta_securedisplay_send_roi_crc_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE]; /* I2C buffer */
+ uint8_t reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ * Output buffer
+ */
+union ta_securedisplay_cmd_output {
+	/* Query TA output buffer format, used only for validation purposes */
+ struct ta_securedisplay_query_ta_output query_ta;
+	/* Send ROI CRC output buffer format, used only for validation purposes */
+ struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ * Secure Display Command which is shared buffer memory
+ */
+struct securedisplay_cmd {
+ uint32_t cmd_id; /* +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
+ uint32_t reserved[2]; /* +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
+	/** @note Total 48 Bytes */
+};
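
The offsets annotated above can be checked at compile time; a sketch (not part of the header) using the kernel's static_assert and offsetof, assuming the usual 4-byte enum ABI:

    /* sketch: verify the documented layout of the shared buffer */
    static_assert(offsetof(struct securedisplay_cmd, status) == 4);
    static_assert(offsetof(struct securedisplay_cmd, reserved) == 8);
    static_assert(offsetof(struct securedisplay_cmd, securedisplay_in_message) == 16);
    static_assert(offsetof(struct securedisplay_cmd, securedisplay_out_message) == 32);
    static_assert(sizeof(struct securedisplay_cmd) == 48);
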
+
+#endif //_TA_SECUREDISPLAY_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index ce3319993b4b..249fcbee7871 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector that was not overwritten (wptr + 16).
+	 * Hopefully this should allow us to catch up.
+	 */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 312ecf6d24a0..7cd67cb2ac5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -36,7 +36,6 @@
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c734e31a9e65..6117931fa8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -32,7 +32,6 @@
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index e5ae31eb744e..88626d83e07b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -38,132 +38,120 @@
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
- * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * vega10_ih_init_register_offset - Initialize register offsets for ih rings
*
* @adev: amdgpu_device pointer
*
- * Enable the interrupt ring buffer (VEGA10).
+ * Initialize the register offsets for the ih rings (VEGA10).
*/
-static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+static void vega10_ih_init_register_offset(struct amdgpu_device *adev)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
}
- adev->irq.ih.enabled = true;
if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
}
if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
}
-
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
}
/**
- * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Disable the interrupt ring buffer (VEGA10).
+ * Toggle the interrupt ring buffer (VEGA10).
*/
-static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
}
} else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ WREG32(ih_regs->ih_rb_cntl, tmp);
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
+ if (enable) {
+ ih->enabled = true;
+ } else {
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
+ return 0;
+}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
+/**
+ * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA10).
+ */
+static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
}
+
+ return 0;
}
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -209,6 +197,58 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
}
/**
+ * vega10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA10)
+ */
+static int vega10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih));
+
+ return 0;
+}
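
The two writes at the top of this function split the ring base across a pair of 32-bit registers; a worked example with an assumed 48-bit address (illustrative only):

    u64 gpu_addr   = 0x01AB12345600ULL;               /* 256-byte aligned: low 8 bits zero */
    u32 rb_base    = lower_32_bits(gpu_addr >> 8);    /* 0xAB123456, bits 39:8  */
    u32 rb_base_hi = (gpu_addr >> 40) & 0xff;         /* 0x01,       bits 47:40 */
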
+
+/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
@@ -221,116 +261,34 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
*/
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih;
- u32 ih_rb_cntl, ih_chicken;
- int ret = 0;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
u32 tmp;
/* disable irqs */
- vega10_ih_disable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- ih = &adev->irq.ih;
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- if ((adev->asic_type == CHIP_ARCTURUS &&
- adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
- adev->asic_type == CHIP_RENOIR) {
+ if (adev->asic_type == CHIP_RENOIR) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
MC_SPACE_GPA_ENABLE, 1);
- } else {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_FBPA_ENABLE, 1);
}
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- vega10_ih_doorbell_rptr(ih));
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = vega10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- vega10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -345,9 +303,14 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- vega10_ih_enable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
- return ret;
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
}
/**
@@ -359,7 +322,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
*/
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
- vega10_ih_disable_interrupts(adev);
+ vega10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -379,25 +342,17 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
-
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -413,69 +368,15 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
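
The +32 skip is one full interrupt vector: vega10_ih_decode_iv, removed just below in favor of amdgpu_ih_decode_iv_helper, consumes 8 dwords (32 bytes) per entry. A sketch of the recovery arithmetic, including how ptr_mask wraps it (e.g. a 256 KiB ring has ptr_mask 0x3ffff, so wptr 0x3fff0 resumes at 0x10):

    /* sketch, not in the patch: post-overflow read pointer */
    static u32 ih_overflow_resume_rptr(u32 wptr, u32 ptr_mask)
    {
            return (wptr + 32) & ptr_mask;  /* one 32-byte IV entry past wptr */
    }
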
/**
- * vega10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void vega10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* vega10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -485,22 +386,14 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
-
+ ih_regs = &ih->ih_regs;
 /* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -519,6 +412,8 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -526,12 +421,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
vega10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -600,19 +492,23 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
+ /* initialize ih control registers offset */
+ vega10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -628,6 +524,7 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -698,15 +595,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
field_val = enable ? 0 : 1;
/**
- * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
- * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
+	 * Vega10/12 and RAVEN don't have the IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
*/
- if (adev->asic_type > CHIP_VEGA10) {
- data = REG_SET_FIELD(data, IH_CLK_CTRL,
- IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ if (adev->asic_type == CHIP_RENOIR)
data = REG_SET_FIELD(data, IH_CLK_CTRL,
IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
- }
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
@@ -759,7 +652,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .decode_iv = vega10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
new file mode 100644
index 000000000000..5a3c867d5881
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "soc15.h"
+
+#include "oss/osssys_4_2_0_offset.h"
+#include "oss/osssys_4_2_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "vega20_ih.h"
+
+#define MAX_REARM_RETRY 10
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * vega20_ih_init_register_offset - Initialize register offsets for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (VEGA20).
+ */
+static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
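
Because the per-ring register offsets are cached here once, the shared helpers later in the file can use the plain WREG32/RREG32 accessors instead of per-ring SOC15 macro names. A sketch of the consuming side (the RREG32 macro family assumes an adev in scope):

    static u32 ih_ring_read_rptr(struct amdgpu_device *adev,
                                 struct amdgpu_ih_ring *ih)
    {
            /* works for ring 0, 1 or 2: the offset was cached at init */
            return RREG32_NO_KIQ(ih->ih_regs.ih_rb_rptr);
    }
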
+
+/**
+ * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (VEGA20).
+ */
+static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA20).
+ */
+static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 1 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
+
+static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
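
REG_SET_FIELD, used heavily above, is a read-modify-write on a named bitfield. Roughly what one such call expands to, sketched with the mask/shift naming convention of the sh_mask headers:

    /* approx. expansion of REG_SET_FIELD(v, IH_DOORBELL_RPTR, ENABLE, 1) */
    v = (v & ~IH_DOORBELL_RPTR__ENABLE_MASK) |
        (IH_DOORBELL_RPTR__ENABLE_MASK &
         (1 << IH_DOORBELL_RPTR__ENABLE__SHIFT));
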
+
+/**
+ * vega20_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA20)
+ */
+static int vega20_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega20_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Reroute VMC and UTCL2 interrupts on the primary ih ring to
+ * ih ring 1 so they won't be lost when bunches of page fault
+ * interrupts overwhelm the interrupt handler (VEGA20)
+ */
+static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+	/* On vega20 the ih reroute goes through psp;
+	 * this function is only used for arcturus
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UTCL2 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ }
+}
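
The CLIENT_CFG registers above form an index/data pair: write the client id to INDEX, then read-modify-write DATA. A generic sketch of the idiom (hypothetical helper, not part of the patch):

    static void ih_client_cfg_rmw(struct amdgpu_device *adev, u32 client,
                                  u32 mask, u32 value)
    {
            u32 tmp;

            WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, client);
            tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
            tmp = (tmp & ~mask) | (value & mask);
            WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
    }
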
+
+/**
+ * vega20_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VEGA20).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int vega20_ih_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
+ u32 tmp;
+
+ /* disable irqs */
+ ret = vega20_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ if (i == 1)
+ vega20_ih_reroute_ih(adev);
+ ret = vega20_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = vega20_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * vega20_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VEGA20).
+ */
+static void vega20_ih_irq_disable(struct amdgpu_device *adev)
+{
+ vega20_ih_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * vega20_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VEGA20). Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector that was not overwritten (wptr + 32).
+	 * Hopefully this should allow us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * vega20_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * vega20_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void vega20_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega20_ih_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * vega20_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega20_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+	default:
+		break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
+ .process = vega20_ih_self_irq,
+};
+
+static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
+}
+
+static int vega20_ih_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_set_interrupt_funcs(adev);
+ vega20_ih_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int vega20_ih_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
+ /* initialize ih control registers offset */
+ vega20_ih_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int vega20_ih_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+ return 0;
+}
+
+static int vega20_ih_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vega20_ih_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int vega20_ih_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int vega20_ih_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_fini(adev);
+}
+
+static int vega20_ih_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_init(adev);
+}
+
+static bool vega20_ih_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int vega20_ih_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int vega20_ih_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
+static int vega20_ih_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+
+}
+
+static int vega20_ih_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs vega20_ih_ip_funcs = {
+ .name = "vega20_ih",
+ .early_init = vega20_ih_early_init,
+ .late_init = NULL,
+ .sw_init = vega20_ih_sw_init,
+ .sw_fini = vega20_ih_sw_fini,
+ .hw_init = vega20_ih_hw_init,
+ .hw_fini = vega20_ih_hw_fini,
+ .suspend = vega20_ih_suspend,
+ .resume = vega20_ih_resume,
+ .is_idle = vega20_ih_is_idle,
+ .wait_for_idle = vega20_ih_wait_for_idle,
+ .soft_reset = vega20_ih_soft_reset,
+ .set_clockgating_state = vega20_ih_set_clockgating_state,
+ .set_powergating_state = vega20_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs vega20_ih_funcs = {
+ .get_wptr = vega20_ih_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .set_rptr = vega20_ih_set_rptr
+};
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &vega20_ih_funcs;
+}
+
+const struct amdgpu_ip_block_version vega20_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &vega20_ih_ip_funcs,
+};
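For orientation, a sketch of how an IP block like this is typically registered during ASIC init, assuming the existing amdgpu_device_ip_block_add() helper (the wiring itself is not part of this hunk):

	static int soc15_add_ih_block_sketch(struct amdgpu_device *adev)
	{
		/* Select the IH 4.2 implementation on Vega20-class ASICs. */
		return amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
	}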
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
new file mode 100644
index 000000000000..7af6d8758ee3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VEGA20_IH_H__
+#define __VEGA20_IH_H__
+
+extern const struct amd_ip_funcs vega20_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version vega20_ih_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d56b474b3a21..eafb76aebd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
+ int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
/* disable BM */
pci_clear_master(adev->pdev);
@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* enable BM */
pci_set_master(adev->pdev);
adev->has_hw_reset = true;
- return 0;
+ r = 0;
+ break;
}
udelay(1);
}
- return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
- int r;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = vi_gpu_pci_config_reset(adev);
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
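The refactor above folds the old two-function split into a single function with one exit path, so the "engine hung" scratch flag is always cleared even when the poll times out. In sketch form (poll body elided):

	static int pci_config_reset_sketch(struct amdgpu_device *adev)
	{
		int r = -EINVAL;	/* assume failure until the poll succeeds */

		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
		/* trigger the reset, then poll; on success: r = 0; break; */
		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
		return r;
	}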
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index e8fb10c41f16..f02c938f75da 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -7,6 +7,8 @@ config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
+ select HMM_MIRROR
select MMU_NOTIFIER
+ select DRM_AMDGPU_USERPTR
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 8cac497c2c45..a5640a6138cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
- crat_table->length += (sub_type_hdr->length * entries);
- crat_table->total_entries += entries;
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length * entries);
+ if (entries) {
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+ }
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 16262e5d93f5..7351dd195274 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
static inline void dqm_lock(struct device_queue_manager *dqm)
{
mutex_lock(&dqm->lock_hidden);
- dqm->saved_flags = memalloc_nofs_save();
+ dqm->saved_flags = memalloc_noreclaim_save();
}
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
- memalloc_nofs_restore(dqm->saved_flags);
+ memalloc_noreclaim_restore(dqm->saved_flags);
mutex_unlock(&dqm->lock_hidden);
}
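Switching from the NOFS scope to the noreclaim scope means allocations made while dqm->lock_hidden is held can no longer enter direct reclaim at all, which could otherwise re-enter the driver and deadlock. A minimal sketch of the scope API pattern, assuming <linux/sched/mm.h>:

	#include <linux/mutex.h>
	#include <linux/sched/mm.h>

	static void locked_noreclaim_section(struct mutex *lock)
	{
		unsigned int noreclaim_flag;

		mutex_lock(lock);
		noreclaim_flag = memalloc_noreclaim_save();
		/* allocations in here will not recurse into direct reclaim */
		memalloc_noreclaim_restore(noreclaim_flag);
		mutex_unlock(lock);
	}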
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 241bd6ff79f4..74a460be077b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -44,6 +44,25 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ /* Only handle clients we care about */
+ if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+ client_id != SOC15_IH_CLIENTID_SDMA0 &&
+ client_id != SOC15_IH_CLIENTID_SDMA1 &&
+ client_id != SOC15_IH_CLIENTID_SDMA2 &&
+ client_id != SOC15_IH_CLIENTID_SDMA3 &&
+ client_id != SOC15_IH_CLIENTID_SDMA4 &&
+ client_id != SOC15_IH_CLIENTID_SDMA5 &&
+ client_id != SOC15_IH_CLIENTID_SDMA6 &&
+ client_id != SOC15_IH_CLIENTID_SDMA7 &&
+ client_id != SOC15_IH_CLIENTID_VMC &&
+ client_id != SOC15_IH_CLIENTID_VMC1 &&
+ client_id != SOC15_IH_CLIENTID_UTCL2 &&
+ client_id != SOC15_IH_CLIENTID_SE0SH &&
+ client_id != SOC15_IH_CLIENTID_SE1SH &&
+ client_id != SOC15_IH_CLIENTID_SE2SH &&
+ client_id != SOC15_IH_CLIENTID_SE3SH)
+ return false;
+
/* This is a known issue for gfx9. Under non HWS, pasid is not set
* in the interrupt payload, so we need to find out the pasid on our
* own.
@@ -96,17 +115,30 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
- if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
- kfd_signal_hw_exception_event(pasid);
- else if (client_id == SOC15_IH_CLIENTID_VMC ||
- client_id == SOC15_IH_CLIENTID_VMC1 ||
- client_id == SOC15_IH_CLIENTID_UTCL2) {
+ if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+ client_id == SOC15_IH_CLIENTID_SE0SH ||
+ client_id == SOC15_IH_CLIENTID_SE1SH ||
+ client_id == SOC15_IH_CLIENTID_SE2SH ||
+ client_id == SOC15_IH_CLIENTID_SE3SH) {
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+ kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_hw_exception_event(pasid);
+ } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+ client_id == SOC15_IH_CLIENTID_SDMA1 ||
+ client_id == SOC15_IH_CLIENTID_SDMA2 ||
+ client_id == SOC15_IH_CLIENTID_SDMA3 ||
+ client_id == SOC15_IH_CLIENTID_SDMA4 ||
+ client_id == SOC15_IH_CLIENTID_SDMA5 ||
+ client_id == SOC15_IH_CLIENTID_SDMA6 ||
+ client_id == SOC15_IH_CLIENTID_SDMA7) {
+ if (source_id == SOC15_INTSRC_SDMA_TRAP)
+ kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
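The early-return filter in event_interrupt_isr_v9() keeps the hot interrupt path cheap by dropping clients KFD does not handle before any further decoding. An equivalent switch-based formulation (a sketch, not the code in this patch) makes the accepted set easier to scan:

	static bool kfd_client_is_handled(unsigned int client_id)
	{
		switch (client_id) {
		case SOC15_IH_CLIENTID_GRBM_CP:
		case SOC15_IH_CLIENTID_SDMA0:
		case SOC15_IH_CLIENTID_SDMA1:
		case SOC15_IH_CLIENTID_SDMA2:
		case SOC15_IH_CLIENTID_SDMA3:
		case SOC15_IH_CLIENTID_SDMA4:
		case SOC15_IH_CLIENTID_SDMA5:
		case SOC15_IH_CLIENTID_SDMA6:
		case SOC15_IH_CLIENTID_SDMA7:
		case SOC15_IH_CLIENTID_VMC:
		case SOC15_IH_CLIENTID_VMC1:
		case SOC15_IH_CLIENTID_UTCL2:
		case SOC15_IH_CLIENTID_SE0SH:
		case SOC15_IH_CLIENTID_SE1SH:
		case SOC15_IH_CLIENTID_SE2SH:
		case SOC15_IH_CLIENTID_SE3SH:
			return true;
		default:
			return false;
		}
	}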
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index a3fc23873819..0be72789ccbc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -497,8 +497,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
dev->node_props.num_cp_queues);
- sysfs_show_64bit_prop(buffer, offs, "unique_id",
- dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -529,6 +527,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->sdma_fw_version);
+ sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+
}
return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
@@ -1340,7 +1341,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
- dev->node_props.unique_id = amdgpu_amdkfd_get_unique_id(dev->gpu->kgd);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 326d9b26b7aa..416fd910e12e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -57,7 +57,6 @@
struct kfd_node_properties {
uint64_t hive_id;
- uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 519080e9a233..3e1fd1e7d09f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
@@ -938,42 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void event_mall_stutter(struct work_struct *work)
+{
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
+ struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+ struct amdgpu_display_manager *dm = vblank_work->dm;
+
+ mutex_lock(&dm->dc_lock);
+
+ if (vblank_work->enable)
+ dm->active_vblank_irq_count++;
+ else
+ dm->active_vblank_irq_count--;
+
+ dc_allow_idle_optimizations(dm->dc,
+ dm->active_vblank_irq_count == 0);
+
+ DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+
+ mutex_unlock(&dm->dc_lock);
+}
+
+static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
- dm->crc_win_x_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_START", 0, U16_MAX);
- if (!dm->crc_win_x_start_property)
- return -ENOMEM;
- dm->crc_win_y_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_START", 0, U16_MAX);
- if (!dm->crc_win_y_start_property)
- return -ENOMEM;
+ int max_caps = dc->caps.max_links;
+ struct vblank_workqueue *vblank_work;
+ int i = 0;
- dm->crc_win_x_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_END", 0, U16_MAX);
- if (!dm->crc_win_x_end_property)
- return -ENOMEM;
+ vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(vblank_work)) {
+ kfree(vblank_work);
+ return NULL;
+ }
- dm->crc_win_y_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_END", 0, U16_MAX);
- if (!dm->crc_win_y_end_property)
- return -ENOMEM;
+ for (i = 0; i < max_caps; i++)
+ INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
- return 0;
+ return vblank_work;
}
#endif
-
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -993,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spin_lock_init(&adev->dm.vblank_lock);
+#endif
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1051,8 +1060,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.power_down_display_on_boot = true;
- init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1109,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
+
+ if (!adev->dm.vblank_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
+ }
+#endif
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1121,10 +1139,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
-#ifdef CONFIG_DEBUG_FS
- if (create_crtc_crc_properties(&adev->dm))
- DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -1170,7 +1184,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
- hdcp_destroy(adev->dm.hdcp_workqueue);
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL;
}
@@ -1817,6 +1831,11 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+#endif
+
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -1872,8 +1891,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -2369,8 +2388,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -2386,9 +2407,6 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
- drm_connector_list_update(connector);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -3760,10 +3778,53 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
};
+static void get_min_max_dc_plane_scaling(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ int *min_downscale, int *max_upscale)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
+ struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ *max_upscale = plane_cap->max_upscale_factor.nv12;
+ *min_downscale = plane_cap->max_downscale_factor.nv12;
+ break;
+
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ *max_upscale = plane_cap->max_upscale_factor.fp16;
+ *min_downscale = plane_cap->max_downscale_factor.fp16;
+ break;
+
+ default:
+ *max_upscale = plane_cap->max_upscale_factor.argb8888;
+ *min_downscale = plane_cap->max_downscale_factor.argb8888;
+ break;
+ }
+
+ /*
+ * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
+ * scaling factor of 1.0 == 1000 units.
+ */
+ if (*max_upscale == 1)
+ *max_upscale = 1000;
+
+ if (*min_downscale == 1)
+ *min_downscale = 1000;
+}
+
+
static int fill_dc_scaling_info(const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
- int scale_w, scale_h;
+ int scale_w, scale_h, min_downscale, max_upscale;
memset(scaling_info, 0, sizeof(*scaling_info));
@@ -3795,17 +3856,25 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
/* DRM doesn't specify clipping on destination output. */
scaling_info->clip_rect = scaling_info->dst_rect;
- /* TODO: Validate scaling per-format with DC plane caps */
+ /* Validate scaling per-format with DC plane caps */
+ if (state->plane && state->plane->dev && state->fb) {
+ get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
+ &min_downscale, &max_upscale);
+ } else {
+ min_downscale = 250;
+ max_upscale = 16000;
+ }
+
scale_w = scaling_info->dst_rect.width * 1000 /
scaling_info->src_rect.width;
- if (scale_w < 250 || scale_w > 16000)
+ if (scale_w < min_downscale || scale_w > max_upscale)
return -EINVAL;
scale_h = scaling_info->dst_rect.height * 1000 /
scaling_info->src_rect.height;
- if (scale_h < 250 || scale_h > 16000)
+ if (scale_h < min_downscale || scale_h > max_upscale)
return -EINVAL;
/*
@@ -5334,64 +5403,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
- state->crc_window = cur->crc_window;
-#endif
+
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_new_state =
- to_dm_crtc_state(crtc_state);
-
- if (property == adev->dm.crc_win_x_start_property)
- dm_new_state->crc_window.x_start = val;
- else if (property == adev->dm.crc_win_y_start_property)
- dm_new_state->crc_window.y_start = val;
- else if (property == adev->dm.crc_win_x_end_property)
- dm_new_state->crc_window.x_end = val;
- else if (property == adev->dm.crc_win_y_end_property)
- dm_new_state->crc_window.y_end = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_state =
- to_dm_crtc_state(state);
-
- if (property == adev->dm.crc_win_x_start_property)
- *val = dm_state->crc_window.x_start;
- else if (property == adev->dm.crc_win_y_start_property)
- *val = dm_state->crc_window.y_start;
- else if (property == adev->dm.crc_win_x_end_property)
- *val = dm_state->crc_window.x_end;
- else if (property == adev->dm.crc_win_y_end_property)
- *val = dm_state->crc_window.y_end;
- else
- return -EINVAL;
-
- return 0;
-}
-#endif
-
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@@ -5414,6 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct amdgpu_display_manager *dm = &adev->dm;
+ unsigned long flags;
+#endif
int rc = 0;
if (enable) {
@@ -5429,7 +5450,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
- return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ return -EBUSY;
+
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spin_lock_irqsave(&dm->vblank_lock, flags);
+ dm->vblank_workqueue->dm = dm;
+ dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
+ dm->vblank_workqueue->enable = enable;
+ spin_unlock_irqrestore(&dm->vblank_lock, flags);
+ schedule_work(&dm->vblank_workqueue->mall_work);
+#endif
+
+ return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc)
@@ -5446,7 +5483,6 @@ static void dm_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
@@ -5458,10 +5494,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
- .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
- .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
};
static enum drm_connector_status
@@ -6425,12 +6457,51 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
static int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
- int max_downscale = 0;
- int max_upscale = INT_MAX;
+ struct drm_framebuffer *fb = state->fb;
+ int min_downscale, max_upscale;
+ int min_scale = 0;
+ int max_scale = INT_MAX;
+
+ /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
+ if (fb && state->crtc) {
+ /* Validate viewport to cover the case when only the position changes */
+ if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
+ int viewport_width = state->crtc_w;
+ int viewport_height = state->crtc_h;
+
+ if (state->crtc_x < 0)
+ viewport_width += state->crtc_x;
+ else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
+ viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
+
+ if (state->crtc_y < 0)
+ viewport_height += state->crtc_y;
+ else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
+ viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
+
+ /* If the plane is completely outside the screen, viewport_width
+ * and/or viewport_height will be negative, which also satisfies the
+ * condition below, so that case is covered too.
+ * Width is checked against 2x MIN_VIEWPORT_SIZE because of pipe-split.
+ */
+ if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
+ return -EINVAL;
+ }
+
+ /* Get min/max allowed scaling factors from plane caps. */
+ get_min_max_dc_plane_scaling(state->crtc->dev, fb,
+ &min_downscale, &max_upscale);
+ /*
+ * Convert to drm convention: 16.16 fixed point, instead of dc's
+ * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
+ * dst/src, so min_scale = 1.0 / max_upscale, etc.
+ */
+ min_scale = (1000 << 16) / max_upscale;
+ max_scale = (1000 << 16) / min_downscale;
+ }
- /* TODO: These should be checked against DC plane caps */
return drm_atomic_helper_check_plane_state(
- state, new_crtc_state, max_downscale, max_upscale, true, true);
+ state, new_crtc_state, min_scale, max_scale, true, true);
}
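To make the 16.16 conversion concrete, a worked example with illustrative cap values (these numbers are assumptions, matching the fallback limits used in fill_dc_scaling_info()):

	/* With max_upscale = 16000 (16.0x in dc's 1.0 == 1000 convention)
	 * and min_downscale = 250 (0.25x):
	 *
	 *   min_scale = (1000 << 16) / 16000 = 4096   == 1/16 in 16.16
	 *   max_scale = (1000 << 16) / 250   = 262144 == 4.0  in 16.16
	 *
	 * DRM's factor is src/dst, so this allows up to 16x upscaling and
	 * 4x downscaling - the same limits dc expresses as dst/src.
	 */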
static int dm_plane_atomic_check(struct drm_plane *plane,
@@ -6663,25 +6734,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
- struct amdgpu_crtc *acrtc)
-{
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_end_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_end_property,
- 0);
-}
-#endif
-
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@@ -6729,9 +6781,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
- attach_crtc_crc_properties(dm, acrtc);
-#endif
+
return 0;
fail:
@@ -8368,7 +8418,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool configure_crc = false;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8378,30 +8427,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
- }
+
#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
- amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
- if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
- configure_crc = true;
- } else {
- if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
- configure_crc = true;
- }
-
- if (configure_crc)
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
- }
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
+ }
#endif
+ }
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -8486,14 +8526,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -8503,17 +8543,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
@@ -9720,6 +9758,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->max_vfreq = range->max_vfreq;
amdgpu_dm_connector->pixel_clock_mhz =
range->pixel_clock_mhz * 10;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2ee6edb3df93..8bfe901cf237 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -58,10 +58,10 @@
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
-struct amdgpu_dm_irq_handler_data;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
+struct dc_plane_state;
struct common_irq_params {
struct amdgpu_device *adev;
@@ -93,6 +93,20 @@ struct dm_compressor_info {
};
/**
+ * struct vblank_workqueue - Work to be executed in a separate thread during vblank
+ * @mall_work: work for MALL stutter
+ * @dm: amdgpu display manager device
+ * @otg_inst: OTG instance for which vblank is being set
+ * @enable: true if vblank is being enabled
+ */
+struct vblank_workqueue {
+ struct work_struct mall_work;
+ struct amdgpu_display_manager *dm;
+ int otg_inst;
+ bool enable;
+};
+
+/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
* Describe the backlight support for ACPI or eDP AUX.
@@ -244,6 +258,15 @@ struct amdgpu_display_manager {
struct mutex audio_lock;
/**
+ * @vblank_lock:
+ *
+ * Guards access to deferred vblank work state.
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spinlock_t vblank_lock;
+#endif
+
+ /**
* @audio_component:
*
* Used to notify ELD changes to sound driver.
@@ -321,6 +344,10 @@ struct amdgpu_display_manager {
struct hdcp_workqueue *hdcp_workqueue;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct vblank_workqueue *vblank_workqueue;
+#endif
+
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
@@ -336,32 +363,13 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#ifdef CONFIG_DEBUG_FS
- /**
- * @crc_win_x_start_property:
- *
- * X start of the crc calculation window
- */
- struct drm_property *crc_win_x_start_property;
/**
- * @crc_win_y_start_property:
+ * @active_vblank_irq_count:
*
- * Y start of the crc calculation window
+ * Number of currently active vblank IRQs.
*/
- struct drm_property *crc_win_y_start_property;
- /**
- * @crc_win_x_end_property:
- *
- * X end of the crc calculation window
- */
- struct drm_property *crc_win_x_end_property;
- /**
- * @crc_win_y_end_property:
- *
- * Y end of the crc calculation window
- */
- struct drm_property *crc_win_y_end_property;
-#endif
+ uint32_t active_vblank_irq_count;
+
/**
* @mst_encoders:
*
@@ -438,25 +446,11 @@ struct amdgpu_dm_connector {
extern const struct amdgpu_ip_block_version dm_ip_block;
-struct amdgpu_framebuffer;
-struct amdgpu_display_manager;
-struct dc_validation_set;
-struct dc_plane_state;
-
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
};
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
- uint16_t x_start;
- uint16_t y_start;
- uint16_t x_end;
- uint16_t y_end;
- };
-#endif
-
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -479,9 +473,6 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
-#ifdef CONFIG_DEBUG_FS
- struct crc_rec crc_window;
-#endif
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 7b886a779a8c..66cb8730586b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
- dm_crtc_state->crc_window.x_start = 0;
- dm_crtc_state->crc_window.y_start = 0;
- dm_crtc_state->crc_window.x_end = 0;
- dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
- bool ret = true;
-
- if ((dm_crtc_state->crc_window.x_start != 0) ||
- (dm_crtc_state->crc_window.y_start != 0) ||
- (dm_crtc_state->crc_window.x_end != 0) ||
- (dm_crtc_state->crc_window.y_end != 0))
- ret = false;
-
- return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state)
-{
- bool ret = false;
-
- if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
- (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
- (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
- (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
- ret = true;
-
- return ret;
-}
-
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
- struct crc_params *crc_window = NULL, tmp_window;
/* Configuration will be deferred to stream enable. */
if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable CRTC CRC generation if necessary. */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
- if (!enable)
- amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
- if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
- crc_window = &tmp_window;
-
- tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
- tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
- }
-
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, crc_window, enable, enable)) {
+ stream_state, NULL, enable, enable)) {
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..f7d731797d3f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 11459fb09a37..360952129b6d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -691,7 +691,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return size;
}
-/**
+/*
* Returns the DMCUB tracebuffer contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
*/
@@ -735,7 +735,7 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
return 0;
}
-/**
+/*
* Returns the DMCUB firmware state contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
*/
@@ -1063,7 +1063,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
* echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
*
*/
-static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
+static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
@@ -2214,9 +2214,9 @@ static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
.llseek = default_llseek
};
-static const struct file_operations dp_trigger_hotplug_debugfs_fops = {
+static const struct file_operations trigger_hotplug_debugfs_fops = {
.owner = THIS_MODULE,
- .write = dp_trigger_hotplug,
+ .write = trigger_hotplug,
.llseek = default_llseek
};
@@ -2270,7 +2270,6 @@ static const struct {
const struct file_operations *fops;
} dp_debugfs_entries[] = {
{"link_settings", &dp_link_settings_debugfs_fops},
- {"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -2367,6 +2366,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("output_bpc", 0644, dir, connector,
&output_bpc_fops);
+ debugfs_create_file("trigger_hotplug", 0644, dir, connector,
+ &trigger_hotplug_debugfs_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c2cd184f0bbd..0cdbfcd475ec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
}
-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
int i = 0;
@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
kfree(hdcp_work->srm);
kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
@@ -449,11 +450,12 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
- display->dig_fe = config->stream_enc_inst;
- link->dig_be = config->link_enc_inst;
+ display->dig_fe = config->dig_fe;
+ link->dig_be = config->dig_be;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
- link->dp.mst_supported = config->mst_supported;
+ link->dp.assr_enabled = config->assr_enabled;
+ link->dp.mst_enabled = config->mst_enabled;
display->adjust.disable = 1;
link->adjust.auth_delay = 3;
link->adjust.hdcp1.disable = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 5159b3a5e5b0..09294ff122fe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
-void hdcp_destroy(struct hdcp_workqueue *work);
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f6f487e9fe2d..5159399f8239 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include <linux/acpi.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <drm/drm_probe_helper.h>
@@ -527,11 +526,11 @@ bool dm_helpers_submit_i2c(
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- bool enable
-)
+ bool enable)
{
uint8_t enable_dsc = enable ? 1 : 0;
struct amdgpu_dm_connector *aconnector;
+ ssize_t ret = 0;
if (!stream)
return false;
@@ -542,13 +541,13 @@ bool dm_helpers_dp_write_dsc_enable(
if (!aconnector->dsc_aux)
return false;
- return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- return false;
+ return (ret > 0);
}
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
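On the return check above: drm_dp_dpcd_write() returns the number of bytes transferred on success or a negative errno, so a 1-byte DP_DSC_ENABLE write succeeded only when the result is positive. A self-contained sketch:

	static bool dsc_enable_write_ok(struct drm_dp_aux *aux, bool enable)
	{
		uint8_t enable_dsc = enable ? 1 : 0;
		ssize_t written = drm_dp_dpcd_write(aux, DP_DSC_ENABLE,
						    &enable_dsc, 1);

		/* bytes transferred on success, negative errno on failure */
		return written > 0;
	}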
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 26ed70e5538a..e0000c180ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -662,6 +662,20 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VLINE0,
+ __func__);
+}
+
static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int crtc_id,
@@ -681,6 +695,11 @@ static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
+ .set = amdgpu_dm_set_vline0_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
.set = amdgpu_dm_set_vupdate_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -702,6 +721,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+ adev->vline0_irq.num_types = adev->mode_info.num_crtc;
+ adev->vline0_irq.funcs = &dm_vline0_irq_funcs;
+
adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8ab0b9060d2b..41b09ab22233 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/version.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
@@ -833,6 +832,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -850,7 +852,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index bf8fe0471b8f..5bf2f2375b40 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -69,5 +69,7 @@ AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
DC_DMUB += dc_dmub_srv.o
+DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
+AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 24ed03d8cda7..6767fab55c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -73,12 +73,9 @@ uint16_t fixed_point_to_int_frac(
return result;
}
-/**
-* convert_float_matrix
-* This converts a double into HW register spec defined format S2D13.
-* @param :
-* @return None
-*/
+/*
+ * convert_float_matrix - convert a fixed31_32 matrix into the HW register spec defined format S2D13.
+ */
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..ad04ef98e652 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -49,20 +49,24 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
return false;
}
@@ -71,9 +75,13 @@ bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
index 7c0cbf47e8ce..b061497480b8 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -30,9 +30,9 @@
bool is_rgb_cspace(enum dc_color_space output_color_space);
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 23a373ca94b5..c67d21a5ee52 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -911,11 +911,11 @@ static enum bp_result get_ss_info_from_tbl(
* ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: spread spectrum information structure
+ * Return: BIOS parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -985,10 +985,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param this
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * @ss_info: spread spectrum information structure
+ * Return: BIOS parser result code
*/
static enum bp_result get_ss_info_from_tbl(
struct bios_parser *bp,
@@ -1011,9 +1011,10 @@ static enum bp_result get_ss_info_from_tbl(
* from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * @info: spread spectrum information structure
+ * Return: BIOS parser result code
*/
static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
struct bios_parser *bp,
@@ -1076,9 +1077,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* of entries that matches the id
* for, the SS_Info table, there should not be more than 1 entry match.
*
- * @param [in] id, spread sprectrum id
- * @param [out] pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * @ss_info: spread spectrum information structure
+ * Return: BIOS parser result code
*/
static enum bp_result get_ss_info_from_ss_info_table(
struct bios_parser *bp,
@@ -1451,16 +1453,14 @@ static enum bp_result get_embedded_panel_info_v1_3(
}
/**
- * bios_parser_get_encoder_cap_info
+ * bios_parser_get_encoder_cap_info - get encoder capability
+ * information of input object id
*
- * @brief
- * Get encoder capability information of input object id
- *
- * @param object_id, Object id
- * @param object_id, encoder cap information structure
- *
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @object_id: object id
+ * @info: encoder cap information structure
*
+ * Return: BIOS parser result code
*/
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
@@ -1490,17 +1490,12 @@ static enum bp_result bios_parser_get_encoder_cap_info(
}
/**
- * get_encoder_cap_record
- *
- * @brief
- * Get encoder cap record for the object
- *
- * @param object, ATOM object
+ * get_encoder_cap_record - Get encoder cap record for the object
*
- * @return atom encoder cap record
- *
- * @note
- * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ * @bp: pointer to the BIOS parser
+ * @object: ATOM object
+ * Return: ATOM encoder cap record
+ * Note: search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
*/
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
@@ -1557,8 +1552,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
* the VBIOS that match the SSid (to be converted from signal)
*
- * @param[in] signal, ASSignalType to be converted to SSid
- * @return number of SS Entry that match the signal
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to SSid
+ * Return: number of SS entries that match the signal
*/
static uint32_t bios_parser_get_ss_entry_number(
struct dc_bios *dcb,
@@ -1608,10 +1604,10 @@ static uint32_t bios_parser_get_ss_entry_number(
* get_ss_entry_number_from_ss_info_tbl
* Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
*
- * @note There can only be one entry for each id for SS_Info Table
- *
- * @param [in] id, spread spectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * Return: number of SS entries that match the id
+ * Note: there can only be one entry for each id in the SS_Info table
*/
static uint32_t get_ss_entry_number_from_ss_info_tbl(
struct bios_parser *bp,
@@ -1679,8 +1675,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param id, spread sprectrum info index
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * Return: BIOS parser result code
*/
static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
{
@@ -1696,8 +1693,9 @@ static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
* Ver 2.1 from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * Return: number of SS entries that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
struct bios_parser *bp,
@@ -1731,8 +1729,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
* the VBIOS that matches id
*
- * @param[in] id, spread sprectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * Return: number of SS entries that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
struct bios_parser *bp,
@@ -1767,10 +1766,11 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * Return: BIOS parser result code
+ * Note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
@@ -2197,13 +2197,10 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
}
/**
- * bios_parser_set_scratch_critical_state
- *
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * bios_parser_set_scratch_critical_state - update critical state
+ * bit in VBIOS scratch register
+ * @dcb: pointer to the DC BIOS
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
@@ -2222,7 +2219,7 @@ static void bios_parser_set_scratch_critical_state(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2372,7 +2369,7 @@ static enum bp_result get_integrated_info_v8(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2509,7 +2506,7 @@ static enum bp_result get_integrated_info_v9(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2585,7 +2582,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-enum bp_result update_slot_layout_info(
+static enum bp_result update_slot_layout_info(
struct dc_bios *dcb,
unsigned int i,
struct slot_layout_info *slot_layout_info,
@@ -2689,7 +2686,7 @@ enum bp_result update_slot_layout_info(
}
-enum bp_result get_bracket_layout_record(
+static enum bp_result get_bracket_layout_record(
struct dc_bios *dcb,
unsigned int bracket_layout_id,
struct slot_layout_info *slot_layout_info)
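The comment rewrites above move from Doxygen-style @param/@return tags to the kernel-doc layout. For reference, a minimal kernel-doc block for a hypothetical helper looks like the sketch below (the function name and parameters are illustrative, not part of this driver):

/**
 * example_get_entry_count() - count SS entries matching an id
 * @bp: pointer to the BIOS parser
 * @id: spread spectrum id
 *
 * Return: number of entries that match @id
 */
static uint32_t example_get_entry_count(struct bios_parser *bp, uint32_t id);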
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 670c26583817..9f9fda3118d1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -485,10 +485,11 @@ static struct atom_hpd_int_record *get_hpd_record(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * return: Bios parser result code
+ * note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
@@ -801,11 +802,11 @@ static enum bp_result get_ss_info_v4_2(
* ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: spread spectrum information structure
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -1196,13 +1197,11 @@ static bool bios_parser_is_accelerated_mode(
}
/**
- * bios_parser_set_scratch_critical_state
+ * bios_parser_set_scratch_critical_state - update critical state bit
+ * in VBIOS scratch register
*
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * @dcb: pointer to the DC BIOS
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 070459e3e407..afc10b954ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
cntl->enable_dp_audio);
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
cntl->enable_dp_audio));
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
* driver choose program it itself, i.e. here we program it
* to 888 by default.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
* driver choose program it itself, i.e. here we pass required
* target rate that includes deep color.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
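The two SetPixelClock switches above select the deep-color misc flags; the underlying requirement is that HDMI deep color scales the TMDS clock by bpp/24. A minimal sketch of that arithmetic, with a hypothetical helper name:

/* 148.5 MHz at 24 bpp becomes:
 *   30 bpp: 148500 * 30 / 24 = 185625 kHz  (x1.25)
 *   36 bpp: 148500 * 36 / 24 = 222750 kHz  (x1.5)
 *   48 bpp: 148500 * 48 / 24 = 297000 kHz  (x2.0)
 */
static unsigned int hdmi_deep_color_clock_khz(unsigned int pix_clk_khz,
                                              unsigned int bpp)
{
        return pix_clk_khz * bpp / 24;
}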
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 48b4ef03fc8f..5b77251e0590 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -114,18 +114,14 @@ bool dal_cmd_table_helper_controller_id_to_atom(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * dal_cmd_table_helper_transmitter_bp_to_atom - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * return: digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7736c92d55c4..455ee2be15a3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -128,18 +128,14 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * dal_cmd_table_helper_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * return: digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ef41b287cbe2..e633f8a51edb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -106,7 +106,6 @@ static void calculate_bandwidth(
bool lpt_enabled;
enum bw_defines sclk_message;
enum bw_defines yclk_message;
- enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
enum bw_defines tiling_mode[maximum_number_of_surfaces];
enum bw_defines surface_type[maximum_number_of_surfaces];
enum bw_defines voltage;
@@ -792,12 +791,8 @@ static void calculate_bandwidth(
data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
}
if (data->stereo_mode[i] == bw_def_top_bottom) {
- v_filter_init_mode[i] = bw_def_manual;
data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
}
- else {
- v_filter_init_mode[i] = bw_def_auto;
- }
if (data->stereo_mode[i] == bw_def_top_bottom) {
data->num_lines_at_frame_start = bw_int_to_fixed(1);
}
@@ -2730,7 +2725,7 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
}
-/**
+/*
* Compare calculated (required) clocks against the clocks available at
* maximum voltage (max Performance Level).
*/
@@ -3001,13 +2996,12 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
return true;
}
-/**
+/*
* Return:
* true - Display(s) configuration supported.
* In this case 'calcs_output' contains data for HW programming
* false - Display(s) configuration not supported (not enough bandwidth).
*/
-
bool bw_calcs(struct dc_context *ctx,
const struct bw_calcs_dceip *dceip,
const struct bw_calcs_vbios *vbios,
@@ -3028,7 +3022,7 @@ bool bw_calcs(struct dc_context *ctx,
calcs_output->all_displays_in_sync = false;
if (data->number_of_displays != 0) {
- uint8_t yclk_lvl, sclk_lvl;
+ uint8_t yclk_lvl;
struct bw_fixed high_sclk = vbios->high_sclk;
struct bw_fixed mid1_sclk = vbios->mid1_sclk;
struct bw_fixed mid2_sclk = vbios->mid2_sclk;
@@ -3049,7 +3043,6 @@ bool bw_calcs(struct dc_context *ctx,
calculate_bandwidth(dceip, vbios, data);
yclk_lvl = data->y_clk_level;
- sclk_lvl = data->sclk_level;
calcs_output->nbp_state_change_enable =
data->nbp_state_change_enable;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 75b8240ed059..e133edc587d3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -187,17 +187,6 @@ static void ramp_up_dispclk_with_dpp(
clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
-static bool is_mpo_enabled(struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count > 1)
- return true;
- }
- return false;
-}
-
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -295,22 +284,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- // Only increase clocks when display is active and MPO is enabled
- if (display_count && is_mpo_enabled(context)) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
- ((new_clocks->fclk_khz / 1000) * 101) / 100);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
- ((new_clocks->dcfclk_khz / 1000) * 101) / 100);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
- (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
- } else {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
- new_clocks->fclk_khz / 1000);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
- new_clocks->dcfclk_khz / 1000);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
- (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
- }
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
}
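The branch removed above bumped the hard minimum clocks by 1% when MPO was enabled, using integer arithmetic on kHz values. A sketch of that calculation (helper name hypothetical):

static int mpo_bumped_mhz(int khz)
{
        /* e.g. 1600000 kHz -> (1600 * 101) / 100 = 1616 MHz */
        return ((khz / 1000) * 101) / 100;
}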
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index f2114bc910bf..ec9dc265cde0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -257,8 +257,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 5b466f440d67..c7e5a64e06af 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -146,15 +146,15 @@ static noinline void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set D - MALL - SR enter and exit times adjusted for MALL */
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
+ bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index cfa8e02cf103..68942bbc7472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -103,7 +103,7 @@ int dcn301_smu_send_msg_with_param(
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
- result = dcn301_smu_wait_for_response(clk_mgr, 10, 1000);
+ result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK);
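The longer timeout above assumes the second and third arguments of dcn301_smu_wait_for_response() are the per-poll delay in microseconds and the retry count, so the worst-case wait grows from 10 us * 1000 = 10 ms to 10 us * 200000 = 2 s. A sketch of that polling pattern (the register-read helper is hypothetical):

static unsigned int poll_for_response(unsigned int delay_us,
                                      unsigned int max_retries)
{
        unsigned int res, tries = 0;

        do {
                res = read_response_register();  /* hypothetical read */
                if (res)
                        return res;
                udelay(delay_us);
        } while (++tries < max_retries);

        return 0;  /* timed out */
}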
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 991b9c5beaa3..aadb801447a7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -75,7 +75,8 @@ int vg_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 58eb0d69873a..8f8a13c7cf73 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -175,6 +175,8 @@ static bool create_links(
connectors_num = bios->funcs->get_connectors_number(bios);
+ DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
+
if (connectors_num > ENUM_ID_COUNT) {
dm_error(
"DC: Number of connectors %d exceeds maximum of %d!\n",
@@ -193,6 +195,8 @@ static bool create_links(
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
link_init_params.ctx = dc->ctx;
/* next BIOS object table connector */
link_init_params.connector_index = i;
@@ -201,30 +205,14 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- bool should_destory_link = false;
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (dc->config.edp_not_connected) {
- if (!IS_DIAG_DC(dc->ctx->dce_environment))
- should_destory_link = true;
- } else {
- enum dc_connection_type type;
- dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
- should_destory_link = true;
- }
- }
-
- if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
- } else {
- link_destroy(&link);
- }
}
}
+ DC_LOG_DC("BIOS object table - end");
+
for (i = 0; i < num_virtual_links; i++) {
struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
struct encoder_init_data enc_init = {0};
@@ -284,20 +272,16 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- *****************************************************************************
- * Function: dc_stream_adjust_vmin_vmax
+ * dc_stream_adjust_vmin_vmax:
*
- * @brief
- * Looks up the pipe context of dc_stream_state and updates the
- * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
- * Rate, which is a power-saving feature that targets reducing panel
- * refresh rate while the screen is static
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, which is a power-saving feature that targets reducing panel
+ * refresh rate while the screen is static
*
- * @param [in] dc: dc reference
- * @param [in] stream: Initial dc stream state
- * @param [in] adjust: Updated parameters for vertical_total_min and
- * vertical_total_max
- *****************************************************************************
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -355,6 +339,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
*
@@ -420,7 +405,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
+ * @r_cr: CRC value for the first of the 3 channels stored here.
+ * @g_y: CRC value for the second of the 3 channels stored here.
+ * @b_cb: CRC value for the third of the 3 channels stored here.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
* Return false if stream is not found, or if CRCs are not enabled.
@@ -707,7 +694,6 @@ static bool dc_construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
- dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
if (!dc_construct_ctx(dc, init_params)) {
@@ -757,6 +743,10 @@ static bool dc_construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
+ /* set i2c speed if not done by the respective dcnxxx_resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -764,8 +754,6 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif
- dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
-
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -803,7 +791,8 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
+ struct dc_stream_state *stream, bool lock)
{
int i = 0;
@@ -964,19 +953,15 @@ struct dc *dc_create(const struct dc_init_data *init_params)
struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
unsigned int full_pipe_count;
- if (NULL == dc)
- goto alloc_fail;
+ if (!dc)
+ return NULL;
if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
- if (false == dc_construct_ctx(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
} else {
- if (false == dc_construct(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct(dc, init_params))
+ goto destruct_dc;
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
@@ -1007,15 +992,36 @@ struct dc *dc_create(const struct dc_init_data *init_params)
return dc;
-construct_fail:
+destruct_dc:
+ dc_destruct(dc);
kfree(dc);
-
-alloc_fail:
return NULL;
}
+static void detect_edp_presence(struct dc *dc)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+ bool edp_sink_present = true;
+
+ if (!edp_link)
+ return;
+
+ if (dc->config.edp_not_connected) {
+ edp_sink_present = false;
+ } else {
+ enum dc_connection_type type;
+ dc_link_detect_sink(edp_link, &type);
+ if (type == dc_connection_none)
+ edp_sink_present = false;
+ }
+
+ edp_link->edp_sink_present = edp_sink_present;
+}
+
void dc_hardware_init(struct dc *dc)
{
+ detect_edp_presence(dc);
+
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
dc->hwss.init_hw(dc);
}
@@ -1493,7 +1499,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
enum dc_status result = DC_ERROR_UNEXPECTED;
int i;
- if (false == context_changed(dc, context))
+ if (!context_changed(dc, context))
return DC_OK;
DC_LOG_DC("%s: %d streams\n",
@@ -1540,7 +1546,7 @@ bool dc_acquire_release_mpc_3dlut(
if (found_pipe_idx) {
if (acquire && pool->funcs->acquire_post_bldn_3dlut)
ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
- else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
}
}
@@ -2016,7 +2022,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
-/**
+/*
* dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
@@ -2270,6 +2276,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
+
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -2366,6 +2375,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
+ if (stream_update->pending_test_pattern) {
+ dc_link_dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -2819,7 +2837,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
-/**
+/*
* dc_interrupt_set() - Enable/disable an AMD hw interrupt source
*/
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
@@ -2953,7 +2971,7 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
-/**
+/*
* dc_link_add_remote_sink() - Create a sink and attach it to an existing link
*
* EDID length is in bytes
@@ -3016,7 +3034,7 @@ fail_add_sink:
return NULL;
}
-/**
+/*
* dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
*
* Note that this just removes the struct dc_sink - it doesn't
@@ -3143,9 +3161,11 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
-bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
- struct dc_plane_state *plane)
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr)
{
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+ return true;
return false;
}
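The dc_create() rework above consolidates both failure paths into a single destruct_dc label, the usual kernel goto-unwind idiom. In minimal form (names hypothetical):

static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;

        if (!foo_construct(f))
                goto destruct;

        return f;

destruct:
        foo_destruct(f);
        kfree(f);
        return NULL;
}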
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e1071b2181f..fa5059f71727 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -203,9 +203,21 @@ static bool program_hpd_filter(const struct dc_link *link)
return result;
}
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+ link->dc->hwss.edp_wait_for_T12(link);
+
+ return true;
+ }
+
+ return false;
+}
+
/**
* dc_link_detect_sink() - Determine if there is a sink connected
*
+ * @link: pointer to the dc link
* @type: Returned connection type
* Does not detect downstream devices, such as MST sinks
* or display connected through active dongles
@@ -342,7 +354,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
return SIGNAL_TYPE_NONE;
}
-/**
+/*
* dc_link_is_dp_sink_present() - Check if there is a native DP
* or passive DP-HDMI dongle connected
*/
@@ -596,8 +608,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
dc_process_hdcp_msg(signal, link, &msg22);
if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
-
msg14.data = &link->hdcp_caps.bcaps.raw;
msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
@@ -605,7 +615,7 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
msg14.link = HDCP_LINK_PRIMARY;
msg14.max_retries = 5;
- status = dc_process_hdcp_msg(signal, link, &msg14);
+ dc_process_hdcp_msg(signal, link, &msg14);
}
}
@@ -830,7 +840,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return false;
}
-/**
+/*
* dc_link_detect() - Detect if a sink is attached to a given link
*
* link->local_sink is created or destroyed as needed.
@@ -1065,9 +1075,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
- if (link->local_sink->edid_caps.panel_patch.disable_fec)
- link->ctx->dc->debug.disable_fec = true;
-
// Check if edid is the same
if ((prev_sink) &&
(edid_status == EDID_THE_SAME || edid_status == EDID_OK))
@@ -1366,13 +1373,17 @@ static bool dc_link_construct(struct dc_link *link,
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
struct panel_cntl_init_data panel_cntl_init_data = { 0 };
- struct integrated_info info = {{{ 0 }}};
+ struct integrated_info *info;
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
DC_LOGGER_INIT(dc_ctx->logger);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto create_fail;
+
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1390,10 +1401,12 @@ static bool dc_link_construct(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
+ DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
}
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
@@ -1408,10 +1421,14 @@ static bool dc_link_construct(struct dc_link *link,
link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
link->ctx->gpio_service);
+
if (link->hpd_gpio) {
dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
dal_gpio_unlock_pin(link->hpd_gpio);
link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
+
+ DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
+ DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
}
switch (link->link_id.id) {
@@ -1470,6 +1487,11 @@ static bool dc_link_construct(struct dc_link *link,
goto ddc_create_fail;
}
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
link->ddc_hw_inst =
dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
@@ -1508,6 +1530,8 @@ static bool dc_link_construct(struct dc_link *link,
goto link_enc_create_fail;
}
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+
link->link_enc_hw_inst = link->link_enc->transmitter;
for (i = 0; i < 4; i++) {
@@ -1530,16 +1554,20 @@ static bool dc_link_construct(struct dc_link *link,
if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
link->connector_signal == SIGNAL_TYPE_RGB)
continue;
+
+ DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
break;
}
if (bios->integrated_info)
- info = *bios->integrated_info;
+ memcpy(info, bios->integrated_info, sizeof(*info));
/* Look for channel mapping corresponding to connector and device tag */
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
- &info.ext_disp_conn_info.path[i];
+ &info->ext_disp_conn_info.path[i];
if (path->device_connector_id.enum_id == link->link_id.enum_id &&
path->device_connector_id.id == link->link_id.id &&
@@ -1548,10 +1576,14 @@ static bool dc_link_construct(struct dc_link *link,
path->device_acpi_enum == link->device_tag.acpi_device) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
} else if (path->device_tag ==
link->device_tag.dev_id.raw_device_tag) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
}
break;
}
@@ -1570,6 +1602,7 @@ static bool dc_link_construct(struct dc_link *link,
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
device_tag_fail:
link->link_enc->funcs->destroy(&link->link_enc);
@@ -1586,6 +1619,9 @@ create_fail:
link->hpd_gpio = NULL;
}
+ DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+ kfree(info);
+
return false;
}
@@ -2487,9 +2523,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
@@ -3128,17 +3169,17 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
if (cp_psp && cp_psp->funcs.update_stream_config) {
- struct cp_psp_stream_config config;
-
- memset(&config, 0, sizeof(config));
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- /*stream_enc_inst*/
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
- config.mst_supported = (pipe_ctx->stream->signal ==
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
+ config.mst_enabled = (pipe_ctx->stream->signal ==
SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
@@ -3391,10 +3432,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
}
/**
- *****************************************************************************
- * Function: dc_link_enable_hpd_filter
- *
- * @brief
+ * dc_link_enable_hpd_filter:
* If enable is true, programs HPD filter on associated HPD line using
* delay_on_disconnect/delay_on_connect values dependent on
* link->connector_signal
@@ -3402,9 +3440,8 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
* If enable is false, programs HPD filter on associated HPD line with no
* delays on connect or disconnect
*
- * @param [in] link: pointer to the dc link
- * @param [in] enable: boolean specifying whether to enable hbd
- *****************************************************************************
+ * @link: pointer to the dc link
+ * @enable: boolean specifying whether to enable hpd
*/
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
@@ -3630,7 +3667,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
+ if (dc_link_should_enable_fec(link)) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3651,8 +3688,8 @@ uint32_t dc_link_bandwidth_kbps(
* but the difference is minimal and is in a safe direction,
* which all works well around potential ambiguity of DP 1.4a spec.
*/
- link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
- link_bw_kbps, 32);
+ long long fec_link_bw_kbps = link_bw_kbps * 970LL;
+ link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
}
return link_bw_kbps;
@@ -3682,3 +3719,19 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
}
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ bool is_fec_disable = false;
+ bool ret = false;
+
+ if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.disable_fec) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+ is_fec_disable = true;
+
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
+ ret = true;
+
+ return ret;
+}
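A worked example of the FEC scaling above: an HBR2 x4 link carries 540000 kHz * 8 bits * 4 lanes = 17280000 kbps, and 17280000 * 970 / 1000 = 16761600 kbps remain for the stream after FEC overhead. The div64 form below is equivalent to the replaced fixed-point multiply (sketch only):

static uint32_t fec_adjusted_bw_kbps(uint32_t link_bw_kbps)
{
        long long fec_link_bw_kbps = link_bw_kbps * 970LL;

        return (uint32_t)div64_s64(fec_link_bw_kbps, 1000LL);
}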
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index c5936e064360..ae6484ab567b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -37,12 +37,16 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
-/*DP to Dual link DVI converter*/
+#define DC_LOGGER_INIT(logger)
+
+static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";
+/* DP to Dual link DVI converter */
static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2";
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
/* CV smart dongle slave address for retrieving supported HDTV modes*/
@@ -194,6 +198,10 @@ static void ddc_service_construct(
if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
ddc_service->ddc_pin = NULL;
} else {
+ DC_LOGGER_INIT(ddc_service->ctx->logger);
+ DC_LOG_DC("BIOS object table - i2c_line: %d", i2c_info.i2c_line);
+ DC_LOG_DC("BIOS object table - i2c_engine_id: %d", i2c_info.i2c_engine_id);
+
hw_info.ddc_channel = i2c_info.i2c_line;
if (ddc_service->link != NULL)
hw_info.hw_supported = i2c_info.i2c_hw_assist;
@@ -286,6 +294,15 @@ static uint32_t defer_delay_converter_wa(
{
struct dc_link *link = ddc->link;
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
+ !memcmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_DONGLE_BRANCH_DEV_NAME,
+ sizeof(link->dpcd_caps.branch_dev_name)))
+
+ return defer_delay > DPVGA_DONGLE_AUX_DEFER_WA_DELAY ?
+ defer_delay : DPVGA_DONGLE_AUX_DEFER_WA_DELAY;
+
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
!memcmp(link->dpcd_caps.branch_dev_name,
DP_DVI_CONVERTER_ID_4,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 2fc12239b22c..c1391bfb7a9b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -126,9 +126,7 @@ static void dpcd_set_training_pattern(
static enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings)
{
- enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
-
- return pattern;
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
}
static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
@@ -892,13 +890,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
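The remapped values above follow the DPCD TRAINING_AUX_RD_INTERVAL encoding (01h = 4 ms, 02h = 8 ms, 03h = 12 ms, 04h = 16 ms, with 00h keeping the 400 us default). The same translation as a lookup table, sketched:

static const uint32_t aux_rd_interval_us_tbl[] = {
        400,    /* 0x00: default */
        4000,   /* 0x01 */
        8000,   /* 0x02 */
        12000,  /* 0x03 */
        16000,  /* 0x04 */
};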
@@ -2399,6 +2397,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
@@ -3045,14 +3046,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
@@ -3707,7 +3708,7 @@ bool detect_dp_sink_caps(struct dc_link *link)
/* TODO save sink caps in link->sink */
}
-enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
{
enum dc_link_rate link_rate;
// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
@@ -3992,7 +3993,7 @@ bool dc_link_dp_set_test_pattern(
unsigned int cust_pattern_size)
{
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
+ struct pipe_ctx *pipe_ctx = NULL;
unsigned int lane;
unsigned int i;
unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4003,18 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
+ if (pipe_ctx == NULL)
+ return false;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@@ -4339,7 +4346,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4374,7 +4381,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4400,24 +4407,39 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
- enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED;
- struct dpcd_amd_signature amd_signature;
- amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
- amd_signature.device_id_byte1 =
+ enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
+ struct dpcd_amd_signature amd_signature = {0};
+ struct dpcd_amd_device_id amd_device_id = {0};
+
+ amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
- amd_signature.device_id_byte2 =
+ amd_device_id.device_id_byte2 =
(uint8_t)(link->ctx->asic_id.chip_id >> 8);
- memset(&amd_signature.zero, 0, 4);
- amd_signature.dce_version =
+ amd_device_id.dce_version =
(uint8_t)(link->ctx->dce_version);
- amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
- amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
- core_link_write_dpcd(link, DP_SOURCE_OUI,
+ core_link_read_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
(uint8_t *)(&amd_signature),
sizeof(amd_signature));
+ }
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
+ (uint8_t *)(&amd_device_id),
+ sizeof(amd_device_id));
if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
link->dc->caps.min_horizontal_blanking_period != 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 07c22556480b..0c26c2ade782 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1117,7 +1117,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
* original h_border_left value in its calculation.
*/
-int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
{
int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
@@ -1128,8 +1128,8 @@ int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
return store_h_border_left;
}
-void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
- int store_h_border_left)
+static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
{
pipe_ctx->stream->dst.x -= store_h_border_left;
pipe_ctx->stream->timing.h_border_left = store_h_border_left;
@@ -1153,8 +1153,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
- pipe_ctx->plane_res.scl_data.viewport.width < 12) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
if (store_h_border_left) {
restore_border_left_from_dst(pipe_ctx,
store_h_border_left);
@@ -1697,7 +1697,7 @@ static bool are_stream_backends_same(
return true;
}
-/**
+/*
* dc_is_stream_unchanged() - Compare two stream states for equivalence.
*
* Checks if there a difference between the two states
@@ -1718,7 +1718,7 @@ bool dc_is_stream_unchanged(
return true;
}
-/**
+/*
* dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
*/
bool dc_is_stream_scaling_unchanged(
@@ -1833,7 +1833,7 @@ static struct audio *find_first_free_audio(
return 0;
}
-/**
+/*
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_add_stream_to_ctx(
@@ -1860,7 +1860,7 @@ enum dc_status dc_add_stream_to_ctx(
return res;
}
-/**
+/*
* dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
*/
enum dc_status dc_remove_stream_from_ctx(
@@ -2075,6 +2075,20 @@ static int acquire_resource_from_hw_enabled_state(
return -1;
}
+static void mark_seamless_boot_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+}
+
enum dc_status resource_map_pool_resources(
const struct dc *dc,
struct dc_state *context,
@@ -2085,22 +2099,20 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
- struct dc_bios *dcb = dc->ctx->dc_bios;
calculate_phy_pix_clks(stream);
- /* TODO: Check Linux */
- if (dc->config.allow_seamless_boot_optimization &&
- !dcb->funcs->is_accelerated_mode(dcb)) {
- if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
- stream->apply_seamless_boot_optimization = true;
- }
+ mark_seamless_boot_stream(dc, stream);
- if (stream->apply_seamless_boot_optimization)
+ if (stream->apply_seamless_boot_optimization) {
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
pool,
stream);
+ if (pipe_idx < 0)
+ /* hw resource was assigned to another stream */
+ stream->apply_seamless_boot_optimization = false;
+ }
if (pipe_idx < 0)
/* acquire new resources */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c103f858375d..25fa712a7847 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -244,7 +244,7 @@ struct dc_stream_status *dc_stream_get_status(
}
#ifndef TRIM_FSFT
-/**
+/*
* dc_optimize_timing_for_fsft() - dc to optimize timing
*/
bool dc_optimize_timing_for_fsft(
@@ -260,8 +260,7 @@ bool dc_optimize_timing_for_fsft(
}
#endif
-
-/**
+/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 3d7d27435f15..e6b9c6a71841 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -115,7 +115,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
-/**
+/*
*****************************************************************************
* Function: dc_plane_get_status
*
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aedadb34548..4eee3a55fa30 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,12 +42,13 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.116"
+#define DC_VER "3.2.122"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
+#define MIN_VIEWPORT_SIZE 12
/*******************************************************************************
* Display Core Interfaces
@@ -171,6 +172,9 @@ struct dc_caps {
bool dmcub_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
+ unsigned int mall_size_per_mem_channel;
+ unsigned int mall_size_total;
+ unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
};
@@ -481,7 +485,6 @@ struct dc_debug_options {
bool performance_trace;
bool az_endpoint_mute_only;
bool always_use_regamma;
- bool p010_mpo_support;
bool recovery_enabled;
bool avoid_vbios_exec_table;
bool scl_reset_length10;
@@ -499,6 +502,9 @@ struct dc_debug_options {
bool dmcub_emulation;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_idle_power_optimizations;
+ unsigned int mall_size_override;
+ unsigned int mall_additional_timer_percent;
+ bool mall_error_as_fatal;
#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
@@ -521,7 +527,6 @@ struct dc_debug_options {
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
bool enable_dram_clock_change_one_display_vactive;
- bool force_ignore_link_settings;
union mem_low_power_enable_options enable_mem_low_power;
};
@@ -633,7 +638,6 @@ struct dc {
const char *build_id;
struct vm_helper *vm_helper;
- const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
};
enum frame_buffer_mode {
@@ -671,16 +675,10 @@ struct dc_init_data {
struct dc_config flags;
uint64_t log_mask;
- /**
- * gpu_info FW provided soc bounding box struct or 0 if not
- * available in FW
- */
- const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
struct dpcd_vendor_signature vendor_signature;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool force_smu_not_present;
#endif
- bool force_ignore_link_settings;
};
struct dc_callback_init {
@@ -1269,8 +1267,8 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
- struct dc_plane_state *plane);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 80a2191a3115..cc6fb838420e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -451,6 +451,9 @@ struct dpcd_amd_signature {
uint8_t AMD_IEEE_TxSignature_byte1;
uint8_t AMD_IEEE_TxSignature_byte2;
uint8_t AMD_IEEE_TxSignature_byte3;
+};
+
+struct dpcd_amd_device_id {
uint8_t device_id_byte1;
uint8_t device_id_byte2;
uint8_t zero[4];
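
For context: the device-id bytes were split out of struct dpcd_amd_signature into their own struct dpcd_amd_device_id so the signature and the device id can be validated independently. The two layouts remain back-to-back in DPCD, so one read can still fill both; a sketch, where read_dpcd() and AMD_VSC_DPCD_ADDR are stand-ins for the caller's DPCD accessor and the vendor-specific offset:

    struct dpcd_amd_signature sig;
    struct dpcd_amd_device_id dev_id;
    uint8_t buf[sizeof(sig) + sizeof(dev_id)];

    if (read_dpcd(link, AMD_VSC_DPCD_ADDR, buf, sizeof(buf)) == 0) {
            memcpy(&sig, buf, sizeof(sig));                     /* OUI + signature */
            memcpy(&dev_id, buf + sizeof(sig), sizeof(dev_id)); /* device id */
    }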
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
new file mode 100644
index 000000000000..0db5b49e9d5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_dmcu.h"
+#include "dc_edid_parser.h"
+
+bool dc_edid_parser_send_cea(struct dc *dc,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->send_edid_cea) {
+ return dmcu->funcs->send_edid_cea(dmcu,
+ offset,
+ total_length,
+ data,
+ length);
+ }
+
+ return false;
+}
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->recv_edid_cea_ack) {
+ return dmcu->funcs->recv_edid_cea_ack(dmcu, offset);
+ }
+
+ return false;
+}
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->recv_amd_vsdb) {
+ return dmcu->funcs->recv_amd_vsdb(dmcu,
+ version,
+ min_frame_rate,
+ max_frame_rate);
+ }
+
+ return false;
+}
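
For context: each of these entry points simply forwards to the DMCU when it is initialized and implements the corresponding hook, so callers need no DMCU knowledge. A hedged usage sketch that pushes a 128-byte CEA extension block in 8-byte chunks (the per-message limit enforced by dcn10_send_edid_cea() further down); dc and edid_cea are hypothetical:

    int offset = 0, total = 128;

    while (offset < total) {
            int chunk = min(8, total - offset);

            if (!dc_edid_parser_send_cea(dc, offset, total,
                                         edid_cea + offset, chunk))
                    break;
            if (dc_edid_parser_recv_cea_ack(dc, &offset))
                    offset += chunk;        /* ACK: advance */
            /* on NACK, recv_cea_ack() stored the retry offset */
    }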
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
new file mode 100644
index 000000000000..da67ec06f0a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_EDID_PARSER_H_
+#define _DC_EDID_PARSER_H_
+
+#include "core_types.h"
+
+bool dc_edid_parser_send_cea(struct dc *dc,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length);
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset);
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate);
+
+#endif /* _DC_EDID_PARSER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 57edb25fc381..a612ba6dc389 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -34,6 +34,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "reg_helper.h"
static inline void submit_dmub_read_modify_write(
struct dc_reg_helper_state *offload,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 701aa7178a89..b41e6367b15e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -71,6 +71,7 @@ struct dc_plane_address {
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC cursor_cache_addr;
PHYSICAL_ADDRESS_LOC meta_addr;
union large_integer dcc_const_color;
} grph;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 6d9a60c9dcc0..e189f16bc026 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -103,6 +103,8 @@ struct dc_link {
bool lttpr_non_transparent_mode;
bool is_internal_display;
+ bool edp_sink_present;
+
/* caps is the same as reported_link_cap. link_training uses

* reported_link_cap. Will clean up. TODO
*/
@@ -259,6 +261,13 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+/*
+ * On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in the power-off state, this function returns
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
@@ -369,5 +378,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
bool dc_link_is_fec_supported(const struct dc_link *link);
+bool dc_link_should_enable_fec(const struct dc_link *link);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b7910976b81a..80b67b860091 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -130,6 +130,14 @@ union stream_update_flags {
uint32_t raw;
};
+struct test_pattern {
+ enum dp_test_pattern type;
+ enum dp_test_pattern_color_space color_space;
+ struct link_training_settings const *p_link_settings;
+ unsigned char const *p_custom_pattern;
+ unsigned int cust_pattern_size;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
@@ -227,6 +235,8 @@ struct dc_stream_state {
uint32_t stream_id;
bool is_dsc_enabled;
+
+ struct test_pattern test_pattern;
union stream_update_flags update_flags;
};
@@ -261,6 +271,7 @@ struct dc_stream_update {
struct dc_dsc_config *dsc_config;
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
+ struct test_pattern *pending_test_pattern;
};
bool dc_is_stream_unchanged(
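
For context: the new pending_test_pattern member lets a DP test-pattern request travel through the regular stream-update path instead of a side channel. A sketch of a caller, assuming dc_commit_updates_for_stream() and using illustrative enum values; dc, stream and state are hypothetical:

    struct test_pattern tp = {
            .type = DP_TEST_PATTERN_COLOR_SQUARES,
            .color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB,
    };
    struct dc_stream_update update = {
            .pending_test_pattern = &tp,
    };

    dc_commit_updates_for_stream(dc, NULL, 0, stream, &update, state);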
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 2a2a0fdb9253..7866cf2a668f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -868,7 +868,7 @@ void dce_aud_wall_dto_setup(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_aud_wall_dto_setup(
+static void dce60_aud_wall_dto_setup(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index cda5fd0464bc..d51b5fe91287 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -388,12 +388,6 @@ static enum aux_channel_operation_result get_channel_status(
}
}
-enum i2caux_engine_type get_engine_type(
- const struct dce_aux *engine)
-{
- return I2CAUX_ENGINE_TYPE_AUX;
-}
-
static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
@@ -582,7 +576,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
*operation_result = get_channel_status(aux_engine, &returned_bytes);
if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
- int bytes_replied = 0;
+ int __maybe_unused bytes_replied = 0;
bytes_replied = read_channel_reply(aux_engine, payload->length,
payload->data, payload->reply,
&status);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index 382465862f29..277484cf853e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -124,7 +124,6 @@ struct dce110_aux_registers {
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
- AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index fb733f573715..dec58b3c42e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -113,20 +113,19 @@ static const struct spread_spectrum_data *get_ss_data_entry(
}
/**
- * Function: calculate_fb_and_fractional_fb_divider
+ * calculate_fb_and_fractional_fb_divider - Calculates feedback and fractional
+ * feedback divider values
*
- * * DESCRIPTION: Calculates feedback and fractional feedback dividers values
+ * @calc_pll_cs: Pointer to clock source information
+ * @target_pix_clk_100hz: Desired frequency in 100 Hz
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @feedback_divider_param: Pointer where the calculated
+ * feedback divider value is stored
+ * @fract_feedback_divider_param: Pointer where the calculated
+ * fractional feedback divider value is stored
*
- *PARAMETERS:
- * targetPixelClock Desired frequency in 100 Hz
- * ref_divider Reference divider (already known)
- * postDivider Post Divider (already known)
- * feedback_divider_param Pointer where to store
- * calculated feedback divider value
- * fract_feedback_divider_param Pointer where to store
- * calculated fract feedback divider value
- *
- *RETURNS:
+ * return:
* It fills the locations pointed by feedback_divider_param
* and fract_feedback_divider_param
* It returns - true if feedback divider not 0
@@ -175,22 +174,22 @@ static bool calculate_fb_and_fractional_fb_divider(
}
/**
-*calc_fb_divider_checking_tolerance
-*
-*DESCRIPTION: Calculates Feedback and Fractional Feedback divider values
-* for passed Reference and Post divider, checking for tolerance.
-*PARAMETERS:
-* pll_settings Pointer to structure
-* ref_divider Reference divider (already known)
-* postDivider Post Divider (already known)
-* tolerance Tolerance for Calculated Pixel Clock to be within
-*
-*RETURNS:
-* It fills the PLLSettings structure with PLL Dividers values
-* if calculated values are within required tolerance
-* It returns - true if error is within tolerance
-* - false if error is not within tolerance
-*/
+ * calc_fb_divider_checking_tolerance - Calculates Feedback and
+ * Fractional Feedback divider values
+ * for the passed Reference and Post divider,
+ * checking for tolerance.
+ * @calc_pll_cs: Pointer to clock source information
+ * @pll_settings: Pointer to PLL settings
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @tolerance: Tolerance for Calculated Pixel Clock to be within
+ *
+ * return:
+ * It fills the PLLSettings structure with PLL divider values
+ * if calculated values are within required tolerance
+ * It returns - true if error is within tolerance
+ * - false if error is not within tolerance
+ */
static bool calc_fb_divider_checking_tolerance(
struct calc_pll_clock_source *calc_pll_cs,
struct pll_settings *pll_settings,
@@ -241,7 +240,7 @@ static bool calc_fb_divider_checking_tolerance(
pll_settings->calculated_pix_clk_100hz =
actual_calculated_clock_100hz;
pll_settings->vco_freq =
- actual_calculated_clock_100hz * post_divider / 10;
+ div_u64((u64)actual_calculated_clock_100hz * post_divider, 10);
return true;
}
return false;
@@ -460,7 +459,7 @@ static bool pll_adjust_pix_clk(
return false;
}
-/**
+/*
* Calculate PLL Dividers for given Clock Value.
* First will call VBIOS Adjust Exec table to check if requested Pixel clock
* will be Adjusted based on usage.
@@ -871,6 +870,20 @@ static bool dce110_program_pix_clk(
bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
pll_settings->use_external_clk;
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
+ break;
+ case COLOR_DEPTH_121212:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
+ break;
+ case COLOR_DEPTH_161616:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
+ break;
+ default:
+ break;
+ }
+
if (clk_src->bios->funcs->set_pixel_clock(
clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
return false;
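
For context: deep-color modes clock the transmitter faster, so the VBIOS SetPixelClock call needs the color depth; unlisted depths (e.g. 8 bpc) keep the VBIOS default. The same mapping as a standalone helper, sketched under the assumption that the enum names above (and a TRANSMITTER_COLOR_DEPTH_24 default) exist:

    static enum transmitter_color_depth to_tx_depth(enum dc_color_depth d)
    {
            switch (d) {
            case COLOR_DEPTH_101010: return TRANSMITTER_COLOR_DEPTH_30;
            case COLOR_DEPTH_121212: return TRANSMITTER_COLOR_DEPTH_36;
            case COLOR_DEPTH_161616: return TRANSMITTER_COLOR_DEPTH_48;
            default:                 return TRANSMITTER_COLOR_DEPTH_24;
            }
    }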
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f3ed8b619caf..ddc789daf3b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -57,6 +57,9 @@
#define MCP_SYNC_PHY_LOCK 0x90
#define MCP_SYNC_PHY_UNLOCK 0x91
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
+#define MCP_SEND_EDID_CEA 0xA0
+#define EDID_CEA_CMD_ACK 1
+#define EDID_CEA_CMD_NACK 2
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
// PSP FW version
@@ -65,13 +68,17 @@
//Register access policy version
#define mmMP0_SMN_C2PMSG_91 0x1609B
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static const uint32_t abm_gain_stepsize = 0x0060;
+#endif
+
static bool dce_dmcu_init(struct dmcu *dmcu)
{
// Do nothing
return true;
}
-bool dce_dmcu_load_iram(struct dmcu *dmcu,
+static bool dce_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
const char *src,
unsigned int bytes)
@@ -807,6 +814,120 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
return true;
}
+static bool dcn10_send_edid_cea(struct dmcu *dmcu,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t header, data1, data2;
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+
+ if (length > 8 || length <= 0)
+ return false;
+
+ header = ((uint32_t)offset & 0xFFFF) << 16 | (total_length & 0xFFFF);
+ data1 = (((uint32_t)data[0]) << 24) | (((uint32_t)data[1]) << 16) |
+ (((uint32_t)data[2]) << 8) | ((uint32_t)data[3]);
+ data2 = (((uint32_t)data[4]) << 24) | (((uint32_t)data[5]) << 16) |
+ (((uint32_t)data[6]) << 8) | ((uint32_t)data[7]);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SEND_EDID_CEA);
+
+ REG_WRITE(MASTER_COMM_DATA_REG1, header);
+ REG_WRITE(MASTER_COMM_DATA_REG2, data1);
+ REG_WRITE(MASTER_COMM_DATA_REG3, data2);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
+}
+
+static bool dcn10_get_scp_results(struct dmcu *dmcu,
+ uint32_t *cmd,
+ uint32_t *data1,
+ uint32_t *data2,
+ uint32_t *data3)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+
+ *cmd = REG_READ(SLAVE_COMM_CMD_REG);
+ *data1 = REG_READ(SLAVE_COMM_DATA_REG1);
+ *data2 = REG_READ(SLAVE_COMM_DATA_REG2);
+ *data3 = REG_READ(SLAVE_COMM_DATA_REG3);
+
+ /* clear SCP interrupt */
+ REG_UPDATE(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, 0);
+
+ return true;
+}
+
+static bool dcn10_recv_amd_vsdb(struct dmcu *dmcu,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate)
+{
+ uint32_t data[4];
+ int cmd, ack, len;
+
+ if (!dcn10_get_scp_results(dmcu, &data[0], &data[1], &data[2], &data[3]))
+ return false;
+
+ cmd = data[0] & 0x3FF;
+ len = (data[0] >> 10) & 0x3F;
+ ack = data[1];
+
+ if (cmd != MCP_SEND_EDID_CEA || ack != EDID_CEA_CMD_ACK || len != 12)
+ return false;
+
+ if ((data[2] & 0xFF)) {
+ *version = (data[2] >> 8) & 0xFF;
+ *min_frame_rate = (data[3] >> 16) & 0xFFFF;
+ *max_frame_rate = data[3] & 0xFFFF;
+ return true;
+ }
+
+ return false;
+}
+
+static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset)
+{
+ uint32_t data[4];
+ int cmd, ack;
+
+ if (!dcn10_get_scp_results(dmcu,
+ &data[0], &data[1], &data[2], &data[3]))
+ return false;
+
+ cmd = data[0] & 0x3FF;
+ ack = data[1];
+
+ if (cmd != MCP_SEND_EDID_CEA)
+ return false;
+
+ if (ack == EDID_CEA_CMD_ACK)
+ return true;
+
+ *offset = data[2]; /* nack */
+ return false;
+}
+
#endif //(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dce_funcs = {
@@ -829,6 +950,9 @@ static const struct dmcu_funcs dcn10_funcs = {
.get_psr_state = dcn10_get_dmcu_psr_state,
.set_psr_wait_loop = dcn10_psr_wait_loop,
.get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .send_edid_cea = dcn10_send_edid_cea,
+ .recv_amd_vsdb = dcn10_recv_amd_vsdb,
+ .recv_edid_cea_ack = dcn10_recv_edid_cea_ack,
.is_dmcu_initialized = dcn10_is_dmcu_initialized
};
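
For context: the master mailbox packs the chunk offset and total block length into one 32-bit header and the up-to-8 payload bytes, MSB-first, into two data registers; replies come back on the new SLAVE_COMM registers read by dcn10_get_scp_results(). A worked example of the packing for an 8-byte chunk at offset 16 of a 128-byte block:

    /* header = (offset << 16) | total_length
     *        = (16 << 16) | 128 = 0x00100080
     * data1  = bytes 0..3 packed MSB-first, data2 = bytes 4..7 */
    uint32_t header = ((uint32_t)(16 & 0xFFFF) << 16) | (128 & 0xFFFF);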
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 93e7f34d4775..ff726b35ef6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -40,6 +40,10 @@
SR(MASTER_COMM_DATA_REG3), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_CNTL_REG), \
+ SR(SLAVE_COMM_DATA_REG1), \
+ SR(SLAVE_COMM_DATA_REG2), \
+ SR(SLAVE_COMM_DATA_REG3), \
+ SR(SLAVE_COMM_CMD_REG), \
SR(DMCU_IRAM_RD_CTRL), \
SR(DMCU_IRAM_RD_DATA), \
SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
@@ -112,6 +116,7 @@
DMCU_SF(MASTER_COMM_CMD_REG, \
MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, mask_sh), \
DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
@@ -179,6 +184,7 @@
type UC_IN_RESET; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_INTERRUPT; \
+ type SLAVE_COMM_INTERRUPT; \
type DPHY_RX_FAST_TRAINING_CAPABLE; \
type DPHY_LOAD_BS_COUNT; \
type STATIC_SCREEN1_INT_TO_UC_EN; \
@@ -211,6 +217,11 @@ struct dce_dmcu_registers {
uint32_t MASTER_COMM_DATA_REG3;
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t SLAVE_COMM_DATA_REG1;
+ uint32_t SLAVE_COMM_DATA_REG2;
+ uint32_t SLAVE_COMM_DATA_REG3;
+ uint32_t SLAVE_COMM_CMD_REG;
+ uint32_t SLAVE_COMM_CNTL_REG;
uint32_t DMCU_IRAM_RD_CTRL;
uint32_t DMCU_IRAM_RD_DATA;
uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
@@ -317,6 +328,4 @@ struct dmcu *dcn21_dmcu_create(
void dce_dmcu_destroy(struct dmcu **dmcu);
-static const uint32_t abm_gain_stepsize = 0x0060;
-
#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 7fbd92fbc63a..a524f471e0d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -435,7 +435,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t timeout,
enum i2c_channel_operation_result expected_result)
@@ -502,7 +502,7 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-bool dce_i2c_hw_engine_submit_payload(
+static bool dce_i2c_hw_engine_submit_payload(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *payload,
bool middle_of_transaction,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index 87d8428df6c4..6846afd83701 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -339,7 +339,7 @@ static bool start_sync_sw(
return false;
}
-void dce_i2c_sw_engine_set_speed(
+static void dce_i2c_sw_engine_set_speed(
struct dce_i2c_sw *engine,
uint32_t speed)
{
@@ -353,7 +353,7 @@ void dce_i2c_sw_engine_set_speed(
engine->clock_delay = 12;
}
-bool dce_i2c_sw_engine_acquire_engine(
+static bool dce_i2c_sw_engine_acquire_engine(
struct dce_i2c_sw *engine,
struct ddc *ddc)
{
@@ -397,7 +397,7 @@ bool dce_i2c_engine_acquire_sw(
-void dce_i2c_sw_engine_submit_channel_request(
+static void dce_i2c_sw_engine_submit_channel_request(
struct dce_i2c_sw *engine,
struct i2c_request_transaction_data *req)
{
@@ -440,7 +440,8 @@ void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_SUCCEEDED :
I2C_CHANNEL_OPERATION_FAILED;
}
-bool dce_i2c_sw_engine_submit_payload(
+
+static bool dce_i2c_sw_engine_submit_payload(
struct dce_i2c_sw *engine,
struct i2c_payload *payload,
bool middle_of_transaction)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 210466b2d863..1e77ffee71b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1197,7 +1197,7 @@ void dce110_link_encoder_enable_dp_mst_output(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* enables DP PHY output */
-void dce60_link_encoder_enable_dp_output(
+static void dce60_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1236,7 +1236,7 @@ void dce60_link_encoder_enable_dp_output(
}
/* enables DP PHY output in MST mode */
-void dce60_link_encoder_enable_dp_mst_output(
+static void dce60_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1426,7 +1426,7 @@ void dce110_link_encoder_dp_set_phy_pattern(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* set DP PHY test and training patterns */
-void dce60_link_encoder_dp_set_phy_pattern(
+static void dce60_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
const struct encoder_set_dp_phy_pattern_param *param)
{
@@ -1503,7 +1503,6 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1604,7 +1603,7 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index e459ae65aaf7..4600231da6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -97,7 +97,7 @@ enum {
-/**
+/*
* set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -142,7 +142,7 @@ static void set_truncation(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -183,7 +183,7 @@ static void dce60_set_truncation(
}
#endif
-/**
+/*
* set_spatial_dither
* 1) set spatial dithering mode: pattern of seed
* 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
@@ -291,7 +291,7 @@ static void set_spatial_dither(
FMT_SPATIAL_DITHER_EN, 1);
}
-/**
+/*
* SetTemporalDither (Frame Modulation)
* 1) set temporal dither depth
* 2) select pattern: from hard-coded pattern or programmable pattern
@@ -355,7 +355,7 @@ static void set_temporal_dither(
FMT_TEMPORAL_DITHER_EN, 1);
}
-/**
+/*
* Set Clamping
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -415,7 +415,7 @@ void dce110_opp_set_clamping(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* Set Clamping for DCE6 parts
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -424,7 +424,7 @@ void dce110_opp_set_clamping(
* 7 for programable
* 2) Enable clamp if Limited range requested
*/
-void dce60_opp_set_clamping(
+static void dce60_opp_set_clamping(
struct dce110_opp *opp110,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -465,7 +465,7 @@ void dce60_opp_set_clamping(
}
#endif
-/**
+/*
* set_pixel_encoding
*
* Set Pixel Encoding
@@ -501,7 +501,7 @@ static void set_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_pixel_encoding
* DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg
* Set Pixel Encoding
@@ -545,7 +545,7 @@ void dce110_opp_program_bit_depth_reduction(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_bit_depth_reduction(
+static void dce60_opp_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
{
@@ -568,7 +568,7 @@ void dce110_opp_program_clamping_and_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_clamping_and_pixel_encoding(
+static void dce60_opp_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -678,7 +678,7 @@ void dce110_opp_program_fmt(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_fmt(
+static void dce60_opp_program_fmt(
struct output_pixel_processor *opp,
struct bit_depth_reduction_params *fmt_bit_depth,
struct clamping_and_pixel_encoding_params *clamping)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index 4d484ef60f35..bf1ffc3629c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,7 +111,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
@@ -219,7 +218,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index 761fdfc1f5bd..e92339235863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -50,16 +50,16 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
{
uint64_t current_backlight;
uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period, bl_int_count;
+ uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_READ(BL_PWM_CNTL);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index ada57f745fd7..8d4263da59f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -67,7 +67,6 @@ static void dce110_update_generic_info_packet(
uint32_t packet_index,
const struct dc_info_packet *info_packet)
{
- uint32_t regval;
/* TODOFPGA Figure out a proper number for max_retries polling for lock
* use 50 for now.
*/
@@ -99,7 +98,7 @@ static void dce110_update_generic_info_packet(
}
/* choose which generic packet to use */
{
- regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_READ(AFMT_VBI_PACKET_CONTROL);
REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
AFMT_GENERIC_INDEX, packet_index);
}
@@ -564,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
cntl.enable_dp_audio = enable_audio;
cntl.pixel_clock = actual_pix_clk_khz;
cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.color_depth = crtc_timing->display_color_depth;
if (enc110->base.bp->funcs->encoder_control(
enc110->base.bp, &cntl) != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 130a0a0c8332..151dc7bf6d23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -493,7 +493,6 @@ static void dce60_transform_set_scaler(
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
bool is_scaling_required;
- bool filter_updated = false;
const uint16_t *coeffs_v, *coeffs_h;
/*Use whole line buffer memory always*/
@@ -558,7 +557,6 @@ static void dce60_transform_set_scaler(
xfm_dce->filter_v = coeffs_v;
xfm_dce->filter_h = coeffs_h;
- filter_updated = true;
}
}
@@ -601,12 +599,12 @@ static void set_clamp(
clamp_max = 0x3FC0;
break;
case COLOR_DEPTH_101010:
- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
- clamp_max = 0x3FFC;
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
+ clamp_max = 0x3FF0;
break;
case COLOR_DEPTH_121212:
- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
- clamp_max = 0x3FFF;
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
break;
default:
clamp_max = 0x3FC0;
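
For context: an N-bit code MSB-aligned on the 14-bit bus leaves the low (14 - N) bits zero, so the maximum legal code is (2^N - 1) << (14 - N); the old constants wrongly kept low bits set. A sketch of the rule, which also reproduces the unchanged 8 bpc default of 0x3FC0:

    /* 10 bpc: 0x3FF << 4 = 0x3FF0 (was 0x3FFC)
     * 12 bpc: 0xFFF << 2 = 0x3FFC (was 0x3FFF)
     *  8 bpc: 0x0FF << 6 = 0x3FC0 (unchanged default) */
    static uint32_t clamp_max_for(unsigned int bits)
    {
            return ((1u << bits) - 1) << (14 - bits);
    }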
@@ -1037,34 +1035,23 @@ static void dce60_transform_set_pixel_storage_depth(
const struct bit_depth_reduction_params *bit_depth_params)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
- int pixel_depth, expan_mode;
enum dc_color_depth color_depth;
switch (depth) {
case LB_PIXEL_DEPTH_18BPP:
color_depth = COLOR_DEPTH_666;
- pixel_depth = 2;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_24BPP:
color_depth = COLOR_DEPTH_888;
- pixel_depth = 1;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_30BPP:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_36BPP:
color_depth = COLOR_DEPTH_121212;
- pixel_depth = 3;
- expan_mode = 0;
break;
default:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
BREAK_TO_DEBUGGER();
break;
}
@@ -1113,7 +1100,7 @@ static void program_gamut_remap(
}
-/**
+/*
*****************************************************************************
* Function: dal_transform_wide_gamut_set_gamut_remap
*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 0cf130dc4e52..453aaa5757bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,6 +57,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
union dmub_rb_cmd cmd;
uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
@@ -135,6 +136,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_level.header.type = DMUB_CMD__ABM;
cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
cmd.abm_set_level.abm_set_level_data.level = level;
@@ -160,6 +162,7 @@ static bool dmub_abm_init_config(struct abm *abm,
// Copy iramtable into cw7
memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+ memset(&cmd, 0, sizeof(cmd));
// Fw will copy from cw7 to fw_state
cmd.abm_init_config.header.type = DMUB_CMD__ABM;
cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d399270fd17e..c97ee5abc0ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -33,8 +33,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_hw_lock_flags *hw_locks,
struct dmub_hw_lock_inst_flags *inst_flags)
{
- union dmub_rb_cmd cmd = { 0 };
+ union dmub_rb_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
cmd.lock_hw.header.sub_type = 0;
cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
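
For context: DMUB commands are copied byte-for-byte into the firmware ring buffer, and `union dmub_rb_cmd cmd = { 0 };` is only guaranteed to zero the first union member, so bytes beyond it (and padding) may carry stack garbage. The explicit memset, now used by all the hunks above, clears the whole object:

    union dmub_rb_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));   /* every member and all padding */
    cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;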
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 17e84f34ceba..69e34bef274c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -31,7 +31,7 @@
#define MAX_PIPES 6
-/**
+/*
* Convert dmcub psr state to dmcu psr state.
*/
static enum dc_psr_state convert_psr_state(uint32_t raw_state)
@@ -74,7 +74,7 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
return state;
}
-/**
+/*
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
@@ -90,7 +90,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
*state = convert_psr_state(raw_state);
}
-/**
+/*
* Set PSR version.
*/
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
@@ -101,6 +101,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
return false;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
switch (stream->link->psr_settings.psr_version) {
@@ -121,7 +122,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
return true;
}
-/**
+/*
* Enable/Disable PSR.
*/
static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
@@ -131,7 +132,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
uint32_t retry_count;
enum dc_psr_state state = PSR_STATE0;
-
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_enable.header.type = DMUB_CMD__PSR;
if (enable)
@@ -170,7 +171,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
}
}
-/**
+/*
* Set PSR level.
*/
static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
@@ -184,6 +185,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
if (state == PSR_STATE0)
return;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_level.header.type = DMUB_CMD__PSR;
cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
@@ -194,7 +196,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
@@ -233,6 +235,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
@@ -277,7 +280,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
return true;
}
-/**
+/*
* Send command to PSR to force static ENTER and ignore all state changes until exit
*/
static void dmub_psr_force_static(struct dmub_psr *dmub)
@@ -285,6 +288,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_force_static.header.type = DMUB_CMD__PSR;
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;
@@ -294,7 +298,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Get PSR residency from firmware.
*/
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency)
@@ -316,7 +320,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_get_residency = dmub_psr_get_residency,
};
-/**
+/*
* Construct PSR object.
*/
static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
@@ -325,7 +329,7 @@ static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
psr->funcs = &psr_funcs;
}
-/**
+/*
* Allocate and initialize PSR object.
*/
struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
@@ -342,7 +346,7 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
return psr;
}
-/**
+/*
* Deallocate PSR object.
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index a822d4e2a169..ff20c47f559e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
+
DCE100 = dce100_resource.o dce100_hw_sequencer.o
AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ab9d6c79808..635ef0e7c782 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -58,6 +58,8 @@
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
+#include "dce100_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
@@ -385,7 +387,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -611,7 +613,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce100_link_encoder_create(
+static struct link_encoder *dce100_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -650,7 +652,7 @@ static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_
return &panel_cntl->base;
}
-struct output_pixel_processor *dce100_opp_create(
+static struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -665,7 +667,7 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
-struct dce_aux *dce100_aux_engine_create(
+static struct dce_aux *dce100_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -703,7 +705,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce100_i2c_hw_create(
+static struct dce_i2c_hw *dce100_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -718,7 +720,7 @@ struct dce_i2c_hw *dce100_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce100_clock_source_create(
+static struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -742,7 +744,7 @@ struct clock_source *dce100_clock_source_create(
return NULL;
}
-void dce100_clock_source_destroy(struct clock_source **clk_src)
+static void dce100_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -831,7 +833,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce100_validate_bandwidth(
+static bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -876,7 +878,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-enum dc_status dce100_validate_global(
+static enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index d564c0eb8b04..84ab48df0c26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 72b580a4eb85..44564a4742b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -412,36 +412,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
*compressor = NULL;
}
-bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
- struct fbc_requested_compressed_size size)
-{
- bool result = false;
-
- unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
-
- get_max_support_fbc_buffersize(&max_x, &max_y);
-
- if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
- /*
- * For DCE11 here use Max HW supported size: HW Support up to 3840x2400 resolution
- * or 18000 chunks.
- */
- size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
- size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
- size.bits.preferred_must_be_framebuffer_pool = 1;
- size.bits.min_must_be_framebuffer_pool = 1;
-
- result = true;
- }
- /*
- * Maybe to add registry key support with optional size here to override above
- * for debugging purposes
- */
-
- return result;
-}
-
-
void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
{
*max_x = FBC_MAX_X;
@@ -455,31 +425,6 @@ void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
*/
}
-
-unsigned int controller_id_to_index(enum controller_id controller_id)
-{
- unsigned int index = 0;
-
- switch (controller_id) {
- case CONTROLLER_ID_D0:
- index = 0;
- break;
- case CONTROLLER_ID_D1:
- index = 1;
- break;
- case CONTROLLER_ID_D2:
- index = 2;
- break;
- case CONTROLLER_ID_D3:
- index = 3;
- break;
- default:
- break;
- }
- return index;
-}
-
-
static const struct compressor_funcs dce110_compressor_funcs = {
.power_up_fbc = dce110_compressor_power_up_fbc,
.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4c230f1de9a3..caee1c9f54bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -921,6 +921,37 @@ void dce110_edp_power_control(
}
}
+void dce110_edp_wait_for_T12(
+ struct dc_link *link)
+{
+ struct dc_context *ctx = link->ctx;
+
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!link->panel_cntl)
+ return;
+
+ if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
+ link->link_trace.time_stamp.edp_poweroff != 0) {
+ unsigned int t12_duration = 500; // Default T12 as per spec
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long time_since_edp_poweroff_ms =
+ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
+
+ t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+
+ if (time_since_edp_poweroff_ms < t12_duration)
+ msleep(t12_duration - time_since_edp_poweroff_ms);
+ }
+}
+
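
For context: T12 is the eDP spec's minimum delay from panel power-off to the next power-on. The helper above sleeps only for whatever part of that budget has not already elapsed; a worked example with the hypothetical values extra_t12_ms = 100 and 180 ms elapsed since power-off:

    unsigned int t12 = 500 + 100;           /* default + panel extra */
    unsigned long long elapsed_ms = 180;    /* since edp_poweroff */

    if (elapsed_ms < t12)
            msleep(t12 - elapsed_ms);       /* sleeps 420 ms */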
/*todo: cloned in stream enc, fix*/
/*
* @brief
@@ -1628,7 +1659,7 @@ static struct dc_link *get_edp_link_with_sink(
return link;
}
-/**
+/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
* 2. Disable VGA engine on all controllers
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index d54172d88f5f..8bbb499067f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -34,6 +34,7 @@
#include "inc/dce_calcs.h"
#include "dce/dce_mem_input.h"
+#include "dce110_mem_input_v.h"
static void set_flip_control(
struct dce_mem_input *mem_input110,
@@ -468,7 +469,7 @@ static void program_pixel_format(
}
}
-bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+static bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
uint32_t value;
@@ -483,7 +484,7 @@ bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
return false;
}
-bool dce_mem_input_v_program_surface_flip_and_addr(
+static bool dce_mem_input_v_program_surface_flip_and_addr(
struct mem_input *mem_input,
const struct dc_plane_address *address,
bool flip_immediate)
@@ -560,7 +561,7 @@ static const unsigned int *get_dvmm_hw_setting(
}
}
-void dce_mem_input_v_program_pte_vm(
+static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -633,7 +634,7 @@ void dce_mem_input_v_program_pte_vm(
dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
}
-void dce_mem_input_v_program_surface_config(
+static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -919,7 +920,7 @@ static void program_nbp_watermark_c(
marks);
}
-void dce_mem_input_v_program_display_marks(
+static void dce_mem_input_v_program_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -942,7 +943,7 @@ void dce_mem_input_v_program_display_marks(
}
-void dce_mem_input_program_chroma_display_marks(
+static void dce_mem_input_program_chroma_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -963,7 +964,7 @@ void dce_mem_input_program_chroma_display_marks(
stutter);
}
-void dce110_allocate_mem_input_v(
+static void dce110_allocate_mem_input_v(
struct mem_input *mi,
uint32_t h_total,/* for current stream */
uint32_t v_total,/* for current stream */
@@ -1005,7 +1006,7 @@ void dce110_allocate_mem_input_v(
}
-void dce110_free_mem_input_v(
+static void dce110_free_mem_input_v(
struct mem_input *mi,
uint32_t total_stream_num)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 3f63822b8e28..d7fcc5cccdce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,7 +715,7 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
-struct dce_aux *dce110_aux_engine_create(
+static struct dce_aux *dce110_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -753,7 +753,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce110_i2c_hw_create(
+static struct dce_i2c_hw *dce110_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -768,7 +768,7 @@ struct dce_i2c_hw *dce110_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce110_clock_source_create(
+static struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -792,7 +792,7 @@ struct clock_source *dce110_clock_source_create(
return NULL;
}
-void dce110_clock_source_destroy(struct clock_source **clk_src)
+static void dce110_clock_source_destroy(struct clock_source **clk_src)
{
struct dce110_clk_src *dce110_clk_src;
@@ -1034,8 +1034,8 @@ static bool dce110_validate_bandwidth(
return result;
}
-enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
- struct dc_caps *caps)
+static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+ struct dc_caps *caps)
{
if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
@@ -1089,7 +1089,7 @@ static bool dce110_validate_surface_sets(
return true;
}
-enum dc_status dce110_validate_global(
+static enum dc_status dce110_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1272,7 +1272,6 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
/* update the public caps to indicate an underlay is available */
ctx->dc->caps.max_slave_planes = 1;
- ctx->dc->caps.max_slave_planes = 1;
return true;
}
@@ -1333,7 +1332,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
1000);
}
-const struct resource_caps *dce110_resource_cap(
+static const struct resource_caps *dce110_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 1ea7db8eeb98..d88a74559edd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -75,7 +75,7 @@ static void dce110_timing_generator_apply_front_porch_workaround(
}
}
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -116,7 +116,7 @@ void dce110_timing_generator_set_early_control(
dm_write_reg(tg->ctx, address, regval);
}
-/**
+/*
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
@@ -175,7 +175,7 @@ void dce110_timing_generator_program_blank_color(
dm_write_reg(tg->ctx, addr, value);
}
-/**
+/*
*****************************************************************************
* Function: disable_stereo
*
@@ -226,7 +226,7 @@ static void disable_stereo(struct timing_generator *tg)
}
#endif
-/**
+/*
* disable_crtc - call ASIC Control Object to disable Timing generator.
*/
bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
@@ -247,11 +247,10 @@ bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-/**
-* program_horz_count_by_2
-* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
-*
-*/
+/*
+ * program_horz_count_by_2
+ * Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
+ */
static void program_horz_count_by_2(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
@@ -273,7 +272,7 @@ static void program_horz_count_by_2(
CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
}
-/**
+/*
* program_timing_generator
* Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
* Call ASIC Control Object to program Timings.
@@ -352,7 +351,7 @@ bool dce110_timing_generator_program_timing_generator(
return result == BP_RESULT_OK;
}
-/**
+/*
*****************************************************************************
* Function: set_drr
*
@@ -521,7 +520,7 @@ uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
return field;
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_get_position
*
@@ -557,7 +556,7 @@ void dce110_timing_generator_get_position(struct timing_generator *tg,
CRTC_VERT_COUNT_NOM);
}
-/**
+/*
*****************************************************************************
* Function: get_crtc_scanoutpos
*
@@ -1106,11 +1105,11 @@ void dce110_timing_generator_set_test_pattern(
}
}
-/**
-* dce110_timing_generator_validate_timing
-* The timing generators support a maximum display size of is 8192 x 8192 pixels,
-* including both active display and blanking periods. Check H Total and V Total.
-*/
+/*
+ * dce110_timing_generator_validate_timing
+ * The timing generators support a maximum display size of 8192 x 8192 pixels,
+ * including both active display and blanking periods. Check H Total and V Total.
+ */
bool dce110_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
@@ -1167,9 +1166,9 @@ bool dce110_timing_generator_validate_timing(
return true;
}
-/**
-* Wait till we are at the beginning of VBlank.
-*/
+/*
+ * Wait till we are at the beginning of VBlank.
+ */
void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch beginning of VBlank here, so if the first try are
@@ -1191,9 +1190,9 @@ void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_is_in_vertical_blank(tg)) {
@@ -1204,7 +1203,7 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
}
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_setup_global_swap_lock
*
@@ -1215,7 +1214,6 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
* @param [in] gsl_params: setup data
*****************************************************************************
*/
-
void dce110_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
@@ -1351,10 +1349,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
/* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
{
- uint32_t value_crtc_vtotal;
-
- value_crtc_vtotal = dm_read_reg(tg->ctx,
- CRTC_REG(mmCRTC_V_TOTAL));
+ dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_V_TOTAL));
set_reg_field_value(value,
0,
@@ -1385,7 +1380,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
dm_write_reg(tg->ctx, address, value);
}
-/**
+/*
*****************************************************************************
* Function: is_counter_moving
*
@@ -1767,7 +1762,7 @@ void dce110_timing_generator_disable_reset_trigger(
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
}
-/**
+/*
*****************************************************************************
* @brief
* Checks whether CRTC triggered reset occurred
@@ -1794,7 +1789,7 @@ bool dce110_timing_generator_did_triggered_reset_occur(
return (force || vert_sync);
}
-/**
+/*
* dce110_timing_generator_disable_vga
* Turn OFF VGA Mode and Timing - DxVGA_CONTROL
* VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
@@ -1840,14 +1835,13 @@ void dce110_timing_generator_disable_vga(
dm_write_reg(tg->ctx, addr, value);
}
-/**
-* set_overscan_color_black
-*
-* @param :black_color is one of the color space
-* :this routine will set overscan black color according to the color space.
-* @return none
-*/
-
+/*
+ * set_overscan_color_black
+ *
+ * @param black_color: the black color for one of the supported color spaces;
+ * this routine sets the overscan black color according to that color space.
+ * @return none
+ */
void dce110_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index a13a2f58944e..c509384fff54 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -46,17 +46,16 @@
*
**********************************************************************************/
-/**
-* Enable CRTCV
-*/
+/*
+ * Enable CRTCV
+ */
static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
{
/*
-* Set MASTER_UPDATE_MODE to 0
-* This is needed for DRR, and also suggested to be default value by Syed.
-*/
-
+ * Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and also suggested to be default value by Syed.
+ */
uint32_t value;
value = 0;
@@ -209,9 +208,9 @@ static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *t
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index b1aaab5590cc..29438c6050db 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -217,16 +217,15 @@ static bool setup_scaling_configuration(
return is_scaling_needed;
}
-/**
-* Function:
-* void program_overscan
-*
-* Purpose: Programs overscan border
-* Input: overscan
-*
-* Output:
- void
-*/
+/*
+ * Function:
+ * void program_overscan
+ *
+ * Purpose: Programs overscan border
+ * Input: overscan
+ *
+ * Output: void
+ */
static void program_overscan(
struct dce_transform *xfm_dce,
const struct scaler_data *data)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 8e090446d511..9de6501702d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+
DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
dce112_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index f99b1c084590..ee55cda854bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -59,7 +59,9 @@
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce100_resource.h"
-#define DC_LOGGER \
+#include "dce112_resource.h"
+
+#define DC_LOGGER \
dc->ctx->logger
#ifndef mmDP_DPHY_INTERNAL_CTRL
@@ -617,7 +619,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dce112_link_encoder_create(
+static struct link_encoder *dce112_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -671,7 +673,7 @@ static struct input_pixel_processor *dce112_ipp_create(
return &ipp->base;
}
-struct output_pixel_processor *dce112_opp_create(
+static struct output_pixel_processor *dce112_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -686,7 +688,7 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
-struct dce_aux *dce112_aux_engine_create(
+static struct dce_aux *dce112_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -724,7 +726,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce112_i2c_hw_create(
+static struct dce_i2c_hw *dce112_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -739,7 +741,7 @@ struct dce_i2c_hw *dce112_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce112_clock_source_create(
+static struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -763,7 +765,7 @@ struct clock_source *dce112_clock_source_create(
return NULL;
}
-void dce112_clock_source_destroy(struct clock_source **clk_src)
+static void dce112_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -1024,7 +1026,7 @@ enum dc_status dce112_add_stream_to_ctx(
return result;
}
-enum dc_status dce112_validate_global(
+static enum dc_status dce112_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1202,7 +1204,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
}
-const struct resource_caps *dce112_resource_cap(
+static const struct resource_caps *dce112_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 37db1f8d45ea..a9cc4b73270b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,6 +24,8 @@
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+
DCE120 = dce120_resource.o dce120_timing_generator.o \
dce120_hw_sequencer.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 66a13aa39c95..d4afe6c824d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -50,6 +50,7 @@ struct dce120_hw_seq_reg_offsets {
uint32_t crtc;
};
+#if 0
static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
{
.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
@@ -79,7 +80,6 @@ static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
/*******************************************************************************
* Private definitions
******************************************************************************/
-#if 0
static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
{
uint32_t addr;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f1e3d2888eac..c65e4d125c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -423,7 +423,7 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
-struct output_pixel_processor *dce120_opp_create(
+static struct output_pixel_processor *dce120_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -437,7 +437,7 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
-struct dce_aux *dce120_aux_engine_create(
+static struct dce_aux *dce120_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -475,7 +475,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce120_i2c_hw_create(
+static struct dce_i2c_hw *dce120_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 915fbb8e8168..b57c466124e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -69,7 +69,7 @@
#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -98,7 +98,7 @@ static bool dce120_timing_generator_is_in_vertical_blank(
/* determine if given timing can be supported by TG */
-bool dce120_timing_generator_validate_timing(
+static bool dce120_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
enum signal_type signal)
@@ -125,7 +125,7 @@ bool dce120_timing_generator_validate_timing(
return true;
}
-bool dce120_tg_validate_timing(struct timing_generator *tg,
+static bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
@@ -133,7 +133,7 @@ bool dce120_tg_validate_timing(struct timing_generator *tg,
/******** HW programming ************/
/* Disable/Enable Timing Generator */
-bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+static bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
{
enum bp_result result;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -153,7 +153,7 @@ bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-void dce120_timing_generator_set_early_control(
+static void dce120_timing_generator_set_early_control(
struct timing_generator *tg,
uint32_t early_cntl)
{
@@ -166,7 +166,7 @@ void dce120_timing_generator_set_early_control(
/**************** TG current status ******************/
/* return the current frame counter. Used by Linux kernel DRM */
-uint32_t dce120_timing_generator_get_vblank_counter(
+static uint32_t dce120_timing_generator_get_vblank_counter(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -181,7 +181,7 @@ uint32_t dce120_timing_generator_get_vblank_counter(
}
/* Get current H and V position */
-void dce120_timing_generator_get_crtc_position(
+static void dce120_timing_generator_get_crtc_position(
struct timing_generator *tg,
struct crtc_position *position)
{
@@ -207,7 +207,7 @@ void dce120_timing_generator_get_crtc_position(
}
/* wait until TG is in beginning of vertical blank region */
-void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch the beginning of VBlank here, so if the first try is
* in VBlank, we might be very close to Active; in this case wait for
@@ -229,7 +229,7 @@ void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
/* wait until TG is in beginning of active region */
-void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce120_timing_generator_is_in_vertical_blank(tg)) {
if (!tg->funcs->is_counter_moving(tg)) {
@@ -242,7 +242,7 @@ void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
/*********** Timing Generator Synchronization routines ****/
/* Setups Global Swap Lock group, TimingServer or TimingClient*/
-void dce120_timing_generator_setup_global_swap_lock(
+static void dce120_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
@@ -279,7 +279,7 @@ void dce120_timing_generator_setup_global_swap_lock(
}
/* Clear all the register writes done by setup_global_swap_lock */
-void dce120_timing_generator_tear_down_global_swap_lock(
+static void dce120_timing_generator_tear_down_global_swap_lock(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -300,7 +300,7 @@ void dce120_timing_generator_tear_down_global_swap_lock(
}
/* Reset slave controllers on master VSync */
-void dce120_timing_generator_enable_reset_trigger(
+static void dce120_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
int source)
{
@@ -347,7 +347,7 @@ void dce120_timing_generator_enable_reset_trigger(
}
/* disabling trigger-reset */
-void dce120_timing_generator_disable_reset_trigger(
+static void dce120_timing_generator_disable_reset_trigger(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -367,7 +367,7 @@ void dce120_timing_generator_disable_reset_trigger(
}
/* Checks whether CRTC triggered reset occurred */
-bool dce120_timing_generator_did_triggered_reset_occur(
+static bool dce120_timing_generator_did_triggered_reset_occur(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -384,7 +384,7 @@ bool dce120_timing_generator_did_triggered_reset_occur(
/******** Stuff to move to other virtual HW objects *****************/
/* Move to enable accelerated mode */
-void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+static void dce120_timing_generator_disable_vga(struct timing_generator *tg)
{
uint32_t offset = 0;
uint32_t value = 0;
@@ -425,7 +425,7 @@ void dce120_timing_generator_disable_vga(struct timing_generator *tg)
}
/* TODO: Should we move it to transform */
/* Fully program CRTC timing in timing generator */
-void dce120_timing_generator_program_blanking(
+static void dce120_timing_generator_program_blanking(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
@@ -485,7 +485,7 @@ void dce120_timing_generator_program_blanking(
/* TODO: Should we move it to opp? */
/* Combine with below and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_program_blank_color(
+static void dce120_timing_generator_program_blank_color(
struct timing_generator *tg,
const struct tg_color *black_color)
{
@@ -498,7 +498,7 @@ void dce120_timing_generator_program_blank_color(
CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
/* Combine with above and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_set_overscan_color_black(
+static void dce120_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
{
@@ -540,7 +540,7 @@ void dce120_timing_generator_set_overscan_color_black(
*/
}
-void dce120_timing_generator_set_drr(
+static void dce120_timing_generator_set_drr(
struct timing_generator *tg,
const struct drr_params *params)
{
@@ -589,50 +589,7 @@ void dce120_timing_generator_set_drr(
}
}
-/**
- *****************************************************************************
- * Function: dce120_timing_generator_get_position
- *
- * @brief
- * Returns CRTC vertical/horizontal counters
- *
- * @param [out] position
- *****************************************************************************
- */
-void dce120_timing_generator_get_position(struct timing_generator *tg,
- struct crtc_position *position)
-{
- uint32_t value;
- struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_STATUS_POSITION,
- tg110->offsets.crtc);
-
- position->horizontal_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_HORZ_COUNT);
-
- position->vertical_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_VERT_COUNT);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_NOM_VERT_POSITION,
- tg110->offsets.crtc);
-
- position->nominal_vcount = get_reg_field_value(
- value,
- CRTC0_CRTC_NOM_VERT_POSITION,
- CRTC_VERT_COUNT_NOM);
-}
-
-
-void dce120_timing_generator_get_crtc_scanoutpos(
+static void dce120_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
@@ -661,7 +618,7 @@ void dce120_timing_generator_get_crtc_scanoutpos(
*v_position = position.vertical_count;
}
-void dce120_timing_generator_enable_advanced_request(
+static void dce120_timing_generator_enable_advanced_request(
struct timing_generator *tg,
bool enable,
const struct dc_crtc_timing *timing)
@@ -699,7 +656,7 @@ void dce120_timing_generator_enable_advanced_request(
value);
}
-void dce120_tg_program_blank_color(struct timing_generator *tg,
+static void dce120_tg_program_blank_color(struct timing_generator *tg,
const struct tg_color *black_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -722,7 +679,7 @@ void dce120_tg_program_blank_color(struct timing_generator *tg,
value);
}
-void dce120_tg_set_overscan_color(struct timing_generator *tg,
+static void dce120_tg_set_overscan_color(struct timing_generator *tg,
const struct tg_color *overscan_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -749,7 +706,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
dce120_timing_generator_program_blanking(tg, timing);
}
-bool dce120_tg_is_blanked(struct timing_generator *tg)
+static bool dce120_tg_is_blanked(struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value = dm_read_reg_soc15(
@@ -770,7 +727,7 @@ bool dce120_tg_is_blanked(struct timing_generator *tg)
return false;
}
-void dce120_tg_set_blank(struct timing_generator *tg,
+static void dce120_tg_set_blank(struct timing_generator *tg,
bool enable_blanking)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -789,7 +746,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
-void dce120_tg_wait_for_state(struct timing_generator *tg,
+static void dce120_tg_wait_for_state(struct timing_generator *tg,
enum crtc_state state)
{
switch (state) {
@@ -806,7 +763,7 @@ void dce120_tg_wait_for_state(struct timing_generator *tg,
}
}
-void dce120_tg_set_colors(struct timing_generator *tg,
+static void dce120_tg_set_colors(struct timing_generator *tg,
const struct tg_color *blank_color,
const struct tg_color *overscan_color)
{
@@ -833,7 +790,7 @@ static void dce120_timing_generator_set_static_screen_control(
CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
-void dce120_timing_generator_set_test_pattern(
+static void dce120_timing_generator_set_test_pattern(
struct timing_generator *tg,
/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
* because this is not DP-specific (which is probably somewhere in DP
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index 7036c3bd0f87..dda596fa1cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index e9dd78c484d6..dcfa0a3efa00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce60_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
@@ -519,7 +521,7 @@ static struct output_pixel_processor *dce60_opp_create(
return &opp->base;
}
-struct dce_aux *dce60_aux_engine_create(
+static struct dce_aux *dce60_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -557,7 +559,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce60_i2c_hw_create(
+static struct dce_i2c_hw *dce60_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -573,7 +575,7 @@ struct dce_i2c_hw *dce60_i2c_hw_create(
return dce_i2c_hw;
}
-struct dce_i2c_sw *dce60_i2c_sw_create(
+static struct dce_i2c_sw *dce60_i2c_sw_create(
struct dc_context *ctx)
{
struct dce_i2c_sw *dce_i2c_sw =
@@ -707,7 +709,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce60_link_encoder_create(
+static struct link_encoder *dce60_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -746,7 +748,7 @@ static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dce60_clock_source_create(
+static struct clock_source *dce60_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -770,7 +772,7 @@ struct clock_source *dce60_clock_source_create(
return NULL;
}
-void dce60_clock_source_destroy(struct clock_source **clk_src)
+static void dce60_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -860,7 +862,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-bool dce60_validate_bandwidth(
+static bool dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -905,7 +907,7 @@ static bool dce60_validate_surface_sets(
return true;
}
-enum dc_status dce60_validate_global(
+static enum dc_status dce60_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index fc1af0ff0ca4..c1a85ee374d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -189,8 +189,8 @@ static bool dce60_is_tg_enabled(struct timing_generator *tg)
return field == 1;
}
-bool dce60_configure_crc(struct timing_generator *tg,
- const struct crc_params *params)
+static bool dce60_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
{
/* Cannot configure crc on a CRTC that is disabled */
if (!dce60_is_tg_enabled(tg))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 666fcb2bdbba..0a9d1a350d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+
DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
dce80_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 390a0fa37239..612450f99278 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce80_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
@@ -402,7 +404,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4d3f7d5e1473..904c2d278998 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -577,7 +577,7 @@ void dpp1_power_on_degamma_lut(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET(CM_MEM_PWR_CTRL, 0,
- SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
+ SHARED_MEM_PWR_DIS, power_on ? 0:1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index cfc130e2d6fd..89912bb5014f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -647,8 +647,13 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, plane_id, true);
- hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -1214,6 +1224,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
// signals when OTG blanked. This is to prevent pipe from
// requesting data while in PSR.
tg->funcs->tg_init(tg);
+ hubp->power_gated = true;
continue;
}
@@ -2624,7 +2635,7 @@ static void dcn10_update_dchubp_dpp(
hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
- hubp->funcs->set_blank(hubp, false);
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
void dcn10_blank_pixel_data(
@@ -3124,7 +3135,7 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
- flags.PROGRAM_STEREO == 1 ? true:false,
+ flags.PROGRAM_STEREO == 1,
&stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
@@ -3135,13 +3146,16 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
return;
}
-static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst)
{
int i;
- for (i = 0; i < res_pool->pipe_count; i++) {
- if (res_pool->hubps[i]->inst == mpcc_inst)
- return res_pool->hubps[i];
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_res.hubp
+ && context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) {
+ return &context->res_ctx.pipe_ctx[i];
+ }
+
}
ASSERT(false);
return NULL;
@@ -3164,11 +3178,23 @@ void dcn10_wait_for_mpcc_disconnect(
for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
- struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+ struct pipe_ctx *restore_bottom_pipe;
+ struct pipe_ctx *restore_top_pipe;
+ struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst);
+ ASSERT(inst_pipe_ctx);
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
- hubp->funcs->set_blank(hubp, true);
+ /*
+ * Set top and bottom pipes NULL, as we don't want
+ * to blank those pipes when disconnecting from MPCC
+ */
+ restore_bottom_pipe = inst_pipe_ctx->bottom_pipe;
+ restore_top_pipe = inst_pipe_ctx->top_pipe;
+ inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL;
+ dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true);
+ inst_pipe_ctx->top_pipe = restore_top_pipe;
+ inst_pipe_ctx->bottom_pipe = restore_bottom_pipe;
}
}
@@ -3721,3 +3747,10 @@ void dcn10_get_clock(struct dc *dc,
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
+
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e5691e499023..89e6dfb63da0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -163,6 +163,8 @@ void dcn10_wait_for_mpcc_disconnect(
void dce110_edp_backlight_control(
struct dc_link *link,
bool enable);
+void dce110_edp_wait_for_T12(
+ struct dc_link *link);
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
@@ -202,5 +204,8 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
struct dc_state *context);
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 7f4766e45dff..e8b6065fffad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -47,7 +47,7 @@
unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
- unsigned int ret_vsnprintf;
+ int ret_vsnprintf;
unsigned int chars_printed;
va_list args;
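The signedness change above matters because vsnprintf() returns a signed int that can be negative on an output error; keeping it unsigned would make the error branch unreachable. A sketch of the handling this enables (the rest of the function body sits outside this hunk, so the exact clamping is an assumption):

    va_start(args, fmt);
    ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args);
    va_end(args);

    if (ret_vsnprintf > 0)
        /* clamp: vsnprintf reports the would-be length, not what fit */
        chars_printed = min_t(unsigned int, ret_vsnprintf, bufSize - 1);
    else
        chars_printed = 0;  /* negative return: nothing was printed */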
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 254300b06b43..2f1b802e66a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 81db0179f7ea..e4701825b5a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
break;
default:
// invalid source select DIG
- ASSERT(false);
result = ENGINE_ID_UNKNOWN;
}
@@ -956,6 +955,21 @@ void dcn10_link_encoder_enable_tmds_output(
}
}
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ enum signal_type signal,
+ uint32_t pixel_clock)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ dcn10_link_encoder_enable_tmds_output(
+ enc, clock_source, color_depth, signal, pixel_clock);
+
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+}
+
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d4caad670855..3e1a582e4b88 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DIG_CLOCK_PATTERN, DIG, id), \
SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
@@ -83,6 +84,7 @@ struct dcn10_link_enc_hpd_registers {
struct dcn10_link_enc_registers {
uint32_t DIG_BE_CNTL;
uint32_t DIG_BE_EN_CNTL;
+ uint32_t DIG_CLOCK_PATTERN;
uint32_t DP_CONFIG;
uint32_t DP_DPHY_CNTL;
uint32_t DP_DPHY_INTERNAL_CTRL;
@@ -168,6 +170,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+ LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
@@ -218,6 +221,7 @@ struct dcn10_link_enc_registers {
type DIG_HPD_SELECT;\
type DIG_MODE;\
type DIG_FE_SOURCE_SELECT;\
+ type DIG_CLOCK_PATTERN;\
type DPHY_BYPASS;\
type DPHY_ATEST_SEL_LANE0;\
type DPHY_ATEST_SEL_LANE1;\
@@ -536,6 +540,13 @@ void dcn10_link_encoder_enable_tmds_output(
enum signal_type signal,
uint32_t pixel_clock);
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ enum signal_type signal,
+ uint32_t pixel_clock);
+
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 100ce0e28fd5..b096011acb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- uint32_t val = 0;
+ uint32_t val = 0xf;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f033397a84e9..6138f4887de7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -659,6 +659,16 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
+bool optc1_is_locked(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t locked;
+
+ REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
+
+ return (locked == 1);
+}
+
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -1513,6 +1523,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index b12bd9aae52f..b222c67973d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -638,6 +638,7 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
+bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..90e912fef2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for compile this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cb822df21b7c..0726fb435e2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1570,8 +1575,8 @@ static void dcn20_update_dchubp_dpp(
- if (pipe_ctx->update_flags.bits.enable)
- hubp->funcs->set_blank(hubp, false);
+ if (is_pipe_tree_visible(pipe_ctx))
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
@@ -1765,6 +1770,14 @@ void dcn20_post_unlock_program_front_end(
}
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->vtp_locked) {
+ dc->hwss.set_hubp_blank(dc, pipe, true);
+ pipe->vtp_locked = false;
+ }
+ }
/* WA to apply WM setting*/
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index de9dcbeea150..51a4166e9750 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -94,6 +94,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 15c2ff264ff6..fa013496e26b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -363,7 +363,7 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
- .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e04ecf0fc0db..2c2dbfcd8957 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 11.6,
- .sr_enter_plus_exit_time_us = 13.9,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -2097,6 +2097,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
pipes[pipe_cnt].dout.dp_lanes = 4;
+ pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
@@ -2150,6 +2151,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
default:
/* In case there is no signal, set dp with 4 lanes to allow max config */
+ pipes[pipe_cnt].dout.is_virtual = 1;
pipes[pipe_cnt].dout.output_type = dm_dp;
pipes[pipe_cnt].dout.dp_lanes = 4;
}
@@ -2517,8 +2519,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
- * check in else case.
+ * Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2527,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
@@ -3244,7 +3247,7 @@ restore_dml_state:
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
- bool voltage_supported = false;
+ bool voltage_supported;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
@@ -3505,7 +3508,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
// FCLK:UCLK ratio is 1.08
- min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
+ min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080,
+ 1000000);
calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;
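A worked example of the integer math above, assuming uclk_states[] is in kHz (suggested by the "* 16 / 1000" MT/s conversion a few lines earlier):

    /* uclk_states[i] = 875000 (875 MHz UCLK):
     *   min_fclk_required_by_uclk = 875000 * 1080 / 1000000 = 945 MHz,
     * i.e. exactly the 1.08 FCLK:UCLK ratio. The old mul_u64_u32_shr()
     * form computed the same product through a 32.32 fixed-point
     * constant, differing by at most one unit of rounding.
     */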
@@ -3605,7 +3609,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3613,116 +3616,6 @@ static bool init_soc_bounding_box(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- /* TODO: upstream NV12 bounding box when its launched */
- if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
- DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
- return false;
- }
-
- if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
- int i;
-
- dcn2_0_nv12_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn2_0_nv12_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn2_0_nv12_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn2_0_nv12_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn2_0_nv12_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn2_0_nv12_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn2_0_nv12_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn2_0_nv12_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn2_0_nv12_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn2_0_nv12_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn2_0_nv12_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn2_0_nv12_soc.vmm_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn2_0_nv12_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- // HACK!! Lower uclock latency switch time so we don't switch
- dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
- dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn2_0_nv12_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn2_0_nv12_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn2_0_nv12_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn2_0_nv12_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
- dcn2_0_nv12_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 96ee0b82f458..d3b643089603 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -123,7 +123,7 @@ void dcn21_optimize_pwr_state(
* PHY will hang on the next mode set attempt.
* if enable PLL follow by disable PLL (without executing lane enable/disable),
* RDPCS_PHY_DP_MPLLB_STATE remains 1,
- * which indicate that PLL disable attempt actually didn’t go through.
+ * which indicates that the PLL disable attempt actually didn't go through.
* As a workaround, insert PHY lane enable/disable before PLL disable.
*/
void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
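A sketch of the sequences the workaround comment describes (helper names are hypothetical; the actual programming happens at the PHY register level):

    /* Hang case:   enable_pll(); disable_pll();
     *              -> RDPCS_PHY_DP_MPLLB_STATE stays 1; the next
     *                 mode set hangs
     *
     * Workaround:  enable_pll(); enable_phy_lanes();
     *              disable_phy_lanes(); disable_pll();
     *              -> the PLL disable actually latches
     */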
@@ -143,6 +143,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
struct dc_context *dc = abm->ctx;
uint32_t ramping_boundary = 0xFFFF;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
@@ -212,6 +213,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
if (abm && panel_cntl)
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
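The added memset() calls matter because union dmub_rb_cmd is a large on-stack union of which only one member gets populated; a minimal sketch of the pattern (field names as in the hunk above):

    union dmub_rb_cmd cmd;

    memset(&cmd, 0, sizeof(cmd)); /* zero the bytes of every other member */
    cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
    cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
    /* only a few fields are set; without the memset the DMUB firmware
     * would read leftover stack bytes for the rest
     */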
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 074e2713257f..0597391b2171 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#endif
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 1c88d2edd381..072f8c880924 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1327,8 +1329,8 @@ validate_out:
return out;
}
-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ struct dc_state *context, bool fast_validate)
{
bool out = false;
@@ -1381,6 +1383,22 @@ validate_out:
return out;
}
+
+/*
+ * Some of the functions further below use the FPU, so we need to wrap this
+ * with DC_FP_START()/DC_FP_END(). Use the same approach as for
+ * dcn20_validate_bandwidth in dcn20_resource.c.
+ */
+bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+ DC_FP_START();
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
+ DC_FP_END();
+ return voltage_supported;
+}
+
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
@@ -2030,6 +2048,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..dfd77b3cc84d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
endif
ifdef CONFIG_PPC64
@@ -41,15 +41,12 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 9da66e491116..33985401f25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -133,7 +133,6 @@ static void dpp3_power_on_gamcor_lut(
struct dpp *dpp_base,
bool power_on)
{
- uint32_t power_status;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
@@ -143,12 +142,6 @@ static void dpp3_power_on_gamcor_lut(
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
-
- REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
- if (power_status != 0)
- BREAK_TO_DEBUGGER();
-
-
}
void dpp3_program_cm_dealpha(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 5fa150f34c60..705fbfc37502 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -62,6 +62,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3deb3fb1724d..06dc1e2e8383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
dc->links[i]->link_enc);
+ if (fe == ENGINE_ID_UNKNOWN)
+ continue;
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
@@ -710,8 +712,11 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- unsigned int surface_size, refresh_hz, denom;
uint32_t tmr_delay = 0, tmr_scale = 0;
+ struct dc_cursor_attributes cursor_attr;
+ bool cursor_cache_enable = false;
+ struct dc_stream_state *stream = NULL;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -722,72 +727,150 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
/* First, check no-memory-requests case */
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->stream_status[i]
- .plane_count)
+ if (dc->current_state->stream_status[i].plane_count)
/* Fail eligibility on a visible stream */
break;
}
- if (dc->current_state->stream_count == 1 // single display only
- && dc->current_state->stream_status[0].plane_count == 1 // single surface only
- && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
- // Only 8 and 16 bit formats
- && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
- && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
- surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
- dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
- (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ?
- 8 : 4);
- } else {
- // TODO: remove hard code size
- surface_size = 128 * 1024 * 1024;
+ if (i == dc->current_state->stream_count) {
+ /* Enable no-memory-requests case */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mall.header.type = DMUB_CMD__MALL;
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
+ cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+
+ return true;
+ }
+
+ stream = dc->current_state->streams[0];
+ plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
+
+ if (stream && plane) {
+ cursor_cache_enable = stream->cursor_position.enable &&
+ plane->address.grph.cursor_cache_addr.quad_part;
+ cursor_attr = stream->cursor_attributes;
}
- // TODO: remove hard code size
- if (surface_size < 128 * 1024 * 1024) {
- refresh_hz = div_u64((unsigned long long) dc->current_state->streams[0]->timing.pix_clk_100hz *
- 100LL,
- (dc->current_state->streams[0]->timing.v_total *
- dc->current_state->streams[0]->timing.h_total));
+ /*
+ * Second, check MALL eligibility
+ *
+ * single display only, single surface only, 8 and 16 bit formats only, no VM,
+ * do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
+ *
+ * TODO: When we implement multi-display, PSR displays will be allowed if there is
+ * a non-PSR display present, since in that case we can't do D0i3.2
+ */
+ if (dc->current_state->stream_count == 1 &&
+ stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
+ plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
+ plane->address.page_table_base.quad_part == 0 &&
+ dc->hwss.does_plane_fit_in_mall &&
+ dc->hwss.does_plane_fit_in_mall(dc, plane,
+ cursor_cache_enable ? &cursor_attr : NULL)) {
+ unsigned int v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
/*
- * Delay_Us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
- * Delay_Us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
- * (Delay_Us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
- * MallFrameCacheTmrDly = ((Delay_Us / 65.28) / 2^MallFrameCacheTmrScale) - 64
- * = (1000000 / refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
- * = 1000000 / (refresh * 65.28 * 2^MallFrameCacheTmrScale) - 64
- * = (1000000 * 100) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
+ * one frame time in microsec:
+ * Delay_Us = 1000000 / refresh
+ * dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
+ *
+ * one frame time modified by 'additional timer percent' (p):
+ * Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
+ * = dynamic_delay_us * (1 + p / 100)
+ * = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
+ *
+ * formula for timer duration based on parameters, from regspec:
+ * dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+ *
+ * dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+ * (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
+ * MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
*
* need to round up the result of the division before the subtraction
*/
- denom = refresh_hz * 6528;
- tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+ unsigned int denom = refresh_hz * 6528;
+ unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
+
+ tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+ (100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+ denom) - 64LL;
/* scale should be increased until it fits into 6 bits */
while (tmr_delay & ~0x3F) {
tmr_scale++;
if (tmr_scale > 3) {
- /* The delay exceeds the range of the hystersis timer */
+ /* Delay exceeds range of hysteresis timer */
ASSERT(false);
return false;
}
denom *= 2;
- tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+ tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+ (100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+ denom) - 64LL;
+ }
+
+ /* Copy HW cursor */
+ if (cursor_cache_enable) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mall.header.type = DMUB_CMD__MALL;
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
+ cmd.mall.header.payload_bytes =
+ sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+ switch (cursor_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cmd.mall.cursor_bpp = 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cmd.mall.cursor_bpp = 32;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cmd.mall.cursor_bpp = 64;
+ break;
+ }
+
+ cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
+ cmd.mall.cursor_copy_dst.quad_part =
+ plane->address.grph.cursor_cache_addr.quad_part;
+ cmd.mall.cursor_width = cursor_attr.width;
+ cmd.mall.cursor_height = cursor_attr.height;
+ cmd.mall.cursor_pitch = cursor_attr.pitch;
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ /* Use copied cursor, and it's okay to not switch back */
+ cursor_attr.address.quad_part =
+ plane->address.grph.cursor_cache_addr.quad_part;
+ dc_stream_set_cursor_attributes(stream, &cursor_attr);
}
/* Enable MALL */
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
- cmd.mall.header.sub_type =
- DMUB_CMD__MALL_ACTION_ALLOW;
- cmd.mall.header.payload_bytes =
- sizeof(cmd.mall) -
- sizeof(cmd.mall.header);
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
+ cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
cmd.mall.tmr_delay = tmr_delay;
cmd.mall.tmr_scale = tmr_scale;
+ cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
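
To make the derivation concrete, here is a small userspace model of the scale-fitting loop above; the 60 Hz refresh, zero stutter period, and zero mall_additional_timer_percent inputs are hypothetical (the real loop also bails out once tmr_scale exceeds 3):

#include <stdint.h>
#include <stdio.h>

/* Round-up division, standing in for the div_u64() calls above. */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t refresh_hz = 60, stutter_period_us = 0, pct = 0; /* hypothetical */
	uint64_t num = (1000000ULL + 2 * stutter_period_us * refresh_hz) * (100 + pct);
	uint64_t denom = refresh_hz * 6528;
	uint64_t tmr_delay = div_round_up(num, denom) - 64;
	unsigned int tmr_scale = 0;

	while (tmr_delay & ~0x3Full) {	/* the delay field is only 6 bits wide */
		tmr_scale++;
		denom *= 2;
		tmr_delay = div_round_up(num, denom) - 64;
	}

	/* Prints "scale=2 delay=0": 65.28 * (64 + 0) * 2^2 is roughly 16712 us,
	 * i.e. one 60 Hz frame time, matching the regspec formula. */
	printf("scale=%u delay=%llu\n", tmr_scale, (unsigned long long)tmr_delay);
	return 0;
}
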
@@ -814,6 +897,40 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+{
+ // add meta size?
+ unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
+ (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int mall_size = dc->caps.mall_size_total;
+ unsigned int cursor_size = 0;
+
+ if (dc->debug.mall_size_override)
+ mall_size = 1024 * 1024 * dc->debug.mall_size_override;
+
+ if (cursor_attr) {
+ cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
+
+ switch (cursor_attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+ }
+
+ return (surface_size + cursor_size) < mall_size;
+}
+
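
For scale, a hypothetical worked case of the check above: a 3840x2160 ARGB8888 surface (pitch taken as 3840 pixels) plus a 32-bit cursor at the 256x256 maximum, against the dcn30 MALL sizing of 8 MiB per channel with 8 DRAM channels assumed:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical plane: 4 bytes per pixel, pitch in pixels. */
	unsigned int surface_size = 3840 * 2160 * 4;	/* ~31.6 MiB */
	unsigned int cursor_size = 256 * 256 * 4;	/* 256 KiB */
	/* mall_size_per_mem_channel = 8, times an assumed 8 channels. */
	unsigned int mall_size = 8 * 8 * 1048576;	/* 64 MiB */
	bool fits = (surface_size + cursor_size) < mall_size;

	printf("%u + %u < %u -> %s\n", surface_size, cursor_size, mall_size,
	       fits ? "fits" : "does not fit");
	return 0;
}
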
void dcn30_hardware_release(struct dc *dc)
{
/* if pstate unsupported, force it supported */
@@ -823,6 +940,53 @@ void dcn30_hardware_release(struct dc *dc)
dc->res_pool->hubbub, true, true);
}
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ if (blank_enable) {
+ struct plane_resource *plane_res = &pipe_ctx->plane_res;
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ /* Wait for enter vblank */
+ stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
+
+ /* Blank HUBP to allow p-state during blank on all timings */
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(plane_res->hubp->funcs->hubp_in_blank(plane_res->hubp));
+ /* Toggle HUBP_DISABLE */
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, true);
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) {
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(mpcc_pipe->plane_res.hubp->funcs->hubp_in_blank(mpcc_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, true);
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, false);
+ }
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(odm_pipe->plane_res.hubp->funcs->hubp_in_blank(odm_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, true);
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, false);
+ }
+ } else {
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, false);
+ }
+}
+
void dcn30_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum controller_dp_test_pattern test_pattern,
@@ -831,6 +995,25 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset)
{
- pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
- color_space, color_depth, solid_color, width, height, offset);
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ if (test_pattern != CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) {
+ pipe_ctx->vtp_locked = false;
+ /* turning on DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+
+ /* Defer hubp blank if tg is locked */
+ if (stream_res->tg->funcs->is_tg_enabled(stream_res->tg)) {
+ if (stream_res->tg->funcs->is_locked(stream_res->tg))
+ pipe_ctx->vtp_locked = true;
+ else
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, true);
+ }
+ } else {
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
+ /* turning off DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
}
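
The vtp_locked flag set above marks a HUBP blank that could not be applied while the timing generator was locked; it is presumably consumed once the pipe is unlocked again. A hypothetical consumer, using only hooks visible in this patch (the real call site is not part of this diff):

static void unlock_pipe(struct dc *dc, struct pipe_ctx *pipe)
{
	pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	/* Apply the HUBP blank that was deferred while the TG was locked. */
	if (pipe->vtp_locked) {
		dc->hwss.set_hubp_blank(dc, pipe, true);
		pipe->vtp_locked = false;
	}
}
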
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index 7d32c43aafe0..3b7d4812e311 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -65,6 +65,9 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
+
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
void dcn30_hardware_release(struct dc *dc);
@@ -77,4 +80,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset);
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 6125fe440ad0..204444fead97 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -71,6 +71,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
@@ -91,11 +92,13 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+ .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 3ba3991ee612..8980c90b2277 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -309,6 +309,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 5e126fdf6ec1..8d0f663489ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1716,125 +1716,22 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn30_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !is_soc_bounding_box_valid(dc)) {
+ if (!is_soc_bounding_box_valid(dc)) {
DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
return false;
}
- if (bb && !is_soc_bounding_box_valid(dc)) {
- int i;
-
- dcn3_0_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn3_0_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn3_0_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn3_0_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn3_0_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn3_0_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn3_0_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn3_0_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn3_0_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn3_0_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn3_0_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn3_0_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn3_0_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn3_0_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn3_0_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn3_0_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn3_0_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn3_0_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn3_0_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn3_0_soc.gpuvm_min_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn3_0_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- dcn3_0_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn3_0_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn3_0_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn3_0_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn3_0_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn3_0_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn3_0_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn3_0_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn3_0_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn3_0_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn3_0_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn3_0_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn3_0_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn3_0_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
- if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
@@ -2292,17 +2189,15 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
unsigned int min_dram_speed_mts_margin = 160;
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
-
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
- for (i = 3; i > 0; i--) {
- if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
- (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- }
+ /* Pick the largest dummy-pstate entry whose dram speed is below min_dram_speed_mts (plus margin); if every entry is above it, fall back to DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
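
A quick trace of the fallback search above, with a hypothetical ascending dummy-pstate table:

#include <stdio.h>

int main(void)
{
	/* Hypothetical dummy_pstate_table dram speeds, ascending (MT/s). */
	unsigned int speeds[4] = {1600, 4800, 8000, 16000};
	unsigned int min_speed_mts = 9000, margin = 160;
	int i;

	/* Walk down; stop at the largest entry below min speed plus margin. */
	for (i = 3; i > 0; i--)
		if (min_speed_mts + margin > speeds[i])
			break;

	/* 9160 > 16000? no. 9160 > 8000? yes, so i == 2. Had the minimum been
	 * below every entry, i would reach 0 and DPM0's latency would be used. */
	printf("selected dummy-pstate entry %d\n", i);
	return 0;
}
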
@@ -2437,16 +2332,28 @@ validate_out:
return out;
}
-static noinline void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
+/*
+ * This must be noinline to ensure anything that deals with FP registers
+ * is contained within this call; previously, compiling with hard-float
+ * could result in FP instructions being emitted outside the boundaries
+ * of the DC_FP_START/END macros, which makes sense, as the compiler has
+ * no idea what is wrapped and what is not.
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6.
+ * Ideally, all FP-using code should be moved into its own file; only that
+ * file should be compiled with hard-float, and everything exported from it
+ * should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
@@ -2505,7 +2412,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2631,6 +2538,10 @@ static bool dcn30_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.mall_size_per_mem_channel = 8;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+ dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..09264716d1dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -14,21 +14,18 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index bdad72140cbc..b8bf6d61005b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4825c5c1c6ed..5d4b2c60192e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1489,124 +1489,21 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn301_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc;
struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip;
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !is_soc_bounding_box_valid(dc)) {
+ if (!is_soc_bounding_box_valid(dc)) {
DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
return false;
}
- if (bb && !is_soc_bounding_box_valid(dc)) {
- int i;
-
- dcn3_01_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn3_01_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn3_01_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn3_01_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn3_01_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn3_01_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn3_01_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn3_01_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn3_01_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn3_01_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn3_01_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn3_01_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn3_01_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn3_01_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn3_01_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn3_01_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn3_01_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn3_01_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn3_01_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn3_01_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn3_01_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn3_01_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn3_01_soc.gpuvm_min_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn3_01_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- dcn3_01_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn3_01_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn3_01_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn3_01_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn3_01_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn3_01_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn3_01_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn3_01_soc.num_states; i++) {
- dcn3_01_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn3_01_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn3_01_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn3_01_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn3_01_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn3_01_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn3_01_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn3_01_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn3_01_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
dcn20_patch_bounding_box(dc, loaded_bb);
- if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
@@ -1731,6 +1628,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..101620a8867a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -13,21 +13,18 @@
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 808c4dcdb3ac..4b659b63f75b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -53,6 +53,8 @@
#include "dce/dce_i2c_hw.h"
#include "dce/dce_panel_cntl.h"
#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "clk_mgr.h"
#include "hw_sequencer_private.h"
#include "reg_helper.h"
@@ -162,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 5.20,
- .sr_enter_plus_exit_time_us = 9.60,
+ .sr_exit_time_us = 12,
+ .sr_enter_plus_exit_time_us = 20,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -190,7 +192,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 350,
+ .dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
@@ -238,6 +240,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
+ .disable_psr = true,
};
enum dcn302_clk_src_array_id {
@@ -1213,6 +1216,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
dce_abm_destroy(&pool->multiple_abms[i]);
}
+ if (pool->psr != NULL)
+ dmub_psr_destroy(&pool->psr);
+
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
}
@@ -1224,6 +1230,165 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
+static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
+ dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
+ dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ unsigned int i, j;
+ unsigned int num_states = 0;
+
+ unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+ unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+ unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
+ unsigned int num_dcfclk_sta_targets = 4;
+ unsigned int num_uclk_states;
+
+ if (dc->ctx->dc_bios->vram_info.num_chans)
+ dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
+ dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ if (bw_params->clk_table.entries[0].memclk_mhz) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ }
+ if (!max_dcfclk_mhz)
+ max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
+ if (!max_dispclk_mhz)
+ max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
+ if (!max_dppclk_mhz)
+ max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
+ if (!max_phyclk_mhz)
+ max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
+
+ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ num_dcfclk_sta_targets++;
+ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ break;
+ }
+ }
+ /* Update size of array since we "removed" duplicates */
+ num_dcfclk_sta_targets = i + 1;
+ }
+
+ num_uclk_states = bw_params->clk_table.num_entries;
+
+ /* Calculate optimal dcfclk for each uclk */
+ for (i = 0; i < num_uclk_states; i++) {
+ dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ &optimal_dcfclk_for_uclk[i], NULL);
+ if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
+ }
+ }
+
+ /* Calculate optimal uclk for each dcfclk sta target */
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ for (j = 0; j < num_uclk_states; j++) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ break;
+ }
+ }
+ }
+
+ i = 0;
+ j = 0;
+ /* create the final dcfclk and uclk table */
+ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ } else {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ } else {
+ j = num_uclk_states;
+ }
+ }
+ }
+
+ while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ }
+
+ while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
+ dcn3_02_soc.num_states = num_states;
+ for (i = 0; i < dcn3_02_soc.num_states; i++) {
+ dcn3_02_soc.clock_limits[i].state = i;
+ dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+ dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
+ dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
+ dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+ }
+}
+
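
The state-table construction above is effectively a two-pointer merge of two ascending lists: the DCFCLK STA targets and the optimal DCFCLK computed per UCLK state. A compact model with hypothetical clock values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical ascending inputs. */
	unsigned int sta[4] = {694, 875, 1000, 1200};	/* DCFCLK STA targets */
	unsigned int sta_uclk[4] = {1600, 2400, 2400, 2400}; /* optimal UCLK per target */
	unsigned int opt[2] = {700, 1300};		/* optimal DCFCLK per UCLK */
	unsigned int uclk[2] = {1600, 2400};		/* UCLK states, MT/s */
	unsigned int i = 0, j = 0;

	while (i < 4 && j < 2) {
		if (sta[i] < opt[j]) {
			printf("dcfclk=%u uclk=%u\n", sta[i], sta_uclk[i]);
			i++;
		} else {
			printf("dcfclk=%u uclk=%u\n", opt[j], uclk[j]);
			j++;
		}
	}
	while (i < 4) {	/* leftover STA targets */
		printf("dcfclk=%u uclk=%u\n", sta[i], sta_uclk[i]);
		i++;
	}
	while (j < 2) {	/* leftover UCLK-derived states */
		printf("dcfclk=%u uclk=%u\n", opt[j], uclk[j]);
		j++;
	}
	return 0;
}

This prints six states with monotonically increasing dcfclk (694, 700, 875, 1000, 1200, 1300); the real code additionally clamps against max_dcfclk_mhz and the DC__VOLTAGE_STATES array bound.
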
static struct resource_funcs dcn302_res_pool_funcs = {
.destroy = dcn302_destroy_resource_pool,
.link_enc_create = dcn302_link_encoder_create,
@@ -1240,7 +1405,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn30_update_bw_bounding_box,
+ .update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
@@ -1311,7 +1476,10 @@ static bool dcn302_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
+ dc->caps.mall_size_per_mem_channel = 4;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+ dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1354,8 +1522,6 @@ static bool dcn302_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
@@ -1469,6 +1635,14 @@ static bool dcn302_resource_construct(
}
pool->timing_generator_count = i;
+ /* PSR */
+ pool->psr = dmub_psr_create(ctx);
+ if (pool->psr == NULL) {
+ dm_error("DC: failed to create psr!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABMs */
for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
index 71f7deed18e3..42d2c73e30bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
@@ -30,4 +30,6 @@
struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
#endif /* _DCN302_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 5da7677627a1..cac0b2c0d31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -30,9 +30,10 @@ struct dc_link;
struct cp_psp_stream_config {
uint8_t otg_inst;
- uint8_t link_enc_inst;
- uint8_t stream_enc_inst;
- uint8_t mst_supported;
+ uint8_t dig_be;
+ uint8_t dig_fe;
+ uint8_t assr_enabled;
+ uint8_t mst_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 45f028986a8d..0f3f510fd83b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3138,7 +3138,7 @@ static void CalculateFlipSchedule(
4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ if (GPUVMEnable || DCCEnable) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingRowInVBlankImmediateFlip = dml_max(
@@ -4168,10 +4168,11 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 860e72a51534..210c96cd5b03 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
- mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
- mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
- mode_lib->vba.MinTTUVBlank[k] += 25;
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
@@ -3262,6 +3263,7 @@ static void CalculateFlipSchedule(
static unsigned int TruncToValidBPP(
double DecimalBPP,
+ double DesiredBPP,
bool DSCEnabled,
enum output_encoder_class Output,
enum output_format_class Format,
@@ -3269,31 +3271,31 @@ static unsigned int TruncToValidBPP(
{
if (Output == dm_hdmi) {
if (Format == dm_420) {
- if (DecimalBPP >= 18)
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
- else if (DecimalBPP >= 15)
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
- else if (DecimalBPP >= 12)
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_444) {
- if (DecimalBPP >= 36)
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
- else if (DecimalBPP >= 30)
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
- else if (DecimalBPP >= 24)
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 18)
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
} else {
- if (DecimalBPP / 1.5 >= 24)
+ if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP / 1.5 >= 20)
+ else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
- else if (DecimalBPP / 1.5 >= 16)
+ else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
@@ -3301,53 +3303,86 @@ static unsigned int TruncToValidBPP(
} else {
if (DSCEnabled) {
if (Format == dm_420) {
- if (DecimalBPP < 6)
- return BPP_INVALID;
- else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
- return 1.5 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 6)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 6
+ || DesiredBPP < 6
+ || DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
} else if (Format == dm_n422) {
- if (DecimalBPP < 7)
- return BPP_INVALID;
- else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
- return 2 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 7)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 7
+ || DesiredBPP < 7
+ || DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
} else {
- if (DecimalBPP < 8)
- return BPP_INVALID;
- else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
- return 3 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 8)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 8
+ || DesiredBPP < 8
+ || DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
}
} else if (Format == dm_420) {
- if (DecimalBPP >= 18)
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
- else if (DecimalBPP >= 15)
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
- else if (DecimalBPP >= 12)
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_s422 || Format == dm_n422) {
- if (DecimalBPP >= 24)
+ if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 20)
+ else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
- else if (DecimalBPP >= 16)
+ else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
} else {
- if (DecimalBPP >= 36)
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
- else if (DecimalBPP >= 30)
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
- else if (DecimalBPP >= 24)
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 18)
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
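
The repeated pattern "DecimalBPP >= X && (DesiredBPP == 0 || DesiredBPP == X)" implements one rule: with no forced link bpp (DesiredBPP == 0), pick the largest standard bpp the link budget covers; with a forced bpp, accept it only when the budget covers exactly that value. A minimal standalone sketch of that selection using the dm_444 candidate list (function name and the BPP_INVALID value of 0 are assumptions for illustration):

/*
 * Illustrative sketch, not the kernel routine: how the DesiredBPP == 0
 * "largest valid value" vs. "honor the forced value" selection behaves.
 */
#include <stdio.h>

#define BPP_INVALID 0 /* assumed value, for the sketch only */

static unsigned int trunc_to_valid_bpp_444(double decimal_bpp, unsigned int desired_bpp)
{
    static const unsigned int candidates[] = { 36, 30, 24, 18 };

    for (unsigned int i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        /* Accept a candidate only if the link budget covers it and it is
         * either unforced (desired_bpp == 0) or exactly the forced value. */
        if (decimal_bpp >= candidates[i] &&
            (desired_bpp == 0 || desired_bpp == candidates[i]))
            return candidates[i];
    }
    return BPP_INVALID;
}

int main(void)
{
    printf("%u\n", trunc_to_valid_bpp_444(32.0, 0));  /* 30: largest fit    */
    printf("%u\n", trunc_to_valid_bpp_444(32.0, 24)); /* 24: forced value   */
    printf("%u\n", trunc_to_valid_bpp_444(20.0, 24)); /* 0: forced too big  */
    return 0;
}
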
@@ -4136,6 +4171,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4152,6 +4188,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4159,6 +4196,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4181,6 +4219,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4188,6 +4227,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4212,6 +4252,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4219,6 +4260,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4247,10 +4289,11 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
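
Here skip_dio_check[k] (set from dout->is_virtual in fetch_pipe_params, further down in this patch) exempts a plane from the DIO support test, so virtual outputs no longer fail mode validation; the same change is made for dcn21 below. A compilable sketch of the gating, with field names abbreviated:

/* Sketch of the skip_dio_check gating: a plane flagged as virtual is
 * excluded from the DIO support test entirely. Names are illustrative. */
#include <stdbool.h>

struct plane_check {
    bool skip_dio_check;   /* set from dout->is_virtual                */
    bool bpp_invalid;      /* OutputBppPerState == BPP_INVALID         */
    bool interlaced_420;   /* dm_420 + interlace + P2I unit in OPP     */
};

static bool dio_supported(const struct plane_check *p, int nplanes)
{
    for (int k = 0; k < nplanes; k++) {
        if (!p[k].skip_dio_check &&
            (p[k].bpp_invalid || p[k].interlaced_420))
            return false;
    }
    return true;
}
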
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 86ff24dffc3e..398210d1af34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4257,10 +4257,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
@@ -5121,48 +5122,48 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
- if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+ if (!mode_lib->vba.ScaleRatioAndTapsSupport) {
status = DML_FAIL_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+ } else if (!mode_lib->vba.SourceFormatPixelAndScanSupport) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i][0] != true) {
+ } else if (!locals->ViewportSizeSupport[i][0]) {
status = DML_FAIL_VIEWPORT_SIZE;
- } else if (locals->DIOSupport[i] != true) {
+ } else if (!locals->DIOSupport[i]) {
status = DML_FAIL_DIO_SUPPORT;
- } else if (locals->NotEnoughDSCUnits[i] != false) {
+ } else if (locals->NotEnoughDSCUnits[i]) {
status = DML_FAIL_NOT_ENOUGH_DSC;
- } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+ } else if (locals->DSCCLKRequiredMoreThanSupported[i]) {
status = DML_FAIL_DSC_CLK_REQUIRED;
- } else if (locals->ROBSupport[i][0] != true) {
+ } else if (!locals->ROBSupport[i][0]) {
status = DML_FAIL_REORDERING_BUFFER;
- } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+ } else if (!locals->DISPCLK_DPPCLK_Support[i][j]) {
status = DML_FAIL_DISPCLK_DPPCLK;
- } else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+ } else if (!locals->TotalAvailablePipesSupport[i][j]) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
- } else if (mode_lib->vba.NumberOfOTGSupport != true) {
+ } else if (!mode_lib->vba.NumberOfOTGSupport) {
status = DML_FAIL_NUM_OTG;
- } else if (mode_lib->vba.WritebackModeSupport != true) {
+ } else if (!mode_lib->vba.WritebackModeSupport) {
status = DML_FAIL_WRITEBACK_MODE;
- } else if (mode_lib->vba.WritebackLatencySupport != true) {
+ } else if (!mode_lib->vba.WritebackLatencySupport) {
status = DML_FAIL_WRITEBACK_LATENCY;
- } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+ } else if (!mode_lib->vba.WritebackScaleRatioAndTapsSupport) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.CursorSupport != true) {
+ } else if (!mode_lib->vba.CursorSupport) {
status = DML_FAIL_CURSOR_SUPPORT;
- } else if (mode_lib->vba.PitchSupport != true) {
+ } else if (!mode_lib->vba.PitchSupport) {
status = DML_FAIL_PITCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+ } else if (!locals->TotalVerticalActiveBandwidthSupport[i][0]) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
- } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+ } else if (!locals->PTEBufferSizeNotExceeded[i][j]) {
status = DML_FAIL_PTE_BUFFER_SIZE;
- } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+ } else if (mode_lib->vba.NonsupportedDSCInputBPC) {
status = DML_FAIL_DSC_INPUT_BPC;
- } else if ((mode_lib->vba.HostVMEnable != false
- && locals->ImmediateFlipSupportedForState[i][j] != true)) {
+ } else if ((mode_lib->vba.HostVMEnable
+ && !locals->ImmediateFlipSupportedForState[i][j])) {
status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
- } else if (locals->PrefetchSupported[i][j] != true) {
+ } else if (!locals->PrefetchSupported[i][j]) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+ } else if (!locals->VRatioInPrefetchSupported[i][j]) {
status = DML_FAIL_V_RATIO_PREFETCH;
}
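
The churn in this hunk is purely stylistic: each "x != true" / "y != false" collapses to "!x" / "y". The surrounding logic is a first-failure-wins ladder; a minimal sketch under assumed enum and parameter names:

#include <stdbool.h>

enum dm_validation_status {
    DML_VALIDATION_OK,
    DML_FAIL_SCALE_RATIO_TAP,
    DML_FAIL_VIEWPORT_SIZE,
};

/* The first failed check determines the reported status; later checks
 * are never evaluated. */
static enum dm_validation_status validate(bool scale_ok, bool viewport_ok)
{
    if (!scale_ok)
        return DML_FAIL_SCALE_RATIO_TAP;
    if (!viewport_ok)
        return DML_FAIL_VIEWPORT_SIZE;
    return DML_VALIDATION_OK;
}
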
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 319dec59bcd1..bc07082c1357 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1219,13 +1219,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -4263,7 +4263,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
- if (v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
+ if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
&& (v->OutputBppPerState[i][k] == 0
|| (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
v->DIOSupport[i] = false;
@@ -5558,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
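
Dropping "&& PrefetchMode == 0" from the first branch means vactive DRAM clock change is reported whenever the margin is positive, while the vblank fallback stays gated on PrefetchMode. A sketch of the resulting classification (names abbreviated, thresholds illustrative):

enum ddc { DDC_VACTIVE, DDC_VBLANK, DDC_UNSUPPORTED };

static enum ddc classify(double vactive_margin, int prefetch_mode, int vblank_ok)
{
    if (vactive_margin > 0)          /* no longer requires prefetch_mode == 0 */
        return DDC_VACTIVE;
    if (vblank_ok && prefetch_mode == 0)
        return DDC_VBLANK;
    return DDC_UNSUPPORTED;
}
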
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5b5916b5bc71..0f14f205ebe5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -165,8 +165,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -191,37 +191,37 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else if (!rq_param->yuv420) {
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_c = full_swath_bytes_packed_c;
swath_bytes_l = full_swath_bytes_packed_l / 2;
} else if ((double)full_swath_bytes_packed_l / (double)full_swath_bytes_packed_c < 1.5) {
- req128_l = 0;
- req128_c = 1;
+ req128_l = false;
+ req128_c = true;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c / 2;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = full_swath_bytes_packed_l / 2;
}
} else {
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l/2;
swath_bytes_c = full_swath_bytes_packed_c;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
- req128_c = 1;
+ req128_c = true;
swath_bytes_c = full_swath_bytes_packed_c/2;
}
}
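
The req128 flags chosen above decide whether each plane falls back from 256-byte to 128-byte requests when the two swaths overflow the detile buffer. A simplified, compilable sketch of the decision order (an assumed simplification, not the kernel routine):

#include <stdbool.h>

struct split_result { bool req128_l, req128_c; };

static struct split_result pick_split(unsigned l_bytes, unsigned c_bytes,
                                      bool yuv420, unsigned buf_bytes)
{
    struct split_result r = { false, false };

    if (2 * (l_bytes + c_bytes) <= buf_bytes)
        return r;                    /* both planes fit: full 256B requests */
    if (!yuv420) {
        r.req128_l = true;           /* single plane: halve the luma swath  */
        return r;
    }
    /* Dual-plane: halve the relatively larger plane first, then the other
     * one as well if the total still overflows. */
    if ((double)l_bytes / c_bytes < 1.5) {
        r.req128_c = true;
        if (2 * l_bytes + c_bytes > buf_bytes)
            r.req128_l = true;
    } else {
        r.req128_l = true;
        if (l_bytes + 2 * c_bytes > buf_bytes)
            r.req128_c = true;
    }
    return r;
}
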
@@ -1006,8 +1006,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double min_dst_y_ttu_vblank = 0;
unsigned int dlg_vblank_start = 0;
- bool dual_plane = 0;
- bool mode_422 = 0;
+ bool dual_plane = false;
+ bool mode_422 = false;
unsigned int access_dir = 0;
unsigned int vp_height_l = 0;
unsigned int vp_width_l = 0;
@@ -1021,7 +1021,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double hratio_c = 0;
double vratio_l = 0;
double vratio_c = 0;
- bool scl_enable = 0;
+ bool scl_enable = false;
double line_time_in_us = 0;
// double vinit_l;
@@ -1156,7 +1156,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index dd0c3b1780d7..0c5128187e08 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -297,6 +297,7 @@ struct _vcs_dpi_display_output_params_st {
int num_active_wb;
int output_bpc;
int output_type;
+ int is_virtual;
int output_format;
int dsc_slices;
int max_audio_sample_rate;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index c9fbb33f05a3..bc0485a59018 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -451,6 +451,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
+ mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] =
+ dout->is_virtual;
if (!dout->dsc_enable)
mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3529fedc4c52..025aa5bd8ea0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -340,6 +340,7 @@ struct vba_vars_st {
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
enum output_encoder_class Output[DC__NUM_DPP__MAX];
+ bool skip_dio_check[DC__NUM_DPP__MAX];
unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
bool SynchronizedVBlank;
unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
index df68430aeb0c..c6e28f6bf1a2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -28,6 +28,7 @@
*/
#include "dm_services.h"
+#include "hw_factory_diag.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
index 8a74f6adb8ee..bf68eb1d9a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -26,6 +26,8 @@
#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+struct hw_factory;
+
/* Initialize HW factory function pointers and pin info */
void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
index bf9068846927..e5138a5a8eb5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "hw_translate_diag.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1ae153eab31d..7a8cec2d7a90 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -107,13 +107,12 @@ static enum gpio_result set_config(
msleep(3);
}
} else {
- uint32_t reg2;
uint32_t sda_pd_dis = 0;
uint32_t scl_pd_dis = 0;
- reg2 = REG_GET_2(gpio.MASK_reg,
- DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
- DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+ REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
if (sda_pd_dis) {
REG_SET(gpio.MASK_reg, regval,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index da73bfb3cacd..92c65d2fa7d7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -119,17 +119,3 @@ bool dal_hw_factory_init(
return false;
}
}
-
-void dal_hw_factory_destroy(
- struct dc_context *ctx,
- struct hw_factory **factory)
-{
- if (!factory || !*factory) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- kfree(*factory);
-
- *factory = NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2d77eac66cb0..8efa1b80546d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -333,6 +333,7 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
+ bool vtp_locked;
};
struct resource_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index ffd37696b6b9..316301fc1e30 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -309,9 +309,9 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
{
if (cur_support != calc_support) {
- if (calc_support == true && safe_to_lower)
+ if (calc_support && safe_to_lower)
return true;
- else if (calc_support == false && !safe_to_lower)
+ else if (!calc_support && !safe_to_lower)
return true;
}
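
With the "== true/false" comparisons dropped, the predicate reduces to "update only in the direction safe_to_lower permits", i.e. once the values differ it is simply calc == safe_to_lower. An equivalent sketch:

#include <stdbool.h>

static bool should_update(bool safe_to_lower, bool calc, bool cur)
{
    if (cur == calc)
        return false;               /* nothing to change */
    return calc == safe_to_lower;   /* raise when !safe_to_lower, lower when safe */
}
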
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 69d9fbfb4bec..cd1c0dc32bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -74,6 +74,16 @@ struct dmcu_funcs {
bool (*is_dmcu_initialized)(struct dmcu *dmcu);
bool (*lock_phy)(struct dmcu *dmcu);
bool (*unlock_phy)(struct dmcu *dmcu);
+ bool (*send_edid_cea)(struct dmcu *dmcu,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length);
+ bool (*recv_amd_vsdb)(struct dmcu *dmcu,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate);
+ bool (*recv_edid_cea_ack)(struct dmcu *dmcu, int *offset);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f7632fe25976..754832d216fd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -190,6 +190,7 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
+ bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 62804dc7b698..0586ab2ffd6a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -54,6 +54,7 @@ struct hw_sequencer_funcs {
/* Embedded Display Related */
void (*edp_power_control)(struct dc_link *link, bool enable);
void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+ void (*edp_wait_for_T12)(struct dc_link *link);
/* Pipe Programming Related */
void (*init_hw)(struct dc *dc);
@@ -217,6 +218,9 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
+ bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
+
bool (*is_abm_supported)(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
@@ -227,6 +231,10 @@ struct hw_sequencer_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset);
+
+ void (*set_hubp_blank)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index f956b3bde680..34f43cb650f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -58,6 +58,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
@@ -167,6 +179,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
@@ -241,6 +258,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.funcs = &vblank_irq_info_funcs\
}
+#define vline0_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vline0_irq_info_funcs\
+ }
+
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
@@ -349,6 +374,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
+ vline0_int_entry(0),
+ vline0_int_entry(1),
+ vline0_int_entry(2),
+ vline0_int_entry(3),
+ vline0_int_entry(4),
+ vline0_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dcn10 = {
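
vline0_int_entry() relies on C designated initializers to land each expansion at a computed index of a sparse table. A self-contained sketch of the same registration pattern (enum values and names are illustrative):

#include <stdio.h>

enum irq_source { SRC_VLINE0_1 = 10, SRC_MAX = 16 };

struct irq_info { const char *name; };

#define VLINE0_ENTRY(n) [SRC_VLINE0_1 + (n)] = { .name = "vline0" }

static const struct irq_info table[SRC_MAX] = {
    VLINE0_ENTRY(0),
    VLINE0_ENTRY(1),
    VLINE0_ENTRY(2),
};

int main(void)
{
    printf("%s\n", table[SRC_VLINE0_1 + 1].name); /* prints "vline0" */
    return 0;
}
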
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 1b971265418b..0e0f494fbb5e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &vblank_irq_info_funcs\
}
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, matching the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
+ }
+
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 6bf27bde8724..5f245bde54ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index d0ccd81ad5b4..87812d81fed3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -160,6 +160,7 @@ enum irq_type
IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+ IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 249a076d6f69..072b4e7e624b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
+#define DMUB_FW_VERSION_GIT_HASH 0x6444c02e7
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 47
+#define DMUB_FW_VERSION_REVISION 51
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -458,6 +458,10 @@ struct dmub_rb_cmd_mall {
uint16_t cursor_pitch;
uint16_t cursor_height;
uint8_t cursor_bpp;
+ uint8_t debug_bits;
+
+ uint8_t reserved1;
+ uint8_t reserved2;
};
struct dmub_cmd_digx_encoder_control_data {
@@ -487,13 +491,34 @@ struct dmub_rb_cmd_enable_disp_power_gating {
struct dmub_cmd_enable_disp_power_gating_data power_gating;
};
-struct dmub_cmd_dig1_transmitter_control_data {
+struct dmub_dig_transmitter_control_data_v1_7 {
+ uint8_t phyid; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t action; /**< Defined as ATOM_TRANSMITER_ACTION_xxx */
+ union {
+ uint8_t digmode; /**< enum atom_encode_mode_def */
+ uint8_t dplaneset; /**< DP voltage swing and pre-emphasis value, "DP_LANE_SET__xDB_y_zV" */
+ } mode_laneset;
+ uint8_t lanenum; /**< Number of lanes */
+ union {
+ uint32_t symclk_10khz; /**< Symbol clock, in units of 10 kHz */
+ } symclk_units;
+ uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
+ uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
+ uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
+ uint8_t reserved0; /**< For future use */
+ uint8_t reserved1; /**< For future use */
+ uint8_t reserved2[3]; /**< For future use */
+ uint32_t reserved3[11]; /**< For future use */
+};
+
+union dmub_cmd_dig1_transmitter_control_data {
struct dig_transmitter_control_parameters_v1_6 dig;
+ struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7;
};
struct dmub_rb_cmd_dig1_transmitter_control {
struct dmub_cmd_header header;
- struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+ union dmub_cmd_dig1_transmitter_control_data transmitter_control;
};
struct dmub_rb_cmd_dpphy_init {
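
Converting transmitter_control from a struct to a union lets the v1.6 and v1.7 payload layouts share one fixed-size command slot. A sketch of the pattern with placeholder structs (not the DMUB ABI):

#include <stdint.h>

struct xmit_v1_6 { uint8_t phyid; uint8_t action; };
struct xmit_v1_7 { uint8_t phyid; uint8_t action; uint8_t hpdsel; uint8_t rsvd[13]; };

union xmit_data {
    struct xmit_v1_6 v1_6;
    struct xmit_v1_7 v1_7;   /* newer layout, same storage */
};

struct xmit_cmd {
    uint32_t header;         /* stands in for struct dmub_cmd_header       */
    union xmit_data data;    /* sender fills the member matching the fw    */
};
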
@@ -624,6 +649,7 @@ enum dmub_cmd_mall_type {
DMUB_CMD__MALL_ACTION_ALLOW = 0,
DMUB_CMD__MALL_ACTION_DISALLOW = 1,
DMUB_CMD__MALL_ACTION_COPY_CURSOR = 2,
+ DMUB_CMD__MALL_ACTION_NO_DF_REQ = 3,
};
struct dmub_cmd_psr_copy_settings_data {
@@ -648,6 +674,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t multi_disp_optimizations_en;
uint16_t init_sdp_deadline;
uint16_t pad2;
+ uint32_t line_time_in_us;
};
struct dmub_rb_cmd_psr_copy_settings {
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cafba1d23c6a..8e8e65fa83c0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -81,6 +81,13 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub)
+{
+ /* Cached inbox is not supported in this fw version range */
+ return !(dmub->fw_version >= DMUB_FW_VERSION(1, 0, 0) &&
+ dmub->fw_version <= DMUB_FW_VERSION(1, 10, 0));
+}
+
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
@@ -216,7 +223,7 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
/* New firmware can support CW4. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+ if (dmub_dcn20_use_cached_inbox(dmub)) {
REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
@@ -255,7 +262,7 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1)
{
/* New firmware can support CW4 for the inbox. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10))
+ if (dmub_dcn20_use_cached_inbox(dmub))
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
else
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
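
dmub_dcn20_use_cached_inbox() replaces the scattered "fw_version > DMUB_FW_VERSION(1, 0, 10)" checks with one named predicate over a version range. A sketch of the gate; the version packing shown is an assumption for illustration, not the real DMUB_FW_VERSION macro:

#include <stdbool.h>
#include <stdint.h>

/* Assumed packing: major/minor/revision folded into one comparable value. */
#define FW_VERSION(maj, min, rev) \
    ((uint32_t)(((maj) << 24) | ((min) << 16) | (rev)))

static bool use_cached_inbox(uint32_t fw_version)
{
    /* Cached inbox unsupported for 1.0.0 .. 1.10.0 inclusive. */
    return !(fw_version >= FW_VERSION(1, 0, 0) &&
             fw_version <= FW_VERSION(1, 10, 0));
}
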
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index d438f365cbb0..a62be9c0652e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -198,4 +198,6 @@ void dmub_dcn20_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub);
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index f00df02ded81..b4bc0df2f14a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -26,6 +26,7 @@
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
+#include "dmub_dcn30.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
@@ -154,7 +155,7 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
offset = cw4->offset;
/* New firmware can support CW4. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+ if (dmub_dcn20_use_cached_inbox(dmub)) {
REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index f388d36af0b6..61f64a295f06 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -406,6 +406,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fb_offset = params->fb_offset;
dmub->psp_version = params->psp_version;
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
if (inst_fb && data_fb) {
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
@@ -427,9 +430,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
}
- if (dmub->hw_funcs.reset)
- dmub->hw_funcs.reset(dmub);
-
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
@@ -489,9 +489,6 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (dmub->hw_init == false)
- return DMUB_STATUS_OK;
-
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.c b/drivers/gpu/drm/amd/display/modules/color/color_table.c
index 692e536e7d05..410f2a82b9a2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_table.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.c
@@ -1,10 +1,26 @@
/*
- * Copyright (c) 2019 Advanced Micro Devices, Inc. (unpublished)
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
*
- * All rights reserved. This notice is intended as a precaution against
- * inadvertent publication and does not imply publication or any waiver
- * of confidentiality. The year included in the foregoing notice is the
- * year of creation of the work.
*/
#include "color_table.h"
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 6c678cfb82e3..5c22cf7e6118 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -397,7 +397,7 @@ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
- hdcp->connection.link.dp.mst_supported);
+ hdcp->connection.link.dp.mst_enabled);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 3a367a5968ae..904ce9b88088 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -106,7 +106,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
if (is_dp_hdcp(hdcp))
- dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_enabled;
dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
@@ -548,6 +548,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
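
The added else branch closes a gap where a cert that failed validation without being revoked left status at its prior value. A reduced sketch of the control flow (names abbreviated):

enum status { OK, CERT_REVOKED, CERT_FAILURE };

static enum status check_cert(int ta_status, int ta_revoked)
{
    enum status s = OK;

    if (ta_status != 0) {            /* TA reported a problem      */
        if (ta_revoked)
            s = CERT_REVOKED;        /* receiver ID revoked        */
        else
            s = CERT_FAILURE;        /* the added else branch      */
    }
    return s;
}
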
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index eed560eecbab..d223ed3be5d3 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -101,8 +101,8 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
- uint8_t assr_supported;
- uint8_t mst_supported;
+ uint8_t assr_enabled;
+ uint8_t mst_enabled;
};
struct mod_hdcp_hdmi {
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 0fdf7a3e96de..57f198de5e2c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -409,16 +409,11 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/**
- *****************************************************************************
- * Function: mod_build_hf_vsif_infopacket
+ * mod_build_hf_vsif_infopacket - Prepare HDMI Vendor Specific info frame.
+ * Follows the HDMI spec to build up the Vendor Specific info frame.
*
- * @brief
- * Prepare HDMI Vendor Specific info frame.
- * Follows HDMI Spec to build up Vendor Specific info frame
- *
- * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
- * @param [out] info_packet: output structure where to store VSIF
- *****************************************************************************
+ * @stream: contains data we may need to construct the VSIF (e.g. timing_3d_format)
+ * @info_packet: output structure in which to store the VSIF
*/
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4fd8bce95d84..6270ecbd2438 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -266,7 +266,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+ lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries);
ASSERT(lut_index < params.backlight_lut_array_size);
table->backlight_thresholds[i] = (big_endian) ?
@@ -278,7 +278,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
}
}
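
DIV_ROUNDUP here is a local helper in power_helpers.c; switching to it biases interior LUT indices upward compared with the old floor expression. A worked sketch comparing the two formulas (the DIV_ROUNDUP definition below is an assumed round-half-up, for illustration only):

#include <stdio.h>

#define DIV_ROUNDUP(a, b) (((a) + ((b) / 2)) / (b)) /* assumed definition */

int main(void)
{
    unsigned int array_size = 17, num_entries = 16;

    for (unsigned int i = 1; i + 1 < num_entries; i++) {
        unsigned int idx_old = (array_size - 1) * i / (num_entries - 1);
        unsigned int idx_new = DIV_ROUNDUP(i * array_size, num_entries);

        printf("i=%2u old=%2u new=%2u\n", i, idx_old, idx_new);
    }
    return 0;
}
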
-void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -452,7 +452,7 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
params, ram_table);
}
-void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -598,7 +598,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
+static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 9cb9ceb4d74d..a1ece3eecdf5 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -28,6 +28,7 @@
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5 0x00100000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
@@ -36,6 +37,7 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5 0x00000010
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9676016a37ce..43ed6291b2b8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -213,6 +213,7 @@ enum PP_FEATURE_MASK {
PP_ACG_MASK = 0x10000,
PP_STUTTER_MODE = 0x20000,
PP_AVFS_MASK = 0x40000,
+ PP_GFX_DCS_MASK = 0x80000,
};
enum DC_FEATURE_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
new file mode 100644
index 000000000000..bd129266ebfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_OFFSET_HEADER
+#define _osssys_4_2_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define mmIH_VMID_0_LUT 0x0000
+#define mmIH_VMID_0_LUT_BASE_IDX 0
+#define mmIH_VMID_1_LUT 0x0001
+#define mmIH_VMID_1_LUT_BASE_IDX 0
+#define mmIH_VMID_2_LUT 0x0002
+#define mmIH_VMID_2_LUT_BASE_IDX 0
+#define mmIH_VMID_3_LUT 0x0003
+#define mmIH_VMID_3_LUT_BASE_IDX 0
+#define mmIH_VMID_4_LUT 0x0004
+#define mmIH_VMID_4_LUT_BASE_IDX 0
+#define mmIH_VMID_5_LUT 0x0005
+#define mmIH_VMID_5_LUT_BASE_IDX 0
+#define mmIH_VMID_6_LUT 0x0006
+#define mmIH_VMID_6_LUT_BASE_IDX 0
+#define mmIH_VMID_7_LUT 0x0007
+#define mmIH_VMID_7_LUT_BASE_IDX 0
+#define mmIH_VMID_8_LUT 0x0008
+#define mmIH_VMID_8_LUT_BASE_IDX 0
+#define mmIH_VMID_9_LUT 0x0009
+#define mmIH_VMID_9_LUT_BASE_IDX 0
+#define mmIH_VMID_10_LUT 0x000a
+#define mmIH_VMID_10_LUT_BASE_IDX 0
+#define mmIH_VMID_11_LUT 0x000b
+#define mmIH_VMID_11_LUT_BASE_IDX 0
+#define mmIH_VMID_12_LUT 0x000c
+#define mmIH_VMID_12_LUT_BASE_IDX 0
+#define mmIH_VMID_13_LUT 0x000d
+#define mmIH_VMID_13_LUT_BASE_IDX 0
+#define mmIH_VMID_14_LUT 0x000e
+#define mmIH_VMID_14_LUT_BASE_IDX 0
+#define mmIH_VMID_15_LUT 0x000f
+#define mmIH_VMID_15_LUT_BASE_IDX 0
+#define mmIH_VMID_0_LUT_MM 0x0010
+#define mmIH_VMID_0_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_1_LUT_MM 0x0011
+#define mmIH_VMID_1_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_2_LUT_MM 0x0012
+#define mmIH_VMID_2_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_3_LUT_MM 0x0013
+#define mmIH_VMID_3_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_4_LUT_MM 0x0014
+#define mmIH_VMID_4_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_5_LUT_MM 0x0015
+#define mmIH_VMID_5_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_6_LUT_MM 0x0016
+#define mmIH_VMID_6_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_7_LUT_MM 0x0017
+#define mmIH_VMID_7_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_8_LUT_MM 0x0018
+#define mmIH_VMID_8_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_9_LUT_MM 0x0019
+#define mmIH_VMID_9_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_10_LUT_MM 0x001a
+#define mmIH_VMID_10_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_11_LUT_MM 0x001b
+#define mmIH_VMID_11_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_12_LUT_MM 0x001c
+#define mmIH_VMID_12_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_13_LUT_MM 0x001d
+#define mmIH_VMID_13_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_14_LUT_MM 0x001e
+#define mmIH_VMID_14_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_15_LUT_MM 0x001f
+#define mmIH_VMID_15_LUT_MM_BASE_IDX 0
+#define mmIH_COOKIE_0 0x0020
+#define mmIH_COOKIE_0_BASE_IDX 0
+#define mmIH_COOKIE_1 0x0021
+#define mmIH_COOKIE_1_BASE_IDX 0
+#define mmIH_COOKIE_2 0x0022
+#define mmIH_COOKIE_2_BASE_IDX 0
+#define mmIH_COOKIE_3 0x0023
+#define mmIH_COOKIE_3_BASE_IDX 0
+#define mmIH_COOKIE_4 0x0024
+#define mmIH_COOKIE_4_BASE_IDX 0
+#define mmIH_COOKIE_5 0x0025
+#define mmIH_COOKIE_5_BASE_IDX 0
+#define mmIH_COOKIE_6 0x0026
+#define mmIH_COOKIE_6_BASE_IDX 0
+#define mmIH_COOKIE_7 0x0027
+#define mmIH_COOKIE_7_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART0 0x003f
+#define mmIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_0 0x0040
+#define mmSEM_REQ_INPUT_0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_1 0x0041
+#define mmSEM_REQ_INPUT_1_BASE_IDX 0
+#define mmSEM_REQ_INPUT_2 0x0042
+#define mmSEM_REQ_INPUT_2_BASE_IDX 0
+#define mmSEM_REQ_INPUT_3 0x0043
+#define mmSEM_REQ_INPUT_3_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART0 0x007f
+#define mmSEM_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmIH_RB_CNTL 0x0080
+#define mmIH_RB_CNTL_BASE_IDX 0
+#define mmIH_RB_BASE 0x0081
+#define mmIH_RB_BASE_BASE_IDX 0
+#define mmIH_RB_BASE_HI 0x0082
+#define mmIH_RB_BASE_HI_BASE_IDX 0
+#define mmIH_RB_RPTR 0x0083
+#define mmIH_RB_RPTR_BASE_IDX 0
+#define mmIH_RB_WPTR 0x0084
+#define mmIH_RB_WPTR_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_HI 0x0085
+#define mmIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_LO 0x0086
+#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR 0x0087
+#define mmIH_DOORBELL_RPTR_BASE_IDX 0
+#define mmIH_RB_CNTL_RING1 0x008c
+#define mmIH_RB_CNTL_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_RING1 0x008d
+#define mmIH_RB_BASE_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING1 0x008e
+#define mmIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define mmIH_RB_RPTR_RING1 0x008f
+#define mmIH_RB_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_WPTR_RING1 0x0090
+#define mmIH_RB_WPTR_RING1_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING1 0x0093
+#define mmIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_CNTL_RING2 0x0098
+#define mmIH_RB_CNTL_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_RING2 0x0099
+#define mmIH_RB_BASE_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING2 0x009a
+#define mmIH_RB_BASE_HI_RING2_BASE_IDX 0
+#define mmIH_RB_RPTR_RING2 0x009b
+#define mmIH_RB_RPTR_RING2_BASE_IDX 0
+#define mmIH_RB_WPTR_RING2 0x009c
+#define mmIH_RB_WPTR_RING2_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING2 0x009f
+#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX 0
+#define mmIH_VERSION 0x00a5
+#define mmIH_VERSION_BASE_IDX 0
+#define mmIH_CNTL 0x00c0
+#define mmIH_CNTL_BASE_IDX 0
+#define mmIH_CNTL2 0x00c1
+#define mmIH_CNTL2_BASE_IDX 0
+#define mmIH_STATUS 0x00c2
+#define mmIH_STATUS_BASE_IDX 0
+#define mmIH_PERFMON_CNTL 0x00c3
+#define mmIH_PERFMON_CNTL_BASE_IDX 0
+#define mmIH_PERFCOUNTER0_RESULT 0x00c4
+#define mmIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmIH_PERFCOUNTER1_RESULT 0x00c5
+#define mmIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define mmIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define mmIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define mmIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_FCN_ID 0x00cc
+#define mmIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define mmIH_LIMIT_INT_RATE_CNTL 0x00cd
+#define mmIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define mmIH_VF_RB_STATUS 0x00ce
+#define mmIH_VF_RB_STATUS_BASE_IDX 0
+#define mmIH_VF_RB_STATUS2 0x00cf
+#define mmIH_VF_RB_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS 0x00d0
+#define mmIH_VF_RB1_STATUS_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS2 0x00d1
+#define mmIH_VF_RB1_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS 0x00d2
+#define mmIH_VF_RB2_STATUS_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS2 0x00d3
+#define mmIH_VF_RB2_STATUS2_BASE_IDX 0
+#define mmIH_INT_FLOOD_CNTL 0x00d5
+#define mmIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define mmIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define mmIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define mmIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB2_INT_FLOOD_STATUS 0x00d8
+#define mmIH_RB2_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_INT_FLOOD_STATUS 0x00d9
+#define mmIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_STORM_CLIENT_LIST_CNTL 0x00da
+#define mmIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define mmIH_CLK_CTRL 0x00db
+#define mmIH_CLK_CTRL_BASE_IDX 0
+#define mmIH_INT_FLAGS 0x00dc
+#define mmIH_INT_FLAGS_BASE_IDX 0
+#define mmIH_LAST_INT_INFO0 0x00dd
+#define mmIH_LAST_INT_INFO0_BASE_IDX 0
+#define mmIH_LAST_INT_INFO1 0x00de
+#define mmIH_LAST_INT_INFO1_BASE_IDX 0
+#define mmIH_LAST_INT_INFO2 0x00df
+#define mmIH_LAST_INT_INFO2_BASE_IDX 0
+#define mmIH_SCRATCH 0x00e0
+#define mmIH_SCRATCH_BASE_IDX 0
+#define mmIH_CLIENT_CREDIT_ERROR 0x00e1
+#define mmIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define mmIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define mmIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_COOKIE_REC_VIOLATION_LOG 0x00e3
+#define mmIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_CREDIT_STATUS 0x00e4
+#define mmIH_CREDIT_STATUS_BASE_IDX 0
+#define mmIH_MMHUB_ERROR 0x00e5
+#define mmIH_MMHUB_ERROR_BASE_IDX 0
+#define mmIH_MEM_POWER_CTRL 0x00e8
+#define mmIH_MEM_POWER_CTRL_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART2 0x00ff
+#define mmIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmSEM_CLK_CTRL 0x0100
+#define mmSEM_CLK_CTRL_BASE_IDX 0
+#define mmSEM_UTC_CREDIT 0x0101
+#define mmSEM_UTC_CREDIT_BASE_IDX 0
+#define mmSEM_UTC_CONFIG 0x0102
+#define mmSEM_UTC_CONFIG_BASE_IDX 0
+#define mmSEM_UTCL2_TRAN_EN_LUT 0x0103
+#define mmSEM_UTCL2_TRAN_EN_LUT_BASE_IDX 0
+#define mmSEM_MCIF_CONFIG 0x0104
+#define mmSEM_MCIF_CONFIG_BASE_IDX 0
+#define mmSEM_PERFMON_CNTL 0x0105
+#define mmSEM_PERFMON_CNTL_BASE_IDX 0
+#define mmSEM_PERFCOUNTER0_RESULT 0x0106
+#define mmSEM_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSEM_PERFCOUNTER1_RESULT 0x0107
+#define mmSEM_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSEM_STATUS 0x0108
+#define mmSEM_STATUS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0109
+#define mmSEM_MAILBOX_CLIENTCONFIG_BASE_IDX 0
+#define mmSEM_MAILBOX 0x010a
+#define mmSEM_MAILBOX_BASE_IDX 0
+#define mmSEM_MAILBOX_CONTROL 0x010b
+#define mmSEM_MAILBOX_CONTROL_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS 0x010c
+#define mmSEM_CHICKEN_BITS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA 0x010d
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_BASE_IDX 0
+#define mmSEM_GPU_IOV_VIOLATION_LOG 0x010e
+#define mmSEM_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSEM_OUTSTANDING_THRESHOLD 0x010f
+#define mmSEM_OUTSTANDING_THRESHOLD_BASE_IDX 0
+#define mmSEM_MEM_POWER_CTRL 0x0110
+#define mmSEM_MEM_POWER_CTRL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART2 0x017f
+#define mmSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmIH_ACTIVE_FCN_ID 0x0180
+#define mmIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmIH_VIRT_RESET_REQ 0x0181
+#define mmIH_VIRT_RESET_REQ_BASE_IDX 0
+#define mmIH_CLIENT_CFG 0x0184
+#define mmIH_CLIENT_CFG_BASE_IDX 0
+#define mmIH_CLIENT_CFG_INDEX 0x0188
+#define mmIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define mmIH_CLIENT_CFG_DATA 0x0189
+#define mmIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define mmIH_CID_REMAP_INDEX 0x018a
+#define mmIH_CID_REMAP_INDEX_BASE_IDX 0
+#define mmIH_CID_REMAP_DATA 0x018b
+#define mmIH_CID_REMAP_DATA_BASE_IDX 0
+#define mmIH_CHICKEN 0x018c
+#define mmIH_CHICKEN_BASE_IDX 0
+#define mmIH_MMHUB_CNTL 0x018d
+#define mmIH_MMHUB_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_CNTL 0x018e
+#define mmIH_INT_DROP_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE0 0x018f
+#define mmIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE1 0x0190
+#define mmIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK0 0x0191
+#define mmIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK1 0x0192
+#define mmIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART1 0x019f
+#define mmIH_REGISTER_LAST_PART1_BASE_IDX 0
+#define mmSEM_ACTIVE_FCN_ID 0x01a0
+#define mmSEM_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSEM_VIRT_RESET_REQ 0x01a1
+#define mmSEM_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSEM_RESP_SDMA0 0x01a4
+#define mmSEM_RESP_SDMA0_BASE_IDX 0
+#define mmSEM_RESP_SDMA1 0x01a5
+#define mmSEM_RESP_SDMA1_BASE_IDX 0
+#define mmSEM_RESP_UVD 0x01a6
+#define mmSEM_RESP_UVD_BASE_IDX 0
+#define mmSEM_RESP_VCE_0 0x01a7
+#define mmSEM_RESP_VCE_0_BASE_IDX 0
+#define mmSEM_RESP_ACP 0x01a8
+#define mmSEM_RESP_ACP_BASE_IDX 0
+#define mmSEM_RESP_ISP 0x01a9
+#define mmSEM_RESP_ISP_BASE_IDX 0
+#define mmSEM_RESP_VCE_1 0x01aa
+#define mmSEM_RESP_VCE_1_BASE_IDX 0
+#define mmSEM_RESP_VP8 0x01ab
+#define mmSEM_RESP_VP8_BASE_IDX 0
+#define mmSEM_RESP_GC 0x01ac
+#define mmSEM_RESP_GC_BASE_IDX 0
+#define mmSEM_RESP_UVD_1 0x01ad
+#define mmSEM_RESP_UVD_1_BASE_IDX 0
+#define mmSEM_CID_REMAP_INDEX 0x01b0
+#define mmSEM_CID_REMAP_INDEX_BASE_IDX 0
+#define mmSEM_CID_REMAP_DATA 0x01b1
+#define mmSEM_CID_REMAP_DATA_BASE_IDX 0
+#define mmSEM_ATOMIC_OP_LUT 0x01b2
+#define mmSEM_ATOMIC_OP_LUT_BASE_IDX 0
+#define mmSEM_EDC_CONFIG 0x01b3
+#define mmSEM_EDC_CONFIG_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS2 0x01b4
+#define mmSEM_CHICKEN_BITS2_BASE_IDX 0
+#define mmSEM_MMHUB_CNTL 0x01b5
+#define mmSEM_MMHUB_CNTL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART1 0x01bf
+#define mmSEM_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
new file mode 100644
index 000000000000..3ea83ea9ce3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
@@ -0,0 +1,1300 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_SH_MASK_HEADER
+#define _osssys_4_2_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_0
+#define SEM_REQ_INPUT_0__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_0__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_1
+#define SEM_REQ_INPUT_1__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_1__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_2
+#define SEM_REQ_INPUT_2__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_2__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_3
+#define SEM_REQ_INPUT_3__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_3__DATA_MASK 0xFFFFFFFFL
+//SEM_REGISTER_LAST_PART0
+#define SEM_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING2
+#define IH_RB_CNTL_RING2__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING2__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING2__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING2__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING2__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING2__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING2__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING2__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING2__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING2__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING2__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING2__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING2__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING2__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING2
+#define IH_RB_BASE_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING2__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING2
+#define IH_RB_BASE_HI_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING2__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING2
+#define IH_RB_RPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING2__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING2
+#define IH_RB_WPTR_RING2__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING2__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING2__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING2
+#define IH_DOORBELL_RPTR_RING2__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK 0x10000000L
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__RB2_FULL__SHIFT 0xf
+#define IH_STATUS__RB2_FULL_DRAIN__SHIFT 0x10
+#define IH_STATUS__RB2_OVERFLOW__SHIFT 0x11
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__RB2_FULL_MASK 0x00008000L
+#define IH_STATUS__RB2_FULL_DRAIN_MASK 0x00010000L
+#define IH_STATUS__RB2_OVERFLOW_MASK 0x00020000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000007FCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x07FC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_VF_RB2_STATUS
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB2_STATUS2
+#define IH_VF_RB2_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB2_INT_FLOOD_STATUS
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x00FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_CLK_CTRL
+#define SEM_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SEM_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SEM_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define SEM_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SEM_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SEM_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//SEM_UTC_CREDIT
+#define SEM_UTC_CREDIT__UTCL2_CREDIT__SHIFT 0x0
+#define SEM_UTC_CREDIT__WATERMARK__SHIFT 0x8
+#define SEM_UTC_CREDIT__UTCL2_CREDIT_MASK 0x0000001FL
+#define SEM_UTC_CREDIT__WATERMARK_MASK 0x00000F00L
+//SEM_UTC_CONFIG
+#define SEM_UTC_CONFIG__USE_MTYPE__SHIFT 0x0
+#define SEM_UTC_CONFIG__FORCE_SNOOP__SHIFT 0x3
+#define SEM_UTC_CONFIG__FORCE_GCC__SHIFT 0x4
+#define SEM_UTC_CONFIG__USE_PT_SNOOP__SHIFT 0x5
+#define SEM_UTC_CONFIG__USE_MTYPE_MASK 0x00000007L
+#define SEM_UTC_CONFIG__FORCE_SNOOP_MASK 0x00000008L
+#define SEM_UTC_CONFIG__FORCE_GCC_MASK 0x00000010L
+#define SEM_UTC_CONFIG__USE_PT_SNOOP_MASK 0x00000020L
+//SEM_UTCL2_TRAN_EN_LUT
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN__SHIFT 0x0
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN__SHIFT 0x1
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN__SHIFT 0x2
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN__SHIFT 0x3
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN__SHIFT 0x4
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN__SHIFT 0x5
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN__SHIFT 0x6
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN__SHIFT 0x7
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN__SHIFT 0x8
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED__SHIFT 0x9
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN__SHIFT 0x1f
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN_MASK 0x00000001L
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN_MASK 0x00000002L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN_MASK 0x00000004L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN_MASK 0x00000008L
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN_MASK 0x00000010L
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN_MASK 0x00000020L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN_MASK 0x00000040L
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN_MASK 0x00000080L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN_MASK 0x00000100L
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED_MASK 0x7FFFFE00L
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN_MASK 0x80000000L
+//SEM_MCIF_CONFIG
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x0
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT__SHIFT 0x2
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT__SHIFT 0x8
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT_MASK 0x000000FCL
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT_MASK 0x00003F00L
+//SEM_PERFMON_CNTL
+#define SEM_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SEM_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SEM_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SEM_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SEM_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SEM_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SEM_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SEM_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SEM_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SEM_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SEM_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SEM_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SEM_PERFCOUNTER0_RESULT
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_PERFCOUNTER1_RESULT
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_STATUS
+#define SEM_STATUS__SEM_IDLE__SHIFT 0x0
+#define SEM_STATUS__SEM_INTERNAL_IDLE__SHIFT 0x1
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL__SHIFT 0x2
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL__SHIFT 0x3
+#define SEM_STATUS__WRITE1_FIFO_FULL__SHIFT 0x4
+#define SEM_STATUS__CHECK0_FIFO_FULL__SHIFT 0x5
+#define SEM_STATUS__MC_RDREQ_PENDING__SHIFT 0x6
+#define SEM_STATUS__MC_WRREQ_PENDING__SHIFT 0x7
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING__SHIFT 0x8
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING__SHIFT 0x9
+#define SEM_STATUS__UVD_MAILBOX_PENDING__SHIFT 0xa
+#define SEM_STATUS__VCE_MAILBOX_PENDING__SHIFT 0xb
+#define SEM_STATUS__CPG1_MAILBOX_PENDING__SHIFT 0xc
+#define SEM_STATUS__CPG2_MAILBOX_PENDING__SHIFT 0xd
+#define SEM_STATUS__VCE1_MAILBOX_PENDING__SHIFT 0xe
+#define SEM_STATUS__ATC_REQ_PENDING__SHIFT 0xf
+#define SEM_STATUS__OUTSTANDING_CLEAN__SHIFT 0x10
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH__SHIFT 0x11
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH__SHIFT 0x12
+#define SEM_STATUS__INVREQ_CNT_IDLE__SHIFT 0x13
+#define SEM_STATUS__ENTRYLIST_IDLE__SHIFT 0x14
+#define SEM_STATUS__MIF_IDLE__SHIFT 0x15
+#define SEM_STATUS__REGISTER_IDLE__SHIFT 0x16
+#define SEM_STATUS__ATCL2_INVREQ_IDLE__SHIFT 0x17
+#define SEM_STATUS__UVD1_MAILBOX_PENDING__SHIFT 0x18
+#define SEM_STATUS__SWITCH_READY__SHIFT 0x1f
+#define SEM_STATUS__SEM_IDLE_MASK 0x00000001L
+#define SEM_STATUS__SEM_INTERNAL_IDLE_MASK 0x00000002L
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL_MASK 0x00000004L
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL_MASK 0x00000008L
+#define SEM_STATUS__WRITE1_FIFO_FULL_MASK 0x00000010L
+#define SEM_STATUS__CHECK0_FIFO_FULL_MASK 0x00000020L
+#define SEM_STATUS__MC_RDREQ_PENDING_MASK 0x00000040L
+#define SEM_STATUS__MC_WRREQ_PENDING_MASK 0x00000080L
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING_MASK 0x00000100L
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING_MASK 0x00000200L
+#define SEM_STATUS__UVD_MAILBOX_PENDING_MASK 0x00000400L
+#define SEM_STATUS__VCE_MAILBOX_PENDING_MASK 0x00000800L
+#define SEM_STATUS__CPG1_MAILBOX_PENDING_MASK 0x00001000L
+#define SEM_STATUS__CPG2_MAILBOX_PENDING_MASK 0x00002000L
+#define SEM_STATUS__VCE1_MAILBOX_PENDING_MASK 0x00004000L
+#define SEM_STATUS__ATC_REQ_PENDING_MASK 0x00008000L
+#define SEM_STATUS__OUTSTANDING_CLEAN_MASK 0x00010000L
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH_MASK 0x00020000L
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH_MASK 0x00040000L
+#define SEM_STATUS__INVREQ_CNT_IDLE_MASK 0x00080000L
+#define SEM_STATUS__ENTRYLIST_IDLE_MASK 0x00100000L
+#define SEM_STATUS__MIF_IDLE_MASK 0x00200000L
+#define SEM_STATUS__REGISTER_IDLE_MASK 0x00400000L
+#define SEM_STATUS__ATCL2_INVREQ_IDLE_MASK 0x00800000L
+#define SEM_STATUS__UVD1_MAILBOX_PENDING_MASK 0x01000000L
+#define SEM_STATUS__SWITCH_READY_MASK 0x80000000L
+//SEM_MAILBOX_CLIENTCONFIG
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x3
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x6
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x9
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0__SHIFT 0xc
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0xf
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0__SHIFT 0x12
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x15
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001C0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000E00L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0_MASK 0x00007000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0_MASK 0x001C0000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00E00000L
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CONTROL
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x0
+#define SEM_MAILBOX_CONTROL__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CONTROL__RESERVED_MASK 0xFFFF0000L
+//SEM_CHICKEN_BITS
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN__SHIFT 0x0
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN__SHIFT 0x1
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN__SHIFT 0x2
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR__SHIFT 0x3
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN__SHIFT 0x6
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN__SHIFT 0x7
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX__SHIFT 0x8
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX__SHIFT 0xa
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID__SHIFT 0xc
+#define SEM_CHICKEN_BITS__ATOMIC_EN__SHIFT 0xe
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK__SHIFT 0xf
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX__SHIFT 0x10
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN__SHIFT 0x12
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK__SHIFT 0x13
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN_MASK 0x00000001L
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN_MASK 0x00000002L
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN_MASK 0x00000004L
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR_MASK 0x00000018L
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN_MASK 0x00000040L
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN_MASK 0x00000080L
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX_MASK 0x00000300L
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX_MASK 0x00000C00L
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID_MASK 0x00003000L
+#define SEM_CHICKEN_BITS__ATOMIC_EN_MASK 0x00004000L
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK_MASK 0x00008000L
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX_MASK 0x00030000L
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN_MASK 0x00040000L
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK_MASK 0x00080000L
+//SEM_MAILBOX_CLIENTCONFIG_EXTRA
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0__SHIFT 0x4
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0_MASK 0x0000000FL
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0_MASK 0x000000F0L
+//SEM_GPU_IOV_VIOLATION_LOG
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define SEM_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SEM_OUTSTANDING_THRESHOLD
+#define SEM_OUTSTANDING_THRESHOLD__VALUE__SHIFT 0x0
+#define SEM_OUTSTANDING_THRESHOLD__VALUE_MASK 0x000000FFL
+//SEM_MEM_POWER_CTRL
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN__SHIFT 0x1
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN__SHIFT 0x2
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN__SHIFT 0x3
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN_MASK 0x00000002L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN_MASK 0x00000004L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN_MASK 0x00000008L
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT 0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR_MASK 0x0003FFFFL
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK 0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000700L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x00007000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+//SEM_ACTIVE_FCN_ID
+#define SEM_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SEM_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SEM_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SEM_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SEM_VIRT_RESET_REQ
+#define SEM_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SEM_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SEM_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SEM_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SEM_RESP_SDMA0
+#define SEM_RESP_SDMA0__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_SDMA1
+#define SEM_RESP_SDMA1__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD
+#define SEM_RESP_UVD__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_0
+#define SEM_RESP_VCE_0__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ACP
+#define SEM_RESP_ACP__ADDR__SHIFT 0x2
+#define SEM_RESP_ACP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ISP
+#define SEM_RESP_ISP__ADDR__SHIFT 0x2
+#define SEM_RESP_ISP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_1
+#define SEM_RESP_VCE_1__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VP8
+#define SEM_RESP_VP8__ADDR__SHIFT 0x2
+#define SEM_RESP_VP8__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_GC
+#define SEM_RESP_GC__ADDR__SHIFT 0x2
+#define SEM_RESP_GC__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD_1
+#define SEM_RESP_UVD_1__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD_1__ADDR_MASK 0x000FFFFCL
+//SEM_CID_REMAP_INDEX
+#define SEM_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define SEM_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//SEM_CID_REMAP_DATA
+#define SEM_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define SEM_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define SEM_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define SEM_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//SEM_ATOMIC_OP_LUT
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL__SHIFT 0x0
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1__SHIFT 0x7
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL__SHIFT 0xe
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0__SHIFT 0x15
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL_MASK 0x0000007FL
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1_MASK 0x00003F80L
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL_MASK 0x001FC000L
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0_MASK 0x0FE00000L
+//SEM_EDC_CONFIG
+#define SEM_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SEM_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SEM_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SEM_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SEM_CHICKEN_BITS2
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID__SHIFT 0x1
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID_MASK 0x00000002L
+//SEM_MMHUB_CNTL
+#define SEM_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SEM_MMHUB_CNTL__TLVL_VALUE__SHIFT 0x8
+#define SEM_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+#define SEM_MMHUB_CNTL__TLVL_VALUE_MASK 0x00000700L
+//SEM_REGISTER_LAST_PART1
+#define SEM_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
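
The paired __SHIFT/_MASK macros above follow the usual AMD register-header idiom: a field is read by masking then shifting, and written by the inverse. A minimal sketch of that idiom against the SEM_MAILBOX_CLIENTCONFIG CP_CLIENT0 field defined above (the helper names here are illustrative; in-tree amdgpu code typically goes through its generic REG_GET_FIELD/REG_SET_FIELD wrappers instead):

	#include <stdint.h>

	/* Read CP_CLIENT0 out of a SEM_MAILBOX_CLIENTCONFIG value. */
	static inline uint32_t sem_get_cp_client0(uint32_t reg)
	{
		return (reg & SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK) >>
		       SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT;
	}

	/* Write a new CP_CLIENT0 value without disturbing other fields. */
	static inline uint32_t sem_set_cp_client0(uint32_t reg, uint32_t val)
	{
		reg &= ~SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK;
		reg |= (val << SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT) &
		       SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK;
		return reg;
	}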
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
new file mode 100644
index 000000000000..55facadea54b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_OFFSET_HEADER
+#define _smuio_11_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: smuio_smuio_SmuSmuioDec
+// base address: 0x5a000
+#define mmCGTT_ROM_CLK_CTRL0 0x00e4
+#define mmCGTT_ROM_CLK_CTRL0_BASE_IDX 0
+#define mmROM_INDEX 0x00e5
+#define mmROM_INDEX_BASE_IDX 0
+#define mmROM_DATA 0x00e6
+#define mmROM_DATA_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
new file mode 100644
index 000000000000..7d6a2fac2839
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_SH_MASK_HEADER
+#define _smuio_11_0_6_SH_MASK_HEADER
+
+
+//CGTT_ROM_CLK_CTRL0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//ROM_INDEX
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
+//ROM_DATA
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL
+
+#endif
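
ROM_INDEX and ROM_DATA above form a classic index/data register pair: software writes a dword index once, then pulls the ROM contents out through the data register. A hedged sketch of that access pattern (RREG32/WREG32 are the usual amdgpu MMIO accessors; this assumes the index auto-increments on each data read, as it does on other SMUIO generations, and is not the exact in-tree VBIOS reader):

	/* Stream ndwords of ROM starting at offset 0 into dst. */
	static void smuio_read_rom(struct amdgpu_device *adev,
				   uint32_t *dst, uint32_t ndwords)
	{
		uint32_t i;

		WREG32(mmROM_INDEX, 0);			/* set start index once */
		for (i = 0; i < ndwords; i++)
			dst[i] = RREG32(mmROM_DATA);	/* index auto-increments */
	}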
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f775aac6c1bd..a41875ac5dfb 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -103,6 +103,7 @@ enum pp_clock_type {
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
AMDGPU_PP_SENSOR_UVD_VCLK,
@@ -155,9 +156,11 @@ enum {
enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_SCLK_VDDC_TABLE,
PP_OD_EDIT_MCLK_VDDC_TABLE,
+ PP_OD_EDIT_CCLK_VDDC_TABLE,
PP_OD_EDIT_VDDC_CURVE,
PP_OD_RESTORE_DEFAULT_TABLE,
- PP_OD_COMMIT_DPM_TABLE
+ PP_OD_COMMIT_DPM_TABLE,
+ PP_OD_EDIT_VDDGFX_OFFSET
};
struct pp_states_info {
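
The new PP_OD_EDIT_CCLK_VDDC_TABLE and PP_OD_EDIT_VDDGFX_OFFSET commands are exercised from userspace through the pp_od_clk_voltage sysfs file whose documentation is updated later in this patch. A minimal userspace sketch of the write side, assuming an illustrative card0 path (the node actually hangs off the GPU's PCI device):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		const char *p = "/sys/class/drm/card0/device/pp_od_clk_voltage";
		int fd = open(p, O_WRONLY);

		if (fd < 0)
			return 1;
		/* Request a -25 mV vddgfx offset, then commit with "c". */
		if (write(fd, "vo -25", 6) < 0 || write(fd, "c", 1) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}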
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 07633e22e99a..7dff85c81e5a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -33,7 +33,7 @@ struct IP_BASE_INSTANCE
struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ACP_BASE ={ { { { 0x02403800, 0x00480000, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 7b6ef05a1d35..5fa65f191a37 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -36,6 +36,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
+#include <asm/processor.h>
#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
@@ -730,11 +731,18 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
*
* - minimum and maximum engine clock labeled OD_SCLK
*
- * - maximum memory clock labeled OD_MCLK
+ * - minimum (not available for Vega20 and Navi1x) and maximum memory
+ *   clock labeled OD_MCLK
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve.
*
+ * - voltage offset (in mV) applied to the target voltage calculation.
+ * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
+ * Cavefish. For these ASICs, the target voltage calculation can be
+ * illustrated by "voltage = voltage calculated from v/f curve +
+ * overdrive vddgfx offset"
+ *
* - a list of valid ranges for sclk, mclk, and voltage curve points
* labeled OD_RANGE
*
@@ -755,6 +763,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* 600mV. "vc 2 1000 1000" will update point3 with clock set
* as 1000Mhz and voltage 1000mV.
*
+ * To update the voltage offset applied to the gfxclk/voltage calculation,
+ * write a string of the form "vo offset", where offset is the new value.
+ * This is supported on Sienna Cichlid, Navy Flounder and Dimgrey Cavefish,
+ * and the offset can be a positive or negative value.
+ *
* - When you have edited all of the states as needed, write "c" (commit)
* to the file to commit your changes
*
@@ -787,6 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (*buf == 's')
type = PP_OD_EDIT_SCLK_VDDC_TABLE;
+ else if (*buf == 'p')
+ type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if(*buf == 'r')
@@ -795,6 +810,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_COMMIT_DPM_TABLE;
else if (!strncmp(buf, "vc", 2))
type = PP_OD_EDIT_VDDC_CURVE;
+ else if (!strncmp(buf, "vo", 2))
+ type = PP_OD_EDIT_VDDGFX_OFFSET;
else
return -EINVAL;
@@ -802,12 +819,14 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str = buf_cpy;
- if (type == PP_OD_EDIT_VDDC_CURVE)
+ if ((type == PP_OD_EDIT_VDDC_CURVE) ||
+ (type == PP_OD_EDIT_VDDGFX_OFFSET))
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ if (strlen(sub_str) == 0)
+ continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -898,7 +917,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
@@ -1074,7 +1095,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
- long level;
+ unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@@ -1087,11 +1108,10 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
+ ret = kstrtoul(sub_str, 0, &level);
+ if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else
@@ -1346,6 +1366,138 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return count;
}
+static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1717,8 +1869,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ if (strlen(sub_str) == 0)
+ continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -2025,6 +2178,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
@@ -2067,7 +2222,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ (is_support_sw_smu(adev) && adev->smu.is_apu) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2087,6 +2243,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (asic_type < CHIP_VEGA12)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
}
if (asic_type == CHIP_ARCTURUS) {
@@ -2897,7 +3059,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
+ int limit_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit = limit_type << 24;
ssize_t size;
int r;
@@ -2911,7 +3074,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
}
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
+ smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2931,7 +3094,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
+ int limit_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit = limit_type << 24;
ssize_t size;
int r;
@@ -2945,7 +3109,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
}
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
+ smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2960,6 +3124,15 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
return size;
}
+static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int limit_type = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
struct device_attribute *attr,
@@ -2967,6 +3140,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
@@ -2981,7 +3155,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
-
+ value |= limit_type << 24;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -3193,6 +3367,12 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
+static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
@@ -3231,6 +3411,12 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
+ &sensor_dev_attr_power1_label.dev_attr.attr,
+ &sensor_dev_attr_power2_average.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_max.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_min.dev_attr.attr,
+ &sensor_dev_attr_power2_cap.dev_attr.attr,
+ &sensor_dev_attr_power2_label.dev_attr.attr,
&sensor_dev_attr_freq1_input.dev_attr.attr,
&sensor_dev_attr_freq1_label.dev_attr.attr,
&sensor_dev_attr_freq2_input.dev_attr.attr,
@@ -3323,8 +3509,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
}
- if (((adev->flags & AMD_IS_APU) ||
- adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */
+ if (((adev->family == AMDGPU_FAMILY_SI) ||
+ ((adev->flags & AMD_IS_APU) &&
+ (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
@@ -3387,6 +3574,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
return 0;
+ /* only Vangogh has fast PPT limit and power labels */
+ if (!(adev->asic_type == CHIP_VANGOGH) &&
+ (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ return 0;
+
return effective_mode;
}
@@ -3465,6 +3662,27 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
+					   struct amdgpu_device *adev)
+{
+	uint16_t *p_val;
+	uint32_t size;
+	int i;
+
+	if (is_support_cclk_dpm(adev)) {
+		p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+				GFP_KERNEL);
+		if (!p_val)
+			return;
+
+		/* in/out size of the sensor payload in bytes */
+		size = adev->smu.cpu_core_num * sizeof(uint16_t);
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
+					    (void *)p_val, &size)) {
+ for (i = 0; i < adev->smu.cpu_core_num; i++)
+ seq_printf(m, "\t%u MHz (CPU%d)\n",
+ *(p_val + i), i);
+ }
+
+ kfree(p_val);
+ }
+}
+
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
@@ -3475,6 +3693,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
+
+ amdgpu_debugfs_prints_cpu_info(m, adev);
+
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
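
The parsing loops rewritten above lean on two properties of strsep(): it returns an empty string (not NULL) for every run of consecutive delimiters, and it returns NULL only once the input is exhausted, so the new form neither dereferences a NULL tmp_str nor trips over a missing trailing delimiter. A standalone userspace demo of the same pattern, mirroring amdgpu_read_mask including its new > 31 guard (hypothetical demo code, not part of the driver):

	#define _DEFAULT_SOURCE		/* strsep() */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "0  2 3";		/* double space -> one empty token */
		char *tmp = buf, *sub;
		unsigned long level, mask = 0;

		while ((sub = strsep(&tmp, " ")) != NULL) {
			if (strlen(sub) == 0)	/* skip empty tokens */
				continue;
			level = strtoul(sub, NULL, 0);
			if (level > 31)
				return 1;	/* reject out-of-range levels */
			mask |= 1UL << level;
		}
		printf("mask = 0x%lx\n", mask);	/* prints mask = 0xd */
		return 0;
	}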
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 4bdbcce7092d..10b0624ade65 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -33,6 +33,8 @@
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
#define SMU_FW_NAME_LEN 0x24
+#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+
struct smu_hw_power_state {
unsigned int magic;
};
@@ -159,6 +161,19 @@ enum smu_power_src_type
SMU_POWER_SOURCE_COUNT,
};
+enum smu_ppt_limit_type
+{
+ SMU_DEFAULT_PPT_LIMIT = 0,
+ SMU_FAST_PPT_LIMIT,
+};
+
+enum smu_ppt_limit_level
+{
+ SMU_PPT_LIMIT_MIN = -1,
+ SMU_PPT_LIMIT_CURRENT,
+ SMU_PPT_LIMIT_MAX,
+};
+
enum smu_memory_pool_size
{
SMU_MEMORY_POOL_SIZE_ZERO = 0,
@@ -168,6 +183,17 @@ enum smu_memory_pool_size
SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
};
+struct smu_user_dpm_profile {
+ uint32_t fan_mode;
+ uint32_t power_limit;
+ uint32_t fan_speed_percent;
+ uint32_t flags;
+
+ /* user clock state information */
+ uint32_t clk_mask[SMU_CLK_COUNT];
+ uint32_t clk_dependency;
+};
+
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
do { \
tables[table_id].size = s; \
@@ -459,130 +485,672 @@ struct smu_context
struct work_struct interrupt_work;
unsigned fan_max_rpm;
- unsigned manual_fan_speed_rpm;
+ unsigned manual_fan_speed_percent;
uint32_t gfx_default_hard_min_freq;
uint32_t gfx_default_soft_max_freq;
uint32_t gfx_actual_hard_min_freq;
uint32_t gfx_actual_soft_max_freq;
+
+ /* APU only */
+ uint32_t cpu_default_soft_min_freq;
+ uint32_t cpu_default_soft_max_freq;
+ uint32_t cpu_actual_soft_min_freq;
+ uint32_t cpu_actual_soft_max_freq;
+ uint32_t cpu_core_id_select;
+ uint16_t cpu_core_num;
+
+ struct smu_user_dpm_profile user_dpm_profile;
};
struct i2c_adapter;
+/**
+ * struct pptable_funcs - Callbacks used to interact with the SMU.
+ */
struct pptable_funcs {
+ /**
+ * @run_btc: Calibrate voltage/frequency curve to fit the system's
+ * power delivery and voltage margins. Required for adaptive
+ * voltage frequency scaling (AVFS).
+ */
int (*run_btc)(struct smu_context *smu);
+
+ /**
+ * @get_allowed_feature_mask: Get allowed feature mask.
+ * &feature_mask: Array to store feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @get_current_power_state: Get the current power state.
+ *
+ * Return: Current power state on success, negative errno on failure.
+ */
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+
+ /**
+ * @set_default_dpm_table: Retrieve the default overdrive settings from
+ * the SMU.
+ */
int (*set_default_dpm_table)(struct smu_context *smu);
+
int (*set_power_state)(struct smu_context *smu);
+
+ /**
+ * @populate_umd_state_clk: Populate the UMD power state table with
+ * defaults.
+ */
int (*populate_umd_state_clk)(struct smu_context *smu);
+
+ /**
+	 * @print_clk_levels: Print the DPM clock levels for a clock domain
+	 * to a buffer, marking the current level with an asterisk.
+ *
+ * Used for sysfs interfaces.
+ */
int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+
+ /**
+ * @force_clk_levels: Set a range of allowed DPM levels for a clock
+ * domain.
+ * &clk_type: Clock domain.
+ * &mask: Range of allowed DPM levels.
+ */
int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
+
+ /**
+ * @od_edit_dpm_table: Edit the custom overdrive DPM table.
+ * &type: Type of edit.
+ * &input: Edit parameters.
+ * &size: Size of &input.
+ */
int (*od_edit_dpm_table)(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
+
+ /**
+ * @get_clock_by_type_with_latency: Get the speed and latency of a clock
+ * domain.
+ */
int (*get_clock_by_type_with_latency)(struct smu_context *smu,
enum smu_clk_type clk_type,
struct
pp_clock_levels_with_latency
*clocks);
+ /**
+ * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
+ * domain.
+ */
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+
+ /**
+	 * @get_power_profile_mode: Print all power profile modes to a
+	 * buffer, marking the current mode with an asterisk.
+ */
int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_power_profile_mode: Set a power profile mode. Also used to
+ * create/set custom power profile modes.
+ * &input: Power profile mode parameters.
+ * &size: Size of &input.
+ */
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+
+ /**
+ * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
+ * management.
+ */
int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
+ * management.
+ */
int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @read_sensor: Read data from a sensor.
+ * &sensor: Sensor to read data from.
+ * &data: Sensor reading.
+ * &size: Size of &data.
+ */
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+
+ /**
+ * @pre_display_config_changed: Prepare GPU for a display configuration
+ * change.
+ *
+ * Disable display tracking and pin memory clock speed to maximum. Used
+ * in display component synchronization.
+ */
int (*pre_display_config_changed)(struct smu_context *smu);
+
+ /**
+ * @display_config_changed: Notify the SMU of the current display
+ * configuration.
+ *
+ * Allows SMU to properly track blanking periods for memory clock
+ * adjustment. Used in display component synchronization.
+ */
int (*display_config_changed)(struct smu_context *smu);
+
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+
+ /**
+ * @notify_smc_display_config: Applies display requirements to the
+ * current power state.
+ *
+ * Optimize deep sleep DCEFclk and mclk for the current display
+ * configuration. Used in display component synchronization.
+ */
int (*notify_smc_display_config)(struct smu_context *smu);
+
+ /**
+ * @is_dpm_running: Check if DPM is running.
+ *
+ * Return: True if DPM is running, false otherwise.
+ */
bool (*is_dpm_running)(struct smu_context *smu);
- int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @get_fan_speed_percent: Get the current fan speed in percent.
+ */
+ int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @set_watermarks_table: Configure and upload the watermarks tables to
+ * the SMU.
+ */
int (*set_watermarks_table)(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges);
+
+ /**
+	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
+ */
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
+
+ /**
+ * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
+ * &clocks_in_khz: Array of DPM levels.
+ * &num_states: Elements in &clocks_in_khz.
+ */
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
+
+ /**
+ * @set_default_od_settings: Set the overdrive tables to defaults.
+ */
int (*set_default_od_settings)(struct smu_context *smu);
+
+ /**
+ * @set_performance_level: Set a performance level.
+ */
int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+
+ /**
+ * @display_disable_memory_clock_switch: Enable/disable dynamic memory
+ * clock switching.
+ *
+ * Disabling this feature forces memory clock speed to maximum.
+ * Enabling sets the minimum memory clock capable of driving the
+ * current display configuration.
+ */
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+
+ /**
+ * @dump_pptable: Print the power play table to the system log.
+ */
void (*dump_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_power_limit: Get the device's power limits.
+ */
int (*get_power_limit)(struct smu_context *smu);
+
+ /**
+ * @get_ppt_limit: Get the device's ppt limits.
+ */
+ int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
+ enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
+
+ /**
+ * @set_df_cstate: Set data fabric cstate.
+ */
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+
+ /**
+ * @allow_xgmi_power_down: Enable/disable external global memory
+ * interconnect power down.
+ */
int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
+
+ /**
+ * @update_pcie_parameters: Update and upload the system's PCIe
+ * capabilites to the SMU.
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+ *
+ * The i2c bus is used internally by the SMU voltage regulators and
+ * other devices. The i2c's EEPROM also stores bad page tables on boards
+ * with ECC.
+ */
int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @i2c_fini: Tear down i2c.
+ */
void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
+ */
void (*get_unique_id)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_clock_table: Get a copy of the DPM clock table.
+ *
+ * Used by display component in bandwidth and watermark calculations.
+ */
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
+
+ /**
+ * @init_microcode: Request the SMU's firmware from the kernel.
+ */
int (*init_microcode)(struct smu_context *smu);
+
+ /**
+ * @load_microcode: Load firmware onto the SMU.
+ */
int (*load_microcode)(struct smu_context *smu);
+
+ /**
+ * @fini_microcode: Release the SMU's firmware.
+ */
void (*fini_microcode)(struct smu_context *smu);
+
+ /**
+ * @init_smc_tables: Initialize the SMU tables.
+ */
int (*init_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @fini_smc_tables: Release the SMU tables.
+ */
int (*fini_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @init_power: Initialize the power gate table context.
+ */
int (*init_power)(struct smu_context *smu);
+
+ /**
+ * @fini_power: Release the power gate table context.
+ */
int (*fini_power)(struct smu_context *smu);
+
+ /**
+ * @check_fw_status: Check the SMU's firmware status.
+ *
+ * Return: Zero if check passes, negative errno on failure.
+ */
int (*check_fw_status)(struct smu_context *smu);
+
+ /**
+ * @setup_pptable: Initialize the power play table and populate it with
+ * default values.
+ */
int (*setup_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_vbios_bootup_values: Get default boot values from the VBIOS.
+ */
int (*get_vbios_bootup_values)(struct smu_context *smu);
+
+ /**
+ * @check_fw_version: Print driver and SMU interface versions to the
+ * system log.
+ *
+ * Interface mismatch is not a critical failure.
+ */
int (*check_fw_version)(struct smu_context *smu);
+
+ /**
+ * @powergate_sdma: Power up/down system direct memory access.
+ */
int (*powergate_sdma)(struct smu_context *smu, bool gate);
+
+ /**
+	 * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
+ * gating.
+ */
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
+
+ /**
+ * @write_pptable: Write the power play table to the SMU.
+ */
int (*write_pptable)(struct smu_context *smu);
+
+ /**
+ * @set_driver_table_location: Send the location of the driver table to
+ * the SMU.
+ */
int (*set_driver_table_location)(struct smu_context *smu);
+
+ /**
+ * @set_tool_table_location: Send the location of the tool table to the
+ * SMU.
+ */
int (*set_tool_table_location)(struct smu_context *smu);
+
+ /**
+ * @notify_memory_pool_location: Send the location of the memory pool to
+ * the SMU.
+ */
int (*notify_memory_pool_location)(struct smu_context *smu);
+
+ /**
+ * @system_features_control: Enable/disable all SMU features.
+ */
int (*system_features_control)(struct smu_context *smu, bool en);
+
+ /**
+ * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
+ * &msg: Type of message.
+ * &param: Message parameter.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg_with_param)(struct smu_context *smu,
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+ /**
+ * @send_smc_msg: Send a message to the SMU.
+ * &msg: Type of message.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg)(struct smu_context *smu,
enum smu_message_type msg,
uint32_t *read_arg);
+
+ /**
+ * @init_display_count: Notify the SMU of the number of display
+ * components in current display configuration.
+ */
int (*init_display_count)(struct smu_context *smu, uint32_t count);
+
+ /**
+ * @set_allowed_mask: Notify the SMU of the features currently allowed
+ * by the driver.
+ */
int (*set_allowed_mask)(struct smu_context *smu);
+
+ /**
+ * @get_enabled_mask: Get a mask of features that are currently enabled
+ * on the SMU.
+ * &feature_mask: Array representing enabled feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @feature_is_enabled: Test if a feature is enabled.
+ *
+ * Return: One if enabled, zero if disabled.
+ */
int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @disable_all_features_with_exception: Disable all features with
+ * exception to those in &mask.
+ */
int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @notify_display_change: Enable fast memory clock switching.
+ *
+ * Allows for fine grained memory clock switching but has more stringent
+ * timing requirements.
+ */
int (*notify_display_change)(struct smu_context *smu);
+
+ /**
+ * @set_power_limit: Set power limit in watts.
+ */
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+
+ /**
+ * @init_max_sustainable_clocks: Populate max sustainable clock speed
+ * table with values from the SMU.
+ */
int (*init_max_sustainable_clocks)(struct smu_context *smu);
+
+ /**
+ * @enable_thermal_alert: Enable thermal alert interrupts.
+ */
int (*enable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @disable_thermal_alert: Disable thermal alert interrupts.
+ */
int (*disable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
+ * clock speed in MHz.
+ */
int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
+
+ /**
+ * @display_clock_voltage_request: Set a hard minimum frequency
+ * for a clock domain.
+ */
int (*display_clock_voltage_request)(struct smu_context *smu, struct
pp_display_clock_request
*clock_req);
+
+ /**
+ * @get_fan_control_mode: Get the current fan control mode.
+ */
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+
+ /**
+ * @set_fan_control_mode: Set the fan control mode.
+ */
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
- int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_fan_speed_percent: Set a static fan speed in percent.
+ */
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
+	 * &pstate: Pstate to set. D0 if nonzero, D3 otherwise.
+ */
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+ /**
+ * @gfx_off_control: Enable/disable graphics engine poweroff.
+ */
int (*gfx_off_control)(struct smu_context *smu, bool enable);
+
+
+ /**
+ * @get_gfx_off_status: Get graphics engine poweroff status.
+ *
+ * Return:
+	 * 0 - In GFXOFF (default).
+	 * 1 - Transitioning out of GFXOFF.
+	 * 2 - Not in GFXOFF.
+	 * 3 - Transitioning into GFXOFF.
+ */
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
+
+ /**
+	 * @register_irq_handler: Register interrupt request handlers.
+ */
int (*register_irq_handler)(struct smu_context *smu);
+
+ /**
+	 * @set_azalia_d3_pme: Wake the audio decode engine from D3 sleep.
+ */
int (*set_azalia_d3_pme)(struct smu_context *smu);
+
+ /**
+ * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
+ * clock speeds table.
+ *
+ * Provides a way for the display component (DC) to get the max
+ * sustainable clocks from the SMU.
+ */
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
+
+ /**
+ * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+ */
bool (*baco_is_support)(struct smu_context *smu);
+
+ /**
+ * @baco_get_state: Get the current BACO state.
+ *
+ * Return: Current BACO state.
+ */
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
+
+ /**
+ * @baco_set_state: Enter/exit BACO.
+ */
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
+
+ /**
+ * @baco_enter: Enter BACO.
+ */
int (*baco_enter)(struct smu_context *smu);
+
+ /**
+	 * @baco_exit: Exit BACO.
+ */
int (*baco_exit)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset_is_support: Check if GPU supports mode1 reset.
+ */
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset: Perform mode1 reset.
+ *
+ * Complete GPU reset.
+ */
int (*mode1_reset)(struct smu_context *smu);
+
+ /**
+ * @mode2_reset: Perform mode2 reset.
+ *
+ * Mode2 reset generally does not reset as many IPs as mode1 reset. The
+	 * set of IPs reset varies by ASIC.
+ */
int (*mode2_reset)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
+ * domain in MHz.
+ */
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+
+ /**
+ * @set_soft_freq_limited_range: Set the soft frequency range of a clock
+ * domain in MHz.
+ */
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+
+ /**
+ * @set_power_source: Notify the SMU of the current power source.
+ */
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
+
+ /**
+ * @log_thermal_throttling_event: Print a thermal throttling warning to
+ * the system's log.
+ */
void (*log_thermal_throttling_event)(struct smu_context *smu);
+
+ /**
+	 * @get_pp_feature_mask: Print a human-readable table of enabled
+	 * features to a buffer.
+ */
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_pp_feature_mask: Request the SMU enable/disable features to
+ * match those enabled in &new_mask.
+ */
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+
+ /**
+ * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
+ *
+	 * Return: Size of &table.
+ */
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
+
+ /**
+ * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
+ */
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+
+ /**
+ * @gfx_ulv_control: Enable/disable ultra low voltage.
+ */
int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @deep_sleep_control: Enable/disable deep sleep.
+ */
int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @get_fan_parameters: Get fan parameters.
+ *
+ * Get maximum fan speed from the power play table.
+ */
int (*get_fan_parameters)(struct smu_context *smu);
+
+ /**
+	 * @post_init: Helper function for ASIC-specific workarounds.
+ */
int (*post_init)(struct smu_context *smu);
+
+ /**
+ * @interrupt_work: Work task scheduled from SMU interrupt handler.
+ */
void (*interrupt_work)(struct smu_context *smu);
+
+ /**
+ * @gpo_control: Enable/disable graphics power optimization if supported.
+ */
int (*gpo_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @gfx_state_change_set: Send the current graphics state to the SMU.
+ */
int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
+
+ /**
+ * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
+ * parameters to defaults.
+ */
int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
};
@@ -596,6 +1164,7 @@ typedef enum {
METRICS_CURR_DCLK1,
METRICS_CURR_FCLK,
METRICS_CURR_DCEFCLK,
+ METRICS_AVERAGE_CPUCLK,
METRICS_AVERAGE_GFXCLK,
METRICS_AVERAGE_SOCCLK,
METRICS_AVERAGE_FCLK,
@@ -636,6 +1205,12 @@ enum smu_cmn2asic_mapping_type {
#define FEA_MAP(fea) \
[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+#define FEA_MAP_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
+#define FEA_MAP_HALF_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
#define TAB_MAP(tab) \
[SMU_TABLE_##tab] = {1, TABLE_##tab}
@@ -662,7 +1237,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool max_setting);
+ enum smu_ppt_limit_level limit_level);
int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
@@ -718,6 +1293,7 @@ extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
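
The hwmon changes earlier in this patch pack the new PPT limit selector into the top byte of the single u32 limit word (`limit = limit_type << 24`, with the wattage in the low 24 bits) before handing it to smu_get_power_limit()/smu_set_power_limit(). A small sketch of that encoding, assuming the 24-bit split shown above (helper names are illustrative):

	#include <stdint.h>

	#define PPT_LIMIT_TYPE_SHIFT	24
	#define PPT_LIMIT_VALUE_MASK	0x00FFFFFFu

	/* Pack a limit selector (e.g. SMU_FAST_PPT_LIMIT) with a wattage. */
	static inline uint32_t ppt_encode(uint32_t type, uint32_t watts)
	{
		return (type << PPT_LIMIT_TYPE_SHIFT) |
		       (watts & PPT_LIMIT_VALUE_MASK);
	}

	/* Recover the selector and the wattage on the receiving side. */
	static inline uint32_t ppt_type(uint32_t v)
	{
		return v >> PPT_LIMIT_TYPE_SHIFT;
	}

	static inline uint32_t ppt_watts(uint32_t v)
	{
		return v & PPT_LIMIT_VALUE_MASK;
	}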
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 1c19eae93ff1..6e23a3f803a7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -141,7 +141,6 @@ typedef struct {
uint32_t MaxGfxClk;
uint8_t NumDfPstatesEnabled;
- uint8_t NumDpmLevelsEnabled;
uint8_t NumDcfclkLevelsEnabled;
uint8_t NumDispClkLevelsEnabled; //applies to both dispclk and dppclk
uint8_t NumSocClkLevelsEnabled;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 720d15612fe1..aa4822202587 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -133,8 +133,6 @@
__SMU_DUMMY_MAP(PowerUpSdma), \
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinVcn), \
- __SMU_DUMMY_MAP(Spare1), \
- __SMU_DUMMY_MAP(Spare2), \
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
__SMU_DUMMY_MAP(ActiveProcessNotify), \
@@ -211,6 +209,11 @@
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
__SMU_DUMMY_MAP(DisallowGpo), \
__SMU_DUMMY_MAP(Enable2ndUSB20Port), \
+ __SMU_DUMMY_MAP(RequestActiveWgp), \
+ __SMU_DUMMY_MAP(SetFastPPTLimit), \
+ __SMU_DUMMY_MAP(SetSlowPPTLimit), \
+ __SMU_DUMMY_MAP(GetFastPPTLimit), \
+ __SMU_DUMMY_MAP(GetSlowPPTLimit), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -236,10 +239,12 @@ enum smu_clk_type {
SMU_SCLK,
SMU_MCLK,
SMU_PCIE,
+ SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
+ SMU_OD_VDDGFX_OFFSET,
SMU_CLK_COUNT,
};
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 13de692a4213..d4cddd2390a2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
@@ -129,6 +129,15 @@ struct smu_11_0_power_context {
enum smu_11_0_power_state power_state;
};
+struct smu_11_5_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_11_0_power_state power_state;
+
+ uint32_t current_fast_ppt_limit;
+ uint32_t max_fast_ppt_limit;
+};
+
enum smu_v11_0_baco_seq {
BACO_SEQ_BACO = 0,
BACO_SEQ_MSR,
@@ -203,11 +212,8 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
- uint32_t speed);
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed);
+int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
+ uint32_t speed);
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
@@ -275,10 +281,6 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
index 55d7892e4e0e..fe130a497d6c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
@@ -104,7 +104,11 @@
#define PPSMC_MSG_DramLogSetDramBufferSize 0x46
#define PPSMC_MSG_RequestActiveWgp 0x47
#define PPSMC_MSG_QueryActiveWgp 0x48
-#define PPSMC_Message_Count 0x49
+#define PPSMC_MSG_SetFastPPTLimit 0x49
+#define PPSMC_MSG_SetSlowPPTLimit 0x4A
+#define PPSMC_MSG_GetFastPPTLimit 0x4B
+#define PPSMC_MSG_GetSlowPPTLimit 0x4C
+#define PPSMC_Message_Count 0x4D
//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index fa2e8cb07967..02de3b6199e5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,7 +60,5 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 6a7de8b898fa..f2cef0930aa9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -33,6 +33,7 @@
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"
+#include "vega10_hwmgr.h"
extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
@@ -46,7 +47,6 @@ extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 83a6504e093c..b1038d30c8dc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -279,7 +279,7 @@ static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
* @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode
*/
int atomctrl_get_memory_pll_dividers_si(
@@ -332,7 +332,7 @@ int atomctrl_get_memory_pll_dividers_si(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
*/
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
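
These comment hunks, like the ones in process_pptables_v1_0.c, smu7_hwmgr.c and the vega files below, convert the old `@param name description` style to kernel-doc's `@name: description` form so the parameter names match the actual signatures. A minimal sketch of the target format on a hypothetical function (illustration only, not part of this patch):

    #include <stdint.h>

    struct pp_hwmgr;	/* opaque here; the real definition lives in hwmgr.h */

    /**
     * example_get_dividers - look up dividers for a target clock
     * @hwmgr: input parameter: pointer to HwMgr
     * @clock_value: input parameter: clock in 10 kHz units
     * @dividers: output parameter: computed divider value
     *
     * Return: 0 on success, negative errno on failure.
     */
    static int example_get_dividers(struct pp_hwmgr *hwmgr,
                                    uint32_t clock_value, uint32_t *dividers)
    {
        (void)hwmgr;
        *dividers = clock_value / 100;	/* placeholder math */
        return 0;
    }
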
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index 741e03ad5311..f2a55c1413f5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1362,6 +1362,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
* @hwmgr: Pointer to the hardware manager.
* @entry_index: The index of the entry to be extracted from the table.
* @power_state: The address of the PowerState instance being created.
+ * @call_back_func: The function to call to fill the power state
* Return: -1 if the entry cannot be retrieved.
*/
int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..ed05a30d1139 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* disable the fine grain tuning function by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ int ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
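
With the fine_grain_enabled gate above, the handler now follows the usual three-step overdrive sequence: "s 0|1 <MHz>" writes stage a new soft minimum or maximum, "r" restores the firmware defaults, and "c" validates min <= max before committing both limits to the SMC. A hypothetical userspace illustration of the sequence this enables (card path and frequencies are made up; error handling elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void od_write(const char *path, const char *cmd)
    {
        int fd = open(path, O_WRONLY);

        if (fd >= 0) {
            write(fd, cmd, strlen(cmd));
            close(fd);
        }
    }

    int main(void)
    {
        const char *dev = "/sys/class/drm/card0/device";
        char path[128];

        /* fine grain tuning is only reachable from manual mode now */
        snprintf(path, sizeof(path), "%s/power_dpm_force_performance_level", dev);
        od_write(path, "manual");

        snprintf(path, sizeof(path), "%s/pp_od_clk_voltage", dev);
        od_write(path, "s 0 600\n");	/* stage minimum sclk, MHz */
        od_write(path, "s 1 1100\n");	/* stage maximum sclk, MHz */
        od_write(path, "c\n");		/* validate and commit to the SMC */
        return 0;
    }
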
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10Khz*/
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 82676c086ce4..c57dc9ae81f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -235,7 +235,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
/**
* smu7_enable_smc_voltage_controller - Enable voltage control
*
- * @hwmgr the address of the powerplay hardware manager.
+ * @hwmgr: the address of the powerplay hardware manager.
* Return: always PP_Result_OK
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
@@ -4501,7 +4501,7 @@ static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
* smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
*
* @hwmgr: the address of the powerplay hardware manager.
- * @usMaxFanRpm: max operating fan RPM value.
+ * @us_max_fan_rpm: max operating fan RPM value.
* Return: The response that came from the SMC.
*/
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 1b47f94e0331..29c99642d22d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -542,11 +542,11 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0.
-*/
+ * Get Leakage VDDC based on leakage ID.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0.
+ */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -600,9 +600,9 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
/**
* Change virtual leakage voltage to actual value.
*
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @voltage: pointer to changing voltage
+ * @leakage_table: pointer to leakage table
*/
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
@@ -624,13 +624,13 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pointer to voltage lookup table
-* @param pointer to leakage table
-* @return always 0
-*/
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @lookup_table: pointer to voltage lookup table
+ * @leakage_table: pointer to leakage table
+ * return: always 0
+ */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
struct vega10_leakage_voltage *leakage_table)
@@ -1001,13 +1001,12 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
}
/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 in success
-*/
-
+ * Remove repeated voltage values and create table with unique values.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @vol_table: the pointer to changing voltage table
+ * return: 0 in success
+ */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_voltage_table *vol_table)
{
@@ -1151,11 +1150,11 @@ static void vega10_trim_voltage_table_to_fit_state_table(
}
/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
+ * Create Voltage Tables.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1212,11 +1211,11 @@ static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_init_dpm_state
- * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
+ * vega10_init_dpm_state
+ * Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
*
- * @param dpm_state - the address of the DPM Table to initiailize.
- * @return None.
+ * @dpm_state: the address of the DPM Table to initialize.
+ * return: None.
*/
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
@@ -1460,11 +1459,11 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_populate_ulv_state
- * @brief Function to provide parameters for Utral Low Voltage state to SMC.
+ * vega10_populate_ulv_state
+ * Function to provide parameters for Ultra Low Voltage state to SMC.
*
- * @param hwmgr - the address of the hardware manager.
- * @return Always 0.
+ * @hwmgr: the address of the hardware manager.
+ * return: Always 0.
*/
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
@@ -1545,13 +1544,13 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
}
/**
-* Populates single SMC GFXSCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param gfx_clock the GFX clock to use to populate the structure.
-* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
-*/
-
+ * Populates single SMC GFXSCLK structure using the provided engine clock
+ *
+ * @hwmgr: the address of the hardware manager
+ * @gfx_clock: the GFX clock to use to populate the structure.
+ * @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure.
+ * @acg_freq: ACG frequency to return (MHz)
+ */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
uint32_t *acg_freq)
@@ -1610,12 +1609,13 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates single SMC SOCCLK structure using the provided clock.
+ * Populates single SMC SOCCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param soc_clock - the SOC clock to use to populate the structure.
- * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @soc_clock: the SOC clock to use to populate the structure.
+ * @current_soc_did: DFS divider to pass back to caller
+ * @current_vol_index: index of current VDD to pass back to caller
+ * return: 0 on success
*/
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
@@ -1659,10 +1659,10 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
}
/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @hwmgr: the address of the hardware manager
+ */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1746,12 +1746,12 @@ static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
}
}
-/**
- * @brief Populates single SMC GFXCLK structure using the provided clock.
+/*
+ * Populates single SMC GFXCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param mem_clock - the memory clock to use to populate the structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @mem_clock: the memory clock to use to populate the structure.
+ * return: 0 on success.
*/
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
uint32_t mem_clock, uint8_t *current_mem_vid,
@@ -1808,10 +1808,10 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
*
- * @param pHwMgr - the address of the hardware manager.
- * @return PP_Result_OK on success.
+ * @hwmgr: the address of the hardware manager.
+ * return: PP_Result_OK on success.
*/
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
@@ -2486,12 +2486,11 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -2864,11 +2863,11 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
/**
- * @brief Tell SMC to enabled the supported DPMs.
+ * Tell SMC to enable the supported DPMs.
*
- * @param hwmgr - the address of the powerplay hardware manager.
- * @Param bitmap - bitmap for the features to enabled.
- * @return 0 on at least one DPM is successfully enabled.
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @bitmap: bitmap for the features to enable.
+ * return: 0 if at least one DPM is successfully enabled.
*/
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
index f752b4ad0c8a..07c06f8c90b0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
@@ -442,5 +442,6 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index dc206fa88c5e..c0753029a8e2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -718,12 +718,11 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
#endif
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index da84012b7fd5..87811b005b85 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -771,12 +771,11 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8b867a6d52b5..d143ef1b460b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -266,6 +266,119 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
+/**
+ * smu_set_user_clk_dependencies - set user profile clock dependencies
+ *
+ * @smu: smu_context pointer
+ * @clk: enum smu_clk_type type
+ *
+ * Enable/Disable the clock dependency for the @clk type.
+ */
+static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
+{
+ if (smu->adev->in_suspend)
+ return;
+
+ /*
+ * mclk, fclk and socclk are interdependent
+ */
+ if (clk == SMU_MCLK) {
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set mclk dependent clocks (fclk and socclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
+ } else if (clk == SMU_FCLK) {
+ /* give priority to mclk, if mclk dependent clocks are set */
+ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ return;
+
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set fclk dependent clocks (mclk and socclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
+ } else if (clk == SMU_SOCCLK) {
+ /* give priority to mclk, if mclk dependent clocks are set */
+ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ return;
+
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set socclk dependent clocks (mclk and fclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
+ } else
+ /* add clk dependencies here, if any */
+ return;
+}
+
+/**
+ * smu_restore_dpm_user_profile - reinstate user dpm profile
+ *
+ * @smu: smu_context pointer
+ *
+ * Restore the saved user power configurations, including power limit,
+ * clock frequencies, fan control mode and fan speed.
+ */
+static void smu_restore_dpm_user_profile(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (!smu->adev->in_suspend)
+ return;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return;
+
+ /* Enable restore flag */
+ smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
+
+ /* set the user dpm power limit */
+ if (smu->user_dpm_profile.power_limit) {
+ ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set power limit value\n");
+ }
+
+ /* set the user dpm clock configurations */
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ enum smu_clk_type clk_type;
+
+ for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
+ /*
+ * Iterate over smu clk type and force the saved user clk
+ * configs; skip if clock dependency is enabled
+ */
+ if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
+ smu->user_dpm_profile.clk_mask[clk_type]) {
+ ret = smu_force_clk_levels(smu, clk_type,
+ smu->user_dpm_profile.clk_mask[clk_type]);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
+ clk_type);
+ }
+ }
+ }
+
+ /* set the user dpm fan configurations */
+ if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
+ ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
+ return;
+ }
+
+ if (!ret && smu->user_dpm_profile.fan_speed_percent) {
+ ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
+ }
+ }
+
+ /* Disable restore flag */
+ smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
+}
+
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -288,6 +401,20 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
return false;
}
+bool is_support_cclk_dpm(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
+ return false;
+
+ return true;
+}
+
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -405,8 +532,6 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case CHIP_VANGOGH:
vangogh_set_ppt_funcs(smu);
- /* enable the OD by default to allow the fine grain tuning function */
- smu->od_enabled = true;
break;
default:
return -EINVAL;
@@ -478,9 +603,6 @@ static int smu_late_init(void *handle)
smu_set_fine_grain_gfx_freq_parameters(smu);
- if (adev->asic_type == CHIP_VANGOGH)
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -517,6 +639,8 @@ static int smu_late_init(void *handle)
AMD_PP_TASK_COMPLETE_INIT,
false);
+ smu_restore_dpm_user_profile(smu);
+
return 0;
}
@@ -1610,6 +1734,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
mutex_unlock(&smu->mutex);
+ /* reset user dpm clock state */
+ if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
+ smu->user_dpm_profile.clk_dependency = 0;
+ }
+
return ret;
}
@@ -1644,8 +1774,13 @@ int smu_force_clk_levels(struct smu_context *smu,
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
+ smu->user_dpm_profile.clk_mask[clk_type] = mask;
+ smu_set_user_clk_dependencies(smu, clk_type);
+ }
+ }
mutex_unlock(&smu->mutex);
@@ -1887,6 +2022,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
+ u32 percent;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1894,8 +2030,12 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm)
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+ if (smu->ppt_funcs->set_fan_speed_percent) {
+ percent = speed * 100 / smu->fan_max_rpm;
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = percent;
+ }
mutex_unlock(&smu->mutex);
@@ -1904,22 +2044,40 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool max_setting)
+ enum smu_ppt_limit_level limit_level)
{
+ uint32_t limit_type = *limit >> 24;
+ int ret = 0;
+
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- *limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
+ if (smu->ppt_funcs->get_ppt_limit)
+ ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+ } else {
+ switch (limit_level) {
+ case SMU_PPT_LIMIT_CURRENT:
+ *limit = smu->current_power_limit;
+ break;
+ case SMU_PPT_LIMIT_MAX:
+ *limit = smu->max_power_limit;
+ break;
+ default:
+ break;
+ }
+ }
mutex_unlock(&smu->mutex);
- return 0;
+ return ret;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
+ uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1927,6 +2085,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
mutex_lock(&smu->mutex);
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT &&
+ smu->ppt_funcs->set_power_limit) {
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ goto out;
+ }
+
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
@@ -1937,8 +2101,11 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
if (!limit)
limit = smu->current_power_limit;
- if (smu->ppt_funcs->set_power_limit)
+ if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.power_limit = limit;
+ }
out:
mutex_unlock(&smu->mutex);
@@ -2115,11 +2282,19 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_control_mode)
+ if (smu->ppt_funcs->set_fan_control_mode) {
ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_mode = value;
+ }
mutex_unlock(&smu->mutex);
+ /* reset user dpm fan speed */
+ if (!ret && value != AMD_FAN_CTRL_MANUAL &&
+ smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = 0;
+
return ret;
}
@@ -2127,17 +2302,15 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
int ret = 0;
uint32_t percent;
- uint32_t current_rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_rpm) {
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
+ if (smu->ppt_funcs->get_fan_speed_percent) {
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
if (!ret) {
- percent = current_rpm * 100 / smu->fan_max_rpm;
*speed = percent > 100 ? 100 : percent;
}
}
@@ -2151,18 +2324,18 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
- uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm) {
+ if (smu->ppt_funcs->set_fan_speed_percent) {
if (speed > 100)
speed = 100;
- rpm = speed * smu->fan_max_rpm / 100;
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = speed;
}
mutex_unlock(&smu->mutex);
@@ -2173,14 +2346,17 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
int ret = 0;
+ u32 percent;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_rpm)
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+ if (smu->ppt_funcs->get_fan_speed_percent) {
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
+ *speed = percent * smu->fan_max_rpm / 100;
+ }
mutex_unlock(&smu->mutex);
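
A convention worth noting in the reworked smu_get_power_limit()/smu_set_power_limit() paths above: the top byte of the 32-bit limit argument now selects the PPT limit type. SMU_DEFAULT_PPT_LIMIT (0) keeps the legacy single-limit behaviour; non-zero types are routed to the ASIC's get_ppt_limit/set_ppt_limit hooks, such as the fast/slow PPT limits Vangogh gains in this series. A minimal sketch of that packing, with a hypothetical helper (units follow whatever convention the caller's limit already uses):

    #include <stdint.h>
    #include <stdio.h>

    /* Top byte: PPT limit selector; low 24 bits: the limit value itself. */
    static uint32_t pack_ppt_limit(uint8_t limit_type, uint32_t value)
    {
        return ((uint32_t)limit_type << 24) | (value & 0xffffff);
    }

    int main(void)
    {
        uint32_t limit = pack_ppt_limit(1, 25);	/* hypothetical fast-PPT request */

        printf("type=%u value=%u\n", limit >> 24, limit & 0xffffff);
        return 0;
    }
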
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index cd7b411457ff..45564a776e9b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1080,15 +1080,27 @@ static int arcturus_read_sensor(struct smu_context *smu,
return ret;
}
-static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int arcturus_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return arcturus_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = arcturus_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int arcturus_get_fan_parameters(struct smu_context *smu)
@@ -2227,7 +2239,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2264,6 +2276,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
arcturus_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2281,7 +2295,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.print_clk_levels = arcturus_print_clk_levels,
.force_clk_levels = arcturus_force_clk_levels,
.read_sensor = arcturus_read_sensor,
- .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
+ .get_fan_speed_percent = arcturus_get_fan_speed_percent,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
.set_performance_level = arcturus_set_performance_level,
@@ -2326,7 +2340,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
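
The fan interface across these ppt tables now standardises on percent; RPM-based callers are converted at the smu_set_fan_speed_rpm()/smu_get_fan_speed_rpm() boundary with integer math against fan_max_rpm. A small sketch of the round trip and its truncation behaviour (the max RPM figure is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fan_max_rpm = 3200;	/* hypothetical limit from fan parameters */
        uint32_t rpm = 1500;

        uint32_t percent = rpm * 100 / fan_max_rpm;		/* 46 */
        uint32_t rpm_back = percent * fan_max_rpm / 100;	/* 1472 */

        /* integer division truncates, so a set/get round trip can drift
         * by up to fan_max_rpm / 100 RPM - acceptable for fan control
         */
        printf("%u RPM -> %u%% -> %u RPM\n", rpm, percent, rpm_back);
        return 0;
    }
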
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 51e83123f72a..6e641f1513d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1317,15 +1317,27 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int navi10_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int navi10_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return navi10_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = navi10_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int navi10_get_fan_parameters(struct smu_context *smu)
@@ -1673,7 +1685,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+ ret = navi10_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
@@ -2302,7 +2314,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2342,6 +2354,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2413,7 +2427,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_config_changed = navi10_display_config_changed,
.notify_smc_display_config = navi10_notify_smc_display_config,
.is_dpm_running = navi10_is_dpm_running,
- .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
+ .get_fan_speed_percent = navi10_get_fan_speed_percent,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
.set_watermarks_table = navi10_set_watermarks_table,
@@ -2456,7 +2470,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
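
In the AMDGPU_PP_SENSOR_GFX_SCLK hunk above, the reading now comes from METRICS_AVERAGE_GFXCLK, and the `*= 100` converts the metric's MHz into the 10 kHz units the sensor interface reports (the same convention as the `/* in 10Khz*/` fields seen earlier in this patch). A trivial sketch of the conversion, assuming that unit convention:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t gfxclk_mhz = 1825;		/* example averaged gfxclk */
        uint32_t sensor = gfxclk_mhz * 100;	/* sensor value, 10 kHz units */

        printf("%u MHz -> %u (10 kHz units)\n", gfxclk_mhz, sensor);
        return 0;
    }
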
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9608745d732f..af73e1430af5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -261,6 +261,11 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_GPO_BIT);
}
+ if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
+ (adev->asic_type > CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU))
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
+
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@@ -294,6 +299,12 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
+ if (amdgpu_aspm == 1)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+
return 0;
}
@@ -314,6 +325,12 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
+ /*
+ * Instead of having its own buffer space and getting overdrive_table
+ * copied, smu->od_settings just points at the actual overdrive_table.
+ */
+ smu->od_settings = &powerplay_table->overdrive_table;
+
return 0;
}
@@ -907,6 +924,22 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
+static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t *min, uint32_t *max)
+{
+ if (min)
+ *min = od_table->min[setting];
+ if (max)
+ *max = od_table->max[setting];
+}
+
static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -915,11 +948,16 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
uint32_t gen_speed, lane_width;
+ uint32_t min_value, max_value;
+ uint32_t smu_version;
switch (clk_type) {
case SMU_GFXCLK:
@@ -995,6 +1033,70 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
+ break;
+
+ case SMU_OD_VDDGFX_OFFSET:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by SMU
+ * firmware 58.41.0 and onwards.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900))
+ break;
+
+ size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
+ size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1146,15 +1248,27 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return sienna_cichlid_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
@@ -1694,6 +1808,243 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_dump_od_table(struct smu_context *smu,
+ OverDriveTable_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+
+ dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin,
+ od_table->GfxclkFmax);
+ dev_dbg(smu->adev->dev, "OD: Uclk: (%d, %d)\n", od_table->UclkFmin,
+ od_table->UclkFmax);
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)))
+ dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
+}
+
+static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ OverDriveTable_t *boot_od_table =
+ (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, false);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+ return ret;
+ }
+
+ memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ return 0;
+}
+
+static int sienna_cichlid_od_setting_check_range(struct smu_context *smu,
+ struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n",
+ setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n",
+ setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_7_overdrive_table *od_settings =
+ (struct smu_11_0_7_overdrive_table *)smu->od_settings;
+ struct amdgpu_device *adev = smu->adev;
+ enum SMU_11_0_7_ODSETTING_ID freq_setting;
+ uint16_t *freq_ptr;
+ int i, ret = 0;
+ uint32_t smu_version;
+
+ if (!smu->od_enabled) {
+ dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ dev_err(smu->adev->dev, "OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings,
+ SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1], od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1], od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "UCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->UclkFmax) {
+ dev_info(smu->adev->dev, "UclkFmin (%ld) must be <= UclkFmax (%u)!\n",
+ input[i + 1], od_table->UclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMIN;
+ freq_ptr = &od_table->UclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->UclkFmin) {
+ dev_info(smu->adev->dev, "UclkFmax (%ld) must be >= UclkFmin (%u)!\n",
+ input[i + 1], od_table->UclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMAX;
+ freq_ptr = &od_table->UclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ memcpy(table_context->overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTable_t));
+ fallthrough;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, true);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
+ return ret;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDGFX_OFFSET:
+ if (size != 1) {
+ dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by SMU
+ * firmware 58.41.0 and onwards.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)) {
+ dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
+ "only by 58.41.0 and onwards SMU firmwares!\n");
+ return -EOPNOTSUPP;
+ }
+
+ od_table->VddGfxOffset = (int16_t)input[0];
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
@@ -2372,7 +2723,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
@@ -2610,7 +2961,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics->TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
@@ -2653,6 +3004,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2759,7 +3112,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_config_changed = sienna_cichlid_display_config_changed,
.notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
.is_dpm_running = sienna_cichlid_is_dpm_running,
- .get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
+ .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
.get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
.set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
.set_watermarks_table = sienna_cichlid_set_watermarks_table,
@@ -2802,7 +3155,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
@@ -2817,6 +3170,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b279dbbbce6b..90585461a56e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -474,12 +474,14 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
+ size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
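+ /* Vangogh needs the larger 11.5 power context, which carries the extra fast/slow PPT state */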
- smu_power->power_context = kzalloc(sizeof(struct smu_11_0_power_context),
- GFP_KERNEL);
+ smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
- smu_power->power_context_size = sizeof(struct smu_11_0_power_context);
+ smu_power->power_context_size = size;
return 0;
}
@@ -1119,6 +1121,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -1136,10 +1139,10 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
- if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
- return AMD_FAN_CTRL_MANUAL;
- else
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return AMD_FAN_CTRL_AUTO;
+ else
+ return smu->user_dpm_profile.fan_mode;
}
static int
@@ -1174,6 +1177,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
}
int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_auto_fan_control(smu, 0))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
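+ /* scale the requested percentage into the fan controller's 0..FMAX_DUTY100 duty range */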
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1181,7 +1213,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
@@ -1201,58 +1233,6 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
- uint32_t speed)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
- uint32_t tach_period, crystal_clock_freq;
-
- if (!speed)
- return -EINVAL;
-
- ret = smu_v11_0_auto_fan_control(smu, 0);
- if (ret)
- return ret;
-
- /*
- * crystal_clock_freq div by 4 is required since the fan control
- * module refers to 25MHz
- */
-
- crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
- tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
- WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
- REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
- CG_TACH_CTRL, TARGET_PERIOD,
- tach_period));
-
- ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-
- return ret;
-}
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t tach_period, crystal_clock_freq;
- uint64_t tmp64;
-
- tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
- CG_TACH_CTRL, TARGET_PERIOD);
- if (!tach_period)
- return -EINVAL;
-
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
-
- tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
- do_div(tmp64, (tach_period * 8));
- *speed = (uint32_t)tmp64;
-
- return 0;
-}
-
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
@@ -2043,30 +2023,6 @@ int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
return link_speed[speed_level];
}
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v1_0);
- gpu_metrics->common_header.format_revision = 1;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v2_0);
- gpu_metrics->common_header.format_revision = 2;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 8cb4fcee9a2c..093b01159408 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -31,6 +31,10 @@
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
+#include "soc15_common.h"
+#include "asic_reg/gc/gc_10_3_0_offset.h"
+#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
+#include <asm/processor.h>
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -59,7 +63,8 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 0),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
@@ -76,7 +81,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0),
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
- MSG_MAP(Spare1, PPSMC_MSG_spare1, 0),
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0),
MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0),
@@ -88,7 +92,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0),
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0),
MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0),
- MSG_MAP(Spare2, PPSMC_MSG_spare2, 0),
MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
@@ -118,6 +121,11 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
+ MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
+ MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0),
+ MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
+ MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
+ MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -162,6 +170,9 @@ static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(A55_DPM),
FEA_MAP(CVIP_DSP_DPM),
FEA_MAP(MSMU_LOW_POWER),
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP_HALF_REVERSE(GFX),
};
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
@@ -171,6 +182,14 @@ static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(DPMCLOCKS),
};
+static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -242,6 +261,12 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_SOCCLK:
*value = metrics->SocclkFrequency;
break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->DclkFrequency;
+ break;
case METRICS_AVERAGE_UCLK:
*value = metrics->MemclkFrequency;
break;
@@ -252,7 +277,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
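+ /* convert from mW to watts in 8.8 fixed-point format */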
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
@@ -271,6 +297,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
+ case METRICS_AVERAGE_CPUCLK:
+ memcpy(value, &metrics->CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
default:
*value = UINT_MAX;
break;
@@ -307,6 +337,13 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
+#ifdef CONFIG_X86
+ /* AMD x86 APU only */
+ smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+#else
+ smu->cpu_core_num = 4;
+#endif
+
return smu_v11_0_init_smc_tables(smu);
}
@@ -316,17 +353,13 @@ static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
@@ -337,54 +370,18 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
}
-static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
- | FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
- | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
- | FEATURE_MASK(FEATURE_PPT_BIT)
- | FEATURE_MASK(FEATURE_TDC_BIT)
- | FEATURE_MASK(FEATURE_FAN_CONTROLLER_BIT)
- | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT);
-
- if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);
-
- if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
-
- return 0;
-}
-
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -402,14 +399,68 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].vclk;
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].dclk;
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].memclk;
+
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].fclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vangogh_print_fine_grain_clk(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int size = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
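+ /* fetch the metrics table up front; most clock types report their current frequency from it */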
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
@@ -417,12 +468,71 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
}
break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
case SMU_OD_RANGE:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_SOCCLK:
+ /* socclk levels 3 ~ 6 share the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
break;
default:
break;
@@ -431,6 +541,726 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
return size;
}
+static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *vclk_mask,
+ uint32_t *dclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *fclk_mask,
+ uint32_t *soc_mask)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (fclk_mask)
+ *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (soc_mask)
+ *soc_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ }
+
+ return 0;
+}
+
+static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, feature_id))
+ return false;
+
+ return true;
+}
+
+static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t soc_mask;
+ uint32_t vclk_mask;
+ uint32_t dclk_mask;
+ uint32_t mclk_mask;
+ uint32_t fclk_mask;
+ uint32_t clock_limit;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* boot clock values are in 10 kHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+ if (max) {
+ ret = vangogh_get_profiling_clk_mask(smu,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+ if (min) {
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ return ret;
+}
+
+static int vangogh_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
+ * Not all profile modes are supported on vangogh.
+ */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
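+ /* the requested mode is passed as the element just past the size-counted inputs */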
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
+ /* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+ if (workload_type < 0) {
+ dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+ profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ 1 << workload_type,
+ NULL);
+ if (ret) {
+ dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+ workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min << 16, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max << 16, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_DCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
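+ /* the lowest set bit in the mask selects the min level, the highest set bit the max */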
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_SOCCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_MCLK,
+ SMU_FCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
+ {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+
+ if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t socclk_freq = 0, fclk_freq = 0;
+ uint32_t vclk_freq = 0, dclk_freq = 0;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int vangogh_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t soc_mask, mclk_mask, fclk_mask;
+ uint32_t vclk_mask = 0, dclk_mask = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
+ vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ NULL,
+ NULL,
+ &mclk_mask,
+ &fclk_mask,
+ NULL);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_peak_clock_by_device(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
static int vangogh_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -492,6 +1322,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_CPU_CLK:
+ ret = vangogh_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_CPUCLK,
+ (uint32_t *)data);
+ *size = smu->cpu_core_num * sizeof(uint16_t);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -513,7 +1349,7 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
if (clock_ranges) {
if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
@@ -574,7 +1410,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v2_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -590,14 +1426,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -607,22 +1446,55 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_0);
}
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
- long input[], uint32_t size)
+ long input[], uint32_t size)
{
int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->od_enabled) {
- dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+ dev_warn(smu->adev->dev,
+ "pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}
switch (type) {
+ case PP_OD_EDIT_CCLK_VDDC_TABLE:
+ if (size != 3) {
+ dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
+ return -EINVAL;
+ }
+ if (input[0] >= smu->cpu_core_num) {
+ dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
+ smu->cpu_core_num);
+ return -EINVAL;
+ }
+ smu->cpu_core_id_select = input[0];
+ if (input[1] == 0) {
+ if (input[2] < smu->cpu_default_soft_min_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_min_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_min_freq = input[2];
+ } else if (input[1] == 1) {
+ if (input[2] > smu->cpu_default_soft_max_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_max_freq = input[2];
+ } else {
+ return -EINVAL;
+ }
+ break;
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev, "Input parameter number not correct\n");
@@ -631,14 +1503,16 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
if (input[0] == 0) {
if (input[1] < smu->gfx_default_hard_min_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq);
return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} else if (input[0] == 1) {
if (input[1] > smu->gfx_default_soft_max_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq);
return -EINVAL;
}
@@ -654,6 +1528,8 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq, NULL);
@@ -668,6 +1544,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
+ for (i = 0; i < smu->cpu_core_num; i++) {
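+ /* the message payload packs the core index into bits 20+ and the MHz value into the low bits */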
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ (i << 20) | smu->cpu_actual_soft_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ (i << 20) | smu->cpu_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
+ }
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -676,8 +1575,10 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
return -EINVAL;
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
- dev_err(smu->adev->dev, "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
- smu->gfx_actual_hard_min_freq, smu->gfx_actual_soft_max_freq);
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
return -EINVAL;
}
@@ -694,6 +1595,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Set soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_min_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_max_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
}
break;
default:
@@ -719,18 +1643,245 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
smu->gfx_actual_hard_min_freq = 0;
smu->gfx_actual_soft_max_freq = 0;
+ smu->cpu_default_soft_min_freq = 1400;
+ smu->cpu_default_soft_max_freq = 3500;
+ smu->cpu_actual_soft_min_freq = 0;
+ smu->cpu_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
+static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i];
+ clock_table->SocClocks[i].Vol = table->SocVoltage[i];
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
+ clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
+ clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
return 0;
}
+
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
if (adev->pm.fw_version >= 0x43f1700)
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
- en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
- else
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+ en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
+
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+
+ if (!en)
+ return ret;
+
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return 0;
+}
+
+static int vangogh_post_smu_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t tmp;
+ int ret = 0;
+ uint8_t aon_bits = 0;
+ /* Two CUs in one WGP */
+ uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
+ uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* the Allow message must be sent after the Enable message on Vangogh */
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to Enable GfxOff!\n");
+ return ret;
+ }
+ } else {
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
+ }
+
+ /* if all CUs are active, no need to power off any WGPs */
+ if (total_cu == adev->gfx.cu_info.number)
return 0;
+
+ /*
+ * Calculate the total bits number of always on WGPs for all SA/SEs in
+ * RLC_PG_ALWAYS_ON_WGP_MASK.
+ */
+ tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
+ tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
+
+ aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* Do not request any WGPs less than set in the AON_WGP_MASK */
+ if (aon_bits > req_active_wgps) {
+ dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
+ return 0;
+ } else {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
+ }
+}
+
+static int vangogh_mode_reset(struct smu_context *smu, int type)
+{
+ int ret = 0, index = 0;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+ if (index < 0)
+ return index == -EACCES ? 0 : index;
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
+
+ mutex_unlock(&smu->message_lock);
+
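+ /* give the SMU a moment to process the reset before returning */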
+ mdelay(10);
+
+ return ret;
+}
+
+static int vangogh_mode2_reset(struct smu_context *smu)
+{
+ return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
+}
+
+static int vangogh_get_power_limit(struct smu_context *smu)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
+ uint32_t ppt_limit;
+ int ret = 0;
+
+ if (smu->adev->pm.fw_version < 0x43f1e00)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
+ if (ret) {
+ dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
+ return ret;
+ }
+ /* convert from milliwatt to watt */
+ smu->current_power_limit = ppt_limit / 1000;
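+ /* platform cap for the slow PPT limit, in watts */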
+ smu->max_power_limit = 29;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
+ if (ret) {
+ dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+ return ret;
+ }
+ /* convert from milliwatt to watt */
+ power_context->current_fast_ppt_limit = ppt_limit / 1000;
+ power_context->max_fast_ppt_limit = 30;
+
+ return ret;
+}
+
+static int vangogh_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
+
+ if (!power_context)
+ return -EOPNOTSUPP;
+
+ if (type == SMU_FAST_PPT_LIMIT) {
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = power_context->max_fast_ppt_limit;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ *ppt_limit = power_context->current_fast_ppt_limit;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
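+ /* the requested limit type is encoded in the top byte of ppt_limit */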
+ uint32_t limit_type = ppt_limit >> 24;
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (limit_type) {
+ case SMU_DEFAULT_PPT_LIMIT:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSlowPPTLimit,
+ ppt_limit * 1000, /* convert from watt to milliwatt */
+ NULL);
+ if (ret)
+ return ret;
+
+ smu->current_power_limit = ppt_limit;
+ break;
+ case SMU_FAST_PPT_LIMIT:
+ ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
+ if (ppt_limit > power_context->max_fast_ppt_limit) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is over the max allowed %d\n",
+ ppt_limit, power_context->max_fast_ppt_limit);
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFastPPTLimit,
+ ppt_limit * 1000, /* convert from watt to milliwatt */
+ NULL);
+ if (ret)
+ return ret;
+
+ power_context->current_fast_ppt_limit = ppt_limit;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static const struct pptable_funcs vangogh_ppt_funcs = {
@@ -742,7 +1893,6 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.register_irq_handler = smu_v11_0_register_irq_handler,
- .get_allowed_feature_mask = vangogh_get_allowed_feature_mask,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
@@ -761,6 +1911,18 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_profile_mode = vangogh_set_power_profile_mode,
+ .get_power_profile_mode = vangogh_get_power_profile_mode,
+ .get_dpm_clock_table = vangogh_get_dpm_clock_table,
+ .force_clk_levels = vangogh_force_clk_levels,
+ .set_performance_level = vangogh_set_performance_level,
+ .post_init = vangogh_post_smu_init,
+ .mode2_reset = vangogh_mode2_reset,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .get_ppt_limit = vangogh_get_ppt_limit,
+ .get_power_limit = vangogh_get_power_limit,
+ .set_power_limit = vangogh_set_power_limit,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
@@ -769,5 +1931,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->message_map = vangogh_message_map;
smu->feature_map = vangogh_feature_mask_map;
smu->table_map = vangogh_table_map;
+ smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index eab455493076..c56d4583dc72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -28,9 +28,29 @@
extern void vangogh_set_ppt_funcs(struct smu_context *smu);
/* UMD PState Vangogh Msg Parameters in MHz */
-#define VANGOGH_UMD_PSTATE_GFXCLK 700
-#define VANGOGH_UMD_PSTATE_SOCCLK 678
-#define VANGOGH_UMD_PSTATE_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_STANDARD_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_STANDARD_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_VCLK 705
+#define VANGOGH_UMD_PSTATE_STANDARD_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_PEAK_GFXCLK 1300
+#define VANGOGH_UMD_PSTATE_PEAK_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_PEAK_FCLK 800
+#define VANGOGH_UMD_PSTATE_PEAK_VCLK 705
+#define VANGOGH_UMD_PSTATE_PEAK_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_GFXCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_FCLK 800
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_DCLK 800
+
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_FCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_DCLK 800
/* RLC Power Status */
#define RLC_STATUS_OFF 0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..5faa509f0dba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -56,8 +56,6 @@ static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma, 1),
MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq, 1),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
- MSG_MAP(Spare1, PPSMC_MSG_spare1, 1),
- MSG_MAP(Spare2, PPSMC_MSG_spare2, 1),
MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch, 1),
MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq, 1),
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
@@ -188,6 +186,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
@@ -343,12 +342,142 @@ failed:
return ret;
}
+static int renoir_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+ dev_warn(smu->adev->dev,
+ "pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+ return ret;
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ uint32_t min = 0, max = 0;
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+
+ smu->gfx_default_hard_min_freq = min;
+ smu->gfx_default_soft_max_freq = max;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -358,6 +487,30 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
switch (clk_type) {
+ case SMU_OD_RANGE:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+ }
+ break;
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sprintf(buf + size, "OD_SCLK\n");
+ size += sprintf(buf + size, "0:%10uMhz\n", min);
+ size += sprintf(buf + size, "1:%10uMhz\n", max);
+ }
+ break;
case SMU_GFXCLK:
case SMU_SCLK:
/* retrieve table returned parameters, unit is MHz */
@@ -398,23 +551,35 @@ static int renoir_print_clk_levels(struct smu_context *smu,
cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
default:
- return -EINVAL;
+ break;
}
- for (i = 0; i < count; i++) {
- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
- if (ret)
- return ret;
- if (!value)
- continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
- cur_value == value ? "*" : "");
- if (cur_value == value)
- cur_value_match_level = true;
- }
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_DCEFCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
- if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+
+ break;
+ default:
+ break;
+ }
return size;
}
@@ -666,6 +831,10 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -724,15 +893,27 @@ static int renoir_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, true);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, false);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_unforce_dpm_levels(smu);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinGfxClk,
RENOIR_UMD_PSTATE_GFXCLK,
@@ -785,6 +966,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_get_profiling_clk_mask(smu, level,
&sclk_mask,
&mclk_mask,
@@ -796,6 +980,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_set_peak_clock_by_device(smu);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -943,7 +1130,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower << 8;
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = (metrics->GfxTemperature / 100) *
@@ -1071,7 +1258,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -1112,6 +1299,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->fan_pwm = metrics.FanPwm;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_0);
@@ -1120,7 +1309,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+ return 0;
}
static const struct pptable_funcs renoir_ppt_funcs = {
@@ -1159,6 +1348,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = renoir_get_gpu_metrics,
.gfx_state_change_set = renoir_gfx_state_change_set,
+ .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
+ .od_edit_dpm_table = renoir_od_edit_dpm_table,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
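
The METRICS_AVERAGE_SOCKETPOWER hunk above is easy to misread: Renoir's
SMU appears to report CurrentSocketPower in milliwatts, while the sensor
interface expects watts in 8.8 fixed point, hence the shift before the
divide. As arithmetic:

    /* Sketch, assuming CurrentSocketPower is reported in mW: */
    uint32_t mw = metrics->CurrentSocketPower;  /* e.g. 15000 (15 W)   */
    uint32_t value = (mw << 8) / 1000;          /* 3840 == 15.0 in 8.8 */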
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..6cc4855c8a37 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
@@ -277,15 +278,3 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
-
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v2_0);
- gpu_metrics->common_header.format_revision = 2;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f8260769061c..bcedd4d92e35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -68,14 +68,6 @@ static const char *smu_get_message_name(struct smu_context *smu,
return __smu_message_names[type];
}
-static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-}
-
static void smu_cmn_read_arg(struct smu_context *smu,
uint32_t *arg)
{
@@ -92,7 +84,7 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
+ return cur_value;
udelay(1);
}
@@ -101,7 +93,29 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
if (i == timeout)
return -ETIME;
- return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ ret = smu_cmn_wait_for_response(smu);
+ if (ret != 0x1) {
+ dev_err(adev->dev, "Msg issuing pre-check failed and "
+ "SMU may be not in the right state!\n");
+ if (ret != -ETIME)
+ ret = -EIO;
+ return ret;
+ }
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
}
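
The reworked smu_cmn_send_msg_without_waiting() makes the MP1 mailbox
handshake explicit. A condensed sketch of one message round-trip, with
the register roles taken from the code above (0x1 is the firmware's
success code, which the driver later maps to 0):

    /*
     *   C2PMSG_90 - response/status: 0 while busy, 0x1 on success
     *   C2PMSG_82 - 32-bit message argument
     *   C2PMSG_66 - message index; the write kicks the SMU
     */
    if (smu_cmn_wait_for_response(smu) != 0x1)    /* previous msg done? */
            return -EIO;
    WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);     /* arm status */
    WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param); /* argument   */
    WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);   /* fire       */
    if (smu_cmn_wait_for_response(smu) != 0x1)    /* wait for completion */
            return -EIO;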
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
@@ -122,29 +136,28 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- ret = smu_cmn_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
+ if (ret)
goto out;
- }
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_cmn_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
+ if (ret != 0x1) {
+ if (ret == -ETIME) {
+ dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x is timeout (no response)\n",
+ smu_get_message_name(smu, msg), index, param);
+ } else {
+ dev_err(adev->dev, "failed send message: %15s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param,
+ ret);
+ ret = -EIO;
+ }
goto out;
}
if (read_arg)
smu_cmn_read_arg(smu, read_arg);
+ ret = 0; /* translate the SMU's 0x1 success code into a 0 driver return */
out:
mutex_unlock(&smu->message_lock);
return ret;
@@ -269,11 +282,13 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
int feature_id;
int ret = 0;
- if (smu->is_apu)
+ if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
return 1;
+
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
mask);
@@ -731,3 +746,31 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
return ret;
}
+
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
+{
+ struct metrics_table_header *header = (struct metrics_table_header *)table;
+ uint16_t structure_size;
+
+#define METRICS_VERSION(a, b) (((a) << 16) | (b))
+
+ switch (METRICS_VERSION(frev, crev)) {
+ case METRICS_VERSION(1, 0):
+ structure_size = sizeof(struct gpu_metrics_v1_0);
+ break;
+ case METRICS_VERSION(2, 0):
+ structure_size = sizeof(struct gpu_metrics_v2_0);
+ break;
+ default:
+ return;
+ }
+
+#undef METRICS_VERSION
+
+ memset(header, 0xFF, structure_size);
+
+ header->format_revision = frev;
+ header->content_revision = crev;
+ header->structure_size = structure_size;
+}
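
With the version-dispatching initializer in smu_cmn, callers pick the
table layout by revision and now stamp the timestamp themselves, as the
renoir hunk above does:

    /* Usage sketch, mirroring renoir_get_gpu_metrics(); table_ptr is
     * illustrative. */
    struct gpu_metrics_v2_0 *gpu_metrics = table_ptr;

    smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
    /* ... fill in the individual metric fields ... */
    gpu_metrics->system_clock_counter = ktime_get_boottime_ns();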
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 01e825d83d8d..c69250185575 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,8 @@
#include "amdgpu_smu.h"
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
uint32_t param,
@@ -95,5 +97,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache);
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
+
#endif
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 042d7b54a6de..895cdd991af6 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -162,15 +162,10 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
.atomic_update = arc_pgu_plane_atomic_update,
};
-static void arc_pgu_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs arc_pgu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = arc_pgu_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -213,7 +208,7 @@ int arc_pgu_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
&arc_pgu_crtc_funcs, NULL);
if (ret) {
- arc_pgu_plane_destroy(primary);
+ drm_plane_cleanup(primary);
return ret;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f164818ec477..077d006b1fbf 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -145,7 +145,7 @@ static void arcpgu_debugfs_init(struct drm_minor *minor)
}
#endif
-static struct drm_driver arcpgu_drm_driver = {
+static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4b485eb512e2..59172acb9738 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -550,7 +550,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs komeda_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 108e7a31bd26..494075ddbef6 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -510,7 +510,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3ebcf5a52c8b..b7bb90ae787f 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -820,7 +820,6 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 742d43a7edf4..fac1ee79c372 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -39,7 +39,6 @@ static void ast_cursor_fini(struct ast_private *ast)
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -53,14 +52,13 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr)
}
/*
- * Allocate cursor BOs and pins them at the end of VRAM.
+ * Allocate cursor BOs and pin them at the end of VRAM.
*/
int ast_cursor_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
int ret;
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
@@ -77,15 +75,7 @@ int ast_cursor_init(struct ast_private *ast)
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret) {
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- goto err_drm_gem_vram_put;
- }
-
ast->cursor.gbo[i] = gbo;
- ast->cursor.map[i] = map;
}
return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
@@ -94,7 +84,6 @@ err_drm_gem_vram_put:
while (i) {
--i;
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -168,38 +157,37 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
- int ret;
- void *src;
+ struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
+ struct dma_buf_map src_map, dst_map;
void __iomem *dst;
+ void *src;
+ int ret;
if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
return -EINVAL;
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
-
- ret = drm_gem_vram_pin(gbo, 0);
+ ret = drm_gem_vram_vmap(src_gbo, &src_map);
if (ret)
return ret;
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret)
- goto err_drm_gem_vram_unpin;
- src = map.vaddr; /* TODO: Use mapping abstraction properly */
+ src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
+ ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);
- drm_gem_vram_vunmap(gbo, &map);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(dst_gbo, &dst_map);
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return 0;
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return ret;
}
@@ -251,17 +239,26 @@ static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y)
{
+ struct drm_device *dev = &ast->base;
+ struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct dma_buf_map map;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
u8 jreg;
+ int ret;
- dst = ast->cursor.map[ast->cursor.next_index].vaddr;
+ ret = drm_gem_vram_vmap(gbo, &map);
+ if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
+ return;
+ dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
+ drm_gem_vram_vunmap(gbo, &map);
+
if (x < 0) {
x_offset = (-x) + offset_x;
x = 0;
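
The ast cursor code drops its long-lived cursor-BO mappings in favor of
mapping around each access, which keeps the BOs evictable between
updates. The resulting access pattern, as a sketch:

    /* Sketch of the map-access-unmap pattern used above: */
    struct dma_buf_map map;
    int ret;

    ret = drm_gem_vram_vmap(gbo, &map);
    if (ret)
            return ret;
    /* ... write through map.vaddr_iomem while the mapping is held ... */
    drm_gem_vram_vunmap(gbo, &map);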
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 667b450606ef..ea8164e7a6dc 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -147,7 +147,7 @@ static int ast_drm_freeze(struct drm_device *dev)
error = drm_mode_config_helper_suspend(dev);
if (error)
return error;
- pci_save_state(dev->pdev);
+ pci_save_state(to_pci_dev(dev->dev));
return 0;
}
@@ -162,7 +162,7 @@ static int ast_drm_resume(struct drm_device *dev)
{
int ret;
- if (pci_enable_device(dev->pdev))
+ if (pci_enable_device(to_pci_dev(dev->dev)))
return -EIO;
ret = ast_drm_thaw(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ccaff81924ee..f871fc36c2f7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,7 +28,6 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <linux/dma-buf-map.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
@@ -133,7 +132,6 @@ struct ast_private {
struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
- struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 1b13199858cb..0ac3c2039c4b 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,8 +67,9 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
/* Defaults */
@@ -85,7 +86,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
}
/* Not all families have a P2A bridge */
- if (dev->pdev->device != PCI_CHIP_AST2000)
+ if (pdev->device != PCI_CHIP_AST2000)
return;
/*
@@ -119,6 +120,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
/*
@@ -143,19 +145,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
- if (dev->pdev->revision >= 0x50) {
+ if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
- } else if (dev->pdev->revision >= 0x40) {
+ } else if (pdev->revision >= 0x40) {
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
- } else if (dev->pdev->revision >= 0x30) {
+ } else if (pdev->revision >= 0x30) {
ast->chip = AST2400;
drm_info(dev, "AST 2400 detected\n");
- } else if (dev->pdev->revision >= 0x20) {
+ } else if (pdev->revision >= 0x20) {
ast->chip = AST2300;
drm_info(dev, "AST 2300 detected\n");
- } else if (dev->pdev->revision >= 0x10) {
+ } else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
@@ -265,7 +267,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -409,10 +411,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
return ast;
dev = &ast->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
- ast->regs = pci_iomap(dev->pdev, 1, 0);
+ ast->regs = pci_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
@@ -421,14 +422,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
*/
- if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
- ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ ast->ioregs = pci_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 8392ebde504b..7592f1b9e1f1 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -77,31 +77,32 @@ static u32 ast_get_vram_size(struct ast_private *ast)
static void ast_mm_release(struct drm_device *dev, void *ptr)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
}
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 vram_size;
int ret;
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
- vram_size);
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9db371f4054f..988b270fea5e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -903,7 +903,6 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
static const struct drm_crtc_funcs ast_crtc_funcs = {
.reset = ast_crtc_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -1107,6 +1106,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = ast_cursor_init(ast);
@@ -1122,7 +1122,7 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ dev->mode_config.fb_base = pci_resource_start(pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@@ -1259,7 +1259,7 @@ static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 8902c2f84bf9..0607658dde51 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -71,6 +71,7 @@ static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -80,7 +81,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
- if (dev->pdev->revision >= 0x20)
+ if (pdev->revision >= 0x20)
ext_reg_info = extreginfo_ast2300;
else
ext_reg_info = extreginfo_ast2300a0;
@@ -366,11 +367,12 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
- pci_read_config_dword(dev->pdev, 0x04, &reg);
+ pci_read_config_dword(pdev, 0x04, &reg);
reg |= 0x3;
- pci_write_config_dword(dev->pdev, 0x04, reg);
+ pci_write_config_dword(pdev, 0x04, reg);
ast_enable_vga(dev);
ast_open_key(ast);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index c58fa00b4848..05ad75d155e8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -473,7 +473,6 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index fd454225fd19..b469624fe40d 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -121,7 +121,6 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = bochs_load(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dce4672e3fc8..2d7380a9890e 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -110,7 +110,7 @@ int bochs_hw_load_edid(struct bochs_device *bochs)
int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
@@ -201,7 +201,7 @@ void bochs_hw_fini(struct drm_device *dev)
release_region(VBE_DISPI_IOPORT_INDEX, 2);
if (bochs->fb_map)
iounmap(bochs->fb_map);
- pci_release_regions(dev->pdev);
+ pci_release_regions(to_pci_dev(dev->dev));
kfree(bochs->edid);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a0d392c338da..76555ae64e9c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1292,8 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
i2c_unregister_device(adv7511->i2c_packet);
err_i2c_unregister_edid:
@@ -1311,8 +1310,7 @@ static int adv7511_remove(struct i2c_client *i2c)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
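
The guard removals above (and the matching dw-hdmi hunks further down)
rely on the common clock framework treating a NULL struct clk as a
no-op, so the unconditional call is equivalent:

    /* Before and after behave identically when cec_clk may be NULL: */
    if (adv7511->cec_clk)                        /* old: explicit guard */
            clk_disable_unprepare(adv7511->cec_clk);

    clk_disable_unprepare(adv7511->cec_clk);     /* new: NULL-safe call */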
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 4d278573cdb9..05eb759da6fc 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -20,6 +21,8 @@ struct display_connector {
struct gpio_desc *hpd_gpio;
int hpd_irq;
+
+ struct regulator *dp_pwr;
};
static inline struct display_connector *
@@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev)
of_property_read_string(pdev->dev.of_node, "label", &label);
/*
- * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
* edge interrupts, register an interrupt handler.
*/
if (type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA) {
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
if (IS_ERR(conn->hpd_gpio)) {
@@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* Get the DP PWR for DP connector. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int ret;
+
+ conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
+
+ if (IS_ERR(conn->dp_pwr)) {
+ ret = PTR_ERR(conn->dp_pwr);
+
+ switch (ret) {
+ case -ENODEV:
+ conn->dp_pwr = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (conn->dp_pwr) {
+ ret = regulator_enable(conn->dp_pwr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->dp_pwr)
+ regulator_disable(conn->dp_pwr);
+
drm_bridge_remove(&conn->bridge);
if (!IS_ERR(conn->bridge.ddc))
@@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = {
}, {
.compatible = "vga-connector",
.data = (void *)DRM_MODE_CONNECTOR_VGA,
+ }, {
+ .compatible = "dp-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
},
{},
};
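
The dp-pwr lookup follows the standard optional-supply idiom:
devm_regulator_get_optional() returns -ENODEV when the devicetree simply
omits the supply, which is not an error, while -EPROBE_DEFER must be
propagated. Condensed:

    /* Sketch of the optional-regulator idiom used above ("dp-pwr"
     * resolves a dp-pwr-supply phandle in the devicetree): */
    conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
    if (IS_ERR(conn->dp_pwr)) {
            if (PTR_ERR(conn->dp_pwr) == -ENODEV)
                    conn->dp_pwr = NULL;          /* supply absent: fine */
            else
                    return PTR_ERR(conn->dp_pwr); /* incl. -EPROBE_DEFER */
    }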
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 0c98d27f84ac..fee27952ec6d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
@@ -36,6 +37,7 @@ struct lt9611uxc {
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
- drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ else {
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ retry:
lt9611uxc->fw_version = ret;
init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
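
Moving the hotplug notification out of the IRQ thread avoids calling
into the DRM helpers while ocm_lock is held; the handler only records
the state and defers the notification. The resulting split, in outline:

    /* Sketch of the deferral added above: */
    static irqreturn_t irq_thread(int irq, void *dev_id)
    {
            struct lt9611uxc *ctx = dev_id;
            /* ... read hpd_status from the chip under ocm_lock ... */
            ctx->hdmi_connected = hpd_status & BIT(1);
            schedule_work(&ctx->work);      /* notify later, unlocked */
            return IRQ_HANDLED;
    }

matched by flush_scheduled_work() in remove() so no queued work outlives
the device.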
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0c79a9ba48bb..dda4fa9a1a08 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3440,8 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
err_iahb:
clk_disable_unprepare(hdmi->iahb_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -3465,8 +3464,7 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 86b06975bfdd..e21078b2f8b5 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -202,7 +202,7 @@ static int thc63_probe(struct platform_device *pdev)
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
- thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+ thc63->vcc = devm_regulator_get(thc63->dev, "vcc");
if (IS_ERR(thc63->vcc)) {
if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 4c7ad46fdd21..5311d03d49cc 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -45,13 +45,9 @@
#include "drm_legacy.h"
-/**
+/*
* Get AGP information.
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
@@ -92,7 +88,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
@@ -103,11 +99,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
*/
int drm_agp_acquire(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(dev->pdev);
+ dev->agp->bridge = agp_backend_acquire(pdev);
if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
@@ -115,13 +113,9 @@ int drm_agp_acquire(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_agp_acquire);
-/**
+/*
* Acquire the AGP device (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
@@ -133,7 +127,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
-/**
+/*
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
@@ -157,7 +151,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
return drm_agp_release(dev);
}
-/**
+/*
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
@@ -187,13 +181,9 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
return drm_agp_enable(dev, *mode);
}
-/**
+/*
* Allocate AGP memory.
*
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
@@ -242,7 +232,7 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
return drm_agp_alloc(dev, request);
}
-/**
+/*
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
@@ -263,13 +253,9 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
return NULL;
}
-/**
+/*
* Unbind AGP memory from the GATT (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
@@ -285,7 +271,7 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
entry = drm_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
+ ret = agp_unbind_memory(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
@@ -301,13 +287,9 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
return drm_agp_unbind(dev, request);
}
-/**
+/*
* Bind AGP memory into the GATT (ioctl)
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
@@ -326,7 +308,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = drm_bind_agp(entry->memory, page);
+ retcode = agp_bind_memory(entry->memory, page);
if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -345,13 +327,9 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
return drm_agp_bind(dev, request);
}
-/**
+/*
* Free AGP memory (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
@@ -369,11 +347,11 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
if (!entry)
return -EINVAL;
if (entry->bound)
- drm_unbind_agp(entry->memory);
+ agp_unbind_memory(entry->memory);
list_del(&entry->head);
- drm_free_agp(entry->memory, entry->pages);
+ agp_free_memory(entry->memory);
kfree(entry);
return 0;
}
@@ -388,7 +366,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
return drm_agp_free(dev, request);
}
-/**
+/*
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
@@ -402,14 +380,15 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return NULL;
- head->bridge = agp_find_bridge(dev->pdev);
+ head->bridge = agp_find_bridge(pdev);
if (!head->bridge) {
- head->bridge = agp_backend_acquire(dev->pdev);
+ head->bridge = agp_backend_acquire(pdev);
if (!head->bridge) {
kfree(head);
return NULL;
@@ -453,8 +432,8 @@ void drm_legacy_agp_clear(struct drm_device *dev)
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
+ agp_unbind_memory(entry->memory);
+ agp_free_memory(entry->memory);
kfree(entry);
}
INIT_LIST_HEAD(&dev->agp->memory);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ba1507036f26..560aaecba31b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2040,6 +2040,9 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* should always call this function from their
* &drm_mode_config_funcs.atomic_commit hook.
*
+ * Drivers that need to extend the commit setup to private objects can use the
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
* To be able to use this support drivers need to use a few more helper
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
@@ -2083,8 +2086,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
+ const struct drm_mode_config_helper_funcs *funcs;
int i, ret;
+ funcs = state->dev->mode_config.helper_private;
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
@@ -2169,6 +2175,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_plane_state->commit = drm_crtc_commit_get(commit);
}
+ if (funcs && funcs->atomic_commit_setup)
+ return funcs->atomic_commit_setup(state);
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
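
A driver wanting commit tracking for private objects hooks the new
callback through its mode-config helper funcs. A sketch, with all names
illustrative:

    static int example_atomic_commit_setup(struct drm_atomic_state *state)
    {
            /* extend commit setup to driver-private state here */
            return 0;
    }

    static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
            .atomic_commit_setup = example_atomic_commit_setup,
    };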
@@ -3021,7 +3030,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
@@ -3500,76 +3509,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_property_blob *blob = NULL;
- struct drm_color_lut *blob_data;
- int i, ret = 0;
- bool replaced;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- blob = drm_property_create_blob(dev,
- sizeof(struct drm_color_lut) * size,
- NULL);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- blob = NULL;
- goto fail;
- }
-
- /* Prepare GAMMA_LUT with the legacy values. */
- blob_data = blob->data;
- for (i = 0; i < size; i++) {
- blob_data[i].red = red[i];
- blob_data[i].green = green[i];
- blob_data[i].blue = blue[i];
- }
-
- state->acquire_ctx = ctx;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- /* Reset DEGAMMA_LUT and CTM properties. */
- replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
- crtc_state->color_mgmt_changed |= replaced;
-
- ret = drm_atomic_commit(state);
-
-fail:
- drm_atomic_state_put(state);
- drm_property_blob_put(blob);
- return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
* drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
* the input end of a bridge
* @bridge: bridge control structure
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5c2141e9a9f4..26e2f2ffd255 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -185,12 +185,6 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
- * IN_FORMATS:
- * Blob property which contains the set of buffer format and modifier
- * pairs supported by this plane. The blob is a drm_format_modifier_blob
- * struct. Without this property the plane doesn't support buffers with
- * modifiers. Userspace cannot change this property.
- *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index aeb1327e3077..e3d77dfefb0a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
GFP_KERNEL);
@@ -556,7 +556,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0fe3c496002a..79a50ef1250f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -30,6 +30,8 @@
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/mem_encrypt.h>
+#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -176,3 +178,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+ struct resource *tmp;
+ resource_size_t max_iomem = 0;
+
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+ * ttm_populate_and_map_pages(), which bounce buffers so much in
+ * Xen it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
+ for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+ max_iomem = max(max_iomem, tmp->end);
+
+ return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
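
drm_need_swiotlb() gives TTM-based drivers one place to answer "can this
device reach all of iomem with its DMA mask?". A hedged usage sketch,
where 40 is an illustrative DMA bit width:

    /* Sketch: decide whether to force coherent (dma_alloc_coherent-
     * backed) TTM pools for a device with a 40-bit DMA mask. */
    bool use_dma_alloc = drm_need_swiotlb(40);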
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index b7e9e1c2564c..ced09c7c06f9 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -7,6 +7,7 @@
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*/
+#include "drm/drm_modeset_lock.h"
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -1181,9 +1182,11 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_device *dev = client->dev;
struct drm_connector *connector;
struct drm_mode_set *modeset;
+ struct drm_modeset_acquire_ctx ctx;
int j;
+ int ret;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
if (!modeset->crtc->enabled)
continue;
@@ -1195,7 +1198,7 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
dev->mode_config.dpms_property, dpms_mode);
}
}
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
/**
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3bcabc2f6e0e..bb14f488c8f6 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -89,9 +90,8 @@
* modes) appropriately.
*
* There is also support for a legacy gamma table, which is set up by calling
- * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
- * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
- * "GAMMA_LUT" property above.
+ * drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma
+ * ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT".
*
* Support for different non RGB color encodings is controlled through
* &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
@@ -156,9 +156,6 @@ EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
* optional. The gamma and degamma properties are only attached if
* their size is not 0 and ctm_property is only attached if has_ctm is
* true.
- *
- * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
- * legacy &drm_crtc_funcs.gamma_set callback.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
@@ -232,6 +229,116 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
/**
+ * drm_crtc_supports_legacy_gamma - does the crtc support legacy gamma correction table
+ * @crtc: CRTC object
+ *
+ * Returns true if the given crtc supports setting the legacy gamma
+ * correction table, false otherwise.
+ */
+static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
+{
+ u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
+
+ if (!crtc->gamma_size)
+ return false;
+
+ if (crtc->funcs->gamma_set)
+ return true;
+
+ return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
+ drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
+}
+
+/**
+ * drm_crtc_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that have set drm_crtc_funcs.gamma_set or that support color management
+ * through the DEGAMMA_LUT/GAMMA_LUT properties. See
+ * drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ *
+ * This function sets the gamma using drm_crtc_funcs.gamma_set if set, or
+ * alternatively using crtc color management properties.
+ */
+static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ u32 size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
+ bool use_gamma_lut;
+ int i, ret = 0;
+ bool replaced;
+
+ if (crtc->funcs->gamma_set)
+ return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
+
+ if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
+ use_gamma_lut = true;
+ else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
+ use_gamma_lut = false;
+ else
+ return -ENODEV;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = ctx;
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Set GAMMA_LUT or DEGAMMA_LUT, and reset the other LUT and the CTM */
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+ use_gamma_lut ? NULL : blob);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+ use_gamma_lut ? blob : NULL);
+ crtc_state->color_mgmt_changed |= replaced;
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ drm_property_blob_put(blob);
+ return ret;
+}
+
+/**
* drm_mode_gamma_set_ioctl - set the gamma table
* @dev: DRM device
* @data: ioctl data
@@ -262,7 +369,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- if (crtc->funcs->gamma_set == NULL)
+ if (!drm_crtc_supports_legacy_gamma(crtc))
return -ENOSYS;
/* memcpy into gamma store */
@@ -290,8 +397,8 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
- crtc->gamma_size, &ctx);
+ ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base,
+ crtc->gamma_size, &ctx);
out:
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
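With the hunks above applied, an atomic driver that exposes GAMMA_LUT (or only
DEGAMMA_LUT) no longer needs a &drm_crtc_funcs.gamma_set hook: the core
translates the legacy gamma ioctl into a blob-property commit. A minimal sketch
of the driver side, assuming the atomic helpers are in use and hypothetical
foo_* names (an illustration, not part of this patch):

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	/* no .gamma_set: drm_crtc_legacy_gamma_set() handles the ioctl */
};

static int foo_crtc_setup_color_mgmt(struct drm_crtc *crtc)
{
	/* no degamma, a CTM, and a 256-entry GAMMA_LUT */
	drm_crtc_enable_color_mgmt(crtc, 0, true, 256);

	/* the legacy gamma ramp size must still be declared */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}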
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 74090fc3aa55..9c4f9947b194 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
@@ -67,7 +68,7 @@
* &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
- * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_check.
+ * &drm_mode_config_funcs.atomic_check.
*/
/**
@@ -240,30 +241,12 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Nearest Neighbor scaling filter
*/
-/**
- * drm_crtc_init_with_planes - Initialise a new CRTC object with
- * specified primary and cursor planes.
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @primary: Primary plane for CRTC
- * @cursor: Cursor plane for CRTC
- * @funcs: callbacks for the new CRTC
- * @name: printf style format string for the CRTC name, or NULL for default name
- *
- * Inits a new object created as base part of a driver crtc object. Drivers
- * should use this function instead of drm_crtc_init(), which is only provided
- * for backwards compatibility with drivers which do not yet support universal
- * planes). For really simple hardware which has only 1 plane look at
- * drm_simple_display_pipe_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor,
- const struct drm_crtc_funcs *funcs,
- const char *name, ...)
+__printf(6, 0)
+static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -291,11 +274,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return ret;
if (name) {
- va_list ap;
-
- va_start(ap, name);
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
@@ -339,8 +318,101 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return 0;
}
+
+/**
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes). For really simple hardware which has only 1 plane look at
+ * drm_simple_display_pipe_init() instead.
+ * The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree()
+ * the crtc structure. The crtc structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Note: consider using drmm_crtc_alloc_with_planes() instead of
+ * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
+static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
+ void *ptr)
+{
+ struct drm_crtc *crtc = ptr;
+
+ drm_crtc_cleanup(crtc);
+}
+
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_crtc *crtc;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
+ crtc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
+
/**
* drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
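The new managed allocator is meant to be used through the
drmm_crtc_alloc_with_planes() macro from <drm/drm_crtc.h>. A sketch of a driver
subclassing the CRTC this way, with hypothetical foo_* names (an illustration,
not part of this patch):

struct foo_crtc {
	struct drm_crtc base;
	void __iomem *regs;
};

/* note: no .destroy callback, cleanup runs as a DRM managed action */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static int foo_create_crtc(struct drm_device *drm,
			   struct drm_plane *primary,
			   struct drm_plane *cursor)
{
	struct foo_crtc *foo;

	foo = drmm_crtc_alloc_with_planes(drm, struct foo_crtc, base,
					  primary, cursor,
					  &foo_crtc_funcs, NULL);
	return PTR_ERR_OR_ZERO(foo);
}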
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 25ce42e79995..61e09f8a8d0f 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -32,16 +32,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-#endif
-
/* drm_dp_aux_dev.c */
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 5bd0934004e3..eedbb48815b7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -950,6 +950,38 @@ bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE]
EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
/**
+ * drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
+ * RGB->YCbCr conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @color_spc: Colorspace for which conversion cap is sought
+ *
+ * Returns: whether the downstream facing port can convert RGB->YCbCr for a given
+ * colorspace.
+ */
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ u8 color_spc)
+{
+ if (!drm_dp_is_branch(dpcd))
+ return false;
+
+ if (dpcd[DP_DPCD_REV] < 0x13)
+ return false;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return false;
+
+ return port_cap[3] & color_spc;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
+
+/**
* drm_dp_downstream_mode() - return a mode for downstream facing port
* @dev: DRM device
* @dpcd: DisplayPort configuration data
@@ -1204,7 +1236,7 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
- !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT);
+ !drm_dp_has_quirk(desc, DP_DPCD_QUIRK_NO_SINK_COUNT);
}
EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
@@ -1925,87 +1957,6 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
-struct edid_quirk {
- u8 mfg_id[2];
- u8 prod_id[2];
- u32 quirks;
-};
-
-#define MFG(first, second) { (first), (second) }
-#define PROD_ID(first, second) { (first), (second) }
-
-/*
- * Some devices have unreliable OUIDs where they don't set the device ID
- * correctly, and as a result we need to use the EDID for finding additional
- * DP quirks in such cases.
- */
-static const struct edid_quirk edid_quirk_list[] = {
- /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
- * only supports DPCD backlight controls
- */
- { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- /*
- * Some Dell CML 2020 systems have panels support both AUX and PWM
- * backlight control, and some only support AUX backlight control. All
- * said panels start up in AUX mode by default, and we don't have any
- * support for disabling HDR mode on these panels which would be
- * required to switch to PWM backlight control mode (plus, I'm not
- * even sure we want PWM backlight controls over DPCD backlight
- * controls anyway...). Until we have a better way of detecting these,
- * force DPCD backlight mode on all of them.
- */
- { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-};
-
-#undef MFG
-#undef PROD_ID
-
-/**
- * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
- * DP-specific quirks
- * @edid: The EDID to check
- *
- * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
- * of manufacturers don't seem to like following standards and neglect to fill
- * the dev-ID in, making it impossible to only use OUIDs for determining
- * quirks in some cases. This function can be used to check the EDID and look
- * up any additional DP quirks. The bits returned by this function correspond
- * to the quirk bits in &drm_dp_quirk.
- *
- * Returns: a bitmask of quirks, if any. The driver can check this using
- * drm_dp_has_quirk().
- */
-u32 drm_dp_get_edid_quirks(const struct edid *edid)
-{
- const struct edid_quirk *quirk;
- u32 quirks = 0;
- int i;
-
- if (!edid)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
- if (memcmp(quirk->mfg_id, edid->mfg_id,
- sizeof(edid->mfg_id)) == 0 &&
- memcmp(quirk->prod_id, edid->prod_code,
- sizeof(edid->prod_code)) == 0)
- quirks |= quirk->quirks;
- }
-
- DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
- (int)sizeof(edid->mfg_id), edid->mfg_id,
- (int)sizeof(edid->prod_code), edid->prod_code, quirks);
-
- return quirks;
-}
-EXPORT_SYMBOL(drm_dp_get_edid_quirks);
-
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2596,3 +2547,538 @@ void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+
+/**
+ * drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns the maximum FRL bandwidth supported by the PCON in Gbps,
+ * or 0 if not supported.
+ */
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int bw;
+ u8 buf;
+
+ buf = port_cap[2];
+ bw = buf & DP_PCON_MAX_FRL_BW;
+
+ switch (bw) {
+ case DP_PCON_MAX_9GBPS:
+ return 9;
+ case DP_PCON_MAX_18GBPS:
+ return 18;
+ case DP_PCON_MAX_24GBPS:
+ return 24;
+ case DP_PCON_MAX_32GBPS:
+ return 32;
+ case DP_PCON_MAX_40GBPS:
+ return 40;
+ case DP_PCON_MAX_48GBPS:
+ return 48;
+ case DP_PCON_MAX_0GBPS:
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
+
+/**
+ * drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
+ * @aux: DisplayPort AUX channel
+ * @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
+ *
+ * Returns 0 on success, else returns a negative error code.
+ */
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
+{
+ int ret;
+ u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
+ DP_PCON_ENABLE_LINK_FRL_MODE;
+
+ if (enable_frl_ready_hpd)
+ buf |= DP_PCON_ENABLE_HPD_READY;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
+
+/**
+ * drm_dp_pcon_is_frl_ready() - Is PCON ready for FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the PCON is ready for FRL, else returns false.
+ */
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ if (buf & DP_PCON_FRL_READY)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
+
+/**
+ * drm_dp_pcon_frl_configure_1() - Set HDMI LINK Configuration-Step1
+ * @aux: DisplayPort AUX channel
+ * @max_frl_gbps: maximum FRL bandwidth to be configured between PCON and HDMI sink
+ * @concurrent_mode: true if concurrent mode of operation is required,
+ * false otherwise.
+ *
+ * Returns 0 on success, else returns a negative error code.
+ */
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ bool concurrent_mode)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (concurrent_mode)
+ buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
+ else
+ buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
+
+ switch (max_frl_gbps) {
+ case 9:
+ buf |= DP_PCON_ENABLE_MAX_BW_9GBPS;
+ break;
+ case 18:
+ buf |= DP_PCON_ENABLE_MAX_BW_18GBPS;
+ break;
+ case 24:
+ buf |= DP_PCON_ENABLE_MAX_BW_24GBPS;
+ break;
+ case 32:
+ buf |= DP_PCON_ENABLE_MAX_BW_32GBPS;
+ break;
+ case 40:
+ buf |= DP_PCON_ENABLE_MAX_BW_40GBPS;
+ break;
+ case 48:
+ buf |= DP_PCON_ENABLE_MAX_BW_48GBPS;
+ break;
+ case 0:
+ buf |= DP_PCON_ENABLE_MAX_BW_0GBPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
+
+/**
+ * drm_dp_pcon_frl_configure_2() - Set HDMI Link configuration Step-2
+ * @aux: DisplayPort AUX channel
+ * @max_frl_mask: max FRL bandwidth to be tried by the PCON with the HDMI sink
+ * @extended_train_mode: true for Extended Mode, false for Normal Mode.
+ * In Normal mode, the PCON tries each FRL bandwidth from the max_frl_mask
+ * starting from the minimum, and stops when link training is successful. In
+ * Extended mode, all FRL bandwidths selected in the mask are trained by the PCON.
+ *
+ * Returns 0 on success, else returns a negative error code.
+ */
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ bool extended_train_mode)
+{
+ int ret;
+ u8 buf = max_frl_mask;
+
+ if (extended_train_mode)
+ buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
+
+/**
+ * drm_dp_pcon_reset_frl_config() - Reset HDMI link configuration.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns a negative error code.
+ */
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
+
+/**
+ * drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns a negative error code.
+ */
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf = 0;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+ if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
+ DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+ return -EINVAL;
+ }
+ buf |= DP_PCON_ENABLE_HDMI_LINK;
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
+
+/**
+ * drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI LINK status is active.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if link is active else returns false.
+ */
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
+
+/**
+ * drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI LINK MODE
+ * @aux: DisplayPort AUX channel
+ * @frl_trained_mask: pointer to store bitmask of the trained bw configuration.
+ * Valid only if the MODE returned is FRL. For Normal Link training mode
+ * only one of the bits will be set, but in case of Extended mode, more than
+ * one bit can be set.
+ *
+ * Returns the link mode: TMDS or FRL on success, else returns a negative error
+ * code.
+ */
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
+{
+ u8 buf;
+ int mode;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
+ mode = buf & DP_PCON_HDMI_LINK_MODE;
+
+ if (frl_trained_mask && DP_PCON_HDMI_MODE_FRL == mode)
+ *frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
+
+ return mode;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
+
+/**
+ * drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
+ * during link failure between PCON and HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @connector: DRM connector
+ */
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+ u8 buf, error_count;
+ int i, num_error;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ for (i = 0; i < hdmi->max_lanes; i++) {
+ if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ return;
+
+ error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
+ switch (error_count) {
+ case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
+ num_error = 100;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
+ num_error = 10;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
+ num_error = 3;
+ break;
+ default:
+ num_error = 0;
+ }
+
+ DRM_ERROR("More than %d errors since the last read for lane %d", num_error, i);
+ }
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
+
+/**
+ * drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns true if the PCON encoder supports DSC 1.2, else returns false.
+ */
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+ u8 major_v, minor_v;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER];
+ major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT;
+ minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT;
+
+ if (major_v == 1 && minor_v == 2)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2);
+
+/**
+ * drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns maximum no. of slices supported by the PCON DSC Encoder.
+ */
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 slice_cap1, slice_cap2;
+
+ slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER];
+ slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER];
+
+ if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC)
+ return 24;
+ if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC)
+ return 20;
+ if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC)
+ return 16;
+ if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC)
+ return 12;
+ if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC)
+ return 10;
+ if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC)
+ return 8;
+ if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC)
+ return 6;
+ if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC)
+ return 4;
+ if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC)
+ return 2;
+ if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices);
+
+/**
+ * drm_dp_pcon_dsc_max_slice_width() - Get max slice width for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the maximum slice width in pixels, i.e. the DPCD value multiplied by 320.
+ */
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER];
+
+ return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width);
+
+/**
+ * drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the bpp precision supported by the PCON encoder.
+ */
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
+
+ switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
+ case DP_PCON_DSC_ONE_16TH_BPP:
+ return 16;
+ case DP_PCON_DSC_ONE_8TH_BPP:
+ return 8;
+ case DP_PCON_DSC_ONE_4TH_BPP:
+ return 4;
+ case DP_PCON_DSC_ONE_HALF_BPP:
+ return 2;
+ case DP_PCON_DSC_ONE_BPP:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
+
+static
+int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf |= DP_PCON_ENABLE_DSC_ENCODER;
+
+ if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
+ buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
+ buf |= pps_buf_config << 2;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drm_dp_pcon_pps_default() - Let PCON fill the default PPS parameters
+ * for DSC 1.2 between PCON & HDMI 2.1 sink
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_default);
+
+/**
+ * drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
+ * HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
+
+/**
+ * drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
+ * override registers
+ * @aux: DisplayPort AUX channel
+ * @pps_param: 3 parameters (2 bytes each), in register order: slice height,
+ * slice width, bits_per_pixel.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
+
+/**
+ * drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
+ * @aux: DisplayPort AUX channel
+ * @color_spc: colorspace(s) for which conversion is to be enabled, 0 to disable.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK)
+ buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK);
+ else
+ buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
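Taken together, the PCON helpers above suggest a bring-up sequence roughly like
the following sketch. Error handling is abbreviated, the all-rates mask value
and the foo_* name are assumptions for the example, and a real driver would add
retries and HPD handling:

static int foo_pcon_start_frl(struct drm_dp_aux *aux,
			      const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			      const u8 port_cap[4])
{
	int max_bw = drm_dp_get_pcon_max_frl_bw(dpcd, port_cap);
	int ret;

	if (!max_bw)
		return -EOPNOTSUPP;	/* TMDS-only PCON */

	ret = drm_dp_pcon_frl_prepare(aux, false);
	if (ret < 0)
		return ret;

	if (!drm_dp_pcon_is_frl_ready(aux))
		return -EBUSY;

	/* step 1: highest bandwidth, no concurrent DP/FRL operation */
	ret = drm_dp_pcon_frl_configure_1(aux, max_bw, false);
	if (ret < 0)
		return ret;

	/* step 2: train the rates in the mask in extended mode
	 * (0x3f is assumed here to select all six FRL rates) */
	ret = drm_dp_pcon_frl_configure_2(aux, 0x3f, true);
	if (ret < 0)
		return ret;

	return drm_dp_pcon_frl_enable(aux);
}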
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0401b2f47500..309afe61afdd 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2302,7 +2302,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+ drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2751,7 +2752,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_dp_mst_topology_put_mstb(mstb);
mutex_unlock(&mgr->probe_lock);
- if (ret)
+ if (ret > 0)
drm_kms_helper_hotplug_event(dev);
}
@@ -3629,14 +3630,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3705,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
@@ -4212,6 +4225,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
+ break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;
@@ -5824,8 +5838,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, 0,
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
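As a worked example of the rewritten helper (an illustration, not part of the
patch): a 4-lane HBR2 MST link has drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)
= 540000 in 10 kbit/s units, so drm_dp_get_vc_payload_bw(540000, 4) =
540000 * 4 / 54000 = 40 PBN per timeslot; a stream that needs 120 PBN therefore
occupies 120 / 40 = 3 timeslots in each MTP.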
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 734303802bc3..20d22e41d7ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -469,6 +469,9 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
+
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -589,11 +592,7 @@ static int drm_dev_init(struct drm_device *dev,
kref_init(&dev->ref);
dev->dev = get_device(parent);
-#ifdef CONFIG_DRM_LEGACY
- dev->driver = (struct drm_driver *)driver;
-#else
dev->driver = driver;
-#endif
INIT_LIST_HEAD(&dev->managed.resources);
spin_lock_init(&dev->managed.lock);
@@ -675,11 +674,8 @@ static int devm_drm_dev_init(struct device *parent,
if (ret)
return ret;
- ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
- if (ret)
- devm_drm_dev_init_release(dev);
-
- return ret;
+ return devm_add_action_or_reset(parent,
+ devm_drm_dev_init_release, dev);
}
void *__devm_drm_dev_alloc(struct device *parent,
@@ -897,8 +893,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
- ret = 0;
-
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
driver->patchlevel, driver->date,
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 4a475d9696ff..ff602f7ec65b 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -50,6 +50,33 @@ void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
+ * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
+ * @rc_buffer_block_size: block size code, according to DPCD offset 62h
+ * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
+ *
+ * return:
+ * buffer size in bytes, or 0 on invalid input
+ */
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
+{
+ int size = 1024 * (rc_buffer_size + 1);
+
+ switch (rc_buffer_block_size) {
+ case DP_DSC_RC_BUF_BLK_SIZE_1:
+ return 1 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_4:
+ return 4 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_16:
+ return 16 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_64:
+ return 64 * size;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
+
+/**
* drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
* @pps_payload:
@@ -186,8 +213,7 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_payload->rc_model_size =
- cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+ pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
/* PPS 40 */
pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
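Worked example for the new helper (an illustration, not part of the patch):
rc_buffer_block_size = DP_DSC_RC_BUF_BLK_SIZE_4 (4 KB blocks) together with
rc_buffer_size = 15 (i.e. 16 blocks) yields
drm_dsc_dp_rc_buffer_size(DP_DSC_RC_BUF_BLK_SIZE_4, 15) = 4 * 1024 * (15 + 1)
= 65536 bytes.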
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index d18a740fe0f1..ad17fa21cebb 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -29,6 +29,7 @@
#include <drm/drm_mode.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -46,9 +47,10 @@
* KMS frame buffers.
*
* To support dumb objects drivers must implement the &drm_driver.dumb_create
- * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
- * not set and &drm_driver.dumb_map_offset defaults to
- * drm_gem_dumb_map_offset(). See the callbacks for further details.
+ * and &drm_driver.dumb_map_offset operations (the latter defaults to
+ * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles
+ * additionally need to implement the &drm_driver.dumb_destroy operation. See
+ * the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
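A sketch of the driver wiring the reworked documentation describes, using the
CMA helper as one possible implementation; foo_driver is a hypothetical name
and the struct is abbreviated (not part of this patch):

static const struct drm_driver foo_driver = {
	.dumb_create = drm_gem_cma_dumb_create,
	/* .dumb_map_offset is optional: defaults to drm_gem_dumb_map_offset() */
	/* .dumb_destroy is only needed when handles are not GEM handles */
};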
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e95cce8e736d..c2bbe7bee7b6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -2075,9 +2076,13 @@ EXPORT_SYMBOL(drm_get_edid);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct pci_dev *pdev = connector->dev->pdev;
+ struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct edid *edid;
+ if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
+ return NULL;
+
vga_switcheroo_lock_ddc(pdev);
edid = drm_get_edid(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
@@ -4851,6 +4856,41 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
info->rgb_quant_range_selectable = true;
}
+static
+void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
+{
+ switch (max_frl_rate) {
+ case 1:
+ *max_lanes = 3;
+ *max_rate_per_lane = 3;
+ break;
+ case 2:
+ *max_lanes = 3;
+ *max_rate_per_lane = 6;
+ break;
+ case 3:
+ *max_lanes = 4;
+ *max_rate_per_lane = 6;
+ break;
+ case 4:
+ *max_lanes = 4;
+ *max_rate_per_lane = 8;
+ break;
+ case 5:
+ *max_lanes = 4;
+ *max_rate_per_lane = 10;
+ break;
+ case 6:
+ *max_lanes = 4;
+ *max_rate_per_lane = 12;
+ break;
+ case 0:
+ default:
+ *max_lanes = 0;
+ *max_rate_per_lane = 0;
+ }
+}
+
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
@@ -4904,6 +4944,74 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
}
}
+ if (hf_vsdb[7]) {
+ u8 max_frl_rate;
+ u8 dsc_max_frl_rate;
+ u8 dsc_max_slices;
+ struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
+
+ DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
+ max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
+ &hdmi->max_frl_rate_per_lane);
+ hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
+
+ if (hdmi_dsc->v_1p2) {
+ hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
+ hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
+
+ if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
+ hdmi_dsc->bpc_supported = 16;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
+ hdmi_dsc->bpc_supported = 12;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
+ hdmi_dsc->bpc_supported = 10;
+ else
+ hdmi_dsc->bpc_supported = 0;
+
+ dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+ &hdmi_dsc->max_frl_rate_per_lane);
+ hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+
+ dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
+ switch (dsc_max_slices) {
+ case 1:
+ hdmi_dsc->max_slices = 1;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 2:
+ hdmi_dsc->max_slices = 2;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 3:
+ hdmi_dsc->max_slices = 4;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 4:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 5:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 6:
+ hdmi_dsc->max_slices = 12;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 7:
+ hdmi_dsc->max_slices = 16;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 0:
+ default:
+ hdmi_dsc->max_slices = 0;
+ hdmi_dsc->clk_per_slice = 0;
+ }
+ }
+ }
+
drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
}
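For example (an illustration, not part of the patch): a sink advertising
Max_FRL_Rate = 5 in the HDMI Forum VSDB is decoded by drm_get_max_frl_rate()
into max_lanes = 4 and max_rate_per_lane = 10, i.e. a 4 * 10 = 40 Gbps FRL
budget; the DSC_Max_FRL_Rate nibble is decoded the same way into
hdmi_dsc->max_lanes and hdmi_dsc->max_frl_rate_per_lane.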
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index e555281f43d4..72e982323a5e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -26,6 +26,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
@@ -72,7 +73,7 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->late_register)
+ if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
return ret;
@@ -86,30 +87,16 @@ void drm_encoder_unregister_all(struct drm_device *dev)
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->early_unregister)
+ if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
}
}
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be subclassed as part of
- * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's &drm_encoder_funcs.destroy hook.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type, const char *name, ...)
+__printf(5, 0)
+static int __drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, va_list ap)
{
int ret;
@@ -125,11 +112,7 @@ int drm_encoder_init(struct drm_device *dev,
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
if (name) {
- va_list ap;
-
- va_start(ap, name);
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
@@ -150,6 +133,44 @@ out_put:
return ret;
}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time the driver's
+ * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree()
+ * the encoder structure. The encoder structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
+ * let the DRM managed resource infrastructure take care of cleanup and
+ * deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_encoder_init);
/**
@@ -181,6 +202,48 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_encoder *encoder = ptr;
+
+ if (WARN_ON(!encoder->dev))
+ return;
+
+ drm_encoder_cleanup(encoder);
+}
+
+void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ void *container;
+ struct drm_encoder *encoder;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(funcs && funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ encoder = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_encoder_alloc);
+
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
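The managed allocator is intended to be used through the drmm_encoder_alloc()
macro from <drm/drm_encoder.h>; a sketch with a hypothetical foo_encoder
subclass (an illustration, not part of this patch):

struct foo_encoder {
	struct drm_encoder base;
	int hw_id;
};

static int foo_create_encoder(struct drm_device *drm)
{
	struct foo_encoder *enc;

	/* funcs may be NULL for drmm encoders; .destroy must not be set */
	enc = drmm_encoder_alloc(drm, struct foo_encoder, base, NULL,
				 DRM_MODE_ENCODER_TMDS, NULL);
	return PTR_ERR_OR_ZERO(enc);
}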
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b8119510687..b9a616737c0e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
- return -EINVAL;
+ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (cmap->start + cmap->len > crtc->gamma_size)
- return -EINVAL;
+ if (cmap->start + cmap->len > crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
ret = crtc->funcs->gamma_set(crtc, r, g, b,
crtc->gamma_size, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
drm_modeset_unlock_all(fb_helper->dev);
return ret;
@@ -1054,6 +1059,11 @@ retry:
goto out_state;
}
+ /*
+ * FIXME: This always uses gamma_lut. Some HW have only
+ * degamma_lut, in which case we should reset gamma_lut and set
+ * degamma_lut. See drm_crtc_legacy_gamma_set().
+ */
replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
NULL);
replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
@@ -2486,6 +2496,11 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
return;
}
+ /*
+ * FIXME: This mixes up depth with bpp, which results in a glorious
+ * mess, resulting in some drivers picking wrong fbdev defaults and
+ * others wrong preferred_depth defaults.
+ */
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
@@ -2499,24 +2514,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char name[] = "fbcon";
- struct module *fbcon;
-
- mutex_lock(&module_mutex);
- fbcon = find_module(name);
- mutex_unlock(&module_mutex);
-
- if (!fbcon)
- request_module_nowait(name);
-#endif
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b50380fa80ce..7efbccffc2ea 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -113,8 +113,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* The memory mapping implementation will vary depending on how the driver
* manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
* function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
- * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -240,9 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
* before calling this.
*
* If NULL is passed, this is a no-op.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
*/
void drm_file_free(struct drm_file *file)
{
@@ -371,6 +367,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
+#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
/*
* Default the hose
@@ -391,6 +388,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
}
}
#endif
+#endif
return 0;
}
@@ -777,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev,
EXPORT_SYMBOL(drm_event_cancel_free);
/**
- * drm_send_event_locked - send DRM event to file descriptor
+ * drm_send_event_helper - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
*
- * This function sends the event @e, initialized with drm_event_reserve_init(),
- * to its associated userspace DRM file. Callers must already hold
- * &drm_device.event_lock, see drm_send_event() for the unlocked version.
- *
- * Note that the core will take care of unlinking and disarming events when the
- * corresponding DRM file is closed. Drivers need not worry about whether the
- * DRM file for this event still exists and can call this function upon
- * completion of the asynchronous work unconditionally.
+ * This helper function sends the event @e, initialized with
+ * drm_event_reserve_init(), to its associated userspace DRM file.
+ * The timestamp variant of dma_fence_signal is used when the caller
+ * sends a valid timestamp.
*/
-void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+void drm_send_event_helper(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
@@ -801,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- dma_fence_signal(e->fence);
+ if (timestamp)
+ dma_fence_signal_timestamp(e->fence, timestamp);
+ else
+ dma_fence_signal(e->fence);
dma_fence_put(e->fence);
}
@@ -816,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
wake_up_interruptible_poll(&e->file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
}
+
+/**
+ * drm_send_event_timestamp_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
+{
+ drm_send_event_helper(dev, e, timestamp);
+}
+EXPORT_SYMBOL(drm_send_event_timestamp_locked);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+ drm_send_event_helper(dev, e, 0);
+}
EXPORT_SYMBOL(drm_send_event_locked);
/**
@@ -838,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
unsigned long irqflags;
spin_lock_irqsave(&dev->event_lock, irqflags);
- drm_send_event_locked(dev, e);
+ drm_send_event_helper(dev, e, 0);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
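A sketch of how a driver with a hardware flip-done timestamp might use the new
timestamped variant; the foo_* name and the timestamp source are assumptions,
not part of this patch:

static void foo_finish_pageflip(struct drm_device *dev,
				struct drm_pending_vblank_event *event,
				ktime_t hw_timestamp)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	/* signals event->base.fence with the CLOCK_MONOTONIC timestamp */
	drm_send_event_timestamp_locked(dev, &event->base, hw_timestamp);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}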
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 92f89cee213e..c2ce78c4edc3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -335,22 +335,12 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-/**
- * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
- * @file: drm file-private structure to remove the dumb handle from
- * @dev: corresponding drm_device
- * @handle: the dumb handle to remove
- *
- * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
- * which use gem to manage their backing storage.
- */
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle)
+ u32 handle)
{
return drm_gem_handle_delete(file, handle);
}
-EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
* drm_gem_handle_create_tail - internal functions to create a handle
@@ -1078,20 +1068,17 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
drm_gem_object_get(obj);
vma->vm_private_data = obj;
+ vma->vm_ops = obj->funcs->vm_ops;
if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
- if (ret) {
- drm_gem_object_put(obj);
- return ret;
- }
+ if (ret)
+ goto err_drm_gem_object_put;
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
- if (obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else {
- drm_gem_object_put(obj);
- return -EINVAL;
+ if (!vma->vm_ops) {
+ ret = -EINVAL;
+ goto err_drm_gem_object_put;
}
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
@@ -1100,6 +1087,10 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
return 0;
+
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
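After this change vm_ops is installed before the callback runs, so an
object-funcs .mmap implementation only needs to adjust the VMA itself; a sketch
with hypothetical foo_* callbacks (an illustration, not part of this patch):

static void foo_gem_free(struct drm_gem_object *obj);
static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
static const struct vm_operations_struct foo_gem_vm_ops;

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = foo_gem_free,		/* hypothetical */
	.vm_ops = &foo_gem_vm_ops,	/* now assigned before .mmap runs */
	.mmap = foo_gem_mmap,		/* only fixes up flags/pgoff and maps */
};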
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4d5c1d86b022..7942cf05cd93 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -36,8 +36,9 @@
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = drm_gem_cma_prime_vmap,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = drm_gem_cma_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -277,62 +278,6 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
-static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-/**
- * drm_gem_cma_mmap - memory-map a CMA GEM object
- * @filp: file object
- * @vma: VMA for the area to be mapped
- *
- * This function implements an augmented version of the GEM DRM file mmap
- * operation for CMA objects: In addition to the usual GEM VMA setup it
- * immediately faults in the entire object instead of using on-demaind
- * faulting. Drivers which employ the CMA helpers should use this function
- * as their ->mmap() handler in the DRM device file's file_operations
- * structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_CMA_FOPS().macro.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *gem_obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- cma_obj = to_drm_gem_cma_obj(gem_obj);
-
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-
#ifndef CONFIG_MMU
/**
* drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
@@ -424,18 +369,18 @@ void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
- * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object
* @obj: GEM object
*
- * This function exports a scatter/gather table suitable for PRIME usage by
+ * This function exports a scatter/gather table by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
* set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
struct sg_table *sgt;
@@ -456,7 +401,7 @@ out:
kfree(sgt);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
/**
* drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
@@ -501,40 +446,13 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
- * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function maps a buffer imported via DRM PRIME into a userspace
- * process's address space. Drivers that use the CMA helpers should set this
- * as their &drm_driver.gem_prime_mmap callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- cma_obj = to_drm_gem_cma_obj(obj);
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
-
-/**
- * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space
* @obj: GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
- * This function maps a buffer exported via DRM PRIME into the kernel's
+ * This function maps a buffer into the kernel's
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
@@ -543,7 +461,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -551,7 +469,44 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+
+/**
+ * drm_gem_cma_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer into a userspace process's address space.
+ * In addition to the usual GEM VMA setup it immediately faults in the entire
+ * object instead of using on-demand faulting. Drivers that use the CMA
+ * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags &= ~VM_PFNMAP;
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
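
For illustration, a minimal sketch of how a driver built on the CMA helpers could wire up the renamed and reworked callbacks above; the my_gem_object_funcs table is hypothetical and not part of this patch:

	static const struct drm_gem_object_funcs my_gem_object_funcs = {
		.free = drm_gem_cma_free_object,
		.print_info = drm_gem_cma_print_info,
		.get_sg_table = drm_gem_cma_get_sg_table,
		.vmap = drm_gem_cma_vmap,
		.mmap = drm_gem_cma_mmap,	/* new object-based mmap */
		.vm_ops = &drm_gem_cma_vm_ops,
	};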
/**
* drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 02ca22e90290..0b232a73c1b7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
if (gbo->vmap_use_count > 0)
goto out;
- ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
- if (ret)
- return ret;
+ /*
+ * VRAM helpers unmap the BO only on demand. So the previous
+ * page mapping might still be around. Only vmap if there's
+ * no mapping present.
+ */
+ if (dma_buf_map_is_null(&gbo->map)) {
+ ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+ if (ret)
+ return ret;
+ }
out:
++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
return;
ttm_bo_vunmap(bo, &gbo->map);
+ dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 81d386b5b92a..fad2249ee67b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -191,6 +191,9 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ u32 handle);
+
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09d6e9e2e075..c3bd664ea733 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -122,7 +122,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
dev->driver->irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- if (dev->pdev)
+ if (dev_is_pci(dev->dev))
sh_flags = IRQF_SHARED;
ret = request_irq(irq, dev->driver->irq_handler,
@@ -140,7 +140,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (ret < 0) {
dev->irq_enabled = false;
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
free_irq(irq, dev);
} else {
dev->irq = irq;
@@ -203,7 +203,7 @@ int drm_irq_uninstall(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
@@ -214,12 +214,45 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+static void devm_drm_irq_uninstall(void *data)
+{
+ drm_irq_uninstall(data);
+}
+
+/**
+ * devm_drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
+ *
+ * devm_drm_irq_install() is a managed variant of drm_irq_install().
+ *
+ * If the driver uses devm_drm_irq_install(), there is no need to call
+ * drm_irq_uninstall() when the DRM module gets unloaded, as this is
+ * done automatically.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int devm_drm_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ ret = drm_irq_install(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev->dev,
+ devm_drm_irq_uninstall, dev);
+}
+EXPORT_SYMBOL(devm_drm_irq_install);
+
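
As a usage sketch (hypothetical driver code, not from this patch), a load path can then drop its explicit teardown:

	static int my_load(struct drm_device *drm, int irq)
	{
		/*
		 * Managed variant: the handler is uninstalled automatically
		 * when the underlying device goes away, so the unbind path
		 * needs no drm_irq_uninstall() call.
		 */
		return devm_drm_irq_install(drm, irq);
	}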
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
+ struct pci_dev *pdev;
/* If we don't have an IRQ, we fall back for compatibility reasons -
* this used to be a separate function in drm_dma.h
@@ -230,12 +263,13 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
- irq = dev->pdev->irq;
+ pdev = to_pci_dev(dev->dev);
+ irq = pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != irq)
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 221a8528c993..f933da1656eb 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- int ret;
-
- /* Call init functions from specific kms helpers here */
- ret = drm_fb_helper_modinit();
- if (ret < 0)
- goto out;
-
- ret = drm_dp_aux_dev_init();
- if (ret < 0)
- goto out;
-
-out:
- return ret;
+ /*
+ * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable
+ * console.
+ */
+ if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+ IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ !IS_ENABLED(CONFIG_EXPERT))
+ request_module_nowait("fbcon");
+
+ return drm_dp_aux_dev_init();
}
static void __exit drm_kms_helper_exit(void)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 1be3ea320474..f71358f9eac9 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -127,7 +127,7 @@ static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
#endif
-#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index fbea69d6f909..e4f20a2eb6e7 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,7 +37,6 @@
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
@@ -100,24 +99,6 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory *handle, int pages)
-{
- agp_free_memory(handle);
-}
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory *handle, unsigned int start)
-{
- return agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory *handle)
-{
- return agp_unbind_memory(handle);
-}
-
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
@@ -156,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
- struct resource *tmp;
- resource_size_t max_iomem = 0;
-
- /*
- * Xen paravirtual hosts require swiotlb regardless of requested dma
- * transfer size.
- *
- * NOTE: Really, what it requires is use of the dma_alloc_coherent
- * allocator used in ttm_dma_populate() instead of
- * ttm_populate_and_map_pages(), which bounce buffers so much in
- * Xen it leads to swiotlb buffer exhaustion.
- */
- if (xen_pv_domain())
- return true;
-
- /*
- * Enforce dma_alloc_coherent when memory encryption is active as well
- * for the same reasons as for Xen paravirtual hosts.
- */
- if (mem_encrypt_active())
- return true;
-
- for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
- max_iomem = max(max_iomem, tmp->end);
- }
-
- return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index f1affc1bb679..37b4b9f0e468 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -195,7 +195,7 @@ void drm_mode_config_reset(struct drm_device *dev)
crtc->funcs->reset(crtc);
drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
+ if (encoder->funcs && encoder->funcs->reset)
encoder->funcs->reset(encoder);
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -625,6 +625,10 @@ static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
void drm_mode_config_validate(struct drm_device *dev)
{
struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ u32 primary_with_crtc = 0, cursor_with_crtc = 0;
+ unsigned int num_primary = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -636,4 +640,49 @@ void drm_mode_config_validate(struct drm_device *dev)
validate_encoder_possible_clones(encoder);
validate_encoder_possible_crtcs(encoder);
}
+
+ drm_for_each_crtc(crtc, dev) {
+ WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
+
+ WARN(crtc->cursor && crtc->funcs->cursor_set,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_set2,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_move,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
+ crtc->base.id, crtc->name);
+
+ if (crtc->primary) {
+ WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->primary->base.id, crtc->primary->name,
+ crtc->base.id, crtc->name);
+ WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
+ "Primary plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->primary->base.id, crtc->primary->name);
+ primary_with_crtc |= drm_plane_mask(crtc->primary);
+ }
+ if (crtc->cursor) {
+ WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->cursor->base.id, crtc->cursor->name,
+ crtc->base.id, crtc->name);
+ WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
+ "Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->cursor->base.id, crtc->cursor->name);
+ cursor_with_crtc |= drm_plane_mask(crtc->cursor);
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ num_primary++;
+ }
+
+ WARN(num_primary != dev->mode_config.num_crtc,
+ "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
+ num_primary, dev->mode_config.num_crtc);
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 33fb2f05ce66..1ac67d4505e0 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock * 1000;
+ num = mode->clock;
den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
- return DIV_ROUND_CLOSEST(num, den);
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
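
The widened multiply matters for very fast modes: with the old 32-bit "num = mode->clock * 1000", any pixel clock above INT_MAX / 1000, roughly 2147483 kHz, would wrap before the division. A sketch of the overflow-safe arithmetic, with illustrative numbers:

	u32 clock = 2500000;			/* kHz; 2.5 GHz pixel clock */
	u32 den = 2200 * 1125;			/* htotal * vtotal */
	u64 num = mul_u32_u32(clock, 1000);	/* 2500000000, no 32-bit wrap */
	int vrefresh = DIV_ROUND_CLOSEST_ULL(num, den);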
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 6dba4b8ce4fe..2294a1580d35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -24,6 +24,8 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -36,6 +38,9 @@
#include "drm_legacy.h"
#ifdef CONFIG_DRM_LEGACY
+/* List of devices hanging off drivers with stealth attach. */
+static LIST_HEAD(legacy_dev_list);
+static DEFINE_MUTEX(legacy_dev_list_lock);
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@@ -65,7 +70,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ dmah->vaddr = dma_alloc_coherent(dev->dev, size,
&dmah->busaddr,
GFP_KERNEL);
@@ -88,7 +93,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dma_free_coherent(dev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
@@ -107,16 +112,18 @@ static int drm_get_pci_domain(struct drm_device *dev)
return 0;
#endif /* __alpha__ */
- return pci_domain_nr(dev->pdev->bus);
+ return pci_domain_nr(to_pci_dev(dev->dev)->bus);
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
if (!master->unique)
return -ENOMEM;
@@ -126,12 +133,14 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ (p->busnum & 0xff) != pdev->bus->number ||
+ p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
return -EINVAL;
- p->irq = dev->pdev->irq;
+ p->irq = pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -159,7 +168,7 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
/* UMS was only ever supported on PCI devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
@@ -183,7 +192,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -196,7 +205,7 @@ static void drm_pci_agp_init(struct drm_device *dev)
static int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
- struct drm_driver *driver)
+ const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -225,10 +234,11 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
if (ret)
goto err_agp;
- /* No locking needed since shadow-attach is single-threaded since it may
- * only be called from the per-driver module init hook. */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
+ mutex_lock(&legacy_dev_list_lock);
+ list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
+ mutex_unlock(&legacy_dev_list_lock);
+ }
return 0;
@@ -249,7 +259,8 @@ err_free:
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
@@ -261,7 +272,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
- INIT_LIST_HEAD(&driver->legacy_dev_list);
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
@@ -295,7 +305,8 @@ EXPORT_SYMBOL(drm_legacy_pci_init);
* Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
* is deprecated and only used by dri1 drivers.
*/
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
@@ -304,11 +315,15 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
if (!(driver->driver_features & DRIVER_LEGACY)) {
WARN_ON(1);
} else {
- list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ mutex_lock(&legacy_dev_list_lock);
+ list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
legacy_dev_list) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
+ if (dev->driver == driver) {
+ list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
+ }
}
+ mutex_unlock(&legacy_dev_list_lock);
}
DRM_INFO("Module unloaded\n");
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index e6231947f987..338650abd267 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -30,6 +30,7 @@
#include <drm/drm_file.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_internal.h"
@@ -40,7 +41,7 @@
* A plane represents an image source that can be blended with or overlayed on
* top of a CRTC during the scanout process. Planes take their input data from a
* &drm_framebuffer object. The plane itself specifies the cropping and scaling
- * of that image, and where it is placed on the visible are of a display
+ * of that image, and where it is placed on the visible area of a display
* pipeline, represented by &drm_crtc. A plane can also have additional
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
@@ -49,14 +50,34 @@
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
- * Cursor and overlay planes are optional. All drivers should provide one
- * primary plane per CRTC to avoid surprising userspace too much. See enum
- * drm_plane_type for a more in-depth discussion of these special uapi-relevant
- * plane types. Special planes are associated with their CRTC by calling
- * drm_crtc_init_with_planes().
- *
* The type of a plane is exposed in the immutable "type" enumeration property,
- * which has one of the following values: "Overlay", "Primary", "Cursor".
+ * which has one of the following values: "Overlay", "Primary", "Cursor" (see
+ * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
+ * &drm_plane.possible_crtcs.
+ *
+ * Each CRTC must have a unique primary plane userspace can attach to enable
+ * the CRTC. In other words, userspace must be able to attach a different
+ * primary plane to each CRTC at the same time. Primary planes can still be
+ * compatible with multiple CRTCs. There must be exactly as many primary planes
+ * as there are CRTCs.
+ *
+ * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core
+ * relies on the driver to set the primary and optionally the cursor plane used
+ * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All
+ * drivers must provide one primary plane per CRTC to avoid surprising legacy
+ * userspace too much.
+ */
+
+/**
+ * DOC: standard plane properties
+ *
+ * DRM planes have a few standardized properties:
+ *
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a struct
+ * drm_format_modifier_blob. Without this property the plane doesn't
+ * support buffers with modifiers. Userspace cannot change this property.
*/
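
As a hypothetical illustration (not part of this patch), the IN_FORMATS blob is generated from the modifier array a driver passes to drm_universal_plane_init(), terminated by DRM_FORMAT_MOD_INVALID:

	static const u64 my_format_modifiers[] = {
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID,	/* mandatory terminator */
	};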
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -152,31 +173,16 @@ done:
return 0;
}
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @format_modifiers: array of struct drm_format modifiers terminated by
- * DRM_FORMAT_MOD_INVALID
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type,
- const char *name, ...)
+__printf(9, 0)
+static int __drm_universal_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
unsigned int format_modifier_count = 0;
@@ -237,11 +243,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (name) {
- va_list ap;
-
- va_start(ap, name);
plane->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
@@ -286,8 +288,102 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return 0;
}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook
+ * should call drm_plane_cleanup() and kfree() the plane structure. The plane
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_universal_plane_alloc() instead of
+ * drm_universal_plane_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ return ret;
+}
EXPORT_SYMBOL(drm_universal_plane_init);
+static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_plane *plane = ptr;
+
+ if (WARN_ON(!plane->dev))
+ return;
+
+ drm_plane_cleanup(plane);
+}
+
+void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
+ size_t offset, uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_plane *plane;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ plane = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release,
+ plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_universal_plane_alloc);
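
A hypothetical driver-side sketch, assuming the matching drmm_universal_plane_alloc() wrapper macro on the header side of this change; my_plane_funcs and my_formats are placeholders. The funcs table must not set .destroy, since cleanup is now managed:

	struct my_plane {
		struct drm_plane base;
	};

	static struct my_plane *my_plane_create(struct drm_device *dev)
	{
		return drmm_universal_plane_alloc(dev, struct my_plane, base,
						  BIT(0), &my_plane_funcs,
						  my_formats,
						  ARRAY_SIZE(my_formats),
						  NULL,
						  DRM_PLANE_TYPE_PRIMARY,
						  NULL);
	}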
+
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
@@ -1163,7 +1259,14 @@ retry:
if (ret)
goto out;
- if (old_fb->format != fb->format) {
+ /*
+ * Only check the FOURCC format code, excluding modifiers. This is
+ * enough for all legacy drivers. Atomic drivers have their own
+ * checks in their ->atomic_check implementation, which will
+ * return -EINVAL if any hw or driver constraint is violated due
+ * to modifier changes.
+ */
+ if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7db55fce35d8..2a54f86856af 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -717,6 +717,8 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
if (obj->funcs && obj->funcs->mmap) {
+ vma->vm_ops = obj->funcs->vm_ops;
+
ret = obj->funcs->mmap(obj, vma);
if (ret)
return ret;
@@ -978,44 +980,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import);
/**
- * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * drm_prime_sg_to_page_array - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the pages in
+ * @max_entries: size of the passed-in array
+ *
+ * Exports an sg table into an array of pages.
+ *
+ * This function is deprecated and its use is strongly discouraged.
+ * The page array is only useful for page faults, and those can corrupt
+ * fields in struct page if they are not handled by the exporting driver.
+ */
+int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
+ struct page **pages,
+ int max_entries)
+{
+ struct sg_page_iter page_iter;
+ struct page **p = pages;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ if (WARN_ON(p - pages >= max_entries))
+ return -1;
+ *p++ = sg_page_iter_page(&page_iter);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_array);
+
+/**
+ * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
* @sgt: scatter-gather table to convert
- * @pages: optional array of page pointers to store the page array in
- * @addrs: optional array to store the dma bus address of each page
+ * @addrs: array to store the dma bus address of each page
* @max_entries: size of both the passed-in arrays
*
- * Exports an sg table into an array of pages and addresses. This is currently
- * required by the TTM driver in order to do correct fault handling.
+ * Exports an sg table into an array of addresses.
*
- * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
* implementation.
*/
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_entries)
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_entries)
{
struct sg_dma_page_iter dma_iter;
- struct sg_page_iter page_iter;
- struct page **p = pages;
dma_addr_t *a = addrs;
- if (pages) {
- for_each_sgtable_page(sgt, &page_iter, 0) {
- if (WARN_ON(p - pages >= max_entries))
- return -1;
- *p++ = sg_page_iter_page(&page_iter);
- }
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (WARN_ON(a - addrs >= max_entries))
+ return -1;
+ *a++ = sg_page_iter_dma_address(&dma_iter);
}
- if (addrs) {
- for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
- if (WARN_ON(a - addrs >= max_entries))
- return -1;
- *a++ = sg_page_iter_dma_address(&dma_iter);
- }
- }
-
return 0;
}
-EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
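
A sketch of a &drm_driver.gem_prime_import_sg_table body using the new helper; the bo object and its dma_addrs array are assumptions for illustration:

	unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;

	/* Returns -1 (with a WARN) if sgt holds more pages than npages. */
	if (drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs, npages))
		return ERR_PTR(-EINVAL);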
/**
* drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d6017726cc2a..ad59a51eab6d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -515,7 +515,8 @@ retry:
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_override_edid_modes(connector);
- if (count == 0 && connector->status == connector_status_connected)
+ if (count == 0 && (connector->status == connector_status_connected ||
+ connector->status == connector_status_unknown))
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
if (count == 0)
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 743e57c1b44f..6ce8f5cd1eb5 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -55,8 +56,9 @@ static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
* stored in the device structure. Free the encoder's memory as part of
* the device release function.
*
- * FIXME: Later improvements to DRM's resource management may allow for
- * an automated kfree() of the encoder's memory.
+ * Note: consider using drmm_simple_encoder_alloc() instead of
+ * drm_simple_encoder_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -71,6 +73,14 @@ int drm_simple_encoder_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_simple_encoder_init);
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type)
+{
+ return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type,
+ NULL);
+}
+EXPORT_SYMBOL(__drmm_simple_encoder_alloc);
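
A hypothetical use, assuming the matching drmm_simple_encoder_alloc() wrapper macro on the header side of this change; the encoder is freed through DRM managed resources, so the device release function no longer kfree()s it:

	struct my_output {
		struct drm_encoder encoder;
	};

	static struct my_output *my_output_create(struct drm_device *dev)
	{
		return drmm_simple_encoder_alloc(dev, struct my_output,
						 encoder,
						 DRM_MODE_ENCODER_NONE);
	}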
+
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 6e74e6745eca..349146049849 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
- return 0;
+ goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index d30e2f2b8f3c..893165eeddf3 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -74,7 +74,7 @@
* |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
* | | frame as it
* | | travels down
- * | | ("sacn out")
+ * | | ("scan out")
* | Old frame |
* | |
* | |
@@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev,
break;
}
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
- drm_send_event_locked(dev, &e->base);
+ /*
+ * Use the same timestamp for any associated fence signal to avoid
+ * mismatch in timestamps for vsync & fence events triggered by the
+ * same HW event. Frameworks like SurfaceFlinger in Android expect the
+ * retire-fence timestamp to match the HW vsync exactly, as they use it
+ * for their software vsync modeling.
+ */
+ drm_send_event_timestamp_locked(dev, &e->base, now);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 6d5a03b32238..9b3b989d7cad 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -278,7 +278,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index d9bd83203a15..b390dd4d60b7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
- NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 951d5f708e92..6a251e3aa779 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -89,7 +89,6 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST
- select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 967a5cdc120e..1e0c5a7f206e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
- struct frame_vector *vec;
+ struct page **pages;
+ unsigned int npages;
struct sg_table *sgt;
atomic_t refcount;
bool in_pool;
@@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
bool force)
{
struct g2d_cmdlist_userptr *g2d_userptr = obj;
- struct page **pages;
if (!obj)
return;
@@ -398,15 +398,9 @@ out:
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
DMA_BIDIRECTIONAL, 0);
- pages = frame_vector_pages(g2d_userptr->vec);
- if (!IS_ERR(pages)) {
- int i;
-
- for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
- set_page_dirty_lock(pages[i]);
- }
- put_vaddr_frames(g2d_userptr->vec);
- frame_vector_destroy(g2d_userptr->vec);
+ unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
+ true);
+ kvfree(g2d_userptr->pages);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
@@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->vec = frame_vector_create(npages);
- if (!g2d_userptr->vec) {
+ g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
+ GFP_KERNEL);
+ if (!g2d_userptr->pages) {
ret = -ENOMEM;
goto err_free;
}
- ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- g2d_userptr->vec);
+ ret = pin_user_pages_fast(start, npages,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ g2d_userptr->pages);
if (ret != npages) {
DRM_DEV_ERROR(g2d->dev,
"failed to get user pages from userptr.\n");
if (ret < 0)
- goto err_destroy_framevec;
- ret = -EFAULT;
- goto err_put_framevec;
- }
- if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
+ goto err_destroy_pages;
+ npages = ret;
ret = -EFAULT;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
+ g2d_userptr->npages = npages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
ret = sg_alloc_table_from_pages(sgt,
- frame_vector_pages(g2d_userptr->vec),
+ g2d_userptr->pages,
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
@@ -538,11 +532,11 @@ err_sg_free_table:
err_free_sgt:
kfree(sgt);
-err_put_framevec:
- put_vaddr_frames(g2d_userptr->vec);
+err_unpin_pages:
+ unpin_user_pages(g2d_userptr->pages, npages);
-err_destroy_framevec:
- frame_vector_destroy(g2d_userptr->vec);
+err_destroy_pages:
+ kvfree(g2d_userptr->pages);
err_free:
kfree(g2d_userptr);
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 0e23c93a1094..ec395658a43f 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
- tristate "Intel GMA5/600 KMS Framebuffer"
+ tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
- select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
@@ -19,17 +18,3 @@ config DRM_GMA600
help
Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
platforms with LVDS ports. MIPI is not currently supported.
-
-config DRM_GMA3600
- bool "Intel GMA3600/3650 support (Experimental)"
- depends on DRM_GMA500
- help
- Say yes to include basic support for Intel GMA3600/3650 (Intel
- Cedar Trail) platforms.
-
-config DRM_MEDFIELD
- bool "Intel Medfield support (Experimental)"
- depends on DRM_GMA500 && X86_INTEL_MID
- help
- Say yes to include support for the Intel Medfield platform.
-
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index c8f2c89be99d..884ab1f9063e 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -6,36 +6,35 @@
gma500_gfx-y += \
accel_2d.o \
backlight.o \
+ blitter.o \
+ cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_dp.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o \
framebuffer.o \
gem.o \
+ gma_device.o \
+ gma_display.o \
gtt.o \
intel_bios.o \
- intel_i2c.o \
intel_gmbus.o \
+ intel_i2c.o \
+ mid_bios.o \
mmu.o \
- blitter.o \
power.o \
+ psb_device.o \
psb_drv.o \
- gma_display.o \
- gma_device.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
psb_lid.o \
- psb_irq.o \
- psb_device.o \
- mid_bios.o
+ psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o \
-gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
- cdv_intel_crt.o \
- cdv_intel_display.o \
- cdv_intel_hdmi.o \
- cdv_intel_lvds.o \
- cdv_intel_dp.o
-
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_lvds.o \
@@ -43,14 +42,4 @@ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
-gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
- mdfld_output.o \
- mdfld_intel_display.o \
- mdfld_dsi_output.o \
- mdfld_dsi_dpi.o \
- mdfld_dsi_pkg_sender.o \
- mdfld_tpo_vid.o \
- mdfld_tmd_vid.o \
- tc35876x-dsi-lvds.o
-
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index e75293e4a52f..19e055dbd4c2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -95,13 +95,14 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
static int cdv_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (cdv_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
- pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ pci_read_config_byte(pdev, 0xF4, &lbpc);
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
@@ -111,6 +112,7 @@ static int cdv_get_brightness(struct backlight_device *bd)
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
@@ -128,7 +130,7 @@ static int cdv_set_brightness(struct backlight_device *bd)
lbpc = level * 0xfe / max + 1;
level /= lbpc;
- pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ pci_write_config_byte(pdev, 0xF4, lbpc);
}
blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -205,8 +207,9 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
int i;
dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
@@ -234,6 +237,8 @@ static void cdv_init_pm(struct drm_device *dev)
static void cdv_errata(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
/* Disable bonus launch.
* CPU and GPU compete for memory, and the display misses updates and
* flickers. Worst with dual core, dual displays.
@@ -242,7 +247,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
@@ -255,12 +260,13 @@ static void cdv_errata(struct drm_device *dev)
static int cdv_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
dev_dbg(dev->dev, "Saving GPU registers.\n");
- pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+ pci_read_config_byte(pdev, 0xF4, &regs->cdv.saveLBB);
regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
@@ -309,11 +315,12 @@ static int cdv_save_display_registers(struct drm_device *dev)
static int cdv_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
u32 temp;
- pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+ pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB);
REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
@@ -421,16 +428,16 @@ static int cdv_power_up(struct drm_device *dev)
static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
- hotplug_work);
+ hotplug_work);
struct drm_device *dev = dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
-}
+}
/* The core driver has received a hotplug IRQ. We are in IRQ context
so extract the needed information and kick off queued processing */
-
+
static int cdv_hotplug_event(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -449,7 +456,7 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
} else {
REG_WRITE(PORT_HOTPLUG_EN, 0);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
- }
+ }
}
static const char *force_audio_names[] = {
@@ -568,9 +575,10 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(dev->pdev))
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 88535f5aacc5..c48c9d322dfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -127,7 +127,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
* \return true if CRT is connected.
@@ -278,8 +278,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
+ dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 686385a66167..5d3302249779 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -551,7 +551,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
}
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bfd9a15d63b1..6d3ada39ff86 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -306,7 +306,7 @@ static uint32_t dp_vswing_premph_table[] = {
};
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
- * @intel_dp: DP struct
+ * @encoder: GMA encoder struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
@@ -1687,7 +1687,7 @@ static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
return status;
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 0d12c6ffbc40..e525689f84f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -22,9 +22,6 @@
*
* Authors:
* jim liu <jim.liu@intel.com>
- *
- * FIXME:
- * We should probably make this generic and share it with Medfield
*/
#include <linux/pm_runtime.h>
@@ -56,7 +53,6 @@ struct mid_intel_hdmi_priv {
bool has_hdmi_audio;
/* Should be set when a hotplug event is detected */
bool hdmi_device_connected;
- struct mdfld_hdmi_i2c *i2c_bus;
struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
struct drm_device *dev;
};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index eaaf4efec217..5bff7d9e3aa6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -74,7 +74,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-/**
+/*
* Sets the backlight level.
*
* level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
@@ -99,7 +99,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
}
}
-/**
+/*
* Sets the power state for the panel.
*/
static void cdv_intel_lvds_set_power(struct drm_device *dev,
@@ -291,7 +291,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
REG_WRITE(PFIT_CONTROL, pfit_control);
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -471,6 +471,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -554,7 +555,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
"LVDSBLC_B");
if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
gma_encoder->i2c_bus->slave_addr = 0x2C;
@@ -575,7 +576,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
GPIOC,
"LVDSDDC_C");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fc4fda1d258b..ebe9dccf2d83 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -159,7 +159,7 @@ static const struct fb_ops psbfb_unaccel_ops = {
* @dev: our DRM device
* @fb: framebuffer to set up
* @mode_cmd: mode description
- * @gt: backing object
+ * @obj: backing object
*
* Configure and fill in the boilerplate for our frame buffer. Return
* 0 on success or an error code if we fail.
@@ -197,7 +197,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
* @mode_cmd: the description of the requested mode
- * @gt: the backing object
+ * @obj: the backing object
*
* Create a framebuffer object backed by the gt, and fill in the
* boilerplate required
@@ -252,7 +252,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/**
* psbfb_create - create a framebuffer
- * @fbdev: the framebuffer device
+ * @fb_helper: the framebuffer helper
* @sizes: specification of the layout
*
* Create a framebuffer to the specifications provided
@@ -262,6 +262,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
{
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -325,8 +326,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+ info->fix.mmio_start = pci_resource_start(pdev, 0);
+ info->fix.mmio_len = pci_resource_len(pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -529,6 +530,7 @@ void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
drm_mode_config_init(dev);
@@ -540,8 +542,7 @@ void psb_modeset_init(struct drm_device *dev)
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
- &(dev->mode_config.fb_base));
+ pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index db827e591403..fbf420051ef5 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
#include <drm/drm.h>
#include <drm/drm_vma_manager.h>
+#include "gem.h"
#include "psb_drv.h"
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
@@ -49,6 +50,8 @@ const struct drm_gem_object_funcs psb_gem_object_funcs = {
* @dev: our device
* @size: the size requested
* @handlep: returned handle (opaque number)
+ * @stolen: unused
+ * @align: unused
*
* Create a GEM object, fill in the boilerplate and attach a handle to
* it so that userspace can speak about it. This does the core work
@@ -97,7 +100,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
/**
* psb_gem_dumb_create - create a dumb buffer
- * @drm_file: our client file
+ * @file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
@@ -116,7 +119,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/**
* psb_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index 3741a711b9fd..bae6454ead29 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,6 +8,8 @@
#ifndef _GEM_H
#define _GEM_H
+struct drm_device;
+
extern const struct drm_gem_object_funcs psb_gem_object_funcs;
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 869f30392566..4c91e86f4b14 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -6,12 +6,14 @@
**************************************************************************/
#include "psb_drv.h"
+#include "gma_device.h"
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 3df6d6e850f5..b03f7b8241f2 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -20,7 +20,7 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-/**
+/*
* Returns whether any output on the specified pipe is of the specified type
*/
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
@@ -180,7 +180,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
return 0;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -559,14 +559,14 @@ int gma_crtc_set_config(struct drm_mode_set *set,
if (!dev_priv->rpm_enabled)
return drm_crtc_helper_set_config(set, ctx);
- pm_runtime_forbid(&dev->pdev->dev);
+ pm_runtime_forbid(dev->dev);
ret = drm_crtc_helper_set_config(set, ctx);
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
return ret;
}
-/**
+/*
* Save HW states of given crtc
*/
void gma_crtc_save(struct drm_crtc *crtc)
@@ -609,7 +609,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
-/**
+/*
* Restore HW states of given crtc
*/
void gma_crtc_restore(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index d246b1f70366..e884750bc123 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -340,13 +340,14 @@ static void psb_gtt_alloc(struct drm_device *dev)
void psb_gtt_takedown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
iounmap(dev_priv->gtt_map);
dev_priv->gtt_map = NULL;
}
if (dev_priv->gtt_initialized) {
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl);
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
@@ -358,6 +359,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
@@ -376,8 +378,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg = &dev_priv->gtt;
/* Enable the GTT */
- pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
@@ -397,8 +399,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pg->mmu_gatt_start = 0xE0000000;
- pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
- gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
/* CDV doesn't report this. In which case the system has 64 gtt pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
@@ -407,10 +409,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_start = dev_priv->pge_ctl;
}
- pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
- pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
>> PAGE_SHIFT;
- dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+ dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
@@ -431,7 +433,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
dev_priv->gtt_mem = &fudge;
}
- pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
- PAGE_SIZE;
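
The GTT enable path above is an ordinary PCI config-space read-modify-write, now issued through the pdev local derived from dev->dev. A condensed sketch, under the assumption that reg and enable_bit stand in for the driver's PSB_GMCH_CTRL offset and _PSB_GMCH_ENABLED bit:

    #include <linux/pci.h>

    /* Enable a GMCH feature bit and return the previous control word so
     * the takedown path can restore it, as psb_gtt_takedown() does. */
    static u16 gmch_enable(struct pci_dev *pdev, int reg, u16 enable_bit)
    {
            u16 old;

            pci_read_config_word(pdev, reg, &old);
            pci_write_config_word(pdev, reg, old | enable_bit);

            return old;
    }
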
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8ad6337eeba3..d838369f0119 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -50,7 +50,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
uint8_t panel_type;
edp = find_section(bdb, BDB_EDP);
-
+
dev_priv->edp.bpp = 18;
if (!edp) {
if (dev_priv->edp.support) {
@@ -80,7 +80,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
dev_priv->edp.pps = *edp_pps;
DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
dev_priv->edp.pps.t11_t12);
@@ -516,7 +516,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
int psb_intel_init_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
@@ -574,7 +574,7 @@ int psb_intel_init_bios(struct drm_device *dev)
return 0;
}
-/**
+/*
* Destroy and free VBT data
*/
void psb_intel_destroy_bios(struct drm_device *dev)
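
The recurring /** to /* conversions in this file and in gma_display.c exist because only true kernel-doc comments may begin with /**; scripts/kernel-doc parses those, and W=1 builds warn when the documented parameters do not match the function signature. A sketch of the distinction, with psb_example_func as a hypothetical name:

    /**
     * psb_example_func - one-line summary (kernel-doc, parsed by scripts/kernel-doc)
     * @dev: our device
     *
     * Every parameter must be documented here, or W=1 builds warn.
     */
    int psb_example_func(struct drm_device *dev);

    /*
     * A plain block comment: free-form and never parsed, so it must not
     * start with the kernel-doc marker.
     */
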
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index a083fbfe35b8..370bd6451bd9 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev->dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -417,7 +417,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"gma500 gmbus %s",
names[i]);
- bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.dev.parent = dev->dev;
bus->adapter.algo_data = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index de8810188190..5e1b4d70c317 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -85,7 +85,6 @@ static void set_data(void *data, int state_high)
/**
* psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
- * @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
*
@@ -117,7 +116,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
@@ -145,7 +144,7 @@ out_free:
/**
* psb_intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
+ * @chan: channel to free
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
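
The adapter.dev.parent conversions above fall out of the same pdev removal: the i2c core only needs a generic struct device for sysfs parenting, and drm_device already carries one. A minimal sketch of registering an adapter parented to the DRM device; register_ddc_adapter() is a hypothetical wrapper:

    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <drm/drm_device.h>

    static int register_ddc_adapter(struct drm_device *dev,
                                    struct i2c_adapter *adap)
    {
            adap->owner = THIS_MODULE;
            adap->dev.parent = dev->dev;    /* was &dev->pdev->dev */

            return i2c_add_adapter(adap);
    }
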
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
deleted file mode 100644
index b83d59b21de5..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ /dev/null
@@ -1,562 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- **************************************************************************/
-
-#include <linux/delay.h>
-#include <linux/gpio/machine.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mid_bios.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
-#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-#define BRIGHTNESS_MIN_LEVEL 1
-#define BRIGHTNESS_MAX_LEVEL 100
-#define BRIGHTNESS_MASK 0xFF
-#define BLC_POLARITY_NORMAL 0
-#define BLC_POLARITY_INVERSE 1
-#define BLC_ADJUSTMENT_MAX 100
-
-#define MDFLD_BLC_PWM_PRECISION_FACTOR 10
-#define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static struct backlight_device *mdfld_backlight_device;
-
-int mdfld_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev =
- (struct drm_device *)bl_get_data(mdfld_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int level = bd->props.brightness;
-
- DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
-
- /* Perform value bounds checking */
- if (level < BRIGHTNESS_MIN_LEVEL)
- level = BRIGHTNESS_MIN_LEVEL;
-
- if (gma_power_begin(dev, false)) {
- u32 adjusted_level = 0;
-
- /*
- * Adjust the backlight level with the percent in
- * dev_priv->blc_adj2
- */
- adjusted_level = level * dev_priv->blc_adj2;
- adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
- dev_priv->brightness_adjusted = adjusted_level;
-
- if (mdfld_get_panel_type(dev, 0) == TC35876X) {
- if (dev_priv->dpi_panel_on[0] ||
- dev_priv->dpi_panel_on[2])
- tc35876x_brightness_control(dev,
- dev_priv->brightness_adjusted);
- } else {
- if (dev_priv->dpi_panel_on[0])
- mdfld_dsi_brightness_control(dev, 0,
- dev_priv->brightness_adjusted);
- }
-
- if (dev_priv->dpi_panel_on[2])
- mdfld_dsi_brightness_control(dev, 2,
- dev_priv->brightness_adjusted);
- gma_power_end(dev);
- }
-
- /* cache the brightness for later use */
- dev_priv->brightness = level;
- return 0;
-}
-
-static int mdfld_get_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev =
- (struct drm_device *)bl_get_data(mdfld_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness);
-
- /* return locally cached var instead of HW read (due to DPST etc.) */
- return dev_priv->brightness;
-}
-
-static const struct backlight_ops mdfld_ops = {
- .get_brightness = mdfld_get_brightness,
- .update_status = mdfld_set_brightness,
-};
-
-static int device_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = (struct drm_psb_private *)
- dev->dev_private;
-
- dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
- dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
-
- return 0;
-}
-
-static int mdfld_backlight_init(struct drm_device *dev)
-{
- struct backlight_properties props;
- int ret = 0;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = BRIGHTNESS_MAX_LEVEL;
- props.type = BACKLIGHT_PLATFORM;
- mdfld_backlight_device = backlight_device_register("mdfld-bl",
- NULL, (void *)dev, &mdfld_ops, &props);
-
- if (IS_ERR(mdfld_backlight_device))
- return PTR_ERR(mdfld_backlight_device);
-
- ret = device_backlight_init(dev);
- if (ret)
- return ret;
-
- mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
- mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
- backlight_update_status(mdfld_backlight_device);
- return 0;
-}
-#endif
-
-struct backlight_device *mdfld_get_backlight_device(void)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- return mdfld_backlight_device;
-#else
- return NULL;
-#endif
-}
-
-/*
- * mdfld_save_display_registers
- *
- * Description: We are going to suspend so save current display
- * register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct medfield_state *regs = &dev_priv->regs.mdfld;
- struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
- const struct psb_offset *map = &dev_priv->regmap[pipenum];
- int i;
- u32 *mipi_val;
-
- /* register */
- u32 mipi_reg = MIPI;
-
- switch (pipenum) {
- case 0:
- mipi_val = &regs->saveMIPI;
- break;
- case 1:
- mipi_val = &regs->saveMIPI;
- break;
- case 2:
- /* register */
- mipi_reg = MIPI_C;
- /* pointer to values */
- mipi_val = &regs->saveMIPI_C;
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
- /* Pipe & plane A info */
- pipe->dpll = PSB_RVDC32(map->dpll);
- pipe->fp0 = PSB_RVDC32(map->fp0);
- pipe->conf = PSB_RVDC32(map->conf);
- pipe->htotal = PSB_RVDC32(map->htotal);
- pipe->hblank = PSB_RVDC32(map->hblank);
- pipe->hsync = PSB_RVDC32(map->hsync);
- pipe->vtotal = PSB_RVDC32(map->vtotal);
- pipe->vblank = PSB_RVDC32(map->vblank);
- pipe->vsync = PSB_RVDC32(map->vsync);
- pipe->src = PSB_RVDC32(map->src);
- pipe->stride = PSB_RVDC32(map->stride);
- pipe->linoff = PSB_RVDC32(map->linoff);
- pipe->tileoff = PSB_RVDC32(map->tileoff);
- pipe->size = PSB_RVDC32(map->size);
- pipe->pos = PSB_RVDC32(map->pos);
- pipe->surf = PSB_RVDC32(map->surf);
- pipe->cntr = PSB_RVDC32(map->cntr);
- pipe->status = PSB_RVDC32(map->status);
-
- /*save palette (gamma) */
- for (i = 0; i < 256; i++)
- pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
-
- if (pipenum == 1) {
- regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
- regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
-
- regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
- regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
- return 0;
- }
-
- *mipi_val = PSB_RVDC32(mipi_reg);
- return 0;
-}
-
-/*
- * mdfld_restore_display_registers
- *
- * Description: We are going to resume so restore display register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
-{
- /* To get panel out of ULPS mode. */
- u32 temp = 0;
- u32 device_ready_reg = DEVICE_READY_REG;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_config *dsi_config = NULL;
- struct medfield_state *regs = &dev_priv->regs.mdfld;
- struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
- const struct psb_offset *map = &dev_priv->regmap[pipenum];
- u32 i;
- u32 dpll;
- u32 timeout = 0;
-
- /* register */
- u32 mipi_reg = MIPI;
-
- /* values */
- u32 dpll_val = pipe->dpll;
- u32 mipi_val = regs->saveMIPI;
-
- switch (pipenum) {
- case 0:
- dpll_val &= ~DPLL_VCO_ENABLE;
- dsi_config = dev_priv->dsi_configs[0];
- break;
- case 1:
- dpll_val &= ~DPLL_VCO_ENABLE;
- break;
- case 2:
- mipi_reg = MIPI_C;
- mipi_val = regs->saveMIPI_C;
- dsi_config = dev_priv->dsi_configs[1];
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
-	/* Make sure VGA plane is off. It initializes to on after reset! */
- PSB_WVDC32(0x80000000, VGACNTRL);
-
- if (pipenum == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
- PSB_RVDC32(map->dpll);
-
- PSB_WVDC32(pipe->fp0, map->fp0);
- } else {
-
- dpll = PSB_RVDC32(map->dpll);
-
- if (!(dpll & DPLL_VCO_ENABLE)) {
-
-			/* When ungating power of DPLL, needs to wait 0.5us
-			   before enabling the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- PSB_WVDC32(pipe->fp0, map->fp0);
- PSB_WVDC32(dpll_val, map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, map->dpll);
- PSB_RVDC32(map->dpll);
-
- /* wait for DSI PLL to lock */
- while (timeout < 20000 &&
- !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
-
- if (timeout == 20000) {
- DRM_ERROR("%s, can't lock DSIPLL.\n",
- __func__);
- return -EINVAL;
- }
- }
- }
- /* Restore mode */
- PSB_WVDC32(pipe->htotal, map->htotal);
- PSB_WVDC32(pipe->hblank, map->hblank);
- PSB_WVDC32(pipe->hsync, map->hsync);
- PSB_WVDC32(pipe->vtotal, map->vtotal);
- PSB_WVDC32(pipe->vblank, map->vblank);
- PSB_WVDC32(pipe->vsync, map->vsync);
- PSB_WVDC32(pipe->src, map->src);
- PSB_WVDC32(pipe->status, map->status);
-
- /*set up the plane*/
- PSB_WVDC32(pipe->stride, map->stride);
- PSB_WVDC32(pipe->linoff, map->linoff);
- PSB_WVDC32(pipe->tileoff, map->tileoff);
- PSB_WVDC32(pipe->size, map->size);
- PSB_WVDC32(pipe->pos, map->pos);
- PSB_WVDC32(pipe->surf, map->surf);
-
- if (pipenum == 1) {
- /* restore palette (gamma) */
- /* udelay(50000); */
- for (i = 0; i < 256; i++)
- PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
- PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
- PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
-
- /*TODO: resume HDMI port */
-
- /*TODO: resume pipe*/
-
- /*enable the plane*/
- PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
-
- return 0;
- }
-
- /*set up pipe related registers*/
- PSB_WVDC32(mipi_val, mipi_reg);
-
- /*setup MIPI adapter + MIPI IP registers*/
- if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipenum);
-
- if (in_atomic() || in_interrupt())
- mdelay(20);
- else
- msleep(20);
-
- /*enable the plane*/
- PSB_WVDC32(pipe->cntr, map->cntr);
-
- if (in_atomic() || in_interrupt())
- mdelay(20);
- else
- msleep(20);
-
- /* LP Hold Release */
- temp = REG_READ(mipi_reg);
- temp |= LP_OUTPUT_HOLD_RELEASE;
- REG_WRITE(mipi_reg, temp);
- mdelay(1);
-
-
-	/* Set DSI host to exit from Ultra Low Power State */
- temp = REG_READ(device_ready_reg);
- temp &= ~ULPS_MASK;
- temp |= 0x3;
- temp |= EXIT_ULPS_DEV_READY;
- REG_WRITE(device_ready_reg, temp);
- mdelay(1);
-
- temp = REG_READ(device_ready_reg);
- temp &= ~ULPS_MASK;
- temp |= EXITING_ULPS;
- REG_WRITE(device_ready_reg, temp);
- mdelay(1);
-
- /*enable the pipe*/
- PSB_WVDC32(pipe->conf, map->conf);
-
- /* restore palette (gamma) */
- /* udelay(50000); */
- for (i = 0; i < 256; i++)
- PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
- return 0;
-}
-
-static int mdfld_save_registers(struct drm_device *dev)
-{
- /* mdfld_save_cursor_overlay_registers(dev); */
- mdfld_save_display_registers(dev, 0);
- mdfld_save_display_registers(dev, 2);
- mdfld_disable_crtc(dev, 0);
- mdfld_disable_crtc(dev, 2);
-
- return 0;
-}
-
-static int mdfld_restore_registers(struct drm_device *dev)
-{
- mdfld_restore_display_registers(dev, 2);
- mdfld_restore_display_registers(dev, 0);
- /* mdfld_restore_cursor_overlay_registers(dev); */
-
- return 0;
-}
-
-static int mdfld_power_down(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-static int mdfld_power_up(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-/* Medfield */
-static const struct psb_offset mdfld_regmap[3] = {
- {
- .fp0 = MRST_FPA0,
- .fp1 = MRST_FPA1,
- .cntr = DSPACNTR,
- .conf = PIPEACONF,
- .src = PIPEASRC,
- .dpll = MRST_DPLL_A,
- .htotal = HTOTAL_A,
- .hblank = HBLANK_A,
- .hsync = HSYNC_A,
- .vtotal = VTOTAL_A,
- .vblank = VBLANK_A,
- .vsync = VSYNC_A,
- .stride = DSPASTRIDE,
- .size = DSPASIZE,
- .pos = DSPAPOS,
- .surf = DSPASURF,
- .addr = MRST_DSPABASE,
- .status = PIPEASTAT,
- .linoff = DSPALINOFF,
- .tileoff = DSPATILEOFF,
- .palette = PALETTE_A,
- },
- {
- .fp0 = MDFLD_DPLL_DIV0,
- .cntr = DSPBCNTR,
- .conf = PIPEBCONF,
- .src = PIPEBSRC,
- .dpll = MDFLD_DPLL_B,
- .htotal = HTOTAL_B,
- .hblank = HBLANK_B,
- .hsync = HSYNC_B,
- .vtotal = VTOTAL_B,
- .vblank = VBLANK_B,
- .vsync = VSYNC_B,
- .stride = DSPBSTRIDE,
- .size = DSPBSIZE,
- .pos = DSPBPOS,
- .surf = DSPBSURF,
- .addr = MRST_DSPBBASE,
- .status = PIPEBSTAT,
- .linoff = DSPBLINOFF,
- .tileoff = DSPBTILEOFF,
- .palette = PALETTE_B,
- },
- {
- .fp0 = MRST_FPA0, /* This is what the old code did ?? */
- .cntr = DSPCCNTR,
- .conf = PIPECCONF,
- .src = PIPECSRC,
- /* No DPLL_C */
- .dpll = MRST_DPLL_A,
- .htotal = HTOTAL_C,
- .hblank = HBLANK_C,
- .hsync = HSYNC_C,
- .vtotal = VTOTAL_C,
- .vblank = VBLANK_C,
- .vsync = VSYNC_C,
- .stride = DSPCSTRIDE,
- .size = DSPBSIZE,
- .pos = DSPCPOS,
- .surf = DSPCSURF,
- .addr = MDFLD_DSPCBASE,
- .status = PIPECSTAT,
- .linoff = DSPCLINOFF,
- .tileoff = DSPCTILEOFF,
- .palette = PALETTE_C,
- },
-};
-
-/*
- * The GPIO lines for resetting DSI pipe 0 and 2 are available in the
- * PCI device 0000:00:0c.0 on the Medfield.
- */
-static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = {
- .table = {
- GPIO_LOOKUP("0000:00:0c.0", 128, "dsi-pipe0-reset",
- GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", 34, "dsi-pipe2-reset",
- GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static int mdfld_chip_setup(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- dev_priv->regmap = mdfld_regmap;
-
- /* Associate the GPIO lines with the DRM device */
- mdfld_dsi_pipe_gpio_table.dev_id = dev_name(dev->dev);
- gpiod_add_lookup_table(&mdfld_dsi_pipe_gpio_table);
-
- return mid_chip_setup(dev);
-}
-
-const struct psb_ops mdfld_chip_ops = {
- .name = "mdfld",
- .pipes = 3,
- .crtcs = 3,
- .lvds_mask = (1 << 1),
- .hdmi_mask = (1 << 1),
- .cursor_needs_phys = 0,
- .sgx_offset = MRST_SGX_OFFSET,
-
- .chip_setup = mdfld_chip_setup,
- .crtc_helper = &mdfld_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
-
- .output_init = mdfld_output_init,
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mdfld_backlight_init,
-#endif
-
- .save_regs = mdfld_save_registers,
- .restore_regs = mdfld_restore_registers,
- .save_crtc = gma_crtc_save,
- .restore_crtc = gma_crtc_restore,
- .power_down = mdfld_power_down,
- .power_up = mdfld_power_up,
-};
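
The Medfield code removed above drove its panel through the standard backlight class API. For reference, the registration pattern it followed looks roughly like this; register_bl() and BL_MAX_LEVEL are illustrative names mirroring the deleted mdfld_backlight_init():

    #include <linux/backlight.h>

    #define BL_MAX_LEVEL 100        /* BRIGHTNESS_MAX_LEVEL in the deleted code */

    static struct backlight_device *register_bl(struct device *parent,
                                                void *drvdata,
                                                const struct backlight_ops *ops)
    {
            struct backlight_properties props = {
                    .type = BACKLIGHT_PLATFORM,
                    .max_brightness = BL_MAX_LEVEL,
                    .brightness = BL_MAX_LEVEL,
            };

            /* Creates /sys/class/backlight/mdfld-bl; the core calls
             * ops->update_status whenever userspace sets brightness. */
            return backlight_device_register("mdfld-bl", parent, drvdata,
                                             ops, &props);
    }
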
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
deleted file mode 100644
index ae1223f631a7..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm_simple_kms_helper.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
- int pipe);
-
-static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) &&
- (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
- & DSI_FIFO_GEN_HS_CTRL_FULL)) {
- udelay(100);
- timeout++;
- }
- if (timeout == 20000)
- DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
- DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
-}
-
-static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
-{
- u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
- & DSI_INTR_STATE_SPL_PKG_SENT))) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
-}
-
-/* For TC35876X */
-
-static void dsi_set_device_ready_state(struct drm_device *dev, int state,
- int pipe)
-{
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
-}
-
-static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
- int state, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
-
- u32 dspcntr = dev_priv->dspcntr[pipe];
- u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (pipe) {
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- } else
- mipi &= (~0x03);
-
- if (state) {
- /*Set up pipe */
- REG_WRITE(pipeconf_reg, BIT(31));
-
- if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
- __func__);
-
- /*Set up display plane */
- REG_WRITE(dspcntr_reg, dspcntr);
- } else {
- u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
-
- /* Put DSI lanes to ULPS to disable pipe */
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
- REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
-
- /* LP Hold */
- REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
- REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
-
- /* Disable display plane */
- REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
-
- /* Flush the plane changes ??? posted write? */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
-
- /* Disable PIPE */
- REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
-
- if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
- __func__);
-
- if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
- dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
- __func__);
- }
-}
-
-static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
- int pipe)
-{
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->dpi_panel_on[pipe]) {
- dev_err(dev->dev, "DPI panel is already off\n");
- return;
- }
- tc35876x_toshiba_bridge_panel_off(dev);
- tc35876x_set_bridge_reset_state(dev, 1);
- dsi_set_pipe_plane_enable_state(dev, 0, pipe);
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- dsi_set_device_ready_state(dev, 0, pipe);
-}
-
-static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
- int pipe)
-{
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (dev_priv->dpi_panel_on[pipe]) {
- dev_err(dev->dev, "DPI panel is already on\n");
- return;
- }
-
- /* For resume path sequence */
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- dsi_set_device_ready_state(dev, 0, pipe);
-
- dsi_set_device_ready_state(dev, 1, pipe);
- tc35876x_set_bridge_reset_state(dev, 0);
- tc35876x_configure_lvds_bridge(dev);
- mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */
- dsi_set_pipe_plane_enable_state(dev, 1, pipe);
-}
-/* End for TC35876X */
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_dsi_tpo_ic_init
- *
- * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
- *		restore_display_registers. Since this function does not
- * acquire the mutex, it is important that the calling function
- * does!
-\* ************************************************************************* */
-static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 dcsChannelNumber = dsi_config->channel_num;
- u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
- u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
- u32 gen_ctrl_val = GEN_LONG_WRITE;
-
- DRM_INFO("Enter mrst init TPO MIPI display.\n");
-
- gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
-
- /* Flip page order */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00008036);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* Write protection key */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af1);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xFC */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5afc);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xB7 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x770000b7);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000044);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
-
- /* 0xB6 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000a0ab6);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xF2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x081010f2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x4a070708);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000c5);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xF8 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x024003f8);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x01030a04);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0e020220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000004);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
-
- /* 0xE2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x398fc3e2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0000916f);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
-
- /* 0xB0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000b0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF4 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x240242f4);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x78ee2002);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2a071050);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x507fee10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x10300710);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
-
- /* 0xBA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x19fe07ba);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x101c0a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000010);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xBB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x28ff07bb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x24280a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000034);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xFB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d05fb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1b1a2130);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x221e180e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x131d2120);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d0508);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1a2131);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x231f160d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x111b2220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535c2008);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1f1d2433);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c251a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c34372d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000023);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* 0xFA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x525c0bfa);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1c232f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2623190e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x18212625);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d0d0e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1e1d2333);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x26231a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1a222725);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d280f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x21202635);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31292013);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31393d33);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000029);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* Set DM */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000100f7);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-}
-
-static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
- int num_lane, int bpp)
-{
- return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
-}
-
-/*
- * Calculate the DPI timing based on a given drm mode @mode.
- * Return 0 on success.
- * FIXME: I was using proposed mode value for calculation, may need to
- * use crtc mode values later
- */
-int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp)
-{
- int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
- int pclk_vsync, pclk_vfp, pclk_vbp;
-
- pclk_hactive = mode->hdisplay;
- pclk_hfp = mode->hsync_start - mode->hdisplay;
- pclk_hsync = mode->hsync_end - mode->hsync_start;
- pclk_hbp = mode->htotal - mode->hsync_end;
-
- pclk_vfp = mode->vsync_start - mode->vdisplay;
- pclk_vsync = mode->vsync_end - mode->vsync_start;
- pclk_vbp = mode->vtotal - mode->vsync_end;
-
- /*
- * byte clock counts were calculated by following formula
- * bclock_count = pclk_count * bpp / num_lane / 8
- */
- dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hsync, num_lane, bpp);
- dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hbp, num_lane, bpp);
- dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hfp, num_lane, bpp);
- dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hactive, num_lane, bpp);
- dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vsync, num_lane, bpp);
- dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vbp, num_lane, bpp);
- dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vfp, num_lane, bpp);
-
- return 0;
-}
-
-void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- int lane_count = dsi_config->lane_count;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode *mode = dsi_config->mode;
- u32 val;
-
- /*un-ready device*/
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
-
- /*init dsi adapter before kicking off*/
- REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
-
- /*enable all interrupts*/
- REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
-
- /*set up func_prg*/
- val = lane_count;
- val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
-
- switch (dsi_config->bpp) {
- case 16:
- val |= DSI_DPI_COLOR_FORMAT_RGB565;
- break;
- case 18:
- val |= DSI_DPI_COLOR_FORMAT_RGB666;
- break;
- case 24:
- val |= DSI_DPI_COLOR_FORMAT_RGB888;
- break;
- default:
- DRM_ERROR("unsupported color format, bpp = %d\n",
- dsi_config->bpp);
- }
- REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
-
- REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
- (mode->vtotal * mode->htotal * dsi_config->bpp /
- (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
- REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
- 0xffff & DSI_LP_RX_TIMEOUT_MASK);
-
- /*max value: 20 clock cycles of txclkesc*/
- REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
- 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
-
- /*min 21 txclkesc, max: ffffh*/
- REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
- 0xffff & DSI_RESET_TIMER_MASK);
-
- REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
- mode->vdisplay << 16 | mode->hdisplay);
-
- /*set DPI timing registers*/
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
- dsi_config->lane_count, dsi_config->bpp);
-
- REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
- dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
- dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
- dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
- dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
- dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
- dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
- dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-
- REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
-
- /*min: 7d0 max: 4e20*/
- REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
-
- /*set up video mode*/
- val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
- REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
-
- REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
-
- REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
-
- /*TODO: figure out how to setup these registers*/
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
- else
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
-
- REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
-
- /*set device ready*/
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
-}
-
-void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
-{
- struct drm_device *dev = output->dev;
-
- /* clear special packet sent bit */
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- /*send turn on package*/
- REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
-
- /*wait for SPL_PKG_SENT interrupt*/
- mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
-
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- output->panel_on = 1;
-
- /* FIXME the following is disabled to WA the X slow start issue
- for TMD panel
- if (pipe == 2)
- dev_priv->dpi_panel_on2 = true;
- else if (pipe == 0)
- dev_priv->dpi_panel_on = true; */
-}
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
- int pipe)
-{
- struct drm_device *dev = output->dev;
-
-	/* If output is already off, or mode setting didn't happen, ignore this */
- if ((!output->panel_on) || output->first_boot) {
- output->first_boot = 0;
- return;
- }
-
- /* Wait for dpi fifo to empty */
- mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
-
- /* Clear the special packet interrupt bit if set */
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
- goto shutdown_out;
-
- REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
-
-shutdown_out:
- output->panel_on = 0;
- output->first_boot = 0;
-
- /* FIXME the following is disabled to WA the X slow start issue
- for TMD panel
- if (pipe == 2)
- dev_priv->dpi_panel_on2 = false;
- else if (pipe == 0)
- dev_priv->dpi_panel_on = false; */
-}
-
-static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /*start up display island if it was shutdown*/
- if (!gma_power_begin(dev, true))
- return;
-
- if (on) {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mdfld_dsi_configure_up(dsi_encoder, pipe);
- else {
- /*enable mipi port*/
- REG_WRITE(MIPI_PORT_CONTROL(pipe),
- REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- }
- dev_priv->dpi_panel_on[pipe] = true;
- } else {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mdfld_dsi_configure_down(dsi_encoder, pipe);
- else {
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-
- /*disable mipi port*/
- REG_WRITE(MIPI_PORT_CONTROL(pipe),
- REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
- REG_READ(MIPI_PORT_CONTROL(pipe));
- }
- dev_priv->dpi_panel_on[pipe] = false;
- }
- gma_power_end(dev);
-}
-
-void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
-{
- mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
-}
-
-bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
- if (fixed_mode) {
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
- return true;
-}
-
-void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, false);
-}
-
-void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, true);
-}
-
-/* For TC35876X */
-/* This functionality was implemented in FW in iCDK */
-/* But removed in DV0 and later. So need to add here. */
-static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
-
- REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
- REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
- REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
- REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
- REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
- REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
- REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
- REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
- REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
- REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
- REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
- REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-}
-
-static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode *mode = dsi_config->mode;
-
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
- dsi_config->lane_count,
- dsi_config->bpp);
-
- REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
- mode->vdisplay << 16 | mode->hdisplay);
- REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
- dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
- dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
- dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
- dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
- dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
- dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
- dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-}
-
-static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- int lane_count = dsi_config->lane_count;
-
- if (pipe) {
- REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
- REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
- } else {
- REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
- REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
- }
-
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
- REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
-
- /* lane_count = 3 */
- REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
-
- mdfld_mipi_set_video_timing(dsi_config, pipe);
-}
-
-static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- struct drm_display_mode *mode = dsi_config->mode;
-
- REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(HSYNC_A,
- ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
-
- REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(VSYNC_A,
- ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
-
- REG_WRITE(PIPEASRC,
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-}
-/* End for TC35876X */
-
-void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf, dspcntr;
-
- u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (WARN_ON(pipe < 0))
- return;
-
- pipeconf = dev_priv->pipeconf[pipe];
- dspcntr = dev_priv->dspcntr[pipe];
-
- if (pipe) {
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- } else {
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mipi &= (~0x03); /* Use all four lanes */
- else
- mipi |= 2;
- }
-
- /*start up display island if it was shutdown*/
- if (!gma_power_begin(dev, true))
- return;
-
- if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- /*
-		 * The following logic is required to reset and configure the
-		 * bridge. This also starts the DSI clock at 200MHz.
- */
- tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
- tc35876x_toshiba_bridge_panel_on(dev);
- udelay(100);
- /* Now start the DSI clock */
- REG_WRITE(MRST_DPLL_A, 0x00);
- REG_WRITE(MRST_FPA0, 0xC1);
- REG_WRITE(MRST_DPLL_A, 0x00800000);
- udelay(500);
- REG_WRITE(MRST_DPLL_A, 0x80800000);
-
- if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
- dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
- __func__);
-
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
-
- mipi_set_properties(dsi_config, pipe);
- mdfld_mipi_config(dsi_config, pipe);
- mdfld_set_pipe_timing(dsi_config, pipe);
-
- REG_WRITE(DSPABASE, 0x00);
- REG_WRITE(DSPASIZE,
- ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-
- REG_WRITE(DSPACNTR, 0x98000000);
- REG_WRITE(DSPASURF, 0x00);
-
- REG_WRITE(VGACNTRL, 0x80000000);
- REG_WRITE(DEVICE_READY_REG, 0x00000001);
-
- REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
- } else {
- /*set up mipi port FIXME: do at init time */
- REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
- }
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- /* NOP */
- } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- /* set up DSI controller DPI interface */
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-
- /* Configure MIPI Bridge and Panel */
- tc35876x_configure_lvds_bridge(dev);
- dev_priv->dpi_panel_on[pipe] = true;
- } else {
- /*turn on DPI interface*/
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- }
-
- /*set up pipe*/
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
-
- /*set up display plane*/
- REG_WRITE(dspcntr_reg, dspcntr);
- REG_READ(dspcntr_reg);
-
- msleep(20); /* FIXME: this should wait for vblank */
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- /* NOP */
- } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- } else {
- /* init driver ic */
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- /*init backlight*/
- mdfld_dsi_brightness_init(dsi_config, pipe);
- }
-
- gma_power_end(dev);
-}
-
-/*
- * Init DSI DPI encoder.
- * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
- * return pointer of newly allocated DPI encoder, NULL on error
- */
-struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- const struct panel_funcs *p_funcs)
-{
- struct mdfld_dsi_dpi_output *dpi_output = NULL;
- struct mdfld_dsi_config *dsi_config;
- struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- int pipe;
- u32 data;
- int ret;
-
- pipe = dsi_connector->pipe;
-
- if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
- dsi_config = mdfld_dsi_get_config(dsi_connector);
-
- /* panel hard-reset */
- if (p_funcs->reset) {
- ret = p_funcs->reset(dev, pipe);
- if (ret) {
- DRM_ERROR("Panel %d hard-reset failed\n", pipe);
- return NULL;
- }
- }
-
- /* panel drvIC init */
- if (p_funcs->drv_ic_init)
- p_funcs->drv_ic_init(dsi_config, pipe);
-
- /* panel power mode detect */
- ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
- if (ret) {
- DRM_ERROR("Panel %d get power mode failed\n", pipe);
- dsi_connector->status = connector_status_disconnected;
- } else {
- DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
- dsi_connector->status = connector_status_connected;
- }
- }
-
- dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
- if (!dpi_output) {
- DRM_ERROR("No memory\n");
- return NULL;
- }
-
- dpi_output->panel_on = 0;
- dpi_output->dev = dev;
- if (mdfld_get_panel_type(dev, pipe) != TC35876X)
- dpi_output->p_funcs = p_funcs;
- dpi_output->first_boot = 1;
-
- /*get fixed mode*/
- dsi_config = mdfld_dsi_get_config(dsi_connector);
-
- /*create drm encoder object*/
- connector = &dsi_connector->base.base;
- encoder = &dpi_output->base.base.base;
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
- drm_encoder_helper_add(encoder,
- p_funcs->encoder_helper_funcs);
-
- /*attach to given connector*/
- drm_connector_attach_encoder(connector, encoder);
-
- /*set possible crtcs and clones*/
- if (dsi_connector->pipe) {
- encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = 0;
- } else {
- encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = 0;
- }
-
- dsi_connector->base.encoder = &dpi_output->base.base;
-
- return &dpi_output->base;
-}
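
Of the code removed above, the DPI timing conversion is worth a worked example: horizontal and vertical counts are programmed in DSI byte clocks, bclk = pclk * bpp / (num_lane * 8), which is exactly what the deleted mdfld_dsi_dpi_to_byte_clock_count() computed. A self-contained userspace sketch of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Pixel-clock counts to DSI byte-clock counts, the formula from the
     * deleted mdfld_dsi_dpi_to_byte_clock_count(). */
    static uint16_t to_byte_clocks(int pclk_count, int num_lane, int bpp)
    {
            return (uint16_t)((pclk_count * bpp) / (num_lane * 8));
    }

    int main(void)
    {
            /* 48 pixel clocks of hsync at 24 bpp over 2 lanes:
             * 48 * 24 / (2 * 8) = 72 byte clocks. */
            printf("%u\n", to_byte_clocks(48, 2, 24));
            return 0;
    }
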
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
deleted file mode 100644
index 2b40663e1696..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DPI_H__
-#define __MDFLD_DSI_DPI_H__
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-struct mdfld_dsi_dpi_timing {
- u16 hsync_count;
- u16 hbp_count;
- u16 hfp_count;
- u16 hactive_count;
- u16 vsync_count;
- u16 vbp_count;
- u16 vfp_count;
-};
-
-struct mdfld_dsi_dpi_output {
- struct mdfld_dsi_encoder base;
- struct drm_device *dev;
-
- int panel_on;
- int first_boot;
-
- const struct panel_funcs *p_funcs;
-};
-
-#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
- container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
-
-/* Export functions */
-extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp);
-extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- const struct panel_funcs *p_funcs);
-
-/* MDFLD DPI helper functions */
-extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
-extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
- int pipe);
-extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-#endif /*__MDFLD_DSI_DPI_H__*/
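
The MDFLD_DSI_DPI_OUTPUT() macro in the header removed above is the standard container_of() downcast: the generic encoder sits embedded inside the wrapper struct, so a pointer to it recovers the wrapper. A generic sketch of the pattern with illustrative type names:

    #include <linux/kernel.h>

    struct base_encoder {
            int id;
    };

    struct dpi_output {
            struct base_encoder base;       /* embedded base object */
            int panel_on;
    };

    /* Recover the containing dpi_output from a pointer to its base
     * member; equivalent in shape to MDFLD_DSI_DPI_OUTPUT(). */
    #define TO_DPI_OUTPUT(enc) container_of(enc, struct dpi_output, base)
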
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
deleted file mode 100644
index 4aab76613bd9..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/pm_runtime.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-/* Get the LABC setting from the kernel command line. */
-static int LABC_control = 1;
-
-#ifdef MODULE
-module_param(LABC_control, int, 0644);
-#else
-
-static int __init parse_LABC_control(char *arg)
-{
- /*
- * LABC control can be passed in as a cmdline parameter:
- * add LABC=1 to the cmdline to enable the feature,
- * or LABC=0 to disable it.
- */
- if (!arg)
- return -EINVAL;
-
- if (!strcasecmp(arg, "0"))
- LABC_control = 0;
- else if (!strcasecmp(arg, "1"))
- LABC_control = 1;
-
- return 0;
-}
-early_param("LABC", parse_LABC_control);
-#endif
-
-/*
- * Check whether the generic control or data buffer is empty and ready.
- */
-void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
- u32 fifo_stat)
-{
- u32 GEN_BF_time_out_count;
-
- /* Check the MIPI adapter command registers */
- for (GEN_BF_time_out_count = 0;
- GEN_BF_time_out_count < GEN_FB_TIME_OUT;
- GEN_BF_time_out_count++) {
- if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
- break;
- udelay(100);
- }
-
- if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
- DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
- gen_fifo_stat_reg);
-}
-
-/*
- * Manage the DSI MIPI keyboard and display brightness.
- * FIXME: this is exported to OSPM code. We should work out a specific
- * display interface to OSPM.
- */
-
-void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_get_pkg_sender(dsi_config);
- struct drm_device *dev;
- struct drm_psb_private *dev_priv;
- u32 gen_ctrl_val;
-
- if (!sender) {
- DRM_ERROR("No sender found\n");
- return;
- }
-
- dev = sender->dev;
- dev_priv = dev->dev_private;
-
- /* Set default display backlight value to 85% (0xd8)*/
- mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
- true);
-
- /* Set minimum brightness setting of CABC function to 20% (0x33)*/
- mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
-
- /* Enable backlight or/and LABC */
- gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
- BACKLIGHT_ON;
- if (LABC_control == 1)
- gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO
- | GAMMA_AUTO;
-
- if (LABC_control == 1)
- gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
-
- dev_priv->mipi_ctrl_display = gen_ctrl_val;
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
- 1, true);
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
-}
-
-void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
-{
- struct mdfld_dsi_pkg_sender *sender;
- struct drm_psb_private *dev_priv;
- struct mdfld_dsi_config *dsi_config;
- u32 gen_ctrl_val = 0;
- int p_type = TMD_VID;
-
- if (!dev || (pipe != 0 && pipe != 2)) {
- DRM_ERROR("Invalid parameter\n");
- return;
- }
-
- p_type = mdfld_get_panel_type(dev, 0);
-
- dev_priv = dev->dev_private;
-
- if (pipe)
- dsi_config = dev_priv->dsi_configs[1];
- else
- dsi_config = dev_priv->dsi_configs[0];
-
- sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if (!sender) {
- DRM_ERROR("No sender found\n");
- return;
- }
-
- gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
-
- dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
- pipe, gen_ctrl_val);
-
- if (p_type == TMD_VID) {
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
- (u8)gen_ctrl_val, 1, true);
- } else {
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short(sender, write_display_brightness,
- (u8)gen_ctrl_val, 1, true);
-
- /* Enable backlight control */
- if (level == 0)
- gen_ctrl_val = 0;
- else
- gen_ctrl_val = dev_priv->mipi_ctrl_display;
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
- (u8)gen_ctrl_val, 1, true);
- }
-}
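The level-to-DCS conversion above is a plain linear map from the driver's 0..MDFLD_DSI_BRIGHTNESS_MAX_LEVEL range (100) onto the 8-bit DCS brightness byte. A few illustrative values (arithmetic only, not from the original source):

/*
 * level = 100 -> 100 * 0xff / 100 = 0xff (full brightness)
 * level =  50 ->  50 * 0xff / 100 = 0x7f
 * level =   0 -> 0x00, and write_ctrl_display is also cleared so the
 *                backlight control block is switched off
 */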
-
-static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
- u8 dcs, u32 *data, bool hs)
-{
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if (!sender || !data) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
-}
-
-int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
- bool hs)
-{
- if (!dsi_config || !mode) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
-}
-
-/*
- * NOTE: this function was used by OSPM.
- * TODO: will be removed later, should work out display interfaces for OSPM
- */
-void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
- DRM_ERROR("Invalid parameters\n");
- return;
- }
-
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-}
-
-static void mdfld_dsi_connector_save(struct drm_connector *connector)
-{
-}
-
-static void mdfld_dsi_connector_restore(struct drm_connector *connector)
-{
-}
-
-/* FIXME: start using the force parameter */
-static enum drm_connector_status
-mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct mdfld_dsi_connector *dsi_connector
- = mdfld_dsi_connector(connector);
-
- dsi_connector->status = connector_status_connected;
-
- return dsi_connector->status;
-}
-
-static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t value)
-{
- struct drm_encoder *encoder = connector->encoder;
-
- if (!strcmp(property->name, "scaling mode") && encoder) {
- struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
- bool center_change;
- uint64_t val;
-
- if (!gma_crtc)
- goto set_prop_error;
-
- switch (value) {
- case DRM_MODE_SCALE_FULLSCREEN:
- break;
- case DRM_MODE_SCALE_NO_SCALE:
- break;
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- goto set_prop_error;
- }
-
- if (drm_object_property_get_value(&connector->base, property, &val))
- goto set_prop_error;
-
- if (val == value)
- goto set_prop_done;
-
- if (drm_object_property_set_value(&connector->base,
- property, value))
- goto set_prop_error;
-
- center_change = (val == DRM_MODE_SCALE_NO_SCALE) ||
- (value == DRM_MODE_SCALE_NO_SCALE);
-
- if (gma_crtc->saved_mode.hdisplay != 0 &&
- gma_crtc->saved_mode.vdisplay != 0) {
- if (center_change) {
- if (!drm_crtc_helper_set_mode(encoder->crtc,
- &gma_crtc->saved_mode,
- encoder->crtc->x,
- encoder->crtc->y,
- encoder->crtc->primary->fb))
- goto set_prop_error;
- } else {
- const struct drm_encoder_helper_funcs *funcs =
- encoder->helper_private;
- funcs->mode_set(encoder,
- &gma_crtc->saved_mode,
- &gma_crtc->saved_adjusted_mode);
- }
- }
- } else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_object_property_set_value(&connector->base, property,
- value))
- goto set_prop_error;
- else
- gma_backlight_set(encoder->dev, value);
- }
-set_prop_done:
- return 0;
-set_prop_error:
- return -1;
-}
-
-static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_pkg_sender *sender;
-
- if (!dsi_connector)
- return;
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- sender = dsi_connector->pkg_sender;
- mdfld_dsi_pkg_sender_destroy(sender);
- kfree(dsi_connector);
-}
-
-static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
- struct drm_display_mode *dup_mode = NULL;
- struct drm_device *dev = connector->dev;
-
- if (fixed_mode) {
- dev_dbg(dev->dev, "fixed_mode %dx%d\n",
- fixed_mode->hdisplay, fixed_mode->vdisplay);
- dup_mode = drm_mode_duplicate(dev, fixed_mode);
- drm_mode_probed_add(connector, dup_mode);
- return 1;
- }
- DRM_ERROR("Didn't get any modes!\n");
- return 0;
-}
-
-static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return MODE_NO_INTERLACE;
-
- /*
- * FIXME: the current DC has no panel fitting unit, so reject any
- * mode-setting request that does not match the fixed mode.
- * Will figure out a way to do up-scaling (panel fitting) later.
- */
- if (fixed_mode) {
- if (mode->hdisplay != fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if (mode->vdisplay != fixed_mode->vdisplay)
- return MODE_PANEL;
- }
-
- return MODE_OK;
-}
-
-static struct drm_encoder *mdfld_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- return &dsi_config->encoder->base.base;
-}
-
-/* DSI connector funcs */
-static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .detect = mdfld_dsi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = mdfld_dsi_connector_set_property,
- .destroy = mdfld_dsi_connector_destroy,
-};
-
-/* DSI connector helper funcs */
-static const struct drm_connector_helper_funcs
- mdfld_dsi_connector_helper_funcs = {
- .get_modes = mdfld_dsi_connector_get_modes,
- .mode_valid = mdfld_dsi_connector_mode_valid,
- .best_encoder = mdfld_dsi_connector_best_encoder,
-};
-
-static int mdfld_dsi_get_default_config(struct drm_device *dev,
- struct mdfld_dsi_config *config, int pipe)
-{
- if (!dev || !config) {
- DRM_ERROR("Invalid parameters");
- return -EINVAL;
- }
-
- config->bpp = 24;
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- config->lane_count = 4;
- else
- config->lane_count = 2;
- config->channel_num = 0;
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- config->video_mode =
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
- else
- config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
-
- return 0;
-}
-
-int mdfld_dsi_panel_reset(struct drm_device *ddev, int pipe)
-{
- struct device *dev = ddev->dev;
- struct gpio_desc *gpiod;
-
- /*
- * Drive the GPIO reset line for the corresponding pipe HIGH. The
- * line is presumably active low, so this takes the respective pipe
- * out of reset. (We have no code to put it back into reset in this
- * driver.)
- */
- switch (pipe) {
- case 0:
- gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- break;
- case 2:
- gpiod = gpiod_get(dev, "dsi-pipe2-reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- break;
- default:
- DRM_DEV_ERROR(dev, "Invalid output pipe\n");
- return -EINVAL;
- }
- gpiod_put(gpiod);
-
- /* Flush posted writes on the device */
- gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- gpiod_get_value(gpiod);
- gpiod_put(gpiod);
-
- return 0;
-}
-
-/*
- * MIPI output init
- * @dev: drm device
- * @pipe: pipe number, 0 or 2
- * @p_vid_funcs: panel callbacks for this output
- *
- * Initialize a MIPI output on @pipe, including creation of the DRM mode
- * objects for the DSI output.
- */
-void mdfld_dsi_output_init(struct drm_device *dev,
- int pipe,
- const struct panel_funcs *p_vid_funcs)
-{
- struct mdfld_dsi_config *dsi_config;
- struct mdfld_dsi_connector *dsi_connector;
- struct drm_connector *connector;
- struct mdfld_dsi_encoder *encoder;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct panel_info dsi_panel_info;
- u32 width_mm, height_mm;
-
- dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
-
- if (pipe != 0 && pipe != 2) {
- DRM_ERROR("Invalid parameter\n");
- return;
- }
-
- /* Create a new connector */
- dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
- if (!dsi_connector) {
- DRM_ERROR("No memory");
- return;
- }
-
- dsi_connector->pipe = pipe;
-
- dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
- GFP_KERNEL);
- if (!dsi_config) {
- DRM_ERROR("cannot allocate memory for DSI config\n");
- goto dsi_init_err0;
- }
- mdfld_dsi_get_default_config(dev, dsi_config, pipe);
-
- dsi_connector->private = dsi_config;
-
- dsi_config->changed = 1;
- dsi_config->dev = dev;
-
- dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
- if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
- goto dsi_init_err0;
-
- width_mm = dsi_panel_info.width_mm;
- height_mm = dsi_panel_info.height_mm;
-
- dsi_config->mode = dsi_config->fixed_mode;
- dsi_config->connector = dsi_connector;
-
- if (!dsi_config->fixed_mode) {
- DRM_ERROR("No panel fixed mode was found\n");
- goto dsi_init_err0;
- }
-
- if (pipe && dev_priv->dsi_configs[0]) {
- dsi_config->dvr_ic_inited = 0;
- dev_priv->dsi_configs[1] = dsi_config;
- } else if (pipe == 0) {
- dsi_config->dvr_ic_inited = 1;
- dev_priv->dsi_configs[0] = dsi_config;
- } else {
- DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
- goto dsi_init_err0;
- }
-
- connector = &dsi_connector->base.base;
- dsi_connector->base.save = mdfld_dsi_connector_save;
- dsi_connector->base.restore = mdfld_dsi_connector_restore;
-
- drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
-
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->display_info.width_mm = width_mm;
- connector->display_info.height_mm = height_mm;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- /* Attach properties */
- drm_object_attach_property(&connector->base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
- dev_priv->backlight_property,
- MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
- /* Init the DSI package sender on this output */
- if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
- DRM_ERROR("Package Sender initialization failed on pipe %d\n",
- pipe);
- goto dsi_init_err0;
- }
-
- encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
- if (!encoder) {
- DRM_ERROR("Create DPI encoder failed\n");
- goto dsi_init_err1;
- }
- encoder->private = dsi_config;
- dsi_config->encoder = encoder;
- encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
- INTEL_OUTPUT_MIPI2;
- drm_connector_register(connector);
- return;
-
- /* TODO: add code to destroy outputs on error */
-dsi_init_err1:
- /* Destroy the sender */
- mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
-
- drm_connector_cleanup(connector);
-
- kfree(dsi_config->fixed_mode);
- kfree(dsi_config);
-dsi_init_err0:
- kfree(dsi_connector);
-}
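To see how this entry point is consumed, a hypothetical panel driver would fill a struct panel_funcs and pass it in. The callback names below are invented for illustration, and their prototypes are inferred from the call sites above; get_config_mode() and get_panel_info() are the only hooks this file invokes directly:

/* Hypothetical glue -- names invented for illustration. */
static struct drm_display_mode *example_get_config_mode(struct drm_device *dev);
static int example_get_panel_info(struct drm_device *dev, int pipe,
				  struct panel_info *pi);

static const struct panel_funcs example_panel_funcs = {
	.get_config_mode = example_get_config_mode,
	.get_panel_info = example_get_panel_info,
	/* Remaining callbacks omitted; see mdfld_output.h. */
};

static void example_register_panel(struct drm_device *dev)
{
	/* Bring up the MIPI output on pipe 0. */
	mdfld_dsi_output_init(dev, 0, &example_panel_funcs);
}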
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
deleted file mode 100644
index 5c0db3c2903f..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_OUTPUT_H__
-#define __MDFLD_DSI_OUTPUT_H__
-
-#include <linux/backlight.h>
-
-#include <asm/intel-mid.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-
-#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
-#define FLD_MOD(orig, val, start, end) \
- (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
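A worked example of the field helpers above, using illustrative values that are not from the original header: for a field occupying bits 7..4,

/*
 * FLD_MASK(7, 4)           == 0xf0
 * FLD_VAL(0x5, 7, 4)       == 0x50
 * FLD_GET(0x53, 7, 4)      == 0x5
 * FLD_MOD(0x03, 0x5, 7, 4) == 0x53  (field replaced, other bits kept)
 */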
-
-#define REG_FLD_MOD(reg, val, start, end) \
- REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
-
-static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
- u32 val, int start, int end)
-{
- int t = 100000;
-
- while (FLD_GET(REG_READ(reg), start, end) != val) {
- if (--t == 0)
- return 1;
- }
-
- return 0;
-}
-
-#define REG_FLD_WAIT(reg, val, start, end) \
- REGISTER_FLD_WAIT(dev, reg, val, start, end)
-
-#define REG_BIT_WAIT(reg, val, bitnum) \
- REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
-
-#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
-
-#ifdef DEBUG
-#define CHECK_PIPE(pipe) ({ \
- const typeof(pipe) __pipe = (pipe); \
- BUG_ON(__pipe != 0 && __pipe != 2); \
- __pipe; })
-#else
-#define CHECK_PIPE(pipe) (pipe)
-#endif
-
-/*
- * The actual MIPIA->MIPIC register offset is 0x800. The pipe argument is
- * only ever 0 or 2, so multiplying by 0x400 yields 0 for MIPIA and 0x800
- * for MIPIC.
- */
-#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
-
-/* mdfld DSI controller registers */
-#define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe))
-#define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe))
-#define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe))
-#define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe))
-#define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe))
-#define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe))
-#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe))
-#define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe))
-#define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe))
-#define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe))
-#define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe))
-#define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe))
-#define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe))
-#define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe))
-#define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe))
-#define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe))
-#define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe))
-#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe))
-#define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe))
-#define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe))
-#define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe))
-#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe))
-#define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe))
-#define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe))
-#define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + REG_OFFSET(pipe))
-#define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe))
-#define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe))
-#define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe))
-#define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe))
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe))
-
-#define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe))
-#define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe))
-#define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe))
-#define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe))
-#define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe))
-
-/* non-uniform reg offset */
-#define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI)
-
-#define DSI_DEVICE_READY (0x1)
-#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
-#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
-#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
-
-#define DSI_ONE_DATA_LANE (0x1)
-#define DSI_TWO_DATA_LANE (0x2)
-#define DSI_THREE_DATA_LANE (0x3)
-#define DSI_FOUR_DATA_LANE (0x4)
-#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
-#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
-#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
-#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
-
-#define DSI_INTR_STATE_RXSOTERROR BIT(0)
-
-#define DSI_INTR_STATE_SPL_PKG_SENT BIT(30)
-#define DSI_INTR_STATE_TE BIT(31)
-
-#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
-
-#define DSI_RESET_TIMER_MASK (0xffff)
-
-#define DSI_DBI_FIFO_WM_HALF (0x0)
-#define DSI_DBI_FIFO_WM_QUARTER (0x1)
-#define DSI_DBI_FIFO_WM_LOW (0x2)
-
-#define DSI_DPI_TIMING_MASK (0xffff)
-
-#define DSI_INIT_TIMER_MASK (0xffff)
-
-#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
-
-#define DSI_LP_BYTECLK_MASK (0x0ffff)
-
-#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
-#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
-#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
-#define DSI_HS_CTRL_GEN_R0 (0x04)
-#define DSI_HS_CTRL_GEN_R1 (0x14)
-#define DSI_HS_CTRL_GEN_R2 (0x24)
-#define DSI_HS_CTRL_GEN_LONG_W (0x29)
-#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
-#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
-#define DSI_HS_CTRL_MCS_R0 (0x06)
-#define DSI_HS_CTRL_MCS_LONG_W (0x39)
-#define DSI_HS_CTRL_VC_OFFSET (0x06)
-#define DSI_HS_CTRL_WC_OFFSET (0x08)
-
-#define DSI_FIFO_GEN_HS_DATA_FULL BIT(0)
-#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1)
-#define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2)
-#define DSI_FIFO_GEN_LP_DATA_FULL BIT(8)
-#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9)
-#define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10)
-#define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16)
-#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17)
-#define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18)
-#define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24)
-#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25)
-#define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26)
-#define DSI_FIFO_DBI_EMPTY BIT(27)
-#define DSI_FIFO_DPI_EMPTY BIT(28)
-
-#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
-
-#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
-#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
-
-#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
-#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
-
-/* DSI power modes */
-#define DSI_POWER_MODE_DISPLAY_ON BIT(2)
-#define DSI_POWER_MODE_NORMAL_ON BIT(3)
-#define DSI_POWER_MODE_SLEEP_OUT BIT(4)
-#define DSI_POWER_MODE_PARTIAL_ON BIT(5)
-#define DSI_POWER_MODE_IDLE_ON BIT(6)
-
-enum {
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
- MDFLD_DSI_VIDEO_BURST_MODE = 3,
-};
-
-#define DSI_DPI_COMPLETE_LAST_LINE BIT(2)
-#define DSI_DPI_DISABLE_BTA BIT(3)
-
-struct mdfld_dsi_connector {
- struct gma_connector base;
-
- int pipe;
- void *private;
- void *pkg_sender;
-
- /* Connection status */
- enum drm_connector_status status;
-};
-
-struct mdfld_dsi_encoder {
- struct gma_encoder base;
- void *private;
-};
-
-/*
- * DSI config, consisting of one DSI connector and one DSI encoder.
- * DRM will pick up the DSI encoder based on the different configs.
- */
-struct mdfld_dsi_config {
- struct drm_device *dev;
- struct drm_display_mode *fixed_mode;
- struct drm_display_mode *mode;
-
- struct mdfld_dsi_connector *connector;
- struct mdfld_dsi_encoder *encoder;
-
- int changed;
-
- int bpp;
- int lane_count;
- /* Virtual channel number for this encoder */
- int channel_num;
- /* Video mode configuration */
- int video_mode;
-
- int dvr_ic_inited;
-};
-
-static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
- struct drm_connector *connector)
-{
- struct gma_connector *gma_connector;
-
- gma_connector = to_gma_connector(connector);
-
- return container_of(gma_connector, struct mdfld_dsi_connector, base);
-}
-
-static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
- struct drm_encoder *encoder)
-{
- struct gma_encoder *gma_encoder;
-
- gma_encoder = to_gma_encoder(encoder);
-
- return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
-}
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
-{
- if (!connector)
- return NULL;
- return (struct mdfld_dsi_config *)connector->private;
-}
-
-static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
-{
- struct mdfld_dsi_connector *dsi_connector;
-
- if (!config)
- return NULL;
-
- dsi_connector = config->connector;
-
- if (!dsi_connector)
- return NULL;
-
- return dsi_connector->pkg_sender;
-}
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
-{
- if (!encoder)
- return NULL;
- return (struct mdfld_dsi_config *)encoder->private;
-}
-
-static inline struct mdfld_dsi_connector *
- mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *config;
-
- if (!encoder)
- return NULL;
-
- config = mdfld_dsi_encoder_get_config(encoder);
- if (!config)
- return NULL;
-
- return config->connector;
-}
-
-static inline void *mdfld_dsi_encoder_get_pkg_sender(
- struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *dsi_config;
-
- dsi_config = mdfld_dsi_encoder_get_config(encoder);
- if (!dsi_config)
- return NULL;
-
- return mdfld_dsi_get_pkg_sender(dsi_config);
-}
-
-static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_connector *connector;
-
- if (!encoder)
- return -1;
-
- connector = mdfld_dsi_encoder_get_connector(encoder);
- if (!connector)
- return -1;
- return connector->pipe;
-}
-
-/* Export functions */
-extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
- u32 gen_fifo_stat_reg, u32 fifo_stat);
-extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
- int level);
-extern void mdfld_dsi_output_init(struct drm_device *dev,
- int pipe,
- const struct panel_funcs *p_vid_funcs);
-extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-
-extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
- u32 *mode, bool hs);
-extern int mdfld_dsi_panel_reset(struct drm_device *dev, int pipe);
-
-#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
deleted file mode 100644
index 6e0de83e9f7d..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/freezer.h>
-
-#include <video/mipi_display.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#define MDFLD_DSI_READ_MAX_COUNT 5000
-
-enum {
- MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
-};
-
-enum {
- MDFLD_DSI_PKG_SENDER_FREE = 0x0,
- MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
-};
-
-static const char *const dsi_errors[] = {
- "RX SOT Error",
- "RX SOT Sync Error",
- "RX EOT Sync Error",
- "RX Escape Mode Entry Error",
- "RX LP TX Sync Error",
- "RX HS Receive Timeout Error",
- "RX False Control Error",
- "RX ECC Single Bit Error",
- "RX ECC Multibit Error",
- "RX Checksum Error",
- "RX DSI Data Type Not Recognised",
- "RX DSI VC ID Invalid",
- "TX False Control Error",
- "TX ECC Single Bit Error",
- "TX ECC Multibit Error",
- "TX Checksum Error",
- "TX DSI Data Type Not Recognised",
- "TX DSI VC ID invalid",
- "High Contention",
- "Low contention",
- "DPI FIFO Under run",
- "HS TX Timeout",
- "LP RX Timeout",
- "Turn Around ACK Timeout",
- "ACK With No Error",
- "RX Invalid TX Length",
- "RX Prot Violation",
- "HS Generic Write FIFO Full",
- "LP Generic Write FIFO Full",
- "Generic Read Data Avail",
- "Special Packet Sent",
- "Tearing Effect",
-};
-
-static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
- u32 mask)
-{
- struct drm_device *dev = sender->dev;
- u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
- int retry = 0xffff;
-
- while (retry--) {
- if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
- return 0;
- udelay(100);
- }
- DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
- return -EIO;
-}
-
-static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) |
- BIT(26) | BIT(27) | BIT(28)));
-}
-
-static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26)));
-}
-
-static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18)));
-}
-
-static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
-{
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- struct drm_device *dev = sender->dev;
-
- dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
-
- switch (mask) {
- case BIT(0):
- case BIT(1):
- case BIT(2):
- case BIT(3):
- case BIT(4):
- case BIT(5):
- case BIT(6):
- case BIT(7):
- case BIT(8):
- case BIT(9):
- case BIT(10):
- case BIT(11):
- case BIT(12):
- case BIT(13):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(14):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender)*/
- break;
- case BIT(15):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(16):
- break;
- case BIT(17):
- break;
- case BIT(18):
- case BIT(19):
- dev_dbg(sender->dev->dev, "High/Low contention detected\n");
- /*wait for contention recovery time*/
- /*mdelay(10);*/
- /*wait for all fifo empty*/
- if (0)
- wait_for_all_fifos_empty(sender);
- break;
- case BIT(20):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(21):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender);*/
- break;
- case BIT(22):
- break;
- case BIT(23):
- case BIT(24):
- case BIT(25):
- case BIT(26):
- case BIT(27):
- dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
- REG_WRITE(intr_stat_reg, mask);
- wait_for_hs_fifos_empty(sender);
- break;
- case BIT(28):
- dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
- REG_WRITE(intr_stat_reg, mask);
- wait_for_lp_fifos_empty(sender);
- break;
- case BIT(29):
- case BIT(30):
- case BIT(31):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- }
-
- if (mask & REG_READ(intr_stat_reg))
- dev_dbg(sender->dev->dev,
- "Cannot clean interrupt 0x%08x\n", mask);
- return 0;
-}
-
-static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
-{
- struct drm_device *dev = sender->dev;
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- u32 mask;
- u32 intr_stat;
- int i;
- int err = 0;
-
- intr_stat = REG_READ(intr_stat_reg);
-
- for (i = 0; i < 32; i++) {
- mask = (0x00000001UL) << i;
- if (intr_stat & mask) {
- dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
- err = handle_dsi_error(sender, mask);
- if (err)
- DRM_ERROR("Cannot handle error\n");
- }
- }
- return err;
-}
-
-static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 cmd, u8 param, bool hs)
-{
- struct drm_device *dev = sender->dev;
- u32 ctrl_reg;
- u32 val;
- u8 virtual_channel = 0;
-
- if (hs) {
- ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
-
- /* FIXME: wait_for_hs_fifos_empty(sender); */
- } else {
- ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
-
- /* FIXME: wait_for_lp_fifos_empty(sender); */
- }
-
- val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
- FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
-
- REG_WRITE(ctrl_reg, val);
-
- return 0;
-}
-
-static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, int len, bool hs)
-{
- struct drm_device *dev = sender->dev;
- u32 ctrl_reg;
- u32 data_reg;
- u32 val;
- u8 *p;
- u8 b1, b2, b3, b4;
- u8 virtual_channel = 0;
- int i;
-
- if (hs) {
- ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
- data_reg = sender->mipi_hs_gen_data_reg;
-
- /* FIXME: wait_for_hs_fifos_empty(sender); */
- } else {
- ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
- data_reg = sender->mipi_lp_gen_data_reg;
-
- /* FIXME: wait_for_lp_fifos_empty(sender); */
- }
-
- p = data;
- for (i = 0; i < len / 4; i++) {
- b1 = *p++;
- b2 = *p++;
- b3 = *p++;
- b4 = *p++;
-
- REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
- }
-
- i = len % 4;
- if (i) {
- b1 = 0; b2 = 0; b3 = 0;
-
- switch (i) {
- case 3:
- b1 = *p++;
- b2 = *p++;
- b3 = *p++;
- break;
- case 2:
- b1 = *p++;
- b2 = *p++;
- break;
- case 1:
- b1 = *p++;
- break;
- }
-
- REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
- }
-
- val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
- FLD_VAL(data_type, 5, 0);
-
- REG_WRITE(ctrl_reg, val);
-
- return 0;
-}
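To illustrate the little-endian packing above with an invented 6-byte payload {0xb1, 0x01, 0x02, 0x03, 0x04, 0x05}: the full-word loop emits one data word, the remainder path emits a second, and the control word then carries the length and type:

/*
 * Illustrative values only:
 *   data word 0: 0x030201b1   (bytes 0..3, least significant byte first)
 *   data word 1: 0x00000504   (bytes 4..5 via the len % 4 remainder path)
 *   ctrl word  : FLD_VAL(6, 23, 8) | FLD_VAL(0, 7, 6) | FLD_VAL(type, 5, 0)
 */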
-
-static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len)
-{
- u8 cmd;
-
- switch (data_type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_LONG_WRITE:
- cmd = *data;
- break;
- default:
- return 0;
- }
-
- /* This prevents other packages from being sent while we delay */
- sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
-
- /* Wait 120 milliseconds in case exit_sleep_mode was just sent */
- if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
- /*TODO: replace it with msleep later*/
- mdelay(120);
- }
-
- if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
- /*TODO: replace it with msleep later*/
- mdelay(120);
- }
- return 0;
-}
-
-static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len)
-{
- u8 cmd;
-
- switch (data_type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_LONG_WRITE:
- cmd = *data;
- break;
- default:
- return 0;
- }
-
- /*update panel status*/
- if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
- sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
- sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
- /*TODO: replace it with msleep later*/
- mdelay(5);
- }
-
- sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
- return 0;
-}
-
-static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len, bool hs)
-{
- int ret;
-
- /*handle DSI error*/
- ret = dsi_error_handler(sender);
- if (ret) {
- DRM_ERROR("Error handling failed\n");
- return -EAGAIN;
- }
-
- /* send pkg */
- if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
- DRM_ERROR("sender is busy\n");
- return -EAGAIN;
- }
-
- ret = send_pkg_prepare(sender, data_type, data, len);
- if (ret) {
- DRM_ERROR("send_pkg_prepare error\n");
- return ret;
- }
-
- switch (data_type) {
- case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_READ:
- ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
- break;
- case MIPI_DSI_GENERIC_LONG_WRITE:
- case MIPI_DSI_DCS_LONG_WRITE:
- ret = send_long_pkg(sender, data_type, data, len, hs);
- break;
- }
-
- send_pkg_done(sender, data_type, data, len);
-
- /* FIXME: should we query complete and FIFO empty here? */
-
- return ret;
-}
-
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs)
-{
- unsigned long flags;
-
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u8 param, u8 param_num, bool hs)
-{
- u8 data[2];
- unsigned long flags;
- u8 data_type;
-
- if (!sender) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- data[0] = cmd;
-
- if (param_num) {
- data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
- data[1] = param;
- } else {
- data_type = MIPI_DSI_DCS_SHORT_WRITE;
- data[1] = 0;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, data_type, data, sizeof(data), hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
- u8 param1, u8 param_num, bool hs)
-{
- u8 data[2];
- unsigned long flags;
- u8 data_type;
-
- if (!sender || param_num > 2) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- switch (param_num) {
- case 0:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
- data[0] = 0;
- data[1] = 0;
- break;
- case 1:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
- data[0] = param0;
- data[1] = 0;
- break;
- case 2:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
- data[0] = param0;
- data[1] = param1;
- break;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, data_type, data, sizeof(data), hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs)
-{
- unsigned long flags;
-
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
-{
- unsigned long flags;
- struct drm_device *dev;
- int i;
- u32 gen_data_reg;
- int retry = MDFLD_DSI_READ_MAX_COUNT;
-
- if (!sender || !data_out || !len_out) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- dev = sender->dev;
-
- /*
- * Do the read:
- * 1) send out the generic read request
- * 2) poll the read data avail interrupt
- * 3) read the data
- */
- spin_lock_irqsave(&sender->lock, flags);
-
- REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
- if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
- DRM_ERROR("Can NOT clean read data valid interrupt\n");
-
- /* Send out the read request */
- send_pkg(sender, data_type, data, len, hs);
-
- /* Poll the read data avail interrupt */
- while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
- udelay(100);
- retry--;
- }
-
- if (!retry) {
- spin_unlock_irqrestore(&sender->lock, flags);
- return -ETIMEDOUT;
- }
-
- REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
- /* Read the data */
- if (hs)
- gen_data_reg = sender->mipi_hs_gen_data_reg;
- else
- gen_data_reg = sender->mipi_lp_gen_data_reg;
-
- for (i = 0; i < len_out; i++)
- *(data_out + i) = REG_READ(gen_data_reg);
-
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u32 *data, u16 len, bool hs)
-{
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
- data, len, hs);
-}
-
-int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *pkg_sender;
- struct mdfld_dsi_config *dsi_config;
- struct drm_device *dev;
- struct drm_psb_private *dev_priv;
- const struct psb_offset *map;
- u32 mipi_val = 0;
-
- /* Check the argument before it is dereferenced below */
- if (!dsi_connector) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- dsi_config = mdfld_dsi_get_config(dsi_connector);
- dev = dsi_config->dev;
- dev_priv = dev->dev_private;
- map = &dev_priv->regmap[pipe];
-
- pkg_sender = dsi_connector->pkg_sender;
-
- if (!pkg_sender || IS_ERR(pkg_sender)) {
- pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
- GFP_KERNEL);
- if (!pkg_sender) {
- DRM_ERROR("Create DSI pkg sender failed\n");
- return -ENOMEM;
- }
- dsi_connector->pkg_sender = (void *)pkg_sender;
- }
-
- pkg_sender->dev = dev;
- pkg_sender->dsi_connector = dsi_connector;
- pkg_sender->pipe = pipe;
- pkg_sender->pkg_num = 0;
- pkg_sender->panel_mode = 0;
- pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
- /*init regs*/
- /* FIXME: should just copy the regmap ptr ? */
- pkg_sender->dpll_reg = map->dpll;
- pkg_sender->dspcntr_reg = map->cntr;
- pkg_sender->pipeconf_reg = map->conf;
- pkg_sender->dsplinoff_reg = map->linoff;
- pkg_sender->dspsurf_reg = map->surf;
- pkg_sender->pipestat_reg = map->status;
-
- pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
- pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
- pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
- pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
- pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
- pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
- pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
- pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
- pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
-
- /*init lock*/
- spin_lock_init(&pkg_sender->lock);
-
- if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
- /*
- * For video mode, don't enable the DPI timing output here;
- * it will be initialized during mode setting.
- */
- mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (pipe == 0)
- mipi_val |= 0x2;
-
- REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- /* do dsi controller init */
- mdfld_dsi_controller_init(dsi_config, pipe);
- }
-
- return 0;
-}
-
-void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
-{
- if (!sender || IS_ERR(sender))
- return;
-
- /*free*/
- kfree(sender);
-}
-
-
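A minimal usage sketch of the sender interface; the function and flow are invented for illustration, and real callers obtain the sender through mdfld_dsi_get_pkg_sender() as the output code does:

/* Illustrative only: send a one-parameter DCS write in HS mode. */
static int example_set_brightness(struct mdfld_dsi_config *dsi_config,
				  u8 level)
{
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_get_pkg_sender(dsi_config);

	if (!sender)
		return -EINVAL;

	/* 0x51 is the standard DCS set_display_brightness opcode. */
	return mdfld_dsi_send_mcs_short(sender, 0x51, level, 1, true);
}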
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
deleted file mode 100644
index 0478a21c15d5..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-#ifndef __MDFLD_DSI_PKG_SENDER_H__
-#define __MDFLD_DSI_PKG_SENDER_H__
-
-#include <linux/kthread.h>
-
-#define MDFLD_MAX_DCS_PARAM 8
-
-struct mdfld_dsi_pkg_sender {
- struct drm_device *dev;
- struct mdfld_dsi_connector *dsi_connector;
- u32 status;
- u32 panel_mode;
-
- int pipe;
-
- spinlock_t lock;
-
- u32 pkg_num;
-
- /* Registers */
- u32 dpll_reg;
- u32 dspcntr_reg;
- u32 pipeconf_reg;
- u32 pipestat_reg;
- u32 dsplinoff_reg;
- u32 dspsurf_reg;
-
- u32 mipi_intr_stat_reg;
- u32 mipi_lp_gen_data_reg;
- u32 mipi_hs_gen_data_reg;
- u32 mipi_lp_gen_ctrl_reg;
- u32 mipi_hs_gen_ctrl_reg;
- u32 mipi_gen_fifo_stat_reg;
- u32 mipi_data_addr_reg;
- u32 mipi_data_len_reg;
- u32 mipi_cmd_addr_reg;
- u32 mipi_cmd_len_reg;
-};
-
-extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe);
-extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u8 param, u8 param_num, bool hs);
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs);
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
- u8 param1, u8 param_num, bool hs);
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs);
-/* Read interfaces */
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u32 *data, u16 len, bool hs);
-
-#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
deleted file mode 100644
index aae2d358364c..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ /dev/null
@@ -1,966 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2006-2007 Intel Corporation
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- */
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-
-#include "framebuffer.h"
-#include "gma_display.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "psb_intel_reg.h"
-
-/* Hardcoded currently */
-static int ksel = KSEL_CRYSTAL_19;
-
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct mrst_limit_t {
- struct psb_intel_range_t dot, m, p1;
-};
-
-struct mrst_clock_t {
- /* derived values */
- int dot;
- int m;
- int p1;
-};
-
-#define COUNT_MAX 0x10000000
-
-void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int count, temp;
-
- switch (pipe) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- gma_wait_for_vblank(dev);
- return;
-
- /* Wait for the pipe disable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_PIPE_STATE) == 0)
- break;
- }
-}
-
-void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int count, temp;
-
- switch (pipe) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- gma_wait_for_vblank(dev);
- return;
-
- /* Wait for the pipe enable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(map->conf);
- if (temp & PIPEACONF_PIPE_STATE)
- break;
- }
-}
-
-/*
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use.
- */
-static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
-{
- u32 pfit_control;
-
- pfit_control = REG_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- return (pfit_control >> 29) & 0x3;
-}
-
-static int check_fb(struct drm_framebuffer *fb)
-{
- if (!fb)
- return 0;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- case 16:
- case 24:
- case 32:
- return 0;
- default:
- DRM_ERROR("Unknown color depth\n");
- return -EINVAL;
- }
-}
-
-static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret;
-
- dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
-
- /* no fb bound */
- if (!fb) {
- dev_dbg(dev->dev, "No FB bound\n");
- return 0;
- }
-
- ret = check_fb(fb);
- if (ret)
- return ret;
-
- if (pipe > 2) {
- DRM_ERROR("Illegal Pipe Number.\n");
- return -EINVAL;
- }
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- start = to_gtt_range(fb->obj[0])->offset;
- offset = y * fb->pitches[0] + x * fb->format->cpp[0];
-
- REG_WRITE(map->stride, fb->pitches[0]);
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (fb->format->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
- start, offset, x, y);
- REG_WRITE(map->linoff, offset);
- REG_READ(map->linoff);
- REG_WRITE(map->surf, start);
- REG_READ(map->surf);
-
- gma_power_end(dev);
-
- return 0;
-}
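The base/offset programming above follows the usual linear framebuffer arithmetic; a quick check with invented numbers:

/*
 * Illustrative values only:
 *   x = 8, y = 2, pitches[0] = 4096, cpp[0] = 4 (32bpp)
 *   offset = 2 * 4096 + 8 * 4 = 8224 bytes past the GTT start address
 */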
-
-/*
- * Disable the pipe, plane and PLL.
- */
-void mdfld_disable_crtc(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- dev_dbg(dev->dev, "pipe = %d\n", pipe);
-
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* FIXME_JLIU7 MDFLD_PO revisit */
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(map->conf, temp);
- REG_READ(map->conf);
-
- /* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(map->dpll);
- if (temp & DPLL_VCO_ENABLE) {
- if ((pipe != 1 &&
- !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
- & PIPEACONF_ENABLE)) || pipe == 1) {
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
-
- if (!(temp & MDFLD_PWR_GATE_EN)) {
- /* gating power of DPLL */
- REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(5000);
- }
- }
- }
-
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 pipeconf = dev_priv->pipeconf[pipe];
- u32 temp;
- int timeout = 0;
-
- dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-
- /* Note: Old code uses the pipe A stat register for pipe B, but that
- appears to be a bug */
-
- if (!gma_power_begin(dev, true))
- return;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
-
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- /* When ungating the DPLL power, we need to wait 0.5 us
- before enabling the VCO */
- if (temp & MDFLD_PWR_GATE_EN) {
- temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(map->dpll, temp);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
-
- /**
- * wait for DSI PLL to lock
- * NOTE: only need to poll status of pipe 0 and pipe 1,
- * since both MIPI pipes share the same PLL.
- */
- while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
- }
-
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(map->conf, pipeconf);
-
- /* Wait for the pipe enable to take effect. */
- mdfldWaitForPipeEnable(dev, pipe);
- }
-
- /* Workaround for sighting 3741701: random X blank display. */
- /* Perform the workaround in video mode only, on pipe A or C. */
- if (pipe == 0 || pipe == 2) {
- REG_WRITE(map->status, REG_READ(map->status));
- msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(map->status))
- dev_dbg(dev->dev, "OK");
- else {
- dev_dbg(dev->dev, "STUCK!!!!");
- /*shutdown controller*/
- temp = REG_READ(map->cntr);
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(map->base, REG_READ(map->base));
- /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
- REG_WRITE(0xb048, 1);
- msleep(100);
- temp = REG_READ(map->conf);
- temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(map->conf, temp);
- msleep(100); /*wait for pipe disable*/
- REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
- msleep(100);
- REG_WRITE(0xb004, REG_READ(0xb004));
- /* try to bring the controller back up again*/
- REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(map->cntr);
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(map->base, REG_READ(map->base));
- /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
- REG_WRITE(0xb048, 2);
- msleep(100);
- temp = REG_READ(map->conf);
- temp |= PIPEACONF_ENABLE;
- REG_WRITE(map->conf, temp);
- }
- }
-
- gma_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
-
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready(dev,
- MIPI_GEN_FIFO_STAT_REG(pipe),
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(map->conf, temp);
- REG_READ(map->conf);
-
- /* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(map->dpll);
- if (temp & DPLL_VCO_ENABLE) {
- if ((pipe != 1 && !((REG_READ(PIPEACONF)
- | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
- || pipe == 1) {
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
- }
- }
- break;
- }
- gma_power_end(dev);
-}
-
-
-#define MDFLD_LIMT_DPLL_19 0
-#define MDFLD_LIMT_DPLL_25 1
-#define MDFLD_LIMT_DPLL_83 2
-#define MDFLD_LIMT_DPLL_100 3
-#define MDFLD_LIMT_DSIPLL_19 4
-#define MDFLD_LIMT_DSIPLL_25 5
-#define MDFLD_LIMT_DSIPLL_83 6
-#define MDFLD_LIMT_DSIPLL_100 7
-
-#define MDFLD_DOT_MIN 19750
-#define MDFLD_DOT_MAX 120000
-#define MDFLD_DPLL_M_MIN_19 113
-#define MDFLD_DPLL_M_MAX_19 155
-#define MDFLD_DPLL_P1_MIN_19 2
-#define MDFLD_DPLL_P1_MAX_19 10
-#define MDFLD_DPLL_M_MIN_25 101
-#define MDFLD_DPLL_M_MAX_25 130
-#define MDFLD_DPLL_P1_MIN_25 2
-#define MDFLD_DPLL_P1_MAX_25 10
-#define MDFLD_DPLL_M_MIN_83 64
-#define MDFLD_DPLL_M_MAX_83 64
-#define MDFLD_DPLL_P1_MIN_83 2
-#define MDFLD_DPLL_P1_MAX_83 2
-#define MDFLD_DPLL_M_MIN_100 64
-#define MDFLD_DPLL_M_MAX_100 64
-#define MDFLD_DPLL_P1_MIN_100 2
-#define MDFLD_DPLL_P1_MAX_100 2
-#define MDFLD_DSIPLL_M_MIN_19 131
-#define MDFLD_DSIPLL_M_MAX_19 175
-#define MDFLD_DSIPLL_P1_MIN_19 3
-#define MDFLD_DSIPLL_P1_MAX_19 8
-#define MDFLD_DSIPLL_M_MIN_25 97
-#define MDFLD_DSIPLL_M_MAX_25 140
-#define MDFLD_DSIPLL_P1_MIN_25 3
-#define MDFLD_DSIPLL_P1_MAX_25 9
-#define MDFLD_DSIPLL_M_MIN_83 33
-#define MDFLD_DSIPLL_M_MAX_83 92
-#define MDFLD_DSIPLL_P1_MIN_83 2
-#define MDFLD_DSIPLL_P1_MAX_83 3
-#define MDFLD_DSIPLL_M_MIN_100 97
-#define MDFLD_DSIPLL_M_MAX_100 140
-#define MDFLD_DSIPLL_P1_MIN_100 3
-#define MDFLD_DSIPLL_P1_MAX_100 9
-
-static const struct mrst_limit_t mdfld_limits[] = {
- { /* MDFLD_LIMT_DPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
- },
- { /* MDFLD_LIMT_DSIPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DSIPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DSIPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DSIPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
- },
-};
-
-#define MDFLD_M_MIN 21
-#define MDFLD_M_MAX 180
-static const u32 mdfld_m_converts[] = {
-/* M configuration table from 9-bit LFSR table */
- 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
- 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
- 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
- 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
- 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
- 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
- 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
- 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
- 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
- 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
- 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
- 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
- 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
- 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
- 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
- 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
-};
-
-static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
-{
- const struct mrst_limit_t *limit = NULL;
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
- || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
- } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
- } else {
- limit = NULL;
- dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
- }
-
- return limit;
-}
-
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
-{
- clock->dot = (refclk * clock->m) / clock->p1;
-}
-
-/**
- * Returns a set of divisors for the desired target clock with the given refclk,
- * or FALSE if no usable set is found. Divisor values are the actual
- * divisors used in the dot clock equation (dot = refclk * m / p1).
- */
-static bool
-mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
- struct mrst_clock_t *best_clock)
-{
- struct mrst_clock_t clock;
- const struct mrst_limit_t *limit = mdfld_limit(crtc);
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
- for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- mdfld_clock(refclk, &clock);
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- return err != target;
-}
-
-static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int refclk = 0;
- int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
- clk_tmp = 0;
- struct mrst_clock_t clock;
- bool ok;
- u32 dpll = 0, fp = 0;
- bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct gma_encoder *gma_encoder = NULL;
- uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int timeout = 0;
- int ret;
-
- dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-
- ret = check_fb(crtc->primary->fb);
- if (ret)
- return ret;
-
- dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
- adjusted_mode->hdisplay);
- dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
- adjusted_mode->vdisplay);
- dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
- adjusted_mode->hsync_start);
- dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
- adjusted_mode->hsync_end);
- dev_dbg(dev->dev, "adjusted_htotal = %d\n",
- adjusted_mode->htotal);
- dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
- adjusted_mode->vsync_start);
- dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
- adjusted_mode->vsync_end);
- dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
- adjusted_mode->vtotal);
- dev_dbg(dev->dev, "adjusted_clock = %d\n",
- adjusted_mode->clock);
- dev_dbg(dev->dev, "hdisplay = %d\n",
- mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay = %d\n",
- mode->vdisplay);
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- memcpy(&gma_crtc->saved_mode, mode,
- sizeof(struct drm_display_mode));
- memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
- sizeof(struct drm_display_mode));
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- encoder = connector->encoder;
- if (!encoder)
- continue;
-
- if (encoder->crtc != crtc)
- continue;
-
- gma_encoder = gma_attached_encoder(connector);
-
- switch (gma_encoder->type) {
- case INTEL_OUTPUT_MIPI:
- is_mipi = true;
- break;
- case INTEL_OUTPUT_MIPI2:
- is_mipi2 = true;
- break;
- case INTEL_OUTPUT_HDMI:
- is_hdmi = true;
- break;
- }
- }
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable the panel fitter if it was on our pipe */
- if (psb_intel_panel_fitter_pipe(dev) == pipe)
- REG_WRITE(PFIT_CONTROL, 0);
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- if (pipe == 1) {
- /* FIXME: To make HDMI display with 864x480 (TPO), 480x864
- * (PYR) or 480x854 (TMD), set the sprite width/height and
- * source image size registers with the adjusted mode for
- * pipe B.
- */
-
- /*
- * The defined sprite rectangle must always be completely
- * contained within the displayable area of the screen image
- * (frame buffer).
- */
- REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
- | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
- /* Set the CRTC with encoder mode. */
- REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
- | (mode->crtc_vdisplay - 1));
- } else {
- REG_WRITE(map->size,
- ((mode->crtc_vdisplay - 1) << 16) |
- (mode->crtc_hdisplay - 1));
- REG_WRITE(map->src,
- ((mode->crtc_hdisplay - 1) << 16) |
- (mode->crtc_vdisplay - 1));
- }
-
- REG_WRITE(map->pos, 0);
-
- if (gma_encoder)
- drm_object_property_get_value(&connector->base,
- dev->mode_config.scaling_mode_property, &scalingType);
-
- if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
- /* Medfield doesn't have register support for centering so we
- * need to mess with the h/vblank and h/vsync start and ends
- * to get centering
- */
- int offsetX = 0, offsetY = 0;
-
- offsetX = (adjusted_mode->crtc_hdisplay -
- mode->crtc_hdisplay) / 2;
- offsetY = (adjusted_mode->crtc_vdisplay -
- mode->crtc_vdisplay) / 2;
-
- REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
- offsetX - 1) |
- ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
- offsetX - 1) |
- ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
- offsetY - 1) |
- ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
- offsetY - 1) |
- ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
- } else {
- REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
- }
-
- /* Flush the plane changes */
- {
- const struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
-
- /* setup pipeconf */
- dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
-
- /* Set up the display plane register */
- dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
- dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
- dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
-
- if (is_mipi2)
- goto mrst_crtc_mode_set_exit;
- clk = adjusted_mode->clock;
-
- if (is_hdmi) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
- refclk = 19200;
-
- if (is_mipi || is_mipi2)
- clk_n = 1, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 1, clk_p2 = 10;
- } else if (ksel == KSEL_BYPASS_25) {
- refclk = 25000;
-
- if (is_mipi || is_mipi2)
- clk_n = 1, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 1, clk_p2 = 10;
- } else if ((ksel == KSEL_BYPASS_83_100) &&
- dev_priv->core_freq == 166) {
- refclk = 83000;
-
- if (is_mipi || is_mipi2)
- clk_n = 4, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 4, clk_p2 = 10;
- } else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200)) {
- refclk = 100000;
- if (is_mipi || is_mipi2)
- clk_n = 4, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 4, clk_p2 = 10;
- }
-
- if (is_mipi)
- clk_byte = dev_priv->bpp / 8;
- else if (is_mipi2)
- clk_byte = dev_priv->bpp2 / 8;
-
- clk_tmp = clk * clk_n * clk_p2 * clk_byte;
-
- dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
- clk, clk_n, clk_p2);
- dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
- adjusted_mode->clock, clk_tmp);
-
- ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
-
- if (!ok) {
- DRM_ERROR
- ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
- } else {
- m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
-
- dev_dbg(dev->dev, "dot clock = %d,"
- "m = %d, p1 = %d, m_conv = %d.\n",
- clock.dot, clock.m,
- clock.p1, m_conv);
- }
-
- dpll = REG_READ(map->dpll);
-
- if (dpll & DPLL_VCO_ENABLE) {
- dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
-
- /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- /* reset M1, N1 & P1 */
- REG_WRITE(map->fp0, 0);
- dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- /* When ungating power of the DPLL, wait 0.5us before
- * enabling the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
- dpll = 0;
-
- if (is_hdmi)
- dpll |= MDFLD_VCO_SEL;
-
- fp = (clk_n / 2) << 16;
- fp |= m_conv;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 2)) << 17;
-
- } else {
- dpll = 0x00800000;
- fp = 0x000000c1;
- }
-
- REG_WRITE(map->fp0, fp);
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
-
- /* wait for DSI PLL to lock */
- while (timeout < 20000 &&
- !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
-
- if (is_mipi)
- goto mrst_crtc_mode_set_exit;
-
- dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
-
- REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
- REG_READ(map->conf);
-
- /* Wait for the pipe enable to take effect. */
- REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
- gma_wait_for_vblank(dev);
-
-mrst_crtc_mode_set_exit:
-
- gma_power_end(dev);
-
- return 0;
-}
-
-const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
- .dpms = mdfld_crtc_dpms,
- .mode_set = mdfld_crtc_mode_set,
- .mode_set_base = mdfld__intel_pipe_set_base,
- .prepare = gma_crtc_prepare,
- .commit = gma_crtc_commit,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
deleted file mode 100644
index c95966bb0c96..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#include "mdfld_output.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-
-#include "tc35876x-dsi-lvds.h"
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- return dev_priv->mdfld_panel_id;
-}
-
-static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
- int p_type)
-{
- switch (p_type) {
- case TPO_VID:
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
- break;
- case TC35876X:
- tc35876x_init(dev);
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
- break;
- case TMD_VID:
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
- break;
- case HDMI:
-/* if (dev_priv->mdfld_hdmi_present)
- mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
- break;
- }
-}
-
-
-int mdfld_output_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /* FIXME: hardcoded for now */
- dev_priv->mdfld_panel_id = TC35876X;
- /* MIPI panel 1 */
- mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
- /* HDMI panel */
- mdfld_init_panel(dev, 1, HDMI);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
deleted file mode 100644
index 37a516cc56be..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef MDFLD_OUTPUT_H
-#define MDFLD_OUTPUT_H
-
-#include "psb_drv.h"
-
-#define TPO_PANEL_WIDTH 84
-#define TPO_PANEL_HEIGHT 46
-#define TMD_PANEL_WIDTH 39
-#define TMD_PANEL_HEIGHT 71
-
-struct mdfld_dsi_config;
-
-enum panel_type {
- TPO_VID,
- TMD_VID,
- HDMI,
- TC35876X,
-};
-
-struct panel_info {
- u32 width_mm;
- u32 height_mm;
- /* Other info */
-};
-
-struct panel_funcs {
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
- struct drm_display_mode * (*get_config_mode)(struct drm_device *);
- int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
- int (*reset)(struct drm_device *, int);
- void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
-};
-
-int mdfld_output_init(struct drm_device *dev);
-
-struct backlight_device *mdfld_get_backlight_device(void);
-int mdfld_set_brightness(struct backlight_device *bd);
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe);
-
-extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
-
-extern const struct panel_funcs mdfld_tmd_vid_funcs;
-extern const struct panel_funcs mdfld_tpo_vid_funcs;
-
-extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
-#endif
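
struct panel_funcs, removed above, is a plain C operations table: each panel type (TMD, TPO, TC35876X) supplies its own entries and the DSI output code dispatches through them. A hedged sketch of such a consumer; panel_bring_up() is a hypothetical helper, not a function from this driver:

/* Hypothetical caller dispatching through struct panel_funcs. */
static void panel_bring_up(struct drm_device *dev,
                           const struct panel_funcs *funcs,
                           struct mdfld_dsi_config *dsi_config, int pipe)
{
        struct panel_info pi;

        if (funcs->get_panel_info &&
            funcs->get_panel_info(dev, pipe, &pi) == 0)
                dev_dbg(dev->dev, "panel %ux%u mm\n",
                        pi.width_mm, pi.height_mm);

        if (funcs->reset)
                funcs->reset(dev, pipe);                /* optional hard reset */

        if (funcs->drv_ic_init)
                funcs->drv_ic_init(dsi_config, pipe);   /* panel driver IC setup */
}

The NULL checks matter: mdfld_tpo_vid_funcs below leaves .reset and .drv_ic_init unset.
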
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
deleted file mode 100644
index 25e897b98f86..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- * Gideon Eaton <eaton.
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false; /*Disable GCT for now*/
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- if (use_gct) {
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay + \
- ((ti->hsync_offset_hi << 8) | \
- ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start + \
- ((ti->hsync_pulse_width_hi << 8) | \
- ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
- ti->hblank_lo);
- mode->vsync_start = \
- mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
- ti->vsync_offset_lo);
- mode->vsync_end = \
- mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
- ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay + \
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
-
- dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
- dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
- dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
- dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
- dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
- dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
- dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
- dev_dbg(dev->dev, "clock is %d\n", mode->clock);
- } else {
- mode->hdisplay = 480;
- mode->vdisplay = 854;
- mode->hsync_start = 487;
- mode->hsync_end = 490;
- mode->htotal = 499;
- mode->vsync_start = 861;
- mode->vsync_end = 865;
- mode->vtotal = 873;
- mode->clock = 33264;
- }
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static int tmd_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TMD_PANEL_WIDTH;
- pi->height_mm = TMD_PANEL_HEIGHT;
-
- return 0;
-}
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_init_TMD_MIPI
- *
- * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
- * restore_display_registers. Since this function does not
- * acquire the mutex, it is important that the calling function
- * does!
-\* ************************************************************************* */
-
-/* FIXME: make the below data u8 instead of u32; note byte order! */
-static u32 tmd_cmd_mcap_off[] = {0x000000b2};
-static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
-static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
-static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
-static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
-static u32 tmd_cmd_set_mode[] = {0x000000b3};
-static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
-static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
-static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
-static u32 tmd_cmd_set_video_mode[] = {0x00000153};
-/* no auto_bl; needs to be added in the future */
-static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
-static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
-
-static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- DRM_INFO("Enter mdfld init TMD MIPI display.\n");
-
- if (!sender) {
- DRM_ERROR("Cannot get sender\n");
- return;
- }
-
- if (dsi_config->dvr_ic_inited)
- return;
-
- msleep(3);
-
- /* FIXME: make the below data u8 instead of u32; note byte order! */
-
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
- sizeof(tmd_cmd_mcap_off), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
- sizeof(tmd_cmd_enable_lane_switch), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
- sizeof(tmd_cmd_set_lane_num), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
- sizeof(tmd_cmd_pushing_clock0), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
- sizeof(tmd_cmd_pushing_clock1), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
- sizeof(tmd_cmd_set_mode), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
- sizeof(tmd_cmd_set_sync_pulse_mode), false);
- mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
- sizeof(tmd_cmd_set_column), false);
- mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
- sizeof(tmd_cmd_set_page), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
- sizeof(tmd_cmd_set_video_mode), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
- sizeof(tmd_cmd_enable_backlight), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
- sizeof(tmd_cmd_set_backlight_dimming), false);
-
- dsi_config->dvr_ic_inited = 1;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tmd_vid_funcs = {
- .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
- .get_config_mode = &tmd_vid_get_config_mode,
- .get_panel_info = tmd_vid_get_panel_info,
- .reset = mdfld_dsi_panel_reset,
- .drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
-};
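
The repeated FIXME above ("make the below data u8 instead of u32; note byte order!") flags a portability trap: mdfld_dsi_send_gen_long() consumes a byte buffer, so the u32 initializers only produce the intended wire bytes on a little-endian CPU, where 0x000000b2 is stored as b2 00 00 00. A hedged sketch of the u8 form the FIXME asks for, shown for one command only:

/* Sketch only: explicit byte order, independent of host endianness.
 * Equivalent to the u32 0x000000b2 on little-endian x86. */
static u8 tmd_cmd_mcap_off_u8[] = { 0xb2, 0x00, 0x00, 0x00 };

mdfld_dsi_send_gen_long(sender, tmd_cmd_mcap_off_u8,
                        sizeof(tmd_cmd_mcap_off_u8), false);

Written this way for every command table, both the (u8 *) casts and the byte-order caveat disappear.
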
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
deleted file mode 100644
index 11845978fb0a..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dpi.h"
-
-static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 873;
- mode->hsync_end = 876;
- mode->htotal = 887;
- mode->vsync_start = 487;
- mode->vsync_end = 490;
- mode->vtotal = 499;
- mode->clock = 33264;
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static int tpo_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TPO_PANEL_WIDTH;
- pi->height_mm = TPO_PANEL_HEIGHT;
-
- return 0;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tpo_vid_funcs = {
- .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
- .get_config_mode = &tpo_vid_get_config_mode,
- .get_panel_info = tpo_vid_get_panel_info,
-};
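
With fixed timings like these, the refresh rate falls out of the mode arithmetic: pixel clock divided by pixels per frame. For the TPO mode above, 33264000 / (887 * 499) ≈ 75 Hz; the TMD mode removed earlier gives 33264000 / (499 * 873) ≈ 76 Hz. A tiny standalone check, mirroring the standard DRM vrefresh calculation:

#include <stdio.h>

int main(void)
{
        int clock = 33264;              /* kHz, from tpo_vid_get_config_mode() */
        int htotal = 887, vtotal = 499;

        /* vrefresh = pixel clock / (pixels per line * lines per frame) */
        printf("%d Hz\n", clock * 1000 / (htotal * vtotal));
        return 0;
}
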
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 8ab44fec4bfa..68e787924ed0 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -19,8 +19,9 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
uint32_t fuse_value = 0;
uint32_t fuse_value_tmp = 0;
@@ -93,7 +94,8 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -269,11 +271,12 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, PCI_DEVFN(2, 0));
int ret = -1;
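
The pattern in the hunks above repeats through the rest of this series: drm_device no longer carries a pdev shortcut, so PCI users recover the pci_dev from the generic struct device via to_pci_dev(), a container_of() over the embedded struct device. A sketch; gma_pdev() is a hypothetical convenience wrapper, not part of the driver:

/* Hypothetical helper showing the accessor used throughout this series. */
static inline struct pci_dev *gma_pdev(struct drm_device *dev)
{
        return to_pci_dev(dev->dev);    /* dev->dev is the underlying bus device */
}
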
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 505044c9a673..d856580b8111 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -48,7 +48,6 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
return offset >> PSB_PDE_SHIFT;
}
-#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
@@ -63,13 +62,6 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
psb_clflush(addr);
mb();
}
-#else
-
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-{;
-}
-
-#endif
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
@@ -293,7 +285,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
*ptes++ = pd->invalid_pte;
-#if defined(CONFIG_X86)
if (pd->driver->has_clflush && pd->hw_context != -1) {
mb();
for (i = 0; i < clflush_count; ++i) {
@@ -302,7 +293,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
}
mb();
}
-#endif
kunmap_atomic(v);
spin_unlock(lock);
@@ -313,8 +303,8 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
- unsigned long addr)
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
struct psb_mmu_pt *pt;
@@ -416,15 +406,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
return pd;
}
-/* Returns the physical address of the PD shared by sgx/msvdx */
-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
-{
- struct psb_mmu_pd *pd;
-
- pd = psb_mmu_get_default_pd(driver);
- return page_to_pfn(pd->p) << PAGE_SHIFT;
-}
-
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
@@ -468,7 +449,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
driver->has_clflush = 0;
-#if defined(CONFIG_X86)
if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
@@ -485,7 +465,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
driver->clflush_mask = driver->clflush_add - 1;
driver->clflush_mask = ~driver->clflush_mask;
}
-#endif
up_write(&driver->sem);
return driver;
@@ -495,7 +474,6 @@ out_err1:
return NULL;
}
-#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
@@ -543,14 +521,6 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
}
mb();
}
-#else
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
- uint32_t num_pages, uint32_t desired_tile_stride,
- uint32_t hw_tile_stride)
-{
- drm_ttm_cache_flush();
-}
-#endif
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages)
@@ -690,7 +660,7 @@ out:
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver);
- return 0;
+ return ret;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
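
The one-line "return ret" fix above closes a swallowed-error bug: the function recorded failures in ret but then reported success unconditionally. A minimal hedged illustration of the fixed shape (all names are placeholders):

/* Fixed shape: a failure recorded in 'ret' is actually propagated,
 * instead of being masked by a hard-coded 'return 0'. */
int map_range(void)
{
        int ret = 0;

        if (step_that_can_fail())
                ret = -ENOMEM;          /* failure noted here */

        flush_if_needed();              /* cleanup still runs either way */

        return ret;                     /* ...and is now reported to callers */
}
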
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 900e5499249d..129f87971002 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -174,7 +174,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
return min_error == 0;
}
-/**
+/*
* Returns a set of divisors for the desired target clock with the given refclk,
 * or FALSE if no usable set is found. Divisor values are the actual divisors.
*/
@@ -205,7 +205,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
return err != target;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -337,7 +337,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
gma_power_end(dev);
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 8754290b0e23..aff0534831ef 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -10,9 +10,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_scu_ipc.h>
-
#include <drm/drm.h>
#include "intel_bios.h"
@@ -504,9 +501,10 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
-
- if (pci_enable_msi(dev->pdev))
+
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = oaktrail_regmap;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e28107061148..fc9a34ed58bd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
- if (i2c_dev == NULL) {
- DRM_ERROR("Can't allocate interface\n");
- ret = -ENOMEM;
- goto exit;
- }
+ if (!i2c_dev)
+ return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
- goto err;
+ goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
- return ret;
+ if (ret) {
+ DRM_ERROR("Failed to add I2C adapter\n");
+ goto free_irq;
+ }
-err:
+ return 0;
+
+free_irq:
+ free_irq(dev->irq, hdmi_dev);
+free_dev:
kfree(i2c_dev);
-exit:
+
return ret;
}
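
The reworked oaktrail_hdmi_i2c_init() error path above follows the usual kernel unwind idiom: one label per acquired resource, placed so a failure at step N jumps to the label that releases steps N-1 downward, in reverse order of acquisition. A generic hedged sketch (all names are placeholders):

int setup(void)
{
        int ret;

        ret = grab_a();
        if (ret)
                return ret;             /* nothing to unwind yet */

        ret = grab_b();
        if (ret)
                goto undo_a;

        ret = grab_c();
        if (ret)
                goto undo_b;

        return 0;

undo_b:
        release_b();                    /* reverse order of acquisition */
undo_a:
        release_a();
        return ret;
}
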
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 2828360153d1..432bdcc57ac9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -29,7 +29,7 @@
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BRIGHTNESS_MAX_LEVEL 100
-/**
+/*
* Sets the power state for the panel.
*/
static void oaktrail_lvds_set_power(struct drm_device *dev,
@@ -60,7 +60,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while (pp_status & PP_ON);
dev_priv->is_lvds_on = false;
- pm_request_idle(&dev->pdev->dev);
+ pm_request_idle(dev->dev);
}
gma_power_end(dev);
}
@@ -282,6 +282,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
/**
* oaktrail_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index baaf8212e01d..1d2dd6ea1c71 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -66,12 +66,12 @@
static int get_clock(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_CLOCK;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
return val;
@@ -80,12 +80,12 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_DATA;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
return val;
@@ -145,7 +145,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index eab6d889bde9..a1ffc6a1c255 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -305,12 +305,13 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
void __iomem *base;
int err = 0;
- pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy);
if (opregion_phy == 0) {
DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
return -ENOTSUPP;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index bea8578846d1..56ef88237ef6 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -70,8 +70,8 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- pm_runtime_disable(&dev->pdev->dev);
- pm_runtime_set_suspended(&dev->pdev->dev);
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
}
/**
@@ -93,6 +93,7 @@ static void gma_suspend_display(struct drm_device *dev)
/**
* gma_resume_display - resume display side logic
+ * @pdev: PCI device
*
* Resume the display hardware restoring state and enabling
* as necessary.
@@ -146,7 +147,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
/**
* gma_resume_pci - resume helper
- * @dev: our PCI device
+ * @pdev: our PCI device
*
* Perform the resume processing on our PCI device state - rewrite
* register state and re-enable the PCI device
@@ -178,8 +179,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
/**
* gma_power_suspend - bus callback for suspend
- * @pdev: our PCI device
- * @state: suspend type
+ * @_dev: our device
*
* Called back by the PCI layer during a suspend of the system. We
* perform the necessary shut down steps and save enough state that
@@ -208,7 +208,7 @@ int gma_power_suspend(struct device *_dev)
/**
* gma_power_resume - resume power
- * @pdev: PCI device
+ * @_dev: our device
*
* Resume the PCI side of the graphics and then the displays
*/
@@ -249,6 +249,7 @@ bool gma_power_is_on(struct drm_device *dev)
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -256,7 +257,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Power already on ? */
if (dev_priv->display_power) {
dev_priv->display_count++;
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
}
@@ -264,11 +265,11 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
goto out_false;
/* Ok power up needed */
- ret = gma_resume_pci(dev->pdev);
+ ret = gma_resume_pci(pdev);
if (ret == 0) {
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
dev_priv->display_count++;
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
@@ -293,7 +294,7 @@ void gma_power_end(struct drm_device *dev)
dev_priv->display_count--;
WARN_ON(dev_priv->display_count < 0);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
- pm_runtime_put(&dev->pdev->dev);
+ pm_runtime_put(dev->dev);
}
int psb_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cc2d59e8471d..0bcab065242c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -46,13 +46,12 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
* PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
* PowerVR SGX535 - Moorestown - Intel GMA 600
* PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
- * PowerVR SGX540 - Medfield - Intel Atom Z2460
- * PowerVR SGX544MP2 - Medfield -
* PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
* PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
* N2800
*/
static const struct pci_device_id pciidlist[] = {
+ /* Poulsbo */
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600)
@@ -66,17 +65,7 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
#endif
-#if defined(CONFIG_DRM_MEDFIELD)
- { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-#endif
-#if defined(CONFIG_DRM_GMA3600)
+ /* Cedartrail */
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
@@ -93,7 +82,6 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
-#endif
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -208,6 +196,7 @@ static void psb_driver_unload(struct drm_device *dev)
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_psb_private *dev_priv;
unsigned long resource_start, resource_len;
unsigned long irqflags;
@@ -227,11 +216,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pg = &dev_priv->gtt;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
dev_priv->num_pipe = dev_priv->ops->pipes;
- resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+ resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE);
dev_priv->vdc_reg =
ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
@@ -244,7 +233,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
goto out_err;
if (IS_MRST(dev)) {
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
dev_priv->aux_pdev =
pci_get_domain_bus_and_slot(domain, 0,
@@ -312,6 +301,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_err;
+ ret = -ENOMEM;
+
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
@@ -359,7 +350,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- drm_irq_install(dev, dev->pdev->irq);
+ drm_irq_install(dev, pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -385,8 +376,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
psb_intel_opregion_enable_asle(dev);
#if 0
/* Enable runtime pm at last */
- pm_runtime_enable(&dev->pdev->dev);
- pm_runtime_set_active(&dev->pdev->dev);
+ pm_runtime_enable(dev->dev);
+ pm_runtime_set_active(dev->dev);
#endif
/* Intel drm driver load is done, continue doing pvr load */
return 0;
@@ -415,7 +406,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
runtime_allowed++;
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
dev_priv->rpm_enabled = 1;
}
return drm_ioctl(filp, cmd, arg);
@@ -437,7 +428,6 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_disable_device;
}
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5b7f7a312d53..694495070c65 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -40,19 +40,16 @@ enum {
CHIP_PSB_8108 = 0, /* Poulsbo */
CHIP_PSB_8109 = 1, /* Poulsbo */
CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
- CHIP_MFLD_0130 = 3, /* Medfield */
};
-#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
+#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108)
+#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100)
+#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0)
/* Hardware offsets */
#define PSB_VDC_OFFSET 0x00000000
#define PSB_VDC_SIZE 0x000080000
#define MRST_MMIO_SIZE 0x0000C0000
-#define MDFLD_MMIO_SIZE 0x000100000
#define PSB_SGX_SIZE 0x8000
#define PSB_SGX_OFFSET 0x00040000
#define MRST_SGX_OFFSET 0x00080000
@@ -109,8 +106,6 @@ enum {
#define _PSB_DPST_PIPEA_FLAG (1<<6)
#define _PSB_PIPEA_EVENT_FLAG (1<<6)
#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
-#define _MDFLD_MIPIA_FLAG (1<<16)
-#define _MDFLD_MIPIC_FLAG (1<<17)
#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
#define _PSB_IRQ_SGX_FLAG (1<<18)
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
@@ -119,13 +114,6 @@ enum {
#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
_PSB_VSYNC_PIPEB_FLAG)
-/* This flag includes all the display IRQ bits except the vblank irqs. */
-#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
- _MDFLD_PIPEB_EVENT_FLAG | \
- _PSB_PIPEA_EVENT_FLAG | \
- _PSB_VSYNC_PIPEA_FLAG | \
- _MDFLD_MIPIA_FLAG | \
- _MDFLD_MIPIC_FLAG)
#define PSB_INT_IDENTITY_R 0x20A4
#define PSB_INT_MASK_R 0x20A8
#define PSB_INT_ENABLE_R 0x20A0
@@ -191,25 +179,6 @@ enum {
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
-#define MDFLD_PNW_B0 0x04
-#define MDFLD_PNW_C0 0x08
-
-#define MDFLD_DSR_2D_3D_0 (1 << 0)
-#define MDFLD_DSR_2D_3D_2 (1 << 1)
-#define MDFLD_DSR_CURSOR_0 (1 << 2)
-#define MDFLD_DSR_CURSOR_2 (1 << 3)
-#define MDFLD_DSR_OVERLAY_0 (1 << 4)
-#define MDFLD_DSR_OVERLAY_2 (1 << 5)
-#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
-#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
-#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
-#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
-
-#define MDFLD_DSR_RR 45
-#define MDFLD_DPU_ENABLE (1 << 31)
-#define MDFLD_DSR_FULLSCREEN (1 << 30)
-#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR)
-
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -382,16 +351,6 @@ struct psb_state {
uint32_t savePWM_CONTROL_LOGIC;
};
-struct medfield_state {
- uint32_t saveMIPI;
- uint32_t saveMIPI_C;
-
- uint32_t savePFIT_CONTROL;
- uint32_t savePFIT_PGM_RATIOS;
- uint32_t saveHDMIPHYMISCCTL;
- uint32_t saveHDMIB_CONTROL;
-};
-
struct cdv_state {
uint32_t saveDSPCLK_GATE_D;
uint32_t saveRAMCLK_GATE_D;
@@ -417,7 +376,6 @@ struct psb_save_area {
uint32_t saveVBT;
union {
struct psb_state psb;
- struct medfield_state mdfld;
struct cdv_state cdv;
};
uint32_t saveBLC_PWM_CTL2;
@@ -428,6 +386,8 @@ struct psb_ops;
#define PSB_NUM_PIPE 3
+struct intel_scu_ipc_dev;
+
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
@@ -567,6 +527,7 @@ struct drm_psb_private {
* Used for modifying backlight from
* xrandr -- consider removing and using HAL instead
*/
+ struct intel_scu_ipc_dev *scu;
struct backlight_device *backlight_device;
struct drm_property *backlight_property;
bool backlight_enabled;
@@ -590,8 +551,6 @@ struct drm_psb_private {
u32 pipeconf[3];
u32 dspcntr[3];
- int mdfld_panel_id;
-
bool dplla_96mhz; /* DPLL data from the VBT */
struct {
@@ -737,9 +696,6 @@ extern const struct psb_ops psb_chip_ops;
/* oaktrail_device.c */
extern const struct psb_ops oaktrail_chip_ops;
-/* mdlfd_device.c */
-extern const struct psb_ops mdfld_chip_ops;
-
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
@@ -779,25 +735,6 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
-static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
-{
- int mcr = (0x10<<24) | (port << 16) | (offset << 8);
- uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
- pci_write_config_dword(pci_root, 0xD0, mcr);
- pci_read_config_dword(pci_root, 0xD4, &ret_val);
- pci_dev_put(pci_root);
- return ret_val;
-}
-static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
- u32 value)
-{
- int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
- pci_write_config_dword(pci_root, 0xD4, value);
- pci_write_config_dword(pci_root, 0xD0, mcr);
- pci_dev_put(pci_root);
-}
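For reference, a hedged sketch of the sideband encoding the two deleted helpers
used (the 0x10/0x11 opcodes, the 0xF0 byte-enable and the 0xD0/0xD4 config
offsets are taken from the removed code itself; the field names are
interpretation):

/*
 * Both helpers built a message control request and posted it through the
 * PCI root device's config space:
 *
 *   mcr = (opcode << 24) | (port << 16) | (offset << 8) | byte_enables;
 *
 * A read (opcode 0x10) wrote mcr to config register 0xD0 and then fetched
 * the result from 0xD4; a write (opcode 0x11, byte enables 0xF0) staged the
 * data in 0xD4 first and then posted mcr to 0xD0. For example, a write to
 * port 2, offset 4 would issue
 * mcr = (0x11 << 24) | (2 << 16) | (4 << 8) | 0xF0 = 0x110204F0.
 */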
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c5485be17..9c3cb1b80bbd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -71,7 +71,7 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
clock->dot = clock->vco / clock->p;
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 063c66bb946d..ace95d4bdb6f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -216,7 +216,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
dev_err(dev->dev, "set power, chip off!\n");
return;
}
-
+
if (on) {
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
POWER_TARGET_ON);
@@ -626,6 +626,7 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -700,7 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
if (!lvds_priv->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
lvds_priv->i2c_bus->slave_addr = 0x2C;
@@ -719,7 +720,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
/* Set up the DDC bus. */
lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
if (!lvds_priv->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 88653a40aeb5..60306780e16c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -11,7 +11,7 @@
/**
* psb_intel_ddc_probe
- *
+ * @adapter: Associated I2C adaptor
*/
bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
{
@@ -43,6 +43,7 @@ bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: Associated I2C adaptor
*
* Fetch the EDID information from @connector using the DDC bus.
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 835cc924c45a..364ea8f06f9c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -595,7 +595,7 @@ struct dpst_guardband {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
-#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF 0x20e0
#define FW_BLC_SELF_EN (1<<15)
#define DSPARB 0x70030
@@ -789,17 +789,9 @@ struct dpst_guardband {
* MOORESTOWN delta registers
*/
#define MRST_DPLL_A 0x0f014
-#define MDFLD_DPLL_B 0x0f018
-#define MDFLD_INPUT_REF_SEL (1 << 14)
-#define MDFLD_VCO_SEL (1 << 16)
#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
-#define MDFLD_PLL_LATCHEN (1 << 28)
-#define MDFLD_PWR_GATE_EN (1 << 30)
-#define MDFLD_P1_MASK (0x1FF << 17)
#define MRST_FPA0 0x0f040
#define MRST_FPA1 0x0f044
-#define MDFLD_DPLL_DIV0 0x0f048
-#define MDFLD_DPLL_DIV1 0x0f04c
#define MRST_PERF_MODE 0x020f4
/*
@@ -848,7 +840,6 @@ struct dpst_guardband {
#define MRST_DSPABASE 0x7019c
#define MRST_DSPBBASE 0x7119c
-#define MDFLD_DSPCBASE 0x7219c
/*
* Moorestown registers.
@@ -930,7 +921,6 @@ struct dpst_guardband {
#define DEVICE_RESET_REG 0xb01C
#define DPI_RESOLUTION_REG 0xb020
#define RES_V_POS 0x10
-#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 907f966d6f22..355da2856389 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -221,7 +221,7 @@ static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
-/**
+/*
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
@@ -588,7 +588,7 @@ static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdv
&targets, sizeof(targets));
}
-/**
+/*
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
@@ -1818,7 +1818,7 @@ psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
#endif
}
-/**
+/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
@@ -2406,7 +2406,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
- sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.dev.parent = dev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 361e3a0c5ab6..ae9b100e640b 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -10,7 +10,6 @@
#include <drm/drm_vblank.h>
-#include "mdfld_output.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -126,9 +125,8 @@ static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
}
}
-/**
+/*
* Display controller interrupt handler for pipe event.
- *
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
@@ -165,8 +163,7 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
__func__, pipe, PSB_RVDC32(pipe_stat_reg));
- if (pipe_stat_val & PIPE_VBLANK_STATUS ||
- (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+ if (pipe_stat_val & PIPE_VBLANK_STATUS) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
unsigned long flags;
@@ -264,11 +261,6 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
dsp_int = 1;
- /* FIXME: Handle Medfield
- if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
- dsp_int = 1;
- */
-
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
@@ -326,13 +318,6 @@ void psb_irq_preinstall(struct drm_device *dev)
if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
- /* FIXME: Handle Medfield irq mask
- if (dev->vblank[1].enabled)
- dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
- if (dev->vblank[2].enabled)
- dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
- */
-
/* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug)
dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
@@ -505,11 +490,6 @@ int psb_enable_vblank(struct drm_crtc *crtc)
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
- /* Medfield is different - we should perhaps extract out vblank
- and blacklight etc ops */
- if (IS_MFLD(dev))
- return mdfld_enable_te(dev, pipe);
-
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
gma_power_end(dev);
@@ -544,8 +524,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (IS_MFLD(dev))
- mdfld_disable_te(dev, pipe);
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (pipe == 0)
@@ -560,55 +538,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-/*
- * It is used to enable TE interrupt
- */
-int mdfld_enable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
- uint32_t reg_val = 0;
- uint32_t pipeconf_reg = mid_pipeconf(pipe);
-
- if (gma_power_begin(dev, false)) {
- reg_val = REG_READ(pipeconf_reg);
- gma_power_end(dev);
- }
-
- if (!(reg_val & PIPEACONF_ENABLE))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- mid_enable_pipe_event(dev_priv, pipe);
- psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-
- return 0;
-}
-
-/*
- * It is used to disable TE interrupt
- */
-void mdfld_disable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
-
- if (!dev_priv->dsr_enable)
- return;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- mid_disable_pipe_event(dev_priv, pipe);
- psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-}
-
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 4f73998848d1..1b577fa7010a 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -31,6 +31,4 @@ int psb_enable_vblank(struct drm_crtc *crtc);
void psb_disable_vblank(struct drm_crtc *crtc);
u32 psb_get_vblank_counter(struct drm_crtc *crtc);
-int mdfld_enable_te(struct drm_device *dev, int pipe);
-void mdfld_disable_te(struct drm_device *dev, int pipe);
#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
index fb22bac5bb74..2a229a0ef36c 100644
--- a/drivers/gpu/drm/gma500/psb_reg.h
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -550,21 +550,7 @@
#define PSB_PM_SSC 0x20
#define PSB_PM_SSS 0x30
#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
-#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
/* Display SSS register bits are different in A0 vs. B0 */
#define PSB_PWRGT_GFX_MASK 0x3
-#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
-#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
-#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
#define PSB_PWRGT_GFX_MASK_B0 0xc3
-#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
-#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
#endif
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
deleted file mode 100644
index e5bdd99ad453..000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-static struct i2c_client *tc35876x_client;
-static struct i2c_client *cmi_lcd_i2c_client;
-/* Panel GPIOs */
-static struct gpio_desc *bridge_reset;
-static struct gpio_desc *bridge_bl_enable;
-static struct gpio_desc *backlight_voltage;
-
-
-#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-
-/* DSI D-PHY Layer Registers */
-#define D0W_DPHYCONTTX 0x0004
-#define CLW_DPHYCONTRX 0x0020
-#define D0W_DPHYCONTRX 0x0024
-#define D1W_DPHYCONTRX 0x0028
-#define D2W_DPHYCONTRX 0x002C
-#define D3W_DPHYCONTRX 0x0030
-#define COM_DPHYCONTRX 0x0038
-#define CLW_CNTRL 0x0040
-#define D0W_CNTRL 0x0044
-#define D1W_CNTRL 0x0048
-#define D2W_CNTRL 0x004C
-#define D3W_CNTRL 0x0050
-#define DFTMODE_CNTRL 0x0054
-
-/* DSI PPI Layer Registers */
-#define PPI_STARTPPI 0x0104
-#define PPI_BUSYPPI 0x0108
-#define PPI_LINEINITCNT 0x0110
-#define PPI_LPTXTIMECNT 0x0114
-#define PPI_LANEENABLE 0x0134
-#define PPI_TX_RX_TA 0x013C
-#define PPI_CLS_ATMR 0x0140
-#define PPI_D0S_ATMR 0x0144
-#define PPI_D1S_ATMR 0x0148
-#define PPI_D2S_ATMR 0x014C
-#define PPI_D3S_ATMR 0x0150
-#define PPI_D0S_CLRSIPOCOUNT 0x0164
-#define PPI_D1S_CLRSIPOCOUNT 0x0168
-#define PPI_D2S_CLRSIPOCOUNT 0x016C
-#define PPI_D3S_CLRSIPOCOUNT 0x0170
-#define CLS_PRE 0x0180
-#define D0S_PRE 0x0184
-#define D1S_PRE 0x0188
-#define D2S_PRE 0x018C
-#define D3S_PRE 0x0190
-#define CLS_PREP 0x01A0
-#define D0S_PREP 0x01A4
-#define D1S_PREP 0x01A8
-#define D2S_PREP 0x01AC
-#define D3S_PREP 0x01B0
-#define CLS_ZERO 0x01C0
-#define D0S_ZERO 0x01C4
-#define D1S_ZERO 0x01C8
-#define D2S_ZERO 0x01CC
-#define D3S_ZERO 0x01D0
-#define PPI_CLRFLG 0x01E0
-#define PPI_CLRSIPO 0x01E4
-#define HSTIMEOUT 0x01F0
-#define HSTIMEOUTENABLE 0x01F4
-
-/* DSI Protocol Layer Registers */
-#define DSI_STARTDSI 0x0204
-#define DSI_BUSYDSI 0x0208
-#define DSI_LANEENABLE 0x0210
-#define DSI_LANESTATUS0 0x0214
-#define DSI_LANESTATUS1 0x0218
-#define DSI_INTSTATUS 0x0220
-#define DSI_INTMASK 0x0224
-#define DSI_INTCLR 0x0228
-#define DSI_LPTXTO 0x0230
-
-/* DSI General Registers */
-#define DSIERRCNT 0x0300
-
-/* DSI Application Layer Registers */
-#define APLCTRL 0x0400
-#define RDPKTLN 0x0404
-
-/* Video Path Registers */
-#define VPCTRL 0x0450
-#define HTIM1 0x0454
-#define HTIM2 0x0458
-#define VTIM1 0x045C
-#define VTIM2 0x0460
-#define VFUEN 0x0464
-
-/* LVDS Registers */
-#define LVMX0003 0x0480
-#define LVMX0407 0x0484
-#define LVMX0811 0x0488
-#define LVMX1215 0x048C
-#define LVMX1619 0x0490
-#define LVMX2023 0x0494
-#define LVMX2427 0x0498
-#define LVCFG 0x049C
-#define LVPHY0 0x04A0
-#define LVPHY1 0x04A4
-
-/* System Registers */
-#define SYSSTAT 0x0500
-#define SYSRST 0x0504
-
-/* GPIO Registers */
-/*#define GPIOC 0x0520*/
-#define GPIOO 0x0524
-#define GPIOI 0x0528
-
-/* I2C Registers */
-#define I2CTIMCTRL 0x0540
-#define I2CMADDR 0x0544
-#define WDATAQ 0x0548
-#define RDATAQ 0x054C
-
-/* Chip/Rev Registers */
-#define IDREG 0x0580
-
-/* Debug Registers */
-#define DEBUG00 0x05A0
-#define DEBUG01 0x05A4
-
-/* Panel CABC registers */
-#define PANEL_PWM_CONTROL 0x90
-#define PANEL_FREQ_DIVIDER_HI 0x91
-#define PANEL_FREQ_DIVIDER_LO 0x92
-#define PANEL_DUTY_CONTROL 0x93
-#define PANEL_MODIFY_RGB 0x94
-#define PANEL_FRAMERATE_CONTROL 0x96
-#define PANEL_PWM_MIN 0x97
-#define PANEL_PWM_REF 0x98
-#define PANEL_PWM_MAX 0x99
-#define PANEL_ALLOW_DISTORT 0x9A
-#define PANEL_BYPASS_PWMI 0x9B
-
-/* Panel color management registers */
-#define PANEL_CM_ENABLE 0x700
-#define PANEL_CM_HUE 0x701
-#define PANEL_CM_SATURATION 0x702
-#define PANEL_CM_INTENSITY 0x703
-#define PANEL_CM_BRIGHTNESS 0x704
-#define PANEL_CM_CE_ENABLE 0x705
-#define PANEL_CM_PEAK_EN 0x710
-#define PANEL_CM_GAIN 0x711
-#define PANEL_CM_HUETABLE_START 0x730
-#define PANEL_CM_HUETABLE_END 0x747 /* inclusive */
-
-/* Input muxing for registers LVMX0003...LVMX2427 */
-enum {
- INPUT_R0, /* 0 */
- INPUT_R1,
- INPUT_R2,
- INPUT_R3,
- INPUT_R4,
- INPUT_R5,
- INPUT_R6,
- INPUT_R7,
- INPUT_G0, /* 8 */
- INPUT_G1,
- INPUT_G2,
- INPUT_G3,
- INPUT_G4,
- INPUT_G5,
- INPUT_G6,
- INPUT_G7,
- INPUT_B0, /* 16 */
- INPUT_B1,
- INPUT_B2,
- INPUT_B3,
- INPUT_B4,
- INPUT_B5,
- INPUT_B6,
- INPUT_B7,
- INPUT_HSYNC, /* 24 */
- INPUT_VSYNC,
- INPUT_DE,
- LOGIC_0,
- /* 28...31 undefined */
-};
-
-#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \
- (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \
- FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
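A worked expansion of the macro above (selector codes taken from the
input-muxing enum earlier in this file; the packed constant is plain
arithmetic, not a value quoted from the driver):

/*
 * INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2) packs the selector
 * codes 5, 4, 3 and 2 into bit fields 29:24, 20:16, 12:8 and 4:0:
 *
 *   (5 << 24) | (4 << 16) | (3 << 8) | 2 == 0x05040302
 *
 * which is the value written to LVMX0003 in
 * tc35876x_configure_lvds_bridge().
 */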
-
-/**
- * tc35876x_regw - Write DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: value to write
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
-{
- int r;
- u8 tx_data[] = {
- /* NOTE: Register address big-endian, data little-endian. */
- (reg >> 8) & 0xff,
- reg & 0xff,
- value & 0xff,
- (value >> 8) & 0xff,
- (value >> 16) & 0xff,
- (value >> 24) & 0xff,
- };
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = tx_data,
- .len = ARRAY_SIZE(tx_data),
- },
- };
-
- r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (r < 0) {
- dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
- __func__, reg, value, r);
- return r;
- }
-
- if (r < ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
- __func__, reg, value, r);
- return -EAGAIN;
- }
-
- dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
- __func__, reg, value);
-
- return 0;
-}
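To make the endianness note in tx_data[] concrete, a worked example using
values that appear later in this file (LVPHY0 = 0x04A0, value 0x00048006):

/*
 * tc35876x_regw(i2c, LVPHY0, 0x00048006) transmits six bytes:
 *
 *   0x04 0xA0              register address, big-endian
 *   0x06 0x80 0x04 0x00    value, little-endian
 */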
-
-/**
- * tc35876x_regr - Read DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: pointer for storing the value
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
-{
- int r;
- u8 tx_data[] = {
- (reg >> 8) & 0xff,
- reg & 0xff,
- };
- u8 rx_data[4];
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = tx_data,
- .len = ARRAY_SIZE(tx_data),
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .buf = rx_data,
- .len = ARRAY_SIZE(rx_data),
- },
- };
-
- r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (r < 0) {
- dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
- reg, r);
- return r;
- }
-
- if (r < ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
- reg, r);
- return -EAGAIN;
- }
-
- *value = rx_data[0] << 24 | rx_data[1] << 16 |
- rx_data[2] << 8 | rx_data[3];
-
- dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
- reg, *value);
-
- return 0;
-}
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
-{
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
-
- if (!bridge_reset)
- return;
-
- if (state) {
- gpiod_set_value_cansleep(bridge_reset, 0);
- mdelay(10);
- } else {
- /* Pull MIPI Bridge reset pin to Low */
- gpiod_set_value_cansleep(bridge_reset, 0);
- mdelay(20);
- /* Pull MIPI Bridge reset pin to High */
- gpiod_set_value_cansleep(bridge_reset, 1);
- mdelay(40);
- }
-}
-
-void tc35876x_configure_lvds_bridge(struct drm_device *dev)
-{
- struct i2c_client *i2c = tc35876x_client;
- u32 ppi_lptxtimecnt;
- u32 txtagocnt;
- u32 txtasurecnt;
- u32 id;
-
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (!tc35876x_regr(i2c, IDREG, &id))
- dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
- else
- dev_err(&tc35876x_client->dev, "Cannot read ID\n");
-
- ppi_lptxtimecnt = 4;
- txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
- txtasurecnt = 3 * ppi_lptxtimecnt / 2;
- tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
- FLD_VAL(txtasurecnt, 10, 0));
- tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
-
- tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-
- /* Enabling MIPI & PPI lanes, Enable 4 lanes */
- tc35876x_regw(i2c, PPI_LANEENABLE,
- BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
- tc35876x_regw(i2c, DSI_LANEENABLE,
- BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
- tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
- tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
-
- /* Setting LVDS output frequency */
- tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
- FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
-
- /* Setting video panel control register,0x00000120 VTGen=ON ?!?!? */
- tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
-
- /* Horizontal back porch and horizontal pulse width. 0x00280028 */
- tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
-
- /* Horizontal front porch and horizontal active video size. 0x00500500*/
- tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
-
- /* Vertical back porch and vertical sync pulse width. 0x000e000a */
- tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
-
- /* Vertical front porch and vertical display size. 0x000e0320 */
- tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
-
- /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
- tc35876x_regw(i2c, VFUEN, BIT(0));
-
- /* Soft reset LCD controller. */
- tc35876x_regw(i2c, SYSRST, BIT(2));
-
- /* LVDS-TX input muxing */
- tc35876x_regw(i2c, LVMX0003,
- INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
- tc35876x_regw(i2c, LVMX0407,
- INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
- tc35876x_regw(i2c, LVMX0811,
- INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
- tc35876x_regw(i2c, LVMX1215,
- INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
- tc35876x_regw(i2c, LVMX1619,
- INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
- tc35876x_regw(i2c, LVMX2023,
- INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5));
- tc35876x_regw(i2c, LVMX2427,
- INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
-
- /* Enable LVDS transmitter. */
- tc35876x_regw(i2c, LVCFG, BIT(0));
-
- /* Clear notifications. Don't write reserved bits. Was write 0xffffffff
- * to 0x0288, must be in error?! */
- tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
-}
-
-#define GPIOPWMCTRL 0x38F
-#define PWM0CLKDIV0 0x62 /* low byte */
-#define PWM0CLKDIV1 0x61 /* high byte */
-
-#define SYSTEMCLK 19200000UL /* 19.2 MHz */
-#define PWM_FREQUENCY 9600 /* Hz */
-
-/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
-static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
-{
- return (baseclk - f) / f;
-}
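A quick check of the divider arithmetic with the constants defined above:

/*
 * calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY)
 *   = (19200000 - 9600) / 9600 = 1999,
 * and 19200000 / (1999 + 1) = 9600 Hz, the requested PWM frequency.
 */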
-
-static void tc35876x_brightness_init(struct drm_device *dev)
-{
- int ret;
- u8 pwmctrl;
- u16 clkdiv;
-
- /* Make sure the PWM reference is the 19.2 MHz system clock. Read first
- * instead of setting directly to catch potential conflicts between PWM
- * users. */
- ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
- if (ret || pwmctrl != 0x01) {
- if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
- else
- dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
-
- ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
- if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
- }
-
- clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
-
- ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
- if (!ret)
- ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
-
- if (ret)
- dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
- else
- dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
- clkdiv, PWM_FREQUENCY);
-}
-
-#define PWM0DUTYCYCLE 0x67
-
-void tc35876x_brightness_control(struct drm_device *dev, int level)
-{
- int ret;
- u8 duty_val;
- u8 panel_duty_val;
-
- level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
- /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
- duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
-
- /* I won't pretend to understand this formula. The panel spec is quite
- * bad engrish.
- */
- panel_duty_val = (2 * level - 100) * 0xA9 /
- MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
-
- ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
- if (ret)
- dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
- __func__);
-
- if (cmi_lcd_i2c_client) {
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_PWM_MAX, panel_duty_val);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
- __func__);
- }
-}
-
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
-{
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (bridge_bl_enable)
- gpiod_set_value_cansleep(bridge_bl_enable, 0);
-
- if (backlight_voltage)
- gpiod_set_value_cansleep(backlight_voltage, 0);
-}
-
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (backlight_voltage) {
- gpiod_set_value_cansleep(backlight_voltage, 1);
- msleep(260);
- }
-
- if (cmi_lcd_i2c_client) {
- int ret;
- dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
- /* Bit 4 is average_saving. Setting it to 1, the brightness is
- * referenced to the average of the frame content. 0 means
- * reference to the maximum of frame contents. Bits 3:0 are
- * allow_distort. When set to a nonzero value, all color values
- * between 255-allow_distort*2 and 255 are mapped to the
- * 255-allow_distort*2 value.
- */
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_ALLOW_DISTORT, 0x10);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_BYPASS_PWMI, 0);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- /* Set minimum brightness value - this is tunable */
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_PWM_MIN, 0x35);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- }
-
- if (bridge_bl_enable)
- gpiod_set_value_cansleep(bridge_bl_enable, 1);
-
- tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
-}
-
-static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
-
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- /* FIXME: do this properly. */
- mode->hdisplay = 1280;
- mode->vdisplay = 800;
- mode->hsync_start = 1360;
- mode->hsync_end = 1400;
- mode->htotal = 1440;
- mode->vsync_start = 814;
- mode->vsync_end = 824;
- mode->vtotal = 838;
- mode->clock = 33324 << 1;
-
- dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
- dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
- dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
- dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
- dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
- dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
- dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
- dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
- dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-/* DV1 Active area 216.96 x 135.6 mm */
-#define DV1_PANEL_WIDTH 217
-#define DV1_PANEL_HEIGHT 136
-
-static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = DV1_PANEL_WIDTH;
- pi->height_mm = DV1_PANEL_HEIGHT;
-
- return 0;
-}
-
-static int tc35876x_bridge_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- dev_info(&client->dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
- __func__);
- return -ENODEV;
- }
-
- bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
- if (IS_ERR(bridge_reset))
- return PTR_ERR(bridge_reset);
- if (bridge_reset)
- gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
-
- bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
- if (IS_ERR(bridge_bl_enable))
- return PTR_ERR(bridge_bl_enable);
- if (bridge_bl_enable)
- gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
-
- backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
- if (IS_ERR(backlight_voltage))
- return PTR_ERR(backlight_voltage);
- if (backlight_voltage)
- gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
-
- tc35876x_client = client;
-
- return 0;
-}
-
-static int tc35876x_bridge_remove(struct i2c_client *client)
-{
- dev_dbg(&client->dev, "%s\n", __func__);
-
- tc35876x_client = NULL;
-
- return 0;
-}
-
-static const struct i2c_device_id tc35876x_bridge_id[] = {
- { "i2c_disp_brig", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
-
-static struct i2c_driver tc35876x_bridge_i2c_driver = {
- .driver = {
- .name = "i2c_disp_brig",
- },
- .id_table = tc35876x_bridge_id,
- .probe = tc35876x_bridge_probe,
- .remove = tc35876x_bridge_remove,
-};
-
-/* LCD panel I2C */
-static int cmi_lcd_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- dev_info(&client->dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
- __func__);
- return -ENODEV;
- }
-
- cmi_lcd_i2c_client = client;
-
- return 0;
-}
-
-static int cmi_lcd_i2c_remove(struct i2c_client *client)
-{
- dev_dbg(&client->dev, "%s\n", __func__);
-
- cmi_lcd_i2c_client = NULL;
-
- return 0;
-}
-
-static const struct i2c_device_id cmi_lcd_i2c_id[] = {
- { "cmi-lcd", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
-
-static struct i2c_driver cmi_lcd_i2c_driver = {
- .driver = {
- .name = "cmi-lcd",
- },
- .id_table = cmi_lcd_i2c_id,
- .probe = cmi_lcd_i2c_probe,
- .remove = cmi_lcd_i2c_remove,
-};
-
-/* HACK to create I2C device while it's not created by platform code */
-#define CMI_LCD_I2C_ADAPTER 2
-#define CMI_LCD_I2C_ADDR 0x60
-
-static int cmi_lcd_hack_create_device(void)
-{
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- struct i2c_board_info info = {
- .type = "cmi-lcd",
- .addr = CMI_LCD_I2C_ADDR,
- };
-
- pr_debug("%s\n", __func__);
-
- adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
- if (!adapter) {
- pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
- CMI_LCD_I2C_ADAPTER);
- return -EINVAL;
- }
-
- client = i2c_new_client_device(adapter, &info);
- if (IS_ERR(client)) {
- pr_err("%s: creating I2C device failed\n", __func__);
- i2c_put_adapter(adapter);
- return PTR_ERR(client);
- }
-
- return 0;
-}
-
-static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tc35876x_funcs = {
- .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
- .get_config_mode = tc35876x_get_config_mode,
- .get_panel_info = tc35876x_get_panel_info,
-};
-
-void tc35876x_init(struct drm_device *dev)
-{
- int r;
-
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
- cmi_lcd_hack_create_device();
-
- r = i2c_add_driver(&cmi_lcd_i2c_driver);
- if (r < 0)
- dev_err(&dev->pdev->dev,
- "%s: i2c_add_driver() for %s failed (%d)\n",
- __func__, cmi_lcd_i2c_driver.driver.name, r);
-
- r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
- if (r < 0)
- dev_err(&dev->pdev->dev,
- "%s: i2c_add_driver() for %s failed (%d)\n",
- __func__, tc35876x_bridge_i2c_driver.driver.name, r);
-
- tc35876x_brightness_init(dev);
-}
-
-void tc35876x_exit(void)
-{
- pr_debug("%s\n", __func__);
-
- i2c_del_driver(&tc35876x_bridge_i2c_driver);
-
- if (cmi_lcd_i2c_client)
- i2c_del_driver(&cmi_lcd_i2c_driver);
-}
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
deleted file mode 100644
index b14b7f9e7d1e..000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
-#define __MDFLD_DSI_LVDS_BRIDGE_H__
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
-void tc35876x_configure_lvds_bridge(struct drm_device *dev);
-void tc35876x_brightness_control(struct drm_device *dev, int level);
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
-void tc35876x_init(struct drm_device *dev);
-void tc35876x_exit(void);
-
-extern const struct panel_funcs mdfld_tc35876x_funcs;
-
-#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 684ef794eb7c..d25c75e60d3d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index ea962acfeae0..096eea985b6f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -499,7 +499,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
int hibmc_de_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct drm_crtc *crtc = &priv->crtc;
struct drm_plane *plane = &priv->primary_plane;
int ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index d845657fd99c..abd6250d5a14 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
@@ -43,6 +44,12 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
+}
+
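A hedged reading of the wrapper just added (the meaning of the numeric
arguments is inferred from the VRAM helper's parameter order, not stated in
this patch): drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args) requests
no extra page alignment for the buffer object and a scanline pitch aligned to
128 bytes, presumably to satisfy the display engine's scanout alignment.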
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
@@ -77,47 +84,48 @@ static const struct dev_pm_ops hibmc_pm_ops = {
hibmc_pm_resume)
};
+static const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
+ struct drm_device *dev = &priv->dev;
int ret;
- drm_mode_config_init(priv->dev);
- priv->mode_config_initialized = true;
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- priv->dev->mode_config.min_width = 0;
- priv->dev->mode_config.min_height = 0;
- priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1200;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 1200;
- priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 32;
- priv->dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.fb_base = priv->fb_base;
+ dev->mode_config.preferred_depth = 32;
+ dev->mode_config.prefer_shadow = 1;
- priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+ dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
ret = hibmc_de_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init de: %d\n", ret);
+ drm_err(dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init vdac: %d\n", ret);
+ drm_err(dev, "failed to init vdac: %d\n", ret);
return ret;
}
return 0;
}
-static void hibmc_kms_fini(struct hibmc_drm_private *priv)
-{
- if (priv->mode_config_initialized) {
- drm_mode_config_cleanup(priv->dev);
- priv->mode_config_initialized = false;
- }
-}
-
/*
 * The display controller can operate in one of three power modes: 0, 1 or Sleep.
*/
@@ -202,8 +210,8 @@ static void hibmc_hw_config(struct hibmc_drm_private *priv)
static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
- struct pci_dev *pdev = dev->pdev;
+ struct drm_device *dev = &priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t addr, size, ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
@@ -242,40 +250,31 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
static int hibmc_unload(struct drm_device *dev)
{
- struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
-
drm_atomic_helper_shutdown(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- pci_disable_msi(dev->pdev);
- hibmc_kms_fini(priv);
- hibmc_mm_fini(priv);
- dev->dev_private = NULL;
+ pci_disable_msi(to_pci_dev(dev->dev));
+
return 0;
}
static int hibmc_load(struct drm_device *dev)
{
- struct hibmc_drm_private *priv;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
int ret;
- priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- drm_err(dev, "no memory to allocate for hibmc_drm_private\n");
- return -ENOMEM;
- }
- dev->dev_private = priv;
- priv->dev = dev;
-
ret = hibmc_hw_init(priv);
if (ret)
goto err;
- ret = hibmc_mm_init(priv);
- if (ret)
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
+ }
ret = hibmc_kms_init(priv);
if (ret)
@@ -287,11 +286,11 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(dev->pdev);
+ ret = pci_enable_msi(pdev);
if (ret) {
drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = drm_irq_install(dev, pdev->irq);
if (ret)
drm_warn(dev, "install irq failed: %d\n", ret);
}
@@ -310,6 +309,7 @@ err:
static int hibmc_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct hibmc_drm_private *priv;
struct drm_device *dev;
int ret;
@@ -318,25 +318,26 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
- if (IS_ERR(dev)) {
+ priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
+ struct hibmc_drm_private, dev);
+ if (IS_ERR(priv)) {
DRM_ERROR("failed to allocate drm_device\n");
- return PTR_ERR(dev);
+ return PTR_ERR(priv);
}
- dev->pdev = pdev;
+ dev = &priv->dev;
pci_set_drvdata(pdev, dev);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
drm_err(dev, "failed to enable pci device: %d\n", ret);
- goto err_free;
+ goto err_return;
}
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
- goto err_disable;
+ goto err_return;
}
ret = drm_dev_register(dev, 0);
@@ -352,11 +353,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
err_unload:
hibmc_unload(dev);
-err_disable:
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
-
+err_return:
return ret;
}
@@ -366,7 +363,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
hibmc_unload(dev);
- drm_dev_put(dev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f310a83d9c48..7d263f4d7078 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -37,12 +37,11 @@ struct hibmc_drm_private {
resource_size_t fb_size;
/* drm */
- struct drm_device *dev;
+ struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct hibmc_connector connector;
- bool mode_config_initialized;
};
static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
@@ -52,7 +51,7 @@ static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *c
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct hibmc_drm_private, dev);
}
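The container_of() conversion above is the other half of the
devm_drm_dev_alloc() change made in hibmc_pci_probe() in this patch; a minimal
sketch of how the two ends fit together, using only what the hunks here show:

/*
 * The drm_device is now embedded in the driver's private struct:
 *
 *   priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
 *                             struct hibmc_drm_private, dev);
 *   dev  = &priv->dev;   // embedded drm_device
 *
 * so any code holding the drm_device pointer can recover the private data
 * without dev->dev_private:
 *
 *   priv = container_of(dev, struct hibmc_drm_private, dev);
 */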
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
@@ -64,11 +63,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
-extern const struct drm_mode_config_funcs hibmc_mode_funcs;
-
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 86d712090d87..410bd019bb35 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -83,7 +83,7 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
connector->adapter.owner = THIS_MODULE;
connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
connector->adapter.algo_data = &connector->bit_data;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 74e26c27d878..c228091fb0e6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
@@ -42,12 +43,6 @@ out:
return count;
}
-static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
@@ -59,7 +54,6 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
- .mode_valid = hibmc_connector_mode_valid,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -90,15 +84,12 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
.mode_set = hibmc_encoder_mode_set,
};
-static const struct drm_encoder_funcs hibmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
+ struct drm_crtc *crtc = &priv->crtc;
struct drm_connector *connector = &hibmc_connector->base;
int ret;
@@ -108,9 +99,8 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
return ret;
}
- encoder->possible_crtcs = 0x1;
- ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
deleted file mode 100644
index 602ece11bb4a..000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- * Rongrong Zou <zourongrong@huawei.com>
- * Rongrong Zou <zourongrong@gmail.com>
- * Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_print.h>
-
-#include "hibmc_drm_drv.h"
-
-int hibmc_mm_init(struct hibmc_drm_private *hibmc)
-{
- struct drm_vram_mm *vmm;
- int ret;
- struct drm_device *dev = hibmc->dev;
-
- vmm = drm_vram_helper_alloc_mm(dev,
- pci_resource_start(dev->pdev, 0),
- hibmc->fb_size);
- if (IS_ERR(vmm)) {
- ret = PTR_ERR(vmm);
- drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
-{
- if (!hibmc->dev->vram_mm)
- return;
-
- drm_vram_helper_release_mm(hibmc->dev);
-}
-
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
-}
-
-const struct drm_mode_config_funcs hibmc_mode_funcs = {
- .mode_valid = drm_vram_helper_mode_valid,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_gem_fb_create,
-};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 25cd9788a4d5..72a38f28393f 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,6 +19,8 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ depends on EXPERT # only for developers
+ depends on !COMPILE_TEST # never built by robots
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
@@ -31,10 +33,13 @@ config DRM_I915_DEBUG
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
+ select DRM_I915_WERROR
+ select DRM_I915_DEBUG_GEM
+ select DRM_I915_DEBUG_GEM_ONCE
+ select DRM_I915_DEBUG_MMIO
+ select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_DEBUG_RUNTIME_PM
- select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -69,6 +74,21 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_DEBUG_GEM_ONCE
+ bool "Make a GEM debug failure fatal"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+	  During development, we often want only the very first failure,
+	  as it would otherwise be lost in the deluge of subsequent
+	  failures. More casual testers, however, may not want to trigger
+	  a hard BUG_ON, preferring that the system remain sufficiently
+	  usable to capture a bug report in situ.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e5574e506a5c..2385a7505f5d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
-subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
@@ -38,6 +37,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
+ i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
@@ -58,6 +58,7 @@ i915-y += i915_drv.o \
# core library code
i915-y += \
+ dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -82,6 +83,7 @@ gt-y += \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
+ gt/gen8_engine_cs.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
@@ -91,6 +93,7 @@ gt-y += \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
+ gt/intel_execlists_submission.o \
gt/intel_ggtt.o \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
@@ -106,6 +109,7 @@ gt-y += \
gt/intel_mocs.o \
gt/intel_ppgtt.o \
gt/intel_rc6.o \
+ gt/intel_region_lmem.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ring.o \
@@ -131,6 +135,7 @@ gem-y += \
gem/i915_gem_clflush.o \
gem/i915_gem_client_blt.o \
gem/i915_gem_context.o \
+ gem/i915_gem_create.o \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
@@ -166,7 +171,6 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
- intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -196,13 +200,17 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_crtc.o \
display/intel_csr.o \
+ display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
+ display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
display/intel_fbc.o \
+ display/intel_fdi.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
@@ -214,7 +222,8 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
- display/intel_vga.o
+ display/intel_vga.o \
+ display/i9xx_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -233,6 +242,7 @@ i915-y += \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_dp.o \
+ display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
@@ -246,9 +256,11 @@ i915-y += \
display/intel_lspcon.o \
display/intel_lvds.o \
display/intel_panel.o \
+ display/intel_pps.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
+ display/intel_vrr.o \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
@@ -283,17 +295,9 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
# exclude some broken headers from the test coverage
no-header-test := \
- display/intel_vbt_defs.h \
- gvt/execlist.h \
- gvt/fb_decoder.h \
- gvt/gtt.h \
- gvt/gvt.h \
- gvt/interrupt.h \
- gvt/mmio_context.h \
- gvt/mpt.h \
- gvt/scheduler.h
+ display/intel_vbt_defs.h
-extra-$(CONFIG_DRM_I915_WERROR) += \
+always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 000000000000..e3e69e6cef65
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
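+/*
+ * Whether the plane has a window generator, i.e. whether it can be
+ * positioned and sized within the pipe rather than always spanning
+ * the full active area.
+ */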
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 dspcntr;
+
+ dspcntr = DISPLAY_PLANE_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dspcntr |= DISPPLANE_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /*
+ * When using an X-tiled surface the plane starts to
+ * misbehave if the x offset + width exceeds the stride.
+ * hsw/bdw: underrun galore
+ * ilk/snb/ivb: wrap to the next tile row mid scanout
+ * i965/g4x: so far appear immune to this
+ * vlv/chv: TODO check
+ *
+ * Linear surfaces seem to work just fine, even on hsw/bdw
+ * despite them not using the linear offset anymore.
+ */
+ if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ u32 alignment = intel_surf_alignment(fb, 0);
+ int cpp = fb->format->cpp[0];
+
+ while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) {
+ if (offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0,
+ offset, offset - alignment);
+ }
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ if (rotation & DRM_MODE_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_MODE_REFLECT_X) {
+ src_x += src_w - 1;
+ }
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+ } else if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
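+/*
+ * DSPCNTR bits that depend only on crtc state (gamma, csc, pipe
+ * select); these get OR'd with the precomputed plane_state->ctl
+ * when the plane is programmed.
+ */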
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+ return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
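+	 *
+	 * E.g. for 64bpp (cpp == 8) this works out to requiring
+	 * cdclk >= pixel_rate * 10 / 8, i.e. 25% headroom over the
+	 * pixel rate.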
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
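+	 *
+	 * Illustrative example: a 148500 kHz pixel rate with a 64bpp
+	 * plane (10/8 ratio) yields DIV_ROUND_UP(148500 * 10, 8) =
+	 * 185625 kHz of minimum cdclk.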
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 linear_offset;
+ int x = plane_state->color_plane[0].x;
+ int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ unsigned long irqflags;
+ u32 dspaddr_offset;
+ u32 dspcntr;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ dspaddr_offset = plane_state->color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
+
+ if (INTEL_GEN(dev_priv) < 4) {
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
+ */
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway so that the crtc state readout works correctly.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
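+/*
+ * Arm the async flip bit in DSPCNTR before writing DSPSURF so the
+ * surface address update doesn't wait for vblank.
+ */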
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ if (async_flip)
+ dspcntr |= DISPPLANE_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
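+/*
+ * vlv/chv have no async flip bit in DSPCNTR; the flip is performed
+ * by writing the surface address register alone.
+ */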
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 32 * 1024);
+}
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 32 * 1024);
+ else
+ return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	if (INTEL_GEN(dev_priv) >= 3) {
+		if (modifier == I915_FORMAT_MOD_X_TILED)
+			return 8 * 1024;
+		else
+			return 16 * 1024;
+	} else {
+		if (plane->i9xx_plane == PLANE_C)
+			return 4 * 1024;
+		else
+			return 8 * 1024;
+	}
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ else
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
+ } else {
+ formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane->max_stride = i965_plane_max_stride;
+ else
+ plane->max_stride = i9xx_plane_max_stride;
+ } else {
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->max_stride = hsw_primary_max_stride;
+ else
+ plane->max_stride = ilk_primary_max_stride;
+ }
+
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_disable_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c",
+ plane_name(plane->i9xx_plane));
+ if (ret)
+ goto fail;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ } else {
+ supported_rotations = DRM_MODE_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 000000000000..ca963c2a8457
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a9439b415603..9d245a689323 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
vdsc_cfg->convert_rgb = true;
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -1616,10 +1619,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
-
- if (crtc_state->dsc.compression_enable)
- intel_display_power_get(i915,
- intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7e9f84b00859..4683f98f7e54 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
int ret;
intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ new_crtc_state->enabled_planes &= ~BIT(plane->id);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -449,7 +452,7 @@ void intel_update_plane(struct intel_plane *plane,
trace_intel_update_plane(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
- plane->async_flip(plane, crtc_state, plane_state);
+ plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_plane(plane, crtc_state, plane_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4cc949b228f2..987cf509337f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
@@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
+ } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->dsc.slice_count);
/*
- * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
- * implementation specific physical rate buffer size. Currently we use
- * the required rate buffer model size calculated in
- * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
- *
* The VBT rc_buffer_block_size and rc_buffer_size definitions
- * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
- * implementation should also use the DPCD (or perhaps VBT for eDP)
- * provided value for the buffer size.
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
*/
+ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+ dsc->rc_buffer_size);
/* FIXME: DSI spec says bpc + 1 for this one */
vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bd060404d249..4b5a30ac84bc 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -20,76 +20,9 @@ struct intel_qgv_point {
struct intel_qgv_info {
struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
- u8 num_channels;
u8 t_bl;
- enum intel_dram_type dram_type;
};
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
- struct intel_qgv_info *qi)
-{
- u32 val = 0;
- int ret;
-
- ret = sandybridge_pcode_read(dev_priv,
- ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
- &val, NULL);
- if (ret)
- return ret;
-
- if (IS_GEN(dev_priv, 12)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- case 4:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 5:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else if (IS_GEN(dev_priv, 11)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else {
- MISSING_CASE(INTEL_GEN(dev_priv));
- qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
- }
-
- qi->num_channels = (val & 0xf0) >> 4;
- qi->num_points = (val & 0xf00) >> 8;
-
- if (IS_GEN(dev_priv, 12))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
- else if (IS_GEN(dev_priv, 11))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
- return 0;
-}
-
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
- ret = icl_pcode_read_mem_global_info(dev_priv, qi);
- if (ret)
- return ret;
+ qi->num_points = dram_info->num_qgv_points;
+
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels;
+ int num_channels = dev_priv->dram_info.num_channels;
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- num_channels = qi.num_channels;
deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c449d28d0560..2e878cc274b7 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv)
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
*/
- I915_WRITE(PCH_RAWCLK_FREQ,
- CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
return 38400;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 172d398081ee..ff7dcb7088bf 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1485,6 +1485,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
int ret;
@@ -1492,6 +1493,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
(crtc_state->hw.gamma_lut ||
crtc_state->hw.degamma_lut) &&
@@ -1525,12 +1533,20 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index d5ad61e4083e..996ae0608a62 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy))
- drm_warn(&dev_priv->drm,
- "Combo PHY %c HW state changed unexpectedly\n",
- phy_name(phy));
+ !icl_combo_phy_verify_state(dev_priv, phy)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ /*
+ * A known problem with old ifwi:
+ * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+ * Suppress the warning for CI. Remove ASAP!
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ } else {
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ }
+ }
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 406e96785c76..d5ceb7bdc14b 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
}
void
-intel_attach_colorspace_property(struct drm_connector *connector)
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- if (drm_mode_create_hdmi_colorspace_property(connector))
- return;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (drm_mode_create_dp_colorspace_property(connector))
- return;
- break;
- default:
- MISSING_CASE(connector->connector_type);
- return;
- }
+ if (!drm_mode_create_hdmi_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_dp_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..661a37a3c6d8 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 000000000000..8e77ca7ddf11
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
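+/*
+ * Return the current vblank count, falling back to the software
+ * counter when the crtc has no usable hardware frame counter
+ * (max_vblank_count == 0).
+ */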
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
+ if (!vblank->max_vblank_count)
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 mode_flags = crtc->mode_flags;
+
+ /*
+	 * From gen 11, in DSI command mode the frame counter won't have
+	 * updated at the beginning of TE; if we used the hw counter we
+	 * would only see it update on the next TE, so use the sw counter
+	 * instead.
+ */
+ if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ return 0;
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (INTEL_GEN(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ assert_vblank_disabled(&crtc->base);
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(intel_crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+ intel_crtc_debugfs_add(crtc);
+ return 0;
+}
+
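+/*
+ * crtc funcs common to all platforms; the per-platform variants
+ * below differ only in their vblank counter/enable/disable hooks.
+ */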
+#define INTEL_CRTC_FUNCS \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources, \
+ .late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
+ int sprite, ret;
+
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(primary->id);
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(plane->id);
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ &primary->base, &cursor->base,
+ funcs, "pipe %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+ dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+ BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ drm_crtc_create_scaling_filter_property(&crtc->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_color_init(crtc);
+
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ return 0;
+
+fail:
+ intel_crtc_free(crtc);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 000000000000..08112d557411
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 000000000000..21fe4d2753e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
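+/*
+ * Platforms with a physically addressed cursor use the DMA address
+ * of the backing pages; everything else uses the fb's GGTT offset.
+ */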
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ return base + plane_state->color_plane[0].offset;
+}
+
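+/*
+ * CURPOS uses a sign-magnitude encoding: each coordinate's magnitude
+ * goes in the X/Y field, with a separate sign bit for a cursor that
+ * hangs off the top/left edge.
+ */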
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
+ u32 pos = 0;
+
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_rect src = plane_state->uapi.src;
+ const struct drm_rect dst = plane_state->uapi.dst;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = src;
+ plane_state->uapi.dst = dst;
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ return CURSOR_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = drm_rect_width(&plane_state->uapi.dst);
+
+ /*
+	 * 845g/865g are only limited by the width of their cursors;
+	 * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+	/* If we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
+ switch (fb->pitches[0]) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
+ size = (height << 12) | width;
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/*
+	 * On these chipsets we can only modify the base/size/stride
+	 * whilst the cursor is disabled.
+	 */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ *pipe = PIPE_A;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 cntl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return cntl;
+
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
+ case 64:
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+ return 0;
+ }
+
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+ cntl |= MCURSOR_ROTATE_180;
+
+ return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
+
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
+
+ return true;
+}
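+
+/*
+ * Editor's sketch of the resulting size policy (illustrative, not
+ * part of the original patch):
+ *
+ *	256x256, no rotation, IVB+   -> ok (square)
+ *	256x64,  no rotation, IVB+   -> ok (8 <= height <= width)
+ *	256x4,   no rotation, IVB+   -> rejected (height < 8)
+ *	256x64,  ROTATE_180,  IVB+   -> rejected (rotation needs square)
+ *	128x64,  any rotation, gen4  -> rejected (no CUR_FBC, not square)
+ */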
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * a dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse to put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
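+
+/*
+ * Editor's note (illustrative, not part of the original patch):
+ * unlike i845/i865 there is no cursor stride register here, so the
+ * fb pitch must match the cursor width exactly; a 128-pixel-wide
+ * ARGB8888 cursor needs fb->pitches[0] == 128 * 4 == 512 bytes.
+ */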
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
+
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+ * Without the CURCNTR write the CURPOS write would
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally,
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
+ *
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
+ */
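+
+ /*
+ * Editor's illustration (not part of the original patch): a plain
+ * cursor move on the latter platforms therefore needs only the
+ * two writes of the else-branch below,
+ *
+ *	CURPOS  <- pos
+ *	CURBASE <- base	(arms the update)
+ *
+ * while any cntl/fbc_ctl change takes the full sequence.
+ */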
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ if (HAS_CUR_FBC(dev_priv))
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3, which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
+}
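+
+/*
+ * Editor's sketch (not part of the original patch): since only
+ * linear ARGB8888 passes this check, a userspace cursor framebuffer
+ * would be created along these lines with libdrm ("handle" being an
+ * assumed dumb-buffer handle, error handling omitted):
+ *
+ *	uint32_t handles[4] = { handle };
+ *	uint32_t pitches[4] = { 64 * 4 };
+ *	uint32_t offsets[4] = { 0 };
+ *	uint32_t fb_id;
+ *
+ *	drmModeAddFB2(fd, 64, 64, DRM_FORMAT_ARGB8888,
+ *		      handles, pitches, offsets, &fb_id, 0);
+ */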
+
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *new_crtc_state;
+ int ret;
+
+ /*
+ * When the crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath.
+ *
+ * FIXME bigjoiner fastpath would be good
+ */
+ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+ crtc_state->update_pipe || crtc_state->bigjoiner)
+ goto slow;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+ goto slow;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
+ goto slow;
+
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+ if (ret)
+ goto out_free;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ goto out_free;
+
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ plane->base.state = &new_plane_state->uapi;
+
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
+ if (new_plane_state->uapi.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, crtc_state);
+
+ intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+ if (ret)
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+ else
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+ return ret;
+
+slow:
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
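+
+/*
+ * Editor's note (illustrative, not part of the original patch): this
+ * is the path behind the legacy cursor ioctls, i.e. a compositor
+ * calling
+ *
+ *	drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);
+ *	drmModeMoveCursor(fd, crtc_id, x, y);
+ *
+ * The DRM core turns those into update_plane() on the cursor plane;
+ * only fb/position changes stay on the async fastpath above, while
+ * anything that could affect watermarks falls back to
+ * drm_atomic_helper_update_plane().
+ */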
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_plane *cursor;
+ int ret, zpos;
+
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
+
+ cursor->pipe = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+ cursor->id = PLANE_CURSOR;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
+ cursor->update_plane = i845_update_cursor;
+ cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->max_stride = i9xx_cursor_max_stride;
+ cursor->update_plane = i9xx_update_cursor;
+ cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
+
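+ /* Cursor is topmost: primary has zpos 0, sprites 1..num_sprites */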
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+ return cursor;
+
+fail:
+ intel_plane_free(cursor);
+
+ return ERR_PTR(ret);
+}
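+
+/*
+ * Editor's sketch of the expected call site (an assumption based on
+ * this refactor, not part of the original patch):
+ *
+ *	cursor = intel_cursor_plane_create(dev_priv, pipe);
+ *	if (IS_ERR(cursor)) {
+ *		ret = PTR_ERR(cursor);
+ *		goto fail;
+ *	}
+ */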
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 000000000000..ce333bf4c2d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 92940a0c5ef8..1bb40ec5fe5d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -46,10 +46,12 @@
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -611,6 +613,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
@@ -766,6 +796,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
};
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1093,6 +1151,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
} else if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1259,7 +1323,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
} else {
@@ -1267,8 +1334,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
return tgl_combo_phy_ddi_translations_dp_hbr2;
}
} else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
}
}
@@ -2029,9 +2101,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
}
}
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable)
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2046,9 +2118,9 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (enable)
- tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ tmp |= hdcp_mask;
else
- tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ tmp &= ~hdcp_mask;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
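+
+/*
+ * Editor's note (illustrative, not part of the original patch): the
+ * old intel_ddi_toggle_hdcp_signalling() behaviour is now spelled
+ *
+ *	intel_ddi_toggle_hdcp_bits(encoder, cpu_transcoder,
+ *				   enable, TRANS_DDI_HDCP_SIGNALLING);
+ *
+ * with the extra mask letting callers toggle other TRANS_DDI bits
+ * through the same helper.
+ */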
@@ -2285,18 +2357,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(dev_priv, phy))
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
}
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
@@ -2626,15 +2703,11 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- if (!ddi_translations)
- return;
- if (level >= n_entries) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 1);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2754,14 +2827,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
- }
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
+ level = n_entries - 1;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -2891,9 +2965,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- if (level >= n_entries)
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
@@ -3480,6 +3559,22 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
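+/*
+ * Editor's note (an assumption based on the VRR work in this series):
+ * with VRR the vertical timings vary from frame to frame, so the sink
+ * is asked to ignore the MSA timing parameters; MSA_TIMING_PAR_IGNORE_EN
+ * in DP_DOWNSPREAD_CTRL (DPCD 0x107) is the standard DisplayPort bit
+ * for that.
+ */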
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+ enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
+ enable ? "enable" : "disable");
+}
+
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3507,12 +3602,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
val |= DP_TP_CTL_FEC_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
-
- if (intel_de_wait_for_set(dev_priv,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -3532,6 +3621,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3556,7 +3662,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 2. Enable Panel Power if PPS is required */
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
/*
* 3. For non-TBT Type-C ports, set FIA lane count
@@ -3577,9 +3683,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3621,14 +3729,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
@@ -3643,6 +3744,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -3651,6 +3753,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
/*
* 7.i Follow DisplayPort specification training sequence (see notes for
* failure handling)
@@ -3693,14 +3798,16 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3713,19 +3820,12 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -3778,7 +3878,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3931,13 +4033,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 12)
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
+ intel_pps_vdd_on(intel_dp);
+ intel_pps_off(intel_dp);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
}
@@ -3958,8 +4061,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
@@ -3981,6 +4085,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_disable_pipe(old_crtc_state);
+ intel_vrr_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -4032,8 +4138,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
icl_unmap_plls_to_ports(encoder);
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port),
+ fetch_and_zero(&dig_port->aux_wakeref));
if (is_tc_port)
intel_tc_port_put_link(dig_port);
@@ -4118,6 +4225,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4125,7 +4233,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state, conn_state);
- intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -4206,6 +4317,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
@@ -4227,6 +4340,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!crtc_state->bigjoiner_slave)
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -4240,7 +4355,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -4263,6 +4378,9 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
false);
+ /* Disable Ignore_MSA bit in DP Sink */
+ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
@@ -4368,9 +4486,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
if (is_tc_port)
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
/*
@@ -4583,6 +4704,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4657,9 +4779,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->fec_enable);
}
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ pipe_config->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ else
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -4946,6 +5071,8 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
+ if (dig_port)
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
@@ -5099,12 +5226,20 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
enum phy phy = intel_port_to_phy(i915, encoder->port);
bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5262,8 +5397,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
return i915->hti_state & HDPORT_ENABLED &&
- (i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
- i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+ i915->hti_state & HDPORT_DDI_USED(phy);
}
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index dcc711cfe4fe..a4dd815c0000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -50,9 +50,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
u32 ddi_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 53a00cf3fa32..8d7aaa68c6f6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -45,8 +45,10 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
@@ -56,6 +58,9 @@
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
@@ -67,10 +72,12 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
+#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -79,72 +86,14 @@
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for ivb (no fp16 due to hw issue) */
-static const u32 ivb_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
-};
-
-/* Primary plane formats for gen >= 4, except ivb */
-static const u32 i965_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-/* Primary plane formats for vlv/chv */
-static const u32 vlv_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-static const u64 i9xx_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
+#include "i9xx_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -171,18 +120,6 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
-
-struct intel_limit {
- struct {
- int min, max;
- } dot, vco, n, m, m1, m2, p, p1;
-
- struct {
- int dot_limit;
- int p2_slow, p2_fast;
- } p2;
-};
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -241,281 +178,6 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-/* units of 100MHz */
-static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3},
- .p2 = { .dot_limit = 270000,
- .p2_slow = 10,
- .p2_fast = 10
- },
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
- .dot = { .min = 22000, .max = 400000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 16, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8},
- .p2 = { .dot_limit = 165000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
- .dot = { .min = 20000, .max = 115000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 14, .p2_fast = 14
- },
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
- .dot = { .min = 80000, .max = 224000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 7, .p2_fast = 7
- },
-};
-
-static const struct intel_limit pnv_limits_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
- /* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- /* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit pnv_limits_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit ilk_limits_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 5 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit ilk_limits_single_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 118 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 56 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100mhz refclk limits. */
-static const struct intel_limit ilk_limits_single_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 2 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 270000 * 5 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p1 = { .min = 2, .max = 3 },
- .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4800000, .max = 6480000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- .m2 = { .min = 24 << 22, .max = 175 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
- /* FIXME: find real dot limits */
- .dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6700000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- /* FIXME: find real m2 limits */
- .m2 = { .min = 2 << 22, .max = 255 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -542,12 +204,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct intel_crtc_state *state)
-{
- return drm_atomic_crtc_needs_modeset(&state->uapi);
-}
-
-static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
@@ -566,482 +222,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m2 + 2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
- return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = i9xx_dpll_compute_m(clock);
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
- clock->n << 22);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
- const struct intel_limit *limit,
- const struct dpll *clock)
-{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- return false;
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- return false;
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- return false;
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- return false;
-
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
- if (clock->m1 <= clock->m2)
- return false;
-
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)) {
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- return false;
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- return false;
- }
-
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- return false;
- /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
- * connector, etc., rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- return false;
-
- return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
- const struct intel_crtc_state *crtc_state,
- int target)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev_priv))
- return limit->p2.p2_fast;
- else
- return limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- return limit->p2.p2_slow;
- else
- return limit->p2.p2_fast;
- }
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- if (clock.m2 >= clock.m1)
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int max_n;
- bool found = false;
- /* approximately equals target * 0.00585 */
- int err_most = (target >> 8) + (target >> 9);
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- max_n = limit->n.max;
- /* based on hardware requirement, prefer smaller n for precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirement, prefer larger m1, m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- for (clock.p1 = limit->p1.max;
- clock.p1 >= limit->p1.min; clock.p1--) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err_most) {
- *best_clock = clock;
- err_most = this_err;
- max_n = clock.n;
- found = true;
- }
- }
- }
- }
- }
- return found;
-}
-
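/*
 * Editor's note: err_most above is shift arithmetic for roughly 0.586%
 * of the target clock, e.g. for target = 270000 kHz:
 *	(270000 >> 8) + (270000 >> 9) = 1054 + 527 = 1581 kHz
 * which is close to 270000 * (1/256 + 1/512) ~= 1582 kHz.
 */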
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const struct dpll *calculated_clock,
- const struct dpll *best_clock,
- unsigned int best_error_ppm,
- unsigned int *error_ppm)
-{
- /*
- * For CHV ignore the error and consider only the P value.
- * Prefer a bigger P value based on HW requirements.
- */
- if (IS_CHERRYVIEW(to_i915(dev))) {
- *error_ppm = 0;
-
- return calculated_clock->p > best_clock->p;
- }
-
- if (drm_WARN_ON_ONCE(dev, !target_freq))
- return false;
-
- *error_ppm = div_u64(1000000ULL *
- abs(target_freq - calculated_clock->dot),
- target_freq);
- /*
- * Prefer a better P value over a better (smaller) error if the error
- * is small. Ensure this preference for future configurations too by
- * setting the error to 0.
- */
- if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
- *error_ppm = 0;
-
- return true;
- }
-
- return *error_ppm + 10 < best_error_ppm;
-}
-
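/*
 * Editor's note: the error is expressed in parts per million of the
 * target, e.g. target = 100000 kHz and dot = 100050 kHz give
 *	1000000 * |100000 - 100050| / 100000 = 500 ppm,
 * so the final comparison above demands at least a 10 ppm improvement
 * before a new candidate replaces the current best.
 */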
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct dpll clock;
- unsigned int bestppm = 1000000;
- /* min update 19.2 MHz */
- int max_n = min(limit->n.max, refclk / 19200);
- bool found = false;
-
- target *= 5; /* fast clock */
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- /* based on hardware requirement, prefer smaller n for precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- clock.p = clock.p1 * clock.p2;
- /* based on hardware requirement, prefer bigger m1, m2 values */
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm;
-
- clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
- refclk * clock.m1);
-
- vlv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target,
- &clock,
- best_clock,
- bestppm, &ppm))
- continue;
-
- *best_clock = clock;
- bestppm = ppm;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- unsigned int best_error_ppm;
- struct dpll clock;
- u64 m2;
- bool found = false;
-
- memset(best_clock, 0, sizeof(*best_clock));
- best_error_ppm = 1000000;
-
- /*
- * Based on the hardware doc, n is always set to 1 and m1 is
- * always set to 2. If we need to support a 200 MHz refclk, we
- * will have to revisit this because n may no longer be 1.
- */
- clock.n = 1; clock.m1 = 2;
- target *= 5; /* fast clock */
-
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast;
- clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- unsigned int error_ppm;
-
- clock.p = clock.p1 * clock.p2;
-
- m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
- refclk * clock.m1);
-
- if (m2 > INT_MAX/clock.m1)
- continue;
-
- clock.m2 = m2;
-
- chv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
- best_error_ppm, &error_ppm))
- continue;
-
- *best_clock = clock;
- best_error_ppm = error_ppm;
- found = true;
- }
- }
-
- return found;
-}
-
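/*
 * Editor's note: on CHV m2 is a fixed-point value with 22 fractional
 * bits (hence the << 22 when deriving it above); a hypothetical
 * decomposition:
 *
 *	u32 m2_int  = clock.m2 >> 22;
 *	u32 m2_frac = clock.m2 & ((1 << 22) - 1);
 *
 * chv_calc_dpll_params() folds the fraction back into the VCO math.
 */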
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_bxt;
-
- return chv_find_best_dpll(limit, crtc_state,
- crtc_state->port_clock, refclk,
- NULL, best_clock);
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1315,12 +495,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
- if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
- drm_crtc_vblank_put(crtc);
-}
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1672,7 +846,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -1683,7 +857,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in transcoder be consistent with
@@ -1728,7 +902,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
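/*
 * Editor's note: the frame start delay fields written above are 0-based
 * while dev_priv->framestart_delay is kept 1-based (assumed range 1-4
 * lines), hence the recurring "- 1"; e.g. framestart_delay == 1
 * programs a field value of 0.
 */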
@@ -1805,55 +979,6 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
-
- /*
- * From Gen 11, in DSI command mode the frame counter is not
- * updated at the beginning of TE; if we used the hw counter we
- * would only see it update at the next TE, hence we switch to
- * the sw counter.
- */
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
- return 0;
-
- /*
- * On i965gm the hardware frame counter reads
- * zero when the TV encoder is enabled :(
- */
- if (IS_I965GM(dev_priv) &&
- (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
- return 0;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return 0xffffffff; /* full 32 bit counter */
- else if (INTEL_GEN(dev_priv) >= 3)
- return 0xffffff; /* only 24 bits of frame count */
- else
- return 0; /* Gen2 doesn't have a hardware frame counter */
-}
-
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- assert_vblank_disabled(&crtc->base);
- drm_crtc_set_max_vblank_count(&crtc->base,
- intel_crtc_max_vblank_count(crtc_state));
- drm_crtc_vblank_on(&crtc->base);
-}
-
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- drm_crtc_vblank_off(&crtc->base);
- assert_vblank_disabled(&crtc->base);
-}
-
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1968,8 +1093,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
static bool is_gen12_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-
}
static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,6 +1102,12 @@ static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
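/*
 * Editor's illustration: with the RC_CCS_CC modifier the framebuffer
 * carries three planes, and only the helper above treats plane 2
 * specially, e.g.:
 *
 *	is_gen12_ccs_cc_plane(fb, 0);	// false: main surface
 *	is_gen12_ccs_cc_plane(fb, 1);	// false: CCS plane
 *	is_gen12_ccs_cc_plane(fb, 2);	// true: 64-bit clear color
 */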
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
if (is_ccs_modifier(fb->modifier))
@@ -1998,6 +1129,9 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2016,7 +1150,7 @@ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier)
+ u64 modifier)
{
return info->is_yuv &&
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
@@ -2048,6 +1182,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
@@ -2182,8 +1317,13 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
return 0;
}
-static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane)
+static bool has_async_flips(struct drm_i915_private *i915)
+{
+ return INTEL_GEN(i915) >= 5;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
@@ -2196,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (INTEL_GEN(dev_priv) >= 9)
+ if (has_async_flips(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -2204,6 +1344,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return intel_tile_row_size(fb, color_plane);
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -2309,7 +1450,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
@@ -2327,12 +1468,9 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -2452,10 +1590,10 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
* Adjust the tile offset by moving the difference into
* the x/y offsets.
*/
-static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane,
- u32 old_offset, u32 new_offset)
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset)
{
return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
state->hw.rotation,
@@ -2532,9 +1670,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
return offset_aligned;
}
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
@@ -2608,6 +1746,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
@@ -2686,6 +1825,25 @@ static const struct drm_format_info gen12_ccs_formats[] = {
.hsub = 2, .vsub = 2, .is_yuv = true },
};
+/*
+ * Same as gen12_ccs_formats[] above, but with additional surface used
+ * to pass Clear Color information in plane 2 with 64 bits of data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2714,6 +1872,10 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return lookup_format_info(gen12_ccs_cc_formats,
+ ARRAY_SIZE(gen12_ccs_cc_formats),
+ cmd->pixel_format);
default:
return NULL;
}
@@ -2722,6 +1884,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -2940,7 +2103,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
int ccs_x, ccs_y;
int main_x, main_y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
@@ -3067,6 +2230,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int x, y;
int ret;
+ /*
+ * Plane 2 of Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(fb, i)) {
+ if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
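/*
 * Editor's note: IS_ALIGNED(x, PAGE_SIZE) is equivalent to
 * (x & (PAGE_SIZE - 1)) == 0, so the clear color plane's fb offset is
 * simply required to start on a page boundary before the plane is
 * skipped.
 */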
cpp = fb->format->cpp[i];
intel_fb_plane_dims(&width, &height, fb, i);
@@ -3271,7 +2446,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
}
}
-static int
+int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
@@ -3551,7 +2726,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -3561,11 +2736,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
* have been used on the same (or wrong) pipe. plane_mask uses
* unique ids, hence we can use that to reconstruct active_planes.
*/
+ crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->uapi.plane_mask)
+ crtc_state->uapi.plane_mask) {
+ crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+ }
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
@@ -3583,7 +2761,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -3613,12 +2791,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
-static struct intel_frontbuffer *
-to_intel_frontbuffer(struct drm_framebuffer *fb)
-{
- return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
-}
-
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3817,33 +2989,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
return INT_MAX;
}
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- int aux_plane = intel_main_to_aux_plane(fb, 0);
- u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- u32 alignment, offset;
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(fb, 0);
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
@@ -3852,9 +3010,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* main surface offset, and it must be non-negative. Make
* sure that is what we will get.
*/
- if (aux_plane && offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, aux_offset & ~(alignment - 1));
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@@ -3865,18 +3024,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((x + w) * cpp > plane_state->color_plane[0].stride) {
- if (offset == 0) {
+ while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+ if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
}
}
+ return 0;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
@@ -3899,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[0].offset = offset;
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
@@ -3971,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[uv_plane].offset = offset;
plane_state->color_plane[uv_plane].x = x;
plane_state->color_plane[uv_plane].y = y;
@@ -3991,7 +3187,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -4063,422 +3260,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int cpp = fb->format->cpp[0];
-
- /*
- * g4x bspec says 64bpp pixel rate can't exceed 80%
- * of cdclk when the sprite plane is enabled on the
- * same pipe. ilk/snb bspec says 64bpp pixel rate is
- * never allowed to exceed 80% of cdclk. Let's just go
- * with the ilk/snb limit always.
- */
- if (cpp == 8) {
- *num = 10;
- *den = 8;
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- unsigned int pixel_rate;
- unsigned int num, den;
-
- /*
- * Note that crtc_state->pixel_rate accounts for both
- * horizontal and vertical panel fitter downscaling factors.
- * Pre-HSW bspec tells us to only consider the horizontal
- * downscaling factor here. We ignore that and just consider
- * both for simplicity.
- */
- pixel_rate = crtc_state->pixel_rate;
-
- i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock with double wide pipe */
- if (crtc_state->double_wide)
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
-
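/*
 * Editor's note: for a 64bpp format (cpp == 8) the ratio is 10/8, so a
 * 100000 kHz pixel rate needs DIV_ROUND_UP(100000 * 10, 8) = 125000 kHz
 * of cdclk -- i.e. the pixel rate may not exceed 80% of cdclk, matching
 * the bspec limit quoted in i9xx_plane_ratio() above.
 */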
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- if (!HAS_GMCH(dev_priv)) {
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 16*1024;
- else
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 8*1024;
- else
- return 16*1024;
- } else {
- if (plane->i9xx_plane == PLANE_C)
- return 4*1024;
- else
- return 8*1024;
- }
-}
-
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dspcntr = 0;
-
- if (crtc_state->gamma_enable)
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5)
- dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
- return dspcntr;
-}
-
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 dspcntr;
-
- dspcntr = DISPLAY_PLANE_ENABLE;
-
- if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
- IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_XRGB1555:
- dspcntr |= DISPPLANE_BGRX555;
- break;
- case DRM_FORMAT_ARGB1555:
- dspcntr |= DISPPLANE_BGRA555;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRA888;
- break;
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBA888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRA101010;
- break;
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBA101010;
- break;
- case DRM_FORMAT_XBGR16161616F:
- dspcntr |= DISPPLANE_RGBX161616;
- break;
- default:
- MISSING_CASE(fb->format->format);
- return 0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_MODE_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (rotation & DRM_MODE_REFLECT_X)
- dspcntr |= DISPPLANE_MIRROR;
-
- return dspcntr;
-}
-
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x, src_y, src_w;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- /* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
- return -EINVAL;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
- else
- offset = 0;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- if (rotation & DRM_MODE_ROTATE_180) {
- src_x += src_w - 1;
- src_y += src_h - 1;
- } else if (rotation & DRM_MODE_REFLECT_X) {
- src_x += src_w - 1;
- }
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
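/*
 * Editor's note: pre-HSW hardware scans a 180-degree rotated plane from
 * its last pixel, so the adjustment above moves the start coordinate to
 * the opposite corner; e.g. a 4x4 source at (0, 0) rotated 180 degrees
 * starts scanout at (3, 3).
 */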
-static bool i9xx_plane_has_windowing(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-
- if (IS_CHERRYVIEW(dev_priv))
- return i9xx_plane == PLANE_B;
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return false;
- else if (IS_GEN(dev_priv, 4))
- return i9xx_plane == PLANE_C;
- else
- return i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- int ret;
-
- ret = chv_plane_check_rotation(plane_state);
- if (ret)
- return ret;
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- i9xx_plane_has_windowing(plane));
- if (ret)
- return ret;
-
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 linear_offset;
- int x = plane_state->color_plane[0].x;
- int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- unsigned long irqflags;
- u32 dspaddr_offset;
- u32 dspcntr;
-
- dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->color_plane[0].stride);
-
- if (INTEL_GEN(dev_priv) < 4) {
- /*
- * PLANE_A doesn't actually have a full window
- * generator but let's assume we still need to
- * program whatever is there.
- */
- intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
- (y << 16) | x);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
- linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
- (y << 16) | x);
- }
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
- u32 dspcntr;
-
- /*
- * DSPCNTR pipe gamma enable on g4x+ and pipe csc
- * enable on ilk+ affect the pipe bottom color as
- * well, so we must configure them even if the plane
- * is disabled.
- *
- * On pre-g4x there is no way to gamma correct the
- * pipe bottom color but we'll keep on doing this
- * anyway so that the crtc state readout works correctly.
- */
- dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-4 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- ret = val & DISPLAY_PLANE_ENABLE;
-
- if (INTEL_GEN(dev_priv) >= 5)
- *pipe = plane->pipe;
- else
- *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4647,6 +3428,7 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return PLANE_CTL_TILED_Y |
@@ -4707,9 +3489,6 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
- if (crtc_state->uapi.async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
@@ -4807,6 +3586,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
@@ -4996,532 +3777,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
- udelay(1000);
-
- /* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ilk_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, tries;
-
- /* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* Ironlake workaround, enable clock pointer after FDI enable */
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
-
-}
-
-static const int snb_b_fdi_train_param[] = {
- FDI_LINK_TRAIN_400MV_0DB_SNB_B,
- FDI_LINK_TRAIN_400MV_6DB_SNB_B,
- FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
- FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, retry;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN(dev_priv, 6)) {
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- }
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, j;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
-
- /* Try each vswing and preemphasis setting twice before moving on */
- for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
- /* disable first in case we need to retry */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[j/2];
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(1); /* should be 0.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done, level %i.\n",
- i);
- break;
- }
- udelay(1); /* should be 0.5us */
- }
- if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 fail on vswing %d\n", j / 2);
- continue;
- }
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(2); /* should be 1.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done, level %i.\n",
- i);
- goto train_done;
- }
- udelay(2); /* should be 1.5us */
- }
- if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 fail on vswing %d\n", j / 2);
- }
-
-train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
- }
-}
-
-static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
-static void ilk_fdi_disable(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after disabling FDI */
- if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
@@ -5752,7 +4007,7 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
*/
-static struct intel_encoder *
+struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@@ -6271,7 +4526,7 @@ static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
return (PS_FILTER_PROGRAMMED |
@@ -6470,7 +4725,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6497,7 +4752,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6550,7 +4805,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
new_crtc_state->active_planes;
}
@@ -6558,7 +4813,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return old_crtc_state->active_planes &&
- (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+ (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
static void intel_post_plane_update(struct intel_atomic_state *state,
@@ -6590,41 +4845,72 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
int i;
- for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
- u32 update_mask = new_crtc_state->update_planes;
- u32 plane_ctl, surf_addr;
- enum plane_id plane_id;
- unsigned long irqflags;
- enum pipe pipe;
-
- if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
- continue;
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->enable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->enable_flip_done(plane);
+ }
+}
- plane_id = plane->id;
- pipe = plane->pipe;
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
- surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->disable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->disable_flip_done(plane);
+ }
+}
- plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ bool need_vbl_wait = false;
+ int i;
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (plane->need_async_flip_disable_wa &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id)) {
+ /*
+ * Apart from the async flip bit we want to
+ * preserve the old state for the plane.
+ */
+ plane->async_flip(plane, old_crtc_state,
+ old_plane_state, false);
+ need_vbl_wait = true;
+ }
}
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (need_vbl_wait)
+ intel_wait_for_vblank(i915, crtc->pipe);
}
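The workaround exists because the async-flip enable bit is double buffered and only latches at the start of vblank; the helper therefore rewrites each affected plane with its old state minus the async bit. Note how need_vbl_wait hoists the vblank wait out of the loop, so several planes on one pipe cost a single vblank rather than one each. A condensed sketch of that shape, with hypothetical names:

```c
#include <stdbool.h>

/* Hypothetical stand-ins for the plane hooks used by the workaround. */
struct wa_plane {
    bool needs_async_disable_wa;
    void (*program)(struct wa_plane *plane, bool async_flip);
};

static void wait_for_vblank(void)
{
    /* block until the next vblank has passed */
}

static void async_flip_disable_wa(struct wa_plane *planes, int n)
{
    bool need_vbl_wait = false;
    int i;

    for (i = 0; i < n; i++) {
        if (!planes[i].needs_async_disable_wa)
            continue;
        /* re-latch the old state, but with the async bit cleared */
        planes[i].program(&planes[i], false);
        need_vbl_wait = true;
    }

    /* one wait covers every plane re-programmed above */
    if (need_vbl_wait)
        wait_for_vblank();
}
```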
static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -6681,7 +4967,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* If we're doing a modeset we don't need to do any
* pre-vblank watermark programming here.
*/
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/*
* For platforms that support atomic watermarks, program the
* 'intermediate' watermarks immediately. On pre-gen9 platforms, these
@@ -6717,10 +5003,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip &&
- !new_crtc_state->uapi.async_flip &&
- IS_GEN_RANGE(dev_priv, 9, 10))
- skl_disable_async_flip_wa(state, crtc, new_crtc_state);
+ if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ intel_crtc_async_flip_disable_wa(state, crtc);
}
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7140,7 +5424,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
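Every FRAME_START_DELAY hunk in this patch encodes the configured delay the same way: the register field stores the delay in scanlines minus one, so a framestart_delay of 1..4 maps to field values 0..3. A tiny illustrative encoder, not the driver's actual helper:

```c
#include <assert.h>
#include <stdint.h>

static uint32_t frame_start_delay_field(int framestart_delay)
{
    /* the hardware accepts 1..4 scanlines, stored as value - 1 */
    assert(framestart_delay >= 1 && framestart_delay <= 4);
    return (uint32_t)(framestart_delay - 1);
}
```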
@@ -7575,25 +5859,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = crtc->enabled_power_domains;
- crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc_state);
+ domains = get_crtc_power_domains(crtc_state);
- domains = new_domains & ~old_domains;
+ new_domains = domains & ~crtc->enabled_power_domains.mask;
+ old_domains = crtc->enabled_power_domains.mask & ~domains;
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
+ for_each_power_domain(domain, new_domains)
+ intel_display_power_get_in_set(dev_priv,
+ &crtc->enabled_power_domains,
+ domain);
- return old_domains & ~new_domains;
+ return old_domains;
}
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
- u64 domains)
+static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ u64 domains)
{
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
+ intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ &crtc->enabled_power_domains,
+ domains);
}
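The rewritten modeset_get_crtc_power_domains() reduces to two mask differences: take references for domains that are wanted but not yet held, and return the ones that are held but no longer wanted so the caller can drop them after the modeset. A self-contained sketch of that arithmetic:

```c
#include <stdint.h>

struct domain_delta {
    uint64_t to_get;    /* wanted but not currently held */
    uint64_t to_put;    /* held but no longer wanted */
};

static struct domain_delta compute_domain_delta(uint64_t wanted, uint64_t held)
{
    struct domain_delta d = {
        .to_get = wanted & ~held,
        .to_put = held & ~wanted,
    };

    return d;
}
```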
static void valleyview_crtc_enable(struct intel_atomic_state *state,
@@ -7789,12 +6073,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- enum intel_display_power_domain domain;
struct intel_plane *plane;
struct drm_atomic_state *state;
struct intel_crtc_state *temp_crtc_state;
enum pipe pipe = crtc->pipe;
- u64 domains;
int ret;
if (!crtc_state->hw.active)
@@ -7850,10 +6132,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
intel_update_watermarks(crtc);
intel_disable_shared_dpll(crtc_state);
- domains = crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
- crtc->enabled_power_domains = 0;
+ intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
@@ -7933,143 +6212,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
- return crtc_state->fdi_lanes;
-
- return 0;
-}
-
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_crtc *other_crtc;
- struct intel_crtc_state *other_crtc_state;
-
- drm_dbg_kms(&dev_priv->drm,
- "checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
- return -EINVAL;
- } else {
- return 0;
- }
- }
-
- if (INTEL_NUM_PIPES(dev_priv) == 2)
- return 0;
-
- /* Ivybridge 3 pipe is really complicated */
- switch (pipe) {
- case PIPE_A:
- return 0;
- case PIPE_B:
- if (pipe_config->fdi_lanes <= 2)
- return 0;
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
- return 0;
- case PIPE_C:
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "fdi link B uses too many lanes to enable link C\n");
- return -EINVAL;
- }
- return 0;
- default:
- BUG();
- }
-}
-
-#define RETRY 1
-static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int lane, link_bw, fdi_dotclock, ret;
- bool needs_recompute = false;
-
-retry:
- /* FDI is a binary signal running at ~2.7GHz, encoding
- * each output octet as 10 bits. The actual frequency
- * is stored as a divider into a 100MHz clock, and the
- * mode pixel clock is stored in units of 1KHz.
- * Hence the bw of each lane in terms of the mode signal
- * is:
- */
- link_bw = intel_fdi_link_freq(i915, pipe_config);
-
- fdi_dotclock = adjusted_mode->crtc_clock;
-
- lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
-
- pipe_config->fdi_lanes = lane;
-
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
-
- ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
- if (ret == -EDEADLK)
- return ret;
-
- if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
- pipe_config->pipe_bpp -= 2*3;
- drm_dbg_kms(&i915->drm,
- "fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
- needs_recompute = true;
- pipe_config->bw_constrained = true;
-
- goto retry;
- }
-
- if (needs_recompute)
- return RETRY;
-
- return ret;
-}
-
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8301,19 +6443,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->hw.ctm) {
- /*
- * There is only one pipe CSC unit per pipe, and we need that
- * for output conversion from RGB->YCBCR. So if CTM is already
- * applied we can't support YCBCR420 output.
- */
- drm_dbg_kms(&dev_priv->drm,
- "YCBCR420 and CTM together are not possible\n");
- return -EINVAL;
- }
-
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8425,51 +6554,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
- return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
-}
-
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
pipe)
{
@@ -8594,39 +6678,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
@@ -8886,128 +6937,7 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_disable_pll(dev_priv, pipe);
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
-
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- dpll |= (crtc_state->pixel_multiplier - 1)
- << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock->p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_GEN(dev_priv) >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
- if (crtc_state->sdvo_tv_clock)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
-}
-
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock->p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock->p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
-
- /*
- * Bspec:
- * "[Almador Errata}: For the correct operation of the muxed DVO pins
- * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
- * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
- * Enable) must be set to "1" in both the DPLL A Control Register
- * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
- *
- * For simplicity we simply keep both bits always enabled in
- * both DPLLs. The spec says we should disable the DVO 2X clock
- * when not needed, but this seems to work fine in practice.
- */
- if (IS_I830(dev_priv) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
- dpll |= DPLL_DVO_2X_MODE;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
@@ -9207,214 +7137,12 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 48000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i8xx_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
- limit = &intel_limits_i8xx_dvo;
- } else {
- limit = &intel_limits_i8xx_dac;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i8xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- if (intel_is_dual_link_lvds(dev_priv))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else {
- /* The option is for other outputs */
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &pnv_limits_lvds;
- } else {
- limit = &pnv_limits_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i9xx_lvds;
- } else {
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- chv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- vlv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
@@ -10316,7 +8044,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(pipe), val);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -10424,172 +8152,6 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
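To make the removed FDI comment concrete: FDI is 8b/10b encoded at roughly 2.7 GHz, so with clocks in kHz each lane delivers link_bw = 270000 kHz worth of pixel octets, and the DIV_ROUND_UP above yields the lane count. A standalone worked example for a 1080p60 mode (148500 kHz dotclock) at 24 bpp:

```c
#include <stdio.h>

/* mirrors DIV_ROUND_UP(bps, link_bw * 8) from ilk_get_lanes_required() */
static int lanes_required(int target_clock_khz, int link_bw_khz, int bpp)
{
    int bps = target_clock_khz * bpp;

    return (bps + link_bw_khz * 8 - 1) / (link_bw_khz * 8);
}

int main(void)
{
    /* ceil(148500 * 24 / (270000 * 8)) = ceil(3564000 / 2160000) = 2 */
    printf("%d lanes\n", lanes_required(148500, 270000, 24));
    return 0;
}
```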
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
-
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
- fp |= FP_CB_TUNE;
-
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
-
- dpll = 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- dpll |= (crtc_state->pixel_multiplier - 1)
- << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /*
- * The high speed IO clock is only really required for
- * SDVO/HDMI/DP, but we also enable it for CRT to make it
- * possible to share the DPLL between CRT and HDMI. Enabling
- * the clock needlessly does no real harm, except use up a
- * bit of power potentially.
- *
- * We'll limit this to IVB with 3 pipes, since it has only two
- * DPLLs and so DPLL sharing is the only way to get three pipes
- * driving PCH ports at the same time. On SNB we could do this,
- * and potentially avoid enabling the second DPLL, but it's not
- * clear if it's a win or a loss power-wise. No point in doing
- * this on ILK at all since it has a fixed DPLL<->pipe mapping.
- */
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
- switch (crtc_state->dpll.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_limit *limit;
- int refclk = 120000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!crtc_state->has_pch_encoder)
- return 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
- }
-
- if (intel_is_dual_link_lvds(dev_priv)) {
- if (refclk == 100000)
- limit = &ilk_limits_dual_lvds_100m;
- else
- limit = &ilk_limits_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &ilk_limits_single_lvds_100m;
- else
- limit = &ilk_limits_single_lvds;
- }
- } else {
- limit = &ilk_limits_dac;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- ilk_compute_dpll(crtc, crtc_state, NULL);
-
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
-
- return 0;
-}
-
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
@@ -11002,29 +8564,6 @@ out:
return ret;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- INTEL_GEN(dev_priv) >= 11) {
- struct intel_encoder *encoder =
- intel_get_crtc_new_encoder(state, crtc_state);
-
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
@@ -11226,16 +8765,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
- intel_wakeref_t wf;
u32 tmp;
if (INTEL_GEN(dev_priv) >= 11)
@@ -11306,16 +8842,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
@@ -11323,14 +8853,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder;
- intel_wakeref_t wf;
enum port port;
u32 tmp;
@@ -11340,16 +8867,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
/*
* The PLL needs to be enabled with a valid divider
* configuration, otherwise accessing DSI registers will hang
@@ -11432,30 +8953,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
- enum intel_display_power_domain power_domain;
- u64 power_domain_mask;
+ struct intel_display_power_domain_set power_domain_set = { };
bool active;
u32 tmp;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
-
- power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- wakerefs[power_domain] = wf;
- power_domain_mask = BIT_ULL(power_domain);
-
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs);
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -11478,6 +8989,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
}
+ if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ intel_vrr_get_config(crtc, pipe_config);
+
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
@@ -11519,14 +9033,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (wf) {
- wakerefs[power_domain] = wf;
- power_domain_mask |= BIT_ULL(power_domain);
-
+ if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
if (INTEL_GEN(dev_priv) >= 9)
skl_get_pfit_config(pipe_config);
else
@@ -11560,9 +9068,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
out:
- for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv,
- power_domain, wakerefs[power_domain]);
+ intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
return active;
}
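The hsw_get_pipe_config() conversion is the clearest picture of the new flow: zero-initialize an intel_display_power_domain_set on the stack, take conditional references through it as the readout progresses, and drop everything with a single bulk put on the way out, instead of juggling a wakeref array plus a u64 mask by hand. A compilable sketch of that shape with stand-in types, not the real i915 API:

```c
#include <stdbool.h>
#include <stdint.h>

struct power_domain_set {
    uint64_t mask;    /* domains this set currently holds */
};

static bool power_get_in_set_if_enabled(struct power_domain_set *set,
                                        int domain)
{
    /* pretend the domain is powered up; record the reference */
    set->mask |= UINT64_C(1) << domain;
    return true;
}

static void power_put_all_in_set(struct power_domain_set *set)
{
    /* drop every reference taken above; no per-domain bookkeeping */
    set->mask = 0;
}

static bool read_pipe_config(int pipe)
{
    struct power_domain_set set = { 0 };
    bool active = false;

    if (!power_get_in_set_if_enabled(&set, pipe))
        return false;

    /* ... register readout, possibly taking further domains ... */

    power_put_all_in_set(&set);
    return active;
}
```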
@@ -11582,569 +9088,6 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- u32 base;
-
- if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->color_plane[0].offset;
-}
-
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
-{
- int x = plane_state->uapi.dst.x1;
- int y = plane_state->uapi.dst.y1;
- u32 pos = 0;
-
- if (x < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
-
- if (y < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
- pos |= y << CURSOR_Y_SHIFT;
-
- return pos;
-}
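The removed intel_cursor_position() encodes CURPOS as sign/magnitude rather than two's complement: each axis stores the absolute coordinate plus a separate sign bit. A standalone illustration; the shift and sign-bit values are assumptions for the sketch, not the exact register layout:

```c
#include <stdint.h>
#include <stdio.h>

#define POS_SIGN    (1u << 15)   /* assumed: sign in bit 15 of each field */
#define POS_X_SHIFT 0
#define POS_Y_SHIFT 16

static uint32_t encode_cursor_pos(int x, int y)
{
    uint32_t pos = 0;

    if (x < 0) {
        pos |= POS_SIGN << POS_X_SHIFT;
        x = -x;
    }
    pos |= (uint32_t)x << POS_X_SHIFT;

    if (y < 0) {
        pos |= POS_SIGN << POS_Y_SHIFT;
        y = -y;
    }
    pos |= (uint32_t)y << POS_Y_SHIFT;

    return pos;
}

int main(void)
{
    /* cursor 10 px off the left edge, 20 lines down -> 0x0014800a */
    printf("0x%08x\n", encode_cursor_pos(-10, 20));
    return 0;
}
```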
-
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- const struct drm_mode_config *config =
- &plane_state->uapi.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- return width > 0 && width <= config->cursor_width &&
- height > 0 && height <= config->cursor_height;
-}
-
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- unsigned int rotation = plane_state->hw.rotation;
- int src_x, src_y;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
-
- if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
- return -EINVAL;
- }
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- offset += (src_h * src_w - 1) * fb->format->cpp[0];
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_rect src = plane_state->uapi.src;
- const struct drm_rect dst = plane_state->uapi.dst;
- int ret;
-
- if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
- return -EINVAL;
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true);
- if (ret)
- return ret;
-
- /* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->uapi.src = src;
- plane_state->uapi.dst = dst;
-
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return 2048;
-}
-
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- u32 cntl = 0;
-
- if (crtc_state->gamma_enable)
- cntl |= CURSOR_GAMMA_ENABLE;
-
- return cntl;
-}
-
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- return CURSOR_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
-
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- int width = drm_rect_width(&plane_state->uapi.dst);
-
- /*
- * 845g/865g are only limited by the width of their cursors,
- * the height is arbitrary up to the precision of the register.
- */
- return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- switch (fb->pitches[0]) {
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
- fb->pitches[0]);
- return -EINVAL;
- }
-
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i845_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 cntl = 0, base = 0, pos = 0, size = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned int width = drm_rect_width(&plane_state->uapi.dst);
- unsigned int height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i845_cursor_ctl_crtc(crtc_state);
-
- size = (height << 12) | width;
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* On these chipsets we can only modify the base/size/stride
- * whilst the cursor is disabled.
- */
- if (plane->cursor.base != base ||
- plane->cursor.size != size ||
- plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE, size);
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
-
- plane->cursor.base = base;
- plane->cursor.size = size;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i845_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i845_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-
- *pipe = PIPE_A;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return plane->base.dev->mode_config.cursor_width * 4;
-}
-
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 cntl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return cntl;
-
- if (crtc_state->gamma_enable)
- cntl = MCURSOR_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
- return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- u32 cntl = 0;
-
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
-
- switch (drm_rect_width(&plane_state->uapi.dst)) {
- case 64:
- cntl |= MCURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= MCURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= MCURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
- return 0;
- }
-
- if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
- cntl |= MCURSOR_ROTATE_180;
-
- return cntl;
-}
-
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- if (!intel_cursor_size_ok(plane_state))
- return false;
-
- /* Cursor width is limited to a few power-of-two sizes */
- switch (width) {
- case 256:
- case 128:
- case 64:
- break;
- default:
- return false;
- }
-
- /*
- * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
- * height from 8 lines up to the cursor width, when the
- * cursor is not rotated. Everything else requires square
- * cursors.
- */
- if (HAS_CUR_FBC(dev_priv) &&
- plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
- if (height < 8 || height > width)
- return false;
- } else {
- if (height != width)
- return false;
- }
-
- return true;
-}
-
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- if (fb->pitches[0] !=
- drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- /*
- * There's something wrong with the cursor on CHV pipe C.
- * If it straddles the left edge of the screen then
- * moving it away from the edge or disabling it often
- * results in a pipe underrun, and often that can lead to
- * dead pipe (constant underrun reported, and it scans
- * out just a solid color). To recover from that, the
- * display power well must be turned off and on again.
- * Refuse to put the cursor into that compromised position.
- */
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
- return -EINVAL;
- }
-
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned width = drm_rect_width(&plane_state->uapi.dst);
- unsigned height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i9xx_cursor_ctl_crtc(crtc_state);
-
- if (width != height)
- fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * On some platforms writing CURCNTR first will also
- * cause CURPOS to be armed by the CURBASE write.
- * Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always update CURCNTR before
- * CURPOS.
- *
- * On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additionally
- * a write to any of the cursor registers will cancel
- * an already armed cursor update. Thus leaving out
- * the CURBASE write after CURPOS could lead to a
- * cursor that doesn't appear to move, or even change
- * shape. Thus we always write CURBASE.
- *
- * The other registers are armed by the CURBASE write
- * except when the plane is getting enabled at which time
- * the CURCNTR write arms the update.
- */
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_write_cursor_wm(plane, crtc_state);
-
- if (!needs_modeset(crtc_state))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
-
- if (plane->cursor.base != base ||
- plane->cursor.size != fbc_ctl ||
- plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
- fbc_ctl);
- intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-
- plane->cursor.base = base;
- plane->cursor.size = fbc_ctl;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i9xx_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-3 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
-
- ret = val & MCURSOR_MODE;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- *pipe = plane->pipe;
- else
- *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
- MCURSOR_PIPE_SELECT_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -12527,33 +9470,6 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
- struct intel_crtc *crtc)
-{
- memset(crtc_state, 0, sizeof(*crtc_state));
-
- __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
-
- crtc_state->cpu_transcoder = INVALID_TRANSCODER;
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
- crtc_state->scaler_state.scaler_id = -1;
- crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
-}
-
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state;
-
- crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
-
- if (crtc_state)
- intel_crtc_state_reset(crtc_state, crtc);
-
- return crtc_state;
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -12594,14 +9510,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
-}
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @cur: current plane state
@@ -12651,7 +9559,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
@@ -12842,6 +9750,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->planar_linked_plane = NULL;
if (plane_state->planar_slave && !plane_state->uapi.visible) {
+ crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -12885,6 +9794,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_slave = true;
linked_state->planar_linked_plane = plane;
+ crtc_state->enabled_planes |= BIT(linked->id);
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
@@ -13013,7 +9923,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
@@ -13301,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len,
}
static const char * const output_format_str[] = {
- [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
@@ -13310,7 +10219,7 @@ static const char * const output_format_str[] = {
static const char *output_formats(enum intel_output_format format)
{
if (format >= ARRAY_SIZE(output_format_str))
- format = INTEL_OUTPUT_FORMAT_INVALID;
+ return "invalid";
return output_format_str[format];
}
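Dropping the INTEL_OUTPUT_FORMAT_INVALID table entry in favour of a literal return keeps the string table dense and makes the out-of-range path explicit. The general shape of the bounds-checked lookup, with illustrative names:

```c
#include <stddef.h>

static const char * const fmt_str[] = {
    "RGB",
    "YCBCR4:2:0",
    "YCBCR4:4:4",
};

static const char *fmt_name(size_t fmt)
{
    if (fmt >= sizeof(fmt_str) / sizeof(fmt_str[0]))
        return "invalid";
    return fmt_str[fmt];
}
```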
@@ -13428,6 +10337,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ yesno(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
@@ -13815,7 +10731,7 @@ encoder_retry:
return ret;
}
- if (ret == RETRY) {
+ if (ret == I915_DISPLAY_CONFIG_RETRY) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -14421,6 +11337,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(mst_master_transcoder);
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
+ PIPE_CONF_CHECK_I(vrr.vmin);
+ PIPE_CONF_CHECK_I(vrr.vmax);
+ PIPE_CONF_CHECK_I(vrr.flipline);
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -14845,7 +11767,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
verify_wm_state(crtc, new_crtc_state);
@@ -14879,10 +11801,17 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode =
+ crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
- drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
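With VRR enabled the scanout vblank no longer matches the fixed mode, so the hunk above switches to a by-value copy of the adjusted mode, widens its vblank to the VRR bounds, and hands only the local copy to drm_calc_timestamping_constants(), leaving the crtc state untouched. The copy-and-patch shape, with illustrative field names:

```c
struct sketch_mode {
    int crtc_vtotal;
    int crtc_vblank_start;
    int crtc_vblank_end;
};

/* patch a local copy; the caller's mode is passed by value */
static struct sketch_mode vrr_timestamping_mode(struct sketch_mode adjusted,
                                                int vmin_vblank_start,
                                                int vmax)
{
    adjusted.crtc_vtotal = vmax;
    adjusted.crtc_vblank_end = vmax;
    adjusted.crtc_vblank_start = vmin_vblank_start;
    return adjusted;
}
```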
@@ -14916,8 +11845,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
if (IS_GEN(dev_priv, 2)) {
int vtotal;
- vtotal = adjusted_mode->crtc_vtotal;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal = adjusted_mode.crtc_vtotal;
+ if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
@@ -14940,7 +11869,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
intel_release_shared_dplls(state, crtc);
@@ -14965,7 +11894,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled in during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.active ||
- !needs_modeset(crtc_state))
+ !intel_crtc_needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -14990,7 +11919,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->hw.active ||
- needs_modeset(crtc_state))
+ intel_crtc_needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -15102,6 +12031,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
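The union in the new helper is deliberate: a plane this commit disables exists only in the old mask, one it enables only in the new mask, and both kinds need their plane state pulled into the commit. Reduced to its essence:

```c
#include <stdint.h>

static uint8_t planes_needing_state(uint8_t old_enabled, uint8_t new_enabled)
{
    /* enabled before OR after the commit -> must be in the state */
    return old_enabled | new_enabled;
}
```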
+
static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
/* See {hsw,vlv,ivb}_plane_ratio() */
@@ -15296,7 +12238,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->hw.enable &&
transcoders & BIT(new_crtc_state->cpu_transcoder) &&
- needs_modeset(new_crtc_state))
+ intel_crtc_needs_modeset(new_crtc_state))
return true;
}
@@ -15317,7 +12259,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
slave = crtc;
master = old_crtc_state->bigjoiner_linked_crtc;
master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
- if (!master_crtc_state || !needs_modeset(master_crtc_state))
+ if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
@@ -15355,21 +12297,16 @@ claimed:
return -EINVAL;
}
-static int kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc_state *master_crtc_state)
{
struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_crtc_state(&state->base,
- master_crtc_state->bigjoiner_linked_crtc);
-
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
- return 0;
}
/**
@@ -15382,7 +12319,7 @@ static int kill_bigjoiner_slave(struct intel_atomic_state *state,
* Async flip can only change the plane surface address, so anything else
* changing is rejected by the intel_atomic_check_async() function.
* Once this check is cleared, flip done interrupt is enabled using
- * the skl_enable_flip_done() function.
+ * the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
generated and the requested events are sent to userspace in the interrupt
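For context, the userspace half of this contract is an ordinary page flip with the async flag set; a minimal libdrm sketch, where fd, crtc_id, fb_id and user_data are assumed to come from the caller's setup:

	/* Hypothetical libdrm call: request an async flip and ask for the
	 * completion event that the flip done interrupt path delivers. */
	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
			      user_data);
	if (ret)
		/* e.g. rejected by the checks described above */
		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, user_data);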
@@ -15401,7 +12338,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
return -EINVAL;
}
@@ -15426,7 +12363,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
* this (vlv/chv and icl+) should be added when async flip is
* enabled in the atomic IOCTL path.
*/
- if (plane->id != PLANE_PRIMARY)
+ if (!plane->async_flip)
return -EINVAL;
/*
@@ -15507,20 +12444,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- const struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc_state *linked_crtc_state;
+ struct intel_crtc *linked_crtc;
+ int ret;
if (!crtc_state->bigjoiner)
continue;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
- crtc_state->bigjoiner_linked_crtc);
+ linked_crtc = crtc_state->bigjoiner_linked_crtc;
+ linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
if (IS_ERR(linked_crtc_state))
return PTR_ERR(linked_crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ linked_crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &linked_crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ /* Kill old bigjoiner link, we may re-establish afterwards */
+ if (intel_crtc_needs_modeset(crtc_state) &&
+ crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
+ kill_bigjoiner_slave(state, crtc_state);
}
return 0;
@@ -15547,6 +12507,8 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->uapi.mode_changed = true;
}
+ intel_vrr_check_modeset(state);
+
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
@@ -15557,20 +12519,13 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/* Light copy */
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
continue;
}
- /* Kill old bigjoiner link, we may re-establish afterwards */
- if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
- ret = kill_bigjoiner_slave(state, new_crtc_state);
- if (ret)
- goto fail;
- }
-
if (!new_crtc_state->uapi.enable) {
if (!new_crtc_state->bigjoiner_slave) {
intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
@@ -15595,7 +12550,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
ret = intel_modeset_pipe_config_late(new_crtc_state);
@@ -15617,7 +12572,7 @@ static int intel_atomic_check(struct drm_device *dev,
* forced a full modeset.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
@@ -15640,11 +12595,21 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->update_pipe = false;
}
}
+
+ if (new_crtc_state->bigjoiner) {
+ struct intel_crtc_state *linked_crtc_state =
+ intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
+
+ if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
any_ms = true;
continue;
}
@@ -15670,20 +12635,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /*
- * distrust_bios_wm will force a full dbuf recomputation
- * but the hardware state will only get updated accordingly
- * if state->modeset==true. Hence distrust_bios_wm==true &&
- * state->modeset==false is an invalid combination which
- * would cause the hardware and software dbuf state to get
- * out of sync. We must prevent that.
- *
- * FIXME clean up this mess and introduce better
- * state tracking for dbuf.
- */
- if (dev_priv->wm.distrust_bios_wm)
- any_ms = true;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -15721,12 +12672,12 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- if (!needs_modeset(new_crtc_state) &&
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(new_crtc_state) ?
+ intel_crtc_needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -15758,7 +12709,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
if (mode_changed || crtc_state->update_pipe ||
crtc_state->uapi.color_mgmt_changed) {
@@ -15769,17 +12720,6 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return 0;
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
- return crtc->base.funcs->get_vblank_counter(&crtc->base);
-}
-
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -15849,7 +12789,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/*
* During modesets pipe configuration was programmed as the
@@ -15883,7 +12823,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
return;
intel_crtc_update_active_timings(new_crtc_state);
@@ -15905,7 +12845,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
if (!modeset) {
if (new_crtc_state->preload_luts &&
@@ -15997,7 +12937,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
continue;
if (!old_crtc_state->hw.active)
@@ -16021,7 +12961,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Disable everything else left on */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) ||
+ if (!intel_crtc_needs_modeset(new_crtc_state) ||
(handled & BIT(crtc->pipe)) ||
old_crtc_state->bigjoiner_slave)
continue;
@@ -16071,7 +13011,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
/* ignore allocations for crtc's that have been turned off. */
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
entries[pipe] = old_crtc_state->wm.skl.ddb;
update_pipes |= BIT(pipe);
} else {
@@ -16247,6 +13187,43 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ if (!fb ||
+ fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ continue;
+
+ /*
+ * The layout of the fast clear color value expected by HW
+ * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
+ * - 4 x 4 bytes per-channel value
+ * (in surface type specific float/int format provided by the fb user)
+ * - 8 bytes native color value used by the display
+ * (converted/written by GPU during a fast clear operation using the
+ * above per-channel values)
+ *
+ * The commit's FB prepare hook already ensured that the FB obj is pinned
+ * and the caller made sure that the object is synced with respect to the
+ * GPU's write of the related clear color value.
+ */
+ ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+ fb->offsets[2] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
+ /* The above could only fail if the FB obj has an unexpected backing store type. */
+ drm_WARN_ON(&i915->drm, ret);
+ }
+}
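As a reading aid, the layout described in the comment above can be pictured as a struct over plane #2's data at fb->offsets[2]; the struct and field names here are ours (the driver reads the raw bytes directly), but the offsets match the i915_gem_object_read_from_page() call:

	/* Illustration only: clear color block of a GEN12_RC_CCS_CC fb.
	 * The read above fetches 'native' (offset 16, 8 bytes == ccval). */
	struct gen12_ccs_cc_clear_color {
		u32 raw[4];	/* 4 x 4 byte per-channel values from the fb user */
		u64 native;	/* display-native value, written by the GPU */
	};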
+
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -16264,9 +13241,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ intel_atomic_prepare_plane_clear_colors(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state) ||
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
@@ -16292,7 +13271,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Complete events for now-disabled pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
@@ -16312,7 +13291,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_enable_flip_done(crtc);
+ intel_crtc_enable_flip_done(state, crtc);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -16337,10 +13316,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_disable_flip_done(crtc);
+ intel_crtc_disable_flip_done(state, crtc);
if (new_crtc_state->hw.active &&
- !needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -16376,8 +13355,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
+ modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -16541,7 +13519,6 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -16620,7 +13597,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -16650,7 +13627,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return 0;
}
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
struct i915_vma *vma;
@@ -16659,15 +13636,6 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_unpin_fb_vma(vma, old_plane_state->flags);
}
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
-{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
- };
-
- i915_gem_object_wait_priority(obj, 0, &attr);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -16684,6 +13652,9 @@ int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
+ };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
@@ -16712,7 +13683,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state)) {
+ if (intel_crtc_needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
@@ -16723,6 +13694,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
i915_fence_timeout(dev_priv),
@@ -16744,7 +13717,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret)
return ret;
- fb_obj_bump_render_priority(obj);
+ i915_gem_object_wait_priority(obj, 0, &attr);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
if (!new_plane_state->uapi.fence) { /* implicit fencing */
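For reference, the explicit-fencing branch above is normally driven from userspace via the per-plane IN_FENCE_FD property; a minimal libdrm sketch, with all property and object IDs assumed to have been looked up beforehand:

	/* Hypothetical atomic commit attaching a sync_file fd to a plane
	 * update; the kernel turns it into new_plane_state->uapi.fence. */
	drmModeAtomicReq *req = drmModeAtomicAlloc();

	drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, sync_fd);
	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);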
@@ -16833,540 +13806,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB8888:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- return modifier == DRM_FORMAT_MOD_LINEAR &&
- format == DRM_FORMAT_ARGB8888;
-}
-
-static const struct drm_plane_funcs i965_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i965_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs i8xx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i8xx_plane_format_mod_supported,
-};
-
-static int
-intel_legacy_cursor_update(struct drm_plane *_plane,
- struct drm_crtc *_crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- u32 src_x, u32 src_y,
- u32 src_w, u32 src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_plane_state *new_plane_state;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_crtc_state *new_crtc_state;
- int ret;
-
- /*
- * When crtc is inactive or there is a modeset pending,
- * wait for it to complete in the slowpath
- *
- * FIXME bigjoiner fastpath would be good
- */
- if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner)
- goto slow;
-
- /*
- * Don't do an async update if there is an outstanding commit modifying
- * the plane. This prevents our async update's changes from getting
- * overridden by a previous synchronous update's state.
- */
- if (old_plane_state->uapi.commit &&
- !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
- goto slow;
-
- /*
- * If any parameters change that may affect watermarks,
- * take the slowpath. Only changing fb or position should be
- * in the fastpath.
- */
- if (old_plane_state->uapi.crtc != &crtc->base ||
- old_plane_state->uapi.src_w != src_w ||
- old_plane_state->uapi.src_h != src_h ||
- old_plane_state->uapi.crtc_w != crtc_w ||
- old_plane_state->uapi.crtc_h != crtc_h ||
- !old_plane_state->uapi.fb != !fb)
- goto slow;
-
- new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
- if (!new_plane_state)
- return -ENOMEM;
-
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
- if (!new_crtc_state) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
-
- new_plane_state->uapi.src_x = src_x;
- new_plane_state->uapi.src_y = src_y;
- new_plane_state->uapi.src_w = src_w;
- new_plane_state->uapi.src_h = src_h;
- new_plane_state->uapi.crtc_x = crtc_x;
- new_plane_state->uapi.crtc_y = crtc_y;
- new_plane_state->uapi.crtc_w = crtc_w;
- new_plane_state->uapi.crtc_h = crtc_h;
-
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
-
- ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- old_plane_state, new_plane_state);
- if (ret)
- goto out_free;
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- goto out_free;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
- to_intel_frontbuffer(new_plane_state->hw.fb),
- plane->frontbuffer_bit);
-
- /* Swap plane state */
- plane->base.state = &new_plane_state->uapi;
-
- /*
- * We cannot swap crtc_state as it may be in use by an atomic commit or
- * page flip that's running simultaneously. If we swap crtc_state and
- * destroy the old state, we will cause a use-after-free there.
- *
- * Only update active_planes, which is needed for our internal
- * bookkeeping. Either value will do the right thing when updating
- * planes atomically. If the cursor was part of the atomic update then
- * we would have taken the slowpath.
- */
- crtc_state->active_planes = new_crtc_state->active_planes;
-
- if (new_plane_state->uapi.visible)
- intel_update_plane(plane, crtc_state, new_plane_state);
- else
- intel_disable_plane(plane, crtc_state);
-
- intel_plane_unpin_fb(old_plane_state);
-
-out_free:
- if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
- if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
- else
- intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
- return ret;
-
-slow:
- return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_legacy_cursor_update,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_format_mod_supported,
-};
-
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
- else if (INTEL_GEN(dev_priv) >= 4)
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
- else
- return i9xx_plane == PLANE_A;
-}
-
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *plane;
- const struct drm_plane_funcs *plane_funcs;
- unsigned int supported_rotations;
- const u32 *formats;
- int num_formats;
- int ret, zpos;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- /*
- * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
- * port is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
- plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
- else
- plane->i9xx_plane = (enum i9xx_plane_id) pipe;
- plane->id = PLANE_PRIMARY;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- formats = vlv_primary_formats;
- num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- /*
- * WaFP16GammaEnabling:ivb
- * "Workaround : When using the 64-bit format, the plane
- * output on each color channel has one quarter amplitude.
- * It can be brought up to full amplitude by using pipe
- * gamma correction or pipe color space conversion to
- * multiply the plane output by four."
- *
- * There is no dedicated plane gamma for the primary plane,
- * and using the pipe gamma/csc could conflict with other
- * planes, so we choose not to expose fp16 on IVB primary
- * planes. HSW primary planes no longer have this problem.
- */
- if (IS_IVYBRIDGE(dev_priv)) {
- formats = ivb_primary_formats;
- num_formats = ARRAY_SIZE(ivb_primary_formats);
- } else {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
- }
- } else {
- formats = i8xx_primary_formats;
- num_formats = ARRAY_SIZE(i8xx_primary_formats);
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- plane_funcs = &i965_plane_funcs;
- else
- plane_funcs = &i8xx_plane_funcs;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "primary %c", pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane %c",
- plane_name(plane->i9xx_plane));
- if (ret)
- goto fail;
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- } else {
- supported_rotations = DRM_MODE_ROTATE_0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- zpos = 0;
- drm_plane_create_zpos_immutable_property(&plane->base, zpos);
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_plane *cursor;
- int ret, zpos;
-
- cursor = intel_plane_alloc();
- if (IS_ERR(cursor))
- return cursor;
-
- cursor->pipe = pipe;
- cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
- cursor->id = PLANE_CURSOR;
- cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- cursor->max_stride = i845_cursor_max_stride;
- cursor->update_plane = i845_update_cursor;
- cursor->disable_plane = i845_disable_cursor;
- cursor->get_hw_state = i845_cursor_get_hw_state;
- cursor->check_plane = i845_check_cursor;
- } else {
- cursor->max_stride = i9xx_cursor_max_stride;
- cursor->update_plane = i9xx_update_cursor;
- cursor->disable_plane = i9xx_disable_cursor;
- cursor->get_hw_state = i9xx_cursor_get_hw_state;
- cursor->check_plane = i9xx_check_cursor;
- }
-
- cursor->cursor.base = ~0;
- cursor->cursor.cntl = ~0;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
- cursor->cursor.size = ~0;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- cursor_format_modifiers,
- DRM_PLANE_TYPE_CURSOR,
- "cursor %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&cursor->base,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180);
-
- zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
- drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&cursor->base);
-
- drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
-
- return cursor;
-
-fail:
- intel_plane_free(cursor);
-
- return ERR_PTR(ret);
-}
-
-#define INTEL_CRTC_FUNCS \
- .gamma_set = drm_atomic_helper_legacy_gamma_set, \
- .set_config = drm_atomic_helper_set_config, \
- .destroy = intel_crtc_destroy, \
- .page_flip = drm_atomic_helper_page_flip, \
- .atomic_duplicate_state = intel_crtc_duplicate_state, \
- .atomic_destroy_state = intel_crtc_destroy_state, \
- .set_crc_source = intel_crtc_set_crc_source, \
- .verify_crc_source = intel_crtc_verify_crc_source, \
- .get_crc_sources = intel_crtc_get_crc_sources
-
-static const struct drm_crtc_funcs bdw_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = bdw_enable_vblank,
- .disable_vblank = bdw_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs ilk_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = ilk_enable_vblank,
- .disable_vblank = ilk_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs g4x_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i965_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915gm_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915gm_enable_vblank,
- .disable_vblank = i915gm_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i8xx_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- /* no hw vblank counter */
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static struct intel_crtc *intel_crtc_alloc(void)
-{
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
- if (!crtc)
- return ERR_PTR(-ENOMEM);
-
- crtc_state = intel_crtc_state_alloc(crtc);
- if (!crtc_state) {
- kfree(crtc);
- return ERR_PTR(-ENOMEM);
- }
-
- crtc->base.state = &crtc_state->uapi;
- crtc->config = crtc_state;
-
- return crtc;
-}
-
-static void intel_crtc_free(struct intel_crtc *crtc)
-{
- intel_crtc_destroy_state(&crtc->base, crtc->base.state);
- kfree(crtc);
-}
-
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
@@ -17379,100 +13818,6 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *primary, *cursor;
- const struct drm_crtc_funcs *funcs;
- struct intel_crtc *crtc;
- int sprite, ret;
-
- crtc = intel_crtc_alloc();
- if (IS_ERR(crtc))
- return PTR_ERR(crtc);
-
- crtc->pipe = pipe;
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
-
- primary = intel_primary_plane_create(dev_priv, pipe);
- if (IS_ERR(primary)) {
- ret = PTR_ERR(primary);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(primary->id);
-
- for_each_sprite(dev_priv, pipe, sprite) {
- struct intel_plane *plane;
-
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(plane->id);
- }
-
- cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (IS_ERR(cursor)) {
- ret = PTR_ERR(cursor);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(cursor->id);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
- funcs = &g4x_crtc_funcs;
- else if (IS_GEN(dev_priv, 4))
- funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- funcs = &i915gm_crtc_funcs;
- else if (IS_GEN(dev_priv, 3))
- funcs = &i915_crtc_funcs;
- else
- funcs = &i8xx_crtc_funcs;
- } else {
- if (INTEL_GEN(dev_priv) >= 8)
- funcs = &bdw_crtc_funcs;
- else
- funcs = &ilk_crtc_funcs;
- }
-
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
- &primary->base, &cursor->base,
- funcs, "pipe %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
- dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
- if (INTEL_GEN(dev_priv) < 9) {
- enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
- }
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_crtc_create_scaling_filter_property(&crtc->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- intel_color_init(crtc);
-
- intel_crtc_crc_init(crtc);
-
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
-
- return 0;
-
-fail:
- intel_crtc_free(crtc);
-
- return ret;
-}
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -17555,48 +13900,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
- int pps_num;
- int pps_idx;
-
- if (HAS_DDI(dev_priv))
- return;
- /*
- * This w/a is needed at least on CPT/PPT, but to be sure apply it
- * everywhere where registers can be write protected.
- */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
-
- for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
- val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
- }
-}
-
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
- dev_priv->pps_mmio_base = PCH_PPS_BASE;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->pps_mmio_base = VLV_PPS_BASE;
- else
- dev_priv->pps_mmio_base = PPS_BASE;
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_init(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -17997,7 +14306,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- if (is_gen12_ccs_plane(fb, i)) {
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
@@ -18195,86 +14504,40 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_dpll_init_clock_hook(dev_priv);
+
if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- skl_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- ilk_crtc_compute_clock;
dev_priv->display.crtc_enable = ilk_crtc_enable;
dev_priv->display.crtc_disable = ilk_crtc_disable;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_G4X(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN(dev_priv, 2)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- }
+ intel_fdi_init_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- else
+ dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
+ } else {
dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
+ dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
+ }
}
@@ -18282,14 +14545,10 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
-
- dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -18522,8 +14781,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- if (INTEL_GEN(i915) >= 9)
- mode_config->async_page_flip = true;
+ mode_config->async_page_flip = has_async_flips(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -18608,6 +14866,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ i915->framestart_delay = 1; /* valid range: 1-4 */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
@@ -18654,6 +14914,8 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
+ intel_pps_setup(i915);
+
intel_gmbus_setup(i915);
drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
@@ -18942,7 +15204,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -18950,7 +15212,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -18963,7 +15225,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
@@ -18972,7 +15234,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
}
@@ -19165,7 +15427,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
}
}
@@ -19588,7 +15850,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (drm_WARN_ON(dev, put_domains))
- modeset_put_power_domains(dev_priv, put_domains);
+ modeset_put_crtc_power_domains(crtc, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 5e0d42d82c11..76f8a805b0a3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -499,6 +499,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -544,7 +546,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -628,11 +629,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -643,7 +642,23 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier);
+ u64 modifier);
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane);
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ca41e8c00ad7..d62b18d5ecd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -18,6 +18,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
}
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+ u64 count;
+ int row;
+
+ count = 0;
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+ count += crtc->debug.vbl.times[row];
+ seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+ if (!count)
+ return;
+
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+ char columns[80] = " |";
+ unsigned int x;
+
+ if (row & 1) {
+ const char *units;
+
+ if (row > 10) {
+ x = 1000000;
+ units = "ms";
+ } else {
+ x = 1000;
+ units = "us";
+ }
+
+ snprintf(columns, sizeof(columns), "%4ld%s |",
+ DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+ }
+
+ if (crtc->debug.vbl.times[row]) {
+ x = ilog2(crtc->debug.vbl.times[row]);
+ memset(columns + 8, '*', x);
+ columns[8 + x] = '\0';
+ }
+
+ seq_printf(m, "%s%s\n", hdr, columns);
+ }
+
+ seq_printf(m, "%sMin update: %lluns\n",
+ hdr, crtc->debug.vbl.min);
+ seq_printf(m, "%sMax update: %lluns\n",
+ hdr, crtc->debug.vbl.max);
+ seq_printf(m, "%sAverage update: %lluns\n",
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
+ seq_printf(m, "%sOverruns > %uus: %u\n",
+ hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
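The binning of samples into debug.vbl.times[] happens at the measurement site, outside this hunk; a mapping consistent with the labels printed above, where row r collects updates of roughly 2^(r + 9) ns, might look like:

	/* Hypothetical bucket selection matching the row labels: clamp an
	 * update duration in ns into the [0, rows) histogram table. */
	static unsigned int vbl_bucket(u64 duration_ns, unsigned int rows)
	{
		unsigned int r = duration_ns > 512 ? ilog2(duration_ns) - 9 : 0;

		return min(r, rows - 1);
	}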
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+ crtc_updates_info(m, m->private, "");
+ return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_crtc *crtc = m->private;
+
+ /* May race with an update. Meh. */
+ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+ return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_updates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+ debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+ to_intel_crtc(crtc), &crtc_updates_fops);
+}
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
+
+ crtc_updates_info(m, crtc, "\t");
}
static int i915_display_info(struct seq_file *m, void *unused)
@@ -1032,7 +1139,6 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (!dev_priv->ipc_enabled && enable)
drm_info(&dev_priv->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
}
@@ -2048,13 +2154,13 @@ static int i915_panel_show(struct seq_file *m, void *data)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
+ intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
+ intel_dp->pps.panel_power_down_delay);
seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
+ intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
+ intel_dp->pps.backlight_off_delay);
return 0;
}
@@ -2278,3 +2384,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
return 0;
}
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Returns 0 on success, negative error codes on error.
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+ if (!crtc->debugfs_entry)
+ return -ENODEV;
+
+ crtc_updates_add(crtc);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index c922c1745bfe..557901f3eb90 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -7,14 +7,17 @@
#define __INTEL_DISPLAY_DEBUGFS_H__
struct drm_connector;
+struct drm_crtc;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
+int intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fe2d90bba536..c11c37c65d86 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4,7 +4,6 @@
*/
#include "display/intel_crt.h"
-#include "display/intel_dp.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -16,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -936,7 +936,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -1446,7 +1446,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
@@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
}
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
@@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use to suppress wakeref
+ * tracking when the corresponding debug kconfig option is disabled; it should
+ * not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t __maybe_unused wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get_if_enabled(i915, domain);
+ if (!wf)
+ return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+
+ return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+
+ for_each_power_domain(domain, mask) {
+ intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+ intel_display_power_put(i915, domain, wf);
+ power_domain_set->mask &= ~BIT_ULL(domain);
+ }
+}
+
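/*
 * Editor's sketch (not part of the patch): typical use of the new
 * power-domain-set helpers introduced above. A caller collects
 * references into one set and later drops them all together; the
 * per-domain wakeref bookkeeping is handled internally depending on
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM. The get_crtc_power_domains() /
 * put_crtc_power_domains() names are hypothetical.
 */
static void get_crtc_power_domains(struct drm_i915_private *i915,
				   struct intel_display_power_domain_set *set,
				   u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_get_in_set(i915, set, domain);
}

static void put_crtc_power_domains(struct drm_i915_private *i915,
				   struct intel_display_power_domain_set *set)
{
	/* Drops every domain currently tracked in the set, clearing its mask. */
	intel_display_power_put_all_in_set(i915, set);
}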
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
@@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ if (!i915->params.disable_power_well) {
+ drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+ i915->power_domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
+ }
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
@@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
@@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
{
struct i915_power_domains *power_domains = &i915->power_domains;
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
+ fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
@@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 4aa0a09cf14f..bc30c479be53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -212,7 +212,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
- intel_wakeref_t wakeref;
+ intel_wakeref_t init_wakeref;
+ intel_wakeref_t disable_wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
@@ -224,6 +225,13 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
+struct intel_display_power_domain_set {
+ u64 mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
for_each_if(BIT_ULL(domain) & (mask))
@@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
@@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915,
__intel_display_power_put_async(i915, domain, wakeref);
}
#else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+
static inline void
intel_display_power_put(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
@@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
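/*
 * Editor's note: after this change the header exposes, in simplified
 * form (a sketch, not the verbatim header):
 *
 *	#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 *	void intel_display_power_put(i915, domain, wakeref);	// tracked
 *	#else
 *	void intel_display_power_put_unchecked(i915, domain);	// internal only
 *	static inline void intel_display_power_put(i915, domain, wakeref)
 *	{ intel_display_power_put_unchecked(i915, domain); }
 *	#endif
 *
 * i.e. _unchecked is no longer part of the public API, and with wakeref
 * tracking enabled it does not exist at all.
 */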
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+}
+
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..184ecbbcec99 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -225,6 +225,17 @@ struct intel_encoder {
const struct drm_connector *audio_connector;
};
+struct intel_panel_bl_funcs {
+ /* Connector and platform specific backlight functions */
+ int (*setup)(struct intel_connector *connector, enum pipe pipe);
+ u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
+ void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+ void (*enable)(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
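/*
 * Editor's sketch: with the callbacks moved into a vtable, a backlight
 * implementation can now be described by a single const structure
 * instead of assigning function pointers on struct intel_panel one by
 * one. The foo_*() names below are hypothetical.
 */
static const struct intel_panel_bl_funcs foo_pwm_funcs = {
	.setup = foo_setup_backlight,
	.get = foo_get_backlight,
	.set = foo_set_backlight,
	.disable = foo_disable_backlight,
	.enable = foo_enable_backlight,
	.hz_to_pwm = foo_hz_to_pwm,
};

/* During panel init, roughly: panel->backlight.pwm_funcs = &foo_pwm_funcs; */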
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
@@ -241,24 +252,28 @@ struct intel_panel {
bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
+ u32 pwm_level_min;
+ u32 pwm_level_max;
+ bool pwm_enabled;
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
struct pwm_state pwm_state;
/* DPCD backlight */
- u8 pwmgen_bit_count;
+ union {
+ struct {
+ u8 pwmgen_bit_count;
+ } vesa;
+ struct {
+ bool sdr_uses_aux;
+ } intel;
+ } edp;
struct backlight_device *device;
- /* Connector and platform specific backlight functions */
- int (*setup)(struct intel_connector *connector, enum pipe pipe);
- u32 (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, u32 level);
- void (*disable)(const struct drm_connector_state *conn_state);
- void (*enable)(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+ const struct intel_panel_bl_funcs *funcs;
+ const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@@ -339,6 +354,10 @@ struct intel_hdcp_shim {
enum transcoder cpu_transcoder,
bool enable);
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* Ensures the link is still protected */
bool (*check_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
@@ -370,8 +389,13 @@ struct intel_hdcp_shim {
int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
};
struct intel_hdcp {
@@ -398,7 +422,6 @@ struct intel_hdcp {
* content can flow only through a link protected by HDCP2.2.
*/
u8 content_type;
- struct hdcp_port_data port_data;
bool is_paired;
bool is_repeater;
@@ -432,6 +455,8 @@ struct intel_hdcp {
* Hence caching the transcoder here.
*/
enum transcoder cpu_transcoder;
+ /* Only used for DP MST stream encryption */
+ enum transcoder stream_transcoder;
};
struct intel_connector {
@@ -531,7 +556,7 @@ struct intel_plane_state {
struct drm_framebuffer *fb;
u16 alpha;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
unsigned int rotation;
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
@@ -604,6 +629,11 @@ struct intel_plane_state {
u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
+
+ struct drm_rect psr2_sel_fetch_area;
+
+ /* Clear Color Value */
+ u64 ccval;
};
struct intel_initial_plane_config {
@@ -663,6 +693,8 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
struct intel_wm_level {
bool enable;
@@ -798,7 +830,6 @@ struct intel_crtc_wm_state {
};
enum intel_output_format {
- INTEL_OUTPUT_FORMAT_INVALID,
INTEL_OUTPUT_FORMAT_RGB,
INTEL_OUTPUT_FORMAT_YCBCR420,
INTEL_OUTPUT_FORMAT_YCBCR444,
@@ -1047,7 +1078,10 @@ struct intel_crtc_state {
u32 cgm_mode;
};
- /* bitmask of visible planes (enum plane_id) */
+ /* bitmask of logically enabled planes (enum plane_id) */
+ u8 enabled_planes;
+
+ /* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1118,6 +1152,13 @@ struct intel_crtc_state {
struct intel_dsb *dsb;
u32 psr2_man_track_ctl;
+
+ /* Variable Refresh Rate state */
+ struct {
+ bool enable;
+ u8 pipeline_full;
+ u16 flipline, vmin, vmax;
+ } vrr;
};
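/*
 * Editor's sketch of the vmin/vmax relationship (simplified; the exact
 * i915 computation lives in intel_vrr_compute_config() and may differ):
 * refresh = pixel_clock / (htotal * vtotal), so the highest supported
 * refresh rate bounds vtotal from below and the lowest from above.
 */
static void example_vrr_vtotal_range(const struct drm_display_mode *m,
				     int min_refresh, int max_refresh,
				     u16 *vmin, u16 *vmax)
{
	/* m->clock is in kHz, hence the * 1000 to get Hz. */
	*vmin = DIV_ROUND_UP(m->clock * 1000, m->htotal * max_refresh);
	*vmax = m->clock * 1000 / (m->htotal * min_refresh);
}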
enum intel_pipe_crc_source {
@@ -1160,7 +1201,9 @@ struct intel_crtc {
/* I915_MODE_FLAG_* */
u8 mode_flags;
- unsigned long long enabled_power_domains;
+ u16 vmax_vblank_start;
+
+ struct intel_display_power_domain_set enabled_power_domains;
struct intel_overlay *overlay;
struct intel_crtc_state *config;
@@ -1186,6 +1229,15 @@ struct intel_crtc {
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ struct {
+ u64 min;
+ u64 max;
+ u64 sum;
+ unsigned int over;
+ unsigned int times[17]; /* [1us, 16ms] */
+ } vbl;
+#endif
} debug;
/* scalers available on this crtc */
@@ -1203,6 +1255,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
+ bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
struct {
@@ -1239,7 +1292,10 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+ const struct intel_plane_state *plane_state,
+ bool async_flip);
+ void (*enable_flip_done)(struct intel_plane *plane);
+ void (*disable_flip_done)(struct intel_plane *plane);
};
struct intel_watermark_params {
@@ -1321,6 +1377,43 @@ struct intel_dp_compliance {
u8 test_lane_count;
};
+struct intel_dp_pcon_frl {
+ bool is_trained;
+ int trained_rate_gbps;
+};
+
+struct intel_pps {
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
+ intel_wakeref_t vdd_wakeref;
+
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+ /*
+ * Pipe currently driving the port. Used for preventing
+ * the use of the PPS for any pipe currentrly driving
+ * external DP as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
+ struct edp_power_seq pps_delays;
+};
+
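/*
 * Editor's note: with the PPS state consolidated into struct intel_pps,
 * call sites switch from, e.g.,
 *
 *	intel_dp->panel_power_up_delay
 *	intel_dp->pps_pipe
 * to
 *	intel_dp->pps.panel_power_up_delay
 *	intel_dp->pps.pps_pipe
 *
 * matching the new "struct intel_pps pps" member added below.
 */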
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1331,6 +1424,7 @@ struct intel_dp {
bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
+ bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1339,6 +1433,7 @@ struct intel_dp {
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
u8 fec_capable;
+ u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1355,38 +1450,11 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
- u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- unsigned long last_power_on;
- unsigned long last_backlight_off;
- ktime_t panel_power_off_time;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
- /*
- * Pipe currently driving the port. Used for preventing
- * the use of the PPS for any pipe currentrly driving
- * external DP as that will mess things up on VLV.
- */
- enum pipe active_pipe;
- /*
- * Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
- */
- bool pps_reset;
- struct edp_power_seq pps_delays;
+ struct intel_pps pps;
bool can_mst; /* this port supports mst */
bool is_mst;
@@ -1432,15 +1500,22 @@ struct intel_dp {
struct {
int min_tmds_clock, max_tmds_clock;
int max_dotclock;
+ int pcon_max_frl_bw;
u8 max_bpc;
bool ycbcr_444_to_420;
+ bool rgb_to_ycbcr;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
bool hobl_failed;
bool hobl_active;
+
+ struct intel_dp_pcon_frl frl;
};
enum lspcon_vendor {
@@ -1450,6 +1525,7 @@ enum lspcon_vendor {
struct intel_lspcon {
bool active;
+ bool hdr_supported;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
};
@@ -1466,6 +1542,8 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ intel_wakeref_t ddi_io_wakeref;
+ intel_wakeref_t aux_wakeref;
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
int tc_link_refcount;
@@ -1475,10 +1553,14 @@ struct intel_digital_port {
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
- /* protects num_hdcp_streams reference count */
+ /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
struct mutex hdcp_mutex;
/* the number of pipes using HDCP signalling out of this port */
unsigned int num_hdcp_streams;
+ /* port HDCP auth status */
+ bool hdcp_auth_status;
+ /* HDCP port data that needs to be passed to the security f/w */
+ struct hdcp_port_data hdcp_port_data;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1755,6 +1837,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+ return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1777,4 +1865,32 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
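/*
 * Editor's example: for n = 2, m1 = 12, m2 = 9 the helper above packs
 * (2 << 16) | (12 << 8) | 9 == 0x00020c09 into the FP register value.
 */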
+
+static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return dev_priv->fdi_pll_freq;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..8c12d5375607 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -41,13 +41,13 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -58,10 +58,12 @@
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#define DP_DPRX_ESI_LEN 14
@@ -121,6 +123,11 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
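/*
 * Editor's note: vlv_get_dpll() centralizes the VLV-vs-CHV reference
 * DPLL choice that was previously open-coded at its call sites; e.g.
 * the removed power-sequencer kick code further below can now do
 * (sketch):
 *
 *	vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv));
 */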
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
@@ -145,12 +152,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
@@ -162,8 +163,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
int i, max_rate;
int max_lttpr_rate;
- if (drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
static const int quirk_rates[] = { 162000, 270000, 324000 };
@@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return -1;
}
+ if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with max parameters\n");
+ intel_dp->use_max_params = true;
+ return 0;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector,
!drm_mode_is_420_only(info, mode))
return INTEL_OUTPUT_FORMAT_RGB;
+ if (intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
if (intel_dp->dfp.ycbcr_444_to_420)
return INTEL_OUTPUT_FORMAT_YCBCR444;
else
@@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
const struct drm_display_info *info = &connector->base.display_info;
int tmds_clock;
+ /* If PCON supports FRL MODE, check FRL bandwidth constraints */
+ if (intel_dp->dfp.pcon_max_frl_bw) {
+ int target_bw;
+ int max_frl_bw;
+ int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+
+ target_bw = bpp * target_clock;
+
+ max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+ /* convert bw from Gbps to Kbps */
+ max_frl_bw = max_frl_bw * 1000000;
+
+ if (target_bw > max_frl_bw)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+ }
+
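/*
 * Editor's worked example of the FRL bandwidth check above: target_bw
 * comes out in Kbps because bpp is multiplied by target_clock in kHz.
 * For a 3840x2160@60 mode (594000 kHz) at 24 bpp:
 *
 *	target_bw  = 24 * 594000  = 14256000 Kbps (~14.3 Gbps)
 *	max_frl_bw = 18 * 1000000 = 18000000 Kbps (18 Gbps PCON)
 *
 * so the mode passes; a 9 Gbps FRL link (9000000 Kbps) would reject it
 * with MODE_CLOCK_HIGH.
 */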
if (intel_dp->dfp.max_dotclock &&
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
@@ -833,1125 +863,6 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
-{
- int i;
- u32 v = 0;
-
- if (src_bytes > 4)
- src_bytes = 4;
- for (i = 0; i < src_bytes; i++)
- v |= ((u32)src[i]) << ((3 - i) * 8);
- return v;
-}
-
-static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
-{
- int i;
- if (dst_bytes > 4)
- dst_bytes = 4;
- for (i = 0; i < dst_bytes; i++)
- dst[i] = src >> ((3-i) * 8);
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd);
-static void
-intel_dp_pps_init(struct intel_dp *intel_dp);
-
-static intel_wakeref_t
-pps_lock(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * See intel_power_sequencer_reset() why we need
- * a power domain reference here.
- */
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
-
- mutex_lock(&dev_priv->pps_mutex);
-
- return wakeref;
-}
-
-static intel_wakeref_t
-pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)),
- wakeref);
- return 0;
-}
-
-#define with_pps_lock(dp, wf) \
- for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
-
-static void
-vlv_power_sequencer_kick(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps_pipe;
- bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
- enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- u32 DP;
-
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
- DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- DP |= DP_PORT_WIDTH(1);
- DP |= DP_LINK_TRAIN_PAT_1;
-
- if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SEL_CHV(pipe);
- else
- DP |= DP_PIPE_SEL(pipe);
-
- pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
-
- /*
- * The DPLL for the pipe must be enabled for this to work.
- * So enable temporarily it if it's not already enabled.
- */
- if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev_priv) &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
-
- if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
- return;
- }
- }
-
- /*
- * Similar magic as in intel_dp_enable_port().
- * We _must_ do this port enable + disable trick
- * to make this power sequencer lock onto the port.
- * Otherwise even VDD force bit won't work.
- */
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
-
- if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
- }
-}
-
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe !=
- intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps_pipe);
- } else {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->pps_pipe != INVALID_PIPE);
-
- if (intel_dp->active_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->active_pipe);
- }
- }
-
- if (pipes == 0)
- return INVALID_PIPE;
-
- return ffs(pipes) - 1;
-}
-
-static enum pipe
-vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- return intel_dp->pps_pipe;
-
- pipe = vlv_find_free_pps(dev_priv);
-
- /*
- * Didn't find one. This should not happen since there
- * are two power sequencers and up to two eDP ports.
- */
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
- pipe = PIPE_A;
-
- vlv_steal_power_sequencer(dev_priv, pipe);
- intel_dp->pps_pipe = pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-
- /*
- * Even vdd force doesn't work until we've made
- * the power sequencer lock in on the port.
- */
- vlv_power_sequencer_kick(intel_dp);
-
- return intel_dp->pps_pipe;
-}
-
-static int
-bxt_power_sequencer_idx(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- if (!intel_dp->pps_reset)
- return backlight_controller;
-
- intel_dp->pps_reset = false;
-
- /*
- * Only the HW needs to be reprogrammed, the SW state is fixed and
- * has been setup during connector init.
- */
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-
- return backlight_controller;
-}
-
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
-}
-
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
-}
-
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return true;
-}
-
-static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
-{
- enum pipe pipe;
-
- for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
- PANEL_PORT_SELECT_MASK;
-
- if (port_sel != PANEL_PORT_SELECT_VLV(port))
- continue;
-
- if (!pipe_check(dev_priv, pipe))
- continue;
-
- return pipe;
- }
-
- return INVALID_PIPE;
-}
-
-static void
-vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum port port = dig_port->base.port;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* try to find a pipe with this port selected */
- /* first pick one where the panel is on */
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
- /* didn't find one? pick one where vdd is on */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
- /* didn't find one? pick one with just the correct port */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
-
- /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
- if (intel_dp->pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- return;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
-
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-}
-
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !(IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_GEN9_LP(dev_priv))))
- return;
-
- /*
- * We can't grab pps_mutex here due to deadlock with power_domain
- * mutex when power_domain functions are called while holding pps_mutex.
- * That also means that in order to use pps_pipe the code needs to
- * hold both a power domain reference and pps_mutex, and the power domain
- * reference get/put must be done while _not_ holding pps_mutex.
- * pps_{lock,unlock}() do these steps in the correct order, so one
- * should use them always.
- */
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE);
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- if (IS_GEN9_LP(dev_priv))
- intel_dp->pps_reset = true;
- else
- intel_dp->pps_pipe = INVALID_PIPE;
- }
-}
-
-struct pps_registers {
- i915_reg_t pp_ctrl;
- i915_reg_t pp_stat;
- i915_reg_t pp_on;
- i915_reg_t pp_off;
- i915_reg_t pp_div;
-};
-
-static void intel_pps_get_registers(struct intel_dp *intel_dp,
- struct pps_registers *regs)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
-
- memset(regs, 0, sizeof(*regs));
-
- if (IS_GEN9_LP(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_idx = vlv_power_sequencer_pipe(intel_dp);
-
- regs->pp_ctrl = PP_CONTROL(pps_idx);
- regs->pp_stat = PP_STATUS(pps_idx);
- regs->pp_on = PP_ON_DELAYS(pps_idx);
- regs->pp_off = PP_OFF_DELAYS(pps_idx);
-
- /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- regs->pp_div = INVALID_MMIO_REG;
- else
- regs->pp_div = PP_DIVISOR(pps_idx);
-}
-
-static i915_reg_t
-_pp_ctrl_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_ctrl;
-}
-
-static i915_reg_t
-_pp_stat_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_stat;
-}
-
-static bool edp_have_panel_power(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
-}
-
-static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
- }
-}
-
-static u32
-intel_dp_aux_wait_done(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- const unsigned int timeout_ms = 10;
- u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
-
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (!done)
- drm_err(&i915->drm,
- "%s: did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
-#undef C
-
- return status;
-}
-
-static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2000 and use that
- */
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
-}
-
-static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 freq;
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the cdclk or PCH rawclk, and would
- * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
- * divide by 2000 and use that
- */
- if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
- else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
- return DIV_ROUND_CLOSEST(freq, 2000);
-}
-
-static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
- /* Workaround for non-ULT HSW */
- switch (index) {
- case 0: return 63;
- case 1: return 72;
- default: return 0;
- }
- }
-
- return ilk_get_aux_clock_divider(intel_dp, index);
-}
-
-static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- /*
- * SKL doesn't need us to program the AUX clock divider (Hardware will
- * derive the clock from CDCLK automatically). We still implement the
- * get_aux_clock_divider vfunc to plug-in into the existing code.
- */
- return index ? 0 : 1;
-}
-
-static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 aux_clock_divider)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- u32 precharge, timeout;
-
- if (IS_GEN(dev_priv, 6))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev_priv))
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
- return DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- timeout |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
-}
-
-static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 unused)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- u32 ret;
-
- ret = DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_MAX |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
-
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
- ret |= DP_AUX_CH_CTL_TBT_IO;
-
- return ret;
-}
-
-static int
-intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const u8 *send, int send_bytes,
- u8 *recv, int recv_size,
- u32 aux_send_ctl_flags)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
- i915_reg_t ch_ctl, ch_data[5];
- u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain;
- intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
- int i, ret, recv_bytes;
- int try, clock = 0;
- u32 status;
- bool vdd;
-
- ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- for (i = 0; i < ARRAY_SIZE(ch_data); i++)
- ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
-
- if (is_tc_port)
- intel_tc_port_lock(dig_port);
-
- aux_domain = intel_aux_power_domain(dig_port);
-
- aux_wakeref = intel_display_power_get(i915, aux_domain);
- pps_wakeref = pps_lock(intel_dp);
-
- /*
- * We will be called with VDD already enabled for dpcd/edid/oui reads.
- * In such cases we want to leave VDD enabled and it's up to upper layers
- * to turn it off. But for eg. i2c-dev access we need to turn it on/off
- * ourselves.
- */
- vdd = edp_panel_vdd_on(intel_dp);
-
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
-
- /* Try to wait for any previous AUX channel activity */
- for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- msleep(1);
- }
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
-
- if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
- "%s: not started (status 0x%08x)\n",
- intel_dp->aux.name, status);
- intel_dp->aux_busy_last_status = status;
- }
-
- ret = -EBUSY;
- goto out;
- }
-
- /* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
- ret = -E2BIG;
- goto out;
- }
-
- while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- send_bytes,
- aux_clock_divider);
-
- send_ctl |= aux_send_ctl_flags;
-
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
-
- /* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
-
- status = intel_dp_aux_wait_done(intel_dp);
-
- /* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
- * 400us delay required for errors and timeouts
- * Timeout errors from the HW already meet this
- * requirement so skip to next iteration
- */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- usleep_range(400, 500);
- continue;
- }
- if (status & DP_AUX_CH_CTL_DONE)
- goto done;
- }
- }
-
- if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EBUSY;
- goto out;
- }
-
-done:
- /* Check for timeout or receive error.
- * Timeouts occur when the sink is not connected
- */
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EIO;
- goto out;
- }
-
- /* Timeouts occur when the device isn't connected, so they're
- * "normal" -- don't fill the kernel log with these */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- /* Unload any bytes sent back from the other side */
- recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
- DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
- /*
- * By BSpec: "Message sizes of 0 or >20 are not allowed."
- * We have no idea of what happened so we return -EBUSY so
- * drm layer takes care for the necessary retries.
- */
- if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
- "%s: Forbidden recv_bytes = %d on aux transaction\n",
- intel_dp->aux.name, recv_bytes);
- ret = -EBUSY;
- goto out;
- }
-
- if (recv_bytes > recv_size)
- recv_bytes = recv_size;
-
- for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
- recv + i, recv_bytes - i);
-
- ret = recv_bytes;
-out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
-
- if (vdd)
- edp_panel_vdd_off(intel_dp, false);
-
- pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
-
- return ret;
-}
-
-#define BARE_ADDRESS_SIZE 3
-#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
-
-static void
-intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
- const struct drm_dp_aux_msg *msg)
-{
- txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
-}
-
-static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
-{
- /*
- * If we're trying to send the HDCP Aksv, we need to set a the Aksv
- * select bit to inform the hardware to send the Aksv after our header
- * since we can't access that data from software.
- */
- if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
- msg->address == DP_AUX_HDCP_AKSV)
- return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
-
- return 0;
-}
-
-static ssize_t
-intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 txbuf[20], rxbuf[20];
- size_t txsize, rxsize;
- u32 flags = intel_dp_aux_xfer_flags(msg);
- int ret;
-
- intel_dp_aux_header(txbuf, msg);
-
- switch (msg->request & ~DP_AUX_I2C_MOT) {
- case DP_AUX_NATIVE_WRITE:
- case DP_AUX_I2C_WRITE:
- case DP_AUX_I2C_WRITE_STATUS_UPDATE:
- txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 2; /* 0 or 1 data bytes */
-
- if (drm_WARN_ON(&i915->drm, txsize > 20))
- return -E2BIG;
-
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
-
- if (msg->buffer)
- memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
-
- if (ret > 1) {
- /* Number of bytes written in a short write. */
- ret = clamp_t(int, rxbuf[1], 0, msg->size);
- } else {
- /* Return payload size. */
- ret = msg->size;
- }
- }
- break;
-
- case DP_AUX_NATIVE_READ:
- case DP_AUX_I2C_READ:
- txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
- rxsize = msg->size + 1;
-
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
- return -E2BIG;
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
- /*
- * Assume happy day, and copy the data. The caller is
- * expected to check msg->reply before touching it.
- *
- * Return payload size.
- */
- ret--;
- memcpy(msg->buffer, rxbuf + 1, ret);
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_B);
- }
-}
-
-static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_B, index);
- }
-}
-
-static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_CTL(aux_ch);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_DATA(aux_ch, index);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static void
-intel_dp_aux_fini(struct intel_dp *intel_dp)
-{
- kfree(intel_dp->aux.name);
-}
-
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else {
- intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
- }
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
- drm_dp_aux_init(&intel_dp->aux);
-
- /* Failure to allocate our preferred name is not critical */
- if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
- aux_ch - AUX_CH_USBC1 + '1',
- encoder->base.name);
- else
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
- aux_ch_name(aux_ch),
- encoder->base.name);
-
- intel_dp->aux.transfer = intel_dp_aux_transfer;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
@@ -2263,6 +1174,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static int
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ output_bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
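/*
 * Editor's worked example of the fast policy above (assuming
 * intel_dp_max_data_rate() returns link_clock * lane_count, with link
 * rates such as 162000/270000 already expressed as post-8b/10b kBps,
 * and common_rates = {162000, 270000}): a 1920x1080@60 eDP panel with
 * crtc_clock = 148500 kHz at bpp = 24 needs
 * mode_rate = 148500 * 24 / 8 = 445500 kBps. One lane tops out at
 * 270000, so the loops settle on 2 lanes at 270000 (540000 kBps) while
 * keeping the maximum bpp - fewer lanes at a higher link rate than the
 * "slow and wide" policy would choose.
 */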
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -2289,6 +1238,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
u8 line_buf_depth;
int ret;
+ /*
+ * RC_MODEL_SIZE is currently a constant across all configurations.
+ *
+ * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -2478,13 +1435,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp->use_max_params) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
+ * advertizes being capable of in case the initial fast
+ * optimal params failed us. The panels are generally
* designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * configuration, and typically on older panels these
+ * values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2503,11 +1461,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we fall back to the max clock and lane count for eDP
+ * panels that fail with the fast optimal settings (see
+ * intel_dp->use_max_params), in which case the fast vs. wide
+ * choice doesn't matter.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2756,6 +1725,9 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ if (pipe_config->vrr.enable)
+ return;
+
/*
* DRRS and PSR can't be enabled together, so give preference to PSR
* as it allows more power savings by completely shutting down the display,
@@ -2788,8 +1760,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
@@ -2859,6 +1830,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config);
intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
constant_n);
@@ -2959,427 +1931,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
}
}
-#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
-#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-
-#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void intel_pps_verify_state(struct intel_dp *intel_dp);
-
-static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_verify_state(intel_dp);
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
- mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
- drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
-}
-
-static void wait_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
- wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void wait_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
- wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
-
- /* take the difference of currrent time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
- panel_power_on_time = ktime_get_boottime();
- panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
-
- /* When we disable the VDD override bit last we have to do the manual
- * wait. */
- if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->panel_power_cycle_delay - panel_power_off_duration);
-
- wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-static void wait_backlight_on(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
- intel_dp->backlight_on_delay);
-}
-
-static void edp_wait_backlight_off(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
- intel_dp->backlight_off_delay);
-}
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 control;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
- }
- return control;
-}
-
-/*
- * Must be paired with edp_panel_vdd_off().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
- bool need_to_disable = !intel_dp->want_panel_vdd;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return false;
-
- cancel_delayed_work(&intel_dp->panel_vdd_work);
- intel_dp->want_panel_vdd = true;
-
- if (edp_have_panel_vdd(intel_dp))
- return need_to_disable;
-
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dig_port));
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- if (!edp_have_panel_power(intel_dp))
- wait_panel_power_cycle(intel_dp);
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_FORCE_VDD;
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
- /*
- * If the panel wasn't on, delay before accessing aux channel
- */
- if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- msleep(intel_dp->panel_power_up_delay);
- }
-
- return need_to_disable;
-}
-
-/*
- * Must be paired with intel_edp_panel_vdd_off() or
- * intel_edp_panel_off().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool vdd;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- vdd = false;
- with_pps_lock(intel_dp, wakeref)
- vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-}
-
-static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_FORCE_VDD;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp_stat_reg = _pp_stat_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- /* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if ((pp & PANEL_POWER_ON) == 0)
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(dig_port));
-}
-
-static void edp_panel_vdd_work(struct work_struct *__work)
-{
- struct intel_dp *intel_dp =
- container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
- intel_wakeref_t wakeref;
-
- with_pps_lock(intel_dp, wakeref) {
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- }
-}
-
-static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
-{
- unsigned long delay;
-
- /*
- * Queue the timer to fire a long time from now (relative to the power
- * down delay) to keep the panel power up across a sequence of
- * operations.
- */
- delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
-}
-
-/*
- * Must be paired with edp_panel_vdd_on().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- intel_dp->want_panel_vdd = false;
-
- if (sync)
- edp_panel_vdd_off_sync(intel_dp);
- else
- edp_panel_vdd_schedule_off(intel_dp);
-}
-
-static void edp_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
- return;
-
- wait_panel_power_cycle(intel_dp);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ilk_get_pp_control(intel_dp);
- if (IS_GEN(dev_priv, 5)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- pp |= PANEL_POWER_ON;
- if (!IS_GEN(dev_priv, 5))
- pp |= PANEL_POWER_RESET;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_on(intel_dp);
- intel_dp->last_power_on = jiffies;
-
- if (IS_GEN(dev_priv, 5)) {
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_on(intel_dp);
-}
-
-
-static void edp_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- /* We need to switch off panel power _and_ force vdd, for otherwise some
- * panels get very unhappy and cease to work. */
- pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
- EDP_BLC_ENABLE);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_dp->want_panel_vdd = false;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_off(intel_dp);
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- /* We got a reference when we enabled the VDD. */
- intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
-}
-
-void intel_edp_panel_off(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_off(intel_dp);
-}
-
-/* Enable backlight in the panel power control. */
-static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * If we enable the backlight right away following a panel power
- * on, we may see slight flicker as the panel syncs with the eDP
- * link. So delay a bit to make sure the image is solid before
- * allowing it to appear.
- */
- wait_backlight_on(intel_dp);
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -3394,31 +1945,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
- _intel_edp_backlight_on(intel_dp);
-}
-
-/* Disable backlight in the panel power control. */
-static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- intel_dp->last_backlight_off = jiffies;
- edp_wait_backlight_off(intel_dp);
+ intel_pps_backlight_on(intel_dp);
}
/* Disable backlight PP control and backlight PWM. */
@@ -3432,37 +1959,10 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
- _intel_edp_backlight_off(intel_dp);
+ intel_pps_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
}
-/*
- * Hook for controlling the panel power control backlight through the bl_power
- * sysfs attribute. Take care to handle multiple calls.
- */
-static void intel_edp_backlight_power(struct intel_connector *connector,
- bool enable)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- is_enabled = false;
- with_pps_lock(intel_dp, wakeref)
- is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- if (is_enabled == enable)
- return;
-
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
- enable ? "enable" : "disable");
-
- if (enable)
- _intel_edp_backlight_on(intel_dp);
- else
- _intel_edp_backlight_off(intel_dp);
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -3579,6 +2079,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
enable ? "enable" : "disable");
}
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 oui[] = { 0x00, 0xaa, 0x01 };
+ u8 buf[3] = { 0 };
+
+ /*
+ * During driver init, be careful not to change the source OUI if it's already
+ * set to what we want, so as to avoid clearing any sink state by accident.
+ */
+ if (careful) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_err(&i915->drm, "Failed to read source OUI\n");
+
+ if (memcmp(oui, buf, sizeof(oui)) == 0)
+ return;
+ }
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+ drm_err(&i915->drm, "Failed to write source OUI\n");
+}
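A standalone sketch of the careful read-compare-write pattern above; a fake 3-byte array stands in for the DPCD and for the drm_dp_dpcd_read()/drm_dp_dpcd_write() helpers:

#include <stdio.h>
#include <string.h>

static unsigned char dpcd_oui[3];	/* fake DPCD backing store */

int main(void)
{
	const unsigned char oui[3] = { 0x00, 0xaa, 0x01 };
	unsigned char buf[3];

	memcpy(buf, dpcd_oui, sizeof(buf));		/* "read" current OUI */
	if (memcmp(oui, buf, sizeof(oui)) != 0)		/* rewrite only if it differs */
		memcpy(dpcd_oui, oui, sizeof(oui));	/* "write" the new OUI */

	printf("%02x %02x %02x\n", dpcd_oui[0], dpcd_oui[1], dpcd_oui[2]);
	return 0;
}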
+
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
@@ -3600,6 +2123,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
lspcon_resume(dp_to_dig_port(intel_dp));
+ /* Write the source OUI as early as possible */
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_init_source_oui(intel_dp, false);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -3856,10 +2383,12 @@ static void intel_disable_dp(struct intel_atomic_state *state,
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- intel_edp_panel_vdd_on(intel_dp);
+ intel_pps_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- intel_edp_panel_off(intel_dp);
+ intel_pps_off(intel_dp);
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
}
static void g4x_disable_dp(struct intel_atomic_state *state,
@@ -3955,6 +2484,280 @@ cpt_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* Clear the cached register set to avoid using stale values */
+
+ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+ intel_dp->pcon_dsc_dpcd,
+ sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+ drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_PCON_DSC_ENCODER);
+
+ drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
+ int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ int i;
+
+ for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+ if (frl_bw_mask & (1 << i))
+ return bw_gbps[i];
+ }
+ return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+ switch (max_frl) {
+ case 48:
+ return DP_PCON_FRL_BW_MASK_48GBPS;
+ case 40:
+ return DP_PCON_FRL_BW_MASK_40GBPS;
+ case 32:
+ return DP_PCON_FRL_BW_MASK_32GBPS;
+ case 24:
+ return DP_PCON_FRL_BW_MASK_24GBPS;
+ case 18:
+ return DP_PCON_FRL_BW_MASK_18GBPS;
+ case 9:
+ return DP_PCON_FRL_BW_MASK_9GBPS;
+ }
+
+ return 0;
+}
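The two mask helpers are inverses over the same table; a self-contained illustration in which the bit layout mirrors bw_gbps[] above (bit 0 = 9 Gbps through bit 5 = 48 Gbps):

#include <stdio.h>

static int frl_mask_to_gbps(unsigned char mask)
{
	const int bw_gbps[] = { 9, 18, 24, 32, 40, 48 };
	int i;

	/* highest set bit wins, matching intel_dp_pcon_get_frl_mask() */
	for (i = 5; i >= 0; i--)
		if (mask & (1 << i))
			return bw_gbps[i];
	return 0;
}

int main(void)
{
	/* a sink advertising 9/18/24 Gbps reports its highest rate */
	printf("%d Gbps\n", frl_mask_to_gbps(0x07));	/* prints 24 */
	return 0;
}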
+
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int max_frl_rate;
+ int max_lanes, rate_per_lane;
+ int max_dsc_lanes, dsc_rate_per_lane;
+
+ max_lanes = connector->display_info.hdmi.max_lanes;
+ rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_frl_rate = max_lanes * rate_per_lane;
+
+ if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (max_dsc_lanes && dsc_rate_per_lane)
+ max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+ }
+
+ return max_frl_rate;
+}
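A worked example of the clamping above, with assumed EDID capabilities rather than values from a real sink:

#include <stdio.h>

int main(void)
{
	int max_lanes = 4, rate_per_lane = 12;		/* assumed: 48 Gbps raw */
	int dsc_lanes = 4, dsc_rate_per_lane = 10;	/* assumed DSC caps: 40 Gbps */
	int frl = max_lanes * rate_per_lane;

	if (dsc_lanes && dsc_rate_per_lane &&
	    dsc_lanes * dsc_rate_per_lane < frl)
		frl = dsc_lanes * dsc_rate_per_lane;	/* DSC path is the limit */

	printf("max FRL = %d Gbps\n", frl);		/* prints 40 */
	return 0;
}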
+
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
+#define PCON_CONCURRENT_MODE (1 > 0)
+#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
+#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+ u8 max_frl_bw_mask = 0, frl_trained_mask;
+ bool is_active;
+
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+
+ max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+ drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+ max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+ drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+ max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+ if (max_frl_bw <= 0)
+ return -EINVAL;
+
+ ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+ if (ret < 0)
+ return ret;
+ /* Wait for PCON to be FRL Ready */
+ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+ /*
+ * Wait for FRL training to complete:
+ * check that the HDMI link is up and active.
+ */
+ wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ /* Verify HDMI Link configuration shows FRL Mode */
+ if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
+ DP_PCON_HDMI_MODE_FRL) {
+ drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
+ return -EINVAL;
+ }
+ drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
+
+ intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+ intel_dp->frl.is_trained = true;
+ drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+ return 0;
+}
+
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+ if (drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->has_hdmi_sink &&
+ intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+ return true;
+
+ return false;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* Always try FRL training if the sink supports it and it isn't trained yet */
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ intel_dp->frl.is_trained)
+ return;
+
+ if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+ int ret, mode;
+
+ drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+ if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+ drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ } else {
+ drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ }
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+ int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+ return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+ int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+ return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+ pcon_max_slice_width,
+ hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int num_slices, int slice_width)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int output_format = crtc_state->output_format;
+ bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+ int hdmi_max_chunk_bytes =
+ connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+ return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+ num_slices, output_format, hdmi_all_bpp,
+ hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 pps_param[6];
+ int slice_height;
+ int slice_width;
+ int num_slices;
+ int bits_per_pixel;
+ int ret;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector;
+ bool hdmi_is_dsc_1_2;
+
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+ return;
+
+ if (!intel_connector)
+ return;
+ connector = &intel_connector->base;
+ hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+ !hdmi_is_dsc_1_2)
+ return;
+
+ slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+ if (!slice_height)
+ return;
+
+ num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+ if (!num_slices)
+ return;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+ num_slices);
+
+ bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+ num_slices, slice_width);
+ if (!bits_per_pixel)
+ return;
+
+ pps_param[0] = slice_height & 0xFF;
+ pps_param[1] = slice_height >> 8;
+ pps_param[2] = slice_width & 0xFF;
+ pps_param[3] = slice_width >> 8;
+ pps_param[4] = bits_per_pixel & 0xFF;
+ pps_param[5] = (bits_per_pixel >> 8) & 0x3;
+
+ ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
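The pps_param[] layout above is two little-endian 16-bit fields followed by a 10-bit bpp value split across one byte plus two spare bits; a small standalone demo with assumed values:

#include <stdio.h>

int main(void)
{
	int slice_height = 108, slice_width = 960;	/* assumed */
	int bits_per_pixel = 0x180;			/* assumed 10-bit value */
	unsigned char pps[6];

	pps[0] = slice_height & 0xFF;
	pps[1] = slice_height >> 8;
	pps[2] = slice_width & 0xFF;
	pps[3] = slice_width >> 8;
	pps[4] = bits_per_pixel & 0xFF;
	pps[5] = (bits_per_pixel >> 8) & 0x3;

	printf("%02x %02x %02x %02x %02x %02x\n",	/* 6c 00 c0 03 80 01 */
	       pps[0], pps[1], pps[2], pps[3], pps[4], pps[5]);
	return 0;
}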
+
static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -4010,7 +2813,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;
@@ -4029,8 +2833,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));
- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4039,12 +2843,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
tmp = 0;
+ if (intel_dp->dfp.rgb_to_ycbcr) {
+ bool bt2020, bt709;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+ /*
+ * FIXME: If userspace selects BT2020 or BT709 but the PCON supports
+ * RGB->YCbCr conversion only for the BT601 colorspace, we currently go
+ * ahead with BT601 as the default.
+ */
+ tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
+
+ bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+ bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+ switch (crtc_state->infoframes.vsc.colorimetry) {
+ case DP_COLORIMETRY_BT2020_RGB:
+ case DP_COLORIMETRY_BT2020_YCC:
+ if (bt2020)
+ tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
+ break;
+ case DP_COLORIMETRY_BT709_YCC:
+ case DP_COLORIMETRY_XVYCC_709:
+ if (bt709)
+ tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
- enableddisabled(false));
+ "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
+ enableddisabled(tmp ? true : false));
}
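A reduced sketch of the colorimetry-to-conversion-mode defaulting above; the enum values are stand-ins for the DPCD enable bits, not the real definitions:

#include <stdio.h>

enum mode { BT601, BT709, BT2020 };
enum col { COL_BT2020, COL_BT709, COL_OTHER };

static enum mode pick_mode(enum col c, int sink_bt2020, int sink_bt709)
{
	enum mode m = BT601;	/* default, per the FIXME above */

	switch (c) {
	case COL_BT2020:
		if (sink_bt2020)
			m = BT2020;
		break;
	case COL_BT709:
		if (sink_bt709)
			m = BT709;
		break;
	default:
		break;
	}
	return m;
}

int main(void)
{
	printf("%d\n", pick_mode(COL_BT2020, 1, 0));	/* 2: BT2020 */
	printf("%d\n", pick_mode(COL_BT2020, 0, 1));	/* 0: stays BT601 */
	return 0;
}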
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -4062,15 +2896,15 @@ static void intel_enable_dp(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
- with_pps_lock(intel_dp, wakeref) {
+ with_intel_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ vlv_pps_init(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -4084,7 +2918,9 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -4127,112 +2963,6 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
ilk_edp_pll_on(intel_dp, pipe_config);
}
-static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum pipe pipe = intel_dp->pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return;
-
- edp_panel_vdd_off_sync(intel_dp);
-
- /*
- * VLV seems to get confused when multiple power sequencers
- * have the same port selected (even if only one has power/vdd
- * enabled). The failure manifests as vlv_wait_port_ready() failing
- * CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power sequencers, but let's clear the
- * port select always when logically disconnecting a power sequencer
- * from a port.
- */
- drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
-
- intel_dp->pps_pipe = INVALID_PIPE;
-}
-
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_encoder *encoder;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- if (intel_dp->pps_pipe != pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* make sure vdd is off before we steal it */
- vlv_detach_power_sequencer(intel_dp);
- }
-}
-
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (intel_dp->pps_pipe != INVALID_PIPE &&
- intel_dp->pps_pipe != crtc->pipe) {
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- vlv_detach_power_sequencer(intel_dp);
- }
-
- /*
- * We may be stealing the power
- * sequencer from another port.
- */
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
-
- intel_dp->active_pipe = crtc->pipe;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /* now it's all ours */
- intel_dp->pps_pipe = crtc->pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-}
-
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -4632,22 +3362,19 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static char dp_training_pattern_name(u8 train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
+ switch (train_pat) {
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
}
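Why '0' + train_pat works: TPS1..TPS3 encode as the raw values 1..3, while TPS4 uses a different raw value (7 per the DP spec), hence the explicit case. A standalone check:

#include <stdio.h>

enum { TP1 = 1, TP2 = 2, TP3 = 3, TP4 = 7 };	/* raw pattern values */

static char pattern_name(int pat)
{
	switch (pat) {
	case TP1: case TP2: case TP3:
		return '0' + pat;	/* '1'..'3' */
	case TP4:
		return '4';		/* raw value 7, so can't use '0' + pat */
	default:
		return '?';
	}
}

int main(void)
{
	printf("%c %c %c %c\n", pattern_name(TP1), pattern_name(TP2),
	       pattern_name(TP3), pattern_name(TP4));	/* 1 2 3 4 */
	return 0;
}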
void
@@ -4655,13 +3382,15 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
- DP_TRAINING_PATTERN_DISABLE)
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- intel_dp_training_pattern_symbol(dp_train_pat));
+ "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
@@ -4727,15 +3456,15 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- msleep(intel_dp->panel_power_down_delay);
+ msleep(intel_dp->pps.panel_power_down_delay);
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
- with_pps_lock(intel_dp, wakeref)
- intel_dp->active_pipe = INVALID_PIPE;
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
}
}
@@ -4865,6 +3594,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
+ /*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
+ intel_edp_init_source_oui(intel_dp, true);
+
return true;
}
@@ -5698,7 +4433,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
@@ -5771,6 +4506,17 @@ update_status:
"Could not write test response to sink\n");
}
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+{
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ *handled = true;
+ }
+}
+
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
* @intel_dp: Intel DP struct
@@ -5815,7 +4561,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
+
if (!handled)
break;
@@ -5833,6 +4580,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return link_ok;
}
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+ bool is_active;
+ u8 buf = 0;
+
+ is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+ if (intel_dp->frl.is_trained && !is_active) {
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+ return;
+
+ buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+ return;
+
+ drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+
+ /* Restart FRL training or fall back to TMDS mode */
+ intel_dp_check_frl_training(intel_dp);
+ }
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -6006,6 +4775,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
!intel_dp_mst_is_master_trans(crtc_state))
continue;
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp, crtc_state);
intel_dp_stop_link_train(intel_dp, crtc_state);
break;
@@ -6115,7 +4886,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
return 0;
}
-static void intel_dp_phy_test(struct intel_encoder *encoder)
+void intel_dp_phy_test(struct intel_encoder *encoder)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -6197,7 +4968,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
return state;
}
-static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
@@ -6221,6 +4992,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+ drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+ return;
+ }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+ drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+ return;
+ }
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
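The helper follows the usual latch-read/write-back-to-ack pattern for DPCD event vectors; a generic standalone sketch in which the register and the HDMI_LINK_STATUS_CHANGED bit position are faked for the demo:

#include <stdio.h>

static unsigned char fake_vector = 0x08;	/* pretend an event is latched */

static int read_vec(unsigned char *val) { *val = fake_vector; return 1; }
static int write_vec(unsigned char val) { fake_vector &= ~val; return 1; }

int main(void)
{
	unsigned char val;

	if (read_vec(&val) != 1 || !val)
		return 0;		/* nothing latched */
	if (write_vec(val) != 1)
		return 1;		/* ack failed; don't act on stale bits */
	if (val & 0x08)			/* assumed bit position for the demo */
		printf("handle HDMI link status change\n");
	return 0;
}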
+
/*
* According to DP spec
* 5.1.2:
@@ -6260,7 +5055,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
+ intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -6480,13 +5276,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
intel_dp->downstream_ports,
edid);
+ intel_dp->dfp.pcon_max_frl_bw =
+ drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
intel_dp->dfp.max_dotclock,
intel_dp->dfp.min_tmds_clock,
- intel_dp->dfp.max_tmds_clock);
+ intel_dp->dfp.max_tmds_clock,
+ intel_dp->dfp.pcon_max_frl_bw);
+
+ intel_dp_get_pcon_dsc_cap(intel_dp);
}
static void
@@ -6494,7 +5297,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
/* No YCbCr output support on gmch platforms */
if (HAS_GMCH(i915))
@@ -6516,14 +5319,26 @@ intel_dp_update_420(struct intel_dp *intel_dp)
dp_to_dig_port(intel_dp)->lspcon.active ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
+ rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
if (INTEL_GEN(i915) >= 11) {
+ /* Let PCON convert from RGB->YCbCr if possible */
+ if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+ intel_dp->dfp.rgb_to_ycbcr = true;
+ intel_dp->dfp.ycbcr_444_to_420 = true;
+ connector->base.ycbcr_420_allowed = true;
+ } else {
/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
- intel_dp->dfp.ycbcr_444_to_420 =
- ycbcr_444_to_420 && !ycbcr_420_passthrough;
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
- connector->base.ycbcr_420_allowed =
- !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ }
} else {
/* 4:4:4->4:2:0 conversion is the only way */
intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
@@ -6532,8 +5347,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
}
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
+ yesno(intel_dp->dfp.rgb_to_ycbcr),
yesno(connector->base.ycbcr_420_allowed),
yesno(intel_dp->dfp.ycbcr_444_to_420));
}
@@ -6557,7 +5373,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
}
drm_dp_cec_set_edid(&intel_dp->aux, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -6571,13 +5386,14 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_hdmi_sink = false;
intel_dp->has_audio = false;
- intel_dp->edid_quirks = 0;
intel_dp->dfp.max_bpc = 0;
intel_dp->dfp.max_dotclock = 0;
intel_dp->dfp.min_tmds_clock = 0;
intel_dp->dfp.max_tmds_clock = 0;
+ intel_dp->dfp.pcon_max_frl_bw = 0;
+
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
}
@@ -6683,7 +5499,7 @@ intel_dp_detect(struct drm_connector *connector,
to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
@@ -6736,6 +5552,10 @@ static int intel_dp_get_modes(struct drm_connector *connector)
edid = intel_connector->detect_edid;
if (edid) {
int ret = intel_connector_update_modes(connector, edid);
+
+ if (intel_vrr_is_capable(connector))
+ drm_connector_set_vrr_capable_property(connector,
+ true);
if (ret)
return ret;
}
@@ -6774,6 +5594,8 @@ intel_dp_connector_register(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
int ret;
ret = intel_connector_register(connector);
@@ -6787,6 +5609,22 @@ intel_dp_connector_register(struct drm_connector *connector)
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return ret;
+
+ /*
+ * ToDo: Clean this up to handle lspcon init and resume more
+ * efficiently and in a more streamlined fashion.
+ */
+ if (lspcon_init(dig_port)) {
+ lspcon_detect_hdr_capability(lspcon);
+ if (lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+ }
+
return ret;
}
@@ -6806,17 +5644,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &dig_port->dp;
intel_dp_mst_encoder_cleanup(dig_port);
- if (intel_dp_is_edp(intel_dp)) {
- intel_wakeref_t wakeref;
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
- }
+ intel_pps_vdd_off_sync(intel_dp);
intel_dp_aux_fini(intel_dp);
}
@@ -6832,53 +5661,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- wait_panel_power_cycle(intel_dp);
-}
-
-static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
+ intel_pps_wait_power_cycle(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
@@ -6898,30 +5689,20 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->reset_link_params = true;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
- if (intel_dp_is_edp(intel_dp)) {
- /*
- * Reinit the power sequencer, in case BIOS did
- * something nasty with it.
- */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
}
+
+ intel_pps_encoder_reset(intel_dp);
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
@@ -7086,19 +5867,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static bool intel_edp_have_power(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool have_power = false;
-
- with_pps_lock(intel_dp, wakeref) {
- have_power = edp_have_panel_power(intel_dp) &&
- edp_have_panel_vdd(intel_dp);
- }
-
- return have_power;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
@@ -7106,7 +5874,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
struct intel_dp *intel_dp = &dig_port->dp;
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
- (long_hpd || !intel_edp_have_power(intel_dp))) {
+ (long_hpd || !intel_pps_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
@@ -7175,7 +5943,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
- intel_attach_colorspace_property(connector);
+ /* Register HDMI colorspace for case of lspcon */
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(connector);
+ } else {
+ intel_attach_dp_colorspace_property(connector);
+ }
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
drm_object_attach_property(&connector->base,
@@ -7194,277 +5968,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
-}
-
-static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
-{
- intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_dp->last_power_on = jiffies;
- intel_dp->last_backlight_off = jiffies;
-}
-
-static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- pp_ctl = ilk_get_pp_control(intel_dp);
-
- /* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
-
- /* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
-
- if (i915_mmio_reg_valid(regs.pp_div)) {
- u32 pp_div;
-
- pp_div = intel_de_read(dev_priv, regs.pp_div);
-
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
- } else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
- }
-}
-
-static void
-intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
-{
- DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
-}
-static void
-intel_pps_verify_state(struct intel_dp *intel_dp)
-{
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps_delays;
-
- intel_pps_readout_hw_state(intel_dp, &hw);
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- DRM_ERROR("PPS state mismatch\n");
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- intel_pps_readout_hw_state(intel_dp, &cur);
-
- intel_pps_dump_state("cur", &cur);
-
- vbt = dev_priv->vbt.edp.pps;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Ocassionally the panel
- * just fails to power back on. Increasing the delay to 800ms
- * seems sufficient to avoid this problem.
- */
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
- "Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
- }
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt.t11_t12 += 100 * 10;
-
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec.t11_t12 = (510 + 100) * 10;
-
- intel_pps_dump_state("vbt", &vbt);
-
- /* Use the max of the register settings and vbt. If both are
- * unset, fall back to the spec limits. */
-#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
- spec.field : \
- max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay,
- intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay,
- intel_dp->backlight_off_delay);
-
- /*
- * We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
- */
- final->t8 = 1;
- final->t9 = 1;
-
- /*
- * HW has only a 100msec granularity for t11_t12 so round it up
- * accordingly.
- */
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
-}
-
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
- struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_get_registers(intel_dp, &regs);
-
- /*
- * On some VLV machines the BIOS can leave the VDD
- * enabled even on power sequencers which aren't
- * hooked up to any port. This would mess up the
- * power domain tracking the first time we pick
- * one of these power sequencers for use since
- * edp_panel_vdd_on() would notice that the VDD was
- * already on and therefore wouldn't grab the power
- * domain reference. Disable VDD first to avoid this.
- * This also avoids spuriously turning the VDD on as
- * soon as the new power sequencer gets initialized.
- */
- if (force_disable_vdd) {
- u32 pp = ilk_get_pp_control(intel_dp);
-
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
- "Panel power already on\n");
-
- if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
- "VDD already on, disabling first\n");
-
- pp &= ~EDP_FORCE_VDD;
-
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
- }
-
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
-
- /* Haswell doesn't have any port selection bits for the panel
- * power sequencer any more. */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- switch (port) {
- case PORT_A:
- port_sel = PANEL_PORT_SELECT_DPA;
- break;
- case PORT_C:
- port_sel = PANEL_PORT_SELECT_DPC;
- break;
- case PORT_D:
- port_sel = PANEL_PORT_SELECT_DPD;
- break;
- default:
- MISSING_CASE(port);
- break;
- }
- }
-
- pp_on |= port_sel;
-
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
-
- /*
- * Compute the divisor for the pp clock, simply match the Bspec formula.
- */
- if (i915_mmio_reg_valid(regs.pp_div)) {
- intel_de_write(dev_priv, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
-}
-
-static void intel_dp_pps_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- }
+ if (HAS_VRR(dev_priv))
+ drm_connector_attach_vrr_capable_property(connector);
}
/**
@@ -7903,14 +6409,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
- intel_wakeref_t wakeref;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
-
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -7926,11 +6429,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- with_pps_lock(intel_dp, wakeref) {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ intel_pps_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -7947,7 +6446,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_connector_update_edid_property(connector, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7975,7 +6473,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps_pipe;
+ pipe = intel_dp->pps.pps_pipe;
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
@@ -7986,7 +6484,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_connector->panel.backlight.power = intel_edp_backlight_power;
+ intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
@@ -7998,13 +6496,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return true;
out_vdd_off:
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
return false;
}
@@ -8058,8 +6550,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp->reset_link_params = true;
- intel_dp->pps_pipe = INVALID_PIPE;
- intel_dp->active_pipe = INVALID_PIPE;
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
@@ -8077,7 +6569,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@ -8146,6 +6638,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
(temp & ~0xf) | 0xd);
}
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
+
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b871a09b6901..d80839139bfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
@@ -69,16 +70,11 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -95,9 +91,6 @@ void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
@@ -143,5 +136,11 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 000000000000..eaebf123310a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
+{
+ int i;
+ u32 v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((u32)src[i]) << ((3 - i) * 8);
+ return v;
+}
+
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
+{
+ int i;
+
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3 - i) * 8);
+}
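
For reference, a minimal standalone sketch of the byte-packing convention above: the first byte of the message lands in bits 31:24 of the AUX data register, mirroring intel_dp_pack_aux()/intel_dp_unpack_aux(). Plain C with stdint types in place of the kernel's, so it compiles outside the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Same big-endian packing as intel_dp_pack_aux(): src[0] -> bits 31:24. */
    static uint32_t pack_aux(const uint8_t *src, int src_bytes)
    {
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
            src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
            v |= ((uint32_t)src[i]) << ((3 - i) * 8);
        return v;
    }

    int main(void)
    {
        const uint8_t msg[3] = { 0x12, 0x34, 0x56 };

        /* 0x12 -> bits 31:24, 0x34 -> 23:16, 0x56 -> 15:8 => 0x12345600 */
        printf("0x%08x\n", pack_aux(msg, 3));
        return 0;
    }
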
+
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(timeout_ms));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (!done)
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+ return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the hrawclk, and we would like the AUX
+ * bit clock to run at 2 MHz. So take the hrawclk value, divide by 2000,
+ * and use that as the divider.
+ */
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
+
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the cdclk or PCH rawclk, and we would
+ * like the AUX bit clock to run at 2 MHz. So take the cdclk or PCH
+ * rawclk value, divide by 2000, and use that as the divider.
+ */
+ if (dig_port->aux_ch == AUX_CH_A)
+ freq = dev_priv->cdclk.hw.cdclk;
+ else
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
+}
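
The divider arithmetic is small enough to check by hand; i915 keeps rawclk_freq in kHz (note the /1000 to MHz in the PPS code earlier in this patch), so dividing by 2000 yields the ratio between the raw clock and the 2 MHz AUX bit clock. A sketch, using an assumed 24 MHz raw clock:

    #include <stdio.h>

    /* Round-to-nearest division for positive values, as the kernel's
     * DIV_ROUND_CLOSEST does. */
    static unsigned int div_round_closest(unsigned int n, unsigned int d)
    {
        return (n + d / 2) / d;
    }

    int main(void)
    {
        unsigned int rawclk_khz = 24000; /* assumed: 24 MHz, in kHz */
        unsigned int divider = div_round_closest(rawclk_khz, 2000);

        /* 24000 / 2000 = 12, and 24 MHz / 12 = 2 MHz */
        printf("divider = %u\n", divider);
        return 0;
    }
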
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ }
+
+ return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (Hardware will
+ * derive the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+ u32 precharge, timeout;
+
+ if (IS_GEN(dev_priv, 6))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev_priv))
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ u32 ret;
+
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_phy_is_tc(i915, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u32 aux_send_ctl_flags)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
+ i915_reg_t ch_ctl, ch_data[5];
+ u32 aux_clock_divider;
+ enum intel_display_power_domain aux_domain;
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
+ int i, ret, recv_bytes;
+ int try, clock = 0;
+ u32 status;
+ bool vdd;
+
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+ if (is_tc_port)
+ intel_tc_port_lock(dig_port);
+
+ aux_domain = intel_aux_power_domain(dig_port);
+
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * We will be called with VDD already enabled for dpcd/edid/oui reads.
+ * In such cases we want to leave VDD enabled and it's up to upper layers
+ * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
+ * ourselves.
+ */
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+ /*
+ * dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+ intel_pps_check_power_unlocked(intel_dp);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (try == 3) {
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+ if (status != intel_dp->aux_busy_last_status) {
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ intel_dp->aux_busy_last_status = status;
+ }
+
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp);
+
+ /* Clear done status and any errors */
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /*
+ * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+ * 400us delay required for errors and timeouts.
+ * Timeout errors from the HW already meet this
+ * requirement, so skip to the next iteration.
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
+ continue;
+ }
+ if (status & DP_AUX_CH_CTL_DONE)
+ goto done;
+ }
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+done:
+ /*
+ * Check for timeout or receive error. Timeouts occur when the sink is
+ * not connected.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Timeouts occur when the device isn't connected, so they're "normal"
+ * -- don't fill the kernel log with these
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ /*
+ * By BSpec: "Message sizes of 0 or >20 are not allowed."
+ * We have no idea what happened, so we return -EBUSY so the
+ * drm layer takes care of the necessary retries.
+ */
+ if (recv_bytes == 0 || recv_bytes > 20) {
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ if (vdd)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+
+ intel_pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+ if (is_tc_port)
+ intel_tc_port_unlock(dig_port);
+
+ return ret;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
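
The resulting header matches the AUX request format: a 4-bit request code, a 20-bit address, and a length-minus-one byte. A sketch of the layout for a one-byte native read of DPCD 0x600 (0x9 being the native-read request nibble per the DP spec):

    #include <stdint.h>
    #include <stdio.h>

    #define AUX_NATIVE_READ 0x9 /* DP native-read request nibble */

    /* Same layout as intel_dp_aux_header() above. */
    static void aux_header(uint8_t txbuf[4], uint8_t request,
                           uint32_t address, uint32_t size)
    {
        txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
        txbuf[1] = (address >> 8) & 0xff;
        txbuf[2] = address & 0xff;
        txbuf[3] = size - 1;
    }

    int main(void)
    {
        uint8_t hdr[4];

        aux_header(hdr, AUX_NATIVE_READ, 0x600, 1);
        printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
        /* -> 90 06 00 00 */
        return 0;
    }
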
+
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
+ int ret;
+
+ intel_dp_aux_header(txbuf, msg);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 2; /* 0 or 1 data bytes */
+
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
+ return -E2BIG;
+
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ }
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
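
On the receive side, the first byte unloaded from the data registers carries the reply code in its top nibble and the payload follows, which is why the read paths above do "msg->reply = rxbuf[0] >> 4" and then shift the buffer by one. A sketch with an assumed ACK'd two-byte native read (reply code 0x0 = ACK):

    #include <stdint.h>
    #include <stdio.h>

    #define AUX_NATIVE_REPLY_ACK 0x0 /* reply nibble for a successful transfer */

    int main(void)
    {
        /* Assumed raw response: reply byte followed by two payload bytes. */
        uint8_t rxbuf[3] = { AUX_NATIVE_REPLY_ACK << 4, 0x11, 0x22 };
        int ret = 3; /* bytes unloaded from the data registers */

        uint8_t reply = rxbuf[0] >> 4; /* top nibble is the reply code */
        ret--;                         /* payload size excludes the reply byte */

        printf("reply=0x%x, %d payload bytes: %02x %02x\n",
               reply, ret, rxbuf[1], rxbuf[2]);
        return 0;
    }
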
+
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+ kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+ drm_dp_aux_init(&intel_dp->aux);
+
+ /* Failure to allocate our preferred name is not critical */
+ if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+ aux_ch - AUX_CH_USBC1 + '1',
+ encoder->base.name);
+ else
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch),
+ encoder->base.name);
+
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 000000000000..4afbe76217b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+#include <linux/types.h>
+
+struct intel_dp;
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 51d27fc98d48..651884390137 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,10 +22,253 @@
*
*/
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing, a lot of laptops
+ * advertise support for the standard VESA backlight interface even though
+ * they don't properly support it. However, on these systems the Intel backlight
+ * interface generally does work properly. Additionally, these systems will
+ * usually just indicate that they use PWM backlight controls in their VBIOS
+ * for some reason.
+ */
+
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
+#include "intel_panel.h"
+
+/* TODO:
+ * Implement HDR; right now we just implement the bare minimum to bring us back into SDR mode so we
+ * can make people's backlights work in the meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0 0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1 0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2 0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3 0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358
+# define INTEL_EDP_TCON_USAGE_MASK			GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3
+# define INTEL_EDP_TCON_POWER_MASK BIT(4)
+# define INTEL_EDP_TCON_POWER_DC (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK	GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+ u8 tcon_cap[4];
+
+ ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+ if (ret < 0)
+ return false;
+
+ if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+ return false;
+
+ if (tcon_cap[0] >= 1) {
+ drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ } else {
+ drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ return false;
+ }
+
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+ return true;
+}
+
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 tmp;
+ u8 buf[2] = { 0 };
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ return 0;
+ }
+
+ if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
+ u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+ return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ }
+
+ /* Assume 100% brightness if backlight controls aren't enabled yet */
+ return panel->backlight.max;
+ }
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) {
+ drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ return 0;
+ }
+
+ return (buf[1] << 8 | buf[0]);
+}
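
The brightness value in nits is a 16-bit quantity split across the LSB (0x354) and MSB (0x355) registers defined above; the get path reassembles it and the set path splits it. A sketch with an assumed 350 nit level:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t level = 350; /* assumed example brightness, in nits */
        uint8_t buf[2];

        /* Split as intel_dp_aux_hdr_set_aux_backlight() does. */
        buf[0] = level & 0xFF;          /* -> INTEL_EDP_BRIGHTNESS_NITS_LSB */
        buf[1] = (level & 0xFF00) >> 8; /* -> INTEL_EDP_BRIGHTNESS_NITS_MSB */
        printf("LSB=0x%02x MSB=0x%02x\n", buf[0], buf[1]);

        /* Reassembled on read, as in intel_dp_aux_hdr_get_backlight(). */
        printf("level=%u\n", (unsigned int)(buf[1] << 8 | buf[0]));
        return 0;
    }
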
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[4] = { 0 };
+
+ buf[0] = level & 0xFF;
+ buf[1] = (level & 0xFF00) >> 8;
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0)
+ drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
+
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ intel_panel_set_pwm_level(conn_state, pwm_level);
+ }
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+ u8 old_ctrl, ctrl;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ return;
+ }
+
+ ctrl = old_ctrl;
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+ ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ }
+
+ if (ctrl != old_ctrl)
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0)
+ drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ /* Nothing to do for AUX based backlight controls */
+ if (panel->backlight.edp.intel.sdr_uses_aux)
+ return;
+
+ /* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+ panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+ } else {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+/* VESA backlight callbacks */
-static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
@@ -52,7 +295,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
}
}
-static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -75,7 +318,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -86,7 +329,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* If we're not in DPCD control mode yet, the programmed brightness
* value is meaningless and we should assume max brightness
*/
- if (!intel_dp_aux_backlight_dpcd_mode(connector))
+ if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector))
return connector->panel.backlight.max;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -107,7 +350,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
-intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state,
+ u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -137,11 +381,11 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
-static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count;
int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
freq = dev_priv->vbt.backlight.pwm_freq_hz;
@@ -173,14 +417,16 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
return true;
}
-static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -201,7 +447,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
- panel->backlight.pwmgen_bit_count) < 0)
+ pwmgen_bit_count) < 0)
drm_dbg_kms(&i915->drm,
"Failed to write aux pwmgen bit count\n");
@@ -214,7 +460,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
- if (intel_dp_aux_set_pwm_freq(connector))
+ if (intel_dp_aux_vesa_set_pwm_freq(connector))
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
if (new_dpcd_buf != dpcd_buf) {
@@ -225,18 +471,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
- intel_dp_aux_set_backlight(conn_state,
- connector->panel.backlight.level);
- set_aux_backlight_enable(intel_dp, true);
+ intel_dp_aux_vesa_set_backlight(conn_state, level);
+ set_vesa_backlight_enable(intel_dp, true);
}
-static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+ u32 level)
{
- set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
- false);
+ set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
-static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -309,40 +555,45 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
"Failed to write aux pwmgen bit count\n");
return max_backlight;
}
- panel->backlight.pwmgen_bit_count = pn;
+ panel->backlight.edp.vesa.pwmgen_bit_count = pn;
max_backlight = (1 << pn) - 1;
return max_backlight;
}
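
The pwmgen bit count directly sets the brightness granularity: with Pn bits the panel exposes levels 0..2^Pn - 1, which is the max_backlight computed above and later programmed through the DP_EDP_BACKLIGHT_BRIGHTNESS_MSB/LSB registers. A small sketch with an assumed Pn of 11:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed example; the driver derives pn from the panel caps and
         * the VBT PWM frequency. */
        unsigned int pn = 11;

        /* Same relation as intel_dp_aux_vesa_calc_max_backlight(). */
        unsigned int max_backlight = (1u << pn) - 1;

        printf("pn=%u -> levels 0..%u\n", pn, max_backlight);
        return 0;
    }
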
-static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector);
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = 0;
- panel->backlight.level = intel_dp_aux_get_backlight(connector);
- panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+ panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe);
+ panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) &&
panel->backlight.level != 0;
return 0;
}
static bool
-intel_dp_aux_display_control_capable(struct intel_connector *connector)
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
- * the panel can support backlight control over the aux channel
+ * the panel can support backlight control over the aux channel.
+ *
+ * TODO: We currently only support AUX-only backlight configurations, not backlights which
+ * require a mix of PWM and AUX controls to work. In the meantime, these machines typically
+ * work just fine using normal PWM controls anyway.
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
@@ -350,40 +601,89 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
return false;
}
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+ .setup = intel_dp_aux_hdr_setup_backlight,
+ .enable = intel_dp_aux_hdr_enable_backlight,
+ .disable = intel_dp_aux_hdr_disable_backlight,
+ .set = intel_dp_aux_hdr_set_backlight,
+ .get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+ .setup = intel_dp_aux_vesa_setup_backlight,
+ .enable = intel_dp_aux_vesa_enable_backlight,
+ .disable = intel_dp_aux_vesa_disable_backlight,
+ .set = intel_dp_aux_vesa_set_backlight,
+ .get = intel_dp_aux_vesa_get_backlight,
+};
+
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
- struct intel_panel *panel = &intel_connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool try_intel_interface = false, try_vesa_interface = false;
- if (i915->params.enable_dpcd_backlight == 0 ||
- !intel_dp_aux_display_control_capable(intel_connector))
+ /* Check the VBT and user's module parameters to figure out which
+ * interfaces to probe
+ */
+ switch (i915->params.enable_dpcd_backlight) {
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+ switch (i915->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+ case INTEL_BACKLIGHT_DISPLAY_DDI:
+ try_intel_interface = true;
+ try_vesa_interface = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+ try_intel_interface = true;
+ break;
+ }
/*
- * There are a lot of machines that don't advertise the backlight
- * control interface to use properly in their VBIOS, :\
+ * A lot of eDP panels in the wild will report supporting both the
+ * Intel proprietary backlight control interface and the VESA
+ * backlight control interface. Many of these panels are liars though,
+ * and will only work with the Intel interface. So, always probe for
+ * that first.
*/
- if (i915->vbt.backlight.type !=
- INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915->params.enable_dpcd_backlight != 1 &&
- !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
- DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- drm_info(&i915->drm,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
- return -ENODEV;
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+ return 0;
}
- panel->backlight.setup = intel_dp_aux_setup_backlight;
- panel->backlight.enable = intel_dp_aux_enable_backlight;
- panel->backlight.disable = intel_dp_aux_disable_backlight;
- panel->backlight.set = intel_dp_aux_set_backlight;
- panel->backlight.get = intel_dp_aux_get_backlight;
+ if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 03424d20e9f7..4dba5bb15af5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -16,6 +16,30 @@
#include "intel_dp.h"
#include "intel_hdcp.h"
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+ u32 stream_enc_mask;
+
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+ break;
+ case TRANSCODER_B:
+ stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+ break;
+ case TRANSCODER_C:
+ stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+ break;
+ case TRANSCODER_D:
+ stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+ break;
+ default:
+ stream_enc_mask = 0;
+ }
+
+ return stream_enc_mask;
+}
+
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
long ret;
@@ -561,7 +585,8 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status;
int ret;
@@ -622,48 +647,143 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
};
static int
-intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
- enum transcoder cpu_transcoder,
- bool enable)
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!enable)
- usleep_range(6, 60); /* Bspec says >= 6us */
-
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
- cpu_transcoder, enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ hdcp->stream_transcoder, enable,
+ TRANS_DDI_HDCP_SELECT);
if (ret)
- drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
-static
-bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ u32 stream_enc_status;
+ int ret;
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+ if (!stream_enc_status)
+ return -EINVAL;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status,
+ enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
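
Both stream-encryption helpers rely on the same confirmation pattern: poll a status register until the selected bit matches the requested state or a timeout budget is exhausted, which is what intel_de_wait_for_register() does for them. A self-contained sketch of that pattern (the register read and mask here are stand-ins, not driver API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical status read: the bit comes up on the third poll. */
    static uint32_t read_status(void)
    {
        static int calls;
        return ++calls >= 3 ? 0x4 : 0;
    }

    /* Poll until (status & mask) == value or the budget runs out. */
    static bool wait_for_bits(uint32_t mask, uint32_t value, int tries)
    {
        while (tries--) {
            if ((read_status() & mask) == value)
                return true;
            /* a real implementation sleeps between polls */
        }
        return false;
    }

    int main(void)
    {
        bool enable = true;
        uint32_t mask = 0x4; /* stand-in for a stream encryption status bit */

        printf("%s\n", wait_for_bits(mask, enable ? mask : 0, 10) ?
               "encryption confirmed" : "timed out");
        return 0;
    }
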
+
+static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *intel_dp = &dig_port->dp;
struct drm_dp_query_stream_enc_status_ack_reply reply;
+ struct intel_dp *intel_dp = &dig_port->dp;
int ret;
- if (!intel_dp_hdcp_check_link(dig_port, connector))
- return false;
-
ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
connector->port, &reply);
if (ret) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
- connector->base.base.id, connector->base.name, ret);
+ "[%s:%d] failed QSES ret=%d\n",
+ connector->base.name, connector->base.base.id, ret);
return false;
}
+ drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
+ connector->base.name, connector->base.base.id,
+ reply.auth_completed, reply.encryption_enabled);
+
return reply.auth_completed && reply.encryption_enabled;
}
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ enum pipe pipe = (enum pipe)cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ drm_WARN_ON(&i915->drm, enable &&
+ !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * DP v2.0 I.3.3 ignore the stream signature L' in QSES reply msg reply.
+ * I.3.5 MST source device may use a QSES msg to query downstream status
+ * for a particular stream.
+ */
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ /*
+ * We only need to do the link check for the connector involved with
+ * HDCP port authentication and encryption.
+ * We can re-use the hdcp->is_repeater flag to know whether the connector
+ * is the one involved with HDCP port authentication and encryption.
+ */
+ if (hdcp->is_repeater) {
+ ret = intel_dp_hdcp2_check_link(dig_port, connector);
+ if (ret)
+ return ret;
+ }
+
+ return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+}
+
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -673,10 +793,16 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
- .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
- .check_link = intel_dp_mst_hdcp_check_link,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+ .check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
-
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+ .check_2_2_link = intel_dp_mst_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
.protocol = HDCP_PROTOCOL_DP,
};
@@ -693,10 +819,10 @@ int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
return 0;
if (intel_connector->mst_port)
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_hdcp_shim);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 91d3979902d0..892d7db7d94f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,18 +34,6 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
-{
- int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
-
- /*
- * Pretend no LTTPRs in case of LTTPR detection error, or
- * if too many (>8) LTTPRs are detected. This translates to link
- * training in transparent mode.
- */
- return count <= 0 ? 0 : count;
-}
-
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -142,6 +130,17 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return 0;
ret = intel_dp_read_lttpr_common_caps(intel_dp);
+ if (!ret)
+ return 0;
+
+ lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ /*
+ * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
+ * detected as this breaks link training at least on the Dell WD19TB
+ * dock.
+ */
+ if (lttpr_count == 0)
+ return 0;
/*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
@@ -150,17 +149,12 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
*/
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
- if (!ret)
- return 0;
-
- lttpr_count = intel_dp_lttpr_count(intel_dp);
-
/*
* In case of unsupported number of LTTPRs or failing to switch to
* non-transparent mode fall-back to transparent link training mode,
* still taking into account any LTTPR common lane- rate/count limits.
*/
- if (lttpr_count == 0)
+ if (lttpr_count < 0)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
@@ -222,11 +216,11 @@ intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int lttpr_count = intel_dp_lttpr_count(intel_dp);
+ int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
- return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+ return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
@@ -334,6 +328,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -341,7 +356,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
@@ -355,7 +370,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
@@ -413,7 +428,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
- link_config[0] = 0;
+ link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
@@ -676,9 +691,9 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
- * Stop the link training of the @intel_dp port, disabling the test pattern
- * symbol generation on the port and disabling the training pattern in
- * the sink's DPCD.
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
*
* What symbols are output on the port after this point is
* platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
@@ -692,10 +707,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
intel_dp->link_trained = true;
- intel_dp_program_link_training_pattern(intel_dp,
- crtc_state,
- DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_TRAINING_PATTERN_DISABLE);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 86905aa24db7..6a1f76bd8c75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 27f04aed8764..b4621ed0127e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,8 +53,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
@@ -69,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -569,7 +570,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- pipe_config->cpu_transcoder,
+ pipe_config,
(u8)conn_state->hdcp_content_type);
}
@@ -829,12 +830,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-
- /* TODO: Figure out how to make HDCP work on GEN12+ */
- if (INTEL_GEN(dev_priv) < 12) {
+ if (INTEL_GEN(dev_priv) <= 12) {
ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 000000000000..7ba7f315aaee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+struct intel_limit {
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
+};
+
+static const struct intel_limit intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100MHz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+ /* FIXME: find real dot limits */
+ .dot = { .min = 0, .max = INT_MAX },
+ .vco = { .min = 4800000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
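+
+/*
+ * Worked example for the helper above (illustrative numbers only, not
+ * taken from a real mode): with refclk = 96000 kHz, m2 = 100, n = 3,
+ * p1 = 2 and p2 = 10 we get m = 102, p = 20,
+ * vco = 96000 * 102 / 3 = 3264000 kHz and
+ * dot = 3264000 / 20 = 163200 kHz, which is the returned pipe clock.
+ */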
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
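+
+/*
+ * Note on the helper above: CHV stores m2 in 22.22 fixed point (see
+ * intel_limits_chv), which is why the vco computation divides by
+ * n << 22 - the fixed-point scale cancels out. Illustrative numbers
+ * (not from a real mode): refclk = 100000 kHz, n = 1, m1 = 2,
+ * m2 = 30 << 22, p1 = 3, p2 = 5 gives vco = 100000 * 2 * 30 = 6000000 kHz
+ * and dot = 6000000 / 15 = 400000 kHz, i.e. an 80000 kHz pipe clock
+ * after the divide by 5.
+ */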
+
+/*
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+ const struct intel_limit *limit,
+ const struct dpll *clock)
+{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
+
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+ if (clock->m1 <= clock->m2)
+ return false;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_GEN9_LP(dev_priv)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ return false;
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ return false;
+ }
+
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ return false;
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ return false;
+
+ return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev_priv))
+ return limit->p2.p2_fast;
+ else
+ return limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ return limit->p2.p2_slow;
+ else
+ return limit->p2.p2_fast;
+ }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (m2 + 2) / n / p1 / p2 (Pineview has a single combined m
+ * divider, see pnv_calc_dpll_params()).
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pnv_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int max_n;
+ bool found = false;
+ /* approximately equals target * 3/512, i.e. target * 0.00586 */
+ int err_most = (target >> 8) + (target >> 9);
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n for better precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(to_i915(dev))) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
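+
+/*
+ * For scale (illustrative numbers): a candidate dot clock of 148515 kHz
+ * against a 148500 kHz target is off by 15 kHz, i.e. ~101 ppm, so it
+ * would just miss the "error below 100 ppm, prefer bigger P" shortcut
+ * above.
+ */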
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * m1 * m2 / n / p1 / p2 (a fast clock; the pipe clock is 1/5
+ * of it, see vlv_calc_dpll_params()).
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct dpll clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* based on hardware requirement, prefer smaller n for better precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
+
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * m1 * m2 / n / p1 / p2, with m2 in 22.22 fixed point
+ * (see chv_calc_dpll_params()).
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
+ struct dpll clock;
+ u64 m2;
+ int found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
+
+ /*
+ * Based on the hardware doc, n is always set to 1 and m1 is always
+ * set to 2. If we need to support a 200MHz refclk we will have to
+ * revisit this, because n may no longer be 1.
+ */
+ clock.n = 1;
+ clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_bxt;
+
+ return chv_find_best_dpll(limit, crtc_state,
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
+}
+
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
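+
+/*
+ * A reading of the encoding above (not a Bspec quote): the FP register
+ * carries Pineview's ring-counter N one-hot ((1 << n) in bits 16+) with
+ * the combined m divider in the low bits.
+ */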
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev_priv)) {
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
+ crtc_state->dpll_hw_state.fp1 = fp2;
+ } else {
+ crtc_state->dpll_hw_state.fp1 = fp;
+ }
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
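+ /*
+ * The encoding is one-hot: e.g. p1 == 3 becomes (1 << 2), i.e. 0b100,
+ * shifted into the P1 field (a sketch; the *_SHIFT macros below place
+ * the field).
+ */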
+ if (IS_PINEVIEW(dev_priv))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev_priv) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_GEN(dev_priv) >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc_state->sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ /*
+ * Bspec:
+ * "[Almador Errata}: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to "1" in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity we keep both bits always enabled in
+ * both DPLLS. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ INTEL_GEN(dev_priv) >= 11) {
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll, fp, fp2;
+ int factor;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
+ factor = 25;
+ } else if (crtc_state->sdvo_tv_clock) {
+ factor = 20;
+ }
+
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
+
+ dpll = 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, beyond potentially
+ * using up a bit of power.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+ * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (crtc_state->dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_limit *limit;
+ int refclk = 120000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv)) {
+ if (refclk == 100000)
+ limit = &ilk_limits_dual_lvds_100m;
+ else
+ limit = &ilk_limits_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &ilk_limits_single_lvds_100m;
+ else
+ limit = &ilk_limits_single_lvds;
+ }
+ } else {
+ limit = &ilk_limits_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ ilk_compute_dpll(crtc, crtc_state, NULL);
+
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &pnv_limits_lvds;
+ } else {
+ limit = &pnv_limits_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
+ } else {
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 48000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ else if (!IS_GEN(dev_priv, 2))
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ else
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 000000000000..caf4615092e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+struct dpll;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index b53c50372918..584c14c4cbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -43,7 +43,7 @@
#define PANEL_PWM_MAX_VALUE 0xFF
-static u32 dcs_get_backlight(struct intel_connector *connector)
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
}
}
-static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
@@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
}
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&cabc, sizeof(cabc));
}
- dcs_set_backlight(conn_state, panel->backlight.level);
+ dcs_set_backlight(conn_state, level);
}
static int dcs_setup_backlight(struct intel_connector *connector,
@@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector,
return 0;
}
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ .setup = dcs_setup_backlight,
+ .enable = dcs_enable_backlight,
+ .disable = dcs_disable_backlight,
+ .set = dcs_set_backlight,
+ .get = dcs_get_backlight,
+};
+
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
@@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
- panel->backlight.setup = dcs_setup_backlight;
- panel->backlight.enable = dcs_enable_backlight;
- panel->backlight.disable = dcs_disable_backlight;
- panel->backlight.set = dcs_set_backlight;
- panel->backlight.get = dcs_get_backlight;
+ panel->backlight.funcs = &dcs_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 237dbb1ba0ee..090cd76266c6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- /*I915_WRITE(DVOB_SRCDIM,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
intel_de_write(dev_priv, dvo_srcdim_reg,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- /*I915_WRITE(DVOB, dvo_val);*/
intel_de_write(dev_priv, dvo_reg, dvo_val);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a5b072816a7b..5fd4fa4805ef 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -676,7 +676,7 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static bool tiling_is_valid(struct drm_i915_private *dev_priv,
- uint64_t modifier)
+ u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fence_id = plane_state->vma->fence->id;
else
cache->fence_id = -1;
+
+ cache->psr2_active = crtc_state->has_psr2;
}
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
@@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Tigerlake does not support FBC with PSR2. The recommendation is to
+ * keep this combination disabled.
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+ fbc->no_fbc_reason = "not supported with PSR2";
+ return false;
+ }
+
return true;
}
@@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /*
- * Fbc is causing random underruns in CI execution on TGL platforms.
- * Disabling the same while the problem is being debugged and analyzed.
- */
- if (IS_TIGERLAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 842c04e63214..84f853f113b9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (vma->obj->stolen && !prealloc)
+ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -595,7 +595,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING &&
- intel_fb_obj(&ifbdev->fb->base)->stolen)
+ !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 000000000000..b2eb96ae10a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+/* units of 100MHz */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
+
+ return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
+ return 0;
+
+ /* Ivybridge 3 pipe is really complicated */
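+ /*
+ * A sketch of the constraint enforced below, as I read it: FDI links B
+ * and C share lanes, so pipe B may use more than 2 lanes only while
+ * pipe C is idle, and pipe C itself is capped at 2 lanes.
+ */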
+ switch (pipe) {
+ case PIPE_A:
+ return 0;
+ case PIPE_B:
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+ return 0;
+ case PIPE_C:
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ BUG();
+ }
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
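+ /*
+ * Worked example (illustrative numbers, not from any spec table,
+ * ignoring any spread-spectrum margin the helper may add): with
+ * link_bw = 270000 kHz per lane, one lane carries about
+ * 270000 * 8 = 2160000 kbps of payload after 8b/10b decoding. A
+ * 148500 kHz mode at 24 bpp needs 148500 * 24 = 3564000 kbps, so
+ * two lanes are required.
+ */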
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n, false, false);
+
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return I915_DISPLAY_CONFIG_RETRY;
+
+ return ret;
+}
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (IS_IVYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, tries;
+
+ /* FDI needs bits from pipe first */
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+}
+
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, retry;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN(dev_priv, 6)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, j;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
+ }
+
+train_done:
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+ }
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(0x7 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev_priv))
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN(dev_priv, 5)) {
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ } else if (IS_GEN(dev_priv, 6)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 000000000000..a9cd21663eb8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+#define I915_DISPLAY_CONFIG_RETRY 1
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b370d7a4..7b38eee9980f 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
struct i915_vma *vma;
spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj)
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b2a4bbcfdcd2..ae1371c36a32 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -15,6 +15,7 @@
#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
@@ -23,9 +24,78 @@
#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
#define HDCP2_LC_RETRY_CNT 3
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+ /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
+ return connector->port ? connector->port->vcpi.vcpi : 0;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type supported by all streams in a DP MST topology: the security
+ * f/w has no provision to mark a content_type for each stream separately,
+ * so it marks all available streams with the content_type provided at the
+ * time of port authentication. This may prevent userspace from using type1
+ * content on an HDCP 2.2 capable sink when other sinks in the DP MST
+ * topology are not HDCP 2.2 capable. Though it is not compulsory, security
+ * fw should change its policy to mark different content_types for
+ * different streams.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ bool enforce_type0 = false;
+ int k;
+
+ data->k = 0;
+
+ if (dig_port->hdcp_auth_status)
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ if (!enforce_type0 && !intel_hdcp2_capable(connector))
+ enforce_type0 = true;
+
+ data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
+ /*
+ * Apply a common protection level across all streams in the DP MST
+ * topology: the highest content type supported by every stream.
+ */
+ for (k = 0; k < data->k; k++)
+ data->streams[k].stream_type =
+ enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
+}
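+
+/*
+ * Example with a hypothetical topology: an MST hub driving two displays,
+ * only one of which is HDCP 2.2 capable, ends up with data->k == 2 and
+ * both streams forced to DRM_MODE_HDCP_CONTENT_TYPE0, since the lowest
+ * common protection level has to apply to the whole port.
+ */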
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -762,15 +832,22 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
- /*
- * XXX: If we have MST-connected devices, we need to enable encryption
- * on those as well.
- */
+ /* DP MST Auth Part 1 Step 2.a and Step 2.b */
+ if (shim->stream_encryption) {
+ ret = shim->stream_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
@@ -792,24 +869,29 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
- /*
- * If there are other connectors on this port using HDCP, don't disable
- * it. Instead, toggle the HDCP signalling off on that particular
- * connector/pipe and exit.
- */
- if (dig_port->num_hdcp_streams > 0) {
- ret = hdcp->shim->toggle_signalling(dig_port,
- cpu_transcoder, false);
- if (ret)
- DRM_ERROR("Failed to disable HDCP signalling\n");
- return ret;
+ if (hdcp->shim->stream_encryption) {
+ ret = hdcp->shim->stream_encryption(connector, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+ /*
+ * If there are other connectors on this port using HDCP,
+ * don't disable it until HDCP stream encryption has been
+ * disabled for all connectors in the MST topology.
+ */
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
}
hdcp->hdcp_encrypted = false;
intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
- ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1014,7 +1096,8 @@ static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
struct hdcp2_ake_init *ake_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1043,7 +1126,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct hdcp2_ake_no_stored_km *ek_pub_km,
size_t *msg_sz)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1070,7 +1154,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
static int hdcp2_verify_hprime(struct intel_connector *connector,
struct hdcp2_ake_send_hprime *rx_hprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1095,7 +1180,8 @@ static int
hdcp2_store_pairing_info(struct intel_connector *connector,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1121,7 +1207,8 @@ static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
struct hdcp2_lc_init *lc_init)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1147,7 +1234,8 @@ static int
hdcp2_verify_lprime(struct intel_connector *connector,
struct hdcp2_lc_send_lprime *rx_lprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1172,7 +1260,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
static int hdcp2_prepare_skey(struct intel_connector *connector,
struct hdcp2_ske_send_eks *ske_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1200,7 +1289,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
*rep_topology,
struct hdcp2_rep_send_ack *rep_send_ack)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1228,7 +1318,8 @@ static int
hdcp2_verify_mprime(struct intel_connector *connector,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1251,7 +1342,8 @@ hdcp2_verify_mprime(struct intel_connector *connector,
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1275,6 +1367,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
static int hdcp2_close_mei_session(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1288,7 +1381,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
- &connector->hdcp.port_data);
+ &dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1448,13 +1541,14 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
struct hdcp2_rep_stream_ready stream_ready;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
- int ret;
+ int ret, streams_size_delta, i;
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
return -ERANGE;
@@ -1463,16 +1557,18 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
- /* K no of streams is fixed as 1. Stored as big-endian. */
- msgs.stream_manage.k = cpu_to_be16(1);
+ msgs.stream_manage.k = cpu_to_be16(data->k);
- /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- msgs.stream_manage.streams[0].stream_id = 0;
- msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+ for (i = 0; i < data->k; i++) {
+ msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+ msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+ }
+ streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+ sizeof(struct hdcp2_streamid_type);
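+ /*
+ * Sketch of the sizing (counts assumed): with data->k = 2, every
+ * hdcp2_streamid_type entry beyond the first two is unused, so the
+ * stream_manage message is shrunk by that many trailing entries
+ * before it goes on the wire.
+ */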
/* Send it to Repeater */
ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
- sizeof(msgs.stream_manage));
+ sizeof(msgs.stream_manage) - streams_size_delta);
if (ret < 0)
goto out;
@@ -1481,8 +1577,8 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
if (ret < 0)
goto out;
- hdcp->port_data.seq_num_m = hdcp->seq_num_m;
- hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ data->seq_num_m = hdcp->seq_num_m;
+
ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
out:
@@ -1606,6 +1702,36 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
return ret;
}
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret = 0;
+
+ if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS)) {
+ drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+ connector->base.name, connector->base.base.id);
+ return -EPERM;
+ }
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ return ret;
+}
+
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1641,7 +1767,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ dig_port->hdcp_auth_status = true;
return ret;
}
@@ -1665,7 +1792,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
@@ -1714,11 +1841,11 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- int ret, i, tries = 3;
+ int ret = 0, i, tries = 3;
- for (i = 0; i < tries; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = hdcp2_propagate_stream_management_info(connector);
@@ -1728,8 +1855,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
ret);
break;
}
- hdcp->port_data.streams[0].stream_type =
- hdcp->content_type;
+
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
@@ -1744,7 +1870,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
- if (!ret) {
+ if (!ret && !dig_port->hdcp_auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -1759,12 +1885,16 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
}
}
+ ret = hdcp2_enable_stream_encryption(connector);
+
return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1772,6 +1902,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
+ /* Stream which requires encryption */
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
@@ -1789,18 +1929,37 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
connector->base.name, connector->base.base.id);
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, false);
+ if (ret) {
+ drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
+ }
+
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
return ret;
}
@@ -1816,6 +1975,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -1837,7 +1997,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port, connector);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
intel_hdcp_update_value(connector,
@@ -1893,6 +2053,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
}
out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1968,12 +2129,13 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
static int initialize_hdcp_port_data(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
- struct hdcp_port_data *data = &hdcp->port_data;
+ enum port port = dig_port->base.port;
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
@@ -1994,16 +2156,15 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
- data->k = 1;
if (!data->streams)
- data->streams = kcalloc(data->k,
+ data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
-
+ /* For SST */
data->streams[0].stream_id = 0;
data->streams[0].stream_type = hdcp->content_type;
@@ -2046,14 +2207,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
+static void intel_hdcp2_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector, port, shim);
+ ret = initialize_hdcp_port_data(connector, dig_port, shim);
if (ret) {
drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
@@ -2063,7 +2225,7 @@ static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
}
int intel_hdcp_init(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2073,15 +2235,15 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
- intel_hdcp2_init(connector, port, shim);
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, dig_port, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(hdcp->port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
return ret;
}
@@ -2095,7 +2257,7 @@ int intel_hdcp_init(struct intel_connector *connector,
}
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type)
+ const struct intel_crtc_state *pipe_config, u8 content_type)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2106,15 +2268,28 @@ int intel_hdcp_enable(struct intel_connector *connector,
if (!hdcp->shim)
return -ENOENT;
+ if (!connector->encoder) {
+ drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+ connector->base.name, connector->base.base.id);
+ return -ENODEV;
+ }
+
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&dev_priv->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
- hdcp->cpu_transcoder = cpu_transcoder;
+
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+ hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+ hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+ hdcp->stream_transcoder = INVALID_TRANSCODER;
+ }
if (INTEL_GEN(dev_priv) >= 12)
- hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2210,6 +2385,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2221,11 +2397,19 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
+ /*
+ * If HDCP is already ENABLED and the CP property is DESIRED,
+ * schedule prop_work to report the correct CP property to
+ * user space.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
}
if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -2275,7 +2459,6 @@ void intel_hdcp_cleanup(struct intel_connector *connector)
drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
mutex_lock(&hdcp->mutex);
- kfree(hdcp->port_data.streams);
hdcp->shim = NULL;
mutex_unlock(&hdcp->mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1bbf5b67ed0a..8f53b0c7fe5c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
@@ -16,16 +18,18 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
struct intel_hdcp_shim;
+struct intel_digital_port;
enum port;
enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector, enum port port,
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type);
+ const struct intel_crtc_state *pipe_config, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 82674a8853c6..95919d325b0b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *frame, ssize_t len)
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ctl_reg);
}
-static void hsw_read_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- void *frame, ssize_t len)
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type, void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1495,15 +1494,16 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- false);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- true);
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1526,8 +1526,9 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ cpu_transcoder, enable,
+ TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1732,7 +1733,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
@@ -2216,7 +2218,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
has_hdmi_sink))
return MODE_CLOCK_HIGH;
- /* BXT DPLL can't generate 223-240 MHz */
+ /* GLK DPLL can't generate 446-480 MHz */
+ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ return MODE_CLOCK_RANGE;
+
+ /* BXT/GLK DPLL can't generate 223-240 MHz */
if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
@@ -2950,21 +2956,12 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *dig_port =
- hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
- /*
- * Attach Colorspace property for Non LSPCON based device
- * ToDo: This needs to be extended for LSPCON implementation
- * as well. Will be implemented separately.
- */
- if (!dig_port->lspcon.active)
- intel_attach_colorspace_property(connector);
-
+ intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -3300,7 +3297,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_hdmi->attached_connector = intel_connector;
if (is_hdcp_supported(dev_priv, port)) {
- int ret = intel_hdcp_init(intel_connector, port,
+ int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
drm_dbg_kms(&dev_priv->drm,
@@ -3438,3 +3435,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(dig_port, intel_connector);
}
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * Slice Height determination : HDMI2.1 Section 7.7.5.2
+ * Select smallest slice height >=96, that results in a valid PPS and
+ * requires minimum padding lines required for final slice.
+ *
+ * Assumption : Vactive is even.
+ */
+ for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ return 0;
+}
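+
+/*
+ * For example (illustrative): with vactive = 2160, the loop rejects
+ * 96..106 and returns 108, the smallest even divisor of 2160 that is
+ * >= 96 (2160 / 108 == 20 slices exactly, so no padding is needed).
+ */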
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder
+ * and dsc decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by DSC encoder
+ * @hdmi_max_slices: maximum slices supported by sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink
+ *
+ * @return: num of dsc slices that can be supported by the dsc encoder
+ * and decoder.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE 2720000
+/*
+ * The rate at which the source and sink are required to process pixels in
+ * each slice can be at one of two levels: at least 340000KHz or at least
+ * 400000KHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH 2720
+ int kslice_adjust;
+ int adjusted_clk_khz;
+ int min_slices;
+ int target_slices;
+ int max_throughput; /* max clock freq. in khz per slice */
+ int max_slice_width;
+ int slice_width;
+ int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ if (!hdmi_throughput)
+ return 0;
+
+ /*
+ * Slice Width determination : HDMI2.1 Section 7.7.5.1
+ * The kslice_adjust factor for 4:2:0 and 4:2:2 formats is 0.5,
+ * whereas for 4:4:4 it is 1.0. These factors are multiplied by 10
+ * here, and the adjusted clock value is divided by 10 later.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ kslice_adjust = 10;
+ else
+ kslice_adjust = 5;
+
+ /*
+ * As per spec, the rate at which the source and the sink process
+ * the pixels per slice is at one of two levels: at least 340MHz or
+ * 400MHz. This depends upon the pixel clock rate and the output
+ * format (kslice adjust).
+ * If pixel clock * kslice adjust <= 2720MHz, slices can be processed
+ * at max 340MHz, otherwise they can be processed at max 400MHz.
+ */
+
+ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+ if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+ else
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+ /*
+ * Taking into account the sink's capability for maximum
+ * clock per slice (in MHz) as read from HF-VSDB.
+ */
+ max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+ min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+ max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
+
+ /*
+ * Keep increasing the number of slices per line, starting from
+ * min_slices, until we find a count for which the slice_width is
+ * just below max_slice_width. The slices/line selected must be
+ * less than or equal to the max horizontal slices that the
+ * combination of PCON encoder and HDMI decoder can support.
+ */
+ slice_width = max_slice_width;
+
+ do {
+ if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+ target_slices = 1;
+ else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+ target_slices = 2;
+ else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+ target_slices = 4;
+ else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+ target_slices = 8;
+ else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+ target_slices = 12;
+ else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+ target_slices = 16;
+ else
+ return 0;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+ if (slice_width >= max_slice_width)
+ min_slices = target_slices + 1;
+ } while (slice_width >= max_slice_width);
+
+ return target_slices;
+}
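+
+/*
+ * Illustrative walk-through (capabilities assumed): a 3840x2160 RGB mode
+ * at 594000 kHz gives kslice_adjust = 10, so adjusted_clk_khz = 594000,
+ * which is below HDMI_DSC_PEAK_PIXEL_RATE; max_throughput = 340000 and
+ * min_slices = 2. With max_slice_width = 2720, target_slices = 2 yields
+ * slice_width = 1920 < 2720, so 2 slices are returned.
+ */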
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on
+ * source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: dsc slice width supported by the source and sink
+ * @num_slices: num of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink
+ *
+ * @return: compressed bits_per_pixel in steps of 1/16 of bits_per_pixel
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+ int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes)
+{
+ int max_dsc_bpp, min_dsc_bpp;
+ int target_bytes;
+ bool bpp_found = false;
+ int bpp_decrement_x16;
+ int bpp_target;
+ int bpp_target_x16;
+
+ /*
+ * Get the min and max bpp as per Table 7.23 in the HDMI2.1 spec.
+ * Start with the max bpp and keep decrementing in fractional-bpp
+ * steps, if supported by the PCON DSC encoder.
+ *
+ * For each bpp we check whether the resulting number of bytes can be
+ * supported by the HDMI sink.
+ */
+
+ /* Assuming bpc of 8 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ min_dsc_bpp = 6;
+ max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ min_dsc_bpp = 8;
+ max_dsc_bpp = 3 * 8; /* 3*bpc */
+ } else {
+ /* Assuming 4:2:2 encoding */
+ min_dsc_bpp = 7;
+ max_dsc_bpp = 2 * 8; /* 2*bpc */
+ }
+
+ /*
+ * Take into account whether DSC_all_bpp is supported by the HDMI2.1 sink.
+ * Section 7.7.34 : Source shall not enable compressed Video
+ * Transport with bpp_target settings above 12 bpp unless
+ * DSC_all_bpp is set to 1.
+ */
+ if (!hdmi_all_bpp)
+ max_dsc_bpp = min(max_dsc_bpp, 12);
+
+ /*
+ * The sink has a limit on the compressed data in bytes for a
+ * scanline, described in the max_chunk_bytes field in the HFVSDB
+ * block of the EDID. The number of bytes depends on the target
+ * bits per pixel that the source configures. So we start with the
+ * max_bpp and calculate the target_chunk_bytes. We keep
+ * decrementing the target_bpp until the target_chunk_bytes drops
+ * just below the sink's max_chunk_bytes, or until we reach min_dsc_bpp.
+ *
+ * The decrement is according to the fractional support from PCON DSC
+ * encoder. For fractional BPP we use bpp_target as a multiple of 16.
+ *
+ * bpp_target_x16 = bpp_target * 16
+ * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps
+ * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+ */
+
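+ /*
+ * E.g. (illustrative): with hdmi_all_bpp false, max_dsc_bpp is
+ * capped at 12; with no fractional support bpp_decrement_x16 = 16,
+ * so bpp_target_x16 starts at 11 * 16 and the loop below tries
+ * 11, 10, 9, ... bpp until target_bytes fits within
+ * hdmi_max_chunk_bytes or min_dsc_bpp is reached.
+ */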
+ bpp_target = max_dsc_bpp;
+
+ /* If the src doesn't support fractional bpp, decrement by a whole bpp (16 in bppx16 units) */
+ if (!src_fractional_bpp)
+ src_fractional_bpp = 1;
+ bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
+ bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+ while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+ int bpp;
+
+ bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+ target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+ if (target_bytes <= hdmi_max_chunk_bytes) {
+ bpp_found = true;
+ break;
+ }
+ bpp_target_x16 -= bpp_decrement_x16;
+ }
+ if (bpp_found)
+ return bpp_target_x16;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 15eb0ccde76e..fa1a9b030850 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+ int num_slices, int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
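+
+/*
+ * Illustrative call sequence (a sketch under assumptions, not code from
+ * this patch): a DP -> HDMI2.1 PCON caller would typically pick a slice
+ * count first and then a compressed bpp, roughly:
+ *
+ *	int slices, bpp_x16;
+ *
+ *	slices = intel_hdmi_dsc_get_num_slices(crtc_state, src_max_slices,
+ *					       src_max_slice_width,
+ *					       hdmi_max_slices,
+ *					       hdmi_throughput);
+ *	if (slices)
+ *		bpp_x16 = intel_hdmi_dsc_get_bpp(src_fractional_bpp,
+ *						 hdisplay / slices, slices,
+ *						 output_format, hdmi_all_bpp,
+ *						 hdmi_max_chunk_bytes);
+ *
+ * where hdisplay and the source/sink capability arguments are assumed to
+ * come from the adjusted mode and from DPCD/EDID respectively.
+ */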
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e37d45e531df..e4ff533e3a69 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -30,11 +30,15 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_lspcon.h"
+#include "intel_hdmi.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ return DPCD_MCA_LSPCON_HDR_STATUS;
+ else
+ return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+ &hdr_caps, 1);
+
+ if (ret < 0) {
+ drm_dbg_kms(dev, "HDR capability detection failed\n");
+ lspcon->hdr_supported = false;
+ } else if (hdr_caps & 0x1) {
+ drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+ /* Use the legacy HSW implementation to write the GMP DIP */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+ break;
+ default:
return;
-
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
-
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
@@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- /* FIXME implement this */
+ /* FIXME implement for AVI Infoframe as well */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ hsw_read_infoframe(encoder, crtc_state, type,
+ frame, len);
}
void lspcon_set_infoframes(struct intel_encoder *encoder,
@@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* Set the Colorspace as per the HDMI spec */
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
@@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- /* FIXME actually read this from the hw */
- return 0;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool infoframes_enabled;
+ u32 val = 0;
+ u32 mask, tmp;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+ else
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+ if (infoframes_enabled)
+ val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ if (lspcon->hdr_supported) {
+ tmp = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+ if (tmp & mask)
+ val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+ }
+
+ return val;
}
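+
+/*
+ * Annotation (not part of this patch): the returned value uses the same
+ * bit encoding as crtc_state->infoframes.enable, so state readout code
+ * can test, e.g.:
+ *
+ *	if (val & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ *		... the GMP DIP is live ...
+ *
+ * intel_hdmi_infoframe_enable() is the existing i915 helper used above to
+ * build the mask.
+ */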
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
@@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-static bool lspcon_init(struct intel_digital_port *dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
struct intel_dp *dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port)
return true;
}
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
void lspcon_resume(struct intel_digital_port *dig_port)
{
struct intel_lspcon *lspcon = &dig_port->lspcon;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index b03dcb7076d8..e19e10492b05 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,6 +15,8 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
void lspcon_resume(struct intel_digital_port *dig_port);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 52b4f6193b4c..f455040fa989 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -182,6 +183,7 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
+ struct intel_frontbuffer *frontbuffer;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -282,21 +284,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *from = NULL, *to = NULL;
+ struct intel_frontbuffer *frontbuffer = NULL;
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
- if (overlay->vma)
- from = intel_frontbuffer_get(overlay->vma->obj);
if (vma)
- to = intel_frontbuffer_get(vma->obj);
+ frontbuffer = intel_frontbuffer_get(vma->obj);
- intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
- if (to)
- intel_frontbuffer_put(to);
- if (from)
- intel_frontbuffer_put(from);
+ if (overlay->frontbuffer)
+ intel_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 9f23bac0d792..5fdf52643150 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -511,40 +511,79 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
0, user_max);
}
-static u32 intel_panel_compute_brightness(struct intel_connector *connector,
- u32 val)
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness < 0)
return val;
if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val + panel->backlight.min;
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
return val;
}
-static u32 lpt_get_backlight(struct intel_connector *connector)
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_panel_invert_pwm_level(connector, val);
+}
+
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
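+
+/*
+ * Illustrative example (not part of this patch): with a user-visible
+ * range of [min, max] = [0, 100], a PWM range of [pwm_level_min,
+ * pwm_level_max] = [10, 200] and no inversion quirk, a backlight level
+ * of 50 maps to scale(50, 0, 100, 10, 200) = 105, and
+ * intel_panel_backlight_level_from_pwm(connector, 105) maps back to 50.
+ * With QUIRK_INVERT_BRIGHTNESS the PWM value is first mirrored as
+ * pwm_level_max - (val - pwm_level_min) before scaling back.
+ */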
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 pch_get_backlight(struct intel_connector *connector)
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 i9xx_get_backlight(struct intel_connector *connector)
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -564,23 +603,17 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
return val;
}
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 vlv_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_connector_get_pipe(connector);
-
- return _vlv_get_backlight(dev_priv, pipe);
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector)
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -589,7 +622,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
-static u32 pwm_get_backlight(struct intel_connector *connector)
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_panel *panel = &connector->panel;
struct pwm_state state;
@@ -624,12 +657,12 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
- lbpc = level * 0xfe / panel->backlight.max + 1;
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
@@ -666,7 +699,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
-static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
@@ -681,10 +714,9 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.set(conn_state, level);
+ panel->backlight.funcs->set(conn_state, level);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -726,13 +758,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_unlock(&dev_priv->backlight_lock);
}
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, level);
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
@@ -754,13 +786,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -769,44 +801,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
}
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
tmp & ~BLM_PWM_ENABLE);
}
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp, val;
+ u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -820,14 +852,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
}
}
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -835,7 +867,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
tmp & ~BXT_BLC_PWM_ENABLE);
}
-static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
@@ -870,13 +902,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
- panel->backlight.disable(old_conn_state);
+ panel->backlight.funcs->disable(old_conn_state, 0);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -906,7 +938,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -923,11 +955,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -958,9 +990,9 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -974,7 +1006,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -987,7 +1019,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1001,7 +1033,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -1013,7 +1045,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1028,7 +1060,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1044,11 +1076,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1063,11 +1095,11 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
- ctl = panel->backlight.max << 16;
+ ctl = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
ctl2 = 0;
if (panel->backlight.active_low_pwm)
@@ -1079,7 +1111,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1116,9 +1148,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1133,7 +1165,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1152,9 +1184,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1168,14 +1200,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
-static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- int level = panel->backlight.level;
- level = intel_panel_compute_brightness(connector, level);
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
@@ -1198,7 +1228,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s
panel->backlight.device->props.max_brightness);
}
- panel->backlight.enable(crtc_state, conn_state);
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1233,10 +1263,8 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_lock(&dev_priv->backlight_lock);
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
mutex_unlock(&dev_priv->backlight_lock);
@@ -1567,13 +1595,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
u32 pwm;
- if (!panel->backlight.hz_to_pwm) {
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion not supported\n");
return 0;
}
- pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion failed\n");
@@ -1592,7 +1620,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1609,7 +1637,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
}
/* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
}
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
@@ -1629,37 +1657,32 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
- if (cpu_mode)
- val = pch_get_backlight(connector);
- else
- val = lpt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, panel->backlight.level);
+ lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1674,29 +1697,24 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = pch_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
return 0;
@@ -1716,27 +1734,26 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
- panel->backlight.max = ctl >> 17;
+ panel->backlight.pwm_level_max = ctl >> 17;
- if (!panel->backlight.max) {
- panel->backlight.max = get_backlight_max_vbt(connector);
- panel->backlight.max >>= 1;
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
}
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_panel_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
- panel->backlight.enabled = val != 0;
+ panel->backlight.pwm_enabled = val != 0;
return 0;
}
@@ -1745,32 +1762,27 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
-
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max *= 0xff;
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1779,7 +1791,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
@@ -1788,22 +1800,17 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = _vlv_get_backlight(dev_priv, pipe);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1828,24 +1835,18 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
@@ -1855,7 +1856,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
+ u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
@@ -1868,30 +1869,24 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
-static int pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1915,8 +1910,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
- panel->backlight.max = 100; /* 100% */
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
if (pwm_is_enabled(panel->backlight.pwm)) {
/* PWM is already enabled, use existing settings */
@@ -1924,10 +1919,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
100);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.level = clamp(level, panel->backlight.min,
- panel->backlight.max);
- panel->backlight.enabled = true;
+ level = intel_panel_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
@@ -1943,6 +1936,58 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_panel_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1981,12 +2026,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.setup(intel_connector, pipe);
+ ret = panel->backlight.funcs->setup(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
@@ -2016,6 +2061,94 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel)
panel->backlight.present = false;
}
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
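+
+/*
+ * Note on the dispatch chain (annotation, not part of this patch): generic
+ * backlight code always calls panel->backlight.funcs. For plain PWM
+ * backlights that is pwm_bl_funcs, whose intel_pwm_* wrappers apply the
+ * brightness inversion quirk and forward to the chip-specific
+ * panel->backlight.pwm_funcs selected below (bxt/cnp/lpt/spt/pch/vlv/
+ * i965/i9xx/ext_pwm). An eDP AUX backlight implementation may instead
+ * install its own funcs while the pwm_funcs remain available underneath.
+ */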
+
/* Set up chip specific backlight functions */
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
@@ -2024,75 +2157,39 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
-
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
if (IS_GEN9_LP(dev_priv)) {
- panel->backlight.setup = bxt_setup_backlight;
- panel->backlight.enable = bxt_enable_backlight;
- panel->backlight.disable = bxt_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.setup = cnp_setup_backlight;
- panel->backlight.enable = cnp_enable_backlight;
- panel->backlight.disable = cnp_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- panel->backlight.setup = lpt_setup_backlight;
- panel->backlight.enable = lpt_enable_backlight;
- panel->backlight.disable = lpt_disable_backlight;
- panel->backlight.set = lpt_set_backlight;
- panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev_priv))
- panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
- panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.setup = pch_setup_backlight;
- panel->backlight.enable = pch_enable_backlight;
- panel->backlight.disable = pch_disable_backlight;
- panel->backlight.set = pch_set_backlight;
- panel->backlight.get = pch_get_backlight;
- panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.setup = pwm_setup_backlight;
- panel->backlight.enable = pwm_enable_backlight;
- panel->backlight.disable = pwm_disable_backlight;
- panel->backlight.set = pwm_set_backlight;
- panel->backlight.get = pwm_get_backlight;
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
- panel->backlight.setup = vlv_setup_backlight;
- panel->backlight.enable = vlv_enable_backlight;
- panel->backlight.disable = vlv_disable_backlight;
- panel->backlight.set = vlv_set_backlight;
- panel->backlight.get = vlv_get_backlight;
- panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
} else if (IS_GEN(dev_priv, 4)) {
- panel->backlight.setup = i965_setup_backlight;
- panel->backlight.enable = i965_enable_backlight;
- panel->backlight.disable = i965_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
- panel->backlight.setup = i9xx_setup_backlight;
- panel->backlight.enable = i9xx_enable_backlight;
- panel->backlight.disable = i9xx_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
}
enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5b813fe90557..1d340f77bffc 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -49,6 +49,10 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 000000000000..c4867a8020a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_pps.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * See intel_pps_reset_all() for why we need a power domain reference here.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ mutex_unlock(&dev_priv->pps_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return 0;
+}
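+
+/*
+ * Illustrative usage (a sketch, not code from this patch): callers bracket
+ * power sequencer register access with this lock/unlock pair so that both
+ * the PPS state and the display-core power reference stay valid:
+ *
+ *	intel_wakeref_t wakeref;
+ *
+ *	wakeref = intel_pps_lock(intel_dp);
+ *	... access PP_STATUS / PP_CONTROL ...
+ *	intel_pps_unlock(intel_dp, wakeref);
+ */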
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+ u32 DP;
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
+
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
+ }
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ if (!pll_enabled) {
+ vlv_force_pll_off(dev_priv, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
+ * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe !=
+ intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ } else {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
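+ /* ffs() gives the 1-based index of the lowest set bit, hence the -1. */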
+ return ffs(pipes) - 1;
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.pps_pipe;
+
+ pipe = vlv_find_free_pps(dev_priv);
+
+ /*
+ * Didn't find one. This should not happen since there
+ * are two power sequencers and up to two eDP ports.
+ */
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
+ vlv_steal_power_sequencer(dev_priv, pipe);
+ intel_dp->pps.pps_pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe),
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock onto the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
+
+ return intel_dp->pps.pps_pipe;
+}
+
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int backlight_controller = dev_priv->vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ if (!intel_dp->pps.pps_reset)
+ return backlight_controller;
+
+ intel_dp->pps.pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed, the SW state is fixed and
+ * has been set up during connector init.
+ */
+ pps_init_registers(intel_dp, false);
+
+ return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+ enum port port,
+ vlv_pipe_check pipe_check)
+{
+ enum pipe pipe;
+
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+
+ if (port_sel != PANEL_PORT_SELECT_VLV(port))
+ continue;
+
+ if (!pipe_check(dev_priv, pipe))
+ continue;
+
+ return pipe;
+ }
+
+ return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* try to find a pipe with this port selected */
+ /* first pick one where the panel is on */
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_vdd_on);
+ /* didn't find one? pick one with just the correct port */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_any);
+
+ /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
+ return;
+
+ /*
+ * We can't grab pps_mutex here due to deadlock with power_domain
+ * mutex when power_domain functions are called while holding pps_mutex.
+ * That also means that in order to use pps_pipe the code needs to
+ * hold both a power domain reference and pps_mutex, and the power domain
+ * reference get/put must be done while _not_ holding pps_mutex.
+ * intel_pps_{lock,unlock}() do these steps in the correct order, so
+ * they should always be used.
+ */
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
+ intel_dp->pps.pps_reset = true;
+ else
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pps_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_GEN9_LP(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
+ regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ }
+}
+
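+/*
+ * PP_STATUS mask/value pairs polled by wait_panel_status() to detect the
+ * sequencer idle states: panel fully on, fully off, or past a power cycle.
+ */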
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_verify_state(intel_dp);
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
+ /* take the difference of the current time and the panel power off time
+ * and then make the panel wait for t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+ /* If we disabled the VDD override bit last, we have to do this
+ * wait manually. */
+ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+ intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+ intel_dp->pps.backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 control;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
+ return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
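+ /* Cancel any pending delayed VDD off; we're forcing VDD on now. */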
+ cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+ intel_dp->pps.want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+ /*
+ * If the panel wasn't on, delay before accessing the aux channel.
+ */
+ if (!edp_have_panel_power(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ msleep(intel_dp->pps.panel_power_up_delay);
+ }
+
+ return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool vdd;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ vdd = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port =
+ dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if ((pp & PANEL_POWER_ON) == 0)
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_pps *pps = container_of(to_delayed_work(__work),
+ struct intel_pps, panel_vdd_work);
+ struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->pps.want_panel_vdd)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ }
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with intel_pps_vdd_on_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ if (sync)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
+ return;
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
+ if (IS_GEN(dev_priv, 5)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ pp |= PANEL_POWER_ON;
+ if (!IS_GEN(dev_priv, 5))
+ pp |= PANEL_POWER_RESET;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->pps.last_power_on = jiffies;
+
+ if (IS_GEN(dev_priv, 5)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, as otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_off(intel_dp);
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ /* We got a reference when we enabled the VDD. */
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ intel_dp->pps.last_backlight_off = jiffies;
+ edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ is_enabled = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ if (is_enabled == enable)
+ return;
+
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable)
+ intel_pps_backlight_on(intel_dp);
+ else
+ intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
+
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ if (intel_dp->pps.pps_pipe != pipe)
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+}
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+
+ /*
+ * We may be stealing the power
+ * sequencer from another port.
+ */
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+ intel_dp->pps.active_pipe = crtc->pipe;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /* now it's all ours */
+ intel_dp->pps.pps_pipe = crtc->pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+}
+
+static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+bool intel_pps_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp->pps.last_power_on = jiffies;
+ intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, pp_ctl;
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ pp_ctl = ilk_get_pp_control(intel_dp);
+
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+ /* Pull timing values out of registers */
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
+
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
+
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ } else {
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ }
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+ intel_pps_readout_hw_state(intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
+ intel_pps_readout_hw_state(intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
+
+ vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 1300ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state("vbt", &vbt);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->pps.backlight_on_delay = get_delay(t8);
+ intel_dp->pps.backlight_off_delay = get_delay(t9);
+ intel_dp->pps.panel_power_down_delay = get_delay(t10);
+ intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->pps.panel_power_up_delay,
+ intel_dp->pps.panel_power_down_delay,
+ intel_dp->pps.panel_power_cycle_delay);
+
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->pps.backlight_on_delay,
+ intel_dp->pps.backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, port_sel = 0;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ struct pps_registers regs;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * intel_pps_vdd_on_unlocked() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ilk_get_pp_control(intel_dp);
+
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ }
+
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ port_sel = PANEL_PORT_SELECT_VLV(port);
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ switch (port) {
+ case PORT_A:
+ port_sel = PANEL_PORT_SELECT_DPA;
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
+ port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
+ }
+
+ pp_on |= port_sel;
+
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+ /*
+ * Compute the divisor for the pp clock, simply matching the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /*
+ * Reinit the power sequencer also on the resume path, in case
+ * BIOS did something nasty with it.
+ */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_pps_vdd_sanitize(intel_dp);
+ }
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+ INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+ pps_init_timestamps(intel_dp);
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+ * everywhere registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+ }
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+ i915->pps_mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->pps_mmio_base = VLV_PPS_BASE;
+ else
+ i915->pps_mmio_base = PPS_BASE;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 000000000000..fbbcca782e7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
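+/*
+ * Runs the body once with the PPS lock (and the display power reference it
+ * implies) held, assuming a valid wakeref; intel_pps_unlock() returns 0,
+ * which terminates the loop.
+ */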
+#define with_intel_pps_lock(dp, wf) \
+ for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_power(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b3631b722de3..850cb7f5b332 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,9 +28,10 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -305,7 +306,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
@@ -811,6 +812,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
&crtc_state->hw.adjusted_mode;
int psr_setup_time;
+ /*
+ * Current PSR panels don't work reliably with VRR enabled, so
+ * if VRR is enabled, do not enable PSR.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
if (!CAN_PSR(dev_priv))
return;
@@ -1185,7 +1193,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 val;
+ const struct drm_rect *clip;
+ u32 val, offset;
+ int ret, x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
@@ -1196,16 +1206,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!val || plane->id == PLANE_CURSOR)
return;
- val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+ clip = &plane_state->psr2_sel_fetch_area;
+
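+ /* Fetch position: Y in pipe coordinates, X at the plane origin. */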
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
- val = plane_state->color_plane[color_plane].y << 16;
- val |= plane_state->color_plane[color_plane].x;
+ /* TODO: consider auxiliary surfaces */
+ x = plane_state->uapi.src.x1 >> 16;
+ y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+ ret);
+ val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
/* Sizes are 0 based */
- val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+ val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
@@ -1237,9 +1256,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
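+ /* The SU region start/end addresses are in 4-line units, 1-based. */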
val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
crtc_state->psr2_man_track_ctl = val;
}
@@ -1264,8 +1285,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
- struct drm_rect pipe_clip = { .y1 = -1 };
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -1277,13 +1298,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
+ /*
+ * Calculate the minimal selective fetch area of each plane and the
+ * pipe damaged area.
+ * In the next loop the plane selective fetch area will actually be set
+ * using the whole pipe damaged area.
+ */
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect temp;
+ struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_mode_rect *damaged_clips;
+ u32 num_clips, j;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
+ if (!new_plane_state->uapi.visible &&
+ !old_plane_state->uapi.visible)
+ continue;
+
/*
* TODO: Not clear how to handle planes with negative position,
* also planes are not updated if they have a negative X
@@ -1295,18 +1328,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
break;
}
- if (!new_plane_state->uapi.visible)
- continue;
+ num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
/*
- * For now doing a selective fetch in the whole plane area,
- * optimizations will come in the future.
+ * If the plane visibility changed or the plane moved, mark the
+ * whole plane area as damaged as it needs to be completely redrawn
+ * in the new and old positions.
*/
- temp.y1 = new_plane_state->uapi.dst.y1;
- temp.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &temp);
+ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+ !drm_rect_equals(&new_plane_state->uapi.dst,
+ &old_plane_state->uapi.dst)) {
+ if (old_plane_state->uapi.visible) {
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+ (!num_clips &&
+ new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+ /*
+ * If the plane doesn't have damaged areas but the
+ * framebuffer changed or alpha changed, mark the whole
+ * plane area as damaged.
+ */
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ continue;
+ }
+
+ drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
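+ /* Union all damage clips that intersect the plane's visible source. */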
+ for (j = 0; j < num_clips; j++) {
+ struct drm_rect clip;
+
+ clip.x1 = damaged_clips[j].x1;
+ clip.y1 = damaged_clips[j].y1;
+ clip.x2 = damaged_clips[j].x2;
+ clip.y2 = damaged_clips[j].y2;
+ if (drm_rect_intersect(&clip, &src))
+ clip_area_update(&damaged_area, &clip);
+ }
+
+ if (damaged_area.y1 == -1)
+ continue;
+
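+ /* Translate the damaged area from plane source to pipe coordinates. */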
+ damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (full_update)
+ goto skip_sel_fetch_set_loop;
+
+ /* It must be aligned to 4 lines */
+ pipe_clip.y1 -= pipe_clip.y1 % 4;
+ if (pipe_clip.y2 % 4)
+ pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+
+ /*
+ * Now that we have the pipe damaged area, check if it intersects
+ * with each plane; if it does, set the plane selective fetch area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect *sel_fetch_area, inter;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+ !new_plane_state->uapi.visible)
+ continue;
+
+ inter = pipe_clip;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+ sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
}
+skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 019a2d6d807a..993543334a1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -49,6 +49,8 @@
#include "intel_psr.h"
#include "intel_dsi.h"
#include "intel_sprite.h"
+#include "i9xx_plane.h"
+#include "intel_vrr.h"
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -61,13 +63,15 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-/* FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
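+ /* Interlaced timings are given in frame lines; the hardware counts field lines. */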
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
/**
* intel_pipe_update_start() - start update of a set of display registers
@@ -97,9 +101,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
- vblank_start = adjusted_mode->crtc_vblank_start;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -187,6 +192,36 @@ irq_disable:
local_irq_disable();
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
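+ /* Bucket the update time into a log2 histogram, roughly 0.5us at the low end. */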
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
@@ -235,6 +270,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
local_irq_enable();
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
if (intel_vgpu_active(dev_priv))
return;
@@ -249,15 +287,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
- else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
- VBLANK_EVASION_TIME_US)
- drm_warn(&dev_priv->drm,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(pipe),
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- VBLANK_EVASION_TIME_US);
-#endif
+
+ dbg_vblank_evade(crtc, end_vbl_time);
}
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
@@ -618,13 +649,19 @@ skl_program_scaler(struct intel_plane *plane,
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -672,52 +709,7 @@ icl_program_input_csc(struct intel_plane *plane,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +726,8 @@ icl_program_input_csc(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
@@ -755,7 +741,8 @@ icl_program_input_csc(struct intel_plane *plane,
static void
skl_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned long irqflags;
@@ -766,6 +753,9 @@ skl_plane_async_flip(struct intel_plane *plane,
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
@@ -851,6 +841,10 @@ skl_program_plane(struct intel_plane *plane,
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
skl_write_plane_wm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
@@ -942,6 +936,28 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
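+/*
+ * Async flip completion is signalled via the per-plane flip done
+ * interrupt; these helpers toggle it under the irq lock.
+ */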
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1835,7 +1851,26 @@ g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- return 16384;
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 16 * 1024;
+}
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 16 * 1024);
}
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
@@ -2350,7 +2385,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@@ -2840,6 +2876,7 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2848,6 +2885,7 @@ static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -3038,6 +3076,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
break;
default:
return false;
@@ -3274,7 +3313,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- plane->async_flip = skl_plane_async_flip;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -3382,11 +3427,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->max_stride = i9xx_plane_max_stride;
plane->update_plane = vlv_update_plane;
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@@ -3400,16 +3445,18 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ plane->max_stride = hsw_sprite_max_stride;
plane->min_cdclk = hsw_plane_min_cdclk;
- else
+ } else {
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = ivb_sprite_min_cdclk;
+ }
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -3417,11 +3464,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = g4x_update_plane;
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index cd2104ba1ca1..76126dd8d584 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,6 +17,16 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
+/*
+ * FIXME: We should only take the spinlock once for the entire update
+ * instead of once per mmio write.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
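
To put the budget above in perspective, a standalone sketch (not driver code) converting it to scanlines for a hypothetical 1080p60 timing:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 1080p60 timing: 148.5 MHz dotclock, htotal 2200 */
	double line_us = 2200.0 / 148500.0 * 1000.0;   /* ~14.8 us/line */

	printf("100 us ~= %.1f scanlines\n", 100.0 / line_us);
	printf("250 us ~= %.1f scanlines\n", 250.0 / line_us);
	return 0;
}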
+
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 4346bc1a747a..2cefc13535a0 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static void
-tc_port_load_fia_params(struct drm_i915_private *i915,
- struct intel_digital_port *dig_port)
-{
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
- u32 modular_fia;
-
- if (INTEL_INFO(i915)->display.has_modular_fia) {
- modular_fia = intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
- modular_fia &= MODULAR_FIA_MASK;
- } else {
- modular_fia = 0;
- }
-
- /*
- * Each Modular FIA instance houses 2 TC ports. In SOC that has more
- * than two TC ports, there are multiple instances of Modular FIA.
- */
- if (modular_fia) {
- dig_port->tc_phy_fia = tc_port / 2;
- dig_port->tc_phy_fia_idx = tc_port % 2;
- } else {
- dig_port->tc_phy_fia = FIA1;
- dig_port->tc_phy_fia_idx = tc_port;
- }
-}
-
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
@@ -262,7 +232,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
@@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
mutex_unlock(&dig_port->tc_lock);
}
+static bool
+tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ wakeref = tc_cold_block(dig_port);
+ val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ tc_cold_unblock(dig_port, wakeref);
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ return val & MODULAR_FIA_MASK;
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SoCs with more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (tc_has_modular_fia(i915, dig_port)) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
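
A standalone sketch (not driver code) of the resulting mapping when Modular FIA is present:

#include <stdio.h>

int main(void)
{
	/* Each Modular FIA instance hosts two TC ports: port N lands in
	 * instance N / 2 at index N % 2. */
	for (int tc_port = 0; tc_port < 6; tc_port++)
		printf("TC%d -> FIA%d, idx %d\n",
		       tc_port, tc_port / 2, tc_port % 2);
	return 0;
}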
+
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 49b4b5fca941..187ec573de59 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
+ RKL_DDC_BUS_DDI_D = 0x3,
+ RKL_DDC_BUS_DDI_E,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index e2716a67b281..f58cc5700784 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
else if (vdsc_cfg->bits_per_component == 12)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
- /* RC_MODEL_SIZE is a constant across all configurations */
- vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
@@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
- pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 000000000000..a9c2b2fd9252
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp;
+ const struct drm_display_info *info = &connector->display_info;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ /*
+ * The DP sink is capable of VRR video timings if the
+ * Ignore MSA bit is set in the DPCD.
+ * The EDID monitor range should also span at least 10 Hz for a
+ * reasonable Adaptive Sync / Variable Refresh Rate end user experience.
+ */
+ return HAS_VRR(i915) &&
+ drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
+ info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+ int i;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->uapi.vrr_enabled !=
+ old_crtc_state->uapi.vrr_enabled)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+}
+
+/*
+ * Without VRR, registers get latched at:
+ * vblank_start
+ *
+ * With VRR, the earliest point registers can get latched is:
+ * intel_vrr_vmin_vblank_start(), which, if we want to maintain
+ * the correct min vtotal, is >= vblank_start + 1
+ *
+ * The latest point registers can get latched is the vmax decision boundary:
+ * intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable in the 0-3 range.
+ */
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* The hw imposes the extra scanline before frame start */
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ /* The min vblank is actually determined by the flipline, which is always >= vmin + 1 */
+ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
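
A standalone sketch (not driver code) of the latch window described above, using made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values: the vblank exit length is
	 * pipeline_full + framestart_delay + 1 scanlines. */
	int vmin = 1125, vmax = 2250;
	int pipeline_full = 100, framestart_delay = 1;
	int exit_len = pipeline_full + framestart_delay + 1;

	/* flipline >= vmin + 1, hence the "+ 1" on the vmin side */
	printf("earliest latch: line %d\n", vmin + 1 - exit_len);
	printf("latest latch:   line %d\n", vmax - exit_len);
	return 0;
}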
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
+ if (!intel_vrr_is_capable(&connector->base))
+ return;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+ if (!crtc_state->uapi.vrr_enabled)
+ return;
+
+ vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+ adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+
+ vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ if (vmin >= vmax)
+ return;
+
+ /*
+ * flipline determines the min vblank length the hardware will
+ * generate, and flipline >= vmin + 1, hence we reduce vmin by
+ * one to make sure we can reach the actual min vblank length.
+ */
+ crtc_state->vrr.vmin = vmin - 1;
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.enable = true;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
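
A worked example of the derivation above with hypothetical timings; standalone sketch, not driver code:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 1080p mode: 148500 kHz dotclock, htotal 2200,
	 * vtotal 1125, monitor range 48-144 Hz. */
	long clock = 148500, htotal = 2200, vtotal = 1125;
	int max_vfreq = 144, min_vfreq = 48;

	long vmin = (clock * 1000 + htotal * max_vfreq - 1) /
		    (htotal * max_vfreq);              /* DIV_ROUND_UP */
	long vmax = clock * 1000 / (htotal * min_vfreq);

	if (vmin < vtotal) vmin = vtotal;              /* max_t clamps */
	if (vmax < vtotal) vmax = vtotal;

	printf("vmin=%ld vmax=%ld\n", vmin, vmax);     /* 1125 and 1406 */
	return 0;
}

The raw vmin (469) gets clamped to vtotal, so the effective refresh range for this mode is roughly 48-60 Hz rather than the full 48-144 Hz the monitor advertises.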
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
+ intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+ intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ if (!old_crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+ crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+ crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 000000000000..fac01bf4ab50
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+bool intel_vrr_is_capable(struct drm_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d52f9c177908..f94025ec603a 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- /* Deassert reset */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ /*
+ * Give the panel time to power on and then deassert its reset.
+ * Depending on the VBT MIPI sequences version, the deassert-seq
+ * may already contain the necessary delay; intel_dsi_msleep() skips
+ * the delay in that case. If there is no deassert-seq, an
+ * unconditional msleep() is used to give the panel time to power on.
+ */
+ if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+ msleep(intel_dsi->panel_on_delay);
+ }
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
new file mode 100644
index 000000000000..9e508e7d4629
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "dma_resv_utils.h"
+
+void dma_resv_prune(struct dma_resv *resv)
+{
+ if (dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+}
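
The helper is deliberately opportunistic: if the trylock fails, nothing happens. A hedged usage sketch (the shrinker-style caller below is hypothetical; obj->base.resv matches how GEM objects carry their reservation object):

static void try_prune_object_fences(struct drm_i915_gem_object *obj)
{
	/* Never blocks: dma_resv_prune() bails out unless it can take
	 * the reservation lock without contention. */
	dma_resv_prune(obj->base.resv);
}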
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
new file mode 100644
index 000000000000..b9d8fb5f8367
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DMA_RESV_UTILS_H
+#define DMA_RESV_UTILS_H
+
+struct dma_resv;
+
+void dma_resv_prune(struct dma_resv *resv);
+
+#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4fd38101bb56..4d2f40cf237b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -72,6 +72,8 @@
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h" /* virtual_engine */
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return e;
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+void i915_gem_context_release(struct kref *ref)
{
- GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree_rcu(ctx, rcu);
}
-static void contexts_free_all(struct llist_node *list)
-{
- struct i915_gem_context *ctx, *cn;
-
- llist_for_each_entry_safe(ctx, cn, list, free_link)
- i915_gem_context_free(ctx);
-}
-
-static void contexts_flush_free(struct i915_gem_contexts *gc)
-{
- contexts_free_all(llist_del_all(&gc->free_list));
-}
-
-static void contexts_free_worker(struct work_struct *work)
-{
- struct i915_gem_contexts *gc =
- container_of(work, typeof(*gc), free_work);
-
- contexts_flush_free(gc);
-}
-
-void i915_gem_context_release(struct kref *ref)
-{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
-
- trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &gc->free_list))
- schedule_work(&gc->free_work);
-}
-
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
}
if (i915_request_is_active(rq)) {
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
*active = locked;
ret = true;
}
@@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
struct intel_engine_cs *engine = NULL;
struct i915_request *rq;
+ if (intel_context_has_inflight(ce))
+ return intel_context_inflight(ce);
+
if (!ce->timeline)
return NULL;
@@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
mutex_unlock(&ctx->mutex);
/*
@@ -740,7 +717,8 @@ err_free:
}
static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+ bool *user_engines)
{
struct i915_gem_engines *engines;
@@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
engines = rcu_dereference(ctx->engines);
GEM_BUG_ON(!engines);
+ if (user_engines)
+ *user_engines = i915_gem_context_user_engines(ctx);
+
+ /* successful await => strong mb */
if (unlikely(!i915_sw_fence_await(&engines->fence)))
continue;
@@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
struct intel_context *ce;
int err = 0;
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
@@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
!HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the stale contexts */
- contexts_flush_free(&i915->gem.contexts);
-
ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
@@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
INIT_LIST_HEAD(&gc->list);
-
- INIT_WORK(&gc->free_work, contexts_free_worker);
- init_llist_head(&gc->free_list);
}
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- drm_dbg(&i915->drm, "%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
- flush_work(&i915->gem.contexts.free_work);
- rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -988,7 +955,6 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *i915 = file_priv->dev_priv;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file)
xa_for_each(&file_priv->vm_xa, idx, vm)
i915_vm_put(vm);
xa_destroy(&file_priv->vm_xa);
-
- contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
if (!e) {
i915_active_release(&cb->base);
return -ENOENT;
@@ -1879,27 +1843,6 @@ replace:
return 0;
}
-static struct i915_gem_engines *
-__copy_engines(struct i915_gem_engines *e)
-{
- struct i915_gem_engines *copy;
- unsigned int n;
-
- copy = alloc_engines(e->num_engines);
- if (!copy)
- return ERR_PTR(-ENOMEM);
-
- for (n = 0; n < e->num_engines; n++) {
- if (e->engines[n])
- copy->engines[n] = intel_context_get(e->engines[n]);
- else
- copy->engines[n] = NULL;
- }
- copy->num_engines = n;
-
- return copy;
-}
-
static int
get_engines(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx,
struct i915_context_param_engines __user *user;
struct i915_gem_engines *e;
size_t n, count, size;
+ bool user_engines;
int err = 0;
- err = mutex_lock_interruptible(&ctx->engines_mutex);
- if (err)
- return err;
+ e = __context_engines_await(ctx, &user_engines);
+ if (!e)
+ return -ENOENT;
- e = NULL;
- if (i915_gem_context_user_engines(ctx))
- e = __copy_engines(i915_gem_context_engines(ctx));
- mutex_unlock(&ctx->engines_mutex);
- if (IS_ERR_OR_NULL(e)) {
+ if (!user_engines) {
+ i915_sw_fence_complete(&e->fence);
args->size = 0;
- return PTR_ERR_OR_ZERO(e);
+ return 0;
}
count = e->num_engines;
@@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx,
args->size = size;
err_free:
- free_engines(e);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst,
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
- struct i915_gem_engines *clone;
+ struct i915_gem_engines *clone, *e;
bool user_engines;
unsigned long n;
+ e = __context_engines_await(src, &user_engines);
+ if (!e)
+ return -ENOENT;
+
clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
@@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst,
}
}
clone->num_engines = n;
-
- user_engines = i915_gem_context_user_engines(src);
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
/* Serialised by constructor */
engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst,
return 0;
err_unlock:
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index a133f92bbedb..b5c908f3f4f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ae14ca24a11f..1449f54924e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -108,7 +108,6 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */
struct list_head link;
- struct llist_node free_link;
/**
* @ref: reference count
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 000000000000..45d60e3d98e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct intel_memory_region *mr,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ u32 handle;
+ u64 size;
+ int ret;
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
+ if (size == 0)
+ return -EINVAL;
+
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ /* Allocate the new object */
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(size != obj->base.size);
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *handle_p = handle;
+ *size_p = size;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ enum intel_memory_type mem_type;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
+ &args->size, &args->handle);
+}
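
A standalone sketch (not driver code) of the pitch/size math above for a common case:

#include <stdio.h>

int main(void)
{
	/* 1920x1080 at 32 bpp (DRM_FORMAT_XRGB8888) */
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int cpp = (bpp + 7) / 8;                /* 4 */
	unsigned int pitch = (width * cpp + 63) & ~63u;  /* ALIGN(, 64) */
	unsigned long long size = (unsigned long long)pitch * height;

	/* 7680 bytes/row, 8294400 bytes total; 7680 is below the fb
	 * stride limit, so the extra 4KiB pitch alignment is skipped. */
	printf("pitch=%u size=%llu\n", pitch, size);
	return 0;
}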
+
+/**
+ * i915_gem_create_ioctl - create a new mm object and return a handle to it
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create *args = data;
+
+ i915_gem_flush_free_objects(i915);
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
+ &args->size, &args->handle);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index fcce6909f201..36f54cedaaeb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,6 +5,7 @@
*/
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -15,13 +16,58 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
+ }
+ spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -370,6 +416,7 @@ retry:
}
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
@@ -387,48 +434,6 @@ err:
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
@@ -451,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -569,9 +574,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
@@ -619,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
@@ -670,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required).
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bcc80f428172..d70ca36f74f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,6 +15,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
@@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -1046,7 +1045,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
int err;
if (!pool) {
- pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+ cache->has_llc ?
+ I915_MAP_WB :
+ I915_MAP_WC);
if (IS_ERR(pool))
return PTR_ERR(pool);
}
@@ -1287,15 +1289,14 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_pool;
- cmd = i915_gem_object_pin_map(pool->obj,
- cache->has_llc ?
- I915_MAP_FORCE_WB :
- I915_MAP_FORCE_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -2457,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb)
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
eb->batch_pool = pool;
@@ -2533,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
err = eb_move_to_gpu(eb);
if (err)
return err;
@@ -2573,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return err;
}
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
return 0;
}
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(&i915->gt));
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 932ee21e6609..194f35342710 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
size, flags);
}
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
@@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3..036d53c01de9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 00d24000b5e8..70f798405f7f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,13 +25,13 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_memcpy.h"
#include "i915_trace.h"
static struct i915_global_object {
@@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains)
-{
- struct i915_vma *vma;
-
- assert_object_held(obj);
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (i915_vma_unset_ggtt_write(vma))
- intel_gt_flush_ggtt_writes(vma->vm->gt);
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
@@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
}
}
+static void
+i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void *src_map;
+ void *src_ptr;
+
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(src_ptr, size);
+ memcpy(dst, src_ptr, size);
+
+ kunmap_atomic(src_map);
+}
+
+static void
+i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void __iomem *src_map;
+ void __iomem *src_ptr;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
+
+ src_map = io_mapping_map_wc(&obj->mm.region->iomap,
+ dma - obj->mm.region->region.start,
+ PAGE_SIZE);
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
+ memcpy_fromio(dst, src_ptr, size);
+
+ io_mapping_unmap(src_map);
+}
+
+/**
+ * i915_gem_object_read_from_page - read data from the page of a GEM object
+ * @obj: GEM object to read from
+ * @offset: offset within the object
+ * @dst: buffer to store the read data
+ * @size: size to read
+ *
+ * Reads data from @obj at the specified offset. The requested region to read
+ * from can't cross a page boundary. The caller must ensure that @obj pages
+ * are pinned and that @obj is synced wrt. any related writes.
+ *
+ * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * unsupported.
+ */
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ GEM_BUG_ON(offset >= obj->base.size);
+ GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_gem_object_has_struct_page(obj))
+ i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
+ else if (i915_gem_object_has_iomem(obj))
+ i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
+ else
+ return -ENODEV;
+
+ return 0;
+}
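
A hedged usage sketch of the new helper (the wrapper below is hypothetical; pinning follows the kerneldoc requirement that pages stay pinned for the duration of the read):

static int peek_u32(struct drm_i915_gem_object *obj, u64 offset, u32 *val)
{
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* offset must not make the 4-byte read cross a page boundary */
	err = i915_gem_object_read_from_page(obj, offset, val, sizeof(*val));

	i915_gem_object_unpin_pages(obj);
	return err;
}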
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index be14486f63a7..d0ae834d787a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -188,6 +188,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -201,6 +219,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+}
+
+static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
@@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains);
-
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
@@ -486,7 +498,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
@@ -512,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr);
+
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout);
@@ -540,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index aee7ad3cc3c6..d6dac21fce0b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
@@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
@@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e2d9b7e1e152..0438e00d4ca7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops {
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
enum i915_mmap_type {
I915_MMAP_TYPE_GTT = 0,
I915_MMAP_TYPE_WC,
@@ -142,8 +150,6 @@ struct drm_i915_gem_object {
*/
struct list_head obj_link;
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -167,6 +173,7 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY BIT(2)
+#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -275,12 +282,6 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
} mm;
/** Record of address bit 17 of each page at last unbind. */
@@ -295,6 +296,8 @@ struct drm_i915_gem_object {
struct work_struct *work;
} userptr;
+ struct drm_mm_node *stolen;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e2c7b2a7895f..43028f3539a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ bool shrinkable;
int i;
lockdep_assert_held(&obj->mm.lock);
@@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
@@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
- if (i915_gem_object_is_shrinkable(obj)) {
+ shrinkable = i915_gem_object_is_shrinkable(obj);
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ shrinkable = false;
+ }
+
+ if (shrinkable) {
struct list_head *list;
unsigned long flags;
@@ -238,7 +241,7 @@ unlock:
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
struct page *stack[32], **pages = stack, *page;
@@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
vaddr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack)
kvfree(pages);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
resource_size_t iomap = obj->mm.region->iomap.base -
obj->mm.region->region.start;
@@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
void *vaddr;
if (type != I915_MAP_WC)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (n_pfn > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
if (!pfns)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
if (pfns != stack)
kvfree(pfns);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
/* get, pin, and map the pages of the object into kernel space */
@@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_unlock;
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+ }
smp_mb__before_atomic();
}
@@ -362,7 +369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
- err = -EBUSY;
+ ptr = ERR_PTR(-EBUSY);
goto err_unpin;
}
@@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!ptr) {
if (GEM_WARN_ON(type == I915_MAP_WC &&
!static_cpu_has(X86_FEATURE_PAT)))
- ptr = NULL;
+ ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
else
ptr = i915_gem_object_map_pfn(obj, type);
- if (!ptr) {
- err = -ENOMEM;
+ if (IS_ERR(ptr))
goto err_unpin;
- }
obj->mm.mapping = page_pack_bits(ptr, type);
}
@@ -393,8 +398,6 @@ out_unlock:
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(err);
goto out_unlock;
}
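
With the conversion above, pin_map failures are always ERR_PTR()-encoded rather than NULL; a hedged caller sketch:

static int use_mapping(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))               /* never NULL on failure now */
		return PTR_ERR(vaddr);

	/* ... read/write through vaddr ... */

	i915_gem_object_unpin_map(obj);
	return 0;
}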
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3a4dfe2ef1da..3c0b157e2a35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops == &i915_gem_phys_ops)
return 0;
- if (obj->ops != &i915_gem_shmem_ops)
+ if (!i915_gem_object_is_shmem(obj))
return -EINVAL;
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
@@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- if (obj->mm.quirked) {
+ if (i915_gem_object_has_tiling_quirk(obj)) {
err = -EFAULT;
goto err_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 40d3e40500fa..000e1cd8e920 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,6 +11,13 @@
#include "i915_drv.h"
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+ pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
- return list_first_entry_or_null(list,
- struct drm_i915_gem_object,
- mm.link);
-}
-
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
NULL
}, **phase;
unsigned long flags;
+ bool flush = false;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,56 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- LIST_HEAD(keep);
+ list_for_each_entry(obj, *phase, mm.link) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+ __start_cpu_write(obj); /* presume auto-hibernate */
+ }
+ }
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ if (flush)
+ wbinvd_on_all_cpus();
+}
- while ((obj = first_mm_object(*phase))) {
- list_move_tail(&obj->mm.link, &keep);
+int i915_gem_freeze(struct drm_i915_private *i915)
+{
+	/* Discard all purgeable objects; let userspace recover them as
+	 * required after resuming.
+ */
+ i915_gem_shrink_all(i915);
- /* Beware the background _i915_gem_free_objects */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
+ return 0;
+}
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+int i915_gem_freeze_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
- i915_gem_object_put(obj);
+ /*
+ * Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ *
+ * To try and reduce the hibernation image, we manually shrink
+ * the objects as well, see i915_gem_freeze()
+ */
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- }
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
+ i915_gem_drain_freed_objects(i915);
- list_splice_tail(&keep, *phase);
- }
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ wbinvd_on_all_cpus();
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+ __start_cpu_write(obj);
+
+ return 0;
}
void i915_gem_resume(struct drm_i915_private *i915)
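Editor's aside: the hunk above guards wbinvd_on_all_cpus() behind CONFIG_X86, degrading to a pr_warn() on other architectures so the call site still compiles. That compile-time fallback is a common idiom worth isolating; a standalone sketch (flush_all_caches and the message are illustrative names, not i915 API):

#include <stdio.h>

/* Pretend this is the arch-specific fast path. */
#if defined(__x86_64__) || defined(__i386__)
static void flush_all_caches(void) { puts("wbinvd on all cpus"); }
#else
/* Degrade gracefully: warn at the call site instead of failing to build. */
#define flush_all_caches() \
	fprintf(stderr, "demo: missing cache flush in %s\n", __func__)
#endif

int main(void)
{
	flush_all_caches();
	return 0;
}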
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 26b78dbdc225..c9a66630e92e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -19,4 +19,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
void i915_gem_suspend(struct drm_i915_private *i915);
void i915_gem_suspend_late(struct drm_i915_private *i915);
+int i915_gem_freeze(struct drm_i915_private *i915);
+int i915_gem_freeze_late(struct drm_i915_private *i915);
+
#endif /* __I915_GEM_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384d7e0e..3e3dad22a683 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
+ const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size;
@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!st)
return -ENOMEM;
- if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
- GEM_BUG_ON(overflows_type(block_size, sg->length));
+ while (block_size) {
+ u64 len;
- if (offset != prev_end ||
- add_overflows_t(typeof(sg->length), sg->length, block_size)) {
- if (st->nents) {
- sg_page_sizes |= sg->length;
- sg = __sg_next(sg);
+ if (offset != prev_end || sg->length >= max_segment) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = 0;
+ sg->length = 0;
+ st->nents++;
}
- sg_dma_address(sg) = mem->region.start + offset;
- sg_dma_len(sg) = block_size;
+ len = min(block_size, max_segment - sg->length);
+ sg->length += len;
+ sg_dma_len(sg) += len;
- sg->length = block_size;
+ offset += len;
+ block_size -= len;
- st->nents++;
- } else {
- sg->length += block_size;
- sg_dma_len(sg) += block_size;
+ prev_end = offset;
}
-
- prev_end = offset + block_size;
}
sg_page_sizes |= sg->length;
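Editor's aside: the rewritten loop caps each scatterlist entry at max_segment and splits larger buddy blocks across several entries, while still coalescing physically contiguous blocks into one. The same splitting/merging logic in a freestanding sketch — plain structs stand in for struct scatterlist, and the 64K cap is illustrative rather than i915's i915_sg_segment_size():

#include <stdio.h>
#include <stdint.h>

#define MAX_SEGMENT	(64u << 10)	/* illustrative cap */

struct seg { uint64_t start, len; };

/* Append [offset, offset + block_size) to segs[], starting a new entry
 * when the block is discontiguous or the current entry is full. */
static unsigned int add_block(struct seg *segs, unsigned int n,
			      uint64_t offset, uint64_t block_size,
			      uint64_t *prev_end)
{
	while (block_size) {
		uint64_t len;

		if (offset != *prev_end || (n && segs[n - 1].len >= MAX_SEGMENT)) {
			segs[n].start = offset;
			segs[n].len = 0;
			n++;
		}

		len = block_size;
		if (len > MAX_SEGMENT - segs[n - 1].len)
			len = MAX_SEGMENT - segs[n - 1].len;

		segs[n - 1].len += len;
		offset += len;
		block_size -= len;
		*prev_end = offset;
	}
	return n;
}

int main(void)
{
	struct seg segs[16];
	uint64_t prev_end = ~0ull;
	unsigned int i, n = 0;

	n = add_block(segs, n, 0, 96u << 10, &prev_end);	 /* split at 64K */
	n = add_block(segs, n, 96u << 10, 8u << 10, &prev_end); /* coalesced */
	for (i = 0; i < n; i++)
		printf("seg %u: +%llu len %llu\n", i,
		       (unsigned long long)segs[i].start,
		       (unsigned long long)segs[i].len);
	return 0;
}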
@@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ int err;
/*
* NB: Our use of resource_size_t for the size stems from using struct
@@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = mem->ops->create_object(mem, size, flags);
- if (!IS_ERR(obj))
- trace_i915_gem_object_create(obj);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ err = mem->ops->init_object(mem, obj, size, flags);
+ if (err)
+ goto err_object_free;
+
+ trace_i915_gem_object_create(obj);
return obj;
+
+err_object_free:
+ i915_gem_object_free(obj);
+ return ERR_PTR(err);
}
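Editor's aside: the create_object → init_object conversion above moves allocation out of the region backends, so every backend implements an init-only hook and the shared caller owns the alloc/free pairing. A sketch of that ownership split under hypothetical names (obj_alloc/obj_free and the ops struct are stand-ins for the i915 equivalents):

#include <stdlib.h>
#include <errno.h>

struct object { unsigned long size; };

struct region_ops {
	/* Backends only initialise; they never allocate or free. */
	int (*init_object)(struct object *obj, unsigned long size);
};

static int demo_init(struct object *obj, unsigned long size)
{
	if (!size)
		return -EINVAL;
	obj->size = size;
	return 0;
}

static const struct region_ops demo_ops = { .init_object = demo_init };

/* Shared caller: allocates, delegates init, unwinds on failure. */
static struct object *create_object(const struct region_ops *ops,
				    unsigned long size, int *errp)
{
	struct object *obj = malloc(sizeof(*obj));
	int err;

	if (!obj) {
		*errp = -ENOMEM;
		return NULL;
	}

	err = ops->init_object(obj, size);
	if (err) {
		free(obj);	/* the one place that pairs with malloc */
		*errp = err;
		return NULL;
	}
	return obj;
}

int main(void)
{
	int err = 0;
	struct object *obj = create_object(&demo_ops, 4096, &err);

	free(obj);
	return err ? 1 : 0;
}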
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 75e8b71c18b9..cf83c208688c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915,
return 0;
}
-static struct drm_i915_gem_object *
-create_shmem(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int shmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
ret = __create_shmem(i915, &obj->base, size);
if (ret)
- goto fail;
+ return ret;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_I965GM(i915) || IS_I965G(i915)) {
@@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
+ return 0;
}
struct drm_i915_gem_object *
@@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem)
static const struct intel_memory_region_ops shmem_region_ops = {
.init = init_shmem,
.release = release_shmem,
- .create_object = create_shmem,
+ .init_object = shmem_object_init,
};
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
@@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
PAGE_SIZE, 0,
&shmem_region_ops);
}
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_shmem_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index dc8f052a0ffe..c2dba1cd9532 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_requests.h"
+#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
mutex_unlock(&obj->mm.lock);
}
+ dma_resv_prune(obj->base.resv);
+
scanned += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 29bffc6afcc1..a1e197a6e999 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
-
- i915_gem_object_release_memory_region(obj);
-
i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
+
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.release = i915_gem_object_release_stolen,
};
-static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct intel_memory_region *mem,
- struct drm_mm_node *stolen)
+static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
- struct drm_i915_gem_object *obj;
unsigned int cache_level;
- int err = -ENOMEM;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- goto err;
+ int err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
@@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
err = i915_gem_object_pin_pages(obj);
if (err)
- goto cleanup;
+ return err;
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-cleanup:
- i915_gem_object_free(obj);
-err:
- return ERR_PTR(err);
+ return 0;
}
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (size == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
goto err_remove;
- return obj;
+ return 0;
err_remove:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ret;
}
struct drm_i915_gem_object *
@@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem)
static const struct intel_memory_region_ops i915_region_stolen_ops = {
.init = init_stolen,
.release = release_stolen,
- .create_object = _i915_gem_object_create_stolen,
+ .init_object = _i915_gem_object_stolen_init,
};
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -767,21 +753,32 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
mutex_lock(&i915->mm.stolen_lock);
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ obj = i915_gem_object_alloc();
+ if (!obj) {
+ ret = -ENOMEM;
goto err_stolen;
+ }
+
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
+ goto err_object_free;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
+err_object_free:
+ i915_gem_object_free(obj);
err_stolen:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ERR_PTR(ret);
+}
+
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_object_stolen_ops;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 61e028063f9f..b03489706796 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
resource_size_t stolen_offset,
resource_size_t size);
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
+
#define I915_GEM_STOLEN_BIAS SZ_128K
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ffcaee74a249..d589d3d81085 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
mutex_unlock(&obj->mm.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 8af55cd3e690..4b9856d5ba14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -5,10 +5,12 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>
#include "gt/intel_engine.h"
+#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
* Opportunistically prune the fences iff we know they have *all* been
* signaled.
*/
- if (prune_fences && dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
+ if (prune_fences)
+ dma_resv_prune(resv);
return timeout;
}
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
@@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence,
rq = to_request(fence);
engine = rq->engine;
- local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(rq, attr);
rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ if (dma_fence_is_signaled(fence))
+ return;
+
+ local_bh_disable();
+
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
int i;
for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
+ fence_set_priority(array->fences[i], attr);
+ } else if (__dma_fence_is_chain(fence)) {
+ struct dma_fence *iter;
+
+ /* The chain is ordered; if we boost the last, we boost all */
+ dma_fence_chain_for_each(iter, fence) {
+ fence_set_priority(to_dma_fence_chain(iter)->fence,
+ attr);
+ break;
+ }
+ dma_fence_put(iter);
} else {
- __fence_set_priority(fence, attr);
+ fence_set_priority(fence, attr);
}
+
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
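Editor's aside: i915_gem_fence_wait_priority() recurses one level into containers — every element of a fence-array is boosted, but for an ordered dma-fence-chain it boosts only the head link, relying on the scheduler to propagate priority along dependencies. A toy model of that dispatch (the container tags and walk are illustrative; the real code uses dma_fence_is_array() and dma_fence_chain_for_each()):

#include <stdio.h>

enum ftype { FENCE_PLAIN, FENCE_ARRAY, FENCE_CHAIN };

struct fence {
	enum ftype type;
	int prio;
	struct fence **children;	/* array members or chain links */
	int nchildren;
};

static void set_priority(struct fence *f, int prio)
{
	if (prio > f->prio)
		f->prio = prio;
}

/* Recurse once into containers, mirroring the patch's dispatch. */
static void wait_priority(struct fence *f, int prio)
{
	int i;

	switch (f->type) {
	case FENCE_ARRAY:
		for (i = 0; i < f->nchildren; i++)
			set_priority(f->children[i], prio);
		break;
	case FENCE_CHAIN:
		/* Ordered chain: boosting the head is enough; the
		 * scheduler back-propagates along dependencies. */
		if (f->nchildren)
			set_priority(f->children[0], prio);
		break;
	default:
		set_priority(f, prio);
	}
}

int main(void)
{
	struct fence a = { FENCE_PLAIN, 0, NULL, 0 };
	struct fence b = { FENCE_PLAIN, 0, NULL, 0 };
	struct fence *links[] = { &a, &b };
	struct fence chain = { FENCE_CHAIN, 0, links, 2 };

	wait_priority(&chain, 10);
	printf("head prio %d, older prio %d\n", a.prio, b.prio);
	return 0;
}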
int
@@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
int ret;
ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ &excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
+ i915_gem_fence_wait_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
@@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
if (excl) {
- fence_set_priority(excl, attr);
+ i915_gem_fence_wait_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index a768ec61e966..2fb501a78a85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct scatterlist *sg, *src, *end;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1f35e71429b4..aacf4856ccb4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
+ /*
+ * The dma-api is like a box of chocolates when it comes to the
+ * alignment of dma addresses; however, for LMEM we have total control
+ * and so can guarantee alignment. Likewise, when we allocate our blocks
+ * they should appear in descending order, and if we know that we align
+ * to the largest page size for the GTT address, we should be able to
+ * assert that if we see 2M physical pages then we should also get 2M
+ * GTT pages. If we don't, then something might be wrong in our
+ * construction of the backing pages.
+ *
+ * Maintaining alignment is required to utilise huge pages in the ppGTT.
+ */
+ if (i915_gem_object_is_lmem(obj) &&
+ IS_ALIGNED(vma->node.start, SZ_2M) &&
+ vma->page_sizes.sg & SZ_2M &&
+ vma->page_sizes.gtt < SZ_2M) {
+ pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
+ vma->page_sizes.sg, vma->page_sizes.gtt);
+ err = -EINVAL;
+ }
+
if (obj->mm.page_sizes.gtt) {
pr_err("obj->page_sizes.gtt(%u) should never be set\n",
obj->mm.page_sizes.gtt);
@@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg)
unsigned int flags;
} backends[] = {
{ igt_create_system, 0, },
+ { igt_create_local, 0, },
{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
struct {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 4e36d4897ea6..6a674a7994df 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
- struct rnd_state prng;
+ I915_RND_STATE(prng);
IGT_TIMEOUT(end);
u32 *vaddr;
int err = 0;
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7049a6bbc03d..1117d2a44518 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d27d87a678c8..d429c7643ff2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index e21b5023ca7d..d6783061bc72 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 174a24553322..d4f4452ce5ed 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%dns)\n",
+ seq_printf(m, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
@@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "GPU busy? %s, %llums\n",
+ yesno(gt->awake),
+ ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0..e08dff376339 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
#include "intel_gt.h"
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
{
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
u32 ecochk;
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
}
void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d93d85cd3027..de575fdb033f 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
- u32 max_primitives;
- u32 max_urb_entries;
- u32 cmd_size;
- u32 state_size;
+ u32 max_threads;
u32 state_start;
- u32 batch_size;
+ u32 surface_start;
u32 surface_height;
u32 surface_width;
- u32 scratch_size;
- u32 max_size;
+ u32 size;
};
+static int num_primitives(const struct batch_vals *bv)
+{
+ /*
+ * We need to saturate the GPU with work in order to dispatch
+ * a shader on every HW thread, and clear the thread-local registers.
+ * In short, we have to dispatch work faster than the shaders can
+ * run in order to fill the EU and occupy each HW thread.
+ */
+ return bv->max_threads;
+}
+
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
- bv->max_primitives = 280;
- bv->max_urb_entries = MAX_URB_ENTRIES;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1:
+ bv->max_threads = 70;
+ break;
+ case 2:
+ bv->max_threads = 140;
+ break;
+ case 3:
+ bv->max_threads = 280;
+ break;
+ }
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
- bv->max_primitives = 128;
- bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1: /* including vlv */
+ bv->max_threads = 36;
+ break;
+ case 2:
+ bv->max_threads = 128;
+ break;
+ }
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
- bv->cmd_size = bv->max_primitives * 4096;
- bv->state_size = STATE_SIZE;
- bv->state_start = bv->cmd_size;
- bv->batch_size = bv->cmd_size + bv->state_size;
- bv->scratch_size = bv->surface_height * bv->surface_width;
- bv->max_size = bv->batch_size + bv->scratch_size;
+ bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+ bv->surface_start = bv->state_start + SZ_4K;
+ bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
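Editor's aside: batch_get_defaults() now derives one linear layout — commands from offset 0, a 4K-aligned state page after the per-thread command stream, then the scratch surface. A quick standalone check of the arithmetic using the Haswell GT3 numbers from the table above (pure layout math, no i915 dependencies; the 64-byte figure is the per-MEDIA_OBJECT budget used in the formula):

#include <stdio.h>

#define SZ_1K 1024u
#define SZ_4K 4096u

static unsigned int round_up_pow2(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	unsigned int max_threads = 280;		/* hsw gt3 */
	unsigned int surface_height = 16 * 16;
	unsigned int surface_width = 32 * 2 * 16;
	unsigned int state_start, surface_start, size;

	/* 1K preamble plus one 64-byte MEDIA_OBJECT slot per thread. */
	state_start = round_up_pow2(SZ_1K + max_threads * 64, SZ_4K);
	surface_start = state_start + SZ_4K;
	size = surface_start + surface_height * surface_width;

	printf("state @ %u, surface @ %u, total %u bytes\n",
	       state_start, surface_start, size);
	return 0;
}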
static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
- u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 surface_start =
+ gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@@ -214,13 +234,13 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
- u32 *cs = batch_alloc_items(batch, 0, 12);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
- *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ *cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
- *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
/* dynamic */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* indirect */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
- *cs++ = 0;
- *cs++ = 0;
batch_advance(batch, cs);
}
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
- u32 urb_entries = bv->max_urb_entries;
- u32 threads = bv->max_primitives - 1;
+ u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
- *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+ *cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
- unsigned int inline_data_size;
- unsigned int media_batch_size;
- unsigned int i;
+ unsigned int pkt = 6 + 3;
u32 *cs;
- inline_data_size = 112 * 8;
- media_batch_size = inline_data_size + 6;
-
- cs = batch_alloc_items(batch, 8, media_batch_size);
+ cs = batch_alloc_items(batch, 8, pkt);
- *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+ *cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@@ -317,25 +329,46 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
- *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
- for (i = 3; i < inline_data_size; i++)
- *cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 5);
+ u32 *cs = batch_alloc_items(batch, 0, 4);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 10);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
- PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
+
batch_advance(batch, cs);
}
@@ -344,34 +377,48 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int desc_count = 64;
- const u32 urb_size = 112;
+ const unsigned int desc_count = 1;
+ const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
- u32 interface_descriptor;
+ u32 descriptors;
unsigned int i;
- batch_init(&cmds, vma, start, 0, bv->cmd_size);
- batch_init(&state, vma, start, bv->state_start, bv->state_size);
+ batch_init(&cmds, vma, start, 0, bv->state_start);
+ batch_init(&state, vma, start, bv->state_start, SZ_4K);
- interface_descriptor =
- gen7_fill_interface_descriptor(&state, bv,
- IS_HASWELL(i915) ?
- &cb_kernel_hsw :
- &cb_kernel_ivb,
- desc_count);
+ descriptors = gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
+ gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
- gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_invalidate(&cmds);
+
gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_state_base_address(&cmds, descriptors);
+ gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+ gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
- gen7_emit_interface_descriptor_load(&cmds,
- interface_descriptor,
- desc_count);
-
- for (i = 0; i < bv->max_primitives; i++)
+ /* Execute the kernel on all HW threads */
+ for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +432,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
- return bv.max_size;
+ return bv.size;
- GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+ GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
- emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+ emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);
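Editor's aside: gen7_setup_clear_gpr_bb() keeps a two-phase contract — called with a NULL vma it returns the buffer size to allocate (now bv.size), called again with the vma it emits the batch. A minimal sketch of that query-then-emit shape with a hypothetical emit_blob() standing in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Phase 1: buf == NULL, return the required size.
 * Phase 2: buf != NULL, fill it and return bytes written. */
static size_t emit_blob(unsigned char *buf, size_t bufsz)
{
	static const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };

	if (!buf)
		return sizeof(payload);

	if (bufsz < sizeof(payload))
		return 0;	/* caller under-allocated */

	memcpy(buf, payload, sizeof(payload));
	return sizeof(payload);
}

int main(void)
{
	size_t sz = emit_blob(NULL, 0);		/* query */
	unsigned char *buf = malloc(sz);

	if (!buf)
		return 1;
	emit_blob(buf, sz);			/* emit */
	printf("emitted %zu bytes\n", sz);
	free(buf);
	return 0;
}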
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 000000000000..07ba524da90b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_lrc.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ bool vf_flush_wa = false, dc_flush_wa = false;
+ u32 *cs, flags = 0;
+ int len;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN(rq->engine->i915, 9))
+ vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
+ }
+
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
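Editor's aside: gen8_emit_flush_rcs() shows the standard ring-emission discipline — compute the exact dword count up front, workarounds included, then write exactly that many dwords after intel_ring_begin(). A sketch of the pattern against a plain array (no ring-wrap handling; intel_ring_begin/advance are the real i915 API, this is only the counting discipline):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t ring[64];

int main(void)
{
	int vf_wa = 1;		/* pretend IS_GEN(i915, 9) */
	int len = 6;		/* base PIPE_CONTROL is 6 dwords */
	uint32_t *cs = ring;
	int i;

	/* 1) Size the packet up front, workarounds included. */
	if (vf_wa)
		len += 6;	/* leading NULL pipe control */

	/* 2) Emit exactly 'len' dwords. */
	if (vf_wa)
		for (i = 0; i < 6; i++)
			*cs++ = 0;		/* NULL pipe control */

	*cs++ = 0x7a000004;	/* GFX_OP_PIPE_CONTROL(6) opcode */
	for (i = 0; i < 5; i++)
		*cs++ = 0;	/* flags, address, data */

	/* 3) intel_ring_advance() would assert the promise was kept. */
	assert(cs - ring == len);
	printf("emitted %d dwords\n", len);
	return 0;
}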
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv reg\n");
+ return INVALID_MMIO_REG;
+}
+
+static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 8 + 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ cmd = 4;
+ if (mode & EMIT_INVALIDATE)
+ cmd += 2;
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 2 * hweight8(aux_inv) + 2;
+
+ cs = intel_ring_begin(rq, cmd);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, rq->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+ /* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = hwsp_offset(rq);
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ *
+ * i915_request_started() is used during preemption processing
+ * to decide if the request is currently inside the user payload
+ * or spinning on a kernel semaphore (or earlier). For no-preemption
+ * requests, we do allow preemption on the semaphore before the user
+ * payload, but do not allow preemption once the request is started.
+ *
+ * i915_request_started() is similarly used during GPU hangs to
+ * determine if the user's payload was guilty, and if so, the
+ * request is banned. Before the request is started, it is assumed
+ * to be unharmed and an innocent victim of another's hang.
+ */
+ *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_CHECK;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular all the gen that do not need the w/a at all!), if we
+ * took care to make sure that on every switch into this context
+ * (both ordinary and for preemption) arbitration was enabled,
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ if (unlikely(i915_request_has_nopreempt(rq)))
+ return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+ rq->wa_tail = intel_ring_offset(rq, cs);
+
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(rq);
+
+ return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = gen12_emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ /* XXX Stalling flush before seqno write; post-sync not */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 000000000000..cc6e21d3662a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ memset(batch, 0, 6 * sizeof(u32));
+
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
+ batch[2] = offset;
+
+ return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+ *cs++ = 0; /* We're thrashing one extra dword. */
+
+ return cs;
+}
+
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
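Editor's aside: the header's GGTT-write helpers encode two invariants as GEM_BUG_ONs — qword writes need 8-byte-aligned offsets, and MI_FLUSH_DW addresses must keep bit 5 clear. A userspace restatement of just those checks (plain assertions on the offset, not the real emitters):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static void check_ggtt_write_offset(uint32_t gtt_offset)
{
	/* Qword write: offset must be aligned to 8 bytes. */
	assert((gtt_offset & 7) == 0);
	/* w/a: bit 5 must be zero for the MI_FLUSH_DW address. */
	assert((gtt_offset & (1u << 5)) == 0);
}

int main(void)
{
	check_ggtt_write_offset(0x1000);	/* ok */
	check_ggtt_write_offset(0x1040);	/* ok: aligned, bit 5 clear */
	printf("offsets validated\n");
	return 0;
}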
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7..755522ced60d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-static inline unsigned int
+static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
return i915_pde_index(end, shift) - *idx;
}
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
return (start ^ end) & mask && (start & ~mask) == 0;
}
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
{
GEM_BUG_ON(start >= end);
if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
return end - start;
}
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
return (vm->total + (1ull << shift) - 1) >> shift;
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index a24cc1ff08a0..34a645d6babd 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true;
}
-static inline bool __request_completed(const struct i915_request *rq)
-{
- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
@@ -192,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
@@ -251,13 +234,14 @@ static void signal_irq_work(struct irq_work *work)
intel_breadcrumbs_disarm_irq(b);
rcu_read_lock();
+ atomic_inc(&b->signaler_active);
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
struct i915_request *rq;
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;
- if (!__request_completed(rq))
+ if (!__i915_request_is_complete(rq))
break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -273,17 +257,20 @@ static void signal_irq_work(struct irq_work *work)
list_del_rcu(&rq->signal_link);
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
-
- if (__signal_request(rq))
- /* We own signal_node now, xfer to local list */
- signal = slist_add(&rq->signal_node, signal);
-
if (release) {
- add_retire(b, ce->timeline);
+ if (intel_timeline_is_last(ce->timeline, rq))
+ add_retire(b, ce->timeline);
intel_context_put(ce);
}
+
+ if (__dma_fence_signal(&rq->fence))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
}
}
+ atomic_dec(&b->signaler_active);
rcu_read_unlock();
llist_for_each_safe(signal, sn, signal) {
@@ -342,17 +329,19 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- /* Kick the work once more to drain the signalers */
+ if (!READ_ONCE(b->irq_armed))
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
- while (unlikely(READ_ONCE(b->irq_armed))) {
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
- GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
@@ -363,6 +352,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -372,17 +372,13 @@ static void insert_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
- if (__request_completed(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ if (__i915_request_is_complete(rq)) {
+ irq_signal_request(rq, b);
return;
}
@@ -413,6 +409,8 @@ static void insert_breadcrumb(struct i915_request *rq)
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -453,22 +451,61 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b)
+{
+ struct i915_request *rq, *rn;
+ bool release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->signal_lock, flags);
+
+ if (list_empty(&ce->signals))
+ goto unlock;
+
+ list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ continue;
+
+ list_del_rcu(&rq->signal_link);
+ irq_signal_request(rq, b);
+ i915_request_put(rq);
+ }
+ release = remove_signaling_context(b, ce);
+
+unlock:
+ spin_unlock_irqrestore(&ce->signal_lock, flags);
+ if (release)
+ intel_context_put(ce);
+
+ while (atomic_read(&b->signaler_active))
+ cpu_relax();
+}
+
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
struct intel_context *ce;
@@ -481,8 +518,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
index ed3d1deabfbd..3ce5ce270b04 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_BREADCRUMBS__
#define __INTEL_BREADCRUMBS__
+#include <linux/atomic.h>
#include <linux/irq_work.h>
#include "intel_engine_types.h"
@@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+ atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (atomic_dec_and_test(&b->active))
+ __intel_breadcrumbs_park(b);
+}
static inline void
intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
@@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b);
+
#endif /* __INTEL_BREADCRUMBS__ */
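Usage note: parking is now reference counted. intel_breadcrumbs_unpark() takes a reference on b->active (the engine unpark path in the intel_engine_pm.c hunk later in this diff calls it), and only the intel_breadcrumbs_park() call that drops the count to zero performs the actual __intel_breadcrumbs_park() drain.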
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index a74bb3062bd8..3a084ce8ff5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,17 +29,20 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- /* Not all breadcrumbs are attached to physical HW */
- struct intel_engine_cs *irq_engine;
+ atomic_t active;
spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
struct llist_head signaled_requests;
+ atomic_t signaler_active;
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
+
+ /* Not all breadcrumbs are attached to physical HW */
+ struct intel_engine_cs *irq_engine;
};
#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fda2eba81e22..d24ab6fa0ee5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce)
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+ return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce)
static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return READ_ONCE(ce->runtime.total) * period;
}
static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index b9c8163978a3..8dfd8f656aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,7 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
-#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c132746..e10d78601bbd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -30,6 +30,10 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
int (*alloc)(struct intel_context *ce);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
@@ -58,8 +62,12 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+ __intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+ __intel_context_inflight_count(READ_ONCE((ce)->inflight))
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -81,12 +89,13 @@ struct intel_context {
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
-#define CONTEXT_VALID_BIT 2
-#define CONTEXT_CLOSED_BIT 3
-#define CONTEXT_USE_SEMAPHORES 4
-#define CONTEXT_BANNED 5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
-#define CONTEXT_NOPREEMPT 7
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
u32 *lrc_reg_state;
union {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 760fefdfe392..47ee8578e511 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -15,7 +15,6 @@
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
-#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
void intel_engine_init_execlists(struct intel_engine_cs *engine);
-static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- memset(batch, 0, 6 * sizeof(u32));
-
- batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
- batch[1] = flags1;
- batch[2] = offset;
-
- return batch + 6;
-}
-
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, 0, flags, offset);
-}
-
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
-}
-
-static inline u32 *
-__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = value;
- *cs++ = 0; /* We're thrashing one extra dword. */
-
- return cs;
-}
-
-static inline u32*
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- 0,
- flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32*
-gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- flags0,
- flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32 *
-__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- *cs++ = (MI_FLUSH_DW + 1) | flags;
- *cs++ = gtt_offset;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
-}
-
-static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- GEM_BUG_ON(gtt_offset & (1 << 5));
- /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_flush_dw(cs,
- value,
- gtt_offset | MI_FLUSH_DW_USE_GTT,
- flags | MI_FLUSH_DW_OP_STOREDW);
-}
-
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
@@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ __intel_engine_flush_submission(engine, true);
+}
void intel_engines_reset_default_submission(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0b31670343f5..fb1b1d096975 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -33,12 +33,14 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
-#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -340,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->schedule = NULL;
ewma__engine_latency_init(&engine->latency);
- seqlock_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
+ INIT_LIST_HEAD(&engine->status_page.timelines);
+
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
@@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
return 0;
err_status:
@@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine,
return ce;
}
+static void destroy_pinned_context(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *hwsp = engine->status_page.vma;
+
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+ mutex_lock(&hwsp->vm->mutex);
+ list_del(&ce->timeline->engine_link);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+}
+
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ setup = intel_guc_submission_setup;
+ else if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
@@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
GEM_BUG_ON(!list_empty(&engine->active.requests));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
- cleanup_status_page(engine);
intel_breadcrumbs_free(engine->breadcrumbs);
intel_engine_fini_retire(engine);
@@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
fput(engine->default_state);
- if (engine->kernel_context) {
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- }
+ if (engine->kernel_context)
+ destroy_pinned_context(engine->kernel_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+ cleanup_status_page(engine);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.stop_timeout_ms);
}
-int intel_engine_stop_cs(struct intel_engine_cs *engine)
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+
+ /* A final mmio read to let GPU writes be hopefully flushed to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ int err = 0;
+
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+ ENGINE_TRACE(engine,
+ "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+ ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
-
- err = 0;
- if (__intel_wait_for_register_fw(uncore,
- mode, MODE_IDLE, MODE_IDLE,
- 1000, stop_timeout(engine),
- NULL)) {
- ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
- err = -ETIMEDOUT;
+ /*
+ * Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+ err = -ETIMEDOUT;
}
- /* A final mmio read to let GPU writes be hopefully flushed to memory */
- intel_uncore_posting_read_fw(uncore, mode);
-
return err;
}
@@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
if (!t->func)
return;
- /* Synchronise and wait for the tasklet on another CPU */
- tasklet_kill(t);
-
- /* Having cancelled the tasklet, ensure that is run */
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
}
/**
@@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt, id)
+ for_each_engine(engine, gt, id) {
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
engine->set_default_submission(engine);
+ }
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
@@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(const struct i915_sched_attr *attr,
- char *buf, int x, int len)
-{
- if (attr->priority == I915_PRIORITY_INVALID)
- return x;
-
- x += snprintf(buf + x, len - x,
- " prio=%d", attr->priority);
-
- return x;
-}
-
-static void print_request(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix)
-{
- const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
- char buf[80] = "";
- int x = 0;
-
- x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
-
- drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
- prefix,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags) ? "+" :
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "-" :
- "",
- buf,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
-}
-
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
@@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv)) {
+ if (intel_engine_in_guc_submission_mode(engine)) {
+ /* nothing to print yet */
+ } else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
for (port = execlists->pending; (rq = *port); port++) {
char hdr[160];
@@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1667,7 +1676,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
- engine->fw_domain, atomic_read(&engine->fw_active));
+ engine->fw_domain, READ_ONCE(engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
- print_request(m, rq, "\t\tactive ");
+ i915_request_show(m, rq, "\t\tactive ", 0);
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
@@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- intel_execlists_show_requests(engine, m, print_request, 8);
+ intel_execlists_show_requests(engine, m, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
@@ -1745,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
* add it to the total.
*/
*now = ktime_get();
- if (atomic_read(&engine->stats.active))
+ if (READ_ONCE(engine->stats.active))
total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
@@ -1764,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
ktime_t total;
do {
- seq = read_seqbegin(&engine->stats.lock);
+ seq = read_seqcount_begin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqretry(&engine->stats.lock, seq));
+ } while (read_seqcount_retry(&engine->stats.lock, seq));
return total;
}
@@ -1802,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
struct intel_timeline *tl = request->context->timeline;
list_for_each_entry_from_reverse(request, &tl->requests, link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
break;
active = request;
@@ -1813,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
return active;
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
continue;
- if (!i915_request_started(request))
+ if (!__i915_request_has_started(request))
continue;
/* More than one preemptible request may match! */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9060385cd69e..d7be2b9339f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
return true;
}
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+ struct i915_request *rq;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, gfp);
+ intel_context_exit(ce);
+
+ return rq;
+}
+
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
{
engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
@@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
engine->heartbeat.systole = i915_request_get(rq);
}
+static void heartbeat_commit(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ idle_pulse(rq->engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, attr);
+}
+
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
@@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
goto unlock;
- idle_pulse(engine, rq);
-
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
unlock:
mutex_unlock(&ce->timeline->mutex);
@@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_engine_has_preemption(engine));
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
return PTR_ERR(rq);
__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
- idle_pulse(engine, rq);
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
return 0;
@@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (llist_empty(&engine->barrier_tasks))
return 0;
@@ -282,15 +298,22 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- rq = i915_request_create(engine->kernel_context);
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
+ goto out_rpm;
+ }
+
+ rq = heartbeat_create(ce, GFP_KERNEL);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_rpm;
+ goto out_unlock;
}
- idle_pulse(engine, rq);
- i915_request_add(rq);
+ heartbeat_commit(rq, &attr);
+ err = 0;
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
out_rpm:
intel_engine_pm_put(engine);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 499b09cb4acf..e67d09259dd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -60,18 +60,26 @@ static int __engine_unpark(struct intel_wakeref *wf)
/* Scrub the context image after our loss of control */
ce->ops->reset(ce);
+
+ CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+ ce->timeline->seqno,
+ READ_ONCE(*ce->timeline->hwsp_seqno),
+ ce->ring->emit);
+ GEM_BUG_ON(ce->timeline->seqno !=
+ READ_ONCE(*ce->timeline->hwsp_seqno));
}
if (engine->unpark)
engine->unpark(engine);
+ intel_breadcrumbs_unpark(engine->breadcrumbs);
intel_engine_unpark_heartbeat(engine);
return 0;
}
#if IS_ENABLED(CONFIG_LOCKDEP)
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
unsigned long flags;
@@ -81,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
return flags;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
@@ -90,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#else
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
return 0;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
}
@@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 000000000000..24fbdd94351a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (engine->stats.active) {
+ engine->stats.active++;
+ return;
+ }
+
+ /* The writer is serialised; but the pmu reader may be from hardirq */
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.start = ktime_get();
+ engine->stats.active++;
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+
+ GEM_BUG_ON(!engine->stats.active);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(!engine->stats.active);
+ if (engine->stats.active > 1) {
+ engine->stats.active--;
+ return;
+ }
+
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.active--;
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
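The writer above pairs with the reader in intel_engine_get_busy_time() (updated in the intel_engine_cs.c hunk earlier in this diff), which retries via read_seqcount_begin()/read_seqcount_retry() until it observes a consistent snapshot. The local_irq_save() around the write side is what prevents a PMU reader running in hardirq context on the same CPU from spinning forever against a half-completed update.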
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ee6312601c56..d2346b425547 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
+ struct list_head timelines;
struct i915_vma *vma;
u32 *addr;
};
@@ -183,7 +184,8 @@ struct intel_engine_execlists {
* Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
-#define ERROR_CSB BIT(31)
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -237,16 +239,6 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @switch_priority_hint: Second context priority.
- *
- * We submit multiple contexts to the HW simultaneously and would
- * like to occasionally switch between them to emulate timeslicing.
- * To know when timeslicing is suitable, we track the priority of
- * the context submitted second.
- */
- int switch_priority_hint;
-
- /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -327,7 +319,7 @@ struct intel_engine_cs {
* as possible.
*/
enum forcewake_domains fw_domain;
- atomic_t fw_active;
+ unsigned int fw_active;
unsigned long context_tag;
@@ -524,12 +516,12 @@ struct intel_engine_cs {
/**
* @active: Number of contexts currently scheduled in.
*/
- atomic_t active;
+ unsigned int active;
/**
* @lock: Lock protecting the below fields.
*/
- seqlock_t lock;
+ seqcount_t lock;
/**
* @total: Total time this engine was busy.
@@ -559,6 +551,8 @@ struct intel_engine_cs {
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
} props, defaults;
+
+ I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 000000000000..ac1be7a632d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,3896 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things into the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself;
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't one set of those per engine command streamer suffice? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy ringbuffer-based method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted, but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one requests is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
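As a concrete illustration of the coalescing rule described above, here is a minimal, self-contained sketch (not the driver's actual dequeue path; the array-based queue and the example_rq type are invented for the example):

	struct example_rq {
		unsigned int ctx_id;	/* submission ID of the owning context */
	};

	/*
	 * Pick the next execution list from a priority-ordered queue:
	 * head entries sharing the first context ID are coalesced (the
	 * later request supersedes the earlier ones), and the second
	 * port may legitimately be left empty (NULL).
	 */
	static void pick_execution_list(struct example_rq *q, int count,
					struct example_rq *elsp[2])
	{
		int i = 0;

		elsp[0] = count ? &q[i++] : NULL;
		while (i < count && q[i].ctx_id == elsp[0]->ctx_id)
			elsp[0] = &q[i++];

		elsp[1] = i < count ? &q[i] : NULL;
	}
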
+#include <linux/interrupt.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
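+/*
+ * Decoding example: FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) extracts
+ * bits 25:15 of the CSB dword; a field value of 0x7FF (GEN12_IDLE_CTX_ID,
+ * all ones in the 11-bit field) denotes the idle context, for which
+ * GEN12_CSB_CTX_VALID() evaluates to false.
+ */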
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+ struct rcu_work rcu;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical engine to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep a rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
+
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+ struct i915_request *rq,
+ int error)
+{
+ struct i915_request *active = rq;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ if (error) {
+ i915_request_set_error_once(rq, error);
+ __i915_request_skip(rq);
+ }
+ active = rq;
+ }
+
+ return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
+
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+ struct i915_priolist *p;
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&execlists->queue);
+ if (!rb)
+ return INT_MIN;
+
+ /*
+ * As the priolist[] are inverted, with the highest priority in [0],
+ * we have to flip the index value to become priority.
+ */
+ p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
+ return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
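+/*
+ * Worked example of the flip above, assuming a hypothetical
+ * I915_USER_PRIORITY_SHIFT of 2: a bucket with p->priority == 0 whose
+ * lowest set bit in p->used is bit 0 (ffs() == 1) yields
+ * ((0 + 1) << 2) - 1 = 3, the highest sub-level of that bucket.
+ */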
+
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ int last_prio;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+ * less than the current tail of ELSP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+ * The highest priority request in the queue cannot be either
+ * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+ * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(&engine->execlists)) > last_prio;
+}
+
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (i915_request_is_active(prev))
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->active.requests,
+ sched.link) {
+ if (__i915_request_is_complete(rq)) {
+ list_del_init(&rq->sched.link);
+ continue;
+ }
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Check in case we rollback so far we wrap [size/2] */
+ if (intel_ring_direction(rq->ring,
+ rq->tail,
+ rq->ring->tail + 8) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+ active = rq;
+ }
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+ /*
+ * Only used when GVT-g is enabled now. When GVT-g is disabled,
+ * the compiler should eliminate this function as dead code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (__i915_request_is_complete(rq))
+ head = rq->tail;
+ else
+ head = __active_request(ce->timeline, rq, -EIO)->head;
+ head = intel_ring_wrap(ce->ring, head);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ lrc_init_regs(ce, engine, true);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_closed(ce) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_banned(ce);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "before");
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = __ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag >= BITS_PER_LONG);
+ __clear_bit(tag, &engine->context_tag);
+ ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ ce->lrc.ccid |= engine->execlists.ccid;
+
+ __intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !engine->fw_active++)
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+ return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = ce->inflight;
+ if (!old)
+ old = __execlists_schedule_in(rq);
+ WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ spin_lock_irq(&engine->active.lock);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ WRITE_ONCE(rq->engine, &ve->base);
+ ve->base.submit_request(rq);
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * After this point, the rq may be transferred to a new sibling, so
+ * before we clear ce->inflight make sure that the context has been
+ * removed from the b->signalers and furthermore we need to make sure
+ * that the concurrent iterator in signal_irq_work is no longer
+ * following ce->signal_link.
+ */
+ if (!list_empty(&ce->signals))
+ intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+ /*
+ * This engine is now too busy to run this virtual request, so
+ * see if we can find an alternative engine for it to execute on.
+ * Once a request has become bonded to this engine, we treat it the
+ * same as any other native request.
+ */
+ if (i915_request_in_priority_queue(rq) &&
+ rq->execution_mask != engine->mask)
+ resubmit_virtual_request(rq, ve);
+
+ if (READ_ONCE(ve->request))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+ struct intel_context * const ce)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ unsigned int ccid;
+
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
+ CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+ GEM_BUG_ON(ce->inflight != engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "after");
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (intel_timeline_is_last(ce->timeline, rq) &&
+ __i915_request_is_complete(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ ccid = ce->lrc.ccid;
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
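+ /* Return a pool tag; fixed tags (e.g. OA) lie above BITS_PER_LONG. */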
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ __set_bit(ccid - 1, &engine->context_tag);
+ }
+
+ lrc_update_runtime(ce);
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !--engine->fw_active)
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ WRITE_ONCE(ce->inflight, NULL);
+ intel_context_put(ce);
+}
+
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+
+ trace_i915_request_out(rq);
+
+ GEM_BUG_ON(!ce->inflight);
+ ce->inflight = ptr_dec(ce->inflight);
+ if (!__intel_context_inflight_count(ce->inflight))
+ __execlists_schedule_out(rq, ce);
+
+ i915_request_put(rq);
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ u64 desc = ce->lrc.desc;
+ u32 tail, prev;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * in case we end up submitting an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
+ */
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access, the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete leading to invalid PD chasing.
+ */
+ wmb();
+
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
+}
+
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
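+ /*
+ * Gen11+ submits via the ELSQ pair for each port; older parts
+ * write the ELSP directly, upper dword first, the lower-dword
+ * write completing the element.
+ */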
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+ bool sentinel = false;
+ u32 ccid = -1;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
+ /*
+ * A sentinel is supposed to be the last request so that it
+ * flushes the current execution off the HW. Check that it is the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ sentinel = i915_request_has_sentinel(rq);
+
+ /*
+ * We want virtual requests to only be in the first slot so
+ * that they are never stuck behind a hog and can be immediately
+ * transferred onto the next idle engine.
+ */
+ if (rq->execution_mask != engine->mask &&
+ port != execlists->pending) {
+ GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (__i915_request_is_complete(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned int n;
+
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained, i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq = execlists->pending[n];
+
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
+ }
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (ctx_single_port_submission(prev))
+ return false;
+
+ return true;
+}
+
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(prev == next);
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (__i915_request_is_complete(next))
+ return true;
+
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
+
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
+
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+ return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!rq)
+ return false;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ inflight = intel_context_inflight(&ve->context);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily clean up after another engine handled rq */
+ if (!rq || !virtual_matches(ve, rq, engine)) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ return ve;
+ }
+
+ return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ lrc_update_offsets(&ve->context, engine);
+
+ /*
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must also move all requests that were in
+ * flight and waiting for the interrupted request, so that they
+ * are run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do not know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ /* If not currently active, or about to switch, wait for next event */
+ if (!rq || __i915_request_is_complete(rq))
+ return false;
+
+ /* We do not need to start the timeslice until after the ACK */
+ if (READ_ONCE(engine->execlists.pending[0]))
+ return false;
+
+ /* If ELSP[1] is occupied, always check to see if worth slicing */
+ if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+ ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ return true;
+ }
+
+ /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for queue\n");
+ return true;
+ }
+
+ if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ const struct intel_engine_execlists *el = &engine->execlists;
+
+ if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+ return false;
+
+ if (!needs_timeslice(engine, rq))
+ return false;
+
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ unsigned long duration;
+
+ /* Disable the timer if there is nothing to switch to */
+ duration = 0;
+ if (needs_timeslice(engine, *el->active)) {
+ /* Avoid continually prolonging an active timeslice */
+ if (timer_active(&el->timer)) {
+ /*
+ * If we just submitted a new ELSP after an old
+ * context, that context may have already consumed
+ * its timeslice, so recheck.
+ */
+ if (!timer_pending(&el->timer))
+ tasklet_hi_schedule(&el->tasklet);
+ return;
+ }
+
+ duration = timeslice(engine);
+ }
+
+ set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+}
+
+static bool completed(const struct i915_request *rq)
+{
+ if (i915_request_has_sentinel(rq))
+ return false;
+
+ return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last, * const *active;
+ struct virtual_engine *ve;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+ * Our goal then is to point each port to the end of a consecutive
+ * sequence of requests as being the most optimal (fewest wake ups
+ * and context switches) submission.
+ */
+
+ spin_lock(&engine->active.lock);
+
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ */
+ active = execlists->active;
+ while ((last = *active) && completed(last))
+ active++;
+
+ if (last) {
+ if (need_preempt(engine, last)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
+
+ last = NULL;
+ } else if (timeslice_expired(engine, last)) {
+ ENGINE_TRACE(engine,
+ "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+ yesno(timer_expired(&execlists->timer)),
+ last->fence.context, last->fence.seqno,
+ rq_prio(last),
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
+
+ /*
+ * Consume this timeslice; ensure we start a new one.
+ *
+ * The timeslice expired, and we will unwind the
+ * running contexts and recompute the next ELSP.
+ * If that submit will be the same pair of contexts
+ * (due to dependency ordering), we will skip the
+ * submission. If we don't cancel the timer now,
+ * we will see that the timer has expired and
+ * reschedule the tasklet; continually until the
+ * next context switch or other preemption event.
+ *
+ * Since we have decided to reschedule based on
+ * consumption of this timeslice, if we submit the
+ * same context again, grant it a full timeslice.
+ */
+ cancel_timer(&execlists->timer);
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (active[1]) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ spin_unlock(&engine->active.lock);
+ return;
+ }
+ }
+ }
+
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.active.lock);
+
+ rq = ve->request;
+ if (unlikely(!virtual_matches(ve, rq, engine)))
+ goto unlock; /* lost the race to a sibling */
+
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->context != &ve->context);
+
+ if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ spin_unlock(&engine->active.lock);
+ return; /* leave this for another sibling */
+ }
+
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
+
+ if (__i915_request_submit(rq)) {
+ /*
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
+ */
+ virtual_xfer_context(ve, engine);
+ GEM_BUG_ON(ve->siblings[0] != engine);
+
+ submit = true;
+ last = rq;
+ }
+
+ i915_request_put(rq);
+unlock:
+ spin_unlock(&ve->base.active.lock);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
+ }
+
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ bool merge = true;
+
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
+ */
+ if (last && !can_merge_rq(last, rq)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port == last_port)
+ goto done;
+
+ /*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
+ goto done;
+
+ /*
+ * We avoid submitting virtual requests into
+ * the secondary ports so that we can migrate
+ * the request immediately to another engine
+ * rather than wait for the primary request.
+ */
+ if (rq->execution_mask != engine->mask)
+ goto done;
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
+ goto done;
+
+ merge = false;
+ }
+
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port++ = i915_request_get(last);
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
+
+ submit = true;
+ last = rq;
+ }
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+done:
+ *port++ = i915_request_get(last);
+
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose the priority hint such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We derive the priority hint then as the first "hole" in
+ * the HW submission ports and if there are no available slots,
+ * the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the priority hint is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ execlists->queue_priority_hint = queue_prio(execlists);
+ spin_unlock(&engine->active.lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+ * of requests as currently running, e.g. trying to timeslice a pair
+ * of ordered contexts.
+ */
+ if (submit &&
+ memcmp(active,
+ execlists->pending,
+ (port - execlists->pending) * sizeof(*port))) {
+ *port = NULL;
+ while (port-- != execlists->pending)
+ execlists_schedule_in(*port, port - execlists->pending);
+
+ WRITE_ONCE(execlists->yield, -1);
+ set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
+ while (port-- != execlists->pending)
+ i915_request_put(*port);
+ *execlists->pending = NULL;
+ }
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+ local_irq_disable(); /* Suspend interrupts across request submission */
+ execlists_dequeue(engine);
+ local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+ struct i915_request **inactive)
+{
+ struct i915_request * const *port;
+
+ for (port = execlists->pending; *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* Having cancelled all outstanding process_csb(), stop their timers */
+ GEM_BUG_ON(execlists->pending[0]);
+ cancel_timer(&execlists->timer);
+ cancel_timer(&execlists->preempt);
+
+ return inactive;
+}
+
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static bool gen12_csb_parse(const u64 csb)
+{
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+ bool new_queue =
+ lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+ return false;
+}
+
+static bool gen8_csb_parse(const u64 csb)
+{
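+ /*
+ * An idle->active transition or a preemption event means the
+ * pending ELSP pair was promoted to active.
+ */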
+ return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry;
+
+ /*
+ * Reading from the HWSP has one particular advantage: we can detect
+ * a stale entry. Since the write into HWSP is broken, we have no reason
+ * to trust the HW at all; the mmio entry may equally be unordered, so
+ * we prefer the path that is self-checking and as a last resort,
+ * return the mmio value.
+ *
+ * tgl,dg1:HSDES#22011327657
+ */
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+ int idx = csb - engine->execlists.csb_status;
+ int status;
+
+ status = GEN8_EXECLISTS_STATUS_BUF;
+ if (idx >= 6) {
+ status = GEN11_EXECLISTS_STATUS_BUF2;
+ idx -= 6;
+ }
+ status += sizeof(u64) * idx;
+
+ entry = intel_uncore_read64(engine->uncore,
+ _MMIO(engine->mmio_base + status));
+ }
+ preempt_enable();
+
+ return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry = READ_ONCE(*csb);
+
+ /*
+ * Unfortunately, the GPU does not always serialise its write
+ * of the CSB entries before its write of the CSB pointer, at least
+ * from the perspective of the CPU, using what is known as a Global
+ * Observation Point. We may read a new CSB tail pointer, but then
+ * read the stale CSB entries, causing us to misinterpret the
+ * context-switch events, and eventually declare the GPU hung.
+ *
+ * icl:HSDES#1806554093
+ * tgl:HSDES#22011248461
+ */
+ if (unlikely(entry == -1))
+ entry = wa_csb_read(engine, csb);
+
+ /* Consume this entry so that we can spot its future reuse. */
+ WRITE_ONCE(*csb, -1);
+
+ /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+ return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+ /* By cancelling, we will start afresh in start_timeslice() */
+ cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
+ struct i915_request **prev;
+ u8 head, tail;
+
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that only happens within the reset paths, and so is serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+
+ /*
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
+ */
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ if (unlikely(head == tail))
+ return inactive;
+
+ /*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart, the
+ * simplest way is by stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
+ */
+ rmb();
+
+ /* Remember who was last running under the timer */
+ prev = inactive;
+ *prev = NULL;
+
+ do {
+ bool promote;
+ u64 csb;
+
+ if (++head == num_entries)
+ head = 0;
+
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
+ csb = csb_read(engine, buf + head);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, upper_32_bits(csb), lower_32_bits(csb));
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ promote = gen12_csb_parse(csb);
+ else
+ promote = gen8_csb_parse(csb);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ ring_set_paused(engine, 0);
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ *inactive++ = *old++;
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt is processed. One might assume
+ * that the breadcrumb write being before the
+ * user interrupt and the CS event for the context
+ * switch would therefore be before the CS event
+ * itself...
+ */
+ if (GEM_SHOW_DEBUG() &&
+ !__i915_request_is_complete(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+ }
+
+ *inactive++ = *execlists->active++;
+
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ }
+ } while (head != tail);
+
+ /*
+ * Gen11 has proven to fail wrt global observation point between
+ * entry and tail update, failing on the ordering and thus
+ * we see an old entry in the context status buffer.
+ *
+ * Forcibly evict the entries before the next gpu csb update,
+ * to increase the odds that we get fresh entries with non-working
+ * hardware. The cost for doing so comes out mostly in the wash,
+ * as hardware, working or not, will need to do the invalidation
+ * beforehand.
+ */
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+ /*
+ * We assume that any event reflects a change in context flow
+ * and merits a fresh timeslice. We reinstall the timer after
+ * inspecting the queue to see if we need to resubmit.
+ */
+ if (*prev != *execlists->active) /* elide lite-restores */
+ new_timeslice(execlists);
+
+ return inactive;
+}
+
+static void post_process_csb(struct i915_request **port,
+ struct i915_request **last)
+{
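+ /* Drop the inflight bookkeeping and references gathered by process_csb(). */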
+ while (port != last)
+ execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (__i915_request_is_complete(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ if (i915_request_on_hold(rq))
+ return false;
+
+ spin_lock_irq(&engine->active.lock);
+
+ if (__i915_request_is_complete(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ cap->error->gt->engine->hung = true;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at active:%zd\n",
+ ccid, port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at pending:%zd\n",
+ ccid, port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
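+ /* The upper dword of EXECLIST_STATUS carries the running ccid. */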
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return;
+
+ spin_lock_irq(&engine->active.lock);
+ cap->rq = active_context(engine, active_ccid(engine));
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
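+ /*
+ * Only treat it as a hang if the forced preemption is still
+ * outstanding, i.e. the ELSP promotion has not yet been acked
+ * by a CSB event.
+ */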
+ return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = process_csb(engine, post);
+ GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+ if (unlikely(preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ }
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+ msg = "preemption time out";
+ else
+ msg = "internal error";
+
+ engine->execlists.error_interrupt = 0;
+ execlists_reset(engine, msg);
+ }
+
+ if (!engine->execlists.pending[0]) {
+ execlists_dequeue_irq(engine);
+ start_timeslice(engine);
+ }
+
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static bool submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
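+ /*
+ * A kick is only needed for a new highest priority; anything at
+ * or below the current hint will be picked up on the next dequeue.
+ */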
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return false;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
+
+ if (submit_queue(engine, request))
+ __execlists_kick(&engine->execlists);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = execlists_context_alloc,
+
+ .pre_pin = execlists_context_pre_pin,
+ .pin = execlists_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
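+ /*
+ * Dword budget: 1 for the LRI header, 4 per PDP entry (two
+ * register/value pairs for the upper and lower dwords of each
+ * page-directory address) and 1 to re-enable arbitration.
+ */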
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
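+ /*
+ * For example, with the 12-entry GEN11 CSB, reset_value is 11:
+ * both pointers are primed to the final slot so the first
+ * post-reset event, written by the HW into entry 0, is
+ * immediately seen as new.
+ */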
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(execlists_active(&engine->execlists));
+
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) that the contents of our pinned buffers has been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ drm_err(&engine->i915->drm,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error, parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ if (INTEL_GEN(engine->i915) >= 11)
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ else
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
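+ /*
+ * RING_MODE is a masked register: the upper 16 bits of the value
+ * select which bits are actually written, which is why
+ * _MASKED_BIT_ENABLE() mirrors the bit into both halves.
+ */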
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ bool unexpected = false;
+
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
+
+ return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->resume() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /*
+ * We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Also as modern gpu as kbl can suffer
+ * from system hang if batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ring_set_paused(engine, 1);
+ intel_engine_stop_cs(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
+
+ inactive = process_csb(engine, inactive); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(engine);
+
+ return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 head;
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ rq = active_context(engine, engine->execlists.reset_ccid);
+ if (!rq)
+ return;
+
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ if (__i915_request_is_complete(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ head = intel_ring_wrap(ce->ring, rq->tail);
+ goto out_replay;
+ }
+
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+ rq = active_request(ce->timeline, rq);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
+
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, that the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!__i915_request_has_started(rq))
+ goto out_replay;
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ __i915_request_reset(rq, stalled);
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+out_replay:
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ head, ce->ring->tail);
+ lrc_reset_regs(ce, engine);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = reset_csb(engine, post);
+
+ execlists_reset_active(engine, true);
+
+ inactive = cancel_port_requests(execlists, inactive);
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /* Process the csb, find the guilty context and throw away */
+ execlists_reset_csb(engine, stalled);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+ /* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ execlists_reset_csb(engine, true);
+
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ i915_request_mark_eio(rq);
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ i915_request_mark_eio(rq);
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ i915_request_mark_eio(rq);
+
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.active.lock);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ i915_request_mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ }
+ spin_unlock(&ve->base.active.lock);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /*
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
+ *
+ * If the GPU reset fails, the engine may still be alive with requests
+ * inflight. We expect those to complete, or for the device to be
+ * reset as the next level of recovery, and as a final resort we
+ * will declare the device wedged.
+ */
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&execlists->tasklet))
+ __execlists_kick(execlists);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
+static void execlists_park(struct intel_engine_cs *engine)
+{
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = execlists_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
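+ /*
+ * Note the nesting below: semaphores and preemption are withheld
+ * under vGPU mediation, and timeslicing additionally requires
+ * preemption support.
+ */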
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ execlists_shutdown(engine);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overriden by each engine. */
+
+ engine->resume = execlists_resume;
+
+ engine->cops = &execlists_context_ops;
+ engine->request_alloc = execlists_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = execlists_set_default_submission;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+ * TODO: On Gen11 interrupt masks need to be clear
+ * to allow C6 entry. Keep interrupts enabled at all times
+ * and take the hit of generating extra interrupts
+ * until a more refined solution exists.
+ */
+ }
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+ unsigned int shift = 0;
+
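+ /*
+ * Gen11+ engines have their own interrupt registers, so no shift
+ * is needed; on earlier gens several engines share a GT interrupt
+ * register and the per-engine bits must be shifted into place.
+ */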
+ if (INTEL_GEN(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+ } else {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
+ execlists->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+ if (INTEL_GEN(i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
+ engine->release = execlists_release;
+
+ return 0;
+}
+
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+ struct virtual_engine *ve =
+ container_of(wrk, typeof(*ve), rcu.work);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->context.inflight);
+
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!__i915_request_is_complete(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees as in the case it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->active.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->active.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ lrc_fini(&ve->context);
+ intel_context_fini(&ve->context);
+
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
+ intel_engine_free_request_pool(&ve->base);
+
+ kfree(ve->bonds);
+ kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+ * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ /* Note: we must use a real engine class for setting up reg state */
+ return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ intel_timeline_exit(ce->timeline);
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = virtual_context_alloc,
+
+ .pre_pin = virtual_context_pre_pin,
+ .pin = virtual_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_set_error_once(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
+ spin_lock_irq(&sibling->active.lock);
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ }
+
+ goto unlock_engine;
+ }
+
+ if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
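+ /*
+ * Insert into the sibling's tree of virtual requests, ordered by
+ * descending priority; "first" records whether we became the
+ * leftmost (highest priority) node.
+ */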
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+
+unlock_engine:
+ spin_unlock_irq(&sibling->active.lock);
+
+ if (intel_context_inflight(&ve->context))
+ break;
+ }
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ unsigned long flags;
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ /* By the time we resubmit a request, it may be completed */
+ if (__i915_request_is_complete(rq)) {
+ __i915_request_submit(rq);
+ goto unlock;
+ }
+
+ if (ve->request) { /* background completion from preempt-to-busy */
+ GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+ __i915_request_submit(ve->request);
+ i915_request_put(ve->request);
+ }
+
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
+ struct ve_bond *bond;
+
+ allowed = ~to_request(signal)->engine->mask;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond)
+ allowed &= bond->sibling_mask;
+
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
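+ /*
+ * The try_cmpxchg() loop above refreshes "exec" with the current
+ * value on each failure, so execution_mask is narrowed atomically
+ * even under concurrent updates.
+ */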
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
+}
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1)
+ return intel_context_create(siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need for this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, &ve->base);
+
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+ * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
+ }
+
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ virtual_engine_initial_hint(ve);
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ if (execlists->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ execlists_set_default_submission;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 000000000000..a8fd7adefd82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index cf94525be2c1..700588bc9d57 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+ if (!intel_vtd_active())
+ return false;
+
+ if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+ return true;
+
+ if (IS_GEN(i915, 12))
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
@@ -526,16 +535,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -548,9 +580,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
@@ -1050,7 +1082,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->do_idle_maps = needs_idle_maps(i915);
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
ggtt->vm.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a..a357bb431815 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
fence_write(fence);
}
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+ return fence->vma && i915_vma_is_active(fence->vma);
+}
+
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct i915_fence_reg *fence;
+ struct i915_fence_reg *active = NULL;
+ struct i915_fence_reg *fence, *fn;
- list_for_each_entry(fence, &ggtt->fence_list, link) {
+ list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+ if (fence == active) /* now seen this fence twice */
+ active = ERR_PTR(-EAGAIN);
+
+ /* Prefer idle fences so we do not have to wait on the GPU */
+ if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+ if (!active)
+ active = fence;
+
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ continue;
+ }
+
if (atomic_read(&fence->pin_count))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 44f1d51e5ae5..d8e1ab412634 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
int intel_gt_init_mmio(struct intel_gt *gt)
{
+ intel_gt_init_clock_frequency(gt);
+
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
@@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- intel_gt_init_clock_frequency(gt);
-
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 104cb30e8c13..06d84cf09570 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -145,7 +145,8 @@ static void pool_retire(struct i915_active *ref)
}
static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+ enum i915_map_type type)
{
struct intel_gt *gt = to_gt(pool);
struct intel_gt_buffer_pool_node *node;
@@ -169,12 +170,14 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
i915_gem_object_set_readonly(obj);
+ node->type = type;
node->obj = obj;
return node;
}
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
@@ -191,6 +194,9 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
if (node->obj->base.size < size)
continue;
+ if (node->type != type)
+ continue;
+
age = READ_ONCE(node->age);
if (!age)
continue;
@@ -205,7 +211,7 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
rcu_read_unlock();
if (&node->link == list) {
- node = node_create(pool, size);
+ node = node_create(pool, size, type);
if (IS_ERR(node))
return node;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 42cbac003e8a..6068f8f1762e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -15,7 +15,8 @@ struct intel_gt;
struct i915_request;
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type);
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index bcf1658c9633..d8d82c890da8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -11,10 +11,9 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"
-struct drm_i915_gem_object;
-
struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
struct rcu_head rcu;
};
unsigned long age;
+ enum i915_map_type type;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 999079686846..a4242ca8dcd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,34 +7,146 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
-#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
-#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
-#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
+ u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000000 / (frac_freq + 1);
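+ /*
+ * Example: a divider field of 23 gives a 24 MHz base, and a
+ * denominator field of 3 adds a 250 kHz fractional component.
+ */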
+
+ return base_freq + frac_freq;
+}
+
+static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-static u32 read_clock_frequency(const struct intel_gt *gt)
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
{
- if (INTEL_GEN(gt->i915) >= 11) {
- u32 config;
-
- config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
- config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (config) {
- case 0: return MHZ_12;
- case 1:
- case 2: return MHZ_19_2;
- default:
- case 3: return MHZ_12_5;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+
+ if (INTEL_GEN(uncore->i915) <= 4) {
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the "Clocking Configuration"
+ * ("CLKCFG") MCHBAR register)
+ */
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+ } else if (INTEL_GEN(uncore->i915) <= 8) {
+ /*
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours)."
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(uncore->i915) <= 9) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
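+ /* e.g. a shift parameter of 0 divides by 8; 3 leaves it unscaled */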
}
- } else if (INTEL_GEN(gt->i915) >= 9) {
- if (IS_GEN9_LP(gt->i915))
- return MHZ_19_2;
- else
- return MHZ_12;
- } else {
- return MHZ_12_5;
+
+ return freq;
+ } else if (INTEL_GEN(uncore->i915) <= 12) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ if (INTEL_GEN(uncore->i915) <= 10)
+ freq = gen10_get_crystal_clock_freq(uncore, c0);
+ else
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
}
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
@@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*/
- gt->clock_frequency = read_clock_frequency(gt);
+ gt->clock_frequency = read_clock_frequency(gt->uncore);
+ if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
GT_TRACE(gt,
- "Using clock frequency: %dkHz\n",
- gt->clock_frequency / 1000);
+ "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+ gt->clock_frequency / 1000,
+ gt->clock_period_ns,
+ div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+ USEC_PER_SEC));
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
- if (gt->clock_frequency != read_clock_frequency(gt)) {
+ if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
dev_err(gt->i915->drm.dev,
"GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
- read_clock_frequency(gt));
+ read_clock_frequency(gt->uncore));
}
}
#endif
@@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den)
return div_u64(nom + den - 1, den);
}
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
- gt->clock_frequency);
+ return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
- 1000 * 1000 * 1000);
+ return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
- u32 val;
+ u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
@@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
- val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (IS_GEN(gt->i915, 6))
- val = roundup(val, 25);
+ val = div_u64_roundup(val, 25) * 25;
return val;
}
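/*
 * Editorial sketch, not part of the patch: expected behaviour of the
 * widened conversion helpers, assuming gt->clock_frequency is 12.5 MHz
 * (one tick every 80ns) and a non-gen6 part.
 */
static void example_clock_conversions(const struct intel_gt *gt)
{
	u64 tick_ns = intel_gt_clock_interval_to_ns(gt, 1);	/* 80 */
	u64 ticks = intel_gt_ns_to_clock_interval(gt, 81);	/* 2, rounded up */
	u64 pm = intel_gt_ns_to_pm_interval(gt, 1280);		/* 16 ticks / 16 = 1 */

	(void)tick_ns;
	(void)ticks;
	(void)pm;
}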
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
index f793c89f2cbd..8b03e97a85df 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt);
static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
#endif
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 257063a57101..9830342aa6f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 274aa0dd7050..c94e8ac884eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend)
intel_gt_pm_put(gt);
}
+static void runtime_begin(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.start = ktime_get();
+ gt->stats.active = true;
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.active = false;
+ gt->stats.total =
+ ktime_add(gt->stats.total,
+ ktime_sub(ktime_get(), gt->stats.start));
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
intel_gt_unpark_requests(gt);
+ runtime_begin(gt);
return 0;
}
@@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
+ runtime_end(gt);
intel_gt_park_requests(gt);
i915_vma_parked(gt);
@@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
void intel_gt_pm_init(struct intel_gt *gt)
@@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
return intel_uc_runtime_resume(&gt->uc);
}
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ ktime_t total = gt->stats.total;
+
+ if (gt->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), gt->stats.start));
+
+ return total;
+}
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&gt->stats.lock);
+ total = __intel_gt_get_awake_time(gt);
+ } while (read_seqcount_retry(&gt->stats.lock, seq));
+
+ return total;
+}
+
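/*
 * Editorial sketch, not part of the patch: a consumer of the new
 * counter.  The seqcount retry loop above makes the {total, start,
 * active} triple consistent against a concurrent park/unpark, so a
 * caller can simply sample it; the helper name is hypothetical.
 */
static u64 example_gt_awake_ms(const struct intel_gt *gt)
{
	return ktime_to_ms(intel_gt_get_awake_time(gt));
}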
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 60f0e2fbe55c..63846a856e7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
static inline bool is_mock_gt(const struct intel_gt *gt)
{
return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 66fcbf9d0fdd..dc06c78c9eeb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- bool interruptible;
LIST_HEAD(free);
- interruptible = true;
- if (unlikely(timeout < 0))
- timeout = -timeout, interruptible = false;
-
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
@@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
- interruptible,
+ true,
timeout);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 6d39a4a11bf3..a83d3e18254d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -75,6 +75,7 @@ struct intel_gt {
intel_wakeref_t awake;
u32 clock_frequency;
+ u32 clock_period_ns;
struct intel_llc llc;
struct intel_rc6 rc6;
@@ -87,6 +88,30 @@ struct intel_gt {
u32 pm_guc_events;
+ struct {
+ bool active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_mutex_t lock;
+
+ /**
+ * @total: Total time the GT was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where the GT is currently busy (active == true).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == false; otherwise the GT is active.
+ */
+ ktime_t start;
+ } stats;
+
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7bfe9072be9a..04aa6601e984 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
bdw_setup_private_ppat(uncore);
}
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
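/*
 * Editorial sketch, not part of the patch: typical use of the helper
 * added above.  The caller receives a pinned vma (or an ERR_PTR) and
 * owns both the pin and the object reference.
 */
static int example_use_scratch(struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = __vm_create_scratch_for_read(vm, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... emit GPU commands targeting vma, wait, read back ... */

	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return 0;
}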
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 8a33940a71f3..29c10fde8ce3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7614a3d24fca..94f485b591af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1,599 +1,23 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <ben@bwidawsk.net>
- * Michel Thierry <michel.thierry@intel.com>
- * Thomas Daniel <thomas.daniel@intel.com>
- * Oscar Mateo <oscar.mateo@intel.com>
- *
- */
-
-/**
- * DOC: Logical Rings, Logical Ring Contexts and Execlists
- *
- * Motivation:
- * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
- * These expanded contexts enable a number of new abilities, especially
- * "Execlists" (also implemented in this file).
- *
- * One of the main differences with the legacy HW contexts is that logical
- * ring contexts incorporate many more things to the context's state, like
- * PDPs or ringbuffer control registers:
- *
- * The reason why PDPs are included in the context is straightforward: as
- * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
- * contained there means you don't need to do a ppgtt->switch_mm yourself,
- * instead, the GPU will do it for you on the context switch.
- *
- * But, what about the ringbuffer control registers (head, tail, etc..)?
- * shouldn't we just need a set of those per engine command streamer? This is
- * where the name "Logical Rings" starts to make sense: by virtualizing the
- * rings, the engine cs shifts to a new "ring buffer" with every context
- * switch. When you want to submit a workload to the GPU you: A) choose your
- * context, B) find its appropriate virtualized ring, C) write commands to it
- * and then, finally, D) tell the GPU to switch to that context.
- *
- * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
- * to a contexts is via a context execution list, ergo "Execlists".
- *
- * LRC implementation:
- * Regarding the creation of contexts, we have:
- *
- * - One global default context.
- * - One local default context for each opened fd.
- * - One local extra context for each context create ioctl call.
- *
- * Now that ringbuffers belong per-context (and not per-engine, like before)
- * and that contexts are uniquely tied to a given engine (and not reusable,
- * like before) we need:
- *
- * - One ringbuffer per-engine inside each context.
- * - One backing object per-engine inside each context.
- *
- * The global default context starts its life with these new objects fully
- * allocated and populated. The local default context for each opened fd is
- * more complex, because we don't know at creation time which engine is going
- * to use them. To handle this, we have implemented a deferred creation of LR
- * contexts:
- *
- * The local context starts its life as a hollow or blank holder, that only
- * gets populated for a given engine once we receive an execbuffer. If later
- * on we receive another execbuffer ioctl for the same context but a different
- * engine, we allocate/populate a new ringbuffer and context backing object and
- * so on.
- *
- * Finally, regarding local contexts created using the ioctl call: as they are
- * only allowed with the render ring, we can allocate & populate them right
- * away (no need to defer anything, at least for now).
- *
- * Execlists implementation:
- * Execlists are the new method by which, on gen8+ hardware, workloads are
- * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
- * This method works as follows:
- *
- * When a request is committed, its commands (the BB start and any leading or
- * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
- * for the appropriate context. The tail pointer in the hardware context is not
- * updated at this time, but instead, kept by the driver in the ringbuffer
- * structure. A structure representing this request is added to a request queue
- * for the appropriate engine: this structure contains a copy of the context's
- * tail after the request was written to the ring buffer and a pointer to the
- * context itself.
- *
- * If the engine's request queue was empty before the request was added, the
- * queue is processed immediately. Otherwise the queue will be processed during
- * a context switch interrupt. In any case, elements on the queue will get sent
- * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
- * globally unique 20-bits submission ID.
- *
- * When execution of a request completes, the GPU updates the context status
- * buffer with a context complete event and generates a context switch interrupt.
- * During the interrupt handling, the driver examines the events in the buffer:
- * for each context complete event, if the announced ID matches that on the head
- * of the request queue, then that request is retired and removed from the queue.
- *
- * After processing, if any requests were retired and the queue is not empty
- * then a new execution list can be submitted. The two requests at the front of
- * the queue are next to be submitted but since a context may not occur twice in
- * an execution list, if subsequent requests have the same ID as the first then
- * the two requests must be combined. This is done simply by discarding requests
- * at the head of the queue until either only one request is left (in which case
- * we use a NULL second context) or the first two requests have unique IDs.
- *
- * By always executing the first two requests in the queue the driver ensures
- * that the GPU is kept as busy as possible. In the case where a single context
- * completes but a second context is still executing, the request for this second
- * context will be at the head of the queue when we remove the first one. This
- * request will then be resubmitted along with a new request for a different context,
- * which will cause the hardware to continue executing the second request and queue
- * the new request (the GPU detects the condition of a context getting preempted
- * with the same context and optimizes the context switch flow by not doing
- * preemption, but just sampling the new tail pointer).
- *
*/
-#include <linux/interrupt.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
-#include "i915_trace.h"
-#include "i915_vgpu.h"
-#include "intel_breadcrumbs.h"
-#include "intel_context.h"
-#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
-#include "intel_gt_pm.h"
-#include "intel_gt_requests.h"
+#include "intel_lrc.h"
#include "intel_lrc_reg.h"
-#include "intel_mocs.h"
-#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
-#define RING_EXECLIST_QFULL (1 << 0x2)
-#define RING_EXECLIST1_VALID (1 << 0x3)
-#define RING_EXECLIST0_VALID (1 << 0x4)
-#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
-#define RING_EXECLIST1_ACTIVE (1 << 0x11)
-#define RING_EXECLIST0_ACTIVE (1 << 0x12)
-
-#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
-#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
-#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
-#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
-#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
-#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
-
-#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
-
-#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
-
-#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
-#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
-#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
-#define GEN12_IDLE_CTX_ID 0x7FF
-#define GEN12_CSB_CTX_VALID(csb_dw) \
- (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
-
-/* Typical size of the average request (2 pipecontrols and a MI_BB) */
-#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
-struct virtual_engine {
- struct intel_engine_cs base;
- struct intel_context context;
- struct rcu_work rcu;
-
- /*
- * We allow only a single request through the virtual engine at a time
- * (each request in the timeline waits for the completion fence of
- * the previous before being submitted). By restricting ourselves to
- * only submitting a single request, each request is placed on to a
- * physical to maximise load spreading (by virtue of the late greedy
- * scheduling -- each real engine takes the next available request
- * upon idling).
- */
- struct i915_request *request;
-
- /*
- * We keep a rbtree of available virtual engines inside each physical
- * engine, sorted by priority. Here we preallocate the nodes we need
- * for the virtual engine, indexed by physical_engine->id.
- */
- struct ve_node {
- struct rb_node rb;
- int prio;
- } nodes[I915_NUM_ENGINES];
-
- /*
- * Keep track of bonded pairs -- restrictions upon on our selection
- * of physical engines any particular request may be submitted to.
- * If we receive a submit-fence from a master engine, we will only
- * use one of sibling_mask physical engines.
- */
- struct ve_bond {
- const struct intel_engine_cs *master;
- intel_engine_mask_t sibling_mask;
- } *bonds;
- unsigned int num_bonds;
-
- /* And finally, which physical engines this virtual engine maps onto. */
- unsigned int num_siblings;
- struct intel_engine_cs *siblings[];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!intel_engine_is_virtual(engine));
- return container_of(engine, struct virtual_engine, base);
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
-
-static void execlists_init_reg_state(u32 *reg_state,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool close);
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head);
-
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
-static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x74;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x68;
- else if (engine->class == RENDER_CLASS)
- return 0xd8;
- else
- return -1;
-}
-
-static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x12;
- else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
- return 0x18;
- else
- return -1;
-}
-
-static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_wa_bb_per_ctx(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_indirect_ptr(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
-{
- if (engine->class != RENDER_CLASS)
- return -1;
-
- if (INTEL_GEN(engine->i915) >= 12)
- return 0xb6;
- else if (INTEL_GEN(engine->i915) >= 11)
- return 0xaa;
- else
- return -1;
-}
-
-static u32
-lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- fallthrough;
- case 12:
- return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 11:
- return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 10:
- return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 9:
- return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 8:
- return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- }
-}
-
-static void
-lrc_ring_setup_indirect_ctx(u32 *regs,
- const struct intel_engine_cs *engine,
- u32 ctx_bb_ggtt_addr,
- u32 size)
-{
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
- GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
- regs[lrc_ring_indirect_ptr(engine) + 1] =
- ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
-
- GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
- regs[lrc_ring_indirect_offset(engine) + 1] =
- lrc_ring_indirect_offset_default(engine) << 6;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
-static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-{
- struct i915_request *active = rq;
-
- rcu_read_lock();
- list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
- if (i915_request_completed(rq))
- break;
-
- active = rq;
- }
- rcu_read_unlock();
-
- return active;
-}
-
-static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
-{
- /*
- * We inspect HWS_PREEMPT with a semaphore inside
- * engine->emit_fini_breadcrumb. If the dword is true,
- * the ring is paused as the semaphore will busywait
- * until the dword is false.
- */
- engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
- if (state)
- wmb();
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return READ_ONCE(rq->sched.attr.priority);
-}
-
-static int effective_prio(const struct i915_request *rq)
-{
- int prio = rq_prio(rq);
-
- /*
- * If this request is special and must not be interrupted at any
- * cost, so be it. Note we are only checking the most recent request
- * in the context and so may be masking an earlier vip request. It
- * is hoped that under the conditions where nopreempt is used, this
- * will not matter (i.e. all requests to that context will be
- * nopreempt for as long as desired).
- */
- if (i915_request_has_nopreempt(rq))
- prio = I915_PRIORITY_UNPREEMPTABLE;
-
- return prio;
-}
-
-static int queue_prio(const struct intel_engine_execlists *execlists)
-{
- struct i915_priolist *p;
- struct rb_node *rb;
-
- rb = rb_first_cached(&execlists->queue);
- if (!rb)
- return INT_MIN;
-
- /*
- * As the priolist[] are inverted, with the highest priority in [0],
- * we have to flip the index value to become priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
-}
-
-static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- struct rb_node *rb)
-{
- int last_prio;
-
- if (!intel_engine_has_semaphores(engine))
- return false;
-
- /*
- * Check if the current priority hint merits a preemption attempt.
- *
- * We record the highest value priority we saw during rescheduling
- * prior to this dequeue, therefore we know that if it is strictly
- * less than the current tail of ELSP[0], we do not need to force
- * a preempt-to-idle cycle.
- *
- * However, the priority hint is a mere hint that we may need to
- * preempt. If that hint is stale or we may be trying to preempt
- * ourselves, ignore the request.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
- if (engine->execlists.queue_priority_hint <= last_prio)
- return false;
-
- /*
- * Check against the first request in ELSP[1], it will, thanks to the
- * power of PI, be the highest priority of that context.
- */
- if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
- rq_prio(list_next_entry(rq, sched.link)) > last_prio)
- return true;
-
- if (rb) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- bool preempt = false;
-
- if (engine == ve->siblings[0]) { /* only preempt one sibling */
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- preempt = rq_prio(next) > last_prio;
- rcu_read_unlock();
- }
-
- if (preempt)
- return preempt;
- }
-
- /*
- * If the inflight context did not trigger the preemption, then maybe
- * it was the set of queued requests? Pick the highest priority in
- * the queue (the first active priolist) and see if it deserves to be
- * running instead of ELSP[0].
- *
- * The highest priority request in the queue can not be either
- * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
- * context, its priority would not exceed ELSP[0] aka last_prio.
- */
- return queue_prio(&engine->execlists) > last_prio;
-}
-
-__maybe_unused static inline bool
-assert_priority_queue(const struct i915_request *prev,
- const struct i915_request *next)
-{
- /*
- * Without preemption, the prev may refer to the still active element
- * which we refuse to let go.
- *
- * Even with preemption, there are times when we think it is better not
- * to preempt and leave an ostensibly lower priority request in flight.
- */
- if (i915_request_is_active(prev))
- return true;
-
- return rq_prio(prev) >= rq_prio(next);
-}
-
-/*
- * The context descriptor encodes various attributes of a context,
- * including its GTT address and some flags. Because it's fairly
- * expensive to calculate, we'll just do it once and cache the result,
- * which remains valid until the context is unpinned.
- *
- * This is what a descriptor looks like, from LSB to MSB::
- *
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
- * bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
- * bits 53-54: mbz, reserved for use by hardware
- * bits 55-63: group ID, currently unused and set to 0
- *
- * Starting from Gen11, the upper dword of the descriptor has a new format:
- *
- * bits 32-36: reserved
- * bits 37-47: SW context ID
- * bits 48:53: engine instance
- * bit 54: mbz, reserved for use by hardware
- * bits 55-60: SW counter
- * bits 61-63: engine class
- *
- * engine info, SW context ID and SW counter need to form a unique number
- * (Context ID) per lrc.
- */
-static u32
-lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
-{
- u32 desc;
-
- desc = INTEL_LEGACY_32B_CONTEXT;
- if (i915_vm_is_4lvl(ce->vm))
- desc = INTEL_LEGACY_64B_CONTEXT;
- desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
- if (IS_GEN(engine->i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- return i915_ggtt_offset(ce->state) | desc;
-}
-
-static inline unsigned int dword_in_page(void *addr)
-{
- return offset_in_page(addr) / sizeof(u32);
-}
-
static void set_offsets(u32 *regs,
const u8 *data,
const struct intel_engine_cs *engine,
- bool clear)
+ bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
@@ -601,7 +25,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(total_state_size) 0, (total_state_size)
+#define END 0
{
const u32 base = engine->mmio_base;
@@ -610,8 +34,6 @@ static void set_offsets(u32 *regs,
if (*data & BIT(7)) { /* skip */
count = *data++ & ~BIT(7);
- if (clear)
- memset32(regs, MI_NOOP, count);
regs += count;
continue;
}
@@ -639,19 +61,11 @@ static void set_offsets(u32 *regs,
} while (v & BIT(7));
regs[0] = base + (offset << 2);
- if (clear)
- regs[1] = 0;
regs += 2;
} while (--count);
}
- if (clear) {
- u8 count = *++data;
-
- /* Clear past the tail for HW access */
- GEM_BUG_ON(dword_in_page(regs) > count);
- memset32(regs, MI_NOOP, count - dword_in_page(regs));
-
+ if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
if (INTEL_GEN(engine->i915) >= 10)
@@ -691,7 +105,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(80)
+ END
};
static const u8 gen9_xcs_offsets[] = {
@@ -775,7 +189,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(176)
+ END
};
static const u8 gen12_xcs_offsets[] = {
@@ -807,7 +221,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(80)
+ END
};
static const u8 gen8_rcs_offsets[] = {
@@ -844,7 +258,7 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen9_rcs_offsets[] = {
@@ -928,7 +342,7 @@ static const u8 gen9_rcs_offsets[] = {
REG16(0x67c),
REG(0x68),
- END(176)
+ END
};
static const u8 gen11_rcs_offsets[] = {
@@ -969,7 +383,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen12_rcs_offsets[] = {
@@ -1065,7 +479,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END(192)
+ END
};
#undef END
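/*
 * Editorial sketch, not part of the patch: how the offset tables above
 * are packed.  Register offsets are encoded 7 bits per byte with BIT(7)
 * as a continuation flag, so REG16(0x200) expands to { 0x81, 0x00 }:
 * 0x200 >> 9 == 1 plus the continuation bit gives 0x81, and
 * (0x200 >> 2) & 0x7f gives 0x00.  A standalone decoder for one such
 * entry (hypothetical, mirroring the loop in set_offsets()):
 */
static u32 example_decode_reg(const u8 *data, u32 base)
{
	u32 offset = 0;
	u8 v;

	do {
		v = *data++;
		offset = (offset << 7) | (v & 0x7f);
	} while (v & BIT(7));

	return base + (offset << 2);	/* 0x81 0x00 -> base + 0x200 */
}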
@@ -1104,2284 +518,440 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
}
}
-static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
-{
- struct i915_request *rq, *rn, *active = NULL;
- struct list_head *pl;
- int prio = I915_PRIORITY_INVALID;
-
- lockdep_assert_held(&engine->active.lock);
-
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->active.requests,
- sched.link) {
- if (i915_request_completed(rq))
- continue; /* XXX */
-
- __i915_request_unsubmit(rq);
-
- /*
- * Push the request back into the queue for later resubmission.
- * If this request is not native to this physical engine (i.e.
- * it came from a virtual source), push it back onto the virtual
- * engine so that it can be moved across onto another physical
- * engine as load dictates.
- */
- if (likely(rq->execution_mask == engine->mask)) {
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
- list_move(&rq->sched.link, pl);
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Check in case we rollback so far we wrap [size/2] */
- if (intel_ring_direction(rq->ring,
- rq->tail,
- rq->ring->tail + 8) > 0)
- rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-
- active = rq;
- } else {
- struct intel_engine_cs *owner = rq->context->engine;
-
- WRITE_ONCE(rq->engine, owner);
- owner->submit_request(rq);
- active = NULL;
- }
- }
-
- return active;
-}
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
-static inline void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * The compiler should eliminate this function as dead-code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->engine->context_status_notifier,
- status, rq);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
}
-static void intel_engine_context_in(struct intel_engine_cs *engine)
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- if (atomic_add_unless(&engine->stats.active, 1, 0))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
- engine->stats.start = ktime_get();
- atomic_inc(&engine->stats.active);
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
}
-static void intel_engine_context_out(struct intel_engine_cs *engine)
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
- if (atomic_add_unless(&engine->stats.active, -1, 1))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (atomic_dec_and_test(&engine->stats.active)) {
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
}
-static void
-execlists_check_context(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const char *when)
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
- const struct intel_ring *ring = ce->ring;
- u32 *regs = ce->lrc_reg_state;
- bool valid = true;
int x;
- if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
- pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_START],
- i915_ggtt_offset(ring->vma));
- regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- valid = false;
- }
-
- if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
- (RING_CTL_SIZE(ring->size) | RING_VALID)) {
- pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_CTL],
- (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- valid = false;
- }
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
- pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
- engine->name, regs[x + 1]);
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- valid = false;
- }
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
- WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
+ return x + 2;
}
-static void restore_default_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
- u32 *regs;
+ int x;
- regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
- execlists_init_reg_state(regs, ce, engine, ce->ring, true);
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
- ce->runtime.last = intel_context_get_runtime(ce);
+ return x + 2;
}
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
- u32 head;
-
- /*
- * The executing context has been cancelled. We want to prevent
- * further execution along this context and propagate the error on
- * to anything depending on its results.
- *
- * In __i915_request_submit(), we apply the -EIO and remove the
- * requests' payloads for any banned requests. But first, we must
- * rewind the context back to the start of the incomplete request so
- * that we do not jump back into the middle of the batch.
- *
- * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e other timelines)
- * remain correctly ordered. And we defer to __i915_request_submit()
- * so that all asynchronous waits are correctly handled.
- */
- ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
- rq->fence.context, rq->fence.seqno);
+ if (engine->class != RENDER_CLASS)
+ return -1;
- /* On resubmission of the active request, payload will be scrubbed */
- if (i915_request_completed(rq))
- head = rq->tail;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
else
- head = active_request(ce->timeline, rq)->head;
- head = intel_ring_wrap(ce->ring, head);
-
- /* Scrub the context image to prevent replaying the previous batch */
- restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
-
- /* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow += dt < 0;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
-#endif
+ return -1;
}
-static void intel_context_update_runtime(struct intel_context *ce)
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
- u32 old;
- s32 dt;
-
- if (intel_context_is_barrier(ce))
- return;
-
- old = ce->runtime.last;
- ce->runtime.last = intel_context_get_runtime(ce);
- dt = ce->runtime.last - old;
-
- if (unlikely(dt <= 0)) {
- CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
- return;
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
}
-
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
}
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
{
- struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->context;
-
- intel_context_get(ce);
-
- if (unlikely(intel_context_is_banned(ce)))
- reset_active(rq, engine);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "before");
-
- if (ce->tag) {
- /* Use a fixed tag for OA and friends */
- GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
- ce->lrc.ccid = ce->tag;
- } else {
- /* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(READ_ONCE(engine->context_tag));
-
- GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
- clear_bit(tag - 1, &engine->context_tag);
- ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
- BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
- }
-
- ce->lrc.ccid |= engine->execlists.ccid;
-
- __intel_gt_pm_get(engine->gt);
- if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
- intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(engine);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
- return engine;
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
}
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
+static void init_common_regs(u32 * const regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *old;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
- trace_i915_request_in(rq, idx);
-
- old = READ_ONCE(ce->inflight);
- do {
- if (!old) {
- WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
- break;
- }
- } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
- GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
- return i915_request_get(rq);
-}
+ u32 ctl;
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- struct i915_request *next = READ_ONCE(ve->request);
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (INTEL_GEN(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
- if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ regs[CTX_TIMESTAMP] = ce->runtime.last;
}
-static inline void
-__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine,
- unsigned int ccid)
+static void init_wa_bb_regs(u32 * const regs,
+ const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
-
- /*
- * NB process_csb() is not under the engine->active.lock and hence
- * schedule_out can race with schedule_in meaning that we should
- * refrain from doing non-trivial work here.
- */
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "after");
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- /*
- * If we have just completed this context, the engine may now be
- * idle and we want to re-enter powersaving.
- */
- if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
- i915_request_completed(rq))
- intel_engine_add_retire(engine, ce->timeline);
-
- ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
- ccid &= GEN12_MAX_CONTEXT_HW_ID;
- if (ccid < BITS_PER_LONG) {
- GEM_BUG_ON(ccid == 0);
- GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
- set_bit(ccid - 1, &engine->context_tag);
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- intel_context_update_runtime(ce);
- intel_engine_context_out(engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
- if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
- intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
-
- /*
- * If this is part of a virtual engine, its next request may
- * have been blocked waiting for access to the active context.
- * We have to kick all the siblings again in case we need to
- * switch (e.g. the next request is not runnable on this
- * engine). Hopefully, we will already have submitted the next
- * request before the tasklet runs and do not need to rebuild
- * each virtual tree and kick everyone again.
- */
- if (ce->engine != engine)
- kick_siblings(rq, ce);
-
- intel_context_put(ce);
-}
-
-static inline void
-execlists_schedule_out(struct i915_request *rq)
-{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *cur, *old;
- u32 ccid;
-
- trace_i915_request_out(rq);
-
- ccid = rq->context->lrc.ccid;
- old = READ_ONCE(ce->inflight);
- do
- cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
- while (!try_cmpxchg(&ce->inflight, &old, cur));
- if (!cur)
- __execlists_schedule_out(rq, old, ccid);
-
- i915_request_put(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
-{
- struct intel_context *ce = rq->context;
- u64 desc = ce->lrc.desc;
- u32 tail, prev;
-
- /*
- * WaIdleLiteRestore:bdw,skl
- *
- * We should never submit the context with the same RING_TAIL twice
- * just in case we submit an empty ring, which confuses the HW.
- *
- * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
- * the normal request to be able to always advance the RING_TAIL on
- * subsequent resubmissions (for lite restore). Should that fail us,
- * and we try and submit the same tail again, force the context
- * reload.
- *
- * If we need to return to a preempted context, we need to skip the
- * lite-restore and force it to reload the RING_TAIL. Otherwise, the
- * HW has a tendency to ignore us rewinding the TAIL to the end of
- * an earlier request.
- */
- GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
- prev = rq->ring->tail;
- tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
- desc |= CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state[CTX_RING_TAIL] = tail;
- rq->tail = rq->wa_tail;
-
- /*
- * Make sure the context image is complete before we submit it to HW.
- *
- * Ostensibly, writes (including the WCB) should be flushed prior to
- * an uncached write such as our mmio register access, the empirical
- * evidence (esp. on Braswell) suggests that the WC write into memory
- * may not be visible to the HW prior to the completion of the UC
- * register write and that we may begin execution from the context
- * before its image is complete leading to invalid PD chasing.
- */
- wmb();
-
- ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
- return desc;
-}
-
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
-{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
- } else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ if (wa_ctx->indirect_ctx.size) {
+ lrc_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
-static __maybe_unused char *
-dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
- if (!rq)
- return "";
-
- snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
- prefix,
- rq->context->lrc.ccid,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- rq_prio(rq));
-
- return buf;
-}
-
-static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
- const char *msg,
- struct i915_request * const *ports)
-{
- const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- char __maybe_unused p0[40], p1[40];
-
- if (!ports[0])
- return;
-
- ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
- dump_port(p0, sizeof(p0), "", ports[0]),
- dump_port(p1, sizeof(p1), ", ", ports[1]));
-}
-
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
-static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
- const char *msg)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
- struct intel_context *ce = NULL;
- bool sentinel = false;
- u32 ccid = -1;
-
- trace_ports(execlists, msg, execlists->pending);
-
- /* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
- return true;
-
- if (!execlists->pending[0]) {
- GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
- engine->name);
- return false;
- }
-
- if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
- return false;
- }
-
- for (port = execlists->pending; (rq = *port); port++) {
- unsigned long flags;
- bool ok = true;
-
- GEM_BUG_ON(!kref_read(&rq->fence.refcount));
- GEM_BUG_ON(!i915_request_is_active(rq));
-
- if (ce == rq->context) {
- GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ce = rq->context;
-
- if (ccid == ce->lrc.ccid) {
- GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
- engine->name,
- ccid, ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ccid = ce->lrc.ccid;
-
- /*
- * Sentinels are supposed to be the last request so they flush
- * the current execution off the HW. Check that they are the only
- * request in the pending submission.
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
*/
- if (sentinel) {
- GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- sentinel = i915_request_has_sentinel(rq);
-
- /* Hold tightly onto the lock to prevent concurrent retires! */
- if (!spin_trylock_irqsave(&rq->lock, flags))
- continue;
-
- if (i915_request_completed(rq))
- goto unlock;
-
- if (i915_active_is_idle(&ce->active) &&
- !intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!ok)
- return false;
- }
-
- return ce;
-}
-
-static void execlists_submit_ports(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned int n;
-
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
-
- /*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
- */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /*
- * ELSQ note: the submit queue is not cleared after being submitted
- * to the HW so we need to make sure we always clean it up. This is
- * currently ensured by the fact that we always write the same number
- * of elsq entries, keep this in mind before changing the loop below.
- */
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
-
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
- }
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-}
-
-static bool ctx_single_port_submission(const struct intel_context *ce)
-{
- return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- intel_context_force_single_submission(ce));
-}
-
-static bool can_merge_ctx(const struct intel_context *prev,
- const struct intel_context *next)
-{
- if (prev != next)
- return false;
-
- if (ctx_single_port_submission(prev))
- return false;
-
- return true;
-}
-
-static unsigned long i915_request_flags(const struct i915_request *rq)
-{
- return READ_ONCE(rq->fence.flags);
-}
-
-static bool can_merge_rq(const struct i915_request *prev,
- const struct i915_request *next)
-{
- GEM_BUG_ON(prev == next);
- GEM_BUG_ON(!assert_priority_queue(prev, next));
-
- /*
- * We do not submit known completed requests. Therefore if the next
- * request is already completed, we can pretend to merge it in
- * with the previous context (and we will skip updating the ELSP
- * and tracking). Thus hopefully keeping the ELSP full with active
- * contexts, despite the best efforts of preempt-to-busy to confuse
- * us.
- */
- if (i915_request_completed(next))
- return true;
-
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
- (BIT(I915_FENCE_FLAG_NOPREEMPT) |
- BIT(I915_FENCE_FLAG_SENTINEL))))
- return false;
-
- if (!can_merge_ctx(prev->context, next->context))
- return false;
-
- GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
- return true;
-}
-
-static void virtual_update_register_offsets(u32 *regs,
- struct intel_engine_cs *engine)
-{
- set_offsets(regs, reg_offsets(engine), engine, false);
-}
-
-static bool virtual_matches(const struct virtual_engine *ve,
- const struct i915_request *rq,
- const struct intel_engine_cs *engine)
-{
- const struct intel_engine_cs *inflight;
-
- if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
- return false;
-
- /*
- * We track when the HW has completed saving the context image
- * (i.e. when we have seen the final CS event switching out of
- * the context) and must not overwrite the context image before
- * then. This restricts us to only using the active engine
- * while the previous virtualized request is inflight (so
- * we reuse the register offsets). This is a very small
- * hysteresis on the greedy selection algorithm.
- */
- inflight = intel_context_inflight(&ve->context);
- if (inflight && inflight != engine)
- return false;
-
- return true;
-}
-
-static void virtual_xfer_context(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
-{
- unsigned int n;
-
- if (likely(engine == ve->siblings[0]))
- return;
-
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- engine);
-
- /*
- * Move the bound engine to the top of the list for
- * future execution. We then kick this tasklet first
- * before checking others, so that we preferentially
- * reuse this set of bound registers.
- */
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n], ve->siblings[0]);
- break;
- }
+ ASSIGN_CTX_PML4(ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
}
-#define for_each_waiter(p__, rq__) \
- list_for_each_entry_lockless(p__, \
- &(rq__)->sched.waiters_list, \
- wait_link)
-
-#define for_each_signaler(p__, rq__) \
- list_for_each_entry_rcu(p__, \
- &(rq__)->sched.signalers_list, \
- signal_link)
-
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
- LIST_HEAD(list);
-
- /*
- * We want to move the interrupted request to the back of
- * the round-robin list (i.e. its priority level), but
- * in doing so, we must then move all requests that were in
- * flight and were waiting for the interrupted request to
- * be run after it again.
- */
- do {
- struct i915_dependency *p;
-
- GEM_BUG_ON(i915_request_is_active(rq));
- list_move_tail(&rq->sched.link, pl);
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- if (p->flags & I915_DEPENDENCY_WEAK)
- continue;
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- /* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
- i915_request_started(w) &&
- !i915_request_completed(rq));
-
- GEM_BUG_ON(i915_request_is_active(w));
- if (!i915_request_is_ready(w))
- continue;
-
- if (rq_prio(w) < rq_prio(rq))
- continue;
-
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void defer_active(struct intel_engine_cs *engine)
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
- struct i915_request *rq;
-
- rq = __unwind_incomplete_requests(engine);
- if (!rq)
- return;
-
- defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
}
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- const struct rb_node *rb)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- int hint;
-
- if (!intel_engine_has_timeslices(engine))
- return false;
-
- hint = engine->execlists.queue_priority_hint;
-
- if (rb) {
- const struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- const struct intel_engine_cs *inflight =
- intel_context_inflight(&ve->context);
-
- if (!inflight || inflight == engine) {
- struct i915_request *next;
+ int x;
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- hint = max(hint, rq_prio(next));
- rcu_read_unlock();
- }
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- if (!list_is_last(&rq->sched.link, &engine->active.requests))
- hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
- GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
- return hint >= effective_prio(rq);
}
-static bool
-timeslice_yield(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
+static void __lrc_init_regs(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
/*
- * Once bitten, forever smitten!
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
*
- * If the active context ever busy-waited on a semaphore,
- * it will be treated as a hog until the end of its timeslice (i.e.
- * until it is scheduled out and replaced by a new submission,
- * possibly even its own lite-restore). The HW only sends an interrupt
- * on the first miss, and we do not know if that semaphore has been
- * signaled, or even if it is now stuck on another semaphore. Play
- * safe, yield if it might be stuck -- it will be given a fresh
- * timeslice in the near future.
+ * Must keep consistent with virtual_update_register_offsets().
*/
- return rq->context->lrc.ccid == READ_ONCE(el->yield);
-}
-
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
-{
- return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return engine->execlists.queue_priority_hint;
-
- return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
- return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- const struct i915_request *rq = *execlists->active;
-
- if (!rq || i915_request_completed(rq))
- return 0;
-
- if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
- return 0;
-
- return timeslice(engine);
-}
+ if (inhibit)
+ memset(regs, 0, PAGE_SIZE);
-static void set_timeslice(struct intel_engine_cs *engine)
-{
- unsigned long duration;
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (!intel_engine_has_timeslices(engine))
- return;
+ init_common_regs(regs, ce, engine, inhibit);
+ init_ppgtt_regs(regs, vm_alias(ce->vm));
- duration = active_timeslice(engine);
- ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+ init_wa_bb_regs(regs, engine);
- set_timer_ms(&engine->execlists.timer, duration);
+ __reset_stop_ring(regs, engine);
}
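The comment in __lrc_init_regs above describes the register state page as a stream of MI_LOAD_REGISTER_IMM packets followed by (reg, value) pairs. A minimal sketch of walking such a stream, purely illustrative and not part of this patch; the helper name is made up, and the opcode/length masks assume the standard MI command layout (opcode in bits 28:23, dword length minus two in bits 7:0):

	static void sketch_walk_lri(const u32 *regs, unsigned int len)
	{
		unsigned int i = 0;

		/* 0x22 == MI_LOAD_REGISTER_IMM; each pair is 2 dwords */
		while (i < len && (regs[i] >> 23) == 0x22) {
			unsigned int pairs = ((regs[i++] & 0xff) + 1) / 2;

			while (pairs-- && i + 1 < len) {
				pr_info("reg %04x <- %08x\n",
					regs[i], regs[i + 1]);
				i += 2;
			}
		}
	}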
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long duration;
-
- if (!intel_engine_has_timeslices(engine))
- return;
-
- WRITE_ONCE(execlists->switch_priority_hint, prio);
- if (prio == INT_MIN)
- return;
-
- if (timer_pending(&execlists->timer))
- return;
-
- duration = timeslice(engine);
- ENGINE_TRACE(engine,
- "start timeslicing, prio:%d, interval:%lu",
- prio, duration);
-
- set_timer_ms(&execlists->timer, duration);
+ __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ __reset_stop_ring(ce->lrc_reg_state, engine);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
- if (!rq)
- return 0;
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
- /* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
- return 1;
+ vaddr += engine->context_size;
- return READ_ONCE(engine->props.preempt_timeout_ms);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
- if (!intel_engine_has_preempt_reset(engine))
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine, rq));
-}
-
-static inline void clear_ports(struct i915_request **ports, int count)
-{
- memset_p((void **)ports, NULL, count);
-}
+ vaddr += engine->context_size;
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
- /* A memcpy_p() would be very useful here! */
- while (count--)
- WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ drm_err_once(&engine->i915->drm,
+ "%s context redzone overwritten!\n",
+ engine->name);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
- struct i915_request * const *active;
- struct i915_request *last;
- struct rb_node *rb;
- bool submit = false;
-
- /*
- * Hardware submission is through 2 ports. Conceptually each port
- * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
- * static for a context, and unique to each, so we only execute
- * requests belonging to a single context from each ring. RING_HEAD
- * is maintained by the CS in the context image; it marks the place
- * where it got up to last time, and through RING_TAIL we tell the CS
- * where we want to execute up to this time.
- *
- * In this list the requests are in order of execution. Consecutive
- * requests from the same context are adjacent in the ringbuffer. We
- * can combine these requests into a single RING_TAIL update:
- *
- * RING_HEAD...req1...req2
- * ^- RING_TAIL
- * since to execute req2 the CS must first execute req1.
- *
- * Our goal then is to point each port to the end of a consecutive
- * sequence of requests as the optimal (fewest wake ups
- * and context switches) submission.
- */
-
- for (rb = rb_first_cached(&execlists->virtual); rb; ) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (!rq) { /* lazily cleanup after another engine handled rq */
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
- if (!virtual_matches(ve, rq, engine)) {
- rb = rb_next(rb);
- continue;
- }
-
- break;
- }
-
- /*
- * If the queue is higher priority than the last
- * request in the currently active context, submit afresh.
- * We will resubmit again afterwards in case we need to split
- * the active context to interject the preemption request,
- * i.e. we will retrigger preemption following the ack in case
- * of trouble.
- */
- active = READ_ONCE(execlists->active);
-
- /*
- * In theory we can skip over completed contexts that have not
- * yet been processed by events (as those events are in flight):
- *
- * while ((last = *active) && i915_request_completed(last))
- * active++;
- *
- * However, the GPU cannot handle this as it will ultimately
- * find itself trying to jump back into a context it has just
- * completed and barf.
- */
-
- if ((last = *active)) {
- if (need_preempt(engine, last, rb)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "preempting last=%llx:%lld, prio=%d, hint=%d\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
- record_preemption(execlists);
-
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
- /*
- * Note that we have not stopped the GPU at this point,
- * so we are unwinding the incomplete requests as they
- * remain inflight and so by the time we do complete
- * the preemption, some of the unwound requests may
- * complete!
- */
- __unwind_incomplete_requests(engine);
-
- last = NULL;
- } else if (need_timeslice(engine, last, rb) &&
- timeslice_expired(execlists, last)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint,
- yesno(timeslice_yield(execlists, last)));
-
- ring_set_paused(engine, 1);
- defer_active(engine);
-
- /*
- * Unlike for preemption, if we rewind and continue
- * executing the same context as previously active,
- * the order of execution will remain the same and
- * the tail will only advance. We do not need to
- * force a full context restore, as a lite-restore
- * is sufficient to resample the monotonic TAIL.
- *
- * If we switch to any other context, similarly we
- * will not rewind TAIL of current context, and
- * normal save/restore will preserve state and allow
- * us to later continue executing the same request.
- */
- last = NULL;
- } else {
- /*
- * Otherwise if we already have a request pending
- * for execution after the current one, we can
- * just wait until the next CS event before
- * queuing more. In either case we will force a
- * lite-restore preemption event, but if we wait
- * we hopefully coalesce several updates into a single
- * submission.
- */
- if (!list_is_last(&last->sched.link,
- &engine->active.requests)) {
- /*
- * Even if ELSP[1] is occupied and not worthy
- * of timeslices, our queue might be.
- */
- start_timeslice(engine, queue_prio(execlists));
- return;
- }
- }
- }
-
- while (rb) { /* XXX virtual is always taking precedence */
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq;
-
- spin_lock(&ve->base.active.lock);
-
- rq = ve->request;
- if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.active.lock);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
+ bool inhibit = true;
- GEM_BUG_ON(rq != ve->request);
- GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->context != &ve->context);
-
- if (rq_prio(rq) >= queue_prio(execlists)) {
- if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_next(rb);
- continue;
- }
-
- if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
- start_timeslice(engine, rq_prio(rq));
- return; /* leave this for another sibling */
- }
-
- ENGINE_TRACE(engine,
- "virtual rq=%llx:%lld%s, new engine? %s\n",
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
-
- WRITE_ONCE(ve->request, NULL);
- WRITE_ONCE(ve->base.execlists.queue_priority_hint,
- INT_MIN);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- WRITE_ONCE(rq->engine, engine);
-
- if (__i915_request_submit(rq)) {
- /*
- * Only after we confirm that we will submit
- * this request (i.e. it has not already
- * completed), do we want to update the context.
- *
- * This serves two purposes. It avoids
- * unnecessary work if we are resubmitting an
- * already completed request after timeslicing.
- * But more importantly, it prevents us altering
- * ve->siblings[] on an idle context, where
- * we may be using ve->siblings[] in
- * virtual_context_enter / virtual_context_exit.
- */
- virtual_xfer_context(ve, engine);
- GEM_BUG_ON(ve->siblings[0] != engine);
-
- submit = true;
- last = rq;
- }
- i915_request_put(rq);
-
- /*
- * Hmm, we have a bunch of virtual engine requests,
- * but the first one was already completed (thanks
- * preempt-to-busy!). Keep looking at the veng queue
- * until we have no more relevant requests (i.e.
- * the normal submit queue has higher priority).
- */
- if (!submit) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
- }
+ set_redzone(state, engine);
- spin_unlock(&ve->base.active.lock);
- break;
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ state, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
}
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- struct i915_request *rq, *rn;
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- bool merge = true;
-
- /*
- * Can we combine this request with the current port?
- * It has to be the same context/ringbuffer and not
- * have any exceptions (e.g. GVT saying never to
- * combine contexts).
- *
- * If we can combine the requests, we can execute both
- * by updating the RING_TAIL to point to the end of the
- * second request, and so we never need to tell the
- * hardware about the first.
- */
- if (last && !can_merge_rq(last, rq)) {
- /*
- * If we are on the second port and cannot
- * combine this request with the last, then we
- * are done.
- */
- if (port == last_port)
- goto done;
-
- /*
- * We must not populate both ELSP[] with the
- * same LRCA, i.e. we must submit 2 different
- * contexts if we submit 2 ELSP.
- */
- if (last->context == rq->context)
- goto done;
-
- if (i915_request_has_sentinel(last))
- goto done;
-
- /*
- * If GVT overrides us we only ever submit
- * port[0], leaving port[1] empty. Note that we
- * also have to be careful that we don't queue
- * the same context (even though a different
- * request) to the second port.
- */
- if (ctx_single_port_submission(last->context) ||
- ctx_single_port_submission(rq->context))
- goto done;
-
- merge = false;
- }
-
- if (__i915_request_submit(rq)) {
- if (!merge) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- port++;
- last = NULL;
- }
-
- GEM_BUG_ON(last &&
- !can_merge_ctx(last->context,
- rq->context));
- GEM_BUG_ON(last &&
- i915_seqno_passed(last->fence.seqno,
- rq->fence.seqno));
-
- submit = true;
- last = rq;
- }
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(state, 0, PAGE_SIZE);
-done:
/*
- * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
- *
- * We choose the priority hint such that if we add a request of greater
- * priority than this, we kick the submission tasklet to decide on
- * the right order of submitting the requests to hardware. We must
- * also be prepared to reorder requests as they are in-flight on the
- * HW. We derive the priority hint then as the first "hole" in
- * the HW submission ports and if there are no available slots,
- * the priority of the lowest executing request, i.e. last.
- *
- * When we do receive a higher priority request ready to run from the
- * user, see queue_request(), the priority hint is bumped to that
- * request triggering preemption on the next dequeue (or subsequent
- * interrupt for secondary ports).
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
*/
- execlists->queue_priority_hint = queue_prio(execlists);
-
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
-
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
-
- WRITE_ONCE(execlists->yield, -1);
- set_preempt_timeout(engine, *active);
- execlists_submit_ports(engine);
- } else {
- start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
- ring_set_paused(engine, 0);
- }
+ __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_request * const *port;
-
- for (port = execlists->pending; *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
-
- /* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
-
- smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
-}
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 context_size;
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-/*
- * Starting with Gen12, the status has a new format:
- *
- * bit 0: switched to new queue
- * bit 1: reserved
- * bit 2: semaphore wait mode (poll or signal), only valid when
- * switch detail is set to "wait on semaphore"
- * bits 3-5: engine class
- * bits 6-11: engine instance
- * bits 12-14: reserved
- * bits 15-25: sw context id of the lrc the GT switched to
- * bits 26-31: sw counter of the lrc the GT switched to
- * bits 32-35: context switch detail
- * - 0: ctx complete
- * - 1: wait on sync flip
- * - 2: wait on vblank
- * - 3: wait on scanline
- * - 4: wait on semaphore
- * - 5: context preempted (not on SEMAPHORE_WAIT or
- * WAIT_FOR_EVENT)
- * bit 36: reserved
- * bits 37-43: wait detail (for switch detail 1 to 4)
- * bits 44-46: reserved
- * bits 47-57: sw context id of the lrc the GT switched away from
- * bits 58-63: sw counter of the lrc the GT switched away from
- */
-static inline bool gen12_csb_parse(const u64 csb)
-{
- bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
- bool new_queue =
- lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
- /*
- * The context switch detail is not guaranteed to be 5 when a preemption
- * occurs, so we can't just check for that. The check below works for
- * all the cases we care about, including preemptions of WAIT
- * instructions and lite-restore. Preempt-to-idle via the CTRL register
- * would require some extra handling, but we don't support that.
- */
- if (!ctx_away_valid || new_queue) {
- GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
- return true;
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
}
- /*
- * switch detail = 5 is covered by the case above and we do not expect a
- * context switch on an unsuccessful wait instruction since we always
- * use polling mode.
- */
- GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
- return false;
-}
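A sketch (not part of this patch) of pulling the documented fields out of a Gen12 CSB entry; the struct and helper are hypothetical, with bit positions taken from the layout comment above:

	struct gen12_csb_fields {
		u32 to_ctx_id;   /* bits 15-25: sw context id switched to */
		u32 to_counter;  /* bits 26-31: sw counter switched to */
		u32 detail;      /* bits 32-35: context switch detail */
		u32 away_ctx_id; /* bits 47-57: sw context id switched from */
	};

	static struct gen12_csb_fields sketch_decode_csb(u64 csb)
	{
		return (struct gen12_csb_fields) {
			.to_ctx_id   = (lower_32_bits(csb) >> 15) & GENMASK(10, 0),
			.to_counter  = (lower_32_bits(csb) >> 26) & GENMASK(5, 0),
			.detail      =  upper_32_bits(csb)        & GENMASK(3, 0),
			.away_ctx_id = (upper_32_bits(csb) >> 15) & GENMASK(10, 0),
		};
	}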
-
-static inline bool gen8_csb_parse(const u64 csb)
-{
- return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
-}
-
-static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry;
-
- /*
- * Reading from the HWSP has one particular advantage: we can detect
- * a stale entry. Since the write into HWSP is broken, we have no reason
- * to trust the HW at all, the mmio entry may equally be unordered, so
- * we prefer the path that is self-checking and as a last resort,
- * return the mmio value.
- *
- * tgl,dg1:HSDES#22011327657
- */
- preempt_disable();
- if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
- int idx = csb - engine->execlists.csb_status;
- int status;
-
- status = GEN8_EXECLISTS_STATUS_BUF;
- if (idx >= 6) {
- status = GEN11_EXECLISTS_STATUS_BUF2;
- idx -= 6;
- }
- status += sizeof(u64) * idx;
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- entry = intel_uncore_read64(engine->uncore,
- _MMIO(engine->mmio_base + status));
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
}
- preempt_enable();
-
- return entry;
-}
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry = READ_ONCE(*csb);
-
- /*
- * Unfortunately, the GPU does not always serialise its write
- * of the CSB entries before its write of the CSB pointer, at least
- * from the perspective of the CPU, using what is known as a Global
- * Observation Point. We may read a new CSB tail pointer, but then
- * read the stale CSB entries, causing us to misinterpret the
- * context-switch events, and eventually declare the GPU hung.
- *
- * icl:HSDES#1806554093
- * tgl:HSDES#22011248461
- */
- if (unlikely(entry == -1))
- entry = wa_csb_read(engine, csb);
-
- /* Consume this entry so that we can spot its future reuse. */
- WRITE_ONCE(*csb, -1);
-
- /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
- return entry;
+ return vma;
}
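As a worked example of the sizing logic in __lrc_alloc_state above (illustrative numbers only, assuming 4KiB pages, CONFIG_DRM_I915_DEBUG_GEM=y and a Gen12 engine with an 18KiB context image):

	context_size = round_up(18 << 10, I915_GTT_PAGE_SIZE);	/* 20KiB: 5 pages */
	context_size += I915_GTT_PAGE_SIZE;	/* redzone page -> 24KiB */
	ce->wa_bb_page = context_size / PAGE_SIZE;	/* wa_bb at page index 6 */
	context_size += PAGE_SIZE;	/* -> 28KiB backing object */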
-static void process_csb(struct intel_engine_cs *engine)
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
- u8 head, tail;
-
- /*
- * As we modify our execlists state tracking we require exclusive
- * access. Either we are inside the tasklet, or the tasklet is disabled
- * and we assume that is only inside the reset paths and so serialised.
- */
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
-
- /*
- * Note that csb_write, csb_status may be either in HWSP or mmio.
- * When reading from the csb_write mmio register, we have to be
- * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
- * the low 4 bits. As it happens we know the next 4 bits are always
- * zero and so we can simply mask off the low u8 of the register
- * and treat it identically to reading from the HWSP (without having
- * to use explicit shifting and masking, and probably bifurcating
- * the code to handle the legacy mmio read).
- */
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
- if (unlikely(head == tail))
- return;
-
- /*
- * We will consume all events from HW, or at least pretend to.
- *
- * The sequence of events from the HW is deterministic, and derived
- * from our writes to the ELSP, with a smidgen of variability for
- * the arrival of the asynchronous requests wrt the inflight
- * execution. If the HW sends an event that does not correspond with
- * the one we are expecting, we have to abandon all hope as we lose
- * all tracking of what the engine is actually executing. We will
- * only detect we are out of sequence with the HW when we get an
- * 'impossible' event because we have already drained our own
- * preemption/promotion queue. If this occurs, we know that we likely
- * lost track of execution earlier and must unwind and restart; the
- * simplest way is to stop processing the event queue and force the
- * engine to reset.
- */
- execlists->csb_head = tail;
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
-
- /*
- * Hopefully paired with a wmb() in HW!
- *
- * We must complete the read of the write pointer before any reads
- * from the CSB, so that we do not see stale values. Without an rmb
- * (lfence) the HW may speculatively perform the CSB[] reads *before*
- * we perform the READ_ONCE(*csb_write).
- */
- rmb();
- do {
- bool promote;
- u64 csb;
-
- if (++head == num_entries)
- head = 0;
-
- /*
- * We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
- */
-
- csb = csb_read(engine, buf + head);
- ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
- head, upper_32_bits(csb), lower_32_bits(csb));
-
- if (INTEL_GEN(engine->i915) >= 12)
- promote = gen12_csb_parse(csb);
- else
- promote = gen8_csb_parse(csb);
- if (promote) {
- struct i915_request * const *old = execlists->active;
-
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- ring_set_paused(engine, 0);
-
- /* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
- smp_wmb(); /* notify execlists_active() */
-
- /* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
- while (*old)
- execlists_schedule_out(*old++);
-
- /* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
- smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
-
- /* XXX Magic delay for tgl */
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- WRITE_ONCE(execlists->pending[0], NULL);
- } else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- /* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt is processed. One might assume
- * that, since the breadcrumb write occurs before the
- * user interrupt and the user interrupt before the CS
- * event for the context switch, the breadcrumb would
- * therefore be visible before the CS event itself...
- */
- if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
- const u32 *regs __maybe_unused =
- rq->context->lrc_reg_state;
-
- ENGINE_TRACE(engine,
- "context completed before request!\n");
- ENGINE_TRACE(engine,
- "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
- ENGINE_READ(engine, RING_START),
- ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
- ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_MI_MODE));
- ENGINE_TRACE(engine,
- "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
- i915_ggtt_offset(rq->ring->vma),
- rq->head, rq->tail,
- rq->fence.context,
- lower_32_bits(rq->fence.seqno),
- hwsp_seqno(rq));
- ENGINE_TRACE(engine,
- "ctx:{start:%08x, head:%04x, tail:%04x}, ",
- regs[CTX_RING_START],
- regs[CTX_RING_HEAD],
- regs[CTX_RING_TAIL]);
- }
-
- execlists_schedule_out(*execlists->active++);
-
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
- }
- } while (head != tail);
-
- set_timeslice(engine);
-
- /*
- * Gen11 has proven to fail wrt global observation point between
- * entry and tail update, failing on the ordering and thus
- * we see an old entry in the context status buffer.
- *
- * Forcibly evict the entries before the next gpu csb update,
- * to increase the odds that we get fresh entries from non-
- * working hardware. The cost of doing so comes out mostly in
- * the wash as the hardware, working or not, will need to do the
- * invalidation before.
- */
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
-}
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
-{
- lockdep_assert_held(&engine->active.lock);
- if (!READ_ONCE(engine->execlists.pending[0])) {
- rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
- rcu_read_unlock();
- }
+ return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
-static void __execlists_hold(struct i915_request *rq)
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- if (i915_request_is_active(rq))
- __i915_request_unsubmit(rq);
-
- clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- list_move_tail(&rq->sched.link, &rq->engine->active.hold);
- i915_request_set_hold(rq);
- RQ_TRACE(rq, "on hold\n");
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_is_ready(w))
- continue;
-
- if (i915_request_completed(w))
- continue;
-
- if (i915_request_on_hold(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+ int err;
-static bool execlists_hold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- if (i915_request_on_hold(rq))
- return false;
+ GEM_BUG_ON(ce->state);
- spin_lock_irq(&engine->active.lock);
+ vma = __lrc_alloc_state(ce, engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (i915_request_completed(rq)) { /* too late! */
- rq = NULL;
- goto unlock;
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_vma;
}
- if (rq->engine != engine) { /* preempted virtual engine */
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
-
- /*
- * intel_context_inflight() is only protected by virtue
- * of process_csb() being called only by the tasklet (or
- * directly from inside reset while the tasklet is suspended).
- * Assert that neither of those are allowed to run while we
- * poke at the request queues.
- */
- GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+ if (!page_mask_bits(ce->timeline)) {
+ struct intel_timeline *tl;
/*
- * An unsubmitted request along a virtual engine will
- * remain on the active (this) engine until we are able
- * to process the context switch away (and so mark the
- * context as no longer in flight). That cannot have happened
- * yet, otherwise we would not be hanging!
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
*/
- spin_lock(&ve->base.active.lock);
- GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
- GEM_BUG_ON(ve->request != rq);
- ve->request = NULL;
- spin_unlock(&ve->base.active.lock);
- i915_request_put(rq);
-
- rq->engine = engine;
- }
-
- /*
- * Transfer this request onto the hold queue to prevent it
- * being resubmitted to HW (and potentially completed) before we have
- * released it. Since we may have already submitted following
- * requests, we need to remove those as well.
- */
- GEM_BUG_ON(i915_request_on_hold(rq));
- GEM_BUG_ON(rq->engine != engine);
- __execlists_hold(rq);
- GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
- spin_unlock_irq(&engine->active.lock);
- return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
- struct i915_dependency *p;
- bool result = false;
-
- /*
- * If one of our ancestors is on hold, we must also be on hold,
- * otherwise we will bypass it and execute before it.
- */
- rcu_read_lock();
- for_each_signaler(p, rq) {
- const struct i915_request *s =
- container_of(p->signaler, typeof(*s), sched);
-
- if (s->engine != rq->engine)
- continue;
-
- result = i915_request_on_hold(s);
- if (result)
- break;
- }
- rcu_read_unlock();
-
- return result;
-}
-
-static void __execlists_unhold(struct i915_request *rq)
-{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- RQ_TRACE(rq, "hold release\n");
-
- GEM_BUG_ON(!i915_request_on_hold(rq));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
- i915_request_clear_hold(rq);
- list_move_tail(&rq->sched.link,
- i915_sched_lookup_priolist(rq->engine,
- rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Also release any children on this engine that are ready */
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_on_hold(w))
- continue;
-
- /* Check that no other parents are also on hold */
- if (hold_request(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void execlists_unhold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- spin_lock_irq(&engine->active.lock);
-
- /*
- * Move this request back to the priority queue, and all of its
- * children and grandchildren that were suspended along with it.
- */
- __execlists_unhold(rq);
-
- if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
-
- spin_unlock_irq(&engine->active.lock);
-}
-
-struct execlists_capture {
- struct work_struct work;
- struct i915_request *rq;
- struct i915_gpu_coredump *error;
-};
-
-static void execlists_capture_work(struct work_struct *work)
-{
- struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
- struct intel_engine_cs *engine = cap->rq->engine;
- struct intel_gt_coredump *gt = cap->error->gt;
- struct intel_engine_capture_vma *vma;
-
- /* Compress all the objects attached to the request, slow! */
- vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
- if (vma) {
- struct i915_vma_compress *compress =
- i915_vma_capture_prepare(gt);
-
- intel_engine_coredump_add_vma(gt->engine, vma, compress);
- i915_vma_capture_finish(gt, compress);
- }
-
- gt->simulated = gt->engine->simulated;
- cap->error->simulated = gt->simulated;
-
- /* Publish the error state, and announce it to the world */
- i915_error_state_store(cap->error);
- i915_gpu_coredump_put(cap->error);
-
- /* Return this request and all that depend upon it for signaling */
- execlists_unhold(engine, cap->rq);
- i915_request_put(cap->rq);
-
- kfree(cap);
-}
-
-static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
-{
- const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
- struct execlists_capture *cap;
-
- cap = kmalloc(sizeof(*cap), gfp);
- if (!cap)
- return NULL;
-
- cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
- if (!cap->error)
- goto err_cap;
-
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
- if (!cap->error->gt)
- goto err_gpu;
-
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
- if (!cap->error->gt->engine)
- goto err_gt;
-
- cap->error->gt->engine->hung = true;
-
- return cap;
-
-err_gt:
- kfree(cap->error->gt);
-err_gpu:
- kfree(cap->error);
-err_cap:
- kfree(cap);
- return NULL;
-}
-
-static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
-{
- const struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request * const *port, *rq;
-
- /*
- * Use the most recent result from process_csb(), but just in case
- * we trigger an error (via interrupt) before the first CS event has
- * been written, peek at the next submission.
- */
-
- for (port = el->active; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at active:%zd\n",
- port - el->active);
- return rq;
- }
- }
-
- for (port = el->pending; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at pending:%zd\n",
- port - el->pending);
- return rq;
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce, engine);
+ else
+ tl = intel_timeline_create(engine->gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_ring;
}
- }
-
- ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
- return NULL;
-}
-
-static u32 active_ccid(struct intel_engine_cs *engine)
-{
- return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
-}
-
-static void execlists_capture(struct intel_engine_cs *engine)
-{
- struct execlists_capture *cap;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return;
-
- /*
- * We need to _quickly_ capture the engine state before we reset.
- * We are inside an atomic section (softirq) here and we are delaying
- * the forced preemption event.
- */
- cap = capture_regs(engine);
- if (!cap)
- return;
- spin_lock_irq(&engine->active.lock);
- cap->rq = active_context(engine, active_ccid(engine));
- if (cap->rq) {
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
+ ce->timeline = tl;
}
- spin_unlock_irq(&engine->active.lock);
- if (!cap->rq)
- goto err_free;
-
- /*
- * Remove the request from the execlists queue, and take ownership
- * of the request. We pass it to our worker who will _slowly_ compress
- * all the pages the _user_ requested for debugging their batch, after
- * which we return it to the queue for signaling.
- *
- * By removing them from the execlists queue, we also remove the
- * requests from being processed by __unwind_incomplete_requests()
- * during the intel_engine_reset(), and so they will *not* be replayed
- * afterwards.
- *
- * Note that because we have not yet reset the engine at this point,
- * it is possible that the request we have identified as being
- * guilty did in fact complete and we will then hit an arbitration
- * point allowing the outstanding preemption to succeed. The likelihood
- * of that is very low (as capturing of the engine registers should be
- * fast enough to run inside an irq-off atomic section!), so we will
- * simply hold that request accountable for being non-preemptible
- * long enough to force the reset.
- */
- if (!execlists_hold(engine, cap->rq))
- goto err_rq;
-
- INIT_WORK(&cap->work, execlists_capture_work);
- schedule_work(&cap->work);
- return;
-
-err_rq:
- i915_request_put(cap->rq);
-err_free:
- i915_gpu_coredump_put(cap->error);
- kfree(cap);
-}
-
-static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
-{
- const unsigned int bit = I915_RESET_ENGINE + engine->id;
- unsigned long *lock = &engine->gt->reset.flags;
- if (!intel_has_reset_engine(engine->gt))
- return;
-
- if (test_and_set_bit(bit, lock))
- return;
-
- ENGINE_TRACE(engine, "reset for %s\n", msg);
-
- /* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ ce->ring = ring;
+ ce->state = vma;
- ring_set_paused(engine, 1); /* Freeze the current request in place */
- execlists_capture(engine);
- intel_engine_reset(engine, msg);
+ return 0;
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+err_ring:
+ intel_ring_put(ring);
+err_vma:
+ i915_vma_put(vma);
+ return err;
}
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
+void lrc_reset(struct intel_context *ce)
{
- const struct timer_list *t = &engine->execlists.preempt;
-
- if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
- return false;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- if (!timer_expired(t))
- return false;
+ intel_ring_reset(ce->ring, ce->ring->emit);
- return READ_ONCE(engine->execlists.pending[0]);
+ /* Scrub away the garbage */
+ lrc_init_regs(ce, ce->engine, true);
+ ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- bool timeout = preempt_timeout(engine);
-
- process_csb(engine);
-
- if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
- const char *msg;
-
- /* Generate the error message in priority wrt to the user! */
- if (engine->execlists.error_interrupt & GENMASK(15, 0))
- msg = "CS error"; /* thrown by a user payload */
- else if (engine->execlists.error_interrupt & ERROR_CSB)
- msg = "invalid CSB event";
- else
- msg = "internal error";
-
- engine->execlists.error_interrupt = 0;
- execlists_reset(engine, msg);
- }
-
- if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
- unsigned long flags;
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915) |
+ I915_MAP_OVERRIDE);
- /* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine))) {
- cancel_timer(&engine->execlists.preempt);
- execlists_reset(engine, "preemption time out");
- }
- }
+ return PTR_ERR_OR_ZERO(*vaddr);
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
{
- /* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
-}
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
-#define execlists_kick(t, member) \
- __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ lrc_init_state(ce, engine, vaddr);
-static void execlists_timeslice(struct timer_list *timer)
-{
- execlists_kick(timer, timer);
-}
-
-static void execlists_preempt(struct timer_list *timer)
-{
- execlists_kick(timer, preempt);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+ return 0;
}
-static void queue_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void lrc_unpin(struct intel_context *ce)
{
- GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link,
- i915_sched_lookup_priolist(engine, rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+ ce->engine);
}
-static void __submit_queue_imm(struct intel_engine_cs *engine)
+void lrc_post_unpin(struct intel_context *ce)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (reset_in_progress(execlists))
- return; /* defer until we restart the engine following reset */
-
- __execlists_submission_tasklet(engine);
+ i915_gem_object_unpin_map(ce->state->obj);
}
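Taken together, lrc_pre_pin/lrc_pin and lrc_unpin/lrc_post_unpin form symmetric halves of the pin lifecycle. A usage sketch, with the call order inferred from the naming and bodies above rather than spelled out by this patch:

	err = lrc_pre_pin(ce, engine, ww, &vaddr);	/* map the state object */
	if (err)
		return err;
	err = lrc_pin(ce, engine, vaddr);	/* one-time init, refresh regs/lrca */
	...
	lrc_unpin(ce);		/* verify the redzone is intact */
	lrc_post_unpin(ce);	/* unmap the state object */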
-static void submit_queue(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+void lrc_fini(struct intel_context *ce)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- if (rq_prio(rq) <= execlists->queue_priority_hint)
+ if (!ce->state)
return;
- execlists->queue_priority_hint = rq_prio(rq);
- __submit_queue_imm(engine);
+ intel_ring_put(fetch_and_zero(&ce->ring));
+ i915_vma_put(fetch_and_zero(&ce->state));
}
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
-{
- GEM_BUG_ON(i915_request_on_hold(rq));
- return !list_empty(&engine->active.hold) && hold_request(rq);
-}
-
-static void flush_csb(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *el = &engine->execlists;
-
- if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
- if (!reset_in_progress(el))
- process_csb(engine);
- tasklet_unlock(&el->tasklet);
- }
-}
-
-static void execlists_submit_request(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
- /* Hopefully we clear execlists->pending[] to let us through */
- flush_csb(engine);
-
- /* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- if (unlikely(ancestor_on_hold(engine, request))) {
- RQ_TRACE(request, "ancestor on hold\n");
- list_add_tail(&request->sched.link, &engine->active.hold);
- i915_request_set_hold(request);
- } else {
- queue_request(engine, request);
-
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
-
- submit_queue(engine, request);
- }
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __execlists_context_fini(struct intel_context *ce)
-{
- intel_ring_put(ce->ring);
- i915_vma_put(ce->state);
-}
-
-static void execlists_context_destroy(struct kref *kref)
+void lrc_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->state)
- __execlists_context_fini(ce);
+ lrc_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
-static void
-set_redzone(void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
-}
-
-static void
-check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- drm_err_once(&engine->i915->drm,
- "%s context redzone overwritten!\n",
- engine->name);
-}
-
-static void execlists_context_unpin(struct intel_context *ce)
-{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
- ce->engine);
-}
-
-static void execlists_context_post_unpin(struct intel_context *ce)
-{
- i915_gem_object_unpin_map(ce->state->obj);
-}
-
static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
@@ -3465,7 +1035,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return cs;
}
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
{
return PAGE_SIZE * ce->wa_bb_page;
}
@@ -3496,16 +1066,57 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
- lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
- i915_ggtt_offset(ce->state) +
- context_wa_bb_offset(ce),
- (cs - start) * sizeof(*cs));
+ lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
}
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head)
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48-53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+ u32 desc;
+
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(ce->vm->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ return i915_ggtt_offset(ce->state) | desc;
+}
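To make the encoding concrete, a worked example (illustrative assumptions: a 4-level ppGTT context, not a Gen8 part, state object at GGTT offset 0x00801000; the numeric values are what the macros expand to at the time of writing):

	desc = INTEL_LEGACY_64B_CONTEXT;		/* 3 */
	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;	/* 3 << 3 = 0x18 */
	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;	/* | 0x1 | 0x100 = 0x119 */
	desc |= 0x00801000;				/* LRCA -> 0x00801119 */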
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -3537,205 +1148,54 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
}
-}
-static int
-execlists_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww, void **vaddr)
-{
- GEM_BUG_ON(!ce->state);
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- *vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
- I915_MAP_OVERRIDE);
-
- return PTR_ERR_OR_ZERO(*vaddr);
-}
-
-static int
-__execlists_context_pin(struct intel_context *ce,
- struct intel_engine_cs *engine,
- void *vaddr)
-{
- ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
- __execlists_update_reg_state(ce, engine, ce->ring->tail);
-
- return 0;
-}
-
-static int execlists_context_pin(struct intel_context *ce, void *vaddr)
-{
- return __execlists_context_pin(ce, ce->engine, vaddr);
+ return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}
-static int execlists_context_alloc(struct intel_context *ce)
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- return __execlists_context_alloc(ce, ce->engine);
+ set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}
-static void execlists_context_reset(struct intel_context *ce)
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when)
{
- CE_TRACE(ce, "reset\n");
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- intel_ring_reset(ce->ring, ce->ring->emit);
-
- /* Scrub away the garbage */
- execlists_init_reg_state(ce->lrc_reg_state,
- ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
-
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static const struct intel_context_ops execlists_context_ops = {
- .alloc = execlists_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = execlists_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = intel_context_enter_engine,
- .exit = intel_context_exit_engine,
-
- .reset = execlists_context_reset,
- .destroy = execlists_context_destroy,
-};
-
-static u32 hwsp_offset(const struct i915_request *rq)
-{
- const struct intel_timeline_cacheline *cl;
-
- /* Before the request is executed, the timeline/cacheline is fixed */
-
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
-}
-
-static int gen8_emit_init_breadcrumb(struct i915_request *rq)
-{
- u32 *cs;
-
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
- if (!i915_request_timeline(rq)->has_initial_breadcrumb)
- return 0;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Check if we have been preempted before we even get started.
- *
- * After this point i915_request_started() reports true, even if
- * we get preempted and so are no longer running.
- */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = hwsp_offset(rq);
- *cs++ = 0;
- *cs++ = rq->fence.seqno - 1;
-
- intel_ring_advance(rq, cs);
-
- /* Record the updated position of the request's payload */
- rq->infix = intel_ring_offset(rq, cs);
-
- __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
-
- return 0;
-}
-
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
}
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int execlists_request_alloc(struct i915_request *request)
-{
- int ret;
-
- GEM_BUG_ON(!intel_context_is_pinned(request->context));
-
- /*
- * Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- /*
- * Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- if (!i915_vm_is_4lvl(request->context->vm)) {
- ret = emit_pdps(request);
- if (ret)
- return ret;
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
}
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- if (ret)
- return ret;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
+ WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}
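/*
 * [Reviewer sketch, not part of the patch] RING_MI_MODE is a "masked"
 * register: the high 16 bits of a write select which of the low 16 bits
 * take effect. The STOP_RING fixup in lrc_check_regs() above is the
 * open-coded form of the driver's _MASKED_BIT_DISABLE(); a plain-C model
 * (hypothetical helper name):
 */
static inline u32 masked_bit_disable(u32 reg, u32 bit)
{
	reg &= ~bit;       /* clear the value bit */
	reg |= bit << 16;  /* arm the write-enable for that bit */
	return reg;
}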
/*
@@ -3955,7 +1415,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
@@ -3963,7 +1423,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3985,30 +1445,34 @@ err:
return err;
}
-static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind, clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
-static int intel_init_workaround_bb(struct intel_engine_cs *engine)
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
- &wa_ctx->per_ctx };
- wa_bb_func_t wa_bb_fn[2];
+ struct i915_wa_ctx_bb *wa_bb[] = {
+ &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+ };
+ wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
void *batch, *batch_ptr;
unsigned int i;
- int ret;
+ int err;
if (engine->class != RENDER_CLASS)
- return 0;
+ return;
switch (INTEL_GEN(engine->i915)) {
case 12:
case 11:
- return 0;
+ return;
case 10:
wa_bb_fn[0] = gen10_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -4023,14 +1487,20 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
- return 0;
+ return;
}
- ret = lrc_setup_wa_ctx(engine);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to setup context WA page: %d\n", ret);
- return ret;
+ err = lrc_setup_wa_ctx(engine);
+ if (err) {
+ /*
+ * We continue even if we fail to initialize the WA batch
+ * because we expect only rare glitches, nothing critical
+ * enough to prevent us from using the GPU
+ */
+ drm_err(&engine->i915->drm,
+ "Ignoring context switch w/a allocation error:%d\n",
+ err);
+ return;
}
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
@@ -4045,2104 +1515,52 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
CACHELINE_BYTES))) {
- ret = -EINVAL;
+ err = -EINVAL;
break;
}
if (wa_bb_fn[i])
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
- GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
- if (ret)
- lrc_destroy_wa_ctx(engine);
-
- return ret;
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
-
- ring_set_paused(engine, 0);
-
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
-
- /* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
-
- /* Once more for luck and our trusty paranoia */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
-}
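/*
 * [Reviewer sketch, not part of the patch] With csb_head parked at
 * size - 1 above, draining the CSB is a matter of chasing the HW write
 * pointer from our cached head, wrapping modulo the buffer size, so the
 * first entry examined after a reset is entry 0:
 */
static inline void csb_drain_sketch(u8 *head, u8 write, u8 size)
{
	while (*head != write) {
		*head = (*head + 1) % size;
		/* process csb_status[*head] here */
	}
}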
-
-static void execlists_sanitize(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(execlists_active(&engine->execlists));
-
- /*
- * Poison residual state on resume, in case the suspend didn't!
- *
- * We have to assume that across suspend/resume (or other loss
- * of control) the contents of our pinned buffers have been
- * lost, replaced by garbage. Since this doesn't always happen,
- * let's poison such state so that we more quickly spot when
- * we falsely assume it has been preserved.
- */
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
-
- reset_csb_pointers(engine);
-
- /*
- * The kernel_context HWSP is stored in the status_page. As above,
- * that may be lost on resume/initialisation, and so we need to
- * reset the value in the HWSP.
- */
- intel_timeline_reset_seqno(engine->kernel_context->timeline);
-
- /* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
-}
-
-static void enable_error_interrupt(struct intel_engine_cs *engine)
-{
- u32 status;
-
- engine->execlists.error_interrupt = 0;
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
-
- status = ENGINE_READ(engine, RING_ESR);
- if (unlikely(status)) {
- drm_err(&engine->i915->drm,
- "engine '%s' resumed still in error: %08x\n",
- engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
- }
-
- /*
- * On current gen8+, we have 2 signals to play with
- *
- * - I915_ERROR_INSTRUCTION (bit 0)
- *
- * Generate an error if the command parser encounters an invalid
- * instruction
- *
- * This is a fatal error.
- *
- * - CP_PRIV (bit 2)
- *
- * Generate an error on privilege violation (where the CP replaces
- * the instruction with a no-op). This also fires for writes into
- * read-only scratch pages.
- *
- * This is a non-fatal error, parsing continues.
- *
- * - there are a few others defined for odd HW that we do not use
- *
- * Since CP_PRIV fires for cases where we have chosen to ignore the
- * error (as the HW is validating and suppressing the mistakes), we
- * only unmask the instruction error bit.
- */
- ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
-}
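/*
 * [Reviewer sketch, not part of the patch] RING_EIR is write-1-to-clear,
 * so the ~0u write above acks every latched error; RING_EMR then masks
 * which ESR bits may raise an interrupt. A plain-C model of the W1C
 * behaviour:
 */
static inline u32 w1c_write(u32 latched, u32 written)
{
	return latched & ~written; /* each written 1 clears that bit */
}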
-
-static void enable_execlists(struct intel_engine_cs *engine)
-{
- u32 mode;
-
- assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
-
- intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-
- if (INTEL_GEN(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
- else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
- ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
-
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
-
- ENGINE_WRITE_FW(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
- ENGINE_POSTING_READ(engine, RING_HWS_PGA);
-
- enable_error_interrupt(engine);
-
- engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-}
-
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
-static int execlists_resume(struct intel_engine_cs *engine)
-{
- intel_mocs_init_engine(engine);
-
- intel_breadcrumbs_reset(engine->breadcrumbs);
-
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
- enable_execlists(engine);
-
- return 0;
-}
-static void execlists_reset_prepare(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->resume() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- */
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
-
- /* And flush any current direct submission. */
- spin_lock_irqsave(&engine->active.lock, flags);
- spin_unlock_irqrestore(&engine->active.lock, flags);
-
- /*
- * We stop the engines, otherwise we might get a failed reset and a
- * dead gpu (on elk). Even a gpu as modern as kbl can suffer
- * from a system hang if a batchbuffer is progressing when
- * the reset is issued, regardless of the READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- ring_set_paused(engine, 1);
- intel_engine_stop_cs(engine);
-
- engine->execlists.reset_ccid = active_ccid(engine);
-}
-
-static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1) {
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- }
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
-
- __reset_stop_ring(regs, engine);
+ /* Verify that we can handle failure to setup the wa_ctx */
+ if (err || i915_inject_probe_error(engine->i915, -ENODEV))
+ lrc_fini_wa_ctx(engine);
}
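/*
 * [Reviewer note, not part of the patch] i915_inject_probe_error() is a
 * debug/selftest fault-injection hook: in ordinary builds it reports no
 * fault, so the extra condition above only exercises the
 * lrc_fini_wa_ctx() unwind when injection is armed. The pattern, in
 * self-contained form:
 */
static inline int inject_probe_error_sketch(bool armed, int err)
{
	return armed ? err : 0; /* 0 == no injected fault */
}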
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct intel_context *ce;
- struct i915_request *rq;
- u32 head;
-
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
-
- process_csb(engine); /* drain preemption events */
-
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(engine);
-
- /*
- * Save the currently executing context, even if we completed
- * its request, it was still running at the time of the
- * reset and will have been clobbered.
- */
- rq = active_context(engine, engine->execlists.reset_ccid);
- if (!rq)
- goto unwind;
-
- ce = rq->context;
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- if (i915_request_completed(rq)) {
- /* Idle context; tidy up the ring so we can restart afresh */
- head = intel_ring_wrap(ce->ring, rq->tail);
- goto out_replay;
- }
-
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /* Context has requests still in-flight; it should not be idle! */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
-
- rq = active_request(ce->timeline, rq);
- head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(head == ce->ring->tail);
-
- /*
- * If this request hasn't started yet, e.g. it is waiting on a
- * semaphore, we need to avoid skipping the request or else we
- * break the signaling chain. However, if the context is corrupt
- * the request will not restart and we will be stuck with a wedged
- * device. It is quite often the case that if we issue a reset
- * while the GPU is loading the context image, that the context
- * image becomes corrupt.
- *
- * Otherwise, if we have not started yet, the request should replay
- * perfectly and we do not need to flag the result as being erroneous.
- */
- if (!i915_request_started(rq))
- goto out_replay;
-
- /*
- * If the request was innocent, we leave the request in the ELSP
- * and will try to replay it on restarting. The context image may
- * have been corrupted by the reset, in which case we may have
- * to service a new GPU hang, but more likely we can continue on
- * without impact.
- *
- * If the request was guilty, we presume the context is corrupt
- * and have to at least restore the RING register in the context
- * image back to the expected values to skip over the guilty request.
- */
- __i915_request_reset(rq, stalled);
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
-out_replay:
- ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- head, ce->ring->tail);
- __execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-
-unwind:
- /* Push back any incomplete requests for replay after the reset. */
- cancel_port_requests(execlists);
- __unwind_incomplete_requests(engine);
-}
-
-static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
-{
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, stalled);
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-
- /* The driver is wedged; don't process any more events. */
- WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
-}
-
-static void execlists_reset_cancel(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, true);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
- intel_engine_signal_breadcrumbs(engine);
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- mark_eio(rq);
- __i915_request_submit(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &engine->active.hold, sched.link)
- mark_eio(rq);
-
- /* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- spin_lock(&ve->base.active.lock);
- rq = fetch_and_zero(&ve->request);
- if (rq) {
- mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
- i915_request_put(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- }
- spin_unlock(&ve->base.active.lock);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void execlists_reset_finish(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- /*
- * After a GPU reset, we may have requests to replay. Do so now while
- * we still have the forcewake to be sure that the GPU is not allowed
- * to sleep before we restart and reload a context.
- */
- GEM_BUG_ON(!reset_in_progress(execlists));
- if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
- execlists->tasklet.func(execlists->tasklet.data);
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
-}
-
-static int gen8_emit_bb_start_noarb(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * WaDisableCtxRestoreArbitration:bdw,chv
- *
- * We would not need to perform MI_ARB_ENABLE as often as we do (in
- * particular on all the gens that do not need the w/a at all!) if we
- * took care to make sure that on every switch into this context
- * (both ordinary and for preemption) arbitration was enabled; we
- * would then be fine. However, for gen8 there is another w/a that
- * requires us to not preempt inside GPGPU execution, so we keep
- * arbitration disabled for gen8 batches. Arbitration will be
- * re-enabled before we close the request
- * (engine->emit_fini_breadcrumb).
- */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- /* FIXME(BDW+): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- ENGINE_POSTING_READ(engine, RING_IMR);
-}
-
-static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-}
-
-static int gen8_emit_flush(struct i915_request *request, u32 mode)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(request, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- intel_ring_advance(request, cs);
-
- return 0;
-}
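/*
 * [Reviewer note, not part of the patch] The "+ 1" on MI_FLUSH_DW above
 * bumps the command's dword-length field so it covers the four dwords
 * emitted: opcode/flags, post-sync address, upper address, immediate.
 * With MI_FLUSH_DW_STORE_INDEX set, the address is an offset into the
 * per-process HWSP (LRC_PPHWSP_SCRATCH_ADDR) rather than a GGTT address.
 */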
-
-static int gen8_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- bool vf_flush_wa = false, dc_flush_wa = false;
- u32 *cs, flags = 0;
- int len;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
-
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- /*
- * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
- * pipe control.
- */
- if (IS_GEN(request->engine->i915, 9))
- vf_flush_wa = true;
-
- /* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
- dc_flush_wa = true;
- }
-
- len = 6;
-
- if (vf_flush_wa)
- len += 6;
-
- if (dc_flush_wa)
- len += 12;
-
- cs = intel_ring_begin(request, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (vf_flush_wa)
- cs = gen8_emit_pipe_control(cs, 0, 0);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen11_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static u32 preparser_disable(bool state)
-{
- return MI_ARB_CHECK | 1 << 8 | state;
-}
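/*
 * [Reviewer note, not part of the patch] On gen12, MI_ARB_CHECK doubles
 * as the pre-parser control: bit 8 selects the pre-parser form and bit 0
 * carries the disable state. The flush emitters below bracket their
 * invalidations with it:
 *
 *	*cs++ = preparser_disable(true);  // stop pre-fetch past this point
 *	... emit TLB / aux-table invalidations ...
 *	*cs++ = preparser_disable(false); // resume pre-fetching
 */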
-
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
-{
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv_reg\n");
-
- return INVALID_MMIO_REG;
-}
-
-static u32 *
-gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(inv_reg);
- *cs++ = AUX_INV;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static int gen12_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_FLUSH_L3;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /* Wa_1409600907:tgl */
- flags |= PIPE_CONTROL_DEPTH_STALL;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen12_emit_pipe_control(cs,
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 8 + 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Prevent the pre-parser from skipping past the TLB
- * invalidate and loading a stale page for the batch
- * buffer / request payload.
- */
- *cs++ = preparser_disable(true);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
-
- *cs++ = preparser_disable(false);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static int gen12_emit_flush(struct i915_request *request, u32 mode)
-{
- intel_engine_mask_t aux_inv = 0;
- u32 cmd, *cs;
-
- cmd = 4;
- if (mode & EMIT_INVALIDATE)
- cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = request->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight8(aux_inv) + 2;
-
- cs = intel_ring_begin(request, cmd);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(true);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
-
- if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
- for_each_engine_masked(engine, request->engine->gt,
- aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
- }
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(false);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static void assert_request_valid(struct i915_request *rq)
-{
- struct intel_ring *ring __maybe_unused = rq->ring;
-
- /* Can we unwind this request without appearing to go forwards? */
- GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
-}
-
-/*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
-static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
-{
- /* Ensure there's always at least one preemption point per-request. */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request, cs);
-
- /* Check that entire request is less than half the ring */
- assert_request_valid(request);
-
- return cs;
-}
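/*
 * [Reviewer note, not part of the patch] The two padding dwords give
 * every request a second tail: rq->tail ends the breadcrumb, while
 * rq->wa_tail sits 8 bytes later, past the MI_ARB_CHECK + MI_NOOP.
 * Resubmitting an idle context with wa_tail guarantees HEAD != TAIL,
 * which is what WaIdleLiteRestore requires for a lite restore.
 */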
-
-static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
-
- return cs;
-}
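/*
 * [Reviewer note, not part of the patch] MI_SEMAPHORE_WAIT | POLL |
 * SAD_EQ_SDD parks the CS until the dword at the given GGTT address
 * equals the inline data (0 here). Since ring_set_paused() writes a
 * non-zero value into the preempt semaphore, this is effectively:
 *
 *	while (*intel_hws_preempt_address(engine) != 0)
 *		; // CS-side busywait until the pause is released
 */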
-
-static __always_inline u32*
-gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
-}
-
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
-}
-
-static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static u32 *
-gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-/*
- * Note that the CS instruction pre-parser will not stall on the breadcrumb
- * flush and will continue pre-fetching the instructions after it before the
- * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
- * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
- * of the next request before the memory has been flushed, we're guaranteed that
- * we won't access the batch itself too early.
- * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
- * so, if the current request is modifying an instruction in the next request on
- * the same intel_context, we might pre-fetch and then execute the pre-update
- * instruction. To avoid this, the users of self-modifying code should either
- * disable the parser around the code emitting the memory writes, via a new flag
- * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
- * the in-kernel use-cases we've opted to use a separate context, see
- * reloc_gpu() as an example.
- * All the above applies only to the instructions themselves. Non-inline data
- * used by the instructions is not pre-fetched.
- */
-
-static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static __always_inline u32*
-gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = gen12_emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- /* XXX Stalling flush before seqno write; post-sync not */
- cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
- return gen12_emit_fini_breadcrumb_tail(rq, cs);
-}
-
-static u32 *
-gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen12_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen12_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static void execlists_park(struct intel_engine_cs *engine)
-{
- cancel_timer(&engine->execlists.timer);
- cancel_timer(&engine->execlists.preempt);
-}
-
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->submit_request = execlists_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (INTEL_GEN(engine->i915) >= 12)
- engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
-}
-
-static void execlists_shutdown(struct intel_engine_cs *engine)
-{
- /* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
-}
-
-static void execlists_release(struct intel_engine_cs *engine)
-{
- engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
-
- execlists_shutdown(engine);
-
- intel_engine_cleanup_common(engine);
- lrc_destroy_wa_ctx(engine);
-}
-
-static void
-logical_ring_default_vfuncs(struct intel_engine_cs *engine)
-{
- /* Default vfuncs which can be overriden by each engine. */
-
- engine->resume = execlists_resume;
-
- engine->cops = &execlists_context_ops;
- engine->request_alloc = execlists_request_alloc;
-
- engine->emit_flush = gen8_emit_flush;
- engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12) {
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
- engine->emit_flush = gen12_emit_flush;
- }
- engine->set_default_submission = intel_execlists_set_default_submission;
-
- if (INTEL_GEN(engine->i915) < 11) {
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
- } else {
- /*
- * TODO: On Gen11 interrupt masks need to be clear
- * to allow C6 entry. Keep interrupts enabled and
- * take the hit of generating extra interrupts
- * until a more refined solution exists.
- */
- }
-}
-
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
-{
- unsigned int shift = 0;
-
- if (INTEL_GEN(engine->i915) < 11) {
- const u8 irq_shifts[] = {
- [RCS0] = GEN8_RCS_IRQ_SHIFT,
- [BCS0] = GEN8_BCS_IRQ_SHIFT,
- [VCS0] = GEN8_VCS0_IRQ_SHIFT,
- [VCS1] = GEN8_VCS1_IRQ_SHIFT,
- [VECS0] = GEN8_VECS_IRQ_SHIFT,
- };
-
- shift = irq_shifts[engine->id];
- }
-
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
-}
-
-static void rcs_submission_override(struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- case 12:
- engine->emit_flush = gen12_emit_flush_render;
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
- break;
- case 11:
- engine->emit_flush = gen11_emit_flush_render;
- engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
- break;
- default:
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- break;
- }
-}
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
-
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine);
-
- if (engine->class == RENDER_CLASS)
- rcs_submission_override(engine);
-
- if (intel_init_workaround_bb(engine))
- /*
- * We continue even if we fail to initialize the WA batch
- * because we expect only rare glitches, nothing critical
- * enough to prevent us from using the GPU
- */
- drm_err(&i915->drm, "WA batch buffer initialization failed\n");
-
- if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
- } else {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_ELSP(base));
- }
-
- execlists->csb_status =
- (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
- execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
- if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
- else
- execlists->csb_size = GEN11_CSB_ENTRIES;
-
- if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
- }
-
- /* Finally, take ownership and responsibility for cleanup! */
- engine->sanitize = execlists_sanitize;
- engine->release = execlists_release;
-
- return 0;
-}
-
-static void init_common_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- u32 ctl;
-
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (inhibit)
- ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
- if (INTEL_GEN(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
- regs[CTX_CONTEXT_CONTROL] = ctl;
-
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_TIMESTAMP] = 0;
-}
-
-static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine)
-{
- const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
-
- if (wa_ctx->per_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
- regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
-
- if (wa_ctx->indirect_ctx.size) {
- lrc_ring_setup_indirect_ctx(regs, engine,
- i915_ggtt_offset(wa_ctx->vma) +
- wa_ctx->indirect_ctx.offset,
- wa_ctx->indirect_ctx.size);
- }
-}
-
-static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
-{
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- /* 64b PPGTT (48bit canonical)
- * PDP0_DESCRIPTOR contains the base address to PML4 and
- * other PDP Descriptors are ignored.
- */
- ASSIGN_CTX_PML4(ppgtt, regs);
- } else {
- ASSIGN_CTX_PDP(ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ppgtt, regs, 0);
- }
-}
-
-static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
-{
- if (i915_is_ggtt(vm))
- return i915_vm_to_ggtt(vm)->alias;
- else
- return i915_vm_to_ppgtt(vm);
-}
-
-static void execlists_init_reg_state(u32 *regs,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- set_offsets(regs, reg_offsets(engine), engine, inhibit);
-
- init_common_reg_state(regs, engine, ring, inhibit);
- init_ppgtt_reg_state(regs, vm_alias(ce->vm));
-
- init_wa_bb_reg_state(regs, engine);
-
- __reset_stop_ring(regs, engine);
-}
-
-static int
-populate_lr_context(struct intel_context *ce,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- bool inhibit = true;
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
- return PTR_ERR(vaddr);
- }
-
- set_redzone(vaddr, engine);
-
- if (engine->default_state) {
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
- inhibit = false;
- }
-
- /* Clear the ppHWSP (inc. per-context counters) */
- memset(vaddr, 0, PAGE_SIZE);
-
- /*
- * The second page of the context object contains some registers which
- * must be set up prior to the first execution.
- */
- execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
- ce, engine, ring, inhibit);
-
- __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
- i915_gem_object_unpin_map(ctx_obj);
- return 0;
-}
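/*
 * [Reviewer sketch, not part of the patch] Rough layout of the context
 * image populated above:
 *
 *	page 0:   per-process HWSP, cleared (inc. per-context counters)
 *	page 1+:  register state at LRC_STATE_OFFSET, seeded from the
 *	          engine's default state when one exists
 *	trailing: gen12 wa_bb page and/or debug redzone, when allocated
 */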
-
-static struct intel_timeline *pinned_timeline(struct intel_context *ce)
-{
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-
- return intel_timeline_create_from_engine(ce->engine,
- page_unmask_bits(tl));
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj;
- struct intel_ring *ring;
- struct i915_vma *vma;
- u32 context_size;
- int ret;
-
- GEM_BUG_ON(ce->state);
- context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- context_size += I915_GTT_PAGE_SIZE; /* for redzone */
-
- if (INTEL_GEN(engine->i915) == 12) {
- ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
- }
-
- ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
- if (IS_ERR(ctx_obj))
- return PTR_ERR(ctx_obj);
-
- vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto error_deref_obj;
- }
-
- if (!page_mask_bits(ce->timeline)) {
- struct intel_timeline *tl;
-
- /*
- * Use the static global HWSP for the kernel context, and
- * a dynamically allocated cacheline for everyone else.
- */
- if (unlikely(ce->timeline))
- tl = pinned_timeline(ce);
- else
- tl = intel_timeline_create(engine->gt);
- if (IS_ERR(tl)) {
- ret = PTR_ERR(tl);
- goto error_deref_obj;
- }
-
- ce->timeline = tl;
- }
-
- ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error_deref_obj;
- }
-
- ret = populate_lr_context(ce, ctx_obj, engine, ring);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
- }
-
- ce->ring = ring;
- ce->state = vma;
-
- return 0;
-
-error_ring_free:
- intel_ring_put(ring);
-error_deref_obj:
- i915_gem_object_put(ctx_obj);
- return ret;
-}
-
-static struct list_head *virtual_queue(struct virtual_engine *ve)
-{
- return &ve->base.execlists.default_priolist.requests[0];
-}
-
-static void rcu_virtual_context_destroy(struct work_struct *wrk)
-{
- struct virtual_engine *ve =
- container_of(wrk, typeof(*ve), rcu.work);
- unsigned int n;
-
- GEM_BUG_ON(ve->context.inflight);
-
- /* Preempt-to-busy may leave a stale request behind. */
- if (unlikely(ve->request)) {
- struct i915_request *old;
-
- spin_lock_irq(&ve->base.active.lock);
-
- old = fetch_and_zero(&ve->request);
- if (old) {
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- spin_unlock_irq(&ve->base.active.lock);
- }
-
- /*
- * Flush the tasklet in case it is still running on another core.
- *
- * This needs to be done before we remove ourselves from the siblings'
- * rbtrees as in the case it is running in parallel, it may reinsert
- * the rb_node into a sibling.
- */
- tasklet_kill(&ve->base.execlists.tasklet);
-
- /* Decouple ourselves from the siblings, no more access allowed. */
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
- struct rb_node *node = &ve->nodes[sibling->id].rb;
-
- if (RB_EMPTY_NODE(node))
- continue;
-
- spin_lock_irq(&sibling->active.lock);
-
- /* Detachment is lazily performed in the execlists tasklet */
- if (!RB_EMPTY_NODE(node))
- rb_erase_cached(node, &sibling->execlists.virtual);
-
- spin_unlock_irq(&sibling->active.lock);
- }
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-
- if (ve->context.state)
- __execlists_context_fini(&ve->context);
- intel_context_fini(&ve->context);
-
- intel_breadcrumbs_free(ve->base.breadcrumbs);
- intel_engine_free_request_pool(&ve->base);
-
- kfree(ve->bonds);
- kfree(ve);
-}
-
-static void virtual_context_destroy(struct kref *kref)
-{
- struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
-
- GEM_BUG_ON(!list_empty(&ve->context.signals));
-
- /*
- * When destroying the virtual engine, we have to be aware that
- * it may still be in use from a hardirq/softirq context causing
- * the resubmission of a completed request (background completion
- * due to preempt-to-busy). Before we can free the engine, we need
- * to flush the submission code and tasklets that are still potentially
- * accessing the engine. Flushing the tasklets requires process context,
- * and since we can guard the resubmit onto the engine with an RCU read
- * lock, we can delegate the free of the engine to an RCU worker.
- */
- INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
- queue_rcu_work(system_wq, &ve->rcu);
-}
-
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
-{
- int swp;
-
- /*
- * Pick a random sibling on starting to help spread the load around.
- *
- * New contexts are typically created with exactly the same order
- * of siblings, and often started in batches. Due to the way we iterate
- * the array of siblings when submitting requests, sibling[0] is
- * prioritised for dequeuing. If we make sure that sibling[0] is fairly
- * randomised across the system, we also help spread the load by the
- * first engine we inspect being different each time.
- *
- * NB This does not force us to execute on this engine, it will just
- * typically be the first we inspect for submission.
- */
- swp = prandom_u32_max(ve->num_siblings);
- if (swp)
- swap(ve->siblings[swp], ve->siblings[0]);
-}
-
-static int virtual_context_alloc(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- return __execlists_context_alloc(ce, ve->siblings[0]);
-}
-
-static int virtual_context_pin(struct intel_context *ce, void *vaddr)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- /* Note: we must use a real engine class for setting up reg state */
- return __execlists_context_pin(ce, ve->siblings[0], vaddr);
-}
-
-static void virtual_context_enter(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_get(ve->siblings[n]);
-
- intel_timeline_enter(ce->timeline);
-}
-
-static void virtual_context_exit(struct intel_context *ce)
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- intel_timeline_exit(ce->timeline);
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_put(ve->siblings[n]);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow++;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
}
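/*
 * [Reviewer sketch, not part of the patch] The runtime accounting that
 * feeds st_update_runtime_underflow() computes its delta with unsigned
 * subtraction, so CTX_TIMESTAMP wraparound is handled and a negative
 * result signals an underflow:
 */
static inline s32 runtime_delta_sketch(u32 old, u32 now)
{
	return (s32)(now - old); /* wrap-safe; < 0 => underflow */
}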
-static const struct intel_context_ops virtual_context_ops = {
- .alloc = virtual_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = virtual_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = virtual_context_enter,
- .exit = virtual_context_exit,
-
- .destroy = virtual_context_destroy,
-};
-
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+void lrc_update_runtime(struct intel_context *ce)
{
- struct i915_request *rq;
- intel_engine_mask_t mask;
-
- rq = READ_ONCE(ve->request);
- if (!rq)
- return 0;
-
- /* The rq is ready for submission; rq->execution_mask is now stable. */
- mask = rq->execution_mask;
- if (unlikely(!mask)) {
- /* Invalid selection, submit to a random engine in error */
- i915_request_set_error_once(rq, -ENODEV);
- mask = ve->siblings[0]->mask;
- }
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
-
- return mask;
-}
+ u32 old;
+ s32 dt;
-static void virtual_submission_tasklet(unsigned long data)
-{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
- intel_engine_mask_t mask;
- unsigned int n;
-
- rcu_read_lock();
- mask = virtual_submission_mask(ve);
- rcu_read_unlock();
- if (unlikely(!mask))
+ if (intel_context_is_barrier(ce))
return;
- local_irq_disable();
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
- struct ve_node * const node = &ve->nodes[sibling->id];
- struct rb_node **parent, *rb;
- bool first;
-
- if (!READ_ONCE(ve->request))
- break; /* already handled by a sibling's tasklet */
-
- if (unlikely(!(mask & sibling->mask))) {
- if (!RB_EMPTY_NODE(&node->rb)) {
- spin_lock(&sibling->active.lock);
- rb_erase_cached(&node->rb,
- &sibling->execlists.virtual);
- RB_CLEAR_NODE(&node->rb);
- spin_unlock(&sibling->active.lock);
- }
- continue;
- }
-
- spin_lock(&sibling->active.lock);
-
- if (!RB_EMPTY_NODE(&node->rb)) {
- /*
- * Cheat and avoid rebalancing the tree if we can
- * reuse this node in situ.
- */
- first = rb_first_cached(&sibling->execlists.virtual) ==
- &node->rb;
- if (prio == node->prio || (prio > node->prio && first))
- goto submit_engine;
-
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
- }
-
- rb = NULL;
- first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
- while (*parent) {
- struct ve_node *other;
-
- rb = *parent;
- other = rb_entry(rb, typeof(*other), rb);
- if (prio > other->prio) {
- parent = &rb->rb_left;
- } else {
- parent = &rb->rb_right;
- first = false;
- }
- }
-
- rb_link_node(&node->rb, rb, parent);
- rb_insert_color_cached(&node->rb,
- &sibling->execlists.virtual,
- first);
-
-submit_engine:
- GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
- node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
-
- spin_unlock(&sibling->active.lock);
- }
- local_irq_enable();
-}
-
-static void virtual_submit_request(struct i915_request *rq)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- struct i915_request *old;
- unsigned long flags;
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
- rq->fence.context,
- rq->fence.seqno);
-
- GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
-
- spin_lock_irqsave(&ve->base.active.lock, flags);
-
- old = ve->request;
- if (old) { /* background completion event from preempt-to-busy */
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- if (i915_request_completed(rq)) {
- __i915_request_submit(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
- } else {
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- ve->request = i915_request_get(rq);
-
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- list_move_tail(&rq->sched.link, virtual_queue(ve));
-
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
- }
-
- spin_unlock_irqrestore(&ve->base.active.lock, flags);
-}
-
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
- const struct intel_engine_cs *master)
-{
- int i;
-
- for (i = 0; i < ve->num_bonds; i++) {
- if (ve->bonds[i].master == master)
- return &ve->bonds[i];
- }
-
- return NULL;
-}
-
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- intel_engine_mask_t allowed, exec;
- struct ve_bond *bond;
-
- allowed = ~to_request(signal)->engine->mask;
-
- bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond)
- allowed &= bond->sibling_mask;
-
- /* Restrict the bonded request to run on only the available engines */
- exec = READ_ONCE(rq->execution_mask);
- while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
- ;
-
- /* Prevent the master from being re-run on the bonded engines */
- to_request(signal)->execution_mask &= ~allowed;
-}
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
-{
- struct virtual_engine *ve;
- unsigned int n;
- int err;
-
- if (count == 0)
- return ERR_PTR(-EINVAL);
-
- if (count == 1)
- return intel_context_create(siblings[0]);
-
- ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
- if (!ve)
- return ERR_PTR(-ENOMEM);
-
- ve->base.i915 = siblings[0]->i915;
- ve->base.gt = siblings[0]->gt;
- ve->base.uncore = siblings[0]->uncore;
- ve->base.id = -1;
-
- ve->base.class = OTHER_CLASS;
- ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
- ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
-
- /*
- * The decision on whether to submit a request using semaphores
- * depends on the saturated state of the engine. We only compute
- * this during HW submission of the request, and we need for this
- * state to be globally applied to all requests being submitted
- * to this engine. Virtual engines encompass more than one physical
- * engine and so we cannot accurately tell in advance if one of those
- * engines is already saturated and so cannot afford to use a semaphore
- * and be pessimized in priority for doing so -- if we are the only
- * context using semaphores after all other clients have stopped, we
- * will be starved on the saturated system. Such a global switch for
- * semaphores is less than ideal, but alas is the current compromise.
- */
- ve->base.saturated = ALL_ENGINES;
-
- snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
-
- intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
- intel_engine_init_execlists(&ve->base);
-
- ve->base.cops = &virtual_context_ops;
- ve->base.request_alloc = execlists_request_alloc;
-
- ve->base.schedule = i915_schedule;
- ve->base.submit_request = virtual_submit_request;
- ve->base.bond_execute = virtual_bond_execute;
-
- INIT_LIST_HEAD(virtual_queue(ve));
- ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
-
- intel_context_init(&ve->context, &ve->base);
-
- ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
- if (!ve->base.breadcrumbs) {
- err = -ENOMEM;
- goto err_put;
- }
-
- for (n = 0; n < count; n++) {
- struct intel_engine_cs *sibling = siblings[n];
-
- GEM_BUG_ON(!is_power_of_2(sibling->mask));
- if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
- err = -EINVAL;
- goto err_put;
- }
-
- /*
- * The virtual engine implementation is tightly coupled to
- * the execlists backend -- we push out request directly
- * into a tree inside each physical engine. We could support
- * layering if we handle cloning of the requests and
- * submitting a copy into each backend.
- */
- if (sibling->execlists.tasklet.func !=
- execlists_submission_tasklet) {
- err = -ENODEV;
- goto err_put;
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
- RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
-
- ve->siblings[ve->num_siblings++] = sibling;
- ve->base.mask |= sibling->mask;
-
- /*
- * All physical engines must be compatible for their emission
- * functions (as we build the instructions during request
- * construction and do not alter them before submission
- * on the physical engine). We use the engine class as a guide
- * here, although that could be refined.
- */
- if (ve->base.class != OTHER_CLASS) {
- if (ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
- err = -EINVAL;
- goto err_put;
- }
- continue;
- }
-
- ve->base.class = sibling->class;
- ve->base.uabi_class = sibling->uabi_class;
- snprintf(ve->base.name, sizeof(ve->base.name),
- "v%dx%d", ve->base.class, count);
- ve->base.context_size = sibling->context_size;
-
- ve->base.emit_bb_start = sibling->emit_bb_start;
- ve->base.emit_flush = sibling->emit_flush;
- ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
- ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
- ve->base.emit_fini_breadcrumb_dw =
- sibling->emit_fini_breadcrumb_dw;
-
- ve->base.flags = sibling->flags;
- }
-
- ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
-
- virtual_engine_initial_hint(ve);
- return &ve->context;
-
-err_put:
- intel_context_put(&ve->context);
- return ERR_PTR(err);
-}
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
- struct virtual_engine *se = to_virtual_engine(src);
- struct intel_context *dst;
-
- dst = intel_execlists_create_virtual(se->siblings,
- se->num_siblings);
- if (IS_ERR(dst))
- return dst;
-
- if (se->num_bonds) {
- struct virtual_engine *de = to_virtual_engine(dst->engine);
-
- de->bonds = kmemdup(se->bonds,
- sizeof(*se->bonds) * se->num_bonds,
- GFP_KERNEL);
- if (!de->bonds) {
- intel_context_put(dst);
- return ERR_PTR(-ENOMEM);
- }
-
- de->num_bonds = se->num_bonds;
- }
-
- return dst;
-}
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling)
-{
- struct virtual_engine *ve = to_virtual_engine(engine);
- struct ve_bond *bond;
- int n;
-
- /* Sanity check the sibling is part of the virtual engine */
- for (n = 0; n < ve->num_siblings; n++)
- if (sibling == ve->siblings[n])
- break;
- if (n == ve->num_siblings)
- return -EINVAL;
-
- bond = virtual_find_bond(ve, master);
- if (bond) {
- bond->sibling_mask |= sibling->mask;
- return 0;
- }
-
- bond = krealloc(ve->bonds,
- sizeof(*bond) * (ve->num_bonds + 1),
- GFP_KERNEL);
- if (!bond)
- return -ENOMEM;
-
- bond[ve->num_bonds].master = master;
- bond[ve->num_bonds].sibling_mask = sibling->mask;
-
- ve->bonds = bond;
- ve->num_bonds++;
-
- return 0;
-}
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- struct i915_request *rq, *last;
- unsigned long flags;
- unsigned int count;
- struct rb_node *rb;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tE ");
- }
-
- if (execlists->switch_priority_hint != INT_MIN)
- drm_printf(m, "\t\tSwitch priority hint: %d\n",
- READ_ONCE(execlists->switch_priority_hint));
- if (execlists->queue_priority_hint != INT_MIN)
- drm_printf(m, "\t\tQueue priority hint: %d\n",
- READ_ONCE(execlists->queue_priority_hint));
-
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tQ ");
- }
+ old = ce->runtime.last;
+ ce->runtime.last = lrc_get_runtime(ce);
+ dt = ce->runtime.last - old;
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (rq) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tV ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d virtual requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tV ");
+ if (unlikely(dt < 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
- if (scrub)
- restore_default_state(ce, engine);
-
- /* Rerun the request; its payload has been neutered (if guilty). */
- __execlists_update_reg_state(ce, engine, head);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- intel_execlists_set_default_submission;
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
}
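
/*
 * Editorial sketch (not part of the patch): CTX_TIMESTAMP is a free-running
 * 32-bit counter, so the delta above is computed with wrap-safe unsigned
 * subtraction and then interpreted as signed; a genuinely negative result
 * indicates the context image was reset (the underflow traced above) rather
 * than ordinary wrap-around.  Hypothetical helper:
 */
static s32 runtime_delta(u32 last, u32 now)
{
	return (s32)(now - last);	/* valid for deltas below 2^31 ticks */
}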
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2d287f25497..7f697845c4cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -1,90 +1,20 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_LRC_H_
-#define _INTEL_LRC_H_
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
#include <linux/types.h>
-struct drm_printer;
+#include "intel_context.h"
+#include "intel_lrc_reg.h"
-struct drm_i915_private;
-struct i915_gem_context;
-struct i915_request;
-struct intel_context;
+struct drm_i915_gem_object;
struct intel_engine_cs;
+struct intel_ring;
-/* Execlists regs */
-#define RING_ELSP(base) _MMIO((base) + 0x230)
-#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
-#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
-#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
-#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
-#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
-#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
-#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
-
-#define EL_CTRL_LOAD (1 << 0)
-
-/* The docs specify that the write pointer wraps around after 5h, "After status
- * is written out to the last available status QW at offset 5h, this pointer
- * wraps to 0."
- *
- * Therefore, one must infer than even though there are 3 bits available, 6 and
- * 7 appear to be * reserved.
- */
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x7
-#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
-#define GEN11_CSB_ENTRIES 12
-#define GEN11_CSB_PTR_MASK 0xf
-#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
-#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
-
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
-
-enum {
- INTEL_CONTEXT_SCHEDULE_IN = 0,
- INTEL_CONTEXT_SCHEDULE_OUT,
- INTEL_CONTEXT_SCHEDULE_PREEMPTED,
-};
-
-/* Logical Rings */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-
-/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
@@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SCRATCH 0x34
#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub);
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max);
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling);
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
-#endif /* _INTEL_LRC_H_ */
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3..65fe76738335 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
@@ -52,4 +54,43 @@
#define GEN8_EXECLISTS_STATUS_BUF 0x370
#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+/* Execlists regs */
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
#endif /* _INTEL_LRC_REG_H_ */
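
/*
 * Editorial sketch (assumption, not part of the patch): the CSB masks above
 * are consumed by splitting RING_CONTEXT_STATUS_PTR into its read pointer
 * (bits 15:8) and write pointer (bits 7:0):
 */
static inline void gen8_csb_unpack(u32 ptr, u8 *read, u8 *write)
{
	*read = (ptr & GEN8_CSB_READ_PTR_MASK) >> 8;
	*write = ptr & GEN8_CSB_WRITE_PTR_MASK;
}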
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index ab6870242e18..8acb84960cd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -24,8 +24,8 @@
#include "intel_engine.h"
#include "intel_gt.h"
+#include "intel_lrc_reg.h"
#include "intel_mocs.h"
-#include "intel_lrc.h"
#include "intel_ring.h"
/* structures required */
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64c..96b85a10ef33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
kfree(pt);
}
-static inline void
+static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc2..35504c97f11d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
return rc6_to_gt(rc)->i915;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 40d8f1a95df6..60393ce5614d 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -95,10 +95,10 @@ region_lmem_init(struct intel_memory_region *mem)
return ret;
}
-const struct intel_memory_region_ops intel_region_lmem_ops = {
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
- .create_object = __i915_gem_lmem_object_create,
+ .init_object = __i915_gem_lmem_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 213def7c7b8a..8ea43e538dab 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -8,8 +8,6 @@
struct drm_i915_private;
-extern const struct intel_memory_region_ops intel_region_lmem_ops;
-
struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b469..ca816ba22197 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,7 +27,8 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-#include "gt/intel_context.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
static const struct intel_renderstate_rodata *
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3654c955e6be..61410cd62927 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
-static void engine_skip_context(struct i915_request *rq)
+static void skip_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
struct intel_context *hung_ctx = rq->context;
- if (!i915_request_is_active(rq))
- return;
+ list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+ if (!i915_request_is_active(rq))
+ return;
- lockdep_assert_held(&engine->active.lock);
- list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->context == hung_ctx) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
+ }
}
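
/*
 * Editorial sketch (hypothetical, not part of the patch): the
 * list_for_each_entry_from_rcu() walk in skip_context() above continues
 * from @rq's current position in the timeline, so only requests queued
 * after the hung one are considered, and the walk stops at the first
 * request not yet submitted to hardware.  The pattern, with the RCU
 * read lock the caller (__i915_request_reset()) already provides:
 */
static void skip_tail_of(struct i915_request *rq, struct list_head *requests)
{
	rcu_read_lock();
	list_for_each_entry_from_rcu(rq, requests, link)
		i915_request_set_error_once(rq, -EIO);
	rcu_read_unlock();
}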
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -152,15 +151,14 @@ static void mark_innocent(struct i915_request *rq)
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
@@ -231,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -239,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -265,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -276,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -305,9 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- drm_dbg(&gt->i915->drm,
- "Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -407,8 +405,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- drm_dbg(&engine->i915->drm,
- "Wait for SFC forced lock ack failed\n");
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -499,6 +496,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
u32 request, mask, ack;
int ret;
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
@@ -754,8 +754,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
+ local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
intel_ggtt_restore_fences(gt->ggtt);
@@ -833,9 +835,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
+ local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
+ local_bh_enable();
reset_finish(gt, awake);
@@ -1105,25 +1109,12 @@ error:
goto finish;
}
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1139,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "", engine->name, ret);
+ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
goto out;
}
@@ -1174,6 +1164,30 @@ out:
return ret;
}
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1186,7 +1200,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1274,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
* single reset fails.
*/
if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
+ local_bh_enable();
}
if (!engine_mask)
@@ -1380,6 +1396,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
/* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
}
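
/*
 * Editorial sketch (assumption about the helper used above): "tainting" the
 * mutex teaches lockdep that the mutex is taken under fs_reclaim, so any
 * later attempt to allocate-with-reclaim while holding it is flagged.  The
 * core of such a helper looks roughly like this (the real implementation
 * uses lockdep-only acquire/release annotations instead of a real lock):
 */
static void shrinker_taint_mutex(struct mutex *mutex)
{
	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(mutex);
	mutex_unlock(mutex);
	fs_reclaim_release(GFP_KERNEL);
}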
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0c..7dbf5cc8a333 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
void __i915_request_reset(struct i915_request *rq, bool guilty);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f0..78d1360caa0f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
*/
#include "gem/i915_gem_object.h"
+
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"
@@ -40,7 +42,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- if (vma->obj->stolen)
+ if (i915_gem_object_is_stolen(vma->obj))
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a41b43f445b8..4984ff565424 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
+#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -121,31 +122,27 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- intel_uncore_write(engine->uncore, hwsp, offset);
- intel_uncore_posting_read(engine->uncore, hwsp);
+ intel_uncore_write_fw(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ if (!IS_GEN_RANGE(engine->i915, 6, 7))
return;
 	/* ring should be idle before issuing a sync flush */
- drm_WARN_ON(&dev_priv->drm,
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- ENGINE_WRITE(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(engine->uncore,
- RING_INSTPM(engine->mmio_base),
- INSTPM_SYNC_FLUSH, 0,
- 1000))
- drm_err(&dev_priv->drm,
- "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE_FW(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
+ 2000, 0, NULL))
+ ENGINE_TRACE(engine,
+ "wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -156,44 +153,6 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
flush_cs_tlb(engine);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (INTEL_GEN(dev_priv) > 2) {
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(engine->uncore,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE,
- MODE_IDLE,
- 1000)) {
- drm_err(&dev_priv->drm,
- "%s : timed out trying to stop ring\n",
- engine->name);
-
- /*
- * Sometimes we observe that the idle flag is not
- * set even though the ring is empty. So double
- * check before giving up.
- */
- if (ENGINE_READ(engine, RING_HEAD) !=
- ENGINE_READ(engine, RING_TAIL))
- return false;
- }
- }
-
- ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
-
- ENGINE_WRITE(engine, RING_HEAD, 0);
- ENGINE_WRITE(engine, RING_TAIL, 0);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE(engine, RING_CTL, 0);
-
- return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
@@ -211,9 +170,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
- if (vm) {
- ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
- ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+ if (!vm)
+ return;
+
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+ if (INTEL_GEN(engine->i915) >= 7) {
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -221,38 +187,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
- int ret = 0;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (!stop_ring(engine)) {
- /* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&dev_priv->drm, "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- drm_err(&dev_priv->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
- ret = -EIO;
- goto out;
- }
- }
-
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
@@ -269,7 +207,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
* also enforces ordering), otherwise the hw might lose the new ring
* register values.
*/
- ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -279,53 +217,98 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
/* First wake the ring up to an empty/idle ring */
- ENGINE_WRITE(engine, RING_HEAD, ring->head);
- ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
- ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE_FW(engine, RING_CTL,
+ RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(engine->uncore,
- RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
- drm_err(&dev_priv->drm, "%s initialization failed "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- ret = -EIO;
- goto out;
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 5000, 0, NULL)) {
+ drm_err(&dev_priv->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
if (INTEL_GEN(dev_priv) > 2)
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
-out:
- intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+ return 0;
+}
- return ret;
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
}
-static void reset_prepare(struct intel_engine_cs *engine)
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) that the contents of our pinned buffers has been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+static void reset_prepare(struct intel_engine_cs *engine)
+{
/*
* We stop engines, otherwise we might get failed reset and a
* dead gpu (on elk). Also as modern gpu as kbl can suffer
@@ -337,30 +320,35 @@ static void reset_prepare(struct intel_engine_cs *engine)
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ * WaClearRingBufHeadRegAtInit:ctg,elk
*
* FIXME: Wa for more modern gens needs to be validated
*/
ENGINE_TRACE(engine, "\n");
+ intel_engine_stop_cs(engine);
- if (intel_engine_stop_cs(engine))
- ENGINE_TRACE(engine, "timed out on STOP_RING\n");
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+ if (!stop_ring(engine)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ drm_dbg(&engine->i915->drm,
+ "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- ENGINE_TRACE(engine, "ring head [%x] not parked\n",
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -371,12 +359,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
rq = NULL;
spin_lock_irqsave(&engine->active.lock, flags);
+ rcu_read_lock();
list_for_each_entry(pos, &engine->active.requests, sched.link) {
- if (!i915_request_completed(pos)) {
+ if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
}
}
+ rcu_read_unlock();
/*
* The guilty request will get skipped on a hung engine.
@@ -440,10 +430,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->active.requests, sched.link) {
- i915_request_set_error_once(request, -EIO);
- i915_request_mark_complete(request);
- }
+ list_for_each_entry(request, &engine->active.requests, sched.link)
+ i915_request_mark_eio(request);
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -602,6 +590,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused)
static void ring_context_reset(struct intel_context *ce)
{
intel_ring_reset(ce->ring, ce->ring->emit);
+ clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static const struct intel_context_ops ring_context_ops = {
@@ -653,9 +642,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq,
- struct intel_context *ce,
- u32 flags)
+static int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
@@ -886,7 +875,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
- if (engine->wa_ctx.vma->private != ce) {
+ if (engine->wa_ctx.vma->private != ce &&
+ i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@@ -1069,6 +1059,8 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
engine->resume = xcs_resume;
+ engine->sanitize = xcs_sanitize;
+
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
@@ -1290,7 +1282,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b629eeb14002..ee5835c29c03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
@@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps,
return val;
}
-static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
+ return -EBUSY; /* still busy with another command */
}
/* Invert the frequency bin into an ips delay */
@@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- return true;
+ return 0;
+}
+
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ spin_lock_irq(&mchdev_lock);
+ err = __gen5_rps_set(rps, val);
+ spin_unlock_irq(&mchdev_lock);
+
+ return err;
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
"stuck trying to change perf mode\n");
mdelay(1);
- gen5_rps_set(rps, rps->cur_freq);
+ __gen5_rps_set(rps, rps->cur_freq);
rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
@@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps)
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
/* Go back to the starting frequency */
- gen5_rps_set(rps, rps->idle_freq);
+ __gen5_rps_set(rps, rps->idle_freq);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
@@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
if (val == rps->last_freq)
return 0;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
- else
+ else if (INTEL_GEN(i915) >= 6)
err = gen6_rps_set(rps, val);
+ else
+ err = gen5_rps_set(rps, val);
if (err)
return err;
- if (update)
+ if (update && INTEL_GEN(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
@@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps)
{
int adj;
+ GEM_BUG_ON(atomic_read(&rps->num_waiters));
+
if (!intel_rps_clear_active(rps))
return;
@@ -907,28 +919,27 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
- unsigned long flags;
-
- if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
+ if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
/* Serializes with i915_request_retire() */
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
+ if (atomic_fetch_inc(&rps->num_waiters))
+ return;
+
+ if (!intel_rps_is_active(rps))
+ return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
- if (!atomic_fetch_inc(&rps->num_waiters) &&
- READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(&rps->boosts);
+ WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
- spin_unlock_irqrestore(&rq->lock, flags);
}
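
/*
 * Editorial sketch (not part of the patch): replacing the rq->lock critical
 * section with test_and_set_bit() makes the boost marking a lock-free
 * "first caller wins" operation; every later caller observes the bit
 * already set and returns without touching the waiter count.
 */
static bool claim_boost(struct i915_request *rq)
{
	return !test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}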
int intel_rps_set(struct intel_rps *rps, u8 val)
@@ -1798,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps)
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
rps->cur_freq = new_freq;
spin_unlock(&mchdev_lock);
@@ -2109,7 +2120,7 @@ bool i915_gpu_turbo_disable(void)
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 38083f0402d9..029fe13cf303 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -93,7 +93,7 @@ struct intel_rps {
} power;
atomic_t num_waiters;
- atomic_t boosts;
+ unsigned int boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..037b0e3ccbed 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu);
+	/* Must wait until all readers of *rq->hwsp have completed before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
i915_active_fini(&cl->active);
kfree(cl);
}
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
call_rcu(&cl->rcu, __rcu_cacheline_free);
}
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return ERR_CAST(vaddr);
}
- i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
@@ -321,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
return timeline;
}
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
void __intel_timeline_pin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -565,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_completed(from)) /* confirm cacheline is valid */
+ if (i915_request_signaled(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
- if (unlikely(i915_request_completed(from)))
+ if (unlikely(__i915_request_is_complete(from)))
goto release;
rcu_read_unlock();
@@ -617,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+ dma_fence_put(fence);
+ }
+ drm_printf(m, " }\n");
+
+ if (show_request) {
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ show_request(m, rq, "", 2);
+ }
+
+ mutex_unlock(&tl->mutex);
+ spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+}
+
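+
/*
 * Editorial usage sketch (hypothetical caller, not part of the patch):
 * wiring intel_gt_show_timelines() to a drm_printer with a minimal
 * per-request callback matching the expected signature.
 */
static void show_rq(struct drm_printer *m, const struct i915_request *rq,
		    const char *prefix, int indent)
{
	drm_printf(m, "%s%*s%llx:%lld\n", prefix, indent, "",
		   rq->fence.context, rq->fence.seqno);
}

static void dump_timelines(struct intel_gt *gt, struct drm_printer *m)
{
	intel_gt_show_timelines(gt, m, show_rq);
}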
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 9882cd911d8e..dcdee692a80e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,6 +31,8 @@
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
+struct drm_printer;
+
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
struct i915_vma *global_hwsp,
@@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt)
return __intel_timeline_create(gt, NULL, 0);
}
-static inline struct intel_timeline *
+struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
- unsigned int offset)
-{
- return __intel_timeline_create(engine->gt,
- engine->status_page.vma,
- offset);
-}
+ unsigned int offset);
static inline struct intel_timeline *
intel_timeline_get(struct intel_timeline *timeline)
@@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_gt_init_timelines(struct intel_gt *gt);
void intel_gt_fini_timelines(struct intel_gt *gt);
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 4474f487f589..e360f50706bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -84,6 +84,8 @@ struct intel_timeline {
struct list_head link;
struct intel_gt *gt;
+ struct list_head engine_link;
+
struct kref kref;
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index adc9a8ea410a..ec366cf9ef56 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -194,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear);
}
@@ -202,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, set);
+ wa_write_clr_set(wal, reg, ~0, set);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, set, set);
+ wa_write_clr_set(wal, reg, set, set);
}
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
- wa_write_masked_or(wal, reg, clr, 0);
+ wa_write_clr_set(wal, reg, clr, 0);
}
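
For reference, the read-modify-write semantics these three helpers encode reduce to clearing the "clear" bits and then or-ing in the "set" bits when the list is applied; an illustrative sketch (not the driver's actual apply loop):

	static u32 wa_apply(u32 old, u32 clear, u32 set)
	{
		return (old & ~clear) | set;
	}

	/*
	 * wa_write(wal, reg, set):     wa_apply(old, ~0u, set) == set
	 * wa_write_or(wal, reg, set):  wa_apply(old, set, set) == old | set
	 * wa_write_clr(wal, reg, clr): wa_apply(old, clr, 0)   == old & ~clr
	 */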
+/*
+ * WA operations on "masked registers". A masked register has the upper 16
+ * bits documented as "masked" in the bspec. Its purpose is to allow writing
+ * to just a portion of the register without a read-modify-write: you simply
+ * write the mask of the bits you are going to modify into the upper 16 bits.
+ *
+ * The wa_masked_* family of functions already does the necessary operations
+ * to calculate the mask based on the parameters passed, so the user only has
+ * to provide the lower 16 bits of that register.
+ */
+
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
@@ -229,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
-#define WA_SET_BIT_MASKED(addr, mask) \
- wa_masked_en(wal, (addr), (mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_masked_dis(wal, (addr), (mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 mask, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+}
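
Assuming the _MASKED_FIELD()-style encoding described in the comment above, a masked-register write packs the modify-mask into the upper half of the dword; a minimal sketch:

	static u32 masked_field(u32 mask, u32 value)
	{
		/* mask/value fit in 16 bits; hw does reg = (reg & ~mask) | value */
		return (mask << 16) | value;
	}

	/*
	 * wa_masked_en(bit)  writes masked_field(bit, bit)
	 * wa_masked_dis(bit) writes masked_field(bit, 0)
	 */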
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
@@ -268,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -280,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -293,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
@@ -306,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ wa_masked_en(wal, HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -332,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+ wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -349,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN9_PBE_COMPRESSED_HASH_SELECTION);
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
}
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
- GEN9_ENABLE_GPGPU_PREEMPTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(CACHE_MODE_1,
- GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
+ wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
@@ -396,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/*
* Supporting preemption with fine-granularity requires changes in the
@@ -422,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
if (IS_GEN9_LP(i915))
- WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+ wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
@@ -465,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
@@ -487,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:bxt */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -504,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -518,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -528,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* WaForceContextSaveRestoreNonCoherent:cnl */
- WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+ wa_masked_en(wal, CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaPushConstantDereferenceHoldDisable:cnl */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix:cnl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaDisable3DMidCmdPreemption:cnl */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:cnl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaDisableEarlyEOT:cnl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -580,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ PUSH_CONSTANT_DEREF_DISABLE);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@@ -590,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* (the register is whitelisted in hardware now, so UMDs can opt in
* for coherency if they have a good reason).
*/
- WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
/* WaEnableFloatBlendOptimization:icl */
- wa_write_masked_or(wal,
- GEN10_CACHE_MODE_SS,
- 0, /* write-only, so skip validation */
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+ wa_write_clr_set(wal,
+ GEN10_CACHE_MODE_SS,
+ 0, /* write-only, so skip validation */
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
/* WaDisableGPGPUMidThreadPreemption:icl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/* allow headerless messages for preemptible GPGPU context */
- WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
- GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
- wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
- 0, /* write-only register; skip validation */
- 0xFFFFFFFF);
+ wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
@@ -643,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_14010443199:rkl
* Wa_14010698770:rkl
*/
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* WaDisableGPGPUMidThreadPreemption:gen12 */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
@@ -680,12 +690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
gen12_ctx_workarounds_init(engine, wal);
/* Wa_1409044764 */
- WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+ wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
/* Wa_22010493298 */
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+
+ /*
+ * Wa_16011163337
+ *
+ * Like in tgl_ctx_workarounds_init(), read verification is ignored due
+ * to Wa_1608008084.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -804,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
- wa_masked_en(wal,
- _3D_CHICKEN,
- _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
-
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal,
- GEN6_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
-
- wa_masked_en(wal,
- _3D_CHICKEN3,
- /* WaStripsFansDisableFastClipPerformanceFix:snb */
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
- /*
- * Bspec says:
- * "This bit must be set if 3DSTATE_CLIP clip mode is set
- * to normal and 3DSTATE_SF number of SF output attributes
- * is more than 16."
- */
- _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
}
static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:ivb */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(i915))
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:ivb */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
GEN7_COMMON_SLICE_CHICKEN1,
@@ -866,91 +840,15 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaForceL3Serialization:ivb */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
- /*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- if (0) { /* causes HiZ corruption on ivb:gt1 */
- /* enable HiZ Raw Stall Optimization */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
- }
-
- /* WaDisable4x2SubspanOptimization:ivb */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
}
static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:vlv */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaPsdDispatchEnable:vlv */
- /* WaDisablePSDDualDispatchEnable:vlv */
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_MAX_PS_THREAD_DEP |
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:vlv */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -970,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaVSRefCountFullforceMissDisable:hsw */
wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
-
- wa_masked_dis(wal,
- CACHE_MODE_0_GEN7,
- /* WaDisable_RenderCache_OperationalFlush:hsw */
- RC_OP_FLUSH_ENABLE |
- /* enable HiZ Raw Stall Optimization */
- HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /* WaSampleCChickenBitEnable:hsw */
- wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
}
static void
@@ -1164,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
- wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
@@ -1189,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- wa_write_masked_or(wal,
- GEN11_GACB_PERF_CTRL,
- GEN11_HASH_CTRL_MASK,
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+ wa_write_clr_set(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
@@ -1260,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1408615072:tgl[a0] */
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1358,9 +1236,9 @@ static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
+ DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read, wa->set);
+ cur, cur & wa->read, wa->set & wa->read);
return false;
}
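
The predicate being reported here treats a workaround as lost when any readable, expected bit differs; as a sketch:

	static bool wa_ok(u32 cur, u32 set, u32 read)
	{
		/* only bits in the read mask are validated against the set value */
		return ((cur ^ set) & read) == 0;
	}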
@@ -1425,7 +1303,8 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}
-static inline bool is_nonpriv_flags_valid(u32 flags)
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
{
/* Check only valid flag bits are set */
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
@@ -1752,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
-
- /* Wa_1408615072:tgl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
}
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
@@ -1770,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1606700617:tgl,dg1
+ * Wa_22010271021:tgl,rkl,dg1
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
}
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
@@ -1798,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
- /*
- * Wa_1606700617:tgl
- * Wa_22010271021:tgl,rkl
- */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
- }
-
- if (IS_GEN(i915, 12)) {
- /* Wa_1406941453:gen12 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
@@ -1838,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
- wa_write_masked_or(wal,
- GEN8_GARBCNTL,
- GEN11_HASH_CTRL_EXCL_MASK,
- GEN11_HASH_CTRL_EXCL_BIT0);
- wa_write_masked_or(wal,
- GEN11_GLBLINVL,
- GEN11_BANK_HASH_ADDR_EXCL_MASK,
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
@@ -1874,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
/* Wa_1409178092:icl */
- wa_write_masked_or(wal,
- GEN11_SCRATCH2,
- GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
- 0);
+ wa_write_clr_set(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
/* WaEnable32PlaneMode:icl */
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
@@ -1951,24 +1824,132 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
- wa_write_masked_or(wal,
- GEN8_L3SQCREG1,
- L3_PRIO_CREDITS_MASK,
- L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ wa_write_clr_set(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_write_or(wal,
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /* Disable atomics in L3 to prevent unrecoverable hangs */
+ wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
+ GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN8_L3SQCREG4,
+ GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN9_SCRATCH1,
+ EVICTION_PERF_FIX_ENABLE, 0);
+ }
+
+ if (IS_HASWELL(i915)) {
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal,
+ HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ }
+
+ if (IS_VALLEYVIEW(i915)) {
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
- if (IS_GEN(i915, 7))
+ if (IS_IVYBRIDGE(i915)) {
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_GEN(i915, 7)) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+	 * WaDisable4x2SubspanOptimization:ivb,hsw
+	 *
+	 * BSpec says this must be set, even though the workaround
+	 * isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+ }
+
if (IS_GEN_RANGE(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
@@ -1991,6 +1972,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GFX_MODE,
GFX_TLB_INVALIDATE_EXPLICIT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal,
+ GEN6_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -2067,39 +2081,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
-static struct i915_vma *
-create_scratch(struct i915_address_space *vm, int count)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int size;
- int err;
-
- size = round_up(count * sizeof(u32), PAGE_SIZE);
- obj = i915_gem_object_create_internal(vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0,
- i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
struct mcr_range {
u32 start;
u32 end;
@@ -2202,7 +2183,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
+ vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+ wal->count * sizeof(u32));
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2f830017c51d..4b4f03b70df7 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
GEM_BUG_ON(stalled);
}
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
+ i915_request_mark_eio(rq);
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- mark_eio(rq);
+ i915_request_mark_eio(rq);
__i915_request_submit(rq);
}
INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 1f4020e906a8..db738d400168 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 729c3c7b11e2..439c8984f5fa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -6,6 +6,7 @@
#include <linux/sort.h>
+#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b88aa35ad75b..223ab88f7e57 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b)
static int __live_heartbeat_fast(struct intel_engine_cs *engine)
{
+ const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
struct intel_context *ce;
struct i915_request *rq;
ktime_t t0, t1;
@@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
times[0],
times[ARRAY_SIZE(times) - 1]);
- /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
- if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ /*
+ * Ideally, the upper bound on min work delay would be something like
+ * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+ * are, even with system_wq_highpri, at the mercy of the CPU scheduler
+	 * and may be stuck behind some slow work for many milliseconds,
+	 * such as our very own display workers.
+ */
+ if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
- jiffies_to_usecs(6));
+ error_threshold);
err = -EINVAL;
}
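
To make the new threshold concrete: assuming CONFIG_HZ=250 (4ms per jiffy), jiffies_to_usecs(6) is 24000us, so error_threshold = max(20000, 24000) = 24000us; with CONFIG_HZ=1000 it would fall back to the 20000us floor.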
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8a..c3d965279fc3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,215 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+	const u64 *a = A, *b = B;
+
+	/* Truncating *a - *b to int can flip the sign for u64 values */
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+		return 1;
+	else
+		return 0;
+}
+
+static u64 trifilter(u64 *a)
+{
+ sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+ return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
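
trifilter() rejects outliers by sorting the five samples and returning a weighted median of the middle three, (a[1] + 2*a[2] + a[3]) / 4. A hypothetical usage sketch, reusing COUNT and trifilter() from above:

	static u64 trifilter_example(void)
	{
		u64 samples[COUNT] = { 90, 1000, 100, 110, 105 };

		/* sorted: 90, 100, 105, 110, 1000 -> (100 + 2*105 + 110) / 4 = 105 */
		return trifilter(samples);
	}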
+
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ op;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void write_semaphore(u32 *x, u32 value)
+{
+ WRITE_ONCE(*x, value);
+ wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+ u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+ u32 offset = i915_ggtt_offset(engine->status_page.vma);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 28);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* Signal & wait for start */
+ cs = emit_store(cs, offset + 4008, 1);
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+ /* Busy wait */
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+ intel_ring_advance(rq, cs);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ intel_engine_flush_submission(engine);
+
+ /* Wait for the request to start executing, that then waits for us */
+ while (READ_ONCE(sema[2]) == 0)
+ cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+ preempt_disable();
+ *dt = local_clock();
+ write_semaphore(&sema[2], 0);
+ udelay(100);
+ *dt = local_clock() - *dt;
+ write_semaphore(&sema[2], 1);
+ preempt_enable();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ return -ETIME;
+ }
+ i915_request_put(rq);
+
+ pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+ engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+ *d_ctx = sema[3] - sema[1];
+ *d_ring = sema[4] - sema[0];
+ return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+ u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+ struct intel_context *ce;
+ int i, err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < COUNT; i++) {
+ err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ dt = trifilter(st);
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+ engine->name, dt,
+ intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+ intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+ d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
+ if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+ pr_err("%s Mismatch between ring timestamp and walltime!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ d_ctx *= engine->gt->clock_frequency;
+ if (IS_ICELAKE(engine->i915))
+ d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+ else
+ d_ring *= engine->gt->clock_frequency;
+
+ if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+ pr_err("%s Mismatch between ring and context timestamps!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
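
Both mismatch checks above accept anything within a 3:4 ratio in either direction, i.e. roughly ±33%; as a standalone predicate (illustrative only):

	static bool within_3_4_ratio(u64 a, u64 b)
	{
		/* pass iff 3*b/4 <= a <= 4*b/3 */
		return 3 * a <= 4 * b && 4 * a >= 3 * b;
	}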
+
+static int live_engine_timestamps(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+ * the same CS clock.
+ */
+
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ int err;
+
+ st_engine_heartbeat_disable(engine);
+ err = __live_engine_timestamps(engine);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -177,6 +379,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_timestamps),
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 000000000000..264b5ebdb021
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4741 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_ctx;
+ }
+
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto out_ctx;
+ }
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+	 * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+	 * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ int err = 0;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
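+ /* Poll slot[idx] until it becomes nonzero (released by our successor) */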
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
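+ /* Each link in the chain releases the previous waiter, slot[idx - 1] */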
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
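+ /* Write 1 into slot[idx - 1] to release the end of the chain */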
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
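+ /* Block until the semaphore in slot[0] counts up to idx */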
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
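+ /* Then sample RING_TIMESTAMP into slot[idx] */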
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
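+ /* And advance the semaphore to release the next waiter, idx + 1 */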
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int live_timeslice_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+ * dependencies we may end up replacing the context with itself,
+ * but resubmitting only a few of its requests, forcing us to rewind the
+ * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
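+ /* slot[0] is the semaphore; slot[X], slot[Z], slot[Y] take timestamps */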
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
+ err = PTR_ERR(rq[A1]);
+ rq[A1] = NULL;
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+ if (IS_ERR(rq[A2])) {
+ err = PTR_ERR(rq[A2]);
+ rq[A2] = NULL;
+ goto err;
+ }
+
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+ intel_context_put(ce);
+ if (IS_ERR(rq[B1])) {
+ err = PTR_ERR(rq[B1]);
+ rq[B1] = NULL;
+ goto err;
+ }
+
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+ while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ st_engine_heartbeat_enable(engine);
+ for (i = 0; i < 3; i++)
+ if (rq[i])
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+ * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching priority as semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_FENCE_FLAG_NOPREEMPT.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses an MI_STORE_DWORD_IMM to update the semaphore value
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = igt_request_alloc(ctx_lo, engine);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
+ i915_request_add(lo);
+
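+ /* Wait for the GPU to write the first semaphore value (1) to *map */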
+ if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
+static int live_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ engine->schedule(rq, &attr);
+
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+}
+
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+ c->ctx = kernel_context(gt->i915);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, gt))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be under observation and must not be interrupted.
+ */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
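+ /* Ban the context, then use a heartbeat pulse to force preemption */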
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[1]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[2]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq;
+ int err;
+
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+
+ err = intel_engine_pulse(engine);
+ if (err)
+ goto out;
+
+ force_reset_timeout(engine);
+
+ /* force preempt reset [failure] */
+ while (!engine->execlists.pending[0])
+ intel_engine_flush_submission(engine);
+ del_timer_sync(&engine->execlists.preempt);
+ intel_engine_flush_submission(engine);
+
+ cancel_reset_timeout(engine);
+
+ /* after failure, require heartbeats to reset device */
+ intel_engine_set_heartbeat(engine, 1);
+ err = wait_for_reset(engine, rq, HZ / 2);
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_fail(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+ };
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
+
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ st_engine_heartbeat_disable(engine);
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ st_engine_heartbeat_enable(engine);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
+
+ if (preempt_client_init(gt, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = igt_request_alloc(hi.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
+
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = i915_vma_get(vma);
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
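+ /* Chain the gang together using the otherwise-unused mock.link */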
+ rq->mock.link.next = &(*prev)->mock.link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring until we would cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submited\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we roll back large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * The last spinner has the highest priority and should
+ * execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ if (!rq)
+ return err;
+
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n = list_next_entry(rq, mock.link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
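+ /* Stall until the global semaphore (result[0]) counts up to i */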
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
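+ /*
+ * Post the preempting id to the head of the global page; running
+ * at maximum priority preempts the clients, and the stored value
+ * releases their semaphore waits.
+ */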
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++), MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the 3 running clients; each write also releases their next semaphore step */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+ return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct intel_gt *gt;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = igt_request_alloc(ctx, smoke->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ if (err)
+ return err;
+
+ count++;
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ for_each_engine(engine, smoke->gt, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ count = 0;
+ for_each_engine(engine, smoke->gt, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->gt, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .gt = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 256,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ struct igt_live_test t;
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
+ i915_gem_object_unpin_map(smoke.batch);
+
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+ err = -EIO;
+ goto err_batch;
+ }
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_free:
+ kfree(smoke.contexts);
+
+ return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16] = {};
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+ for (n = 0; n < nctx; n++) {
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
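+ /*
+ * Sample a spread of queue depths (primes up to 8192) and compare
+ * the latency of a single nop request against a deep queue.
+ */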
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
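+ /*
+ * CHAIN submits all requests on one virtual engine before moving
+ * to the next; otherwise the contexts are interleaved on each pass.
+ */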
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ }
+ return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ return err;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(gt, siblings, nsibling,
+ n, 0);
+ if (err)
+ return err;
+ }
+
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
+ if (nsibling < 2)
+ continue;
+
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
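+ /*
+ * Read back the GPR seeded by the previous request (likely run
+ * on a different sibling), then seed the next GPR; the values
+ * only survive if the context image travels with the requests.
+ */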
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * are preserved.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ /* As we use CS_GPR, we cannot run on gens before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+ struct intel_engine_cs *master;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ unsigned long n;
+ int err;
+
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it gets requested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence checking later it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, gt, id) {
+ struct i915_sw_fence fence = {};
+ struct intel_context *ce;
+
+ if (master->class == class)
+ continue;
+
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
+
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
+ onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
+
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1], 0,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(gt,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_disable(siblings[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
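+ /* Run the submission tasklet by hand so the spinner becomes the active request */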
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_enable(siblings[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
+ SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
+ SUBTEST(live_busywait_preempt),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
+ SUBTEST(live_preempt_gang),
+ SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
+ SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
+ SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
+ };
+
+ if (!HAS_EXECLISTS(i915))
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b51..5d911f724ebe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
enum intel_engine_id id;
int err = 0;
- if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ if (!gt->clock_frequency) { /* unknown */
pr_info("CS_TIMESTAMP frequency unknown\n");
return 0;
}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
measure_clocks(engine, &cycles, &dt);
- time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
- expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+ time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+ expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
engine->name, cycles, time, dt, expected,
- RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+ engine->gt->clock_frequency / 1000);
if (9 * time < 8 * dt || 8 * time > 9 * dt) {
pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2..463bb6a700c8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
}
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -539,6 +540,149 @@ static int igt_reset_nop_engine(void *arg)
return 0;
}
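+/*
+ * Arm the selftest fault injection on engine->reset_timeout so that the
+ * following engine resets time out instead of completing.
+ */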
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can recover from engine-reset failures */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ st_engine_heartbeat_disable(engine);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err == 0) /* timeouts only generated on gen8+ */
+ goto skip;
+
+ count = 0;
+ do {
+ struct i915_request *last = NULL;
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < count % 15; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ if (last)
+ i915_request_put(last);
+
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
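+ /*
+ * Alternate between a normal reset (which must succeed) and one
+ * with an injected timeout (which must fail with -ETIMEDOUT),
+ * checking that the queued requests still complete afterwards.
+ */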
+ if (count & 1) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ GEM_TRACE_DUMP();
+ i915_request_put(last);
+ break;
+ }
+ } else {
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err != -ETIMEDOUT) {
+ pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+ engine->name, err);
+ i915_request_put(last);
+ break;
+ }
+ }
+
+ err = 0;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ / 2) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EIO;
+ }
+ i915_request_put(last);
+ }
+ count++;
+ } while (err == 0 && time_before(jiffies, end_time));
+out:
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -560,6 +704,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long count;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,6 +722,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ count = 0;
do {
if (active) {
struct i915_request *rq;
@@ -608,7 +754,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -625,9 +772,13 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = -EINVAL;
break;
}
+
+ count++;
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
+ pr_info("%s: Completed %lu %s resets\n",
+ engine->name, count, active ? "active" : "idle");
if (err)
break;
@@ -1478,7 +1629,8 @@ static int igt_reset_queue(void *arg)
prev = rq;
count++;
} while (time_before(jiffies, end_time));
- pr_info("%s: Completed %d resets\n", engine->name, count);
+ pr_info("%s: Completed %d queued resets\n",
+ engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
@@ -1575,13 +1727,21 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable(t);
+ if (t->func)
+ tasklet_disable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
- tasklet_enable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1847,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_fail_engine),
SUBTEST(igt_reset_engines),
SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e0..920979a89413 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,22 +1,22 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
@@ -27,29 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
+ return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -70,6 +48,9 @@ static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
@@ -89,4623 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
} while (1);
}
-static int wait_for_reset(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
-{
- timeout += jiffies;
-
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_completed(rq))
- break;
-
- if (READ_ONCE(rq->fence.error))
- break;
- } while (time_before(jiffies, timeout));
-
- flush_scheduled_work();
-
- if (rq->fence.error != -EIO) {
- pr_err("%s: hanging request %llx:%lld not reset\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -EINVAL;
- }
-
- /* Give the request a jiffie to complete after flushing the worker */
- if (i915_request_wait(rq, 0,
- max(0l, (long)(timeout - jiffies)) + 1) < 0) {
- pr_err("%s: hanging request %llx:%lld did not complete\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_ctx;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
- GEM_TRACE("spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_ctx;
- }
-
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto out_ctx;
- }
-
-out_ctx:
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_restore(struct intel_gt *gt, int prio)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = -ENOMEM;
-
- /*
- * Check that we can correctly context switch between 2 instances
- * on the same engine from the same parent context.
- */
-
- if (igt_spinner_init(&spin, gt))
- return err;
-
- err = 0;
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq[2];
- struct igt_live_test t;
- int n;
-
- if (prio && !intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- /*
- * Setup the pair of contexts such that if we
- * lite-restore using the RING_TAIL from ce[1] it
- * will execute garbage from ce[0]->ring.
- */
- memset(tmp->ring->vaddr,
- POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
- tmp->ring->vma->size);
-
- ce[n] = tmp;
- }
- GEM_BUG_ON(!ce[1]->ring->size);
- intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
-
- rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto err_ce;
- }
-
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
-
- if (!igt_wait_for_spinner(&spin, rq[0])) {
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- rq[1] = i915_request_create(ce[1]);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- if (!prio) {
- /*
- * Ensure we do the switch to ce[1] on completion.
- *
- * rq[0] is already submitted, so this should reduce
- * to a no-op (a wait on a request on the same engine
- * uses the submit fence, not the completion fence),
- * but it will install a dependency on rq[1] for rq[0]
- * that will prevent the pair being reordered by
- * timeslicing.
- */
- i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- }
-
- i915_request_get(rq[1]);
- i915_request_add(rq[1]);
- GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
- i915_request_put(rq[0]);
-
- if (prio) {
- struct i915_sched_attr attr = {
- .priority = prio,
- };
-
- /* Alternatively preempt the spinner with ce[1] */
- engine->schedule(rq[1], &attr);
- }
-
- /* And switch back to ce[0] for good measure */
- rq[0] = i915_request_create(ce[0]);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- i915_request_put(rq[1]);
- goto err_ce;
- }
-
- i915_request_await_dma_fence(rq[0], &rq[1]->fence);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_switch(void *arg)
-{
- return live_unlite_restore(arg, 0);
-}
-
-static int live_unlite_preempt(void *arg)
-{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
-}
-
-static int live_unlite_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Setup a preemption event that will cause almost the entire ring
- * to be unwound, potentially fooling our intel_ring_direction()
- * into emitting a forward lite-restore instead of the rollback.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- /* Create max prio spinner, followed by N low prio nops */
- rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
- /* Fill the ring, until we will cause a wrap */
- n = 0;
- while (intel_ring_direction(ce[0]->ring,
- rq->wa_tail,
- ce[0]->ring->tail) <= 0) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
- rq->tail,
- ce[0]->ring->tail) <= 0);
- i915_request_put(rq);
-
- /* Create a second ring to preempt the first ring after rq[0] */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
- pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_pin_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * We have to be careful not to trust intel_ring too much, for example
- * ring->head is updated upon retire which is out of sync with pinning
- * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
- * or else we risk writing an older, stale value.
- *
- * To simulate this, let's apply a bit of deliberate sabotague.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- struct intel_ring *ring;
- struct igt_live_test t;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- err = intel_context_pin(ce);
- if (err) {
- intel_context_put(ce);
- break;
- }
-
- /* Keep the context awake while we play games */
- err = i915_active_acquire(&ce->active);
- if (err) {
- intel_context_unpin(ce);
- intel_context_put(ce);
- break;
- }
- ring = ce->ring;
-
- /* Poison the ring, and offset the next request from HEAD */
- memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
- ring->emit = ring->size / 2;
- ring->tail = ring->emit;
- GEM_BUG_ON(ring->head);
-
- intel_context_unpin(ce);
-
- /* Submit a simple nop request */
- GEM_BUG_ON(intel_context_is_pinned(ce));
- rq = intel_context_create_request(ce);
- i915_active_release(&ce->active); /* e.g. async retire */
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
- GEM_BUG_ON(!rq->head);
- i915_request_add(rq);
-
- /* Expect not to hang! */
- if (igt_live_test_end(&t)) {
- err = -EIO;
- break;
- }
- }
-
- return err;
-}
-
-static int live_hold_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendents are not executed while the capture is in progress.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out;
- }
-
- /* We have our request executing, now remove it and reset */
-
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- i915_request_get(rq);
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: request on hold completed!\n",
- engine->name);
- i915_request_put(rq);
- err = -EIO;
- goto out;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
- i915_request_put(rq);
-
-out:
- st_engine_heartbeat_enable(engine);
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
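-
-/*
- * A condensed, illustrative sketch of the capture sequence exercised
- * above, assuming rq is the guilty request left active by the tasklet;
- * the helper is not part of the test.
- */
-static void sketch_hold_for_capture(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- execlists_hold(engine, rq); /* park rq and its descendants */
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL); /* capture while rq is parked */
-
- execlists_unhold(engine, rq); /* resubmit the survivors */
-}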
-
-static const char *error_repr(int err)
-{
- return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
- static const struct error_phase {
- enum { GOOD = 0, BAD = -EIO } error[2];
- } phases[] = {
- { { BAD, GOOD } },
- { { BAD, BAD } },
- { { BAD, GOOD } },
- { { GOOD, GOOD } }, /* sentinel */
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
- * of invalid commands in user batches that will cause a GPU hang.
- * This is a faster mechanism than using hangcheck/heartbeats, but
- * only detects problems the HW knows about -- it will not warn when
- * we kill the HW!
- *
- * To verify our detection and reset, we throw some invalid commands
- * at the HW and wait for the interrupt.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for_each_engine(engine, gt, id) {
- const struct error_phase *p;
- int err = 0;
-
- st_engine_heartbeat_disable(engine);
-
- for (p = phases; p->error[0] != GOOD; p++) {
- struct i915_request *client[ARRAY_SIZE(phases->error)];
- u32 *cs;
- int i;
-
- memset(client, 0, sizeof(client));
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (rq->engine->emit_init_breadcrumb) {
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out;
- }
-
- if (p->error[i]) {
- *cs++ = 0xdeadbeef;
- *cs++ = 0xdeadbeef;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- client[i] = i915_request_get(rq);
- i915_request_add(rq);
- }
-
- err = wait_for_submit(engine, client[0], HZ / 2);
- if (err) {
- pr_err("%s: first request did not start within time!\n",
- engine->name);
- err = -ETIME;
- goto out;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (i915_request_wait(client[i], 0, HZ / 5) < 0)
- pr_debug("%s: %s request incomplete!\n",
- engine->name,
- error_repr(p->error[i]));
-
- if (!i915_request_started(client[i])) {
- pr_err("%s: %s request not started!\n",
- engine->name,
- error_repr(p->error[i]));
- err = -ETIME;
- goto out;
- }
-
- /* Kick the tasklet to process the error */
- intel_engine_flush_submission(engine);
- if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request (%s) with wrong error code: %d\n",
- engine->name,
- error_repr(p->error[i]),
- i915_request_completed(client[i]) ? "completed" : "running",
- client[i]->fence.error);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- for (i = 0; i < ARRAY_SIZE(client); i++)
- if (client[i])
- i915_request_put(client[i]);
- if (err) {
- pr_err("%s: failed at phase[%zd] { %d, %d }\n",
- engine->name, p - phases,
- p->error[0], p->error[1]);
- break;
- }
- }
-
- st_engine_heartbeat_enable(engine);
- if (err) {
- intel_gt_set_wedged(gt);
- return err;
- }
- }
-
- return 0;
-}
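-
-/*
- * A minimal sketch of the "bad" payload used above: two dwords that
- * are not valid commands, which the CS flags via
- * CS_MASTER_ERROR_INTERRUPT; the helper name is illustrative.
- */
-static int sketch_emit_bad_commands(struct i915_request *rq)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = 0xdeadbeef; /* not a valid MI_* opcode */
- *cs++ = 0xdeadbeef;
-
- intel_ring_advance(rq, cs);
- return 0;
-}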
-
-static int
-emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 10);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma) + 4 * idx;
- *cs++ = 0;
-
- if (idx > 0) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static struct i915_request *
-semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
-{
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- goto out_ce;
-
- err = 0;
- if (rq->engine->emit_init_breadcrumb)
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err == 0)
- err = emit_semaphore_chain(rq, vma, idx);
- if (err == 0)
- i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_ce:
- intel_context_put(ce);
- return rq;
-}
-
-static int
-release_queue(struct intel_engine_cs *engine,
- struct i915_vma *vma,
- int idx, int prio)
-{
- struct i915_sched_attr attr = {
- .priority = prio,
- };
- struct i915_request *rq;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- local_bh_disable();
- engine->schedule(rq, &attr);
- local_bh_enable(); /* kick tasklet */
-
- i915_request_put(rq);
-
- return 0;
-}
-
-static int
-slice_semaphore_queue(struct intel_engine_cs *outer,
- struct i915_vma *vma,
- int count)
-{
- struct intel_engine_cs *engine;
- struct i915_request *head;
- enum intel_engine_id id;
- int err, i, n = 0;
-
- head = semaphore_queue(outer, vma, n++);
- if (IS_ERR(head))
- return PTR_ERR(head);
-
- for_each_engine(engine, outer->gt, id) {
- for (i = 0; i < count; i++) {
- struct i915_request *rq;
-
- rq = semaphore_queue(engine, vma, n++);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_put(rq);
- }
- }
-
- err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
- if (err)
- goto out;
-
- if (i915_request_wait(head, 0,
- 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
- pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
- count, n);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(outer->gt);
- err = -EIO;
- }
-
-out:
- i915_request_put(head);
- return err;
-}
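-
-/*
- * Worked example for the wait budget above, with illustrative figures:
- * for count = 5 on a gt with 8 engines, we allow
- * 2 * 8 * (5 + 2) * (5 + 3) = 896 jiffies, several seconds at typical
- * HZ, scaling with both the engine count and the chain length.
- */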
-
-static int live_timeslice_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * If a request takes too long, we would like to give other users
- * a fair go on the GPU. In particular, users may create batches
- * that wait upon external input, where that input may even be
- * supplied by another GPU job. To avoid blocking forever, we
- * need to preempt the current task and replace it with another
- * ready task.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
-
- memset(vaddr, 0, PAGE_SIZE);
-
- st_engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, 5);
- st_engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
- struct i915_request *wait,
- void *slot, int idx)
-{
- const u32 offset =
- i915_ggtt_offset(ce->engine->status_page.vma) +
- offset_in_page(slot);
- struct i915_request *rq;
- u32 *cs;
- int err;
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- return rq;
-
- if (wait) {
- err = i915_request_await_dma_fence(rq, &wait->fence);
- if (err)
- goto err;
- }
-
- cs = intel_ring_begin(rq, 14);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = idx;
- *cs++ = offset;
- *cs++ = 0;
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = offset + idx * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = idx + 1;
-
- intel_ring_advance(rq, cs);
-
- rq->sched.attr.priority = I915_PRIORITY_MASK;
- err = 0;
-err:
- i915_request_get(rq);
- i915_request_add(rq);
- if (err) {
- i915_request_put(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * The usual presumption on timeslice expiration is that we replace
- * the active context with another. However, given a chain of
- * dependencies, we may end up replacing the context with itself,
- * resubmitting only a few of its requests and forcing us to rewind
- * the RING_TAIL of the original request.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- for_each_engine(engine, gt, id) {
- enum { A1, A2, B1 };
- enum { X = 1, Z, Y };
- struct i915_request *rq[3] = {};
- struct intel_context *ce;
- unsigned long timeslice;
- int i, err = 0;
- u32 *slot;
-
- if (!intel_engine_has_timeslices(engine))
- continue;
-
- /*
- * A:rq1 -- semaphore wait, timestamp X
- * A:rq2 -- write timestamp Y
- *
- * B:rq1 [await A:rq1] -- write timestamp Z
- *
- * Force timeslice, release semaphore.
- *
- * Expect execution/evaluation order XZY
- */
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- slot = memset32(engine->status_page.addr + 1000, 0, 4);
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[A1] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[A1])) {
- err = PTR_ERR(rq[A1]);
- intel_context_put(ce);
- goto err;
- }
-
- rq[A2] = create_rewinder(ce, NULL, slot, Y);
- intel_context_put(ce);
- if (IS_ERR(rq[A2])) {
- err = PTR_ERR(rq[A2]);
- goto err;
- }
-
- err = wait_for_submit(engine, rq[A2], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit first context\n",
- engine->name);
- goto err;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
- intel_context_put(ce);
- if (IS_ERR(rq[B1])) {
- err = PTR_ERR(rq[B1]);
- goto err;
- }
-
- err = wait_for_submit(engine, rq[B1], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit second context\n",
- engine->name);
- goto err;
- }
-
- /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
- if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
- }
- /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
- GEM_BUG_ON(i915_request_is_active(rq[A2]));
-
- /* Release the hounds! */
- slot[0] = 1;
- wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
-
- for (i = 1; i <= 3; i++) {
- unsigned long timeout = jiffies + HZ / 2;
-
- while (!READ_ONCE(slot[i]) &&
- time_before(jiffies, timeout))
- ;
-
- if (!time_before(jiffies, timeout)) {
- pr_err("%s: rq[%d] timed out\n",
- engine->name, i - 1);
- err = -ETIME;
- goto err;
- }
-
- pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
- }
-
- /* XZY: XZ < XY */
- if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
- pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
- engine->name,
- slot[Z] - slot[X],
- slot[Y] - slot[X]);
- err = -EINVAL;
- }
-
-err:
- memset32(&slot[0], -1, 4);
- wmb();
-
- engine->props.timeslice_duration_ms = timeslice;
- st_engine_heartbeat_enable(engine);
- for (i = 0; i < 3; i++) {
- if (!IS_ERR_OR_NULL(rq[i]))
- i915_request_put(rq[i]);
- }
- if (igt_flush_test(gt->i915))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
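-
-/*
- * Worked example of the XZY check above, with made-up timestamps: if
- * slot[X] = 1000, slot[Z] = 1400 and slot[Y] = 2000, then
- * Z - X = 400 < Y - X = 1000 and B:rq1 ran before A:rq2, i.e. the
- * rewind worked; Z - X >= Y - X would mean A was never split.
- */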
-
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return rq;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- return rq;
-}
-
-static long slice_timeout(struct intel_engine_cs *engine)
-{
- long timeout;
-
- /* Enough time for a timeslice to kick in, and kick out */
- timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
-
- /* Enough time for the nop request to complete */
- timeout += HZ / 5;
-
- return timeout + 1;
-}
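-
-/*
- * Worked example, assuming HZ = 250 and a 1ms timeslice:
- * msecs_to_jiffies_timeout(1) == 2, so the budget comes to
- * 2 * 2 + 250 / 5 + 1 = 55 jiffies, roughly 220ms.
- */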
-
-static int live_timeslice_queue(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * Make sure that even if ELSP[0] and ELSP[1] are filled with
- * timeslicing between them disabled, we *do* enable timeslicing
- * if the queue demands it. (Normally, we do not submit if
- * ELSP[1] is already occupied, so must rely on timeslicing to
- * eject ELSP[0] in favour of the queue.)
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct i915_request *rq, *nop;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
- memset(vaddr, 0, PAGE_SIZE);
-
- /* ELSP[0]: semaphore wait */
- rq = semaphore_queue(engine, vma, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_heartbeat;
- }
- engine->schedule(rq, &attr);
- err = wait_for_submit(engine, rq, HZ / 2);
- if (err) {
- pr_err("%s: Timed out trying to submit semaphores\n",
- engine->name);
- goto err_rq;
- }
-
- /* ELSP[1]: nop request */
- nop = nop_request(engine);
- if (IS_ERR(nop)) {
- err = PTR_ERR(nop);
- goto err_rq;
- }
- err = wait_for_submit(engine, nop, HZ / 2);
- i915_request_put(nop);
- if (err) {
- pr_err("%s: Timed out trying to submit nop\n",
- engine->name);
- goto err_rq;
- }
-
- GEM_BUG_ON(i915_request_completed(rq));
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Queue: semaphore signal, matching priority as semaphore */
- err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err)
- goto err_rq;
-
- /* Wait until we ack the release_queue and start timeslicing */
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
- } while (READ_ONCE(engine->execlists.pending[0]));
-
- /* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to timeslice into queue\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EIO;
- }
-err_rq:
- i915_request_put(rq);
-err_heartbeat:
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int live_timeslice_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * We should not timeslice into a request that is marked with
- * I915_FENCE_FLAG_NOPREEMPT.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- unsigned long timeslice;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- /* Create an unpreemptible spinner */
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
- i915_request_put(rq);
-
- /* Followed by a maximum priority barrier (heartbeat) */
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out_spin;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_spin;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- /*
- * Wait until the barrier is in ELSP, and we know timeslicing
- * will have been activated.
- */
- if (wait_for_submit(engine, rq, HZ / 2)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- /*
- * Since the ELSP[0] request is unpreemptible, it should not
- * allow the maximum priority barrier through. Wait long
- * enough to see if it is timesliced in by mistake.
- */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
- pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
- engine->name);
- err = -EINVAL;
- }
- i915_request_put(rq);
-
-out_spin:
- igt_spinner_end(&spin);
-out_heartbeat:
- xchg(&engine->props.timeslice_duration_ms, timeslice);
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- break;
- }
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
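-
-/*
- * A minimal sketch of the unpreemptible spinner built above, assuming
- * spin is an initialised igt_spinner and ce a fresh context; the
- * helper is illustrative, not part of the test.
- */
-static struct i915_request *
-sketch_nopreempt_spinner(struct igt_spinner *spin, struct intel_context *ce)
-{
- struct i915_request *rq;
-
- rq = igt_spinner_create_request(spin, ce, MI_ARB_CHECK);
- if (IS_ERR(rq))
- return rq;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(spin, rq)) {
- i915_request_put(rq);
- return ERR_PTR(-ETIME);
- }
-
- /* From here on, timeslicing must pass this request over */
- set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
- return rq;
-}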
-
-static int live_busywait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- enum intel_engine_id id;
- int err = -ENOMEM;
- u32 *map;
-
- /*
- * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
- * preempt the busywaits used to synchronise between rings.
- */
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ctx_lo;
- }
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_map;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_vma;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *lo, *hi;
- struct igt_live_test t;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_vma;
- }
-
- /*
- * We create two requests. The low priority request
- * busywaits on a semaphore (inside the ringbuffer where
- * it should be preemptible) and the high priority request
- * uses a MI_STORE_DWORD_IMM to update the semaphore value
- * allowing the first request to complete. If preemption
- * fails, we hang instead.
- */
-
- lo = igt_request_alloc(ctx_lo, engine);
- if (IS_ERR(lo)) {
- err = PTR_ERR(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(lo, 8);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 1;
-
- /* XXX Do we need a flush + invalidate here? */
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
-
- intel_ring_advance(lo, cs);
-
- i915_request_get(lo);
- i915_request_add(lo);
-
- if (wait_for(READ_ONCE(*map), 10)) {
- i915_request_put(lo);
- err = -ETIMEDOUT;
- goto err_vma;
- }
-
- /* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, 0, 1) != -ETIME) {
- i915_request_put(lo);
- pr_err("%s: Busywaiting request did not busywait!\n",
- engine->name);
- err = -EIO;
- goto err_vma;
- }
-
- hi = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(hi)) {
- err = PTR_ERR(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(hi, 4);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 0;
-
- intel_ring_advance(hi, cs);
- i915_request_add(hi);
-
- if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to preempt semaphore busywait!\n",
- engine->name);
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_request_put(lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_vma;
- }
- GEM_BUG_ON(READ_ONCE(*map));
- i915_request_put(lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_vma;
- }
- }
-
- err = 0;
-err_vma:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
- return err;
-}
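-
-/*
- * Condensed sketch of the busywait emitted above: poll a GGTT dword
- * until another request clears it to zero, assuming vma holds the
- * semaphore; illustrative only.
- */
-static int sketch_emit_busywait(struct i915_request *rq,
- struct i915_vma *vma)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0; /* wait until the semaphore reads zero */
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
-
- intel_ring_advance(rq, cs);
- return 0;
-}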
-
-static struct i915_request *
-spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arb)
-{
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = igt_spinner_create_request(spin, ce, arb);
- intel_context_put(ce);
- return rq;
-}
-
-static int live_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
- pr_err("Logical preemption supported, but not exposed\n");
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
-static int live_late_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {};
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
-
- /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- pr_err("First context failed to start\n");
- goto err_wedged;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("Second context overtook first?\n");
- goto err_wedged;
- }
-
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- engine->schedule(rq, &attr);
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("High priority context failed to preempt the low priority context\n");
- GEM_TRACE_DUMP();
- goto err_wedged;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
-}
-
-struct preempt_client {
- struct igt_spinner spin;
- struct i915_gem_context *ctx;
-};
-
-static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
-{
- c->ctx = kernel_context(gt->i915);
- if (!c->ctx)
- return -ENOMEM;
-
- if (igt_spinner_init(&c->spin, gt))
- goto err_ctx;
-
- return 0;
-
-err_ctx:
- kernel_context_close(c->ctx);
- return -ENOMEM;
-}
-
-static void preempt_client_fini(struct preempt_client *c)
-{
- igt_spinner_fini(&c->spin);
- kernel_context_close(c->ctx);
-}
-
-static int live_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that we can disable preemption for an individual request
- * that may be under observation and must not be interrupted.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- goto err_client_b;
- }
-
- /* Low priority client, but unpreemptable! */
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- goto err_wedged;
- }
-
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- goto err_client_b;
- }
-
- i915_request_add(rq_b);
-
- /* B is much more important than A! (But A is unpreemptable.) */
- GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
-
- /* Wait long enough for preemption and timeslicing */
- if (igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client started too early!\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&b.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d; should have been suppressed!\n",
- engine->execlists.preempt_hang.count);
- err = -EINVAL;
- goto err_wedged;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-struct live_preempt_cancel {
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
-};
-
-static int __cancel_active0(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP0 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_active1(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[2] = {};
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP1 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* no preemption */
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = spinner_create_request(&arg->b.spin,
- arg->b.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[1]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- igt_spinner_end(&arg->a.spin);
- err = wait_for_reset(arg->engine, rq[1], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != 0) {
- pr_err("Normal inflight0 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != -EIO) {
- pr_err("Cancelled inflight1 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_queued(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[3] = {};
- struct igt_live_test t;
- int err;
-
- /* Full ELSP and one in the wings */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- rq[2] = spinner_create_request(&arg->b.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[2])) {
- err = PTR_ERR(rq[2]);
- goto out;
- }
-
- i915_request_get(rq[2]);
- err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
- i915_request_add(rq[2]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[2]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq[2], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != 0) {
- pr_err("Normal inflight1 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[2]->fence.error != -EIO) {
- pr_err("Cancelled queued request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[2]);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_hostile(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- int err;
-
- /* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!intel_has_reset_engine(arg->engine->gt))
- return 0;
-
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine); /* force reset */
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_flush_test(arg->engine->i915))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_cancel(void *arg)
-{
- struct intel_gt *gt = arg;
- struct live_preempt_cancel data;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * To cancel an inflight context, we need to first remove it from the
- * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &data.a))
- return -ENOMEM;
- if (preempt_client_init(gt, &data.b))
- goto err_client_a;
-
- for_each_engine(data.engine, gt, id) {
- if (!intel_engine_has_preemption(data.engine))
- continue;
-
- err = __cancel_active0(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_active1(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_queued(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_hostile(&data);
- if (err)
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&data.b);
-err_client_a:
- preempt_client_fini(&data.a);
- return err;
-
-err_wedged:
- GEM_TRACE_DUMP();
- igt_spinner_end(&data.b.spin);
- igt_spinner_end(&data.a.spin);
- intel_gt_set_wedged(gt);
- goto err_client_b;
-}
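-
-/*
- * The cancellation idiom shared by the __cancel_*() phases above: ban
- * the context, then send a heartbeat pulse to force the preemption
- * that removes it from the GPU. A condensed, illustrative sketch:
- */
-static int sketch_cancel_inflight(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- int err;
-
- intel_context_set_banned(rq->context);
-
- err = intel_engine_pulse(engine); /* maximum priority kick */
- if (err)
- return err;
-
- return wait_for_reset(engine, rq, HZ / 2);
-}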
-
-static int live_suppress_self_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that if a preemption request does not cause a change in
- * the current execution order, the preempt-to-idle injection is
- * skipped and that we do not accidentally apply it after the CS
- * completion event.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0; /* presume black box */
-
- if (intel_vgpu_active(gt->i915))
- return 0; /* GVT forces single port & request submission */
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- st_engine_heartbeat_disable(engine);
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- /* Keep postponing the timer to avoid premature slicing */
- mod_timer(&engine->execlists.timer, jiffies + HZ);
- for (depth = 0; depth < 8; depth++) {
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
- i915_request_add(rq_b);
-
- GEM_BUG_ON(i915_request_completed(rq_a));
- engine->schedule(rq_a, &attr);
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- swap(a, b);
- rq_a = rq_b;
- }
- igt_spinner_end(&a.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- goto err_client_b;
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-static int live_chain_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client hi, lo;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Build a chain AB...BA between two contexts (A, B) and request
- * preemption of the last request. It should then complete before
- * the previously submitted spinner in B.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &hi))
- return -ENOMEM;
-
- if (preempt_client_init(gt, &lo))
- goto err_client_hi;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct igt_live_test t;
- struct i915_request *rq;
- int ring_size, count, i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- ring_size = rq->wa_tail - rq->head;
- if (ring_size < 0)
- ring_size += rq->ring->size;
- ring_size = rq->ring->size / ring_size;
- pr_debug("%s(%s): Using maximum of %d requests\n",
- __func__, engine->name, ring_size);
-
- igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- pr_err("Timed out waiting to flush %s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_wedged;
- }
-
- for_each_prime_number_from(count, 1, ring_size) {
- rq = spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&hi.spin, rq))
- goto err_wedged;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
-
- for (i = 0; i < count; i++) {
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- }
-
- rq = igt_request_alloc(hi.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
- engine->schedule(rq, &attr);
-
- igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to preempt over chain of %d\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- igt_spinner_end(&lo.spin);
- i915_request_put(rq);
-
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to flush low priority chain of %d requests\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
- }
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_wedged;
- }
- }
-
- err = 0;
-err_client_lo:
- preempt_client_fini(&lo);
-err_client_hi:
- preempt_client_fini(&hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&hi.spin);
- igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_lo;
-}
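-
-/*
- * Worked example of the ring_size estimate above, with illustrative
- * figures: if the probe spinner spans 256 bytes between rq->head and
- * rq->wa_tail in a 4096 byte ring, the chain is capped at
- * 4096 / 256 = 16 requests per iteration.
- */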
-
-static int create_gang(struct intel_engine_cs *engine,
- struct i915_request **prev)
-{
- struct drm_i915_gem_object *obj;
- struct intel_context *ce;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cs;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ce;
- }
-
- vma = i915_vma_instance(obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err_obj;
- }
-
- /* Semaphore target: spin until zero */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
-
- if (*prev) {
- u64 offset = (*prev)->batch->node.start;
-
- /* Terminate the spinner in the next lower priority batch. */
- *cs++ = MI_STORE_DWORD_IMM_GEN4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = 0;
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_obj;
- }
-
- rq->batch = i915_vma_get(vma);
- i915_request_get(rq);
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- i915_request_add(rq);
- if (err)
- goto err_rq;
-
- i915_gem_object_put(obj);
- intel_context_put(ce);
-
- rq->mock.link.next = &(*prev)->mock.link;
- *prev = rq;
- return 0;
-
-err_rq:
- i915_vma_put(rq->batch);
- i915_request_put(rq);
-err_obj:
- i915_gem_object_put(obj);
-err_ce:
- intel_context_put(ce);
- return err;
-}
-
-static int __live_preempt_ring(struct intel_engine_cs *engine,
- struct igt_spinner *spin,
- int queue_sz, int ring_sz)
-{
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int err = 0;
- int n;
-
- if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
- return -EIO;
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- tmp->ring = __intel_context_ring_size(ring_sz);
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(spin, rq)) {
- intel_gt_set_wedged(engine->gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
- /* Fill the ring until we cause a wrap */
- n = 0;
- while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, queue_sz, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- i915_request_put(rq);
-
- /* Create a second request to preempt the first ring */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
- pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Check that we roll back large chunks of a ring in order to do a
- * preemption event. Similar to live_unlite_ring, but looking at
- * ring size rather than the impact of intel_ring_direction().
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n <= 3; n++) {
- err = __live_preempt_ring(engine, &spin,
- n * SZ_4K / 4, SZ_4K);
- if (err)
- break;
- }
-
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_preempt_gang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * Build as long a chain of preempters as we can, with each
- * request higher priority than the last. Once we are ready, we release
- * the last batch which then percolates down the chain, each releasing
- * the next oldest in turn. The intent is to simply push as hard as we
- * can with the number of preemptions, trying to exceed narrow HW
- * limits. At a minimum, we insist that we can sort all the user
- * high priority levels into execution order.
- */
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq = NULL;
- struct igt_live_test t;
- IGT_TIMEOUT(end_time);
- int prio = 0;
- int err = 0;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
- return -EIO;
-
- do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
-
- err = create_gang(engine, &rq);
- if (err)
- break;
-
- /* Submit each spinner at increasing priority */
- engine->schedule(rq, &attr);
- } while (prio <= I915_PRIORITY_MAX &&
- !__igt_timeout(end_time, NULL));
- pr_debug("%s: Preempt chain of %d requests\n",
- engine->name, prio);
-
- /*
- * Such that the last spinner is the highest priority and
- * should execute first. When that spinner completes,
- * it will terminate the next lowest spinner until there
- * are no more spinners and the gang is complete.
- */
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
- if (!IS_ERR(cs)) {
- *cs = 0;
- i915_gem_object_unpin_map(rq->batch->obj);
- } else {
- err = PTR_ERR(cs);
- intel_gt_set_wedged(gt);
- }
-
- while (rq) { /* wait for each rq from highest to lowest prio */
- struct i915_request *n = list_next_entry(rq, mock.link);
-
- if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(engine->i915->drm.dev);
-
- pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -ETIME;
- }
-
- i915_vma_put(rq->batch);
- i915_request_put(rq);
- rq = n;
- }
-
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_vma *
-create_gpr_user(struct intel_engine_cs *engine,
- struct i915_vma *result,
- unsigned int offset)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *cs;
- int err;
- int i;
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, result->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- i915_vma_put(vma);
- return ERR_CAST(cs);
- }
-
- /* All GPR are clear for new contexts. We use GPR(0) as a constant */
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, 0);
- *cs++ = 1;
-
- for (i = 1; i < NUM_GPR; i++) {
- u64 addr;
-
- /*
- * Perform: GPR[i]++
- *
- * As we read and write into the context saved GPR[i], if
- * we restart this batch buffer from an earlier point, we
- * will repeat the increment and store a value > 1.
- */
- *cs++ = MI_MATH(4);
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
- *cs++ = MI_MATH_ADD;
- *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
-
- addr = result->node.start + offset + i * sizeof(*cs);
- *cs++ = MI_STORE_REGISTER_MEM_GEN8;
- *cs++ = CS_GPR(engine, 2 * i);
- *cs++ = lower_32_bits(addr);
- *cs++ = upper_32_bits(addr);
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- return vma;
-}
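-
-/*
- * The MI_MATH block above evaluates, for each i,
- *
- * SRCA := GPR[i], SRCB := GPR[0] (== 1)
- * ACCU := SRCA + SRCB
- * GPR[i] := ACCU
- *
- * i.e. GPR[i]++. Since the GPR live in the context image, replaying
- * the batch from an earlier arbitration point repeats the increment,
- * so any stored value greater than 1 betrays an illegal rewind.
- */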
-
-static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, sz);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, 0);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-static struct i915_request *
-create_gpr_client(struct intel_engine_cs *engine,
- struct i915_vma *global,
- unsigned int offset)
-{
- struct i915_vma *batch, *vma;
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- vma = i915_vma_instance(global->obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_ce;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_ce;
-
- batch = create_gpr_user(engine, vma, offset);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_vma;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_batch;
- }
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
-
- i915_vma_lock(batch);
- if (!err)
- err = i915_request_await_object(rq, batch->obj, false);
- if (!err)
- err = i915_vma_move_to_active(batch, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- batch->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(batch);
- i915_vma_unpin(batch);
-
- if (!err)
- i915_request_get(rq);
- i915_request_add(rq);
-
-out_batch:
- i915_vma_put(batch);
-out_vma:
- i915_vma_unpin(vma);
-out_ce:
- intel_context_put(ce);
- return err ? ERR_PTR(err) : rq;
-}
-
-static int preempt_user(struct intel_engine_cs *engine,
- struct i915_vma *global,
- int id)
-{
- struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_MAX
- };
- struct i915_request *rq;
- int err = 0;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(global);
- *cs++ = 0;
- *cs++ = id;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- engine->schedule(rq, &attr);
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0)
- err = -ETIME;
- i915_request_put(rq);
-
- return err;
-}
-
-static int live_preempt_user(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_vma *global;
- enum intel_engine_id id;
- u32 *result;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * In our other tests, we look at preemption in carefully
- * controlled conditions in the ringbuffer. Since most of the
- * time is spent in user batches, most of our preemptions naturally
- * occur there. We want to verify that when we preempt inside a batch
- * we continue on from the current instruction and do not roll back
- * to the start, or another earlier arbitration point.
- *
- * To verify this, we create a batch which is a mixture of
- * MI_MATH (gpr++), MI_SRM (gpr) and preemption points. Then with
- * a few preempting contexts thrown into the mix, we look for any
- * repeated instructions (which show up as incorrect values).
- */
-
- global = create_global(gt, 4096);
- if (IS_ERR(global))
- return PTR_ERR(global);
-
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
- if (IS_ERR(result)) {
- i915_vma_unpin_and_release(&global, 0);
- return PTR_ERR(result);
- }
-
- for_each_engine(engine, gt, id) {
- struct i915_request *client[3] = {};
- struct igt_live_test t;
- int i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
- continue; /* we need per-context GPR */
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- memset(result, 0, 4096);
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *rq;
-
- rq = create_gpr_client(engine, global,
- NUM_GPR * i * sizeof(u32));
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto end_test;
- }
-
- client[i] = rq;
- }
-
- /* Continuously preempt the set of 3 running contexts */
- for (i = 1; i <= NUM_GPR; i++) {
- err = preempt_user(engine, global, i);
- if (err)
- goto end_test;
- }
-
- if (READ_ONCE(result[0]) != NUM_GPR) {
- pr_err("%s: Failed to release semaphore\n",
- engine->name);
- err = -EIO;
- goto end_test;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- int gpr;
-
- if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
- err = -ETIME;
- goto end_test;
- }
-
- for (gpr = 1; gpr < NUM_GPR; gpr++) {
- if (result[NUM_GPR * i + gpr] != 1) {
- pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
- engine->name,
- i, gpr, result[NUM_GPR * i + gpr]);
- err = -EINVAL;
- goto end_test;
- }
- }
- }
-
-end_test:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i])
- break;
-
- i915_request_put(client[i]);
- }
-
- /* Flush the semaphores on error */
- smp_store_mb(result[0], -1);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
- return err;
-}
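
The pass criterion above compresses to a short invariant: every GPR slot other than the semaphore dword must have been incremented exactly once, since an instruction replayed after preemption would bump a slot twice. A minimal sketch of that check, assuming the same result layout (one NUM_GPR-dword stripe per client); check_gpr_results() is an illustrative name, not part of the selftest:

	static int check_gpr_results(const u32 *result, int nclients)
	{
		int i, gpr;

		/* as in the test above, dword 0 of each stripe is skipped */
		for (i = 0; i < nclients; i++)
			for (gpr = 1; gpr < NUM_GPR; gpr++)
				if (result[NUM_GPR * i + gpr] != 1)
					return -EINVAL;
		return 0;
	}
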
-
-static int live_preempt_timeout(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Check that we force preemption to occur by cancelling the previous
- * context if it refuses to yield the GPU.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- unsigned long saved_timeout;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- /* Flush the previous CS ack before changing timeouts */
- while (READ_ONCE(engine->execlists.pending[0]))
- cpu_relax();
-
- saved_timeout = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- intel_engine_flush_submission(engine);
- engine->props.preempt_timeout_ms = saved_timeout;
-
- if (i915_request_wait(rq, 0, HZ / 10) < 0) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_lo);
- i915_request_put(rq);
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
- return err;
-}
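
Note the unit conversion hiding in the "1 jiffy" comment above: preempt_timeout_ms is specified in milliseconds, but expiry granularity is the scheduler tick. A sketch of the rounding, assuming the stock i915 helper msecs_to_jiffies_timeout() (which rounds up and never returns zero); preempt_timeout_jiffies() is an illustrative wrapper:

	static unsigned long preempt_timeout_jiffies(unsigned int ms)
	{
		/* rounds up, plus a tick of slack; never returns 0, so a
		 * 1ms request still arms a full tick even at HZ=100 */
		return msecs_to_jiffies_timeout(ms);
	}
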
-
-static int random_range(struct rnd_state *rnd, int min, int max)
-{
- return i915_prandom_u32_max_state(max - min, rnd) + min;
-}
-
-static int random_priority(struct rnd_state *rnd)
-{
- return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
-}
-
-struct preempt_smoke {
- struct intel_gt *gt;
- struct i915_gem_context **contexts;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *batch;
- unsigned int ncontext;
- struct rnd_state prng;
- unsigned long count;
-};
-
-static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
-{
- return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
- &smoke->prng)];
-}
-
-static int smoke_submit(struct preempt_smoke *smoke,
- struct i915_gem_context *ctx, int prio,
- struct drm_i915_gem_object *batch)
-{
- struct i915_request *rq;
- struct i915_vma *vma = NULL;
- int err = 0;
-
- if (batch) {
- struct i915_address_space *vm;
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(batch, vm, NULL);
- i915_vm_put(vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
- }
-
- ctx->sched.priority = prio;
-
- rq = igt_request_alloc(ctx, smoke->engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto unpin;
- }
-
- if (vma) {
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- }
-
- i915_request_add(rq);
-
-unpin:
- if (vma)
- i915_vma_unpin(vma);
-
- return err;
-}
-
-static int smoke_crescendo_thread(void *arg)
-{
- struct preempt_smoke *smoke = arg;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, count % I915_PRIORITY_MAX,
- smoke->batch);
- if (err)
- return err;
-
- count++;
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- smoke->count = count;
- return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
- struct task_struct *tsk[I915_NUM_ENGINES] = {};
- struct preempt_smoke arg[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned long count;
- int err = 0;
-
- for_each_engine(engine, smoke->gt, id) {
- arg[id] = *smoke;
- arg[id].engine = engine;
- if (!(flags & BATCH))
- arg[id].batch = NULL;
- arg[id].count = 0;
-
- tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
- "igt/smoke:%d", id);
- if (IS_ERR(tsk[id])) {
- err = PTR_ERR(tsk[id]);
- break;
- }
- get_task_struct(tsk[id]);
- }
-
- yield(); /* start all threads before we kthread_stop() */
-
- count = 0;
- for_each_engine(engine, smoke->gt, id) {
- int status;
-
- if (IS_ERR_OR_NULL(tsk[id]))
- continue;
-
- status = kthread_stop(tsk[id]);
- if (status && !err)
- err = status;
-
- count += arg[id].count;
-
- put_task_struct(tsk[id]);
- }
-
- pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return err;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
- enum intel_engine_id id;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- for_each_engine(smoke->engine, smoke->gt, id) {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, random_priority(&smoke->prng),
- flags & BATCH ? smoke->batch : NULL);
- if (err)
- return err;
-
- count++;
- }
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return 0;
-}
-
-static int live_preempt_smoke(void *arg)
-{
- struct preempt_smoke smoke = {
- .gt = arg,
- .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 256,
- };
- const unsigned int phase[] = { 0, BATCH };
- struct igt_live_test t;
- int err = -ENOMEM;
- u32 *cs;
- int n;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
- return 0;
-
- smoke.contexts = kmalloc_array(smoke.ncontext,
- sizeof(*smoke.contexts),
- GFP_KERNEL);
- if (!smoke.contexts)
- return -ENOMEM;
-
- smoke.batch =
- i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
- if (IS_ERR(smoke.batch)) {
- err = PTR_ERR(smoke.batch);
- goto err_free;
- }
-
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err_batch;
- }
- for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
- cs[n] = MI_ARB_CHECK;
- cs[n] = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(smoke.batch);
- i915_gem_object_unpin_map(smoke.batch);
-
- if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
- err = -EIO;
- goto err_batch;
- }
-
- for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.gt->i915);
- if (!smoke.contexts[n])
- goto err_ctx;
- }
-
- for (n = 0; n < ARRAY_SIZE(phase); n++) {
- err = smoke_crescendo(&smoke, phase[n]);
- if (err)
- goto err_ctx;
-
- err = smoke_random(&smoke, phase[n]);
- if (err)
- goto err_ctx;
- }
-
-err_ctx:
- if (igt_live_test_end(&t))
- err = -EIO;
-
- for (n = 0; n < smoke.ncontext; n++) {
- if (!smoke.contexts[n])
- break;
- kernel_context_close(smoke.contexts[n]);
- }
-
-err_batch:
- i915_gem_object_put(smoke.batch);
-err_free:
- kfree(smoke.contexts);
-
- return err;
-}
-
-static int nop_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int nctx,
- unsigned int flags)
-#define CHAIN BIT(0)
-{
- IGT_TIMEOUT(end_time);
- struct i915_request *request[16] = {};
- struct intel_context *ve[16];
- unsigned long n, prime, nc;
- struct igt_live_test t;
- ktime_t times[2] = {};
- int err;
-
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
-
- for (n = 0; n < nctx; n++) {
- ve[n] = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve[n])) {
- err = PTR_ERR(ve[n]);
- nctx = n;
- goto out;
- }
-
- err = intel_context_pin(ve[n]);
- if (err) {
- intel_context_put(ve[n]);
- nctx = n;
- goto out;
- }
- }
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
- if (err)
- goto out;
-
- for_each_prime_number_from(prime, 1, 8192) {
- times[1] = ktime_get_raw();
-
- if (flags & CHAIN) {
- for (nc = 0; nc < nctx; nc++) {
- for (n = 0; n < prime; n++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- } else {
- for (n = 0; n < prime; n++) {
- for (nc = 0; nc < nctx; nc++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- }
-
- for (nc = 0; nc < nctx; nc++) {
- if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- break;
- }
- }
-
- times[1] = ktime_sub(ktime_get_raw(), times[1]);
- if (prime == 1)
- times[0] = times[1];
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- request[nc] = NULL;
- }
-
- if (__igt_timeout(end_time, NULL))
- break;
- }
-
- err = igt_live_test_end(&t);
- if (err)
- goto out;
-
- pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
- nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
- prime, div64_u64(ktime_to_ns(times[1]), prime));
-
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- intel_context_unpin(ve[nc]);
- intel_context_put(ve[nc]);
- }
- return err;
-}
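
For reading the pr_info above: times[0] is the wall time of a lone request, while the second figure amortises the final prime-sized batch. A sketch of that arithmetic (amortised_ns() is an illustrative name):

	static u64 amortised_ns(ktime_t batch, unsigned long prime)
	{
		/* spread the fixed submission overhead across the batch */
		return div64_u64(ktime_to_ns(batch), prime);
	}
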
-
-static unsigned int
-__select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- bool (*filter)(const struct intel_engine_cs *))
-{
- unsigned int n = 0;
- unsigned int inst;
-
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- if (filter && !filter(gt->engine_class[class][inst]))
- continue;
-
- siblings[n++] = gt->engine_class[class][inst];
- }
-
- return n;
-}
-
-static unsigned int
-select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings)
-{
- return __select_siblings(gt, class, siblings, NULL);
-}
-
-static int live_virtual_engine(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for_each_engine(engine, gt, id) {
- err = nop_virtual_engine(gt, &engine, 1, 1, 0);
- if (err) {
- pr_err("Failed to wrap engine %s: err=%d\n",
- engine->name, err);
- return err;
- }
- }
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, n;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(gt, siblings, nsibling,
- n, 0);
- if (err)
- return err;
- }
-
- err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mask_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct intel_context *ve;
- struct igt_live_test t;
- unsigned int n;
- int err;
-
- /*
- * Check that by setting the execution mask on a request, we can
- * restrict it to our desired engine within the virtual engine.
- */
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_close;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < nsibling; n++) {
- request[n] = i915_request_create(ve);
- if (IS_ERR(request[n])) {
- err = PTR_ERR(request[n]);
- nsibling = n;
- goto out;
- }
-
- /* Reverse order as it's more likely to be unnatural */
- request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
-
- i915_request_get(request[n]);
- i915_request_add(request[n]);
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out;
- }
-
- if (request[n]->engine != siblings[nsibling - n - 1]) {
- pr_err("Executed on wrong sibling '%s', expected '%s'\n",
- request[n]->engine->name,
- siblings[nsibling - n - 1]->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- err = igt_live_test_end(&t);
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (n = 0; n < nsibling; n++)
- i915_request_put(request[n]);
-
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_close:
- return err;
-}
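
The mechanism exercised above is just the per-request execution mask. As a usage sketch (submit_on_sibling() is illustrative; ve and sibling are assumed to be set up as in the test):

	static int submit_on_sibling(struct intel_context *ve,
				     struct intel_engine_cs *sibling)
	{
		struct i915_request *rq;

		rq = i915_request_create(ve);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* only this physical engine remains eligible to run rq */
		rq->execution_mask = sibling->mask;
		i915_request_add(rq);
		return 0;
	}
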
-
-static int live_virtual_mask(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = mask_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int slicein_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must take part in timeslicing on the target engines.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for (n = 0; n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
- __func__, rq->engine->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int sliceout_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must allow others a fair timeslice.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- /* XXX We do not handle oversubscription and fairness with normal rq */
- for (n = 0; n < nsibling; n++) {
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- for (n = 0; !err && n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
- __func__, siblings[n]->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
- }
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_slice(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = __select_siblings(gt, class, siblings,
- intel_engine_has_timeslices);
- if (nsibling < 2)
- continue;
-
- err = slicein_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
-
- err = sliceout_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int preserved_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *last = NULL;
- struct intel_context *ve;
- struct i915_vma *scratch;
- struct igt_live_test t;
- unsigned int n;
- int err = 0;
- u32 *cs;
-
- scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- err = i915_vma_sync(scratch);
- if (err)
- goto out_scratch;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_scratch;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- struct intel_engine_cs *engine = siblings[n % nsibling];
- struct i915_request *rq;
-
- rq = i915_request_create(ve);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_end;
- }
-
- i915_request_put(last);
- last = i915_request_get(rq);
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
- *cs++ = n + 1;
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* Restrict this request to run on a particular engine */
- rq->execution_mask = engine->mask;
- i915_request_add(rq);
- }
-
- if (i915_request_wait(last, 0, HZ / 5) < 0) {
- err = -ETIME;
- goto out_end;
- }
-
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- if (cs[n] != n) {
- pr_err("Incorrect value[%d] found for GPR[%d]\n",
- cs[n], n);
- err = -EINVAL;
- break;
- }
- }
-
- i915_gem_object_unpin_map(scratch->obj);
-
-out_end:
- if (igt_live_test_end(&t))
- err = -EIO;
- i915_request_put(last);
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_scratch:
- i915_vma_unpin_and_release(&scratch, 0);
- return err;
-}
-
-static int live_virtual_preserved(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that the context image retains non-privileged (user) registers
- * from one engine to the next. For this we check that the CS_GPR
- * are preserved.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- /* As we use CS_GPR, we cannot run on gens before they existed on all engines. */
- if (INTEL_GEN(gt->i915) < 9)
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = preserved_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int bond_virtual_engine(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
- struct intel_engine_cs *master;
- struct i915_request *rq[16];
- enum intel_engine_id id;
- struct igt_spinner spin;
- unsigned long n;
- int err;
-
- /*
- * A set of bonded requests is intended to be run concurrently
- * across a number of engines. We use one request per-engine
- * and a magic fence to schedule each of the bonded requests
- * at the same time. A consequence of our current scheduler is that
- * we only move requests to the HW ready queue when the request
- * becomes ready, that is when all of its prerequisite fences have
- * been signaled. As one of those fences is the master submit fence,
- * there is a delay on all secondary fences as the HW may be
- * currently busy. Equally, as all the requests are independent,
- * they may have other fences that delay individual request
- * submission to HW. Ergo, we do not guarantee that all requests are
- * immediately submitted to HW at the same time, just that if the
- * rules are abided by, they are ready at the same time as the
- * first is submitted. Userspace can embed semaphores in its batch
- * to ensure parallel execution of its phases as it requires.
- * Though naturally it gets requested that perhaps the scheduler should
- * take care of parallel execution, even across preemption events on
- * different HW. (The proper answer is of course "lalalala".)
- *
- * With the submit-fence, we have identified three possible phases
- * of synchronisation depending on the master fence: queued (not
- * ready), executing, and signaled. The first two are quite simple
- * and checked below. However, the signaled master fence handling is
- * contentious. Currently we do not distinguish between a signaled
- * fence and an expired fence, as once signaled it does not convey
- * any information about the previous execution. It may even have been
- * freed, so checking it later may not be possible at all. Ergo we currently
- * do not apply the bonding constraint for an already signaled fence,
- * as our expectation is that it should not constrain the secondaries
- * and is outside of the scope of the bonded request API (i.e. all
- * userspace requests are meant to be running in parallel). As
- * it imposes no constraint, and is effectively a no-op, we do not
- * check below as normal execution flows are checked extensively above.
- *
- * XXX Is the degenerate handling of signaled submit fences the
- * expected behaviour for userspace?
- */
-
- GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- err = 0;
- rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, gt, id) {
- struct i915_sw_fence fence = {};
- struct intel_context *ce;
-
- if (master->class == class)
- continue;
-
- ce = intel_context_create(master);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
- rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
- intel_context_put(ce);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto out;
- }
- i915_request_get(rq[0]);
-
- if (flags & BOND_SCHEDULE) {
- onstack_fence_init(&fence);
- err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
- &fence,
- GFP_KERNEL);
- }
-
- i915_request_add(rq[0]);
- if (err < 0)
- goto out;
-
- if (!(flags & BOND_SCHEDULE) &&
- !igt_wait_for_spinner(&spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- struct intel_context *ve;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_virtual_engine_attach_bond(ve->engine,
- master,
- siblings[n]);
- if (err) {
- intel_context_put(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_context_pin(ve);
- intel_context_put(ve);
- if (err) {
- onstack_fence_fini(&fence);
- goto out;
- }
-
- rq[n + 1] = i915_request_create(ve);
- intel_context_unpin(ve);
- if (IS_ERR(rq[n + 1])) {
- err = PTR_ERR(rq[n + 1]);
- onstack_fence_fini(&fence);
- goto out;
- }
- i915_request_get(rq[n + 1]);
-
- err = i915_request_await_execution(rq[n + 1],
- &rq[0]->fence,
- ve->engine->bond_execute);
- i915_request_add(rq[n + 1]);
- if (err < 0) {
- onstack_fence_fini(&fence);
- goto out;
- }
- }
- onstack_fence_fini(&fence);
- intel_engine_flush_submission(master);
- igt_spinner_end(&spin);
-
- if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
- pr_err("Master request did not execute (on %s)!\n",
- rq[0]->engine->name);
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(rq[n + 1], 0,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq[n + 1]->engine != siblings[n]) {
- pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
- siblings[n]->name,
- rq[n + 1]->engine->name,
- rq[0]->engine->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- rq[0] = ERR_PTR(-ENOMEM);
- }
-
-out:
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- igt_spinner_fini(&spin);
- return err;
-}
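
Stripped of its error handling, the bonding recipe the loop above exercises is small; a sketch using only the calls from the test (submit_bonded() is an illustrative name):

	static int submit_bonded(struct intel_context *ve,
				 struct i915_request *master)
	{
		struct i915_request *rq;
		int err;

		rq = i915_request_create(ve);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* schedule for concurrent execution with the master; the bond
		 * narrows which sibling may be picked once the master runs */
		err = i915_request_await_execution(rq, &master->fence,
						   ve->engine->bond_execute);
		i915_request_add(rq);
		return err;
	}
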
-
-static int live_virtual_bond(void *arg)
-{
- static const struct phase {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "", 0 },
- { "schedule", BOND_SCHEDULE },
- { },
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- const struct phase *p;
- int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (p = phases; p->name; p++) {
- err = bond_virtual_engine(gt,
- class, siblings, nsibling,
- p->flags);
- if (err) {
- pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
- __func__, p->name, class, nsibling, err);
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static int reset_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct intel_engine_cs *engine;
- struct intel_context *ve;
- struct igt_spinner spin;
- struct i915_request *rq;
- unsigned int n;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendants are not executed while the capture is in progress.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_spin;
- }
-
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_disable(siblings[n]);
-
- rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out_heartbeat;
- }
-
- engine = rq->engine;
- GEM_BUG_ON(engine == ve->engine);
-
- /* Take ownership of the reset and tasklet */
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Fake a preemption event; failed of course */
- spin_lock_irq(&engine->active.lock);
- __unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->active.lock);
- GEM_BUG_ON(rq->engine != ve->engine);
-
- /* Reset the engine while keeping our active request on hold */
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- /* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- i915_request_get(rq);
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_rq;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
-
-out_rq:
- i915_request_put(rq);
-out_heartbeat:
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_enable(siblings[n]);
-
- intel_context_put(ve);
-out_spin:
- igt_spinner_fini(&spin);
- return err;
-}
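
The hold/unhold dance above mirrors what offline error capture needs; reduced to its essence (capture_and_release() is an illustrative name, and the capture step is elided):

	static void capture_and_release(struct intel_engine_cs *engine,
					struct i915_request *rq)
	{
		/* park the guilty request (and descendants) off the runqueues */
		execlists_hold(engine, rq);

		intel_engine_reset(engine, NULL); /* rq->fence.error becomes -EIO */

		/* ... offline error capture would run here ... */

		/* only now may the held request reach the HW again */
		execlists_unhold(engine, rq);
	}
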
-
-static int live_virtual_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that we handle a reset event within a virtual engine.
- * Only the physical engine is reset, but we have to check the flow
- * of the virtual requests around the reset, and make sure none of them
- * are forgotten.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = reset_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-int intel_execlists_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_sanitycheck),
- SUBTEST(live_unlite_switch),
- SUBTEST(live_unlite_preempt),
- SUBTEST(live_unlite_ring),
- SUBTEST(live_pin_rewind),
- SUBTEST(live_hold_reset),
- SUBTEST(live_error_interrupt),
- SUBTEST(live_timeslice_preempt),
- SUBTEST(live_timeslice_rewind),
- SUBTEST(live_timeslice_queue),
- SUBTEST(live_timeslice_nopreempt),
- SUBTEST(live_busywait_preempt),
- SUBTEST(live_preempt),
- SUBTEST(live_late_preempt),
- SUBTEST(live_nopreempt),
- SUBTEST(live_preempt_cancel),
- SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_chain_preempt),
- SUBTEST(live_preempt_ring),
- SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_timeout),
- SUBTEST(live_preempt_user),
- SUBTEST(live_preempt_smoke),
- SUBTEST(live_virtual_engine),
- SUBTEST(live_virtual_mask),
- SUBTEST(live_virtual_preserved),
- SUBTEST(live_virtual_slice),
- SUBTEST(live_virtual_bond),
- SUBTEST(live_virtual_reset),
- };
-
- if (!HAS_EXECLISTS(i915))
- return 0;
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
if (!lrc)
return -ENOMEM;
+ GEM_BUG_ON(offset_in_page(lrc));
err = 0;
for_each_engine(engine, gt, id) {
@@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
- execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
- engine->kernel_context,
- engine,
- engine->kernel_context->ring,
- true);
+ __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context, engine, true);
dw = 0;
do {
- u32 lri = hw[dw];
+ u32 lri = READ_ONCE(hw[dw]);
if (lri == 0) {
dw++;
@@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg)
dw++;
while (lri) {
- if (hw[dw] != lrc[dw]) {
+ u32 offset = READ_ONCE(hw[dw]);
+
+ if (offset != lrc[dw]) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
- engine->name, dw, hw[dw], lrc[dw]);
+ engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
break;
}
@@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg)
dw += 2;
lri -= 2;
}
- } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
if (err) {
pr_info("%s: HW register image:\n", engine->name);
@@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(lrc);
+ free_page((unsigned long)lrc);
return err;
}
@@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
err = emit_semaphore_signal(engine->kernel_context, slot);
if (err)
goto err_rq;
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
} else {
slot[0] = 1;
wmb();
@@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (test_and_set_bit(bit, lock))
- return;
-
- tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->execlists.tasklet);
- if (!rq->fence.error)
- intel_engine_reset(engine, NULL);
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
}
static struct i915_request *garbage(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index b25eba50c88e..cf373c72359e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -5,6 +5,7 @@
*/
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"
#include "gem/selftests/mock_context.h"
@@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
return err;
}
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = table;
- arg->scratch = create_scratch(gt);
+ arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg)
static int read_regs(struct i915_request *rq,
u32 addr, unsigned int count,
- uint32_t *offset)
+ u32 *offset)
{
unsigned int i;
u32 *cs;
@@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq,
static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr;
@@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq,
static int read_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq,
static int check_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
unsigned int i;
u32 expect;
@@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
static int check_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce,
static int __live_mocs_reset(struct live_mocs *mocs,
struct intel_context *ce)
{
+ struct intel_gt *gt = ce->engine->gt;
int err;
- err = intel_engine_reset(ce->engine, "mocs");
- if (err)
- return err;
+ if (intel_has_reset_engine(gt)) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
- err = active_engine_reset(ce, "mocs");
- if (err)
- return err;
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
- intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+ if (intel_has_gpu_reset(gt)) {
+ intel_gt_reset(gt, ce->engine->mask, "mocs");
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg)
/* Check the mocs setup is retained over per-engine and global resets */
- if (!intel_has_reset_engine(gt))
- return 0;
-
err = live_mocs_init(&mocs, gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 64ef5ee5decf..61abc0556601 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -6,6 +6,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index ef5aeebbeeb0..8784257ec808 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,6 +9,7 @@
#include "i915_memcpy.h"
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
@@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt,
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1))
- memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+ memset_io(s, STACK_MAGIC, PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
crc[page] = crc32_le(0, in, PAGE_SIZE);
@@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->error_capture.start,
PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
x = crc32_le(0, in, PAGE_SIZE);
@@ -320,17 +321,25 @@ static int igt_atomic_engine_reset(void *arg)
goto out_unlock;
for_each_engine(engine, gt, id) {
- tasklet_disable(&engine->execlists.tasklet);
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (t->func)
+ tasklet_disable(t);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
@@ -339,7 +348,10 @@ static int igt_atomic_engine_reset(void *arg)
}
intel_engine_pm_put(engine);
- tasklet_enable(&engine->execlists.tasklet);
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index aa5675ecb5cc..967641fee42a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq)
{
mutex_lock(&rps->lock);
GEM_BUG_ON(!intel_rps_is_active(rps));
- intel_rps_set(rps, freq);
+ if (wait_for(!intel_rps_set(rps, freq), 50)) {
+ mutex_unlock(&rps->lock);
+ return 0;
+ }
GEM_BUG_ON(rps->last_freq != freq);
mutex_unlock(&rps->lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 2edf2b15885f..6f3a3687ef0f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -9,6 +9,7 @@
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
@@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg)
}
count++;
- if (8 * watcher[1].rq->ring->emit >
- 3 * watcher[1].rq->ring->size) {
- i915_request_put(rq);
- break;
- }
-
/* Flush the timeline before manually wrapping again */
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
@@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg)
i915_request_put(rq);
goto out;
}
-
retire_requests(tl);
i915_request_put(rq);
+
+ /* Single requests are limited to half a ring at most */
+ if (8 * watcher[1].rq->ring->emit >
+ 3 * watcher[1].rq->ring->size)
+ break;
+
} while (!__igt_timeout(end_time, NULL));
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 61a0532d0f3d..2070b91cb607 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
}
static struct drm_i915_gem_object *
-read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+read_nonprivs(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
struct i915_request *rq;
@@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results)
}
}
-static int check_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_whitelist(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct drm_i915_gem_object *results;
struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
- results = read_nonprivs(ctx, engine);
+ results = read_nonprivs(ce);
if (IS_ERR(results))
return PTR_ERR(results);
@@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
const char *name)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx, *tmp;
+ struct intel_context *ce, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
@@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
engine->whitelist.count, engine->name, name);
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
err = igt_spinner_init(&spin, engine->gt);
if (err)
goto out_ctx;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out_spin;
@@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out_spin;
}
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
goto out_spin;
}
- tmp = kernel_context(i915);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto out_spin;
}
- kernel_context_close(ctx);
- ctx = tmp;
+ intel_context_put(ce);
+ ce = tmp;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
out_spin:
igt_spinner_fini(&spin);
out_ctx:
- kernel_context_close(ctx);
+ intel_context_put(ce);
return err;
}
@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
- int err = 0, i, v;
+ int err = 0, i, v, sz;
u32 *cs, *results;
- scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
+ sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+ scratch = __vm_create_scratch_for_read(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -786,15 +787,15 @@ out:
return err;
}
-static int read_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int read_whitelisted_registers(struct intel_context *ce,
struct i915_vma *results)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
int i, err = 0;
u32 srm, *cs;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -834,18 +835,15 @@ err_req:
return request_add_sync(rq, err);
}
-static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int scrub_whitelisted_registers(struct intel_context *ce)
{
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- vm = i915_gem_context_get_vm_rcu(ctx);
- batch = create_batch(vm);
- i915_vm_put(vm);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct {
- struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
} client[2] = {};
struct intel_engine_cs *engine;
@@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg)
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_address_space *vm;
- struct i915_gem_context *c;
-
- c = kernel_context(gt->i915);
- if (IS_ERR(c)) {
- err = PTR_ERR(c);
- goto err;
- }
-
- vm = i915_gem_context_get_vm_rcu(c);
-
- client[i].scratch[0] = create_scratch(vm, 1024);
+ client[i].scratch[0] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(vm, 1024);
+ client[i].scratch[1] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
-
- client[i].ctx = c;
- i915_vm_put(vm);
}
for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2];
+
if (!engine->kernel_context->vm)
continue;
if (!whitelist_writable_count(engine))
continue;
+ ce[0] = intel_context_create(engine);
+ if (IS_ERR(ce[0])) {
+ err = PTR_ERR(ce[0]);
+ break;
+ }
+ ce[1] = intel_context_create(engine);
+ if (IS_ERR(ce[1])) {
+ err = PTR_ERR(ce[1]);
+ intel_context_put(ce[0]);
+ break;
+ }
+
/* Read default values */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[0]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Try to overwrite registers (should only affect ctx0) */
- err = scrub_whitelisted_registers(client[0].ctx, engine);
+ err = scrub_whitelisted_registers(ce[0]);
if (err)
- goto err;
+ goto err_ce;
/* Read values from ctx1, we expect these to be defaults */
- err = read_whitelisted_registers(client[1].ctx, engine,
- client[1].scratch[0]);
+ err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Verify that both reads return the same default values */
err = check_whitelisted_registers(engine,
@@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg)
client[1].scratch[0],
result_eq);
if (err)
- goto err;
+ goto err_ce;
/* Read back the updated values in ctx0 */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[1]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
if (err)
- goto err;
+ goto err_ce;
/* User should be granted privilege to overwrite regs */
err = check_whitelisted_registers(engine,
client[0].scratch[0],
client[0].scratch[1],
result_neq);
+err_ce:
+ intel_context_put(ce[1]);
+ intel_context_put(ce[0]);
if (err)
- goto err;
+ break;
}
err:
for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i].ctx)
- break;
-
i915_vma_unpin_and_release(&client[i].scratch[1], 0);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- kernel_context_close(client[i].ctx);
}
if (igt_flush_test(gt->i915))
@@ -1128,18 +1119,21 @@ err:
}
static bool
-verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
const char *str)
{
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
- for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
- enum intel_engine_id id = ce->engine->id;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return false;
ok &= engine_wa_list_verify(ce,
&lists->engine[id].wa_list,
@@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= engine_wa_list_verify(ce,
&lists->engine[id].ctx_wa_list,
str) == 0;
+
+ intel_context_put(ce);
}
return ok;
@@ -1157,7 +1153,6 @@ static int
live_gpu_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg)
if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- i915_gem_context_lock_engines(ctx);
-
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(gt);
@@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg)
reference_lists_init(gt, &lists);
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok)
goto out;
intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
- ok = verify_wa_lists(ctx, &lists, "after reset");
+ ok = verify_wa_lists(gt, &lists, "after reset");
out:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
@@ -1200,8 +1187,8 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
@@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(gt, &lists);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_engine(engine, gt, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ break;
+ }
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:idle");
- ok = verify_wa_lists(ctx, &lists, "after idle reset");
+ ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:active");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_wa_lists(ctx, &lists, "after busy reset");
+ ok = verify_wa_lists(gt, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- }
+
err:
- i915_gem_context_unlock_engines(ctx);
+ intel_context_put(ce);
+ if (ret)
+ break;
+ }
+
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
- kernel_context_close(ctx);
igt_flush_test(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 5982b62f913d..a4d8fc9e2374 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
struct file *file;
void *ptr;
- if (obj->ops == &i915_gem_shmem_ops) {
+ if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2a343a977987..4545e90e3bf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 action[] = {
- INTEL_GUC_ACTION_EXIT_S_STATE,
- GUC_POWER_D0,
- };
-
- /*
- * If GuC communication is enabled but submission is not supported,
- * we do not need to resume the GuC but we do need to enable the
- * GuC communication on resume (above).
- */
- if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
- return 0;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ /* XXX: to be implemented with submission interface rework */
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e84ab67b317d..bc2ba7d0626c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,13 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct i915_vma *workqueue;
- void *workqueue_vaddr;
- spinlock_t wq_lock;
-
- struct i915_vma *proc_desc;
- void *proc_desc_vaddr;
-
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 5212ff844292..17526717368c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_lrc.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index f9d0907ea1a5..2270d6b3b272 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
static int guc_wait_ucode(struct intel_uncore *uncore)
{
- struct drm_device *drm = &uncore->i915->drm;
u32 status;
int ret;
@@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
* attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(uncore, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
if (ret) {
- drm_err(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_err(drm, "GuC load failed: status: Reset = %d, "
+ struct drm_device *drm = &uncore->i915->drm;
+
+ drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_dbg(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_err(drm, "GuC firmware signature verification failed\n");
+ drm_dbg(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_err(drm, "GuC firmware exception. EIP: %#x\n",
+ drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f5..23dc0aeaa0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,11 +6,14 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
#include "intel_guc_submission.h"
@@ -54,6 +57,8 @@
*
*/
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
return &base[id];
}
-static int guc_workqueue_create(struct intel_guc *guc)
-{
- return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
- &guc->workqueue_vaddr);
-}
-
-static void guc_workqueue_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static int guc_proc_desc_create(struct intel_guc *guc)
-{
- const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
-
- return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
- &guc->proc_desc_vaddr);
-}
-
-static void guc_proc_desc_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
-}
-
-static void guc_proc_desc_init(struct intel_guc *guc)
-{
- struct guc_process_desc *desc;
-
- desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
-
- /*
- * XXX: pDoorbell and WQVBaseAddress are pointers in process address
- * space for ring3 clients (set them as in mmap_ioctl) or kernel
- * space for kernel clients (map on demand instead? May make debug
- * easier to have it mapped).
- */
- desc->wq_base_addr = 0;
- desc->db_base_addr = 0;
-
- desc->wq_size_bytes = GUC_WQ_SIZE;
- desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-}
-
-static void guc_proc_desc_fini(struct intel_guc *guc)
-{
- memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
-}
-
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
@@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc)
desc->stage_id = 0;
desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
- desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
desc->wq_size = GUC_WQ_SIZE;
}
@@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc)
memset(desc, 0, sizeof(*desc));
}
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc *guc,
- u32 target_engine, u32 context_desc,
- u32 ring_tail, u32 fence_id)
-{
- /* wqi_len is in DWords, and does not include the one-word header */
- const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = guc->proc_desc_vaddr;
- struct guc_wq_item *wqi;
- u32 wq_off;
-
- lockdep_assert_held(&guc->wq_lock);
-
- /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
- * should not have the case where structure wqi is across page, neither
- * wrapped to the beginning. This simplifies the implementation below.
- *
- * XXX: if not the case, we need save data to a temp wqi and copy it to
- * workqueue buffer dw by dw.
- */
- BUILD_BUG_ON(wqi_size != 16);
-
- /* We expect the WQ to be active if we're appending items to it */
- GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
- /* Free space is guaranteed. */
- wq_off = READ_ONCE(desc->tail);
- GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
- GUC_WQ_SIZE) < wqi_size);
- GEM_BUG_ON(wq_off & (wqi_size - 1));
-
- wqi = guc->workqueue_vaddr + wq_off;
-
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
-
- /* Make the update visible to GuC */
- WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
-
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = rq->context->lrc.ccid;
- u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
- guc_wq_item_append(guc, engine->guc_id, ctx_desc,
- ring_tail, rq->fence.seqno);
+ /* Leave a stub, as this function will be reused in future patches */
}
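For context on what was dropped here: the deleted guc_wq_item_append() implemented the legacy work-queue submission protocol. A minimal sketch of the arithmetic it relied on, reusing the names from the removed code and assuming (as the masking requires) that GUC_WQ_SIZE is a power of two:

/* Sketch only, not part of the patch: the removed 4-dword work item. */
const size_t wqi_size = sizeof(struct guc_wq_item);	/* 16 bytes */
const u32 wqi_len = wqi_size / sizeof(u32) - 1;		/* 3: length excludes the header dword */

/* Free space in the ring, computed as in the removed GEM_BUG_ON. */
u32 space = CIRC_SPACE(desc->tail, desc->head, GUC_WQ_SIZE);

/* The tail advance wraps correctly only because GUC_WQ_SIZE is a power of two. */
desc->tail = (desc->tail + wqi_size) & (GUC_WQ_SIZE - 1);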
/*
@@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine,
{
struct intel_guc *guc = &engine->gt->uc.guc;
- spin_lock(&guc->wq_lock);
-
do {
struct i915_request *rq = *out++;
flush_ggtt_writes(rq->ring->vma);
guc_add_request(guc, rq);
} while (out != end);
-
- spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void guc_reset_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ u32 head,
+ bool scrub)
{
- struct i915_request * const *port, *rq;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- /* Note we are only using the inflight and not the pending queue */
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub)
+ lrc_init_regs(ce, engine, true);
- for (port = execlists->active; (rq = *port); port++)
- schedule_out(rq);
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ lrc_update_regs(ce, engine, head);
}
static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- cancel_port_requests(execlists);
-
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
if (!rq)
@@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+ guc_reset_state(rq->context, engine, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->active.lock, flags);
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- cancel_port_requests(execlists);
-
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
i915_request_set_error_once(rq, -EIO);
@@ -497,12 +395,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
}
/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
-/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
@@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- ret = guc_workqueue_create(guc);
- if (ret)
- goto err_pool;
-
- ret = guc_proc_desc_create(guc);
- if (ret)
- goto err_workqueue;
-
- spin_lock_init(&guc->wq_lock);
-
return 0;
-
-err_workqueue:
- guc_workqueue_destroy(guc);
-err_pool:
- guc_stage_desc_pool_destroy(guc);
- return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
if (guc->stage_desc_pool) {
- guc_proc_desc_destroy(guc);
- guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
}
@@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt)
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
-static void guc_set_default_submission(struct intel_engine_cs *engine)
+static int guc_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .alloc = guc_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int guc_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
/*
- * We inherit a bunch of functions from execlists that we'd like
- * to keep using:
- *
- * engine->submit_request = execlists_submit_request;
- * engine->cancel_requests = execlists_cancel_requests;
- * engine->schedule = execlists_schedule;
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += GUC_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= GUC_REQUEST_SIZE;
+ return 0;
+}
+
+static inline void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ queue_request(engine, rq, rq_prio(rq));
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&rq->sched.link));
+
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
*
- * But we need to override the actual submission backend in order
- * to talk to the GuC.
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
*/
- intel_execlists_set_default_submission(engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
- /* do not use execlists park/unpark */
- engine->park = engine->unpark = NULL;
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+}
+
+static void start_engine(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
+static int guc_resume(struct intel_engine_cs *engine)
+{
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ setup_hwsp(engine);
+ start_engine(engine);
+
+ return 0;
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = guc_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
/*
* For the breadcrumb irq to work we need the interrupts to stay
@@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_release(struct intel_engine_cs *engine)
{
- struct intel_gt *gt = guc_to_gt(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ tasklet_kill(&engine->execlists.tasklet);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = guc_resume;
+
+ engine->cops = &guc_context_ops;
+ engine->request_alloc = guc_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = guc_set_default_submission;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+ engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+}
+
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
/*
- * We're using GuC work items for submitting work through GuC. Since
- * we're coalescing multiple requests from a single context into a
- * single work item prior to assigning it to execlist_port, we can
- * never have more work items than the total number of ports (for all
- * engines). The GuC firmware is controlling the HEAD of work queue,
- * and it is guaranteed that it will remove the work item from the
- * queue before our request is completed.
+ * The setup relies on several assumptions (e.g. irqs always enabled)
+ * that are only valid on gen11+
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
- sizeof(struct guc_wq_item) *
- I915_NUM_ENGINES > GUC_WQ_SIZE);
+ GEM_BUG_ON(INTEL_GEN(i915) < 11);
+
+ tasklet_init(&engine->execlists.tasklet,
+ guc_submission_tasklet, (unsigned long)engine);
+
+ guc_default_vfuncs(engine);
+ guc_default_irqs(engine);
- guc_proc_desc_init(guc);
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = guc_sanitize;
+ engine->release = guc_release;
+
+ return 0;
+}
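A hypothetical call site, sketching how the new entry point pairs with the execlists path during engine init (the real wiring lands in the engine setup code, not in this hunk; intel_uc_uses_guc_submission() and intel_execlists_submission_setup() are the existing i915 helpers):

/* Sketch only: choose the submission backend per engine at init time. */
static int setup_submission(struct intel_engine_cs *engine)
{
	if (intel_uc_uses_guc_submission(&engine->gt->uc))
		return intel_guc_submission_setup(engine);

	return intel_execlists_submission_setup(engine);
}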
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(gt);
-
- for_each_engine(engine, gt, id) {
- engine->set_default_submission = guc_set_default_submission;
- engine->set_default_submission(engine);
- }
+ guc_interrupts_capture(guc_to_gt(guc));
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(gt);
guc_stage_desc_fini(guc);
- guc_proc_desc_fini(guc);
}
static bool __guc_submission_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 4cf9d3e50263..5f7b9e6347d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4e6070e95fe9..6abb8f2dc33d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -15,6 +15,29 @@
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (i915->params.enable_guc != -1)
+ return;
+
+ /* Don't enable GuC/HuC on pre-Gen12 */
+ if (INTEL_GEN(i915) < 12) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Don't enable GuC/HuC on older Gen12 platforms */
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Default: enable HuC authentication only */
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+}
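Restating the resulting policy as a decision table (sketch; bit values per i915_params.h, where ENABLE_GUC_SUBMISSION is BIT(0) and ENABLE_GUC_LOAD_HUC is BIT(1)):

/* enable_guc == -1 (auto) resolves to:
 *   pre-Gen12      -> 0                    (GuC and HuC off)
 *   TGL / RKL      -> 0                    (GuC and HuC off)
 *   other Gen12+   -> ENABLE_GUC_LOAD_HUC  (HuC authentication only)
 * Any explicit modparam value is left untouched.
 */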
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -52,9 +75,6 @@ static void __confirm_options(struct intel_uc *uc)
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915->params.enable_guc == -1)
- return;
-
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
@@ -79,8 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC submission is N/A");
- if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "undocumented flag");
@@ -88,6 +107,8 @@ static void __confirm_options(struct intel_uc *uc)
void intel_uc_init_early(struct intel_uc *uc)
{
+ uc_expand_default_options(uc);
+
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
@@ -175,19 +196,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/* we need communication to be enabled to reply to GuC */
GEM_BUG_ON(!guc_communication_enabled(guc));
- if (!guc->mmio_msg)
- return;
-
- spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
- spin_unlock_irq(&i915->irq_lock);
-
- guc->mmio_msg = 0;
+ spin_lock_irq(&guc->irq_lock);
+ if (guc->mmio_msg) {
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ guc->mmio_msg = 0;
+ }
+ spin_unlock_irq(&guc->irq_lock);
}
static void guc_reset_interrupts(struct intel_guc *guc)
@@ -207,7 +224,8 @@ static void guc_disable_interrupts(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(guc_communication_enabled(guc));
@@ -227,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..67b06fde1225 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
@@ -151,16 +152,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->path = NULL;
}
}
-
- /* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
- uc_fw->path = NULL;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ENABLE_GUC_MASK)
return i915->params.guc_firmware_path;
return "";
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 16b582cb97ed..fef1e857cefc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,11 +37,19 @@
#include <linux/slab.h>
#include "i915_drv.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/shmem_utils.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#define INVALID_OP (~0U)
#define OP_LEN_MI 9
@@ -454,6 +462,7 @@ enum {
RING_BUFFER_INSTRUCTION,
BATCH_BUFFER_INSTRUCTION,
BATCH_BUFFER_2ND_LEVEL,
+ RING_BUFFER_CTX,
};
enum {
@@ -495,6 +504,7 @@ struct parser_exec_state {
*/
int saved_buf_addr_type;
bool is_ctx_wa;
+ bool is_init_ctx;
const struct cmd_info *info;
@@ -708,6 +718,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index)
return *cmd_ptr(s, index);
}
+static inline bool is_init_ctx(struct parser_exec_state *s)
+{
+ return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
+}
+
static void parser_exec_state_dump(struct parser_exec_state *s)
{
int cnt = 0;
@@ -721,7 +736,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
- "RING_BUFFER" : "BATCH_BUFFER",
+ "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
+ "CTX_BUFFER" : "BATCH_BUFFER"),
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
@@ -756,7 +772,8 @@ static inline void update_ip_va(struct parser_exec_state *s)
if (WARN_ON(s->ring_head == s->ring_tail))
return;
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
unsigned long ring_top = s->ring_start + s->ring_size;
if (s->ring_head > s->ring_tail) {
@@ -820,68 +837,12 @@ static inline int cmd_length(struct parser_exec_state *s)
*addr = val; \
} while (0)
-static bool is_shadowed_mmio(unsigned int offset)
-{
- bool ret = false;
-
- if ((offset == 0x2168) || /*BB current head register UDW */
- (offset == 0x2140) || /*BB current header register */
- (offset == 0x211c) || /*second BB header register UDW */
- (offset == 0x2114)) { /*second BB header register UDW */
- ret = true;
- }
- return ret;
-}
-
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
-{
- return (offset >= 0x24d0 && offset < 0x2500);
-}
-
-static int force_nonpriv_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index, char *cmd)
-{
- struct intel_gvt *gvt = s->vgpu->gvt;
- unsigned int data;
- u32 ring_base;
- u32 nopid;
-
- if (!strcmp(cmd, "lri"))
- data = cmd_val(s, index + 1);
- else {
- gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
- offset, cmd);
- return -EINVAL;
- }
-
- ring_base = s->engine->mmio_base;
- nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
-
- if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
- data != nopid) {
- gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
- offset, data);
- patch_value(s, cmd_ptr(s, index), nopid);
- return 0;
- }
- return 0;
-}
-
static inline bool is_mocs_mmio(unsigned int offset)
{
return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
((offset >= 0xb020) && (offset <= 0xb0a0));
}
-static int mocs_cmd_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index)
-{
- if (!is_mocs_mmio(offset))
- return -EINVAL;
- vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
- return 0;
-}
-
static int is_cmd_update_pdps(unsigned int offset,
struct parser_exec_state *s)
{
@@ -929,6 +890,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
u32 ctx_sr_ctl;
+ u32 *vreg, vreg_old;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -936,34 +898,101 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
+ if (is_init_ctx(s)) {
+ struct intel_gvt_mmio_info *mmio_info;
+
+ intel_gvt_mmio_set_cmd_accessible(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (mmio_info && mmio_info->write)
+ intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
+ return 0;
+ }
+
if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
}
- if (is_shadowed_mmio(offset)) {
- gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
- return 0;
+ if (!strncmp(cmd, "srm", 3) ||
+ !strncmp(cmd, "lrm", 3)) {
+ if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
+ offset != 0x21f0) {
+ gvt_vgpu_err("%s access to register (%x)\n",
+ cmd, offset);
+ return -EPERM;
+ } else
+ return 0;
}
- if (is_mocs_mmio(offset) &&
- mocs_cmd_reg_handler(s, offset, index))
- return -EINVAL;
+ if (!strncmp(cmd, "lrr-src", 7) ||
+ !strncmp(cmd, "lrr-dst", 7)) {
+ gvt_vgpu_err("not allowed cmd %s\n", cmd);
+ return -EPERM;
+ }
+
+ if (!strncmp(cmd, "pipe_ctrl", 9)) {
+ /* TODO: add LRI POST logic here */
+ return 0;
+ }
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index, cmd))
+ if (strncmp(cmd, "lri", 3))
return -EPERM;
+ /* below are all lri handlers */
+ vreg = &vgpu_vreg(s->vgpu, offset);
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
+ return -EBADRQC;
+ }
+
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
- if (is_cmd_update_pdps(offset, s) &&
- cmd_pdp_mmio_update_handler(s, offset, index))
- return -EINVAL;
+ if (is_mocs_mmio(offset))
+ *vreg = cmd_val(s, index + 1);
+
+ vreg_old = *vreg;
+
+ if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
+ u32 cmdval_new, cmdval;
+ struct intel_gvt_mmio_info *mmio_info;
+
+ cmdval = cmd_val(s, index + 1);
+
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (!mmio_info) {
+ cmdval_new = cmdval;
+ } else {
+ u64 ro_mask = mmio_info->ro_mask;
+ int ret;
+
+ if (likely(!ro_mask))
+ ret = mmio_info->write(s->vgpu, offset,
+ &cmdval, 4);
+ else {
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
+ ret = -EBADRQC;
+ }
+ if (ret)
+ return ret;
+ cmdval_new = *vreg;
+ }
+ if (cmdval_new != cmdval)
+ patch_value(s, cmd_ptr(s, index+1), cmdval_new);
+ }
+
+ /* Only patch the cmd; restore the vreg value if the mmio write handler changed it. */
+ *vreg = vreg_old;
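A worked trace of the patch-and-restore sequence above, with hypothetical values:

/* Hypothetical trace of the lri handling above:
 *   cmdval     = cmd_val(s, index + 1);  // value the guest put in its LRI
 *   vreg_old   = *vreg;                  // snapshot before the handler runs
 *   mmio_info->write(...);               // handler may veto/adjust *vreg
 *   cmdval_new = *vreg;                  // value the handler settled on
 *   if (cmdval_new != cmdval)            // guest value was not acceptable,
 *           patch_value(...);            // so the shadow ring gets the safe value
 *   *vreg = vreg_old;                    // guest-visible vreg stays unchanged
 */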
/* TODO
* In order to let workload with inhibit context to generate
@@ -1215,6 +1244,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
s->buf_type = BATCH_BUFFER_INSTRUCTION;
ret = ip_gma_set(s, s->ret_ip_gma_bb);
s->buf_addr_type = s->saved_buf_addr_type;
+ } else if (s->buf_type == RING_BUFFER_CTX) {
+ ret = ip_gma_set(s, s->ring_tail);
} else {
s->buf_type = RING_BUFFER_INSTRUCTION;
s->buf_addr_type = GTT_BUFFER;
@@ -2763,7 +2794,8 @@ static int command_scan(struct parser_exec_state *s,
gma_bottom = rb_start + rb_len;
while (s->ip_gma != gma_tail) {
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_vgpu_err("ip_gma %lx out of ring scope."
@@ -3056,6 +3088,118 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
+/* Generate dummy contexts by sending empty requests to the HW, and let
+ * the HW fill in the Engine Contexts. These dummy contexts are used for
+ * initialization purposes (updating the reg whitelist), so they are
+ * referred to as init contexts here.
+ */
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
+{
+ const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (gvt->is_reg_whitelist_updated)
+ return;
+
+ /* scan init ctx to update cmd accessible list */
+ for_each_engine(engine, gvt->gt, id) {
+ struct parser_exec_state s;
+ void *vaddr;
+ int ret;
+
+ if (!engine->default_state)
+ continue;
+
+ vaddr = shmem_pin_map(engine->default_state);
+ if (IS_ERR(vaddr)) {
+ gvt_err("failed to map %s->default state, err:%zd\n",
+ engine->name, PTR_ERR(vaddr));
+ return;
+ }
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = vgpu;
+ s.engine = engine;
+ s.ring_start = 0;
+ s.ring_size = engine->context_size - start;
+ s.ring_head = 0;
+ s.ring_tail = s.ring_size;
+ s.rb_va = vaddr + start;
+ s.workload = NULL;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = true;
+
+ /* skip the first RING_CTX_SIZE bytes (0x50 dwords of ring context) */
+ ret = ip_gma_set(&s, RING_CTX_SIZE);
+ if (ret == 0) {
+ ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
+ if (ret)
+ gvt_err("Scan init ctx error\n");
+ }
+
+ shmem_unpin_map(engine->default_state, vaddr);
+ if (ret)
+ return;
+ }
+
+ gvt->is_reg_whitelist_updated = true;
+}
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ unsigned long gma_head, gma_tail, gma_start, ctx_size;
+ struct parser_exec_state s;
+ int ring_id = workload->engine->id;
+ struct intel_context *ce = vgpu->submission.shadow[ring_id];
+ int ret;
+
+ GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
+
+ ctx_size = workload->engine->context_size - PAGE_SIZE;
+
+ /* Only the ring context is loaded to HW for an inhibit context, so
+ * there is no need to scan the engine context.
+ */
+ if (is_inhibit_context(ce))
+ return 0;
+
+ gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
+ gma_head = 0;
+ gma_tail = ctx_size;
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.engine = workload->engine;
+ s.ring_start = gma_start;
+ s.ring_size = ctx_size;
+ s.ring_head = gma_start + gma_head;
+ s.ring_tail = gma_start + gma_tail;
+ s.rb_va = ce->lrc_reg_state;
+ s.workload = workload;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = false;
+
+ /* don't scan the first RING_CTX_SIZE bytes (0x50 dwords), as they
+ * are the ring context
+ */
+ ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, gma_head, gma_tail,
+ gma_start, ctx_size);
+out:
+ if (ret)
+ gvt_vgpu_err("scan shadow ctx error\n");
+
+ return ret;
+}
+
static int init_cmd_table(struct intel_gvt *gvt)
{
unsigned int gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index ab25d151932a..416d345e2816 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -40,6 +40,7 @@
struct intel_gvt;
struct intel_shadow_wa_ctx;
+struct intel_vgpu;
struct intel_vgpu_workload;
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
@@ -50,4 +51,8 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu);
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a15f87539657..62a5b0dd2003 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+ /* No hpd_invert is set in the vGPU VBT, so clear the invert mask */
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
- if (connected) {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTA_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIC_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
- }
- } else {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ SFUSE_STRAP_DDIB_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
- ~SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTB_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIC_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIC_DETECTED;
}
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTC_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
}
- vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
- PORTB_HOTPLUG_STATUS_MASK;
- intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 158873f269b1..c8dcda6d4f0d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine);
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index d62cd14605a3..84ad74b37d66 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- intel_engine_mask_t engine_mask);
-
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 67b6ede9e707..0daa3931aef7 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,6 +38,10 @@
#include <linux/types.h>
+#include "display/intel_display.h"
+
+struct intel_vgpu;
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
@@ -98,8 +102,6 @@ enum DDI_PORT {
DDI_PORT_E = 4
};
-struct intel_gvt;
-
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b0e173f2d990..3bf45672ef98 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,10 +34,19 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define I915_GTT_PAGE_SHIFT 12
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+
+#include "gt/intel_gtt.h"
+struct intel_gvt;
+struct intel_vgpu;
struct intel_vgpu_mm;
+#define I915_GTT_PAGE_SHIFT 12
+
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index cf3578e3f4dd..03c993d68f10 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -33,6 +33,10 @@
#ifndef _GVT_H_
#define _GVT_H_
+#include <uapi/linux/pci_regs.h>
+
+#include "i915_drv.h"
+
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
@@ -244,7 +248,7 @@ struct gvt_mmio_block {
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
- u8 *mmio_attribute;
+ u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
@@ -263,6 +267,8 @@ struct intel_gvt_mmio {
* logical context image
*/
#define F_SR_IN_CTX (1 << 7)
+/* Value of command write of this reg needs to be patched */
+#define F_CMD_WRITE_PATCH (1 << 8)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
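The mmio_attribute array above widens from u8 to u16 precisely to hold this new flag: bit 8 is one past the top of an 8-bit word. A minimal illustration (hypothetical snippet, not in the patch):

/* Storing the new flag in the old u8 attribute word would silently
 * truncate it to zero; hence the u8 -> u16 widening. */
u8 old_attr = F_CMD_WRITE_PATCH;	/* (1 << 8) truncates to 0x00, flag lost */
u16 new_attr = F_CMD_WRITE_PATCH;	/* 0x100 preserved */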
@@ -329,6 +335,7 @@ struct intel_gvt {
u32 *mocs_mmio_offset_list;
u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
+ bool is_reg_whitelist_updated;
struct dentry *debugfs_root;
};
@@ -412,6 +419,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+/* ring context size, i.e. the first 0x50 dwords */
+#define RING_CTX_SIZE 320
+
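A compile-time cross-check of that constant against the 0x50-dword figure in the comment (illustrative, not part of the patch):

/* 0x50 dwords * 4 bytes/dword = 0x140 = 320 bytes */
static_assert(RING_CTX_SIZE == 0x50 * sizeof(u32));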
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
@@ -683,6 +693,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx(
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+/**
+ * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose cmd write
+ * needs to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ */
+static inline void intel_gvt_mmio_set_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to
+ * be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a GPU command write to this MMIO should be patched
+ */
+static inline bool intel_gvt_mmio_is_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
+}
+
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa7e75cb3e6a..6eeaeecb7f85 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
-static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
@@ -96,7 +96,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
}
static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u8 flags, u32 size,
+ u32 offset, u16 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
{
@@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
return -ENOMEM;
info->offset = i;
- p = find_mmio_info(gvt, info->offset);
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p) {
WARN(1, "dup mmio definition offset %x\n",
info->offset);
@@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
-/**
+/*
* FixMe:
* If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
* 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
@@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_REG, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
- NULL, force_nonpriv_write);
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
+ D_BDW_PLUS, NULL, force_nonpriv_write);
MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
@@ -3626,7 +3627,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
/*
* Normal tracked MMIOs.
*/
- mmio_info = find_mmio_info(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
if (!mmio_info) {
gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
goto default_rw;
@@ -3686,14 +3687,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
}
}
-static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
- u32 offset, void *data)
+static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;
if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
- I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+ intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index fcd663811d37..287cd142629e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,7 +32,10 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
-#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+
+#include "i915_reg.h"
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 60f1a386dd06..b4348256ae95 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1703,7 +1703,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1712,7 +1712,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
kvmgt_protect_table_add(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1737,7 +1737,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (!kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1746,7 +1746,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
kvmgt_protect_table_del(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1772,7 +1772,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvmgt_guest_info *info = container_of(node,
struct kvmgt_guest_info, track_node);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
gfn = slot->base_gfn + i;
if (kvmgt_gfn_is_write_protected(info, gfn)) {
@@ -1781,7 +1781,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
kvmgt_protect_table_del(info, gfn);
}
}
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9e862dc73579..7c26af39fbfc 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index afe574d6b3b5..c9589e26af93 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 3b25e7fe32f6..b6b69777af49 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,6 +36,18 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+#include <linux/types.h>
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_lrc_reg.h"
+#include "i915_reg.h"
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gvt;
+struct intel_vgpu;
+
struct engine_mmio {
enum intel_engine_id id;
i915_reg_t reg;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 6f92cde71971..550a456e936f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -33,6 +33,8 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
+#include "gvt.h"
+
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index b58860dee970..244cc7320b54 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -133,4 +133,6 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+
+#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aed2ef6466a2..fc735692f21f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -37,6 +37,8 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -135,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
int i;
bool skip = false;
int ring_id = workload->engine->id;
+ int ret;
GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -161,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
- }
+ } else if (workload->engine->id == BCS0)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ BCS_TILE_REGISTER_VAL_OFFSET,
+ (void *)shadow_ring_context +
+ BCS_TILE_REGISTER_VAL_OFFSET, 4);
#undef COPY_REG
#undef COPY_REG_MASKED
+ /* Don't copy the Ring Context (the first 0x50 dwords);
+ * only copy the Engine Context part from the guest.
+ */
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
- sizeof(*shadow_ring_context),
+ RING_CTX_SIZE,
(void *)shadow_ring_context +
- sizeof(*shadow_ring_context),
- I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ RING_CTX_SIZE,
+ I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
@@ -237,6 +248,11 @@ read:
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ ret = intel_gvt_scan_engine_context(workload);
+ if (ret) {
+ gvt_vgpu_err("invalid cmd found in guest context pages\n");
+ return ret;
+ }
s->last_ctx[ring_id].valid = true;
return 0;
}
@@ -396,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
if (!wa_ctx->indirect_ctx.obj)
return;
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
wa_ctx->indirect_ctx.obj = NULL;
@@ -504,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = workload->vgpu->gvt;
const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
struct intel_vgpu_shadow_bb *bb;
+ struct i915_gem_ww_ctx ww;
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
@@ -528,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
* directly
*/
if (!bb->ppgtt) {
- bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(bb->obj, &ww);
+
+ bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
+ NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
goto err;
}
@@ -545,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
0);
if (ret)
goto err;
- }
- /* No one is going to touch shadow bb from now on. */
- i915_gem_object_flush_map(bb->obj);
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
+ i915_gem_object_unlock(bb->obj);
+ }
}
return 0;
err:
+ i915_gem_ww_ctx_fini(&ww);
release_shadow_batch_buffer(workload);
return ret;
}
@@ -578,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
+ struct i915_gem_ww_ctx ww;
+ int ret;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
- vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
- 0, CACHELINE_BYTES, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
+
+ vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ return ret;
+ }
+
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
/* FIXME: we are not tracking our pinned VMA, leaving it
* up to the core to fix up the stray pin_count upon
@@ -619,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
+ i915_gem_object_lock(bb->obj, NULL);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
if (bb->vma && !IS_ERR(bb->vma))
i915_vma_unpin(bb->vma);
+ i915_gem_object_unlock(bb->obj);
i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
@@ -999,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
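Both pin sites above switch from a bare i915_gem_object_ggtt_pin() to the ww (wait/wound) transaction form: initialize a context, lock the object under it, and on -EDEADLK back off (which drops every lock the context holds) and retry, so cross-object lock ordering can never deadlock. A condensed, hedged sketch of that control flow; pin_obj_ww() is an illustrative wrapper, not a driver function, and error unwinding is simplified:

static struct i915_vma *pin_obj_ww(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;

	i915_gem_ww_ctx_init(&ww, false);	/* false: not interruptible */
retry:
	i915_gem_object_lock(obj, &ww);
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, 0);
	if (IS_ERR(vma) && PTR_ERR(vma) == -EDEADLK) {
		/* backoff releases all locks held under 'ww' */
		if (!i915_gem_ww_ctx_backoff(&ww))
			goto retry;
	} else {
		i915_gem_object_unlock(obj);	/* the pin survives the unlock */
	}
	i915_gem_ww_ctx_fini(&ww);
	return vma;
}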
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 64e7a0b791c3..7c86984a842f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -36,6 +36,11 @@
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
+#include "gt/intel_engine_types.h"
+
+#include "execlist.h"
+#include "interrupt.h"
+
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index e49944fde333..6a16d0ca7cda 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
- else if (!IS_BROXTON(dev_priv))
+ else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
@@ -500,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu))
+ if (!IS_ERR(vgpu)) {
/* calculate left instance change for types */
intel_gvt_update_vgpu_types(gvt);
+ intel_gvt_update_reg_whitelist(vgpu);
+ }
mutex_unlock(&gvt->lock);
return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 10a865f3dc09..3bc616cc1ad2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref)
GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
/* Make the cached node available for reuse with any timeline */
- if (IS_ENABLED(CONFIG_64BIT))
- ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+ ref->cache->timeline = 0; /* needs cmpxchg(u64) */
}
spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
if (cached == idx)
return it;
-#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
/*
* An unclaimed cache [.timeline=0] can only be claimed once.
*
@@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* only the winner of that race will cmpxchg return the old
* value of 0).
*/
- if (!cached && !cmpxchg(&it->timeline, 0, idx))
+ if (!cached && !cmpxchg64(&it->timeline, 0, idx))
return it;
-#endif
}
BUILD_BUG_ON(offsetof(typeof(*it), node));
@@ -631,24 +628,26 @@ static int flush_lazy_signals(struct i915_active *ref)
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
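Dropping the CONFIG_64BIT guards works because cmpxchg64() gives the claim-once semantics on 32-bit kernels too: an unclaimed slot holds 0, and exactly one racing thread sees the old value 0 from the compare-exchange. A hedged userspace analogue with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t cached_timeline;	/* 0 means "unclaimed" */

/* Returns true for exactly one caller per claim; losers see false. */
static bool claim_timeline(uint64_t idx)
{
	uint64_t expected = 0;

	return atomic_compare_exchange_strong(&cached_timeline,
					      &expected, idx);
}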
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..ced9a96d7c34 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "gt/intel_engine.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
@@ -1142,7 +1143,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
if (IS_ERR(dst))
return dst;
@@ -1166,7 +1167,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1178,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1195,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1396,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
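The copy_batch() rework separates two lengths: 'remain' drives the page-by-page copy (and may be rounded up to a clflush boundary), while the original 'length' marks where the new memset32() starts zeroing, so no stale bytes in the shadow object reach the GPU. A hedged standalone version of the loop shape; the names and the flat src_pages[] array are illustrative:

#include <stddef.h>
#include <string.h>

#define PAGE_SZ 4096UL

static void copy_and_pad(char *dst, size_t dst_size,
			 const char *src_pages[], size_t offset, size_t length)
{
	size_t remain = length;		/* work on a copy, preserve 'length' */
	size_t x = offset % PAGE_SZ;	/* intra-page offset of the first chunk */
	size_t n = offset / PAGE_SZ;	/* index of the first source page */
	char *ptr = dst;

	while (remain) {
		size_t len = remain < PAGE_SZ - x ? remain : PAGE_SZ - x;

		memcpy(ptr, src_pages[n] + x, len);
		ptr += len;
		remain -= len;
		x = 0;			/* later pages start at offset 0 */
		n++;
	}
	/* zero the tail, as the added memset32() does above */
	memset(dst + length, 0, dst_size - length);
}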
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 77e76b665098..88336ff4bf09 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -45,6 +45,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
+#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -209,7 +210,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
spin_unlock(&obj->vma.lock);
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->stolen)
+ if (i915_gem_object_is_stolen(obj))
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
@@ -219,145 +220,6 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", engine->name);
}
-struct file_stats {
- struct i915_address_space *vm;
- unsigned long count;
- u64 total;
- u64 active, inactive;
- u64 closed;
-};
-
-static int per_file_stats(int id, void *ptr, void *data)
-{
- struct drm_i915_gem_object *obj = ptr;
- struct file_stats *stats = data;
- struct i915_vma *vma;
-
- if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
- return 0;
-
- stats->count++;
- stats->total += obj->base.size;
-
- spin_lock(&obj->vma.lock);
- if (!stats->vm) {
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- } else {
- struct rb_node *p = obj->vma.tree.rb_node;
-
- while (p) {
- long cmp;
-
- vma = rb_entry(p, typeof(*vma), obj_node);
- cmp = i915_vma_compare(vma, stats->vm, NULL);
- if (cmp == 0) {
- if (drm_mm_node_allocated(&vma->node)) {
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- break;
- }
- if (cmp < 0)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_put(obj);
- return 0;
-}
-
-#define print_file_stats(m, name, stats) do { \
- if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
- name, \
- stats.count, \
- stats.total, \
- stats.active, \
- stats.inactive, \
- stats.closed); \
-} while (0)
-
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *i915)
-{
- struct file_stats kstats = {};
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- rcu_read_lock();
- if (ce->state)
- per_file_stats(0,
- ce->state->obj, &kstats);
- per_file_stats(0, ce->ring->vma->obj, &kstats);
- rcu_read_unlock();
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- mutex_lock(&ctx->mutex);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = {
- .vm = rcu_access_pointer(ctx->vm),
- };
- struct drm_file *file = ctx->file_priv->file;
- struct task_struct *task;
- char name[80];
-
- rcu_read_lock();
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- rcu_read_unlock();
-
- rcu_read_lock();
- task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s",
- task ? task->comm : "<unknown>");
- rcu_read_unlock();
-
- print_file_stats(m, name, stats);
- }
- mutex_unlock(&ctx->mutex);
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- print_file_stats(m, "[k]contexts", kstats);
-}
-
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -371,311 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
for_each_memory_region(mr, i915, id)
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
mr->name, &mr->total, &mr->avail);
- seq_putc(m, '\n');
-
- print_context_stats(m, i915);
-
- return 0;
-}
-
-static void gen8_display_interrupt_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!wakeref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
-}
-
-static int i915_interrupt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- int i, pipe;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
- } else if (INTEL_GEN(dev_priv) >= 11) {
- if (HAS_MASTER_UNIT_IRQ(dev_priv))
- seq_printf(m, "Master Unit Interrupt Control: %08x\n",
- I915_READ(DG1_MSTR_UNIT_INTR));
-
- seq_printf(m, "Master Interrupt Control: %08x\n",
- I915_READ(GEN11_GFX_MSTR_IRQ));
-
- seq_printf(m, "Render/Copy Intr Enable: %08x\n",
- I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
- seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
- I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
- seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_ENABLE));
- seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
- seq_printf(m, "Crypto Intr Enable:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
- seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
-
- seq_printf(m, "Display Interrupt Control:\t%08x\n",
- I915_READ(GEN11_DISPLAY_INT_CTL));
-
- gen8_display_interrupt_info(m);
- } else if (INTEL_GEN(dev_priv) >= 8) {
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- gen8_display_interrupt_info(m);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- seq_printf(m, "Master IER:\t%08x\n",
- I915_READ(VLV_MASTER_IER));
-
- seq_printf(m, "Render IER:\t%08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Render IIR:\t%08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Render IMR:\t%08x\n",
- I915_READ(GTIMR));
-
- seq_printf(m, "PM IER:\t\t%08x\n",
- I915_READ(GEN6_PMIER));
- seq_printf(m, "PM IIR:\t\t%08x\n",
- I915_READ(GEN6_PMIIR));
- seq_printf(m, "PM IMR:\t\t%08x\n",
- I915_READ(GEN6_PMIMR));
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- } else if (!HAS_PCH_SPLIT(dev_priv)) {
- seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(GEN2_IER));
- seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(GEN2_IIR));
- seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(GEN2_IMR));
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "Pipe %c stat: %08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- } else {
- seq_printf(m, "North Display Interrupt enable: %08x\n",
- I915_READ(DEIER));
- seq_printf(m, "North Display Interrupt identity: %08x\n",
- I915_READ(DEIIR));
- seq_printf(m, "North Display Interrupt mask: %08x\n",
- I915_READ(DEIMR));
- seq_printf(m, "South Display Interrupt enable: %08x\n",
- I915_READ(SDEIER));
- seq_printf(m, "South Display Interrupt identity: %08x\n",
- I915_READ(SDEIIR));
- seq_printf(m, "South Display Interrupt mask: %08x\n",
- I915_READ(SDEIMR));
- seq_printf(m, "Graphics Interrupt enable: %08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Graphics Interrupt identity: %08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Graphics Interrupt mask: %08x\n",
- I915_READ(GTIMR));
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- seq_printf(m, "RCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
- seq_printf(m, "BCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_BCS_RSVD_INTR_MASK));
- seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
- seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
- seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
- seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_MASK));
- seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
- seq_printf(m, "Crypto Intr Mask:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
- seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
-
- } else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_uabi_engine(engine, dev_priv) {
- seq_printf(m,
- "Graphics Interrupt mask (%s): %08x\n",
- engine->name, ENGINE_READ(engine, RING_IMR));
- }
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- unsigned int i;
-
- seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
-
- rcu_read_lock();
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- struct i915_vma *vma = reg->vma;
-
- seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, atomic_read(&reg->pin_count));
- if (!vma)
- seq_puts(m, "unused");
- else
- i915_debugfs_describe_obj(m, vma->obj);
- seq_putc(m, '\n');
- }
- rcu_read_unlock();
return 0;
}
@@ -802,7 +359,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -847,19 +404,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
- rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
} else {
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
}
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
@@ -871,24 +428,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
reqf = intel_gpu_freq(rps, reqf);
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
- rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
- rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
- pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
* The equivalent to the PM ISR & IIR cannot be read
* without affecting the current state of the system
@@ -896,17 +453,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_isr = 0;
pm_iir = 0;
} else if (INTEL_GEN(dev_priv) >= 8) {
- pm_ier = I915_READ(GEN8_GT_IER(2));
- pm_imr = I915_READ(GEN8_GT_IMR(2));
- pm_isr = I915_READ(GEN8_GT_ISR(2));
- pm_iir = I915_READ(GEN8_GT_IIR(2));
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
} else {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
}
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -936,27 +493,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpupei,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dun)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldns)\n",
rpcurup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpdownei,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpprevdown));
@@ -1011,111 +568,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
- unsigned int max_gpu_freq, min_gpu_freq;
- intel_wakeref_t wakeref;
- int gpu_freq, ia_freq;
-
- if (!HAS_LLC(dev_priv))
- return -ENODEV;
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 HZ units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
- ia_freq = gpu_freq;
- sandybridge_pcode_read(dev_priv,
- GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq, NULL);
- seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(rps,
- (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
- ((ia_freq >> 0) & 0xff) * 100,
- ((ia_freq >> 8) & 0xff) * 100);
- }
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
-{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
- ring->space, ring->head, ring->tail, ring->emit);
-}
-
-static int i915_context_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- seq_puts(m, "HW context ");
- if (ctx->pid) {
- struct task_struct *task;
-
- task = get_pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- seq_printf(m, "(%s [%d]) ",
- task->comm, task->pid);
- put_task_struct(task);
- }
- } else if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else {
- seq_puts(m, "(kernel) ");
- }
-
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, '\n');
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- i915_debugfs_describe_obj(m, ce->state->obj);
- describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- seq_putc(m, '\n');
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -1193,20 +645,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
-static const char *rps_power_to_str(unsigned int power)
-{
- static const char * const strings[] = {
- [LOW_POWER] = "low power",
- [BETWEEN] = "mixed",
- [HIGH_POWER] = "high power",
- };
-
- if (power >= ARRAY_SIZE(strings) || !strings[power])
- return "unknown";
-
- return strings[power];
-}
-
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1231,42 +669,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
-
- if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
- u32 rpup, rpupei;
- u32 rpdown, rpdownei;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
- rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
- rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
- rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power.mode));
- seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
- rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->power.up_threshold);
- seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
- rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->power.down_threshold);
- } else {
- seq_puts(m, "\nRPS Autotuning inactive\n");
- }
-
- return 0;
-}
-
-static int i915_llc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const bool edram = INTEL_GEN(dev_priv) > 8;
-
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
- dev_priv->edram_size_mb);
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
return 0;
}
@@ -1280,7 +683,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- enableddisabled(!dev_priv->power_domains.wakeref));
+ enableddisabled(!dev_priv->power_domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
@@ -1306,34 +709,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- seq_printf(m, "GT awake? %s [%d]\n",
- yesno(dev_priv->gt.awake),
- atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u Hz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
+ seq_printf(m, "GT awake? %s [%d], %llums\n",
+ yesno(i915->gt.awake),
+ atomic_read(&i915->gt.wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ i915->gt.clock_frequency,
+ i915->gt.clock_period_ns);
p = drm_seq_file_printer(m);
- for_each_uabi_engine(engine, dev_priv)
+ for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
- return 0;
-}
-
-static int i915_shrinker_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
-
- seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
- seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -1411,7 +808,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32 bits.
*/
- if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1529,55 +926,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
i915_drop_caches_get, i915_drop_caches_set,
"0x%08llx\n");
-static int
-i915_cache_sharing_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- u32 snpcr = 0;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
-
- return 0;
-}
-
-static int
-i915_cache_sharing_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- if (val > 3)
- return -EINVAL;
-
- drm_dbg(&dev_priv->drm,
- "Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 snpcr;
-
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
- i915_cache_sharing_get, i915_cache_sharing_set,
- "%llu\n");
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1621,16 +969,10 @@ static const struct file_operations i915_forcewake_fops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
- {"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_context_status", i915_context_status, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
@@ -1643,7 +985,6 @@ static const struct i915_debugfs_files {
} i915_debugfs_files[] = {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
- {"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
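The removed cache-sharing file used the same debugfs idiom the surviving entries keep: a get/set pair wrapped by DEFINE_SIMPLE_ATTRIBUTE() and registered from a table. A hedged minimal instance of that idiom (my_value and my_fops are illustrative names, not driver symbols):

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 my_value;	/* toy state; real files capture a device pointer */

static int my_get(void *data, u64 *val)
{
	*val = my_value;
	return 0;
}

static int my_set(void *data, u64 val)
{
	if (val > 3)	/* reject out-of-range input, as the old file did */
		return -EINVAL;
	my_value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(my_fops, my_get, my_set, "%llu\n");

/* registration: debugfs_create_file("my_file", 0644, parent, NULL, &my_fops); */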
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..8e9cb44e66e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,14 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
@@ -410,6 +412,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
@@ -516,8 +519,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
-
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
@@ -578,8 +579,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -611,14 +610,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
+
+ intel_pcode_init(dev_priv);
+
/*
- * Fill the dram structure to get the system raw bandwidth and
- * dram info. This will be used for memory latency calculation.
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
*/
intel_dram_detect(dev_priv);
- intel_pcode_init(dev_priv);
-
intel_bw_init_hw(dev_priv);
return 0;
@@ -626,7 +626,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +647,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -738,6 +735,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
* events.
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
+ drm_atomic_helper_shutdown(&dev_priv->drm);
intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
@@ -940,8 +938,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(&i915->drm);
-
intel_gvt_driver_remove(i915);
intel_modeset_driver_remove(i915);
@@ -1052,6 +1048,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1063,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a3ee4f9dc0a..26d69d06aa6d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,9 +79,9 @@
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
+#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
@@ -103,7 +103,6 @@
#include "i915_vma.h"
#include "i915_irq.h"
-#include "intel_region_lmem.h"
/* General customization:
*/
@@ -416,6 +415,7 @@ struct intel_fbc {
u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
+ bool psr2_active;
} state_cache;
/*
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
@@ -1125,23 +1122,11 @@ struct drm_i915_private {
* crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
-
- /*
- * Set during HW readout of watermarks/DDB. Some platforms
- * need to know when we're still using BIOS-provided values
- * (which we don't fully trust).
- *
- * FIXME get rid of this.
- */
- bool distrust_bios_wm;
} wm;
struct dram_info {
- bool valid;
- bool is_16gb_dimm;
+ bool wm_lv_0_adjust_needed;
u8 num_channels;
- u8 ranks;
- u32 bandwidth_kbps;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
@@ -1150,6 +1135,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
} type;
+ u8 num_qgv_points;
} dram_info;
struct intel_bw_info {
@@ -1172,9 +1158,6 @@ struct drm_i915_private {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
-
- struct llist_head free_list;
- struct work_struct free_work;
} contexts;
/*
@@ -1188,6 +1171,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ u8 framestart_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1349,7 +1334,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
@@ -1572,16 +1557,30 @@ enum {
TGL_REVID_D0,
};
-extern const struct i915_rev_steppings tgl_uy_revids[];
-extern const struct i915_rev_steppings tgl_revids[];
+#define TGL_UY_REVIDS_SIZE 4
+#define TGL_REVIDS_SIZE 2
+
+extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return &tgl_uy_revids[INTEL_REVID(dev_priv)];
- else
- return &tgl_revids[INTEL_REVID(dev_priv)];
+ u8 revid = INTEL_REVID(dev_priv);
+ u8 size;
+ const struct i915_rev_steppings *tgl_revid_tbl;
+
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ tgl_revid_tbl = tgl_uy_revids;
+ size = ARRAY_SIZE(tgl_uy_revids);
+ } else {
+ tgl_revid_tbl = tgl_revids;
+ size = ARRAY_SIZE(tgl_revids);
+ }
+
+ revid = min_t(u8, revid, size - 1);
+
+ return &tgl_revid_tbl[revid];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1591,14 +1590,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1649,8 +1648,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
@@ -1752,10 +1749,17 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+static inline bool run_as_guest(void)
+{
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+}
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -1764,7 +1768,7 @@ static inline bool intel_vtd_active(void)
#endif
/* Running as a guest, we assume the host is enforcing VT-d */
- return !hypervisor_is_type(X86_HYPER_NATIVE);
+ return run_as_guest();
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1796,8 +1800,6 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-int i915_gem_freeze(struct drm_i915_private *dev_priv);
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
@@ -1970,43 +1972,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-#define __I915_REG_OP(op__, dev_priv__, ...) \
- intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-
-#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-
-#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-
-/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections, such as inside IRQ handlers, where forcewake is explicitly
- * controlled.
- *
- * Think twice, and think again, before using these.
- *
- * As an example, these accessors can possibly be used between:
- *
- * spin_lock_irq(&dev_priv->uncore.lock);
- * intel_uncore_forcewake_get__locked();
- *
- * and
- *
- * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
- *
- *
- * Note: some registers may not need forcewake held, so
- * intel_uncore_forcewake_{get,put} can be omitted, see
- * intel_uncore_forcewake_for_reg().
- *
- * Certain architectures will die if the same cacheline is concurrently accessed
- * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
- * a more localised lock guarding all access to that bank of registers.
- */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2029,16 +1994,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
-{
- return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
- 1000000000);
-}
-
-static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
-{
- return div_u64(val * 1000000000,
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
-}
-
#endif
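The deleted i915_cs_timestamp_* helpers make the replacement conversions explicit: ticks = ns * f / 10^9, rounded up so a nonzero delay never truncates to zero ticks, and the inverse for reporting. A hedged standalone version, with freq_hz standing in for the gt->clock_frequency field the patch introduces:

#include <stdint.h>

static uint64_t ns_to_ticks(uint64_t ns, uint64_t freq_hz)
{
	/* round up, mirroring DIV_ROUND_UP_ULL in the removed helper */
	return (ns * freq_hz + 999999999) / 1000000000;
}

static uint64_t ticks_to_ns(uint64_t ticks, uint64_t freq_hz)
{
	return ticks * 1000000000 / freq_hz;
}

With a 19.2 MHz command-streamer clock, ns_to_ticks(1000000000, 19200000) gives 19200000 ticks, i.e. exactly one second.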
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58276694c848..aa4490934469 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,108 +180,6 @@ try_again:
}
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
-{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
- int ret;
-
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
- if (size == 0)
- return -EINVAL;
-
- /* For most of the ABI (e.g. mmap) we think in system pages */
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- *handle_p = handle;
- *size_p = size;
- return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- enum intel_memory_type mem_type;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- u32 format;
-
- switch (cpp) {
- case 1:
- format = DRM_FORMAT_C8;
- break;
- case 2:
- format = DRM_FORMAT_RGB565;
- break;
- case 4:
- format = DRM_FORMAT_XRGB8888;
- break;
- default:
- return -EINVAL;
- }
-
- /* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * cpp, 64);
-
- /* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
- DRM_FORMAT_MOD_LINEAR))
- args->pitch = ALIGN(args->pitch, 4096);
-
- if (args->pitch < args->width)
- return -EINVAL;
-
- args->size = mul_u32_u32(args->pitch, args->height);
-
- mem_type = INTEL_MEMORY_SYSTEM;
- if (HAS_LMEM(to_i915(dev)))
- mem_type = INTEL_MEMORY_LOCAL;
-
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_create *args = data;
-
- i915_gem_flush_free_objects(i915);
-
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
-}
-
-static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
@@ -1059,14 +957,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
@@ -1207,8 +1105,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- i915_gem_driver_release__contexts(dev_priv);
-
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1249,53 +1145,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
-int i915_gem_freeze(struct drm_i915_private *dev_priv)
-{
- /* Discard all purgeable objects, let userspace recover those as
- * required after resuming.
- */
- i915_gem_shrink_all(dev_priv);
-
- return 0;
-}
-
-int i915_gem_freeze_late(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
-
- /*
- * Called just before we write the hibernation image.
- *
- * We need to update the domain tracking to reflect that the CPU
- * will be accessing all the pages to create and restore from the
- * hibernation, and so upon restoration those pages will be in the
- * CPU domain.
- *
- * To make sure the hibernation image contains the latest state,
- * we update that state just before writing out the image.
- *
- * To try and reduce the hibernation image, we manually shrink
- * the objects as well, see i915_gem_freeze()
- */
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- i915_gem_shrink(i915, -1UL, NULL, ~0);
- i915_gem_drain_freed_objects(i915);
-
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_cpu_domain(obj, true));
- i915_gem_object_unlock(obj);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return 0;
-}
-
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
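The madvise hunk above replaces the old open-coded pin/quirk bookkeeping with paired helpers: an object carrying the tiling quirk is hidden from the shrinker, and clearing the quirk makes it shrinkable again. A minimal sketch of that invariant, using a hypothetical wrapper (only the four helpers called in the hunk are real; the wrapper name is illustrative):

static void set_tiling_quirk(struct drm_i915_gem_object *obj, bool quirked)
{
	if (quirked) {
		/* keep quirk-pinned pages resident: hide from the shrinker */
		i915_gem_object_make_unshrinkable(obj);
		i915_gem_object_set_tiling_quirk(obj);
	} else {
		i915_gem_object_clear_tiling_quirk(obj);
		i915_gem_object_make_shrinkable(obj);
	}
}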
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a4cad3f154ca..e622aee6e4be 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -38,11 +38,18 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
+#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
+#define __GEM_BUG(cond) BUG()
+#else
+#define __GEM_BUG(cond) \
+ WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond))
+#endif
+
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
GEM_TRACE_DUMP(); \
- BUG(); \
+ __GEM_BUG(condition); \
} \
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
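The effect of the __GEM_BUG() split above: GEM_BUG_ON() still traces and dumps first, then is fatal only when CONFIG_DRM_I915_DEBUG_GEM_ONCE is set, and otherwise degrades to a WARN so the driver can limp on. A sketch of the effective expansion (illustrative, not literal preprocessor output):

if (unlikely(condition)) {
	GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n",
		      __func__, __LINE__, __stringify(condition));
	GEM_TRACE_DUMP();	/* capture state before acting on it */
#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
	BUG();			/* fatal, the old behaviour */
#else
	WARN(1, "%s:%d GEM_BUG_ON(%s)\n",
	     __func__, __LINE__, __stringify(condition));
#endif
}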
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8245b8..4d2d59a9942b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
return drm_mm_scan_add_block(scan, &vma->node);
}
+static bool defer_evict(struct i915_vma *vma)
+{
+ if (i915_vma_is_active(vma))
+ return true;
+
+ if (i915_vma_is_scanout(vma))
+ return true;
+
+ return false;
+}
+
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
@@ -150,7 +161,7 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
if (!active)
active = vma;
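defer_evict() folds the old activity test together with a new scanout test, so the search-loop condition above is equivalent to the open-coded sketch below; vmas being scanned out now join active vmas at the back of the queue instead of being evicted mid-cycle:

/* Equivalent open-coded form of the new condition (sketch): */
if (active != ERR_PTR(-EAGAIN) &&
    (i915_vma_is_active(vma) || i915_vma_is_scanout(vma))) {
	if (!active)
		active = vma;
	/* ... rotate vma to the tail and continue scanning ... */
}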
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5ee1567f3d1..3ee2f682eff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct device *kdev = &dev_priv->drm.pdev->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (unlikely(ggtt->do_idle_maps)) {
- /* XXX This does not prevent more requests being submitted! */
- if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
- -MAX_SCHEDULE_TIMEOUT)) {
- drm_err(&dev_priv->drm,
- "Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ /* XXX This does not prevent more requests being submitted! */
+ if (unlikely(ggtt->do_idle_maps))
+ /* Wait a bit, in the hope it avoids the hang */
+ usleep_range(100, 250);
- dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&i915->drm.pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index f96032c60a12..75c3bfc2486e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
+ value = i915->gt.clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881f..f962693404b7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+ const u32 period = m->i915->gt.clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -1051,7 +1051,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma->pages) {
void __iomem *s;
- s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ s = io_mapping_map_wc(&mem->iomap,
+ dma - mem->region.start,
+ PAGE_SIZE);
ret = compress_page(compress,
(void __force *)s, dst,
true);
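The one-line fix in i915_vma_coredump_create() above converts an absolute DMA address into an offset before mapping: io_mapping_map_wc() expects an offset into the iomap, and for a local-memory region the iomap is assumed to begin at mem->region.start. Sketched out under that assumption:

/* dma is an absolute device address within the memory region;
 * the io_mapping covers the region, so subtract its base to get
 * the offset of this page:
 */
resource_size_t offset = dma - mem->region.start;
void __iomem *s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);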
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6cdb052e3850..1a701367a718 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -327,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, bits & ~mask);
- val = I915_READ(PORT_HOTPLUG_EN);
+ val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
- I915_WRITE(PORT_HOTPLUG_EN, val);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
/**
@@ -376,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- POSTING_READ(DEIMR);
+ intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
}
}
@@ -401,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = I915_READ(GEN8_DE_PORT_IMR);
+ old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- I915_WRITE(GEN8_DE_PORT_IMR, new_val);
- POSTING_READ(GEN8_DE_PORT_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
}
}
@@ -440,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -455,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -466,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- I915_WRITE(SDEIMR, sdeimr);
- POSTING_READ(SDEIMR);
+ intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
+ intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
@@ -533,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
@@ -556,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
@@ -715,28 +715,18 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
if (!vblank->max_vblank_count)
return 0;
- return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+ return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_vblank_crtc *vblank =
&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
u32 htotal = mode->crtc_htotal;
u32 clock = mode->crtc_clock;
- u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
/*
* To avoid the race condition where we might cross into the
@@ -763,8 +753,28 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
- scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT or issues
+ * with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
scanline = min(scanline, vtotal - 1);
scanline = (scanline + vblank_start) % vtotal;
@@ -883,7 +893,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
if (stime)
*stime = ktime_get();
- if (use_scanline_counter) {
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already approaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1004,9 +1027,9 @@ static void ivb_parity_work(struct work_struct *work)
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
i915_reg_t reg;
@@ -1020,13 +1043,13 @@ static void ivb_parity_work(struct work_struct *work)
reg = GEN7_L3CDERRST1(slice);
- error_status = I915_READ(reg);
+ error_status = intel_uncore_read(&dev_priv->uncore, reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
- I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
@@ -1047,7 +1070,7 @@ static void ivb_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
@@ -1062,17 +1085,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
default:
return false;
}
@@ -1096,13 +1114,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
case HPD_PORT_B:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
case HPD_PORT_C:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
case HPD_PORT_D:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1112,17 +1127,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
+ return val & ICP_TC_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1337,7 +1347,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
@@ -1345,11 +1355,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -1358,19 +1368,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
- res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_RED(pipe)),
- I915_READ(PIPE_CRC_RES_GREEN(pipe)),
- I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
res1, res2);
}
@@ -1379,7 +1389,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
+ intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
@@ -1433,7 +1443,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
@@ -1446,8 +1456,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- I915_WRITE(reg, pipe_stats[pipe]);
- I915_WRITE(reg, enable_mask);
+ intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -1530,6 +1540,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
intel_handle_vblank(dev_priv, pipe);
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ flip_done_handler(dev_priv, pipe);
+
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1563,18 +1576,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can themselves generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+ u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
}
drm_WARN_ONCE(&dev_priv->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1623,9 +1636,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
- iir = I915_READ(VLV_IIR);
+ gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
break;
@@ -1645,14 +1658,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
* bits this time around.
*/
- I915_WRITE(VLV_MASTER_IER, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
+ intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1670,10 +1683,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
@@ -1710,8 +1723,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
- iir = I915_READ(VLV_IIR);
+ master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (master_ctl == 0 && iir == 0)
break;
@@ -1731,9 +1744,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
* bits this time around.
*/
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
@@ -1754,10 +1767,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1783,7 +1796,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -1792,7 +1805,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg &= ~mask;
}
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
@@ -1837,7 +1850,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -1856,7 +1869,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
@@ -1874,12 +1887,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
}
}
- I915_WRITE(GEN7_ERR_INT, err_int);
+ intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -1889,7 +1902,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
- I915_WRITE(SERR_INT, serr_int);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -1922,7 +1935,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
@@ -1938,8 +1951,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1950,8 +1963,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
@@ -1976,8 +1989,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -1988,8 +2001,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
- I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
@@ -2009,8 +2022,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2042,6 +2055,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_PIPE_VBLANK(pipe))
intel_handle_vblank(dev_priv, pipe);
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ flip_done_handler(dev_priv, pipe);
+
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2051,7 +2067,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -2059,7 +2075,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
@@ -2079,10 +2095,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
+ intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2092,18 +2108,21 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
intel_handle_vblank(dev_priv, pipe);
+
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ flip_done_handler(dev_priv, pipe);
}
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
}
@@ -2190,8 +2209,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2210,8 +2229,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
@@ -2222,8 +2241,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
@@ -2300,8 +2319,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = I915_READ(iir_reg);
- I915_WRITE(iir_reg, psr_iir);
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
if (psr_iir)
found = true;
@@ -2325,7 +2344,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* In case of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -2337,7 +2356,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -2346,7 +2365,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -2366,8 +2385,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+}
+
+static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 9)
+ return GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
static irqreturn_t
@@ -2378,9 +2405,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
enum pipe pipe;
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = I915_READ(GEN8_DE_MISC_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
- I915_WRITE(GEN8_DE_MISC_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
@@ -2390,9 +2417,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = I915_READ(GEN11_DE_HPD_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
if (iir) {
- I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
@@ -2402,11 +2429,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = I915_READ(GEN8_DE_PORT_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- I915_WRITE(GEN8_DE_PORT_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
if (iir & gen8_de_port_aux_mask(dev_priv)) {
@@ -2459,7 +2486,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
@@ -2467,12 +2494,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
- if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
+ if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
@@ -2496,9 +2523,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- iir = I915_READ(SDEIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (iir) {
- I915_WRITE(SDEIIR, iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
ret = IRQ_HANDLED;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -2741,7 +2768,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* only when vblank interrupts are actually enabled.
*/
if (dev_priv->vblank_enabled++ == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -2798,16 +2825,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- tmp = I915_READ(DSI_INTR_MASK_REG(port));
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
if (enable)
tmp &= ~DSI_TE_EVENT;
else
tmp |= DSI_TE_EVENT;
- I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
return true;
}
@@ -2835,19 +2862,6 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-void skl_enable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2869,7 +2883,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc)
i8xx_disable_vblank(crtc);
if (--dev_priv->vblank_enabled == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -2912,19 +2926,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void skl_disable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2935,7 +2936,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- I915_WRITE(SERR_INT, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2948,7 +2949,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3011,8 +3012,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(&dev_priv->gt);
@@ -3117,13 +3118,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9)
- extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
-
spin_lock_irq(&dev_priv->irq_lock);
if (!intel_irqs_enabled(dev_priv)) {
@@ -3165,8 +3163,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
gen8_gt_irq_reset(&dev_priv->gt);
@@ -3212,7 +3210,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3221,7 +3219,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
PORTC_PULSE_DURATION_MASK |
PORTD_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3270,20 +3268,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
@@ -3291,7 +3289,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3302,7 +3300,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3330,12 +3328,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val |= (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
icp_hpd_irq_setup(dev_priv);
}
@@ -3344,7 +3342,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3352,14 +3350,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3367,7 +3365,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3378,11 +3376,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
- val = I915_READ(GEN11_DE_HPD_IMR);
+ val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
val |= ~enabled_irqs & hotplug_irqs;
- I915_WRITE(GEN11_DE_HPD_IMR, val);
- POSTING_READ(GEN11_DE_HPD_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
gen11_tc_hpd_detection_setup(dev_priv);
gen11_tbt_hpd_detection_setup(dev_priv);
@@ -3425,25 +3423,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
val |= CHASSIS_CLK_REQ_DURATION(0xf);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
}
/* Enable digital hotplug on the PCH */
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
PORTD_HOTPLUG_ENABLE);
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
- hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
hotplug &= ~PORTE_HOTPLUG_ENABLE;
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
- I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3451,7 +3449,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
@@ -3482,11 +3480,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3536,7 +3534,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3544,7 +3542,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
BXT_DDIB_HPD_INVERT |
BXT_DDIC_HPD_INVERT);
hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3598,6 +3596,9 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -3605,6 +3606,8 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
DE_DP_A_HOTPLUG);
}
@@ -3664,8 +3667,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3695,11 +3698,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_port_masked |= DSI0_TE | DSI1_TE;
}
- de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
-
- if (INTEL_GEN(dev_priv) >= 9)
- de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
+ de_pipe_enables = de_pipe_masked |
+ GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
if (IS_GEN9_LP(dev_priv))
@@ -3778,14 +3779,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
- POSTING_READ(DG1_MSTR_UNIT_INTR);
+ intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
} else {
gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
}
@@ -3798,8 +3799,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
@@ -3889,11 +3890,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = I915_READ(EIR);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
- I915_WRITE(EIR, *eir);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
- *eir_stuck = I915_READ(EIR);
+ *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -3907,9 +3908,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ(EMR);
- I915_WRITE(EMR, 0xffffffff);
- I915_WRITE(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -3975,7 +3976,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
}
i9xx_pipestat_irq_reset(dev_priv);
@@ -3989,7 +3990,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
@@ -4042,7 +4043,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4059,7 +4060,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
@@ -4085,7 +4086,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -4112,7 +4113,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
- I915_WRITE(EMR, error_mask);
+ intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4188,7 +4189,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4204,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
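Most of the i915_irq.c churn above is a mechanical conversion from the implicit-dev_priv I915_READ()/I915_WRITE()/POSTING_READ() macros to explicit intel_uncore_read()/intel_uncore_write()/intel_uncore_posting_read() calls on &dev_priv->uncore. The hotplug long-detect switches also collapse because each *_LONG_DETECT() macro derives its bit from the pin argument, making the per-pin cases redundant; assuming the TC pins are contiguous in enum hpd_pin, a range case would express the same thing even more compactly (sketch):

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1 ... HPD_PORT_TC6:
		/* the macro selects the bit matching this pin */
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}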
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 2efe609519ca..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -118,9 +118,6 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void skl_enable_flip_done(struct intel_crtc *crtc);
-void skl_disable_flip_done(struct intel_crtc *crtc);
-
void gen2_irq_reset(struct intel_uncore *uncore);
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644
index 000000000000..84f12598d145
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+ CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+ [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+ return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned long new = ~0UL;
+ char *str, *sep, *tok;
+ bool first = true;
+ int err = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ for (sep = str; (tok = strsep(&sep, ","));) {
+ bool enable = true;
+ int i;
+
+ /* Be tolerant of leading/trailing whitespace */
+ tok = strim(tok);
+
+ if (first) {
+ first = false;
+
+ if (!strcmp(tok, "auto"))
+ continue;
+
+ new = 0;
+ if (!strcmp(tok, "off"))
+ continue;
+ }
+
+ if (*tok == '!') {
+ enable = !enable;
+ tok++;
+ }
+
+ if (!strncmp(tok, "no", 2)) {
+ enable = !enable;
+ tok += 2;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if (!strcmp(tok, names[i])) {
+ if (enable)
+ new |= BIT(i);
+ else
+ new &= ~BIT(i);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(names)) {
+ pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+ DRIVER_NAME, val, tok);
+ err = -EINVAL;
+ break;
+ }
+ }
+ kfree(str);
+ if (err)
+ return err;
+
+ WRITE_ONCE(mitigations, new);
+ return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long local = READ_ONCE(mitigations);
+ int count, i;
+ bool enable;
+
+ if (!local)
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+ if (local & BIT(BITS_PER_LONG - 1)) {
+ count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+ enable = false;
+ } else {
+ enable = true;
+ count = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if ((local & BIT(i)) != enable)
+ continue;
+
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
+ "%s%s,", enable ? "" : "!", names[i]);
+ }
+
+ buffer[count - 1] = '\n';
+ return count;
+}
+
+static const struct kernel_param_ops ops = {
+ .set = mitigations_set,
+ .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+" auto -- enables all mitigations required for the platform [default]\n"
+" off -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+" residuals -- clear all thread-local registers between contexts"
+);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644
index 000000000000..1359d8135287
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 43039dc8c607..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -62,7 +62,7 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
- if (GEM_WARN_ON(!r->sgt.pfn))
+ if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f139ea4a90b..6939634e56ed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -185,7 +185,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 330c03e2b4f7..f031966af5b7 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -32,6 +32,7 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+#define ENABLE_GUC_MASK GENMASK(1, 0)
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 11fe790b1969..020b5f561f07 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -455,6 +455,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -513,6 +514,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_reset_engine = true,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
@@ -571,8 +573,7 @@ static const struct intel_device_info hsw_gt3_info = {
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
- .has_64bit_reloc = 1, \
- .has_reset_engine = 1
+ .has_64bit_reloc = 1
#define BDW_PLATFORM \
GEN8_FEATURES, \
@@ -639,7 +640,6 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
@@ -700,7 +700,6 @@ static const struct intel_device_info skl_gt4_info = {
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
- .has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 649c26518d26..112ba5f2ce90 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -198,8 +198,11 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -3516,7 +3520,8 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
+ return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ 2ULL << exponent);
}
/**
@@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit =
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
+ /* Choose a representative limit */
+ oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
mutex_init(&perf->metrics_lock);
- idr_init(&perf->metrics_idr);
+ idr_init_base(&perf->metrics_idr, 1);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d76685ce0399..2b88c0baa1bf 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -26,8 +26,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
-
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
@@ -56,17 +54,42 @@ static bool is_engine_config(u64 config)
return config < __I915_PMU_OTHER(0);
}
-static unsigned int config_enabled_bit(u64 config)
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+ * Events that do not require sampling or tracking of state
+ * transitions between enabled and disabled can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT + val;
+}
+
+static unsigned int config_bit(const u64 config)
{
if (is_engine_config(config))
return engine_config_sample(config);
else
- return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+ return other_bit(config);
}
-static u64 config_enabled_mask(u64 config)
+static u64 config_mask(u64 config)
{
- return BIT_ULL(config_enabled_bit(config));
+ return BIT_ULL(config_bit(config));
}
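For clarity, a sketch of the resulting bit layout of pmu->enable (a summary derived from the code above, not an excerpt from the patch):
/*
 *   bits [0, I915_ENGINE_SAMPLE_COUNT)  engine sampler types
 *   bit I915_ENGINE_SAMPLE_COUNT + 0    __I915_PMU_ACTUAL_FREQUENCY_ENABLED
 *   bit I915_ENGINE_SAMPLE_COUNT + 1    __I915_PMU_REQUESTED_FREQUENCY_ENABLED
 *   bit I915_ENGINE_SAMPLE_COUNT + 2    __I915_PMU_RC6_RESIDENCY_ENABLED
 *
 * Other events (e.g. I915_PMU_INTERRUPTS) hit other_bit()'s default case
 * and return -1, i.e. they consume no slot in pmu->enable.
 */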
static bool is_engine_event(struct perf_event *event)
@@ -74,15 +97,15 @@ static bool is_engine_event(struct perf_event *event)
return is_engine_config(event->attr.config);
}
-static unsigned int event_enabled_bit(struct perf_event *event)
+static unsigned int event_bit(struct perf_event *event)
{
- return config_enabled_bit(event->attr.config);
+ return config_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- u64 enable;
+ u32 enable;
/*
* Only some counters need the sampling timer.
@@ -95,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY) |
ENGINE_SAMPLE_MASK;
/*
@@ -137,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt)
return val;
}
-#if IS_ENABLED(CONFIG_PM)
-
-static inline s64 ktime_since(const ktime_t kt)
+static inline s64 ktime_since_raw(const ktime_t kt)
{
- return ktime_to_ns(ktime_sub(ktime_get(), kt));
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
static u64 get_rc6(struct intel_gt *gt)
@@ -170,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since(pmu->sleep_last);
+ val = ktime_since_raw(pmu->sleep_last);
val += pmu->sample[__I915_SAMPLE_RC6].cur;
}
@@ -184,26 +205,26 @@ static u64 get_rc6(struct intel_gt *gt)
return val;
}
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ intel_wakeref_t wakeref;
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-
- pmu->sleep_last = ktime_get();
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+ pmu->sample[__I915_SAMPLE_RC6].cur;
+ pmu->sleep_last = ktime_get_raw();
+ }
}
-#else
-
-static u64 get_rc6(struct intel_gt *gt)
+static void park_rc6(struct drm_i915_private *i915)
{
- return __get_rc6(gt);
-}
-
-static void park_rc6(struct drm_i915_private *i915) {}
+ struct i915_pmu *pmu = &i915->pmu;
-#endif
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get_raw();
+}
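A brief note on the clock change above (editorial, not from the patch):
/*
 * init_rc6() and park_rc6() now sample ktime_get_raw()
 * (CLOCK_MONOTONIC_RAW) instead of ktime_get(), so the estimated RC6
 * residency accumulated while the device is parked is not skewed by
 * NTP adjustments to CLOCK_MONOTONIC.
 */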
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
@@ -343,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
return pmu->enable &
- (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
@@ -362,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
/*
@@ -384,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, val), period_ns / 1000);
}
- if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
@@ -471,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config)
if (!HAS_RC6(i915))
return -ENODEV;
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
default:
return -ENOENT;
}
@@ -578,6 +601,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ break;
}
}
@@ -610,12 +636,14 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
unsigned long flags;
+ unsigned int bit;
+
+ bit = event_bit(event);
+ if (bit == -1)
+ goto update;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -626,13 +654,6 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- if (pmu->enable_count[bit] == 0 &&
- config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
- }
-
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -667,24 +688,26 @@ static void i915_pmu_enable(struct perf_event *event)
spin_unlock_irqrestore(&pmu->lock, flags);
+update:
/*
* Store the current counter value so we can report the correct delta
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
+ unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ if (bit == -1)
+ return;
+
spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
@@ -881,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu)
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+ __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
@@ -1130,6 +1154,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8405d6da5b9a..60f9595f902c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -14,6 +14,21 @@
struct drm_i915_private;
+/**
+ * Non-engine events for which we need to track the enable/disable
+ * transitions and current state.
+ */
+enum i915_pmu_tracked_events {
+ __I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0,
+ __I915_PMU_REQUESTED_FREQUENCY_ENABLED,
+ __I915_PMU_RC6_RESIDENCY_ENABLED,
+ __I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
+};
+
+/**
+ * Slots used by the sampling timer (non-engine events), with some extras for
+ * convenience.
+ */
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
@@ -28,8 +43,7 @@ enum {
* It is also used to know the needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
- ((1 << I915_PMU_SAMPLE_BITS) + \
- (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+ (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
@@ -66,18 +80,17 @@ struct i915_pmu {
*/
struct hrtimer timer;
/**
- * @enable: Bitmask of all currently enabled events.
+ * @enable: Bitmask of specific enabled events.
+ *
+ * For some events we need to track their state and do some internal
+ * housekeeping.
*
- * Bits are derived from uAPI event numbers in a way that low 16 bits
- * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
- * bit 0), and higher bits correspond to other events (for instance
- * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ * Each engine event sampler type and event listed in enum
+ * i915_pmu_tracked_events gets a bit in this field.
*
- * In other words, low 16 bits are not per engine but per engine
- * sampler type, while the upper bits are directly mapped to other
- * event types.
+ * Low bits are engine samplers and other events continue from there.
*/
- u64 enable;
+ u32 enable;
/**
* @timer_last:
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5375b219cc3b..7146cd0f3256 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
-#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
-#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
+#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
/* Make render/texture TLB fetches lower priority than associated data
@@ -4347,12 +4346,13 @@ enum {
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
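An illustrative (hypothetical) use of the new fields, showing how REG_FIELD_PREP packs the pipeline-full value into bits 10:3:
/*
 *   u32 ctl = VRR_CTL_VRR_ENABLE |
 *             VRR_CTL_PIPELINE_FULL(0x10) |
 *             VRR_CTL_PIPELINE_FULL_OVERRIDE;
 */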
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -6578,6 +6578,7 @@ enum {
#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
+#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
@@ -6614,6 +6615,7 @@ enum {
#define DISPPLANE_ROTATE_180 (1 << 15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_ASYNC_FLIP (1 << 9) /* g4x+ */
#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
@@ -6625,6 +6627,7 @@ enum {
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
@@ -7070,6 +7073,8 @@ enum {
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define _PLANE_CC_VAL_1_A 0x701b4
+#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
@@ -7111,6 +7116,13 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
+#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
+#define PLANE_CC_VAL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
@@ -8213,6 +8225,7 @@ enum {
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
+#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
@@ -9872,6 +9885,7 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
+
#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
#define _TRANSA_HDCP2_AUTH 0x66498
#define _TRANSB_HDCP2_AUTH 0x66598
@@ -9911,6 +9925,44 @@ enum skl_power_gate {
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS BIT(31)
+#define STREAM_TYPE_STATUS BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, pipe) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -9960,6 +10012,7 @@ enum skl_power_gate {
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
#define TRANS_DDI_BFI_ENABLE (1 << 4)
#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
@@ -10851,8 +10904,10 @@ enum skl_power_gate {
#define CNL_DRAM_RANK_3 (0x2 << 9)
#define CNL_DRAM_RANK_4 (0x3 << 9)
-/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using I915_WRITE. */
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
@@ -12053,6 +12108,12 @@ enum skl_power_gate {
#define __GEN11_VCS2_MOCS0 0x10000
#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
+#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
+#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN9_SCRATCH1 _MMIO(0xb11c)
+#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
+
#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
#define PMFLUSHDONE_LNICRSDROP (1 << 20)
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5385b081a376..22e39d938f17 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -275,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
bool i915_request_retire(struct i915_request *rq)
{
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
return false;
RQ_TRACE(rq, "\n");
@@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
}
- if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
atomic_dec(&rq->engine->gt->rps.num_waiters);
- }
/*
* We only loosely track inflight requests across preemption,
@@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq)
* after removing the breadcrumb and signaling it, so that we do not
* inadvertently attach the breadcrumb to a completed request.
*/
- remove_from_engine(rq);
+ if (!list_empty(&rq->sched.link))
+ remove_from_engine(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct i915_request *tmp;
RQ_TRACE(rq, "\n");
-
- GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
do {
tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
@@ -488,6 +487,8 @@ void __i915_request_skip(struct i915_request *rq)
if (rq->infix == rq->postfix)
return;
+ RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -513,6 +514,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
} while (!try_cmpxchg(&rq->fence.error, &old, error));
}
+void i915_request_mark_eio(struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+}
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -539,12 +551,10 @@ bool __i915_request_submit(struct i915_request *request)
* dropped upon retiring. (Otherwise if resubmit a *retired*
* request, this would be a horrible use-after-free.)
*/
- if (i915_request_completed(request))
- goto xfer;
-
- if (unlikely(intel_context_is_closed(request->context) &&
- !intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(request->context);
+ if (__i915_request_is_complete(request)) {
+ list_del_init(&request->sched.link);
+ goto active;
+ }
if (unlikely(intel_context_is_banned(request->context)))
i915_request_set_error_once(request, -EIO);
@@ -579,11 +589,11 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer:
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
- list_move_tail(&request->sched.link, &engine->active.requests);
- clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
- }
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ list_move_tail(&request->sched.link, &engine->active.requests);
+active:
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
/*
* XXX Rollback bonded-execution on __i915_request_unsubmit()?
@@ -643,7 +653,7 @@ void __i915_request_unsubmit(struct i915_request *request)
i915_request_cancel_breadcrumb(request);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request))
+ if (request->sched.semaphores && __i915_request_has_started(request))
request->sched.semaphores = 0;
/*
@@ -855,7 +865,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -961,15 +971,22 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
if (i915_request_started(signal))
return 0;
+ /*
+ * The caller holds a reference on @signal, but we do not serialise
+ * against it being retired and removed from the lists.
+ *
+ * We do not hold a reference to the request before @signal, and
+ * so must be very careful to ensure that it is not _recycled_ as
+ * we follow the link backwards.
+ */
fence = NULL;
rcu_read_lock();
- spin_lock_irq(&signal->lock);
do {
struct list_head *pos = READ_ONCE(signal->link.prev);
struct i915_request *prev;
/* Confirm signal has not been retired, the link is valid */
- if (unlikely(i915_request_started(signal)))
+ if (unlikely(__i915_request_has_started(signal)))
break;
/* Is signal the earliest request on its timeline? */
@@ -994,7 +1011,6 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
fence = &prev->fence;
} while (0);
- spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
@@ -1511,7 +1527,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
*/
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
- if (prev && !i915_request_completed(prev)) {
+ if (prev && !__i915_request_is_complete(prev)) {
/*
* The requests are supposed to be kept in order. However,
* we need to be wary in case the timeline->last_request
@@ -1582,6 +1598,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
return __i915_request_add_to_timeline(rq);
}
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+}
+
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -1598,8 +1620,10 @@ void __i915_request_queue(struct i915_request *rq,
*/
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
- i915_sw_fence_commit(&rq->semaphore);
- i915_sw_fence_commit(&rq->submit);
+
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
void i915_request_add(struct i915_request *rq)
@@ -1823,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
* for unhappy HW.
*/
if (i915_request_is_ready(rq))
- intel_engine_flush_submission(rq->engine);
+ __intel_engine_flush_submission(rq->engine, false);
for (;;) {
set_current_state(state);
@@ -1855,6 +1879,106 @@ out:
return timeout;
}
+static int print_sched_attr(const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return 'E';
+
+ if (i915_request_is_ready(rq))
+ return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+ return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return "!";
+
+ if (__i915_request_has_started(rq))
+ return "*";
+
+ if (!i915_sw_fence_signaled(&rq->semaphore))
+ return "&";
+
+ return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return "+";
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ return "-";
+
+ return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ /*
+ * The prefix is used to show the queue status, for which we use
+ * the following flags:
+ *
+ * U [Unready]
+ * - initial status upon being submitted by the user
+ *
+ * - the request is not ready for execution as it is waiting
+ * for external fences
+ *
+ * R [Ready]
+ * - all fences the request was waiting on have been signaled,
+ * and the request is now ready for execution and will be
+ * in a backend queue
+ *
+ * - a ready request may still need to wait on semaphores
+ * [internal fences]
+ *
+ * V [Ready/virtual]
+ * - same as ready, but queued over multiple backends
+ *
+ * E [Executing]
+ * - the request has been transferred from the backend queue and
+ * submitted for execution on HW
+ *
+ * - a completed request may still be regarded as executing, its
+ * status may not be updated until it is retired and removed
+ * from the lists
+ */
+
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+ prefix, indent, " ",
+ queue_status(rq),
+ rq->fence.context, rq->fence.seqno,
+ run_status(rq),
+ fence_status(rq),
+ buf,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ name);
+}
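A constructed example of the output format (not captured from a real run; the timeline name "ring0" is hypothetical):
/*
 *   E 19:2*  prio=2 @ 100ms: ring0
 *
 * i.e. an executing ('E') request on fence context 19, seqno 2, that has
 * started ('*'), with priority 2, emitted roughly 100ms ago on timeline
 * "ring0".
 */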
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 620b6fab2c5c..1bfe214a47e9 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,6 +43,7 @@
struct drm_file;
struct drm_i915_gem_object;
+struct drm_printer;
struct i915_request;
struct i915_capture_list {
@@ -308,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
-void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+void __i915_request_queue_bh(struct i915_request *rq);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
@@ -371,6 +374,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
static inline bool i915_request_signaled(const struct i915_request *rq)
{
/* The request may live longer than its HWSP, so check flags first! */
@@ -434,7 +442,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
@@ -465,11 +473,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- /* Remember: started but may have since been preempted! */
- return __i915_request_has_started(rq);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ /* Remember: started but may have since been preempted! */
+ result = __i915_request_has_started(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -482,10 +498,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
*/
static inline bool i915_request_is_running(const struct i915_request *rq)
{
+ bool result;
+
if (!i915_request_is_active(rq))
return false;
- return __i915_request_has_started(rq);
+ rcu_read_lock();
+ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -509,12 +531,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
return !list_empty(&rq->sched.link);
}
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ result = __i915_request_is_complete(rq);
+ rcu_read_unlock();
+
+ return result;
}
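A note on the pattern shared by i915_request_started() and i915_request_completed() above (editorial summary, not new code):
/*
 * The HWSP that backs __hwsp_seqno() may be freed once the request is
 * retired, so the helpers take rcu_read_lock() and recheck
 * i915_request_signaled() before dereferencing the seqno: the RCU
 * read-side critical section keeps the backing memory from being freed
 * while it is inspected.
 */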
static inline void i915_request_mark_complete(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..7144239f08df 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
- local_bh_disable();
-
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
- local_bh_enable(); /* kick submission tasklet */
-
return 0;
}
@@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node)
spin_unlock_irq(&schedule_lock);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ struct i915_dependency *dep;
+
+ i915_request_show(m, rq, prefix, indent);
+ if (i915_request_completed(rq))
+ return;
+
+ rcu_read_lock();
+ for_each_signaler(dep, rq) {
+ const struct i915_request *signaler =
+ node_to_request(dep->signaler);
+
+ /* Dependencies along the same timeline are expected. */
+ if (signaler->timeline == rq->timeline)
+ continue;
+
+ if (__i915_request_is_complete(signaler))
+ continue;
+
+ i915_request_show(m, signaler, prefix, indent + 2);
+ }
+ rcu_read_unlock();
+}
+
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..4501e5ac2637 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -13,6 +13,8 @@
#include "i915_scheduler_types.h"
+struct drm_printer;
+
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
list_for_each_entry(it, &(plist)->requests[idx], sched.link)
@@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f72e6c397b08..343ed44d5ed4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -81,4 +81,14 @@ struct i915_dependency {
#define I915_DEPENDENCY_WEAK BIT(2)
};
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
+
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index db2111fc809e..63212df33c9e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,6 +24,7 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "display/intel_de.h"
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
@@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
}
}
@@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
}
@@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
@@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
/* Display arbitration */
if (INTEL_GEN(dev_priv) <= 4)
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on platforms that support FBC */
intel_fbc_global_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 038d4c6884c5..2744558f3050 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,10 +18,15 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
-
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#define WQ_FLAG_BITS \
+ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
+
+/* after WQ_FLAG_* for safety */
+#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
+#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
+
enum {
DEBUG_FENCE_IDLE = 0,
DEBUG_FENCE_NOTIFY,
@@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
if (continuation) {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- if (pos->func == autoremove_wake_function)
- pos->func(pos, TASK_NORMAL, 0, continuation);
- else
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
list_move_tail(&pos->entry, continuation);
+ else
+ pos->func(pos, TASK_NORMAL, 0, continuation);
}
} else {
LIST_HEAD(extra);
@@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
list_for_each_entry_safe(pos, next, &x->head, entry) {
int wake_flags;
- wake_flags = fence->error;
- if (pos->func == autoremove_wake_function)
- wake_flags = 0;
+ wake_flags = 0;
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
+ wake_flags = fence->error;
pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
@@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_entry_t *wq, gfp_t gfp)
{
+ unsigned int pending;
unsigned long flags;
- int pending;
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
@@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
- pending = 0;
+ pending = I915_SW_FENCE_FLAG_FENCE;
if (!wq) {
wq = kmalloc(sizeof(*wq), gfp);
if (!wq) {
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016..f9e780dee9de 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
void cancel_timer(struct timer_list *t)
{
- if (!READ_ONCE(t->expires))
+ if (!timer_active(t))
return;
del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd..abd4dcd9f79c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
static inline bool timer_expired(const struct timer_list *t)
{
- return READ_ONCE(t->expires) && !timer_pending(t);
+ return timer_active(t) && !timer_pending(t);
}
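A hypothetical illustration of how the two predicates split the timer lifecycle (not part of this patch):
/*
 *   never armed:      timer_active() == false, timer_expired() == false
 *   armed, pending:   timer_active() == true,  timer_expired() == false
 *   armed, fired:     timer_active() == true,  timer_expired() == true
 *
 * cancel_timer() clears t->expires, returning a timer to the "never
 * armed" state as seen by these predicates.
 */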
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c653454..a64adc8c883b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_vma_parked(struct intel_gt *gt);
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+ clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
#define for_each_until(cond) if (cond) break; else
/**
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b..f5cb848b7a7e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -249,6 +249,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
+
struct i915_active active;
#define I915_VMA_PAGES_BIAS 24
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e67cec8fa2aa..f2d5ae59081e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -117,150 +117,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u Hz\n",
- info->cs_timestamp_frequency_hz);
-}
-
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
-{
- u32 ts_override = intel_uncore_read(&dev_priv->uncore,
- GEN9_TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000000;
-
- frac_freq = ((ts_override &
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock = (rpm_config_reg &
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 f25_mhz = 25000000;
- u32 f38_4_mhz = 38400000;
- u32 crystal_clock = (rpm_config_reg &
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
-
- if (INTEL_GEN(dev_priv) <= 4) {
- /* PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configurationâ€
- * (“CLKCFGâ€) MCHBAR register)
- */
- return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
- } else if (INTEL_GEN(dev_priv) <= 8) {
- /* PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours)."
- */
- return f12_5_mhz;
- } else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- /* First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
- */
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (INTEL_GEN(dev_priv) <= 10)
- freq = gen10_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
- else
- freq = gen11_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((rpm_config_reg &
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- }
-
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
}
#undef INTEL_VGA_DEVICE
@@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
- /* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_hz =
- read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_hz) {
- runtime->cs_timestamp_period_ns =
- i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
- drm_dbg(&dev_priv->drm,
- "CS timestamp wraparound in %lldms\n",
- div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
- S32_MAX),
- USEC_PER_SEC));
- }
-
if (!HAS_DISPLAY(dev_priv)) {
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d92fa041c700..cf2d528c6e9b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -123,7 +123,6 @@ enum intel_ppgtt_type {
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_logical_ring_preemption); \
func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
@@ -224,9 +223,6 @@ struct intel_runtime_info {
u8 num_scalers[I915_MAX_PIPES];
u32 rawclk_freq;
-
- u32 cs_timestamp_frequency_hz;
- u32 cs_timestamp_period_ns;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 4754296a250e..73d256fc6830 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
+#include "intel_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -201,22 +202,12 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
return -EINVAL;
}
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
@@ -269,16 +260,12 @@ skl_get_dram_info(struct drm_i915_private *i915)
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (dram_info->num_channels * mem_freq_khz == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}
- dram_info->valid = true;
return 0;
}
@@ -365,7 +352,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
- u8 num_active_channels;
+ u8 num_active_channels, valid_ranks = 0;
int i;
val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
@@ -375,10 +362,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (mem_freq_khz * num_active_channels == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
@@ -410,57 +394,125 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
- /*
- * If any of the channel is single rank channel,
- * worst case output will be same as if single rank
- * memory, so consider single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
- if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
return -EINVAL;
}
- dram_info->valid = true;
+ return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ }
+
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
return 0;
}
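A worked decode of the MEM_SS_READ_GLOBAL_INFO dword on the pre-gen12 layout, as a sketch (the value is hypothetical; the field positions are the ones used above):

	u32 val = 0x213;                      /* hypothetical pcode readback */
	u32 type = val & 0xf;                 /* 3 -> INTEL_DRAM_LPDDR4 */
	u32 channels = (val & 0xf0) >> 4;     /* 1 channel */
	u32 qgv_points = (val & 0xf00) >> 8;  /* 2 QGV points */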
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+ int ret = skl_get_dram_info(i915);
+
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+ /* Always needed for GEN12+ */
+ i915->dram_info.wm_lv_0_adjust_needed = true;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
/*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
+ * Assume level 0 watermark latency adjustment is needed until proven
+ * otherwise; this w/a is not needed by bxt/glk.
*/
- dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (IS_GEN9_LP(i915))
+ if (INTEL_GEN(i915) >= 12)
+ ret = gen12_get_dram_info(i915);
+ else if (INTEL_GEN(i915) >= 11)
+ ret = gen11_get_dram_info(i915);
+ else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
if (ret)
return;
- drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps, dram_info->num_channels);
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
- drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+ drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+ yesno(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a1026..1bfcdd89b241 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -10,7 +10,7 @@
#define REGION_MAP(type, inst) \
BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
-const u32 intel_region_map[] = {
+static const u32 intel_region_map[] = {
[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 232490d89a83..6ffc0673f005 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -51,21 +51,16 @@ enum intel_region_id {
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
for_each_if((mr) = (i915)->mm.regions[id])
-/**
- * Memory regions encoded as type | instance
- */
-extern const u32 intel_region_map[];
-
struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
void (*release)(struct intel_memory_region *mem);
- struct drm_i915_gem_object *
- (*create_object)(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+ int (*init_object)(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
};
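For illustration, the shape a region backend takes under the new vtable: the GEM object is now allocated by common code and the region hook only initialises it. A sketch with a hypothetical backend name:

	static int my_region_init_object(struct intel_memory_region *mem,
					 struct drm_i915_gem_object *obj,
					 resource_size_t size,
					 unsigned int flags)
	{
		/* region-specific setup of the pre-allocated obj */
		return 0;
	}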
struct intel_memory_region {
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index f31c0dabd0cc..ecaf314d60b6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -143,8 +143,9 @@ static bool intel_is_virt_pch(unsigned short id,
sdevice == PCI_SUBDEVICE_ID_QEMU));
}
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+ unsigned short *pch_id, enum intel_pch *pch_type)
{
unsigned short id = 0;
@@ -181,12 +182,21 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
else
drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
- return id;
+ *pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
}
void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
/* DG1 has south engine display on the same PCI device */
if (IS_DG1(dev_priv)) {
@@ -206,9 +216,6 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* of only checking the first one.
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
if (pch->vendor != PCI_VENDOR_ID_INTEL)
continue;
@@ -221,14 +228,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
break;
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && pch_type == PCH_NONE))
- id = 0;
-
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
@@ -244,10 +244,15 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
"Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
+ } else if (!pch) {
+ if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+ }
}
- if (!pch)
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
-
pci_dev_put(pch);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a20b5051f18c..0c3e63f27c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
SKL_DE_COMPRESSED_HASH_MODE);
}
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
* Display WA #0859: skl,bxt,kbl,glk,cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
}
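As an aside, each read-OR-write pair above could also be expressed with the existing intel_uncore_rmw() helper; a sketch, not part of this patch:

	intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL,
			 0, DISP_FBC_MEMORY_WAKE);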
@@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
* FIXME:
* GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
*/
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
/*
@@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 * is off and an MMIO access is attempted by any privileged
 * application, using batch buffers or any other means.
*/
- I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
/*
* WaFbcTurnOffFbcWatermark:bxt
* Display WA #0562: bxt
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcHighMemBwCorruptionAvoidance:bxt
* Display WA #0883: bxt
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
* Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
@@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(CLKCFG);
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
@@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
}
/* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
@@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- POSTING_READ(FW_BLC_SELF_VLV);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- POSTING_READ(FW_BLC_SELF);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3);
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, val);
- POSTING_READ(DSPFW3);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- I915_WRITE(FW_BLC_SELF, val);
- POSTING_READ(FW_BLC_SELF);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_I915GM(dev_priv)) {
/*
 * FIXME can't find a bit like this for 915G, and
 * yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- I915_WRITE(INSTPM, val);
- POSTING_READ(INSTPM);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
} else {
return false;
}
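The _MASKED_BIT_* macros used above target "masked" registers, where the upper 16 bits of the write select which low bits take effect; paraphrasing their i915_reg.h definitions as a sketch:

	#define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
	#define _MASKED_BIT_DISABLE(a) ((a) << 16)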
@@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = I915_READ(DSPARB2);
- dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc)
wm = intel_calculate_wm(clock, &pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = I915_READ(DSPFW1);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- I915_WRITE(DSPFW1, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(clock, &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
@@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
(wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
FW_WM(wm->sr.fbc, FBC_SR) |
FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
(wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
FW_WM(wm->sr.cursor, CURSOR_SR) |
FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
FW_WM(wm->hpll.plane, HPLL_SR));
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#define FW_WM_VLV(value, plane) \
@@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(VLV_DDL(pipe),
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
(wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
@@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- I915_WRITE(DSPHOWM, 0);
- I915_WRITE(DSPHOWM1, 0);
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
- I915_WRITE(DSPFW7_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPFW8_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- I915_WRITE(DSPFW9_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
@@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- I915_WRITE(DSPFW7,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
@@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#undef FW_WM_VLV
@@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
FW_WM(8, CURSORB) |
FW_WM(8, PLANEB) |
FW_WM(8, PLANEA));
- I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
srwm = 1;
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- I915_WRITE(FW_BLC_SELF,
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
}
drm_dbg_kms(&dev_priv->drm,
@@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
if (enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
drm_dbg_kms(&dev_priv->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- I915_WRITE(FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2930,7 +2930,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 * any underrun. If we cannot get DIMM info, assume 16GB DIMMs
 * to avoid any underrun.
*/
- if (dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
previous->wm_lp[2] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
previous->wm_lp[1] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
previous->wm_lp[0] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
_ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(WM_MISC);
+ val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
} else {
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~DISP_DATA_PARTITION_5_6;
else
val |= DISP_DATA_PARTITION_5_6;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
}
if (dirty & WM_DIRTY_FBC) {
- val = I915_READ(DISP_ARB_CTL);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
else
val |= DISP_FBC_WM_DIS;
- I915_WRITE(DISP_ARB_CTL, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
}
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
@@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
u8 enabled_slices_mask = 0;
for (i = 0; i < max_slices; i++) {
- if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
enabled_slices_mask |= BIT(i);
}
@@ -4017,43 +4017,48 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-/*
- * Calculate initial DBuf slice offset, based on slice size
- * and mask(i.e if slice size is 1024 and second slice is enabled
- * offset would be 1024)
- */
-static unsigned int
-icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
- u32 slice_size,
- u32 ddb_size)
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
{
- unsigned int offset = 0;
+ int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- if (!dbuf_slice_mask)
- return 0;
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
- offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
- WARN_ON(offset >= ddb_size);
- return offset;
+ return ddb_size;
}
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+ return intel_dbuf_size(dev_priv) /
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
- if (INTEL_GEN(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(dev_priv);
- return ddb_size;
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
}
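A worked example with hypothetical numbers: with intel_dbuf_size() = 2048 and two supported slices, slice_size is 1024, and slice_mask = BIT(1) yields:

	ddb->start = (ffs(0x2) - 1) * 1024;   /* = 1024 */
	ddb->end   = fls(0x2) * 1024;         /* = 2048 */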
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
u32 slice_mask = 0;
- u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 ddb_size = intel_dbuf_size(dev_priv);
u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u16 slice_size = ddb_size / num_supported_slices;
u16 start_slice;
@@ -4077,116 +4082,40 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
return slice_mask;
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes);
-
-static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- struct skl_ddb_entry *alloc, /* out */
- int *num_active /* out */)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
- const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
- enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(intel_state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(intel_state);
- u8 active_pipes = new_dbuf_state->active_pipes;
- u16 ddb_size;
- u32 ddb_range_size;
- u32 i;
- u32 dbuf_slice_mask;
- u32 offset;
- u32 slice_size;
- u32 total_slice_mask;
- u32 start, end;
- int ret;
-
- *num_active = hweight8(active_pipes);
-
- if (!crtc_state->hw.active) {
- alloc->start = 0;
- alloc->end = 0;
- return 0;
- }
-
- ddb_size = intel_get_ddb_size(dev_priv);
-
- slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
- /*
- * If the state doesn't change the active CRTC's or there is no
- * modeset request, then there's no need to recalculate;
- * the existing pipe allocation limits should remain unchanged.
- * Note that we're safe from racing commits since any racing commit
- * that changes the active CRTC list or do modeset would need to
- * grab _all_ crtc locks, including the one we currently hold.
- */
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
- !dev_priv->wm.distrust_bios_wm) {
- /*
- * alloc may be cleared by clear_intel_crtc_state,
- * copy from old state to be sure
- *
- * FIXME get rid of this mess
- */
- *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
+ if (!crtc_state->hw.active)
return 0;
- }
-
- /*
- * Get allowed DBuf slices for correspondent pipe and platform.
- */
- dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
-
- /*
- * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
- * and slice size is 1024, the offset would be 1024
- */
- offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
- slice_size, ddb_size);
-
- /*
- * Figure out total size of allowed DBuf slices, which is basically
- * a number of allowed slices for that pipe multiplied by slice size.
- * Inside of this
- * range ddb entries are still allocated in proportion to display width.
- */
- ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
/*
 * Watermark/ddb requirements depend heavily on the width of the
 * framebuffer, so instead of allocating DDB equally among pipes,
 * distribute DDB based on the resolution/width of the display.
*/
- total_slice_mask = dbuf_slice_mask;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- enum pipe pipe = crtc->pipe;
- int hdisplay, vdisplay;
- u32 pipe_dbuf_slice_mask;
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
- if (!crtc_state->hw.active)
- continue;
+ return hdisplay;
+}
- pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
- active_pipes);
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
- /*
- * According to BSpec pipe can share one dbuf slice with another
- * pipes or pipe can use multiple dbufs, in both cases we
- * account for other pipes only if they have exactly same mask.
- * However we need to account how many slices we should enable
- * in total.
- */
- total_slice_mask |= pipe_dbuf_slice_mask;
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ int weight = dbuf_state->weight[pipe];
/*
* Do not account pipes using other slice sets
@@ -4195,42 +4124,78 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 * i.e. no partial intersection), so it is enough to check for
* equality for now.
*/
- if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
continue;
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
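A worked example with hypothetical modes: pipes A/B/C share one slice set with weights 1920/2560/1280 (their hdisplay values). For for_pipe == PIPE_B the loop produces:

	weight_start = 1920;         /* pipes before B */
	weight_end   = 1920 + 2560;  /* pipes up to and including B */
	weight_total = 5760;         /* every pipe on the slice set */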
- total_width_in_range += hdisplay;
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
- if (pipe < for_pipe)
- width_before_pipe_in_range += hdisplay;
- else if (pipe == for_pipe)
- pipe_width = hdisplay;
+ if (new_dbuf_state->weight[pipe] == 0) {
+ new_dbuf_state->ddb[pipe].start = 0;
+ new_dbuf_state->ddb[pipe].end = 0;
+ goto out;
}
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
+ skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+ if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
- end = ddb_range_size *
- (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- alloc->start = offset + start;
- alloc->end = offset + end;
+ crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
- for_crtc->base.id, for_crtc->name,
- dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
return 0;
}
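Continuing the example above with ddb_range_size = 1024, pipe B's share comes out as:

	start = 1024 * 1920 / 5760;  /* = 341 */
	end   = 1024 * 4480 / 5760;  /* = 796 */

so pipe B is assigned ddb blocks [ddb_slices.start + 341, ddb_slices.start + 796).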
@@ -4300,12 +4265,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = I915_READ(CUR_BUF_CFG(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
if (val & PLANE_CTL_ENABLE)
@@ -4314,11 +4279,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ALPHA_MASK);
if (INTEL_GEN(dev_priv) >= 11) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
if (fourcc &&
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
@@ -4632,10 +4597,8 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -4798,55 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
- int num_active;
u32 blocks;
int level;
- int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (!crtc_state->hw.active) {
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- /*
- * FIXME hack to make sure we compute this sensibly when
- * turning off all the pipes. Otherwise we leave it at
- * whatever we had previously, and then runtime PM will
- * mess it up by turning off all but S1. Remove this
- * once the dbuf state computation flow becomes sane.
- */
- if (new_dbuf_state->active_pipes == 0) {
- new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
- }
-
- alloc->start = alloc->end = 0;
+ if (!crtc_state->hw.active)
return 0;
- }
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
@@ -4855,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
total_data_rate =
skl_get_total_relative_data_rate(state, crtc);
- ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
- total_data_rate,
- alloc, &num_active);
- if (ret)
- return ret;
-
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -5731,6 +5663,18 @@ static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
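Two quick cases for the union helper, as a sketch:

	/* empty a:      {0, 0}   u {512, 1024} -> {512, 1024} */
	/* overlapping:  {0, 512} u {512, 1024} -> {0, 1024}   */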
+
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
@@ -5775,39 +5719,114 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return 0;
}
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(dev_priv, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
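For example, with slices[PIPE_A] = BIT(DBUF_S1) and slices[PIPE_B] = BIT(DBUF_S2) this returns:

	BIT(DBUF_S1) | BIT(DBUF_S2);  /* S1 always on, S2 pulled in by pipe B */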
+
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
- const struct intel_dbuf_state *new_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ }
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
}
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
drm_dbg_kms(&dev_priv->drm,
"Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
return 0;
}
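A note on the global-state discipline in the rewritten flow above, summarised as a sketch of the calls in this function:

	/*
	 * Changes to active_pipes, per-pipe slices, weights or ddb entries
	 * only need intel_atomic_lock_global_state(); enabled_slices is
	 * what actually gets programmed to the hardware, so changing it
	 * needs the stronger intel_atomic_serialize_global_state().
	 */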
@@ -5944,83 +5963,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
- u8 pipe_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- if ((pipe_mask & BIT(crtc->pipe)) == 0)
- continue;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i, ret;
-
- if (dev_priv->wm.distrust_bios_wm) {
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them, and it really
- * wants to recompute things when distrust_bios_wm is set
- * so we add all the pipes to the state.
- */
- ret = intel_add_affected_pipes(state, ~0);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_dbuf_state *new_dbuf_state;
- const struct intel_dbuf_state *old_dbuf_state;
-
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
- break;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them.
- */
- ret = intel_add_affected_pipes(state,
- new_dbuf_state->active_pipes);
- if (ret)
- return ret;
-
- break;
- }
-
- return 0;
-}
-
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6088,15 +6030,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
int ret, i;
- ret = skl_ddb_add_affected_pipes(state);
- if (ret)
- return ret;
-
- /*
- * Calculate WM's for all pipes that are part of this transaction.
- * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified if pipe allocations had to change.
- */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
if (ret)
@@ -6231,9 +6164,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for (level = 0; level <= max_level; level++) {
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, plane_id, level));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
else
- val = I915_READ(CUR_WM(pipe, level));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
@@ -6242,9 +6175,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
wm->sagv_wm0 = wm->wm[0];
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
- val = I915_READ(CUR_WM_TRANS(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
@@ -6255,20 +6188,49 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
- }
- if (dev_priv->active_pipes) {
- /* Fully recompute DDB on first atomic commit */
- dev_priv->wm.distrust_bios_wm = true;
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+ plane_id, ddb_y, ddb_uv);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
+ }
+
+ dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
}
+
+ dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
@@ -6280,7 +6242,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -6324,13 +6286,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -6338,7 +6300,7 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
@@ -6352,7 +6314,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
u32 tmp;
for_each_pipe(dev_priv, pipe) {
- tmp = I915_READ(VLV_DDL(pipe));
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -6364,34 +6326,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
- tmp = I915_READ(DSPFW7_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPFW8_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = I915_READ(DSPFW9_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -6403,11 +6365,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = I915_READ(DSPFW7);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -6428,7 +6390,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
g4x_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -6572,7 +6534,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -6719,9 +6681,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
/*
* Don't touch WM1S_LP_EN here.
@@ -6739,25 +6701,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
- hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
- hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
- hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
if (INTEL_GEN(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
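Each hunk in this file applies the same mechanical conversion: the I915_READ()/I915_WRITE() macros, which implicitly referenced a local dev_priv, become explicit intel_uncore_read()/intel_uncore_write() calls on &dev_priv->uncore. For read-modify-write sequences the driver also provides intel_uncore_rmw(uncore, reg, clear, set), used elsewhere in this patch; a minimal sketch of the two equivalent forms:

	/* Hedged sketch: open-coded RMW vs. the intel_uncore_rmw() helper.
	 * Both set DISP_FBC_WM_DIS without disturbing the other bits.
	 */
	static void example_set_fbc_wm_dis(struct intel_uncore *uncore)
	{
		u32 val;

		val = intel_uncore_read(uncore, DISP_ARB_CTL);
		val |= DISP_FBC_WM_DIS;
		intel_uncore_write(uncore, DISP_ARB_CTL, val);

		/* equivalent: clear nothing, set DISP_FBC_WM_DIS */
		intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
	}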
/**
@@ -6808,14 +6770,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
if (!HAS_IPC(dev_priv))
return;
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (dev_priv->ipc_enabled)
val |= DISP_IPC_ENABLE;
else
val &= ~DISP_IPC_ENABLE;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
@@ -6850,7 +6812,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
@@ -6858,12 +6820,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
+ intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
- POSTING_READ(DSPSURF(pipe));
+ intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+ intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
}
}
@@ -6879,10 +6841,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(PCH_3DCGDIS0,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
/*
@@ -6892,12 +6854,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
+ (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
/*
@@ -6909,18 +6871,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
}
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
g4x_disable_trickle_feed(dev_priv);
@@ -6938,27 +6900,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The following fixes the weird display corruption (a few pixels
* shifted downward) seen only on the LVDS output of some HP laptops
* with Ivy Bridge.
*/
for_each_pipe(dev_priv, pipe) {
- val = I915_READ(TRANS_CHICKEN2(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
- I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
@@ -6967,7 +6929,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(MCH_SSKPD);
+ tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
@@ -6978,14 +6940,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -7002,7 +6964,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
* WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -7017,14 +6979,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
*
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
@@ -7042,23 +7004,23 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(dev_priv))
- I915_WRITE(SOUTH_DSPCLK_GATE_D,
- I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
- I915_WRITE(TRANS_CHICKEN1(PIPE_A),
- I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -7070,60 +7032,62 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
u32 val;
/* WaTempDisableDOPClkGating:bdw */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- val = I915_READ(GEN8_L3SQCREG1);
+ val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
- I915_WRITE(GEN8_L3SQCREG1, val);
+ intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
* See the definition of L3SQCREG1 in BSpec.
*/
- POSTING_READ(GEN8_L3SQCREG1);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
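gen8_set_l3sqc_credits() above also shows the posting-read idiom: MMIO writes may be posted (buffered), so before timing a hardware-mandated delay the write is flushed by reading the same register back. A minimal sketch, assuming the same register and uncore pointer as above:

	intel_uncore_write(uncore, GEN8_L3SQCREG1, val);
	/* flush the posted write so the delay is measured at the device */
	intel_uncore_posting_read(uncore, GEN8_L3SQCREG1);
	udelay(1); /* BSpec: wait at least 100 clocks before re-enabling */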
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
- I915_WRITE(ILK_DPFC_CHICKEN,
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* This is not a Wa. Enable to reduce Sampler power */
- I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
- I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
/* Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
}
-static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl */
- I915_WRITE(ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ /* Wa_1409120013:tgl,rkl,adl_s,dg1 */
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod) */
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
- /* Wa_14011059788:tgl */
+ /* Wa_14011059788:tgl,rkl,adl_s,dg1 */
intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
0, DFR_DISABLE);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ gen12lp_init_clock_gating(dev_priv);
+
/* Wa_1409836686:dg1[a0] */
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
@@ -7133,7 +7097,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -7143,35 +7107,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
/* This is not a Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
/* WaEnableChickenDCPR:cnl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:cnl
* Display WA #0859: cnl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
- val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
- val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
val |= GWUNIT_CLKGATE_DIS;
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
/* WaDisableVFclkgate:cnl */
/* WaVFUnitClockGatingDisable:cnl */
- val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
val |= VFUNIT_CLKGATE_DIS;
- I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7180,21 +7144,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:cfl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:cfl
* Display WA #0562: cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:cfl
* Display WA #0873: cfl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7203,31 +7167,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:kbl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/*
* WaFbcTurnOffFbcWatermark:kbl
* Display WA #0562: kbl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:kbl
* Display WA #0873: kbl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7236,32 +7200,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableDopClockGating:skl */
- I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
~GEN7_DOP_CLOCK_GATE_ENABLE);
/* WAC6entrylatency:skl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:skl
* Display WA #0562: skl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:skl
* Display WA #0873: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -7270,42 +7234,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* WaSwitchSolVfFArbitrationPriority:bdw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* WaPsrDPAMaskVBlankInSRD:bdw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(CHICKEN_PIPESL_1(pipe),
- I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
/* WaKVMNotificationOnConfigChange:bdw */
- I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(dev_priv);
@@ -7315,24 +7279,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
* Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
* clock gating.
*/
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* This is required by WaCatErrorRejectionIssue:hsw */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaSwitchSolVfFArbitrationPriority:hsw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
lpt_init_clock_gating(dev_priv);
}
@@ -7341,26 +7305,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 snpcr;
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
/* WaDisableBackToBackFlipFix:ivb */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
if (IS_IVB_GT1(dev_priv))
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
@@ -7368,20 +7332,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
g4x_disable_trickle_feed(dev_priv);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev_priv);
@@ -7392,58 +7356,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableBackToBackFlipFix:vlv */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisableDopClockGating:vlv */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* WaDisableL3Bank2xClockGate:vlv
* Disabling L3 clock gating - MMIO 940c[25] = 1
* Set bit 25 to disable L3_BANK_2x_CLK_GATING */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
+ intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+ intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
/* WaDisableSemaphoreAndSyncFlipWait:chv */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSDEUnitClockGating:chv */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
@@ -7458,17 +7422,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate;
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
+ intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7489,49 +7453,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 dstate = I915_READ(D_STATE);
+ u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
+ intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
if (IS_PINEVIEW(dev_priv))
- I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
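The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes in this and the following hunks target "masked" registers: the upper 16 bits of the written value select which of the lower 16 bits take effect, so no read-modify-write is required. A sketch of the helpers, roughly as defined in i915_reg.h:

	/* Sketch: writing bit N only sticks if bit N+16 is set in the
	 * same write; unselected bits keep their current value.
	 */
	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
	#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

	/* e.g. set the ARB C3 LP bit without touching its neighbours */
	intel_uncore_write(uncore, MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));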
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
@@ -7541,13 +7505,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
* absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
- I915_WRITE(SCPD0,
+ intel_uncore_write(&dev_priv->uncore, SCPD0,
_MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
@@ -7583,7 +7547,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
else if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+ dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd5..97550cf0b6df 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,8 +9,10 @@
#include <linux/types.h>
#include "display/intel_bw.h"
+#include "display/intel_display.h"
#include "display/intel_global_state.h"
+#include "i915_drv.h"
#include "i915_reg.h"
struct drm_device;
@@ -40,7 +42,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
@@ -69,6 +70,10 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
struct intel_dbuf_state {
struct intel_global_state base;
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+
u8 enabled_slices;
u8 active_pipes;
};
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 02ebf5a04a9b..0ec0cf191955 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
lockdep_assert_held(&i915->sb_lock);
/*
- * GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
+ * GEN6_PCODE_* are outside of the forcewake domain, so we can use the
+ * intel_uncore_read/write_fw variants to reduce the amount of work
* required when reading/writing.
*/
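The comment above distinguishes the managed accessors from the "_fw" variants: intel_uncore_read_fw()/intel_uncore_write_fw() skip the per-access forcewake and lock handling, so they are only safe for registers outside every forcewake domain, or when the caller already holds uncore->lock and an explicit forcewake reference. A hedged sketch of the explicit-forcewake pattern (register choice illustrative):

	u32 a, b;

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

	/* cheap raw reads; the bookkeeping was done once above */
	a = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI);
	b = intel_uncore_read_fw(uncore, GEN6_RP_PREV_UP);

	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	spin_unlock_irq(&uncore->lock);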
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1c14a07eba7d..9ac501bcfdad 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ_FW(reg) & mask) == value
+ * (intel_uncore_read_fw(uncore, reg) & mask) == value
*
* Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
* For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
@@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ(reg) & mask) == value
+ * (intel_uncore_read(uncore, reg) & mask) == value
*
* Otherwise, the wait will time out after @timeout_ms milliseconds.
*
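A hedged usage sketch for the helper documented above, via the intel_wait_for_register() wrapper: poll until (intel_uncore_read(uncore, reg) & mask) == value, or fail after the timeout. The register and bit here are illustrative, not taken from this patch:

	int err;

	err = intel_wait_for_register(uncore, SOUTH_CHICKEN2,
				      FDI_MPHY_IOSFSB_RESET_STATUS, /* mask */
				      0,                            /* value */
				      100);                         /* ms */
	if (err) /* likely -ETIMEDOUT */
		return err;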
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bd2467284295..59f0da8f1fbb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
/*
* Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
+ * Must be used with intel_uncore_read_fw() and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
@@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false)
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * uncore->funcs.mmio_writeq.
+ * machine death. For this reason we do not support intel_uncore_write64,
+ * or uncore->funcs.mmio_writeq.
*
* When reading a 64-bit value as two 32-bit values, the delay may cause
* the two reads to mismatch, e.g. a timestamp overflowing. Also note that
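The mismatch this comment warns about is the classic torn read of a 64-bit counter composed of two 32-bit accesses. A common mitigation (a generic sketch, not an i915 API; reg_lo/reg_hi are hypothetical) is to re-read the high word until it is stable around the low read:

	/* Hedged sketch: tear-free 64-bit read from two 32-bit registers */
	static u64 read_u64_split(struct intel_uncore *uncore,
				  i915_reg_t reg_lo, i915_reg_t reg_hi)
	{
		u32 lo, hi, tmp;

		do {
			hi = intel_uncore_read(uncore, reg_hi);
			lo = intel_uncore_read(uncore, reg_lo);
			tmp = intel_uncore_read(uncore, reg_hi);
		} while (hi != tmp); /* high word ticked over; retry */

		return ((u64)hi << 32) | lo;
	}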
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 412e21604a05..dc394fb7ccfa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f88473d396f4..f99bb0113726 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
struct list_head *objects)
{
/* quirk is only for live tiled objects, use it to declare ownership */
- GEM_BUG_ON(obj->mm.quirked);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
list_add(&obj->st_link, objects);
}
@@ -85,7 +85,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (vma->obj->mm.quirked)
+ if (i915_gem_object_has_tiling_quirk(vma->obj))
i915_vma_unpin(vma);
}
@@ -94,8 +94,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, list, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
i915_gem_object_put(obj);
}
@@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct file *file;
-
- file = mock_file(i915);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- break;
- }
count = 0;
onstack_fence_init(&fence);
do {
+ struct intel_context *ce;
struct i915_request *rq;
- struct i915_gem_context *ctx;
- ctx = live_context(i915, file);
- if (IS_ERR(ctx))
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
break;
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
igt_evict_ctl.fail_if_busy = false;
+ intel_context_put(ce);
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
@@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg)
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
-
- fput(file);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c53a222e3dec..c1adea8765a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -28,6 +28,7 @@
#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_random.h"
#include "i915_selftest.h"
@@ -1880,7 +1881,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index debbac660519..e9d86dab8677 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -262,7 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
+ delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e424a6d1a68c..d2a678a2497e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"
@@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a)
static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
{
- u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
}
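intel_gt_clock_interval_to_ns() converts command-streamer timestamp ticks to nanoseconds using the GT clock frequency; cycles_to_ns() then strips the TF_BIAS fixed-point fraction that trifilter() left in the value via DIV_ROUND_CLOSEST. A sketch of the tick-to-ns step, assuming the timestamp frequency in Hz is known (the field name is an assumption, not the helper's actual internals):

	/* Hedged sketch: scale ticks by NSEC_PER_SEC / freq without
	 * overflowing the 64-bit intermediate product.
	 */
	static u64 ticks_to_ns(u64 ticks, u32 gt_clock_hz)
	{
		return mul_u64_u32_div(ticks, NSEC_PER_SEC, gt_clock_hz);
	}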
@@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ec0ecb4e4ca6..83f6e5f31fb3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
100) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 0aeba8e3af28..ce7adfa3bca0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr = -1;
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (addr != -1 && sg_dma_address(sg) != addr)
+ return false;
+
+ addr = sg_dma_address(sg) + sg_dma_len(sg);
+ }
+
+ return true;
+}
+
static int igt_mock_contiguous(void *arg)
{
struct intel_memory_region *mem = arg;
@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s min object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s min object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s max object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s max object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects;
}
- if (obj->mm.pages->nents != 1) {
- pr_err("%s object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -337,6 +352,68 @@ out_put:
return err;
}
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ const unsigned int max_segment = i915_sg_segment_size();
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ struct scatterlist *sg;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapped scatterlist elements have an implicit limit of
+ * UINT_MAX on each element.
+ */
+
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ size = 0;
+ list_for_each_entry(block, &obj->mm.blocks, link) {
+ if (i915_buddy_block_size(&mem->mm, block) > size)
+ size = i915_buddy_block_size(&mem->mm, block);
+ }
+ if (size < max_segment) {
+ pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+ __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
}
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ if (t[0] <= 0) {
+ /* ignore the impossible to protect our sanity */
+ pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ t[0], t[4]);
+ continue;
+ }
+
pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
__func__,
- src_mr->name,
- repr_type(src_type),
- dst_mr->name,
- repr_type(dst_type),
- tests[i].name,
- size >> 10,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
div64_u64(mul_u32_u32(4 * size,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
@@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e946bd2087d8..0188f877cab2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
- i915_gem_driver_release__contexts(i915);
-
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 979d96f27c43..3c6021415274 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,21 +15,16 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-static struct drm_i915_gem_object *
-mock_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int mock_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
if (size > mem->mm.size)
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
+ return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
@@ -40,13 +35,13 @@ mock_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
static const struct intel_memory_region_ops mock_region_ops = {
.init = intel_memory_region_init_buddy,
.release = intel_memory_region_release_buddy,
- .create_object = mock_object_create,
+ .init_object = mock_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 6231048aa5aa..b5fa0e45a839 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -28,6 +28,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
+ depends on COMMON_CLK
select DRM_PANEL
help
Choose this to enable the internal LVDS Display Bridge (LDB)
@@ -36,7 +37,7 @@ config DRM_IMX_LDB
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
- depends on DRM_IMX
+ depends on DRM_IMX && OF
help
Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index d07b39b8afd2..87428fb23d9f 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -15,23 +15,32 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+ struct drm_encoder encoder;
+ struct imx_hdmi *hdmi;
+};
+
struct imx_hdmi {
struct device *dev;
- struct drm_encoder encoder;
+ struct drm_bridge *bridge;
struct dw_hdmi *hdmi;
struct regmap *regmap;
};
static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
{
- return container_of(e, struct imx_hdmi, encoder);
+ return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
}
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
@@ -98,19 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = {
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
-static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
-{
- struct device_node *np = hdmi->dev->of_node;
-
- hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
- if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get gpr\n");
- return PTR_ERR(hdmi->regmap);
- }
-
- return 0;
-}
-
static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -195,65 +191,36 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct dw_hdmi_plat_data *plat_data;
- const struct of_device_id *match;
struct drm_device *drm = data;
+ struct imx_hdmi_encoder *hdmi_encoder;
struct drm_encoder *encoder;
- struct imx_hdmi *hdmi;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
+ hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder,
+ encoder, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(hdmi_encoder))
+ return PTR_ERR(hdmi_encoder);
- hdmi = dev_get_drvdata(dev);
- memset(hdmi, 0, sizeof(*hdmi));
-
- match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
- plat_data = match->data;
- hdmi->dev = &pdev->dev;
- encoder = &hdmi->encoder;
+ hdmi_encoder->hdmi = dev_get_drvdata(dev);
+ encoder = &hdmi_encoder->encoder;
ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
if (ret)
return ret;
- ret = dw_hdmi_imx_parse_dt(hdmi);
- if (ret < 0)
- return ret;
-
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
- /*
- * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
- * which would have called the encoder cleanup. Do it manually.
- */
- if (IS_ERR(hdmi->hdmi)) {
- ret = PTR_ERR(hdmi->hdmi);
- drm_encoder_cleanup(encoder);
- }
-
- return ret;
-}
-
-static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_hdmi *hdmi = dev_get_drvdata(dev);
-
- dw_hdmi_unbind(hdmi->hdmi);
+ return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0);
}
static const struct component_ops dw_hdmi_imx_ops = {
.bind = dw_hdmi_imx_bind,
- .unbind = dw_hdmi_imx_unbind,
};
static int dw_hdmi_imx_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -261,13 +228,33 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, hdmi);
+ hdmi->dev = &pdev->dev;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get gpr\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ hdmi->hdmi = dw_hdmi_probe(pdev, match->data);
+ if (IS_ERR(hdmi->hdmi))
+ return PTR_ERR(hdmi->hdmi);
+
+ hdmi->bridge = of_drm_find_bridge(np);
+ if (!hdmi->bridge) {
+ dev_err(hdmi->dev, "Unable to find bridge\n");
+ return -ENODEV;
+ }
return component_add(&pdev->dev, &dw_hdmi_imx_ops);
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)
{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &dw_hdmi_imx_ops);
+ dw_hdmi_remove(hdmi->hdmi);
return 0;
}
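The rework above moves the encoder into a DRM-managed allocation: drmm_simple_encoder_alloc() embeds a drm_encoder in a driver structure whose lifetime is tied to the drm_device, which is why the manual drm_encoder_cleanup() error path and the unbind callback could be dropped. A minimal sketch of the pattern, with an illustrative struct and names:

	struct my_encoder {
		struct drm_encoder encoder;
		void *priv;
	};

	static int my_bind(struct drm_device *drm)
	{
		struct my_encoder *enc;

		/* freed automatically with the drm_device; no cleanup hook */
		enc = drmm_simple_encoder_alloc(drm, struct my_encoder,
						encoder, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(enc))
			return PTR_ERR(enc);

		return 0;
	}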
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 41e2978cb1eb..dbfe39e2f7f6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -22,6 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -47,12 +48,18 @@
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
+struct imx_ldb_channel;
+
+struct imx_ldb_encoder {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct imx_ldb_channel *channel;
+};
+
struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
- struct drm_connector connector;
- struct drm_encoder encoder;
/* Defines what is connected to the ldb, only one at a time */
struct drm_panel *panel;
@@ -70,12 +77,12 @@ struct imx_ldb_channel {
static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
{
- return container_of(c, struct imx_ldb_channel, connector);
+ return container_of(c, struct imx_ldb_encoder, connector)->channel;
}
static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
{
- return container_of(e, struct imx_ldb_channel, encoder);
+ return container_of(e, struct imx_ldb_encoder, encoder)->channel;
}
struct bus_mux {
@@ -411,9 +418,20 @@ static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- struct drm_encoder *encoder = &imx_ldb_ch->encoder;
+ struct imx_ldb_encoder *ldb_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
int ret;
+ ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder,
+ encoder, DRM_MODE_ENCODER_LVDS);
+ if (IS_ERR(ldb_encoder))
+ return PTR_ERR(ldb_encoder);
+
+ ldb_encoder->channel = imx_ldb_ch;
+ connector = &ldb_encoder->connector;
+ encoder = &ldb_encoder->encoder;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
@@ -429,11 +447,9 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
- ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
@@ -445,13 +461,13 @@ static int imx_ldb_register(struct drm_device *drm,
* historical reasons, the ldb driver can also work without
* a panel.
*/
- drm_connector_helper_add(&imx_ldb_ch->connector,
- &imx_ldb_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+ drm_connector_helper_add(connector,
+ &imx_ldb_connector_helper_funcs);
+ drm_connector_init_with_ddc(drm, connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
imx_ldb_ch->ddc);
- drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
@@ -559,17 +575,42 @@ static int imx_ldb_panel_ddc(struct device *dev,
static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+ if (!channel->ldb)
+ break;
+
+ ret = imx_ldb_register(drm, channel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+};
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id =
- of_match_device(imx_ldb_dt_ids, dev);
+ const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
int ret;
int i;
- imx_ldb = dev_get_drvdata(dev);
- memset(imx_ldb, 0, sizeof(*imx_ldb));
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+ if (!imx_ldb)
+ return -ENOMEM;
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
@@ -669,25 +710,20 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
channel->bus_format = bus_format;
channel->child = child;
-
- ret = imx_ldb_register(drm, channel);
- if (ret) {
- channel->child = NULL;
- goto free_child;
- }
}
- return 0;
+ platform_set_drvdata(pdev, imx_ldb);
+
+ return component_add(&pdev->dev, &imx_ldb_ops);
free_child:
of_node_put(child);
return ret;
}
-static void imx_ldb_unbind(struct device *dev, struct device *master,
- void *data)
+static int imx_ldb_remove(struct platform_device *pdev)
{
- struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
int i;
for (i = 0; i < 2; i++) {
@@ -696,28 +732,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
kfree(channel->edid);
i2c_put_adapter(channel->ddc);
}
-}
-
-static const struct component_ops imx_ldb_ops = {
- .bind = imx_ldb_bind,
- .unbind = imx_ldb_unbind,
-};
-
-static int imx_ldb_probe(struct platform_device *pdev)
-{
- struct imx_ldb *imx_ldb;
-
- imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
- if (!imx_ldb)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, imx_ldb);
-
- return component_add(&pdev->dev, &imx_ldb_ops);
-}
-static int imx_ldb_remove(struct platform_device *pdev)
-{
component_del(&pdev->dev, &imx_ldb_ops);
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2a8d2e32e7b4..bc8c3f802a15 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -99,9 +100,13 @@ enum {
TVE_MODE_VGA,
};
-struct imx_tve {
+struct imx_tve_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct imx_tve *tve;
+};
+
+struct imx_tve {
struct device *dev;
int mode;
int di_hsync_pin;
@@ -118,12 +123,12 @@ struct imx_tve {
static inline struct imx_tve *con_to_tve(struct drm_connector *c)
{
- return container_of(c, struct imx_tve, connector);
+ return container_of(c, struct imx_tve_encoder, connector)->tve;
}
static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
{
- return container_of(e, struct imx_tve, encoder);
+ return container_of(e, struct imx_tve_encoder, encoder)->tve;
}
static void tve_enable(struct imx_tve *tve)
@@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
init.parent_names = (const char **)&tve_di_parent;
tve->clk_hw_di.init = &init;
- tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di);
+ tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
if (IS_ERR(tve->di_clk)) {
dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
PTR_ERR(tve->di_clk));
@@ -428,33 +433,6 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
return 0;
}
-static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
-{
- int encoder_type;
- int ret;
-
- encoder_type = tve->mode == TVE_MODE_VGA ?
- DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
-
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
- if (ret)
- return ret;
-
- drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
-
- drm_connector_helper_add(&tve->connector,
- &imx_tve_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &tve->connector,
- &imx_tve_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- tve->ddc);
-
- drm_connector_attach_encoder(&tve->connector, &tve->encoder);
-
- return 0;
-}
-
static void imx_tve_disable_regulator(void *data)
{
struct imx_tve *tve = data;
@@ -502,8 +480,49 @@ static int of_get_tve_mode(struct device_node *np)
static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct imx_tve *tve = dev_get_drvdata(dev);
+ struct imx_tve_encoder *tvee;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int encoder_type;
+ int ret;
+
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
+
+ tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
+ encoder_type);
+ if (IS_ERR(tvee))
+ return PTR_ERR(tvee);
+
+ tvee->tve = tve;
+ encoder = &tvee->encoder;
+ connector = &tvee->connector;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+
+ drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, tve->ddc);
+ if (ret)
+ return ret;
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_tve *tve;
@@ -513,8 +532,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
int irq;
int ret;
- tve = dev_get_drvdata(dev);
- memset(tve, 0, sizeof(*tve));
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+ if (!tve)
+ return -ENOMEM;
tve->dev = dev;
@@ -621,28 +641,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = imx_tve_register(drm, tve);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_tve_ops = {
- .bind = imx_tve_bind,
-};
-
-static int imx_tve_probe(struct platform_device *pdev)
-{
- struct imx_tve *tve;
-
- tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
- if (!tve)
- return -ENOMEM;
-
platform_set_drvdata(pdev, tve);
- return component_add(&pdev->dev, &imx_tve_ops);
+ return component_add(dev, &imx_tve_ops);
}
static int imx_tve_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7ebd99ee3240..e6431a227feb 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -163,7 +164,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -322,73 +322,73 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.atomic_enable = ipu_crtc_atomic_enable,
};
-static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_crtc *ipu_crtc = ptr;
+
if (!IS_ERR_OR_NULL(ipu_crtc->dc))
ipu_dc_put(ipu_crtc->dc);
if (!IS_ERR_OR_NULL(ipu_crtc->di))
ipu_di_put(ipu_crtc->di);
}
-static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata)
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
+ struct ipu_client_platformdata *pdata)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int ret;
ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
- if (IS_ERR(ipu_crtc->dc)) {
- ret = PTR_ERR(ipu_crtc->dc);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->dc))
+ return PTR_ERR(ipu_crtc->dc);
+
+ ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+ if (ret)
+ return ret;
ipu_crtc->di = ipu_di_get(ipu, pdata->di);
- if (IS_ERR(ipu_crtc->di)) {
- ret = PTR_ERR(ipu_crtc->di);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->di))
+ return PTR_ERR(ipu_crtc->di);
return 0;
-err_out:
- ipu_put_resources(ipu_crtc);
-
- return ret;
}
-static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata, struct drm_device *drm)
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
- struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- struct drm_crtc *crtc = &ipu_crtc->base;
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
+ struct drm_device *drm = data;
+ struct ipu_plane *primary_plane;
+ struct ipu_crtc *ipu_crtc;
+ struct drm_crtc *crtc;
int dp = -EINVAL;
int ret;
- ret = ipu_get_resources(ipu_crtc, pdata);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
- ret);
- return ret;
- }
-
if (pdata->dp >= 0)
dp = IPU_DP_FLOW_SYNC_BG;
- ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(ipu_crtc->plane[0])) {
- ret = PTR_ERR(ipu_crtc->plane[0]);
- goto err_put_resources;
- }
+ primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary_plane))
+ return PTR_ERR(primary_plane);
+
+ ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
+ &primary_plane->base, NULL,
+ &ipu_crtc_funcs, NULL);
+ if (IS_ERR(ipu_crtc))
+ return PTR_ERR(ipu_crtc);
+ ipu_crtc->dev = dev;
+ ipu_crtc->plane[0] = primary_plane;
+
+ crtc = &ipu_crtc->base;
crtc->port = pdata->of_node;
drm_crtc_helper_add(crtc, &ipu_helper_funcs);
- drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
- &ipu_crtc_funcs, NULL);
- ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
+ ret = ipu_get_resources(drm, ipu_crtc, pdata);
if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
+ dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
ret);
- goto err_put_resources;
+ return ret;
}
/* If this crtc is using the DP, add an overlay plane */
@@ -397,16 +397,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1])) {
+ if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
- } else {
- ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 1 "
- "resources failed with %d.\n", ret);
- goto err_put_plane0_res;
- }
- }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -414,58 +406,21 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane1_res;
+ return ret;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
return 0;
-
-err_put_plane1_res:
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
-err_put_plane0_res:
- ipu_plane_put_resources(ipu_crtc->plane[0]);
-err_put_resources:
- ipu_put_resources(ipu_crtc);
-
- return ret;
-}
-
-static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
-{
- struct ipu_client_platformdata *pdata = dev->platform_data;
- struct drm_device *drm = data;
- struct ipu_crtc *ipu_crtc;
-
- ipu_crtc = dev_get_drvdata(dev);
- memset(ipu_crtc, 0, sizeof(*ipu_crtc));
-
- ipu_crtc->dev = dev;
-
- return ipu_crtc_init(ipu_crtc, pdata, drm);
-}
-
-static void ipu_drm_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
-
- ipu_put_resources(ipu_crtc);
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
.bind = ipu_drm_bind,
- .unbind = ipu_drm_unbind,
};
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ipu_crtc *ipu_crtc;
int ret;
if (!dev->platform_data)
@@ -475,12 +430,6 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
- if (!ipu_crtc)
- return -ENOMEM;
-
- dev_set_drvdata(dev, ipu_crtc);
-
return component_add(dev, &ipu_crtc_ops);
}
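ipu_get_resources() above registers its release action with drmm_add_action_or_reset() immediately after the first successful acquisition, which is what lets every later error path become a bare return. A hedged sketch of the pattern (bar_* names are placeholders):

static void bar_release(struct drm_device *drm, void *ptr)
{
	struct bar *bar = ptr;

	/* Guarded puts: safe even if later acquisitions never happened. */
	if (!IS_ERR_OR_NULL(bar->res))
		bar_put(bar->res);
}

static int bar_acquire(struct drm_device *drm, struct bar *bar)
{
	int ret;

	bar->res = bar_get();
	if (IS_ERR(bar->res))
		return PTR_ERR(bar->res);

	/*
	 * "or_reset" means the action runs immediately if registration
	 * fails; otherwise it runs when the drm_device is released.
	 */
	ret = drmm_add_action_or_reset(drm, bar_release, bar);
	if (ret)
		return ret;

	/* Any further acquisition may now fail with a bare return. */
	return 0;
}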
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 8a4235d9d9f1..075508051b5f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <video/imx-ipu-v3.h>
@@ -142,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
fb->format->cpp[2] * x - eba;
}
-void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_plane *ipu_plane = ptr;
+
if (!IS_ERR_OR_NULL(ipu_plane->dp))
ipu_dp_put(ipu_plane->dp);
if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
@@ -154,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
ipu_idmac_put(ipu_plane->alpha_ch);
}
-int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+static int ipu_plane_get_resources(struct drm_device *dev,
+ struct ipu_plane *ipu_plane)
{
int ret;
int alpha_ch;
@@ -166,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
return ret;
}
+ ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+ if (ret)
+ return ret;
+
alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
if (alpha_ch >= 0) {
ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
@@ -181,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dmfc)) {
ret = PTR_ERR(ipu_plane->dmfc);
DRM_ERROR("failed to get dmfc: ret %d\n", ret);
- goto err_out;
+ return ret;
}
if (ipu_plane->dp_flow >= 0) {
@@ -189,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dp)) {
ret = PTR_ERR(ipu_plane->dp);
DRM_ERROR("failed to get dp flow: %d\n", ret);
- goto err_out;
+ return ret;
}
}
return 0;
-err_out:
- ipu_plane_put_resources(ipu_plane);
-
- return ret;
}
static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
@@ -262,16 +266,6 @@ void ipu_plane_disable_deferred(struct drm_plane *plane)
}
EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
-static void ipu_plane_destroy(struct drm_plane *plane)
-{
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
-}
-
static void ipu_plane_state_reset(struct drm_plane *plane)
{
unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
@@ -336,7 +330,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = ipu_plane_destroy,
.reset = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state = ipu_plane_destroy_state,
@@ -834,10 +827,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
dma, dp, possible_crtcs);
- ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
- if (!ipu_plane) {
- DRM_ERROR("failed to allocate plane\n");
- return ERR_PTR(-ENOMEM);
+ ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+ possible_crtcs, &ipu_plane_funcs,
+ ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats),
+ modifiers, type, NULL);
+ if (IS_ERR(ipu_plane)) {
+ DRM_ERROR("failed to allocate and initialize %s plane\n",
+ zpos ? "overlay" : "primary");
+ return ipu_plane;
}
ipu_plane->ipu = ipu;
@@ -847,22 +845,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
if (ipu_prg_present(ipu))
modifiers = pre_format_modifiers;
- ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
- &ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats),
- modifiers, type, NULL);
- if (ret) {
- DRM_ERROR("failed to initialize plane\n");
- kfree(ipu_plane);
- return ERR_PTR(ret);
- }
-
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
- drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+ ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+ 1);
else
- drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+ ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+ 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ipu_plane_get_resources(dev, ipu_plane);
+ if (ret) {
+ DRM_ERROR("failed to get %s plane resources: %pe\n",
+ zpos ? "overlay" : "primary", ERR_PTR(ret));
+ return ERR_PTR(ret);
+ }
return ipu_plane;
}
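A note on the error message in the last hunk: %pe decodes an ERR_PTR()-encoded pointer into a symbolic errno name (when CONFIG_SYMBOLIC_ERRNAME is enabled), so it must be fed ERR_PTR(ret) rather than a plain pointer:

int ret = -ENODEV;

DRM_ERROR("failed to get resources: %pe\n", ERR_PTR(ret)); /* prints -ENODEV */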
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index ffacbcdd2f98..6d544e6ce63f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -41,9 +41,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-int ipu_plane_get_resources(struct ipu_plane *plane);
-void ipu_plane_put_resources(struct ipu_plane *plane);
-
int ipu_plane_irq(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2eb8df4697df..e0412e694fd9 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -22,10 +23,14 @@
#include "imx-drm.h"
-struct imx_parallel_display {
+struct imx_parallel_display_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
struct drm_bridge bridge;
+ struct imx_parallel_display *pd;
+};
+
+struct imx_parallel_display {
struct device *dev;
void *edid;
u32 bus_format;
@@ -37,12 +42,12 @@ struct imx_parallel_display {
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
{
- return container_of(c, struct imx_parallel_display, connector);
+ return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
}
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display, bridge);
+ return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
}
static int imx_pd_connector_get_modes(struct drm_connector *connector)
@@ -74,7 +79,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return ret;
drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num_modes++;
}
@@ -253,12 +258,26 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
-static int imx_pd_register(struct drm_device *drm,
- struct imx_parallel_display *imxpd)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_encoder *encoder = &imxpd->encoder;
+ struct drm_device *drm = data;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
+ struct imx_parallel_display_encoder *imxpd_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
int ret;
+ imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
+ encoder, DRM_MODE_ENCODER_NONE);
+ if (IS_ERR(imxpd_encoder))
+ return PTR_ERR(imxpd_encoder);
+
+ imxpd_encoder->pd = imxpd;
+ connector = &imxpd_encoder->connector;
+ encoder = &imxpd_encoder->encoder;
+ bridge = &imxpd_encoder->bridge;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
@@ -268,39 +287,37 @@ static int imx_pd_register(struct drm_device *drm,
* immediately since the current state is ON
* at this point.
*/
- imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
-
- imxpd->bridge.funcs = &imx_pd_bridge_funcs;
- drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+ connector->dpms = DRM_MODE_DPMS_OFF;
- if (!imxpd->next_bridge) {
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector,
- &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- }
+ bridge->funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, bridge, NULL, 0);
if (imxpd->next_bridge) {
- ret = drm_bridge_attach(encoder, imxpd->next_bridge,
- &imxpd->bridge, 0);
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
return ret;
}
} else {
- drm_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_helper_add(connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, connector, &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
}
-static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+};
+
+static int imx_pd_probe(struct platform_device *pdev)
{
- struct drm_device *drm = data;
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
@@ -309,8 +326,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
u32 bus_format = 0;
const char *fmt;
- imxpd = dev_get_drvdata(dev);
- memset(imxpd, 0, sizeof(*imxpd));
+ imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
+ if (!imxpd)
+ return -ENOMEM;
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
@@ -337,28 +355,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->dev = dev;
- ret = imx_pd_register(drm, imxpd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_pd_ops = {
- .bind = imx_pd_bind,
-};
-
-static int imx_pd_probe(struct platform_device *pdev)
-{
- struct imx_parallel_display *imxpd;
-
- imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
-
platform_set_drvdata(pdev, imxpd);
- return component_add(&pdev->dev, &imx_pd_ops);
+ return component_add(dev, &imx_pd_ops);
}
static int imx_pd_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 477d5387e43e..3b57f8be007c 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -4,6 +4,7 @@ config DRM_INGENIC
depends on DRM
depends on CMA
depends on OF
+ depends on COMMON_CLK
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 368bfef8b340..7bb31fbee29d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
@@ -190,15 +191,15 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
{
unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht;
- vpe = mode->vsync_end - mode->vsync_start;
- vds = mode->vtotal - mode->vsync_start;
- vde = vds + mode->vdisplay;
- vt = vde + mode->vsync_start - mode->vdisplay;
+ vpe = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vds = mode->crtc_vtotal - mode->crtc_vsync_start;
+ vde = vds + mode->crtc_vdisplay;
+ vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay;
- hpe = mode->hsync_end - mode->hsync_start;
- hds = mode->htotal - mode->hsync_start;
- hde = hds + mode->hdisplay;
- ht = hde + mode->hsync_start - mode->hdisplay;
+ hpe = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hds = mode->crtc_htotal - mode->crtc_hsync_start;
+ hde = hds + mode->crtc_hdisplay;
+ ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay;
regmap_write(priv->map, JZ_REG_LCD_VSYNC,
0 << JZ_LCD_VSYNC_VPS_OFFSET |
@@ -333,7 +334,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event = crtc_state->event;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
+ ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode);
priv->update_clk_rate = true;
}
@@ -589,7 +590,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_connector *conn = conn_state->connector;
struct drm_display_info *info = &conn->display_info;
- unsigned int cfg;
+ unsigned int cfg, rgbcfg = 0;
priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
@@ -626,6 +627,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
break;
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB;
+ fallthrough;
case MEDIA_BUS_FMT_RGB888_3X8:
cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
break;
@@ -636,6 +640,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
+ regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
}
static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -643,6 +648,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_info *info = &conn_state->connector->display_info;
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
if (info->num_bus_formats != 1)
return -EINVAL;
@@ -651,10 +657,23 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
switch (*info->bus_formats) {
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ /*
+ * The LCD controller expects timing values in dot-clock ticks,
+ * which is 3x the timing values in pixels when using a 3x8-bit
+ * display; but it will count the display area size in pixels
+ * either way. Go figure.
+ */
+ mode->crtc_clock = mode->clock * 3;
+ mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2;
+ mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2;
+ mode->crtc_hdisplay = mode->hdisplay;
+ mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2;
+ return 0;
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
- case MEDIA_BUS_FMT_RGB888_3X8:
return 0;
default:
return -EINVAL;
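To make the dot-clock conversion above concrete, a worked example with hypothetical 800-pixel-wide panel timings (hdisplay = 800, hsync_start = 840, hsync_end = 864, htotal = 900):

/* Display area stays in pixels; blanking is counted in dot-clock ticks. */
mode->crtc_hdisplay    = 800;               /* unchanged              */
mode->crtc_hsync_start = 840 * 3 - 800 * 2; /* = 920                  */
mode->crtc_hsync_end   = 864 * 3 - 800 * 2; /* = 992                  */
mode->crtc_htotal      = 900 * 3 - 800 * 2; /* = 1100                 */
/* Front porch: 920 - 800 = 120 ticks = (840 - 800) pixels * 3 ticks. */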
@@ -755,8 +774,6 @@ static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = {
.enable_vblank = ingenic_drm_enable_vblank,
.disable_vblank = ingenic_drm_disable_vblank,
-
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
@@ -1167,6 +1184,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(&priv->drm);
+}
+
+static int __maybe_unused ingenic_drm_resume(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(&priv->drm);
+}
+
+static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+
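For context, SIMPLE_DEV_PM_OPS() wires one suspend/resume pair into every system-sleep slot, roughly as sketched below, while pm_ptr() (used in the driver struct further down) compiles the ops away entirely when CONFIG_PM is disabled:

/* Approximate expansion of SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ...). */
static const struct dev_pm_ops example_pm_ops = {
	.suspend  = ingenic_drm_suspend, .resume  = ingenic_drm_resume,
	.freeze   = ingenic_drm_suspend, .thaw    = ingenic_drm_resume,
	.poweroff = ingenic_drm_suspend, .restore = ingenic_drm_resume,
};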
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
@@ -1246,6 +1279,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
+ .pm = pm_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 9b48ce02803d..1b4347f7f084 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -31,6 +31,7 @@
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
+#define JZ_REG_LCD_RGBC 0x90
#define JZ_REG_LCD_OSDC 0x100
#define JZ_REG_LCD_OSDCTRL 0x104
#define JZ_REG_LCD_OSDS 0x108
@@ -138,6 +139,19 @@
#define JZ_LCD_STATE_SOF_IRQ BIT(4)
#define JZ_LCD_STATE_DISABLED BIT(0)
+#define JZ_LCD_RGBC_ODD_RGB (0x0 << 4)
+#define JZ_LCD_RGBC_ODD_RBG (0x1 << 4)
+#define JZ_LCD_RGBC_ODD_GRB (0x2 << 4)
+#define JZ_LCD_RGBC_ODD_GBR (0x3 << 4)
+#define JZ_LCD_RGBC_ODD_BRG (0x4 << 4)
+#define JZ_LCD_RGBC_ODD_BGR (0x5 << 4)
+#define JZ_LCD_RGBC_EVEN_RGB (0x0 << 0)
+#define JZ_LCD_RGBC_EVEN_RBG (0x1 << 0)
+#define JZ_LCD_RGBC_EVEN_GRB (0x2 << 0)
+#define JZ_LCD_RGBC_EVEN_GBR (0x3 << 0)
+#define JZ_LCD_RGBC_EVEN_BRG (0x4 << 0)
+#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0)
+
#define JZ_LCD_OSDC_OSDEN BIT(0)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a31a840ce634..f64e06e1067d 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -400,7 +400,7 @@ static void kmb_irq_reset(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
-static struct drm_driver kmb_driver = {
+static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = kmb_isr,
@@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, kmb_of_match);
static int __maybe_unused kmb_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;
+ struct kmb_drm_private *kmb = to_kmb(drm);
drm_kms_helper_poll_disable(drm);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 8448d1edb553..be8eea3830c1 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -114,6 +114,9 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
kmb = to_kmb(plane->dev);
+ if (WARN_ON(plane_id >= KMB_MAX_PLANES))
+ return;
+
switch (plane_id) {
case LAYER_0:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 63b4c5643f9c..5cc20b403a25 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev)
int ret;
/* resume GPU if it has been suspended by runtime PM */
- ret = pm_runtime_get_sync(ldev->dev);
+ ret = pm_runtime_resume_and_get(ldev->dev);
if (ret < 0)
return ret;
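The one-line change above matters because pm_runtime_get_sync() takes a usage reference even when the resume fails, forcing callers to put it back manually. pm_runtime_resume_and_get() folds that fixup in; its behaviour is approximately:

static inline int example_resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* always takes a usage reference */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop it again on failure */
		return ret;
	}

	return 0;
}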
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index a892edec5563..dc54a7a69005 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-mediatek-drm-y := mtk_disp_color.o \
+mediatek-drm-y := mtk_disp_ccorr.o \
+ mtk_disp_color.o \
+ mtk_disp_gamma.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
mtk_drm_crtc.o \
- mtk_drm_ddp.o \
mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
mtk_drm_gem.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
new file mode 100644
index 000000000000..141cb36b9c07
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define CCORR_ENGINE_EN BIT(1)
+#define CCORR_GAMMA_OFF BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP BIT(3)
+#define DISP_CCORR_SIZE 0x0030
+#define DISP_CCORR_COEF_0 0x0080
+#define DISP_CCORR_COEF_1 0x0084
+#define DISP_CCORR_COEF_2 0x0088
+#define DISP_CCORR_COEF_3 0x008C
+#define DISP_CCORR_COEF_4 0x0090
+
+struct mtk_disp_ccorr_data {
+ u32 matrix_bits;
+};
+
+/**
+ * struct mtk_disp_ccorr - DISP_CCORR driver structure
+ * @clk: clock for the CCORR hardware block
+ * @regs: base of the mapped CCORR registers
+ * @cmdq_reg: CMDQ client register description for this block
+ * @data: platform-specific CCORR configuration
+ */
+struct mtk_disp_ccorr {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+ const struct mtk_disp_ccorr_data *data;
+};
+
+int mtk_ccorr_clk_enable(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ccorr->clk);
+}
+
+void mtk_ccorr_clk_disable(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ccorr->clk);
+}
+
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &ccorr->cmdq_reg, ccorr->regs,
+ DISP_CCORR_SIZE);
+ mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, &ccorr->cmdq_reg, ccorr->regs,
+ DISP_CCORR_CFG);
+}
+
+void mtk_ccorr_start(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ writel(CCORR_EN, ccorr->regs + DISP_CCORR_EN);
+}
+
+void mtk_ccorr_stop(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN);
+}
+
+/* Converts a DRM S31.32 value to the HW S1.n format. */
+static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n)
+{
+ u16 r;
+
+ /* Sign bit. */
+ r = in & BIT_ULL(63) ? BIT(n + 1) : 0;
+
+ if ((in & GENMASK_ULL(62, 33)) > 0) {
+ /*
+ * The identity value 0x100000000 maps to 0x400 (mt8183) or
+ * 0x800 (mt8192); any larger magnitude saturates to the
+ * maximum, GENMASK(n, 0).
+ */
+ r |= GENMASK(n, 0);
+ } else {
+ /* Take the n + 1 most significant fractional bits. */
+ r |= (in >> (32 - n)) & GENMASK(n, 0);
+ }
+
+ return r;
+}
+
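A worked example of the conversion above with n = 10 (the mt8183 matrix_bits value); the DRM CTM identity coefficient 1.0 is 0x1_0000_0000 in S31.32:

/* 1.0: sign clear, no bits above bit 32, so no saturation applies. */
(0x100000000ULL >> (32 - 10)) & GENMASK(10, 0)	/* == 0x400           */
/* 0.5 (0x080000000) maps to 0x200 the same way; any magnitude of   */
/* 2.0 or more sets a bit in GENMASK_ULL(62, 33) and saturates to   */
/* GENMASK(10, 0) == 0x7ff.                                         */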
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+ struct drm_property_blob *blob = state->ctm;
+ struct drm_color_ctm *ctm;
+ const u64 *input;
+ uint16_t coeffs[9] = { 0 };
+ int i;
+ struct cmdq_pkt *cmdq_pkt = NULL;
+ u32 matrix_bits = ccorr->data->matrix_bits;
+
+ if (!blob)
+ return;
+
+ ctm = (struct drm_color_ctm *)blob->data;
+ input = ctm->matrix;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+ coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits);
+
+ mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0);
+ mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_1);
+ mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_2);
+ mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_3);
+ mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_4);
+}
+
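mtk_ccorr_ctm_set() above deliberately passes a NULL cmdq_pkt. As used throughout this series, mtk_ddp_write() dispatches either to the GCE command queue or to a plain MMIO write; a hedged sketch of that helper's shape (an approximation, not the verbatim implementation):

void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
		   struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
		   unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (cmdq_pkt) {
		/* Queue the write for the GCE to perform later. */
		cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
			       cmdq_reg->offset + offset, value);
		return;
	}
#endif
	/* No packet: write the register directly from the CPU. */
	writel(value, regs + offset);
}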
+static int mtk_disp_ccorr_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_ccorr_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_ccorr_component_ops = {
+ .bind = mtk_disp_ccorr_bind,
+ .unbind = mtk_disp_ccorr_unbind,
+};
+
+static int mtk_disp_ccorr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_ccorr *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get ccorr clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap ccorr\n");
+ return PTR_ERR(priv->regs);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
+
+ priv->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_ccorr_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_ccorr_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
+ .matrix_bits = 10,
+};
+
+static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8183-disp-ccorr",
+ .data = &mt8183_ccorr_driver_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
+
+struct platform_driver mtk_disp_ccorr_driver = {
+ .probe = mtk_disp_ccorr_probe,
+ .remove = mtk_disp_ccorr_remove,
+ .driver = {
+ .name = "mediatek-disp-ccorr",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_ccorr_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6048cbc9f0ec..63f411ab393b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -36,64 +37,55 @@ struct mtk_disp_color_data {
* @data: platform colour driver data
*/
struct mtk_disp_color {
- struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_color_data *data;
};
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+int mtk_color_clk_enable(struct device *dev)
{
- return container_of(comp, struct mtk_disp_color, ddp_comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(color->clk);
}
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_color_clk_disable(struct device *dev)
{
- struct mtk_disp_color *color = comp_to_color(comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
- mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
+ clk_disable_unprepare(color->clk);
}
-static void mtk_color_start(struct mtk_ddp_comp *comp)
+void mtk_color_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_color *color = comp_to_color(comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
- writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
- comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START(color));
+ mtk_ddp_write(cmdq_pkt, w, &color->cmdq_reg, color->regs, DISP_COLOR_WIDTH(color));
+ mtk_ddp_write(cmdq_pkt, h, &color->cmdq_reg, color->regs, DISP_COLOR_HEIGHT(color));
}
-static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
- .config = mtk_color_config,
- .start = mtk_color_start,
-};
+void mtk_color_start(struct device *dev)
+{
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+ writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+ color->regs + DISP_COLOR_CFG_MAIN);
+ writel(0x1, color->regs + DISP_COLOR_START(color));
+}
static int mtk_disp_color_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_color *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
static void mtk_disp_color_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_color *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_color_component_ops = {
@@ -105,31 +97,32 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
- int comp_id;
+ struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get color clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_color_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap color\n");
+ return PTR_ERR(priv->regs);
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_color_component_ops);
@@ -141,8 +134,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);

return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
new file mode 100644
index 000000000000..cafd9df2d63b
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef _MTK_DISP_DRV_H_
+#define _MTK_DISP_DRV_H_
+
+#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_drm_plane.h"
+
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state);
+int mtk_ccorr_clk_enable(struct device *dev);
+void mtk_ccorr_clk_disable(struct device *dev);
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_ccorr_start(struct device *dev);
+void mtk_ccorr_stop(struct device *dev);
+
+void mtk_color_bypass_shadow(struct device *dev);
+int mtk_color_clk_enable(struct device *dev);
+void mtk_color_clk_disable(struct device *dev);
+void mtk_color_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_color_start(struct device *dev);
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+ unsigned int bpc, unsigned int cfg,
+ unsigned int dither_en, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_dpi_start(struct device *dev);
+void mtk_dpi_stop(struct device *dev);
+
+void mtk_dsi_ddp_start(struct device *dev);
+void mtk_dsi_ddp_stop(struct device *dev);
+
+int mtk_gamma_clk_enable(struct device *dev);
+void mtk_gamma_clk_disable(struct device *dev);
+void mtk_gamma_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state);
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state);
+void mtk_gamma_start(struct device *dev);
+void mtk_gamma_stop(struct device *dev);
+
+void mtk_ovl_bgclr_in_on(struct device *dev);
+void mtk_ovl_bgclr_in_off(struct device *dev);
+void mtk_ovl_bypass_shadow(struct device *dev);
+int mtk_ovl_clk_enable(struct device *dev);
+void mtk_ovl_clk_disable(struct device *dev);
+void mtk_ovl_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *mtk_state);
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_ovl_layer_nr(struct device *dev);
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_start(struct device *dev);
+void mtk_ovl_stop(struct device *dev);
+unsigned int mtk_ovl_supported_rotations(struct device *dev);
+void mtk_ovl_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+void mtk_ovl_disable_vblank(struct device *dev);
+
+void mtk_rdma_bypass_shadow(struct device *dev);
+int mtk_rdma_clk_enable(struct device *dev);
+void mtk_rdma_clk_disable(struct device *dev);
+void mtk_rdma_config(struct device *dev, unsigned int width,
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_rdma_layer_nr(struct device *dev);
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_rdma_start(struct device *dev);
+void mtk_rdma_stop(struct device *dev);
+void mtk_rdma_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+void mtk_rdma_disable_vblank(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
new file mode 100644
index 000000000000..3ebf91e0ab41
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_GAMMA_EN 0x0000
+#define GAMMA_EN BIT(0)
+#define DISP_GAMMA_CFG 0x0020
+#define GAMMA_LUT_EN BIT(1)
+#define GAMMA_DITHERING BIT(2)
+#define DISP_GAMMA_SIZE 0x0030
+#define DISP_GAMMA_LUT 0x0700
+
+#define LUT_10BIT_MASK 0x03ff
+
+struct mtk_disp_gamma_data {
+ bool has_dither;
+};
+
+/**
+ * struct mtk_disp_gamma - DISP_GAMMA driver structure
+ * @clk: clock for the GAMMA hardware block
+ * @regs: base of the mapped GAMMA registers
+ * @cmdq_reg: CMDQ client register description for this block
+ * @data: platform-specific GAMMA configuration
+ */
+struct mtk_disp_gamma {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+ const struct mtk_disp_gamma_data *data;
+};
+
+int mtk_gamma_clk_enable(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(gamma->clk);
+}
+
+void mtk_gamma_clk_disable(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gamma->clk);
+}
+
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state)
+{
+ unsigned int i, reg;
+ struct drm_color_lut *lut;
+ void __iomem *lut_base;
+ u32 word;
+
+ if (state->gamma_lut) {
+ reg = readl(regs + DISP_GAMMA_CFG);
+ reg = reg | GAMMA_LUT_EN;
+ writel(reg, regs + DISP_GAMMA_CFG);
+ lut_base = regs + DISP_GAMMA_LUT;
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < MTK_LUT_SIZE; i++) {
+ word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+ (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+ ((lut[i].blue >> 6) & LUT_10BIT_MASK);
+ writel(word, (lut_base + i * 4));
+ }
+ }
+}
+
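A worked example of the LUT packing in mtk_gamma_set_common(): a full-scale white entry truncates each 16-bit channel to 10 bits and packs them R|G|B:

u32 word = ((0xffff >> 6) & LUT_10BIT_MASK) << 20 |	/* red   -> 0x3ff */
	   ((0xffff >> 6) & LUT_10BIT_MASK) << 10 |	/* green -> 0x3ff */
	   ((0xffff >> 6) & LUT_10BIT_MASK);		/* blue  -> 0x3ff */
/* word == 0x3fffffff, written at DISP_GAMMA_LUT + i * 4. */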
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ mtk_gamma_set_common(gamma->regs, state);
+}
+
+void mtk_gamma_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &gamma->cmdq_reg, gamma->regs,
+ DISP_GAMMA_SIZE);
+ if (gamma->data && gamma->data->has_dither)
+ mtk_dither_set_common(gamma->regs, &gamma->cmdq_reg, bpc,
+ DISP_GAMMA_CFG, GAMMA_DITHERING, cmdq_pkt);
+}
+
+void mtk_gamma_start(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ writel(GAMMA_EN, gamma->regs + DISP_GAMMA_EN);
+}
+
+void mtk_gamma_stop(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, gamma->regs + DISP_GAMMA_EN);
+}
+
+static int mtk_disp_gamma_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_gamma_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_gamma_component_ops = {
+ .bind = mtk_disp_gamma_bind,
+ .unbind = mtk_disp_gamma_unbind,
+};
+
+static int mtk_disp_gamma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_gamma *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get gamma clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap gamma\n");
+ return PTR_ERR(priv->regs);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
+
+ priv->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_gamma_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_gamma_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
+ .has_dither = true,
+};
+
+static const struct of_device_id mtk_disp_gamma_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-gamma",
+ .data = &mt8173_gamma_driver_data},
+ { .compatible = "mediatek,mt8183-disp-gamma"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
+
+struct platform_driver mtk_disp_gamma_driver = {
+ .probe = mtk_disp_gamma_probe,
+ .remove = mtk_disp_gamma_remove,
+ .driver = {
+ .name = "mediatek-disp-gamma",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_gamma_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 74ef6fc0528b..961f87f8d4d1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -23,6 +24,7 @@
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_LAYER_SMI_ID_EN BIT(0)
#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
@@ -61,6 +63,7 @@ struct mtk_disp_ovl_data {
unsigned int gmc_bits;
unsigned int layer_nr;
bool fmt_rgb565_is_0;
+ bool smi_id_en;
};
/**
@@ -70,88 +73,124 @@ struct mtk_disp_ovl_data {
* @data: platform data
*/
struct mtk_disp_ovl {
- struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_ovl_data *data;
+ void (*vblank_cb)(void *data);
+ void *vblank_cb_data;
};
-static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_ovl, ddp_comp);
-}
-
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
- struct mtk_ddp_comp *ovl = &priv->ddp_comp;
/* Clear frame completion interrupt */
- writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+ writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
- if (!priv->crtc)
+ if (!priv->vblank_cb)
return IRQ_NONE;
- mtk_crtc_ddp_irq(priv->crtc, ovl);
+ priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
-static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+void mtk_ovl_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ ovl->vblank_cb = vblank_cb;
+ ovl->vblank_cb_data = vblank_cb_data;
+ writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+ writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+void mtk_ovl_disable_vblank(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ ovl->vblank_cb = NULL;
+ ovl->vblank_cb_data = NULL;
+ writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+int mtk_ovl_clk_enable(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- ovl->crtc = crtc;
- writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
- writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
+ return clk_prepare_enable(ovl->clk);
}
-static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_ovl_clk_disable(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- ovl->crtc = NULL;
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
+ clk_disable_unprepare(ovl->clk);
}
-static void mtk_ovl_start(struct mtk_ddp_comp *comp)
+void mtk_ovl_start(struct device *dev)
{
- writel_relaxed(0x1, comp->regs + DISP_REG_OVL_EN);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ if (ovl->data->smi_id_en) {
+ unsigned int reg;
+
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_LAYER_SMI_ID_EN;
+ writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ }
+ writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
}
-static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
+void mtk_ovl_stop(struct device *dev)
{
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_EN);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
+ if (ovl->data->smi_id_en) {
+ unsigned int reg;
+
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_LAYER_SMI_ID_EN;
+ writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ }
+
}
-static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
if (w != 0 && h != 0)
- mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ROI_SIZE);
- mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
+ mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR);
- mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
- mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
-static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_layer_nr(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->layer_nr;
}
-static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_supported_rotations(struct device *dev)
{
return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
}
-static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *mtk_state)
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
{
struct drm_plane_state *state = &mtk_state->base;
unsigned int rotation = 0;
@@ -178,15 +217,15 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
return 0;
}
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
unsigned int gmc_thrshd_l;
unsigned int gmc_thrshd_h;
unsigned int gmc_value;
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, 0x1, comp,
+ mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
gmc_thrshd_l = GMC_THRESHOLD_LOW >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
@@ -198,17 +237,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
mtk_ddp_write(cmdq_pkt, gmc_value,
- comp, DISP_REG_OVL_RDMA_GMC(idx));
- mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
+ mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
}
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
- mtk_ddp_write(cmdq_pkt, 0, comp,
+ mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
}
@@ -248,11 +289,11 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
}
}
-static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -262,12 +303,12 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
if (!pending->enable) {
- mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+ mtk_ovl_layer_off(dev, idx, cmdq_pkt);
return;
}
con = ovl_fmt_convert(ovl, fmt);
- if (state->base.fb->format->has_alpha)
+ if (state->base.fb && state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -280,76 +321,49 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
addr += pending->pitch - 1;
}
- mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_PITCH(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_OFFSET(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- mtk_ovl_layer_on(comp, idx, cmdq_pkt);
+ mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
-static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_on(struct device *dev)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
- reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg | OVL_BGCLR_SEL_IN;
- writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
-static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_off(struct device *dev)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
- reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg & ~OVL_BGCLR_SEL_IN;
- writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
-static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
- .config = mtk_ovl_config,
- .start = mtk_ovl_start,
- .stop = mtk_ovl_stop,
- .enable_vblank = mtk_ovl_enable_vblank,
- .disable_vblank = mtk_ovl_disable_vblank,
- .supported_rotations = mtk_ovl_supported_rotations,
- .layer_nr = mtk_ovl_layer_nr,
- .layer_check = mtk_ovl_layer_check,
- .layer_config = mtk_ovl_layer_config,
- .bgclr_in_on = mtk_ovl_bgclr_in_on,
- .bgclr_in_off = mtk_ovl_bgclr_in_off,
-};
-
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_ovl_component_ops = {
@@ -361,7 +375,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ovl *priv;
- int comp_id;
+ struct resource *res;
int irq;
int ret;
@@ -373,27 +387,25 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- priv->data = of_device_get_match_data(dev);
-
- comp_id = mtk_ddp_comp_get_id(dev->of_node,
- priv->data->layer_nr == 4 ?
- MTK_DISP_OVL :
- MTK_DISP_OVL_2L);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get ovl clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_ovl_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap ovl\n");
+ return PTR_ERR(priv->regs);
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+ priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -412,8 +424,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
-
return 0;
}
@@ -431,11 +441,29 @@ static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
};
+static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 4,
+ .fmt_rgb565_is_0 = true,
+};
+
+static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 2,
+ .fmt_rgb565_is_0 = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = &mt2701_ovl_driver_data},
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = &mt8173_ovl_driver_data},
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = &mt8183_ovl_driver_data},
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = &mt8183_ovl_2l_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
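
Two related patterns recur through the OVL conversion above. First, the container_of() helper (comp_to_ovl) gives way to driver data: probe stores the private struct with platform_set_drvdata(), and every exported op recovers it from a bare struct device. A minimal sketch under hypothetical names:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
	void __iomem *regs;	/* mapped in probe, used by the ops */
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Stash the state on the device... */
	platform_set_drvdata(pdev, priv);
	return 0;
}

/* ...so an exported op needs only the struct device to find it again. */
void demo_start(struct device *dev)
{
	struct demo_priv *priv = dev_get_drvdata(dev);

	writel(0x1, priv->regs + 0x0);	/* e.g. an enable register */
}

Second, the ops lose their static qualifier, presumably declared through the newly included mtk_disp_drv.h, so that mtk_drm_ddp_comp.c can reference them directly in the function tables added later in this diff.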
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index d46b8ae1d080..728aaadfea8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -61,83 +62,105 @@ struct mtk_disp_rdma_data {
* @data: local driver data
*/
struct mtk_disp_rdma {
- struct mtk_ddp_comp ddp_comp;
- struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_rdma_data *data;
+ void (*vblank_cb)(void *data);
+ void *vblank_cb_data;
+ u32 fifo_size;
};
-static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_rdma, ddp_comp);
-}
-
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
- struct mtk_ddp_comp *rdma = &priv->ddp_comp;
/* Clear frame completion interrupt */
- writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
- if (!priv->crtc)
+ if (!priv->vblank_cb)
return IRQ_NONE;
- mtk_crtc_ddp_irq(priv->crtc, rdma);
+ priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
-static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
+static void rdma_update_bits(struct device *dev, unsigned int reg,
unsigned int mask, unsigned int val)
{
- unsigned int tmp = readl(comp->regs + reg);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+ unsigned int tmp = readl(rdma->regs + reg);
tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, comp->regs + reg);
+ writel(tmp, rdma->regs + reg);
}
-static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+void mtk_rdma_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
- rdma->crtc = crtc;
- rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
+ rdma->vblank_cb = vblank_cb;
+ rdma->vblank_cb_data = vblank_cb_data;
+ rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
-static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_rdma_disable_vblank(struct device *dev)
+{
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+ rdma->vblank_cb = NULL;
+ rdma->vblank_cb_data = NULL;
+ rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+}
+
+int mtk_rdma_clk_enable(struct device *dev)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
- rdma->crtc = NULL;
- rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+ return clk_prepare_enable(rdma->clk);
}
-static void mtk_rdma_start(struct mtk_ddp_comp *comp)
+void mtk_rdma_clk_disable(struct device *dev)
{
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rdma->clk);
+}
+
+void mtk_rdma_start(struct device *dev)
+{
+ rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
RDMA_ENGINE_EN);
}
-static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
+void mtk_rdma_stop(struct device *dev)
{
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
+ rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
}
-static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
- unsigned int height, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_config(struct device *dev, unsigned int width,
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+ u32 rdma_fifo_size;
- mtk_ddp_write_mask(cmdq_pkt, width, comp,
+ mtk_ddp_write_mask(cmdq_pkt, width, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0, 0xfff);
- mtk_ddp_write_mask(cmdq_pkt, height, comp,
+ mtk_ddp_write_mask(cmdq_pkt, height, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
+ if (rdma->fifo_size)
+ rdma_fifo_size = rdma->fifo_size;
+ else
+ rdma_fifo_size = RDMA_FIFO_SIZE(rdma);
+
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
* Keep the FIFO pseudo size reset default of 8 KiB. Set the
@@ -146,9 +169,9 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
*/
threshold = width * height * vrefresh * 4 * 7 / 1000000;
reg = RDMA_FIFO_UNDERFLOW_EN |
- RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
+ RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
- mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
+ mtk_ddp_write(cmdq_pkt, reg, &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -188,16 +211,16 @@ static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
}
}
-static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_rdma_layer_nr(struct device *dev)
{
return 1;
}
-static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -205,53 +228,34 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
- mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, &rdma->cmdq_reg, rdma->regs, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
- mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
- comp, DISP_REG_RDMA_SIZE_CON_0,
+ &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_INT_MTX_SEL);
} else {
- mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ mtk_ddp_write_mask(cmdq_pkt, 0, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
}
- mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
- mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
- mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, &rdma->cmdq_reg, rdma->regs,
+ DISP_RDMA_MEM_START_ADDR);
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, &rdma->cmdq_reg, rdma->regs,
+ DISP_RDMA_MEM_SRC_PITCH);
+ mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, &rdma->cmdq_reg, rdma->regs,
DISP_RDMA_MEM_GMC_SETTING_0);
- mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
}
-static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
- .config = mtk_rdma_config,
- .start = mtk_rdma_start,
- .stop = mtk_rdma_stop,
- .enable_vblank = mtk_rdma_enable_vblank,
- .disable_vblank = mtk_rdma_disable_vblank,
- .layer_nr = mtk_rdma_layer_nr,
- .layer_config = mtk_rdma_layer_config,
-};
-
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
@@ -259,10 +263,6 @@ static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_rdma_component_ops = {
@@ -274,7 +274,7 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_rdma *priv;
- int comp_id;
+ struct resource *res;
int irq;
int ret;
@@ -286,25 +286,37 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get rdma clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_rdma_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap rdma\n");
+ return PTR_ERR(priv->regs);
+ }
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
+ ret = of_property_read_u32(dev->of_node,
+ "mediatek,rdma-fifo-size",
+ &priv->fifo_size);
+ if (ret) {
+ dev_err(dev, "Failed to get rdma fifo size\n");
+ return ret;
+ }
}
/* Disable and clear pending interrupts */
- writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
- writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
@@ -339,11 +351,17 @@ static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
.fifo_size = SZ_8K,
};
+static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
+ .fifo_size = 5 * SZ_1K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = &mt2701_rdma_driver_data},
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = &mt8173_rdma_driver_data},
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = &mt8183_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
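
The RDMA changes also add an optional per-board FIFO size: when the DT node carries "mediatek,rdma-fifo-size" the driver uses that value, otherwise it falls back to the per-SoC RDMA_FIFO_SIZE() default. The read-with-fallback shape, sketched with hypothetical names around the real property string:

#include <linux/of.h>

static u32 demo_fifo_size(struct device_node *np, u32 hw_default)
{
	u32 val;

	/* of_property_read_u32() returns nonzero when the property is
	 * absent or malformed, so the SoC default wins in that case. */
	if (of_property_read_u32(np, "mediatek,rdma-fifo-size", &val))
		return hw_default;
	return val;
}

For scale, the output-valid threshold computed in mtk_rdma_config() at 1920x1080@60 works out to 1920 * 1080 * 60 * 4 * 7 / 1000000 = 3483 (integer division); note the intermediate product, about 3.48e9, still fits a 32-bit unsigned, though not by much.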
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 52f11a63a330..b05f900d9322 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -20,10 +20,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
@@ -62,10 +64,10 @@ enum mtk_dpi_out_color_format {
};
struct mtk_dpi {
- struct mtk_ddp_comp ddp_comp;
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
+ struct drm_connector *connector;
void __iomem *regs;
struct device *dev;
struct clk *engine_clk;
@@ -562,53 +564,50 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.enable = mtk_dpi_bridge_enable,
};
-static void mtk_dpi_start(struct mtk_ddp_comp *comp)
+void mtk_dpi_start(struct device *dev)
{
- struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_on(dpi);
}
-static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
+void mtk_dpi_stop(struct device *dev)
{
- struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_off(dpi);
}
-static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
- .start = mtk_dpi_start,
- .stop = mtk_dpi_stop,
-};
-
static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
int ret;
- ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
- goto err_unregister;
+ return ret;
}
- dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
+ dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
- ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
+ ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
}
+ dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);
+ if (IS_ERR(dpi->connector)) {
+ dev_err(dev, "Unable to create bridge connector\n");
+ ret = PTR_ERR(dpi->connector);
+ goto err_cleanup;
+ }
+ drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
+
dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
@@ -618,8 +617,6 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
err_cleanup:
drm_encoder_cleanup(&dpi->encoder);
-err_unregister:
- mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
return ret;
}
@@ -627,10 +624,8 @@ static void mtk_dpi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
drm_encoder_cleanup(&dpi->encoder);
- mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
}
static const struct component_ops mtk_dpi_component_ops = {
@@ -691,7 +686,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
struct resource *mem;
- int comp_id;
int ret;
dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
@@ -769,19 +763,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
- }
-
- ret = mtk_ddp_comp_init(dev, dev->of_node, &dpi->ddp_comp, comp_id,
- &mtk_dpi_funcs);
- if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
- }
-
platform_set_drvdata(pdev, dpi);
dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
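
The DPI bind above switches to DRM_BRIDGE_ATTACH_NO_CONNECTOR and creates the connector itself with drm_bridge_connector_init(), so the bridge chain no longer has to. The flow, sketched with error handling trimmed and placeholder names:

#include <linux/err.h>

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

static int demo_attach_output(struct drm_device *drm,
			      struct drm_encoder *encoder,
			      struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* Tell the bridge chain not to create its own connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* The KMS driver now owns one connector for the whole chain. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}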
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bdd37eadecd5..8b0de90156c6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -19,7 +20,6 @@
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
@@ -55,7 +55,7 @@ struct mtk_drm_crtc {
#endif
struct device *mmsys_dev;
- struct mtk_disp_mutex *mutex;
+ struct mtk_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -107,7 +107,7 @@ static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- mtk_disp_mutex_put(mtk_crtc->mutex);
+ mtk_mutex_put(mtk_crtc->mutex);
drm_crtc_cleanup(crtc);
}
@@ -169,31 +169,13 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
state->pending_config = true;
}
-static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
- mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
-
- return 0;
-}
-
-static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
- mtk_ddp_comp_disable_vblank(comp);
-}
-
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
int ret;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
- ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
+ ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
if (ret) {
DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
goto err;
@@ -203,7 +185,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
return 0;
err:
while (--i >= 0)
- clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+ mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
return ret;
}
@@ -212,7 +194,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+ mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}
static
@@ -283,7 +265,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
return ret;
}
- ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
+ ret = mtk_mutex_prepare(mtk_crtc->mutex);
if (ret < 0) {
DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
goto err_pm_runtime_put;
@@ -299,11 +281,11 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
- mtk_disp_mutex_add_comp(mtk_crtc->mutex,
+ mtk_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
- mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
- mtk_disp_mutex_enable(mtk_crtc->mutex);
+ mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_mutex_enable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
@@ -332,7 +314,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
return 0;
err_mutex_unprepare:
- mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+ mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
pm_runtime_put(crtc->dev->dev);
return ret;
@@ -351,19 +333,19 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
- mtk_disp_mutex_disable(mtk_crtc->mutex);
+ mtk_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
mtk_crtc_ddp_clk_disable(mtk_crtc);
- mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+ mtk_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
@@ -475,9 +457,9 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->pending_async_planes = true;
if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_mutex_acquire(mtk_crtc->mutex);
mtk_crtc_ddp_config(crtc, NULL);
- mtk_disp_mutex_release(mtk_crtc->mutex);
+ mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
@@ -493,6 +475,40 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
mutex_unlock(&mtk_crtc->hw_lock);
}
+static void mtk_crtc_ddp_irq(void *data)
+{
+ struct drm_crtc *crtc = data;
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
+ if (!priv->data->shadow_register)
+#endif
+ mtk_crtc_ddp_config(crtc, NULL);
+
+ mtk_drm_finish_page_flip(mtk_crtc);
+}
+
+static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);
+
+ return 0;
+}
+
+static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_disable_vblank(comp);
+}
+
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state)
{
@@ -619,7 +635,6 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
.reset = mtk_drm_crtc_reset,
.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
.atomic_destroy_state = mtk_drm_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
};
@@ -662,21 +677,6 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
-
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
-#else
- if (!priv->data->shadow_register)
-#endif
- mtk_crtc_ddp_config(crtc, NULL);
-
- mtk_drm_finish_page_flip(mtk_crtc);
-}
-
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
int comp_idx)
{
@@ -772,7 +772,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
- mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
+ mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
if (IS_ERR(mtk_crtc->mutex)) {
ret = PTR_ERR(mtk_crtc->mutex);
dev_err(dev, "Failed to get mutex: %d\n", ret);
@@ -785,7 +785,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
struct device_node *node;
node = priv->comp_node[comp_id];
- comp = priv->ddp_comp[comp_id];
+ comp = &priv->ddp_comp[comp_id];
if (!comp) {
dev_err(dev, "Component %pOF not initialized\n", node);
ret = -ENODEV;
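
With the component layer stripped of DRM types, the vblank plumbing inverts: instead of the component calling mtk_crtc_ddp_irq() directly, the CRTC hands the lead component an opaque callback plus context, and mtk_crtc_ddp_irq() became that callback above. The registration and dispatch shape in miniature, names hypothetical:

#include <stddef.h>

struct demo_comp {
	void (*vblank_cb)(void *data);
	void *vblank_cb_data;
};

static void demo_enable_vblank(struct demo_comp *c,
			       void (*cb)(void *), void *data)
{
	c->vblank_cb = cb;
	c->vblank_cb_data = data;
}

static void demo_disable_vblank(struct demo_comp *c)
{
	c->vblank_cb = NULL;
	c->vblank_cb_data = NULL;
}

/* Called from the component's IRQ handler; a cleared callback is the
 * signal that vblank delivery is off, as in the OVL/RDMA handlers. */
static void demo_irq(struct demo_comp *c)
{
	if (c->vblank_cb)
		c->vblank_cb(c->vblank_cb_data);
}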
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index a2b4677a451c..45cfd0a032de 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -15,7 +15,6 @@
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
deleted file mode 100644
index 6b691a57be4a..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_DDP_H
-#define MTK_DRM_DDP_H
-
-#include "mtk_drm_ddp_comp.h"
-
-struct regmap;
-struct device;
-struct mtk_disp_mutex;
-
-struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
-int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
-
-#endif /* MTK_DRM_DDP_H */
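
The deleted header is the last trace of the driver-local mutex API: the same operations now come from the SoC-wide <linux/soc/mediatek/mtk-mutex.h>, with mtk_disp_mutex_* renamed to mtk_mutex_* and mtk_mutex_get() dropping the pipe argument (compare the mtk_drm_crtc.c hunks above). Typical setup under the new names, sketched with error paths shortened:

#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

static int demo_mutex_setup(struct device *mutex_dev)
{
	struct mtk_mutex *mutex;
	int ret;

	mutex = mtk_mutex_get(mutex_dev);	/* no pipe argument anymore */
	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	ret = mtk_mutex_prepare(mutex);
	if (ret) {
		mtk_mutex_put(mutex);
		return ret;
	}

	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_enable(mutex);
	return 0;
}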
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3064eac1a750..75bc00e17fc4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -9,12 +9,12 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <drm/drm_print.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -35,31 +35,13 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
-#define DISP_CCORR_EN 0x0000
-#define CCORR_EN BIT(0)
-#define DISP_CCORR_CFG 0x0020
-#define CCORR_RELAY_MODE BIT(0)
-#define CCORR_ENGINE_EN BIT(1)
-#define CCORR_GAMMA_OFF BIT(2)
-#define CCORR_WGAMUT_SRC_CLIP BIT(3)
-#define DISP_CCORR_SIZE 0x0030
-#define DISP_CCORR_COEF_0 0x0080
-#define DISP_CCORR_COEF_1 0x0084
-#define DISP_CCORR_COEF_2 0x0088
-#define DISP_CCORR_COEF_3 0x008C
-#define DISP_CCORR_COEF_4 0x0090
-
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
#define DISP_DITHER_CFG 0x0020
#define DITHER_RELAY_MODE BIT(0)
+#define DITHER_ENGINE_EN BIT(1)
#define DISP_DITHER_SIZE 0x0030
-#define DISP_GAMMA_EN 0x0000
-#define DISP_GAMMA_CFG 0x0020
-#define DISP_GAMMA_SIZE 0x0030
-#define DISP_GAMMA_LUT 0x0700
-
#define LUT_10BIT_MASK 0x03ff
#define OD_RELAYMODE BIT(0)
@@ -68,9 +50,6 @@
#define AAL_EN BIT(0)
-#define GAMMA_EN BIT(0)
-#define GAMMA_LUT_EN BIT(1)
-
#define DISP_DITHERING BIT(2)
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
@@ -86,262 +65,233 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+struct mtk_ddp_comp_dev {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+};
+
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset)
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
- cmdq_pkt_write(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value);
+ cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value);
else
#endif
- writel(value, comp->regs + offset);
+ writel(value, regs + offset);
}
void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp,
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
- cmdq_pkt_write(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value);
+ cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value);
else
#endif
- writel_relaxed(value, comp->regs + offset);
+ writel_relaxed(value, regs + offset);
}
-void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
- unsigned int value,
- struct mtk_ddp_comp *comp,
- unsigned int offset,
- unsigned int mask)
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset, unsigned int mask)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt) {
- cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value, mask);
+ cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value, mask);
} else {
#endif
- u32 tmp = readl(comp->regs + offset);
+ u32 tmp = readl(regs + offset);
tmp = (tmp & ~mask) | (value & mask);
- writel(tmp, comp->regs + offset);
+ writel(tmp, regs + offset);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
}
#endif
}
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
+static int mtk_ddp_clk_enable(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(priv->clk);
+}
+
+static void mtk_ddp_clk_disable(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+ unsigned int bpc, unsigned int cfg,
+ unsigned int dither_en, struct cmdq_pkt *cmdq_pkt)
{
/* If bpc equal to 0, the dithering function didn't be enabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
- mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
- mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_NEW_BIT_MODE,
- comp, DISP_DITHER_15);
+ cmdq_reg, regs, DISP_DITHER_15);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- comp, DISP_DITHER_16);
- mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
+ cmdq_reg, regs, DISP_DITHER_16);
+ mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
}
}
-static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_set(struct device *dev, unsigned int bpc,
+ unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
- mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
- mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_od_start(struct mtk_ddp_comp *comp)
-{
- writel(1, comp->regs + DISP_OD_EN);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg,
+ DISP_DITHERING, cmdq_pkt);
}
-static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
+static void mtk_od_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
-{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG);
+ mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt);
}
-static void mtk_aal_start(struct mtk_ddp_comp *comp)
+static void mtk_od_start(struct device *dev)
{
- writel(AAL_EN, comp->regs + DISP_AAL_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_aal_stop(struct mtk_ddp_comp *comp)
-{
- writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+ writel(1, priv->regs + DISP_OD_EN);
}
-static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_ufoe_start(struct device *dev)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
- mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
-{
- writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+ writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
-static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+static void mtk_aal_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
}
-/* Converts a DRM S31.32 value to the HW S1.10 format. */
-static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
{
- u16 r;
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- /* Sign bit. */
- r = in & BIT_ULL(63) ? BIT(11) : 0;
+ mtk_gamma_set_common(priv->regs, state);
+}
- if ((in & GENMASK_ULL(62, 33)) > 0) {
- /* identity value 0x100000000 -> 0x400, */
- /* if bigger this, set it to max 0x7ff. */
- r |= GENMASK(10, 0);
- } else {
- /* take the 11 most important bits. */
- r |= (in >> 22) & GENMASK(10, 0);
- }
+static void mtk_aal_start(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- return r;
+ writel(AAL_EN, priv->regs + DISP_AAL_EN);
}
-static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
- struct drm_crtc_state *state)
+static void mtk_aal_stop(struct device *dev)
{
- struct drm_property_blob *blob = state->ctm;
- struct drm_color_ctm *ctm;
- const u64 *input;
- uint16_t coeffs[9] = { 0 };
- int i;
- struct cmdq_pkt *cmdq_pkt = NULL;
-
- if (!blob)
- return;
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- ctm = (struct drm_color_ctm *)blob->data;
- input = ctm->matrix;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++)
- coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
-
- mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
- comp, DISP_CCORR_COEF_0);
- mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
- comp, DISP_CCORR_COEF_1);
- mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
- comp, DISP_CCORR_COEF_2);
- mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
- comp, DISP_CCORR_COEF_3);
- mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
- comp, DISP_CCORR_COEF_4);
+ writel_relaxed(0x0, priv->regs + DISP_AAL_EN);
}
-static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+static void mtk_dither_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
- mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
-}
-
-static void mtk_dither_start(struct mtk_ddp_comp *comp)
-{
- writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_dither_stop(struct mtk_ddp_comp *comp)
-{
- writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG,
+ DITHER_ENGINE_EN, cmdq_pkt);
}
-static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_start(struct device *dev)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
- mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_gamma_start(struct mtk_ddp_comp *comp)
-{
- writel(GAMMA_EN, comp->regs + DISP_GAMMA_EN);
+ writel(DITHER_EN, priv->regs + DISP_DITHER_EN);
}
-static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+static void mtk_dither_stop(struct device *dev)
{
- writel_relaxed(0x0, comp->regs + DISP_GAMMA_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_gamma_set(struct mtk_ddp_comp *comp,
- struct drm_crtc_state *state)
-{
- unsigned int i, reg;
- struct drm_color_lut *lut;
- void __iomem *lut_base;
- u32 word;
-
- if (state->gamma_lut) {
- reg = readl(comp->regs + DISP_GAMMA_CFG);
- reg = reg | GAMMA_LUT_EN;
- writel(reg, comp->regs + DISP_GAMMA_CFG);
- lut_base = comp->regs + DISP_GAMMA_LUT;
- lut = (struct drm_color_lut *)state->gamma_lut->data;
- for (i = 0; i < MTK_LUT_SIZE; i++) {
- word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
- (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
- ((lut[i].blue >> 6) & LUT_10BIT_MASK);
- writel(word, (lut_base + i * 4));
- }
- }
+ writel_relaxed(0x0, priv->regs + DISP_DITHER_EN);
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
- .gamma_set = mtk_gamma_set,
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
+ .gamma_set = mtk_aal_gamma_set,
.config = mtk_aal_config,
.start = mtk_aal_start,
.stop = mtk_aal_stop,
};
static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .clk_enable = mtk_ccorr_clk_enable,
+ .clk_disable = mtk_ccorr_clk_disable,
.config = mtk_ccorr_config,
.start = mtk_ccorr_start,
.stop = mtk_ccorr_stop,
.ctm_set = mtk_ccorr_ctm_set,
};
+static const struct mtk_ddp_comp_funcs ddp_color = {
+ .clk_enable = mtk_color_clk_enable,
+ .clk_disable = mtk_color_clk_disable,
+ .config = mtk_color_config,
+ .start = mtk_color_start,
+};
+
static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.config = mtk_dither_config,
.start = mtk_dither_start,
.stop = mtk_dither_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_dpi = {
+ .start = mtk_dpi_start,
+ .stop = mtk_dpi_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dsi = {
+ .start = mtk_dsi_ddp_start,
+ .stop = mtk_dsi_ddp_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
+ .clk_enable = mtk_gamma_clk_enable,
+ .clk_disable = mtk_gamma_clk_disable,
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
.start = mtk_gamma_start,
@@ -349,11 +299,43 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
};
static const struct mtk_ddp_comp_funcs ddp_od = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.config = mtk_od_config,
.start = mtk_od_start,
};
+static const struct mtk_ddp_comp_funcs ddp_ovl = {
+ .clk_enable = mtk_ovl_clk_enable,
+ .clk_disable = mtk_ovl_clk_disable,
+ .config = mtk_ovl_config,
+ .start = mtk_ovl_start,
+ .stop = mtk_ovl_stop,
+ .enable_vblank = mtk_ovl_enable_vblank,
+ .disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
+ .layer_nr = mtk_ovl_layer_nr,
+ .layer_check = mtk_ovl_layer_check,
+ .layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_rdma = {
+ .clk_enable = mtk_rdma_clk_enable,
+ .clk_disable = mtk_rdma_clk_disable,
+ .config = mtk_rdma_config,
+ .start = mtk_rdma_start,
+ .stop = mtk_rdma_stop,
+ .enable_vblank = mtk_rdma_enable_vblank,
+ .disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
+};
+
static const struct mtk_ddp_comp_funcs ddp_ufoe = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.start = mtk_ufoe_start,
};
@@ -387,36 +369,37 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
- [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
- [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
- [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
- [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
- [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL },
- [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL },
+ [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
+ [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
+ [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
[DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
- [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
- [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
- [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
- [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
+ [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
- [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
- [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
- [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
+ [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
+ [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
+ [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
[DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
[DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+static bool mtk_drm_find_comp_in_ddp(struct device *dev,
const enum mtk_ddp_comp_id *path,
- unsigned int path_len)
+ unsigned int path_len,
+ struct mtk_ddp_comp *ddp_comp)
{
unsigned int i;
@@ -424,7 +407,7 @@ static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
return false;
for (i = 0U; i < path_len; i++)
- if (ddp_comp.id == path[i])
+ if (dev == ddp_comp[path[i]].dev)
return true;
return false;
@@ -446,18 +429,19 @@ int mtk_ddp_comp_get_id(struct device_node *node,
}
unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct mtk_ddp_comp ddp_comp)
+ struct device *dev)
{
struct mtk_drm_private *private = drm->dev_private;
unsigned int ret = 0;
- if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len))
+ if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
+ private->ddp_comp))
ret = BIT(0);
- else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
- private->data->ext_len))
+ else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
+ private->data->ext_len, private->ddp_comp))
ret = BIT(1);
- else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
- private->data->third_len))
+ else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
+ private->data->third_len, private->ddp_comp))
ret = BIT(2);
else
DRM_INFO("Failed to find comp in ddp table\n");
@@ -465,59 +449,15 @@ unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
return ret;
}
-int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
- struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
- const struct mtk_ddp_comp_funcs *funcs)
+static int mtk_ddp_get_larb_dev(struct device_node *node, struct mtk_ddp_comp *comp,
+ struct device *dev)
{
- enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct resource res;
- struct cmdq_client_reg cmdq_reg;
- int ret;
-#endif
-
- if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
- return -EINVAL;
-
- type = mtk_ddp_matches[comp_id].type;
-
- comp->id = comp_id;
- comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
-
- if (comp_id == DDP_COMPONENT_BLS ||
- comp_id == DDP_COMPONENT_DPI0 ||
- comp_id == DDP_COMPONENT_DPI1 ||
- comp_id == DDP_COMPONENT_DSI0 ||
- comp_id == DDP_COMPONENT_DSI1 ||
- comp_id == DDP_COMPONENT_DSI2 ||
- comp_id == DDP_COMPONENT_DSI3 ||
- comp_id == DDP_COMPONENT_PWM0) {
- comp->regs = NULL;
- comp->clk = NULL;
- comp->irq = 0;
- return 0;
- }
-
- comp->regs = of_iomap(node, 0);
- comp->irq = of_irq_get(node, 0);
- comp->clk = of_clk_get(node, 0);
- if (IS_ERR(comp->clk))
- return PTR_ERR(comp->clk);
-
- /* Only DMA capable components need the LARB property */
- comp->larb_dev = NULL;
- if (type != MTK_DISP_OVL &&
- type != MTK_DISP_OVL_2L &&
- type != MTK_DISP_RDMA &&
- type != MTK_DISP_WDMA)
- return 0;
larb_node = of_parse_phandle(node, "mediatek,larb", 0);
if (!larb_node) {
- dev_err(dev,
- "Missing mediadek,larb phandle in %pOF node\n", node);
+ dev_err(dev, "Missing mediadek,larb phandle in %pOF node\n", node);
return -EINVAL;
}
@@ -528,40 +468,71 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
return -EPROBE_DEFER;
}
of_node_put(larb_node);
-
comp->larb_dev = &larb_pdev->dev;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (of_address_to_resource(node, 0, &res) != 0) {
- dev_err(dev, "Missing reg in %s node\n", node->full_name);
- put_device(&larb_pdev->dev);
- return -EINVAL;
- }
- comp->regs_pa = res.start;
-
- ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
- if (ret)
- dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
- else
- comp->subsys = cmdq_reg.subsys;
-#endif
return 0;
}
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp)
+int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+ enum mtk_ddp_comp_id comp_id)
{
- struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *comp_pdev;
+ enum mtk_ddp_comp_type type;
+ struct mtk_ddp_comp_dev *priv;
+ int ret;
- if (private->ddp_comp[comp->id])
- return -EBUSY;
+ if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
+ return -EINVAL;
- private->ddp_comp[comp->id] = comp;
- return 0;
-}
+ type = mtk_ddp_matches[comp_id].type;
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp)
-{
- struct mtk_drm_private *private = drm->dev_private;
+ comp->id = comp_id;
+ comp->funcs = mtk_ddp_matches[comp_id].funcs;
+ comp_pdev = of_find_device_by_node(node);
+ if (!comp_pdev) {
+ DRM_INFO("Waiting for device %s\n", node->full_name);
+ return -EPROBE_DEFER;
+ }
+ comp->dev = &comp_pdev->dev;
- private->ddp_comp[comp->id] = NULL;
+ /* Only DMA capable components need the LARB property */
+ if (type == MTK_DISP_OVL ||
+ type == MTK_DISP_OVL_2L ||
+ type == MTK_DISP_RDMA ||
+ type == MTK_DISP_WDMA) {
+ ret = mtk_ddp_get_larb_dev(node, comp, comp->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (type == MTK_DISP_BLS ||
+ type == MTK_DISP_CCORR ||
+ type == MTK_DISP_COLOR ||
+ type == MTK_DISP_GAMMA ||
+ type == MTK_DPI ||
+ type == MTK_DSI ||
+ type == MTK_DISP_OVL ||
+ type == MTK_DISP_OVL_2L ||
+ type == MTK_DISP_PWM ||
+ type == MTK_DISP_RDMA)
+ return 0;
+
+ priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = of_iomap(node, 0);
+ priv->clk = of_clk_get(node, 0);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ platform_set_drvdata(comp_pdev, priv);
+
+ return 0;
}
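
The reworked init path stores per-component resources with platform_set_drvdata() rather than in the shared struct mtk_ddp_comp. The definition of struct mtk_ddp_comp_dev lies outside this hunk; judging from the fields used above (priv->regs, priv->clk, priv->cmdq_reg), a minimal sketch of it would be the following — field order and any additional members are assumptions, not taken from the patch:

struct mtk_ddp_comp_dev {
	struct clk *clk;
	void __iomem *regs;
	struct cmdq_client_reg cmdq_reg;	/* only filled in when CONFIG_MTK_CMDQ is reachable */
};
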
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 5aa52b7afeec..bb914d976cf5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
@@ -39,79 +40,95 @@ enum mtk_ddp_comp_type {
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
- void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
+ int (*clk_enable)(struct device *dev);
+ void (*clk_disable)(struct device *dev);
+ void (*config)(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
- void (*start)(struct mtk_ddp_comp *comp);
- void (*stop)(struct mtk_ddp_comp *comp);
- void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
- void (*disable_vblank)(struct mtk_ddp_comp *comp);
- unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
- unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
- int (*layer_check)(struct mtk_ddp_comp *comp,
+ void (*start)(struct device *dev);
+ void (*stop)(struct device *dev);
+ void (*enable_vblank)(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+ void (*disable_vblank)(struct device *dev);
+ unsigned int (*supported_rotations)(struct device *dev);
+ unsigned int (*layer_nr)(struct device *dev);
+ int (*layer_check)(struct device *dev,
unsigned int idx,
struct mtk_plane_state *state);
- void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
+ void (*layer_config)(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);
- void (*gamma_set)(struct mtk_ddp_comp *comp,
+ void (*gamma_set)(struct device *dev,
struct drm_crtc_state *state);
- void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
- void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
- void (*ctm_set)(struct mtk_ddp_comp *comp,
+ void (*bgclr_in_on)(struct device *dev);
+ void (*bgclr_in_off)(struct device *dev);
+ void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
- struct clk *clk;
- void __iomem *regs;
+ struct device *dev;
int irq;
struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
- resource_size_t regs_pa;
- u8 subsys;
};
+static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->clk_enable)
+ return comp->funcs->clk_enable(comp->dev);
+
+ return 0;
+}
+
+static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->clk_disable)
+ comp->funcs->clk_disable(comp->dev);
+}
+
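
Because every hook now receives a struct device * instead of a struct mtk_ddp_comp *, a component driver implements them by pulling its private state back out of drvdata. A hedged sketch with an invented mtk_disp_foo component (the name and struct are illustrative only):

struct mtk_disp_foo {
	struct clk *clk;
};

static int mtk_disp_foo_clk_enable(struct device *dev)
{
	struct mtk_disp_foo *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->clk);
}

static void mtk_disp_foo_clk_disable(struct device *dev)
{
	struct mtk_disp_foo *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);
}

static const struct mtk_ddp_comp_funcs mtk_disp_foo_funcs = {
	.clk_enable = mtk_disp_foo_clk_enable,
	.clk_disable = mtk_disp_foo_clk_disable,
};

The NULL checks in the inline wrappers below keep components free to leave any hook unimplemented.
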
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
unsigned int vrefresh, unsigned int bpc,
struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
+ comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->start)
- comp->funcs->start(comp);
+ comp->funcs->start(comp->dev);
}
static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->stop)
- comp->funcs->stop(comp);
+ comp->funcs->stop(comp->dev);
}
static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
{
if (comp->funcs && comp->funcs->enable_vblank)
- comp->funcs->enable_vblank(comp, crtc);
+ comp->funcs->enable_vblank(comp->dev, vblank_cb, vblank_cb_data);
}
static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->disable_vblank)
- comp->funcs->disable_vblank(comp);
+ comp->funcs->disable_vblank(comp->dev);
}
static inline
unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->supported_rotations)
- return comp->funcs->supported_rotations(comp);
+ return comp->funcs->supported_rotations(comp->dev);
return 0;
}
@@ -119,7 +136,7 @@ unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->layer_nr)
- return comp->funcs->layer_nr(comp);
+ return comp->funcs->layer_nr(comp->dev);
return 0;
}
@@ -129,7 +146,7 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
struct mtk_plane_state *state)
{
if (comp->funcs && comp->funcs->layer_check)
- return comp->funcs->layer_check(comp, idx, state);
+ return comp->funcs->layer_check(comp->dev, idx, state);
return 0;
}
@@ -139,52 +156,49 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->layer_config)
- comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
+ comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt);
}
static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state)
{
if (comp->funcs && comp->funcs->gamma_set)
- comp->funcs->gamma_set(comp, state);
+ comp->funcs->gamma_set(comp->dev, state);
}
static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->bgclr_in_on)
- comp->funcs->bgclr_in_on(comp);
+ comp->funcs->bgclr_in_on(comp->dev);
}
static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->bgclr_in_off)
- comp->funcs->bgclr_in_off(comp);
+ comp->funcs->bgclr_in_off(comp->dev);
}
static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state)
{
if (comp->funcs && comp->funcs->ctm_set)
- comp->funcs->ctm_set(comp, state);
+ comp->funcs->ctm_set(comp->dev, state);
}
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct mtk_ddp_comp ddp_comp);
-int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
- struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
- const struct mtk_ddp_comp_funcs *funcs);
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+ struct device *dev);
+int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
+ enum mtk_ddp_comp_id comp_id);
enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset);
void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset);
void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset,
- unsigned int mask);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset, unsigned int mask);
#endif /* MTK_DRM_DDP_COMP_H */
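
The write helpers likewise drop their struct mtk_ddp_comp parameter: callers pass the cmdq_client_reg and MMIO base explicitly. A rough usage sketch, reusing the hypothetical mtk_disp_foo state from the sketch above (DISP_FOO_SIZE is an invented register offset):

struct mtk_disp_foo {
	void __iomem *regs;
	struct cmdq_client_reg cmdq_reg;
};

#define DISP_FOO_SIZE	0x0018	/* hypothetical register offset */

static void mtk_disp_foo_config(struct device *dev, unsigned int w,
				unsigned int h, unsigned int vrefresh,
				unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_foo *priv = dev_get_drvdata(dev);

	mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs,
		      DISP_FOO_SIZE);
}
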
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2f717df28a77..b013d56d2777 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,7 +10,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
-#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -26,7 +25,6 @@
#include <drm/drm_vblank.h>
#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
@@ -131,6 +129,24 @@ static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -163,6 +179,13 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
};
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .main_path = mt8183_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
+ .ext_path = mt8183_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -377,12 +400,20 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma",
.data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8183-disp-ccorr",
+ .data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
@@ -391,22 +422,32 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8183-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8183-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8173-disp-ufoe",
.data = (void *)MTK_DISP_UFOE },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt2701-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8173-dpi",
.data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8183-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8173-disp-pwm",
@@ -425,6 +466,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
+ { .compatible = "mediatek,mt8183-mmsys",
+ .data = &mt8183_mmsys_driver_data},
{ }
};
@@ -488,11 +531,13 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
- * separate component platform drivers and initialize their own
+ * Currently only the CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
+ * blocks have separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
- if (comp_type == MTK_DISP_COLOR ||
+ if (comp_type == MTK_DISP_CCORR ||
+ comp_type == MTK_DISP_COLOR ||
+ comp_type == MTK_DISP_GAMMA ||
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
@@ -502,24 +547,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
node);
drm_of_component_match_add(dev, &match, compare_of,
node);
- } else {
- struct mtk_ddp_comp *comp;
-
- comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
- if (!comp) {
- ret = -ENOMEM;
- of_node_put(node);
- goto err_node;
- }
-
- ret = mtk_ddp_comp_init(dev->parent, node, comp,
- comp_id, NULL);
- if (ret) {
- of_node_put(node);
- goto err_node;
- }
-
- private->ddp_comp[comp_id] = comp;
+ }
+
+ ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
+ if (ret) {
+ of_node_put(node);
+ goto err_node;
}
}
@@ -545,10 +578,8 @@ err_node:
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
of_node_put(private->comp_node[i]);
- if (private->ddp_comp[i]) {
- put_device(private->ddp_comp[i]->larb_dev);
- private->ddp_comp[i] = NULL;
- }
+ if (private->ddp_comp[i].larb_dev)
+ put_device(private->ddp_comp[i].larb_dev);
}
return ret;
}
@@ -604,8 +635,9 @@ static struct platform_driver mtk_drm_platform_driver = {
};
static struct platform_driver * const mtk_drm_drivers[] = {
- &mtk_ddp_driver,
+ &mtk_disp_ccorr_driver,
&mtk_disp_color_driver,
+ &mtk_disp_gamma_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 5d771cf0bf25..637f5669e895 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -41,13 +41,14 @@ struct mtk_drm_private {
struct device *mutex_dev;
struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
- struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+ struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
struct drm_atomic_state *suspend_state;
};
-extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_ccorr_driver;
extern struct platform_driver mtk_disp_color_driver;
+extern struct platform_driver mtk_disp_gamma_driver;
extern struct platform_driver mtk_disp_ovl_driver;
extern struct platform_driver mtk_disp_rdma_driver;
extern struct platform_driver mtk_dpi_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 28a2ee1336ef..280ea0d5e840 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return -ENOMEM;
}
- drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+ drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 65fd99c528af..a1ff152ef468 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -25,6 +25,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
#define DSI_START 0x00
@@ -178,7 +179,6 @@ struct mtk_dsi_driver_data {
};
struct mtk_dsi {
- struct mtk_ddp_comp ddp_comp;
struct device *dev;
struct mipi_dsi_host host;
struct drm_encoder encoder;
@@ -767,25 +767,20 @@ static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.mode_set = mtk_dsi_bridge_mode_set,
};
-static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_start(struct device *dev)
{
- struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweron(dsi);
}
-static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_stop(struct device *dev)
{
- struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweroff(dsi);
}
-static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
- .start = mtk_dsi_ddp_start,
- .stop = mtk_dsi_ddp_stop,
-};
-
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
@@ -952,7 +947,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
return ret;
}
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
@@ -980,32 +975,17 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
- ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- goto err_unregister;
- return 0;
-
-err_unregister:
- mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
drm_encoder_cleanup(&dsi->encoder);
- mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
static const struct component_ops mtk_dsi_component_ops = {
@@ -1020,7 +1000,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
struct drm_panel *panel;
struct resource *regs;
int irq_num;
- int comp_id;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -1090,20 +1069,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
goto err_unregister_host;
}
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- ret = comp_id;
- goto err_unregister_host;
- }
-
- ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
- &mtk_dsi_funcs);
- if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
- goto err_unregister_host;
- }
-
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
@@ -1111,9 +1076,8 @@ static int mtk_dsi_probe(struct platform_device *pdev)
goto err_unregister_host;
}
- irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
- IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+ IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
goto err_unregister_host;
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 6ccd270789c6..4fd4de16cd32 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file mga_ioc32.c
*
* 32-bit ioctl compatibility routines for the MGA DRM.
@@ -159,13 +159,13 @@ static struct {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * mga_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
*/
long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index a977c9f49719..4e4c105f9a50 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,10 +47,11 @@ static const struct drm_driver mgag200_driver = {
static bool mgag200_has_sgram(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option;
int ret;
- ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
return false;
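
Every mgag200 hunk below repeats the same substitution: with the drm_device::pdev field going away, the PCI device is derived on demand from the generic struct device that backs the DRM device. A driver wanting to avoid the repetition could wrap it once (illustrative helper, not part of the patch):

static inline struct pci_dev *mga_pci_dev(struct drm_device *dev)
{
	return to_pci_dev(dev->dev);
}
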
@@ -60,6 +61,7 @@ static bool mgag200_has_sgram(struct mga_device *mdev)
static int mgag200_regs_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option, option2;
u8 crtcext3;
@@ -99,13 +101,13 @@ static int mgag200_regs_init(struct mga_device *mdev)
}
if (option)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
if (option2)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
/* BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
+ mdev->rmmio_base = pci_resource_start(pdev, 1);
+ mdev->rmmio_size = pci_resource_len(pdev, 1);
if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
mdev->rmmio_size, "mgadrmfb_mmio")) {
@@ -113,7 +115,7 @@ static int mgag200_regs_init(struct mga_device *mdev)
return -ENOMEM;
}
- mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+ mdev->rmmio = pcim_iomap(pdev, 1, 0);
if (mdev->rmmio == NULL)
return -ENOMEM;
@@ -218,6 +220,7 @@ static void mgag200_g200_interpret_bios(struct mga_device *mdev,
static void mgag200_g200_init_refclk(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned char __iomem *rom;
unsigned char *bios;
size_t size;
@@ -226,7 +229,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
mdev->model.g200.pclk_max = 230000;
mdev->model.g200.ref_clk = 27050;
- rom = pci_map_rom(dev->pdev, &size);
+ rom = pci_map_rom(pdev, &size);
if (!rom)
return;
@@ -244,7 +247,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
vfree(bios);
out:
- pci_unmap_rom(dev->pdev, rom);
+ pci_unmap_rom(pdev, rom);
}
static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
@@ -301,7 +304,6 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
return mdev;
dev = &mdev->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = mgag200_device_init(mdev, flags);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 09731e614e46..ac8e34eef513 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -126,7 +126,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
i2c->clock = clock;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index 641f1aa992be..b667371b69a4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -78,11 +78,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
static void mgag200_mm_release(struct drm_device *dev, void *ptr)
{
struct mga_device *mdev = to_mga_device(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
mdev->vram_fb_available = 0;
iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
@@ -90,6 +91,7 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr)
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
int ret;
@@ -102,8 +104,8 @@ int mgag200_mm_init(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
/* BAR 0 is VRAM */
- start = pci_resource_start(dev->pdev, 0);
- len = pci_resource_len(dev->pdev, 0);
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
drm_err(dev, "can't reserve VRAM\n");
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 346cc6ff3a36..7b9fcfe95c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
+
#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a5af223eaf50..7e553d3efeb2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a5xx_preempt_trigger(gpu);
}
-static const struct {
+static const struct adreno_five_hwcg_regs {
u32 offset;
u32 value;
} a5xx_hwcg[] = {
@@ -318,16 +318,124 @@ static const struct {
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+}, a50x_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+}, a512_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
+ const struct adreno_five_hwcg_regs *regs;
+ unsigned int i, sz;
+
+ if (adreno_is_a508(adreno_gpu)) {
+ regs = a50x_hwcg;
+ sz = ARRAY_SIZE(a50x_hwcg);
+ } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
+ regs = a512_hwcg;
+ sz = ARRAY_SIZE(a512_hwcg);
+ } else {
+ regs = a5xx_hwcg;
+ sz = ARRAY_SIZE(a5xx_hwcg);
+ }
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
- gpu_write(gpu, a5xx_hwcg[i].offset,
- state ? a5xx_hwcg[i].value : 0);
+ for (i = 0; i < sz; i++)
+ gpu_write(gpu, regs[i].offset,
+ state ? regs[i].value : 0);
if (adreno_is_a540(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
@@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 regbit;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
- if (adreno_is_a540(adreno_gpu))
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
@@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- if (adreno_is_a510(adreno_gpu)) {
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
- (0x200 << 11 | 0x200 << 22));
} else {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
- if (adreno_is_a540(adreno_gpu))
+ else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ }
+
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x100 << 11 | 0x100 << 22));
+ else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ else
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x400 << 11 | 0x300 << 22));
- }
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+ /*
+ * Disable the RB sampler datapath DP2 clock gating optimization
+ * for 1-SP GPUs, as it is enabled by default.
+ */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
+
+ /* Disable UCHE global filter as SP can invalidate/flush independently */
+ gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
- gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
- gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
if (adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
+ regbit = 2;
+ else
+ regbit = 1;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+
+ /* Disable all flat shading optimization (ALLFLATOPTDIS) */
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* VPC */
gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
- gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
@@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- if (!adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
@@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_is_a510(adreno_gpu)) {
+ /* Adreno 508, 509, 510 and 512 need manual RBBM suspend/resume control */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_set_hwcg(gpu, true);
@@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
- /* A510 has 3 XIN ports in VBIF */
- if (adreno_is_a510(adreno_gpu))
+ /* A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
@@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
/*
* Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
+ * entries on Adreno A510 and A530 (the others will tend to lock up)
*/
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
ret = msm_gpu_pm_suspend(gpu);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index f176a6f3eff6..5ccc9da455a1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
int ret;
/* Not all A5xx chips have a GPMU */
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return 0;
/* Set up the limits management */
@@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return;
if (a5xx_gpu->gpmu_bo)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e6703ae98760..71c917f909af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
return;
}
@@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
}
@@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
return ret;
}
+struct a6xx_gmu_oob_bits {
+ int set, ack, set_new, ack_new;
+ const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ [GMU_OOB_GPU_SET] = {
+ .name = "GPU_SET",
+ .set = 16,
+ .ack = 24,
+ .set_new = 30,
+ .ack_new = 31,
+ },
+
+ [GMU_OOB_PERFCOUNTER_SET] = {
+ .name = "PERFCOUNTER",
+ .set = 17,
+ .ack = 25,
+ .set_new = 28,
+ .ack_new = 30,
+ },
+
+ [GMU_OOB_BOOT_SLUMBER] = {
+ .name = "BOOT_SLUMBER",
+ .set = 22,
+ .ack = 30,
+ },
+
+ [GMU_OOB_DCVS_SET] = {
+ .name = "GPU_DCVS",
+ .set = 23,
+ .ack = 31,
+ },
+};
+
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int ret;
u32 val;
int request, ack;
- const char *name;
- switch (state) {
- case GMU_OOB_GPU_SET:
- if (gmu->legacy) {
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
- } else {
- request = GMU_OOB_GPU_SET_REQUEST_NEW;
- ack = GMU_OOB_GPU_SET_ACK_NEW;
- }
- name = "GPU_SET";
- break;
- case GMU_OOB_BOOT_SLUMBER:
- request = GMU_OOB_BOOT_SLUMBER_REQUEST;
- ack = GMU_OOB_BOOT_SLUMBER_ACK;
- name = "BOOT_SLUMBER";
- break;
- case GMU_OOB_DCVS_SET:
- request = GMU_OOB_DCVS_REQUEST;
- ack = GMU_OOB_DCVS_ACK;
- name = "GPU_DCVS";
- break;
- default:
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
+
+ if (gmu->legacy) {
+ request = a6xx_gmu_oob_bits[state].set;
+ ack = a6xx_gmu_oob_bits[state].ack;
+ } else {
+ request = a6xx_gmu_oob_bits[state].set_new;
+ ack = a6xx_gmu_oob_bits[state].ack_new;
+ if (!request || !ack) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Invalid non-legacy GMU request %s\n",
+ a6xx_gmu_oob_bits[state].name);
+ return -EINVAL;
+ }
}
/* Trigger the requested OOB operation */
@@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
- name,
+ a6xx_gmu_oob_bits[state].name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
/* Clear the acknowledge interrupt */
@@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
- if (!gmu->legacy) {
- WARN_ON(state != GMU_OOB_GPU_SET);
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ int bit;
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
- }
- switch (state) {
- case GMU_OOB_GPU_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR);
- break;
- case GMU_OOB_BOOT_SLUMBER:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
- break;
- case GMU_OOB_DCVS_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_DCVS_CLEAR);
- break;
- }
+ if (gmu->legacy)
+ bit = a6xx_gmu_oob_bits[state].ack;
+ else
+ bit = a6xx_gmu_oob_bits[state].ack_new;
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
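
With both halves table-driven, keeping the GPU powered around a register access reduces to bracketing it with a set/clear pair on the same enum value. A minimal sketch mirroring the perfcounter read added later in this patch (the function name is illustrative):

static int read_cp_counter(struct a6xx_gmu *gmu, struct msm_gpu *gpu,
			   u64 *value)
{
	int ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);

	if (ret)
		return ret;

	/* the GPU is guaranteed on until the matching clear */
	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
			    REG_A6XX_RBBM_PERFCTR_CP_0_HI);

	a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
	return 0;
}
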
/* Enable CPU control of SPTP power collapse */
@@ -866,7 +885,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
if (IS_ERR_OR_NULL(gpu_opp))
return;
- dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
@@ -1072,7 +1091,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
- dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c6d2bced8e5d..71dfa60070cc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
*/
enum a6xx_gmu_oob_state {
+ /*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
GMU_OOB_BOOT_SLUMBER = 0,
+ /*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
GMU_OOB_GPU_SET,
+ /*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
GMU_OOB_DCVS_SET,
+ /*
+ * Used to keep the GPU on for CPU-side reads of performance counters.
+ */
+ GMU_OOB_PERFCOUNTER_SET,
};
-/* These are the interrupt / ack bits for each OOB request that are set
- * in a6xx_gmu_set_oob and a6xx_clear_oob
- */
-
-/*
- * Let the GMU know that a boot or slumber operation has started. The value in
- * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
- * doing
- */
-#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
-#define GMU_OOB_BOOT_SLUMBER_ACK 30
-#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
-
-/*
- * Set a new power level for the GPU when the CPU is doing frequency scaling
- */
-#define GMU_OOB_DCVS_REQUEST 23
-#define GMU_OOB_DCVS_ACK 31
-#define GMU_OOB_DCVS_CLEAR 31
-
-/*
- * Let the GMU know to not turn off any GPU registers while the CPU is in a
- * critical section
- */
-#define GMU_OOB_GPU_SET_REQUEST 16
-#define GMU_OOB_GPU_SET_ACK 24
-#define GMU_OOB_GPU_SET_CLEAR 24
-
-#define GMU_OOB_GPU_SET_REQUEST_NEW 30
-#define GMU_OOB_GPU_SET_ACK_NEW 31
-#define GMU_OOB_GPU_SET_CLEAR_NEW 31
-
-
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 130661898546..ba8e9d3cf0fe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/nvmem-consumer.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
- if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
@@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ static DEFINE_MUTEX(perfcounter_oob);
+
+ mutex_lock(&perfcounter_oob);
/* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
REG_A6XX_RBBM_PERFCTR_CP_0_HI);
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ mutex_unlock(&perfcounter_oob);
return 0;
}
@@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
+
+ if (a6xx_gpu->opp_table)
+ dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
+
kfree(a6xx_gpu);
}
@@ -1240,6 +1249,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
}
static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ adreno_set_llc_attributes(iommu);
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
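
The start/size arithmetic above is easiest to check with concrete numbers. A standalone illustration with an assumed aperture of [0x8000, 2^48 - 1] (all values invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aperture_start = 0x8000;
	uint64_t aperture_end = (1ULL << 48) - 1;
	uint64_t sz_16m = 16 << 20;
	uint64_t start = aperture_start > sz_16m ? aperture_start : sz_16m;

	/* start clamps up to 16 MiB, reserving the bottom of the space
	 * for GMEM and friends; size covers the rest of the aperture. */
	printf("start = %#llx size = %#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(aperture_end - start + 1));
	return 0;
}
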
+static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
@@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
+static u32 a618_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 169)
+ return 1;
+ else if (fuse == 174)
+ return 2;
+
+ return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+{
+ u32 val = UINT_MAX;
+
+ if (revn == 618)
+ val = a618_get_speed_bin(fuse);
+
+ if (val == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
+ fuse);
+ return UINT_MAX;
+ }
+
+ return (1 << val);
+}
+
+static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
+ u32 revn)
+{
+ struct opp_table *opp_table;
+ struct nvmem_cell *cell;
+ u32 supp_hw = UINT_MAX;
+ void *buf;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+ /*
+ * -ENOENT means that the platform doesn't support speedbin which is
+ * fine
+ */
+ if (PTR_ERR(cell) == -ENOENT)
+ return 0;
+ else if (IS_ERR(cell)) {
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+done:
+ opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
+
+ a6xx_gpu->opp_table = opp_table;
+ return 0;
+}
+
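
fuse_to_supp_hw() turns the raw fuse into the bitmask handed to dev_pm_opp_set_supported_hw(), so only OPP table entries whose opp-supported-hw cell intersects the mask stay enabled. A standalone restatement of the A618 mapping above (the program itself is illustrative; the fuse-to-bin values come straight from a618_get_speed_bin()):

#include <limits.h>
#include <stdio.h>

static unsigned int a618_bin(unsigned int fuse)
{
	switch (fuse) {
	case 0:   return 0;
	case 169: return 1;
	case 174: return 2;
	default:  return UINT_MAX;	/* unknown fuse: no bin */
	}
}

int main(void)
{
	unsigned int fuses[] = { 0, 169, 174, 42 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int bin = a618_bin(fuses[i]);

		if (bin == UINT_MAX)
			printf("fuse %u: unsupported\n", fuses[i]);
		else
			printf("fuse %u -> bin %u -> mask 0x%x\n",
			       fuses[i], bin, 1u << bin);
	}
	return 0;
}
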
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
},
@@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
a6xx_llc_slices_init(pdev, a6xx_gpu);
+ ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e793d329e77b..ce0610c5256f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -33,6 +33,8 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
+
+ struct opp_table *opp_table;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..600d445fabe8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM carveout in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
@@ -130,6 +134,41 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 0, 8, ANY_ID),
+ .revn = 508,
+ .name = "A508",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a508_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 0, 9, ANY_ID),
+ .revn = 509,
+ .name = "A509",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ /* Adreno 509 uses the same ZAP as 512 */
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 1, 0, ANY_ID),
.revn = 510,
.name = "A510",
@@ -145,6 +184,23 @@ static const struct adreno_info gpulist[] = {
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 1, 2, ANY_ID),
+ .revn = 512,
+ .name = "A512",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
@@ -164,7 +220,7 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 4, 0, 2),
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
.revn = 540,
.name = "A540",
.fw = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..0f184c3dd9d9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -186,13 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+ struct io_pgtable_domain_attr pgtbl_cfg;
+
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,15 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
- }
-
mmu = msm_iommu_new(&pdev->dev, iommu);
if (IS_ERR(mmu)) {
iommu_domain_free(iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..ccac275aa7a2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -196,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a508(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 508;
+}
+
+static inline int adreno_is_a509(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 509;
+}
+
static inline int adreno_is_a510(struct adreno_gpu *gpu)
{
return gpu->revn == 510;
}
+static inline int adreno_is_a512(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 512;
+}
+
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
@@ -272,6 +288,8 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
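The new adreno_is_a508/a509/a512 helpers follow the existing revn-check pattern; a hypothetical use, gating a code path on the cut-down SKUs:

	/* Sketch (hypothetical caller and flag): branch on the helpers
	 * rather than open-coding revn comparisons.
	 */
	if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
	    adreno_is_a512(adreno_gpu))
		use_reduced_config = true;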
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 5a056c1191df..b2be39b9144e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -4,8 +4,10 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
@@ -35,6 +37,8 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
@@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
tc_cfg.vsync_count = vsync_hz /
(mode->vtotal * drm_mode_vrefresh(mode));
- /* enable external TE after kickoff to avoid premature autorefresh */
- tc_cfg.hw_vsync_mode = 0;
-
/*
- * By setting sync_cfg_height to near max register value, we essentially
- * disable dpu hw generated TE signal, since hw TE will arrive first.
- * Only caveat is if due to error, we hit wrap-around.
+ * Set the sync_cfg_height to twice vtotal so that if we lose a
+	 * TE event coming from the display TE pin, we won't stall immediately
*/
- tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.hw_vsync_mode = 1;
+ tc_cfg.sync_cfg_height = mode->vtotal * 2;
tc_cfg.vsync_init_val = mode->vdisplay;
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
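A worked example of the tearcheck math above, using hypothetical panel numbers (19.2 MHz vsync clock, vtotal 2240 at 60 Hz):

	u32 vsync_count = 19200000 / (2240 * 60);	/* ~142 clock ticks per line */
	u32 sync_cfg_height = 2240 * 2;			/* 4480 lines: ~one frame of slack */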
@@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
atomic_read(&phys_enc->pending_kickoff_cnt));
}
+static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pp_vsync_info info;
+
+ if (!phys_enc)
+ return false;
+
+ phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
+ if (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
+ return true;
+
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_prepare_commit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int trial = 0;
+
+ if (!phys_enc)
+ return;
+ if (!phys_enc->hw_pp)
+ return;
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
+ return;
+
+	/*
+	 * If autorefresh is enabled, disable it and make sure it is safe to
+	 * proceed with the current frame commit/push. The sequence followed is:
+	 * 1. Disable TE
+	 * 2. Disable autorefresh config
+	 * 3. Poll for frame transfer ongoing to be false
+	 * 4. Enable TE back
+	 */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+ phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR_CMDENC(cmd_enc,
+ "disable autorefresh failed\n");
+ break;
+ }
+
+ trial++;
+ } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
+
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
+ "disabled autorefresh\n");
+}
+
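Spelling out the poll budget in the loop above:

	/* Each trial sleeps DPU_ENC_MAX_POLL_TIMEOUT_US (2 ms); the loop
	 * gives up once trial * 2 ms exceeds KICKOFF_TIMEOUT_MS (defined
	 * in dpu_encoder.h, not shown in this diff), so a stuck frame
	 * transfer can delay the commit by at most one kickoff timeout.
	 */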
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_phys *phys_enc)
{
@@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
-
- /* required for both controllers */
- if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
- return rc;
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
static int dpu_encoder_phys_cmd_wait_for_vblank(
@@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start(
static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
+ ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 90393fe9e59c..189f3533525c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -12,14 +12,17 @@
#define VIG_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
@@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
@@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
-#define DSPP_BLK(_name, _id, _base, _sblk) \
+#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x1800, \
- .features = DSPP_SC7180_MASK, \
+ .features = _mask, \
.sblk = _sblk \
}
static const struct dpu_dspp_cfg sc7180_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sc7180_dspp_sblk),
};
static const struct dpu_dspp_cfg sm8150_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
};
/*************************************************************
@@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
/*************************************************************
* INTF sub blocks config
*************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
.features = _features, \
.type = _type, \
.controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = 24 \
+ .prog_fetch_lines_worst_case = _progfetch \
}
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
};
static const struct dpu_intf_cfg sm8150_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
};
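With fetch depth now a macro argument, a hypothetical future target can deviate from the historical 24 lines without touching the macro again:

static const struct dpu_intf_cfg example_intf[] = {
	/* Hypothetical SoC needing a deeper programmable fetch on DP */
	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 35, INTF_SC7180_MASK),
};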
/*************************************************************
@@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mdp = sm8250_mdp,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
- /* TODO: sspp qseed version differs from 845 */
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
.mixer_count = ARRAY_SIZE(sm8150_lm),
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index eaef99db2d2f..ea4647d21a20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -95,6 +95,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
* @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space conversion
@@ -114,6 +115,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index bea4ab5c58c5..245a7a62b5c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -23,6 +23,7 @@
#define PP_WR_PTR_IRQ 0x024
#define PP_OUT_LINE_COUNT 0x028
#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
@@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
return 0;
}
+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable)
+{
+ DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+ enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 *frame_count)
+{
+ u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+ if (frame_count != NULL)
+ *frame_count = val & 0xffff;
+ return !!((val & BIT(31)) >> 31);
+}
+
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
u32 timeout_us)
{
@@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+ c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
+ c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
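A usage sketch of the two new ops together, mirroring the prepare_commit path added in dpu_encoder_phys_cmd.c above:

	u32 frames;

	if (pp->ops.get_autorefresh(pp, &frames))	/* currently armed? */
		pp->ops.setup_autorefresh(pp, 0, false);	/* write 0 to disarm */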
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 6902b9b95c8e..845b9ce80e31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg {
* @setup_tearcheck : program tear check values
* @enable_tearcheck : enables tear check
* @get_vsync_info : retrieves timing info of the panel
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @get_autorefresh : retrieve autorefresh config from hardware
* @setup_dither : function to program the dither hw block
* @get_line_count: obtain current vertical line counter
*/
@@ -95,6 +97,18 @@ struct dpu_hw_pingpong_ops {
struct dpu_hw_pp_vsync_info *info);
/**
+ * configure and enable the autorefresh config
+ */
+ void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable);
+
+ /**
+ * retrieve autorefresh config from hardware
+ */
+ bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 *frame_count);
+
+ /**
* poll until write pointer transmission starts
* @Return: 0 on success, -ETIMEDOUT on timeout
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 2c2ca5335aa8..34d81aa16041 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 85b018a9b03c..fdfd4b46e2c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -28,6 +28,7 @@ struct dpu_hw_pipe;
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
(1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
(1UL << DPU_SSPP_SCALER_QSEED4))
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 84e9875994a8..f94584c982cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_SEP_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+ (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
@@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
}
+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset;
+ u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 };
+
+ DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+ if (!scaler3_cfg->sep_lut)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+ for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
{
@@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
op_mode |= BIT(8);
}
- if (scaler3_cfg->lut_flag)
- _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
- scaler_offset);
+ if (scaler3_cfg->lut_flag) {
+ if (scaler_version < 0x2004)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+ else
+ _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+ }
if (scaler_version == 0x1002) {
phase_init =
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 234eb7d65753..ff3cffde84cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg {
* @ cir_lut: pointer to circular filter LUT
* @ sep_lut: pointer to separable filter LUT
* @ de: detail enhancer configuration
+ * @ dir_weight: Directional weight
*/
struct dpu_hw_scaler3_cfg {
u32 enable;
@@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg {
* Detail enhancer settings
*/
struct dpu_hw_scaler3_de_cfg de;
+
+ u32 dir_weight;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index cf867f3f7c36..b757054e1c23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -30,7 +30,7 @@
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 0x570 : 0x0590)
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
@@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level)
{
struct dpu_hw_blk_reg_map *c;
- u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+ u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
if (!vbif)
return;
c = &vbif->hw;
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
- reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
mask = 0x7 << reg_shift;
@@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
reg_val_lvl |= (remap_level << reg_shift) & mask;
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
- DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+ DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
}
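A worked example of the remap arithmetic, assuming a hypothetical client with xin_id = 9 and level = 1 on a DPU >= 4.00 part:

	/* Each 32-bit remap word packs eight 4-bit fields; reg_high picks
	 * the word, reg_shift the field inside it.
	 */
	u32 reg_high  = ((9 & 0x8) >> 3) * 4 + (1 * 8);	/* = 12 */
	u32 reg_shift = (9 & 0x7) * 4;			/* = 4 */
	/* so the level remap is written at 0x0590 + 12, bits [6:4] */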
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 374b0e8471e6..5a8e3e1fc48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
case DRM_MODE_ENCODER_TMDS:
info.num_of_h_tiles = 1;
break;
- };
+ }
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index bc0231a50132..f898a8f67b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index df10c1ac7591..94ce62a26daf 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
[3] = INTF_HDMI,
},
},
- .max_clk = 200000000,
+ .max_clk = 320000000,
};
static const struct mdp5_cfg_hw apq8084_config = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0c8f9f88301f..f5d71b274079 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
pp_done);
- complete(&mdp5_crtc->pp_completion);
+ complete_all(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 19b35ae3e927..1c6e1d2b947c 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- int const retry_count = 5;
struct dp_aux_private *aux = container_of(dp_aux,
struct dp_aux_private, dp_aux);
@@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
- if (aux->native) {
- aux->retry_cnt++;
- if (!(aux->retry_cnt % retry_count))
- dp_catalog_aux_update_cfg(aux->catalog);
- dp_catalog_aux_reset(aux->catalog);
- }
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 44f0c57798d0..b1a9b1b98f5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
return 0;
}
+/**
+ * dp_catalog_aux_reset() - reset AUX controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the AUX controller
+ *
+ * NOTE: resetting the AUX controller will also clear any pending HPD
+ * related interrupts
+ */
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
{
u32 aux_ctrl;
@@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
return 0;
}
+/**
+ * dp_catalog_ctrl_reset() - reset DP controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the DP controller
+ *
+ * NOTE: resetting the DP controller will also clear any pending HPD
+ * related interrupts
+ */
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
{
u32 sw_reset;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index e3462f5d96d7..1390f3547fde 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (!tu)
- return
+ return;
dp_panel_update_tu_timings(in, tu);
@@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
default:
ret = -EINVAL;
break;
- };
+ }
if (!ret)
DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
@@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* transitioned to PUSH_IDLE. In order to start transmitting
* a link training pattern, we have to first do soft reset.
*/
- dp_catalog_ctrl_reset(ctrl->catalog);
ret = dp_ctrl_link_train(ctrl, cr, training_step);
@@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
return ret;
}
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
@@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
ctrl->dp_ctrl.orientation = flip;
+ if (reset)
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1420,16 +1422,14 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
u8 *dpcd = ctrl->panel->dpcd;
- u32 edid_quirks = 0;
- edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
/*
* For better interop experience, use a fixed NVID=0x8000
* whenever connected to a VGA dongle downstream.
*/
if (drm_dp_is_branch(dpcd))
- return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
- DP_DPCD_QUIRK_CONSTANT_N));
+ return (drm_dp_has_quirk(&ctrl->panel->desc,
+ DP_DPCD_QUIRK_CONSTANT_N));
return false;
}
@@ -1498,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
- dp_catalog_ctrl_reset(ctrl->catalog);
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
@@ -1787,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
* Set up transfer unit values and set controller state to send
* video.
*/
+ reinit_completion(&ctrl->video_comp);
+
dp_ctrl_configure_source_params(ctrl);
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
- reinit_completion(&ctrl->video_comp);
-
dp_ctrl_setup_tr_unit(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index f60ba93c8678..a836bd358447 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -19,7 +19,7 @@ struct dp_ctrl {
u32 pixel_rate;
};
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..5a39da6e1eaf 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -350,7 +350,7 @@ end:
return rc;
}
-static void dp_display_host_init(struct dp_display_private *dp)
+static void dp_display_host_init(struct dp_display_private *dp, int reset)
{
bool flip = false;
@@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
dp_display_set_encoder_mode(dp);
dp_power_init(dp->power, flip);
- dp_ctrl_host_init(dp->ctrl, flip);
+ dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
goto end;
}
- dp_display_host_init(dp);
+ dp_display_host_init(dp, false);
/*
* set sink to normal operation mode -- D0
@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(g_dp_display, false);
reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
DP_DP_IRQ_HPD_INT_MASK, true);
@@ -693,6 +693,20 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+	if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
+		/* wait until ST_CONNECTED */
+		dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+		mutex_unlock(&dp->event_mutex);
+		return 0;
+	}
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
@@ -883,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
+ /* signal the disconnect event */
+ reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
@@ -995,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
static void dp_display_config_hpd(struct dp_display_private *dp)
{
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
/* Enable interrupt first time
@@ -1249,7 +1266,7 @@ static int dp_pm_resume(struct device *dev)
dp->hpd_state = ST_DISCONNECTED;
/* turn on dp ctrl/phy */
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
@@ -1432,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
state = dp_display->hpd_state;
if (state == ST_DISPLAY_OFF)
- dp_display_host_init(dp_display);
+ dp_display_host_init(dp_display, true);
dp_display_enable(dp_display, 0);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..9cc816663668 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
@@ -403,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
- int rc = 0;
struct drm_display_mode *drm_mode;
drm_mode = &dp_panel->dp_mode.drm_mode;
@@ -430,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
min_t(u32, dp_panel->dp_mode.bpp, 30));
DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
- return rc;
+ return 0;
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1afb7c579dbb..eca86bf448f7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
- .io_start = { 0xfd998300, 0xfd9a0300 },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index e4e9bf04b736..de3b802ccd3d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
multiplier = 1 << config->frac_bits;
dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (pll_freq <= 1900000000UL)
regs->pll_prop_gain_rate = 8;
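A worked example of the combined divide, assuming frac_bits = 18 (multiplier = 262144), a 38.4 MHz divider (19.2 MHz reference doubled) and a 1.5 GHz target:

	u32 frac;
	u64 dec_multiple = div_u64(1500000000ULL * 262144, 38400000);	/* 10240000 */
	u64 dec = div_u64_rem(dec_multiple, 262144, &frac);	/* dec = 39, frac = 16384 */
	/* i.e. 39 + 16384/262144 = 39.0625 reference multiples */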
@@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll)
reg->frac_div_start_mid);
pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
- pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
+ reg->pll_lockdet_rate);
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
@@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
+ struct device *dev = &pll->pdev->dev;
int rc;
u32 status = 0;
u32 const delay_us = 100;
@@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
delay_us,
timeout_us);
if (rc)
- pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
- pll->id, status);
+ DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
return rc;
}
@@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct device *dev = &pll_10nm->pdev->dev;
int rc;
dsi_pll_enable_pll_bias(pll_10nm);
@@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
if (rc) {
- pr_err("vco_set_rate failed, rc=%d\n", rc);
+ DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
return rc;
}
@@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Check for PLL lock */
rc = dsi_pll_10nm_lock_status(pll_10nm);
if (rc) {
- pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
goto error;
}
@@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct dsi_pll_config *config = &pll_10nm->pll_configuration;
void __iomem *base = pll_10nm->mmio;
u64 ref_clk = pll_10nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
- multiplier = 1 << 18;
+ multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
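A consistency check with the same hypothetical numbers as the dec/frac example earlier:

	/* With dec = 39, frac = 16384, frac_bits = 18, ref_clk = 19.2 MHz:
	 *
	 *   vco = dec * (2 * ref) + (2 * ref * frac) / (1 << frac_bits)
	 *       = 39 * 38.4 MHz + 38.4 MHz * 0.0625 = 1.5 GHz
	 *
	 * The multiplier must therefore track frac_bits; the hard-coded
	 * 1 << 18 was only correct for configs with 18 fractional bits.
	 */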
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..94525ac76d4e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_file *file, struct drm_gem_object *obj,
uint64_t *iova)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- if (!ctx->aspace)
+ if (!priv->gpu)
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9a7c49bc394f..f091c1e164fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -985,8 +987,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
- if (msm_obj->pages)
- kvfree(msm_obj->pages);
+ kvfree(msm_obj->pages);
+
+ put_iova_vmas(obj);
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
@@ -997,11 +1000,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1115,6 +1117,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1126,9 +1130,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
@@ -1211,7 +1215,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index d04c349d8112..5480852bdeda 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
submit->cmd[i].idx = submit_cmd.submit_idx;
submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+ userptr = u64_to_user_ptr(submit_cmd.relocs);
+
sz = array_size(submit_cmd.nr_relocs,
sizeof(struct drm_msm_gem_submit_reloc));
/* check for overflow: */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8151a89e163..4735251a394d 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -157,6 +157,7 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
+ struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
- mutex_init(&kms->commit_lock[i]);
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
+ lockdep_register_key(&kms->commit_lock_keys[i]);
+ __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
+ &kms->commit_lock_keys[i]);
+ }
kms->funcs = funcs;
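Dynamically registered lockdep keys have to be unregistered before their storage is freed; a sketch of the matching teardown (assumption: the driver pairs the init above with an msm_kms_destroy counterpart):

static inline void msm_kms_destroy(struct msm_kms *kms)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
		mutex_destroy(&kms->commit_lock[i]);
		lockdep_unregister_key(&kms->commit_lock_keys[i]);
	}
}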
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 9d4a2d97507e..1d3542d6006b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -200,16 +200,17 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
- if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
- (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
0x7c, &type);
@@ -251,11 +252,12 @@ void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
- else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
- (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 42687ea2a4ca..ce3d8c6ef000 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -488,12 +488,13 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
- dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
+ if (pdev->device == 0x0174 || pdev->device == 0x0179 ||
+ pdev->device == 0x0189 || pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 5ace5e906949..f0a24126641a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -130,7 +130,7 @@ static inline bool
nv_two_heads(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -142,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
static inline bool
nv_gf4_disp_arch(struct drm_device *dev)
{
- return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
+ return nv_two_heads(dev) && (to_pci_dev(dev->dev)->device & 0x0ff0) != 0x0110;
}
static inline bool
nv_two_reg_pll(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
@@ -160,9 +160,11 @@ static inline bool
nv_match_device(struct drm_device *dev, unsigned device,
unsigned sub_vendor, unsigned sub_device)
{
- return dev->pdev->device == device &&
- dev->pdev->subsystem_vendor == sub_vendor &&
- dev->pdev->subsystem_device == sub_device;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pdev->device == device &&
+ pdev->subsystem_vendor == sub_vendor &&
+ pdev->subsystem_device == sub_device;
}
#include <subdev/bios/init.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b674d68ef28a..f7d35657aa64 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -214,14 +214,15 @@ nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nvkm_pll_vals pllvals;
int ret;
int domain;
- domain = pci_domain_nr(dev->pdev->bus);
+ domain = pci_domain_nr(pdev->bus);
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
+ (pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
0x6c, &mpllP);
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
+ (pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
@@ -309,6 +310,7 @@ void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint8_t misc, gr4, gr5, gr6, seq2, seq4;
bool graphicsmode;
unsigned plane;
@@ -327,7 +329,7 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
/* map first 64KiB of VRAM, holds VGA fonts etc */
- iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+ iovram = ioremap(pci_resource_start(pdev, 1), 65536);
if (!iovram) {
NV_ERROR(drm, "Failed to map VRAM, "
"cannot save/restore VGA fonts.\n");
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 6fdddb266fb1..4488e1c061b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 302d4e6fc52f..788db043a342 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
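The base827c hunk below makes the same change; the point in both is that clearing USE_GAIN_OFS alone is not enough:

	/* Sketch of the stale-state hazard being fixed: a LUT -> no-LUT
	 * transition previously left the old GAIN/OFS values latched in
	 * the hardware even though USE_GAIN_OFS was disabled, so the
	 * disable path now programs both to zero explicitly.
	 */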
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 18d34096f125..093d4ba6910e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index 27ea3f34706d..abefc2343443 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e6f16a7750f0..1a1d806e0b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -36,7 +36,7 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 5)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 9035d3ab062c..42f877f2ced2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -54,7 +54,7 @@ corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 9)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 121c24a18f11..31d8b2e4791d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 33fff388dd83..196612addfd6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -220,9 +220,13 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return 0;
}
+MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
+static int nv50_dmac_vram_pushbuf = -1;
+module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
+
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -241,7 +245,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
*
* This appears to match NVIDIA's behaviour on Pascal.
*/
- if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ if ((nv50_dmac_vram_pushbuf > 0) ||
+ (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
@@ -271,7 +276,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
@@ -305,6 +310,14 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
* Output path helpers
*****************************************************************************/
static void
+nv50_outp_dump_caps(struct nouveau_drm *drm,
+ struct nouveau_encoder *outp)
+{
+ NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
+ outp->base.base.name, outp->caps.dp_interlace);
+}
+
+static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
@@ -419,8 +432,7 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
}
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -436,8 +448,7 @@ nv50_outp_get_new_connector(struct nouveau_encoder *outp,
}
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -452,27 +463,44 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp,
return NULL;
}
+static struct nouveau_crtc *
+nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const u32 mask = drm_encoder_mask(&outp->base.base);
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->encoder_mask & mask)
+ return nouveau_crtc(crtc);
+ }
+
+ return NULL;
+}
+
/******************************************************************************
* DAC
*****************************************************************************/
static void
-nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -493,7 +521,7 @@ nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
asyh->or.depth = 0;
- nv_encoder->crtc = encoder->crtc;
+ nv_encoder->crtc = &nv_crtc->base;
}
static enum drm_connector_status
@@ -526,8 +554,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_dac_enable,
- .atomic_disable = nv50_dac_disable,
+ .atomic_enable = nv50_dac_atomic_enable,
+ .atomic_disable = nv50_dac_atomic_disable,
.detect = nv50_dac_detect
};
@@ -593,34 +621,27 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
- struct drm_connector *connector;
struct nouveau_crtc *nv_crtc;
- struct drm_connector_list_iter conn_iter;
int ret = 0;
*enabled = false;
+ mutex_lock(&drm->audio.lock);
+
drm_for_each_encoder(encoder, drm->dev) {
struct nouveau_connector *nv_connector = NULL;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue; /* TODO */
+
nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_connector(nv_encoder->audio.connector);
+ nv_crtc = nouveau_crtc(nv_encoder->crtc);
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->best_encoder == encoder) {
- nv_connector = nouveau_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- if (!nv_connector)
+ if (!nv_crtc || nv_encoder->or != port || nv_crtc->index != dev_id)
continue;
- nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_crtc || nv_encoder->or != port ||
- nv_crtc->index != dev_id)
- continue;
- *enabled = nv_encoder->audio;
+ *enabled = nv_encoder->audio.enabled;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -629,6 +650,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
break;
}
+ mutex_unlock(&drm->audio.lock);
+
return ret;
}
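
With the connector now cached in nv_encoder->audio at enable time, the ELD callback no longer walks the connector list; it matches purely on SOR index (port) and head index (dev_id) under the new audio lock, and copies the ELD that was snapshotted when audio was enabled.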
@@ -678,17 +701,22 @@ static const struct component_ops nv50_audio_component_bind_ops = {
static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
- if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
- drm->audio.component_registered = true;
+ if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ return;
+
+ drm->audio.component_registered = true;
+ mutex_init(&drm->audio.lock);
}
static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
- if (drm->audio.component_registered) {
- component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
- drm->audio.component_registered = false;
- }
+ if (!drm->audio.component_registered)
+ return;
+
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ mutex_destroy(&drm->audio.lock);
}
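
The lock's lifetime is tied to component registration, so every reader and writer of the per-encoder audio state runs under it. A sketch of the fields this assumes (in the real driver these live in an anonymous audio struct inside struct nouveau_drm; the standalone layout here is illustrative):

struct nouveau_drm_audio {
	struct drm_audio_component *component;
	bool component_registered;
	struct mutex lock; /* protects nv_encoder->audio.* and ELD reads */
};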
/******************************************************************************
@@ -711,24 +739,25 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
- if (!nv_encoder->audio)
- return;
-
- nv_encoder->audio = false;
- nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ mutex_lock(&drm->audio.lock);
+ if (nv_encoder->audio.enabled) {
+ nv_encoder->audio.enabled = false;
+ nv_encoder->audio.connector = NULL;
+ nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ }
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
}
static void
-nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct __packed {
struct {
@@ -744,15 +773,19 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
(0x0100 << nv_crtc->index),
};
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
+ mutex_lock(&drm->audio.lock);
+
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
- nv_encoder->audio = true;
+ nv_encoder->audio.enabled = true;
+ nv_encoder->audio.connector = &nv_connector->base;
+
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
@@ -781,12 +814,12 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
}
static void
-nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
@@ -801,7 +834,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
.pwr.state = 1,
.pwr.rekey = 56, /* binary driver, and tegra, constant */
};
- struct nouveau_connector *nv_connector;
struct drm_hdmi_info *hdmi;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
@@ -811,7 +843,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
int ret;
int size;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
@@ -857,7 +888,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
/* If SCDC is supported by the downstream monitor, update
* divider / scrambling settings to what we programmed above.
@@ -898,6 +929,7 @@ struct nv50_mstc {
struct nv50_msto {
struct drm_encoder encoder;
+ /* head is statically assigned on msto creation */
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
@@ -1056,11 +1088,12 @@ nv50_dp_bpc_to_depth(unsigned int bpc)
}
static void
-nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
- struct nv50_head *head = nv50_head(encoder->crtc);
- struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_head *head = msto->head;
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
@@ -1081,8 +1114,7 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
if (WARN_ON(!mstc))
return;
- r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
- armh->dp.tu);
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
if (!r)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
@@ -1094,15 +1126,15 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- mstm->outp->update(mstm->outp, head->base.index, armh, proto,
- nv50_dp_bpc_to_depth(armh->or.bpc));
+ mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
+ nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
mstm->modified = true;
}
static void
-nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
@@ -1119,8 +1151,8 @@ nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
- .atomic_disable = nv50_msto_disable,
- .atomic_enable = nv50_msto_enable,
+ .atomic_disable = nv50_msto_atomic_disable,
+ .atomic_enable = nv50_msto_atomic_enable,
.atomic_check = nv50_msto_atomic_check,
};
@@ -1616,43 +1648,38 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
}
static void
-nv50_sor_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct nouveau_connector *nv_connector =
- nv50_outp_get_old_connector(nv_encoder, state);
-
- nv_encoder->crtc = NULL;
-
- if (nv_crtc) {
- struct drm_dp_aux *aux = &nv_connector->aux;
- u8 pwr;
+ struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ u8 pwr;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
- if (ret == 0) {
- pwr &= ~DP_SET_POWER_MASK;
- pwr |= DP_SET_POWER_D3;
- drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
- }
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
-
- nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
- nv50_audio_disable(encoder, nv_crtc);
- nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
- nv50_outp_release(nv_encoder);
}
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ nv50_outp_release(nv_encoder);
+ nv_encoder->crtc = NULL;
}
static void
-nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
@@ -1672,8 +1699,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
- nv_encoder->crtc = encoder->crtc;
+ nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
+ nv_encoder->crtc = &nv_crtc->base;
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
@@ -1699,7 +1726,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
- nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, nv_crtc, nv_connector, state, mode);
break;
case DCB_OUTPUT_LVDS:
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
@@ -1740,7 +1767,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
break;
default:
BUG();
@@ -1753,8 +1780,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_sor_enable,
- .atomic_disable = nv50_sor_disable,
+ .atomic_enable = nv50_sor_atomic_enable,
+ .atomic_disable = nv50_sor_atomic_disable,
};
static void
@@ -1821,6 +1848,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -1875,23 +1903,24 @@ nv50_pior_atomic_check(struct drm_encoder *encoder,
}
static void
-nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -1929,8 +1958,8 @@ nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
- .atomic_enable = nv50_pior_enable,
- .atomic_disable = nv50_pior_disable,
+ .atomic_enable = nv50_pior_atomic_enable,
+ .atomic_disable = nv50_pior_atomic_disable,
};
static void
@@ -1991,6 +2020,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
return 0;
}
@@ -2663,6 +2693,14 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
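Userspace sees these limits through the DRM cursor caps, which the core fills from mode_config. A minimal client-side check (libdrm, assuming an already-open DRM fd):

#include <stdint.h>
#include <xf86drm.h>

static int cursor_limits(int fd, uint64_t *w, uint64_t *h)
{
	int ret = drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, w);

	if (ret)
		return ret;
	return drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, h);
}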
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 92bddc083617..38dec11e7dda 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
/*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 537c1ef2e464..ec361d17e900 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -503,7 +503,6 @@ nv50_head_destroy(struct drm_crtc *crtc)
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -518,7 +517,6 @@ nv50_head_func = {
static const struct drm_crtc_funcs
nvd9_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 8f860e9c5224..85648d790743 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -322,7 +322,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
const int i = head->base.index;
int ret;
- if ((ret = PUSH_WAIT(push, 14)))
+ if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
@@ -353,14 +353,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
- NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
-
- HEAD_SET_CRC_CONTROL(i),
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index a5d827403660..ea9f8667305e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -22,6 +22,7 @@
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
@@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
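
The PUSH_WAIT(push, 5) above reserves exactly what the two PUSH_MTHD calls consume: one header plus two data words for the adjacent SET_CONTROL_CURSOR/SET_OFFSET_CURSOR pair, and one header plus one data word for SET_CONTEXT_DMA_CURSOR. As a compile-time sanity check (a sketch, not part of the driver):

_Static_assert((1 + 2) + (1 + 1) == 5, "head917d cursor push size");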
+
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
@@ -101,7 +127,7 @@ head917d = {
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index a1ac153d5e98..566fbddfc8d7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 685b70871324..b390029c69ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0356474ad6f6..271de3a63f21 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
@@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
@@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
@@ -784,6 +793,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 3278e2880034..f4e0c5080034 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 429be0bb0222..abdd3bb658b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 000000000000..7a370fa1df20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc67e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
index 2a2612d6e1e0..fb223723a38a 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
@@ -66,6 +66,10 @@
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index cd9a2e687bb6..0b86c44878e0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
+#define NV_DEVICE_INFO_V0_AMPERE 0x0d
__u8 family;
__u8 pad06[2];
__u64 ram_size;
@@ -59,37 +60,33 @@ struct nv_device_time_v0 {
#define NV_DEVICE_INFO_UNIT (0xffffffffULL << 32)
#define NV_DEVICE_INFO(n) ((n) | (0x00000000ULL << 32))
-#define NV_DEVICE_FIFO(n) ((n) | (0x00000001ULL << 32))
+#define NV_DEVICE_HOST(n) ((n) | (0x00000001ULL << 32))
-/* This will be returned for unsupported queries. */
+/* This will be returned in the mthd field for unsupported queries. */
#define NV_DEVICE_INFO_INVALID ~0ULL
-/* These return a mask of available engines of particular type. */
-#define NV_DEVICE_INFO_ENGINE_SW NV_DEVICE_INFO(0x00000000)
-#define NV_DEVICE_INFO_ENGINE_GR NV_DEVICE_INFO(0x00000001)
-#define NV_DEVICE_INFO_ENGINE_MPEG NV_DEVICE_INFO(0x00000002)
-#define NV_DEVICE_INFO_ENGINE_ME NV_DEVICE_INFO(0x00000003)
-#define NV_DEVICE_INFO_ENGINE_CIPHER NV_DEVICE_INFO(0x00000004)
-#define NV_DEVICE_INFO_ENGINE_BSP NV_DEVICE_INFO(0x00000005)
-#define NV_DEVICE_INFO_ENGINE_VP NV_DEVICE_INFO(0x00000006)
-#define NV_DEVICE_INFO_ENGINE_CE NV_DEVICE_INFO(0x00000007)
-#define NV_DEVICE_INFO_ENGINE_SEC NV_DEVICE_INFO(0x00000008)
-#define NV_DEVICE_INFO_ENGINE_MSVLD NV_DEVICE_INFO(0x00000009)
-#define NV_DEVICE_INFO_ENGINE_MSPDEC NV_DEVICE_INFO(0x0000000a)
-#define NV_DEVICE_INFO_ENGINE_MSPPP NV_DEVICE_INFO(0x0000000b)
-#define NV_DEVICE_INFO_ENGINE_MSENC NV_DEVICE_INFO(0x0000000c)
-#define NV_DEVICE_INFO_ENGINE_VIC NV_DEVICE_INFO(0x0000000d)
-#define NV_DEVICE_INFO_ENGINE_SEC2 NV_DEVICE_INFO(0x0000000e)
-#define NV_DEVICE_INFO_ENGINE_NVDEC NV_DEVICE_INFO(0x0000000f)
-#define NV_DEVICE_INFO_ENGINE_NVENC NV_DEVICE_INFO(0x00000010)
-
+/* Returns the number of available runlists. */
+#define NV_DEVICE_HOST_RUNLISTS NV_DEVICE_HOST(0x00000000)
/* Returns the number of available channels. */
-#define NV_DEVICE_FIFO_CHANNELS NV_DEVICE_FIFO(0x00000000)
-
-/* Returns a mask of available runlists. */
-#define NV_DEVICE_FIFO_RUNLISTS NV_DEVICE_FIFO(0x00000001)
+#define NV_DEVICE_HOST_CHANNELS NV_DEVICE_HOST(0x00000001)
-/* These return a mask of engines available on a particular runlist. */
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n) ((n) + NV_DEVICE_FIFO(0x00000010))
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE 64
+/* Returns a mask of available engine types on runlist(data). */
+#define NV_DEVICE_HOST_RUNLIST_ENGINES NV_DEVICE_HOST(0x00000100)
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SW 0x00000001
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_GR 0x00000002
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MPEG 0x00000004
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_ME 0x00000008
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CIPHER 0x00000010
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_BSP 0x00000020
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VP 0x00000040
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CE 0x00000080
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC 0x00000100
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD 0x00000200
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC 0x00000400
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP 0x00000800
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSENC 0x00001000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VIC 0x00002000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2 0x00004000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC 0x00008000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC 0x00010000
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 2c79beb41126..ba2c28ea43d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -88,6 +88,7 @@
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GA102_DISP /* cl5070.h */ 0x0000c670
#define GV100_DISP_CAPS 0x0000c373
@@ -103,6 +104,7 @@
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR /* cl507a.h */ 0x0000c67a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -112,6 +114,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c67b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -135,6 +138,7 @@
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c67d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -145,6 +149,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c67e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
index e9468c9f9abf..d351ac890ca1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
@@ -2,15 +2,15 @@
#define __NVIF_FIFO_H__
#include <nvif/device.h>
-/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */
+/* Returns mask of runlists that support an NV_DEVICE_HOST_RUNLIST_ENGINES_* type. */
u64 nvif_fifo_runlist(struct nvif_device *, u64 engine);
/* CE-supporting runlists (excluding GRCE, if others exist). */
static inline u64
nvif_fifo_runlist_ce(struct nvif_device *device)
{
- u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
- u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE);
+ u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
+ u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_CE);
if (runmce && !(runmce &= ~runmgr))
runmce = runmgr;
return runmce;
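
The mask logic prefers copy-engine runlists that are not shared with graphics: runmce &= ~runmgr strips GRCE runlists, and only when that empties the mask does it fall back to them. With illustrative values:

/* runmgr = 0x1, runmce = 0x3: returns 0x2 (the async CE runlist) */
/* runmgr = 0x1, runmce = 0x1: mask empties, returns 0x1 (GRCE)   */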
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 168d7694ede5..6d3a8a3d2087 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
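
This drops the redundant running-count parameter c: the header's data-word count is now computed as (ds)+(n), the last method's data size plus the words accumulated from the preceding methods, which the old code threaded through both c and n. Tracing a two-method call roughly (a sketch of the expansion, not new code):

/* PUSH_2D(...) -> PUSH_2(X, DATA_, 1, 0, ...)
 *             -> PUSH_1(X, DATA_, 1, (1)+(0), ...)
 *             -> PUSH_<o>_HDR((p), s, mA, (1)+(1)) -- a 2-dword count, as before */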
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5c007ce62fc3..a18b6cfda07e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -3,79 +3,7 @@
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
#include <core/event.h>
-
-enum nvkm_devidx {
- NVKM_SUBDEV_PCI,
- NVKM_SUBDEV_VBIOS,
- NVKM_SUBDEV_DEVINIT,
- NVKM_SUBDEV_TOP,
- NVKM_SUBDEV_IBUS,
- NVKM_SUBDEV_GPIO,
- NVKM_SUBDEV_I2C,
- NVKM_SUBDEV_FUSE,
- NVKM_SUBDEV_MXM,
- NVKM_SUBDEV_MC,
- NVKM_SUBDEV_BUS,
- NVKM_SUBDEV_TIMER,
- NVKM_SUBDEV_INSTMEM,
- NVKM_SUBDEV_FB,
- NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_MMU,
- NVKM_SUBDEV_BAR,
- NVKM_SUBDEV_FAULT,
- NVKM_SUBDEV_ACR,
- NVKM_SUBDEV_PMU,
- NVKM_SUBDEV_VOLT,
- NVKM_SUBDEV_ICCSENSE,
- NVKM_SUBDEV_THERM,
- NVKM_SUBDEV_CLK,
- NVKM_SUBDEV_GSP,
-
- NVKM_ENGINE_BSP,
-
- NVKM_ENGINE_CE0,
- NVKM_ENGINE_CE1,
- NVKM_ENGINE_CE2,
- NVKM_ENGINE_CE3,
- NVKM_ENGINE_CE4,
- NVKM_ENGINE_CE5,
- NVKM_ENGINE_CE6,
- NVKM_ENGINE_CE7,
- NVKM_ENGINE_CE8,
- NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8,
-
- NVKM_ENGINE_CIPHER,
- NVKM_ENGINE_DISP,
- NVKM_ENGINE_DMAOBJ,
- NVKM_ENGINE_FIFO,
- NVKM_ENGINE_GR,
- NVKM_ENGINE_IFB,
- NVKM_ENGINE_ME,
- NVKM_ENGINE_MPEG,
- NVKM_ENGINE_MSENC,
- NVKM_ENGINE_MSPDEC,
- NVKM_ENGINE_MSPPP,
- NVKM_ENGINE_MSVLD,
-
- NVKM_ENGINE_NVENC0,
- NVKM_ENGINE_NVENC1,
- NVKM_ENGINE_NVENC2,
- NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
-
- NVKM_ENGINE_NVDEC0,
- NVKM_ENGINE_NVDEC1,
- NVKM_ENGINE_NVDEC2,
- NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
-
- NVKM_ENGINE_PM,
- NVKM_ENGINE_SEC,
- NVKM_ENGINE_SEC2,
- NVKM_ENGINE_SW,
- NVKM_ENGINE_VIC,
- NVKM_ENGINE_VP,
-
- NVKM_SUBDEV_NR
-};
+enum nvkm_subdev_type;
enum nvkm_device_type {
NVKM_DEVICE_PCI,
@@ -102,7 +30,6 @@ struct nvkm_device {
struct nvkm_event event;
- u64 disable_mask;
u32 debug;
const struct nvkm_device_chip *chip;
@@ -120,6 +47,7 @@ struct nvkm_device {
GP100 = 0x130,
GV100 = 0x140,
TU100 = 0x160,
+ GA100 = 0x170,
} card_type;
u32 chipset;
u8 chiprev;
@@ -129,58 +57,16 @@ struct nvkm_device {
struct notifier_block nb;
} acpi;
- struct nvkm_acr *acr;
- struct nvkm_bar *bar;
- struct nvkm_bios *bios;
- struct nvkm_bus *bus;
- struct nvkm_clk *clk;
- struct nvkm_devinit *devinit;
- struct nvkm_fault *fault;
- struct nvkm_fb *fb;
- struct nvkm_fuse *fuse;
- struct nvkm_gpio *gpio;
- struct nvkm_gsp *gsp;
- struct nvkm_i2c *i2c;
- struct nvkm_subdev *ibus;
- struct nvkm_iccsense *iccsense;
- struct nvkm_instmem *imem;
- struct nvkm_ltc *ltc;
- struct nvkm_mc *mc;
- struct nvkm_mmu *mmu;
- struct nvkm_subdev *mxm;
- struct nvkm_pci *pci;
- struct nvkm_pmu *pmu;
- struct nvkm_therm *therm;
- struct nvkm_timer *timer;
- struct nvkm_top *top;
- struct nvkm_volt *volt;
-
- struct nvkm_engine *bsp;
- struct nvkm_engine *ce[9];
- struct nvkm_engine *cipher;
- struct nvkm_disp *disp;
- struct nvkm_dma *dma;
- struct nvkm_fifo *fifo;
- struct nvkm_gr *gr;
- struct nvkm_engine *ifb;
- struct nvkm_engine *me;
- struct nvkm_engine *mpeg;
- struct nvkm_engine *msenc;
- struct nvkm_engine *mspdec;
- struct nvkm_engine *msppp;
- struct nvkm_engine *msvld;
- struct nvkm_nvenc *nvenc[3];
- struct nvkm_nvdec *nvdec[3];
- struct nvkm_pm *pm;
- struct nvkm_engine *sec;
- struct nvkm_sec2 *sec2;
- struct nvkm_sw *sw;
- struct nvkm_engine *vic;
- struct nvkm_engine *vp;
+#define NVKM_LAYOUT_ONCE(type,data,ptr) data *ptr;
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt) data *ptr[cnt];
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+ struct list_head subdev;
};
-struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
-struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
@@ -201,55 +87,15 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
-
- int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **);
- int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
- int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
- int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
- int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
- int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
- int (*fault )(struct nvkm_device *, int idx, struct nvkm_fault **);
- int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
- int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
- int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
- int (*gsp )(struct nvkm_device *, int idx, struct nvkm_gsp **);
- int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
- int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
- int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
- int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
- int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
- int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
- int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
- int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
- int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
- int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
- int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
-
- int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[9] )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
- int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
- int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
- int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
- int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
- int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
- int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
- int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
- int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
- int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...) \
+ struct { \
+ u32 inst; \
+ int (*ctor)(struct nvkm_device *, enum nvkm_subdev_type, int inst, data **); \
+ } ptr;
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
};
struct nvkm_device *nvkm_device_find(u64 name);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index c6b401a6ea23..e58923b67d74 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -6,12 +6,18 @@
struct nvkm_fifo_chan;
struct nvkm_fb_tile;
+extern const struct nvkm_subdev_func nvkm_engine;
+
struct nvkm_engine {
const struct nvkm_engine_func *func;
struct nvkm_subdev subdev;
spinlock_t lock;
- int usecount;
+ struct {
+ refcount_t refcount;
+ struct mutex mutex;
+ bool enabled;
+ } use;
};
struct nvkm_engine_func {
@@ -42,9 +48,10 @@ struct nvkm_engine_func {
};
int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_engine *);
+ enum nvkm_subdev_type, int inst, bool enable, struct nvkm_engine *);
int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_engine **);
+ enum nvkm_subdev_type, int, bool enable, struct nvkm_engine **);
+
struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
void nvkm_engine_unref(struct nvkm_engine **);
void nvkm_engine_tile(struct nvkm_engine *, int region);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index ce98efd4b209..070462be35d9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -8,6 +8,7 @@ struct nvkm_enum {
const char *name;
const void *data;
u32 data2;
+ int inst;
};
const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
index 3981cb106aae..fd9a3f9a518e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -21,11 +21,11 @@ void nvkm_falcon_v1_disable(struct nvkm_falcon *);
void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
int gp102_sec2_flcn_enable(struct nvkm_falcon *);
-#define FLCN_PRINTK(t,f,fmt,a...) do { \
- if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \
- nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
- else \
- nvkm_##t((f)->owner, fmt"\n", ##a); \
+#define FLCN_PRINTK(t,f,fmt,a...) do { \
+ if ((f)->owner->name != (f)->name) \
+ nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+ else \
+ nvkm_##t((f)->owner, fmt"\n", ##a); \
} while(0)
#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
new file mode 100644
index 000000000000..7afe1579b20f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI , struct nvkm_pci , pci)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS , struct nvkm_bios , bios)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_DEVINIT , struct nvkm_devinit , devinit)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PRIVRING, struct nvkm_subdev , privring)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GPIO , struct nvkm_gpio , gpio)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_I2C , struct nvkm_i2c , i2c)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FUSE , struct nvkm_fuse , fuse)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MXM , struct nvkm_subdev , mxm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MC , struct nvkm_mc , mc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BUS , struct nvkm_bus , bus)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TIMER , struct nvkm_timer , timer)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_INSTMEM , struct nvkm_instmem , imem)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FB , struct nvkm_fb , fb)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_LTC , struct nvkm_ltc , ltc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MMU , struct nvkm_mmu , mmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BAR , struct nvkm_bar , bar)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FAULT , struct nvkm_fault , fault)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ACR , struct nvkm_acr , acr)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PMU , struct nvkm_pmu , pmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VOLT , struct nvkm_volt , volt)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ICCSENSE, struct nvkm_iccsense, iccsense)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_THERM , struct nvkm_therm , therm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_CLK , struct nvkm_clk , clk)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
+NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
+
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_FIFO , struct nvkm_fifo , fifo)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_GR , struct nvkm_gr , gr)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_IFB , struct nvkm_engine , ifb)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_ME , struct nvkm_engine , me)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MPEG , struct nvkm_engine , mpeg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC , struct nvkm_engine , msenc)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VIC , struct nvkm_engine , vic)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VP , struct nvkm_engine , vp)
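The point of this header is that it expands differently at each inclusion site, depending on how NVKM_LAYOUT_ONCE/NVKM_LAYOUT_INST are defined there: device.h (above) turns each entry into a constructor slot, subdev.h (below) into an enumerator. A sketch of a third expansion of the same kind, building the printable-name table declared in subdev.h (assumed form):

	const char *nvkm_subdev_type[NVKM_SUBDEV_NR] = {
	#define NVKM_LAYOUT_ONCE(type, data, ptr, ...) [type] = #ptr,
	#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE
	#include <core/layout.h>
	#undef NVKM_LAYOUT_INST
	#undef NVKM_LAYOUT_ONCE
	};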
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 76288c682e9e..1665738948fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -3,13 +3,25 @@
#define __NVKM_SUBDEV_H__
#include <core/device.h>
+enum nvkm_subdev_type {
+#define NVKM_LAYOUT_ONCE(t,s,p,...) t,
+#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+ NVKM_SUBDEV_NR
+};
+
struct nvkm_subdev {
const struct nvkm_subdev_func *func;
struct nvkm_device *device;
- enum nvkm_devidx index;
- struct mutex mutex;
+ enum nvkm_subdev_type type;
+ int inst;
+ char name[16];
u32 debug;
+ struct list_head head;
+ void **pself;
bool oneinit;
};
@@ -23,11 +35,12 @@ struct nvkm_subdev_func {
void (*intr)(struct nvkm_subdev *);
};
-extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
-int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
- int index, struct nvkm_subdev **);
+extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
- int index, struct nvkm_subdev *);
+ enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
+void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst);
void nvkm_subdev_del(struct nvkm_subdev **);
int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
@@ -38,10 +51,8 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
const struct nvkm_subdev *_subdev = (s); \
- if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \
- dev_##p(_subdev->device->dev, "%s: "f, \
- nvkm_subdev_name[_subdev->index], ##a); \
- } \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) \
+ dev_##p(_subdev->device->dev, "%s: "f, _subdev->name, ##a); \
} while(0)
#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL, crit, f, ##a)
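With the global name-table lookup gone from nvkm_printk_(), every subdev now carries its own `name[16]`, presumably filled in by nvkm_subdev_ctor() along these lines (sketch, assumed form), so multi-instance units show up in logs as "ce0", "ce1", and so on:

	if (inst >= 0)
		snprintf(subdev->name, sizeof(subdev->name), "%s%d",
			 nvkm_subdev_type[type], inst);
	else
		strscpy(subdev->name, nvkm_subdev_type[type],
			sizeof(subdev->name));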
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index f938f024db81..d5530faf025e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
#include <engine/xtensa.h>
-int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_bsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 86f420f4630b..cfd2da8e66fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -3,13 +3,13 @@
#define __NVKM_CE_H__
#include <engine/falcon.h>
-int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int tu102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm107_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm200_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gv100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int tu102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 66c5c5e27520..9da9176bbbbf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
-int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_cipher_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 5a96c942d912..d08d3337ba0d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -17,24 +17,28 @@ struct nvkm_disp {
struct nvkm_event hpd;
struct nvkm_event vblank;
- struct nvkm_oproxy *client;
+ struct {
+ spinlock_t lock;
+ struct nvkm_oproxy *object;
+ } client;
};
-int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp77_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp89_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv04_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp77_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp89_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 2e12cdb6bb93..a003da39fd13 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -23,9 +23,9 @@ struct nvkm_dma {
struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);
-int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv04_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gv100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 27c1f868552c..306125d17ece 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -64,7 +64,7 @@ int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, bool enable, u32 addr, struct nvkm_engine **);
+ enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **);
struct nvkm_falcon_func {
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index b335f3a1e66d..54fab7cc36c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -7,6 +7,7 @@
struct nvkm_fault_data;
#define NVKM_FIFO_CHID_NR 4096
+#define NVKM_FIFO_ENGN_NR 16
struct nvkm_fifo_engn {
struct nvkm_object *object;
@@ -17,7 +18,7 @@ struct nvkm_fifo_engn {
struct nvkm_fifo_chan {
const struct nvkm_fifo_chan_func *func;
struct nvkm_fifo *fifo;
- u64 engines;
+ u32 engm;
struct nvkm_object object;
struct list_head head;
@@ -29,7 +30,7 @@ struct nvkm_fifo_chan {
u64 addr;
u32 size;
- struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
+ struct nvkm_fifo_engn engn[NVKM_FIFO_ENGN_NR];
};
struct nvkm_fifo {
@@ -40,6 +41,7 @@ struct nvkm_fifo {
int nr;
struct list_head chan;
spinlock_t lock;
+ struct mutex mutex;
struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */
@@ -57,22 +59,22 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
-int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int tu102_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv04_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk110_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm107_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm200_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
#endif
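Channels previously tracked attached engines in a `u64 engines` bitmask keyed by the global device index, with one context slot per possible subdev. Engines are now addressed by a small per-FIFO id, so a `u32` mask and a 16-entry array suffice. Illustrative helpers (names hypothetical):

	static inline void
	nvkm_fifo_chan_engm_set(struct nvkm_fifo_chan *chan, int engi)
	{
		chan->engm |= BIT(engi);	/* engi < NVKM_FIFO_ENGN_NR */
	}

	static inline bool
	nvkm_fifo_chan_engm_test(struct nvkm_fifo_chan *chan, int engi)
	{
		return chan->engm & BIT(engi);
	}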
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 1530c81f86a2..b28b752ffaa2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -14,44 +14,44 @@ int nvkm_gr_ctxsw_pause(struct nvkm_device *);
int nvkm_gr_ctxsw_resume(struct nvkm_device *);
u32 nvkm_gr_ctxsw_inst(struct nvkm_device *);
-int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv04_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 8585a31f5943..f137f277935f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -2,9 +2,9 @@
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
#include <core/engine.h>
-int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv31_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 83bb2fcb2cbf..ac8f08ce183c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -2,8 +2,8 @@
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
#include <engine/falcon.h>
-int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 69e09fd96e0c..81c2b6f0ad84 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
#include <engine/falcon.h>
-int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 9e11cefc9649..2d5fa961ba66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -2,9 +2,9 @@
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
#include <engine/falcon.h>
-int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 1b3183e31606..97bd3092f68a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -11,5 +11,5 @@ struct nvkm_nvdec {
struct nvkm_falcon falcon;
};
-int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 33e6ba8adc8d..1a259c5c9a71 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -11,5 +11,5 @@ struct nvkm_nvenc {
struct nvkm_falcon falcon;
};
-int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
+int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 4d754e7650d9..af89d46ea360 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -7,20 +7,23 @@ struct nvkm_pm {
const struct nvkm_pm_func *func;
struct nvkm_engine engine;
- struct nvkm_object *perfmon;
+ struct {
+ spinlock_t lock;
+ struct nvkm_object *object;
+ } client;
struct list_head domains;
struct list_head sources;
u32 sequence;
};
-int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index f14e98a8a0ca..37ed7ab8d050 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
#include <engine/falcon.h>
-int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_sec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 34dc765648d5..06264c840eae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -18,7 +18,7 @@ struct nvkm_sec2 {
bool initmsg_received;
};
-int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int tu102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 2e91769e3ee2..b1a53ffbfdef 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -12,8 +12,8 @@ struct nvkm_sw {
bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
-int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv04_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 8984415b2a3d..1bab26858538 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
#include <engine/xtensa.h>
-int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_vp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index fbf27b2293a9..3083a5866a55 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -13,7 +13,7 @@ struct nvkm_xtensa {
};
int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
- int index, bool enable, u32 addr, struct nvkm_engine **);
+ enum nvkm_subdev_type, int, bool enable, u32 addr, struct nvkm_engine **);
struct nvkm_xtensa_func {
u32 fifo_val;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
index 836d8b932822..c0b254f7f0b5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -59,12 +59,12 @@ struct nvkm_acr {
bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
-int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm200_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
struct nvkm_acr_lsfw {
const struct nvkm_acr_lsf_func *func;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 14b09f7e46a5..4f07836ab984 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -23,11 +23,11 @@ void nvkm_bar_bar2_reset(struct nvkm_device *);
struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
-int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int tu102_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int nv50_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int tu102_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index f2860f8e0c2e..b61cfb077533 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -30,5 +30,5 @@ u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
-int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
+int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f5f59261ea81..d1beaad0c82b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -14,6 +14,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_LVDS_SPWG = 0x41,
DCB_CONNECTOR_DP = 0x46,
DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_mDP = 0x48,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
DCB_CONNECTOR_HDMI_C = 0x63,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index ae9ad6c034fb..2ac03bbc6133 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -18,9 +18,9 @@ void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *);
void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
-int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv04_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index bf937e7dfd77..05b99c9e9a26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -125,14 +125,14 @@ int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
-int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 1a39e52e09e3..848b5d9ce705 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -14,22 +14,22 @@ struct nvkm_devinit {
u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
void nvkm_devinit_meminit(struct nvkm_devinit *);
-u64 nvkm_devinit_disable(struct nvkm_devinit *);
-int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
+int nvkm_devinit_post(struct nvkm_devinit *);
-int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv04_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm200_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gv100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int tu102_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
#endif
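The `u64` disable-mask plumbing is removed along with nvkm_devinit_disable(): rather than returning bits keyed by device index for the core to interpret, implementations are now expected to call nvkm_subdev_disable() (declared in subdev.h above) directly for units the hardware reports absent. A sketch of the assumed usage (function name and register offset hypothetical):

	static void
	gxxx_devinit_disable(struct nvkm_devinit *init)
	{
		struct nvkm_device *device = init->subdev.device;

		/* e.g. a strap/fuse bit says the display engine is absent */
		if (nvkm_rd32(device, 0x021c04) & 0x00000001)
			nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
	}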
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index a513c16ab105..581458ad38e0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,8 +30,8 @@ struct nvkm_fault_data {
u8 reason;
};
-int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gv100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int tu102_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 34b56b10218a..ef6a6297148c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -36,7 +36,11 @@ struct nvkm_fb {
struct nvkm_blob vpr_scrubber;
struct nvkm_ram *ram;
- struct nvkm_mm tags;
+
+ struct {
+ struct mutex mutex; /* protects mm and nvkm_memory::tags */
+ struct nvkm_mm mm;
+ } tags;
struct {
struct nvkm_fb_tile region[16];
@@ -54,38 +58,40 @@ void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
-int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv04_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm200_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -126,6 +132,7 @@ struct nvkm_ram {
#define NVKM_RAM_MM_MIXED (NVKM_MM_HEAP_ANY + 3)
struct nvkm_mm vram;
u64 stolen;
+ struct mutex mutex;
int ranks;
int parts;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index 00111c34311e..dabbef0ac96c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -11,7 +11,7 @@ struct nvkm_fuse {
u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
-int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int nv50_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index eaacf8d80527..0e46ea1fe972 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -32,9 +32,10 @@ int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
-int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv10_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 06db67610a50..cf42a59d4e58 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,5 +9,5 @@ struct nvkm_gsp {
struct nvkm_falcon falcon;
};
-int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
+int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 81b977319640..146e13292203 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -85,14 +85,15 @@ struct nvkm_i2c {
struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
-int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv04_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gm200_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
static inline int
nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
deleted file mode 100644
index db791411eaa8..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_H__
-#define __NVKM_IBUS_H__
-#include <core/subdev.h>
-
-int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index f483dcd7cd1c..7400d62dcbec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -14,6 +14,6 @@ struct nvkm_iccsense {
u32 power_w_crit;
};
-int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
+int gf100_iccsense_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index c74ab7c31d05..f967b97d163c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -13,6 +13,11 @@ struct nvkm_instmem {
struct list_head boot;
u32 reserved;
+ /* <=nv4x: protects NV_PRAMIN/BAR2 MM
+ * >=nv50: protects BAR2 MM & LRU
+ */
+ struct mutex mutex;
+
struct nvkm_memory *vbios;
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
@@ -25,8 +30,8 @@ int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
struct nvkm_memory **);
-int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
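
The comment documents what the new per-instmem mutex covers in place of
the coarser shared subdev lock. A hedged sketch of the locking pattern
it implies; iobj and the lru list head are hypothetical here, only the
mutex itself is added by this patch:

	/* sketch: BAR2 MM/LRU updates serialised by imem->mutex */
	mutex_lock(&imem->mutex);
	list_move_tail(&iobj->lru, &imem->lru);	/* hypothetical fields */
	mutex_unlock(&imem->mutex);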
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index d76f60d7d29a..d32a326a9290 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -13,6 +13,7 @@ struct nvkm_ltc {
u32 ltc_nr;
u32 lts_nr;
+ struct mutex mutex; /* serialises CBC operations */
u32 num_tags;
u32 tag_base;
struct nvkm_memory *tag_ram;
@@ -33,12 +34,11 @@ int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32);
void nvkm_ltc_invalidate(struct nvkm_ltc *);
void nvkm_ltc_flush(struct nvkm_ltc *);
-int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gf100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm200_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp102_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 6641fe4c252c..cb86a56e68d4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -8,28 +8,29 @@ struct nvkm_mc {
struct nvkm_subdev subdev;
};
-void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
-bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_subdev_type, int);
+bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
void nvkm_mc_intr(struct nvkm_device *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_device *);
void nvkm_mc_intr_rearm(struct nvkm_device *);
-void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int, bool enable);
void nvkm_mc_unk260(struct nvkm_device *, u32 data);
-int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv04_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv11_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv17_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g84_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gt215_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk104_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp10b_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int tu102_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 54cdcb017518..0911e73f7424 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -117,22 +117,24 @@ struct nvkm_mmu {
struct list_head list;
} ptc, ptp;
+ struct mutex mutex; /* serialises mmu invalidations */
+
struct nvkm_device_oclass user;
};
-int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index 78df1e9def05..7d4132a17d0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -3,5 +3,5 @@
#define __NVKM_MXM_H__
#include <core/subdev.h>
-int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int nv50_mxm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 4803a4fad4a2..74c19bdfb757 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -39,17 +39,17 @@ void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
-int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv46_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g84_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g92_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g94_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 5ff6d1f8985a..f57a3a5a288d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -18,6 +18,7 @@ struct nvkm_pmu {
struct completion wpr_ready;
struct {
+ struct mutex mutex;
u32 base;
u32 size;
} send;
@@ -39,18 +40,18 @@ int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
bool nvkm_pmu_fan_controlled(struct nvkm_device *);
-int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gt215_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm20b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
new file mode 100644
index 000000000000..e1399f8a90ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_H__
+#define __NVKM_PRIVRING_H__
+#include <core/subdev.h>
+
+int gf100_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gf117_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk104_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk20a_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gm200_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gp10b_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 62c34f98c930..bd04f49272a6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -107,13 +107,13 @@ void nvkm_therm_clkgate_init(struct nvkm_therm *,
void nvkm_therm_clkgate_enable(struct nvkm_therm *);
void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
-int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv40_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm200_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index d06dcbe1faa6..439a3f72b0d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -76,8 +76,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
#define nvkm_wait_msec(d,m,addr,mask,data) \
nvkm_wait_usec((d), (m) * 1000, (addr), (mask), (data))
-int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv04_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 7be0e7e7bd77..ee75c5524c43 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -9,13 +9,24 @@ struct nvkm_top {
struct list_head device;
};
-u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
-u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
-int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
+struct nvkm_top_device {
+ enum nvkm_subdev_type type;
+ int inst;
+ u32 addr;
+ int fault;
+ int engine;
+ int runlist;
+ int reset;
+ int intr;
+ struct list_head head;
+};
+
+u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int);
+int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_subdev_type, int);
+struct nvkm_subdev *nvkm_top_fault(struct nvkm_device *, int fault);
-int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
+int gk104_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
+int ga100_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
#endif
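
Exposing struct nvkm_top_device turns topology lookups into a list walk
keyed on (type, inst) instead of an indexed table. A sketch of how
nvkm_top_addr() can be implemented against the structures declared
above; the body is inferred from those declarations, not shown in this
patch:

	u32
	nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
	{
		struct nvkm_top_device *info;

		list_for_each_entry(info, &device->top->device, head) {
			if (info->type == type && info->inst == inst)
				return info->addr;
		}

		return 0;
	}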
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 45053a280930..0be86d5f0158 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -36,10 +36,10 @@ int nvkm_volt_get(struct nvkm_volt *);
int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
int condition);
-int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int nv40_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf117_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk104_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gm20b_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9a5be6f32424..0a9334deffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -181,6 +181,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
@@ -188,13 +189,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->vendor;
+ getparam->value = pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->device;
+ getparam->value = pdev->device;
else
getparam->value = 0;
break;
@@ -205,7 +206,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
case NV_DEVICE_INFO_V0_IGP :
- if (!pci_is_pcie(dev->pdev))
+ if (!pci_is_pcie(pdev))
getparam->value = 1;
else
getparam->value = 2;
@@ -268,19 +269,19 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
- case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR ; break;
- case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break;
- case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break;
- case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break;
- case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE ; break;
+ case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
+ case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
+ case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
+ case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
+ case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}
} else {
- engine = NV_DEVICE_INFO_ENGINE_GR;
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
}
- if (engine != NV_DEVICE_INFO_ENGINE_CE)
+ if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
engine = nvif_fifo_runlist(device, engine);
else
engine = nvif_fifo_runlist_ce(device);
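
With drm_device no longer caching a pci_dev pointer, PCI specifics are
derived from the generic struct device on demand. A sketch of the
guard-then-convert idiom used throughout these conversions; the helper
name is illustrative only:

	/* sketch: to_pci_dev() is only valid after a dev_is_pci() check */
	static u16 nouveau_pci_device_id(struct drm_device *dev)
	{
		if (!dev_is_pci(dev->dev))
			return 0;	/* SoC/platform device: no PCI config space */
		return to_pci_dev(dev->dev)->device;
	}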
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 69a84d0197d0..7c15f6448428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -377,7 +377,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(dev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c94dbf3..72f35a2babcb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
+ case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d204ea8a5618..e8c445eb1100 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -110,6 +110,9 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
struct nvbios *bios = &drm->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+#ifdef __powerpc__
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+#endif
if (!bios->fp.xlated_entry || !sub || !scriptofs)
return -EINVAL;
@@ -123,8 +126,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
- (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
- dev->pdev->device == 0x0329))
+ (pdev->device == 0x0179 || pdev->device == 0x0189 ||
+ pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
@@ -2083,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
int ret;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c85b1af06b7b..2375711877cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available -= bo->mem.size;
+ drm->gem.vram_available -= bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available -= bo->mem.size;
+ drm->gem.gart_available -= bo->base.size;
break;
default:
break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (!nvbo->bo.pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available += bo->mem.size;
+ drm->gem.vram_available += bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available += bo->mem.size;
+ drm->gem.gart_available += bo->base.size;
break;
default:
break;
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+		i += num_pages - 1;	/* for-loop's ++i then steps to the next run */
+ }
}
void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+		i += num_pages - 1;	/* for-loop's ++i then steps to the next run */
+ }
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
@@ -774,7 +797,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
return ret;
}
- mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+ if (drm_drv_uses_atomic_modeset(drm->dev))
+ mutex_lock(&cli->mutex);
+ else
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
@@ -910,7 +936,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
return 0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
- *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+ *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
nvbo->mode, nvbo->zeta);
}
@@ -1232,9 +1258,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0;
if (slave && ttm->sg) {
- /* make userspace faulting work */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- ttm_dma->dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+ ttm->num_pages);
return 0;
}
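
The sync hooks now batch physically contiguous pages into one
dma_sync_single_for_*() call rather than one call per page. Because the
for-loop's ++i also advances the index, the in-body increment has to
stop one short of the run length (adjusted above). A standalone sketch
of the same coalescing, written with a while-loop so the index moves in
exactly one place:

	/* sketch: coalesce runs of contiguous pages into one sync call */
	size_t i = 0;

	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t run = 1;

		/* extend while the next struct page is physically adjacent */
		while (i + run < ttm_dma->num_pages && ++p == ttm_dma->pages[i + run])
			run++;

		dma_sync_single_for_device(drm->dev->dev, ttm_dma->dma_address[i],
					   run * PAGE_SIZE, DMA_TO_DEVICE);
		i += run;
	}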
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 5d191e58edf1..7cfac265fd45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
+ goto done;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
@@ -555,7 +556,7 @@ nouveau_channels_init(struct nouveau_drm *drm)
} args = {
.m.version = 1,
.m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
+ .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
};
struct nvif_object *device = &drm->client.device.object;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8b4b3688c7ae..61e6d7412505 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -411,6 +411,7 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int ret;
@@ -438,11 +439,11 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
break;
if (switcheroo_ddc)
- vga_switcheroo_lock_ddc(dev->pdev);
+ vga_switcheroo_lock_ddc(pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
found = nv_encoder;
if (switcheroo_ddc)
- vga_switcheroo_unlock_ddc(dev->pdev);
+ vga_switcheroo_unlock_ddc(pdev);
break;
}
@@ -490,6 +491,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (nv_connector->detected_encoder == nv_encoder)
return;
@@ -511,8 +513,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->doublescan_allowed = true;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
(drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- (dev->pdev->device & 0x0ff0) != 0x0100 &&
- (dev->pdev->device & 0x0ff0) != 0x0150))
+ (pdev->device & 0x0ff0) != 0x0100 &&
+ (pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
@@ -1210,6 +1212,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca..17831ee897ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
bl_size = bw * bh * (1 << tile_mode) * gob_size;
- DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
- nvbo->bo.mem.size);
+ nvbo->bo.base.size);
- if (bl_size + offset > nvbo->bo.mem.size)
+ if (bl_size + offset > nvbo->bo.base.size)
return -ERANGE;
return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
} else {
uint32_t size = mode_cmd->pitches[i] * height;
- if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
return -ERANGE;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d141a5f004af..885815ea917f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -115,8 +115,8 @@ nouveau_platform_name(struct platform_device *platformdev)
static u64
nouveau_name(struct drm_device *dev)
{
- if (dev->pdev)
- return nouveau_pci_name(dev->pdev);
+ if (dev_is_pci(dev->dev))
+ return nouveau_pci_name(to_pci_dev(dev->dev));
else
return nouveau_platform_name(to_platform_device(dev->dev));
}
@@ -344,7 +344,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
/* Allocate channel that has access to the graphics engine. */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
+ arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
arg1 = 1;
} else {
arg0 = NvDmaFB;
@@ -760,7 +760,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- drm_dev->pdev = pdev;
pci_set_drvdata(pdev, drm_dev);
ret = nouveau_drm_device_init(drm_dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9d04d1b36434..d28ee6844245 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_audio_component.h>
@@ -222,6 +221,7 @@ struct nouveau_drm {
struct {
struct drm_audio_component *component;
+ struct mutex lock;
bool component_registered;
} audio;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 21937f1c7dd9..1ffcc0a491fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -53,7 +53,12 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
- bool audio;
+
+ /* Protected by nouveau_drm.audio.lock */
+ struct {
+ bool enabled;
+ struct drm_connector *connector;
+ } audio;
struct drm_display_mode mode;
int last_dpms;
@@ -141,11 +146,9 @@ enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
unsigned *clock);
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
int nv50_mstm_detect(struct nouveau_encoder *encoder);
void nv50_mstm_remove(struct nv50_mstm *mstm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24ec5339efb4..4fc0fa696461 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -396,7 +396,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
fb->width, fb->height, nvbo->offset, nvbo);
- vga_switcheroo_client_fb_set(dev->pdev, info);
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
+
return 0;
out_unlock:
@@ -548,7 +550,7 @@ nouveau_fbcon_init(struct drm_device *dev)
int ret;
if (!dev->mode_config.num_crtc ||
- (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 2f16b5249283..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -30,9 +30,9 @@
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- int npages = nvbo->bo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+ nvbo->bo.ttm->num_pages);
}
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2e23fd4906c..1cf52635ea74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -84,7 +84,7 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
if (!nvbe)
return NULL;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 4f69e4c3dafd..1c3f890377d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c85dd8afa3c3..7c4b374b3eca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,18 +87,20 @@ nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode);
/* don't register Thunderbolt eGPU with vga_switcheroo */
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+ vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
@@ -109,17 +111,19 @@ nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(pdev, NULL, NULL, NULL);
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_unregister_client(dev->pdev);
+ vga_switcheroo_unregister_client(pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 1253fdec712d..b1cd8d7dd87d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 447238e3cbe7..1625826505f6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 8d0d30e08f57..529cb60d5efb 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GA102_DISP, -1 },
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
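
disps[] is a newest-first preference list: the constructor binds the
first display class the device actually exposes, which is why GA102
support is a one-line prepend. A generic sketch of that probing
strategy; the helper and callback below are invented for illustration,
nvif has its own class-query machinery:

	/* sketch: walk a newest-first class list, take the first match */
	static s32 pick_class(const s32 *prefs, int nr, bool (*supported)(s32))
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (supported(prefs[i]))
				return prefs[i];
		}
		return -ENODEV;	/* none of the preferred classes exposed */
	}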
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
index e84a2e2ff043..a463289962b2 100644
--- a/drivers/gpu/drm/nouveau/nvif/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -41,9 +41,11 @@ nvif_fifo_runlists(struct nvif_device *device)
return -ENOMEM;
a->m.version = 1;
a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
- a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS;
- for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)
- a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);
+ a->v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS;
+ for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) {
+ a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES;
+ a->v.runlist[i].data = i;
+ }
ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
if (ret)
@@ -58,7 +60,7 @@ nvif_fifo_runlists(struct nvif_device *device)
}
for (i = 0; i < device->runlists; i++) {
- if (a->v.runlists.data & BIT_ULL(i))
+ if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID)
device->runlist[i].engines = a->v.runlist[i].data;
}
@@ -70,29 +72,15 @@ done:
u64
nvif_fifo_runlist(struct nvif_device *device, u64 engine)
{
- struct nvif_object *object = &device->object;
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data engine;
- } v;
- } a = {
- .m.version = 1,
- .m.count = sizeof(a.v) / sizeof(a.v.engine),
- .v.engine.mthd = engine,
- };
u64 runm = 0;
int ret, i;
if ((ret = nvif_fifo_runlists(device)))
return runm;
- ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a));
- if (ret == 0) {
- for (i = 0; i < device->runlists; i++) {
- if (device->runlist[i].engines & a.v.engine.data)
- runm |= BIT_ULL(i);
- }
+ for (i = 0; i < device->runlists; i++) {
+ if (device->runlist[i].engines & engine)
+ runm |= BIT_ULL(i);
}
return runm;
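
nvif_fifo_runlist() no longer issues a separate info method per engine:
the argument is now itself a runlist-engines bitmask, matched against
the per-runlist masks cached by nvif_fifo_runlists(). From the caller's
side, mirroring the nouveau_drm.c hunk earlier in this patch:

	/* usage: bitmask of runlists that can run GR */
	u64 runm = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
	if (!runm)
		return -ENODEV;	/* no runlist exposes GR on this device */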
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 1a47c40e171b..e41a39ae1597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -40,10 +40,11 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
{
struct nvkm_engine *engine = *pengine;
if (engine) {
- mutex_lock(&engine->subdev.mutex);
- if (--engine->usecount == 0)
+ if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) {
nvkm_subdev_fini(&engine->subdev, false);
- mutex_unlock(&engine->subdev.mutex);
+ engine->use.enabled = false;
+ mutex_unlock(&engine->use.mutex);
+ }
*pengine = NULL;
}
}
@@ -51,17 +52,21 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
struct nvkm_engine *
nvkm_engine_ref(struct nvkm_engine *engine)
{
+ int ret;
if (engine) {
- mutex_lock(&engine->subdev.mutex);
- if (++engine->usecount == 1) {
- int ret = nvkm_subdev_init(&engine->subdev);
- if (ret) {
- engine->usecount--;
- mutex_unlock(&engine->subdev.mutex);
- return ERR_PTR(ret);
+ if (!refcount_inc_not_zero(&engine->use.refcount)) {
+ mutex_lock(&engine->use.mutex);
+ if (!refcount_inc_not_zero(&engine->use.refcount)) {
+ engine->use.enabled = true;
+ if ((ret = nvkm_subdev_init(&engine->subdev))) {
+ engine->use.enabled = false;
+ mutex_unlock(&engine->use.mutex);
+ return ERR_PTR(ret);
+ }
+ refcount_set(&engine->use.refcount, 1);
}
+ mutex_unlock(&engine->use.mutex);
}
- mutex_unlock(&engine->subdev.mutex);
}
return engine;
}
@@ -114,7 +119,7 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
int ret = 0, i;
s64 time;
- if (!engine->usecount) {
+ if (!engine->use.enabled) {
nvkm_trace(subdev, "init skipped, engine has no users\n");
return ret;
}
@@ -156,11 +161,12 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->dtor)
return engine->func->dtor(engine);
+ mutex_destroy(&engine->use.mutex);
return engine;
}
-static const struct nvkm_subdev_func
-nvkm_engine_func = {
+const struct nvkm_subdev_func
+nvkm_engine = {
.dtor = nvkm_engine_dtor,
.preinit = nvkm_engine_preinit,
.init = nvkm_engine_init,
@@ -170,14 +176,15 @@ nvkm_engine_func = {
};
int
-nvkm_engine_ctor(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, bool enable,
- struct nvkm_engine *engine)
+nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
{
- nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
+ nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
engine->func = func;
+ refcount_set(&engine->use.refcount, 0);
+ mutex_init(&engine->use.mutex);
- if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+ if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
nvkm_debug(&engine->subdev, "disabled\n");
return -ENODEV;
}
@@ -187,11 +194,11 @@ nvkm_engine_ctor(const struct nvkm_engine_func *func,
}
int
-nvkm_engine_new_(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, bool enable,
+nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable,
struct nvkm_engine **pengine)
{
if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(func, device, index, enable, *pengine);
+ return nvkm_engine_ctor(func, device, type, inst, enable, *pengine);
}
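
The engine use-count becomes a refcount_t plus a dedicated mutex, so
the common case (engine already initialised) takes no lock at all, and
teardown via refcount_dec_and_mutex_lock() only acquires the mutex when
the count actually drops to zero. The init side is the classic
double-checked pattern; a minimal sketch independent of the nvkm types,
with expensive_init() standing in for nvkm_subdev_init():

	static int obj_get(struct obj *obj)	/* hypothetical wrapper */
	{
		if (!refcount_inc_not_zero(&obj->refcount)) {
			mutex_lock(&obj->mutex);
			/* re-check: another thread may have won the race meanwhile */
			if (!refcount_inc_not_zero(&obj->refcount)) {
				if (expensive_init(obj)) {	/* hypothetical hook */
					mutex_unlock(&obj->mutex);
					return -ENODEV;
				}
				refcount_set(&obj->refcount, 1);
			}
			mutex_unlock(&obj->mutex);
		}
		return 0;
	}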
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 38130ef272d6..c69daac9bac7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -33,13 +33,13 @@ nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags = *ptags;
if (tags) {
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if (refcount_dec_and_test(&tags->refcount)) {
- nvkm_mm_free(&fb->tags, &tags->mn);
+ nvkm_mm_free(&fb->tags.mm, &tags->mn);
kfree(memory->tags);
memory->tags = NULL;
}
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = NULL;
}
}
@@ -52,29 +52,29 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags;
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if ((tags = memory->tags)) {
/* If comptags exist for the memory, but a different amount
* than requested, the buffer is being mapped with settings
* that are incompatible with existing mappings.
*/
if (tags->mn && tags->mn->length != nr) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -EINVAL;
}
refcount_inc(&tags->refcount);
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = tags;
return 0;
}
if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -ENOMEM;
}
- if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
if (clr)
clr(device, tags->mn->offset, tags->mn->length);
} else {
@@ -92,7 +92,7 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
refcount_set(&tags->refcount, 1);
*ptags = memory->tags = tags;
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 49d468b45d3f..a74b7acb6832 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -26,69 +26,13 @@
#include <core/option.h>
#include <subdev/mc.h>
-static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
-
const char *
-nvkm_subdev_name[NVKM_SUBDEV_NR] = {
- [NVKM_SUBDEV_ACR ] = "acr",
- [NVKM_SUBDEV_BAR ] = "bar",
- [NVKM_SUBDEV_VBIOS ] = "bios",
- [NVKM_SUBDEV_BUS ] = "bus",
- [NVKM_SUBDEV_CLK ] = "clk",
- [NVKM_SUBDEV_DEVINIT ] = "devinit",
- [NVKM_SUBDEV_FAULT ] = "fault",
- [NVKM_SUBDEV_FB ] = "fb",
- [NVKM_SUBDEV_FUSE ] = "fuse",
- [NVKM_SUBDEV_GPIO ] = "gpio",
- [NVKM_SUBDEV_GSP ] = "gsp",
- [NVKM_SUBDEV_I2C ] = "i2c",
- [NVKM_SUBDEV_IBUS ] = "priv",
- [NVKM_SUBDEV_ICCSENSE] = "iccsense",
- [NVKM_SUBDEV_INSTMEM ] = "imem",
- [NVKM_SUBDEV_LTC ] = "ltc",
- [NVKM_SUBDEV_MC ] = "mc",
- [NVKM_SUBDEV_MMU ] = "mmu",
- [NVKM_SUBDEV_MXM ] = "mxm",
- [NVKM_SUBDEV_PCI ] = "pci",
- [NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_THERM ] = "therm",
- [NVKM_SUBDEV_TIMER ] = "tmr",
- [NVKM_SUBDEV_TOP ] = "top",
- [NVKM_SUBDEV_VOLT ] = "volt",
- [NVKM_ENGINE_BSP ] = "bsp",
- [NVKM_ENGINE_CE0 ] = "ce0",
- [NVKM_ENGINE_CE1 ] = "ce1",
- [NVKM_ENGINE_CE2 ] = "ce2",
- [NVKM_ENGINE_CE3 ] = "ce3",
- [NVKM_ENGINE_CE4 ] = "ce4",
- [NVKM_ENGINE_CE5 ] = "ce5",
- [NVKM_ENGINE_CE6 ] = "ce6",
- [NVKM_ENGINE_CE7 ] = "ce7",
- [NVKM_ENGINE_CE8 ] = "ce8",
- [NVKM_ENGINE_CIPHER ] = "cipher",
- [NVKM_ENGINE_DISP ] = "disp",
- [NVKM_ENGINE_DMAOBJ ] = "dma",
- [NVKM_ENGINE_FIFO ] = "fifo",
- [NVKM_ENGINE_GR ] = "gr",
- [NVKM_ENGINE_IFB ] = "ifb",
- [NVKM_ENGINE_ME ] = "me",
- [NVKM_ENGINE_MPEG ] = "mpeg",
- [NVKM_ENGINE_MSENC ] = "msenc",
- [NVKM_ENGINE_MSPDEC ] = "mspdec",
- [NVKM_ENGINE_MSPPP ] = "msppp",
- [NVKM_ENGINE_MSVLD ] = "msvld",
- [NVKM_ENGINE_NVENC0 ] = "nvenc0",
- [NVKM_ENGINE_NVENC1 ] = "nvenc1",
- [NVKM_ENGINE_NVENC2 ] = "nvenc2",
- [NVKM_ENGINE_NVDEC0 ] = "nvdec0",
- [NVKM_ENGINE_NVDEC1 ] = "nvdec1",
- [NVKM_ENGINE_NVDEC2 ] = "nvdec2",
- [NVKM_ENGINE_PM ] = "pm",
- [NVKM_ENGINE_SEC ] = "sec",
- [NVKM_ENGINE_SEC2 ] = "sec2",
- [NVKM_ENGINE_SW ] = "sw",
- [NVKM_ENGINE_VIC ] = "vic",
- [NVKM_ENGINE_VP ] = "vp",
+nvkm_subdev_type[NVKM_SUBDEV_NR] = {
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr,
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_ONCE
+#undef NVKM_LAYOUT_INST
};
void
@@ -125,7 +69,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}
- nvkm_mc_reset(device, subdev->index);
+ nvkm_mc_reset(device, subdev->type, subdev->inst);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
@@ -199,6 +143,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
if (subdev && !WARN_ON(!subdev->func)) {
nvkm_trace(subdev, "destroy running...\n");
time = ktime_to_us(ktime_get());
+ list_del(&subdev->head);
if (subdev->func->dtor)
*psubdev = subdev->func->dtor(subdev);
time = ktime_to_us(ktime_get()) - time;
@@ -209,26 +154,41 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
}
void
-nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_subdev *subdev)
+nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
+{
+ struct nvkm_subdev *subdev;
+ list_for_each_entry(subdev, &device->subdev, head) {
+ if (subdev->type == type && subdev->inst == inst) {
+ *subdev->pself = NULL;
+ nvkm_subdev_del(&subdev);
+ break;
+ }
+ }
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
- const char *name = nvkm_subdev_name[index];
subdev->func = func;
subdev->device = device;
- subdev->index = index;
-
- __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
- subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+ subdev->type = type;
+ subdev->inst = inst < 0 ? 0 : inst;
+
+ if (inst >= 0)
+ snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst);
+ else
+ strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
+ subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);
+ list_add_tail(&subdev->head, &device->subdev);
}
int
-nvkm_subdev_new_(const struct nvkm_subdev_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_subdev **psubdev)
+nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev)
{
if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(func, device, index, *psubdev);
+ nvkm_subdev_ctor(func, device, type, inst, *psubdev);
return 0;
}
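The deleted nvkm_subdev_name[] table above is replaced by an X-macro expansion: <core/layout.h> lists every subdev/engine once, and each consumer defines NVKM_LAYOUT_ONCE()/NVKM_LAYOUT_INST() to pull out the field it needs (here, [type] = #ptr stringifies the member name into the table). A minimal self-contained sketch of the pattern, with a stand-in LAYOUT macro in place of the real header include and the kernel's (type,data,ptr,...) parameters simplified to (type,ptr) — the entry names below are illustrative, not the kernel's:

#include <stdio.h>

/* Stand-in for <core/layout.h>: one line per subdev/engine. */
#define LAYOUT \
	NVKM_LAYOUT_ONCE(SUBDEV_PCI,   pci) \
	NVKM_LAYOUT_ONCE(SUBDEV_TIMER, tmr) \
	NVKM_LAYOUT_INST(ENGINE_CE,    ce, 2)

enum {	/* first expansion: the enum of types */
#define NVKM_LAYOUT_ONCE(type, ptr)      type,
#define NVKM_LAYOUT_INST(type, ptr, nr)  type,
	LAYOUT
#undef NVKM_LAYOUT_ONCE
#undef NVKM_LAYOUT_INST
	SUBDEV_NR
};

static const char *	/* second expansion: names from the member token */
subdev_type_sketch[SUBDEV_NR] = {
#define NVKM_LAYOUT_ONCE(type, ptr)      [type] = #ptr,
#define NVKM_LAYOUT_INST(type, ptr, nr)  [type] = #ptr,
	LAYOUT
#undef NVKM_LAYOUT_ONCE
#undef NVKM_LAYOUT_INST
};

int main(void)
{
	for (int i = 0; i < SUBDEV_NR; i++)
		printf("%d -> %s\n", i, subdev_type_sketch[i]);
	return 0;
}

Because each expansion site defines and then #undef's the helper macros around the list, the single list stays the only place a new subdev has to be added; the enum, the name table, and (in the kernel) the device struct layout all stay in sync automatically.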
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 44e116f7880d..39f6db269c7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -36,8 +36,9 @@ g84_bsp = {
};
int
-g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_bsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_xtensa_new_(&g84_bsp, device, index,
+ return nvkm_xtensa_new_(&g84_bsp, device, type, inst,
device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index ad9f855c9a40..b9cc39565985 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -29,9 +29,7 @@
static void
gf100_ce_init(struct nvkm_falcon *ce)
{
- struct nvkm_device *device = ce->engine.subdev.device;
- const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
- nvkm_wr32(device, ce->addr + 0x084, index);
+ nvkm_wr32(ce->engine.subdev.device, ce->addr + 0x084, ce->engine.subdev.inst);
}
static const struct nvkm_falcon_func
@@ -63,16 +61,9 @@ gf100_ce1 = {
};
int
-gf100_ce_new(struct nvkm_device *device, int index,
+gf100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- if (index == NVKM_ENGINE_CE0) {
- return nvkm_falcon_new_(&gf100_ce0, device, index, true,
- 0x104000, pengine);
- } else
- if (index == NVKM_ENGINE_CE1) {
- return nvkm_falcon_new_(&gf100_ce1, device, index, true,
- 0x105000, pengine);
- }
- return -ENODEV;
+ return nvkm_falcon_new_(inst ? &gf100_ce1 : &gf100_ce0, device, type, inst, true,
+ 0x104000 + (inst * 0x1000), pengine);
}
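With per-type instance numbers, the copy engines no longer recover their unit index by subtracting NVKM_ENGINE_CE0 from a global enum value; subdev->inst is the index directly, and the per-instance MMIO base falls out as base + inst * stride, as in the gf100_ce_new() hunk above. A tiny sketch checking that arithmetic against the constants visible in this hunk (the 0x1000 stride is gf100/gk104-era; the gp100 hunk further down uses 0x80):

#include <assert.h>
#include <stdint.h>

/* Per-instance falcon base, matching gf100_ce_new() above:
 * 0x104000 + inst * 0x1000. */
static uint32_t gf100_ce_base(int inst)
{
	return 0x104000 + inst * 0x1000;
}

int main(void)
{
	assert(gf100_ce_base(0) == 0x104000);	/* old gf100_ce0 constant */
	assert(gf100_ce_base(1) == 0x105000);	/* old gf100_ce1 constant */
	return 0;
}

This is what lets the old two-way if/else on index collapse into a single nvkm_falcon_new_() call parameterised by inst.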
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index 9e0b53a10f77..27f29eb0494d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -58,9 +58,9 @@ gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
void
gk104_ce_intr(struct nvkm_engine *ce)
{
- const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
+ const u32 base = subdev->inst * 0x1000;
u32 mask = nvkm_rd32(device, 0x104904 + base);
u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;
if (intr & 0x00000001) {
@@ -94,8 +94,8 @@ gk104_ce = {
};
int
-gk104_ce_new(struct nvkm_device *device, int index,
+gk104_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gk104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gk104_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index c0df7daa85e2..c3c476592c43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -36,8 +36,8 @@ gm107_ce = {
};
int
-gm107_ce_new(struct nvkm_device *device, int index,
+gm107_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gm107_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gm107_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index c6fa8b20737e..d2db61865371 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -35,8 +35,8 @@ gm200_ce = {
};
int
-gm200_ce_new(struct nvkm_device *device, int index,
+gm200_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gm200_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gm200_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
index c7710456bc30..a4f08a4472c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -59,9 +59,9 @@ gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
void
gp100_ce_intr(struct nvkm_engine *ce)
{
- const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
+ const u32 base = subdev->inst * 0x80;
u32 mask = nvkm_rd32(device, 0x10440c + base);
u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
if (intr & 0x00000001) { //XXX: guess
@@ -95,8 +95,8 @@ gp100_ce = {
};
int
-gp100_ce_new(struct nvkm_device *device, int index,
+gp100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 985c8f653874..180d497a95eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -37,8 +37,8 @@ gp102_ce = {
};
int
-gp102_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 63ac51a54fd3..704df0f2d1f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -44,7 +44,7 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
{
struct nvkm_subdev *subdev = &ce->engine.subdev;
struct nvkm_device *device = subdev->device;
- const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+ const u32 base = subdev->inst * 0x1000;
u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
@@ -75,9 +75,9 @@ gt215_ce = {
};
int
-gt215_ce_new(struct nvkm_device *device, int index,
+gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(&gt215_ce, device, index,
+ return nvkm_falcon_new_(&gt215_ce, device, type, inst,
(device->chipset != 0xaf), 0x104000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
index fcda3de45857..cd5e9cdca1cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -33,8 +33,8 @@ gv100_ce = {
};
int
-gv100_ce_new(struct nvkm_device *device, int index,
+gv100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gv100_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gv100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index b4308e2d8c75..e5ff92d9364c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -33,8 +33,8 @@ tu102_ce = {
};
int
-tu102_ce_new(struct nvkm_device *device, int index,
+tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&tu102_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 68ffb520531e..be2a7181dc15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -127,8 +127,8 @@ g84_cipher = {
};
int
-g84_cipher_new(struct nvkm_device *device, int index,
+g84_cipher_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&g84_cipher, device, index, true, pengine);
+ return nvkm_engine_new_(&g84_cipher, device, type, inst, true, pengine);
}
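In the device/base.c tables that follow, every bare constructor pointer becomes a two-field initializer. Reading from this diff, the first field appears to be a bitmask of instances to construct (0x00000001 = instance 0 only; multi-instance engines elsewhere in the commit carry wider masks), paired with the constructor that now receives (type, inst). A minimal sketch under that assumption — the struct and function names here are made up for illustration, not the kernel's:

#include <stdio.h>

/* Sketch of the reworked chip-table entry: an instance bitmask
 * plus a constructor taking the instance number. */
struct chip_entry {
	unsigned int inst_mask;
	int (*ctor)(int inst);
};

static int demo_ctor(int inst)
{
	printf("constructing instance %d\n", inst);
	return 0;
}

/* Walk the mask and construct one object per set bit. */
static void construct_all(const struct chip_entry *e)
{
	for (int inst = 0; inst < 32; inst++)
		if (e->inst_mask & (1u << inst))
			e->ctor(inst);
}

int main(void)
{
	const struct chip_entry ce = { 0x00000003, demo_ctor }; /* two CEs */
	construct_all(&ce);
	return 0;
}

Under this scheme the table itself encodes how many copies of an engine a chipset has, which is why the single-instance entries below all carry 0x00000001 and why the old one-field-per-CE (.ce0/.ce1/...) layout is no longer needed.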
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7851bec5f0e5..b930f539feec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -71,2585 +71,2557 @@ nvkm_device_list(u64 *name, int size)
static const struct nvkm_device_chip
null_chipset = {
.name = "NULL",
- .bios = nvkm_bios_new,
+ .bios = { 0x00000001, nvkm_bios_new },
};
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv04_devinit_new,
- .fb = nv04_fb_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv04_fifo_new,
- .gr = nv04_gr_new,
- .sw = nv04_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv04_devinit_new },
+ .fb = { 0x00000001, nv04_fb_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv04_fifo_new },
+ .gr = { 0x00000001, nv04_gr_new },
+ .sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv5_chipset = {
.name = "NV05",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv05_devinit_new,
- .fb = nv04_fb_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv04_fifo_new,
- .gr = nv04_gr_new,
- .sw = nv04_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv05_devinit_new },
+ .fb = { 0x00000001, nv04_fb_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv04_fifo_new },
+ .gr = { 0x00000001, nv04_gr_new },
+ .sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv10_chipset = {
.name = "NV10",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .gr = nv10_gr_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .gr = { 0x00000001, nv10_gr_new },
};
static const struct nvkm_device_chip
nv11_chipset = {
.name = "NV11",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv11_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv11_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv15_chipset = {
.name = "NV15",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv17_chipset = {
.name = "NV17",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv18_chipset = {
.name = "NV18",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1a_chipset = {
.name = "nForce",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv1a_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv1a_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1f_chipset = {
.name = "nForce2",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv1a_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv1a_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv20_chipset = {
.name = "NV20",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv20_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv20_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv20_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv20_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv25_chipset = {
.name = "NV25",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv25_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv25_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv28_chipset = {
.name = "NV28",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv25_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv25_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv2a_chipset = {
.name = "NV2A",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv2a_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv2a_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv30_chipset = {
.name = "NV30",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv30_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv30_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv30_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv30_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv31_chipset = {
.name = "NV31",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv30_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv30_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv30_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv30_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv34_chipset = {
.name = "NV34",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv34_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv34_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv35_chipset = {
.name = "NV35",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv35_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv35_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv35_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv35_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv36_chipset = {
.name = "NV36",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv36_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv35_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv36_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv35_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv40_chipset = {
.name = "NV40",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv40_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv40_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv40_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv40_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv41_chipset = {
.name = "NV41",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv42_chipset = {
.name = "NV42",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv43_chipset = {
.name = "NV43",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv44_chipset = {
.name = "NV44",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv44_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv44_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv45_chipset = {
.name = "NV45",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv40_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv40_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv46_chipset = {
.name = "G72",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv46_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv46_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv47_chipset = {
.name = "G70",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv47_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv47_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv49_chipset = {
.name = "G71",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv49_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv49_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4a_chipset = {
.name = "NV44A",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv44_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv44_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4b_chipset = {
.name = "G73",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv49_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv49_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4c_chipset = {
.name = "C61",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4e_chipset = {
.name = "C51",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv4e_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv4e_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv4e_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv4e_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv50_chipset = {
.name = "G80",
- .bar = nv50_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = nv50_clk_new,
- .devinit = nv50_devinit_new,
- .fb = nv50_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = nv50_mc_new,
- .mmu = nv50_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = nv46_pci_new,
- .therm = nv50_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv50_disp_new,
- .dma = nv50_dma_new,
- .fifo = nv50_fifo_new,
- .gr = nv50_gr_new,
- .mpeg = nv50_mpeg_new,
- .pm = nv50_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, nv50_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, nv50_clk_new },
+ .devinit = { 0x00000001, nv50_devinit_new },
+ .fb = { 0x00000001, nv50_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, nv50_mc_new },
+ .mmu = { 0x00000001, nv50_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, nv46_pci_new },
+ .therm = { 0x00000001, nv50_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv50_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, nv50_fifo_new },
+ .gr = { 0x00000001, nv50_gr_new },
+ .mpeg = { 0x00000001, nv50_mpeg_new },
+ .pm = { 0x00000001, nv50_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nv63_chipset = {
.name = "C73",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv67_chipset = {
.name = "C67",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv68_chipset = {
.name = "C68",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv84_chipset = {
.name = "G84",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g84_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g84_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv86_chipset = {
.name = "G86",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g84_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g84_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv92_chipset = {
.name = "G92",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g92_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g92_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv94_chipset = {
.name = "G94",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv96_chipset = {
.name = "G96",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv98_chipset = {
.name = "G98",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g98_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva0_chipset = {
.name = "GT200",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = gt200_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt200_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = gt200_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, gt200_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt200_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, gt200_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nva3_chipset = {
.name = "GT215",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mpeg = g84_mpeg_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva5_chipset = {
.name = "GT216",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva8_chipset = {
.name = "GT218",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaa_chipset = {
.name = "MCP77/MCP78",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = mcp77_clk_new,
- .devinit = g98_devinit_new,
- .fb = mcp77_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = mcp77_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt200_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, mcp77_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, mcp77_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, mcp77_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt200_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvac_chipset = {
.name = "MCP79/MCP7A",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = mcp77_clk_new,
- .devinit = g98_devinit_new,
- .fb = mcp77_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = mcp77_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = mcp79_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, mcp77_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, mcp77_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, mcp77_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, mcp79_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaf_chipset = {
.name = "MCP89",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = mcp89_devinit_new,
- .fb = mcp89_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = mcp89_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = mcp89_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = mcp89_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, mcp89_devinit_new },
+ .fb = { 0x00000001, mcp89_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, mcp89_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, mcp89_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, mcp89_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvc0_chipset = {
.name = "GF100",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf100_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf100_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
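The pattern running through these hunks: each per-index array initializer such as ".ce[0]"/".ce[1]" collapses into a single { mask, constructor } pair, where bit N of the mask declares that instance N of the unit exists and every instance shares one constructor. GF100's ".ce = { 0x00000003, gf100_ce_new }" above therefore stands for CE0 and CE1. A minimal standalone sketch of that encoding follows — hypothetical names only, not the nouveau API itself:

/*
 * Illustrative sketch of the { mask, constructor } idiom used in the
 * chipset tables above.  Bit N set in inst_mask means instance N of the
 * unit is present; one constructor serves all instances.
 */
#include <stdio.h>

typedef int (*ctor_fn)(int inst);

struct unit_spec {
	unsigned int inst_mask;	/* bit N => instance N exists */
	ctor_fn ctor;		/* constructor shared by all instances */
};

static int demo_ce_new(int inst)
{
	printf("creating CE%d\n", inst);
	return 0;
}

int main(void)
{
	/* Mirrors GF100's ".ce = { 0x00000003, gf100_ce_new }". */
	struct unit_spec ce = { 0x00000003, demo_ce_new };

	for (int inst = 0; inst < 32; inst++) {
		if (ce.inst_mask & (1u << inst))
			ce.ctor(inst);	/* prints CE0 and CE1 */
	}
	return 0;
}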
static const struct nvkm_device_chip
nvc1_chipset = {
.name = "GF108",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf108_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf108_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf108_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf108_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf108_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf108_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc3_chipset = {
.name = "GF106",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc4_chipset = {
.name = "GF104",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc8_chipset = {
.name = "GF110",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf110_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf110_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvce_chipset = {
.name = "GF114",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvcf_chipset = {
.name = "GF116",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd7_chipset = {
.name = "GF117",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gf119_gpio_new,
- .i2c = gf117_i2c_new,
- .ibus = gf117_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .therm = gf119_therm_new,
- .timer = nv41_timer_new,
- .volt = gf117_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gf119_disp_new,
- .dma = gf119_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf117_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf117_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gf119_gpio_new },
+ .i2c = { 0x00000001, gf117_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .privring = { 0x00000001, gf117_privring_new },
+ .therm = { 0x00000001, gf119_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf117_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gf119_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf117_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf117_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd9_chipset = {
.name = "GF119",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gf119_gpio_new,
- .i2c = gf119_i2c_new,
- .ibus = gf117_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf119_pmu_new,
- .therm = gf119_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gf119_disp_new,
- .dma = gf119_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf119_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf117_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gf119_gpio_new },
+ .i2c = { 0x00000001, gf119_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf119_pmu_new },
+ .privring = { 0x00000001, gf117_privring_new },
+ .therm = { 0x00000001, gf119_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gf119_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf119_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf117_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve4_chipset = {
.name = "GK104",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve6_chipset = {
.name = "GK106",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve7_chipset = {
.name = "GK107",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvea_chipset = {
.name = "GK20A",
- .bar = gk20a_bar_new,
- .bus = gf100_bus_new,
- .clk = gk20a_clk_new,
- .fb = gk20a_fb_new,
- .fuse = gf100_fuse_new,
- .ibus = gk20a_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk20a_mmu_new,
- .pmu = gk20a_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk20a_volt_new,
- .ce[2] = gk104_ce_new,
- .dma = gf119_dma_new,
- .fifo = gk20a_fifo_new,
- .gr = gk20a_gr_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gk20a_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk20a_clk_new },
+ .fb = { 0x00000001, gk20a_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk20a_mmu_new },
+ .pmu = { 0x00000001, gk20a_pmu_new },
+ .privring = { 0x00000001, gk20a_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk20a_volt_new },
+ .ce = { 0x00000004, gk104_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk20a_fifo_new },
+ .gr = { 0x00000001, gk20a_gr_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf0_chipset = {
.name = "GK110",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk110_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk110_fifo_new,
- .gr = gk110_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk110_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk110_fifo_new },
+ .gr = { 0x00000001, gk110_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf1_chipset = {
.name = "GK110B",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk110_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk110_fifo_new,
- .gr = gk110b_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk110_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk110_fifo_new },
+ .gr = { 0x00000001, gk110b_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv106_chipset = {
.name = "GK208B",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk208_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk208_fifo_new,
- .gr = gk208_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk208_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk208_fifo_new },
+ .gr = { 0x00000001, gk208_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv108_chipset = {
.name = "GK208",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk208_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk208_fifo_new,
- .gr = gk208_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk208_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk208_fifo_new },
+ .gr = { 0x00000001, gk208_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv117_chipset = {
.name = "GM107",
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gm107_devinit_new,
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm107_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
- .therm = gm107_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm107_ce_new,
- .ce[2] = gm107_ce_new,
- .disp = gm107_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm107_fifo_new,
- .gr = gm107_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gm107_devinit_new },
+ .fb = { 0x00000001, gm107_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm107_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm107_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gm107_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000005, gm107_ce_new },
+ .disp = { 0x00000001, gm107_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm107_fifo_new },
+ .gr = { 0x00000001, gm107_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv118_chipset = {
.name = "GM108",
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gm107_devinit_new,
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm107_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
- .therm = gm107_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm107_ce_new,
- .ce[2] = gm107_ce_new,
- .disp = gm107_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm107_fifo_new,
- .gr = gm107_gr_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gm107_devinit_new },
+ .fb = { 0x00000001, gm107_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm107_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm107_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gm107_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000005, gm107_ce_new },
+ .disp = { 0x00000001, gm107_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm107_fifo_new },
+ .gr = { 0x00000001, gm107_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
- .acr = gm20b_acr_new,
- .bar = gm20b_bar_new,
- .bus = gf100_bus_new,
- .clk = gm20b_clk_new,
- .fb = gm20b_fb_new,
- .fuse = gm107_fuse_new,
- .ibus = gk20a_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm20b_mmu_new,
- .pmu = gm20b_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[2] = gm200_ce_new,
- .volt = gm20b_volt_new,
- .dma = gf119_dma_new,
- .fifo = gm20b_fifo_new,
- .gr = gm20b_gr_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm20b_acr_new },
+ .bar = { 0x00000001, gm20b_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gm20b_clk_new },
+ .fb = { 0x00000001, gm20b_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm20b_mmu_new },
+ .pmu = { 0x00000001, gm20b_pmu_new },
+ .privring = { 0x00000001, gk20a_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gm20b_volt_new },
+ .ce = { 0x00000004, gm200_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm20b_fifo_new },
+ .gr = { 0x00000001, gm20b_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp100_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gm200_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp100_ce_new,
- .ce[1] = gp100_ce_new,
- .ce[2] = gp100_ce_new,
- .ce[3] = gp100_ce_new,
- .ce[4] = gp100_ce_new,
- .ce[5] = gp100_ce_new,
- .dma = gf119_dma_new,
- .disp = gp100_disp_new,
- .fifo = gp100_fifo_new,
- .gr = gp100_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .nvenc[2] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp100_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000003f, gp100_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .disp = { 0x00000001, gp100_disp_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp100_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000007, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp104_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp104_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp104_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp104_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp107_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp107_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
- .acr = gp108_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp108_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .sec2 = gp108_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp108_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp108_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .sec2 = { 0x00000001, gp108_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
- .acr = gp10b_acr_new,
- .bar = gm20b_bar_new,
- .bus = gf100_bus_new,
- .fault = gp10b_fault_new,
- .fb = gp10b_fb_new,
- .fuse = gm107_fuse_new,
- .ibus = gp10b_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gp10b_ltc_new,
- .mc = gp10b_mc_new,
- .mmu = gp10b_mmu_new,
- .pmu = gp10b_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp100_ce_new,
- .dma = gf119_dma_new,
- .fifo = gp10b_fifo_new,
- .gr = gp10b_gr_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp10b_acr_new },
+ .bar = { 0x00000001, gm20b_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .fault = { 0x00000001, gp10b_fault_new },
+ .fb = { 0x00000001, gp10b_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gp10b_ltc_new },
+ .mc = { 0x00000001, gp10b_mc_new },
+ .mmu = { 0x00000001, gp10b_mmu_new },
+ .pmu = { 0x00000001, gp10b_pmu_new },
+ .privring = { 0x00000001, gp10b_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x00000001, gp100_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp10b_fifo_new },
+ .gr = { 0x00000001, gp10b_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
- .acr = gp108_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gv100_devinit_new,
- .fault = gv100_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gv100_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .disp = gv100_disp_new,
- .ce[0] = gv100_ce_new,
- .ce[1] = gv100_ce_new,
- .ce[2] = gv100_ce_new,
- .ce[3] = gv100_ce_new,
- .ce[4] = gv100_ce_new,
- .ce[5] = gv100_ce_new,
- .ce[6] = gv100_ce_new,
- .ce[7] = gv100_ce_new,
- .ce[8] = gv100_ce_new,
- .dma = gv100_dma_new,
- .fifo = gv100_fifo_new,
- .gr = gv100_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .nvenc[2] = gm107_nvenc_new,
- .sec2 = gp108_sec2_new,
+ .acr = { 0x00000001, gp108_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gv100_devinit_new },
+ .fault = { 0x00000001, gv100_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gv100_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x000001ff, gv100_ce_new },
+ .disp = { 0x00000001, gv100_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, gv100_fifo_new },
+ .gr = { 0x00000001, gv100_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000007, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp108_sec2_new },
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvdec[1] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000003, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvdec[1] = gm107_nvdec_new,
- .nvdec[2] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000007, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv170_chipset = {
+ .name = "GA100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga100_fb_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+ .name = "GA102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+ .name = "GA104",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
};
static int
@@ -2671,97 +2643,24 @@ nvkm_device_event_func = {
};
struct nvkm_subdev *
-nvkm_device_subdev(struct nvkm_device *device, int index)
+nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
- struct nvkm_engine *engine;
-
- if (device->disable_mask & (1ULL << index))
- return NULL;
-
- switch (index) {
-#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
- _(ACR , device->acr , &device->acr->subdev);
- _(BAR , device->bar , &device->bar->subdev);
- _(VBIOS , device->bios , &device->bios->subdev);
- _(BUS , device->bus , &device->bus->subdev);
- _(CLK , device->clk , &device->clk->subdev);
- _(DEVINIT , device->devinit , &device->devinit->subdev);
- _(FAULT , device->fault , &device->fault->subdev);
- _(FB , device->fb , &device->fb->subdev);
- _(FUSE , device->fuse , &device->fuse->subdev);
- _(GPIO , device->gpio , &device->gpio->subdev);
- _(GSP , device->gsp , &device->gsp->subdev);
- _(I2C , device->i2c , &device->i2c->subdev);
- _(IBUS , device->ibus , device->ibus);
- _(ICCSENSE, device->iccsense, &device->iccsense->subdev);
- _(INSTMEM , device->imem , &device->imem->subdev);
- _(LTC , device->ltc , &device->ltc->subdev);
- _(MC , device->mc , &device->mc->subdev);
- _(MMU , device->mmu , &device->mmu->subdev);
- _(MXM , device->mxm , device->mxm);
- _(PCI , device->pci , &device->pci->subdev);
- _(PMU , device->pmu , &device->pmu->subdev);
- _(THERM , device->therm , &device->therm->subdev);
- _(TIMER , device->timer , &device->timer->subdev);
- _(TOP , device->top , &device->top->subdev);
- _(VOLT , device->volt , &device->volt->subdev);
-#undef _
- default:
- engine = nvkm_device_engine(device, index);
- if (engine)
- return &engine->subdev;
- break;
+ struct nvkm_subdev *subdev;
+
+ list_for_each_entry(subdev, &device->subdev, head) {
+ if (subdev->type == type && subdev->inst == inst)
+ return subdev;
}
+
return NULL;
}
struct nvkm_engine *
-nvkm_device_engine(struct nvkm_device *device, int index)
+nvkm_device_engine(struct nvkm_device *device, int type, int inst)
{
- if (device->disable_mask & (1ULL << index))
- return NULL;
-
- switch (index) {
-#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
- _(BSP , device->bsp , device->bsp);
- _(CE0 , device->ce[0] , device->ce[0]);
- _(CE1 , device->ce[1] , device->ce[1]);
- _(CE2 , device->ce[2] , device->ce[2]);
- _(CE3 , device->ce[3] , device->ce[3]);
- _(CE4 , device->ce[4] , device->ce[4]);
- _(CE5 , device->ce[5] , device->ce[5]);
- _(CE6 , device->ce[6] , device->ce[6]);
- _(CE7 , device->ce[7] , device->ce[7]);
- _(CE8 , device->ce[8] , device->ce[8]);
- _(CIPHER , device->cipher , device->cipher);
- _(DISP , device->disp , &device->disp->engine);
- _(DMAOBJ , device->dma , &device->dma->engine);
- _(FIFO , device->fifo , &device->fifo->engine);
- _(GR , device->gr , &device->gr->engine);
- _(IFB , device->ifb , device->ifb);
- _(ME , device->me , device->me);
- _(MPEG , device->mpeg , device->mpeg);
- _(MSENC , device->msenc , device->msenc);
- _(MSPDEC , device->mspdec , device->mspdec);
- _(MSPPP , device->msppp , device->msppp);
- _(MSVLD , device->msvld , device->msvld);
- _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
- _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
- _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
- _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
- _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
- _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
- _(PM , device->pm , &device->pm->engine);
- _(SEC , device->sec , device->sec);
- _(SEC2 , device->sec2 , &device->sec2->engine);
- _(SW , device->sw , &device->sw->engine);
- _(VIC , device->vic , device->vic);
- _(VP , device->vp , device->vp);
-#undef _
- default:
- WARN_ON(1);
- break;
- }
+ struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst);
+ if (subdev && subdev->func == &nvkm_engine)
+ return container_of(subdev, struct nvkm_engine, subdev);
return NULL;
}
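
Both lookups now reduce to a linear scan of the device's subdev list keyed by (type, inst), and an engine is simply a subdev whose function table is the engine one, recovered via container_of(). A compilable sketch of that pattern, with a hand-rolled singly-linked list standing in for the kernel's list_head (all names here are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ops { const char *kind; };
static const struct ops engine_ops = { "engine" };  /* doubles as a type tag */

struct subdev {
	const struct ops *func;
	int type, inst;
	struct subdev *next;      /* stand-in for the kernel's list_head */
};

struct engine { struct subdev subdev; };

static struct subdev *device_subdev(struct subdev *head, int type, int inst)
{
	for (struct subdev *s = head; s; s = s->next)
		if (s->type == type && s->inst == inst)
			return s;
	return NULL;
}

static struct engine *device_engine(struct subdev *head, int type, int inst)
{
	struct subdev *s = device_subdev(head, type, inst);

	/* Downcast only when the ops table identifies this subdev as an engine. */
	if (s && s->func == &engine_ops)
		return container_of(s, struct engine, subdev);
	return NULL;
}

int main(void)
{
	struct engine gr = { { &engine_ops, 1, 0, NULL } };

	printf("engine found: %s\n", device_engine(&gr.subdev, 1, 0) ? "yes" : "no");
	return 0;
}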
@@ -2770,7 +2669,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
{
const char *action = suspend ? "suspend" : "fini";
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
nvdev_trace(device, "%s running...\n", action);
@@ -2778,12 +2677,10 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
nvkm_acpi_fini(device);
- for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_fini(subdev, suspend);
- if (ret && suspend)
- goto fail;
- }
+ list_for_each_entry_reverse(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_fini(subdev, suspend);
+ if (ret && suspend)
+ goto fail;
}
nvkm_therm_clkgate_fini(device->therm, suspend);
@@ -2796,13 +2693,11 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
return 0;
fail:
- do {
- if ((subdev = nvkm_device_subdev(device, i))) {
- int rret = nvkm_subdev_init(subdev);
- if (rret)
- nvkm_fatal(subdev, "failed restart, %d\n", ret);
- }
- } while (++i < NVKM_SUBDEV_NR);
+ list_for_each_entry_from(subdev, &device->subdev, head) {
+ int rret = nvkm_subdev_init(subdev);
+ if (rret)
+ nvkm_fatal(subdev, "failed restart, %d\n", ret);
+ }
nvdev_trace(device, "%s failed with %d\n", action, ret);
return ret;
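
Because fini now walks the subdev list tail-to-head, a failure during suspend leaves exactly the entries from the failing one onward already shut down; list_for_each_entry_from() then re-initialises that same span by walking forward from the failure point. A toy model of the rollback, using array indices in place of list cursors (names and the failure condition are invented for the demo):

#include <stdio.h>

static const char *names[] = { "bus", "fb", "fifo", "gr" };

static int demo_fini(int i)
{
	printf("fini %s\n", names[i]);
	return i == 1 ? -1 : 0;        /* pretend "fb" refuses to suspend */
}

static void demo_init(int i)
{
	printf("init %s\n", names[i]);
}

int main(void)
{
	int i;

	for (i = 3; i >= 0; i--) {     /* reverse walk, like _reverse() */
		if (demo_fini(i) < 0)
			goto fail;
	}
	return 0;

fail:
	/* Forward from the failure point, like list_for_each_entry_from(). */
	for (; i < 4; i++)
		demo_init(i);
	return 1;
}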
@@ -2812,7 +2707,7 @@ static int
nvkm_device_preinit(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
nvdev_trace(device, "preinit running...\n");
@@ -2824,15 +2719,13 @@ nvkm_device_preinit(struct nvkm_device *device)
goto fail;
}
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_preinit(subdev);
- if (ret)
- goto fail;
- }
+ list_for_each_entry(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_preinit(subdev);
+ if (ret)
+ goto fail;
}
- ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+ ret = nvkm_devinit_post(device->devinit);
if (ret)
goto fail;
@@ -2849,7 +2742,7 @@ int
nvkm_device_init(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
ret = nvkm_device_preinit(device);
@@ -2867,12 +2760,10 @@ nvkm_device_init(struct nvkm_device *device)
goto fail;
}
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_init(subdev);
- if (ret)
- goto fail_subdev;
- }
+ list_for_each_entry(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_init(subdev);
+ if (ret)
+ goto fail_subdev;
}
nvkm_acpi_init(device);
@@ -2883,11 +2774,8 @@ nvkm_device_init(struct nvkm_device *device)
return 0;
fail_subdev:
- do {
- if ((subdev = nvkm_device_subdev(device, i)))
- nvkm_subdev_fini(subdev, false);
- } while (--i >= 0);
-
+ list_for_each_entry_from(subdev, &device->subdev, head)
+ nvkm_subdev_fini(subdev, false);
fail:
nvkm_device_fini(device, false);
@@ -2899,15 +2787,12 @@ void
nvkm_device_del(struct nvkm_device **pdevice)
{
struct nvkm_device *device = *pdevice;
- int i;
+ struct nvkm_subdev *subdev, *subtmp;
if (device) {
mutex_lock(&nv_devices_mutex);
- device->disable_mask = 0;
- for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
- struct nvkm_subdev *subdev =
- nvkm_device_subdev(device, i);
+
+ list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
nvkm_subdev_del(&subdev);
- }
nvkm_event_fini(&device->event);
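
Teardown frees subdevs while still iterating over them, which is only safe with the _safe iterator variants that cache the successor before the current entry is deleted. A forward singly-linked sketch of the same idea (the driver iterates in reverse, so subdevs are destroyed in the opposite order to their construction):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

/* Freeing the current entry invalidates its links, so cache the successor
 * first -- the job the kernel's list_for_each_entry_safe*() helpers do. */
static void destroy_all(struct node *n)
{
	while (n) {
		struct node *tmp = n->next;   /* grab successor before free */

		printf("deleting subdev %d\n", n->id);
		free(n);
		n = tmp;
	}
}

int main(void)
{
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));

	if (!a || !b)
		return 1;
	a->id = 0; a->next = b;
	b->id = 1; b->next = NULL;
	destroy_all(a);
	return 0;
}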
@@ -2966,7 +2851,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
u32 boot0, boot1, strap;
- int ret = -EEXIST, i;
+ int ret = -EEXIST, j;
unsigned chipset;
mutex_lock(&nv_devices_mutex);
@@ -2983,6 +2868,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->name = name;
list_add_tail(&device->head, &nv_devices);
device->debug = nvkm_dbgopt(device->dbgopt, "device");
+ INIT_LIST_HEAD(&device->subdev);
ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
if (ret)
@@ -3063,6 +2949,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
default:
break;
}
@@ -3160,10 +3047,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
default:
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
+
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+ ret = -ENODEV;
+ goto done;
+ }
+ break;
}
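
Chipsets that are recognised but not yet usable are now reachable only behind an explicit opt-in, so GA100 boards stay inert unless the user asks for them. A rough standalone analog of that gating follows; the toy matcher below is not the driver's parser -- the real check is nvkm_boolopt() on the device's config string, typically set via something like nouveau.config=NvEnableUnsupportedChipsets=1 on the kernel command line:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct chip { const char *name; };
static const struct chip nv170 = { "GA100" };

/* Toy substring matcher standing in for the driver's option parsing. */
static bool opt_enabled(const char *cfg, const char *name)
{
	return cfg && strstr(cfg, name) != NULL;
}

int main(void)
{
	const char *cfgopt = "NvEnableUnsupportedChipsets=1";
	const struct chip *chip = NULL;

	if (opt_enabled(cfgopt, "NvEnableUnsupportedChipsets"))
		chip = &nv170;     /* opt-in path for bring-up-only chips */

	printf("%s\n", chip ? chip->name : "unknown chipset");
	return 0;
}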
nvdev_info(device, "NVIDIA %s (%08x)\n",
@@ -3202,88 +3102,46 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mutex_init(&device->mutex);
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-#define _(s,m) case s: \
- if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \
- ret = device->chip->m(device, (s), &device->m); \
- if (ret) { \
- subdev = nvkm_device_subdev(device, (s)); \
- nvkm_subdev_del(&subdev); \
- device->m = NULL; \
- if (ret != -ENODEV) { \
- nvdev_error(device, "%s ctor failed, %d\n", \
- nvkm_subdev_name[s], ret); \
- goto done; \
- } \
- } \
- } \
- break
- switch (i) {
- _(NVKM_SUBDEV_ACR , acr);
- _(NVKM_SUBDEV_BAR , bar);
- _(NVKM_SUBDEV_VBIOS , bios);
- _(NVKM_SUBDEV_BUS , bus);
- _(NVKM_SUBDEV_CLK , clk);
- _(NVKM_SUBDEV_DEVINIT , devinit);
- _(NVKM_SUBDEV_FAULT , fault);
- _(NVKM_SUBDEV_FB , fb);
- _(NVKM_SUBDEV_FUSE , fuse);
- _(NVKM_SUBDEV_GPIO , gpio);
- _(NVKM_SUBDEV_GSP , gsp);
- _(NVKM_SUBDEV_I2C , i2c);
- _(NVKM_SUBDEV_IBUS , ibus);
- _(NVKM_SUBDEV_ICCSENSE, iccsense);
- _(NVKM_SUBDEV_INSTMEM , imem);
- _(NVKM_SUBDEV_LTC , ltc);
- _(NVKM_SUBDEV_MC , mc);
- _(NVKM_SUBDEV_MMU , mmu);
- _(NVKM_SUBDEV_MXM , mxm);
- _(NVKM_SUBDEV_PCI , pci);
- _(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_THERM , therm);
- _(NVKM_SUBDEV_TIMER , timer);
- _(NVKM_SUBDEV_TOP , top);
- _(NVKM_SUBDEV_VOLT , volt);
- _(NVKM_ENGINE_BSP , bsp);
- _(NVKM_ENGINE_CE0 , ce[0]);
- _(NVKM_ENGINE_CE1 , ce[1]);
- _(NVKM_ENGINE_CE2 , ce[2]);
- _(NVKM_ENGINE_CE3 , ce[3]);
- _(NVKM_ENGINE_CE4 , ce[4]);
- _(NVKM_ENGINE_CE5 , ce[5]);
- _(NVKM_ENGINE_CE6 , ce[6]);
- _(NVKM_ENGINE_CE7 , ce[7]);
- _(NVKM_ENGINE_CE8 , ce[8]);
- _(NVKM_ENGINE_CIPHER , cipher);
- _(NVKM_ENGINE_DISP , disp);
- _(NVKM_ENGINE_DMAOBJ , dma);
- _(NVKM_ENGINE_FIFO , fifo);
- _(NVKM_ENGINE_GR , gr);
- _(NVKM_ENGINE_IFB , ifb);
- _(NVKM_ENGINE_ME , me);
- _(NVKM_ENGINE_MPEG , mpeg);
- _(NVKM_ENGINE_MSENC , msenc);
- _(NVKM_ENGINE_MSPDEC , mspdec);
- _(NVKM_ENGINE_MSPPP , msppp);
- _(NVKM_ENGINE_MSVLD , msvld);
- _(NVKM_ENGINE_NVENC0 , nvenc[0]);
- _(NVKM_ENGINE_NVENC1 , nvenc[1]);
- _(NVKM_ENGINE_NVENC2 , nvenc[2]);
- _(NVKM_ENGINE_NVDEC0 , nvdec[0]);
- _(NVKM_ENGINE_NVDEC1 , nvdec[1]);
- _(NVKM_ENGINE_NVDEC2 , nvdec[2]);
- _(NVKM_ENGINE_PM , pm);
- _(NVKM_ENGINE_SEC , sec);
- _(NVKM_ENGINE_SEC2 , sec2);
- _(NVKM_ENGINE_SW , sw);
- _(NVKM_ENGINE_VIC , vic);
- _(NVKM_ENGINE_VP , vp);
- default:
- WARN_ON(1);
- continue;
- }
-#undef _
+#define NVKM_LAYOUT_ONCE(type,data,ptr) \
+ if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
+ WARN_ON(device->chip->ptr.inst != 0x00000001); \
+ ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
+ subdev = nvkm_device_subdev(device, (type), 0); \
+ if (ret) { \
+ nvkm_subdev_del(&subdev); \
+ device->ptr = NULL; \
+ if (ret != -ENODEV) { \
+ nvdev_error(device, "%s ctor failed: %d\n", \
+ nvkm_subdev_type[(type)], ret); \
+ goto done; \
+ } \
+ } else { \
+ subdev->pself = (void **)&device->ptr; \
+ } \
+ }
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
+ WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
+ for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
+ if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
+ int inst = (device->chip->ptr.inst == 1) ? -1 : (j); \
+ ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \
+ subdev = nvkm_device_subdev(device, (type), (j)); \
+ if (ret) { \
+ nvkm_subdev_del(&subdev); \
+ device->ptr[j] = NULL; \
+ if (ret != -ENODEV) { \
+ nvdev_error(device, "%s%d ctor failed: %d\n", \
+ nvkm_subdev_type[(type)], (j), ret); \
+ goto done; \
+ } \
+ } else { \
+ subdev->pself = (void **)&device->ptr[j]; \
+ } \
+ } \
}
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
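
This replaces the giant per-index switch with the X-macro technique: <core/layout.h> holds the single authoritative list of subdev types, and each consumer defines NVKM_LAYOUT_ONCE/NVKM_LAYOUT_INST to stamp out whatever per-entry code it needs before including it (the pself back-pointer stored on success lets generic teardown clear the device's convenience pointer later). A self-contained demo of the expansion trick, with the shared list inlined as a macro instead of a header and all entry names invented:

#include <stdio.h>

/* One authoritative list of entries; consumers define DEMO_ONCE/DEMO_INST
 * before expanding it, mirroring how <core/layout.h> is consumed. */
#define DEMO_LAYOUT \
	DEMO_ONCE(0, bar) \
	DEMO_ONCE(1, fifo) \
	DEMO_INST(2, ce, 9)

/* First expansion: declare the storage. */
#define DEMO_ONCE(type, ptr)      int ptr;
#define DEMO_INST(type, ptr, cnt) int ptr[cnt];
struct device { DEMO_LAYOUT };
#undef DEMO_INST
#undef DEMO_ONCE

/* Second expansion: generate per-entry "constructor" statements. */
#define DEMO_ONCE(type, ptr)      dev.ptr = (type);
#define DEMO_INST(type, ptr, cnt) for (int j = 0; j < (cnt); j++) dev.ptr[j] = (type);

int main(void)
{
	struct device dev;

	DEMO_LAYOUT

	printf("bar=%d fifo=%d ce[0]=%d\n", dev.bar, dev.fifo, dev.ce[0]);
	return 0;
}
#undef DEMO_INST
#undef DEMO_ONCE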
ret = 0;
done:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 54eab5e04230..93949b3c7214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -15,7 +15,6 @@
#include <subdev/gpio.h>
#include <subdev/gsp.h>
#include <subdev/i2c.h>
-#include <subdev/ibus.h>
#include <subdev/iccsense.h>
#include <subdev/instmem.h>
#include <subdev/ltc.h>
@@ -24,6 +23,7 @@
#include <subdev/mxm.h>
#include <subdev/pci.h>
#include <subdev/pmu.h>
+#include <subdev/privring.h>
#include <subdev/therm.h>
#include <subdev/timer.h>
#include <subdev/top.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 03c6d9aef075..fea9d8f2b10c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -43,15 +43,15 @@ static int
nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
{
struct nvkm_subdev *subdev;
- enum nvkm_devidx subidx;
+ enum nvkm_subdev_type type;
switch (mthd & NV_DEVICE_INFO_UNIT) {
- case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
+ case NV_DEVICE_HOST(0): type = NVKM_ENGINE_FIFO; break;
default:
return -EINVAL;
}
- subdev = nvkm_device_subdev(device, subidx);
+ subdev = nvkm_device_subdev(device, type, 0);
if (subdev)
return nvkm_subdev_info(subdev, mthd, data);
return -ENODEV;
@@ -66,37 +66,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
args->mthd = NV_DEVICE_INFO_INVALID;
return;
}
-
- switch (args->mthd) {
-#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i; \
- for (_i = (B), args->data = 0ULL; _i <= (C); _i++) { \
- if (nvkm_device_engine(device, _i)) \
- args->data |= BIT_ULL(_i); \
- } \
-}
-#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A , NVKM_ENGINE_##A)
-#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
- case ENGINE_A(SW ); break;
- case ENGINE_A(GR ); break;
- case ENGINE_A(MPEG ); break;
- case ENGINE_A(ME ); break;
- case ENGINE_A(CIPHER); break;
- case ENGINE_A(BSP ); break;
- case ENGINE_A(VP ); break;
- case ENGINE_B(CE ); break;
- case ENGINE_A(SEC ); break;
- case ENGINE_A(MSVLD ); break;
- case ENGINE_A(MSPDEC); break;
- case ENGINE_A(MSPPP ); break;
- case ENGINE_A(MSENC ); break;
- case ENGINE_A(VIC ); break;
- case ENGINE_A(SEC2 ); break;
- case ENGINE_B(NVDEC ); break;
- case ENGINE_B(NVENC ); break;
- default:
- args->mthd = NV_DEVICE_INFO_INVALID;
- break;
- }
+ args->mthd = NV_DEVICE_INFO_INVALID;
}
static int
@@ -176,6 +146,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+ case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;
@@ -356,7 +327,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
int i;
for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
- if (!(engine = nvkm_device_engine(device, i)) ||
+ if (!(engine = nvkm_device_engine(device, i, 0)) ||
!(engine->func->base.sclass))
continue;
oclass->engine = engine;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index cf075311cdd2..b03f043efe26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
nvkm-y += nvkm/engine/disp/capsgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index cbd33e87b799..5daa77755276 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -149,10 +149,10 @@ static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
- mutex_lock(&disp->engine.subdev.mutex);
- if (disp->client == oproxy)
- disp->client = NULL;
- mutex_unlock(&disp->engine.subdev.mutex);
+ spin_lock(&disp->client.lock);
+ if (disp->client.object == oproxy)
+ disp->client.object = NULL;
+ spin_unlock(&disp->client.lock);
}
static const struct nvkm_oproxy_func
@@ -175,13 +175,13 @@ nvkm_disp_class_new(struct nvkm_device *device,
return ret;
*pobject = &oproxy->base;
- mutex_lock(&disp->engine.subdev.mutex);
- if (disp->client) {
- mutex_unlock(&disp->engine.subdev.mutex);
+ spin_lock(&disp->client.lock);
+ if (disp->client.object) {
+ spin_unlock(&disp->client.lock);
return -EBUSY;
}
- disp->client = oproxy;
- mutex_unlock(&disp->engine.subdev.mutex);
+ disp->client.object = oproxy;
+ spin_unlock(&disp->client.lock);
return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}
@@ -473,21 +473,22 @@ nvkm_disp = {
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp *disp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp *disp)
{
disp->func = func;
INIT_LIST_HEAD(&disp->head);
INIT_LIST_HEAD(&disp->ior);
INIT_LIST_HEAD(&disp->outp);
INIT_LIST_HEAD(&disp->conn);
- return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
+ spin_lock_init(&disp->client.lock);
+ return nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
}
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp **pdisp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_disp_ctor(func, device, index, *pdisp);
+ return nvkm_disp_ctor(func, device, type, inst, *pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 50e3539f33d2..a7a7eb041515 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -278,7 +278,7 @@ nv50_disp_chan_child_get(struct nvkm_object *object, int index,
const struct nvkm_device_oclass *oclass = NULL;
if (chan->func->bind)
- sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+ sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
else
sclass->engine = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 3800aeb507d0..55fbfe28c6dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -33,6 +33,12 @@
#include <nvif/event.h>
+/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
+ * the x86 option ROM. However, the relevant VBIOS table versions weren't modified,
+ * so we're unable to detect this in a nice way.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
struct lt_state {
struct nvkm_dp *dp;
u8 stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
+ if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+ lnkcmp += 3;
+ lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+ nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+ init.outp = &dp->outp.info;
+ init.or = ior->id;
+ init.link = ior->asy.link;
+ );
+ }
+
/* Set desired link configuration on the source. */
if ((lnkcmp = lt.dp->info.lnkcmp)) {
if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
);
}
- /* Execute BeforeLinkTraining script from DP Info table. */
- nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
- init.outp = &dp->outp.info;
- init.or = dp->outp.ior->id;
- init.link = dp->outp.ior->asy.link;
- );
+ if (!AMPERE_IED_HACK(dp->outp.disp)) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+ init.outp = &dp->outp.info;
+ init.or = dp->outp.ior->id;
+ init.link = dp->outp.ior->asy.link;
+ );
+ }
}
static const struct dp_rates {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 731f188fc1ee..156bbe8b2de3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -41,7 +41,8 @@ g84_disp = {
};
int
-g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g84_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&g84_disp, device, index, pdisp);
+ return nv50_disp_new_(&g84_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index def54fe1951e..3425b5d3bc72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -41,7 +41,8 @@ g94_disp = {
};
int
-g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&g94_disp, device, index, pdisp);
+ return nv50_disp_new_(&g94_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644
index 000000000000..68aa52588d92
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+ga102_disp = {
+ .init = tu102_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &ga102_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&ga102_disp, device, type, inst, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index e675d9b9d5d7..a6bafe7fea1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -266,7 +266,8 @@ gf119_disp = {
};
int
-gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gf119_disp, device, index, pdisp);
+ return nv50_disp_new_(&gf119_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 4c3439b1a62d..3b79cf233ac5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -41,7 +41,8 @@ gk104_disp = {
};
int
-gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk104_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gk104_disp, device, index, pdisp);
+ return nv50_disp_new_(&gk104_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index bc6f4750c942..988eb12237a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -41,7 +41,8 @@ gk110_disp = {
};
int
-gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk110_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gk110_disp, device, index, pdisp);
+ return nv50_disp_new_(&gk110_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 031cf6b03a76..5d8108feeacd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -41,7 +41,8 @@ gm107_disp = {
};
int
-gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm107_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gm107_disp, device, index, pdisp);
+ return nv50_disp_new_(&gm107_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index ec9c33a5162d..f7bb66087476 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -41,7 +41,8 @@ gm200_disp = {
};
int
-gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gm200_disp, device, index, pdisp);
+ return nv50_disp_new_(&gm200_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 8471de3f3b61..af0ca812a394 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -40,7 +40,8 @@ gp100_disp = {
};
int
-gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gp100_disp, device, index, pdisp);
+ return nv50_disp_new_(&gp100_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index a3779c5046ea..065fea1bdfd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -67,7 +67,8 @@ gp102_disp = {
};
int
-gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gp102_disp, device, index, pdisp);
+ return nv50_disp_new_(&gp102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index f80183701f44..22bc269df64a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -41,7 +41,8 @@ gt200_disp = {
};
int
-gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gt200_disp, device, index, pdisp);
+ return nv50_disp_new_(&gt200_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 7581efc1357e..63a912b174d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -41,7 +41,8 @@ gt215_disp = {
};
int
-gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt215_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gt215_disp, device, index, pdisp);
+ return nv50_disp_new_(&gt215_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index c1032527f791..53879d5271cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -441,7 +441,8 @@ gv100_disp = {
};
int
-gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gv100_disp, device, index, pdisp);
+ return nv50_disp_new_(&gv100_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 09f3038eff26..9f0bb7c6b010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index cfdce23ab83a..762a59f24bbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -39,7 +39,8 @@ mcp77_disp = {
};
int
-mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp77_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&mcp77_disp, device, index, pdisp);
+ return nv50_disp_new_(&mcp77_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 85d9329cfa0e..e5c58aae15de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -39,7 +39,8 @@ mcp89_disp = {
};
int
-mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp89_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&mcp89_disp, device, index, pdisp);
+ return nv50_disp_new_(&mcp89_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index b780ba1a3bc7..a12097db2c2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -64,11 +64,12 @@ nv04_disp = {
};
int
-nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv04_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
int ret, i;
- ret = nvkm_disp_new_(&nv04_disp, device, index, pdisp);
+ ret = nvkm_disp_new_(&nv04_disp, device, type, inst, pdisp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index e21556bf2cb1..3f20e49070ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -154,7 +154,7 @@ nv50_disp_ = {
int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp **pdisp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
struct nv50_disp *disp;
int ret;
@@ -164,7 +164,7 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
disp->func = func;
*pdisp = &disp->base;
- ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
+ ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base);
if (ret)
return ret;
@@ -769,7 +769,8 @@ nv50_disp = {
};
int
-nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&nv50_disp, device, index, pdisp);
+ return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index a677161c7f3a..025cacd7c3b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -47,8 +47,8 @@ void nv50_disp_super_2_1(struct nv50_disp *, struct nvkm_head *);
void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *);
void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *);
-int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp **);
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp **);
struct nv50_disp_func {
int (*init)(struct nv50_disp *);
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
void gv100_disp_super(struct work_struct *);
int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+int tu102_disp_init(struct nv50_disp *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index f815a5342880..ec57d8b6bce9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -4,10 +4,10 @@
#include <engine/disp.h>
#include "outp.h"
-int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp *);
-int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp **);
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp **);
void nvkm_disp_vblank(struct nvkm_disp *, int head);
struct nvkm_disp_func {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644
index 000000000000..9af07c3cf9fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+ .user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
+ {{0,0,GA102_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,GA102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+ .base.oclass = GA102_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = ga102_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 7070f5408d92..27bb170d0293 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644
index 000000000000..033827de9116
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
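+	/* sor->dp.bw is the DPCD LINK_BW_SET code, in units of 270MHz:
+	 * 0x06 = 1.62GHz, 0x0a = 2.7GHz, 0x14 = 5.4GHz, 0x1e = 8.1GHz.
+	 */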
+ switch (sor->dp.bw) {
+ case 0x06: clksor |= 0x00000000; break;
+ case 0x0a: clksor |= 0x00040000; break;
+ case 0x14: clksor |= 0x00080000; break;
+ case 0x1e: clksor |= 0x000c0000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
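+	/* Enable the requested lane count, plus MST and enhanced framing when in use. */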
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ u32 div2 = 0;
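+	/* Presumably a divider for high-speed (>340MHz) TMDS links, where the
+	 * TMDS character clock runs at a quarter of the pixel clock.
+	 */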
+ if (sor->asy.proto == TMDS) {
+ if (sor->tmds.high_speed)
+ div2 = 1;
+ }
+ nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
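+	/* 0x08a15c appears to be a per-SOR HDA capability mask; SORs with their
+	 * bit set get the HDA-capable ior funcs.
+	 */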
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 59865a934c4b..0cf9e8752d25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 883ae4151ff8..f5f8dc8e8f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
tu102_disp_init(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
@@ -146,7 +146,8 @@ tu102_disp = {
};
int
-tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&tu102_disp, device, index, pdisp);
+ return nv50_disp_new_(&tu102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 11b7b8fd5dda..425cde35f128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -104,7 +104,7 @@ nvkm_dma = {
int
nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
- int index, struct nvkm_dma **pdma)
+ enum nvkm_subdev_type type, int inst, struct nvkm_dma **pdma)
{
struct nvkm_dma *dma;
@@ -112,5 +112,5 @@ nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
return -ENOMEM;
dma->func = func;
- return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
+ return nvkm_engine_ctor(&nvkm_dma, device, type, inst, true, &dma->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
index efec5d322179..99a1e07fa204 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -30,7 +30,8 @@ gf100_dma = {
};
int
-gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+ return nvkm_dma_new_(&gf100_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
index 34c766039aed..fd1d1fc22dc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -30,7 +30,8 @@ gf119_dma = {
};
int
-gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf119_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+ return nvkm_dma_new_(&gf119_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
index c65a4c2ea93d..a5af0df30663 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
@@ -28,7 +28,8 @@ gv100_dma = {
};
int
-gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gv100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gv100_dma, device, index, pdma);
+ return nvkm_dma_new_(&gv100_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
index 30747a0ce488..ea5a889f60c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -30,7 +30,8 @@ nv04_dma = {
};
int
-nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv04_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+ return nvkm_dma_new_(&nv04_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
index 77aca7b71c83..6e8f79660014 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -30,7 +30,8 @@ nv50_dma = {
};
int
-nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv50_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+ return nvkm_dma_new_(&nv50_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index 0c9d9640a59d..d403bedb485a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -9,8 +9,8 @@ struct nvkm_dmaobj_func {
struct nvkm_gpuobj **);
};
-int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
- int index, struct nvkm_dma **);
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_dma **);
struct nvkm_dma_func {
int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 8675613e142b..43b7dec45179 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -108,7 +108,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
}
}
- if (nvkm_mc_enabled(device, engine->subdev.index)) {
+ if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) {
nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
nvkm_wr32(device, base + 0x014, 0xffffffff);
}
@@ -335,9 +335,9 @@ nvkm_falcon = {
};
int
-nvkm_falcon_new_(const struct nvkm_falcon_func *func,
- struct nvkm_device *device, int index, bool enable,
- u32 addr, struct nvkm_engine **pengine)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+ struct nvkm_engine **pengine)
{
struct nvkm_falcon *falcon;
@@ -351,6 +351,5 @@ nvkm_falcon_new_(const struct nvkm_falcon_func *func,
falcon->data.size = func->data.size;
*pengine = &falcon->engine;
- return nvkm_engine_ctor(&nvkm_falcon, device, index,
- enable, &falcon->engine);
+ return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index c773caf21f6b..2ed4ff05d207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -292,7 +292,7 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
switch (mthd) {
- case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
+ case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
default:
if (fifo->func->info)
return fifo->func->info(fifo, mthd, data);
@@ -313,7 +313,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
- nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}
static int
@@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent);
+ mutex_destroy(&fifo->mutex);
return data;
}
@@ -351,13 +352,14 @@ nvkm_fifo = {
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, int nr, struct nvkm_fifo *fifo)
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
{
int ret;
fifo->func = func;
INIT_LIST_HEAD(&fifo->chan);
spin_lock_init(&fifo->lock);
+ mutex_init(&fifo->mutex);
if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
fifo->nr = NVKM_FIFO_CHID_NR;
@@ -365,7 +367,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->nr = nr;
bitmap_clear(fifo->mask, 0, fifo->nr);
- ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index d83485385934..8d957643940a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -35,6 +35,15 @@ struct nvkm_fifo_chan_object {
int hash;
};
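+/* Engine contexts are now looked up via the fifo's own engine_id() mapping,
+ * rather than being indexed by global subdev index.
+ */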
+static struct nvkm_fifo_engn *
+nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->fifo->func->engine_id(chan->fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
@@ -42,8 +51,8 @@ nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.object->engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
- const char *name = nvkm_subdev_name[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
int ret = 0;
if (--engn->usecount)
@@ -75,8 +84,8 @@ nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.object->engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
- const char *name = nvkm_subdev_name[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
int ret;
if (engn->usecount++)
@@ -108,7 +117,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.base.engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
if (chan->func->object_dtor)
chan->func->object_dtor(chan, object->hash);
@@ -118,7 +127,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
chan->func->engine_dtor(chan, engine);
nvkm_object_del(&engn->object);
if (chan->vmm)
- atomic_dec(&chan->vmm->engref[engine->subdev.index]);
+ atomic_dec(&chan->vmm->engref[engine->subdev.type]);
}
}
@@ -135,7 +144,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
{
struct nvkm_engine *engine = oclass->engine;
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
struct nvkm_fifo_chan_object *object;
int ret = 0;
@@ -152,7 +161,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
};
if (chan->vmm)
- atomic_inc(&chan->vmm->engref[engine->subdev.index]);
+ atomic_inc(&chan->vmm->engref[engine->subdev.type]);
if (engine->func->fifo.cclass) {
ret = engine->func->fifo.cclass(chan, &cclass,
@@ -203,13 +212,12 @@ nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
struct nvkm_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_engine *engine;
- u64 mask = chan->engines;
- int ret, i, c;
+ u32 engm = chan->engm;
+ int engi, ret, c;
- for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
- if (!(engine = nvkm_device_engine(device, i)))
+ for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
+ if (!(engine = fifo->func->id_engine(fifo, engi)))
continue;
oclass->engine = engine;
oclass->base.oclass = 0;
@@ -352,7 +360,7 @@ nvkm_fifo_chan_func = {
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
- u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+ u64 hvmm, u64 push, u32 engm, int bar, u32 base,
u32 user, const struct nvkm_oclass *oclass,
struct nvkm_fifo_chan *chan)
{
@@ -365,7 +373,7 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
chan->func = func;
chan->fifo = fifo;
- chan->engines = engines;
+ chan->engm = engm;
INIT_LIST_HEAD(&chan->head);
/* instance memory */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 177e10562600..e53504354841 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -22,7 +22,7 @@ struct nvkm_fifo_chan_func {
int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
u32 size, u32 align, bool zero, u64 vm, u64 push,
- u64 engines, int bar, u32 base, u32 user,
+ u32 engm, int bar, u32 base, u32 user,
const struct nvkm_oclass *, struct nvkm_fifo_chan *);
struct nvkm_fifo_chan_oclass {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index a5c998fe4485..353b77d9b3dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -45,29 +45,9 @@ g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
}
static int
-g84_fifo_chan_engine(struct nvkm_engine *engine)
-{
- switch (engine->subdev.index) {
- case NVKM_ENGINE_GR : return 0;
- case NVKM_ENGINE_MPEG :
- case NVKM_ENGINE_MSPPP : return 1;
- case NVKM_ENGINE_CE0 : return 2;
- case NVKM_ENGINE_VP :
- case NVKM_ENGINE_MSPDEC: return 3;
- case NVKM_ENGINE_CIPHER:
- case NVKM_ENGINE_SEC : return 4;
- case NVKM_ENGINE_BSP :
- case NVKM_ENGINE_MSVLD : return 5;
- default:
- WARN_ON(1);
- return 0;
- }
-}
-
-static int
g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : return -1;
case NVKM_ENGINE_GR : return 0x0020;
@@ -79,7 +59,7 @@ g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0080;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : return 0x00a0;
- case NVKM_ENGINE_CE0 : return 0x00c0;
+ case NVKM_ENGINE_CE : return 0x00c0;
default:
WARN_ON(1);
return -1;
@@ -102,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
if (offset < 0)
return 0;
- engn = g84_fifo_chan_engine(engine);
+ engn = fifo->base.func->engine_id(&fifo->base, engine);
save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
done = nvkm_msec(device, 2000,
@@ -134,7 +114,7 @@ g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
u64 limit, start;
int offset;
@@ -162,12 +142,11 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- int engn = engine->subdev.index;
if (g84_fifo_chan_engine_addr(engine) < 0)
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
}
static int
@@ -178,14 +157,14 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
u32 context;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context = 0x00000000; break;
case NVKM_ENGINE_GR : context = 0x00100000; break;
case NVKM_ENGINE_MPEG :
case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
case NVKM_ENGINE_ME :
- case NVKM_ENGINE_CE0 : context = 0x00300000; break;
+ case NVKM_ENGINE_CE : context = 0x00300000; break;
case NVKM_ENGINE_VP :
case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
case NVKM_ENGINE_CIPHER:
@@ -241,20 +220,20 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
0x10000, 0x1000, false, vmm, push,
- (1ULL << NVKM_ENGINE_BSP) |
- (1ULL << NVKM_ENGINE_CE0) |
- (1ULL << NVKM_ENGINE_CIPHER) |
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_ME) |
- (1ULL << NVKM_ENGINE_MPEG) |
- (1ULL << NVKM_ENGINE_MSPDEC) |
- (1ULL << NVKM_ENGINE_MSPPP) |
- (1ULL << NVKM_ENGINE_MSVLD) |
- (1ULL << NVKM_ENGINE_SEC) |
- (1ULL << NVKM_ENGINE_SW) |
- (1ULL << NVKM_ENGINE_VIC) |
- (1ULL << NVKM_ENGINE_VP),
+ BIT(G84_FIFO_ENGN_SW) |
+ BIT(G84_FIFO_ENGN_GR) |
+ BIT(G84_FIFO_ENGN_MPEG) |
+ BIT(G84_FIFO_ENGN_MSPPP) |
+ BIT(G84_FIFO_ENGN_ME) |
+ BIT(G84_FIFO_ENGN_CE0) |
+ BIT(G84_FIFO_ENGN_VP) |
+ BIT(G84_FIFO_ENGN_MSPDEC) |
+ BIT(G84_FIFO_ENGN_CIPHER) |
+ BIT(G84_FIFO_ENGN_SEC) |
+ BIT(G84_FIFO_ENGN_VIC) |
+ BIT(G84_FIFO_ENGN_BSP) |
+ BIT(G84_FIFO_ENGN_MSVLD) |
+ BIT(G84_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 7c125a15f963..f7ac1061fa84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -12,10 +12,17 @@ struct gf100_fifo_chan {
struct list_head head;
bool killed;
- struct {
+#define GF100_FIFO_ENGN_GR 0
+#define GF100_FIFO_ENGN_MSPDEC 1
+#define GF100_FIFO_ENGN_MSPPP 2
+#define GF100_FIFO_ENGN_MSVLD 3
+#define GF100_FIFO_ENGN_CE0 4
+#define GF100_FIFO_ENGN_CE1 5
+#define GF100_FIFO_ENGN_SW 15
+ struct gf100_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
- } engn[NVKM_SUBDEV_NR];
+ } engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 22698661aa85..cfbe096e604f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -16,10 +16,11 @@ struct gk104_fifo_chan {
struct nvkm_memory *mthd;
- struct {
+#define GK104_FIFO_ENGN_SW 15
+ struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
- } engn[NVKM_SUBDEV_NR];
+ } engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
@@ -29,6 +30,7 @@ int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
+struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 60ca79465aff..727bc8976b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -9,7 +9,11 @@ struct nv04_fifo_chan {
struct nvkm_fifo_chan base;
struct nv04_fifo *fifo;
u32 ramfc;
- struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV04_FIFO_ENGN_SW 0
+#define NV04_FIFO_ENGN_GR 1
+#define NV04_FIFO_ENGN_MPEG 2
+#define NV04_FIFO_ENGN_DMA 3
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 85f7dbf53c99..c44d7c81dd52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -31,7 +31,7 @@
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : return -1;
case NVKM_ENGINE_GR : return 0x0000;
@@ -42,6 +42,15 @@ nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
}
}
+struct nvkm_gpuobj **
+nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -103,7 +112,7 @@ nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
u64 limit, start;
int offset;
@@ -130,7 +139,7 @@ nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+ nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine));
}
static int
@@ -139,12 +148,11 @@ nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- int engn = engine->subdev.index;
if (nv50_fifo_chan_engine_addr(engine) < 0)
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
}
void
@@ -162,7 +170,7 @@ nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
u32 context;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context = 0x00000000; break;
case NVKM_ENGINE_GR : context = 0x00100000; break;
@@ -240,10 +248,10 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
0x10000, 0x1000, false, vmm, push,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_SW) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG),
+ BIT(NV50_FIFO_ENGN_SW) |
+ BIT(NV50_FIFO_ENGN_GR) |
+ BIT(NV50_FIFO_ENGN_MPEG) |
+ BIT(NV50_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index 5735ff72a9d1..af8bdf275552 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -15,13 +15,33 @@ struct nv50_fifo_chan {
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
- struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV50_FIFO_ENGN_SW 0
+#define NV50_FIFO_ENGN_GR 1
+#define NV50_FIFO_ENGN_MPEG 2
+#define NV50_FIFO_ENGN_DMA 3
+
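+/* Engines that share a PFIFO context area on G84 share an engn[] slot
+ * (e.g. MPEG/MSPPP, VP/MSPDEC); see g84_fifo_chan_engine_addr().
+ */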
+#define G84_FIFO_ENGN_SW 0
+#define G84_FIFO_ENGN_GR 1
+#define G84_FIFO_ENGN_MPEG 2
+#define G84_FIFO_ENGN_MSPPP 2
+#define G84_FIFO_ENGN_ME 3
+#define G84_FIFO_ENGN_CE0 3
+#define G84_FIFO_ENGN_VP 4
+#define G84_FIFO_ENGN_MSPDEC 4
+#define G84_FIFO_ENGN_CIPHER 5
+#define G84_FIFO_ENGN_SEC 5
+#define G84_FIFO_ENGN_VIC 5
+#define G84_FIFO_ENGN_BSP 6
+#define G84_FIFO_ENGN_MSVLD 6
+#define G84_FIFO_ENGN_DMA 7
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index c213122cf088..dbcdc5fab990 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_ramht_remove(imem->ramht, cookie);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
}
static int
@@ -53,7 +53,7 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
int hash;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context |= 0x00000000; break;
case NVKM_ENGINE_GR : context |= 0x00010000; break;
@@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL;
}
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return hash;
}
@@ -191,9 +191,9 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
index f5f355ff005d..07d80d54a07c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -62,9 +62,9 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
index 7edc6a564b5d..edd70a114218 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -62,10 +62,10 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) | /* NV31- */
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
index 5f722c6e8a2f..0411fb908457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -35,7 +35,7 @@
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW:
return false;
@@ -55,6 +55,15 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
}
}
+static struct nvkm_gpuobj **
+nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -99,7 +108,7 @@ nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
return 0;
- inst = chan->engn[engine->subdev.index]->addr >> 4;
+ inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
@@ -121,7 +130,7 @@ nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+ nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine));
}
static int
@@ -130,13 +139,12 @@ nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- const int engn = engine->subdev.index;
u32 reg, ctx;
if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine));
}
static int
@@ -149,7 +157,7 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
int hash;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context |= 0x00000000; break;
case NVKM_ENGINE_GR : context |= 0x00100000; break;
@@ -159,10 +167,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL;
}
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return hash;
}
@@ -209,10 +217,10 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0xc00000, 0x1000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
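nv40_fifo_dma_engn() above is the first of several identical helpers (gf100_fifo_gpfifo_engine() and gk104_fifo_gpfifo_engine() follow the same shape later in this patch): ask the new engine_id hook for the engine's per-FIFO slot, then return a pointer into the channel's context array, or NULL when the engine has no slot. A simplified standalone sketch with invented types:

    /* Sketch of the shared lookup-helper shape; all names are invented. */
    struct gpuobj;

    struct chan_sketch {
            struct gpuobj *engn[8];                 /* per-FIFO context slots */
            int (*engine_id)(int engine_type);      /* stand-in for fifo->func->engine_id */
    };

    static struct gpuobj **
    chan_engn(struct chan_sketch *chan, int engine_type)
    {
            int engi = chan->engine_id(engine_type);

            if (engi >= 0)
                    return &chan->engn[engi];
            return NULL;
    }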
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index ff7b529764fe..c0a7d0f21dac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -38,12 +38,82 @@ g84_fifo_uevent_init(struct nvkm_fifo *fifo)
nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
}
+static struct nvkm_engine *
+g84_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_engine *engine;
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case G84_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case G84_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case G84_FIFO_ENGN_MPEG :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPPP, 0)))
+ return engine;
+ type = NVKM_ENGINE_MPEG;
+ break;
+ case G84_FIFO_ENGN_ME :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_CE, 0)))
+ return engine;
+ type = NVKM_ENGINE_ME;
+ break;
+ case G84_FIFO_ENGN_VP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPDEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_VP;
+ break;
+ case G84_FIFO_ENGN_CIPHER:
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_VIC, 0)))
+ return engine;
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_SEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_CIPHER;
+ break;
+ case G84_FIFO_ENGN_BSP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSVLD, 0)))
+ return engine;
+ type = NVKM_ENGINE_BSP;
+ break;
+ case G84_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+static int
+g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return G84_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return G84_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return G84_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_CE : return G84_FIFO_ENGN_CE0;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return G84_FIFO_ENGN_VP;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return G84_FIFO_ENGN_CIPHER;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return G84_FIFO_ENGN_BSP;
+ case NVKM_ENGINE_DMAOBJ: return G84_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
static const struct nvkm_fifo_func
g84_fifo = {
.dtor = nv50_fifo_dtor,
.oneinit = nv50_fifo_oneinit,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = g84_fifo_engine_id,
+ .id_engine = g84_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.uevent_init = g84_fifo_uevent_init,
@@ -56,7 +126,8 @@ g84_fifo = {
};
int
-g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
+ return nv50_fifo_new_(&g84_fifo, device, type, inst, pfifo);
}
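g84_fifo_id_engine() above resolves an aliased slot to whichever engine actually exists on the running chipset, probing the newer engine first (MSPPP, CE, MSPDEC, VIC then SEC, MSVLD) and falling back to the pre-G84 type (MPEG, ME, VP, CIPHER, BSP). A standalone sketch of that probe order; lookup() is an invented stub for nvkm_device_engine():

    #include <stddef.h>

    struct engine { int present; };

    /* Invented stub: returns NULL when the engine is absent. */
    static struct engine *
    lookup(int type)
    {
            (void)type;
            return NULL;
    }

    enum { TYPE_MSPPP, TYPE_MPEG };

    /* Probe the newer name first, fall back to the pre-G84 one. */
    static struct engine *
    slot2_engine(void)
    {
            struct engine *engine = lookup(TYPE_MSPPP);     /* G84+ */

            if (engine)
                    return engine;
            return lookup(TYPE_MPEG);                       /* pre-G84 */
    }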
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 5a39e51d42d7..8b4f36b3e34b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
int nr = 0;
int target;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&fifo->base.mutex);
cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active;
@@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
WARN_ON(1);
return;
}
@@ -86,59 +86,61 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
!(nvkm_rd32(device, 0x00227c) & 0x00100000),
msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist update timeout\n");
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
list_del_init(&chan->head);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
list_add_tail(&chan->head, &fifo->chan);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
-static inline int
-gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
+static struct nvkm_engine *
+gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
{
- switch (engn) {
- case NVKM_ENGINE_GR : engn = 0; break;
- case NVKM_ENGINE_MSVLD : engn = 1; break;
- case NVKM_ENGINE_MSPPP : engn = 2; break;
- case NVKM_ENGINE_MSPDEC: engn = 3; break;
- case NVKM_ENGINE_CE0 : engn = 4; break;
- case NVKM_ENGINE_CE1 : engn = 5; break;
+ enum nvkm_subdev_type type;
+ int inst;
+
+ switch (engi) {
+ case GF100_FIFO_ENGN_GR : type = NVKM_ENGINE_GR ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE0 : type = NVKM_ENGINE_CE ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE1 : type = NVKM_ENGINE_CE ; inst = 1; break;
+ case GF100_FIFO_ENGN_SW : type = NVKM_ENGINE_SW ; inst = 0; break;
default:
- return -1;
+ WARN_ON(1);
+ return NULL;
}
- return engn;
+ return nvkm_device_engine(fifo->engine.subdev.device, type, inst);
}
-static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
+static int
+gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
-
- switch (engn) {
- case 0: engn = NVKM_ENGINE_GR; break;
- case 1: engn = NVKM_ENGINE_MSVLD; break;
- case 2: engn = NVKM_ENGINE_MSPPP; break;
- case 3: engn = NVKM_ENGINE_MSPDEC; break;
- case 4: engn = NVKM_ENGINE_CE0; break;
- case 5: engn = NVKM_ENGINE_CE1; break;
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_GR : return GF100_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC;
+ case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP;
+ case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD;
+ case NVKM_ENGINE_CE : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst;
+ case NVKM_ENGINE_SW : return GF100_FIFO_ENGN_SW;
default:
- return NULL;
+ WARN_ON(1);
+ return -1;
}
-
- return nvkm_device_engine(device, engn);
}
static void
@@ -148,20 +150,17 @@ gf100_fifo_recover_work(struct work_struct *w)
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
unsigned long flags;
- u32 engn, engm = 0;
- u64 mask, todo;
+ u32 engm, engn, todo;
spin_lock_irqsave(&fifo->base.lock, flags);
- mask = fifo->recover.mask;
+ engm = fifo->recover.mask;
fifo->recover.mask = 0ULL;
spin_unlock_irqrestore(&fifo->base.lock, flags);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
- engm |= 1 << gf100_fifo_engidx(fifo, engn);
nvkm_mask(device, 0x002630, engm, engm);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
- if ((engine = nvkm_device_engine(device, engn))) {
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) {
+ if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) {
nvkm_subdev_fini(&engine->subdev, false);
WARN_ON(nvkm_subdev_init(&engine->subdev));
}
@@ -179,17 +178,18 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
+ int engi = gf100_fifo_engine_id(&fifo->base, engine);
nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
- nvkm_subdev_name[engine->subdev.index], chid);
+ engine->subdev.name, chid);
assert_spin_locked(&fifo->base.lock);
nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
list_del_init(&chan->head);
chan->killed = true;
- if (engine != &fifo->base.engine)
- fifo->recover.mask |= 1ULL << engine->subdev.index;
+ if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
+ fifo->recover.mask |= BIT(engi);
schedule_work(&fifo->recover.work);
nvkm_fifo_kevent(&fifo->base, chid);
}
@@ -205,8 +205,8 @@ gf100_fifo_fault_engine[] = {
{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PCOUNTER" },
{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
- { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
- { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
+ { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{}
};
@@ -286,7 +286,7 @@ gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_device_engine(device, eu->data2);
+ engine = nvkm_device_engine(device, eu->data2, eu->inst);
break;
}
}
@@ -335,7 +335,7 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
if (busy && unk0 && unk1) {
list_for_each_entry(chan, &fifo->chan, head) {
if (chan->base.chid == chid) {
- engine = gf100_fifo_engine(fifo, engn);
+ engine = gf100_fifo_id_engine(&fifo->base, engn);
if (!engine)
break;
gf100_fifo_recover(fifo, engine, chan);
@@ -673,6 +673,8 @@ gf100_fifo = {
.fini = gf100_fifo_fini,
.intr = gf100_fifo_intr,
.fault = gf100_fifo_fault,
+ .engine_id = gf100_fifo_engine_id,
+ .id_engine = gf100_fifo_id_engine,
.uevent_init = gf100_fifo_uevent_init,
.uevent_fini = gf100_fifo_uevent_fini,
.chan = {
@@ -682,7 +684,8 @@ gf100_fifo = {
};
int
-gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
struct gf100_fifo *fifo;
@@ -692,5 +695,5 @@ gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
*pfifo = &fifo->base;
- return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+ return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base);
}
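Because recover.mask now stores per-FIFO engine IDs directly (BIT(engi) in the hunk above) rather than 64-bit subdev indices, the worker walks it with plain __ffs() and feeds each bit straight into gf100_fifo_id_engine(). A runnable userspace sketch of the same mask-walk idiom, with ffs() from <strings.h> (1-based) standing in for the kernel's 0-based __ffs():

    #include <stdio.h>
    #include <strings.h>

    int
    main(void)
    {
            unsigned int engm = (1u << 0) | (1u << 4);      /* e.g. GR and CE0 slots */
            unsigned int todo, engn = 0;

            for (todo = engm; todo; todo &= ~(1u << engn)) {
                    engn = (unsigned int)ffs((int)todo) - 1;  /* lowest set bit */
                    printf("recover engine slot %u\n", engn);
            }
            return 0;
    }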
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 5d4b695cab8e..69da601f1754 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -36,19 +36,7 @@
#include <nvif/class.h>
#include <nvif/cl0080.h>
-struct gk104_fifo_engine_status {
- bool busy;
- bool faulted;
- bool chsw;
- bool save;
- bool load;
- struct {
- bool tsg;
- u32 id;
- } prev, next, *chan;
-};
-
-static void
+void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
struct gk104_fifo_engine_status *status)
{
@@ -95,7 +83,7 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
status->chan == &status->next ? "*" : " ");
}
-static int
+int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
@@ -112,7 +100,7 @@ gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -EINVAL;
}
-static int
+int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
struct nvkm_oclass *oclass)
{
@@ -134,14 +122,14 @@ gk104_fifo_class_get(struct nvkm_fifo *base, int index,
return c;
}
-static void
+void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-static void
+void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
@@ -180,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan;
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp;
int nr = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&fifo->base.mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
fifo->runlist[runl].next = !fifo->runlist[runl].next;
@@ -203,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
nvkm_done(mem);
func->commit(fifo, runl, mem, nr);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
if (!list_empty(&chan->head)) {
list_del_init(&chan->head);
if (cgrp && !--cgrp->chan_nr)
list_del_init(&cgrp->head);
}
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
if (cgrp) {
if (!cgrp->chan_nr++)
list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
@@ -231,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
} else {
list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
}
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
@@ -271,6 +258,30 @@ gk104_fifo_pbdma = {
.init = gk104_fifo_pbdma_init,
};
+struct nvkm_engine *
+gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
+{
+ return gk104_fifo(base)->engine[engi].engine;
+}
+
+int
+gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int engn;
+
+ if (engine->subdev.type == NVKM_ENGINE_SW)
+ return GK104_FIFO_ENGN_SW;
+
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine)
+ return engn;
+ }
+
+ WARN_ON(1);
+ return -1;
+}
+
static void
gk104_fifo_recover_work(struct work_struct *w)
{
@@ -422,11 +433,12 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
* called from the fault handler already.
*/
if (!status.faulted && engine) {
- mmui = nvkm_top_fault_id(device, engine->subdev.index);
+ mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
if (mmui < 0) {
const struct nvkm_enum *en = fifo->func->fault.engine;
for (; en && en->name; en++) {
- if (en->data2 == engine->subdev.index) {
+ if (en->data2 == engine->subdev.type &&
+ en->inst == engine->subdev.inst) {
mmui = en->value;
break;
}
@@ -471,8 +483,8 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
struct nvkm_engine *engine = NULL;
struct nvkm_fifo_chan *chan;
unsigned long flags;
- char ct[8] = "HUB/", en[16] = "";
- int engn;
+ const char *en = "";
+ char ct[8] = "HUB/";
er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
@@ -496,23 +508,20 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_device_engine(device, ee->data2);
+ engine = nvkm_device_engine(device, ee->data2, 0);
break;
}
}
if (ee == NULL) {
- enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
- if (engidx < NVKM_SUBDEV_NR) {
- const char *src = nvkm_subdev_name[engidx];
- char *dst = en;
- do {
- *dst++ = toupper(*src++);
- } while(*src);
- engine = nvkm_device_engine(device, engidx);
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ en = engine->subdev.name;
}
} else {
- snprintf(en, sizeof(en), "%s", ee->name);
+ en = ee->name;
}
spin_lock_irqsave(&fifo->base.lock, flags);
@@ -535,11 +544,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
* correct engine(s), but just in case we can't find the channel
* information...
*/
- for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
- if (fifo->engine[engn].engine == engine) {
+ if (engine) {
+ int engn = fifo->base.func->engine_id(&fifo->base, engine);
+ if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
gk104_fifo_recover_engn(fifo, engn);
- break;
- }
}
spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -556,7 +564,7 @@ gk104_fifo_bind_reason[] = {
{}
};
-static void
+void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -627,7 +635,7 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -637,7 +645,7 @@ gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
nvkm_wr32(device, 0x00256c, stat);
}
-static void
+void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -680,7 +688,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -729,7 +737,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -750,7 +758,7 @@ gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
-static void
+void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -763,7 +771,7 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
nvkm_fifo_uevent(&fifo->base);
@@ -861,7 +869,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
}
}
-static void
+void
gk104_fifo_fini(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -871,24 +879,46 @@ gk104_fifo_fini(struct nvkm_fifo *base)
nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
-static int
+int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
struct gk104_fifo *fifo = gk104_fifo(base);
switch (mthd) {
- case NV_DEVICE_FIFO_RUNLISTS:
+ case NV_DEVICE_HOST_RUNLISTS:
*data = (1ULL << fifo->runlist_nr) - 1;
return 0;
- case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
- NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
- int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
- if (runl < fifo->runlist_nr) {
- unsigned long engm = fifo->runlist[runl].engm;
+ case NV_DEVICE_HOST_RUNLIST_ENGINES: {
+ if (*data < fifo->runlist_nr) {
+ unsigned long engm = fifo->runlist[*data].engm;
struct nvkm_engine *engine;
+ int engn;
*data = 0;
for_each_set_bit(engn, &engm, fifo->engine_nr) {
- if ((engine = fifo->engine[engn].engine))
- *data |= BIT_ULL(engine->subdev.index);
+ if ((engine = fifo->engine[engn].engine)) {
+#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
+ switch (engine->subdev.type) {
+ CASE(SW );
+ CASE(GR );
+ CASE(MPEG );
+ CASE(ME );
+ CASE(CIPHER);
+ CASE(BSP );
+ CASE(VP );
+ CASE(CE );
+ CASE(SEC );
+ CASE(MSVLD );
+ CASE(MSPDEC);
+ CASE(MSPPP );
+ CASE(MSENC );
+ CASE(VIC );
+ CASE(SEC2 );
+ CASE(NVDEC );
+ CASE(NVENC );
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
}
return 0;
}
@@ -899,15 +929,15 @@ gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
}
}
-static int
+int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
- int engn, runl, pbid, ret, i, j;
- enum nvkm_devidx engidx;
+ struct nvkm_top_device *tdev;
+ int pbid, ret, i, j;
u32 *map;
fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
@@ -921,25 +951,41 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
/* Determine runlist configuration from topology device info. */
- i = 0;
- while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
+ list_for_each_entry(tdev, &device->top->device, head) {
+ const int engn = tdev->engine;
+ char _en[16], *en;
+
+ if (engn < 0)
+ continue;
+
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
- if (map[j] & (1 << runl)) {
+ if (map[j] & BIT(tdev->runlist)) {
pbid = j;
break;
}
}
+ fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
+ if (!fifo->engine[engn].engine) {
+ snprintf(_en, sizeof(_en), "%s, %d",
+ nvkm_subdev_type[tdev->type], tdev->inst);
+ en = _en;
+ } else {
+ en = fifo->engine[engn].engine->subdev.name;
+ }
+
nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
- engn, runl, pbid, nvkm_subdev_name[engidx]);
+ tdev->engine, tdev->runlist, pbid, en);
- fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
- fifo->engine[engn].runl = runl;
+ fifo->engine[engn].runl = tdev->runlist;
fifo->engine[engn].pbid = pbid;
fifo->engine_nr = max(fifo->engine_nr, engn + 1);
- fifo->runlist[runl].engm |= 1 << engn;
- fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
+ fifo->runlist[tdev->runlist].engm |= BIT(engn);
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
+ if (tdev->type == NVKM_ENGINE_GR)
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
+ fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
}
kfree(map);
@@ -974,7 +1020,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
-static void
+void
gk104_fifo_init(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1006,7 +1052,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002140, 0x7fffffff);
}
-static void *
+void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1033,6 +1079,8 @@ gk104_fifo_ = {
.fini = gk104_fifo_fini,
.intr = gk104_fifo_intr,
.fault = gk104_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
.uevent_init = gk104_fifo_uevent_init,
.uevent_fini = gk104_fifo_uevent_fini,
.recover_chan = gk104_fifo_recover_chan,
@@ -1042,7 +1090,7 @@ gk104_fifo_ = {
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
- int index, int nr, struct nvkm_fifo **pfifo)
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
struct gk104_fifo *fifo;
@@ -1052,7 +1100,7 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
*pfifo = &fifo->base;
- return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
+ return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
}
const struct nvkm_enum
@@ -1084,12 +1132,12 @@ gk104_fifo_fault_engine[] = {
{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PERF" },
{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
- { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
- { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
- { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
{}
};
@@ -1191,7 +1239,8 @@ gk104_fifo = {
};
int
-gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
}
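The oneinit rework above replaces the nvkm_top_engine() index loop with a walk over the topology device list, and it records two masks per runlist: engm for the engines themselves and engm_sw, which also carries the SW slot whenever the runlist hosts GR (gpfifogk104.c below consumes engm_sw). A runnable sketch of that fold, with invented sample data and an is_gr flag standing in for the tdev->type == NVKM_ENGINE_GR check:

    #include <stdio.h>

    struct top_device { int engine, runlist, is_gr; };

    int
    main(void)
    {
            const struct top_device tdevs[] = { { 0, 0, 1 }, { 4, 1, 0 } };
            enum { ENGN_SW = 15 };          /* stand-in for GK104_FIFO_ENGN_SW */
            unsigned int engm[2] = { 0 }, engm_sw[2] = { 0 };

            for (unsigned int i = 0; i < 2; i++) {
                    const struct top_device *tdev = &tdevs[i];

                    engm[tdev->runlist] |= 1u << tdev->engine;
                    engm_sw[tdev->runlist] |= 1u << tdev->engine;
                    if (tdev->is_gr)
                            engm_sw[tdev->runlist] |= 1u << ENGN_SW;
                    printf("runlist %d: engm %#x engm_sw %#x\n", tdev->runlist,
                           engm[tdev->runlist], engm_sw[tdev->runlist]);
            }
            return 0;
    }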
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 6407a4a174cf..f2d12ae73944 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -35,6 +35,7 @@ struct gk104_fifo {
struct list_head cgrp;
struct list_head chan;
u32 engm;
+ u32 engm_sw;
} runlist[16];
int runlist_nr;
@@ -87,11 +88,43 @@ struct gk104_fifo_func {
bool cgrp_force;
};
-int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
+struct gk104_fifo_engine_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status);
+void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
+void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
+void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
+void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
+void gk104_fifo_intr_engine(struct gk104_fifo *fifo);
+void *gk104_fifo_dtor(struct nvkm_fifo *base);
+int gk104_fifo_oneinit(struct nvkm_fifo *base);
+int gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data);
+void gk104_fifo_init(struct nvkm_fifo *base);
+void gk104_fifo_fini(struct nvkm_fifo *base);
+int gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject);
+int gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ struct nvkm_oclass *oclass);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *fifo);
+void gk104_fifo_uevent_init(struct nvkm_fifo *fifo);
extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
int gk104_fifo_pbdma_nr(struct gk104_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index f820969e4405..915278c7e012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -60,7 +60,8 @@ gk110_fifo = {
};
int
-gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gk110_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 2f54787b5fd0..cb703693de52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -57,7 +57,8 @@ gk208_fifo = {
};
int
-gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+ return gk104_fifo_new_(&gk208_fifo, device, type, inst, 1024, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index a814c4e0ed3e..6e35cf44c640 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -38,7 +38,8 @@ gk20a_fifo = {
};
int
-gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+ return gk104_fifo_new_(&gk20a_fifo, device, type, inst, 128, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index c2a2e4572f6c..7af6e687d474 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -106,7 +106,8 @@ gm107_fifo = {
};
int
-gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo);
+ return gk104_fifo_new_(&gm107_fifo, device, type, inst, 2048, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b8cfe3b28c4f..573658cb6c73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -54,7 +54,8 @@ gm200_fifo = {
};
int
-gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gm200_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 70b4feebc1fa..556c97e54f14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -38,7 +38,8 @@ gm20b_fifo = {
};
int
-gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm20b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+ return gk104_fifo_new_(&gm20b_fifo, device, type, inst, 512, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 2c7a0176b3c8..6b46b6b65b87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -91,7 +91,8 @@ gp100_fifo = {
};
int
-gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gp100_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 8c65ad4feedb..7a5929cb4d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -39,7 +39,8 @@ gp10b_fifo = {
};
int
-gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp10b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
+ return gk104_fifo_new_(&gp10b_fifo, device, type, inst, 512, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index 75f9632789b3..4e78bbe3b94b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -52,11 +52,10 @@ gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_SW : return 0;
case NVKM_ENGINE_GR : return 0x0210;
- case NVKM_ENGINE_CE0 : return 0x0230;
- case NVKM_ENGINE_CE1 : return 0x0240;
+ case NVKM_ENGINE_CE : return 0x0230 + (engine->subdev.inst * 0x10);
case NVKM_ENGINE_MSPDEC: return 0x0250;
case NVKM_ENGINE_MSPPP : return 0x0260;
case NVKM_ENGINE_MSVLD : return 0x0270;
@@ -66,6 +65,15 @@ gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
}
}
+static struct gf100_fifo_engn *
+gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -77,7 +85,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -87,7 +95,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
chan->base.chid, chan->base.object.client->name);
ret = -ETIMEDOUT;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
if (ret && suspend)
return ret;
@@ -108,13 +116,13 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
{
const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
- nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
- nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr));
nvkm_done(inst);
}
@@ -126,8 +134,9 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
}
static int
@@ -136,23 +145,21 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- int engn = engine->subdev.index;
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
int ret;
if (!gf100_fifo_gpfifo_engine_addr(engine))
return 0;
- ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
- ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
- &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
- return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
- chan->engn[engn].vma, NULL, 0);
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
static void
@@ -243,13 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
0x1000, 0x1000, true, args->v0.vmm, 0,
- (1ULL << NVKM_ENGINE_CE0) |
- (1ULL << NVKM_ENGINE_CE1) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MSPDEC) |
- (1ULL << NVKM_ENGINE_MSPPP) |
- (1ULL << NVKM_ENGINE_MSVLD) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(GF100_FIFO_ENGN_GR) |
+ BIT(GF100_FIFO_ENGN_MSPDEC) |
+ BIT(GF100_FIFO_ENGN_MSPPP) |
+ BIT(GF100_FIFO_ENGN_MSVLD) |
+ BIT(GF100_FIFO_ENGN_CE0) |
+ BIT(GF100_FIFO_ENGN_CE1) |
+ BIT(GF100_FIFO_ENGN_SW),
1, fifo->user.bar->addr, 0x1000,
oclass, &chan->base);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 728a1edbf98c..b6900a52bcce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -65,19 +65,18 @@ int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
int ret;
- mutex_lock(&chan->base.fifo->engine.subdev.mutex);
+ mutex_lock(&chan->base.fifo->mutex);
ret = gk104_fifo_gpfifo_kick_locked(chan);
- mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
+ mutex_unlock(&chan->base.fifo->mutex);
return ret;
}
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_SW :
- case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
- return 0;
+ case NVKM_ENGINE_CE : return 0;
case NVKM_ENGINE_GR : return 0x0210;
case NVKM_ENGINE_SEC : return 0x0220;
case NVKM_ENGINE_MSPDEC: return 0x0250;
@@ -85,15 +84,26 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0270;
case NVKM_ENGINE_VIC : return 0x0280;
case NVKM_ENGINE_MSENC : return 0x0290;
- case NVKM_ENGINE_NVDEC0: return 0x02100270;
- case NVKM_ENGINE_NVENC0: return 0x02100290;
- case NVKM_ENGINE_NVENC1: return 0x0210;
+ case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVENC :
+ if (engine->subdev.inst)
+ return 0x0210;
+ return 0x02100290;
default:
WARN_ON(1);
return 0;
}
}
+struct gk104_fifo_engn *
+gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -126,13 +136,13 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma->addr;
- u32 datalo = lower_32_bits(addr) | 0x00000004;
- u32 datahi = upper_32_bits(addr);
+ u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
+ u32 datahi = upper_32_bits(engn->vma->addr);
nvkm_kmap(inst);
nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
@@ -151,8 +161,9 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
}
int
@@ -161,23 +172,21 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- int engn = engine->subdev.index;
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
int ret;
if (!gk104_fifo_gpfifo_engine_addr(engine))
return 0;
- ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
- ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
- &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
- return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
- chan->engn[engn].vma, NULL, 0);
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
void
@@ -247,23 +256,12 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
{
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- unsigned long engm;
- u64 subdevs = 0;
u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
*runlists = BIT_ULL(runlist);
- engm = fifo->runlist[runlist].engm;
- for_each_set_bit(i, &engm, fifo->engine_nr) {
- if (fifo->engine[i].engine)
- subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
- }
-
- if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
- subdevs |= BIT_ULL(NVKM_ENGINE_SW);
-
/* Allocate the channel. */
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
@@ -273,7 +271,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vmm, 0, subdevs,
+ 0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
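With engm_sw computed once at oneinit, the channel constructor above no longer rebuilds a subdev mask per allocation; it passes fifo->runlist[runlist].engm_sw straight to nvkm_fifo_chan_ctor(). A trivial sketch of the replacement, names mirroring the patch but types and sizes invented:

    /* Sketch: the per-channel loop became a precomputed per-runlist field. */
    struct runlist_sketch { unsigned long engm, engm_sw; };

    static unsigned long
    chan_engine_mask(const struct runlist_sketch *runlist)
    {
            return runlist->engm_sw;        /* SW slot already folded in */
    }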
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index a7462cf59d65..ee4967b706a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
int ret;
/* Block runlist to prevent the channel from being rescheduled. */
- mutex_lock(&subdev->mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
/* Preempt the channel. */
@@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
/* Resume runlist. */
nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return ret;
}
@@ -70,8 +70,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret;
- if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
- engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+ if (engine->subdev.type == NVKM_ENGINE_CE)
return gk104_fifo_gpfifo_kick(chan);
ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
@@ -90,17 +89,15 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
- u64 addr;
- if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
- engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+ if (engine->subdev.type == NVKM_ENGINE_CE)
return 0;
- addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
- nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004);
- nvkm_wo32(inst, 0x214, upper_32_bits(addr));
+ nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
+ nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
nvkm_done(inst);
return gv100_fifo_gpfifo_engine_valid(chan, false, true);
@@ -129,8 +126,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- unsigned long engm;
- u64 subdevs = 0;
u64 usermem, mthd;
u32 size;
@@ -138,12 +133,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
return -EINVAL;
*runlists = BIT_ULL(runlist);
- engm = fifo->runlist[runlist].engm;
- for_each_set_bit(i, &engm, fifo->engine_nr) {
- if (fifo->engine[i].engine)
- subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
- }
-
/* Allocate the channel. */
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
@@ -153,7 +142,7 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
- 0, subdevs, 1, fifo->user.bar->addr, 0x200,
+ 0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 6ee1bb32a071..70e16a91ac12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -301,7 +301,8 @@ gv100_fifo = {
};
int
-gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gv100_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index c1d1b1aa5bc6..c6730c124769 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -94,6 +94,38 @@ __releases(fifo->base.lock)
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
+struct nvkm_engine *
+nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case NV04_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case NV04_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break;
+ case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+int
+nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return NV04_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return NV04_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG : return NV04_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
static const char *
nv_dma_state_err(u32 state)
{
@@ -326,7 +358,7 @@ nv04_fifo_init(struct nvkm_fifo *base)
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+ enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
struct nvkm_fifo **pfifo)
{
struct nv04_fifo *fifo;
@@ -337,7 +369,7 @@ nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->ramfc = ramfc;
*pfifo = &fifo->base;
- ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
+ ret = nvkm_fifo_ctor(func, device, type, inst, nr, &fifo->base);
if (ret)
return ret;
@@ -349,6 +381,8 @@ static const struct nvkm_fifo_func
nv04_fifo = {
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -358,8 +392,8 @@ nv04_fifo = {
};
int
-nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv04_fifo, device, index, 16,
- nv04_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e5ecceee77ae..3f23bcde4a54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -17,8 +17,7 @@ struct nv04_fifo {
const struct nv04_fifo_ramfc *ramfc;
};
-int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, int nr, const struct nv04_fifo_ramfc *,
- struct nvkm_fifo **);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, const struct nv04_fifo_ramfc *, struct nvkm_fifo **);
void nv04_fifo_init(struct nvkm_fifo *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index f9a87deb2b3d..f8887f0f2f82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -43,6 +43,8 @@ static const struct nvkm_fifo_func
nv10_fifo = {
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -52,8 +54,8 @@ nv10_fifo = {
};
int
-nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv10_fifo, device, index, 32,
- nv10_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index f6d383a21222..3f94c7b5b054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -81,6 +81,8 @@ static const struct nvkm_fifo_func
nv17_fifo = {
.init = nv17_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -90,8 +92,8 @@ nv17_fifo = {
};
int
-nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv17_fifo, device, index, 32,
- nv17_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 2d61fd832ddb..f9ea46809bc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -112,6 +112,8 @@ static const struct nvkm_fifo_func
nv40_fifo = {
.init = nv40_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -121,8 +123,8 @@ nv40_fifo = {
};
int
-nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv40_fifo, device, index, 32,
- nv40_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index fa6e094d8068..be94156ea248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
void
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
nv50_fifo_runlist_update_locked(fifo);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
int
@@ -107,7 +107,7 @@ nv50_fifo_dtor(struct nvkm_fifo *base)
int
nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, struct nvkm_fifo **pfifo)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
struct nv50_fifo *fifo;
int ret;
@@ -116,7 +116,7 @@ nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return -ENOMEM;
*pfifo = &fifo->base;
- ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
+ ret = nvkm_fifo_ctor(func, device, type, inst, 128, &fifo->base);
if (ret)
return ret;
@@ -131,6 +131,8 @@ nv50_fifo = {
.oneinit = nv50_fifo_oneinit,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -141,7 +143,8 @@ nv50_fifo = {
};
int
-nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
+ return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 87d30b6bd2ea..0111e7e5a4e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -10,8 +10,8 @@ struct nv50_fifo {
int cur_runlist;
};
-int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, struct nvkm_fifo **);
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fifo **);
void *nv50_fifo_dtor(struct nvkm_fifo *);
int nv50_fifo_oneinit(struct nvkm_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 0ef8baab513e..899272801a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -4,8 +4,8 @@
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
#include <engine/fifo.h>
-int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, int nr, struct nvkm_fifo *);
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
void nvkm_fifo_cevent(struct nvkm_fifo *);
void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
@@ -23,6 +23,8 @@ struct nvkm_fifo_func {
void (*fini)(struct nvkm_fifo *);
void (*intr)(struct nvkm_fifo *);
void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
+ int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *);
+ struct nvkm_engine *(*id_engine)(struct nvkm_fifo *, int engi);
void (*pause)(struct nvkm_fifo *, unsigned long *);
void (*start)(struct nvkm_fifo *, unsigned long *);
void (*uevent_init)(struct nvkm_fifo *);
@@ -35,8 +37,13 @@ struct nvkm_fifo_func {
};
void nv04_fifo_intr(struct nvkm_fifo *);
+int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *nv04_fifo_id_engine(struct nvkm_fifo *, int);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
+
+int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *gk104_fifo_id_engine(struct nvkm_fifo *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 005f3e1729b9..e417044cc347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -24,7 +24,13 @@
#include "changk104.h"
#include "user.h"
+#include <core/client.h>
#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/top.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
#include <nvif/class.h>
@@ -109,8 +115,363 @@ tu102_fifo = {
.cgrp_force = true,
};
+static void
+tu102_fifo_recover_work(struct work_struct *w)
+{
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, runm, todo;
+ int engn, runl;
+
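+ /* Atomically fetch and clear the pending recovery masks under the fifo lock. */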
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
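+ /* Keep the affected runlists blocked while recovery runs. */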
+ nvkm_mask(device, 0x002630, runm, runm);
+
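+ /* Reset each engine scheduled for recovery by cycling it through fini/init. */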
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
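+ /* Rebuild the runlists that were touched, then unblock them. */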
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_update(fifo, runl);
+
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+tu102_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runm = BIT(runl);
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
+
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+tu102_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+ struct gk104_fifo_chan *chan;
+ struct nvkm_fifo_cgrp *cgrp;
+
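+ /* Look for the channel directly on the runlist, and unlink it. */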
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ return chan;
+ }
+ }
+
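+ /* Otherwise the id may name a channel group; take its first channel. */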
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ if (cgrp->id == chid) {
+ chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+ list_del_init(&chan->head);
+ if (!--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ return chan;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+tu102_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Lookup SW state for channel, and mark it as dead. */
+ chan = tu102_fifo_recover_chid(fifo, runl, chid);
+ if (chan) {
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
+ }
+
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ tu102_fifo_recover_engn(fifo, engn);
+ }
+}
+
+static void
+tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+ /* The channel is no longer viable; kill it. */
+ tu102_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Preempt the runlist. */
+ nvkm_wr32(device, 0x2638, BIT(runl));
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+ schedule_work(&fifo->recover.work);
+}
+
+static void
+tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *en = "";
+ char ct[8] = "HUB/";
+ int engn;
+
+ er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, ee->data2, 0);
+ break;
+ }
+ }
+
+ if (ee == NULL) {
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ if (engine)
+ en = engine->subdev.name;
+ }
+ } else {
+ en = ee->name;
+ }
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : en,
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ tu102_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ tu102_fifo_recover_engn(fifo, engn);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags, engm;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+
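+ /* Read and acknowledge the per-engine timeout status, then recover each engine. */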
+ engm = nvkm_rd32(device, 0x2a30);
+ nvkm_wr32(device, 0x2a30, engm);
+
+ for_each_set_bit(engn, &engm, 32)
+ tu102_fifo_recover_engn(fifo, engn);
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+
+ nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
+static void
+tu102_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
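+ /* Service each pending, unmasked interrupt source, clearing its status as we go. */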
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ tu102_fifo_intr_ctxsw_timeout(fifo);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000100) {
+ tu102_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
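+ /* PBDMA interrupts: service every unit flagged in 0x0025a0. */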
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+
+ while (mask) {
+ u32 unit = __ffs(mask);
+
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
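+ /* Anything left over is unexpected; log it and mask it off. */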
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .info = gk104_fifo_info,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = tu102_fifo_intr,
+ .fault = tu102_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = tu102_fifo_recover_chan,
+ .class_get = gk104_fifo_class_get,
+ .class_new = gk104_fifo_class_new,
+};
+
int
-tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
+ struct gk104_fifo *fifo;
+
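+ /* Reuse the gk104 fifo state; tu102 supplies its own fault/recovery handling. */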
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->func = &tu102_fifo;
+ INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&tu102_fifo_, device, type, inst, 4096, &fifo->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index d41fb94524e9..61759f54406e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -175,8 +175,8 @@ nvkm_gr = {
int
nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, bool enable, struct nvkm_gr *gr)
+ enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_gr *gr)
{
gr->func = func;
- return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
+ return nvkm_engine_ctor(&nvkm_gr, device, type, inst, enable, &gr->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
index da1ba74682b4..65c332118fd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -192,7 +192,7 @@ g84_gr = {
};
int
-g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+g84_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&g84_gr, device, index, pgr);
+ return nv50_gr_new_(&g84_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 749f73fc45a8..397ff4fe9df8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2087,8 +2087,8 @@ gf100_gr_flcn = {
};
int
-gf100_gr_new_(const struct gf100_gr_fwif *fwif,
- struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
int ret;
@@ -2097,7 +2097,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif,
return -ENOMEM;
*pgr = &gr->base;
- ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, type, inst, true, &gr->base);
if (ret)
return ret;
@@ -2483,7 +2483,7 @@ gf100_gr_fwif[] = {
};
int
-gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index dfd5dd74f0d5..c0038f906135 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -416,6 +416,6 @@ void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
-int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 0536fe8b2b92..3acd99c306f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -152,7 +152,7 @@ gf104_gr_fwif[] = {
};
int
-gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 14284b06112f..030640bb3dca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -151,7 +151,7 @@ gf108_gr_fwif[] = {
};
int
-gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf108_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 280752551a3a..616e2def1865 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -127,7 +127,7 @@ gf110_gr_fwif[] = {
};
int
-gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf110_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 235c3fbe4b95..669e7536970e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -192,7 +192,7 @@ gf117_gr_fwif[] = {
};
int
-gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf117_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf117_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7eac385ece97..5b09bda8110c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -218,7 +218,7 @@ gf119_gr_fwif[] = {
};
int
-gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf119_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf119_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 89f51d76082b..b680eaa0f350 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -497,7 +497,7 @@ gk104_gr_fwif[] = {
};
int
-gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 735f05e54d62..103e06a77e65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -393,7 +393,7 @@ gk110_gr_fwif[] = {
};
int
-gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk110_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index adc971be8f3b..034d0b11a17d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -144,7 +144,8 @@ gk110b_gr_fwif[] = {
};
int
-gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk110b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index aa0eff6795ac..116d682f9f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -202,7 +202,7 @@ gk208_gr_fwif[] = {
};
int
-gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk208_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk208_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 6d4d72851610..be0b2cefd8e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -357,7 +357,7 @@ gk20a_gr_fwif[] = {
};
int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk20a_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 09bb78ba9d00..310987174cb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -437,7 +437,7 @@ gm107_gr_fwif[] = {
};
int
-gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm107_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 815137047518..5c38ff0fe7f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -288,7 +288,7 @@ gm200_gr_fwif[] = {
};
int
-gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 1aab691fa71c..ec1c46e47e00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -181,7 +181,7 @@ gm20b_gr_fwif[] = {
};
int
-gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm20b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm20b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index ddba7ce937c7..0550dd6f46f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -156,7 +156,7 @@ gp100_gr_fwif[] = {
};
int
-gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index c083f3757ff7..5b001f374be0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -152,7 +152,7 @@ gp102_gr_fwif[] = {
};
int
-gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index f6a31e9a8cc8..2655574ec63b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -93,7 +93,7 @@ gp104_gr_fwif[] = {
};
int
-gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 2c80c6a75b56..adabc04d4f3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -82,7 +82,7 @@ gp107_gr_fwif[] = {
};
int
-gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp107_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
index 2be8f416dd6f..7310f0466bb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -92,7 +92,7 @@ gp108_gr_fwif[] = {
};
int
-gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp108_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 6edc4bc7ed44..e13683b6e7b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -94,7 +94,7 @@ gp10b_gr_fwif[] = {
};
int
-gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp10b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp10b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
index c711a55ce392..1dfc65d45b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -43,7 +43,7 @@ gt200_gr = {
};
int
-gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&gt200_gr, device, index, pgr);
+ return nv50_gr_new_(&gt200_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
index fa103df32ec7..fcb5ead345a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -44,7 +44,7 @@ gt215_gr = {
};
int
-gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt215_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&gt215_gr, device, index, pgr);
+ return nv50_gr_new_(&gt215_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 2189a8f4e644..4d043c1173ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -141,7 +141,7 @@ gv100_gr_fwif[] = {
};
int
-gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gv100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gv100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
index eb1a90644752..cf782b64f62e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -42,7 +42,7 @@ mcp79_gr = {
};
int
-mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp79_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+ return nv50_gr_new_(&mcp79_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
index c91eb56e9327..6f90a6395453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -44,7 +44,7 @@ mcp89_gr = {
};
int
-mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp89_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+ return nv50_gr_new_(&mcp89_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 9c2e985dc079..0bc1a238de43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1413,7 +1413,7 @@ nv04_gr = {
};
int
-nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv04_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv04_gr *gr;
@@ -1422,5 +1422,5 @@ nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
+ return nvkm_gr_ctor(&nv04_gr, device, type, inst, true, &gr->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 4ebbfbdd8240..942450b33bc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -1173,7 +1173,7 @@ nv10_gr_init(struct nvkm_gr *base)
int
nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv10_gr *gr;
@@ -1182,7 +1182,7 @@ nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -1215,7 +1215,7 @@ nv10_gr = {
};
int
-nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv10_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv10_gr, device, index, pgr);
+ return nv10_gr_new_(&nv10_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index 4327baea02af..5cfe927c9123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -3,7 +3,7 @@
#define __NV10_GR_H__
#include "priv.h"
-int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv10_gr_init(struct nvkm_gr *);
void nv10_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
index 3e2c6856b4c4..69ece259df86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -53,7 +53,7 @@ nv15_gr = {
};
int
-nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv15_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv15_gr, device, index, pgr);
+ return nv10_gr_new_(&nv15_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
index 12437d085a73..e39dfc7d4077 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -53,7 +53,7 @@ nv17_gr = {
};
int
-nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv17_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv17_gr, device, index, pgr);
+ return nv10_gr_new_(&nv17_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d837630a3625..6bff10cee71b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -330,7 +330,7 @@ nv20_gr_dtor(struct nvkm_gr *base)
int
nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv20_gr *gr;
@@ -338,7 +338,7 @@ nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -370,7 +370,7 @@ nv20_gr = {
};
int
-nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv20_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv20_gr, device, index, pgr);
+ return nv20_gr_new_(&nv20_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index e57407a8a7c3..c0d2be53413e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -9,8 +9,8 @@ struct nv20_gr {
struct nvkm_memory *ctxtab;
};
-int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_gr **);
void *nv20_gr_dtor(struct nvkm_gr *);
int nv20_gr_oneinit(struct nvkm_gr *);
int nv20_gr_init(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 32d29d3faee0..f3a56f17d94a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -129,7 +129,7 @@ nv25_gr = {
};
int
-nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv25_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv25_gr, device, index, pgr);
+ return nv20_gr_new_(&nv25_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index f941062c66f0..f268d2642d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -120,7 +120,7 @@ nv2a_gr = {
};
int
-nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv2a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv2a_gr, device, index, pgr);
+ return nv20_gr_new_(&nv2a_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 785ec956df0f..e5737cdf2fa1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -194,7 +194,7 @@ nv30_gr = {
};
int
-nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv30_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv30_gr, device, index, pgr);
+ return nv20_gr_new_(&nv30_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index bd610d75c677..1ab2da8ebf4e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -131,7 +131,7 @@ nv34_gr = {
};
int
-nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv34_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv34_gr, device, index, pgr);
+ return nv20_gr_new_(&nv34_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 89db7f523037..591260f5676b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -131,7 +131,7 @@ nv35_gr = {
};
int
-nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv35_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv35_gr, device, index, pgr);
+ return nv20_gr_new_(&nv35_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 5f1ad8344ea9..67f3535ff97e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -429,7 +429,7 @@ nv40_gr_init(struct nvkm_gr *base)
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv40_gr *gr;
@@ -438,7 +438,7 @@ nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
*pgr = &gr->base;
INIT_LIST_HEAD(&gr->chan);
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -470,7 +470,7 @@ nv40_gr = {
};
int
-nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv40_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv40_gr_new_(&nv40_gr, device, index, pgr);
+ return nv40_gr_new_(&nv40_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index e6128791b2d2..f3d3d3a5ae5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -10,7 +10,7 @@ struct nv40_gr {
struct list_head chan;
};
-int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv40_gr_init(struct nvkm_gr *);
void nv40_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
index 45ff80254eb4..22b6a38a7031 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -102,7 +102,7 @@ nv44_gr = {
};
int
-nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv44_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv40_gr_new_(&nv44_gr, device, index, pgr);
+ return nv40_gr_new_(&nv44_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index df16ffda1749..563a10097e95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -761,7 +761,7 @@ nv50_gr_init(struct nvkm_gr *base)
int
nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv50_gr *gr;
@@ -770,7 +770,7 @@ nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -790,7 +790,7 @@ nv50_gr = {
};
int
-nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv50_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&nv50_gr, device, index, pgr);
+ return nv50_gr_new_(&nv50_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 465f4da0ddfc..84388c42e5c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -11,7 +11,7 @@ struct nv50_gr {
u32 size;
};
-int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv50_gr_init(struct nvkm_gr *);
void nv50_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 3b30f24032cc..9b2c66e8be90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -7,8 +7,8 @@
struct nvkm_fb_tile;
struct nvkm_fifo_chan;
-int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_gr *);
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ bool enable, struct nvkm_gr *);
bool nv04_gr_idle(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 6039f9948aa2..1a8a21844e12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -198,7 +198,7 @@ tu102_gr_fwif[] = {
};
int
-tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index c0e11a071843..0fcc0ffa1e40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -37,7 +37,8 @@ g84_mpeg = {
};
int
-g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+g84_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg);
+ return nvkm_engine_new_(&g84_mpeg, device, type, inst, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 7fea7d45202f..b1054db4c1b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -274,7 +274,7 @@ nv31_mpeg_ = {
int
nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pmpeg)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pmpeg)
{
struct nv31_mpeg *mpeg;
@@ -283,8 +283,7 @@ nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
mpeg->func = func;
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv31_mpeg_, device, index,
- true, &mpeg->engine);
+ return nvkm_engine_ctor(&nv31_mpeg_, device, type, inst, true, &mpeg->engine);
}
static const struct nv31_mpeg_func
@@ -293,7 +292,8 @@ nv31_mpeg = {
};
int
-nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv31_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+ return nv31_mpeg_new_(&nv31_mpeg, device, type, inst, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index b3e131538858..9f30aaaf809e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -11,8 +11,8 @@ struct nv31_mpeg {
struct nv31_mpeg_chan *chan;
};
-int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_engine **);
struct nv31_mpeg_func {
bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index b5ec7c504dc6..179167484ef1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -76,7 +76,8 @@ nv40_mpeg = {
};
int
-nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv40_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
+ return nv31_mpeg_new_(&nv40_mpeg, device, type, inst, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index c3cf02ed468e..521ce43a2871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -203,7 +203,8 @@ nv44_mpeg = {
};
int
-nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv44_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
struct nv44_mpeg *mpeg;
@@ -212,5 +213,5 @@ nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
INIT_LIST_HEAD(&mpeg->chan);
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine);
+ return nvkm_engine_ctor(&nv44_mpeg, device, type, inst, true, &mpeg->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 6df880a39019..e6374f36961c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -129,7 +129,8 @@ nv50_mpeg = {
};
int
-nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv50_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg);
+ return nvkm_engine_new_(&nv50_mpeg, device, type, inst, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
index 80211f76093b..842fcfbd28b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -24,9 +24,8 @@
#include "priv.h"
int
-nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x085000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index f30cf1dcfb30..ecb06d68f544 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -43,8 +43,8 @@ g98_mspdec = {
};
int
-g98_mspdec_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+g98_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&g98_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index cfe1aa81bd14..0a69bd767d69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -43,8 +43,8 @@ gf100_mspdec = {
};
int
-gf100_mspdec_new(struct nvkm_device *device, int index,
+gf100_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gf100_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 24272b4927bc..a08991dca428 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -35,8 +35,8 @@ gk104_mspdec = {
};
int
-gk104_mspdec_new(struct nvkm_device *device, int index,
+gk104_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gk104_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index cf6e59ad6ee2..791fb03a32ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -35,8 +35,8 @@ gt215_mspdec = {
};
int
-gt215_mspdec_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gt215_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index 86445a2600d0..2bc5537d40a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSPDEC_PRIV_H__
#include <engine/mspdec.h>
-int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_mspdec_init(struct nvkm_falcon *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
index bfae5e60e925..45a9411ab2e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -25,7 +25,7 @@
int
nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pengine)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x086000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index c45dbf79d1f9..160120b9bd64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -43,8 +43,8 @@ g98_msppp = {
};
int
-g98_msppp_new(struct nvkm_device *device, int index,
+g98_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&g98_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 803c62ab516e..debed9ae8731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -43,8 +43,8 @@ gf100_msppp = {
};
int
-gf100_msppp_new(struct nvkm_device *device, int index,
+gf100_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&gf100_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
index 49cbf72cee4b..a2fd736fef94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -35,8 +35,8 @@ gt215_msppp = {
};
int
-gt215_msppp_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&gt215_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index f20b10915db2..582ab8ce1425 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSPPP_PRIV_H__
#include <engine/msppp.h>
-int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_msppp_init(struct nvkm_falcon *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
index 745bbb653dc0..7be42b980e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -25,7 +25,7 @@
int
nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pengine)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x084000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 4a2a9f0494af..cfa2065319a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -43,8 +43,8 @@ g98_msvld = {
};
int
-g98_msvld_new(struct nvkm_device *device, int index,
+g98_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&g98_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index 1695e532c081..8d58ad8e04d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -43,8 +43,8 @@ gf100_msvld = {
};
int
-gf100_msvld_new(struct nvkm_device *device, int index,
+gf100_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gf100_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index b640cd63ebe8..b28be28046f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -35,8 +35,8 @@ gk104_msvld = {
};
int
-gk104_msvld_new(struct nvkm_device *device, int index,
+gk104_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gk104_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
index 201e8ef3519e..d7489f972c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -35,8 +35,8 @@ gt215_msvld = {
};
int
-gt215_msvld_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gt215_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
index a0f540ef257b..16c30b62ab09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -35,8 +35,8 @@ mcp89_msvld = {
};
int
-mcp89_msvld_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+mcp89_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&mcp89_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 5cd1e83badbb..f729d919b054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSVLD_PRIV_H__
#include <engine/msvld.h>
-int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_msvld_init(struct nvkm_falcon *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 9b23c1b70ebf..b0181cc5953b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -37,7 +37,7 @@ nvkm_nvdec = {
int
nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_nvdec **pnvdec)
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
int ret;
@@ -45,7 +45,7 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ ret = nvkm_engine_ctor(&nvkm_nvdec, device, type, inst, true,
&nvdec->engine);
if (ret)
return ret;
@@ -57,5 +57,5 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
nvdec->func = fwif->func;
return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
- nvkm_subdev_name[index], 0, &nvdec->falcon);
+ nvdec->engine.subdev.name, 0, &nvdec->falcon);
};
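
With the flat index gone, the falcon name can no longer be fetched from the global nvkm_subdev_name[] table, so the hunks above read it from the subdev the engine already owns. A sketch of how a per-instance name can be composed, assuming a "%s%d" format for multi-instance subdevs; the authoritative formatting lives in nvkm_subdev_ctor() and may differ:

	#include <stdio.h>

	/* Assumed format: "nvdec" for a sole instance, "nvdec0"/"nvdec1"
	 * when inst is meaningful; only the shape matters here. */
	static void
	example_subdev_name(char *dst, size_t len, const char *base, int inst)
	{
		if (inst >= 0)
			snprintf(dst, len, "%s%d", base, inst);
		else
			snprintf(dst, len, "%s", base);
	}
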
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index 0ab27ab4d8ee..8c44ce44a6d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -56,8 +56,8 @@ gm107_nvdec_fwif[] = {
};
int
-gm107_nvdec_new(struct nvkm_device *device, int index,
+gm107_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
- return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index e14da8b000d0..0920f6a887e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvdec_fwif {
const struct nvkm_nvdec_func *func;
};
-int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
- struct nvkm_device *, int, struct nvkm_nvdec **);
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
index 484100e15668..c39e797dc7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -39,7 +39,7 @@ nvkm_nvenc = {
int
nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_nvenc **pnvenc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
{
struct nvkm_nvenc *nvenc;
int ret;
@@ -47,7 +47,7 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+ ret = nvkm_engine_ctor(&nvkm_nvenc, device, type, inst, true,
&nvenc->engine);
if (ret)
return ret;
@@ -59,5 +59,5 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
nvenc->func = fwif->func;
return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
- nvkm_subdev_name[index], 0, &nvenc->falcon);
+ nvenc->engine.subdev.name, 0, &nvenc->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
index d249c8ffb2d5..f44d41bf2034 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -56,8 +56,8 @@ gm107_nvenc_fwif[] = {
};
int
-gm107_nvenc_new(struct nvkm_device *device, int index,
+gm107_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
- return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 100fa5ebbeef..4130a2bfbb4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvenc_fwif {
const struct nvkm_nvenc_func *func;
};
-int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index b2785bee418e..8fe0444f761e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -628,10 +628,10 @@ nvkm_perfmon_dtor(struct nvkm_object *object)
{
struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
struct nvkm_pm *pm = perfmon->pm;
- mutex_lock(&pm->engine.subdev.mutex);
- if (pm->perfmon == &perfmon->object)
- pm->perfmon = NULL;
- mutex_unlock(&pm->engine.subdev.mutex);
+ spin_lock(&pm->client.lock);
+ if (pm->client.object == &perfmon->object)
+ pm->client.object = NULL;
+ spin_unlock(&pm->client.lock);
return perfmon;
}
@@ -671,11 +671,11 @@ nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
if (ret)
return ret;
- mutex_lock(&pm->engine.subdev.mutex);
- if (pm->perfmon == NULL)
- pm->perfmon = *pobject;
- ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
- mutex_unlock(&pm->engine.subdev.mutex);
+ spin_lock(&pm->client.lock);
+ if (pm->client.object == NULL)
+ pm->client.object = *pobject;
+ ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
+ spin_unlock(&pm->client.lock);
return ret;
}
@@ -858,10 +858,11 @@ nvkm_pm = {
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
- int index, struct nvkm_pm *pm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
{
pm->func = func;
INIT_LIST_HEAD(&pm->domains);
INIT_LIST_HEAD(&pm->sources);
- return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
+ spin_lock_init(&pm->client.lock);
+ return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
}
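
Besides the constructor change, the pm hunks swap the engine-wide subdev mutex for the new dedicated pm->client.lock spinlock around the single-client bookkeeping: the critical section only compares and assigns one pointer, so a sleeping lock buys nothing, and the destructor no longer has to take the subdev mutex at all. A self-contained sketch of the claim/release protocol, with stand-in types and the kernel lock calls shown as comments:

	#include <stddef.h>

	#define EBUSY 16

	struct object { int id; };
	struct client { struct object *object; /* + spinlock_t lock */ };

	/* First opener claims the counters; later opens fail fast. */
	static int client_claim(struct client *c, struct object *o)
	{
		int ret;
		/* spin_lock(&c->lock); */
		if (c->object == NULL)
			c->object = o;
		ret = (c->object == o) ? 0 : -EBUSY;
		/* spin_unlock(&c->lock); */
		return ret;
	}

	/* Destructor side: only the current owner clears the slot. */
	static void client_release(struct client *c, struct object *o)
	{
		/* spin_lock(&c->lock); */
		if (c->object == o)
			c->object = NULL;
		/* spin_unlock(&c->lock); */
	}
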
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index 6e441ddafd86..0086d00eb162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -159,7 +159,7 @@ g84_pm[] = {
};
int
-g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(g84_pm, device, index, ppm);
+ return nv40_pm_new_(g84_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index fe2532ee4145..8e02701def8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -187,7 +187,7 @@ gf100_pm_ = {
int
gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
- int index, struct nvkm_pm **ppm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
struct nvkm_pm *pm;
u32 mask;
@@ -196,7 +196,7 @@ gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
+ ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
if (ret)
return ret;
@@ -237,7 +237,7 @@ gf100_pm = {
};
int
-gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf100_pm, device, index, ppm);
+ return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 461bb219b1c0..bc4b014c4e8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -9,8 +9,8 @@ struct gf100_pm_func {
const struct nvkm_specdom *doms_part;
};
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
- int index, struct nvkm_pm **);
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm **);
extern const struct nvkm_funcdom gf100_perfctr_func;
extern const struct nvkm_specdom gf100_pm_gpc[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
index 49b24c98a7f7..505565866b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -60,7 +60,7 @@ gf108_pm = {
};
int
-gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf108_pm, device, index, ppm);
+ return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
index 9170025fc988..c61e8c010bb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -74,7 +74,7 @@ gf117_pm = {
};
int
-gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf117_pm, device, index, ppm);
+ return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 07f946d26ac6..75bf3df1cb18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -178,7 +178,7 @@ gk104_pm = {
};
int
-gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gk104_pm, device, index, ppm);
+ return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
index 5cf5dd536fd0..25874c541486 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -151,7 +151,7 @@ gt200_pm[] = {
};
int
-gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(gt200_pm, device, index, ppm);
+ return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index c9227ad41b04..54c23e2b6645 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -132,7 +132,7 @@ gt215_pm[] = {
};
int
-gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(gt215_pm, device, index, ppm);
+ return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 3fda594700e0..eba5b3b79340 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -80,7 +80,7 @@ nv40_pm_ = {
int
nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
- int index, struct nvkm_pm **ppm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
struct nv40_pm *pm;
int ret;
@@ -89,7 +89,7 @@ nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
return -ENOMEM;
*ppm = &pm->base;
- ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+ ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv40_pm[] = {
};
int
-nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(nv40_pm, device, index, ppm);
+ return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 8ed19320fda1..afb79843723d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -9,7 +9,7 @@ struct nv40_pm {
u32 sequence;
};
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
- int index, struct nvkm_pm **);
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm **);
extern const struct nvkm_funcdom nv40_perfctr_func;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index cc5a41d4c6f2..bbd3404901f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -169,7 +169,7 @@ nv50_pm[] = {
};
int
-nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(nv50_pm, device, index, ppm);
+ return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index cd6f8f79b235..6ae25d3e7f45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -4,8 +4,8 @@
#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
#include <engine/pm.h>
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
- int index, struct nvkm_pm *);
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm *);
struct nvkm_pm_func {
void (*fini)(struct nvkm_pm *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 6d2a7f0afbb5..1b87df03c823 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -74,9 +74,8 @@ g98_sec = {
};
int
-g98_sec_new(struct nvkm_device *device, int index,
+g98_sec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(&g98_sec, device, index,
- true, 0x087000, pengine);
+ return nvkm_falcon_new_(&g98_sec, device, type, inst, true, 0x087000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 41318aa0d481..092c6d0b8e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -85,7 +85,7 @@ nvkm_sec2 = {
int
nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
- int index, u32 addr, struct nvkm_sec2 **psec2)
+ enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
int ret;
@@ -93,7 +93,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ ret = nvkm_engine_ctor(&nvkm_sec2, device, type, inst, true, &sec2->engine);
if (ret)
return ret;
@@ -104,7 +104,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
sec2->func = fwif->func;
ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
- nvkm_subdev_name[index], addr, &sec2->falcon);
+ sec2->engine.subdev.name, addr, &sec2->falcon);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index bccf7acb7f98..44e39f5743d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -343,7 +343,8 @@ gp102_sec2_fwif[] = {
};
int
-gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
+ return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index e770c9497871..3e9f5c842f3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -36,7 +36,8 @@ gp108_sec2_fwif[] = {
};
int
-gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp108_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+ return nvkm_sec2_new_(gp108_sec2_fwif, device, type, inst, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 8cbc0b7d0a27..af19229e885d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -25,6 +25,6 @@ int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
extern const struct nvkm_sec2_func gp102_sec2;
extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
-int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index a231c1c6c0a5..f3faeb705575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -72,10 +72,11 @@ tu102_sec2_fwif[] = {
};
int
-tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
}
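
The 0x840000 passed above is the falcon PRI base that nvkm_falcon_oneinit() would otherwise discover from the TOP device-info tables; a non-zero addr short-circuits that lookup, which is the point here, since Turing's TOP entry still reports the pre-change address. The fallback, as it appears in the falcon/base.c hunk later in this diff:

	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}
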
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index 7be3198e11de..14871d0bd746 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -97,7 +97,7 @@ nvkm_sw = {
int
nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
- int index, struct nvkm_sw **psw)
+ enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
struct nvkm_sw *sw;
@@ -106,5 +106,5 @@ nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
INIT_LIST_HEAD(&sw->chan);
sw->func = func;
- return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);
+ return nvkm_engine_ctor(&nvkm_sw, device, type, inst, true, &sw->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index ea8f4247b628..55abf839f29d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -149,7 +149,7 @@ gf100_sw = {
};
int
-gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+gf100_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&gf100_sw, device, index, psw);
+ return nvkm_sw_new_(&gf100_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index b6675fe1b0ce..4aa57573869c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -133,7 +133,7 @@ nv04_sw = {
};
int
-nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv04_sw, device, index, psw);
+ return nvkm_sw_new_(&nv04_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index 09d22fcd194c..e79e640ae535 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -62,7 +62,7 @@ nv10_sw = {
};
int
-nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv10_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv10_sw, device, index, psw);
+ return nvkm_sw_new_(&nv10_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 01573d187f2c..1fdd094c8b7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -142,7 +142,7 @@ nv50_sw = {
};
int
-nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv50_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv50_sw, device, index, psw);
+ return nvkm_sw_new_(&nv50_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 6d18fc6180f2..d9d83b1b8849 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -5,8 +5,8 @@
#include <engine/sw.h>
struct nvkm_sw_chan;
-int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
- int index, struct nvkm_sw **);
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_sw **);
struct nvkm_sw_chan_sclass {
int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 7a96178786c4..b502266c76fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -36,8 +36,8 @@ g84_vp = {
};
int
-g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_vp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_xtensa_new_(&g84_vp, device, index,
- true, 0x00f000, pengine);
+ return nvkm_xtensa_new_(&g84_vp, device, type, inst, true, 0x00f000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 70549381e082..f7d3ba0afb55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -175,9 +175,9 @@ nvkm_xtensa = {
};
int
-nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
- struct nvkm_device *device, int index, bool enable,
- u32 addr, struct nvkm_engine **pengine)
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+ struct nvkm_engine **pengine)
{
struct nvkm_xtensa *xtensa;
@@ -187,6 +187,5 @@ nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
xtensa->addr = addr;
*pengine = &xtensa->engine;
- return nvkm_engine_ctor(&nvkm_xtensa, device, index,
- enable, &xtensa->engine);
+ return nvkm_engine_ctor(&nvkm_xtensa, device, type, inst, enable, &xtensa->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index c6a3448180d6..262641a014b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -88,13 +88,12 @@ int
nvkm_falcon_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
- enum nvkm_devidx id = falcon->owner->index;
int ret;
- nvkm_mc_enable(device, id);
+ nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
ret = falcon->func->enable(falcon);
if (ret) {
- nvkm_mc_disable(device, id);
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
return ret;
}
@@ -105,15 +104,14 @@ void
nvkm_falcon_disable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
- enum nvkm_devidx id = falcon->owner->index;
/* already disabled, return or wait_idle will timeout */
- if (!nvkm_mc_enabled(device, id))
+ if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
return;
falcon->func->disable(falcon);
- nvkm_mc_disable(device, id);
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
}
int
@@ -143,7 +141,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
u32 reg;
if (!falcon->addr) {
- falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
if (WARN_ON(!falcon->addr))
return -ENODEV;
}
@@ -188,7 +186,7 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
- falcon->name, nvkm_subdev_name[falcon->user->index]);
+ falcon->name, falcon->user->name);
mutex_unlock(&falcon->mutex);
return -EBUSY;
}
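
The MC helpers are re-keyed the same way: nvkm_mc_enable(), nvkm_mc_disable() and nvkm_mc_enabled() here, and nvkm_mc_intr_mask() in the acr/gm200.c hunks below, all take the (type, inst) pair straight from falcon->owner rather than a cached enum nvkm_devidx, so the local id variables disappear:

	nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
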
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index fb4fff1222af..2cb24fff7e32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -11,7 +11,6 @@ include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
-include $(src)/nvkm/subdev/ibus/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
include $(src)/nvkm/subdev/instmem/Kbuild
include $(src)/nvkm/subdev/ltc/Kbuild
@@ -20,6 +19,7 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/privring/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index c962df9910dd..af6cac696d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -410,14 +410,14 @@ nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_acr **pacr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
struct nvkm_acr *acr;
long wprfw;
if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+ nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
INIT_LIST_HEAD(&acr->hsfw);
INIT_LIST_HEAD(&acr->lsfw);
INIT_LIST_HEAD(&acr->hsf);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cd41b2e6cc87..cdb1ead26d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -262,7 +262,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
hsf->func->bld(acr, hsf);
/* Boot the falcon. */
- nvkm_mc_intr_mask(device, falcon->owner->index, false);
+ nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, false);
nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
@@ -279,7 +279,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
return -EIO;
nvkm_falcon_clear_interrupt(falcon, intr_clear);
- nvkm_mc_intr_mask(device, falcon->owner->index, true);
+ nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, true);
return ret;
}
@@ -478,7 +478,8 @@ gm200_acr_fwif[] = {
};
int
-gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
index b1ecc58152cc..54e996f2f630 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -129,7 +129,8 @@ gm20b_acr_fwif[] = {
};
int
-gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm20b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gm20b_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index 80eb9d8dbc80..fb9132a39bb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -276,7 +276,8 @@ gp102_acr_fwif[] = {
};
int
-gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
index 67a7c141004b..373d638a2177 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -106,7 +106,8 @@ gp108_acr_fwif[] = {
};
int
-gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp108_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp108_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
index 8249f0d2d81d..f03ba028867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -52,7 +52,8 @@ gp10b_acr_fwif[] = {
};
int
-gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp10b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp10b_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
index d71af17a169a..c30b841c9d35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -135,8 +135,8 @@ int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
-int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
- struct nvkm_acr **);
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_acr **);
int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
struct nvkm_acr_lsf {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index c4981bce9a2b..05a87e77525f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -224,7 +224,8 @@ tu102_acr_fwif[] = {
};
int
-tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 209a6a40834a..d017a1b5e5dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -134,9 +134,9 @@ nvkm_bar = {
void
nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, struct nvkm_bar *bar)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar *bar)
{
- nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev);
+ nvkm_subdev_ctor(&nvkm_bar, device, type, inst, &bar->subdev);
bar->func = func;
spin_lock_init(&bar->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index 87f26f54b481..77a41bcf860e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -56,7 +56,8 @@ g84_bar_func = {
};
int
-g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+g84_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+ return nv50_bar_new_(&g84_bar_func, device, type, inst, 0x200, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3dcb09a40ee..51070b7dda85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -162,12 +162,12 @@ gf100_bar_dtor(struct nvkm_bar *base)
int
gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, struct nvkm_bar **pbar)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
{
struct gf100_bar *bar;
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
- nvkm_bar_ctor(func, device, index, &bar->base);
+ nvkm_bar_ctor(func, device, type, inst, &bar->base);
bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
*pbar = &bar->base;
return 0;
@@ -189,7 +189,8 @@ gf100_bar_func = {
};
int
-gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gf100_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+ return gf100_bar_new_(&gf100_bar_func, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 4ae4c7145712..328a68b418d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -15,7 +15,7 @@ struct gf100_bar {
struct gf100_barN bar[2];
};
-int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_bar **);
void *gf100_bar_dtor(struct nvkm_bar *);
int gf100_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 35878fb538f2..eead8ab88393 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -32,9 +32,10 @@ gk20a_bar_func = {
};
int
-gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gk20a_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+ int ret = gf100_bar_new_(&gk20a_bar_func, device, type, inst, pbar);
if (ret == 0)
(*pbar)->iomap_uncached = true;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
index 3ddf9222d935..da95307a7912 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -59,7 +59,8 @@ gm107_bar_func = {
};
int
-gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm107_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+ return gf100_bar_new_(&gm107_bar_func, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
index 1ed6170891c4..4acdb4fb0107 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -32,9 +32,10 @@ gm20b_bar_func = {
};
int
-gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm20b_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+ int ret = gf100_bar_new_(&gm20b_bar_func, device, type, inst, pbar);
if (ret == 0)
(*pbar)->iomap_uncached = true;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index f23a0ccc2bec..27d8a1be43e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -220,12 +220,12 @@ nv50_bar_dtor(struct nvkm_bar *base)
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, u32 pgd_addr, struct nvkm_bar **pbar)
+ enum nvkm_subdev_type type, int inst, u32 pgd_addr, struct nvkm_bar **pbar)
{
struct nv50_bar *bar;
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
- nvkm_bar_ctor(func, device, index, &bar->base);
+ nvkm_bar_ctor(func, device, type, inst, &bar->base);
bar->pgd_addr = pgd_addr;
*pbar = &bar->base;
return 0;
@@ -248,7 +248,8 @@ nv50_bar_func = {
};
int
-nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+nv50_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+ return nv50_bar_new_(&nv50_bar_func, device, type, inst, 0x1400, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index e4193deb2e51..dedee9394079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -16,7 +16,7 @@ struct nv50_bar {
struct nvkm_gpuobj *bar2;
};
-int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, u32 pgd_addr, struct nvkm_bar **);
void *nv50_bar_dtor(struct nvkm_bar *);
int nv50_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 869ad184f923..daebfc991c76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -5,7 +5,7 @@
#include <subdev/bar.h>
void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
- int, struct nvkm_bar *);
+ enum nvkm_subdev_type, int, struct nvkm_bar *);
struct nvkm_bar_func {
void *(*dtor)(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index 798f65ec3a86..c25ab407b85d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -92,7 +92,8 @@ tu102_bar = {
};
int
-tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&tu102_bar, device, index, pbar);
+ return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index f3c30b2a788e..d0f52d59fc2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -140,7 +140,8 @@ nvkm_bios = {
};
int
-nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
+nvkm_bios_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
struct nvbios_image image;
@@ -149,7 +150,7 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev);
+ nvkm_subdev_ctor(&nvkm_bios, device, type, inst, &bios->subdev);
ret = nvbios_shadow(bios);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
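
The shadow_fetch() bound above is a standalone bug fix sitting alongside the mechanical conversion: image.base is the image's byte offset within the ROM, so fetching only image.size bytes can stop short of the image entirely. As a worked example (values hypothetical), an image at base 0x1a000 with size 0x8000 needs the shadow to cover 0x1a000 + 0x8000 = 0x22000 bytes; the old call asked for just 0x8000, leaving the later copy of that image to read past the shadowed data.
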
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 3634cd0630b8..023ddc7c5399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
return NULL;
/* we can't get the bios image pointer without PDISP */
+ if (device->card_type >= GA100)
+ addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+ else
if (device->card_type >= GM100)
addr = nvkm_rd32(device, 0x021c04);
else
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index 52ad73bce5fe..0e5a46db52ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -53,12 +53,12 @@ nvkm_bus = {
int
nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
- int index, struct nvkm_bus **pbus)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus)
{
struct nvkm_bus *bus;
if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev);
+ nvkm_subdev_ctor(&nvkm_bus, device, type, inst, &bus->subdev);
bus->func = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index 9700b5c01cc6..a0d6e2d3f804 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -58,7 +58,8 @@ g94_bus = {
};
int
-g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+g94_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&g94_bus, device, index, pbus);
+ return nvkm_bus_new_(&g94_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index e0930d5fdfb1..53a6651ac225 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -40,7 +40,7 @@ gf100_bus_intr(struct nvkm_bus *bus)
(addr & 0x00000002) ? "write" : "read", data,
(addr & 0x00fffffc),
(stat & 0x00000002) ? "!ENGINE " : "",
- (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000004) ? "PRIVRING " : "",
(stat & 0x00000008) ? "TIMEOUT " : "");
nvkm_wr32(device, 0x009084, 0x00000000);
@@ -69,7 +69,8 @@ gf100_bus = {
};
int
-gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+ return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
}
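
The "IBUS " to "PRIVRING " decode string change above belongs to the rename visible in the Kbuild hunk earlier in this diff, where subdev/ibus disappears and subdev/privring takes its place: the same unit, reporting through the same status bit (0x00000004), is now consistently called the privring.
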
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 2b44ba5cf4b0..cfed17c062ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -68,7 +68,8 @@ nv04_bus = {
};
int
-nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv04_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv04_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv04_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index 5153d89e1f0b..ad8da523bb22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -82,7 +82,8 @@ nv31_bus = {
};
int
-nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv31_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv31_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 19e10fdc9291..3a1e45adeedc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -99,7 +99,8 @@ nv50_bus = {
};
int
-nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv50_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv50_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index 76f7ba1c6494..2e9345b17cf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -11,7 +11,7 @@ struct nvkm_bus_func {
u32 hwsq_size;
};
-int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_bus **);
void nv50_bus_init(struct nvkm_bus *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index dc184e857f85..57199be082fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -649,7 +649,7 @@ nvkm_clk = {
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk *clk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
{
struct nvkm_subdev *subdev = &clk->subdev;
struct nvkm_bios *bios = device->bios;
@@ -657,7 +657,7 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
const char *mode;
struct nvbios_vpstate_header h;
- nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+ nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);
if (bios && !nvbios_vpstate_parse(bios, &h)) {
struct nvbios_vpstate_entry base, boost;
@@ -716,9 +716,9 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk **pclk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+ return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk);
}
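
nvkm_clk keeps its extra allow_reclock boolean after the new (type, inst) pair, and the per-chipset constructors below show the convention: g84 gates it on chipset >= 0x94, gf100 passes false outright, while gk104, gt215 and the Tegra variants pass true. A sketch of a caller, with example_clk standing in for a real implementation table and the gate borrowed from g84_clk_new():

	static int
	example_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type,
			int inst, struct nvkm_clk **pclk)
	{
		return nvkm_clk_new_(&example_clk, device, type, inst,
				     device->chipset >= 0x94, pclk);
	}
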
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index f97e3ec196bb..07157cf53c9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -41,8 +41,8 @@ g84_clk = {
};
int
-g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- return nv50_clk_new_(&g84_clk, device, index,
- (device->chipset >= 0x94), pclk);
+ return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 7f67f9f5a550..6eea11aefb70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -468,7 +468,8 @@ gf100_clk = {
};
int
-gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gf100_clk *clk;
@@ -476,5 +477,5 @@ gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
+ return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 0b37e3da7feb..0d8e2ddcc5ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -504,7 +504,8 @@ gk104_clk = {
};
int
-gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gk104_clk *clk;
@@ -512,5 +513,5 @@ gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 218893e3e5f9..d573fb0917fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -610,10 +610,9 @@ gk20a_clk = {
};
int
-gk20a_clk_ctor(struct nvkm_device *device, int index,
- const struct nvkm_clk_func *func,
- const struct gk20a_clk_pllg_params *params,
- struct gk20a_clk *clk)
+gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params,
+ struct gk20a_clk *clk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int ret;
@@ -628,7 +627,7 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
clk->params = params;
clk->parent_rate = clk_get_rate(tdev->clk);
- ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
+ ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
if (ret)
return ret;
@@ -639,7 +638,8 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
}
int
-gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
@@ -649,11 +649,9 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
- clk);
+ ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 0d1450972162..286413ff4a9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -146,8 +146,8 @@ gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
clk->parent_rate / KHZ);
}
-int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
- const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
+int gk20a_clk_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, const struct nvkm_clk_func *,
+ const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
void gk20a_clk_fini(struct nvkm_clk *);
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index b284e949f732..a139dafffe06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -908,7 +908,7 @@ gm20b_clk = {
};
static int
-gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
@@ -919,12 +919,9 @@ gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
return -ENOMEM;
*pclk = &clk->base;
- ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
- &gm20b_pllg_params, clk);
-
+ ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
-
return ret;
}
@@ -1014,7 +1011,8 @@ gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
}
int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gm20b_clk *clk;
@@ -1024,7 +1022,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
/* Speedo 0 GPUs cannot use noise-aware PLL */
if (tdev->gpu_speedo_id == 0)
- return gm20b_clk_new_speedo0(device, index, pclk);
+ return gm20b_clk_new_speedo0(device, type, inst, pclk);
/* Speedo >= 1, use NAPLL */
clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
@@ -1036,8 +1034,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
/* duplicate the clock parameters since we will patch them below */
clk_params = (void *) (clk + 1);
*clk_params = gm20b_pllg_params;
- ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
- &clk->base);
+ ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
if (ret)
return ret;
@@ -1050,7 +1047,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
if (clk_params->max_m == 0) {
nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
kfree(clk);
- return gm20b_clk_new_speedo0(device, index, pclk);
+ return gm20b_clk_new_speedo0(device, type, inst, pclk);
}
clk->base.pl_to_div = pl_to_div;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index f0a26881d9b9..b5f3969727a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -537,7 +537,8 @@ gt215_clk = {
};
int
-gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gt215_clk *clk;
@@ -545,5 +546,5 @@ gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&gt215_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 4884eb4a9221..81f103f88dc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -409,7 +409,8 @@ mcp77_clk = {
};
int
-mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct mcp77_clk *clk;
@@ -417,5 +418,5 @@ mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index b280f85e8827..ca13598c2caa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -72,9 +72,10 @@ nv04_clk = {
};
int
-nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+ int ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk);
if (ret == 0) {
(*pclk)->pll_calc = nv04_clk_pll_calc;
(*pclk)->pll_prog = nv04_clk_pll_prog;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index 2ab9b9b84018..7ddd8cecb805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -218,7 +218,8 @@ nv40_clk = {
};
int
-nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct nv40_clk *clk;
@@ -228,5 +229,5 @@ nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
clk->base.pll_prog = nv04_clk_pll_prog;
*pclk = &clk->base;
- return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index da1770e47490..83067763c0ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -507,14 +507,14 @@ nv50_clk_tidy(struct nvkm_clk *base)
int
nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk **pclk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
struct nv50_clk *clk;
int ret;
if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+ ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base);
*pclk = &clk->base;
if (ret)
return ret;
@@ -555,7 +555,8 @@ nv50_clk = {
};
int
-nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+ return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 7c7713238ec4..5b4cb7e5cff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -20,7 +20,7 @@ struct nv50_clk {
struct nv50_clk_hwsq hwsq;
};
-int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool, struct nvkm_clk **);
int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index 81dfb37480ae..810cc572cd30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -16,9 +16,9 @@ struct nvkm_clk_func {
struct nvkm_domain domains[];
};
-int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool allow_reclock, struct nvkm_clk *);
-int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool allow_reclock, struct nvkm_clk **);
int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index b3429371ed82..d1abb64841da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 4756019ddf3f..dd4981708fe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -56,12 +56,12 @@ nvkm_devinit_disable(struct nvkm_devinit *init)
}
int
-nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
+nvkm_devinit_post(struct nvkm_devinit *init)
{
int ret = 0;
if (init && init->func->post)
ret = init->func->post(init, init->post);
- *disable = nvkm_devinit_disable(init);
+ nvkm_devinit_disable(init);
return ret;
}
@@ -126,11 +126,10 @@ nvkm_devinit = {
};
void
-nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit *init)
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit *init)
{
- nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev);
+ nvkm_subdev_ctor(&nvkm_devinit, device, type, inst, &init->subdev);
init->func = func;
init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index e895289bf3c1..c224702b7bed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -35,18 +35,18 @@ g84_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MPEG);
- disable |= (1ULL << NVKM_ENGINE_VP);
- disable |= (1ULL << NVKM_ENGINE_BSP);
- disable |= (1ULL << NVKM_ENGINE_CIPHER);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_VP, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_BSP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_CIPHER);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
return disable;
}
@@ -61,8 +61,8 @@ g84_devinit = {
};
int
-g84_devinit_new(struct nvkm_device *device, int index,
+g84_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+ return nv50_devinit_new_(&g84_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index a9d45844df5a..05729ca19e9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -35,17 +35,17 @@ g98_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_SEC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0);
return disable;
}
@@ -60,8 +60,8 @@ g98_devinit = {
};
int
-g98_devinit_new(struct nvkm_device *device, int index,
+g98_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+ return nv50_devinit_new_(&g98_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644
index 000000000000..6b280b05c4ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+ nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+ nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+ break;
+ default:
+ nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu102_devinit_post,
+ .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 8b1b34c3ad26..051cfd6a5caf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -71,21 +71,21 @@ gf100_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (r022500 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (r022500 & 0x00000002) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (r022500 & 0x00000004)
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (r022500 & 0x00000008)
- disable |= (1ULL << NVKM_ENGINE_MSENC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSENC, 0);
if (r022500 & 0x00000100)
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r022500 & 0x00000200)
- disable |= (1ULL << NVKM_ENGINE_CE1);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
return disable;
}
@@ -114,8 +114,8 @@ gf100_devinit = {
};
int
-gf100_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gf100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gf100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 28ca01be3d38..4323732a3cb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -35,11 +35,11 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (r021c00 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r021c00 & 0x00000004)
- disable |= (1ULL << NVKM_ENGINE_CE2);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
if (r021c04 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
return disable;
}
@@ -54,8 +54,8 @@ gm107_devinit = {
};
int
-gm107_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gm107_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gm107_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 59940dacc2ba..a308b9bde449 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -179,8 +179,8 @@ gm200_devinit = {
};
int
-gm200_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gm200_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gm200_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gm200_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 9a8522fa9c65..dc026ac1b595 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -71,16 +71,16 @@ gt215_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
return disable;
}
@@ -146,8 +146,8 @@ gt215_devinit = {
};
int
-gt215_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gt215_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gt215_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
index fbde6828bd38..b4d1688517d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
@@ -72,8 +72,8 @@ gv100_devinit = {
};
int
-gv100_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gv100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gv100_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gv100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index ce4f718e98a1..fb90d47e1225 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -35,18 +35,18 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_VIC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
return disable;
}
@@ -61,8 +61,8 @@ mcp89_devinit = {
};
int
-mcp89_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+mcp89_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+ return nv50_devinit_new_(&mcp89_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 317ce9fb8225..88bc890f89a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -434,9 +434,8 @@ nv04_devinit_dtor(struct nvkm_devinit *base)
}
int
-nv04_devinit_new_(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv04_devinit *init;
@@ -444,7 +443,7 @@ nv04_devinit_new_(const struct nvkm_devinit_func *func,
return -ENOMEM;
*pinit = &init->base;
- nvkm_devinit_ctor(func, device, index, &init->base);
+ nvkm_devinit_ctor(func, device, type, inst, &init->base);
init->owner = -1;
return 0;
}
@@ -459,8 +458,8 @@ nv04_devinit = {
};
int
-nv04_devinit_new(struct nvkm_device *device, int index,
+nv04_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv04_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 15b029ddf6df..06ad8a606bb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -11,7 +11,7 @@ struct nv04_devinit {
};
int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
- int, struct nvkm_devinit **);
+ enum nvkm_subdev_type, int, struct nvkm_devinit **);
void *nv04_devinit_dtor(struct nvkm_devinit *);
void nv04_devinit_preinit(struct nvkm_devinit *);
void nv04_devinit_fini(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index 9891eadca1ce..1410befd2285 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -136,8 +136,8 @@ nv05_devinit = {
};
int
-nv05_devinit_new(struct nvkm_device *device, int index,
+nv05_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv05_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 570822f83acf..a6aa8786d610 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -106,8 +106,8 @@ nv10_devinit = {
};
int
-nv10_devinit_new(struct nvkm_device *device, int index,
+nv10_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv10_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index fefafec7e2a7..4cc5ef9a5a63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -35,8 +35,8 @@ nv1a_devinit = {
};
int
-nv1a_devinit_new(struct nvkm_device *device, int index,
+nv1a_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv1a_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 4ef04e0d8826..67f46df723e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -72,8 +72,8 @@ nv20_devinit = {
};
int
-nv20_devinit_new(struct nvkm_device *device, int index,
+nv20_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv20_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index d7947c4391dc..380995d398b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -85,7 +85,7 @@ nv50_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000))
- disable |= (1ULL << NVKM_ENGINE_MPEG);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
return disable;
}
@@ -101,8 +101,8 @@ nv50_devinit_preinit(struct nvkm_devinit *base)
* missing, assume it's a secondary gpu which requires post
*/
if (!base->post) {
- u64 disable = nvkm_devinit_disable(base);
- if (disable & (1ULL << NVKM_ENGINE_DISP))
+ nvkm_devinit_disable(base);
+ if (!device->disp)
base->post = true;
}
@@ -148,9 +148,8 @@ nv50_devinit_init(struct nvkm_devinit *base)
}
int
-nv50_devinit_new_(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+nv50_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv50_devinit *init;
@@ -158,7 +157,7 @@ nv50_devinit_new_(const struct nvkm_devinit_func *func,
return -ENOMEM;
*pinit = &init->base;
- nvkm_devinit_ctor(func, device, index, &init->base);
+ nvkm_devinit_ctor(func, device, type, inst, &init->base);
return 0;
}
@@ -172,8 +171,8 @@ nv50_devinit = {
};
int
-nv50_devinit_new(struct nvkm_device *device, int index,
+nv50_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+ return nv50_devinit_new_(&nv50_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index e8d37a6145a2..987a7f478b84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -9,7 +9,7 @@ struct nv50_devinit {
u32 r001540;
};
-int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_devinit **);
void nv50_devinit_preinit(struct nvkm_devinit *);
void nv50_devinit_init(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 94723352137a..dd8b038a8cee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -16,7 +16,9 @@ struct nvkm_devinit_func {
};
void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
- int index, struct nvkm_devinit *);
+ enum nvkm_subdev_type, int inst, struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 397670e72fff..634f64f88fc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static int
+int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
@@ -82,8 +82,8 @@ tu102_devinit = {
};
int
-tu102_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&tu102_devinit, device, index, pinit);
+ return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index f6dca97140d6..fd54fa504efa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -170,12 +170,12 @@ nvkm_fault = {
int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
- int index, struct nvkm_fault **pfault)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault)
{
struct nvkm_fault *fault;
if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
+ nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev);
fault->func = func;
fault->user.ctor = nvkm_ufault_new;
fault->user.base = func->user.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index f6b189cc4330..6af7959e02ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -30,7 +30,7 @@ void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}
void
@@ -82,8 +82,8 @@ gp100_fault = {
};
int
-gp100_fault_new(struct nvkm_device *device, int index,
+gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gp100_fault, device, index, pfault);
+ return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
index 9e66d1f7654d..89e0bc96fb92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -46,8 +46,8 @@ gp10b_fault = {
};
int
-gp10b_fault_new(struct nvkm_device *device, int index,
+gp10b_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+ return nvkm_fault_new_(&gp10b_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 2707be4ffabc..cd9d2ade5ac7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -228,8 +228,8 @@ gv100_fault = {
};
int
-gv100_fault_new(struct nvkm_device *device, int index,
+gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gv100_fault, device, index, pfault);
+ return nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index f6f1dd7eee1f..36681c347fb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -18,8 +18,8 @@ struct nvkm_fault_buffer {
u64 addr;
};
-int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
- int index, struct nvkm_fault **);
+int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_fault **);
struct nvkm_fault_func {
int (*oneinit)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 45a6a68b9f48..91eb6729c84d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <core/memory.h>
+#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>
@@ -34,6 +35,9 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
* which don't appear to actually work anymore, but newer
* versions of RM don't appear to touch anything at all..
*/
+ struct nvkm_device *device = buffer->fault->subdev.device;
+
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}
static void
@@ -41,6 +45,11 @@ tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+
+ /* Disable the fault interrupts */
+ nvkm_wr32(device, 0xb81408, 0x1);
+ nvkm_wr32(device, 0xb81410, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
}
@@ -50,6 +59,10 @@ tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+ /* Enable the fault interrupts */
+ nvkm_wr32(device, 0xb81208, 0x1);
+ nvkm_wr32(device, 0xb81210, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
@@ -109,14 +122,20 @@ tu102_fault_intr(struct nvkm_fault *fault)
}
if (stat & 0x00000200) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81010, 0x10);
+
if (fault->buffer[0]) {
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
stat &= ~0x00000200;
}
}
- /*XXX: guess, can't confirm until we get fw... */
+ /* Replayable MMU fault */
if (stat & 0x00000100) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81008, 0x1);
+
if (fault->buffer[1]) {
nvkm_event_send(&fault->event, 1, 1, NULL, 0);
stat &= ~0x00000100;
@@ -162,8 +181,8 @@ tu102_fault = {
};
int
-tu102_fault_new(struct nvkm_device *device, int index,
+tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&tu102_fault, device, index, pfault);
+ return nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 43a42159a3d0..5d0bab8ecb43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 5940e0dea2f8..6faaea948fc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -122,7 +122,7 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
nvkm_debug(subdev, "%d comptags\n", tags);
}
- return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}
static int
@@ -205,7 +205,9 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
- nvkm_mm_fini(&fb->tags);
+ nvkm_mm_fini(&fb->tags.mm);
+ mutex_destroy(&fb->tags.mutex);
+
nvkm_ram_del(&fb->ram);
nvkm_blob_dtor(&fb->vpr_scrubber);
@@ -225,21 +227,21 @@ nvkm_fb = {
void
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb *fb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
{
- nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
+ nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
- fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
- fb->func->default_bigpage);
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
+ mutex_init(&fb->tags.mutex);
}
int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(func, device, index, *pfb);
+ nvkm_fb_ctor(func, device, type, inst, *pfb);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 06bf95c0c549..770a4ad39122 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -32,7 +32,7 @@ g84_fb = {
};
int
-g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+g84_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&g84_fb, device, index, pfb);
+ return nv50_fb_new_(&g84_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644
index 000000000000..b47bebfbc26f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644
index 000000000000..6ea7908f0563
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = ga102_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e8dc4e913494..9dcc40f9ef79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -117,13 +117,13 @@ gf100_fb_dtor(struct nvkm_fb *base)
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct gf100_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(func, device, index, &fb->base);
+ nvkm_fb_ctor(func, device, type, inst, &fb->base);
*pfb = &fb->base;
return 0;
@@ -141,7 +141,7 @@ gf100_fb = {
};
int
-gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gf100_fb, device, index, pfb);
+ return gf100_fb_new_(&gf100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2ed7cdaab37c..0cac7b06acc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -10,8 +10,8 @@ struct gf100_fb {
dma_addr_t r100c10;
};
-int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
- int index, struct nvkm_fb **);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fb **);
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 4a9f463745b5..76678dd60f93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -36,7 +36,7 @@ gf108_fb = {
};
int
-gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf108_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gf108_fb, device, index, pfb);
+ return gf100_fb_new_(&gf108_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 48fd98e08baa..f73442ccb424 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -83,7 +83,7 @@ gk104_fb = {
};
int
-gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk104_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk104_fb, device, index, pfb);
+ return gf100_fb_new_(&gk104_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
index 0695e5dd360e..45d6cdffafee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -65,7 +65,7 @@ gk110_fb = {
};
int
-gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk110_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk110_fb, device, index, pfb);
+ return gf100_fb_new_(&gk110_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a7e29b125094..6bc42f89d8c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -34,7 +34,7 @@ gk20a_fb = {
};
int
-gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk20a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 69c876d5d1c1..de52462a92bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -36,7 +36,7 @@ gm107_fb = {
};
int
-gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm107_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm107_fb, device, index, pfb);
+ return gf100_fb_new_(&gm107_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d3b8c3367152..5acf8d15d06f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -67,7 +67,7 @@ gm200_fb = {
};
int
-gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm200_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm200_fb, device, index, pfb);
+ return gf100_fb_new_(&gm200_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index 12db61e31128..86f61a3f2fea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -34,7 +34,7 @@ gm20b_fb = {
};
int
-gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm20b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+ return gf100_fb_new_(&gm20b_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 8205ce436b3e..09e943edc362 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -71,7 +71,7 @@ gp100_fb = {
};
int
-gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp100_fb, device, index, pfb);
+ return gf100_fb_new_(&gp100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index fc8c93aa3da5..0e78b3d734a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -114,9 +114,9 @@ gp102_fb = {
int
gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- int ret = gf100_fb_new_(func, device, index, pfb);
+ int ret = gf100_fb_new_(func, device, type, inst, pfb);
if (ret)
return ret;
@@ -126,9 +126,9 @@ gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
}
int
-gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gp102_fb, device, index, pfb);
+ return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index af8e43979dc1..84c9815a6d48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -31,7 +31,7 @@ gp10b_fb = {
};
int
-gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp10b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp10b_fb, device, index, pfb);
+ return gf100_fb_new_(&gp10b_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index 9266559b45f9..c1ec9758617c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -32,7 +32,7 @@ gt215_fb = {
};
int
-gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gt215_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&gt215_fb, device, index, pfb);
+ return nv50_fb_new_(&gt215_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 10ff5d053f7e..63daa83ae12d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -22,7 +22,7 @@
#include "gf100.h"
#include "ram.h"
-static int
+int
gv100_fb_init_page(struct nvkm_fb *fb)
{
return (fb->page == 16) ? 0 : -EINVAL;
@@ -42,9 +42,9 @@ gv100_fb = {
};
int
-gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gv100_fb, device, index, pfb);
+ return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 73b3b86a2826..70c7b08ee0a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -31,7 +31,7 @@ mcp77_fb = {
};
int
-mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp77_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+ return nv50_fb_new_(&mcp77_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 6d11e32ec7ad..308d955168e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -31,7 +31,7 @@ mcp89_fb = {
};
int
-mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp89_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+ return nv50_fb_new_(&mcp89_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c886664533c8..8d5a007ecc47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -44,7 +44,7 @@ nv04_fb = {
};
int
-nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv04_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv04_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv04_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index c998b7e96aa3..7d2c16b27032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -64,7 +64,7 @@ nv10_fb = {
};
int
-nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv10_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv10_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 7b9f04f44af8..4bdad2abd56f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb = {
};
int
-nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv1a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv1a_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index a021d21ff153..d254f27f9b37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&fb->tags, &tile->tag);
+ nvkm_mm_free(&fb->tags.mm, &tile->tag);
}
void
@@ -96,7 +96,7 @@ nv20_fb = {
};
int
-nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv20_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv20_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index 7709f5fe9a45..47da66dea6e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -54,7 +54,7 @@ nv25_fb = {
};
int
-nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv25_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv25_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 8aa782666507..0f87efb636d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -127,7 +127,7 @@ nv30_fb = {
};
int
-nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv30_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv30_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 6e83dcff72e0..0694dcfd107e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -56,7 +56,7 @@ nv35_fb = {
};
int
-nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 2a07617bb44c..1a39770372f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -56,7 +56,7 @@ nv36_fb = {
};
int
-nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv36_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv36_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index 955160778b5b..77dbb9d6ba48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -62,7 +62,7 @@ nv40_fb = {
};
int
-nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv40_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv40_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index b77f08d34cc3..0f9d9e48e7ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -56,7 +56,7 @@ nv41_fb = {
};
int
-nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv41_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv41_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index b59dc486083d..b1046ee9f0ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -65,7 +65,7 @@ nv44_fb = {
};
int
-nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv44_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv44_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index cab7d20fa039..0d78de422dfa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -51,7 +51,7 @@ nv46_fb = {
};
int
-nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv46_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv46_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index a8b0ad4c871d..5cedde29c8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -39,7 +39,7 @@ nv47_fb = {
};
int
-nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv47_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv47_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index d0b317bb0252..95cc099603d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -39,7 +39,7 @@ nv49_fb = {
};
int
-nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv49_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv49_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 6a6f0c086071..c9f3148f4e75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -37,7 +37,7 @@ nv4e_fb = {
};
int
-nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv4e_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv4e_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index b2f5bf8144ea..95fd8f834010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -262,16 +262,15 @@ nv50_fb_ = {
int
nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct nv50_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+ nvkm_fb_ctor(&nv50_fb_, device, type, inst, &fb->base);
fb->func = func;
*pfb = &fb->base;
-
return 0;
}
@@ -283,7 +282,7 @@ nv50_fb = {
};
int
-nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv50_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&nv50_fb, device, index, pfb);
+ return nv50_fb_new_(&nv50_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index 5e2b0c9539ed..a5e673859a90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -17,6 +17,6 @@ struct nv50_fb_func {
u32 trap;
};
-int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fb **pfb);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 5be9c563350d..3f1be9780c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -38,9 +38,9 @@ struct nvkm_fb_func {
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
- int index, struct nvkm_fb *);
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb *);
int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
- int index, struct nvkm_fb **);
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
@@ -78,8 +78,10 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fb **);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
#endif
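
Every *_fb_new()/*_fuse_new()/*_gpio_new() conversion in these hunks follows one mechanical pattern: the single flat "int index" that used to identify a subdev is replaced by an (enum nvkm_subdev_type, int inst) pair, so a device can carry more than one instance of the same subdev type. A minimal compilable sketch of the new calling convention follows; all demo_* names are hypothetical stand-ins, not the real nvkm types.

#include <stdio.h>

enum demo_subdev_type { DEMO_FB, DEMO_GPIO, DEMO_I2C };

struct demo_subdev {
	enum demo_subdev_type type;
	int inst;                /* instance of this type; 0 for the first */
	char name[16];
};

static void
demo_subdev_ctor(struct demo_subdev *sd, enum demo_subdev_type type, int inst)
{
	static const char * const names[] = { "fb", "gpio", "i2c" };

	sd->type = type;
	sd->inst = inst;
	if (inst)
		snprintf(sd->name, sizeof(sd->name), "%s%d", names[type], inst);
	else
		snprintf(sd->name, sizeof(sd->name), "%s", names[type]);
}

int main(void)
{
	struct demo_subdev a, b;

	demo_subdev_ctor(&a, DEMO_I2C, 0);
	demo_subdev_ctor(&b, DEMO_I2C, 1);   /* a second instance is now expressible */
	printf("%s %s\n", a.name, b.name);   /* i2c i2c1 */
	return 0;
}

The gsp/base.c hunk further down shows the payoff of carrying a per-subdev name: the falcon constructor takes gsp->subdev.name directly instead of indexing a global nvkm_subdev_name[] table.
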
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index b11867f682cb..03b1bdb27770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -81,12 +81,12 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
struct nvkm_vram *vram = nvkm_vram(memory);
struct nvkm_mm_node *next = vram->mn;
struct nvkm_mm_node *node;
- mutex_lock(&vram->ram->fb->subdev.mutex);
+ mutex_lock(&vram->ram->mutex);
while ((node = next)) {
next = node->next;
nvkm_mm_free(&vram->ram->vram, &node);
}
- mutex_unlock(&vram->ram->fb->subdev.mutex);
+ mutex_unlock(&vram->ram->mutex);
return vram;
}
@@ -126,7 +126,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
vram->page = page;
*pmemory = &vram->memory;
- mutex_lock(&ram->fb->subdev.mutex);
+ mutex_lock(&ram->mutex);
node = &vram->mn;
do {
if (back)
@@ -134,7 +134,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
else
ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
+ mutex_unlock(&ram->mutex);
nvkm_memory_unref(pmemory);
return ret;
}
@@ -143,7 +143,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
node = &r->next;
max -= r->length;
} while (max);
- mutex_unlock(&ram->fb->subdev.mutex);
+ mutex_unlock(&ram->mutex);
return 0;
}
@@ -163,6 +163,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
if (ram->func->dtor)
*pram = ram->func->dtor(ram);
nvkm_mm_fini(&ram->vram);
+ mutex_destroy(&ram->mutex);
kfree(*pram);
*pram = NULL;
}
@@ -196,6 +197,7 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
ram->fb = fb;
ram->type = type;
ram->size = size;
+ mutex_init(&ram->mutex);
if (!nvkm_mm_initialised(&ram->vram)) {
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
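
The ram.c hunks above move VRAM allocator locking off the shared fb->subdev.mutex and onto a mutex owned by nvkm_ram itself, paired as mutex_init() in nvkm_ram_ctor() and mutex_destroy() in nvkm_ram_del(). A compilable sketch of that ownership pattern follows, with pthread_mutex_* standing in for the kernel mutex API and hypothetical demo_* names.

#include <pthread.h>
#include <stddef.h>

/* The object that owns the allocator now owns the lock guarding it,
 * instead of borrowing the parent subdev's mutex.
 */
struct demo_ram {
	pthread_mutex_t mutex;   /* was: &ram->fb->subdev.mutex */
	size_t free;             /* toy allocator state */
};

static int demo_ram_ctor(struct demo_ram *ram, size_t size)
{
	ram->free = size;
	return pthread_mutex_init(&ram->mutex, NULL);   /* mutex_init() */
}

static void demo_ram_del(struct demo_ram *ram)
{
	pthread_mutex_destroy(&ram->mutex);             /* mutex_destroy() */
}

static int demo_ram_get(struct demo_ram *ram, size_t size)
{
	int ret = -1;

	pthread_mutex_lock(&ram->mutex);    /* every alloc/free path locks */
	if (ram->free >= size) {
		ram->free -= size;
		ret = 0;
	}
	pthread_mutex_unlock(&ram->mutex);
	return ret;
}

int main(void)
{
	struct demo_ram ram;

	if (demo_ram_ctor(&ram, 1024))
		return 1;
	(void)demo_ram_get(&ram, 512);
	demo_ram_del(&ram);
	return 0;
}

The instmem and ltc hunks later in this diff make the same move for their own structures, presumably so unrelated paths through the subdev no longer serialise on a single subdev-wide lock.
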
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index d723a9b4e3c4..ea7d66f3dd82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644
index 000000000000..298c136cefe0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ u32 size = nvkm_rd32(device, 0x1183a4);
+
+ return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
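
The size read in ga102_ram_new() is apparently a MiB count held in a 32-bit register, and the (u64) cast before the shift is load-bearing: done in 32-bit arithmetic, the shift would wrap for any board of 4 GiB (0x1000 MiB) or more. A standalone illustration with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size_mib = 0x6000;                 /* e.g. a 24 GiB board */
	uint64_t wrong = size_mib << 20;            /* 32-bit shift: wraps to 0 */
	uint64_t right = (uint64_t)size_mib << 20;  /* what the driver does */

	printf("wrong: %llu bytes\n", (unsigned long long)wrong);
	printf("right: %llu bytes\n", (unsigned long long)right);
	return 0;
}
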
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index d350d92852d2..2b678b60b4d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -260,7 +260,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
@@ -661,7 +661,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_unblock(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
@@ -711,7 +711,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
@@ -943,7 +943,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_unblock(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
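
The four gk104 ram hunks are the consumer side of the same refactor: once nvkm_device carries typed per-engine pointers, the indexed nvkm_device_engine(device, NVKM_ENGINE_DISP) lookup collapses into a NULL test on device->disp. The shape of the change, with hypothetical demo_* types:

#include <stddef.h>

struct demo_disp { int unused; };

struct demo_device {
	struct demo_disp *disp;   /* NULL when the engine is absent */
};

/* was: if (nvkm_device_engine(device, NVKM_ENGINE_DISP)) ... */
static int demo_has_display(const struct demo_device *device)
{
	return device->disp != NULL;
}

int main(void)
{
	struct demo_device headless = { NULL };

	return demo_has_display(&headless);  /* 0: skip the 0x62c000 writes */
}
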
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index 1c3c18ea8ced..375dfce09f84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -42,12 +42,12 @@ nvkm_fuse = {
int
nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
- int index, struct nvkm_fuse **pfuse)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse)
{
struct nvkm_fuse *fuse;
if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev);
+ nvkm_subdev_ctor(&nvkm_fuse, device, type, inst, &fuse->subdev);
fuse->func = func;
spin_lock_init(&fuse->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 13671fedc805..01f770654b1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -47,7 +47,8 @@ gf100_fuse = {
};
int
-gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gf100_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&gf100_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 9aff4ea04506..7dc99492f536 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -36,7 +36,8 @@ gm107_fuse = {
};
int
-gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 514c193db25d..2505e8e1c1d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -45,7 +45,8 @@ nv50_fuse = {
};
int
-nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+nv50_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&nv50_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 2edc612408dd..e83d0c30dff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -8,6 +8,6 @@ struct nvkm_fuse_func {
u32 (*read)(struct nvkm_fuse *, u32 addr);
};
-int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
- int index, struct nvkm_fuse **);
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fuse **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index b2ad5922a1c2..efbbaa080de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
nvkm-y += nvkm/subdev/gpio/g94.o
nvkm-y += nvkm/subdev/gpio/gf119.o
nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 914276410ef8..048bcc70c3f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -241,14 +241,14 @@ nvkm_gpio = {
int
nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
- int index, struct nvkm_gpio **pgpio)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio)
{
struct nvkm_gpio *gpio;
if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev);
+ nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev);
gpio->func = func;
return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 6dcda55fb865..114728ccdf8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -68,7 +68,8 @@ g94_gpio = {
};
int
-g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+g94_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&g94_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644
index 000000000000..4a96f926b66d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, len;
+ u16 entry;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+ u32 data = nvbios_rd32(bios, entry);
+ u8 line = (data & 0x0000003f);
+ u8 defs = !!(data & 0x00000080);
+ u8 func = (data & 0x0000ff00) >> 8;
+ u8 unk0 = (data & 0x00ff0000) >> 16;
+ u8 unk1 = (data & 0x1f000000) >> 24;
+
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
+ continue;
+
+ nvkm_gpio_set(gpio, 0, func, line, defs);
+
+ nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+ }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+ nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x021640);
+ u32 intr1 = nvkm_rd32(device, 0x02164c);
+ u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nvkm_wr32(device, 0x021640, intr0);
+ nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x021648);
+ u32 inte1 = nvkm_rd32(device, 0x021654);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nvkm_wr32(device, 0x021648, inte0);
+ nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+ .lines = 32,
+ .intr_stat = ga102_gpio_intr_stat,
+ .intr_mask = ga102_gpio_intr_mask,
+ .drive = ga102_gpio_drive,
+ .sense = ga102_gpio_sense,
+ .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
+}
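
ga102_gpio_intr_stat() above is the least obvious part of the new file: 32 GPIO lines appear to be spread across two status registers, each carrying 16 lines with rising (hi) events in its low half-word and falling (lo) events in its high half-word, and the function folds them back into one 32-bit mask per direction. A standalone check of that shuffle with made-up register values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values: stat0 has a hi event on line 0 and a lo event on
	 * line 1; stat1 has a hi event on line 18 and a lo event on line 19.
	 */
	uint32_t stat0 = 0x00020001;
	uint32_t stat1 = 0x00080004;

	uint32_t lo = (stat1 & 0xffff0000) | (stat0 >> 16);
	uint32_t hi = (stat1 << 16) | (stat0 & 0x0000ffff);

	printf("hi %08" PRIx32 " lo %08" PRIx32 "\n", hi, lo);
	/* prints: hi 00040001 lo 00080002, i.e. bits 0 and 18 hi, 1 and 19 lo */
	return 0;
}
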
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index bb7400dfaef8..ecb19e4f5c48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -80,7 +80,8 @@ gf119_gpio = {
};
int
-gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gf119_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&gf119_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 2ead515b8530..c0e4cdb45520 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -68,7 +68,8 @@ gk104_gpio = {
};
int
-gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index ae3499b48330..48ad29b5638f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -112,7 +112,8 @@ nv10_gpio = {
};
int
-nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv10_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&nv10_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 73923fd5f7f2..b86c49762f11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -126,7 +126,8 @@ nv50_gpio = {
};
int
-nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv50_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&nv50_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 59e39affe2a0..6590d81164e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -28,8 +28,8 @@ struct nvkm_gpio_func {
void (*reset)(struct nvkm_gpio *, u8);
};
-int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
- int index, struct nvkm_gpio **);
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_gpio **);
void nv50_gpio_reset(struct nvkm_gpio *, u8);
int nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index 5a32df0f9992..22574886b819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -40,20 +40,18 @@ nvkm_gsp = {
int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_gsp **pgsp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
struct nvkm_gsp *gsp;
if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+ nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
if (IS_ERR(fwif))
return PTR_ERR(fwif);
- return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
- nvkm_subdev_name[gsp->subdev.index], 0,
- &gsp->falcon);
+ return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index 2114f9b00a28..2ac7fc934c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -49,7 +49,8 @@ gv100_gsp[] = {
};
int
-gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
+gv100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
{
- return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
+ return nvkm_gsp_new_(gv100_gsp, device, type, inst, pgsp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 92820fb997c1..19381ddd38d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -10,6 +10,6 @@ struct nvkm_gsp_fwif {
const struct nvkm_falcon_func *flcn;
};
-int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 723d0284caef..819703913a00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
nvkm-y += nvkm/subdev/i2c/gf117.o
nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
nvkm-y += nvkm/subdev/i2c/gm200.o
nvkm-y += nvkm/subdev/i2c/pad.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 30b48896965e..f920eabf8628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,13 @@
#define __NVKM_I2C_AUX_H__
#include "pad.h"
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ if (i2c->func->aux_autodpcd)
+		i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
struct nvkm_i2c_aux_func {
bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
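
The wrapper added to aux.h follows the func-table convention used throughout nvkm: the per-generation implementation lives behind an optional function pointer, chips that predate the feature leave it NULL, and the inline wrapper becomes a no-op for them. A compilable sketch of the pattern; demo_* names are hypothetical, not the nvkm types.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_i2c;

struct demo_i2c_func {
	void (*aux_autodpcd)(struct demo_i2c *, int aux, bool enable);
};

struct demo_i2c {
	const struct demo_i2c_func *func;
};

static inline void
demo_i2c_aux_autodpcd(struct demo_i2c *i2c, int aux, bool enable)
{
	if (i2c->func->aux_autodpcd)          /* hook is optional */
		i2c->func->aux_autodpcd(i2c, aux, enable);
}

static void
demo_autodpcd(struct demo_i2c *i2c, int aux, bool enable)
{
	(void)i2c;
	printf("aux %d autodpcd %s\n", aux, enable ? "on" : "off");
}

int main(void)
{
	static const struct demo_i2c_func newer_funcs = { .aux_autodpcd = demo_autodpcd };
	static const struct demo_i2c_func older_funcs = { NULL };
	struct demo_i2c newer = { &newer_funcs };
	struct demo_i2c older = { &older_funcs };

	demo_i2c_aux_autodpcd(&newer, 0, false);  /* dispatches */
	demo_i2c_aux_autodpcd(&older, 0, false);  /* silent no-op */
	return 0;
}
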
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index db7769cb33eb..47068f6f9c55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
-
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
g94_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
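
The g94 xfer changes are about unwind ordering: auto-DPCD is disabled only after the channel is acquired, so the timeout path must exit through the new out_err label, which re-enables it before the existing out label runs the channel fini. Failures before the disable still jump straight to out. A condensed sketch, with hypothetical helpers in place of the real hardware accessors:

#include <stdbool.h>
#include <stdio.h>

static int  acquire_channel(void)     { return 0; }
static void release_channel(void)     { puts("fini"); }
static void set_autodpcd(bool enable) { printf("autodpcd %d\n", enable); }
static int  do_transfer(void)         { return -1; /* pretend timeout */ }

static int demo_xfer(void)
{
	int ret = acquire_channel();
	if (ret)
		goto out;            /* nothing to undo yet */

	set_autodpcd(false);         /* disabled around the transfer */

	ret = do_transfer();
	if (ret)
		goto out_err;        /* was "goto out" before this change */

	puts("read back reply");     /* success-only work */

out_err:
	set_autodpcd(true);          /* re-enabled on success and error alike */
out:
	release_channel();
	return ret;
}

int main(void)
{
	return demo_xfer() ? 1 : 0;
}

The next file applies the identical restructuring to the gm200 variant of the same function.
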
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca0..8bd1d442e465 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
*size = stat & 0x0000001f;
}
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 719345074711..cb5cb533d91c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -277,7 +277,7 @@ nvkm_i2c_drv[] = {
int
nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
- int index, struct nvkm_i2c **pi2c)
+ enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c;
@@ -289,7 +289,7 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev);
+ nvkm_subdev_ctor(&nvkm_i2c, device, type, inst, &i2c->subdev);
i2c->func = func;
INIT_LIST_HEAD(&i2c->pad);
INIT_LIST_HEAD(&i2c->bus);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index bb2a31d88161..e5bad085c06f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -66,7 +66,8 @@ g94_i2c = {
};
int
-g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+g94_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&g94_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index ae4aad3fcd2e..cda30ee6767d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -30,7 +30,8 @@ gf117_i2c = {
};
int
-gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf117_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gf117_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
index 6f2b02af42c8..e9c6a6cca09d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -34,7 +34,8 @@ gf119_i2c = {
};
int
-gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf119_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gf119_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index f9f6bf4b66c9..d35aa6fe3015 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -66,7 +66,8 @@ gk104_i2c = {
};
int
-gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gk104_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gk104_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644
index 000000000000..9fec6af56e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gk110_i2c, device, type, inst, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index a23c5f315221..46917eb600f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -24,6 +24,12 @@
#include "priv.h"
#include "pad.h"
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
static const struct nvkm_i2c_func
gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
@@ -31,10 +37,12 @@ gm200_i2c = {
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gm200_aux_autodpcd,
};
int
-gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 18776f49355c..ecfcf147c789 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -30,7 +30,8 @@ nv04_i2c = {
};
int
-nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv04_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv04_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 6b762f7cee9e..ad1d3fd2bcbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -30,7 +30,8 @@ nv4e_i2c = {
};
int
-nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv4e_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv4e_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index 75640ab97d6a..2f94bed2c056 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -30,7 +30,8 @@ nv50_i2c = {
};
int
-nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv50_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv50_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 461016814f4f..44b7bb7d4777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
struct nvkm_i2c_pad {
const struct nvkm_i2c_pad_func *func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bd86bc298ebe..f9d79f72f7e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -4,8 +4,8 @@
#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
#include <subdev/i2c.h>
-int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
- int index, struct nvkm_i2c **);
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_i2c **);
struct nvkm_i2c_func {
int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
/* mask on/off interrupt types for a given set of auxch
*/
void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+ /* enable/disable HW-initiated DPCD reads
+ */
+ void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
};
void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
deleted file mode 100644
index 127efb51f67d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/ibus/gf100.o
-nvkm-y += nvkm/subdev/ibus/gf117.o
-nvkm-y += nvkm/subdev/ibus/gk104.o
-nvkm-y += nvkm/subdev/ibus/gk20a.o
-nvkm-y += nvkm/subdev/ibus/gm200.o
-nvkm-y += nvkm/subdev/ibus/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
deleted file mode 100644
index 302d69e384d8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_PRIV_H__
-#define __NVKM_IBUS_PRIV_H__
-
-#include <subdev/ibus.h>
-
-void gf100_ibus_intr(struct nvkm_subdev *);
-void gk104_ibus_intr(struct nvkm_subdev *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index fecfa6afcf54..8f0ccd3664eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -312,20 +312,20 @@ iccsense_func = {
};
void
-nvkm_iccsense_ctor(struct nvkm_device *device, int index,
+nvkm_iccsense_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense *iccsense)
{
- nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev);
+ nvkm_subdev_ctor(&iccsense_func, device, type, inst, &iccsense->subdev);
}
int
-nvkm_iccsense_new_(struct nvkm_device *device, int index,
+nvkm_iccsense_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **iccsense)
{
if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&(*iccsense)->sensors);
INIT_LIST_HEAD(&(*iccsense)->rails);
- nvkm_iccsense_ctor(device, index, *iccsense);
+ nvkm_iccsense_ctor(device, type, inst, *iccsense);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
index cccff1c8a409..3eabf4944395 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
@@ -24,8 +24,8 @@
#include "priv.h"
int
-gf100_iccsense_new(struct nvkm_device *device, int index,
+gf100_iccsense_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **piccsense)
{
- return nvkm_iccsense_new_(device, index, piccsense);
+ return nvkm_iccsense_new_(device, type, inst, piccsense);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index cc09c6c504af..c33441124241 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -22,6 +22,6 @@ struct nvkm_iccsense_rail {
u8 mohm;
};
-void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *);
-int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **);
+void nvkm_iccsense_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense *);
+int nvkm_iccsense_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 364ea4492acc..cd8163a52bb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -218,9 +218,11 @@ static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ void *data = imem;
if (imem->func->dtor)
- return imem->func->dtor(imem);
- return imem;
+ data = imem->func->dtor(imem);
+ mutex_destroy(&imem->mutex);
+ return data;
}
static const struct nvkm_subdev_func
@@ -232,13 +234,13 @@ nvkm_instmem = {
};
void
-nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_instmem *imem)
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
- nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
+ nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
INIT_LIST_HEAD(&imem->boot);
+ mutex_init(&imem->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 13d4d7ac0697..648ecf5a8fbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -568,7 +568,7 @@ gk20a_instmem = {
};
int
-gk20a_instmem_new(struct nvkm_device *device, int index,
+gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
@@ -576,7 +576,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base);
mutex_init(&imem->lock);
*pimem = &imem->base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6bf0dad46919..25603b01d6f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -99,9 +99,9 @@ static void *
nv04_instobj_dtor(struct nvkm_memory *memory)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
@@ -218,14 +217,14 @@ nv04_instmem = {
};
int
-nv04_instmem_new(struct nvkm_device *device, int index,
+nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv04_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 086c118488ef..6b462f960922 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -99,9 +99,9 @@ static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
@@ -236,7 +235,7 @@ nv40_instmem = {
};
int
-nv40_instmem_new(struct nvkm_device *device, int index,
+nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
@@ -244,7 +243,7 @@ nv40_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
/* map bar */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 02c4eb28cef4..96aca0edfa3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
* into it. The lock has to be dropped while doing this due
* to the possibility of recursion for page table allocation.
*/
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
/* Evict unused mappings, and keep retrying until we either
		 * succeed, or there are no more objects left on the LRU.
*/
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
if (eobj) {
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
emap = eobj->map;
eobj->map = NULL;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
if (!eobj)
break;
iounmap(emap);
@@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
if (ret == 0)
ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
nvkm_vmm_put(vmm, &bar);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
return;
}
@@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
wmb();
nvkm_bar_flush(subdev->device->bar);
- if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
/* Add the now-unused mapping to the LRU instead of directly
* unmapping it here, in case we need to map it again later.
*/
@@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
/* Switch back to NULL accessors when last map is gone. */
iobj->base.memory.ptrs = NULL;
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
}
}
@@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return iobj->map;
}
@@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
refcount_set(&iobj->maps, 1);
}
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return map;
}
@@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
/* Exclude bootstrapped objects (ie. the page tables for the
* instmem BAR itself) from eviction.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
list_del_init(&iobj->lru);
iobj->lru.next = NULL;
@@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
nv50_instobj_kmap(iobj, vmm);
nvkm_instmem_boot(imem);
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
}
static u64
@@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
struct nvkm_vma *bar;
void *map = map;
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
if (map) {
struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
@@ -386,14 +386,14 @@ nv50_instmem = {
};
int
-nv50_instmem_new(struct nvkm_device *device, int index,
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv50_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
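nv50_instobj_kmap() above has the trickiest locking of the conversion: the lock must be dropped around nvkm_vmm_get() because BAR2 page-table allocation can recurse back into instmem, and after reacquiring it the code re-checks iobj->bar in case another thread mapped the object first. The shape of that pattern, as a sketch (alloc_vma()/free_vma() are hypothetical placeholders):

	static void
	map_once(struct nv50_instobj *iobj, struct nvkm_instmem *imem)
	{
		struct nvkm_vma *bar = NULL;
		int ret;

		mutex_unlock(&imem->mutex);	/* allocation may recurse */
		ret = alloc_vma(&bar);
		mutex_lock(&imem->mutex);

		if (ret || iobj->bar) {		/* failed, or lost the race */
			mutex_unlock(&imem->mutex);
			free_vma(&bar);		/* undo outside the lock too */
			mutex_lock(&imem->mutex);
			return;
		}
		iobj->bar = bar;
	}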
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index f5da8fcbdde3..56c15e30a5dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,7 +16,7 @@ struct nvkm_instmem_func {
};
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
- int index, struct nvkm_instmem *);
+ enum nvkm_subdev_type, int, struct nvkm_instmem *);
void nvkm_instmem_boot(struct nvkm_instmem *);
#include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 23242179e600..fa683c190795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -33,10 +33,10 @@ nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
BUG_ON((first > limit) || (limit >= ltc->num_tags));
- mutex_lock(&ltc->subdev.mutex);
+ mutex_lock(&ltc->mutex);
ltc->func->cbc_clear(ltc, first, limit);
ltc->func->cbc_wait(ltc);
- mutex_unlock(&ltc->subdev.mutex);
+ mutex_unlock(&ltc->mutex);
}
int
@@ -113,6 +113,7 @@ nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
nvkm_memory_unref(&ltc->tag_ram);
+ mutex_destroy(&ltc->mutex);
return ltc;
}
@@ -126,15 +127,16 @@ nvkm_ltc = {
int
nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
- int index, struct nvkm_ltc **pltc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc)
{
struct nvkm_ltc *ltc;
if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev);
+ nvkm_subdev_ctor(&nvkm_ltc, device, type, inst, &ltc->subdev);
ltc->func = func;
+ mutex_init(&ltc->mutex);
ltc->zbc_min = 1; /* reserve 0 for disabled */
ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
return 0;
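The ltc conversion follows the same recipe: the mutex_init() added to nvkm_ltc_new_() is paired with the mutex_destroy() added to nvkm_ltc_dtor(), and nvkm_ltc_tags_clear() now serializes on the ltc's own lock so that cbc_clear()/cbc_wait() pairs from different callers cannot interleave. In sketch form (first/limit as computed above):

	mutex_lock(&ltc->mutex);
	ltc->func->cbc_clear(ltc, first, limit);	/* kick the clear  */
	ltc->func->cbc_wait(ltc);			/* wait for it     */
	mutex_unlock(&ltc->mutex);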
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index a21ef45b8572..fd8aeafc812d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -200,8 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
}
mm_init:
- nvkm_mm_fini(&fb->tags);
- return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
+ nvkm_mm_fini(&fb->tags.mm);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
}
int
@@ -249,7 +249,8 @@ gf100_ltc = {
};
int
-gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index b4f6e0034d58..94aa09244d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -50,7 +50,8 @@ gk104_ltc = {
};
int
-gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gk104_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gk104_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index ec0a3844b2d1..54d1d65d5a85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -145,7 +145,8 @@ gm107_ltc = {
};
int
-gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm107_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gm107_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index e18e0dc19ec8..8cfdbbdd8e8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -57,7 +57,8 @@ gm200_ltc = {
};
int
-gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm200_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gm200_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gm200_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index e923ed76d37a..a4a6cd9b435a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -69,7 +69,8 @@ gp100_ltc = {
};
int
-gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp100_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
index 601747ada655..ff05d617e7f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -45,7 +45,8 @@ gp102_ltc = {
};
int
-gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp102_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
index c0063c7caa50..dfebd796cb4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -59,7 +59,8 @@ gp10b_ltc = {
};
int
-gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp10b_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp10b_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index eca5a711b1b8..2bebe139005d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -5,8 +5,8 @@
#include <subdev/ltc.h>
#include <core/enum.h>
-int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
- int index, struct nvkm_ltc **);
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_ltc **);
struct nvkm_ltc_func {
int (*oneinit)(struct nvkm_ltc *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2585ef07532a..ac2b34e9ac6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 0e57ab2a709f..21c4af3f81d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -35,14 +35,14 @@ nvkm_mc_unk260(struct nvkm_device *device, u32 data)
}
void
-nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en)
{
struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
if (likely(mc) && mc->func->intr_mask) {
- u32 mask = nvkm_top_intr_mask(device, devidx);
+ u32 mask = nvkm_top_intr_mask(device, type, inst);
for (map = mc->func->intr; !mask && map->stat; map++) {
- if (map->unit == devidx)
+ if (map->type == type && map->inst == inst)
mask = map->stat;
}
mc->func->intr_mask(mc, mask, en ? mask : 0);
@@ -78,27 +78,34 @@ void
nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
struct nvkm_mc *mc = device->mc;
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *tdev;
struct nvkm_subdev *subdev;
const struct nvkm_mc_map *map;
u32 stat, intr;
- u64 subdevs;
if (unlikely(!mc))
return;
- intr = nvkm_mc_intr_stat(mc);
- stat = nvkm_top_intr(device, intr, &subdevs);
- while (subdevs) {
- enum nvkm_devidx subidx = __ffs64(subdevs);
- subdev = nvkm_device_subdev(device, subidx);
- if (subdev)
- nvkm_subdev_intr(subdev);
- subdevs &= ~BIT_ULL(subidx);
+ stat = intr = nvkm_mc_intr_stat(mc);
+
+ if (top) {
+ list_for_each_entry(tdev, &top->device, head) {
+ if (tdev->intr >= 0 && (stat & BIT(tdev->intr))) {
+ subdev = nvkm_device_subdev(device, tdev->type, tdev->inst);
+ if (subdev) {
+ nvkm_subdev_intr(subdev);
+ stat &= ~BIT(tdev->intr);
+ if (!stat)
+ break;
+ }
+ }
+ }
}
for (map = mc->func->intr; map->stat; map++) {
if (intr & map->stat) {
- subdev = nvkm_device_subdev(device, map->unit);
+ subdev = nvkm_device_subdev(device, map->type, map->inst);
if (subdev)
nvkm_subdev_intr(subdev);
stat &= ~map->stat;
@@ -108,23 +115,19 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
if (stat)
nvkm_error(&mc->subdev, "intr %08x\n", stat);
*handled = intr != 0;
-
- if (mc->func->intr_hack)
- mc->func->intr_hack(mc, handled);
}
static u32
-nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
- enum nvkm_devidx devidx)
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst)
{
struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
u64 pmc_enable = 0;
if (likely(mc)) {
- if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+ if (!(pmc_enable = nvkm_top_reset(device, type, inst))) {
for (map = mc->func->reset; map && map->stat; map++) {
if (!isauto || !map->noauto) {
- if (map->unit == devidx) {
+ if (map->type == type && map->inst == inst) {
pmc_enable = map->stat;
break;
}
@@ -136,9 +139,9 @@ nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
}
void
-nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -147,17 +150,17 @@ nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
}
void
-nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable)
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
}
void
-nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
nvkm_rd32(device, 0x000200);
@@ -165,9 +168,9 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
}
bool
-nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
return (pmc_enable != 0) &&
((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
@@ -206,19 +209,19 @@ nvkm_mc = {
void
nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc *mc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc *mc)
{
- nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+ nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev);
mc->func = func;
}
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc **pmc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(func, device, index, *pmc);
+ nvkm_mc_ctor(func, device, type, inst, *pmc);
return 0;
}
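With nvkm_devidx gone, nvkm_mc_intr() above dispatches in two stages: pending bits are first matched against the PTOP-discovered device list (tdev->intr holds the bit index, negative when a unit has no vector), and whatever remains is looked up in the chipset's static map, whose entries now match on (type, inst). Both stages reduce to the same lookup-and-notify step; roughly (mc_intr_dispatch is a hypothetical name):

	static bool
	mc_intr_dispatch(struct nvkm_device *device,
			 enum nvkm_subdev_type type, int inst)
	{
		struct nvkm_subdev *subdev =
			nvkm_device_subdev(device, type, inst);

		if (!subdev)
			return false;
		nvkm_subdev_intr(subdev);
		return true;
	}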
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 430a61c3df44..4cfc1c984006 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -62,7 +62,7 @@ g84_mc = {
};
int
-g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g84_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&g84_mc, device, index, pmc);
+ return nvkm_mc_new_(&g84_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 93ad4982ce5f..b7e58d75d894 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -62,7 +62,7 @@ g98_mc = {
};
int
-g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g98_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&g98_mc, device, index, pmc);
+ return nvkm_mc_new_(&g98_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644
index 000000000000..4105175dfccd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81210, mask & intr);
+ nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
+ u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+ if (intr_top & 0x00000004)
+ intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+ return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+ nv50_mc_init(mc);
+ nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+ .init = ga100_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = ga100_mc_intr_unarm,
+ .intr_rearm = ga100_mc_intr_rearm,
+ .intr_mask = ga100_mc_intr_mask,
+ .intr_stat = ga100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
+}
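The new ga100 implementation programs the Ampere top-level interrupt tree: 0xb81600 is the top-level status, whose bit 2 gates the legacy-style leaf register at 0xb81010; writes of that bit to 0xb81608/0xb81610 rearm and unarm the branch, and init opens the leaf mask at 0xb81210. The common ISR drives these hooks in the usual order; roughly (a sketch of the existing caller, not code added by this patch):

	mc->func->intr_unarm(mc);		/* mask at the top level   */
	intr = mc->func->intr_stat(mc);		/* snapshot pending bits   */
	/* ... dispatch to subdevs ... */
	mc->func->intr_rearm(mc);		/* re-enable the branch    */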
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index f93766418056..3a589c6f7fad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -27,11 +27,11 @@ static const struct nvkm_mc_map
gf100_mc_reset[] = {
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
- { 0x00002000, NVKM_SUBDEV_PMU, true },
+ { 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00000080, NVKM_ENGINE_CE1 },
- { 0x00000040, NVKM_ENGINE_CE0 },
+ { 0x00000080, NVKM_ENGINE_CE, 1 },
+ { 0x00000040, NVKM_ENGINE_CE, 0 },
{ 0x00000002, NVKM_ENGINE_MSPPP },
{}
};
@@ -43,10 +43,10 @@ gf100_mc_intr[] = {
{ 0x00008000, NVKM_ENGINE_MSVLD },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00000040, NVKM_ENGINE_CE1 },
- { 0x00000020, NVKM_ENGINE_CE0 },
+ { 0x00000040, NVKM_ENGINE_CE, 1 },
+ { 0x00000020, NVKM_ENGINE_CE, 0 },
{ 0x00000001, NVKM_ENGINE_MSPPP },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -112,7 +112,7 @@ gf100_mc = {
};
int
-gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gf100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+ return nvkm_mc_new_(&gf100_mc, device, type, inst, pmc);
}
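The gf100 table edits above follow from nvkm_mc_map gaining an instance field (see the priv.h hunk further down): positional initializers must now supply inst before noauto, hence { 0x00002000, NVKM_SUBDEV_PMU, 0, true }, and multi-instance engines split into per-instance rows such as { 0x00000080, NVKM_ENGINE_CE, 1 }. For reference, the struct as it now stands, with an illustrative entry:

	struct nvkm_mc_map {
		u32 stat;			/* PMC_INTR/PMC_ENABLE bits */
		enum nvkm_subdev_type type;
		int inst;			/* new: unit instance       */
		bool noauto;
	};

	static const struct nvkm_mc_map example_reset[] = {
		{ 0x00002000, NVKM_SUBDEV_PMU, 0, true },  /* inst precedes noauto */
		{}
	};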
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 7b8c6ecad1a5..d9b9067fa93f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,7 +26,7 @@
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00002000, NVKM_SUBDEV_PMU, true },
+ { 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{}
};
@@ -34,7 +34,7 @@ const struct nvkm_mc_map
gk104_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -60,7 +60,7 @@ gk104_mc = {
};
int
-gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk104_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gk104_mc, device, index, pmc);
+ return nvkm_mc_new_(&gk104_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index ca1bf3279dbe..03590292749a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -35,7 +35,7 @@ gk20a_mc = {
};
int
-gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk20a_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+ return nvkm_mc_new_(&gk20a_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 43db245eec9a..5fd1a0595c33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -80,7 +80,7 @@ gp100_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000200, NVKM_SUBDEV_FAULT },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -106,13 +106,13 @@ gp100_mc = {
int
gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc **pmc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
struct gp100_mc *mc;
if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(func, device, index, &mc->base);
+ nvkm_mc_ctor(func, device, type, inst, &mc->base);
*pmc = &mc->base;
spin_lock_init(&mc->lock);
@@ -122,7 +122,7 @@ gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
}
int
-gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&gp100_mc, device, index, pmc);
+ return gp100_mc_new_(&gp100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index 45c62f5ef782..dd581d030ced 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -43,7 +43,7 @@ gp10b_mc = {
};
int
-gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp10b_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&gp10b_mc, device, index, pmc);
+ return gp100_mc_new_(&gp10b_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index 99d50a3d956f..1b4d43531dba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -27,7 +27,7 @@ static const struct nvkm_mc_map
gt215_mc_reset[] = {
{ 0x04008000, NVKM_ENGINE_MSVLD },
{ 0x01020000, NVKM_ENGINE_MSPDEC },
- { 0x00802000, NVKM_ENGINE_CE0 },
+ { 0x00802000, NVKM_ENGINE_CE, 0 },
{ 0x00400002, NVKM_ENGINE_MSPPP },
{ 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
@@ -37,7 +37,7 @@ gt215_mc_reset[] = {
static const struct nvkm_mc_map
gt215_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
- { 0x00400000, NVKM_ENGINE_CE0 },
+ { 0x00400000, NVKM_ENGINE_CE, 0 },
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
{ 0x00001000, NVKM_ENGINE_GR },
@@ -71,7 +71,7 @@ gt215_mc = {
};
int
-gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gt215_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gt215_mc, device, index, pmc);
+ return nvkm_mc_new_(&gt215_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 6509defd1460..bc0d09bafa99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -80,7 +80,7 @@ nv04_mc = {
};
int
-nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv04_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv04_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 9213107901e6..ab59ca1ee068 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -44,7 +44,7 @@ nv11_mc = {
};
int
-nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv11_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv11_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv11_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index 64bf5bbf8146..03d756e26e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -53,7 +53,7 @@ nv17_mc = {
};
int
-nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv17_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv17_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv17_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 65fa44a64b98..95f65766e8b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -48,7 +48,7 @@ nv44_mc = {
};
int
-nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv44_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv44_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index fe93b4fd7100..fce3613cdfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -55,7 +55,7 @@ nv50_mc = {
};
int
-nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv50_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv50_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv50_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4aab753a6040..c8bcabb98f99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -4,14 +4,15 @@
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
-void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
- int index, struct nvkm_mc *);
-int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
- int index, struct nvkm_mc **);
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mc *);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mc **);
struct nvkm_mc_map {
u32 stat;
- u32 unit;
+ enum nvkm_subdev_type type;
+ int inst;
bool noauto;
};
@@ -26,7 +27,6 @@ struct nvkm_mc_func {
void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_stat)(struct nvkm_mc *);
- void (*intr_hack)(struct nvkm_mc *, bool *handled);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
@@ -53,7 +53,7 @@ void gf100_mc_unk260(struct nvkm_mc *, u32);
void gp100_mc_intr_unarm(struct nvkm_mc *);
void gp100_mc_intr_rearm(struct nvkm_mc *);
void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
-int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
+int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_mc **);
extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index d098c44a4fcb..58db83ebadc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -19,37 +19,118 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define tu102_mc(p) container_of((p), struct tu102_mc, base)
#include "priv.h"
+struct tu102_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
static void
-tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_update(struct tu102_mc *mc)
{
- struct nvkm_device *device = mc->subdev.device;
- u32 stat = nvkm_rd32(device, 0xb81010);
- if (stat & 0x00000050) {
- struct nvkm_subdev *subdev =
- nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
- nvkm_wr32(device, 0xb81010, stat & 0x00000050);
- if (subdev)
- nvkm_subdev_intr(subdev);
- *handled = true;
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
+
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
}
+
+ if (mask & 0x00000200)
+ nvkm_wr32(device, 0xb81608, 0x6);
+ else
+ nvkm_wr32(device, 0xb81610, 0x6);
}
+void
+tu102_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static u32
+tu102_mc_intr_stat(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x000100);
+ u32 intr1 = nvkm_rd32(device, 0x000104);
+ u32 intr_top = nvkm_rd32(device, 0xb81600);
+
+ /* Turing and above route the MMU fault interrupts via a different
+ * interrupt tree with different control registers. For the moment, remap
+ * them back to the old PMC vector.
+ */
+ if (intr_top & 0x00000006)
+ intr0 |= 0x00000200;
+
+ return intr0 | intr1;
+}
+
static const struct nvkm_mc_func
tu102_mc = {
.init = nv50_mc_init,
.intr = gp100_mc_intr,
- .intr_unarm = gp100_mc_intr_unarm,
- .intr_rearm = gp100_mc_intr_rearm,
- .intr_mask = gp100_mc_intr_mask,
- .intr_stat = gf100_mc_intr_stat,
- .intr_hack = tu102_mc_intr_hack,
+ .intr_unarm = tu102_mc_intr_unarm,
+ .intr_rearm = tu102_mc_intr_rearm,
+ .intr_mask = tu102_mc_intr_mask,
+ .intr_stat = tu102_mc_intr_stat,
.reset = gk104_mc_reset,
};
+static int
+tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
+{
+ struct tu102_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(func, device, type, inst, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
+
int
-tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&tu102_mc, device, index, pmc);
+ return tu102_mc_new_(&tu102_mc, device, type, inst, pmc);
}
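tu102 drops the old intr_hack in favour of a software interrupt state machine: an armed flag and a mask live in struct tu102_mc under a spinlock, and every transition funnels through tu102_mc_intr_update(), which reprograms both the legacy PMC enables (0x000160/0x000180) and the new 0xb81608/0xb81610 branch carrying the fault vector (bit 9, hence the 0x00000200 checks). The invariant, sketched with the same names the patch uses:

	/* Every state change re-derives the hardware masks from
	 * (mc->intr, mc->mask) under mc->lock, so unarm/rearm/mask
	 * can never leave the registers half-programmed.
	 */
	spin_lock_irqsave(&mc->lock, flags);
	mc->mask = (mc->mask & ~mask) | intr;	/* or toggle mc->intr */
	tu102_mc_intr_update(mc);		/* single reprogram point */
	spin_unlock_irqrestore(&mc->lock, flags);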
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index de91e9a26172..ad3b44a9e0e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
nvkm_vmm_unref(&mmu->vmm);
nvkm_mmu_ptc_fini(mmu);
+ mutex_destroy(&mmu->mutex);
return mmu;
}
@@ -414,22 +415,23 @@ nvkm_mmu = {
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu *mmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
{
- nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
+ nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
mmu->func = func;
mmu->dma_bits = func->dma_bits;
nvkm_mmu_ptc_init(mmu);
+ mutex_init(&mmu->mutex);
mmu->user.ctor = nvkm_ummu_new;
mmu->user.base = func->mmu.user;
}
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu **pmmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
{
if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mmu_ctor(func, device, index, *pmmu);
+ nvkm_mmu_ctor(func, device, type, inst, *pmmu);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
index 8accda5a772b..ce47a3b97be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -35,7 +35,8 @@ g84_mmu = {
};
int
-g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2cd5ec81c0d0..7a28b1d49f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -84,7 +84,8 @@ gf100_mmu = {
};
int
-gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
index 3d7d1eb1cff9..34c9b2b821f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -35,7 +35,8 @@ gk104_mmu = {
};
int
-gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
index ac74965a60d4..a7db29c429ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -35,7 +35,8 @@ gk20a_mmu = {
};
int
-gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index 83990c83f9f8..e1696f637a68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -90,9 +90,10 @@ gm200_mmu_fixed = {
};
int
-gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->fb->page)
- return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
- return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 7353a94b4091..e6e1a8ad701e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -47,9 +47,10 @@ gm20b_mmu_fixed = {
};
int
-gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->fb->page)
- return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
- return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 65cb9d28e60e..daa5ab0f8711 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -37,9 +37,10 @@ gp100_mmu = {
};
int
-gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
- return gm200_mmu_new(device, index, pmmu);
- return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+ return gm200_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 0a50be9a785a..edd0bf9a5cd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -37,9 +37,10 @@ gp10b_mmu = {
};
int
-gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
- return gm20b_mmu_new(device, index, pmmu);
- return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+ return gm20b_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
index e0997eedd6d9..fb8bdc88d566 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -37,7 +37,8 @@ gv100_mmu = {
};
int
-gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
index 0527b50730d9..514876d6411b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -35,7 +35,8 @@ mcp77_mmu = {
};
int
-mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index d201c887c2cd..0674aa8f68c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -35,7 +35,8 @@ nv04_mmu = {
};
int
-nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index adca81895c09..909f92b72847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -47,11 +47,12 @@ nv41_mmu = {
};
int
-nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
- return nv04_mmu_new(device, index, pmmu);
+ return nv04_mmu_new(device, type, inst, pmmu);
- return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 598c53a27bde..dd2a8d461da3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -62,11 +62,12 @@ nv44_mmu = {
};
int
-nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
- return nv04_mmu_new(device, index, pmmu);
+ return nv04_mmu_new(device, type, inst, pmmu);
- return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index c0083ddda65a..78d46e35d0a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -71,7 +71,8 @@ nv50_mmu = {
};
int
-nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv50_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 479b02344271..5265bf4d8366 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -4,10 +4,10 @@
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
#include <subdev/mmu.h>
-void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu *);
-int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu **);
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu **);
struct nvkm_mmu_func {
void (*init)(struct nvkm_mmu *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 94081f35f967..8d060ce47f86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -51,7 +51,8 @@ tu102_mmu = {
};
int
-tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index 6a2d9eb8e1ea..5438384d9a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
u64 addr = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
/* Looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc, the more it decreases.
*/
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
if (nvkm_rd32(device, 0x100c80) & 0x00008000)
break;
);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
index 1d3369683a21..31984671daf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0x100810, 0x00000022);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100810) & 0x00000020)
break;
);
nvkm_wr32(device, 0x100810, 0x00000000);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
static const struct nvkm_vmm_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 2d89e27e8e9e..b7548dcd72c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
struct nvkm_device *device = subdev->device;
int i, id;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
if (!atomic_read(&vmm->engref[i]))
continue;
@@ -207,7 +207,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
case NVKM_ENGINE_MSVLD : id = 0x09; break;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : id = 0x0a; break;
- case NVKM_ENGINE_CE0 : id = 0x0d; break;
+ case NVKM_ENGINE_CE : id = 0x0d; break;
default:
continue;
}
@@ -217,10 +217,9 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
break;
) < 0)
- nvkm_error(subdev, "%s mmu invalidate timeout\n",
- nvkm_subdev_name[i]);
+ nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index b1294d0076c0..6cb5eefa45e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -26,15 +26,14 @@
static void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
nvkm_wr32(device, 0xb830a4, 0x00000000);
@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
break;
);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
static const struct nvkm_vmm_func
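The vmm flush paths (gf100, nv41, nv50 and tu102 above) all move from the subdev-wide mutex to vmm->mmu->mutex; the lock brackets only the flush trigger-and-poll register sequence (0x100cbc/0x100c80, 0x100810, or 0xb830a0+ on Turing), not the page tables themselves. The shared shape, as a sketch (FLUSH_REG/flush_done() are placeholders):

	mutex_lock(&vmm->mmu->mutex);
	nvkm_wr32(device, FLUSH_REG, 0x00000001);	/* trigger */
	nvkm_msec(device, 2000,
		if (flush_done(device))			/* poll    */
			break;
	);
	mutex_unlock(&vmm->mmu->mutex);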
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index f44682d62f75..c1acfe642da3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -230,7 +230,8 @@ nvkm_mxm = {
};
int
-nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
+nvkm_mxm_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mxm **pmxm)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_mxm *mxm;
@@ -240,7 +241,7 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev);
+ nvkm_subdev_ctor(&nvkm_mxm, device, type, inst, &mxm->subdev);
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nvbios_rd08(bios, data))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 70e2c414bb7b..f3167904dcb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -201,12 +201,13 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
}
int
-nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
+nv50_mxm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pmxm)
{
struct nvkm_mxm *mxm;
int ret;
- ret = nvkm_mxm_new_(device, index, &mxm);
+ ret = nvkm_mxm_new_(device, type, inst, &mxm);
if (mxm)
*pmxm = &mxm->subdev;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index fc8f69e6fc64..fcacb6c6a7f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -12,5 +12,5 @@ struct nvkm_mxm {
u8 *mxms;
};
-int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+int nvkm_mxm_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mxm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index ee2431a7804e..a7d42ea8ba28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -183,13 +183,13 @@ nvkm_pci_func = {
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
- int index, struct nvkm_pci **ppci)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci)
{
struct nvkm_pci *pci;
if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
+ nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
pci->func = func;
pci->pdev = device->func->pci(device)->pdev;
pci->irq = -1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 62438d892f42..5b29aacedef3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -150,7 +150,8 @@ g84_pci_func = {
};
int
-g84_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g84_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g84_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g84_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index 48874359d5f6..a9e0674009c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -51,7 +51,8 @@ g92_pci_func = {
};
int
-g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g92_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g92_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g92_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 09adb37a5664..7bacd0693283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -43,7 +43,8 @@ g94_pci_func = {
};
int
-g94_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g94_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g94_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g94_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 00a5e7d3ee9d..099906092fe1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -96,7 +96,8 @@ gf100_pci_func = {
};
int
-gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gf100_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index 11bf419afe3f..bcde609ba866 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -43,7 +43,8 @@ gf106_pci_func = {
};
int
-gf106_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf106_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gf106_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gf106_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index e68030507d88..6be87ecffc89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -222,7 +222,8 @@ gk104_pci_func = {
};
int
-gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gk104_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gk104_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gk104_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index 82c5234a06ff..a5fafda0014d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -38,7 +38,8 @@ gp100_pci_func = {
};
int
-gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gp100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gp100_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 5b1ed42cb90b..9ab64194b185 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -52,7 +52,8 @@ nv04_pci_func = {
};
int
-nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv04_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv04_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6eb417765802..6a3c31cf0200 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -59,7 +59,8 @@ nv40_pci_func = {
};
int
-nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv40_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv40_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index fc617e4c0ab6..9cad17f178ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -45,7 +45,8 @@ nv46_pci_func = {
};
int
-nv46_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv46_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv46_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv46_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 1f1b26b5fa72..741e34bf307c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -31,7 +31,8 @@ nv4c_pci_func = {
};
int
-nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv4c_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv4c_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 7009aad86b6e..9b7583532962 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -4,8 +4,8 @@
#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
#include <subdev/pci.h>
-int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
- int index, struct nvkm_pci **);
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pci **);
struct nvkm_pci_func {
void (*init)(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index a0fe607c9c07..24382875fb4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -148,6 +148,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
nvkm_falcon_cmdq_del(&pmu->hpq);
nvkm_falcon_qmgr_del(&pmu->qmgr);
nvkm_falcon_dtor(&pmu->falcon);
+ mutex_destroy(&pmu->send.mutex);
return nvkm_pmu(subdev);
}
@@ -162,11 +163,13 @@ nvkm_pmu = {
int
nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_pmu *pmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu)
{
int ret;
- nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
+ nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev);
+
+ mutex_init(&pmu->send.mutex);
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
@@ -177,9 +180,8 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
pmu->func = fwif->func;
- ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
- nvkm_subdev_name[pmu->subdev.index], 0x10a000,
- &pmu->falcon);
+ ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name,
+ 0x10a000, &pmu->falcon);
if (ret)
return ret;
@@ -195,10 +197,10 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int
nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_pmu **ppmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_pmu_ctor(fwif, device, index, *ppmu);
+ return nvkm_pmu_ctor(fwif, device, type, inst, *ppmu);
}
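
Beyond the signature change, the PMU now owns the lock that serialises message
submission: pmu->send.mutex is initialised in the constructor and destroyed in
the destructor, instead of borrowing the shared subdev mutex. A userspace
analogue of that lifecycle, sketched with pthreads (structure names are
illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct pmu {
	struct {
		pthread_mutex_t mutex;  /* serialises message submission */
	} send;
};

static struct pmu *pmu_new(void)
{
	struct pmu *pmu = calloc(1, sizeof(*pmu));

	if (pmu)
		pthread_mutex_init(&pmu->send.mutex, NULL); /* pairs with... */
	return pmu;
}

static void pmu_del(struct pmu *pmu)
{
	pthread_mutex_destroy(&pmu->send.mutex);            /* ...the dtor */
	free(pmu);
}

int main(void)
{
	struct pmu *pmu = pmu_new();

	if (!pmu)
		return 1;
	pthread_mutex_lock(&pmu->send.mutex);
	/* critical section: submit a message, wait for the reply */
	pthread_mutex_unlock(&pmu->send.mutex);
	pmu_del(pmu);
	return 0;
}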
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 3ecb3d9cbcf2..f725a3ec5479 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,14 +30,14 @@ void
gf100_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
- nvkm_mc_enable(device, NVKM_SUBDEV_PMU);
+ nvkm_mc_disable(device, NVKM_SUBDEV_PMU, 0);
+ nvkm_mc_enable(device, NVKM_SUBDEV_PMU, 0);
}
bool
gf100_pmu_enabled(struct nvkm_pmu *pmu)
{
- return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
+ return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU, 0);
}
static const struct nvkm_pmu_func
@@ -69,7 +69,8 @@ gf100_pmu_fwif[] = {
};
int
-gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf100_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gf100_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 8dd0271aaaee..0f4b6697a4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -47,7 +47,8 @@ gf119_pmu_fwif[] = {
};
int
-gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf119_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gf119_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8b70cc17a634..9e7631d7aa41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -127,7 +127,8 @@ gk104_pmu_fwif[] = {
};
int
-gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk104_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk104_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 0081f2141b10..dbaefee53e1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -106,7 +106,8 @@ gk110_pmu_fwif[] = {
};
int
-gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk110_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk110_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b227c701a5e7..a08fb049e6d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -48,7 +48,8 @@ gk208_pmu_fwif[] = {
};
int
-gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk208_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk208_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 26c1adf8f44c..a67a42e73f08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -210,7 +210,8 @@ gk20a_pmu_fwif[] = {
};
int
-gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
int ret;
@@ -219,7 +220,7 @@ gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
return -ENOMEM;
*ppmu = &pmu->base;
- ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+ ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 5afb55e58b51..622ee637f97b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -49,7 +49,8 @@ gm107_pmu_fwif[] = {
};
int
-gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm107_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm107_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 383376addb41..5968c7696596 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -45,7 +45,8 @@ gm200_pmu_fwif[] = {
};
int
-gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm200_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm200_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 8f6ed5373ea1..148706977eec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -240,7 +240,8 @@ gm20b_pmu_fwif[] = {
};
int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 3d8ce14dba7b..00da1b873ce8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -51,7 +51,8 @@ gp102_pmu_fwif[] = {
};
int
-gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 9c237c426599..461f722656e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -99,7 +99,8 @@ gp10b_pmu_fwif[] = {
};
int
-gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp10b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gp10b_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 88b909913ff9..b0407b86bc10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -34,7 +34,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
struct nvkm_device *device = subdev->device;
u32 addr;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&pmu->send.mutex);
/* wait for a free slot in the fifo */
addr = nvkm_rd32(device, 0x10a4a0);
if (nvkm_msec(device, 2000,
@@ -42,7 +42,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
if (tmp != (addr ^ 8))
break;
) < 0) {
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&pmu->send.mutex);
return -EBUSY;
}
@@ -79,7 +79,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
reply[1] = pmu->recv.data[1];
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&pmu->send.mutex);
return 0;
}
@@ -282,7 +282,8 @@ gt215_pmu_fwif[] = {
};
int
-gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 276b6d778e53..e7860d177353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -62,8 +62,8 @@ int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
-int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
- int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
- int index, struct nvkm_pmu **);
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
new file mode 100644
index 000000000000..d47d1bdd0f2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/privring/gf100.o
+nvkm-y += nvkm/subdev/privring/gf117.o
+nvkm-y += nvkm/subdev/privring/gk104.o
+nvkm-y += nvkm/subdev/privring/gk20a.o
+nvkm-y += nvkm/subdev/privring/gm200.o
+nvkm-y += nvkm/subdev/privring/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
index 2340040942c9..ef7caca70372 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
@@ -22,44 +22,42 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
-gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
- nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
- nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
- nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
void
-gf100_ibus_intr(struct nvkm_subdev *ibus)
+gf100_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x121c58);
u32 intr1 = nvkm_rd32(device, 0x121c5c);
u32 hubnr = nvkm_rd32(device, 0x121c70);
@@ -70,7 +68,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gf100_ibus_intr_hub(ibus, i);
+ gf100_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
@@ -78,7 +76,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gf100_ibus_intr_rop(ibus, i);
+ gf100_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
@@ -86,16 +84,22 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gf100_ibus_intr_gpc(ibus, i);
+ gf100_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
-gf100_ibus_init(struct nvkm_subdev *ibus)
+gf100_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_wr32(device, 0x12232c, 0x00100064);
nvkm_wr32(device, 0x122330, 0x00100064);
@@ -105,14 +109,14 @@ gf100_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gf100_ibus = {
- .init = gf100_ibus_init,
- .intr = gf100_ibus_intr,
+gf100_privring = {
+ .init = gf100_privring_init,
+ .intr = gf100_privring_intr,
};
int
-gf100_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gf100_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gf100_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
index 1124dadac145..c78721fcd729 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
@@ -24,9 +24,9 @@
#include "priv.h"
static int
-gf117_ibus_init(struct nvkm_subdev *ibus)
+gf117_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
@@ -34,14 +34,14 @@ gf117_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gf117_ibus = {
- .init = gf117_ibus_init,
- .intr = gf100_ibus_intr,
+gf117_privring = {
+ .init = gf117_privring_init,
+ .intr = gf100_privring_intr,
};
int
-gf117_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gf117_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gf117_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
index f3915f85838e..568a4c0997bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
@@ -22,44 +22,42 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
-gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
- nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
- nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
- nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+ nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
void
-gk104_ibus_intr(struct nvkm_subdev *ibus)
+gk104_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x120058);
u32 intr1 = nvkm_rd32(device, 0x12005c);
u32 hubnr = nvkm_rd32(device, 0x120070);
@@ -70,7 +68,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gk104_ibus_intr_hub(ibus, i);
+ gk104_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
@@ -78,7 +76,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gk104_ibus_intr_rop(ibus, i);
+ gk104_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
@@ -86,16 +84,22 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gk104_ibus_intr_gpc(ibus, i);
+ gk104_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
-gk104_ibus_init(struct nvkm_subdev *ibus)
+gk104_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
@@ -107,15 +111,15 @@ gk104_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gk104_ibus = {
- .preinit = gk104_ibus_init,
- .init = gk104_ibus_init,
- .intr = gk104_ibus_intr,
+gk104_privring = {
+ .preinit = gk104_privring_init,
+ .init = gk104_privring_init,
+ .intr = gk104_privring_intr,
};
int
-gk104_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gk104_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gk104_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
index 187d544378b0..55e4a60d8770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
@@ -19,13 +19,13 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
#include <subdev/timer.h>
static void
-gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
+gk20a_privring_init_privring_ring(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
@@ -46,14 +46,14 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
}
static void
-gk20a_ibus_intr(struct nvkm_subdev *ibus)
+gk20a_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 status0 = nvkm_rd32(device, 0x120058);
if (status0 & 0x7) {
- nvkm_debug(ibus, "resetting ibus ring\n");
- gk20a_ibus_init_ibus_ring(ibus);
+ nvkm_debug(privring, "resetting privring ring\n");
+ gk20a_privring_init_privring_ring(privring);
}
/* Acknowledge interrupt */
@@ -65,21 +65,21 @@ gk20a_ibus_intr(struct nvkm_subdev *ibus)
}
static int
-gk20a_ibus_init(struct nvkm_subdev *ibus)
+gk20a_privring_init(struct nvkm_subdev *privring)
{
- gk20a_ibus_init_ibus_ring(ibus);
+ gk20a_privring_init_privring_ring(privring);
return 0;
}
static const struct nvkm_subdev_func
-gk20a_ibus = {
- .init = gk20a_ibus_init,
- .intr = gk20a_ibus_intr,
+gk20a_privring = {
+ .init = gk20a_privring_init,
+ .intr = gk20a_privring_intr,
};
int
-gk20a_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gk20a_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gk20a_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
index 0f1f0ad6377e..b4eaf6db36d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
@@ -24,13 +24,13 @@
#include "priv.h"
static const struct nvkm_subdev_func
-gm200_ibus = {
- .intr = gk104_ibus_intr,
+gm200_privring = {
+ .intr = gk104_privring_intr,
};
int
-gm200_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
index 0347b367cefe..4534111cf907 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
@@ -19,14 +19,14 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
#include "priv.h"
static int
-gp10b_ibus_init(struct nvkm_subdev *ibus)
+gp10b_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_wr32(device, 0x1200a8, 0x0);
@@ -42,14 +42,14 @@ gp10b_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gp10b_ibus = {
- .init = gp10b_ibus_init,
- .intr = gk104_ibus_intr,
+gp10b_privring = {
+ .init = gp10b_privring_init,
+ .intr = gk104_privring_intr,
};
int
-gp10b_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gp10b_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gp10b_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
new file mode 100644
index 000000000000..b378c14bc8dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_PRIV_H__
+#define __NVKM_PRIVRING_PRIV_H__
+#include <subdev/privring.h>
+
+void gf100_privring_intr(struct nvkm_subdev *);
+void gk104_privring_intr(struct nvkm_subdev *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 4a4d1e224126..fc5ee118e910 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -421,10 +421,10 @@ nvkm_therm = {
};
void
-nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
- int index, const struct nvkm_therm_func *func)
+nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, enum nvkm_subdev_type type,
+ int inst, const struct nvkm_therm_func *func)
{
- nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
+ nvkm_subdev_ctor(&nvkm_therm, device, type, inst, &therm->subdev);
therm->func = func;
nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
@@ -443,13 +443,13 @@ nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
int
nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
struct nvkm_therm *therm;
if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_therm_ctor(therm, device, index, func);
+ nvkm_therm_ctor(therm, device, type, inst, func);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 96f8da40ac82..4af86f2d3e7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -223,12 +223,13 @@ g84_therm = {
};
int
-g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
+g84_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
struct nvkm_therm *therm;
int ret;
- ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+ ret = nvkm_therm_new_(&g84_therm, device, type, inst, &therm);
*ptherm = therm;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 0981b02790e2..2b031d4eaeb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -146,8 +146,8 @@ gf119_therm = {
};
int
-gf119_therm_new(struct nvkm_device *device, int index,
- struct nvkm_therm **ptherm)
+gf119_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gf119_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
index 4e03971d2e3d..45e295c271fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -35,8 +35,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
int i;
/* Program ENG_MANT, ENG_FILTER */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
@@ -47,8 +47,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
/* Enable clockgating (ENG_CLK = RUN->AUTO) */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
@@ -64,8 +64,8 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
int i;
/* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
@@ -73,15 +73,15 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
}
const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
- { NVKM_ENGINE_GR, 0x00 },
- { NVKM_ENGINE_MSPDEC, 0x04 },
- { NVKM_ENGINE_MSPPP, 0x08 },
- { NVKM_ENGINE_MSVLD, 0x0c },
- { NVKM_ENGINE_CE0, 0x10 },
- { NVKM_ENGINE_CE1, 0x14 },
- { NVKM_ENGINE_MSENC, 0x18 },
- { NVKM_ENGINE_CE2, 0x1c },
- { NVKM_SUBDEV_NR, 0 },
+ { NVKM_ENGINE_GR, 0, 0x00 },
+ { NVKM_ENGINE_MSPDEC, 0, 0x04 },
+ { NVKM_ENGINE_MSPPP, 0, 0x08 },
+ { NVKM_ENGINE_MSVLD, 0, 0x0c },
+ { NVKM_ENGINE_CE, 0, 0x10 },
+ { NVKM_ENGINE_CE, 1, 0x14 },
+ { NVKM_ENGINE_MSENC, 0, 0x18 },
+ { NVKM_ENGINE_CE, 2, 0x1c },
+ { NVKM_SUBDEV_NR },
};
const struct gf100_idle_filter gk104_idle_filter = {
@@ -106,9 +106,8 @@ gk104_therm_func = {
};
static int
-gk104_therm_new_(const struct nvkm_therm_func *func,
- struct nvkm_device *device,
- int index,
+gk104_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst,
const struct gk104_clkgate_engine_info *clkgate_order,
const struct gf100_idle_filter *idle_filter,
struct nvkm_therm **ptherm)
@@ -118,19 +117,17 @@ gk104_therm_new_(const struct nvkm_therm_func *func,
if (!therm)
return -ENOMEM;
- nvkm_therm_ctor(&therm->base, device, index, func);
+ nvkm_therm_ctor(&therm->base, device, type, inst, func);
*ptherm = &therm->base;
therm->clkgate_order = clkgate_order;
therm->idle_filter = idle_filter;
-
return 0;
}
int
-gk104_therm_new(struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+gk104_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
- return gk104_therm_new_(&gk104_therm_func, device, index,
+ return gk104_therm_new_(&gk104_therm_func, device, type, inst,
gk104_clkgate_engine_info, &gk104_idle_filter,
ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
index 293e7743b19b..9a8641421038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -31,7 +31,8 @@
#include "gf100.h"
struct gk104_clkgate_engine_info {
- enum nvkm_devidx engine;
+ enum nvkm_subdev_type type;
+ int inst;
u8 offset;
};
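
With the added inst field, each clockgate table row names an engine by a
(type, instance) pair, and the terminating sentinel shrinks to
{ NVKM_SUBDEV_NR }, so CE0/CE1/CE2 collapse into (CE, 0..2). A standalone
sketch of the sentinel-terminated walk; enum values and table contents are
invented for illustration:

#include <stdio.h>

enum subdev_type { ENGINE_GR, ENGINE_CE, SUBDEV_NR };

struct clkgate_engine_info {
	enum subdev_type type;
	int inst;
	unsigned char offset;
};

static const struct clkgate_engine_info order[] = {
	{ ENGINE_GR, 0, 0x00 },
	{ ENGINE_CE, 0, 0x10 },
	{ ENGINE_CE, 1, 0x14 },
	{ ENGINE_CE, 2, 0x1c },
	{ SUBDEV_NR },          /* sentinel: type alone terminates the walk */
};

int main(void)
{
	int i;

	for (i = 0; order[i].type != SUBDEV_NR; i++)
		printf("type %d inst %d -> reg offset 0x%02x\n",
		       order[i].type, order[i].inst, order[i].offset);
	return 0;
}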
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 86848ece4d89..c845fd392f58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -68,8 +68,8 @@ gm107_therm = {
};
int
-gm107_therm_new(struct nvkm_device *device, int index,
+gm107_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gm107_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
index 73dc78093d5d..e0cdd12463ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
@@ -32,8 +32,8 @@ gm200_therm = {
};
int
-gm200_therm_new(struct nvkm_device *device, int index,
+gm200_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gm200_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gm200_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
index 9f0dea3f61dc..44f021392b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -49,8 +49,8 @@ gp100_therm = {
};
int
-gp100_therm_new(struct nvkm_device *device, int index,
+gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f2aff5..9e451bd9395c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -68,8 +68,8 @@ gt215_therm = {
};
int
-gt215_therm_new(struct nvkm_device *device, int index,
- struct nvkm_therm **ptherm)
+gt215_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gt215_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 2c92ffb5f9d0..c13fee9734df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -197,8 +197,8 @@ nv40_therm = {
};
int
-nv40_therm_new(struct nvkm_device *device, int index,
+nv40_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+ return nvkm_therm_new_(&nv40_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 9b57b433d4cf..9cf16a75a3cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -169,8 +169,8 @@ nv50_therm = {
};
int
-nv50_therm_new(struct nvkm_device *device, int index,
+nv50_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+ return nvkm_therm_new_(&nv50_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 21659daf1864..54e960589411 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -30,10 +30,10 @@
#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
-int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
- int index, struct nvkm_therm **);
-void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
- int index, const struct nvkm_therm_func *func);
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_therm **);
+void nvkm_therm_ctor(struct nvkm_therm *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ const struct nvkm_therm_func *);
struct nvkm_fan {
struct nvkm_therm *parent;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index dd922033628c..8b0da0c06268 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -183,14 +183,14 @@ nvkm_timer = {
int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
- int index, struct nvkm_timer **ptmr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_timer **ptmr)
{
struct nvkm_timer *tmr;
if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
+ nvkm_subdev_ctor(&nvkm_timer, device, type, inst, &tmr->subdev);
tmr->func = func;
INIT_LIST_HEAD(&tmr->alarms);
spin_lock_init(&tmr->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 9ed5f64912d0..73c3776b6b83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -33,7 +33,8 @@ gk20a_timer = {
};
int
-gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+gk20a_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
+ return nvkm_timer_new_(&gk20a_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7f48249f41de..0058e856b378 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -145,7 +145,8 @@ nv04_timer = {
};
int
-nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv04_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv04_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
index bb99a152f26e..7e1f8c22f2a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -82,7 +82,8 @@ nv40_timer = {
};
int
-nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv40_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv40_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
index 3cf9ec1b1b57..c2b263721f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -79,7 +79,8 @@ nv41_timer = {
};
int
-nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv41_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv41_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 89e97294b182..e6debe7e2fa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -4,8 +4,8 @@
#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
#include <subdev/timer.h>
-int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
- int index, struct nvkm_timer **);
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_timer **);
struct nvkm_timer_func {
void (*init)(struct nvkm_timer *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
index 438d9d78ab52..d5db845195dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/top/base.o
nvkm-y += nvkm/subdev/top/gk104.o
+nvkm-y += nvkm/subdev/top/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index cce6e4e90ebf..28d0789f50fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -28,7 +28,8 @@ nvkm_top_device_new(struct nvkm_top *top)
{
struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info) {
- info->index = NVKM_SUBDEV_NR;
+ info->type = NVKM_SUBDEV_NR;
+ info->inst = -1;
info->addr = 0;
info->fault = -1;
info->engine = -1;
@@ -41,14 +42,14 @@ nvkm_top_device_new(struct nvkm_top *top)
}
u32
-nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == index)
+ if (info->type == type && info->inst == inst)
return info->addr;
}
}
@@ -57,14 +58,14 @@ nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
}
u32
-nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == index && info->reset >= 0)
+ if (info->type == type && info->inst == inst && info->reset >= 0)
return BIT(info->reset);
}
}
@@ -73,14 +74,14 @@ nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
}
u32
-nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == devidx && info->intr >= 0)
+ if (info->type == type && info->inst == inst && info->intr >= 0)
return BIT(info->intr);
}
}
@@ -88,44 +89,21 @@ nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
return 0;
}
-u32
-nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
-{
- struct nvkm_top *top = device->top;
- struct nvkm_top_device *info;
- u64 subdevs = 0;
- u32 handled = 0;
-
- if (top) {
- list_for_each_entry(info, &top->device, head) {
- if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
- if (intr & BIT(info->intr)) {
- subdevs |= BIT_ULL(info->index);
- handled |= BIT(info->intr);
- }
- }
- }
- }
-
- *psubdevs = subdevs;
- return intr & ~handled;
-}
-
int
-nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
- if (info->index == devidx && info->fault >= 0)
+ if (info->type == type && info->inst == inst && info->fault >= 0)
return info->fault;
}
return -ENOENT;
}
-enum nvkm_devidx
+struct nvkm_subdev *
nvkm_top_fault(struct nvkm_device *device, int fault)
{
struct nvkm_top *top = device->top;
@@ -133,28 +111,10 @@ nvkm_top_fault(struct nvkm_device *device, int fault)
list_for_each_entry(info, &top->device, head) {
if (info->fault == fault)
- return info->index;
- }
-
- return NVKM_SUBDEV_NR;
-}
-
-enum nvkm_devidx
-nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
-{
- struct nvkm_top *top = device->top;
- struct nvkm_top_device *info;
- int n = 0;
-
- list_for_each_entry(info, &top->device, head) {
- if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
- *runl = info->runlist;
- *engn = info->engine;
- return info->index;
- }
+ return nvkm_device_subdev(device, info->type, info->inst);
}
- return -ENODEV;
+ return NULL;
}
static int
@@ -186,12 +146,12 @@ nvkm_top = {
int
nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
- int index, struct nvkm_top **ptop)
+ enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop)
{
struct nvkm_top *top;
if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
+ nvkm_subdev_ctor(&nvkm_top, device, type, inst, &top->subdev);
top->func = func;
INIT_LIST_HEAD(&top->device);
return 0;
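
All of the top lookups above now match entries on the (type, inst) pair rather
than a single devidx, and nvkm_top_fault() resolves straight to a subdev
pointer instead of handing back an enum for the caller to translate. A compact
model of the lookup, with a plain array standing in for the kernel list and
invented values:

#include <stdio.h>

enum subdev_type { ENGINE_GR, ENGINE_CE, SUBDEV_NR };

struct top_device {
	enum subdev_type type;
	int inst;
	unsigned addr;
	int fault;
};

static const struct top_device devices[] = {
	{ ENGINE_GR, 0, 0x400000,  0 },
	{ ENGINE_CE, 0, 0x104000, 15 },
	{ ENGINE_CE, 1, 0x105000, 16 },
};

/* mirrors nvkm_top_addr(): both type and instance must agree */
static unsigned top_addr(enum subdev_type type, int inst)
{
	unsigned i;

	for (i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
		if (devices[i].type == type && devices[i].inst == inst)
			return devices[i].addr;
	}
	return 0;
}

int main(void)
{
	printf("CE1 at %06x\n", top_addr(ENGINE_CE, 1));
	return 0;
}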
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
new file mode 100644
index 000000000000..31933f3e5a07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static int
+ga100_top_oneinit(struct nvkm_top *top)
+{
+ struct nvkm_subdev *subdev = &top->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_top_device *info = NULL;
+ u32 data, type, inst;
+ int i, n, size = nvkm_rd32(device, 0x0224fc) >> 20;
+
+ for (i = 0, n = 0; i < size; i++) {
+ if (!info) {
+ if (!(info = nvkm_top_device_new(top)))
+ return -ENOMEM;
+ type = ~0;
+ inst = 0;
+ }
+
+ data = nvkm_rd32(device, 0x022800 + (i * 0x04));
+ nvkm_trace(subdev, "%02x: %08x\n", i, data);
+ if (!data && n == 0)
+ continue;
+
+ switch (n++) {
+ case 0:
+ type = (data & 0x3f000000) >> 24;
+ inst = (data & 0x000f0000) >> 16;
+ info->fault = (data & 0x0000007f);
+ break;
+ case 1:
+ info->addr = (data & 0x00fff000);
+ info->reset = (data & 0x0000001f);
+ break;
+ case 2:
+ info->runlist = (data & 0x0000fc00) >> 10;
+ info->engine = (data & 0x00000003);
+ break;
+ default:
+ break;
+ }
+
+ if (data & 0x80000000)
+ continue;
+ n = 0;
+
+ /* Translate engine type to NVKM engine identifier. */
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
+ switch (type) {
+ case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
+ case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
+ case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+ case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+ case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+ case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
+ case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
+ case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
+ case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break;
+ case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break;
+ default:
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+ "runlist %2d engine %2d reset %2d\n", type, inst,
+ info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
+ info->addr, info->fault, info->runlist, info->engine, info->reset);
+ info = NULL;
+ }
+
+ return 0;
+}
+
+static const struct nvkm_top_func
+ga100_top = {
+ .oneinit = ga100_top_oneinit,
+};
+
+int
+ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_top **ptop)
+{
+ return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
+}
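
The new ga100 parser above walks a table of 32-bit words in which bit 31 marks
"this entry continues in the next word", and the running counter n selects
which fields the current word carries. A standalone model of that state
machine; the table contents are invented, but the field masks mirror the ones
used in the code above:

#include <stdio.h>

int main(void)
{
	/* fake device-info words: a two-word entry, a padding word,
	 * then a single-word entry */
	static const unsigned table[] = {
		0x80000000u | (0x13u << 24) | (1u << 16) | 16u,
		0x00105000u | 5u,
		0x00000000u,
		(0x00u << 24) | 3u,
	};
	unsigned type = ~0u, inst = 0, fault = 0, addr = 0;
	unsigned i;
	int n = 0;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		unsigned data = table[i];

		if (n == 0) {           /* starting a fresh entry */
			type = ~0u;
			inst = fault = addr = 0;
			if (!data)      /* skip zero padding between entries */
				continue;
		}

		switch (n++) {
		case 0:                 /* first word: identity */
			type  = (data & 0x3f000000) >> 24;
			inst  = (data & 0x000f0000) >> 16;
			fault = (data & 0x0000007f);
			break;
		case 1:                 /* second word: location */
			addr = data & 0x00fff000;
			break;
		default:
			break;
		}

		if (data & 0x80000000)  /* bit 31 set: same entry continues */
			continue;

		printf("type %02x inst %u fault %2u addr %06x\n",
		       type, inst, fault, addr);
		n = 0;                  /* next word begins a new entry */
	}
	return 0;
}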
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 1156634533f9..4dcad97bd505 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -70,26 +70,26 @@ gk104_top_oneinit(struct nvkm_top *top)
continue;
/* Translate engine type to NVKM engine identifier. */
-#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
-#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
- info->index = NVKM_ENGINE_##A##0 + inst
-#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
switch (type) {
- case 0x00000000: A_(GR ); break;
- case 0x00000001: A_(CE0 ); break;
- case 0x00000002: A_(CE1 ); break;
- case 0x00000003: A_(CE2 ); break;
- case 0x00000008: A_(MSPDEC); break;
- case 0x00000009: A_(MSPPP ); break;
- case 0x0000000a: A_(MSVLD ); break;
- case 0x0000000b: A_(MSENC ); break;
- case 0x0000000c: A_(VIC ); break;
- case 0x0000000d: A_(SEC2 ); break;
- case 0x0000000e: B_(NVENC ); break;
- case 0x0000000f: A_(NVENC1); break;
- case 0x00000010: B_(NVDEC ); break;
- case 0x00000013: B_(CE ); break;
- case 0x00000014: C_(GSP ); break;
+ case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
+ case 0x00000001: O_(NVKM_ENGINE_CE , 0); break;
+ case 0x00000002: O_(NVKM_ENGINE_CE , 1); break;
+ case 0x00000003: O_(NVKM_ENGINE_CE , 2); break;
+ case 0x00000008: O_(NVKM_ENGINE_MSPDEC, 0); break;
+ case 0x00000009: O_(NVKM_ENGINE_MSPPP , 0); break;
+ case 0x0000000a: O_(NVKM_ENGINE_MSVLD , 0); break;
+ case 0x0000000b: O_(NVKM_ENGINE_MSENC , 0); break;
+ case 0x0000000c: O_(NVKM_ENGINE_VIC , 0); break;
+ case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
+ case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+ case 0x0000000f: O_(NVKM_ENGINE_NVENC , 1); break;
+ case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+ case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+ case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
+ case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
+ case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
default:
break;
}
@@ -97,8 +97,7 @@ gk104_top_oneinit(struct nvkm_top *top)
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
"engine %2d runlist %2d intr %2d "
"reset %2d\n", type, inst,
- info->index == NVKM_SUBDEV_NR ? NULL :
- nvkm_subdev_name[info->index],
+ info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
info->addr, info->fault, info->engine, info->runlist,
info->intr, info->reset);
info = NULL;
@@ -113,7 +112,8 @@ gk104_top = {
};
int
-gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop)
+gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_top **ptop)
{
- return nvkm_top_new_(&gk104_top, device, index, ptop);
+ return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index a16baa2941cf..8e103a836705 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -8,19 +8,8 @@ struct nvkm_top_func {
int (*oneinit)(struct nvkm_top *);
};
-int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *,
- int, struct nvkm_top **);
-
-struct nvkm_top_device {
- enum nvkm_devidx index;
- u32 addr;
- int fault;
- int engine;
- int runlist;
- int reset;
- int intr;
- struct list_head head;
-};
+int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_top **);
struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index e344901cfdc7..a17a6dd8d3de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -281,12 +281,12 @@ nvkm_volt = {
void
nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
- int index, struct nvkm_volt *volt)
+ enum nvkm_subdev_type type, int inst, struct nvkm_volt *volt)
{
struct nvkm_bios *bios = device->bios;
int i;
- nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev);
+ nvkm_subdev_ctor(&nvkm_volt, device, type, inst, &volt->subdev);
volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
@@ -319,10 +319,10 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
int
nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
- int index, struct nvkm_volt **pvolt)
+ enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
return -ENOMEM;
- nvkm_volt_ctor(func, device, index, *pvolt);
+ nvkm_volt_ctor(func, device, type, inst, *pvolt);
return 0;
}
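Every *_new_() helper touched by this series follows the same allocate-then-construct shape, now threading (type, inst) through to the subdev constructor instead of a flat index. A hedged user-space sketch of that shape (illustrative names, not the actual NVKM API):

	#include <stdlib.h>

	enum subdev_type { SUBDEV_VOLT, SUBDEV_NR };	/* illustrative */

	struct subdev {
		enum subdev_type type;	/* what kind of unit this is... */
		int inst;		/* ...and which instance of it */
	};

	struct volt {
		struct subdev subdev;
		const void *func;
	};

	static void volt_ctor(const void *func, enum subdev_type type, int inst,
			      struct volt *volt)
	{
		volt->subdev.type = type;
		volt->subdev.inst = inst;
		volt->func = func;
	}

	static int volt_new(const void *func, enum subdev_type type, int inst,
			    struct volt **pvolt)
	{
		if (!(*pvolt = calloc(1, sizeof(**pvolt))))
			return -1;	/* stand-in for -ENOMEM */
		volt_ctor(func, type, inst, *pvolt);
		return 0;
	}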
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
index d9ed6925ca64..b47a1c0817be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -56,12 +56,13 @@ gf100_volt = {
};
int
-gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf100_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&gf100_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
index 547a58f0aeac..03c8a2c2916c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
@@ -46,12 +46,13 @@ gf117_volt = {
};
int
-gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf117_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&gf117_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 1c744e029454..d1ce4309cfb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -95,7 +95,8 @@ gk104_volt_pwm = {
};
int
-gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk104_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
struct dcb_gpio_func gpio;
@@ -114,7 +115,7 @@ gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
return -ENOMEM;
- nvkm_volt_ctor(volt_func, device, index, &volt->base);
+ nvkm_volt_ctor(volt_func, device, type, inst, &volt->base);
*pvolt = &volt->base;
volt->bios = bios;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index ce5d83cdc7cf..8c2faa964511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -144,14 +144,14 @@ gk20a_volt = {
};
int
-gk20a_volt_ctor(struct nvkm_device *device, int index,
+gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
const struct cvb_coef *coefs, int nb_coefs,
int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
- nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+ nvkm_volt_ctor(&gk20a_volt, device, type, inst, &volt->base);
uv = regulator_get_voltage(tdev->vdd);
nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
@@ -172,7 +172,7 @@ gk20a_volt_ctor(struct nvkm_device *device, int index,
}
int
-gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk20a_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
struct gk20a_volt *volt;
@@ -181,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
return -ENOMEM;
*pvolt = &volt->base;
- return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ return gk20a_volt_ctor(device, type, inst, gk20a_cvb_coef,
ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 6a6c97f9684e..01f8a5fcf496 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,7 +37,7 @@ struct gk20a_volt {
struct regulator *vdd;
};
-int gk20a_volt_ctor(struct nvkm_device *device, int index,
+int gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type, int,
const struct cvb_coef *coefs, int nb_coefs,
int vmin, struct gk20a_volt *volt);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 2925b9cae681..c2e9694d333f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -64,7 +64,8 @@ static const u32 speedo_to_vmin[] = {
};
int
-gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gm20b_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
@@ -84,9 +85,9 @@ gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
vmin = speedo_to_vmin[tdev->gpu_speedo_id];
if (tdev->gpu_speedo_id >= 1)
- return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
- ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+ return gk20a_volt_ctor(device, type, inst, gm20b_na_cvb_coef,
+ ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
else
- return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
- ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
+ return gk20a_volt_ctor(device, type, inst, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 23409387abb5..d6a587d6082d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -30,12 +30,13 @@ nv40_volt = {
};
int
-nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+nv40_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&nv40_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 75f13a34671f..24e2d16d1913 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -4,10 +4,10 @@
#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
#include <subdev/volt.h>
-void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
- int index, struct nvkm_volt *);
-int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
- int index, struct nvkm_volt **);
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_volt **);
struct nvkm_volt_func {
int (*oneinit)(struct nvkm_volt *);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 5417e7a47072..e7281da5bc6a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,13 +5,129 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select HDMI
default n
help
DRM display driver for OMAP2/3/4 based boards.
if DRM_OMAP
-source "drivers/gpu/drm/omapdrm/dss/Kconfig"
-source "drivers/gpu/drm/omapdrm/displays/Kconfig"
+config OMAP2_DSS_DEBUG
+ bool "Debug support"
+ default n
+ help
+	  This enables printing of debug messages. Alternatively, debug
+	  messages can be enabled by setting CONFIG_DYNAMIC_DEBUG and then
+	  setting the appropriate flags in <debugfs>/dynamic_debug/control.
+
+config OMAP2_DSS_DEBUGFS
+ bool "Debugfs filesystem support"
+ depends on DEBUG_FS
+ default n
+ help
+	  This enables debugfs for OMAPDSS at <debugfs>/omapdss, which allows
+	  querying the clock and register configuration of dss, dispc, dsi,
+	  hdmi and rfbi.
+
+config OMAP2_DSS_COLLECT_IRQ_STATS
+ bool "Collect DSS IRQ statistics"
+ depends on OMAP2_DSS_DEBUGFS
+ default n
+ help
+ Collect DSS IRQ statistics, printable via debugfs.
+
+	  The statistics can be found in
+ <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
+ <debugfs>/omapdss/dsi_irq for DSI interrupts.
+
+config OMAP2_DSS_DPI
+ bool "DPI support"
+ default y
+ help
+ DPI Interface. This is the Parallel Display Interface.
+
+config OMAP2_DSS_VENC
+ bool "VENC support"
+ default y
+ help
+ OMAP Video Encoder support for S-Video and composite TV-out.
+
+config OMAP2_DSS_HDMI_COMMON
+ bool
+
+config OMAP4_DSS_HDMI
+ bool "HDMI support for OMAP4"
+ default y
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI support for OMAP4 based SoCs.
+
+config OMAP4_DSS_HDMI_CEC
+ bool "Enable HDMI CEC support for OMAP4"
+ depends on OMAP4_DSS_HDMI
+ select CEC_CORE
+ default y
+ help
+	  When selected, the HDMI transmitter will support the CEC feature.
+
+config OMAP5_DSS_HDMI
+ bool "HDMI support for OMAP5"
+ default n
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI Interface for OMAP5 and similar cores. This adds the High
+	  Definition Multimedia Interface. See https://www.hdmi.org/ for the
+	  HDMI specification.
+
+config OMAP2_DSS_SDI
+ bool "SDI support"
+ default n
+ help
+ SDI (Serial Display Interface) support.
+
+ SDI is a high speed one-way display serial bus between the host
+ processor and a display.
+
+config OMAP2_DSS_DSI
+ bool "DSI support"
+ default n
+ select DRM_MIPI_DSI
+ help
+ MIPI DSI (Display Serial Interface) support.
+
+ DSI is a high speed half-duplex serial interface between the host
+ processor and a peripheral, such as a display or a framebuffer chip.
+
+	  See https://www.mipi.org/ for DSI specifications.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+ int "Minimum FCK/PCK ratio (for scaling)"
+ range 0 32
+ default 0
+ help
+ This can be used to adjust the minimum FCK/PCK ratio.
+
+ With this you can make sure that DISPC FCK is at least
+	  n x PCK. Video plane scaling requires a higher FCK than
+	  normal.
+
+ If this is set to 0, there's no extra constraint on the
+ DISPC FCK. However, the FCK will at minimum be
+ 2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+ Max FCK is 173MHz, so this doesn't work if your PCK
+ is very high.
+
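As a worked example of the constraint described in that help text (numbers chosen here purely for illustration): with PCK = 74.25 MHz, a minimum ratio of 2 requires FCK >= 148.5 MHz, which fits under the 173 MHz ceiling, while a ratio of 3 would require 222.75 MHz and cannot be met; a ratio of 3 is therefore only usable when PCK <= 173/3, about 57.7 MHz.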
+config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
+ bool "Sleep 20ms after VENC reset"
+ default y
+ help
+	  There is a 20ms sleep after VENC reset which seemed to make the
+	  reset reliable. The reason for the bug is unclear, as is the set
+	  of platforms on which it happens.
+
+ This option enables the sleep, and is enabled by default. You can
+ disable the sleep if it doesn't cause problems on your platform.
endif
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index f115253115c5..21e8277ff88f 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -4,16 +4,12 @@
# Direct Rendering Infrastructure (DRI)
#
-obj-y += dss/
-obj-y += displays/
-
omapdrm-y := omap_drv.o \
omap_irq.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
omap_encoder.o \
- omap_connector.o \
omap_fb.o \
omap_gem.o \
omap_gem_dmabuf.o \
@@ -22,4 +18,17 @@ omapdrm-y := omap_drv.o \
omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
-obj-$(CONFIG_DRM_OMAP) += omapdrm.o
+omapdrm-y += dss/base.o dss/output.o dss/dss.o dss/dispc.o \
+ dss/dispc_coefs.o dss/pll.o dss/video-pll.o
+omapdrm-$(CONFIG_OMAP2_DSS_DPI) += dss/dpi.o
+omapdrm-$(CONFIG_OMAP2_DSS_VENC) += dss/venc.o
+omapdrm-$(CONFIG_OMAP2_DSS_SDI) += dss/sdi.o
+omapdrm-$(CONFIG_OMAP2_DSS_DSI) += dss/dsi.o
+omapdrm-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += dss/hdmi_common.o dss/hdmi_wp.o \
+ dss/hdmi_pll.o dss/hdmi_phy.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI) += dss/hdmi4.o dss/hdmi4_core.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI_CEC) += dss/hdmi4_cec.o
+omapdrm-$(CONFIG_OMAP5_DSS_HDMI) += dss/hdmi5.o dss/hdmi5_core.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
deleted file mode 100644
index f2be594c7eff..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "OMAPDRM External Display Device Drivers"
-
-config DRM_OMAP_PANEL_DSI_CM
- tristate "Generic DSI Command Mode Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DSI command mode panels.
-
-endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
deleted file mode 100644
index 488ddf153613..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
deleted file mode 100644
index e39ce0c0c9a9..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ /dev/null
@@ -1,1385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic DSI Command Mode panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/* #define DEBUG */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/of_device.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_connector.h>
-
-#include <video/mipi_display.h>
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-/* DSI Virtual channel. Hardcoded for now. */
-#define TCH 0
-
-#define DCS_READ_NUM_ERRORS 0x05
-#define DCS_BRIGHTNESS 0x51
-#define DCS_CTRL_DISPLAY 0x53
-#define DCS_GET_ID1 0xda
-#define DCS_GET_ID2 0xdb
-#define DCS_GET_ID3 0xdc
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- struct omap_dss_device *src;
-
- struct videomode vm;
-
- struct platform_device *pdev;
-
- struct mutex lock;
-
- struct backlight_device *bldev;
- struct backlight_device *extbldev;
-
- unsigned long hw_guard_end; /* next value of jiffies when we can
- * issue the next sleep in/out command
- */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- /* panel HW configuration from DT or platform data */
- struct gpio_desc *reset_gpio;
- struct gpio_desc *ext_te_gpio;
-
- struct regulator *vpnl;
- struct regulator *vddi;
-
- bool use_dsi_backlight;
-
- int width_mm;
- int height_mm;
-
- struct omap_dsi_pin_config pin_config;
-
- /* runtime variables */
- bool enabled;
-
- bool te_enabled;
-
- atomic_t do_update;
- int channel;
-
- struct delayed_work te_timeout_work;
-
- bool intro_printed;
-
- struct workqueue_struct *workqueue;
-
- bool ulps_enabled;
- unsigned int ulps_timeout;
- struct delayed_work ulps_work;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static irqreturn_t dsicm_te_isr(int irq, void *data);
-static void dsicm_te_timeout_work_callback(struct work_struct *work);
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata);
-
-static void dsicm_ulps_work(struct work_struct *work);
-
-static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
-{
- struct backlight_device *backlight;
-
- if (ddata->bldev)
- backlight = ddata->bldev;
- else if (ddata->extbldev)
- backlight = ddata->extbldev;
- else
- return;
-
- if (enable) {
- backlight->props.fb_blank = FB_BLANK_UNBLANK;
- backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
- backlight->props.power = FB_BLANK_UNBLANK;
- } else {
- backlight->props.fb_blank = FB_BLANK_NORMAL;
- backlight->props.power = FB_BLANK_POWERDOWN;
- backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
- }
-
- backlight_update_status(backlight);
-}
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
- ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
- ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
- unsigned long wait = ddata->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
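The hw_guard helpers being deleted above implement a common DCS timing rule: after a sleep-in/out command the panel must not see the next one for a guard period (120 ms here). A sketch of the same jiffies-window technique with the wrap-around handling spelled out (kernel-style, using only standard jiffies helpers; not the driver's exact code):

	#include <linux/jiffies.h>
	#include <linux/sched.h>

	struct guard {
		unsigned long end;	/* jiffies value at which the window expires */
		unsigned long wait;	/* window length, in jiffies */
	};

	static void guard_start(struct guard *g, int guard_msec)
	{
		g->wait = msecs_to_jiffies(guard_msec);
		g->end = jiffies + g->wait;	/* may wrap; handled in guard_wait() */
	}

	static void guard_wait(struct guard *g)
	{
		unsigned long wait = g->end - jiffies;

		/*
		 * Unsigned subtraction wraps cleanly: once the window has
		 * passed, "end - jiffies" underflows to a huge value, which
		 * fails the wait <= g->wait bound, so we only sleep while
		 * genuinely inside the guard window.
		 */
		if ((long)wait > 0 && wait <= g->wait) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(wait);
		}
	}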
-
-static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u8 buf[1];
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
-
- if (r < 0)
- return r;
-
- *data = buf[0];
-
- return 0;
-}
-
-static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
-{
- struct omap_dss_device *src = ddata->src;
-
- return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
-}
-
-static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
-{
- struct omap_dss_device *src = ddata->src;
- u8 buf[2] = { dcs_cmd, param };
-
- return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
-}
-
-static int dsicm_sleep_in(struct panel_drv_data *ddata)
-
-{
- struct omap_dss_device *src = ddata->src;
- u8 cmd;
- int r;
-
- hw_guard_wait(ddata);
-
- cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_sleep_out(struct panel_drv_data *ddata)
-{
- int r;
-
- hw_guard_wait(ddata);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
-{
- int r;
-
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
- if (r)
- return r;
-
- return 0;
-}
-
-static int dsicm_set_update_window(struct panel_drv_data *ddata,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u16 x1 = x;
- u16 x2 = x + w - 1;
- u16 y1 = y;
- u16 y2 = y + h - 1;
-
- u8 buf[5];
- buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
- buf[1] = (x1 >> 8) & 0xff;
- buf[2] = (x1 >> 0) & 0xff;
- buf[3] = (x2 >> 8) & 0xff;
- buf[4] = (x2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
- buf[1] = (y1 >> 8) & 0xff;
- buf[2] = (y1 >> 0) & 0xff;
- buf[3] = (y2 >> 8) & 0xff;
- buf[4] = (y2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- src->ops->dsi.bta_sync(src, ddata->channel);
-
- return r;
-}
-
-static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_timeout > 0)
- queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
- msecs_to_jiffies(ddata->ulps_timeout));
-}
-
-static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
-{
- cancel_delayed_work(&ddata->ulps_work);
-}
-
-static int dsicm_enter_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (ddata->ulps_enabled)
- return 0;
-
- dsicm_cancel_ulps_work(ddata);
-
- r = _dsicm_enable_te(ddata, false);
- if (r)
- goto err;
-
- if (ddata->ext_te_gpio)
- disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- src->ops->dsi.disable(src, false, true);
-
- ddata->ulps_enabled = true;
-
- return 0;
-
-err:
- dev_err(&ddata->pdev->dev, "enter ULPS failed");
- dsicm_panel_reset(ddata);
-
- ddata->ulps_enabled = false;
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_exit_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (!ddata->ulps_enabled)
- return 0;
-
- src->ops->enable(src);
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- r = _dsicm_enable_te(ddata, true);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to re-enable TE");
- goto err2;
- }
-
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- dsicm_queue_ulps_work(ddata);
-
- ddata->ulps_enabled = false;
-
- return 0;
-
-err2:
- dev_err(&ddata->pdev->dev, "failed to exit ULPS");
-
- r = dsicm_panel_reset(ddata);
- if (!r) {
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
- ddata->ulps_enabled = false;
- }
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_wake_up(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_enabled)
- return dsicm_exit_ulps(ddata);
-
- dsicm_cancel_ulps_work(ddata);
- dsicm_queue_ulps_work(ddata);
- return 0;
-}
-
-static int dsicm_bl_update_status(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->src;
- int r = 0;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_bl_get_intensity(struct backlight_device *dev)
-{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops dsicm_bl_ops = {
- .get_brightness = dsicm_bl_get_intensity,
- .update_status = dsicm_bl_update_status,
-};
-
-static ssize_t dsicm_num_errors_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 errors = 0;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
- &errors);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
-}
-
-static ssize_t dsicm_hw_revision_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
-}
-
-static ssize_t dsicm_store_ulps(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- if (t)
- r = dsicm_enter_ulps(ddata);
- else
- r = dsicm_wake_up(ddata);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_enabled;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t dsicm_store_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
- ddata->ulps_timeout = t;
-
- if (ddata->enabled) {
- /* dsicm_wake_up will restart the timer */
- src->ops->dsi.bus_lock(src);
- r = dsicm_wake_up(ddata);
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_timeout;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
-static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
-static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
- dsicm_show_ulps, dsicm_store_ulps);
-static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
- dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);
-
-static struct attribute *dsicm_attrs[] = {
- &dev_attr_num_dsi_errors.attr,
- &dev_attr_hw_revision.attr,
- &dev_attr_ulps.attr,
- &dev_attr_ulps_timeout.attr,
- NULL,
-};
-
-static const struct attribute_group dsicm_attr_group = {
- .attrs = dsicm_attrs,
-};
-
-static void dsicm_hw_reset(struct panel_drv_data *ddata)
-{
- gpiod_set_value(ddata->reset_gpio, 1);
- udelay(10);
- /* reset the panel */
- gpiod_set_value(ddata->reset_gpio, 0);
- /* assert reset */
- udelay(10);
- gpiod_set_value(ddata->reset_gpio, 1);
- /* wait after releasing reset */
- usleep_range(5000, 10000);
-}
-
-static int dsicm_power_on(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
- struct omap_dss_dsi_config dsi_config = {
- .mode = OMAP_DSS_DSI_CMD_MODE,
- .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .vm = &ddata->vm,
- .hs_clk_min = 150000000,
- .hs_clk_max = 300000000,
- .lp_clk_min = 7000000,
- .lp_clk_max = 10000000,
- };
-
- if (ddata->vpnl) {
- r = regulator_enable(ddata->vpnl);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VPNL: %d\n", r);
- return r;
- }
- }
-
- if (ddata->vddi) {
- r = regulator_enable(ddata->vddi);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VDDI: %d\n", r);
- goto err_vpnl;
- }
- }
-
- if (ddata->pin_config.num_pins > 0) {
- r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to configure DSI pins\n");
- goto err_vddi;
- }
- }
-
- r = src->ops->dsi.set_config(src, &dsi_config);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
- goto err_vddi;
- }
-
- src->ops->enable(src);
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.enable_hs(src, ddata->channel, false);
-
- r = dsicm_sleep_out(ddata);
- if (r)
- goto err;
-
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
- (1<<2) | (1<<5)); /* BL | BCTRL */
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
- MIPI_DCS_PIXEL_FMT_24BIT);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, ddata->te_enabled);
- if (r)
- goto err;
-
- r = src->ops->dsi.enable_video_output(src, ddata->channel);
- if (r)
- goto err;
-
- ddata->enabled = true;
-
- if (!ddata->intro_printed) {
- dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
- id1, id2, id3);
- ddata->intro_printed = true;
- }
-
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- return 0;
-err:
- dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.disable(src, true, false);
-err_vddi:
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
-err_vpnl:
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- return r;
-}
-
-static void dsicm_power_off(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- src->ops->dsi.disable_video_output(src, ddata->channel);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
- if (!r)
- r = dsicm_sleep_in(ddata);
-
- if (r) {
- dev_err(&ddata->pdev->dev,
- "error disabling panel, issuing HW reset\n");
- dsicm_hw_reset(ddata);
- }
-
- src->ops->dsi.disable(src, true, false);
-
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- ddata->enabled = false;
-}
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata)
-{
- dev_err(&ddata->pdev->dev, "performing LCD reset\n");
-
- dsicm_power_off(ddata);
- dsicm_hw_reset(ddata);
- return dsicm_power_on(ddata);
-}
-
-static int dsicm_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- struct device *dev = &ddata->pdev->dev;
- int r;
-
- r = src->ops->dsi.request_vc(src, &ddata->channel);
- if (r) {
- dev_err(dev, "failed to get virtual channel\n");
- return r;
- }
-
- r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
- if (r) {
- dev_err(dev, "failed to set VC_ID\n");
- src->ops->dsi.release_vc(src, ddata->channel);
- return r;
- }
-
- ddata->src = src;
- return 0;
-}
-
-static void dsicm_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- src->ops->dsi.release_vc(src, ddata->channel);
- ddata->src = NULL;
-}
-
-static void dsicm_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_power_on(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- if (r)
- goto err;
-
- mutex_unlock(&ddata->lock);
-
- dsicm_bl_power(ddata, true);
-
- return;
-err:
- dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dsicm_bl_power(ddata, false);
-
- mutex_lock(&ddata->lock);
-
- dsicm_cancel_ulps_work(ddata);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_framedone_cb(int err, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
- src->ops->dsi.bus_unlock(src);
-}
-
-static irqreturn_t dsicm_te_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
- int old;
- int r;
-
- old = atomic_cmpxchg(&ddata->do_update, 1, 0);
-
- if (old) {
- cancel_delayed_work(&ddata->te_timeout_work);
-
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- return IRQ_HANDLED;
-err:
- dev_err(&ddata->pdev->dev, "start update failed\n");
- src->ops->dsi.bus_unlock(src);
- return IRQ_HANDLED;
-}
-
-static void dsicm_te_timeout_work_callback(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- te_timeout_work.work);
- struct omap_dss_device *src = ddata->src;
-
- dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
-
- atomic_set(&ddata->do_update, 0);
- src->ops->dsi.bus_unlock(src);
-}
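The TE handshake removed above pairs an atomic do_update flag with a 250 ms timeout worker: dsicm_update() arms both, and whichever side flips the flag first owns the pending frame, so the DSI update is started exactly once or cleanly abandoned. A compressed sketch of that claim protocol using C11 atomics (illustrative, outside the kernel's atomic API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int do_update;	/* 1 = a frame update is armed */

	/* Flip 1 -> 0 atomically; exactly one caller wins per armed update. */
	static bool claim_pending_update(void)
	{
		int expected = 1;

		return atomic_compare_exchange_strong(&do_update, &expected, 0);
	}

	static void te_irq(void)
	{
		if (claim_pending_update()) {
			/* we own the frame: cancel the timeout and start the
			 * DSI transfer; the framedone callback unlocks the bus */
		}
	}

	static void te_timeout(void)
	{
		/* TE never arrived: drop the armed flag and release the bus
		 * lock so the pipeline is not left stuck */
		atomic_store(&do_update, 0);
	}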
-
-static int dsicm_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- if (!ddata->enabled) {
- r = 0;
- goto err;
- }
-
- /* XXX no need to send this every frame, but dsi break if not done */
- r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
- ddata->vm.vactive);
- if (r)
- goto err;
-
- if (ddata->te_enabled && ddata->ext_te_gpio) {
- schedule_delayed_work(&ddata->te_timeout_work,
- msecs_to_jiffies(250));
- atomic_set(&ddata->do_update, 1);
- } else {
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- /* note: no bus_unlock here. unlock is src framedone_cb */
- mutex_unlock(&ddata->lock);
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static int dsicm_sync(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "sync\n");
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- dev_dbg(&ddata->pdev->dev, "sync done\n");
-
- return 0;
-}
-
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (enable)
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
- else
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
-
- if (!ddata->ext_te_gpio)
- src->ops->dsi.enable_te(src, enable);
-
- /* possible panel bug */
- msleep(100);
-
- return r;
-}
-
-static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->te_enabled == enable)
- goto end;
-
- src->ops->dsi.bus_lock(src);
-
- if (ddata->enabled) {
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, enable);
- if (r)
- goto err;
- }
-
- ddata->te_enabled = enable;
-
- src->ops->dsi.bus_unlock(src);
-end:
- mutex_unlock(&ddata->lock);
-
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_get_te(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- mutex_lock(&ddata->lock);
- r = ddata->te_enabled;
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_memory_read(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
- int first = 1;
- int plen;
- unsigned int buf_used = 0;
-
- if (size < w * h * 3)
- return -ENOMEM;
-
- mutex_lock(&ddata->lock);
-
- if (!ddata->enabled) {
- r = -ENODEV;
- goto err1;
- }
-
- size = min((u32)w * h * 3,
- ddata->vm.hactive * ddata->vm.vactive * 3);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err2;
-
- /* plen 1 or 2 goes into short packet. until checksum error is fixed,
- * use short packets. plen 32 works, but bigger packets seem to cause
- * an error. */
- if (size % 2)
- plen = 1;
- else
- plen = 2;
-
- dsicm_set_update_window(ddata, x, y, w, h);
-
- r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
- if (r)
- goto err2;
-
- while (buf_used < size) {
- u8 dcs_cmd = first ? 0x2e : 0x3e;
- first = 0;
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
- buf + buf_used, size - buf_used);
-
- if (r < 0) {
- dev_err(dssdev->dev, "read error\n");
- goto err3;
- }
-
- buf_used += r;
-
- if (r < plen) {
- dev_err(&ddata->pdev->dev, "short read\n");
- break;
- }
-
- if (signal_pending(current)) {
- dev_err(&ddata->pdev->dev, "signal pending, "
- "aborting memory read\n");
- r = -ERESTARTSYS;
- goto err3;
- }
- }
-
- r = buf_used;
-
-err3:
- src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
-err2:
- src->ops->dsi.bus_unlock(src);
-err1:
- mutex_unlock(&ddata->lock);
- return r;
-}
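dsicm_memory_read() above is an instance of the usual chunked-read accumulation loop: keep issuing reads for the remaining bytes, advance by however much arrived, and treat a short chunk as end-of-data. Stripped of the DSI specifics, the shape is roughly this (illustrative callback, not the omapdss API):

	#include <stddef.h>

	/* rd() reads up to len bytes into dst and returns the count, or < 0. */
	static int read_all(int (*rd)(void *dst, size_t len), void *buf, size_t size)
	{
		size_t used = 0;

		while (used < size) {
			int r = rd((char *)buf + used, size - used);

			if (r < 0)
				return r;	/* propagate transfer errors */
			if (r == 0)
				break;		/* no more data from the device */
			used += r;
		}
		return used;
	}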
-
-static void dsicm_ulps_work(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- ulps_work.work);
- struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = ddata->src;
-
- mutex_lock(&ddata->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) {
- mutex_unlock(&ddata->lock);
- return;
- }
-
- src->ops->dsi.bus_lock(src);
-
- dsicm_enter_ulps(ddata);
-
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-}
-
-static int dsicm_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- connector->display_info.width_mm = ddata->width_mm;
- connector->display_info.height_mm = ddata->height_mm;
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int ret = 0;
-
- if (mode->hdisplay != ddata->vm.hactive)
- ret = -EINVAL;
-
- if (mode->vdisplay != ddata->vm.vactive)
- ret = -EINVAL;
-
- if (ret) {
- dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- mode->hdisplay, mode->vdisplay);
- dev_warn(dssdev->dev, "panel resolution: %d x %d",
- ddata->vm.hactive, ddata->vm.vactive);
- }
-
- return ret;
-}
-
-static const struct omap_dss_device_ops dsicm_ops = {
- .connect = dsicm_connect,
- .disconnect = dsicm_disconnect,
-
- .enable = dsicm_enable,
- .disable = dsicm_disable,
-
- .get_modes = dsicm_get_modes,
- .check_timings = dsicm_check_timings,
-};
-
-static const struct omap_dss_driver dsicm_dss_driver = {
- .update = dsicm_update,
- .sync = dsicm_sync,
-
- .enable_te = dsicm_enable_te,
- .get_te = dsicm_get_te,
-
- .memory_read = dsicm_memory_read,
-};
-
-static int dsicm_probe_of(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct backlight_device *backlight;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct display_timing timing;
- int err;
-
- ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->reset_gpio)) {
- err = PTR_ERR(ddata->reset_gpio);
- dev_err(&pdev->dev, "reset gpio request failed: %d", err);
- return err;
- }
-
- ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
- GPIOD_IN);
- if (IS_ERR(ddata->ext_te_gpio)) {
- err = PTR_ERR(ddata->ext_te_gpio);
- dev_err(&pdev->dev, "TE gpio request failed: %d", err);
- return err;
- }
-
- err = of_get_display_timing(node, "panel-timing", &timing);
- if (!err) {
- videomode_from_timing(&timing, &ddata->vm);
- if (!ddata->vm.pixelclock)
- ddata->vm.pixelclock =
- ddata->vm.hactive * ddata->vm.vactive * 60;
- } else {
- dev_warn(&pdev->dev,
- "failed to get video timing, using defaults\n");
- }
-
- ddata->width_mm = 0;
- of_property_read_u32(node, "width-mm", &ddata->width_mm);
-
- ddata->height_mm = 0;
- of_property_read_u32(node, "height-mm", &ddata->height_mm);
-
- ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
- if (IS_ERR(ddata->vpnl)) {
- err = PTR_ERR(ddata->vpnl);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vpnl = NULL;
- }
-
- ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
- if (IS_ERR(ddata->vddi)) {
- err = PTR_ERR(ddata->vddi);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vddi = NULL;
- }
-
- backlight = devm_of_find_backlight(&pdev->dev);
- if (IS_ERR(backlight))
- return PTR_ERR(backlight);
-
- /* If no backlight device is found assume native backlight support */
- if (backlight)
- ddata->extbldev = backlight;
- else
- ddata->use_dsi_backlight = true;
-
- /* TODO: ulps */
-
- return 0;
-}
-
-static int dsicm_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct backlight_device *bldev = NULL;
- struct device *dev = &pdev->dev;
- struct omap_dss_device *dssdev;
- int r;
-
- dev_dbg(dev, "probe\n");
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->pdev = pdev;
-
- ddata->vm.hactive = 864;
- ddata->vm.vactive = 480;
- ddata->vm.pixelclock = 864 * 480 * 60;
-
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = dev;
- dssdev->ops = &dsicm_ops;
- dssdev->driver = &dsicm_dss_driver;
- dssdev->type = OMAP_DISPLAY_TYPE_DSI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_port = 0;
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- mutex_init(&ddata->lock);
-
- atomic_set(&ddata->do_update, 0);
-
- if (ddata->ext_te_gpio) {
- r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
- dsicm_te_isr,
- IRQF_TRIGGER_RISING,
- "taal vsync", ddata);
-
- if (r) {
- dev_err(dev, "IRQ request failed\n");
- goto err_reg;
- }
-
- INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
- dsicm_te_timeout_work_callback);
-
- dev_dbg(dev, "Using GPIO TE\n");
- }
-
- ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (!ddata->workqueue) {
- r = -ENOMEM;
- goto err_reg;
- }
- INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
-
- dsicm_hw_reset(ddata);
-
- if (ddata->use_dsi_backlight) {
- struct backlight_properties props = { 0 };
- props.max_brightness = 255;
- props.type = BACKLIGHT_RAW;
-
- bldev = devm_backlight_device_register(dev, dev_name(dev),
- dev, ddata, &dsicm_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_bl;
- }
-
- ddata->bldev = bldev;
- }
-
- r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
- if (r) {
- dev_err(dev, "failed to create sysfs files\n");
- goto err_bl;
- }
-
- return 0;
-
-err_bl:
- destroy_workqueue(ddata->workqueue);
-err_reg:
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- return r;
-}
-
-static int __exit dsicm_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&pdev->dev, "remove\n");
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- dsicm_disable(dssdev);
- omapdss_device_disconnect(ddata->src, dssdev);
-
- sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
-
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- dsicm_cancel_ulps_work(ddata);
- destroy_workqueue(ddata->workqueue);
-
- /* reset, to be sure that the panel is in a valid state */
- dsicm_hw_reset(ddata);
-
- return 0;
-}
-
-static const struct of_device_id dsicm_of_match[] = {
- { .compatible = "omapdss,panel-dsi-cm", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dsicm_of_match);
-
-static struct platform_driver dsicm_driver = {
- .probe = dsicm_probe,
- .remove = __exit_p(dsicm_remove),
- .driver = {
- .name = "panel-dsi-cm",
- .of_match_table = dsicm_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dsicm_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
deleted file mode 100644
index e11b258a2294..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ /dev/null
@@ -1,135 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config OMAP2_DSS_INIT
- bool
-
-config OMAP_DSS_BASE
- tristate
-
-menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support"
- select OMAP_DSS_BASE
- select VIDEOMODE_HELPERS
- select OMAP2_DSS_INIT
- select HDMI
- help
- OMAP2+ Display Subsystem support.
-
-if OMAP2_DSS
-
-config OMAP2_DSS_DEBUG
- bool "Debug support"
- default n
- help
- This enables printing of debug messages. Alternatively, debug messages
- can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
- appropriate flags in <debugfs>/dynamic_debug/control.
-
-config OMAP2_DSS_DEBUGFS
- bool "Debugfs filesystem support"
- depends on DEBUG_FS
- default n
- help
- This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables
- querying about clock configuration and register configuration of dss,
- dispc, dsi, hdmi and rfbi.
-
-config OMAP2_DSS_COLLECT_IRQ_STATS
- bool "Collect DSS IRQ statistics"
- depends on OMAP2_DSS_DEBUGFS
- default n
- help
- Collect DSS IRQ statistics, printable via debugfs.
-
- The statistics can be found from
- <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
- <debugfs>/omapdss/dsi_irq for DSI interrupts.
-
-config OMAP2_DSS_DPI
- bool "DPI support"
- default y
- help
- DPI Interface. This is the Parallel Display Interface.
-
-config OMAP2_DSS_VENC
- bool "VENC support"
- default y
- help
- OMAP Video Encoder support for S-Video and composite TV-out.
-
-config OMAP2_DSS_HDMI_COMMON
- bool
-
-config OMAP4_DSS_HDMI
- bool "HDMI support for OMAP4"
- default y
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI support for OMAP4 based SoCs.
-
-config OMAP4_DSS_HDMI_CEC
- bool "Enable HDMI CEC support for OMAP4"
- depends on OMAP4_DSS_HDMI
- select CEC_CORE
- default y
- help
- When selected the HDMI transmitter will support the CEC feature.
-
-config OMAP5_DSS_HDMI
- bool "HDMI support for OMAP5"
- default n
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI Interface for OMAP5 and similar cores. This adds the High
- Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI
- specification.
-
-config OMAP2_DSS_SDI
- bool "SDI support"
- default n
- help
- SDI (Serial Display Interface) support.
-
- SDI is a high speed one-way display serial bus between the host
- processor and a display.
-
-config OMAP2_DSS_DSI
- bool "DSI support"
- default n
- help
- MIPI DSI (Display Serial Interface) support.
-
- DSI is a high speed half-duplex serial interface between the host
- processor and a peripheral, such as a display or a framebuffer chip.
-
- See https://www.mipi.org/ for DSI specifications.
-
-config OMAP2_DSS_MIN_FCK_PER_PCK
- int "Minimum FCK/PCK ratio (for scaling)"
- range 0 32
- default 0
- help
- This can be used to adjust the minimum FCK/PCK ratio.
-
- With this you can make sure that DISPC FCK is at least
- n x PCK. Video plane scaling requires higher FCK than
- normally.
-
- If this is set to 0, there's no extra constraint on the
- DISPC FCK. However, the FCK will at minimum be
- 2xPCK (if active matrix) or 3xPCK (if passive matrix).
-
- Max FCK is 173MHz, so this doesn't work if your PCK
- is very high.
-
-config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
- bool "Sleep 20ms after VENC reset"
- default y
- help
- There is a 20ms sleep after VENC reset which seemed to fix the
- reset. The reason for the bug is unclear, and it's also unclear
- on what platforms this happens.
-
- This option enables the sleep, and is enabled by default. You can
- disable the sleep if it doesn't cause problems on your platform.
-
-endif
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
deleted file mode 100644
index f967e6948f2e..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
-
-obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o output.o
-
-obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-# Core DSS files
-omapdss-y := dss.o dispc.o dispc_coefs.o \
- pll.o video-pll.o
-omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
-omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
-omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
- hdmi_phy.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
-omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
-ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index cf50430e6363..050ca7eafac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -16,32 +16,10 @@
#include "dss.h"
#include "omapdss.h"
-static struct dss_device *dss_device;
-
-struct dss_device *omapdss_get_dss(void)
-{
- return dss_device;
-}
-EXPORT_SYMBOL(omapdss_get_dss);
-
-void omapdss_set_dss(struct dss_device *dss)
-{
- dss_device = dss;
-}
-EXPORT_SYMBOL(omapdss_set_dss);
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
return dss->dispc;
}
-EXPORT_SYMBOL(dispc_get_dispc);
-
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
-{
- return dss->dispc_ops;
-}
-EXPORT_SYMBOL(dispc_get_ops);
-
/* -----------------------------------------------------------------------------
* OMAP DSS Devices Handling
@@ -56,7 +34,6 @@ void omapdss_device_register(struct omap_dss_device *dssdev)
list_add_tail(&dssdev->list, &omapdss_devices_list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_register);
void omapdss_device_unregister(struct omap_dss_device *dssdev)
{
@@ -64,7 +41,6 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev)
list_del(&dssdev->list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_unregister);
static bool omapdss_device_is_registered(struct device_node *node)
{
@@ -86,24 +62,16 @@ static bool omapdss_device_is_registered(struct device_node *node)
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
{
- if (!try_module_get(dssdev->owner))
- return NULL;
-
- if (get_device(dssdev->dev) == NULL) {
- module_put(dssdev->owner);
+ if (get_device(dssdev->dev) == NULL)
return NULL;
- }
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get);
void omapdss_device_put(struct omap_dss_device *dssdev)
{
put_device(dssdev->dev);
- module_put(dssdev->owner);
}
-EXPORT_SYMBOL(omapdss_device_put);
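The hunk above can drop the module refcounting now that the DSS core and its consumers are linked into the single omapdrm module (see the Makefile changes earlier in this diff). The removed version needed the classic take-in-order/release-in-reverse pairing; for reference, that pattern looks like this (mirroring the removed lines):

	#include <linux/device.h>
	#include <linux/module.h>

	static struct omap_dss_device *device_get(struct omap_dss_device *dssdev)
	{
		/* Pin the owning module first so the ops can't unload... */
		if (!try_module_get(dssdev->owner))
			return NULL;
		/* ...then the device; unwind the module ref if that fails. */
		if (!get_device(dssdev->dev)) {
			module_put(dssdev->owner);
			return NULL;
		}
		return dssdev;
	}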
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
@@ -149,7 +117,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id && (dssdev->next || dssdev->bridge))
+ if (dssdev->id && dssdev->bridge)
goto done;
}
@@ -164,7 +132,6 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_next_output);
static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
@@ -175,8 +142,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int ret;
-
dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
src ? dev_name(src->dev) : "NULL",
dst ? dev_name(dst->dev) : "NULL");
@@ -195,17 +160,8 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- if (dst->ops && dst->ops->connect) {
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
- }
- }
-
return 0;
}
-EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
@@ -222,43 +178,12 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(!dst->display);
+ WARN_ON(1);
return;
}
- WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
-
- if (dst->ops && dst->ops->disconnect)
- dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
-EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-
-void omapdss_device_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops && dssdev->ops->enable)
- dssdev->ops->enable(dssdev);
-
- omapdss_device_enable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_enable);
-
-void omapdss_device_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_disable(dssdev->next);
-
- if (dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_disable);
/* -----------------------------------------------------------------------------
* Components Handling
@@ -344,7 +269,6 @@ void omapdss_gather_components(struct device *dev)
for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
}
-EXPORT_SYMBOL(omapdss_gather_components);
static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
@@ -369,8 +293,3 @@ bool omapdss_stack_is_ready(void)
return true;
}
-EXPORT_SYMBOL(omapdss_stack_is_ready);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP Display Subsystem Base");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 599183879caf..f4cbef8ccace 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -351,8 +351,6 @@ static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane);
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-
static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
__raw_writel(val, dispc->base + idx);
@@ -379,12 +377,12 @@ static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
-static int dispc_get_num_ovls(struct dispc_device *dispc)
+int dispc_get_num_ovls(struct dispc_device *dispc)
{
return dispc->feat->num_ovls;
}
-static int dispc_get_num_mgrs(struct dispc_device *dispc)
+int dispc_get_num_mgrs(struct dispc_device *dispc)
{
return dispc->feat->num_mgrs;
}
@@ -670,13 +668,13 @@ void dispc_runtime_put(struct dispc_device *dispc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
-static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
@@ -685,18 +683,18 @@ static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
return mgr_desc[channel].framedone_irq;
}
-static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
-static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
-static void dispc_mgr_enable(struct dispc_device *dispc,
+void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -710,13 +708,13 @@ static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
-static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
WARN_ON(dispc_mgr_go_busy(dispc, channel));
@@ -726,12 +724,12 @@ static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-static bool dispc_wb_go_busy(struct dispc_device *dispc)
+bool dispc_wb_go_busy(struct dispc_device *dispc)
{
return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
-static void dispc_wb_go(struct dispc_device *dispc)
+void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
@@ -877,50 +875,62 @@ static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
#undef CVAL
}
-static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
- const struct csc_coef_rgb2yuv *ct)
-{
- const enum omap_plane_id plane = OMAP_DSS_WB;
-
-#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
+ 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ true, /* full range */
+};
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ false, /* limited range */
+};
- REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
+ 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ true, /* full range */
+};
-#undef CVAL
-}
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
+ 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ false, /* limited range */
+};
-static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
+static void dispc_ovl_set_csc(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
- int i;
- int num_ovl = dispc_get_num_ovls(dispc);
-
- /* YUV -> RGB, ITU-R BT.601, limited range */
- const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
- 298, 0, 409, /* ry, rcb, rcr */
- 298, -100, -208, /* gy, gcb, gcr */
- 298, 516, 0, /* by, bcb, bcr */
- false, /* limited range */
- };
+ const struct csc_coef_yuv2rgb *csc;
- /* RGB -> YUV, ITU-R BT.601, limited range */
- const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
- 66, 129, 25, /* yr, yg, yb */
- -38, -74, 112, /* cbr, cbg, cbb */
- 112, -94, -18, /* crr, crg, crb */
- false, /* limited range */
- };
-
- for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
+ switch (color_encoding) {
+ default:
+ case DRM_COLOR_YCBCR_BT601:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt601_full;
+ else
+ csc = &coefs_yuv2rgb_bt601_lim;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt709_full;
+ else
+ csc = &coefs_yuv2rgb_bt709_lim;
+ break;
+ }
- if (dispc->feat->has_writeback)
- dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
+ dispc_ovl_write_color_conv_coef(dispc, plane, csc);
}
static void dispc_ovl_set_ba0(struct dispc_device *dispc,
@@ -1285,7 +1295,7 @@ static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
return false;
}
-static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane)
{
return dispc->feat->supported_color_modes[plane];
@@ -2601,7 +2611,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
u8 pre_mult_alpha, u8 global_alpha,
enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
- bool mem_to_mem)
+ bool mem_to_mem,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
bool five_taps = true;
bool fieldmode = false;
@@ -2750,6 +2762,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
fieldmode, fourcc, rotation);
dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
+
+ if (plane != OMAP_DSS_WB)
+ dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
}
dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
@@ -2764,7 +2779,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
return 0;
}
-static int dispc_ovl_setup(struct dispc_device *dispc,
+int dispc_ovl_setup(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct omap_overlay_info *oi,
const struct videomode *vm, bool mem_to_mem,
@@ -2786,12 +2801,13 @@ static int dispc_ovl_setup(struct dispc_device *dispc,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, vm, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem,
+ oi->color_encoding, oi->color_range);
return r;
}
-static int dispc_wb_setup(struct dispc_device *dispc,
+int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in)
@@ -2819,7 +2835,8 @@ static int dispc_wb_setup(struct dispc_device *dispc,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, vm, mem_to_mem);
+ replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
@@ -2874,12 +2891,12 @@ static int dispc_wb_setup(struct dispc_device *dispc,
return 0;
}
-static bool dispc_has_writeback(struct dispc_device *dispc)
+bool dispc_has_writeback(struct dispc_device *dispc)
{
return dispc->feat->has_writeback;
}
-static int dispc_ovl_enable(struct dispc_device *dispc,
+int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2970,7 +2987,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
-static void dispc_mgr_setup(struct dispc_device *dispc,
+void dispc_mgr_setup(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_overlay_manager_info *info)
{
@@ -3049,7 +3066,7 @@ static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
-static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -3098,7 +3115,7 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
return pclk <= dispc->feat->max_tv_pclk;
}
-static int dispc_mgr_check_timings(struct dispc_device *dispc,
+int dispc_mgr_check_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3191,7 +3208,7 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
}
/* change name to mode? */
-static void dispc_mgr_set_timings(struct dispc_device *dispc,
+void dispc_mgr_set_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3735,17 +3752,17 @@ int dispc_mgr_get_clock_div(struct dispc_device *dispc,
return 0;
}
-static u32 dispc_read_irqstatus(struct dispc_device *dispc)
+u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
-static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
@@ -3769,7 +3786,7 @@ void dispc_disable_sidle(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
-static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
@@ -3824,7 +3841,7 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
-static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
enum omap_channel channel,
const struct drm_color_lut *lut,
unsigned int length)
@@ -3930,8 +3947,6 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc)
dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef(dispc);
-
dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
dispc_init_fifos(dispc);
@@ -4482,7 +4497,7 @@ static irqreturn_t dispc_irq_handler(int irq, void *arg)
return dispc->user_handler(irq, dispc->user_data);
}
-static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
void *dev_id)
{
int r;
@@ -4506,7 +4521,7 @@ static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
return r;
}
-static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
{
devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
@@ -4514,7 +4529,7 @@ static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
dispc->user_data = NULL;
}
-static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
u32 limit = 0;
@@ -4684,47 +4699,6 @@ static void dispc_errata_i734_wa(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
-static const struct dispc_ops dispc_ops = {
- .read_irqstatus = dispc_read_irqstatus,
- .clear_irqstatus = dispc_clear_irqstatus,
- .write_irqenable = dispc_write_irqenable,
-
- .request_irq = dispc_request_irq,
- .free_irq = dispc_free_irq,
-
- .runtime_get = dispc_runtime_get,
- .runtime_put = dispc_runtime_put,
-
- .get_num_ovls = dispc_get_num_ovls,
- .get_num_mgrs = dispc_get_num_mgrs,
-
- .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
-
- .mgr_enable = dispc_mgr_enable,
- .mgr_is_enabled = dispc_mgr_is_enabled,
- .mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
- .mgr_get_framedone_irq = dispc_mgr_get_framedone_irq,
- .mgr_get_sync_lost_irq = dispc_mgr_get_sync_lost_irq,
- .mgr_go_busy = dispc_mgr_go_busy,
- .mgr_go = dispc_mgr_go,
- .mgr_set_lcd_config = dispc_mgr_set_lcd_config,
- .mgr_check_timings = dispc_mgr_check_timings,
- .mgr_set_timings = dispc_mgr_set_timings,
- .mgr_setup = dispc_mgr_setup,
- .mgr_gamma_size = dispc_mgr_gamma_size,
- .mgr_set_gamma = dispc_mgr_set_gamma,
-
- .ovl_enable = dispc_ovl_enable,
- .ovl_setup = dispc_ovl_setup,
- .ovl_get_color_modes = dispc_ovl_get_color_modes,
-
- .wb_get_framedone_irq = dispc_wb_get_framedone_irq,
- .wb_setup = dispc_wb_setup,
- .has_writeback = dispc_has_writeback,
- .wb_go_busy = dispc_wb_go_busy,
- .wb_go = dispc_wb_go,
-};
-
/* DISPC HW IP initialisation */
static const struct of_device_id dispc_of_match[] = {
{ .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
@@ -4826,7 +4800,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
dispc_runtime_put(dispc);
dss->dispc = dispc;
- dss->dispc_ops = &dispc_ops;
dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
dispc);
@@ -4848,7 +4821,6 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data)
dss_debugfs_remove_file(dispc->debugfs);
dss->dispc = NULL;
- dss->dispc_ops = NULL;
pm_runtime_disable(dev);
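
The four csc_coef_yuv2rgb tables added above store the conversion matrices in signed fixed point scaled by 256 (so 298 ≈ 1.164). A minimal sketch of the arithmetic the hardware applies with one table row; the helper itself is illustrative, not part of the driver, though the ry/rcb/rcr/full_range fields come from the structures used above:

/* Compute R from YCbCr with a table such as coefs_yuv2rgb_bt601_lim. */
static int sketch_csc_red(const struct csc_coef_yuv2rgb *ct,
			  int y, int cb, int cr)
{
	if (!ct->full_range)
		y -= 16;	/* limited range: Y spans 16..235 */

	return (ct->ry * y + ct->rcb * (cb - 128) + ct->rcr * (cr - 128)) >> 8;
}

For BT.601 limited range, Y = 235 with neutral chroma (Cb = Cr = 128) yields (298 * 219) >> 8 = 254, i.e. full-scale output as expected.
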
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
deleted file mode 100644
index 3b82158b1bfd..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#include "omapdss.h"
-
-static int disp_num_counter;
-
-void omapdss_display_init(struct omap_dss_device *dssdev)
-{
- int id;
-
- /*
- * Note: this presumes that all displays either have an DT alias, or
- * none has.
- */
- id = of_alias_get_id(dssdev->dev->of_node, "display");
- if (id < 0)
- id = disp_num_counter++;
-
- /* Use 'label' property for name, if it exists */
- of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
-
- if (dssdev->name == NULL)
- dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
- "display%u", id);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_init);
-
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm)
-{
- struct drm_display_mode *mode;
-
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
-
- drm_display_mode_from_videomode(vm, mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
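
display.c is removed outright; a connector driver that used omapdss_display_get_modes() now open-codes the same few drm core calls. A sketch of the equivalent, assuming a single fixed videomode:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <video/videomode.h>

/* Illustrative stand-in for the removed helper. */
static int sketch_get_modes(struct drm_connector *connector,
			    const struct videomode *vm)
{
	struct drm_display_mode *mode = drm_mode_create(connector->dev);

	if (!mode)
		return 0;

	drm_display_mode_from_videomode(vm, mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	return 1;
}
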
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 1d2992daef40..030f997eccd0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -641,7 +641,6 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_port = port_num;
- out->owner = THIS_MODULE;
r = omapdss_device_init_output(out, &dpi->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 735a4e9027d0..8e11612f5fe1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -14,7 +14,9 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
@@ -33,6 +35,9 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
#include <video/mipi_display.h>
#include "omapdss.h"
@@ -40,73 +45,7 @@
#define DSI_CATCH_MISSING_TE
-struct dsi_reg { u16 module; u16 idx; };
-
-#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
-
-/* DSI Protocol Engine */
-
-#define DSI_PROTO 0
-#define DSI_PROTO_SZ 0x200
-
-#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
-#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
-#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
-#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
-#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
-#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
-#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
-#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
-#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
-#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
-#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
-#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
-#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
-#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
-#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
-#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
-#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
-#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
-#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
-#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
-#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
-#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
-#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
-#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
-#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
-
-/* DSIPHY_SCP */
-
-#define DSI_PHY 1
-#define DSI_PHY_OFFSET 0x200
-#define DSI_PHY_SZ 0x40
-
-#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
-#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
-#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
-#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
-#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
-
-/* DSI_PLL_CTRL_SCP */
-
-#define DSI_PLL 2
-#define DSI_PLL_OFFSET 0x300
-#define DSI_PLL_SZ 0x20
-
-#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
-#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
-#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
-#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
-#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+#include "dsi.h"
#define REG_GET(dsi, idx, start, end) \
FLD_GET(dsi_read_reg(dsi, idx), start, end)
@@ -114,324 +53,36 @@ struct dsi_reg { u16 module; u16 idx; };
#define REG_FLD_MOD(dsi, idx, val, start, end) \
dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
-/* Global interrupts */
-#define DSI_IRQ_VC0 (1 << 0)
-#define DSI_IRQ_VC1 (1 << 1)
-#define DSI_IRQ_VC2 (1 << 2)
-#define DSI_IRQ_VC3 (1 << 3)
-#define DSI_IRQ_WAKEUP (1 << 4)
-#define DSI_IRQ_RESYNC (1 << 5)
-#define DSI_IRQ_PLL_LOCK (1 << 7)
-#define DSI_IRQ_PLL_UNLOCK (1 << 8)
-#define DSI_IRQ_PLL_RECALL (1 << 9)
-#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
-#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
-#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
-#define DSI_IRQ_TE_TRIGGER (1 << 16)
-#define DSI_IRQ_ACK_TRIGGER (1 << 17)
-#define DSI_IRQ_SYNC_LOST (1 << 18)
-#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
-#define DSI_IRQ_TA_TIMEOUT (1 << 20)
-#define DSI_IRQ_ERROR_MASK \
- (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
- DSI_IRQ_TA_TIMEOUT)
-#define DSI_IRQ_CHANNEL_MASK 0xf
-
-/* Virtual channel interrupts */
-#define DSI_VC_IRQ_CS (1 << 0)
-#define DSI_VC_IRQ_ECC_CORR (1 << 1)
-#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
-#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
-#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
-#define DSI_VC_IRQ_BTA (1 << 5)
-#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
-#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
-#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
-#define DSI_VC_IRQ_ERROR_MASK \
- (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
- DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
- DSI_VC_IRQ_FIFO_TX_UDF)
-
-/* ComplexIO interrupts */
-#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
-#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
-#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
-#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
-#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
-#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
-#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
-#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
-#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
-#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
-#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
-#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
-#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
-#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
-#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
-#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
-#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
-#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
-#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
-#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
-#define DSI_CIO_IRQ_ERROR_MASK \
- (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
- DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
- DSI_CIO_IRQ_ERRSYNCESC5 | \
- DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
- DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
- DSI_CIO_IRQ_ERRESC5 | \
- DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
- DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
- DSI_CIO_IRQ_ERRCONTROL5 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
-
-typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
-struct dsi_data;
-
-static int dsi_display_init_dispc(struct dsi_data *dsi);
-static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
-
-/* DSI PLL HSDIV indices */
-#define HSDIV_DISPC 0
-#define HSDIV_DSI 1
-
-#define DSI_MAX_NR_ISRS 2
-#define DSI_MAX_NR_LANES 5
-
-enum dsi_model {
- DSI_MODEL_OMAP3,
- DSI_MODEL_OMAP4,
- DSI_MODEL_OMAP5,
-};
-
-enum dsi_lane_function {
- DSI_LANE_UNUSED = 0,
- DSI_LANE_CLK,
- DSI_LANE_DATA1,
- DSI_LANE_DATA2,
- DSI_LANE_DATA3,
- DSI_LANE_DATA4,
-};
-
-struct dsi_lane_config {
- enum dsi_lane_function function;
- u8 polarity;
-};
-
-struct dsi_isr_data {
- omap_dsi_isr_t isr;
- void *arg;
- u32 mask;
-};
-
-enum fifo_size {
- DSI_FIFO_SIZE_0 = 0,
- DSI_FIFO_SIZE_32 = 1,
- DSI_FIFO_SIZE_64 = 2,
- DSI_FIFO_SIZE_96 = 3,
- DSI_FIFO_SIZE_128 = 4,
-};
-
-enum dsi_vc_source {
- DSI_VC_SOURCE_L4 = 0,
- DSI_VC_SOURCE_VP,
-};
-
-struct dsi_irq_stats {
- unsigned long last_reset;
- unsigned int irq_count;
- unsigned int dsi_irqs[32];
- unsigned int vc_irqs[4][32];
- unsigned int cio_irqs[32];
-};
-
-struct dsi_isr_tables {
- struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
-};
-
-struct dsi_clk_calc_ctx {
- struct dsi_data *dsi;
- struct dss_pll *pll;
-
- /* inputs */
-
- const struct omap_dss_dsi_config *config;
-
- unsigned long req_pck_min, req_pck_nom, req_pck_max;
-
- /* outputs */
-
- struct dss_pll_clock_info dsi_cinfo;
- struct dispc_clock_info dispc_cinfo;
-
- struct videomode vm;
- struct omap_dss_dsi_videomode_timings dsi_vm;
-};
-
-struct dsi_lp_clock_info {
- unsigned long lp_clk;
- u16 lp_clk_div;
-};
-
-struct dsi_module_id_data {
- u32 address;
- int id;
-};
-
-enum dsi_quirks {
- DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
- DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
- DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
- DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
- DSI_QUIRK_GNQ = (1 << 4),
- DSI_QUIRK_PHY_DCC = (1 << 5),
-};
-
-struct dsi_of_data {
- enum dsi_model model;
- const struct dss_pll_hw *pll_hw;
- const struct dsi_module_id_data *modules;
- unsigned int max_fck_freq;
- unsigned int max_pll_lpdiv;
- enum dsi_quirks quirks;
-};
-
-struct dsi_data {
- struct device *dev;
- void __iomem *proto_base;
- void __iomem *phy_base;
- void __iomem *pll_base;
-
- const struct dsi_of_data *data;
- int module_id;
-
- int irq;
-
- bool is_enabled;
-
- struct clk *dss_clk;
- struct regmap *syscon;
- struct dss_device *dss;
-
- struct dispc_clock_info user_dispc_cinfo;
- struct dss_pll_clock_info user_dsi_cinfo;
+static int dsi_init_dispc(struct dsi_data *dsi);
+static void dsi_uninit_dispc(struct dsi_data *dsi);
- struct dsi_lp_clock_info user_lp_cinfo;
- struct dsi_lp_clock_info current_lp_cinfo;
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);
- struct dss_pll pll;
-
- bool vdds_dsi_enabled;
- struct regulator *vdds_dsi_reg;
-
- struct {
- enum dsi_vc_source source;
- struct omap_dss_device *dssdev;
- enum fifo_size tx_fifo_size;
- enum fifo_size rx_fifo_size;
- int vc_id;
- } vc[4];
-
- struct mutex lock;
- struct semaphore bus_lock;
-
- spinlock_t irq_lock;
- struct dsi_isr_tables isr_tables;
- /* space for a copy used by the interrupt handler */
- struct dsi_isr_tables isr_tables_copy;
-
- int update_channel;
-#ifdef DSI_PERF_MEASURE
- unsigned int update_bytes;
-#endif
-
- bool te_enabled;
- bool ulps_enabled;
-
- void (*framedone_callback)(int, void *);
- void *framedone_data;
-
- struct delayed_work framedone_timeout_work;
-
-#ifdef DSI_CATCH_MISSING_TE
- struct timer_list te_timer;
-#endif
-
- unsigned long cache_req_pck;
- unsigned long cache_clk_freq;
- struct dss_pll_clock_info cache_cinfo;
-
- u32 errors;
- spinlock_t errors_lock;
-#ifdef DSI_PERF_MEASURE
- ktime_t perf_setup_time;
- ktime_t perf_start_time;
-#endif
- int debug_read;
- int debug_write;
- struct {
- struct dss_debugfs_entry *irqs;
- struct dss_debugfs_entry *regs;
- struct dss_debugfs_entry *clks;
- } debugfs;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dsi_irq_stats irq_stats;
-#endif
-
- unsigned int num_lanes_supported;
- unsigned int line_buffer_size;
-
- struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
- unsigned int num_lanes_used;
-
- unsigned int scp_clk_refcount;
-
- struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
- enum omap_dss_dsi_pixel_format pix_fmt;
- enum omap_dss_dsi_mode mode;
- struct omap_dss_dsi_videomode_timings vm_timings;
-
- struct omap_dss_device output;
-};
-
-struct dsi_packet_sent_handler_data {
- struct dsi_data *dsi;
- struct completion *completion;
-};
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg);
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
+/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
+#define VC_VIDEO 0
+#define VC_CMD 1
+
+#define drm_bridge_to_dsi(bridge) \
+ container_of(bridge, struct dsi_data, bridge)
+
static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
return dev_get_drvdata(dssdev->dev);
}
+static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dsi_data, host);
+}
+
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
@@ -461,17 +112,13 @@ static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
return __raw_readl(base + idx.idx);
}
-static void dsi_bus_lock(struct omap_dss_device *dssdev)
+static void dsi_bus_lock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
down(&dsi->bus_lock);
}
-static void dsi_bus_unlock(struct omap_dss_device *dssdev)
+static void dsi_bus_unlock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
up(&dsi->bus_lock);
}
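
The hunks above fix the virtual channel assignment — VC_VIDEO (0, per the quirk comment) feeds the video port and VC_CMD (1) carries commands — while msg->channel supplies the DSI VC ID that goes on the wire. With the driver acting as a mipi_dsi_host, a peripheral reaches _omap_dsi_host_transfer() through the standard API; assumed usage, not taken from this patch:

#include <drm/drm_mipi_dsi.h>

/* A panel on this host sends DCS commands the generic way; the core
 * routes them into the host's ->transfer() implementation. */
static int sketch_panel_init(struct mipi_dsi_device *dsidev)
{
	int r;

	r = mipi_dsi_dcs_exit_sleep_mode(dsidev);
	if (r < 0)
		return r;

	return mipi_dsi_dcs_set_display_on(dsidev);
}
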
@@ -514,22 +161,6 @@ static inline bool wait_for_bit_change(struct dsi_data *dsi,
return false;
}
-static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
-{
- switch (fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- case OMAP_DSS_DSI_FMT_RGB666:
- return 24;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- return 18;
- case OMAP_DSS_DSI_FMT_RGB565:
- return 16;
- default:
- BUG();
- return 0;
- }
-}
-
#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
@@ -623,7 +254,7 @@ static void print_irq_status(u32 status)
#undef PIS
}
-static void print_irq_status_vc(int channel, u32 status)
+static void print_irq_status_vc(int vc, u32 status)
{
if (status == 0)
return;
@@ -634,7 +265,7 @@ static void print_irq_status_vc(int channel, u32 status)
#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
- channel,
+ vc,
status,
PIS(CS),
PIS(ECC_CORR),
@@ -1015,7 +646,7 @@ static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
return r;
}
-static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1024,18 +655,18 @@ static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_register_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1044,49 +675,11 @@ static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_unregister_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
@@ -1819,56 +1412,6 @@ static void dsi_cio_timings(struct dsi_data *dsi)
dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
-/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
- unsigned int mask_p,
- unsigned int mask_n)
-{
- int i;
- u32 l;
- u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
-
- l = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- unsigned int p = dsi->lanes[i].polarity;
-
- if (mask_p & (1 << i))
- l |= 1 << (i * 2 + (p ? 0 : 1));
-
- if (mask_n & (1 << i))
- l |= 1 << (i * 2 + (p ? 1 : 0));
- }
-
- /*
- * Bits in REGLPTXSCPDAT4TO0DXDY:
- * 17: DY0 18: DX0
- * 19: DY1 20: DX1
- * 21: DY2 22: DX2
- * 23: DY3 24: DX3
- * 25: DY4 26: DX4
- */
-
- /* Set the lane override configuration */
-
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
-
- /* Enable lane override */
-
- /* ENLPTXSCPDAT */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
-}
-
-static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
-{
- /* Disable lane override */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
- /* Reset the lane override configuration */
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
-}
-
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
int t, i;
@@ -2043,32 +1586,6 @@ static int dsi_cio_init(struct dsi_data *dsi)
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsi, DSI_TIMING1, l);
- if (dsi->ulps_enabled) {
- unsigned int mask_p;
- int i;
-
- DSSDBG("manual ulps exit\n");
-
- /* ULPS is exited by Mark-1 state for 1ms, followed by
- * stop state. DSS HW cannot do this via the normal
- * ULPS exit sequence, as after reset the DSS HW thinks
- * that we are not in ULPS mode, and refuses to send the
- * sequence. So we need to send the ULPS exit sequence
- * manually by setting positive lines high and negative lines
- * low for 1ms.
- */
-
- mask_p = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask_p |= 1 << i;
- }
-
- dsi_cio_enable_lane_override(dsi, mask_p, 0);
- }
-
r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
@@ -2087,29 +1604,15 @@ static int dsi_cio_init(struct dsi_data *dsi)
if (r)
goto err_tx_clk_esc_rst;
- if (dsi->ulps_enabled) {
- /* Keep Mark-1 state for 1ms (as per DSI spec) */
- ktime_t wait = ns_to_ktime(1000 * 1000);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
-
- /* Disable the override. The lanes should be set to Mark-11
- * state by the HW */
- dsi_cio_disable_lane_override(dsi);
- }
-
/* FORCE_TX_STOP_MODE_IO */
REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
dsi_cio_timings(dsi);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- /* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsi, DSI_CLK_CTRL,
- dsi->vm_timings.ddr_clk_always_on, 13, 13);
- }
-
- dsi->ulps_enabled = false;
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL,
+ !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
+ 13, 13);
DSSDBG("CIO init done\n");
@@ -2120,8 +1623,6 @@ err_tx_clk_esc_rst:
err_cio_pwr_dom:
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
- if (dsi->ulps_enabled)
- dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
@@ -2218,9 +1719,9 @@ static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
return 0;
}
-static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
{
- return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
+ return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
@@ -2228,14 +1729,14 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask)
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = vp_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
u8 bit = dsi->te_enabled ? 30 : 31;
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
complete(vp_data->completion);
}
-static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
@@ -2247,13 +1748,13 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
bit = dsi->te_enabled ? 30 : 31;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
@@ -2262,12 +1763,12 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
@@ -2278,13 +1779,13 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = l4_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
complete(l4_data->completion);
}
-static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
@@ -2293,13 +1794,13 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
};
int r = 0;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
@@ -2308,47 +1809,47 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
-static int dsi_sync_vc(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int vc)
{
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
- if (!dsi_vc_is_enabled(dsi, channel))
+ if (!dsi_vc_is_enabled(dsi, vc))
return 0;
- switch (dsi->vc[channel].source) {
+ switch (dsi->vc[vc].source) {
case DSI_VC_SOURCE_VP:
- return dsi_sync_vc_vp(dsi, channel);
+ return dsi_sync_vc_vp(dsi, vc);
case DSI_VC_SOURCE_L4:
- return dsi_sync_vc_l4(dsi, channel);
+ return dsi_sync_vc_l4(dsi, vc);
default:
BUG();
return -EINVAL;
}
}
-static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
{
- DSSDBG("dsi_vc_enable channel %d, enable %d\n",
- channel, enable);
+ DSSDBG("dsi_vc_enable vc %d, enable %d\n",
+ vc, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
@@ -2356,17 +1857,17 @@ static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
return 0;
}
-static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
{
u32 r;
- DSSDBG("Initial config of virtual channel %d", channel);
+ DSSDBG("Initial config of VC %d", vc);
- r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
- channel);
+ vc);
r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
@@ -2381,74 +1882,39 @@ static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
+ dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);
- dsi->vc[channel].source = DSI_VC_SOURCE_L4;
+ dsi->vc[vc].source = DSI_VC_SOURCE_L4;
}
-static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
- enum dsi_vc_source source)
-{
- if (dsi->vc[channel].source == source)
- return 0;
-
- DSSDBG("Source config of virtual channel %d", channel);
-
- dsi_sync_vc(dsi, channel);
-
- dsi_vc_enable(dsi, channel, 0);
-
- /* VC_BUSY */
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
- DSSERR("vc(%d) busy when trying to config for VP\n", channel);
- return -EIO;
- }
-
- /* SOURCE, 0 = L4, 1 = video port */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
-
- /* DCS_CMD_ENABLE */
- if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
- bool enable = source == DSI_VC_SOURCE_VP;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
- }
-
- dsi_vc_enable(dsi, channel, 1);
-
- dsi->vc[channel].source = source;
-
- return 0;
-}
-
-static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
+static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
bool enable)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+ DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);
+
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
+ return;
WARN_ON(!dsi_bus_is_locked(dsi));
- dsi_vc_enable(dsi, channel, 0);
+ dsi_vc_enable(dsi, vc, 0);
dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);
- dsi_vc_enable(dsi, channel, 1);
+ dsi_vc_enable(dsi, vc, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
-
- /* start the DDR clock by sending a NULL packet */
- if (dsi->vm_timings.ddr_clk_always_on && enable)
- dsi_vc_send_null(dsi, channel);
}
-static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
{
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2494,13 +1960,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2515,7 +1981,7 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(dsi, channel);
+ dsi_vc_flush_long_data(dsi, vc);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -2523,35 +1989,35 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
return 0;
}
-static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
{
if (dsi->debug_write || dsi->debug_read)
- DSSDBG("dsi_vc_send_bta %d\n", channel);
+ DSSDBG("dsi_vc_send_bta %d\n", vc);
WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
+ dsi_vc_flush_receive_data(dsi, vc);
}
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */
/* flush posted write */
- dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ dsi_read_reg(dsi, DSI_VC_CTRL(vc));
return 0;
}
-static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
+ r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
@@ -2561,7 +2027,7 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
if (r)
goto err1;
- r = dsi_vc_send_bta(dsi, channel);
+ r = dsi_vc_send_bta(dsi, vc);
if (r)
goto err2;
@@ -2582,29 +2048,30 @@ err2:
dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
+ dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
-static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
- u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
+ int channel, u8 data_type, u16 len,
+ u8 ecc)
{
u32 val;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsi));
- data_id = data_type | dsi->vc[channel].vc_id << 6;
+ data_id = data_type | channel << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
}
-static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
@@ -2614,33 +2081,31 @@ static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
}
-static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
- u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
/*u32 val; */
int i;
- u8 *p;
+ const u8 *p;
int r = 0;
u8 b1, b2, b3, b4;
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+ DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
/* len + header */
- if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
+ if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
-
- dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
+ dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);
- p = data;
- for (i = 0; i < len >> 2; i++) {
+ p = msg->tx_buf;
+ for (i = 0; i < msg->tx_len >> 2; i++) {
if (dsi->debug_write)
DSSDBG("\tsending full packet %d\n", i);
@@ -2649,10 +2114,10 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
}
- i = len % 4;
+ i = msg->tx_len % 4;
if (i) {
b1 = 0; b2 = 0; b3 = 0;
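
A worked example of the header assembled by dsi_vc_write_long_header() above: a DCS long write (type 0x39) on DSI channel 1 with a 4-byte payload packs as

	data_id = 0x39 | (1 << 6)             = 0x79
	val     = 0x79 | (4 << 8) | (0 << 24) = 0x00000479

with the ECC byte left at zero (callers here pass ecc = 0; the DSI IP presumably inserts the real ECC on the bus).
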
@@ -2674,194 +2139,89 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
break;
}
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
}
return r;
}
-static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
- u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
+ struct mipi_dsi_packet pkt;
u32 r;
- u8 data_id;
+
+ r = mipi_dsi_create_packet(&pkt, msg);
+ if (r < 0)
+ return r;
WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
- channel,
- data_type, data & 0xff, (data >> 8) & 0xff);
-
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
+ DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
+ vc, msg->type, pkt.header[1], pkt.header[2]);
- if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
+ if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
- data_id = data_type | dsi->vc[channel].vc_id << 6;
-
- r = (data_id << 0) | (data << 8) | (ecc << 24);
+ r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
+ pkt.header[0];
- dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);
return 0;
}
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
-{
- return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
-}
-
-static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
- u8 *data, int len,
- enum dss_dsi_content_type type)
-{
- int r;
-
- if (len == 0) {
- BUG_ON(type == DSS_DSI_CONTENT_DCS);
- r = dsi_vc_send_short(dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
- } else if (len == 1) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
- } else if (len == 2) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- data[0] | (data[1] << 8), 0);
- } else {
- r = dsi_vc_send_long(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
- }
-
- return r;
-}
-
-static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_NULL_PACKET,
+ };
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
+ return dsi_vc_send_long(dsi, vc, &msg);
}
-static int dsi_vc_write_common(struct omap_dss_device *dssdev,
- int channel, u8 *data, int len,
- enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
- if (r)
- goto err;
-
- r = dsi_vc_send_bta_sync(dssdev, channel);
- if (r)
- goto err;
-
- /* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
- DSSERR("rx fifo not empty after write, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
- r = -EIO;
- goto err;
- }
-
- return 0;
-err:
- DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
- channel, data[0], len);
- return r;
-}
-
-static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
-}
+ if (mipi_dsi_packet_format_is_short(msg->type))
+ r = dsi_vc_send_short(dsi, vc, msg);
+ else
+ r = dsi_vc_send_long(dsi, vc, msg);
-static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
- u8 dcs_cmd)
-{
- int r;
+ if (r < 0)
+ return r;
- if (dsi->debug_read)
- DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
- channel, dcs_cmd);
+ /*
+ * TODO: we do not always have to do the BTA sync, for example
+ * we can improve performance by setting the update window
+ * information without sending BTA sync between the commands.
+ * In that case we can return early.
+ */
- r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r) {
- DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
- " failed\n", channel, dcs_cmd);
+ DSSERR("bta sync failed\n");
return r;
}
- return 0;
-}
-
-static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
- u8 *reqdata, int reqlen)
-{
- u16 data;
- u8 data_type;
- int r;
-
- if (dsi->debug_read)
- DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
- channel, reqlen);
-
- if (reqlen == 0) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- } else if (reqlen == 1) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- } else if (reqlen == 2) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = reqdata[0] | (reqdata[1] << 8);
- } else {
- BUG();
- return -EINVAL;
- }
-
- r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
- if (r) {
- DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
- " failed\n", channel, reqlen);
- return r;
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
+ DSSERR("rx fifo not empty after write, dumping data:\n");
+ dsi_vc_flush_receive_data(dsi, vc);
+ return -EIO;
}
return 0;
}
-static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf,
int buflen, enum dss_dsi_content_type type)
{
u32 val;
@@ -2869,13 +2229,13 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
int r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
@@ -2939,7 +2299,7 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
for (w = 0; w < len + 2;) {
int b;
val = dsi_read_reg(dsi,
- DSI_VC_SHORT_PACKET_HEADER(channel));
+ DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
(val >> 0) & 0xff,
@@ -2963,168 +2323,73 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
}
err:
- DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
+ DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
return r;
}
-static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
+static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
int r;
- r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
+ if (dsi->debug_read)
+ DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd);
+
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
- if (r != buflen) {
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
return 0;
err:
- DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
+ DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd);
return r;
}
-static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
+static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
- return r;
+ goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
- return r;
+ goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
- return r;
-
- if (r != buflen) {
- r = -EIO;
- return r;
- }
-
- return 0;
-}
-
-static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
- u16 len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_send_short(dsi, channel,
- MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
-}
-
-static int dsi_enter_ulps(struct dsi_data *dsi)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int r, i;
- unsigned int mask;
-
- DSSDBG("Entering ULPS");
-
- WARN_ON(!dsi_bus_is_locked(dsi));
-
- WARN_ON(dsi->ulps_enabled);
-
- if (dsi->ulps_enabled)
- return 0;
-
- /* DDR_CLK_ALWAYS_ON */
- if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
- dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
- dsi_if_enable(dsi, 1);
- }
-
- dsi_sync_vc(dsi, 0);
- dsi_sync_vc(dsi, 1);
- dsi_sync_vc(dsi, 2);
- dsi_sync_vc(dsi, 3);
-
- dsi_force_tx_stop_mode_io(dsi);
-
- dsi_vc_enable(dsi, 0, false);
- dsi_vc_enable(dsi, 1, false);
- dsi_vc_enable(dsi, 2, false);
- dsi_vc_enable(dsi, 3, false);
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
- DSSERR("HS busy when enabling ULPS\n");
- return -EIO;
- }
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
- DSSERR("LP busy when enabling ULPS\n");
- return -EIO;
- }
-
- r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
- if (r)
- return r;
-
- mask = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask |= 1 << i;
- }
- /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
- /* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
+ goto err;
- if (wait_for_completion_timeout(&completion,
- msecs_to_jiffies(1000)) == 0) {
- DSSERR("ULPS enable timeout\n");
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
-
- /* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
-
- dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
-
- dsi_if_enable(dsi, false);
-
- dsi->ulps_enabled = true;
-
return 0;
-
err:
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len);
return r;
}
@@ -3241,7 +2506,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
@@ -3372,7 +2637,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
@@ -3500,7 +2765,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
- switch (dsi_get_pixel_size(dsi->pix_fmt)) {
+ switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
case 16:
buswidth = 0;
break;
@@ -3621,7 +2886,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int tl, t_he, width_bytes;
hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
@@ -3659,12 +2924,9 @@ static void dsi_proto_timings(struct dsi_data *dsi)
}
}
-static int dsi_configure_pins(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg)
+static int dsi_configure_pins(struct dsi_data *dsi,
+ int num_pins, const u32 *pins)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int num_pins;
- const int *pins;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
int num_lanes;
int i;
@@ -3677,9 +2939,6 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
DSI_LANE_DATA4,
};
- num_pins = pin_cfg->num_pins;
- pins = pin_cfg->pins;
-
if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
|| num_pins % 2 != 0)
return -EINVAL;
@@ -3691,15 +2950,15 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
for (i = 0; i < num_pins; i += 2) {
u8 lane, pol;
- int dx, dy;
+ u32 dx, dy;
dx = pins[i];
dy = pins[i + 1];
- if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
+ if (dx >= dsi->num_lanes_supported * 2)
return -EINVAL;
- if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
+ if (dy >= dsi->num_lanes_supported * 2)
return -EINVAL;
if (dx & 1) {
@@ -3725,86 +2984,102 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
return 0;
}
-static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
+static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
u8 data_type;
u16 word_count;
- int r;
- r = dsi_display_init_dispc(dsi);
- if (r)
- return r;
+ switch (dsi->pix_fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- switch (dsi->pix_fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case OMAP_DSS_DSI_FMT_RGB666:
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB565:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
- r = -EINVAL;
- goto err_pix_fmt;
- }
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);
- /* MODE, 1 = video mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
- word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
+ word_count, 0);
- dsi_vc_write_long_header(dsi, channel, data_type,
- word_count, 0);
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
+ return 0;
+}
+
+static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
+{
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
+
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);
+
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
+}
+
+static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ r = dsi_init_dispc(dsi);
+ if (r) {
+ dev_err(dsi->dev, "failed to init dispc!\n");
+ return;
+ }
+
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ r = dsi_enable_video_mode(dsi, vc);
+ if (r)
+ goto err_video_mode;
}
r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ dsi_vc_enable(dsi, vc, false);
}
-err_pix_fmt:
- dsi_display_uninit_dispc(dsi);
- return r;
+err_video_mode:
+ dsi_uninit_dispc(dsi);
+ dev_err(dsi->dev, "failed to enable DSI encoder!\n");
+ return;
}
-static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
+static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
-
- /* MODE, 0 = command mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
-
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
- }
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ dsi_disable_video_mode(dsi, vc);
dss_mgr_disable(&dsi->output);
- dsi_display_uninit_dispc(dsi);
+ dsi_uninit_dispc(dsi);
}
static void dsi_update_screen_dispc(struct dsi_data *dsi)
@@ -3817,16 +3092,14 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
unsigned int packet_len;
u32 l;
int r;
- const unsigned channel = dsi->update_channel;
+ const unsigned vc = dsi->update_vc;
const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
-
- bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
bytespf = bytespl * h;
@@ -3845,16 +3118,16 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
- dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -3877,7 +3150,7 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
* for TE is longer than the timer allows */
REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(dsi, channel);
+ dsi_vc_send_bta(dsi, vc);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -3902,7 +3175,7 @@ static void dsi_handle_framedone(struct dsi_data *dsi, int error)
REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
- dsi->framedone_callback(error, dsi->framedone_data);
+ dsi_bus_unlock(dsi);
if (!error)
dsi_perf_show(dsi, "DISPC");
@@ -3935,30 +3208,91 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
+ DSSDBG("Framedone received!\n");
+
dsi_handle_framedone(dsi, 0);
}
-static int dsi_update(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data)
+static int _dsi_update(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi_perf_mark_setup(dsi);
- dsi->update_channel = channel;
-
- dsi->framedone_callback = callback;
- dsi->framedone_data = data;
-
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
- dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsi);
return 0;
}
+static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
+{
+ const u8 payload[] = { MIPI_DCS_NOP };
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_SHORT_WRITE,
+ .tx_len = 1,
+ .tx_buf = payload,
+ };
+
+ WARN_ON(!dsi_bus_is_locked(dsi));
+
+ return _omap_dsi_host_transfer(dsi, vc, &msg);
+}
+
+static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->video_enabled) {
+ r = -EIO;
+ goto err;
+ }
+
+ if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ DSSDBG("dsi_update_channel: %d", vc);
+
+ /*
+ * Send NOP between the frames. If we don't send something here, the
+ * updates stop working. This is probably related to the DSI spec stating
+ * that the DSI host should transition to LP at least once per frame.
+ */
+ r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
+ if (r < 0) {
+ DSSWARN("failed to send nop between frames: %d\n", r);
+ goto err;
+ }
+
+ dsi->update_vc = vc;
+
+ if (dsi->te_enabled && dsi->te_gpio) {
+ schedule_delayed_work(&dsi->te_timeout_work,
+ msecs_to_jiffies(250));
+ atomic_set(&dsi->do_ext_te_update, 1);
+ } else {
+ _dsi_update(dsi);
+ }
+
+ return 0;
+
+err:
+ dsi_bus_unlock(dsi);
+ return r;
+}
+
+static int dsi_update_all(struct omap_dss_device *dssdev)
+{
+ return dsi_update_channel(dssdev, VC_VIDEO);
+}
+
/* Display funcs */
static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
@@ -3983,12 +3317,12 @@ static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dispc(struct dsi_data *dsi)
+static int dsi_init_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
int r;
- dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
@@ -4013,7 +3347,7 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dsi->mgr_config.video_port_width =
- dsi_get_pixel_size(dsi->pix_fmt);
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
@@ -4024,19 +3358,19 @@ err1:
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
err:
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct dsi_data *dsi)
+static void dsi_uninit_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
@@ -4055,7 +3389,37 @@ static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dsi(struct dsi_data *dsi)
+static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
+{
+ /* Setup VC_CMD for LP and cpu transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
+ dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;
+
+ /* Setup VC_VIDEO for HS and dispc transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
+ dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;
+
+ if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
+ !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */
+
+ dsi_vc_enable(dsi, VC_CMD, 1);
+ dsi_vc_enable(dsi, VC_VIDEO, 1);
+
+ dsi_if_enable(dsi, 1);
+
+ dsi_force_tx_stop_mode_io(dsi);
+
+ /* start the DDR clock by sending a NULL packet */
+ if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
+}
+
+static int dsi_init_dsi(struct dsi_data *dsi)
{
int r;
@@ -4097,13 +3461,7 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
if (r)
goto err3;
- /* enable interface */
- dsi_vc_enable(dsi, 0, 1);
- dsi_vc_enable(dsi, 1, 1);
- dsi_vc_enable(dsi, 2, 1);
- dsi_vc_enable(dsi, 3, 1);
- dsi_if_enable(dsi, 1);
- dsi_force_tx_stop_mode_io(dsi);
+ dsi_setup_dsi_vcs(dsi);
return 0;
err3:
@@ -4119,12 +3477,8 @@ err0:
return r;
}
-static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
- bool enter_ulps)
+static void dsi_uninit_dsi(struct dsi_data *dsi)
{
- if (enter_ulps && !dsi->ulps_enabled)
- dsi_enter_ulps(dsi);
-
/* disable interface */
dsi_if_enable(dsi, 0);
dsi_vc_enable(dsi, 0, 0);
@@ -4136,21 +3490,19 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dsi_cio_uninit(dsi);
dss_pll_disable(&dsi->pll);
- if (disconnect_lanes) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
}
-static void dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_enable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- DSSDBG("dsi_display_enable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
r = dsi_runtime_get(dsi);
@@ -4159,10 +3511,12 @@ static void dsi_display_enable(struct omap_dss_device *dssdev)
_dsi_initialize_irq(dsi);
- r = dsi_display_init_dsi(dsi);
+ r = dsi_init_dsi(dsi);
if (r)
goto err_init_dsi;
+ dsi->iface_enabled = true;
+
mutex_unlock(&dsi->lock);
return;
@@ -4171,18 +3525,16 @@ err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
- DSSDBG("dsi_display_enable FAILED\n");
+ DSSDBG("dsi_enable FAILED\n");
}
-static void dsi_display_disable(struct omap_dss_device *dssdev,
- bool disconnect_lanes, bool enter_ulps)
+static void dsi_disable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- DSSDBG("dsi_display_disable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(!dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
dsi_sync_vc(dsi, 0);
@@ -4190,18 +3542,26 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
- dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
+ dsi_uninit_dsi(dsi);
dsi_runtime_put(dsi);
+ dsi->iface_enabled = false;
+
mutex_unlock(&dsi->lock);
}
-static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int dsi_enable_te(struct dsi_data *dsi, bool enable)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi->te_enabled = enable;
+
+ if (dsi->te_gpio) {
+ if (enable)
+ enable_irq(dsi->te_irq);
+ else
+ disable_irq(dsi->te_irq);
+ }
+
return 0;
}
@@ -4351,7 +3711,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
unsigned long pck, txbyteclk;
clkin = clk_get_rate(dsi->pll.clkin);
- bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
ndl = dsi->num_lanes_used - 1;
/*
@@ -4384,7 +3744,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
unsigned long byteclk = hsclk / 4;
@@ -4531,7 +3891,6 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hfp_blanking_mode = 1;
dsi_vm->hbp_blanking_mode = 1;
- dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
dsi_vm->window_sync = 4;
/* setup DISPC videomode */
@@ -4651,7 +4010,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
unsigned long pll_min;
unsigned long pll_max;
int ndl = dsi->num_lanes_used - 1;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
unsigned long byteclk_min;
clkin = clk_get_rate(dsi->pll.clkin);
@@ -4684,39 +4043,62 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
dsi_vm_calc_pll_cb, ctx);
}
-static int dsi_set_config(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *config)
+static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- struct dsi_clk_calc_ctx ctx;
+
+ return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
+}
+
+static int __dsi_calc_config(struct dsi_data *dsi,
+ const struct drm_display_mode *mode,
+ struct dsi_clk_calc_ctx *ctx)
+{
+ struct omap_dss_dsi_config cfg = dsi->config;
+ struct videomode vm;
bool ok;
int r;
- mutex_lock(&dsi->lock);
+ drm_display_mode_to_videomode(mode, &vm);
- dsi->pix_fmt = config->pixel_format;
- dsi->mode = config->mode;
+ cfg.vm = &vm;
+ cfg.mode = dsi->mode;
+ cfg.pixel_format = dsi->pix_fmt;
- if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
- ok = dsi_vm_calc(dsi, config, &ctx);
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ ok = dsi_vm_calc(dsi, &cfg, ctx);
else
- ok = dsi_cm_calc(dsi, config, &ctx);
+ ok = dsi_cm_calc(dsi, &cfg, ctx);
- if (!ok) {
- DSSERR("failed to find suitable DSI clock settings\n");
- r = -EINVAL;
- goto err;
- }
+ if (!ok)
+ return -EINVAL;
+
+ dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);
+
+ r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
+ cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
+ if (r)
+ return r;
+
+ return 0;
+}
- dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
+static int dsi_set_config(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
- r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
- config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
+ mutex_lock(&dsi->lock);
+
+ r = __dsi_calc_config(dsi, mode, &ctx);
if (r) {
- DSSERR("failed to find suitable DSI LP clock settings\n");
+ DSSERR("failed to find suitable DSI clock settings\n");
goto err;
}
+ dsi->user_lp_cinfo = ctx.lp_cinfo;
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
@@ -4757,12 +4139,12 @@ err:
}
/*
- * Return a hardcoded channel for the DSI output. This should work for
+ * Return a hardcoded dispc channel for the DSI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
+static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi)
{
switch (dsi->data->model) {
case DSI_MODEL_OMAP3:
@@ -4796,59 +4178,75 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
}
}
-static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int i;
+ struct omap_dss_device *dssdev = &dsi->output;
+ int r;
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
- if (!dsi->vc[i].dssdev) {
- dsi->vc[i].dssdev = dssdev;
- *channel = i;
- return 0;
- }
+ dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM));
+
+ switch (msg->type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ case MIPI_DSI_NULL_PACKET:
+ r = dsi_vc_write_common(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ r = dsi_vc_generic_read(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_DCS_READ:
+ r = dsi_vc_dcs_read(dssdev, vc, msg);
+ break;
+ default:
+ r = -EINVAL;
+ break;
}
- DSSERR("cannot get VC for display %s", dssdev->name);
- return -ENOSPC;
-}
-
-static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- if (vc_id < 0 || vc_id > 3) {
- DSSERR("VC ID out of range\n");
- return -EINVAL;
- }
+ if (r < 0)
+ return r;
- if (channel < 0 || channel > 3) {
- DSSERR("Virtual Channel out of range\n");
- return -EINVAL;
- }
+ if (msg->type == MIPI_DSI_DCS_SHORT_WRITE ||
+ msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) {
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
- if (dsi->vc[channel].dssdev != dssdev) {
- DSSERR("Virtual Channel not allocated to display %s\n",
- dssdev->name);
- return -EINVAL;
+ if (cmd == MIPI_DCS_SET_TEAR_OFF)
+ dsi_enable_te(dsi, false);
+ else if (cmd == MIPI_DCS_SET_TEAR_ON)
+ dsi_enable_te(dsi, true);
}
- dsi->vc[channel].vc_id = vc_id;
-
return 0;
}
-static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
+ int vc = VC_CMD;
- if ((channel >= 0 && channel <= 3) &&
- dsi->vc[channel].dssdev == dssdev) {
- dsi->vc[channel].dssdev = NULL;
- dsi->vc[channel].vc_id = 0;
+ dsi_bus_lock(dsi);
+
+ if (!dsi->iface_enabled) {
+ dsi_enable(dsi);
+ schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000));
}
-}
+ r = _omap_dsi_host_transfer(dsi, vc, msg);
+
+ dsi_bus_unlock(dsi);
+
+ return r;
+}
static int dsi_get_clocks(struct dsi_data *dsi)
{
@@ -4865,57 +4263,167 @@ static int dsi_get_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static const struct omapdss_dsi_ops dsi_ops = {
+ .update = dsi_update_all,
+ .is_video_mode = dsi_is_video_mode,
+};
+
+static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct dsi_data *dsi = (struct dsi_data *)dev_id;
+ int old;
+
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ cancel_delayed_work(&dsi->te_timeout_work);
+ _dsi_update(dsi);
+ }
+
+ return IRQ_HANDLED;
}
-static void dsi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct dsi_data *dsi =
+ container_of(work, struct dsi_data, te_timeout_work.work);
+ int old;
+
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ dev_err(dsi->dev, "TE not received for 250ms!\n");
+ _dsi_update(dsi);
+ }
}
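
The TE handoff above is race-free by construction: the update path arms an atomic flag and a 250 ms watchdog, and the TE interrupt and the timeout work both try to consume the flag with atomic_cmpxchg(), so exactly one of them ends up calling _dsi_update(). In outline (lines lifted from the functions above):

    /* Producer (dsi_update_channel): start the watchdog, arm the flag. */
    schedule_delayed_work(&dsi->te_timeout_work, msecs_to_jiffies(250));
    atomic_set(&dsi->do_ext_te_update, 1);

    /* Consumers (TE irq handler, timeout work): the first claim wins. */
    if (atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0) == 1)
            _dsi_update(dsi);       /* the other path reads 0 and backs off */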
-static const struct omap_dss_device_ops dsi_ops = {
- .connect = dsi_connect,
- .disconnect = dsi_disconnect,
- .enable = dsi_display_enable,
+static int omap_dsi_register_te_irq(struct dsi_data *dsi,
+ struct mipi_dsi_device *client)
+{
+ int err;
+ int te_irq;
- .dsi = {
- .bus_lock = dsi_bus_lock,
- .bus_unlock = dsi_bus_unlock,
+ dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN);
+ if (IS_ERR(dsi->te_gpio)) {
+ err = PTR_ERR(dsi->te_gpio);
- .disable = dsi_display_disable,
+ if (err == -ENOENT) {
+ dsi->te_gpio = NULL;
+ return 0;
+ }
- .enable_hs = dsi_vc_enable_hs,
+ dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
+ return err;
+ }
+
+ te_irq = gpiod_to_irq(dsi->te_gpio);
+ if (te_irq < 0) {
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return -EINVAL;
+ }
+
+ dsi->te_irq = te_irq;
- .configure_pins = dsi_configure_pins,
- .set_config = dsi_set_config,
+ irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
- .enable_video_output = dsi_enable_video_output,
- .disable_video_output = dsi_disable_video_output,
+ err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (err) {
+ dev_err(dsi->dev, "request irq failed with %d\n", err);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return err;
+ }
+
+ INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
+ omap_dsi_te_timeout_work_callback);
- .update = dsi_update,
+ dev_dbg(dsi->dev, "Using GPIO TE\n");
- .enable_te = dsi_enable_te,
+ return 0;
+}
- .request_vc = dsi_request_vc,
- .set_vc_id = dsi_set_vc_id,
- .release_vc = dsi_release_vc,
+static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(dsi->te_irq, dsi);
+ cancel_delayed_work(&dsi->te_timeout_work);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ }
+}
- .dcs_write = dsi_vc_dcs_write,
- .dcs_write_nosync = dsi_vc_dcs_write_nosync,
- .dcs_read = dsi_vc_dcs_read,
+static int omap_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
- .gen_write = dsi_vc_generic_write,
- .gen_write_nosync = dsi_vc_generic_write_nosync,
- .gen_read = dsi_vc_generic_read,
+ if (dsi->dsidev) {
+ DSSERR("dsi client already attached\n");
+ return -EBUSY;
+ }
- .bta_sync = dsi_vc_send_bta_sync,
+ if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
+ DSSERR("invalid pixel format\n");
+ return -EINVAL;
+ }
- .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
- },
+ atomic_set(&dsi->do_ext_te_update, 0);
+
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
+ } else {
+ r = omap_dsi_register_te_irq(dsi, client);
+ if (r)
+ return r;
+
+ dsi->mode = OMAP_DSS_DSI_CMD_MODE;
+ }
+
+ dsi->dsidev = client;
+ dsi->pix_fmt = client->format;
+
+ dsi->config.hs_clk_min = 150000000; // TODO: get from client?
+ dsi->config.hs_clk_max = client->hs_rate;
+ dsi->config.lp_clk_min = 7000000; // TODO: get from client?
+ dsi->config.lp_clk_max = client->lp_rate;
+
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
+ else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
+ else
+ dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;
+
+ return 0;
+}
+
+static int omap_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+
+ if (WARN_ON(dsi->dsidev != client))
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (dsi->iface_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+
+ omap_dsi_unregister_te_irq(dsi);
+ dsi->dsidev = NULL;
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
+ .attach = omap_dsi_host_attach,
+ .detach = omap_dsi_host_detach,
+ .transfer = omap_dsi_host_transfer,
};
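
With these ops registered, panel drivers reach the controller through the generic helpers from drm_mipi_dsi.h, all of which funnel into omap_dsi_host_transfer(). An illustrative client fragment (the panel function name is hypothetical):

    /* Sketch of the panel side; the helpers are the stock DRM API. */
    static int example_panel_init(struct mipi_dsi_device *dsi)
    {
            u8 id[3];
            ssize_t n;
            int r;

            r = mipi_dsi_dcs_exit_sleep_mode(dsi);  /* DCS short write */
            if (r < 0)
                    return r;

            /* DCS read, routed to dsi_vc_dcs_read() via .transfer */
            n = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_ID, id, sizeof(id));
            return n < 0 ? n : 0;
    }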
/* -----------------------------------------------------------------------------
@@ -5097,6 +4605,106 @@ static const struct component_ops dsi_component_ops = {
};
/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
+
+ mutex_lock(&dsi->lock);
+ r = __dsi_calc_config(dsi, mode, &ctx);
+ mutex_unlock(&dsi->lock);
+
+ return r ? MODE_CLOCK_RANGE : MODE_OK;
+}
+
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ dsi_set_config(&dsi->output, adjusted_mode);
+}
+
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->iface_enabled)
+ dsi_enable(dsi);
+
+ dsi_enable_video_output(dssdev, VC_VIDEO);
+
+ dsi->video_enabled = true;
+
+ dsi_bus_unlock(dsi);
+}
+
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ dsi->video_enabled = false;
+
+ dsi_disable_video_output(dssdev, VC_VIDEO);
+
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
+static const struct drm_bridge_funcs dsi_bridge_funcs = {
+ .attach = dsi_bridge_attach,
+ .mode_valid = dsi_bridge_mode_valid,
+ .mode_set = dsi_bridge_mode_set,
+ .enable = dsi_bridge_enable,
+ .disable = dsi_bridge_disable,
+};
+
+static void dsi_bridge_init(struct dsi_data *dsi)
+{
+ dsi->bridge.funcs = &dsi_bridge_funcs;
+ dsi->bridge.of_node = dsi->host.dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ drm_bridge_add(&dsi->bridge);
+}
+
+static void dsi_bridge_cleanup(struct dsi_data *dsi)
+{
+ drm_bridge_remove(&dsi->bridge);
+}
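
Note that dsi_bridge_attach() requires DRM_BRIDGE_ATTACH_NO_CONNECTOR, i.e. the display driver rather than the bridge creates the connector. A sketch of the consumer side (variable names illustrative):

    /* Sketch: how a display driver would pick up this bridge from DT. */
    struct drm_bridge *bridge = of_drm_find_bridge(dsi_of_node);
    if (!bridge)
            return -EPROBE_DEFER;   /* dsi_bridge_init() has not run yet */

    return drm_bridge_attach(encoder, bridge, NULL,
                             DRM_BRIDGE_ATTACH_NO_CONNECTOR);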
+
+/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
@@ -5105,23 +4713,26 @@ static int dsi_init_output(struct dsi_data *dsi)
struct omap_dss_device *out = &dsi->output;
int r;
+ dsi_bridge_init(dsi);
+
out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi);
- out->ops = &dsi_ops;
- out->owner = THIS_MODULE;
+ out->dispc_channel = dsi_get_dispc_channel(dsi);
+ out->dsi_ops = &dsi_ops;
out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out, NULL);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dsi->bridge);
+ if (r < 0) {
+ dsi_bridge_cleanup(dsi);
return r;
+ }
omapdss_device_register(out);
@@ -5134,6 +4745,7 @@ static void dsi_uninit_output(struct dsi_data *dsi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+ dsi_bridge_cleanup(dsi);
}
static int dsi_probe_of(struct dsi_data *dsi)
@@ -5142,9 +4754,8 @@ static int dsi_probe_of(struct dsi_data *dsi)
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
- int r, i;
+ int r;
struct device_node *ep;
- struct omap_dsi_pin_config pin_cfg;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
@@ -5172,11 +4783,7 @@ static int dsi_probe_of(struct dsi_data *dsi)
goto err;
}
- pin_cfg.num_pins = num_pins;
- for (i = 0; i < num_pins; ++i)
- pin_cfg.pins[i] = (int)lane_arr[i];
-
- r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ r = dsi_configure_pins(dsi, num_pins, lane_arr);
if (r) {
dev_err(dsi->dev, "failed to configure pins");
goto err;
@@ -5256,6 +4863,18 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ /* sentinel */ }
};
+static void omap_dsi_disable_work_callback(struct work_struct *work)
+{
+ struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
+
+ dsi_bus_lock(dsi);
+
+ if (dsi->iface_enabled && !dsi->video_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
static int dsi_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
@@ -5289,6 +4908,8 @@ static int dsi_probe(struct platform_device *pdev)
INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
+ INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback);
+
#ifdef DSI_CATCH_MISSING_TE
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
@@ -5364,11 +4985,8 @@ static int dsi_probe(struct platform_device *pdev)
}
/* DSI VCs initialization */
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
+ for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
dsi->vc[i].source = DSI_VC_SOURCE_L4;
- dsi->vc[i].dssdev = NULL;
- dsi->vc[i].vc_id = 0;
- }
r = dsi_get_clocks(dsi);
if (r)
@@ -5387,21 +5005,24 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ dsi->host.ops = &omap_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ r = dsi_probe_of(dsi);
if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
+ DSSERR("Invalid DSI DT data\n");
+ goto err_pm_disable;
+ }
+
+ r = mipi_dsi_host_register(&dsi->host);
+ if (r < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
goto err_pm_disable;
}
r = dsi_init_output(dsi);
if (r)
- goto err_of_depopulate;
-
- r = dsi_probe_of(dsi);
- if (r) {
- DSSERR("Invalid DSI DT data\n");
- goto err_uninit_output;
- }
+ goto err_dsi_host_unregister;
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
@@ -5411,8 +5032,8 @@ static int dsi_probe(struct platform_device *pdev)
err_uninit_output:
dsi_uninit_output(dsi);
-err_of_depopulate:
- of_platform_depopulate(dev);
+err_dsi_host_unregister:
+ mipi_dsi_host_unregister(&dsi->host);
err_pm_disable:
pm_runtime_disable(dev);
return r;
@@ -5426,7 +5047,7 @@ static int dsi_remove(struct platform_device *pdev)
dsi_uninit_output(dsi);
- of_platform_depopulate(&pdev->dev);
+ mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.h b/drivers/gpu/drm/omapdrm/dss/dsi.h
new file mode 100644
index 000000000000..601707c0ecc4
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __OMAP_DRM_DSS_DSI_H
+#define __OMAP_DRM_DSS_DSI_H
+
+#include <drm/drm_mipi_dsi.h>
+
+struct dsi_reg {
+ u16 module;
+ u16 idx;
+};
+
+#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
+
+/* DSI Protocol Engine */
+
+#define DSI_PROTO 0
+#define DSI_PROTO_SZ 0x200
+
+#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_PHY 1
+#define DSI_PHY_OFFSET 0x200
+#define DSI_PHY_SZ 0x40
+
+#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL 2
+#define DSI_PLL_OFFSET 0x300
+#define DSI_PLL_SZ 0x20
+
+#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+
+/* Global interrupts */
+#define DSI_IRQ_VC0 (1 << 0)
+#define DSI_IRQ_VC1 (1 << 1)
+#define DSI_IRQ_VC2 (1 << 2)
+#define DSI_IRQ_VC3 (1 << 3)
+#define DSI_IRQ_WAKEUP (1 << 4)
+#define DSI_IRQ_RESYNC (1 << 5)
+#define DSI_IRQ_PLL_LOCK (1 << 7)
+#define DSI_IRQ_PLL_UNLOCK (1 << 8)
+#define DSI_IRQ_PLL_RECALL (1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
+#define DSI_IRQ_TE_TRIGGER (1 << 16)
+#define DSI_IRQ_ACK_TRIGGER (1 << 17)
+#define DSI_IRQ_SYNC_LOST (1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
+#define DSI_IRQ_TA_TIMEOUT (1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+ (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+ DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK 0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS (1 << 0)
+#define DSI_VC_IRQ_ECC_CORR (1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
+#define DSI_VC_IRQ_BTA (1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+ (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+ DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+ DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
+#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
+#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
+#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
+#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
+#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
+#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
+#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
+#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
+#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
+#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
+#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
+#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
+#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+ (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+ DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
+ DSI_CIO_IRQ_ERRSYNCESC5 | \
+ DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+ DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
+ DSI_CIO_IRQ_ERRESC5 | \
+ DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
+ DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
+ DSI_CIO_IRQ_ERRCONTROL5 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
+
+enum omap_dss_dsi_mode {
+ OMAP_DSS_DSI_CMD_MODE = 0,
+ OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_dss_dsi_trans_mode {
+ /* Sync Pulses: both sync start and end packets sent */
+ OMAP_DSS_DSI_PULSE_MODE,
+ /* Sync Events: only sync start packets sent */
+ OMAP_DSS_DSI_EVENT_MODE,
+ /* Burst: only sync start packets sent, pixels are time compressed */
+ OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+ unsigned long hsclk;
+
+ unsigned int ndl;
+ unsigned int bitspp;
+
+ /* pixels */
+ u16 hact;
+ /* lines */
+ u16 vact;
+
+ /* DSI video mode blanking data */
+ /* Unit: byte clock cycles */
+ u16 hss;
+ u16 hsa;
+ u16 hse;
+ u16 hfp;
+ u16 hbp;
+ /* Unit: line clocks */
+ u16 vsa;
+ u16 vfp;
+ u16 vbp;
+
+ /* DSI blanking modes */
+ int blanking_mode;
+ int hsa_blanking_mode;
+ int hbp_blanking_mode;
+ int hfp_blanking_mode;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+
+ int window_sync;
+};
+
+struct omap_dss_dsi_config {
+ enum omap_dss_dsi_mode mode;
+ enum mipi_dsi_pixel_format pixel_format;
+ const struct videomode *vm;
+
+ unsigned long hs_clk_min, hs_clk_max;
+ unsigned long lp_clk_min, lp_clk_max;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+/* DSI PLL HSDIV indices */
+#define HSDIV_DISPC 0
+#define HSDIV_DSI 1
+
+#define DSI_MAX_NR_ISRS 2
+#define DSI_MAX_NR_LANES 5
+
+enum dsi_model {
+ DSI_MODEL_OMAP3,
+ DSI_MODEL_OMAP4,
+ DSI_MODEL_OMAP5,
+};
+
+enum dsi_lane_function {
+ DSI_LANE_UNUSED = 0,
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+ enum dsi_lane_function function;
+ u8 polarity;
+};
+
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+struct dsi_isr_data {
+ omap_dsi_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+enum fifo_size {
+ DSI_FIFO_SIZE_0 = 0,
+ DSI_FIFO_SIZE_32 = 1,
+ DSI_FIFO_SIZE_64 = 2,
+ DSI_FIFO_SIZE_96 = 3,
+ DSI_FIFO_SIZE_128 = 4,
+};
+
+enum dsi_vc_source {
+ DSI_VC_SOURCE_L4 = 0,
+ DSI_VC_SOURCE_VP,
+};
+
+struct dsi_irq_stats {
+ unsigned long last_reset;
+ unsigned int irq_count;
+ unsigned int dsi_irqs[32];
+ unsigned int vc_irqs[4][32];
+ unsigned int cio_irqs[32];
+};
+
+struct dsi_isr_tables {
+ struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
+struct dsi_lp_clock_info {
+ unsigned long lp_clk;
+ u16 lp_clk_div;
+};
+
+struct dsi_clk_calc_ctx {
+ struct dsi_data *dsi;
+ struct dss_pll *pll;
+
+ /* inputs */
+
+ const struct omap_dss_dsi_config *config;
+
+ unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+ /* outputs */
+
+ struct dss_pll_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ struct dsi_lp_clock_info lp_cinfo;
+
+ struct videomode vm;
+ struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+enum dsi_quirks {
+ DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
+ DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+ DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+ DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+ DSI_QUIRK_GNQ = (1 << 4),
+ DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+ enum dsi_model model;
+ const struct dss_pll_hw *pll_hw;
+ const struct dsi_module_id_data *modules;
+ unsigned int max_fck_freq;
+ unsigned int max_pll_lpdiv;
+ enum dsi_quirks quirks;
+};
+
+struct dsi_data {
+ struct device *dev;
+ void __iomem *proto_base;
+ void __iomem *phy_base;
+ void __iomem *pll_base;
+
+ const struct dsi_of_data *data;
+ int module_id;
+
+ int irq;
+
+ bool is_enabled;
+
+ struct clk *dss_clk;
+ struct regmap *syscon;
+ struct dss_device *dss;
+
+ struct mipi_dsi_host host;
+
+ struct dispc_clock_info user_dispc_cinfo;
+ struct dss_pll_clock_info user_dsi_cinfo;
+
+ struct dsi_lp_clock_info user_lp_cinfo;
+ struct dsi_lp_clock_info current_lp_cinfo;
+
+ struct dss_pll pll;
+
+ bool vdds_dsi_enabled;
+ struct regulator *vdds_dsi_reg;
+
+ struct mipi_dsi_device *dsidev;
+
+ struct {
+ enum dsi_vc_source source;
+ enum fifo_size tx_fifo_size;
+ enum fifo_size rx_fifo_size;
+ } vc[4];
+
+ struct mutex lock;
+ struct semaphore bus_lock;
+
+ spinlock_t irq_lock;
+ struct dsi_isr_tables isr_tables;
+ /* space for a copy used by the interrupt handler */
+ struct dsi_isr_tables isr_tables_copy;
+
+ int update_vc;
+#ifdef DSI_PERF_MEASURE
+ unsigned int update_bytes;
+#endif
+
+ /* external TE GPIO */
+ struct gpio_desc *te_gpio;
+ int te_irq;
+ struct delayed_work te_timeout_work;
+ atomic_t do_ext_te_update;
+
+ bool te_enabled;
+ bool iface_enabled;
+ bool video_enabled;
+
+ struct delayed_work framedone_timeout_work;
+
+#ifdef DSI_CATCH_MISSING_TE
+ struct timer_list te_timer;
+#endif
+
+ unsigned long cache_req_pck;
+ unsigned long cache_clk_freq;
+ struct dss_pll_clock_info cache_cinfo;
+
+ u32 errors;
+ spinlock_t errors_lock;
+#ifdef DSI_PERF_MEASURE
+ ktime_t perf_setup_time;
+ ktime_t perf_start_time;
+#endif
+ int debug_read;
+ int debug_write;
+ struct {
+ struct dss_debugfs_entry *irqs;
+ struct dss_debugfs_entry *regs;
+ struct dss_debugfs_entry *clks;
+ } debugfs;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dsi_irq_stats irq_stats;
+#endif
+
+ unsigned int num_lanes_supported;
+ unsigned int line_buffer_size;
+
+ struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+ unsigned int num_lanes_used;
+
+ unsigned int scp_clk_refcount;
+
+ struct omap_dss_dsi_config config;
+
+ struct dss_lcd_mgr_config mgr_config;
+ struct videomode vm;
+ enum mipi_dsi_pixel_format pix_fmt;
+ enum omap_dss_dsi_mode mode;
+ struct omap_dss_dsi_videomode_timings vm_timings;
+
+ struct omap_dss_device output;
+ struct drm_bridge bridge;
+
+ struct delayed_work dsi_disable_work;
+};
+
+struct dsi_packet_sent_handler_data {
+ struct dsi_data *dsi;
+ struct completion *completion;
+};
+
+#endif /* __OMAP_DRM_DSS_DSI_H */
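
The three register blocks above (DSI_PROTO, DSI_PHY, DSI_PLL) pair with the three __iomem bases kept in struct dsi_data, so a struct dsi_reg fully names a register as (base, offset). A plausible accessor, sketched here for orientation rather than copied from dsi.c:

    /* Sketch: resolve a struct dsi_reg against the matching iomem base. */
    static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
    {
            void __iomem *base;

            switch (idx.module) {
            case DSI_PROTO: base = dsi->proto_base; break;
            case DSI_PHY:   base = dsi->phy_base;   break;
            default:        base = dsi->pll_base;   break;  /* DSI_PLL */
            }

            return readl(base + idx.idx);
    }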
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d7b2f5bcac16..d6a5862b4dbf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1308,6 +1308,7 @@ static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
struct platform_device *drm_pdev;
+ struct dss_pdata pdata;
int r;
r = component_bind_all(dev, NULL);
@@ -1316,9 +1317,9 @@ static int dss_bind(struct device *dev)
pm_set_vt_switch(0);
- omapdss_set_dss(dss);
-
- drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ pdata.dss = dss;
+ drm_pdev = platform_device_register_data(NULL, "omapdrm", 0,
+ &pdata, sizeof(pdata));
if (IS_ERR(drm_pdev)) {
component_unbind_all(dev, NULL);
return PTR_ERR(drm_pdev);
@@ -1335,8 +1336,6 @@ static void dss_unbind(struct device *dev)
platform_device_unregister(dss->drm_pdev);
- omapdss_set_dss(NULL);
-
component_unbind_all(dev, NULL);
}
@@ -1569,15 +1568,7 @@ static int dss_remove(struct platform_device *pdev)
static void dss_shutdown(struct platform_device *pdev)
{
- struct omap_dss_device *dssdev = NULL;
-
DSSDBG("shutdown\n");
-
- for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
- dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
- }
}
static int dss_runtime_suspend(struct device *dev)
@@ -1650,21 +1641,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
#endif
};
-static int __init omap_dss_init(void)
+int __init omap_dss_init(void)
{
return platform_register_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-static void __exit omap_dss_exit(void)
+void omap_dss_exit(void)
{
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 2b404bcb41dd..a547527bb2f3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -257,8 +257,6 @@ struct dss_device {
struct dss_pll *video2_pll;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
- const struct dss_mgr_ops *mgr_ops;
struct omap_drm_private *mgr_ops_priv;
};
@@ -393,6 +391,76 @@ void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
int dispc_runtime_get(struct dispc_device *dispc);
void dispc_runtime_put(struct dispc_device *dispc);
+int dispc_get_num_ovls(struct dispc_device *dispc);
+int dispc_get_num_mgrs(struct dispc_device *dispc);
+
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+
+u32 dispc_read_irqstatus(struct dispc_device *dispc);
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
+
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id);
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id);
+
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
+
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
+
+void dispc_mgr_enable(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable);
+
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
+ enum omap_channel channel);
+
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel);
+
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void dispc_mgr_setup(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+
+int dispc_mgr_check_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+ enum omap_channel channel);
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
+
+int dispc_ovl_setup(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct omap_overlay_info *oi,
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
+
+int dispc_ovl_enable(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable);
+
+bool dispc_has_writeback(struct dispc_device *dispc);
+int dispc_wb_setup(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in);
+bool dispc_wb_go_busy(struct dispc_device *dispc);
+void dispc_wb_go(struct dispc_device *dispc);
+
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
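These declarations replace the dispc_ops function-pointer table removed from struct dss_device above: with a single module there is no need for the indirection layer, and call sites become direct calls. The pattern, taken from the omap_crtc.c hunks later in this diff:

    /* before: indirect call through the ops table */
    priv->dispc_ops->mgr_enable(priv->dispc, channel, true);

    /* after: direct call into dispc */
    dispc_mgr_enable(priv->dispc, channel, true);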
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 8de41e74e8f8..35b750cebaeb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -707,7 +707,6 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 54e5cb5aa52d..65085d886da5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -681,7 +681,6 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
deleted file mode 100644
index f21b5df31213..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/*
- * As omapdss panel drivers are omapdss specific, but we want to define the
- * DT-data in generic manner, we convert the compatible strings of the panel and
- * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have
- * both correct DT data and omapdss specific drivers.
- *
- * When we get generic panel drivers to the kernel, this file will be removed.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-static struct list_head dss_conv_list __initdata;
-
-static const char prefix[] __initconst = "omapdss,";
-
-struct dss_conv_node {
- struct list_head list;
- struct device_node *node;
- bool root;
-};
-
-static int __init omapdss_count_strings(const struct property *prop)
-{
- const char *p = prop->value;
- int l = 0, total = 0;
- int i;
-
- for (i = 0; total < prop->length; total += l, p += l, i++)
- l = strlen(p) + 1;
-
- return i;
-}
-
-static void __init omapdss_update_prop(struct device_node *node, char *compat,
- int len)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "compatible";
- prop->value = compat;
- prop->length = len;
-
- of_update_property(node, prop);
-}
-
-static void __init omapdss_prefix_strcpy(char *dst, int dst_len,
- const char *src, int src_len)
-{
- size_t total = 0;
-
- while (total < src_len) {
- size_t l = strlen(src) + 1;
-
- strcpy(dst, prefix);
- dst += strlen(prefix);
-
- strcpy(dst, src);
- dst += l;
-
- src += l;
- total += l;
- }
-}
-
-/* prepend compatible property strings with "omapdss," */
-static void __init omapdss_omapify_node(struct device_node *node)
-{
- struct property *prop;
- char *new_compat;
- int num_strs;
- int new_len;
-
- prop = of_find_property(node, "compatible", NULL);
-
- if (!prop || !prop->value)
- return;
-
- if (strnlen(prop->value, prop->length) >= prop->length)
- return;
-
- /* is it already prefixed? */
- if (strncmp(prefix, prop->value, strlen(prefix)) == 0)
- return;
-
- num_strs = omapdss_count_strings(prop);
-
- new_len = prop->length + strlen(prefix) * num_strs;
- new_compat = kmalloc(new_len, GFP_KERNEL);
-
- omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
-
- omapdss_update_prop(node, new_compat, new_len);
-}
-
-static void __init omapdss_add_to_list(struct device_node *node, bool root)
-{
- struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- n->node = node;
- n->root = root;
- list_add(&n->list, &dss_conv_list);
- }
-}
-
-static bool __init omapdss_list_contains(const struct device_node *node)
-{
- struct dss_conv_node *n;
-
- list_for_each_entry(n, &dss_conv_list, list) {
- if (n->node == node)
- return true;
- }
-
- return false;
-}
-
-static void __init omapdss_walk_device(struct device_node *node, bool root)
-{
- struct device_node *n;
-
- omapdss_add_to_list(node, root);
-
- /*
- * of_graph_get_remote_port_parent() prints an error if there is no
- * port/ports node. To avoid that, check first that there's the node.
- */
- n = of_get_child_by_name(node, "ports");
- if (!n)
- n = of_get_child_by_name(node, "port");
- if (!n)
- return;
-
- of_node_put(n);
-
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
- struct device_node *pn;
-
- pn = of_graph_get_remote_port_parent(n);
-
- if (!pn)
- continue;
-
- if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
- of_node_put(pn);
- continue;
- }
-
- omapdss_walk_device(pn, false);
- }
-}
-
-static const struct of_device_id omapdss_of_match[] __initconst = {
- { .compatible = "ti,omap2-dss", },
- { .compatible = "ti,omap3-dss", },
- { .compatible = "ti,omap4-dss", },
- { .compatible = "ti,omap5-dss", },
- { .compatible = "ti,dra7-dss", },
- {},
-};
-
-static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "panel-dsi-cm" },
- {},
-};
-
-static void __init omapdss_find_children(struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
-
- if (of_device_is_compatible(child, "ti,sysc"))
- omapdss_find_children(child);
- }
-}
-
-static int __init omapdss_boot_init(void)
-{
- struct device_node *dss;
-
- INIT_LIST_HEAD(&dss_conv_list);
-
- dss = of_find_matching_node(NULL, omapdss_of_match);
-
- if (dss == NULL || !of_device_is_available(dss))
- goto put_node;
-
- omapdss_walk_device(dss, true);
- omapdss_find_children(dss);
-
- while (!list_empty(&dss_conv_list)) {
- struct dss_conv_node *n;
-
- n = list_first_entry(&dss_conv_list, struct dss_conv_node,
- list);
-
- if (of_match_node(omapdss_of_fixups_whitelist, n->node))
- omapdss_omapify_node(n->node);
-
- list_del(&n->list);
- of_node_put(n->node);
- kfree(n);
- }
-
-put_node:
- of_node_put(dss);
- return 0;
-}
-
-subsys_initcall(omapdss_boot_init);
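For reference, the deleted fixup rewrote DT compatible values in place, and a compatible property can hold several NUL-separated strings, which is why the code counted them before sizing the new buffer. A worked example of that arithmetic (illustrative values only):

    /* A property holding two strings
     *   "panel-foo\0ti,panel-foo\0"            -> prop->length = 23
     * with prefix "omapdss," (8 chars) and num_strs = 2 gives
     *   new_len = 23 + 8 * 2 = 39
     * enough for "omapdss,panel-foo\0omapdss,ti,panel-foo\0". */

With generic panel drivers now bound directly, only "panel-dsi-cm" was still on the whitelist, so the whole __init file can go.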
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index a48a9a254e33..a40abeafd2e9 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -7,13 +7,14 @@
#ifndef __OMAP_DRM_DSS_H
#define __OMAP_DRM_DSS_H
-#include <linux/list.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mode.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <video/videomode.h>
+#include <linux/list.h>
#include <linux/platform_data/omapdss.h>
-#include <uapi/drm/drm_mode.h>
-#include <drm/drm_crtc.h>
+#include <video/videomode.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -116,28 +117,6 @@ enum omap_dss_venc_type {
OMAP_DSS_VENC_TYPE_SVIDEO,
};
-enum omap_dss_dsi_pixel_format {
- OMAP_DSS_DSI_FMT_RGB888,
- OMAP_DSS_DSI_FMT_RGB666,
- OMAP_DSS_DSI_FMT_RGB666_PACKED,
- OMAP_DSS_DSI_FMT_RGB565,
-};
-
-enum omap_dss_dsi_mode {
- OMAP_DSS_DSI_CMD_MODE = 0,
- OMAP_DSS_DSI_VIDEO_MODE,
-};
-
-enum omap_display_caps {
- OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
-};
-
-enum omap_dss_display_state {
- OMAP_DSS_DISPLAY_DISABLED = 0,
- OMAP_DSS_DISPLAY_ACTIVE,
-};
-
enum omap_dss_rotation_type {
OMAP_DSS_ROT_NONE = 0,
OMAP_DSS_ROT_TILER = 1 << 0,
@@ -162,64 +141,6 @@ enum omap_dss_output_id {
OMAP_DSS_OUTPUT_HDMI = 1 << 6,
};
-/* DSI */
-
-enum omap_dss_dsi_trans_mode {
- /* Sync Pulses: both sync start and end packets sent */
- OMAP_DSS_DSI_PULSE_MODE,
- /* Sync Events: only sync start packets sent */
- OMAP_DSS_DSI_EVENT_MODE,
- /* Burst: only sync start packets sent, pixels are time compressed */
- OMAP_DSS_DSI_BURST_MODE,
-};
-
-struct omap_dss_dsi_videomode_timings {
- unsigned long hsclk;
-
- unsigned int ndl;
- unsigned int bitspp;
-
- /* pixels */
- u16 hact;
- /* lines */
- u16 vact;
-
- /* DSI video mode blanking data */
- /* Unit: byte clock cycles */
- u16 hss;
- u16 hsa;
- u16 hse;
- u16 hfp;
- u16 hbp;
- /* Unit: line clocks */
- u16 vsa;
- u16 vfp;
- u16 vbp;
-
- /* DSI blanking modes */
- int blanking_mode;
- int hsa_blanking_mode;
- int hbp_blanking_mode;
- int hfp_blanking_mode;
-
- enum omap_dss_dsi_trans_mode trans_mode;
-
- bool ddr_clk_always_on;
- int window_sync;
-};
-
-struct omap_dss_dsi_config {
- enum omap_dss_dsi_mode mode;
- enum omap_dss_dsi_pixel_format pixel_format;
- const struct videomode *vm;
-
- unsigned long hs_clk_min, hs_clk_max;
- unsigned long lp_clk_min, lp_clk_max;
-
- bool ddr_clk_always_on;
- enum omap_dss_dsi_trans_mode trans_mode;
-};
-
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
@@ -243,6 +164,9 @@ struct omap_overlay_info {
u8 global_alpha;
u8 pre_mult_alpha;
u8 zorder;
+
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
};
struct omap_overlay_manager_info {
@@ -258,21 +182,6 @@ struct omap_overlay_manager_info {
struct omap_dss_cpr_coefs cpr_coefs;
};
-/* 22 pins means 1 clk lane and 10 data lanes */
-#define OMAP_DSS_MAX_DSI_PINS 22
-
-struct omap_dsi_pin_config {
- int num_pins;
- /*
- * pin numbers in the following order:
- * clk+, clk-
- * data1+, data1-
- * data2+, data2-
- * ...
- */
- int pins[OMAP_DSS_MAX_DSI_PINS];
-};
-
struct omap_dss_writeback_info {
u32 paddr;
u32 p_uv_addr;
@@ -286,89 +195,14 @@ struct omap_dss_writeback_info {
};
struct omapdss_dsi_ops {
- void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
- bool enter_ulps);
-
- /* bus configuration */
- int (*set_config)(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *cfg);
- int (*configure_pins)(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg);
-
- void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
- bool enable);
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-
- int (*update)(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data);
-
- void (*bus_lock)(struct omap_dss_device *dssdev);
- void (*bus_unlock)(struct omap_dss_device *dssdev);
-
- int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
- void (*disable_video_output)(struct omap_dss_device *dssdev,
- int channel);
-
- int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
- int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
- int vc_id);
- void (*release_vc)(struct omap_dss_device *dssdev, int channel);
-
- /* data transfer */
- int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *data, int len);
-
- int (*gen_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_read)(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen,
- u8 *data, int len);
-
- int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
-
- int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
- int channel, u16 plen);
-};
-
-struct omap_dss_device_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- void (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode);
-
- int (*get_modes)(struct omap_dss_device *dssdev,
- struct drm_connector *connector);
-
- const struct omapdss_dsi_ops dsi;
-};
-
-/**
- * enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
- */
-enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_MODES = BIT(3),
+ int (*update)(struct omap_dss_device *dssdev);
+ bool (*is_video_mode)(struct omap_dss_device *dssdev);
};
struct omap_dss_device {
struct device *dev;
- struct module *owner;
-
struct dss_device *dss;
- struct omap_dss_device *next;
struct drm_bridge *bridge;
struct drm_bridge *next_bridge;
struct drm_panel *panel;
@@ -382,23 +216,11 @@ struct omap_dss_device {
*/
enum omap_display_type type;
- /*
- * True if the device is a display (panel or connector) at the end of
- * the pipeline, false otherwise.
- */
- bool display;
-
const char *name;
- const struct omap_dss_driver *driver;
- const struct omap_dss_device_ops *ops;
- unsigned long ops_flags;
+ const struct omapdss_dsi_ops *dsi_ops;
u32 bus_flags;
- enum omap_display_caps caps;
-
- enum omap_dss_display_state state;
-
/* OMAP DSS output specific fields */
/* DISPC channel for this output */
@@ -411,30 +233,10 @@ struct omap_dss_device {
unsigned int of_port;
};
-struct omap_dss_driver {
- int (*update)(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h);
- int (*sync)(struct omap_dss_device *dssdev);
-
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
- int (*get_te)(struct omap_dss_device *dssdev);
-
- int (*memory_read)(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h);
+struct dss_pdata {
+ struct dss_device *dss;
};
-struct dss_device *omapdss_get_dss(void);
-void omapdss_set_dss(struct dss_device *dss);
-static inline bool omapdss_is_initialized(void)
-{
- return !!omapdss_get_dss();
-}
-
-void omapdss_display_init(struct omap_dss_device *dssdev);
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm);
-
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
@@ -445,8 +247,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_enable(struct omap_dss_device *dssdev);
-void omapdss_device_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -466,11 +266,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
-{
- return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
-}
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
@@ -482,31 +277,23 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dss_mgr_ops {
- void (*start_update)(struct omap_drm_private *priv,
- enum omap_channel channel);
- int (*enable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*disable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*set_timings)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*set_lcd_config)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(struct dss_device *dss);
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+ enum omap_channel channel);
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable);
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+int omap_crtc_dss_register_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
+void omap_crtc_dss_unregister_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
@@ -520,80 +307,12 @@ int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
-/* dispc ops */
-
-struct dispc_ops {
- u32 (*read_irqstatus)(struct dispc_device *dispc);
- void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
- void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
-
- int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
- void *dev_id);
- void (*free_irq)(struct dispc_device *dispc, void *dev_id);
-
- int (*runtime_get)(struct dispc_device *dispc);
- void (*runtime_put)(struct dispc_device *dispc);
-
- int (*get_num_ovls)(struct dispc_device *dispc);
- int (*get_num_mgrs)(struct dispc_device *dispc);
-
- u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
-
- void (*mgr_enable)(struct dispc_device *dispc,
- enum omap_channel channel, bool enable);
- bool (*mgr_is_enabled)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- bool (*mgr_go_busy)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
- void (*mgr_set_lcd_config)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*mgr_check_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_set_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
- u32 (*mgr_gamma_size)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_set_gamma)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length);
-
- int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
- bool enable);
- int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
- const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel);
-
- const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
- enum omap_plane_id plane);
-
- u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
- int (*wb_setup)(struct dispc_device *dispc,
- const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm,
- enum dss_writeback_channel channel_in);
- bool (*has_writeback)(struct dispc_device *dispc);
- bool (*wb_go_busy)(struct dispc_device *dispc);
- void (*wb_go)(struct dispc_device *dispc);
-};
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss);
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
bool omapdss_stack_is_ready(void);
void omapdss_gather_components(struct device *dev);
+int omap_dss_init(void);
+void omap_dss_exit(void);
+
#endif /* __OMAP_DRM_DSS_H */
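The slimmed-down struct omapdss_dsi_ops above keeps only the two hooks that omap_crtc.c still needs for manual-update displays: update() and is_video_mode(). A minimal sketch of a DSI driver filling it in (hypothetical names; the real DSI driver is not part of this hunk):

    static int my_dsi_update(struct omap_dss_device *dssdev)
    {
            /* push one frame to a command-mode panel */
            return 0;
    }

    static bool my_dsi_is_video_mode(struct omap_dss_device *dssdev)
    {
            return false;   /* command mode => manually updated */
    }

    static const struct omapdss_dsi_ops my_dsi_ops = {
            .update        = my_dsi_update,
            .is_video_mode = my_dsi_is_video_mode,
    };

    /* at probe time: dssdev->dsi_ops = &my_dsi_ops; */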
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 5affdf078134..7378e855c278 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -30,7 +30,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
return 0;
}
- out->next = omapdss_find_device_by_node(remote_node);
out->bridge = of_drm_find_bridge(remote_node);
out->panel = of_drm_find_panel(remote_node);
if (IS_ERR(out->panel))
@@ -38,12 +37,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
of_node_put(remote_node);
- if (out->next && out->type != out->next->type) {
- dev_err(out->dev, "output type and display type don't match\n");
- ret = -EINVAL;
- goto error;
- }
-
if (out->panel) {
struct drm_bridge *bridge;
@@ -69,7 +62,7 @@ int omapdss_device_init_output(struct omap_dss_device *out,
out->bridge = local_bridge;
}
- if (!out->next && !out->bridge) {
+ if (!out->bridge) {
ret = -EPROBE_DEFER;
goto error;
}
@@ -78,98 +71,64 @@ int omapdss_device_init_output(struct omap_dss_device *out,
error:
omapdss_device_cleanup_output(out);
- out->next = NULL;
return ret;
}
-EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
if (out->bridge && out->panel)
drm_panel_bridge_remove(out->next_bridge ?
out->next_bridge : out->bridge);
-
- if (out->next)
- omapdss_device_put(out->next);
-}
-EXPORT_SYMBOL(omapdss_device_cleanup_output);
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv)
-{
- if (dss->mgr_ops)
- return -EBUSY;
-
- dss->mgr_ops = mgr_ops;
- dss->mgr_ops_priv = priv;
-
- return 0;
-}
-EXPORT_SYMBOL(dss_install_mgr_ops);
-
-void dss_uninstall_mgr_ops(struct dss_device *dss)
-{
- dss->mgr_ops = NULL;
- dss->mgr_ops_priv = NULL;
}
-EXPORT_SYMBOL(dss_uninstall_mgr_ops);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
- dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, vm);
}
-EXPORT_SYMBOL(dss_mgr_set_timings);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, config);
}
-EXPORT_SYMBOL(dss_mgr_set_lcd_config);
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+ return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_enable);
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_disable);
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_start_update);
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+ return omap_crtc_dss_register_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+ omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
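Note the EXPORT_SYMBOL() removals throughout output.c: omapdss and omapdrm are now one module, so these are intra-module calls and need no exported symbols. Each dss_mgr_* helper is reduced to a thin forwarder into the CRTC code, e.g. (matching the hunk above):

    int dss_mgr_enable(struct omap_dss_device *dssdev)
    {
            return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
                                        dssdev->dispc_channel);
    }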
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 241a338ace29..4c8246a3ded9 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -222,6 +222,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
n_inc = 1;
+ if (n_start > n_stop)
+ return false;
+
if (hw->errata_i886) {
swap(n_start, n_stop);
n_inc = -1;
@@ -239,6 +242,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
hw->m_max);
m_inc = 1;
+ if (m_start > m_stop)
+ continue;
+
if (hw->errata_i886) {
swap(m_start, m_stop);
m_inc = -1;
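Both pll.c guards matter because of the errata_i886 path right after them: when the erratum applies, start and stop are swapped and the loop walks the range downwards, so an empty range (start > stop) must be rejected while it is still in its original orientation. A condensed sketch of the control flow (loop form illustrative, not the exact driver code):

    if (n_start > n_stop)           /* empty range: no usable divider */
            return false;

    if (hw->errata_i886) {
            swap(n_start, n_stop);  /* walk high -> low instead */
            n_inc = -1;
    }

    for (n = n_start; n != n_stop + n_inc; n += n_inc)
            /* try divider n ... */;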
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 282e4c837cd9..91eaae3b9481 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -312,7 +312,6 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_port = 1;
- out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 94cf50d837b0..e522c17955d0 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -733,9 +733,7 @@ static int venc_init_output(struct venc_device *venc)
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
- out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
r = omapdss_device_init_output(out, &venc->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
deleted file mode 100644
index 47719b92e22b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Rob Clark <rob@ti.com>
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-
-#include "omap_drv.h"
-
-/*
- * connector funcs
- */
-
-#define to_omap_connector(x) container_of(x, struct omap_connector, base)
-
-struct omap_connector {
- struct drm_connector base;
- struct omap_dss_device *output;
-};
-
-static enum drm_connector_status omap_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static void omap_connector_destroy(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- DBG("%s", connector->name);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-
- omapdss_device_put(omap_connector->output);
-
- kfree(omap_connector);
-}
-
-static int omap_connector_get_modes(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- DBG("%s", connector->name);
-
- /*
- * If the display pipeline reports modes (e.g. with a fixed resolution
- * panel or an analog TV output), query it.
- */
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
- dssdev = d;
- }
-
- if (dssdev)
- return dssdev->ops->get_modes(dssdev, connector);
-
- /* We can't retrieve modes. The KMS core will add the default modes. */
- return 0;
-}
-
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- int ret;
-
- drm_mode_copy(adjusted_mode, mode);
-
- for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops || !dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
- if (ret)
- return MODE_BAD;
- }
-
- return MODE_OK;
-}
-
-static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct drm_display_mode new_mode = {};
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_connector->output, mode,
- &new_mode);
- if (status != MODE_OK)
- goto done;
-
- /* Check if vrefresh is still valid. */
- if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
- status = MODE_NOCLOCK;
-
-done:
- DBG("connector: mode %s: " DRM_MODE_FMT,
- (status == MODE_OK) ? "valid" : "invalid",
- DRM_MODE_ARG(mode));
-
- return status;
-}
-
-static const struct drm_connector_funcs omap_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = omap_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = omap_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
- .get_modes = omap_connector_get_modes,
- .mode_valid = omap_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct omap_connector *omap_connector;
-
- DBG("%s", output->name);
-
- omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
- if (!omap_connector)
- goto fail;
-
- omap_connector->output = omapdss_device_get(output);
-
- connector = &omap_connector->base;
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
-
- drm_connector_init(dev, connector, &omap_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &omap_connector_helper_funcs);
-
- return connector;
-
-fail:
- if (connector)
- omap_connector_destroy(connector);
-
- return NULL;
-}
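The custom connector implementation is superseded by the generic bridge connector; the replacement path appears in the omap_drv.c hunk below:

    pipe->connector = drm_bridge_connector_init(dev, encoder);
    if (IS_ERR(pipe->connector))
            return PTR_ERR(pipe->connector);

    drm_connector_attach_encoder(pipe->connector, encoder);

Mode enumeration and validation now come from the bridge chain instead of the removed omap_connector_get_modes()/omap_connector_mode_valid() pair.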
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
deleted file mode 100644
index 0ecd4f1655b7..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * omap_connector.h -- OMAP DRM Connector
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- */
-
-#ifndef __OMAPDRM_CONNECTOR_H__
-#define __OMAPDRM_CONNECTOR_H__
-
-#include <linux/types.h>
-
-enum drm_mode_status;
-
-struct drm_connector;
-struct drm_device;
-struct drm_encoder;
-struct omap_dss_device;
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder);
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
-#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 7d66269ad998..06a719c104f4 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -100,14 +100,14 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* the upstream part of the video pipe.
*/
-static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
- priv->dispc_ops->mgr_enable(priv->dispc, channel, true);
+ dispc_mgr_enable(priv->dispc, channel, true);
}
/* Called only from the encoder enable/disable and suspend/resume handlers. */
-static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
{
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
@@ -141,9 +141,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
omap_crtc->ignore_digit_sync_lost = true;
}
- framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+ framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc,
channel);
- vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -163,7 +163,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -180,21 +180,19 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(struct omap_drm_private *priv,
- enum omap_channel channel)
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
-static void omap_crtc_dss_disable(struct omap_drm_private *priv,
- enum omap_channel channel)
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -202,7 +200,7 @@ static void omap_crtc_dss_disable(struct omap_drm_private *priv,
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -213,7 +211,7 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
omap_crtc->vm = *vm;
}
-static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -221,11 +219,11 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
- priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
config);
}
-static int omap_crtc_dss_register_framedone(
+int omap_crtc_dss_register_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -244,7 +242,7 @@ static int omap_crtc_dss_register_framedone(
return 0;
}
-static void omap_crtc_dss_unregister_framedone(
+void omap_crtc_dss_unregister_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -261,16 +259,6 @@ static void omap_crtc_dss_unregister_framedone(
omap_crtc->framedone_handler_data = NULL;
}
-static const struct dss_mgr_ops mgr_ops = {
- .start_update = omap_crtc_dss_start_update,
- .enable = omap_crtc_dss_enable,
- .disable = omap_crtc_dss_disable,
- .set_timings = omap_crtc_dss_set_timings,
- .set_lcd_config = omap_crtc_dss_set_lcd_config,
- .register_framedone_handler = omap_crtc_dss_register_framedone,
- .unregister_framedone_handler = omap_crtc_dss_unregister_framedone,
-};
-
/* -----------------------------------------------------------------------------
* Setup, Flush and Page Flip
*/
@@ -300,7 +288,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
- if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
+ if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
@@ -362,27 +350,14 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
{
struct omap_crtc *omap_crtc =
container_of(data, struct omap_crtc, update_work.work);
- struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode;
- struct omap_dss_device *dssdev = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
struct drm_device *dev = omap_crtc->base.dev;
- const struct omap_dss_driver *dssdrv;
int ret;
- if (!dssdev) {
- dev_err_once(dev->dev, "missing display dssdev!");
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update)
return;
- }
-
- dssdrv = dssdev->driver;
- if (!dssdrv || !dssdrv->update) {
- dev_err_once(dev->dev, "missing or incorrect dssdrv!");
- return;
- }
-
- if (dssdrv->sync)
- dssdrv->sync(dssdev);
- ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay);
+ ret = dssdev->dsi_ops->update(dssdev);
if (ret < 0) {
spin_lock_irq(&dev->event_lock);
omap_crtc->pending = false;
@@ -391,6 +366,33 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
}
}
+static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+
+ s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);
+
+ if (cbits & sign_bit)
+ ret = -ret;
+
+ return ret;
+}
+
+static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
+ struct omap_dss_cpr_coefs *cpr)
+{
+ cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]);
+ cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]);
+ cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
+ cpr->gr = omap_crtc_s31_32_to_s2_8(ctm->matrix[3]);
+ cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]);
+ cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]);
+ cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]);
+ cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]);
+ cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]);
+}
+
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
@@ -402,9 +404,17 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
info.default_color = 0x000000;
info.trans_enabled = false;
info.partial_alpha_enabled = false;
- info.cpr_enable = false;
- priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
+ if (crtc->state->ctm) {
+ struct drm_color_ctm *ctm = crtc->state->ctm->data;
+
+ info.cpr_enable = true;
+ omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
+ } else {
+ info.cpr_enable = false;
+ }
+
+ dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
@@ -445,7 +455,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
DBG("%s", omap_crtc->name);
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* manual updated display will not trigger vsync irq */
if (omap_state->manually_updated)
@@ -484,7 +494,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
@@ -502,9 +512,8 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
* valid DISPC mode. DSI will calculate and configure the
* proper DISPC mode later.
*/
- if (omap_crtc->pipe->output->next == NULL ||
- omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
- r = priv->dispc_ops->mgr_check_timings(priv->dispc,
+ if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) {
+ r = dispc_mgr_check_timings(priv->dispc,
omap_crtc->channel,
&vm);
if (r)
@@ -555,17 +564,16 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct omap_dss_device *display = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
- if (!display)
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode)
return false;
- if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- DBG("detected manually updated display!");
- return true;
- }
+ if (dssdev->dsi_ops->is_video_mode(dssdev))
+ return false;
- return false;
+ DBG("detected manually updated display!");
+ return true;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -575,8 +583,8 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
crtc);
struct drm_plane_state *pri_state;
- if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
- unsigned int length = crtc_state->gamma_lut->length /
+ if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) {
+ unsigned int length = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -617,13 +625,13 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_color_lut *lut = NULL;
unsigned int length = 0;
- if (crtc->state->gamma_lut) {
+ if (crtc->state->degamma_lut) {
lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
+ crtc->state->degamma_lut->data;
+ length = crtc->state->degamma_lut->length /
sizeof(*lut);
}
- priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel,
lut, length);
}
@@ -648,7 +656,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
- priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
+ dispc_mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -741,7 +749,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.atomic_duplicate_state = omap_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_set_property = omap_crtc_atomic_set_property,
@@ -771,16 +778,6 @@ static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
-void omap_crtc_pre_init(struct omap_drm_private *priv)
-{
- dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
-}
-
-void omap_crtc_pre_uninit(struct omap_drm_private *priv)
-{
- dss_uninstall_mgr_ops(priv->dss);
-}
-
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
@@ -839,10 +836,10 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* extracted with dispc_mgr_gamma_size(). If it returns 0
* gamma table is not supported.
*/
- if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+ if (dispc_mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
- drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
}
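A worked example for the omap_crtc_s31_32_to_s2_8() helper added above, which converts DRM's sign-magnitude S31.32 CTM entries into the DISPC CPR s2.8 coefficients:

    /* 1.0 in S31.32 sign-magnitude  = 0x0000000100000000
     * (cbits & ~sign_bit) >> 24     = 0x100 = 256 = 1.0 in s2.8
     * clamp_val(..., 0, 0x1ff)      caps the magnitude at 511/256 (~2.0)
     * bit 63, if set, negates the result afterwards.
     *
     * An identity CTM therefore maps to rr = gg = bb = 256 with all
     * cross terms 0. */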
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index 2fd57751ae2b..a8b9cbee86e0 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -22,8 +22,6 @@ struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(struct omap_drm_private *priv);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
struct drm_plane *plane);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 42c2ed752095..28bbad1353ee 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -192,7 +192,7 @@ static int omap_compare_pipelines(const void *a, const void *b)
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
@@ -206,14 +206,7 @@ static int omap_display_id(struct omap_dss_device *output)
{
struct device_node *node = NULL;
- if (output->next) {
- struct omap_dss_device *display = output;
-
- while (display->next)
- display = display->next;
-
- node = display->dev->of_node;
- } else if (output->bridge) {
+ if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
while (drm_bridge_get_next_bridge(bridge))
@@ -228,8 +221,8 @@ static int omap_display_id(struct omap_dss_device *output)
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
- int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ int num_ovls = dispc_get_num_ovls(priv->dispc);
+ int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int i;
int ret;
u32 plane_crtc_mask;
@@ -332,19 +325,12 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (pipe->output->next) {
- pipe->connector = omap_connector_init(dev, pipe->output,
- encoder);
- if (!pipe->connector)
- return -ENOMEM;
- } else {
- pipe->connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(pipe->connector)) {
- dev_err(priv->dev,
- "unable to create bridge connector for %s\n",
- pipe->output->name);
- return PTR_ERR(pipe->connector);
- }
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
}
drm_connector_attach_encoder(pipe->connector, encoder);
@@ -568,6 +554,7 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
+ struct dss_pdata *pdata = dev->platform_data;
struct drm_device *ddev;
int ret;
@@ -582,11 +569,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
ddev->dev_private = priv;
priv->dev = dev;
- priv->dss = omapdss_get_dss();
+ priv->dss = pdata->dss;
priv->dispc = dispc_get_dispc(priv->dss);
- priv->dispc_ops = dispc_get_ops(priv->dss);
- omap_crtc_pre_init(priv);
+ priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
@@ -596,9 +582,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
INIT_LIST_HEAD(&priv->obj_list);
/* Get memory bandwidth limits */
- if (priv->dispc_ops->get_memory_bandwidth_limit)
- priv->max_bandwidth =
- priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
+ priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
@@ -641,7 +625,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
}
@@ -667,7 +650,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
}
@@ -677,9 +659,6 @@ static int pdev_probe(struct platform_device *pdev)
struct omap_drm_private *priv;
int ret;
- if (omapdss_is_initialized() == false)
- return -EPROBE_DEFER;
-
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
@@ -748,9 +727,21 @@ static struct platform_driver * const drivers[] = {
static int __init omap_drm_init(void)
{
+ int r;
+
DBG("init");
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ r = omap_dss_init();
+ if (r)
+ return r;
+
+ r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (r) {
+ omap_dss_exit();
+ return r;
+ }
+
+ return 0;
}
static void __exit omap_drm_fini(void)
@@ -758,13 +749,15 @@ static void __exit omap_drm_fini(void)
DBG("fini");
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+
+ omap_dss_exit();
}
-/* need late_initcall() so we load after dss_driver's are loaded */
-late_initcall(omap_drm_init);
+module_init(omap_drm_init);
module_exit(omap_drm_fini);
MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae57e7ada876..d6f136984da9 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -12,11 +12,11 @@
#include <linux/workqueue.h>
#include "dss/omapdss.h"
+#include "dss/dss.h"
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
-#include "omap_connector.h"
#include "omap_crtc.h"
#include "omap_encoder.h"
#include "omap_fb.h"
@@ -47,7 +47,6 @@ struct omap_drm_private {
struct dss_device *dss;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
unsigned int num_pipes;
struct omap_drm_pipeline pipes[8];
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 57e92a4d5937..4dd05bc732da 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -75,7 +75,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *output = omap_encoder->output;
- struct omap_dss_device *dssdev;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_bridge *bridge;
@@ -98,9 +97,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = output; dssdev; dssdev = dssdev->next)
- omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
-
for (bridge = output->bridge; bridge;
bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
@@ -113,65 +109,12 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for the dss manager. */
+ /* Set timings for all devices in the display pipeline. */
dss_mgr_set_timings(output, &vm);
}
-static void omap_encoder_disable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
-
- /*
- * Disable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_disable(dssdev->next);
-}
-
-static void omap_encoder_enable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
-
- /*
- * Enable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_enable(dssdev->next);
-}
-
-static int omap_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_encoder->output,
- &crtc_state->mode,
- &crtc_state->adjusted_mode);
- if (status != MODE_OK) {
- dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.mode_set = omap_encoder_mode_set,
- .disable = omap_encoder_disable,
- .enable = omap_encoder_enable,
- .atomic_check = omap_encoder_atomic_check,
};
/* initialize encoder */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 30d299ca8795..38af6195d959 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
omap_obj->pages = pages;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
- npages);
+ ret = drm_prime_sg_to_page_array(sgt, pages, npages);
if (ret) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
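API rename in drm_prime, visible in the omap_gem.c hunk above: the separate DMA-address array argument is gone, and only the page array is filled in.

    /* before */
    ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);

    /* after */
    ret = drm_prime_sg_to_page_array(sgt, pages, npages);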
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 97c83b959f7e..15148d4b35b5 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
- priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
+ dispc_write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -83,7 +83,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
int framedone_irq =
- priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);
+ dispc_mgr_get_framedone_irq(priv->dispc, channel);
DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
@@ -120,7 +120,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -146,7 +146,7 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -211,9 +211,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
unsigned int id;
u32 irqstatus;
- irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
- priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
+ irqstatus = dispc_read_irqstatus(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, irqstatus);
+ dispc_read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
@@ -221,15 +221,15 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
+ if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
- if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
- if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
omap_crtc_framedone_irq(crtc, irqstatus);
}
@@ -263,7 +263,7 @@ static const u32 omap_underflow_irqs[] = {
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
@@ -281,13 +281,13 @@ int omap_drm_irq_install(struct drm_device *dev)
}
for (i = 0; i < num_mgrs; ++i)
- priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
+ priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);
- priv->dispc_ops->runtime_get(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_get(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, 0xffffffff);
+ dispc_runtime_put(priv->dispc);
- ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
+ ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
@@ -305,5 +305,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
- priv->dispc_ops->free_irq(priv->dispc, dev);
+ dispc_free_irq(priv->dispc, dev);
}
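
Every hunk in omap_irq.c above makes the same transformation: call sites stop dispatching through the priv->dispc_ops function-pointer table and call the dispc functions directly, which becomes possible once only a single dispc implementation is left. A minimal userspace sketch of the before/after pattern (the names below are illustrative, not the real omapdrm symbols):

    #include <stdio.h>

    struct dispc_device { unsigned int irqstatus; };

    /* Before: every call chased a per-device function-pointer table. */
    struct dispc_ops {
        unsigned int (*read_irqstatus)(struct dispc_device *d);
    };

    /* After: only one implementation remains, so call it directly. */
    static unsigned int dispc_read_irqstatus(struct dispc_device *d)
    {
        return d->irqstatus;
    }

    static const struct dispc_ops ops = {
        .read_irqstatus = dispc_read_irqstatus,
    };

    int main(void)
    {
        struct dispc_device d = { .irqstatus = 0x10 };

        printf("via ops: %#x\n", ops.read_irqstatus(&d));   /* old style */
        printf("direct:  %#x\n", dispc_read_irqstatus(&d)); /* new style */
        return 0;
    }

Besides dropping one pointer chase per call, the direct calls let the compiler inline and type-check the dispc entry points.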
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 21e0b9785599..51dc24acea73 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -59,6 +59,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
+ info.color_encoding = state->color_encoding;
+ info.color_range = state->color_range;
/* update scanout: */
omap_framebuffer_update_scanout(state->fb, state, &info);
@@ -70,17 +72,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
&info.paddr, &info.p_uv_addr);
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
return;
}
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, true);
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -93,7 +95,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -189,6 +191,8 @@ static void omap_plane_reset(struct drm_plane *plane)
*/
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
+ plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+ plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -232,6 +236,22 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.atomic_get_property = omap_plane_atomic_get_property,
};
+static bool omap_plane_supports_yuv(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
+ u32 i;
+
+ for (i = 0; formats[i]; i++)
+ if (formats[i] == DRM_FORMAT_YUYV ||
+ formats[i] == DRM_FORMAT_UYVY ||
+ formats[i] == DRM_FORMAT_NV12)
+ return true;
+
+ return false;
+}
+
static const char *plane_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
@@ -252,7 +272,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
@@ -271,7 +291,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
+ formats = dispc_ovl_get_color_modes(priv->dispc, id);
for (nformats = 0; formats[nformats]; ++nformats)
;
omap_plane->id = id;
@@ -293,6 +313,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ if (omap_plane_supports_yuv(plane))
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
error:
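
The new omap_plane_supports_yuv() helper walks the zero-terminated format list returned by dispc_ovl_get_color_modes() and registers the BT.601/BT.709 color-encoding and range properties only on planes that can actually scan out YUV. A small standalone sketch of that scan (the fourcc codes match real DRM formats, everything else is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    #define FMT_XRGB8888 FOURCC('X', 'R', '2', '4')
    #define FMT_YUYV     FOURCC('Y', 'U', 'Y', 'V')
    #define FMT_UYVY     FOURCC('U', 'Y', 'V', 'Y')
    #define FMT_NV12     FOURCC('N', 'V', '1', '2')

    /* The hardware reports its formats as a zero-terminated array. */
    static bool supports_yuv(const uint32_t *formats)
    {
        for (unsigned int i = 0; formats[i]; i++)
            if (formats[i] == FMT_YUYV || formats[i] == FMT_UYVY ||
                formats[i] == FMT_NV12)
                return true;
        return false;
    }

    int main(void)
    {
        const uint32_t vid1[] = { FMT_XRGB8888, FMT_YUYV, FMT_NV12, 0 };
        const uint32_t gfx[]  = { FMT_XRGB8888, 0 };

        /* vid1 gets the color properties; the RGB-only gfx plane does not */
        printf("vid1: %d, gfx: %d\n", supports_yuv(vid1), supports_yuv(gfx));
        return 0;
    }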
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 9e1acbd2c7aa..8338dc665301 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -254,6 +254,5 @@ struct tcm *sita_init(u16 width, u16 height)
return tcm;
error:
- kfree(tcm);
return NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index b4e021ea30f9..4894913936e9 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -57,6 +57,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
Say Y here if you want support for the BOE TV101WUM and AUO KD101N80
45NA WUXGA PANEL DSI Video Mode panel
+config DRM_PANEL_DSI_CM
+ tristate "Generic DSI command mode panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ DRM panel driver for DSI command mode panels with support for
+ embedded and external backlights.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -145,6 +154,17 @@ config DRM_PANEL_JDI_LT070ME05000
The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
24 bit per pixel.
+config DRM_PANEL_KHADAS_TS050
+ tristate "Khadas TS050 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Khadas TS050 TFT-LCD
+ panel module. The panel has a 1080x1920 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host, a built-in LED backlight and touch controller.
+
config DRM_PANEL_KINGDISPLAY_KD097D04
tristate "Kingdisplay kd097d04 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index ebbf488c7eac..cae4d976c069 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
+obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
new file mode 100644
index 000000000000..af381d756ac1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic DSI Command Mode panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define DCS_GET_ID1 0xda
+#define DCS_GET_ID2 0xdb
+#define DCS_GET_ID3 0xdc
+
+#define DCS_REGULATOR_SUPPLY_NUM 2
+
+static const struct of_device_id dsicm_of_match[];
+
+struct dsic_panel_data {
+ u32 xres;
+ u32 yres;
+ u32 refresh;
+ u32 width_mm;
+ u32 height_mm;
+ u32 max_hs_rate;
+ u32 max_lp_rate;
+};
+
+struct panel_drv_data {
+ struct mipi_dsi_device *dsi;
+ struct drm_panel panel;
+ struct drm_display_mode mode;
+
+ struct mutex lock;
+
+ struct backlight_device *bldev;
+ struct backlight_device *extbldev;
+
+ unsigned long hw_guard_end; /* next value of jiffies when we can
+ * issue the next sleep in/out command
+ */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ const struct dsic_panel_data *panel_data;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM];
+
+ bool use_dsi_backlight;
+
+ /* runtime variables */
+ bool enabled;
+
+ bool intro_printed;
+};
+
+static inline struct panel_drv_data *panel_to_ddata(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_drv_data, panel);
+}
+
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+ struct backlight_device *backlight;
+
+ if (ddata->bldev)
+ backlight = ddata->bldev;
+ else if (ddata->extbldev)
+ backlight = ddata->extbldev;
+ else
+ return;
+
+ if (enable) {
+ backlight->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight->props.state &= ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+ backlight->props.power = FB_BLANK_UNBLANK;
+ } else {
+ backlight->props.fb_blank = FB_BLANK_NORMAL;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+ }
+
+ backlight_update_status(backlight);
+}
+
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+ ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+ unsigned long wait = ddata->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
+{
+ return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1);
+}
+
+static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
+{
+ return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1);
+}
+
+static int dsicm_sleep_in(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_sleep_out(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
+{
+ int r;
+
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_set_update_window(struct panel_drv_data *ddata)
+{
+ struct mipi_dsi_device *dsi = ddata->dsi;
+ int r;
+
+ r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1);
+ if (r < 0)
+ return r;
+
+ r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_bl_update_status(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r = 0;
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ level);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops dsicm_bl_ops = {
+ .get_brightness = dsicm_bl_get_intensity,
+ .update_status = dsicm_bl_update_status,
+};
+
+static ssize_t num_dsi_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 errors = 0;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t hw_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 id1, id2, id3;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static DEVICE_ATTR_RO(num_dsi_errors);
+static DEVICE_ATTR_RO(hw_revision);
+
+static struct attribute *dsicm_attrs[] = {
+ &dev_attr_num_dsi_errors.attr,
+ &dev_attr_hw_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group dsicm_attr_group = {
+ .attrs = dsicm_attrs,
+};
+
+static void dsicm_hw_reset(struct panel_drv_data *ddata)
+{
+ gpiod_set_value(ddata->reset_gpio, 1);
+ udelay(10);
+ /* reset the panel */
+ gpiod_set_value(ddata->reset_gpio, 0);
+ /* assert reset */
+ udelay(10);
+ gpiod_set_value(ddata->reset_gpio, 1);
+ /* wait after releasing reset */
+ usleep_range(5000, 10000);
+}
+
+static int dsicm_power_on(struct panel_drv_data *ddata)
+{
+ u8 id1, id2, id3;
+ int r;
+
+ dsicm_hw_reset(ddata);
+
+ ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ r = dsicm_sleep_out(ddata);
+ if (r)
+ goto err;
+
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ (1<<2) | (1<<5)); /* BL | BCTRL */
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT);
+ if (r)
+ goto err;
+
+ r = dsicm_set_update_window(ddata);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_display_on(ddata->dsi);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+
+ /* possible panel bug */
+ msleep(100);
+
+ ddata->enabled = true;
+
+ if (!ddata->intro_printed) {
+ dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n",
+ id1, id2, id3);
+ ddata->intro_printed = true;
+ }
+
+ ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n");
+
+ dsicm_hw_reset(ddata);
+
+ return r;
+}
+
+static int dsicm_power_off(struct panel_drv_data *ddata)
+{
+ int r;
+
+ ddata->enabled = false;
+
+ r = mipi_dsi_dcs_set_display_off(ddata->dsi);
+ if (!r)
+ r = dsicm_sleep_in(ddata);
+
+ if (r) {
+ dev_err(&ddata->dsi->dev,
+ "error disabling panel, issuing HW reset\n");
+ dsicm_hw_reset(ddata);
+ }
+
+ return r;
+}
+
+static int dsicm_prepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to enable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_enable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_on(ddata);
+ if (r)
+ goto err;
+
+ mutex_unlock(&ddata->lock);
+
+ dsicm_bl_power(ddata, true);
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r);
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static int dsicm_unprepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to disable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_disable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ dsicm_bl_power(ddata, false);
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_off(ddata);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &ddata->mode);
+ if (!mode) {
+ dev_err(&ddata->dsi->dev, "failed to add mode %ux%ux@%u kHz\n",
+ ddata->mode.hdisplay, ddata->mode.vdisplay,
+ ddata->mode.clock);
+ return -ENOMEM;
+ }
+
+ connector->display_info.width_mm = ddata->panel_data->width_mm;
+ connector->display_info.height_mm = ddata->panel_data->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs dsicm_panel_funcs = {
+ .unprepare = dsicm_unprepare,
+ .disable = dsicm_disable,
+ .prepare = dsicm_prepare,
+ .enable = dsicm_enable,
+ .get_modes = dsicm_get_modes,
+};
+
+static int dsicm_probe_of(struct mipi_dsi_device *dsi)
+{
+ struct backlight_device *backlight;
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+ int err;
+ struct drm_display_mode *mode = &ddata->mode;
+
+ ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->reset_gpio)) {
+ err = PTR_ERR(ddata->reset_gpio);
+ dev_err(&dsi->dev, "reset gpio request failed: %d", err);
+ return err;
+ }
+
+ mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal =
+ ddata->panel_data->xres;
+ mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal =
+ ddata->panel_data->yres;
+ mode->clock = ddata->panel_data->xres * ddata->panel_data->yres *
+ ddata->panel_data->refresh / 1000;
+ mode->width_mm = ddata->panel_data->width_mm;
+ mode->height_mm = ddata->panel_data->height_mm;
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+
+ ddata->supplies[0].supply = "vpnl";
+ ddata->supplies[1].supply = "vddi";
+ err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies),
+ ddata->supplies);
+ if (err)
+ return err;
+
+ backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ /* If no backlight device is found, assume native backlight support */
+ if (backlight)
+ ddata->extbldev = backlight;
+ else
+ ddata->use_dsi_backlight = true;
+
+ return 0;
+}
+
+static int dsicm_probe(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata;
+ struct backlight_device *bldev = NULL;
+ struct device *dev = &dsi->dev;
+ int r;
+
+ dev_dbg(dev, "probe\n");
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ddata);
+ ddata->dsi = dsi;
+
+ ddata->panel_data = of_device_get_match_data(dev);
+ if (!ddata->panel_data)
+ return -ENODEV;
+
+ r = dsicm_probe_of(dsi);
+ if (r)
+ return r;
+
+ mutex_init(&ddata->lock);
+
+ dsicm_hw_reset(ddata);
+
+ drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (ddata->use_dsi_backlight) {
+ struct backlight_properties props = { 0 };
+ props.max_brightness = 255;
+ props.type = BACKLIGHT_RAW;
+
+ bldev = devm_backlight_device_register(dev, dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_bl;
+ }
+
+ ddata->bldev = bldev;
+ }
+
+ r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
+ if (r) {
+ dev_err(dev, "failed to create sysfs files\n");
+ goto err_bl;
+ }
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+ dsi->hs_rate = ddata->panel_data->max_hs_rate;
+ dsi->lp_rate = ddata->panel_data->max_lp_rate;
+
+ drm_panel_add(&ddata->panel);
+
+ r = mipi_dsi_attach(dsi);
+ if (r < 0)
+ goto err_dsi_attach;
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ddata->panel);
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+err_bl:
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return r;
+}
+
+static int dsicm_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+
+ dev_dbg(&dsi->dev, "remove\n");
+
+ mipi_dsi_detach(dsi);
+
+ drm_panel_remove(&ddata->panel);
+
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return 0;
+}
+
+static const struct dsic_panel_data taal_data = {
+ .xres = 864,
+ .yres = 480,
+ .refresh = 60,
+ .width_mm = 0,
+ .height_mm = 0,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data himalaya_data = {
+ .xres = 480,
+ .yres = 864,
+ .refresh = 60,
+ .width_mm = 49,
+ .height_mm = 88,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data droid4_data = {
+ .xres = 540,
+ .yres = 960,
+ .refresh = 60,
+ .width_mm = 50,
+ .height_mm = 89,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct of_device_id dsicm_of_match[] = {
+ { .compatible = "tpo,taal", .data = &taal_data },
+ { .compatible = "nokia,himalaya", &himalaya_data },
+ { .compatible = "motorola,droid4-panel", &droid4_data },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dsicm_of_match);
+
+static struct mipi_dsi_driver dsicm_driver = {
+ .probe = dsicm_probe,
+ .remove = dsicm_remove,
+ .driver = {
+ .name = "panel-dsi-cm",
+ .of_match_table = dsicm_of_match,
+ },
+};
+module_mipi_dsi_driver(dsicm_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index bc36aa3c1123..fe5ac3ef9018 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
new file mode 100644
index 000000000000..8f6ac1a40c31
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct khadas_ts050_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+struct khadas_ts050_panel_cmd {
+ u8 cmd;
+ u8 data;
+};
+
+/* Only the CMD1 User Command set is documented */
+static const struct khadas_ts050_panel_cmd init_code[] = {
+ /* Select Unknown CMD Page (Undocumented) */
+ {0xff, 0xee},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x1f, 0x45},
+ {0x24, 0x4f},
+ {0x38, 0xc8},
+ {0x39, 0x27},
+ {0x1e, 0x77},
+ {0x1d, 0x0f},
+ {0x7e, 0x71},
+ {0x7c, 0x03},
+ {0xff, 0x00},
+ {0xfb, 0x01},
+ {0x35, 0x01},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x55},
+ {0x02, 0x40},
+ {0x05, 0x40},
+ {0x06, 0x4a},
+ {0x07, 0x24},
+ {0x08, 0x0c},
+ {0x0b, 0x7d},
+ {0x0c, 0x7d},
+ {0x0e, 0xb0},
+ {0x0f, 0xae},
+ {0x11, 0x10},
+ {0x12, 0x10},
+ {0x13, 0x03},
+ {0x14, 0x4a},
+ {0x15, 0x12},
+ {0x16, 0x12},
+ {0x18, 0x00},
+ {0x19, 0x77},
+ {0x1a, 0x55},
+ {0x1b, 0x13},
+ {0x1c, 0x00},
+ {0x1d, 0x00},
+ {0x1e, 0x13},
+ {0x1f, 0x00},
+ {0x23, 0x00},
+ {0x24, 0x00},
+ {0x25, 0x00},
+ {0x26, 0x00},
+ {0x27, 0x00},
+ {0x28, 0x00},
+ {0x35, 0x00},
+ {0x66, 0x00},
+ {0x58, 0x82},
+ {0x59, 0x02},
+ {0x5a, 0x02},
+ {0x5b, 0x02},
+ {0x5c, 0x82},
+ {0x5d, 0x82},
+ {0x5e, 0x02},
+ {0x5f, 0x02},
+ {0x72, 0x31},
+ /* Select CMD2 Page4 (Undocumented) */
+ {0xff, 0x05},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x0b},
+ {0x02, 0x0c},
+ {0x03, 0x09},
+ {0x04, 0x0a},
+ {0x05, 0x00},
+ {0x06, 0x0f},
+ {0x07, 0x10},
+ {0x08, 0x00},
+ {0x09, 0x00},
+ {0x0a, 0x00},
+ {0x0b, 0x00},
+ {0x0c, 0x00},
+ {0x0d, 0x13},
+ {0x0e, 0x15},
+ {0x0f, 0x17},
+ {0x10, 0x01},
+ {0x11, 0x0b},
+ {0x12, 0x0c},
+ {0x13, 0x09},
+ {0x14, 0x0a},
+ {0x15, 0x00},
+ {0x16, 0x0f},
+ {0x17, 0x10},
+ {0x18, 0x00},
+ {0x19, 0x00},
+ {0x1a, 0x00},
+ {0x1b, 0x00},
+ {0x1c, 0x00},
+ {0x1d, 0x13},
+ {0x1e, 0x15},
+ {0x1f, 0x17},
+ {0x20, 0x00},
+ {0x21, 0x03},
+ {0x22, 0x01},
+ {0x23, 0x40},
+ {0x24, 0x40},
+ {0x25, 0xed},
+ {0x29, 0x58},
+ {0x2a, 0x12},
+ {0x2b, 0x01},
+ {0x4b, 0x06},
+ {0x4c, 0x11},
+ {0x4d, 0x20},
+ {0x4e, 0x02},
+ {0x4f, 0x02},
+ {0x50, 0x20},
+ {0x51, 0x61},
+ {0x52, 0x01},
+ {0x53, 0x63},
+ {0x54, 0x77},
+ {0x55, 0xed},
+ {0x5b, 0x00},
+ {0x5c, 0x00},
+ {0x5d, 0x00},
+ {0x5e, 0x00},
+ {0x5f, 0x15},
+ {0x60, 0x75},
+ {0x61, 0x00},
+ {0x62, 0x00},
+ {0x63, 0x00},
+ {0x64, 0x00},
+ {0x65, 0x00},
+ {0x66, 0x00},
+ {0x67, 0x00},
+ {0x68, 0x04},
+ {0x69, 0x00},
+ {0x6a, 0x00},
+ {0x6c, 0x40},
+ {0x75, 0x01},
+ {0x76, 0x01},
+ {0x7a, 0x80},
+ {0x7b, 0xa3},
+ {0x7c, 0xd8},
+ {0x7d, 0x60},
+ {0x7f, 0x15},
+ {0x80, 0x81},
+ {0x83, 0x05},
+ {0x93, 0x08},
+ {0x94, 0x10},
+ {0x8a, 0x00},
+ {0x9b, 0x0f},
+ {0xea, 0xff},
+ {0xec, 0x00},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x75, 0x00},
+ {0x76, 0xdf},
+ {0x77, 0x00},
+ {0x78, 0xe4},
+ {0x79, 0x00},
+ {0x7a, 0xed},
+ {0x7b, 0x00},
+ {0x7c, 0xf6},
+ {0x7d, 0x00},
+ {0x7e, 0xff},
+ {0x7f, 0x01},
+ {0x80, 0x07},
+ {0x81, 0x01},
+ {0x82, 0x10},
+ {0x83, 0x01},
+ {0x84, 0x18},
+ {0x85, 0x01},
+ {0x86, 0x20},
+ {0x87, 0x01},
+ {0x88, 0x3d},
+ {0x89, 0x01},
+ {0x8a, 0x56},
+ {0x8b, 0x01},
+ {0x8c, 0x84},
+ {0x8d, 0x01},
+ {0x8e, 0xab},
+ {0x8f, 0x01},
+ {0x90, 0xec},
+ {0x91, 0x02},
+ {0x92, 0x22},
+ {0x93, 0x02},
+ {0x94, 0x23},
+ {0x95, 0x02},
+ {0x96, 0x55},
+ {0x97, 0x02},
+ {0x98, 0x8b},
+ {0x99, 0x02},
+ {0x9a, 0xaf},
+ {0x9b, 0x02},
+ {0x9c, 0xdf},
+ {0x9d, 0x03},
+ {0x9e, 0x01},
+ {0x9f, 0x03},
+ {0xa0, 0x2c},
+ {0xa2, 0x03},
+ {0xa3, 0x39},
+ {0xa4, 0x03},
+ {0xa5, 0x47},
+ {0xa6, 0x03},
+ {0xa7, 0x56},
+ {0xa9, 0x03},
+ {0xaa, 0x66},
+ {0xab, 0x03},
+ {0xac, 0x76},
+ {0xad, 0x03},
+ {0xae, 0x85},
+ {0xaf, 0x03},
+ {0xb0, 0x90},
+ {0xb1, 0x03},
+ {0xb2, 0xcb},
+ {0xb3, 0x00},
+ {0xb4, 0xdf},
+ {0xb5, 0x00},
+ {0xb6, 0xe4},
+ {0xb7, 0x00},
+ {0xb8, 0xed},
+ {0xb9, 0x00},
+ {0xba, 0xf6},
+ {0xbb, 0x00},
+ {0xbc, 0xff},
+ {0xbd, 0x01},
+ {0xbe, 0x07},
+ {0xbf, 0x01},
+ {0xc0, 0x10},
+ {0xc1, 0x01},
+ {0xc2, 0x18},
+ {0xc3, 0x01},
+ {0xc4, 0x20},
+ {0xc5, 0x01},
+ {0xc6, 0x3d},
+ {0xc7, 0x01},
+ {0xc8, 0x56},
+ {0xc9, 0x01},
+ {0xca, 0x84},
+ {0xcb, 0x01},
+ {0xcc, 0xab},
+ {0xcd, 0x01},
+ {0xce, 0xec},
+ {0xcf, 0x02},
+ {0xd0, 0x22},
+ {0xd1, 0x02},
+ {0xd2, 0x23},
+ {0xd3, 0x02},
+ {0xd4, 0x55},
+ {0xd5, 0x02},
+ {0xd6, 0x8b},
+ {0xd7, 0x02},
+ {0xd8, 0xaf},
+ {0xd9, 0x02},
+ {0xda, 0xdf},
+ {0xdb, 0x03},
+ {0xdc, 0x01},
+ {0xdd, 0x03},
+ {0xde, 0x2c},
+ {0xdf, 0x03},
+ {0xe0, 0x39},
+ {0xe1, 0x03},
+ {0xe2, 0x47},
+ {0xe3, 0x03},
+ {0xe4, 0x56},
+ {0xe5, 0x03},
+ {0xe6, 0x66},
+ {0xe7, 0x03},
+ {0xe8, 0x76},
+ {0xe9, 0x03},
+ {0xea, 0x85},
+ {0xeb, 0x03},
+ {0xec, 0x90},
+ {0xed, 0x03},
+ {0xee, 0xcb},
+ {0xef, 0x00},
+ {0xf0, 0xbb},
+ {0xf1, 0x00},
+ {0xf2, 0xc0},
+ {0xf3, 0x00},
+ {0xf4, 0xcc},
+ {0xf5, 0x00},
+ {0xf6, 0xd6},
+ {0xf7, 0x00},
+ {0xf8, 0xe1},
+ {0xf9, 0x00},
+ {0xfa, 0xea},
+ /* Select CMD2 Page2 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x00},
+ {0x01, 0xf4},
+ {0x02, 0x00},
+ {0x03, 0xef},
+ {0x04, 0x01},
+ {0x05, 0x07},
+ {0x06, 0x01},
+ {0x07, 0x28},
+ {0x08, 0x01},
+ {0x09, 0x44},
+ {0x0a, 0x01},
+ {0x0b, 0x76},
+ {0x0c, 0x01},
+ {0x0d, 0xa0},
+ {0x0e, 0x01},
+ {0x0f, 0xe7},
+ {0x10, 0x02},
+ {0x11, 0x1f},
+ {0x12, 0x02},
+ {0x13, 0x22},
+ {0x14, 0x02},
+ {0x15, 0x54},
+ {0x16, 0x02},
+ {0x17, 0x8b},
+ {0x18, 0x02},
+ {0x19, 0xaf},
+ {0x1a, 0x02},
+ {0x1b, 0xe0},
+ {0x1c, 0x03},
+ {0x1d, 0x01},
+ {0x1e, 0x03},
+ {0x1f, 0x2d},
+ {0x20, 0x03},
+ {0x21, 0x39},
+ {0x22, 0x03},
+ {0x23, 0x47},
+ {0x24, 0x03},
+ {0x25, 0x57},
+ {0x26, 0x03},
+ {0x27, 0x65},
+ {0x28, 0x03},
+ {0x29, 0x77},
+ {0x2a, 0x03},
+ {0x2b, 0x85},
+ {0x2d, 0x03},
+ {0x2f, 0x8f},
+ {0x30, 0x03},
+ {0x31, 0xcb},
+ {0x32, 0x00},
+ {0x33, 0xbb},
+ {0x34, 0x00},
+ {0x35, 0xc0},
+ {0x36, 0x00},
+ {0x37, 0xcc},
+ {0x38, 0x00},
+ {0x39, 0xd6},
+ {0x3a, 0x00},
+ {0x3b, 0xe1},
+ {0x3d, 0x00},
+ {0x3f, 0xea},
+ {0x40, 0x00},
+ {0x41, 0xf4},
+ {0x42, 0x00},
+ {0x43, 0xfe},
+ {0x44, 0x01},
+ {0x45, 0x07},
+ {0x46, 0x01},
+ {0x47, 0x28},
+ {0x48, 0x01},
+ {0x49, 0x44},
+ {0x4a, 0x01},
+ {0x4b, 0x76},
+ {0x4c, 0x01},
+ {0x4d, 0xa0},
+ {0x4e, 0x01},
+ {0x4f, 0xe7},
+ {0x50, 0x02},
+ {0x51, 0x1f},
+ {0x52, 0x02},
+ {0x53, 0x22},
+ {0x54, 0x02},
+ {0x55, 0x54},
+ {0x56, 0x02},
+ {0x58, 0x8b},
+ {0x59, 0x02},
+ {0x5a, 0xaf},
+ {0x5b, 0x02},
+ {0x5c, 0xe0},
+ {0x5d, 0x03},
+ {0x5e, 0x01},
+ {0x5f, 0x03},
+ {0x60, 0x2d},
+ {0x61, 0x03},
+ {0x62, 0x39},
+ {0x63, 0x03},
+ {0x64, 0x47},
+ {0x65, 0x03},
+ {0x66, 0x57},
+ {0x67, 0x03},
+ {0x68, 0x65},
+ {0x69, 0x03},
+ {0x6a, 0x77},
+ {0x6b, 0x03},
+ {0x6c, 0x85},
+ {0x6d, 0x03},
+ {0x6e, 0x8f},
+ {0x6f, 0x03},
+ {0x70, 0xcb},
+ {0x71, 0x00},
+ {0x72, 0x00},
+ {0x73, 0x00},
+ {0x74, 0x21},
+ {0x75, 0x00},
+ {0x76, 0x4c},
+ {0x77, 0x00},
+ {0x78, 0x6b},
+ {0x79, 0x00},
+ {0x7a, 0x85},
+ {0x7b, 0x00},
+ {0x7c, 0x9a},
+ {0x7d, 0x00},
+ {0x7e, 0xad},
+ {0x7f, 0x00},
+ {0x80, 0xbe},
+ {0x81, 0x00},
+ {0x82, 0xcd},
+ {0x83, 0x01},
+ {0x84, 0x01},
+ {0x85, 0x01},
+ {0x86, 0x29},
+ {0x87, 0x01},
+ {0x88, 0x68},
+ {0x89, 0x01},
+ {0x8a, 0x98},
+ {0x8b, 0x01},
+ {0x8c, 0xe5},
+ {0x8d, 0x02},
+ {0x8e, 0x1e},
+ {0x8f, 0x02},
+ {0x90, 0x30},
+ {0x91, 0x02},
+ {0x92, 0x52},
+ {0x93, 0x02},
+ {0x94, 0x88},
+ {0x95, 0x02},
+ {0x96, 0xaa},
+ {0x97, 0x02},
+ {0x98, 0xd7},
+ {0x99, 0x02},
+ {0x9a, 0xf7},
+ {0x9b, 0x03},
+ {0x9c, 0x21},
+ {0x9d, 0x03},
+ {0x9e, 0x2e},
+ {0x9f, 0x03},
+ {0xa0, 0x3d},
+ {0xa2, 0x03},
+ {0xa3, 0x4c},
+ {0xa4, 0x03},
+ {0xa5, 0x5e},
+ {0xa6, 0x03},
+ {0xa7, 0x71},
+ {0xa9, 0x03},
+ {0xaa, 0x86},
+ {0xab, 0x03},
+ {0xac, 0x94},
+ {0xad, 0x03},
+ {0xae, 0xfa},
+ {0xaf, 0x00},
+ {0xb0, 0x00},
+ {0xb1, 0x00},
+ {0xb2, 0x21},
+ {0xb3, 0x00},
+ {0xb4, 0x4c},
+ {0xb5, 0x00},
+ {0xb6, 0x6b},
+ {0xb7, 0x00},
+ {0xb8, 0x85},
+ {0xb9, 0x00},
+ {0xba, 0x9a},
+ {0xbb, 0x00},
+ {0xbc, 0xad},
+ {0xbd, 0x00},
+ {0xbe, 0xbe},
+ {0xbf, 0x00},
+ {0xc0, 0xcd},
+ {0xc1, 0x01},
+ {0xc2, 0x01},
+ {0xc3, 0x01},
+ {0xc4, 0x29},
+ {0xc5, 0x01},
+ {0xc6, 0x68},
+ {0xc7, 0x01},
+ {0xc8, 0x98},
+ {0xc9, 0x01},
+ {0xca, 0xe5},
+ {0xcb, 0x02},
+ {0xcc, 0x1e},
+ {0xcd, 0x02},
+ {0xce, 0x20},
+ {0xcf, 0x02},
+ {0xd0, 0x52},
+ {0xd1, 0x02},
+ {0xd2, 0x88},
+ {0xd3, 0x02},
+ {0xd4, 0xaa},
+ {0xd5, 0x02},
+ {0xd6, 0xd7},
+ {0xd7, 0x02},
+ {0xd8, 0xf7},
+ {0xd9, 0x03},
+ {0xda, 0x21},
+ {0xdb, 0x03},
+ {0xdc, 0x2e},
+ {0xdd, 0x03},
+ {0xde, 0x3d},
+ {0xdf, 0x03},
+ {0xe0, 0x4c},
+ {0xe1, 0x03},
+ {0xe2, 0x5e},
+ {0xe3, 0x03},
+ {0xe4, 0x71},
+ {0xe5, 0x03},
+ {0xe6, 0x86},
+ {0xe7, 0x03},
+ {0xe8, 0x94},
+ {0xe9, 0x03},
+ {0xea, 0xfa},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page1 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page3 (Undocumented) */
+ {0xff, 0x04},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD1 */
+ {0xff, 0x00},
+ {0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */
+ {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
+
+static inline
+struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct khadas_ts050_panel, base);
+}
+
+static int khadas_ts050_panel_prepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ unsigned int i;
+ int err;
+
+ if (khadas_ts050->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+
+ err = regulator_enable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1);
+
+ msleep(60);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0);
+
+ /* Select CMD2 page 4 (Undocumented) */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1);
+
+ /* Reload CMD1: Don't reload default value to register */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1);
+
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1);
+
+ msleep(100);
+
+ for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ err = mipi_dsi_dcs_write(khadas_ts050->link,
+ init_code[i].cmd,
+ &init_code[i].data, 1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed write cmds: %d\n", err);
+ goto poweroff;
+ }
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ /* Select CMD1 */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1);
+
+ err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set tear on: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_display_on(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->prepared = true;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ regulator_disable(khadas_ts050->supply);
+
+ return err;
+}
+
+static int khadas_ts050_panel_unprepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->prepared)
+ return 0;
+
+ khadas_ts050->prepared = false;
+
+ err = mipi_dsi_dcs_enter_sleep_mode(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+ msleep(150);
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ err = regulator_disable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_enable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+
+ khadas_ts050->enabled = true;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_disable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->enabled)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->enabled = false;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 120000,
+ .hdisplay = 1088,
+ .hsync_start = 1088 + 104,
+ .hsync_end = 1088 + 104 + 4,
+ .htotal = 1088 + 104 + 4 + 127,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 3,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int khadas_ts050_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 64;
+ connector->display_info.height_mm = 118;
+ connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
+ .prepare = khadas_ts050_panel_prepare,
+ .unprepare = khadas_ts050_panel_unprepare,
+ .enable = khadas_ts050_panel_enable,
+ .disable = khadas_ts050_panel_disable,
+ .get_modes = khadas_ts050_panel_get_modes,
+};
+
+static const struct of_device_id khadas_ts050_of_match[] = {
+ { .compatible = "khadas,ts050", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
+
+static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
+{
+ struct device *dev = &khadas_ts050->link->dev;
+ int err;
+
+ khadas_ts050->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(khadas_ts050->supply))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply),
+ "failed to get power supply");
+
+ khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(khadas_ts050->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio),
+ "failed to get reset gpio");
+
+ khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(khadas_ts050->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
+ "failed to get enable gpio");
+
+ drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
+ &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&khadas_ts050->base);
+ if (err)
+ return err;
+
+ drm_panel_add(&khadas_ts050->base);
+
+ return 0;
+}
+
+static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
+ GFP_KERNEL);
+ if (!khadas_ts050)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, khadas_ts050);
+ khadas_ts050->link = dsi;
+
+ err = khadas_ts050_panel_add(khadas_ts050);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err)
+ drm_panel_remove(&khadas_ts050->base);
+
+ return err;
+}
+
+static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&khadas_ts050->base);
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+
+ return 0;
+}
+
+static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+}
+
+static struct mipi_dsi_driver khadas_ts050_panel_driver = {
+ .driver = {
+ .name = "panel-khadas-ts050",
+ .of_match_table = khadas_ts050_of_match,
+ },
+ .probe = khadas_ts050_panel_probe,
+ .remove = khadas_ts050_panel_remove,
+ .shutdown = khadas_ts050_panel_shutdown,
+};
+module_mipi_dsi_driver(khadas_ts050_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Khadas TS050 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 0c5f22e95c2d..30f28ad4df6b 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
@@ -22,6 +23,7 @@
/* Manufacturer specific Commands send via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
#define MANTIX_CMD_INT_CANCEL 0x4C
+#define MANTIX_CMD_SPI_FINISH 0x90
struct mantix {
struct device *dev;
@@ -33,6 +35,8 @@ struct mantix {
struct regulator *avdd;
struct regulator *avee;
struct regulator *vddi;
+
+ const struct drm_display_mode *default_mode;
};
static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
@@ -66,6 +70,10 @@ static int mantix_init_sequence(struct mantix *ctx)
dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
msleep(20);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+ msleep(20);
+
dev_dbg(dev, "Panel init sequence done\n");
return 0;
}
@@ -182,7 +190,7 @@ static int mantix_prepare(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const struct drm_display_mode default_mode_mantix = {
.hdisplay = 720,
.hsync_start = 720 + 45,
.hsync_end = 720 + 45 + 14,
@@ -197,17 +205,32 @@ static const struct drm_display_mode default_mode = {
.height_mm = 130,
};
+static const struct drm_display_mode default_mode_ys = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 45,
+ .hsync_end = 720 + 45 + 14,
+ .htotal = 720 + 45 + 14 + 25,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 175,
+ .vsync_end = 1440 + 175 + 8,
+ .vtotal = 1440 + 175 + 8 + 50,
+ .clock = 85298,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
static int mantix_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct mantix *ctx = panel_to_mantix(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->default_mode);
if (!mode) {
dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode));
return -ENOMEM;
}
@@ -238,6 +261,7 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
@@ -288,8 +312,8 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
}
dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode),
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode),
mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
return 0;
@@ -316,7 +340,8 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id mantix_of_match[] = {
- { .compatible = "mantix,mlaf057we51-x" },
+ { .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix },
+ { .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mantix_of_match);
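
The pattern adopted here — one of_device_id entry per compatible string, each carrying a pointer to its display mode in the .data member and retrieved with of_device_get_match_data() — lets a single driver serve both panels. A userspace model of that lookup (the table mirrors the patch; the helper and timing values are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct display_mode { int hdisplay, vdisplay, clock_khz; };

    /* Two timing sets, one per supported panel (values illustrative). */
    static const struct display_mode mode_mantix = { 720, 1440, 85298 };
    static const struct display_mode mode_ys     = { 720, 1440, 85298 };

    struct of_match { const char *compatible; const void *data; };

    static const struct of_match match_table[] = {
        { "mantix,mlaf057we51-x", &mode_mantix },
        { "ys,ys57pss36bh5gq",    &mode_ys },
        { NULL, NULL },
    };

    /* Models of_device_get_match_data(): maps compatible -> .data */
    static const void *get_match_data(const char *compatible)
    {
        const struct of_match *m;

        for (m = match_table; m->compatible; m++)
            if (!strcmp(m->compatible, compatible))
                return m->data;
        return NULL;
    }

    int main(void)
    {
        const struct display_mode *mode =
            get_match_data("ys,ys57pss36bh5gq");

        if (mode)
            printf("%dx%d @ %d kHz\n", mode->hdisplay,
                   mode->vdisplay, mode->clock_khz);
        return 0;
    }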
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6b4e97bfd46e..603c5dfe8768 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -25,6 +25,14 @@
/* Manufacturer Command Set */
#define MCS_ELVSS_ON 0xb1
#define MCS_TEMP_SWIRE 0xb2
+#define MCS_PENTILE_1 0xb3
+#define MCS_PENTILE_2 0xb4
+#define MCS_GAMMA_DELTA_Y_RED 0xb5
+#define MCS_GAMMA_DELTA_X_RED 0xb6
+#define MCS_GAMMA_DELTA_Y_GREEN 0xb7
+#define MCS_GAMMA_DELTA_X_GREEN 0xb8
+#define MCS_GAMMA_DELTA_Y_BLUE 0xb9
+#define MCS_GAMMA_DELTA_X_BLUE 0xba
#define MCS_MIECTL1 0xc0
#define MCS_BCMODE 0xc1
#define MCS_ERROR_CHECK 0xd5
@@ -281,6 +289,7 @@ struct s6e63m0 {
struct backlight_device *bl_dev;
u8 lcd_type;
u8 elvss_pulse;
+ bool dsi_mode;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
static void s6e63m0_init(struct s6e63m0 *ctx)
{
- s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
- 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
- 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ /*
+ * We do not know why there is a difference in the DSI mode.
+ * (No datasheet.)
+ *
+ * In the vendor driver this sequence is called
+ * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
+ */
+ if (ctx->dsi_mode)
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
+ 0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
+ else
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
0x00, 0x8e, 0x07);
- s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
- s6e63m0_dcs_write_seq_static(ctx, 0xb5,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb6,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb7,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb9,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xba,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
.update_status = s6e63m0_set_brightness,
};
-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
+static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
{
struct backlight_properties props = {
.type = BACKLIGHT_RAW,
- .brightness = MAX_BRIGHTNESS,
- .max_brightness = MAX_BRIGHTNESS
+ .brightness = max_brightness,
+ .max_brightness = max_brightness,
};
struct device *dev = ctx->dev;
int ret = 0;
@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
bool dsi_mode)
{
struct s6e63m0 *ctx;
+ u32 max_brightness;
int ret;
ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->dsi_mode = dsi_mode;
ctx->dcs_read = dcs_read;
ctx->dcs_write = dcs_write;
dev_set_drvdata(dev, ctx);
@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
ctx->enabled = false;
ctx->prepared = false;
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = MAX_BRIGHTNESS;
+ if (max_brightness > MAX_BRIGHTNESS) {
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = MAX_BRIGHTNESS;
+ }
+
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
dsi_mode ? DRM_MODE_CONNECTOR_DSI :
DRM_MODE_CONNECTOR_DPI);
- ret = s6e63m0_backlight_register(ctx);
+ ret = s6e63m0_backlight_register(ctx, max_brightness);
if (ret < 0)
return ret;
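
The max-brightness handling added above follows a common optional-property pattern: read the value if present, fall back to the hardware default when absent, and clamp out-of-range values instead of failing the probe. A standalone sketch of that resolution logic (MAX_BRIGHTNESS and the property reader are stand-ins for the driver's define and device_property_read_u32()):

    #include <stdio.h>

    #define MAX_BRIGHTNESS 255    /* hardware ceiling; value illustrative */

    /* Stand-in for device_property_read_u32(): returns 0 on success. */
    static int read_u32_property(int present, unsigned int value,
                                 unsigned int *out)
    {
        if (!present)
            return -1;
        *out = value;
        return 0;
    }

    static unsigned int resolve_max_brightness(int present, unsigned int value)
    {
        unsigned int max;

        if (read_u32_property(present, value, &max))
            max = MAX_BRIGHTNESS;    /* property absent: use the default */
        if (max > MAX_BRIGHTNESS) {
            fprintf(stderr, "illegal max brightness specified\n");
            max = MAX_BRIGHTNESS;    /* out of range: clamp, don't fail */
        }
        return max;
    }

    int main(void)
    {
        printf("%u\n", resolve_max_brightness(0, 0));    /* -> 255 */
        printf("%u\n", resolve_max_brightness(1, 100));  /* -> 100 */
        printf("%u\n", resolve_max_brightness(1, 999));  /* -> 255 + warning */
        return 0;
    }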
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 41bbec72b2da..4e2dad314c79 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -39,72 +39,145 @@
#include <drm/drm_panel.h>
/**
- * struct panel_desc
- * @modes: Pointer to array of fixed modes appropriate for this panel. If
- * only one mode then this can just be the address of this the mode.
- * NOTE: cannot be used with "timings" and also if this is specified
- * then you cannot override the mode in the device tree.
- * @num_modes: Number of elements in modes array.
- * @timings: Pointer to array of display timings. NOTE: cannot be used with
- * "modes" and also these will be used to validate a device tree
- * override if one is present.
- * @num_timings: Number of elements in timings array.
- * @bpc: Bits per color.
- * @size: Structure containing the physical size of this panel.
- * @delay: Structure containing various delay values for this panel.
- * @bus_format: See MEDIA_BUS_FMT_... defines.
- * @bus_flags: See DRM_BUS_FLAG_... defines.
- * @connector_type: LVDS, eDP, DSI, DPI, etc.
+ * struct panel_desc - Describes a simple panel.
*/
struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ */
const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
unsigned int num_timings;
+ /** @bpc: Bits per color. */
unsigned int bpc;
- /**
- * @width: width (in millimeters) of the panel's active display area
- * @height: height (in millimeters) of the panel's active display area
- */
+ /** @size: Structure containing the physical size of this panel. */
struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
unsigned int height;
} size;
- /**
- * @prepare: the time (in milliseconds) that it takes for the panel to
- * become ready and start receiving video data
- * @hpd_absent_delay: Add this to the prepare delay if we know Hot
- * Plug Detect isn't used.
- * @enable: the time (in milliseconds) that it takes for the panel to
- * display the first valid frame after starting to receive
- * video data
- * @disable: the time (in milliseconds) that it takes for the panel to
- * turn the display off (no content is visible)
- * @unprepare: the time (in milliseconds) that it takes for the panel
- * to power itself down completely
- */
+ /** @delay: Structure containing various delay values for this panel. */
struct {
+ /**
+ * @delay.prepare: Time for the panel to become ready.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ */
unsigned int prepare;
+
+ /**
+ * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect
+ * isn't used.
+ */
unsigned int hpd_absent_delay;
+
+ /**
+ * @delay.prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that must pass between
+ * the end of prepare and the start of enable. If less time
+ * has passed when enable is called, the driver waits for the
+ * remainder.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @delay.enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ */
unsigned int enable;
+
+ /**
+ * @delay.disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ */
unsigned int disable;
+
+ /**
+ * @delay.unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ */
unsigned int unprepare;
} delay;
+ /** @bus_format: See MEDIA_BUS_FMT_... defines. */
u32 bus_format;
+
+ /** @bus_flags: See DRM_BUS_FLAG_... defines. */
u32 bus_flags;
+
+ /** @connector_type: LVDS, eDP, DSI, DPI, etc. */
int connector_type;
};
struct panel_simple {
struct drm_panel base;
- bool prepared;
bool enabled;
bool no_hpd;
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
const struct panel_desc *desc;
struct regulator *supply;
@@ -232,6 +305,20 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
return num;
}
+static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
static int panel_simple_disable(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
@@ -251,17 +338,15 @@ static int panel_simple_unprepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
- if (!p->prepared)
+ if (p->prepared_time == 0)
return 0;
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- if (p->desc->delay.unprepare)
- msleep(p->desc->delay.unprepare);
-
- p->prepared = false;
+ p->prepared_time = 0;
+ p->unprepared_time = ktime_get();
return 0;
}
@@ -298,9 +383,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
int err;
int hpd_asserted;
- if (p->prepared)
+ if (p->prepared_time != 0)
return 0;
+ panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
+
err = regulator_enable(p->supply);
if (err < 0) {
dev_err(panel->dev, "failed to enable supply: %d\n", err);
@@ -335,7 +422,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
}
}
- p->prepared = true;
+ p->prepared_time = ktime_get();
return 0;
}
@@ -350,6 +437,8 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
+ panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
p->enabled = true;
return 0;
@@ -516,7 +605,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
- panel->prepared = false;
+ panel->prepared_time = 0;
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
@@ -1318,6 +1407,51 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
/* Also used for boe_nv133fhm_n62 */
static const struct drm_display_mode boe_nv133fhm_n61_modes = {
.clock = 147840,
@@ -2265,6 +2399,8 @@ static const struct panel_desc innolux_n116bge = {
.width = 256,
.height = 144,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode innolux_n125hce_gn1_mode = {
@@ -4034,6 +4170,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
.compatible = "boe,nv133fhm-n61",
.data = &boe_nv133fhm_n61,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b30510b1696a..a2c303e5732c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -530,10 +530,8 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
return -ENOMEM;
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n");
mipi_dsi_set_drvdata(dsi, ctx);
@@ -545,19 +543,13 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
dsi->lanes = ctx->desc->lanes;
ctx->vcc = devm_regulator_get(dev, "vcc");
- if (IS_ERR(ctx->vcc)) {
- ret = PTR_ERR(ctx->vcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to request vcc regulator\n");
+
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
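The conversions above rely on dev_err_probe() folding the -EPROBE_DEFER special case into one call: it logs via dev_err() only for real errors, records the deferral reason (visible in debugfs) otherwise, and passes the error code through. Generic before/after sketch, with res as a placeholder resource:

	/* before: every call site open-codes the deferral check */
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get resource: %d\n", ret);
		return ret;
	}

	/* after: identical behaviour, one line */
	if (IS_ERR(res))
		return dev_err_probe(dev, PTR_ERR(res), "failed to get resource\n");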
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f44d28fad085..56b3f5935703 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -76,6 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
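DEVFREQ_TIMER_DELAYED makes the 50 ms load polling run from an ordinary delayed work item rather than devfreq's default deferrable timer, so sampling continues even while the CPU is idle; with a deferrable timer, GPU load could go unmeasured (and the clock left low) whenever the CPU slept.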
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 40e6708fbbe2..fa0a737e9dea 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -228,7 +228,7 @@ static const struct drm_driver pl111_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -320,7 +320,7 @@ dev_put:
return ret;
}
-static int pl111_amba_remove(struct amba_device *amba_dev)
+static void pl111_amba_remove(struct amba_device *amba_dev)
{
struct device *dev = &amba_dev->dev;
struct drm_device *drm = amba_get_drvdata(amba_dev);
@@ -331,8 +331,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_panel_bridge_remove(priv->bridge);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index a7bc31f6d565..06caa61b5d66 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -271,7 +271,7 @@ struct qxl_mode {
/* qxl-1 compat: fixed */
struct qxl_modes {
uint32_t n_modes;
- struct qxl_mode modes[0];
+ struct qxl_mode modes[];
};
/* qxl-1 compat: append only */
@@ -382,12 +382,12 @@ struct qxl_data_chunk {
uint32_t data_size;
QXLPHYSICAL prev_chunk;
QXLPHYSICAL next_chunk;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_message {
union qxl_release_info release_info;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_compat_update_cmd {
@@ -469,7 +469,7 @@ struct qxl_raster_glyph {
struct qxl_point glyph_origin;
uint16_t width;
uint16_t height;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_string {
@@ -768,7 +768,7 @@ enum {
struct qxl_path_seg {
uint32_t flags;
uint32_t count;
- struct qxl_point_fix points[0];
+ struct qxl_point_fix points[];
};
struct qxl_path {
@@ -819,7 +819,7 @@ struct qxl_image_descriptor {
struct qxl_palette {
uint64_t unique;
uint16_t num_ents;
- uint32_t ents[0];
+ uint32_t ents[];
};
struct qxl_bitmap {
@@ -838,7 +838,7 @@ struct qxl_surface_id {
struct qxl_encoder_data {
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_image {
@@ -868,7 +868,7 @@ struct qxl_monitors_config {
uint16_t count;
uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
driver */
- struct qxl_head heads[0];
+ struct qxl_head heads[];
};
#pragma pack(pop)
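The [0] to [] conversions above switch these trailing arrays to C99 flexible array members, replacing the old GNU zero-length-array idiom with a form the compiler and the kernel's array-bounds checks understand. Allocation is unchanged; a sketch with a hypothetical entry count n, using the overflow-checked struct_size() helper from linux/overflow.h:

	/* one header plus n trailing entries, overflow-checked */
	struct qxl_modes *m = kzalloc(struct_size(m, modes, n), GFP_KERNEL);
	if (m)
		m->n_modes = n;	/* n entries follow the header */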
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e7f16f4cec7..1864467f1063 100644

--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -141,7 +141,7 @@ static void qxl_drm_release(struct drm_device *dev)
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
- * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ * reordering qxl_modeset_fini() + qxl_device_fini() calls is
* non-trivial though.
*/
qxl_modeset_fini(qdev);
@@ -163,7 +163,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct qxl_device *qdev = to_qxl(dev);
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8bd0f916dfbc..83b54f0dad61 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 16e1e589508e..b6075f452b9e 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -370,13 +370,14 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
- if (dev->pdev->revision < 4)
+ if (pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 1ba5a702d763..ddf6588a2a38 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -81,6 +81,7 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work)
int qxl_irq_init(struct qxl_device *qdev)
{
+ struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev);
int ret;
init_waitqueue_head(&qdev->display_event);
@@ -93,7 +94,7 @@ int qxl_irq_init(struct qxl_device *qdev)
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
- ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
+ ret = drm_irq_install(&qdev->ddev, pdev->irq);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 228e2b9198f1..4a60a52ab62e 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -111,7 +111,6 @@ int qxl_device_init(struct qxl_device *qdev,
{
int r, sb;
- qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
mutex_init(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ebf24c9d2bf2..e60a8f88e226 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
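This relies on the buffer's GEM base object recording its size in bytes: for these page-aligned BOs, bo->tbo.base.size equals the old num_pages << PAGE_SHIFT (e.g. 4 pages << 12 == 16384), so the accessor keeps its meaning while no longer depending on the removed num_pages field. The radeon hunks later in this patch make the same substitution.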
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e75e364655b8..0fcfc952d5e9 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7dd0c69baa47..33c09dc94f8b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -31,7 +31,6 @@
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6ac71755c22d..cdeb1db87222 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file r128_ioc32.c
*
* 32-bit ioctl compatibility routines for the R128 DRM.
@@ -170,13 +170,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * r128_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
*/
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 683de198e18d..0fce73b9a646 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2062,9 +2062,9 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((rdev->pdev->device == 0x71C5) &&
+ (rdev->pdev->subsystem_vendor == 0x106b) &&
+ (rdev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aef4efc692b1..2955bb32d5ad 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2612,7 +2612,6 @@ int r100_asic_reset(struct radeon_device *rdev, bool hard)
void r100_set_common_regs(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
u32 tmp;
@@ -2630,7 +2629,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
* don't report it in the bios connector
* table.
*/
- switch (dev->pdev->device) {
+ switch (rdev->pdev->device) {
/* RN50 */
case 0x515e:
case 0x5969:
@@ -2640,17 +2639,17 @@ void r100_set_common_regs(struct radeon_device *rdev)
case 0x5159:
case 0x515a:
/* DELL triple head servers */
- if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
- ((dev->pdev->subsystem_device == 0x016c) ||
- (dev->pdev->subsystem_device == 0x016d) ||
- (dev->pdev->subsystem_device == 0x016e) ||
- (dev->pdev->subsystem_device == 0x016f) ||
- (dev->pdev->subsystem_device == 0x0170) ||
- (dev->pdev->subsystem_device == 0x017d) ||
- (dev->pdev->subsystem_device == 0x017e) ||
- (dev->pdev->subsystem_device == 0x0183) ||
- (dev->pdev->subsystem_device == 0x018a) ||
- (dev->pdev->subsystem_device == 0x019a)))
+ if ((rdev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((rdev->pdev->subsystem_device == 0x016c) ||
+ (rdev->pdev->subsystem_device == 0x016d) ||
+ (rdev->pdev->subsystem_device == 0x016e) ||
+ (rdev->pdev->subsystem_device == 0x016f) ||
+ (rdev->pdev->subsystem_device == 0x0170) ||
+ (rdev->pdev->subsystem_device == 0x017d) ||
+ (rdev->pdev->subsystem_device == 0x017e) ||
+ (rdev->pdev->subsystem_device == 0x0183) ||
+ (rdev->pdev->subsystem_device == 0x018a) ||
+ (rdev->pdev->subsystem_device == 0x019a)))
force_dac2 = true;
break;
}
@@ -2798,7 +2797,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
if (rdev->mc.aper_size > config_aper_size)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index dc68e538d5a9..34b7c6f16479 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -220,7 +220,7 @@ int r600_fmt_get_nblocksx(u32 format, u32 w)
if (bw == 0)
return 0;
- return (w + bw - 1) / bw;
+ return DIV_ROUND_UP(w, bw);
}
int r600_fmt_get_nblocksy(u32 format, u32 h)
@@ -234,7 +234,7 @@ int r600_fmt_get_nblocksy(u32 format, u32 h)
if (bh == 0)
return 0;
- return (h + bh - 1) / bh;
+ return DIV_ROUND_UP(h, bh);
}
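DIV_ROUND_UP(x, y) is defined as ((x) + (y) - 1) / (y), so these substitutions change readability, not behaviour. Worked example for a hypothetical 4-pixel-wide block format: DIV_ROUND_UP(1920, 4) == 480 full blocks, while a 1921-pixel-wide surface yields DIV_ROUND_UP(1921, 4) == 481, counting the final partial block.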
struct array_mode_checker {
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5f3adba43e47..f09989bdce98 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,7 +75,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drm_gem.h>
@@ -2314,6 +2313,9 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
@@ -2623,14 +2625,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
-#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
- (rdev->ddev->pdev->device == 0x9443) || \
- (rdev->ddev->pdev->device == 0x944B) || \
- (rdev->ddev->pdev->device == 0x9506) || \
- (rdev->ddev->pdev->device == 0x9509) || \
- (rdev->ddev->pdev->device == 0x950F) || \
- (rdev->ddev->pdev->device == 0x689C) || \
- (rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_X2(rdev) ((rdev->pdev->device == 0x9441) || \
+ (rdev->pdev->device == 0x9443) || \
+ (rdev->pdev->device == 0x944B) || \
+ (rdev->pdev->device == 0x9506) || \
+ (rdev->pdev->device == 0x9509) || \
+ (rdev->pdev->device == 0x950F) || \
+ (rdev->pdev->device == 0x689C) || \
+ (rdev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -2653,14 +2655,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
(rdev->family == CHIP_MULLINS))
-#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
- (rdev->ddev->pdev->device == 0x6850) || \
- (rdev->ddev->pdev->device == 0x6858) || \
- (rdev->ddev->pdev->device == 0x6859) || \
- (rdev->ddev->pdev->device == 0x6840) || \
- (rdev->ddev->pdev->device == 0x6841) || \
- (rdev->ddev->pdev->device == 0x6842) || \
- (rdev->ddev->pdev->device == 0x6843))
+#define ASIC_IS_LOMBOK(rdev) ((rdev->pdev->device == 0x6849) || \
+ (rdev->pdev->device == 0x6850) || \
+ (rdev->pdev->device == 0x6858) || \
+ (rdev->pdev->device == 0x6859) || \
+ (rdev->pdev->device == 0x6840) || \
+ (rdev->pdev->device == 0x6841) || \
+ (rdev->pdev->device == 0x6842) || \
+ (rdev->pdev->device == 0x6843))
/*
* BIOS helpers.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8becbe09af2f..bfacf8fe5cc1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2478,6 +2478,9 @@ int radeon_asic_init(struct radeon_device *rdev)
if (rdev->family == CHIP_HAINAN) {
rdev->has_uvd = false;
rdev->has_vce = false;
+ } else if (rdev->family == CHIP_OLAND) {
+ rdev->has_uvd = true;
+ rdev->has_vce = false;
} else {
rdev->has_uvd = true;
rdev->has_vce = true;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index be96d9b64e43..42301b4e56f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -284,46 +284,47 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x791e) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x826d)) {
+ if ((pdev->device == 0x791e) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x826d)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* Asrock RS600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x1849) &&
- (dev->pdev->subsystem_device == 0x7941)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x1849) &&
+ (pdev->subsystem_device == 0x7941)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
- if ((dev->pdev->device == 0x796e) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x7302)) {
+ if ((pdev->device == 0x796e) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x7302)) {
if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
return false;
}
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x147b) &&
- (dev->pdev->subsystem_device == 0x2412)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x147b) &&
+ (pdev->subsystem_device == 0x2412)) {
if (*connector_type == DRM_MODE_CONNECTOR_DVII)
return false;
}
/* Falcon NW laptop lists vga ddc line for LVDS */
- if ((dev->pdev->device == 0x5653) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x0291)) {
+ if ((pdev->device == 0x5653) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x0291)) {
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
i2c_bus->valid = false;
*line_mux = 53;
@@ -331,26 +332,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* HIS X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7146) &&
- (dev->pdev->subsystem_vendor == 0x17af) &&
- (dev->pdev->subsystem_device == 0x2058)) {
+ if ((pdev->device == 0x7146) &&
+ (pdev->subsystem_vendor == 0x17af) &&
+ (pdev->subsystem_device == 0x2058)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7142) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x2134)) {
+ if ((pdev->device == 0x7142) &&
+ (pdev->subsystem_vendor == 0x1458) &&
+ (pdev->subsystem_device == 0x2134)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((pdev->device == 0x71C5) &&
+ (pdev->subsystem_vendor == 0x106b) &&
+ (pdev->subsystem_device == 0x0080)) {
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
@@ -366,27 +367,27 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01da)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01da)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e4)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e4)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3450 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x95C5) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e2)) {
+ if ((pdev->device == 0x95C5) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e2)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
@@ -411,9 +412,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
* with different crtcs which isn't possible on the hardware
* side and leaves no crtcs for LVDS or VGA.
*/
- if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
- (dev->pdev->subsystem_vendor == 0x1025) &&
- (dev->pdev->subsystem_device == 0x013c)) {
+ if (((pdev->device == 0x95c4) || (pdev->device == 0x9591)) &&
+ (pdev->subsystem_vendor == 0x1025) &&
+ (pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
/* actually it's a DVI-D port not DVI-I */
@@ -425,9 +426,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
/* XFX Pine Group device rv730 reports no VGA DDC lines
* even though they are wired up to record 0x93
*/
- if ((dev->pdev->device == 0x9498) &&
- (dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452) &&
+ if ((pdev->device == 0x9498) &&
+ (pdev->subsystem_vendor == 0x1682) &&
+ (pdev->subsystem_device == 0x2452) &&
(i2c_bus->valid == false) &&
!(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
@@ -435,11 +436,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) ||
- (dev->pdev->device == 0x9805) ||
- (dev->pdev->device == 0x9806)) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
+ if (((pdev->device == 0x9802) ||
+ (pdev->device == 0x9805) ||
+ (pdev->device == 0x9806)) &&
+ (pdev->subsystem_vendor == 0x1734) &&
+ (pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
*line_mux = 0x3103;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index bb29cf02974d..33121655d50b 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -205,7 +205,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -218,7 +218,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -528,7 +528,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -565,7 +565,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -583,7 +583,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index ff2135059c07..783a6b8802d5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -894,13 +894,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
/* quirks */
/* Radeon 7000 (RV100) */
- if (((dev->pdev->device == 0x5159) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7c28)) ||
+ if (((rdev->pdev->device == 0x5159) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- ((dev->pdev->device == 0x514D) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149))) {
+ ((rdev->pdev->device == 0x514D) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
@@ -2221,20 +2221,21 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
+ struct radeon_device *rdev = dev->dev_private;
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
- if (dev->pdev->device == 0x515e &&
- dev->pdev->subsystem_vendor == 0x1014) {
+ if (rdev->pdev->device == 0x515e &&
+ rdev->pdev->subsystem_vendor == 0x1014) {
if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
return false;
}
/* X300 card with extra non-existent DVI port */
- if (dev->pdev->device == 0x5B60 &&
- dev->pdev->subsystem_vendor == 0x17af &&
- dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+ if (rdev->pdev->device == 0x5B60 &&
+ rdev->pdev->subsystem_vendor == 0x17af &&
+ rdev->pdev->subsystem_device == 0x201e && bios_index == 2) {
if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
return false;
}
@@ -2244,22 +2245,24 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
/* Acer 5102 has non-existent TV port */
- if (dev->pdev->device == 0x5975 &&
- dev->pdev->subsystem_vendor == 0x1025 &&
- dev->pdev->subsystem_device == 0x009f)
+ if (rdev->pdev->device == 0x5975 &&
+ rdev->pdev->subsystem_vendor == 0x1025 &&
+ rdev->pdev->subsystem_device == 0x009f)
return false;
/* HP dc5750 has non-existent TV port */
- if (dev->pdev->device == 0x5974 &&
- dev->pdev->subsystem_vendor == 0x103c &&
- dev->pdev->subsystem_device == 0x280a)
+ if (rdev->pdev->device == 0x5974 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
return false;
/* MSI S270 has non-existent TV port */
- if (dev->pdev->device == 0x5955 &&
- dev->pdev->subsystem_vendor == 0x1462 &&
- dev->pdev->subsystem_device == 0x0131)
+ if (rdev->pdev->device == 0x5955 &&
+ rdev->pdev->subsystem_vendor == 0x1462 &&
+ rdev->pdev->subsystem_device == 0x0131)
return false;
return true;
@@ -2413,9 +2416,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
/* RV100 board with external TMDS bit mis-set.
* Actually uses internal TMDS, clear the bit.
*/
- if (dev->pdev->device == 0x5159 &&
- dev->pdev->subsystem_vendor == 0x1014 &&
- dev->pdev->subsystem_device == 0x029A) {
+ if (rdev->pdev->device == 0x5159 &&
+ rdev->pdev->subsystem_vendor == 0x1014 &&
+ rdev->pdev->subsystem_device == 0x029A) {
tmp &= ~(1 << 4);
}
if ((tmp >> 4) & 0x1) {
@@ -2707,9 +2710,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
/* boards with a thermal chip, but no overdrive table */
/* Asus 9600xt has an f75375 on the monid bus */
- if ((dev->pdev->device == 0x4152) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0xc002)) {
+ if ((rdev->pdev->device == 0x4152) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0xc002)) {
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c6262fce7440..35e937d39b51 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,8 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
* IGP chips to avoid image corruptions
*/
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
- PCI_CAP_ID_AGP) ||
+ (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
@@ -401,7 +400,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+ return (int)la->robj->tbo.mem.num_pages -
+ (int)lb->robj->tbo.mem.num_pages;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ebccaa5b2d0e..2cbf14fc6ece 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1562,6 +1562,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze)
{
struct radeon_device *rdev;
+ struct pci_dev *pdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
@@ -1571,6 +1572,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
rdev = dev->dev_private;
+ pdev = to_pci_dev(dev->dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1636,14 +1638,14 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_agp_suspend(rdev);
- pci_save_state(dev->pdev);
+ pci_save_state(pdev);
if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
- pci_restore_state(dev->pdev);
+ pci_restore_state(pdev);
} else if (suspend) {
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
}
if (fbcon) {
@@ -1665,6 +1667,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_crtc *crtc;
int r;
@@ -1675,9 +1678,9 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
console_lock();
}
if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
if (fbcon)
console_unlock();
return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3a6fedad002d..652af7a134bd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1317,7 +1317,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ dev_err(dev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e45d7344ac2b..efeb115ae70e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -342,14 +342,9 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free;
- dev->pdev = pdev;
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
pci_set_drvdata(pdev, dev);
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fc4212633bdf..0b206b052972 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+ vga_switcheroo_client_fb_set(rdev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b6b21d2e7262..941826923247 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -651,7 +651,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (args->offset < RADEON_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
+ dev_err(dev->dev,
"offset 0x%lX is in reserved area 0x%X\n",
(unsigned long)args->offset,
RADEON_VA_RESERVED_SIZE);
@@ -665,7 +665,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
*/
invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
if ((args->flags & invalid_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
args->flags, invalid_flags);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
@@ -676,7 +676,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
case RADEON_VA_UNMAP:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_err(dev->dev, "unsupported operation %d\n",
args->operation);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e543d993f73e..314d066e68e9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -919,7 +919,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index b8b7f627f0a9..84d0b1a3355f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -314,7 +314,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
- r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
+ r = drm_irq_install(rdev->ddev, rdev->pdev->irq);
if (r) {
rdev->irq.installed = false;
flush_delayed_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 50cee4880bb4..2479d6ab7a36 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -76,7 +76,7 @@ void radeon_driver_unload_kms(struct drm_device *dev)
}
radeon_acpi_fini(rdev);
-
+
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -105,6 +105,7 @@ done_free:
*/
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct radeon_device *rdev;
int r, acpi_status;
@@ -114,10 +115,14 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
+#ifdef __alpha__
+ rdev->hose = pdev->sysdata;
+#endif
+
/* update BUS flag */
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
flags |= RADEON_IS_AGP;
- } else if (pci_is_pcie(dev->pdev)) {
+ } else if (pci_is_pcie(pdev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -126,7 +131,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((radeon_runtime_pm != 0) &&
radeon_has_atpx() &&
((flags & RADEON_IS_IGP) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(pdev))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
@@ -135,9 +140,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ r = radeon_device_init(rdev, dev, pdev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -147,7 +152,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_modeset_init(rdev);
if (r)
- dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+ dev_err(dev->dev, "Fatal error during modeset init\n");
/* Call ACPI methods: require modeset init
* but failure is not fatal
@@ -155,8 +160,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = radeon_acpi_init(rdev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
}
if (radeon_is_px(dev)) {
@@ -239,7 +243,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pdev->device;
+ *value = to_pci_dev(dev->dev)->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index e64fd0ce6707..7fdb77d48d6a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -974,9 +974,9 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/* XXX: these are oem specific */
if (ASIC_IS_R300(rdev)) {
- if ((dev->pdev->device == 0x4850) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+ if ((rdev->pdev->device == 0x4850) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
else
fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bc5ad1d6585..9b81786782de 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -53,20 +53,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
struct radeon_device *rdev = bo->rdev;
- u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
switch (mem_type) {
case TTM_PL_TT:
if (sign > 0)
- atomic64_add(size, &rdev->gtt_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
else
- atomic64_sub(size, &rdev->gtt_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
break;
case TTM_PL_VRAM:
if (sign > 0)
- atomic64_add(size, &rdev->vram_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
else
- atomic64_sub(size, &rdev->vram_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
break;
}
}
@@ -255,7 +254,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -609,7 +608,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
bo->tbo.mem.start << PAGE_SHIFT,
- bo->tbo.num_pages << PAGE_SHIFT);
+ bo->tbo.base.size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d606e9a935e3..9896d8231fe5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
}
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index dd482edc819c..ab29eb9e8667 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -35,9 +35,9 @@
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int npages = bo->tbo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c93f3ab3c4e3..1729cb9a95c5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..e8c66d10478f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -46,7 +46,6 @@
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
@@ -276,7 +275,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+ atomic64_add(bo->base.size, &rdev->num_bytes_moved);
radeon_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -325,7 +324,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
* access, as done in ttm_bo_vm_fault().
*/
mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
- rdev->ddev->hose->dense_mem_base;
+ rdev->hose->dense_mem_base;
#endif
break;
default:
@@ -396,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (r)
goto release_sg;
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -535,7 +534,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
else
caching = ttm_cached;
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
@@ -573,8 +572,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
}
if (slave && ttm->sg) {
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
@@ -730,9 +729,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 39c1c339be7b..dfa9fdbe98da 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -781,7 +781,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -791,19 +791,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD create msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000000);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
- msg[4] = cpu_to_le32(0x00000000);
- msg[5] = cpu_to_le32(0x00000000);
- msg[6] = cpu_to_le32(0x00000000);
- msg[7] = cpu_to_le32(0x00000780);
- msg[8] = cpu_to_le32(0x00000440);
- msg[9] = cpu_to_le32(0x00000000);
- msg[10] = cpu_to_le32(0x01b37000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(0x0, (void __iomem *)&msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
+ writel(0x0, &msg[4]);
+ writel(0x0, &msg[5]);
+ writel(0x0, &msg[6]);
+ writel(cpu_to_le32(0x00000780), &msg[7]);
+ writel(cpu_to_le32(0x00000440), &msg[8]);
+ writel(0x0, &msg[9]);
+ writel(cpu_to_le32(0x01b37000), &msg[10]);
for (i = 11; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
@@ -817,7 +817,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -827,12 +827,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD destroy msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000002);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(cpu_to_le32(0x00000002), &msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
for (i = 4; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
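The writel() conversions above follow from annotating the UVD message buffer as __iomem: sparse requires such pointers to be accessed through I/O accessors rather than plain stores. Generic shape of the rule (a sketch, not driver-specific):

	u32 __iomem *p = (void __iomem *)base;

	writel(val, &p[i]);	/* instead of: p[i] = val; */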
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index a450497368b2..511a942e851d 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -68,7 +68,6 @@ int radeon_vce_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- case CHIP_OLAND:
case CHIP_ARUBA:
fw_name = FIRMWARE_TAHITI;
break;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index c296f94f9700..7bc302a89232 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -187,7 +187,7 @@ static void rs690_mc_init(struct radeon_device *rdev)
/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
* memory is present.
*/
- if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ if (!rdev->mc.igp_sideport_enabled && radeon_fastfb == 1) {
DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
(unsigned long long)rdev->mc.aper_base, k8_addr);
rdev->mc.aper_base = (resource_size_t)k8_addr;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 17390074277a..24ad12409120 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -223,16 +223,15 @@ static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
static void rs780_voltage_scaling_init(struct radeon_device *rdev)
{
struct igp_power_info *pi = rs780_get_pi(rdev);
- struct drm_device *dev = rdev->ddev;
u32 fv_throt_pwm_fb_div_range[3];
u32 fv_throt_pwm_range[4];
- if (dev->pdev->device == 0x9614) {
+ if (rdev->pdev->device == 0x9614) {
fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
- } else if ((dev->pdev->device == 0x9714) ||
- (dev->pdev->device == 0x9715)) {
+ } else if ((rdev->pdev->device == 0x9714) ||
+ (rdev->pdev->device == 0x9715)) {
fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 70c5da2141d7..bdfbcf14b864 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -169,7 +169,6 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
chip_id = 0x01000015;
break;
case CHIP_PITCAIRN:
- case CHIP_OLAND:
chip_id = 0x01000016;
break;
case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
index c578095b09a5..382d53f8a22e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
{
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
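pm_runtime_resume_and_get() differs from pm_runtime_get_sync() in that it drops the usage counter itself when the resume fails, so the early return above no longer leaks a reference. Before/after sketch of the calling convention:

	/* old: caller must undo the count on failure */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* new: the failure path needs no cleanup */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;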
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index b5fb941e0f53..ea7e39d03545 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
*/
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- struct drm_bridge *bridge;
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_enable(bridge, mode->clock * 1000);
}
@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
- struct drm_bridge *bridge;
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_disable(bridge);
}
@@ -1144,7 +1138,6 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/* -----------------------------------------------------------------------------
@@ -1257,7 +1250,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
else
primary = &rgrp->planes[swindex % 2].plane;
- ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
+ ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
rcdu->info->gen <= 2 ?
&crtc_funcs_gen2 : &crtc_funcs_gen3,
NULL);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 600056dff374..bfbff90588cb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -18,10 +18,11 @@
#include <linux/wait.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
@@ -527,14 +528,14 @@ static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_suspend(rcdu->ddev);
+ return drm_mode_config_helper_suspend(&rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_resume(rcdu->ddev);
+ return drm_mode_config_helper_resume(&rcdu->ddev);
}
#endif
@@ -549,7 +550,7 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
static int rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
- struct drm_device *ddev = rcdu->ddev;
+ struct drm_device *ddev = &rcdu->ddev;
drm_dev_unregister(ddev);
@@ -563,14 +564,14 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
- struct drm_device *ddev;
struct resource *mem;
int ret;
/* Allocate and initialize the R-Car device structure. */
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL)
- return -ENOMEM;
+ rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver,
+ struct rcar_du_device, ddev);
+ if (IS_ERR(rcdu))
+ return PTR_ERR(rcdu);
rcdu->dev = &pdev->dev;
rcdu->info = of_device_get_match_data(rcdu->dev);
@@ -584,13 +585,6 @@ static int rcar_du_probe(struct platform_device *pdev)
return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
@@ -599,25 +593,24 @@ static int rcar_du_probe(struct platform_device *pdev)
goto error;
}
- ddev->irq_enabled = 1;
+ rcdu->ddev.irq_enabled = 1;
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
- ret = drm_dev_register(ddev, 0);
+ ret = drm_dev_register(&rcdu->ddev, 0);
if (ret)
goto error;
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
- drm_fbdev_generic_setup(ddev, 32);
+ drm_fbdev_generic_setup(&rcdu->ddev, 32);
return 0;
error:
- rcar_du_remove(pdev);
-
+ drm_kms_helper_poll_fini(&rcdu->ddev);
return ret;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 61504c54e2ec..02ca2d0e1b55 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,8 @@
#include <linux/kernel.h>
#include <linux/wait.h>
+#include <drm/drm_device.h>
+
#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -20,10 +22,9 @@
struct clk;
struct device;
-struct drm_device;
+struct drm_bridge;
struct drm_property;
struct rcar_du_device;
-struct rcar_du_encoder;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
@@ -71,6 +72,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_CRTCS 4
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
+#define RCAR_DU_MAX_LVDS 2
struct rcar_du_device {
struct device *dev;
@@ -78,16 +80,15 @@ struct rcar_du_device {
void __iomem *mmio;
- struct drm_device *ddev;
+ struct drm_device ddev;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
- struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
-
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
+ struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
struct {
struct drm_property *colorkey;
@@ -98,6 +99,11 @@ struct rcar_du_device {
unsigned int vspd1_sink;
};
+static inline struct rcar_du_device *to_rcar_du_device(struct drm_device *dev)
+{
+ return container_of(dev, struct rcar_du_device, ddev);
+}
+
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
unsigned int feature)
{
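
The change above converts rcar-du to the managed allocation pattern: the drm_device is embedded in the driver structure and recovered with container_of() instead of going through dev_private. A minimal sketch of the same pattern, with hypothetical foo_* names standing in for a driver:

	/* Sketch only: the foo_* names and foo_drm_driver are placeholders. */
	struct foo_device {
		struct device *dev;
		struct drm_device ddev;		/* embedded, not a pointer */
	};

	static inline struct foo_device *to_foo_device(struct drm_device *ddev)
	{
		return container_of(ddev, struct foo_device, ddev);
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_device *foo;

		/* Allocates foo_device with the embedded drm_device already
		 * initialized; it is freed on the final drm_dev_put(). */
		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
					 struct foo_device, ddev);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		return drm_dev_register(&foo->ddev, 0);
	}
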
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b0335da0c161..ba8c6038cd63 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -8,12 +8,13 @@
*/
#include <linux/export.h>
+#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
-#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -44,26 +45,25 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
return num_ports;
}
+static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
+};
+
+static void rcar_du_encoder_release(struct drm_device *dev, void *res)
+{
+ struct rcar_du_encoder *renc = res;
+
+ drm_encoder_cleanup(&renc->base);
+ kfree(renc);
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
int ret;
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- rcdu->encoders[output] = renc;
- renc->output = output;
- encoder = rcar_encoder_to_drm_encoder(renc);
-
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
-
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
* DT node has a single port, assume that it describes a panel and
@@ -74,57 +74,57 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
rcar_du_encoder_count_ports(enc_node) == 1) {
struct drm_panel *panel = of_drm_find_panel(enc_node);
- if (IS_ERR(panel)) {
- ret = PTR_ERR(panel);
- goto done;
- }
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge)) {
- ret = PTR_ERR(bridge);
- goto done;
- }
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
} else {
bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ if (output == RCAR_DU_OUTPUT_LVDS0 ||
+ output == RCAR_DU_OUTPUT_LVDS1)
+ rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
}
/*
- * On Gen3 skip the LVDS1 output if the LVDS1 encoder is used as a
- * companion for LVDS0 in dual-link mode.
+ * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+ * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
+ * mode.
*/
if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
- if (rcar_lvds_dual_link(bridge)) {
- ret = -ENOLINK;
- goto done;
- }
+ if (rcar_lvds_dual_link(bridge))
+ return -ENOLINK;
}
- ret = drm_simple_encoder_init(rcdu->ddev, encoder,
- DRM_MODE_ENCODER_NONE);
- if (ret < 0)
- goto done;
+ renc = kzalloc(sizeof(*renc), GFP_KERNEL);
+ if (renc == NULL)
+ return -ENOMEM;
- /*
- * Attach the bridge to the encoder. The bridge will create the
- * connector.
- */
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
+ renc->output = output;
+
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+ enc_node, output);
-done:
+ ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret < 0) {
- if (encoder->name)
- encoder->funcs->destroy(encoder);
- devm_kfree(rcdu->dev, renc);
+ kfree(renc);
+ return ret;
}
- return ret;
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
+ renc);
+ if (ret)
+ return ret;
+
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ return drm_bridge_attach(&renc->base, bridge, NULL, 0);
}
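
The encoder cleanup now rides on a DRM-managed action: drmm_add_action_or_reset() registers a release callback against the drm_device, and if the registration itself fails it runs the callback immediately, so the encoder can never leak. A hedged sketch of the idiom (the foo_* names are placeholders, not this driver's API):

	static void foo_encoder_release(struct drm_device *dev, void *res)
	{
		struct foo_encoder *enc = res;

		drm_encoder_cleanup(&enc->base);
		kfree(enc);
	}

	/* After a successful drm_encoder_init(): */
	ret = drmm_add_action_or_reset(dev, foo_encoder_release, enc);
	if (ret)
		return ret;	/* foo_encoder_release() has already run */
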
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index df9be4524301..73560563fb31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -22,8 +22,6 @@ struct rcar_du_encoder {
#define to_rcar_encoder(e) \
container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 72dda446355f..fdb8a0d127ad 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -327,7 +328,7 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
@@ -349,7 +350,7 @@ static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
const struct rcar_du_format_info *format;
unsigned int chroma_pitch;
unsigned int max_pitch;
@@ -421,7 +422,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
static int rcar_du_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
int ret;
ret = drm_atomic_helper_check(dev, state);
@@ -437,7 +438,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned int i;
@@ -583,7 +584,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
* or enable source color keying (1).
*/
rcdu->props.colorkey =
- drm_property_create_range(rcdu->ddev, 0, "colorkey",
+ drm_property_create_range(&rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
@@ -700,10 +701,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
int ret;
cmm = of_parse_phandle(np, "renesas,cmms", i);
- if (IS_ERR(cmm)) {
+ if (!cmm) {
dev_err(rcdu->dev,
"Failed to parse 'renesas,cmms' property\n");
- return PTR_ERR(cmm);
+ return -EINVAL;
}
if (!of_device_is_available(cmm)) {
@@ -713,10 +714,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
}
pdev = of_find_device_by_node(cmm);
- if (IS_ERR(pdev)) {
+ if (!pdev) {
dev_err(rcdu->dev, "No device found for CMM%u\n", i);
of_node_put(cmm);
- return PTR_ERR(pdev);
+ return -EINVAL;
}
of_node_put(cmm);
@@ -726,8 +727,12 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
* disabled: return 0 and let the DU continue probing.
*/
ret = rcar_cmm_init(pdev);
- if (ret)
+ if (ret) {
+ platform_device_put(pdev);
return ret == -ENODEV ? 0 : ret;
+ }
+
+ rcdu->cmms[i] = pdev;
/*
* Enforce suspend/resume ordering by making the CMM a provider
@@ -739,20 +744,27 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
"Failed to create device link to CMM%u\n", i);
return -EINVAL;
}
-
- rcdu->cmms[i] = pdev;
}
return 0;
}
+static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res)
+{
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i)
+ platform_device_put(rcdu->cmms[i]);
+}
+
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
DU0_REG_OFFSET, DU2_REG_OFFSET
};
- struct drm_device *dev = rcdu->ddev;
+ struct drm_device *dev = &rcdu->ddev;
struct drm_encoder *encoder;
unsigned int dpad0_sources;
unsigned int num_encoders;
@@ -766,6 +778,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret)
return ret;
+ ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL);
+ if (ret)
+ return ret;
+
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.normalize_zpos = true;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a0021fc25b27..02e5f11f38eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -128,7 +128,7 @@ static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
int rcar_du_atomic_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
bool needs_realloc = false;
@@ -773,9 +773,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
plane->group = rgrp;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats),
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_plane_funcs,
+ formats, ARRAY_SIZE(formats),
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f6a69aa116e6..53221d8473c1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>
@@ -344,6 +345,15 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
{
struct rcar_du_vsp *vsp = res;
+ unsigned int i;
+
+ for (i = 0; i < vsp->num_planes; ++i) {
+ struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+ drm_plane_cleanup(&plane->plane);
+ }
+
+ kfree(vsp->planes);
put_device(vsp->vsp);
}
@@ -354,6 +364,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
unsigned int num_crtcs = hweight32(crtcs);
+ unsigned int num_planes;
unsigned int i;
int ret;
@@ -364,7 +375,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
vsp->vsp = &pdev->dev;
- ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_vsp_cleanup, vsp);
if (ret < 0)
return ret;
@@ -376,14 +387,13 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
* 4 RPFs.
*/
- vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+ num_planes = rcdu->info->gen >= 3 ? 5 : 4;
- vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
- sizeof(*vsp->planes), GFP_KERNEL);
+ vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL);
if (!vsp->planes)
return -ENOMEM;
- for (i = 0; i < vsp->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
enum drm_plane_type type = i < num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
@@ -392,8 +402,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
plane->vsp = vsp;
plane->index = i;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_vsp_plane_funcs,
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_vsp_plane_funcs,
rcar_du_vsp_formats,
ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
@@ -409,8 +419,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
} else {
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ num_planes - 1);
}
+
+ vsp->num_planes++;
}
return 0;
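
Incrementing vsp->num_planes only after each drm_universal_plane_init() succeeds means the managed cleanup callback walks exactly the planes that were created, even when the loop fails halfway through. The shape of the pattern, reduced to a sketch with assumed names:

	/* Count successes so the cleanup path never touches
	 * uninitialized entries. init_item() is hypothetical. */
	obj->count = 0;
	for (i = 0; i < total; i++) {
		ret = init_item(&obj->items[i]);
		if (ret)
			return ret;	/* cleanup frees obj->count items */
		obj->count++;
	}
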
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 04efa78d70b6..c79d1259e49b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -204,7 +204,7 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
- return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ return drm_writeback_connector_init(&rcdu->ddev, wb_conn,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 310aa1546893..cb25c0e8fc9b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -49,7 +49,7 @@ config ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY
help
This selects support for Rockchip SoC specific extensions
- for the Synopsys DesignWare HDMI driver. If you want to
+ for the Synopsys DesignWare DSI driver. If you want to
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index e84325e56d98..24a71091759c 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1089,7 +1089,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dsi->grf_regmap)) {
- DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n");
return PTR_ERR(dsi->grf_regmap);
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 23de359a1dec..830bdd5e9b7c 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -202,7 +202,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->vpll_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vpll clock\n");
return PTR_ERR(hdmi->vpll_clk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d1e05482641b..8d15cabdcb02 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1643,7 +1643,6 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 4a2099cb582e..857d97cdc67c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,9 +17,20 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
+/* AFBC supports a number of configurable modes. Relevant to us are block size
+ * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
+ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
+ * could be enabled via the hreg_block_split register, but is not currently
+ * handled. The colourspace transform is implicitly always assumed by the
+ * decoder, so consumers must use this transform as well.
+ *
+ * Failure to match modifiers will cause errors displaying AFBC buffers
+ * produced by conformant AFBC producers, including Mesa.
+ */
#define ROCKCHIP_AFBC_MOD \
DRM_FORMAT_MOD_ARM_AFBC( \
AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ | AFBC_FORMAT_MOD_YTR \
)
enum vop_data_format {
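
The modifier is a contract between producer and consumer: a buffer advertised with different AFBC bits (for instance without YTR) will decode incorrectly on the VOP. As a sketch, a userspace producer would build the matching value from the uapi macros in drm_fourcc.h:

	#include <drm/drm_fourcc.h>

	/* Must match ROCKCHIP_AFBC_MOD bit-for-bit. */
	uint64_t modifier =
		DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					AFBC_FORMAT_MOD_SPARSE |
					AFBC_FORMAT_MOD_YTR);
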
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e..92637b70c9bf 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct drm_gpu_scheduler *sched = s_fence->sched;
+
+ atomic_dec(&sched->hw_rq_count);
+ atomic_dec(&sched->score);
+
+ trace_drm_sched_process_job(s_fence);
+
+ dma_fence_get(&s_fence->finished);
+ drm_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+ drm_sched_job_done(s_job);
+}
+
+/**
* drm_sched_dependency_optimized
*
* @fence: the dependency fence
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !list_empty(&sched->ring_mirror_list))
+ !list_empty(&sched->pending_list))
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
{
spin_lock(&sched->job_list_lock);
- if (list_empty(&sched->ring_mirror_list))
+ if (list_empty(&sched->pending_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
struct drm_gpu_scheduler *sched = s_job->sched;
spin_lock(&sched->job_list_lock);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ list_add_tail(&s_job->list, &sched->pending_list);
drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job) {
/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* drm_sched_cleanup_jobs. It will be reinserted after sched->thread
* is parked at which point it's safe.
*/
- list_del_init(&job->node);
+ list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* Stop the scheduler and also remove and free all completed jobs.
* Note: the bad job will not be freed, as it might be used later, and so it is
* the caller's responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Add at the head of the queue to reflect it was the earliest
* job extracted.
*/
- list_add(&bad->node, &sched->ring_mirror_list);
+ list_add(&bad->list, &sched->pending_list);
/*
* Iterate the job list from newest to oldest and either deactivate
- * their HW callbacks or remove them from mirror list if they already
+ * their HW callbacks or remove them from pending list if they already
* signaled.
* This iteration is thread safe as sched thread is stopped.
*/
- list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+ list) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
atomic_dec(&sched->hw_rq_count);
} else {
/*
- * remove job from ring_mirror_list.
+ * remove job from pending_list.
* Locking here is for concurrent resume timeout
*/
spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
spin_unlock(&sched->job_list_lock);
/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
* so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &s_job->cb);
+ drm_sched_job_done(s_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
} else
- drm_sched_process_job(NULL, &s_job->cb);
+ drm_sched_job_done(s_job);
}
if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
bool found_guilty = false;
struct dma_fence *fence;
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_LIST_HEAD(&job->node);
+ INIT_LIST_HEAD(&job->list);
return 0;
}
@@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
}
/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
- struct drm_sched_fence *s_fence = s_job->s_fence;
- struct drm_gpu_scheduler *sched = s_fence->sched;
-
- atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
-
- trace_drm_sched_process_job(s_fence);
-
- dma_fence_get(&s_fence->finished);
- drm_sched_fence_finished(s_fence);
- dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
-}
-
-/**
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
/*
* Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch ring_mirror_list
+ * is being parked and hence assumed to not touch pending_list
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
- /* remove job from ring_mirror_list */
- list_del_init(&job->node);
+ /* remove job from pending_list */
+ list_del_init(&job->list);
} else {
job = NULL;
/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &sched_job->cb);
+ drm_sched_job_done(sched_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
if (IS_ERR(fence))
dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- drm_sched_process_job(NULL, &sched_job->cb);
+ drm_sched_job_done(sched_job);
}
wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
- INIT_LIST_HEAD(&sched->ring_mirror_list);
+ INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
if (sched->thread)
kthread_stop(sched->thread);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
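
Both call sites converted above lean on the same dma_fence contract: dma_fence_add_callback() returns -ENOENT, without queuing anything, when the fence has already signaled, so the caller must complete the job inline. In isolation the idiom looks like this (condensed from the hunks above, not additional scheduler code):

	r = dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb);
	if (r == -ENOENT)
		drm_sched_job_done(s_job);	/* fence already signaled */
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
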
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index a98057431023..7476301d7142 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -330,13 +330,6 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_disable = sti_cursor_atomic_disable,
};
-static void sti_cursor_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_cursor_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -350,7 +343,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_cursor_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 2d5a2b5b78b8..2f4a34f14d33 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -884,13 +884,6 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_disable = sti_gdp_atomic_disable,
};
-static void sti_gdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_gdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -902,7 +895,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_gdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 5a4e12194a77..62f824cd5f21 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1262,13 +1262,6 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_disable = sti_hqvdp_atomic_disable,
};
-static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -1282,7 +1275,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_hqvdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3980677435cb..7812094f93d6 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -713,7 +713,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/*
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index eaaf5d70e352..9f06dec0fc61 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
+ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
@@ -689,6 +672,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+ /* Set up the polarity of multiple signals */
+ if (tcon->quirks->polarity_in_ch0) {
+ val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+ } else {
+ /* according to the vendor driver, this bit must always be set */
+ val = SUN4I_TCON1_IO_POL_UNKNOWN;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+ }
+
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1517,6 +1524,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
+ .polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index cfbf4e6c1679..e624f6977eb8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -113,6 +113,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
+#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
@@ -153,6 +154,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
+
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -235,6 +241,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ bool polarity_in_ch0; /* some TCON1 channels keep their polarity bits in the TCON0 polarity register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 781955dd4995..9bd62de0c288 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -46,33 +46,6 @@ static const u32 yuv2rgb[2][2][12] = {
},
};
-static const u32 yvu2rgb[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
- 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
- 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
- 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
- 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
- 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
- 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
- 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
- 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
- }
- },
-};
-
/*
* DE3 has a bit different CSC units. Factors are in two's complement format.
* First three factors in a row are multiplication factors which have 17 bits
@@ -96,7 +69,7 @@ static const u32 yvu2rgb[2][2][12] = {
* c20 c21 c22 [d2 const2]
*/
-static const u32 yuv2rgb_de3[2][2][12] = {
+static const u32 yuv2rgb_de3[2][3][12] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
@@ -107,6 +80,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000,
+ 0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000,
+ 0x0002542A, 0x00044896, 0x00000000, 0xFE000000,
}
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
@@ -119,33 +97,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x00020000, 0x00000000, 0x0003264C, 0x00000000,
0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
- }
- },
-};
-
-static const u32 yvu2rgb_de3[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
- 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
},
- [DRM_COLOR_YCBCR_BT709] = {
- 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
- 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
- 0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00020000, 0x0003264C, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
- 0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x00020000, 0x00000000, 0x0002F2FE, 0x00000000,
+ 0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000,
+ 0x00020000, 0x0003C346, 0x00000000, 0xFE000000,
}
},
};
@@ -157,21 +113,30 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
{
const u32 *table;
u32 base_reg;
+ int i;
+
+ table = yuv2rgb[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb[range][encoding];
+ base_reg = SUN8I_CSC_COEFF(base, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ base_reg = SUN8I_CSC_COEFF(base, i + 1);
+ else if ((i & 3) == 2)
+ base_reg = SUN8I_CSC_COEFF(base, i - 1);
+ else
+ base_reg = SUN8I_CSC_COEFF(base, i);
+ regmap_write(map, base_reg, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN8I_CSC_COEFF(base, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
@@ -180,22 +145,36 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
enum drm_color_range range)
{
const u32 *table;
- u32 base_reg;
+ u32 addr;
+ int i;
+
+ table = yuv2rgb_de3[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb_de3[range][encoding];
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
+ regmap_bulk_write(map, addr, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb_de3[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i + 1);
+ else if ((i & 3) == 2)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i - 1);
+ else
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer, i);
+ regmap_write(map, addr, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
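
Dropping the dedicated yvu2rgb tables works because YVU input differs from YUV only in the order of the chroma components, so the driver can reuse the YUV coefficients and swap the second and third column of each four-entry row (table indices with i % 4 == 1 and i % 4 == 2) while programming the registers. The index remapping as a standalone sketch:

	/* Register slot for YUV table entry i when the input is YVU:
	 * columns 1 and 2 of every 4-entry row trade places. */
	static int yvu_slot(int i)
	{
		switch (i & 3) {
		case 1:
			return i + 1;
		case 2:
			return i - 1;
		default:
			return i;
		}
	}
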
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 92add2cef2e7..bbdfd5e26ec8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- if (hdmi->quirks->set_rate)
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
{
/*
* The controller supports a maximum of 594 MHz, which corresponds to
- * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
- * 340 MHz scrambling has to be enabled. Because scrambling is
- * not yet implemented, just limit to 340 MHz for now.
+ * 4K@60Hz 4:4:4 or RGB.
*/
- if (mode->clock > 340000)
+ if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
- .set_rate = true,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d983746fa194..d4b55af0592f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
- unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 35c2133724e2..9994edf67509 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
- { 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
- { 59400000, { 0x0008, 0x0008, 0x0008 }, },
- { 72000000, { 0x0008, 0x0008, 0x001b }, },
- { 74250000, { 0x0013, 0x0013, 0x0013 }, },
- { 90000000, { 0x0008, 0x001a, 0x001b }, },
- { 118800000, { 0x001b, 0x001a, 0x001b }, },
- { 144000000, { 0x001b, 0x001a, 0x0034 }, },
- { 180000000, { 0x001b, 0x0033, 0x0034 }, },
- { 216000000, { 0x0036, 0x0033, 0x0034 }, },
- { 237600000, { 0x0036, 0x0033, 0x001b }, },
- { 288000000, { 0x0036, 0x001b, 0x001b }, },
- { 297000000, { 0x0019, 0x001b, 0x0019 }, },
- { 330000000, { 0x0036, 0x001b, 0x001b }, },
- { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { 74250000, { 0x0013, 0x001a, 0x001b }, },
+ { 148500000, { 0x0019, 0x0033, 0x0034 }, },
+ { 297000000, { 0x0019, 0x001b, 0x001b }, },
+ { 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
- { 74250000, 0x8009, 0x0004, 0x0232},
- { 148500000, 0x8029, 0x0004, 0x0273},
- { 594000000, 0x8039, 0x0004, 0x014a},
+ { 27000000, 0x8009, 0x0007, 0x02b0 },
+ { 74250000, 0x8009, 0x0006, 0x022d },
+ { 148500000, 0x8029, 0x0006, 0x0270 },
+ { 297000000, 0x8039, 0x0005, 0x01ab },
+ { 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 7576b523fdbb..145833a9d82d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -50,10 +50,8 @@
#define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc)
#define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100)
-#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
- ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y))
-#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
- ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x) \
+ ((base) + 0x110 + (layer) * 0x30 + (x) * 4)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 76393fc976fe..8cc294a9969d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -543,6 +543,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
+ if (mixer->cfg->is_de3)
+ supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ab699bf0ac5c..58c185c299f4 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,7 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
.llseek = noop_llseek,
};
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
.driver_features = DRIVER_LEGACY,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 85dd7131553a..0ae3a025efe9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
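
This conversion, repeated across the Tegra patches here, closes a subtle leak: pm_runtime_get_sync() raises the usage counter even when the resume fails, so every error path needed a matching put. pm_runtime_resume_and_get() drops the reference itself on failure. The before/after contract, sketched:

	/* Old: the reference leaks unless the error path puts it back. */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return err;
	}

	/* New: nothing is held after a failure. */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;
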
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e45c8414e2a3..e9ce7d6992d2 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1301,8 +1301,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dc", },
{ .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr2d", },
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 5691ef1b0e58..f46d377f0c30 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 56edef06c48e..223ab2ceb7e6 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -72,7 +72,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
- if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
@@ -178,9 +178,10 @@ int falcon_boot(struct falcon *falcon)
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
- /* copy the first code segment into Falcon internal memory */
- falcon_copy_chunk(falcon, falcon->firmware.code.offset,
- 0, FALCON_MEMORY_IMEM);
+ /* copy the code segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+ offset, FALCON_MEMORY_IMEM);
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1a0d3ba6e525..adbe2ddcda19 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -161,9 +161,14 @@ static const struct gr2d_soc tegra30_gr2d_soc = {
.version = 0x30,
};
+static const struct gr2d_soc tegra114_gr2d_soc = {
+ .version = 0x35,
+};
+
static const struct of_device_id gr2d_match[] = {
- { .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
- { .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
+ { .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d09a24931c87..e5d2a4026028 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 22a03f7ffdc1..5ce771cba133 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
unsigned int i;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index cc2aa2308a51..f02a035dda45 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index ade56b860cf9..77e128832920 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -117,7 +117,19 @@ static int vic_boot(struct vic *vic)
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
+ /*
+ * STREAMID0 is used for input/output buffers.
+ * Initialize it to SID_VIC in case context isolation
+ * is not enabled, in which case SID_VIC is used for both
+ * firmware and data buffers.
+ *
+ * If context isolation is enabled, it will be
+ * overridden by the SETSTREAMID opcode as part of
+ * each job.
+ */
vic_writel(vic, value, VIC_THI_STREAMID0);
+
+ /* STREAMID1 is used for firmware loading. */
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
@@ -135,16 +147,21 @@ static int vic_boot(struct vic *vic)
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.virt +
- *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
- fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
- fce_ucode_size);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.iova + fce_bin_data_offset)
- >> 8);
+
+ /* Old VIC firmware needs kernel help with setting up FCE microcode. */
+ if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ hdr = vic->falcon.firmware.virt +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(
+ &vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
+ }
err = falcon_wait_idle(&vic->falcon);
if (err < 0) {
@@ -314,7 +331,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
struct vic *vic = to_vic(client);
int err;
- err = pm_runtime_get_sync(vic->dev);
+ err = pm_runtime_resume_and_get(vic->dev);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 662bf3a348c9..f5190477de72 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ifeq (, $(findstring -W,$(KCFLAGS)))
ccflags-y += -Werror
endif
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 561c49d8657a..a043e602199e 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -602,7 +602,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
drm_mode_config_reset(dev);
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 03c86628e4ac..8f9fa4188897 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a03c7834b1e..20a25660b35b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
@@ -43,6 +42,8 @@
#include <linux/atomic.h>
#include <linux/dma-resv.h>
+#include "ttm_module.h"
+
static void ttm_bo_global_kobj_release(struct kobject *kobj);
/*
@@ -71,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
int i, mem_type;
- drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+ bo, bo->mem.num_pages, bo->base.size >> 10,
+ bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
@@ -109,40 +110,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
.default_attrs = ttm_bo_global_attrs
};
-static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
- struct ttm_resource *mem)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- if (!list_empty(&bo->lru) || bo->pin_count)
- return;
-
- man = ttm_manager_type(bdev, mem->mem_type);
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
-
- if (man->use_tt && bo->ttm &&
- !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
- TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- }
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- bool notify = false;
- if (!list_empty(&bo->swap)) {
- list_del_init(&bo->swap);
- notify = true;
- }
- if (!list_empty(&bo->lru)) {
- list_del_init(&bo->lru);
- notify = true;
- }
+ list_del_init(&bo->swap);
+ list_del_init(&bo->lru);
- if (notify && bdev->driver->del_from_lru_notify)
+ if (bdev->driver->del_from_lru_notify)
bdev->driver->del_from_lru_notify(bo);
}
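Dropping the list_empty() tests is safe because list_del_init() is idempotent: after the first call the node points at itself, so a second call merely unlinks a self-linked node. The one behavioral change the hunk accepts is that del_from_lru_notify now fires even when the BO was not on any list. A small sketch of the idempotence, assuming the usual <linux/list.h> semantics:

#include <linux/list.h>

static void list_del_init_is_idempotent(void)
{
	LIST_HEAD(head);
	struct list_head node;

	INIT_LIST_HEAD(&node);
	list_add_tail(&node, &head);

	list_del_init(&node);	/* unlinks and re-points node at itself */
	list_del_init(&node);	/* harmless: a self-linked node unlinks to itself */
}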
@@ -155,12 +130,32 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
}
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem,
struct ttm_lru_bulk_move *bulk)
{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man;
+
dma_resv_assert_held(bo->base.resv);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ if (bo->pin_count) {
+ ttm_bo_del_from_lru(bo);
+ return;
+ }
+
+ man = ttm_manager_type(bdev, mem->mem_type);
+ list_move_tail(&bo->lru, &man->lru[bo->priority]);
+ if (man->use_tt && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
+ struct list_head *swap;
+
+ swap = &ttm_bo_glob.swap_lru[bo->priority];
+ list_move_tail(&bo->swap, swap);
+ }
+
+ if (bdev->driver->del_from_lru_notify)
+ bdev->driver->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
@@ -267,7 +262,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
}
- ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+ ctx->bytes_moved += bo->base.size;
return 0;
out_err:
@@ -514,10 +509,9 @@ static void ttm_bo_release(struct kref *kref)
* shrinkers, now that they are queued for
* destruction.
*/
- if (bo->pin_count) {
+ if (WARN_ON(bo->pin_count)) {
bo->pin_count = 0;
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
}
kref_init(&bo->kref);
@@ -859,8 +853,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->placement = place->flags;
spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
+ ttm_bo_move_to_lru_tail(bo, mem, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
@@ -937,9 +930,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
ttm_bo_move_to_lru_tail_unlocked(bo);
- }
return ret;
}
@@ -967,8 +959,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return ret;
/* move to the bounce domain */
ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
- if (ret)
+ if (ret) {
+ ttm_resource_free(bo, &hop_mem);
return ret;
+ }
return 0;
}
@@ -984,8 +978,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
memset(&hop, 0, sizeof(hop));
- mem.num_pages = bo->num_pages;
- mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
mem.bus.offset = 0;
mem.bus.addr = NULL;
@@ -1000,18 +993,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* stop and the driver will be called to make
* the second hop.
*/
-bounce:
ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
return ret;
+bounce:
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
- return ret;
+ goto out;
/* try and move to final place now. */
goto bounce;
}
+out:
if (ret)
ttm_resource_free(bo, &mem);
return ret;
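Relocating the bounce: label below ttm_bo_mem_space() means the final placement is computed exactly once and only the move itself is retried after a temporary hop, while the new out: label frees the reserved resource on any failure, including one coming out of the bounce step. Condensed, the patched control flow reads:

	/* find space for the final placement exactly once */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	/* attempt the move; the driver may request an intermediate hop */
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;
		goto bounce;	/* now retry the move to the final place */
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);	/* don't leak mem on failure */
	return ret;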
@@ -1101,7 +1095,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
@@ -1112,9 +1106,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- int ret = 0;
- unsigned long num_pages;
bool locked;
+ int ret = 0;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
@@ -1126,16 +1119,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (num_pages == 0) {
- pr_err("Illegal buffer object size\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- ttm_mem_global_free(mem_glob, acc_size);
- return -EINVAL;
- }
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
@@ -1144,10 +1127,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->swap);
bo->bdev = bdev;
bo->type = type;
- bo->num_pages = num_pages;
- bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->mem.num_pages = bo->num_pages;
+ bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
bo->mem.bus.offset = 0;
@@ -1165,9 +1146,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
}
if (!ttm_bo_uses_embedded_gem_object(bo)) {
/*
- * bo.gem is not initialized, so we have to setup the
+ * bo.base is not initialized, so we have to setup the
* struct elements we want to use regardless.
*/
+ bo->base.size = size;
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
@@ -1209,7 +1191,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
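With bo->num_pages and bo->mem.size removed, the byte size in bo->base.size becomes the single source of truth and page counts are derived on demand; note that the explicit num_pages == 0 rejection is gone, so zero-sized objects are now the caller's responsibility. PAGE_ALIGN(size) >> PAGE_SHIFT is simply a round-up division by the page size, as this one-liner makes explicit (assuming PAGE_SIZE is a power of two, which PAGE_ALIGN already requires):

#include <linux/mm.h>

static inline unsigned long bo_num_pages(size_t size)
{
	/* PAGE_ALIGN(size) >> PAGE_SHIFT == DIV_ROUND_UP(size, PAGE_SIZE) */
	return PAGE_ALIGN(size) >> PAGE_SHIFT;
}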
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7ccb2295cac1..398d5013fc39 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -310,7 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.pin_count = 1;
+ fbo->base.pin_count = 0;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -319,6 +319,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
*new_obj = &fbo->base;
return 0;
}
@@ -429,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->num_pages)
+ if (num_pages > bo->mem.num_pages)
return -EINVAL;
- if (start_page > bo->num_pages)
+ if ((start_page + num_pages) > bo->mem.num_pages)
return -EINVAL;
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -483,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
if (mem->bus.is_iomem) {
void __iomem *vaddr_iomem;
- size_t size = bo->num_pages << PAGE_SHIFT;
if (mem->bus.addr)
vaddr_iomem = (void __iomem *)mem->bus.addr;
else if (mem->bus.caching == ttm_write_combined)
- vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+ vaddr_iomem = ioremap_wc(mem->bus.offset,
+ bo->base.size);
else
- vaddr_iomem = ioremap(mem->bus.offset, size);
+ vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
if (!vaddr_iomem)
return -ENOMEM;
@@ -515,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
* or to make the buffer object look contiguous.
*/
prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
- vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+ vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
if (!vaddr)
return -ENOMEM;
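The kmap bounds check above is both retargeted at bo->mem.num_pages and tightened: the old start_page > num_pages test let a window that starts in range run past the end of the object. Worked through with hypothetical numbers:

	/* bo->mem.num_pages == 8; caller asks for start_page = 6, num_pages = 4 */

	/* old checks: 4 <= 8 and 6 <= 8, so the request passed, and pages
	 * 8 and 9 would have been mapped out of bounds
	 */

	/* new check: 6 + 4 > 8, so the request is correctly rejected */
	if ((start_page + num_pages) > bo->mem.num_pages)
		return -EINVAL;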
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2944fa0af493..6dc96cf66744 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
@@ -199,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->num_pages)
+ if (page_offset + fault_page_size > bo->mem.num_pages)
goto out_fallback;
if (bo->mem.bus.is_iomem)
@@ -307,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages))
+ if (unlikely(page_offset >= bo->mem.num_pages))
return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -470,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8a8f1a6a83a6..9fa36ed59429 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
@@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 5ed1fc8f2ace..a3bfbd9cea68 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -39,6 +38,8 @@
#include <linux/swap.h>
#include <drm/ttm/ttm_pool.h>
+#include "ttm_module.h"
+
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_global ttm_mem_glob;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 6ff40c041d79..c0906437cb1c 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -32,9 +32,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_sysfs.h>
+#include "ttm_module.h"
+
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
static atomic_t device_released;
diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..45fa318c1585
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.h
@@ -0,0 +1,40 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#include <linux/kernel.h>
+struct kobject;
+
+#define TTM_PFX "[TTM] "
+extern struct kobject *ttm_get_kobj(void);
+
+#endif /* _TTM_MODULE_H_ */
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 7b2f60616750..6e27cb1bf48b 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -66,7 +67,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -79,12 +80,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
struct page *p;
void *vaddr;
- if (order) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
if (!pool->use_dma_alloc) {
p = alloc_pages(gfp_flags, order);
@@ -190,7 +192,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
size_t size = (1ULL << order) * PAGE_SIZE;
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pool->dev, **dma_addr))
+ if (dma_mapping_error(pool->dev, addr))
return -EFAULT;
}
@@ -217,6 +219,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
+ unsigned int i, num_pages = 1 << pt->order;
+
+ for (i = 0; i < num_pages; ++i) {
+ if (PageHighMem(p))
+ clear_highpage(p + i);
+ else
+ clear_page(page_address(p + i));
+ }
+
spin_lock(&pt->lock);
list_add(&p->lru, &pt->pages);
spin_unlock(&pt->lock);
@@ -249,9 +260,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +270,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p, *tmp;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
list_for_each_entry_safe(p, tmp, &pt->pages, lru)
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +313,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_freed;
struct page *p;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
p = ttm_pool_type_take(pt);
@@ -314,7 +325,7 @@ static unsigned int ttm_pool_shrink(void)
}
list_move_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return num_freed;
}
@@ -507,7 +518,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -525,7 +535,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
@@ -566,7 +575,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
seq_puts(m, "\t ");
for (i = 0; i < MAX_ORDER; ++i)
@@ -602,7 +611,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
atomic_long_read(&allocated_pages), page_pool_size);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return 0;
}
@@ -646,7 +655,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- spin_lock_init(&shrinker_lock);
+ mutex_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
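The spinlock-to-mutex switch for shrinker_lock is what makes the shrink path legal: ttm_pool_free_page() can end up in dma_free_attrs() or set_pages_wb(), both of which may sleep, and sleeping under a spinlock is a bug. A mutex works here because every taker (pool init/fini, the shrinker, debugfs) runs in process context. The bare-bones shape of the pattern, under those assumptions:

#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_MUTEX(pool_lock);		/* was a spinlock */
static LIST_HEAD(pool_list);

static void pool_shrink_one(void)
{
	mutex_lock(&pool_lock);		/* may sleep: process context only */
	/* pick the oldest pool type, take one page, free it (can sleep) */
	mutex_unlock(&pool_lock);
}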
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index e0952444cea9..a39305f742da 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,7 +29,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index da9eeffe0c6d..7f75a13163f0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
uint32_t page_flags,
enum ttm_caching caching)
{
- ttm->num_pages = bo->num_pages;
+ ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
@@ -162,19 +162,6 @@ void ttm_tt_fini(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching)
-{
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
-
- if (ttm_dma_tt_alloc_page_directory(ttm)) {
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching)
{
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 17ff24d999d1..cb0e837d3dba 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -11,7 +11,6 @@
*/
#include <linux/clk.h>
-#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 07140e0b90a3..7fa71c8bb828 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 42d401fd244e..99e22beea90b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -232,8 +232,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
- dev->coherent_dma_mask =
- DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
ident1 = V3D_READ(V3D_HUB_IDENT1);
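Assigning dev->coherent_dma_mask by hand only covers coherent allocations and bypasses the DMA core's validation; dma_set_mask_and_coherent() sets the streaming and coherent masks together and lets the platform reject widths it cannot address. (The return value is still ignored above, which would be the natural follow-up cleanup.) The idiom, with a hypothetical 40-bit device:

#include <linux/dma-mapping.h>

static int foo_setup_dma(struct device *dev)
{
	/* sets both dev->dma_mask and dev->coherent_dma_mask, or fails */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
}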
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index c88686489b88..e714d5318f30 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -178,10 +178,7 @@ v3d_hub_irq(int irq, void *arg)
};
const char *client = "?";
- V3D_WRITE(V3D_MMU_CTL,
- V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
- V3D_MMU_CTL_PT_INVALID |
- V3D_MMU_CTL_WRITE_VIOLATION));
+ V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
if (v3d->ver >= 41) {
axi_id = axi_id >> 5;
@@ -217,7 +214,7 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
+ irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
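platform_get_irq() logs an error when the interrupt is missing, which is noise for a genuinely optional resource like V3D's second IRQ line; platform_get_irq_optional() returns the same codes silently, and the caller already distinguishes the three outcomes. Sketched out:

#include <linux/platform_device.h>

static int foo_get_second_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 1);

	if (irq == -EPROBE_DEFER)
		return irq;	/* interrupt controller not ready, retry probe */
	if (irq > 0)
		return irq;	/* present: use the dedicated line */
	return 0;		/* absent: fall back silently to the combined IRQ */
}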
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f3eac72cb46e..e534896b6cfd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -51,7 +51,6 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(vbox))
return PTR_ERR(vbox);
- vbox->ddev.pdev = pdev;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
@@ -109,15 +108,16 @@ static void vbox_pci_remove(struct pci_dev *pdev)
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int error;
error = drm_mode_config_helper_suspend(&vbox->ddev);
if (error)
return error;
- pci_save_state(vbox->ddev.pdev);
- pci_disable_device(vbox->ddev.pdev);
- pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -125,8 +125,9 @@ static int vbox_pm_suspend(struct device *dev)
static int vbox_pm_resume(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
- if (pci_enable_device(vbox->ddev.pdev))
+ if (pci_enable_device(pdev))
return -EIO;
return drm_mode_config_helper_resume(&vbox->ddev);
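These vboxvideo hunks are part of retiring drm_device::pdev tree-wide: PCI-specific code now derives the pci_dev from the generic struct device it already holds. In dev_pm_ops callbacks the incoming device is the PCI device's own dev member, so to_pci_dev() is a plain container_of(). A sketch of the pattern with hypothetical foo_ names:

#include <linux/pci.h>

static int foo_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev); /* container_of(dev, struct pci_dev, dev) */

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}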
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 631657fa554f..b3ded68603ba 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -170,10 +170,12 @@ static void vbox_hotplug_worker(struct work_struct *work)
int vbox_irq_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
+
INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
vbox_update_mode_hints(vbox);
- return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
+ return drm_irq_install(&vbox->ddev, pdev->irq);
}
void vbox_irq_fini(struct vbox_private *vbox)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d68d9bad7674..f28779715ccd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -8,7 +8,9 @@
* Hans de Goede <hdegoede@redhat.com>
*/
+#include <linux/pci.h>
#include <linux/vbox_err.h>
+
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
@@ -30,6 +32,7 @@ void vbox_report_caps(struct vbox_private *vbox)
static int vbox_accel_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
struct vbva_buffer *vbva;
unsigned int i;
@@ -41,7 +44,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
/* Take a command buffer for each screen from the end of usable VRAM. */
vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
- vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
+ vbox->vbva_buffers = pci_iomap_range(pdev, 0,
vbox->available_vram_size,
vbox->num_crtcs *
VBVA_MIN_BUFFER_SIZE);
@@ -106,6 +109,7 @@ bool vbox_check_supported(u16 id)
int vbox_hw_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
int ret = -ENOMEM;
vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
@@ -115,7 +119,7 @@ int vbox_hw_init(struct vbox_private *vbox)
/* Map guest-heap at end of vram */
vbox->guest_heap =
- pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_SIZE);
if (!vbox->guest_heap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index f5a06675da43..0066a3c1dfc9 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -15,8 +15,9 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = &vbox->ddev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
- vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+ vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
@@ -24,8 +25,8 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 469d1b4f2643..fddaeb0b09c1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -21,7 +21,7 @@
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
-static vm_fault_t vc4_fault(struct vm_fault *vmf);
+static const struct drm_gem_object_funcs vc4_gem_object_funcs;
static const char * const bo_type_names[] = {
"kernel",
@@ -376,20 +376,6 @@ out:
return bo;
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
- .free = vc4_free_object,
- .export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = vc4_prime_vmap,
- .vm_ops = &vc4_vm_ops,
-};
-
/**
* vc4_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
@@ -538,7 +524,7 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
*/
-void vc4_free_object(struct drm_gem_object *gem_bo)
+static void vc4_free_object(struct drm_gem_object *gem_bo)
{
struct drm_device *dev = gem_bo->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -673,7 +659,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
+static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
struct dma_buf *dmabuf;
@@ -718,19 +704,9 @@ static vm_fault_t vc4_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
+static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct drm_gem_object *gem_obj;
- unsigned long vm_pgoff;
- struct vc4_bo *bo;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- bo = to_vc4_bo(gem_obj);
+ struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
@@ -744,72 +720,23 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
-
- /* This ->vm_pgoff dance is needed to make all parties happy:
- * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
- * mem-region, hence the need to set it to zero (the value set by
- * the DRM core is a virtual offset encoding the GEM object-id)
- * - the mmap() core logic needs ->vm_pgoff to be restored to its
- * initial value before returning from this function because it
- * encodes the offset of this GEM in the dev->anon_inode pseudo-file
- * and this information will be used when we invalidate userspace
- * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
- */
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = 0;
- ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
- bo->base.paddr, vma->vm_end - vma->vm_start);
- vma->vm_pgoff = vm_pgoff;
-
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_mmap(obj, vma);
-}
-
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader) {
- DRM_DEBUG("mmaping of shader BOs not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_vmap(obj, map);
+ return drm_gem_cma_mmap(obj, vma);
}
-struct drm_gem_object *
-vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_object *obj;
-
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
- if (IS_ERR(obj))
- return obj;
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
- return obj;
-}
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = vc4_gem_object_mmap,
+ .vm_ops = &vc4_vm_ops,
+};
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
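The vc4_bo.c rework replaces the file-level vc4_mmap(), which wrapped drm_gem_mmap() and then had to undo its VM_PFNMAP and vm_pgoff side effects, with a per-object drm_gem_object_funcs::mmap that the DRM core calls after resolving the GEM object and normalizing vm_pgoff, so the whole offset dance disappears. The per-object shape, as a sketch with hypothetical foo_ names for a CMA-backed driver like vc4:

#include <drm/drm_gem_cma_helper.h>

static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* driver-specific vetoes (e.g. writable shader BOs) go here */
	return drm_gem_cma_mmap(obj, vma);	/* core already fixed up vm_pgoff */
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.get_sg_table	= drm_gem_cma_get_sg_table,
	.vmap		= drm_gem_cma_vmap,
	.mmap		= foo_gem_mmap,
};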
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ea710beb8e00..269390bc586e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
-static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+static int vc4_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ unsigned int channel)
{
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
@@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
mdelay(20);
if (vc4_encoder && vc4_encoder->post_crtc_disable)
- vc4_encoder->post_crtc_disable(encoder);
+ vc4_encoder->post_crtc_disable(encoder, state);
vc4_crtc_pixelvalve_reset(crtc);
vc4_hvs_stop_channel(dev, channel);
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
- vc4_encoder->post_crtc_powerdown(encoder);
+ vc4_encoder->post_crtc_powerdown(encoder, state);
return 0;
}
@@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (channel < 0)
return 0;
- return vc4_crtc_disable(crtc, channel);
+ return vc4_crtc_disable(crtc, NULL, channel);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
+ vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
*/
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
if (vc4_encoder->pre_crtc_configure)
- vc4_encoder->pre_crtc_configure(encoder);
+ vc4_encoder->pre_crtc_configure(encoder, state);
vc4_crtc_config_pv(crtc);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
if (vc4_encoder->pre_crtc_enable)
- vc4_encoder->pre_crtc_enable(encoder);
+ vc4_encoder->pre_crtc_enable(encoder, state);
/* When feeding the transposer block the pixelvalve is unneeded and
* should not be enabled.
@@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
if (vc4_encoder->post_crtc_enable)
- vc4_encoder->post_crtc_enable(encoder);
+ vc4_encoder->post_crtc_enable(encoder, state);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector_state *conn_state;
int ret, i;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -697,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
vc4_plane_async_set_fb(plane, flip_state->fb);
@@ -729,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
kfree(flip_state);
-
- up(&vc4->async_modeset);
}
/* Implements async (non-vblank-synced) page flips.
@@ -745,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
int ret = 0;
struct vc4_async_flip_state *flip_state;
@@ -774,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
flip_state->crtc = crtc;
flip_state->event = event;
- /* Make sure all other async modesetes have landed. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- drm_framebuffer_put(fb);
- vc4_bo_dec_usecnt(bo);
- kfree(flip_state);
- return ret;
- }
-
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
@@ -884,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2cd97a39c286..556ad0f02a0d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -140,17 +140,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
kfree(vc4file);
}
-static const struct file_operations vc4_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = vc4_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(vc4_drm_fops);
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
@@ -190,12 +180,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_mmap = vc4_prime_mmap,
-
- .dumb_create = vc4_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
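DEFINE_DRM_GEM_FOPS() becomes usable here precisely because the custom vc4_mmap() is gone: the stock drm_gem_mmap() now dispatches to the object's .mmap callback. For orientation, the macro expands to roughly the following in this kernel generation (from <drm/drm_gem.h>):

static const struct file_operations vc4_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};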
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 43a1af110b3e..a7500716cf3f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -77,7 +77,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
- struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct vc4_txp *txp;
@@ -215,8 +214,6 @@ struct vc4_dev {
struct work_struct reset_work;
} hangcheck;
- struct semaphore async_modeset;
-
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
struct drm_private_obj hvs_channels;
@@ -444,12 +441,12 @@ struct vc4_encoder {
enum vc4_encoder_type type;
u32 clock_select;
- void (*pre_crtc_configure)(struct drm_encoder *encoder);
- void (*pre_crtc_enable)(struct drm_encoder *encoder);
- void (*post_crtc_enable)(struct drm_encoder *encoder);
+ void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
- void (*post_crtc_disable)(struct drm_encoder *encoder);
- void (*post_crtc_powerdown)(struct drm_encoder *encoder);
+ void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};
static inline struct vc4_encoder *
@@ -785,13 +782,11 @@ struct vc4_validated_shader_info {
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
-void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -806,12 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
@@ -916,11 +905,10 @@ void vc4_irq_reset(struct drm_device *dev);
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
-int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 19aab4e7e209..a55256ed0955 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -306,11 +306,11 @@
# define DSI0_PHY_AFEC0_RESET BIT(11)
# define DSI1_PHY_AFEC0_PD_BG BIT(11)
# define DSI0_PHY_AFEC0_PD BIT(10)
-# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10)
# define DSI0_PHY_AFEC0_PD_BG BIT(9)
# define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9)
# define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8)
-# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8)
# define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4)
# define DSI_PHY_AFEC0_PTATADJ_SHIFT 4
# define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0)
@@ -493,6 +493,18 @@
*/
#define DSI1_ID 0x8c
+struct vc4_dsi_variant {
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ unsigned int port;
+
+ bool broken_axi_workaround;
+
+ const char *debugfs_name;
+ const struct debugfs_reg32 *regs;
+ size_t nregs;
+};
+
/* General DSI hardware state. */
struct vc4_dsi {
struct platform_device *pdev;
@@ -509,8 +521,7 @@ struct vc4_dsi {
u32 *reg_dma_mem;
dma_addr_t reg_paddr;
- /* Whether we're on bcm2835's DSI0 or DSI1. */
- int port;
+ const struct vc4_dsi_variant *variant;
/* DSI channel for the panel we're connected to. */
u32 channel;
@@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
- DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
- DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
/* VC4 DSI encoder KMS struct */
struct vc4_dsi_encoder {
@@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
ret = pm_runtime_get_sync(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
/* Set AFE CTR00/CTR1 to release powerdown of analog. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
@@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
- (dsi->port == 0 ?
+ (dsi->variant->port == 0 ?
VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
@@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP1_ENABLE);
/* Ungate the block. */
- if (dsi->port == 0)
+ if (dsi->variant->port == 0)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
else
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
} else {
DSI_PORT_WRITE(PHY_AFEC0,
DSI_PORT_READ(PHY_AFEC0) &
@@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
+ .port = 1,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
+ .port = 0,
+ .debugfs_name = "dsi0_regs",
+ .regs = dsi0_regs,
+ .nregs = ARRAY_SIZE(dsi0_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+ .port = 1,
+ .broken_axi_workaround = true,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
static const struct of_device_id vc4_dsi_dt_match[] = {
- { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ { .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
+ { .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
+ { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
{}
};
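Pointing of_device_id::data at a variant struct instead of a casted port number lets new SoCs (here BCM2711) be described declaratively, including quirks like broken_axi_workaround. The bind path below retrieves it via of_match_device(); the equivalent, slightly shorter idiom is of_device_get_match_data(), sketched here:

#include <linux/of_device.h>

static int foo_bind(struct device *dev)
{
	const struct vc4_dsi_variant *variant;

	variant = of_device_get_match_data(dev);
	if (!variant)
		return -ENODEV;

	/* variant->port, variant->regs, variant->nregs drive the rest */
	return 0;
}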
@@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
*ret = IRQ_HANDLED;
}
@@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
struct device *dev = &dsi->pdev->dev;
const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
static const struct {
- const char *dsi0_name, *dsi1_name;
+ const char *name;
int div;
} phy_clocks[] = {
- { "dsi0_byte", "dsi1_byte", 8 },
- { "dsi0_ddr2", "dsi1_ddr2", 4 },
- { "dsi0_ddr", "dsi1_ddr", 2 },
+ { "byte", 8 },
+ { "ddr2", 4 },
+ { "ddr", 2 },
};
int i;
@@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
struct clk_init_data init;
+ char clk_name[16];
int ret;
+ snprintf(clk_name, sizeof(clk_name),
+ "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
/* We just use core fixed factor clock ops for the PHY
* clocks. The clocks are actually gated by the
* PHY_AFEC0_DDRCLK_EN bits, which we should be
* handling if we ever need to use the DDR clocks.
*/
@@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
memset(&init, 0, sizeof(init));
init.parent_names = &parent_name;
init.num_parents = 1;
- if (dsi->port == 1)
- init.name = phy_clocks[i].dsi1_name;
- else
- init.name = phy_clocks[i].dsi0_name;
+ init.name = clk_name;
init.ops = &clk_fixed_factor_ops;
ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
@@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
- dsi->port = (uintptr_t)match->data;
+ dsi->variant = match->data;
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
@@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(dsi->regs);
dsi->regset.base = dsi->regs;
- if (dsi->port == 0) {
- dsi->regset.regs = dsi0_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
- } else {
- dsi->regset.regs = dsi1_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
- }
+ dsi->regset.regs = dsi->variant->regs;
+ dsi->regset.nregs = dsi->variant->nregs;
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- /* DSI1 has a broken AXI slave that doesn't respond to writes
- * from the ARM. It does handle writes from the DMA engine,
+ /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to
+ * writes from the ARM. It does handle writes from the DMA engine,
* so set up a channel for talking to it.
*/
- if (dsi->port == 1) {
+ if (dsi->variant->broken_axi_workaround) {
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- if (dsi->port == 1)
- vc4->dsi1 = dsi;
-
drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
@@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
- if (dsi->port == 0)
- vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
- else
- vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
pm_runtime_enable(dev);
@@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
@@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
drm_encoder_cleanup(dsi->encoder);
-
- if (dsi->port == 1)
- vc4->dsi1 = NULL;
}
static const struct component_ops vc4_dsi_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b641252939d8..445d3bab89e0 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1026,7 +1026,6 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
void (*func)(struct vc4_seqno_cb *cb))
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret = 0;
unsigned long irqflags;
cb->func = func;
@@ -1041,7 +1040,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
- return ret;
+ return 0;
}
/* Scheduled when any job has been completed, this walks the list of
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 555106220578..1fda574579af 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -76,12 +76,25 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
+
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0)
+
+#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31)
+
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
#define VC4_HSM_MID_CLOCK 149985000
+#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -119,24 +132,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
}
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
+{
+ u16 clk_cnt;
+ u32 value;
+
+ value = HDMI_READ(HDMI_CEC_CNTRL_1);
+ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+
+ /*
+ * Set the clock divider: the hsm_clock rate and this divider
+ * setting will give a 40 kHz CEC clock.
+ */
+ clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ;
+ value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+}
+#else
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
+#endif
+
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ bool connected = false;
if (vc4_hdmi->hpd_gpio) {
if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
vc4_hdmi->hpd_active_low)
- return connector_status_connected;
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- return connector_status_disconnected;
+ connected = true;
+ } else if (drm_probe_ddc(vc4_hdmi->ddc)) {
+ connected = true;
+ } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
+ connected = true;
}
- if (drm_probe_ddc(vc4_hdmi->ddc))
- return connector_status_connected;
+ if (connected) {
+ if (connector->status != connector_status_connected) {
+ struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
+
+ if (edid) {
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+ }
+ }
- if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
return connector_status_connected;
+ }
+
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return connector_status_disconnected;
}
@@ -170,16 +216,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
- drm_atomic_helper_connector_reset(connector);
+ struct vc4_hdmi_connector_state *old_state =
+ conn_state_to_vc4_hdmi_conn_state(connector->state);
+ struct vc4_hdmi_connector_state *new_state =
+ kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(old_state);
+ __drm_atomic_helper_connector_reset(connector,
+ new_state ? &new_state->base : NULL);
+
+ if (!new_state)
+ return;
+
+ new_state->base.max_bpc = 8;
+ new_state->base.max_requested_bpc = 8;
drm_atomic_helper_connector_tv_reset(connector);
}
+static struct drm_connector_state *
+vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state = connector->state;
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+ struct vc4_hdmi_connector_state *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ new_state->pixel_rate = vc4_state->pixel_rate;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ return &new_state->base;
+}
+
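This reset/duplicate pair is the standard recipe for subclassing drm_connector_state: embed the base state as the first member, let the __drm_atomic_helper_* calls manage the base, and copy the private fields (here pixel_rate) by hand. The downcast helper used throughout is assumed to be a plain container_of(), roughly:

struct vc4_hdmi_connector_state {
	struct drm_connector_state	base;
	unsigned long long		pixel_rate;
};

#define conn_state_to_vc4_hdmi_conn_state(_state)			\
	container_of(_state, struct vc4_hdmi_connector_state, base)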
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -200,12 +278,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
vc4_hdmi->ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
return ret;
drm_connector_attach_tv_margin_properties(connector);
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
@@ -219,7 +305,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
- enum hdmi_infoframe_type type)
+ enum hdmi_infoframe_type type,
+ bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
@@ -227,6 +314,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ if (!poll)
+ return 0;
+
return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
@@ -253,7 +343,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
if (len < 0)
return;
- ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+ ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
return;
@@ -356,7 +446,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_audio_infoframe(encoder);
}
-static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -369,7 +460,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
}
-static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
int ret;
@@ -468,6 +560,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -511,7 +604,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
}
+
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -531,6 +626,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned char gcp;
+ bool gcp_en;
+ u32 reg;
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
@@ -556,6 +654,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+ switch (state->max_bpc) {
+ case 12:
+ gcp = 6;
+ gcp_en = true;
+ break;
+ case 10:
+ gcp = 5;
+ gcp_en = true;
+ break;
+ case 8:
+ default:
+ gcp = 4;
+ gcp_en = false;
+ break;
+ }
+
+ reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
+ reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
+ VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
+ reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
+ VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
+ HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_WORD_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
+ reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ HDMI_WRITE(HDMI_GCP_WORD_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_CONFIG);
+ reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
+ reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
}
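The gcp values selected above are the General Control Packet color-depth (CD) codes from the HDMI 1.4 specification: 4 = 24 bpp, 5 = 30 bpp, 6 = 36 bpp. CD 4 is the default packing, which is why the GCP stays disabled at 8 bpc. A one-line sketch of the mapping, with a hypothetical helper name:

  /* HDMI 1.4 GCP CD field: 4 = 24 bpp (default), 5 = 30 bpp, 6 = 36 bpp */
  static u8 bpc_to_gcp_cd(unsigned int bpc)
  {
          switch (bpc) {
          case 12: return 6;
          case 10: return 5;
          default: return 4;
          }
  }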
@@ -583,8 +714,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
-static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+static struct drm_connector_state *
+vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return conn_state;
+ }
+
+ return NULL;
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state =
+ vc4_hdmi_encoder_get_connector_state(encoder, state);
+ struct vc4_hdmi_connector_state *vc4_conn_state =
+ conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long pixel_rate, hsm_rate;
@@ -596,7 +748,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+ pixel_rate = vc4_conn_state->pixel_rate;
ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
@@ -639,6 +791,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
/*
* FIXME: When the pixel freq is 594MHz (4k60), this needs to be set up
* at 300MHz.
@@ -660,11 +814,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- if (vc4_hdmi->variant->reset)
- vc4_hdmi->variant->reset(vc4_hdmi);
-
if (vc4_hdmi->variant->phy_init)
- vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+ vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
@@ -672,10 +823,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
if (vc4_hdmi->variant->set_timings)
- vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+ vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
}
-static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
@@ -697,7 +849,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
}
-static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -766,6 +919,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long long pixel_rate = mode->clock * 1000;
@@ -790,9 +944,22 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
pixel_rate = mode->clock * 1000;
}
+ if (conn_state->max_bpc == 12) {
+ pixel_rate = pixel_rate * 150;
+ do_div(pixel_rate, 100);
+ } else if (conn_state->max_bpc == 10) {
+ pixel_rate = pixel_rate * 125;
+ do_div(pixel_rate, 100);
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ pixel_rate = pixel_rate * 2;
+
if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;
+ vc4_state->pixel_rate = pixel_rate;
+
return 0;
}
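The max_bpc scaling matches how the TMDS character rate grows with deep color: 10 bpc packs 1.25x and 12 bpc 1.5x the bits of an 8 bpc stream, and double-clocked modes double the result. As a worked example with illustrative numbers, a 148.5 MHz 1080p60 mode needs 185.625 MHz at 10 bpc and 222.75 MHz at 12 bpc, both under the HDMI 1.4 TMDS ceiling (340 MHz) that the bcm2711 variants below advertise via HDMI_14_MAX_TMDS_CLK. A standalone sketch of the same arithmetic:

  /* Sketch only: mirrors the atomic_check scaling; 64-bit math assumed. */
  static unsigned long long tmds_rate(unsigned long long mode_clock_hz,
                                      unsigned int bpc, int dblclk)
  {
          unsigned long long rate = mode_clock_hz;

          if (bpc == 12)
                  rate = rate * 150 / 100;        /* 1.5x */
          else if (bpc == 10)
                  rate = rate * 125 / 100;        /* 1.25x */

          return dblclk ? rate * 2 : rate;
  }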
@@ -936,7 +1103,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
int ret;
vc4_hdmi->audio.streaming = false;
- ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
+ ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
@@ -1267,6 +1434,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
+ card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
@@ -1287,15 +1455,22 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
-static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+
+ if (vc4_hdmi->cec_rx_msg.len)
+ cec_received_msg(vc4_hdmi->cec_adap,
+ &vc4_hdmi->cec_rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
- if (vc4_hdmi->cec_irq_was_rx) {
- if (vc4_hdmi->cec_rx_msg.len)
- cec_received_msg(vc4_hdmi->cec_adap,
- &vc4_hdmi->cec_rx_msg);
- } else if (vc4_hdmi->cec_tx_ok) {
+ if (vc4_hdmi->cec_tx_ok) {
cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
} else {
@@ -1309,15 +1484,35 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
return IRQ_HANDLED;
}
+static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ irqreturn_t ret;
+
+ if (vc4_hdmi->cec_irq_was_rx)
+ ret = vc4_cec_irq_handler_rx_thread(irq, priv);
+ else
+ ret = vc4_cec_irq_handler_tx_thread(irq, priv);
+
+ return ret;
+}
+
static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
{
+ struct drm_device *dev = vc4_hdmi->connector.dev;
struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
unsigned int i;
msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
+
+ if (msg->len > 16) {
+ drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
+ return;
+ }
+
for (i = 0; i < msg->len; i += 4) {
- u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
+ u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
msg->msg[i] = val & 0xff;
msg->msg[i + 1] = (val >> 8) & 0xff;
@@ -1326,31 +1521,55 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
}
}
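The (i >> 2) fix matters because HDMI_READ() takes a register enum in which each entry names one 32-bit register: the loop consumes four message bytes per word, so the word index is i / 4, whereas the old "+ i" stepped four enum slots per iteration and read the wrong registers. A sketch of the byte/word mapping:

  /* Sketch: byte i of a CEC message sits in 32-bit word i >> 2,
   * byte lane i & 3 (little-endian packing). */
  static unsigned char cec_byte(const unsigned int *words, unsigned int i)
  {
          return (words[i >> 2] >> (8 * (i & 3))) & 0xff;
  }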
+static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ u32 cntrl1;
+
+ cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+ vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
+ cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ u32 cntrl1;
+
+ vc4_hdmi->cec_rx_msg.len = 0;
+ cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+ vc4_cec_read_msg(vc4_hdmi, cntrl1);
+ cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+ cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+ return IRQ_WAKE_THREAD;
+}
+
static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS);
- u32 cntrl1, cntrl5;
+ irqreturn_t ret;
+ u32 cntrl5;
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
- vc4_hdmi->cec_rx_msg.len = 0;
- cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+
cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
- if (vc4_hdmi->cec_irq_was_rx) {
- vc4_cec_read_msg(vc4_hdmi, cntrl1);
- cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
- HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
- cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
- } else {
- vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
- cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
- }
- HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
- HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ if (vc4_hdmi->cec_irq_was_rx)
+ ret = vc4_cec_irq_handler_rx_bare(irq, priv);
+ else
+ ret = vc4_cec_irq_handler_tx_bare(irq, priv);
- return IRQ_WAKE_THREAD;
+ HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ return ret;
}
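The handlers are split along the usual threaded-IRQ line: the *_bare halves run in hard-irq context and only ack the hardware and snapshot status, returning IRQ_WAKE_THREAD, while the *_thread halves run in process context where the CEC framework calls (cec_received_msg(), cec_transmit_done()) are allowed to sleep. A minimal sketch of the registration pattern, with hypothetical foo_* names:

  #include <linux/interrupt.h>

  static irqreturn_t foo_irq_bare(int irq, void *priv);          /* acks hw */
  static irqreturn_t foo_irq_thread(int irq, void *priv);        /* may sleep */

  static int foo_register_irq(struct device *dev, int irq, void *priv)
  {
          return devm_request_threaded_irq(dev, irq,
                                           foo_irq_bare, foo_irq_thread,
                                           0, "foo irq", priv);
  }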
static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -1387,9 +1606,11 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
- HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
} else {
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
}
@@ -1410,11 +1631,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *dev = vc4_hdmi->connector.dev;
u32 val;
unsigned int i;
+ if (msg->len > 16) {
+ drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+ return -ENOMEM;
+ }
+
for (i = 0; i < msg->len; i += 4)
- HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
+ HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
(msg->msg[i]) |
(msg->msg[i + 1] << 8) |
(msg->msg[i + 2] << 16) |
@@ -1441,11 +1668,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
u32 value;
int ret;
- if (!vc4_hdmi->variant->cec_available)
+ if (!of_find_property(dev->of_node, "interrupts", NULL)) {
+ dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
return 0;
+ }
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4_hdmi, "vc4",
@@ -1458,23 +1688,39 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(HDMI_CEC_CNTRL_1);
- value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
- /*
- * Set the logical address to Unregistered and set the clock
- * divider: the hsm_clock rate and this divider setting will
- * give a 40 kHz CEC clock.
- */
- value |= VC4_HDMI_CEC_ADDR_MASK |
- (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
+ /* Set the logical address to Unregistered */
+ value |= VC4_HDMI_CEC_ADDR_MASK;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
- ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
- if (ret)
- goto err_delete_cec_adap;
+
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+ if (vc4_hdmi->variant->external_irq_controller) {
+ ret = devm_request_threaded_irq(&pdev->dev,
+ platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+ } else {
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+
+ ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+ }
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
@@ -1574,6 +1820,7 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
+ vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
return 0;
}
@@ -1667,6 +1914,12 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->audio_clock);
}
+ vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
+ if (IS_ERR(vc4_hdmi->cec_clock)) {
+ DRM_ERROR("Failed to get CEC clock\n");
+ return PTR_ERR(vc4_hdmi->cec_clock);
+ }
+
vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(vc4_hdmi->reset)) {
DRM_ERROR("Failed to get HDMI reset line\n");
@@ -1739,6 +1992,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+ if (vc4_hdmi->variant->reset)
+ vc4_hdmi->variant->reset(vc4_hdmi);
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1834,7 +2090,6 @@ static const struct vc4_hdmi_variant bcm2835_variant = {
.debugfs_name = "hdmi_regs",
.card_name = "vc4-hdmi",
.max_pixel_clock = 162000000,
- .cec_available = true,
.registers = vc4_hdmi_fields,
.num_registers = ARRAY_SIZE(vc4_hdmi_fields),
@@ -1853,7 +2108,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -1863,6 +2118,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
PHY_LANE_CK,
},
.unsupported_odd_h_timings = true,
+ .external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
@@ -1879,7 +2135,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
@@ -1889,6 +2145,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
PHY_LANE_2,
},
.unsupported_odd_h_timings = true,
+ .external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 0526a9cf608a..3cebd1fd00fc 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_hdmi_encoder, base.base);
}
-struct drm_display_mode;
-
struct vc4_hdmi;
struct vc4_hdmi_register;
+struct vc4_hdmi_connector_state;
enum vc4_hdmi_phy_channel {
PHY_LANE_0 = 0,
@@ -43,9 +42,6 @@ struct vc4_hdmi_variant {
/* Filename to expose the registers in debugfs */
const char *debugfs_name;
- /* Set to true when the CEC support is available */
- bool cec_available;
-
/* Maximum pixel clock supported by the controller (in Hz) */
unsigned long long max_pixel_clock;
@@ -65,6 +61,13 @@ struct vc4_hdmi_variant {
/* The BCM2711 cannot deal with odd horizontal pixel timings */
bool unsupported_odd_h_timings;
+ /*
+ * The BCM2711 CEC/hotplug IRQ controller is shared between the
+ * two HDMI controllers, and we have a proper irqchip driver for
+ * it.
+ */
+ bool external_irq_controller;
+
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
@@ -78,11 +81,12 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode);
- /* Callback to initialize the PHY according to the mode */
+ /* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
/* Callback to disable the PHY */
void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
@@ -155,6 +159,7 @@ struct vc4_hdmi {
bool cec_tx_ok;
bool cec_irq_was_rx;
+ struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
struct clk *audio_clock;
@@ -180,14 +185,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder)
return container_of(_encoder, struct vc4_hdmi, encoder);
}
+struct vc4_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned long long pixel_rate;
+};
+
+static inline struct vc4_hdmi_connector_state *
+conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state)
+{
+ return container_of(conn_state, struct vc4_hdmi_connector_state, base);
+}
+
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 057796b54c51..36535480f8e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -127,7 +127,8 @@
#define OSCILLATOR_FREQUENCY 54000000
-void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
-void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- unsigned long long pixel_freq = mode->clock * 1000;
+ unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
u8 vco_sel, vco_div;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 96d764ebfe67..e1b58eac766f 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -29,6 +29,7 @@ enum vc4_hdmi_field {
HDMI_CEC_CPU_MASK_SET,
HDMI_CEC_CPU_MASK_STATUS,
HDMI_CEC_CPU_STATUS,
+ HDMI_CEC_CPU_SET,
/*
* Transmit data, first byte is low byte of the 32-bit reg.
@@ -59,9 +60,12 @@ enum vc4_hdmi_field {
*/
HDMI_CTS_0,
HDMI_CTS_1,
+ HDMI_DEEP_COLOR_CONFIG_1,
HDMI_DVP_CTL,
HDMI_FIFO_CTL,
HDMI_FRAME_COUNT,
+ HDMI_GCP_CONFIG,
+ HDMI_GCP_WORD_1,
HDMI_HORZA,
HDMI_HORZB,
HDMI_HOTPLUG,
@@ -196,9 +200,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
+ VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344),
VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
- VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350),
VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
};
@@ -229,6 +234,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
@@ -305,6 +313,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index cccd341e5d67..c239045e05d6 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
SCALER_DISPSTATX_EMPTY);
}
-int vc4_hvs_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
@@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc,
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
*/
- if (hweight32(state->connector_mask) > 1)
+ if (hweight32(crtc_state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
dlist_count += vc4_plane_dlist_size(plane_state);
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
}
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
@@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
unsigned int chan = vc4_state->assigned_channel;
@@ -620,11 +622,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba310c0ab5f6..f09254c2497d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,7 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
struct vc4_hvs_state {
struct drm_private_state base;
- unsigned int unassigned_channels;
+
+ struct {
+ unsigned in_use: 1;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
};
static struct vc4_hvs_state *
@@ -183,6 +187,32 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
}
static struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
@@ -302,14 +332,15 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
-static void
-vc4_atomic_complete_commit(struct drm_atomic_state *state)
+static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
+ struct vc4_hvs_state *old_hvs_state;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -325,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
+ if (IS_ERR(old_hvs_state))
+ return;
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
+ struct drm_crtc_commit *commit;
+ unsigned int channel = vc4_crtc_state->assigned_channel;
+ unsigned long done;
+
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
+
+ if (!old_hvs_state->fifo_state[channel].in_use)
+ continue;
+
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
+ continue;
- drm_atomic_helper_wait_for_dependencies(state);
+ done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for hw_done\n");
+
+ done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for flip_done\n");
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -350,125 +407,37 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_helper_commit_cleanup_done(state);
-
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 0);
-
- drm_atomic_state_put(state);
-
- up(&vc4->async_modeset);
-}
-
-static void commit_work(struct work_struct *work)
-{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
- vc4_atomic_complete_commit(state);
}
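The wait loop added above pairs with the setup hook below: atomic_commit_setup() stashes a drm_crtc_commit reference per in-use FIFO, and the next commit's tail waits on that commit's hw_done and flip_done completions before reprogramming the muxing, logging rather than failing on the 10-second timeout so a wedged commit cannot deadlock its successor. A minimal sketch of the wait side, assuming 'prev' is the stashed reference; unlike the patch, which drops the reference in the private-state destroy hook, this sketch puts it inline:

  static void foo_wait_prev_commit(struct drm_device *dev,
                                   struct drm_crtc_commit *prev)
  {
          if (!wait_for_completion_timeout(&prev->hw_done, 10 * HZ))
                  drm_err(dev, "Timed out waiting for hw_done\n");

          if (!wait_for_completion_timeout(&prev->flip_done, 10 * HZ))
                  drm_err(dev, "Timed out waiting for flip_done\n");

          drm_crtc_commit_put(prev);
  }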
-/**
- * vc4_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int vc4_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
-
- if (state->async_update) {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
-
- drm_atomic_helper_async_commit(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- up(&vc4->async_modeset);
-
- return 0;
- }
+ struct drm_crtc_state *crtc_state;
+ struct vc4_hvs_state *hvs_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
- /* We know for sure we don't want an async update here. Set
- * state->legacy_cursor_update to false to prevent
- * drm_atomic_helper_setup_commit() from auto-completing
- * commit->flip_done.
- */
- state->legacy_cursor_update = false;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ if (IS_ERR(hvs_state))
+ return PTR_ERR(hvs_state);
- INIT_WORK(&state->commit_work, commit_work);
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(crtc_state);
+ unsigned int channel =
+ vc4_crtc_state->assigned_channel;
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
+ if (!hvs_state->fifo_state[channel].in_use)
+ continue;
- if (!nonblock) {
- ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- up(&vc4->async_modeset);
- return ret;
- }
+ hvs_state->fifo_state[channel].pending_commit =
+ drm_crtc_commit_get(crtc_state->commit);
}
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
- vc4_atomic_complete_commit(state);
-
return 0;
}
@@ -697,6 +666,7 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
struct vc4_hvs_state *state;
+ unsigned int i;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -704,7 +674,16 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- state->unassigned_channels = old_state->unassigned_channels;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+
+ if (!old_state->fifo_state[i].pending_commit)
+ continue;
+
+ state->fifo_state[i].pending_commit =
+ drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
+ }
return &state->base;
}
@@ -713,6 +692,14 @@ static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+ unsigned int i;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].pending_commit)
+ continue;
+
+ drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
+ }
kfree(hvs_state);
}
@@ -737,7 +724,6 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
if (!state)
return -ENOMEM;
- state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
&state->base,
&vc4_hvs_state_funcs);
@@ -781,12 +767,17 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct vc4_hvs_state *hvs_new_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
+ unsigned int unassigned_channels = 0;
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
if (!hvs_new_state)
return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
+ if (!hvs_new_state->fifo_state[i].in_use)
+ unassigned_channels |= BIT(i);
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct vc4_crtc_state *old_vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
@@ -794,6 +785,7 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
+ unsigned int channel;
/* Nothing to do here, let's skip it */
if (old_crtc_state->enable == new_crtc_state->enable)
@@ -804,7 +796,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
- hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+ channel = old_vc4_crtc_state->assigned_channel;
+ hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -833,15 +826,14 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
* the future, we will need to have something smarter,
* but it works so far.
*/
- matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (matching_channels) {
- unsigned int channel = ffs(matching_channels) - 1;
-
- new_vc4_crtc_state->assigned_channel = channel;
- hvs_new_state->unassigned_channels &= ~BIT(channel);
- } else {
+ matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ if (!matching_channels)
return -EINVAL;
- }
+
+ channel = ffs(matching_channels) - 1;
+ new_vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ hvs_new_state->fifo_state[channel].in_use = true;
}
return 0;
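A worked example of the first-fit pick above, with illustrative masks: if channels 1 and 2 are free (unassigned = 0x6) and the CRTC can feed channels 0 and 2 (available = 0x5), then matching = 0x4 and ffs(0x4) - 1 = 2, so channel 2 is claimed and marked in_use. As standalone C:

  #include <strings.h>        /* ffs() */

  /* Returns the lowest channel present in both masks, or -1 if none match. */
  static int pick_channel(unsigned int unassigned, unsigned int available)
  {
          unsigned int matching = unassigned & available;

          return matching ? ffs(matching) - 1 : -1;
  }
  /* pick_channel(0x6, 0x5) == 2 */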
@@ -867,9 +859,14 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return vc4_load_tracker_atomic_check(state);
}
+static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
+ .atomic_commit_setup = vc4_atomic_commit_setup,
+ .atomic_commit_tail = vc4_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.atomic_check = vc4_atomic_check,
- .atomic_commit = vc4_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
.fb_create = vc4_fb_create,
};
@@ -889,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev)
vc4->load_tracker_enabled = true;
}
- sema_init(&vc4->async_modeset, 1);
-
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
@@ -910,6 +905,7 @@ int vc4_kms_load(struct drm_device *dev)
}
dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 6b39cc2ca18d..7322169c0682 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
- vc4_state->dlist[vc4_state->dlist_count++] = val;
+ vc4_state->dlist_count++;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ unsigned int idx = vc4_state->dlist_count;
+
+ vc4_dlist_counter_increment(vc4_state);
+ vc4_state->dlist[idx] = val;
}
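Splitting the counter bump out of vc4_dlist_write() lets a caller reserve a slot whose value is not known yet, which the lbm_offset hunk further down relies on: the slot index is recorded, the counter advances past it, and the real LBM address is written in later once the allocation succeeds. A sketch of the idiom with a hypothetical helper:

  /* Reserve a dlist slot now; the caller backfills s->dlist[slot] later. */
  static unsigned int foo_dlist_reserve(struct vc4_plane_state *s)
  {
          unsigned int slot = s->dlist_count;

          vc4_dlist_counter_increment(s);        /* grows dlist[] if needed */
          return slot;
  }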
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -437,6 +445,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
@@ -472,7 +481,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
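With the drm_mm pools in the vc4_hvs.c hunk above now sized in LBM words rather than bytes, vc4_lbm_size() must hand back the same unit: the size is padded to the hardware fetch alignment (64 bytes, or 128 on HVS5) and then divided by the pixels per word (2, or 4 on HVS5). Illustrative arithmetic only: 30720 stays 30720 after 128-byte alignment and becomes 7680 words on HVS5, versus 15360 words at 64/2 on the older HVS:

  /* Sketch of the align-then-word-convert step (illustrative units). */
  static unsigned int lbm_words(unsigned int lbm, int hvs5)
  {
          unsigned int align = hvs5 ? 128 : 64;

          lbm = (lbm + align - 1) / align * align;        /* roundup() */
          return lbm / (hvs5 ? 4 : 2);
  }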
@@ -912,9 +925,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
@@ -973,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE)
- vc4_state->lbm_offset = vc4_state->dlist_count++;
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
@@ -1268,11 +1283,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_async_update = vc4_plane_atomic_async_update,
};
-static void vc4_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1323,7 +1333,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vc4_plane_destroy,
+ .destroy = drm_plane_cleanup,
.set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 8aa5220885f4..c0122d83b651 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -382,7 +382,6 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
};
@@ -395,7 +394,7 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -408,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_hvs_atomic_disable(crtc, old_state);
+ vc4_hvs_atomic_disable(crtc, state);
/*
* Make sure we issue a vblank event after disabling the CRTC if
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f8635ccaf9a1..a0e75f1d5d01 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
}
obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
- npages);
+ drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index a3e0fb5b8671..faeae5d881fb 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -308,7 +308,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("via_driver_irq_postinstall\n");
+ DRM_DEBUG("fun: %s\n", __func__);
if (!dev_priv)
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index b925b8b1da16..51ec7c3240c9 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
- depends on DRM && VIRTIO && VIRTIO_MENU && MMU
+ depends on DRM && VIRTIO_MENU && MMU
+ select VIRTIO
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5fefc88d47e4..c2b20e0ee030 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,14 +28,13 @@
#include "virtgpu_drv.h"
-static void virtio_add_bool(struct seq_file *m, const char *name,
- bool value)
+static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
+ bool value)
{
seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
}
-static void virtio_add_int(struct seq_file *m, const char *name,
- int value)
+static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
{
seq_printf(m, "%-16s : %d\n", name, value);
}
@@ -45,13 +44,16 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
- virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
- virtio_add_bool(m, "edid", vgdev->has_edid);
- virtio_add_bool(m, "indirect", vgdev->has_indirect);
- virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
- virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
- virtio_add_int(m, "cap sets", vgdev->num_capsets);
- virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_gpu_add_bool(m, "edid", vgdev->has_edid);
+ virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect);
+
+ virtio_gpu_add_bool(m, "resource uuid",
+ vgdev->has_resource_assign_uuid);
+
+ virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
(unsigned long)vgdev->host_visible_region.addr,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 27f13bd29c13..a21dc3ad6f88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -54,7 +54,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
- dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
"virtiodrmfb");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6a232553c99b..d9dbc4f258f3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -136,6 +136,7 @@ struct virtio_gpu_fence_driver {
struct virtio_gpu_fence {
struct dma_fence f;
+ uint64_t fence_id;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 728ca36f6327..d28e25e8409b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,51 +27,48 @@
#include "virtgpu_drv.h"
-#define to_virtio_fence(x) \
+#define to_virtio_gpu_fence(x) \
container_of(x, struct virtio_gpu_fence, f)
-static const char *virtio_get_driver_name(struct dma_fence *f)
+static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct dma_fence *f)
+static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_gpu_fence_signaled(struct dma_fence *f)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
- if (WARN_ON_ONCE(fence->f.seqno == 0))
- /* leaked fence outside driver before completing
- * initialization with virtio_gpu_fence_emit */
- return false;
- if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
- return true;
+ /* leaked fence outside driver before completing
+ * initialization with virtio_gpu_fence_emit.
+ */
+ WARN_ON_ONCE(f->seqno == 0);
return false;
}
-static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
{
- snprintf(str, size, "%llu", f->seqno);
+ snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
}
-static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
+ int size)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
snprintf(str, size, "%llu",
(u64)atomic64_read(&fence->drv->last_fence_id));
}
-static const struct dma_fence_ops virtio_fence_ops = {
- .get_driver_name = virtio_get_driver_name,
- .get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_fence_signaled,
- .fence_value_str = virtio_fence_value_str,
- .timeline_value_str = virtio_timeline_value_str,
+static const struct dma_fence_ops virtio_gpu_fence_ops = {
+ .get_driver_name = virtio_gpu_get_driver_name,
+ .get_timeline_name = virtio_gpu_get_timeline_name,
+ .signaled = virtio_gpu_fence_signaled,
+ .fence_value_str = virtio_gpu_fence_value_str,
+ .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
@@ -88,7 +85,8 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
+ 0);
return fence;
}
@@ -101,7 +99,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->f.seqno = ++drv->current_fence_id;
+ fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -109,24 +107,45 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
trace_dma_fence_emit(&fence->f);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
+ cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
u64 fence_id)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct virtio_gpu_fence *fence, *tmp;
+ struct virtio_gpu_fence *signaled, *curr, *tmp;
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
- list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (fence_id < fence->f.seqno)
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ if (fence_id != curr->fence_id)
continue;
- dma_fence_signal_locked(&fence->f);
- list_del(&fence->node);
- dma_fence_put(&fence->f);
+
+ signaled = curr;
+
+ /*
+ * Signal any fences with a strictly smaller sequence number
+ * than the current signaled fence.
+ */
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ /* dma-fence contexts must match */
+ if (signaled->f.context != curr->f.context)
+ continue;
+
+ if (!dma_fence_is_later(&signaled->f, &curr->f))
+ continue;
+
+ dma_fence_signal_locked(&curr->f);
+ list_del(&curr->node);
+ dma_fence_put(&curr->f);
+ }
+
+ dma_fence_signal_locked(&signaled->f);
+ list_del(&signaled->node);
+ dma_fence_put(&signaled->f);
+ break;
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
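The rewritten handler copes with fence IDs completing out of order across dma-fence contexts: on an exact fence_id match it also retires every pending fence from the same context with a strictly smaller seqno, which dma_fence_is_later() tests. A sketch of the retirement predicate used above:

  #include <linux/dma-fence.h>

  /* Within one context, dma_fence_is_later(a, b) is true iff a follows b. */
  static bool retires_with(struct dma_fence *signaled, struct dma_fence *f)
  {
          return signaled->context == f->context &&
                 dma_fence_is_later(signaled, f);
  }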
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index c30c75ee83fc..8502400b2f9c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- if (vgdev->has_virgl_3d)
- virtio_gpu_create_context(dev, file);
-
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
if (ret < 0)
return ret;
@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_virgl_3d)
goto out_notify;
+ /* the context might still be missing when the first ioctl is
+ * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
+ */
+ virtio_gpu_create_context(obj->dev, file);
+
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index b4ec479c32cd..b375394193be 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev)
vgdev->host_visible_region.len,
dev_name(&vgdev->vdev->dev))) {
DRM_ERROR("Could not reserve host visible region\n");
+ ret = -EBUSY;
goto err_vqs;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 23c21bc4d01e..5cc34e7330fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -69,6 +69,7 @@ static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.close = virtio_gpu_gem_object_close,
.free = virtio_gpu_vram_free,
.mmap = virtio_gpu_vram_mmap,
+ .export = virtgpu_gem_prime_export,
};
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
@@ -134,6 +135,8 @@ int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
obj = &vram->base.base.base;
obj->funcs = &virtio_gpu_vram_funcs;
+
+ params->size = PAGE_ALIGN(params->size);
drm_gem_private_object_init(vgdev->ddev, obj, params->size);
/* Create fake offset */
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d4d39227f2ed..2173b82606f6 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -34,12 +34,16 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static struct vkms_device *vkms_device;
+static struct vkms_config *default_config;
-bool enable_cursor = true;
+static bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
+static bool enable_writeback = true;
+module_param_named(enable_writeback, enable_writeback, bool, 0444);
+MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_release(struct drm_device *dev)
@@ -113,16 +117,20 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- dev->mode_config.preferred_depth = 32;
+ /* FIXME: There's a confusion between bpp and depth between this and
+ * fbdev helpers. We have to go with 0, meaning "pick the default",
+ * which is XRGB8888 in all cases. */
+ dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
return vkms_output_init(vkmsdev, 0);
}
-static int __init vkms_init(void)
+static int vkms_create(struct vkms_config *config)
{
int ret;
struct platform_device *pdev;
+ struct vkms_device *vkms_device;
pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(pdev))
@@ -140,6 +148,8 @@ static int __init vkms_init(void)
goto out_devres;
}
vkms_device->platform = pdev;
+ vkms_device->config = config;
+ config->dev = vkms_device;
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
@@ -176,21 +186,47 @@ out_unregister:
return ret;
}
-static void __exit vkms_exit(void)
+static int __init vkms_init(void)
+{
+ struct vkms_config *config;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ default_config = config;
+
+ config->cursor = enable_cursor;
+ config->writeback = enable_writeback;
+
+ return vkms_create(config);
+}
+
+static void vkms_destroy(struct vkms_config *config)
{
struct platform_device *pdev;
- if (!vkms_device) {
+ if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = vkms_device->platform;
+ pdev = config->dev->platform;
- drm_dev_unregister(&vkms_device->drm);
- drm_atomic_helper_shutdown(&vkms_device->drm);
+ drm_dev_unregister(&config->dev->drm);
+ drm_atomic_helper_shutdown(&config->dev->drm);
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
+
+ config->dev = NULL;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (default_config->dev)
+ vkms_destroy(default_config);
+
+ kfree(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5ed91ff08cb3..35540c7c4416 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -19,8 +19,6 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-extern bool enable_cursor;
-
struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
@@ -82,10 +80,20 @@ struct vkms_output {
spinlock_t composer_lock;
};
+struct vkms_device;
+
+struct vkms_config {
+ bool writeback;
+ bool cursor;
+ /* only set when instantiated */
+ struct vkms_device *dev;
+};
+
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
struct vkms_output output;
+ const struct vkms_config *config;
};
#define drm_crtc_to_vkms_output(target) \
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 4a1848b0318f..f5f6f15c362c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -41,12 +41,13 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor = NULL;
int ret;
+ int writeback;
primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (enable_cursor) {
+ if (vkmsdev->config->cursor) {
cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
@@ -80,9 +81,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
goto err_attach;
}
- ret = vkms_enable_writeback_connector(vkmsdev);
- if (ret)
- DRM_ERROR("Failed to init writeback connector\n");
+ if (vkmsdev->config->writeback) {
+ writeback = vkms_enable_writeback_connector(vkmsdev);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
drm_mode_config_reset(dev);
@@ -98,7 +101,7 @@ err_connector:
drm_crtc_cleanup(crtc);
err_crtc:
- if (enable_cursor)
+ if (vkmsdev->config->cursor)
drm_plane_cleanup(cursor);
err_cursor:
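
vkms_output_init above follows the usual kernel unwind idiom: optional steps (cursor plane, writeback connector) are gated on the config, each failure jumps to a label that tears down only what was set up so far, and the error path re-checks the same config flag before cleaning up the cursor plane. A minimal runnable sketch of the idiom (generic names, not the vkms functions):

#include <stdlib.h>

static void *a, *b;

static int init_chain(int want_b)
{
	a = malloc(16);			/* mandatory step */
	if (!a)
		return -1;

	if (want_b) {			/* optional step, gated like config->cursor */
		b = malloc(16);
		if (!b)
			goto err_b;	/* unwind only what was set up */
	}
	return 0;

err_b:
	free(a);
	a = NULL;
	return -1;
}

static void fini_chain(void)
{
	free(b);
	free(a);
}

int main(void)
{
	if (init_chain(1))
		return 1;
	fini_chain();
	return 0;
}
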
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 31f85f09f1fc..cc4cdca7176e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
- vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
+ vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 16077785ad47..0fe869d0fad1 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,7 +59,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index f41550797970..180f6dbc9460 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -555,7 +555,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -564,7 +564,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.cid = bi->ctx->id;
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -587,7 +587,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -598,7 +598,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -626,7 +626,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -636,7 +636,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
cmd->body.s1.stage = binding->texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -657,7 +657,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -665,7 +665,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -686,7 +686,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -703,7 +703,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.sizeInBytes = 0;
cmd->body.sid = SVGA3D_INVALID_ID;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -810,7 +810,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -821,7 +821,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -846,7 +846,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -860,7 +860,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -930,7 +930,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -938,7 +938,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
cmd->header.size = sizeof(cmd->body) + so_target_size;
memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -1044,7 +1044,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1054,7 +1054,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->dirty_vb,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -1074,7 +1074,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1086,7 +1086,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1104,7 +1104,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1116,7 +1116,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1263,7 +1263,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1279,7 +1279,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1315,14 +1315,14 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetStreamOutput body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
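
Every scrub/emit helper in this file follows the same three-step shape that the rename makes explicit: reserve space for a fixed-layout command, fill header and body, commit exactly the reserved size. Sketched generically over a plain byte buffer (cmd_reserve/cmd_commit are hypothetical stand-ins, not the vmwgfx helpers):

#include <stdint.h>
#include <stdio.h>

static uint8_t ring[4096];
static size_t ring_head, reserved;

/* Hand out a contiguous scratch region of 'bytes' (sketch: no wrap). */
static void *cmd_reserve(size_t bytes)
{
	if (ring_head + bytes > sizeof(ring))
		return NULL;
	reserved = bytes;
	return &ring[ring_head];
}

/* Publish exactly what was reserved. */
static void cmd_commit(size_t bytes)
{
	if (bytes > reserved)
		return;		/* real code would WARN here */
	ring_head += bytes;
	reserved = 0;
}

struct cmd_header { uint32_t id, size; };
struct set_shader_body { uint32_t cid, type, shid; };

int main(void)
{
	struct {
		struct cmd_header header;
		struct set_shader_body body;
	} *cmd;

	cmd = cmd_reserve(sizeof(*cmd));
	if (!cmd)
		return 1;	/* -ENOMEM in the driver */

	cmd->header.id = 42;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = 1;
	cmd->body.type = 0;
	cmd->body.shid = ~0u;	/* SVGA3D_INVALID_ID-style sentinel */

	cmd_commit(sizeof(*cmd));
	printf("committed %zu bytes\n", ring_head);
	return 0;
}
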
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index f21881e087db..9f2779ddcf08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->num_pages;
- d.src_num_pages = src->num_pages;
+ d.dst_num_pages = dst->mem.num_pages;
+ d.src_num_pages = src->mem.num_pages;
d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 263d76ae43f0..63dbc44eebe0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
+ place.lpfn = bo->mem.num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
+ bo->mem.start < bo->mem.num_pages &&
bo->mem.start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a95156fc5db7..7400d617ae3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -36,9 +36,8 @@ struct vmw_temp_set_context {
SVGA3dCmdDXTempSetContext body;
};
-bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+bool vmw_supports_3d(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -62,15 +61,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ hwversion = vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
@@ -87,13 +86,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
@@ -102,7 +100,6 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
@@ -129,6 +126,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
+
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
@@ -139,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
if (min < PAGE_SIZE)
min = PAGE_SIZE;
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
- vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
wmb();
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
@@ -159,15 +157,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) fifo->capabilities);
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
- vmw_marker_queue_init(&fifo->marker_queue);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
@@ -175,13 +172,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
- dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -190,8 +185,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- vmw_marker_queue_takedown(&fifo->marker_queue);
-
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
@@ -205,11 +198,10 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
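
vmw_fifo_is_full above is the classic ring-buffer occupancy check: with min..max delimiting the ring and next_cmd/stop as producer/consumer cursors, the free space in the non-wrapped case is (max - next_cmd) + (stop - min), and the ring counts as full when that is <= the requested bytes. Worked example, checked in a tiny runnable sketch:

#include <assert.h>
#include <stdint.h>

/* Free bytes when the producer (next_cmd) has not wrapped past stop. */
static uint32_t fifo_free(uint32_t min, uint32_t max,
			  uint32_t next_cmd, uint32_t stop)
{
	return (max - next_cmd) + (stop - min);
}

int main(void)
{
	/* 1024-byte ring at [16, 1040), producer at 900, consumer at 100:
	 * (1040 - 900) + (100 - 16) = 224 free bytes, so a 224-byte
	 * reservation is already refused (the test is <= bytes). */
	assert(fifo_free(16, 1040, 900, 100) == 224);
	return 0;
}
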
@@ -298,7 +290,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -306,9 +298,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
int ret;
mutex_lock(&fifo_state->fifo_mutex);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
@@ -319,7 +311,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->reserved_size = bytes;
while (1) {
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
@@ -353,8 +345,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->using_bounce_buffer = false;
if (reserveable)
- vmw_mmio_write(bytes, fifo_mem +
- SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv,
+ SVGA_FIFO_RESERVED,
+ bytes);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
@@ -381,7 +374,7 @@ out_err:
return NULL;
}
-void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
@@ -402,10 +395,11 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
}
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
+ u32 *fifo_mem = vmw->fifo_mem;
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
@@ -414,7 +408,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
if (bytes < chunk_size)
chunk_size = bytes;
- vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
@@ -423,7 +417,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -431,12 +425,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
- vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
+ vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
mb();
bytes -= sizeof(uint32_t);
}
@@ -445,10 +439,9 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
if (fifo_state->dx)
@@ -462,10 +455,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (fifo_state->using_bounce_buffer) {
if (reserveable)
- vmw_fifo_res_copy(fifo_state, fifo_mem,
+ vmw_fifo_res_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
else
- vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ vmw_fifo_slow_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
@@ -481,18 +474,18 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (next_cmd >= max)
next_cmd -= max - min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
}
if (reserveable)
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
@@ -507,7 +500,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
-void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -522,7 +515,7 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
-int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
@@ -532,7 +525,7 @@ int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
return 0;
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -540,7 +533,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = VMW_FIFO_RESERVE(dev_priv, bytes);
+ fm = VMW_CMD_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -560,15 +553,14 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
* waiting code in vmwgfx_irq.c will emulate this.
*/
- vmw_fifo_commit(dev_priv, 0);
+ vmw_cmd_commit(dev_priv, 0);
return 0;
}
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
- vmw_fifo_commit_flush(dev_priv, bytes);
- (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+ vmw_cmd_commit_flush(dev_priv, bytes);
vmw_update_seqno(dev_priv, fifo_state);
out_err:
@@ -599,7 +591,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -616,7 +608,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.guestResult.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -645,7 +637,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -657,7 +649,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->body.mobid = bo->mem.start;
cmd->body.offset = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -681,7 +673,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9a9fe10d829b..45fbc41440f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -610,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
/* Send a new fence in case one was removed */
if (send_fence) {
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ vmw_cmd_send_fence(man->dev_priv, &dummy);
wake_up_all(&man->idle_queue);
}
@@ -1208,18 +1208,14 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
*
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
@@ -1230,7 +1226,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
- man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
@@ -1313,7 +1309,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
@@ -1322,7 +1318,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
}
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
@@ -1387,7 +1383,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
} else {
- dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
}
}
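
The cmdbuf changes above switch every DMA call site from the PCI device (&dev_priv->dev->pdev->dev) to the embedded DRM device's struct device (dev_priv->drm.dev); the allocation strategy itself is unchanged: one large dma_alloc_coherent() chunk for the main space pool, plus dma_pool_create() pools for the small fixed-size headers. In outline (kernel-style sketch, assuming a valid struct device *dev; sizes and names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

static void *pool_map;
static dma_addr_t pool_handle;
static struct dma_pool *hdr_pool;

static int setup_dma(struct device *dev, size_t size)
{
	/* One big coherent chunk for the command space pool. */
	pool_map = dma_alloc_coherent(dev, PAGE_ALIGN(size),
				      &pool_handle, GFP_KERNEL);
	if (!pool_map)
		return -ENOMEM;

	/* Small, 64-byte-aligned header objects come from a dma_pool. */
	hdr_pool = dma_pool_create("hdrs", dev, 128, 64, PAGE_SIZE);
	if (!hdr_pool) {
		dma_free_coherent(dev, PAGE_ALIGN(size), pool_map, pool_handle);
		return -ENOMEM;
	}
	return 0;
}
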
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 61c246335e66..6f4d0da11ad8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -163,7 +163,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -171,7 +171,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_dec(dev_priv);
}
@@ -265,7 +265,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
vmw_resource_unreference(&res);
return -ENOMEM;
@@ -275,7 +275,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
res->hw_destroy = vmw_hw_context_destroy;
return 0;
@@ -316,7 +316,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -325,7 +325,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -348,7 +348,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -392,7 +392,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -411,7 +411,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -440,14 +440,14 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
@@ -483,7 +483,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -492,7 +492,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -515,7 +515,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -525,7 +525,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -608,7 +608,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -627,7 +627,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -656,14 +656,14 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 984d8884357d..ba658fa9cf6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -175,7 +175,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
cmd->body.mobid = bo->mem.start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
- vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
vcotbl->scrubbed = false;
return 0;
@@ -263,7 +263,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (!cmd1)
return -ENOMEM;
@@ -283,7 +283,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd1->body.type = vcotbl->type;
cmd1->body.mobid = SVGA3D_INVALID_ID;
cmd1->body.validSizeInBytes = 0;
- vmw_fifo_commit_flush(dev_priv, submit_size);
+ vmw_cmd_commit_flush(dev_priv, submit_size);
vcotbl->scrubbed = true;
/* Trigger a create() on next validate. */
@@ -349,7 +349,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->num_pages; ++i) {
+ for (i = 0; i < old_bo->mem.num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 216daf93022c..dd69b51c40e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,10 +32,10 @@
#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_object.h"
@@ -43,8 +43,6 @@
#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
-#define VMWGFX_CHIP_SVGAII 0
-#define VMW_FB_RESERVATION 0
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
@@ -253,8 +251,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
- {0, 0, 0}
+ { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+ { }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -425,8 +423,7 @@ static int vmw_request_device_late(struct vmw_private *dev_priv)
}
if (dev_priv->cman) {
- ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
- 256*4096, 2*4096);
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
@@ -609,7 +606,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
*/
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret = 0;
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
@@ -644,26 +641,84 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
#endif
}
-static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+static int vmw_setup_pci_resources(struct vmw_private *dev,
+ unsigned long pci_id)
{
- struct vmw_private *dev_priv;
+ resource_size_t fifo_start;
+ resource_size_t fifo_size;
int ret;
+ struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, "vmwgfx probe");
+ if (ret)
+ return ret;
+
+ dev->io_start = pci_resource_start(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 1);
+ dev->vram_size = pci_resource_len(pdev, 1);
+ fifo_start = pci_resource_start(pdev, 2);
+ fifo_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("FIFO at %pa size is %llu kiB\n",
+ &fifo_start, (uint64_t)fifo_size / 1024);
+ dev->fifo_mem = devm_memremap(dev->drm.dev,
+ fifo_start,
+ fifo_size,
+ MEMREMAP_WB);
+
+ if (IS_ERR(dev->fifo_mem)) {
+ DRM_ERROR("Failed mapping FIFO memory.\n");
+ pci_release_regions(pdev);
+ return PTR_ERR(dev->fifo_mem);
+ }
+
+ /*
+ * This is the approximate size of the vram; the exact size will only
+ * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
+ * size will be equal to or bigger than the size reported by
+ * SVGA_REG_VRAM_SIZE.
+ */
+ DRM_INFO("VRAM at %pa size is %llu kiB\n",
+ &dev->vram_start, (uint64_t)dev->vram_size / 1024);
+
+ return 0;
+}
+
+static int vmw_detect_version(struct vmw_private *dev)
+{
uint32_t svga_id;
+
+ vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
+ svga_id, dev->vmw_chipset);
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+{
+ int ret;
enum vmw_res_type i;
bool refuse_dma = false;
char host_log[100] = {0};
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(!dev_priv)) {
- DRM_ERROR("Failed allocating a device private struct.\n");
- return -ENOMEM;
- }
+ dev_priv->vmw_chipset = pci_id;
+ dev_priv->last_read_seqno = (uint32_t) -100;
+ dev_priv->drm.dev_private = dev_priv;
- pci_set_master(dev->pdev);
+ ret = vmw_setup_pci_resources(dev_priv, pci_id);
+ if (ret)
+ return ret;
+ ret = vmw_detect_version(dev_priv);
+ if (ret)
+ goto out_no_pci_or_version;
- dev_priv->dev = dev;
- dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
@@ -673,7 +728,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
- spin_lock_init(&dev_priv->svga_lock);
spin_lock_init(&dev_priv->cursor_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -688,21 +742,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->used_memory_size = 0;
- dev_priv->io_start = pci_resource_start(dev->pdev, 0);
- dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
- dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
-
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
dev_priv->enable_fb = enable_fbdev;
- vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
- svga_id = vmw_read(dev_priv, SVGA_REG_ID);
- if (svga_id != SVGA_ID_2) {
- ret = -ENOSYS;
- DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- goto out_err0;
- }
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
@@ -720,7 +763,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
@@ -794,7 +837,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
- dma_set_max_seg_size(dev->dev, U32_MAX);
+ dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
@@ -804,21 +847,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
- DRM_INFO("Maximum display memory size is %u kiB\n",
- dev_priv->prim_bb_mem / 1024);
- DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
- dev_priv->vram_start, dev_priv->vram_size / 1024);
- DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
- dev_priv->mmio_start, dev_priv->mmio_size / 1024);
-
- dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
- dev_priv->mmio_size, MEMREMAP_WB);
-
- if (unlikely(dev_priv->mmio_virt == NULL)) {
- ret = -ENOMEM;
- DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err0;
- }
+ DRM_INFO("Maximum display memory size is %llu kiB\n",
+ (uint64_t)dev_priv->prim_bb_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
@@ -826,7 +856,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
- goto out_err4;
+ goto out_err0;
}
dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
@@ -835,29 +865,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
- goto out_err4;
- }
-
- dev->dev_private = dev_priv;
-
- ret = pci_request_regions(dev->pdev, "vmwgfx probe");
- dev_priv->stealth = (ret != 0);
- if (dev_priv->stealth) {
- /**
- * Request at least the mmio PCI resource.
- */
-
- DRM_INFO("It appears like vesafb is loaded. "
- "Ignore above error if any.\n");
- ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
- goto out_no_device;
- }
+ goto out_err0;
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = vmw_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -874,8 +886,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
- dev_priv->dev->dev,
- dev->anon_inode->i_mapping,
+ dev_priv->drm.dev,
+ dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
@@ -955,7 +967,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
- DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
? "yes." : "no.");
if (dev_priv->sm_type == VMW_SM_5)
DRM_INFO("SM5 support available.\n");
@@ -1000,29 +1012,24 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
-out_no_device:
ttm_object_device_release(&dev_priv->tdev);
-out_err4:
- memunmap(dev_priv->mmio_virt);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
- kfree(dev_priv);
+out_no_pci_or_version:
+ pci_release_regions(pdev);
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1052,21 +1059,16 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
+ vmw_irq_uninstall(&dev_priv->drm);
ttm_object_device_release(&dev_priv->tdev);
- memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
- kfree(dev_priv);
+ pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
@@ -1190,12 +1192,10 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (!ttm_resource_manager_used(man)) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
ttm_resource_manager_set_used(man, true);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1221,14 +1221,12 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1253,19 +1251,16 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
* to be inconsistent with the device, causing modesetting problems.
*
*/
- vmw_kms_lost_device(dev_priv->dev);
+ vmw_kms_lost_device(&dev_priv->drm);
ttm_write_lock(&dev_priv->reservation_sem, false);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
- ttm_resource_manager_set_used(man, false);
- spin_unlock(&dev_priv->svga_lock);
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
+ ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
- } else
- spin_unlock(&dev_priv->svga_lock);
+ }
ttm_write_unlock(&dev_priv->reservation_sem);
}
@@ -1275,8 +1270,6 @@ static void vmw_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
vmw_driver_unload(dev);
- drm_dev_put(dev);
- pci_disable_device(pdev);
}
static unsigned long
@@ -1377,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev)
* No user-space processes should be running now.
*/
ttm_suspend_unlock(&dev_priv->reservation_sem);
- ret = vmw_kms_suspend(dev_priv->dev);
+ ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
ttm_suspend_lock(&dev_priv->reservation_sem);
DRM_ERROR("Failed to freeze modesetting.\n");
@@ -1409,7 +1402,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_fence_fifo_down(dev_priv->fman);
__vmw_svga_disable(dev_priv);
-
+
vmw_release_device_late(dev_priv);
return 0;
}
@@ -1438,7 +1431,7 @@ static int vmw_pm_restore(struct device *kdev)
dev_priv->suspend_locked = false;
ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
- vmw_kms_resume(dev_priv->dev);
+ vmw_kms_resume(&dev_priv->drm);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
@@ -1507,39 +1500,36 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct drm_device *dev;
+ struct vmw_private *vmw;
int ret;
- ret = pci_enable_device(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
+ vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vmw_private, drm);
+ if (IS_ERR(vmw))
+ return PTR_ERR(vmw);
- ret = vmw_driver_load(dev, ent->driver_data);
- if (ret)
- goto err_drm_dev_put;
+ vmw->drm.pdev = pdev;
+ pci_set_drvdata(pdev, &vmw->drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = vmw_driver_load(vmw, ent->device);
if (ret)
- goto err_vmw_driver_unload;
+ return ret;
- return 0;
+ ret = drm_dev_register(&vmw->drm, 0);
+ if (ret) {
+ vmw_driver_unload(&vmw->drm);
+ return ret;
+ }
-err_vmw_driver_unload:
- vmw_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
static int __init vmwgfx_init(void)
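
The probe rewrite above is the standard conversion to managed resources: pcim_enable_device() ties the PCI enable to the device lifetime and devm_drm_dev_alloc() embeds the drm_device in the driver-private struct and frees it automatically, so the error-unwind labels and the explicit drm_dev_put()/pci_disable_device() calls disappear. Reduced to its skeleton (my_private/my_load/my_unload are hypothetical, not the vmwgfx symbols; 'driver' is assumed in scope as in the patch):

/* Kernel-style sketch of a devm/pcim-managed DRM probe. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct my_private *priv;
	int ret;

	ret = pcim_enable_device(pdev);	/* auto-disabled on unbind */
	if (ret)
		return ret;

	priv = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct my_private, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* nothing to unwind: devm owns it */

	pci_set_drvdata(pdev, &priv->drm);

	ret = my_load(priv);		/* hypothetical hardware bring-up */
	if (ret)
		return ret;

	ret = drm_dev_register(&priv->drm, 0);
	if (ret)
		my_unload(&priv->drm);	/* only manual unwind left */
	return ret;
}
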
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b45becbb00f8..5fa5bcd20cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,7 +39,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include "ttm_lock.h"
#include "ttm_object.h"
@@ -67,6 +66,8 @@
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+#define VMWGFX_PCI_ID_SVGA2 0x0405
+
/*
* Perhaps we should have sysfs entries for these.
*/
@@ -275,13 +276,6 @@ struct vmw_surface {
struct list_head view_list;
};
-struct vmw_marker_queue {
- struct list_head head;
- u64 lag;
- u64 lag_time;
- spinlock_t lock;
-};
-
struct vmw_fifo_state {
unsigned long reserved_size;
u32 *dynamic_buffer;
@@ -291,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_marker_queue marker_queue;
bool dx;
};
@@ -490,19 +483,19 @@ enum vmw_sm_type {
};
struct vmw_private {
+ struct drm_device drm;
struct ttm_bo_device bdev;
struct vmw_fifo_state fifo;
- struct drm_device *dev;
struct drm_vma_offset_manager vma_manager;
- unsigned long vmw_chipset;
- unsigned int io_start;
- uint32_t vram_start;
- uint32_t vram_size;
- uint32_t prim_bb_mem;
- uint32_t mmio_start;
- uint32_t mmio_size;
+ u32 vmw_chipset;
+ resource_size_t io_start;
+ resource_size_t vram_start;
+ resource_size_t vram_size;
+ resource_size_t prim_bb_mem;
+ u32 *fifo_mem;
+ resource_size_t fifo_mem_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t texture_max_width;
@@ -511,7 +504,6 @@ struct vmw_private {
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- u32 *mmio_virt;
uint32_t capabilities;
uint32_t capabilities2;
uint32_t max_gmr_ids;
@@ -591,13 +583,7 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
struct mutex binding_mutex;
- /**
- * Operating mode.
- */
-
- bool stealth;
bool enable_fb;
- spinlock_t svga_lock;
/**
* PM management.
@@ -967,30 +953,29 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *seqno);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
- bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
+ bool interruptible);
-#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id) \
({ \
- vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({ \
DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
__func__, (unsigned int) __bytes); \
NULL; \
}); \
})
-#define VMW_FIFO_RESERVE(__priv, __bytes) \
- VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+#define VMW_CMD_RESERVE(__priv, __bytes) \
+ VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -1125,19 +1110,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -1411,8 +1383,7 @@ struct vmw_cmdbuf_header;
extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1581,28 +1552,29 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
}
/**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
*
- * @addr: The address to read from
+ * @fifo_reg: The fifo register to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*/
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
- return READ_ONCE(*addr);
+ return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
/**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to the fifo memory
*
- * @addr: The address to write to
+ * @fifo_reg: The fifo register to write to
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.
*/
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
+ u32 value)
{
- WRITE_ONCE(*addr, value);
+ WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif
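With mmio_virt removed, FIFO registers are accessed through these helpers by register index rather than by pointer arithmetic on a mapped area. A minimal sketch of a read-modify-write, modeled on the cursor-count update in vmwgfx_kms.c later in this patch (example_bump_cursor_count is illustrative; the real driver holds dev_priv->cursor_lock around this sequence):

static void example_bump_cursor_count(struct vmw_private *vmw)
{
	/* Register offsets index the fifo_mem array directly. */
	u32 count = vmw_fifo_mem_read(vmw, SVGA_FIFO_CURSOR_COUNT);

	vmw_fifo_mem_write(vmw, SVGA_FIFO_CURSOR_COUNT, count + 1);
}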
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e67e2e8f6e6f..462f17320708 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -724,7 +724,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
if (cmd == NULL)
return -ENOMEM;
@@ -732,7 +732,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1100,7 +1100,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
- ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ bo_size = vmw_bo->base.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -3762,7 +3762,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
/* p_handle implies file_priv. */
BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ ret = vmw_cmd_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
@@ -3876,10 +3876,10 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, command_size);
if (!cmd)
return -ENOMEM;
@@ -3888,7 +3888,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_fifo_commit(dev_priv, command_size);
+ vmw_cmd_commit(dev_priv, command_size);
return 0;
}
@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (ret)
- goto out_free_fence_fd;
+ VMW_DEBUG_USER("Throttling is no longer supported.\n");
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
@@ -4329,7 +4325,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (ret)
goto out_no_emit;
dev_priv->query_cid_valid = false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4d60201037d1..33f07abfc3ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -258,7 +258,7 @@ out_unreserve:
if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
- vmw_fifo_flush(vmw_priv, false);
+ vmw_cmd_flush(vmw_priv, false);
}
out_unlock:
mutex_unlock(&par->bo_mutex);
@@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
- drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
par->set_mode = NULL;
}
@@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info)
struct drm_display_mode *mode;
int ret;
- mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+ mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
@@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
- drm_mode_destroy(vmw_priv->dev, mode);
+ drm_mode_destroy(&vmw_priv->drm, mode);
return -EINVAL;
}
@@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info)
out_unlock:
if (par->set_mode)
- drm_mode_destroy(vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&vmw_priv->drm, par->set_mode);
par->set_mode = mode;
mutex_unlock(&par->bo_mutex);
@@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = {
int vmw_fb_init(struct vmw_private *vmw_priv)
{
- struct device *device = &vmw_priv->dev->pdev->dev;
+ struct device *device = vmw_priv->drm.dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 0f8d29397157..378ec7600154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- u32 *fifo_mem = dev_priv->mmio_virt;
- u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
@@ -401,14 +400,12 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- u32 *fifo_mem;
struct vmw_fence_obj *fence;
if (likely(!fman->seqno_valid))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
@@ -416,8 +413,9 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- vmw_mmio_write(fence->base.seqno,
- fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv,
+ SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
break;
}
}
@@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- u32 *fifo_mem;
if (dma_fence_is_signaled_locked(&fence->base))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
fman->seqno_valid = true;
return true;
@@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- u32 *fifo_mem = fman->dev_priv->mmio_virt;
- seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -492,7 +488,7 @@ rerun:
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
@@ -1033,7 +1029,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
eaction->action.type = VMW_ACTION_EVENT;
eaction->fence = vmw_fence_obj_reference(fence);
- eaction->dev = fman->dev_priv->dev;
+ eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
@@ -1055,7 +1051,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
{
struct vmw_event_fence_pending *event;
struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct drm_device *dev = fman->dev_priv->dev;
+ struct drm_device *dev = &fman->dev_priv->drm;
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 83c0d5a3e4fd..964ddf1ca57a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -98,7 +98,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
- vmw_fifo_commit(dev_priv, cmd_size);
+ vmw_cmd_commit(dev_priv, cmd_size);
return 0;
}
@@ -110,7 +110,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, define_size);
if (unlikely(cmd == NULL))
return;
@@ -120,7 +120,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
- vmw_fifo_commit(dev_priv, define_size);
+ vmw_cmd_commit(dev_priv, define_size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index be325a62c178..1774960d1b89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
@@ -65,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += bo->num_pages;
+ gman->used_gmr_pages += mem->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
mem->mm_node = gman;
mem->start = id;
- mem->num_pages = bo->num_pages;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= bo->num_pages;
+ gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
return -ENOSPC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f681b7b4df1b..80af8772b8c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -51,7 +51,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = vmw_overlay_num_free_overlays(dev_priv);
break;
case DRM_VMW_PARAM_3D:
- param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+ param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
@@ -67,7 +67,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- u32 *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -76,11 +75,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
}
param->value =
- vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -235,7 +234,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_err;
} else {
- fifo_mem = dev_priv->mmio_virt;
+ fifo_mem = dev_priv->fifo_mem;
memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 75f3efee21a4..6c2a569f1fcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -117,12 +117,10 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
- vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman);
}
}
@@ -222,11 +220,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
- if (ret == 0 && fifo_idle) {
- u32 *fifo_mem = dev_priv->mmio_virt;
+ if (ret == 0 && fifo_idle)
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
- vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
- }
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_idle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bc67f2b930e1..9a89f658e501 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -36,9 +36,6 @@
#include "vmwgfx_kms.h"
-/* Might need a hrtimer here? */
-#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
void vmw_du_cleanup(struct vmw_display_unit *du)
{
drm_plane_cleanup(&du->primary);
@@ -68,7 +65,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -83,7 +80,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
cmd->cursor.hotspotX = hotspotX;
cmd->cursor.hotspotY = hotspotY;
- vmw_fifo_commit_flush(dev_priv, cmd_size);
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
return 0;
}
@@ -128,15 +125,14 @@ err_unreserve:
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
- vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
- vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
- vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
- count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
- vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
spin_unlock(&dev_priv->cursor_lock);
}
@@ -236,7 +232,7 @@ err_unreserve:
*/
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -252,7 +248,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -891,7 +887,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
bool is_bo_proxy)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
@@ -1003,11 +999,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1033,10 +1029,10 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
break;
}
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1213,14 +1209,14 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
*mode_cmd)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1319,7 +1315,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
bo, &surface);
if (ret)
return ERR_PTR(ret);
@@ -1768,7 +1764,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
if (ret)
return ret;
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
return 0;
}
@@ -1780,7 +1776,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
return;
dev_priv->hotplug_mode_update_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
@@ -1791,7 +1787,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
int vmw_kms_init(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
drm_mode_config_init(dev);
@@ -1823,7 +1819,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* but since it destroys encoders and our destructor calls
 * drm_encoder_cleanup, which takes the lock, we deadlock.
*/
- drm_mode_config_cleanup(dev_priv->dev);
+ drm_mode_config_cleanup(&dev_priv->drm);
if (dev_priv->active_display_unit == vmw_du_legacy)
ret = vmw_kms_ldu_close_display(dev_priv);
@@ -1876,11 +1872,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(pitch, vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+ if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
@@ -1934,7 +1930,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc)
static int vmw_du_update_layout(struct vmw_private *dev_priv,
unsigned int num_rects, struct drm_rect *rects)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
@@ -2366,7 +2362,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (dirty->crtc) {
units[num_units++] = vmw_crtc_to_du(dirty->crtc);
} else {
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
head) {
struct drm_plane *plane = crtc->primary;
@@ -2386,7 +2382,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
+ dirty->cmd = VMW_CMD_RESERVE(dev_priv,
dirty->fifo_reserve_size);
if (!dirty->cmd)
return -ENOMEM;
@@ -2520,7 +2516,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
if (!cmd)
return -ENOMEM;
@@ -2549,7 +2545,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
copy_size += sizeof(*cmd);
}
- vmw_fifo_commit(dev_priv, copy_size);
+ vmw_cmd_commit(dev_priv, copy_size);
return 0;
}
@@ -2568,8 +2564,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
int i = 0;
int ret = 0;
- mutex_lock(&dev_priv->dev->mode_config.mutex);
- list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+ list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
head) {
if (i == unit)
break;
@@ -2577,7 +2573,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
++i;
}
- if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+ if (&con->head == &dev_priv->drm.mode_config.connector_list) {
DRM_ERROR("Could not find initial display unit.\n");
ret = -EINVAL;
goto out_unlock;
@@ -2611,7 +2607,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
out_unlock:
- mutex_unlock(&dev_priv->dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
return ret;
}
@@ -2631,7 +2627,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
return;
dev_priv->implicit_placement_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
}
@@ -2752,7 +2748,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
+ cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
@@ -2801,7 +2797,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
if (reserved_size < submit_size)
submit_size = 0;
- vmw_fifo_commit(update->dev_priv, submit_size);
+ vmw_cmd_commit(update->dev_priv, submit_size);
vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
update->out_fence, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9d1de5b5cc6a..9a9508edbc9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -125,7 +125,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
i++;
}
@@ -355,7 +354,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -479,7 +478,7 @@ err_free:
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (dev_priv->ldu_priv) {
@@ -554,7 +553,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -567,6 +566,6 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
cmd[i].body.height = clips->y2 - clips->y1;
}
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
deleted file mode 100644
index e53bc639a754..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "vmwgfx_drv.h"
-
-struct vmw_marker {
- struct list_head head;
- uint32_t seqno;
- u64 submitted;
-};
-
-void vmw_marker_queue_init(struct vmw_marker_queue *queue)
-{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = 0;
- queue->lag_time = ktime_get_raw_ns();
- spin_lock_init(&queue->lock);
-}
-
-void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
-{
- struct vmw_marker *marker, *next;
-
- spin_lock(&queue->lock);
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- kfree(marker);
- }
- spin_unlock(&queue->lock);
-}
-
-int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno)
-{
- struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
-
- if (unlikely(!marker))
- return -ENOMEM;
-
- marker->seqno = seqno;
- marker->submitted = ktime_get_raw_ns();
- spin_lock(&queue->lock);
- list_add_tail(&marker->head, &queue->head);
- spin_unlock(&queue->lock);
-
- return 0;
-}
-
-int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno)
-{
- struct vmw_marker *marker, *next;
- bool updated = false;
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
-
- if (list_empty(&queue->head)) {
- queue->lag = 0;
- queue->lag_time = now;
- updated = true;
- goto out_unlock;
- }
-
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- if (signaled_seqno - marker->seqno > (1 << 30))
- continue;
-
- queue->lag = now - marker->submitted;
- queue->lag_time = now;
- updated = true;
- list_del(&marker->head);
- kfree(marker);
- }
-
-out_unlock:
- spin_unlock(&queue->lock);
-
- return (updated) ? 0 : -EBUSY;
-}
-
-static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
- queue->lag += now - queue->lag_time;
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
-}
-
-
-static bool vmw_lag_lt(struct vmw_marker_queue *queue,
- uint32_t us)
-{
- u64 cond = (u64) us * NSEC_PER_USEC;
-
- return vmw_fifo_lag(queue) <= cond;
-}
-
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us)
-{
- struct vmw_marker *marker;
- uint32_t seqno;
- int ret;
-
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- seqno = atomic_read(&dev_priv->marker_seq);
- else {
- marker = list_first_entry(&queue->head,
- struct vmw_marker, head);
- seqno = marker->seqno;
- }
- spin_unlock(&queue->lock);
-
- ret = vmw_wait_seqno(dev_priv, false, seqno, true,
- 3*HZ);
-
- if (unlikely(ret != 0))
- return ret;
-
- (void) vmw_marker_pull(queue, seqno);
- }
- return 0;
-}
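For reference, the file being deleted implemented execbuf throttling by timestamping each submitted seqno and waiting until the measured FIFO lag fell under the caller's budget. Condensed, the core loop amounted to the sketch below; queue_lag_ns() and oldest_queued_seqno() are descriptive placeholders, not functions that existed in the file:

/* Roughly what the removed vmw_wait_lag() did. */
while (queue_lag_ns(queue) > (u64)us * NSEC_PER_USEC) {
	/* Oldest queued marker, or atomic_read(&dev_priv->marker_seq)
	 * when the queue is empty. */
	uint32_t seqno = oldest_queued_seqno(queue, dev_priv);
	int ret = vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);

	if (unlikely(ret != 0))
		return ret;
	(void) vmw_marker_pull(queue, seqno);
}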
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7f95ed6aa224..a372980fe6a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -148,7 +148,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -170,7 +170,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
*/
BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
otable->page_table = mob;
return 0;
@@ -203,7 +203,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -215,7 +215,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
cmd->body.sizeInBytes = 0;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (bo) {
int ret;
@@ -558,12 +558,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
@@ -625,7 +625,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
goto out_no_cmd_space;
@@ -636,7 +636,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cd7ed1650d60..d6d282c13b7f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -122,7 +122,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -169,7 +169,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fill_flush(flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
@@ -192,7 +192,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
+ cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
@@ -211,7 +211,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
cmds->body.items[0].value = false;
fill_flush(&cmds->flush, stream_id);
- vmw_fifo_commit(dev_priv, sizeof(*cmds));
+ vmw_cmd_commit(dev_priv, sizeof(*cmds));
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 0b76b3d17d4c..0a900afc66ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.num_pages;
+ pgoff_t num_pages = vbo->base.mem.num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
+ if (unlikely(page_offset >= bo->mem.num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 00b535831a7a..d1e7b9608145 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
int ret;
if (likely(res->backup)) {
- BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ BUG_ON(res->backup->base.base.size < size);
return 0;
}
@@ -827,7 +827,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -835,7 +835,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Triggers a rebind the next time affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4bdad2f2d130..b0db059b8cfb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -132,7 +132,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -153,7 +153,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
sou->defined = true;
@@ -181,7 +181,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -189,7 +189,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
cmd->body.screenId = sou->base.unit;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_object_unit *sou;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -946,7 +946,7 @@ err_free:
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
@@ -992,7 +992,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -1003,7 +1003,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1029,7 +1029,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
int i;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -1061,7 +1061,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
blit->bottom -= sdirty->top;
}
- vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+ vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd));
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1185,11 +1185,11 @@ out_unref:
static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
@@ -1295,11 +1295,11 @@ out_unref:
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f328aa5839a2..905ae50aaa2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -222,7 +222,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -233,7 +233,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -256,7 +256,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -266,7 +266,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -284,7 +284,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -293,7 +293,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/*
* Create a fence object and fence the backup buffer.
@@ -324,7 +324,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -333,7 +333,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -394,7 +394,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -404,7 +404,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->body.shid = shader->id;
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
@@ -481,7 +481,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -491,7 +491,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&shader->cotable_head);
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret;
}
- if ((u64)buffer->base.num_pages * PAGE_SIZE <
- (u64)size + (u64)offset) {
+ if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 3f97b61dd5d8..7369dd86d3a9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,7 +170,7 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -181,7 +181,7 @@ static int vmw_view_create(struct vmw_resource *res)
/* Sid may have changed due to surface eviction. */
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
cmd->body.sid = view->srf->id;
- vmw_fifo_commit(res->dev_priv, view->cmd_size);
+ vmw_cmd_commit(res->dev_priv, view->cmd_size);
res->id = view->view_id;
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
@@ -213,14 +213,14 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
cmd->body.view_id = view->view_id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5b04ec047ef3..fbe977881364 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -170,7 +170,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
stdu->defined = true;
stdu->display_width = mode->hdisplay;
@@ -229,7 +229,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -239,7 +239,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
cmd->body.image = image;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -293,7 +293,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -301,7 +301,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
0, stdu->display_width,
0, stdu->display_height);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -329,7 +329,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -338,7 +338,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -499,7 +499,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.host.mipmap = 0;
cmd->body.transfer = ddirty->transfer;
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = ddirty->buf->base.base.size;
if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
blit_size += sizeof(struct vmw_stdu_update);
@@ -522,7 +522,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
ddirty->top, ddirty->bottom);
}
- vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
@@ -628,7 +628,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
goto out_cleanup;
@@ -636,7 +636,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x1, region.x2,
region.y1, region.y2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
out_cleanup:
@@ -795,7 +795,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
size_t commit_size;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -817,7 +817,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
sdirty->right, sdirty->top, sdirty->bottom);
- vmw_fifo_commit(dirty->dev_priv, commit_size);
+ vmw_cmd_commit(dirty->dev_priv, commit_size);
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = vfbbo->buffer->base.base.size;
vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
bb->y1, bb->y2);
@@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_target_display_unit *stdu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 193192456663..1dd042a20a66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -99,7 +99,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
if (!list_empty(&so->cotable_head) || !so->committed )
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -109,7 +109,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(so->cotable, &so->cotable_head);
@@ -172,7 +172,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
WARN_ON_ONCE(!so->committed);
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -182,7 +182,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&so->cotable_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3914bfee0533..f6cab77075a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -372,12 +372,12 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
if (unlikely(!cmd))
return;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+ vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
/*
* used_memory_size_atomic, or separate lock
@@ -440,14 +440,14 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
vmw_surface_define_encode(srf, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
vmw_fifo_resource_inc(dev_priv);
/*
@@ -492,14 +492,14 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -578,12 +578,12 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Surface memory usage accounting.
@@ -1121,7 +1121,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
cmd4 = (typeof(cmd4))cmd;
@@ -1188,7 +1188,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->body.size.depth = metadata->base_size.depth;
}
- vmw_fifo_commit(dev_priv, submit_len);
+ vmw_cmd_commit(dev_priv, submit_len);
return 0;
@@ -1219,7 +1219,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
return -ENOMEM;
@@ -1233,7 +1233,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
}
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
if (res->backup->dirty && res->backup_dirty) {
/* We've just made a full upload. Clear dirty regions. */
@@ -1272,7 +1272,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
@@ -1295,7 +1295,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
cmd3->body.sid = res->id;
cmd3->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -1328,7 +1328,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -1337,7 +1337,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup,
&user_srf->backup_base);
if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
+ if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+ rep->crep.buffer_size = srf->res.backup->base.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1896,7 +1895,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
goto out;
alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
@@ -1932,7 +1931,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
}
}
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
out:
memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
dirty->num_subres);
@@ -2032,14 +2031,14 @@ static int vmw_surface_clean(struct vmw_resource *res)
} *cmd;
alloc_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 155ca3a5c7e5..e8e79de255cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -5,7 +5,6 @@
* Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 6a04261ce760..dbb068830d80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
*/
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
@@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
*/
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
@@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
vsgt->num_pages, 0,
(unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->dev->dev),
+ dma_get_max_seg_size(dev_priv->drm.dev),
NULL, 0, GFP_KERNEL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
@@ -611,8 +611,8 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
- ttm_cached);
+ ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
else
ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 74db5a840bed..b293c67230ef 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
xen_obj->sgt_imported = sgt;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
- NULL, xen_obj->num_pages);
+ ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+ xen_obj->num_pages);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index c685d94409b0..148add0ca1d6 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1396,19 +1396,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
static void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- struct drm_crtc *crtc = &disp->crtc;
-
zynqmp_disp_audio_disable(&disp->audio);
zynqmp_disp_avbuf_disable_audio(&disp->avbuf);
zynqmp_disp_avbuf_disable_channels(&disp->avbuf);
zynqmp_disp_avbuf_disable(&disp->avbuf);
-
- /* Mark the flip is done as crtc is disabled anyway */
- if (crtc->state->event) {
- complete_all(crtc->state->event->base.completion);
- crtc->state->event = NULL;
- }
}
static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
@@ -1499,6 +1491,13 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(&disp->crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
clk_disable_unprepare(disp->pclk);
pm_runtime_put_sync(disp->dev);
}
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index c8f7b21fa09e..78d787afe594 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -438,15 +438,10 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
.atomic_disable = zx_plane_atomic_disable,
};
-static void zx_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs zx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = zx_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d506fcc..e617f60afeea 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
/* unused */
} , {
/* unused */
- } , {
- /* unused */
- } , {
- /* unused */
},
};
/* can't use #7 and #8 for line active and pixel active counters */
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 1df6ab5d339d..48ad154df3a7 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -567,12 +567,9 @@ static int cport_enable(struct gb_host_device *hd, u16 cport_id,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, cport_id, 0,
req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
- if (ret != sizeof(*req)) {
+ if (ret < 0) {
dev_err(&udev->dev, "failed to set cport flags for port %d\n",
cport_id);
- if (ret >= 0)
- ret = -EIO;
-
goto out;
}
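Aside (an editor's sketch, not part of the patch): the simplified "ret < 0" check reads as if the transfer was converted to the usb_control_msg_send() helper, which returns 0 on success or a negative errno and treats short transfers as errors internally, making the old length comparison redundant. Assuming that helper (the call site is outside this hunk, and "request" below stands for the vendor request code defined elsewhere in es2.c):

	/* Hypothetical call shape after conversion to usb_control_msg_send():
	 * 0 on success, negative errno otherwise -- no byte count to check. */
	ret = usb_control_msg_send(udev, 0, request,
				   USB_DIR_OUT | USB_TYPE_VENDOR |
				   USB_RECIP_INTERFACE, cport_id, 0,
				   req, sizeof(*req), ES2_USB_CTRL_TIMEOUT,
				   GFP_KERNEL);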
@@ -961,12 +958,10 @@ static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
0, 0,
rpc->req, le16_to_cpu(rpc->req->size),
ES2_USB_CTRL_TIMEOUT);
- if (retval != le16_to_cpu(rpc->req->size)) {
+ if (retval < 0) {
dev_err(&udev->dev,
"failed to send ARPC request %d: %d\n",
rpc->req->type, retval);
- if (retval > 0)
- retval = -EIO;
return retval;
}
diff --git a/drivers/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h
index 1bc9f1275c65..616a3bd61aa6 100644
--- a/drivers/greybus/greybus_trace.h
+++ b/drivers/greybus/greybus_trace.h
@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(gb_message,
__entry->result = message->header->result;
),
- TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
+ TP_printk("size=%u operation_id=0x%04x type=0x%02x result=0x%02x",
__entry->size, __entry->operation_id,
__entry->type, __entry->result)
);
@@ -317,7 +317,7 @@ DECLARE_EVENT_CLASS(gb_interface,
__entry->mode_switch = intf->mode_switch;
),
- TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
+ TP_printk("intf_id=%u device_id=%u module_id=%u D=%d J=%d A=%d E=%d M=%d",
__entry->id, __entry->device_id, __entry->module_id,
__entry->disconnected, __entry->ejected, __entry->active,
__entry->enabled, __entry->mode_switch)
@@ -391,7 +391,7 @@ DECLARE_EVENT_CLASS(gb_module,
__entry->disconnected = module->disconnected;
),
- TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
+ TP_printk("hd_bus_id=%d module_id=%u num_interfaces=%zu disconnected=%d",
__entry->hd_bus_id, __entry->module_id,
__entry->num_interfaces, __entry->disconnected)
);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7bdda1b5b221..786b71ef7738 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -853,6 +853,24 @@ config HID_PLANTRONICS
Say M here if you may ever plug in a Plantronics USB audio device.
+config HID_PLAYSTATION
+ tristate "PlayStation HID Driver"
+ depends on HID
+ select CRC32
+ select POWER_SUPPLY
+ help
+ Provides support for Sony PS5 controllers, including their
+ special functionalities, e.g. touchpad, lights and motion
+ sensors.
+
+config PLAYSTATION_FF
+ bool "PlayStation force feedback support"
+ depends on HID_PLAYSTATION
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here if you would like to enable force feedback support for
+ PlayStation game controllers.
+
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
depends on HID
@@ -899,6 +917,7 @@ config HID_SONY
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
+ select CRC32
help
Support for
@@ -908,6 +927,7 @@ config HID_SONY
* Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
* Guitar Hero Live PS3 and Wii U guitar dongles
+ * Guitar Hero PS3 and PC guitar dongles
config SONY_FF
bool "Sony PS2/3/4 accessories force feedback support"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 014d21fe7dac..c4f6d5c613dc 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -94,6 +94,7 @@ hid-picolcd-$(CONFIG_HID_PICOLCD_CIR) += hid-picolcd_cir.o
hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
+obj-$(CONFIG_HID_PLAYSTATION) += hid-playstation.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o
obj-$(CONFIG_HID_RETRODE) += hid-retrode.o
@@ -138,7 +139,7 @@ obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
-obj-$(CONFIG_I2C_HID) += i2c-hid/
+obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 3d1ccac5d99a..2ab38b715347 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -154,7 +154,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
- &cl_data->sensor_phys_addr[i],
+ &cl_data->sensor_dma_addr[i],
GFP_KERNEL);
cl_data->sensor_sts[i] = 0;
cl_data->sensor_requested_cnt[i] = 0;
@@ -187,7 +187,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
}
info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
info.sensor_idx = cl_idx;
- info.phys_address = cl_data->sensor_phys_addr[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
if (!cl_data->report_descr[i]) {
@@ -212,7 +212,7 @@ cleanup:
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
kfree(cl_data->feature_report[i]);
kfree(cl_data->input_report[i]);
@@ -238,7 +238,7 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
}
kfree(cl_data);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 6be0783d885c..d7eac1728e31 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -27,7 +27,7 @@ struct amdtp_cl_data {
int hid_descr_size[MAX_HID_DEVICES];
phys_addr_t phys_addr_base;
u32 *sensor_virt_addr[MAX_HID_DEVICES];
- phys_addr_t sensor_phys_addr[MAX_HID_DEVICES];
+ dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
u32 sensor_sts[MAX_HID_DEVICES];
u32 sensor_requested_cnt[MAX_HID_DEVICES];
u8 report_type[MAX_HID_DEVICES];
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index a51c7b76283b..dbac16641662 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -41,7 +41,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
cmd_param.s.buf_layout = 1;
cmd_param.s.buf_length = 16;
- writeq(info.phys_address, privdata->mmio + AMD_C2P_MSG2);
+ writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index e8be94f935b7..8f8d19b2cfe5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -67,7 +67,7 @@ struct amd_mp2_dev {
struct amd_mp2_sensor_info {
u8 sensor_idx;
u32 period;
- phys_addr_t phys_address;
+ dma_addr_t dma_address;
};
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index 3f0ed6a95223..ca556d39da2a 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -21,6 +21,39 @@
#include "hid-ids.h"
+#define CH_WIRELESS_CTL_REPORT_ID 0x11
+
+static int ch_report_wireless(struct hid_report *report, u8 *data, int size)
+{
+ struct hid_device *hdev = report->device;
+ struct input_dev *input;
+
+ if (report->id != CH_WIRELESS_CTL_REPORT_ID || report->maxfield != 1)
+ return 0;
+
+ input = report->field[0]->hidinput->input;
+ if (!input) {
+ hid_warn(hdev, "can't find wireless radio control's input");
+ return 0;
+ }
+
+ input_report_key(input, KEY_RFKILL, 1);
+ input_sync(input);
+ input_report_key(input, KEY_RFKILL, 0);
+ input_sync(input);
+
+ return 1;
+}
+
+static int ch_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ if (report->application == HID_GD_WIRELESS_RADIO_CTLS)
+ return ch_report_wireless(report, data, size);
+
+ return 0;
+}
+
#define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -77,10 +110,30 @@ static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Chicony hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Chicony hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ }
};
@@ -91,6 +144,8 @@ static struct hid_driver ch_driver = {
.id_table = ch_devices,
.report_fixup = ch_switch12_report_fixup,
.input_mapping = ch_input_mapping,
+ .probe = ch_probe,
+ .raw_event = ch_raw_event,
};
module_hid_driver(ch_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 56172fe6995c..097cb1ee3126 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
* Register a new field for this report.
*/
-static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
+static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
struct hid_field *field;
@@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field = kzalloc((sizeof(struct hid_field) +
usages * sizeof(struct hid_usage) +
- values * sizeof(unsigned)), GFP_KERNEL);
+ usages * sizeof(unsigned)), GFP_KERNEL);
if (!field)
return NULL;
@@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
usages = max_t(unsigned, parser->local.usage_index,
parser->global.report_count);
- field = hid_register_field(report, usages, parser->global.report_count);
+ field = hid_register_field(report, usages);
if (!field)
return 0;
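For context, a paraphrased sketch of how hid_register_field() carves both arrays out of the one allocation (taken from the surrounding hid-core.c code, not this hunk), which is why the single usages count now suffices for both the usage and value arrays:

	/* Paraphrased layout code; usages >= report_count by construction. */
	field->index = report->maxfield++;
	report->field[field->index] = field;
	field->usage = (struct hid_usage *)(field + 1);
	field->value = (s32 *)(field->usage + usages);
	field->report = report;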
@@ -1307,6 +1307,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
static s32 snto32(__u32 value, unsigned n)
{
+ if (!value || !n)
+ return 0;
+
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
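snto32() sign-extends an n-bit report field to 32 bits, and the new guard short-circuits the degenerate zero cases before the width switch. For widths other than the fast-pathed 8/16/32, the generic fallback amounts to the standard shift trick, sketched here (illustrative only, using the kernel's sign_extend32() helper from <linux/bitops.h>):

	/* Equivalent generic n-bit sign extension, 1 <= n <= 32. */
	static s32 snto32_generic(__u32 value, unsigned int n)
	{
		return sign_extend32(value, n - 1);
	}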
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 85a054f1ce38..d9319622da44 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -392,30 +392,34 @@ static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int hammer_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+static void hammer_folded_event(struct hid_device *hdev, bool folded)
{
unsigned long flags;
- if (usage->hid == HID_USAGE_KBD_FOLDED) {
- spin_lock_irqsave(&cbas_ec_lock, flags);
+ spin_lock_irqsave(&cbas_ec_lock, flags);
- /*
- * If we are getting events from Whiskers that means that it
- * is attached to the lid.
- */
- cbas_ec.base_present = true;
- cbas_ec.base_folded = value;
- hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
- cbas_ec.base_present, cbas_ec.base_folded);
-
- if (cbas_ec.input) {
- input_report_switch(cbas_ec.input,
- SW_TABLET_MODE, value);
- input_sync(cbas_ec.input);
- }
+ /*
+ * If we are getting events from Whiskers, that means it is
+ * attached to the lid.
+ */
+ cbas_ec.base_present = true;
+ cbas_ec.base_folded = folded;
+ hid_dbg(hdev, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
- spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ if (cbas_ec.input) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, folded);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+}
+
+static int hammer_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
+ hammer_folded_event(hid, value);
return 1; /* We handled this event */
}
@@ -457,6 +461,47 @@ static bool hammer_has_backlight_control(struct hid_device *hdev)
HID_GD_KEYBOARD, HID_AD_BRIGHTNESS);
}
+static void hammer_get_folded_state(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ char *buf;
+ int len, rlen;
+ int a;
+
+ report = hdev->report_enum[HID_INPUT_REPORT].report_id_hash[0x0];
+
+ if (!report || report->maxfield < 1)
+ return;
+
+ len = hid_report_len(report) + 1;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ rlen = hid_hw_raw_request(hdev, report->id, buf, len, report->type, HID_REQ_GET_REPORT);
+
+ if (rlen != len) {
+ hid_warn(hdev, "Unable to read base folded state: %d (expected %d)\n", rlen, len);
+ goto out;
+ }
+
+ for (a = 0; a < report->maxfield; a++) {
+ struct hid_field *field = report->field[a];
+
+ if (field->usage->hid == HID_USAGE_KBD_FOLDED) {
+ u32 value = hid_field_extract(hdev, buf+1,
+ field->report_offset, field->report_size);
+
+ hammer_folded_event(hdev, value);
+ break;
+ }
+ }
+
+out:
+ kfree(buf);
+}
+
static int hammer_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -481,6 +526,8 @@ static int hammer_probe(struct hid_device *hdev,
error = hid_hw_open(hdev);
if (error)
return error;
+
+ hammer_get_folded_state(hdev);
}
if (hammer_has_backlight_control(hdev)) {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4c5f23640f9c..e42aaae3138f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -40,6 +40,9 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
+#define USB_VENDOR_ID_ACTIVISION 0x1430
+#define USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE 0x474c
+
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
@@ -270,6 +273,7 @@
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+#define USB_DEVICE_ID_CHICONY_WIRELESS3 0x1236
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
@@ -389,6 +393,8 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -640,6 +646,8 @@
#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
#define USB_VENDOR_ID_ITE 0x048d
+#define I2C_VENDOR_ID_ITE 0x103c
+#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
@@ -1072,13 +1080,15 @@
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
+#define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
-#define USB_VENDOR_ID_SONY_GHLIVE 0x12ba
+#define USB_VENDOR_ID_SONY_RHYTHM 0x12ba
#define USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE 0x074b
+#define USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE 0x0100
#define USB_VENDOR_ID_SINO_LITE 0x1345
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index dc7f6b4a775c..236bccd37760 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -322,6 +322,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1852,6 +1856,16 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
list_for_each_entry(hidinput, &hid->inputs, list) {
if (hidinput->application == report->application)
return hidinput;
+
+ /*
+ * Keep SystemControl and ConsumerControl applications together
+ * with the main keyboard, if present.
+ */
+ if ((report->application == HID_GD_SYSTEM_CONTROL ||
+ report->application == HID_CP_CONSUMER_CONTROL) &&
+ hidinput->application == HID_GD_KEYBOARD) {
+ return hidinput;
+ }
}
return NULL;
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 22bfbebceaf4..14fc068affad 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -23,11 +23,16 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
rdesc[163] = HID_MAIN_ITEM_RELATIVE;
}
- /* For Acer One S1002 keyboard-dock */
+ /* For Acer One S1002/S1003 keyboard-dock */
if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
- hid_info(hdev, "Fixing up Acer S1002 ITE keyboard report descriptor\n");
+ hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
rdesc[186] = HID_MAIN_ITEM_RELATIVE;
}
+ /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
+ if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
+ hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
+ rdesc[185] = HID_MAIN_ITEM_RELATIVE;
+ }
}
return rdesc;
@@ -114,7 +119,8 @@ static const struct hid_device_id ite_devices[] = {
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
- USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index fcaf8466e627..bfbba0d41933 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -647,7 +647,7 @@ static void lg_g15_input_close(struct input_dev *dev)
static int lg_g15_register_led(struct lg_g15_data *g15, int i)
{
- const char * const led_names[] = {
+ static const char * const led_names[] = {
"g15::kbd_backlight",
"g15::lcd_backlight",
"g15::macro_preset1",
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 1ffcfc9a1e03..271bd8d24339 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
case 0x07:
device_type = "eQUAD step 4 Gaming";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x08:
device_type = "eQUAD step 4 for gamepads";
@@ -994,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0d:
- device_type = "eQUAD Lightspeed 1_1";
+ device_type = "eQUAD Lightspeed 1.1";
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ case 0x0f:
+ device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;
@@ -1869,6 +1875,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc531),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech G602 receiver (0xc537) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc537),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f85781464807..d459e2dbe647 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -92,6 +92,8 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
+#define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5)
+#define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6)
#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -152,6 +154,7 @@ struct hidpp_battery {
int voltage;
int charge_type;
bool online;
+ u8 supported_levels_1004;
};
/**
@@ -1171,7 +1174,7 @@ static int hidpp20_batterylevel_get_battery_info(struct hidpp_device *hidpp,
return 0;
}
-static int hidpp20_query_battery_info(struct hidpp_device *hidpp)
+static int hidpp20_query_battery_info_1000(struct hidpp_device *hidpp)
{
u8 feature_type;
int ret;
@@ -1208,7 +1211,7 @@ static int hidpp20_query_battery_info(struct hidpp_device *hidpp)
return 0;
}
-static int hidpp20_battery_event(struct hidpp_device *hidpp,
+static int hidpp20_battery_event_1000(struct hidpp_device *hidpp,
u8 *data, int size)
{
struct hidpp_report *report = (struct hidpp_report *)data;
@@ -1380,6 +1383,224 @@ static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1004: Unified battery */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_UNIFIED_BATTERY 0x1004
+
+#define CMD_UNIFIED_BATTERY_GET_CAPABILITIES 0x00
+#define CMD_UNIFIED_BATTERY_GET_STATUS 0x10
+
+#define EVENT_UNIFIED_BATTERY_STATUS_EVENT 0x00
+
+#define FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL BIT(0)
+#define FLAG_UNIFIED_BATTERY_LEVEL_LOW BIT(1)
+#define FLAG_UNIFIED_BATTERY_LEVEL_GOOD BIT(2)
+#define FLAG_UNIFIED_BATTERY_LEVEL_FULL BIT(3)
+
+#define FLAG_UNIFIED_BATTERY_FLAGS_RECHARGEABLE BIT(0)
+#define FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE BIT(1)
+
+static int hidpp20_unifiedbattery_get_capabilities(struct hidpp_device *hidpp,
+ u8 feature_index)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS ||
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) {
+ /* we have already set the device capabilities, so let's skip */
+ return 0;
+ }
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_UNIFIED_BATTERY_GET_CAPABILITIES,
+ NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * If the device supports state of charge (battery percentage) we won't
+ * export the battery level information. There are four possible battery
+ * levels and all of them are optional; the device might not support any
+ * of them, so we are better off with the battery percentage.
+ */
+ if (params[1] & FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_PERCENTAGE;
+ hidpp->battery.supported_levels_1004 = 0;
+ } else {
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS;
+ hidpp->battery.supported_levels_1004 = params[0];
+ }
+
+ return 0;
+}
+
+static int hidpp20_unifiedbattery_map_status(struct hidpp_device *hidpp,
+ u8 charging_status,
+ u8 external_power_status)
+{
+ int status;
+
+ switch (charging_status) {
+ case 0: /* discharging */
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 1: /* charging */
+ case 2: /* charging slow */
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 3: /* complete */
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case 4: /* error */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ hid_info(hidpp->hid_dev, "%s: charging error",
+ hidpp->name);
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ }
+
+ return status;
+}
+
+static int hidpp20_unifiedbattery_map_level(struct hidpp_device *hidpp,
+ u8 battery_level)
+{
+ /* clear unsupported level bits */
+ battery_level &= hidpp->battery.supported_levels_1004;
+
+ if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_FULL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_GOOD)
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_LOW)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+ return POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+}
+
+static int hidpp20_unifiedbattery_get_status(struct hidpp_device *hidpp,
+ u8 feature_index,
+ u8 *state_of_charge,
+ int *status,
+ int *level)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_UNIFIED_BATTERY_GET_STATUS,
+ NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ *state_of_charge = params[0];
+ *status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]);
+ *level = hidpp20_unifiedbattery_map_level(hidpp, params[1]);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ u8 state_of_charge;
+ int status, level;
+
+ if (hidpp->battery.feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_UNIFIED_BATTERY,
+ &hidpp->battery.feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_unifiedbattery_get_capabilities(hidpp,
+ hidpp->battery.feature_index);
+ if (ret)
+ return ret;
+
+ ret = hidpp20_unifiedbattery_get_status(hidpp,
+ hidpp->battery.feature_index,
+ &state_of_charge,
+ &status,
+ &level);
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_UNIFIED_BATTERY;
+ hidpp->battery.capacity = state_of_charge;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.online = true;
+
+ return 0;
+}
+
+static int hidpp20_battery_event_1004(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ u8 *params = (u8 *)report->fap.params;
+ int state_of_charge, status, level;
+ bool changed;
+
+ if (report->fap.feature_index != hidpp->battery.feature_index ||
+ report->fap.funcindex_clientid != EVENT_UNIFIED_BATTERY_STATUS_EVENT)
+ return 0;
+
+ state_of_charge = params[0];
+ status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]);
+ level = hidpp20_unifiedbattery_map_level(hidpp, params[1]);
+
+ changed = status != hidpp->battery.status ||
+ (state_of_charge != hidpp->battery.capacity &&
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) ||
+ (level != hidpp->battery.level &&
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS);
+
+ if (changed) {
+ hidpp->battery.capacity = state_of_charge;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Battery feature helpers */
+/* -------------------------------------------------------------------------- */
+
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -3307,7 +3528,10 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- ret = hidpp20_battery_event(hidpp, data, size);
+ ret = hidpp20_battery_event_1000(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ ret = hidpp20_battery_event_1004(hidpp, data, size);
if (ret != 0)
return ret;
ret = hidpp_solar_battery_event(hidpp, data, size);
@@ -3443,9 +3667,14 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
else {
- ret = hidpp20_query_battery_voltage_info(hidpp);
+ /* We only support one battery feature at a time, so check the
+ ones that report battery level first and leave voltage
+ for last. */
+ ret = hidpp20_query_battery_info_1000(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info_1004(hidpp);
if (ret)
- ret = hidpp20_query_battery_info(hidpp);
+ ret = hidpp20_query_battery_voltage_info(hidpp);
}
if (ret)
@@ -3473,7 +3702,8 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
- if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE ||
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY;
@@ -3650,8 +3880,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
hidpp20_query_battery_voltage_info(hidpp);
+ else if (hidpp->capabilities & HIDPP_CAPABILITY_UNIFIED_BATTERY)
+ hidpp20_query_battery_info_1004(hidpp);
else
- hidpp20_query_battery_info(hidpp);
+ hidpp20_query_battery_info_1000(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
@@ -4053,6 +4285,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX Master mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Ergo trackball over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* MX Master 3 mouse over Bluetooth */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index d670bcd57bde..9d9f3e1bd5f4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1746,6 +1747,13 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
#ifdef CONFIG_PM
+static int mt_suspend(struct hid_device *hdev, pm_message_t state)
+{
+ /* High latency is desirable for power savings during S3/S0ix */
+ mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ return 0;
+}
+
static int mt_reset_resume(struct hid_device *hdev)
{
mt_release_contacts(hdev);
@@ -1761,6 +1769,8 @@ static int mt_resume(struct hid_device *hdev)
hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+
return 0;
}
#endif
@@ -2054,6 +2064,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
@@ -2150,6 +2164,7 @@ static struct hid_driver mt_driver = {
.event = mt_event,
.report = mt_report,
#ifdef CONFIG_PM
+ .suspend = mt_suspend,
.reset_resume = mt_reset_resume,
.resume = mt_resume,
#endif
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
new file mode 100644
index 000000000000..ab7c82c2e886
--- /dev/null
+++ b/drivers/hid/hid-playstation.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for Sony DualSense(TM) controller.
+ *
+ * Copyright (c) 2020 Sony Interactive Entertainment
+ */
+
+#include <linux/bits.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/idr.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+#include "hid-ids.h"
+
+/* List of connected playstation devices. */
+static DEFINE_MUTEX(ps_devices_lock);
+static LIST_HEAD(ps_devices_list);
+
+static DEFINE_IDA(ps_player_id_allocator);
+
+#define HID_PLAYSTATION_VERSION_PATCH 0x8000
+
+/* Base class for playstation devices. */
+struct ps_device {
+ struct list_head list;
+ struct hid_device *hdev;
+ spinlock_t lock;
+
+ uint32_t player_id;
+
+ struct power_supply_desc battery_desc;
+ struct power_supply *battery;
+ uint8_t battery_capacity;
+ int battery_status;
+
+ uint8_t mac_address[6]; /* Note: stored in little endian order. */
+ uint32_t hw_version;
+ uint32_t fw_version;
+
+ int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size);
+};
+
+/* Calibration data for playstation motion sensors. */
+struct ps_calibration_data {
+ int abs_code;
+ short bias;
+ int sens_numer;
+ int sens_denom;
+};
+
+/* Seed values for DualShock4 / DualSense CRC32 for different report types. */
+#define PS_INPUT_CRC32_SEED 0xA1
+#define PS_OUTPUT_CRC32_SEED 0xA2
+#define PS_FEATURE_CRC32_SEED 0xA3
+
+#define DS_INPUT_REPORT_USB 0x01
+#define DS_INPUT_REPORT_USB_SIZE 64
+#define DS_INPUT_REPORT_BT 0x31
+#define DS_INPUT_REPORT_BT_SIZE 78
+#define DS_OUTPUT_REPORT_USB 0x02
+#define DS_OUTPUT_REPORT_USB_SIZE 63
+#define DS_OUTPUT_REPORT_BT 0x31
+#define DS_OUTPUT_REPORT_BT_SIZE 78
+
+#define DS_FEATURE_REPORT_CALIBRATION 0x05
+#define DS_FEATURE_REPORT_CALIBRATION_SIZE 41
+#define DS_FEATURE_REPORT_PAIRING_INFO 0x09
+#define DS_FEATURE_REPORT_PAIRING_INFO_SIZE 20
+#define DS_FEATURE_REPORT_FIRMWARE_INFO 0x20
+#define DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE 64
+
+/* Button masks for DualSense input report. */
+#define DS_BUTTONS0_HAT_SWITCH GENMASK(3, 0)
+#define DS_BUTTONS0_SQUARE BIT(4)
+#define DS_BUTTONS0_CROSS BIT(5)
+#define DS_BUTTONS0_CIRCLE BIT(6)
+#define DS_BUTTONS0_TRIANGLE BIT(7)
+#define DS_BUTTONS1_L1 BIT(0)
+#define DS_BUTTONS1_R1 BIT(1)
+#define DS_BUTTONS1_L2 BIT(2)
+#define DS_BUTTONS1_R2 BIT(3)
+#define DS_BUTTONS1_CREATE BIT(4)
+#define DS_BUTTONS1_OPTIONS BIT(5)
+#define DS_BUTTONS1_L3 BIT(6)
+#define DS_BUTTONS1_R3 BIT(7)
+#define DS_BUTTONS2_PS_HOME BIT(0)
+#define DS_BUTTONS2_TOUCHPAD BIT(1)
+#define DS_BUTTONS2_MIC_MUTE BIT(2)
+
+/* Status field of DualSense input report. */
+#define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0)
+#define DS_STATUS_CHARGING GENMASK(7, 4)
+#define DS_STATUS_CHARGING_SHIFT 4
+
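A sketch of decoding that status byte with the masks above (the surrounding parser is outside this excerpt, so the variable names are illustrative):

	/* Hypothetical decode of dualsense_input_report.status: */
	uint8_t battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY;
	uint8_t charging_status = (ds_report->status & DS_STATUS_CHARGING) >>
				  DS_STATUS_CHARGING_SHIFT;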
+/*
+ * Status of a DualSense touch point contact.
+ * Contact IDs with the highest bit set are 'inactive'
+ * and any associated data is then invalid.
+ */
+#define DS_TOUCH_POINT_INACTIVE BIT(7)
+
+/* Magic value required in the tag field of Bluetooth output reports. */
+#define DS_OUTPUT_TAG 0x10
+/* Flags for DualSense output report. */
+#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
+#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
+#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
+#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
+#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
+#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
+#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
+
+/* DualSense hardware limits */
+#define DS_ACC_RES_PER_G 8192
+#define DS_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS_GYRO_RES_PER_DEG_S 1024
+#define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS_TOUCHPAD_WIDTH 1920
+#define DS_TOUCHPAD_HEIGHT 1080
+
+struct dualsense {
+ struct ps_device base;
+ struct input_dev *gamepad;
+ struct input_dev *sensors;
+ struct input_dev *touchpad;
+
+ /* Calibration data for accelerometer and gyroscope. */
+ struct ps_calibration_data accel_calib_data[3];
+ struct ps_calibration_data gyro_calib_data[3];
+
+ /* Timestamp for sensor data */
+ bool sensor_timestamp_initialized;
+ uint32_t prev_sensor_timestamp;
+ uint32_t sensor_timestamp_us;
+
+ /* Compatible rumble state */
+ bool update_rumble;
+ uint8_t motor_left;
+ uint8_t motor_right;
+
+ /* RGB lightbar */
+ bool update_lightbar;
+ uint8_t lightbar_red;
+ uint8_t lightbar_green;
+ uint8_t lightbar_blue;
+
+ /* Microphone */
+ bool update_mic_mute;
+ bool mic_muted;
+ bool last_btn_mic_state;
+
+ /* Player leds */
+ bool update_player_leds;
+ uint8_t player_leds_state;
+ struct led_classdev player_leds[5];
+
+ struct work_struct output_worker;
+ void *output_report_dmabuf;
+ uint8_t output_seq; /* Sequence number for output report. */
+};
+
+struct dualsense_touch_point {
+ uint8_t contact;
+ uint8_t x_lo;
+ uint8_t x_hi:4, y_lo:4;
+ uint8_t y_hi;
+} __packed;
+static_assert(sizeof(struct dualsense_touch_point) == 4);
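The 12-bit touchpad coordinates are split across the x_hi:4/y_lo:4 byte; a sketch of reassembling one point (the parser itself is an assumption, the field names are from the struct above):

	/* Illustrative touch-point decode: */
	bool active = !(point->contact & DS_TOUCH_POINT_INACTIVE);
	unsigned int x = (point->x_hi << 8) | point->x_lo;
	unsigned int y = (point->y_hi << 4) | point->y_lo;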
+
+/* Main DualSense input report excluding any BT/USB specific headers. */
+struct dualsense_input_report {
+ uint8_t x, y;
+ uint8_t rx, ry;
+ uint8_t z, rz;
+ uint8_t seq_number;
+ uint8_t buttons[4];
+ uint8_t reserved[4];
+
+ /* Motion sensors */
+ __le16 gyro[3]; /* x, y, z */
+ __le16 accel[3]; /* x, y, z */
+ __le32 sensor_timestamp;
+ uint8_t reserved2;
+
+ /* Touchpad */
+ struct dualsense_touch_point points[2];
+
+ uint8_t reserved3[12];
+ uint8_t status;
+ uint8_t reserved4[10];
+} __packed;
+/* The common input report size equals the USB report size minus 1 byte for the ReportID. */
+static_assert(sizeof(struct dualsense_input_report) == DS_INPUT_REPORT_USB_SIZE - 1);
+
+/* Common data between DualSense BT/USB main output report. */
+struct dualsense_output_report_common {
+ uint8_t valid_flag0;
+ uint8_t valid_flag1;
+
+ /* For DualShock 4 compatibility mode. */
+ uint8_t motor_right;
+ uint8_t motor_left;
+
+ /* Audio controls */
+ uint8_t reserved[4];
+ uint8_t mute_button_led;
+
+ uint8_t power_save_control;
+ uint8_t reserved2[28];
+
+ /* LEDs and lightbar */
+ uint8_t valid_flag2;
+ uint8_t reserved3[2];
+ uint8_t lightbar_setup;
+ uint8_t led_brightness;
+ uint8_t player_leds;
+ uint8_t lightbar_red;
+ uint8_t lightbar_green;
+ uint8_t lightbar_blue;
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_common) == 47);
+
+struct dualsense_output_report_bt {
+ uint8_t report_id; /* 0x31 */
+ uint8_t seq_tag;
+ uint8_t tag;
+ struct dualsense_output_report_common common;
+ uint8_t reserved[24];
+ __le32 crc32;
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_bt) == DS_OUTPUT_REPORT_BT_SIZE);
+
+struct dualsense_output_report_usb {
+ uint8_t report_id; /* 0x02 */
+ struct dualsense_output_report_common common;
+ uint8_t reserved[15];
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB_SIZE);
+
+/*
+ * The DualSense has a main output report used to control most features. It is
+ * largely the same between Bluetooth and USB except for different headers and CRC.
+ * This structure hides the differences between the two to simplify sending output reports.
+ */
+struct dualsense_output_report {
+ uint8_t *data; /* Start of data */
+ uint8_t len; /* Size of output report */
+
+ /* Points to the Bluetooth data payload for a Bluetooth report, else NULL. */
+ struct dualsense_output_report_bt *bt;
+ /* Points to the USB data payload for a USB report, else NULL. */
+ struct dualsense_output_report_usb *usb;
+ /* Points to common section of report, so past any headers. */
+ struct dualsense_output_report_common *common;
+};
+
+/*
+ * Common gamepad buttons across DualShock 3 / 4 and DualSense.
+ * Note: for devices with a touchpad, the touchpad button is not included
+ * as it will be part of the touchpad device.
+ */
+static const int ps_gamepad_buttons[] = {
+ BTN_WEST, /* Square */
+ BTN_NORTH, /* Triangle */
+ BTN_EAST, /* Circle */
+ BTN_SOUTH, /* Cross */
+ BTN_TL, /* L1 */
+ BTN_TR, /* R1 */
+ BTN_TL2, /* L2 */
+ BTN_TR2, /* R2 */
+ BTN_SELECT, /* Create (PS5) / Share (PS4) */
+ BTN_START, /* Option */
+ BTN_THUMBL, /* L3 */
+ BTN_THUMBR, /* R3 */
+ BTN_MODE, /* PS Home */
+};
+
+static const struct {int x; int y; } ps_gamepad_hat_mapping[] = {
+ {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1},
+ {0, 0},
+};
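A sketch of how the 4-bit hat value from the input report would index this table; entries 0-7 are the eight directions and entry 8 is the released state (the clamping and reporting shown here are an assumption based on the table layout):

	/* Hypothetical hat-switch decode: */
	unsigned int hat = buttons0 & DS_BUTTONS0_HAT_SWITCH;

	if (hat >= ARRAY_SIZE(ps_gamepad_hat_mapping))
		hat = 8; /* out-of-range means released */
	input_report_abs(gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[hat].x);
	input_report_abs(gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[hat].y);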
+
+/*
+ * Add a new ps_device to ps_devices if it doesn't exist.
+ * Return error on duplicate device, which can happen if the same
+ * device is connected using both Bluetooth and USB.
+ */
+static int ps_devices_list_add(struct ps_device *dev)
+{
+ struct ps_device *entry;
+
+ mutex_lock(&ps_devices_lock);
+ list_for_each_entry(entry, &ps_devices_list, list) {
+ if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) {
+ hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n",
+ dev->mac_address);
+ mutex_unlock(&ps_devices_lock);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&dev->list, &ps_devices_list);
+ mutex_unlock(&ps_devices_lock);
+ return 0;
+}
+
+static int ps_devices_list_remove(struct ps_device *dev)
+{
+ mutex_lock(&ps_devices_lock);
+ list_del(&dev->list);
+ mutex_unlock(&ps_devices_lock);
+ return 0;
+}
+
+static int ps_device_set_player_id(struct ps_device *dev)
+{
+ int ret = ida_alloc(&ps_player_id_allocator, GFP_KERNEL);
+
+ if (ret < 0)
+ return ret;
+
+ dev->player_id = ret;
+ return 0;
+}
+
+static void ps_device_release_player_id(struct ps_device *dev)
+{
+ ida_free(&ps_player_id_allocator, dev->player_id);
+
+ dev->player_id = U32_MAX;
+}
+
+static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return ERR_PTR(-ENOMEM);
+
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_dev->uniq = hdev->uniq;
+
+ if (name_suffix) {
+ input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
+ name_suffix);
+ if (!input_dev->name)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ input_dev->name = hdev->name;
+ }
+
+ input_set_drvdata(input_dev, hdev);
+
+ return input_dev;
+}
+
+static enum power_supply_property ps_power_supply_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static int ps_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ps_device *dev = power_supply_get_drvdata(psy);
+ uint8_t battery_capacity;
+ int battery_status;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ battery_capacity = dev->battery_capacity;
+ battery_status = dev->battery_status;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = battery_status;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ps_device_register_battery(struct ps_device *dev)
+{
+ struct power_supply *battery;
+ struct power_supply_config battery_cfg = { .drv_data = dev };
+ int ret;
+
+ dev->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ dev->battery_desc.properties = ps_power_supply_props;
+ dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props);
+ dev->battery_desc.get_property = ps_battery_get_property;
+ dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "ps-controller-battery-%pMR", dev->mac_address);
+ if (!dev->battery_desc.name)
+ return -ENOMEM;
+
+ battery = devm_power_supply_register(&dev->hdev->dev, &dev->battery_desc, &battery_cfg);
+ if (IS_ERR(battery)) {
+ ret = PTR_ERR(battery);
+ hid_err(dev->hdev, "Unable to register battery device: %d\n", ret);
+ return ret;
+ }
+ dev->battery = battery;
+
+ ret = power_supply_powers(dev->battery, &dev->hdev->dev);
+ if (ret) {
+ hid_err(dev->hdev, "Unable to activate battery device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Compute crc32 of HID data and compare against expected CRC. */
+static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc)
+{
+ uint32_t crc;
+
+ crc = crc32_le(0xFFFFFFFF, &seed, 1);
+ crc = ~crc32_le(crc, data, len);
+
+ return crc == report_crc;
+}
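The transmit side presumably seals reports with the same seeded CRC; a sketch of the inverse operation (assumed, since the send path is outside this excerpt; put_unaligned_le32() is from <asm/unaligned.h>):

	/* Hypothetical counterpart: append CRC over seed + payload. */
	uint8_t seed = PS_OUTPUT_CRC32_SEED;
	uint32_t crc;

	crc = crc32_le(0xFFFFFFFF, &seed, 1);
	crc = ~crc32_le(crc, data, len - 4); /* everything but trailing CRC */
	put_unaligned_le32(crc, &data[len - 4]);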
+
+static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
+ int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
+{
+ struct input_dev *gamepad;
+ unsigned int i;
+ int ret;
+
+ gamepad = ps_allocate_input_dev(hdev, NULL);
+ if (IS_ERR(gamepad))
+ return ERR_CAST(gamepad);
+
+ input_set_abs_params(gamepad, ABS_X, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_Y, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_Z, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RX, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RY, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RZ, 0, 255, 0, 0);
+
+ input_set_abs_params(gamepad, ABS_HAT0X, -1, 1, 0, 0);
+ input_set_abs_params(gamepad, ABS_HAT0Y, -1, 1, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ps_gamepad_buttons); i++)
+ input_set_capability(gamepad, EV_KEY, ps_gamepad_buttons[i]);
+
+#if IS_ENABLED(CONFIG_PLAYSTATION_FF)
+ if (play_effect) {
+ input_set_capability(gamepad, EV_FF, FF_RUMBLE);
+ input_ff_create_memless(gamepad, NULL, play_effect);
+ }
+#endif
+
+ ret = input_register_device(gamepad);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return gamepad;
+}
+
+static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *buf, size_t size)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(hdev, report_id, buf, size, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ hid_err(hdev, "Failed to retrieve feature with reportID %d: %d\n", report_id, ret);
+ return ret;
+ }
+
+ if (ret != size) {
+ hid_err(hdev, "Invalid byte count transferred, expected %zu got %d\n", size, ret);
+ return -EINVAL;
+ }
+
+ if (buf[0] != report_id) {
+ hid_err(hdev, "Invalid reportID received, expected %d got %d\n", report_id, buf[0]);
+ return -EINVAL;
+ }
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ /* Last 4 bytes contains crc32. */
+ uint8_t crc_offset = size - 4;
+ uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]);
+
+ if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) {
+ hid_err(hdev, "CRC check failed for reportID=%d\n", report_id);
+ return -EILSEQ;
+ }
+ }
+
+ return 0;
+}
+
+static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res,
+ int gyro_range, int gyro_res)
+{
+ struct input_dev *sensors;
+ int ret;
+
+ sensors = ps_allocate_input_dev(hdev, "Motion Sensors");
+ if (IS_ERR(sensors))
+ return ERR_CAST(sensors);
+
+ __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit);
+ __set_bit(EV_MSC, sensors->evbit);
+ __set_bit(MSC_TIMESTAMP, sensors->mscbit);
+
+ /* Accelerometer */
+ input_set_abs_params(sensors, ABS_X, -accel_range, accel_range, 16, 0);
+ input_set_abs_params(sensors, ABS_Y, -accel_range, accel_range, 16, 0);
+ input_set_abs_params(sensors, ABS_Z, -accel_range, accel_range, 16, 0);
+ input_abs_set_res(sensors, ABS_X, accel_res);
+ input_abs_set_res(sensors, ABS_Y, accel_res);
+ input_abs_set_res(sensors, ABS_Z, accel_res);
+
+ /* Gyroscope */
+ input_set_abs_params(sensors, ABS_RX, -gyro_range, gyro_range, 16, 0);
+ input_set_abs_params(sensors, ABS_RY, -gyro_range, gyro_range, 16, 0);
+ input_set_abs_params(sensors, ABS_RZ, -gyro_range, gyro_range, 16, 0);
+ input_abs_set_res(sensors, ABS_RX, gyro_res);
+ input_abs_set_res(sensors, ABS_RY, gyro_res);
+ input_abs_set_res(sensors, ABS_RZ, gyro_res);
+
+ ret = input_register_device(sensors);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sensors;
+}
+
+static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height,
+ unsigned int num_contacts)
+{
+ struct input_dev *touchpad;
+ int ret;
+
+ touchpad = ps_allocate_input_dev(hdev, "Touchpad");
+ if (IS_ERR(touchpad))
+ return ERR_CAST(touchpad);
+
+ /* Map button underneath touchpad to BTN_LEFT. */
+ input_set_capability(touchpad, EV_KEY, BTN_LEFT);
+ __set_bit(INPUT_PROP_BUTTONPAD, touchpad->propbit);
+
+ input_set_abs_params(touchpad, ABS_MT_POSITION_X, 0, width - 1, 0, 0);
+ input_set_abs_params(touchpad, ABS_MT_POSITION_Y, 0, height - 1, 0, 0);
+
+ ret = input_mt_init_slots(touchpad, num_contacts, INPUT_MT_POINTER);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = input_register_device(touchpad);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return touchpad;
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct ps_device *ps_dev = hid_get_drvdata(hdev);
+
+ return sysfs_emit(buf, "0x%08x\n", ps_dev->fw_version);
+}
+
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t hardware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct ps_device *ps_dev = hid_get_drvdata(hdev);
+
+ return sysfs_emit(buf, "0x%08x\n", ps_dev->hw_version);
+}
+
+static DEVICE_ATTR_RO(hardware_version);
+
+static struct attribute *ps_device_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_hardware_version.attr,
+ NULL
+};
+
+static const struct attribute_group ps_device_attribute_group = {
+ .attrs = ps_device_attributes,
+};
+
+static int dualsense_get_calibration_data(struct dualsense *ds)
+{
+ short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
+ short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
+ short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
+ short gyro_speed_plus, gyro_speed_minus;
+ short acc_x_plus, acc_x_minus;
+ short acc_y_plus, acc_y_minus;
+ short acc_z_plus, acc_z_minus;
+ int speed_2x;
+ int range_2g;
+ int ret = 0;
+ uint8_t *buf;
+
+ buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf,
+ DS_FEATURE_REPORT_CALIBRATION_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense calibration info: %d\n", ret);
+ goto err_free;
+ }
+
+ gyro_pitch_bias = get_unaligned_le16(&buf[1]);
+ gyro_yaw_bias = get_unaligned_le16(&buf[3]);
+ gyro_roll_bias = get_unaligned_le16(&buf[5]);
+ gyro_pitch_plus = get_unaligned_le16(&buf[7]);
+ gyro_pitch_minus = get_unaligned_le16(&buf[9]);
+ gyro_yaw_plus = get_unaligned_le16(&buf[11]);
+ gyro_yaw_minus = get_unaligned_le16(&buf[13]);
+ gyro_roll_plus = get_unaligned_le16(&buf[15]);
+ gyro_roll_minus = get_unaligned_le16(&buf[17]);
+ gyro_speed_plus = get_unaligned_le16(&buf[19]);
+ gyro_speed_minus = get_unaligned_le16(&buf[21]);
+ acc_x_plus = get_unaligned_le16(&buf[23]);
+ acc_x_minus = get_unaligned_le16(&buf[25]);
+ acc_y_plus = get_unaligned_le16(&buf[27]);
+ acc_y_minus = get_unaligned_le16(&buf[29]);
+ acc_z_plus = get_unaligned_le16(&buf[31]);
+ acc_z_minus = get_unaligned_le16(&buf[33]);
+
+ /*
+ * Set gyroscope calibration and normalization parameters.
+ * Data values will be normalized to 1/DS_GYRO_RES_PER_DEG_S degree/s.
+ */
+ speed_2x = (gyro_speed_plus + gyro_speed_minus);
+ ds->gyro_calib_data[0].abs_code = ABS_RX;
+ ds->gyro_calib_data[0].bias = gyro_pitch_bias;
+ ds->gyro_calib_data[0].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
+
+ ds->gyro_calib_data[1].abs_code = ABS_RY;
+ ds->gyro_calib_data[1].bias = gyro_yaw_bias;
+ ds->gyro_calib_data[1].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
+
+ ds->gyro_calib_data[2].abs_code = ABS_RZ;
+ ds->gyro_calib_data[2].bias = gyro_roll_bias;
+ ds->gyro_calib_data[2].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
+
+ /*
+ * Set accelerometer calibration and normalization parameters.
+ * Data values will be normalized to 1/DS_ACC_RES_PER_G g.
+ */
+ range_2g = acc_x_plus - acc_x_minus;
+ ds->accel_calib_data[0].abs_code = ABS_X;
+ ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
+ ds->accel_calib_data[0].sens_numer = 2 * DS_ACC_RES_PER_G;
+ ds->accel_calib_data[0].sens_denom = range_2g;
+
+ range_2g = acc_y_plus - acc_y_minus;
+ ds->accel_calib_data[1].abs_code = ABS_Y;
+ ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
+ ds->accel_calib_data[1].sens_numer = 2 * DS_ACC_RES_PER_G;
+ ds->accel_calib_data[1].sens_denom = range_2g;
+
+ range_2g = acc_z_plus - acc_z_minus;
+ ds->accel_calib_data[2].abs_code = ABS_Z;
+ ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
+ ds->accel_calib_data[2].sens_numer = 2 * DS_ACC_RES_PER_G;
+ ds->accel_calib_data[2].sens_denom = range_2g;
+
+err_free:
+ kfree(buf);
+ return ret;
+}
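
A worked example of the normalization above, with illustrative numbers only: if gyro_speed_plus = gyro_speed_minus = 540 deg/s, gyro_pitch_plus = 2000, gyro_pitch_minus = -2000 and gyro_pitch_bias = 10, then a raw pitch sample of 510 is reported as

	mult_frac(1080 * DS_GYRO_RES_PER_DEG_S, 510 - 10, 4000)
		= 135 * DS_GYRO_RES_PER_DEG_S

i.e. 135 deg/s expressed in 1/DS_GYRO_RES_PER_DEG_S units, matching what dualsense_parse_report() later feeds to input_report_abs().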
+
+static int dualsense_get_firmware_info(struct dualsense *ds)
+{
+ uint8_t *buf;
+ int ret;
+
+ buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf,
+ DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret);
+ goto err_free;
+ }
+
+ ds->base.hw_version = get_unaligned_le32(&buf[24]);
+ ds->base.fw_version = get_unaligned_le32(&buf[28]);
+
+err_free:
+ kfree(buf);
+ return ret;
+}
+
+static int dualsense_get_mac_address(struct dualsense *ds)
+{
+ uint8_t *buf;
+ int ret = 0;
+
+ buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf,
+ DS_FEATURE_REPORT_PAIRING_INFO_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense pairing info: %d\n", ret);
+ goto err_free;
+ }
+
+ memcpy(ds->base.mac_address, &buf[1], sizeof(ds->base.mac_address));
+
+err_free:
+ kfree(buf);
+ return ret;
+}
+
+static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp,
+ void *buf)
+{
+ struct hid_device *hdev = ds->base.hdev;
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ struct dualsense_output_report_bt *bt = buf;
+
+ memset(bt, 0, sizeof(*bt));
+ bt->report_id = DS_OUTPUT_REPORT_BT;
+ bt->tag = DS_OUTPUT_TAG; /* Tag must be set. Exact meaning is unclear. */
+
+ /*
+ * The highest 4 bits are a sequence number, which needs to be
+ * incremented with every report. The lowest 4 bits are a tag and
+ * can be zero for now.
+ */
+ bt->seq_tag = (ds->output_seq << 4) | 0x0;
+ if (++ds->output_seq == 16)
+ ds->output_seq = 0;
+
+ rp->data = buf;
+ rp->len = sizeof(*bt);
+ rp->bt = bt;
+ rp->usb = NULL;
+ rp->common = &bt->common;
+ } else { /* USB */
+ struct dualsense_output_report_usb *usb = buf;
+
+ memset(usb, 0, sizeof(*usb));
+ usb->report_id = DS_OUTPUT_REPORT_USB;
+
+ rp->data = buf;
+ rp->len = sizeof(*usb);
+ rp->bt = NULL;
+ rp->usb = usb;
+ rp->common = &usb->common;
+ }
+}
+
+/*
+ * Helper function to send DualSense output reports. For Bluetooth reports,
+ * a CRC is appended in the last 4 bytes.
+ */
+static void dualsense_send_output_report(struct dualsense *ds,
+ struct dualsense_output_report *report)
+{
+ struct hid_device *hdev = ds->base.hdev;
+
+ /* Bluetooth packets need to be signed with a CRC in the last 4 bytes. */
+ if (report->bt) {
+ uint32_t crc;
+ uint8_t seed = PS_OUTPUT_CRC32_SEED;
+
+ crc = crc32_le(0xFFFFFFFF, &seed, 1);
+ crc = ~crc32_le(crc, report->data, report->len - 4);
+
+ report->bt->crc32 = cpu_to_le32(crc);
+ }
+
+ hid_hw_output_report(hdev, report->data, report->len);
+}
+
+static void dualsense_output_worker(struct work_struct *work)
+{
+ struct dualsense *ds = container_of(work, struct dualsense, output_worker);
+ struct dualsense_output_report report;
+ struct dualsense_output_report_common *common;
+ unsigned long flags;
+
+ dualsense_init_output_report(ds, &report, ds->output_report_dmabuf);
+ common = report.common;
+
+ spin_lock_irqsave(&ds->base.lock, flags);
+
+ if (ds->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
+ common->motor_left = ds->motor_left;
+ common->motor_right = ds->motor_right;
+ ds->update_rumble = false;
+ }
+
+ if (ds->update_lightbar) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
+ common->lightbar_red = ds->lightbar_red;
+ common->lightbar_green = ds->lightbar_green;
+ common->lightbar_blue = ds->lightbar_blue;
+
+ ds->update_lightbar = false;
+ }
+
+ if (ds->update_player_leds) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
+ common->player_leds = ds->player_leds_state;
+
+ ds->update_player_leds = false;
+ }
+
+ if (ds->update_mic_mute) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
+ common->mute_button_led = ds->mic_muted;
+
+ if (ds->mic_muted) {
+ /* Disable microphone */
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ } else {
+ /* Enable microphone */
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control &= ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ }
+
+ ds->update_mic_mute = false;
+ }
+
+ spin_unlock_irqrestore(&ds->base.lock, flags);
+
+ dualsense_send_output_report(ds, &report);
+}
+
+static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct hid_device *hdev = ps_dev->hdev;
+ struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
+ struct dualsense_input_report *ds_report;
+ uint8_t battery_data, battery_capacity, charging_status, value;
+ int battery_status;
+ uint32_t sensor_timestamp;
+ bool btn_mic_state;
+ unsigned long flags;
+ int i;
+
+ /*
+ * DualSense in USB uses the full HID report for reportID 1, but
+ * Bluetooth uses a minimal HID report for reportID 1 and reports
+ * the full report using reportID 49.
+ */
+ if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB &&
+ size == DS_INPUT_REPORT_USB_SIZE) {
+ ds_report = (struct dualsense_input_report *)&data[1];
+ } else if (hdev->bus == BUS_BLUETOOTH && report->id == DS_INPUT_REPORT_BT &&
+ size == DS_INPUT_REPORT_BT_SIZE) {
+ /* Last 4 bytes of input report contain crc32 */
+ uint32_t report_crc = get_unaligned_le32(&data[size - 4]);
+
+ if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
+ hid_err(hdev, "DualSense input CRC's check failed\n");
+ return -EILSEQ;
+ }
+
+ ds_report = (struct dualsense_input_report *)&data[2];
+ } else {
+ hid_err(hdev, "Unhandled reportID=%d\n", report->id);
+ return -1;
+ }
+
+ input_report_abs(ds->gamepad, ABS_X, ds_report->x);
+ input_report_abs(ds->gamepad, ABS_Y, ds_report->y);
+ input_report_abs(ds->gamepad, ABS_RX, ds_report->rx);
+ input_report_abs(ds->gamepad, ABS_RY, ds_report->ry);
+ input_report_abs(ds->gamepad, ABS_Z, ds_report->z);
+ input_report_abs(ds->gamepad, ABS_RZ, ds_report->rz);
+
+ value = ds_report->buttons[0] & DS_BUTTONS0_HAT_SWITCH;
+ if (value >= ARRAY_SIZE(ps_gamepad_hat_mapping))
+ value = 8; /* center */
+ input_report_abs(ds->gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[value].x);
+ input_report_abs(ds->gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[value].y);
+
+ input_report_key(ds->gamepad, BTN_WEST, ds_report->buttons[0] & DS_BUTTONS0_SQUARE);
+ input_report_key(ds->gamepad, BTN_SOUTH, ds_report->buttons[0] & DS_BUTTONS0_CROSS);
+ input_report_key(ds->gamepad, BTN_EAST, ds_report->buttons[0] & DS_BUTTONS0_CIRCLE);
+ input_report_key(ds->gamepad, BTN_NORTH, ds_report->buttons[0] & DS_BUTTONS0_TRIANGLE);
+ input_report_key(ds->gamepad, BTN_TL, ds_report->buttons[1] & DS_BUTTONS1_L1);
+ input_report_key(ds->gamepad, BTN_TR, ds_report->buttons[1] & DS_BUTTONS1_R1);
+ input_report_key(ds->gamepad, BTN_TL2, ds_report->buttons[1] & DS_BUTTONS1_L2);
+ input_report_key(ds->gamepad, BTN_TR2, ds_report->buttons[1] & DS_BUTTONS1_R2);
+ input_report_key(ds->gamepad, BTN_SELECT, ds_report->buttons[1] & DS_BUTTONS1_CREATE);
+ input_report_key(ds->gamepad, BTN_START, ds_report->buttons[1] & DS_BUTTONS1_OPTIONS);
+ input_report_key(ds->gamepad, BTN_THUMBL, ds_report->buttons[1] & DS_BUTTONS1_L3);
+ input_report_key(ds->gamepad, BTN_THUMBR, ds_report->buttons[1] & DS_BUTTONS1_R3);
+ input_report_key(ds->gamepad, BTN_MODE, ds_report->buttons[2] & DS_BUTTONS2_PS_HOME);
+ input_sync(ds->gamepad);
+
+ /*
+ * The DualSense has an internal microphone, which can be muted through a mute button
+ * on the device. The driver is expected to read the button state and program the device
+ * to mute/unmute audio at the hardware level.
+ */
+ btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE);
+ if (btn_mic_state && !ds->last_btn_mic_state) {
+ spin_lock_irqsave(&ps_dev->lock, flags);
+ ds->update_mic_mute = true;
+ ds->mic_muted = !ds->mic_muted; /* toggle */
+ spin_unlock_irqrestore(&ps_dev->lock, flags);
+
+ /* Schedule updating of microphone state at hardware level. */
+ schedule_work(&ds->output_worker);
+ }
+ ds->last_btn_mic_state = btn_mic_state;
+
+ /* Parse and calibrate gyroscope data. */
+ for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) {
+ int raw_data = (short)le16_to_cpu(ds_report->gyro[i]);
+ int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer,
+ raw_data - ds->gyro_calib_data[i].bias,
+ ds->gyro_calib_data[i].sens_denom);
+
+ input_report_abs(ds->sensors, ds->gyro_calib_data[i].abs_code, calib_data);
+ }
+
+ /* Parse and calibrate accelerometer data. */
+ for (i = 0; i < ARRAY_SIZE(ds_report->accel); i++) {
+ int raw_data = (short)le16_to_cpu(ds_report->accel[i]);
+ int calib_data = mult_frac(ds->accel_calib_data[i].sens_numer,
+ raw_data - ds->accel_calib_data[i].bias,
+ ds->accel_calib_data[i].sens_denom);
+
+ input_report_abs(ds->sensors, ds->accel_calib_data[i].abs_code, calib_data);
+ }
+
+ /* Convert timestamp (in 0.33 us units) to sensor_timestamp_us. */
+ sensor_timestamp = le32_to_cpu(ds_report->sensor_timestamp);
+ if (!ds->sensor_timestamp_initialized) {
+ ds->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp, 3);
+ ds->sensor_timestamp_initialized = true;
+ } else {
+ uint32_t delta;
+
+ if (ds->prev_sensor_timestamp > sensor_timestamp)
+ delta = (U32_MAX - ds->prev_sensor_timestamp + sensor_timestamp + 1);
+ else
+ delta = sensor_timestamp - ds->prev_sensor_timestamp;
+ ds->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta, 3);
+ }
+ ds->prev_sensor_timestamp = sensor_timestamp;
+ input_event(ds->sensors, EV_MSC, MSC_TIMESTAMP, ds->sensor_timestamp_us);
+ input_sync(ds->sensors);
+
+ for (i = 0; i < ARRAY_SIZE(ds_report->points); i++) {
+ struct dualsense_touch_point *point = &ds_report->points[i];
+ bool active = !(point->contact & DS_TOUCH_POINT_INACTIVE);
+
+ input_mt_slot(ds->touchpad, i);
+ input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active);
+
+ if (active) {
+ int x = (point->x_hi << 8) | point->x_lo;
+ int y = (point->y_hi << 4) | point->y_lo;
+
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y);
+ }
+ }
+ input_mt_sync_frame(ds->touchpad);
+ input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD);
+ input_sync(ds->touchpad);
+
+ battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY;
+ charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT;
+
+ switch (charging_status) {
+ case 0x0:
+ /*
+ * Each unit of battery data corresponds to 10%:
+ * 0 = 0-9%, 1 = 10-19%, ..., 10 = 100%.
+ */
+ battery_capacity = min(battery_data * 10 + 5, 100);
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0x1:
+ battery_capacity = min(battery_data * 10 + 5, 100);
+ battery_status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x2:
+ battery_capacity = 100;
+ battery_status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case 0xa: /* voltage or temperature out of range */
+ case 0xb: /* temperature error */
+ battery_capacity = 0;
+ battery_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 0xf: /* charging error */
+ default:
+ battery_capacity = 0;
+ battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ spin_lock_irqsave(&ps_dev->lock, flags);
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ spin_unlock_irqrestore(&ps_dev->lock, flags);
+
+ return 0;
+}
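
Two of the conversions in dualsense_parse_report(), restated as standalone helpers. A sketch only: the 4-byte touch-point layout is an assumption based on the x_hi/x_lo/y_hi/y_lo unpacking above, and the timestamp helper relies on unsigned subtraction wrapping modulo 2^32, which collapses the explicit wraparound branch:

#include <stdint.h>

/* 12-bit touchpad coordinates packed into bytes 1-3 of a touch point. */
static void ds_decode_touch(const uint8_t p[4], int *x, int *y)
{
	/*
	 * p[0] = contact flags, p[1] = x bits 7:0,
	 * p[2] = y bits 3:0 (high nibble) | x bits 11:8 (low nibble),
	 * p[3] = y bits 11:4.
	 */
	*x = ((p[2] & 0x0f) << 8) | p[1];
	*y = (p[3] << 4) | (p[2] >> 4);
}

/* Sensor timestamps tick at 3 per microsecond; advance a 64-bit us counter. */
static uint64_t ds_advance_timestamp_us(uint64_t ts_us, uint32_t prev, uint32_t cur)
{
	uint32_t delta = cur - prev;	/* wraps correctly modulo 2^32 */

	return ts_us + (delta + 1) / 3;	/* equals DIV_ROUND_CLOSEST(delta, 3) */
}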
+
+static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct dualsense *ds = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ spin_lock_irqsave(&ds->base.lock, flags);
+ ds->update_rumble = true;
+ ds->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds->motor_right = effect->u.rumble.weak_magnitude / 256;
+ spin_unlock_irqrestore(&ds->base.lock, flags);
+
+ schedule_work(&ds->output_worker);
+ return 0;
+}
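
From userspace, the memless force-feedback device set up in ps_gamepad_create() is driven through the standard evdev interface. A minimal sketch (the event-node path is hypothetical):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

/* Upload a rumble effect and start playing it; returns the open fd. */
static int rumble(const char *path) /* e.g. "/dev/input/eventN" */
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;					/* kernel assigns an id */
	effect.u.rumble.strong_magnitude = 0xc000;	/* divided by 256 above */
	effect.u.rumble.weak_magnitude = 0x4000;
	effect.replay.length = 1000;			/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0) {
		close(fd);
		return -1;
	}

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;					/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play)) {
		close(fd);
		return -1;
	}

	return fd; /* keep open while the effect plays */
}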
+
+static int dualsense_reset_leds(struct dualsense *ds)
+{
+ struct dualsense_output_report report;
+ uint8_t *buf;
+
+ buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dualsense_init_output_report(ds, &report, buf);
+ /*
+ * On Bluetooth the DualSense outputs an animation on the lightbar
+ * during startup and maintains a color afterwards. We need to explicitly
+ * reconfigure the lightbar before we can do any programming later on.
+ * In USB the lightbar is not on by default, but redoing the setup there
+ * doesn't hurt.
+ */
+ report.common->valid_flag2 = DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE;
+ report.common->lightbar_setup = DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT; /* Fade light out. */
+ dualsense_send_output_report(ds, &report);
+
+ kfree(buf);
+ return 0;
+}
+
+static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ds->base.lock, flags);
+ ds->update_lightbar = true;
+ ds->lightbar_red = red;
+ ds->lightbar_green = green;
+ ds->lightbar_blue = blue;
+ spin_unlock_irqrestore(&ds->base.lock, flags);
+
+ schedule_work(&ds->output_worker);
+}
+
+static void dualsense_set_player_leds(struct dualsense *ds)
+{
+ /*
+ * The DualSense controller has a row of 5 LEDs used for player ids.
+ * Behavior on the PlayStation 5 console is to center the player id
+ * across the LEDs, so e.g. player 1 would be "--x--" with x being 'on'.
+ * Follow a similar mapping here.
+ */
+ static const int player_ids[5] = {
+ BIT(2),
+ BIT(3) | BIT(1),
+ BIT(4) | BIT(2) | BIT(0),
+ BIT(4) | BIT(3) | BIT(1) | BIT(0),
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)
+ };
+
+ uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
+
+ ds->update_player_leds = true;
+ ds->player_leds_state = player_ids[player_id];
+ schedule_work(&ds->output_worker);
+}
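
As a worked example of the table above: player 1 (player_id 0) maps to BIT(2), i.e. the pattern "--x--", player 2 to BIT(3) | BIT(1), i.e. "-x-x-", and player 3 to BIT(4) | BIT(2) | BIT(0), i.e. "x-x-x".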
+
+static struct ps_device *dualsense_create(struct hid_device *hdev)
+{
+ struct dualsense *ds;
+ struct ps_device *ps_dev;
+ uint8_t max_output_report_size;
+ int ret;
+
+ ds = devm_kzalloc(&hdev->dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Patch version to allow userspace to distinguish between
+ * hid-generic vs hid-playstation axis and button mapping.
+ */
+ hdev->version |= HID_PLAYSTATION_VERSION_PATCH;
+
+ ps_dev = &ds->base;
+ ps_dev->hdev = hdev;
+ spin_lock_init(&ps_dev->lock);
+ ps_dev->battery_capacity = 100; /* initial value until parse_report. */
+ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ ps_dev->parse_report = dualsense_parse_report;
+ INIT_WORK(&ds->output_worker, dualsense_output_worker);
+ hid_set_drvdata(hdev, ds);
+
+ max_output_report_size = sizeof(struct dualsense_output_report_bt);
+ ds->output_report_dmabuf = devm_kzalloc(&hdev->dev, max_output_report_size, GFP_KERNEL);
+ if (!ds->output_report_dmabuf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dualsense_get_mac_address(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get MAC address from DualSense\n");
+ return ERR_PTR(ret);
+ }
+ snprintf(hdev->uniq, sizeof(hdev->uniq), "%pMR", ds->base.mac_address);
+
+ ret = dualsense_get_firmware_info(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get firmware info from DualSense\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = ps_devices_list_add(ps_dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = dualsense_get_calibration_data(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get calibration data from DualSense\n");
+ goto err;
+ }
+
+ ds->gamepad = ps_gamepad_create(hdev, dualsense_play_effect);
+ if (IS_ERR(ds->gamepad)) {
+ ret = PTR_ERR(ds->gamepad);
+ goto err;
+ }
+
+ ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G,
+ DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
+ if (IS_ERR(ds->sensors)) {
+ ret = PTR_ERR(ds->sensors);
+ goto err;
+ }
+
+ ds->touchpad = ps_touchpad_create(hdev, DS_TOUCHPAD_WIDTH, DS_TOUCHPAD_HEIGHT, 2);
+ if (IS_ERR(ds->touchpad)) {
+ ret = PTR_ERR(ds->touchpad);
+ goto err;
+ }
+
+ ret = ps_device_register_battery(ps_dev);
+ if (ret)
+ goto err;
+
+ /*
+ * The hardware may have control over the LEDs (e.g. in Bluetooth on startup).
+ * Reset the LEDs (lightbar, mute, player leds), so we can control them
+ * from software.
+ */
+ ret = dualsense_reset_leds(ds);
+ if (ret)
+ goto err;
+
+ dualsense_set_lightbar(ds, 0, 0, 128); /* blue */
+
+ ret = ps_device_set_player_id(ps_dev);
+ if (ret) {
+ hid_err(hdev, "Failed to assign player id for DualSense: %d\n", ret);
+ goto err;
+ }
+
+ /* Set player LEDs to our player id. */
+ dualsense_set_player_leds(ds);
+
+ /*
+ * Reporting the hardware and firmware versions is important, as there are
+ * frequent updates which can change behavior.
+ */
+ hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n",
+ ds->base.hw_version, ds->base.fw_version);
+
+ return &ds->base;
+
+err:
+ ps_devices_list_remove(ps_dev);
+ return ERR_PTR(ret);
+}
+
+static int ps_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct ps_device *dev = hid_get_drvdata(hdev);
+
+ if (dev && dev->parse_report)
+ return dev->parse_report(dev, report, data, size);
+
+ return 0;
+}
+
+static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct ps_device *dev;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "Failed to start HID device\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "Failed to open HID device\n");
+ goto err_stop;
+ }
+
+ if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) {
+ dev = dualsense_create(hdev);
+ if (IS_ERR(dev)) {
+ hid_err(hdev, "Failed to create dualsense.\n");
+ ret = PTR_ERR(dev);
+ goto err_close;
+ }
+ }
+
+ ret = devm_device_add_group(&hdev->dev, &ps_device_attribute_group);
+ if (ret) {
+ hid_err(hdev, "Failed to register sysfs nodes.\n");
+ goto err_close;
+ }
+
+ return ret;
+
+err_close:
+ hid_hw_close(hdev);
+err_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void ps_remove(struct hid_device *hdev)
+{
+ struct ps_device *dev = hid_get_drvdata(hdev);
+
+ ps_devices_list_remove(dev);
+ ps_device_release_player_id(dev);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id ps_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, ps_devices);
+
+static struct hid_driver ps_driver = {
+ .name = "playstation",
+ .id_table = ps_devices,
+ .probe = ps_probe,
+ .remove = ps_remove,
+ .raw_event = ps_raw_event,
+};
+
+static int __init ps_init(void)
+{
+ return hid_register_driver(&ps_driver);
+}
+
+static void __exit ps_exit(void)
+{
+ hid_unregister_driver(&ps_driver);
+ ida_destroy(&ps_player_id_allocator);
+}
+
+module_init(ps_init);
+module_exit(ps_exit);
+
+MODULE_AUTHOR("Sony Interactive Entertainment");
+MODULE_DESCRIPTION("HID Driver for PlayStation peripherals.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index d9ca874dffac..1a9daf03dbfa 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -180,7 +180,6 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
- { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
@@ -1029,7 +1028,7 @@ static DEFINE_MUTEX(dquirks_lock);
/* Runtime ("dynamic") quirks manipulation functions */
/**
- * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * hid_exists_dquirk - find any dynamic quirks for a HID device
* @hdev: the HID device to match
*
* Description:
@@ -1037,7 +1036,7 @@ static DEFINE_MUTEX(dquirks_lock);
* the pointer to the relevant struct hid_device_id if found.
* Must be called with a read lock held on dquirks_lock.
*
- * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ * Return: NULL if no quirk found, struct hid_device_id * if found.
*/
static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
{
@@ -1061,7 +1060,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
/**
- * hid_modify_dquirk: add/replace a HID quirk
+ * hid_modify_dquirk - add/replace a HID quirk
* @id: the HID device to match
* @quirks: the unsigned long quirks value to add/replace
*
@@ -1070,7 +1069,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
* quirks value with what was provided. Otherwise, add the quirk
* to the dynamic quirks list.
*
- * Returns: 0 OK, -error on failure.
+ * Return: 0 OK, -error on failure.
*/
static int hid_modify_dquirk(const struct hid_device_id *id,
const unsigned long quirks)
@@ -1122,7 +1121,7 @@ static int hid_modify_dquirk(const struct hid_device_id *id,
}
/**
- * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * hid_remove_all_dquirks - remove all runtime HID quirks from memory
* @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
*
* Description:
@@ -1146,7 +1145,10 @@ static void hid_remove_all_dquirks(__u16 bus)
}
/**
- * hid_quirks_init: apply HID quirks specified at module load time
+ * hid_quirks_init - apply HID quirks specified at module load time
+ * @quirks_param: array of quirks strings (vendor:product:quirks)
+ * @bus: bus type
+ * @count: number of quirks to check
*/
int hid_quirks_init(char **quirks_param, __u16 bus, int count)
{
@@ -1177,7 +1179,7 @@ int hid_quirks_init(char **quirks_param, __u16 bus, int count)
EXPORT_SYMBOL_GPL(hid_quirks_init);
/**
- * hid_quirks_exit: release memory associated with dynamic_quirks
+ * hid_quirks_exit - release memory associated with dynamic_quirks
* @bus: a bus to match against
*
* Description:
@@ -1194,14 +1196,14 @@ void hid_quirks_exit(__u16 bus)
EXPORT_SYMBOL_GPL(hid_quirks_exit);
/**
- * hid_gets_squirk: return any static quirks for a HID device
+ * hid_gets_squirk - return any static quirks for a HID device
* @hdev: the HID device to match
*
* Description:
* Given a HID device, return a pointer to the quirked hid_device_id entry
* associated with that device.
*
- * Returns: the quirks.
+ * Return: the quirks.
*/
static unsigned long hid_gets_squirk(const struct hid_device *hdev)
{
@@ -1225,13 +1227,13 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
}
/**
- * hid_lookup_quirk: return any quirks associated with a HID device
+ * hid_lookup_quirk - return any quirks associated with a HID device
* @hdev: the HID device to look for
*
* Description:
* Given a HID device, return any quirks associated with that device.
*
- * Returns: an unsigned long quirks value.
+ * Return: an unsigned long quirks value.
*/
unsigned long hid_lookup_quirk(const struct hid_device *hdev)
{
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index ffcd444ae2ba..4556d2a50f75 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -42,7 +42,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
if (retval)
return retval;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state);
+ return sysfs_emit(buf, "%d\n", temp_buf.state);
}
static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
@@ -92,7 +92,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
if (retval)
return retval;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.key_mask);
+ return sysfs_emit(buf, "%d\n", temp_buf.key_mask);
}
static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
@@ -146,7 +146,7 @@ static ssize_t arvo_sysfs_show_actual_profile(struct device *dev,
struct arvo_device *arvo =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", arvo->actual_profile);
+ return sysfs_emit(buf, "%d\n", arvo->actual_profile);
}
static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 4d25577a8573..2628bc53ed80 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -4,6 +4,7 @@
* Copyright (c) 2015, Intel Corporation.
*/
+#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -21,6 +22,7 @@
#define HID_CUSTOM_TOTAL_ATTRS (HID_CUSTOM_MAX_CORE_ATTRS + 1)
#define HID_CUSTOM_FIFO_SIZE 4096
#define HID_CUSTOM_MAX_FEATURE_BYTES 64
+#define HID_SENSOR_USAGE_LENGTH (4 + 1)
struct hid_sensor_custom_field {
int report_id;
@@ -50,6 +52,7 @@ struct hid_sensor_custom {
struct kfifo data_fifo;
unsigned long misc_opened;
wait_queue_head_t wait;
+ struct platform_device *custom_pdev;
};
/* Header for each sample to user space via dev interface */
@@ -746,11 +749,130 @@ static void hid_sensor_custom_dev_if_remove(struct hid_sensor_custom
}
+/* LUIDs are defined in the FW (e.g. ISH) and may be used to identify a sensor. */
+static const char *const known_sensor_luid[] = { "020B000000000000" };
+
+static int get_luid_table_index(unsigned char *usage_str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(known_sensor_luid); i++) {
+ if (!strncmp(usage_str, known_sensor_luid[i],
+ strlen(known_sensor_luid[i])))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int get_known_custom_sensor_index(struct hid_sensor_hub_device *hsdev)
+{
+ struct hid_sensor_hub_attribute_info sensor_manufacturer = { 0 };
+ struct hid_sensor_hub_attribute_info sensor_luid_info = { 0 };
+ int report_size;
+ int ret;
+ static u16 w_buf[HID_CUSTOM_MAX_FEATURE_BYTES];
+ static char buf[HID_CUSTOM_MAX_FEATURE_BYTES];
+ int i;
+
+ memset(w_buf, 0, sizeof(w_buf));
+ memset(buf, 0, sizeof(buf));
+
+ /* get manufacturer info */
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, hsdev->usage,
+ HID_USAGE_SENSOR_PROP_MANUFACTURER, &sensor_manufacturer);
+ if (ret < 0)
+ return ret;
+
+ report_size =
+ sensor_hub_get_feature(hsdev, sensor_manufacturer.report_id,
+ sensor_manufacturer.index, sizeof(w_buf),
+ w_buf);
+ if (report_size <= 0) {
+ hid_err(hsdev->hdev,
+ "Failed to get sensor manufacturer info %d\n",
+ report_size);
+ return -ENODEV;
+ }
+
+ /* convert from wide char to char */
+ for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
+ buf[i] = (char)w_buf[i];
+
+ /* ensure it's an ISH sensor */
+ if (strncmp(buf, "INTEL", strlen("INTEL")))
+ return -ENODEV;
+
+ memset(w_buf, 0, sizeof(w_buf));
+ memset(buf, 0, sizeof(buf));
+
+ /* get real usage id */
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, hsdev->usage,
+ HID_USAGE_SENSOR_PROP_SERIAL_NUM, &sensor_luid_info);
+ if (ret < 0)
+ return ret;
+
+ report_size = sensor_hub_get_feature(hsdev, sensor_luid_info.report_id,
+ sensor_luid_info.index, sizeof(w_buf),
+ w_buf);
+ if (report_size <= 0) {
+ hid_err(hsdev->hdev, "Failed to get real usage info %d\n",
+ report_size);
+ return -ENODEV;
+ }
+
+ /* convert from wide char to char */
+ for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
+ buf[i] = (char)w_buf[i];
+
+ if (strlen(buf) != strlen(known_sensor_luid[0]) + 5) {
+ hid_err(hsdev->hdev,
+ "%s luid length not match %zu != (%zu + 5)\n", __func__,
+ strlen(buf), strlen(known_sensor_luid[0]));
+ return -ENODEV;
+ }
+
+ /* get table index from the luid, skipping the 5-character 'LUID:' prefix */
+ return get_luid_table_index(&buf[5]);
+}
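
For example, with the single known LUID above, the firmware's serial-number string is expected to look like "LUID:020B000000000000" (a 5-character prefix, presumably "LUID:", followed by the 16-character LUID), which is why the length check expects strlen(luid) + 5 and the lookup starts at &buf[5].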
+
+static struct platform_device *
+hid_sensor_register_platform_device(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ int index)
+{
+ char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
+ struct platform_device *custom_pdev;
+ const char *dev_name;
+ char *c;
+
+ /* copy real usage id */
+ memcpy(real_usage, known_sensor_luid[index], 4);
+
+ /* usage ids are all lowercase */
+ for (c = real_usage; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ /* HID-SENSOR-INT-REAL_USAGE_ID */
+ dev_name = kasprintf(GFP_KERNEL, "HID-SENSOR-INT-%s", real_usage);
+ if (!dev_name)
+ return ERR_PTR(-ENOMEM);
+
+ custom_pdev = platform_device_register_data(pdev->dev.parent, dev_name,
+ PLATFORM_DEVID_NONE, hsdev,
+ sizeof(*hsdev));
+ kfree(dev_name);
+ return custom_pdev;
+}
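
For the single known LUID above this registers a platform device named "HID-SENSOR-INT-020b": the first four LUID characters, lowercased, appended to the fixed prefix.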
+
static int hid_sensor_custom_probe(struct platform_device *pdev)
{
struct hid_sensor_custom *sensor_inst;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
int ret;
+ int index;
sensor_inst = devm_kzalloc(&pdev->dev, sizeof(*sensor_inst),
GFP_KERNEL);
@@ -764,6 +886,22 @@ static int hid_sensor_custom_probe(struct platform_device *pdev)
sensor_inst->pdev = pdev;
mutex_init(&sensor_inst->mutex);
platform_set_drvdata(pdev, sensor_inst);
+
+ index = get_known_custom_sensor_index(hsdev);
+ if (index >= 0 && index < ARRAY_SIZE(known_sensor_luid)) {
+ sensor_inst->custom_pdev =
+ hid_sensor_register_platform_device(pdev, hsdev, index);
+
+ ret = PTR_ERR_OR_ZERO(sensor_inst->custom_pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register_platform_device failed\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
ret = sensor_hub_register_callback(hsdev, hsdev->usage,
&sensor_inst->callbacks);
if (ret < 0) {
@@ -802,6 +940,11 @@ static int hid_sensor_custom_remove(struct platform_device *pdev)
struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ if (sensor_inst->custom_pdev) {
+ platform_device_unregister(sensor_inst->custom_pdev);
+ return 0;
+ }
+
hid_sensor_custom_dev_if_remove(sensor_inst);
hid_sensor_custom_remove_attributes(sensor_inst);
sysfs_remove_group(&sensor_inst->pdev->dev.kobj,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index e3a557dc9ffd..8319b0ce385a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -12,6 +12,7 @@
* Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
* Copyright (c) 2018 Todd Kelner
* Copyright (c) 2020 Pascal Giard <pascal.giard@etsmtl.ca>
+ * Copyright (c) 2020 Sanjay Govind <sanjay.govind9@gmail.com>
*/
/*
@@ -59,7 +60,8 @@
#define NSG_MR5U_REMOTE_BT BIT(14)
#define NSG_MR7U_REMOTE_BT BIT(15)
#define SHANWAN_GAMEPAD BIT(16)
-#define GHL_GUITAR_PS3WIIU BIT(17)
+#define GH_GUITAR_CONTROLLER BIT(17)
+#define GHL_GUITAR_PS3WIIU BIT(18)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -84,7 +86,7 @@
#define NSG_MRXU_MAX_Y 1868
#define GHL_GUITAR_POKE_INTERVAL 10 /* In seconds */
-#define GHL_GUITAR_TILT_USAGE 44
+#define GUITAR_TILT_USAGE 44
/* Magic value and data taken from GHLtarUtility:
* https://github.com/ghlre/GHLtarUtility/blob/master/PS3Guitar.cs
@@ -692,7 +694,7 @@ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
unsigned int abs = usage->hid & HID_USAGE;
- if (abs == GHL_GUITAR_TILT_USAGE) {
+ if (abs == GUITAR_TILT_USAGE) {
hid_map_usage_clear(hi, usage, bit, max, EV_ABS, ABS_RY);
return 1;
}
@@ -1481,7 +1483,7 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & DUALSHOCK4_CONTROLLER)
return ds4_mapping(hdev, hi, field, usage, bit, max);
- if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ if (sc->quirks & GH_GUITAR_CONTROLLER)
return guitar_mapping(hdev, hi, field, usage, bit, max);
/* Let hid-core decide for the others */
@@ -3167,8 +3169,14 @@ static const struct hid_device_id sony_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_NSG_MR7U_REMOTE),
.driver_data = NSG_MR7U_REMOTE_BT },
/* Guitar Hero Live PS3 and Wii U guitar dongles */
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY_GHLIVE, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
- .driver_data = GHL_GUITAR_PS3WIIU},
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
+ .driver_data = GHL_GUITAR_PS3WIIU | GH_GUITAR_CONTROLLER },
+ /* Guitar Hero PC Guitar Dongle */
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACTIVISION, USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE),
+ .driver_data = GH_GUITAR_CONTROLLER },
+ /* Guitar Hero PS3 World Tour Guitar Dongle */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE),
+ .driver_data = GH_GUITAR_CONTROLLER },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 8e9c9e646cb7..6a9865dd703c 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -371,6 +371,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_HS64) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST,
+ USB_DEVICE_ID_TRUST_PANORA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index d26d8cd98efc..6af25c38b9cc 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
goto cleanup;
} else if (rc < 0) {
hid_err(hdev,
- "failed retrieving string descriptor #%hhu: %d\n",
+ "failed retrieving string descriptor #%u: %d\n",
idx, rc);
goto cleanup;
}
@@ -1045,6 +1045,8 @@ int uclogic_params_init(struct uclogic_params *params,
uclogic_params_init_with_pen_unused(&p);
}
break;
+ case VID_PID(USB_VENDOR_ID_TRUST,
+ USB_DEVICE_ID_TRUST_PANORA_TABLET):
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_G5):
/* Ignore non-pen interfaces */
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 41012681cafd..4399d6c6afef 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
wdata->state.cmd_err = err;
wiimote_cmd_complete(wdata);
} else if (err) {
- hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+ hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
cmd);
}
}
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index c4e5dfeab2bd..a16c6a69680b 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -2,18 +2,55 @@
menu "I2C HID support"
depends on I2C
-config I2C_HID
- tristate "HID over I2C transport layer"
+config I2C_HID_ACPI
+ tristate "HID over I2C transport layer ACPI driver"
default n
- depends on I2C && INPUT
- select HID
+ depends on I2C && INPUT && ACPI
+ help
+ Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
+ other HID-based device connected to your computer via I2C. This
+ driver supports ACPI-based systems.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid-acpi. It will also build/depend on the
+ module i2c-hid.
+
+config I2C_HID_OF
+ tristate "HID over I2C transport layer Open Firmware driver"
+ default n
+ depends on I2C && INPUT && OF
help
Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
other HID based devices which is connected to your computer via I2C.
+ This driver supports Open Firmware (Device Tree)-based systems.
If unsure, say N.
This support is also available as a module. If so, the module
- will be called i2c-hid.
+ will be called i2c-hid-of. It will also build/depend on the
+ module i2c-hid.
+
+config I2C_HID_OF_GOODIX
+ tristate "Driver for Goodix hid-i2c based devices on OF systems"
+ default n
+ depends on I2C && INPUT && OF
+ help
+ Say Y here if you want support for Goodix i2c devices that use
+ the i2c-hid protocol on Open Firmware (Device Tree)-based
+ systems.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid-of-goodix. It will also build/depend on
+ the module i2c-hid.
endmenu
+
+config I2C_HID_CORE
+ tristate
+ default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_GOODIX=y
+ default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_GOODIX=m
+ select HID
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 681b3896898e..302545a771f3 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,7 +3,11 @@
# Makefile for the I2C input drivers
#
-obj-$(CONFIG_I2C_HID) += i2c-hid.o
+obj-$(CONFIG_I2C_HID_CORE) += i2c-hid.o
i2c-hid-objs = i2c-hid-core.o
i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
+
+obj-$(CONFIG_I2C_HID_ACPI) += i2c-hid-acpi.o
+obj-$(CONFIG_I2C_HID_OF) += i2c-hid-of.o
+obj-$(CONFIG_I2C_HID_OF_GOODIX) += i2c-hid-of-goodix.o
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
new file mode 100644
index 000000000000..bb8c00e6be78
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -0,0 +1,143 @@
+/*
+ * HID over I2C ACPI Subclass
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code was forked out of the core code, which was partly based on
+ * "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include "i2c-hid.h"
+
+struct i2c_hid_acpi {
+ struct i2chid_ops ops;
+ struct i2c_client *client;
+};
+
+static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ /*
+ * The CHPN0001 ACPI device, which is used to describe the Chipone
+ * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ */
+ {"CHPN0001", 0 },
+ { },
+};
+
+static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
+{
+ static guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+ union acpi_object *obj;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ u16 hid_descriptor_address;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+
+ if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
+ return -ENODEV;
+
+ obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
+ dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
+ return -ENODEV;
+ }
+
+ hid_descriptor_address = obj->integer.value;
+ ACPI_FREE(obj);
+
+ return hid_descriptor_address;
+}
+
+static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
+{
+ struct i2c_hid_acpi *ihid_acpi =
+ container_of(ops, struct i2c_hid_acpi, ops);
+ struct device *dev = &ihid_acpi->client->dev;
+
+ acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
+}
+
+static int i2c_hid_acpi_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct i2c_hid_acpi *ihid_acpi;
+ struct acpi_device *adev;
+ u16 hid_descriptor_address;
+ int ret;
+
+ ihid_acpi = devm_kzalloc(&client->dev, sizeof(*ihid_acpi), GFP_KERNEL);
+ if (!ihid_acpi)
+ return -ENOMEM;
+
+ ihid_acpi->client = client;
+ ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
+
+ ret = i2c_hid_acpi_get_descriptor(client);
+ if (ret < 0)
+ return ret;
+ hid_descriptor_address = ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (adev)
+ acpi_device_fix_up_power(adev);
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev, false);
+ }
+
+ return i2c_hid_core_probe(client, &ihid_acpi->ops,
+ hid_descriptor_address);
+}
+
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+
+static struct i2c_driver i2c_hid_acpi_driver = {
+ .driver = {
+ .name = "i2c_hid_acpi",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+ },
+
+ .probe = i2c_hid_acpi_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+};
+
+module_i2c_driver(i2c_hid_acpi_driver);
+
+MODULE_DESCRIPTION("HID over I2C ACPI driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index bfe716d7ea44..9993133989a5 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -35,11 +35,6 @@
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/mutex.h>
-#include <linux/acpi.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-
-#include <linux/platform_data/i2c-hid.h>
#include "../hid-ids.h"
#include "i2c-hid.h"
@@ -156,10 +151,10 @@ struct i2c_hid {
wait_queue_head_t wait; /* For waiting the interrupt */
- struct i2c_hid_platform_data pdata;
-
bool irq_wake_enabled;
struct mutex reset_lock;
+
+ struct i2chid_ops *ops;
};
static const struct i2c_hid_quirks {
@@ -171,6 +166,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
@@ -884,144 +881,36 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
- /*
- * The CHPN0001 ACPI device, which is used to describe the Chipone
- * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
- */
- {"CHPN0001", 0 },
- { },
-};
-
-static int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- static guid_t i2c_hid_guid =
- GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
- 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
- union acpi_object *obj;
- struct acpi_device *adev;
- acpi_handle handle;
-
- handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_err(&client->dev, "Error could not get ACPI device\n");
- return -ENODEV;
- }
-
- if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
- return -ENODEV;
-
- obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
- ACPI_TYPE_INTEGER);
- if (!obj) {
- dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
- return -ENODEV;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
- ACPI_FREE(obj);
-
- return 0;
-}
-
-static void i2c_hid_acpi_fix_up_power(struct device *dev)
-{
- struct acpi_device *adev;
-
- adev = ACPI_COMPANION(dev);
- if (adev)
- acpi_device_fix_up_power(adev);
-}
-
-static void i2c_hid_acpi_enable_wakeup(struct device *dev)
-{
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- device_set_wakeup_capable(dev, true);
- device_set_wakeup_enable(dev, false);
- }
-}
-
-static void i2c_hid_acpi_shutdown(struct device *dev)
+static int i2c_hid_core_power_up(struct i2c_hid *ihid)
{
- acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
-}
+ if (!ihid->ops->power_up)
+ return 0;
-static const struct acpi_device_id i2c_hid_acpi_match[] = {
- {"ACPI0C50", 0 },
- {"PNP0C50", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
-#else
-static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
+ return ihid->ops->power_up(ihid->ops);
}
-static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
-
-static inline void i2c_hid_acpi_enable_wakeup(struct device *dev) {}
-
-static inline void i2c_hid_acpi_shutdown(struct device *dev) {}
-#endif
-
-#ifdef CONFIG_OF
-static int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
+static void i2c_hid_core_power_down(struct i2c_hid *ihid)
{
- struct device *dev = &client->dev;
- u32 val;
- int ret;
-
- ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
- if (ret) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -ENODEV;
- }
- if (val >> 16) {
- dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
- val);
- return -EINVAL;
- }
- pdata->hid_descriptor_address = val;
-
- return 0;
-}
+ if (!ihid->ops->power_down)
+ return;
-static const struct of_device_id i2c_hid_of_match[] = {
- { .compatible = "hid-over-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
-#else
-static inline int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
+ ihid->ops->power_down(ihid->ops);
}
-#endif
-static void i2c_hid_fwnode_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
+static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
{
- u32 val;
+ if (!ihid->ops->shutdown_tail)
+ return;
- if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
- &val))
- pdata->post_power_delay_ms = val;
+ ihid->ops->shutdown_tail(ihid->ops);
}
-static int i2c_hid_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ u16 hid_descriptor_address)
{
int ret;
struct i2c_hid *ihid;
struct hid_device *hid;
- __u16 hidRegister;
- struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
@@ -1042,44 +931,17 @@ static int i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
- if (client->dev.of_node) {
- ret = i2c_hid_of_probe(client, &ihid->pdata);
- if (ret)
- return ret;
- } else if (!platform_data) {
- ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
- if (ret)
- return ret;
- } else {
- ihid->pdata = *platform_data;
- }
-
- /* Parse platform agnostic common properties from ACPI / device tree */
- i2c_hid_fwnode_probe(client, &ihid->pdata);
+ ihid->ops = ops;
- ihid->pdata.supplies[0].supply = "vdd";
- ihid->pdata.supplies[1].supply = "vddl";
-
- ret = devm_regulator_bulk_get(&client->dev,
- ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ ret = i2c_hid_core_power_up(ihid);
if (ret)
return ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
- if (ret < 0)
- return ret;
-
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
- hidRegister = ihid->pdata.hid_descriptor_address;
- ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
+ ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address);
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->reset_lock);
@@ -1089,11 +951,7 @@ static int i2c_hid_probe(struct i2c_client *client,
* real computation later. */
ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
if (ret < 0)
- goto err_regulator;
-
- i2c_hid_acpi_fix_up_power(&client->dev);
-
- i2c_hid_acpi_enable_wakeup(&client->dev);
+ goto err_powered;
device_enable_async_suspend(&client->dev);
@@ -1102,19 +960,19 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0) {
dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
ret = -ENXIO;
- goto err_regulator;
+ goto err_powered;
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0) {
dev_err(&client->dev,
"Failed to fetch the HID Descriptor\n");
- goto err_regulator;
+ goto err_powered;
}
ret = i2c_hid_init_irq(client);
if (ret < 0)
- goto err_regulator;
+ goto err_powered;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
@@ -1153,14 +1011,14 @@ err_mem_free:
err_irq:
free_irq(client->irq, ihid);
-err_regulator:
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+err_powered:
+ i2c_hid_core_power_down(ihid);
i2c_hid_free_buffers(ihid);
return ret;
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_probe);
-static int i2c_hid_remove(struct i2c_client *client)
+int i2c_hid_core_remove(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
@@ -1173,24 +1031,25 @@ static int i2c_hid_remove(struct i2c_client *client)
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ i2c_hid_core_power_down(ihid);
return 0;
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_remove);
-static void i2c_hid_shutdown(struct i2c_client *client)
+void i2c_hid_core_shutdown(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
free_irq(client->irq, ihid);
- i2c_hid_acpi_shutdown(&client->dev);
+ i2c_hid_core_shutdown_tail(ihid);
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_shutdown);
#ifdef CONFIG_PM_SLEEP
-static int i2c_hid_suspend(struct device *dev)
+static int i2c_hid_core_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct i2c_hid *ihid = i2c_get_clientdata(client);
@@ -1217,14 +1076,13 @@ static int i2c_hid_suspend(struct device *dev)
hid_warn(hid, "Failed to enable irq wake: %d\n",
wake_status);
} else {
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ i2c_hid_core_power_down(ihid);
}
return 0;
}
-static int i2c_hid_resume(struct device *dev)
+static int i2c_hid_core_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
@@ -1233,13 +1091,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (!device_may_wakeup(&client->dev)) {
- ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
- if (ret)
- hid_warn(hid, "Failed to enable supplies: %d\n", ret);
-
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
+ i2c_hid_core_power_up(ihid);
} else if (ihid->irq_wake_enabled) {
wake_status = disable_irq_wake(client->irq);
if (!wake_status)
@@ -1276,34 +1128,10 @@ static int i2c_hid_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops i2c_hid_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
+const struct dev_pm_ops i2c_hid_core_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_core_suspend, i2c_hid_core_resume)
};
-
-static const struct i2c_device_id i2c_hid_id_table[] = {
- { "hid", 0 },
- { "hid-over-i2c", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
-
-
-static struct i2c_driver i2c_hid_driver = {
- .driver = {
- .name = "i2c_hid",
- .pm = &i2c_hid_pm,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
- .of_match_table = of_match_ptr(i2c_hid_of_match),
- },
-
- .probe = i2c_hid_probe,
- .remove = i2c_hid_remove,
- .shutdown = i2c_hid_shutdown,
- .id_table = i2c_hid_id_table,
-};
-
-module_i2c_driver(i2c_hid_driver);
+EXPORT_SYMBOL_GPL(i2c_hid_core_pm);
MODULE_DESCRIPTION("HID over I2C core driver");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
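The hunks above convert the monolithic i2c_hid driver into a core library: the ACPI- and regulator-specific paths move out behind a struct i2chid_ops callback table, and the probe/remove/shutdown/PM entry points are exported for the transport modules added below. The new i2c_hid_core_power_up()/i2c_hid_core_power_down() helpers are referenced but not shown in these hunks; they presumably reduce to thin wrappers along these lines (a sketch, not the literal patch code):

/*
 * Hedged sketch of the core power helpers referenced above; the real
 * implementation lives in a hunk not shown here.
 */
static int i2c_hid_core_power_up(struct i2c_hid *ihid)
{
	if (ihid->ops->power_up)
		return ihid->ops->power_up(ihid->ops);

	return 0;
}

static void i2c_hid_core_power_down(struct i2c_hid *ihid)
{
	if (ihid->ops->power_down)
		ihid->ops->power_down(ihid->ops);
}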
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
new file mode 100644
index 000000000000..ee0225982a82
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Goodix touchscreens that use the i2c-hid protocol.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include "i2c-hid.h"
+
+struct goodix_i2c_hid_timing_data {
+ unsigned int post_gpio_reset_delay_ms;
+ unsigned int post_power_delay_ms;
+};
+
+struct i2c_hid_of_goodix {
+ struct i2chid_ops ops;
+
+ struct regulator *vdd;
+ struct gpio_desc *reset_gpio;
+ const struct goodix_i2c_hid_timing_data *timings;
+};
+
+static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+ int ret;
+
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
+
+ if (ihid_goodix->timings->post_power_delay_ms)
+ msleep(ihid_goodix->timings->post_power_delay_ms);
+
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 0);
+ if (ihid_goodix->timings->post_gpio_reset_delay_ms)
+ msleep(ihid_goodix->timings->post_gpio_reset_delay_ms);
+
+ return 0;
+}
+
+static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
+ regulator_disable(ihid_goodix->vdd);
+}
+
+static int i2c_hid_of_goodix_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_hid_of_goodix *ihid_goodix;
+
+ ihid_goodix = devm_kzalloc(&client->dev, sizeof(*ihid_goodix),
+ GFP_KERNEL);
+ if (!ihid_goodix)
+ return -ENOMEM;
+
+ ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
+ ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
+
+ /* Start out with reset asserted */
+ ihid_goodix->reset_gpio =
+ devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ihid_goodix->reset_gpio))
+ return PTR_ERR(ihid_goodix->reset_gpio);
+
+ ihid_goodix->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ihid_goodix->vdd))
+ return PTR_ERR(ihid_goodix->vdd);
+
+ ihid_goodix->timings = device_get_match_data(&client->dev);
+
+ return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001);
+}
+
+static const struct goodix_i2c_hid_timing_data goodix_gt7375p_timing_data = {
+ .post_power_delay_ms = 10,
+ .post_gpio_reset_delay_ms = 180,
+};
+
+static const struct of_device_id goodix_i2c_hid_of_match[] = {
+ { .compatible = "goodix,gt7375p", .data = &goodix_gt7375p_timing_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_i2c_hid_of_match);
+
+static struct i2c_driver goodix_i2c_hid_ts_driver = {
+ .driver = {
+ .name = "i2c_hid_of_goodix",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(goodix_i2c_hid_of_match),
+ },
+ .probe = i2c_hid_of_goodix_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+};
+module_i2c_driver(goodix_i2c_hid_ts_driver);
+
+MODULE_AUTHOR("Douglas Anderson <dianders@chromium.org>");
+MODULE_DESCRIPTION("Goodix i2c-hid touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
new file mode 100644
index 000000000000..4bf7cea92637
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -0,0 +1,143 @@
+/*
+ * HID over I2C Open Firmware Subclass
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code was forked out of the core code, which was partly based on
+ * "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include "i2c-hid.h"
+
+struct i2c_hid_of {
+ struct i2chid_ops ops;
+
+ struct i2c_client *client;
+ struct regulator_bulk_data supplies[2];
+ int post_power_delay_ms;
+};
+
+static int i2c_hid_of_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of *ihid_of = container_of(ops, struct i2c_hid_of, ops);
+ struct device *dev = &ihid_of->client->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+ if (ret) {
+ dev_warn(dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (ihid_of->post_power_delay_ms)
+ msleep(ihid_of->post_power_delay_ms);
+
+ return 0;
+}
+
+static void i2c_hid_of_power_down(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of *ihid_of = container_of(ops, struct i2c_hid_of, ops);
+
+ regulator_bulk_disable(ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+}
+
+static int i2c_hid_of_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct i2c_hid_of *ihid_of;
+ u16 hid_descriptor_address;
+ int ret;
+ u32 val;
+
+ ihid_of = devm_kzalloc(&client->dev, sizeof(*ihid_of), GFP_KERNEL);
+ if (!ihid_of)
+ return -ENOMEM;
+
+ ihid_of->ops.power_up = i2c_hid_of_power_up;
+ ihid_of->ops.power_down = i2c_hid_of_power_down;
+
+ ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
+ if (ret) {
+ dev_err(&client->dev, "HID register address not provided\n");
+ return -ENODEV;
+ }
+ if (val >> 16) {
+ dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
+ val);
+ return -EINVAL;
+ }
+ hid_descriptor_address = val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ ihid_of->post_power_delay_ms = val;
+
+ ihid_of->supplies[0].supply = "vdd";
+ ihid_of->supplies[1].supply = "vddl";
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+ if (ret)
+ return ret;
+
+ return i2c_hid_core_probe(client, &ihid_of->ops,
+ hid_descriptor_address);
+}
+
+static const struct of_device_id i2c_hid_of_match[] = {
+ { .compatible = "hid-over-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
+
+static const struct i2c_device_id i2c_hid_of_id_table[] = {
+ { "hid", 0 },
+ { "hid-over-i2c", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, i2c_hid_of_id_table);
+
+static struct i2c_driver i2c_hid_of_driver = {
+ .driver = {
+ .name = "i2c_hid_of",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(i2c_hid_of_match),
+ },
+
+ .probe = i2c_hid_of_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+ .id_table = i2c_hid_of_id_table,
+};
+
+module_i2c_driver(i2c_hid_of_driver);
+
+MODULE_DESCRIPTION("HID over I2C OF driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index a8c19aef5824..05a7827d211a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -3,6 +3,7 @@
#ifndef I2C_HID_H
#define I2C_HID_H
+#include <linux/i2c.h>
#ifdef CONFIG_DMI
struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
@@ -17,4 +18,25 @@ static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
{ return NULL; }
#endif
+/**
+ * struct i2chid_ops - Ops provided to the core.
+ *
+ * @power_up: do sequencing to power up the device.
+ * @power_down: do sequencing to power down the device.
+ * @shutdown_tail: called at the end of shutdown.
+ */
+struct i2chid_ops {
+ int (*power_up)(struct i2chid_ops *ops);
+ void (*power_down)(struct i2chid_ops *ops);
+ void (*shutdown_tail)(struct i2chid_ops *ops);
+};
+
+int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ u16 hid_descriptor_address);
+int i2c_hid_core_remove(struct i2c_client *client);
+
+void i2c_hid_core_shutdown(struct i2c_client *client);
+
+extern const struct dev_pm_ops i2c_hid_core_pm;
+
#endif
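Taken together, this header defines the whole contract between core and transport: embed struct i2chid_ops in the transport's private state, recover that state with container_of() inside the callbacks, and pass the client plus the HID descriptor register address to i2c_hid_core_probe(). A minimal glue driver, sketched with hypothetical names (my_chip_*, the "enable" GPIO, and the 0x0001 register are assumptions, not from this patch; assumes the usual <linux/gpio/consumer.h> and <linux/i2c.h> includes):

/* Illustrative minimal transport glue for the new core API. */
struct my_i2c_hid {
	struct i2chid_ops ops;
	struct gpio_desc *enable_gpio;
};

static int my_chip_power_up(struct i2chid_ops *ops)
{
	struct my_i2c_hid *ihid = container_of(ops, struct my_i2c_hid, ops);

	gpiod_set_value_cansleep(ihid->enable_gpio, 1);
	return 0;
}

static void my_chip_power_down(struct i2chid_ops *ops)
{
	struct my_i2c_hid *ihid = container_of(ops, struct my_i2c_hid, ops);

	gpiod_set_value_cansleep(ihid->enable_gpio, 0);
}

static int my_chip_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct my_i2c_hid *ihid;

	ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
	if (!ihid)
		return -ENOMEM;

	ihid->enable_gpio = devm_gpiod_get(&client->dev, "enable",
					   GPIOD_OUT_LOW);
	if (IS_ERR(ihid->enable_gpio))
		return PTR_ERR(ihid->enable_gpio);

	ihid->ops.power_up = my_chip_power_up;
	ihid->ops.power_down = my_chip_power_down;

	return i2c_hid_core_probe(client, &ihid->ops, 0x0001);
}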
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1fb294ca463e..21b87e4003af 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -27,6 +27,7 @@
#define CMP_H_DEVICE_ID 0x06FC
#define EHL_Ax_DEVICE_ID 0x4BB3
#define TGL_LP_DEVICE_ID 0xA0FC
+#define TGL_H_DEVICE_ID 0x43FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
@@ -81,5 +82,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
int ish_hw_start(struct ishtp_device *dev);
void ish_device_disable(struct ishtp_device *dev);
int ish_disable_dma(struct ishtp_device *dev);
+void ish_set_host_ready(struct ishtp_device *dev);
#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a45ac7fa417b..47bbeb8b492b 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -193,6 +193,33 @@ static void ish_clr_host_rdy(struct ishtp_device *dev)
ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
}
+static bool ish_chk_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ return (host_status & IPC_HOSTCOMM_READY_BIT);
+}
+
+/**
+ * ish_set_host_ready() - reconfigure the IPC host registers
+ * @dev: ishtp device pointer
+ *
+ * Set the host to the ready state. This is needed when the firmware is
+ * still running but the IPC hardware has been powered down, for example
+ * in the OOB (out-of-band) case.
+ */
+void ish_set_host_ready(struct ishtp_device *dev)
+{
+ if (ish_chk_host_rdy(dev))
+ return;
+
+ ish_set_host_rdy(dev);
+ set_host_ready(dev);
+}
+
/**
* _ishtp_read_hdr() - Read message header
* @dev: ISHTP device pointer
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index c6d48a8648b7..06081cf9b85a 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014-2016, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -37,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -111,6 +113,42 @@ static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
}
+static int enable_gpe(struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_status acpi_sts;
+ struct acpi_device *adev;
+ struct acpi_device_wakeup *wakeup;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_err(dev, "get acpi handle failed\n");
+ return -ENODEV;
+ }
+ wakeup = &adev->wakeup;
+
+ acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(dev, "enable GPE failed\n");
+ return -EIO;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void enable_pme_wake(struct pci_dev *pdev)
+{
+ if ((pci_pme_capable(pdev, PCI_D0) ||
+ pci_pme_capable(pdev, PCI_D3hot) ||
+ pci_pme_capable(pdev, PCI_D3cold)) && !enable_gpe(&pdev->dev)) {
+ pci_pme_active(pdev, true);
+ dev_dbg(&pdev->dev, "ish ipc driver pme wake enabled\n");
+ }
+}
+
/**
* ish_probe() - PCI driver probe callback
* @pdev: pci device
@@ -179,6 +217,10 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&ishtp->suspend_wait);
init_waitqueue_head(&ishtp->resume_wait);
+ /* Enable PME for EHL */
+ if (pdev->device == EHL_Ax_DEVICE_ID)
+ enable_pme_wake(pdev);
+
ret = ish_init(ishtp);
if (ret)
return ret;
@@ -218,11 +260,15 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts = dev->ops->get_fw_status(dev);
int ret;
- if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag) {
+ if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
+ && IPC_IS_ISH_ILUP(fwsts)) {
disable_irq_wake(pdev->irq);
+ ish_set_host_ready(dev);
+
ishtp_send_resume(dev);
/* Waiting to get resume response */
@@ -317,6 +363,13 @@ static int __maybe_unused ish_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ /* Complete the EHL power-up flow: return to D0 and rearm PME wake */
+ if (dev->pdev->device == EHL_Ax_DEVICE_ID) {
+ pci_set_power_state(pdev, PCI_D0);
+ enable_pme_wake(pdev);
+ dev_dbg(dev->devc, "set power state to D0 for ehl\n");
+ }
+
ish_resume_device = device;
dev->resume_flag = 1;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 045c464228d9..8328ef155c46 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
@@ -1270,6 +1270,38 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
group);
}
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+ struct kfifo_rec_ptr_2 *devres = res;
+
+ kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+ sizeof(struct kfifo_rec_ptr_2),
+ GFP_KERNEL);
+
+ if (!pen_fifo)
+ return -ENOMEM;
+
+ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error) {
+ devres_free(pen_fifo);
+ return error;
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+}
+
enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
{
struct wacom *wacom = led->wacom;
@@ -1793,7 +1825,7 @@ static ssize_t wacom_show_speed(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct wacom *wacom = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%i\n", wacom->wacom_wac.bt_high_speed);
+ return sysfs_emit(buf, "%i\n", wacom->wacom_wac.bt_high_speed);
}
static ssize_t wacom_store_speed(struct device *dev,
@@ -2724,7 +2756,7 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = wacom_devm_kfifo_alloc(wacom);
if (error)
return error;
@@ -2786,8 +2818,6 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
-
- kfifo_free(&wacom_wac->pen_fifo);
}
#ifdef CONFIG_PM
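wacom_devm_kfifo_alloc() above is an instance of the generic devres pattern: the resource is wrapped in a devres_alloc()'d container whose release callback frees it automatically when the device unbinds, which is why wacom_remove() can drop its explicit kfifo_free(). The pattern in miniature (hypothetical my_buf example, assuming <linux/device.h> and <linux/slab.h>):

struct my_buf {
	void *data;
};

static void my_buf_release(struct device *dev, void *res)
{
	struct my_buf *buf = res;

	kfree(buf->data);	/* runs automatically when the device unbinds */
}

static int my_buf_setup(struct device *dev)
{
	struct my_buf *buf = devres_alloc(my_buf_release, sizeof(*buf),
					  GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	buf->data = kmalloc(64, GFP_KERNEL);
	if (!buf->data) {
		devres_free(buf);	/* not yet owned by the device */
		return -ENOMEM;
	}

	devres_add(dev, buf);	/* device now owns it; no explicit free needed */
	return 0;
}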
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1bd0eb71559c..44d715c12f6a 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
wacom_wac->is_invalid_bt_frame = !value;
return;
case HID_DG_CONTACTMAX:
- features->touch_max = value;
+ if (!features->touch_max) {
+ features->touch_max = value;
+ } else {
+ hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
+ "%d -> %d\n", __func__, features->touch_max, value);
+ }
return;
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index da612b6e9c77..195910dd2154 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -342,7 +342,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 7596dc164648..44a3f5660c10 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
int err;
- err = pm_runtime_get_sync(ssi->device.parent);
+ err = pm_runtime_resume_and_get(ssi->device.parent);
if (err < 0) {
dev_err(&ssi->device, "runtime PM failed %d\n", err);
return err;
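The one-line omap_ssi change is part of a broader conversion: pm_runtime_get_sync() leaves the usage counter raised even when the resume fails, so error paths that simply return leak a reference. pm_runtime_resume_and_get() avoids that by dropping the reference itself; its body is roughly:

/* Approximate body of pm_runtime_resume_and_get() from <linux/pm_runtime.h> */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the counter bump */
		return ret;
	}

	return 0;
}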
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 6fb0c76bfbf8..0bd202de7960 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -618,7 +618,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
goto error_clean_ring;
/* Create and init the channel open message */
- open_info = kmalloc(sizeof(*open_info) +
+ open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
GFP_KERNEL);
if (!open_info) {
@@ -745,7 +745,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
unsigned long flags;
int ret;
- info = kmalloc(sizeof(*info) +
+ info = kzalloc(sizeof(*info) +
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
if (!info)
return -ENOMEM;
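Both channel.c conversions zero buffers whose contents are later posted to the hypervisor; with plain kmalloc(), struct padding and any fields the code forgets to set would leak stale kernel memory to the host. kzalloc() is simply kmalloc() with zeroing:

/* kzalloc(), as defined in <linux/slab.h> */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}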
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1d44bb635bb8..f0ed730e2e4e 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -31,101 +31,118 @@ const struct vmbus_device vmbus_devs[] = {
{ .dev_type = HV_IDE,
HV_IDE_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* SCSI */
{ .dev_type = HV_SCSI,
HV_SCSI_GUID,
.perf_device = true,
+ .allowed_in_isolated = true,
},
/* Fibre Channel */
{ .dev_type = HV_FC,
HV_SYNTHFC_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* Synthetic NIC */
{ .dev_type = HV_NIC,
HV_NIC_GUID,
.perf_device = true,
+ .allowed_in_isolated = true,
},
/* Network Direct */
{ .dev_type = HV_ND,
HV_ND_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* PCIE */
{ .dev_type = HV_PCIE,
HV_PCIE_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic Frame Buffer */
{ .dev_type = HV_FB,
HV_SYNTHVID_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic Keyboard */
{ .dev_type = HV_KBD,
HV_KBD_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic MOUSE */
{ .dev_type = HV_MOUSE,
HV_MOUSE_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* KVP */
{ .dev_type = HV_KVP,
HV_KVP_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Time Synch */
{ .dev_type = HV_TS,
HV_TS_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* Heartbeat */
{ .dev_type = HV_HB,
HV_HEART_BEAT_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* Shutdown */
{ .dev_type = HV_SHUTDOWN,
HV_SHUTDOWN_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* File copy */
{ .dev_type = HV_FCOPY,
HV_FCOPY_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Backup */
{ .dev_type = HV_BACKUP,
HV_VSS_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Dynamic Memory */
{ .dev_type = HV_DM,
HV_DM_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Unknown GUID */
{ .dev_type = HV_UNKNOWN,
.perf_device = false,
+ .allowed_in_isolated = false,
},
};
@@ -190,6 +207,7 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
* vmbus_prep_negotiate_resp() - Create default response for Negotiate message
* @icmsghdrp: Pointer to msg header structure
* @buf: Raw buffer channel data
+ * @buflen: Length of the raw buffer channel data.
* @fw_version: The framework versions we can support.
* @fw_vercnt: The size of @fw_version.
* @srv_version: The service versions we can support.
@@ -202,8 +220,8 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
* Set up and fill in default negotiate response message.
* Mainly used by Hyper-V drivers.
*/
-bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
- u8 *buf, const int *fw_version, int fw_vercnt,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+ u32 buflen, const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version)
{
@@ -215,10 +233,14 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
bool found_match = false;
struct icmsg_negotiate *negop;
+ /* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
+ if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
+ pr_err_ratelimited("Invalid icmsg negotiate\n");
+ return false;
+ }
+
icmsghdrp->icmsgsize = 0x10;
- negop = (struct icmsg_negotiate *)&buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
icframe_major = negop->icframe_vercnt;
icframe_minor = 0;
@@ -226,6 +248,15 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
icmsg_major = negop->icmsg_vercnt;
icmsg_minor = 0;
+ /* Validate negop packet */
+ if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
+ pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
+ icframe_major, icmsg_major);
+ goto fw_error;
+ }
+
/*
* Select the framework version number we will
* support.
@@ -889,6 +920,20 @@ find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
return channel;
}
+static bool vmbus_is_valid_device(const guid_t *guid)
+{
+ u16 i;
+
+ if (!hv_is_isolation_supported())
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
+ if (guid_equal(guid, &vmbus_devs[i].guid))
+ return vmbus_devs[i].allowed_in_isolated;
+ }
+ return false;
+}
+
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -903,6 +948,13 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
trace_vmbus_onoffer(offer);
+ if (!vmbus_is_valid_device(&offer->offer.if_type)) {
+ pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
+ offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ return;
+ }
+
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
@@ -1049,6 +1101,18 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
+ if (channel != NULL) {
+ /*
+ * Guarantee that no other instance of vmbus_onoffer_rescind()
+ * has got a reference to the channel object. Synchronize on
+ * &vmbus_connection.channel_mutex.
+ */
+ if (channel->rescind_ref) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return;
+ }
+ channel->rescind_ref = true;
+ }
mutex_unlock(&vmbus_connection.channel_mutex);
if (channel == NULL) {
@@ -1102,8 +1166,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
- }
- if (channel->primary_channel != NULL) {
+ } else if (channel->primary_channel != NULL) {
/*
* Sub-channel is being rescinded. Following is the channel
 * close sequence when initiated from the driver (refer to
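The negotiate hardening above leans on two helpers introduced elsewhere in this series (in include/linux/hyperv.h): ICMSG_HDR, the combined pipe-plus-icmsg header size, and ICMSG_NEGOTIATE_PKT_SIZE(), the total packet size implied by the advertised version counts. Their shape is approximately as follows (hedged reconstruction; the definitions are not shown in these hunks):

/* Approximate definitions; the exact form lives in include/linux/hyperv.h. */
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))

#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))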
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 11170d9a2e1a..c83612cddb99 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -244,6 +244,13 @@ int vmbus_connect(void)
break;
}
+ if (hv_is_isolation_supported() && version < VERSION_WIN10_V5_2) {
+ pr_err("Invalid VMBus version %d.%d (expected >= %d.%d) from the host supporting isolation\n",
+ version >> 16, version & 0xFFFF, VERSION_WIN10_V5_2 >> 16, VERSION_WIN10_V5_2 & 0xFFFF);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
version >> 16, version & 0xFFFF);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 8c471823a5af..2f776d78e3c1 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -726,7 +726,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE);
+ (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 5040d7e0cd9e..59ce85e00a02 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -235,15 +235,27 @@ void hv_fcopy_onchannelcallback(void *context)
if (fcopy_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
- &requestid);
- if (recvlen <= 0)
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ pr_err_ratelimited("Fcopy request received. Could not read into recv buf\n");
return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Fcopy request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
icmsghdr = (struct icmsg_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr)];
+
if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
+ if (vmbus_prep_negotiate_resp(icmsghdr,
+ recv_buffer, recvlen,
fw_versions, FW_VER_COUNT,
fcopy_versions, FCOPY_VER_COUNT,
NULL, &fcopy_srv_version)) {
@@ -252,10 +264,14 @@ void hv_fcopy_onchannelcallback(void *context)
fcopy_srv_version >> 16,
fcopy_srv_version & 0xFFFF);
}
- } else {
- fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ } else if (icmsghdr->icmsgtype == ICMSGTYPE_FCOPY) {
+ /* Ensure recvlen is big enough to contain hv_fcopy_hdr */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_fcopy_hdr)) {
+ pr_err_ratelimited("Invalid Fcopy hdr. Packet length too small: %u\n",
+ recvlen);
+ return;
+ }
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ICMSG_HDR];
/*
* Stash away this global state for completing the
@@ -280,6 +296,10 @@ void hv_fcopy_onchannelcallback(void *context)
schedule_delayed_work(&fcopy_timeout_work,
HV_UTIL_TIMEOUT * HZ);
return;
+ } else {
+ pr_err_ratelimited("Fcopy request received. Invalid msg type: %d\n",
+ icmsghdr->icmsgtype);
+ return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 754d35a25a1c..b49962d312ce 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -662,71 +662,87 @@ void hv_kvp_onchannelcallback(void *context)
if (kvp_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen,
- &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp,
- recv_buffer, fw_versions, FW_VER_COUNT,
- kvp_versions, KVP_VER_COUNT,
- NULL, &kvp_srv_version)) {
- pr_info("KVP IC version %d.%d\n",
- kvp_srv_version >> 16,
- kvp_srv_version & 0xFFFF);
- }
- } else {
- kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen, &requestid)) {
+ pr_err_ratelimited("KVP request received. Could not read into recv buf\n");
+ return;
+ }
- /*
- * Stash away this global state for completing the
- * transaction; note transactions are serialized.
- */
+ if (!recvlen)
+ return;
- kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_req_id = requestid;
- kvp_transaction.kvp_msg = kvp_msg;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("KVP request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
- if (kvp_transaction.state < HVUTIL_READY) {
- /* Userspace is not registered yet */
- kvp_respond_to_host(NULL, HV_E_FAIL);
- return;
- }
- kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ kvp_versions, KVP_VER_COUNT,
+ NULL, &kvp_srv_version)) {
+ pr_info("KVP IC version %d.%d\n",
+ kvp_srv_version >> 16,
+ kvp_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_KVPEXCHANGE) {
+ /*
+ * recvlen is not checked against sizeof(struct kvp_msg) because kvp_msg contains
+ * a union of structs and the msg type received is not known. Code using this
+ * struct should provide validation when accessing its fields.
+ */
+ kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ICMSG_HDR];
- /*
- * Get the information from the
- * user-mode component.
- * component. This transaction will be
- * completed when we get the value from
- * the user-mode component.
- * Set a timeout to deal with
- * user-mode not responding.
- */
- schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_timeout_work,
- HV_UTIL_TIMEOUT * HZ);
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
- return;
+ kvp_transaction.recv_len = recvlen;
+ kvp_transaction.recv_req_id = requestid;
+ kvp_transaction.kvp_msg = kvp_msg;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ return;
}
+ kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
+ /*
+ * Get the information from the
+ * user-mode component. This transaction will be
+ * completed when we get the value from
+ * the user-mode component.
+ * Set a timeout to deal with
+ * user-mode not responding.
+ */
+ schedule_work(&kvp_sendkey_work);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
- vmbus_sendpacket(channel, recv_buffer,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ return;
- host_negotiatied = NEGO_FINISHED;
- hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ } else {
+ pr_err_ratelimited("KVP request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
+ host_negotiatied = NEGO_FINISHED;
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static void kvp_on_reset(void)
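fcopy, KVP, VSS, and the hv_util services below all converge on one hardened callback skeleton: check the vmbus_recvpacket() return value, bail out on empty packets, require at least ICMSG_HDR bytes before touching headers, dispatch on an explicit icmsgtype whitelist, and enforce a payload-specific minimum length before casting. Condensed into one hypothetical service (illustrative only; the example_* names are not from this patch):

/* Schematic of the shared hardening pattern; example_* is hypothetical. */
static u8 recv_buffer[HV_HYP_PAGE_SIZE];

static void example_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	struct icmsg_hdr *icmsghdrp;
	u32 recvlen;
	u64 requestid;

	if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE,
			     &recvlen, &requestid))
		return;				/* read failure */
	if (!recvlen)
		return;				/* nothing pending */
	if (recvlen < ICMSG_HDR)
		return;				/* headers would be truncated */

	icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		/* vmbus_prep_negotiate_resp() now re-validates against recvlen */
	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
		if (recvlen < ICMSG_HDR + sizeof(u64))
			return;			/* payload-specific minimum */
		/* handle the message */
	} else {
		return;				/* reject unexpected types */
	}

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);
}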
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 783779e4cc1a..2267bd4c3472 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -298,49 +298,64 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
- &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp,
- recv_buffer, fw_versions, FW_VER_COUNT,
- vss_versions, VSS_VER_COUNT,
- NULL, &vss_srv_version)) {
-
- pr_info("VSS IC version %d.%d\n",
- vss_srv_version >> 16,
- vss_srv_version & 0xFFFF);
- }
- } else {
- vss_msg = (struct hv_vss_msg *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
-
- /*
- * Stash away this global state for completing the
- * transaction; note transactions are serialized.
- */
-
- vss_transaction.recv_len = recvlen;
- vss_transaction.recv_req_id = requestid;
- vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
-
- schedule_work(&vss_handle_request_work);
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ vss_versions, VSS_VER_COUNT,
+ NULL, &vss_srv_version)) {
+
+ pr_info("VSS IC version %d.%d\n",
+ vss_srv_version >> 16,
+ vss_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
+ /* Ensure recvlen is big enough to contain hv_vss_msg */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
+ pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
+ recvlen);
return;
}
+ vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
+ vss_transaction.recv_len = recvlen;
+ vss_transaction.recv_req_id = requestid;
+ vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
- vmbus_sendpacket(channel, recv_buffer,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ schedule_work(&vss_handle_request_work);
+ return;
+ } else {
+ pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
+ ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
static void vss_on_reset(void)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 05566ecdbe4b..e4aefeb330da 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -195,73 +195,88 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
- vmbus_recvpacket(channel, shut_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
+ pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
+ return;
+ }
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr)];
+ if (!recvlen)
+ return;
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
- fw_versions, FW_VER_COUNT,
- sd_versions, SD_VER_COUNT,
- NULL, &sd_srv_version)) {
- pr_info("Shutdown IC version %d.%d\n",
- sd_srv_version >> 16,
- sd_srv_version & 0xFFFF);
- }
- } else {
- shutdown_msg =
- (struct shutdown_msg_data *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
- /*
- * shutdown_msg->flags can be 0(shut down), 2(reboot),
- * or 4(hibernate). It may bitwise-OR 1, which means
- * performing the request by force. Linux always tries
- * to perform the request by force.
- */
- switch (shutdown_msg->flags) {
- case 0:
- case 1:
- icmsghdrp->status = HV_S_OK;
- work = &shutdown_work;
- pr_info("Shutdown request received -"
- " graceful shutdown initiated\n");
- break;
- case 2:
- case 3:
- icmsghdrp->status = HV_S_OK;
- work = &restart_work;
- pr_info("Restart request received -"
- " graceful restart initiated\n");
- break;
- case 4:
- case 5:
- pr_info("Hibernation request received\n");
- icmsghdrp->status = hibernation_supported ?
- HV_S_OK : HV_E_FAIL;
- if (hibernation_supported)
- work = &hibernate_context.work;
- break;
- default:
- icmsghdrp->status = HV_E_FAIL;
- pr_info("Shutdown request received -"
- " Invalid request\n");
- break;
- }
+ icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ shut_txf_buf, recvlen,
+ fw_versions, FW_VER_COUNT,
+ sd_versions, SD_VER_COUNT,
+ NULL, &sd_srv_version)) {
+ pr_info("Shutdown IC version %d.%d\n",
+ sd_srv_version >> 16,
+ sd_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
+ /* Ensure recvlen is big enough to contain shutdown_msg_data struct */
+ if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
+ pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
+ recvlen);
+ return;
}
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, shut_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];
+
+ /*
+ * shutdown_msg->flags can be 0(shut down), 2(reboot),
+ * or 4(hibernate). It may bitwise-OR 1, which means
+ * performing the request by force. Linux always tries
+ * to perform the request by force.
+ */
+ switch (shutdown_msg->flags) {
+ case 0:
+ case 1:
+ icmsghdrp->status = HV_S_OK;
+ work = &shutdown_work;
+ pr_info("Shutdown request received - graceful shutdown initiated\n");
+ break;
+ case 2:
+ case 3:
+ icmsghdrp->status = HV_S_OK;
+ work = &restart_work;
+ pr_info("Restart request received - graceful restart initiated\n");
+ break;
+ case 4:
+ case 5:
+ pr_info("Hibernation request received\n");
+ icmsghdrp->status = hibernation_supported ?
+ HV_S_OK : HV_E_FAIL;
+ if (hibernation_supported)
+ work = &hibernate_context.work;
+ break;
+ default:
+ icmsghdrp->status = HV_E_FAIL;
+ pr_info("Shutdown request received - Invalid request\n");
+ break;
+ }
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, shut_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
if (work)
schedule_work(work);
}
@@ -396,19 +411,27 @@ static void timesync_onchannelcallback(void *context)
HV_HYP_PAGE_SIZE, &recvlen,
&requestid);
if (ret) {
- pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
- ret);
+ pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
break;
}
if (!recvlen)
break;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ time_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
ts_versions, TS_VER_COUNT,
NULL, &ts_srv_version)) {
@@ -416,33 +439,44 @@ static void timesync_onchannelcallback(void *context)
ts_srv_version >> 16,
ts_srv_version & 0xFFFF);
}
- } else {
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
if (ts_srv_version > TS_VERSION_3) {
- refdata = (struct ictimesync_ref_data *)
- &time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read ictimesync_ref_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
+ pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];
adj_guesttime(refdata->parenttime,
refdata->vmreferencetime,
refdata->flags);
} else {
- timedatap = (struct ictimesync_data *)
- &time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read ictimesync_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
+ pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];
+
adj_guesttime(timedatap->parenttime,
hv_read_reference_counter(),
timedatap->flags);
}
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, time_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
}
@@ -462,18 +496,28 @@ static void heartbeat_onchannelcallback(void *context)
while (1) {
- vmbus_recvpacket(channel, hbeat_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
+ &recvlen, &requestid)) {
+ pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
+ return;
+ }
if (!recvlen)
break;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
- hbeat_txf_buf,
+ hbeat_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
hb_versions, HB_VER_COUNT,
NULL, &hb_srv_version)) {
@@ -482,21 +526,31 @@ static void heartbeat_onchannelcallback(void *context)
hb_srv_version >> 16,
hb_srv_version & 0xFFFF);
}
- } else {
- heartbeat_msg =
- (struct heartbeat_msg_data *)&hbeat_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
+ /*
+ * Ensure recvlen is big enough to read seq_num. Reserved area is not
+ * included in the check as the host may not fill it up entirely
+ */
+ if (recvlen < ICMSG_HDR + sizeof(u64)) {
+ pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];
heartbeat_msg->seq_num += 1;
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, hbeat_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 502f8cd95f6d..10dce9f91216 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -678,6 +678,23 @@ static const struct attribute_group vmbus_dev_group = {
};
__ATTRIBUTE_GROUPS(vmbus_dev);
+/* Set up the attribute for /sys/bus/vmbus/hibernation */
+static ssize_t hibernation_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
+}
+
+static BUS_ATTR_RO(hibernation);
+
+static struct attribute *vmbus_bus_attrs[] = {
+ &bus_attr_hibernation.attr,
+ NULL,
+};
+static const struct attribute_group vmbus_bus_group = {
+ .attrs = vmbus_bus_attrs,
+};
+__ATTRIBUTE_GROUPS(vmbus_bus);
+
/*
* vmbus_uevent - add uevent for our device
*
@@ -1024,6 +1041,7 @@ static struct bus_type hv_bus = {
.uevent = vmbus_uevent,
.dev_groups = vmbus_dev_groups,
.drv_groups = vmbus_drv_groups,
+ .bus_groups = vmbus_bus_groups,
.pm = &vmbus_pm,
};
@@ -1054,12 +1072,14 @@ void vmbus_on_msg_dpc(unsigned long data)
{
struct hv_per_cpu_context *hv_cpu = (void *)data;
void *page_addr = hv_cpu->synic_message_page;
- struct hv_message *msg = (struct hv_message *)page_addr +
+ struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct vmbus_channel_message_header *hdr;
+ enum vmbus_channel_message_type msgtype;
const struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
- u32 message_type = msg->header.message_type;
+ __u8 payload_size;
+ u32 message_type;
/*
* 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
@@ -1068,45 +1088,52 @@ void vmbus_on_msg_dpc(unsigned long data)
*/
BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
+ /*
+ * Since the message is in memory shared with the host, an erroneous or
+ * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
+ * or individual message handlers are executing; to prevent this, copy
+ * the message into private memory.
+ */
+ memcpy(&msg_copy, msg, sizeof(struct hv_message));
+
+ message_type = msg_copy.header.message_type;
if (message_type == HVMSG_NONE)
/* no msg */
return;
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
+ msgtype = hdr->msgtype;
trace_vmbus_on_msg_dpc(hdr);
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+ if (msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
goto msg_handled;
}
- if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
- WARN_ONCE(1, "payload size is too large (%d)\n",
- msg->header.payload_size);
+ payload_size = msg_copy.header.payload_size;
+ if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
+ WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
goto msg_handled;
}
- entry = &channel_message_table[hdr->msgtype];
+ entry = &channel_message_table[msgtype];
if (!entry->message_handler)
goto msg_handled;
- if (msg->header.payload_size < entry->min_payload_len) {
- WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
- hdr->msgtype, msg->header.payload_size);
+ if (payload_size < entry->min_payload_len) {
+ WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
goto msg_handled;
}
if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
- GFP_ATOMIC);
+ ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
if (ctx == NULL)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(msg->header) +
- msg->header.payload_size);
+ memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
/*
* The host can generate a rescind message while we
@@ -1115,7 +1142,7 @@ void vmbus_on_msg_dpc(unsigned long data)
* by offer_in_progress and by channel_mutex. See also the
* inline comments in vmbus_onoffer_rescind().
*/
- switch (hdr->msgtype) {
+ switch (msgtype) {
case CHANNELMSG_RESCIND_CHANNELOFFER:
/*
* If we are handling the rescind message;
@@ -2550,7 +2577,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2592,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
@@ -2620,6 +2645,9 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition)
+ return 0;
+
init_completion(&probe_event);
/*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 1ecf697d8d99..54f04e61fb83 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -38,19 +38,6 @@ config HWMON_DEBUG_CHIP
comment "Native drivers"
-config SENSORS_AB8500
- tristate "AB8500 thermal monitoring"
- depends on AB8500_GPADC && AB8500_BM && (IIO = y)
- default n
- help
- If you say yes here you get support for the thermal sensor part
- of the AB8500 chip. The driver includes thermal management for
- AB8500 die and two GPADC channels. The GPADC channel are preferably
- used to access sensors outside the AB8500 chip.
-
- This driver can also be built as a module. If so, the module
- will be called abx500-temp.
-
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
depends on X86 && DMI
@@ -257,6 +244,16 @@ config SENSORS_ADT7475
This driver can also be built as a module. If so, the module
will be called adt7475.
+config SENSORS_AHT10
+ tristate "Aosong AHT10"
+ depends on I2C
+ help
+ If you say yes here, you get support for the Aosong AHT10
+ temperature and humidity sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called aht10.
+
config SENSORS_AS370
tristate "Synaptics AS370 SoC hardware monitoring driver"
help
@@ -1136,6 +1133,17 @@ config SENSORS_TC654
This driver can also be built as a module. If so, the module
will be called tc654.
+config SENSORS_TPS23861
+ tristate "Texas Instruments TPS23861 PoE PSE"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments
+ TPS23861 802.3at PoE PSE chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps23861.
+
config SENSORS_MENF21BMC_HWMON
tristate "MEN 14F021P00 BMC Hardware Monitoring"
depends on MFD_MENF21BMC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 09a86c5e1d29..fe38e8a5c979 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
-obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
@@ -45,6 +44,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
+obj-$(CONFIG_SENSORS_AHT10) += aht10.o
obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
@@ -144,6 +144,7 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_TPS23861) += tps23861.o
obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_MR75203) += mr75203.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
deleted file mode 100644
index 53f3379d799d..000000000000
--- a/drivers/hwmon/ab8500.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.org>
- *
- * When the AB8500 thermal warning temperature is reached (threshold cannot
- * be changed by SW), an interrupt is set, and if no further action is taken
- * within a certain time frame, kernel_power_off will be called.
- *
- * When AB8500 thermal shutdown temperature is reached a hardware shutdown of
- * the AB8500 will occur.
- */
-
-#include <linux/err.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/power/ab8500.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/iio/consumer.h>
-#include "abx500.h"
-
-#define DEFAULT_POWER_OFF_DELAY (HZ * 10)
-#define THERMAL_VCC 1800
-#define PULL_UP_RESISTOR 47000
-
-#define AB8500_SENSOR_AUX1 0
-#define AB8500_SENSOR_AUX2 1
-#define AB8500_SENSOR_BTEMP_BALL 2
-#define AB8500_SENSOR_BAT_CTRL 3
-#define NUM_MONITORED_SENSORS 4
-
-struct ab8500_gpadc_cfg {
- const struct abx500_res_to_temp *temp_tbl;
- int tbl_sz;
- int vcc;
- int r_up;
-};
-
-struct ab8500_temp {
- struct iio_channel *aux1;
- struct iio_channel *aux2;
- struct ab8500_btemp *btemp;
- struct delayed_work power_off_work;
- struct ab8500_gpadc_cfg cfg;
- struct abx500_temp *abx500_data;
-};
-
-/*
- * The hardware connection is like this:
- * VCC----[ R_up ]-----[ NTC ]----GND
- * where R_up is pull-up resistance, and GPADC measures voltage on NTC.
- * and res_to_temp table is strictly sorted by falling resistance values.
- */
-static int ab8500_voltage_to_temp(struct ab8500_gpadc_cfg *cfg,
- int v_ntc, int *temp)
-{
- int r_ntc, i = 0, tbl_sz = cfg->tbl_sz;
- const struct abx500_res_to_temp *tbl = cfg->temp_tbl;
-
- if (cfg->vcc < 0 || v_ntc >= cfg->vcc)
- return -EINVAL;
-
- r_ntc = v_ntc * cfg->r_up / (cfg->vcc - v_ntc);
- if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist)
- return -EINVAL;
-
- while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) &&
- i < tbl_sz - 2)
- i++;
-
- /* return milli-Celsius */
- *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *
- (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
-
- return 0;
-}
-
-static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
-{
- int voltage, ret;
- struct ab8500_temp *ab8500_data = data->plat_data;
-
- if (sensor == AB8500_SENSOR_BTEMP_BALL) {
- *temp = ab8500_btemp_get_temp(ab8500_data->btemp);
- } else if (sensor == AB8500_SENSOR_BAT_CTRL) {
- *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
- } else if (sensor == AB8500_SENSOR_AUX1) {
- ret = iio_read_channel_processed(ab8500_data->aux1, &voltage);
- if (ret < 0)
- return ret;
- ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
- if (ret < 0)
- return ret;
- } else if (sensor == AB8500_SENSOR_AUX2) {
- ret = iio_read_channel_processed(ab8500_data->aux2, &voltage);
- if (ret < 0)
- return ret;
- ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static void ab8500_thermal_power_off(struct work_struct *work)
-{
- struct ab8500_temp *ab8500_data = container_of(work,
- struct ab8500_temp, power_off_work.work);
- struct abx500_temp *abx500_data = ab8500_data->abx500_data;
-
- dev_warn(&abx500_data->pdev->dev, "Power off due to critical temp\n");
-
- kernel_power_off();
-}
-
-static ssize_t ab8500_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "ab8500\n");
-}
-
-static ssize_t ab8500_show_label(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- char *label;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int index = attr->index;
-
- switch (index) {
- case 1:
- label = "ext_adc1";
- break;
- case 2:
- label = "ext_adc2";
- break;
- case 3:
- label = "bat_temp";
- break;
- case 4:
- label = "bat_ctrl";
- break;
- default:
- return -EINVAL;
- }
-
- return sprintf(buf, "%s\n", label);
-}
-
-static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
-{
- struct ab8500_temp *ab8500_data = data->plat_data;
-
- dev_warn(&data->pdev->dev, "Power off in %d s\n",
- DEFAULT_POWER_OFF_DELAY / HZ);
-
- schedule_delayed_work(&ab8500_data->power_off_work,
- DEFAULT_POWER_OFF_DELAY);
- return 0;
-}
-
-int abx500_hwmon_init(struct abx500_temp *data)
-{
- struct ab8500_temp *ab8500_data;
-
- ab8500_data = devm_kzalloc(&data->pdev->dev, sizeof(*ab8500_data),
- GFP_KERNEL);
- if (!ab8500_data)
- return -ENOMEM;
-
- ab8500_data->btemp = ab8500_btemp_get();
- if (IS_ERR(ab8500_data->btemp))
- return PTR_ERR(ab8500_data->btemp);
-
- INIT_DELAYED_WORK(&ab8500_data->power_off_work,
- ab8500_thermal_power_off);
-
- ab8500_data->cfg.vcc = THERMAL_VCC;
- ab8500_data->cfg.r_up = PULL_UP_RESISTOR;
- ab8500_data->cfg.temp_tbl = ab8500_temp_tbl_a_thermistor;
- ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
-
- data->plat_data = ab8500_data;
- ab8500_data->aux1 = devm_iio_channel_get(&data->pdev->dev, "aux1");
- if (IS_ERR(ab8500_data->aux1)) {
- if (PTR_ERR(ab8500_data->aux1) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&data->pdev->dev, "failed to get AUX1 ADC channel\n");
- return PTR_ERR(ab8500_data->aux1);
- }
- ab8500_data->aux2 = devm_iio_channel_get(&data->pdev->dev, "aux2");
- if (IS_ERR(ab8500_data->aux2)) {
- if (PTR_ERR(ab8500_data->aux2) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&data->pdev->dev, "failed to get AUX2 ADC channel\n");
- return PTR_ERR(ab8500_data->aux2);
- }
-
- data->gpadc_addr[0] = AB8500_SENSOR_AUX1;
- data->gpadc_addr[1] = AB8500_SENSOR_AUX2;
- data->gpadc_addr[2] = AB8500_SENSOR_BTEMP_BALL;
- data->gpadc_addr[3] = AB8500_SENSOR_BAT_CTRL;
- data->monitored_sensors = NUM_MONITORED_SENSORS;
-
- data->ops.read_sensor = ab8500_read_sensor;
- data->ops.irq_handler = ab8500_temp_irq_handler;
- data->ops.show_name = ab8500_show_name;
- data->ops.show_label = ab8500_show_label;
- data->ops.is_visible = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(abx500_hwmon_init);
-
-MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@linaro.org>");
-MODULE_DESCRIPTION("AB8500 temperature driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
deleted file mode 100644
index 4b9648819836..000000000000
--- a/drivers/hwmon/abx500.c
+++ /dev/null
@@ -1,487 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.org>
- *
- * ABX500 does not provide auto ADC, so to monitor the required temperatures,
- * a periodic work is used. It is more important to not wake up the CPU than
- * to perform this job, hence the use of a deferred delay.
- *
- * A deferred delay for thermal monitor is considered safe because:
- * If the chip gets too hot during a sleep state it's most likely due to
- * external factors, such as the surrounding temperature. I.e. no SW decisions
- * will make any difference.
- */
-
-#include <linux/err.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/workqueue.h>
-#include "abx500.h"
-
-#define DEFAULT_MONITOR_DELAY HZ
-#define DEFAULT_MAX_TEMP 130
-
-static inline void schedule_monitor(struct abx500_temp *data)
-{
- data->work_active = true;
- schedule_delayed_work(&data->work, DEFAULT_MONITOR_DELAY);
-}
-
-static void threshold_updated(struct abx500_temp *data)
-{
- int i;
- for (i = 0; i < data->monitored_sensors; i++)
- if (data->max[i] != 0 || data->min[i] != 0) {
- schedule_monitor(data);
- return;
- }
-
- dev_dbg(&data->pdev->dev, "No active thresholds.\n");
- cancel_delayed_work_sync(&data->work);
- data->work_active = false;
-}
-
-static void gpadc_monitor(struct work_struct *work)
-{
- int temp, i, ret;
- char alarm_node[30];
- bool updated_min_alarm, updated_max_alarm;
- struct abx500_temp *data;
-
- data = container_of(work, struct abx500_temp, work.work);
- mutex_lock(&data->lock);
-
- for (i = 0; i < data->monitored_sensors; i++) {
- /* Thresholds are considered inactive if set to 0 */
- if (data->max[i] == 0 && data->min[i] == 0)
- continue;
-
- if (data->max[i] < data->min[i])
- continue;
-
- ret = data->ops.read_sensor(data, data->gpadc_addr[i], &temp);
- if (ret < 0) {
- dev_err(&data->pdev->dev, "GPADC read failed\n");
- continue;
- }
-
- updated_min_alarm = false;
- updated_max_alarm = false;
-
- if (data->min[i] != 0) {
- if (temp < data->min[i]) {
- if (data->min_alarm[i] == false) {
- data->min_alarm[i] = true;
- updated_min_alarm = true;
- }
- } else {
- if (data->min_alarm[i] == true) {
- data->min_alarm[i] = false;
- updated_min_alarm = true;
- }
- }
- }
- if (data->max[i] != 0) {
- if (temp > data->max[i]) {
- if (data->max_alarm[i] == false) {
- data->max_alarm[i] = true;
- updated_max_alarm = true;
- }
- } else if (temp < data->max[i] - data->max_hyst[i]) {
- if (data->max_alarm[i] == true) {
- data->max_alarm[i] = false;
- updated_max_alarm = true;
- }
- }
- }
-
- if (updated_min_alarm) {
- ret = sprintf(alarm_node, "temp%d_min_alarm", i + 1);
- sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
- }
- if (updated_max_alarm) {
- ret = sprintf(alarm_node, "temp%d_max_alarm", i + 1);
- sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
- }
- }
-
- schedule_monitor(data);
- mutex_unlock(&data->lock);
-}
-
-/* HWMON sysfs interfaces */
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- /* Show chip name */
- return data->ops.show_name(dev, devattr, buf);
-}
-
-static ssize_t label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- /* Show each sensor label */
- return data->ops.show_label(dev, devattr, buf);
-}
-
-static ssize_t input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int ret, temp;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- u8 gpadc_addr = data->gpadc_addr[attr->index];
-
- ret = data->ops.read_sensor(data, gpadc_addr, &temp);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", temp);
-}
-
-/* Set functions (RW nodes) */
-static ssize_t min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtol(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->min[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtol(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->max[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t max_hyst_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtoul(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->max_hyst[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-/* Show functions (RO nodes) */
-static ssize_t min_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->min[attr->index]);
-}
-
-static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->max[attr->index]);
-}
-
-static ssize_t max_hyst_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
-}
-
-static ssize_t min_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%d\n", data->min_alarm[attr->index]);
-}
-
-static ssize_t max_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%d\n", data->max_alarm[attr->index]);
-}
-
-static umode_t abx500_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct abx500_temp *data = dev_get_drvdata(dev);
-
- if (data->ops.is_visible)
- return data->ops.is_visible(attr, n);
-
- return attr->mode;
-}
-
-/* Chip name, required by hwmon */
-static SENSOR_DEVICE_ATTR_RO(name, name, 0);
-
-/* GPADC - SENSOR1 */
-static SENSOR_DEVICE_ATTR_RO(temp1_label, label, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_input, input, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, min, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, max_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, min_alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, max_alarm, 0);
-
-/* GPADC - SENSOR2 */
-static SENSOR_DEVICE_ATTR_RO(temp2_label, label, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, input, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_min, min, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, max_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, min_alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, max_alarm, 1);
-
-/* GPADC - SENSOR3 */
-static SENSOR_DEVICE_ATTR_RO(temp3_label, label, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, input, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_min, min, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, max, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, max_hyst, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, min_alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, max_alarm, 2);
-
-/* GPADC - SENSOR4 */
-static SENSOR_DEVICE_ATTR_RO(temp4_label, label, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, input, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_min, min, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, max, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_max_hyst, max_hyst, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_min_alarm, min_alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, max_alarm, 3);
-
-static struct attribute *abx500_temp_attributes[] = {
- &sensor_dev_attr_name.dev_attr.attr,
-
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_min.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group abx500_temp_group = {
- .attrs = abx500_temp_attributes,
- .is_visible = abx500_attrs_visible,
-};
-
-static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
-{
- struct platform_device *pdev = irq_data;
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- data->ops.irq_handler(irq, data);
- return IRQ_HANDLED;
-}
-
-static int setup_irqs(struct platform_device *pdev)
-{
- int ret;
- int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
-
- if (irq < 0) {
- dev_err(&pdev->dev, "Get irq by name failed\n");
- return irq;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- abx500_temp_irq_handler, 0, "abx500-temp", pdev);
- if (ret < 0)
- dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
-
- return ret;
-}
-
-static int abx500_temp_probe(struct platform_device *pdev)
-{
- struct abx500_temp *data;
- int err;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->pdev = pdev;
- mutex_init(&data->lock);
-
- /* Chip specific initialization */
- err = abx500_hwmon_init(data);
- if (err < 0 || !data->ops.read_sensor || !data->ops.show_name ||
- !data->ops.show_label)
- return err;
-
- INIT_DEFERRABLE_WORK(&data->work, gpadc_monitor);
-
- platform_set_drvdata(pdev, data);
-
- err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
- if (err < 0) {
- dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
- return err;
- }
-
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
- goto exit_sysfs_group;
- }
-
- if (data->ops.irq_handler) {
- err = setup_irqs(pdev);
- if (err < 0)
- goto exit_hwmon_reg;
- }
- return 0;
-
-exit_hwmon_reg:
- hwmon_device_unregister(data->hwmon_dev);
-exit_sysfs_group:
- sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
- return err;
-}
-
-static int abx500_temp_remove(struct platform_device *pdev)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- cancel_delayed_work_sync(&data->work);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
-
- return 0;
-}
-
-static int abx500_temp_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- if (data->work_active)
- cancel_delayed_work_sync(&data->work);
-
- return 0;
-}
-
-static int abx500_temp_resume(struct platform_device *pdev)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- if (data->work_active)
- schedule_monitor(data);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id abx500_temp_match[] = {
- { .compatible = "stericsson,abx500-temp" },
- {},
-};
-MODULE_DEVICE_TABLE(of, abx500_temp_match);
-#endif
-
-static struct platform_driver abx500_temp_driver = {
- .driver = {
- .name = "abx500-temp",
- .of_match_table = of_match_ptr(abx500_temp_match),
- },
- .suspend = abx500_temp_suspend,
- .resume = abx500_temp_resume,
- .probe = abx500_temp_probe,
- .remove = abx500_temp_remove,
-};
-
-module_platform_driver(abx500_temp_driver);
-
-MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
-MODULE_DESCRIPTION("ABX500 temperature driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
deleted file mode 100644
index 4517594260f2..000000000000
--- a/drivers/hwmon/abx500.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef _ABX500_H
-#define _ABX500_H
-
-#define NUM_SENSORS 5
-
-struct abx500_temp;
-
-/*
- * struct abx500_temp_ops - abx500 chip specific ops
- * @read_sensor: reads gpadc output
- * @irq_handler: irq handler
- * @show_name: hwmon device name
- * @show_label: hwmon attribute label
- * @is_visible: is attribute visible
- */
-struct abx500_temp_ops {
- int (*read_sensor)(struct abx500_temp *, u8, int *);
- int (*irq_handler)(int, struct abx500_temp *);
- ssize_t (*show_name)(struct device *,
- struct device_attribute *, char *);
- ssize_t (*show_label) (struct device *,
- struct device_attribute *, char *);
- int (*is_visible)(struct attribute *, int);
-};
-
-/*
- * struct abx500_temp - representation of temp mon device
- * @pdev: platform device
- * @hwmon_dev: hwmon device
- * @ops: abx500 chip specific ops
- * @gpadc_addr: gpadc channel address
- * @min: sensor temperature min value
- * @max: sensor temperature max value
- * @max_hyst: sensor temperature hysteresis value for max limit
- * @min_alarm: sensor temperature min alarm
- * @max_alarm: sensor temperature max alarm
- * @work: delayed work scheduled to monitor temperature periodically
- * @work_active: True if work is active
- * @lock: mutex
- * @monitored_sensors: number of monitored sensors
- * @plat_data: private usage, usually points to platform specific data
- */
-struct abx500_temp {
- struct platform_device *pdev;
- struct device *hwmon_dev;
- struct abx500_temp_ops ops;
- u8 gpadc_addr[NUM_SENSORS];
- unsigned long min[NUM_SENSORS];
- unsigned long max[NUM_SENSORS];
- unsigned long max_hyst[NUM_SENSORS];
- bool min_alarm[NUM_SENSORS];
- bool max_alarm[NUM_SENSORS];
- struct delayed_work work;
- bool work_active;
- struct mutex lock;
- int monitored_sensors;
- void *plat_data;
-};
-
-int abx500_hwmon_init(struct abx500_temp *data);
-
-#endif /* _ABX500_H */
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 848718ab7312..7d3ddcba34ce 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -161,7 +161,7 @@ static ssize_t set_avg_interval(struct device *dev,
mutex_lock(&resource->lock);
status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
&args, &data);
- if (!ACPI_FAILURE(status))
+ if (ACPI_SUCCESS(status))
resource->avg_interval = temp;
mutex_unlock(&resource->lock);
@@ -232,7 +232,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
mutex_lock(&resource->lock);
status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
&args, &data);
- if (!ACPI_FAILURE(status))
+ if (ACPI_SUCCESS(status))
resource->cap = temp;
mutex_unlock(&resource->lock);
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
new file mode 100644
index 000000000000..2d9770cb4401
--- /dev/null
+++ b/drivers/hwmon/aht10.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * aht10.c - Linux hwmon driver for AHT10 Temperature and Humidity sensor
+ * Copyright (C) 2020 Johannes Cornelis Draaijer
+ */
+
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+
+#define AHT10_MEAS_SIZE 6
+
+/*
+ * Poll intervals (in milliseconds)
+ */
+#define AHT10_DEFAULT_MIN_POLL_INTERVAL 2000
+#define AHT10_MIN_POLL_INTERVAL 2000
+
+/*
+ * I2C command delays (in microseconds)
+ */
+#define AHT10_MEAS_DELAY 80000
+#define AHT10_CMD_DELAY 350000
+#define AHT10_DELAY_EXTRA 100000
+
+/*
+ * Command bytes
+ */
+#define AHT10_CMD_INIT 0b11100001
+#define AHT10_CMD_MEAS 0b10101100
+#define AHT10_CMD_RST 0b10111010
+
+/*
+ * Flags in the answer byte/command
+ */
+#define AHT10_CAL_ENABLED BIT(3)
+#define AHT10_BUSY BIT(7)
+#define AHT10_MODE_NOR (BIT(5) | BIT(6))
+#define AHT10_MODE_CYC BIT(5)
+#define AHT10_MODE_CMD BIT(6)
+
+#define AHT10_MAX_POLL_INTERVAL_LEN 30
+
+/**
+ * struct aht10_data - All the data required to operate an AHT10 chip
+ * @client: the i2c client associated with the AHT10
+ * @lock: a mutex that is used to prevent parallel access to the
+ * i2c client
+ * @min_poll_interval: the minimum poll interval
+ * While the poll rate limit is not 100% necessary,
+ * the datasheet recommends that a measurement
+ * is not performed too often to prevent
+ * the chip from warming up due to the heat it generates.
+ * If it's unwanted, it can be disabled by
+ * setting it to 0. Default value is 2000 ms.
+ * @previous_poll_time: the previous time that the AHT10
+ * was polled
+ * @temperature: the latest temperature value received from
+ * the AHT10
+ * @humidity: the latest humidity value received from the
+ * AHT10
+ */
+struct aht10_data {
+ struct i2c_client *client;
+ /*
+ * Prevent simultaneous access to the i2c
+ * client and previous_poll_time
+ */
+ struct mutex lock;
+ ktime_t min_poll_interval;
+ ktime_t previous_poll_time;
+ int temperature;
+ int humidity;
+};
+
+/**
+ * aht10_init() - Initialize an AHT10 chip
+ * @data: the data associated with this AHT10 chip
+ * Return: 0 if successful, a negative error code otherwise
+ */
+static int aht10_init(struct aht10_data *data)
+{
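+	/* Init command: enable calibration and select cyclic measurement mode */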
+ const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ 0x00};
+ int res;
+ u8 status;
+ struct i2c_client *client = data->client;
+
+ res = i2c_master_send(client, cmd_init, 3);
+ if (res < 0)
+ return res;
+
+ usleep_range(AHT10_CMD_DELAY, AHT10_CMD_DELAY +
+ AHT10_DELAY_EXTRA);
+
+ res = i2c_master_recv(client, &status, 1);
+ if (res != 1)
+ return -ENODATA;
+
+ if (status & AHT10_BUSY)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * aht10_polltime_expired() - check if the minimum poll interval has
+ * expired
+ * @data: the data containing the time to compare
+ * Return: 1 if the minimum poll interval has expired, 0 if not
+ */
+static int aht10_polltime_expired(struct aht10_data *data)
+{
+ ktime_t current_time = ktime_get_boottime();
+ ktime_t difference = ktime_sub(current_time, data->previous_poll_time);
+
+ return ktime_after(difference, data->min_poll_interval);
+}
+
+/**
+ * aht10_read_values() - read and parse the raw data from the AHT10
+ * @data: the aht10_data used for locking and to store the results
+ * Return: 0 if successful, a negative error code otherwise
+ */
+static int aht10_read_values(struct aht10_data *data)
+{
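+	/* Measurement trigger; 0x33 and 0x00 are fixed parameter bytes */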
+ const u8 cmd_meas[] = {AHT10_CMD_MEAS, 0x33, 0x00};
+ u32 temp, hum;
+ int res;
+ u8 raw_data[AHT10_MEAS_SIZE];
+ struct i2c_client *client = data->client;
+
+ mutex_lock(&data->lock);
+ if (aht10_polltime_expired(data)) {
+ res = i2c_master_send(client, cmd_meas, sizeof(cmd_meas));
+ if (res < 0) {
+ mutex_unlock(&data->lock);
+ return res;
+ }
+
+ usleep_range(AHT10_MEAS_DELAY,
+ AHT10_MEAS_DELAY + AHT10_DELAY_EXTRA);
+
+ res = i2c_master_recv(client, raw_data, AHT10_MEAS_SIZE);
+ if (res != AHT10_MEAS_SIZE) {
+ mutex_unlock(&data->lock);
+ if (res >= 0)
+ return -ENODATA;
+ else
+ return res;
+ }
+
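+		/*
+		 * Byte 0 of the answer is the status byte; bytes 1..5 pack
+		 * two 20-bit raw values, humidity in the upper bits and
+		 * temperature in the lower bits, sharing byte 3.
+		 */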
+ hum = ((u32)raw_data[1] << 12u) |
+ ((u32)raw_data[2] << 4u) |
+ ((raw_data[3] & 0xF0u) >> 4u);
+
+ temp = ((u32)(raw_data[3] & 0x0Fu) << 16u) |
+ ((u32)raw_data[4] << 8u) |
+ raw_data[5];
+
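+		/*
+		 * Scale to hwmon units: both raw values span 2^20 counts,
+		 * so temperature = raw * 200000 / 2^20 - 50000 (in
+		 * millidegrees) and humidity = raw * 100000 / 2^20 (in
+		 * millipercent); the shift expressions below compute the
+		 * same ratios in integer math.
+		 */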
+ temp = ((temp * 625) >> 15u) * 10;
+ hum = ((hum * 625) >> 16u) * 10;
+
+ data->temperature = (int)temp - 50000;
+ data->humidity = hum;
+ data->previous_poll_time = ktime_get_boottime();
+ }
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+/**
+ * aht10_interval_write() - store the given minimum poll interval.
+ * Return: 0 on success, -EINVAL if a value lower than the
+ * AHT10_MIN_POLL_INTERVAL is given
+ */
+static ssize_t aht10_interval_write(struct aht10_data *data,
+ long val)
+{
+	data->min_poll_interval = ms_to_ktime(clamp_val(val, AHT10_MIN_POLL_INTERVAL, LONG_MAX));
+ return 0;
+}
+
+/**
+ * aht10_interval_read() - read the minimum poll interval
+ * in milliseconds
+ */
+static ssize_t aht10_interval_read(struct aht10_data *data,
+ long *val)
+{
+ *val = ktime_to_ms(data->min_poll_interval);
+ return 0;
+}
+
+/**
+ * aht10_temperature1_read() - read the temperature in millidegrees
+ */
+static int aht10_temperature1_read(struct aht10_data *data, long *val)
+{
+ int res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ *val = data->temperature;
+ return 0;
+}
+
+/**
+ * aht10_humidity1_read() - read the relative humidity in millipercent
+ */
+static int aht10_humidity1_read(struct aht10_data *data, long *val)
+{
+ int res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ *val = data->humidity;
+ return 0;
+}
+
+static umode_t aht10_hwmon_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ case hwmon_chip:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int aht10_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct aht10_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return aht10_temperature1_read(data, val);
+ case hwmon_humidity:
+ return aht10_humidity1_read(data, val);
+ case hwmon_chip:
+ return aht10_interval_read(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aht10_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct aht10_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return aht10_interval_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *aht10_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL,
+};
+
+static const struct hwmon_ops aht10_hwmon_ops = {
+ .is_visible = aht10_hwmon_visible,
+ .read = aht10_hwmon_read,
+ .write = aht10_hwmon_write,
+};
+
+static const struct hwmon_chip_info aht10_chip_info = {
+ .ops = &aht10_hwmon_ops,
+ .info = aht10_info,
+};
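+	/* 0.652 degrees Celsius per LSB with a -20 degree offset, in millidegrees */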
+
+static int aht10_probe(struct i2c_client *client,
+ const struct i2c_device_id *aht10_id)
+{
+ struct device *device = &client->dev;
+ struct device *hwmon_dev;
+ struct aht10_data *data;
+ int res;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENOENT;
+
+ data = devm_kzalloc(device, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->min_poll_interval = ms_to_ktime(AHT10_DEFAULT_MIN_POLL_INTERVAL);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ res = aht10_init(data);
+ if (res < 0)
+ return res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(device,
+ client->name,
+ data,
+ &aht10_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id aht10_id[] = {
+ { "aht10", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, aht10_id);
+
+static struct i2c_driver aht10_driver = {
+ .driver = {
+ .name = "aht10",
+ },
+ .probe = aht10_probe,
+ .id_table = aht10_id,
+};
+
+module_i2c_driver(aht10_driver);
+
+MODULE_AUTHOR("Johannes Cornelis Draaijer <jcdra1@gmail.com>");
+MODULE_DESCRIPTION("AHT10 Temperature and Humidity sensor driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..a86cc8d6d93d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
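+	/* Zero-terminate the config list; the array was sized cpus + sockets + 1 */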
+ s_config[i] = 0;
return 0;
}
@@ -332,6 +333,7 @@ static struct platform_device *amd_energy_platdev;
static const struct x86_cpu_id cpu_ids[] __initconst = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL),
X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x19, 0x01, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x19, 0x30, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 89207af81c48..28b137eedf2e 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -565,7 +565,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
static int applesmc_init_smcreg_try(void)
{
struct applesmc_registers *s = &smcreg;
- bool left_light_sensor = 0, right_light_sensor = 0;
+ bool left_light_sensor = false, right_light_sensor = false;
unsigned int count;
u8 tmp[1];
int ret;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 3d8239fd66ed..3cb88d6fbec0 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -620,7 +620,7 @@ static ssize_t rpm_show(struct device *dev, struct device_attribute *attr,
static umode_t pwm_is_visible(struct kobject *kobj,
struct attribute *a, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
if (!priv->pwm_present[index])
@@ -631,7 +631,7 @@ static umode_t pwm_is_visible(struct kobject *kobj,
static umode_t fan_dev_is_visible(struct kobject *kobj,
struct attribute *a, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
if (!priv->fan_tach_present[index])
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 4af2fc309c28..ed6c5df94fdf 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -299,7 +299,7 @@ static ssize_t label_show(struct device *dev,
static umode_t da9052_channel_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
struct device_attribute *dattr = container_of(attr,
struct device_attribute, attr);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index ec448f5f2dc3..73b9db9e3aab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1159,6 +1159,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
},
},
+ {
+ .ident = "Dell XPS 15 L502X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L502X"),
+ },
+ },
{ }
};
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 3ea4021f267c..befe989ca7b9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -299,7 +299,7 @@ static DEVICE_ATTR(fan1_target, 0644, fan1_input_show, set_rpm);
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
if (index == 0 && !data->alarm_gpio)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3bc2551577a3..5ff3669c2b60 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -448,7 +448,8 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->is_zen = true;
switch (boot_cpu_data.x86_model) {
- case 0x0 ... 0x1: /* Zen3 */
+ case 0x0 ... 0x1: /* Zen3 SP3/TR */
+ case 0x21: /* Zen3 Ryzen Desktop */
k10temp_get_ccd_support(pdev, data, 8);
break;
}
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index ae2b84263a44..40eab3349904 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -22,9 +22,9 @@
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -173,25 +173,15 @@ MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
static int lm70_probe(struct spi_device *spi)
{
- const struct of_device_id *of_match;
struct device *hwmon_dev;
struct lm70 *p_lm70;
int chip;
- of_match = of_match_device(lm70_of_ids, &spi->dev);
- if (of_match)
- chip = (int)(uintptr_t)of_match->data;
- else {
-#ifdef CONFIG_ACPI
- const struct acpi_device_id *acpi_match;
+ if (dev_fwnode(&spi->dev))
+ chip = (int)(uintptr_t)device_get_match_data(&spi->dev);
+ else
+ chip = spi_get_device_id(spi)->driver_data;
- acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev);
- if (acpi_match)
- chip = (int)(uintptr_t)acpi_match->driver_data;
- else
-#endif
- chip = spi_get_device_id(spi)->driver_data;
- }
/* signaling is SPI_MODE_0 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index cc7f2980fe83..f8d4534ce172 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -321,7 +321,7 @@ static SENSOR_DEVICE_ATTR_RO(gpio2_alarm, alarm, MAX6650_ALRM_GPIO2);
static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct max6650_data *data = dev_get_drvdata(dev);
struct device_attribute *devattr;
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 7f7e30f0de7b..a23047a3bfe2 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -169,6 +169,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_INTEL 0x805
#define NCT6683_CUSTOMER_ID_MITAC 0xa0e
#define NCT6683_CUSTOMER_ID_MSI 0x201
+#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1225,6 +1226,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 94f4b8b4a2ba..6a9ba23cd302 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1700,8 +1700,8 @@ static int __init pc87360_device_add(unsigned short address)
continue;
res[res_count].start = extra_isa[i];
res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
- res[res_count].name = "pc87360",
- res[res_count].flags = IORESOURCE_IO,
+ res[res_count].name = "pc87360";
+ res[res_count].flags = IORESOURCE_IO;
err = acpi_check_resource_conflict(&res[res_count]);
if (err)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 03606d4298a4..32d2fc850621 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -158,10 +158,10 @@ config SENSORS_MAX16064
be called max16064.
config SENSORS_MAX16601
- tristate "Maxim MAX16601"
+ tristate "Maxim MAX16508, MAX16601"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX16601.
+ MAX16508 and MAX16601.
This driver can also be built as a module. If so, the module will
be called max16601.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d6bbbb223871..ffde5aaa5036 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -472,7 +472,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
};
static struct pmbus_platform_data ibm_cffps_pdata = {
- .flags = PMBUS_SKIP_STATUS_CHECK,
+ .flags = PMBUS_SKIP_STATUS_CHECK | PMBUS_NO_CAPABILITY,
};
static int ibm_cffps_probe(struct i2c_client *client)
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index c75a6bf39641..e9a66fd9e144 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -371,21 +371,18 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIN_OV_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0, reg, word);
- pmbus_clear_cache(client);
break;
case PMBUS_IIN_OC_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_IIN_OC_WARN_LIMIT,
word);
- pmbus_clear_cache(client);
break;
case PMBUS_PIN_OP_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_PIN_OP_WARN_LIMIT,
word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
@@ -393,7 +390,6 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_UV_WARN_LIMIT, word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
@@ -401,7 +397,6 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_OV_WARN_LIMIT, word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index a960b86e72d2..0d1204c2dd54 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware monitoring driver for Maxim MAX16601
+ * Hardware monitoring driver for Maxim MAX16508 and MAX16601.
*
* Implementation notes:
*
- * Ths chip supports two rails, VCORE and VSA. Telemetry information for the
- * two rails is reported in two subsequent I2C addresses. The driver
+ * This chip series supports two rails, VCORE and VSA. Telemetry information
+ * for the two rails is reported in two subsequent I2C addresses. The driver
* instantiates a dummy I2C client at the second I2C address to report
* information for the VSA rail in a single instance of the driver.
* Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2.
@@ -31,6 +31,9 @@
#include "pmbus.h"
+enum chips { max16508, max16601 };
+
+#define REG_DEFAULT_NUM_POP 0xc4
#define REG_SETPT_DVID 0xd1
#define DAC_10MV_MODE BIT(4)
#define REG_IOUT_AVG_PK 0xee
@@ -40,7 +43,10 @@
#define CORE_RAIL_INDICATOR BIT(7)
#define REG_PHASE_REPORTING 0xf4
+#define MAX16601_NUM_PHASES 8
+
struct max16601_data {
+ enum chips id;
struct pmbus_driver_info info;
struct i2c_client *vsa;
int iout_avg_pkg;
@@ -185,6 +191,7 @@ static int max16601_write_word(struct i2c_client *client, int page, int reg,
static int max16601_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
+ struct max16601_data *data = to_max16601_data(info);
int reg;
reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID);
@@ -195,6 +202,21 @@ static int max16601_identify(struct i2c_client *client,
else
info->vrm_version[0] = vr12;
+ if (data->id != max16601)
+ return 0;
+
+ reg = i2c_smbus_read_byte_data(client, REG_DEFAULT_NUM_POP);
+ if (reg < 0)
+ return reg;
+
+ /*
+ * If REG_DEFAULT_NUM_POP returns 0, we don't know how many phases
+ * are populated. Stick with the default in that case.
+ */
+ reg &= 0x0f;
+ if (reg && reg <= MAX16601_NUM_PHASES)
+ info->phases[0] = reg;
+
return 0;
}
@@ -216,7 +238,7 @@ static struct pmbus_driver_info max16601_info = {
.func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL,
- .phases[0] = 8,
+ .phases[0] = MAX16601_NUM_PHASES,
.pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
@@ -239,28 +261,61 @@ static void max16601_remove(void *_data)
i2c_unregister_device(data->vsa);
}
-static int max16601_probe(struct i2c_client *client)
+static const struct i2c_device_id max16601_id[] = {
+ {"max16508", max16508},
+ {"max16601", max16601},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max16601_id);
+
+static int max16601_get_id(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
- struct max16601_data *data;
+ enum chips id;
int ret;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA))
- return -ENODEV;
-
ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
- if (ret < 0)
+	if (ret < 11)
return -ENODEV;
- /* PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" */
- if (ret < 11 || strncmp(buf, "MAX16601", 8)) {
+ /*
+	 * PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx",
+	 * or "MAX16500y.xx" for the MAX16508.
+ */
+ if (!strncmp(buf, "MAX16500", 8)) {
+ id = max16508;
+ } else if (!strncmp(buf, "MAX16601", 8)) {
+ id = max16601;
+ } else {
buf[ret] = '\0';
dev_err(dev, "Unsupported chip '%s'\n", buf);
return -ENODEV;
}
+ return id;
+}
+
+static int max16601_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ const struct i2c_device_id *id;
+ struct max16601_data *data;
+ int ret, chip_id;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ chip_id = max16601_get_id(client);
+ if (chip_id < 0)
+ return chip_id;
+
+ id = i2c_match_id(max16601_id, client);
+ if (chip_id != id->driver_data)
+ dev_warn(&client->dev,
+ "Device mismatch: Configured %s (%d), detected %d\n",
+ id->name, (int) id->driver_data, chip_id);
ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID);
if (ret < 0)
@@ -275,6 +330,7 @@ static int max16601_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
+ data->id = chip_id;
data->iout_avg_pkg = 0xfc00;
data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1);
if (IS_ERR(data->vsa)) {
@@ -290,13 +346,6 @@ static int max16601_probe(struct i2c_client *client)
return pmbus_do_probe(client, &data->info);
}
-static const struct i2c_device_id max16601_id[] = {
- {"max16601", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, max16601_id);
-
static struct i2c_driver max16601_driver = {
.driver = {
.name = "max16601",
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index e5a9f4019cd5..17489abc49d5 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -17,6 +17,7 @@ enum max31785_regs {
#define MAX31785 0x3030
#define MAX31785A 0x3040
+#define MAX31785B 0x3061
#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
@@ -329,7 +330,7 @@ static int max31785_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
bool dual_tach = false;
- s64 ret;
+ int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
@@ -350,12 +351,14 @@ static int max31785_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- if (ret == MAX31785A) {
+ if (ret == MAX31785A || ret == MAX31785B) {
dual_tach = true;
} else if (ret == MAX31785) {
- if (!strcmp("max31785a", client->name))
- dev_warn(dev, "Expected max3175a, found max31785: cannot provide secondary tachometer readings\n");
+ if (!strcmp("max31785a", client->name) ||
+ !strcmp("max31785b", client->name))
+ dev_warn(dev, "Expected max31785a/b, found max31785: cannot provide secondary tachometer readings\n");
} else {
+ dev_err(dev, "Unrecognized MAX31785 revision: %x\n", ret);
return -ENODEV;
}
@@ -371,6 +374,7 @@ static int max31785_probe(struct i2c_client *client)
static const struct i2c_device_id max31785_id[] = {
{ "max31785", 0 },
{ "max31785a", 0 },
+ { "max31785b", 0 },
{ },
};
@@ -379,6 +383,7 @@ MODULE_DEVICE_TABLE(i2c, max31785_id);
static const struct of_device_id max31785_of_match[] = {
{ .compatible = "maxim,max31785" },
{ .compatible = "maxim,max31785a" },
+ { .compatible = "maxim,max31785b" },
{ },
};
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 192442b3b7a2..aadea85fe630 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -974,7 +974,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
if (ret < 0)
rv = ret;
else
- sensor->data = regval;
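+		/* Invalidate the cached value rather than caching what was written */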
+ sensor->data = -ENODATA;
mutex_unlock(&data->update_lock);
return rv;
}
@@ -1262,7 +1262,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
* which global bit is set) for this page is accessible.
*/
if (!ret && attr->gbit &&
- (!upper || (upper && data->has_status_word)) &&
+ (!upper || data->has_status_word) &&
pmbus_check_status_register(client, page)) {
ret = pmbus_add_boolean(data, name, "alarm", index,
NULL, NULL,
@@ -2204,9 +2204,11 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
}
/* Enable PEC if the controller supports it */
- ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
- client->flags |= I2C_CLIENT_PEC;
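+	/* Chips flagged PMBUS_NO_CAPABILITY report unreliable CAPABILITY data */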
+ if (!(data->flags & PMBUS_NO_CAPABILITY)) {
+ ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+ client->flags |= I2C_CLIENT_PEC;
+ }
/*
* Check if the chip is write protected. If it is, we can not clear
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..17518b4cab1b 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -21,15 +21,21 @@
#define MAX_PWM 255
+struct pwm_fan_tach {
+ int irq;
+ atomic_t pulses;
+ unsigned int rpm;
+ u8 pulses_per_revolution;
+};
+
struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
+ struct pwm_state pwm_state;
struct regulator *reg_en;
- int irq;
- atomic_t pulses;
- unsigned int rpm;
- u8 pulses_per_revolution;
+ int tach_count;
+ struct pwm_fan_tach *tachs;
ktime_t sample_start;
struct timer_list rpm_timer;
@@ -40,6 +46,7 @@ struct pwm_fan_ctx {
struct thermal_cooling_device *cdev;
struct hwmon_chip_info info;
+ struct hwmon_channel_info fan_channel;
};
static const u32 pwm_fan_channel_config_pwm[] = {
@@ -52,22 +59,12 @@ static const struct hwmon_channel_info pwm_fan_channel_pwm = {
.config = pwm_fan_channel_config_pwm,
};
-static const u32 pwm_fan_channel_config_fan[] = {
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info pwm_fan_channel_fan = {
- .type = hwmon_fan,
- .config = pwm_fan_channel_config_fan,
-};
-
/* This handler assumes self resetting edge triggered interrupt. */
static irqreturn_t pulse_handler(int irq, void *dev_id)
{
- struct pwm_fan_ctx *ctx = dev_id;
+ struct pwm_fan_tach *tach = dev_id;
- atomic_inc(&ctx->pulses);
+ atomic_inc(&tach->pulses);
return IRQ_HANDLED;
}
@@ -76,13 +73,18 @@ static void sample_timer(struct timer_list *t)
{
struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start);
- int pulses;
+ int i;
if (delta) {
- pulses = atomic_read(&ctx->pulses);
- atomic_sub(pulses, &ctx->pulses);
- ctx->rpm = (unsigned int)(pulses * 1000 * 60) /
- (ctx->pulses_per_revolution * delta);
+ for (i = 0; i < ctx->tach_count; i++) {
+ struct pwm_fan_tach *tach = &ctx->tachs[i];
+ int pulses;
+
+ pulses = atomic_read(&tach->pulses);
+ atomic_sub(pulses, &tach->pulses);
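+			/* Convert pulses counted over delta ms into revolutions per minute */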
+ tach->rpm = (unsigned int)(pulses * 1000 * 60) /
+ (tach->pulses_per_revolution * delta);
+ }
ctx->sample_start = ktime_get();
}
@@ -94,18 +96,17 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
unsigned long period;
int ret = 0;
- struct pwm_state state = { };
+ struct pwm_state *state = &ctx->pwm_state;
mutex_lock(&ctx->lock);
if (ctx->pwm_value == pwm)
goto exit_set_pwm_err;
- pwm_init_state(ctx->pwm, &state);
- period = ctx->pwm->args.period;
- state.duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
- state.enabled = pwm ? true : false;
+ period = state->period;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ state->enabled = pwm ? true : false;
- ret = pwm_apply_state(ctx->pwm, &state);
+ ret = pwm_apply_state(ctx->pwm, state);
if (!ret)
ctx->pwm_value = pwm;
exit_set_pwm_err:
@@ -152,7 +153,7 @@ static int pwm_fan_read(struct device *dev, enum hwmon_sensor_types type,
return 0;
case hwmon_fan:
- *val = ctx->rpm;
+ *val = ctx->tachs[channel].rpm;
return 0;
default:
@@ -287,7 +288,9 @@ static void pwm_fan_regulator_disable(void *data)
static void pwm_fan_pwm_disable(void *__ctx)
{
struct pwm_fan_ctx *ctx = __ctx;
- pwm_disable(ctx->pwm);
+
+ ctx->pwm_state.enabled = false;
+ pwm_apply_state(ctx->pwm, &ctx->pwm_state);
del_timer_sync(&ctx->rpm_timer);
}
@@ -298,9 +301,10 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
- struct pwm_state state = { };
- int tach_count;
const struct hwmon_channel_info **channels;
+ u32 *fan_channel_config;
+ int channel_count = 1; /* We always have a PWM channel. */
+ int i;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -334,12 +338,20 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
- pwm_init_state(ctx->pwm, &state);
- state.duty_cycle = ctx->pwm->args.period - 1;
- state.enabled = true;
+ pwm_init_state(ctx->pwm, &ctx->pwm_state);
- ret = pwm_apply_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+	 * long. Check this here to prevent the fan from running at too
+	 * low a frequency.
+ */
+ if (ctx->pwm_state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
+ ret = __set_pwm(ctx, MAX_PWM);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -349,27 +361,46 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (ret)
return ret;
- tach_count = platform_irq_count(pdev);
- if (tach_count < 0)
- return dev_err_probe(dev, tach_count,
+ ctx->tach_count = platform_irq_count(pdev);
+ if (ctx->tach_count < 0)
+ return dev_err_probe(dev, ctx->tach_count,
"Could not get number of fan tachometer inputs\n");
+ dev_dbg(dev, "%d fan tachometer inputs\n", ctx->tach_count);
+
+ if (ctx->tach_count) {
+ channel_count++; /* We also have a FAN channel. */
+
+ ctx->tachs = devm_kcalloc(dev, ctx->tach_count,
+ sizeof(struct pwm_fan_tach),
+ GFP_KERNEL);
+ if (!ctx->tachs)
+ return -ENOMEM;
+
+ ctx->fan_channel.type = hwmon_fan;
+ fan_channel_config = devm_kcalloc(dev, ctx->tach_count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!fan_channel_config)
+ return -ENOMEM;
+ ctx->fan_channel.config = fan_channel_config;
+ }
- channels = devm_kcalloc(dev, tach_count + 2,
+ channels = devm_kcalloc(dev, channel_count + 1,
sizeof(struct hwmon_channel_info *), GFP_KERNEL);
if (!channels)
return -ENOMEM;
channels[0] = &pwm_fan_channel_pwm;
- if (tach_count > 0) {
+ for (i = 0; i < ctx->tach_count; i++) {
+ struct pwm_fan_tach *tach = &ctx->tachs[i];
u32 ppr = 2;
- ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq == -EPROBE_DEFER)
- return ctx->irq;
- if (ctx->irq > 0) {
- ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
- pdev->name, ctx);
+ tach->irq = platform_get_irq(pdev, i);
+ if (tach->irq == -EPROBE_DEFER)
+ return tach->irq;
+ if (tach->irq > 0) {
+ ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
+ pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
@@ -378,22 +409,27 @@ static int pwm_fan_probe(struct platform_device *pdev)
}
}
- of_property_read_u32(dev->of_node,
- "pulses-per-revolution",
- &ppr);
- ctx->pulses_per_revolution = ppr;
- if (!ctx->pulses_per_revolution) {
+ of_property_read_u32_index(dev->of_node,
+ "pulses-per-revolution",
+ i,
+ &ppr);
+ tach->pulses_per_revolution = ppr;
+ if (!tach->pulses_per_revolution) {
dev_err(dev, "pulses-per-revolution can't be zero.\n");
return -EINVAL;
}
- dev_dbg(dev, "tach: irq=%d, pulses_per_revolution=%d\n",
- ctx->irq, ctx->pulses_per_revolution);
+ fan_channel_config[i] = HWMON_F_INPUT;
+
+ dev_dbg(dev, "tach%d: irq=%d, pulses_per_revolution=%d\n",
+ i, tach->irq, tach->pulses_per_revolution);
+ }
+ if (ctx->tach_count > 0) {
ctx->sample_start = ktime_get();
mod_timer(&ctx->rpm_timer, jiffies + HZ);
- channels[1] = &pwm_fan_channel_fan;
+ channels[1] = &ctx->fan_channel;
}
ctx->info.ops = &pwm_fan_hwmon_ops;
@@ -422,7 +458,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return ret;
}
ctx->cdev = cdev;
- thermal_cdev_update(cdev);
}
return 0;
@@ -431,17 +466,17 @@ static int pwm_fan_probe(struct platform_device *pdev)
static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- struct pwm_args args;
int ret;
- pwm_get_args(ctx->pwm, &args);
-
if (ctx->pwm_value) {
- ret = pwm_config(ctx->pwm, 0, args.period);
+ /* keep ctx->pwm_state unmodified for pwm_fan_resume() */
+ struct pwm_state state = ctx->pwm_state;
+
+ state.duty_cycle = 0;
+ state.enabled = false;
+ ret = pwm_apply_state(ctx->pwm, &state);
if (ret < 0)
return ret;
-
- pwm_disable(ctx->pwm);
}
if (ctx->reg_en) {
@@ -469,8 +504,6 @@ static int pwm_fan_suspend(struct device *dev)
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- struct pwm_args pargs;
- unsigned long duty;
int ret;
if (ctx->reg_en) {
@@ -484,12 +517,7 @@ static int pwm_fan_resume(struct device *dev)
if (ctx->pwm_value == 0)
return 0;
- pwm_get_args(ctx->pwm, &pargs);
- duty = DIV_ROUND_UP_ULL(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
- ret = pwm_config(ctx->pwm, duty, pargs.period);
- if (ret)
- return ret;
- return pwm_enable(ctx->pwm);
+ return pwm_apply_state(ctx->pwm, &ctx->pwm_state);
}
#endif
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index b637836b58a1..37531b5c8254 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -682,7 +682,7 @@ static int __init smsc47m1_handle_resources(unsigned short address,
/* Request the resources */
if (!devm_request_region(dev, start, len, DRVNAME)) {
dev_err(dev,
- "Region 0x%hx-0x%hx already in use!\n",
+ "Region 0x%x-0x%x already in use!\n",
start, start + len);
return -EBUSY;
}
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
new file mode 100644
index 000000000000..c2484f15298b
--- /dev/null
+++ b/drivers/hwmon/tps23861.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Sartura Ltd.
+ *
+ * Driver for the TI TPS23861 PoE PSE.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define TEMPERATURE 0x2c
+#define INPUT_VOLTAGE_LSB 0x2e
+#define INPUT_VOLTAGE_MSB 0x2f
+#define PORT_1_CURRENT_LSB 0x30
+#define PORT_1_CURRENT_MSB 0x31
+#define PORT_1_VOLTAGE_LSB 0x32
+#define PORT_1_VOLTAGE_MSB 0x33
+#define PORT_2_CURRENT_LSB 0x34
+#define PORT_2_CURRENT_MSB 0x35
+#define PORT_2_VOLTAGE_LSB 0x36
+#define PORT_2_VOLTAGE_MSB 0x37
+#define PORT_3_CURRENT_LSB 0x38
+#define PORT_3_CURRENT_MSB 0x39
+#define PORT_3_VOLTAGE_LSB 0x3a
+#define PORT_3_VOLTAGE_MSB 0x3b
+#define PORT_4_CURRENT_LSB 0x3c
+#define PORT_4_CURRENT_MSB 0x3d
+#define PORT_4_VOLTAGE_LSB 0x3e
+#define PORT_4_VOLTAGE_MSB 0x3f
+#define PORT_N_CURRENT_LSB_OFFSET 0x04
+#define PORT_N_VOLTAGE_LSB_OFFSET 0x04
+#define VOLTAGE_CURRENT_MASK GENMASK(13, 0)
+#define PORT_1_RESISTANCE_LSB 0x60
+#define PORT_1_RESISTANCE_MSB 0x61
+#define PORT_2_RESISTANCE_LSB 0x62
+#define PORT_2_RESISTANCE_MSB 0x63
+#define PORT_3_RESISTANCE_LSB 0x64
+#define PORT_3_RESISTANCE_MSB 0x65
+#define PORT_4_RESISTANCE_LSB 0x66
+#define PORT_4_RESISTANCE_MSB 0x67
+#define PORT_N_RESISTANCE_LSB_OFFSET 0x02
+#define PORT_RESISTANCE_MASK GENMASK(13, 0)
+#define PORT_RESISTANCE_RSN_MASK GENMASK(15, 14)
+#define PORT_RESISTANCE_RSN_OTHER 0
+#define PORT_RESISTANCE_RSN_LOW 1
+#define PORT_RESISTANCE_RSN_OPEN 2
+#define PORT_RESISTANCE_RSN_SHORT 3
+#define PORT_1_STATUS 0x0c
+#define PORT_2_STATUS 0x0d
+#define PORT_3_STATUS 0x0e
+#define PORT_4_STATUS 0x0f
+#define PORT_STATUS_CLASS_MASK GENMASK(7, 4)
+#define PORT_STATUS_DETECT_MASK GENMASK(3, 0)
+#define PORT_CLASS_UNKNOWN 0
+#define PORT_CLASS_1 1
+#define PORT_CLASS_2 2
+#define PORT_CLASS_3 3
+#define PORT_CLASS_4 4
+#define PORT_CLASS_RESERVED 5
+#define PORT_CLASS_0 6
+#define PORT_CLASS_OVERCURRENT 7
+#define PORT_CLASS_MISMATCH 8
+#define PORT_DETECT_UNKNOWN 0
+#define PORT_DETECT_SHORT 1
+#define PORT_DETECT_RESERVED 2
+#define PORT_DETECT_RESISTANCE_LOW 3
+#define PORT_DETECT_RESISTANCE_OK 4
+#define PORT_DETECT_RESISTANCE_HIGH 5
+#define PORT_DETECT_OPEN_CIRCUIT 6
+#define PORT_DETECT_RESERVED_2 7
+#define PORT_DETECT_MOSFET_FAULT 8
+#define PORT_DETECT_LEGACY 9
+/* Measurement beyond clamp voltage */
+#define PORT_DETECT_CAPACITANCE_INVALID_BEYOND 10
+/* Insufficient voltage delta */
+#define PORT_DETECT_CAPACITANCE_INVALID_DELTA 11
+#define PORT_DETECT_CAPACITANCE_OUT_OF_RANGE 12
+#define POE_PLUS 0x40
+#define OPERATING_MODE 0x12
+#define OPERATING_MODE_OFF 0
+#define OPERATING_MODE_MANUAL 1
+#define OPERATING_MODE_SEMI 2
+#define OPERATING_MODE_AUTO 3
+#define OPERATING_MODE_PORT_1_MASK GENMASK(1, 0)
+#define OPERATING_MODE_PORT_2_MASK GENMASK(3, 2)
+#define OPERATING_MODE_PORT_3_MASK GENMASK(5, 4)
+#define OPERATING_MODE_PORT_4_MASK GENMASK(7, 6)
+
+#define DETECT_CLASS_RESTART 0x18
+#define POWER_ENABLE 0x19
+#define TPS23861_NUM_PORTS 4
+
+#define TEMPERATURE_LSB 652 /* 0.652 degrees Celsius */
+#define VOLTAGE_LSB 3662 /* 3.662 mV */
+#define SHUNT_RESISTOR_DEFAULT 255000 /* 255 mOhm */
+#define CURRENT_LSB_255 62260 /* 62.260 uA */
+#define CURRENT_LSB_250 61039 /* 61.039 uA */
+#define RESISTANCE_LSB 110966 /* 11.0966 Ohm */
+#define RESISTANCE_LSB_LOW 157216 /* 15.7216 Ohm */
+
+struct tps23861_data {
+ struct regmap *regmap;
+ u32 shunt_resistor;
+ struct i2c_client *client;
+ struct dentry *debugfs_dir;
+};
+
+static struct regmap_config tps23861_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int tps23861_read_temp(struct tps23861_data *data, long *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_read(data->regmap, TEMPERATURE, &regval);
+ if (err < 0)
+ return err;
+
+ *val = (regval * TEMPERATURE_LSB) - 20000;
+
+ return 0;
+}
+
+static int tps23861_read_voltage(struct tps23861_data *data, int channel,
+ long *val)
+{
+ unsigned int regval;
+ int err;
+
+ if (channel < TPS23861_NUM_PORTS) {
+ err = regmap_bulk_read(data->regmap,
+ PORT_1_VOLTAGE_LSB + channel * PORT_N_VOLTAGE_LSB_OFFSET,
+ &regval, 2);
+ } else {
+ err = regmap_bulk_read(data->regmap,
+ INPUT_VOLTAGE_LSB,
+ &regval, 2);
+ }
+ if (err < 0)
+ return err;
+
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * VOLTAGE_LSB) / 1000;
+
+ return 0;
+}
+
+static int tps23861_read_current(struct tps23861_data *data, int channel,
+ long *val)
+{
+ unsigned int current_lsb;
+ unsigned int regval;
+ int err;
+
+ if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+ current_lsb = CURRENT_LSB_255;
+ else
+ current_lsb = CURRENT_LSB_250;
+
+ err = regmap_bulk_read(data->regmap,
+ PORT_1_CURRENT_LSB + channel * PORT_N_CURRENT_LSB_OFFSET,
+ &regval, 2);
+ if (err < 0)
+ return err;
+
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * current_lsb) / 1000000;
+
+ return 0;
+}
+
+static int tps23861_port_disable(struct tps23861_data *data, int channel)
+{
+ unsigned int regval = 0;
+ int err;
+
+ regval |= BIT(channel + 4);
+ err = regmap_write(data->regmap, POWER_ENABLE, regval);
+
+ return err;
+}
+
+static int tps23861_port_enable(struct tps23861_data *data, int channel)
+{
+ unsigned int regval = 0;
+ int err;
+
+ regval |= BIT(channel);
+ regval |= BIT(channel + 4);
+ err = regmap_write(data->regmap, DETECT_CLASS_RESTART, regval);
+
+ return err;
+}
+
+static umode_t tps23861_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_enable:
+ return 0200;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_label:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int tps23861_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tps23861_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_enable:
+ if (val == 0)
+ err = tps23861_port_disable(data, channel);
+ else if (val == 1)
+ err = tps23861_port_enable(data, channel);
+ else
+ err = -EINVAL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int tps23861_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tps23861_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ err = tps23861_read_temp(data, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ err = tps23861_read_voltage(data, channel, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ err = tps23861_read_current(data, channel, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static const char * const tps23861_port_label[] = {
+ "Port1",
+ "Port2",
+ "Port3",
+ "Port4",
+ "Input",
+};
+
+static int tps23861_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ case hwmon_curr:
+ *str = tps23861_port_label[channel];
+ break;
+ case hwmon_temp:
+ *str = "Die";
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *tps23861_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops tps23861_hwmon_ops = {
+ .is_visible = tps23861_is_visible,
+ .write = tps23861_write,
+ .read = tps23861_read,
+ .read_string = tps23861_read_string,
+};
+
+static const struct hwmon_chip_info tps23861_chip_info = {
+ .ops = &tps23861_hwmon_ops,
+ .info = tps23861_info,
+};
+
+static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+ int mode;
+
+ regmap_read(data->regmap, OPERATING_MODE, &regval);
+
+ switch (port) {
+ case 1:
+ mode = FIELD_GET(OPERATING_MODE_PORT_1_MASK, regval);
+ break;
+ case 2:
+ mode = FIELD_GET(OPERATING_MODE_PORT_2_MASK, regval);
+ break;
+ case 3:
+ mode = FIELD_GET(OPERATING_MODE_PORT_3_MASK, regval);
+ break;
+ case 4:
+ mode = FIELD_GET(OPERATING_MODE_PORT_4_MASK, regval);
+ break;
+ default:
+ mode = -EINVAL;
+ }
+
+ switch (mode) {
+ case OPERATING_MODE_OFF:
+ return "Off";
+ case OPERATING_MODE_MANUAL:
+ return "Manual";
+ case OPERATING_MODE_SEMI:
+ return "Semi-Auto";
+ case OPERATING_MODE_AUTO:
+ return "Auto";
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap,
+ PORT_1_STATUS + (port - 1),
+ &regval);
+
+ switch (FIELD_GET(PORT_STATUS_DETECT_MASK, regval)) {
+ case PORT_DETECT_UNKNOWN:
+ return "Unknown device";
+ case PORT_DETECT_SHORT:
+ return "Short circuit";
+ case PORT_DETECT_RESISTANCE_LOW:
+ return "Too low resistance";
+ case PORT_DETECT_RESISTANCE_OK:
+ return "Valid resistance";
+ case PORT_DETECT_RESISTANCE_HIGH:
+ return "Too high resistance";
+ case PORT_DETECT_OPEN_CIRCUIT:
+ return "Open circuit";
+ case PORT_DETECT_MOSFET_FAULT:
+ return "MOSFET fault";
+ case PORT_DETECT_LEGACY:
+ return "Legacy device";
+ case PORT_DETECT_CAPACITANCE_INVALID_BEYOND:
+ return "Invalid capacitance, beyond clamp voltage";
+ case PORT_DETECT_CAPACITANCE_INVALID_DELTA:
+ return "Invalid capacitance, insufficient voltage delta";
+ case PORT_DETECT_CAPACITANCE_OUT_OF_RANGE:
+ return "Valid capacitance, outside of legacy range";
+ case PORT_DETECT_RESERVED:
+ case PORT_DETECT_RESERVED_2:
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_class_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap,
+ PORT_1_STATUS + (port - 1),
+ &regval);
+
+ switch (FIELD_GET(PORT_STATUS_CLASS_MASK, regval)) {
+ case PORT_CLASS_UNKNOWN:
+ return "Unknown";
+ case PORT_CLASS_RESERVED:
+ case PORT_CLASS_0:
+ return "0";
+ case PORT_CLASS_1:
+ return "1";
+ case PORT_CLASS_2:
+ return "2";
+ case PORT_CLASS_3:
+ return "3";
+ case PORT_CLASS_4:
+ return "4";
+ case PORT_CLASS_OVERCURRENT:
+ return "Overcurrent";
+ case PORT_CLASS_MISMATCH:
+ return "Mismatch";
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap, POE_PLUS, &regval);
+
+ if (BIT(port + 3) & regval)
+ return "Yes";
+ else
+ return "No";
+}
+
+static int tps23861_port_resistance(struct tps23861_data *data, int port)
+{
+ u16 regval;
+
+ regmap_bulk_read(data->regmap,
+ PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
+ &regval,
+ 2);
+
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ case PORT_RESISTANCE_RSN_OTHER:
+ return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ case PORT_RESISTANCE_RSN_LOW:
+ return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ case PORT_RESISTANCE_RSN_SHORT:
+ case PORT_RESISTANCE_RSN_OPEN:
+ default:
+ return 0;
+ }
+}
+
+static int tps23861_port_status_show(struct seq_file *s, void *data)
+{
+ struct tps23861_data *priv = s->private;
+ int i;
+
+ for (i = 1; i < TPS23861_NUM_PORTS + 1; i++) {
+ seq_printf(s, "Port: \t\t%d\n", i);
+ seq_printf(s, "Operating mode: %s\n", tps23861_port_operating_mode(priv, i));
+ seq_printf(s, "Detected: \t%s\n", tps23861_port_detect_status(priv, i));
+ seq_printf(s, "Class: \t\t%s\n", tps23861_port_class_status(priv, i));
+ seq_printf(s, "PoE Plus: \t%s\n", tps23861_port_poe_plus_status(priv, i));
+ seq_printf(s, "Resistance: \t%d\n", tps23861_port_resistance(priv, i));
+ seq_putc(s, '\n');
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
+
+static void tps23861_init_debugfs(struct tps23861_data *data)
+{
+ data->debugfs_dir = debugfs_create_dir(data->client->name, NULL);
+
+ debugfs_create_file("port_status",
+ 0400,
+ data->debugfs_dir,
+ data,
+ &tps23861_port_status_fops);
+}
+
+static int tps23861_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tps23861_data *data;
+ struct device *hwmon_dev;
+ u32 shunt_resistor;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ data->regmap = devm_regmap_init_i2c(client, &tps23861_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ if (!of_property_read_u32(dev->of_node, "shunt-resistor-micro-ohms", &shunt_resistor))
+ data->shunt_resistor = shunt_resistor;
+ else
+ data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &tps23861_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ tps23861_init_debugfs(data);
+
+ return 0;
+}
+
+static int tps23861_remove(struct i2c_client *client)
+{
+ struct tps23861_data *data = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(data->debugfs_dir);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused tps23861_of_match[] = {
+ { .compatible = "ti,tps23861", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps23861_of_match);
+
+static struct i2c_driver tps23861_driver = {
+ .probe_new = tps23861_probe,
+ .remove = tps23861_remove,
+ .driver = {
+ .name = "tps23861",
+ .of_match_table = of_match_ptr(tps23861_of_match),
+ },
+};
+module_i2c_driver(tps23861_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("TI TPS23861 PoE PSE");
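
A worked example of the scaling in the new tps23861 driver (not part of the patch; values taken from the #defines above, with hwmon expecting millidegrees, millivolts and milliamperes):

	temperature: raw 100  -> 100 * 652 - 20000       = 45200 (45.2 degrees Celsius)
	voltage:     raw 8192 -> 8192 * 3662 / 1000      = 29999 mV (~30 V)
	current:     raw 4096 -> 4096 * 62260 / 1000000  = 255 mA
	resistance:  raw 100  -> 100 * 110966 / 10000    = 1109 Ohm

The 14-bit masks (VOLTAGE_CURRENT_MASK, PORT_RESISTANCE_MASK) are applied to the raw 16-bit reads before these conversions; the temperature register is a single byte and needs no mask.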
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 3964ceab2817..8618aaf32350 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1110,7 +1110,7 @@ clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct device_attribute *devattr;
struct sensor_device_attribute *sda;
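
kobj_to_dev(), which this hunk switches to, is the dedicated helper from <linux/device.h>; it wraps exactly the container_of() the old code open-coded:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}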
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 3b05560456ea..33eeff94fc2a 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -2,11 +2,12 @@
/*
* OMAP hardware spinlock driver
*
- * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
+ * Suman Anna <s-anna@ti.com>
*/
#include <linux/kernel.h>
@@ -164,6 +165,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
static const struct of_device_id omap_hwspinlock_of_match[] = {
{ .compatible = "ti,omap4-hwspinlock", },
+ { .compatible = "ti,am64-hwspinlock", },
{ .compatible = "ti,am654-hwspinlock", },
{ /* end */ },
};
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index a61313f320bd..e0740c6dbd54 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -401,8 +401,9 @@ static const struct attribute_group *catu_groups[] = {
static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
- return coresight_timeout(drvdata->base,
- CATU_STATUS, CATU_STATUS_READY, 1);
+ struct csdev_access *csa = &drvdata->csdev->access;
+
+ return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
@@ -411,6 +412,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
u32 control, mode;
struct etr_buf *etr_buf = data;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
if (catu_wait_for_ready(drvdata))
dev_warn(dev, "Timeout while waiting for READY\n");
@@ -421,7 +423,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return -EBUSY;
}
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
return rc;
@@ -465,9 +467,10 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
{
int rc = 0;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
catu_write_control(drvdata, 0);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
if (catu_wait_for_ready(drvdata)) {
dev_info(dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
@@ -551,6 +554,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
dev->platform_data = pdata;
drvdata->base = base;
+ catu_desc.access = CSDEV_ACCESS_IOMEM(base);
catu_desc.pdata = pdata;
catu_desc.dev = dev;
catu_desc.groups = catu_groups;
@@ -567,12 +571,11 @@ out:
return ret;
}
-static int catu_remove(struct amba_device *adev)
+static void catu_remove(struct amba_device *adev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
- return 0;
}
static struct amba_id catu_ids[] = {
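
The remove() signature change here, repeated in the other coresight hunks below, tracks the AMBA bus core: struct amba_driver now declares the callback as returning void, since the bus never acted on the returned value, so drivers drop their unconditional "return 0". Abridged from include/linux/amba/bus.h:

struct amba_driver {
	struct device_driver	drv;
	int			(*probe)(struct amba_device *, const struct amba_id *);
	void			(*remove)(struct amba_device *);
	/* other members omitted */
};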
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 4ba801dffcb7..0062c8935653 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -145,30 +145,32 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
return -ENODEV;
}
-static inline u32 coresight_read_claim_tags(void __iomem *base)
+static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
{
- return readl_relaxed(base + CORESIGHT_CLAIMCLR);
+ return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
}
-static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
+static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
+ return coresight_read_claim_tags(csdev) == CORESIGHT_CLAIM_SELF_HOSTED;
}
-static inline bool coresight_is_claimed_any(void __iomem *base)
+static inline bool coresight_is_claimed_any(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) != 0;
+ return coresight_read_claim_tags(csdev) != 0;
}
-static inline void coresight_set_claim_tags(void __iomem *base)
+static inline void coresight_set_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMSET);
isb();
}
-static inline void coresight_clear_claim_tags(void __iomem *base)
+static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMCLR);
isb();
}
@@ -182,27 +184,33 @@ static inline void coresight_clear_claim_tags(void __iomem *base)
* Called with CS_UNLOCKed for the component.
* Returns : 0 on success
*/
-int coresight_claim_device_unlocked(void __iomem *base)
+int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_any(base))
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ if (coresight_is_claimed_any(csdev))
return -EBUSY;
- coresight_set_claim_tags(base);
- if (coresight_is_claimed_self_hosted(base))
+ coresight_set_claim_tags(csdev);
+ if (coresight_is_claimed_self_hosted(csdev))
return 0;
/* There was a race setting the tags, clean up and fail */
- coresight_clear_claim_tags(base);
+ coresight_clear_claim_tags(csdev);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
-int coresight_claim_device(void __iomem *base)
+int coresight_claim_device(struct coresight_device *csdev)
{
int rc;
- CS_UNLOCK(base);
- rc = coresight_claim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ CS_UNLOCK(csdev->access.base);
+ rc = coresight_claim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
return rc;
}
@@ -212,11 +220,14 @@ EXPORT_SYMBOL_GPL(coresight_claim_device);
* coresight_disclaim_device_unlocked : Clear the claim tags for the device.
* Called with CS_UNLOCKed for the component.
*/
-void coresight_disclaim_device_unlocked(void __iomem *base)
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_self_hosted(base))
- coresight_clear_claim_tags(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ if (coresight_is_claimed_self_hosted(csdev))
+ coresight_clear_claim_tags(csdev);
else
/*
 * The external agent may not have honoured our claim
@@ -227,11 +238,14 @@ void coresight_disclaim_device_unlocked(void __iomem *base)
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
-void coresight_disclaim_device(void __iomem *base)
+void coresight_disclaim_device(struct coresight_device *csdev)
{
- CS_UNLOCK(base);
- coresight_disclaim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ CS_UNLOCK(csdev->access.base);
+ coresight_disclaim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device);
@@ -1418,23 +1432,24 @@ static void coresight_remove_conns(struct coresight_device *csdev)
}
/**
- * coresight_timeout - loop until a bit has changed to a specific state.
- * @addr: base address of the area of interest.
- * @offset: address of a register, starting from @addr.
+ * coresight_timeout - loop until a bit has changed to a specific register
+ * state.
+ * @csa: coresight device access for the device
+ * @offset: Offset of the register from the base of the device.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
*
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
*/
-
-int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
+int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
{
int i;
u32 val;
for (i = TIMEOUT_US; i > 0; i--) {
- val = __raw_readl(addr + offset);
+ val = csdev_access_read32(csa, offset);
/* waiting on the bit to go from 0 to 1 */
if (value) {
if (val & BIT(position))
@@ -1458,6 +1473,48 @@ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
}
EXPORT_SYMBOL_GPL(coresight_timeout);
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read32(&csdev->access, offset);
+}
+
+u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read32(&csdev->access, offset);
+}
+
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+ csdev_access_relaxed_write32(&csdev->access, val, offset);
+}
+
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+ csdev_access_write32(&csdev->access, val, offset);
+}
+
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read64(&csdev->access, offset);
+}
+
+u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read64(&csdev->access, offset);
+}
+
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+ csdev_access_relaxed_write64(&csdev->access, val, offset);
+}
+
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+ csdev_access_write64(&csdev->access, val, offset);
+}
+
/*
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
@@ -1522,6 +1579,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->type = desc->type;
csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
+ csdev->access = desc->access;
csdev->orphan = false;
csdev->dev.type = &coresight_dev_type[desc->type];
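
All of these coresight-core hunks build on the new csdev_access stored in csdev->access: it carries either an MMIO base (io_mem == true) or a pair of read/write callbacks, and the csdev_access_*() helpers pick between them. A hedged sketch of the 32-bit read path under those assumptions:

static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
					      u32 offset)
{
	if (likely(csa->io_mem))
		return readl_relaxed(csa->base + offset);

	return csa->read(offset, true /* relaxed */, false /* not 64-bit */);
}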
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index e1d232411d8d..2dcf13de751f 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -627,7 +627,7 @@ err:
return ret;
}
-static int debug_remove(struct amba_device *adev)
+static void debug_remove(struct amba_device *adev)
{
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata = amba_get_drvdata(adev);
@@ -642,8 +642,6 @@ static int debug_remove(struct amba_device *adev)
if (!--debug_count)
debug_func_exit();
-
- return 0;
}
static const struct amba_cs_uci_id uci_id_debug[] = {
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 61dbc1afd8da..e2a3620cbf48 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -102,7 +102,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
goto cti_state_unchanged;
/* claim the device */
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
@@ -136,7 +136,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
goto cti_hp_not_enabled;
/* try to claim the device */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(drvdata->csdev))
goto cti_hp_not_enabled;
cti_write_all_hw_regs(drvdata);
@@ -154,6 +154,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
spin_lock(&drvdata->spinlock);
@@ -171,7 +172,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
writel_relaxed(0, drvdata->base + CTICONTROL);
config->hw_enabled = false;
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
pm_runtime_put(dev);
@@ -655,6 +656,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
struct cti_drvdata *drvdata;
+ struct coresight_device *csdev;
unsigned int cpu = smp_processor_id();
int notify_res = NOTIFY_OK;
@@ -662,6 +664,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
return NOTIFY_OK;
drvdata = cti_cpu_drvdata[cpu];
+ csdev = drvdata->csdev;
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
return NOTIFY_BAD;
@@ -673,13 +676,13 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* CTI regs all static - we have a copy & nothing to save */
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(csdev);
break;
case CPU_PM_ENTER_FAILED:
drvdata->config.hw_powered = true;
if (drvdata->config.hw_enabled) {
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
drvdata->config.hw_enabled = false;
}
break;
@@ -692,7 +695,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* check enable reference count to enable HW */
if (atomic_read(&drvdata->config.enable_req_count)) {
/* check we can claim the device as we re-power */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
goto cti_notify_exit;
drvdata->config.hw_enabled = true;
@@ -736,7 +739,7 @@ static int cti_dying_cpu(unsigned int cpu)
spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -836,7 +839,7 @@ static void cti_device_release(struct device *dev)
if (drvdata->csdev_release)
drvdata->csdev_release(dev);
}
-static int cti_remove(struct amba_device *adev)
+static void cti_remove(struct amba_device *adev)
{
struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -845,8 +848,6 @@ static int cti_remove(struct amba_device *adev)
mutex_unlock(&ect_mutex);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
@@ -870,6 +871,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ cti_desc.access = CSDEV_ACCESS_IOMEM(base);
dev_set_drvdata(dev, drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index 98f830c6ed50..ccef04f27f12 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -343,7 +343,6 @@ static int cti_plat_create_connection(struct device *dev,
{
struct cti_trig_con *tc = NULL;
int cpuid = -1, err = 0;
- struct fwnode_handle *cs_fwnode = NULL;
struct coresight_device *csdev = NULL;
const char *assoc_name = "unknown";
char cpu_name_str[16];
@@ -397,8 +396,9 @@ static int cti_plat_create_connection(struct device *dev,
assoc_name = cpu_name_str;
} else {
/* associated device ? */
- cs_fwnode = fwnode_find_reference(fwnode,
- CTI_DT_CSDEV_ASSOC, 0);
+ struct fwnode_handle *cs_fwnode = fwnode_find_reference(fwnode,
+ CTI_DT_CSDEV_ASSOC,
+ 0);
if (!IS_ERR(cs_fwnode)) {
assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
&csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 0cf6f0b947b6..f775cbee12b8 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -132,7 +132,7 @@ static void __etb_enable_hw(struct etb_drvdata *drvdata)
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -252,6 +252,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
u32 ffcr;
struct device *dev = &drvdata->csdev->dev;
+ struct csdev_access *csa = &drvdata->csdev->access;
CS_UNLOCK(drvdata->base);
@@ -263,7 +264,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
ffcr |= ETB_FFCR_FON_MAN;
writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
- if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
+ if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
dev_err(dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -271,7 +272,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
/* disable trace capture */
writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
- if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
+ if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
dev_err(dev,
"timeout while waiting for Formatter to Stop\n");
}
@@ -344,7 +345,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
{
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static int etb_disable(struct coresight_device *csdev)
@@ -757,6 +758,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -803,7 +805,7 @@ err_misc_register:
return ret;
}
-static int etb_remove(struct amba_device *adev)
+static void etb_remove(struct amba_device *adev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -814,8 +816,6 @@ static int etb_remove(struct amba_device *adev)
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index bdc34ca449f7..0f603b4094f2 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -27,17 +27,45 @@ static bool etm_perf_up;
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
-/* ETMv3.5/PTM's ETMCR is 'config' */
+/*
+ * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
+ * they are now treated as general formats and applied to all ETMs.
+ */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
-PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
+PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
+PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
+/*
+ * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
+ * when the kernel is running at EL1; when the kernel is at EL2,
+ * the PID is in CONTEXTIDR_EL2.
+ */
+static ssize_t format_attr_contextid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ int pid_fmt = ETM_OPT_CTXTID;
+
+#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
+ pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
+#endif
+ return sprintf(page, "config:%d\n", pid_fmt);
+}
+
+struct device_attribute format_attr_contextid =
+ __ATTR(contextid, 0444, format_attr_contextid_show, NULL);
+
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_contextid.attr,
+ &format_attr_contextid1.attr,
+ &format_attr_contextid2.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
&format_attr_sinkid.attr,
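
For context, PMU_FORMAT_ATTR() (from <linux/perf_event.h>) generates a read-only sysfs attribute whose show() prints a fixed format string, roughly:

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev, struct device_attribute *attr,	\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

contextid cannot use the macro because its bit position is only known at runtime (EL1 vs EL2), hence the hand-rolled format_attr_contextid_show() above.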
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 5bf5a5a4ce6d..cf64ce73a741 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -358,10 +358,11 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
int i, rc;
u32 etmcr;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
@@ -566,6 +567,7 @@ static void etm_disable_hw(void *info)
int i;
struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
@@ -577,7 +579,7 @@ static void etm_disable_hw(void *info)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
@@ -602,7 +604,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
* power down the tracer.
*/
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -839,6 +841,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -909,7 +912,7 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static int etm_remove(struct amba_device *adev)
+static void etm_remove(struct amba_device *adev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -932,8 +935,6 @@ static int etm_remove(struct amba_device *adev)
cpus_read_unlock();
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index b20b6ff17cf6..15016f757828 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -59,32 +60,99 @@ static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
-static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+struct etm4_init_arg {
+ unsigned int pid;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
+};
+
+/*
+ * Check if TRCSSPCICRn(i) is implemented for a given instance.
+ *
+ * TRCSSPCICR<n> is implemented only if all of
+ * the following are true:
+ *	TRCIDR4.NUMSSCC > n.
+ *	TRCIDR4.NUMPC > 0b0000.
+ * TRCSSCSR<n>.PC == 0b1
+ */
+static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
+{
+ return (n < drvdata->nr_ss_cmp) &&
+ drvdata->nr_pe &&
+ (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
+}
+
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
+{
+ u64 res = 0;
+
+ switch (offset) {
+ ETM4x_READ_SYSREG_CASES(res)
+ default:
+ pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
+}
+
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
+{
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETM4x_WRITE_SYSREG_CASES(val)
+ default:
+ pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
+
+static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa)
{
/* Writing 0 to TRCOSLAR unlocks the trace registers */
- writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+ etm4x_relaxed_write32(csa, 0x0, TRCOSLAR);
drvdata->os_unlock = true;
isb();
}
+static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+{
+ if (!WARN_ON(!drvdata->csdev))
+ etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
+
+}
+
static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
+ if (WARN_ON(!drvdata->csdev))
+ return;
+
/* Writing 0x1 to TRCOSLAR locks the trace registers */
- writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR);
drvdata->os_unlock = false;
isb();
}
-static bool etm4_arch_supported(u8 arch)
+static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
{
- /* Mask out the minor version number */
- switch (arch & 0xf0) {
- case ETM_ARCH_V4:
- break;
- default:
- return false;
- }
- return true;
+ /* Software Lock is only accessible via memory mapped interface */
+ if (csa->io_mem)
+ CS_LOCK(csa->base);
+}
+
+static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ if (csa->io_mem)
+ CS_UNLOCK(csa->base);
}
static int etm4_cpu_id(struct coresight_device *csdev)
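
ETM4x_READ_SYSREG_CASES()/ETM4x_WRITE_SYSREG_CASES() used above are defined in the etm4x header rather than in this hunk. A hedged sketch of the shape of one generated case, reusing read_etm4x_sysreg_const_offset() which this patch calls later (the real macros enumerate every architected TRCxxx register):

#define CASE_READ(res, offset)						\
	case (offset):							\
		(res) = read_etm4x_sysreg_const_offset(offset);		\
		break;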
@@ -201,57 +269,64 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
+
- CS_UNLOCK(drvdata->base);
+ etm4_cs_unlock(drvdata, csa);
etm4_enable_arch_specific(drvdata);
etm4_os_unlock(drvdata);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
/* Disable the trace unit before programming trace registers */
- writel_relaxed(0, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);
+
+ /*
+ * If we use system instructions, we need to synchronize the
+ * write to the TRCPRGCTLR, before accessing the TRCSTATR.
+ * See ARM IHI0064F, section
+ * "4.3.7 Synchronization of register updates"
+ */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
- writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
- writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
+ etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
/* nothing specific implemented */
- writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
- writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
- writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
- writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
- writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
- writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
- writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
- writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(config->vissctlr,
- drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
+ etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(config->vipcssctlr,
- drvdata->base + TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(config->seq_ctrl[i],
- drvdata->base + TRCSEQEVRn(i));
- writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
- writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(config->cntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(config->cntr_ctrl[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(config->cntr_val[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
}
/*
@@ -259,54 +334,52 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
* such start at 2.
*/
for (i = 2; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(config->res_ctrl[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
config->ss_status[i] &= ~BIT(31);
- writel_relaxed(config->ss_ctrl[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(config->ss_status[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(config->ss_pe_cmp[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- writeq_relaxed(config->addr_val[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(config->addr_acc[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(config->ctxid_pid[i],
- drvdata->base + TRCCIDCVRn(i));
- writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(config->vmid_val[i],
- drvdata->base + TRCVMIDCVRn(i));
- writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);
if (!drvdata->skip_power_up) {
+ u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);
+
/*
* Request to keep the trace unit powered and also
* emulation of powerdown
*/
- writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
- TRCPDCR_PU, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
/* Enable the trace unit */
- writel_relaxed(1, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
+
+ /* Synchronize the register updates for sysreg access */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
@@ -318,7 +391,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
done:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
@@ -477,6 +550,19 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
/* bit[6], Context ID tracing bit */
config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ /*
+ * If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
+ * for recording CONTEXTIDR_EL2. Do not enable VMID tracing if the
+ * kernel is not running in EL2.
+ */
+ if (attr->config & BIT(ETM_OPT_CTXTID2)) {
+ if (!is_kernel_in_hyp_mode()) {
+ ret = -EINVAL;
+ goto out;
+ }
+ config->cfg |= BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT);
+ }
+
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
@@ -570,20 +656,22 @@ static void etm4_disable_hw(void *info)
u32 control;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
int i;
- CS_UNLOCK(drvdata->base);
+ etm4_cs_unlock(drvdata, csa);
etm4_disable_arch_specific(drvdata);
if (!drvdata->skip_power_up) {
/* power can be removed from the trace unit now */
- control = readl_relaxed(drvdata->base + TRCPDCR);
+ control = etm4x_relaxed_read32(csa, TRCPDCR);
control &= ~TRCPDCR_PU;
- writel_relaxed(control, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, control, TRCPDCR);
}
- control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+ control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
@@ -595,29 +683,27 @@ static void etm4_disable_hw(void *info)
*/
dsb(sy);
isb();
- writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/* wait for TRCSTATR.PMSTABLE to go to '1' */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* read back the current counter values */
for (i = 0; i < drvdata->nr_cntr; i++) {
config->cntr_val[i] =
- readl_relaxed(drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_read32(csa, TRCCNTVRn(i));
}
- coresight_disclaim_device_unlocked(drvdata->base);
-
- CS_LOCK(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
+ etm4_cs_lock(drvdata, csa);
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
@@ -641,7 +727,7 @@ static int etm4_disable_perf(struct coresight_device *csdev,
* scheduled again. Configuration of the start/stop logic happens in
* function etm4_set_event_filters().
*/
- control = readl_relaxed(drvdata->base + TRCVICTLR);
+ control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
@@ -712,24 +798,136 @@ static const struct coresight_ops etm4_cs_ops = {
.source_ops = &etm4_source_ops,
};
+static inline bool cpu_supports_sysreg_trace(void)
+{
+ u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+
+ return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+}
+
+static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch;
+
+ if (!cpu_supports_sysreg_trace())
+ return false;
+
+ /*
+ * ETMs implementing sysreg access must implement TRCDEVARCH.
+ */
+ devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
+ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH)
+ return false;
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = etm4x_sysreg_read,
+ .write = etm4x_sysreg_write,
+ };
+
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ return true;
+}
+
+static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+ u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ /*
+ * All ETMs must implement TRCDEVARCH to indicate that
+ * the component is an ETMv4. To support any broken
+ * implementations we fall back to TRCIDR1 check, which
+ * is not really reliable.
+ */
+ if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ } else {
+ pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+ smp_processor_id(), devarch);
+
+ if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+ return false;
+ drvdata->arch = etm_trcidr_to_arch(idr1);
+ }
+
+ *csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ return true;
+}
+
+static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ /*
+ * Always choose the memory-mapped I/O if there is
+ * a memory map, to prevent sysreg access on broken
+ * systems.
+ */
+ if (drvdata->base)
+ return etm4_init_iomem_access(drvdata, csa);
+
+ if (etm4_init_sysreg_access(drvdata, csa))
+ return true;
+
+ return false;
+}
+
+static void cpu_enable_tracing(void)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+ u64 trfcr;
+
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ return;
+
+ /*
+ * If the CPU supports v8.4 SelfHosted Tracing, enable
+ * tracing at the kernel EL and EL0, forcing to use the
+ * virtual time as the timestamp.
+ */
+ trfcr = (TRFCR_ELx_TS_VIRTUAL |
+ TRFCR_ELx_ExTRE |
+ TRFCR_ELx_E0TRE);
+
+ /* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
+ if (is_kernel_in_hyp_mode())
+ trfcr |= TRFCR_EL2_CX;
+
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
+}
+
static void etm4_init_arch_data(void *info)
{
u32 etmidr0;
- u32 etmidr1;
u32 etmidr2;
u32 etmidr3;
u32 etmidr4;
u32 etmidr5;
- struct etmv4_drvdata *drvdata = info;
+ struct etm4_init_arg *init_arg = info;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
int i;
+ drvdata = init_arg->drvdata;
+ csa = init_arg->csa;
+
+ /*
+ * If we are unable to detect the access mechanism,
+ * or unable to detect the trace unit type, fail
+ * early.
+ */
+ if (!etm4_init_csdev_access(drvdata, csa))
+ return;
+
/* Make sure all registers are accessible */
- etm4_os_unlock(drvdata);
+ etm4_os_unlock_csa(drvdata, csa);
+ etm4_cs_unlock(drvdata, csa);
- CS_UNLOCK(drvdata->base);
+ etm4_check_arch_features(drvdata, init_arg->pid);
/* find all capabilities of the tracing unit */
- etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
+ etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
@@ -768,17 +966,8 @@ static void etm4_init_arch_data(void *info)
/* TSSIZE, bits[28:24] Global timestamp size field */
drvdata->ts_size = BMVAL(etmidr0, 24, 28);
- /* base architecture of trace unit */
- etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
- /*
- * TRCARCHMIN, bits[7:4] architecture the minor version number
- * TRCARCHMAJ, bits[11:8] architecture major versin number
- */
- drvdata->arch = BMVAL(etmidr1, 4, 11);
- drvdata->config.arch = drvdata->arch;
-
/* maximum size of resources */
- etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
+ etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
@@ -786,11 +975,12 @@ static void etm4_init_arch_data(void *info)
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
drvdata->ccsize = BMVAL(etmidr2, 25, 28);
- etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
+ etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
@@ -836,7 +1026,7 @@ static void etm4_init_arch_data(void *info)
drvdata->nooverflow = false;
/* number of resources trace unit supports */
- etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
+ etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
@@ -852,7 +1042,7 @@ static void etm4_init_arch_data(void *info)
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
- if ((drvdata->arch < ETM4X_ARCH_4V3) || (drvdata->nr_resource > 0))
+ if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
@@ -862,14 +1052,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
- etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
+ etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
@@ -891,23 +1081,20 @@ static void etm4_init_arch_data(void *info)
drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
+ cpu_enable_tracing();
+}
+
+static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
}
/* Set ELx trace filter access in the TRCVICTLR register */
static void etm4_set_victlr_access(struct etmv4_config *config)
{
- u64 access_type;
-
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
-
- /*
- * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
- * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
- * etm4_get_access_type() but with a relative shift in this register.
- */
- access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
- config->vinst_ctrl |= (u32)access_type;
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
+ config->vinst_ctrl |= etm4_get_victlr_access_type(config);
}
static void etm4_set_default_config(struct etmv4_config *config)
@@ -937,12 +1124,9 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
u64 access_type = 0;
/*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
+ * EXLEVEL_NS, for NonSecure Exception levels.
+ * The mask here is a generic value and must be
+ * shifted to the corresponding field for the registers.
*/
if (!is_kernel_in_hyp_mode()) {
/* Stay away from hypervisor mode for non-VHE */
@@ -959,27 +1143,26 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
return access_type;
}
+/*
+ * Construct the exception level masks for a given config.
+ * The result must be shifted into the corresponding register
+ * field before use.
+ */
static u64 etm4_get_access_type(struct etmv4_config *config)
{
- u64 access_type = etm4_get_ns_access_type(config);
- u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
-
- /*
- * EXLEVEL_S, bits[11:8], don't trace anything happening
- * in secure state.
- */
- access_type |= (ETM_EXLEVEL_S_APP |
- ETM_EXLEVEL_S_OS |
- s_hyp |
- ETM_EXLEVEL_S_MON);
+ /* All Secure exception levels are excluded from the trace */
+ return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
+}
- return access_type;
+static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
}
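
The pair of helpers above captures the core of this refactor: etm4_get_access_type() now returns one generic exception-level bit pattern, and each caller shifts it into the field of the register it programs. A standalone sketch (editorial, not part of the patch; the macro values mirror the shift definitions added to coresight-etm4x.h later in this diff) of how the same pattern lands in both registers:

#include <stdio.h>

#define ETM_EXLEVEL_S_APP	(1U << 0)	/* Secure EL0 */
#define ETM_EXLEVEL_NS_OS	(1U << 5)	/* Non-secure EL1 */
#define TRCACATR_EXLEVEL_SHIFT	8
#define TRCVICTLR_EXLEVEL_SHIFT	16

int main(void)
{
	unsigned int generic = ETM_EXLEVEL_S_APP | ETM_EXLEVEL_NS_OS;

	/* Same pattern at bits [14:8] of TRCACATRn... */
	printf("TRCACATRn: %#x\n", generic << TRCACATR_EXLEVEL_SHIFT);
	/* ...and at bits [22:16] of TRCVICTLR. */
	printf("TRCVICTLR: %#x\n", generic << TRCVICTLR_EXLEVEL_SHIFT);
	return 0;
}
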
static void etm4_set_comparator_filter(struct etmv4_config *config,
u64 start, u64 stop, int comparator)
{
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* First half of default address comparator */
config->addr_val[comparator] = start;
@@ -1014,7 +1197,7 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
enum etm_addr_type type)
{
int shift;
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* Configure the comparator */
config->addr_val[comparator] = address;
@@ -1255,7 +1438,15 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
struct etmv4_save_state *state;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa;
+ struct device *etm_dev;
+
+ if (WARN_ON(!csdev))
+ return -ENODEV;
+
+ etm_dev = &csdev->dev;
+ csa = &csdev->access;
/*
* As recommended by 3.4.1 ("The procedure when powering down the PE")
@@ -1264,14 +1455,12 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
dsb(sy);
isb();
- CS_UNLOCK(drvdata->base);
-
+ etm4_cs_unlock(drvdata, csa);
/* Lock the OS lock to disable trace and external debugger access */
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1281,55 +1470,57 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state = drvdata->save_state;
- state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
if (drvdata->nr_pe)
- state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
- state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
- state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
- state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
- state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
- state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
- state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
- state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
- state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
- state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
- state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
- state->trcqctlr = readl(drvdata->base + TRCQCTLR);
-
- state->trcvictlr = readl(drvdata->base + TRCVICTLR);
- state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
- state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
+ state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
+ state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
+ state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
+ state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
+ state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
+ state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
+ state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+
+ state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
- state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
- state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
- state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+ state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+ state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+ state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+ state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+ state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
- state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
- state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
- state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+ state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
- state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
- state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
+ state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+ state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
- state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
- state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
+ state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
- state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
+ state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
+ state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
}
/*
@@ -1340,25 +1531,26 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
*/
for (i = 0; i < drvdata->numcidc; i++)
- state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
+ state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
+ state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));
- state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+ state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);
- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+ state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);
- state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+ state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);
- state->trcpdcr = readl(drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up)
+ state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -1373,11 +1565,11 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
* potentially save power on systems that respect the TRCPDCR_PU
* despite requesting software to save/restore state.
*/
- writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
- drvdata->base + TRCPDCR);
-
+ if (!drvdata->skip_power_up)
+ etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
+ TRCPDCR);
out:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
return ret;
}
@@ -1385,91 +1577,83 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
+ struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ struct csdev_access *csa = &tmp_csa;
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4_cs_unlock(drvdata, csa);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
if (drvdata->nr_pe)
- writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
- writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
- writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
- writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
- writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
- writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
- writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
- writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
- writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
-
- writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
- writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
+ etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+
+ etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
- writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
- writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
- writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+ etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(state->trcseqevr[i],
- drvdata->base + TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
- writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
- writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(state->trccntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(state->trccntctlr[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(state->trccntvr[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(state->trcrsctlr[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- writel_relaxed(state->trcssccr[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(state->trcsscsr[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(state->trcsspcicr[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- writeq_relaxed(state->trcacvr[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(state->trcacatr[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(state->trccidcvr[i],
- drvdata->base + TRCCIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(state->trcvmidcvr[i],
- drvdata->base + TRCVMIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));
- writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);
- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up)
+ etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
drvdata->state_needs_restore = false;
@@ -1482,7 +1666,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
/* Unlock the OS lock to re-enable trace and external debug access */
etm4_os_unlock(drvdata);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
}
static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
@@ -1569,15 +1753,13 @@ static void etm4_pm_clear(void)
}
}
-static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
{
int ret;
- void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct etmv4_drvdata *drvdata;
- struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
+ struct etm4_init_arg init_arg = { 0 };
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -1596,14 +1778,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
- if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
@@ -1616,13 +1790,22 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (!desc.name)
return -ENOMEM;
+ init_arg.drvdata = drvdata;
+ init_arg.csa = &desc.access;
+ init_arg.pid = etm_pid;
+
if (smp_call_function_single(drvdata->cpu,
- etm4_init_arch_data, drvdata, 1))
+ etm4_init_arch_data, &init_arg, 1))
dev_err(dev, "ETM arch init failed\n");
- if (etm4_arch_supported(drvdata->arch) == false)
+ if (!drvdata->arch)
return -EINVAL;
+ /* TRCPDCR is not accessible with system instructions. */
+ if (!desc.access.io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
@@ -1630,7 +1813,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1650,25 +1833,61 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etmdrvdata[drvdata->cpu] = drvdata;
- pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
- drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
+ drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch),
+ ETM_ARCH_MINOR_VERSION(drvdata->arch));
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
}
- etm4_check_arch_features(drvdata, id->id);
-
return 0;
}
+static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct resource *res = &adev->res;
+ int ret;
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = etm4_probe(dev, base, id->id);
+ if (!ret)
+ pm_runtime_put(&adev->dev);
+
+ return ret;
+}
+
+static int etm4_probe_platform_dev(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * System register based devices can identify the
+ * HW directly by reading the appropriate ID registers,
+ * so the PID can be skipped here.
+ */
+ ret = etm4_probe(&pdev->dev, NULL, 0);
+
+ pm_runtime_put(&pdev->dev);
+ return ret;
+}
+
static struct amba_cs_uci_id uci_id_etm4[] = {
{
/* ETMv4 UCI data */
- .devarch = 0x47704a13,
- .devarch_mask = 0xfff0ffff,
+ .devarch = ETM_DEVARCH_ETMv4x_ARCH,
+ .devarch_mask = ETM_DEVARCH_ID_MASK,
.devtype = 0x00000013,
}
};
@@ -1680,15 +1899,12 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static int etm4_remove(struct amba_device *adev)
+static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
-
etm_perf_symlink(drvdata->csdev, false);
-
/*
- * Taking hotplug lock here to avoid racing between etm4_remove and
- * CPU hotplug call backs.
+ * Taking hotplug lock here to avoid racing between etm4_remove_dev()
+ * and CPU hotplug callbacks.
*/
cpus_read_lock();
/*
@@ -1707,12 +1923,33 @@ static int etm4_remove(struct amba_device *adev)
return 0;
}
+static void __exit etm4_remove_amba(struct amba_device *adev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ if (drvdata)
+ etm4_remove_dev(drvdata);
+}
+
+static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+ if (drvdata)
+ ret = etm4_remove_dev(drvdata);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
@@ -1728,17 +1965,32 @@ static const struct amba_id etm4_ids[] = {
MODULE_DEVICE_TABLE(amba, etm4_ids);
-static struct amba_driver etm4x_driver = {
+static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
- .probe = etm4_probe,
- .remove = etm4_remove,
+ .probe = etm4_probe_amba,
+ .remove = etm4_remove_amba,
.id_table = etm4_ids,
};
+static const struct of_device_id etm4_sysreg_match[] = {
+ { .compatible = "arm,coresight-etm4x-sysreg" },
+ {}
+};
+
+static struct platform_driver etm4_platform_driver = {
+ .probe = etm4_probe_platform_dev,
+ .remove = etm4_remove_platform_dev,
+ .driver = {
+ .name = "coresight-etm4x",
+ .of_match_table = etm4_sysreg_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
static int __init etm4x_init(void)
{
int ret;
@@ -1749,18 +2001,28 @@ static int __init etm4x_init(void)
if (ret)
return ret;
- ret = amba_driver_register(&etm4x_driver);
+ ret = amba_driver_register(&etm4x_amba_driver);
if (ret) {
- pr_err("Error registering etm4x driver\n");
- etm4_pm_clear();
+ pr_err("Error registering etm4x AMBA driver\n");
+ goto clear_pm;
}
+ ret = platform_driver_register(&etm4_platform_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering etm4x platform driver\n");
+ amba_driver_unregister(&etm4x_amba_driver);
+
+clear_pm:
+ etm4_pm_clear();
return ret;
}
static void __exit etm4x_exit(void)
{
- amba_driver_unregister(&etm4x_driver);
+ amba_driver_unregister(&etm4x_amba_driver);
+ platform_driver_unregister(&etm4_platform_driver);
etm4_pm_clear();
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 989ce7b8ade7..0995a10790f4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -389,7 +389,7 @@ static ssize_t mode_store(struct device *dev,
config->eventctrl1 &= ~BIT(12);
/* bit[8], Instruction stall bit */
- if (config->mode & ETM_MODE_ISTALL_EN)
+ if ((config->mode & ETM_MODE_ISTALL_EN) && drvdata->stallctl)
config->stall_ctrl |= BIT(8);
else
config->stall_ctrl &= ~BIT(8);
@@ -743,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -760,10 +760,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
- config->vinst_ctrl |= (val << 16);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -778,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -795,10 +795,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
- config->vinst_ctrl |= (val << 20);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -2319,7 +2319,8 @@ static struct attribute *coresight_etmv4_attrs[] = {
};
struct etmv4_reg {
- void __iomem *addr;
+ struct coresight_device *csdev;
+ u32 offset;
u32 data;
};
@@ -2327,15 +2328,16 @@ static void do_smp_cross_read(void *data)
{
struct etmv4_reg *reg = data;
- reg->data = readl_relaxed(reg->addr);
+ reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}
-static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
struct etmv4_reg reg;
- reg.addr = drvdata->base + offset;
+ reg.offset = offset;
+ reg.csdev = drvdata->csdev;
+
/*
* smp cross call ensures the CPU will be powered up before
* accessing the ETMv4 trace core registers
@@ -2344,72 +2346,120 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
return reg.data;
}
-#define coresight_etm4x_reg(name, offset) \
- coresight_simple_reg32(struct etmv4_drvdata, name, offset)
+static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return (u32)(unsigned long)eattr->var;
+}
+
+static ssize_t coresight_etm4x_reg_show(struct device *dev,
+ struct device_attribute *d_attr,
+ char *buf)
+{
+ u32 val, offset;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-#define coresight_etm4x_cross_read(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
- name, offset)
+ offset = coresight_etm4x_attr_to_offset(d_attr);
-coresight_etm4x_reg(trcpdcr, TRCPDCR);
-coresight_etm4x_reg(trcpdsr, TRCPDSR);
-coresight_etm4x_reg(trclsr, TRCLSR);
-coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_reg(trcdevid, TRCDEVID);
-coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_reg(trcpidr0, TRCPIDR0);
-coresight_etm4x_reg(trcpidr1, TRCPIDR1);
-coresight_etm4x_reg(trcpidr2, TRCPIDR2);
-coresight_etm4x_reg(trcpidr3, TRCPIDR3);
-coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
-coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
-coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
+ pm_runtime_get_sync(dev->parent);
+ val = etmv4_cross_read(drvdata, offset);
+ pm_runtime_put_sync(dev->parent);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
+}
+
+static inline bool
+etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
+{
+ switch (offset) {
+ ETM4x_SYSREG_LIST_CASES
+ /*
+ * Registers accessible via system instructions are always
+ * implemented.
+ */
+ return true;
+ ETM4x_MMAP_LIST_CASES
+ /*
+ * Registers accessible only via the memory-mapped
+ * interface must not be accessed via system instructions.
+ * We cannot use drvdata->csdev here, as this function is
+ * called during device creation, via coresight_register(),
+ * before the csdev is initialized. So rely on drvdata->base
+ * to detect whether we have memory-mapped access.
+ */
+ return !!drvdata->base;
+ }
+
+ return false;
+}
+
+/*
+ * Hide the ETM4x registers that may not be available on the
+ * hardware: certain management registers cannot be accessed
+ * via system instructions, so hide their sysfs attributes on
+ * such systems.
+ */
+static umode_t
+coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct device_attribute *d_attr;
+ u32 offset;
+
+ d_attr = container_of(attr, struct device_attribute, attr);
+ offset = coresight_etm4x_attr_to_offset(d_attr);
+
+ if (etm4x_register_implemented(drvdata, offset))
+ return attr->mode;
+ return 0;
+}
+
+#define coresight_etm4x_reg(name, offset) \
+ &((struct dev_ext_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \
+ (void *)(unsigned long)offset \
+ } \
+ })[0].attr.attr
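
The macro above stashes the register offset in the otherwise unused dev_ext_attribute::var pointer; coresight_etm4x_reg_show() and the is_visible() callback recover it via container_of(). A minimal userspace sketch of the same round trip (the struct definitions are pared-down stand-ins for the kernel's, for illustration only):

#include <assert.h>
#include <stddef.h>

struct attribute { const char *name; };
struct device_attribute { struct attribute attr; };
struct dev_ext_attribute { struct device_attribute attr; void *var; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static unsigned int attr_to_offset(struct device_attribute *d_attr)
{
	struct dev_ext_attribute *eattr =
		container_of(d_attr, struct dev_ext_attribute, attr);

	return (unsigned int)(unsigned long)eattr->var;
}

int main(void)
{
	struct dev_ext_attribute reg = {
		.attr = { .attr = { .name = "trcpidr0" } },
		.var  = (void *)(unsigned long)0xFE0,	/* TRCPIDR0 offset */
	};

	assert(attr_to_offset(&reg.attr) == 0xFE0);
	return 0;
}
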
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
- &dev_attr_trcoslsr.attr,
- &dev_attr_trcpdcr.attr,
- &dev_attr_trcpdsr.attr,
- &dev_attr_trclsr.attr,
- &dev_attr_trcconfig.attr,
- &dev_attr_trctraceid.attr,
- &dev_attr_trcauthstatus.attr,
- &dev_attr_trcdevid.attr,
- &dev_attr_trcdevtype.attr,
- &dev_attr_trcpidr0.attr,
- &dev_attr_trcpidr1.attr,
- &dev_attr_trcpidr2.attr,
- &dev_attr_trcpidr3.attr,
+ coresight_etm4x_reg(trcpdcr, TRCPDCR),
+ coresight_etm4x_reg(trcpdsr, TRCPDSR),
+ coresight_etm4x_reg(trclsr, TRCLSR),
+ coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
+ coresight_etm4x_reg(trcdevid, TRCDEVID),
+ coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
+ coresight_etm4x_reg(trcpidr0, TRCPIDR0),
+ coresight_etm4x_reg(trcpidr1, TRCPIDR1),
+ coresight_etm4x_reg(trcpidr2, TRCPIDR2),
+ coresight_etm4x_reg(trcpidr3, TRCPIDR3),
+ coresight_etm4x_reg(trcoslsr, TRCOSLSR),
+ coresight_etm4x_reg(trcconfig, TRCCONFIGR),
+ coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
+ coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
NULL,
};
-coresight_etm4x_cross_read(trcidr0, TRCIDR0);
-coresight_etm4x_cross_read(trcidr1, TRCIDR1);
-coresight_etm4x_cross_read(trcidr2, TRCIDR2);
-coresight_etm4x_cross_read(trcidr3, TRCIDR3);
-coresight_etm4x_cross_read(trcidr4, TRCIDR4);
-coresight_etm4x_cross_read(trcidr5, TRCIDR5);
-/* trcidr[6,7] are reserved */
-coresight_etm4x_cross_read(trcidr8, TRCIDR8);
-coresight_etm4x_cross_read(trcidr9, TRCIDR9);
-coresight_etm4x_cross_read(trcidr10, TRCIDR10);
-coresight_etm4x_cross_read(trcidr11, TRCIDR11);
-coresight_etm4x_cross_read(trcidr12, TRCIDR12);
-coresight_etm4x_cross_read(trcidr13, TRCIDR13);
-
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
- &dev_attr_trcidr0.attr,
- &dev_attr_trcidr1.attr,
- &dev_attr_trcidr2.attr,
- &dev_attr_trcidr3.attr,
- &dev_attr_trcidr4.attr,
- &dev_attr_trcidr5.attr,
+ coresight_etm4x_reg(trcidr0, TRCIDR0),
+ coresight_etm4x_reg(trcidr1, TRCIDR1),
+ coresight_etm4x_reg(trcidr2, TRCIDR2),
+ coresight_etm4x_reg(trcidr3, TRCIDR3),
+ coresight_etm4x_reg(trcidr4, TRCIDR4),
+ coresight_etm4x_reg(trcidr5, TRCIDR5),
/* trcidr[6,7] are reserved */
- &dev_attr_trcidr8.attr,
- &dev_attr_trcidr9.attr,
- &dev_attr_trcidr10.attr,
- &dev_attr_trcidr11.attr,
- &dev_attr_trcidr12.attr,
- &dev_attr_trcidr13.attr,
+ coresight_etm4x_reg(trcidr8, TRCIDR8),
+ coresight_etm4x_reg(trcidr9, TRCIDR9),
+ coresight_etm4x_reg(trcidr10, TRCIDR10),
+ coresight_etm4x_reg(trcidr11, TRCIDR11),
+ coresight_etm4x_reg(trcidr12, TRCIDR12),
+ coresight_etm4x_reg(trcidr13, TRCIDR13),
NULL,
};
@@ -2418,6 +2468,7 @@ static const struct attribute_group coresight_etmv4_group = {
};
static const struct attribute_group coresight_etmv4_mgmt_group = {
+ .is_visible = coresight_etm4x_attr_reg_implemented,
.attrs = coresight_etmv4_mgmt_attrs,
.name = "mgmt",
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 3dd3e0633328..0af60571aa23 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -45,13 +45,13 @@
#define TRCVDSACCTLR 0x0A4
#define TRCVDARCCTLR 0x0A8
/* Derived resources registers */
-#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */
#define TRCSEQRSTEVR 0x118
#define TRCSEQSTR 0x11C
#define TRCEXTINSELR 0x120
-#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
-#define TRCCNTCTLRn(n) (0x150 + (n * 4))
-#define TRCCNTVRn(n) (0x160 + (n * 4))
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */
+#define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */
+#define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */
/* ID registers */
#define TRCIDR8 0x180
#define TRCIDR9 0x184
@@ -60,7 +60,7 @@
#define TRCIDR12 0x190
#define TRCIDR13 0x194
#define TRCIMSPEC0 0x1C0
-#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIMSPECn(n) (0x1C0 + (n * 4)) /* n = 1-7 */
#define TRCIDR0 0x1E0
#define TRCIDR1 0x1E4
#define TRCIDR2 0x1E8
@@ -69,9 +69,12 @@
#define TRCIDR5 0x1F4
#define TRCIDR6 0x1F8
#define TRCIDR7 0x1FC
-/* Resource selection registers */
+/*
+ * Resource selection registers, n = 2-31.
+ * First pair (regs 0, 1) is always present and is reserved.
+ */
#define TRCRSCTLRn(n) (0x200 + (n * 4))
-/* Single-shot comparator registers */
+/* Single-shot comparator registers, n = 0-7 */
#define TRCSSCCRn(n) (0x280 + (n * 4))
#define TRCSSCSRn(n) (0x2A0 + (n * 4))
#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
@@ -81,11 +84,13 @@
#define TRCPDCR 0x310
#define TRCPDSR 0x314
/* Trace registers (0x318-0xEFC) */
-/* Comparator registers */
+/* Address Comparator registers n = 0-15 */
#define TRCACVRn(n) (0x400 + (n * 8))
#define TRCACATRn(n) (0x480 + (n * 8))
+/* Data Value Comparator Value registers, n = 0-7 */
#define TRCDVCVRn(n) (0x500 + (n * 16))
#define TRCDVCMRn(n) (0x580 + (n * 16))
+/* ContextID/Virtual ContextID comparators, n = 0-7 */
#define TRCCIDCVRn(n) (0x600 + (n * 8))
#define TRCVMIDCVRn(n) (0x640 + (n * 8))
#define TRCCIDCCTLR0 0x680
@@ -121,6 +126,332 @@
#define TRCCIDR2 0xFF8
#define TRCCIDR3 0xFFC
+/*
+ * System instructions to access ETM registers.
+ * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
+ */
+#define ETM4x_OFFSET_TO_REG(x) ((x) >> 2)
+
+#define ETM4x_CRn(n) (((n) >> 7) & 0x7)
+#define ETM4x_Op2(n) (((n) >> 4) & 0x7)
+#define ETM4x_CRm(n) ((n) & 0xf)
+
+#include <asm/sysreg.h>
+#define ETM4x_REG_NUM_TO_SYSREG(n) \
+ sys_reg(2, 1, ETM4x_CRn(n), ETM4x_CRm(n), ETM4x_Op2(n))
+
+#define READ_ETM4x_REG(reg) \
+ read_sysreg_s(ETM4x_REG_NUM_TO_SYSREG((reg)))
+#define WRITE_ETM4x_REG(val, reg) \
+ write_sysreg_s(val, ETM4x_REG_NUM_TO_SYSREG((reg)))
+
+#define read_etm4x_sysreg_const_offset(offset) \
+ READ_ETM4x_REG(ETM4x_OFFSET_TO_REG(offset))
+
+#define write_etm4x_sysreg_const_offset(val, offset) \
+ WRITE_ETM4x_REG(val, ETM4x_OFFSET_TO_REG(offset))
+
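A worked example (editorial, not part of the patch) of the offset-to-sysreg mapping above: TRCPRGCTLR sits at offset 0x004, so its register number is 1 and the field extractors yield the architected sys_reg(2, 1, 0, 1, 0) encoding:

#include <stdio.h>

#define ETM4x_OFFSET_TO_REG(x)	((x) >> 2)
#define ETM4x_CRn(n)		(((n) >> 7) & 0x7)
#define ETM4x_Op2(n)		(((n) >> 4) & 0x7)
#define ETM4x_CRm(n)		((n) & 0xf)

int main(void)
{
	unsigned int n = ETM4x_OFFSET_TO_REG(0x004);	/* TRCPRGCTLR */

	/* Prints "sys_reg(2, 1, 0, 1, 0)", the architected encoding. */
	printf("sys_reg(2, 1, %u, %u, %u)\n",
	       ETM4x_CRn(n), ETM4x_CRm(n), ETM4x_Op2(n));
	return 0;
}
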
+#define CASE_READ(res, x) \
+ case (x): { (res) = read_etm4x_sysreg_const_offset((x)); break; }
+
+#define CASE_WRITE(val, x) \
+ case (x): { write_etm4x_sysreg_const_offset((val), (x)); break; }
+
+#define CASE_NOP(__unused, x) \
+ case (x): /* fall through */
+
+/* List of registers accessible via System instructions */
+#define ETM_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPRGCTLR) \
+ CASE_##op((val), TRCPROCSELR) \
+ CASE_##op((val), TRCSTATR) \
+ CASE_##op((val), TRCCONFIGR) \
+ CASE_##op((val), TRCAUXCTLR) \
+ CASE_##op((val), TRCEVENTCTL0R) \
+ CASE_##op((val), TRCEVENTCTL1R) \
+ CASE_##op((val), TRCSTALLCTLR) \
+ CASE_##op((val), TRCTSCTLR) \
+ CASE_##op((val), TRCSYNCPR) \
+ CASE_##op((val), TRCCCCTLR) \
+ CASE_##op((val), TRCBBCTLR) \
+ CASE_##op((val), TRCTRACEIDR) \
+ CASE_##op((val), TRCQCTLR) \
+ CASE_##op((val), TRCVICTLR) \
+ CASE_##op((val), TRCVIIECTLR) \
+ CASE_##op((val), TRCVISSCTLR) \
+ CASE_##op((val), TRCVIPCSSCTLR) \
+ CASE_##op((val), TRCVDCTLR) \
+ CASE_##op((val), TRCVDSACCTLR) \
+ CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCSEQEVRn(0)) \
+ CASE_##op((val), TRCSEQEVRn(1)) \
+ CASE_##op((val), TRCSEQEVRn(2)) \
+ CASE_##op((val), TRCSEQRSTEVR) \
+ CASE_##op((val), TRCSEQSTR) \
+ CASE_##op((val), TRCEXTINSELR) \
+ CASE_##op((val), TRCCNTRLDVRn(0)) \
+ CASE_##op((val), TRCCNTRLDVRn(1)) \
+ CASE_##op((val), TRCCNTRLDVRn(2)) \
+ CASE_##op((val), TRCCNTRLDVRn(3)) \
+ CASE_##op((val), TRCCNTCTLRn(0)) \
+ CASE_##op((val), TRCCNTCTLRn(1)) \
+ CASE_##op((val), TRCCNTCTLRn(2)) \
+ CASE_##op((val), TRCCNTCTLRn(3)) \
+ CASE_##op((val), TRCCNTVRn(0)) \
+ CASE_##op((val), TRCCNTVRn(1)) \
+ CASE_##op((val), TRCCNTVRn(2)) \
+ CASE_##op((val), TRCCNTVRn(3)) \
+ CASE_##op((val), TRCIDR8) \
+ CASE_##op((val), TRCIDR9) \
+ CASE_##op((val), TRCIDR10) \
+ CASE_##op((val), TRCIDR11) \
+ CASE_##op((val), TRCIDR12) \
+ CASE_##op((val), TRCIDR13) \
+ CASE_##op((val), TRCIMSPECn(0)) \
+ CASE_##op((val), TRCIMSPECn(1)) \
+ CASE_##op((val), TRCIMSPECn(2)) \
+ CASE_##op((val), TRCIMSPECn(3)) \
+ CASE_##op((val), TRCIMSPECn(4)) \
+ CASE_##op((val), TRCIMSPECn(5)) \
+ CASE_##op((val), TRCIMSPECn(6)) \
+ CASE_##op((val), TRCIMSPECn(7)) \
+ CASE_##op((val), TRCIDR0) \
+ CASE_##op((val), TRCIDR1) \
+ CASE_##op((val), TRCIDR2) \
+ CASE_##op((val), TRCIDR3) \
+ CASE_##op((val), TRCIDR4) \
+ CASE_##op((val), TRCIDR5) \
+ CASE_##op((val), TRCIDR6) \
+ CASE_##op((val), TRCIDR7) \
+ CASE_##op((val), TRCRSCTLRn(2)) \
+ CASE_##op((val), TRCRSCTLRn(3)) \
+ CASE_##op((val), TRCRSCTLRn(4)) \
+ CASE_##op((val), TRCRSCTLRn(5)) \
+ CASE_##op((val), TRCRSCTLRn(6)) \
+ CASE_##op((val), TRCRSCTLRn(7)) \
+ CASE_##op((val), TRCRSCTLRn(8)) \
+ CASE_##op((val), TRCRSCTLRn(9)) \
+ CASE_##op((val), TRCRSCTLRn(10)) \
+ CASE_##op((val), TRCRSCTLRn(11)) \
+ CASE_##op((val), TRCRSCTLRn(12)) \
+ CASE_##op((val), TRCRSCTLRn(13)) \
+ CASE_##op((val), TRCRSCTLRn(14)) \
+ CASE_##op((val), TRCRSCTLRn(15)) \
+ CASE_##op((val), TRCRSCTLRn(16)) \
+ CASE_##op((val), TRCRSCTLRn(17)) \
+ CASE_##op((val), TRCRSCTLRn(18)) \
+ CASE_##op((val), TRCRSCTLRn(19)) \
+ CASE_##op((val), TRCRSCTLRn(20)) \
+ CASE_##op((val), TRCRSCTLRn(21)) \
+ CASE_##op((val), TRCRSCTLRn(22)) \
+ CASE_##op((val), TRCRSCTLRn(23)) \
+ CASE_##op((val), TRCRSCTLRn(24)) \
+ CASE_##op((val), TRCRSCTLRn(25)) \
+ CASE_##op((val), TRCRSCTLRn(26)) \
+ CASE_##op((val), TRCRSCTLRn(27)) \
+ CASE_##op((val), TRCRSCTLRn(28)) \
+ CASE_##op((val), TRCRSCTLRn(29)) \
+ CASE_##op((val), TRCRSCTLRn(30)) \
+ CASE_##op((val), TRCRSCTLRn(31)) \
+ CASE_##op((val), TRCSSCCRn(0)) \
+ CASE_##op((val), TRCSSCCRn(1)) \
+ CASE_##op((val), TRCSSCCRn(2)) \
+ CASE_##op((val), TRCSSCCRn(3)) \
+ CASE_##op((val), TRCSSCCRn(4)) \
+ CASE_##op((val), TRCSSCCRn(5)) \
+ CASE_##op((val), TRCSSCCRn(6)) \
+ CASE_##op((val), TRCSSCCRn(7)) \
+ CASE_##op((val), TRCSSCSRn(0)) \
+ CASE_##op((val), TRCSSCSRn(1)) \
+ CASE_##op((val), TRCSSCSRn(2)) \
+ CASE_##op((val), TRCSSCSRn(3)) \
+ CASE_##op((val), TRCSSCSRn(4)) \
+ CASE_##op((val), TRCSSCSRn(5)) \
+ CASE_##op((val), TRCSSCSRn(6)) \
+ CASE_##op((val), TRCSSCSRn(7)) \
+ CASE_##op((val), TRCSSPCICRn(0)) \
+ CASE_##op((val), TRCSSPCICRn(1)) \
+ CASE_##op((val), TRCSSPCICRn(2)) \
+ CASE_##op((val), TRCSSPCICRn(3)) \
+ CASE_##op((val), TRCSSPCICRn(4)) \
+ CASE_##op((val), TRCSSPCICRn(5)) \
+ CASE_##op((val), TRCSSPCICRn(6)) \
+ CASE_##op((val), TRCSSPCICRn(7)) \
+ CASE_##op((val), TRCOSLAR) \
+ CASE_##op((val), TRCOSLSR) \
+ CASE_##op((val), TRCACVRn(0)) \
+ CASE_##op((val), TRCACVRn(1)) \
+ CASE_##op((val), TRCACVRn(2)) \
+ CASE_##op((val), TRCACVRn(3)) \
+ CASE_##op((val), TRCACVRn(4)) \
+ CASE_##op((val), TRCACVRn(5)) \
+ CASE_##op((val), TRCACVRn(6)) \
+ CASE_##op((val), TRCACVRn(7)) \
+ CASE_##op((val), TRCACVRn(8)) \
+ CASE_##op((val), TRCACVRn(9)) \
+ CASE_##op((val), TRCACVRn(10)) \
+ CASE_##op((val), TRCACVRn(11)) \
+ CASE_##op((val), TRCACVRn(12)) \
+ CASE_##op((val), TRCACVRn(13)) \
+ CASE_##op((val), TRCACVRn(14)) \
+ CASE_##op((val), TRCACVRn(15)) \
+ CASE_##op((val), TRCACATRn(0)) \
+ CASE_##op((val), TRCACATRn(1)) \
+ CASE_##op((val), TRCACATRn(2)) \
+ CASE_##op((val), TRCACATRn(3)) \
+ CASE_##op((val), TRCACATRn(4)) \
+ CASE_##op((val), TRCACATRn(5)) \
+ CASE_##op((val), TRCACATRn(6)) \
+ CASE_##op((val), TRCACATRn(7)) \
+ CASE_##op((val), TRCACATRn(8)) \
+ CASE_##op((val), TRCACATRn(9)) \
+ CASE_##op((val), TRCACATRn(10)) \
+ CASE_##op((val), TRCACATRn(11)) \
+ CASE_##op((val), TRCACATRn(12)) \
+ CASE_##op((val), TRCACATRn(13)) \
+ CASE_##op((val), TRCACATRn(14)) \
+ CASE_##op((val), TRCACATRn(15)) \
+ CASE_##op((val), TRCDVCVRn(0)) \
+ CASE_##op((val), TRCDVCVRn(1)) \
+ CASE_##op((val), TRCDVCVRn(2)) \
+ CASE_##op((val), TRCDVCVRn(3)) \
+ CASE_##op((val), TRCDVCVRn(4)) \
+ CASE_##op((val), TRCDVCVRn(5)) \
+ CASE_##op((val), TRCDVCVRn(6)) \
+ CASE_##op((val), TRCDVCVRn(7)) \
+ CASE_##op((val), TRCDVCMRn(0)) \
+ CASE_##op((val), TRCDVCMRn(1)) \
+ CASE_##op((val), TRCDVCMRn(2)) \
+ CASE_##op((val), TRCDVCMRn(3)) \
+ CASE_##op((val), TRCDVCMRn(4)) \
+ CASE_##op((val), TRCDVCMRn(5)) \
+ CASE_##op((val), TRCDVCMRn(6)) \
+ CASE_##op((val), TRCDVCMRn(7)) \
+ CASE_##op((val), TRCCIDCVRn(0)) \
+ CASE_##op((val), TRCCIDCVRn(1)) \
+ CASE_##op((val), TRCCIDCVRn(2)) \
+ CASE_##op((val), TRCCIDCVRn(3)) \
+ CASE_##op((val), TRCCIDCVRn(4)) \
+ CASE_##op((val), TRCCIDCVRn(5)) \
+ CASE_##op((val), TRCCIDCVRn(6)) \
+ CASE_##op((val), TRCCIDCVRn(7)) \
+ CASE_##op((val), TRCVMIDCVRn(0)) \
+ CASE_##op((val), TRCVMIDCVRn(1)) \
+ CASE_##op((val), TRCVMIDCVRn(2)) \
+ CASE_##op((val), TRCVMIDCVRn(3)) \
+ CASE_##op((val), TRCVMIDCVRn(4)) \
+ CASE_##op((val), TRCVMIDCVRn(5)) \
+ CASE_##op((val), TRCVMIDCVRn(6)) \
+ CASE_##op((val), TRCVMIDCVRn(7)) \
+ CASE_##op((val), TRCCIDCCTLR0) \
+ CASE_##op((val), TRCCIDCCTLR1) \
+ CASE_##op((val), TRCVMIDCCTLR0) \
+ CASE_##op((val), TRCVMIDCCTLR1) \
+ CASE_##op((val), TRCCLAIMSET) \
+ CASE_##op((val), TRCCLAIMCLR) \
+ CASE_##op((val), TRCAUTHSTATUS) \
+ CASE_##op((val), TRCDEVARCH) \
+ CASE_##op((val), TRCDEVID)
+
+/* List of registers only accessible via memory-mapped interface */
+#define ETM_MMAP_LIST(op, val) \
+ CASE_##op((val), TRCDEVTYPE) \
+ CASE_##op((val), TRCPDCR) \
+ CASE_##op((val), TRCPDSR) \
+ CASE_##op((val), TRCDEVAFF0) \
+ CASE_##op((val), TRCDEVAFF1) \
+ CASE_##op((val), TRCLAR) \
+ CASE_##op((val), TRCLSR) \
+ CASE_##op((val), TRCITCTRL) \
+ CASE_##op((val), TRCPIDR4) \
+ CASE_##op((val), TRCPIDR0) \
+ CASE_##op((val), TRCPIDR1) \
+ CASE_##op((val), TRCPIDR2) \
+ CASE_##op((val), TRCPIDR3)
+
+#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res))
+#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val))
+
+#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused)
+#define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused)
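
The NOP flavour expands the same lists into bare case labels, which is what lets etm4x_register_implemented() in the sysfs patch above group all listed registers into two fall-through blocks. A small standalone demonstration of the CASE_##op token-pasting trick (the two offsets are arbitrary):

#include <stdbool.h>
#include <stdio.h>

#define CASE_NOP(__unused, x)	case (x):

#define REG_LIST(op, val) \
	CASE_##op((val), 0x004) \
	CASE_##op((val), 0x008)

static bool is_listed(unsigned int offset)
{
	switch (offset) {
	REG_LIST(NOP, __unused)
		return true;
	}
	return false;
}

int main(void)
{
	printf("%d %d\n", is_listed(0x004), is_listed(0x310));	/* 1 0 */
	return 0;
}
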
+
+#define read_etm4x_sysreg_offset(offset, _64bit) \
+ ({ \
+ u64 __val; \
+ \
+ if (__builtin_constant_p((offset))) \
+ __val = read_etm4x_sysreg_const_offset((offset)); \
+ else \
+ __val = etm4x_sysreg_read((offset), true, (_64bit)); \
+ __val; \
+ })
+
+#define write_etm4x_sysreg_offset(val, offset, _64bit) \
+ do { \
+ if (__builtin_constant_p((offset))) \
+ write_etm4x_sysreg_const_offset((val), \
+ (offset)); \
+ else \
+ etm4x_sysreg_write((val), (offset), true, \
+ (_64bit)); \
+ } while (0)
+
+
+#define etm4x_relaxed_read32(csa, offset) \
+ ((u32)((csa)->io_mem ? \
+ readl_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), false)))
+
+#define etm4x_relaxed_read64(csa, offset) \
+ ((u64)((csa)->io_mem ? \
+ readq_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), true)))
+
+#define etm4x_read32(csa, offset) \
+ ({ \
+ u32 __val = etm4x_relaxed_read32((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_read64(csa, offset) \
+ ({ \
+ u64 __val = etm4x_relaxed_read64((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_relaxed_write32(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writel_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ false); \
+ } while (0)
+
+#define etm4x_relaxed_write64(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writeq_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ true); \
+ } while (0)
+
+#define etm4x_write32(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write32((csa), (val), (offset)); \
+ } while (0)
+
+#define etm4x_write64(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write64((csa), (val), (offset)); \
+ } while (0)
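
These wrappers are what keep the rest of the driver access-mode agnostic: a single call site compiles to either an MMIO access or a system-instruction access depending on csa->io_mem. A simplified standalone sketch of the dispatch (struct csdev_access here is a pared-down stand-in for the kernel's, and the sysreg leg is stubbed out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pared-down stand-in for the kernel's struct csdev_access. */
struct csdev_access {
	bool io_mem;		/* MMIO vs. system instructions */
	uint32_t *base;		/* MMIO base, valid when io_mem is true */
};

/* Mirrors the io_mem dispatch done by etm4x_relaxed_read32() above. */
static uint32_t example_read32(struct csdev_access *csa, uint32_t offset)
{
	if (csa->io_mem)
		return csa->base[offset / 4];
	return 0;	/* sysreg leg elided in this sketch */
}

int main(void)
{
	uint32_t regs[0x200] = { 0 };
	struct csdev_access csa = { .io_mem = true, .base = regs };

	regs[0x1E4 / 4] = 0x4100f424;	/* arbitrary TRCIDR1 value */
	printf("TRCIDR1 = %#x\n", (unsigned int)example_read32(&csa, 0x1E4));
	return 0;
}
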
+
+
/* ETMv4 resources */
#define ETM_MAX_NR_PE 8
#define ETMv4_MAX_CNTR 4
@@ -137,7 +468,6 @@
#define ETM_MAX_RES_SEL 32
#define ETM_MAX_SS_CMP 8
-#define ETM_ARCH_V4 0x40
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
#define ETM_CYC_THRESHOLD_DEFAULT 0x100
@@ -175,34 +505,150 @@
ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
+/*
+ * TRCDEVARCH Bit field definitions
+ * Bits[31:21] - ARCHITECT = Always Arm Ltd.
+ * * Bits[31:28] = 0x4
+ * * Bits[27:21] = 0b0111011
+ * Bit[20] - PRESENT, Indicates the presence of this register.
+ *
+ * Bits[19:16] - REVISION, Revision of the architecture.
+ *
+ * Bits[15:0] - ARCHID, Identifies this component as an ETM
+ * * Bits[15:12] - architecture version of ETM
+ * * = 4 for ETMv4
+ * * Bits[11:0] = 0xA13, architecture part number for ETM.
+ */
+#define ETM_DEVARCH_ARCHITECT_MASK GENMASK(31, 21)
+#define ETM_DEVARCH_ARCHITECT_ARM ((0x4 << 28) | (0b0111011 << 21))
+#define ETM_DEVARCH_PRESENT BIT(20)
+#define ETM_DEVARCH_REVISION_SHIFT 16
+#define ETM_DEVARCH_REVISION_MASK GENMASK(19, 16)
+#define ETM_DEVARCH_REVISION(x) \
+ (((x) & ETM_DEVARCH_REVISION_MASK) >> ETM_DEVARCH_REVISION_SHIFT)
+#define ETM_DEVARCH_ARCHID_MASK GENMASK(15, 0)
+#define ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT 12
+#define ETM_DEVARCH_ARCHID_ARCH_VER_MASK GENMASK(15, 12)
+#define ETM_DEVARCH_ARCHID_ARCH_VER(x) \
+ (((x) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK) >> ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT)
+
+#define ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(ver) \
+ (((ver) << ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK)
+
+#define ETM_DEVARCH_ARCHID_ARCH_PART(x) ((x) & 0xfffUL)
+
+#define ETM_DEVARCH_MAKE_ARCHID(major) \
+ ((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))
+
+#define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4)
+
+#define ETM_DEVARCH_ID_MASK \
+ (ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETMv4x_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
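
Note that ETM_DEVARCH_ID_MASK deliberately leaves the REVISION field out, so the comparison matches any ETMv4.x minor version. A sketch of the identification check these masks enable (the helper name is illustrative, not from the patch):

/* Does a TRCDEVARCH value identify an Arm ETMv4.x, any revision? */
static bool etm4x_devarch_is_etmv4(u32 devarch)
{
	return (devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH;
}
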
+
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
+#define TRCSSCSRn_PC BIT(3)
+
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_S_APP BIT(8)
-#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_HYP BIT(10)
-#define ETM_EXLEVEL_S_MON BIT(11)
-/* non-secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_NS_APP BIT(12)
-#define ETM_EXLEVEL_NS_OS BIT(13)
-#define ETM_EXLEVEL_NS_HYP BIT(14)
-#define ETM_EXLEVEL_NS_NA BIT(15)
+/*
+ * Exception level masks for Secure and Non-secure ELs.
+ * ETM defines the bits for EL control (e.g., TRCVICTLR, TRCACATRn).
+ * The Secure and Non-secure EL fields always sit together.
+ * Non-secure EL3 is never implemented.
+ * The bits below form a generic mask; they appear at different
+ * offsets in different registers, so the mask must be shifted
+ * into the appropriate field of each register.
+ */
+#define ETM_EXLEVEL_S_APP BIT(0) /* Secure EL0 */
+#define ETM_EXLEVEL_S_OS BIT(1) /* Secure EL1 */
+#define ETM_EXLEVEL_S_HYP BIT(2) /* Secure EL2 */
+#define ETM_EXLEVEL_S_MON BIT(3) /* Secure EL3/Monitor */
+#define ETM_EXLEVEL_NS_APP BIT(4) /* NonSecure EL0 */
+#define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */
+#define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */
+
+#define ETM_EXLEVEL_MASK (GENMASK(6, 0))
+#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0))
+#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4))
-/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
-#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
+/* access level controls in TRCACATRn */
+#define TRCACATR_EXLEVEL_SHIFT 8
+
+/* access level control in TRCVICTLR */
+#define TRCVICTLR_EXLEVEL_SHIFT 16
+#define TRCVICTLR_EXLEVEL_S_SHIFT 16
+#define TRCVICTLR_EXLEVEL_NS_SHIFT 20
/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
-/* NS MON (EL3) mode never implemented */
-#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
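
Shifting the generic masks by TRCVICTLR_EXLEVEL_SHIFT reproduces exactly the bit positions of the old ETM_EXLEVEL_S_VICTLR_MASK (bits [19:16]) and ETM_EXLEVEL_NS_VICTLR_MASK (bits [22:20]), which is why the sysfs hunks above can swap macro names without changing the register layout. A standalone check (GENMASK re-derived here for illustration):

#include <assert.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

#define ETM_EXLEVEL_S_MASK	GENMASK(3, 0)
#define ETM_EXLEVEL_NS_MASK	GENMASK(6, 4)
#define TRCVICTLR_EXLEVEL_SHIFT	16

int main(void)
{
	/* Old: ETM_EXLEVEL_S_VICTLR_MASK == GENMASK(19, 16) */
	assert((ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT) == 0x000f0000U);
	/* Old: ETM_EXLEVEL_NS_VICTLR_MASK == GENMASK(22, 20) */
	assert((ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT) == 0x00700000U);
	return 0;
}
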
+
+#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8
+#define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MAJOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MAJOR_MASK) >> ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR_SHIFT 4
+#define ETM_TRCIDR1_ARCH_MINOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MINOR_MASK) >> ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_SHIFT ETM_TRCIDR1_ARCH_MINOR_SHIFT
+#define ETM_TRCIDR1_ARCH_MASK \
+ (ETM_TRCIDR1_ARCH_MAJOR_MASK | ETM_TRCIDR1_ARCH_MINOR_MASK)
+#define ETM_TRCIDR1_ARCH_ETMv4 0x4
+
+/*
+ * Driver representation of the ETM architecture.
+ * The version of an ETM component can be detected from
+ *
+ * TRCDEVARCH - CoreSight architected register
+ * - Bits[15:12] - Major version
+ * - Bits[19:16] - Minor version
+ * TRCIDR1 - ETM architected register
+ * - Bits[11:8] - Major version
+ * - Bits[7:4] - Minor version
+ * We must rely on TRCDEVARCH for the version information,
+ * but we don't want to break support for older
+ * implementations which might not implement it. Thus we
+ * fall back to TRCIDR1 if TRCDEVARCH is not implemented
+ * for memory-mapped components.
+ * To make version-dependent decisions easier, the driver
+ * uses an internal representation of the version, as follows:
+ *
+ * ETM_ARCH_VERSION[7:0], where:
+ * Bits[7:4] - Major version
+ * Bits[3:0] - Minor version
+ */
+#define ETM_ARCH_VERSION(major, minor) \
+ ((((major) & 0xfU) << 4) | (((minor) & 0xfU)))
+#define ETM_ARCH_MAJOR_VERSION(arch) (((arch) >> 4) & 0xfU)
+#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
+
+#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
/* Interpretation of resource numbers change at ETM v4.3 architecture */
-#define ETM4X_ARCH_4V3 0x43
+#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
+
+static inline u8 etm_devarch_to_arch(u32 devarch)
+{
+ return ETM_ARCH_VERSION(ETM_DEVARCH_ARCHID_ARCH_VER(devarch),
+ ETM_DEVARCH_REVISION(devarch));
+}
+
+static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+{
+ return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+ ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+}
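
A worked check (editorial) of the conversion: an ETMv4.3 reports ARCHID architecture version 4 and REVISION 3 in TRCDEVARCH, which etm_devarch_to_arch() folds into the driver's 0x43 encoding, i.e. ETM_ARCH_V4_3 as used for the TRCIDR4.NUMRSPAIR interpretation earlier in this diff:

#include <assert.h>

#define ETM_ARCH_VERSION(major, minor) \
	((((major) & 0xfU) << 4) | ((minor) & 0xfU))

int main(void)
{
	/* ETMv4.3 -> internal version 0x43, i.e. ETM_ARCH_V4_3. */
	assert(ETM_ARCH_VERSION(4, 3) == 0x43);
	return 0;
}
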
enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
@@ -256,7 +702,7 @@ enum etm_impdef_type {
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
- * @arch: ETM architecture version (for arch dependent config).
+ * @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
u32 mode;
@@ -300,7 +746,7 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
- u8 arch;
+ u8 s_ex_level;
};
/**
@@ -369,7 +815,7 @@ struct etmv4_save_state {
* @spinlock: Only one at a time pls.
* @mode: This tracer's mode, i.e sysFS, Perf or disabled.
* @cpu: The cpu this component is affined to.
- * @arch: ETM version number.
+ * @arch: ETM architecture version.
* @nr_pe: The number of processing entity available for tracing.
* @nr_pe_cmp: The number of processing entity comparator inputs that are
* available for tracing.
@@ -491,4 +937,7 @@ enum etm_addr_ctxtype {
extern const struct attribute_group *coresight_etmv4_groups[];
void etm4_config_trace_mode(struct etmv4_config *config);
+
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 071c723227db..b363dd6bc510 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -52,13 +52,14 @@ static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
/* Claim the device only when we enable the first slave */
if (!(functl & FUNNEL_ENSx_MASK)) {
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
}
@@ -101,6 +102,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
int inport)
{
u32 functl;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -110,7 +112,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
/* Disclaim the device if none of the slaves are now active */
if (!(functl & FUNNEL_ENSx_MASK))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -242,6 +244,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = coresight_funnel_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
dev_set_drvdata(dev, drvdata);
@@ -370,9 +373,9 @@ static int dynamic_funnel_probe(struct amba_device *adev,
return funnel_probe(&adev->dev, &adev->res);
}
-static int dynamic_funnel_remove(struct amba_device *adev)
+static void dynamic_funnel_remove(struct amba_device *adev)
{
- return funnel_remove(&adev->dev);
+ funnel_remove(&adev->dev);
}
static const struct amba_id dynamic_funnel_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 7e2a2b7f503f..b86acbc74cf0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -45,12 +45,14 @@ struct replicator_drvdata {
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
- if (!coresight_claim_device_unlocked(drvdata->base)) {
+ if (!coresight_claim_device_unlocked(csdev)) {
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
}
CS_LOCK(drvdata->base);
@@ -70,6 +72,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
{
int rc = 0;
u32 id0val, id1val;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -84,7 +87,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
id0val = id1val = 0xff;
if (id0val == 0xff && id1val == 0xff)
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (!rc) {
switch (outport) {
@@ -140,6 +143,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
u32 reg;
+ struct coresight_device *csdev = drvdata->csdev;
switch (outport) {
case 0:
@@ -160,7 +164,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
(readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -254,6 +258,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = replicator_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
if (fwnode_property_present(dev_fwnode(dev),
@@ -388,9 +393,9 @@ static int dynamic_replicator_probe(struct amba_device *adev,
return replicator_probe(&adev->dev, &adev->res);
}
-static int dynamic_replicator_remove(struct amba_device *adev)
+static void dynamic_replicator_remove(struct amba_device *adev)
{
- return replicator_remove(&adev->dev);
+ replicator_remove(&adev->dev);
}
static const struct amba_id dynamic_replicator_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 99791773f682..58062a5a8238 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -258,6 +258,7 @@ static void stm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
/*
* For as long as the tracer isn't disabled another entity can't
@@ -270,7 +271,7 @@ static void stm_disable(struct coresight_device *csdev,
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
- coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
+ coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(csdev->dev.parent);
@@ -884,6 +885,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
ret = stm_get_stimulus_area(dev, &ch_res);
if (ret)
@@ -951,15 +953,13 @@ stm_unregister:
return ret;
}
-static int stm_remove(struct amba_device *adev)
+static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 8169dff5a9f6..74c6323d4d6a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -33,16 +33,20 @@ DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
+
/* Ensure formatter, unformatter and hardware fifo are empty */
- if (coresight_timeout(drvdata->base,
- TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
+ dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
}
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
@@ -51,9 +55,8 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
- if (coresight_timeout(drvdata->base,
- TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
+ dev_err(&csdev->dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -456,6 +459,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
}
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -559,7 +563,7 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
-static int tmc_remove(struct amba_device *adev)
+static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -570,8 +574,6 @@ static int tmc_remove(struct amba_device *adev)
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static const struct amba_id tmc_ids[] = {
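tmc_wait_for_tmcready() and tmc_flush_and_stop() above illustrate the other half of the abstraction: coresight_timeout() now polls through a struct csdev_access instead of a bare base pointer. A hedged sketch of a caller in the new style (EXAMPLE_STS and the bit position are made up for illustration):

#define EXAMPLE_STS		0x00c	/* hypothetical status register */
#define EXAMPLE_READY_BIT	2	/* hypothetical ready bit */

static void example_wait_ready(struct coresight_device *csdev)
{
	struct csdev_access *csa = &csdev->access;

	/* poll EXAMPLE_STS until the ready bit reads as 1, or time out */
	if (coresight_timeout(csa, EXAMPLE_STS, EXAMPLE_READY_BIT, 1))
		dev_err(&csdev->dev, "timeout waiting for ready\n");
}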
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 989d965f3d90..45b85edfc690 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -37,7 +37,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -88,7 +88,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etb_disable_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@@ -109,7 +109,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -120,11 +120,13 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index bf5230e39c5b..acdb59e0e661 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1040,7 +1040,7 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = tmc_etr_enable_catu(drvdata, etr_buf);
if (rc)
return rc;
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
__tmc_etr_enable_hw(drvdata);
@@ -1134,7 +1134,7 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
/* Reset the ETR buf used by hardware */
drvdata->etr_buf = NULL;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index d5dfee9ee556..34d37abd2c8d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -60,49 +60,45 @@ struct tpiu_drvdata {
struct coresight_device *csdev;
};
-static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_enable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* TODO: fill this up */
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- tpiu_enable_hw(drvdata);
+ tpiu_enable_hw(&csdev->access);
atomic_inc(csdev->refcnt);
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
-static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_disable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* Clear formatter and stop on flush */
- writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI, TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN, TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
+ coresight_timeout(csa, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
+ coresight_timeout(csa, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_disable(struct coresight_device *csdev)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
if (atomic_dec_return(csdev->refcnt))
return -EBUSY;
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&csdev->access);
dev_dbg(&csdev->dev, "TPIU disabled\n");
return 0;
@@ -149,9 +145,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
/* Disable tpiu to support older devices */
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&desc.access);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
@@ -173,13 +170,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(drvdata->csdev);
}
-static int tpiu_remove(struct amba_device *adev)
+static void tpiu_remove(struct amba_device *adev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
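With the TPIU converted, its low-level helpers take only a struct csdev_access, so the same routine can later serve devices reachable by system instructions as well as by MMIO. A hedged sketch of a flush helper in that style, borrowing the FFCR bit names from the TPIU hunk above (EXAMPLE_FFCR is illustrative):

#define EXAMPLE_FFCR	0x304	/* hypothetical formatter/flush control */

static void example_manual_flush(struct csdev_access *csa)
{
	CS_UNLOCK(csa->base);

	/* request a manual flush through the abstracted write helper */
	csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN,
				     EXAMPLE_FFCR);
	/* wait for the flush-on-manual bit to clear again */
	coresight_timeout(csa, EXAMPLE_FFCR, FFCR_FON_MAN_BIT, 0);

	CS_LOCK(csa->base);
}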
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 52acd77438ed..251e75c9ba9d 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -269,6 +269,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 3e7df1c0477f..81d7b21d31ec 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
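The heartbeat fix above is a common error-path cleanup: instead of pre-initializing ret to -ENOMEM, the error code is assigned at the failure site, which keeps the function's return value well-defined on every path. A hedged standalone sketch of the pattern (all names are illustrative):

static char *example_names[8];		/* hypothetical storage */

static int example_init(int nr_devs)
{
	int i, ret;

	for (i = 0; i < nr_devs; i++) {
		example_names[i] = kasprintf(GFP_KERNEL, "dev.%d", i);
		if (!example_names[i]) {
			ret = -ENOMEM;	/* set at the failure site */
			goto fail_free;
		}
	}
	return 0;

fail_free:
	while (i--)
		kfree(example_names[i]);
	return ret;
}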
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 913db013fe90..fc90293afcbf 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -622,9 +622,7 @@ static int bit_xfer_atomic(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[],
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL_ALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
}
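The bit-banging algorithm's functionality mask is condensed here: the two block flags dropped (I2C_FUNC_SMBUS_READ_BLOCK_DATA and I2C_FUNC_SMBUS_BLOCK_PROC_CALL) are exactly what I2C_FUNC_SMBUS_EMUL_ALL adds on top of I2C_FUNC_SMBUS_EMUL, so the advertised capabilities are unchanged. A sketch of how an adapter that emulates every SMBus command would report itself:

static u32 example_func(struct i2c_adapter *adap)
{
	/* full software SMBus emulation, including block transfers */
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL_ALL |
	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
}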
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d4d60ad0eda0..05ebf7546e3f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -586,13 +586,6 @@ config I2C_DIGICOLOR
This driver can also be built as a module. If so, the module
will be called i2c-digicolor.
-config I2C_EFM32
- tristate "EFM32 I2C controller"
- depends on ARCH_EFM32 || COMPILE_TEST
- help
- This driver supports the i2c block found in Energy Micro's EFM32
- SoCs.
-
config I2C_EG20T
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
@@ -1000,19 +993,10 @@ config I2C_SIMTEC
This driver can also be built as a module. If so, the module
will be called i2c-simtec.
-config I2C_SIRF
- tristate "CSR SiRFprimaII I2C interface"
- depends on ARCH_SIRF || COMPILE_TEST
- help
- If you say yes to this option, support will be included for the
- CSR SiRFprimaII I2C interface.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-sirf.
-
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
@@ -1049,19 +1033,6 @@ config I2C_STM32F7
This driver can also be built as module. If so, the module
will be called i2c-stm32f7.
-config I2C_STU300
- tristate "ST Microelectronics DDC I2C interface"
- depends on MACH_U300 || COMPILE_TEST
- default y if MACH_U300
- help
- If you say yes to this option, support will be included for the
- I2C interface from ST Microelectronics simply called "DDC I2C"
- supporting both I2C and DDC, used in e.g. the U300 series
- mobile platforms.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-stu300.
-
config I2C_SUN6I_P2WI
tristate "Allwinner sun6i internal P2WI controller"
depends on RESET_CONTROLLER
@@ -1174,8 +1145,8 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_XLR
- tristate "Netlogic XLR and Sigma Designs I2C support"
- depends on CPU_XLR || ARCH_TANGO || COMPILE_TEST
+ tristate "Netlogic XLR I2C support"
+ depends on CPU_XLR || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -1400,15 +1371,6 @@ config I2C_OPAL
This driver can also be built as a module. If so, the module will be
called as i2c-opal.
-config I2C_ZX2967
- tristate "ZTE ZX2967 I2C support"
- depends on ARCH_ZX
- default y
- help
- Selecting this option will add ZX2967 I2C driver.
- This driver can also be built as a module. If so, the module will be
- called i2c-zx2967.
-
config I2C_FSI
tristate "FSI I2C driver"
depends on FSI
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 683c49faca05..615f35e3e31f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,7 +58,6 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
-obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
@@ -99,13 +98,11 @@ obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
-obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o
obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o
i2c-stm32f7-drv-objs := i2c-stm32f7.o i2c-stm32.o
obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7-drv.o
-obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
obj-$(CONFIG_I2C_SYNQUACER) += i2c-synquacer.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
@@ -122,7 +119,6 @@ obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
-obj-$(CONFIG_I2C_ZX2967) += i2c-zx2967.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index cd3fd5ee5f65..ce130a821ea5 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -30,7 +30,7 @@ static void amd_mp2_c2p_mutex_unlock(struct amd_i2c_common *i2c_common)
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
if (unlikely(privdata->c2p_lock_busid != i2c_common->bus_id)) {
- dev_warn(ndev_dev(privdata),
+ pci_warn(privdata->pci_dev,
"bus %d attempting to unlock C2P locked by bus %d\n",
i2c_common->bus_id, privdata->c2p_lock_busid);
return;
@@ -59,8 +59,7 @@ int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable)
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
union i2c_cmd_base i2c_cmd_base;
- dev_dbg(ndev_dev(privdata), "%s id: %d\n", __func__,
- i2c_common->bus_id);
+ pci_dbg(privdata->pci_dev, "id: %d\n", i2c_common->bus_id);
i2c_cmd_base.ul = 0;
i2c_cmd_base.s.i2c_cmd = enable ? i2c_enable : i2c_disable;
@@ -111,20 +110,19 @@ EXPORT_SYMBOL_GPL(amd_mp2_rw);
static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
int len = i2c_common->eventval.r.length;
u32 slave_addr = i2c_common->eventval.r.slave_addr;
bool err = false;
if (unlikely(len != i2c_common->msg->len)) {
- dev_err(ndev_dev(privdata),
- "length %d in event doesn't match buffer length %d!\n",
+ pci_err(pdev, "length %d in event doesn't match buffer length %d!\n",
len, i2c_common->msg->len);
err = true;
}
if (unlikely(slave_addr != i2c_common->msg->addr)) {
- dev_err(ndev_dev(privdata),
- "unexpected slave address %x (expected: %x)!\n",
+ pci_err(pdev, "unexpected slave address %x (expected: %x)!\n",
slave_addr, i2c_common->msg->addr);
err = true;
}
@@ -136,13 +134,14 @@ static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
enum status_type sts = i2c_common->eventval.r.status;
enum response_type res = i2c_common->eventval.r.response;
int len = i2c_common->eventval.r.length;
if (res != command_success) {
if (res != command_failed)
- dev_err(ndev_dev(privdata), "invalid response to i2c command!\n");
+ pci_err(pdev, "invalid response to i2c command!\n");
return;
}
@@ -155,32 +154,26 @@ static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
privdata->mmio + AMD_C2P_MSG2,
len);
} else if (sts != i2c_readfail_event) {
- dev_err(ndev_dev(privdata),
- "invalid i2c status after read (%d)!\n", sts);
+ pci_err(pdev, "invalid i2c status after read (%d)!\n", sts);
}
break;
case i2c_write:
if (sts == i2c_writecomplete_event)
amd_mp2_pci_check_rw_event(i2c_common);
else if (sts != i2c_writefail_event)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after write (%d)!\n", sts);
+ pci_err(pdev, "invalid i2c status after write (%d)!\n", sts);
break;
case i2c_enable:
if (sts == i2c_busenable_complete)
i2c_common->cmd_success = true;
else if (sts != i2c_busenable_failed)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after bus enable (%d)!\n",
- sts);
+ pci_err(pdev, "invalid i2c status after bus enable (%d)!\n", sts);
break;
case i2c_disable:
if (sts == i2c_busdisable_complete)
i2c_common->cmd_success = true;
else if (sts != i2c_busdisable_failed)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after bus disable (%d)!\n",
- sts);
+ pci_err(pdev, "invalid i2c status after bus disable (%d)!\n", sts);
break;
default:
break;
@@ -190,10 +183,10 @@ static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
void amd_mp2_process_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
if (unlikely(i2c_common->reqcmd == i2c_none)) {
- dev_warn(ndev_dev(privdata),
- "received msg but no cmd was sent (bus = %d)!\n",
+ pci_warn(pdev, "received msg but no cmd was sent (bus = %d)!\n",
i2c_common->bus_id);
return;
}
@@ -208,6 +201,7 @@ EXPORT_SYMBOL_GPL(amd_mp2_process_event);
static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
{
struct amd_mp2_dev *privdata = dev;
+ struct pci_dev *pdev = privdata->pci_dev;
struct amd_i2c_common *i2c_common;
u32 val;
unsigned int bus_id;
@@ -236,8 +230,7 @@ static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
val = readl(privdata->mmio + AMD_P2C_MSG_INTEN);
if (val != 0) {
writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
- dev_warn(ndev_dev(privdata),
- "received irq without message\n");
+ pci_warn(pdev, "received irq without message\n");
ret = IRQ_HANDLED;
}
}
@@ -255,13 +248,13 @@ EXPORT_SYMBOL_GPL(amd_mp2_rw_timeout);
int amd_mp2_register_cb(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
if (i2c_common->bus_id > 1)
return -EINVAL;
if (privdata->busses[i2c_common->bus_id]) {
- dev_err(ndev_dev(privdata),
- "Bus %d already taken!\n", i2c_common->bus_id);
+ pci_err(pdev, "Bus %d already taken!\n", i2c_common->bus_id);
return -EINVAL;
}
@@ -301,13 +294,13 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
rc = pcim_enable_device(pci_dev);
if (rc) {
- dev_err(ndev_dev(privdata), "Failed to enable MP2 PCI device\n");
+ pci_err(pci_dev, "Failed to enable MP2 PCI device\n");
goto err_pci_enable;
}
rc = pcim_iomap_regions(pci_dev, 1 << 2, pci_name(pci_dev));
if (rc) {
- dev_err(ndev_dev(privdata), "I/O memory remapping failed\n");
+ pci_err(pci_dev, "I/O memory remapping failed\n");
goto err_pci_enable;
}
privdata->mmio = pcim_iomap_table(pci_dev)[2];
@@ -327,7 +320,7 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
rc = devm_request_irq(&pci_dev->dev, pci_dev->irq, amd_mp2_irq_isr,
IRQF_SHARED, dev_name(&pci_dev->dev), privdata);
if (rc)
- dev_err(&pci_dev->dev, "Failure requesting irq %i: %d\n",
+ pci_err(pci_dev, "Failure requesting irq %i: %d\n",
pci_dev->irq, rc);
return rc;
@@ -363,7 +356,7 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
privdata->probed = true;
- dev_info(&pci_dev->dev, "MP2 device registered.\n");
+ pci_info(pci_dev, "MP2 device registered.\n");
return 0;
}
@@ -397,8 +390,7 @@ static int amd_mp2_pci_suspend(struct device *dev)
ret = pci_save_state(pci_dev);
if (ret) {
- dev_err(ndev_dev(privdata),
- "pci_save_state failed = %d\n", ret);
+ pci_err(pci_dev, "pci_save_state failed = %d\n", ret);
return ret;
}
@@ -417,8 +409,7 @@ static int amd_mp2_pci_resume(struct device *dev)
pci_restore_state(pci_dev);
ret = pci_enable_device(pci_dev);
if (ret < 0) {
- dev_err(ndev_dev(privdata),
- "pci_enable_device failed = %d\n", ret);
+ pci_err(pci_dev, "pci_enable_device failed = %d\n", ret);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 506433bc0ff2..de058671f9b8 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -88,8 +88,7 @@ static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common)
union i2c_event *event = &i2c_common->eventval;
if (event->r.status == i2c_readcomplete_event)
- dev_dbg(&i2c_dev->pdev->dev, "%s readdata:%*ph\n",
- __func__, event->r.length,
+ dev_dbg(&i2c_dev->pdev->dev, "readdata:%*ph\n", event->r.length,
i2c_common->msg->buf);
complete(&i2c_dev->cmd_complete);
diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h
index 058362edebaa..ddecd0c88656 100644
--- a/drivers/i2c/busses/i2c-amd-mp2.h
+++ b/drivers/i2c/busses/i2c-amd-mp2.h
@@ -185,12 +185,6 @@ struct amd_mp2_dev {
unsigned int probed;
};
-#define ndev_pdev(ndev) ((ndev)->pci_dev)
-#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
-#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
-#define work_amd_i2c_common(__work) \
- container_of(__work, struct amd_i2c_common, work.work)
-
/* PCIe communication driver */
int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd);
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d8295b1c379d..cceaf69279a9 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -93,6 +93,7 @@
#define S_CMD_STATUS_MASK 0x07
#define S_CMD_STATUS_SUCCESS 0x0
#define S_CMD_STATUS_TIMEOUT 0x5
+#define S_CMD_STATUS_MASTER_ABORT 0x7
#define IE_OFFSET 0x38
#define IE_M_RX_FIFO_FULL_SHIFT 31
@@ -159,6 +160,11 @@
#define IE_S_ALL_INTERRUPT_SHIFT 21
#define IE_S_ALL_INTERRUPT_MASK 0x3f
+/*
+ * It takes ~18us to read 10 bytes of data, so to keep the tasklet
+ * running for less time, the max slave read per tasklet is set to 10 bytes.
+ */
+#define MAX_SLAVE_RX_PER_INT 10
enum i2c_slave_read_status {
I2C_SLAVE_RX_FIFO_EMPTY = 0,
@@ -205,8 +211,18 @@ struct bcm_iproc_i2c_dev {
/* bytes that have been read */
unsigned int rx_bytes;
unsigned int thld_bytes;
+
+ bool slave_rx_only;
+ bool rx_start_rcvd;
+ bool slave_read_complete;
+ u32 tx_underrun;
+ u32 slave_int_mask;
+ struct tasklet_struct slave_rx_tasklet;
};
+/* tasklet to process slave rx data */
+static void slave_rx_tasklet_fn(unsigned long);
+
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
@@ -215,7 +231,8 @@ struct bcm_iproc_i2c_dev {
#define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
| BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
- | BIT(IS_S_TX_UNDERRUN_SHIFT))
+ | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
+ | BIT(IS_S_RX_THLD_SHIFT))
static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
@@ -259,6 +276,7 @@ static void bcm_iproc_i2c_slave_init(
{
u32 val;
+ iproc_i2c->tx_underrun = 0;
if (need_reset) {
/* put controller in reset */
val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
@@ -295,8 +313,13 @@ static void bcm_iproc_i2c_slave_init(
/* Enable interrupt register to indicate a valid byte in receive fifo */
val = BIT(IE_S_RX_EVENT_SHIFT);
+ /* Enable interrupt register to indicate Slave Rx FIFO Full */
+ val |= BIT(IE_S_RX_FIFO_FULL_SHIFT);
+ /* Enable interrupt register to indicate a Master read transaction */
+ val |= BIT(IE_S_RD_EVENT_SHIFT);
/* Enable interrupt register for the Slave BUSY command */
val |= BIT(IE_S_START_BUSY_SHIFT);
+ iproc_i2c->slave_int_mask = val;
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
@@ -311,9 +334,10 @@ static void bcm_iproc_i2c_check_slave_status(
return;
val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
- if (val == S_CMD_STATUS_TIMEOUT) {
- dev_err(iproc_i2c->device, "slave random stretch time timeout\n");
-
+ if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+ dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+ "slave random stretch time timeout\n" :
+ "Master aborted read transaction\n");
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
bcm_iproc_i2c_slave_init(iproc_i2c, true);
@@ -321,76 +345,187 @@ static void bcm_iproc_i2c_check_slave_status(
}
}
-static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
- u32 status)
+static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
{
+ u8 rx_data, rx_status;
+ u32 rx_bytes = 0;
u32 val;
- u8 value, rx_status;
- /* Slave RX byte receive */
- if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
+ while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
- if (rx_status == I2C_SLAVE_RX_START) {
- /* Start of SMBUS for Master write */
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_REQUESTED, &value);
+ rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
- val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+ if (rx_status == I2C_SLAVE_RX_START) {
+ /* Start of SMBUS Master write */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
- /* Start of SMBUS for Master Read */
+ I2C_SLAVE_WRITE_REQUESTED, &rx_data);
+ iproc_i2c->rx_start_rcvd = true;
+ iproc_i2c->slave_read_complete = false;
+ } else if (rx_status == I2C_SLAVE_RX_DATA &&
+ iproc_i2c->rx_start_rcvd) {
+ /* Middle of SMBUS Master write */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_READ_REQUESTED, &value);
- iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+ I2C_SLAVE_WRITE_RECEIVED, &rx_data);
+ } else if (rx_status == I2C_SLAVE_RX_END &&
+ iproc_i2c->rx_start_rcvd) {
+ /* End of SMBUS Master write */
+ if (iproc_i2c->slave_rx_only)
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_RECEIVED,
+ &rx_data);
+
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
+ &rx_data);
+ } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
+ iproc_i2c->rx_start_rcvd = false;
+ iproc_i2c->slave_read_complete = true;
+ break;
+ }
- val = BIT(S_CMD_START_BUSY_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+ rx_bytes++;
+ }
+}
- /*
- * Enable interrupt for TX FIFO becomes empty and
- * less than PKT_LENGTH bytes were output on the SMBUS
- */
- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
- } else {
- /* Master write other than start */
- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+static void slave_rx_tasklet_fn(unsigned long data)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
+ u32 int_clr;
+
+ bcm_iproc_i2c_slave_read(iproc_i2c);
+
+ /* clear pending IS_S_RX_EVENT_SHIFT interrupt */
+ int_clr = BIT(IS_S_RX_EVENT_SHIFT);
+
+ if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
+ /*
+ * In the case of a single-byte master-read request, the
+ * IS_S_TX_UNDERRUN_SHIFT event is generated before the
+ * IS_S_START_BUSY_SHIFT event. Hence, start sending slave data
+ * from the first IS_S_TX_UNDERRUN_SHIFT event.
+ *
+ * This means the slave must not send any data when the
+ * IS_S_RD_EVENT_SHIFT event is generated, else it would advance
+ * the read pointer of an EEPROM or other backend slave driver twice.
+ */
+ iproc_i2c->tx_underrun = 0;
+ iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
+
+ /* clear IS_S_RD_EVENT_SHIFT interrupt */
+ int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
+ }
+
+ /* clear slave interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
+ /* enable slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
+}
+
+static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ u32 val;
+ u8 value;
+
+ /*
+ * Slave events in the case of master-write, master-write-read,
+ * and master-read
+ *
+ * Master-write : only IS_S_RX_EVENT_SHIFT event
+ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events
+ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events or only IS_S_RD_EVENT_SHIFT
+ *
+ * iproc has a slave rx fifo of 64 bytes. The rx fifo full interrupt
+ * (IS_S_RX_FIFO_FULL_SHIFT) is generated when the rx fifo becomes
+ * full. This can happen if the master issues write requests of more
+ * than 64 bytes.
+ */
+ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+ status & BIT(IS_S_RD_EVENT_SHIFT) ||
+ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+ /* disable slave interrupts */
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~iproc_i2c->slave_int_mask;
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ if (status & BIT(IS_S_RD_EVENT_SHIFT))
+ /* Master-write-read request */
+ iproc_i2c->slave_rx_only = false;
+ else
+ /* Master-write request only */
+ iproc_i2c->slave_rx_only = true;
+
+ /* schedule tasklet to read data later */
+ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+
+ /*
+ * clear only IS_S_RX_EVENT_SHIFT and
+ * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+ */
+ val = BIT(IS_S_RX_EVENT_SHIFT);
+ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+ val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+ }
+
+ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ iproc_i2c->tx_underrun++;
+ if (iproc_i2c->tx_underrun == 1)
+ /* Start of SMBUS for Master Read */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- if (rx_status == I2C_SLAVE_RX_END)
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_STOP, &value);
- }
- } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
- /* Master read other than start */
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_READ_PROCESSED, &value);
+ I2C_SLAVE_READ_REQUESTED,
+ &value);
+ else
+ /* Master read other than start */
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_PROCESSED,
+ &value);
iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+ /* start transfer */
val = BIT(S_CMD_START_BUSY_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+
+ /* clear interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
+ BIT(IS_S_TX_UNDERRUN_SHIFT));
}
- /* Stop */
+ /* Stop received from the master in the case of a master-read transaction */
if (status & BIT(IS_S_START_BUSY_SHIFT)) {
- i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
/*
- * Enable interrupt for TX FIFO becomes empty and
+ * Disable interrupt for TX FIFO becomes empty and
* less than PKT_LENGTH bytes were output on the SMBUS
*/
- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+ iproc_i2c->slave_int_mask);
+
+ /* End of SMBUS for Master Read */
+ val = BIT(S_TX_WR_STATUS_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
+
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+
+ /* flush TX FIFOs */
+ val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
+ val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
+
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+
+ /* clear interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
+ BIT(IS_S_START_BUSY_SHIFT));
}
- /* clear interrupt status */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+ /* check slave transmit status only if slave is transmitting */
+ if (!iproc_i2c->slave_rx_only)
+ bcm_iproc_i2c_check_slave_status(iproc_i2c);
- bcm_iproc_i2c_check_slave_status(iproc_i2c);
return true;
}
@@ -505,12 +640,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
{
struct bcm_iproc_i2c_dev *iproc_i2c = data;
- u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ u32 slave_status;
+ u32 status;
bool ret;
- u32 sl_status = status & ISR_MASK_SLAVE;
- if (sl_status) {
- ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
+ status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ /* process only slave interrupt which are enabled */
+ slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
+ ISR_MASK_SLAVE;
+
+ if (slave_status) {
+ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
if (ret)
return IRQ_HANDLED;
else
@@ -1066,6 +1206,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
return -EAFNOSUPPORT;
iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
bcm_iproc_i2c_slave_init(iproc_i2c, false);
return 0;
}
@@ -1086,6 +1230,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
IE_S_ALL_INTERRUPT_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
/* Erase the slave address programmed */
tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
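The iproc slave rework moves FIFO draining out of the hard IRQ handler: the ISR masks the slave interrupt sources and schedules a tasklet, which drains a bounded number of bytes (MAX_SLAVE_RX_PER_INT), acknowledges the events, and re-enables the sources. A hedged sketch of that handoff, with hypothetical example_* helpers standing in for the driver's register accessors:

struct example_dev {
	struct tasklet_struct rx_tasklet;
	/* ... device state ... */
};

/* hypothetical helpers for the device's interrupt mask and rx fifo */
void example_mask_slave_irqs(struct example_dev *dev);
void example_unmask_slave_irqs(struct example_dev *dev);
void example_drain_rx_fifo(struct example_dev *dev);

static void example_rx_tasklet(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	example_drain_rx_fifo(dev);	/* bounded work, softirq context */
	example_unmask_slave_irqs(dev);	/* re-arm what the ISR masked */
}

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *dev = data;

	example_mask_slave_irqs(dev);	/* quiesce until the tasklet runs */
	tasklet_schedule(&dev->rx_tasklet);
	return IRQ_HANDLED;
}

As in the hunks above, the tasklet is set up with tasklet_init() when the slave is registered and torn down with tasklet_kill() on unregister.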
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index d4e0a0f6732a..ba766d24219e 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
goto cmd_out;
}
- if ((CMD_RD || CMD_WR) &&
+ if ((cmd == CMD_RD || cmd == CMD_WR) &&
bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
rc = -EREMOTEIO;
dev_dbg(dev->device, "controller received NOACK intr for %s\n",
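The brcmstb change fixes a classic always-true condition: `CMD_RD || CMD_WR` tests the (non-zero) constants themselves rather than the command variable. A hedged standalone illustration:

enum example_cmd { CMD_NOP = 0, CMD_RD = 1, CMD_WR = 2 }; /* illustrative */

static bool example_cmd_is_xfer(enum example_cmd cmd)
{
	/* buggy:   if (CMD_RD || CMD_WR)  -- tests constants, always true */
	/* correct: compare the variable against each constant */
	return cmd == CMD_RD || cmd == CMD_WR;
}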
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 85307cfa7109..5392b82f68a4 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -38,6 +38,8 @@
#define DW_IC_CON_TX_EMPTY_CTRL BIT(8)
#define DW_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define DW_IC_DATA_CMD_DAT GENMASK(7, 0)
+
/*
* Registers offset
*/
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index d6425ad6e6a3..dd27b9dbe931 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -432,7 +432,7 @@ i2c_dw_read(struct dw_i2c_dev *dev)
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
/* Ensure length byte is a valid value */
if (flags & I2C_M_RECV_LEN &&
- tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
+ (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
len = i2c_dw_recv_len(dev, tmp);
}
*buf++ = tmp;
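On some DesignWare IP versions DW_IC_DATA_CMD carries flag bits above the data byte, so an I2C_M_RECV_LEN length byte must be masked to the data field before validation (the stored byte itself is truncated anyway by the u8 buffer). A hedged sketch of the validation step:

static bool example_block_len_valid(u32 data_cmd)
{
	u32 len = data_cmd & DW_IC_DATA_CMD_DAT;	/* GENMASK(7, 0) */

	/* an SMBus block length byte must be 1..I2C_SMBUS_BLOCK_MAX */
	return len > 0 && len <= I2C_SMBUS_BLOCK_MAX;
}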
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index f67639dc74b7..60c838c7c454 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -160,12 +160,11 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
{
struct dc_i2c *i2c = dev_id;
int cmd_status = dc_i2c_cmd_status(i2c);
- unsigned long flags;
u8 addr_cmd;
writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR);
- spin_lock_irqsave(&i2c->lock, flags);
+ spin_lock(&i2c->lock);
if (cmd_status == II_CMD_STATUS_ACK_BAD
|| cmd_status == II_CMD_STATUS_ABORT) {
@@ -207,7 +206,7 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
}
out:
- spin_unlock_irqrestore(&i2c->lock, flags);
+ spin_unlock(&i2c->lock);
return IRQ_HANDLED;
}
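The digicolor change drops spin_lock_irqsave() inside the interrupt handler: in hardirq context local interrupts are already disabled, so the flags save/restore is pure overhead. Any thread-context user of the same lock must still take it with interrupts disabled. A hedged sketch:

struct example_i2c {
	spinlock_t lock;	/* hypothetical device state */
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_i2c *i2c = dev_id;

	spin_lock(&i2c->lock);		/* hardirq: no flags needed */
	/* ... process the completion event ... */
	spin_unlock(&i2c->lock);

	return IRQ_HANDLED;
}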
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
deleted file mode 100644
index f6e13ceeb2b3..000000000000
--- a/drivers/i2c/busses/i2c-efm32.c
+++ /dev/null
@@ -1,469 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Uwe Kleine-Koenig for Pengutronix
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-
-#define DRIVER_NAME "efm32-i2c"
-
-#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
-
-#define REG_CTRL 0x00
-#define REG_CTRL_EN 0x00001
-#define REG_CTRL_SLAVE 0x00002
-#define REG_CTRL_AUTOACK 0x00004
-#define REG_CTRL_AUTOSE 0x00008
-#define REG_CTRL_AUTOSN 0x00010
-#define REG_CTRL_ARBDIS 0x00020
-#define REG_CTRL_GCAMEN 0x00040
-#define REG_CTRL_CLHR__MASK 0x00300
-#define REG_CTRL_BITO__MASK 0x03000
-#define REG_CTRL_BITO_OFF 0x00000
-#define REG_CTRL_BITO_40PCC 0x01000
-#define REG_CTRL_BITO_80PCC 0x02000
-#define REG_CTRL_BITO_160PCC 0x03000
-#define REG_CTRL_GIBITO 0x08000
-#define REG_CTRL_CLTO__MASK 0x70000
-#define REG_CTRL_CLTO_OFF 0x00000
-
-#define REG_CMD 0x04
-#define REG_CMD_START 0x00001
-#define REG_CMD_STOP 0x00002
-#define REG_CMD_ACK 0x00004
-#define REG_CMD_NACK 0x00008
-#define REG_CMD_CONT 0x00010
-#define REG_CMD_ABORT 0x00020
-#define REG_CMD_CLEARTX 0x00040
-#define REG_CMD_CLEARPC 0x00080
-
-#define REG_STATE 0x08
-#define REG_STATE_BUSY 0x00001
-#define REG_STATE_MASTER 0x00002
-#define REG_STATE_TRANSMITTER 0x00004
-#define REG_STATE_NACKED 0x00008
-#define REG_STATE_BUSHOLD 0x00010
-#define REG_STATE_STATE__MASK 0x000e0
-#define REG_STATE_STATE_IDLE 0x00000
-#define REG_STATE_STATE_WAIT 0x00020
-#define REG_STATE_STATE_START 0x00040
-#define REG_STATE_STATE_ADDR 0x00060
-#define REG_STATE_STATE_ADDRACK 0x00080
-#define REG_STATE_STATE_DATA 0x000a0
-#define REG_STATE_STATE_DATAACK 0x000c0
-
-#define REG_STATUS 0x0c
-#define REG_STATUS_PSTART 0x00001
-#define REG_STATUS_PSTOP 0x00002
-#define REG_STATUS_PACK 0x00004
-#define REG_STATUS_PNACK 0x00008
-#define REG_STATUS_PCONT 0x00010
-#define REG_STATUS_PABORT 0x00020
-#define REG_STATUS_TXC 0x00040
-#define REG_STATUS_TXBL 0x00080
-#define REG_STATUS_RXDATAV 0x00100
-
-#define REG_CLKDIV 0x10
-#define REG_CLKDIV_DIV__MASK 0x001ff
-#define REG_CLKDIV_DIV(div) MASK_VAL(REG_CLKDIV_DIV__MASK, (div))
-
-#define REG_SADDR 0x14
-#define REG_SADDRMASK 0x18
-#define REG_RXDATA 0x1c
-#define REG_RXDATAP 0x20
-#define REG_TXDATA 0x24
-#define REG_IF 0x28
-#define REG_IF_START 0x00001
-#define REG_IF_RSTART 0x00002
-#define REG_IF_ADDR 0x00004
-#define REG_IF_TXC 0x00008
-#define REG_IF_TXBL 0x00010
-#define REG_IF_RXDATAV 0x00020
-#define REG_IF_ACK 0x00040
-#define REG_IF_NACK 0x00080
-#define REG_IF_MSTOP 0x00100
-#define REG_IF_ARBLOST 0x00200
-#define REG_IF_BUSERR 0x00400
-#define REG_IF_BUSHOLD 0x00800
-#define REG_IF_TXOF 0x01000
-#define REG_IF_RXUF 0x02000
-#define REG_IF_BITO 0x04000
-#define REG_IF_CLTO 0x08000
-#define REG_IF_SSTOP 0x10000
-
-#define REG_IFS 0x2c
-#define REG_IFC 0x30
-#define REG_IFC__MASK 0x1ffcf
-
-#define REG_IEN 0x34
-
-#define REG_ROUTE 0x38
-#define REG_ROUTE_SDAPEN 0x00001
-#define REG_ROUTE_SCLPEN 0x00002
-#define REG_ROUTE_LOCATION__MASK 0x00700
-#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
-
-struct efm32_i2c_ddata {
- struct i2c_adapter adapter;
-
- struct clk *clk;
- void __iomem *base;
- unsigned int irq;
- u8 location;
- unsigned long frequency;
-
- /* transfer data */
- struct completion done;
- struct i2c_msg *msgs;
- size_t num_msgs;
- size_t current_word, current_msg;
- int retval;
-};
-
-static u32 efm32_i2c_read32(struct efm32_i2c_ddata *ddata, unsigned offset)
-{
- return readl(ddata->base + offset);
-}
-
-static void efm32_i2c_write32(struct efm32_i2c_ddata *ddata,
- unsigned offset, u32 value)
-{
- writel(value, ddata->base + offset);
-}
-
-static void efm32_i2c_send_next_msg(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_START);
- efm32_i2c_write32(ddata, REG_TXDATA, i2c_8bit_addr_from_msg(cur_msg));
-}
-
-static void efm32_i2c_send_next_byte(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- if (ddata->current_word >= cur_msg->len) {
- /* cur_msg completely transferred */
- ddata->current_word = 0;
- ddata->current_msg += 1;
-
- if (ddata->current_msg >= ddata->num_msgs) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_msg(ddata);
- }
- } else {
- efm32_i2c_write32(ddata, REG_TXDATA,
- cur_msg->buf[ddata->current_word++]);
- }
-}
-
-static void efm32_i2c_recv_next_byte(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- cur_msg->buf[ddata->current_word] = efm32_i2c_read32(ddata, REG_RXDATA);
- ddata->current_word += 1;
- if (ddata->current_word >= cur_msg->len) {
- /* cur_msg completely transferred */
- ddata->current_word = 0;
- ddata->current_msg += 1;
-
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_NACK);
-
- if (ddata->current_msg >= ddata->num_msgs) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_msg(ddata);
- }
- } else {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ACK);
- }
-}
-
-static irqreturn_t efm32_i2c_irq(int irq, void *dev_id)
-{
- struct efm32_i2c_ddata *ddata = dev_id;
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
- u32 irqflag = efm32_i2c_read32(ddata, REG_IF);
- u32 state = efm32_i2c_read32(ddata, REG_STATE);
-
- efm32_i2c_write32(ddata, REG_IFC, irqflag & REG_IFC__MASK);
-
- switch (state & REG_STATE_STATE__MASK) {
- case REG_STATE_STATE_IDLE:
- /* arbitration lost? */
- ddata->retval = -EAGAIN;
- complete(&ddata->done);
- break;
- case REG_STATE_STATE_WAIT:
- /*
- * huh, this shouldn't happen.
- * Reset hardware state and get out
- */
- ddata->retval = -EIO;
- efm32_i2c_write32(ddata, REG_CMD,
- REG_CMD_STOP | REG_CMD_ABORT |
- REG_CMD_CLEARTX | REG_CMD_CLEARPC);
- complete(&ddata->done);
- break;
- case REG_STATE_STATE_START:
- /* "caller" is expected to send an address */
- break;
- case REG_STATE_STATE_ADDR:
- /* wait for Ack or NAck of slave */
- break;
- case REG_STATE_STATE_ADDRACK:
- if (state & REG_STATE_NACKED) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- ddata->retval = -ENXIO;
- complete(&ddata->done);
- } else if (cur_msg->flags & I2C_M_RD) {
- /* wait for slave to send first data byte */
- } else {
- efm32_i2c_send_next_byte(ddata);
- }
- break;
- case REG_STATE_STATE_DATA:
- if (cur_msg->flags & I2C_M_RD) {
- efm32_i2c_recv_next_byte(ddata);
- } else {
- /* wait for Ack or Nack of slave */
- }
- break;
- case REG_STATE_STATE_DATAACK:
- if (state & REG_STATE_NACKED) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_byte(ddata);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int efm32_i2c_master_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct efm32_i2c_ddata *ddata = i2c_get_adapdata(adap);
- int ret;
-
- if (ddata->msgs)
- return -EBUSY;
-
- ddata->msgs = msgs;
- ddata->num_msgs = num;
- ddata->current_word = 0;
- ddata->current_msg = 0;
- ddata->retval = -EIO;
-
- reinit_completion(&ddata->done);
-
- dev_dbg(&ddata->adapter.dev, "state: %08x, status: %08x\n",
- efm32_i2c_read32(ddata, REG_STATE),
- efm32_i2c_read32(ddata, REG_STATUS));
-
- efm32_i2c_send_next_msg(ddata);
-
- wait_for_completion(&ddata->done);
-
- if (ddata->current_msg >= ddata->num_msgs)
- ret = ddata->num_msgs;
- else
- ret = ddata->retval;
-
- return ret;
-}
-
-static u32 efm32_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm efm32_i2c_algo = {
- .master_xfer = efm32_i2c_master_xfer,
- .functionality = efm32_i2c_functionality,
-};
-
-static u32 efm32_i2c_get_configured_location(struct efm32_i2c_ddata *ddata)
-{
- u32 reg = efm32_i2c_read32(ddata, REG_ROUTE);
-
- return (reg & REG_ROUTE_LOCATION__MASK) >>
- __ffs(REG_ROUTE_LOCATION__MASK);
-}
-
-static int efm32_i2c_probe(struct platform_device *pdev)
-{
- struct efm32_i2c_ddata *ddata;
- struct resource *res;
- unsigned long rate;
- struct device_node *np = pdev->dev.of_node;
- u32 location, frequency;
- int ret;
- u32 clkdiv;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
- platform_set_drvdata(pdev, ddata);
-
- init_completion(&ddata->done);
- strlcpy(ddata->adapter.name, pdev->name, sizeof(ddata->adapter.name));
- ddata->adapter.owner = THIS_MODULE;
- ddata->adapter.algo = &efm32_i2c_algo;
- ddata->adapter.dev.parent = &pdev->dev;
- ddata->adapter.dev.of_node = pdev->dev.of_node;
- i2c_set_adapdata(&ddata->adapter, ddata);
-
- ddata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ddata->clk)) {
- ret = PTR_ERR(ddata->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
- return ret;
- }
-
- ddata->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(ddata->base))
- return PTR_ERR(ddata->base);
-
- if (resource_size(res) < 0x42) {
- dev_err(&pdev->dev, "memory resource too small\n");
- return -EINVAL;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- if (!ret)
- ret = -EINVAL;
- return ret;
- }
-
- ddata->irq = ret;
-
- ret = clk_prepare_enable(ddata->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
- return ret;
- }
-
-
- ret = of_property_read_u32(np, "energymicro,location", &location);
-
- if (ret)
- /* fall back to wrongly namespaced property */
- ret = of_property_read_u32(np, "efm32,location", &location);
-
- if (!ret) {
- dev_dbg(&pdev->dev, "using location %u\n", location);
- } else {
- /* default to location configured in hardware */
- location = efm32_i2c_get_configured_location(ddata);
-
- dev_info(&pdev->dev, "fall back to location %u\n", location);
- }
-
- ddata->location = location;
-
- ret = of_property_read_u32(np, "clock-frequency", &frequency);
- if (!ret) {
- dev_dbg(&pdev->dev, "using frequency %u\n", frequency);
- } else {
- frequency = I2C_MAX_STANDARD_MODE_FREQ;
- dev_info(&pdev->dev, "defaulting to 100 kHz\n");
- }
- ddata->frequency = frequency;
-
- rate = clk_get_rate(ddata->clk);
- if (!rate) {
- dev_err(&pdev->dev, "there is no input clock available\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
- clkdiv = DIV_ROUND_UP(rate, 8 * ddata->frequency) - 1;
- if (clkdiv >= 0x200) {
- dev_err(&pdev->dev,
- "input clock too fast (%lu) to divide down to bus freq (%lu)",
- rate, ddata->frequency);
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- dev_dbg(&pdev->dev, "input clock = %lu, bus freq = %lu, clkdiv = %lu\n",
- rate, ddata->frequency, (unsigned long)clkdiv);
- efm32_i2c_write32(ddata, REG_CLKDIV, REG_CLKDIV_DIV(clkdiv));
-
- efm32_i2c_write32(ddata, REG_ROUTE, REG_ROUTE_SDAPEN |
- REG_ROUTE_SCLPEN |
- REG_ROUTE_LOCATION(ddata->location));
-
- efm32_i2c_write32(ddata, REG_CTRL, REG_CTRL_EN |
- REG_CTRL_BITO_160PCC | 0 * REG_CTRL_GIBITO);
-
- efm32_i2c_write32(ddata, REG_IFC, REG_IFC__MASK);
- efm32_i2c_write32(ddata, REG_IEN, REG_IF_TXC | REG_IF_ACK | REG_IF_NACK
- | REG_IF_ARBLOST | REG_IF_BUSERR | REG_IF_RXDATAV);
-
- /* to make bus idle */
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ABORT);
-
- ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
- goto err_disable_clk;
- }
-
- ret = i2c_add_adapter(&ddata->adapter);
- if (ret) {
- free_irq(ddata->irq, ddata);
-
-err_disable_clk:
- clk_disable_unprepare(ddata->clk);
- }
- return ret;
-}
-
-static int efm32_i2c_remove(struct platform_device *pdev)
-{
- struct efm32_i2c_ddata *ddata = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&ddata->adapter);
- free_irq(ddata->irq, ddata);
- clk_disable_unprepare(ddata->clk);
-
- return 0;
-}
-
-static const struct of_device_id efm32_i2c_dt_ids[] = {
- {
- .compatible = "energymicro,efm32-i2c",
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, efm32_i2c_dt_ids);
-
-static struct platform_driver efm32_i2c_driver = {
- .probe = efm32_i2c_probe,
- .remove = efm32_i2c_remove,
-
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = efm32_i2c_dt_ids,
- },
-};
-module_platform_driver(efm32_i2c_driver);
-
-MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
-MODULE_DESCRIPTION("EFM32 i2c driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 140426db28df..b0f50dce9d0f 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -49,7 +49,7 @@ static int mmapped;
static wait_queue_head_t pcf_wait;
static int pcf_pending;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
static struct i2c_adapter pcf_isa_ops;
@@ -132,7 +132,6 @@ static irqreturn_t pcf_isa_handler(int this_irq, void *dev_id) {
static int pcf_isa_init(void)
{
- spin_lock_init(&lock);
if (!mmapped) {
if (!request_region(base, 2, pcf_isa_ops.name)) {
printk(KERN_ERR "%s: requested I/O region (%#x:2) is "
@@ -282,7 +281,7 @@ static int elektor_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int elektor_remove(struct device *dev, unsigned int id)
+static void elektor_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pcf_isa_ops);
@@ -298,8 +297,6 @@ static int elektor_remove(struct device *dev, unsigned int id)
iounmap(base_iomem);
release_mem_region(base, 2);
}
-
- return 0;
}
static struct isa_driver i2c_elektor_driver = {
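The elektor cleanup swaps a runtime spin_lock_init() for compile-time initialization: DEFINE_SPINLOCK() both declares and initializes a static lock, removing any window in which it could be taken before initialization. A hedged sketch:

static DEFINE_SPINLOCK(example_lock);	/* declared and initialized at once */

static void example_critical(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}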
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 20a9881a0d6c..5ac30d95650c 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -606,6 +606,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
u32 i2c_ctl;
u32 int_en = 0;
u32 i2c_auto_conf = 0;
+ u32 i2c_addr = 0;
u32 fifo_ctl;
unsigned long flags;
unsigned short trig_lvl;
@@ -640,7 +641,12 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
}
- writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
+ i2c_addr = HSI2C_SLV_ADDR_MAS(i2c->msg->addr);
+
+ if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ)
+ i2c_addr |= HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr));
+
+ writel(i2c_addr, i2c->regs + HSI2C_ADDR);
writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
writel(i2c_ctl, i2c->regs + HSI2C_CTL);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index a4a6825c8758..7a048abbf92b 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -520,5 +520,5 @@ module_exit(i2c_gpio_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-gpio");
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index ab15b1ec2ab3..c45f226c2b85 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -413,10 +413,8 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n");
+ if (irq <= 0)
return irq;
- }
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..4acee6f9e5a3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
* Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes
+ * Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -228,6 +229,7 @@
#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1084,6 +1086,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
{ 0, }
};
@@ -1433,7 +1436,7 @@ static int i801_add_mux(struct i801_priv *priv)
const struct i801_mux_config *mux_config;
struct i2c_mux_gpio_platform_data gpio_data;
struct gpiod_lookup_table *lookup;
- int err, i;
+ int i;
if (!priv->mux_drvdata)
return 0;
@@ -1449,7 +1452,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
@@ -1473,22 +1476,17 @@ static int i801_add_mux(struct i801_priv *priv)
PLATFORM_DEVID_NONE, &gpio_data,
sizeof(struct i2c_mux_gpio_platform_data));
if (IS_ERR(priv->mux_pdev)) {
- err = PTR_ERR(priv->mux_pdev);
gpiod_remove_lookup_table(lookup);
- priv->mux_pdev = NULL;
dev_err(dev, "Failed to register i2c-mux-gpio device\n");
- return err;
}
- return 0;
+ return PTR_ERR_OR_ZERO(priv->mux_pdev);
}
static void i801_del_mux(struct i801_priv *priv)
{
- if (priv->mux_pdev)
- platform_device_unregister(priv->mux_pdev);
- if (priv->lookup)
- gpiod_remove_lookup_table(priv->lookup);
+ platform_device_unregister(priv->mux_pdev);
+ gpiod_remove_lookup_table(priv->lookup);
}
static unsigned int i801_get_adapter_class(struct i801_priv *priv)
@@ -1772,6 +1770,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
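Two small idioms from the i801 hunks above are broadly reusable: PTR_ERR_OR_ZERO() collapses the IS_ERR()/PTR_ERR() dance into a single return, and since platform_device_unregister() and gpiod_remove_lookup_table() both tolerate a NULL argument, the guards in the teardown path can go (the struct_size() allocation also now reserves room for the lookup table's terminating empty entry). A hedged sketch of the registration side, with an illustrative child name:

static int example_register_child(struct device *dev,
				  struct platform_device **child)
{
	*child = platform_device_register_data(dev, "example-child",
					       PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(*child))
		dev_err(dev, "failed to register child device\n");

	/* 0 on success, PTR_ERR(*child) on failure */
	return PTR_ERR_OR_ZERO(*child);
}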
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b444fbf1a262..b80fdc1f0092 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -209,6 +209,7 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
struct i2c_client *slave;
+ enum i2c_slave_event last_slave_event;
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
@@ -241,6 +242,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
};
+static const struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -537,7 +551,7 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
i2c_imx->cur_clk = i2c_clk_rate;
- div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
+ div = DIV_ROUND_UP(i2c_clk_rate, i2c_imx->bitrate);
if (div < i2c_clk_div[0].div)
i = 0;
else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
@@ -555,8 +569,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
* This delay is used in I2C bus disable function
* to fix chip hardware bug.
*/
- i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
- + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
+ i2c_imx->disable_delay = DIV_ROUND_UP(500000U * i2c_clk_div[i].div,
+ i2c_clk_rate / 2);
#ifdef CONFIG_I2C_DEBUG_BUS
dev_dbg(&i2c_imx->adapter.dev, "I2C_CLK=%d, REQ DIV=%d\n",
@@ -662,6 +676,36 @@ static void i2c_imx_enable_bus_idle(struct imx_i2c_struct *i2c_imx)
}
}
+static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx,
+ enum i2c_slave_event event, u8 *val)
+{
+ i2c_slave_event(i2c_imx->slave, event, val);
+ i2c_imx->last_slave_event = event;
+}
+
+static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
+{
+ u8 val;
+
+ while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) {
+ switch (i2c_imx->last_slave_event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ i2c_imx_slave_event(i2c_imx, I2C_SLAVE_READ_PROCESSED,
+ &val);
+ break;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ i2c_imx_slave_event(i2c_imx, I2C_SLAVE_STOP, &val);
+ break;
+
+ case I2C_SLAVE_STOP:
+ break;
+ }
+ }
+}
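To see the contract this replay restores, here is a hypothetical minimal slave backend callback (the function name is illustrative; the signature and event enum are the real i2c slave interface that i2c_slave_event() invokes). A backend always expects a REQUESTED event, zero or more data events, then I2C_SLAVE_STOP:

static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_WRITE_REQUESTED:
		/* master addressed us for writing; no data byte yet */
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		/* one byte arrived in *val; a negative return asks for NAK */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		/* must place the next byte to transmit into *val */
		*val = 0xff;
		break;
	case I2C_SLAVE_STOP:
		/* transfer done; reset any per-transfer state */
		break;
	}
	return 0;
}

Such a callback is registered with i2c_slave_register(client, example_slave_cb); i2c_imx_slave_finish_op() guarantees it never sees a transfer that is missing its terminating I2C_SLAVE_STOP.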
+
static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
unsigned int status, unsigned int ctl)
{
@@ -674,9 +718,11 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
}
if (status & I2SR_IAAS) { /* Addressed as a slave */
+ i2c_imx_slave_finish_op(i2c_imx);
if (status & I2SR_SRW) { /* Master wants to read from us */
dev_dbg(&i2c_imx->adapter.dev, "read requested");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_REQUESTED, &value);
/* Slave transmit */
ctl |= I2CR_MTX;
@@ -686,7 +732,8 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
} else { /* Master wants to write to us */
dev_dbg(&i2c_imx->adapter.dev, "write requested");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
/* Slave receive */
ctl &= ~I2CR_MTX;
@@ -697,17 +744,20 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
} else if (!(ctl & I2CR_MTX)) { /* Receive mode */
if (status & I2SR_IBB) { /* No STOP signal detected */
value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
} else { /* STOP signal is detected */
dev_dbg(&i2c_imx->adapter.dev,
"STOP signal detected");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_STOP, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_STOP, &value);
}
} else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */
ctl |= I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
} else { /* Transmit mode received NAK */
@@ -748,6 +798,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
return -EBUSY;
i2c_imx->slave = client;
+ i2c_imx->last_slave_event = I2C_SLAVE_STOP;
/* Resume */
ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
@@ -800,10 +851,17 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+
if (status & I2SR_IIF) {
i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
- if (i2c_imx->slave && !(ctl & I2CR_MSTA))
- return i2c_imx_slave_isr(i2c_imx, status, ctl);
+ if (i2c_imx->slave) {
+ if (!(ctl & I2CR_MSTA)) {
+ return i2c_imx_slave_isr(i2c_imx, status, ctl);
+ } else if (i2c_imx->last_slave_event !=
+ I2C_SLAVE_STOP) {
+ i2c_imx_slave_finish_op(i2c_imx);
+ }
+ }
return i2c_imx_master_isr(i2c_imx, status);
}
@@ -1330,7 +1388,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
match = device_get_match_data(&pdev->dev);
- i2c_imx->hwdata = match;
+ if (match)
+ i2c_imx->hwdata = match;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
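	/*
	 * Two bind paths meet here: for a DT/ACPI match,
	 * device_get_match_data() returns the hwdata attached to
	 * i2c_imx_dt_ids[]/i2c_imx_acpi_ids[]; for a legacy name-based
	 * platform bind ("imx1-i2c"/"imx21-i2c"), match is NULL and the
	 * hwdata is taken from imx_i2c_devtype[].driver_data instead.
	 */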
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1560,7 @@ static struct platform_driver i2c_imx_driver = {
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index cb4a25ebb890..8509c5f11356 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -437,9 +437,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
unsigned short intst;
unsigned short intmsk;
struct jz4780_i2c *i2c = dev_id;
- unsigned long flags;
- spin_lock_irqsave(&i2c->lock, flags);
+ spin_lock(&i2c->lock);
intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
intst = jz4780_i2c_readw(i2c, JZ4780_I2C_INTST);
@@ -551,7 +550,7 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
}
done:
- spin_unlock_irqrestore(&i2c->lock, flags);
+ spin_unlock(&i2c->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 71d7bae2cbca..4e0b7c2882ce 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -1,34 +1,8 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ * Mellanox i2c driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/delay.h>
@@ -37,7 +11,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
/* General defines */
#define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR 0x2000
@@ -51,7 +27,7 @@
#define MLXCPLD_I2C_MAX_ADDR_LEN 4
#define MLXCPLD_I2C_RETR_NUM 2
#define MLXCPLD_I2C_XFER_TO 500000 /* usec */
-#define MLXCPLD_I2C_POLL_TIME 2000 /* usec */
+#define MLXCPLD_I2C_POLL_TIME 400 /* usec */
/* LPC I2C registers */
#define MLXCPLD_LPCI2C_CPBLTY_REG 0x0
@@ -72,6 +48,16 @@
#define MLXCPLD_LPCI2C_ACK_IND 1
#define MLXCPLD_LPCI2C_NACK_IND 2
+#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0f
+#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
+
+enum mlxcpld_i2c_frequency {
+ MLXCPLD_I2C_FREQ_1000KHZ = 1,
+ MLXCPLD_I2C_FREQ_400KHZ = 2,
+ MLXCPLD_I2C_FREQ_100KHZ = 3,
+};
+
struct mlxcpld_i2c_curr_xfer {
u8 cmd;
u8 addr_width;
@@ -489,8 +475,45 @@ static struct i2c_adapter mlxcpld_i2c_adapter = {
.nr = MLXCPLD_I2C_BUS_NUM,
};
+static int
+mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
+ struct mlxreg_core_hotplug_platform_data *pdata)
+{
+ struct mlxreg_core_item *item = pdata->items;
+ struct mlxreg_core_data *data;
+ u32 regval;
+ u8 freq;
+ int err;
+
+ if (!item)
+ return 0;
+
+ /* Read frequency setting. */
+ data = item->data;
+ err = regmap_read(pdata->regmap, data->reg, &regval);
+ if (err)
+ return err;
+
+	/* Set frequency only if it is not 100KHz, which is the default. */
+	switch ((regval & data->mask) >> data->bit) {
+ case MLXCPLD_I2C_FREQ_1000KHZ:
+ freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
+ break;
+ case MLXCPLD_I2C_FREQ_400KHZ:
+ freq = MLXCPLD_I2C_FREQ_400KHZ_SET;
+ break;
+ default:
+ return 0;
+ }
+
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_HALF_CYC_REG, &freq, 1);
+
+ return 0;
+}
+
static int mlxcpld_i2c_probe(struct platform_device *pdev)
{
+ struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxcpld_i2c_priv *priv;
int err;
u8 val;
@@ -505,6 +528,14 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
+ /* Set I2C bus frequency if platform data provides this info. */
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ err = mlxcpld_i2c_set_frequency(priv, pdata);
+ if (err)
+ goto mlxcpld_i2_probe_failed;
+ }
+
/* Register with i2c layer */
mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
/* Read capability register */
@@ -523,8 +554,12 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
err = i2c_add_numbered_adapter(&priv->adap);
if (err)
- mutex_destroy(&priv->lock);
+ goto mlxcpld_i2_probe_failed;
+ return 0;
+
+mlxcpld_i2_probe_failed:
+ mutex_destroy(&priv->lock);
return err;
}
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..2ffd2f354d0a 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
@@ -1258,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
mtk_i2c_clock_disable(i2c);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ I2C_DRV_NAME, i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
@@ -1285,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+ struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
{
int ret;
struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1300,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev)
mtk_i2c_clock_disable(i2c);
+ i2c_mark_adapter_resumed(&i2c->adap);
+
return 0;
}
#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
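Marking the adapter suspended in the noirq phase makes the i2c core fail late transfers fast instead of touching a powered-down controller. Simplified from the core's check (drivers/i2c/i2c-core.h):

static int __i2c_check_suspended(struct i2c_adapter *adap)
{
	if (test_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags)) {
		if (!test_and_set_bit(I2C_ALF_SUSPEND_REPORTED,
				      &adap->locked_flags))
			dev_WARN(&adap->dev, "Transfer while suspended\n");
		return -ESHUTDOWN;
	}
	return 0;
}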
static struct platform_driver mtk_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5cfe70aedced..c590d36b5fd1 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -18,6 +18,7 @@
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -717,6 +718,10 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
int rc, ret = num;
+ rc = pm_runtime_resume_and_get(&adap->dev);
+ if (rc)
+ return rc;
+
BUG_ON(drv_data->msgs != NULL);
drv_data->msgs = msgs;
drv_data->num_msgs = num;
@@ -732,6 +737,9 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
drv_data->num_msgs = 0;
drv_data->msgs = NULL;
+ pm_runtime_mark_last_busy(&adap->dev);
+ pm_runtime_put_autosuspend(&adap->dev);
+
return ret;
}
@@ -805,7 +813,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
* need to know tclk in order to calculate bus clock
* factors.
*/
- if (IS_ERR(drv_data->clk)) {
+ if (!drv_data->clk) {
rc = -ENODEV;
goto out;
}
@@ -828,7 +836,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
rc = PTR_ERR(drv_data->rstc);
goto out;
}
- reset_control_deassert(drv_data->rstc);
/* It's not yet defined how timeouts will be specified in the device tree.
* So hard code the value to 1 second.
@@ -894,6 +901,32 @@ static int mv64xxx_i2c_init_recovery_info(struct mv64xxx_i2c_data *drv_data,
}
static int
+mv64xxx_i2c_runtime_suspend(struct device *dev)
+{
+ struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
+
+ reset_control_assert(drv_data->rstc);
+ clk_disable_unprepare(drv_data->reg_clk);
+ clk_disable_unprepare(drv_data->clk);
+
+ return 0;
+}
+
+static int
+mv64xxx_i2c_runtime_resume(struct device *dev)
+{
+ struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(drv_data->clk);
+ clk_prepare_enable(drv_data->reg_clk);
+ reset_control_reset(drv_data->rstc);
+
+ mv64xxx_i2c_hw_init(drv_data);
+
+ return 0;
+}
+
+static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
@@ -920,18 +953,22 @@ mv64xxx_i2c_probe(struct platform_device *pd)
/* Not all platforms have clocks */
drv_data->clk = devm_clk_get(&pd->dev, NULL);
- if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR(drv_data->clk))
- clk_prepare_enable(drv_data->clk);
+ if (IS_ERR(drv_data->clk)) {
+ if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ drv_data->clk = NULL;
+ }
drv_data->reg_clk = devm_clk_get(&pd->dev, "reg");
- if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR(drv_data->reg_clk))
- clk_prepare_enable(drv_data->reg_clk);
+ if (IS_ERR(drv_data->reg_clk)) {
+ if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ drv_data->reg_clk = NULL;
+ }
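	/*
	 * Mapping absent optional clocks to NULL (instead of keeping the
	 * error pointer) is what allows the runtime-PM callbacks above to
	 * call clk_prepare_enable()/clk_disable_unprepare() unconditionally:
	 * the clk API treats a NULL clk as a no-op and returns success.
	 */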
drv_data->irq = platform_get_irq(pd, 0);
+ if (drv_data->irq < 0)
+ return drv_data->irq;
if (pdata) {
drv_data->freq_m = pdata->freq_m;
@@ -942,16 +979,12 @@ mv64xxx_i2c_probe(struct platform_device *pd)
} else if (pd->dev.of_node) {
rc = mv64xxx_of_config(drv_data, &pd->dev);
if (rc)
- goto exit_clk;
- }
- if (drv_data->irq < 0) {
- rc = drv_data->irq;
- goto exit_reset;
+ return rc;
}
rc = mv64xxx_i2c_init_recovery_info(drv_data, &pd->dev);
if (rc == -EPROBE_DEFER)
- goto exit_reset;
+ return rc;
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
@@ -962,7 +995,14 @@ mv64xxx_i2c_probe(struct platform_device *pd)
platform_set_drvdata(pd, drv_data);
i2c_set_adapdata(&drv_data->adapter, drv_data);
- mv64xxx_i2c_hw_init(drv_data);
+ pm_runtime_set_autosuspend_delay(&pd->dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(&pd->dev);
+ pm_runtime_enable(&pd->dev);
+ if (!pm_runtime_enabled(&pd->dev)) {
+ rc = mv64xxx_i2c_runtime_resume(&pd->dev);
+ if (rc)
+ goto exit_disable_pm;
+ }
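	/*
	 * With CONFIG_PM disabled, pm_runtime_enable() is a stub and
	 * pm_runtime_enabled() stays false, so the hardware is powered up
	 * by invoking the runtime-resume handler directly here.
	 */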
rc = request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
MV64XXX_I2C_CTLR_NAME, drv_data);
@@ -970,7 +1010,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't register intr handler irq%d: %d\n",
drv_data->irq, rc);
- goto exit_reset;
+ goto exit_disable_pm;
} else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) {
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't add i2c adapter, rc: %d\n", -rc);
@@ -981,54 +1021,50 @@ mv64xxx_i2c_probe(struct platform_device *pd)
exit_free_irq:
free_irq(drv_data->irq, drv_data);
-exit_reset:
- reset_control_assert(drv_data->rstc);
-exit_clk:
- clk_disable_unprepare(drv_data->reg_clk);
- clk_disable_unprepare(drv_data->clk);
+exit_disable_pm:
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
return rc;
}
static int
-mv64xxx_i2c_remove(struct platform_device *dev)
+mv64xxx_i2c_remove(struct platform_device *pd)
{
- struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
+ struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pd);
i2c_del_adapter(&drv_data->adapter);
free_irq(drv_data->irq, drv_data);
- reset_control_assert(drv_data->rstc);
- clk_disable_unprepare(drv_data->reg_clk);
- clk_disable_unprepare(drv_data->clk);
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int mv64xxx_i2c_resume(struct device *dev)
+static void
+mv64xxx_i2c_shutdown(struct platform_device *pd)
{
- struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
-
- mv64xxx_i2c_hw_init(drv_data);
-
- return 0;
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
}
-static const struct dev_pm_ops mv64xxx_i2c_pm = {
- .resume = mv64xxx_i2c_resume,
+static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(mv64xxx_i2c_runtime_suspend,
+ mv64xxx_i2c_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#define mv64xxx_i2c_pm_ops (&mv64xxx_i2c_pm)
-#else
-#define mv64xxx_i2c_pm_ops NULL
-#endif
-
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
.remove = mv64xxx_i2c_remove,
+ .shutdown = mv64xxx_i2c_shutdown,
.driver = {
.name = MV64XXX_I2C_CTLR_NAME,
- .pm = mv64xxx_i2c_pm_ops,
+ .pm = &mv64xxx_i2c_pm_ops,
.of_match_table = mv64xxx_i2c_of_match_table,
},
};
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index d4b1b0865f67..a3363b20f168 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1055,7 +1055,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
}
-static int nmk_i2c_remove(struct amba_device *adev)
+static void nmk_i2c_remove(struct amba_device *adev)
{
struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
@@ -1068,8 +1068,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static struct i2c_vendor_data vendor_stn8815 = {
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..845eda70b8ca 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f27bc1e55385..85e8cf58e8bf 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -161,7 +161,7 @@ static int pca_isa_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int pca_isa_remove(struct device *dev, unsigned int id)
+static void pca_isa_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pca_isa_ops);
@@ -170,8 +170,6 @@ static int pca_isa_remove(struct device *dev, unsigned int id)
free_irq(irq, &pca_isa_ops);
}
release_region(base, IO_SIZE);
-
- return 0;
}
static struct isa_driver pca_isa_driver = {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 046d241183c5..214b4c913a13 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -86,6 +86,9 @@ struct geni_i2c_dev {
u32 clk_freq_out;
const struct geni_i2c_clk_fld *clk_fld;
int suspended;
+ void *dma_buf;
+ size_t xfer_len;
+ dma_addr_t dma_addr;
};
struct geni_i2c_err_log {
@@ -348,14 +351,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
}
+static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
+ struct i2c_msg *cur)
+{
+ gi2c->cur_rd = 0;
+ if (gi2c->dma_buf) {
+ if (gi2c->err)
+ geni_i2c_rx_fsm_rst(gi2c);
+ geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
+ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
+ }
+}
+
+static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
+ struct i2c_msg *cur)
+{
+ gi2c->cur_wr = 0;
+ if (gi2c->dma_buf) {
+ if (gi2c->err)
+ geni_i2c_tx_fsm_rst(gi2c);
+ geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
+ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
+ }
+}
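For reference, the contract of the DMA-buffer helpers these cleanups wrap (real i2c-core API; the comments paraphrase its documented behaviour):

/* Returns msg->buf when the message is flagged I2C_M_DMA_SAFE, a bounce
 * buffer otherwise, or NULL when msg->len < threshold (the caller then
 * falls back to FIFO mode). */
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);

/* For reads, copies a bounce buffer back into msg->buf iff xferred is
 * true, then frees it; a no-op when buf is NULL or msg->buf itself. */
void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);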
+
static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
u32 m_param)
{
- dma_addr_t rx_dma;
+ dma_addr_t rx_dma = 0;
unsigned long time_left;
void *dma_buf;
struct geni_se *se = &gi2c->se;
size_t len = msg->len;
+ struct i2c_msg *cur;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
@@ -370,19 +398,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
dma_buf = NULL;
+ } else {
+ gi2c->xfer_len = len;
+ gi2c->dma_addr = rx_dma;
+ gi2c->dma_buf = dma_buf;
}
+ cur = gi2c->cur;
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
- gi2c->cur_rd = 0;
- if (dma_buf) {
- if (gi2c->err)
- geni_i2c_rx_fsm_rst(gi2c);
- geni_se_rx_dma_unprep(se, rx_dma, len);
- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
- }
+ geni_i2c_rx_msg_cleanup(gi2c, cur);
return gi2c->err;
}
@@ -390,11 +417,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
u32 m_param)
{
- dma_addr_t tx_dma;
+ dma_addr_t tx_dma = 0;
unsigned long time_left;
void *dma_buf;
struct geni_se *se = &gi2c->se;
size_t len = msg->len;
+ struct i2c_msg *cur;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
@@ -409,22 +437,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
dma_buf = NULL;
+ } else {
+ gi2c->xfer_len = len;
+ gi2c->dma_addr = tx_dma;
+ gi2c->dma_buf = dma_buf;
}
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+ cur = gi2c->cur;
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
- gi2c->cur_wr = 0;
- if (dma_buf) {
- if (gi2c->err)
- geni_i2c_tx_fsm_rst(gi2c);
- geni_se_tx_dma_unprep(se, tx_dma, len);
- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
- }
+ geni_i2c_tx_msg_cleanup(gi2c, cur);
return gi2c->err;
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 5a47915869ae..61dc20fd1191 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1603,7 +1603,7 @@ out:
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK);
}
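I2C_FUNC_SMBUS_EMUL_ALL (include/uapi/linux/i2c.h) extends the classic emulation set with the remaining transactions a full I2C master can emulate; roughly:

#define I2C_FUNC_SMBUS_EMUL_ALL	(I2C_FUNC_SMBUS_EMUL | \
				 I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
				 I2C_FUNC_SMBUS_BLOCK_PROC_CALL | \
				 I2C_FUNC_SMBUS_HOST_NOTIFY)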
static const struct i2c_algorithm qup_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 217def2d7cb4..12f6d452c0f7 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -91,7 +91,6 @@
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
-#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
@@ -120,6 +119,7 @@ enum rcar_i2c_type {
};
struct rcar_i2c_priv {
+ u32 flags;
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -130,7 +130,6 @@ struct rcar_i2c_priv {
int pos;
u32 icccr;
- u32 flags;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -621,27 +620,16 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
/*
* This driver has a lock-free design because there are IP cores (at least
* R-Car Gen2) which have an inherent race condition in their hardware design.
- * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
+ * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after
* the interrupt was generated, otherwise an unwanted repeated message gets
* generated. It turned out that taking a spinlock at the beginning of the ISR
* was already causing repeated messages. Thus, this driver was converted to
* the now lockless behaviour. Please keep this in mind when hacking the driver.
+ * R-Car Gen3 seems to have this fixed, but versions earlier than R-Car Gen2
+ * are likely affected. Therefore, we have different interrupt handler entries.
*/
-static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
+static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
{
- struct rcar_i2c_priv *priv = ptr;
- u32 msr, val;
-
- /* Clear START or STOP immediately, except for REPSTART after read */
- if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
- val = rcar_i2c_read(priv, ICMCR);
- rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
- }
-
- msr = rcar_i2c_read(priv, ICMSR);
-
- /* Only handle interrupts that are currently enabled */
- msr &= rcar_i2c_read(priv, ICMIER);
if (!msr) {
if (rcar_i2c_slave_irq(priv))
return IRQ_HANDLED;
@@ -685,6 +673,41 @@ out:
return IRQ_HANDLED;
}
+static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
+{
+ struct rcar_i2c_priv *priv = ptr;
+ u32 msr;
+
+ /* Clear START or STOP immediately, except for REPSTART after read */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+
+ /* Only handle interrupts that are currently enabled */
+ msr = rcar_i2c_read(priv, ICMSR);
+ msr &= rcar_i2c_read(priv, ICMIER);
+
+ return rcar_i2c_irq(irq, priv, msr);
+}
+
+static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
+{
+ struct rcar_i2c_priv *priv = ptr;
+ u32 msr;
+
+ /* Only handle interrupts that are currently enabled */
+ msr = rcar_i2c_read(priv, ICMSR);
+ msr &= rcar_i2c_read(priv, ICMIER);
+
+ /*
+ * Clear START or STOP immediately, except for REPSTART after read or
+ * if a spurious interrupt was detected.
+ */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+
+ return rcar_i2c_irq(irq, priv, msr);
+}
+
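/*
 * The split encodes the race described above: on Gen2 the ICMCR write
 * must happen first, before the status registers are even read, while
 * Gen3 can read and mask the status first and skip the write entirely
 * for spurious interrupts. The probe code below also requests the Gen2
 * handler with IRQF_NO_THREAD so it can never be deferred to a thread.
 */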
static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir,
dma_addr_t port_addr)
@@ -931,6 +954,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
+ unsigned long irqflags = 0;
+ irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq;
int ret;
/* Otherwise logic will break because some bytes must always use PIO */
@@ -979,6 +1004,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ irqflags |= IRQF_NO_THREAD;
+ irqhandler = rcar_i2c_gen2_irq;
+ }
+
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
@@ -998,7 +1028,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
priv->flags |= ID_P_HOST_NOTIFY;
priv->irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
goto out_pm_disable;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3eafe0eb3e4c..62a903fbe912 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -781,7 +781,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL_ALL | I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING;
}
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
deleted file mode 100644
index 30db8fafe078..000000000000
--- a/drivers/i2c/busses/i2c-sirf.c
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * I2C bus driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#define SIRFSOC_I2C_CLK_CTRL 0x00
-#define SIRFSOC_I2C_STATUS 0x0C
-#define SIRFSOC_I2C_CTRL 0x10
-#define SIRFSOC_I2C_IO_CTRL 0x14
-#define SIRFSOC_I2C_SDA_DELAY 0x18
-#define SIRFSOC_I2C_CMD_START 0x1C
-#define SIRFSOC_I2C_CMD_BUF 0x30
-#define SIRFSOC_I2C_DATA_BUF 0x80
-
-#define SIRFSOC_I2C_CMD_BUF_MAX 16
-#define SIRFSOC_I2C_DATA_BUF_MAX 16
-
-#define SIRFSOC_I2C_CMD(x) (SIRFSOC_I2C_CMD_BUF + (x)*0x04)
-#define SIRFSOC_I2C_DATA_MASK(x) (0xFF<<(((x)&3)*8))
-#define SIRFSOC_I2C_DATA_SHIFT(x) (((x)&3)*8)
-
-#define SIRFSOC_I2C_DIV_MASK (0xFFFF)
-
-/* I2C status flags */
-#define SIRFSOC_I2C_STAT_BUSY BIT(0)
-#define SIRFSOC_I2C_STAT_TIP BIT(1)
-#define SIRFSOC_I2C_STAT_NACK BIT(2)
-#define SIRFSOC_I2C_STAT_TR_INT BIT(4)
-#define SIRFSOC_I2C_STAT_STOP BIT(6)
-#define SIRFSOC_I2C_STAT_CMD_DONE BIT(8)
-#define SIRFSOC_I2C_STAT_ERR BIT(9)
-#define SIRFSOC_I2C_CMD_INDEX (0x1F<<16)
-
-/* I2C control flags */
-#define SIRFSOC_I2C_RESET BIT(0)
-#define SIRFSOC_I2C_CORE_EN BIT(1)
-#define SIRFSOC_I2C_MASTER_MODE BIT(2)
-#define SIRFSOC_I2C_CMD_DONE_EN BIT(11)
-#define SIRFSOC_I2C_ERR_INT_EN BIT(12)
-
-#define SIRFSOC_I2C_SDA_DELAY_MASK (0xFF)
-#define SIRFSOC_I2C_SCLF_FILTER (3<<8)
-
-#define SIRFSOC_I2C_START_CMD BIT(0)
-
-#define SIRFSOC_I2C_CMD_RP(x) ((x)&0x7)
-#define SIRFSOC_I2C_NACK BIT(3)
-#define SIRFSOC_I2C_WRITE BIT(4)
-#define SIRFSOC_I2C_READ BIT(5)
-#define SIRFSOC_I2C_STOP BIT(6)
-#define SIRFSOC_I2C_START BIT(7)
-
-#define SIRFSOC_I2C_ERR_NOACK 1
-#define SIRFSOC_I2C_ERR_TIMEOUT 2
-
-struct sirfsoc_i2c {
- void __iomem *base;
- struct clk *clk;
- u32 cmd_ptr; /* Current position in CMD buffer */
- u8 *buf; /* Buffer passed by user */
- u32 msg_len; /* Message length */
- u32 finished_len; /* number of bytes read/written */
- u32 read_cmd_len; /* number of read cmd sent */
- int msg_read; /* 1 indicates a read message */
- int err_status; /* 1 indicates an error on bus */
-
- u32 sda_delay; /* For suspend/resume */
- u32 clk_div;
- int last; /* Last message in transfer, STOP cmd can be sent */
-
- struct completion done; /* indicates completion of message transfer */
- struct i2c_adapter adapter;
-};
-
-static void i2c_sirfsoc_read_data(struct sirfsoc_i2c *siic)
-{
- u32 data = 0;
- int i;
-
- for (i = 0; i < siic->read_cmd_len; i++) {
- if (!(i & 0x3))
- data = readl(siic->base + SIRFSOC_I2C_DATA_BUF + i);
- siic->buf[siic->finished_len++] =
- (u8)((data & SIRFSOC_I2C_DATA_MASK(i)) >>
- SIRFSOC_I2C_DATA_SHIFT(i));
- }
-}
-
-static void i2c_sirfsoc_queue_cmd(struct sirfsoc_i2c *siic)
-{
- u32 regval;
- int i = 0;
-
- if (siic->msg_read) {
- while (((siic->finished_len + i) < siic->msg_len)
- && (siic->cmd_ptr < SIRFSOC_I2C_CMD_BUF_MAX)) {
- regval = SIRFSOC_I2C_READ | SIRFSOC_I2C_CMD_RP(0);
- if (((siic->finished_len + i) ==
- (siic->msg_len - 1)) && siic->last)
- regval |= SIRFSOC_I2C_STOP | SIRFSOC_I2C_NACK;
- writel(regval,
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- i++;
- }
-
- siic->read_cmd_len = i;
- } else {
- while ((siic->cmd_ptr < SIRFSOC_I2C_CMD_BUF_MAX - 1)
- && (siic->finished_len < siic->msg_len)) {
- regval = SIRFSOC_I2C_WRITE | SIRFSOC_I2C_CMD_RP(0);
- if ((siic->finished_len == (siic->msg_len - 1))
- && siic->last)
- regval |= SIRFSOC_I2C_STOP;
- writel(regval,
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- writel(siic->buf[siic->finished_len++],
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- }
- }
- siic->cmd_ptr = 0;
-
- /* Trigger the transfer */
- writel(SIRFSOC_I2C_START_CMD, siic->base + SIRFSOC_I2C_CMD_START);
-}
-
-static irqreturn_t i2c_sirfsoc_irq(int irq, void *dev_id)
-{
- struct sirfsoc_i2c *siic = (struct sirfsoc_i2c *)dev_id;
- u32 i2c_stat = readl(siic->base + SIRFSOC_I2C_STATUS);
-
- if (i2c_stat & SIRFSOC_I2C_STAT_ERR) {
- /* Error conditions */
- siic->err_status = SIRFSOC_I2C_ERR_NOACK;
- writel(SIRFSOC_I2C_STAT_ERR, siic->base + SIRFSOC_I2C_STATUS);
-
- if (i2c_stat & SIRFSOC_I2C_STAT_NACK)
- dev_dbg(&siic->adapter.dev, "ACK not received\n");
- else
- dev_err(&siic->adapter.dev, "I2C error\n");
-
- /*
- * Due to a hardware anomaly, we need to reset the I2C block early
- * after we get a NOACK while accessing non-existing clients, otherwise
- * we will get errors even if we access existing clients later
- */
- writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
- siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
-
- complete(&siic->done);
- } else if (i2c_stat & SIRFSOC_I2C_STAT_CMD_DONE) {
- /* CMD buffer execution complete */
- if (siic->msg_read)
- i2c_sirfsoc_read_data(siic);
- if (siic->finished_len == siic->msg_len)
- complete(&siic->done);
- else /* Fill a new CMD buffer for left data */
- i2c_sirfsoc_queue_cmd(siic);
-
- writel(SIRFSOC_I2C_STAT_CMD_DONE, siic->base + SIRFSOC_I2C_STATUS);
- }
-
- return IRQ_HANDLED;
-}
-
-static void i2c_sirfsoc_set_address(struct sirfsoc_i2c *siic,
- struct i2c_msg *msg)
-{
- unsigned char addr;
- u32 regval = SIRFSOC_I2C_START | SIRFSOC_I2C_CMD_RP(0) | SIRFSOC_I2C_WRITE;
-
- /* no data and last message -> add STOP */
- if (siic->last && (msg->len == 0))
- regval |= SIRFSOC_I2C_STOP;
-
- writel(regval, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
-
- addr = i2c_8bit_addr_from_msg(msg);
-
- /* Reverse direction bit */
- if (msg->flags & I2C_M_REV_DIR_ADDR)
- addr ^= 1;
-
- writel(addr, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
-}
-
-static int i2c_sirfsoc_xfer_msg(struct sirfsoc_i2c *siic, struct i2c_msg *msg)
-{
- u32 regval = readl(siic->base + SIRFSOC_I2C_CTRL);
- /* timeout waiting for the xfer to finish or fail */
- int timeout = msecs_to_jiffies((msg->len + 1) * 50);
-
- i2c_sirfsoc_set_address(siic, msg);
-
- writel(regval | SIRFSOC_I2C_CMD_DONE_EN | SIRFSOC_I2C_ERR_INT_EN,
- siic->base + SIRFSOC_I2C_CTRL);
- i2c_sirfsoc_queue_cmd(siic);
-
- if (wait_for_completion_timeout(&siic->done, timeout) == 0) {
- siic->err_status = SIRFSOC_I2C_ERR_TIMEOUT;
- dev_err(&siic->adapter.dev, "Transfer timeout\n");
- }
-
- writel(regval & ~(SIRFSOC_I2C_CMD_DONE_EN | SIRFSOC_I2C_ERR_INT_EN),
- siic->base + SIRFSOC_I2C_CTRL);
- writel(0, siic->base + SIRFSOC_I2C_CMD_START);
-
- /* the I2C controller doesn't respond, reset it */
- if (siic->err_status == SIRFSOC_I2C_ERR_TIMEOUT) {
- writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
- siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- }
- return siic->err_status ? -EAGAIN : 0;
-}
-
-static u32 i2c_sirfsoc_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static int i2c_sirfsoc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct sirfsoc_i2c *siic = adap->algo_data;
- int i, ret;
-
- clk_enable(siic->clk);
-
- for (i = 0; i < num; i++) {
- siic->buf = msgs[i].buf;
- siic->msg_len = msgs[i].len;
- siic->msg_read = !!(msgs[i].flags & I2C_M_RD);
- siic->err_status = 0;
- siic->cmd_ptr = 0;
- siic->finished_len = 0;
- siic->last = (i == (num - 1));
-
- ret = i2c_sirfsoc_xfer_msg(siic, &msgs[i]);
- if (ret) {
- clk_disable(siic->clk);
- return ret;
- }
- }
-
- clk_disable(siic->clk);
- return num;
-}
-
-/* I2C algorithms associated with this master controller driver */
-static const struct i2c_algorithm i2c_sirfsoc_algo = {
- .master_xfer = i2c_sirfsoc_xfer,
- .functionality = i2c_sirfsoc_func,
-};
-
-static int i2c_sirfsoc_probe(struct platform_device *pdev)
-{
- struct sirfsoc_i2c *siic;
- struct i2c_adapter *adap;
- struct clk *clk;
- int bitrate;
- int ctrl_speed;
- int irq;
-
- int err;
- u32 regval;
-
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(&pdev->dev, "Clock get failed\n");
- goto err_get_clk;
- }
-
- err = clk_prepare(clk);
- if (err) {
- dev_err(&pdev->dev, "Clock prepare failed\n");
- goto err_clk_prep;
- }
-
- err = clk_enable(clk);
- if (err) {
- dev_err(&pdev->dev, "Clock enable failed\n");
- goto err_clk_en;
- }
-
- ctrl_speed = clk_get_rate(clk);
-
- siic = devm_kzalloc(&pdev->dev, sizeof(*siic), GFP_KERNEL);
- if (!siic) {
- err = -ENOMEM;
- goto out;
- }
- adap = &siic->adapter;
- adap->class = I2C_CLASS_DEPRECATED;
-
- siic->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(siic->base)) {
- err = PTR_ERR(siic->base);
- goto out;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
- err = devm_request_irq(&pdev->dev, irq, i2c_sirfsoc_irq, 0,
- dev_name(&pdev->dev), siic);
- if (err)
- goto out;
-
- adap->algo = &i2c_sirfsoc_algo;
- adap->algo_data = siic;
- adap->retries = 3;
-
- adap->dev.of_node = pdev->dev.of_node;
- adap->dev.parent = &pdev->dev;
- adap->nr = pdev->id;
-
- strlcpy(adap->name, "sirfsoc-i2c", sizeof(adap->name));
-
- platform_set_drvdata(pdev, adap);
- init_completion(&siic->done);
-
- /* Controller initialisation */
-
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- writel(SIRFSOC_I2C_CORE_EN | SIRFSOC_I2C_MASTER_MODE,
- siic->base + SIRFSOC_I2C_CTRL);
-
- siic->clk = clk;
-
- err = of_property_read_u32(pdev->dev.of_node,
- "clock-frequency", &bitrate);
- if (err < 0)
- bitrate = I2C_MAX_STANDARD_MODE_FREQ;
-
- /*
- * Due to some hardware design issues, we need to tune the formula.
- * Since I2C is an open-drain interface that allows the slave to
- * stall the transaction by holding the SCL line at '0', the RTL
- * implementation waits for SCL feedback from the pin after
- * setting it to High-Z ('1'). This wait adds a few cycles of input
- * synchronization to the high-time interval counter (depending on
- * the SCL_FILTER_REG field), plus the time it takes for the board
- * pull-up resistor to raise the SCL line.
- * For slow SCL settings these additions are negligible,
- * but they start to affect the speed when the clock is set to
- * faster frequencies.
- * Based on actual tests, a different user_div value (used in the
- * divider formula 'Fio / (Fi2c * user_div)') is chosen for each
- * range of I2C bus clock frequency, to make the SCL more accurate.
- */
- if (bitrate <= 30000)
- regval = ctrl_speed / (bitrate * 5);
- else if (bitrate > 30000 && bitrate <= 280000)
- regval = (2 * ctrl_speed) / (bitrate * 11);
- else
- regval = ctrl_speed / (bitrate * 6);
-
- writel(regval, siic->base + SIRFSOC_I2C_CLK_CTRL);
- if (regval > 0xFF)
- writel(0xFF, siic->base + SIRFSOC_I2C_SDA_DELAY);
- else
- writel(regval, siic->base + SIRFSOC_I2C_SDA_DELAY);
-
- err = i2c_add_numbered_adapter(adap);
- if (err < 0)
- goto out;
-
- clk_disable(clk);
-
- dev_info(&pdev->dev, "I2C adapter ready to operate\n");
-
- return 0;
-
-out:
- clk_disable(clk);
-err_clk_en:
- clk_unprepare(clk);
-err_clk_prep:
- clk_put(clk);
-err_get_clk:
- return err;
-}
-
-static int i2c_sirfsoc_remove(struct platform_device *pdev)
-{
- struct i2c_adapter *adapter = platform_get_drvdata(pdev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- i2c_del_adapter(adapter);
- clk_unprepare(siic->clk);
- clk_put(siic->clk);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int i2c_sirfsoc_suspend(struct device *dev)
-{
- struct i2c_adapter *adapter = dev_get_drvdata(dev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- clk_enable(siic->clk);
- siic->sda_delay = readl(siic->base + SIRFSOC_I2C_SDA_DELAY);
- siic->clk_div = readl(siic->base + SIRFSOC_I2C_CLK_CTRL);
- clk_disable(siic->clk);
- return 0;
-}
-
-static int i2c_sirfsoc_resume(struct device *dev)
-{
- struct i2c_adapter *adapter = dev_get_drvdata(dev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- clk_enable(siic->clk);
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- writel(SIRFSOC_I2C_CORE_EN | SIRFSOC_I2C_MASTER_MODE,
- siic->base + SIRFSOC_I2C_CTRL);
- writel(siic->clk_div, siic->base + SIRFSOC_I2C_CLK_CTRL);
- writel(siic->sda_delay, siic->base + SIRFSOC_I2C_SDA_DELAY);
- clk_disable(siic->clk);
- return 0;
-}
-
-static const struct dev_pm_ops i2c_sirfsoc_pm_ops = {
- .suspend = i2c_sirfsoc_suspend,
- .resume = i2c_sirfsoc_resume,
-};
-#endif
-
-static const struct of_device_id sirfsoc_i2c_of_match[] = {
- { .compatible = "sirf,prima2-i2c", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_i2c_of_match);
-
-static struct platform_driver i2c_sirfsoc_driver = {
- .driver = {
- .name = "sirfsoc_i2c",
-#ifdef CONFIG_PM
- .pm = &i2c_sirfsoc_pm_ops,
-#endif
- .of_match_table = sirfsoc_i2c_of_match,
- },
- .probe = i2c_sirfsoc_probe,
- .remove = i2c_sirfsoc_remove,
-};
-module_platform_driver(i2c_sirfsoc_driver);
-
-MODULE_DESCRIPTION("SiRF SoC I2C master controller driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
-MODULE_AUTHOR("Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for a message transfer */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
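For reference, the semantics that make the "!time_left" test above correct:

/* kernel/sched/completion.c contract:
 *
 *	unsigned long wait_for_completion_timeout(struct completion *x,
 *						  unsigned long timeout);
 *
 * returns 0 if the wait timed out, otherwise the jiffies left (at least 1),
 * so a wedged bus now yields -ETIMEDOUT instead of leaving the caller
 * blocked in uninterruptible sleep forever.
 */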
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 9aa8e65b511e..c62c815b88eb 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -57,6 +57,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -160,7 +162,7 @@ enum {
};
#define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 16
+#define STM32F7_I2C_DNF_MAX 15
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
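/* The DNF field is four bits wide (CR1[11:8], see STM32F7_I2C_CR1_DNF_MASK
 * above), so the largest programmable digital-filter length is 15 I2C
 * kernel clock periods; the old value of 16 would have been silently
 * truncated by the "& 0xf" in STM32F7_I2C_CR1_DNF(). */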
@@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
+
+ /* Program the Digital Filter */
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF_MASK);
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
@@ -2026,12 +2035,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
irq_error = platform_get_irq(pdev, 1);
- if (irq_error <= 0) {
- if (irq_error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
- irq_error);
+ if (irq_error <= 0)
return irq_error ? : -ENOENT;
- }
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
deleted file mode 100644
index 64d739baf480..000000000000
--- a/drivers/i2c/busses/i2c-stu300.c
+++ /dev/null
@@ -1,1008 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2012 ST-Ericsson AB
- * ST DDC I2C master mode driver, used in e.g. U300 series platforms.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-/* the name of this kernel module */
-#define NAME "stu300"
-
-/* CR (Control Register) 8bit (R/W) */
-#define I2C_CR (0x00000000)
-#define I2C_CR_RESET_VALUE (0x00)
-#define I2C_CR_RESET_UMASK (0x00)
-#define I2C_CR_DDC1_ENABLE (0x80)
-#define I2C_CR_TRANS_ENABLE (0x40)
-#define I2C_CR_PERIPHERAL_ENABLE (0x20)
-#define I2C_CR_DDC2B_ENABLE (0x10)
-#define I2C_CR_START_ENABLE (0x08)
-#define I2C_CR_ACK_ENABLE (0x04)
-#define I2C_CR_STOP_ENABLE (0x02)
-#define I2C_CR_INTERRUPT_ENABLE (0x01)
-/* SR1 (Status Register 1) 8bit (R/-) */
-#define I2C_SR1 (0x00000004)
-#define I2C_SR1_RESET_VALUE (0x00)
-#define I2C_SR1_RESET_UMASK (0x00)
-#define I2C_SR1_EVF_IND (0x80)
-#define I2C_SR1_ADD10_IND (0x40)
-#define I2C_SR1_TRA_IND (0x20)
-#define I2C_SR1_BUSY_IND (0x10)
-#define I2C_SR1_BTF_IND (0x08)
-#define I2C_SR1_ADSL_IND (0x04)
-#define I2C_SR1_MSL_IND (0x02)
-#define I2C_SR1_SB_IND (0x01)
-/* SR2 (Status Register 2) 8bit (R/-) */
-#define I2C_SR2 (0x00000008)
-#define I2C_SR2_RESET_VALUE (0x00)
-#define I2C_SR2_RESET_UMASK (0x40)
-#define I2C_SR2_MASK (0xBF)
-#define I2C_SR2_SCLFAL_IND (0x80)
-#define I2C_SR2_ENDAD_IND (0x20)
-#define I2C_SR2_AF_IND (0x10)
-#define I2C_SR2_STOPF_IND (0x08)
-#define I2C_SR2_ARLO_IND (0x04)
-#define I2C_SR2_BERR_IND (0x02)
-#define I2C_SR2_DDC2BF_IND (0x01)
-/* CCR (Clock Control Register) 8bit (R/W) */
-#define I2C_CCR (0x0000000C)
-#define I2C_CCR_RESET_VALUE (0x00)
-#define I2C_CCR_RESET_UMASK (0x00)
-#define I2C_CCR_MASK (0xFF)
-#define I2C_CCR_FMSM (0x80)
-#define I2C_CCR_CC_MASK (0x7F)
-/* OAR1 (Own Address Register 1) 8bit (R/W) */
-#define I2C_OAR1 (0x00000010)
-#define I2C_OAR1_RESET_VALUE (0x00)
-#define I2C_OAR1_RESET_UMASK (0x00)
-#define I2C_OAR1_ADD_MASK (0xFF)
-/* OAR2 (Own Address Register 2) 8bit (R/W) */
-#define I2C_OAR2 (0x00000014)
-#define I2C_OAR2_RESET_VALUE (0x40)
-#define I2C_OAR2_RESET_UMASK (0x19)
-#define I2C_OAR2_MASK (0xE6)
-#define I2C_OAR2_FR_25_10MHZ (0x00)
-#define I2C_OAR2_FR_10_1667MHZ (0x20)
-#define I2C_OAR2_FR_1667_2667MHZ (0x40)
-#define I2C_OAR2_FR_2667_40MHZ (0x60)
-#define I2C_OAR2_FR_40_5333MHZ (0x80)
-#define I2C_OAR2_FR_5333_66MHZ (0xA0)
-#define I2C_OAR2_FR_66_80MHZ (0xC0)
-#define I2C_OAR2_FR_80_100MHZ (0xE0)
-#define I2C_OAR2_FR_MASK (0xE0)
-#define I2C_OAR2_ADD_MASK (0x06)
-/* DR (Data Register) 8bit (R/W) */
-#define I2C_DR (0x00000018)
-#define I2C_DR_RESET_VALUE (0x00)
-#define I2C_DR_RESET_UMASK (0xFF)
-#define I2C_DR_D_MASK (0xFF)
-/* ECCR (Extended Clock Control Register) 8bit (R/W) */
-#define I2C_ECCR (0x0000001C)
-#define I2C_ECCR_RESET_VALUE (0x00)
-#define I2C_ECCR_RESET_UMASK (0xE0)
-#define I2C_ECCR_MASK (0x1F)
-#define I2C_ECCR_CC_MASK (0x1F)
-
-/*
- * These events are more or less responses to commands
- * sent into the hardware, presumably reflecting the state
- * of an internal state machine.
- */
-enum stu300_event {
- STU300_EVENT_NONE = 0,
- STU300_EVENT_1,
- STU300_EVENT_2,
- STU300_EVENT_3,
- STU300_EVENT_4,
- STU300_EVENT_5,
- STU300_EVENT_6,
- STU300_EVENT_7,
- STU300_EVENT_8,
- STU300_EVENT_9
-};
-
-enum stu300_error {
- STU300_ERROR_NONE = 0,
- STU300_ERROR_ACKNOWLEDGE_FAILURE,
- STU300_ERROR_BUS_ERROR,
- STU300_ERROR_ARBITRATION_LOST,
- STU300_ERROR_UNKNOWN
-};
-
-/* timeout waiting for the controller to respond */
-#define STU300_TIMEOUT (msecs_to_jiffies(1000))
-
-/*
- * The number of address send attempts tried before giving up.
- * If the first one fails, it seems like 5 to 8 attempts are required.
- */
-#define NUM_ADDR_RESEND_ATTEMPTS 12
-
-/* I2C clock speed, in Hz 0-400kHz*/
-static unsigned int scl_frequency = I2C_MAX_STANDARD_MODE_FREQ;
-module_param(scl_frequency, uint, 0644);
-
-/**
- * struct stu300_dev - the stu300 driver state holder
- * @pdev: parent platform device
- * @adapter: corresponding I2C adapter
- * @clk: hardware block clock
- * @irq: assigned interrupt line
- * @cmd_issue_lock: this locks the following cmd_ variables
- * @cmd_complete: acknowledge completion for an I2C command
- * @cmd_event: expected event coming in as a response to a command
- * @cmd_err: error code as response to a command
- * @speed: current bus speed in Hz
- * @msg_index: index of current message
- * @msg_len: length of current message
- */
-
-struct stu300_dev {
- struct platform_device *pdev;
- struct i2c_adapter adapter;
- void __iomem *virtbase;
- struct clk *clk;
- int irq;
- spinlock_t cmd_issue_lock;
- struct completion cmd_complete;
- enum stu300_event cmd_event;
- enum stu300_error cmd_err;
- unsigned int speed;
- int msg_index;
- int msg_len;
-};
-
-/* Local forward function declarations */
-static int stu300_init_hw(struct stu300_dev *dev);
-
-/*
- * The block needs writes in both MSW and LSW in order
- * for all data lines to reach their destination.
- */
-static inline void stu300_wr8(u32 value, void __iomem *address)
-{
- writel((value << 16) | value, address);
-}
-
-/*
- * This merely masks off the duplicates which appear
- * in bytes 1-3. You _MUST_ use 32-bit bus access on this
- * device, else it will not work.
- */
-static inline u32 stu300_r8(void __iomem *address)
-{
- return readl(address) & 0x000000FFU;
-}
-
-static void stu300_irq_enable(struct stu300_dev *dev)
-{
- u32 val;
- val = stu300_r8(dev->virtbase + I2C_CR);
- val |= I2C_CR_INTERRUPT_ENABLE;
- /* Twice paranoia (possible HW glitch) */
- stu300_wr8(val, dev->virtbase + I2C_CR);
- stu300_wr8(val, dev->virtbase + I2C_CR);
-}
-
-static void stu300_irq_disable(struct stu300_dev *dev)
-{
- u32 val;
- val = stu300_r8(dev->virtbase + I2C_CR);
- val &= ~I2C_CR_INTERRUPT_ENABLE;
- /* Twice paranoia (possible HW glitch) */
- stu300_wr8(val, dev->virtbase + I2C_CR);
- stu300_wr8(val, dev->virtbase + I2C_CR);
-}
-
-
-/*
- * Tells whether a certain event or events occurred in
- * response to a command. The events represent states in
- * the internal state machine of the hardware. The events
- * are not very well described in the hardware
- * documentation and can only be treated as abstract state
- * machine states.
- *
- * @ret 0 = event has not occurred or unknown error, any
- * other value means the correct event occurred or an error.
- */
-
-static int stu300_event_occurred(struct stu300_dev *dev,
- enum stu300_event mr_event) {
- u32 status1;
- u32 status2;
-
- /* What event happened? */
- status1 = stu300_r8(dev->virtbase + I2C_SR1);
-
- if (!(status1 & I2C_SR1_EVF_IND))
- /* No event at all */
- return 0;
-
- status2 = stu300_r8(dev->virtbase + I2C_SR2);
-
- /* Block any multiple interrupts */
- stu300_irq_disable(dev);
-
- /* Check for errors first */
- if (status2 & I2C_SR2_AF_IND) {
- dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
- return 1;
- } else if (status2 & I2C_SR2_BERR_IND) {
- dev->cmd_err = STU300_ERROR_BUS_ERROR;
- return 1;
- } else if (status2 & I2C_SR2_ARLO_IND) {
- dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
- return 1;
- }
-
- switch (mr_event) {
- case STU300_EVENT_1:
- if (status1 & I2C_SR1_ADSL_IND)
- return 1;
- break;
- case STU300_EVENT_2:
- case STU300_EVENT_3:
- case STU300_EVENT_7:
- case STU300_EVENT_8:
- if (status1 & I2C_SR1_BTF_IND) {
- return 1;
- }
- break;
- case STU300_EVENT_4:
- if (status2 & I2C_SR2_STOPF_IND)
- return 1;
- break;
- case STU300_EVENT_5:
- if (status1 & I2C_SR1_SB_IND)
- /* Clear start bit */
- return 1;
- break;
- case STU300_EVENT_6:
- if (status2 & I2C_SR2_ENDAD_IND) {
- /* First check for any errors */
- return 1;
- }
- break;
- case STU300_EVENT_9:
- if (status1 & I2C_SR1_ADD10_IND)
- return 1;
- break;
- default:
- break;
- }
- /* If we get here, we're on thin ice: we have
- * received a response that does not match
- * what we requested.
- */
- dev->cmd_err = STU300_ERROR_UNKNOWN;
- dev_err(&dev->pdev->dev,
- "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n",
- mr_event, status1, status2);
- return 0;
-}
-
-static irqreturn_t stu300_irh(int irq, void *data)
-{
- struct stu300_dev *dev = data;
- int res;
-
- /* Just make sure that the block is clocked */
- clk_enable(dev->clk);
-
- /* See if this was what we were waiting for */
- spin_lock(&dev->cmd_issue_lock);
-
- res = stu300_event_occurred(dev, dev->cmd_event);
- if (res || dev->cmd_err != STU300_ERROR_NONE)
- complete(&dev->cmd_complete);
-
- spin_unlock(&dev->cmd_issue_lock);
-
- clk_disable(dev->clk);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Sends a command and then waits for the bits masked by *flagmask*
- * to go high or low by IRQ awaiting.
- */
-static int stu300_start_and_await_event(struct stu300_dev *dev,
- u8 cr_value,
- enum stu300_event mr_event)
-{
- int ret;
-
- /* Lock command issue, fill in an event we wait for */
- spin_lock_irq(&dev->cmd_issue_lock);
- init_completion(&dev->cmd_complete);
- dev->cmd_err = STU300_ERROR_NONE;
- dev->cmd_event = mr_event;
- spin_unlock_irq(&dev->cmd_issue_lock);
-
- /* Turn on interrupt, send command and wait. */
- cr_value |= I2C_CR_INTERRUPT_ENABLE;
- stu300_wr8(cr_value, dev->virtbase + I2C_CR);
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- STU300_TIMEOUT);
- if (ret < 0) {
- dev_err(&dev->pdev->dev,
- "wait_for_completion_interruptible_timeout() "
- "returned %d waiting for event %04x\n", ret, mr_event);
- return ret;
- }
-
- if (ret == 0) {
- dev_err(&dev->pdev->dev, "controller timed out "
- "waiting for event %d, reinit hardware\n", mr_event);
- (void) stu300_init_hw(dev);
- return -ETIMEDOUT;
- }
-
- if (dev->cmd_err != STU300_ERROR_NONE) {
- dev_err(&dev->pdev->dev, "controller (start) "
- "error %d waiting for event %d, reinit hardware\n",
- dev->cmd_err, mr_event);
- (void) stu300_init_hw(dev);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * This waits for a flag to be set, if it is not set on entry, an interrupt is
- * configured to wait for the flag using a completion.
- */
-static int stu300_await_event(struct stu300_dev *dev,
- enum stu300_event mr_event)
-{
- int ret;
-
- /* Is it already here? */
- spin_lock_irq(&dev->cmd_issue_lock);
- dev->cmd_err = STU300_ERROR_NONE;
- dev->cmd_event = mr_event;
-
- init_completion(&dev->cmd_complete);
-
- /* Turn on the I2C interrupt for current operation */
- stu300_irq_enable(dev);
-
- /* Unlock the command block and wait for the event to occur */
- spin_unlock_irq(&dev->cmd_issue_lock);
-
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- STU300_TIMEOUT);
- if (ret < 0) {
- dev_err(&dev->pdev->dev,
-			"wait_for_completion_interruptible_timeout() "
- "returned %d waiting for event %04x\n", ret, mr_event);
- return ret;
- }
-
- if (ret == 0) {
- if (mr_event != STU300_EVENT_6) {
- dev_err(&dev->pdev->dev, "controller "
- "timed out waiting for event %d, reinit "
- "hardware\n", mr_event);
- (void) stu300_init_hw(dev);
- }
- return -ETIMEDOUT;
- }
-
- if (dev->cmd_err != STU300_ERROR_NONE) {
- if (mr_event != STU300_EVENT_6) {
- dev_err(&dev->pdev->dev, "controller "
- "error (await_event) %d waiting for event %d, "
- "reinit hardware\n", dev->cmd_err, mr_event);
- (void) stu300_init_hw(dev);
- }
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Waits for the busy bit to go low by repeated polling.
- */
-#define BUSY_RELEASE_ATTEMPTS 10
-static int stu300_wait_while_busy(struct stu300_dev *dev)
-{
- unsigned long timeout;
- int i;
-
- for (i = 0; i < BUSY_RELEASE_ATTEMPTS; i++) {
- timeout = jiffies + STU300_TIMEOUT;
-
- while (!time_after(jiffies, timeout)) {
-			/* Is the bus no longer busy? */
- if ((stu300_r8(dev->virtbase + I2C_SR1) &
- I2C_SR1_BUSY_IND) == 0)
- return 0;
- msleep(1);
- }
-
- dev_err(&dev->pdev->dev, "transaction timed out "
- "waiting for device to be free (not busy). "
- "Attempt: %d\n", i+1);
-
- dev_err(&dev->pdev->dev, "base address = "
- "0x%p, reinit hardware\n", dev->virtbase);
-
- (void) stu300_init_hw(dev);
- }
-
- dev_err(&dev->pdev->dev, "giving up after %d attempts "
- "to reset the bus.\n", BUSY_RELEASE_ATTEMPTS);
-
- return -ETIMEDOUT;
-}
-
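
The attempt loop above wraps the usual jiffies-based busy-poll idiom. A stripped-down sketch of that idiom, assuming a hypothetical ready() predicate:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Poll until @ready reports true or @tmo jiffies elapse. */
static int poll_until_ready(bool (*ready)(void *), void *ctx, unsigned long tmo)
{
	unsigned long deadline = jiffies + tmo;

	while (!time_after(jiffies, deadline)) {
		if (ready(ctx))
			return 0;
		usleep_range(1000, 2000);	/* ~1 ms, kinder than msleep(1) */
	}
	return -ETIMEDOUT;
}
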
-struct stu300_clkset {
- unsigned long rate;
- u32 setting;
-};
-
-static const struct stu300_clkset stu300_clktable[] = {
- { 0, 0xFFU },
- { 2500000, I2C_OAR2_FR_25_10MHZ },
- { 10000000, I2C_OAR2_FR_10_1667MHZ },
- { 16670000, I2C_OAR2_FR_1667_2667MHZ },
- { 26670000, I2C_OAR2_FR_2667_40MHZ },
- { 40000000, I2C_OAR2_FR_40_5333MHZ },
- { 53330000, I2C_OAR2_FR_5333_66MHZ },
- { 66000000, I2C_OAR2_FR_66_80MHZ },
- { 80000000, I2C_OAR2_FR_80_100MHZ },
- { 100000000, 0xFFU },
-};
-
-
-static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
-{
-
- u32 val;
- int i = 0;
-
- /* Locate the appropriate clock setting */
- while (i < ARRAY_SIZE(stu300_clktable) - 1 &&
- stu300_clktable[i].rate < clkrate)
- i++;
-
- if (stu300_clktable[i].setting == 0xFFU) {
- dev_err(&dev->pdev->dev, "too %s clock rate requested "
- "(%lu Hz).\n", i ? "high" : "low", clkrate);
- return -EINVAL;
- }
-
- stu300_wr8(stu300_clktable[i].setting,
- dev->virtbase + I2C_OAR2);
-
- dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
- "virtbase %p\n", clkrate, dev->speed, dev->virtbase);
-
- if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ)
- /* Fast Mode I2C */
- val = ((clkrate/dev->speed) - 9)/3 + 1;
- else
- /* Standard Mode I2C */
- val = ((clkrate/dev->speed) - 7)/2 + 1;
-
-	/* According to the spec the divider must be at least 2 */
- if (val < 0x002) {
- dev_err(&dev->pdev->dev, "too low clock rate (%lu Hz).\n",
- clkrate);
- return -EINVAL;
- }
-
- /* We have 12 bits clock divider only! */
- if (val & 0xFFFFF000U) {
- dev_err(&dev->pdev->dev, "too high clock rate (%lu Hz).\n",
- clkrate);
- return -EINVAL;
- }
-
- if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ) {
- /* CC6..CC0 */
- stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
- dev->virtbase + I2C_CCR);
- dev_dbg(&dev->pdev->dev, "set clock divider to 0x%08x, "
- "Fast Mode I2C\n", val);
- } else {
- /* CC6..CC0 */
- stu300_wr8((val & I2C_CCR_CC_MASK),
- dev->virtbase + I2C_CCR);
- dev_dbg(&dev->pdev->dev, "set clock divider to "
- "0x%08x, Standard Mode I2C\n", val);
- }
-
- /* CC11..CC7 */
- stu300_wr8(((val >> 7) & 0x1F),
- dev->virtbase + I2C_ECCR);
-
- return 0;
-}
-
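
To make the two divider formulas above concrete, here is the arithmetic for typical bus speeds, assuming the nominal 26 MHz block clock (the rates are illustrative, not taken from this driver):

/* Standard mode, 100 kHz:
 *	val = ((26000000 / 100000) - 7) / 2 + 1
 *	    = (260 - 7) / 2 + 1 = 126 + 1 = 127 (0x07F)
 *	CC6..CC0  -> I2C_CCR  = 127 & I2C_CCR_CC_MASK
 *	CC11..CC7 -> I2C_ECCR = (127 >> 7) & 0x1F = 0
 *
 * Fast mode, 400 kHz:
 *	val = ((26000000 / 400000) - 9) / 3 + 1
 *	    = (65 - 9) / 3 + 1 = 18 + 1 = 19 (0x013)
 */
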
-
-static int stu300_init_hw(struct stu300_dev *dev)
-{
- u32 dummy;
- unsigned long clkrate;
- int ret;
-
- /* Disable controller */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- /*
- * Set own address to some default value (0x00).
- * We do not support slave mode anyway.
- */
- stu300_wr8(0x00, dev->virtbase + I2C_OAR1);
-	/*
-	 * The I2C controller only operates properly at 26 MHz, but we
-	 * program this driver as if we didn't know that. This also sets
-	 * the two high bits of the own address to zero.
-	 * There is no known hardware issue with running at 13 MHz;
-	 * however, speeds over 200 kHz are not used.
-	 */
- clkrate = clk_get_rate(dev->clk);
- ret = stu300_set_clk(dev, clkrate);
-
- if (ret)
- return ret;
- /*
- * Enable block, do it TWICE (hardware glitch)
- * Setting bit 7 can enable DDC mode. (Not used currently.)
- */
- stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
- dev->virtbase + I2C_CR);
- stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
- dev->virtbase + I2C_CR);
- /* Make a dummy read of the status register SR1 & SR2 */
- dummy = stu300_r8(dev->virtbase + I2C_SR2);
- dummy = stu300_r8(dev->virtbase + I2C_SR1);
-
- return 0;
-}
-
-
-
-/* Send slave address. */
-static int stu300_send_address(struct stu300_dev *dev,
- struct i2c_msg *msg, int resend)
-{
- u32 val;
- int ret;
-
- if (msg->flags & I2C_M_TEN) {
- /* This is probably how 10 bit addresses look */
- val = (0xf0 | (((u32) msg->addr & 0x300) >> 7)) &
- I2C_DR_D_MASK;
- if (msg->flags & I2C_M_RD)
- /* This is the direction bit */
- val |= 0x01;
- } else {
- val = i2c_8bit_addr_from_msg(msg);
- }
-
- if (resend) {
- if (msg->flags & I2C_M_RD)
- dev_dbg(&dev->pdev->dev, "read resend\n");
- else
- dev_dbg(&dev->pdev->dev, "write resend\n");
- }
-
- stu300_wr8(val, dev->virtbase + I2C_DR);
-
- /* For 10bit addressing, await 10bit request (EVENT 9) */
- if (msg->flags & I2C_M_TEN) {
- ret = stu300_await_event(dev, STU300_EVENT_9);
- /*
- * The slave device wants a 10bit address, send the rest
- * of the bits (the LSBits)
- */
- val = msg->addr & I2C_DR_D_MASK;
- /* This clears "event 9" */
- stu300_wr8(val, dev->virtbase + I2C_DR);
- if (ret != 0)
- return ret;
- }
- /* FIXME: Why no else here? two events for 10bit?
- * Await event 6 (normal) or event 9 (10bit)
- */
-
- if (resend)
- dev_dbg(&dev->pdev->dev, "await event 6\n");
- ret = stu300_await_event(dev, STU300_EVENT_6);
-
- /*
- * Clear any pending EVENT 6 no matter what happened during
- * await_event.
- */
- val = stu300_r8(dev->virtbase + I2C_CR);
- val |= I2C_CR_PERIPHERAL_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
-
- return ret;
-}
-
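
The first byte of an I2C 10-bit address phase is defined by the I2C specification as 1111 0 A9 A8 R/W; the encoding open-coded above can be written as a small helper (names hypothetical):

#include <linux/types.h>

/* 11110 A9 A8 R/W: high bits of a 10-bit address plus the direction bit. */
static inline u8 i2c_10bit_first_byte(u16 addr, bool read)
{
	return 0xf0 | (((addr >> 8) & 0x3) << 1) | (read ? 1 : 0);
}

/* The remaining eight address bits follow in the second byte. */
static inline u8 i2c_10bit_second_byte(u16 addr)
{
	return addr & 0xff;
}
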
-static int stu300_xfer_msg(struct i2c_adapter *adap,
- struct i2c_msg *msg, int stop)
-{
- u32 cr;
- u32 val;
- u32 i;
- int ret;
- int attempts = 0;
- struct stu300_dev *dev = i2c_get_adapdata(adap);
-
- clk_enable(dev->clk);
-
- /* Remove this if (0) to trace each and every message. */
- if (0) {
- dev_dbg(&dev->pdev->dev, "I2C message to: 0x%04x, len: %d, "
- "flags: 0x%04x, stop: %d\n",
- msg->addr, msg->len, msg->flags, stop);
- }
-
- /*
- * For some reason, sending the address sometimes fails when running
- * on the 13 MHz clock. No interrupt arrives. This is a work around,
- * which tries to restart and send the address up to 10 times before
- * really giving up. Usually 5 to 8 attempts are enough.
- */
- do {
- if (attempts)
- dev_dbg(&dev->pdev->dev, "wait while busy\n");
- /* Check that the bus is free, or wait until some timeout */
- ret = stu300_wait_while_busy(dev);
- if (ret != 0)
- goto exit_disable;
-
- if (attempts)
-			dev_dbg(&dev->pdev->dev, "re-init hw\n");
- /*
- * According to ST, there is no problem if the clock is
- * changed between 13 and 26 MHz during a transfer.
- */
- ret = stu300_init_hw(dev);
- if (ret)
- goto exit_disable;
-
- /* Send a start condition */
- cr = I2C_CR_PERIPHERAL_ENABLE;
- /* Setting the START bit puts the block in master mode */
- if (!(msg->flags & I2C_M_NOSTART))
- cr |= I2C_CR_START_ENABLE;
- if ((msg->flags & I2C_M_RD) && (msg->len > 1))
-			/* When reading more than 1 byte, we need to ACK. */
- cr |= I2C_CR_ACK_ENABLE;
- /* Check that it gets through */
- if (!(msg->flags & I2C_M_NOSTART)) {
- if (attempts)
- dev_dbg(&dev->pdev->dev, "send start event\n");
- ret = stu300_start_and_await_event(dev, cr,
- STU300_EVENT_5);
- }
-
- if (attempts)
- dev_dbg(&dev->pdev->dev, "send address\n");
-
- if (ret == 0)
- /* Send address */
- ret = stu300_send_address(dev, msg, attempts != 0);
-
- if (ret != 0) {
- attempts++;
- dev_dbg(&dev->pdev->dev, "failed sending address, "
- "retrying. Attempt: %d msg_index: %d/%d\n",
- attempts, dev->msg_index, dev->msg_len);
- }
-
- } while (ret != 0 && attempts < NUM_ADDR_RESEND_ATTEMPTS);
-
- if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
- dev_dbg(&dev->pdev->dev, "managed to get address "
- "through after %d attempts\n", attempts);
- } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
- dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
- "to resend address.\n",
- NUM_ADDR_RESEND_ATTEMPTS);
- goto exit_disable;
- }
-
-
- if (msg->flags & I2C_M_RD) {
- /* READ: we read the actual bytes one at a time */
- for (i = 0; i < msg->len; i++) {
- if (i == msg->len-1) {
- /*
- * Disable ACK and set STOP condition before
- * reading last byte
- */
- val = I2C_CR_PERIPHERAL_ENABLE;
-
- if (stop)
- val |= I2C_CR_STOP_ENABLE;
-
- stu300_wr8(val,
- dev->virtbase + I2C_CR);
- }
- /* Wait for this byte... */
- ret = stu300_await_event(dev, STU300_EVENT_7);
- if (ret != 0)
- goto exit_disable;
- /* This clears event 7 */
- msg->buf[i] = (u8) stu300_r8(dev->virtbase + I2C_DR);
- }
- } else {
- /* WRITE: we send the actual bytes one at a time */
- for (i = 0; i < msg->len; i++) {
- /* Write the byte */
- stu300_wr8(msg->buf[i],
- dev->virtbase + I2C_DR);
- /* Check status */
- ret = stu300_await_event(dev, STU300_EVENT_8);
- /* Next write to DR will clear event 8 */
- if (ret != 0) {
- dev_err(&dev->pdev->dev, "error awaiting "
- "event 8 (%d)\n", ret);
- goto exit_disable;
- }
- }
- /* Check NAK */
- if (!(msg->flags & I2C_M_IGNORE_NAK)) {
- if (stu300_r8(dev->virtbase + I2C_SR2) &
- I2C_SR2_AF_IND) {
- dev_err(&dev->pdev->dev, "I2C payload "
- "send returned NAK!\n");
- ret = -EIO;
- goto exit_disable;
- }
- }
- if (stop) {
- /* Send stop condition */
- val = I2C_CR_PERIPHERAL_ENABLE;
- val |= I2C_CR_STOP_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
- }
- }
-
- /* Check that the bus is free, or wait until some timeout occurs */
- ret = stu300_wait_while_busy(dev);
- if (ret != 0) {
-		dev_err(&dev->pdev->dev, "timeout waiting for transfer "
-			"to complete.\n");
- goto exit_disable;
- }
-
- /* Dummy read status registers */
- val = stu300_r8(dev->virtbase + I2C_SR2);
- val = stu300_r8(dev->virtbase + I2C_SR1);
- ret = 0;
-
- exit_disable:
- /* Disable controller */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- clk_disable(dev->clk);
- return ret;
-}
-
-static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- int ret = -1;
- int i;
-
- struct stu300_dev *dev = i2c_get_adapdata(adap);
- dev->msg_len = num;
-
- for (i = 0; i < num; i++) {
-		/*
-		 * Another driver appears to send a stop for each message;
-		 * here we only do that for the last message. If some
-		 * peripherals require that behaviour, their drivers have
-		 * to send single messages in order to get a "stop" after
-		 * each message.
-		 */
- dev->msg_index = i;
-
- ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
-
- if (ret != 0) {
- num = ret;
- break;
- }
- }
-
- return num;
-}
-
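
As the loop above shows, master_xfer returns the number of messages processed on success and a negative errno on failure. A minimal client-side caller illustrating the convention (the register layout is hypothetical):

#include <linux/errno.h>
#include <linux/i2c.h>

/* Write one register address, then read one byte back. */
static int demo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;				/* bus error */
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;	/* partial transfer */
}
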
-static int stu300_xfer_todo(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
- /* TODO: implement polling for this case if need be. */
- WARN(1, "%s: atomic transfers not implemented\n", dev_name(&adap->dev));
- return -EOPNOTSUPP;
-}
-
-static u32 stu300_func(struct i2c_adapter *adap)
-{
- /* This is the simplest thing you can think of... */
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm stu300_algo = {
- .master_xfer = stu300_xfer,
- .master_xfer_atomic = stu300_xfer_todo,
- .functionality = stu300_func,
-};
-
-static const struct i2c_adapter_quirks stu300_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static int stu300_probe(struct platform_device *pdev)
-{
- struct stu300_dev *dev;
- struct i2c_adapter *adap;
- int bus_nr;
- int ret = 0;
-
- dev = devm_kzalloc(&pdev->dev, sizeof(struct stu300_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- bus_nr = pdev->id;
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
- return PTR_ERR(dev->clk);
- }
-
- dev->pdev = pdev;
- dev->virtbase = devm_platform_ioremap_resource(pdev, 0);
- dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
- "base %p\n", bus_nr, dev->virtbase);
- if (IS_ERR(dev->virtbase))
- return PTR_ERR(dev->virtbase);
-
- dev->irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev);
- if (ret < 0)
- return ret;
-
- dev->speed = scl_frequency;
-
- clk_prepare_enable(dev->clk);
- ret = stu300_init_hw(dev);
- clk_disable(dev->clk);
- if (ret != 0) {
- dev_err(&dev->pdev->dev, "error initializing hardware.\n");
- return -EIO;
- }
-
- /* IRQ event handling initialization */
- spin_lock_init(&dev->cmd_issue_lock);
- dev->cmd_event = STU300_EVENT_NONE;
- dev->cmd_err = STU300_ERROR_NONE;
-
- adap = &dev->adapter;
- adap->owner = THIS_MODULE;
- /* DDC class but actually often used for more generic I2C */
- adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
- sizeof(adap->name));
- adap->nr = bus_nr;
- adap->algo = &stu300_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
- adap->quirks = &stu300_quirks;
-
- i2c_set_adapdata(adap, dev);
-
- /* i2c device drivers may be active on return from add_adapter() */
- ret = i2c_add_numbered_adapter(adap);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, dev);
- dev_info(&pdev->dev, "ST DDC I2C @ %p, irq %d\n",
- dev->virtbase, dev->irq);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stu300_suspend(struct device *device)
-{
- struct stu300_dev *dev = dev_get_drvdata(device);
-
- /* Turn off everything */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- return 0;
-}
-
-static int stu300_resume(struct device *device)
-{
- int ret = 0;
- struct stu300_dev *dev = dev_get_drvdata(device);
-
- clk_enable(dev->clk);
- ret = stu300_init_hw(dev);
- clk_disable(dev->clk);
-
- if (ret != 0)
- dev_err(device, "error re-initializing hardware.\n");
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
-#define STU300_I2C_PM (&stu300_pm)
-#else
-#define STU300_I2C_PM NULL
-#endif
-
-static int stu300_remove(struct platform_device *pdev)
-{
- struct stu300_dev *dev = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&dev->adapter);
- /* Turn off everything */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- return 0;
-}
-
-static const struct of_device_id stu300_dt_match[] = {
- { .compatible = "st,ddci2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, stu300_dt_match);
-
-static struct platform_driver stu300_i2c_driver = {
- .driver = {
- .name = NAME,
- .pm = STU300_I2C_PM,
- .of_match_table = stu300_dt_match,
- },
- .probe = stu300_probe,
- .remove = stu300_remove,
-
-};
-
-static int __init stu300_init(void)
-{
- return platform_driver_register(&stu300_i2c_driver);
-}
-
-static void __exit stu300_exit(void)
-{
- platform_driver_unregister(&stu300_i2c_driver);
-}
-
-/*
- * The systems using this bus often have very basic devices such
- * as regulators on the I2C bus, so this driver needs to be loaded
- * early. Therefore it is registered with subsys_initcall().
- */
-subsys_initcall(stu300_init);
-module_exit(stu300_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("ST Micro DDC I2C adapter (" NAME ")");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" NAME);
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec7a7e917edd..c0c7d01473f2 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 6f08c0c3238d..c883044715f3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ else if (i2c_dev->is_vi)
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned int reg, unsigned int len)
+{
+ u32 *data32 = data;
+
+	/*
+	 * The VI I2C controller has a known hardware bug where writes get
+	 * stuck when multiple writes happen to the TX_FIFO register in
+	 * quick succession. The recommended software workaround is to read
+	 * an I2C register after each write to the TX_FIFO register to
+	 * flush out the data.
+	 */
+ while (len--)
+ i2c_writel(i2c_dev, *data32++, reg);
+}
+
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
unsigned int reg, unsigned int len)
{
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ if (i2c_dev->is_vi)
+ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ else
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
buf += words_to_transfer * BYTES_PER_FIFO_WORD;
}
@@ -1719,9 +1739,10 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 	/* interrupt will be enabled during transfer time */
irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
- err = devm_request_irq(i2c_dev->dev, i2c_dev->irq, tegra_i2c_isr,
- IRQF_NO_SUSPEND, dev_name(i2c_dev->dev),
- i2c_dev);
+ err = devm_request_threaded_irq(i2c_dev->dev, i2c_dev->irq,
+ NULL, tegra_i2c_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(i2c_dev->dev), i2c_dev);
if (err)
return err;
diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c
deleted file mode 100644
index 8db9519695a6..000000000000
--- a/drivers/i2c/busses/i2c-zx2967.c
+++ /dev/null
@@ -1,602 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <linux/clk.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#define REG_CMD 0x04
-#define REG_DEVADDR_H 0x0C
-#define REG_DEVADDR_L 0x10
-#define REG_CLK_DIV_FS 0x14
-#define REG_CLK_DIV_HS 0x18
-#define REG_WRCONF 0x1C
-#define REG_RDCONF 0x20
-#define REG_DATA 0x24
-#define REG_STAT 0x28
-
-#define I2C_STOP 0
-#define I2C_MASTER BIT(0)
-#define I2C_ADDR_MODE_TEN BIT(1)
-#define I2C_IRQ_MSK_ENABLE BIT(3)
-#define I2C_RW_READ BIT(4)
-#define I2C_CMB_RW_EN BIT(5)
-#define I2C_START BIT(6)
-
-#define I2C_ADDR_LOW_MASK GENMASK(6, 0)
-#define I2C_ADDR_LOW_SHIFT 0
-#define I2C_ADDR_HI_MASK GENMASK(2, 0)
-#define I2C_ADDR_HI_SHIFT 7
-
-#define I2C_WFIFO_RESET BIT(7)
-#define I2C_RFIFO_RESET BIT(7)
-
-#define I2C_IRQ_ACK_CLEAR BIT(7)
-#define I2C_INT_MASK GENMASK(6, 0)
-
-#define I2C_TRANS_DONE BIT(0)
-#define I2C_SR_EDEVICE BIT(1)
-#define I2C_SR_EDATA BIT(2)
-
-#define I2C_FIFO_MAX 16
-
-#define I2C_TIMEOUT msecs_to_jiffies(1000)
-
-#define DEV(i2c) ((i2c)->adap.dev.parent)
-
-struct zx2967_i2c {
- struct i2c_adapter adap;
- struct clk *clk;
- struct completion complete;
- u32 clk_freq;
- void __iomem *reg_base;
- size_t residue;
- int irq;
- int msg_rd;
- u8 *cur_trans;
- u8 access_cnt;
- int error;
-};
-
-static void zx2967_i2c_writel(struct zx2967_i2c *i2c,
- u32 val, unsigned long reg)
-{
- writel_relaxed(val, i2c->reg_base + reg);
-}
-
-static u32 zx2967_i2c_readl(struct zx2967_i2c *i2c, unsigned long reg)
-{
- return readl_relaxed(i2c->reg_base + reg);
-}
-
-static void zx2967_i2c_writesb(struct zx2967_i2c *i2c,
- void *data, unsigned long reg, int len)
-{
- writesb(i2c->reg_base + reg, data, len);
-}
-
-static void zx2967_i2c_readsb(struct zx2967_i2c *i2c,
- void *data, unsigned long reg, int len)
-{
- readsb(i2c->reg_base + reg, data, len);
-}
-
-static void zx2967_i2c_start_ctrl(struct zx2967_i2c *i2c)
-{
- u32 status;
- u32 ctl;
-
- status = zx2967_i2c_readl(i2c, REG_STAT);
- status |= I2C_IRQ_ACK_CLEAR;
- zx2967_i2c_writel(i2c, status, REG_STAT);
-
- ctl = zx2967_i2c_readl(i2c, REG_CMD);
- if (i2c->msg_rd)
- ctl |= I2C_RW_READ;
- else
- ctl &= ~I2C_RW_READ;
- ctl &= ~I2C_CMB_RW_EN;
- ctl |= I2C_START;
- zx2967_i2c_writel(i2c, ctl, REG_CMD);
-}
-
-static void zx2967_i2c_flush_fifos(struct zx2967_i2c *i2c)
-{
- u32 offset;
- u32 val;
-
- if (i2c->msg_rd) {
- offset = REG_RDCONF;
- val = I2C_RFIFO_RESET;
- } else {
- offset = REG_WRCONF;
- val = I2C_WFIFO_RESET;
- }
-
- val |= zx2967_i2c_readl(i2c, offset);
- zx2967_i2c_writel(i2c, val, offset);
-}
-
-static int zx2967_i2c_empty_rx_fifo(struct zx2967_i2c *i2c, u32 size)
-{
- u8 val[I2C_FIFO_MAX] = {0};
- int i;
-
- if (size > I2C_FIFO_MAX) {
- dev_err(DEV(i2c), "fifo size %d over the max value %d\n",
- size, I2C_FIFO_MAX);
- return -EINVAL;
- }
-
- zx2967_i2c_readsb(i2c, val, REG_DATA, size);
- for (i = 0; i < size; i++) {
- *i2c->cur_trans++ = val[i];
- i2c->residue--;
- }
-
- barrier();
-
- return 0;
-}
-
-static int zx2967_i2c_fill_tx_fifo(struct zx2967_i2c *i2c)
-{
- size_t residue = i2c->residue;
- u8 *buf = i2c->cur_trans;
-
- if (residue == 0) {
- dev_err(DEV(i2c), "residue is %d\n", (int)residue);
- return -EINVAL;
- }
-
- if (residue <= I2C_FIFO_MAX) {
- zx2967_i2c_writesb(i2c, buf, REG_DATA, residue);
-
-		/* Update bookkeeping now so the ISR sees consistent state. */
- i2c->residue = 0;
- i2c->cur_trans = NULL;
- } else {
- zx2967_i2c_writesb(i2c, buf, REG_DATA, I2C_FIFO_MAX);
- i2c->residue -= I2C_FIFO_MAX;
- i2c->cur_trans += I2C_FIFO_MAX;
- }
-
- barrier();
-
- return 0;
-}
-
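
The residue/cur_trans bookkeeping above is the usual split-by-FIFO-depth pattern. A generic sketch of that pattern, with fifo_write() standing in for the hardware access:

#include <linux/minmax.h>
#include <linux/types.h>

#define DEMO_FIFO_DEPTH	16

/* Push @len bytes through a DEMO_FIFO_DEPTH-byte FIFO, one burst at a time. */
static void demo_fifo_write_all(void (*fifo_write)(const u8 *, size_t),
				const u8 *buf, size_t len)
{
	while (len) {
		size_t chunk = min_t(size_t, len, DEMO_FIFO_DEPTH);

		fifo_write(buf, chunk);	/* hardware drains between bursts */
		buf += chunk;
		len -= chunk;
	}
}
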
-static int zx2967_i2c_reset_hardware(struct zx2967_i2c *i2c)
-{
- u32 val;
- u32 clk_div;
-
- val = I2C_MASTER | I2C_IRQ_MSK_ENABLE;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- clk_div = clk_get_rate(i2c->clk) / i2c->clk_freq - 1;
- zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_FS);
- zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_HS);
-
- zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_WRCONF);
- zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_RDCONF);
- zx2967_i2c_writel(i2c, 1, REG_RDCONF);
-
- zx2967_i2c_flush_fifos(i2c);
-
- return 0;
-}
-
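
The divider programmed above follows the usual counter formula divider = parent_rate / bus_freq - 1. Worked numbers, with the parent rate assumed purely for illustration:

/* E.g. with a 100 MHz parent clock and clock-frequency = 100000 (100 kHz):
 *
 *	clk_div = 100000000 / 100000 - 1 = 999
 *
 * The same value is written to both REG_CLK_DIV_FS and REG_CLK_DIV_HS.
 */
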
-static void zx2967_i2c_isr_clr(struct zx2967_i2c *i2c)
-{
- u32 status;
-
- status = zx2967_i2c_readl(i2c, REG_STAT);
- status |= I2C_IRQ_ACK_CLEAR;
- zx2967_i2c_writel(i2c, status, REG_STAT);
-}
-
-static irqreturn_t zx2967_i2c_isr(int irq, void *dev_id)
-{
- u32 status;
- struct zx2967_i2c *i2c = (struct zx2967_i2c *)dev_id;
-
- status = zx2967_i2c_readl(i2c, REG_STAT) & I2C_INT_MASK;
- zx2967_i2c_isr_clr(i2c);
-
- if (status & I2C_SR_EDEVICE)
- i2c->error = -ENXIO;
- else if (status & I2C_SR_EDATA)
- i2c->error = -EIO;
- else if (status & I2C_TRANS_DONE)
- i2c->error = 0;
- else
- goto done;
-
- complete(&i2c->complete);
-done:
- return IRQ_HANDLED;
-}
-
-static void zx2967_set_addr(struct zx2967_i2c *i2c, u16 addr)
-{
- u16 val;
-
- val = (addr >> I2C_ADDR_LOW_SHIFT) & I2C_ADDR_LOW_MASK;
- zx2967_i2c_writel(i2c, val, REG_DEVADDR_L);
-
- val = (addr >> I2C_ADDR_HI_SHIFT) & I2C_ADDR_HI_MASK;
- zx2967_i2c_writel(i2c, val, REG_DEVADDR_H);
- if (val)
- val = zx2967_i2c_readl(i2c, REG_CMD) | I2C_ADDR_MODE_TEN;
- else
- val = zx2967_i2c_readl(i2c, REG_CMD) & ~I2C_ADDR_MODE_TEN;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-}
-
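
Per the masks defined at the top of this file, the split above places address bits 6:0 in REG_DEVADDR_L and bits 9:7 in REG_DEVADDR_H, with 10-bit mode enabled whenever the high part is nonzero. The same split as a standalone helper (names hypothetical):

#include <linux/types.h>

/* A6..A0 -> DEVADDR_L, A9..A7 -> DEVADDR_H (per this controller's layout). */
static inline void demo_split_addr(u16 addr, u8 *lo, u8 *hi)
{
	*lo = addr & 0x7f;		/* I2C_ADDR_LOW_MASK */
	*hi = (addr >> 7) & 0x7;	/* I2C_ADDR_HI_MASK  */
}
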
-static int zx2967_i2c_xfer_bytes(struct zx2967_i2c *i2c, u32 bytes)
-{
- unsigned long time_left;
- int rd = i2c->msg_rd;
- int ret;
-
- reinit_completion(&i2c->complete);
-
- if (rd) {
- zx2967_i2c_writel(i2c, bytes - 1, REG_RDCONF);
- } else {
- ret = zx2967_i2c_fill_tx_fifo(i2c);
- if (ret)
- return ret;
- }
-
- zx2967_i2c_start_ctrl(i2c);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- return rd ? zx2967_i2c_empty_rx_fifo(i2c, bytes) : 0;
-}
-
-static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c,
- struct i2c_msg *msg)
-{
- int ret;
- int i;
-
- zx2967_i2c_flush_fifos(i2c);
-
- i2c->cur_trans = msg->buf;
- i2c->residue = msg->len;
- i2c->access_cnt = msg->len / I2C_FIFO_MAX;
- i2c->msg_rd = msg->flags & I2C_M_RD;
-
- for (i = 0; i < i2c->access_cnt; i++) {
- ret = zx2967_i2c_xfer_bytes(i2c, I2C_FIFO_MAX);
- if (ret)
- return ret;
- }
-
- if (i2c->residue > 0) {
- ret = zx2967_i2c_xfer_bytes(i2c, i2c->residue);
- if (ret)
- return ret;
- }
-
- i2c->residue = 0;
- i2c->access_cnt = 0;
-
- return 0;
-}
-
-static int zx2967_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct zx2967_i2c *i2c = i2c_get_adapdata(adap);
- int ret;
- int i;
-
- zx2967_set_addr(i2c, msgs->addr);
-
- for (i = 0; i < num; i++) {
- ret = zx2967_i2c_xfer_msg(i2c, &msgs[i]);
- if (ret)
- return ret;
- }
-
- return num;
-}
-
-static void
-zx2967_smbus_xfer_prepare(struct zx2967_i2c *i2c, u16 addr,
- char read_write, u8 command, int size,
- union i2c_smbus_data *data)
-{
- u32 val;
-
- val = zx2967_i2c_readl(i2c, REG_RDCONF);
- val |= I2C_RFIFO_RESET;
- zx2967_i2c_writel(i2c, val, REG_RDCONF);
- zx2967_set_addr(i2c, addr);
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val &= ~I2C_RW_READ;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- switch (size) {
- case I2C_SMBUS_BYTE:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- break;
- case I2C_SMBUS_BYTE_DATA:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- if (read_write == I2C_SMBUS_WRITE)
- zx2967_i2c_writel(i2c, data->byte, REG_DATA);
- break;
- case I2C_SMBUS_WORD_DATA:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- if (read_write == I2C_SMBUS_WRITE) {
- zx2967_i2c_writel(i2c, (data->word >> 8), REG_DATA);
- zx2967_i2c_writel(i2c, (data->word & 0xff),
- REG_DATA);
- }
- break;
- }
-}
-
-static int zx2967_smbus_xfer_read(struct zx2967_i2c *i2c, int size,
- union i2c_smbus_data *data)
-{
- unsigned long time_left;
- u8 buf[2];
- u32 val;
-
- reinit_completion(&i2c->complete);
-
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_CMB_RW_EN;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_START;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- switch (size) {
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- val = zx2967_i2c_readl(i2c, REG_DATA);
- data->byte = val;
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- buf[0] = zx2967_i2c_readl(i2c, REG_DATA);
- buf[1] = zx2967_i2c_readl(i2c, REG_DATA);
- data->word = (buf[0] << 8) | buf[1];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int zx2967_smbus_xfer_write(struct zx2967_i2c *i2c)
-{
- unsigned long time_left;
- u32 val;
-
- reinit_completion(&i2c->complete);
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_START;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- return 0;
-}
-
-static int zx2967_smbus_xfer(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data)
-{
- struct zx2967_i2c *i2c = i2c_get_adapdata(adap);
-
- if (size == I2C_SMBUS_QUICK)
- read_write = I2C_SMBUS_WRITE;
-
- switch (size) {
- case I2C_SMBUS_QUICK:
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- case I2C_SMBUS_WORD_DATA:
- zx2967_smbus_xfer_prepare(i2c, addr, read_write,
- command, size, data);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- if (read_write == I2C_SMBUS_READ)
- return zx2967_smbus_xfer_read(i2c, size, data);
-
- return zx2967_smbus_xfer_write(i2c);
-}
-
-static u32 zx2967_i2c_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_QUICK |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA |
- I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_SMBUS_I2C_BLOCK;
-}
-
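
The .functionality mask above is what i2c_check_functionality() tests on the client side; a one-line gate like the following is the typical consumer:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Refuse to bind unless the adapter supports SMBus word transfers. */
static int demo_check_caps(struct i2c_adapter *adap)
{
	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WORD_DATA))
		return -EOPNOTSUPP;
	return 0;
}
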
-static int __maybe_unused zx2967_i2c_suspend(struct device *dev)
-{
- struct zx2967_i2c *i2c = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
- return 0;
-}
-
-static int __maybe_unused zx2967_i2c_resume(struct device *dev)
-{
- struct zx2967_i2c *i2c = dev_get_drvdata(dev);
-
- clk_prepare_enable(i2c->clk);
- i2c_mark_adapter_resumed(&i2c->adap);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(zx2967_i2c_dev_pm_ops,
- zx2967_i2c_suspend, zx2967_i2c_resume);
-
-static const struct i2c_algorithm zx2967_i2c_algo = {
- .master_xfer = zx2967_i2c_xfer,
- .smbus_xfer = zx2967_smbus_xfer,
- .functionality = zx2967_i2c_func,
-};
-
-static const struct i2c_adapter_quirks zx2967_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static const struct of_device_id zx2967_i2c_of_match[] = {
- { .compatible = "zte,zx296718-i2c", },
- { },
-};
-MODULE_DEVICE_TABLE(of, zx2967_i2c_of_match);
-
-static int zx2967_i2c_probe(struct platform_device *pdev)
-{
- struct zx2967_i2c *i2c;
- void __iomem *reg_base;
- struct clk *clk;
- int ret;
-
- i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
- if (!i2c)
- return -ENOMEM;
-
- reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(reg_base))
- return PTR_ERR(reg_base);
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
-		dev_err(&pdev->dev, "missing controller clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable i2c_clk\n");
- return ret;
- }
-
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &i2c->clk_freq);
- if (ret) {
-		dev_err(&pdev->dev, "missing clock-frequency\n");
- return ret;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- i2c->irq = ret;
- i2c->reg_base = reg_base;
- i2c->clk = clk;
-
- init_completion(&i2c->complete);
- platform_set_drvdata(pdev, i2c);
-
- ret = zx2967_i2c_reset_hardware(i2c);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize i2c controller\n");
- goto err_clk_unprepare;
- }
-
- ret = devm_request_irq(&pdev->dev, i2c->irq,
- zx2967_i2c_isr, 0, dev_name(&pdev->dev), i2c);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq %i\n", i2c->irq);
- goto err_clk_unprepare;
- }
-
- i2c_set_adapdata(&i2c->adap, i2c);
- strlcpy(i2c->adap.name, "zx2967 i2c adapter",
- sizeof(i2c->adap.name));
- i2c->adap.algo = &zx2967_i2c_algo;
- i2c->adap.quirks = &zx2967_i2c_quirks;
- i2c->adap.nr = pdev->id;
- i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = pdev->dev.of_node;
-
- ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret)
- goto err_clk_unprepare;
-
- return 0;
-
-err_clk_unprepare:
- clk_disable_unprepare(i2c->clk);
- return ret;
-}
-
-static int zx2967_i2c_remove(struct platform_device *pdev)
-{
- struct zx2967_i2c *i2c = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
- return 0;
-}
-
-static struct platform_driver zx2967_i2c_driver = {
- .probe = zx2967_i2c_probe,
- .remove = zx2967_i2c_remove,
- .driver = {
- .name = "zx2967_i2c",
- .of_match_table = zx2967_i2c_of_match,
- .pm = &zx2967_i2c_dev_pm_ops,
- },
-};
-module_platform_driver(zx2967_i2c_driver);
-
-MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX2967 I2C Bus Controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 37c510d9347a..8ceaa88dd78f 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -225,12 +225,8 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
adev->power.flags.ignore_parent = true;
acpi_device_set_enumerated(adev);
- if (IS_ERR(i2c_new_client_device(adapter, info))) {
+ if (IS_ERR(i2c_new_client_device(adapter, info)))
adev->power.flags.ignore_parent = false;
- dev_err(&adapter->dev,
- "failed to add I2C device %s from ACPI\n",
- dev_name(&adev->dev));
- }
}
static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index f5c9787992e9..d2d32c0fd8c3 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -323,8 +323,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
*/
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ ? 2 : 1;
- int i;
+ int nmsgs = read_write == I2C_SMBUS_READ ? 2 : 1;
u8 partial_pec = 0;
int status;
struct i2c_msg msg[2] = {
@@ -340,6 +339,8 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
.buf = msgbuf1,
},
};
+ bool wants_pec = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK
+ && size != I2C_SMBUS_I2C_BLOCK_DATA);
msgbuf0[0] = command;
switch (size) {
@@ -348,13 +349,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
/* Special case: The read/write field is used as data */
msg[0].flags = flags | (read_write == I2C_SMBUS_READ ?
I2C_M_RD : 0);
- num = 1;
+ nmsgs = 1;
break;
case I2C_SMBUS_BYTE:
if (read_write == I2C_SMBUS_READ) {
/* Special case: only a read! */
msg[0].flags = I2C_M_RD | flags;
- num = 1;
+ nmsgs = 1;
}
break;
case I2C_SMBUS_BYTE_DATA:
@@ -375,7 +376,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
}
break;
case I2C_SMBUS_PROC_CALL:
- num = 2; /* Special case */
+ nmsgs = 2; /* Special case */
read_write = I2C_SMBUS_READ;
msg[0].len = 3;
msg[1].len = 2;
@@ -398,12 +399,11 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
}
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i < msg[0].len; i++)
- msg[0].buf[i] = data->block[i - 1];
+ memcpy(msg[0].buf + 1, data->block, msg[0].len - 1);
}
break;
case I2C_SMBUS_BLOCK_PROC_CALL:
- num = 2; /* Another special case */
+ nmsgs = 2; /* Another special case */
read_write = I2C_SMBUS_READ;
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev,
@@ -414,8 +414,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[0].len = data->block[0] + 2;
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i < msg[0].len; i++)
- msg[0].buf[i] = data->block[i - 1];
+ memcpy(msg[0].buf + 1, data->block, msg[0].len - 1);
msg[1].flags |= I2C_M_RECV_LEN;
msg[1].len = 1; /* block length will be added by
@@ -437,8 +436,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[0].len = data->block[0] + 1;
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i <= data->block[0]; i++)
- msg[0].buf[i] = data->block[i];
+ memcpy(msg[0].buf + 1, data->block + 1, data->block[0]);
}
break;
default:
@@ -446,33 +444,31 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
return -EOPNOTSUPP;
}
- i = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK
- && size != I2C_SMBUS_I2C_BLOCK_DATA);
- if (i) {
+ if (wants_pec) {
/* Compute PEC if first message is a write */
if (!(msg[0].flags & I2C_M_RD)) {
- if (num == 1) /* Write only */
+ if (nmsgs == 1) /* Write only */
i2c_smbus_add_pec(&msg[0]);
else /* Write followed by read */
partial_pec = i2c_smbus_msg_pec(0, &msg[0]);
}
/* Ask for PEC if last message is a read */
- if (msg[num-1].flags & I2C_M_RD)
- msg[num-1].len++;
+ if (msg[nmsgs - 1].flags & I2C_M_RD)
+ msg[nmsgs - 1].len++;
}
- status = __i2c_transfer(adapter, msg, num);
+ status = __i2c_transfer(adapter, msg, nmsgs);
if (status < 0)
goto cleanup;
- if (status != num) {
+ if (status != nmsgs) {
status = -EIO;
goto cleanup;
}
status = 0;
/* Check PEC if last message is a read */
- if (i && (msg[num-1].flags & I2C_M_RD)) {
- status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
+ if (wants_pec && (msg[nmsgs - 1].flags & I2C_M_RD)) {
+ status = i2c_smbus_check_pec(partial_pec, &msg[nmsgs - 1]);
if (status < 0)
goto cleanup;
}
@@ -490,8 +486,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
data->word = msgbuf1[0] | (msgbuf1[1] << 8);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i + 1] = msg[1].buf[i];
+ memcpy(data->block + 1, msg[1].buf, data->block[0]);
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
@@ -502,8 +497,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
status = -EPROTO;
goto cleanup;
}
- for (i = 0; i < msg[1].buf[0] + 1; i++)
- data->block[i] = msg[1].buf[i];
+ memcpy(data->block, msg[1].buf, msg[1].buf[0] + 1);
break;
}
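
The hunks above replace open-coded byte loops with memcpy(); the two forms are equivalent because the source and destination regions never overlap. Side by side, for n payload bytes placed after a one-byte header:

#include <string.h>

/* Loop form: dst[1..n] = src[0..n-1], as the deleted code did. */
static void copy_loop(unsigned char *dst, const unsigned char *src, int n)
{
	int i;

	for (i = 1; i <= n; i++)
		dst[i] = src[i - 1];
}

/* memcpy form: same bytes in one call. */
static void copy_memcpy(unsigned char *dst, const unsigned char *src, int n)
{
	memcpy(dst + 1, src, n);
}
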
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index c288102de324..56dae08dfd48 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -19,6 +19,7 @@
enum testunit_cmds {
TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */
TU_CMD_HOST_NOTIFY,
+ TU_CMD_SMBUS_BLOCK_PROC_CALL,
TU_NUM_CMDS
};
@@ -88,6 +89,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
{
struct testunit_data *tu = i2c_get_clientdata(client);
+ bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 &&
+ tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL;
int ret = 0;
switch (event) {
@@ -118,12 +121,17 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
fallthrough;
case I2C_SLAVE_WRITE_REQUESTED:
+ memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;
- case I2C_SLAVE_READ_REQUESTED:
case I2C_SLAVE_READ_PROCESSED:
- *val = TU_CUR_VERSION;
+ if (is_proc_call && tu->regs[TU_REG_DATAH])
+ tu->regs[TU_REG_DATAH]--;
+ fallthrough;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ *val = is_proc_call ? tu->regs[TU_REG_DATAH] : TU_CUR_VERSION;
break;
}
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index 537a598e22db..d642cad219d9 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -7,7 +7,6 @@
*/
-#define DEBUG 1
#define pr_fmt(fmt) "i2c-stub: " fmt
#include <linux/errno.h>
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 4effe563e9e8..bac415a52b78 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -49,60 +49,112 @@ static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
return 0;
}
-#ifdef CONFIG_OF
-static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
- struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+
+static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
+ struct fwnode_handle *fwdev,
+ unsigned int *adr)
+
+{
+ unsigned long long adr64;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE_FWNODE(fwdev),
+ METHOD_NAME__ADR,
+ NULL, &adr64);
+
+ if (!ACPI_SUCCESS(status)) {
+ dev_err(dev, "Cannot get address\n");
+ return -EINVAL;
+ }
+
+ *adr = adr64;
+ if (*adr != adr64) {
+ dev_err(dev, "Address out of range\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+#else
+
+static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
+ struct fwnode_handle *fwdev,
+ unsigned int *adr)
+{
+ return -EINVAL;
+}
+
+#endif
+
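
The _ADR helper above uses the assign-then-compare idiom to detect lossy narrowing of the 64-bit ACPI integer. The idiom in isolation, as a hedged sketch:

#include <linux/errno.h>

/* Detect truncation: assign to the narrow type, then compare to the source. */
static int demo_narrow(unsigned long long wide, unsigned int *out)
{
	*out = wide;
	if (*out != wide)
		return -ERANGE;	/* the value did not fit */
	return 0;
}
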
+static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
+ struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *adapter_np, *child;
- struct i2c_adapter *adapter;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *adapter_np;
+ struct i2c_adapter *adapter = NULL;
+ struct fwnode_handle *child;
unsigned *values;
- int i = 0;
+ int rc, i = 0;
+
+ if (is_of_node(dev->fwnode)) {
+ if (!np)
+ return -ENODEV;
- if (!np)
- return -ENODEV;
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ of_node_put(adapter_np);
+
+ } else if (is_acpi_node(dev->fwnode)) {
+ /*
+ * In ACPI land the mux should be a direct child of the i2c
+ * bus it muxes.
+ */
+ acpi_handle dev_handle = ACPI_HANDLE(dev->parent);
- adapter_np = of_parse_phandle(np, "i2c-parent", 0);
- if (!adapter_np) {
- dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
- return -ENODEV;
+ adapter = i2c_acpi_find_adapter_by_handle(dev_handle);
}
- adapter = of_find_i2c_adapter_by_node(adapter_np);
- of_node_put(adapter_np);
+
if (!adapter)
return -EPROBE_DEFER;
mux->data.parent = i2c_adapter_id(adapter);
put_device(&adapter->dev);
- mux->data.n_values = of_get_child_count(np);
-
- values = devm_kcalloc(&pdev->dev,
+ mux->data.n_values = device_get_child_node_count(dev);
+ values = devm_kcalloc(dev,
mux->data.n_values, sizeof(*mux->data.values),
GFP_KERNEL);
if (!values) {
- dev_err(&pdev->dev, "Cannot allocate values array");
+ dev_err(dev, "Cannot allocate values array");
return -ENOMEM;
}
- for_each_child_of_node(np, child) {
- of_property_read_u32(child, "reg", values + i);
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child)) {
+ fwnode_property_read_u32(child, "reg", values + i);
+
+ } else if (is_acpi_node(child)) {
+ rc = i2c_mux_gpio_get_acpi_adr(dev, child, values + i);
+ if (rc)
+ return rc;
+ }
+
i++;
}
mux->data.values = values;
- if (of_property_read_u32(np, "idle-state", &mux->data.idle))
+ if (fwnode_property_read_u32(dev->fwnode, "idle-state", &mux->data.idle))
mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
return 0;
}
-#else
-static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
- struct platform_device *pdev)
-{
- return 0;
-}
-#endif
static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
@@ -118,7 +170,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
if (!dev_get_platdata(&pdev->dev)) {
- ret = i2c_mux_gpio_probe_dt(mux, pdev);
+ ret = i2c_mux_gpio_probe_fw(mux, pdev);
if (ret < 0)
return ret;
} else {
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 5ed55ca4fe93..1a879f6a31ef 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -1,35 +1,8 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
- * drivers/i2c/muxes/i2c-mux-mlxcpld.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ * Mellanox i2c mux driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/device.h>
@@ -38,19 +11,19 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/platform_data/x86/mlxcpld.h>
+#include <linux/platform_data/mlxcpld.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define CPLD_MUX_MAX_NCHANS 8
-
/* mlxcpld_mux - mux control structure:
- * @last_chan - last register value
+ * @last_val - last selected register value or -1 if mux deselected
* @client - I2C device client
+ * @pdata: platform data
*/
struct mlxcpld_mux {
- u8 last_chan;
+ int last_val;
struct i2c_client *client;
+ struct mlxcpld_mux_plat_data pdata;
};
/* MUX logic description.
@@ -81,37 +54,50 @@ struct mlxcpld_mux {
*
*/
-static const struct i2c_device_id mlxcpld_mux_id[] = {
- { "mlxcpld_mux_module", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mlxcpld_mux_id);
-
/* Write to mux register. Don't use i2c_transfer() and i2c_smbus_xfer()
  * for this as they will try to lock the adapter a second time.
*/
static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
- struct i2c_client *client, u8 val)
+ struct mlxcpld_mux *mux, u32 val)
{
- struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
- union i2c_smbus_data data = { .byte = val };
-
- return __i2c_smbus_xfer(adap, client->addr, client->flags,
- I2C_SMBUS_WRITE, pdata->sel_reg_addr,
- I2C_SMBUS_BYTE_DATA, &data);
+ struct i2c_client *client = mux->client;
+ union i2c_smbus_data data;
+ struct i2c_msg msg;
+ u8 buf[3];
+
+ switch (mux->pdata.reg_size) {
+ case 1:
+ data.byte = val;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, mux->pdata.sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
+ case 2:
+ buf[0] = mux->pdata.sel_reg_addr >> 8;
+ buf[1] = mux->pdata.sel_reg_addr;
+ buf[2] = val;
+ msg.addr = client->addr;
+ msg.buf = buf;
+ msg.len = mux->pdata.reg_size + 1;
+ msg.flags = 0;
+ return __i2c_transfer(adap, &msg, 1);
+ default:
+ return -EINVAL;
+ }
}
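
For the reg_size == 2 case above, the raw message is a three-byte write: big-endian register address followed by the value. The layout spelled out, restating the code above rather than adding behaviour:

/*	buf[0] = sel_reg_addr >> 8;	register address, high byte
 *	buf[1] = sel_reg_addr;		register address, low byte
 *	buf[2] = val;			channel select value
 *
 * One i2c_msg of len 3 with flags 0 (plain write), issued through
 * __i2c_transfer() because the mux core already holds the adapter lock.
 */
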
static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
- struct mlxcpld_mux *data = i2c_mux_priv(muxc);
- struct i2c_client *client = data->client;
- u8 regval = chan + 1;
+ struct mlxcpld_mux *mux = i2c_mux_priv(muxc);
+ u32 regval = chan;
int err = 0;
+ if (mux->pdata.reg_size == 1)
+ regval += 1;
+
 	/* Only select the channel if it's different from the last channel */
- if (data->last_chan != regval) {
- err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
- data->last_chan = err < 0 ? 0 : regval;
+ if (mux->last_val != regval) {
+ err = mlxcpld_mux_reg_write(muxc->parent, mux, regval);
+ mux->last_val = err < 0 ? -1 : regval;
}
return err;
@@ -119,56 +105,64 @@ static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct mlxcpld_mux *data = i2c_mux_priv(muxc);
- struct i2c_client *client = data->client;
+ struct mlxcpld_mux *mux = i2c_mux_priv(muxc);
/* Deselect active channel */
- data->last_chan = 0;
+ mux->last_val = -1;
- return mlxcpld_mux_reg_write(muxc->parent, client, data->last_chan);
+ return mlxcpld_mux_reg_write(muxc->parent, mux, 0);
}
 /* Probe/remove functions */
-static int mlxcpld_mux_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mlxcpld_mux_probe(struct platform_device *pdev)
{
- struct i2c_adapter *adap = client->adapter;
- struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct i2c_client *client = to_i2c_client(pdev->dev.parent);
struct i2c_mux_core *muxc;
- int num, force;
struct mlxcpld_mux *data;
- int err;
+ int num, err;
+ u32 func;
if (!pdata)
return -EINVAL;
- if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ switch (pdata->reg_size) {
+ case 1:
+ func = I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+ break;
+ case 2:
+ func = I2C_FUNC_I2C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter, func))
return -ENODEV;
- muxc = i2c_mux_alloc(adap, &client->dev, CPLD_MUX_MAX_NCHANS,
+ muxc = i2c_mux_alloc(client->adapter, &pdev->dev, pdata->num_adaps,
sizeof(*data), 0, mlxcpld_mux_select_chan,
mlxcpld_mux_deselect);
if (!muxc)
return -ENOMEM;
+ platform_set_drvdata(pdev, muxc);
data = i2c_mux_priv(muxc);
- i2c_set_clientdata(client, muxc);
data->client = client;
- data->last_chan = 0; /* force the first selection */
+ memcpy(&data->pdata, pdata, sizeof(*pdata));
+ data->last_val = -1; /* force the first selection */
/* Create an adapter for each channel. */
- for (num = 0; num < CPLD_MUX_MAX_NCHANS; num++) {
- if (num >= pdata->num_adaps)
- /* discard unconfigured channels */
- break;
-
- force = pdata->adap_ids[num];
-
- err = i2c_mux_add_adapter(muxc, force, num, 0);
+ for (num = 0; num < pdata->num_adaps; num++) {
+ err = i2c_mux_add_adapter(muxc, 0, pdata->chan_ids[num], 0);
if (err)
goto virt_reg_failed;
}
+ /* Notify caller when all channels' adapters are created. */
+ if (pdata->completion_notify)
+ pdata->completion_notify(pdata->handle, muxc->parent, muxc->adapter);
+
return 0;
virt_reg_failed:
@@ -176,24 +170,23 @@ virt_reg_failed:
return err;
}
-static int mlxcpld_mux_remove(struct i2c_client *client)
+static int mlxcpld_mux_remove(struct platform_device *pdev)
{
- struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
return 0;
}
-static struct i2c_driver mlxcpld_mux_driver = {
- .driver = {
- .name = "mlxcpld-mux",
+static struct platform_driver mlxcpld_mux_driver = {
+ .driver = {
+ .name = "i2c-mux-mlxcpld",
},
- .probe = mlxcpld_mux_probe,
- .remove = mlxcpld_mux_remove,
- .id_table = mlxcpld_mux_id,
+ .probe = mlxcpld_mux_probe,
+ .remove = mlxcpld_mux_remove,
};
-module_i2c_driver(mlxcpld_mux_driver);
+module_platform_driver(mlxcpld_mux_driver);
MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index bb8e60dff988..e92d3e9a52bd 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -262,6 +262,11 @@ int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
drv->driver.owner = owner;
drv->driver.bus = &i3c_bus_type;
+ if (!drv->probe) {
+ pr_err("Trying to register an i3c driver without probe callback\n");
+ return -EINVAL;
+ }
+
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index b61bf53ec07a..f8e9b7305c13 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -326,15 +326,13 @@ static int i3c_device_remove(struct device *dev)
{
struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
- int ret;
- ret = driver->remove(i3cdev);
- if (ret)
- return ret;
+ if (driver->remove)
+ driver->remove(i3cdev);
i3c_device_free_ibi(i3cdev);
- return ret;
+ return 0;
}
struct bus_type i3c_bus_type = {
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index e68f15f4b4d0..3b8f95916f46 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -22,9 +22,18 @@ config DW_I3C_MASTER
This driver can also be built as a module. If so, the module
will be called dw-i3c-master.
+config SVC_I3C_MASTER
+ tristate "Silvaco I3C Dual-Role Master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+ Support for Silvaco I3C Dual-Role Master Controller.
+
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
depends on I3C
+ depends on HAS_IOMEM
help
	  Support for hardware following the MIPI Alliance's I3C Host Controller
Interface specification.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index b892fd4cafad..b3fee0f690b2 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 8513bd353c05..03a368da51b9 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -816,11 +816,6 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
dw_i3c_master_free_xfer(xfer);
- i3c_master_disec_locked(m, I3C_BROADCAST_ADDR,
- I3C_CCC_EVENT_HJ |
- I3C_CCC_EVENT_MR |
- I3C_CCC_EVENT_SIR);
-
return 0;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 500abd27fb22..1b73647cc3b1 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -777,7 +777,7 @@ static int i3c_hci_remove(struct platform_device *pdev)
return 0;
}
-static const struct __maybe_unused of_device_id i3c_hci_of_match[] = {
+static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
{ .compatible = "mipi-i3c-hci", },
{},
};
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
new file mode 100644
index 000000000000..8d990696676e
--- /dev/null
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -0,0 +1,1478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Silvaco dual-role I3C master driver
+ *
+ * Copyright (C) 2020 Silvaco
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Master Mode Registers */
+#define SVC_I3C_MCONFIG 0x000
+#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
+#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
+#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
+#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
+#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
+#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
+#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
+#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
+
+#define SVC_I3C_MCTRL 0x084
+#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
+#define SVC_I3C_MCTRL_REQUEST_NONE 0
+#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
+#define SVC_I3C_MCTRL_REQUEST_STOP 2
+#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
+#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
+#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
+#define SVC_I3C_MCTRL_TYPE_I3C 0
+#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
+#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
+#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
+#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
+#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
+#define SVC_I3C_MCTRL_DIR_WRITE 0
+#define SVC_I3C_MCTRL_DIR_READ 1
+#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
+
+#define SVC_I3C_MSTATUS 0x088
+#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
+#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
+#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
+#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
+#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
+#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
+#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
+#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
+#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
+#define SVC_I3C_MINT_SLVSTART BIT(8)
+#define SVC_I3C_MINT_MCTRLDONE BIT(9)
+#define SVC_I3C_MINT_COMPLETE BIT(10)
+#define SVC_I3C_MINT_RXPEND BIT(11)
+#define SVC_I3C_MINT_TXNOTFULL BIT(12)
+#define SVC_I3C_MINT_IBIWON BIT(13)
+#define SVC_I3C_MINT_ERRWARN BIT(15)
+#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
+#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
+#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
+#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
+#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
+#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
+#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
+#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
+
+#define SVC_I3C_IBIRULES 0x08C
+#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
+ ((addr) & 0x3F) << ((slot) * 6))
+#define SVC_I3C_IBIRULES_ADDRS 5
+#define SVC_I3C_IBIRULES_MSB0 BIT(30)
+#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
+#define SVC_I3C_IBIRULES_MANDBYTE 0
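+/*
+ * Layout illustration (derived from the macro above, values are only an
+ * example): SVC_I3C_IBIRULES_ADDR(2, 0x0a) places the six low address bits
+ * at bits [17:12]; each of the five slots thus occupies its own 6-bit field
+ * within bits [29:0].
+ */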
+#define SVC_I3C_MINTSET 0x090
+#define SVC_I3C_MINTCLR 0x094
+#define SVC_I3C_MINTMASKED 0x098
+#define SVC_I3C_MERRWARN 0x09C
+#define SVC_I3C_MDMACTRL 0x0A0
+#define SVC_I3C_MDATACTRL 0x0AC
+#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
+#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
+#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
+#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
+#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
+#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
+
+#define SVC_I3C_MWDATAB 0x0B0
+#define SVC_I3C_MWDATAB_END BIT(8)
+
+#define SVC_I3C_MWDATABE 0x0B4
+#define SVC_I3C_MWDATAH 0x0B8
+#define SVC_I3C_MWDATAHE 0x0BC
+#define SVC_I3C_MRDATAB 0x0C0
+#define SVC_I3C_MRDATAH 0x0C8
+#define SVC_I3C_MWMSG_SDR 0x0D0
+#define SVC_I3C_MRMSG_SDR 0x0D4
+#define SVC_I3C_MWMSG_DDR 0x0D8
+#define SVC_I3C_MRMSG_DDR 0x0DC
+
+#define SVC_I3C_MDYNADDR 0x0E4
+#define SVC_MDYNADDR_VALID BIT(0)
+#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
+
+#define SVC_I3C_MAX_DEVS 32
+
+/* This parameter depends on the implementation and may be tuned */
+#define SVC_I3C_FIFO_SIZE 16
+
+struct svc_i3c_cmd {
+ u8 addr;
+ bool rnw;
+ u8 *in;
+ const void *out;
+ unsigned int len;
+ unsigned int read_len;
+ bool continued;
+};
+
+struct svc_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int type;
+ unsigned int ncmds;
+ struct svc_i3c_cmd cmds[];
+};
+
+/**
+ * struct svc_i3c_master - Silvaco I3C Master structure
+ * @base: I3C master controller
+ * @dev: Corresponding device
+ * @regs: Memory mapping
+ * @free_slots: Bit array of available slots
+ * @addrs: Array containing the dynamic addresses of each attached device
+ * @descs: Array of descriptors, one per attached device
+ * @hj_work: Hot-join work
+ * @ibi_work: IBI work
+ * @irq: Main interrupt
+ * @pclk: System clock
+ * @fclk: Fast clock (bus)
+ * @sclk: Slow clock (other events)
+ * @xferqueue: Transfer queue structure
+ * @xferqueue.list: List member
+ * @xferqueue.cur: Current ongoing transfer
+ * @xferqueue.lock: Queue lock
+ * @ibi: IBI structure
+ * @ibi.num_slots: Number of slots available in @ibi.slots
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
+ */
+struct svc_i3c_master {
+ struct i3c_master_controller base;
+ struct device *dev;
+ void __iomem *regs;
+ u32 free_slots;
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
+ struct work_struct hj_work;
+ struct work_struct ibi_work;
+ int irq;
+ struct clk *pclk;
+ struct clk *fclk;
+ struct clk *sclk;
+ struct {
+ struct list_head list;
+ struct svc_i3c_xfer *cur;
+ /* Prevent races between transfers */
+ spinlock_t lock;
+ } xferqueue;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ struct i3c_ibi_slot *tbq_slot;
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
+};
+
+/**
+ * struct svc_i3c_i2c_dev_data - Device-specific data
+ * @index: Index in the master tables corresponding to this device
+ * @ibi: IBI slot index in the master structure
+ * @ibi_pool: IBI pool associated to this device
+ */
+struct svc_i3c_i2c_dev_data {
+ u8 index;
+ int ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
+static bool svc_i3c_master_error(struct svc_i3c_master *master)
+{
+ u32 mstatus, merrwarn;
+
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
+{
+ writel(mask, master->regs + SVC_I3C_MINTSET);
+}
+
+static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
+{
+ u32 mask = readl(master->regs + SVC_I3C_MINTSET);
+
+ writel(mask, master->regs + SVC_I3C_MINTCLR);
+}
+
+static inline struct svc_i3c_master *
+to_svc_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct svc_i3c_master, base);
+}
+
+static void svc_i3c_master_hj_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master;
+
+ master = container_of(work, struct svc_i3c_master, hj_work);
+ i3c_master_do_daa(&master->base);
+}
+
+static struct i3c_dev_desc *
+svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
+ unsigned int ibiaddr)
+{
+ int i;
+
+ for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
+ if (master->addrs[i] == ibiaddr)
+ break;
+
+ if (i == SVC_I3C_MAX_DEVS)
+ return NULL;
+
+ return master->descs[i];
+}
+
+static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * This delay is necessary after the emission of a stop, otherwise,
+ * e.g., repeating IBIs do not get detected. There is a note in the
+ * manual about it, stating that the stop condition might not be
+ * settled correctly if a start condition follows too rapidly.
+ */
+ udelay(1);
+}
+
+static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
+{
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+}
+
+static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return -ENOSPC;
+
+ slot->len = 0;
+ buf = slot->data;
+
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
+ readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
+ slot->len += count;
+ buf += count;
+ }
+
+ master->ibi.tbq_slot = slot;
+
+ return 0;
+}
+
+static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
+ bool mandatory_byte)
+{
+ unsigned int ibi_ack_nack;
+
+ ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
+ if (mandatory_byte)
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
+ else
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
+
+ writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
+ SVC_I3C_MCTRL_IBIRESP_NACK,
+ master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_ibi_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
+ struct svc_i3c_i2c_dev_data *data;
+ unsigned int ibitype, ibiaddr;
+ struct i3c_dev_desc *dev;
+ u32 status, val;
+ int ret;
+
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+ master->regs + SVC_I3C_MCTRL);
+
+ /* Wait for IBIWON, should take approximately 100us */
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
+ goto reenable_ibis;
+ }
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+ status = readl(master->regs + SVC_I3C_MSTATUS);
+ ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
+
+ /* Handle the critical responses to IBIs */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
+ if (!dev)
+ svc_i3c_master_nack_ibi(master);
+ else
+ svc_i3c_master_handle_ibi(master, dev);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ svc_i3c_master_ack_ibi(master, false);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_nack_ibi(master);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If an error happened, we probably got interrupted and the exchange
+ * timed out. In this case we just drop everything, emit a stop and
+ * wait for the slave to interrupt again.
+ */
+ if (svc_i3c_master_error(master)) {
+ if (master->ibi.tbq_slot) {
+ data = i3c_dev_get_master_data(dev);
+ i3c_generic_ibi_recycle_slot(data->ibi_pool,
+ master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+
+ svc_i3c_master_emit_stop(master);
+
+ goto reenable_ibis;
+ }
+
+ /* Handle the non-critical tasks */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ if (dev) {
+ i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+ svc_i3c_master_emit_stop(master);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ default:
+ break;
+ }
+
+reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+}
+
+static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+ u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Handle the interrupt in a non-atomic context */
+ queue_work(master->base.wq, &master->ibi_work);
+
+ return IRQ_HANDLED;
+}
+
+static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ unsigned long fclk_rate, fclk_period_ns;
+ unsigned int high_period_ns, od_low_period_ns;
+ u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
+ int ret;
+
+ /* Timings derivation */
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate)
+ return -EINVAL;
+
+ fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+
+ /*
+ * Using I3C Push-Pull mode, the target is a 12.5MHz/80ns period.
+ * The simplest configuration is a 50% duty-cycle of 40ns.
+ */
+ ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ pplow = 0;
+
+ /*
+ * Using I3C Open-Drain mode, the target is a 4.17MHz/240ns period,
+ * with the duty-cycle tuned so that high levels are filtered out by
+ * the 50ns filter (the target high time being 40ns).
+ */
+ odhpp = 1;
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
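+ /*
+ * Worked example (illustrative only, assuming fclk = 100MHz, i.e. a 10ns
+ * period): ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3, so the push-pull high
+ * time is (3 + 1) * 10 = 40ns; odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4,
+ * so the open-drain low time is (4 + 1) * 40 = 200ns, for a total
+ * open-drain period of 40 + 200 = 240ns, i.e. roughly 4.17MHz.
+ */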
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ i2cbaud = 0;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ /*
+ * Using I2C Fm+ mode, the target is 1MHz/1000ns; the split
+ * between the high and low periods does not really matter.
+ */
+ i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ /*
+ * Using I2C Fm mode, the target is 0.4MHz/2500ns, with the
+ * same constraints as Fm+ mode.
+ */
+ i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ break;
+ default:
+ return -EINVAL;
+ }
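+ /*
+ * Continuing the illustrative 100MHz example above: with a 200ns
+ * open-drain low time, Fm+ mode yields i2cbaud = DIV_ROUND_UP(1000, 200)
+ * - 2 = 3, and Fm mode yields i2cbaud = DIV_ROUND_UP(2500, 200) - 2 = 11.
+ */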
+
+ reg = SVC_I3C_MCONFIG_MASTER_EN |
+ SVC_I3C_MCONFIG_DISTO(0) |
+ SVC_I3C_MCONFIG_HKEEP(0) |
+ SVC_I3C_MCONFIG_ODSTOP(0) |
+ SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
+ SVC_I3C_MCONFIG_PPLOW(pplow) |
+ SVC_I3C_MCONFIG_ODBAUD(odbaud) |
+ SVC_I3C_MCONFIG_ODHPP(odhpp) |
+ SVC_I3C_MCONFIG_SKEW(0) |
+ SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
+ writel(reg, master->regs + SVC_I3C_MCONFIG);
+
+ /* Master core's registration */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ info.dyn_addr = ret;
+
+ writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
+ master->regs + SVC_I3C_MDYNADDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+ return 0;
+}
+
+static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Disable master */
+ writel(0, master->regs + SVC_I3C_MCONFIG);
+}
+
+static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
+{
+ unsigned int slot;
+
+ if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
+ return -ENOSPC;
+
+ slot = ffs(master->free_slots) - 1;
+
+ master->free_slots &= ~BIT(slot);
+
+ return slot;
+}
+
+static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
+ unsigned int slot)
+{
+ master->free_slots |= BIT(slot);
+}
+
+static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->ibi = -1;
+ data->index = slot;
+ master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+ master->descs[slot] = dev;
+
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ master->addrs[data->index] = 0;
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->index = slot;
+ master->addrs[slot] = dev->addr;
+
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
+ unsigned int len)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < len; i++) {
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
+ if (ret)
+ return ret;
+
+ dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ u8 *addrs, unsigned int *count)
+{
+ u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
+ unsigned int dev_nb = 0, last_addr = 0;
+ u32 reg;
+ int ret, i;
+
+ while (true) {
+ /* Enter/proceed with DAA */
+ writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
+ master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * Either one slave will send its ID, or the assignment process
+ * is done.
+ */
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_RXPEND(reg) |
+ SVC_I3C_MSTATUS_MCTRLDONE(reg),
+ 1, 1000);
+ if (ret)
+ return ret;
+
+ if (SVC_I3C_MSTATUS_RXPEND(reg)) {
+ u8 data[6];
+
+ /*
+ * We only care about the 48-bit provisional ID here, so
+ * we can check that a device does not nack an address
+ * twice. Otherwise, we would just need to flush the RX
+ * FIFO.
+ */
+ ret = svc_i3c_master_readb(master, data, 6);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 6; i++)
+ prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
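+ /*
+ * Illustration: bytes 0x12 0x34 0x56 0x78 0x9a 0xbc
+ * arrive MSB first and yield
+ * prov_id = 0x123456789abc.
+ */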
+
+ /* We do not care about the BCR and DCR yet */
+ ret = svc_i3c_master_readb(master, data, 2);
+ if (ret)
+ return ret;
+ } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
+ if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
+ SVC_I3C_MSTATUS_COMPLETE(reg)) {
+ /*
+ * All devices received and acked their
+ * dynamic address; this is the natural end of
+ * the DAA procedure.
+ */
+ break;
+ } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /*
+ * A slave device nacked the address; this is
+ * allowed only once. DAA will be stopped and
+ * then resumed. The same device is supposed
+ * to answer again immediately and shall ack
+ * the address this time.
+ */
+ if (prov_id[dev_nb] == nacking_prov_id)
+ return -EIO;
+
+ dev_nb--;
+ nacking_prov_id = prov_id[dev_nb];
+ svc_i3c_master_emit_stop(master);
+
+ continue;
+ } else {
+ return -EIO;
+ }
+ }
+
+ /* Wait for the slave to be ready to receive its address */
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
+ SVC_I3C_MSTATUS_STATE_DAA(reg) &&
+ SVC_I3C_MSTATUS_BETWEEN(reg),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /* Give the slave device a suitable dynamic address */
+ ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
+ if (ret < 0)
+ return ret;
+
+ addrs[dev_nb] = ret;
+ dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
+ dev_nb, addrs[dev_nb]);
+
+ writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
+ last_addr = addrs[dev_nb++];
+ }
+
+ *count = dev_nb;
+
+ return 0;
+}
+
+static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
+{
+ struct i3c_dev_desc *dev;
+ u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
+ unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
+ nobyte_addr_ko = 0;
+ bool list_mbyte = false, list_nobyte = false;
+
+ /* Create the IBIRULES register for both cases */
+ i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
+ if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
+ continue;
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
+ reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ mbyte_addr_ko++;
+ else
+ mbyte_addr_ok++;
+ } else {
+ reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ nobyte_addr_ko++;
+ else
+ nobyte_addr_ok++;
+ }
+ }
+
+ /* Check whether each device list can be handled by the hardware */
+ if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_mbyte = true;
+
+ if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_nobyte = true;
+
+ /* No list can be properly handled, return an error */
+ if (!list_mbyte && !list_nobyte)
+ return -ERANGE;
+
+ /* Pick a list the hardware can handle; if both qualify, the choice is arbitrary */
+ if (list_mbyte)
+ writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
+ else
+ writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ unsigned long flags;
+ unsigned int dev_nb;
+ int ret, i;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+ if (ret)
+ goto emit_stop;
+
+ /* Register with the core all devices that participated in the DAA */
+ for (i = 0; i < dev_nb; i++) {
+ ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Configure IBI auto-rules */
+ ret = svc_i3c_update_ibirules(master);
+ if (ret) {
+ dev_err(master->dev, "Cannot handle such a list of devices");
+ return ret;
+ }
+
+ return 0;
+
+emit_stop:
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+
+ return ret;
+}
+
+static int svc_i3c_master_read(struct svc_i3c_master *master,
+ u8 *in, unsigned int len)
+{
+ int offset = 0, i, ret;
+ u32 mdctrl;
+
+ while (offset < len) {
+ unsigned int count;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
+ mdctrl,
+ !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
+ for (i = 0; i < count; i++)
+ in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
+
+ offset += count;
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_write(struct svc_i3c_master *master,
+ const u8 *out, unsigned int len)
+{
+ int offset = 0, ret;
+ u32 mdctrl;
+
+ while (offset < len) {
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
+ mdctrl,
+ !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /*
+ * The last byte to be sent over the bus must either have the
+ * "end" bit set or be written in MWDATABE.
+ */
+ if (likely(offset < (len - 1)))
+ writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
+ else
+ writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ bool rnw, unsigned int xfer_type, u8 addr,
+ u8 *in, const u8 *out, unsigned int xfer_len,
+ unsigned int read_len, bool continued)
+{
+ u32 reg;
+ int ret;
+
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(read_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ if (rnw)
+ ret = svc_i3c_master_read(master, in, xfer_len);
+ else
+ ret = svc_i3c_master_write(master, out, xfer_len);
+ if (ret)
+ goto emit_stop;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ if (!continued)
+ svc_i3c_master_emit_stop(master);
+
+ return 0;
+
+emit_stop:
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+
+ return ret;
+}
+
+static struct svc_i3c_xfer *
+svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
+{
+ struct svc_i3c_xfer *xfer;
+
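+ /* struct_size() accounts for the trailing flexible cmds[] array */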
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+{
+ struct svc_i3c_xfer *xfer = master->xferqueue.cur;
+ int ret, i;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ cmd->addr, cmd->in, cmd->out,
+ cmd->len, cmd->read_len,
+ cmd->continued);
+ if (ret)
+ break;
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0)
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct svc_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+}
+
+static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static bool
+svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd)
+{
+ /* No software support for CCC commands targeting more than one slave */
+ return (cmd->ndests == 1);
+}
+
+static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len + 1;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ u8 *buf;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ buf = kmalloc(xfer_len, GFP_KERNEL);
+ if (!buf) {
+ svc_i3c_master_free_xfer(xfer);
+ return -ENOMEM;
+ }
+
+ buf[0] = ccc->id;
+ memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ cmd = &xfer->cmds[0];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = NULL;
+ cmd->out = buf;
+ cmd->len = xfer_len;
+ cmd->read_len = 0;
+ cmd->continued = false;
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ kfree(buf);
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len;
+ unsigned int read_len = ccc->rnw ? xfer_len : 0;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 2);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ /* Broadcast message */
+ cmd = &xfer->cmds[0];
+ cmd->addr = I3C_BROADCAST_ADDR;
+ cmd->rnw = 0;
+ cmd->in = NULL;
+ cmd->out = &ccc->id;
+ cmd->len = 1;
+ cmd->read_len = 0;
+ cmd->continued = true;
+
+ /* Directed message */
+ cmd = &xfer->cmds[1];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
+ cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
+ cmd->len = xfer_len;
+ cmd->read_len = read_len;
+ cmd->continued = false;
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ bool broadcast = cmd->id < 0x80;
+
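+ /* Per the MIPI I3C specification, CCC IDs below 0x80 are broadcast */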
+ if (broadcast)
+ return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ else
+ return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+}
+
+static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].rnw;
+ cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
+ cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->len = xfers[i].len;
+ cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1) < nxfers;
+ }
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].flags & I2C_M_RD;
+ cmd->in = cmd->rnw ? xfers[i].buf : NULL;
+ cmd->out = cmd->rnw ? NULL : xfers[i].buf;
+ cmd->len = xfers[i].len;
+ cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1 < nxfers);
+ }
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
+ dev_err(master->dev, "IBI max payload %d should be < %d\n",
+ dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
+ return -ERANGE;
+ }
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+
+ return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops svc_i3c_master_ops = {
+ .bus_init = svc_i3c_master_bus_init,
+ .bus_cleanup = svc_i3c_master_bus_cleanup,
+ .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
+ .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
+ .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
+ .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
+ .do_daa = svc_i3c_master_do_daa,
+ .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
+ .priv_xfers = svc_i3c_master_priv_xfers,
+ .i2c_xfers = svc_i3c_master_i2c_xfers,
+ .request_ibi = svc_i3c_master_request_ibi,
+ .free_ibi = svc_i3c_master_free_ibi,
+ .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
+ .enable_ibi = svc_i3c_master_enable_ibi,
+ .disable_ibi = svc_i3c_master_disable_ibi,
+};
+
+static void svc_i3c_master_reset(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+ /* Clear pending warnings */
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+
+ /* Set RX and TX trigger levels, flush FIFOs */
+ reg = SVC_I3C_MDATACTRL_FLUSHTB |
+ SVC_I3C_MDATACTRL_FLUSHRB |
+ SVC_I3C_MDATACTRL_UNLOCK_TRIG |
+ SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
+ SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
+ writel(reg, master->regs + SVC_I3C_MDATACTRL);
+
+ svc_i3c_master_disable_interrupts(master);
+}
+
+static int svc_i3c_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct svc_i3c_master *master;
+ int ret;
+
+ master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->fclk = devm_clk_get(dev, "fast_clk");
+ if (IS_ERR(master->fclk))
+ return PTR_ERR(master->fclk);
+
+ master->sclk = devm_clk_get(dev, "slow_clk");
+ if (IS_ERR(master->sclk))
+ return PTR_ERR(master->sclk);
+
+ master->irq = platform_get_irq(pdev, 0);
+ if (master->irq <= 0)
+ return -ENOENT;
+
+ master->dev = dev;
+
+ svc_i3c_master_reset(master);
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->fclk);
+ if (ret)
+ goto err_disable_pclk;
+
+ ret = clk_prepare_enable(master->sclk);
+ if (ret)
+ goto err_disable_fclk;
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+ goto err_disable_sclk;
+
+ master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = SVC_I3C_MAX_DEVS;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_sclk;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ /* Register the master */
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &svc_i3c_master_ops, false);
+ if (ret)
+ goto err_disable_sclk;
+
+ return 0;
+
+err_disable_sclk:
+ clk_disable_unprepare(master->sclk);
+
+err_disable_fclk:
+ clk_disable_unprepare(master->fclk);
+
+err_disable_pclk:
+ clk_disable_unprepare(master->pclk);
+
+ return ret;
+}
+
+static int svc_i3c_master_remove(struct platform_device *pdev)
+{
+ struct svc_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ clk_disable_unprepare(master->sclk);
+
+ return 0;
+}
+
+static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
+ { .compatible = "silvaco,i3c-master" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver svc_i3c_master = {
+ .probe = svc_i3c_master_probe,
+ .remove = svc_i3c_master_remove,
+ .driver = {
+ .name = "silvaco-i3c-master",
+ .of_match_table = svc_i3c_master_of_match_tbl,
+ },
+};
+module_platform_driver(svc_i3c_master);
+
+MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 013ad33fbbc8..a1ce9f5ac3aa 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -107,7 +107,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
- blk_execute_rq(drive->queue, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 25d2d88e82ad..cffbcc27a34c 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -467,7 +467,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
}
- blk_execute_rq(drive->queue, info->disk, rq, 0);
+ blk_execute_rq(info->disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
if (buffer)
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 46f2df288c6a..011eab9c69b7 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -299,7 +299,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
- blk_execute_rq(drive->queue, cd->disk, rq, 0);
+ blk_execute_rq(cd->disk, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
/*
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index f2f93ed40356..ca1d4b3d3878 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -173,7 +173,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
*(int *)&scsi_req(rq)->cmd[1] = arg;
ide_req(rq)->special = setting->set;
- blk_execute_rq(q, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result;
blk_put_request(rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 34b9441084f8..8413731c6259 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -482,7 +482,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
blk_put_request(rq);
return (drive->mult_count == arg) ? 0 : -EIO;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 58994da10c06..43fbc37d85c3 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -137,7 +137,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
err = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -235,7 +235,7 @@ static int generic_drive_reset(ide_drive_t *drive)
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
- blk_execute_rq(drive->queue, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
ret = scsi_req(rq)->result;
blk_put_request(rq);
return ret;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 8af7af6001eb..a80a0f28f7b9 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -37,7 +37,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
ide_req(rq)->special = &timeout;
- blk_execute_rq(q, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
rc = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
if (rc)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 82ab308f1aaf..d680b3e3295f 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -27,7 +27,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
mesg.event = PM_EVENT_FREEZE;
rqpm.pm_state = mesg.event;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -50,7 +50,7 @@ static int ide_pm_execute_rq(struct request *rq)
blk_mq_end_request(rq, BLK_STS_OK);
return -ENXIO;
}
- blk_execute_rq(q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
return scsi_req(rq)->result ? -EIO : 0;
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 88b96437b22e..fa05e7e7d609 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -868,7 +868,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
goto out_put;
}
- blk_execute_rq(drive->queue, tape->disk, rq, 0);
+ blk_execute_rq(tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
size -= scsi_req(rq)->resid_len;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d016cbe68cba..6665fc4724b9 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -443,7 +443,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
ide_req(rq)->special = cmd;
cmd->rq = rq;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 28f93b9aa51b..3273360f30f7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -37,7 +37,7 @@
*/
/* un-comment DEBUG to enable pr_debug() statements */
-#define DEBUG
+/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 4c5e594024f8..5d63ed19e6e2 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -23,6 +23,7 @@ enum accel_3d_channel {
ACCEL_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP ACCEL_3D_CHANNEL_MAX
struct accel_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
@@ -75,7 +76,7 @@ static const struct iio_chan_spec accel_3d_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
},
- IIO_CHAN_SOFT_TIMESTAMP(3)
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Channel definitions */
@@ -110,7 +111,8 @@ static const struct iio_chan_spec gravity_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP),
};
/* Adjust channel real bits based on report descriptor */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index e92c7e6766e1..2fadafc860fd 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -133,6 +134,7 @@ enum kx_acpi_type {
};
struct kxcjk1013_data {
+ struct regulator_bulk_data regulators[2];
struct i2c_client *client;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
@@ -1300,6 +1302,13 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static void kxcjk1013_disable_regulators(void *d)
+{
+ struct kxcjk1013_data *data = d;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
static int kxcjk1013_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1330,6 +1339,29 @@ static int kxcjk1013_probe(struct i2c_client *client,
return ret;
}
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(&client->dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, kxcjk1013_disable_regulators, data);
+ if (ret)
+ return ret;
+
+ /*
+ * A typical delay of 10ms is required for powering up
+ * according to the data sheets of supported chips.
+ * Hence double that to play safe.
+ */
+ msleep(20);
+
if (id) {
data->chipset = (enum kx_chipset)(id->driver_data);
name = id->name;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 15587a1bc80d..bf7d22fa4be2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1228,8 +1228,15 @@ config XILINX_XADC
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to have support for the Xilinx XADC. The driver does support
- both the ZYNQ interface to the XADC as well as the AXI-XADC interface.
+ Say yes here to have support for the Xilinx 7 Series XADC or
+ UltraScale/UltraScale+ System Management Wizard.
+
+ For the 7 Series the driver supports both the ZYNQ interface
+ to the XADC and the AXI-XADC interface.
+
+ The driver also supports the Xilinx System Management Wizard IP core
+ that can be used to access the System Monitor ADC on the Xilinx
+ UltraScale and UltraScale+ FPGAs.
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 1bb987a4acba..6f9a3e2d5533 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -1108,10 +1108,14 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
return gpadc->irq_sw;
}
- gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
- if (gpadc->irq_hw < 0) {
- dev_err(dev, "failed to get platform hw_conv_end irq\n");
- return gpadc->irq_hw;
+ if (is_ab8500(gpadc->ab8500)) {
+ gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+ if (gpadc->irq_hw < 0) {
+ dev_err(dev, "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
+ }
+ } else {
+ gpadc->irq_hw = 0;
}
/* Initialize completion used to notify completion of conversion */
@@ -1128,14 +1132,16 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
- ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
- "ab8500-gpadc-hw", gpadc);
- if (ret < 0) {
- dev_err(dev,
- "Failed to request hw conversion irq: %d\n",
- gpadc->irq_hw);
- return ret;
+ if (gpadc->irq_hw) {
+ ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-hw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to request hw conversion irq: %d\n",
+ gpadc->irq_hw);
+ return ret;
+ }
}
/* The VTVout LDO used to power the AB8500 GPADC */
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 66c55ae67791..17402714b387 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -67,6 +67,7 @@ enum ad7476_supported_device_ids {
ID_ADS7866,
ID_ADS7867,
ID_ADS7868,
+ ID_LTC2314_14,
};
static void ad7091_convst(struct ad7476_state *st)
@@ -250,6 +251,10 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
.channel[0] = ADS786X_CHAN(8),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
+ [ID_LTC2314_14] = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
};
static const struct iio_info ad7476_info = {
@@ -365,6 +370,7 @@ static const struct spi_device_id ad7476_id[] = {
{"ads7866", ID_ADS7866},
{"ads7867", ID_ADS7867},
{"ads7868", ID_ADS7868},
+ {"ltc2314-14", ID_LTC2314_14},
{}
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 7e108da7d255..0610bf254771 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -10,6 +10,7 @@
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
@@ -21,8 +22,6 @@
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
-#include "qcom-vadc-common.h"
-
/*
* Definitions for the "user processor" registers lifted from the v3.4
* Qualcomm tree. Their kernel has two out-of-tree drivers for the ADC:
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index c10aa28be70a..87438d1e5c0b 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -7,6 +7,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -14,12 +15,12 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/iio/qcom,spmi-vadc.h>
-#include "qcom-vadc-common.h"
#define ADC5_USR_REVISION1 0x0
#define ADC5_USR_STATUS1 0x8
@@ -154,18 +155,6 @@ struct adc5_chip {
const struct adc5_data *data;
};
-static const struct vadc_prescale_ratio adc5_prescale_ratios[] = {
- {.num = 1, .den = 1},
- {.num = 1, .den = 3},
- {.num = 1, .den = 4},
- {.num = 1, .den = 6},
- {.num = 1, .den = 20},
- {.num = 1, .den = 8},
- {.num = 10, .den = 81},
- {.num = 1, .den = 10},
- {.num = 1, .den = 16}
-};
-
static int adc5_read(struct adc5_chip *adc, u16 offset, u8 *data, int len)
{
return regmap_bulk_read(adc->regmap, adc->base + offset, data, len);
@@ -181,55 +170,6 @@ static int adc5_masked_write(struct adc5_chip *adc, u16 offset, u8 mask, u8 val)
return regmap_update_bits(adc->regmap, adc->base + offset, mask, val);
}
-static int adc5_prescaling_from_dt(u32 num, u32 den)
-{
- unsigned int pre;
-
- for (pre = 0; pre < ARRAY_SIZE(adc5_prescale_ratios); pre++)
- if (adc5_prescale_ratios[pre].num == num &&
- adc5_prescale_ratios[pre].den == den)
- break;
-
- if (pre == ARRAY_SIZE(adc5_prescale_ratios))
- return -EINVAL;
-
- return pre;
-}
-
-static int adc5_hw_settle_time_from_dt(u32 value,
- const unsigned int *hw_settle)
-{
- unsigned int i;
-
- for (i = 0; i < VADC_HW_SETTLE_SAMPLES_MAX; i++) {
- if (value == hw_settle[i])
- return i;
- }
-
- return -EINVAL;
-}
-
-static int adc5_avg_samples_from_dt(u32 value)
-{
- if (!is_power_of_2(value) || value > ADC5_AVG_SAMPLES_MAX)
- return -EINVAL;
-
- return __ffs(value);
-}
-
-static int adc5_decimation_from_dt(u32 value,
- const unsigned int *decimation)
-{
- unsigned int i;
-
- for (i = 0; i < ADC5_DECIMATION_SAMPLES_MAX; i++) {
- if (value == decimation[i])
- return i;
- }
-
- return -EINVAL;
-}
-
static int adc5_read_voltage_data(struct adc5_chip *adc, u16 *data)
{
int ret;
@@ -511,7 +451,7 @@ static int adc_read_raw_common(struct iio_dev *indio_dev,
return ret;
ret = qcom_adc5_hw_scale(prop->scale_fn_type,
- &adc5_prescale_ratios[prop->prescale],
+ prop->prescale,
adc->data,
adc_code_volt, val);
if (ret)
@@ -717,7 +657,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32(node, "qcom,decimation", &value);
if (!ret) {
- ret = adc5_decimation_from_dt(value, data->decimation);
+ ret = qcom_adc5_decimation_from_dt(value, data->decimation);
if (ret < 0) {
dev_err(dev, "%02x invalid decimation %d\n",
chan, value);
@@ -730,7 +670,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
if (!ret) {
- ret = adc5_prescaling_from_dt(varr[0], varr[1]);
+ ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
dev_err(dev, "%02x invalid pre-scaling <%d %d>\n",
chan, varr[0], varr[1]);
@@ -759,11 +699,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
if ((dig_version[0] >= ADC5_HW_SETTLE_DIFF_MINOR &&
dig_version[1] >= ADC5_HW_SETTLE_DIFF_MAJOR) ||
adc->data->info == &adc7_info)
- ret = adc5_hw_settle_time_from_dt(value,
- data->hw_settle_2);
+ ret = qcom_adc5_hw_settle_time_from_dt(value, data->hw_settle_2);
else
- ret = adc5_hw_settle_time_from_dt(value,
- data->hw_settle_1);
+ ret = qcom_adc5_hw_settle_time_from_dt(value, data->hw_settle_1);
if (ret < 0) {
dev_err(dev, "%02x invalid hw-settle-time %d us\n",
@@ -777,7 +715,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32(node, "qcom,avg-samples", &value);
if (!ret) {
- ret = adc5_avg_samples_from_dt(value);
+ ret = qcom_adc5_avg_samples_from_dt(value);
if (ret < 0) {
dev_err(dev, "%02x invalid avg-samples %d\n",
chan, value);
@@ -870,8 +808,6 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
struct adc5_channel_prop prop, *chan_props;
struct device_node *child;
unsigned int index = 0;
- const struct of_device_id *id;
- const struct adc5_data *data;
int ret;
adc->nchannels = of_get_available_child_count(node);
@@ -890,24 +826,21 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
chan_props = adc->chan_props;
iio_chan = adc->iio_chans;
- id = of_match_node(adc5_match_table, node);
- if (id)
- data = id->data;
- else
- data = &adc5_data_pmic;
- adc->data = data;
+ adc->data = of_device_get_match_data(adc->dev);
+ if (!adc->data)
+ adc->data = &adc5_data_pmic;
for_each_available_child_of_node(node, child) {
- ret = adc5_get_dt_channel_data(adc, &prop, child, data);
+ ret = adc5_get_dt_channel_data(adc, &prop, child, adc->data);
if (ret) {
of_node_put(child);
return ret;
}
prop.scale_fn_type =
- data->adc_chans[prop.channel].scale_fn_type;
+ adc->data->adc_chans[prop.channel].scale_fn_type;
*chan_props = prop;
- adc_chan = &data->adc_chans[prop.channel];
+ adc_chan = &adc->data->adc_chans[prop.channel];
iio_chan->channel = prop.channel;
iio_chan->datasheet_name = prop.datasheet_name;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index b0388f8a69f4..05ff948372b3 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -7,6 +7,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -20,8 +21,6 @@
#include <dt-bindings/iio/qcom,spmi-vadc.h>
-#include "qcom-vadc-common.h"
-
/* VADC register and bit definitions */
#define VADC_REVISION2 0x1
#define VADC_REVISION2_SUPPORTED_VADC 1
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index 5113aaa6ba67..14723896aab2 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -2,50 +2,61 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/fixp-arith.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/math64.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/units.h>
-#include "qcom-vadc-common.h"
+/**
+ * struct vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represents the ADC digitized code.
+ * @y: Represents the physical data, which can be temperature, voltage or
+ * resistance.
+ */
+struct vadc_map_pt {
+ s32 x;
+ s32 y;
+};
/* Voltage to temperature */
static const struct vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {1758, -40},
- {1742, -35},
- {1719, -30},
- {1691, -25},
- {1654, -20},
- {1608, -15},
- {1551, -10},
- {1483, -5},
- {1404, 0},
- {1315, 5},
- {1218, 10},
- {1114, 15},
- {1007, 20},
- {900, 25},
- {795, 30},
- {696, 35},
- {605, 40},
- {522, 45},
- {448, 50},
- {383, 55},
- {327, 60},
- {278, 65},
- {237, 70},
- {202, 75},
- {172, 80},
- {146, 85},
- {125, 90},
- {107, 95},
- {92, 100},
- {79, 105},
- {68, 110},
- {59, 115},
- {51, 120},
- {44, 125}
+ {1758, -40000 },
+ {1742, -35000 },
+ {1719, -30000 },
+ {1691, -25000 },
+ {1654, -20000 },
+ {1608, -15000 },
+ {1551, -10000 },
+ {1483, -5000 },
+ {1404, 0 },
+ {1315, 5000 },
+ {1218, 10000 },
+ {1114, 15000 },
+ {1007, 20000 },
+ {900, 25000 },
+ {795, 30000 },
+ {696, 35000 },
+ {605, 40000 },
+ {522, 45000 },
+ {448, 50000 },
+ {383, 55000 },
+ {327, 60000 },
+ {278, 65000 },
+ {237, 70000 },
+ {202, 75000 },
+ {172, 80000 },
+ {146, 85000 },
+ {125, 90000 },
+ {107, 95000 },
+ {92, 100000 },
+ {79, 105000 },
+ {68, 110000 },
+ {59, 115000 },
+ {51, 120000 },
+ {44, 125000 }
};
/*
@@ -90,18 +101,18 @@ static const struct vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
};
static const struct vadc_map_pt adcmap7_die_temp[] = {
- { 433700, 1967},
- { 473100, 1964},
- { 512400, 1957},
- { 551500, 1949},
- { 590500, 1940},
- { 629300, 1930},
- { 667900, 1921},
- { 706400, 1910},
- { 744600, 1896},
- { 782500, 1878},
- { 820100, 1859},
- { 857300, 0},
+ { 857300, 160000 },
+ { 820100, 140000 },
+ { 782500, 120000 },
+ { 744600, 100000 },
+ { 706400, 80000 },
+ { 667900, 60000 },
+ { 629300, 40000 },
+ { 590500, 20000 },
+ { 551500, 0 },
+ { 512400, -20000 },
+ { 473100, -40000 },
+ { 433700, -60000 },
};
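
The reworked die-temp map stores absolute temperatures in millidegrees Celsius against a descending voltage column, so it can be handed straight to qcom_vadc_map_voltage_temp() below; the old table apparently carried per-segment scale factors rather than temperatures. A quick sanity check, assuming the x column is in microvolts as the old bounds checks implied:

    int temp;

    /* An input sitting exactly on an entry maps straight to its y. */
    qcom_vadc_map_voltage_temp(adcmap7_die_temp,
                               ARRAY_SIZE(adcmap7_die_temp),
                               551500, &temp);    /* temp == 0 mdegC */

    /* Anything above the first x clamps through the i == 0 branch. */
    qcom_vadc_map_voltage_temp(adcmap7_die_temp,
                               ARRAY_SIZE(adcmap7_die_temp),
                               900000, &temp);    /* temp == 160000 */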
/*
@@ -278,6 +289,18 @@ static const struct vadc_map_pt adcmap7_100k[] = {
{ 2420, 130048 }
};
+static const struct vadc_prescale_ratio adc5_prescale_ratios[] = {
+ {.num = 1, .den = 1},
+ {.num = 1, .den = 3},
+ {.num = 1, .den = 4},
+ {.num = 1, .den = 6},
+ {.num = 1, .den = 20},
+ {.num = 1, .den = 8},
+ {.num = 10, .den = 81},
+ {.num = 1, .den = 10},
+ {.num = 1, .den = 16}
+};
+
static int qcom_vadc_scale_hw_calib_volt(
const struct vadc_prescale_ratio *prescale,
const struct adc5_data *data,
@@ -323,48 +346,50 @@ static struct qcom_adc5_scale_type scale_adc5_fn[] = {
static int qcom_vadc_map_voltage_temp(const struct vadc_map_pt *pts,
u32 tablesize, s32 input, int *output)
{
- bool descending = 1;
u32 i = 0;
if (!pts)
return -EINVAL;
- /* Check if table is descending or ascending */
- if (tablesize > 1) {
- if (pts[0].x < pts[1].x)
- descending = 0;
- }
-
- while (i < tablesize) {
- if ((descending) && (pts[i].x < input)) {
- /* table entry is less than measured*/
- /* value and table is descending, stop */
- break;
- } else if ((!descending) &&
- (pts[i].x > input)) {
- /* table entry is greater than measured*/
- /*value and table is ascending, stop */
- break;
- }
+ while (i < tablesize && pts[i].x > input)
i++;
- }
if (i == 0) {
*output = pts[0].y;
} else if (i == tablesize) {
*output = pts[tablesize - 1].y;
} else {
- /* result is between search_index and search_index-1 */
/* interpolate linearly */
- *output = (((s32)((pts[i].y - pts[i - 1].y) *
- (input - pts[i - 1].x)) /
- (pts[i].x - pts[i - 1].x)) +
- pts[i - 1].y);
+ *output = fixp_linear_interpolate(pts[i - 1].x, pts[i - 1].y,
+ pts[i].x, pts[i].y,
+ input);
}
return 0;
}
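
fixp_linear_interpolate() from linux/fixp-arith.h evaluates y0 + (y1 - y0) * (x - x0) / (x1 - x0) in integer arithmetic, i.e. exactly the open-coded expression it replaces. A worked example against the 100k thermistor map above:

    int temp;

    /* ADC code 1500 falls between {1551, -10000} and {1483, -5000}: */
    qcom_vadc_map_voltage_temp(adcmap_100k_104ef_104fb,
                               ARRAY_SIZE(adcmap_100k_104ef_104fb),
                               1500, &temp);
    /* temp == -10000 + 5000 * (1500 - 1551) / (1483 - 1551) == -6250 */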
+static s32 qcom_vadc_map_temp_voltage(const struct vadc_map_pt *pts,
+ u32 tablesize, int input)
+{
+ u32 i = 0;
+
+ /*
+ * The table must be sorted by ascending 'y'; find the interval of 'y'
+ * that contains 'input' and map it to the proper 'x' value
+ */
+ while (i < tablesize && pts[i].y < input)
+ i++;
+
+ if (i == 0)
+ return pts[0].x;
+ if (i == tablesize)
+ return pts[tablesize - 1].x;
+
+ /* interpolate linearly */
+ return fixp_linear_interpolate(pts[i - 1].y, pts[i - 1].x,
+ pts[i].y, pts[i].x, input);
+}
+
static void qcom_vadc_scale_calib(const struct vadc_linear_graph *calib_graph,
u16 adc_code,
bool absolute,
@@ -415,8 +440,6 @@ static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph,
if (ret)
return ret;
- *result_mdec *= 1000;
-
return 0;
}
@@ -462,6 +485,21 @@ static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph,
return 0;
}
+/* convert voltage to ADC code, using 1.875V reference */
+static u16 qcom_vadc_scale_voltage_code(s32 voltage,
+ const struct vadc_prescale_ratio *prescale,
+ const u32 full_scale_code_volt,
+ unsigned int factor)
+{
+ s64 volt = voltage;
+ s64 adc_vdd_ref_mv = 1875; /* reference voltage */
+
+ volt *= prescale->num * factor * full_scale_code_volt;
+ volt = div64_s64(volt, (s64)prescale->den * adc_vdd_ref_mv * 1000);
+
+ return volt;
+}
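
The conversion implemented here is code = V * num * factor * full_scale_code_volt / (den * 1875 * 1000). A sketch of a sanity check (units assumed to be millivolts with factor == 1000, matching the tm5 caller further down; 0x70e4 is the ADC5 full-scale constant from the removed header): scaling the 1.875 V reference itself through a 1:1 prescaler must return full scale:

    const struct vadc_prescale_ratio one_to_one = { .num = 1, .den = 1 };
    u16 code;

    /* 1875 * 1 * 1000 * 0x70e4 / (1 * 1875 * 1000) == 0x70e4 */
    code = qcom_vadc_scale_voltage_code(1875, &one_to_one, 0x70e4, 1000);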
+
static int qcom_vadc_scale_code_voltage_factor(u16 adc_code,
const struct vadc_prescale_ratio *prescale,
const struct adc5_data *data,
@@ -563,33 +601,13 @@ static int qcom_vadc7_scale_hw_calib_die_temp(
u16 adc_code, int *result_mdec)
{
- int voltage, vtemp0, temp, i;
+ int voltage;
voltage = qcom_vadc_scale_code_voltage_factor(adc_code,
prescale, data, 1);
- if (adcmap7_die_temp[0].x > voltage) {
- *result_mdec = DIE_TEMP_ADC7_SCALE_1;
- return 0;
- }
-
- if (adcmap7_die_temp[ARRAY_SIZE(adcmap7_die_temp) - 1].x <= voltage) {
- *result_mdec = DIE_TEMP_ADC7_MAX;
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(adcmap7_die_temp); i++)
- if (adcmap7_die_temp[i].x > voltage)
- break;
-
- vtemp0 = adcmap7_die_temp[i - 1].x;
- voltage = voltage - vtemp0;
- temp = div64_s64(voltage * DIE_TEMP_ADC7_SCALE_FACTOR,
- adcmap7_die_temp[i - 1].y);
- temp += DIE_TEMP_ADC7_SCALE_1 + (DIE_TEMP_ADC7_SCALE_2 * (i - 1));
- *result_mdec = temp;
-
- return 0;
+ return qcom_vadc_map_voltage_temp(adcmap7_die_temp, ARRAY_SIZE(adcmap7_die_temp),
+ voltage, result_mdec);
}
static int qcom_vadc_scale_hw_smb_temp(
@@ -646,11 +664,26 @@ int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
}
EXPORT_SYMBOL(qcom_vadc_scale);
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp)
+{
+ const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio];
+ s32 voltage;
+
+ voltage = qcom_vadc_map_temp_voltage(adcmap_100k_104ef_104fb_1875_vref,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+ temp);
+ return qcom_vadc_scale_voltage_code(voltage, prescale, full_scale_code_volt, 1000);
+}
+EXPORT_SYMBOL(qcom_adc_tm5_temp_volt_scale);
+
int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
- const struct vadc_prescale_ratio *prescale,
+ unsigned int prescale_ratio,
const struct adc5_data *data,
u16 adc_code, int *result)
{
+ const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio];
+
if (!(scaletype >= SCALE_HW_CALIB_DEFAULT &&
scaletype < SCALE_HW_CALIB_INVALID)) {
pr_err("Invalid scale type %d\n", scaletype);
@@ -662,6 +695,58 @@ int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
}
EXPORT_SYMBOL(qcom_adc5_hw_scale);
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den)
+{
+ unsigned int pre;
+
+ for (pre = 0; pre < ARRAY_SIZE(adc5_prescale_ratios); pre++)
+ if (adc5_prescale_ratios[pre].num == num &&
+ adc5_prescale_ratios[pre].den == den)
+ break;
+
+ if (pre == ARRAY_SIZE(adc5_prescale_ratios))
+ return -EINVAL;
+
+ return pre;
+}
+EXPORT_SYMBOL(qcom_adc5_prescaling_from_dt);
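
The return value is an index into adc5_prescale_ratios[] above, so a devicetree ratio round-trips into the register encoding; for example:

    /* "qcom,pre-scaling = <1 3>" resolves to index 1, the 1:3 entry */
    int idx = qcom_adc5_prescaling_from_dt(1, 3);    /* idx == 1 */

    /* a ratio missing from the table is rejected */
    int err = qcom_adc5_prescaling_from_dt(2, 3);    /* err == -EINVAL */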
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value,
+ const unsigned int *hw_settle)
+{
+ unsigned int i;
+
+ for (i = 0; i < VADC_HW_SETTLE_SAMPLES_MAX; i++) {
+ if (value == hw_settle[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qcom_adc5_hw_settle_time_from_dt);
+
+int qcom_adc5_avg_samples_from_dt(u32 value)
+{
+ if (!is_power_of_2(value) || value > ADC5_AVG_SAMPLES_MAX)
+ return -EINVAL;
+
+ return __ffs(value);
+}
+EXPORT_SYMBOL(qcom_adc5_avg_samples_from_dt);
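
Average-sample counts are programmed as log2 of the requested value, so only powers of two up to ADC5_AVG_SAMPLES_MAX (16, per the removed header) pass:

    int n = qcom_adc5_avg_samples_from_dt(16);    /* __ffs(16) == 4 */
    int e = qcom_adc5_avg_samples_from_dt(3);     /* e == -EINVAL */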
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation)
+{
+ unsigned int i;
+
+ for (i = 0; i < ADC5_DECIMATION_SAMPLES_MAX; i++) {
+ if (value == decimation[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qcom_adc5_decimation_from_dt);
+
int qcom_vadc_decimation_from_dt(u32 value)
{
if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN ||
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
deleted file mode 100644
index 17b2fc4d8bf2..000000000000
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Code shared between the different Qualcomm PMIC voltage ADCs
- */
-
-#ifndef QCOM_VADC_COMMON_H
-#define QCOM_VADC_COMMON_H
-
-#define VADC_CONV_TIME_MIN_US 2000
-#define VADC_CONV_TIME_MAX_US 2100
-
-/* Min ADC code represents 0V */
-#define VADC_MIN_ADC_CODE 0x6000
-/* Max ADC code represents full-scale range of 1.8V */
-#define VADC_MAX_ADC_CODE 0xa800
-
-#define VADC_ABSOLUTE_RANGE_UV 625000
-#define VADC_RATIOMETRIC_RANGE 1800
-
-#define VADC_DEF_PRESCALING 0 /* 1:1 */
-#define VADC_DEF_DECIMATION 0 /* 512 */
-#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
-#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
-#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
-
-#define VADC_DECIMATION_MIN 512
-#define VADC_DECIMATION_MAX 4096
-#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */
-#define ADC5_DECIMATION_SHORT 250
-#define ADC5_DECIMATION_MEDIUM 420
-#define ADC5_DECIMATION_LONG 840
-/* Default decimation - 1024 for rev2, 840 for pmic5 */
-#define ADC5_DECIMATION_DEFAULT 2
-#define ADC5_DECIMATION_SAMPLES_MAX 3
-
-#define VADC_HW_SETTLE_DELAY_MAX 10000
-#define VADC_HW_SETTLE_SAMPLES_MAX 16
-#define VADC_AVG_SAMPLES_MAX 512
-#define ADC5_AVG_SAMPLES_MAX 16
-
-#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
-#define PMIC5_SMB_TEMP_CONSTANT 419400
-#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
-
-#define PMI_CHG_SCALE_1 -138890
-#define PMI_CHG_SCALE_2 391750000000LL
-
-#define VADC5_MAX_CODE 0x7fff
-#define ADC5_FULL_SCALE_CODE 0x70e4
-#define ADC5_USR_DATA_CHECK 0x8000
-
-#define R_PU_100K 100000
-#define RATIO_MAX_ADC7 BIT(14)
-
-#define DIE_TEMP_ADC7_SCALE_1 -60000
-#define DIE_TEMP_ADC7_SCALE_2 20000
-#define DIE_TEMP_ADC7_SCALE_FACTOR 1000
-#define DIE_TEMP_ADC7_MAX 160000
-
-/**
- * struct vadc_map_pt - Map the graph representation for ADC channel
- * @x: Represent the ADC digitized code.
- * @y: Represent the physical data which can be temperature, voltage,
- * resistance.
- */
-struct vadc_map_pt {
- s32 x;
- s32 y;
-};
-
-/*
- * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
- * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
- * calibration.
- */
-enum vadc_calibration {
- VADC_CALIB_ABSOLUTE = 0,
- VADC_CALIB_RATIOMETRIC
-};
-
-/**
- * struct vadc_linear_graph - Represent ADC characteristics.
- * @dy: numerator slope to calculate the gain.
- * @dx: denominator slope to calculate the gain.
- * @gnd: A/D word of the ground reference used for the channel.
- *
- * Each ADC device has different offset and gain parameters which are
- * computed to calibrate the device.
- */
-struct vadc_linear_graph {
- s32 dy;
- s32 dx;
- s32 gnd;
-};
-
-/**
- * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
- * @num: the inverse numerator of the gain applied to the input channel.
- * @den: the inverse denominator of the gain applied to the input channel.
- */
-struct vadc_prescale_ratio {
- u32 num;
- u32 den;
-};
-
-/**
- * enum vadc_scale_fn_type - Scaling function to convert ADC code to
- * physical scaled units for the channel.
- * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
- * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
- * Uses a mapping table with 100K pullup.
- * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
- * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
- * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
- * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
- * voltage (uV) with hardware applied offset/slope values to adc code.
- * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
- * lookup table. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
- * 100k pullup. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
- * lookup table for PMIC7. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
- * The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
- * The hardware applies offset/slope to adc code. This is for PMIC7.
- * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
- * charger temperature.
- * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
- * SMB1390 temperature.
- */
-enum vadc_scale_fn_type {
- SCALE_DEFAULT = 0,
- SCALE_THERM_100K_PULLUP,
- SCALE_PMIC_THERM,
- SCALE_XOTHERM,
- SCALE_PMI_CHG_TEMP,
- SCALE_HW_CALIB_DEFAULT,
- SCALE_HW_CALIB_THERM_100K_PULLUP,
- SCALE_HW_CALIB_XOTHERM,
- SCALE_HW_CALIB_THERM_100K_PU_PM7,
- SCALE_HW_CALIB_PMIC_THERM,
- SCALE_HW_CALIB_PMIC_THERM_PM7,
- SCALE_HW_CALIB_PM5_CHG_TEMP,
- SCALE_HW_CALIB_PM5_SMB_TEMP,
- SCALE_HW_CALIB_INVALID,
-};
-
-struct adc5_data {
- const u32 full_scale_code_volt;
- const u32 full_scale_code_cur;
- const struct adc5_channels *adc_chans;
- const struct iio_info *info;
- unsigned int *decimation;
- unsigned int *hw_settle_1;
- unsigned int *hw_settle_2;
-};
-
-int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
- const struct vadc_linear_graph *calib_graph,
- const struct vadc_prescale_ratio *prescale,
- bool absolute,
- u16 adc_code, int *result_mdec);
-
-struct qcom_adc5_scale_type {
- int (*scale_fn)(const struct vadc_prescale_ratio *prescale,
- const struct adc5_data *data, u16 adc_code, int *result);
-};
-
-int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
- const struct vadc_prescale_ratio *prescale,
- const struct adc5_data *data,
- u16 adc_code, int *result_mdec);
-
-int qcom_vadc_decimation_from_dt(u32 value);
-
-#endif /* QCOM_VADC_COMMON_H */
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index aa32a1f385e2..301cf66de695 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -307,7 +307,7 @@ static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
sc27xx_adc_volt_ratio(data, channel, scale, &numerator, &denominator);
- return (volt * denominator + numerator / 2) / numerator;
+ return DIV_ROUND_CLOSEST(volt * denominator, numerator);
}
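
For non-negative operands DIV_ROUND_CLOSEST(a, b) expands to (a + b / 2) / b, so this is the same round-to-nearest arithmetic as before, just spelled with the stock helper:

    int a = (1000 * 82 + 100 / 2) / 100;          /* 820 */
    int b = DIV_ROUND_CLOSEST(1000 * 82, 100);    /* also 820 */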
static int sc27xx_adc_read_processed(struct sc27xx_adc_data *data,
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 9d1ad6e38e85..c088cb990193 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -535,20 +535,16 @@ static int stm32_adc_core_hw_start(struct device *dev)
goto err_switches_dis;
}
- if (priv->bclk) {
- ret = clk_prepare_enable(priv->bclk);
- if (ret < 0) {
- dev_err(dev, "bus clk enable failed\n");
- goto err_regulator_disable;
- }
+ ret = clk_prepare_enable(priv->bclk);
+ if (ret < 0) {
+ dev_err(dev, "bus clk enable failed\n");
+ goto err_regulator_disable;
}
- if (priv->aclk) {
- ret = clk_prepare_enable(priv->aclk);
- if (ret < 0) {
- dev_err(dev, "adc clk enable failed\n");
- goto err_bclk_disable;
- }
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "adc clk enable failed\n");
+ goto err_bclk_disable;
}
writel_relaxed(priv->ccr_bak, priv->common.base + priv->cfg->regs->ccr);
@@ -556,8 +552,7 @@ static int stm32_adc_core_hw_start(struct device *dev)
return 0;
err_bclk_disable:
- if (priv->bclk)
- clk_disable_unprepare(priv->bclk);
+ clk_disable_unprepare(priv->bclk);
err_regulator_disable:
regulator_disable(priv->vref);
err_switches_dis:
@@ -575,10 +570,8 @@ static void stm32_adc_core_hw_stop(struct device *dev)
/* Backup CCR that may be lost (depends on power state to achieve) */
priv->ccr_bak = readl_relaxed(priv->common.base + priv->cfg->regs->ccr);
- if (priv->aclk)
- clk_disable_unprepare(priv->aclk);
- if (priv->bclk)
- clk_disable_unprepare(priv->bclk);
+ clk_disable_unprepare(priv->aclk);
+ clk_disable_unprepare(priv->bclk);
regulator_disable(priv->vref);
stm32_adc_core_switches_supply_dis(priv);
regulator_disable(priv->vdda);
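
These simplifications lean on the clk API accepting NULL: clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a no-op, so an optional clock that was never obtained needs no guard. A minimal sketch:

    static int enable_optional_clk(struct clk *clk)
    {
        /* clk may be NULL; the clk API treats that as a dummy clock */
        int ret = clk_prepare_enable(clk);    /* 0 when clk == NULL */

        if (ret)
            return ret;
        clk_disable_unprepare(clk);           /* no-op when NULL */
        return 0;
    }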
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index c067c994dae2..f7c53cea509a 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -546,8 +546,7 @@ static int stm32_adc_hw_stop(struct device *dev)
if (adc->cfg->unprepare)
adc->cfg->unprepare(indio_dev);
- if (adc->clk)
- clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->clk);
return 0;
}
@@ -558,11 +557,9 @@ static int stm32_adc_hw_start(struct device *dev)
struct stm32_adc *adc = iio_priv(indio_dev);
int ret;
- if (adc->clk) {
- ret = clk_prepare_enable(adc->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(adc->clk);
+ if (ret)
+ return ret;
stm32_adc_set_res(adc);
@@ -575,8 +572,7 @@ static int stm32_adc_hw_start(struct device *dev)
return 0;
err_clk_dis:
- if (adc->clk)
- clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->clk);
return ret;
}
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 42a7377704a4..bb925a11c8ae 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -117,8 +117,7 @@ static void stm32_dfsdm_clk_disable_unprepare(struct stm32_dfsdm *dfsdm)
{
struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
- if (priv->aclk)
- clk_disable_unprepare(priv->aclk);
+ clk_disable_unprepare(priv->aclk);
clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b11c8c47ba2a..e946903b0993 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index f93c34fe5873..34800dccbf69 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -92,7 +93,12 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_REG_GIER 0x5c
#define XADC_AXI_REG_IPISR 0x60
#define XADC_AXI_REG_IPIER 0x68
-#define XADC_AXI_ADC_REG_OFFSET 0x200
+
+/* 7 Series */
+#define XADC_7S_AXI_ADC_REG_OFFSET 0x200
+
+/* UltraScale */
+#define XADC_US_AXI_ADC_REG_OFFSET 0x400
#define XADC_AXI_RESET_MAGIC 0xa
#define XADC_AXI_GIER_ENABLE BIT(31)
@@ -447,6 +453,12 @@ static const struct xadc_ops xadc_zynq_ops = {
.get_dclk_rate = xadc_zynq_get_dclk_rate,
.interrupt_handler = xadc_zynq_interrupt_handler,
.update_alarm = xadc_zynq_update_alarm,
+ .type = XADC_TYPE_S7,
+};
+
+static const unsigned int xadc_axi_reg_offsets[] = {
+ [XADC_TYPE_S7] = XADC_7S_AXI_ADC_REG_OFFSET,
+ [XADC_TYPE_US] = XADC_US_AXI_ADC_REG_OFFSET,
};
static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -454,7 +466,8 @@ static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
{
uint32_t val32;
- xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
+ xadc_read_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
+ &val32);
*val = val32 & 0xffff;
return 0;
@@ -463,7 +476,8 @@ static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
uint16_t val)
{
- xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);
+ xadc_write_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
+ val);
return 0;
}
@@ -541,7 +555,7 @@ static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
return clk_get_rate(xadc->clk);
}
-static const struct xadc_ops xadc_axi_ops = {
+static const struct xadc_ops xadc_7s_axi_ops = {
.read = xadc_axi_read_adc_reg,
.write = xadc_axi_write_adc_reg,
.setup = xadc_axi_setup,
@@ -549,6 +563,18 @@ static const struct xadc_ops xadc_axi_ops = {
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED,
+ .type = XADC_TYPE_S7,
+};
+
+static const struct xadc_ops xadc_us_axi_ops = {
+ .read = xadc_axi_read_adc_reg,
+ .write = xadc_axi_write_adc_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+ .type = XADC_TYPE_US,
};
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -585,15 +611,22 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *mask)
{
struct xadc *xadc = iio_priv(indio_dev);
- unsigned int n;
+ size_t new_size, n;
+ void *data;
n = bitmap_weight(mask, indio_dev->masklength);
- kfree(xadc->data);
- xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
- if (!xadc->data)
+ if (check_mul_overflow(n, sizeof(*xadc->data), &new_size))
+ return -ENOMEM;
+
+ data = devm_krealloc(indio_dev->dev.parent, xadc->data,
+ new_size, GFP_KERNEL);
+ if (!data)
return -ENOMEM;
+ memset(data, 0, new_size);
+ xadc->data = data;
+
return 0;
}
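
check_mul_overflow() returns true when the product would wrap, and the devm variant of krealloc() ties the buffer to the device lifetime, which is what lets the kfree() in the old remove path go away; the explicit memset() stays because krealloc() does not zero newly grown memory. The shape of the pattern, with hypothetical names (dev, n, elems, old_buf):

    size_t bytes;
    void *buf;

    if (check_mul_overflow(n, sizeof(*elems), &bytes))
        return -ENOMEM;        /* n * sizeof(*elems) would overflow */

    buf = devm_krealloc(dev, old_buf, bytes, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    memset(buf, 0, bytes);     /* krealloc does not zero */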
@@ -705,11 +738,12 @@ static const struct iio_trigger_ops xadc_trigger_ops = {
static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
const char *name)
{
+ struct device *dev = indio_dev->dev.parent;
struct iio_trigger *trig;
int ret;
- trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
- indio_dev->id, name);
+ trig = devm_iio_trigger_alloc(dev, "%s%d-%s", indio_dev->name,
+ indio_dev->id, name);
if (trig == NULL)
return ERR_PTR(-ENOMEM);
@@ -717,21 +751,26 @@ static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
trig->ops = &xadc_trigger_ops;
iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
- ret = iio_trigger_register(trig);
+ ret = devm_iio_trigger_register(dev, trig);
if (ret)
- goto error_free_trig;
+ return ERR_PTR(ret);
return trig;
-
-error_free_trig:
- iio_trigger_free(trig);
- return ERR_PTR(ret);
}
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
uint16_t val;
+ /*
+ * The datasheet says the power-down bits are don't-care on the
+ * UltraScale, but in practice setting the power-down bit for the
+ * non-existent ADC-B powers down the main ADC, so just return
+ * without doing anything.
+ */
+ if (xadc->ops->type == XADC_TYPE_US)
+ return 0;
+
/* Powerdown the ADC-B when it is not needed. */
switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS:
@@ -751,6 +790,10 @@ static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
{
unsigned int aux_scan_mode = scan_mode >> 16;
+ /* UltraScale has only one ADC and supports only continuous mode */
+ if (xadc->ops->type == XADC_TYPE_US)
+ return XADC_CONF1_SEQ_CONTINUOUS;
+
if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
return XADC_CONF1_SEQ_SIMULTANEOUS;
@@ -863,6 +906,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int bits = chan->scan_type.realbits;
uint16_t val16;
int ret;
@@ -874,17 +918,17 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- val16 >>= 4;
+ val16 >>= chan->scan_type.shift;
if (chan->scan_type.sign == 'u')
*val = val16;
else
- *val = sign_extend32(val16, 11);
+ *val = sign_extend32(val16, bits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- /* V = (val * 3.0) / 4096 */
+ /* V = (val * 3.0) / 2**bits */
switch (chan->address) {
case XADC_REG_VCCINT:
case XADC_REG_VCCAUX:
@@ -900,19 +944,19 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = 1000;
break;
}
- *val2 = 12;
+ *val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
- /* Temp in C = (val * 503.975) / 4096 - 273.15 */
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
*val = 503975;
- *val2 = 12;
+ *val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
/* Only the temperature channel has an offset */
- *val = -((273150 << 12) / 503975);
+ *val = -((273150 << bits) / 503975);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_samplerate(xadc);
@@ -1001,7 +1045,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
},
};
-#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
+#define XADC_CHAN_TEMP(_chan, _scan_index, _addr, _bits) { \
.type = IIO_TEMP, \
.indexed = 1, \
.channel = (_chan), \
@@ -1015,14 +1059,14 @@ static const struct iio_event_spec xadc_voltage_events[] = {
.scan_index = (_scan_index), \
.scan_type = { \
.sign = 'u', \
- .realbits = 12, \
+ .realbits = (_bits), \
.storagebits = 16, \
- .shift = 4, \
+ .shift = 16 - (_bits), \
.endianness = IIO_CPU, \
}, \
}
-#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
+#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _bits, _ext, _alarm) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = (_chan), \
@@ -1035,41 +1079,82 @@ static const struct iio_event_spec xadc_voltage_events[] = {
.scan_index = (_scan_index), \
.scan_type = { \
.sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
- .realbits = 12, \
+ .realbits = (_bits), \
.storagebits = 16, \
- .shift = 4, \
+ .shift = 16 - (_bits), \
.endianness = IIO_CPU, \
}, \
.extend_name = _ext, \
}
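
Samples are MSB-aligned in a 16-bit transfer register, so the shift is always 16 - realbits: 7-series keeps its 12-bit/shift-4 layout while UltraScale becomes 10-bit/shift-6, and the read_raw hunk above already derives both from scan_type. The extraction, reduced to a sketch:

    static inline int xadc_extract(u16 reg, unsigned int bits, bool is_signed)
    {
        reg >>= 16 - bits;    /* drop the unused low bits */
        return is_signed ? sign_extend32(reg, bits - 1) : reg;
    }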
-static const struct iio_chan_spec xadc_channels[] = {
- XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
- XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
- XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
- XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
- XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
- XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
- XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
- XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
- XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
- XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
- XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
- XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
- XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
- XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
- XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
- XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
- XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
- XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
- XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
- XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
- XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
- XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
- XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
- XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
- XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+/* 7 Series */
+#define XADC_7S_CHAN_TEMP(_chan, _scan_index, _addr) \
+ XADC_CHAN_TEMP(_chan, _scan_index, _addr, 12)
+#define XADC_7S_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
+ XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 12, _ext, _alarm)
+
+static const struct iio_chan_spec xadc_7s_channels[] = {
+ XADC_7S_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_7S_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_7S_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_7S_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_7S_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_7S_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+ XADC_7S_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
+ XADC_7S_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_7S_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_7S_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_7S_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+};
+
+/* UltraScale */
+#define XADC_US_CHAN_TEMP(_chan, _scan_index, _addr) \
+ XADC_CHAN_TEMP(_chan, _scan_index, _addr, 10)
+#define XADC_US_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
+ XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 10, _ext, _alarm)
+
+static const struct iio_chan_spec xadc_us_channels[] = {
+ XADC_US_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_US_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_US_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_US_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_US_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpsintlp", true),
+ XADC_US_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpsintfp", true),
+ XADC_US_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccpsaux", true),
+ XADC_US_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_US_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_US_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_US_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_US_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_US_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_US_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_US_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_US_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_US_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_US_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_US_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_US_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_US_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_US_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_US_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_US_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_US_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_US_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
};
static const struct iio_info xadc_info = {
@@ -1083,8 +1168,16 @@ static const struct iio_info xadc_info = {
};
static const struct of_device_id xadc_of_match_table[] = {
- { .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
- { .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ {
+ .compatible = "xlnx,zynq-xadc-1.00.a",
+ .data = &xadc_zynq_ops
+ }, {
+ .compatible = "xlnx,axi-xadc-1.00.a",
+ .data = &xadc_7s_axi_ops
+ }, {
+ .compatible = "xlnx,system-management-wiz-1.3",
+ .data = &xadc_us_axi_ops
+ },
{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
@@ -1094,8 +1187,10 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
{
struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
+ const struct iio_chan_spec *channel_templates;
struct iio_chan_spec *channels, *chan;
struct device_node *chan_node, *child;
+ unsigned int max_channels;
unsigned int num_channels;
const char *external_mux;
u32 ext_mux_chan;
@@ -1136,9 +1231,15 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
-
- channels = devm_kmemdup(dev, xadc_channels,
- sizeof(xadc_channels), GFP_KERNEL);
+ if (xadc->ops->type == XADC_TYPE_S7) {
+ channel_templates = xadc_7s_channels;
+ max_channels = ARRAY_SIZE(xadc_7s_channels);
+ } else {
+ channel_templates = xadc_us_channels;
+ max_channels = ARRAY_SIZE(xadc_us_channels);
+ }
+ channels = devm_kmemdup(dev, channel_templates,
+ sizeof(channels[0]) * max_channels, GFP_KERNEL);
if (!channels)
return -ENOMEM;
@@ -1148,7 +1249,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
chan_node = of_get_child_by_name(np, "xlnx,channels");
if (chan_node) {
for_each_child_of_node(chan_node, child) {
- if (num_channels >= ARRAY_SIZE(xadc_channels)) {
+ if (num_channels >= max_channels) {
of_node_put(child);
break;
}
@@ -1184,8 +1285,28 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
return 0;
}
+static const char * const xadc_type_names[] = {
+ [XADC_TYPE_S7] = "xadc",
+ [XADC_TYPE_US] = "xilinx-system-monitor",
+};
+
+static void xadc_clk_disable_unprepare(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static void xadc_cancel_delayed_work(void *data)
+{
+ struct delayed_work *work = data;
+
+ cancel_delayed_work_sync(work);
+}
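
devm_add_action_or_reset() invokes the callback immediately if registration fails, and otherwise queues it to run in reverse order on unbind; that guarantee is what lets the goto ladder in probe below collapse into plain returns. The idiom at its core:

    static void disable_clk(void *data)
    {
        clk_disable_unprepare(data);
    }

    static int claim_clk(struct device *dev, struct clk *clk)
    {
        int ret = clk_prepare_enable(clk);

        if (ret)
            return ret;
        /* on registration failure disable_clk() has already run */
        return devm_add_action_or_reset(dev, disable_clk, clk);
    }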
+
static int xadc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
@@ -1195,10 +1316,10 @@ static int xadc_probe(struct platform_device *pdev)
int irq;
int i;
- if (!pdev->dev.of_node)
+ if (!dev->of_node)
return -ENODEV;
- id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
+ id = of_match_node(xadc_of_match_table, dev->of_node);
if (!id)
return -EINVAL;
@@ -1206,7 +1327,7 @@ static int xadc_probe(struct platform_device *pdev)
if (irq <= 0)
return -ENXIO;
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
if (!indio_dev)
return -ENOMEM;
@@ -1222,43 +1343,44 @@ static int xadc_probe(struct platform_device *pdev)
if (IS_ERR(xadc->base))
return PTR_ERR(xadc->base);
- indio_dev->name = "xadc";
+ indio_dev->name = xadc_type_names[xadc->ops->type];
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
- ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
+ ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0);
if (ret)
return ret;
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
- ret = iio_triggered_buffer_setup(indio_dev,
- &iio_pollfunc_store_time, &xadc_trigger_handler,
- &xadc_buffer_ops);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &xadc_trigger_handler,
+ &xadc_buffer_ops);
if (ret)
return ret;
xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
- if (IS_ERR(xadc->convst_trigger)) {
- ret = PTR_ERR(xadc->convst_trigger);
- goto err_triggered_buffer_cleanup;
- }
+ if (IS_ERR(xadc->convst_trigger))
+ return PTR_ERR(xadc->convst_trigger);
+
xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
"samplerate");
- if (IS_ERR(xadc->samplerate_trigger)) {
- ret = PTR_ERR(xadc->samplerate_trigger);
- goto err_free_convst_trigger;
- }
+ if (IS_ERR(xadc->samplerate_trigger))
+ return PTR_ERR(xadc->samplerate_trigger);
}
- xadc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(xadc->clk)) {
- ret = PTR_ERR(xadc->clk);
- goto err_free_samplerate_trigger;
- }
+ xadc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(xadc->clk))
+ return PTR_ERR(xadc->clk);
ret = clk_prepare_enable(xadc->clk);
if (ret)
- goto err_free_samplerate_trigger;
+ return ret;
+
+ ret = devm_add_action_or_reset(dev,
+ xadc_clk_disable_unprepare, xadc->clk);
+ if (ret)
+ return ret;
/*
* Make sure not to exceed the maximum samplerate since otherwise the
@@ -1267,22 +1389,28 @@ static int xadc_probe(struct platform_device *pdev)
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = xadc_read_samplerate(xadc);
if (ret < 0)
- goto err_free_samplerate_trigger;
+ return ret;
+
if (ret > XADC_MAX_SAMPLERATE) {
ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
if (ret < 0)
- goto err_free_samplerate_trigger;
+ return ret;
}
}
- ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = devm_request_irq(dev, xadc->irq, xadc->ops->interrupt_handler, 0,
+ dev_name(dev), indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
+ &xadc->zynq_unmask_work);
if (ret)
- goto err_clk_disable_unprepare;
+ return ret;
ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_free_irq;
+ return ret;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1290,7 +1418,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
if (ret)
- goto err_free_irq;
+ return ret;
bipolar_mask = 0;
for (i = 0; i < indio_dev->num_channels; i++) {
@@ -1300,17 +1428,18 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
if (ret)
- goto err_free_irq;
+ return ret;
+
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
bipolar_mask >> 16);
if (ret)
- goto err_free_irq;
+ return ret;
/* Disable all alarms */
ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
XADC_CONF1_ALARM_MASK);
if (ret)
- goto err_free_irq;
+ return ret;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1325,60 +1454,17 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
if (ret)
- goto err_free_irq;
+ return ret;
}
/* Go to non-buffered mode */
xadc_postdisable(indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret)
- goto err_free_irq;
-
- platform_set_drvdata(pdev, indio_dev);
-
- return 0;
-
-err_free_irq:
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
-err_free_samplerate_trigger:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_trigger_free(xadc->samplerate_trigger);
-err_free_convst_trigger:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_trigger_free(xadc->convst_trigger);
-err_triggered_buffer_cleanup:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int xadc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct xadc *xadc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
- iio_trigger_free(xadc->samplerate_trigger);
- iio_trigger_free(xadc->convst_trigger);
- iio_triggered_buffer_cleanup(indio_dev);
- }
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
- clk_disable_unprepare(xadc->clk);
- kfree(xadc->data);
-
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
static struct platform_driver xadc_driver = {
.probe = xadc_probe,
- .remove = xadc_remove,
.driver = {
.name = "xadc",
.of_match_table = xadc_of_match_table,
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index 2357f585720a..1bd375fb10e0 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -155,9 +155,6 @@ err_out:
return ret;
}
-/* Register value is msb aligned, the lower 4 bits are ignored */
-#define XADC_THRESHOLD_VALUE_SHIFT 4
-
int xadc_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, enum iio_event_type type,
enum iio_event_direction dir, enum iio_event_info info,
@@ -177,7 +174,8 @@ int xadc_read_event_value(struct iio_dev *indio_dev,
return -EINVAL;
}
- *val >>= XADC_THRESHOLD_VALUE_SHIFT;
+ /* MSB aligned */
+ *val >>= 16 - chan->scan_type.realbits;
return IIO_VAL_INT;
}
@@ -191,7 +189,8 @@ int xadc_write_event_value(struct iio_dev *indio_dev,
struct xadc *xadc = iio_priv(indio_dev);
int ret = 0;
- val <<= XADC_THRESHOLD_VALUE_SHIFT;
+ /* MSB aligned */
+ val <<= 16 - chan->scan_type.realbits;
if (val < 0 || val > 0xffff)
return -EINVAL;
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 25abed9c0285..8b80195725e9 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -70,6 +70,11 @@ struct xadc {
int irq;
};
+enum xadc_type {
+ XADC_TYPE_S7, /* 7 Series */
+ XADC_TYPE_US, /* UltraScale and UltraScale+ */
+};
+
struct xadc_ops {
int (*read)(struct xadc *xadc, unsigned int reg, uint16_t *val);
int (*write)(struct xadc *xadc, unsigned int reg, uint16_t val);
@@ -80,6 +85,7 @@ struct xadc_ops {
irqreturn_t (*interrupt_handler)(int irq, void *devid);
unsigned int flags;
+ enum xadc_type type;
};
static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 6ea99e4cbf92..bf23cc7eb99e 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -479,7 +479,7 @@ static u8 bme680_calc_heater_res(struct bme680_data *data, u16 temp)
var4 = (var3 / (calib->res_heat_range + 4));
var5 = 131 * calib->res_heat_val + 65536;
heatr_res_x100 = ((var4 / var5) - 250) * 34;
- heatr_res = (heatr_res_x100 + 50) / 100;
+ heatr_res = DIV_ROUND_CLOSEST(heatr_res_x100, 100);
return heatr_res;
}
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index e9d4405654bc..e9857d93b307 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -282,7 +282,7 @@ static int pms7003_probe(struct serdev_device *serdev)
state->serdev = serdev;
indio_dev->info = &pms7003_info;
indio_dev->name = PMS7003_DRIVER_NAME;
- indio_dev->channels = pms7003_channels,
+ indio_dev->channels = pms7003_channels;
indio_dev->num_channels = ARRAY_SIZE(pms7003_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->available_scan_masks = pms7003_scan_masks;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 442ff787f7af..5b822a4298a0 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -71,6 +71,8 @@ static struct {
{HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_UNITS_DEGREES, 1000, 0},
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
+ {HID_USAGE_SENSOR_HINGE, 0, 0, 17453293},
+ {HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
};
static void simple_div(int dividend, int divisor, int *whole,
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index b9e2038d05ef..16ea697e945c 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -488,24 +488,20 @@ int ms_sensors_ht_read_humidity(struct ms_ht_dev *dev_data,
EXPORT_SYMBOL(ms_sensors_ht_read_humidity);
/**
- * ms_sensors_tp_crc_valid() - CRC check function for
+ * ms_sensors_tp_crc4() - Calculate PROM CRC for
* Temperature and pressure devices.
* This function is only used when reading PROM coefficients
*
* @prom: pointer to PROM coefficients array
- * @len: length of PROM coefficients array
*
- * Return: True if CRC is ok.
+ * Return: CRC.
*/
-static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
+static u8 ms_sensors_tp_crc4(u16 *prom)
{
unsigned int cnt, n_bit;
- u16 n_rem = 0x0000, crc_read = prom[0], crc = (*prom & 0xF000) >> 12;
-
- prom[len - 1] = 0;
- prom[0] &= 0x0FFF; /* Clear the CRC computation part */
+ u16 n_rem = 0x0000;
- for (cnt = 0; cnt < len * 2; cnt++) {
+ for (cnt = 0; cnt < MS_SENSORS_TP_PROM_WORDS_NB * 2; cnt++) {
if (cnt % 2 == 1)
n_rem ^= prom[cnt >> 1] & 0x00FF;
else
@@ -518,10 +514,55 @@ static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
n_rem <<= 1;
}
}
- n_rem >>= 12;
- prom[0] = crc_read;
- return n_rem == crc;
+ return n_rem >> 12;
+}
+
+/**
+ * ms_sensors_tp_crc_valid_112() - CRC check function for
+ * Temperature and pressure devices for 112bit PROM.
+ * This function is only used when reading PROM coefficients
+ *
+ * @prom: pointer to PROM coefficients array
+ *
+ * Return: True if CRC is ok.
+ */
+static bool ms_sensors_tp_crc_valid_112(u16 *prom)
+{
+ u16 w0 = prom[0], crc_read = (w0 & 0xF000) >> 12;
+ u8 crc;
+
+ prom[0] &= 0x0FFF; /* Clear the CRC computation part */
+ prom[MS_SENSORS_TP_PROM_WORDS_NB - 1] = 0;
+
+ crc = ms_sensors_tp_crc4(prom);
+
+ prom[0] = w0;
+
+ return crc == crc_read;
+}
+
+/**
+ * ms_sensors_tp_crc_valid_128() - CRC check function for
+ * Temperature and pressure devices for 128bit PROM.
+ * This function is only used when reading PROM coefficients
+ *
+ * @prom: pointer to PROM coefficients array
+ *
+ * Return: True if CRC is ok.
+ */
+static bool ms_sensors_tp_crc_valid_128(u16 *prom)
+{
+ u16 w7 = prom[7], crc_read = w7 & 0x000F;
+ u8 crc;
+
+ prom[7] &= 0xFF00; /* Clear the CRC and LSB part */
+
+ crc = ms_sensors_tp_crc4(prom);
+
+ prom[7] = w7;
+
+ return crc == crc_read;
}
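
Both validators share ms_sensors_tp_crc4() and differ only in where the 4-bit CRC lives: the top nibble of word 0 for the 112-bit PROM layout versus the low nibble of word 7 for the 128-bit one; each blanks that location before recomputing and restores it afterwards. A caller picks the variant from the PROM length, as the updated read_prom hunk below does (prom_words and prom stand in for the dev_data fields):

    bool valid = (prom_words == 8)
        ? ms_sensors_tp_crc_valid_128(prom)
        : ms_sensors_tp_crc_valid_112(prom);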
/**
@@ -536,8 +577,9 @@ static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
int ms_sensors_tp_read_prom(struct ms_tp_dev *dev_data)
{
int i, ret;
+ bool valid;
- for (i = 0; i < MS_SENSORS_TP_PROM_WORDS_NB; i++) {
+ for (i = 0; i < dev_data->hw->prom_len; i++) {
ret = ms_sensors_read_prom_word(
dev_data->client,
MS_SENSORS_TP_PROM_READ + (i << 1),
@@ -547,8 +589,12 @@ int ms_sensors_tp_read_prom(struct ms_tp_dev *dev_data)
return ret;
}
- if (!ms_sensors_tp_crc_valid(dev_data->prom,
- MS_SENSORS_TP_PROM_WORDS_NB + 1)) {
+ if (dev_data->hw->prom_len == 8)
+ valid = ms_sensors_tp_crc_valid_128(dev_data->prom);
+ else
+ valid = ms_sensors_tp_crc_valid_112(dev_data->prom);
+
+ if (!valid) {
dev_err(&dev_data->client->dev,
"Calibration coefficients crc check error\n");
return -ENODEV;
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.h b/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
index bad09c80e47a..f15b973f27c6 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
@@ -11,7 +11,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
-#define MS_SENSORS_TP_PROM_WORDS_NB 7
+#define MS_SENSORS_TP_PROM_WORDS_NB 8
/**
* struct ms_ht_dev - Humidity/Temperature sensor device structure
@@ -26,6 +26,16 @@ struct ms_ht_dev {
};
/**
+ * struct ms_tp_hw_data - Temperature/Pressure sensor hardware data
+ * @prom_len: number of words in the PROM
+ * @max_res_index: maximum sensor resolution index
+ */
+struct ms_tp_hw_data {
+ u8 prom_len;
+ u8 max_res_index;
+};
+
+/**
* struct ms_tp_dev - Temperature/Pressure sensor device structure
* @client: i2c client
* @lock: lock protecting the i2c conversion
@@ -36,7 +46,8 @@ struct ms_ht_dev {
struct ms_tp_dev {
struct i2c_client *client;
struct mutex lock;
- u16 prom[MS_SENSORS_TP_PROM_WORDS_NB + 1];
+ const struct ms_tp_hw_data *hw;
+ u16 prom[MS_SENSORS_TP_PROM_WORDS_NB];
u8 res_index;
};
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 0507283bd4c1..2dbd2646e44e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -23,35 +23,31 @@
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 6f6074a5d3db..cea07b4cced1 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -189,6 +189,16 @@ config AD5764
To compile this driver as a module, choose M here: the
module will be called ad5764.
+config AD5766
+ tristate "Analog Devices AD5766/AD5767 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5766, AD5767
+ Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5766.
+
config AD5770R
tristate "Analog Devices AD5770R IDAC driver"
depends on SPI_MASTER
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 2fc481167724..33e16f14902a 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5755) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
+obj-$(CONFIG_AD5766) += ad5766.o
obj-$(CONFIG_AD5770R) += ad5770r.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 28921b62e642..e9297c25d4ef 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
new file mode 100644
index 000000000000..ef1618ea6a20
--- /dev/null
+++ b/drivers/iio/dac/ad5766.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD5766, AD5767
+ * Digital to Analog Converters driver
+ * Copyright 2019-2020 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
+#define AD5766_UPPER_WORD_SPI_MASK GENMASK(31, 16)
+#define AD5766_LOWER_WORD_SPI_MASK GENMASK(15, 0)
+#define AD5766_DITHER_SOURCE_MASK(ch)	GENMASK((2 * (ch)) + 1, 2 * (ch))
+#define AD5766_DITHER_SOURCE(ch, source)	BIT(((ch) * 2) + (source))
+#define AD5766_DITHER_SCALE_MASK(x)	AD5766_DITHER_SOURCE_MASK(x)
+#define AD5766_DITHER_SCALE(ch, scale)	((scale) << ((ch) * 2))
+#define AD5766_DITHER_ENABLE_MASK(ch)	BIT(ch)
+#define AD5766_DITHER_ENABLE(ch, state)	((!(state)) << (ch))
+#define AD5766_DITHER_INVERT_MASK(ch)	BIT(ch)
+#define AD5766_DITHER_INVERT(ch, state)	((state) << (ch))
+
+#define AD5766_CMD_NOP_MUX_OUT 0x00
+#define AD5766_CMD_SDO_CNTRL 0x01
+#define AD5766_CMD_WR_IN_REG(x) (0x10 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_WR_DAC_REG(x) (0x20 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_SW_LDAC 0x30
+#define AD5766_CMD_SPAN_REG 0x40
+#define AD5766_CMD_WR_PWR_DITHER 0x51
+#define AD5766_CMD_WR_DAC_REG_ALL 0x60
+#define AD5766_CMD_SW_FULL_RESET 0x70
+#define AD5766_CMD_READBACK_REG(x) (0x80 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_DITHER_SIG_1 0x90
+#define AD5766_CMD_DITHER_SIG_2 0xA0
+#define AD5766_CMD_INV_DITHER 0xB0
+#define AD5766_CMD_DITHER_SCALE_1 0xC0
+#define AD5766_CMD_DITHER_SCALE_2 0xD0
+
+#define AD5766_FULL_RESET_CODE 0x1234
+
+enum ad5766_type {
+ ID_AD5766,
+ ID_AD5767,
+};
+
+enum ad5766_voltage_range {
+ AD5766_VOLTAGE_RANGE_M20V_0V,
+ AD5766_VOLTAGE_RANGE_M16V_to_0V,
+ AD5766_VOLTAGE_RANGE_M10V_to_0V,
+ AD5766_VOLTAGE_RANGE_M12V_to_14V,
+ AD5766_VOLTAGE_RANGE_M16V_to_10V,
+ AD5766_VOLTAGE_RANGE_M10V_to_6V,
+ AD5766_VOLTAGE_RANGE_M5V_to_5V,
+ AD5766_VOLTAGE_RANGE_M10V_to_10V,
+};
+
+/**
+ * struct ad5766_chip_info - chip specific information
+ * @num_channels: number of channels
+ * @channels: channel specification
+ */
+struct ad5766_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+};
+
+enum {
+ AD5766_DITHER_ENABLE,
+ AD5766_DITHER_INVERT,
+ AD5766_DITHER_SOURCE,
+};
+
+/*
+ * Dither signal can also be scaled.
+ * Available dither scale strings corresponding to "dither_scale" field in
+ * "struct ad5766_state".
+ */
+static const char * const ad5766_dither_scales[] = {
+ "1",
+ "0.75",
+ "0.5",
+ "0.25",
+};
+
+/**
+ * struct ad5766_state - driver instance specific data
+ * @spi: SPI device
+ * @lock: Lock used to restrict concurrent access to the SPI device
+ * @chip_info: Chip model specific constants
+ * @gpio_reset: Reset GPIO, used to reset the device
+ * @crt_range: Currently selected output range
+ * @dither_enable: Power enable bit for each channel dither block (for
+ * example, D15 = DAC 15, D8 = DAC 8, and D0 = DAC 0)
+ * 0 - Normal operation, 1 - Power down
+ * @dither_invert: Inverts the dither signal applied to the selected DAC
+ * outputs
+ * @dither_source: Selects between 2 possible sources:
+ * 1: N0, 2: N1
+ * Two bits are used for each channel
+ * @dither_scale: Two bits are used for each of the 16 channels:
+ * 0: 1 SCALING, 1: 0.75 SCALING, 2: 0.5 SCALING,
+ * 3: 0.25 SCALING.
+ * @data: SPI transfer buffers
+ */
+struct ad5766_state {
+ struct spi_device *spi;
+ struct mutex lock;
+ const struct ad5766_chip_info *chip_info;
+ struct gpio_desc *gpio_reset;
+ enum ad5766_voltage_range crt_range;
+ u16 dither_enable;
+ u16 dither_invert;
+ u32 dither_source;
+ u32 dither_scale;
+ union {
+ u32 d32;
+ u16 w16[2];
+ u8 b8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+struct ad5766_span_tbl {
+ int min;
+ int max;
+};
+
+static const struct ad5766_span_tbl ad5766_span_tbl[] = {
+ [AD5766_VOLTAGE_RANGE_M20V_0V] = {-20, 0},
+ [AD5766_VOLTAGE_RANGE_M16V_to_0V] = {-16, 0},
+ [AD5766_VOLTAGE_RANGE_M10V_to_0V] = {-10, 0},
+ [AD5766_VOLTAGE_RANGE_M12V_to_14V] = {-12, 14},
+ [AD5766_VOLTAGE_RANGE_M16V_to_10V] = {-16, 10},
+ [AD5766_VOLTAGE_RANGE_M10V_to_6V] = {-10, 6},
+ [AD5766_VOLTAGE_RANGE_M5V_to_5V] = {-5, 5},
+ [AD5766_VOLTAGE_RANGE_M10V_to_10V] = {-10, 10},
+};
+
+static int __ad5766_spi_read(struct ad5766_state *st, u8 dac, int *val)
+{
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->data[0].d32,
+ .bits_per_word = 8,
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d32,
+ .rx_buf = &st->data[2].d32,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ st->data[0].d32 = AD5766_CMD_READBACK_REG(dac);
+ st->data[1].d32 = AD5766_CMD_NOP_MUX_OUT;
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
+
+ *val = st->data[2].w16[1];
+
+ return ret;
+}
+
+static int __ad5766_spi_write(struct ad5766_state *st, u8 command, u16 data)
+{
+ st->data[0].b8[0] = command;
+ put_unaligned_be16(data, &st->data[0].b8[1]);
+
+ return spi_write(st->spi, &st->data[0].b8[0], 3);
+}
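
Each write is a single 24-bit frame: one command byte followed by the 16-bit payload in big-endian order, which put_unaligned_be16() lays out in the tx buffer. For instance, writing mid-scale to DAC 3 (channel number chosen arbitrarily):

    /* frame on the wire: 0x23 0x80 0x00 */
    __ad5766_spi_write(st, AD5766_CMD_WR_DAC_REG(3), 0x8000);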
+
+static int ad5766_read(struct iio_dev *indio_dev, u8 dac, int *val)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad5766_spi_read(st, dac, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5766_write(struct iio_dev *indio_dev, u8 dac, u16 data)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad5766_spi_write(st, AD5766_CMD_WR_DAC_REG(dac), data);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5766_reset(struct ad5766_state *st)
+{
+ int ret;
+
+ if (st->gpio_reset) {
+ gpiod_set_value_cansleep(st->gpio_reset, 1);
+ ndelay(100); /* t_reset >= 100ns */
+ gpiod_set_value_cansleep(st->gpio_reset, 0);
+ } else {
+ ret = __ad5766_spi_write(st, AD5766_CMD_SW_FULL_RESET,
+ AD5766_FULL_RESET_CODE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Minimum time between a reset and the subsequent successful write is
+ * typically 25 ns
+ */
+ ndelay(25);
+
+ return 0;
+}
+
+static int ad5766_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ad5766_read(indio_dev, chan->address, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5766_span_tbl[st->crt_range].min;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
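+ /*
+ * Scale is span / 2^realbits. As a worked example (values are
+ * illustrative): the -20 V to 0 V range on a 16-bit part gives
+ * 20 / 65536, i.e. roughly 305 uV per LSB, reported via
+ * IIO_VAL_FRACTIONAL_LOG2 below.
+ */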
+ *val = ad5766_span_tbl[st->crt_range].max -
+ ad5766_span_tbl[st->crt_range].min;
+ *val2 = st->chip_info->channels[0].scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad5766_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ const int max_val = GENMASK(chan->scan_type.realbits - 1, 0);
+
+ if (val > max_val || val < 0)
+ return -EINVAL;
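+ /* The DAC word is left-justified within the 16-bit register */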
+ val <<= chan->scan_type.shift;
+ return ad5766_write(indio_dev, chan->address, val);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ad5766_info = {
+ .read_raw = ad5766_read_raw,
+ .write_raw = ad5766_write_raw,
+};
+
+static int ad5766_get_dither_source(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ u32 source;
+
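+ /*
+ * Two bits per channel select the source; per the field
+ * documentation above the register encoding is 1-based
+ * (1: N0, 2: N1), hence the decrement to a 0-based index.
+ */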
+ source = st->dither_source & AD5766_DITHER_SOURCE_MASK(chan->channel);
+ source = source >> (chan->channel * 2);
+ source -= 1;
+
+ return source;
+}
+
+static int ad5766_set_dither_source(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int source)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ uint16_t val;
+ int ret;
+
+ st->dither_source &= ~AD5766_DITHER_SOURCE_MASK(chan->channel);
+ st->dither_source |= AD5766_DITHER_SOURCE(chan->channel, source);
+
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_source);
+ ret = ad5766_write(dev, AD5766_CMD_DITHER_SIG_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_source);
+
+ return ad5766_write(dev, AD5766_CMD_DITHER_SIG_2, val);
+}
+
+static int ad5766_get_dither_scale(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ u32 scale;
+
+ scale = st->dither_scale & AD5766_DITHER_SCALE_MASK(chan->channel);
+
+ return (scale >> (chan->channel * 2));
+}
+
+static int ad5766_set_dither_scale(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int scale)
+{
+ int ret;
+ struct ad5766_state *st = iio_priv(dev);
+ uint16_t val;
+
+ st->dither_scale &= ~AD5766_DITHER_SCALE_MASK(chan->channel);
+ st->dither_scale |= AD5766_DITHER_SCALE(chan->channel, scale);
+
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_scale);
+ ret = ad5766_write(dev, AD5766_CMD_DITHER_SCALE_1, val);
+ if (ret)
+ return ret;
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_scale);
+
+ return ad5766_write(dev, AD5766_CMD_DITHER_SCALE_2, val);
+}
+
+static const struct iio_enum ad5766_dither_scale_enum = {
+ .items = ad5766_dither_scales,
+ .num_items = ARRAY_SIZE(ad5766_dither_scales),
+ .set = ad5766_set_dither_scale,
+ .get = ad5766_get_dither_scale,
+};
+
+static ssize_t ad5766_read_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+
+ switch (private) {
+ case AD5766_DITHER_ENABLE:
+ return sprintf(buf, "%u\n",
+ !(st->dither_enable & BIT(chan->channel)));
+ case AD5766_DITHER_INVERT:
+ return sprintf(buf, "%u\n",
+ !!(st->dither_invert & BIT(chan->channel)));
+ case AD5766_DITHER_SOURCE:
+ return sprintf(buf, "%d\n",
+ ad5766_get_dither_source(indio_dev, chan));
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ad5766_write_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ bool readin;
+ int ret;
+
+ ret = kstrtobool(buf, &readin);
+ if (ret)
+ return ret;
+
+ switch (private) {
+ case AD5766_DITHER_ENABLE:
+ st->dither_enable &= ~AD5766_DITHER_ENABLE_MASK(chan->channel);
+ st->dither_enable |= AD5766_DITHER_ENABLE(chan->channel,
+ readin);
+ ret = ad5766_write(indio_dev, AD5766_CMD_WR_PWR_DITHER,
+ st->dither_enable);
+ break;
+ case AD5766_DITHER_INVERT:
+ st->dither_invert &= ~AD5766_DITHER_INVERT_MASK(chan->channel);
+ st->dither_invert |= AD5766_DITHER_INVERT(chan->channel,
+ readin);
+ ret = ad5766_write(indio_dev, AD5766_CMD_INV_DITHER,
+ st->dither_invert);
+ break;
+ case AD5766_DITHER_SOURCE:
+ ret = ad5766_set_dither_source(indio_dev, chan, readin);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+#define _AD5766_CHAN_EXT_INFO(_name, _what, _shared) { \
+ .name = _name, \
+ .read = ad5766_read_ext, \
+ .write = ad5766_write_ext, \
+ .private = _what, \
+ .shared = _shared, \
+}
+
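+/*
+ * Like IIO_ENUM_AVAILABLE(), but with caller-selectable sharedness;
+ * the stock macro hardwires IIO_SHARED_BY_TYPE while dither_scale is
+ * a per-channel (IIO_SEPARATE) attribute here.
+ */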
+#define IIO_ENUM_AVAILABLE_SHARED(_name, _shared, _e) \
+{ \
+ .name = (_name "_available"), \
+ .shared = _shared, \
+ .read = iio_enum_available_read, \
+ .private = (uintptr_t)(_e), \
+}
+
+static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
+ _AD5766_CHAN_EXT_INFO("dither_enable", AD5766_DITHER_ENABLE,
+ IIO_SEPARATE),
+ _AD5766_CHAN_EXT_INFO("dither_invert", AD5766_DITHER_INVERT,
+ IIO_SEPARATE),
+ _AD5766_CHAN_EXT_INFO("dither_source", AD5766_DITHER_SOURCE,
+ IIO_SEPARATE),
+ IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum),
+ IIO_ENUM_AVAILABLE_SHARED("dither_scale",
+ IIO_SEPARATE,
+ &ad5766_dither_scale_enum),
+ {}
+};
+
+#define AD576x_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+ .ext_info = ad5766_ext_info, \
+}
+
+#define DECLARE_AD576x_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name[] = { \
+ AD576x_CHANNEL(0, (_bits)), \
+ AD576x_CHANNEL(1, (_bits)), \
+ AD576x_CHANNEL(2, (_bits)), \
+ AD576x_CHANNEL(3, (_bits)), \
+ AD576x_CHANNEL(4, (_bits)), \
+ AD576x_CHANNEL(5, (_bits)), \
+ AD576x_CHANNEL(6, (_bits)), \
+ AD576x_CHANNEL(7, (_bits)), \
+ AD576x_CHANNEL(8, (_bits)), \
+ AD576x_CHANNEL(9, (_bits)), \
+ AD576x_CHANNEL(10, (_bits)), \
+ AD576x_CHANNEL(11, (_bits)), \
+ AD576x_CHANNEL(12, (_bits)), \
+ AD576x_CHANNEL(13, (_bits)), \
+ AD576x_CHANNEL(14, (_bits)), \
+ AD576x_CHANNEL(15, (_bits)), \
+}
+
+static DECLARE_AD576x_CHANNELS(ad5766_channels, 16);
+static DECLARE_AD576x_CHANNELS(ad5767_channels, 12);
+
+static const struct ad5766_chip_info ad5766_chip_infos[] = {
+ [ID_AD5766] = {
+ .num_channels = ARRAY_SIZE(ad5766_channels),
+ .channels = ad5766_channels,
+ },
+ [ID_AD5767] = {
+ .num_channels = ARRAY_SIZE(ad5767_channels),
+ .channels = ad5767_channels,
+ },
+};
+
+static int ad5766_get_output_range(struct ad5766_state *st)
+{
+ int i, ret, min, max, tmp[2];
+
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "output-range-voltage",
+ tmp, 2);
+ if (ret)
+ return ret;
+
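+ /* The property values are in millivolts, the span table is in volts */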
+ min = tmp[0] / 1000;
+ max = tmp[1] / 1000;
+ for (i = 0; i < ARRAY_SIZE(ad5766_span_tbl); i++) {
+ if (ad5766_span_tbl[i].min != min ||
+ ad5766_span_tbl[i].max != max)
+ continue;
+
+ st->crt_range = i;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5766_default_setup(struct ad5766_state *st)
+{
+ uint16_t val;
+ int ret, i;
+
+ /* Always issue a reset before writing to the span register. */
+ ret = ad5766_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad5766_get_output_range(st);
+ if (ret)
+ return ret;
+
+ /* Power down all dither blocks by default */
+ st->dither_enable = GENMASK(15, 0);
+ ret = __ad5766_spi_write(st, AD5766_CMD_WR_PWR_DITHER,
+ st->dither_enable);
+ if (ret)
+ return ret;
+
+ st->dither_source = 0;
+ for (i = 0; i < ARRAY_SIZE(ad5766_channels); i++)
+ st->dither_source |= AD5766_DITHER_SOURCE(i, 0);
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_source);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SIG_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_source);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SIG_2, val);
+ if (ret)
+ return ret;
+
+ st->dither_scale = 0;
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_scale);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SCALE_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_scale);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SCALE_2, val);
+ if (ret)
+ return ret;
+
+ st->dither_invert = 0;
+ ret = __ad5766_spi_write(st, AD5766_CMD_INV_DITHER, st->dither_invert);
+ if (ret)
+ return ret;
+
+ return __ad5766_spi_write(st, AD5766_CMD_SPAN_REG, st->crt_range);
+}
+
+static int ad5766_probe(struct spi_device *spi)
+{
+ enum ad5766_type type;
+ struct iio_dev *indio_dev;
+ struct ad5766_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
+ st->spi = spi;
+ type = spi_get_device_id(spi)->driver_data;
+ st->chip_info = &ad5766_chip_infos[type];
+
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->info = &ad5766_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
+
+ ret = ad5766_default_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id ad5766_dt_match[] = {
+ { .compatible = "adi,ad5766" },
+ { .compatible = "adi,ad5767" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad5766_dt_match);
+
+static const struct spi_device_id ad5766_spi_ids[] = {
+ { "ad5766", ID_AD5766 },
+ { "ad5767", ID_AD5767 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5766_spi_ids);
+
+static struct spi_driver ad5766_driver = {
+ .driver = {
+ .name = "ad5766",
+ .of_match_table = ad5766_dt_match,
+ },
+ .probe = ad5766_probe,
+ .id_table = ad5766_spi_ids,
+};
+module_spi_driver(ad5766_driver);
+
+MODULE_AUTHOR("Denis-Gabriel Gheorghescu <denis.gheorghescu@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5766/AD5767 DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 82c050a3899d..1462a6a5bc6d 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -582,8 +582,7 @@ error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
error_disable_clk:
- if (clk)
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
return ret;
}
@@ -599,8 +598,7 @@ static int adf4350_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (st->clk)
- clk_disable_unprepare(st->clk);
+ clk_disable_unprepare(st->clk);
if (!IS_ERR(reg))
regulator_disable(reg);
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 2d5015801a75..029ef4c34604 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -19,6 +19,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -92,6 +93,7 @@
struct bmg160_data {
struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
struct iio_mount_matrix orientation;
@@ -1061,6 +1063,13 @@ static const char *bmg160_match_acpi_device(struct device *dev)
return dev_name(dev);
}
+static void bmg160_disable_regulators(void *d)
+{
+ struct bmg160_data *data = d;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name)
{
@@ -1077,6 +1086,22 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->irq = irq;
data->regmap = regmap;
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, bmg160_disable_regulators, data);
+ if (ret)
+ return ret;
+
ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 6698f5f535f6..fb0d678ece1a 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -23,15 +23,20 @@ enum gyro_3d_channel {
GYRO_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP GYRO_3D_CHANNEL_MAX
struct gyro_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info gyro[GYRO_3D_CHANNEL_MAX];
- u32 gyro_val[GYRO_3D_CHANNEL_MAX];
+ struct {
+ u32 gyro_val[GYRO_3D_CHANNEL_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
static const u32 gyro_3d_addresses[GYRO_3D_CHANNEL_MAX] = {
@@ -72,7 +77,8 @@ static const struct iio_chan_spec gyro_3d_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Adjust channel real bits based on report descriptor */
@@ -178,14 +184,6 @@ static const struct iio_info gyro_3d_info = {
.write_raw = &gyro_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
- int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -195,10 +193,15 @@ static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "gyro_3d_proc_event\n");
- if (atomic_read(&gyro_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- gyro_state->gyro_val,
- sizeof(gyro_state->gyro_val));
+ if (atomic_read(&gyro_state->common_attributes.data_ready)) {
+ if (!gyro_state->timestamp)
+ gyro_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &gyro_state->scan,
+ gyro_state->timestamp);
+
+ gyro_state->timestamp = 0;
+ }
return 0;
}
@@ -219,10 +222,15 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS:
case HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS:
offset = usage_id - HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS;
- gyro_state->gyro_val[CHANNEL_SCAN_INDEX_X + offset] =
- *(u32 *)raw_data;
+ gyro_state->scan.gyro_val[CHANNEL_SCAN_INDEX_X + offset] =
+ *(u32 *)raw_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ gyro_state->timestamp =
+ hid_sensor_convert_timestamp(&gyro_state->common_attributes,
+ *(s64 *)raw_data);
+ break;
default:
break;
}
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 7137ea6f25db..9c625517173a 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -16,8 +16,8 @@ config INV_MPU6050_I2C
select REGMAP_I2C
help
This driver supports the Invensense MPU6050/9150,
- MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
- IAM20680 motion tracking devices over I2C.
+ MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ and IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
select REGMAP_SPI
help
This driver supports the Invensense MPU6000,
- MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
- IAM20680 motion tracking devices over SPI.
+ MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ and IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 18a1898e3e34..453c51c79655 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -161,6 +161,14 @@ static const struct inv_mpu6050_hw hw_info[] = {
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
+ .whoami = INV_MPU6880_WHOAMI_VALUE,
+ .name = "MPU6880",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 4096,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
+ },
+ {
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
@@ -1323,6 +1331,7 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
case INV_MPU6000:
case INV_MPU6500:
case INV_MPU6515:
+ case INV_MPU6880:
case INV_MPU9250:
case INV_MPU9255:
/* reset signal path (required for spi connection) */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 28cfae1e61cf..95f16951c8f4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -177,6 +177,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
{"mpu6515", INV_MPU6515},
+ {"mpu6880", INV_MPU6880},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
@@ -205,6 +206,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6515
},
{
+ .compatible = "invensense,mpu6880",
+ .data = (void *)INV_MPU6880
+ },
+ {
.compatible = "invensense,mpu9150",
.data = (void *)INV_MPU9150
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index eb522b38acf3..58188dc0dd13 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -70,6 +70,7 @@ enum inv_devices {
INV_MPU6050,
INV_MPU6500,
INV_MPU6515,
+ INV_MPU6880,
INV_MPU6000,
INV_MPU9150,
INV_MPU9250,
@@ -373,6 +374,7 @@ struct inv_mpu6050_state {
#define INV_MPU6000_WHOAMI_VALUE 0x68
#define INV_MPU6050_WHOAMI_VALUE 0x68
#define INV_MPU6500_WHOAMI_VALUE 0x70
+#define INV_MPU6880_WHOAMI_VALUE 0x78
#define INV_MPU9150_WHOAMI_VALUE 0x68
#define INV_MPU9250_WHOAMI_VALUE 0x71
#define INV_MPU9255_WHOAMI_VALUE 0x73
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 6f968ce687e1..b056f3fe2561 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -70,6 +70,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
{"mpu6515", INV_MPU6515},
+ {"mpu6880", INV_MPU6880},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
@@ -97,6 +98,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6515
},
{
+ .compatible = "invensense,mpu6880",
+ .data = (void *)INV_MPU6880
+ },
+ {
.compatible = "invensense,mpu9250",
.data = (void *)INV_MPU9250
},
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index c2e4c267c36b..7db761afa578 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -169,6 +169,36 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};
+/**
+ * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
+ * @array: array of strings
+ * @n: number of strings in the array
+ * @str: string to match with
+ *
+ * Returns the index of @str in @array, or -EINVAL; similar to match_string().
+ * Uses sysfs_streq instead of strcmp for matching.
+ *
+ * This routine will look for a string in an array of strings.
+ * The search will continue until the element is found or the n-th element
+ * is reached, regardless of any NULL elements in the array.
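+ *
+ * For example (illustrative): with array = {"a", NULL, "b"} and
+ * str = "b" this returns 2, whereas __sysfs_match_string() would
+ * stop at the NULL element.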
+ */
+static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
+ const char *str)
+{
+ const char *item;
+ int index;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ continue;
+ if (sysfs_streq(item, str))
+ return index;
+ }
+
+ return -EINVAL;
+}
+
#if defined(CONFIG_DEBUG_FS)
/*
* There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
@@ -470,8 +500,11 @@ ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
if (!e->num_items)
return 0;
- for (i = 0; i < e->num_items; ++i)
+ for (i = 0; i < e->num_items; ++i) {
+ if (!e->items[i])
+ continue;
len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", e->items[i]);
+ }
/* replace last space with a newline */
buf[len - 1] = '\n';
@@ -492,7 +525,7 @@ ssize_t iio_enum_read(struct iio_dev *indio_dev,
i = e->get(indio_dev, chan);
if (i < 0)
return i;
- else if (i >= e->num_items)
+ else if (i >= e->num_items || !e->items[i])
return -EINVAL;
return snprintf(buf, PAGE_SIZE, "%s\n", e->items[i]);
@@ -509,7 +542,7 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
if (!e->set)
return -EINVAL;
- ret = __sysfs_match_string(e->items, e->num_items, buf);
+ ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
if (ret < 0)
return ret;
@@ -1473,11 +1506,14 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
goto error_clear_attrs;
}
/* Copy across original attributes */
- if (indio_dev->info->attrs)
+ if (indio_dev->info->attrs) {
memcpy(iio_dev_opaque->chan_attr_group.attrs,
indio_dev->info->attrs->attrs,
sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
*attrcount_orig);
+ iio_dev_opaque->chan_attr_group.is_visible =
+ indio_dev->info->attrs->is_visible;
+ }
attrn = attrcount_orig;
/* Add all elements from the list. */
list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index fe30bcb6a57b..db77a2d4a56b 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -191,8 +191,8 @@ err_free_channel:
return ERR_PTR(err);
}
-static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
- const char *name)
+struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ const char *name)
{
struct iio_channel *chan = NULL;
@@ -230,6 +230,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
return chan;
}
+EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
@@ -272,12 +273,6 @@ error_free_chans:
#else /* CONFIG_OF */
-static inline struct iio_channel *
-of_iio_channel_get_by_name(struct device_node *np, const char *name)
-{
- return NULL;
-}
-
static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
return NULL;
@@ -393,6 +388,29 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
+struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
+ struct device_node *np,
+ const char *channel_name)
+{
+ struct iio_channel **ptr, *channel;
+
+ ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channel = of_iio_channel_get_by_name(np, channel_name);
+ if (IS_ERR(channel)) {
+ devres_free(ptr);
+ return channel;
+ }
+
+ *ptr = channel;
+ devres_add(dev, ptr);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
+
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 547e7f9d6920..df0647856e5d 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -8,6 +8,7 @@
* TODO: gesture + proximity calib offsets
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -1113,6 +1114,12 @@ static const struct i2c_device_id apds9960_id[] = {
};
MODULE_DEVICE_TABLE(i2c, apds9960_id);
+static const struct acpi_device_id apds9960_acpi_match[] = {
+ { "MSHW0184" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, apds9960_acpi_match);
+
static const struct of_device_id apds9960_of_match[] = {
{ .compatible = "avago,apds9960" },
{ }
@@ -1124,6 +1131,7 @@ static struct i2c_driver apds9960_driver = {
.name = APDS9960_DRV_NAME,
.of_match_table = apds9960_of_match,
.pm = &apds9960_pm_ops,
+ .acpi_match_table = apds9960_acpi_match,
},
.probe = apds9960_probe,
.remove = apds9960_remove,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index a21c827e4953..4093f2353d95 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -22,15 +22,21 @@ enum {
CHANNEL_SCAN_INDEX_MAX
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP CHANNEL_SCAN_INDEX_MAX
+
struct als_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als_illum;
- u32 illum[CHANNEL_SCAN_INDEX_MAX];
+ struct {
+ u32 illum[CHANNEL_SCAN_INDEX_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
/* Channel definitions */
@@ -54,7 +60,8 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Adjust channel real bits based on report descriptor */
@@ -168,14 +175,6 @@ static const struct iio_info als_info = {
.write_raw = &als_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
- int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int als_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -185,10 +184,14 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev,
struct als_state *als_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "als_proc_event\n");
- if (atomic_read(&als_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- &als_state->illum,
- sizeof(als_state->illum));
+ if (atomic_read(&als_state->common_attributes.data_ready)) {
+ if (!als_state->timestamp)
+ als_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &als_state->scan,
+ als_state->timestamp);
+ als_state->timestamp = 0;
+ }
return 0;
}
@@ -206,10 +209,14 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_LIGHT_ILLUM:
- als_state->illum[CHANNEL_SCAN_INDEX_INTENSITY] = sample_data;
- als_state->illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_INTENSITY] = sample_data;
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
+ *(s64 *)raw_data);
+ break;
default:
break;
}
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 9e5490b7473b..0f787bfc88fc 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -285,7 +285,7 @@ static int tsl2583_get_lux(struct iio_dev *indio_dev)
lux64 = lux64 * chip->als_settings.als_gain_trim;
lux64 >>= 13;
lux = lux64;
- lux = (lux + 500) / 1000;
+ lux = DIV_ROUND_CLOSEST(lux, 1000);
if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
return_max:
@@ -361,12 +361,12 @@ static int tsl2583_set_als_time(struct tsl2583_chip *chip)
u8 val;
/* determine als integration register */
- als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+ als_count = DIV_ROUND_CLOSEST(chip->als_settings.als_time * 100, 270);
if (!als_count)
als_count = 1; /* ensure at least one cycle */
/* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
+ als_time = DIV_ROUND_CLOSEST(als_count * 27, 10);
val = 256 - als_count;
ret = i2c_smbus_write_byte_data(chip->client,
@@ -380,7 +380,7 @@ static int tsl2583_set_als_time(struct tsl2583_chip *chip)
/* set chip struct re scaling and saturation */
chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
+ chip->als_time_scale = DIV_ROUND_CLOSEST(als_time, 50);
return ret;
}
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 4775bd785e50..d47a4f6f4e87 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -392,7 +392,7 @@ static int vl6180_set_it(struct vl6180_data *data, int val, int val2)
{
int ret, it_ms;
- it_ms = (val2 + 500) / 1000; /* round to ms */
+ it_ms = DIV_ROUND_CLOSEST(val2, 1000); /* round to ms */
if (val != 0 || it_ms < 1 || it_ms > 512)
return -EINVAL;
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 1697a8c03506..5d4ffd66032e 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -205,4 +205,19 @@ config SENSORS_RM3100_SPI
To compile this driver as a module, choose M here: the module
will be called rm3100-spi.
+config YAMAHA_YAS530
+ tristate "Yamaha YAS530 family of 3-Axis Magnetometers (I2C)"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to add support for the Yamaha YAS530 series of
+ 3-Axis Magnetometers. Right now YAS530, YAS532 and YAS533 are
+ fully supported.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called yamaha-yas.
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index ba1bc34b82fa..b9f45b7fafc3 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -28,3 +28,5 @@ obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
obj-$(CONFIG_SENSORS_RM3100) += rm3100-core.o
obj-$(CONFIG_SENSORS_RM3100_I2C) += rm3100-i2c.o
obj-$(CONFIG_SENSORS_RM3100_SPI) += rm3100-spi.o
+
+obj-$(CONFIG_YAMAHA_YAS530) += yamaha-yas530.o
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index fa09fcab620a..b2f3129e1b4f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -25,6 +25,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include "bmc150_magn.h"
@@ -135,6 +136,7 @@ struct bmc150_magn_data {
*/
struct mutex mutex;
struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
struct iio_mount_matrix orientation;
/* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
s32 buffer[6];
@@ -692,12 +694,24 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
int ret, chip_id;
struct bmc150_magn_preset preset;
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+ /*
+ * The datasheet specifies a 3 ms power-on time; to be safe rather
+ * than sorry, delay for 5 ms here.
+ */
+ msleep(5);
+
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND,
false);
if (ret < 0) {
dev_err(data->dev,
"Failed to bring up device from suspend mode\n");
- return ret;
+ goto err_regulator_disable;
}
ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id);
@@ -752,6 +766,8 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
err_poweroff:
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
+err_regulator_disable:
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
return ret;
}
@@ -867,6 +883,13 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
data->irq = irq;
data->dev = dev;
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
@@ -984,6 +1007,7 @@ int bmc150_magn_remove(struct device *dev)
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
mutex_unlock(&data->mutex);
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
return 0;
}
EXPORT_SYMBOL(bmc150_magn_remove);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 97642ebd9168..fa48044b7f5b 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -24,6 +24,7 @@ enum magn_3d_channel {
CHANNEL_SCAN_INDEX_NORTH_TRUE_TILT_COMP,
CHANNEL_SCAN_INDEX_NORTH_MAGN,
CHANNEL_SCAN_INDEX_NORTH_TRUE,
+ CHANNEL_SCAN_INDEX_TIMESTAMP,
MAGN_3D_CHANNEL_MAX,
};
@@ -47,6 +48,7 @@ struct magn_3d_state {
struct common_attributes magn_flux_attr;
struct common_attributes rot_attr;
+ s64 timestamp;
};
static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -57,6 +59,7 @@ static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH,
HID_USAGE_SENSOR_ORIENT_MAGN_NORTH,
HID_USAGE_SENSOR_ORIENT_TRUE_NORTH,
+ HID_USAGE_SENSOR_TIME_TIMESTAMP,
};
/* Channel definitions */
@@ -124,7 +127,8 @@ static const struct iio_chan_spec magn_3d_channels[] = {
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(7)
};
/* Adjust channel real bits based on report descriptor */
@@ -273,13 +277,6 @@ static const struct iio_info magn_3d_info = {
.write_raw = &magn_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -289,8 +286,15 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
- if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
- hid_sensor_push_data(indio_dev, magn_state->iio_vals);
+ if (atomic_read(&magn_state->magn_flux_attributes.data_ready)) {
+ if (!magn_state->timestamp)
+ magn_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ magn_state->iio_vals,
+ magn_state->timestamp);
+ magn_state->timestamp = 0;
+ }
return 0;
}
@@ -321,6 +325,11 @@ static int magn_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
offset = (usage_id - HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH)
+ CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ magn_state->timestamp =
+ hid_sensor_convert_timestamp(&magn_state->magn_flux_attributes,
+ *(s64 *)raw_data);
+ return ret;
default:
return -EINVAL;
}
@@ -386,9 +395,10 @@ static int magn_3d_parse_report(struct platform_device *pdev,
return -ENOMEM;
}
- st->iio_vals = devm_kcalloc(&pdev->dev, attr_count,
- sizeof(u32),
- GFP_KERNEL);
+ /* attr_count includes the timestamp channel; pad iio_vals to an even number of u32s for 8-byte alignment */
+ st->iio_vals = devm_kcalloc(&pdev->dev,
+ ((attr_count + 1) % 2 + (attr_count + 1) / 2) * 2,
+ sizeof(u32), GFP_KERNEL);
if (!st->iio_vals) {
dev_err(&pdev->dev,
"failed to allocate space for iio values array\n");
@@ -404,11 +414,13 @@ static int magn_3d_parse_report(struct platform_device *pdev,
(_channels[*chan_count]).scan_index = *chan_count;
(_channels[*chan_count]).address = i;
- /* Set magn_val_addr to iio value address */
- st->magn_val_addr[i] = &(st->iio_vals[*chan_count]);
- magn_3d_adjust_channel_bit_mask(_channels,
- *chan_count,
- st->magn[i].size);
+ if (i != CHANNEL_SCAN_INDEX_TIMESTAMP) {
+ /* Set magn_val_addr to iio value address */
+ st->magn_val_addr[i] = &st->iio_vals[*chan_count];
+ magn_3d_adjust_channel_bit_mask(_channels,
+ *chan_count,
+ st->magn[i].size);
+ }
(*chan_count)++;
}
}
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
new file mode 100644
index 000000000000..d46f23d82b3d
--- /dev/null
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Yamaha YAS magnetic sensors, often used in Samsung
+ * mobile phones. Not all variants are handled yet, for lack of
+ * hardware to test on; expand this driver to handle them as needed:
+ *
+ * YAS530 MS-3E (2011 Samsung Galaxy S Advance)
+ * YAS532 MS-3R (2011 Samsung Galaxy S4)
+ * YAS533 MS-3F (Vivo 1633, 1707, V3, Y21L)
+ * (YAS534 is a magnetic switch, not handled)
+ * YAS535 MS-6C
+ * YAS536 MS-3W
+ * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Xiaomi)
+ * YAS539 MS-3S (2018 Samsung Galaxy A7 SM-A750FN)
+ *
+ * Based on code found in the MPU3050 YAS530 and YAS532 drivers,
+ * named "inv_compass", in the Tegra Android kernel tree.
+ * Copyright (C) 2012 InvenSense Corporation
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/random.h>
+#include <linux/unaligned/be_byteshift.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
+#define YAS5XX_DEVICE_ID 0x80
+#define YAS5XX_ACTUATE_INIT_COIL 0x81
+#define YAS5XX_MEASURE 0x82
+#define YAS5XX_CONFIG 0x83
+#define YAS5XX_MEASURE_INTERVAL 0x84
+#define YAS5XX_OFFSET_X 0x85 /* [-31 .. 31] */
+#define YAS5XX_OFFSET_Y1 0x86 /* [-31 .. 31] */
+#define YAS5XX_OFFSET_Y2 0x87 /* [-31 .. 31] */
+#define YAS5XX_TEST1 0x88
+#define YAS5XX_TEST2 0x89
+#define YAS5XX_CAL 0x90
+#define YAS5XX_MEASURE_DATA 0xB0
+
+/* Bits in the YAS5xx config register */
+#define YAS5XX_CONFIG_INTON BIT(0) /* Interrupt on? */
+#define YAS5XX_CONFIG_INTHACT BIT(1) /* Interrupt active high? */
+#define YAS5XX_CONFIG_CCK_MASK GENMASK(4, 2)
+#define YAS5XX_CONFIG_CCK_SHIFT 2
+
+/* Bits in the measure command register */
+#define YAS5XX_MEASURE_START BIT(0)
+#define YAS5XX_MEASURE_LDTC BIT(1)
+#define YAS5XX_MEASURE_FORS BIT(2)
+#define YAS5XX_MEASURE_DLYMES BIT(4)
+
+/* Bits in the measure data register */
+#define YAS5XX_MEASURE_DATA_BUSY BIT(7)
+
+#define YAS530_DEVICE_ID 0x01 /* YAS530 (MS-3E) */
+#define YAS530_VERSION_A 0 /* YAS530 (MS-3E A) */
+#define YAS530_VERSION_B 1 /* YAS530B (MS-3E B) */
+#define YAS530_VERSION_A_COEF 380
+#define YAS530_VERSION_B_COEF 550
+#define YAS530_DATA_BITS 12
+#define YAS530_DATA_CENTER BIT(YAS530_DATA_BITS - 1)
+#define YAS530_DATA_OVERFLOW (BIT(YAS530_DATA_BITS) - 1)
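+/* With 12 data bits, the center code is 2048 and overflow is 4095 */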
+
+#define YAS532_DEVICE_ID 0x02 /* YAS532/YAS533 (MS-3R/F) */
+#define YAS532_VERSION_AB 0 /* YAS532/533 AB (MS-3R/F AB) */
+#define YAS532_VERSION_AC 1 /* YAS532/533 AC (MS-3R/F AC) */
+#define YAS532_VERSION_AB_COEF 1800
+#define YAS532_VERSION_AC_COEF_X 850
+#define YAS532_VERSION_AC_COEF_Y1 750
+#define YAS532_VERSION_AC_COEF_Y2 750
+#define YAS532_DATA_BITS 13
+#define YAS532_DATA_CENTER BIT(YAS532_DATA_BITS - 1)
+#define YAS532_DATA_OVERFLOW (BIT(YAS532_DATA_BITS) - 1)
+#define YAS532_20DEGREES 390 /* Looks like Kelvin */
+
+/* These variant IDs are known from code dumps */
+#define YAS537_DEVICE_ID 0x07 /* YAS537 (MS-3T) */
+#define YAS539_DEVICE_ID 0x08 /* YAS539 (MS-3S) */
+
+/* Turn off device regulators etc after 5 seconds of inactivity */
+#define YAS5XX_AUTOSUSPEND_DELAY_MS 5000
+
+struct yas5xx_calibration {
+ /* Linearization calibration x, y1, y2 */
+ s32 r[3];
+ u32 f[3];
+ /* Temperature compensation calibration */
+ s32 Cx, Cy1, Cy2;
+ /* Misc calibration coefficients */
+ s32 a2, a3, a4, a5, a6, a7, a8, a9, k;
+ /* clock divider */
+ u8 dck;
+};
+
+/**
+ * struct yas5xx - state container for the YAS5xx driver
+ * @dev: parent device pointer
+ * @devid: device ID number
+ * @version: device version
+ * @name: device name
+ * @calibration: calibration settings from the OTP storage
+ * @hard_offsets: offsets for each axis measured with initcoil actuated
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to access the YAX5xx registers over I2C
+ * @regs: the vdd and vddio power regulators
+ * @reset: optional GPIO line used for handling RESET
+ * @lock: locks the magnetometer for exclusive use during a measurement (which
+ * involves several register transactions so the regmap lock is not enough)
+ * so that measurements get serialized in a first-come, first-served manner
+ * @scan: naturally aligned measurements
+ */
+struct yas5xx {
+ struct device *dev;
+ unsigned int devid;
+ unsigned int version;
+ char name[16];
+ struct yas5xx_calibration calibration;
+ u8 hard_offsets[3];
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct regulator_bulk_data regs[2];
+ struct gpio_desc *reset;
+ struct mutex lock;
+ /*
+ * The scanout is 4 x 32 bits in CPU endianness.
+ * Ensure timestamp is naturally aligned
+ */
+ struct {
+ s32 channels[4];
+ s64 ts __aligned(8);
+ } scan;
+};
+
+/* On YAS530 the x, y1 and y2 values are 12 bits */
+static u16 yas530_extract_axis(u8 *data)
+{
+ u16 val;
+
+ /*
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 3), val);
+ return val;
+}
+
+/* On YAS532 the x, y1 and y2 values are 13 bits */
+static u16 yas532_extract_axis(u8 *data)
+{
+ u16 val;
+
+ /*
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 2), val);
+ return val;
+}
+
+/**
+ * yas5xx_measure() - Make a measure from the hardware
+ * @yas5xx: The device state
+ * @t: the raw temperature measurement
+ * @x: the raw x axis measurement
+ * @y1: the y1 axis measurement
+ * @y2: the y2 axis measurement
+ * Return: 0 on success or error code
+ */
+static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
+{
+ unsigned int busy;
+ u8 data[8];
+ int ret;
+ u16 val;
+
+ mutex_lock(&yas5xx->lock);
+ ret = regmap_write(yas5xx->map, YAS5XX_MEASURE, YAS5XX_MEASURE_START);
+ if (ret < 0)
+ goto out_unlock;
+
+ /*
+ * Typical measurement time is 1500 us with a max of 2000 us, so poll
+ * every 500 us and time out after 20000 us (an order of magnitude
+ * more than the datasheet max).
+ */
+ ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA, busy,
+ !(busy & YAS5XX_MEASURE_DATA_BUSY),
+ 500, 20000);
+ if (ret) {
+ dev_err(yas5xx->dev, "timeout waiting for measurement\n");
+ goto out_unlock;
+ }
+
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
+ data, sizeof(data));
+ if (ret)
+ goto out_unlock;
+
+ mutex_unlock(&yas5xx->lock);
+
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ /*
+ * The t value is 9 bits in big endian format
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 6), val);
+ *t = val;
+ *x = yas530_extract_axis(&data[2]);
+ *y1 = yas530_extract_axis(&data[4]);
+ *y2 = yas530_extract_axis(&data[6]);
+ break;
+ case YAS532_DEVICE_ID:
+ /*
+ * The t value is 10 bits in big endian format
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 5), val);
+ *t = val;
+ *x = yas532_extract_axis(&data[2]);
+ *y1 = yas532_extract_axis(&data[4]);
+ *y2 = yas532_extract_axis(&data[6]);
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown data format\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+
+out_unlock:
+ mutex_unlock(&yas5xx->lock);
+ return ret;
+}
+
+static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ static const s32 yas532ac_coef[] = {
+ YAS532_VERSION_AC_COEF_X,
+ YAS532_VERSION_AC_COEF_Y1,
+ YAS532_VERSION_AC_COEF_Y2,
+ };
+ s32 coef;
+
+ /* Select coefficients */
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ if (yas5xx->version == YAS530_VERSION_A)
+ coef = YAS530_VERSION_A_COEF;
+ else
+ coef = YAS530_VERSION_B_COEF;
+ break;
+ case YAS532_DEVICE_ID:
+ if (yas5xx->version == YAS532_VERSION_AB)
+ coef = YAS532_VERSION_AB_COEF;
+ else
+ /* Elaborate coefficients */
+ coef = yas532ac_coef[axis];
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown device type\n");
+ return val;
+ }
+ /*
+ * Linearization formula:
+ *
+ * x' = x - (3721 + 50 * f) + (xoffset - r) * c
+ *
+ * Where f and r are calibration values, c is a per-device
+ * and sometimes per-axis coefficient.
+ */
+ return val - (3721 + 50 * c->f[axis]) +
+ (yas5xx->hard_offsets[axis] - c->r[axis]) * coef;
+}
+
+/**
+ * yas5xx_get_measure() - Measure a sample of all axis and process
+ * @yas5xx: The device state
+ * @to: Temperature out
+ * @xo: X axis out
+ * @yo: Y axis out
+ * @zo: Z axis out
+ * Return: 0 on success or error code
+ *
+ * Returned values are in nanotesla according to some code.
+ */
+static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u16 t, x, y1, y2;
+ /* These are the "signed" x, y1, y2 and the derived y, z values */
+ s32 sx, sy1, sy2, sy, sz;
+ int ret;
+
+ /* We first get raw data that needs to be translated to [x,y,z] */
+ ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+
+ /* Do some linearization if available */
+ sx = yas5xx_linearize(yas5xx, x, 0);
+ sy1 = yas5xx_linearize(yas5xx, y1, 1);
+ sy2 = yas5xx_linearize(yas5xx, y2, 2);
+
+ /*
+ * Temperature compensation for x, y1, y2 respectively:
+ *
+ * Cx * t
+ * x' = x - ------
+ * 100
+ */
+ sx = sx - (c->Cx * t) / 100;
+ sy1 = sy1 - (c->Cy1 * t) / 100;
+ sy2 = sy2 - (c->Cy2 * t) / 100;
+
+ /*
+ * Break y1 and y2 into y and z, y1 and y2 are apparently encoding
+ * y and z.
+ */
+ sy = sy1 - sy2;
+ sz = -sy1 - sy2;
+
+ /*
+ * FIXME: convert to Celsius? Just guessing this is given in
+ * tenths of a degree, so multiply by 100 to get millidegrees.
+ */
+ *to = t * 100;
+ /*
+ * Calibrate [x,y,z] with some formulas like this:
+ *
+ * 100 * x + a_2 * y + a_3 * z
+ * x' = k * ---------------------------
+ * 10
+ *
+ * a_4 * x + a_5 * y + a_6 * z
+ * y' = k * ---------------------------
+ * 10
+ *
+ * a_7 * x + a_8 * y + a_9 * z
+ * z' = k * ---------------------------
+ * 10
+ */
+ *xo = c->k * ((100 * sx + c->a2 * sy + c->a3 * sz) / 10);
+ *yo = c->k * ((c->a4 * sx + c->a5 * sy + c->a6 * sz) / 10);
+ *zo = c->k * ((c->a7 * sx + c->a8 * sy + c->a9 * sz) / 10);
+
+ return 0;
+}
+
+static int yas5xx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ s32 t, x, y, z;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ pm_runtime_get_sync(yas5xx->dev);
+ ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ pm_runtime_mark_last_busy(yas5xx->dev);
+ pm_runtime_put_autosuspend(yas5xx->dev);
+ if (ret)
+ return ret;
+ switch (chan->address) {
+ case 0:
+ *val = t;
+ break;
+ case 1:
+ *val = x;
+ break;
+ case 2:
+ *val = y;
+ break;
+ case 3:
+ *val = z;
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown channel\n");
+ return -EINVAL;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->address == 0) {
+ /* Temperature is unscaled */
+ *val = 1;
+ return IIO_VAL_INT;
+ }
+ /*
+ * The axis values are in nanotesla according to the vendor
+ * drivers, but is clearly in microtesla according to
+ * experiments. Since 1 uT = 0.01 Gauss, we need to divide
+ * by 100000000 (10^8) to get to Gauss from the raw value.
+ */
+ *val = 1;
+ *val2 = 100000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ /* Unknown request */
+ return -EINVAL;
+ }
+}
+
+static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ s32 t, x, y, z;
+ int ret;
+
+ pm_runtime_get_sync(yas5xx->dev);
+ ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ pm_runtime_mark_last_busy(yas5xx->dev);
+ pm_runtime_put_autosuspend(yas5xx->dev);
+ if (ret) {
+ dev_err(yas5xx->dev, "error refilling buffer\n");
+ return;
+ }
+ yas5xx->scan.channels[0] = t;
+ yas5xx->scan.channels[1] = x;
+ yas5xx->scan.channels[2] = y;
+ yas5xx->scan.channels[3] = z;
+ iio_push_to_buffers_with_timestamp(indio_dev, &yas5xx->scan,
+ iio_get_time_ns(indio_dev));
+}
+
+static irqreturn_t yas5xx_handle_trigger(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ yas5xx_fill_buffer(indio_dev);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_mount_matrix *
+yas5xx_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+
+ return &yas5xx->orientation;
+}
+
+static const struct iio_chan_spec_ext_info yas5xx_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, yas5xx_get_mount_matrix),
+ { }
+};
+
+#define YAS5XX_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = yas5xx_ext_info, \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec yas5xx_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = 0,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ YAS5XX_AXIS_CHANNEL(X, 1),
+ YAS5XX_AXIS_CHANNEL(Y, 2),
+ YAS5XX_AXIS_CHANNEL(Z, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
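+/* A single scan mask is offered: all four channels are captured together */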
+static const unsigned long yas5xx_scan_masks[] = { GENMASK(3, 0), 0 };
+
+static const struct iio_info yas5xx_info = {
+ .read_raw = &yas5xx_read_raw,
+};
+
+static bool yas5xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == YAS5XX_ACTUATE_INIT_COIL ||
+ reg == YAS5XX_MEASURE ||
+ (reg >= YAS5XX_MEASURE_DATA && reg <= YAS5XX_MEASURE_DATA + 8);
+}
+
+/* TODO: enable the regmap cache, marking it dirty and syncing it at runtime resume */
+static const struct regmap_config yas5xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .volatile_reg = yas5xx_volatile_reg,
+};
+
+/**
+ * yas53x_extract_calibration() - extracts the a2-a9 and k calibration
+ * @data: the bitfield to use
+ * @c: the calibration to populate
+ */
+static void yas53x_extract_calibration(u8 *data, struct yas5xx_calibration *c)
+{
+ u64 val = get_unaligned_be64(data);
+
+ /*
+ * Bitfield layout for the axis calibration data, for factor
+ * a2 = 2 etc, k = k, c = clock divider
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ 2 2 2 2 2 2 3 3 ] bits 63 .. 56
+ * 1 [ 3 3 4 4 4 4 4 4 ] bits 55 .. 48
+ * 2 [ 5 5 5 5 5 5 6 6 ] bits 47 .. 40
+ * 3 [ 6 6 6 6 7 7 7 7 ] bits 39 .. 32
+ * 4 [ 7 7 7 8 8 8 8 8 ] bits 31 .. 24
+ * 5 [ 8 9 9 9 9 9 9 9 ] bits 23 .. 16
+ * 6 [ 9 k k k k k c c ] bits 15 .. 8
+ * 7 [ c x x x x x x x ] bits 7 .. 0
+ */
+ c->a2 = FIELD_GET(GENMASK_ULL(63, 58), val) - 32;
+ c->a3 = FIELD_GET(GENMASK_ULL(57, 54), val) - 8;
+ c->a4 = FIELD_GET(GENMASK_ULL(53, 48), val) - 32;
+ c->a5 = FIELD_GET(GENMASK_ULL(47, 42), val) + 38;
+ c->a6 = FIELD_GET(GENMASK_ULL(41, 36), val) - 32;
+ c->a7 = FIELD_GET(GENMASK_ULL(35, 29), val) - 64;
+ c->a8 = FIELD_GET(GENMASK_ULL(28, 23), val) - 32;
+ c->a9 = FIELD_GET(GENMASK_ULL(22, 15), val);
+ c->k = FIELD_GET(GENMASK_ULL(14, 10), val) + 10;
+ c->dck = FIELD_GET(GENMASK_ULL(9, 7), val);
+}
+
+static int yas530_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[16];
+ u32 val;
+ int ret;
+
+ /* Dummy read, first read is ALWAYS wrong */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+
+ /* Actual calibration readout */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", (int)sizeof(data), data);
+
+ add_device_randomness(data, sizeof(data));
+ yas5xx->version = data[15] & GENMASK(1, 0);
+
+ /* Extract the calibration from the bitfield */
+ c->Cx = data[0] * 6 - 768;
+ c->Cy1 = data[1] * 6 - 768;
+ c->Cy2 = data[2] * 6 - 768;
+ yas53x_extract_calibration(&data[3], c);
+
+ /*
+ * Extract linearization:
+ * Linearization layout in the 32 bits at byte 11:
+ * The r factors are 6 bit values where bit 5 is the sign
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ xx xx xx r0 r0 r0 r0 r0 ] bits 31 .. 24
+ * 1 [ r0 f0 f0 r1 r1 r1 r1 r1 ] bits 23 .. 16
+ * 2 [ r1 f1 f1 r2 r2 r2 r2 r2 ] bits 15 .. 8
+ * 3 [ r2 f2 f2 xx xx xx xx xx ] bits 7 .. 0
+ */
+ val = get_unaligned_be32(&data[11]);
+ c->f[0] = FIELD_GET(GENMASK(22, 21), val);
+ c->f[1] = FIELD_GET(GENMASK(14, 13), val);
+ c->f[2] = FIELD_GET(GENMASK(6, 5), val);
+ c->r[0] = sign_extend32(FIELD_GET(GENMASK(28, 23), val), 5);
+ c->r[1] = sign_extend32(FIELD_GET(GENMASK(20, 15), val), 5);
+ c->r[2] = sign_extend32(FIELD_GET(GENMASK(12, 7), val), 5);
+ return 0;
+}
+
+static int yas532_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[14];
+ u32 val;
+ int ret;
+
+ /* Dummy read, first read is ALWAYS wrong */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ /* Actual calibration readout */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+
+ /* Sanity check: warn if the calibration data is all zeroes */
+ if (!memchr_inv(data, 0x00, 13) && !(data[13] & BIT(7)))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+
+ add_device_randomness(data, sizeof(data));
+ /* Only one bit of version info reserved here as far as we know */
+ yas5xx->version = data[13] & BIT(0);
+
+ /* Extract calibration from the bitfield */
+ c->Cx = data[0] * 10 - 1280;
+ c->Cy1 = data[1] * 10 - 1280;
+ c->Cy2 = data[2] * 10 - 1280;
+ yas53x_extract_calibration(&data[3], c);
+ /*
+ * Extract linearization:
+ * Linearization layout in the 32 bits at byte 10:
+ * The r factors are 6 bit values where bit 5 is the sign
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ xx r0 r0 r0 r0 r0 r0 f0 ] bits 31 .. 24
+ * 1 [ f0 r1 r1 r1 r1 r1 r1 f1 ] bits 23 .. 16
+ * 2 [ f1 r2 r2 r2 r2 r2 r2 f2 ] bits 15 .. 8
+ * 3 [ f2 xx xx xx xx xx xx xx ] bits 7 .. 0
+ */
+ val = get_unaligned_be32(&data[10]);
+ c->f[0] = FIELD_GET(GENMASK(24, 23), val);
+ c->f[1] = FIELD_GET(GENMASK(16, 15), val);
+ c->f[2] = FIELD_GET(GENMASK(8, 7), val);
+ c->r[0] = sign_extend32(FIELD_GET(GENMASK(30, 25), val), 5);
+ c->r[1] = sign_extend32(FIELD_GET(GENMASK(22, 17), val), 5);
+ c->r[2] = sign_extend32(FIELD_GET(GENMASK(14, 9), val), 5);
+
+ return 0;
+}
+
+static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+
+ dev_dbg(yas5xx->dev, "f[] = [%d, %d, %d]\n",
+ c->f[0], c->f[1], c->f[2]);
+ dev_dbg(yas5xx->dev, "r[] = [%d, %d, %d]\n",
+ c->r[0], c->r[1], c->r[2]);
+ dev_dbg(yas5xx->dev, "Cx = %d\n", c->Cx);
+ dev_dbg(yas5xx->dev, "Cy1 = %d\n", c->Cy1);
+ dev_dbg(yas5xx->dev, "Cy2 = %d\n", c->Cy2);
+ dev_dbg(yas5xx->dev, "a2 = %d\n", c->a2);
+ dev_dbg(yas5xx->dev, "a3 = %d\n", c->a3);
+ dev_dbg(yas5xx->dev, "a4 = %d\n", c->a4);
+ dev_dbg(yas5xx->dev, "a5 = %d\n", c->a5);
+ dev_dbg(yas5xx->dev, "a6 = %d\n", c->a6);
+ dev_dbg(yas5xx->dev, "a7 = %d\n", c->a7);
+ dev_dbg(yas5xx->dev, "a8 = %d\n", c->a8);
+ dev_dbg(yas5xx->dev, "a9 = %d\n", c->a9);
+ dev_dbg(yas5xx->dev, "k = %d\n", c->k);
+ dev_dbg(yas5xx->dev, "dck = %d\n", c->dck);
+}
+
+static int yas5xx_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
+{
+ int ret;
+
+ ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_X, ox);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_Y1, oy1);
+ if (ret)
+ return ret;
+ return regmap_write(yas5xx->map, YAS5XX_OFFSET_Y2, oy2);
+}
+
+static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
+{
+ if (measure > center)
+ return old + BIT(bit);
+ if (measure < center)
+ return old - BIT(bit);
+ return old;
+}
+
+static int yas5xx_measure_offsets(struct yas5xx *yas5xx)
+{
+ int ret;
+ u16 center;
+ u16 t, x, y1, y2;
+ s8 ox, oy1, oy2;
+ int i;
+
+ /* Actuate the init coil and measure offsets */
+ ret = regmap_write(yas5xx->map, YAS5XX_ACTUATE_INIT_COIL, 0);
+ if (ret)
+ return ret;
+
+ /* When the initcoil is active this should be around the center */
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ center = YAS530_DATA_CENTER;
+ break;
+ case YAS532_DEVICE_ID:
+ center = YAS532_DATA_CENTER;
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown device type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We set the offsets in the interval +-31 by successive
+ * approximation: iterate +-16, +-8, +-4, +-2, +-1, adjusting
+ * the offsets each time, then write the final offsets into
+ * the registers.
+ *
+ * NOTE: these offsets are NOT in the same unit or magnitude
+ * as the values for [x, y1, y2]. The value is +/-31
+ * but the effect on the raw values is much larger.
+ * The effect of the offset is to bring the measure
+ * roughly to the center. (A standalone model of this search
+ * follows this file's diff.)
+ */
+ ox = 0;
+ oy1 = 0;
+ oy2 = 0;
+
+ for (i = 4; i >= 0; i--) {
+ ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ if (ret)
+ return ret;
+
+ ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "measurement %d: x=%d, y1=%d, y2=%d\n",
+ 5-i, x, y1, y2);
+
+ ox = yas5xx_adjust_offset(ox, i, center, x);
+ oy1 = yas5xx_adjust_offset(oy1, i, center, y1);
+ oy2 = yas5xx_adjust_offset(oy2, i, center, y2);
+ }
+
+ /* Needed for calibration algorithm */
+ yas5xx->hard_offsets[0] = ox;
+ yas5xx->hard_offsets[1] = oy1;
+ yas5xx->hard_offsets[2] = oy2;
+ ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ if (ret)
+ return ret;
+
+ dev_info(yas5xx->dev, "discovered hard offsets: x=%d, y1=%d, y2=%d\n",
+ ox, oy1, oy2);
+ return 0;
+}
+
+static int yas5xx_power_on(struct yas5xx *yas5xx)
+{
+ unsigned int val;
+ int ret;
+
+ /* Zero the test registers */
+ ret = regmap_write(yas5xx->map, YAS5XX_TEST1, 0);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS5XX_TEST2, 0);
+ if (ret)
+ return ret;
+
+ /* Set up for no interrupts, calibrated clock divider */
+ val = FIELD_PREP(YAS5XX_CONFIG_CCK_MASK, yas5xx->calibration.dck);
+ ret = regmap_write(yas5xx->map, YAS5XX_CONFIG, val);
+ if (ret)
+ return ret;
+
+ /* Measure interval 0 (back-to-back?) */
+ return regmap_write(yas5xx->map, YAS5XX_MEASURE_INTERVAL, 0);
+}
+
+static int yas5xx_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct device *dev = &i2c->dev;
+ struct yas5xx *yas5xx;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*yas5xx));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ yas5xx = iio_priv(indio_dev);
+ i2c_set_clientdata(i2c, indio_dev);
+ yas5xx->dev = dev;
+ mutex_init(&yas5xx->lock);
+
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &yas5xx->orientation);
+ if (ret)
+ return ret;
+
+ yas5xx->regs[0].supply = "vdd";
+ yas5xx->regs[1].supply = "iovdd";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(yas5xx->regs),
+ yas5xx->regs);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+ if (ret) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /* See comment in runtime resume callback */
+ usleep_range(31000, 40000);
+
+ /* This will take the device out of reset if need be */
+ yas5xx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(yas5xx->reset)) {
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset),
+ "failed to get reset line\n");
+ goto reg_off;
+ }
+
+ yas5xx->map = devm_regmap_init_i2c(i2c, &yas5xx_regmap_config);
+ if (IS_ERR(yas5xx->map)) {
+ dev_err(dev, "failed to allocate register map\n");
+ ret = PTR_ERR(yas5xx->map);
+ goto assert_reset;
+ }
+
+ ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &yas5xx->devid);
+ if (ret)
+ goto assert_reset;
+
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ ret = yas530_get_calibration_data(yas5xx);
+ if (ret)
+ goto assert_reset;
+ dev_info(dev, "detected YAS530 MS-3E %s",
+ yas5xx->version ? "B" : "A");
+ strncpy(yas5xx->name, "yas530", sizeof(yas5xx->name));
+ break;
+ case YAS532_DEVICE_ID:
+ ret = yas532_get_calibration_data(yas5xx);
+ if (ret)
+ goto assert_reset;
+ dev_info(dev, "detected YAS532/YAS533 MS-3R/F %s",
+ yas5xx->version ? "AC" : "AB");
+ strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
+ break;
+ default:
+ dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
+ goto assert_reset;
+ }
+
+ yas5xx_dump_calibration(yas5xx);
+ ret = yas5xx_power_on(yas5xx);
+ if (ret)
+ goto assert_reset;
+ ret = yas5xx_measure_offsets(yas5xx);
+ if (ret)
+ goto assert_reset;
+
+ indio_dev->info = &yas5xx_info;
+ indio_dev->available_scan_masks = yas5xx_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = yas5xx->name;
+ indio_dev->channels = yas5xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(yas5xx_channels);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ yas5xx_handle_trigger,
+ NULL);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto assert_reset;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "device register failed\n");
+ goto cleanup_buffer;
+ }
+
+ /* Take runtime PM online */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ pm_runtime_set_autosuspend_delay(dev, YAS5XX_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+assert_reset:
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+reg_off:
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return ret;
+}
+
+static int yas5xx_remove(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ struct device *dev = &i2c->dev;
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ /*
+ * Now we can't get any more reads from the device, which would
+ * also call pm_runtime* functions and race with our disable
+ * code. Disable PM runtime in orderly fashion and power down.
+ */
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return 0;
+}
+
+static int __maybe_unused yas5xx_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return 0;
+}
+
+static int __maybe_unused yas5xx_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+ if (ret) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /*
+ * The YAS530 datasheet says TVSKW is up to 30 ms, after which 1 ms
+ * is needed for all voltages to settle. For the YAS532 it is 10 ms,
+ * then 4 ms for the I2C to come online. Keep it safe and wait 31 ms.
+ */
+ usleep_range(31000, 40000);
+ gpiod_set_value_cansleep(yas5xx->reset, 0);
+
+ ret = yas5xx_power_on(yas5xx);
+ if (ret) {
+ dev_err(dev, "cannot power on\n");
+ goto out_reset;
+ }
+
+ return 0;
+
+out_reset:
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return ret;
+}
+
+static const struct dev_pm_ops yas5xx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(yas5xx_runtime_suspend,
+ yas5xx_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id yas5xx_id[] = {
+ {"yas530", },
+ {"yas532", },
+ {"yas533", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, yas5xx_id);
+
+static const struct of_device_id yas5xx_of_match[] = {
+ { .compatible = "yamaha,yas530", },
+ { .compatible = "yamaha,yas532", },
+ { .compatible = "yamaha,yas533", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yas5xx_of_match);
+
+static struct i2c_driver yas5xx_driver = {
+ .driver = {
+ .name = "yas5xx",
+ .of_match_table = yas5xx_of_match,
+ .pm = &yas5xx_dev_pm_ops,
+ },
+ .probe = yas5xx_probe,
+ .remove = yas5xx_remove,
+ .id_table = yas5xx_id,
+};
+module_i2c_driver(yas5xx_driver);
+
+MODULE_DESCRIPTION("Yamaha YAS53x 3-axis magnetometer driver");
+MODULE_AUTHOR("Linus Walleij");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index ae132a93bcae..52ebef30f9be 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -24,15 +24,21 @@ enum incl_3d_channel {
INCLI_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP INCLI_3D_CHANNEL_MAX
+
struct incl_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
- u32 incl_val[INCLI_3D_CHANNEL_MAX];
+ struct {
+ u32 incl_val[INCLI_3D_CHANNEL_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
@@ -73,7 +79,8 @@ static const struct iio_chan_spec incl_3d_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP),
};
/* Adjust channel real bits based on report descriptor */
@@ -178,13 +185,6 @@ static const struct iio_info incl_3d_info = {
.write_raw = &incl_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -194,10 +194,16 @@ static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct incl_3d_state *incl_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "incl_3d_proc_event\n");
- if (atomic_read(&incl_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- (u8 *)incl_state->incl_val,
- sizeof(incl_state->incl_val));
+ if (atomic_read(&incl_state->common_attributes.data_ready)) {
+ if (!incl_state->timestamp)
+ incl_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &incl_state->scan,
+ incl_state->timestamp);
+
+ incl_state->timestamp = 0;
+ }
return 0;
}
@@ -214,13 +220,18 @@ static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_ORIENT_TILT_X:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Y:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Z:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ incl_state->timestamp =
+ hid_sensor_convert_timestamp(&incl_state->common_attributes,
+ *(s64 *)raw_data);
break;
default:
ret = -EINVAL;
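The scan struct introduced above (and repeated in the rotation and hinge
drivers below) exists because iio_push_to_buffers_with_timestamp() stores
the s64 timestamp in the last 8-byte-aligned slot of the scan buffer. A
minimal compile-time sketch of the resulting layout (names illustrative):

#include <stdint.h>
#include <stddef.h>

struct incl_scan_model {
	uint32_t incl_val[3];	/* 12 bytes of sample payload */
	/* the compiler inserts 4 bytes of padding here */
	uint64_t timestamp __attribute__((aligned(8)));
};

_Static_assert(offsetof(struct incl_scan_model, timestamp) == 16,
	       "timestamp must land on an 8-byte boundary");
_Static_assert(sizeof(struct incl_scan_model) == 24,
	       "the buffer is sized in whole 8-byte steps");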
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 23bc61a7f018..18e4ef060096 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -20,11 +20,15 @@ struct dev_rot_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info quaternion;
- u32 sampled_vals[4];
+ struct {
+ u32 sampled_vals[4] __aligned(16);
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
/* Channel definitions */
@@ -37,8 +41,10 @@ static const struct iio_chan_spec dev_rot_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_HYSTERESIS)
- }
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = 0
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
};
/* Adjust channel real bits based on report descriptor */
@@ -70,7 +76,7 @@ static int dev_rot_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
if (size >= 4) {
for (i = 0; i < 4; ++i)
- vals[i] = rot_state->sampled_vals[i];
+ vals[i] = rot_state->scan.sampled_vals[i];
ret_type = IIO_VAL_INT_MULTIPLE;
*val_len = 4;
} else
@@ -132,15 +138,6 @@ static const struct iio_info dev_rot_info = {
.write_raw = &dev_rot_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data >>\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data <<\n");
-
-}
-
/* Callback handler to send event after all samples are received and captured */
static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -150,10 +147,15 @@ static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
struct dev_rot_state *rot_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "dev_rot_proc_event\n");
- if (atomic_read(&rot_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- (u8 *)rot_state->sampled_vals,
- sizeof(rot_state->sampled_vals));
+ if (atomic_read(&rot_state->common_attributes.data_ready)) {
+ if (!rot_state->timestamp)
+ rot_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &rot_state->scan,
+ rot_state->timestamp);
+
+ rot_state->timestamp = 0;
+ }
return 0;
}
@@ -168,10 +170,14 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
struct dev_rot_state *rot_state = iio_priv(indio_dev);
if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
- memcpy(rot_state->sampled_vals, raw_data,
- sizeof(rot_state->sampled_vals));
+ memcpy(&rot_state->scan.sampled_vals, raw_data,
+ sizeof(rot_state->scan.sampled_vals));
+
dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
- sizeof(rot_state->sampled_vals));
+ sizeof(rot_state->scan.sampled_vals));
+ } else if (usage_id == HID_USAGE_SENSOR_TIME_TIMESTAMP) {
+ rot_state->timestamp = hid_sensor_convert_timestamp(&rot_state->common_attributes,
+ *(s64 *)raw_data);
}
return 0;
diff --git a/drivers/iio/position/Kconfig b/drivers/iio/position/Kconfig
index eda67f008c5b..1576a6380b53 100644
--- a/drivers/iio/position/Kconfig
+++ b/drivers/iio/position/Kconfig
@@ -16,4 +16,20 @@ config IQS624_POS
To compile this driver as a module, choose M here: the module
will be called iqs624-pos.
+config HID_SENSOR_CUSTOM_INTEL_HINGE
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Hinge"
+ help
+ This sensor presents three angles: the hinge angle, the
+ screen angle and the keyboard angle with respect to the
+ horizon (ground).
+ Say yes here to build support for the HID custom
+ Intel hinge sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-custom-hinge.
+
endmenu
diff --git a/drivers/iio/position/Makefile b/drivers/iio/position/Makefile
index 3cbe7a734352..d70902f2979d 100644
--- a/drivers/iio/position/Makefile
+++ b/drivers/iio/position/Makefile
@@ -4,4 +4,5 @@
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE) += hid-sensor-custom-intel-hinge.o
obj-$(CONFIG_IQS624_POS) += iqs624-pos.o
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
new file mode 100644
index 000000000000..64a7fa7db6af
--- /dev/null
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2020, Intel Corporation.
+ */
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/platform_device.h>
+
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+enum hinge_channel {
+ CHANNEL_SCAN_INDEX_HINGE_ANGLE,
+ CHANNEL_SCAN_INDEX_SCREEN_ANGLE,
+ CHANNEL_SCAN_INDEX_KEYBOARD_ANGLE,
+ CHANNEL_SCAN_INDEX_MAX,
+};
+
+#define CHANNEL_SCAN_INDEX_TIMESTAMP CHANNEL_SCAN_INDEX_MAX
+
+static const u32 hinge_addresses[CHANNEL_SCAN_INDEX_MAX] = {
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1),
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(2),
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(3)
+};
+
+static const char *const hinge_labels[CHANNEL_SCAN_INDEX_MAX] = { "hinge",
+ "screen",
+ "keyboard" };
+
+struct hinge_state {
+ struct iio_dev *indio_dev;
+ struct hid_sensor_hub_attribute_info hinge[CHANNEL_SCAN_INDEX_MAX];
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ const char *labels[CHANNEL_SCAN_INDEX_MAX];
+ struct {
+ u32 hinge_val[3];
+ u64 timestamp __aligned(8);
+ } scan;
+
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+ u64 timestamp;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec hinge_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_HINGE_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ }, {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_SCREEN_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ }, {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 2,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_KEYBOARD_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void hinge_adjust_channel_realbits(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.realbits = size * 8;
+}
+
+/* Channel read_raw handler */
+static int hinge_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+ struct hid_sensor_hub_device *hsdev;
+ int report_id;
+ s32 min;
+
+ hsdev = st->common_attributes.hsdev;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ hid_sensor_power_state(&st->common_attributes, true);
+ report_id = st->hinge[chan->scan_index].report_id;
+ min = st->hinge[chan->scan_index].logical_minimum;
+ if (report_id < 0) {
+ hid_sensor_power_state(&st->common_attributes, false);
+ return -EINVAL;
+ }
+
+ *val = sensor_hub_input_attr_get_raw_value(st->common_attributes.hsdev,
+ hsdev->usage,
+ hinge_addresses[chan->scan_index],
+ report_id,
+ SENSOR_HUB_SYNC, min < 0);
+
+ hid_sensor_power_state(&st->common_attributes, false);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->scale_pre_decml;
+ *val2 = st->scale_post_decml;
+ return st->scale_precision;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = st->value_offset;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_read_samp_freq_value(&st->common_attributes,
+ val, val2);
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_read_raw_hyst_value(&st->common_attributes,
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Channel write_raw handler */
+static int hinge_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_write_samp_freq_value(&st->common_attributes,
+ val, val2);
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_write_raw_hyst_value(&st->common_attributes,
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hinge_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ return sprintf(label, "%s\n", st->labels[chan->channel]);
+}
+
+static const struct iio_info hinge_info = {
+ .read_raw = hinge_read_raw,
+ .write_raw = hinge_write_raw,
+ .read_label = hinge_read_label,
+};
+
+/*
+ * Callback handler to send event after all samples are received
+ * and captured.
+ */
+static int hinge_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ if (atomic_read(&st->common_attributes.data_ready)) {
+ if (!st->timestamp)
+ st->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
+ st->timestamp);
+
+ st->timestamp = 0;
+ }
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int hinge_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, size_t raw_len,
+ char *raw_data, void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct hinge_state *st = iio_priv(indio_dev);
+ int offset;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1):
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(2):
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(3):
+ offset = usage_id - HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1);
+ st->scan.hinge_val[offset] = *(u32 *)raw_data;
+ return 0;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ st->timestamp = hid_sensor_convert_timestamp(&st->common_attributes,
+ *(int64_t *)raw_data);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Parse report which is specific to a usage id */
+static int hinge_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned int usage_id, struct hinge_state *st)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; ++i) {
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ hinge_addresses[i],
+ &st->hinge[i]);
+ if (ret < 0)
+ return ret;
+
+ hinge_adjust_channel_realbits(channels, i, st->hinge[i].size);
+ }
+
+ st->scale_precision = hid_sensor_format_scale(HID_USAGE_SENSOR_HINGE,
+ &st->hinge[CHANNEL_SCAN_INDEX_HINGE_ANGLE],
+ &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1),
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_hinge_probe(struct platform_device *pdev)
+{
+ struct hinge_state *st;
+ struct iio_dev *indio_dev;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ int ret;
+ int i;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->common_attributes.hsdev = hsdev;
+ st->common_attributes.pdev = pdev;
+ st->indio_dev = indio_dev;
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; i++)
+ st->labels[i] = hinge_labels[i];
+
+ ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
+ &st->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ indio_dev->num_channels = ARRAY_SIZE(hinge_channels);
+ indio_dev->channels = devm_kmemdup(&indio_dev->dev, hinge_channels,
+ sizeof(hinge_channels), GFP_KERNEL);
+ if (!indio_dev->channels)
+ return -ENOMEM;
+
+ ret = hinge_parse_report(pdev, hsdev,
+ (struct iio_chan_spec *)indio_dev->channels,
+ hsdev->usage, st);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &hinge_info;
+ indio_dev->name = "hinge";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ atomic_set(&st->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, indio_dev->name,
+ &st->common_attributes);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ return ret;
+ }
+
+ st->callbacks.send_event = hinge_proc_event;
+ st->callbacks.capture_sample = hinge_capture_sample;
+ st->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, hsdev->usage, &st->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_remove_trigger;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_callback;
+ }
+
+ return ret;
+
+error_remove_callback:
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
+error_remove_trigger:
+ hid_sensor_remove_trigger(indio_dev, &st->common_attributes);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_hinge_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
+ hid_sensor_remove_trigger(indio_dev, &st->common_attributes);
+
+ return 0;
+}
+
+static const struct platform_device_id hid_hinge_ids[] = {
+ {
+ /* Format: HID-SENSOR-INT-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-INT-020b",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_hinge_ids);
+
+static struct platform_driver hid_hinge_platform_driver = {
+ .id_table = hid_hinge_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_hinge_probe,
+ .remove = hid_hinge_remove,
+};
+module_platform_driver(hid_hinge_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor INTEL Hinge");
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index 5b59a4137d32..81f683321b23 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -30,9 +30,25 @@
#include "../common/ms_sensors/ms_sensors_i2c.h"
+struct ms_tp_data {
+ const char *name;
+ const struct ms_tp_hw_data *hw;
+};
+
static const int ms5637_samp_freq[6] = { 960, 480, 240, 120, 60, 30 };
-/* String copy of the above const for readability purpose */
-static const char ms5637_show_samp_freq[] = "960 480 240 120 60 30";
+
+static ssize_t ms5637_show_samp_freq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ms_tp_dev *dev_data = iio_priv(indio_dev);
+ int i, len = 0;
+
+ for (i = 0; i <= dev_data->hw->max_res_index; i++)
+ len += sysfs_emit_at(buf, len, "%u ", ms5637_samp_freq[i]);
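+ /* overwrite the trailing space with a newline; the length is unchanged */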
+ sysfs_emit_at(buf, len - 1, "\n");
+
+ return len;
+}
static int ms5637_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
@@ -109,10 +125,10 @@ static const struct iio_chan_spec ms5637_channels[] = {
}
};
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(ms5637_show_samp_freq);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(ms5637_show_samp_freq);
static struct attribute *ms5637_attributes[] = {
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -129,6 +145,7 @@ static const struct iio_info ms5637_info = {
static int ms5637_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ms_tp_data *data;
struct ms_tp_dev *dev_data;
struct iio_dev *indio_dev;
int ret;
@@ -142,17 +159,25 @@ static int ms5637_probe(struct i2c_client *client,
return -EOPNOTSUPP;
}
+ if (id)
+ data = (const struct ms_tp_data *)id->driver_data;
+ else
+ data = device_get_match_data(&client->dev);
+ if (!data)
+ return -EINVAL;
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
if (!indio_dev)
return -ENOMEM;
dev_data = iio_priv(indio_dev);
dev_data->client = client;
- dev_data->res_index = 5;
+ dev_data->res_index = data->hw->max_res_index;
+ dev_data->hw = data->hw;
mutex_init(&dev_data->lock);
indio_dev->info = &ms5637_info;
- indio_dev->name = id->name;
+ indio_dev->name = data->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ms5637_channels;
indio_dev->num_channels = ARRAY_SIZE(ms5637_channels);
@@ -170,20 +195,44 @@ static int ms5637_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct ms_tp_hw_data ms5637_hw_data = {
+ .prom_len = 7,
+ .max_res_index = 5
+};
+
+static const struct ms_tp_hw_data ms5803_hw_data = {
+ .prom_len = 8,
+ .max_res_index = 4
+};
+
+static const struct ms_tp_data ms5637_data = { .name = "ms5637", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms5803_data = { .name = "ms5803", .hw = &ms5803_hw_data };
+
+static const struct ms_tp_data ms5805_data = { .name = "ms5805", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms5837_data = { .name = "ms5837", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms8607_data = {
+ .name = "ms8607-temppressure",
+ .hw = &ms5637_hw_data,
+};
+
static const struct i2c_device_id ms5637_id[] = {
- {"ms5637", 0},
- {"ms5805", 0},
- {"ms5837", 0},
- {"ms8607-temppressure", 0},
+ {"ms5637", (kernel_ulong_t)&ms5637_data },
+ {"ms5805", (kernel_ulong_t)&ms5805_data },
+ {"ms5837", (kernel_ulong_t)&ms5837_data },
+ {"ms8607-temppressure", (kernel_ulong_t)&ms8607_data },
{}
};
MODULE_DEVICE_TABLE(i2c, ms5637_id);
static const struct of_device_id ms5637_of_match[] = {
- { .compatible = "meas,ms5637", },
- { .compatible = "meas,ms5805", },
- { .compatible = "meas,ms5837", },
- { .compatible = "meas,ms8607-temppressure", },
+ { .compatible = "meas,ms5637", .data = &ms5637_data },
+ { .compatible = "meas,ms5803", .data = &ms5803_data },
+ { .compatible = "meas,ms5805", .data = &ms5805_data },
+ { .compatible = "meas,ms5837", .data = &ms5837_data },
+ { .compatible = "meas,ms8607-temppressure", .data = &ms8607_data },
{ },
};
MODULE_DEVICE_TABLE(of, ms5637_of_match);
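With per-chip data in place, sampling_frequency_available now only lists
rates the chip supports. A small user-space model of what
ms5637_show_samp_freq() emits for the ms5803 (max_res_index = 4):

#include <stdio.h>

static const int samp_freq[6] = { 960, 480, 240, 120, 60, 30 };

int main(void)
{
	int max_res_index = 4;	/* ms5803_hw_data above */
	char buf[64];
	int len = 0, i;

	for (i = 0; i <= max_res_index; i++)
		len += snprintf(buf + len, sizeof(buf) - len, "%d ",
				samp_freq[i]);
	buf[len - 1] = '\n';	/* mirrors sysfs_emit_at(buf, len - 1, "\n") */
	fputs(buf, stdout);	/* prints "960 480 240 120 60" */
	return 0;
}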
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index a2f820997afc..37fd0b65a014 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
return ret;
regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+ if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
return -EINVAL;
*val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
if (ret)
break;
- pos = min(max(ilog2(pos), 3), 10) - 3;
+ /* Powers of 2, except for a gap between 16 and 64 */
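+ /* e.g. 8 -> 0, 16 -> 1, 64 -> 2, 128 -> 3, ..., 2048 -> 7 */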
+ pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
pos);
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 503fe54a0bb9..608ccb1d8bc8 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new
+ * I2C command; if this is not done, the following I2C command(s) will
+ * not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 9325e189a215..04a78d9f8fe3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -41,6 +41,7 @@ config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
depends on MMU
+ select DMA_SHARED_BUFFER
default y
config INFINIBAND_ON_DEMAND_PAGING
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index ccf2670ef45e..8ab4eea5a0a5 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -40,5 +40,5 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_srq.o \
uverbs_std_types_wq.o \
uverbs_std_types_qp.o
-ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o umem_dmabuf.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 7989b7e1d1c0..5c9fac7cf420 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -669,11 +669,10 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
* rdma_find_gid_by_port - Returns the GID entry attributes when it finds
* a valid GID entry for given search parameters. It searches for the specified
* GID value in the local software cache.
- * @device: The device to query.
+ * @ib_dev: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
- * @port_num: The port number of the device where the GID value should be
- * searched.
+ * @port: The port number of the device where the GID value should be searched.
* @ndev: In RoCE, the net device of the device. NULL means ignore.
*
* Returns sgid attributes if the GID is found with valid reference or
@@ -719,7 +718,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
/**
* rdma_find_gid_by_filter - Returns the GID table attribute where a
* specified GID value occurs
- * @device: The device to query.
+ * @ib_dev: The device to query.
* @gid: The GID value to search for.
* @port: The port number of the device where the GID value could be
* searched.
@@ -728,6 +727,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
* otherwise, we continue searching the GID table. It's guaranteed that
* while filter is executed, ndev field is valid and the structure won't
* change. filter is executed in an atomic context. filter must not be NULL.
+ * @context: Private data to pass into the call-back.
*
* rdma_find_gid_by_filter() searches for the specified GID value
* of which the filter function returns true in the port's GID table.
@@ -1253,7 +1253,6 @@ EXPORT_SYMBOL(rdma_get_gid_attr);
* @entries: Entries where GID entries are returned.
* @max_entries: Maximum number of entries that can be returned.
* Entries array must be allocated to hold max_entries number of entries.
- * @num_entries: Updated to the number of entries that were successfully read.
*
* Returns number of entries on success or appropriate error code.
*/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 98165589c8ab..be996dba040c 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -4333,7 +4333,7 @@ static int cm_add_one(struct ib_device *ib_device)
unsigned long flags;
int ret;
int count = 0;
- u8 i;
+ unsigned int i;
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
@@ -4345,7 +4345,7 @@ static int cm_add_one(struct ib_device *ib_device)
cm_dev->going_down = 0;
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
@@ -4431,7 +4431,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
- int i;
+ unsigned int i;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
@@ -4441,7 +4441,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
cm_dev->going_down = 1;
spin_unlock_irq(&cm.lock);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c51b84b2d2f3..94096511599f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -352,7 +352,13 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
struct cma_multicast {
struct rdma_id_private *id_priv;
- struct ib_sa_multicast *sa_mc;
+ union {
+ struct ib_sa_multicast *sa_mc;
+ struct {
+ struct work_struct work;
+ struct rdma_cm_event event;
+ } iboe_join;
+ };
struct list_head list;
void *context;
struct sockaddr_storage addr;
@@ -1823,6 +1829,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
cma_igmp_send(ndev, &mgid, false);
dev_put(ndev);
}
+
+ cancel_work_sync(&mc->iboe_join.work);
}
kfree(mc);
}
@@ -2683,6 +2691,28 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
+static void cma_iboe_join_work_handler(struct work_struct *work)
+{
+ struct cma_multicast *mc =
+ container_of(work, struct cma_multicast, iboe_join.work);
+ struct rdma_cm_event *event = &mc->iboe_join.event;
+ struct rdma_id_private *id_priv = mc->id_priv;
+ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+
+ ret = cma_cm_event_handler(id_priv, event);
+ WARN_ON(ret);
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&event->param.ud.ah_attr);
+}
+
static void cma_work_handler(struct work_struct *_work)
{
struct cma_work *work = container_of(_work, struct cma_work, work);
@@ -4478,10 +4508,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
- if (ret) {
- destroy_id_handler_unlock(id_priv);
- return 0;
- }
+ WARN_ON(ret);
out:
mutex_unlock(&id_priv->handler_mutex);
@@ -4542,17 +4569,6 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = mc->join_state;
- if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
- (!ib_sa_sendonly_fullmem_support(&sa_client,
- id_priv->id.device,
- id_priv->id.port_num))) {
- dev_warn(
- &id_priv->id.device->dev,
- "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
- id_priv->id.port_num);
- return -EOPNOTSUPP;
- }
-
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
@@ -4604,7 +4620,6 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
- struct cma_work *work;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
@@ -4618,10 +4633,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (cma_zero_addr(addr))
return -EINVAL;
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (!work)
- return -ENOMEM;
-
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
@@ -4632,10 +4643,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (!ndev) {
- err = -ENODEV;
- goto err_free;
- }
+ if (!ndev)
+ return -ENODEV;
+
ib.rec.rate = iboe_get_rate(ndev);
ib.rec.hop_limit = 1;
ib.rec.mtu = iboe_get_mtu(ndev->mtu);
@@ -4653,24 +4663,15 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -ENOTSUPP;
}
dev_put(ndev);
- if (err || !ib.rec.mtu) {
- if (!err)
- err = -EINVAL;
- goto err_free;
- }
+ if (err || !ib.rec.mtu)
+ return err ?: -EINVAL;
+
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&ib.rec.port_gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
- /* Balances with cma_id_put() in cma_work_handler */
- cma_id_get(id_priv);
- queue_work(cma_wq, &work->work);
+ INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
+ cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
+ queue_work(cma_wq, &mc->iboe_join.work);
return 0;
-
-err_free:
- kfree(work);
- return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 7f70e5a7de10..e0d5e3bae458 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
return ret;
gid_type = ib_cache_gid_parse_type_str(buf);
- if (gid_type < 0)
+ if (gid_type < 0) {
+ cma_configfs_params_put(cma_dev);
return -EINVAL;
+ }
ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
@@ -202,7 +204,6 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
unsigned int i;
unsigned int ports_num;
struct cma_dev_port_group *ports;
- int err;
ibdev = cma_get_ib_dev(cma_dev);
@@ -213,10 +214,8 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports),
GFP_KERNEL);
- if (!ports) {
- err = -ENOMEM;
- goto free;
- }
+ if (!ports)
+ return -ENOMEM;
for (i = 0; i < ports_num; i++) {
char port_str[10];
@@ -232,12 +231,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
cma_dev_group->ports = ports;
-
return 0;
-free:
- kfree(ports);
- cma_dev_group->ports = NULL;
- return err;
}
static void release_cma_dev(struct config_item *item)
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 92745522250e..f3a7c1f404af 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -10,30 +10,35 @@
#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID)
-static int __counter_set_mode(struct rdma_counter_mode *curr,
+static int __counter_set_mode(struct rdma_port_counter *port_counter,
enum rdma_nl_counter_mode new_mode,
enum rdma_nl_counter_mask new_mask)
{
- if ((new_mode == RDMA_COUNTER_MODE_AUTO) &&
- ((new_mask & (~ALL_AUTO_MODE_MASKS)) ||
- (curr->mode != RDMA_COUNTER_MODE_NONE)))
- return -EINVAL;
+ if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters)
+ if (new_mask & ~ALL_AUTO_MODE_MASKS ||
+ port_counter->mode.mode != RDMA_COUNTER_MODE_NONE)
+ return -EINVAL;
- curr->mode = new_mode;
- curr->mask = new_mask;
+ port_counter->mode.mode = new_mode;
+ port_counter->mode.mask = new_mask;
return 0;
}
-/**
+/*
* rdma_counter_set_auto_mode() - Turn on/off per-port auto mode
*
- * When @on is true, the @mask must be set; When @on is false, it goes
- * into manual mode if there's any counter, so that the user is able to
- * manually access them.
+ * @dev: Device to operate on
+ * @port: Port to use
+ * @mask: Mask to configure
+ * @extack: Message to the user
+ *
+ * Return 0 on success.
*/
int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
- bool on, enum rdma_nl_counter_mask mask)
+ enum rdma_nl_counter_mask mask,
+ struct netlink_ext_ack *extack)
{
+ enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO;
struct rdma_port_counter *port_counter;
int ret;
@@ -42,23 +47,23 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
return -EOPNOTSUPP;
mutex_lock(&port_counter->lock);
- if (on) {
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_AUTO, mask);
- } else {
- if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
- ret = -EINVAL;
- goto out;
- }
+ if (mask) {
+ ret = __counter_set_mode(port_counter, mode, mask);
+ if (ret)
+ NL_SET_ERR_MSG(
+ extack,
+ "Turning on auto mode is not allowed when there is bound QP");
+ goto out;
+ }
- if (port_counter->num_counters)
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_MANUAL, 0);
- else
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_NONE, 0);
+ if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
+ ret = -EINVAL;
+ goto out;
}
+ mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+ RDMA_COUNTER_MODE_NONE;
+ ret = __counter_set_mode(port_counter, mode, 0);
out:
mutex_unlock(&port_counter->lock);
return ret;
@@ -122,8 +127,8 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
mutex_lock(&port_counter->lock);
switch (mode) {
case RDMA_COUNTER_MODE_MANUAL:
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_MANUAL, 0);
+ ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL,
+ 0);
if (ret) {
mutex_unlock(&port_counter->lock);
goto err_mode;
@@ -170,8 +175,7 @@ static void rdma_counter_free(struct rdma_counter *counter)
port_counter->num_counters--;
if (!port_counter->num_counters &&
(port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
- __counter_set_mode(&port_counter->mode, RDMA_COUNTER_MODE_NONE,
- 0);
+ __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0);
mutex_unlock(&port_counter->lock);
@@ -227,7 +231,7 @@ static void counter_history_stat_update(struct rdma_counter *counter)
port_counter->hstats->value[i] += counter->stats->value[i];
}
-/**
+/*
* rdma_get_counter_auto_mode - Find the counter that @qp should be bound
* with in auto mode
*
@@ -274,7 +278,7 @@ static void counter_release(struct kref *kref)
rdma_counter_free(counter);
}
-/**
+/*
* rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
* the auto-mode rule
*/
@@ -311,7 +315,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
return 0;
}
-/**
+/*
* rdma_counter_unbind_qp - Unbind a qp from a counter
* @force:
* true - Decrease the counter ref-count anyway (e.g., qp destroy)
@@ -380,7 +384,7 @@ next:
return sum;
}
-/**
+/*
* rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
* specific port, including the running ones and history data
*/
@@ -436,7 +440,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
return counter;
}
-/**
+/*
* rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
*/
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
@@ -485,7 +489,7 @@ err:
return ret;
}
-/**
+/*
* rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
* The id of new counter is returned in @counter_id
*/
@@ -533,7 +537,7 @@ err:
return ret;
}
-/**
+/*
* rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
*/
int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e96f979e6d52..aac0fe14e1d9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -848,6 +848,20 @@ static int setup_port_data(struct ib_device *device)
return 0;
}
+/**
+ * ib_port_immutable_read() - Read rdma port's immutable data
+ * @dev: IB device
+ * @port: port number whose immutable data to read. Ports start at index 1
+ * and are valid up to and including rdma_end_port().
+ */
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port)
+{
+ WARN_ON(!rdma_is_port_valid(dev, port));
+ return &dev->port_data[port].immutable;
+}
+EXPORT_SYMBOL(ib_port_immutable_read);
+
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
if (dev->ops.get_dev_fw_str)
@@ -1887,9 +1901,9 @@ static int __ib_get_client_nl_info(struct ib_device *ibdev,
/**
* ib_get_client_nl_info - Fetch the nl_info from a client
- * @device - IB device
- * @client_name - Name of the client
- * @res - Result of the query
+ * @ibdev: IB device
+ * @client_name: Name of the client
+ * @res: Result of the query
*/
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
struct ib_client_nl_info *res)
@@ -2317,7 +2331,7 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
up_read(&devices_rwsem);
}
-/**
+/*
* ib_enum_all_devs - enumerate all ib_devices
* @cb: Callback to call for each found ib_device
*
@@ -2681,6 +2695,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, read_counters);
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
+ SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 46686990a827..30a0ff76b332 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -392,7 +392,7 @@ static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
/**
* iwpm_register_pid_cb - Process the port mapper response to
* iwpm_register_pid query
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* If successful, the function receives the userspace port mapper pid
@@ -468,7 +468,7 @@ static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
/**
* iwpm_add_mapping_cb - Process the port mapper response to
* iwpm_add_mapping request
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -545,7 +545,7 @@ static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] =
/**
* iwpm_add_and_query_mapping_cb - Process the port mapper response to
* iwpm_add_and_query_mapping request
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
@@ -627,7 +627,7 @@ query_mapping_response_exit:
/**
* iwpm_remote_info_cb - Process remote connecting peer address info, which
* the port mapper has received from the connecting peer
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Stores the IPv4/IPv6 address info in a hash table
@@ -706,7 +706,7 @@ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
/**
* iwpm_mapping_info_cb - Process a notification that the userspace
* port mapper daemon is started
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Using the received port mapper pid, send all the local mapping
@@ -766,7 +766,7 @@ static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
/**
* iwpm_ack_mapping_info_cb - Process the port mapper ack for
* the provided local mapping info records
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -796,7 +796,7 @@ static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -841,7 +841,7 @@ static const struct nla_policy hello_policy[IWPM_NLA_HELLO_MAX] = {
/**
* iwpm_hello_cb - Process a hello message from iwpmd
*
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Using the received port mapper pid, send the kernel's abi_version
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 13495b43dbc1..f80e5550b51f 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -127,8 +127,8 @@ static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
- * @local_addr: Local ip/tcp address
- * @mapped_addr: Mapped local ip/tcp address
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_sockaddr: Mapped local ip/tcp address
* @nl_client: The index of the netlink client
* @map_flags: IWPM mapping flags
*/
@@ -174,7 +174,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
/**
* iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address
* info from the hash table
- * @local_addr: Local ip/tcp address
+ * @local_sockaddr: Local ip/tcp address
* @mapped_local_addr: Mapped local ip/tcp address
*
* Returns err code if mapping info is not found in the hash table,
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 740f03ecc05d..57519ca6cd2c 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -721,6 +721,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
* member record and gid of the device.
* @device: RDMA device
* @port_num: Port of the rdma device to consider
+ * @rec: Multicast member record to use
* @ndev: Optional netdevice, applicable only for RoCE
* @gid_type: GID type to consider
* @ah_attr: AH attribute to fillup on successful completion
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 08366e254b1d..d306049c22a2 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1768,9 +1768,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
mask = nla_get_u32(
tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
-
- ret = rdma_counter_set_auto_mode(device, port,
- mask ? true : false, mask);
+ ret = rdma_counter_set_auto_mode(device, port, mask, extack);
if (ret)
goto err_msg;
} else {
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index e0a41c867002..ffabaf327242 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -201,8 +201,8 @@ EXPORT_SYMBOL(rdma_restrack_parent_name);
/**
* rdma_restrack_new() - Initializes new restrack entry to allow _put() interface
* to release memory in fully automatic way.
- * @res - Entry to initialize
- * @type - REstrack type
+ * @res: Entry to initialize
+ * @type: Restrack type
*/
void rdma_restrack_new(struct rdma_restrack_entry *res,
enum rdma_restrack_type type)
@@ -254,6 +254,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
} else {
ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
&rt->next_id, GFP_KERNEL);
+ ret = (ret < 0) ? ret : 0;
}
out:
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 6b8364bb032d..34fff94eaa38 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -505,7 +505,7 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
*
- * @device: the rdma device
+ * @ib_dev: the rdma device
*/
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index a96030b784eb..31156e22d3e7 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -410,7 +410,7 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
ctx->type = RDMA_RW_SIG_MR;
ctx->nr_ops = 1;
- ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
+ ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
if (!ctx->reg) {
ret = -ENOMEM;
goto out_unmap_prot_sg;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 89a831fa1885..9ef1a355131b 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1434,7 +1434,7 @@ enum opa_pr_supported {
PR_IB_SUPPORTED
};
-/**
+/*
* opa_pr_query_possible - Check if current PR query can be an OPA query.
*
* Returns PR_NOT_SUPPORTED if a path record query is not
@@ -1951,30 +1951,6 @@ err1:
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
-bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num)
-{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- bool ret = false;
- unsigned long flags;
-
- if (!sa_dev)
- return ret;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
-
- spin_lock_irqsave(&port->classport_lock, flags);
- if ((port->classport_info.valid) &&
- (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
- ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
- & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
- spin_unlock_irqrestore(&port->classport_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
-
struct ib_classport_info_context {
struct completion done;
struct ib_sa_query *sa_query;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 7dab9a27a145..da2512c30ffd 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,8 +95,6 @@ struct ucma_context {
u64 uid;
struct list_head list;
- /* sync between removal event and id destroy, protected by file mut */
- int destroying;
struct work_struct close_work;
};
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);
static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destroying
- * by its creator.
+ * by its creator. This puts back the xarray's reference.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
- /*
- * At this point ctx->ref is zero so the only place the ctx can be is in
- * a uevent or in __destroy_id(). Since the former doesn't touch
- * ctx->cm_id and the latter sync cancels this, there is no races with
- * this store.
- */
+ /* Reading the cm_id without holding a positive ref is not allowed */
ctx->cm_id = NULL;
}
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return ctx;
}
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+ struct rdma_cm_id *cm_id)
+{
+ refcount_set(&ctx->ref, 1);
+ ctx->cm_id = cm_id;
+}
+
static void ucma_finish_ctx(struct ucma_context *ctx)
{
lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
ctx = ucma_alloc_ctx(listen_ctx->file);
if (!ctx)
goto err_backlog;
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
uevent = ucma_create_uevent(listen_ctx, event);
if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
return 0;
err_alloc:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
err_backlog:
atomic_inc(&listen_ctx->backlog);
/* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
wake_up_interruptible(&ctx->file->poll_wait);
}
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
- queue_work(system_unbound_wq, &ctx->close_work);
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ xa_lock(&ctx_table);
+ if (xa_load(&ctx_table, ctx->id) == ctx)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ xa_unlock(&ctx_table);
+ }
return 0;
}
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = PTR_ERR(cm_id);
goto err1;
}
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ ucma_destroy_private_ctx(ctx);
return -EFAULT;
}
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return 0;
err1:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
return ret;
}
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
rdma_unlock_handler(mc->ctx->cm_id);
}
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
int events_reported;
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
- ucma_cleanup_multicast(ctx);
-
- /* Cleanup events not yet reported to the user. */
+ /* Cleanup events not yet reported to the user.*/
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+ if (uevent->ctx != ctx)
+ continue;
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+ uevent->conn_req_ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) == uevent->conn_req_ctx) {
list_move_tail(&uevent->list, &list);
+ continue;
+ }
+ list_del(&uevent->list);
+ kfree(uevent);
}
list_del(&ctx->list);
events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
/*
- * If this was a listening ID then any connections spawned from it
- * that have not been delivered to userspace are cleaned up too.
- * Must be done outside any locks.
+ * If this was a listening ID then any connections spawned from it that
+ * have not been delivered to userspace are cleaned up too. Must be done
+ * outside any locks.
*/
list_for_each_entry_safe(uevent, tmp, &list, list) {
- list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
- uevent->conn_req_ctx != ctx)
- __destroy_id(uevent->conn_req_ctx);
+ ucma_destroy_private_ctx(uevent->conn_req_ctx);
kfree(uevent);
}
-
- mutex_destroy(&ctx->mutex);
- kfree(ctx);
return events_reported;
}
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have a XA_ZERO_ENTRY at ctx->id (i.e.
+ * the ctx is not public to the user). This is either because:
+ * - ucma_finish_ctx() hasn't been called
+ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
{
+ int events_reported;
+
/*
- * If the refcount is already 0 then ucma_close_id() has already
- * destroyed the cm_id, otherwise holding the refcount keeps cm_id
- * valid. Prevent queue_work() from being called.
+ * Destroy the underlying cm_id. New work queuing is prevented now by
+ * the removal from the xarray. Once the work is cancelled ref will either
+ * be 0 because the work ran to completion and consumed the ref from the
+ * xarray, or it will be positive because we still have the ref from the
+ * xarray. This can also be 0 in cases where cm_id was never set.
*/
- if (refcount_inc_not_zero(&ctx->ref)) {
- rdma_lock_handler(ctx->cm_id);
- ctx->destroying = 1;
- rdma_unlock_handler(ctx->cm_id);
- ucma_put_ctx(ctx);
- }
-
cancel_work_sync(&ctx->close_work);
- /* At this point it's guaranteed that there is no inflight closing task */
- if (ctx->cm_id)
+ if (refcount_read(&ctx->ref))
ucma_close_id(&ctx->close_work);
- return ucma_free_ctx(ctx);
+
+ events_reported = ucma_cleanup_ctx_events(ctx);
+ ucma_cleanup_multicast(ctx);
+
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+ GFP_KERNEL) != NULL);
+ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
xa_lock(&ctx_table);
ctx = _ucma_find_context(cmd.id, file);
- if (!IS_ERR(ctx))
- __xa_erase(&ctx_table, ctx->id);
+ if (!IS_ERR(ctx)) {
+ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx)
+ ctx = ERR_PTR(-ENOENT);
+ }
xa_unlock(&ctx_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- resp.events_reported = __destroy_id(ctx);
+ resp.events_reported = ucma_destroy_private_ctx(ctx);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
* prevented by this being a FD release function. The list_add_tail() in
* ucma_connect_event_handler() can run concurrently, however it only
* adds to the list *after* a listening ID. By only reading the first of
- * the list, and relying on __destroy_id() to block
+ * the list, and relying on ucma_destroy_private_ctx() to block
* ucma_connect_event_handler(), no additional locking is needed.
*/
while (!list_empty(&file->ctx_list)) {
struct ucma_context *ctx = list_first_entry(
&file->ctx_list, struct ucma_context, list);
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx);
+ ucma_destroy_private_ctx(ctx);
}
kfree(file);
return 0;
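For readers following the ucma rework above, a minimal sketch, assuming nothing beyond <linux/xarray.h>, of the XA_ZERO_ENTRY ownership handoff that replaces the old ctx->destroying flag. Whichever thread wins the cmpxchg from the live pointer owns teardown; the zero entry keeps the ID reserved (and invisible to lookups, which see NULL) until the final erase. The helper names are illustrative, not from the patch:

#include <linux/xarray.h>

/* Illustrative only: the claim/teardown pattern ucma uses above. */
static bool try_claim_for_destroy(struct xarray *xa, unsigned long id,
				  void *obj)
{
	/* Exactly one caller can swap the live pointer for XA_ZERO_ENTRY. */
	return xa_cmpxchg(xa, id, obj, XA_ZERO_ENTRY, GFP_KERNEL) == obj;
}

static void finish_destroy(struct xarray *xa, unsigned long id)
{
	/* Release the reserved index once teardown is complete. */
	WARN_ON(xa_cmpxchg(xa, id, XA_ZERO_ENTRY, NULL, GFP_KERNEL) != NULL);
}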
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7ca4112e3e8f..2dde99a9ba07 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -2,6 +2,7 @@
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -135,7 +136,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
if (mask)
pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
- return rounddown_pow_of_two(pgsz_bitmap);
+ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
@@ -278,6 +279,8 @@ void ib_umem_release(struct ib_umem *umem)
{
if (!umem)
return;
+ if (umem->is_dmabuf)
+ return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
if (umem->is_odp)
return ib_umem_odp_release(to_ib_umem_odp(umem));
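A hedged caller-side sketch of the ib_umem_find_best_pgsz() fix above: the function can now return 0 when no page size in the bitmap can cover the umem, instead of the bogus rounddown_pow_of_two(0) result, so a driver must treat 0 as "no usable page size". The surrounding names are hypothetical:

	/* Hypothetical driver snippet; SZ_* constants from <linux/sizes.h>. */
	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, virt);
	if (!pgsz) {		/* 0 now means: no supported size fits */
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}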
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
new file mode 100644
index 000000000000..f9b5162d9260
--- /dev/null
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/dma-mapping.h>
+
+#include "uverbs.h"
+
+int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct dma_fence *fence;
+ unsigned long start, end, cur = 0;
+ unsigned int nmap = 0;
+ int i;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (umem_dmabuf->sgt)
+ goto wait_fence;
+
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ /* modify the sg list in-place to match umem address and length */
+
+ start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
+ end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
+ PAGE_SIZE);
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (start < cur + sg_dma_len(sg) && cur < end)
+ nmap++;
+ if (cur <= start && start < cur + sg_dma_len(sg)) {
+ unsigned long offset = start - cur;
+
+ umem_dmabuf->first_sg = sg;
+ umem_dmabuf->first_sg_offset = offset;
+ sg_dma_address(sg) += offset;
+ sg_dma_len(sg) -= offset;
+ cur += offset;
+ }
+ if (cur < end && end <= cur + sg_dma_len(sg)) {
+ unsigned long trim = cur + sg_dma_len(sg) - end;
+
+ umem_dmabuf->last_sg = sg;
+ umem_dmabuf->last_sg_trim = trim;
+ sg_dma_len(sg) -= trim;
+ break;
+ }
+ cur += sg_dma_len(sg);
+ }
+
+ umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
+ umem_dmabuf->umem.sg_head.nents = nmap;
+ umem_dmabuf->umem.nmap = nmap;
+ umem_dmabuf->sgt = sgt;
+
+wait_fence:
+ /*
+ * Although the sg list is valid now, the content of the pages
+ * may not be up-to-date. Wait for the exporter to finish
+ * the migration.
+ */
+ fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
+ if (fence)
+ return dma_fence_wait(fence, false);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
+
+void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+ /* restore the original sg list */
+ if (umem_dmabuf->first_sg) {
+ sg_dma_address(umem_dmabuf->first_sg) -=
+ umem_dmabuf->first_sg_offset;
+ sg_dma_len(umem_dmabuf->first_sg) +=
+ umem_dmabuf->first_sg_offset;
+ umem_dmabuf->first_sg = NULL;
+ umem_dmabuf->first_sg_offset = 0;
+ }
+ if (umem_dmabuf->last_sg) {
+ sg_dma_len(umem_dmabuf->last_sg) +=
+ umem_dmabuf->last_sg_trim;
+ umem_dmabuf->last_sg = NULL;
+ umem_dmabuf->last_sg_trim = 0;
+ }
+
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
+
+ umem_dmabuf->sgt = NULL;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ struct dma_buf *dmabuf;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ unsigned long end;
+ struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);
+
+ if (check_add_overflow(offset, (unsigned long)size, &end))
+ return ret;
+
+ if (unlikely(!ops || !ops->move_notify))
+ return ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ if (dmabuf->size < end)
+ goto out_release_dmabuf;
+
+ umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
+ if (!umem_dmabuf) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_release_dmabuf;
+ }
+
+ umem = &umem_dmabuf->umem;
+ umem->ibdev = device;
+ umem->length = size;
+ umem->address = offset;
+ umem->writable = ib_access_writable(access);
+ umem->is_dmabuf = 1;
+
+ if (!ib_umem_num_pages(umem))
+ goto out_free_umem;
+
+ umem_dmabuf->attach = dma_buf_dynamic_attach(
+ dmabuf,
+ device->dma_device,
+ ops,
+ umem_dmabuf);
+ if (IS_ERR(umem_dmabuf->attach)) {
+ ret = ERR_CAST(umem_dmabuf->attach);
+ goto out_free_umem;
+ }
+ return umem_dmabuf;
+
+out_free_umem:
+ kfree(umem_dmabuf);
+
+out_release_dmabuf:
+ dma_buf_put(dmabuf);
+ return ret;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ dma_buf_detach(dmabuf, umem_dmabuf->attach);
+ dma_buf_put(dmabuf);
+ kfree(umem_dmabuf);
+}
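A hedged usage sketch for the new dmabuf umem API introduced above. The attach-ops callback body and variable names are assumptions for illustration; the locking rule (map/unmap only under the dmabuf reservation lock, as the dma_resv_assert_held() calls require) comes from the patch itself:

static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	/* driver-specific: quiesce HW access first, then drop the sg list */
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

	/* in a driver's reg_user_mr_dmabuf() path: */
	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, length, fd, access,
					 &my_attach_ops);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);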
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 19104a675691..dd7f3b437c6b 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
while (list_empty(&file->recv_list)) {
mutex_unlock(&file->mutex);
@@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
}
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list);
@@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
- ret = -EINVAL;
+ ret = -EIO;
goto err_up;
}
@@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
/* we will always be able to post a MAD send */
__poll_t mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_lock(&file->mutex);
poll_wait(filp, &file->recv_wait, wait);
if (!list_empty(&file->recv_list))
mask |= EPOLLIN | EPOLLRDNORM;
+ if (file->agents_dead)
+ mask = EPOLLERR;
+ mutex_unlock(&file->mutex);
return mask;
}
@@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
file->agents_dead = 1;
+ wake_up_interruptible(&file->recv_wait);
mutex_unlock(&file->mutex);
for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
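The umad hunks above complete a standard wake-and-recheck pattern: a sleeper must re-test the dead flag after every sleep, and the killer must wake sleepers after setting it, otherwise a reader can block forever on a port that is going away. A generic sketch with hypothetical names, not the driver's exact code:

	/* reader side */
	mutex_lock(&f->lock);
	while (list_empty(&f->queue)) {
		mutex_unlock(&f->lock);
		if (wait_event_interruptible(f->wq,
				!list_empty(&f->queue) || f->dead))
			return -ERESTARTSYS;
		mutex_lock(&f->lock);
	}
	if (f->dead) {			/* re-check after any sleep */
		mutex_unlock(&f->lock);
		return -EIO;
	}

	/* killer side */
	mutex_lock(&f->lock);
	f->dead = true;
	wake_up_interruptible(&f->wq);	/* unblock sleeping readers */
	mutex_unlock(&f->lock);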
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 98a5d36813ff..f5b8be3bedde 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1382,7 +1382,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (has_sq)
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
- if (!ind_tbl)
+ if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index dd4e76b26c74..f782d5e1aa25 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -182,6 +183,86 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)(
return IS_UVERBS_COPY_ERR(ret) ? ret : 0;
}
+static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
+ u64 offset, length, iova;
+ u32 fd, access_flags;
+ struct ib_mr *mr;
+ int ret;
+
+ if (!ib_dev->ops.reg_user_mr_dmabuf)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&offset, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_OFFSET);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&length, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&iova, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_IOVA);
+ if (ret)
+ return ret;
+
+ if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+
+ ret = uverbs_copy_from(&fd, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_FD);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&access_flags, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (ret)
+ return ret;
+
+ ret = ib_check_mr_access(ib_dev, access_flags);
+ if (ret)
+ return ret;
+
+ mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
+ access_flags,
+ &attrs->driver_udata);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ &mr->lkey, sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_ADVISE_MR,
UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
@@ -247,6 +328,37 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_FD,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MR_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE,
@@ -257,10 +369,11 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_MR,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
+ &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
&UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG),
&UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
- &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR));
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR));
const struct uapi_definition uverbs_def_obj_mr[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR,
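From userspace this method is reached through rdma-core; a hedged sketch assuming the matching verb, ibv_reg_dmabuf_mr(). Note the handler above rejects offset and iova that are not congruent modulo the page size:

#include <err.h>
#include <infiniband/verbs.h>

	struct ibv_mr *mr;

	mr = ibv_reg_dmabuf_mr(pd, 0 /* offset */, len, 0 /* iova */,
			       dmabuf_fd,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_READ);
	if (!mr)
		err(1, "ibv_reg_dmabuf_mr");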
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 9137a25bb521..28464c58738c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2248,7 +2248,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
struct ib_qp_init_attr init_attr = {};
struct ib_qp_attr attr = {};
int num_eth_ports = 0;
- int port;
+ unsigned int port;
/* If QP state >= init, it is assigned to a port and we can check this
* port only.
@@ -2263,7 +2263,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
}
/* Can't get a quick answer, iterate over all ports */
- for (port = 0; port < qp->device->phys_port_cnt; port++)
+ rdma_for_each_port(qp->device, port)
if (rdma_port_get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND)
num_eth_ports++;
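Worth spelling out why the verbs.c hunk above is a bug fix and not just style: RDMA port numbers are 1-based, so the old 0-based loop queried a nonexistent port 0 and skipped the last port. The iterator hides the bounds; a minimal sketch:

	unsigned int port;

	/* roughly expands to: for (port = rdma_start_port(dev);
	 *			   port <= rdma_end_port(dev); port++)
	 */
	rdma_for_each_port(dev, port)
		pr_info("port %u: link layer %d\n", port,
			rdma_port_get_link_layer(dev, port));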
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 401bdc9e931e..ba515efd4fdc 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -469,7 +469,6 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct bnxt_re_mr *mr = NULL;
dma_addr_t dma_addr = 0;
struct ib_mw *mw;
- u64 pbl_tbl;
int rc;
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
@@ -504,9 +503,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
- pbl_tbl = dma_addr;
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
+ BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
goto fail;
@@ -3589,7 +3587,6 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
- u64 pbl = 0;
int rc;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
@@ -3608,7 +3605,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3779,19 +3776,6 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
-static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
- int page_shift)
-{
- u64 *pbl_tbl = pbl_tbl_orig;
- u64 page_size = BIT_ULL(page_shift);
- struct ib_block_iter biter;
-
- rdma_umem_for_each_dma_block(umem, &biter, page_size)
- *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
-
- return pbl_tbl - pbl_tbl_orig;
-}
-
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3801,7 +3785,6 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl = NULL;
unsigned long page_size;
int umem_pgs, rc;
@@ -3846,39 +3829,19 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
}
mr->qplib_mr.total_size = length;
- if (page_size == BNXT_RE_PAGE_SIZE_4K &&
- length > BNXT_RE_MAX_MR_SIZE_LOW) {
- ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
- length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
- rc = -EINVAL;
- goto free_umem;
- }
-
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
- pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
- if (!pbl_tbl) {
- rc = -ENOMEM;
- goto free_umem;
- }
-
- /* Map umem buf ptrs to the PBL */
- umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, order_base_2(page_size));
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
- umem_pgs, false, page_size);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
+ umem_pgs, page_size);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register user MR");
- goto fail;
+ goto free_umem;
}
- kfree(pbl_tbl);
-
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
atomic_inc(&rdev->mr_count);
return &mr->ib_mr;
-fail:
- kfree(pbl_tbl);
free_umem:
ib_umem_release(umem);
free_mrw:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 6316179583a6..049b3576302b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -650,42 +650,32 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
struct creq_register_mr_resp resp;
struct cmdq_register_mr req;
- int pg_ptrs, pages, i, rc;
u16 cmd_flags = 0, level;
- dma_addr_t **pbl_ptr;
+ int pages, rc;
u32 pg_size;
if (num_pbls) {
+ pages = roundup_pow_of_two(num_pbls);
/* Allocate memory for the non-leaf pages to store buf ptrs.
* Non-leaf pages always use the system PAGE_SIZE
*/
- pg_ptrs = roundup_pow_of_two(num_pbls);
- pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
- if (!pages)
- pages++;
-
- if (pages > MAX_PBL_LVL_1_PGS) {
- dev_err(&res->pdev->dev,
- "SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n",
- pages, MAX_PBL_LVL_1_PGS);
- return -ENOMEM;
- }
/* Free the hwq if it already exist, must be a rereg */
if (mr->hwq.max_elements)
bnxt_qplib_free_hwq(res, &mr->hwq);
/* Use system PAGE_SIZE */
hwq_attr.res = res;
hwq_attr.depth = pages;
- hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.stride = buf_pg_size;
hwq_attr.type = HWQ_TYPE_MR;
hwq_attr.sginfo = &sginfo;
+ hwq_attr.sginfo->umem = umem;
hwq_attr.sginfo->npages = pages;
hwq_attr.sginfo->pgsize = PAGE_SIZE;
hwq_attr.sginfo->pgshft = PAGE_SHIFT;
@@ -695,11 +685,6 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
"SP: Reg MR memory allocation failed\n");
return -ENOMEM;
}
- /* Write to the hwq */
- pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
- for (i = 0; i < num_pbls; i++)
- pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
- (pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
}
RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
@@ -711,7 +696,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
- level = mr->hwq.level + 1;
+ level = mr->hwq.level;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
}
pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
@@ -728,7 +713,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.mr_size = cpu_to_le64(mr->total_size);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, block);
+ (void *)&resp, NULL, false);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 967890cd81f2..bc228340684f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -254,7 +254,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a7401398cb34..d109bb3822a5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index b32e6516d65f..ff645b955a08 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -209,7 +209,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
if (!epcp)
return 0;
- uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
+ uep = kzalloc(sizeof(*uep), GFP_KERNEL);
if (!uep)
return 0;
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index b199e4ac6cf9..fa38b34eddb8 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -161,8 +161,8 @@ struct efa_admin_create_qp_resp {
u32 qp_handle;
/*
- * QP number in the given EFA virtual device. Least-significant bits
- * (as needed according to max_qp) carry unique QP ID
+ * QP number in the given EFA virtual device. Least-significant bits (as
+ * needed according to max_qp) carry unique QP ID
*/
u16 qp_num;
@@ -465,7 +465,7 @@ struct efa_admin_create_cq_cmd {
/*
* number of sub cqs - must be equal to sub_cqs_per_cq of queue
- * attributes.
+ * attributes.
*/
u16 num_sub_cqs;
@@ -563,12 +563,8 @@ struct efa_admin_acq_get_stats_resp {
};
struct efa_admin_get_set_feature_common_desc {
- /*
- * 1:0 : select - 0x1 - current value; 0x3 - default
- * value
- * 7:3 : reserved3 - MBZ
- */
- u8 flags;
+ /* MBZ */
+ u8 reserved0;
/* as appears in efa_admin_aq_feature_id */
u8 feature_id;
@@ -823,12 +819,6 @@ enum efa_admin_aenq_group {
EFA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum efa_admin_aenq_notification_syndrom {
- EFA_ADMIN_SUSPEND = 0,
- EFA_ADMIN_RESUME = 1,
- EFA_ADMIN_UPDATE_HINTS = 2,
-};
-
struct efa_admin_mmio_req_read_less_resp {
u16 req_id;
@@ -909,9 +899,6 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
-/* get_set_feature_common_desc */
-#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
-
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index 29d53ed63b3e..78ff9389ae25 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_H_
@@ -82,7 +82,7 @@ struct efa_admin_acq_common_desc {
/*
* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
u16 sq_head_indx;
};
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 336bc2c57bb1..0d523ad736c7 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -20,9 +20,6 @@
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1
-#define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
-#define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
-
enum efa_cmd_status {
EFA_CMD_SUBMITTED,
EFA_CMD_COMPLETED,
@@ -33,8 +30,6 @@ struct efa_comp_ctx {
struct efa_admin_acq_entry *user_cqe;
u32 comp_size;
enum efa_cmd_status status;
- /* status from the device */
- u8 comp_status;
u8 cmd_opcode;
u8 occupied;
};
@@ -140,8 +135,8 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);
+ addr_high = upper_32_bits(sq->dma_addr);
+ addr_low = lower_32_bits(sq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
@@ -174,8 +169,8 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
cq->cc = 0;
cq->phase = 1;
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr);
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr);
+ addr_high = upper_32_bits(cq->dma_addr);
+ addr_low = lower_32_bits(cq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
@@ -215,8 +210,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
aenq->cc = 0;
aenq->phase = 1;
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+ addr_low = lower_32_bits(aenq->dma_addr);
+ addr_high = upper_32_bits(aenq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
@@ -421,9 +416,7 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
}
comp_ctx->status = EFA_CMD_COMPLETED;
- comp_ctx->comp_status = cqe->acq_common_descriptor.status;
- if (comp_ctx->user_cqe)
- memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
+ memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
complete(&comp_ctx->wait_event);
@@ -521,7 +514,7 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c
msleep(aq->poll_interval);
}
- err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
efa_com_put_comp_ctx(aq, comp_ctx);
return err;
@@ -569,7 +562,7 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
goto out;
}
- err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
efa_com_put_comp_ctx(aq, comp_ctx);
return err;
@@ -641,8 +634,8 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
aq->efa_dev,
"Failed to process command %s (opcode %u) comp_status %d err %d\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
- err);
+ cmd->aq_common_descriptor.opcode,
+ comp_ctx->user_cqe->acq_common_descriptor.status, err);
atomic64_inc(&aq->stats.cmd_err);
}
@@ -795,7 +788,7 @@ err_destroy_comp_ctxt:
* This method goes over the admin completion queue and wakes up
* all the pending threads that wait on the commands wait event.
*
- * @note: Should be called after MSI-X interrupt.
+ * Note: Should be called after MSI-X interrupt.
*/
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
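The EFA hunks above drop the driver-local DMA-address-splitting macros in favor of the generic helpers from <linux/kernel.h>; a quick sketch of what they do:

	dma_addr_t addr = 0x0000000123456789ULL;

	u32 lo = lower_32_bits(addr);	/* 0x23456789 */
	u32 hi = upper_32_bits(addr);	/* 0x00000001 */

	writel(lo, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(hi, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);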
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index c87b94ea2939..993cbf37e0b9 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1323,8 +1323,8 @@ CNTR_ELEM(#name, \
/**
* hfi_addr_from_offset - return addr for readq/writeq
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* This routine selects the appropriate base address
* based on the indicated offset.
@@ -1340,8 +1340,8 @@ static inline void __iomem *hfi1_addr_from_offset(
/**
* read_csr - read CSR at the indicated offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* Return: the value read or all FF's if there
* is no mapping
@@ -1355,9 +1355,9 @@ u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
/**
* write_csr - write CSR at the indicated offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
- * @value - value to write
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ * @value: value to write
*/
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
@@ -1373,8 +1373,8 @@ void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
/**
* get_csr_addr - return the iomem address for offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* Return: The iomem address to use in subsequent
* writeq/readq operations.
@@ -8433,7 +8433,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
return hfi1_rcd_head(rcd) != tail;
}
-/**
+/*
* Common code for receive contexts interrupt handlers.
* Update traces, increment kernel IRQ counter and
* setup ASPM when needed.
@@ -8447,7 +8447,7 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
aspm_ctx_disable(rcd);
}
-/**
+/*
* __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
* when there are packets present in the queue. When calling
* with interrupts enabled please use hfi1_rcd_eoi_intr.
@@ -8484,8 +8484,8 @@ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
/**
* hfi1_netdev_rx_napi - napi poll function to move eoi inline
- * @napi - pointer to napi object
- * @budget - netdev budget
+ * @napi: pointer to napi object
+ * @budget: netdev budget
*/
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
{
@@ -10142,7 +10142,7 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
/*
* Set Send Length
- * @ppd - per port data
+ * @ppd: per port data
*
* Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
* registers compare against LRH.PktLen, so use the max bytes included
@@ -14200,9 +14200,9 @@ u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
/**
* init_qpmap_table
- * @dd - device data
- * @first_ctxt - first context
- * @last_ctxt - first context
+ * @dd: device data
+ * @first_ctxt: first context
+ * @last_ctxt: last context
*
* This routine sets the qpn mapping table that
* is indexed by qpn[8:1].
@@ -14383,8 +14383,8 @@ no_qos:
/**
* init_qos - init RX qos
- * @dd - device data
- * @rmt - RSM map table
+ * @dd: device data
+ * @rmt: RSM map table
*
* This routine initializes Rule 0 and the RSM map table to implement
* quality of service (qos).
@@ -15022,8 +15022,7 @@ err_exit:
/**
* hfi1_init_dd() - Initialize most of the dd structure.
- * @dev: the pci_dev for hfi1_ib device
- * @ent: pci_device_id struct for this dev
+ * @dd: the dd device
*
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
@@ -15378,10 +15377,11 @@ static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
/**
* create_pbc - build a pbc for transmission
+ * @ppd: info of physical Hfi port
* @flags: special case flags or-ed in built pbc
- * @srate: static rate
+ * @srate_mbs: static rate
* @vl: vl
- * @dwlen: dword length (header words + data words + pbc words)
+ * @dw_len: dword length (header words + data words + pbc words)
*
* Create a PBC with the given flags, rate, VL, and length.
*
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index e9d5cc8b771a..91f13140ddf2 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -50,7 +50,7 @@
/**
* exp_tid_group_init - initialize exp_tid_set
- * @set - the set
+ * @set: the set
*/
static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
{
@@ -60,7 +60,7 @@ static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
/**
* hfi1_exp_tid_group_init - initialize rcd expected receive
- * @rcd - the rcd
+ * @rcd: the rcd
*/
void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
{
@@ -71,7 +71,7 @@ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
/**
* alloc_ctxt_rcv_groups - initialize expected receive groups
- * @rcd - the context to add the groupings to
+ * @rcd: the context to add the groupings to
*/
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
@@ -101,7 +101,7 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
/**
* free_ctxt_rcv_groups - free expected receive groups
- * @rcd - the context to free
+ * @rcd: the context to free
*
* The routine dismantles the expected receive linked
* list and clears any tids associated with the receive
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 329ee4f48d95..3b7bbc7b9d10 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1522,7 +1522,7 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
* manage_rcvq - manage a context's receive queue
* @uctxt: the context
* @subctxt: the sub-context
- * @start_stop: action to carry out
+ * @arg: start/stop action to carry out
*
* start_stop == 0 disables receive on the context, for use in queue
* overflow conditions. start_stop==1 re-enables, to be used to
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 387305b768e9..5ba5c11459e7 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -91,9 +91,9 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
/**
* format_hwmsg - format a single hwerror message
- * @msg message buffer
- * @msgl length of message buffer
- * @hwmsg message to add to message buffer
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
*/
static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
@@ -104,11 +104,11 @@ static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
/**
* hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs hardware errors bit vector
- * @hwerrmsgs hardware error descriptions
- * @nhwerrmsgs number of hwerrmsgs
- * @msg message buffer
- * @msgl message buffer length
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
*/
void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t msgl)
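Most of the hfi1 hunks here and below are the same mechanical fix: kernel-doc parameter lines take "@name:" with a colon, and a "/**" opener is reserved for comments that actually follow that grammar (plain "/*" otherwise), which is why several openers above are demoted. For reference, a minimal well-formed kernel-doc comment, with hypothetical names:

/**
 * widget_frob - frob a widget
 * @w: the widget to frob
 * @flags: WIDGET_* behavior flags
 *
 * Return: 0 on success or a negative errno.
 */
int widget_frob(struct widget *w, u32 flags);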
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index 5836fe7b2817..111489802614 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -26,7 +26,7 @@ inline void iowait_clear_flag(struct iowait *wait, u32 flag)
clear_bit(flag, &wait->flags);
}
-/**
+/*
* iowait_init() - initialize wait structure
* @wait: wait struct to initialize
* @tx_limit: limit for overflow queuing
@@ -88,7 +88,7 @@ void iowait_cancel_work(struct iowait *w)
/**
* iowait_set_work_flag - set work flag based on leg
- * @w - the iowait work struct
+ * @w: the iowait work struct
*/
int iowait_set_work_flag(struct iowait_work *w)
{
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 3222e3acb79c..e2f2f7847aed 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1341,7 +1341,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
return 0;
}
-/**
+/*
* subn_set_opa_portinfo - set port information
* @smp: the incoming SM packet
* @ibdev: the infiniband device
@@ -4902,6 +4902,8 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
* @in_grh: the global route header for this packet
* @in_mad: the incoming MAD
* @out_mad: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: used to pass back the packet key index
*
* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
* interested in processing.
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index d61ee853d215..cf3040bb177f 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -103,8 +103,8 @@ int msix_initialize(struct hfi1_devdata *dd)
* @arg: context information for the IRQ
* @handler: IRQ handler
* @thread: IRQ thread handler (could be NULL)
- * @idx: zero base idx if multiple devices are needed
* @type: affinty IRQ type
+ * @name: IRQ name
*
* Allocate an MSI-X vector if available, and then create the appropriate
* meta data needed to keep track of the pci IRQ request.
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 6d263c9749b3..1fb6e1a0e4e1 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -467,7 +467,7 @@ void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
* hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
*
* @dd: hfi1 dev data
- * @id: requested integer id up to INT_MAX
+ * @start_id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 18d32f053d26..6f06e9920503 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -334,7 +334,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
return 0;
}
-/**
+/*
* Restore command and BARs after a reset has wiped them out
*
* Returns 0 on success, otherwise a negative error value
@@ -393,7 +393,7 @@ error:
return pcibios_err_to_errno(ret);
}
-/**
+/*
* Save BARs and command to rewrite after device reset
*
* Returns 0 on success, otherwise a negative error value
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 4a4ec2397857..14bfd8287f4a 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -55,6 +55,7 @@
/**
* pio_copy - copy data block to MMIO space
+ * @dd: hfi1 dev data
* @pbuf: a number of blocks allocated within a PIO send context
* @pbc: PBC to send
* @from: source, must be 8 byte aligned
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 681bb4e918c9..e037df911512 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -186,7 +186,7 @@ static void flush_iowait(struct rvt_qp *qp)
write_sequnlock_irqrestore(lock, flags);
}
-/**
+/*
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
* to blindly pass the MTU enum value from the PathRecord to us.
@@ -289,9 +289,9 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
/**
* hfi1_setup_wqe - set up the wqe
- * @qp - The qp
- * @wqe - The built wqe
- * @call_send - Determine if the send should be posted or scheduled.
+ * @qp: The qp
+ * @wqe: The built wqe
+ * @call_send: Determine if the send should be posted or scheduled.
*
* Perform setup of the wqe. This is called
* prior to inserting the wqe into the ring but after
@@ -595,7 +595,7 @@ struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
return sde;
}
-/*
+/**
* qp_to_send_context - map a qp to a send context
* @qp: the QP
* @sc5: the 5 bit sc
@@ -912,8 +912,8 @@ void notify_error_qp(struct rvt_qp *qp)
/**
* hfi1_qp_iter_cb - callback for iterator
- * @qp - the qp
- * @v - the sl in low bits of v
+ * @qp: the qp
+ * @v: the sl in low bits of v
*
* This is called from the iterator callback to work
* on an individual qp.
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 8386c84c2d92..38f311f855b5 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -242,7 +242,7 @@ static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
msgs[0].buf = offset_bytes;
msgs[1].addr = slave_addr;
- msgs[1].flags = I2C_M_NOSTART,
+ msgs[1].flags = I2C_M_NOSTART;
msgs[1].len = len;
msgs[1].buf = data;
break;
@@ -290,7 +290,7 @@ static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
msgs[0].buf = offset_bytes;
msgs[1].addr = slave_addr;
- msgs[1].flags = I2C_M_RD,
+ msgs[1].flags = I2C_M_RD;
msgs[1].len = len;
msgs[1].buf = data;
break;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1bb5f57152d3..0174b8ee9f00 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -421,6 +421,7 @@ bail:
/**
* hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
+ * @ps: the current packet state
*
* Assumes s_lock is held.
*
@@ -1375,9 +1376,8 @@ static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};
-/**
+/*
* hfi1_send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
*
* This is called from hfi1_rc_rcv() and handle_receive_interrupt().
* Note that RDMA reads and atomics are handled in the
@@ -1992,7 +1992,7 @@ static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
}
}
-/**
+/*
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
* @psn: the packet sequence number of the ACK
@@ -2541,6 +2541,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
* @opcode: the opcode for this packet
* @psn: the packet sequence number for this packet
* @diff: the difference between the PSN and the expected PSN
+ * @rcd: the receive context
*
* This is called from hfi1_rc_rcv() to process an unexpected
* incoming RC packet for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 23ac6057b211..c3fa1814c6a8 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -260,6 +260,7 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
* @qp: the queue pair
* @ohdr: a pointer to the destination header memory
* @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
* @bth2: bth2 passed in from the RC/UC builder
* @middle: non-zero indicates ahg "could" be used
* @ps: the current packet state
@@ -348,6 +349,7 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
* @qp: the queue pair
* @ohdr: a pointer to the destination header memory
* @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
* @bth2: bth2 passed in from the RC/UC builder
* @middle: non-zero indicates ahg "could" be used
* @ps: the current packet state
@@ -455,11 +457,10 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
/**
* hfi1_schedule_send_yield - test for a yield required for QP
* send engine
- * @timeout: Final time for timeout slice for jiffies
* @qp: a pointer to QP
* @ps: a pointer to a structure with commonly lookup values for
* the send engine progress
- * @tid - true if it is the tid leg
+ * @tid: true if it is the tid leg
*
* This routine checks if the time slice for the QP has expired
* for RC QPs, if so an additional work entry is queued. At this
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index a307d4c8b15a..46b5290b2839 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1740,7 +1740,7 @@ retry:
sane = (hwhead == swhead);
if (unlikely(!sane)) {
- dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
+ dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%u swhd=%u swtl=%u cnt=%u\n",
sde->this_idx,
use_dmahead ? "dma" : "kreg",
hwhead, swhead, swtail, cnt);
@@ -2448,11 +2448,11 @@ nodesc:
* @sde: sdma engine to use
* @wait: SE wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u16 which, after return will contain the total number of
- * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
- * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
- * which are added to SDMA engine flush list if the SDMA engine state is
- * not running.
+ * @count_out: pointer to a u16 which, after return, will contain the total number of
+ * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
+ * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
+ * which are added to SDMA engine flush list if the SDMA engine state is
+ * not running.
*
* The call submits the list into the ring.
*
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92aa2a9b3b5a..0b1f9e4d038b 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -309,7 +309,8 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
/**
* qp_to_rcd - determine the receive context used by a qp
- * @qp - the qp
+ * @rdi: rvt dev struct
+ * @qp: the qp
*
* This routine returns the receive context associated
* with a qp's qpn.
@@ -484,6 +485,7 @@ static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
/**
* kernel_tid_waiters - determine rcd wait
* @rcd: the receive context
+ * @queue: the queue to operate on
* @qp: the head of the qp being processed
*
* This routine will return false IFF
@@ -517,7 +519,9 @@ static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
/**
* dequeue_tid_waiter - dequeue the qp from the list
- * @qp - the qp to remove the wait list
+ * @rcd: the receive context
+ * @queue: the queue to operate on
+ * @qp: the qp to remove the wait list
*
* This routine removes the indicated qp from the
* wait list if it is there.
@@ -549,6 +553,7 @@ static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
/**
* queue_qp_for_tid_wait - suspend QP on tid space
* @rcd: the receive context
+ * @queue: the queue to operate on
* @qp: the qp
*
* The qp is inserted at the tail of the rcd
@@ -593,7 +598,7 @@ static void __trigger_tid_waiter(struct rvt_qp *qp)
/**
* tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
- * @qp - the qp
+ * @qp: the qp
*
* trigger a schedule for a waiting qp in a deadlock
* safe manner. The qp reference is held prior
@@ -630,7 +635,7 @@ static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
/**
* tid_rdma_trigger_resume - field a trigger work request
- * @work - the work item
+ * @work: the work item
*
* Complete the off qp trigger processing by directly
* calling the progress routine.
@@ -654,7 +659,7 @@ static void tid_rdma_trigger_resume(struct work_struct *work)
rvt_put_qp(qp);
}
-/**
+/*
* tid_rdma_flush_wait - unwind any tid space wait
*
* This is called when resetting a qp to
@@ -693,8 +698,8 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
/* Flow functions */
/**
* kern_reserve_flow - allocate a hardware flow
- * @rcd - the context to use for allocation
- * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
+ * @rcd: the context to use for allocation
+ * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
* signify "don't care".
*
* Use a bit mask based allocation to reserve a hardware
@@ -860,9 +865,10 @@ static u8 trdma_pset_order(struct tid_rdma_pageset *s)
/**
* tid_rdma_find_phys_blocks_4k - get groups based on mr info
- * @npages - number of pages
- * @pages - pointer to an array of page structs
- * @list - page set array to return
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
*
* This routine returns the number of groups associated with
* the current sge information. This implementation is based
@@ -949,10 +955,10 @@ static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
/**
* tid_flush_pages - dump out pages into pagesets
- * @list - list of pagesets
- * @idx - pointer to current page index
- * @pages - number of pages to dump
- * @sets - current number of pagesset
+ * @list: list of pagesets
+ * @idx: pointer to current page index
+ * @pages: number of pages to dump
+ * @sets: current number of pagesets
*
* This routine flushes out accumulated pages.
*
@@ -990,9 +996,10 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list,
/**
* tid_rdma_find_phys_blocks_8k - get groups based on mr info
- * @pages - pointer to an array of page structs
- * @npages - number of pages
- * @list - page set array to return
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
*
* This routine parses an array of pages to compute pagesets
* in an 8k compatible way.
@@ -1064,7 +1071,7 @@ static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
return sets;
}
-/**
+/*
* Find pages for one segment of a sge array represented by @ss. The function
* does not check the sge, the sge must have been checked for alignment with a
* prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
@@ -1598,7 +1605,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
/**
* hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
- * @req - the tid rdma request to be cleaned
+ * @req: the tid rdma request to be cleaned
*/
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
{
@@ -3435,7 +3442,7 @@ static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
return 0;
}
-/**
+/*
* Central place for resource allocation at TID write responder,
* is called from write_req and write_data interrupt handlers as
* well as the send thread when a queued QP is scheduled for
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 1fb918399da0..5b0f536b34e0 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -55,6 +55,7 @@
/**
* hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
+ * @ps: the current packet state
*
* Assume s_lock is held.
*
@@ -291,12 +292,7 @@ bail_no_tx:
/**
* hfi1_uc_rcv - handle an incoming UC packet
- * @ibp: the port the packet came in on
- * @hdr: the header of the packet
- * @rcv_flags: flags relevant to rcv processing
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
+ * @packet: the packet structure
*
* This is called from qp_rcv() to process an incoming UC packet
* for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index e804af71b629..6ecb984c85fa 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -468,6 +468,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
/**
* hfi1_make_ud_req - construct a UD request packet
* @qp: the QP
+ * @ps: the current packet state
*
* Assume s_lock is held.
*
@@ -840,12 +841,7 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
/**
* hfi1_ud_rcv - receive an incoming UD packet
- * @ibp: the port the packet came in on
- * @hdr: the packet header
- * @rcv_flags: flags relevant to rcv processing
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
+ * @packet: the packet structure
*
* This is called from qp_rcv() to process an incoming UD packet
* for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b94fc7fd75a9..58dcab2679d9 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -154,12 +154,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
fd->entry_to_rb = NULL;
}
-/**
+/*
* Release pinned receive buffer pages.
*
- * @mapped - true if the pages have been DMA mapped. false otherwise.
- * @idx - Index of the first page to unpin.
- * @npages - No of pages to unpin.
+ * @mapped: true if the pages have been DMA mapped. false otherwise.
+ * @idx: Index of the first page to unpin.
+ * @npages: No of pages to unpin.
*
* If the pages have been DMA mapped (indicated by mapped parameter), their
* info will be passed via a struct tid_rb_node. If they haven't been mapped,
@@ -189,7 +189,7 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
fd->tid_n_pinned -= npages;
}
-/**
+/*
* Pin receive buffer pages.
*/
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3591923abebb..0dd4bb0a5a7e 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -729,7 +729,7 @@ bail_txadd:
/**
* update_tx_opstats - record stats by opcode
- * @qp; the qp
+ * @qp: the qp
* @ps: transmit packet state
* @plen: the plen in dwords
*
@@ -1145,7 +1145,7 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
* egress_pkey_check - check P_KEY of a packet
* @ppd: Physical IB port data
* @slid: SLID for packet
- * @bkey: PKEY for header
+ * @pkey: PKEY for header
* @sc5: SC for packet
* @s_pkey_index: It will be used for look up optimization for kernel contexts
* only. If it is negative value, then it means user contexts is calling this
@@ -1206,7 +1206,7 @@ bad:
return 1;
}
-/**
+/*
* get_send_routine - choose an egress routine
*
* Choose an egress routine based on QP type
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 5afee04fb02c..23c438cef40d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -32,6 +32,7 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
+#include <linux/bitfield.h>
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
@@ -65,6 +66,27 @@
#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
+#define _hr_reg_clear(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ *((__le32 *)_ptr + (field_h) / 32) &= \
+ cpu_to_le32( \
+ ~GENMASK((field_h) % 32, (field_l) % 32)) + \
+ BUILD_BUG_ON_ZERO(((field_h) / 32) != \
+ ((field_l) / 32)); \
+ })
+
+#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
+
+#define _hr_reg_write(ptr, field_type, field_h, field_l, val) \
+ ({ \
+ _hr_reg_clear(ptr, field_type, field_h, field_l); \
+ *((__le32 *)ptr + (field_h) / 32) |= cpu_to_le32(FIELD_PREP( \
+ GENMASK((field_h) % 32, (field_l) % 32), val)); \
+ })
+
+#define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
+
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
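A short usage sketch of the new helpers (illustrative only: the struct and
field below are hypothetical, real field definitions live in the hardware
headers). Each field macro expands to a (struct type, high bit, low bit)
triple, and the BUILD_BUG_ON_ZERO() rejects any field that straddles a
32-bit word:

	struct demo_ctx {
		__le32 dw[4];
	};

	/* hypothetical field: bits 13:4 of dw[0] hold the PD number */
	#define DEMO_CTX_PD struct demo_ctx, 13, 4

	static void demo_set_pd(struct demo_ctx *ctx, u32 pdn)
	{
		/* clears bits 13:4 of dw[0], then ORs in
		 * cpu_to_le32(FIELD_PREP(GENMASK(13, 4), pdn))
		 */
		hr_reg_write(ctx, DEMO_CTX_PD, pdn);
	}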
@@ -342,8 +364,8 @@
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
-#define ROCEE_TX_CMQ_TAIL_REG 0x07010
-#define ROCEE_TX_CMQ_HEAD_REG 0x07014
+#define ROCEE_TX_CMQ_HEAD_REG 0x07010
+#define ROCEE_TX_CMQ_TAIL_REG 0x07014
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8533fc2d8df2..74fc4940b03a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -38,11 +38,74 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
+static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+{
+ u32 least_load = bank[0].inuse;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+ bankid = i;
+ }
+ }
+
+ return bankid;
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+ u8 bankid;
+ int id;
+
+ mutex_lock(&cq_table->bank_mutex);
+ bankid = get_least_load_bankid_for_cq(cq_table->bank);
+ bank = &cq_table->bank[bankid];
+
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+ if (id < 0) {
+ mutex_unlock(&cq_table->bank_mutex);
+ return id;
+ }
+
+ /* the lower 2 bits are the bankid */
+ hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
+ bank->inuse++;
+ mutex_unlock(&cq_table->bank_mutex);
+
+ return 0;
+}
+
+static inline u8 get_cq_bankid(unsigned long cqn)
+{
+ /* The lower 2 bits of CQN are used to hash to different banks */
+ return (u8)(cqn & GENMASK(1, 0));
+}
+
+static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+
+ bank = &cq_table->bank[get_cq_bankid(cqn)];
+
+ ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);
+
+ mutex_lock(&cq_table->bank_mutex);
+ bank->inuse--;
+ mutex_unlock(&cq_table->bank_mutex);
+}
+
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_cq_table *cq_table;
u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
int ret;
@@ -54,13 +117,6 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return -EINVAL;
}
- cq_table = &hr_dev->cq_table;
- ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
- return ret;
- }
-
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
@@ -110,7 +166,6 @@ err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
return ret;
}
@@ -138,7 +193,6 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
wait_for_completion(&hr_cq->free);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -152,7 +206,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
@@ -298,11 +351,17 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}
+ ret = alloc_cqn(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+ goto err_cq_db;
+ }
+
ret = alloc_cqc(hr_dev, hr_cq);
if (ret) {
ibdev_err(ibdev,
"failed to alloc CQ context, ret = %d.\n", ret);
- goto err_cq_db;
+ goto err_cqn;
}
/*
@@ -326,6 +385,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
err_cqc:
free_cqc(hr_dev, hr_cq);
+err_cqn:
+ free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
@@ -341,9 +402,11 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
- free_cq_buf(hr_dev, hr_cq);
- free_cq_db(hr_dev, hr_cq, udata);
free_cqc(hr_dev, hr_cq);
+ free_cqn(hr_dev, hr_cq->cqn);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cq_buf(hr_dev, hr_cq);
+
return 0;
}
@@ -402,18 +465,33 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
complete(&hr_cq->free);
}
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ unsigned int reserved_from_bot;
+ unsigned int i;
+ mutex_init(&cq_table->bank_mutex);
xa_init(&cq_table->array);
- return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
- hr_dev->caps.num_cqs - 1,
- hr_dev->caps.reserved_cqs, 0);
+ reserved_from_bot = hr_dev->caps.reserved_cqs;
+
+ for (i = 0; i < reserved_from_bot; i++) {
+ cq_table->bank[get_cq_bankid(i)].inuse++;
+ cq_table->bank[get_cq_bankid(i)].min++;
+ }
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ ida_init(&cq_table->bank[i].ida);
+ cq_table->bank[i].max = hr_dev->caps.num_cqs /
+ HNS_ROCE_CQ_BANK_NUM - 1;
+ }
}
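A worked example of the reservation loop (illustrative numbers): with
reserved_cqs == 6, CQNs 0..5 hash to banks 0, 1, 2, 3, 0, 1, so the init
leaves:

	/* bank[0].min == bank[1].min == 2, bank[2].min == bank[3].min == 1,
	 * keeping ida_alloc_range() above the reserved ids in each bank.
	 */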
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
- hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
+ int i;
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
+ ida_destroy(&hr_dev->cq_table.bank[i].ida);
}
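An illustrative round trip of the new CQN encoding (a sketch, not part of
the patch): ida_alloc_range() provides the per-bank id in the upper bits
and the bank id fills the low CQ_BANKID_SHIFT bits, so free_cqn() can
recover both halves from the CQN alone:

	cqn = (5 << CQ_BANKID_SHIFT) | 3;	/* id 5 from bank 3 -> 23  */
	get_cq_bankid(cqn);			/* 23 & GENMASK(1, 0) == 3 */
	cqn >> CQ_BANKID_SHIFT;			/* == 5, handed to ida_free() */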
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d538625e36..3d6b7a2db496 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -54,6 +54,7 @@
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
+#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
@@ -65,6 +66,8 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
+#define HNS_ROCE_RESERVED_SGE 1
+
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -90,6 +93,7 @@
#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_SGE_SIZE 16
+#define HNS_ROCE_DWQE_SIZE 65536
#define HNS_ROCE_HOP_NUM_0 0xff
@@ -119,6 +123,9 @@
#define SRQ_DB_REG 0x230
#define HNS_ROCE_QP_BANK_NUM 8
+#define HNS_ROCE_CQ_BANK_NUM 4
+
+#define CQ_BANKID_SHIFT 2
/* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth
@@ -163,44 +170,6 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
-/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
-enum {
- HNS_ROCE_LWQCE_QPC_ERROR = 1,
- HNS_ROCE_LWQCE_MTU_ERROR = 2,
- HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
- HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
- HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
- HNS_ROCE_LWQCE_SL_ERROR = 6,
- HNS_ROCE_LWQCE_PORT_ERROR = 7,
-};
-
-/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */
-enum {
- HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
- HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
- HNS_ROCE_LAVWQE_VA_ERROR = 3,
- HNS_ROCE_LAVWQE_PD_ERROR = 4,
- HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
- HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
- HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
-};
-
-/* DOORBELL overflow subtype */
-enum {
- HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
- HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
-};
-
-enum {
- /* RQ&SRQ related operations */
- HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
- HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
-};
-
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
enum {
@@ -253,9 +222,6 @@ enum {
#define HNS_ROCE_CMD_SUCCESS 1
-#define HNS_ROCE_PORT_DOWN 0
-#define HNS_ROCE_PORT_UP 1
-
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
@@ -332,7 +298,6 @@ struct hns_roce_buf_attr {
} region[HNS_ROCE_MAX_BT_REGION];
unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
- bool fixed_page; /* decide page shift is fixed-size or maximum size */
unsigned int user_access; /* umem access flag */
bool mtt_only; /* only alloc buffer-required MTT memory */
};
@@ -393,6 +358,7 @@ struct hns_roce_wq {
spinlock_t lock;
u32 wqe_cnt; /* WQE num */
u32 max_gs;
+ u32 rsv_sge;
int offset;
int wqe_shift; /* WQE size */
u32 head;
@@ -489,6 +455,8 @@ struct hns_roce_idx_que {
struct hns_roce_mtr mtr;
int entry_shift;
unsigned long *bitmap;
+ u32 head;
+ u32 tail;
};
struct hns_roce_srq {
@@ -496,7 +464,9 @@ struct hns_roce_srq {
unsigned long srqn;
u32 wqe_cnt;
int max_gs;
+ u32 rsv_sge;
int wqe_shift;
+ u32 cqn;
void __iomem *db_reg_l;
atomic_t refcount;
@@ -507,8 +477,6 @@ struct hns_roce_srq {
u64 *wrid;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
- u16 head;
- u16 tail;
struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
@@ -532,13 +500,14 @@ struct hns_roce_qp_table {
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
- spinlock_t bank_lock;
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
- struct hns_roce_bitmap bitmap;
struct xarray array;
struct hns_roce_hem_table table;
+ struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+ struct mutex bank_mutex;
};
struct hns_roce_srq_table {
@@ -640,6 +609,10 @@ struct hns_roce_work {
u32 queue_num;
};
+enum {
+ HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -647,7 +620,7 @@ struct hns_roce_qp {
struct hns_roce_db sdb;
unsigned long en_flags;
u32 doorbell_qpn;
- u32 sq_signal_bits;
+ enum ib_sig_type sq_signal_bits;
struct hns_roce_wq sq;
struct hns_roce_mtr mtr;
@@ -779,7 +752,7 @@ struct hns_roce_caps {
u32 max_cqes;
u32 min_cqes;
u32 min_wqes;
- int reserved_cqs;
+ u32 reserved_cqs;
int reserved_srqs;
int num_aeq_vectors;
int num_comp_vectors;
@@ -911,8 +884,7 @@ struct hns_roce_hw {
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, int flags, u32 pdn,
- int mr_access_flags, u64 iova, u64 size,
+ struct hns_roce_mr *mr, int flags,
void *mb_buf);
int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr);
@@ -945,11 +917,7 @@ struct hns_roce_hw {
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
- void (*write_srqc)(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
- void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
- dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx);
+ int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
@@ -982,6 +950,7 @@ struct hns_roce_dev {
struct mutex pgdir_mutex;
int irq[HNS_ROCE_MAX_IRQ_NUM];
u8 __iomem *reg_base;
+ void __iomem *mem_base;
struct hns_roce_caps caps;
struct xarray qp_table_xa;
@@ -1067,7 +1036,7 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
- __raw_writeq(*(u64 *) val, dest);
+ writeq(*(u64 *)val, dest);
}
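Switching from __raw_writeq() to writeq() keeps the I/O ordering guarantees
of the standard accessor; on the little-endian platforms this driver
supports, the bytes reaching the doorbell register are unchanged.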
static inline struct hns_roce_qp
@@ -1164,7 +1133,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
@@ -1281,7 +1250,6 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
-
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index edc9d6b98d95..cfd2e1b60c7f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1075,9 +1075,8 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
return NULL;
if (exist_bt) {
- hem->addr = dma_alloc_coherent(hr_dev->dev,
- count * BA_BYTE_LEN,
- &hem->dma_addr, GFP_KERNEL);
+ hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
+ &hem->dma_addr, GFP_KERNEL);
if (!hem->addr) {
kfree(hem);
return NULL;
@@ -1336,6 +1335,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (ba_num < 1)
return -ENOMEM;
+ if (ba_num > unit)
+ return -ENOBUFS;
+
+ ba_num = min_t(int, ba_num, unit);
INIT_LIST_HEAD(&temp_root);
offset = r->offset;
/* indicate to last region */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index f68585ff8e8a..5346fdca9473 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -43,6 +43,22 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
+/**
+ * hns_get_gid_index - Get gid index.
+ * @hr_dev: pointer to structure hns_roce_dev.
+ * @port: port, value range: 0 ~ MAX
+ * @gid_index: gid_index, value range: 0 ~ MAX
+ * Description:
+ * N ports share gids, allocated as follows:
+ * GID[0][0], GID[1][0],.....GID[N - 1][0],
+ * GID[0][1], GID[1][1],.....GID[N - 1][1],
+ * And so on
+ */
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+{
+ return gid_index * hr_dev->caps.num_ports + port;
+}
+
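A quick worked example of the mapping (illustrative values):

	/* caps.num_ports == 2:
	 *   hns_get_gid_index(hr_dev, 0, 3) == 3 * 2 + 0 == 6
	 *   hns_get_gid_index(hr_dev, 1, 3) == 3 * 2 + 1 == 7
	 */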
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
@@ -314,8 +330,6 @@ out:
/* Set DB return */
if (likely(nreq)) {
qp->sq.head += nreq;
- /* Memory barrier */
- wmb();
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
@@ -395,8 +409,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /* Memory barrier */
- wmb();
if (ibqp->qp_type == IB_QPT_GSI) {
__le32 tmp;
@@ -1391,7 +1403,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
- * @enable: true -- drop reset, false -- reset
+ * @dereset: true -- drop reset, false -- reset
* return 0 - success, negative - fail
*/
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
@@ -1968,12 +1980,6 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- /*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
- wmb();
-
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -2314,8 +2320,6 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
*hr_cq->tptr_addr = hr_cq->cons_index &
((hr_cq->cq_depth << 1) - 1);
- /* Memroy barrier */
- wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
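These wmb() removals (here and in the v2 counterparts later in this patch)
lean on the doorbell accessors themselves: writel()/writeq() already order
prior normal memory writes before the MMIO store, so the explicit barrier
duplicated work the accessor performs anyway.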
@@ -3204,9 +3208,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
* need hw to flush RQ HEAD by DB again
*/
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- /* Memory barrier */
- wmb();
-
roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 46ab0a321d21..84383236e47d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -193,6 +193,49 @@
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
+enum {
+ HNS_ROCE_LWQCE_QPC_ERROR = 1,
+ HNS_ROCE_LWQCE_MTU_ERROR,
+ HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR,
+ HNS_ROCE_LWQCE_WQE_ADDR_ERROR,
+ HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR,
+ HNS_ROCE_LWQCE_SL_ERROR,
+ HNS_ROCE_LWQCE_PORT_ERROR,
+};
+
+/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */
+enum {
+ HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
+ HNS_ROCE_LAVWQE_LENGTH_ERROR,
+ HNS_ROCE_LAVWQE_VA_ERROR,
+ HNS_ROCE_LAVWQE_PD_ERROR,
+ HNS_ROCE_LAVWQE_RW_ACC_ERROR,
+ HNS_ROCE_LAVWQE_KEY_STATE_ERROR,
+ HNS_ROCE_LAVWQE_MR_OPERATION_ERROR,
+};
+
+/* DOORBELL overflow subtype */
+enum {
+ HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF,
+ HNS_ROCE_DB_SUBTYPE_ODB_OVF,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP,
+};
+
+enum {
+ /* RQ&SRQ related operations */
+ HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
+ HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE,
+};
+
+enum {
+ HNS_ROCE_PORT_DOWN = 0,
+ HNS_ROCE_PORT_UP,
+};
+
struct hns_roce_cq_context {
__le32 cqc_byte_4;
__le32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 833e1f259936..c3934abeb260 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -48,8 +48,8 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
-static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
- struct ib_sge *sg)
+static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+ struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
dseg->addr = cpu_to_le64(sg->addr);
@@ -99,16 +99,16 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
u64 pbl_ba;
/* use ib_access_flags */
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
- wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
- wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
- wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
- wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
- wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S,
+ !!(wr->access & IB_ACCESS_MW_BIND));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RR_S,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RW_S,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_LW_S,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
/* Data structure reuse may lead to confusion */
pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
@@ -121,12 +121,10 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
fseg->pbl_size = cpu_to_le32(mr->npages);
- roce_set_field(fseg->mode_buf_pg_sz,
- V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
+ roce_set_field(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
- roce_set_bit(fseg->mode_buf_pg_sz,
- V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -361,7 +359,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) {
- ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
+ ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
hr_qp->state);
return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
@@ -469,7 +467,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
- memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON(ret))
@@ -503,6 +500,8 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
if (ret)
return ret;
+ qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
+
set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
/*
@@ -521,10 +520,12 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
return 0;
}
-static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+static int set_rc_opcode(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_send_wr *wr)
{
u32 ib_op = wr->opcode;
+ int ret = 0;
rc_sq_wqe->immtdata = get_immtdata(wr);
@@ -544,7 +545,10 @@ static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_REG_MR:
- set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ else
+ ret = -EOPNOTSUPP;
break;
case IB_WR_LOCAL_INV:
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
@@ -553,19 +557,23 @@ static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+ if (unlikely(ret))
+ return ret;
+
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
- return 0;
+ return ret;
}
static inline int set_rc_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
unsigned int owner_bit)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
unsigned int valid_num_sge;
@@ -573,11 +581,10 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
- ret = set_rc_opcode(rc_sq_wqe, wr);
+ ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON(ret))
return ret;
@@ -635,6 +642,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ /* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
+ roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
V2_DB_PARAMETER_IDX_S, qp->sq.head);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
@@ -644,6 +653,38 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
}
}
+static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
+ u64 __iomem *dest)
+{
+#define HNS_ROCE_WRITE_TIMES 8
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int i;
+
+ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
+ for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
+ writeq_relaxed(*(val + i), dest + i);
+}
+
+static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ void *wqe)
+{
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
+ /* All kinds of DirectWQE have the same header field layout */
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
+ V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
+
+ hns_roce_write512(hr_dev, wqe, hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * qp->ibqp.qp_num);
+}
+
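A worked example of the service-level split (assuming DB_SL_L is the 2-bit
low field, as the >> 2 implies):

	/* qp->sl == 5: DB_SL_L keeps 5 & 0x3 == 1, DB_SL_H gets 5 >> 2 == 1,
	 * and hardware reassembles sl == 5 from the two fields.
	 */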
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
@@ -708,9 +749,12 @@ out:
if (likely(nreq)) {
qp->sq.head += nreq;
qp->next_sge = sge_idx;
- /* Memory barrier */
- wmb();
- update_sq_db(hr_dev, qp);
+
+ if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
+ (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ write_dwqe(hr_dev, qp, wqe);
+ else
+ update_sq_db(hr_dev, qp);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -721,14 +765,74 @@ out:
static int check_recv_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
+ ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
+ ibqp->qp_type);
+ return -EOPNOTSUPP;
+ }
+
if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
return -EIO;
- else if (hr_qp->state == IB_QPS_RESET)
+
+ if (hr_qp->state == IB_QPS_RESET)
return -EINVAL;
return 0;
}
+static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
+ u32 max_sge, bool rsv)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ u32 i, cnt;
+
+ for (i = 0, cnt = 0; i < wr->num_sge; i++) {
+ /* Skip zero-length sge */
+ if (!wr->sg_list[i].length)
+ continue;
+ set_data_seg_v2(dseg + cnt, wr->sg_list + i);
+ cnt++;
+ }
+
+ /* Fill a reserved sge to make hw stop reading remaining segments */
+ if (rsv) {
+ dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[cnt].addr = 0;
+ dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ } else {
+ /* Clear remaining segments to make ROCEE ignore sges */
+ if (cnt < max_sge)
+ memset(dseg + cnt, 0,
+ (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
+ }
+}
+
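A sketch of the WQE produced for rsv == true with two valid SGEs
(illustrative layout):

	dseg[0] = sge0;				/* real scatter entries  */
	dseg[1] = sge1;
	dseg[2].lkey = HNS_ROCE_INVALID_LKEY;	/* stop marker: hardware */
	dseg[2].addr = 0;			/* quits scanning here   */
	dseg[2].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);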
+static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
+ u32 wqe_idx, u32 max_sge)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ void *wqe = NULL;
+ u32 i;
+
+ wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ /* RQ supports inline data */
+ if (hr_qp->rq_inl_buf.wqe_cnt) {
+ sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+ }
+}
+
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
@@ -736,14 +840,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_v2_wqe_data_seg *dseg;
- struct hns_roce_rinl_sge *sge_list;
+ u32 wqe_idx, nreq, max_sge;
unsigned long flags;
- void *wqe = NULL;
- u32 wqe_idx;
- int nreq;
int ret;
- int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
@@ -754,6 +853,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
+ max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
hr_qp->ibqp.recv_cq))) {
@@ -762,50 +862,22 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
- wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
- wr->num_sge, hr_qp->rq.max_gs);
+ wr->num_sge, max_sge);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
- for (i = 0; i < wr->num_sge; i++) {
- if (!wr->sg_list[i].length)
- continue;
- set_data_seg_v2(dseg, wr->sg_list + i);
- dseg++;
- }
-
- if (wr->num_sge < hr_qp->rq.max_gs) {
- dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
- dseg->addr = 0;
- }
-
- /* rq support inline data */
- if (hr_qp->rq_inl_buf.wqe_cnt) {
- sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
- hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
- (u32)wr->num_sge;
- for (i = 0; i < wr->num_sge; i++) {
- sge_list[i].addr =
- (void *)(u64)wr->sg_list[i].addr;
- sge_list[i].len = wr->sg_list[i].length;
- }
- }
-
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+ fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /* Memory barrier */
- wmb();
/*
* Hip08 hardware cannot flush the WQEs in RQ if the QP state
@@ -829,41 +901,82 @@ out:
return ret;
}
-static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
+static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}
-static void *get_idx_buf(struct hns_roce_idx_que *idx_que, unsigned int n)
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
return hns_roce_buf_offset(idx_que->mtr.kmem,
n << idx_que->entry_shift);
}
-static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
/* always called with interrupts disabled. */
spin_lock(&srq->lock);
bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
- srq->tail++;
+ srq->idx_que.tail++;
spin_unlock(&srq->lock);
}
-static int find_empty_entry(struct hns_roce_idx_que *idx_que,
- unsigned long size)
+static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
- int wqe_idx;
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
- if (unlikely(bitmap_full(idx_que->bitmap, size)))
+ return idx_que->head - idx_que->tail >= srq->wqe_cnt;
+}
+
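idx_que->head and idx_que->tail free-run as u32 counters, so the test stays
correct across wraparound (illustrative values):

	/* head == 2 (wrapped), tail == 0xfffffffe:
	 * 2 - 0xfffffffe == 4 (mod 2^32), i.e. 4 outstanding WQEs,
	 * and >= wqe_cnt still detects a full queue.
	 */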
+static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
+ const struct ib_recv_wr *wr)
+{
+ struct ib_device *ib_dev = srq->ibsrq.device;
+
+ if (unlikely(wr->num_sge > max_sge)) {
+ ibdev_err(ib_dev,
+ "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
+ wr->num_sge, max_sge);
+ return -EINVAL;
+ }
+
+ if (unlikely(hns_roce_srqwq_overflow(srq))) {
+ ibdev_err(ib_dev,
+ "failed to check srqwq status, srqwq is full.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ u32 pos;
+
+ pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
+ if (unlikely(pos == srq->wqe_cnt))
return -ENOSPC;
- wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
+ bitmap_set(idx_que->bitmap, pos, 1);
+ *wqe_idx = pos;
+ return 0;
+}
- bitmap_set(idx_que->bitmap, wqe_idx, 1);
+static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ unsigned int head;
+ __le32 *buf;
- return wqe_idx;
+ head = idx_que->head & (srq->wqe_cnt - 1);
+
+ buf = get_idx_buf(idx_que, head);
+ *buf = cpu_to_le32(wqe_idx);
+
+ idx_que->head++;
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
@@ -872,77 +985,42 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_v2_db srq_db;
unsigned long flags;
- unsigned int ind;
- __le32 *srq_idx;
int ret = 0;
- int wqe_idx;
+ u32 max_sge;
+ u32 wqe_idx;
void *wqe;
- int nreq;
- int i;
+ u32 nreq;
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->wqe_cnt - 1);
-
+ max_sge = srq->max_gs - srq->rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(wr->num_sge >= srq->max_gs)) {
- ret = -EINVAL;
- *bad_wr = wr;
- break;
- }
-
- if (unlikely(srq->head == srq->tail)) {
- ret = -ENOMEM;
+ ret = check_post_srq_valid(srq, max_sge, wr);
+ if (ret) {
*bad_wr = wr;
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (unlikely(wqe_idx < 0)) {
- ret = -ENOMEM;
+ ret = get_srq_wqe_idx(srq, &wqe_idx);
+ if (unlikely(ret)) {
*bad_wr = wr;
break;
}
- wqe = get_srq_wqe(srq, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
-
- for (i = 0; i < wr->num_sge; ++i) {
- dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
- dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
- dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
- }
-
- if (wr->num_sge < srq->max_gs) {
- dseg[i].len = 0;
- dseg[i].lkey = cpu_to_le32(0x100);
- dseg[i].addr = 0;
- }
-
- srq_idx = get_idx_buf(&srq->idx_que, ind);
- *srq_idx = cpu_to_le32(wqe_idx);
-
+ wqe = get_srq_wqe_buf(srq, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
+ fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
- srq->head += nreq;
-
- /*
- * Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
srq_db.byte_4 =
cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
(srq->srqn & V2_DB_BYTE_4_TAG_M));
srq_db.parameter =
- cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+ cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
}
@@ -1059,15 +1137,6 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
return 0;
}
-static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
@@ -1107,8 +1176,7 @@ static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
&priv->cmq.csq : &priv->cmq.crq;
ring->flag = ring_type;
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
+ ring->head = 0;
return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
@@ -1207,34 +1275,10 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
struct hns_roce_v2_priv *priv = hr_dev->priv;
- return head == priv->cmq.csq.next_to_use;
-}
-
-static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
- struct hns_roce_cmq_desc *desc;
- u16 ntc = csq->next_to_clean;
- u32 head;
- int clean = 0;
-
- desc = &csq->desc[ntc];
- head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
- }
- csq->next_to_clean = ntc;
-
- return clean;
+ return tail == priv->cmq.csq.head;
}
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
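For contrast, a sketch of the bookkeeping this rework drops: the old code
kept both ring cursors in software, while the new code keeps only
csq->head and reads the consumer position back from hardware (the register
names in hns_roce_common.h were also swapped to match head = producer,
tail = consumer):

	/* removed software-only occupancy math:
	 *   used = (head - tail + desc_num) % desc_num;
	 *   free = desc_num - used - 1;
	 * new completion test:
	 *   roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG) == csq->head
	 */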
@@ -1242,42 +1286,26 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
- struct hns_roce_cmq_desc *desc_to_use;
- bool complete = false;
u32 timeout = 0;
- int handle = 0;
u16 desc_ret;
- int ret = 0;
- int ntc;
+ u32 tail;
+ int ret;
+ int i;
spin_lock_bh(&csq->lock);
- if (num > hns_roce_cmq_space(csq)) {
- spin_unlock_bh(&csq->lock);
- return -EBUSY;
- }
-
- /*
- * Record the location of desc in the cmq for this time
- * which will be use for hardware to write back
- */
- ntc = csq->next_to_use;
+ tail = csq->head;
- while (handle < num) {
- desc_to_use = &csq->desc[csq->next_to_use];
- *desc_to_use = desc[handle];
- dev_dbg(hr_dev->dev, "set cmq desc:\n");
- csq->next_to_use++;
- if (csq->next_to_use == csq->desc_num)
- csq->next_to_use = 0;
- handle++;
+ for (i = 0; i < num; i++) {
+ csq->desc[csq->head++] = desc[i];
+ if (csq->head == csq->desc_num)
+ csq->head = 0;
}
/* Write to hardware */
- roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+ roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, csq->head);
- /*
- * If the command is sync, wait for the firmware to write back,
+ /* If the command is sync, wait for the firmware to write back,
* if multiple descriptors are sent, use the first one to check
*/
if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
@@ -1285,39 +1313,34 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev))
break;
udelay(1);
- timeout++;
- } while (timeout < priv->cmq.tx_timeout);
+ } while (++timeout < priv->cmq.tx_timeout);
}
if (hns_roce_cmq_csq_done(hr_dev)) {
- complete = true;
- handle = 0;
- while (handle < num) {
- /* get the result of hardware write back */
- desc_to_use = &csq->desc[ntc];
- desc[handle] = *desc_to_use;
- dev_dbg(hr_dev->dev, "Get cmq desc:\n");
- desc_ret = le16_to_cpu(desc[handle].retval);
- if (desc_ret == CMD_EXEC_SUCCESS)
- ret = 0;
- else
- ret = -EIO;
- priv->cmq.last_status = desc_ret;
- ntc++;
- handle++;
- if (ntc == csq->desc_num)
- ntc = 0;
+ for (ret = 0, i = 0; i < num; i++) {
+ /* check the result of hardware write back */
+ desc[i] = csq->desc[tail++];
+ if (tail == csq->desc_num)
+ tail = 0;
+
+ desc_ret = le16_to_cpu(desc[i].retval);
+ if (likely(desc_ret == CMD_EXEC_SUCCESS))
+ continue;
+
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = %x, return = %x\n",
+ desc->opcode, desc_ret);
+ ret = -EIO;
}
- }
+ } else {
+ /* FW/HW reset or incorrect number of desc */
+ tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
+ dev_warn(hr_dev->dev, "CMDQ moved tail from %d to %d\n",
+ csq->head, tail);
+ csq->head = tail;
- if (!complete)
ret = -EAGAIN;
-
- /* clean the command send queue */
- handle = hns_roce_cmq_csq_clean(hr_dev);
- if (handle != num)
- dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
- handle, num);
+ }
spin_unlock_bh(&csq->lock);
@@ -1530,7 +1553,8 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
roce_set_field(req->time_cfg_udp_port,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S,
+ ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
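For the record: ROCE_V2_UDP_DPORT replaces the bare 0x12b7, i.e. 4791, the
IANA-assigned RoCEv2 UDP destination port.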
@@ -1541,17 +1565,13 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
struct hns_roce_pf_res_a *req_a;
struct hns_roce_pf_res_b *req_b;
int ret;
- int i;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_QUERY_PF_RES, true);
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_QUERY_PF_RES,
+ true);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_QUERY_PF_RES,
+ true);
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
@@ -1644,19 +1664,16 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc[2];
struct hns_roce_vf_res_a *req_a;
struct hns_roce_vf_res_b *req_b;
- int i;
req_a = (struct hns_roce_vf_res_a *)desc[0].data;
req_b = (struct hns_roce_vf_res_b *)desc[1].data;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_ALLOC_VF_RES, false);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_ALLOC_VF_RES,
+ false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_ALLOC_VF_RES,
+ false);
roce_set_field(req_a->vf_qpc_bt_idx_num,
VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
@@ -1866,7 +1883,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
@@ -1999,10 +2015,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
+ caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
+ caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
@@ -2336,7 +2354,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
struct hns_roce_link_table_entry *entry;
enum hns_roce_opcode_type opcode;
u32 page_num;
- int i;
switch (type) {
case TSQ_LINK_TABLE:
@@ -2354,14 +2371,10 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
@@ -2880,36 +2893,20 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
- V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
- V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
- HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
- roce_set_field(mpt_entry->byte_4_pd_hop_st,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
- V2_MPT_BYTE_4_PD_S, mr->pd);
-
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
- (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
- mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
- (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
- (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
- (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
-
- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
- mr->type == MR_TYPE_MR ? 0 : 1);
- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
- 1);
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+ hr_reg_enable(mpt_entry, MPT_L_INV_EN);
+
+ hr_reg_write(mpt_entry, MPT_BIND_EN,
+ !!(mr->access & IB_ACCESS_MW_BIND));
+ hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_ATOMIC));
+ hr_reg_write(mpt_entry, MPT_RR_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_READ));
+ hr_reg_write(mpt_entry, MPT_RW_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_WRITE));
+ hr_reg_write(mpt_entry, MPT_LW_EN,
+ !!(mr->access & IB_ACCESS_LOCAL_WRITE));
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -2917,9 +2914,19 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+ if (mr->type != MR_TYPE_MR)
+ hr_reg_enable(mpt_entry, MPT_PA);
+
if (mr->type == MR_TYPE_DMA)
return 0;
+ if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
+ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
+
+ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
+
ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
@@ -2927,20 +2934,17 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
- u32 pdn, int mr_access_flags, u64 iova,
- u64 size, void *mb_buf)
+ void *mb_buf)
{
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ u32 mr_access_flags = mr->access;
int ret = 0;
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
- if (flags & IB_MR_REREG_PD) {
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
- V2_MPT_BYTE_4_PD_S, pdn);
- mr->pd = pdn;
- }
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
if (flags & IB_MR_REREG_ACCESS) {
roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
@@ -2958,13 +2962,10 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
}
if (flags & IB_MR_REREG_TRANS) {
- mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
- mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
- mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
- mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
-
- mr->iova = iova;
- mr->size = size;
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
@@ -3126,11 +3127,6 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- /*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
- wmb();
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -3639,11 +3635,8 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
break;
}
- if (npolled) {
- /* Memory barrier */
- wmb();
+ if (npolled)
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
out:
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -4235,7 +4228,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -4243,7 +4235,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t irrl_ba;
enum ib_mtu mtu;
u8 lp_pktn_ini;
- u8 port_num;
u64 *mtts;
u8 *dmac;
u8 *smac;
@@ -4324,15 +4315,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
- /* Configure GID index */
- port_num = rdma_ah_get_port_num(&attr->ah_attr);
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
- hns_get_gid_index(hr_dev, port_num - 1,
- grh->sgid_index));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
-
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -5083,7 +5065,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
@@ -5174,6 +5156,9 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc desc;
int ret, i;
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
mutex_lock(&hr_dev->qp_table.scc_mutex);
/* set scc ctx clear done flag */
@@ -5220,98 +5205,96 @@ out:
return ret;
}
-static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
- u32 cqn, void *mb_buf, u64 *mtts_wqe,
- u64 *mtts_idx, dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx)
+#define DMA_IDX_SHIFT 3
+#define DMA_WQE_SHIFT 3
+
+static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
+ struct hns_roce_srq_context *ctx)
{
- struct hns_roce_srq_context *srq_context;
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u64 mtts_idx[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_idx = 0;
+ int ret;
+
+ /* Get physical address of idx que buf */
+ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+ ret);
+ return -ENOBUFS;
+ }
+
+ hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
+ upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
+
+ hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
+
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[0]));
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[1]));
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+
+ return 0;
+}
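/* Worked example of the L/H split above (address made up): the BT base
 * address is 8-byte aligned, so bits [2:0] are dropped by DMA_IDX_SHIFT
 * before the value is spread across the two fields.
 *
 *	dma_handle_idx              = 0x1234567890
 *	dma_handle_idx >> 3         = 0x02468acf12
 *	SRQC_IDX_BT_BA_L (32 bits)  = 0x468acf12
 *	SRQC_IDX_BT_BA_H            = upper_32_bits(0x02468acf12) = 0x2
 */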
- srq_context = mb_buf;
- memset(srq_context, 0, sizeof(*srq_context));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
- SRQC_BYTE_4_SRQ_ST_S, 1);
-
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
- srq->wqe_cnt));
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
- SRQC_BYTE_4_SRQN_S, srq->srqn);
-
- roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
- SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
-
- srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
-
- roce_set_field(srq_context->byte_24_wqe_bt_ba,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- dma_handle_wqe >> 35);
-
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
- SRQC_BYTE_28_PD_S, pdn);
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
- SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
- fls(srq->max_gs - 1));
-
- srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
- roce_set_field(srq_context->rsv_idx_bt_ba,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- dma_handle_idx >> 35);
-
- srq_context->idx_cur_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
- srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
-
- srq_context->idx_nxt_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
- roce_set_field(srq_context->rsv_idxnxtblkaddr,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
- cqn);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
- to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
- to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
-
- roce_set_bit(srq_context->db_record_addr_record_en,
- SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
+static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
+{
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_srq_context *ctx = mb_buf;
+ u64 mtts_wqe[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_wqe = 0;
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Get the physical address of srq buf */
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+ ret);
+ return -ENOBUFS;
+ }
+
+ hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
+ hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
+ hr_reg_write(ctx, SRQC_XRCD, 0);
+ hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
+ hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
+ hr_reg_write(ctx, SRQC_RQWS,
+ srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
+
+ hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
+ upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
+
+ hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}
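/* With the reworked signature, everything the SRQC needs is derived from
 * the srq object itself, so the caller shrinks to (see the alloc_srqc()
 * hunk in hns_roce_srq.c further below):
 *
 *	ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
 */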
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
@@ -5331,7 +5314,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
return -EINVAL;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->wqe_cnt)
+ if (srq_attr->srq_limit > srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -5394,8 +5377,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs;
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5626,9 +5609,6 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
aeqe_found = 1;
- if (eq->cons_index > (2 * eq->entries - 1))
- eq->cons_index = 0;
-
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
aeqe = next_aeqe_sw_v2(eq);
@@ -5671,9 +5651,6 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = 1;
- if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
- eq->cons_index = 0;
-
ceqe = next_ceqe_sw_v2(eq);
}
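/* A sketch of why the explicit cons_index reset removed above is
 * redundant, assuming eq->entries is a power of two: a free-running
 * index yields both the ring slot and the owner phase by masking, e.g.
 *
 *	idx   = eq->cons_index & (eq->entries - 1);	// ring slot
 *	owner = !!(eq->cons_index & eq->entries);	// phase bit
 *
 * The exact consumption logic lives in next_aeqe_sw_v2()/next_ceqe_sw_v2()
 * and may differ in detail.
 */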
@@ -5948,7 +5925,6 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
buf_attr.region[0].size = eq->entries * eq->eqe_size;
buf_attr.region[0].hopnum = eq->hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
hr_dev->caps.eqe_ba_pg_sz +
@@ -6286,6 +6262,7 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
/* Get info from NIC driver. */
hr_dev->reg_base = handle->rinfo.roce_io_base;
+ hr_dev->mem_base = handle->rinfo.roce_mem_base;
hr_dev->caps.num_ports = 1;
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bdaccf86460d..39621fb6ec16 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -96,7 +96,8 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
@@ -366,24 +367,61 @@ struct hns_roce_v2_cq_context {
#define CQC_STASH CQC_FIELD_LOC(63, 63)
struct hns_roce_srq_context {
- __le32 byte_4_srqn_srqst;
- __le32 byte_8_limit_wl;
- __le32 byte_12_xrcd;
- __le32 byte_16_pi_ci;
- __le32 wqe_bt_ba;
- __le32 byte_24_wqe_bt_ba;
- __le32 byte_28_rqws_pd;
- __le32 idx_bt_ba;
- __le32 rsv_idx_bt_ba;
- __le32 idx_cur_blk_addr;
- __le32 byte_44_idxbufpgsz_addr;
- __le32 idx_nxt_blk_addr;
- __le32 rsv_idxnxtblkaddr;
- __le32 byte_56_xrc_cqn;
- __le32 db_record_addr_record_en;
- __le32 db_record_addr;
+ __le32 byte_4_srqn_srqst;
+ __le32 byte_8_limit_wl;
+ __le32 byte_12_xrcd;
+ __le32 byte_16_pi_ci;
+ __le32 wqe_bt_ba;
+ __le32 byte_24_wqe_bt_ba;
+ __le32 byte_28_rqws_pd;
+ __le32 idx_bt_ba;
+ __le32 rsv_idx_bt_ba;
+ __le32 idx_cur_blk_addr;
+ __le32 byte_44_idxbufpgsz_addr;
+ __le32 idx_nxt_blk_addr;
+ __le32 rsv_idxnxtblkaddr;
+ __le32 byte_56_xrc_cqn;
+ __le32 db_record_addr_record_en;
+ __le32 db_record_addr;
};
+#define SRQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_srq_context, h, l)
+
+#define SRQC_SRQ_ST SRQC_FIELD_LOC(1, 0)
+#define SRQC_WQE_HOP_NUM SRQC_FIELD_LOC(3, 2)
+#define SRQC_SHIFT SRQC_FIELD_LOC(7, 4)
+#define SRQC_SRQN SRQC_FIELD_LOC(31, 8)
+#define SRQC_LIMIT_WL SRQC_FIELD_LOC(47, 32)
+#define SRQC_RSV0 SRQC_FIELD_LOC(63, 48)
+#define SRQC_XRCD SRQC_FIELD_LOC(87, 64)
+#define SRQC_RSV1 SRQC_FIELD_LOC(95, 88)
+#define SRQC_PRODUCER_IDX SRQC_FIELD_LOC(111, 96)
+#define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
+#define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
+#define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
+#define SRQC_RSV2 SRQC_FIELD_LOC(191, 189)
+#define SRQC_PD SRQC_FIELD_LOC(215, 192)
+#define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
+#define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
+#define SRQC_IDX_BT_BA_L SRQC_FIELD_LOC(255, 224)
+#define SRQC_IDX_BT_BA_H SRQC_FIELD_LOC(284, 256)
+#define SRQC_RSV4 SRQC_FIELD_LOC(287, 285)
+#define SRQC_IDX_CUR_BLK_ADDR_L SRQC_FIELD_LOC(319, 288)
+#define SRQC_IDX_CUR_BLK_ADDR_H SRQC_FIELD_LOC(339, 320)
+#define SRQC_RSV5 SRQC_FIELD_LOC(341, 340)
+#define SRQC_IDX_HOP_NUM SRQC_FIELD_LOC(343, 342)
+#define SRQC_IDX_BA_PG_SZ SRQC_FIELD_LOC(347, 344)
+#define SRQC_IDX_BUF_PG_SZ SRQC_FIELD_LOC(351, 348)
+#define SRQC_IDX_NXT_BLK_ADDR_L SRQC_FIELD_LOC(383, 352)
+#define SRQC_IDX_NXT_BLK_ADDR_H SRQC_FIELD_LOC(403, 384)
+#define SRQC_RSV6 SRQC_FIELD_LOC(415, 404)
+#define SRQC_XRC_CQN SRQC_FIELD_LOC(439, 416)
+#define SRQC_WQE_BA_PG_SZ SRQC_FIELD_LOC(443, 440)
+#define SRQC_WQE_BUF_PG_SZ SRQC_FIELD_LOC(447, 444)
+#define SRQC_DB_RECORD_EN SRQC_FIELD_LOC(448, 448)
+#define SRQC_DB_RECORD_ADDR_L SRQC_FIELD_LOC(479, 449)
+#define SRQC_DB_RECORD_ADDR_H SRQC_FIELD_LOC(511, 480)
+
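/* Worked example of the FIELD_LOC(h, l) convention above, using SRQC_PD =
 * SRQC_FIELD_LOC(215, 192) and the legacy macros it replaces:
 *
 *	dword index = 192 / 32 = 6          -> byte_28_rqws_pd
 *	shift       = 192 % 32 = 0          -> SRQC_BYTE_28_PD_S
 *	width       = 215 - 192 + 1 = 24    -> SRQC_BYTE_28_PD_M = GENMASK(23, 0)
 */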
#define SRQC_BYTE_4_SRQ_ST_S 0
#define SRQC_BYTE_4_SRQ_ST_M GENMASK(1, 0)
@@ -993,6 +1031,45 @@ struct hns_roce_v2_mpt_entry {
__le32 byte_64_buf_pa1;
};
+#define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l)
+
+#define MPT_ST MPT_FIELD_LOC(1, 0)
+#define MPT_PBL_HOP_NUM MPT_FIELD_LOC(3, 2)
+#define MPT_PBL_BA_PG_SZ MPT_FIELD_LOC(7, 4)
+#define MPT_PD MPT_FIELD_LOC(31, 8)
+#define MPT_RA_EN MPT_FIELD_LOC(32, 32)
+#define MPT_R_INV_EN MPT_FIELD_LOC(33, 33)
+#define MPT_L_INV_EN MPT_FIELD_LOC(34, 34)
+#define MPT_BIND_EN MPT_FIELD_LOC(35, 35)
+#define MPT_ATOMIC_EN MPT_FIELD_LOC(36, 36)
+#define MPT_RR_EN MPT_FIELD_LOC(37, 37)
+#define MPT_RW_EN MPT_FIELD_LOC(38, 38)
+#define MPT_LW_EN MPT_FIELD_LOC(39, 39)
+#define MPT_MW_CNT MPT_FIELD_LOC(63, 40)
+#define MPT_FRE MPT_FIELD_LOC(64, 64)
+#define MPT_PA MPT_FIELD_LOC(65, 65)
+#define MPT_ZBVA MPT_FIELD_LOC(66, 66)
+#define MPT_SHARE MPT_FIELD_LOC(67, 67)
+#define MPT_MR_MW MPT_FIELD_LOC(68, 68)
+#define MPT_BPD MPT_FIELD_LOC(69, 69)
+#define MPT_BQP MPT_FIELD_LOC(70, 70)
+#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
+#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
+#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
+#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LKEY MPT_FIELD_LOC(223, 192)
+#define MPT_VA MPT_FIELD_LOC(287, 224)
+#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
+#define MPT_PBL_BA MPT_FIELD_LOC(380, 320)
+#define MPT_BLK_MODE MPT_FIELD_LOC(381, 381)
+#define MPT_RSV0 MPT_FIELD_LOC(383, 382)
+#define MPT_PA0 MPT_FIELD_LOC(441, 384)
+#define MPT_BOUND_VA MPT_FIELD_LOC(447, 442)
+#define MPT_PA1 MPT_FIELD_LOC(505, 448)
+#define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506)
+#define MPT_RSV2 MPT_FIELD_LOC(507, 507)
+#define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508)
+
#define V2_MPT_BYTE_4_MPT_ST_S 0
#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
@@ -1059,6 +1136,8 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_BYTE_4_CMD_S 24
#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_DB_FLAG_S 31
+
#define V2_DB_PARAMETER_IDX_S 0
#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
@@ -1155,6 +1234,15 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5)
+
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13)
+
+#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15
+#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15)
+
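/* The doorbell SL appears to be a 4-bit value split across the two fields
 * above; the composition below is an assumption based on the L/H naming:
 *
 *	sl_l = sl & 0x3;		// bits 6:5, DB_SL_L
 *	sl_h = (sl >> 2) & 0x3;		// bits 14:13, DB_SL_H
 */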
#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
@@ -1167,15 +1255,17 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
-#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
+#define V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S 10
+
+#define V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S 11
-#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
+#define V2_RC_FRMR_WQE_BYTE_40_RR_S 12
-#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
+#define V2_RC_FRMR_WQE_BYTE_40_RW_S 13
-#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
+#define V2_RC_FRMR_WQE_BYTE_40_LW_S 14
-#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
+#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
@@ -1190,7 +1280,7 @@ struct hns_roce_v2_rc_send_wqe {
struct hns_roce_wqe_frmr_seg {
__le32 pbl_size;
- __le32 mode_buf_pg_sz;
+ __le32 byte_40;
};
#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
@@ -1786,12 +1876,8 @@ struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr;
struct hns_roce_cmq_desc *desc;
u32 head;
- u32 tail;
-
u16 buf_size;
u16 desc_num;
- int next_to_use;
- int next_to_clean;
u8 flag;
spinlock_t lock; /* command queue lock */
};
@@ -1800,7 +1886,6 @@ struct hns_roce_v2_cmq {
struct hns_roce_v2_cmq_ring csq;
struct hns_roce_v2_cmq_ring crq;
u16 tx_timeout;
- u16 last_status;
};
enum hns_roce_link_table_type {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d9179bae4989..c9c0836394a2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,22 +42,6 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-/**
- * hns_get_gid_index - Get gid index.
- * @hr_dev: pointer to structure hns_roce_dev.
- * @port: port, value range: 0 ~ MAX
- * @gid_index: gid_index, value range: 0 ~ MAX
- * Description:
- * N ports shared gids, allocation method as follow:
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * And so on
- */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
-{
- return gid_index * hr_dev->caps.num_ports + port;
-}
-
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
@@ -217,7 +201,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
+ hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
@@ -748,11 +733,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_pd_table_free;
}
- ret = hns_roce_init_cq_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to init completion queue table.\n");
- goto err_mr_table_free;
- }
+ hns_roce_init_cq_table(hr_dev);
ret = hns_roce_init_qp_table(hr_dev);
if (ret) {
@@ -772,13 +753,10 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
return 0;
err_qp_table_free:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
- hns_roce_cleanup_qp_table(hr_dev);
+ hns_roce_cleanup_qp_table(hr_dev);
err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
-
-err_mr_table_free:
hns_roce_cleanup_mr_table(hr_dev);
err_pd_table_free:
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 1bcffd93ff3e..79b3c3023fe7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -66,8 +66,7 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- u32 pd, u64 iova, u64 size, u32 access)
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned long obj = 0;
@@ -82,11 +81,6 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
return -ENOMEM;
}
- mr->iova = iova; /* MR va starting addr */
- mr->size = size; /* MR addr range */
- mr->pd = pd; /* MR num */
- mr->access = access; /* MR access permit */
- mr->enabled = 0; /* MR active status */
mr->key = hw_index_to_key(obj); /* MR key */
err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
@@ -110,8 +104,7 @@ static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
}
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- size_t length, struct ib_udata *udata, u64 start,
- int access)
+ struct ib_udata *udata, u64 start)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
bool is_fast = mr->type == MR_TYPE_FRMR;
@@ -121,11 +114,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
- buf_attr.region[0].size = length;
+ buf_attr.region[0].size = mr->size;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
- buf_attr.user_access = access;
+ buf_attr.user_access = mr->access;
/* fast MR's buffer is alloced before mapping, not at creation */
buf_attr.mtt_only = is_fast;
@@ -197,9 +189,6 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
mr->enabled = 1;
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return 0;
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -237,14 +226,16 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = acc;
/* Allocate memory region key */
hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_free;
- ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
+ ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
goto err_mr;
@@ -271,13 +262,17 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
+ mr->iova = virt_addr;
+ mr->size = length;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = access_flags;
mr->type = MR_TYPE_MR;
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
- access_flags);
+
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_alloc_mr;
- ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
if (ret)
goto err_alloc_key;
@@ -299,35 +294,6 @@ err_alloc_mr:
return ERR_PTR(ret);
}
-static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
- u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct hns_roce_cmd_mailbox *mailbox,
- u32 pdn, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_mr *mr = to_hr_mr(ibmr);
- int ret;
-
- free_mr_pbl(hr_dev, mr);
- ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
- if (ret) {
- ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
- mr_access_flags, virt_addr,
- length, mailbox->buf);
- if (ret) {
- ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
- free_mr_pbl(hr_dev, mr);
- }
-
- return ret;
-}
-
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -338,7 +304,6 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
unsigned long mtpt_idx;
- u32 pdn = 0;
int ret;
if (!mr->enabled)
@@ -360,23 +325,29 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
mr->enabled = 0;
+ mr->iova = virt_addr;
+ mr->size = length;
if (flags & IB_MR_REREG_PD)
- pdn = to_hr_pd(pd)->pdn;
+ mr->pd = to_hr_pd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access = mr_access_flags;
if (flags & IB_MR_REREG_TRANS) {
- ret = rereg_mr_trans(ibmr, flags,
- start, length,
- virt_addr, mr_access_flags,
- mailbox, pdn, udata);
- if (ret)
- goto free_cmd_mbox;
- } else {
- ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
- mr_access_flags, virt_addr,
- length, mailbox->buf);
- if (ret)
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
+ ret);
goto free_cmd_mbox;
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto free_cmd_mbox;
}
ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
@@ -386,12 +357,6 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
}
mr->enabled = 1;
- if (flags & IB_MR_REREG_ACCESS)
- mr->access = mr_access_flags;
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return NULL;
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -421,7 +386,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
- u64 length;
int ret;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -438,14 +402,15 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_FRMR;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->size = max_num_sg * (1 << PAGE_SHIFT);
/* Allocate memory region key */
- length = max_num_sg * (1 << PAGE_SHIFT);
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_free;
- ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
+ ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
if (ret)
goto err_key;
@@ -454,7 +419,7 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
goto err_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
+ mr->ibmr.length = mr->size;
return &mr->ibmr;
@@ -631,30 +596,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
}
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, struct hns_roce_buf_region *region)
+ struct hns_roce_buf_region *region, dma_addr_t *pages,
+ int max_count)
{
+ int count, npage;
+ int offset, end;
__le64 *mtts;
- int offset;
- int count;
- int npage;
u64 addr;
- int end;
int i;
- /* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
- if (!region->hopnum)
- return 0;
-
offset = region->offset;
end = offset + region->count;
npage = 0;
- while (offset < end) {
+ while (offset < end && npage < max_count) {
+ count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset, &count, NULL);
if (!mtts)
return -ENOBUFS;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count && npage < max_count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
addr = to_hr_hw_page_addr(pages[npage]);
else
@@ -666,7 +627,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
offset += count;
}
- return 0;
+ return npage;
}
static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
@@ -729,25 +690,15 @@ static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
}
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- struct hns_roce_buf_attr *buf_attr, bool is_direct,
+ struct hns_roce_buf_attr *buf_attr,
struct ib_udata *udata, unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned int best_pg_shift;
- int all_pg_count = 0;
size_t total_size;
- int ret;
total_size = mtr_bufs_size(buf_attr);
- if (total_size < 1) {
- ibdev_err(ibdev, "failed to check mtr size\n.");
- return -EINVAL;
- }
if (udata) {
- unsigned long pgsz_bitmap;
- unsigned long page_size;
-
mtr->kmem = NULL;
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
@@ -756,76 +707,67 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
PTR_ERR(mtr->umem));
return -ENOMEM;
}
- if (buf_attr->fixed_page)
- pgsz_bitmap = 1 << buf_attr->page_shift;
- else
- pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);
-
- page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
- user_addr);
- if (!page_size)
- return -EINVAL;
- best_pg_shift = order_base_2(page_size);
- all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
- ret = 0;
} else {
mtr->umem = NULL;
- mtr->kmem =
- hns_roce_buf_alloc(hr_dev, total_size,
- buf_attr->page_shift,
- is_direct ? HNS_ROCE_BUF_DIRECT : 0);
+ mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
+ buf_attr->page_shift,
+ mtr->hem_cfg.is_direct ?
+ HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
PTR_ERR(mtr->kmem));
return PTR_ERR(mtr->kmem);
}
-
- best_pg_shift = buf_attr->page_shift;
- all_pg_count = mtr->kmem->npages;
- }
-
- /* must bigger than minimum hardware page shift */
- if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
- ret = -EINVAL;
- ibdev_err(ibdev,
- "failed to check mtr, page shift = %u count = %d.\n",
- best_pg_shift, all_pg_count);
- goto err_alloc_mem;
}
- mtr->hem_cfg.buf_pg_shift = best_pg_shift;
- mtr->hem_cfg.buf_pg_count = all_pg_count;
-
return 0;
-err_alloc_mem:
- mtr_free_bufs(hr_dev, mtr);
- return ret;
}
-static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, int count, unsigned int page_shift)
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ int page_count, unsigned int page_shift)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t *pages;
int npage;
- int err;
+ int ret;
+
+ /* alloc a tmp array to store buffer's dma address */
+ pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
if (mtr->umem)
- npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, 0,
mtr->umem, page_shift);
else
- npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, 0,
mtr->kmem);
+ if (npage != page_count) {
+ ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
+ page_count);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
+ }
+
if (mtr->hem_cfg.is_direct && npage > 1) {
- err = mtr_check_direct_pages(pages, npage, page_shift);
- if (err) {
- ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
- mtr->umem ? "user" : "kernel", err);
- npage = err;
+ ret = mtr_check_direct_pages(pages, npage, page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to check %s mtr, idx = %d.\n",
+ mtr->umem ? "user" : "kernel", ret);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
}
}
- return npage;
+ ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
+ if (ret)
+ ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
+
+err_alloc_list:
+ kvfree(pages);
+
+ return ret;
}
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
@@ -833,8 +775,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
{
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
- unsigned int i;
- int err;
+ unsigned int i, mapped_cnt;
+ int ret;
/*
* Only use the first page address as root ba when hopnum is 0, this
@@ -845,26 +787,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
- for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
+ /* if hopnum is 0, no need to map pages in this region */
+ if (!r->hopnum) {
+ mapped_cnt += r->count;
+ continue;
+ }
+
if (r->offset + r->count > page_cnt) {
- err = -EINVAL;
+ ret = -EINVAL;
ibdev_err(ibdev,
"failed to check mtr%u end %u + %u, max %u.\n",
i, r->offset, r->count, page_cnt);
- return err;
+ return ret;
}
- err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
- if (err) {
+ ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
+ page_cnt - mapped_cnt);
+ if (ret < 0) {
ibdev_err(ibdev,
"failed to map mtr%u offset %u, ret = %d.\n",
- i, r->offset, err);
- return err;
+ i, r->offset, ret);
+ return ret;
}
+ mapped_cnt += ret;
+ ret = 0;
}
- return 0;
+ if (mapped_cnt < page_cnt) {
+ ret = -ENOBUFS;
+ ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
+ mapped_cnt, page_cnt);
+ }
+
+ return ret;
}
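/* Worked example of the mapped_cnt accounting above (region values made
 * up): with r0 = { .hopnum = 0, .offset = 0, .count = 2 } and
 * r1 = { .hopnum = 2, .offset = 2, .count = 6 } and page_cnt = 8, the
 * loop skips r0 but still credits its 2 pages to mapped_cnt, then
 * mtr_map_region() writes the 6 pages of r1, so mapped_cnt reaches 8
 * and the function returns 0.
 */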
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
@@ -928,68 +886,92 @@ done:
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift)
+ unsigned int *buf_page_shift, int unaligned_size)
{
struct hns_roce_buf_region *r;
+ int first_region_padding;
+ int page_cnt, region_cnt;
unsigned int page_shift;
- int page_cnt = 0;
size_t buf_size;
- int region_cnt;
+ /* If mtt is disabled, all pages must be within a contiguous range */
+ cfg->is_direct = !mtr_has_mtt(attr);
+ buf_size = mtr_bufs_size(attr);
if (cfg->is_direct) {
- buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
- page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
- /*
- * When HEM buffer use level-0 addressing, the page size equals
- * the buffer size, and the the page size = 4K * 2^N.
+ /* When the HEM buffer uses 0-level addressing, the page size
+ * equals the whole buffer size. The buffer is split into 4K
+ * units, which are only used to check that adjacent units sit
+ * in contiguous space; the 4K unit size is fixed by the hns
+ * ROCEE's requirement.
*/
- cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
- if (attr->region_count > 1) {
- cfg->buf_pg_count = page_cnt;
- page_shift = HNS_HW_PAGE_SHIFT;
- } else {
- cfg->buf_pg_count = 1;
- page_shift = cfg->buf_pg_shift;
- if (buf_size != 1 << page_shift) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to check direct size %zu shift %d.\n",
- buf_size, page_shift);
- return -EINVAL;
- }
- }
+ page_shift = HNS_HW_PAGE_SHIFT;
+
+ /* The ROCEE requires the page size to be 4K * 2^N. */
+ cfg->buf_pg_count = 1;
+ cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
+ order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
+ first_region_padding = 0;
} else {
- page_shift = cfg->buf_pg_shift;
+ page_shift = attr->page_shift;
+ cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
+ 1 << page_shift);
+ cfg->buf_pg_shift = page_shift;
+ first_region_padding = unaligned_size;
}
- /* convert buffer size to page index and page count */
- for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
- region_cnt < attr->region_count &&
+ /* Convert the buffer size to a page index and page count for each
+ * region; the buffer's offset is folded into the first region.
+ */
+ for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
r = &cfg->region[region_cnt];
r->offset = page_cnt;
- buf_size = hr_hw_page_align(attr->region[region_cnt].size);
+ buf_size = hr_hw_page_align(attr->region[region_cnt].size +
+ first_region_padding);
r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
+ first_region_padding = 0;
page_cnt += r->count;
r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
r->count);
}
- if (region_cnt < 1) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to check mtr region count, pages = %d.\n",
- cfg->buf_pg_count);
- return -ENOBUFS;
- }
-
cfg->region_count = region_cnt;
*buf_page_shift = page_shift;
return page_cnt;
}
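/* Worked example of the 0-level (direct) sizing above, with a made-up
 * buffer size of 24K:
 *
 *	DIV_ROUND_UP(24576, HNS_HW_PAGE_SIZE)     = 6	// 4K units
 *	order_base_2(6)                           = 3
 *	cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + 3 = 15	// one 32K "page"
 *
 * i.e. the whole buffer must fit in a single physically contiguous
 * 32K range, matching the 4K * 2^N rule noted above.
 */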
+static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ unsigned int ba_page_shift)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ int ret;
+
+ hns_roce_hem_list_init(&mtr->hem_list);
+ if (!cfg->is_direct) {
+ ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ cfg->region, cfg->region_count,
+ ba_page_shift);
+ if (ret)
+ return ret;
+ cfg->root_ba = mtr->hem_list.root_ba;
+ cfg->ba_pg_shift = ba_page_shift;
+ } else {
+ cfg->ba_pg_shift = cfg->buf_pg_shift;
+ }
+
+ return 0;
+}
+
+static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+}
+
/**
* hns_roce_mtr_create - Create hns memory translate region.
*
+ * @hr_dev: RoCE device struct pointer
* @mtr: memory translate region
* @buf_attr: buffer attribute for creating mtr
* @ba_page_shift: page shift for multi-hop base address table
@@ -1001,95 +983,51 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned int ba_page_shift, struct ib_udata *udata,
unsigned long user_addr)
{
- struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned int buf_page_shift = 0;
- dma_addr_t *pages = NULL;
- int all_pg_cnt;
- int get_pg_cnt;
- int ret = 0;
-
- /* if disable mtt, all pages must in a continuous address range */
- cfg->is_direct = !mtr_has_mtt(buf_attr);
-
- /* if buffer only need mtt, just init the hem cfg */
- if (buf_attr->mtt_only) {
- cfg->buf_pg_shift = buf_attr->page_shift;
- cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
- buf_attr->page_shift;
- mtr->umem = NULL;
- mtr->kmem = NULL;
- } else {
- ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
- udata, user_addr);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc mtr bufs, ret = %d.\n", ret);
- return ret;
- }
- }
+ int buf_page_cnt;
+ int ret;
- all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
- if (all_pg_cnt < 1) {
- ret = -ENOBUFS;
- ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
- goto err_alloc_bufs;
+ buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
+ &buf_page_shift,
+ udata ? user_addr & ~PAGE_MASK : 0);
+ if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
+ ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %d.\n",
+ buf_page_cnt, buf_page_shift);
+ return -EINVAL;
}
- hns_roce_hem_list_init(&mtr->hem_list);
- if (!cfg->is_direct) {
- ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
- cfg->region, cfg->region_count,
- ba_page_shift);
- if (ret) {
- ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
- ret);
- goto err_alloc_bufs;
- }
- cfg->root_ba = mtr->hem_list.root_ba;
- cfg->ba_pg_shift = ba_page_shift;
- } else {
- cfg->ba_pg_shift = cfg->buf_pg_shift;
+ ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
+ return ret;
}
- /* no buffer to map */
- if (buf_attr->mtt_only)
+ /* The caller has its own buffer list and invokes hns_roce_mtr_map()
+ * later to finish the MTT configuration.
+ */
+ if (buf_attr->mtt_only) {
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
return 0;
-
- /* alloc a tmp array to store buffer's dma address */
- pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
- all_pg_cnt);
- goto err_alloc_hem_list;
- }
-
- get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
- buf_page_shift);
- if (get_pg_cnt != all_pg_cnt) {
- ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
- get_pg_cnt, all_pg_cnt);
- ret = -ENOBUFS;
- goto err_alloc_page_list;
}
- /* write buffer's dma address to BA table */
- ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+ ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
if (ret) {
- ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
- goto err_alloc_page_list;
+ ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
+ goto err_alloc_mtt;
}
- /* drop tmp array */
- kvfree(pages);
- return 0;
-err_alloc_page_list:
- kvfree(pages);
-err_alloc_hem_list:
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
-err_alloc_bufs:
+ /* Write buffer's dma address to MTT */
+ ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
+ if (ret)
+ ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
+ else
+ return 0;
+
mtr_free_bufs(hr_dev, mtr);
+err_alloc_mtt:
+ mtr_free_mtt(hr_dev, mtr);
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d8e2fe5558d2..004aca9086ab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->doorbell_qpn = 1;
} else {
- spin_lock(&qp_table->bank_lock);
+ mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to alloc QPN, ret = %d\n", ret);
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
return ret;
}
qp_table->bank[bankid].inuse++;
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
}
@@ -408,14 +408,37 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
- spin_lock(&hr_dev->qp_table.bank_lock);
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
- spin_unlock(&hr_dev->qp_table.bank_lock);
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
+}
+
+static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_rq_sg;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in the kernel; the userspace driver will
+ * calculate the number of max_sge with reserved SGEs when allocating wqe
+ * buf, so there is no need to do this again in the kernel. But the number
+ * may exceed the capacity of SGEs recorded in the firmware, so the
+ * kernel driver should just adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_qp->rq.rsv_sge = 1;
+
+ return max_sge;
}
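/* Worked example for a kernel QP on HIP08 (user == false, numbers made
 * up): proc_rq_sge() sets rq.rsv_sge = 1; a request for
 * cap->max_recv_sge = 3 then gives, in set_rq_size() below,
 *
 *	rq.max_gs = roundup_pow_of_two(max(1U, 3) + 1) = 4
 *
 * and the value written back is max_recv_sge = 4 - 1 = 3, so the
 * reserved SGE stays invisible to the caller.
 */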
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp, int has_rq)
+ struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
+ u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
u32 cnt;
/* If srq exist, set zero for relative number of rq */
@@ -431,8 +454,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
/* Check the validity of QP support capacity */
if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
- cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
- ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
+ cap->max_recv_sge > max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "RQ config error, depth = %u, sge = %u\n",
cap->max_recv_wr, cap->max_recv_sge);
return -EINVAL;
}
@@ -444,7 +468,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
return -EINVAL;
}
- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ hr_qp->rq.rsv_sge);
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -459,7 +484,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs;
+ cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
return 0;
}
@@ -599,7 +624,6 @@ static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
return -EINVAL;
buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
- buf_attr->fixed_page = true;
buf_attr->region_count = idx;
return 0;
@@ -919,7 +943,7 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
- hns_roce_qp_has_rq(init_attr));
+ hns_roce_qp_has_rq(init_attr), !!udata);
if (ret) {
ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
ret);
@@ -1371,6 +1395,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index c4ae57e4173a..d5a6de0e7095 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 Hisilicon Limited.
*/
+#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
@@ -76,40 +77,16 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
- u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
- u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
- dma_addr_t dma_handle_wqe = 0;
- dma_addr_t dma_handle_idx = 0;
int ret;
- /* Get the physical address of srq buf */
- ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
- ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
- if (ret < 1) {
- ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
- ret);
- return -ENOBUFS;
- }
-
- /* Get physical address of idx que buf */
- ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
- ARRAY_SIZE(mtts_idx), &dma_handle_idx);
- if (ret < 1) {
- ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
- ret);
- return -ENOBUFS;
- }
-
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ number, ret = %d.\n", ret);
+ ibdev_err(ibdev, "failed to alloc SRQ number.\n");
return -ENOMEM;
}
@@ -127,34 +104,36 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR_OR_NULL(mailbox)) {
- ret = -ENOMEM;
ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
+ ret = -ENOMEM;
goto err_xa;
}
- hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
- mtts_wqe, mtts_idx, dma_handle_wqe,
- dma_handle_idx);
+ ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
+ if (ret) {
+ ibdev_err(ibdev, "failed to write SRQC.\n");
+ goto err_mbox;
+ }
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
- goto err_xa;
+ goto err_mbox;
}
- atomic_set(&srq->refcount, 1);
- init_completion(&srq->free);
- return ret;
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+err_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_xa:
xa_erase(&srq_table->xa, srq->srqn);
-
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-
err_out:
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
+
return ret;
}
@@ -178,46 +157,13 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
-static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
- struct ib_udata *udata, unsigned long addr)
-{
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_buf_attr buf_attr = {};
- int err;
-
- srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
- HNS_ROCE_SGE_SIZE *
- srq->max_gs)));
-
- buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
- buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
- srq->wqe_shift);
- buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
- buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
-
- err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
- hr_dev->caps.srqwqe_ba_pg_sz +
- HNS_HW_PAGE_SHIFT, udata, addr);
- if (err)
- ibdev_err(ibdev,
- "failed to alloc SRQ buf mtr, ret = %d.\n", err);
-
- return err;
-}
-
-static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
-{
- hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
-}
-
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
struct ib_udata *udata, unsigned long addr)
{
struct hns_roce_idx_que *idx_que = &srq->idx_que;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_attr buf_attr = {};
- int err;
+ int ret;
srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
@@ -226,31 +172,33 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
srq->idx_que.entry_shift);
buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
- err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
udata, addr);
- if (err) {
+ if (ret) {
ibdev_err(ibdev,
- "failed to alloc SRQ idx mtr, ret = %d.\n", err);
- return err;
+ "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
+ return ret;
}
if (!udata) {
idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap) {
ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_idx_mtr;
}
}
+ idx_que->head = 0;
+ idx_que->tail = 0;
+
return 0;
err_idx_mtr:
hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
- return err;
+ return ret;
}
static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
@@ -262,10 +210,42 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
+static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+
+ ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
+
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- srq->head = 0;
- srq->tail = srq->wqe_cnt - 1;
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
return -ENOMEM;
@@ -279,96 +259,171 @@ static void free_srq_wrid(struct hns_roce_srq *srq)
srq->wrid = NULL;
}
-int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_srq_sges;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in the kernel; the userspace driver will
+ * calculate the number of max_sge with reserved SGEs when allocating wqe
+ * buf, so there is no need to do this again in the kernel. But the number
+ * may exceed the capacity of SGEs recorded in the firmware, so the
+ * kernel driver should just adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_srq->rsv_sge = 1;
+
+ return max_sge;
+}
+
+static int set_srq_basic_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct ib_srq_attr *attr = &init_attr->attr;
+ u32 max_sge;
+
+ max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+ if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+ attr->max_sge > max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid SRQ attr, depth = %u, sge = %u.\n",
+ attr->max_wr, attr->max_sge);
+ return -EINVAL;
+ }
+
+ attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
+ srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
+ srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);
+
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
+ attr->srq_limit = 0;
+
+ return 0;
+}
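/* Worked example (made-up numbers) for HIP08 with rsv_sge = 1, given
 * attr->max_wr = 100 and attr->max_sge = 2:
 *
 *	srq->wqe_cnt = roundup_pow_of_two(100)   = 128
 *	srq->max_gs  = roundup_pow_of_two(2 + 1) = 4
 *
 * and the attr written back is max_wr = 128 and max_sge = 3. Note the
 * depth semantics also change here: the old path rounded up max_wr + 1
 * and reported wqe_cnt - 1.
 */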
+
+static void set_srq_ext_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr)
+{
+ srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
+}
+
+static int set_srq_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
- struct hns_roce_ib_create_srq_resp resp = {};
- struct hns_roce_srq *srq = to_hr_srq(ib_srq);
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_ib_create_srq ucmd = {};
int ret;
- u32 cqn;
- if (init_attr->srq_type != IB_SRQT_BASIC &&
- init_attr->srq_type != IB_SRQT_XRC)
- return -EOPNOTSUPP;
+ ret = set_srq_basic_param(srq, init_attr, udata);
+ if (ret)
+ return ret;
- /* Check the actual SRQ wqe and SRQ sge num */
- if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
- return -EINVAL;
+ set_srq_ext_param(srq, init_attr);
- mutex_init(&srq->mutex);
- spin_lock_init(&srq->lock);
+ return 0;
+}
- srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge;
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ib_create_srq ucmd = {};
+ int ret;
if (udata) {
ret = ib_copy_from_udata(&ucmd, udata,
min(udata->inlen, sizeof(ucmd)));
if (ret) {
- ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to copy SRQ udata, ret = %d.\n",
ret);
return ret;
}
}
- ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ buffer, ret = %d.\n", ret);
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret)
return ret;
- }
- ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
- goto err_buf_alloc;
- }
+ ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret)
+ goto err_idx;
if (!udata) {
ret = alloc_srq_wrid(hr_dev, srq);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
- ret);
- goto err_idx_alloc;
- }
+ if (ret)
+ goto err_wqe_buf;
}
- cqn = ib_srq_has_cq(init_attr->srq_type) ?
- to_hr_cq(init_attr->ext.cq)->cqn : 0;
- srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ return 0;
- ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ context, ret = %d.\n", ret);
- goto err_wrid_alloc;
- }
+err_wqe_buf:
+ free_srq_wqe_buf(hr_dev, srq);
+err_idx:
+ free_srq_idx(hr_dev, srq);
- srq->event = hns_roce_ib_srq_event;
- resp.srqn = srq->srqn;
+ return ret;
+}
+
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ free_srq_wrid(srq);
+ free_srq_wqe_buf(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+}
+
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+ struct hns_roce_ib_create_srq_resp resp = {};
+ struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+ int ret;
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+
+ ret = set_srq_param(srq, init_attr, udata);
+ if (ret)
+ return ret;
+
+ ret = alloc_srq_buf(hr_dev, srq, udata);
+ if (ret)
+ return ret;
+
+ ret = alloc_srqc(hr_dev, srq);
+ if (ret)
+ goto err_srq_buf;
if (udata) {
- ret = ib_copy_to_udata(udata, &resp,
- min(udata->outlen, sizeof(resp)));
- if (ret)
- goto err_srqc_alloc;
+ resp.srqn = srq->srqn;
+ if (ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)))) {
+ ret = -EFAULT;
+ goto err_srqc;
+ }
}
+ srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ srq->event = hns_roce_ib_srq_event;
+ atomic_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
return 0;
-err_srqc_alloc:
+err_srqc:
free_srqc(hr_dev, srq);
-err_wrid_alloc:
- free_srq_wrid(srq);
-err_idx_alloc:
- free_srq_idx(hr_dev, srq);
-err_buf_alloc:
+err_srq_buf:
free_srq_buf(hr_dev, srq);
+
return ret;
}
@@ -378,8 +433,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
free_srqc(hr_dev, srq);
- free_srq_idx(hr_dev, srq);
- free_srq_wrid(srq);
free_srq_buf(hr_dev, srq);
return 0;
}
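
The refactored create path above follows the usual kernel unwind idiom: helpers
acquire resources in a fixed order and the error labels release them in exact
reverse order, so each label undoes only what already succeeded. A minimal
compilable sketch of the pattern (all names here are illustrative stand-ins,
not hns_roce symbols):

	struct res { int idx_held; int wqe_held; };

	static int alloc_idx(struct res *r)  { r->idx_held = 1; return 0; }
	static int alloc_wqe(struct res *r)  { r->wqe_held = 1; return 0; }
	static void free_idx(struct res *r)  { r->idx_held = 0; }

	static int create_res(struct res *r)
	{
		int ret;

		ret = alloc_idx(r);	/* idx queue comes first */
		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = alloc_wqe(r);	/* WQE buffer second */
		if (ret)
			goto err_idx;	/* unwind in reverse order */

		return 0;

	err_idx:
		free_idx(r);
		return ret;
	}
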
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 9acc0ecc9a43..ac65c8237b2e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -70,7 +70,7 @@ static void i40iw_disconnect_worker(struct work_struct *work);
/**
* i40iw_free_sqbuf - put back puda buffer if refcount = 0
* @vsi: pointer to vsi structure
- * @buf: puda buffer to free
+ * @bufp: puda buffer to free
*/
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
@@ -729,6 +729,7 @@ static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
/**
* i40iw_build_mpa_v1 - build a MPA V1 frame
* @cm_node: connection's node
+ * @start_addr: MPA frame start address
* @mpa_key: to do read0 or write0
*/
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
@@ -1040,7 +1041,7 @@ negotiate_done:
/**
* i40iw_schedule_cm_timer
- * @@cm_node: connection's node
+ * @cm_node: connection's node
* @sqbuf: buffer to send
* @type: if it is send or close
* @send_retrans: if rexmits to be done
@@ -1205,7 +1206,7 @@ static void i40iw_build_timer_list(struct list_head *timer_list,
/**
* i40iw_cm_timer_tick - system's timer expired callback
- * @pass: Pointing to cm_core
+ * @t: Timer instance to fetch the cm_core pointer from
*/
static void i40iw_cm_timer_tick(struct timer_list *t)
{
@@ -1463,6 +1464,7 @@ struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
* @cm_core: cm's core
* @dst_port: listener tcp port num
* @dst_addr: listener ip addr
+ * @vlan_id: vlan id for the given address
* @listener_state: state to match with listen node's
*/
static struct i40iw_cm_listener *i40iw_find_listener(
@@ -1521,7 +1523,7 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
/**
* i40iw_find_port - find port that matches reference port
* @hte: ptr to accelerated or non-accelerated list
- * @accelerated_list: flag for accelerated vs non-accelerated list
+ * @port: port number to locate
*/
static bool i40iw_find_port(struct list_head *hte, u16 port)
{
@@ -1834,6 +1836,7 @@ exit:
/**
* i40iw_dec_refcnt_listen - delete listener and associated cm nodes
* @cm_core: cm's core
+ * @listener: passive connection's listener
* @free_hanging_nodes: to free associated cm_nodes
* @apbvt_del: flag to delete the apbvt
*/
@@ -2029,7 +2032,7 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
return rc;
}
-/**
+/*
* i40iw_get_dst_ipv6
*/
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
@@ -2051,7 +2054,8 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
/**
* i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
* @iwdev: iwarp device structure
- * @dst_ip: remote ip address
+ * @src: source ip address
+ * @dest: remote ip address
* @arpindex: if there is an arp entry
*/
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
@@ -3004,7 +3008,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
/**
* i40iw_cm_reject - reject and teardown a connection
* @cm_node: connection's node
- * @pdate: ptr to private data for reject
+ * @pdata: ptr to private data for reject
* @plen: size of private data
*/
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
@@ -4302,7 +4306,7 @@ set_qhash:
* i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
* @ipaddr: Pointer to IPv4 or IPv6 address
- * @ipv4: flag indicating IPv4 when true
+ * @nfo: cm info node
* @disconnect_all: flag indicating disconnect all QPs
* teardown QPs where source or destination addr matches ip addr
*/
@@ -4358,6 +4362,7 @@ void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
/**
* i40iw_ifdown_notify - process an ifdown on an interface
* @iwdev: device pointer
+ * @netdev: network interface device structure
* @ipaddr: Pointer to IPv4 or IPv6 address
* @ipv4: flag indicating IPv4 when true
* @ifup: flag indicating interface up when true
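
Nearly every i40iw hunk in this series is a kernel-doc repair: each @name must
match a real parameter of the function, use a colon (not a semicolon), and have
no space between the @ and the name, otherwise scripts/kernel-doc emits W=1
warnings. A minimal well-formed comment, with hypothetical names:

	struct foo_dev;

	/**
	 * foo_do_work - one-line summary of the helper
	 * @dev: device the work runs against
	 * @len: number of bytes to process
	 *
	 * Longer description goes here, after a blank line.
	 *
	 * Return: 0 on success, negative errno on failure.
	 */
	static int foo_do_work(struct foo_dev *dev, unsigned int len);
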
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index c943d491b72b..eaea5d545eb8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -181,7 +181,7 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
* i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
* @buf: ptr to fpm query buffer
* @buf_idx: index into buf
- * @info: ptr to i40iw_hmc_obj_info struct
+ * @obj_info: ptr to i40iw_hmc_obj_info struct
* @rsrc_idx: resource index into info
*
* Decode a 64 bit value from fpm query buffer into max count and size
@@ -205,7 +205,7 @@ static u64 i40iw_sc_decode_fpm_query(u64 *buf,
/**
* i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
* @buf: ptr to fpm query buffer
- * @info: ptr to i40iw_hmc_obj_info struct
+ * @hmc_info: ptr to i40iw_hmc_obj_info struct
* @hmc_fpm_misc: ptr to fpm data
*
* parses fpm query buffer and copy max_cnt and
@@ -775,7 +775,7 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
* i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
* @cqp: struct for cqp hw
* @op_code: cqp opcode for completion
- * @info: completion q entry to return
+ * @compl_info: completion q entry to return
*/
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
struct i40iw_sc_cqp *cqp,
@@ -933,7 +933,7 @@ static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cq
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
* @hmc_fn_id: hmc function id
- * @commit_fpm_mem; Memory for fpm values
+ * @commit_fpm_mem: Memory for fpm values
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
@@ -1026,7 +1026,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
/**
* i40iw_get_rdma_features - get RDMA features
- * @dev - sc device struct
+ * @dev: sc device struct
*/
enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
{
@@ -1456,7 +1456,7 @@ static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
* @entry_idx: index of mac entry
- * @ ignore_ref_count: to force mac adde delete
+ * @ignore_ref_count: to force mac address entry delete
* @post_sq: flag for cqp db to ring
*/
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
@@ -2304,7 +2304,7 @@ static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
* i40iw_sc_cq_modify - modify a Completion Queue
* @cq: cq struct
* @info: modification info struct
- * @scratch:
+ * @scratch: u64 saved to be used during cqp completion
* @post_sq: flag to post to sq
*/
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
@@ -3673,7 +3673,7 @@ static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
* @cqp: struct for cqp hw
- * @info; sd info for wqe
+ * @info: sd info for wqe
* @scratch: u64 saved to be used during cqp completion
*/
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
@@ -4884,7 +4884,7 @@ void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
/**
* i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
- * @stat: pestat struct
+ * @stats: pestat struct
* @index: index in HW stats table which contains offset reg-addr
* @value: hw stats value
*/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
index 5484cbf55f0f..8bd72af9e099 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -46,7 +46,7 @@
* i40iw_find_sd_index_limit - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
- * @index: starting index for the object
+ * @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
@@ -78,7 +78,7 @@ static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
- * @pd_index: pointer to return page descriptor index
+ * @pd_idx: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 56fdc161f6f8..d167ac10c751 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -165,7 +165,7 @@ static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq
/**
* i40iw_iwarp_ce_handler - handle iwarp completions
* @iwdev: iwarp device
- * @iwcp: iwarp cq receiving event
+ * @iwcq: iwarp cq receiving event
*/
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
struct i40iw_sc_cq *iwcq)
@@ -519,6 +519,7 @@ enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
* @iwdev: iwarp device
* @mac_addr: mac address ptr
* @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4 when true
* @action: add, delete or modify
*/
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
@@ -581,7 +582,6 @@ static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u
* @mtype: type of qhash
* @cmnode: cmnode associated with connection
* @wait: wait for completion
- * @user_pri:user pri of the connection
*/
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 584932d3cc44..ab4cb11950dc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -186,7 +186,7 @@ static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
/**
* i40iw_dpc - tasklet for aeq and ceq 0
- * @data: iwarp device
+ * @t: Tasklet context used to fetch pointer to iwarp device
*/
static void i40iw_dpc(struct tasklet_struct *t)
{
@@ -200,7 +200,7 @@ static void i40iw_dpc(struct tasklet_struct *t)
/**
* i40iw_ceq_dpc - dpc handler for CEQ
- * @data: data points to CEQ
+ * @t: Tasklet context used to fetch pointer to CEQ data
*/
static void i40iw_ceq_dpc(struct tasklet_struct *t)
{
@@ -227,7 +227,7 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
/**
* i40iw_destroy_cqp - destroy control qp
* @iwdev: iwarp device
- * @create_done: 1 if cqp create poll was success
+ * @free_hwcqp: 1 if CQP should be destroyed
*
* Issue destroy cqp request and
* free the resources associated with the cqp
@@ -253,7 +253,7 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
/**
* i40iw_disable_irqs - disable device interrupts
* @dev: hardware control device structure
- * @msic_vec: msix vector to disable irq
+ * @msix_vec: msix vector to disable irq
* @dev_id: parameter to pass to free_irq (used during irq setup)
*
* The function is called when destroying aeq/ceq
@@ -394,8 +394,9 @@ static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
/**
* i40iw_close_hmc_objects_type - delete hmc objects of a given type
- * @iwdev: iwarp device
+ * @dev: iwarp device
* @obj_type: the hmc object type to be deleted
+ * @hmc_info: pointer to the HMC configuration information
* @is_pf: true if the function is PF otherwise false
* @reset: true if called before reset
*/
@@ -437,6 +438,7 @@ static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
/**
* i40iw_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
* @data: ceq pointer
*/
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
@@ -1777,6 +1779,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
/**
* i40iw_close - client interface operation close for iwarp/uda device
* @ldev: lan device information
+ * @reset: true if called before reset
* @client: client to close
*
* Called by the lan driver during the processing of client unregister
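
The @t annotations above reflect the tasklet API conversion: callbacks now take
the tasklet itself and recover their context with from_tasklet(), a
container_of() wrapper, instead of an unsigned long data cookie. A hedged
sketch with made-up names:

	#include <linux/interrupt.h>

	struct my_dev {
		struct tasklet_struct dpc_tasklet;
		/* ... device state ... */
	};

	static void my_dpc(struct tasklet_struct *t)
	{
		/* expands to container_of(t, struct my_dev, dpc_tasklet) */
		struct my_dev *dev = from_tasklet(dev, t, dpc_tasklet);

		/* process AEQ/CEQ work for dev here */
	}

	/* registration: tasklet_setup(&dev->dpc_tasklet, my_dpc); */
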
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 5f97643e22e5..53e5cd1a2bd6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -54,6 +54,7 @@ static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chun
/**
* i40iw_destroy_pble_pool - destroy pool during module unload
+ * @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
*/
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
@@ -112,8 +113,8 @@ enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
- * @ pble_rsrc: structure containing fpm address
- * @ idx: where to return indexes
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
*/
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 924be4b03c9a..d1c8cc0a6236 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -511,7 +511,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_qp_wqe - setup wqe for qp create
- * @rsrc: resource for qp
+ * @dev: iwarp device
+ * @qp: resource for qp
*/
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
@@ -623,7 +624,8 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_cq_wqe - setup wqe for cq create
- * @rsrc: resource for cq
+ * @dev: iwarp device
+ * @cq: cq to setup
*/
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
@@ -782,7 +784,7 @@ static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_dele_resources - delete all resources during close
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @type: type of resource to dele
* @reset: true if reset chip
*/
@@ -876,7 +878,7 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @info: resource information
*/
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
@@ -1121,6 +1123,7 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
/**
* i40iw_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: partial management per user qp
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
* @buf: first receive buffer
@@ -1434,7 +1437,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
/**
* i40iw_ieq_receive - received exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @buf: exception buffer received
*/
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index c3633c9944db..f521be16bf31 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -119,6 +119,8 @@ void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
* @qp: hw qp ptr
* @wqe_idx: return wqe index
* @wqe_size: size of sq wqe
+ * @total_size: work request length
+ * @wr_id: work request id
*/
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
u32 *wqe_idx,
@@ -717,7 +719,6 @@ static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
* i40iw_cq_poll_completion - get cq completion info
* @cq: hw cq
* @info: cq poll information returned
- * @post_cq: update cq tail
*/
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
struct i40iw_cq_poll_info *info)
@@ -1051,7 +1052,7 @@ void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
/**
* i40iw_clean_cq - clean cq entries
- * @ queue completion context
+ * @queue: completion context
* @cq: cq to clean
*/
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 644f8c641aa0..76f052b12c14 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -55,6 +55,7 @@
* i40iw_arp_table - manage arp table
* @iwdev: iwarp device
* @ip_addr: ip address for device
+ * @ipv4: flag indicating IPv4 when true
* @mac_addr: mac address ptr
* @action: modify, delete or add
*/
@@ -138,7 +139,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
/**
* i40iw_inetaddr_event - system notifier for ipv4 addr events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: if address
*/
@@ -214,7 +215,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
/**
* i40iw_inet6addr_event - system notifier for ipv6 addr events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: if address
*/
@@ -265,7 +266,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
/**
* i40iw_net_event - system notifier for netevents
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: neighbor
*/
@@ -310,7 +311,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
/**
* i40iw_netdevice_event - system notifier for netdev events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: netdev
*/
@@ -652,6 +653,7 @@ struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
 * i40iw_debug_buf - print debug msg and buffer if mask set
* @dev: hardware control device structure
* @mask: mask to compare if to print debug buffer
+ * @desc: identifying string
* @buf: points buffer addr
 * @size: size of buffer to print
*/
@@ -784,7 +786,7 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
/**
* i40iw_cqp_sds_cmd - create cqp command for sd
* @dev: hardware control device structure
- * @sd_info: information for sd cqp
+ * @sdinfo: information for sd cqp
*
*/
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
@@ -889,7 +891,7 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
/**
 * i40iw_terminate_timeout - timeout happened
- * @context: points to iwarp qp
+ * @t: Timer context used to fetch the iwarp qp
*/
static void i40iw_terminate_timeout(struct timer_list *t)
{
@@ -943,7 +945,7 @@ static void i40iw_cqp_generic_worker(struct work_struct *work)
/**
 * i40iw_cqp_spawn_worker - spawn worker thread
- * @iwdev: device struct pointer
+ * @dev: device struct pointer
* @work_info: work request info
* @iw_vf_idx: virtual function index
*/
@@ -1048,7 +1050,7 @@ enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
/**
* i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
- * @iwdev: function device struct
+ * @dev: function device struct
* @values_mem: buffer for fpm
* @hmc_fn_id: function id for fpm
*/
@@ -1114,7 +1116,7 @@ enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
/**
* i40iw_vf_wait_vchnl_resp - wait for channel msg
- * @iwdev: function's device struct
+ * @dev: function's device struct
*/
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
@@ -1461,7 +1463,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
/**
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @vsi: pointer to the vsi structure
+ * @t: Timer context containing pointer to the vsi structure
*/
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
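
The timer handlers get the same treatment: they receive the timer_list and pull
their owning structure back out with from_timer(), so the old @context/@pass
descriptions no longer match the signature. Illustrative sketch (names are not
from the driver):

	#include <linux/timer.h>

	struct my_vsi {
		struct timer_list stats_timer;
		/* ... stats state ... */
	};

	static void my_stats_timeout(struct timer_list *t)
	{
		struct my_vsi *vsi = from_timer(vsi, t, stats_timer);

		/* refresh HW stats for vsi, then re-arm */
		mod_timer(&vsi->stats_timer, jiffies + HZ);
	}
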
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 65aedfe57e77..f18d146a6079 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -265,9 +265,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
/**
* i40iw_free_qp_resources - free up memory resources for qp
- * @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
- * @qp_num: qp number assigned
*/
void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
{
@@ -302,6 +300,7 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
/**
* i40iw_destroy_qp - destroy qp
* @ibqp: qp's ib pointer also to get to device's qp address
+ * @udata: user data
*/
static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
@@ -338,8 +337,8 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
/**
* i40iw_setup_virt_qp - setup for allocation of virtual qp
- * @dev: iwarp device
- * @qp: qp ptr
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr
* @init_info: initialize info to return
*/
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
@@ -1241,7 +1240,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
* i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
* @arr: lvl1 pbl array
* @npages: page count
- * pg_size: page size
+ * @pg_size: page size
*
*/
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
@@ -1258,7 +1257,7 @@ static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
/**
* i40iw_check_mr_contiguous - check if MR is physically contiguous
* @palloc: pbl allocation struct
- * pg_size: page size
+ * @pg_size: page size
*/
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
@@ -1533,6 +1532,7 @@ static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
* @ibmr: ib mem to access iwarp mr pointer
* @sg: scatter gather list for fmr
* @sg_nents: number of sg pages
+ * @sg_offset: scatter gather offset
*/
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
@@ -1881,6 +1881,7 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
/**
* i40iw_dereg_mr - deregister mr
* @ib_mr: mr ptr for dereg
+ * @udata: user data
*/
static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
@@ -1945,7 +1946,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
return 0;
}
-/**
+/*
* hw_rev_show
*/
static ssize_t hw_rev_show(struct device *dev,
@@ -1959,7 +1960,7 @@ static ssize_t hw_rev_show(struct device *dev,
}
static DEVICE_ATTR_RO(hw_rev);
-/**
+/*
* hca_type_show
*/
static ssize_t hca_type_show(struct device *dev,
@@ -1969,7 +1970,7 @@ static ssize_t hca_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(hca_type);
-/**
+/*
* board_id_show
*/
static ssize_t board_id_show(struct device *dev,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 48fd327f876b..aca9061688ae 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -119,7 +119,7 @@ static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev
return ret_code;
}
-/**
+/*
* vchnl_vf_send_add_hmc_objs_req - Add HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
@@ -158,9 +158,9 @@ static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev
* vchnl_vf_send_del_hmc_objs_req - del HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
- * @ rsrc_type - resource type to delete
- * @ start_index - starting index for resource
- * @ rsrc_count - number of resource type to delete
+ * @rsrc_type: resource type to delete
+ * @start_index: starting index for resource
+ * @rsrc_count: number of resource type to delete
*/
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req,
@@ -222,6 +222,7 @@ static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
+ * @hmc_fcn: HMC function index pointer
*/
static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
@@ -276,6 +277,7 @@ static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
+ * @op_ret_code: I40IW_ERR_* status code
*/
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
@@ -297,8 +299,9 @@ static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
/**
* pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
- * @cqp_req_param: CQP Request param value
- * @not_used: unused CQP callback parameter
+ * @dev: IWARP device pointer
+ * @callback_param: unused CQP callback parameter
+ * @cqe_info: CQE information pointer
*/
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
struct i40iw_ccq_cqe_info *cqe_info)
@@ -331,7 +334,7 @@ static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback
/**
* pf_add_hmc_obj - Callback for Add HMC Object
- * @vf_dev: pointer to the VF Device
+ * @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
@@ -404,7 +407,7 @@ del_out:
/**
* i40iw_vf_init_pestat - Initialize stats for VF
- * @devL pointer to the VF Device
+ * @dev: pointer to the VF Device
* @stats: Statistics structure pointer
* @index: Stats index
*/
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e3cd402c079a..f26a0d920842 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1699,7 +1699,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
- if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+ if (!rdma_is_port_valid(qp->device, flow_attr->port))
return ERR_PTR(-EINVAL);
if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 1b5891130aab..24ee79aa2122 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -798,7 +798,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
{
- int i;
+ unsigned int i;
int ret = 0;
if (!mlx4_is_master(dev->dev))
@@ -817,7 +817,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
goto err_ports;
}
- for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
+ rdma_for_each_port(&dev->ib_dev, i) {
ret = add_port_entries(dev, i);
if (ret)
goto err_add_entries;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 819c142857d6..ebc2a4355fa5 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -94,13 +94,13 @@ struct devx_umem {
struct mlx5_core_dev *mdev;
struct ib_umem *umem;
u32 dinlen;
- u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
+ u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
};
struct devx_umem_reg_cmd {
void *in;
u32 inlen;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)];
};
static struct mlx5_ib_ucontext *
@@ -111,8 +111,8 @@ devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
- u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
void *uctx;
int err;
u16 uid;
@@ -138,14 +138,14 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
if (err)
return err;
- uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ uid = MLX5_GET(create_uctx_out, out, uid);
return uid;
}
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
@@ -288,6 +288,80 @@ static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
return ((u64)opcode << 32) | obj_id;
}
+static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ case MLX5_CMD_OP_CREATE_UMEM:
+ return MLX5_GET(create_umem_out, out, umem_id);
+ case MLX5_CMD_OP_CREATE_MKEY:
+ return MLX5_GET(create_mkey_out, out, mkey_index);
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_GET(create_cq_out, out, cqn);
+ case MLX5_CMD_OP_ALLOC_PD:
+ return MLX5_GET(alloc_pd_out, out, pd);
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ return MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_GET(create_rmp_out, out, rmpn);
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_GET(create_sq_out, out, sqn);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_GET(create_rq_out, out, rqn);
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_GET(create_rqt_out, out, rqtn);
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_GET(create_tir_out, out, tirn);
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_GET(create_tis_out, out, tisn);
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ return MLX5_GET(create_flow_table_out, out, table_id);
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ return MLX5_GET(create_flow_group_out, out, group_id);
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ return MLX5_GET(set_fte_in, in, flow_index);
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ return MLX5_GET(alloc_packet_reformat_context_out, out,
+ packet_reformat_id);
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ return MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ return MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ return MLX5_GET(set_l2_table_entry_in, in, table_index);
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_GET(create_qp_out, out, qpn);
+ case MLX5_CMD_OP_CREATE_SRQ:
+ return MLX5_GET(create_srq_out, out, srqn);
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_GET(create_dct_out, out, dctn);
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_GET(create_xrq_out, out, xrqn);
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ return MLX5_GET(attach_to_mcg_in, in, qpn);
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return MLX5_GET(alloc_xrcd_out, out, xrcd);
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_GET(create_psv_out, out, psv0_index);
+ default:
+ /* The entry must match to one of the devx_is_obj_create_cmd */
+ WARN_ON(true);
+ return 0;
+ }
+}
+
static u64 devx_get_obj_id(const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
@@ -399,8 +473,8 @@ static u64 devx_get_obj_id(const void *in)
break;
case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
- MLX5_GET(general_obj_in_cmd_hdr, in,
- obj_id));
+ MLX5_GET(query_modify_header_context_in,
+ in, modify_header_id));
break;
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
@@ -1019,63 +1093,76 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
u32 *dinlen,
u32 *obj_id)
{
- u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ *obj_id = devx_get_created_obj_id(in, out, opcode);
*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
-
- MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
- switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
+ switch (opcode) {
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
+ MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
break;
case MLX5_CMD_OP_CREATE_UMEM:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_umem_in, din, opcode,
MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_MKEY:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_MKEY);
+		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
break;
case MLX5_CMD_OP_CREATE_CQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_PD:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_transport_domain_in, din, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
+ *obj_id);
break;
case MLX5_CMD_OP_CREATE_RMP:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_RQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_RQT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_TIR:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_TIS:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_q_counter_in, din, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
- *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
MLX5_SET(destroy_flow_table_in, din, other_vport,
MLX5_GET(create_flow_table_in, in, other_vport));
MLX5_SET(destroy_flow_table_in, din, vport_number,
@@ -1083,12 +1170,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(destroy_flow_table_in, din, table_type,
MLX5_GET(create_flow_table_in, in, table_type));
MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_flow_table_in, din, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
break;
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
- *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
MLX5_SET(destroy_flow_group_in, din, other_vport,
MLX5_GET(create_flow_group_in, in, other_vport));
MLX5_SET(destroy_flow_group_in, din, vport_number,
@@ -1098,12 +1184,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(destroy_flow_group_in, din, table_id,
MLX5_GET(create_flow_group_in, in, table_id));
MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_flow_group_in, din, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
break;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
- *obj_id = MLX5_GET(set_fte_in, in, flow_index);
MLX5_SET(delete_fte_in, din, other_vport,
MLX5_GET(set_fte_in, in, other_vport));
MLX5_SET(delete_fte_in, din, vport_number,
@@ -1113,63 +1198,70 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(delete_fte_in, din, table_id,
MLX5_GET(set_fte_in, in, table_id));
MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_fte_in, din, opcode,
MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
break;
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_flow_counter_in, din, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
+ *obj_id);
break;
case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, din,
+ packet_reformat_id, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_modify_header_context_in, din, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, din,
+ modify_header_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
- *obj_id = MLX5_GET(create_scheduling_element_out, out,
- scheduling_element_id);
MLX5_SET(destroy_scheduling_element_in, din,
scheduling_hierarchy,
MLX5_GET(create_scheduling_element_in, in,
scheduling_hierarchy));
MLX5_SET(destroy_scheduling_element_in, din,
scheduling_element_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_scheduling_element_in, din, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
break;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
- *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
break;
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
- *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_l2_table_entry_in, din, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
break;
case MLX5_CMD_OP_CREATE_QP:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_XRC_SRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_xrc_srq_in, din, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_DCT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_XRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
break;
case MLX5_CMD_OP_ATTACH_TO_MCG:
*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
@@ -1178,16 +1270,19 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, opcode,
+ MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_XRCD:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
break;
case MLX5_CMD_OP_CREATE_PSV:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_psv_in, din, opcode,
MLX5_CMD_OP_DESTROY_PSV);
- MLX5_SET(destroy_psv_in, din, psvn,
- MLX5_GET(create_psv_out, out, psv0_index));
+ MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
break;
default:
/* The entry must match to one of the devx_is_obj_create_cmd */
@@ -1215,9 +1310,9 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ init_waitqueue_head(&mkey->wait);
- return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
+ return mlx5r_store_odp_mkey(dev, mkey);
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1290,16 +1385,15 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
int ret;
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
+ xa_erase(&obj->ib_dev->odp_mkeys,
+ mlx5_base_mkey(obj->devx_mr.mmkey.key)))
/*
* The pagefault_single_data_segment() does commands against
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->odp_mkeys,
- mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->odp_srcu);
- }
+ mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
@@ -1345,6 +1439,16 @@ out:
rcu_read_unlock();
}
+static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, apu) ||
+ !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ apu_thread_cq))
+ return false;
+
+ return true;
+}
+
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
struct uverbs_attr_bundle *attrs)
{
@@ -1398,7 +1502,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
obj->flags |= DEVX_OBJ_FLAGS_DCT;
err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
cmd_in_len, cmd_out, cmd_out_len);
- } else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
+ } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
+ !is_apu_thread_cq(dev, cmd_in)) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
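
The devx rework above stops stuffing every destroy command through the generic
general_obj_in_cmd_hdr layout and instead fills the command-specific IFC
struct, so the object id lands in the exact field offset the firmware parses.
MLX5_SET()/MLX5_GET() address fields by (struct name, field name) against
those layouts; a short sketch of the pattern for a CQ, assuming out holds a
CREATE_CQ response:

	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
	u32 cqn = MLX5_GET(create_cq_out, out, cqn);

	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cqn);
	/* din is now a fully typed DESTROY_CQ command box */
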
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9bb9bb058932..652c6ccf1881 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -48,7 +48,7 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
return true;
- return dev->mdev->port_caps[port_num - 1].has_smi;
+ return dev->port_caps[port_num - 1].has_smi;
}
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
@@ -279,7 +279,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -299,7 +299,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
+ dev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
@@ -308,8 +308,8 @@ out:
return err;
}
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad)
+static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
{
struct ib_smp *in_mad = NULL;
int err = -ENOMEM;
@@ -549,7 +549,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
- props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->pkey_tbl_len = dev->pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
@@ -589,7 +589,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
- if (mdev->port_caps[port - 1].ext_port_cap &
+ if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3bae9ba0ead8..0d69a697d75f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
#include <linux/debugfs.h>
@@ -461,7 +462,6 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
bool put_mdev = true;
- u16 qkey_viol_cntr;
u32 eth_prot_oper;
u8 mdev_port_num;
bool ext;
@@ -499,20 +499,22 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width, ext);
- props->port_cap_flags |= IB_PORT_CM_SUP;
- props->ip_gids = true;
+ if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
+ u16 qkey_viol_cntr;
- props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
- roce_address_table_size);
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
+ roce_address_table_size);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
+ props->qkey_viol_cntr = qkey_viol_cntr;
+ }
props->max_mtu = IB_MTU_4096;
props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
props->pkey_tbl_len = 1;
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
- mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
- props->qkey_viol_cntr = qkey_viol_cntr;
-
/* If this is a stub query for an unaffiliated port stop here */
if (!put_mdev)
goto out;
@@ -815,9 +817,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (err)
return err;
- err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
- if (err)
- return err;
+ props->max_pkeys = dev->pkey_table_len;
err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
if (err)
@@ -1384,19 +1384,17 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
- int ret;
+ return mlx5_query_port_roce(ibdev, port, props);
+}
- /* Only link layer == ethernet is valid for representors
- * and we always use port 1
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ /* Default special Pkey for representor device port as per the
+ * IB specification 1.3 section 10.9.1.2.
*/
- ret = mlx5_query_port_roce(ibdev, port, props);
- if (ret || !props)
- return ret;
-
- /* We don't support GIDS */
- props->gid_tbl_len = 0;
-
- return ret;
+ *pkey = 0xffff;
+ return 0;
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -2935,8 +2933,8 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
- dev->mdev->port_caps[port - 1].has_smi = false;
+ for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
+ dev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
@@ -2948,10 +2946,10 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
port, err);
return err;
}
- dev->mdev->port_caps[port - 1].has_smi =
+ dev->port_caps[port - 1].has_smi =
vport_ctx.has_smi;
} else {
- dev->mdev->port_caps[port - 1].has_smi = true;
+ dev->port_caps[port - 1].has_smi = true;
}
}
}
@@ -2960,63 +2958,12 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
- int port;
+ unsigned int port;
- for (port = 1; port <= dev->num_ports; port++)
+ rdma_for_each_port (&dev->ib_dev, port)
mlx5_query_ext_port_caps(dev, port);
}
-static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
- struct ib_device_attr *dprops = NULL;
- struct ib_port_attr *pprops = NULL;
- int err = -ENOMEM;
-
- pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
- if (!pprops)
- goto out;
-
- dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
- if (!dprops)
- goto out;
-
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
- if (err) {
- mlx5_ib_warn(dev, "query_device failed %d\n", err);
- goto out;
- }
-
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- goto out;
- }
-
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
- port, dprops->max_pkeys, pprops->gid_tbl_len);
-
-out:
- kfree(pprops);
- kfree(dprops);
-
- return err;
-}
-
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
- /* For representors use port 1, is this is the only native
- * port
- */
- if (dev->is_rep)
- return __get_port_caps(dev, 1);
- return __get_port_caps(dev, port);
-}
-
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
switch (umr_fence_cap) {
@@ -3311,8 +3258,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3324,8 +3270,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -3490,10 +3435,6 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
if (err)
goto unbind;
- err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
- if (err)
- goto unbind;
-
err = mlx5_add_netdev_notifier(ibdev, port_num);
if (err) {
mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
@@ -3571,11 +3512,9 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
break;
}
}
- if (!bound) {
- get_port_caps(dev, i + 1);
+ if (!bound)
mlx5_ib_dbg(dev, "no free port found for port %d\n",
i + 1);
- }
}
list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
@@ -3928,8 +3867,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
WARN_ON(!xa_empty(&dev->odp_mkeys));
- cleanup_srcu_struct(&dev->odp_srcu);
-
+ mutex_destroy(&dev->cap_mask_mutex);
WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -3940,6 +3878,12 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
int err;
int i;
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
rwlock_init(&dev->port[i].roce.netdev_lock);
@@ -3956,29 +3900,16 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
err = set_has_smi_cap(dev);
if (err)
- return err;
+ goto err_mp;
- if (!mlx5_core_mp_enabled(mdev)) {
- for (i = 1; i <= dev->num_ports; i++) {
- err = get_port_caps(dev, i);
- if (err)
- break;
- }
- } else {
- err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
- }
+ err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
if (err)
goto err_mp;
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
- dev->ib_dev.dev.parent = mdev->device;
- dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -3989,17 +3920,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
-
- err = init_srcu_struct(&dev->odp_srcu);
- if (err)
- goto err_mp;
-
return 0;
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
-
- return -ENOMEM;
+ return err;
}
static int mlx5_ib_enable_driver(struct ib_device *dev)
@@ -4069,6 +3994,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.query_srq = mlx5_ib_query_srq,
.query_ucontext = mlx5_ib_query_ucontext,
.reg_user_mr = mlx5_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
.req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq,
@@ -4209,6 +4135,7 @@ static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.get_port_immutable = mlx5_port_rep_immutable,
.query_port = mlx5_ib_rep_query_port,
+ .query_pkey = mlx5_ib_rep_query_pkey,
};
static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
@@ -4319,7 +4246,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b0fdc1b08e06..88cc26e008fc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
#ifndef MLX5_IB_H
@@ -683,11 +684,8 @@ struct mlx5_ib_mr {
u64 pi_iova;
/* For ODP and implicit */
- atomic_t num_deferred_work;
- wait_queue_head_t q_deferred_work;
struct xarray implicit_children;
union {
- struct rcu_head rcu;
struct list_head elm;
struct work_struct work;
} odp_destroy;
@@ -703,6 +701,12 @@ static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
mr->umem->is_odp;
}
+static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_dmabuf;
+}
+
struct mlx5_ib_mw {
struct ib_mw ibmw;
struct mlx5_core_mkey mmkey;
@@ -1029,6 +1033,11 @@ struct mlx5_var_table {
u64 num_var_hw_entries;
};
+struct mlx5_port_caps {
+ bool has_smi;
+ u8 ext_port_cap;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -1056,11 +1065,6 @@ struct mlx5_ib_dev {
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
- /*
- * Sleepable RCU that prevents destruction of MRs while they are still
- * being used by a page fault handler.
- */
- struct srcu_struct odp_srcu;
struct xarray odp_mkeys;
u32 null_mkey;
@@ -1089,6 +1093,8 @@ struct mlx5_ib_dev {
struct mlx5_var_table var_table;
struct xarray sig_mrs;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u16 pkey_table_len;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1243,6 +1249,10 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags,
@@ -1253,11 +1263,13 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
+int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1279,9 +1291,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad);
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
@@ -1345,6 +1355,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -1370,6 +1381,10 @@ static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
return -EOPNOTSUPP;
}
+static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
@@ -1576,6 +1591,29 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
return true;
}
+static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_core_mkey *mmkey)
+{
+ refcount_set(&mmkey->usecount, 1);
+
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
+ mmkey, GFP_KERNEL));
+}
+
+/* deref an mkey that can participate in ODP flow */
+static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+ if (refcount_dec_and_test(&mmkey->usecount))
+ wake_up(&mmkey->wait);
+}
+
+/* deref an mkey that can participate in ODP flow and wait for release */
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+ mlx5r_deref_odp_mkey(mmkey);
+ wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
+}
+
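/*
 * A minimal lifecycle sketch for the three helpers above (illustrative
 * only; the example_* names are hypothetical and not part of this
 * patch): publish the mkey with an initial reference, then drain all
 * references on teardown.
 */
static int example_publish(struct mlx5_ib_dev *dev,
			   struct mlx5_core_mkey *mmkey)
{
	/* usecount becomes 1 and the mkey is visible in dev->odp_mkeys */
	return mlx5r_store_odp_mkey(dev, mmkey);
}

static void example_teardown(struct mlx5_ib_dev *dev,
			     struct mlx5_core_mkey *mmkey)
{
	/* unpublish first so no new reader can take a reference ... */
	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key));
	/* ... then drop our reference and wait for all users to finish */
	mlx5r_deref_wait_odp_mkey(mmkey);
}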
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 24f8d59a42ea..db05b0e0a8d7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,6 +37,8 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
@@ -155,6 +158,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
mr->mmkey.type = MLX5_MKEY_MR;
mr->mmkey.key |= mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, mr->out, mkey_index));
+ init_waitqueue_head(&mr->mmkey.wait);
WRITE_ONCE(dev->cache.last_add, jiffies);
@@ -935,6 +939,17 @@ static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr->access_flags = access_flags;
}
+static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
+ u64 iova)
+{
+ /*
+ * The alignment of iova has already been checked upon entering
+ * UVERBS_METHOD_REG_DMABUF_MR
+ */
+ umem->iova = iova;
+ return PAGE_SIZE;
+}
+
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
int access_flags)
@@ -944,7 +959,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct mlx5_ib_mr *mr;
unsigned int page_size;
- page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
+ if (umem->is_dmabuf)
+ page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
+ else
+ page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
+ 0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
ent = mr_cache_ent_from_order(
@@ -980,7 +999,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->mmkey.size = umem->length;
mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- mr->umem = umem;
set_mr_fields(dev, mr, umem->length, access_flags);
return mr;
@@ -1201,8 +1219,10 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
/*
* Send the DMA list to the HW for a normal MR using UMR.
+ * A dmabuf MR is handled in a similar way, except that the
+ * MLX5_IB_UPD_XLT_ZAP flag may be used.
*/
-static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
@@ -1244,6 +1264,10 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
cur_mtt->ptag =
cpu_to_be64(rdma_block_iter_dma_address(&biter) |
MLX5_IB_MTT_PRESENT);
+
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+
cur_mtt++;
}
@@ -1528,10 +1552,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
}
odp->private = mr;
- init_waitqueue_head(&mr->q_deferred_work);
- atomic_set(&mr->num_deferred_work, 0);
- err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
if (err)
goto err_dereg_mr;
@@ -1567,6 +1588,81 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return create_real_mr(pd, umem, iova, access_flags);
}
+static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+ struct mlx5_ib_mr *mr = umem_dmabuf->private;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+ mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+}
+
+static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
+ .allow_peer2peer = 1,
+ .move_notify = mlx5_ib_dmabuf_invalidate_cb,
+};
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5_ib_can_load_pas_with_umr(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ if (IS_ERR(umem_dmabuf)) {
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
+ PTR_ERR(umem_dmabuf));
+ return ERR_CAST(umem_dmabuf);
+ }
+
+ mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
+ access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_CAST(mr);
+ }
+
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
+
+ atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
+ umem_dmabuf->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_dmabuf_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
+
+err_dereg_mr:
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+}
+
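/*
 * For context, a hedged userspace sketch of how this entry point is
 * reached; it assumes an rdma-core new enough to expose
 * ibv_reg_dmabuf_mr() and a dma-buf fd obtained from some exporter
 * (both are assumptions, not shown in this patch).
 */
#include <infiniband/verbs.h>

static struct ibv_mr *example_reg_dmabuf(struct ibv_pd *pd, int dmabuf_fd,
					 size_t len, uint64_t iova)
{
	/* offset 0 into the dma-buf; access flags as for a normal MR */
	return ibv_reg_dmabuf_mr(pd, 0, len, iova, dmabuf_fd,
				 IBV_ACCESS_LOCAL_WRITE |
				 IBV_ACCESS_REMOTE_READ |
				 IBV_ACCESS_REMOTE_WRITE);
}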
/**
* mlx5_mr_cache_invalidate - Fence all DMA on the MR
* @mr: The MR to fence
@@ -1740,8 +1836,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
return ERR_PTR(err);
return NULL;
}
- /* DM or ODP MR's don't have a umem so we can't re-use it */
- if (!mr->umem || is_odp_mr(mr))
+ /* DM or ODP MRs don't have a normal umem so we can't re-use it */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
goto recreate;
/*
@@ -1760,10 +1856,10 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
/*
- * DM doesn't have a PAS list so we can't re-use it, odp does but the
- * logic around releasing the umem is different
+ * DM doesn't have a PAS list so we can't re-use it; ODP/dmabuf do,
+ * but the logic around releasing the umem is different.
*/
- if (!mr->umem || is_odp_mr(mr))
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
goto recreate;
if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
@@ -1876,6 +1972,8 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Stop all DMA */
if (is_odp_mr(mr))
mlx5_ib_fence_odp_mr(mr);
+ else if (is_dmabuf_mr(mr))
+ mlx5_ib_fence_dmabuf_mr(mr);
else
clean_mr(dev, mr);
@@ -2227,9 +2325,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = xa_err(xa_store(&dev->odp_mkeys,
- mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
- GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
if (err)
goto free_mkey;
}
@@ -2249,14 +2345,13 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
/*
- * pagefault_single_data_segment() may be accessing mmw under
- * SRCU if the user bound an ODP MR to this MW.
+ * pagefault_single_data_segment() may be accessing mmw
+ * if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->odp_srcu);
- }
+ mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index aa2413b50adc..374698186662 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -33,6 +33,8 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -113,7 +115,6 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
* xarray would be protected by the umem_mutex, however that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
- * srcu_read_lock()
* xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
@@ -124,12 +125,9 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
* before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the xa_store() visible to a racing thread. While SRCU is not
- * technically required, using it gives consistent use of the SRCU
- * locking around the xarray.
+ * the xa_store() visible to a racing thread.
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- lockdep_assert_held(&mr_to_mdev(imr)->odp_srcu);
for (; pklm != end; pklm++, idx++) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
@@ -205,8 +203,8 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
}
/*
- * This must be called after the mr has been removed from implicit_children
- * and the SRCU synchronized. NOTE: The MR does not necessarily have to be
+ * This must be called after the mr has been removed from implicit_children.
+ * NOTE: The MR does not necessarily have to be
* empty here, parallel page faults could have raced with the free process and
* added pages to it.
*/
@@ -216,19 +214,15 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- int srcu_key;
- /* implicit_child_mr's are not allowed to have deferred work */
- WARN_ON(atomic_read(&mr->num_deferred_work));
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
if (need_imr_xlt) {
- srcu_key = srcu_read_lock(&mr_to_mdev(mr)->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr_to_mdev(mr)->odp_srcu, srcu_key);
}
dma_fence_odp_mr(mr);
@@ -236,26 +230,16 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
mr->parent = NULL;
mlx5_mr_cache_free(mr_to_mdev(mr), mr);
ib_umem_odp_release(odp);
- if (atomic_dec_and_test(&imr->num_deferred_work))
- wake_up(&imr->q_deferred_work);
}
static void free_implicit_child_mr_work(struct work_struct *work)
{
struct mlx5_ib_mr *mr =
container_of(work, struct mlx5_ib_mr, odp_destroy.work);
+ struct mlx5_ib_mr *imr = mr->parent;
free_implicit_child_mr(mr, true);
-}
-
-static void free_implicit_child_mr_rcu(struct rcu_head *head)
-{
- struct mlx5_ib_mr *mr =
- container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
-
- /* Freeing a MR is a sleeping operation, so bounce to a work queue */
- INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
- queue_work(system_unbound_wq, &mr->odp_destroy.work);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
}
static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
@@ -264,21 +248,14 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
- xa_lock(&imr->implicit_children);
- /*
- * This can race with mlx5_ib_free_implicit_mr(), the first one to
- * reach the xa lock wins the race and destroys the MR.
- */
- if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
- mr)
- goto out_unlock;
+ if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ return;
- atomic_inc(&imr->num_deferred_work);
- call_srcu(&mr_to_mdev(mr)->odp_srcu, &mr->odp_destroy.rcu,
- free_implicit_child_mr_rcu);
+ xa_erase(&imr->implicit_children, idx);
-out_unlock:
- xa_unlock(&imr->implicit_children);
+ /* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
}
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
@@ -490,6 +467,12 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->parent = imr;
odp->private = mr;
+ /*
+ * The first refcount is owned by the xarray and the second is
+ * returned to the caller.
+ */
+ refcount_set(&mr->mmkey.usecount, 2);
+
err = mlx5_ib_update_xlt(mr, 0,
MLX5_IMR_MTT_ENTRIES,
PAGE_SHIFT,
@@ -500,27 +483,28 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
goto out_mr;
}
- /*
- * Once the store to either xarray completes any error unwind has to
- * use synchronize_srcu(). Avoid this with xa_reserve()
- */
- ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
- GFP_KERNEL);
+ xa_lock(&imr->implicit_children);
+ ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
if (unlikely(ret)) {
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
- goto out_mr;
+ goto out_lock;
}
/*
* Another thread beat us to creating the child mr, use
* theirs.
*/
- goto out_mr;
+ refcount_inc(&ret->mmkey.usecount);
+ goto out_lock;
}
+ xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
+out_lock:
+ xa_unlock(&imr->implicit_children);
out_mr:
mlx5_mr_cache_free(mr_to_mdev(imr), mr);
out_umem:
@@ -559,8 +543,6 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->ibmr.device = &dev->ib_dev;
imr->umem = &umem_odp->umem;
imr->is_odp_implicit = true;
- atomic_set(&imr->num_deferred_work, 0);
- init_waitqueue_head(&imr->q_deferred_work);
xa_init(&imr->implicit_children);
err = mlx5_ib_update_xlt(imr, 0,
@@ -572,8 +554,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
if (err)
goto out_mr;
- err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
- &imr->mmkey, GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
if (err)
goto out_mr;
@@ -591,60 +572,35 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct mlx5_ib_dev *dev = mr_to_mdev(imr);
- struct list_head destroy_list;
struct mlx5_ib_mr *mtt;
- struct mlx5_ib_mr *tmp;
unsigned long idx;
- INIT_LIST_HEAD(&destroy_list);
-
xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
/*
- * This stops the SRCU protected page fault path from touching either
- * the imr or any children. The page fault path can only reach the
- * children xarray via the imr.
- */
- synchronize_srcu(&dev->odp_srcu);
-
- /*
* All work on the prefetch list must be completed, xa_erase() prevented
* new work from being created.
*/
- wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
-
+ mlx5r_deref_wait_odp_mkey(&imr->mmkey);
/*
* At this point it is forbidden for any other thread to enter
* pagefault_mr() on this imr. It is already forbidden to call
* pagefault_mr() on an implicit child. Due to this, additions to
* implicit_children are prevented.
+ * In addition, any new call to destroy_unused_implicit_child_mr()
+ * may return immediately.
*/
/*
- * Block destroy_unused_implicit_child_mr() from incrementing
- * num_deferred_work.
- */
- xa_lock(&imr->implicit_children);
- xa_for_each (&imr->implicit_children, idx, mtt) {
- __xa_erase(&imr->implicit_children, idx);
- list_add(&mtt->odp_destroy.elm, &destroy_list);
- }
- xa_unlock(&imr->implicit_children);
-
- /*
- * Wait for any concurrent destroy_unused_implicit_child_mr() to
- * complete.
- */
- wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
-
- /*
* Fence the imr before we destroy the children. This allows us to
* skip updating the XLT of the imr during destroy of the child mkey
* the imr points to.
*/
mlx5_mr_cache_invalidate(imr);
- list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ xa_for_each(&imr->implicit_children, idx, mtt) {
+ xa_erase(&imr->implicit_children, idx);
free_implicit_child_mr(mtt, false);
+ }
mlx5_mr_cache_free(dev, imr);
ib_umem_odp_release(odp_imr);
@@ -663,13 +619,39 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
/* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&mr_to_mdev(mr)->odp_srcu);
-
- wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
dma_fence_odp_mr(mr);
}
+/**
+ * mlx5_ib_fence_dmabuf_mr - Stop all access to the dmabuf MR
+ * @mr: The MR to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ mlx5_mr_cache_invalidate(mr);
+ umem_dmabuf->private = NULL;
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!mr->cache_ent) {
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
#define MLX5_PF_FLAGS_ENABLE BIT(3)
@@ -747,8 +729,10 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
struct mlx5_ib_mr *mtt;
u64 len;
+ xa_lock(&imr->implicit_children);
mtt = xa_load(&imr->implicit_children, idx);
if (unlikely(!mtt)) {
+ xa_unlock(&imr->implicit_children);
mtt = implicit_get_child_mr(imr, idx);
if (IS_ERR(mtt)) {
ret = PTR_ERR(mtt);
@@ -756,6 +740,9 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
}
upd_start_idx = min(upd_start_idx, idx);
upd_len = idx - upd_start_idx + 1;
+ } else {
+ refcount_inc(&mtt->mmkey.usecount);
+ xa_unlock(&imr->implicit_children);
}
umem_odp = to_ib_umem_odp(mtt->umem);
@@ -764,6 +751,9 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
bytes_mapped, flags);
+
+ mlx5r_deref_odp_mkey(&mtt->mmkey);
+
if (ret < 0)
goto out;
user_va += len;
@@ -803,6 +793,44 @@ out:
return ret;
}
+static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ u32 xlt_flags = 0;
+ int err;
+ unsigned int page_size;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err) {
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ return err;
+ }
+
+ page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
+ log_page_size, 0,
+ umem_dmabuf->umem.iova);
+ if (unlikely(page_size < PAGE_SIZE)) {
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ err = -EINVAL;
+ } else {
+ err = mlx5_ib_update_mr_pas(mr, xlt_flags);
+ }
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ if (err)
+ return err;
+
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+
+ return ib_umem_num_pages(mr->umem);
+}
+
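/*
 * Informal view of the locking protocol implemented by
 * pagefault_dmabuf_mr() above and mlx5_ib_dmabuf_invalidate_cb() in
 * mr.c; the dma-buf reservation lock serializes the two sides:
 *
 *   fault side                       exporter move_notify side
 *   dma_resv_lock()                  dma_resv_lock() (held by caller)
 *   ib_umem_dmabuf_map_pages()       mlx5_ib_update_mr_pas(ZAP)
 *   mlx5_ib_update_mr_pas(flags)     ib_umem_dmabuf_unmap_pages()
 *   dma_resv_unlock()                dma_resv_unlock()
 */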
/*
* Returns:
* -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
@@ -817,10 +845,12 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- lockdep_assert_held(&mr_to_mdev(mr)->odp_srcu);
if (unlikely(io_virt < mr->mmkey.iova))
return -EFAULT;
+ if (mr->umem->is_dmabuf)
+ return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
+
if (!odp->is_implicit_odp) {
u64 user_va;
@@ -847,6 +877,16 @@ int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
return ret >= 0 ? 0 : ret;
}
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
+ MLX5_PF_FLAGS_ENABLE);
+
+ return ret >= 0 ? 0 : ret;
+}
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -896,7 +936,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
- int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
+ int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -905,14 +945,14 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
-
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
+ xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
if (!mmkey) {
+ xa_unlock(&dev->odp_mkeys);
mlx5_ib_dbg(
dev,
"skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
@@ -925,12 +965,15 @@ next_mr:
* faulted.
*/
ret = 0;
- goto srcu_unlock;
+ goto end;
}
+ refcount_inc(&mmkey->usecount);
+ xa_unlock(&dev->odp_mkeys);
+
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
switch (mmkey->type) {
@@ -939,7 +982,7 @@ next_mr:
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
- goto srcu_unlock;
+ goto end;
mlx5_update_odp_stats(mr, faults, ret);
@@ -954,7 +997,7 @@ next_mr:
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
@@ -965,7 +1008,7 @@ next_mr:
out = kzalloc(outlen, GFP_KERNEL);
if (!out) {
ret = -ENOMEM;
- goto srcu_unlock;
+ goto end;
}
cur_outlen = outlen;
}
@@ -975,7 +1018,7 @@ next_mr:
ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
if (ret)
- goto srcu_unlock;
+ goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
@@ -989,7 +1032,7 @@ next_mr:
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame) {
ret = -ENOMEM;
- goto srcu_unlock;
+ goto end;
}
frame->key = be32_to_cpu(pklm->key);
@@ -1008,7 +1051,7 @@ next_mr:
default:
mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
if (head) {
@@ -1021,10 +1064,13 @@ next_mr:
depth = frame->depth;
kfree(frame);
+ mlx5r_deref_odp_mkey(mmkey);
goto next_mr;
}
-srcu_unlock:
+end:
+ if (mmkey)
+ mlx5r_deref_odp_mkey(mmkey);
while (head) {
frame = head;
head = frame->next;
@@ -1032,7 +1078,6 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
@@ -1040,16 +1085,18 @@ srcu_unlock:
/**
* Parse a series of data segments for page fault handling.
*
- * @pfault contains page fault information.
- * @wqe points at the first data segment in the WQE.
- * @wqe_end points after the end of the WQE.
- * @bytes_mapped receives the number of bytes that the function was able to
- * map. This allows the caller to decide intelligently whether
- * enough memory was mapped to resolve the page fault
- * successfully (e.g. enough for the next MTU, or the entire
- * WQE).
- * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
- * the committed bytes).
+ * @dev: Pointer to mlx5 IB device
+ * @pfault: contains page fault information.
+ * @wqe: points at the first data segment in the WQE.
+ * @wqe_end: points after the end of the WQE.
+ * @bytes_mapped: receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ * @receive_queue: true if this is a receive-queue (responder) WQE
*
* Returns the number of pages loaded if positive, zero for an empty WQE, or a
* negative error code.
@@ -1738,8 +1785,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
u32 i;
for (i = 0; i < work->num_sge; ++i)
- if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
- wake_up(&work->frags[i].mr->q_deferred_work);
+ mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
+
kvfree(work);
}
@@ -1749,27 +1796,30 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_core_mkey *mmkey;
- struct ib_umem_odp *odp;
- struct mlx5_ib_mr *mr;
-
- lockdep_assert_held(&dev->odp_srcu);
+ struct mlx5_ib_mr *mr = NULL;
+ xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
- return NULL;
+ goto end;
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (mr->ibmr.pd != pd)
- return NULL;
-
- odp = to_ib_umem_odp(mr->umem);
+ if (mr->ibmr.pd != pd) {
+ mr = NULL;
+ goto end;
+ }
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
- !odp->umem.writable)
- return NULL;
+ !mr->umem->writable) {
+ mr = NULL;
+ goto end;
+ }
+ refcount_inc(&mmkey->usecount);
+end:
+ xa_unlock(&dev->odp_mkeys);
return mr;
}
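/*
 * Compact form of the reader-side pattern used above (informal sketch,
 * mirroring get_prefetchable_mr()):
 *
 *   xa_lock(&dev->odp_mkeys);
 *   mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
 *   if (mmkey)
 *           refcount_inc(&mmkey->usecount);
 *   xa_unlock(&dev->odp_mkeys);
 *   ...use the MR...
 *   mlx5r_deref_odp_mkey(mmkey);
 */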
@@ -1777,17 +1827,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{
struct prefetch_mr_work *work =
container_of(w, struct prefetch_mr_work, work);
- struct mlx5_ib_dev *dev;
u32 bytes_mapped = 0;
- int srcu_key;
int ret;
u32 i;
/* We rely on IB/core that work is executed if we have num_sge != 0 only. */
WARN_ON(!work->num_sge);
- dev = mr_to_mdev(work->frags[0].mr);
- /* SRCU should be held when calling to mlx5_odp_populate_xlt() */
- srcu_key = srcu_read_lock(&dev->odp_srcu);
for (i = 0; i < work->num_sge; ++i) {
ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
work->frags[i].length, &bytes_mapped,
@@ -1796,7 +1841,6 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
continue;
mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
}
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
}
@@ -1820,9 +1864,6 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->num_sge = i;
return false;
}
-
- /* Keep the MR pointer will valid outside the SRCU */
- atomic_inc(&work->frags[i].mr->num_deferred_work);
}
work->num_sge = num_sge;
return true;
@@ -1833,42 +1874,35 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
u32 pf_flags, struct ib_sge *sg_list,
u32 num_sge)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
u32 bytes_mapped = 0;
- int srcu_key;
int ret = 0;
u32 i;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
for (i = 0; i < num_sge; ++i) {
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr) {
- ret = -ENOENT;
- goto out;
- }
+ if (!mr)
+ return -ENOENT;
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ return ret;
+ }
mlx5_update_odp_stats(mr, prefetch, ret);
+ mlx5r_deref_odp_mkey(&mr->mmkey);
}
- ret = 0;
-out:
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
- return ret;
+ return 0;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
u32 pf_flags = 0;
struct prefetch_mr_work *work;
- int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1884,13 +1918,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (!work)
return -ENOMEM;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0cb7cc642d87..ec4b3f6a8222 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1078,6 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
/* Set "fast registration enabled" for all kernel QPs */
@@ -1172,10 +1173,72 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
sq->flow_rule = NULL;
}
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+ if (recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+ if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+ int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+
+ if (recv_cq &&
+ recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+ ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+ if (send_cq &&
+ send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+ ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+ if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
+ !fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return ts_format;
+}
+
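/*
 * A concrete reading of the three helpers above (no new behavior): a CQ
 * created with IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION forces the
 * FREE_RUNNING timestamp format; if the device capability reports only
 * REAL_TIME, QP/SQ/RQ creation fails with -EOPNOTSUPP, and without the
 * CQ flag the DEFAULT format is used.
 */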
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct ib_udata *udata,
struct mlx5_ib_sq *sq, void *qpin,
- struct ib_pd *pd)
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
__be64 *pas;
@@ -1187,6 +1250,11 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
int err;
unsigned int page_offset_quantized;
unsigned long page_size;
+ int ts_format;
+
+ ts_format = get_sq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
ubuffer->buf_size, 0);
@@ -1215,6 +1283,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
@@ -1263,7 +1332,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- struct ib_pd *pd)
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1274,9 +1343,14 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct ib_umem *umem = rq->base.ubuffer.umem;
unsigned int page_offset_quantized;
unsigned long page_size = 0;
+ int ts_format;
size_t inlen;
int err;
+ ts_format = get_rq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
MLX5_ADAPTER_PAGE_SHIFT,
page_offset, 64,
@@ -1296,6 +1370,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, vsd, 1);
MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
@@ -1393,10 +1468,10 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u32 *in, size_t inlen,
- struct ib_pd *pd,
+ u32 *in, size_t inlen, struct ib_pd *pd,
struct ib_udata *udata,
- struct mlx5_ib_create_qp_resp *resp)
+ struct mlx5_ib_create_qp_resp *resp,
+ struct ib_qp_init_attr *init_attr)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1415,7 +1490,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
- err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
+ err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
+ to_mcq(init_attr->send_cq));
if (err)
goto err_destroy_tis;
@@ -1437,7 +1513,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, pd);
+ err = create_raw_packet_qp_rq(dev, rq, in, pd,
+ to_mcq(init_attr->recv_cq));
if (err)
goto err_destroy_sq;
@@ -1907,6 +1984,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
struct mlx5_ib_qp_base *base;
+ int ts_format;
int mlx5_st;
void *qpc;
u32 *in;
@@ -1944,6 +2022,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
return -EINVAL;
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ if (ts_format < 0)
+ return ts_format;
+ }
+
err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
&inlen, base, ucmd);
if (err)
@@ -1992,6 +2077,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+
MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
if (qp->sq.wqe_cnt) {
@@ -2046,7 +2134,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
- &params->resp);
+ &params->resp, init_attr);
} else
err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
@@ -2432,9 +2520,6 @@ static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
case MLX5_IB_QPT_HW_GSI:
case IB_QPT_DRIVER:
case IB_QPT_GSI:
- if (dev->profile == &raw_eth_profile)
- goto out;
- fallthrough;
case IB_QPT_RAW_PACKET:
case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
@@ -2629,10 +2714,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
int create_flags = attr->create_flags;
bool cond;
- if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
- if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
- return -EINVAL;
-
if (qp_type == MLX5_IB_QPT_DCT)
return (create_flags) ? -EINVAL : 0;
@@ -3076,6 +3157,8 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 4;
case IB_RATE_50_GBPS:
return 5;
+ case IB_RATE_400_GBPS:
+ return 6;
default:
return rate + MLX5_STAT_RATE_OFFSET;
}
@@ -3183,11 +3266,13 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
alt ? attr->alt_pkey_index : attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
- if (grh->sgid_index >=
- dev->mdev->port_caps[port - 1].gid_table_len) {
+ const struct ib_port_immutable *immutable;
+
+ immutable = ib_port_immutable_read(&dev->ib_dev, port);
+ if (grh->sgid_index >= immutable->gid_tbl_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
grh->sgid_index,
- dev->mdev->port_caps[port - 1].gid_table_len);
+ immutable->gid_tbl_len);
return -EINVAL;
}
}
@@ -4211,6 +4296,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
+static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ enum ib_qp_type qp_type)
+{
+ if (dev->profile != &raw_eth_profile)
+ return true;
+
+ if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
+ return true;
+
+ /* Internal QP used for wc testing, with NOPs in wq */
+ if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
+ return true;
+
+ return false;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -4221,7 +4323,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int port;
+
+ if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
+ return -EOPNOTSUPP;
if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
return -EOPNOTSUPP;
@@ -4263,10 +4367,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
- port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- }
-
if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
@@ -4295,14 +4395,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- if (attr_mask & IB_QP_PKEY_INDEX) {
- port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >=
- dev->mdev->port_caps[port - 1].pkey_table_len) {
- mlx5_ib_dbg(dev, "invalid pkey index %d\n",
- attr->pkey_index);
- goto out;
- }
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
+ goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
@@ -5376,7 +5472,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
handle_drain_completion(cq, &rdrain, dev);
}
-/**
+/*
* Bind a qp to a counter. If @counter is NULL then bind the qp to
* the default counter
*/
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index d6038fb6c50c..cf2852cba45c 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -1369,7 +1369,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
handle_qpt_uc(wr, &seg, &size);
break;
case IB_QPT_SMI:
- if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
+ if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) {
mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
err = -EPERM;
*bad_wr = wr;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc98bd950d99..3acb5c10b155 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
- kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
_ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 9dde70373a55..3cb4febaad0f 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -617,18 +617,18 @@ static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
- return 0;
+ return false;
- return 1;
+ return true;
}
static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
- return 0;
+ return false;
- return 1;
+ return true;
}
static inline struct qedr_user_mmap_entry *
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index f5542d703ef9..13e5e6bbec99 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -586,8 +586,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qedr_inc_sw_prod(&qp->sq);
DP_DEBUG(qp->dev, QEDR_MSG_GSI,
- "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
- wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
+ wr->wr_id);
} else {
DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
rc = -EAGAIN;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 92eeea5679e2..84fc4dcc5399 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -151,7 +151,7 @@ int qib_count_units(int *npresentp, int *nupp)
/**
* qib_wait_linkstate - wait for an IB link state change to occur
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @state: the state to wait for
* @msecs: the number of milliseconds to wait
*
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 5838b3bf34b9..bf660c001b6d 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -47,7 +47,7 @@
* qib_eeprom_read - receives bytes from the eeprom via I2C
* @dd: the qlogic_ib device
* @eeprom_offset: address to read from
- * @buffer: where to store result
+ * @buff: where to store result
* @len: number of bytes to receive
*/
int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
@@ -94,7 +94,7 @@ static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
* qib_eeprom_write - writes data to the eeprom via I2C
* @dd: the qlogic_ib device
* @eeprom_offset: where to place data
- * @buffer: data to write
+ * @buff: data to write
* @len: number of bytes to write
*/
int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 44150be215bf..b35e1174be22 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1223,7 +1223,7 @@ static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
/**
* qib_6120_bringup_serdes - bring up the serdes
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
*/
static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
{
@@ -1412,7 +1412,7 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
/**
* qib_6120_setup_setextled - set the state of the two external LEDs
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @on: whether the link is up or not
*
* The exact combo of LEDs if on is true is determined by looking
@@ -1823,7 +1823,7 @@ bail:
* qib_6120_put_tid - write a TID in chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
* for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*
@@ -1890,7 +1890,7 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
* qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
* for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*
@@ -1932,7 +1932,7 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_6120_clear_tids - clear all TID entries for a context, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the context
+ * @rcd: the context
*
* clear all TID entries for a context, expected and eager.
* Used from qib_close(). On this chip, TIDs are only 32 bits,
@@ -2008,7 +2008,7 @@ int __attribute__((weak)) qib_unordered_wc(void)
/**
* qib_6120_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
* HyperTransport can affect some user packet algorithms.
@@ -2270,8 +2270,8 @@ static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_6120 - read a per-port counter
- * @dd: the qlogic_ib device
- * @creg: the counter to snapshot
+ * @ppd: the qlogic_ib device
+ * @reg: the counter to snapshot
*/
static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
{
@@ -2610,7 +2610,7 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
/**
* qib_get_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
* need traffic_wds done the way it is
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 0a6f26d4cb31..229dcd6ead95 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1701,7 +1701,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
/**
* qib_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @on: whether the link is up or not
*
* The exact combo of LEDs if on is true is determined by looking
@@ -2146,7 +2146,7 @@ bail:
* qib_7220_put_tid - write a TID to the chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @type: 0 for eager, 1 for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*/
static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
@@ -2180,7 +2180,7 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the ctxt
+ * @rcd: the ctxt
*
* clear all TID entries for a ctxt, expected and eager.
* Used from qib_close(). On this chip, TIDs are only 32 bits,
@@ -2238,7 +2238,7 @@ static void qib_7220_tidtemplate(struct qib_devdata *dd)
/**
* qib_init_7220_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
* HyperTransport can affect some user packet algorithms.
@@ -2896,8 +2896,8 @@ static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_7220 - read a per-port counter
- * @dd: the qlogic_ib device
- * @creg: the counter to snapshot
+ * @ppd: the qlogic_ib device
+ * @reg: the counter to snapshot
*/
static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
{
@@ -3232,7 +3232,7 @@ done:
/**
* qib_get_7220_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
* need traffic_wds done the way it is
@@ -4468,7 +4468,7 @@ static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
/**
* qib_init_iba7220_funcs - set up the chip-specific function pointers
- * @dev: the pci_dev for qlogic_ib device
+ * @pdev: the pci_dev for qlogic_ib device
* @ent: pci_device_id struct for this dev
*
* This is global, and is called directly at init to set up the
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 189a0ce6056a..9fe6ea75b45e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2514,7 +2514,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
/**
* qib_7322_quiet_serdes - set serdes to txidle
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* Called when driver is being unloaded
*/
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
@@ -3760,7 +3760,7 @@ bail:
* qib_7322_put_tid - write a TID to the chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @type: 0 for eager, 1 for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*/
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
@@ -3796,7 +3796,7 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the ctxt
+ * @rcd: the ctxt
*
* clear all TID entries for a ctxt, expected and eager.
* Used from qib_close().
@@ -3861,7 +3861,7 @@ static void qib_7322_tidtemplate(struct qib_devdata *dd)
/**
* qib_init_7322_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
* HyperTransport can affect some user packet algorithms.
@@ -4724,7 +4724,7 @@ static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_7322 - read a per-port chip counter
* @ppd: the qlogic_ib pport
- * @creg: the counter to read (not a chip offset)
+ * @reg: the counter to read (not a chip offset)
*/
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
@@ -5096,7 +5096,7 @@ done:
/**
* qib_get_7322_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* VESTIGIAL IBA7322 has no "small fast counters", so the only
* real purpose of this function is to maintain the notion of
@@ -7175,7 +7175,7 @@ static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
/**
* qib_init_iba7322_funcs - set up the chip-specific function pointers
- * @dev: the pci_dev for qlogic_ib device
+ * @pdev: the pci_dev for qlogic_ib device
* @ent: pci_device_id struct for this dev
*
* Also allocates, inits, and returns the devdata struct for this
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 65c3b964ad1b..85c3187d796d 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -40,9 +40,9 @@
/**
* qib_format_hwmsg - format a single hwerror message
- * @msg message buffer
- * @msgl length of message buffer
- * @hwmsg message to add to message buffer
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
*/
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
@@ -53,11 +53,11 @@ static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
/**
* qib_format_hwerrors - format hardware error messages for display
- * @hwerrs hardware errors bit vector
- * @hwerrmsgs hardware error descriptions
- * @nhwerrmsgs number of hwerrmsgs
- * @msg message buffer
- * @msgl message buffer length
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
*/
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t msgl)
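
The hunks above and below follow one pattern: kernel-doc comments move from the old `@arg - description` form to the `@arg: description` form that scripts/kernel-doc expects, stale parameter names are corrected, missing entries are added, and comments that are not kernel-doc at all are demoted from `/**` to `/*`. A minimal sketch of a well-formed block, using a hypothetical function:

/**
 * example_fn - one-line summary of the function
 * @dd: the qlogic_ib device
 * @msgl: length of the message buffer
 *
 * Free-form description goes here, after a blank line. Every parameter
 * uses the "@name: description" form; "@name - description" and entries
 * naming a parameter that no longer exists both trigger kernel-doc
 * warnings, which is what these hunks are silencing.
 */
static void example_fn(struct qib_devdata *dd, size_t msgl);
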
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f83e331977f8..44e2f813024a 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -886,7 +886,7 @@ done:
/**
* rm_pkey - decrement the reference count for the given PKEY
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib pport
* @key: the PKEY index
*
* Return true if this was the last reference and the hardware table entry
@@ -916,7 +916,7 @@ bail:
/**
* add_pkey - add the given PKEY to the hardware table
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib pport
* @key: the PKEY
*
* Return an error code if unable to add the entry, zero if no change,
@@ -2346,8 +2346,10 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
* @port: the port number this packet came in on
* @in_wc: the work completion entry for this packet
* @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
+ * @in: the incoming MAD
+ * @out: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: unused
*
* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
* interested in processing.
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 2e07b3749b88..cb2a02d671e2 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -181,7 +181,7 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
pci_set_drvdata(dd->pcidev, NULL);
}
-/**
+/*
* We save the msi lo and hi values, so we can restore them after
* chip reset (the kernel PCI infrastructure doesn't yet handle that
* correctly).
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 8d0563ef5be1..ca39a029e4af 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -207,7 +207,7 @@ bail:
return ret;
}
-/**
+/*
* qib_free_all_qps - check for QPs still in use
*/
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
@@ -376,9 +376,9 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
/**
* qib_check_send_wqe - validate wr/wqe
- * @qp - The qp
- * @wqe - The built wqe
- * @call_send - Determine if the send should be posted or scheduled
+ * @qp: The qp
+ * @wqe: The built wqe
+ * @call_send: Determine if the send should be posted or scheduled
*
* Returns 0 on success, -EINVAL on failure
*/
@@ -418,8 +418,8 @@ static const char * const qp_type_str[] = {
/**
* qib_qp_iter_print - print information to seq_file
- * @s - the seq_file
- * @iter - the iterator
+ * @s: the seq_file
+ * @iter: the iterator
*/
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 3915e5b4a9bc..a1c20ffb4490 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -207,6 +207,7 @@ bail:
/**
* qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
+ * @flags: unused
*
* Assumes the s_lock is held.
*
@@ -992,7 +993,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
return wqe;
}
-/**
+/*
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
* @psn: the packet sequence number of the ACK
@@ -1259,6 +1260,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
* @psn: the packet sequence number for this packet
* @hdrsize: the header length
* @pmtu: the path MTU
+ * @rcd: the context pointer
*
* This is called from qib_rc_rcv() to process an incoming RC response
* packet for the given QP.
@@ -1480,6 +1482,7 @@ bail:
* @opcode: the opcode for this packet
* @psn: the packet sequence number for this packet
* @diff: the difference between the PSN and the expected PSN
+ * @rcd: the context pointer
*
* This is called from qib_rc_rcv() to process an unexpected
* incoming RC packet for the given QP.
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index f5698664419b..97b8a2bf5c69 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -168,6 +168,7 @@ static void stop_cmd(struct qib_devdata *dd);
/**
* rd_byte - read a byte, sending STOP on last, else ACK
* @dd: the qlogic_ib device
+ * @last: identifies the last read
*
* Returns byte shifted out of device
*/
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 29785eb84646..6a8148851f21 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -377,6 +377,7 @@ void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
* @start: the starting send buffer number
* @len: the number of send buffers
* @avail: true if the buffers are available for kernel use, false otherwise
+ * @rcd: the context pointer
*/
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
unsigned len, u32 avail, struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 554af4273a13..8e2bda77d8b9 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -40,6 +40,7 @@
/**
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
+ * @flags: unused
*
* Assumes the s_lock is held.
*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 93ca21347959..81eda94bd279 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -222,6 +222,7 @@ drop:
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
+ * @flags: flags to modify and pass back to caller
*
* Assumes the s_lock is held.
*
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 4c24e83f3175..5d6cf7427431 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -43,7 +43,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
unpin_user_pages_dirty_lock(p, num_pages, dirty);
}
-/**
+/*
* qib_map_page - a safety wrapper around pci_map_page()
*
* A dma_addr of all 0's is interpreted by the chip as "disabled".
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index f6c01bad5a74..8e0de265ad57 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1067,7 +1067,7 @@ bail:
/**
* qib_get_counters - get various chip counters
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib pport
* @cntrs: counters are placed here
*
* Return the counters needed by recv_pma_get_portcounters().
@@ -1675,7 +1675,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
/**
* _qib_schedule_send - schedule progress
- * @qp - the qp
+ * @qp: the qp
*
* This schedules progress w/o regard to the s_flags.
*
@@ -1694,7 +1694,7 @@ bool _qib_schedule_send(struct rvt_qp *qp)
/**
* qib_schedule_send - schedule progress
- * @qp - the qp
+ * @qp: the qp
*
* This schedules qp progress. The s_lock
* should be held.
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index e59615a4c9d9..586b0e52ba7f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
struct usnic_vnic_res *vnic_res;
int len;
- len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
qp_grp->ibqp.qp_num,
usnic_ib_qp_grp_state_to_string(qp_grp->state),
qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- len += sysfs_emit_at(
- buf, len, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
}
}
- len = sysfs_emit_at(buf, len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
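
This hunk fixes two problems: the separator space moves in front of each resource string so no trailing blank is emitted, and the final `len = sysfs_emit_at(...)`, which threw away everything emitted so far, becomes `len +=`. A minimal sketch of the accumulation pattern, with a hypothetical attribute:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int len = 0, i;

	/* sysfs_emit_at() writes at offset len and returns the number of
	 * bytes added, so the running total must be accumulated with +=;
	 * plain assignment would truncate the buffer to the last write.
	 */
	for (i = 0; i < 3; i++)
		len += sysfs_emit_at(buf, len, " item[%d]", i);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
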
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 38a37770c016..3705c6b8b223 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
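
This hunk plugs a leak: when qp_grp allocation failed after dev_list had been obtained, the list was never released. Clearing dev_list after the in-loop free also keeps the new error-path free from touching an already-released list. A minimal sketch of the shape, with hypothetical helpers:

static int example_setup(void)
{
	char **list;
	int err;

	list = example_alloc_list();		/* hypothetical */
	if (!list)
		return -ENOMEM;

	err = example_consume_list(list);	/* hypothetical */
	if (err)
		goto out_free;			/* release on the error path too */

	example_free_list(list);
	list = NULL;				/* later paths see a clean pointer */
	return 0;

out_free:
	example_free_list(list);
	return err;
}
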
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index c142f5e7f25f..de57f2fed743 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a119ac3e103c..6aa40bd2fd52 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -367,7 +367,7 @@ retry:
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 00a330909bb3..4b6019e7de67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -474,7 +474,6 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
sizeof(struct pvrdma_cqne);
unsigned int head;
- unsigned long flags;
dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
@@ -483,11 +482,11 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
struct pvrdma_cq *cq;
cqne = get_cqne(dev, head);
- spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ spin_lock(&dev->cq_tbl_lock);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
refcount_inc(&cq->refcnt);
- spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+ spin_unlock(&dev->cq_tbl_lock);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
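
pvrdma_intrx_handler() runs in hardirq context, where local interrupts are already disabled, so saving and restoring IRQ state around cq_tbl_lock is pure overhead and a plain spin_lock() suffices. A sketch of the rule, with a hypothetical device:

struct example_dev {
	spinlock_t lock;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* In a hardirq handler there is no IRQ state worth saving, so
	 * spin_lock() is enough. Process-context users of the same lock
	 * must still use spin_lock_irqsave() so this handler cannot
	 * interrupt them while they hold it.
	 */
	spin_lock(&dev->lock);
	/* touch the state shared with process context */
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
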
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 20cc0799ac4b..5138afca067f 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -371,7 +371,7 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
return ret;
}
-/**
+/*
* rvt_resize_cq - change the size of the CQ
* @ibcq: the completion queue
*
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index 108c71e3ac23..fa5be13a4394 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -56,8 +56,11 @@
* @port_num: the port number this packet came in on, 1 based from ib core
* @in_wc: the work completion entry for this packet
* @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
+ * @in: the incoming MAD
+ * @in_mad_size: size of the incoming MAD
+ * @out: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: unused
*
* Note that the verbs framework has already done the MAD sanity checks,
* and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 5233a63d99a6..951abac13dbb 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -180,7 +180,7 @@ struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
}
EXPORT_SYMBOL(rvt_mcast_find);
-/**
+/*
* rvt_mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 90fc234f489a..601d18dda1f5 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -369,6 +369,7 @@ bail:
* @pd: protection domain for this memory region
* @start: starting userspace address
* @length: length of region to register
+ * @virt_addr: associated virtual address
* @mr_access_flags: access flags for this memory region
* @udata: unused by the driver
*
@@ -438,8 +439,8 @@ bail_umem:
/**
* rvt_dereg_clean_qp_cb - callback from iterator
- * @qp - the qp
- * @v - the mregion (as u64)
+ * @qp: the qp
+ * @v: the mregion (as u64)
*
* This routine fields the callback for all QPs and
* for QPs in the same PD as the MR will call the
@@ -457,7 +458,7 @@ static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
/**
* rvt_dereg_clean_qps - find QPs for reference cleanup
- * @mr - the MR that is being deregistered
+ * @mr: the MR that is being deregistered
*
* This routine iterates RC QPs looking for references
* to the lkey noted in mr.
@@ -471,8 +472,8 @@ static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
/**
* rvt_check_refs - check references
- * @mr - the megion
- * @t - the caller identification
+ * @mr: the mregion
+ * @t: the caller identification
*
* This routine checks for MRs still holding a reference
* while being de-registered.
@@ -506,8 +507,8 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
/**
* rvt_mr_has_lkey - is MR using lkey
- * @mr - the mregion
- * @lkey - the lkey
+ * @mr: the mregion
+ * @lkey: the lkey
*/
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
@@ -516,8 +517,8 @@ bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
/**
* rvt_ss_has_lkey - is mr in sge tests
- * @ss - the sge state
- * @lkey
+ * @ss: the sge state
+ * @lkey: the lkey
*
* This code tests for an MR in the indicated
* sge state.
@@ -540,7 +541,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
/**
* rvt_dereg_mr - unregister and free a memory region
* @ibmr: the memory region to free
- *
+ * @udata: unused by the driver
*
* Note that this is called to free MRs created by rvt_get_dma_mr()
* or rvt_reg_user_mr().
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22fa9bde5419..9d13db68283c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -156,7 +156,7 @@ void rvt_wss_exit(struct rvt_dev_info *rdi)
rdi->wss = NULL;
}
-/**
+/*
* rvt_wss_init - Init wss data structures
*
* Return: 0 on success
@@ -323,6 +323,7 @@ static void get_map_page(struct rvt_qpn_table *qpt,
/**
* init_qpn_table - initialize the QP number table for a device
+ * @rdi: rvt dev struct
* @qpt: the QPN table
*/
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
@@ -524,6 +525,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* IB_QPT_SMI/IB_QPT_GSI
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
+ * @type: the QP type
* @port_num: IB port number, 1 based, comes from core
* @exclude_prefix: prefix of special queue pair number being allocated
*
@@ -655,8 +657,8 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
/**
* rvt_swqe_has_lkey - return true if lkey is used by swqe
- * @wqe - the send wqe
- * @lkey - the lkey
+ * @wqe: the send wqe
+ * @lkey: the lkey
*
* Test the swqe for using lkey
*/
@@ -675,8 +677,8 @@ static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
/**
* rvt_qp_sends_has_lkey - return true is qp sends use lkey
- * @qp - the rvt_qp
- * @lkey - the lkey
+ * @qp: the rvt_qp
+ * @lkey: the lkey
*/
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
@@ -699,8 +701,8 @@ static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
/**
* rvt_qp_acks_has_lkey - return true if acks have lkey
- * @qp - the qp
- * @lkey - the lkey
+ * @qp: the qp
+ * @lkey: the lkey
*/
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
@@ -716,10 +718,10 @@ static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
return false;
}
-/*
+/**
* rvt_qp_mr_clean - clean up remote ops for lkey
- * @qp - the qp
- * @lkey - the lkey that is being de-registered
+ * @qp: the qp
+ * @lkey: the lkey that is being de-registered
*
* This routine checks if the lkey is being used by
* the qp.
@@ -853,6 +855,7 @@ bail:
/**
* rvt_init_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
* @qp: the QP to init or reinit
* @type: the QP type
*
@@ -907,6 +910,7 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/**
* _rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
* @qp: the QP to reset
* @type: the QP type
*
@@ -1726,6 +1730,7 @@ inval:
/**
* rvt_destroy_qp - destroy a queue pair
* @ibqp: the queue pair to destroy
+ * @udata: unused by the driver
*
* Note that this can be called while the QP is actively sending or
* receiving!
@@ -1901,9 +1906,9 @@ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/**
* rvt_qp_valid_operation - validate post send wr request
- * @qp - the qp
- * @post-parms - the post send table for the driver
- * @wr - the work request
+ * @qp: the qp
+ * @post_parms: the post send table for the driver
+ * @wr: the work request
*
* The routine validates the operation based on the
* validation table and returns the length of the operation
@@ -2013,6 +2018,7 @@ static inline int rvt_qp_is_avail(
* rvt_post_one_wr - post one RC, UC, or UD send work request
* @qp: the QP to post on
* @wr: the work request to send
+ * @call_send: kick the send engine into gear
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
const struct ib_send_wr *wr,
@@ -2612,7 +2618,7 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
* rvt_stop_rnr_timer - stop an rnr timer
- * @qp - the QP
+ * @qp: the QP
*
* stop an rnr timer and return if the timer
* had been pending.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 64d98bf238ab..2a7c2f12d372 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -67,7 +67,7 @@ void rvt_driver_srq_init(struct rvt_dev_info *rdi)
/**
* rvt_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
+ * @ibsrq: the SRQ being created
* @srq_init_attr: the attributes of the SRQ
* @udata: data from libibverbs when creating a user SRQ
*
@@ -311,7 +311,8 @@ bail_free:
return ret;
}
-/** rvt_query_srq - query srq data
+/**
+ * rvt_query_srq - query srq data
* @ibsrq: srq to query
* @attr: return info in attr
*
@@ -330,7 +331,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
/**
* rvt_destroy_srq - destroy an srq
* @ibsrq: srq object to destroy
- *
+ * @udata: user data for libibverbs.so
*/
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 49cec85a372a..8fd0128a9336 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -294,7 +294,7 @@ static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
/**
* rvt_dealloc_ucontext - Free a user context
- * @context - Free this
+ * @context: Unused
*/
static void rvt_dealloc_ucontext(struct ib_ucontext *context)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 0a1e6393250b..a8ac791a1bb9 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
while ((wqe = queue_head(qp->sq.queue))) {
@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
}
}
+static void free_pkt(struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ struct rxe_qp *qp = pkt->qp;
+ struct ib_device *dev = qp->ibqp.device;
+
+ kfree_skb(skb);
+ rxe_drop_ref(qp);
+ ib_device_put(dev);
+}
+
int rxe_completer(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -624,11 +636,8 @@ int rxe_completer(void *arg)
break;
case COMPST_DONE:
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
+ if (pkt)
+ free_pkt(pkt);
goto done;
case COMPST_EXIT:
@@ -671,12 +680,8 @@ int rxe_completer(void *arg)
*/
if (qp->comp.started_retry &&
!qp->comp.timeout_retry) {
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto done;
}
@@ -699,13 +704,8 @@ int rxe_completer(void *arg)
qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 0);
}
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto done;
} else {
@@ -726,9 +726,7 @@ int rxe_completer(void *arg)
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
+ free_pkt(pkt);
goto exit;
} else {
rxe_counter_inc(rxe,
@@ -742,13 +740,8 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto exit;
}
}
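
free_pkt() exists because every received packet carries three references that must be dropped together: the skb itself, the QP reference taken on receive, and the ib_device reference taken by rxe_get_dev_from_net() (see the rxe_net.c hunk below, whose comment notes the reference is dropped when the skb is freed). The old per-branch cleanup dropped only the first two, leaking the device reference. As a comment-level sketch of the invariant:

/* Invariant after this change: every branch of rxe_completer() that is
 * done with a received packet calls free_pkt() exactly once, so
 *   kfree_skb(skb)       - the buffer,
 *   rxe_drop_ref(qp)     - the QP reference from the receive path,
 *   ib_device_put(dev)   - the device reference from rxe_get_dev_from_net(),
 * are always released as a unit.
 */
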
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 3b483b75dfe3..e432f9e37795 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -22,7 +22,6 @@ struct rxe_pkt_info {
u16 paylen; /* length of bth - icrc */
u8 port_num; /* port pkt received on */
u8 opcode; /* bth opcode of packet */
- u8 offset; /* bth offset from pkt->hdr */
};
/* Macros should be used only for received skb */
@@ -280,134 +279,134 @@ static inline void __bth_set_psn(void *arg, u32 psn)
static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
- return __bth_opcode(pkt->hdr + pkt->offset);
+ return __bth_opcode(pkt->hdr);
}
static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
- __bth_set_opcode(pkt->hdr + pkt->offset, opcode);
+ __bth_set_opcode(pkt->hdr, opcode);
}
static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
- return __bth_se(pkt->hdr + pkt->offset);
+ return __bth_se(pkt->hdr);
}
static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
- __bth_set_se(pkt->hdr + pkt->offset, se);
+ __bth_set_se(pkt->hdr, se);
}
static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
- return __bth_mig(pkt->hdr + pkt->offset);
+ return __bth_mig(pkt->hdr);
}
static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
- __bth_set_mig(pkt->hdr + pkt->offset, mig);
+ __bth_set_mig(pkt->hdr, mig);
}
static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
- return __bth_pad(pkt->hdr + pkt->offset);
+ return __bth_pad(pkt->hdr);
}
static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
- __bth_set_pad(pkt->hdr + pkt->offset, pad);
+ __bth_set_pad(pkt->hdr, pad);
}
static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
- return __bth_tver(pkt->hdr + pkt->offset);
+ return __bth_tver(pkt->hdr);
}
static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
- __bth_set_tver(pkt->hdr + pkt->offset, tver);
+ __bth_set_tver(pkt->hdr, tver);
}
static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
- return __bth_pkey(pkt->hdr + pkt->offset);
+ return __bth_pkey(pkt->hdr);
}
static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
- __bth_set_pkey(pkt->hdr + pkt->offset, pkey);
+ __bth_set_pkey(pkt->hdr, pkey);
}
static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
- return __bth_qpn(pkt->hdr + pkt->offset);
+ return __bth_qpn(pkt->hdr);
}
static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
- __bth_set_qpn(pkt->hdr + pkt->offset, qpn);
+ __bth_set_qpn(pkt->hdr, qpn);
}
static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
- return __bth_fecn(pkt->hdr + pkt->offset);
+ return __bth_fecn(pkt->hdr);
}
static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
- __bth_set_fecn(pkt->hdr + pkt->offset, fecn);
+ __bth_set_fecn(pkt->hdr, fecn);
}
static inline int bth_becn(struct rxe_pkt_info *pkt)
{
- return __bth_becn(pkt->hdr + pkt->offset);
+ return __bth_becn(pkt->hdr);
}
static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
- __bth_set_becn(pkt->hdr + pkt->offset, becn);
+ __bth_set_becn(pkt->hdr, becn);
}
static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
- return __bth_resv6a(pkt->hdr + pkt->offset);
+ return __bth_resv6a(pkt->hdr);
}
static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
- __bth_set_resv6a(pkt->hdr + pkt->offset);
+ __bth_set_resv6a(pkt->hdr);
}
static inline int bth_ack(struct rxe_pkt_info *pkt)
{
- return __bth_ack(pkt->hdr + pkt->offset);
+ return __bth_ack(pkt->hdr);
}
static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
- __bth_set_ack(pkt->hdr + pkt->offset, ack);
+ __bth_set_ack(pkt->hdr, ack);
}
static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
- __bth_set_resv7(pkt->hdr + pkt->offset);
+ __bth_set_resv7(pkt->hdr);
}
static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
- return __bth_psn(pkt->hdr + pkt->offset);
+ return __bth_psn(pkt->hdr);
}
static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
- __bth_set_psn(pkt->hdr + pkt->offset, psn);
+ __bth_set_psn(pkt->hdr, psn);
}
static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
int mig, int pad, u16 pkey, u32 qpn, int ack_req,
u32 psn)
{
- struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
+ struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);
bth->opcode = opcode;
bth->flags = (pad << 4) & BTH_PAD_MASK;
@@ -448,14 +447,14 @@ static inline void __rdeth_set_een(void *arg, u32 een)
static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
{
- return __rdeth_een(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+ return __rdeth_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}
static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
- __rdeth_set_een(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+ __rdeth_set_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}
/******************************************************************************
@@ -499,26 +498,26 @@ static inline void __deth_set_sqp(void *arg, u32 sqp)
static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
- return __deth_qkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+ return __deth_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}
static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
- __deth_set_qkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+ __deth_set_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}
static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
- return __deth_sqp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+ return __deth_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}
static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
- __deth_set_sqp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+ __deth_set_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
/******************************************************************************
@@ -574,38 +573,38 @@ static inline void __reth_set_len(void *arg, u32 len)
static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
- return __reth_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
- __reth_set_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+ __reth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}
static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
- return __reth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __reth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+ __reth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}
static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
- return __reth_len(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
- __reth_set_len(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+ __reth_set_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
/******************************************************************************
@@ -676,50 +675,50 @@ static inline void __atmeth_set_comp(void *arg, u64 comp)
static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
- return __atmeth_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
- __atmeth_set_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+ __atmeth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}
static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
- return __atmeth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __atmeth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+ __atmeth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}
static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
- return __atmeth_swap_add(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
- __atmeth_set_swap_add(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+ __atmeth_set_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}
static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
- return __atmeth_comp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
- __atmeth_set_comp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+ __atmeth_set_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
/******************************************************************************
@@ -780,26 +779,26 @@ static inline void __aeth_set_msn(void *arg, u32 msn)
static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
- return __aeth_syn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+ return __aeth_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}
static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
- __aeth_set_syn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+ __aeth_set_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}
static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
- return __aeth_msn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+ return __aeth_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}
static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
- __aeth_set_msn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+ __aeth_set_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
/******************************************************************************
@@ -825,14 +824,14 @@ static inline void __atmack_set_orig(void *arg, u64 orig)
static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
- return __atmack_orig(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+ return __atmack_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}
static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
- __atmack_set_orig(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+ __atmack_set_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
/******************************************************************************
@@ -858,14 +857,14 @@ static inline void __immdt_set_imm(void *arg, __be32 imm)
static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
- return __immdt_imm(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+ return __immdt_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}
static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
- __immdt_set_imm(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+ __immdt_set_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}
/******************************************************************************
@@ -891,14 +890,14 @@ static inline void __ieth_set_rkey(void *arg, u32 rkey)
static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
- return __ieth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+ return __ieth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}
static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __ieth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+ __ieth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}
enum rxe_hdr_length {
@@ -915,13 +914,12 @@ enum rxe_hdr_length {
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
- return pkt->offset + rxe_opcode[pkt->opcode].length;
+ return rxe_opcode[pkt->opcode].length;
}
static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
- return pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+ return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}
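
With the offset field removed from rxe_pkt_info, pkt->hdr always points at the BTH, and each accessor finds its segment at pkt->hdr + rxe_opcode[opcode].offset[SEG]. A comment sketch of the layout (segment sizes are the IBA values; the offsets shown are illustrative, not the real opcode table):

/*
 *   pkt->hdr --> [ BTH, 12 bytes ][ RETH, 16 bytes ][ payload ... ]
 *
 *   offset[RXE_BTH]     == 0   ->  bth_*() read pkt->hdr directly
 *   offset[RXE_RETH]    == 12  ->  reth_*() read pkt->hdr + 12
 *   offset[RXE_PAYLOAD] == 28  ->  payload_addr() skips all headers,
 *                                  and header_size() is simply
 *                                  rxe_opcode[opcode].length
 */
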
static inline size_t payload_size(struct rxe_pkt_info *pkt)
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index c02315aed8d1..0ea9a5aa4ec0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -7,45 +7,61 @@
#include "rxe.h"
#include "rxe_loc.h"
+/* caller should hold mc_grp_pool->pool_lock */
+static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
+ struct rxe_pool *pool,
+ union ib_gid *mgid)
+{
+ int err;
+ struct rxe_mc_grp *grp;
+
+ grp = rxe_alloc_locked(&rxe->mc_grp_pool);
+ if (!grp)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&grp->qp_list);
+ spin_lock_init(&grp->mcg_lock);
+ grp->rxe = rxe;
+ rxe_add_key_locked(grp, mgid);
+
+ err = rxe_mcast_add(rxe, mgid);
+ if (unlikely(err)) {
+ rxe_drop_key_locked(grp);
+ rxe_drop_ref(grp);
+ return ERR_PTR(err);
+ }
+
+ return grp;
+}
+
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mc_grp **grp_p)
{
int err;
struct rxe_mc_grp *grp;
+ struct rxe_pool *pool = &rxe->mc_grp_pool;
+ unsigned long flags;
- if (rxe->attr.max_mcast_qp_attach == 0) {
- err = -EINVAL;
- goto err1;
- }
+ if (rxe->attr.max_mcast_qp_attach == 0)
+ return -EINVAL;
- grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ write_lock_irqsave(&pool->pool_lock, flags);
+
+ grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
goto done;
- grp = rxe_alloc(&rxe->mc_grp_pool);
- if (!grp) {
- err = -ENOMEM;
- goto err1;
+ grp = create_grp(rxe, pool, mgid);
+ if (IS_ERR(grp)) {
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+ err = PTR_ERR(grp);
+ return err;
}
- INIT_LIST_HEAD(&grp->qp_list);
- spin_lock_init(&grp->mcg_lock);
- grp->rxe = rxe;
-
- rxe_add_key(grp, mgid);
-
- err = rxe_mcast_add(rxe, mgid);
- if (err)
- goto err2;
-
done:
+ write_unlock_irqrestore(&pool->pool_lock, flags);
*grp_p = grp;
return 0;
-
-err2:
- rxe_drop_ref(grp);
-err1:
- return err;
}
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
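
The rework closes a race in the old get-or-create sequence, where the key lookup and the group creation happened under separate lock acquisitions and two callers could both miss the lookup and insert duplicate groups. Lookup and creation now sit under a single write_lock of the pool, which is why create_grp() must use the *_locked allocator and key helpers. A generic sketch of the pattern, with hypothetical types:

struct example_obj *example_get_or_create(struct example_table *t,
					  const void *key)
{
	struct example_obj *obj;
	unsigned long flags;

	write_lock_irqsave(&t->lock, flags);

	obj = example_lookup_locked(t, key);		/* hypothetical */
	if (!obj)
		obj = example_create_locked(t, key);	/* may be ERR_PTR */

	write_unlock_irqrestore(&t->lock, flags);
	return obj;
}
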
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c4b06ced30a7..0701bd1ffd1a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -8,6 +8,7 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
+#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
@@ -152,10 +153,16 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
+ struct rxe_dev *rxe;
struct net_device *ndev = skb->dev;
- struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ /* takes a reference on rxe->ib_dev
+ * drop when skb is freed
+ */
+ rxe = rxe_get_dev_from_net(ndev);
+ if (!rxe && is_vlan_dev(ndev))
+ rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
if (!rxe)
goto drop;
@@ -174,12 +181,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
rxe_rcv(skb);
- /*
- * FIXME: this is in the wrong place, it needs to be done when pkt is
- * destroyed
- */
- ib_device_put(&rxe->ib_dev);
-
return 0;
drop:
kfree_skb(skb);
@@ -408,6 +409,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
void rxe_loopback(struct sk_buff *skb)
{
+ if (skb->protocol == htons(ETH_P_IP))
+ skb_pull(skb, sizeof(struct iphdr));
+ else
+ skb_pull(skb, sizeof(struct ipv6hdr));
+
rxe_rcv(skb);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b374eb53e2fe..307d8986e7c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -15,21 +15,25 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
+ .elem_offset = offsetof(struct rxe_ucontext, pelem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
+ .elem_offset = offsetof(struct rxe_pd, pelem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .flags = RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
+ .elem_offset = offsetof(struct rxe_ah, pelem),
+ .flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
+ .elem_offset = offsetof(struct rxe_srq, pelem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -37,6 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
+ .elem_offset = offsetof(struct rxe_qp, pelem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_QP_INDEX,
@@ -45,12 +50,14 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
+ .elem_offset = offsetof(struct rxe_cq, pelem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mem),
+ .elem_offset = offsetof(struct rxe_mem, pelem),
.cleanup = rxe_mem_cleanup,
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MR_INDEX,
@@ -59,6 +66,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mem),
+ .elem_offset = offsetof(struct rxe_mem, pelem),
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
@@ -66,6 +74,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
+ .elem_offset = offsetof(struct rxe_mc_grp, pelem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -74,7 +83,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .flags = RXE_POOL_ATOMIC,
+ .elem_offset = offsetof(struct rxe_mc_elem, pelem),
},
};
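
Recording elem_offset per type lets the pool convert between the object handed to callers (what kzalloc() returned) and the rxe_pool_entry embedded somewhere inside it, in both directions: rxe_alloc() goes object-to-entry and rxe_elem_release() goes entry-to-object. A sketch with a hypothetical object:

struct example_obj {
	int ib_part;			/* stands in for the ib core struct */
	struct rxe_pool_entry pelem;	/* pool bookkeeping, at any offset */
};

static const size_t example_elem_offset =
	offsetof(struct example_obj, pelem);

/* object -> entry, as rxe_alloc() computes it */
static struct rxe_pool_entry *example_to_elem(struct example_obj *obj)
{
	return (struct rxe_pool_entry *)((u8 *)obj + example_elem_offset);
}

/* entry -> object, as rxe_elem_release() computes it */
static struct example_obj *example_to_obj(struct rxe_pool_entry *elem)
{
	return (struct example_obj *)((u8 *)elem - example_elem_offset);
}
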
@@ -94,18 +103,18 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
goto out;
}
- pool->max_index = max;
- pool->min_index = min;
+ pool->index.max_index = max;
+ pool->index.min_index = min;
size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
- pool->table = kmalloc(size, GFP_KERNEL);
- if (!pool->table) {
+ pool->index.table = kmalloc(size, GFP_KERNEL);
+ if (!pool->index.table) {
err = -ENOMEM;
goto out;
}
- pool->table_size = size;
- bitmap_zero(pool->table, max - min + 1);
+ pool->index.table_size = size;
+ bitmap_zero(pool->index.table, max - min + 1);
out:
return err;
@@ -127,13 +136,12 @@ int rxe_pool_init(
pool->max_elem = max_elem;
pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
pool->flags = rxe_type_info[type].flags;
- pool->tree = RB_ROOT;
+ pool->index.tree = RB_ROOT;
+ pool->key.tree = RB_ROOT;
pool->cleanup = rxe_type_info[type].cleanup;
atomic_set(&pool->num_elem, 0);
- kref_init(&pool->ref_cnt);
-
rwlock_init(&pool->pool_lock);
if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
@@ -145,67 +153,47 @@ int rxe_pool_init(
}
if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key_offset = rxe_type_info[type].key_offset;
- pool->key_size = rxe_type_info[type].key_size;
+ pool->key.key_offset = rxe_type_info[type].key_offset;
+ pool->key.key_size = rxe_type_info[type].key_size;
}
- pool->state = RXE_POOL_STATE_VALID;
-
out:
return err;
}
-static void rxe_pool_release(struct kref *kref)
-{
- struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
-
- pool->state = RXE_POOL_STATE_INVALID;
- kfree(pool->table);
-}
-
-static void rxe_pool_put(struct rxe_pool *pool)
-{
- kref_put(&pool->ref_cnt, rxe_pool_release);
-}
-
void rxe_pool_cleanup(struct rxe_pool *pool)
{
- unsigned long flags;
-
- write_lock_irqsave(&pool->pool_lock, flags);
- pool->state = RXE_POOL_STATE_INVALID;
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- write_unlock_irqrestore(&pool->pool_lock, flags);
- rxe_pool_put(pool);
+ kfree(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
{
u32 index;
- u32 range = pool->max_index - pool->min_index + 1;
+ u32 range = pool->index.max_index - pool->index.min_index + 1;
- index = find_next_zero_bit(pool->table, range, pool->last);
+ index = find_next_zero_bit(pool->index.table, range, pool->index.last);
if (index >= range)
- index = find_first_zero_bit(pool->table, range);
+ index = find_first_zero_bit(pool->index.table, range);
WARN_ON_ONCE(index >= range);
- set_bit(index, pool->table);
- pool->last = index;
- return index + pool->min_index;
+ set_bit(index, pool->index.table);
+ pool->index.last = index;
+ return index + pool->index.min_index;
}
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -218,25 +206,25 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->index_node, parent, link);
+ rb_insert_color(&new->index_node, &pool->index.tree);
out:
return;
}
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- (u8 *)new + pool->key_offset, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ (u8 *)new + pool->key.key_offset, pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -249,116 +237,135 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->key_node, parent, link);
+ rb_insert_color(&new->key_node, &pool->key.tree);
out:
return;
}
-void rxe_add_key(void *arg, void *key)
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
- memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+ memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
insert_key(pool, elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_drop_key(void *arg)
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- rb_erase(&elem->node, &pool->tree);
+ __rxe_add_key_locked(elem, key);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_add_index(void *arg)
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ rb_erase(&elem->key_node, &pool->key.tree);
+}
+
+void __rxe_drop_key(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
+ __rxe_drop_key_locked(elem);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void __rxe_add_index_locked(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
elem->index = alloc_index(pool);
insert_index(pool, elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_drop_index(void *arg)
+void __rxe_add_index(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- clear_bit(elem->index - pool->min_index, pool->table);
- rb_erase(&elem->node, &pool->tree);
+ __rxe_add_index_locked(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void *rxe_alloc(struct rxe_pool *pool)
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem;
- unsigned long flags;
+ struct rxe_pool *pool = elem->pool;
- might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+ clear_bit(elem->index - pool->index.min_index, pool->index.table);
+ rb_erase(&elem->index_node, &pool->index.tree);
+}
- read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID) {
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return NULL;
- }
- kref_get(&pool->ref_cnt);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+void __rxe_drop_index(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ write_lock_irqsave(&pool->pool_lock, flags);
+ __rxe_drop_index_locked(elem);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+}
- if (!ib_device_try_get(&pool->rxe->ib_dev))
- goto out_put_pool;
+void *rxe_alloc_locked(struct rxe_pool *pool)
+{
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rxe_pool_entry *elem;
+ u8 *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- elem = kzalloc(rxe_type_info[pool->type].size,
- (pool->flags & RXE_POOL_ATOMIC) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (!elem)
+ obj = kzalloc(info->size, GFP_ATOMIC);
+ if (!obj)
goto out_cnt;
+ elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
elem->pool = pool;
kref_init(&elem->ref_cnt);
- return elem;
+ return obj;
out_cnt:
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
- rxe_pool_put(pool);
return NULL;
}
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+void *rxe_alloc(struct rxe_pool *pool)
{
- unsigned long flags;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rxe_pool_entry *elem;
+ u8 *obj;
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_cnt;
- might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+ obj = kzalloc(info->size, GFP_KERNEL);
+ if (!obj)
+ goto out_cnt;
- read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID) {
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return -EINVAL;
- }
- kref_get(&pool->ref_cnt);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
- if (!ib_device_try_get(&pool->rxe->ib_dev))
- goto out_put_pool;
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return obj;
+
+out_cnt:
+ atomic_dec(&pool->num_elem);
+ return NULL;
+}
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
@@ -369,9 +376,6 @@ int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
out_cnt:
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
- rxe_pool_put(pool);
return -EINVAL;
}
@@ -380,67 +384,77 @@ void rxe_elem_release(struct kref *kref)
struct rxe_pool_entry *elem =
container_of(kref, struct rxe_pool_entry, ref_cnt);
struct rxe_pool *pool = elem->pool;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ u8 *obj;
if (pool->cleanup)
pool->cleanup(elem);
- if (!(pool->flags & RXE_POOL_NO_ALLOC))
- kfree(elem);
+ if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
+ obj = (u8 *)elem - info->elem_offset;
+ kfree(obj);
+ }
+
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
- rxe_pool_put(pool);
}
-void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
- unsigned long flags;
-
- read_lock_irqsave(&pool->pool_lock, flags);
-
- if (pool->state != RXE_POOL_STATE_VALID)
- goto out;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj;
- node = pool->tree.rb_node;
+ node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, index_node);
if (elem->index > index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else {
- kref_get(&elem->ref_cnt);
+ else
break;
- }
}
-out:
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+ if (node) {
+ kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
+ }
+
+ return obj;
}
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
- int cmp;
+ u8 *obj;
unsigned long flags;
read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_pool_get_index_locked(pool, index);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID)
- goto out;
+ return obj;
+}
- node = pool->tree.rb_node;
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
+{
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj;
+ int cmp;
+
+ node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- key, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ key, pool->key.key_size);
if (cmp > 0)
node = node->rb_left;
@@ -450,10 +464,24 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
break;
}
- if (node)
+ if (node) {
kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
+ }
-out:
+ return obj;
+}
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+ u8 *obj;
+ unsigned long flags;
+
+ read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_pool_get_key_locked(pool, key);
read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+
+ return obj;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 432745ffc8d4..61210b300a78 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -11,7 +11,6 @@
#define RXE_POOL_CACHE_FLAGS (0)
enum rxe_pool_flags {
- RXE_POOL_ATOMIC = BIT(0),
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
RXE_POOL_NO_ALLOC = BIT(4),
@@ -36,6 +35,7 @@ struct rxe_pool_entry;
struct rxe_type_info {
const char *name;
size_t size;
+ size_t elem_offset;
void (*cleanup)(struct rxe_pool_entry *obj);
enum rxe_pool_flags flags;
u32 max_index;
@@ -46,18 +46,16 @@ struct rxe_type_info {
extern struct rxe_type_info rxe_type_info[];
-enum rxe_pool_state {
- RXE_POOL_STATE_INVALID,
- RXE_POOL_STATE_VALID,
-};
-
struct rxe_pool_entry {
struct rxe_pool *pool;
struct kref ref_cnt;
struct list_head list;
- /* only used if indexed or keyed */
- struct rb_node node;
+ /* only used if keyed */
+ struct rb_node key_node;
+
+ /* only used if indexed */
+ struct rb_node index_node;
u32 index;
};
@@ -65,24 +63,29 @@ struct rxe_pool {
struct rxe_dev *rxe;
rwlock_t pool_lock; /* protects pool add/del/search */
size_t elem_size;
- struct kref ref_cnt;
void (*cleanup)(struct rxe_pool_entry *obj);
- enum rxe_pool_state state;
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
- /* only used if indexed or keyed */
- struct rb_root tree;
- unsigned long *table;
- size_t table_size;
- u32 max_index;
- u32 min_index;
- u32 last;
- size_t key_offset;
- size_t key_size;
+ /* only used if indexed */
+ struct {
+ struct rb_root tree;
+ unsigned long *table;
+ size_t table_size;
+ u32 last;
+ u32 max_index;
+ u32 min_index;
+ } index;
+
+ /* only used if keyed */
+ struct {
+ struct rb_root tree;
+ size_t key_offset;
+ size_t key_size;
+ } key;
};
/* initialize a pool of objects with given limit on
@@ -95,32 +98,70 @@ int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool */
+/* allocate an object from pool, with and without holding the pool_lock */
+void *rxe_alloc_locked(struct rxe_pool *pool);
+
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
/* assign an index to an indexed object and insert object into
- * pool's rb tree
+ * pool's rb tree, with and without holding the pool_lock
*/
-void rxe_add_index(void *elem);
+void __rxe_add_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
-/* drop an index and remove object from rb tree */
-void rxe_drop_index(void *elem);
+void __rxe_add_index(struct rxe_pool_entry *elem);
+
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+
+/* drop an index and remove object from rb tree
+ * with and without holding the pool_lock
+ */
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+
+void __rxe_drop_index(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
/* assign a key to a keyed object and insert object into
- * pool's rb tree
+ * pool's rb tree, with and without holding the pool_lock
*/
-void rxe_add_key(void *elem, void *key);
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+
+/* remove elem from rb tree, with and without holding the pool_lock */
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
-/* remove elem from rb tree */
-void rxe_drop_key(void *elem);
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+
+void __rxe_drop_key(struct rxe_pool_entry *elem);
+
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+
+/* lookup an indexed object from index, with and without holding the pool_lock.
+ * takes a reference on the object
+ */
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
-/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
-/* lookup keyed object from key. takes a reference on the object */
+/* lookup keyed object from key, with and without holding the pool_lock.
+ * takes a reference on the object
+ */
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key);
+
void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
/* cleanup an object when all references are dropped */
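
Both the release and lookup paths recover the containing object from the embedded rxe_pool_entry with obj = (u8 *)elem - info->elem_offset, the inverse of container_of(). A standalone sketch of that arithmetic (struct names are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct pool_entry { int index; };	/* stand-in for rxe_pool_entry */

struct my_obj {				/* stand-in for e.g. a queue pair */
	long payload;
	struct pool_entry pelem;	/* embedded entry, not at offset 0 */
};

int main(void)
{
	struct my_obj qp = { .payload = 42 };
	struct pool_entry *elem = &qp.pelem;
	size_t elem_offset = offsetof(struct my_obj, pelem);

	/* same arithmetic as rxe_elem_release()/rxe_pool_get_index_locked() */
	struct my_obj *obj = (struct my_obj *)((unsigned char *)elem - elem_offset);

	printf("%ld\n", obj->payload);	/* prints 42 */
	return 0;
}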
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 656a5b4be847..34ae957a315c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -62,6 +62,17 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
struct rxe_port *port;
int port_num = init->port_num;
+ switch (init->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (!init->recv_cq || !init->send_cq) {
pr_warn("missing cq\n");
goto err1;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index c9984a28eecc..45d2f711bce2 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -9,21 +9,26 @@
#include "rxe.h"
#include "rxe_loc.h"
+/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_qp *qp)
{
+ unsigned int pkt_type;
+
if (unlikely(!qp->valid))
goto err1;
+ pkt_type = pkt->opcode & 0xe0;
+
switch (qp_type(qp)) {
case IB_QPT_RC:
- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+ if (unlikely(pkt_type != IB_OPCODE_RC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
break;
case IB_QPT_UC:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+ if (unlikely(pkt_type != IB_OPCODE_UC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
@@ -31,7 +36,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+ if (unlikely(pkt_type != IB_OPCODE_UD)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
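
The rewritten checks work because the BTH opcode encodes the transport class in its top three bits (in include/rdma/ib_pack.h, IB_OPCODE_RC is 0x00, IB_OPCODE_UC is 0x20 and IB_OPCODE_UD is 0x60), so masking with 0xe0 isolates that class for an exact comparison. The old RC test, pkt->opcode & IB_OPCODE_RC, could never be non-zero since IB_OPCODE_RC itself is zero. A small demonstration:

#include <stdio.h>

/* values as defined in include/rdma/ib_pack.h */
#define IB_OPCODE_RC 0x00
#define IB_OPCODE_UC 0x20
#define IB_OPCODE_UD 0x60

int main(void)
{
	unsigned int opcode = 0x64;		/* UD SEND_ONLY */
	unsigned int pkt_type = opcode & 0xe0;

	/* prints type=0x60 ud=1 */
	printf("type=0x%02x ud=%d\n", pkt_type, pkt_type == IB_OPCODE_UD);
	return 0;
}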
@@ -85,8 +90,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
goto err1;
}
- if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
- pkt->mask) {
+ if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
@@ -252,7 +256,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
list_for_each_entry(mce, &mcg->qp_list, qp_list) {
qp = mce->qp;
- pkt = SKB_TO_PKT(skb);
/* validate qp for incoming packet */
err = check_type_state(rxe, pkt, qp);
@@ -264,12 +267,22 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
continue;
/* for all but the last qp create a new clone of the
- * skb and pass to the qp.
+ * skb and pass to the qp. If an error occurs in the
+ * checks for the last qp in the list, we need to
+ * free the skb, since it hasn't been passed on to
+ * rxe_rcv_pkt(), which would free it later.
*/
- if (mce->qp_list.next != &mcg->qp_list)
+ if (mce->qp_list.next != &mcg->qp_list) {
per_qp_skb = skb_clone(skb, GFP_ATOMIC);
- else
+ if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+ kfree_skb(per_qp_skb);
+ continue;
+ }
+ } else {
per_qp_skb = skb;
+ /* show we have consumed the skb */
+ skb = NULL;
+ }
if (unlikely(!per_qp_skb))
continue;
@@ -284,10 +297,10 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
- return;
-
err1:
+ /* free skb if not consumed */
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
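
The reworked loop uses consume-or-free ownership: the last qp in the list takes the original skb and the local pointer is nulled, so the single kfree_skb(skb) at err1 is a safe no-op whenever the buffer was handed off. A standalone sketch of the idiom, with plain malloc/free standing in for skbs:

#include <stdlib.h>

static void consume(char *buf)		/* hand-off, like rxe_rcv_pkt() */
{
	free(buf);
}

int main(void)
{
	char *buf = malloc(16);
	int last_qp = 1;		/* the "last qp in the list" case */

	if (buf && last_qp) {
		consume(buf);
		buf = NULL;		/* show we have consumed the buffer */
	}
	free(buf);			/* no-op once buf was consumed */
	return 0;
}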
/**
@@ -340,9 +353,7 @@ void rxe_rcv(struct sk_buff *skb)
__be32 *icrcp;
u32 calc_icrc, pack_icrc;
- pkt->offset = 0;
-
- if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+ if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop;
if (rxe_chk_dgid(rxe, skb) < 0) {
@@ -397,4 +408,5 @@ drop:
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d4917646641a..889290793d75 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -375,7 +375,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
pkt->psn = qp->req.psn;
pkt->mask = rxe_opcode[opcode].mask;
pkt->paylen = paylen;
- pkt->offset = 0;
pkt->wqe = wqe;
/* init skb */
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5a098083a9d2..142f3d8014d8 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
/* go drain recv wr queue */
@@ -585,11 +586,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
ack->qp = qp;
ack->opcode = opcode;
ack->mask = rxe_opcode[opcode].mask;
- ack->offset = pkt->offset;
ack->paylen = paylen;
/* fill in bth using the request packet headers */
- memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
+ memcpy(ack->hdr, pkt->hdr, RXE_BTH_BYTES);
bth_set_opcode(ack, opcode);
bth_set_qpn(ack, qp->attr.dest_qp_num);
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
@@ -1012,6 +1017,7 @@ static enum resp_states cleanup(struct rxe_qp *qp,
skb = skb_dequeue(&qp->req_pkts);
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (qp->resp.mr) {
@@ -1176,6 +1182,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (notify)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index a031514e2f41..dee5e0e919d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,12 +106,12 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
return IB_LINK_LAYER_ETHERNET;
}
-static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(uctx->device);
- struct rxe_ucontext *uc = to_ruc(uctx);
+ struct rxe_dev *rxe = to_rdev(ibuc->device);
+ struct rxe_ucontext *uc = to_ruc(ibuc);
- return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
+ return rxe_add_to_pool(&rxe->uc_pool, uc);
}
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
@@ -145,7 +145,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
+ return rxe_add_to_pool(&rxe->pd_pool, pd);
}
static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -169,7 +169,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+ err = rxe_add_to_pool(&rxe->ah_pool, ah);
if (err)
return err;
@@ -273,7 +273,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (err)
goto err1;
- err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+ err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err)
goto err1;
@@ -555,37 +555,42 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
}
}
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ const struct ib_send_wr *ibwr)
+{
+ struct ib_sge *sge = ibwr->sg_list;
+ u8 *p = wqe->dma.inline_data;
+ int i;
+
+ for (i = 0; i < ibwr->num_sge; i++, sge++) {
+ memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+ p += sge->length;
+ }
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe)
{
int num_sge = ibwr->num_sge;
- struct ib_sge *sge;
- int i;
- u8 *p;
init_send_wr(qp, &wqe->wr, ibwr);
+ /* local operation */
+ if (unlikely(mask & WR_REG_MASK)) {
+ wqe->mask = mask;
+ wqe->state = wqe_state_posted;
+ return;
+ }
+
if (qp_type(qp) == IB_QPT_UD ||
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI)
memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
- if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
- p = wqe->dma.inline_data;
-
- sge = ibwr->sg_list;
- for (i = 0; i < num_sge; i++, sge++) {
- memcpy(p, (void *)(uintptr_t)sge->addr,
- sge->length);
-
- p += sge->length;
- }
- } else if (mask & WR_REG_MASK) {
- wqe->mask = mask;
- wqe->state = wqe_state_posted;
- return 0;
- } else
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+ copy_inline_data_to_wqe(wqe, ibwr);
+ else
memcpy(wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -599,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
wqe->dma.sge_offset = 0;
wqe->state = wqe_state_posted;
wqe->ssn = atomic_add_return(1, &qp->ssn);
-
- return 0;
}
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -623,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
}
send_wqe = producer_addr(sq->queue);
-
- err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
- if (unlikely(err))
- goto err1;
+ init_send_wqe(qp, ibwr, mask, length, send_wqe);
advance_producer(sq->queue);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -774,7 +774,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (err)
return err;
- return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+ return rxe_add_to_pool(&rxe->cq_pool, cq);
}
static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -1118,7 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+ strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index adda78996219..368959ae9a8c 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -653,7 +653,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
struct siw_sqe *orq_e = orq_get_tail(qp);
- if (orq_e && READ_ONCE(orq_e->flags) == 0)
+ if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
return NULL;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index ee95cf29179d..cf55326f2ab4 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -135,7 +135,7 @@ static struct {
static int siw_init_cpulist(void)
{
- int i, num_nodes = num_possible_nodes();
+ int i, num_nodes = nr_node_ids;
memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
@@ -357,7 +357,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sizeof(base_dev->iw_ifname));
/* Disable TCP port mapping */
- base_dev->iw_driver_flags = IW_F_NO_PORT_MAP,
+ base_dev->iw_driver_flags = IW_F_NO_PORT_MAP;
sdev->attrs.max_qp = SIW_MAX_QP;
sdev->attrs.max_qp_wr = SIW_MAX_QP_WR;
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 875d36d4b1c6..ddb2e66f9f13 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
- irq_size = roundup_pow_of_two(irq_size);
- orq_size = roundup_pow_of_two(orq_size);
-
- qp->attrs.irq_size = irq_size;
- qp->attrs.orq_size = orq_size;
-
- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
- if (!qp->irq) {
- siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
- qp->attrs.irq_size = 0;
- return -ENOMEM;
+ if (irq_size) {
+ irq_size = roundup_pow_of_two(irq_size);
+ qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+ if (!qp->irq) {
+ qp->attrs.irq_size = 0;
+ return -ENOMEM;
+ }
}
- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
- if (!qp->orq) {
- siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
- qp->attrs.orq_size = 0;
- qp->attrs.irq_size = 0;
- vfree(qp->irq);
- return -ENOMEM;
+ if (orq_size) {
+ orq_size = roundup_pow_of_two(orq_size);
+ qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+ if (!qp->orq) {
+ qp->attrs.orq_size = 0;
+ qp->attrs.irq_size = 0;
+ vfree(qp->irq);
+ return -ENOMEM;
+ }
}
+ qp->attrs.irq_size = irq_size;
+ qp->attrs.orq_size = orq_size;
siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
return 0;
}
@@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
if (ctrl & MPA_V2_RDMA_WRITE_RTR)
wqe->sqe.opcode = SIW_OP_WRITE;
else if (ctrl & MPA_V2_RDMA_READ_RTR) {
- struct siw_sqe *rreq;
+ struct siw_sqe *rreq = NULL;
wqe->sqe.opcode = SIW_OP_READ;
spin_lock(&qp->orq_lock);
- rreq = orq_get_free(qp);
+ if (qp->attrs.orq_size)
+ rreq = orq_get_free(qp);
if (rreq) {
siw_read_to_orq(rreq, &wqe->sqe);
qp->orq_put++;
@@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
rreq->num_sge = 1;
}
-/*
- * Must be called with SQ locked.
- * To avoid complete SQ starvation by constant inbound READ requests,
- * the active IRQ will not be served after qp->irq_burst, if the
- * SQ has pending work.
- */
-int siw_activate_tx(struct siw_qp *qp)
+static int siw_activate_tx_from_sq(struct siw_qp *qp)
{
- struct siw_sqe *irqe, *sqe;
+ struct siw_sqe *sqe;
struct siw_wqe *wqe = tx_wqe(qp);
int rv = 1;
- irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
-
- if (irqe->flags & SIW_WQE_VALID) {
- sqe = sq_get_next(qp);
-
- /*
- * Avoid local WQE processing starvation in case
- * of constant inbound READ request stream
- */
- if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
- qp->irq_burst = 0;
- goto skip_irq;
- }
- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
- wqe->wr_status = SIW_WR_QUEUED;
-
- /* start READ RESPONSE */
- wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
- wqe->sqe.flags = 0;
- if (irqe->num_sge) {
- wqe->sqe.num_sge = 1;
- wqe->sqe.sge[0].length = irqe->sge[0].length;
- wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
- wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
- } else {
- wqe->sqe.num_sge = 0;
- }
-
- /* Retain original RREQ's message sequence number for
- * potential error reporting cases.
- */
- wqe->sqe.sge[1].length = irqe->sge[1].length;
-
- wqe->sqe.rkey = irqe->rkey;
- wqe->sqe.raddr = irqe->raddr;
+ sqe = sq_get_next(qp);
+ if (!sqe)
+ return 0;
- wqe->processed = 0;
- qp->irq_get++;
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
- /* mark current IRQ entry free */
- smp_store_mb(irqe->flags, 0);
+ /* First copy SQE to kernel private memory */
+ memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+ if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
+ rv = -EINVAL;
goto out;
}
- sqe = sq_get_next(qp);
- if (sqe) {
-skip_irq:
- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
- wqe->wr_status = SIW_WR_QUEUED;
-
- /* First copy SQE to kernel private memory */
- memcpy(&wqe->sqe, sqe, sizeof(*sqe));
-
- if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
+ if (wqe->sqe.flags & SIW_WQE_INLINE) {
+ if (wqe->sqe.opcode != SIW_OP_SEND &&
+ wqe->sqe.opcode != SIW_OP_WRITE) {
rv = -EINVAL;
goto out;
}
- if (wqe->sqe.flags & SIW_WQE_INLINE) {
- if (wqe->sqe.opcode != SIW_OP_SEND &&
- wqe->sqe.opcode != SIW_OP_WRITE) {
- rv = -EINVAL;
- goto out;
- }
- if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
- rv = -EINVAL;
- goto out;
- }
- wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
- wqe->sqe.sge[0].lkey = 0;
- wqe->sqe.num_sge = 1;
+ if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+ rv = -EINVAL;
+ goto out;
}
- if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
- /* A READ cannot be fenced */
- if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
- wqe->sqe.opcode ==
- SIW_OP_READ_LOCAL_INV)) {
- siw_dbg_qp(qp, "cannot fence read\n");
- rv = -EINVAL;
- goto out;
- }
- spin_lock(&qp->orq_lock);
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].lkey = 0;
+ wqe->sqe.num_sge = 1;
+ }
+ if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+ /* A READ cannot be fenced */
+ if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode ==
+ SIW_OP_READ_LOCAL_INV)) {
+ siw_dbg_qp(qp, "cannot fence read\n");
+ rv = -EINVAL;
+ goto out;
+ }
+ spin_lock(&qp->orq_lock);
- if (!siw_orq_empty(qp)) {
- qp->tx_ctx.orq_fence = 1;
- rv = 0;
- }
- spin_unlock(&qp->orq_lock);
+ if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
+ }
+ spin_unlock(&qp->orq_lock);
- } else if (wqe->sqe.opcode == SIW_OP_READ ||
- wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- struct siw_sqe *rreq;
+ } else if (wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+ struct siw_sqe *rreq;
- wqe->sqe.num_sge = 1;
+ if (unlikely(!qp->attrs.orq_size)) {
+ /* We negotiated not to send READ requests */
+ rv = -EINVAL;
+ goto out;
+ }
+ wqe->sqe.num_sge = 1;
- spin_lock(&qp->orq_lock);
+ spin_lock(&qp->orq_lock);
- rreq = orq_get_free(qp);
- if (rreq) {
- /*
- * Make an immediate copy in ORQ to be ready
- * to process loopback READ reply
- */
- siw_read_to_orq(rreq, &wqe->sqe);
- qp->orq_put++;
- } else {
- qp->tx_ctx.orq_fence = 1;
- rv = 0;
- }
- spin_unlock(&qp->orq_lock);
+ rreq = orq_get_free(qp);
+ if (rreq) {
+ /*
+ * Make an immediate copy in ORQ to be ready
+ * to process loopback READ reply
+ */
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+ } else {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
}
-
- /* Clear SQE, can be re-used by application */
- smp_store_mb(sqe->flags, 0);
- qp->sq_get++;
- } else {
- rv = 0;
+ spin_unlock(&qp->orq_lock);
}
+
+ /* Clear SQE, can be re-used by application */
+ smp_store_mb(sqe->flags, 0);
+ qp->sq_get++;
out:
if (unlikely(rv < 0)) {
siw_dbg_qp(qp, "error %d\n", rv);
@@ -1015,6 +969,65 @@ out:
}
/*
+ * Must be called with SQ locked.
+ * To avoid complete SQ starvation by constant inbound READ requests,
+ * the active IRQ will not be served after qp->irq_burst, if the
+ * SQ has pending work.
+ */
+int siw_activate_tx(struct siw_qp *qp)
+{
+ struct siw_sqe *irqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+
+ if (!qp->attrs.irq_size)
+ return siw_activate_tx_from_sq(qp);
+
+ irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+
+ if (!(irqe->flags & SIW_WQE_VALID))
+ return siw_activate_tx_from_sq(qp);
+
+ /*
+ * Avoid local WQE processing starvation in case
+ * of constant inbound READ request stream
+ */
+ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+ qp->irq_burst = 0;
+ return siw_activate_tx_from_sq(qp);
+ }
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
+
+ /* start READ RESPONSE */
+ wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+ wqe->sqe.flags = 0;
+ if (irqe->num_sge) {
+ wqe->sqe.num_sge = 1;
+ wqe->sqe.sge[0].length = irqe->sge[0].length;
+ wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+ wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+ } else {
+ wqe->sqe.num_sge = 0;
+ }
+
+ /* Retain original RREQ's message sequence number for
+ * potential error reporting cases.
+ */
+ wqe->sqe.sge[1].length = irqe->sge[1].length;
+
+ wqe->sqe.rkey = irqe->rkey;
+ wqe->sqe.raddr = irqe->raddr;
+
+ wqe->processed = 0;
+ qp->irq_get++;
+
+ /* mark current IRQ entry free */
+ smp_store_mb(irqe->flags, 0);
+
+ return 1;
+}
+
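
Splitting the plain-SQ path into siw_activate_tx_from_sq() lets the fairness rule in siw_activate_tx() read top-down: serve the pending IRQ entry unless it has already been served SIW_IRQ_MAXBURST_SQ_ACTIVE times in a row while SQ work is waiting. A reduced sketch of that burst-limit pattern (the queue predicates are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define MAXBURST 4			/* plays SIW_IRQ_MAXBURST_SQ_ACTIVE */

static int irq_burst;

static bool irq_pending(void) { return true; }	/* irqe->flags & SIW_WQE_VALID */
static bool sq_pending(void)  { return true; }	/* sq_get_next(qp) */

/* returns which queue to serve next: 'I' for IRQ, 'S' for SQ */
static char activate_tx(void)
{
	if (!irq_pending())
		return 'S';
	/* avoid SQ starvation under a constant inbound READ stream */
	if (sq_pending() && ++irq_burst >= MAXBURST) {
		irq_burst = 0;
		return 'S';
	}
	return 'I';
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		putchar(activate_tx());	/* prints IIISIIISII */
	putchar('\n');
	return 0;
}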
+/*
* Check if current CQ state qualifies for calling CQ completion
* handler. Must be called with CQ lock held.
*/
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 4bd1f1f84057..60116f20653c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
}
spin_lock_irqsave(&qp->sq_lock, flags);
+ if (unlikely(!qp->attrs.irq_size)) {
+ run_sq = 0;
+ goto error_irq;
+ }
if (tx_work->wr_status == SIW_WR_IDLE) {
/*
* immediately schedule READ response w/o
@@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
/* RRESP now valid as current TX wqe or placed into IRQ */
smp_store_mb(resp->flags, SIW_WQE_VALID);
} else {
- pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
- qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
+error_irq:
+ pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
+ qp_id(qp), qp->attrs.irq_size);
siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
RDMAP_ETYPE_REMOTE_OPERATION,
@@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
struct siw_sqe *orqe;
struct siw_wqe *wqe = NULL;
+ if (unlikely(!qp->attrs.orq_size))
+ return -EPROTO;
+
/* make sure ORQ indices are current */
smp_mb();
@@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
*/
rv = siw_orqe_start_rx(qp);
if (rv) {
- pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
- qp_id(qp), qp->orq_get % qp->attrs.orq_size);
+ pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
+ qp_id(qp), qp->attrs.orq_size);
goto error_term;
}
rv = siw_rresp_check_ntoh(srx, frx);
@@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
wc_status);
siw_wqe_put_mem(wqe, SIW_OP_READ);
- if (!error)
+ if (!error) {
rv = siw_check_tx_fence(qp);
- else
- /* Disable current ORQ eleement */
- WRITE_ONCE(orq_get_current(qp)->flags, 0);
+ } else {
+ /* Disable current ORQ element */
+ if (qp->attrs.orq_size)
+ WRITE_ONCE(orq_get_current(qp)->flags, 0);
+ }
break;
case RDMAP_RDMA_READ_REQ:
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index d19d8325588b..7989c4043db4 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1107,8 +1107,8 @@ next_wqe:
/*
* RREQ may have already been completed by inbound RRESP!
*/
- if (tx_type == SIW_OP_READ ||
- tx_type == SIW_OP_READ_LOCAL_INV) {
+ if ((tx_type == SIW_OP_READ ||
+ tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
/* Cleanup pending entry in ORQ */
qp->orq_put--;
qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 68fd053fc774..e389d44e5591 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -365,13 +365,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
if (rv)
goto err_out;
+ num_sqe = attrs->cap.max_send_wr;
+ num_rqe = attrs->cap.max_recv_wr;
+
/* All queue indices are derived from modulo operations
* on a free running 'get' (consumer) and 'put' (producer)
* unsigned counter. Having queue sizes at power of two
* avoids handling counter wrap around.
*/
- num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
- num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
+ if (num_sqe)
+ num_sqe = roundup_pow_of_two(num_sqe);
+ else {
+ /* Zero sized SQ is not supported */
+ rv = -EINVAL;
+ goto err_out;
+ }
+ if (num_rqe)
+ num_rqe = roundup_pow_of_two(num_rqe);
if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
@@ -379,7 +389,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
- siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
rv = -ENOMEM;
goto err_out_xa;
}
@@ -413,7 +422,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
- siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
rv = -ENOMEM;
goto err_out_xa;
}
@@ -966,9 +974,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
unsigned long flags;
int rv = 0;
- if (qp->srq) {
+ if (qp->srq || qp->attrs.rq_size == 0) {
*bad_wr = wr;
- return -EOPNOTSUPP; /* what else from errno.h? */
+ return -EINVAL;
}
if (!rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3440dc48d02c..179ff1d068e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -413,7 +413,6 @@ struct ipoib_dev_priv {
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
unsigned int max_send_sge;
- bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a6f413491321..e16b40c09f82 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -141,8 +141,6 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- priv->sm_fullmember_sendonly_support = false;
-
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 86e4ed64e4e2..5b3154503bf4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -275,7 +275,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
memset(&av, 0, sizeof(av));
av.type = rdma_ah_find_type(priv->ca, priv->port);
- rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid)),
+ rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
rdma_ah_set_port_num(&av, priv->port);
rdma_ah_set_sl(&av, mcast->mcmember.sl);
rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
@@ -334,15 +334,6 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
return;
}
/*
- * Check if can send sendonly MCG's with sendonly-fullmember join state.
- * It done here after the successfully join to the broadcast group,
- * because the broadcast group must always be joined first and is always
- * re-joined if the SM changes substantially.
- */
- priv->sm_fullmember_sendonly_support =
- ib_sa_sendonly_fullmem_support(&ipoib_sa_client,
- priv->ca, priv->port);
- /*
* Take rtnl_lock to avoid racing with ipoib_stop() and
* turning the carrier back on while a device is being
* removed. However, ipoib_stop() will attempt to flush
@@ -537,9 +528,7 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
* most closely emulates the behavior, from a user space
* application perspective, of Ethernet multicast operation.
*/
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
- priv->sm_fullmember_sendonly_support)
- /* SM supports sendonly-fullmember, otherwise fallback to full-member */
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
spin_unlock_irq(&priv->lock);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 4792b9bf400f..8fcaa1136f2c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -89,13 +89,20 @@ int iser_debug_level = 0;
module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops iscsi_iser_size_ops = {
+ .set = iscsi_iser_set,
+ .get = param_get_uint,
+};
+
static unsigned int iscsi_max_lun = 512;
-module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
-MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512");
+module_param_cb(max_lun, &iscsi_iser_size_ops, &iscsi_max_lun, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session, must be > 0 (default:512)");
unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
-module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
+module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command, must be > 0 (default:1024)");
bool iser_always_reg = true;
module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
@@ -110,6 +117,18 @@ int iser_pi_guard;
module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned int n = 0;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n == 0)
+ return -EINVAL;
+
+ return param_set_uint(val, kp);
+}
+
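
Moving the check into a kernel_param_ops setter means a zero value is rejected whenever the parameter is written (including via /sys/module at runtime for max_sectors), not only at module load as the old iser_init() test did. A reduced module-context sketch of the same pattern (the demo_* names are hypothetical, not the driver's):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

static unsigned int demo_size = 512;

/* reject zero before delegating to the stock uint setter */
static int demo_size_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;

	if (kstrtouint(val, 10, &n) || n == 0)
		return -EINVAL;

	return param_set_uint(val, kp);
}

static const struct kernel_param_ops demo_size_ops = {
	.set = demo_size_set,
	.get = param_get_uint,
};

module_param_cb(demo_size, &demo_size_ops, &demo_size, 0444);
MODULE_PARM_DESC(demo_size, "must be > 0 (default: 512)");
MODULE_LICENSE("GPL");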
/*
* iscsi_iser_recv() - Process a successful recv completion
* @conn: iscsi connection
@@ -571,13 +590,20 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
- SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
- SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
- SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
+ int ret = 0;
+
+ if (prot_caps & IB_PROT_T10DIF_TYPE_1)
+ ret |= SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_2)
+ ret |= SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_3)
+ ret |= SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION;
+
+ return ret;
}
/**
@@ -1009,11 +1035,6 @@ static int __init iser_init(void)
iser_dbg("Starting iSER datamover...\n");
- if (iscsi_max_lun < 1) {
- iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
- return -EINVAL;
- }
-
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index d4e057fac219..afec40da9b58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -169,7 +169,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
domain->sig.dif.ref_escape = true;
if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
-};
+}
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
@@ -390,4 +390,3 @@ err_reg:
return err;
}
-
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2bd18b006893..136f6c4492e0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -685,7 +685,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
iser_disconnected_handler(cma_id);
iser_free_ib_conn_res(iser_conn, destroy);
complete(&iser_conn->ib_completion);
-};
+}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2ba27221ea85..7305ed8976c2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -71,7 +71,6 @@ static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp
return param_set_int(val, kp);
}
-
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
@@ -79,7 +78,6 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
cmd->prot_op != TARGET_PROT_NORMAL);
}
-
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
@@ -232,8 +230,10 @@ isert_create_device_ib_res(struct isert_device *device)
}
/* Check signature cap */
- device->pi_capable = ib_dev->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER ? true : false;
+ if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER)
+ device->pi_capable = true;
+ else
+ device->pi_capable = false;
return 0;
}
@@ -1993,7 +1993,7 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
domain->sig.dif.ref_remap = true;
-};
+}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4933085a864a..cecf0f7cadf9 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -233,7 +233,7 @@ static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
port_info = (struct opa_class_port_info *)rsp_mad->data;
memcpy(port_info, &port->class_port_info, sizeof(*port_info));
- port_info->base_version = OPA_MGMT_BASE_VERSION,
+ port_info->base_version = OPA_MGMT_BASE_VERSION;
port_info->class_version = OPA_EMA_CLASS_VERSION;
/*
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index ba00f0de14ca..b6a0abf40589 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -408,6 +408,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&sess->kobj);
return err;
}
err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
@@ -419,6 +420,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
&sess->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&sess->stats->kobj_stats);
goto remove_group;
}
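
The added kobject_put() calls follow the kobject contract: once kobject_init_and_add() has run, even unsuccessfully, the reference must be dropped with kobject_put() so the ktype's release() frees the object; a bare kfree() would leak the kobject name and skip release(). A kernel-context sketch of the canonical error path (my_ktype and the parent are assumed to exist):

static int demo_create_stats(struct kobject *parent)
{
	struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	int err;

	if (!kobj)
		return -ENOMEM;

	err = kobject_init_and_add(kobj, &my_ktype, parent, "stats");
	if (err) {
		/* never kfree() here: drop the ref so release() cleans up */
		kobject_put(kobj);
		return err;
	}

	return 0;
}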
@@ -469,15 +471,12 @@ int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
-void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+
if (clt->kobj_paths) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
}
}
-
-void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
-{
- sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
-}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 67f86c405a26..0a08b4b742a3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -31,6 +31,8 @@
*/
#define RTRS_RECONNECT_SEED 8
+#define FIRST_CONN 0x01
+
MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");
@@ -178,18 +180,18 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
}
/**
- * __rtrs_clt_change_state() - change the session state through session state
+ * rtrs_clt_change_state() - change the session state through session state
* machine.
*
* @sess: client session to change the state of.
* @new_state: state to change to.
*
- * returns true if successful, false if the requested state can not be set.
+ * returns true if the session state was changed to @new_state, otherwise false.
*
* Locks:
* state_wq lock must be held.
*/
-static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
@@ -286,7 +288,7 @@ static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
spin_lock_irq(&sess->state_wq.lock);
if (sess->state == old_state)
- changed = __rtrs_clt_change_state(sess, new_state);
+ changed = rtrs_clt_change_state(sess, new_state);
spin_unlock_irq(&sess->state_wq.lock);
return changed;
@@ -494,7 +496,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
int err;
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
@@ -514,7 +516,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
u32 buf_id;
int err;
- WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
@@ -621,12 +623,12 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
rtrs_send_hb_ack(&sess->s);
- if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
sess->s.hb_missed_cnt = 0;
- if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
@@ -654,7 +656,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
- if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
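
These conversions from == to & matter because sess->flags is a bit mask: an equality test silently fails as soon as any other flag bit is set alongside RTRS_MSG_NEW_RKEY_F. A short demonstration (the flag values are assumed for illustration):

#include <stdio.h>

#define RTRS_MSG_NEW_RKEY_F	0x01	/* value assumed for illustration */
#define SOME_OTHER_FLAG		0x02	/* hypothetical second flag */

int main(void)
{
	unsigned int flags = RTRS_MSG_NEW_RKEY_F | SOME_OTHER_FLAG;

	printf("eq=%d\n", flags == RTRS_MSG_NEW_RKEY_F);	/* eq=0 */
	printf("and=%d\n", !!(flags & RTRS_MSG_NEW_RKEY_F));	/* and=1 */
	return 0;
}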
@@ -664,7 +666,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
- * and hb
*/
break;
@@ -680,7 +681,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
for (i = 0; i < q_size; i++) {
- if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
@@ -1318,6 +1319,12 @@ out_err:
static void free_permits(struct rtrs_clt *clt)
{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
kfree(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
@@ -1353,21 +1360,14 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
bool changed;
spin_lock_irq(&sess->state_wq.lock);
- *old_state = sess->state;
- changed = __rtrs_clt_change_state(sess, new_state);
+ if (old_state)
+ *old_state = sess->state;
+ changed = rtrs_clt_change_state(sess, new_state);
spin_unlock_irq(&sess->state_wq.lock);
return changed;
}
-static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
- enum rtrs_clt_state new_state)
-{
- enum rtrs_clt_state old_state;
-
- return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
-}
-
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
@@ -1511,7 +1511,7 @@ static void destroy_con(struct rtrs_clt_con *con)
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- u16 wr_queue_size;
+ u32 max_send_wr, max_recv_wr, cq_size;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
@@ -1523,7 +1523,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* + 2 for drain and heartbeat
* in case qp gets into error state
*/
- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
/* We must be the first here */
if (WARN_ON(sess->s.dev))
return -EINVAL;
@@ -1555,25 +1556,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
/* Shared between connections */
sess->s.dev_ref++;
- wr_queue_size =
+ max_send_wr =
min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
sess->queue_depth * 3 + 1);
+ max_recv_wr =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ sess->queue_depth * 3 + 1);
}
/* alloc iu to recv new rkey reply when server reports flags set */
- if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
- con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
return -ENOMEM;
- con->queue_size = wr_queue_size;
+ con->queue_size = max_recv_wr;
}
+ cq_size = max_send_wr + max_recv_wr;
cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, wr_queue_size, wr_queue_size,
- IB_POLL_SOFTIRQ);
+ cq_vector, cq_size, max_send_wr,
+ max_recv_wr, IB_POLL_SOFTIRQ);
/*
* In case of error we do not bother to clean previous allocations,
* since destroy_con_cq_qp() must be called.
@@ -1657,6 +1662,7 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
.cid_num = cpu_to_le16(sess->s.con_num),
.recon_cnt = cpu_to_le16(sess->s.recon_cnt),
};
+ msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
uuid_copy(&msg.sess_uuid, &sess->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
@@ -1742,6 +1748,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
scnprintf(sess->hca_name, sizeof(sess->hca_name),
sess->s.dev->ib_dev->name);
sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ /* set for_new_clt, to allow future reconnect on any path */
+ sess->for_new_clt = 1;
}
return 0;
@@ -1788,7 +1796,7 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
{
- if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &sess->close_work);
if (wait)
flush_work(&sess->close_work);
@@ -2174,7 +2182,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
cancel_delayed_work_sync(&sess->reconnect_dwork);
rtrs_clt_stop_and_destroy_conns(sess);
- rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
}
static int init_conns(struct rtrs_clt_sess *sess)
@@ -2226,7 +2234,7 @@ destroy:
* doing rdma_resolve_addr(), switch to CONNECTION_ERR state
* manually to keep reconnecting.
*/
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2243,7 +2251,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(sess->clt, "Sess info request send failed: %s\n",
ib_wc_status_msg(wc->status));
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
@@ -2367,7 +2375,7 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- rtrs_clt_change_state(sess, state);
+ rtrs_clt_change_state_get_old(sess, state, NULL);
}
static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
@@ -2423,7 +2431,6 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
- goto out;
}
out:
@@ -2433,7 +2440,7 @@ out:
rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
if (unlikely(err))
/* If we've never taken async path because of malloc problems */
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2490,7 +2497,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
/* Stop everything */
rtrs_clt_stop_and_destroy_conns(sess);
msleep(RTRS_RECONNECT_BACKOFF);
- if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
err = init_sess(sess);
if (err)
goto reconnect_again;
@@ -2499,7 +2506,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
return;
reconnect_again:
- if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
sess->stats->reconnects.fail_cnt++;
delay_ms = clt->reconnect_delay_sec * 1000;
queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
@@ -2565,11 +2572,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
clt->dev.class = rtrs_clt_dev_class;
clt->dev.release = rtrs_clt_dev_release;
err = dev_set_name(&clt->dev, "%s", sessname);
- if (err) {
- free_percpu(clt->pcpu_path);
- kfree(clt);
- return ERR_PTR(err);
- }
+ if (err)
+ goto err;
/*
* Suppress user space notification until
* sysfs files are created
@@ -2577,44 +2581,35 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
dev_set_uevent_suppress(&clt->dev, true);
err = device_register(&clt->dev);
if (err) {
- free_percpu(clt->pcpu_path);
put_device(&clt->dev);
- return ERR_PTR(err);
+ goto err;
}
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
- free_percpu(clt->pcpu_path);
- device_unregister(&clt->dev);
- return NULL;
+ err = -ENOMEM;
+ goto err_dev;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
- free_percpu(clt->pcpu_path);
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
- device_unregister(&clt->dev);
- return ERR_PTR(err);
+ goto err_dev;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
-}
-
-static void wait_for_inflight_permits(struct rtrs_clt *clt)
-{
- if (clt->permits_map) {
- size_t sz = clt->queue_depth;
-
- wait_event(clt->permits_wait,
- find_first_bit(clt->permits_map, sz) >= sz);
- }
+err_dev:
+ device_unregister(&clt->dev);
+err:
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt *clt)
{
- wait_for_inflight_permits(clt);
free_permits(clt);
free_percpu(clt->pcpu_path);
mutex_destroy(&clt->paths_ev_mutex);
@@ -2672,6 +2667,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
err = PTR_ERR(sess);
goto close_all_sess;
}
+ if (!i)
+ sess->for_new_clt = 1;
list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
err = init_sess(sess);
@@ -2702,8 +2699,7 @@ close_all_sess:
rtrs_clt_close_conns(sess, true);
kobject_put(&sess->kobj);
}
- rtrs_clt_destroy_sysfs_root_files(clt);
- rtrs_clt_destroy_sysfs_root_folders(clt);
+ rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
out:
@@ -2720,8 +2716,7 @@ void rtrs_clt_close(struct rtrs_clt *clt)
struct rtrs_clt_sess *sess, *tmp;
/* Firstly forbid sysfs access */
- rtrs_clt_destroy_sysfs_root_files(clt);
- rtrs_clt_destroy_sysfs_root_folders(clt);
+ rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index b8dbd701b3cb..692bc83e1f09 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -143,6 +143,7 @@ struct rtrs_clt_sess {
int max_send_sge;
u32 flags;
struct kobject kobj;
+ u8 for_new_clt;
struct rtrs_clt_stats *stats;
/* cache hca_port and hca_name to display in sysfs */
u8 hca_port;
@@ -243,8 +244,7 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
/* rtrs-clt-sysfs.c */
int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 3f2918671dbe..8caad0a2322b 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -188,7 +188,9 @@ struct rtrs_msg_conn_req {
__le16 recon_cnt;
uuid_t sess_uuid;
uuid_t paths_uuid;
- u8 reserved[12];
+ u8 first_conn : 1;
+ u8 reserved_bits : 7;
+ u8 reserved[11];
};
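
Carving first_conn out of the reserved area is wire-compatible: one flag bit plus seven reserved bits replace the first reserved byte, so sizeof(struct rtrs_msg_conn_req) is unchanged. A compile-time check of that arithmetic, reduced to the tail of the struct (holds on common ABIs where a pair of u8 bitfields packs into one byte):

#include <assert.h>
#include <stdint.h>

struct old_tail { uint8_t reserved[12]; };

struct new_tail {
	uint8_t first_conn : 1;
	uint8_t reserved_bits : 7;
	uint8_t reserved[11];
};

static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
	      "bitfield swap must not change the wire size");

int main(void) { return 0; }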
/**
@@ -303,8 +305,9 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
struct ib_send_wr *head);
int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, u16 cq_size,
- u16 wr_queue_size, enum ib_poll_context poll_ctx);
+ u32 max_send_sge, int cq_vector, int cq_size,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index d2edff3b8f0d..126a96e75c62 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -51,6 +51,8 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
+ /* first remove sysfs itself to avoid deadlock */
+ sysfs_remove_file_self(&sess->kobj, &attr->attr);
close_sess(sess);
return count;
@@ -181,6 +183,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
err = -ENOMEM;
pr_err("kobject_create_and_add(): %d\n", err);
device_del(&srv->dev);
+ put_device(&srv->dev);
goto unlock;
}
dev_set_uevent_suppress(&srv->dev, false);
@@ -206,6 +209,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
kobject_put(srv->kobj_paths);
mutex_unlock(&srv->paths_mutex);
device_del(&srv->dev);
+ put_device(&srv->dev);
} else {
mutex_unlock(&srv->paths_mutex);
}
@@ -234,6 +238,7 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
&sess->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ kobject_put(&sess->stats->kobj_stats);
return err;
}
err = sysfs_create_group(&sess->stats->kobj_stats,
@@ -290,8 +295,8 @@ remove_group:
sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
put_kobj:
kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
destroy_root:
+ kobject_put(&sess->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(sess);
return err;
@@ -302,7 +307,7 @@ void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
if (sess->kobj.state_in_sysfs) {
kobject_del(&sess->stats->kobj_stats);
kobject_put(&sess->stats->kobj_stats);
- kobject_del(&sess->kobj);
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
kobject_put(&sess->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(sess);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c42fd470c4eb..d071809e3ed2 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -222,7 +222,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct rtrs_srv *srv = sess->srv;
- struct ib_send_wr inv_wr, imm_wr;
+ struct ib_send_wr inv_wr;
+ struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
enum ib_send_flags flags;
size_t sg_cnt;
@@ -267,21 +268,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
WARN_ON_ONCE(rkey != wr->rkey);
wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.wr_cqe = &io_comp_cqe;
wr->wr.ex.imm_data = 0;
wr->wr.send_flags = 0;
if (need_inval && always_invalidate) {
wr->wr.next = &rwr.wr;
rwr.wr.next = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr->wr.next = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr->wr.next = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else {
- wr->wr.next = &imm_wr;
+ wr->wr.next = &imm_wr.wr;
}
/*
* From time to time we have to post signaled sends,
@@ -294,16 +296,18 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.send_flags = 0;
inv_wr.ex.invalidate_rkey = rkey;
}
- imm_wr.next = NULL;
+ imm_wr.wr.next = NULL;
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &sess->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.mr = srv_mr->mr;
rwr.wr.send_flags = 0;
@@ -318,22 +322,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
- imm_wr.sg_list = &list;
- imm_wr.num_sge = 1;
- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
- imm_wr.sg_list = NULL;
- imm_wr.num_sge = 0;
- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
- imm_wr.send_flags = flags;
- imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
0, need_inval));
- imm_wr.wr_cqe = &io_comp_cqe;
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
@@ -360,7 +364,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
{
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_send_wr inv_wr, *wr = NULL;
+ struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_mr *srv_mr;
@@ -379,6 +384,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
if (need_inval) {
if (likely(sg_cnt)) {
+ inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
@@ -396,15 +402,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
if (need_inval && always_invalidate) {
wr = &inv_wr;
inv_wr.next = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else {
- wr = &imm_wr;
+ wr = &imm_wr.wr;
}
/*
* From time to time we have to post signalled sends,
@@ -413,14 +419,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
0 : IB_SEND_SIGNALED;
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
- imm_wr.next = NULL;
+ imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &sess->mrs[id->msg_id];
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.wr.send_flags = 0;
rwr.mr = srv_mr->mr;
@@ -435,21 +442,21 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
- imm_wr.sg_list = &list;
- imm_wr.num_sge = 1;
- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
- imm_wr.sg_list = NULL;
- imm_wr.num_sge = 0;
- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
- imm_wr.send_flags = flags;
- imm_wr.wr_cqe = &io_comp_cqe;
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
- imm_wr.ex.imm_data = cpu_to_be32(imm);
+ imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
err = ib_post_send(id->con->c.qp, wr, NULL);
if (unlikely(err))
@@ -651,7 +658,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
if (!srv_mr->iu) {
err = -ENOMEM;
rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
- goto free_iu;
+ goto dereg_mr;
}
}
/* Eventually dma addr for each chunk can be cached */
@@ -667,7 +674,6 @@ err:
srv_mr = &sess->mrs[mri];
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
-free_iu:
rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
@@ -814,7 +820,7 @@ static int process_info_req(struct rtrs_srv_con *con,
rwr[mri].wr.opcode = IB_WR_REG_MR;
rwr[mri].wr.wr_cqe = &local_reg_cqe;
rwr[mri].wr.num_sge = 0;
- rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].wr.send_flags = 0;
rwr[mri].mr = mr;
rwr[mri].key = mr->rkey;
rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
@@ -1238,7 +1244,6 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
- * and hb
*/
atomic_add(srv->queue_depth, &con->sq_wr_avail);
@@ -1328,7 +1333,8 @@ static void free_srv(struct rtrs_srv *srv)
}
static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
- const uuid_t *paths_uuid)
+ const uuid_t *paths_uuid,
+ bool first_conn)
{
struct rtrs_srv *srv;
int i;
@@ -1341,13 +1347,18 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
return srv;
}
}
+ mutex_unlock(&ctx->srv_mutex);
+ /*
+ * If this request is not the first connection request from the
+ * client for this session, fail and return an error.
+ */
+ if (!first_conn)
+ return ERR_PTR(-ENXIO);
/* need to allocate a new srv */
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
- if (!srv) {
- mutex_unlock(&ctx->srv_mutex);
- return NULL;
- }
+ if (!srv)
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&srv->paths_list);
mutex_init(&srv->paths_mutex);
@@ -1357,8 +1368,6 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
srv->ctx = ctx;
device_initialize(&srv->dev);
srv->dev.release = rtrs_srv_dev_release;
- list_add(&srv->ctx_list, &ctx->srv_list);
- mutex_unlock(&ctx->srv_mutex);
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
@@ -1371,6 +1380,9 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
goto err_free_chunks;
}
refcount_set(&srv->refcount, 1);
+ mutex_lock(&ctx->srv_mutex);
+ list_add(&srv->ctx_list, &ctx->srv_list);
+ mutex_unlock(&ctx->srv_mutex);
return srv;
@@ -1381,7 +1393,7 @@ err_free_chunks:
err_free_srv:
kfree(srv);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
static void put_srv(struct rtrs_srv *srv)
@@ -1461,10 +1473,12 @@ static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
static void free_sess(struct rtrs_srv_sess *sess)
{
- if (sess->kobj.state_in_sysfs)
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->kobj);
kobject_put(&sess->kobj);
- else
+ } else {
kfree(sess);
+ }
}
static void rtrs_srv_close_work(struct work_struct *work)
@@ -1586,7 +1600,7 @@ static int create_con(struct rtrs_srv_sess *sess,
struct rtrs_sess *s = &sess->s;
struct rtrs_srv_con *con;
- u16 cq_size, wr_queue_size;
+ u32 cq_size, wr_queue_size;
int err, cq_vector;
con = kzalloc(sizeof(*con), GFP_KERNEL);
@@ -1600,7 +1614,7 @@ static int create_con(struct rtrs_srv_sess *sess,
con->c.cm_id = cm_id;
con->c.sess = &sess->s;
con->c.cid = cid;
- atomic_set(&con->wr_cnt, 0);
+ atomic_set(&con->wr_cnt, 1);
if (con->c.cid == 0) {
/*
@@ -1630,7 +1644,8 @@ static int create_con(struct rtrs_srv_sess *sess,
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
- wr_queue_size, IB_POLL_WORKQUEUE);
+ wr_queue_size, wr_queue_size,
+ IB_POLL_WORKQUEUE);
if (err) {
rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
goto free_con;
@@ -1781,13 +1796,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_econnreset;
}
recon_cnt = le16_to_cpu(msg->recon_cnt);
- srv = get_or_create_srv(ctx, &msg->paths_uuid);
- /*
- * "refcount == 0" happens if a previous thread calls get_or_create_srv
- * allocate srv, but chunks of srv are not allocated yet.
- */
- if (!srv || refcount_read(&srv->refcount) == 0) {
- err = -ENOMEM;
+ srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
+ if (IS_ERR(srv)) {
+ err = PTR_ERR(srv);
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
@@ -1862,8 +1873,8 @@ reject_w_econnreset:
return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
close_and_return_err:
- close_sess(sess);
mutex_unlock(&srv->paths_mutex);
+ close_sess(sess);
return err;
}
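
The get_or_create_srv() rework above is a publish-after-initialization pattern: the new srv is added to ctx->srv_list only once it is fully set up and its refcount is 1, which removes the earlier "refcount == 0" window that readers had to check for, and failures are now distinguished via ERR_PTR() instead of a bare NULL. A hedged sketch of the shape (locking and fields simplified, __find_and_get() hypothetical):

static struct my_obj *get_or_create(struct my_ctx *ctx, bool first)
{
	struct my_obj *obj;

	mutex_lock(&ctx->lock);
	obj = __find_and_get(ctx);		/* takes a reference */
	mutex_unlock(&ctx->lock);
	if (obj)
		return obj;
	if (!first)
		return ERR_PTR(-ENXIO);		/* must already exist */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	/* ... finish all initialization before publishing ... */
	refcount_set(&obj->ref, 1);

	mutex_lock(&ctx->lock);
	list_add(&obj->node, &ctx->list);	/* visible only now */
	mutex_unlock(&ctx->lock);
	return obj;
}

Callers then test IS_ERR() and propagate PTR_ERR(), as rtrs_rdma_connect() now does.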
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 2e3a849e0a77..d13aff0aa816 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -182,16 +182,16 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
u32 imm_data, enum ib_send_flags flags,
struct ib_send_wr *head)
{
- struct ib_send_wr wr;
+ struct ib_rdma_wr wr;
- wr = (struct ib_send_wr) {
- .wr_cqe = cqe,
- .send_flags = flags,
- .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
- .ex.imm_data = cpu_to_be32(imm_data),
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = cqe,
+ .wr.send_flags = flags,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
};
- return rtrs_post_send(con->qp, head, &wr);
+ return rtrs_post_send(con->qp, head, &wr.wr);
}
EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
@@ -231,14 +231,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
}
static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
- u16 wr_queue_size, u32 max_sge)
+ u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
{
struct ib_qp_init_attr init_attr = {NULL};
struct rdma_cm_id *cm_id = con->cm_id;
int ret;
- init_attr.cap.max_send_wr = wr_queue_size;
- init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_send_wr = max_send_wr;
+ init_attr.cap.max_recv_wr = max_recv_wr;
init_attr.cap.max_recv_sge = 1;
init_attr.event_handler = qp_event_handler;
init_attr.qp_context = con;
@@ -260,8 +260,9 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
}
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, u16 cq_size,
- u16 wr_queue_size, enum ib_poll_context poll_ctx)
+ u32 max_send_sge, int cq_vector, int cq_size,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx)
{
int err;
@@ -269,7 +270,8 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
if (err)
return err;
- err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+ max_send_sge);
if (err) {
ib_free_cq(con->cq);
con->cq = NULL;
@@ -308,7 +310,7 @@ void rtrs_send_hb_ack(struct rtrs_sess *sess)
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- IB_SEND_SIGNALED, NULL);
+ 0, NULL);
if (err) {
sess->hb_err_handler(usr_con);
return;
@@ -337,7 +339,7 @@ static void hb_work(struct work_struct *work)
}
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- IB_SEND_SIGNALED, NULL);
+ 0, NULL);
if (err) {
sess->hb_err_handler(usr_con);
return;
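
The rtrs.c hunk above is the core of the ib_rdma_wr conversion: IB_WR_RDMA_WRITE_WITH_IMM work requests are defined by struct ib_rdma_wr, which embeds the generic struct ib_send_wr as .wr, so both initialization and chaining must go through the embedded member. A minimal sketch, assuming qp, cqe and head come from the caller:

static int post_write_imm_empty(struct ib_qp *qp, struct ib_cqe *cqe,
				u32 imm, struct ib_send_wr *head)
{
	struct ib_rdma_wr wr = {
		.wr.wr_cqe	= cqe,
		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
		.wr.ex.imm_data	= cpu_to_be32(imm),
	};

	if (head) {
		struct ib_send_wr *tail = head;

		while (tail->next)
			tail = tail->next;
		tail->next = &wr.wr;	/* chain via the embedded WR */
	}

	return ib_post_send(qp, head ?: &wr.wr, NULL);
}

The heartbeat hunks also drop IB_SEND_SIGNALED: with create_con() starting wr_cnt at 1 and the per-connection counter signalling every queue_depth-th post, unconditionally signalled heartbeats are no longer needed to drain the send queue.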
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5492b66a8153..31f8aa2c40ed 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3628,7 +3628,7 @@ static ssize_t srp_create_target(struct device *dev,
struct srp_rdma_ch *ch;
struct srp_device *srp_dev = host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
- int ret, node_idx, node, cpu, i;
+ int ret, i, ch_idx;
unsigned int max_sectors_per_mr, mr_per_cmd = 0;
bool multich = false;
uint32_t max_iu_len;
@@ -3753,81 +3753,61 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- if (target->ch_count == 0)
+ if (target->ch_count == 0) {
target->ch_count =
- max_t(unsigned int, num_online_nodes(),
- min(ch_count ?:
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ min(ch_count ?:
+ max(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus());
+ }
+
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
goto out;
- node_idx = 0;
- for_each_online_node(node) {
- const int ch_start = (node_idx * target->ch_count /
- num_online_nodes());
- const int ch_end = ((node_idx + 1) * target->ch_count /
- num_online_nodes());
- const int cv_start = node_idx * ibdev->num_comp_vectors /
- num_online_nodes();
- const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
- num_online_nodes();
- int cpu_idx = 0;
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) != node)
- continue;
- if (ch_start + cpu_idx >= ch_end)
- continue;
- ch = &target->ch[ch_start + cpu_idx];
- ch->target = target;
- ch->comp_vector = cv_start == cv_end ? cv_start :
- cv_start + cpu_idx % (cv_end - cv_start);
- spin_lock_init(&ch->lock);
- INIT_LIST_HEAD(&ch->free_tx);
- ret = srp_new_cm_id(ch);
- if (ret)
- goto err_disconnect;
+ for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
+ ch = &target->ch[ch_idx];
+ ch->target = target;
+ ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->free_tx);
+ ret = srp_new_cm_id(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_create_ch_ib(ch);
- if (ret)
- goto err_disconnect;
+ ret = srp_create_ch_ib(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_alloc_req_data(ch);
- if (ret)
- goto err_disconnect;
+ ret = srp_alloc_req_data(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_connect_ch(ch, max_iu_len, multich);
- if (ret) {
- char dst[64];
-
- if (target->using_rdma_cm)
- snprintf(dst, sizeof(dst), "%pIS",
- &target->rdma_cm.dst);
- else
- snprintf(dst, sizeof(dst), "%pI6",
- target->ib_cm.orig_dgid.raw);
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %s failed\n",
- ch_start + cpu_idx,
- target->ch_count, dst);
- if (node_idx == 0 && cpu_idx == 0) {
- goto free_ch;
- } else {
- srp_free_ch_ib(target, ch);
- srp_free_req_data(target, ch);
- target->ch_count = ch - target->ch;
- goto connected;
- }
- }
+ ret = srp_connect_ch(ch, max_iu_len, multich);
+ if (ret) {
+ char dst[64];
- multich = true;
- cpu_idx++;
+ if (target->using_rdma_cm)
+ snprintf(dst, sizeof(dst), "%pIS",
+ &target->rdma_cm.dst);
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Connection %d/%d to %s failed\n",
+ ch_idx,
+ target->ch_count, dst);
+ if (ch_idx == 0) {
+ goto free_ch;
+ } else {
+ srp_free_ch_ib(target, ch);
+ srp_free_req_data(target, ch);
+ target->ch_count = ch - target->ch;
+ goto connected;
+ }
}
- node_idx++;
+ multich = true;
}
connected:
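
For context on the new default above: the old code sized the channel count per NUMA node and then walked nodes and CPUs; the new code picks one global count and spreads channels round-robin across completion vectors. As a worked example (values assumed, not from the patch): on a host with 2 online nodes, 16 online CPUs and an HCA exposing 8 completion vectors, with no ch_count module parameter, target->ch_count = min(max(4 * 2, 8), 16) = 8, and channel i uses comp_vector i % 8.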
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index a2b5fbba2d3b..430dc6975004 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
if (IS_ERR(abspam))
return PTR_ERR(abspam);
- for (i = 0; i < joydev->nabs; i++) {
+ for (i = 0; i < len && i < joydev->nabs; i++) {
if (abspam[i] > ABS_MAX) {
retval = -EINVAL;
goto out;
@@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
int i;
int retval = 0;
+ if (len % sizeof(*keypam))
+ return -EINVAL;
+
len = min(len, sizeof(joydev->keypam));
/* Validate the map. */
@@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
if (IS_ERR(keypam))
return PTR_ERR(keypam);
- for (i = 0; i < joydev->nkey; i++) {
+ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
retval = -EINVAL;
goto out;
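
The joydev hunks above tighten validation of user-supplied axis and button maps: the length must be an exact multiple of the element size, and the validation loop is bounded by what the caller actually passed in, not only by the device's axis/button count. A standalone sketch of the same checks (hypothetical helper, not joydev code):

static int validate_btn_map(const __u16 *keypam, size_t len, int nkey)
{
	size_t i;

	if (len % sizeof(*keypam))
		return -EINVAL;		/* truncated element */

	for (i = 0; i < len / sizeof(*keypam) && i < nkey; i++)
		if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC)
			return -EINVAL;	/* not a button code */

	return 0;
}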
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index b080f0cfb068..5e38899058c1 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -382,4 +382,11 @@ config JOYSTICK_FSIA6B
To compile this driver as a module, choose M here: the
module will be called fsia6b.
+config JOYSTICK_N64
+ bool "N64 controller"
+ depends on MACH_NINTENDO64
+ help
+ Say Y here if you want to enable support for the four
+ built-in controller ports on the Nintendo 64 console.
+
endif
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 58232b3057d3..31d720c9e493 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_JOYSTICK_INTERACT) += interact.o
obj-$(CONFIG_JOYSTICK_JOYDUMP) += joydump.o
obj-$(CONFIG_JOYSTICK_MAGELLAN) += magellan.o
obj-$(CONFIG_JOYSTICK_MAPLE) += maplecontrol.o
+obj-$(CONFIG_JOYSTICK_N64) += n64joy.o
obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o
obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o
obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o
@@ -37,4 +38,3 @@ obj-$(CONFIG_JOYSTICK_WARRIOR) += warrior.o
obj-$(CONFIG_JOYSTICK_WALKERA0701) += walkera0701.o
obj-$(CONFIG_JOYSTICK_XPAD) += xpad.o
obj-$(CONFIG_JOYSTICK_ZHENHUA) += zhenhua.o
-
diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c
new file mode 100644
index 000000000000..8bcc529942bc
--- /dev/null
+++ b/drivers/input/joystick/n64joy.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the four N64 controllers.
+ *
+ * Copyright (c) 2021 Lauri Kasanen
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/limits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("Lauri Kasanen <cand@gmx.com>");
+MODULE_DESCRIPTION("Driver for N64 controllers");
+MODULE_LICENSE("GPL");
+
+#define PIF_RAM 0x1fc007c0
+
+#define SI_DRAM_REG 0
+#define SI_READ_REG 1
+#define SI_WRITE_REG 4
+#define SI_STATUS_REG 6
+
+#define SI_STATUS_DMA_BUSY BIT(0)
+#define SI_STATUS_IO_BUSY BIT(1)
+
+#define N64_CONTROLLER_ID 0x0500
+
+#define MAX_CONTROLLERS 4
+
+static const char *n64joy_phys[MAX_CONTROLLERS] = {
+ "n64joy/port0",
+ "n64joy/port1",
+ "n64joy/port2",
+ "n64joy/port3",
+};
+
+struct n64joy_priv {
+ u64 si_buf[8] ____cacheline_aligned;
+ struct timer_list timer;
+ struct mutex n64joy_mutex;
+ struct input_dev *n64joy_dev[MAX_CONTROLLERS];
+ u32 __iomem *reg_base;
+ u8 n64joy_opened;
+};
+
+struct joydata {
+ unsigned int: 16; /* unused */
+ unsigned int err: 2;
+ unsigned int: 14; /* unused */
+
+ union {
+ u32 data;
+
+ struct {
+ unsigned int a: 1;
+ unsigned int b: 1;
+ unsigned int z: 1;
+ unsigned int start: 1;
+ unsigned int up: 1;
+ unsigned int down: 1;
+ unsigned int left: 1;
+ unsigned int right: 1;
+ unsigned int: 2; /* unused */
+ unsigned int l: 1;
+ unsigned int r: 1;
+ unsigned int c_up: 1;
+ unsigned int c_down: 1;
+ unsigned int c_left: 1;
+ unsigned int c_right: 1;
+ signed int x: 8;
+ signed int y: 8;
+ };
+ };
+};
+
+static void n64joy_write_reg(u32 __iomem *reg_base, const u8 reg, const u32 value)
+{
+ writel(value, reg_base + reg);
+}
+
+static u32 n64joy_read_reg(u32 __iomem *reg_base, const u8 reg)
+{
+ return readl(reg_base + reg);
+}
+
+static void n64joy_wait_si_dma(u32 __iomem *reg_base)
+{
+ while (n64joy_read_reg(reg_base, SI_STATUS_REG) &
+ (SI_STATUS_DMA_BUSY | SI_STATUS_IO_BUSY))
+ cpu_relax();
+}
+
+static void n64joy_exec_pif(struct n64joy_priv *priv, const u64 in[8])
+{
+ unsigned long flags;
+
+ dma_cache_wback_inv((unsigned long) in, 8 * 8);
+ dma_cache_inv((unsigned long) priv->si_buf, 8 * 8);
+
+ local_irq_save(flags);
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_DRAM_REG, virt_to_phys(in));
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_WRITE_REG, PIF_RAM);
+ barrier();
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_DRAM_REG, virt_to_phys(priv->si_buf));
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_READ_REG, PIF_RAM);
+ barrier();
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ local_irq_restore(flags);
+}
+
+static const u64 polldata[] ____cacheline_aligned = {
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xfe00000000000000,
+ 0,
+ 0,
+ 1
+};
+
+static void n64joy_poll(struct timer_list *t)
+{
+ const struct joydata *data;
+ struct n64joy_priv *priv = container_of(t, struct n64joy_priv, timer);
+ struct input_dev *dev;
+ u32 i;
+
+ n64joy_exec_pif(priv, polldata);
+
+ data = (struct joydata *) priv->si_buf;
+
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!priv->n64joy_dev[i])
+ continue;
+
+ dev = priv->n64joy_dev[i];
+
+ /* d-pad */
+ input_report_key(dev, BTN_DPAD_UP, data[i].up);
+ input_report_key(dev, BTN_DPAD_DOWN, data[i].down);
+ input_report_key(dev, BTN_DPAD_LEFT, data[i].left);
+ input_report_key(dev, BTN_DPAD_RIGHT, data[i].right);
+
+ /* c buttons */
+ input_report_key(dev, BTN_FORWARD, data[i].c_up);
+ input_report_key(dev, BTN_BACK, data[i].c_down);
+ input_report_key(dev, BTN_LEFT, data[i].c_left);
+ input_report_key(dev, BTN_RIGHT, data[i].c_right);
+
+ /* matching buttons */
+ input_report_key(dev, BTN_START, data[i].start);
+ input_report_key(dev, BTN_Z, data[i].z);
+
+ /* remaining ones: a, b, l, r */
+ input_report_key(dev, BTN_0, data[i].a);
+ input_report_key(dev, BTN_1, data[i].b);
+ input_report_key(dev, BTN_2, data[i].l);
+ input_report_key(dev, BTN_3, data[i].r);
+
+ input_report_abs(dev, ABS_X, data[i].x);
+ input_report_abs(dev, ABS_Y, data[i].y);
+
+ input_sync(dev);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
+}
+
+static int n64joy_open(struct input_dev *dev)
+{
+ struct n64joy_priv *priv = input_get_drvdata(dev);
+ int err;
+
+ err = mutex_lock_interruptible(&priv->n64joy_mutex);
+ if (err)
+ return err;
+
+ if (!priv->n64joy_opened) {
+ /*
+ * We could use the vblank irq, but it's not important if
+ * the poll point slightly changes.
+ */
+ timer_setup(&priv->timer, n64joy_poll, 0);
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
+ }
+
+ priv->n64joy_opened++;
+
+ mutex_unlock(&priv->n64joy_mutex);
+ return err;
+}
+
+static void n64joy_close(struct input_dev *dev)
+{
+ struct n64joy_priv *priv = input_get_drvdata(dev);
+
+ mutex_lock(&priv->n64joy_mutex);
+ if (!--priv->n64joy_opened)
+ del_timer_sync(&priv->timer);
+ mutex_unlock(&priv->n64joy_mutex);
+}
+
+static const u64 __initconst scandata[] ____cacheline_aligned = {
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xfe00000000000000,
+ 0,
+ 0,
+ 1
+};
+
+/*
+ * The target device is embedded and RAM-constrained. We save RAM
+ * by initializing in __init code that gets dropped late in boot.
+ * For the same reason there is no module or unloading support.
+ */
+static int __init n64joy_probe(struct platform_device *pdev)
+{
+ const struct joydata *data;
+ struct n64joy_priv *priv;
+ struct input_dev *dev;
+ int err = 0;
+ u32 i, j, found = 0;
+
+ priv = kzalloc(sizeof(struct n64joy_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ mutex_init(&priv->n64joy_mutex);
+
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (!priv->reg_base) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* The controllers are not hotpluggable, so we can scan in init */
+ n64joy_exec_pif(priv, scandata);
+
+ data = (struct joydata *) priv->si_buf;
+
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!data[i].err && data[i].data >> 16 == N64_CONTROLLER_ID) {
+ found++;
+
+ dev = priv->n64joy_dev[i] = input_allocate_device();
+ if (!priv->n64joy_dev[i]) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ input_set_drvdata(dev, priv);
+
+ dev->name = "N64 controller";
+ dev->phys = n64joy_phys[i];
+ dev->id.bustype = BUS_HOST;
+ dev->id.vendor = 0;
+ dev->id.product = data[i].data >> 16;
+ dev->id.version = 0;
+ dev->dev.parent = &pdev->dev;
+
+ dev->open = n64joy_open;
+ dev->close = n64joy_close;
+
+ /* d-pad */
+ input_set_capability(dev, EV_KEY, BTN_DPAD_UP);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_DOWN);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_LEFT);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_RIGHT);
+ /* c buttons */
+ input_set_capability(dev, EV_KEY, BTN_LEFT);
+ input_set_capability(dev, EV_KEY, BTN_RIGHT);
+ input_set_capability(dev, EV_KEY, BTN_FORWARD);
+ input_set_capability(dev, EV_KEY, BTN_BACK);
+ /* matching buttons */
+ input_set_capability(dev, EV_KEY, BTN_START);
+ input_set_capability(dev, EV_KEY, BTN_Z);
+ /* remaining ones: a, b, l, r */
+ input_set_capability(dev, EV_KEY, BTN_0);
+ input_set_capability(dev, EV_KEY, BTN_1);
+ input_set_capability(dev, EV_KEY, BTN_2);
+ input_set_capability(dev, EV_KEY, BTN_3);
+
+ for (j = 0; j < 2; j++)
+ input_set_abs_params(dev, ABS_X + j,
+ S8_MIN, S8_MAX, 0, 0);
+
+ err = input_register_device(dev);
+ if (err) {
+ input_free_device(dev);
+ goto fail;
+ }
+ }
+ }
+
+ pr_info("%u controller(s) connected\n", found);
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+fail:
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!priv->n64joy_dev[i])
+ continue;
+ input_unregister_device(priv->n64joy_dev[i]);
+ }
+ return err;
+}
+
+static struct platform_driver n64joy_driver = {
+ .driver = {
+ .name = "n64joy",
+ },
+};
+
+static int __init n64joy_init(void)
+{
+ return platform_driver_probe(&n64joy_driver, n64joy_probe);
+}
+
+module_init(n64joy_init);
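
The new driver above polls rather than using interrupts: n64joy_open() arms a ~16 ms (~60 Hz) timer for the first opener, the poll callback re-arms itself, and n64joy_close() tears the timer down synchronously for the last closer. A reduced sketch of that open-counted polling pattern (locking omitted, from_timer() used in place of the open-coded container_of()):

static void poll_fn(struct timer_list *t)
{
	struct n64joy_priv *priv = from_timer(priv, t, timer);

	/* ... read the hardware and report input events ... */

	mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
}

static int open_fn(struct input_dev *dev)
{
	struct n64joy_priv *priv = input_get_drvdata(dev);

	if (!priv->n64joy_opened++) {
		timer_setup(&priv->timer, poll_fn, 0);
		mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
	}
	return 0;
}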
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 0687f0ed60b8..9f0d07dcbf06 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -215,9 +215,17 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -296,6 +304,10 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -429,8 +441,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 2b321c17054a..32d15809ae58 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -446,7 +446,7 @@ config KEYBOARD_MPR121
config KEYBOARD_SNVS_PWRKEY
tristate "IMX SNVS Power Key Driver"
- depends on ARCH_MXC || COMPILE_TEST
+ depends on ARCH_MXC || (COMPILE_TEST && HAS_IOMEM)
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
@@ -685,7 +685,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
- depends on OF || ARCH_OMAP2PLUS
+ depends on (OF && HAS_IOMEM) || ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
@@ -773,7 +773,7 @@ config KEYBOARD_CAP11XX
config KEYBOARD_BCM
tristate "Broadcom keypad driver"
- depends on OF && HAVE_CLK
+ depends on OF && HAVE_CLK && HAS_IOMEM
select INPUT_MATRIXKMAP
default ARCH_BCM_CYGNUS
help
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index d22223154177..eda1b23002b5 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -48,6 +48,7 @@
#include <linux/efi.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/ktime.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -409,7 +410,7 @@ struct applespi_data {
unsigned int cmd_msg_cntr;
/* lock to protect the above parameters and flags below */
spinlock_t cmd_msg_lock;
- bool cmd_msg_queued;
+ ktime_t cmd_msg_queued;
enum applespi_evt_type cmd_evt_type;
struct led_classdev backlight_info;
@@ -729,7 +730,7 @@ static void applespi_msg_complete(struct applespi_data *applespi,
wake_up_all(&applespi->drain_complete);
if (is_write_msg) {
- applespi->cmd_msg_queued = false;
+ applespi->cmd_msg_queued = 0;
applespi_send_cmd_msg(applespi);
}
@@ -748,6 +749,8 @@ static void applespi_async_write_complete(void *context)
applespi->tx_status,
APPLESPI_STATUS_SIZE);
+ udelay(SPI_RW_CHG_DELAY_US);
+
if (!applespi_check_write_status(applespi, applespi->wr_m.status)) {
/*
* If we got an error, we presumably won't get the expected
@@ -771,8 +774,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return 0;
/* check whether send is in progress */
- if (applespi->cmd_msg_queued)
- return 0;
+ if (applespi->cmd_msg_queued) {
+ if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000)
+ return 0;
+
+ dev_warn(&applespi->spi->dev, "Command %d timed out\n",
+ applespi->cmd_evt_type);
+
+ applespi->cmd_msg_queued = 0;
+ applespi->write_active = false;
+ }
/* set up packet */
memset(packet, 0, APPLESPI_PACKET_SIZE);
@@ -869,7 +880,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return sts;
}
- applespi->cmd_msg_queued = true;
+ applespi->cmd_msg_queued = ktime_get_coarse();
applespi->write_active = true;
return 0;
@@ -1921,7 +1932,7 @@ static int __maybe_unused applespi_resume(struct device *dev)
applespi->drain = false;
applespi->have_cl_led_on = false;
applespi->have_bl_level = 0;
- applespi->cmd_msg_queued = false;
+ applespi->cmd_msg_queued = 0;
applespi->read_active = false;
applespi->write_active = false;
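
The applespi change above replaces a bool "command queued" flag with the ktime_t at which the command was queued (0 meaning idle), so the send path can both test the flag and detect a completion that never arrived, recovering instead of wedging forever. A hedged sketch of the core test (hypothetical helper):

/* Returns true if a queued command has exceeded timeout_ms. */
static bool cmd_timed_out(ktime_t queued_at, s64 timeout_ms)
{
	return queued_at &&
	       ktime_ms_delta(ktime_get(), queued_at) >= timeout_ms;
}

ktime_get_coarse() suffices when stamping the queue time, since only millisecond resolution is needed; the patch pairs this with a 1000 ms timeout and a udelay(SPI_RW_CHG_DELAY_US) settling delay after write completions.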
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index b379ed762878..38457d9641bd 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -27,6 +27,8 @@
#include <asm/unaligned.h>
+#define MAX_NUM_TOP_ROW_KEYS 15
+
/**
* struct cros_ec_keyb - Structure representing EC keyboard device
*
@@ -42,6 +44,9 @@
* @idev: The input device for the matrix keys.
* @bs_idev: The input device for non-matrix buttons and switches (or NULL).
* @notifier: interrupt event notifier for transport devices
+ * @function_row_physmap: An array of the encoded rows/columns for the top
+ * row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
*/
struct cros_ec_keyb {
unsigned int rows;
@@ -58,6 +63,9 @@ struct cros_ec_keyb {
struct input_dev *idev;
struct input_dev *bs_idev;
struct notifier_block notifier;
+
+ u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
+ size_t num_function_row_keys;
};
/**
@@ -527,6 +535,11 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
+ struct property *prop;
+ const __be32 *p;
+ u16 *physmap;
+ u32 key_pos;
+ int row, col;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -578,6 +591,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
+ physmap = ckdev->function_row_physmap;
+ of_property_for_each_u32(dev->of_node, "function-row-physmap",
+ prop, p, key_pos) {
+ if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+ dev_warn(dev, "Only support up to %d top row keys\n",
+ MAX_NUM_TOP_ROW_KEYS);
+ break;
+ }
+ row = KEY_ROW(key_pos);
+ col = KEY_COL(key_pos);
+ *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap++;
+ ckdev->num_function_row_keys++;
+ }
+
err = input_register_device(ckdev->idev);
if (err) {
dev_err(dev, "cannot register input device\n");
@@ -587,6 +615,51 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
return 0;
}
+static ssize_t function_row_physmap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t size = 0;
+ int i;
+ struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+ u16 *physmap = ckdev->function_row_physmap;
+
+ for (i = 0; i < ckdev->num_function_row_keys; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "%s%02X", size ? " " : "", physmap[i]);
+ if (size)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+ return size;
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+
+static struct attribute *cros_ec_keyb_attrs[] = {
+ &dev_attr_function_row_physmap.attr,
+ NULL,
+};
+
+static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_function_row_physmap.attr &&
+ !ckdev->num_function_row_keys)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group cros_ec_keyb_attr_group = {
+ .is_visible = cros_ec_keyb_attr_is_visible,
+ .attrs = cros_ec_keyb_attrs,
+};
+
+
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
@@ -617,6 +690,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return err;
}
+ err = devm_device_add_group(dev, &cros_ec_keyb_attr_group);
+ if (err) {
+ dev_err(dev, "failed to create attributes. err=%d\n", err);
+ return err;
+ }
+
ckdev->notifier.notifier_call = cros_ec_keyb_work;
err = blocking_notifier_chain_register(&ckdev->ec->event_notifier,
&ckdev->notifier);
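
Two patterns above are worth noting. The top-row keys are parsed with of_property_for_each_u32() into a fixed array, and the new sysfs attribute is published through an attribute group whose ->is_visible() hook hides the file on keyboards without custom top-row keys. A reduced sketch of the visibility hook (kobj_to_dev() used in place of the open-coded container_of()):

static umode_t my_is_visible(struct kobject *kobj,
			     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);

	if (attr == &dev_attr_function_row_physmap.attr &&
	    !ckdev->num_function_row_keys)
		return 0;		/* omit the file entirely */

	return attr->mode;		/* keep the declared permissions */
}

Returning 0 from ->is_visible() means the attribute is never created at all, which is cleaner than creating it and erroring out of ->show().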
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index daf6a753ca61..dae053596572 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -304,7 +304,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
return err;
}
-static int locomokbd_remove(struct locomo_dev *dev)
+static void locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
@@ -318,8 +318,6 @@ static int locomokbd_remove(struct locomo_dev *dev)
release_mem_region((unsigned long) dev->mapbase, dev->length);
kfree(locomokbd);
-
- return 0;
}
static struct locomo_driver keyboard_driver = {
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b17ac2a295b9..43375b38ee59 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -60,6 +60,8 @@
((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
+#define OMAP4_KEYPAD_AUTOIDLE_MS 50 /* Approximate measured time */
+#define OMAP4_KEYPAD_IDLE_CHECK_MS (OMAP4_KEYPAD_AUTOIDLE_MS / 2)
enum {
KBD_REVISION_OMAP4 = 0,
@@ -71,6 +73,7 @@ struct omap4_keypad {
void __iomem *base;
unsigned int irq;
+ struct mutex lock; /* for key scan */
unsigned int rows;
unsigned int cols;
@@ -78,7 +81,7 @@ struct omap4_keypad {
u32 irqreg_offset;
unsigned int row_shift;
bool no_autorepeat;
- unsigned char key_state[8];
+ u64 keys;
unsigned short *keymap;
};
@@ -107,6 +110,55 @@ static void kbd_write_irqreg(struct omap4_keypad *keypad_data,
keypad_data->base + keypad_data->irqreg_offset + offset);
}
+static int omap4_keypad_report_keys(struct omap4_keypad *keypad_data,
+ u64 keys, bool down)
+{
+ struct input_dev *input_dev = keypad_data->input;
+ unsigned int col, row, code;
+ DECLARE_BITMAP(mask, 64);
+ unsigned long bit;
+ int events = 0;
+
+ bitmap_from_u64(mask, keys);
+
+ for_each_set_bit(bit, mask, keypad_data->rows * BITS_PER_BYTE) {
+ row = bit / BITS_PER_BYTE;
+ col = bit % BITS_PER_BYTE;
+ code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev, keypad_data->keymap[code], down);
+
+ events++;
+ }
+
+ if (events)
+ input_sync(input_dev);
+
+ return events;
+}
+
+static void omap4_keypad_scan_keys(struct omap4_keypad *keypad_data, u64 keys)
+{
+ u64 changed;
+
+ mutex_lock(&keypad_data->lock);
+
+ changed = keys ^ keypad_data->keys;
+
+ /*
+ * Report key-up events separately and first. This matters in case we
+ * lost a key-up interrupt and are just now catching up.
+ */
+ omap4_keypad_report_keys(keypad_data, changed & ~keys, false);
+
+ /* Report key down events */
+ omap4_keypad_report_keys(keypad_data, changed & keys, true);
+
+ keypad_data->keys = keys;
+
+ mutex_unlock(&keypad_data->lock);
+}
/* Interrupt handlers */
static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
@@ -122,48 +174,44 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
{
struct omap4_keypad *keypad_data = dev_id;
- struct input_dev *input_dev = keypad_data->input;
- unsigned char key_state[ARRAY_SIZE(keypad_data->key_state)];
- unsigned int col, row, code, changed;
- u32 *new_state = (u32 *) key_state;
-
- *new_state = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
- *(new_state + 1) = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
-
- for (row = 0; row < keypad_data->rows; row++) {
- changed = key_state[row] ^ keypad_data->key_state[row];
- if (!changed)
- continue;
-
- for (col = 0; col < keypad_data->cols; col++) {
- if (changed & (1 << col)) {
- code = MATRIX_SCAN_CODE(row, col,
- keypad_data->row_shift);
- input_event(input_dev, EV_MSC, MSC_SCAN, code);
- input_report_key(input_dev,
- keypad_data->keymap[code],
- key_state[row] & (1 << col));
- }
- }
+ struct device *dev = keypad_data->input->dev.parent;
+ u32 low, high;
+ int error;
+ u64 keys;
+
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(dev);
+ return IRQ_NONE;
}
- input_sync(input_dev);
+ low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
+ high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
+ keys = low | (u64)high << 32;
- memcpy(keypad_data->key_state, key_state,
- sizeof(keypad_data->key_state));
+ omap4_keypad_scan_keys(keypad_data, keys);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return IRQ_HANDLED;
}
static int omap4_keypad_open(struct input_dev *input)
{
struct omap4_keypad *keypad_data = input_get_drvdata(input);
+ struct device *dev = input->dev.parent;
+ int error;
- pm_runtime_get_sync(input->dev.parent);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(dev);
+ return error;
+ }
disable_irq(keypad_data->irq);
@@ -176,13 +224,15 @@ static int omap4_keypad_open(struct input_dev *input)
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_DEF_IRQENABLE_EVENTEN |
- OMAP4_DEF_IRQENABLE_LONGKEY);
+ OMAP4_DEF_IRQENABLE_EVENTEN);
kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE,
- OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA);
+ OMAP4_DEF_WUP_EVENT_ENA);
enable_irq(keypad_data->irq);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -200,14 +250,20 @@ static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
static void omap4_keypad_close(struct input_dev *input)
{
- struct omap4_keypad *keypad_data;
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+ struct device *dev = input->dev.parent;
+ int error;
+
+ error = pm_runtime_get_sync(dev);
+ if (error < 0)
+ pm_runtime_put_noidle(dev);
- keypad_data = input_get_drvdata(input);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
enable_irq(keypad_data->irq);
- pm_runtime_put_sync(input->dev.parent);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static int omap4_keypad_parse_dt(struct device *dev,
@@ -252,8 +308,41 @@ static int omap4_keypad_check_revision(struct device *dev,
return 0;
}
+/*
+ * Errata ID i689 "1.32 Keyboard Key Up Event Can Be Missed".
+ * Interrupt may not happen for key-up events. We must clear stuck
+ * key-up events after the keyboard hardware has auto-idled.
+ */
+static int __maybe_unused omap4_keypad_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
+ u32 active;
+
+ active = kbd_readl(keypad_data, OMAP4_KBD_STATEMACHINE);
+ if (active) {
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+
+ omap4_keypad_scan_keys(keypad_data, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap4_keypad_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap4_keypad_runtime_suspend, NULL, NULL)
+};
+
+static void omap4_disable_pm(void *d)
+{
+ pm_runtime_dont_use_autosuspend(d);
+ pm_runtime_disable(d);
+}
+
static int omap4_keypad_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
struct resource *res;
@@ -271,63 +360,62 @@ static int omap4_keypad_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
+ keypad_data = devm_kzalloc(dev, sizeof(*keypad_data), GFP_KERNEL);
if (!keypad_data) {
- dev_err(&pdev->dev, "keypad_data memory allocation failed\n");
+ dev_err(dev, "keypad_data memory allocation failed\n");
return -ENOMEM;
}
keypad_data->irq = irq;
+ mutex_init(&keypad_data->lock);
+ platform_set_drvdata(pdev, keypad_data);
- error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
+ error = omap4_keypad_parse_dt(dev, keypad_data);
if (error)
- goto err_free_keypad;
+ return error;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "can't request mem region\n");
- error = -EBUSY;
- goto err_free_keypad;
- }
+ keypad_data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(keypad_data->base))
+ return PTR_ERR(keypad_data->base);
- keypad_data->base = ioremap(res->start, resource_size(res));
- if (!keypad_data->base) {
- dev_err(&pdev->dev, "can't ioremap mem resource\n");
- error = -ENOMEM;
- goto err_release_mem;
- }
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, OMAP4_KEYPAD_IDLE_CHECK_MS);
+ pm_runtime_enable(dev);
- pm_runtime_enable(&pdev->dev);
+ error = devm_add_action_or_reset(dev, omap4_disable_pm, dev);
+ if (error) {
+ dev_err(dev, "unable to register cleanup action\n");
+ return error;
+ }
/*
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(&pdev->dev);
+ error = pm_runtime_get_sync(dev);
if (error) {
- dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(&pdev->dev);
- } else {
- error = omap4_keypad_check_revision(&pdev->dev,
- keypad_data);
- if (!error) {
- /* Ensure device does not raise interrupts */
- omap4_keypad_stop(keypad_data);
- }
- pm_runtime_put_sync(&pdev->dev);
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_put_noidle(dev);
+ return error;
+ }
+
+ error = omap4_keypad_check_revision(dev, keypad_data);
+ if (!error) {
+ /* Ensure device does not raise interrupts */
+ omap4_keypad_stop(keypad_data);
}
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
if (error)
- goto err_pm_disable;
+ return error;
/* input device allocation */
- keypad_data->input = input_dev = input_allocate_device();
- if (!input_dev) {
- error = -ENOMEM;
- goto err_pm_disable;
- }
+ keypad_data->input = input_dev = devm_input_allocate_device(dev);
+ if (!input_dev)
+ return -ENOMEM;
input_dev->name = pdev->name;
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0001;
@@ -344,84 +432,51 @@ static int omap4_keypad_probe(struct platform_device *pdev)
keypad_data->row_shift = get_count_order(keypad_data->cols);
max_keys = keypad_data->rows << keypad_data->row_shift;
- keypad_data->keymap = kcalloc(max_keys,
- sizeof(keypad_data->keymap[0]),
- GFP_KERNEL);
+ keypad_data->keymap = devm_kcalloc(dev,
+ max_keys,
+ sizeof(keypad_data->keymap[0]),
+ GFP_KERNEL);
if (!keypad_data->keymap) {
- dev_err(&pdev->dev, "Not enough memory for keymap\n");
- error = -ENOMEM;
- goto err_free_input;
+ dev_err(dev, "Not enough memory for keymap\n");
+ return -ENOMEM;
}
error = matrix_keypad_build_keymap(NULL, NULL,
keypad_data->rows, keypad_data->cols,
keypad_data->keymap, input_dev);
if (error) {
- dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_keymap;
+ dev_err(dev, "failed to build keymap\n");
+ return error;
}
- error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
- omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
- "omap4-keypad", keypad_data);
+ error = devm_request_threaded_irq(dev, keypad_data->irq,
+ omap4_keypad_irq_handler,
+ omap4_keypad_irq_thread_fn,
+ IRQF_ONESHOT,
+ "omap4-keypad", keypad_data);
if (error) {
- dev_err(&pdev->dev, "failed to register interrupt\n");
- goto err_free_keymap;
+ dev_err(dev, "failed to register interrupt\n");
+ return error;
}
error = input_register_device(keypad_data->input);
- if (error < 0) {
- dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ if (error) {
+ dev_err(dev, "failed to register input device\n");
+ return error;
}
- device_init_wakeup(&pdev->dev, true);
- error = dev_pm_set_wake_irq(&pdev->dev, keypad_data->irq);
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, keypad_data->irq);
if (error)
- dev_warn(&pdev->dev,
- "failed to set up wakeup irq: %d\n", error);
-
- platform_set_drvdata(pdev, keypad_data);
+ dev_warn(dev, "failed to set up wakeup irq: %d\n", error);
return 0;
-
-err_free_irq:
- free_irq(keypad_data->irq, keypad_data);
-err_free_keymap:
- kfree(keypad_data->keymap);
-err_free_input:
- input_free_device(input_dev);
-err_pm_disable:
- pm_runtime_disable(&pdev->dev);
- iounmap(keypad_data->base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_keypad:
- kfree(keypad_data);
- return error;
}
static int omap4_keypad_remove(struct platform_device *pdev)
{
- struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
- struct resource *res;
-
dev_pm_clear_wake_irq(&pdev->dev);
- free_irq(keypad_data->irq, keypad_data);
-
- pm_runtime_disable(&pdev->dev);
-
- input_unregister_device(keypad_data->input);
-
- iounmap(keypad_data->base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad_data->keymap);
- kfree(keypad_data);
-
return 0;
}
@@ -437,6 +492,7 @@ static struct platform_driver omap4_keypad_driver = {
.driver = {
.name = "omap4-keypad",
.of_match_table = omap_keypad_dt_match,
+ .pm = &omap4_keypad_pm_ops,
},
};
module_platform_driver(omap4_keypad_driver);
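
The omap4-keypad rework above compresses the 8-byte key_state array into a single u64 and diffs snapshots, which is what makes the i689 errata workaround possible: runtime suspend can synthesize key-up events for anything still latched simply by scanning against 0. A reduced sketch of the diffing step (report_keys() standing in for omap4_keypad_report_keys()):

static void scan_keys(struct omap4_keypad *kp, u64 keys)
{
	u64 changed = keys ^ kp->keys;

	/* releases first, in case a key-up interrupt was lost */
	report_keys(kp, changed & ~keys, false);
	/* then the new presses */
	report_keys(kp, changed & keys, true);

	kp->keys = keys;
}

The real report path walks the set bits with bitmap_from_u64()/for_each_set_bit() and maps each bit position to MATRIX_SCAN_CODE(row, col, row_shift).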
diff --git a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c
index eda86ab552b9..17bbaac8b80c 100644
--- a/drivers/input/misc/ariel-pwrbutton.c
+++ b/drivers/input/misc/ariel-pwrbutton.c
@@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
- { "wyse-ariel-ec-input", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
index 37568b00873d..b08610d6e575 100644
--- a/drivers/input/misc/da7280.c
+++ b/drivers/input/misc/da7280.c
@@ -863,6 +863,7 @@ static void da7280_parse_properties(struct device *dev,
gpi_str3[7] = '0' + i;
haptics->gpi_ctl[i].polarity = 0;
error = device_property_read_string(dev, gpi_str3, &str);
+ if (!error)
haptics->gpi_ctl[i].polarity =
da7280_haptic_of_gpi_pol_str(dev, str);
}
@@ -1299,11 +1300,13 @@ static int __maybe_unused da7280_resume(struct device *dev)
return retval;
}
+#ifdef CONFIG_OF
static const struct of_device_id da7280_of_match[] = {
{ .compatible = "dlg,da7280", },
{ }
};
MODULE_DEVICE_TABLE(of, da7280_of_match);
+#endif
static const struct i2c_device_id da7280_i2c_id[] = {
{ "da7280", },
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index b067bfd2699c..4a6b33bbe7ea 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -986,7 +986,7 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
case V7_PACKET_ID_TWO:
mt[1].x &= ~0x000F;
mt[1].y |= 0x000F;
- /* Detect false-postive touches where x & y report max value */
+ /* Detect false-positive touches where x & y report max value */
if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
mt[1].x = 0;
/* y gets set to 0 at the end of this function */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8fb7b4385ded..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1106,8 +1106,11 @@ static void synaptics_process_packet(struct psmouse *psmouse)
num_fingers = hw.w + 2;
break;
case 2:
- if (SYN_MODEL_PEN(info->model_id))
- ; /* Nothing, treat a pen as a single finger */
+ /*
+ * SYN_MODEL_PEN(info->model_id): even if
+ * the device supports pen, we treat it as
+ * a single finger.
+ */
break;
case 4 ... 15:
if (SYN_CAP_PALMDETECT(info->capabilities))
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 0754744b9ce5..f39b7b3f7942 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -255,7 +255,7 @@ config SERIO_ARC_PS2
config SERIO_APBPS2
tristate "GRLIB APBPS2 PS/2 keyboard/mouse controller"
- depends on OF
+ depends on OF && HAS_IOMEM
help
Say Y here if you want support for GRLIB APBPS2 peripherals used
to connect to PS/2 keyboard and/or mouse.
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index ecdeca147ed7..4408245b61d2 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -159,7 +159,7 @@ static int amba_kmi_probe(struct amba_device *dev,
return ret;
}
-static int amba_kmi_remove(struct amba_device *dev)
+static void amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -168,7 +168,6 @@ static int amba_kmi_remove(struct amba_device *dev)
iounmap(kmi->base);
kfree(kmi);
amba_release_regions(dev);
- return 0;
}
static int __maybe_unused amba_kmi_resume(struct device *dev)
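
locomokbd_remove() above, amba_kmi_remove() here and ps2_remove() below are all part of the same tree-wide conversion: bus ->remove() callbacks returning int gave drivers nothing useful, since the device core removes the device regardless of the return value, so the prototypes become void and the dead return 0 statements go away. The resulting shape is simply (my_priv and its fields are illustrative; only the void return is the point):

static void my_remove(struct amba_device *dev)
{
	struct my_priv *priv = amba_get_drvdata(dev);

	/* unwind probe in reverse order */
	serio_unregister_port(priv->io);
	iounmap(priv->base);
	kfree(priv);
	amba_release_regions(dev);
}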
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 3a2dcf0805f1..9119e12a5778 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
@@ -586,6 +588,10 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
},
{ }
};
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 7b8ceb702a74..68fac4801e2e 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -344,7 +344,7 @@ static int ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int ps2_remove(struct sa1111_dev *dev)
+static void ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -353,8 +353,6 @@ static int ps2_remove(struct sa1111_dev *dev)
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
-
- return 0;
}
/*
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 8ac970a423de..33e9d9bfd036 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -156,7 +156,9 @@ out:
* returning 0 characters.
*/
-static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, unsigned char __user * buf, size_t nr)
+static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
+ unsigned char *kbuf, size_t nr,
+ void **cookie, unsigned long offset)
{
struct serport *serport = (struct serport*) tty->disc_data;
struct serio *serio;
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e08b0ef078e8..fcb1b646436a 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1036,9 +1036,9 @@ static ssize_t show_tabletSize(struct device *dev, struct device_attribute *attr
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%dx%d\n",
- input_abs_get_max(aiptek->inputdev, ABS_X) + 1,
- input_abs_get_max(aiptek->inputdev, ABS_Y) + 1);
+ return sysfs_emit(buf, "%dx%d\n",
+ input_abs_get_max(aiptek->inputdev, ABS_X) + 1,
+ input_abs_get_max(aiptek->inputdev, ABS_Y) + 1);
}
/* These structs define the sysfs files, param #1 is the name of the
@@ -1064,9 +1064,8 @@ static ssize_t show_tabletPointerMode(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(pointer_mode_map,
- aiptek->curSetting.pointerMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(pointer_mode_map,
+ aiptek->curSetting.pointerMode));
}
static ssize_t
@@ -1101,9 +1100,8 @@ static ssize_t show_tabletCoordinateMode(struct device *dev, struct device_attri
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(coordinate_mode_map,
- aiptek->curSetting.coordinateMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(coordinate_mode_map,
+ aiptek->curSetting.coordinateMode));
}
static ssize_t
@@ -1143,9 +1141,8 @@ static ssize_t show_tabletToolMode(struct device *dev, struct device_attribute *
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(tool_mode_map,
- aiptek->curSetting.toolMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(tool_mode_map,
+ aiptek->curSetting.toolMode));
}
static ssize_t
@@ -1174,10 +1171,9 @@ static ssize_t show_tabletXtilt(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.xTilt == AIPTEK_TILT_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.xTilt);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.xTilt);
}
}
@@ -1216,10 +1212,9 @@ static ssize_t show_tabletYtilt(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.yTilt == AIPTEK_TILT_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.yTilt);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.yTilt);
}
}
@@ -1257,7 +1252,7 @@ static ssize_t show_tabletJitterDelay(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", aiptek->curSetting.jitterDelay);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.jitterDelay);
}
static ssize_t
@@ -1286,8 +1281,7 @@ static ssize_t show_tabletProgrammableDelay(struct device *dev, struct device_at
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.programmableDelay);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.programmableDelay);
}
static ssize_t
@@ -1316,7 +1310,7 @@ static ssize_t show_tabletEventsReceived(struct device *dev, struct device_attri
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%ld\n", aiptek->eventCount);
+ return sysfs_emit(buf, "%ld\n", aiptek->eventCount);
}
static DEVICE_ATTR(event_count, S_IRUGO, show_tabletEventsReceived, NULL);
@@ -1355,7 +1349,7 @@ static ssize_t show_tabletDiagnosticMessage(struct device *dev, struct device_at
default:
return 0;
}
- return snprintf(buf, PAGE_SIZE, retMsg);
+ return sysfs_emit(buf, retMsg);
}
static DEVICE_ATTR(diagnostic, S_IRUGO, show_tabletDiagnosticMessage, NULL);
@@ -1375,9 +1369,8 @@ static ssize_t show_tabletStylusUpper(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(stylus_button_map,
- aiptek->curSetting.stylusButtonUpper));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(stylus_button_map,
+ aiptek->curSetting.stylusButtonUpper));
}
static ssize_t
@@ -1406,9 +1399,8 @@ static ssize_t show_tabletStylusLower(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(stylus_button_map,
- aiptek->curSetting.stylusButtonLower));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(stylus_button_map,
+ aiptek->curSetting.stylusButtonLower));
}
static ssize_t
@@ -1444,9 +1436,8 @@ static ssize_t show_tabletMouseLeft(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonLeft));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonLeft));
}
static ssize_t
@@ -1474,9 +1465,8 @@ static ssize_t show_tabletMouseMiddle(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonMiddle));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonMiddle));
}
static ssize_t
@@ -1504,9 +1494,8 @@ static ssize_t show_tabletMouseRight(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonRight));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonRight));
}
static ssize_t
@@ -1535,10 +1524,9 @@ static ssize_t show_tabletWheel(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.wheel == AIPTEK_WHEEL_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.wheel);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.wheel);
}
}
@@ -1568,8 +1556,7 @@ static ssize_t show_tabletExecute(struct device *dev, struct device_attribute *a
/* There is nothing useful to display, so a one-line manual
* is in order...
*/
- return snprintf(buf, PAGE_SIZE,
- "Write anything to this file to program your tablet.\n");
+ return sysfs_emit(buf, "Write anything to this file to program your tablet.\n");
}
static ssize_t
@@ -1600,7 +1587,7 @@ static ssize_t show_tabletODMCode(struct device *dev, struct device_attribute *a
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", aiptek->features.odmCode);
+ return sysfs_emit(buf, "0x%04x\n", aiptek->features.odmCode);
}
static DEVICE_ATTR(odm_code, S_IRUGO, show_tabletODMCode, NULL);
@@ -1613,7 +1600,7 @@ static ssize_t show_tabletModelCode(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", aiptek->features.modelCode);
+ return sysfs_emit(buf, "0x%04x\n", aiptek->features.modelCode);
}
static DEVICE_ATTR(model_code, S_IRUGO, show_tabletModelCode, NULL);
@@ -1626,8 +1613,7 @@ static ssize_t show_firmwareCode(struct device *dev, struct device_attribute *at
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%04x\n",
- aiptek->features.firmwareCode);
+ return sysfs_emit(buf, "%04x\n", aiptek->features.firmwareCode);
}
static DEVICE_ATTR(firmware_code, S_IRUGO, show_firmwareCode, NULL);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cc18f54ea887..529614d364fe 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -608,7 +608,7 @@ config TOUCHSCREEN_MTOUCH
config TOUCHSCREEN_IMX6UL_TSC
tristate "Freescale i.MX6UL touchscreen controller"
- depends on (OF && GPIOLIB) || COMPILE_TEST
+ depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
help
Say Y here if you have a Freescale i.MX6UL, and want to
use the internal touchscreen controller.
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a703870ca7bd..f113a27aeb1e 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -64,24 +64,13 @@
struct ads7846_buf {
u8 cmd;
- /*
- * This union is a temporary hack. The driver does an in-place
- * endianness conversion. This will be cleaned up in the next
- * patch.
- */
- union {
- __be16 data_be16;
- u16 data;
- };
+ __be16 data;
} __packed;
-
-struct ts_event {
- bool ignore;
- struct ads7846_buf x;
- struct ads7846_buf y;
- struct ads7846_buf z1;
- struct ads7846_buf z2;
+struct ads7846_buf_layout {
+ unsigned int offset;
+ unsigned int count;
+ unsigned int skip;
};
/*
@@ -90,12 +79,18 @@ struct ts_event {
* systems where main memory is not DMA-coherent (most non-x86 boards).
*/
struct ads7846_packet {
- struct ts_event tc;
- struct ads7846_buf read_x_cmd;
- struct ads7846_buf read_y_cmd;
- struct ads7846_buf read_z1_cmd;
- struct ads7846_buf read_z2_cmd;
+ unsigned int count;
+ unsigned int count_skip;
+ unsigned int cmds;
+ unsigned int last_cmd_idx;
+ struct ads7846_buf_layout l[5];
+ struct ads7846_buf *rx;
+ struct ads7846_buf *tx;
+
struct ads7846_buf pwrdown_cmd;
+
+ bool ignore;
+ u16 x, y, z1, z2;
};
struct ads7846 {
@@ -194,7 +189,6 @@ struct ads7846 {
#define READ_Y(vref) (READ_12BIT_DFR(y, 1, vref))
#define READ_Z1(vref) (READ_12BIT_DFR(z1, 1, vref))
#define READ_Z2(vref) (READ_12BIT_DFR(z2, 1, vref))
-
#define READ_X(vref) (READ_12BIT_DFR(x, 1, vref))
#define PWRDOWN (READ_12BIT_DFR(y, 0, 0)) /* LAST */
@@ -207,6 +201,21 @@ struct ads7846 {
#define REF_ON (READ_12BIT_DFR(x, 1, 1))
#define REF_OFF (READ_12BIT_DFR(y, 0, 0))
+/* Order commands in the optimal way to reduce Vref switching and
+ * settling time:
+ * Measure: X; Vref: X+, X-; IN: Y+
+ * Measure: Y; Vref: Y+, Y-; IN: X+
+ * Measure: Z1; Vref: Y+, X-; IN: X+
+ * Measure: Z2; Vref: Y+, X-; IN: Y-
+ */
+enum ads7846_cmds {
+ ADS7846_X,
+ ADS7846_Y,
+ ADS7846_Z1,
+ ADS7846_Z2,
+ ADS7846_PWDOWN,
+};
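+
+/*
+ * The enum order doubles as the index into ads7846_packet.l[]: the layout
+ * table, ads7846_filter() and the command bytes set up in
+ * ads7846_setup_spi_msg() all walk this same sequence.
+ */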
+
static int get_pendown_state(struct ads7846 *ts)
{
if (ts->get_pendown_state)
@@ -689,26 +698,109 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val)
return ADS7846_FILTER_OK;
}
-static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
+static int ads7846_get_value(struct ads7846_buf *buf)
{
int value;
- struct spi_transfer *t =
- list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
- struct ads7846_buf *buf = t->rx_buf;
- value = be16_to_cpup(&buf->data_be16);
+ value = be16_to_cpup(&buf->data);
/* enforce ADC output is 12 bits width */
return (value >> 3) & 0xfff;
}
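/*
 * Example (illustrative wire value): the 12-bit conversion sits
 * left-justified behind the leading busy bit of the 16-bit frame, so a
 * big-endian value of 0x7ff8 decodes as (0x7ff8 >> 3) & 0xfff = 0xfff.
 */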
-static void ads7846_update_value(struct spi_message *m, int val)
+static void ads7846_set_cmd_val(struct ads7846 *ts, enum ads7846_cmds cmd_idx,
+ u16 val)
+{
+ struct ads7846_packet *packet = ts->packet;
+
+ switch (cmd_idx) {
+ case ADS7846_Y:
+ packet->y = val;
+ break;
+ case ADS7846_X:
+ packet->x = val;
+ break;
+ case ADS7846_Z1:
+ packet->z1 = val;
+ break;
+ case ADS7846_Z2:
+ packet->z2 = val;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static u8 ads7846_get_cmd(enum ads7846_cmds cmd_idx, int vref)
+{
+ switch (cmd_idx) {
+ case ADS7846_Y:
+ return READ_Y(vref);
+ case ADS7846_X:
+ return READ_X(vref);
+
+ /* 7846 specific commands */
+ case ADS7846_Z1:
+ return READ_Z1(vref);
+ case ADS7846_Z2:
+ return READ_Z2(vref);
+ case ADS7846_PWDOWN:
+ return PWRDOWN;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return 0;
+}
+
+static bool ads7846_cmd_need_settle(enum ads7846_cmds cmd_idx)
+{
+ switch (cmd_idx) {
+ case ADS7846_X:
+ case ADS7846_Y:
+ case ADS7846_Z1:
+ case ADS7846_Z2:
+ return true;
+ case ADS7846_PWDOWN:
+ return false;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return false;
+}
+
+static int ads7846_filter(struct ads7846 *ts)
{
- struct spi_transfer *t =
- list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
- struct ads7846_buf *buf = t->rx_buf;
+ struct ads7846_packet *packet = ts->packet;
+ int action;
+ int val;
+ unsigned int cmd_idx, b;
- buf->data = val;
+ packet->ignore = false;
+ for (cmd_idx = packet->last_cmd_idx; cmd_idx < packet->cmds - 1; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+
+ packet->last_cmd_idx = cmd_idx;
+
+ for (b = l->skip; b < l->count; b++) {
+ val = ads7846_get_value(&packet->rx[l->offset + b]);
+
+ action = ts->filter(ts->filter_data, cmd_idx, &val);
+ if (action == ADS7846_FILTER_REPEAT) {
+ if (b == l->count - 1)
+ return -EAGAIN;
+ } else if (action == ADS7846_FILTER_OK) {
+ ads7846_set_cmd_val(ts, cmd_idx, val);
+ break;
+ } else {
+ packet->ignore = true;
+ return 0;
+ }
+ }
+ }
+
+ return 0;
}
static void ads7846_read_state(struct ads7846 *ts)
@@ -716,52 +808,26 @@ static void ads7846_read_state(struct ads7846 *ts)
struct ads7846_packet *packet = ts->packet;
struct spi_message *m;
int msg_idx = 0;
- int val;
- int action;
int error;
- while (msg_idx < ts->msg_count) {
+ packet->last_cmd_idx = 0;
+ while (true) {
ts->wait_for_sync();
m = &ts->msg[msg_idx];
error = spi_sync(ts->spi, m);
if (error) {
dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
- packet->tc.ignore = true;
+ packet->ignore = true;
return;
}
- /*
- * Last message is power down request, no need to convert
- * or filter the value.
- */
- if (msg_idx < ts->msg_count - 1) {
-
- val = ads7846_get_value(ts, m);
-
- action = ts->filter(ts->filter_data, msg_idx, &val);
- switch (action) {
- case ADS7846_FILTER_REPEAT:
- continue;
-
- case ADS7846_FILTER_IGNORE:
- packet->tc.ignore = true;
- msg_idx = ts->msg_count - 1;
- continue;
-
- case ADS7846_FILTER_OK:
- ads7846_update_value(m, val);
- packet->tc.ignore = false;
- msg_idx++;
- break;
+ error = ads7846_filter(ts);
+ if (error)
+ continue;
- default:
- BUG();
- }
- } else {
- msg_idx++;
- }
+ return;
}
}
@@ -771,19 +837,14 @@ static void ads7846_report_state(struct ads7846 *ts)
unsigned int Rt;
u16 x, y, z1, z2;
- /*
- * ads7846_get_value() does in-place conversion (including byte swap)
- * from on-the-wire format as part of debouncing to get stable
- * readings.
- */
- x = packet->tc.x.data;
- y = packet->tc.y.data;
+ x = packet->x;
+ y = packet->y;
if (ts->model == 7845) {
z1 = 0;
z2 = 0;
} else {
- z1 = packet->tc.z1.data;
- z2 = packet->tc.z2.data;
+ z1 = packet->z1;
+ z2 = packet->z2;
}
/* range filtering */
@@ -816,9 +877,9 @@ static void ads7846_report_state(struct ads7846 *ts)
* the maximum. Don't report it to user space, repeat at least
* once more the measurement
*/
- if (packet->tc.ignore || Rt > ts->pressure_max) {
+ if (packet->ignore || Rt > ts->pressure_max) {
dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n",
- packet->tc.ignore, Rt);
+ packet->ignore, Rt);
return;
}
@@ -979,13 +1040,59 @@ static int ads7846_setup_pendown(struct spi_device *spi,
* Set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
-static void ads7846_setup_spi_msg(struct ads7846 *ts,
+static int ads7846_setup_spi_msg(struct ads7846 *ts,
const struct ads7846_platform_data *pdata)
{
struct spi_message *m = &ts->msg[0];
struct spi_transfer *x = ts->xfer;
struct ads7846_packet *packet = ts->packet;
int vref = pdata->keep_vref_on;
+ unsigned int count, offset = 0;
+ unsigned int cmd_idx, b;
+ unsigned long time;
+ size_t size = 0;
+
+ /* time per bit */
+ time = NSEC_PER_SEC / ts->spi->max_speed_hz;
+
+ count = pdata->settle_delay_usecs * NSEC_PER_USEC / time;
+ packet->count_skip = DIV_ROUND_UP(count, 24);
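+ /*
+ * Worked example (illustrative numbers): at a 2 MHz SPI clock,
+ * time = 500 ns per bit, so settle_delay_usecs = 100 spans
+ * count = 200 bit-times; with 24 bits per ads7846_buf transfer,
+ * DIV_ROUND_UP() yields count_skip = 9 leading samples to drop.
+ */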
+
+ if (ts->debounce_max && ts->debounce_rep)
+ /* ads7846_debounce_filter() makes ts->debounce_rep + 2
+ * reads, so fetch all of those samples in the normal case. */
+ packet->count = ts->debounce_rep + 2;
+ else
+ packet->count = 1;
+
+ if (ts->model == 7846)
+ packet->cmds = 5; /* x, y, z1, z2, pwdown */
+ else
+ packet->cmds = 3; /* x, y, pwdown */
+
+ for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+ unsigned int max_count;
+
+ if (ads7846_cmd_need_settle(cmd_idx))
+ max_count = packet->count + packet->count_skip;
+ else
+ max_count = packet->count;
+
+ l->offset = offset;
+ offset += max_count;
+ l->count = max_count;
+ l->skip = packet->count_skip;
+ size += sizeof(*packet->tx) * max_count;
+ }
+
+ packet->tx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL);
+ if (!packet->tx)
+ return -ENOMEM;
+
+ packet->rx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL);
+ if (!packet->rx)
+ return -ENOMEM;
if (ts->model == 7873) {
/*
@@ -1001,117 +1108,20 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
spi_message_init(m);
m->context = ts;
- packet->read_y_cmd.cmd = READ_Y(vref);
- x->tx_buf = &packet->read_y_cmd;
- x->rx_buf = &packet->tc.y;
- x->len = 3;
- spi_message_add_tail(x, m);
+ for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+ u8 cmd = ads7846_get_cmd(cmd_idx, vref);
- /*
- * The first sample after switching drivers can be low quality;
- * optionally discard it, using a second one after the signals
- * have had enough time to stabilize.
- */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
- x++;
-
- x->tx_buf = &packet->read_y_cmd;
- x->rx_buf = &packet->tc.y;
- x->len = 3;
- spi_message_add_tail(x, m);
+ for (b = 0; b < l->count; b++)
+ packet->tx[l->offset + b].cmd = cmd;
}
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- /* turn y- off, x+ on, then leave in lowpower */
- x++;
- packet->read_x_cmd.cmd = READ_X(vref);
- x->tx_buf = &packet->read_x_cmd;
- x->rx_buf = &packet->tc.x;
- x->len = 3;
+ x->tx_buf = packet->tx;
+ x->rx_buf = packet->rx;
+ x->len = size;
spi_message_add_tail(x, m);
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_x_cmd;
- x->rx_buf = &packet->tc.x;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
-
- /* turn y+ off, x- on; we'll use formula #2 */
- if (ts->model == 7846) {
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->read_z1_cmd.cmd = READ_Z1(vref);
- x->tx_buf = &packet->read_z1_cmd;
- x->rx_buf = &packet->tc.z1;
- x->len = 3;
- spi_message_add_tail(x, m);
-
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_z1_cmd;
- x->rx_buf = &packet->tc.z1;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
-
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->read_z2_cmd.cmd = READ_Z2(vref);
- x->tx_buf = &packet->read_z2_cmd;
- x->rx_buf = &packet->tc.z2;
- x->len = 3;
- spi_message_add_tail(x, m);
-
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_z2_cmd;
- x->rx_buf = &packet->tc.z2;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
- }
-
- /* power down */
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->pwrdown_cmd.cmd = PWRDOWN;
- x->tx_buf = &packet->pwrdown_cmd;
- x->len = 3;
-
- CS_CHANGE(*x);
- spi_message_add_tail(x, m);
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index d51cb910fba1..4c2b579f6c8b 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -56,6 +56,7 @@
#define QUEUE_HEADER_SINGLE 0x62
#define QUEUE_HEADER_NORMAL 0X63
#define QUEUE_HEADER_WAIT 0x64
+#define QUEUE_HEADER_NORMAL2 0x66
/* Command header definition */
#define CMD_HEADER_WRITE 0x54
@@ -69,6 +70,7 @@
#define CMD_HEADER_REK 0x66
/* FW position data */
+#define PACKET_SIZE_OLD 40
#define PACKET_SIZE 55
#define MAX_CONTACT_NUM 10
#define FW_POS_HEADER 0
@@ -90,6 +92,8 @@
/* FW read command, 0x53 0x?? 0x0, 0x01 */
#define E_ELAN_INFO_FW_VER 0x00
#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_X_RES 0x60
+#define E_ELAN_INFO_Y_RES 0x63
#define E_ELAN_INFO_REK 0xD0
#define E_ELAN_INFO_TEST_VER 0xE0
#define E_ELAN_INFO_FW_ID 0xF0
@@ -112,6 +116,11 @@
#define ELAN_POWERON_DELAY_USEC 500
#define ELAN_RESET_DELAY_MSEC 20
+enum elants_chip_id {
+ EKTH3500,
+ EKTF3624,
+};
+
enum elants_state {
ELAN_STATE_NORMAL,
ELAN_WAIT_QUEUE_HEADER,
@@ -143,9 +152,12 @@ struct elants_data {
unsigned int y_res;
unsigned int x_max;
unsigned int y_max;
+ unsigned int phy_x;
+ unsigned int phy_y;
struct touchscreen_properties prop;
enum elants_state state;
+ enum elants_chip_id chip_id;
enum elants_iap_mode iap_mode;
/* Guards against concurrent access to the device via sysfs */
@@ -433,7 +445,51 @@ static int elants_i2c_query_bc_version(struct elants_data *ts)
return 0;
}
-static int elants_i2c_query_ts_info(struct elants_data *ts)
+static int elants_i2c_query_ts_info_ektf(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error;
+ u8 resp[4];
+ u16 phy_x, phy_y;
+ const u8 get_xres_cmd[] = {
+ CMD_HEADER_READ, E_ELAN_INFO_X_RES, 0x00, 0x00
+ };
+ const u8 get_yres_cmd[] = {
+ CMD_HEADER_READ, E_ELAN_INFO_Y_RES, 0x00, 0x00
+ };
+
+ /* Get X/Y size in mm */
+ error = elants_i2c_execute_command(client, get_xres_cmd,
+ sizeof(get_xres_cmd),
+ resp, sizeof(resp), 1,
+ "get X size");
+ if (error)
+ return error;
+
+ phy_x = resp[2] | ((resp[3] & 0xF0) << 4);
+
+ error = elants_i2c_execute_command(client, get_yres_cmd,
+ sizeof(get_yres_cmd),
+ resp, sizeof(resp), 1,
+ "get Y size");
+ if (error)
+ return error;
+
+ phy_y = resp[2] | ((resp[3] & 0xF0) << 4);
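+
+ /* Illustrative register values: resp[2] = 0xE0 with resp[3] = 0x00
+ * decodes to 224, i.e. a 224 mm panel dimension. */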
+
+ dev_dbg(&client->dev, "phy_x=%d, phy_y=%d\n", phy_x, phy_y);
+
+ ts->phy_x = phy_x;
+ ts->phy_y = phy_y;
+
+ /* eKTF doesn't report the max size; use the default values */
+ ts->x_max = 2240 - 1;
+ ts->y_max = 1408 - 1;
+
+ return 0;
+}
+
+static int elants_i2c_query_ts_info_ekth(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
int error;
@@ -508,6 +564,8 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
ts->x_res = DIV_ROUND_CLOSEST(ts->x_max, phy_x);
ts->y_max = ELAN_TS_RESOLUTION(cols, osr);
ts->y_res = DIV_ROUND_CLOSEST(ts->y_max, phy_y);
+ ts->phy_x = phy_x;
+ ts->phy_y = phy_y;
}
return 0;
@@ -587,8 +645,19 @@ static int elants_i2c_initialize(struct elants_data *ts)
error = elants_i2c_query_fw_version(ts);
if (!error)
error = elants_i2c_query_test_version(ts);
- if (!error)
- error = elants_i2c_query_ts_info(ts);
+
+ switch (ts->chip_id) {
+ case EKTH3500:
+ if (!error)
+ error = elants_i2c_query_ts_info_ekth(ts);
+ break;
+ case EKTF3624:
+ if (!error)
+ error = elants_i2c_query_ts_info_ektf(ts);
+ break;
+ default:
+ BUG();
+ }
if (error)
ts->iap_mode = ELAN_IAP_RECOVERY;
@@ -853,7 +922,8 @@ out:
* Event reporting.
*/
-static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
+static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf,
+ size_t packet_size)
{
struct input_dev *input = ts->input;
unsigned int n_fingers;
@@ -880,8 +950,24 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
pos = &buf[FW_POS_XY + i * 3];
x = (((u16)pos[0] & 0xf0) << 4) | pos[1];
y = (((u16)pos[0] & 0x0f) << 8) | pos[2];
- p = buf[FW_POS_PRESSURE + i];
- w = buf[FW_POS_WIDTH + i];
+
+ /*
+ * eKTF3624 may use the "old" touch-report format,
+ * depending on the device and TS firmware version.
+ * For example, ASUS Transformer devices use the "old"
+ * format, while the ASUS Nexus 7 uses the "new" format.
+ */
+ if (packet_size == PACKET_SIZE_OLD &&
+ ts->chip_id == EKTF3624) {
+ w = buf[FW_POS_WIDTH + i / 2];
+ w >>= 4 * (~i & 1);
+ w |= w << 4;
+ w |= !w;
+ p = w;
+ } else {
+ p = buf[FW_POS_PRESSURE + i];
+ w = buf[FW_POS_WIDTH + i];
+ }
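+
+ /*
+ * Illustrative decode of the "old" format: finger 0 takes the
+ * high nibble of the shared width byte, so a raw nibble of
+ * 0x9 is mirrored into both halves to give w = p = 0x99,
+ * while "w |= !w" keeps an all-zero nibble nonzero.
+ */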
dev_dbg(&ts->client->dev, "i=%d x=%d y=%d p=%d w=%d\n",
i, x, y, p, w);
@@ -913,7 +999,8 @@ static u8 elants_i2c_calculate_checksum(u8 *buf)
return checksum;
}
-static void elants_i2c_event(struct elants_data *ts, u8 *buf)
+static void elants_i2c_event(struct elants_data *ts, u8 *buf,
+ size_t packet_size)
{
u8 checksum = elants_i2c_calculate_checksum(buf);
@@ -927,7 +1014,7 @@ static void elants_i2c_event(struct elants_data *ts, u8 *buf)
"%s: unknown packet type: %02x\n",
__func__, buf[FW_POS_HEADER]);
else
- elants_i2c_mt_event(ts, buf);
+ elants_i2c_mt_event(ts, buf, packet_size);
}
static irqreturn_t elants_i2c_irq(int irq, void *_dev)
@@ -970,7 +1057,6 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
switch (ts->buf[FW_HDR_TYPE]) {
case CMD_HEADER_HELLO:
case CMD_HEADER_RESP:
- case CMD_HEADER_REK:
break;
case QUEUE_HEADER_WAIT:
@@ -985,9 +1071,24 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
break;
case QUEUE_HEADER_SINGLE:
- elants_i2c_event(ts, &ts->buf[HEADER_SIZE]);
+ elants_i2c_event(ts, &ts->buf[HEADER_SIZE],
+ ts->buf[FW_HDR_LENGTH]);
break;
+ case QUEUE_HEADER_NORMAL2: /* CMD_HEADER_REK */
+ /*
+ * Depending on firmware version, eKTF3624 touchscreens
+ * may use either of these opcodes for touch events:
+ * 0x63 (NORMAL) and 0x66 (NORMAL2). 0x63 is used by
+ * older firmware versions and differs from 0x66 in that
+ * the touch pressure value needs to be adjusted. The 0x66
+ * opcode of newer firmware is equivalent to 0x63 on eKTH3500.
+ */
+ if (ts->chip_id != EKTF3624)
+ break;
+
+ fallthrough;
+
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
if (report_count == 0 || report_count > 3) {
@@ -998,7 +1099,12 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
}
report_len = ts->buf[FW_HDR_LENGTH] / report_count;
- if (report_len != PACKET_SIZE) {
+
+ if (report_len == PACKET_SIZE_OLD &&
+ ts->chip_id == EKTF3624) {
+ dev_dbg_once(&client->dev,
+ "using old report format\n");
+ } else if (report_len != PACKET_SIZE) {
dev_err(&client->dev,
"mismatching report length: %*ph\n",
HEADER_SIZE, ts->buf);
@@ -1007,8 +1113,8 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
for (i = 0; i < report_count; i++) {
u8 *buf = ts->buf + HEADER_SIZE +
- i * PACKET_SIZE;
- elants_i2c_event(ts, buf);
+ i * report_len;
+ elants_i2c_event(ts, buf, report_len);
}
break;
@@ -1250,6 +1356,7 @@ static int elants_i2c_probe(struct i2c_client *client,
init_completion(&ts->cmd_done);
ts->client = client;
+ ts->chip_id = (enum elants_chip_id)id->driver_data;
i2c_set_clientdata(client, ts);
ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
@@ -1331,13 +1438,20 @@ static int elants_i2c_probe(struct i2c_client *client,
input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
0, MT_TOOL_PALM, 0, 0);
+
+ touchscreen_parse_properties(ts->input, true, &ts->prop);
+
+ if (ts->chip_id == EKTF3624) {
+ /* calculate resolution from size */
+ ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
+ ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
+ }
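+
+ /*
+ * Illustrative numbers: the default max_x of 2239 counts over a
+ * 224 mm panel width gives x_res = 10 units per millimetre.
+ */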
+
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
if (ts->major_res > 0)
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
- touchscreen_parse_properties(ts->input, true, &ts->prop);
-
error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@@ -1466,14 +1580,16 @@ static SIMPLE_DEV_PM_OPS(elants_i2c_pm_ops,
elants_i2c_suspend, elants_i2c_resume);
static const struct i2c_device_id elants_i2c_id[] = {
- { DEVICE_NAME, 0 },
+ { DEVICE_NAME, EKTH3500 },
+ { "ekth3500", EKTH3500 },
+ { "ektf3624", EKTF3624 },
{ }
};
MODULE_DEVICE_TABLE(i2c, elants_i2c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id elants_acpi_id[] = {
- { "ELAN0001", 0 },
+ { "ELAN0001", EKTH3500 },
{ }
};
MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
@@ -1482,6 +1598,7 @@ MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
#ifdef CONFIG_OF
static const struct of_device_id elants_of_match[] = {
{ .compatible = "elan,ekth3500" },
+ { .compatible = "elan,ektf3624" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, elants_of_match);
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index e0bacd34866a..96173232e53f 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
switch (elo->id) {
case 0: /* 10-byte protocol */
- if (elo_setup_10(elo))
+ if (elo_setup_10(elo)) {
+ err = -EIO;
goto fail3;
+ }
break;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 19765f1c04f7..c682b028f0a2 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
+ { .id = "9286", .data = &gt1x_chip_data },
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
@@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
{ .compatible = "goodix,gt967" },
{ }
};
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 2f261a34f9c2..056ba76087e8 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -171,7 +171,7 @@ static int htcpen_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int htcpen_isa_remove(struct device *dev, unsigned int id)
+static void htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
@@ -182,8 +182,6 @@ static int htcpen_isa_remove(struct device *dev, unsigned int id)
release_region(HTCPEN_PORT_INDEX, 2);
release_region(HTCPEN_PORT_INIT, 1);
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 199cf3daec10..d8fccf048bf4 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -29,11 +29,13 @@ struct ili2xxx_chip {
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
- unsigned int *x, unsigned int *y);
+ unsigned int *x, unsigned int *y,
+ unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_pressure_reg;
};
struct ili210x {
@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u32 data;
@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+ *z = touchdata[1 + (finger * 5) + 4];
return true;
}
@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
- unsigned int x = 0, y = 0;
+ unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+ if (priv->chip->has_pressure_reg)
+ input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+ if (priv->chip->has_pressure_reg)
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 4fd21bc3ce0f..54f30038dca4 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -2,8 +2,7 @@
/*
* Azoteq IQS550/572/525 Trackpad/Touchscreen Controller
*
- * Copyright (C) 2018
- * Author: Jeff LaBundy <jeff@labundy.com>
+ * Copyright (C) 2018 Jeff LaBundy <jeff@labundy.com>
*
* These devices require firmware exported from a PC-based configuration tool
* made available by the vendor. Firmware files may be pushed to the device's
@@ -12,6 +11,7 @@
* Link to PC-based configuration tool and data sheet: http://www.azoteq.com/
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -30,9 +30,9 @@
#define IQS5XX_FW_FILE_LEN 64
#define IQS5XX_NUM_RETRIES 10
-#define IQS5XX_NUM_POINTS 256
#define IQS5XX_NUM_CONTACTS 5
#define IQS5XX_WR_BYTES_MAX 2
+#define IQS5XX_XY_RES_MAX 0xFFFE
#define IQS5XX_PROD_NUM_IQS550 40
#define IQS5XX_PROD_NUM_IQS572 58
@@ -41,28 +41,27 @@
#define IQS5XX_PROJ_NUM_B000 15
#define IQS5XX_MAJOR_VER_MIN 2
-#define IQS5XX_RESUME 0x00
-#define IQS5XX_SUSPEND 0x01
+#define IQS5XX_SHOW_RESET BIT(7)
+#define IQS5XX_ACK_RESET BIT(7)
-#define IQS5XX_SW_INPUT_EVENT 0x10
-#define IQS5XX_SETUP_COMPLETE 0x40
-#define IQS5XX_EVENT_MODE 0x01
-#define IQS5XX_TP_EVENT 0x04
+#define IQS5XX_SUSPEND BIT(0)
+#define IQS5XX_RESUME 0
-#define IQS5XX_FLIP_X 0x01
-#define IQS5XX_FLIP_Y 0x02
-#define IQS5XX_SWITCH_XY_AXIS 0x04
+#define IQS5XX_SETUP_COMPLETE BIT(6)
+#define IQS5XX_WDT BIT(5)
+#define IQS5XX_ALP_REATI BIT(3)
+#define IQS5XX_REATI BIT(2)
+
+#define IQS5XX_TP_EVENT BIT(2)
+#define IQS5XX_EVENT_MODE BIT(0)
#define IQS5XX_PROD_NUM 0x0000
-#define IQS5XX_ABS_X 0x0016
-#define IQS5XX_ABS_Y 0x0018
+#define IQS5XX_SYS_INFO0 0x000F
+#define IQS5XX_SYS_INFO1 0x0010
#define IQS5XX_SYS_CTRL0 0x0431
#define IQS5XX_SYS_CTRL1 0x0432
#define IQS5XX_SYS_CFG0 0x058E
#define IQS5XX_SYS_CFG1 0x058F
-#define IQS5XX_TOTAL_RX 0x063D
-#define IQS5XX_TOTAL_TX 0x063E
-#define IQS5XX_XY_CFG0 0x0669
#define IQS5XX_X_RES 0x066E
#define IQS5XX_Y_RES 0x0670
#define IQS5XX_CHKSM 0x83C0
@@ -99,6 +98,7 @@ struct iqs5xx_private {
struct i2c_client *client;
struct input_dev *input;
struct gpio_desc *reset_gpio;
+ struct touchscreen_properties prop;
struct mutex lock;
u8 bl_status;
};
@@ -126,6 +126,14 @@ struct iqs5xx_touch_data {
u8 area;
} __packed;
+struct iqs5xx_status {
+ u8 sys_info[2];
+ u8 num_active;
+ __be16 rel_x;
+ __be16 rel_y;
+ struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+} __packed;
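+
+/*
+ * This layout mirrors the register map from IQS5XX_SYS_INFO0 (0x000F)
+ * onward, letting one iqs5xx_read_burst() in the IRQ handler fetch the
+ * system status and all five contacts in a single I2C transaction.
+ */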
+
static int iqs5xx_read_burst(struct i2c_client *client,
u16 reg, void *val, u16 len)
{
@@ -182,11 +190,6 @@ static int iqs5xx_read_word(struct i2c_client *client, u16 reg, u16 *val)
return 0;
}
-static int iqs5xx_read_byte(struct i2c_client *client, u16 reg, u8 *val)
-{
- return iqs5xx_read_burst(client, reg, val, sizeof(*val));
-}
-
static int iqs5xx_write_burst(struct i2c_client *client,
u16 reg, const void *val, u16 len)
{
@@ -337,11 +340,16 @@ static int iqs5xx_bl_open(struct i2c_client *client)
*/
for (i = 0; i < IQS5XX_BL_ATTEMPTS; i++) {
iqs5xx_reset(client);
+ usleep_range(350, 400);
for (j = 0; j < IQS5XX_NUM_RETRIES; j++) {
error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
- if (!error || error == -EINVAL)
- return error;
+ if (!error)
+ usleep_range(10000, 10100);
+ else if (error != -EINVAL)
+ continue;
+
+ return error;
}
}
@@ -481,12 +489,10 @@ static void iqs5xx_close(struct input_dev *input)
static int iqs5xx_axis_init(struct i2c_client *client)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
- struct touchscreen_properties prop;
+ struct touchscreen_properties *prop = &iqs5xx->prop;
struct input_dev *input;
+ u16 max_x, max_y;
int error;
- u16 max_x, max_x_hw;
- u16 max_y, max_y_hw;
- u8 val;
if (!iqs5xx->input) {
input = devm_input_allocate_device(&client->dev);
@@ -506,89 +512,39 @@ static int iqs5xx_axis_init(struct i2c_client *client)
iqs5xx->input = input;
}
- touchscreen_parse_properties(iqs5xx->input, true, &prop);
-
- error = iqs5xx_read_byte(client, IQS5XX_TOTAL_RX, &val);
- if (error)
- return error;
- max_x_hw = (val - 1) * IQS5XX_NUM_POINTS;
-
- error = iqs5xx_read_byte(client, IQS5XX_TOTAL_TX, &val);
+ error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
if (error)
return error;
- max_y_hw = (val - 1) * IQS5XX_NUM_POINTS;
- error = iqs5xx_read_byte(client, IQS5XX_XY_CFG0, &val);
+ error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
if (error)
return error;
- if (val & IQS5XX_SWITCH_XY_AXIS)
- swap(max_x_hw, max_y_hw);
+ input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_X, max_x);
+ input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_Y, max_y);
- if (prop.swap_x_y)
- val ^= IQS5XX_SWITCH_XY_AXIS;
-
- if (prop.invert_x)
- val ^= prop.swap_x_y ? IQS5XX_FLIP_Y : IQS5XX_FLIP_X;
-
- if (prop.invert_y)
- val ^= prop.swap_x_y ? IQS5XX_FLIP_X : IQS5XX_FLIP_Y;
-
- error = iqs5xx_write_byte(client, IQS5XX_XY_CFG0, val);
- if (error)
- return error;
+ touchscreen_parse_properties(iqs5xx->input, true, prop);
- if (prop.max_x > max_x_hw) {
+ if (prop->max_x > IQS5XX_XY_RES_MAX) {
dev_err(&client->dev, "Invalid maximum x-coordinate: %u > %u\n",
- prop.max_x, max_x_hw);
+ prop->max_x, IQS5XX_XY_RES_MAX);
return -EINVAL;
- } else if (prop.max_x == 0) {
- error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
+ } else if (prop->max_x != max_x) {
+ error = iqs5xx_write_word(client, IQS5XX_X_RES, prop->max_x);
if (error)
return error;
-
- input_abs_set_max(iqs5xx->input,
- prop.swap_x_y ? ABS_MT_POSITION_Y :
- ABS_MT_POSITION_X,
- max_x);
- } else {
- max_x = (u16)prop.max_x;
}
- if (prop.max_y > max_y_hw) {
+ if (prop->max_y > IQS5XX_XY_RES_MAX) {
dev_err(&client->dev, "Invalid maximum y-coordinate: %u > %u\n",
- prop.max_y, max_y_hw);
+ prop->max_y, IQS5XX_XY_RES_MAX);
return -EINVAL;
- } else if (prop.max_y == 0) {
- error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
+ } else if (prop->max_y != max_y) {
+ error = iqs5xx_write_word(client, IQS5XX_Y_RES, prop->max_y);
if (error)
return error;
-
- input_abs_set_max(iqs5xx->input,
- prop.swap_x_y ? ABS_MT_POSITION_X :
- ABS_MT_POSITION_Y,
- max_y);
- } else {
- max_y = (u16)prop.max_y;
}
- /*
- * Write horizontal and vertical resolution to the device in case its
- * original defaults were overridden or swapped as per the properties
- * specified in the device tree.
- */
- error = iqs5xx_write_word(client,
- prop.swap_x_y ? IQS5XX_Y_RES : IQS5XX_X_RES,
- max_x);
- if (error)
- return error;
-
- error = iqs5xx_write_word(client,
- prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
- max_y);
- if (error)
- return error;
-
error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
INPUT_MT_DIRECT);
if (error)
@@ -603,7 +559,6 @@ static int iqs5xx_dev_init(struct i2c_client *client)
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
struct iqs5xx_dev_id_info *dev_id_info;
int error;
- u8 val;
u8 buf[sizeof(*dev_id_info) + 1];
error = iqs5xx_read_burst(client, IQS5XX_PROD_NUM,
@@ -666,18 +621,18 @@ static int iqs5xx_dev_init(struct i2c_client *client)
if (error)
return error;
- error = iqs5xx_read_byte(client, IQS5XX_SYS_CFG0, &val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CTRL0, IQS5XX_ACK_RESET);
if (error)
return error;
- val |= IQS5XX_SETUP_COMPLETE;
- val &= ~IQS5XX_SW_INPUT_EVENT;
- error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0, val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0,
+ IQS5XX_SETUP_COMPLETE | IQS5XX_WDT |
+ IQS5XX_ALP_REATI | IQS5XX_REATI);
if (error)
return error;
- val = IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE;
- error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1, val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1,
+ IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE);
if (error)
return error;
@@ -688,13 +643,12 @@ static int iqs5xx_dev_init(struct i2c_client *client)
iqs5xx->bl_status = dev_id_info->bl_status;
/*
- * Closure of the first communication window that appears following the
- * release of reset appears to kick off an initialization period during
- * which further communication is met with clock stretching. The return
- * from this function is delayed so that further communication attempts
- * avoid this period.
+ * The following delay allows ATI to complete before the open and close
+ * callbacks are free to elicit I2C communication. Any attempts to read
+ * from or write to the device during this time may face extended clock
+ * stretching and prompt the I2C controller to report an error.
*/
- msleep(100);
+ msleep(250);
return 0;
}
@@ -702,7 +656,7 @@ static int iqs5xx_dev_init(struct i2c_client *client)
static irqreturn_t iqs5xx_irq(int irq, void *data)
{
struct iqs5xx_private *iqs5xx = data;
- struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+ struct iqs5xx_status status;
struct i2c_client *client = iqs5xx->client;
struct input_dev *input = iqs5xx->input;
int error, i;
@@ -715,21 +669,35 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
return IRQ_NONE;
- error = iqs5xx_read_burst(client, IQS5XX_ABS_X,
- touch_data, sizeof(touch_data));
+ error = iqs5xx_read_burst(client, IQS5XX_SYS_INFO0,
+ &status, sizeof(status));
if (error)
return IRQ_NONE;
- for (i = 0; i < ARRAY_SIZE(touch_data); i++) {
- u16 pressure = be16_to_cpu(touch_data[i].strength);
+ if (status.sys_info[0] & IQS5XX_SHOW_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ error = iqs5xx_dev_init(client);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", error);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(status.touch_data); i++) {
+ struct iqs5xx_touch_data *touch_data = &status.touch_data[i];
+ u16 pressure = be16_to_cpu(touch_data->strength);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
pressure != 0)) {
- input_report_abs(input, ABS_MT_POSITION_X,
- be16_to_cpu(touch_data[i].abs_x));
- input_report_abs(input, ABS_MT_POSITION_Y,
- be16_to_cpu(touch_data[i].abs_y));
+ touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+ be16_to_cpu(touch_data->abs_x),
+ be16_to_cpu(touch_data->abs_y),
+ true);
input_report_abs(input, ABS_MT_PRESSURE, pressure);
}
}
@@ -884,7 +852,7 @@ static int iqs5xx_fw_file_parse(struct i2c_client *client,
static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
- int error;
+ int error, error_bl = 0;
u8 *pmap;
if (iqs5xx->bl_status == IQS5XX_BL_STATUS_NONE)
@@ -938,6 +906,7 @@ err_reset:
usleep_range(10000, 10100);
}
+ error_bl = error;
error = iqs5xx_dev_init(client);
if (!error && iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
error = -EINVAL;
@@ -949,11 +918,15 @@ err_reset:
err_kfree:
kfree(pmap);
+ if (error_bl)
+ return error_bl;
+
return error;
}
-static ssize_t fw_file_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fw_file_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
struct i2c_client *client = iqs5xx->client;
@@ -1012,7 +985,7 @@ static int __maybe_unused iqs5xx_suspend(struct device *dev)
struct input_dev *input = iqs5xx->input;
int error = 0;
- if (!input)
+ if (!input || device_may_wakeup(dev))
return error;
mutex_lock(&input->mutex);
@@ -1031,7 +1004,7 @@ static int __maybe_unused iqs5xx_resume(struct device *dev)
struct input_dev *input = iqs5xx->input;
int error = 0;
- if (!input)
+ if (!input || device_may_wakeup(dev))
return error;
mutex_lock(&input->mutex);
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index c0050044a5a9..225796a3f546 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -465,13 +465,13 @@ static void mip4_report_keys(struct mip4_ts *ts, u8 *packet)
static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
{
int id;
- bool hover;
- bool palm;
+ bool __always_unused hover;
+ bool __always_unused palm;
bool state;
u16 x, y;
- u8 pressure_stage = 0;
+ u8 __always_unused pressure_stage = 0;
u8 pressure;
- u8 size;
+ u8 __always_unused size;
u8 touch_major;
u8 touch_minor;
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 603a948460d6..4d2d22a86977 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -445,6 +445,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
enum raydium_bl_ack state)
{
int error;
+ static const u8 cmd[] = { 0xFF, 0x39 };
error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
if (error) {
@@ -453,7 +454,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
return error;
}
- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
+ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
if (error) {
dev_err(&client->dev, "Ack obj command failed: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index bda96762744e..6abae665ca71 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -26,6 +26,20 @@
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
+#define REG_STATUS 0x01 /* Device Status | Error Code */
+
+#define STATUS_NORMAL 0x00
+#define STATUS_INIT 0x01
+#define STATUS_ERROR 0x02
+#define STATUS_AUTO_TUNING 0x03
+#define STATUS_IDLE 0x04
+#define STATUS_POWER_DOWN 0x05
+
+#define ERROR_NONE 0x00
+#define ERROR_INVALID_ADDRESS 0x10
+#define ERROR_INVALID_VALUE 0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
#define REG_XY_RESOLUTION 0x04
#define REG_XY_COORDINATES 0x12
#define ST_TS_MAX_FINGERS 10
@@ -47,7 +61,8 @@ struct st1232_ts_data {
u8 *read_buf;
};
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+ unsigned int n)
{
struct i2c_client *client = ts->client;
struct i2c_msg msg[] = {
@@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
{
.addr = client->addr,
.flags = I2C_M_RD | I2C_M_DMA_SAFE,
- .len = ts->read_buf_len,
+ .len = n,
.buf = ts->read_buf,
}
};
@@ -72,6 +87,27 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
return 0;
}
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+ unsigned int retries;
+ int error;
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+ if (!error) {
+ switch (ts->read_buf[0]) {
+ case STATUS_NORMAL | ERROR_NONE:
+ case STATUS_IDLE | ERROR_NONE:
+ return 0;
+ }
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ENXIO;
+}
+
static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
u16 *max_y)
{
@@ -79,14 +115,14 @@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
int error;
/* select resolution register */
- error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+ error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
if (error)
return error;
buf = ts->read_buf;
- *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
- *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+ *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+ *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
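+
+ /* Illustrative readout: buf[] = { 0x13, 0x40, 0xe0 } decodes as
+ * max_x = ((0x1 << 8) | 0x40) - 1 = 319 and
+ * max_y = ((0x3 << 8) | 0xe0) - 1 = 991.
+ */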
return 0;
}
@@ -140,7 +176,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
int count;
int error;
- error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+ error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
if (error)
goto out;
@@ -251,6 +287,11 @@ static int st1232_ts_probe(struct i2c_client *client,
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
+ /* Wait until device is ready */
+ error = st1232_ts_wait_ready(ts);
+ if (error)
+ return error;
+
/* Read resolution from the chip */
error = st1232_ts_read_resolution(ts, &max_x, &max_y);
if (error) {
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index cd747725589b..25c45c3a3561 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -52,6 +52,7 @@
* @idev: registered input device
* @work: a work item used to scan the device
* @dev: a pointer back to the MFD cell struct device*
+ * @prop: Touchscreen properties
* @ave_ctrl: Sample average control
* (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
* @touch_det_delay: Touch detect interrupt delay
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 620cdd7d214a..12f2562b0141 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -787,6 +787,7 @@ static int sur40_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"Unable to register video controls.");
v4l2_ctrl_handler_free(&sur40->hdl);
+ error = sur40->hdl.error;
goto err_unreg_v4l2;
}
diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c
index 731454599fce..1da23e5585a0 100644
--- a/drivers/input/touchscreen/surface3_spi.c
+++ b/drivers/input/touchscreen/surface3_spi.c
@@ -94,9 +94,7 @@ static void surface3_spi_report_touch(struct surface3_ts_data *ts_data,
static void surface3_spi_process_touch(struct surface3_ts_data *ts_data, u8 *data)
{
- u16 timestamp;
unsigned int i;
- timestamp = get_unaligned_le16(&data[15]);
for (i = 0; i < 13; i++) {
struct surface3_ts_data_finger *finger;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 397cb1d3f481..c847453a03c2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1044,6 +1044,7 @@ static void nexio_exit(struct usbtouch_usb *usbtouch)
static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
{
+ struct device *dev = &usbtouch->interface->dev;
struct nexio_touch_packet *packet = (void *) pkt;
struct nexio_priv *priv = usbtouch->priv;
unsigned int data_len = be16_to_cpu(packet->data_len);
@@ -1062,6 +1063,8 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
/* send ACK */
ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
+ if (ret)
+ dev_warn(dev, "Failed to submit ACK URB: %d\n", ret);
if (!usbtouch->type->max_xc) {
usbtouch->type->max_xc = 2 * x_len;
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index a3e3adbabc67..3b636beb583c 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -161,7 +161,7 @@ static int zinitix_read_data(struct i2c_client *client,
ret = i2c_master_recv(client, (u8 *)values, length);
if (ret != length)
- return ret < 0 ? ret : -EIO; ;
+ return ret < 0 ? ret : -EIO;
return 0;
}
@@ -190,7 +190,7 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
return 0;
}
-static bool zinitix_init_touch(struct bt541_ts_data *bt541)
+static int zinitix_init_touch(struct bt541_ts_data *bt541)
{
struct i2c_client *client = bt541->client;
int i;
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..ca52647f8955 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -17,6 +17,15 @@ config INTERCONNECT_QCOM_MSM8916
This is a driver for the Qualcomm Network-on-Chip on msm8916-based
platforms.
+config INTERCONNECT_QCOM_MSM8939
+ tristate "Qualcomm MSM8939 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8939-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8974
tristate "Qualcomm MSM8974 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -42,13 +51,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but to avoid link failures the drivers must not be built-in
+ when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
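+ For example, with QCOM_RPMH=m this symbol is limited to =m as
+ well, which in turn caps the RPMh-based drivers below at =m.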
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,18 +76,25 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
+config INTERCONNECT_QCOM_SDX55
+ tristate "Qualcomm SDX55 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdx55-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +103,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index cf628f7990cd..c6a735df067e 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -2,24 +2,28 @@
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
+qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
qnoc-sdm845-objs := sdm845.o
+qnoc-sdx55-objs := sdx55.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
-icc-smd-rpm-objs := smd-rpm.o
+icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
new file mode 100644
index 000000000000..cc6095492cbe
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+#include "icc-rpm.h"
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
+ const struct clk_bulk_data *cd)
+{
+ struct device *dev = &pdev->dev;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, cd, cd_size,
+ GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = cd_num;
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ icc_provider_del(provider);
+
+ return ret;
+}
+EXPORT_SYMBOL(qnoc_probe);
+
+int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return icc_provider_del(&qp->provider);
+}
+EXPORT_SYMBOL(qnoc_remove);
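The clock-rate derivation in qcom_icc_set() above is easy to get wrong on units, so here is a standalone sketch of the same math. This is an illustration, not part of the patch; it assumes icc_units_to_bps() multiplies the framework's kBps values by 1000, as the upstream helper in <linux/interconnect-provider.h> does.

#include <assert.h>
#include <stdint.h>

/* Standalone rendering of the math in qcom_icc_set(): convert the
 * aggregated average and peak bandwidths (kBps) to bytes/s, take the
 * larger of the two, and divide by the bus width in bytes to get the
 * clock rate to request in Hz. */
static uint64_t bus_rate_hz(uint32_t agg_avg, uint32_t agg_peak,
			    uint16_t buswidth)
{
	uint64_t sum_bw = (uint64_t)agg_avg * 1000;	/* kBps -> bytes/s */
	uint64_t max_peak_bw = (uint64_t)agg_peak * 1000;
	uint64_t rate = sum_bw > max_peak_bw ? sum_bw : max_peak_bw;

	return rate / buswidth;
}

int main(void)
{
	/* A 1.6 GB/s peak vote on a 16-byte-wide port asks for 100 MHz. */
	assert(bus_rate_hz(800000, 1600000, 16) == 100000000);
	return 0;
}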
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
new file mode 100644
index 000000000000..79a6f68249c1
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
+#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+#define QCOM_MAX_LINKS 12
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM id for devices that are bus masters
+ * @slv_rpm_id: RPM id for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[QCOM_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
+ const struct clk_bulk_data *cd);
+int qnoc_remove(struct platform_device *pdev);
+
+#endif
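For reference, a DEFINE_QNODE invocation such as the msm8916 bimc_snoc_mas entry below expands to roughly the following, per the macro definition above:

/* DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1,
 *              MSM8916_BIMC_SNOC_SLV); */
static struct qcom_icc_node bimc_snoc_mas = {
	.name = "bimc_snoc_mas",	/* stringified node name, used in debugfs */
	.id = MSM8916_BIMC_SNOC_MAS,
	.buswidth = 8,			/* bytes */
	.mas_rpm_id = -1,		/* -1: no RPM master vote for this node */
	.slv_rpm_id = -1,		/* -1: no RPM slave vote either */
	.num_links = 1,			/* ARRAY_SIZE of the trailing id list */
	.links = { MSM8916_BIMC_SNOC_SLV },
};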
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index e8371d40ab8d..fc3689c8947a 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -15,9 +15,7 @@
#include <dt-bindings/interconnect/qcom,msm8916.h>
#include "smd-rpm.h"
-
-#define RPM_BUS_MASTER_REQ 0x73616d62
-#define RPM_BUS_SLAVE_REQ 0x766c7362
+#include "icc-rpm.h"
enum {
MSM8916_BIMC_SNOC_MAS = 1,
@@ -107,67 +105,11 @@ enum {
MSM8916_SNOC_PNOC_SLV,
};
-#define to_msm8916_provider(_provider) \
- container_of(_provider, struct msm8916_icc_provider, provider)
-
static const struct clk_bulk_data msm8916_bus_clocks[] = {
{ .id = "bus" },
{ .id = "bus_a" },
};
-/**
- * struct msm8916_icc_provider - Qualcomm specific interconnect provider
- * @provider: generic interconnect provider
- * @bus_clks: the clk_bulk_data table of bus clocks
- * @num_clks: the total number of clk_bulk_data entries
- */
-struct msm8916_icc_provider {
- struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
- int num_clks;
-};
-
-#define MSM8916_MAX_LINKS 8
-
-/**
- * struct msm8916_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @id: a unique node identifier
- * @links: an array of nodes where we can go next while traversing
- * @num_links: the total number of @links
- * @buswidth: width of the interconnect between a node and the bus (bytes)
- * @mas_rpm_id: RPM ID for devices that are bus masters
- * @slv_rpm_id: RPM ID for devices that are bus slaves
- * @rate: current bus clock rate in Hz
- */
-struct msm8916_icc_node {
- unsigned char *name;
- u16 id;
- u16 links[MSM8916_MAX_LINKS];
- u16 num_links;
- u16 buswidth;
- int mas_rpm_id;
- int slv_rpm_id;
- u64 rate;
-};
-
-struct msm8916_icc_desc {
- struct msm8916_icc_node **nodes;
- size_t num_nodes;
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- ...) \
- static struct msm8916_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
@@ -254,7 +196,7 @@ DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIM
DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
-static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
+static struct qcom_icc_node *msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -283,12 +225,12 @@ static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
[SNOC_QDSS_INT] = &qdss_int,
};
-static struct msm8916_icc_desc msm8916_snoc = {
+static struct qcom_icc_desc msm8916_snoc = {
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
};
-static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
+static struct qcom_icc_node *msm8916_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -300,12 +242,12 @@ static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
};
-static struct msm8916_icc_desc msm8916_bimc = {
+static struct qcom_icc_desc msm8916_bimc = {
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
};
-static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
+static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -358,178 +300,15 @@ static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};
-static struct msm8916_icc_desc msm8916_pcnoc = {
+static struct qcom_icc_desc msm8916_pcnoc = {
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
};
-static int msm8916_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct msm8916_icc_provider *qp;
- struct msm8916_icc_node *qn;
- u64 sum_bw, max_peak_bw, rate;
- u32 agg_avg = 0, agg_peak = 0;
- struct icc_provider *provider;
- struct icc_node *n;
- int ret, i;
-
- qn = src->data;
- provider = src->provider;
- qp = to_msm8916_provider(provider);
-
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
-
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
-
- /* send bandwidth request message to the RPM processor */
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
- }
- }
-
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv error %d\n",
- ret);
- return ret;
- }
- }
-
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
-
- if (qn->rate == rate)
- return 0;
-
- for (i = 0; i < qp->num_clks; i++) {
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
- return ret;
- }
- }
-
- qn->rate = rate;
-
- return 0;
-}
-
static int msm8916_qnoc_probe(struct platform_device *pdev)
{
- const struct msm8916_icc_desc *desc;
- struct msm8916_icc_node **qnodes;
- struct msm8916_icc_provider *qp;
- struct device *dev = &pdev->dev;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- /* wait for the RPM proxy */
- if (!qcom_icc_rpm_smd_available())
- return -EPROBE_DEFER;
-
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- qp->bus_clks = devm_kmemdup(dev, msm8916_bus_clocks,
- sizeof(msm8916_bus_clocks), GFP_KERNEL);
- if (!qp->bus_clks)
- return -ENOMEM;
-
- qp->num_clks = ARRAY_SIZE(msm8916_bus_clocks);
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
- provider->dev = dev;
- provider->set = msm8916_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- provider->data = data;
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return ret;
-}
-
-static int msm8916_qnoc_remove(struct platform_device *pdev)
-{
- struct msm8916_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ return qnoc_probe(pdev, sizeof(msm8916_bus_clocks),
+ ARRAY_SIZE(msm8916_bus_clocks), msm8916_bus_clocks);
}
static const struct of_device_id msm8916_noc_of_match[] = {
@@ -542,7 +321,7 @@ MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
static struct platform_driver msm8916_noc_driver = {
.probe = msm8916_qnoc_probe,
- .remove = msm8916_qnoc_remove,
+ .remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8916",
.of_match_table = msm8916_noc_of_match,
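With a provider like the one above registered, client drivers vote bandwidth through the generic interconnect consumer API. A minimal consumer-side sketch (the device and path name here are hypothetical, shown only for context):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/* Hypothetical client: request 200 MB/s average / 400 MB/s peak on a
 * DT-described path, then drop the reference. Values are in kBps. */
static int example_vote(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "example-path");	/* name from the DT binding */
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = icc_set_bw(path, 200000, 400000);
	icc_put(path);
	return ret;
}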
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
new file mode 100644
index 000000000000..dfbec30ed149
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ * Author: Jun Nie <jun.nie@linaro.org>
+ * Based on the msm8916 interconnect driver by Georgi Djakov.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/interconnect/qcom,msm8939.h>
+
+#include "smd-rpm.h"
+#include "icc-rpm.h"
+
+enum {
+ MSM8939_BIMC_SNOC_MAS = 1,
+ MSM8939_BIMC_SNOC_SLV,
+ MSM8939_MASTER_AMPSS_M0,
+ MSM8939_MASTER_LPASS,
+ MSM8939_MASTER_BLSP_1,
+ MSM8939_MASTER_DEHR,
+ MSM8939_MASTER_GRAPHICS_3D,
+ MSM8939_MASTER_JPEG,
+ MSM8939_MASTER_MDP_PORT0,
+ MSM8939_MASTER_MDP_PORT1,
+ MSM8939_MASTER_CPP,
+ MSM8939_MASTER_CRYPTO_CORE0,
+ MSM8939_MASTER_SDCC_1,
+ MSM8939_MASTER_SDCC_2,
+ MSM8939_MASTER_QDSS_BAM,
+ MSM8939_MASTER_QDSS_ETR,
+ MSM8939_MASTER_SNOC_CFG,
+ MSM8939_MASTER_SPDM,
+ MSM8939_MASTER_TCU0,
+ MSM8939_MASTER_USB_HS1,
+ MSM8939_MASTER_USB_HS2,
+ MSM8939_MASTER_VFE,
+ MSM8939_MASTER_VIDEO_P0,
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_1,
+ MSM8939_SNOC_MM_INT_2,
+ MSM8939_PNOC_INT_0,
+ MSM8939_PNOC_INT_1,
+ MSM8939_PNOC_MAS_0,
+ MSM8939_PNOC_MAS_1,
+ MSM8939_PNOC_SLV_0,
+ MSM8939_PNOC_SLV_1,
+ MSM8939_PNOC_SLV_2,
+ MSM8939_PNOC_SLV_3,
+ MSM8939_PNOC_SLV_4,
+ MSM8939_PNOC_SLV_8,
+ MSM8939_PNOC_SLV_9,
+ MSM8939_PNOC_SNOC_MAS,
+ MSM8939_PNOC_SNOC_SLV,
+ MSM8939_SNOC_QDSS_INT,
+ MSM8939_SLAVE_AMPSS_L2,
+ MSM8939_SLAVE_APSS,
+ MSM8939_SLAVE_LPASS,
+ MSM8939_SLAVE_BIMC_CFG,
+ MSM8939_SLAVE_BLSP_1,
+ MSM8939_SLAVE_BOOT_ROM,
+ MSM8939_SLAVE_CAMERA_CFG,
+ MSM8939_SLAVE_CATS_128,
+ MSM8939_SLAVE_OCMEM_64,
+ MSM8939_SLAVE_CLK_CTL,
+ MSM8939_SLAVE_CRYPTO_0_CFG,
+ MSM8939_SLAVE_DEHR_CFG,
+ MSM8939_SLAVE_DISPLAY_CFG,
+ MSM8939_SLAVE_EBI_CH0,
+ MSM8939_SLAVE_GRAPHICS_3D_CFG,
+ MSM8939_SLAVE_IMEM_CFG,
+ MSM8939_SLAVE_IMEM,
+ MSM8939_SLAVE_MPM,
+ MSM8939_SLAVE_MSG_RAM,
+ MSM8939_SLAVE_MSS,
+ MSM8939_SLAVE_PDM,
+ MSM8939_SLAVE_PMIC_ARB,
+ MSM8939_SLAVE_PNOC_CFG,
+ MSM8939_SLAVE_PRNG,
+ MSM8939_SLAVE_QDSS_CFG,
+ MSM8939_SLAVE_QDSS_STM,
+ MSM8939_SLAVE_RBCPR_CFG,
+ MSM8939_SLAVE_SDCC_1,
+ MSM8939_SLAVE_SDCC_2,
+ MSM8939_SLAVE_SECURITY,
+ MSM8939_SLAVE_SNOC_CFG,
+ MSM8939_SLAVE_SPDM,
+ MSM8939_SLAVE_SRVC_SNOC,
+ MSM8939_SLAVE_TCSR,
+ MSM8939_SLAVE_TLMM,
+ MSM8939_SLAVE_USB_HS1,
+ MSM8939_SLAVE_USB_HS2,
+ MSM8939_SLAVE_VENUS_CFG,
+ MSM8939_SNOC_BIMC_0_MAS,
+ MSM8939_SNOC_BIMC_0_SLV,
+ MSM8939_SNOC_BIMC_1_MAS,
+ MSM8939_SNOC_BIMC_1_SLV,
+ MSM8939_SNOC_BIMC_2_MAS,
+ MSM8939_SNOC_BIMC_2_SLV,
+ MSM8939_SNOC_INT_0,
+ MSM8939_SNOC_INT_1,
+ MSM8939_SNOC_INT_BIMC,
+ MSM8939_SNOC_PNOC_MAS,
+ MSM8939_SNOC_PNOC_SLV,
+};
+
+static const struct clk_bulk_data msm8939_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+DEFINE_QNODE(bimc_snoc_mas, MSM8939_BIMC_SNOC_MAS, 8, -1, -1, MSM8939_BIMC_SNOC_SLV);
+DEFINE_QNODE(bimc_snoc_slv, MSM8939_BIMC_SNOC_SLV, 16, -1, 2, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_1);
+DEFINE_QNODE(mas_apss, MSM8939_MASTER_AMPSS_M0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_audio, MSM8939_MASTER_LPASS, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_blsp_1, MSM8939_MASTER_BLSP_1, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_dehr, MSM8939_MASTER_DEHR, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_gfx, MSM8939_MASTER_GRAPHICS_3D, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_jpeg, MSM8939_MASTER_JPEG, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp0, MSM8939_MASTER_MDP_PORT0, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp1, MSM8939_MASTER_MDP_PORT1, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_cpp, MSM8939_MASTER_CPP, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8939_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_usb_hs2, MSM8939_MASTER_USB_HS2, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_vfe, MSM8939_MASTER_VFE, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_video, MSM8939_MASTER_VIDEO_P0, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mm_int_0, MSM8939_SNOC_MM_INT_0, 16, -1, -1, MSM8939_SNOC_BIMC_2_MAS);
+DEFINE_QNODE(mm_int_1, MSM8939_SNOC_MM_INT_1, 16, -1, -1, MSM8939_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(mm_int_2, MSM8939_SNOC_MM_INT_2, 16, -1, -1, MSM8939_SNOC_INT_0);
+DEFINE_QNODE(pcnoc_int_0, MSM8939_PNOC_INT_0, 8, -1, -1, MSM8939_PNOC_SNOC_MAS, MSM8939_PNOC_SLV_0, MSM8939_PNOC_SLV_1, MSM8939_PNOC_SLV_2, MSM8939_PNOC_SLV_3, MSM8939_PNOC_SLV_4, MSM8939_PNOC_SLV_8, MSM8939_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_1, MSM8939_PNOC_INT_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_m_0, MSM8939_PNOC_MAS_0, 8, -1, -1, MSM8939_PNOC_INT_0);
+DEFINE_QNODE(pcnoc_m_1, MSM8939_PNOC_MAS_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_s_0, MSM8939_PNOC_SLV_0, 4, -1, -1, MSM8939_SLAVE_CLK_CTL, MSM8939_SLAVE_TLMM, MSM8939_SLAVE_TCSR, MSM8939_SLAVE_SECURITY, MSM8939_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8939_PNOC_SLV_1, 4, -1, -1, MSM8939_SLAVE_IMEM_CFG, MSM8939_SLAVE_CRYPTO_0_CFG, MSM8939_SLAVE_MSG_RAM, MSM8939_SLAVE_PDM, MSM8939_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8939_PNOC_SLV_2, 4, -1, -1, MSM8939_SLAVE_SPDM, MSM8939_SLAVE_BOOT_ROM, MSM8939_SLAVE_BIMC_CFG, MSM8939_SLAVE_PNOC_CFG, MSM8939_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8939_PNOC_SLV_3, 4, -1, -1, MSM8939_SLAVE_MPM, MSM8939_SLAVE_SNOC_CFG, MSM8939_SLAVE_RBCPR_CFG, MSM8939_SLAVE_QDSS_CFG, MSM8939_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8939_PNOC_SLV_4, 4, -1, -1, MSM8939_SLAVE_VENUS_CFG, MSM8939_SLAVE_CAMERA_CFG, MSM8939_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8939_PNOC_SLV_8, 4, -1, -1, MSM8939_SLAVE_USB_HS1, MSM8939_SLAVE_SDCC_1, MSM8939_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8939_PNOC_SLV_9, 4, -1, -1, MSM8939_SLAVE_SDCC_2, MSM8939_SLAVE_LPASS, MSM8939_SLAVE_USB_HS2);
+DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
+DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
+DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_display_cfg, MSM8939_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8939_SLAVE_EBI_CH0, 16, -1, 0, 0);
+DEFINE_QNODE(slv_gfx_cfg, MSM8939_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, MSM8939_SLAVE_IMEM_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem, MSM8939_SLAVE_IMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_mpm, MSM8939_SLAVE_MPM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_msg_ram, MSM8939_SLAVE_MSG_RAM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_mss, MSM8939_SLAVE_MSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, MSM8939_SLAVE_PDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, MSM8939_SLAVE_PMIC_ARB, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pcnoc_cfg, MSM8939_SLAVE_PNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_prng, MSM8939_SLAVE_PRNG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, MSM8939_SLAVE_QDSS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, MSM8939_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8939_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, MSM8939_SLAVE_SDCC_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
+DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
+DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
+DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
+DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
+DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
+
+static struct qcom_icc_node *msm8939_snoc_nodes[] = {
+ [BIMC_SNOC_SLV] = &bimc_snoc_slv,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
+ [SLAVE_APSS] = &slv_apss,
+ [SLAVE_CATS_128] = &slv_cats_0,
+ [SLAVE_OCMEM_64] = &slv_cats_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+ [SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
+ [SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
+ [SNOC_BIMC_2_MAS] = &snoc_bimc_2_mas,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
+ [SNOC_QDSS_INT] = &qdss_int,
+};
+
+static struct qcom_icc_desc msm8939_snoc = {
+ .nodes = msm8939_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
+};
+
+static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
+ [MASTER_VIDEO_P0] = &mas_video,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_MDP_PORT0] = &mas_mdp0,
+ [MASTER_MDP_PORT1] = &mas_mdp1,
+ [MASTER_CPP] = &mas_cpp,
+ [SNOC_MM_INT_0] = &mm_int_0,
+ [SNOC_MM_INT_1] = &mm_int_1,
+ [SNOC_MM_INT_2] = &mm_int_2,
+};
+
+static struct qcom_icc_desc msm8939_snoc_mm = {
+ .nodes = msm8939_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
+};
+
+static struct qcom_icc_node *msm8939_bimc_nodes[] = {
+ [BIMC_SNOC_MAS] = &bimc_snoc_mas,
+ [MASTER_AMPSS_M0] = &mas_apss,
+ [MASTER_GRAPHICS_3D] = &mas_gfx,
+ [MASTER_TCU0] = &mas_tcu0,
+ [SLAVE_AMPSS_L2] = &slv_apps_l2,
+ [SLAVE_EBI_CH0] = &slv_ebi_ch0,
+ [SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
+ [SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
+ [SNOC_BIMC_2_SLV] = &snoc_bimc_2_slv,
+};
+
+static struct qcom_icc_desc msm8939_bimc = {
+ .nodes = msm8939_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
+};
+
+static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_DEHR] = &mas_dehr,
+ [MASTER_LPASS] = &mas_audio,
+ [MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
+ [MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
+ [MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_USB_HS1] = &mas_usb_hs1,
+ [MASTER_USB_HS2] = &mas_usb_hs2,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_MAS_0] = &pcnoc_m_0,
+ [PCNOC_MAS_1] = &pcnoc_m_1,
+ [PCNOC_SLV_0] = &pcnoc_s_0,
+ [PCNOC_SLV_1] = &pcnoc_s_1,
+ [PCNOC_SLV_2] = &pcnoc_s_2,
+ [PCNOC_SLV_3] = &pcnoc_s_3,
+ [PCNOC_SLV_4] = &pcnoc_s_4,
+ [PCNOC_SLV_8] = &pcnoc_s_8,
+ [PCNOC_SLV_9] = &pcnoc_s_9,
+ [PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BOOT_ROM] = &slv_boot_rom,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DEHR_CFG] = &slv_dehr_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_LPASS] = &slv_audio,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_MSG_RAM] = &slv_msg_ram,
+ [SLAVE_MSS] = &slv_mss,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SECURITY] = &slv_security,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_USB_HS1] = &slv_usb_hs1,
+ [SLAVE_USB_HS2] = &slv_usb_hs2,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
+};
+
+static struct qcom_icc_desc msm8939_pcnoc = {
+ .nodes = msm8939_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
+};
+
+static int msm8939_qnoc_probe(struct platform_device *pdev)
+{
+ return qnoc_probe(pdev, sizeof(msm8939_bus_clocks),
+ ARRAY_SIZE(msm8939_bus_clocks), msm8939_bus_clocks);
+}
+
+static const struct of_device_id msm8939_noc_of_match[] = {
+ { .compatible = "qcom,msm8939-bimc", .data = &msm8939_bimc },
+ { .compatible = "qcom,msm8939-pcnoc", .data = &msm8939_pcnoc },
+ { .compatible = "qcom,msm8939-snoc", .data = &msm8939_snoc },
+ { .compatible = "qcom,msm8939-snoc-mm", .data = &msm8939_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);
+
+static struct platform_driver msm8939_noc_driver = {
+ .probe = msm8939_qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8939",
+ .of_match_table = msm8939_noc_of_match,
+ },
+};
+module_platform_driver(msm8939_noc_driver);
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm MSM8939 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 9820709b43db..36a7e30a00be 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -9,15 +9,12 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/of_device.h>
-#include "smd-rpm.h"
-#define RPM_BUS_MASTER_REQ 0x73616d62
-#define RPM_BUS_SLAVE_REQ 0x766c7362
+#include "smd-rpm.h"
+#include "icc-rpm.h"
enum {
QCS404_MASTER_AMPSS_M0 = 1,
@@ -95,67 +92,11 @@ enum {
QCS404_SLAVE_LPASS,
};
-#define to_qcom_provider(_provider) \
- container_of(_provider, struct qcom_icc_provider, provider)
-
-static const struct clk_bulk_data bus_clocks[] = {
+static const struct clk_bulk_data qcs404_bus_clocks[] = {
{ .id = "bus" },
{ .id = "bus_a" },
};
-/**
- * struct qcom_icc_provider - Qualcomm specific interconnect provider
- * @provider: generic interconnect provider
- * @bus_clks: the clk_bulk_data table of bus clocks
- * @num_clks: the total number of clk_bulk_data entries
- */
-struct qcom_icc_provider {
- struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
- int num_clks;
-};
-
-#define QCS404_MAX_LINKS 12
-
-/**
- * struct qcom_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @id: a unique node identifier
- * @links: an array of nodes where we can go next while traversing
- * @num_links: the total number of @links
- * @buswidth: width of the interconnect between a node and the bus (bytes)
- * @mas_rpm_id: RPM id for devices that are bus masters
- * @slv_rpm_id: RPM id for devices that are bus slaves
- * @rate: current bus clock rate in Hz
- */
-struct qcom_icc_node {
- unsigned char *name;
- u16 id;
- u16 links[QCS404_MAX_LINKS];
- u16 num_links;
- u16 buswidth;
- int mas_rpm_id;
- int slv_rpm_id;
- u64 rate;
-};
-
-struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- ...) \
- static struct qcom_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
@@ -327,178 +268,11 @@ static struct qcom_icc_desc qcs404_snoc = {
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
- struct icc_provider *provider;
- struct icc_node *n;
- u64 sum_bw;
- u64 max_peak_bw;
- u64 rate;
- u32 agg_avg = 0;
- u32 agg_peak = 0;
- int ret, i;
-
- qn = src->data;
- provider = src->provider;
- qp = to_qcom_provider(provider);
-
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
-
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
-
- /* send bandwidth request message to the RPM processor */
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
- }
- }
-
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv error %d\n",
- ret);
- return ret;
- }
- }
-
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
-
- if (qn->rate == rate)
- return 0;
-
- for (i = 0; i < qp->num_clks; i++) {
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
- return ret;
- }
- }
-
- qn->rate = rate;
- return 0;
-}
-
-static int qnoc_probe(struct platform_device *pdev)
+static int qcs404_qnoc_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- /* wait for the RPM proxy */
- if (!qcom_icc_rpm_smd_available())
- return -EPROBE_DEFER;
-
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
- GFP_KERNEL);
- if (!qp->bus_clks)
- return -ENOMEM;
-
- qp->num_clks = ARRAY_SIZE(bus_clocks);
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
- provider->dev = dev;
- provider->set = qcom_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- provider->data = data;
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- dev_dbg(dev, "registered node %s\n", node->name);
-
- /* populate links */
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(provider);
-
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ return qnoc_probe(pdev, sizeof(qcs404_bus_clocks),
+ ARRAY_SIZE(qcs404_bus_clocks), qcs404_bus_clocks);
}
static const struct of_device_id qcs404_noc_of_match[] = {
@@ -510,7 +284,7 @@ static const struct of_device_id qcs404_noc_of_match[] = {
MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
static struct platform_driver qcs404_noc_driver = {
- .probe = qnoc_probe,
+ .probe = qcs404_qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-qcs404",
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
new file mode 100644
index 000000000000..a5a122ee3d21
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDX55 interconnect driver
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx55.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdx55.h"
+
+DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
+DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
+DEFINE_QNODE(xm_apps_rdwr, SDX55_MASTER_AMPSS_M0, 1, 16, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_audio, SDX55_MASTER_AUDIO, 1, 4, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_blsp1, SDX55_MASTER_BLSP_1, 1, 4, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SDX55_MASTER_QDSS_BAM, 1, 4, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qhm_qpic, SDX55_MASTER_QPIC, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
+DEFINE_QNODE(qhm_snoc_cfg, SDX55_MASTER_SNOC_CFG, 1, 4, SDX55_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qhm_spmi_fetcher1, SDX55_MASTER_SPMI_FETCHER, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
+DEFINE_QNODE(qnm_aggre_noc, SDX55_MASTER_ANOC_SNOC, 1, 8, SDX55_SLAVE_PCIE_0, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_USB3, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_ipa, SDX55_MASTER_IPA, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_TLMM, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_memnoc, SDX55_MASTER_MEM_NOC_SNOC, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_memnoc_pcie, SDX55_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_SLAVE_PCIE_0);
+DEFINE_QNODE(qxm_crypto, SDX55_MASTER_CRYPTO_CORE_0, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
+DEFINE_QNODE(xm_emac, SDX55_MASTER_EMAC, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_ipa2pcie_slv, SDX55_MASTER_IPA_PCIE, 1, 8, SDX55_SLAVE_PCIE_0);
+DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
+DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
+DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
+DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(qns_sys_pcie, SDX55_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhs_aop, SDX55_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SDX55_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SDX55_SLAVE_APPSS, 1, 4);
+DEFINE_QNODE(qhs_audio, SDX55_SLAVE_AUDIO, 1, 4);
+DEFINE_QNODE(qhs_blsp1, SDX55_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDX55_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDX55_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SDX55_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_ecc_cfg, SDX55_SLAVE_ECC_CFG, 1, 4);
+DEFINE_QNODE(qhs_emac_cfg, SDX55_SLAVE_EMAC_CFG, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SDX55_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDX55_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mss_cfg, SDX55_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_pcie_parf, SDX55_SLAVE_PCIE_PARF, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDX55_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_prng, SDX55_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDX55_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qpic, SDX55_SLAVE_QPIC, 1, 4);
+DEFINE_QNODE(qhs_sdc1, SDX55_SLAVE_SDCC_1, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDX55_SLAVE_SNOC_CFG, 1, 4, SDX55_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spmi_fetcher, SDX55_SLAVE_SPMI_FETCHER, 1, 4);
+DEFINE_QNODE(qhs_spmi_vgi_coex, SDX55_SLAVE_SPMI_VGI_COEX, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDX55_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SDX55_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_usb3, SDX55_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_phy, SDX55_SLAVE_USB3_PHY_CFG, 1, 4);
+DEFINE_QNODE(qns_aggre_noc, SDX55_SLAVE_ANOC_SNOC, 1, 8, SDX55_MASTER_ANOC_SNOC);
+DEFINE_QNODE(qns_snoc_memnoc, SDX55_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDX55_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDX55_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDX55_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie, SDX55_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SDX55_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
+DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_pn5, "PN5", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &xs_pcie);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_emac, &xm_usb3,
+	    &qns_aggre_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qhm_qdss_bam, &xm_qdss_etr);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx55_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_AMPSS_M0] = &xm_apps_rdwr,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+};
+
+static const struct qcom_icc_desc sdx55_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pn0,
+ &bcm_pn1,
+ &bcm_pn2,
+ &bcm_pn3,
+ &bcm_pn5,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn10,
+ &bcm_sn11,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_AUDIO] = &qhm_audio,
+ [MASTER_BLSP_1] = &qhm_blsp1,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_IPA] = &qnm_ipa,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_EMAC] = &xm_emac,
+ [MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
+ [MASTER_PCIE] = &xm_pcie,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_USB3] = &xm_usb3,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_AUDIO] = &qhs_audio,
+ [SLAVE_BLSP_1] = &qhs_blsp1,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_ECC_CFG] = &qhs_ecc_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_PARF] = &qhs_pcie_parf,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
+ [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+ [SLAVE_ANOC_SNOC] = &qns_aggre_noc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx55_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static const struct qcom_icc_desc sdx55_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdx55-mc-virt",
+ .data = &sdx55_mc_virt},
+ { .compatible = "qcom,sdx55-mem-noc",
+ .data = &sdx55_mem_noc},
+ { .compatible = "qcom,sdx55-system-noc",
+ .data = &sdx55_system_noc},
+ { .compatible = "qcom,sdx55-ipa-virt",
+ .data = &sdx55_ipa_virt},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdx55",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDX55 NoC driver");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdx55.h b/drivers/interconnect/qcom/sdx55.h
new file mode 100644
index 000000000000..deff8afe0631
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx55.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX55_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX55_H
+
+#define SDX55_MASTER_IPA_CORE 0
+#define SDX55_MASTER_LLCC 1
+#define SDX55_MASTER_TCU_0 2
+#define SDX55_MASTER_SNOC_GC_MEM_NOC 3
+#define SDX55_MASTER_AMPSS_M0 4
+#define SDX55_MASTER_AUDIO 5
+#define SDX55_MASTER_BLSP_1 6
+#define SDX55_MASTER_QDSS_BAM 7
+#define SDX55_MASTER_QPIC 8
+#define SDX55_MASTER_SNOC_CFG 9
+#define SDX55_MASTER_SPMI_FETCHER 10
+#define SDX55_MASTER_ANOC_SNOC 11
+#define SDX55_MASTER_IPA 12
+#define SDX55_MASTER_MEM_NOC_SNOC 13
+#define SDX55_MASTER_MEM_NOC_PCIE_SNOC 14
+#define SDX55_MASTER_CRYPTO_CORE_0 15
+#define SDX55_MASTER_EMAC 16
+#define SDX55_MASTER_IPA_PCIE 17
+#define SDX55_MASTER_PCIE 18
+#define SDX55_MASTER_QDSS_ETR 19
+#define SDX55_MASTER_SDCC_1 20
+#define SDX55_MASTER_USB3 21
+#define SDX55_SLAVE_IPA_CORE 22
+#define SDX55_SLAVE_EBI_CH0 23
+#define SDX55_SLAVE_LLCC 24
+#define SDX55_SLAVE_MEM_NOC_SNOC 25
+#define SDX55_SLAVE_MEM_NOC_PCIE_SNOC 26
+#define SDX55_SLAVE_ANOC_SNOC 27
+#define SDX55_SLAVE_SNOC_CFG 28
+#define SDX55_SLAVE_EMAC_CFG 29
+#define SDX55_SLAVE_USB3 30
+#define SDX55_SLAVE_TLMM 31
+#define SDX55_SLAVE_SPMI_FETCHER 32
+#define SDX55_SLAVE_QDSS_CFG 33
+#define SDX55_SLAVE_PDM 34
+#define SDX55_SLAVE_SNOC_MEM_NOC_GC 35
+#define SDX55_SLAVE_TCSR 36
+#define SDX55_SLAVE_CNOC_DDRSS 37
+#define SDX55_SLAVE_SPMI_VGI_COEX 38
+#define SDX55_SLAVE_QPIC 39
+#define SDX55_SLAVE_OCIMEM 40
+#define SDX55_SLAVE_IPA_CFG 41
+#define SDX55_SLAVE_USB3_PHY_CFG 42
+#define SDX55_SLAVE_AOP 43
+#define SDX55_SLAVE_BLSP_1 44
+#define SDX55_SLAVE_SDCC_1 45
+#define SDX55_SLAVE_CNOC_MSS 46
+#define SDX55_SLAVE_PCIE_PARF 47
+#define SDX55_SLAVE_ECC_CFG 48
+#define SDX55_SLAVE_AUDIO 49
+#define SDX55_SLAVE_AOSS 51
+#define SDX55_SLAVE_PRNG 52
+#define SDX55_SLAVE_CRYPTO_0_CFG 53
+#define SDX55_SLAVE_TCU 54
+#define SDX55_SLAVE_CLK_CTL 55
+#define SDX55_SLAVE_IMEM_CFG 56
+#define SDX55_SLAVE_SERVICE_SNOC 57
+#define SDX55_SLAVE_PCIE_0 58
+#define SDX55_SLAVE_QDSS_STM 59
+#define SDX55_SLAVE_APPSS 60
+
+#endif
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 626b97d0dd21..a3cbafb603f5 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,6 +10,7 @@ config AMD_IOMMU
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
+ select IOMMU_IO_PGTABLE
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index dc5a2fa4fd37..a935f8f4b974 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6b8cbdf71714..026ce7f8d993 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -36,6 +36,7 @@ extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
+extern enum io_pgtable_fmt amd_iommu_pgtable;
/* IOMMUv2 specific functions */
struct iommu_domain;
@@ -56,6 +57,10 @@ extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address);
+extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+extern void amd_iommu_domain_update(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
@@ -84,12 +89,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
+ return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -102,6 +104,21 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
return phys_to_virt(__sme_clr(paddr));
}
+static inline
+void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+ atomic64_set(&domain->iop.pt_root, root);
+ domain->iop.root = (u64 *)(root & PAGE_MASK);
+ domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static inline
+void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+ amd_iommu_domain_set_pt_root(domain, 0);
+}
+
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev);
@@ -114,4 +131,6 @@ void amd_iommu_apply_ivrs_quirks(void);
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
+extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode);
#endif
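
The two inline helpers added above fold the page-table root pointer and the paging mode into a single u64 (pt_root), relying on the root being page-aligned so the low three bits are free for the mode; publishing it via atomic64_set() lets readers snapshot pointer and mode together. A minimal user-space sketch of the same encode/decode trick (names hypothetical, 4 KiB alignment assumed):

#include <assert.h>
#include <stdint.h>

#define PT_PAGE_MASK	(~0xfffULL)	/* assumes a 4 KiB-aligned root */

/* Pack a page-aligned root pointer and a 3-bit mode into one u64. */
static uint64_t pt_root_encode(uint64_t root, int mode)
{
	return (root & PT_PAGE_MASK) | ((uint64_t)mode & 7);
}

/* Recover both fields from the packed value. */
static void pt_root_decode(uint64_t pt_root, uint64_t *root, int *mode)
{
	*root = pt_root & PT_PAGE_MASK;
	*mode = pt_root & 7;
}

int main(void)
{
	uint64_t root, packed = pt_root_encode(0x123456789000ULL, 3);
	int mode;

	pt_root_decode(packed, &root, &mode);
	assert(root == 0x123456789000ULL && mode == 3);
	return 0;
}
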
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 553587827771..6937e3674a16 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
+#include <linux/io-pgtable.h>
/*
* Maximum number of IOMMUs supported
@@ -252,6 +253,19 @@
#define GA_GUEST_NR 0x1
+#define IOMMU_IN_ADDR_BIT_SIZE 52
+#define IOMMU_OUT_ADDR_BIT_SIZE 52
+
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * 512GB Pages are not supported due to a hardware bug
+ */
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
@@ -387,6 +401,10 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
@@ -466,6 +484,27 @@ struct amd_irte_ops;
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define io_pgtable_to_data(x) \
+ container_of((x), struct amd_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define io_pgtable_ops_to_domain(x) \
+ container_of(io_pgtable_ops_to_data(x), \
+ struct protection_domain, iop)
+
+#define io_pgtable_cfg_to_data(x) \
+ container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+
+struct amd_io_pgtable {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable iop;
+ int mode;
+ u64 *root;
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -474,9 +513,9 @@ struct protection_domain {
struct list_head dev_list; /* List of all devices in this domain */
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
+ struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- atomic64_t pt_root; /* pgtable root and pgtable mode */
int glx; /* Number of levels for GCR3 table */
u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
@@ -484,12 +523,6 @@ struct protection_domain {
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};
-/* For decocded pt_root */
-struct domain_pgtable {
- int mode;
- u64 *root;
-};
-
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
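
AMD_IOMMU_PGSIZES, moved into this header, deserves a second look: clearing the low twelve bits drops everything below 4 KiB, and clearing bit 39 (2ULL << 38) removes the 512 GB size the comment flags as broken, leaving every other power of two advertised. A quick stand-alone check of the bitmap (assumption: only bit 39 should be missing):

#include <stdio.h>
#include <stdint.h>

#define AMD_IOMMU_PGSIZES	((~0xFFFULL) & ~(2ULL << 38))

int main(void)
{
	/* Walk the plausible page-size bits: 4 KiB (bit 12) up to 2^48. */
	for (int bit = 12; bit <= 48; bit++) {
		if (AMD_IOMMU_PGSIZES & (1ULL << bit))
			printf("advertised: 2^%d bytes\n", bit);
		else
			printf("excluded:   2^%d bytes (the 512GB hw bug)\n", bit);
	}
	return 0;
}
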
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index f54cd79b43e4..9126efcbaf2c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -147,6 +148,8 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
+enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
@@ -254,9 +257,13 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
+static u32 amd_iommu_ivinfo __initdata;
+
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@@ -296,6 +303,18 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ iommu->features = h->efr_reg;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1577,6 +1596,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
break;
default:
return -EINVAL;
@@ -1695,13 +1717,11 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value, bool is_write);
-
-static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
{
+ int retry;
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0, save_reg = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
@@ -1709,17 +1729,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* save the value to restore, if writable */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
goto pc_false;
- /* Check if the performance counters can be written to */
- if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
- (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2))
+ /*
+	 * Disable power gating by programming the performance counter
+	 * source to 20, i.e. reads and writes from/to the IOMMU Reserved
+	 * Register [MMIO Offset 1FF8h], which are ignored. The counter
+	 * therefore never increments during this init phase.
+	 * (Note: the event is also deprecated.)
+ */
+ val = 20;
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
goto pc_false;
+ /* Check if the performance counters can be written to */
+ val = 0xabcd;
+ for (retry = 5; retry; retry--) {
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
+ val2)
+ break;
+
+ /* Wait about 20 msec for power gating to disable and retry. */
+ msleep(20);
+ }
+
/* restore */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
+ goto pc_false;
+
+ if (val != val2)
goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
@@ -1770,6 +1812,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features)
+		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ features, iommu->features);
+}
+
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
@@ -1789,8 +1860,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
- /* read extended feature bits */
- iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -1883,7 +1953,7 @@ static void print_iommu_info(void)
struct pci_dev *pdev = iommu->dev;
int i;
- pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
+ pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pci_info(pdev, "Extended features (%#llx):",
@@ -1911,7 +1981,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
- int ret = 0;
+ int ret;
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
@@ -1973,8 +2043,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2237,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
@@ -2608,6 +2677,11 @@ static void __init free_dma_resources(void)
free_unity_maps();
}
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
@@ -2638,8 +2712,8 @@ static void __init free_dma_resources(void)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
+ int i, remap_cache_sz, ret;
acpi_status status;
- int i, remap_cache_sz, ret = 0;
u32 pci_id;
if (!amd_iommu_detected)
@@ -2662,6 +2736,8 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ ivinfo_init(ivrs_base);
+
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
@@ -2781,7 +2857,6 @@ static int __init early_amd_iommu_init(void)
out:
/* Don't leak any ACPI memory */
acpi_put_table(ivrs_base);
- ivrs_base = NULL;
return ret;
}
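
The reworked init_iommu_perf_ctr() above is a write-read-verify probe hardened against power gating: it saves both the counter and its source register, points the source at a never-incrementing event, then retries the test write up to five times with a 20 ms sleep before comparing. A toy model of that retry idiom (all names hypothetical; a fake register that drops the first few writes stands in for iommu_pc_get_set_reg()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fake power-gated register: writes are dropped until the block wakes. */
static uint64_t reg;
static int wake_countdown = 3;

static int reg_write(uint64_t val)
{
	if (wake_countdown > 0) {
		wake_countdown--;	/* write silently swallowed */
		return 0;
	}
	reg = val;
	return 0;
}

static int reg_read(uint64_t *val)
{
	*val = reg;
	return 0;
}

/* Write-read-verify with a bounded retry, mirroring the msleep(20) loop. */
static bool counter_is_writable(void)
{
	uint64_t val2 = 0;
	int retry;

	for (retry = 5; retry; retry--) {
		if (reg_write(0xabcd) || reg_read(&val2) || val2)
			break;
		usleep(20 * 1000);	/* give power gating time to drop */
	}
	return val2 == 0xabcd;
}

int main(void)
{
	printf("counter writable: %s\n", counter_is_writable() ? "yes" : "no");
	return 0;
}
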
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
new file mode 100644
index 000000000000..1c4961e05c12
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table allocator.
+ *
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+static void v1_tlb_flush_all(void *cookie)
+{
+}
+
+static void v1_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v1_flush_ops = {
+ .tlb_flush_all = v1_tlb_flush_all,
+ .tlb_flush_walk = v1_tlb_flush_walk,
+ .tlb_add_page = v1_tlb_add_page,
+};
+
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+ unsigned long *count)
+{
+ unsigned long pte_mask, pg_size, cnt;
+ u64 *fpte;
+
+ pg_size = PTE_PAGE_SIZE(*pte);
+ cnt = PAGE_SIZE_PTE_COUNT(pg_size);
+ pte_mask = ~((cnt << 3) - 1);
+ fpte = (u64 *)(((unsigned long)pte) & pte_mask);
+
+ if (page_size)
+ *page_size = pg_size;
+
+ if (count)
+ *count = cnt;
+
+ return fpte;
+}
+
+/****************************************************************************
+ *
+ * The functions below are used to create the page table mappings for
+ * unity mapped regions.
+ *
+ ****************************************************************************/
+
+static void free_page_list(struct page *freelist)
+{
+ while (freelist != NULL) {
+ unsigned long p = (unsigned long)page_address(freelist);
+
+ freelist = freelist->freelist;
+ free_page(p);
+ }
+}
+
+static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+{
+ struct page *p = virt_to_page((void *)pt);
+
+ p->freelist = freelist;
+
+ return p;
+}
+
+#define DEFINE_FREE_PT_FN(LVL, FN) \
+static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
+{ \
+ unsigned long p; \
+ u64 *pt; \
+ int i; \
+ \
+ pt = (u64 *)__pt; \
+ \
+ for (i = 0; i < 512; ++i) { \
+ /* PTE present? */ \
+ if (!IOMMU_PTE_PRESENT(pt[i])) \
+ continue; \
+ \
+ /* Large PTE? */ \
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
+ PM_PTE_LEVEL(pt[i]) == 7) \
+ continue; \
+ \
+ p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
+ freelist = FN(p, freelist); \
+ } \
+ \
+ return free_pt_page((unsigned long)pt, freelist); \
+}
+
+DEFINE_FREE_PT_FN(l2, free_pt_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
+static struct page *free_sub_pt(unsigned long root, int mode,
+ struct page *freelist)
+{
+ switch (mode) {
+ case PAGE_MODE_NONE:
+ case PAGE_MODE_7_LEVEL:
+ break;
+ case PAGE_MODE_1_LEVEL:
+ freelist = free_pt_page(root, freelist);
+ break;
+ case PAGE_MODE_2_LEVEL:
+ freelist = free_pt_l2(root, freelist);
+ break;
+ case PAGE_MODE_3_LEVEL:
+ freelist = free_pt_l3(root, freelist);
+ break;
+ case PAGE_MODE_4_LEVEL:
+ freelist = free_pt_l4(root, freelist);
+ break;
+ case PAGE_MODE_5_LEVEL:
+ freelist = free_pt_l5(root, freelist);
+ break;
+ case PAGE_MODE_6_LEVEL:
+ freelist = free_pt_l6(root, freelist);
+ break;
+ default:
+ BUG();
+ }
+
+ return freelist;
+}
+
+void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode)
+{
+ u64 pt_root;
+
+ /* lowest 3 bits encode pgtable mode */
+ pt_root = mode & 7;
+ pt_root |= (u64)root;
+
+ amd_iommu_domain_set_pt_root(domain, pt_root);
+}
+
+/*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
+ gfp_t gfp)
+{
+ unsigned long flags;
+ bool ret = true;
+ u64 *pte;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+ goto out;
+
+ ret = false;
+ if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+ goto out;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ goto out;
+
+ *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+
+ domain->iop.root = pte;
+ domain->iop.mode += 1;
+ amd_iommu_update_and_flush_device_table(domain);
+ amd_iommu_domain_flush_complete(domain);
+
+ /*
+ * Device Table needs to be updated and flushed before the new root can
+ * be published.
+ */
+ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
+
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long page_size,
+ u64 **pte_page,
+ gfp_t gfp,
+ bool *updated)
+{
+ int level, end_lvl;
+ u64 *pte, *page;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+ /*
+ * Return an error if there is no memory to update the
+ * page-table.
+ */
+ if (!increase_address_space(domain, address, gfp))
+ return NULL;
+ }
+
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+ while (level > end_lvl) {
+ u64 __pte, __npte;
+ int pte_level;
+
+ __pte = *pte;
+ pte_level = PM_PTE_LEVEL(__pte);
+
+ /*
+ * If we replace a series of large PTEs, we need
+ * to tear down all of them.
+ */
+ if (IOMMU_PTE_PRESENT(__pte) &&
+ pte_level == PAGE_MODE_7_LEVEL) {
+ unsigned long count, i;
+ u64 *lpte;
+
+ lpte = first_pte_l7(pte, NULL, &count);
+
+ /*
+ * Unmap the replicated PTEs that still match the
+ * original large mapping
+ */
+ for (i = 0; i < count; ++i)
+ cmpxchg64(&lpte[i], __pte, 0ULL);
+
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte) ||
+ pte_level == PAGE_MODE_NONE) {
+ page = (u64 *)get_zeroed_page(gfp);
+
+ if (!page)
+ return NULL;
+
+ __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
+
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_page((unsigned long)page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ /* No level skipping support yet */
+ if (pte_level != level)
+ return NULL;
+
+ level -= 1;
+
+ pte = IOMMU_PTE_PAGE(__pte);
+
+ if (pte_page && level == end_lvl)
+ *pte_page = pte;
+
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long address,
+ unsigned long *page_size)
+{
+ int level;
+ u64 *pte;
+
+ *page_size = 0;
+
+ if (address > PM_LEVEL_SIZE(pgtable->mode))
+ return NULL;
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+
+ while (level > 0) {
+
+ /* Not Present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Large PTE */
+ if (PM_PTE_LEVEL(*pte) == 7 ||
+ PM_PTE_LEVEL(*pte) == 0)
+ break;
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
+ level -= 1;
+
+ /* Walk to the next level */
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+ pte = first_pte_l7(pte, page_size, NULL);
+
+ return pte;
+}
+
+static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+{
+ unsigned long pt;
+ int mode;
+
+ while (cmpxchg64(pte, pteval, 0) != pteval) {
+		pr_warn("IOMMU pte changed since we read it\n");
+ pteval = *pte;
+ }
+
+ if (!IOMMU_PTE_PRESENT(pteval))
+ return freelist;
+
+ pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
+ mode = IOMMU_PTE_MODE(pteval);
+
+ return free_sub_pt(pt, mode, freelist);
+}
+
+/*
+ * Generic mapping functions. It maps a physical address into a DMA
+ * address space. It allocates the page table pages if necessary.
+ * In the future it can be extended to a generic mapping function
+ * supporting all features of AMD IOMMU page tables like level skipping
+ * and full 64 bit address spaces.
+ */
+static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+ struct page *freelist = NULL;
+ bool updated = false;
+ u64 __pte, *pte;
+ int ret, i, count;
+
+ BUG_ON(!IS_ALIGNED(iova, size));
+ BUG_ON(!IS_ALIGNED(paddr, size));
+
+ ret = -EINVAL;
+ if (!(prot & IOMMU_PROT_MASK))
+ goto out;
+
+ count = PAGE_SIZE_PTE_COUNT(size);
+ pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
+
+ for (i = 0; i < count; ++i)
+ freelist = free_clear_pte(&pte[i], pte[i], freelist);
+
+ if (freelist != NULL)
+ updated = true;
+
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+	} else {
+		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+	}
+
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
+
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ ret = 0;
+
+out:
+ if (updated) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ /*
+ * Flush domain TLB(s) and wait for completion. Any Device-Table
+ * Updates and flushing already happened in
+ * increase_address_space().
+ */
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
+ }
+
+ /* Everything flushed out, free pages now */
+ free_page_list(freelist);
+
+ return ret;
+}
+
+static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t size,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long long unmapped;
+ unsigned long unmap_size;
+ u64 *pte;
+
+ BUG_ON(!is_power_of_2(size));
+
+ unmapped = 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (pte) {
+ int i, count;
+
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+ }
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return iova;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+/*
+ * ----------------------------------------------------
+ */
+static void v1_free_pgtable(struct io_pgtable *iop)
+{
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+ struct protection_domain *dom;
+ struct page *freelist = NULL;
+ unsigned long root;
+
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return;
+
+ dom = container_of(pgtable, struct protection_domain, iop);
+
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+ pgtable->mode > PAGE_MODE_6_LEVEL);
+
+ root = (unsigned long)pgtable->root;
+ freelist = free_sub_pt(root, pgtable->mode, freelist);
+
+ free_page_list(freelist);
+}
+
+static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+
+	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
+	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
+	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
+ cfg->tlb = &v1_flush_ops;
+
+ pgtable->iop.ops.map = iommu_v1_map_page;
+ pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
+
+ return &pgtable->iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
+ .alloc = v1_alloc_pgtable,
+ .free = v1_free_pgtable,
+};
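
The init_fns just registered are the whole contract with the generic io-pgtable layer: once a format is selected, a consumer only calls alloc_io_pgtable_ops() and dispatches through the returned ops. A condensed kernel-side sketch of that consumer path (mirroring protection_domain_alloc() in the iommu.c hunks below; error handling trimmed):

#include <linux/errno.h>
#include <linux/io-pgtable.h>

static int domain_init_pgtable(struct protection_domain *domain)
{
	struct io_pgtable_ops *ops;

	/* The cookie comes back as the argument to the v1_flush_ops callbacks. */
	ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, &domain->iop.pgtbl_cfg, domain);
	if (!ops)
		return -ENOMEM;

	/*
	 * ops->map, ops->unmap and ops->iova_to_phys now dispatch to
	 * iommu_v1_map_page() and friends defined above; teardown goes
	 * through free_io_pgtable_ops(), which ends up in v1_free_pgtable().
	 */
	return 0;
}
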
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7e2c445a1fae..a69a8b573e40 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
+#include <linux/io-pgtable.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -57,16 +58,6 @@
#define HT_RANGE_START (0xfd00000000ULL)
#define HT_RANGE_END (0xffffffffffULL)
-/*
- * This bitmap is used to advertise the page sizes our hardware support
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * 512GB Pages are not supported due to a hardware bug
- */
-#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
-
#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
static DEFINE_SPINLOCK(pd_bitmap_lock);
@@ -96,10 +87,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
-static void update_domain(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void update_and_flush_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable);
/****************************************************************************
*
@@ -151,37 +139,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
-{
- u64 pt_root = atomic64_read(&domain->pt_root);
-
- pgtable->root = (u64 *)(pt_root & PAGE_MASK);
- pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
-static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
-{
- atomic64_set(&domain->pt_root, root);
-}
-
-static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
-{
- amd_iommu_domain_set_pt_root(domain, 0);
-}
-
-static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
- u64 *root, int mode)
-{
- u64 pt_root;
-
- /* lowest 3 bits encode pgtable mode */
- pt_root = mode & 7;
- pt_root |= (u64)root;
-
- amd_iommu_domain_set_pt_root(domain, pt_root);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -437,29 +394,6 @@ static void amd_iommu_uninit_device(struct device *dev)
*/
}
-/*
- * Helper function to get the first pte of a large mapping
- */
-static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
- unsigned long *count)
-{
- unsigned long pte_mask, pg_size, cnt;
- u64 *fpte;
-
- pg_size = PTE_PAGE_SIZE(*pte);
- cnt = PAGE_SIZE_PTE_COUNT(pg_size);
- pte_mask = ~((cnt << 3) - 1);
- fpte = (u64 *)(((unsigned long)pte) & pte_mask);
-
- if (page_size)
- *page_size = pg_size;
-
- if (count)
- *count = cnt;
-
- return fpte;
-}
-
/****************************************************************************
*
* Interrupt handling functions
@@ -1335,12 +1269,12 @@ static void domain_flush_pages(struct protection_domain *domain,
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
{
__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
-static void domain_flush_complete(struct protection_domain *domain)
+void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
@@ -1365,7 +1299,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
domain_flush_pages(domain, iova, size);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
@@ -1384,443 +1318,6 @@ static void domain_flush_devices(struct protection_domain *domain)
/****************************************************************************
*
- * The functions below are used the create the page table mappings for
- * unity mapped regions.
- *
- ****************************************************************************/
-
-static void free_page_list(struct page *freelist)
-{
- while (freelist != NULL) {
- unsigned long p = (unsigned long)page_address(freelist);
- freelist = freelist->freelist;
- free_page(p);
- }
-}
-
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
-{
- struct page *p = virt_to_page((void *)pt);
-
- p->freelist = freelist;
-
- return p;
-}
-
-#define DEFINE_FREE_PT_FN(LVL, FN) \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
-{ \
- unsigned long p; \
- u64 *pt; \
- int i; \
- \
- pt = (u64 *)__pt; \
- \
- for (i = 0; i < 512; ++i) { \
- /* PTE present? */ \
- if (!IOMMU_PTE_PRESENT(pt[i])) \
- continue; \
- \
- /* Large PTE? */ \
- if (PM_PTE_LEVEL(pt[i]) == 0 || \
- PM_PTE_LEVEL(pt[i]) == 7) \
- continue; \
- \
- p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
- freelist = FN(p, freelist); \
- } \
- \
- return free_pt_page((unsigned long)pt, freelist); \
-}
-
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
-
-static struct page *free_sub_pt(unsigned long root, int mode,
- struct page *freelist)
-{
- switch (mode) {
- case PAGE_MODE_NONE:
- case PAGE_MODE_7_LEVEL:
- break;
- case PAGE_MODE_1_LEVEL:
- freelist = free_pt_page(root, freelist);
- break;
- case PAGE_MODE_2_LEVEL:
- freelist = free_pt_l2(root, freelist);
- break;
- case PAGE_MODE_3_LEVEL:
- freelist = free_pt_l3(root, freelist);
- break;
- case PAGE_MODE_4_LEVEL:
- freelist = free_pt_l4(root, freelist);
- break;
- case PAGE_MODE_5_LEVEL:
- freelist = free_pt_l5(root, freelist);
- break;
- case PAGE_MODE_6_LEVEL:
- freelist = free_pt_l6(root, freelist);
- break;
- default:
- BUG();
- }
-
- return freelist;
-}
-
-static void free_pagetable(struct domain_pgtable *pgtable)
-{
- struct page *freelist = NULL;
- unsigned long root;
-
- if (pgtable->mode == PAGE_MODE_NONE)
- return;
-
- BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
- pgtable->mode > PAGE_MODE_6_LEVEL);
-
- root = (unsigned long)pgtable->root;
- freelist = free_sub_pt(root, pgtable->mode, freelist);
-
- free_page_list(freelist);
-}
-
-/*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits to a size up
- * to 64 bits.
- */
-static bool increase_address_space(struct protection_domain *domain,
- unsigned long address,
- gfp_t gfp)
-{
- struct domain_pgtable pgtable;
- unsigned long flags;
- bool ret = true;
- u64 *pte;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address <= PM_LEVEL_SIZE(pgtable.mode))
- goto out;
-
- ret = false;
- if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
- goto out;
-
- pte = (void *)get_zeroed_page(gfp);
- if (!pte)
- goto out;
-
- *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
-
- pgtable.root = pte;
- pgtable.mode += 1;
- update_and_flush_device_table(domain, &pgtable);
- domain_flush_complete(domain);
-
- /*
- * Device Table needs to be updated and flushed before the new root can
- * be published.
- */
- amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
-
- ret = true;
-
-out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *alloc_pte(struct protection_domain *domain,
- unsigned long address,
- unsigned long page_size,
- u64 **pte_page,
- gfp_t gfp,
- bool *updated)
-{
- struct domain_pgtable pgtable;
- int level, end_lvl;
- u64 *pte, *page;
-
- BUG_ON(!is_power_of_2(page_size));
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- while (address > PM_LEVEL_SIZE(pgtable.mode)) {
- /*
- * Return an error if there is no memory to update the
- * page-table.
- */
- if (!increase_address_space(domain, address, gfp))
- return NULL;
-
- /* Read new values to check if update was successful */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- }
-
-
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
- address = PAGE_SIZE_ALIGN(address, page_size);
- end_lvl = PAGE_SIZE_LEVEL(page_size);
-
- while (level > end_lvl) {
- u64 __pte, __npte;
- int pte_level;
-
- __pte = *pte;
- pte_level = PM_PTE_LEVEL(__pte);
-
- /*
- * If we replace a series of large PTEs, we need
- * to tear down all of them.
- */
- if (IOMMU_PTE_PRESENT(__pte) &&
- pte_level == PAGE_MODE_7_LEVEL) {
- unsigned long count, i;
- u64 *lpte;
-
- lpte = first_pte_l7(pte, NULL, &count);
-
- /*
- * Unmap the replicated PTEs that still match the
- * original large mapping
- */
- for (i = 0; i < count; ++i)
- cmpxchg64(&lpte[i], __pte, 0ULL);
-
- *updated = true;
- continue;
- }
-
- if (!IOMMU_PTE_PRESENT(__pte) ||
- pte_level == PAGE_MODE_NONE) {
- page = (u64 *)get_zeroed_page(gfp);
-
- if (!page)
- return NULL;
-
- __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
-
- /* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
- free_page((unsigned long)page);
- else if (IOMMU_PTE_PRESENT(__pte))
- *updated = true;
-
- continue;
- }
-
- /* No level skipping support yet */
- if (pte_level != level)
- return NULL;
-
- level -= 1;
-
- pte = IOMMU_PTE_PAGE(__pte);
-
- if (pte_page && level == end_lvl)
- *pte_page = pte;
-
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- }
-
- return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct protection_domain *domain,
- unsigned long address,
- unsigned long *page_size)
-{
- struct domain_pgtable pgtable;
- int level;
- u64 *pte;
-
- *page_size = 0;
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address > PM_LEVEL_SIZE(pgtable.mode))
- return NULL;
-
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
-
- while (level > 0) {
-
- /* Not Present */
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
-
- /* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 7 ||
- PM_PTE_LEVEL(*pte) == 0)
- break;
-
- /* No level skipping support yet */
- if (PM_PTE_LEVEL(*pte) != level)
- return NULL;
-
- level -= 1;
-
- /* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
- }
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
- pte = first_pte_l7(pte, page_size, NULL);
-
- return pte;
-}
-
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
-{
- unsigned long pt;
- int mode;
-
- while (cmpxchg64(pte, pteval, 0) != pteval) {
- pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
-
- if (!IOMMU_PTE_PRESENT(pteval))
- return freelist;
-
- pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
- mode = IOMMU_PTE_MODE(pteval);
-
- return free_sub_pt(pt, mode, freelist);
-}
-
-/*
- * Generic mapping functions. It maps a physical address into a DMA
- * address space. It allocates the page table pages if necessary.
- * In the future it can be extended to a generic mapping function
- * supporting all features of AMD IOMMU page tables like level skipping
- * and full 64 bit address spaces.
- */
-static int iommu_map_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long phys_addr,
- unsigned long page_size,
- int prot,
- gfp_t gfp)
-{
- struct page *freelist = NULL;
- bool updated = false;
- u64 __pte, *pte;
- int ret, i, count;
-
- BUG_ON(!IS_ALIGNED(bus_addr, page_size));
- BUG_ON(!IS_ALIGNED(phys_addr, page_size));
-
- ret = -EINVAL;
- if (!(prot & IOMMU_PROT_MASK))
- goto out;
-
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
-
- ret = -ENOMEM;
- if (!pte)
- goto out;
-
- for (i = 0; i < count; ++i)
- freelist = free_clear_pte(&pte[i], pte[i], freelist);
-
- if (freelist != NULL)
- updated = true;
-
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
-
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
-
- ret = 0;
-
-out:
- if (updated) {
- unsigned long flags;
-
- spin_lock_irqsave(&dom->lock, flags);
- /*
- * Flush domain TLB(s) and wait for completion. Any Device-Table
- * Updates and flushing already happened in
- * increase_address_space().
- */
- domain_flush_tlb_pde(dom);
- domain_flush_complete(dom);
- spin_unlock_irqrestore(&dom->lock, flags);
- }
-
- /* Everything flushed out, free pages now */
- free_page_list(freelist);
-
- return ret;
-}
-
-static unsigned long iommu_unmap_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long page_size)
-{
- unsigned long long unmapped;
- unsigned long unmap_size;
- u64 *pte;
-
- BUG_ON(!is_power_of_2(page_size));
-
- unmapped = 0;
-
- while (unmapped < page_size) {
-
- pte = fetch_pte(dom, bus_addr, &unmap_size);
-
- if (pte) {
- int i, count;
-
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
- for (i = 0; i < count; i++)
- pte[i] = 0ULL;
- }
-
- bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
- unmapped += unmap_size;
- }
-
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
- return unmapped;
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
* is enabled, every device get its own domain. The most important thing
@@ -1896,17 +1393,16 @@ static void free_gcr3_table(struct protection_domain *domain)
}
static void set_dte_entry(u16 devid, struct protection_domain *domain,
- struct domain_pgtable *pgtable,
bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
- if (pgtable->mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(pgtable->root);
+ if (domain->iop.mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(domain->iop.root);
- pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
+ pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
@@ -1979,7 +1475,6 @@ static void clear_dte_entry(u16 devid)
static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
struct amd_iommu *iommu;
bool ats;
@@ -1995,8 +1490,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- set_dte_entry(dev_data->devid, domain, &pgtable,
+ set_dte_entry(dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
@@ -2020,10 +1514,10 @@ static void do_detach(struct iommu_dev_data *dev_data)
device_flush_dte(dev_data);
/* Flush IOTLB */
- domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
/* Wait for the flushes to finish */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
@@ -2156,9 +1650,9 @@ skip_ats_check:
* left the caches in the IOMMU dirty. So we have to flush
* here to evict all dirty stuff.
*/
- domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
out:
spin_unlock(&dev_data->lock);
@@ -2303,36 +1797,31 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
*
*****************************************************************************/
-static void update_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
+static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain, pgtable,
+ set_dte_entry(dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
}
}
-static void update_and_flush_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
- update_device_table(domain, pgtable);
+ update_device_table(domain);
domain_flush_devices(domain);
}
-static void update_domain(struct protection_domain *domain)
+void amd_iommu_domain_update(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- update_and_flush_device_table(domain, &pgtable);
+ amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_complete(domain);
}
int __init amd_iommu_init_api(void)
@@ -2400,22 +1889,19 @@ static void cleanup_domain(struct protection_domain *domain)
static void protection_domain_free(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
if (!domain)
return;
if (domain->id)
domain_id_free(domain->id);
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- amd_iommu_domain_clr_pt_root(domain);
- free_pagetable(&pgtable);
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
kfree(domain);
}
-static int protection_domain_init(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain, int mode)
{
u64 *pt_root = NULL;
@@ -2438,34 +1924,55 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return 0;
}
-static struct protection_domain *protection_domain_alloc(int mode)
+static struct protection_domain *protection_domain_alloc(unsigned int type)
{
+ struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
+ int pgtable = amd_iommu_pgtable;
+ int mode = DEFAULT_PGTABLE_LEVEL;
+ int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- if (protection_domain_init(domain, mode))
+ /*
+ * Force IOMMU v1 page table when iommu=pt and
+ * when allocating domain for pass-through devices.
+ */
+ if (type == IOMMU_DOMAIN_IDENTITY) {
+ pgtable = AMD_IOMMU_V1;
+ mode = PAGE_MODE_NONE;
+ } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+ pgtable = AMD_IOMMU_V1;
+ }
+
+ switch (pgtable) {
+ case AMD_IOMMU_V1:
+ ret = protection_domain_init_v1(domain, mode);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
goto out_err;
- return domain;
+ pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
+ if (!pgtbl_ops)
+ goto out_err;
+ return domain;
out_err:
kfree(domain);
-
return NULL;
}
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
- int mode = DEFAULT_PGTABLE_LEVEL;
- if (type == IOMMU_DOMAIN_IDENTITY)
- mode = PAGE_MODE_NONE;
-
- domain = protection_domain_alloc(mode);
+ domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2580,12 +2087,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
int prot = 0;
- int ret;
+ int ret = -EINVAL;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
if (iommu_prot & IOMMU_READ)
@@ -2593,9 +2100,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
- domain_flush_np_cache(domain, iova, page_size);
+ if (ops->map) {
+ ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ domain_flush_np_cache(domain, iova, page_size);
+ }
return ret;
}
@@ -2605,36 +2113,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
return 0;
- return iommu_unmap_page(domain, iova, page_size);
+ return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
struct protection_domain *domain = to_pdomain(dom);
- unsigned long offset_mask, pte_pgsize;
- struct domain_pgtable pgtable;
- u64 *pte, __pte;
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
- return iova;
-
- pte = fetch_pte(domain, iova, &pte_pgsize);
-
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- offset_mask = pte_pgsize - 1;
- __pte = __sme_clr(*pte & PM_ADDR_MASK);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- return (__pte & ~offset_mask) | (iova & offset_mask);
+ return ops->iova_to_phys(ops, iova);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@@ -2720,8 +2214,8 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_tlb_pde(dom);
- domain_flush_complete(dom);
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2799,22 +2293,12 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- /* First save pgtable configuration*/
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- /* Remove page-table from domain */
- amd_iommu_domain_clr_pt_root(domain);
-
- /* Make changes visible to IOMMUs */
- update_domain(domain);
-
- /* Page-table is not visible to IOMMU anymore, so free it */
- free_pagetable(&pgtable);
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -2855,7 +2339,7 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
domain->glx = levels;
domain->flags |= PD_IOMMUV2_MASK;
- update_domain(domain);
+ amd_iommu_domain_update(domain);
ret = 0;
@@ -2892,7 +2376,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
}
/* Wait until IOMMU TLB flushes are complete */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
/* Now flush device TLBs */
list_for_each_entry(dev_data, &domain->dev_list, list) {
@@ -2918,7 +2402,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
}
/* Wait until all device TLBs are flushed */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
ret = 0;
@@ -3003,11 +2487,9 @@ static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
static int __set_gcr3(struct protection_domain *domain, u32 pasid,
unsigned long cr3)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -3021,11 +2503,9 @@ static int __set_gcr3(struct protection_domain *domain, u32 pasid,
static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
@@ -3854,6 +3334,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
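
One detail worth keeping in mind when reading the alloc_pte()/increase_address_space() interplay this patch moves into io_pgtable.c: every translation level resolves 9 index bits on top of the 12-bit page offset, so paging mode N covers 12 + 9*N address bits, which is exactly the growth step increase_address_space() buys per added level. A two-line arithmetic check:

#include <stdio.h>

/*
 * Each translation level resolves 9 bits on top of the 12-bit page
 * offset, so paging mode N covers 12 + 9*N address bits (capped at
 * 64) -- the reason increase_address_space() gains 9 bits per level.
 */
int main(void)
{
	for (int mode = 1; mode <= 6; mode++) {
		int bits = 12 + 9 * mode;

		printf("mode %d: %d-bit IOVA space\n", mode, bits > 64 ? 64 : bits);
	}
	return 0;
}
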
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 5ecc0bc608ec..f8d4ad421e07 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -77,7 +77,7 @@ struct fault {
};
static LIST_HEAD(state_list);
-static spinlock_t state_lock;
+static DEFINE_SPINLOCK(state_lock);
static struct workqueue_struct *iommu_wq;
@@ -938,8 +938,6 @@ static int __init amd_iommu_v2_init(void)
return 0;
}
- spin_lock_init(&state_lock);
-
ret = -ENOMEM;
iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
if (iommu_wq == NULL)
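
The iommu_v2.c hunk swaps a runtime spin_lock_init() for a static DEFINE_SPINLOCK(), so state_lock is valid from the moment the module loads and can never be taken before initialization. The pattern in isolation (a sketch; the example_* names are made up):

#include <linux/spinlock.h>

/* Statically initialized: usable before any init function runs. */
static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch shared state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
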
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index e13b092e6004..bb251cab61f3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -182,9 +182,13 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
unsigned long start, unsigned long end)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ size_t size = end - start + 1;
- arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start,
- end - start + 1);
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
+ arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
+ PAGE_SIZE, false, smmu_domain);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -391,7 +395,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
unsigned long reg, fld;
unsigned long oas;
unsigned long asid_bits;
- u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+ u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
if (vabits_actual == 52)
feat_mask |= ARM_SMMU_FEAT_VAX;
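
The SVA change above drops ARM_SMMU_FEAT_BTM from the hard requirements: with Broadcast TLB Maintenance the SMMU snoops the CPU's TLBI broadcasts, but without it the MMU-notifier callback now mirrors each CPU invalidation with an explicit command-queue TLBI for the shared ASID, while ATC invalidation stays unconditional because ATCs never observe broadcasts. A sketch of that decision, reusing the signatures introduced by this patch:

/* Sketch only; types and helpers are those of arm-smmu-v3. */
static void sva_invalidate_range(struct arm_smmu_mmu_notifier *smmu_mn,
				 ioasid_t pasid, unsigned long start,
				 unsigned long end)
{
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size = end - start + 1;

	/* No BTM: the SMMU TLB must be invalidated by command. */
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
					    PAGE_SIZE, false, smmu_domain);

	/* ATCs are never covered by broadcast maintenance. */
	arm_smmu_atc_inv_domain(smmu_domain, pasid, start, size);
}
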
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 8ca7415d785d..8594b4a83043 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -88,15 +88,6 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
-static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
- struct arm_smmu_device *smmu)
-{
- if (offset > SZ_64K)
- return smmu->page1 + offset - SZ_64K;
-
- return smmu->base + offset;
-}
-
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -272,9 +263,11 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ fallthrough;
+ case CMDQ_OP_TLBI_EL2_VA:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
- cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
@@ -296,6 +289,9 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
+ case CMDQ_OP_TLBI_EL2_ASID:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ break;
case CMDQ_OP_ATC_INV:
cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
@@ -886,7 +882,8 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
{
struct arm_smmu_cmdq_ent cmd = {
- .opcode = CMDQ_OP_TLBI_NH_ASID,
+ .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
.tlbi.asid = asid,
};
@@ -1269,13 +1266,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
}
if (s1_cfg) {
+ u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
+ STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
+
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
+ FIELD_PREP(STRTAB_STE_1_STRW, strw));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
@@ -1667,40 +1667,28 @@ static void arm_smmu_tlb_inv_context(void *cookie)
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
-static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- struct arm_smmu_domain *smmu_domain)
+static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size,
+ size_t granule,
+ struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+ unsigned long end = iova + size, num_pages = 0, tg = 0;
size_t inv_range = granule;
struct arm_smmu_cmdq_batch cmds = {};
- struct arm_smmu_cmdq_ent cmd = {
- .tlbi = {
- .leaf = leaf,
- },
- };
if (!size)
return;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- cmd.opcode = CMDQ_OP_TLBI_NH_VA;
- cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
- } else {
- cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
- cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
- }
-
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
/* Get the leaf page size */
tg = __ffs(smmu_domain->domain.pgsize_bitmap);
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
- cmd.tlbi.tg = (tg - 10) / 2;
+ cmd->tlbi.tg = (tg - 10) / 2;
/* Determine what level the granule is at */
- cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
num_pages = size >> tg;
}
@@ -1718,11 +1706,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
/* Determine the power of 2 multiple number of pages */
scale = __ffs(num_pages);
- cmd.tlbi.scale = scale;
+ cmd->tlbi.scale = scale;
/* Determine how many chunks of 2^scale size we have */
num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
- cmd.tlbi.num = num - 1;
+ cmd->tlbi.num = num - 1;
/* range is num * 2^scale * pgsize */
inv_range = num << (scale + tg);
@@ -1731,17 +1719,54 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
num_pages -= num << scale;
}
- cmd.tlbi.addr = iova;
- arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ cmd->tlbi.addr = iova;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
iova += inv_range;
}
arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .tlbi = {
+ .leaf = leaf,
+ },
+ };
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
+ cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ }
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
/*
* Unfortunately, this can't be leaf-only since we may have
* zapped an entire table.
*/
- arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+ arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
+}
+
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA,
+ .tlbi = {
+ .asid = asid,
+ .leaf = leaf,
+ },
+ };
+
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
}
static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
@@ -1757,7 +1782,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
- arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+ arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
}
static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2280,8 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
- gather->pgsize, true, smmu_domain);
+ arm_smmu_tlb_inv_range_domain(gather->start,
+ gather->end - gather->start + 1,
+ gather->pgsize, true, smmu_domain);
}
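The size computation changes here because iommu_iotlb_gather records an inclusive end address; a one-line sanity check with hypothetical addresses:

#include <assert.h>

int main(void)
{
	unsigned long start = 0x1000, end = 0x1fff;	/* one 4KiB page */

	assert(end - start + 1 == 0x1000);	/* hence the "+ 1" above */
	return 0;
}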
static phys_addr_t
@@ -2611,6 +2637,7 @@ static struct iommu_ops arm_smmu_ops = {
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q,
+ void __iomem *page,
unsigned long prod_off,
unsigned long cons_off,
size_t dwords, const char *name)
@@ -2639,8 +2666,8 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
1 << q->llq.max_n_shift, name);
}
- q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
- q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
+ q->prod_reg = page + prod_off;
+ q->cons_reg = page + cons_off;
q->ent_dwords = dwords;
q->q_base = Q_BASE_RWA;
@@ -2684,9 +2711,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
int ret;
/* cmdq */
- ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
- ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
- "cmdq");
+ ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
+ ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS,
+ CMDQ_ENT_DWORDS, "cmdq");
if (ret)
return ret;
@@ -2695,9 +2722,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
return ret;
/* evtq */
- ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
- ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
- "evtq");
+ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
+ ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS,
+ EVTQ_ENT_DWORDS, "evtq");
if (ret)
return ret;
@@ -2705,9 +2732,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
- return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
- ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
- "priq");
+ return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
+ ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
+ PRIQ_ENT_DWORDS, "priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -3060,7 +3087,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
/* CR2 (random crap) */
- reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
+ reg = CR2_PTM | CR2_RECINVSID;
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H)
+ reg |= CR2_E2H;
+
writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
/* Stream table */
@@ -3099,10 +3130,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
- writel_relaxed(smmu->evtq.q.llq.prod,
- arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
- writel_relaxed(smmu->evtq.q.llq.cons,
- arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
+ writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
+ writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
enables |= CR0_EVTQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -3117,9 +3146,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
writeq_relaxed(smmu->priq.q.q_base,
smmu->base + ARM_SMMU_PRIQ_BASE);
writel_relaxed(smmu->priq.q.llq.prod,
- arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
+ smmu->page1 + ARM_SMMU_PRIQ_PROD);
writel_relaxed(smmu->priq.q.llq.cons,
- arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
+ smmu->page1 + ARM_SMMU_PRIQ_CONS);
enables |= CR0_PRIQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -3221,8 +3250,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->options |= ARM_SMMU_OPT_MSIPOLL;
}
- if (reg & IDR0_HYP)
+ if (reg & IDR0_HYP) {
smmu->features |= ARM_SMMU_FEAT_HYP;
+ if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ smmu->features |= ARM_SMMU_FEAT_E2H;
+ }
/*
* The coherency feature as set by FW is used in preference to the ID
@@ -3489,11 +3521,7 @@ err_reset_pci_ops: __maybe_unused;
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size)
{
- struct resource res = {
- .flags = IORESOURCE_MEM,
- .start = start,
- .end = start + size - 1,
- };
+ struct resource res = DEFINE_RES_MEM(start, size);
return devm_ioremap_resource(dev, &res);
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 96c2e9565e00..f985817c967a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -139,15 +139,15 @@
#define ARM_SMMU_CMDQ_CONS 0x9c
#define ARM_SMMU_EVTQ_BASE 0xa0
-#define ARM_SMMU_EVTQ_PROD 0x100a8
-#define ARM_SMMU_EVTQ_CONS 0x100ac
+#define ARM_SMMU_EVTQ_PROD 0xa8
+#define ARM_SMMU_EVTQ_CONS 0xac
#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
#define ARM_SMMU_PRIQ_BASE 0xc0
-#define ARM_SMMU_PRIQ_PROD 0x100c8
-#define ARM_SMMU_PRIQ_CONS 0x100cc
+#define ARM_SMMU_PRIQ_PROD 0xc8
+#define ARM_SMMU_PRIQ_CONS 0xcc
#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
@@ -430,6 +430,8 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_NH_ASID 0x11
#define CMDQ_OP_TLBI_NH_VA 0x12
#define CMDQ_OP_TLBI_EL2_ALL 0x20
+ #define CMDQ_OP_TLBI_EL2_ASID 0x21
+ #define CMDQ_OP_TLBI_EL2_VA 0x22
#define CMDQ_OP_TLBI_S12_VMALL 0x28
#define CMDQ_OP_TLBI_S2_IPA 0x2a
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
@@ -604,6 +606,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
#define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17)
+#define ARM_SMMU_FEAT_E2H (1 << 18)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -694,6 +697,9 @@ extern struct arm_smmu_ctx_desc quiet_cd;
int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5dff7ffbef11..98b3a1c2a181 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -166,6 +166,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -196,6 +197,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
}
@@ -204,6 +207,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+ /* Ignore valid bit for SMR mask extraction. */
+ smr &= ~ARM_SMMU_SMR_VALID;
smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
smmu->smrs[i].valid = true;
@@ -323,10 +328,14 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
}
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
{ .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sc8180x-smmu-500" },
+ { .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
+ { .compatible = "qcom,sm8350-smmu-500" },
{ }
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..9ab6ee22c110 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -51,6 +51,8 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
};
+static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain)
{
@@ -378,21 +380,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
-static int iommu_dma_deferred_attach(struct device *dev,
- struct iommu_domain *domain)
-{
- const struct iommu_ops *ops = domain->ops;
-
- if (!is_kdump_kernel())
- return 0;
-
- if (unlikely(ops->is_attach_deferred &&
- ops->is_attach_deferred(domain, dev)))
- return iommu_attach_device(domain, dev);
-
- return 0;
-}
-
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -535,7 +522,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off);
@@ -693,7 +681,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
@@ -863,33 +852,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
- /*
- * The Intel graphic driver is used to assume that the returned
- * sg list is not combound. This blocks the efforts of converting
- * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
- * device driver work and should be removed once it's fixed in i915
- * driver.
- */
- if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
- to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
- (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for_each_sg(sg, s, nents, i) {
- unsigned int s_iova_off = sg_dma_address(s);
- unsigned int s_length = sg_dma_len(s);
- unsigned int s_iova_len = s->length;
-
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = dma_addr + s_iova_off;
- sg_dma_len(s) = s_length;
- dma_addr += s_iova_len;
-
- pr_info_once("sg combining disabled due to i915 driver\n");
- }
-
- return nents;
- }
-
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
@@ -1003,7 +965,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return 0;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1224,34 +1187,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
-#ifdef CONFIG_DMA_REMAP
-static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
-{
- if (!gfpflags_allow_blocking(gfp)) {
- struct page *page;
-
- page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
- if (!page)
- return NULL;
- return page_address(page);
- }
-
- return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
- PAGE_KERNEL, 0);
-}
-
-static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
-{
- __iommu_dma_unmap(dev, handle, size);
- __iommu_dma_free(dev, size, cpu_addr);
-}
-#else
-#define iommu_dma_alloc_noncoherent NULL
-#define iommu_dma_free_noncoherent NULL
-#endif /* CONFIG_DMA_REMAP */
-
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -1322,8 +1257,6 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
- .alloc_noncoherent = iommu_dma_alloc_noncoherent,
- .free_noncoherent = iommu_dma_free_noncoherent,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
@@ -1451,6 +1384,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
static int iommu_dma_init(void)
{
+ if (is_kdump_kernel())
+ static_branch_enable(&iommu_deferred_attach_enabled);
+
return iova_cache_get();
}
arch_initcall(iommu_dma_init);
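The deferred-attach rework above replaces a per-mapping is_kdump_kernel() test with a static key flipped once at init, so the common case costs only a patched NOP. A minimal sketch of the pattern under kernel build assumptions (the enabling condition is a stand-in, not the driver's):

#include <linux/jump_label.h>
#include <linux/init.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

/* Hot path: no load or compare until the key is enabled. */
static bool demo_fast_path(void)
{
	if (static_branch_unlikely(&demo_key))
		return true;	/* rare slow path */
	return false;
}

static int __init demo_init(void)
{
	if (IS_ENABLED(CONFIG_CRASH_DUMP))	/* stand-in condition */
		static_branch_enable(&demo_key);
	return 0;
}
arch_initcall(demo_init);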
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 1d21a0b5f724..e285a220c913 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -20,6 +20,7 @@
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
#include "irq_remapping.h"
@@ -115,30 +116,43 @@ static const struct irq_domain_ops hyperv_ir_domain_ops = {
.free = hyperv_irq_remapping_free,
};
+static const struct irq_domain_ops hyperv_root_ir_domain_ops;
static int __init hyperv_prepare_irq_remapping(void)
{
struct fwnode_handle *fn;
int i;
+ const char *name;
+ const struct irq_domain_ops *ops;
if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
x86_init.hyper.msi_ext_dest_id() ||
!x2apic_supported())
return -ENODEV;
- fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
+ if (hv_root_partition) {
+ name = "HYPERV-ROOT-IR";
+ ops = &hyperv_root_ir_domain_ops;
+ } else {
+ name = "HYPERV-IR";
+ ops = &hyperv_ir_domain_ops;
+ }
+
+ fn = irq_domain_alloc_named_id_fwnode(name, 0);
if (!fn)
return -ENOMEM;
ioapic_ir_domain =
irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
- 0, IOAPIC_REMAPPING_ENTRY, fn,
- &hyperv_ir_domain_ops, NULL);
+ 0, IOAPIC_REMAPPING_ENTRY, fn, ops, NULL);
if (!ioapic_ir_domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
+ if (hv_root_partition)
+ return 0; /* The rest is only relevant to guests */
+
/*
* Hyper-V doesn't provide irq remapping function for
* IO-APIC and so IO-APIC only accepts 8-bit APIC ID.
@@ -166,4 +180,161 @@ struct irq_remap_ops hyperv_irq_remap_ops = {
.enable = hyperv_enable_irq_remapping,
};
+/* IRQ remapping domain when Linux runs as the root partition */
+struct hyperv_root_ir_data {
+ u8 ioapic_id;
+ bool is_level;
+ struct hv_interrupt_entry entry;
+};
+
+static void
+hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ u64 status;
+ u32 vector;
+ struct irq_cfg *cfg;
+ int ioapic_id;
+ struct cpumask *affinity;
+ int cpu;
+ struct hv_interrupt_entry entry;
+ struct hyperv_root_ir_data *data = irq_data->chip_data;
+ struct IO_APIC_route_entry e;
+
+ cfg = irqd_cfg(irq_data);
+ affinity = irq_data_get_effective_affinity_mask(irq_data);
+ cpu = cpumask_first_and(affinity, cpu_online_mask);
+
+ vector = cfg->vector;
+ ioapic_id = data->ioapic_id;
+
+ if (data->entry.source == HV_DEVICE_TYPE_IOAPIC
+ && data->entry.ioapic_rte.as_uint64) {
+ entry = data->entry;
+
+ status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
+
+ if (status != HV_STATUS_SUCCESS)
+ pr_debug("%s: unexpected unmap status %lld\n", __func__, status);
+
+ data->entry.ioapic_rte.as_uint64 = 0;
+ data->entry.source = 0; /* Invalid source */
+ }
+
+ status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
+ vector, &entry);
+
+ if (status != HV_STATUS_SUCCESS) {
+ pr_err("%s: map hypercall failed, status %lld\n", __func__, status);
+ return;
+ }
+
+ data->entry = entry;
+
+ /* Turn it into an IO_APIC_route_entry, and generate MSI MSG. */
+ e.w1 = entry.ioapic_rte.low_uint32;
+ e.w2 = entry.ioapic_rte.high_uint32;
+
+ memset(msg, 0, sizeof(*msg));
+ msg->arch_data.vector = e.vector;
+ msg->arch_data.delivery_mode = e.delivery_mode;
+ msg->arch_addr_lo.dest_mode_logical = e.dest_mode_logical;
+ msg->arch_addr_lo.dmar_format = e.ir_format;
+ msg->arch_addr_lo.dmar_index_0_14 = e.ir_index_0_14;
+}
+
+static int hyperv_root_ir_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ int ret;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+
+ send_cleanup_vector(cfg);
+
+ return 0;
+}
+
+static struct irq_chip hyperv_root_ir_chip = {
+ .name = "HYPERV-ROOT-IR",
+ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = hyperv_root_ir_set_affinity,
+ .irq_compose_msi_msg = hyperv_root_ir_compose_msi_msg,
+};
+
+static int hyperv_root_irq_remapping_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data;
+ struct hyperv_root_ir_data *data;
+ int ret = 0;
+
+ if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -ENOMEM;
+ }
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ if (!irq_data) {
+ kfree(data);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -EINVAL;
+ }
+
+ data->ioapic_id = info->devid;
+ data->is_level = info->ioapic.is_level;
+
+ irq_data->chip = &hyperv_root_ir_chip;
+ irq_data->chip_data = data;
+
+ return 0;
+}
+
+static void hyperv_root_irq_remapping_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irq_data;
+ struct hyperv_root_ir_data *data;
+ struct hv_interrupt_entry *e;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+
+ if (irq_data && irq_data->chip_data) {
+ data = irq_data->chip_data;
+ e = &data->entry;
+
+ if (e->source == HV_DEVICE_TYPE_IOAPIC
+ && e->ioapic_rte.as_uint64)
+ hv_unmap_ioapic_interrupt(data->ioapic_id,
+ &data->entry);
+
+ kfree(data);
+ }
+ }
+
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hyperv_root_ir_domain_ops = {
+ .select = hyperv_irq_remapping_select,
+ .alloc = hyperv_root_irq_remapping_alloc,
+ .free = hyperv_root_irq_remapping_free,
+};
+
#endif
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index fb8e1e8c8029..ae236ec7d219 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
new file mode 100644
index 000000000000..b12e421a2f1a
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cap_audit.c - audit iommu capabilities for boot time and hot plug
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ * Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/intel-iommu.h>
+#include "cap_audit.h"
+
+static u64 intel_iommu_cap_sanity;
+static u64 intel_iommu_ecap_sanity;
+
+static inline void check_irq_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
+}
+
+static inline void check_dmar_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
+
+ CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
+}
+
+static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ bool mismatch = false;
+ u64 old_cap = intel_iommu_cap_sanity;
+ u64 old_ecap = intel_iommu_ecap_sanity;
+
+ if (type == CAP_AUDIT_HOTPLUG_IRQR) {
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
+ goto out;
+ }
+
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
+
+	/* Abort the hot plug if any feature of the new IOMMU is smaller than the audited global minimum */
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
+
+out:
+ if (mismatch) {
+ intel_iommu_cap_sanity = old_cap;
+ intel_iommu_ecap_sanity = old_ecap;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ struct dmar_drhd_unit *d;
+ struct intel_iommu *i;
+
+ rcu_read_lock();
+ if (list_empty(&dmar_drhd_units))
+ goto out;
+
+ for_each_active_iommu(i, d) {
+ if (!iommu) {
+ intel_iommu_ecap_sanity = i->ecap;
+ intel_iommu_cap_sanity = i->cap;
+ iommu = i;
+ continue;
+ }
+
+ if (type == CAP_AUDIT_STATIC_DMAR)
+ check_dmar_capabilities(iommu, i);
+ else
+ check_irq_capabilities(iommu, i);
+ }
+
+out:
+ rcu_read_unlock();
+ return 0;
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
+{
+ switch (type) {
+ case CAP_AUDIT_STATIC_DMAR:
+ case CAP_AUDIT_STATIC_IRQR:
+ return cap_audit_static(iommu, type);
+ case CAP_AUDIT_HOTPLUG_DMAR:
+ case CAP_AUDIT_HOTPLUG_IRQR:
+ return cap_audit_hotplug(iommu, type);
+ default:
+ break;
+ }
+
+ return -EFAULT;
+}
+
+bool intel_cap_smts_sanity(void)
+{
+ return ecap_smts(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_pasid_sanity(void)
+{
+ return ecap_pasid(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_nest_sanity(void)
+{
+ return ecap_nest(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_flts_sanity(void)
+{
+ return ecap_flts(intel_iommu_ecap_sanity);
+}
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
new file mode 100644
index 000000000000..74cfccae0e81
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cap_audit.h - audit iommu capabilities header
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ */
+
+/*
+ * Capability Register Mask
+ */
+#define CAP_FL5LP_MASK BIT_ULL(60)
+#define CAP_PI_MASK BIT_ULL(59)
+#define CAP_FL1GP_MASK BIT_ULL(56)
+#define CAP_RD_MASK BIT_ULL(55)
+#define CAP_WD_MASK BIT_ULL(54)
+#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
+#define CAP_NFR_MASK GENMASK_ULL(47, 40)
+#define CAP_PSI_MASK BIT_ULL(39)
+#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
+#define CAP_FRO_MASK GENMASK_ULL(33, 24)
+#define CAP_ZLR_MASK BIT_ULL(22)
+#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
+#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
+#define CAP_CM_MASK BIT_ULL(7)
+#define CAP_PHMR_MASK BIT_ULL(6)
+#define CAP_PLMR_MASK BIT_ULL(5)
+#define CAP_RWBF_MASK BIT_ULL(4)
+#define CAP_AFL_MASK BIT_ULL(3)
+#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
+
+/*
+ * Extended Capability Register Mask
+ */
+#define ECAP_RPS_MASK BIT_ULL(49)
+#define ECAP_SMPWC_MASK BIT_ULL(48)
+#define ECAP_FLTS_MASK BIT_ULL(47)
+#define ECAP_SLTS_MASK BIT_ULL(46)
+#define ECAP_SLADS_MASK BIT_ULL(45)
+#define ECAP_VCS_MASK BIT_ULL(44)
+#define ECAP_SMTS_MASK BIT_ULL(43)
+#define ECAP_PDS_MASK BIT_ULL(42)
+#define ECAP_DIT_MASK BIT_ULL(41)
+#define ECAP_PASID_MASK BIT_ULL(40)
+#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
+#define ECAP_EAFS_MASK BIT_ULL(34)
+#define ECAP_NWFS_MASK BIT_ULL(33)
+#define ECAP_SRS_MASK BIT_ULL(31)
+#define ECAP_ERS_MASK BIT_ULL(30)
+#define ECAP_PRS_MASK BIT_ULL(29)
+#define ECAP_NEST_MASK BIT_ULL(26)
+#define ECAP_MTS_MASK BIT_ULL(25)
+#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
+#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
+#define ECAP_SC_MASK BIT_ULL(7)
+#define ECAP_PT_MASK BIT_ULL(6)
+#define ECAP_EIM_MASK BIT_ULL(4)
+#define ECAP_DT_MASK BIT_ULL(2)
+#define ECAP_QI_MASK BIT_ULL(1)
+#define ECAP_C_MASK BIT_ULL(0)
+
+/*
+ * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
+ * IOMMU gets audited.
+ */
+#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(a) != cap##_##feature(b)) { \
+ intel_iommu_##cap##_sanity &= ~(MASK); \
+ pr_info("IOMMU feature %s inconsistent", #feature); \
+ } \
+} while (0)
+
+#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+ DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
+
+#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(intel_iommu_##cap##_sanity)) \
+ DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
+ (b)->cap, cap, feature, MASK); \
+} while (0)
+
+#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
+do { \
+ u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
+ min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
+ intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
+ min_feature; \
+} while (0)
+
+#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
+do { \
+ if ((intel_iommu_##cap##_sanity & (MASK)) > \
+ (cap##_##feature((iommu)->cap))) \
+ mismatch = true; \
+ else \
+ (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
+ (intel_iommu_##cap##_sanity & (MASK)); \
+} while (0)
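To see what the minimal-feature arithmetic computes, a standalone sketch with made-up MGAW values; comparing the masked u64s directly is valid because each mask covers one contiguous field:

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define CAP_MGAW_MASK GENMASK_ULL(21, 16)

int main(void)
{
	unsigned long long sanity = 47ULL << 16;  /* first IOMMU: MGAW 47 */
	unsigned long long cap    = 38ULL << 16;  /* second IOMMU: MGAW 38 */
	unsigned long long m = sanity & CAP_MGAW_MASK;

	if ((cap & CAP_MGAW_MASK) < m)
		m = cap & CAP_MGAW_MASK;
	sanity = (sanity & ~CAP_MGAW_MASK) | m;

	/* The audited value keeps the weaker feature: MGAW 38. */
	printf("MGAW=%llu\n", (sanity & CAP_MGAW_MASK) >> 16);
	return 0;
}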
+
+enum cap_audit_type {
+ CAP_AUDIT_STATIC_DMAR,
+ CAP_AUDIT_STATIC_IRQR,
+ CAP_AUDIT_HOTPLUG_DMAR,
+ CAP_AUDIT_HOTPLUG_IRQR,
+};
+
+bool intel_cap_smts_sanity(void);
+bool intel_cap_pasid_sanity(void);
+bool intel_cap_nest_sanity(void);
+bool intel_cap_flts_sanity(void);
+
+static inline bool scalable_mode_support(void)
+{
+ return (intel_iommu_sm && intel_cap_smts_sanity());
+}
+
+static inline bool pasid_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_pasid_sanity();
+}
+
+static inline bool nested_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_nest_sanity();
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..d5c51b5c20af 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -31,6 +31,7 @@
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
+#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
@@ -525,6 +526,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_rhsa *rhsa;
+ struct acpi_dmar_satc *satc;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -554,6 +556,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
/* We don't print this here because we need to sanity-check
it first. So print it in dmar_parse_one_andd() instead. */
break;
+ case ACPI_DMAR_TYPE_SATC:
+ satc = container_of(header, struct acpi_dmar_satc, header);
+ pr_info("SATC flags: 0x%x\n", satc->flags);
+ break;
}
}
@@ -641,6 +647,7 @@ parse_dmar_table(void)
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
};
/*
@@ -1307,6 +1314,8 @@ restart:
offset = ((index + i) % QI_LENGTH) << shift;
memcpy(qi->desc + offset, &desc[i], 1 << shift);
qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+ trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
+ desc[i].qw2, desc[i].qw3);
}
qi->desc_status[wait_index] = QI_IN_USE;
@@ -1461,8 +1470,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
@@ -1496,7 +1505,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
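Both alignment fixes above swap open-coded tests for the helpers; the old "!ALIGN(addr, align)" could only be true when addr rounded up to zero, so ordinary misaligned addresses slipped through unwarned. A user-space sketch with an assumed address:

#include <assert.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long long addr = 0x1234f000ULL;
	unsigned long long align = 1ULL << 16;	/* 64KiB region */

	assert(!IS_ALIGNED(addr, align));	/* now triggers the warning */
	assert(ALIGN_DOWN(addr, align) == 0x12340000ULL);
	return 0;
}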
@@ -2074,6 +2083,7 @@ static guid_t dmar_hp_guid =
#define DMAR_DSM_FUNC_DRHD 1
#define DMAR_DSM_FUNC_ATSR 2
#define DMAR_DSM_FUNC_RHSA 3
+#define DMAR_DSM_FUNC_SATC 4
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
@@ -2091,6 +2101,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
};
if (!dmar_detect_dsm(handle, func))
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 788119c5b021..ee0932307d64 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -38,17 +38,16 @@
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
#include "pasid.h"
+#include "cap_audit.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -317,8 +316,18 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+struct dmar_satc_unit {
+ struct list_head list; /* list of SATC units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct dmar_dev_scope *devices; /* target devices */
+ struct intel_iommu *iommu; /* the corresponding iommu */
+ int devices_cnt; /* target device count */
+ u8 atc_required:1; /* ATS is required */
+};
+
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
+static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
@@ -719,6 +728,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -744,6 +755,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1014,8 +1027,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ pteval |= DMA_FL_PTE_ACCESS;
+ }
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1464,17 +1480,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1576,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
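The source-id packed by the new __iommu_flush_dev_iotlb() helper is the PCI requester id; for a hypothetical device 3a:02.1 the math works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned int bus = 0x3a, dev = 0x02, fn = 0x1;	/* hypothetical 3a:02.1 */
	unsigned int devfn = (dev << 3) | fn;	/* 5-bit device, 3-bit function */
	unsigned int sid = (bus << 8) | devfn;	/* as in the helper */

	printf("sid=0x%04x\n", sid);		/* 0x3a11 */
	return 0;
}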
@@ -1841,25 +1874,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
*/
static bool first_level_by_default(void)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- static int first_level_support = -1;
-
- if (likely(first_level_support != -1))
- return first_level_support;
-
- first_level_support = 1;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
- first_level_support = 0;
- break;
- }
- }
- rcu_read_unlock();
-
- return first_level_support;
+ return scalable_mode_support() && intel_cap_flts_sanity();
}
static struct dmar_domain *alloc_domain(int flags)
@@ -1877,6 +1892,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2277,9 +2293,9 @@ static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
- struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ struct dma_pte *pte = NULL;
phys_addr_t pteval;
u64 attr;
@@ -2289,9 +2305,16 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+ attr |= DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+ }
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@@ -2301,7 +2324,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+ pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is a large page */
@@ -2362,34 +2385,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
+ *
+ * We leave clflush for the leaf pte changes to iotlb_sync_map()
+ * callback.
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
+ (largepage_lvl > 1 && nr_pages < lvl_pages))
pte = NULL;
- }
- }
-
- return 0;
-}
-
-static int
-domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot)
-{
- int iommu_id, ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
return 0;
@@ -2547,7 +2550,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -3176,6 +3179,10 @@ static int __init init_dmars(void)
goto error;
}
+ ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
+ if (ret)
+ goto free_iommu;
+
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -3719,6 +3726,57 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
+{
+ struct dmar_satc_unit *satcu;
+ struct acpi_dmar_satc *tmp;
+
+ list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
+ dmar_rcu_check()) {
+ tmp = (struct acpi_dmar_satc *)satcu->hdr;
+ if (satc->segment != tmp->segment)
+ continue;
+ if (satc->header.length != tmp->header.length)
+ continue;
+ if (memcmp(satc, tmp, satc->header.length) == 0)
+ return satcu;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_satc *satc;
+ struct dmar_satc_unit *satcu;
+
+ if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ return 0;
+
+ satc = container_of(hdr, struct acpi_dmar_satc, header);
+ satcu = dmar_find_satc(satc);
+ if (satcu)
+ return 0;
+
+ satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
+ if (!satcu)
+ return -ENOMEM;
+
+ satcu->hdr = (void *)(satcu + 1);
+ memcpy(satcu->hdr, hdr, hdr->length);
+ satcu->atc_required = satc->flags & 0x1;
+ satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ &satcu->devices_cnt);
+ if (satcu->devices_cnt && !satcu->devices) {
+ kfree(satcu);
+ return -ENOMEM;
+ }
+ list_add_rcu(&satcu->list, &dmar_satc_units);
+
+ return 0;
+}
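dmar_parse_one_satc() keeps its private copy of the variable-length ACPI entry in the same allocation as the bookkeeping struct. A simplified user-space sketch of that pattern (types are illustrative, not the driver's):

#include <stdlib.h>
#include <string.h>

struct hdr { unsigned short type, length; };
struct unit { struct hdr *hdr; int devices_cnt; };

struct unit *unit_alloc(const struct hdr *src)
{
	/* One allocation: the struct, then a copy of the entry. */
	struct unit *u = calloc(1, sizeof(*u) + src->length);

	if (!u)
		return NULL;
	u->hdr = (struct hdr *)(u + 1);	/* points just past the struct */
	memcpy(u->hdr, src, src->length);
	return u;
}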
+
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret;
@@ -3727,6 +3785,10 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (g_iommus[iommu->seq_id])
return 0;
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
+ if (ret)
+ goto out;
+
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("%s: Doesn't support hardware pass through.\n",
iommu->name);
@@ -3822,6 +3884,7 @@ static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
+ struct dmar_satc_unit *satcu, *satc_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
@@ -3833,6 +3896,11 @@ static void intel_iommu_free_dmars(void)
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
+ list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
+ list_del(&satcu->list);
+ dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
+ kfree(satcu);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3884,8 +3952,10 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
+ struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_satc *satc;
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
return 0;
@@ -3926,6 +3996,23 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
}
}
+ list_for_each_entry(satcu, &dmar_satc_units, list) {
+ satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ satc->segment, satcu->devices,
+ satcu->devices_cnt);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
+ if (dmar_remove_dev_scope(info, satc->segment,
+ satcu->devices, satcu->devices_cnt))
+ break;
+ }
+ }
return 0;
}
@@ -4269,6 +4356,9 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
+ if (list_empty(&dmar_satc_units))
+ pr_info("No SATC found\n");
+
if (dmar_map_gfx)
intel_iommu_gfx_mapped = 1;
@@ -4475,33 +4565,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
+
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ return ret;
}
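The link/unlink pair above turns subdevice attachment into a per-(domain, device) refcount: the first link returns 1 and the caller performs the one-time PASID setup, later links only bump the count, and teardown runs when unlink drops it back to zero. A compact sketch of that contract (simplified state, not the driver structs):

struct subdev { int users; };

int subdev_link(struct subdev *s)
{
	return ++s->users;	/* == 1: caller does one-time setup */
}

int subdev_unlink(struct subdev *s)
{
	return --s->users;	/* == 0: caller does one-time teardown */
}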
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4648,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -4548,10 +4679,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -4560,8 +4690,10 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
ioasid_put(domain->default_pasid);
return ret;
@@ -4581,14 +4713,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -4876,7 +5012,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
- int ret;
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
@@ -4902,9 +5037,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
+ return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
@@ -4973,60 +5107,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
-static inline bool scalable_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool iommu_pasid_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!pasid_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool nested_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5267,7 +5347,7 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
int ret;
if (!dev_is_pci(dev) || dmar_disabled ||
- !scalable_mode_support() || !iommu_pasid_support())
+ !scalable_mode_support() || !pasid_mode_support())
return false;
ret = pci_pasid_features(to_pci_dev(dev));
@@ -5373,6 +5453,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
+static bool domain_use_flush_queue(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool r = true;
+
+ if (intel_iommu_strict)
+ return false;
+
+ /*
+ * The flush queue implementation does not perform page-selective
+ * invalidations that are required for efficient TLB flushes in virtual
+ * environments. The benefit of batching is likely to be much lower than
+ * the overhead of synchronizing the virtual and physical IOMMU
+ * page-tables.
+ */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_caching_mode(iommu->cap))
+ continue;
+
+ pr_warn_once("IOMMU batching is disabled due to virtualization");
+ r = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return r;
+}
+
static int
intel_iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
@@ -5383,7 +5493,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !intel_iommu_strict;
+ *(int *)data = domain_use_flush_queue();
return 0;
default:
return -ENODEV;
@@ -5411,6 +5521,57 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
+static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
+ unsigned long clf_pages)
+{
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ unsigned long lvl_pages = 0;
+ int level = 0;
+
+ while (clf_pages > 0) {
+ if (!pte) {
+ level = 0;
+ pte = pfn_to_dma_pte(domain, clf_pfn, &level);
+ if (WARN_ON(!pte))
+ return;
+ first_pte = pte;
+ lvl_pages = lvl_to_nr_pages(level);
+ }
+
+ if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
+ return;
+
+ clf_pages -= lvl_pages;
+ clf_pfn += lvl_pages;
+ pte++;
+
+ if (!clf_pages || first_pte_in_page(pte) ||
+ (level > 1 && clf_pages < lvl_pages)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ }
+}
+
+static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long pages = aligned_nrpages(iova, size);
+ unsigned long pfn = iova >> VTD_PAGE_SHIFT;
+ struct intel_iommu *iommu;
+ int iommu_id;
+
+ if (!dmar_domain->iommu_coherency)
+ clflush_sync_map(dmar_domain, pfn, pages);
+
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, dmar_domain, pfn, pages);
+ }
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5423,6 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
.aux_detach_dev = intel_iommu_aux_detach_device,
.aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
.unmap = intel_iommu_unmap,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..611ef5243cb6 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -22,6 +22,7 @@
#include <asm/pci-direct.h>
#include "../irq_remapping.h"
+#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -734,6 +735,9 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
+ if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
+ goto error;
+
if (!dmar_ir_support())
return -ENODEV;
@@ -1353,6 +1357,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
@@ -1437,6 +1443,10 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
+ if (ret)
+ return ret;
+
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index b92af83b79bd..f26cb6195b2c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -457,20 +457,6 @@ pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
}
static void
-iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
-{
- struct qi_desc desc;
-
- desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- desc.qw2 = 0;
- desc.qw3 = 0;
-
- qi_submit_sync(iommu, &desc, 1, 0);
-}
-
-static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
@@ -514,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
@@ -530,7 +516,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
} else {
iommu_flush_write_buffer(iommu);
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4fa248b98031..574a7e657a9a 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -118,55 +118,36 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
{
- struct qi_desc desc;
+ struct device_domain_info *info = get_domain_info(sdev->dev);
- if (pages == -1) {
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- } else {
- int mask = ilog2(__roundup_pow_of_two(pages));
-
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_IH(ih) |
- QI_EIOTLB_AM(mask);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
-
- if (sdev->dev_iotlb) {
- desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
- QI_DEV_EIOTLB_SID(sdev->sid) |
- QI_DEV_EIOTLB_QDEP(sdev->qdep) |
- QI_DEIOTLB_TYPE;
- if (pages == -1) {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
- QI_DEV_EIOTLB_SIZE;
- } else if (pages > 1) {
- /* The least significant zero bit indicates the size. So,
- * for example, an "address" value of 0x12345f000 will
- * flush from 0x123440000 to 0x12347ffff (256KiB). */
- unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
- unsigned long mask = __rounddown_pow_of_two(address ^ last);
-
- desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
- (mask - 1)) | QI_DEV_EIOTLB_SIZE;
- } else {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ if (WARN_ON(!pages))
+ return;
+
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+ svm->pasid, sdev->qdep, address,
+ order_base_2(pages));
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
+{
+ unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+ unsigned long start = ALIGN_DOWN(address, align);
+ unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+ while (start < end) {
+ __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+ start += align;
}
}
@@ -211,7 +192,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +262,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +345,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +364,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +469,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +530,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +560,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -605,14 +589,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +616,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
@@ -927,10 +911,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
u64 address;
handled = 1;
-
req = &iommu->prq[head / sizeof(*req)];
-
- result = QI_RESP_FAILURE;
+ result = QI_RESP_INVALID;
address = (u64)req->addr << VTD_PAGE_SHIFT;
if (!req->pasid_present) {
pr_err("%s: Page request without PASID: %08llx %08llx\n",
@@ -968,7 +950,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
rcu_read_unlock();
}
- result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
if (!svm->mm)
@@ -1058,8 +1039,17 @@ prq_advance:
* Clear the page request overflow bit and wake up all threads that
* are waiting for the completion of this handling.
*/
- if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
- writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
+ iommu->name);
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ if (head == tail) {
+ writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
+ iommu->name);
+ }
+ }
if (!completion_done(&iommu->prq_complete))
complete(&iommu->prq_complete);
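The rewritten intel_flush_svm_range_dev() no longer issues one invalidation
with a possibly over-wide address mask; it rounds the request up to a
naturally aligned power-of-two window and walks it in aligned chunks, since
that is the only shape the QI address-mask encoding can express. A worked
example, assuming VTD_PAGE_SHIFT == 12:

	/* address = 0x1000, pages = 3 */
	unsigned long shift = ilog2(__roundup_pow_of_two(3));	  /* 2      */
	unsigned long align = 1UL << (12 + shift);		  /* 0x4000 */
	unsigned long start = ALIGN_DOWN(0x1000, align);	  /* 0x0    */
	unsigned long end   = ALIGN(0x1000 + (3UL << 12), align); /* 0x4000 */

	/* One loop iteration: __flush_svm_range_dev() for 4 pages at 0. */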
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1d92ac948db7..d4004bcf333a 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -44,26 +44,25 @@
/*
* We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
- * and 12 bits in a page. With some carefully-chosen coefficients we can
- * hide the ugly inconsistencies behind these macros and at least let the
- * rest of the code pretend to be somewhat sane.
+ * and 12 bits in a page.
+ * MediaTek extends 2 bits to reach 34 bits: 14 bits at lvl1 and 8 bits at lvl2.
*/
#define ARM_V7S_ADDR_BITS 32
-#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4)
-#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
+#define _ARM_V7S_LVL_BITS(lvl, cfg) ((lvl) == 1 ? ((cfg)->ias - 20) : 8)
+#define ARM_V7S_LVL_SHIFT(lvl) ((lvl) == 1 ? 20 : 12)
#define ARM_V7S_TABLE_SHIFT 10
-#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl))
-#define ARM_V7S_TABLE_SIZE(lvl) \
- (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))
+#define ARM_V7S_PTES_PER_LVL(lvl, cfg) (1 << _ARM_V7S_LVL_BITS(lvl, cfg))
+#define ARM_V7S_TABLE_SIZE(lvl, cfg) \
+ (ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))
#define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
-#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1)
-#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
+#define _ARM_V7S_IDX_MASK(lvl, cfg) (ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
+#define ARM_V7S_LVL_IDX(addr, lvl, cfg) ({ \
int _l = lvl; \
- ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
+ ((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
})
/*
@@ -112,9 +111,10 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
-/* MediaTek extend the two bits for PA 32bit/33bit */
+/* MediaTek extends the bits below for PA 32bit/33bit/34bit */
#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
+#define ARM_V7S_ATTR_MTK_PA_BIT34 BIT(5)
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
@@ -194,6 +194,8 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
+ if (paddr & BIT_ULL(34))
+ pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
return pte;
}
@@ -218,6 +220,8 @@ static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
paddr |= BIT_ULL(32);
if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
paddr |= BIT_ULL(33);
+ if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
+ paddr |= BIT_ULL(34);
return paddr;
}
@@ -234,7 +238,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
struct device *dev = cfg->iommu_dev;
phys_addr_t phys;
dma_addr_t dma;
- size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
if (lvl == 1)
@@ -280,7 +284,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct device *dev = cfg->iommu_dev;
- size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
@@ -424,7 +428,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
arm_v7s_iopte *tblp;
size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
- tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
+ tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
sz, lvl, tblp) != sz))
return -EINVAL;
@@ -477,7 +481,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
/* Find our entry at the current level */
- ptep += ARM_V7S_LVL_IDX(iova, lvl);
+ ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
/* If we can install a leaf entry at this level, then do so */
if (num_entries)
@@ -519,7 +523,6 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- struct io_pgtable *iop = &data->iop;
int ret;
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
@@ -535,12 +538,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
*/
- if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
- io_pgtable_tlb_flush_walk(iop, iova, size,
- ARM_V7S_BLOCK_SIZE(2));
- } else {
- wmb();
- }
+ wmb();
return ret;
}
@@ -550,7 +548,7 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
int i;
- for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
+ for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
arm_v7s_iopte pte = data->pgd[i];
if (ARM_V7S_PTE_IS_TABLE(pte, 1))
@@ -602,9 +600,9 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
if (!tablep)
return 0; /* Bytes unmapped */
- num_ptes = ARM_V7S_PTES_PER_LVL(2);
+ num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
num_entries = size >> ARM_V7S_LVL_SHIFT(2);
- unmap_idx = ARM_V7S_LVL_IDX(iova, 2);
+ unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);
pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
if (num_entries > 1)
@@ -646,7 +644,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
if (WARN_ON(lvl > 2))
return 0;
- idx = ARM_V7S_LVL_IDX(iova, lvl);
+ idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
ptep += idx;
do {
pte[i] = READ_ONCE(ptep[i]);
@@ -717,7 +715,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- if (WARN_ON(upper_32_bits(iova)))
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
@@ -732,7 +730,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
u32 mask;
do {
- ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
+ ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
pte = READ_ONCE(*ptep);
ptep = iopte_deref(pte, lvl, data);
} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
@@ -751,15 +749,14 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
{
struct arm_v7s_io_pgtable *data;
- if (cfg->ias > ARM_V7S_ADDR_BITS)
+ if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
- if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
+ if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
return NULL;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_EXT |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -775,8 +772,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
spin_lock_init(&data->split_lock);
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
- ARM_V7S_TABLE_SIZE(2),
- ARM_V7S_TABLE_SIZE(2),
+ ARM_V7S_TABLE_SIZE(2, cfg),
+ ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
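With the table geometry now derived from cfg->ias, the MediaTek 34-bit case
simply grows the level-1 table: _ARM_V7S_LVL_BITS(1, cfg) becomes ias - 20 =
14, i.e. 16384 level-1 entries (a 64KiB pgd), while level 2 keeps its 256
entries. A quick index computation for a hypothetical 34-bit IOVA:

	unsigned long iova = 0x3C0012000UL;		/* 34-bit IOVA */
	int l1 = (iova >> 20) & ((1 << 14) - 1);	/* 0x3C00      */
	int l2 = (iova >> 12) & 0xff;			/* 0x12        */

The classic 32-bit configuration keeps its 4096-entry level-1 table, so
existing users see no change in table sizes.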
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 94394c81468f..6e9917ce980f 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -24,6 +24,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
+#ifdef CONFIG_AMD_IOMMU
+ [AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
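This registers the AMD v1 page-table format with the generic io-pgtable core,
so the AMD driver can allocate its walker like any other format. A usage
sketch — the cfg values and the cookie are illustrative, not the driver's
actual parameters:

	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 52,
	};
	struct io_pgtable_ops *ops;

	/* cookie is the caller's private pointer, echoed back in TLB ops. */
	ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;	/* format not built in, or cfg rejected */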
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ffeebda8d6de..d0b0a15dba84 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1980,6 +1980,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+ return __iommu_attach_device(domain, dev);
+
+ return 0;
+}
+
/*
* Check flags and other user provided data for valid combinations. We also
* make sure no reserved fields or unused flags are set. This is to ensure
@@ -2426,9 +2436,6 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
size -= pgsize;
}
- if (ops->iotlb_sync_map)
- ops->iotlb_sync_map(domain);
-
/* unroll mapping in case something went wrong */
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
@@ -2438,18 +2445,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ const struct iommu_ops *ops = domain->ops;
+ int ret;
+
+ ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
+ if (ret == 0 && ops->iotlb_sync_map)
+ ops->iotlb_sync_map(domain, iova, size);
+
+ return ret;
+}
+
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
might_sleep();
- return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+ return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+ return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
@@ -2533,6 +2553,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
+ const struct iommu_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
@@ -2563,6 +2584,8 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
sg = sg_next(sg);
}
+ if (ops->iotlb_sync_map)
+ ops->iotlb_sync_map(domain, iova, mapped);
return mapped;
out_err:
@@ -2586,7 +2609,6 @@ size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
{
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
-EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
@@ -2599,15 +2621,6 @@ int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
-void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
-{
- if (unlikely(domain->ops->domain_window_disable == NULL))
- return;
-
- return domain->ops->domain_window_disable(domain, wnd_nr);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
-
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
@@ -2863,17 +2876,6 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
/*
* Per device IOMMU features.
*/
-bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
-{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
-
- if (ops && ops->dev_has_feat)
- return ops->dev_has_feat(dev, feat);
-
- return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
-
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
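Two things change in iommu.c: ->iotlb_sync_map() now receives the mapped
range, and it is invoked once per iommu_map()/iommu_map_sg() call instead of
once per internal __iommu_map() page-size chunk. A driver-side stub under the
new signature might look like this (the "my_" names are hypothetical):

	static void my_iommu_iotlb_sync_map(struct iommu_domain *domain,
					    unsigned long iova, size_t size)
	{
		/* One flush for the whole just-mapped [iova, iova + size). */
		my_hw_flush_range(domain, iova, size);
	}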
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4bb3293ae4d7..e6e2fa85271c 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -55,7 +55,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
-bool has_iova_flush_queue(struct iova_domain *iovad)
+static bool has_iova_flush_queue(struct iova_domain *iovad)
{
return !!iovad->fq;
}
@@ -112,7 +112,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
return 0;
}
-EXPORT_SYMBOL_GPL(init_iova_flush_queue);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
@@ -358,7 +357,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -451,7 +450,6 @@ retry:
return new_iova->pfn_lo;
}
-EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
* free_iova_fast - free iova pfn range into rcache
@@ -598,10 +596,9 @@ void queue_iova(struct iova_domain *iovad,
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
-EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -710,36 +707,6 @@ finish:
}
EXPORT_SYMBOL_GPL(reserve_iova);
-/**
- * copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
- * @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
- * other.
- */
-void
-copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
-{
- unsigned long flags;
- struct rb_node *node;
-
- spin_lock_irqsave(&from->iova_rbtree_lock, flags);
- for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
- struct iova *iova = rb_entry(node, struct iova, node);
- struct iova *new_iova;
-
- if (iova->pfn_lo == IOVA_ANCHOR)
- continue;
-
- new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
- if (!new_iova)
- pr_err("Reserve iova range %lx@%lx failed\n",
- iova->pfn_lo, iova->pfn_lo);
- }
- spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
-}
-EXPORT_SYMBOL_GPL(copy_reserved_iova);
-
/*
* Magazine caches for IOVA ranges. For an introduction to magazines,
* see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index d71f10257f15..eaaec0a55cc6 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -734,54 +734,45 @@ static int ipmmu_init_platform_device(struct device *dev,
return 0;
}
-static const struct soc_device_attribute soc_rcar_gen3[] = {
- { .soc_id = "r8a774a1", },
- { .soc_id = "r8a774b1", },
- { .soc_id = "r8a774c0", },
- { .soc_id = "r8a774e1", },
- { .soc_id = "r8a7795", },
- { .soc_id = "r8a77961", },
- { .soc_id = "r8a7796", },
- { .soc_id = "r8a77965", },
- { .soc_id = "r8a77970", },
- { .soc_id = "r8a77990", },
- { .soc_id = "r8a77995", },
+static const struct soc_device_attribute soc_needs_opt_in[] = {
+ { .family = "R-Car Gen3", },
+ { .family = "RZ/G2", },
{ /* sentinel */ }
};
-static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
- { .soc_id = "r8a774b1", },
- { .soc_id = "r8a774c0", },
- { .soc_id = "r8a774e1", },
- { .soc_id = "r8a7795", .revision = "ES3.*" },
- { .soc_id = "r8a77961", },
- { .soc_id = "r8a77965", },
- { .soc_id = "r8a77990", },
- { .soc_id = "r8a77995", },
+static const struct soc_device_attribute soc_denylist[] = {
+ { .soc_id = "r8a774a1", },
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { .soc_id = "r8a7795", .revision = "ES2.*" },
+ { .soc_id = "r8a7796", },
{ /* sentinel */ }
};
-static const char * const rcar_gen3_slave_whitelist[] = {
+static const char * const devices_allowlist[] = {
+ "ee100000.mmc",
+ "ee120000.mmc",
+ "ee140000.mmc",
+ "ee160000.mmc"
};
-static bool ipmmu_slave_whitelist(struct device *dev)
+static bool ipmmu_device_is_allowed(struct device *dev)
{
unsigned int i;
/*
- * For R-Car Gen3 use a white list to opt-in slave devices.
+ * R-Car Gen3 and RZ/G2 use the allow list to opt in devices.
* For other SoCs, this returns true anyway.
*/
- if (!soc_device_match(soc_rcar_gen3))
+ if (!soc_device_match(soc_needs_opt_in))
return true;
- /* Check whether this R-Car Gen3 can use the IPMMU correctly or not */
- if (!soc_device_match(soc_rcar_gen3_whitelist))
+ /* Check whether this SoC can use the IPMMU correctly or not */
+ if (soc_device_match(soc_denylist))
return false;
- /* Check whether this slave device can work with the IPMMU */
- for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
- if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
+ /* Check whether this device can work with the IPMMU */
+ for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
+ if (!strcmp(dev_name(dev), devices_allowlist[i]))
return true;
}
@@ -792,7 +783,7 @@ static bool ipmmu_slave_whitelist(struct device *dev)
static int ipmmu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- if (!ipmmu_slave_whitelist(dev))
+ if (!ipmmu_device_is_allowed(dev))
return -ENODEV;
iommu_fwspec_add_ids(dev, spec->args, 1);
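The renamed helpers keep the original gatekeeping order: SoCs outside the
opt-in families pass unconditionally, denylisted revisions fail outright, and
on the remaining SoCs only the four listed eMMC/SDHI instances are accepted.
Condensed control flow, as a sketch of the function above (device_is_in()
stands in for the real strcmp() loop):

	if (!soc_device_match(soc_needs_opt_in))
		return true;			/* e.g. R-Car Gen2     */
	if (soc_device_match(soc_denylist))
		return false;			/* e.g. r8a7796        */
	return device_is_in(devices_allowlist);	/* e.g. "ee100000.mmc" */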
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 040e85f70861..f0ba6a09b434 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -343,7 +343,6 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
spin_lock_init(&priv->pgtlock);
priv->cfg = (struct io_pgtable_cfg) {
- .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
.ias = 32,
.oas = 32,
@@ -490,6 +489,14 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
+{
+ struct msm_priv *priv = to_msm_priv(domain);
+
+ __flush_iotlb_range(iova, size, SZ_4K, false, priv);
+}
+
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t len, struct iommu_iotlb_gather *gather)
{
@@ -680,6 +687,7 @@ static struct iommu_ops msm_iommu_ops = {
* kick starting the other master.
*/
.iotlb_sync = NULL,
+ .iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.probe_device = msm_iommu_probe_device,
.release_device = msm_iommu_release_device,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 8e56cec532e7..6ecc007f07cd 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -3,10 +3,12 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -20,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -88,6 +91,9 @@
#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)
#define REG_MMU0_FAULT_VA 0x13c
+#define F_MMU_INVAL_VA_31_12_MASK GENMASK(31, 12)
+#define F_MMU_INVAL_VA_34_32_MASK GENMASK(11, 9)
+#define F_MMU_INVAL_PA_34_32_MASK GENMASK(8, 6)
#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
@@ -103,13 +109,6 @@
#define MTK_PROTECT_PA_ALIGN 256
-/*
- * Get the local arbiter ID and the portid within the larb arbiter
- * from mtk_m4u_id which is defined by MTK_M4U_ID.
- */
-#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
-#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
-
#define HAS_4GB_MODE BIT(0)
/* HW will use the EMI clock if there isn't the "bclk". */
#define HAS_BCLK BIT(1)
@@ -119,6 +118,7 @@
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)
#define HAS_LEGACY_IVRP_PADDR BIT(7)
+#define IOVA_34_EN BIT(8)
#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
((((pdata)->flags) & (_x)) == (_x))
@@ -127,11 +127,19 @@ struct mtk_iommu_domain {
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
+ struct mtk_iommu_data *data;
struct iommu_domain domain;
};
static const struct iommu_ops mtk_iommu_ops;
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
+
+#define MTK_IOMMU_TLB_ADDR(iova) ({ \
+ dma_addr_t _addr = iova; \
+ ((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
+})
+
/*
* In M4U 4GB mode, the physical address is remapped as below:
*
@@ -160,6 +168,25 @@ static LIST_HEAD(m4ulist); /* List all the M4U HWs */
#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
+struct mtk_iommu_iova_region {
+ dma_addr_t iova_base;
+ unsigned long long size;
+};
+
+static const struct mtk_iommu_iova_region single_domain[] = {
+ {.iova_base = 0, .size = SZ_4G},
+};
+
+static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
+ { .iova_base = 0x0, .size = SZ_4G}, /* disp: 0 ~ 4G */
+ #if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ { .iova_base = SZ_4G, .size = SZ_4G}, /* vdec: 4G ~ 8G */
+ { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* CAM/MDP: 8G ~ 12G */
+ { .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */
+ { .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */
+ #endif
+};
+
/*
* There may be 1 or 2 M4U HWs, but we always expect them to be in the same
* domain for performance.
@@ -182,33 +209,43 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
return container_of(dom, struct mtk_iommu_domain, domain);
}
-static void mtk_iommu_tlb_flush_all(void *cookie)
+static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
- struct mtk_iommu_data *data = cookie;
-
for_each_m4u(data) {
+ if (pm_runtime_get_if_in_use(data->dev) <= 0)
+ continue;
+
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
+
+ pm_runtime_put(data->dev);
}
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+ size_t granule,
+ struct mtk_iommu_data *data)
{
- struct mtk_iommu_data *data = cookie;
+ bool has_pm = !!data->dev->pm_domain;
unsigned long flags;
int ret;
u32 tmp;
for_each_m4u(data) {
+ if (has_pm) {
+ if (pm_runtime_get_if_in_use(data->dev) <= 0)
+ continue;
+ }
+
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + data->plat_data->inv_sel_reg);
- writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
- writel_relaxed(iova + size - 1,
+ writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
+ data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
@@ -219,36 +256,24 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
- mtk_iommu_tlb_flush_all(cookie);
+ mtk_iommu_tlb_flush_all(data);
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
spin_unlock_irqrestore(&data->tlb_lock, flags);
- }
-}
-
-static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- struct iommu_domain *domain = &data->m4u_dom->domain;
- iommu_iotlb_gather_add_page(domain, gather, iova, granule);
+ if (has_pm)
+ pm_runtime_put(data->dev);
+ }
}
-static const struct iommu_flush_ops mtk_iommu_flush_ops = {
- .tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
- .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
-};
-
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
struct mtk_iommu_data *data = dev_id;
struct mtk_iommu_domain *dom = data->m4u_dom;
- u32 int_state, regval, fault_iova, fault_pa;
unsigned int fault_larb, fault_port, sub_comm = 0;
+ u32 int_state, regval, va34_32, pa34_32;
+ u64 fault_iova, fault_pa;
bool layer, write;
/* Read error info from registers */
@@ -264,6 +289,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
}
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
+ va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
+ pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
+ fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
+ fault_iova |= (u64)va34_32 << 32;
+ fault_pa |= (u64)pa34_32 << 32;
+ }
+
fault_port = F_MMU_INT_ID_PORT_ID(regval);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
fault_larb = F_MMU_INT_ID_COMM_ID(regval);
@@ -277,7 +310,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
dev_err_ratelimited(
data->dev,
- "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
+ "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
int_state, fault_iova, fault_pa, fault_larb, fault_port,
layer, write ? "write" : "read");
}
@@ -292,21 +325,57 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mtk_iommu_config(struct mtk_iommu_data *data,
- struct device *dev, bool enable)
+static int mtk_iommu_get_domain_id(struct device *dev,
+ const struct mtk_iommu_plat_data *plat_data)
+{
+ const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
+ const struct bus_dma_region *dma_rgn = dev->dma_range_map;
+ int i, candidate = -1;
+ dma_addr_t dma_end;
+
+ if (!dma_rgn || plat_data->iova_region_nr == 1)
+ return 0;
+
+ dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
+ for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
+ /* Best fit. */
+ if (dma_rgn->dma_start == rgn->iova_base &&
+ dma_end == rgn->iova_base + rgn->size - 1)
+ return i;
+ /* ok if it is inside this region. */
+ if (dma_rgn->dma_start >= rgn->iova_base &&
+ dma_end < rgn->iova_base + rgn->size)
+ candidate = i;
+ }
+
+ if (candidate >= 0)
+ return candidate;
+ dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
+ &dma_rgn->dma_start, dma_rgn->size);
+ return -EINVAL;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
+ bool enable, unsigned int domid)
{
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ const struct mtk_iommu_iova_region *region;
int i;
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
+
larb_mmu = &data->larb_imu[larbid];
- dev_dbg(dev, "%s iommu port: %d\n",
- enable ? "enable" : "disable", portid);
+ region = data->plat_data->iova_region + domid;
+ larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
+
+ dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
+ enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ portid, domid, larb_mmu->bank[portid]);
if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
@@ -315,22 +384,34 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
}
}
-static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
+static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ struct mtk_iommu_data *data,
+ unsigned int domid)
{
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ const struct mtk_iommu_iova_region *region;
+
+ /* Use the existing domain as there is only one pgtable here. */
+ if (data->m4u_dom) {
+ dom->iop = data->m4u_dom->iop;
+ dom->cfg = data->m4u_dom->cfg;
+ dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
+ goto update_iova_region;
+ }
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_EXT,
.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
- .ias = 32,
- .oas = 34,
- .tlb = &mtk_iommu_flush_ops,
+ .ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
.iommu_dev = data->dev,
};
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
+ dom->cfg.oas = data->enable_4GB ? 33 : 32;
+ else
+ dom->cfg.oas = 35;
+
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
if (!dom->iop) {
dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -339,6 +420,13 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
/* Update our supported page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+
+update_iova_region:
+ /* Update the iova region for this domain */
+ region = data->plat_data->iova_region + domid;
+ dom->domain.geometry.aperture_start = region->iova_base;
+ dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
+ dom->domain.geometry.force_aperture = true;
return 0;
}
@@ -353,30 +441,16 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
if (!dom)
return NULL;
- if (iommu_get_dma_cookie(&dom->domain))
- goto free_dom;
-
- if (mtk_iommu_domain_finalise(dom))
- goto put_dma_cookie;
-
- dom->domain.geometry.aperture_start = 0;
- dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
- dom->domain.geometry.force_aperture = true;
+ if (iommu_get_dma_cookie(&dom->domain)) {
+ kfree(dom);
+ return NULL;
+ }
return &dom->domain;
-
-put_dma_cookie:
- iommu_put_dma_cookie(&dom->domain);
-free_dom:
- kfree(dom);
- return NULL;
}
static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-
- free_io_pgtable_ops(dom->iop);
iommu_put_dma_cookie(domain);
kfree(to_mtk_domain(domain));
}
@@ -386,18 +460,37 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct device *m4udev = data->dev;
+ int ret, domid;
- if (!data)
- return -ENODEV;
+ domid = mtk_iommu_get_domain_id(dev, data->plat_data);
+ if (domid < 0)
+ return domid;
+
+ if (!dom->data) {
+ if (mtk_iommu_domain_finalise(dom, data, domid))
+ return -ENODEV;
+ dom->data = data;
+ }
+
+ if (!data->m4u_dom) { /* Initialize the M4U HW */
+ ret = pm_runtime_resume_and_get(m4udev);
+ if (ret < 0)
+ return ret;
- /* Update the pgtable base address register of the M4U HW */
- if (!data->m4u_dom) {
+ ret = mtk_iommu_hw_init(data);
+ if (ret) {
+ pm_runtime_put(m4udev);
+ return ret;
+ }
data->m4u_dom = dom;
writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
data->base + REG_MMU_PT_BASE_ADDR);
+
+ pm_runtime_put(m4udev);
}
- mtk_iommu_config(data, dev, true);
+ mtk_iommu_config(data, dev, true, domid);
return 0;
}
@@ -406,20 +499,16 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- if (!data)
- return;
-
- mtk_iommu_config(data, dev, false);
+ mtk_iommu_config(data, dev, false, 0);
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
- if (data->enable_4GB)
+ if (dom->data->enable_4GB)
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
@@ -431,37 +520,48 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long end = iova + size - 1;
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
return dom->iop->unmap(dom->iop, iova, size, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+ mtk_iommu_tlb_flush_all(dom->data);
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- size_t length = gather->end - gather->start;
-
- if (gather->start == ULONG_MAX)
- return;
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ size_t length = gather->end - gather->start + 1;
mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
- data);
+ dom->data);
+}
+
+static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+ mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
phys_addr_t pa;
pa = dom->iop->iova_to_phys(dom->iop, iova);
- if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+ if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
return pa;
@@ -493,19 +593,25 @@ static void mtk_iommu_release_device(struct device *dev)
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ struct iommu_group *group;
+ int domid;
if (!data)
return ERR_PTR(-ENODEV);
- /* All the client devices are in the same m4u iommu-group */
- if (!data->m4u_group) {
- data->m4u_group = iommu_group_alloc();
- if (IS_ERR(data->m4u_group))
- dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ domid = mtk_iommu_get_domain_id(dev, data->plat_data);
+ if (domid < 0)
+ return ERR_PTR(domid);
+
+ group = data->m4u_group[domid];
+ if (!group) {
+ group = iommu_group_alloc();
+ if (!IS_ERR(group))
+ data->m4u_group[domid] = group;
} else {
- iommu_group_ref_get(data->m4u_group);
+ iommu_group_ref_get(group);
}
- return data->m4u_group;
+ return group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
@@ -530,6 +636,35 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static void mtk_iommu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+ unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
+ const struct mtk_iommu_iova_region *resv, *curdom;
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+
+ if ((int)domid < 0)
+ return;
+ curdom = data->plat_data->iova_region + domid;
+ for (i = 0; i < data->plat_data->iova_region_nr; i++) {
+ resv = data->plat_data->iova_region + i;
+
+ /* Only reserve when the region is inside the current domain */
+ if (resv->iova_base <= curdom->iova_base ||
+ resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
+ continue;
+
+ region = iommu_alloc_resv_region(resv->iova_base, resv->size,
+ prot, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+ }
+}
+
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
.domain_free = mtk_iommu_domain_free,
@@ -539,11 +674,14 @@ static const struct iommu_ops mtk_iommu_ops = {
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
+ .iotlb_sync_map = mtk_iommu_sync_map,
.iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
+ .get_resv_regions = mtk_iommu_get_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
@@ -639,6 +777,9 @@ static int mtk_iommu_probe(struct platform_device *pdev)
{
struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *larbnode, *smicomm_node;
+ struct platform_device *plarbdev;
+ struct device_link *link;
struct resource *res;
resource_size_t ioaddr;
struct component_match *match = NULL;
@@ -705,8 +846,6 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return larb_nr;
for (i = 0; i < larb_nr; i++) {
- struct device_node *larbnode;
- struct platform_device *plarbdev;
u32 id;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
@@ -733,31 +872,65 @@ static int mtk_iommu_probe(struct platform_device *pdev)
compare_of, larbnode);
}
- platform_set_drvdata(pdev, data);
+ /* Get smi-common dev from the last larb. */
+ smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
+ if (!smicomm_node)
+ return -EINVAL;
- ret = mtk_iommu_hw_init(data);
- if (ret)
- return ret;
+ plarbdev = of_find_device_by_node(smicomm_node);
+ of_node_put(smicomm_node);
+ data->smicomm_dev = &plarbdev->dev;
+
+ pm_runtime_enable(dev);
+
+ link = device_link_add(data->smicomm_dev, dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+ ret = -EINVAL;
+ goto out_runtime_disable;
+ }
+
+ platform_set_drvdata(pdev, data);
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
if (ret)
- return ret;
+ goto out_link_remove;
iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
ret = iommu_device_register(&data->iommu);
if (ret)
- return ret;
+ goto out_sysfs_remove;
spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist);
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (!iommu_present(&platform_bus_type)) {
+ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (ret)
+ goto out_list_del;
+ }
- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ if (ret)
+ goto out_bus_set_null;
+ return ret;
+
+out_bus_set_null:
+ bus_set_iommu(&platform_bus_type, NULL);
+out_list_del:
+ list_del(&data->list);
+ iommu_device_unregister(&data->iommu);
+out_sysfs_remove:
+ iommu_device_sysfs_remove(&data->iommu);
+out_link_remove:
+ device_link_remove(data->smicomm_dev, dev);
+out_runtime_disable:
+ pm_runtime_disable(dev);
+ return ret;
}
static int mtk_iommu_remove(struct platform_device *pdev)
@@ -771,12 +944,14 @@ static int mtk_iommu_remove(struct platform_device *pdev)
bus_set_iommu(&platform_bus_type, NULL);
clk_disable_unprepare(data->bclk);
+ device_link_remove(data->smicomm_dev, &pdev->dev);
+ pm_runtime_disable(&pdev->dev);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
return 0;
}
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
struct mtk_iommu_suspend_reg *reg = &data->reg;
@@ -794,7 +969,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
struct mtk_iommu_suspend_reg *reg = &data->reg;
@@ -802,6 +977,9 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
void __iomem *base = data->base;
int ret;
+ /* Avoid the first resume affecting the default values of the registers below. */
+ if (!m4u_dom)
+ return 0;
ret = clk_prepare_enable(data->bclk);
if (ret) {
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
@@ -815,20 +993,22 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
- if (m4u_dom)
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- base + REG_MMU_PT_BASE_ADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
return 0;
}
static const struct dev_pm_ops mtk_iommu_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+ SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct mtk_iommu_plat_data mt2712_data = {
.m4u_plat = M4U_MT2712,
.flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};
@@ -836,6 +1016,8 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
@@ -843,6 +1025,8 @@ static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
};
@@ -851,6 +1035,8 @@ static const struct mtk_iommu_plat_data mt8173_data = {
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
HAS_LEGACY_IVRP_PADDR,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
@@ -858,15 +1044,29 @@ static const struct mtk_iommu_plat_data mt8183_data = {
.m4u_plat = M4U_MT8183,
.flags = RESET_AXI,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
+static const struct mtk_iommu_plat_data mt8192_data = {
+ .m4u_plat = M4U_MT8192,
+ .flags = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+ {0, 14, 16}, {0, 13, 18, 17}},
+};
+
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
+ { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
{}
};
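mtk_iommu_get_domain_id() maps a master's dma-ranges onto one of the
plat_data->iova_region entries, preferring an exact match and falling back to
the smallest enclosing region; the chosen region also decides the per-port
bank value programmed into the SMI larb. Two worked examples against
mt8192_multi_dom above (the device ranges are hypothetical):

	/* dma-ranges 0x240000000 + 0x4000000 matches CCU0 exactly -> id 3 */
	/* dma-ranges 0x100000000 + SZ_1G fits inside vdec         -> id 1 */

	larb_mmu->bank[portid] = upper_32_bits(0x240000000ULL);	/* == 2 */

Similarly, MTK_IOMMU_TLB_ADDR() folds IOVA bits [33:32] into the low bits of
the 32-bit invalidate registers: for iova = 0x240001000 it yields
(0x40001000 | 0x2) = 0x40001002.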
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index df32b3e3408b..f81fa8862ed0 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -17,10 +17,13 @@
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
#define MTK_LARB_COM_MAX 8
#define MTK_LARB_SUBCOM_MAX 4
+#define MTK_IOMMU_GROUP_MAX 8
+
struct mtk_iommu_suspend_reg {
union {
u32 standard_axi_mode;/* v1 */
@@ -42,12 +45,18 @@ enum mtk_iommu_plat {
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
+ M4U_MT8192,
};
+struct mtk_iommu_iova_region;
+
struct mtk_iommu_plat_data {
enum mtk_iommu_plat m4u_plat;
u32 flags;
u32 inv_sel_reg;
+
+ unsigned int iova_region_nr;
+ const struct mtk_iommu_iova_region *iova_region;
unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
@@ -61,12 +70,13 @@ struct mtk_iommu_data {
phys_addr_t protect_base; /* protect memory base */
struct mtk_iommu_suspend_reg reg;
struct mtk_iommu_domain *m4u_dom;
- struct iommu_group *m4u_group;
+ struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX];
bool enable_4GB;
spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
+ struct device *smicomm_dev;
struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index fac720273889..6f130e51f072 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -261,7 +261,8 @@ static int gart_iommu_of_xlate(struct device *dev,
return 0;
}
-static void gart_iommu_sync_map(struct iommu_domain *domain)
+static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
FLUSH_GART_REGS(gart_handle);
}
@@ -269,7 +270,9 @@ static void gart_iommu_sync_map(struct iommu_domain *domain)
static void gart_iommu_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- gart_iommu_sync_map(domain);
+ size_t length = gather->end - gather->start + 1;
+
+ gart_iommu_sync_map(domain, gather->start, length);
}
static const struct iommu_ops gart_iommu_ops = {
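Note the inclusive-range convention shared with the MediaTek change above:
iommu_iotlb_gather stores end as the last byte of the range, so the length is
gather->end - gather->start + 1. For example, a gathered range with
start = 0x1000 and end = 0x1fff flushes exactly one 4KiB page:

	size_t length = 0x1fff - 0x1000 + 1;	/* 0x1000 */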
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 9267a85fee18..7de9605cac4f 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -64,9 +64,6 @@ static int ipack_bus_probe(struct device *device)
struct ipack_device *dev = to_ipack_dev(device);
struct ipack_driver *drv = to_ipack_driver(device->driver);
- if (!drv->ops->probe)
- return -EINVAL;
-
return drv->ops->probe(dev);
}
@@ -75,10 +72,9 @@ static int ipack_bus_remove(struct device *device)
struct ipack_device *dev = to_ipack_dev(device);
struct ipack_driver *drv = to_ipack_driver(device->driver);
- if (!drv->ops->remove)
- return -EINVAL;
+ if (drv->ops->remove)
+ drv->ops->remove(dev);
- drv->ops->remove(dev);
return 0;
}
@@ -252,6 +248,9 @@ EXPORT_SYMBOL_GPL(ipack_bus_unregister);
int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
const char *name)
{
+ if (!edrv->ops->probe)
+ return -EINVAL;
+
edrv->driver.owner = owner;
edrv->driver.name = name;
edrv->driver.bus = &ipack_bus_type;
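Making .probe mandatory at registration time turns a per-device -EINVAL into
a single early failure, while .remove becomes genuinely optional. A sketch of
a driver that now fails to register (all names are hypothetical):

	static const struct ipack_driver_ops my_ops = {
		/* .probe deliberately left NULL */
	};

	static struct ipack_driver my_drv = { .ops = &my_ops };

	/* ipack_driver_register(&my_drv, THIS_MODULE, "my_drv") now
	 * returns -EINVAL up front instead of failing every bus probe.
	 */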
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 94920a51c628..e74fa206240a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -260,11 +260,6 @@ config ST_IRQCHIP
help
Enables SysCfg Controlled IRQs on STi based platforms.
-config TANGO_IRQ
- bool
- select IRQ_DOMAIN
- select GENERIC_IRQ_CHIP
-
config TB10X_IRQC
bool
select IRQ_DOMAIN
@@ -432,7 +427,7 @@ config QCOM_PDC
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
config CSKY_MPINTC
- bool "C-SKY Multi Processor Interrupt Controller"
+ bool
depends on CSKY
help
Say yes here to enable C-SKY SMP interrupt controller driver used
@@ -457,7 +452,8 @@ config IMX_IRQSTEER
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
config IMX_INTMUX
- def_bool y if ARCH_MXC || COMPILE_TEST
+ bool "i.MX INTMUX support" if COMPILE_TEST
+ default y if ARCH_MXC
select IRQ_DOMAIN
help
Support for the i.MX INTMUX interrupt multiplexer.
@@ -493,8 +489,9 @@ config TI_SCI_INTA_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 0ac93bfaec61..c59b95a0532c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sun6i-r.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
@@ -45,7 +46,6 @@ obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
-obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
@@ -55,7 +55,6 @@ obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_ST_IRQCHIP) += irq-st.o
-obj-$(CONFIG_TANGO_IRQ) += irq-tango.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
@@ -113,3 +112,4 @@ obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
+obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 5f5eb8877c41..25c9a9c06e41 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
{
int cpu = smp_processor_id();
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
.name = "IPI",
.irq_mask = bcm2836_arm_irqchip_dummy_op,
.irq_unmask = bcm2836_arm_irqchip_dummy_op,
- .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
.ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 3fc65375cbe0..eb0ee356a629 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -75,10 +75,10 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
* are presented to the GIC CPUIF as follow:
* (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
*
- * If SCR_EL3.FIQ == 1, the values writen to/read from PMR and RPR at non-secure
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
* EL1 are subject to a similar operation thus matching the priorities presented
* from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
- * these values are unchanched by the GIC.
+ * these values are unchanged by the GIC.
*
* see GICv3/GICv4 Architecture Specification (IHI0069D):
* - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 9ed1bc473663..09b91b81851c 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 12aeeab43289..32562b7e681b 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -225,7 +225,7 @@ static int pch_msi_init(struct device_node *node,
goto err_priv;
}
- priv->msi_map = bitmap_alloc(priv->num_irqs, GFP_KERNEL);
+ priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
if (!priv->msi_map) {
ret = -ENOMEM;
goto err_priv;
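The switch to bitmap_zalloc() matters because bitmap_alloc() returns uninitialized memory: random set bits in a fresh MSI map would make vectors look already allocated. A userspace-flavoured sketch of the distinction, with malloc/calloc standing in for the kernel allocators:

    #include <stdlib.h>
    #include <limits.h>

    #define BITS_TO_LONGS(n) \
            (((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

    /* Like bitmap_alloc(): contents indeterminate. */
    static unsigned long *bitmap_alloc_sketch(unsigned int nbits)
    {
            return malloc(BITS_TO_LONGS(nbits) * sizeof(long));
    }

    /* Like bitmap_zalloc(): every bit starts clear. */
    static unsigned long *bitmap_zalloc_sketch(unsigned int nbits)
    {
            return calloc(BITS_TO_LONGS(nbits), sizeof(long));
    }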
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index f94f974a8764..853b3972dbe7 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -64,7 +64,7 @@ static struct irq_chip ls_extirq_chip = {
.irq_set_type = ls_extirq_set_type,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = irq_chip_set_affinity_parent,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static int
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 95d4fd8f7a96..0bbb0b2d0dd5 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
new file mode 100644
index 000000000000..b57c67dfab5b
--- /dev/null
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Birger Koblitz <mail@birger-koblitz.de>
+ * Copyright (C) 2020 Bert Vermeulen <bert@biot.com>
+ * Copyright (C) 2020 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/chained_irq.h>
+
+/* Global Interrupt Mask Register */
+#define RTL_ICTL_GIMR 0x00
+/* Global Interrupt Status Register */
+#define RTL_ICTL_GISR 0x04
+/* Interrupt Routing Registers */
+#define RTL_ICTL_IRR0 0x08
+#define RTL_ICTL_IRR1 0x0c
+#define RTL_ICTL_IRR2 0x10
+#define RTL_ICTL_IRR3 0x14
+
+#define REG(x) (realtek_ictl_base + x)
+
+static DEFINE_RAW_SPINLOCK(irq_lock);
+static void __iomem *realtek_ictl_base;
+
+static void realtek_ictl_unmask_irq(struct irq_data *i)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+
+ value = readl(REG(RTL_ICTL_GIMR));
+ value |= BIT(i->hwirq);
+ writel(value, REG(RTL_ICTL_GIMR));
+
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+static void realtek_ictl_mask_irq(struct irq_data *i)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+
+ value = readl(REG(RTL_ICTL_GIMR));
+ value &= ~BIT(i->hwirq);
+ writel(value, REG(RTL_ICTL_GIMR));
+
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+static struct irq_chip realtek_ictl_irq = {
+ .name = "realtek-rtl-intc",
+ .irq_mask = realtek_ictl_mask_irq,
+ .irq_unmask = realtek_ictl_unmask_irq,
+};
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .map = intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void realtek_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain;
+ unsigned int pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR));
+ if (unlikely(!pending)) {
+ spurious_interrupt();
+ goto out;
+ }
+ domain = irq_desc_get_handler_data(desc);
+ generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+/*
+ * SoC interrupts are cascaded to MIPS CPU interrupts according to the
+ * interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
+ * the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
+ * thus go into 4 IRRs.
+ */
+static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
+{
+ struct device_node *cpu_ictl;
+ const __be32 *imap;
+ u32 imaplen, soc_int, cpu_int, tmp, regs[4];
+ int ret, i, irr_regs[] = {
+ RTL_ICTL_IRR3,
+ RTL_ICTL_IRR2,
+ RTL_ICTL_IRR1,
+ RTL_ICTL_IRR0,
+ };
+ u8 mips_irqs_set;
+
+ ret = of_property_read_u32(node, "#address-cells", &tmp);
+ if (ret || tmp)
+ return -EINVAL;
+
+ imap = of_get_property(node, "interrupt-map", &imaplen);
+ if (!imap || imaplen % 3)
+ return -EINVAL;
+
+ mips_irqs_set = 0;
+ memset(regs, 0, sizeof(regs));
+ for (i = 0; i < imaplen; i += 3 * sizeof(u32)) {
+ soc_int = be32_to_cpup(imap);
+ if (soc_int > 31)
+ return -EINVAL;
+
+ cpu_ictl = of_find_node_by_phandle(be32_to_cpup(imap + 1));
+ if (!cpu_ictl)
+ return -EINVAL;
+ ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
+ if (ret || tmp != 1)
+ return -EINVAL;
+ of_node_put(cpu_ictl);
+
+ cpu_int = be32_to_cpup(imap + 2);
+ if (cpu_int > 7)
+ return -EINVAL;
+
+ if (!(mips_irqs_set & BIT(cpu_int))) {
+ irq_set_chained_handler_and_data(cpu_int, realtek_irq_dispatch,
+ domain);
+ mips_irqs_set |= BIT(cpu_int);
+ }
+
+ regs[(soc_int * 4) / 32] |= cpu_int << (soc_int * 4) % 32;
+ imap += 3;
+ }
+
+ for (i = 0; i < 4; i++)
+ writel(regs[i], REG(irr_regs[i]));
+
+ return 0;
+}
+
+static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ int ret;
+
+ realtek_ictl_base = of_iomap(node, 0);
+ if (!realtek_ictl_base)
+ return -ENXIO;
+
+ /* Disable all cascaded interrupts */
+ writel(0, REG(RTL_ICTL_GIMR));
+
+ domain = irq_domain_add_simple(node, 32, 0,
+ &irq_domain_ops, NULL);
+
+ ret = map_interrupts(node, domain);
+ if (ret) {
+ pr_err("invalid interrupt map\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(realtek_rtl_intc, "realtek,rtl-intc", realtek_rtl_of_init);
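As the comment in map_interrupts() explains, each SoC interrupt owns a 4-bit routing field, so the register index and bit offset both derive from soc_int * 4; the regs[] images are then written out to IRR3..IRR0 in the order given by irr_regs[]. A standalone sketch of just the packing arithmetic:

    #include <stdint.h>

    /* 32 SoC interrupts x 4 bits each = 128 bits across four 32-bit IRRs. */
    static void irr_route(uint32_t regs[4], unsigned int soc_int,
                          unsigned int cpu_int)
    {
            unsigned int bit = soc_int * 4;          /* absolute bit position */

            regs[bit / 32] |= cpu_int << (bit % 32); /* select IRR, then nibble */
    }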
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
deleted file mode 100644
index c86faaa35ca4..000000000000
--- a/drivers/irqchip/irq-sirfsoc.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * interrupt controller support for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
-#include <linux/syscore_ops.h>
-#include <asm/mach/irq.h>
-#include <asm/exception.h>
-
-#define SIRFSOC_INT_RISC_MASK0 0x0018
-#define SIRFSOC_INT_RISC_MASK1 0x001C
-#define SIRFSOC_INT_RISC_LEVEL0 0x0020
-#define SIRFSOC_INT_RISC_LEVEL1 0x0024
-#define SIRFSOC_INIT_IRQ_ID 0x0038
-#define SIRFSOC_INT_BASE_OFFSET 0x0004
-
-#define SIRFSOC_NUM_IRQS 64
-#define SIRFSOC_NUM_BANKS (SIRFSOC_NUM_IRQS / 32)
-
-static struct irq_domain *sirfsoc_irqdomain;
-
-static void __iomem *sirfsoc_irq_get_regbase(void)
-{
- return (void __iomem __force *)sirfsoc_irqdomain->host_data;
-}
-
-static __init void sirfsoc_alloc_gc(void __iomem *base)
-{
- unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
- unsigned int set = IRQ_LEVEL;
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
- int i;
-
- irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc",
- handle_level_irq, clr, set,
- IRQ_GC_INIT_MASK_CACHE);
-
- for (i = 0; i < SIRFSOC_NUM_BANKS; i++) {
- gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32);
- gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET;
- ct = gc->chip_types;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
- ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
- }
-}
-
-static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
-{
- void __iomem *base = sirfsoc_irq_get_regbase();
- u32 irqstat;
-
- irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
- handle_domain_irq(sirfsoc_irqdomain, irqstat & 0xff, regs);
-}
-
-static int __init sirfsoc_irq_init(struct device_node *np,
- struct device_node *parent)
-{
- void __iomem *base = of_iomap(np, 0);
- if (!base)
- panic("unable to map intc cpu registers\n");
-
- sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
- &irq_generic_chip_ops, base);
- sirfsoc_alloc_gc(base);
-
- writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
- writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
-
- writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0);
- writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1);
-
- set_handle_irq(sirfsoc_handle_irq);
-
- return 0;
-}
-IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init);
-
-struct sirfsoc_irq_status {
- u32 mask0;
- u32 mask1;
- u32 level0;
- u32 level1;
-};
-
-static struct sirfsoc_irq_status sirfsoc_irq_st;
-
-static int sirfsoc_irq_suspend(void)
-{
- void __iomem *base = sirfsoc_irq_get_regbase();
-
- sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
- sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
- sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0);
- sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1);
-
- return 0;
-}
-
-static void sirfsoc_irq_resume(void)
-{
- void __iomem *base = sirfsoc_irq_get_regbase();
-
- writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
- writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
- writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0);
- writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1);
-}
-
-static struct syscore_ops sirfsoc_irq_syscore_ops = {
- .suspend = sirfsoc_irq_suspend,
- .resume = sirfsoc_irq_resume,
-};
-
-static int __init sirfsoc_irq_pm_init(void)
-{
- if (!sirfsoc_irqdomain)
- return 0;
-
- register_syscore_ops(&sirfsoc_irq_syscore_ops);
- return 0;
-}
-device_initcall(sirfsoc_irq_pm_init);
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
index 0aa50d025ef6..fbb354413ffa 100644
--- a/drivers/irqchip/irq-sl28cpld.c
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
irqchip->chip.num_regs = 1;
irqchip->chip.status_base = base + INTC_IP;
irqchip->chip.mask_base = base + INTC_IE;
- irqchip->chip.mask_invert = true,
+ irqchip->chip.mask_invert = true;
irqchip->chip.ack_base = base + INTC_IP;
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
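The one-character fix above is subtle: with a trailing comma the two lines still form a single valid expression statement, because the C comma operator evaluates its left operand and then its right, so the original compiled silently. A compilable illustration of why no warning appeared:

    struct chip_sketch { int mask_invert; int ack_base; };

    static void setup(struct chip_sketch *c, int base)
    {
            c->mask_invert = 1,     /* comma operator: statement continues... */
            c->ack_base = base;     /* ...and only ends here */
    }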
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
new file mode 100644
index 000000000000..4cd3e533740b
--- /dev/null
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The R_INTC in Allwinner A31 and newer SoCs manages several types of
+ * interrupts, as shown below:
+ *
+ * NMI IRQ DIRECT IRQs MUXED IRQs
+ * bit 0 bits 1-15^ bits 19-31
+ *
+ * +---------+ +---------+ +---------+ +---------+
+ * | NMI Pad | | IRQ d | | IRQ m | | IRQ m+7 |
+ * +---------+ +---------+ +---------+ +---------+
+ * | | | | | | |
+ * | | | | |......| |
+ * +------V------+ +------------+ | | | +--V------V--+ |
+ * | Invert/ | | Write 1 to | | | | | AND with | |
+ * | Edge Detect | | PENDING[0] | | | | | MUX[m/8] | |
+ * +-------------+ +------------+ | | | +------------+ |
+ * | | | | | | |
+ * +--V-------V--+ +--V--+ | +--V--+ | +--V--+
+ * | Set Reset| | GIC | | | GIC | | | GIC |
+ * | Latch | | SPI | | | SPI |... | ...| SPI |
+ * +-------------+ | N+d | | | m | | | m+7 |
+ * | | +-----+ | +-----+ | +-----+
+ * | | | |
+ * +-------V-+ +-V----------+ +---------V--+ +--------V--------+
+ * | GIC SPI | | AND with | | AND with | | AND with |
+ * | N (=32) | | ENABLE[0] | | ENABLE[d] | | ENABLE[19+m/8] |
+ * +---------+ +------------+ +------------+ +-----------------+
+ * | | |
+ * +------V-----+ +------V-----+ +--------V--------+
+ * | Read | | Read | | Read |
+ * | PENDING[0] | | PENDING[d] | | PENDING[19+m/8] |
+ * +------------+ +------------+ +-----------------+
+ *
+ * ^ bits 16-18 are direct IRQs for peripherals with banked interrupts, such as
+ * the MSGBOX. These IRQs do not map to any GIC SPI.
+ *
+ * The H6 variant adds two more (banked) direct IRQs and implements the full
+ * set of 128 mux bits. This requires a second set of top-level registers.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define SUN6I_NMI_CTRL (0x0c)
+#define SUN6I_IRQ_PENDING(n) (0x10 + 4 * (n))
+#define SUN6I_IRQ_ENABLE(n) (0x40 + 4 * (n))
+#define SUN6I_MUX_ENABLE(n) (0xc0 + 4 * (n))
+
+#define SUN6I_NMI_SRC_TYPE_LEVEL_LOW 0
+#define SUN6I_NMI_SRC_TYPE_EDGE_FALLING 1
+#define SUN6I_NMI_SRC_TYPE_LEVEL_HIGH 2
+#define SUN6I_NMI_SRC_TYPE_EDGE_RISING 3
+
+#define SUN6I_NMI_BIT BIT(0)
+
+#define SUN6I_NMI_NEEDS_ACK ((void *)1)
+
+#define SUN6I_NR_TOP_LEVEL_IRQS 64
+#define SUN6I_NR_DIRECT_IRQS 16
+#define SUN6I_NR_MUX_BITS 128
+
+struct sun6i_r_intc_variant {
+ u32 first_mux_irq;
+ u32 nr_mux_irqs;
+ u32 mux_valid[BITS_TO_U32(SUN6I_NR_MUX_BITS)];
+};
+
+static void __iomem *base;
+static irq_hw_number_t nmi_hwirq;
+static DECLARE_BITMAP(wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
+static DECLARE_BITMAP(wake_mux_enabled, SUN6I_NR_MUX_BITS);
+static DECLARE_BITMAP(wake_mux_valid, SUN6I_NR_MUX_BITS);
+
+static void sun6i_r_intc_ack_nmi(void)
+{
+ writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_PENDING(0));
+}
+
+static void sun6i_r_intc_nmi_ack(struct irq_data *data)
+{
+ if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+ sun6i_r_intc_ack_nmi();
+ else
+ data->chip_data = SUN6I_NMI_NEEDS_ACK;
+}
+
+static void sun6i_r_intc_nmi_eoi(struct irq_data *data)
+{
+ /* For oneshot IRQs, delay the ack until the IRQ is unmasked. */
+ if (data->chip_data == SUN6I_NMI_NEEDS_ACK && !irqd_irq_masked(data)) {
+ data->chip_data = NULL;
+ sun6i_r_intc_ack_nmi();
+ }
+
+ irq_chip_eoi_parent(data);
+}
+
+static void sun6i_r_intc_nmi_unmask(struct irq_data *data)
+{
+ if (data->chip_data == SUN6I_NMI_NEEDS_ACK) {
+ data->chip_data = NULL;
+ sun6i_r_intc_ack_nmi();
+ }
+
+ irq_chip_unmask_parent(data);
+}
+
+static int sun6i_r_intc_nmi_set_type(struct irq_data *data, unsigned int type)
+{
+ u32 nmi_src_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(nmi_src_type, base + SUN6I_NMI_CTRL);
+
+ /*
+ * The "External NMI" GIC input connects to a latch inside R_INTC, not
+ * directly to the pin. So the GIC trigger type does not depend on the
+ * NMI pin trigger type.
+ */
+ return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static int sun6i_r_intc_nmi_set_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ if (which == IRQCHIP_STATE_PENDING && !state)
+ sun6i_r_intc_ack_nmi();
+
+ return irq_chip_set_parent_state(data, which, state);
+}
+
+static int sun6i_r_intc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ unsigned long offset_from_nmi = data->hwirq - nmi_hwirq;
+
+ if (offset_from_nmi < SUN6I_NR_DIRECT_IRQS)
+ assign_bit(offset_from_nmi, wake_irq_enabled, on);
+ else if (test_bit(data->hwirq, wake_mux_valid))
+ assign_bit(data->hwirq, wake_mux_enabled, on);
+ else
+ /* Not wakeup capable. */
+ return -EPERM;
+
+ return 0;
+}
+
+static struct irq_chip sun6i_r_intc_nmi_chip = {
+ .name = "sun6i-r-intc",
+ .irq_ack = sun6i_r_intc_nmi_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = sun6i_r_intc_nmi_unmask,
+ .irq_eoi = sun6i_r_intc_nmi_eoi,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = sun6i_r_intc_nmi_set_type,
+ .irq_set_irqchip_state = sun6i_r_intc_nmi_set_irqchip_state,
+ .irq_set_wake = sun6i_r_intc_irq_set_wake,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static struct irq_chip sun6i_r_intc_wakeup_chip = {
+ .name = "sun6i-r-intc",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = sun6i_r_intc_irq_set_wake,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int sun6i_r_intc_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* Accept the old two-cell binding for the NMI only. */
+ if (fwspec->param_count == 2 && fwspec->param[0] == 0) {
+ *hwirq = nmi_hwirq;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ /* Otherwise this binding should match the GIC SPI binding. */
+ if (fwspec->param_count < 3)
+ return -EINVAL;
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int sun6i_r_intc_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec gic_fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = sun6i_r_intc_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+ if (hwirq + nr_irqs > SUN6I_NR_MUX_BITS)
+ return -EINVAL;
+
+ /* Construct a GIC-compatible fwspec from this fwspec. */
+ gic_fwspec = (struct irq_fwspec) {
+ .fwnode = domain->parent->fwnode,
+ .param_count = 3,
+ .param = { GIC_SPI, hwirq, type },
+ };
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; ++i, ++hwirq, ++virq) {
+ if (hwirq == nmi_hwirq) {
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &sun6i_r_intc_nmi_chip, 0);
+ irq_set_handler(virq, handle_fasteoi_ack_irq);
+ } else {
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &sun6i_r_intc_wakeup_chip, 0);
+ }
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
+ .translate = sun6i_r_intc_domain_translate,
+ .alloc = sun6i_r_intc_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int sun6i_r_intc_suspend(void)
+{
+ u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
+ int i;
+
+ /* Wake IRQs are enabled during system sleep and shutdown. */
+ bitmap_to_arr32(buf, wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
+ for (i = 0; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
+ writel_relaxed(buf[i], base + SUN6I_IRQ_ENABLE(i));
+ bitmap_to_arr32(buf, wake_mux_enabled, SUN6I_NR_MUX_BITS);
+ for (i = 0; i < BITS_TO_U32(SUN6I_NR_MUX_BITS); ++i)
+ writel_relaxed(buf[i], base + SUN6I_MUX_ENABLE(i));
+
+ return 0;
+}
+
+static void sun6i_r_intc_resume(void)
+{
+ int i;
+
+ /* Only the NMI is relevant during normal operation. */
+ writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_ENABLE(0));
+ for (i = 1; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
+ writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i));
+}
+
+static void sun6i_r_intc_shutdown(void)
+{
+ sun6i_r_intc_suspend();
+}
+
+static struct syscore_ops sun6i_r_intc_syscore_ops = {
+ .suspend = sun6i_r_intc_suspend,
+ .resume = sun6i_r_intc_resume,
+ .shutdown = sun6i_r_intc_shutdown,
+};
+
+static int __init sun6i_r_intc_init(struct device_node *node,
+ struct device_node *parent,
+ const struct sun6i_r_intc_variant *v)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct of_phandle_args nmi_parent;
+ int ret;
+
+ /* Extract the NMI hwirq number from the OF node. */
+ ret = of_irq_parse_one(node, 0, &nmi_parent);
+ if (ret)
+ return ret;
+ if (nmi_parent.args_count < 3 ||
+ nmi_parent.args[0] != GIC_SPI ||
+ nmi_parent.args[2] != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ nmi_hwirq = nmi_parent.args[1];
+
+ bitmap_set(wake_irq_enabled, v->first_mux_irq, v->nr_mux_irqs);
+ bitmap_from_arr32(wake_mux_valid, v->mux_valid, SUN6I_NR_MUX_BITS);
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: Failed to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ base = of_io_request_and_map(node, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("%pOF: Failed to map MMIO region\n", node);
+ return PTR_ERR(base);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
+ &sun6i_r_intc_domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOF: Failed to allocate domain\n", node);
+ iounmap(base);
+ return -ENOMEM;
+ }
+
+ register_syscore_ops(&sun6i_r_intc_syscore_ops);
+
+ sun6i_r_intc_ack_nmi();
+ sun6i_r_intc_resume();
+
+ return 0;
+}
+
+static const struct sun6i_r_intc_variant sun6i_a31_r_intc_variant __initconst = {
+ .first_mux_irq = 19,
+ .nr_mux_irqs = 13,
+ .mux_valid = { 0xffffffff, 0xfff80000, 0xffffffff, 0x0000000f },
+};
+
+static int __init sun6i_a31_r_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sun6i_r_intc_init(node, parent, &sun6i_a31_r_intc_variant);
+}
+IRQCHIP_DECLARE(sun6i_a31_r_intc, "allwinner,sun6i-a31-r-intc", sun6i_a31_r_intc_init);
+
+static const struct sun6i_r_intc_variant sun50i_h6_r_intc_variant __initconst = {
+ .first_mux_irq = 21,
+ .nr_mux_irqs = 16,
+ .mux_valid = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff },
+};
+
+static int __init sun50i_h6_r_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sun6i_r_intc_init(node, parent, &sun50i_h6_r_intc_variant);
+}
+IRQCHIP_DECLARE(sun50i_h6_r_intc, "allwinner,sun50i-h6-r-intc", sun50i_h6_r_intc_init);
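During suspend the driver copies its wake bitmaps directly into the enable registers, and bitmap_to_arr32() is what bridges the kernel's unsigned-long bitmaps and the 32-bit MMIO words. A minimal sketch of that conversion for the 64 top-level IRQs, assuming 32-bit register images:

    #include <stdint.h>

    /* Stand-in for the bitmap_to_arr32() + writel_relaxed() loop above:
     * split a 64-bit wake mask into two 32-bit ENABLE register images. */
    static void program_wake_mask(uint64_t wake_mask, uint32_t enable[2])
    {
            enable[0] = (uint32_t)(wake_mask & 0xffffffffu);  /* IRQs 0-31  */
            enable[1] = (uint32_t)(wake_mask >> 32);          /* IRQs 32-63 */
    }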
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index a412b5d5d0fa..9f2bd0c5d289 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -27,18 +27,12 @@
#define SUNXI_NMI_IRQ_BIT BIT(0)
-#define SUN6I_R_INTC_CTRL 0x0c
-#define SUN6I_R_INTC_PENDING 0x10
-#define SUN6I_R_INTC_ENABLE 0x40
-
/*
* For deprecated sun6i-a31-sc-nmi compatible.
- * Registers are offset by 0x0c.
*/
-#define SUN6I_R_INTC_NMI_OFFSET 0x0c
-#define SUN6I_NMI_CTRL (SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET)
-#define SUN6I_NMI_PENDING (SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET)
-#define SUN6I_NMI_ENABLE (SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET)
+#define SUN6I_NMI_CTRL 0x00
+#define SUN6I_NMI_PENDING 0x04
+#define SUN6I_NMI_ENABLE 0x34
#define SUN7I_NMI_CTRL 0x00
#define SUN7I_NMI_PENDING 0x04
@@ -61,12 +55,6 @@ struct sunxi_sc_nmi_reg_offs {
u32 enable;
};
-static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = {
- .ctrl = SUN6I_R_INTC_CTRL,
- .pend = SUN6I_R_INTC_PENDING,
- .enable = SUN6I_R_INTC_ENABLE,
-};
-
static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
.ctrl = SUN6I_NMI_CTRL,
.pend = SUN6I_NMI_PENDING,
@@ -232,14 +220,6 @@ fail_irqd_remove:
return ret;
}
-static int __init sun6i_r_intc_irq_init(struct device_node *node,
- struct device_node *parent)
-{
- return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs);
-}
-IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc",
- sun6i_r_intc_irq_init);
-
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
deleted file mode 100644
index 34290f09b853..000000000000
--- a/drivers/irqchip/irq-tango.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/slab.h>
-
-#define IRQ0_CTL_BASE 0x0000
-#define IRQ1_CTL_BASE 0x0100
-#define EDGE_CTL_BASE 0x0200
-#define IRQ2_CTL_BASE 0x0300
-
-#define IRQ_CTL_HI 0x18
-#define EDGE_CTL_HI 0x20
-
-#define IRQ_STATUS 0x00
-#define IRQ_RAWSTAT 0x04
-#define IRQ_EN_SET 0x08
-#define IRQ_EN_CLR 0x0c
-#define IRQ_SOFT_SET 0x10
-#define IRQ_SOFT_CLR 0x14
-
-#define EDGE_STATUS 0x00
-#define EDGE_RAWSTAT 0x04
-#define EDGE_CFG_RISE 0x08
-#define EDGE_CFG_FALL 0x0c
-#define EDGE_CFG_RISE_SET 0x10
-#define EDGE_CFG_RISE_CLR 0x14
-#define EDGE_CFG_FALL_SET 0x18
-#define EDGE_CFG_FALL_CLR 0x1c
-
-struct tangox_irq_chip {
- void __iomem *base;
- unsigned long ctl;
-};
-
-static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
-{
- return readl_relaxed(chip->base + reg);
-}
-
-static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
-{
- writel_relaxed(val, chip->base + reg);
-}
-
-static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
- int base)
-{
- unsigned int hwirq;
- unsigned int virq;
-
- while (status) {
- hwirq = __ffs(status);
- virq = irq_find_mapping(dom, base + hwirq);
- if (virq)
- generic_handle_irq(virq);
- status &= ~BIT(hwirq);
- }
-}
-
-static void tangox_irq_handler(struct irq_desc *desc)
-{
- struct irq_domain *dom = irq_desc_get_handler_data(desc);
- struct irq_chip *host_chip = irq_desc_get_chip(desc);
- struct tangox_irq_chip *chip = dom->host_data;
- unsigned int status_lo, status_hi;
-
- chained_irq_enter(host_chip, desc);
-
- status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
- status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);
-
- tangox_dispatch_irqs(dom, status_lo, 0);
- tangox_dispatch_irqs(dom, status_hi, 32);
-
- chained_irq_exit(host_chip, desc);
-}
-
-static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct tangox_irq_chip *chip = gc->domain->host_data;
- struct irq_chip_regs *regs = &gc->chip_types[0].regs;
-
- switch (flow_type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_RISING:
- intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
- intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
- intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
- intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
- intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
- break;
-
- default:
- pr_err("Invalid trigger mode %x for IRQ %d\n",
- flow_type, d->irq);
- return -EINVAL;
- }
-
- return irq_setup_alt_chip(d, flow_type);
-}
-
-static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
- unsigned long ctl_offs,
- unsigned long edge_offs)
-{
- struct tangox_irq_chip *chip = gc->domain->host_data;
- struct irq_chip_type *ct = gc->chip_types;
- unsigned long ctl_base = chip->ctl + ctl_offs;
- unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
- int i;
-
- gc->reg_base = chip->base;
- gc->unused = 0;
-
- for (i = 0; i < 2; i++) {
- ct[i].chip.irq_ack = irq_gc_ack_set_bit;
- ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
- ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
- ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
- ct[i].chip.irq_set_type = tangox_irq_set_type;
- ct[i].chip.name = gc->domain->name;
-
- ct[i].regs.enable = ctl_base + IRQ_EN_SET;
- ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
- ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
- ct[i].regs.type = edge_base;
- }
-
- ct[0].type = IRQ_TYPE_LEVEL_MASK;
- ct[0].handler = handle_level_irq;
-
- ct[1].type = IRQ_TYPE_EDGE_BOTH;
- ct[1].handler = handle_edge_irq;
-
- intc_writel(chip, ct->regs.disable, 0xffffffff);
- intc_writel(chip, ct->regs.ack, 0xffffffff);
-}
-
-static void __init tangox_irq_domain_init(struct irq_domain *dom)
-{
- struct irq_chip_generic *gc;
- int i;
-
- for (i = 0; i < 2; i++) {
- gc = irq_get_domain_generic_chip(dom, i * 32);
- tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
- }
-}
-
-static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
- struct device_node *node)
-{
- struct tangox_irq_chip *chip;
- struct irq_domain *dom;
- struct resource res;
- int irq;
- int err;
-
- irq = irq_of_parse_and_map(node, 0);
- if (!irq)
- panic("%pOFn: failed to get IRQ", node);
-
- err = of_address_to_resource(node, 0, &res);
- if (err)
- panic("%pOFn: failed to get address", node);
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- chip->ctl = res.start - baseres->start;
- chip->base = base;
-
- dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
- if (!dom)
- panic("%pOFn: failed to create irqdomain", node);
-
- err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
- handle_level_irq, 0, 0, 0);
- if (err)
- panic("%pOFn: failed to allocate irqchip", node);
-
- tangox_irq_domain_init(dom);
-
- irq_set_chained_handler_and_data(irq, tangox_irq_handler, dom);
-
- return 0;
-}
-
-static int __init tangox_of_irq_init(struct device_node *node,
- struct device_node *parent)
-{
- struct device_node *c;
- struct resource res;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base)
- panic("%pOFn: of_iomap failed", node);
-
- of_address_to_resource(node, 0, &res);
-
- for_each_child_of_node(node, c)
- tangox_irq_init(base, &res, c);
-
- return 0;
-}
-IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 849d3c5f908e..b6742b4231bf 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -928,7 +928,13 @@ config LEDS_ACER_A500
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+comment "Flash and Torch LED drivers"
+source "drivers/leds/flash/Kconfig"
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
+comment "LED Blink"
+source "drivers/leds/blink/Kconfig"
+
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 73e603e1727e..2a698df9da57 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -103,5 +103,11 @@ obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
+# Flash and Torch LED Drivers
+obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
+
+# LED Blink
+obj-$(CONFIG_LEDS_BLINK) += blink/
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
new file mode 100644
index 000000000000..265b53476a80
--- /dev/null
+++ b/drivers/leds/blink/Kconfig
@@ -0,0 +1,20 @@
+menuconfig LEDS_BLINK
+ bool "LED Blink support"
+ depends on LEDS_CLASS
+ help
+ This option enables blink support for the LED class.
+ If unsure, say Y.
+
+if LEDS_BLINK
+
+config LEDS_BLINK_LGM
+ tristate "LED support for Intel LGM SoC series"
+ depends on LEDS_CLASS
+ depends on MFD_SYSCON
+ depends on OF
+ help
+ The Serial Shift Output (SSO) controller performs parallel-to-serial
+ conversion and can drive an external shift register for LED outputs.
+ This enables LED support for the SSO controller.
+
+endif # LEDS_BLINK
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
new file mode 100644
index 000000000000..2fa6c7b7b67e
--- /dev/null
+++ b/drivers/leds/blink/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_BLINK_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
new file mode 100644
index 000000000000..7d5c9ca007d6
--- /dev/null
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lightning Mountain SoC LED Serial Shift Output Controller driver
+ *
+ * Copyright (c) 2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+
+#define SSO_DEV_NAME "lgm-sso"
+
+#define LED_BLINK_H8_0 0x0
+#define LED_BLINK_H8_1 0x4
+#define GET_FREQ_OFFSET(pin, src) (((pin) * 6) + ((src) * 2))
+#define GET_SRC_OFFSET(pin) (((pin) * 6) + 4)
+
+#define DUTY_CYCLE(x) (0x8 + ((x) * 4))
+#define SSO_CON0 0x2B0
+#define SSO_CON0_RZFL BIT(26)
+#define SSO_CON0_BLINK_R BIT(30)
+#define SSO_CON0_SWU BIT(31)
+
+#define SSO_CON1 0x2B4
+#define SSO_CON1_FCDSC GENMASK(21, 20) /* Fixed Divider Shift Clock */
+#define SSO_CON1_FPID GENMASK(24, 23)
+#define SSO_CON1_GPTD GENMASK(26, 25)
+#define SSO_CON1_US GENMASK(31, 30)
+
+#define SSO_CPU 0x2B8
+#define SSO_CON2 0x2C4
+#define SSO_CON3 0x2C8
+
+/* Driver MACRO */
+#define MAX_PIN_NUM_PER_BANK SZ_32
+#define MAX_GROUP_NUM SZ_4
+#define PINS_PER_GROUP SZ_8
+#define FPID_FREQ_RANK_MAX SZ_4
+#define SSO_LED_MAX_NUM SZ_32
+#define MAX_FREQ_RANK 10
+#define DEF_GPTC_CLK_RATE 200000000
+#define SSO_DEF_BRIGHTNESS LED_HALF
+#define DATA_CLK_EDGE 0 /* 0-rising, 1-falling */
+
+static const u32 freq_div_tbl[] = {4000, 2000, 1000, 800};
+static const int freq_tbl[] = {2, 4, 8, 10, 50000, 100000, 200000, 250000};
+static const int shift_clk_freq_tbl[] = {25000000, 12500000, 6250000, 3125000};
+
+/*
+ * Update Source to update the SOUTs
+ * SW - Software has to update the SWU bit
+ * GPTC - General Purpose timer is used as clock source
+ * FPID - Divided FSC clock (FPID) is used as clock source
+ */
+enum {
+ US_SW = 0,
+ US_GPTC = 1,
+ US_FPID = 2
+};
+
+enum {
+ MAX_FPID_FREQ_RANK = 5, /* 1 to 4 */
+ MAX_GPTC_FREQ_RANK = 9, /* 5 to 8 */
+ MAX_GPTC_HS_FREQ_RANK = 10, /* 9 to 10 */
+};
+
+enum {
+ LED_GRP0_PIN_MAX = 24,
+ LED_GRP1_PIN_MAX = 29,
+ LED_GRP2_PIN_MAX = 32,
+};
+
+enum {
+ LED_GRP0_0_23,
+ LED_GRP1_24_28,
+ LED_GRP2_29_31,
+ LED_GROUP_MAX,
+};
+
+enum {
+ CLK_SRC_FPID = 0,
+ CLK_SRC_GPTC = 1,
+ CLK_SRC_GPTC_HS = 2,
+};
+
+struct sso_led_priv;
+
+struct sso_led_desc {
+ const char *name;
+ const char *default_trigger;
+ unsigned int brightness;
+ unsigned int blink_rate;
+ unsigned int retain_state_suspended:1;
+ unsigned int retain_state_shutdown:1;
+ unsigned int panic_indicator:1;
+ unsigned int hw_blink:1;
+ unsigned int hw_trig:1;
+ unsigned int blinking:1;
+ int freq_idx;
+ u32 pin;
+};
+
+struct sso_led {
+ struct list_head list;
+ struct led_classdev cdev;
+ struct gpio_desc *gpiod;
+ struct sso_led_desc desc;
+ struct sso_led_priv *priv;
+};
+
+struct sso_gpio {
+ struct gpio_chip chip;
+ int shift_clk_freq;
+ int edge;
+ int freq;
+ u32 pins;
+ u32 alloc_bitmap;
+};
+
+struct sso_led_priv {
+ struct regmap *mmap;
+ struct device *dev;
+ struct platform_device *pdev;
+ struct clk *gclk;
+ struct clk *fpid_clk;
+ u32 fpid_clkrate;
+ u32 gptc_clkrate;
+ u32 freq[MAX_FREQ_RANK];
+ struct list_head led_list;
+ struct sso_gpio gpio;
+};
+
+static int sso_get_blink_rate_idx(struct sso_led_priv *priv, u32 rate)
+{
+ int i;
+
+ for (i = 0; i < MAX_FREQ_RANK; i++) {
+ if (rate <= priv->freq[i])
+ return i;
+ }
+
+ return -1;
+}
+
+static unsigned int sso_led_pin_to_group(u32 pin)
+{
+ if (pin < LED_GRP0_PIN_MAX)
+ return LED_GRP0_0_23;
+ else if (pin < LED_GRP1_PIN_MAX)
+ return LED_GRP1_24_28;
+ else
+ return LED_GRP2_29_31;
+}
+
+static u32 sso_led_get_freq_src(int freq_idx)
+{
+ if (freq_idx < MAX_FPID_FREQ_RANK)
+ return CLK_SRC_FPID;
+ else if (freq_idx < MAX_GPTC_FREQ_RANK)
+ return CLK_SRC_GPTC;
+ else
+ return CLK_SRC_GPTC_HS;
+}
+
+static u32 sso_led_pin_blink_off(u32 pin, unsigned int group)
+{
+ if (group == LED_GRP2_29_31)
+ return pin - LED_GRP1_PIN_MAX;
+ else if (group == LED_GRP1_24_28)
+ return pin - LED_GRP0_PIN_MAX;
+ else /* led 0 - 23 in led 32 location */
+ return SSO_LED_MAX_NUM - LED_GRP1_PIN_MAX;
+}
+
+static struct sso_led
+*cdev_to_sso_led_data(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct sso_led, cdev);
+}
+
+static void sso_led_freq_set(struct sso_led_priv *priv, u32 pin, int freq_idx)
+{
+ u32 reg, off, freq_src, val_freq;
+ u32 low, high, val;
+ unsigned int group;
+
+ if (!freq_idx)
+ return;
+
+ group = sso_led_pin_to_group(pin);
+ freq_src = sso_led_get_freq_src(freq_idx);
+ off = sso_led_pin_blink_off(pin, group);
+
+ if (group == LED_GRP0_0_23)
+ return;
+ else if (group == LED_GRP1_24_28)
+ reg = LED_BLINK_H8_0;
+ else
+ reg = LED_BLINK_H8_1;
+
+ if (freq_src == CLK_SRC_FPID)
+ val_freq = freq_idx - 1;
+ else if (freq_src == CLK_SRC_GPTC)
+ val_freq = freq_idx - MAX_FPID_FREQ_RANK;
+
+ /* set blink rate idx */
+ if (freq_src != CLK_SRC_GPTC_HS) {
+ low = GET_FREQ_OFFSET(off, freq_src);
+ high = low + 2;
+ val = val_freq << high;
+ regmap_update_bits(priv->mmap, reg, GENMASK(high, low), val);
+ }
+
+ /* select clock source */
+ low = GET_SRC_OFFSET(off);
+ high = low + 2;
+ val = freq_src << high;
+ regmap_update_bits(priv->mmap, reg, GENMASK(high, low), val);
+}
+
+static void sso_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct sso_led_priv *priv;
+ struct sso_led_desc *desc;
+ struct sso_led *led;
+ int val;
+
+ led = cdev_to_sso_led_data(led_cdev);
+ priv = led->priv;
+ desc = &led->desc;
+
+ desc->brightness = brightness;
+ regmap_write(priv->mmap, DUTY_CYCLE(desc->pin), brightness);
+
+ if (brightness == LED_OFF)
+ val = 0;
+ else
+ val = 1;
+
+ /* HW blink off */
+ if (desc->hw_blink && !val && desc->blinking) {
+ desc->blinking = 0;
+ regmap_update_bits(priv->mmap, SSO_CON2, BIT(desc->pin), 0);
+ } else if (desc->hw_blink && val && !desc->blinking) {
+ desc->blinking = 1;
+ regmap_update_bits(priv->mmap, SSO_CON2, BIT(desc->pin),
+ 1 << desc->pin);
+ }
+
+ if (!desc->hw_trig && led->gpiod)
+ gpiod_set_value(led->gpiod, val);
+}
+
+static enum led_brightness sso_led_brightness_get(struct led_classdev *led_cdev)
+{
+ struct sso_led *led = cdev_to_sso_led_data(led_cdev);
+
+ return (enum led_brightness)led->desc.brightness;
+}
+
+static int
+delay_to_freq_idx(struct sso_led *led, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct sso_led_priv *priv = led->priv;
+ unsigned long delay;
+ int freq_idx;
+ u32 freq;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = *delay_off = (1000 / priv->freq[0]) / 2;
+ return 0;
+ }
+
+ delay = *delay_on + *delay_off;
+ freq = 1000 / delay;
+
+ freq_idx = sso_get_blink_rate_idx(priv, freq);
+ if (freq_idx == -1)
+ freq_idx = MAX_FREQ_RANK - 1;
+
+ delay = 1000 / priv->freq[freq_idx];
+ *delay_on = *delay_off = delay / 2;
+
+ if (!*delay_on)
+ *delay_on = *delay_off = 1;
+
+ return freq_idx;
+}
+
+static int
+sso_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct sso_led_priv *priv;
+ struct sso_led *led;
+ int freq_idx;
+
+ led = cdev_to_sso_led_data(led_cdev);
+ priv = led->priv;
+ freq_idx = delay_to_freq_idx(led, delay_on, delay_off);
+
+ sso_led_freq_set(priv, led->desc.pin, freq_idx);
+ regmap_update_bits(priv->mmap, SSO_CON2, BIT(led->desc.pin),
+ 1 << led->desc.pin);
+ led->desc.freq_idx = freq_idx;
+ led->desc.blink_rate = priv->freq[freq_idx];
+ led->desc.blinking = 1;
+
+ return 1;
+}
+
+static void sso_led_hw_cfg(struct sso_led_priv *priv, struct sso_led *led)
+{
+ struct sso_led_desc *desc = &led->desc;
+
+ /* set freq */
+ if (desc->hw_blink) {
+ sso_led_freq_set(priv, desc->pin, desc->freq_idx);
+ regmap_update_bits(priv->mmap, SSO_CON2, BIT(desc->pin),
+ 1 << desc->pin);
+ }
+
+ if (desc->hw_trig)
+ regmap_update_bits(priv->mmap, SSO_CON3, BIT(desc->pin),
+ 1 << desc->pin);
+
+ /* set brightness */
+ regmap_write(priv->mmap, DUTY_CYCLE(desc->pin), desc->brightness);
+
+ /* enable output */
+ if (!desc->hw_trig && desc->brightness)
+ gpiod_set_value(led->gpiod, 1);
+}
+
+static int sso_create_led(struct sso_led_priv *priv, struct sso_led *led,
+ struct fwnode_handle *child)
+{
+ struct sso_led_desc *desc = &led->desc;
+ struct led_init_data init_data;
+ int err;
+
+ init_data.fwnode = child;
+ init_data.devicename = SSO_DEV_NAME;
+ init_data.default_label = ":";
+
+ led->cdev.default_trigger = desc->default_trigger;
+ led->cdev.brightness_set = sso_led_brightness_set;
+ led->cdev.brightness_get = sso_led_brightness_get;
+ led->cdev.brightness = desc->brightness;
+ led->cdev.max_brightness = LED_FULL;
+
+ if (desc->retain_state_shutdown)
+ led->cdev.flags |= LED_RETAIN_AT_SHUTDOWN;
+ if (desc->retain_state_suspended)
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ if (desc->panic_indicator)
+ led->cdev.flags |= LED_PANIC_INDICATOR;
+
+ if (desc->hw_blink)
+ led->cdev.blink_set = sso_led_blink_set;
+
+ sso_led_hw_cfg(priv, led);
+
+ err = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (err)
+ return err;
+
+ list_add(&led->list, &priv->led_list);
+
+ return 0;
+}
+
+static void sso_init_freq(struct sso_led_priv *priv)
+{
+ int i;
+
+ priv->freq[0] = 0;
+ for (i = 1; i < MAX_FREQ_RANK; i++) {
+ if (i < MAX_FPID_FREQ_RANK) {
+ priv->freq[i] = priv->fpid_clkrate / freq_div_tbl[i - 1];
+ } else if (i < MAX_GPTC_FREQ_RANK) {
+ priv->freq[i] = priv->gptc_clkrate /
+ freq_div_tbl[i - MAX_FPID_FREQ_RANK];
+ } else if (i < MAX_GPTC_HS_FREQ_RANK) {
+ priv->freq[i] = priv->gptc_clkrate;
+ }
+ }
+}
+
+static int sso_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct sso_led_priv *priv = gpiochip_get_data(chip);
+
+ if (priv->gpio.alloc_bitmap & BIT(offset))
+ return -EINVAL;
+
+ priv->gpio.alloc_bitmap |= BIT(offset);
+ regmap_write(priv->mmap, DUTY_CYCLE(offset), 0xFF);
+
+ return 0;
+}
+
+static void sso_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct sso_led_priv *priv = gpiochip_get_data(chip);
+
+ priv->gpio.alloc_bitmap &= ~BIT(offset);
+ regmap_write(priv->mmap, DUTY_CYCLE(offset), 0x0);
+}
+
+static int sso_gpio_get_dir(struct gpio_chip *chip, unsigned int offset)
+{
+ return GPIOF_DIR_OUT;
+}
+
+static int
+sso_gpio_dir_out(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct sso_led_priv *priv = gpiochip_get_data(chip);
+ bool bit = !!value;
+
+ regmap_update_bits(priv->mmap, SSO_CPU, BIT(offset), bit << offset);
+ if (!priv->gpio.freq)
+ regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_SWU,
+ SSO_CON0_SWU);
+
+ return 0;
+}
+
+static int sso_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct sso_led_priv *priv = gpiochip_get_data(chip);
+ u32 reg_val;
+
+ regmap_read(priv->mmap, SSO_CPU, &reg_val);
+
+ return !!(reg_val & BIT(offset));
+}
+
+static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct sso_led_priv *priv = gpiochip_get_data(chip);
+
+ regmap_update_bits(priv->mmap, SSO_CPU, BIT(offset), value << offset);
+ if (!priv->gpio.freq)
+ regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_SWU,
+ SSO_CON0_SWU);
+}
+
+static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
+{
+ struct gpio_chip *gc = &priv->gpio.chip;
+
+ gc->request = sso_gpio_request;
+ gc->free = sso_gpio_free;
+ gc->get_direction = sso_gpio_get_dir;
+ gc->direction_output = sso_gpio_dir_out;
+ gc->get = sso_gpio_get;
+ gc->set = sso_gpio_set;
+
+ gc->label = "lgm-sso";
+ gc->base = -1;
+ /* To exclude pins from control, use "gpio-reserved-ranges" */
+ gc->ngpio = priv->gpio.pins;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->of_node = dev->of_node;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+
+static int sso_gpio_get_freq_idx(int freq)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(freq_tbl); idx++) {
+ if (freq <= freq_tbl[idx])
+ return idx;
+ }
+
+ return -1;
+}
+
+static void sso_register_shift_clk(struct sso_led_priv *priv)
+{
+ int idx, size = ARRAY_SIZE(shift_clk_freq_tbl);
+ u32 val = 0;
+
+ for (idx = 0; idx < size; idx++) {
+ if (shift_clk_freq_tbl[idx] <= priv->gpio.shift_clk_freq) {
+ val = idx;
+ break;
+ }
+ }
+
+ if (idx == size)
+ dev_warn(priv->dev, "%s: Invalid freq %d\n",
+ __func__, priv->gpio.shift_clk_freq);
+
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_FCDSC,
+ FIELD_PREP(SSO_CON1_FCDSC, val));
+}
+
+static int sso_gpio_freq_set(struct sso_led_priv *priv)
+{
+ int freq_idx;
+ u32 val;
+
+ freq_idx = sso_gpio_get_freq_idx(priv->gpio.freq);
+ if (freq_idx == -1)
+ freq_idx = ARRAY_SIZE(freq_tbl) - 1;
+
+ val = freq_idx % FPID_FREQ_RANK_MAX;
+
+ if (!priv->gpio.freq) {
+ regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_BLINK_R, 0);
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_US,
+ FIELD_PREP(SSO_CON1_US, US_SW));
+ } else if (freq_idx < FPID_FREQ_RANK_MAX) {
+ regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_BLINK_R,
+ SSO_CON0_BLINK_R);
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_US,
+ FIELD_PREP(SSO_CON1_US, US_FPID));
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_FPID,
+ FIELD_PREP(SSO_CON1_FPID, val));
+ } else {
+ regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_BLINK_R,
+ SSO_CON0_BLINK_R);
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_US,
+ FIELD_PREP(SSO_CON1_US, US_GPTC));
+ regmap_update_bits(priv->mmap, SSO_CON1, SSO_CON1_GPTD,
+ FIELD_PREP(SSO_CON1_GPTD, val));
+ }
+
+ return 0;
+}
+
+static int sso_gpio_hw_init(struct sso_led_priv *priv)
+{
+ u32 activate;
+ int i, err;
+
+ /* Clear all duty cycles */
+ for (i = 0; i < priv->gpio.pins; i++) {
+ err = regmap_write(priv->mmap, DUTY_CYCLE(i), 0);
+ if (err)
+ return err;
+ }
+
+ /* 4 groups for total 32 pins */
+ for (i = 1; i <= MAX_GROUP_NUM; i++) {
+ activate = !!(i * PINS_PER_GROUP <= priv->gpio.pins ||
+ priv->gpio.pins > (i - 1) * PINS_PER_GROUP);
+ err = regmap_update_bits(priv->mmap, SSO_CON1, BIT(i - 1),
+ activate << (i - 1));
+ if (err)
+ return err;
+ }
+
+ /* NO HW directly controlled pin by default */
+ err = regmap_write(priv->mmap, SSO_CON3, 0);
+ if (err)
+ return err;
+
+ /* NO BLINK for all pins */
+ err = regmap_write(priv->mmap, SSO_CON2, 0);
+ if (err)
+ return err;
+
+ /* OUTPUT 0 by default */
+ err = regmap_write(priv->mmap, SSO_CPU, 0);
+ if (err)
+ return err;
+
+ /* update edge */
+ err = regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_RZFL,
+ FIELD_PREP(SSO_CON0_RZFL, priv->gpio.edge));
+ if (err)
+ return err;
+
+ /* Set GPIO update rate */
+ sso_gpio_freq_set(priv);
+
+ /* Register shift clock */
+ sso_register_shift_clk(priv);
+
+ return 0;
+}
+
+static void sso_led_shutdown(struct sso_led *led)
+{
+ struct sso_led_priv *priv = led->priv;
+
+ /* unregister led */
+ devm_led_classdev_unregister(priv->dev, &led->cdev);
+
+ /* clear HW control bit */
+ if (led->desc.hw_trig)
+ regmap_update_bits(priv->mmap, SSO_CON3, BIT(led->desc.pin), 0);
+
+ if (led->gpiod)
+ devm_gpiod_put(priv->dev, led->gpiod);
+
+ led->priv = NULL;
+}
+
+static int
+__sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
+{
+ struct fwnode_handle *fwnode_child;
+ struct device *dev = priv->dev;
+ struct sso_led_desc *desc;
+ struct sso_led *led;
+ struct list_head *p;
+ const char *tmp;
+ u32 prop;
+ int ret;
+
+ fwnode_for_each_child_node(fw_ssoled, fwnode_child) {
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&led->list);
+ led->priv = priv;
+ desc = &led->desc;
+
+ led->gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL,
+ fwnode_child,
+ GPIOD_ASIS, NULL);
+ if (IS_ERR(led->gpiod)) {
+ dev_err(dev, "led: get gpio fail!\n");
+ goto __dt_err;
+ }
+
+ fwnode_property_read_string(fwnode_child,
+ "linux,default-trigger",
+ &desc->default_trigger);
+
+ if (fwnode_property_present(fwnode_child,
+ "retain-state-suspended"))
+ desc->retain_state_suspended = 1;
+
+ if (fwnode_property_present(fwnode_child,
+ "retain-state-shutdown"))
+ desc->retain_state_shutdown = 1;
+
+ if (fwnode_property_present(fwnode_child, "panic-indicator"))
+ desc->panic_indicator = 1;
+
+ ret = fwnode_property_read_u32(fwnode_child, "reg", &prop);
+ if (ret != 0 || prop >= SSO_LED_MAX_NUM) {
+ dev_err(dev, "invalid LED pin:%u\n", prop);
+ goto __dt_err;
+ }
+ desc->pin = prop;
+
+ if (fwnode_property_present(fwnode_child, "intel,sso-hw-blink"))
+ desc->hw_blink = 1;
+
+ desc->hw_trig = fwnode_property_read_bool(fwnode_child,
+ "intel,sso-hw-trigger");
+ if (desc->hw_trig) {
+ desc->default_trigger = NULL;
+ desc->retain_state_shutdown = 0;
+ desc->retain_state_suspended = 0;
+ desc->panic_indicator = 0;
+ desc->hw_blink = 0;
+ }
+
+ if (fwnode_property_read_u32(fwnode_child,
+ "intel,sso-blink-rate-hz", &prop)) {
+ /* default first freq rate */
+ desc->freq_idx = 0;
+ desc->blink_rate = priv->freq[desc->freq_idx];
+ } else {
+ desc->freq_idx = sso_get_blink_rate_idx(priv, prop);
+ if (desc->freq_idx == -1)
+ desc->freq_idx = MAX_FREQ_RANK - 1;
+
+ desc->blink_rate = priv->freq[desc->freq_idx];
+ }
+
+ if (!fwnode_property_read_string(fwnode_child, "default-state", &tmp)) {
+ if (!strcmp(tmp, "on"))
+ desc->brightness = LED_FULL;
+ }
+
+ if (sso_create_led(priv, led, fwnode_child))
+ goto __dt_err;
+ }
+ fwnode_handle_put(fw_ssoled);
+
+ return 0;
+__dt_err:
+ fwnode_handle_put(fw_ssoled);
+ /* unregister leds */
+ list_for_each(p, &priv->led_list) {
+ led = list_entry(p, struct sso_led, list);
+ sso_led_shutdown(led);
+ }
+
+ return -EINVAL;
+}
+
+static int sso_led_dt_parse(struct sso_led_priv *priv)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(priv->dev);
+ struct fwnode_handle *fw_ssoled;
+ struct device *dev = priv->dev;
+ int count;
+ int ret;
+
+ count = device_get_child_node_count(dev);
+ if (!count)
+ return 0;
+
+ fw_ssoled = fwnode_get_named_child_node(fwnode, "ssoled");
+ if (fw_ssoled) {
+ ret = __sso_led_dt_parse(priv, fw_ssoled);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sso_probe_gpios(struct sso_led_priv *priv)
+{
+ struct device *dev = priv->dev;
+ int ret;
+
+ if (device_property_read_u32(dev, "ngpios", &priv->gpio.pins))
+ priv->gpio.pins = MAX_PIN_NUM_PER_BANK;
+
+ if (priv->gpio.pins > MAX_PIN_NUM_PER_BANK)
+ return -EINVAL;
+
+ if (device_property_read_u32(dev, "intel,sso-update-rate-hz",
+ &priv->gpio.freq))
+ priv->gpio.freq = 0;
+
+ priv->gpio.edge = DATA_CLK_EDGE;
+ priv->gpio.shift_clk_freq = -1;
+
+ ret = sso_gpio_hw_init(priv);
+ if (ret)
+ return ret;
+
+ return sso_gpio_gc_init(dev, priv);
+}
+
+static void sso_clk_disable(void *data)
+{
+ struct sso_led_priv *priv = data;
+
+ clk_disable_unprepare(priv->fpid_clk);
+ clk_disable_unprepare(priv->gclk);
+}
+
+static int intel_sso_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sso_led_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ priv->dev = dev;
+
+ /* gate clock */
+ priv->gclk = devm_clk_get(dev, "sso");
+ if (IS_ERR(priv->gclk)) {
+ dev_err(dev, "get sso gate clock failed!\n");
+ return PTR_ERR(priv->gclk);
+ }
+
+ ret = clk_prepare_enable(priv->gclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepate/enable sso gate clock!\n");
+ return ret;
+ }
+
+ priv->fpid_clk = devm_clk_get(dev, "fpid");
+ if (IS_ERR(priv->fpid_clk)) {
+ dev_err(dev, "Failed to get fpid clock!\n");
+ return PTR_ERR(priv->fpid_clk);
+ }
+
+ ret = clk_prepare_enable(priv->fpid_clk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare/enable fpid clock!\n");
+ return ret;
+ }
+ priv->fpid_clkrate = clk_get_rate(priv->fpid_clk);
+
+ ret = devm_add_action_or_reset(dev, sso_clk_disable, priv);
+ if (ret) {
+ dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
+ return ret;
+ }
+
+ priv->mmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(priv->mmap)) {
+ dev_err(dev, "Failed to map iomem!\n");
+ return PTR_ERR(priv->mmap);
+ }
+
+ ret = sso_probe_gpios(priv);
+ if (ret) {
+ regmap_exit(priv->mmap);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&priv->led_list);
+
+ platform_set_drvdata(pdev, priv);
+ sso_init_freq(priv);
+
+ priv->gptc_clkrate = DEF_GPTC_CLK_RATE;
+
+ ret = sso_led_dt_parse(priv);
+ if (ret) {
+ regmap_exit(priv->mmap);
+ return ret;
+ }
+ dev_info(priv->dev, "sso LED init success!\n");
+
+ return 0;
+}
+
+static int intel_sso_led_remove(struct platform_device *pdev)
+{
+ struct sso_led_priv *priv;
+ struct list_head *pos, *n;
+ struct sso_led *led;
+
+ priv = platform_get_drvdata(pdev);
+
+ list_for_each_safe(pos, n, &priv->led_list) {
+ list_del(pos);
+ led = list_entry(pos, struct sso_led, list);
+ sso_led_shutdown(led);
+ }
+
+ clk_disable_unprepare(priv->fpid_clk);
+ clk_disable_unprepare(priv->gclk);
+ regmap_exit(priv->mmap);
+
+ return 0;
+}
+
+static const struct of_device_id of_sso_led_match[] = {
+ { .compatible = "intel,lgm-ssoled" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, of_sso_led_match);
+
+static struct platform_driver intel_sso_led_driver = {
+ .probe = intel_sso_led_probe,
+ .remove = intel_sso_led_remove,
+ .driver = {
+ .name = "lgm-ssoled",
+ .of_match_table = of_match_ptr(of_sso_led_match),
+ },
+};
+
+module_platform_driver(intel_sso_led_driver);
+
+MODULE_DESCRIPTION("Intel SSO LED/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
new file mode 100644
index 000000000000..b580b416b9a4
--- /dev/null
+++ b/drivers/leds/flash/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_FLASH
+
+config LEDS_RT8515
+ tristate "LED support for Richtek RT8515 flash/torch LED"
+ depends on GPIOLIB
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ help
+ This option enables support for the Richtek RT8515 flash
+ and torch LEDs found on some mobile phones.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-rt8515.
+
+endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
new file mode 100644
index 000000000000..e990e257f4d7
--- /dev/null
+++ b/drivers/leds/flash/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
new file mode 100644
index 000000000000..590bfa180d10
--- /dev/null
+++ b/drivers/leds/flash/leds-rt8515.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LED driver for Richtek RT8515 flash/torch white LEDs
+ * found on some Samsung mobile phones.
+ *
+ * This is a 1.5 A boost, dual-channel driver produced around 2011.
+ *
+ * The component lacks a datasheet, but in the schematic picture
+ * from the LG P970 service manual you can see the connections
+ * from the RT8515 to the LED, with two resistors connected
+ * from the pins "RFS" and "RTS" to ground.
+ *
+ * On the LG P970:
+ * RFS (resistance flash setting?) is 20 kOhm
+ * RTS (resistance torch setting?) is 39 kOhm
+ *
+ * Some sleuthing finds us the RT9387A, which we have a datasheet for:
+ * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
+ * This apparently works the same way so in theory this driver
+ * should cover RT9387A as well. This has not been tested, please
+ * update the compatibles if you add RT9387A support.
+ *
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+/* We can provide 15-700 mA out to the LED */
+#define RT8515_MIN_IOUT_MA 15
+#define RT8515_MAX_IOUT_MA 700
+/* The maximum intensity is 1-16 for flash and 1-100 for torch */
+#define RT8515_FLASH_MAX 16
+#define RT8515_TORCH_MAX 100
+
+#define RT8515_TIMEOUT_US 250000U
+#define RT8515_MAX_TIMEOUT_US 300000U
+
+struct rt8515 {
+ struct led_classdev_flash fled;
+ struct device *dev;
+ struct v4l2_flash *v4l2_flash;
+ struct mutex lock;
+ struct regulator *reg;
+ struct gpio_desc *enable_torch;
+ struct gpio_desc *enable_flash;
+ struct timer_list powerdown_timer;
+ u32 max_timeout; /* Flash max timeout */
+ int flash_max_intensity;
+ int torch_max_intensity;
+};
+
+static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
+{
+ return container_of(fled, struct rt8515, fled);
+}
+
+static void rt8515_gpio_led_off(struct rt8515 *rt)
+{
+ gpiod_set_value(rt->enable_flash, 0);
+ gpiod_set_value(rt->enable_torch, 0);
+}
+
+static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
+ int brightness)
+{
+ int i;
+
+ /*
+ * Toggling a GPIO line with a small delay increases the
+ * brightness one step at a time.
+ */
+ for (i = 0; i < brightness; i++) {
+ gpiod_set_value(gpiod, 0);
+ udelay(1);
+ gpiod_set_value(gpiod, 1);
+ udelay(1);
+ }
+}
+
+/* This is setting the torch light level */
+static int rt8515_led_brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled = lcdev_to_flcdev(led);
+ struct rt8515 *rt = to_rt8515(fled);
+
+ mutex_lock(&rt->lock);
+
+ if (brightness == LED_OFF) {
+ /* Off */
+ rt8515_gpio_led_off(rt);
+ } else if (brightness < RT8515_TORCH_MAX) {
+ /* Step it up to movie mode brightness using the torch pin */
+ rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
+ } else {
+ /* Max torch brightness requested */
+ gpiod_set_value(rt->enable_torch, 1);
+ }
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
+ bool state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+ struct led_flash_setting *timeout = &fled->timeout;
+ int brightness = rt->flash_max_intensity;
+
+ mutex_lock(&rt->lock);
+
+ if (state) {
+ /* Enable LED flash mode and set brightness */
+ rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
+ /* Set timeout */
+ mod_timer(&rt->powerdown_timer,
+ jiffies + usecs_to_jiffies(timeout->val));
+ } else {
+ del_timer_sync(&rt->powerdown_timer);
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+ }
+
+ fled->led_cdev.brightness = LED_OFF;
+ /* After this the torch LED will be disabled */
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
+ bool *state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+
+ *state = timer_pending(&rt->powerdown_timer);
+
+ return 0;
+}
+
+static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
+ u32 timeout)
+{
+ /* The timeout is stored in the led-class-flash core */
+ return 0;
+}
+
+static const struct led_flash_ops rt8515_flash_ops = {
+ .strobe_set = rt8515_led_flash_strobe_set,
+ .strobe_get = rt8515_led_flash_strobe_get,
+ .timeout_set = rt8515_led_flash_timeout_set,
+};
+
+static void rt8515_powerdown_timer(struct timer_list *t)
+{
+ struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+}
+
+static void rt8515_init_flash_timeout(struct rt8515 *rt)
+{
+ struct led_classdev_flash *fled = &rt->fled;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled->timeout;
+ s->min = 1;
+ s->max = rt->max_timeout;
+ s->step = 1;
+ /*
+ * Set default timeout to RT8515_TIMEOUT_US except if
+ * max_timeout from DT is lower.
+ */
+ s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/* Configure the V4L2 flash subdevice */
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led = &rt->fled.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /*
+ * Init flash intensity setting: this is a linear scale
+ * capped from the device tree max intensity setting
+ * 1..flash_max_intensity
+ */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 1;
+ s->max = rt->flash_max_intensity;
+ s->step = 1;
+ s->val = s->max;
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+ v4l2_flash_release(rt->v4l2_flash);
+}
+
+#else
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+}
+#endif
+
+static void rt8515_determine_max_intensity(struct rt8515 *rt,
+ struct fwnode_handle *led,
+ const char *resistance,
+ const char *max_ua_prop, int hw_max,
+ int *max_intensity_setting)
+{
+ u32 res = 0; /* a valid resistance can't be 0, so 0 means "not set" */
+ u32 ua;
+ u32 max_ma;
+ int max_intensity;
+ int ret;
+
+ fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
+ ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
+
+ /* Info missing from the DT; fall back to the hardware maxima */
+ if (ret || res == 0) {
+ dev_err(rt->dev,
+ "either %s or %s missing from DT, using HW max\n",
+ resistance, max_ua_prop);
+ max_ma = RT8515_MAX_IOUT_MA;
+ max_intensity = hw_max;
+ goto out_assign_max;
+ }
+
+ /*
+ * Formula from the datasheet, this is the maximum current
+ * defined by the hardware.
+ */
+ max_ma = (5500 * 1000) / res;
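+ /* e.g. the LG P970's 20 kOhm RFS gives 5500000 / 20000 = 275 mA */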
+ /*
+ * Calculate max intensity (linear scaling)
+ * Formula is ((ua / 1000) / max_ma) * 100, then simplified
+ */
+ max_intensity = (ua / 10) / max_ma;
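+ /* e.g. a hypothetical 137500 uA DT limit with max_ma = 275 gives 13750 / 275 = 50 */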
+
+ dev_info(rt->dev,
+ "current restricted from %u to %u mA, max intensity %d/100\n",
+ max_ma, (ua / 1000), max_intensity);
+
+out_assign_max:
+ dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
+ max_intensity, hw_max, max_ma);
+ *max_intensity_setting = max_intensity;
+}
+
+static int rt8515_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct rt8515 *rt;
+ struct led_classdev *led;
+ struct led_classdev_flash *fled;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+
+ rt->dev = dev;
+ fled = &rt->fled;
+ led = &fled->led_cdev;
+
+ /* ENF - Enable Flash line */
+ rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_flash))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
+ "cannot get ENF (enable flash) GPIO\n");
+
+ /* ENT - Enable Torch line */
+ rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_torch))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
+ "cannot get ENT (enable torch) GPIO\n");
+
+ child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!child) {
+ dev_err(dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
+ "flash-max-microamp",
+ RT8515_FLASH_MAX,
+ &rt->flash_max_intensity);
+ rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
+ "led-max-microamp",
+ RT8515_TORCH_MAX,
+ &rt->torch_max_intensity);
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
+ &rt->max_timeout);
+ if (ret) {
+ rt->max_timeout = RT8515_MAX_TIMEOUT_US;
+ dev_warn(dev,
+ "flash-max-timeout-us property missing\n");
+ }
+ timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
+ rt8515_init_flash_timeout(rt);
+
+ fled->ops = &rt8515_flash_ops;
+
+ led->max_brightness = rt->torch_max_intensity;
+ led->brightness_set_blocking = rt8515_led_brightness_set;
+ led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+ mutex_init(&rt->lock);
+
+ platform_set_drvdata(pdev, rt);
+
+ ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
+ if (ret) {
+ dev_err(dev, "can't register LED %s\n", led->name);
+ mutex_destroy(&rt->lock);
+ return ret;
+ }
+
+ rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
+
+ /* Create a V4L2 Flash device if V4L2 flash is enabled */
+ rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
+ if (IS_ERR(rt->v4l2_flash)) {
+ ret = PTR_ERR(rt->v4l2_flash);
+ dev_err(dev, "failed to register V4L2 flash device (%d)\n",
+ ret);
+ /*
+ * Continue without the V4L2 flash
+ * (we still have the classdev)
+ */
+ }
+
+ return 0;
+}
+
+static int rt8515_remove(struct platform_device *pdev)
+{
+ struct rt8515 *rt = platform_get_drvdata(pdev);
+
+ rt8515_v4l2_flash_release(rt);
+ del_timer_sync(&rt->powerdown_timer);
+ mutex_destroy(&rt->lock);
+
+ return 0;
+}
+
+static const struct of_device_id rt8515_match[] = {
+ { .compatible = "richtek,rt8515", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt8515_match);
+
+static struct platform_driver rt8515_driver = {
+ .driver = {
+ .name = "rt8515",
+ .of_match_table = rt8515_match,
+ },
+ .probe = rt8515_probe,
+ .remove = rt8515_remove,
+};
+module_platform_driver(rt8515_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Richtek RT8515 LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 131ca83f5fb3..2e495ff67856 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -145,8 +145,7 @@ static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
}
-void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev, unsigned int brightness)
{
if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
return;
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index c4e780bdb385..8eb8054ef9c6 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -39,8 +39,7 @@ const char * const led_colors[LED_COLOR_ID_MAX] = {
};
EXPORT_SYMBOL_GPL(led_colors);
-static int __led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value)
{
if (!led_cdev->brightness_set)
return -ENOTSUPP;
@@ -50,8 +49,7 @@ static int __led_set_brightness(struct led_classdev *led_cdev,
return 0;
}
-static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned int value)
{
if (!led_cdev->brightness_set_blocking)
return -ENOTSUPP;
@@ -240,8 +238,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
-void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness)
{
/*
* If software blink is active, delay brightness setting
@@ -253,7 +250,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
* work queue task to avoid problems in case we are called
* from hard irq context.
*/
- if (brightness == LED_OFF) {
+ if (!brightness) {
set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags);
schedule_work(&led_cdev->set_brightness_work);
} else {
@@ -268,8 +265,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
}
EXPORT_SYMBOL_GPL(led_set_brightness);
-void led_set_brightness_nopm(struct led_classdev *led_cdev,
- enum led_brightness value)
+void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value)
{
/* Use brightness_set op if available, it is guaranteed not to sleep */
if (!__led_set_brightness(led_cdev, value))
@@ -281,8 +277,7 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev,
}
EXPORT_SYMBOL_GPL(led_set_brightness_nopm);
-void led_set_brightness_nosleep(struct led_classdev *led_cdev,
- enum led_brightness value)
+void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value)
{
led_cdev->brightness = min(value, led_cdev->max_brightness);
@@ -293,8 +288,7 @@ void led_set_brightness_nosleep(struct led_classdev *led_cdev,
}
EXPORT_SYMBOL_GPL(led_set_brightness_nosleep);
-int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value)
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value)
{
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off)
return -EBUSY;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 91da90cfb11d..4e7b78a84149 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -378,14 +378,15 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
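+ /* trigger events can arrive in hard irq context, so take the list lock irq-safely */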
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -396,11 +397,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@@ -408,7 +410,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
index 7fd557aceff6..c409b80c236d 100644
--- a/drivers/leds/leds-apu.c
+++ b/drivers/leds/leds-apu.c
@@ -83,6 +83,7 @@ static const struct apu_led_profile apu1_led_profile[] = {
};
static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
+ /* PC Engines APU with factory bios "SageBios_PCEngines_APU-45" */
{
.ident = "apu",
.matches = {
@@ -90,6 +91,14 @@ static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "APU")
}
},
+ /* PC Engines APU with "Mainline" bios >= 4.6.8 */
+ {
+ .ident = "apu",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "apu1")
+ }
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
@@ -173,7 +182,7 @@ static int __init apu_led_init(void)
int err;
if (!(dmi_match(DMI_SYS_VENDOR, "PC Engines") &&
- dmi_match(DMI_PRODUCT_NAME, "APU"))) {
+ (dmi_match(DMI_PRODUCT_NAME, "APU") || dmi_match(DMI_PRODUCT_NAME, "apu1")))) {
pr_err("No PC Engines APUv1 board detected. For APUv2,3 support, enable CONFIG_PCENGINES_APU2\n");
return -ENODEV;
}
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
index bb68ba23a7d4..49e1bddaa15e 100644
--- a/drivers/leds/leds-ariel.c
+++ b/drivers/leds/leds-ariel.c
@@ -96,14 +96,14 @@ static int ariel_led_probe(struct platform_device *pdev)
return -ENOMEM;
leds[0].ec_index = EC_BLUE_LED;
- leds[0].led_cdev.name = "blue:power",
+ leds[0].led_cdev.name = "blue:power";
leds[0].led_cdev.default_trigger = "default-on";
leds[1].ec_index = EC_AMBER_LED;
- leds[1].led_cdev.name = "amber:status",
+ leds[1].led_cdev.name = "amber:status";
leds[2].ec_index = EC_GREEN_LED;
- leds[2].led_cdev.name = "green:status",
+ leds[2].led_cdev.name = "green:status";
leds[2].led_cdev.default_trigger = "default-on";
for (i = 0; i < NLEDS; i++) {
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index e11fe1788242..b4e1fdff4186 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -192,13 +192,13 @@ static int store_color_common(struct device *dev, const char *buf, int color)
return 0;
}
-static ssize_t show_red(struct device *dev, struct device_attribute *attr,
+static ssize_t red_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, RED);
}
-static ssize_t store_red(struct device *dev, struct device_attribute *attr,
+static ssize_t red_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -209,15 +209,15 @@ static ssize_t store_red(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(red, S_IRUGO | S_IWUSR, show_red, store_red);
+static DEVICE_ATTR_RW(red);
-static ssize_t show_green(struct device *dev, struct device_attribute *attr,
+static ssize_t green_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, GREEN);
}
-static ssize_t store_green(struct device *dev, struct device_attribute *attr,
+static ssize_t green_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -229,15 +229,15 @@ static ssize_t store_green(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(green, S_IRUGO | S_IWUSR, show_green, store_green);
+static DEVICE_ATTR_RW(green);
-static ssize_t show_blue(struct device *dev, struct device_attribute *attr,
+static ssize_t blue_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, BLUE);
}
-static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
+static ssize_t blue_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -248,16 +248,16 @@ static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(blue, S_IRUGO | S_IWUSR, show_blue, store_blue);
+static DEVICE_ATTR_RW(blue);
-static ssize_t show_test(struct device *dev, struct device_attribute *attr,
+static ssize_t test_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE,
"#Write into test to start test sequence!#\n");
}
-static ssize_t store_test(struct device *dev, struct device_attribute *attr,
+static ssize_t test_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -273,7 +273,7 @@ static ssize_t store_test(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(test, S_IRUGO | S_IWUSR, show_test, store_test);
+static DEVICE_ATTR_RW(test);
/* TODO: HSB, fade, timeadj, script ... */
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 93f5b1b60fde..b5d5e22d2d1e 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -96,7 +96,8 @@ static int create_gpio_led(const struct gpio_led *template,
} else {
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
}
- led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+ led_dat->cdev.brightness = state;
+ led_dat->cdev.max_brightness = 1;
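+ /* a plain GPIO LED is on/off only, so expose a 0..1 brightness range */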
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
if (template->panic_indicator)
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 2f8362f6bf75..2db455efd4b1 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -346,8 +346,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
}
}
-static ssize_t lm3530_mode_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm3530_data *drvdata;
@@ -365,8 +365,8 @@ static ssize_t lm3530_mode_get(struct device *dev,
return len;
}
-static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t size)
+static ssize_t mode_store(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm3530_data *drvdata;
@@ -397,7 +397,7 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
return sizeof(drvdata->mode);
}
-static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
+static DEVICE_ATTR_RW(mode);
static struct attribute *lm3530_attrs[] = {
&dev_attr_mode.attr,
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index b3edee703193..43d5970d96aa 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -608,7 +608,7 @@ static struct attribute *lm3533_led_attributes[] = {
static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm3533_led *led = to_lm3533_led(led_cdev);
umode_t mode = attr->mode;
@@ -679,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
led->cdev.brightness_get = lm3533_led_get;
led->cdev.blink_set = lm3533_led_blink_set;
led->cdev.brightness = LED_OFF;
- led->cdev.groups = lm3533_led_attribute_groups,
+ led->cdev.groups = lm3533_led_attribute_groups;
led->id = pdev->id;
mutex_init(&led->mutex);
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 1505521249b5..2d3e11845ba5 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -349,9 +349,9 @@ static int lm355x_indicator_brightness_set(struct led_classdev *cdev,
}
/* indicator pattern only for lm3556*/
-static ssize_t lm3556_indicator_pattern_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t pattern_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
ssize_t ret;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -381,7 +381,7 @@ out:
return ret;
}
-static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
+static DEVICE_ATTR_WO(pattern);
static struct attribute *lm355x_indicator_attrs[] = {
&dev_attr_pattern.attr,
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 62c14872caf7..8007b82985a8 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -165,9 +165,9 @@ static int lm3642_control(struct lm3642_chip_data *chip,
/* torch */
/* torch pin config for lm3642 */
-static ssize_t lm3642_torch_pin_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t torch_pin_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
ssize_t ret;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -193,7 +193,7 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
+static DEVICE_ATTR_WO(torch_pin);
static int lm3642_torch_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
@@ -212,9 +212,9 @@ static int lm3642_torch_brightness_set(struct led_classdev *cdev,
/* flash */
/* strobe pin config for lm3642*/
-static ssize_t lm3642_strobe_pin_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t strobe_pin_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
ssize_t ret;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -240,7 +240,7 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
+static DEVICE_ATTR_WO(strobe_pin);
static int lm3642_strobe_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index f13117eed976..06230614fdc5 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -6,10 +6,9 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -322,7 +321,7 @@ static int lp50xx_brightness_set(struct led_classdev *cdev,
ret = regmap_write(led->priv->regmap, reg_val, brightness);
if (ret) {
- dev_err(&led->priv->client->dev,
+ dev_err(led->priv->dev,
"Cannot write brightness value %d\n", ret);
goto out;
}
@@ -338,7 +337,7 @@ static int lp50xx_brightness_set(struct led_classdev *cdev,
ret = regmap_write(led->priv->regmap, reg_val,
mc_dev->subled_info[i].intensity);
if (ret) {
- dev_err(&led->priv->client->dev,
+ dev_err(led->priv->dev,
"Cannot write intensity value %d\n", ret);
goto out;
}
@@ -360,8 +359,8 @@ static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[])
bank_enable_mask |= (1 << led_banks[i]);
}
- led_config_lo = (u8)(bank_enable_mask & 0xff);
- led_config_hi = (u8)(bank_enable_mask >> 8) & 0xff;
+ led_config_lo = bank_enable_mask;
+ led_config_hi = bank_enable_mask >> 8;
ret = regmap_write(priv->regmap, LP50XX_LED_CFG0, led_config_lo);
if (ret)
@@ -382,11 +381,9 @@ static int lp50xx_enable_disable(struct lp50xx *priv, int enable_disable)
{
int ret;
- if (priv->enable_gpio) {
- ret = gpiod_direction_output(priv->enable_gpio, enable_disable);
- if (ret)
- return ret;
- }
+ ret = gpiod_direction_output(priv->enable_gpio, enable_disable);
+ if (ret)
+ return ret;
if (enable_disable)
return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN);
@@ -404,7 +401,7 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
if (num_leds > 1) {
if (num_leds > priv->chip_info->max_modules) {
- dev_err(&priv->client->dev, "reg property is invalid\n");
+ dev_err(priv->dev, "reg property is invalid\n");
return -EINVAL;
}
@@ -412,13 +409,13 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
ret = fwnode_property_read_u32_array(child, "reg", led_banks, num_leds);
if (ret) {
- dev_err(&priv->client->dev, "reg property is missing\n");
+ dev_err(priv->dev, "reg property is missing\n");
return ret;
}
ret = lp50xx_set_banks(priv, led_banks);
if (ret) {
- dev_err(&priv->client->dev, "Cannot setup banked LEDs\n");
+ dev_err(priv->dev, "Cannot setup banked LEDs\n");
return ret;
}
@@ -426,12 +423,12 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
} else {
ret = fwnode_property_read_u32(child, "reg", &led_number);
if (ret) {
- dev_err(&priv->client->dev, "led reg property missing\n");
+ dev_err(priv->dev, "led reg property missing\n");
return ret;
}
if (led_number > priv->chip_info->num_leds) {
- dev_err(&priv->client->dev, "led-sources property is invalid\n");
+ dev_err(priv->dev, "led-sources property is invalid\n");
return -EINVAL;
}
@@ -455,12 +452,9 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
int i = 0;
priv->enable_gpio = devm_gpiod_get_optional(priv->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(priv->enable_gpio)) {
- ret = PTR_ERR(priv->enable_gpio);
- dev_err(&priv->client->dev, "Failed to get enable gpio: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(priv->enable_gpio))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->enable_gpio),
+ "Failed to get enable GPIO\n");
priv->regulator = devm_regulator_get(priv->dev, "vled");
if (IS_ERR(priv->regulator))
@@ -470,7 +464,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
led = &priv->leds[i];
ret = fwnode_property_count_u32(child, "reg");
if (ret < 0) {
- dev_err(&priv->client->dev, "reg property is invalid\n");
+ dev_err(priv->dev, "reg property is invalid\n");
goto child_out;
}
@@ -510,12 +504,11 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
led_cdev = &led->mc_cdev.led_cdev;
led_cdev->brightness_set_blocking = lp50xx_brightness_set;
- ret = devm_led_classdev_multicolor_register_ext(&priv->client->dev,
+ ret = devm_led_classdev_multicolor_register_ext(priv->dev,
&led->mc_cdev,
&init_data);
if (ret) {
- dev_err(&priv->client->dev, "led register err: %d\n",
- ret);
+ dev_err(priv->dev, "led register err: %d\n", ret);
goto child_out;
}
i++;
@@ -529,8 +522,7 @@ child_out:
return ret;
}
-static int lp50xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp50xx_probe(struct i2c_client *client)
{
struct lp50xx *led;
int count;
@@ -550,7 +542,7 @@ static int lp50xx_probe(struct i2c_client *client,
mutex_init(&led->lock);
led->client = client;
led->dev = &client->dev;
- led->chip_info = &lp50xx_chip_info_tbl[id->driver_data];
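+ /* the match data now carries the chip info pointer for both OF and I2C ID matching */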
+ led->chip_info = device_get_match_data(&client->dev);
i2c_set_clientdata(client, led);
led->regmap = devm_regmap_init_i2c(client,
led->chip_info->lp50xx_regmap_config);
@@ -579,15 +571,14 @@ static int lp50xx_remove(struct i2c_client *client)
ret = lp50xx_enable_disable(led, 0);
if (ret) {
- dev_err(&led->client->dev, "Failed to disable chip\n");
+ dev_err(led->dev, "Failed to disable chip\n");
return ret;
}
if (led->regulator) {
ret = regulator_disable(led->regulator);
if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ dev_err(led->dev, "Failed to disable regulator\n");
}
mutex_destroy(&led->lock);
@@ -596,24 +587,24 @@ static int lp50xx_remove(struct i2c_client *client)
}
static const struct i2c_device_id lp50xx_id[] = {
- { "lp5009", LP5009 },
- { "lp5012", LP5012 },
- { "lp5018", LP5018 },
- { "lp5024", LP5024 },
- { "lp5030", LP5030 },
- { "lp5036", LP5036 },
+ { "lp5009", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5009] },
+ { "lp5012", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5012] },
+ { "lp5018", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5018] },
+ { "lp5024", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5024] },
+ { "lp5030", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5030] },
+ { "lp5036", (kernel_ulong_t)&lp50xx_chip_info_tbl[LP5036] },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp50xx_id);
static const struct of_device_id of_lp50xx_leds_match[] = {
- { .compatible = "ti,lp5009", .data = (void *)LP5009 },
- { .compatible = "ti,lp5012", .data = (void *)LP5012 },
- { .compatible = "ti,lp5018", .data = (void *)LP5018 },
- { .compatible = "ti,lp5024", .data = (void *)LP5024 },
- { .compatible = "ti,lp5030", .data = (void *)LP5030 },
- { .compatible = "ti,lp5036", .data = (void *)LP5036 },
- {},
+ { .compatible = "ti,lp5009", .data = &lp50xx_chip_info_tbl[LP5009] },
+ { .compatible = "ti,lp5012", .data = &lp50xx_chip_info_tbl[LP5012] },
+ { .compatible = "ti,lp5018", .data = &lp50xx_chip_info_tbl[LP5018] },
+ { .compatible = "ti,lp5024", .data = &lp50xx_chip_info_tbl[LP5024] },
+ { .compatible = "ti,lp5030", .data = &lp50xx_chip_info_tbl[LP5030] },
+ { .compatible = "ti,lp5036", .data = &lp50xx_chip_info_tbl[LP5036] },
+ {}
};
MODULE_DEVICE_TABLE(of, of_lp50xx_leds_match);
@@ -622,7 +613,7 @@ static struct i2c_driver lp50xx_driver = {
.name = "lp50xx",
.of_match_table = of_lp50xx_leds_match,
},
- .probe = lp50xx_probe,
+ .probe_new = lp50xx_probe,
.remove = lp50xx_remove,
.id_table = lp50xx_id,
};
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 512a11d142d0..c0bddb33888d 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -160,8 +160,8 @@ static void max8997_led_brightness_set(struct led_classdev *led_cdev,
}
}
-static ssize_t max8997_led_show_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct max8997_led *led =
@@ -193,9 +193,9 @@ static ssize_t max8997_led_show_mode(struct device *dev,
return ret;
}
-static ssize_t max8997_led_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct max8997_led *led =
@@ -222,7 +222,7 @@ static ssize_t max8997_led_store_mode(struct device *dev,
return size;
}
-static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
+static DEVICE_ATTR_RW(mode);
static struct attribute *max8997_attrs[] = {
&dev_attr_mode.attr,
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 68fbf0b66fad..77213b79f84d 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -204,9 +204,9 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
spin_unlock_irqrestore(&led_dat->lock, flags);
}
-static ssize_t netxbig_led_sata_store(struct device *dev,
- struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t sata_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct netxbig_led_data *led_dat =
@@ -255,8 +255,8 @@ exit_unlock:
return ret;
}
-static ssize_t netxbig_led_sata_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t sata_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct netxbig_led_data *led_dat =
@@ -265,7 +265,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
return sprintf(buf, "%d\n", led_dat->sata);
}
-static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
+static DEVICE_ATTR_RW(sata);
static struct attribute *netxbig_led_attrs[] = {
&dev_attr_sata.attr,
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 245de443fe9c..fcaa34706b6c 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -441,8 +441,8 @@ static void set_power_light_amber_noblink(void)
nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
}
-static ssize_t nas_led_blink_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t blink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct led_classdev *led = dev_get_drvdata(dev);
int blinking = 0;
@@ -451,9 +451,9 @@ static ssize_t nas_led_blink_show(struct device *dev,
return sprintf(buf, "%u\n", blinking);
}
-static ssize_t nas_led_blink_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
int ret;
struct led_classdev *led = dev_get_drvdata(dev);
@@ -468,7 +468,7 @@ static ssize_t nas_led_blink_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
+static DEVICE_ATTR_RW(blink);
static struct attribute *nasgpio_led_attrs[] = {
&dev_attr_blink.attr,
@@ -478,7 +478,6 @@ ATTRIBUTE_GROUPS(nasgpio_led);
static int register_nasgpio_led(int led_nr)
{
- int ret;
struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
struct led_classdev *led = get_classdev_for_led_nr(led_nr);
@@ -489,11 +488,8 @@ static int register_nasgpio_led(int led_nr)
led->brightness_set = nasgpio_led_set_brightness;
led->blink_set = nasgpio_led_set_blink;
led->groups = nasgpio_led_groups;
- ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
- if (ret)
- return ret;
- return 0;
+ return led_classdev_register(&nas_gpio_pci_dev->dev, led);
}
static void unregister_nasgpio_led(int led_nr)
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 67f4235cb28a..c48b80574f02 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -155,8 +155,8 @@ static const char * const led_src_texts[] = {
"soft",
};
-static ssize_t wm831x_status_src_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t src_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct wm831x_status *led = to_wm831x_status(led_cdev);
@@ -178,9 +178,9 @@ static ssize_t wm831x_status_src_show(struct device *dev,
return ret;
}
-static ssize_t wm831x_status_src_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t src_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct wm831x_status *led = to_wm831x_status(led_cdev);
@@ -197,7 +197,7 @@ static ssize_t wm831x_status_src_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
+static DEVICE_ATTR_RW(src);
static struct attribute *wm831x_status_attrs[] = {
&dev_attr_src.attr,
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 2d9eb48bbed9..345062ccabda 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -19,10 +19,8 @@ static inline int led_get_brightness(struct led_classdev *led_cdev)
void led_init_core(struct led_classdev *led_cdev);
void led_stop_software_blink(struct led_classdev *led_cdev);
-void led_set_brightness_nopm(struct led_classdev *led_cdev,
- enum led_brightness value);
-void led_set_brightness_nosleep(struct led_classdev *led_cdev,
- enum led_brightness value);
+void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value);
+void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value);
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count);
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index ce9429ca6dde..b77a01bd27f4 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -144,4 +144,13 @@ config LEDS_TRIGGER_AUDIO
the audio mute and mic-mute changes.
If unsure, say N
+config LEDS_TRIGGER_TTY
+ tristate "LED Trigger for TTY devices"
+ depends on TTY
+ help
+ This allows LEDs to be controlled by activity on ttys, which
+ includes serial devices like /dev/ttyS0.
+
+ When built as a module, this driver will be called ledtrig-tty.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 733a83e2a718..25c4db97cdd4 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o
obj-$(CONFIG_LEDS_TRIGGER_AUDIO) += ledtrig-audio.o
+obj-$(CONFIG_LEDS_TRIGGER_TTY) += ledtrig-tty.o
diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
new file mode 100644
index 000000000000..d2ab6ab080ac
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-tty.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <uapi/linux/serial.h>
+
+struct ledtrig_tty_data {
+ struct led_classdev *led_cdev;
+ struct delayed_work dwork;
+ struct mutex mutex;
+ const char *ttyname;
+ struct tty_struct *tty;
+ int rx, tx;
+};
+
+static void ledtrig_tty_restart(struct ledtrig_tty_data *trigger_data)
+{
+ schedule_delayed_work(&trigger_data->dwork, 0);
+}
+
+static ssize_t ttyname_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+ ssize_t len = 0;
+
+ mutex_lock(&trigger_data->mutex);
+
+ if (trigger_data->ttyname)
+ len = sprintf(buf, "%s\n", trigger_data->ttyname);
+
+ mutex_unlock(&trigger_data->mutex);
+
+ return len;
+}
+
+static ssize_t ttyname_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+ char *ttyname;
+ ssize_t ret = size;
+ bool running;
+
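+ /* strip the trailing newline a sysfs write usually appends */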
+ if (size > 0 && buf[size - 1] == '\n')
+ size -= 1;
+
+ if (size) {
+ ttyname = kmemdup_nul(buf, size, GFP_KERNEL);
+ if (!ttyname) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ttyname = NULL;
+ }
+
+ mutex_lock(&trigger_data->mutex);
+
+ running = trigger_data->ttyname != NULL;
+
+ kfree(trigger_data->ttyname);
+ tty_kref_put(trigger_data->tty);
+ trigger_data->tty = NULL;
+
+ trigger_data->ttyname = ttyname;
+
+out_unlock:
+ mutex_unlock(&trigger_data->mutex);
+
+ if (ttyname && !running)
+ ledtrig_tty_restart(trigger_data);
+
+ return ret;
+}
+static DEVICE_ATTR_RW(ttyname);
+
+static void ledtrig_tty_work(struct work_struct *work)
+{
+ struct ledtrig_tty_data *trigger_data =
+ container_of(work, struct ledtrig_tty_data, dwork.work);
+ struct serial_icounter_struct icount;
+ int ret;
+
+ mutex_lock(&trigger_data->mutex);
+
+ if (!trigger_data->ttyname) {
+ /* exit without rescheduling */
+ mutex_unlock(&trigger_data->mutex);
+ return;
+ }
+
+ /* try to get the tty corresponding to $ttyname */
+ if (!trigger_data->tty) {
+ dev_t devno;
+ struct tty_struct *tty;
+ int ret;
+
+ ret = tty_dev_name_to_number(trigger_data->ttyname, &devno);
+ if (ret < 0)
+ /*
+ * A device with this name might appear later, so keep
+ * retrying.
+ */
+ goto out;
+
+ tty = tty_kopen_shared(devno);
+ if (IS_ERR(tty) || !tty)
+ /* What to do? retry or abort */
+ goto out;
+
+ trigger_data->tty = tty;
+ }
+
+ ret = tty_get_icount(trigger_data->tty, &icount);
+ if (ret) {
+ dev_info(trigger_data->tty->dev, "Failed to get icount, stopped polling\n");
+ mutex_unlock(&trigger_data->mutex);
+ return;
+ }
+
+ if (icount.rx != trigger_data->rx ||
+ icount.tx != trigger_data->tx) {
+ led_set_brightness(trigger_data->led_cdev, LED_ON);
+
+ trigger_data->rx = icount.rx;
+ trigger_data->tx = icount.tx;
+ } else {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ }
+
+out:
+ mutex_unlock(&trigger_data->mutex);
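+ /* re-poll in 100 ms; each pass lights the LED only if rx/tx advanced since the last one */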
+ schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100));
+}
+
+static struct attribute *ledtrig_tty_attrs[] = {
+ &dev_attr_ttyname.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ledtrig_tty);
+
+static int ledtrig_tty_activate(struct led_classdev *led_cdev)
+{
+ struct ledtrig_tty_data *trigger_data;
+
+ trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+ if (!trigger_data)
+ return -ENOMEM;
+
+ led_set_trigger_data(led_cdev, trigger_data);
+
+ INIT_DELAYED_WORK(&trigger_data->dwork, ledtrig_tty_work);
+ trigger_data->led_cdev = led_cdev;
+ mutex_init(&trigger_data->mutex);
+
+ return 0;
+}
+
+static void ledtrig_tty_deactivate(struct led_classdev *led_cdev)
+{
+ struct ledtrig_tty_data *trigger_data = led_get_trigger_data(led_cdev);
+
+ cancel_delayed_work_sync(&trigger_data->dwork);
+
+ kfree(trigger_data);
+}
+
+static struct led_trigger ledtrig_tty = {
+ .name = "tty",
+ .activate = ledtrig_tty_activate,
+ .deactivate = ledtrig_tty_deactivate,
+ .groups = ledtrig_tty_groups,
+};
+module_led_trigger(ledtrig_tty);
+
+MODULE_AUTHOR("Uwe Kleine-König <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("UART LED trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c1bcac71008c..28ddcaa5358b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
ret = nvm_submit_io_sync_raw(dev, &rqd);
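+ /* free the page before the error check so a failed submission can't leak it */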
+ __free_page(page);
if (ret)
return ret;
- __free_page(page);
-
return rqd.error;
}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 1dddba11e721..33d39d3dd343 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -988,7 +988,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
- guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
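+ /* export_guid() writes the raw on-disk byte array directly, avoiding the guid_t cast */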
+ export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
smeta_buf->header.id = cpu_to_le32(line->id);
smeta_buf->header.type = cpu_to_le16(line->type);
smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
@@ -1803,8 +1803,7 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
- guid_copy((guid_t *)&emeta_buf->header.uuid,
- &pblk->instance_uuid);
+ export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
emeta_buf->header.id = cpu_to_le32(line->id);
emeta_buf->header.type = cpu_to_le16(line->type);
emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 2581eebcfc41..b31658be35a7 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -23,8 +23,7 @@
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
- if (gc_rq->data)
- vfree(gc_rq->data);
+ vfree(gc_rq->data);
kfree(gc_rq);
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index b6246f73895c..5924f09c217b 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -49,7 +49,7 @@ struct bio_set pblk_bio_set;
static blk_qc_t pblk_submit_bio(struct bio *bio)
{
- struct pblk *pblk = bio->bi_disk->queue->queuedata;
+ struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;
if (bio_op(bio) == REQ_OP_DISCARD) {
pblk_discard(pblk, bio);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 299ef47a17b2..0e6f0c76e930 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -706,8 +706,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
/* The first valid instance uuid is used for initialization */
if (!valid_uuid) {
- guid_copy(&pblk->instance_uuid,
- (guid_t *)&smeta_buf->header.uuid);
+ import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
valid_uuid = 1;
}
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 0ee327249150..2633bc254935 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -19,6 +19,7 @@
#include <asm/macints.h>
#include <asm/mac_iop.h>
#include <asm/adb_iop.h>
+#include <asm/unaligned.h>
#include <linux/adb.h>
@@ -249,7 +250,7 @@ static void adb_iop_set_ap_complete(struct iop_msg *msg)
{
struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
- autopoll_devs = (amsg->data[1] << 8) | amsg->data[0];
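+ /* the IOP message carries the 16-bit autopoll mask big-endian, hence the BE accessor */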
+ autopoll_devs = get_unaligned_be16(amsg->data);
if (autopoll_devs & (1 << autopoll_addr))
return;
autopoll_addr = autopoll_devs ? (ffs(autopoll_devs) - 1) : 0;
@@ -266,8 +267,7 @@ static int adb_iop_autopoll(int devs)
amsg.flags = ADB_IOP_SET_AUTOPOLL | (mask ? ADB_IOP_AUTOPOLL : 0);
amsg.count = 2;
amsg.cmd = 0;
- amsg.data[0] = mask & 0xFF;
- amsg.data[1] = (mask >> 8) & 0xFF;
+ put_unaligned_be16(mask, amsg.data);
iop_send_message(ADB_IOP, ADB_CHAN, NULL, sizeof(amsg), (__u8 *)&amsg,
adb_iop_set_ap_complete);
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
index 67fb10885bb4..d997f8ebfa98 100644
--- a/drivers/mailbox/arm_mhuv2.c
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -238,19 +238,19 @@ struct mhuv2_mbox_chan_priv {
};
/* Macro for reading a bitfield within a physically mapped packed struct */
-#define readl_relaxed_bitfield(_regptr, _field) \
+#define readl_relaxed_bitfield(_regptr, _type, _field) \
({ \
u32 _regval; \
_regval = readl_relaxed((_regptr)); \
- (*(typeof((_regptr)))(&_regval))._field; \
+ (*(_type *)(&_regval))._field; \
})
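+/* The struct type is now passed in explicitly instead of being derived via typeof() on the __iomem pointer (presumably to keep sparse happy). */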
/* Macro for writing a bitfield within a physically mapped packed struct */
-#define writel_relaxed_bitfield(_value, _regptr, _field) \
+#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \
({ \
u32 _regval; \
_regval = readl_relaxed(_regptr); \
- (*(typeof(_regptr))(&_regval))._field = _value; \
+ (*(_type *)(&_regval))._field = _value; \
writel_relaxed(_regval, _regptr); \
})
@@ -496,7 +496,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
/* Interrupt handlers */
-static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 *reg)
+static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
{
struct mbox_chan *chans = mhu->mbox.chans;
int channel = 0, i, offset = 0, windows, protocol, ch_wn;
@@ -699,7 +699,9 @@ static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
ret = IRQ_HANDLED;
}
- kfree(data);
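+ /* data may hold an ERR_PTR() from a failed read; never pass that to kfree() */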
+ if (!IS_ERR(data))
+ kfree(data);
+
return ret;
}
@@ -969,8 +971,8 @@ static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
mhu->mbox.ops = &mhuv2_sender_ops;
mhu->send = reg;
- mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch);
- mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, arch_minor_rev);
+ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev);
spin_lock_init(&mhu->doorbell_pending_lock);
@@ -990,7 +992,7 @@ static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
mhu->mbox.txdone_poll = false;
mhu->irq = adev->irq[0];
- writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb);
+ writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
/* Disable all channel interrupts */
for (i = 0; i < mhu->windows; i++)
@@ -1023,8 +1025,8 @@ static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
mhu->mbox.ops = &mhuv2_receiver_ops;
mhu->recv = reg;
- mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, num_ch);
- mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, arch_minor_rev);
+ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev);
mhu->irq = adev->irq[0];
if (!mhu->irq) {
@@ -1045,7 +1047,7 @@ static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
if (mhu->minor)
- writel_relaxed_bitfield(1, &mhu->recv->int_en, chcomb);
+ writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb);
return 0;
}
@@ -1095,14 +1097,12 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
}
-static int mhuv2_remove(struct amba_device *adev)
+static void mhuv2_remove(struct amba_device *adev)
{
struct mhuv2 *mhu = amba_get_drvdata(adev);
if (mhu->frame == SENDER_FRAME)
writel_relaxed(0x0, &mhu->send->access_request);
-
- return 0;
}
static struct amba_id mhuv2_ids[] = {
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 93fe08aef3ca..7295e3835e30 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -3,7 +3,7 @@
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
- * Copyright (C) 2013-2019 Texas Instruments Incorporated - https://www.ti.com
+ * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
@@ -664,6 +664,10 @@ static const struct of_device_id omap_mailbox_of_match[] = {
.data = &omap4_data,
},
{
+ .compatible = "ti,am64-mailbox",
+ .data = &omap4_data,
+ },
+ {
/* end */
},
};
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 077e5c6a9ef7..f25324d03842 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -61,11 +61,15 @@ static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
+static const struct qcom_apcs_ipc_data sdx55_apcs_data = {
+ .offset = 0x1008, .clk_name = "qcom-sdx55-acps-clk"
+};
+
static const struct regmap_config apcs_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xFFC,
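+ /* raised to cover the SDX55 IPC register at offset 0x1008 */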
+ .max_register = 0x1008,
.fast_io = true,
};
@@ -159,9 +163,11 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index f6fab24ae8a9..4c325301a2fe 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -35,7 +35,7 @@
#define SPRD_MBOX_IRQ_CLR BIT(0)
/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
-#define SPRD_OUTBOX_FIFO_FULL BIT(0)
+#define SPRD_OUTBOX_FIFO_FULL BIT(2)
#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index e07091d71986..acd0675da681 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -98,7 +98,9 @@ struct tegra_hsp {
unsigned int num_ss;
unsigned int num_db;
unsigned int num_si;
+
spinlock_t lock;
+ struct lock_class_key lock_key;
struct list_head doorbells;
struct tegra_hsp_mailbox *mailboxes;
@@ -775,6 +777,18 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return err;
}
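+ /* a dedicated lock class per instance, presumably so lockdep does not conflate the locks of multiple HSP blocks */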
+ lockdep_register_key(&hsp->lock_key);
+ lockdep_set_class(&hsp->lock, &hsp->lock_key);
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ lockdep_unregister_key(&hsp->lock_key);
+
return 0;
}
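
Registering a dynamic lockdep key per probed device gives each HSP instance its own lock class, so nested locking between two instances is no longer reported as a self-deadlock; the key must be unregistered again, which is why the driver gains a remove callback. A minimal sketch of the pairing:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	struct lock_class_key lock_key;	/* one lockdep class per instance */
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	lockdep_register_key(&f->lock_key);
	lockdep_set_class(&f->lock, &f->lock_key);
}

static void foo_exit(struct foo *f)
{
	/* Must happen before the containing structure is freed. */
	lockdep_unregister_key(&f->lock_key);
}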
@@ -834,6 +848,7 @@ static struct platform_driver tegra_hsp_driver = {
.pm = &tegra_hsp_pm_ops,
},
.probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
};
static int __init tegra_hsp_init(void)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b7e2d9666614..f2014385d48b 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -270,6 +270,7 @@ config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
+ depends on (TRUSTED_KEYS || TRUSTED_KEYS=n)
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
@@ -605,6 +606,7 @@ config DM_INTEGRITY
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
+ select CRYPTO_SKCIPHER
select ASYNC_XOR
help
This device-mapper target emulates a block device that has
@@ -622,6 +624,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
+ select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 1d57f48307e6..848dd4db1659 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -373,6 +373,7 @@ struct cached_dev {
unsigned int partial_stripes_expensive:1;
unsigned int writeback_metadata:1;
unsigned int writeback_running:1;
+ unsigned int writeback_consider_fragment:1;
unsigned char writeback_percent;
unsigned int writeback_delay;
@@ -385,6 +386,9 @@ struct cached_dev {
unsigned int writeback_rate_update_seconds;
unsigned int writeback_rate_i_term_inverse;
unsigned int writeback_rate_p_term_inverse;
+ unsigned int writeback_rate_fp_term_low;
+ unsigned int writeback_rate_fp_term_mid;
+ unsigned int writeback_rate_fp_term_high;
unsigned int writeback_rate_minimum;
enum stop_on_failure stop_when_cache_set_failed;
@@ -1001,6 +1005,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
+extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1042,5 +1047,7 @@ void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 67a2c47f4201..94d38e8a59b3 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -712,8 +712,10 @@ void bch_bset_build_written_tree(struct btree_keys *b)
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size)) {
- while (bkey_to_cacheline(t, k) < cacheline)
- prev = k, k = bkey_next(k);
+ while (bkey_to_cacheline(t, k) < cacheline) {
+ prev = k;
+ k = bkey_next(k);
+ }
t->prev[j] = bkey_u64s(prev);
t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
@@ -901,8 +903,10 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
status = BTREE_INSERT_STATUS_INSERT;
while (m != bset_bkey_last(i) &&
- bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
- prev = m, m = bkey_next(m);
+ bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
+ prev = m;
+ m = bkey_next(m);
+ }
/* prev is in the tree, if we merge we're done */
status = BTREE_INSERT_STATUS_BACK_MERGE;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 910df242c83d..fe6dce125aba 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -99,6 +99,8 @@
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+static struct workqueue_struct *btree_io_wq;
+
#define insert_lock(s, b) ((b)->level <= (s)->lock)
@@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
btree_complete_write(b, w);
if (btree_node_dirty(b))
- schedule_delayed_work(&b->work, 30 * HZ);
+ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
closure_return_with_destructor(cl, btree_node_write_unlock);
}
@@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
BUG_ON(!i->keys);
if (!btree_node_dirty(b))
- schedule_delayed_work(&b->work, 30 * HZ);
+ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
set_btree_node_dirty(b);
@@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
spin_lock_init(&buf->lock);
array_allocator_init(&buf->freelist);
}
+
+void bch_btree_exit(void)
+{
+ if (btree_io_wq)
+ destroy_workqueue(btree_io_wq);
+}
+
+int __init bch_btree_init(void)
+{
+ btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
+ if (!btree_io_wq)
+ return -ENOMEM;
+
+ return 0;
+}
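
The new queue replaces schedule_delayed_work(), which runs on the shared system workqueue; a dedicated WQ_MEM_RECLAIM queue has a rescuer thread, so btree write-out can still make progress when the system is under memory pressure. A minimal sketch of the pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *io_wq;

static void io_fn(struct work_struct *w) { /* write out dirty state */ }
static DECLARE_DELAYED_WORK(dwork, io_fn);

static int __init demo_init(void)
{
	/* Rescuer-backed queue: safe to depend on from reclaim paths. */
	io_wq = alloc_workqueue("demo_io", WQ_MEM_RECLAIM, 0);
	if (!io_wq)
		return -ENOMEM;

	queue_delayed_work(io_wq, &dwork, 30 * HZ);
	return 0;
}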
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index b00fd08d696b..63e809f38e3f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -114,7 +114,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
- check->bi_disk = bio->bi_disk;
+ bio_set_dev(check, bio->bi_bdev);
check->bi_opf = REQ_OP_READ;
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
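
This is one instance of the block-layer conversion from the bi_disk/bi_partno pair to a single bi_bdev pointer; open-coded field copies become bio_set_dev() calls. A minimal sketch of the new form:

#include <linux/bio.h>

static void clone_target(struct bio *clone, struct bio *orig)
{
	/* Was: clone->bi_disk = orig->bi_disk (plus bi_partno). */
	bio_set_dev(clone, orig->bi_bdev);
	clone->bi_iter.bi_sector = orig->bi_iter.bi_sector;
}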
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..d1c8fd3977fc 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -29,6 +33,8 @@
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_compat & \
BCH##_FEATURE_COMPAT_##flagname) != 0); \
} \
@@ -46,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_ro_compat & \
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
} \
@@ -63,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_incompat & \
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
} \
@@ -77,7 +87,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
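
These helpers implement the usual forward-compatibility rule: any feature bit outside the supported mask means the on-disk format is newer than the running code, so the superblock must be rejected (see the read_super() hunk further down). A minimal sketch of the check, using the two incompat bits defined above:

#include <linux/types.h>

#define INCOMPAT_SUPP	(0x0001 | 0x0002)	/* bits this code understands */

static bool has_unknown_incompat(u64 feature_incompat)
{
	return (feature_incompat & ~(u64)INCOMPAT_SUPP) != 0;
}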
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index aefbdb7e003b..c6613e817333 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -932,8 +932,8 @@ atomic_t *bch_journal(struct cache_set *c,
journal_try_write(c);
} else if (!w->dirty) {
w->dirty = true;
- schedule_delayed_work(&c->journal.work,
- msecs_to_jiffies(c->journal_delay_ms));
+ queue_delayed_work(bch_flush_wq, &c->journal.work,
+ msecs_to_jiffies(c->journal_delay_ms));
spin_unlock(&c->journal.lock);
} else {
spin_unlock(&c->journal.lock);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 85b1f2a9b72d..29c231758293 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,7 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
- struct block_device *part;
+ struct block_device *orig_bdev;
unsigned long start_time;
struct btree_op op;
@@ -670,8 +670,8 @@ static void bio_complete(struct search *s)
{
if (s->orig_bio) {
/* Count on bcache device */
- part_end_io_acct(s->part, s->orig_bio, s->start_time);
-
+ bio_end_io_acct_remapped(s->orig_bio, s->start_time,
+ s->orig_bdev);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -714,7 +714,8 @@ static void search_free(struct closure *cl)
}
static inline struct search *search_alloc(struct bio *bio,
- struct bcache_device *d)
+ struct bcache_device *d, struct block_device *orig_bdev,
+ unsigned long start_time)
{
struct search *s;
@@ -732,7 +733,8 @@ static inline struct search *search_alloc(struct bio *bio,
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
/* Count on the bcache device */
- s->start_time = part_start_io_acct(d->disk, &s->part, bio);
+ s->orig_bdev = orig_bdev;
+ s->start_time = start_time;
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -894,7 +896,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_disk) - bio_end_sector(bio));
+ get_capacity(bio->bi_bdev->bd_disk) -
+ bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@@ -1073,7 +1076,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
- struct block_device *part;
+ struct block_device *orig_bdev;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1085,7 +1088,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_private = ddip->bi_private;
/* Count on the bcache device */
- part_end_io_acct(ddip->part, bio, ddip->start_time);
+ bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1098,7 +1101,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io(bio);
}
-static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
+static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
+ struct block_device *orig_bdev, unsigned long start_time)
{
struct detached_dev_io_private *ddip;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
@@ -1111,7 +1115,8 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
/* Count on the bcache device */
- ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
+ ddip->orig_bdev = orig_bdev;
+ ddip->start_time = start_time;
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1167,8 +1172,10 @@ static void quit_max_writeback_rate(struct cache_set *c,
blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct block_device *orig_bdev = bio->bi_bdev;
+ struct bcache_device *d = orig_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ unsigned long start_time;
int rw = bio_data_dir(bio);
if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
@@ -1193,11 +1200,13 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
}
+ start_time = bio_start_io_acct(bio);
+
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, orig_bdev, start_time);
trace_bcache_request_start(s->d, bio);
if (!bio->bi_iter.bi_size) {
@@ -1218,7 +1227,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
} else
/* I/O request sent to backing device */
- detached_dev_do_request(d, bio);
+ detached_dev_do_request(d, bio, orig_bdev, start_time);
return BLK_QC_T_NONE;
}
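
The accounting rework follows the new remapped-I/O helpers: submission samples bio_start_io_acct() and remembers the original block device, and completion charges the elapsed time back to that device with bio_end_io_acct_remapped(), even though the bio was pointed at the backing device in between. A minimal sketch of the pattern:

#include <linux/bio.h>
#include <linux/blkdev.h>

struct io_ctx {
	struct block_device *orig_bdev;
	unsigned long start_time;
};

static void account_start(struct bio *bio, struct io_ctx *ctx)
{
	/* Record the device the bio was submitted on, before remapping. */
	ctx->orig_bdev = bio->bi_bdev;
	ctx->start_time = bio_start_io_acct(bio);
}

static void account_done(struct bio *bio, struct io_ctx *ctx)
{
	/* Charge the elapsed time to the original device. */
	bio_end_io_acct_remapped(bio, ctx->start_time, ctx->orig_bdev);
}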
@@ -1274,7 +1283,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
@@ -1282,7 +1291,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
return BLK_QC_T_NONE;
}
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
cl = &s->cl;
bio = &s->bio.bio;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a4752ac410dc..71691f32959b 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -49,6 +49,7 @@ static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_flush_wq;
struct workqueue_struct *bch_journal_wq;
@@ -64,9 +65,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * The bcache tool makes sure the overflow won't
+ * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
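
With the LOG_LARGE_BUCKET_SIZE feature the on-disk 16-bit field stores log2 of the bucket size instead of its low 16 bits, so a stored value of 13, for example, decodes to 8192 sectors. A minimal sketch of the decode with the same overflow guard:

#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

static unsigned int decode_bucket_size(u16 stored, bool log_encoded)
{
	if (!log_encoded)
		return stored;			/* plain 16-bit sector count */

	/* 1 << stored must fit in unsigned int, i.e. stored <= 31. */
	if (stored > sizeof(unsigned int) * BITS_PER_BYTE - 1)
		pr_err("bucket size (1 << %u) overflows\n", stored);
	return 1U << stored;
}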
@@ -228,6 +245,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1302,6 +1333,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsolete large bucket layout is unsupported, setting the bcache device read-only\n");
+ pr_err("Please update to the latest bcache-tools and re-create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1524,6 +1561,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsolete large bucket layout is unsupported, setting the bcache device read-only\n");
+ pr_err("Please update to the latest bcache-tools and re-create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -1897,7 +1940,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
goto err;
if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ BIOSET_NEED_RESCUER))
goto err;
c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
@@ -2083,6 +2126,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detected the obsolete large bucket layout, all attached bcache devices will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2472,7 +2518,7 @@ out:
module_put(THIS_MODULE);
}
-static void register_device_aync(struct async_reg_args *args)
+static void register_device_async(struct async_reg_args *args)
{
if (SB_IS_BDEV(args->sb))
INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
@@ -2566,7 +2612,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->sb = sb;
args->sb_disk = sb_disk;
args->bdev = bdev;
- register_device_aync(args);
+ register_device_async(args);
/* No wait and returns to user space */
goto async_done;
}
@@ -2644,8 +2690,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
@@ -2776,6 +2822,9 @@ static void bcache_exit(void)
destroy_workqueue(bcache_wq);
if (bch_journal_wq)
destroy_workqueue(bch_journal_wq);
+ if (bch_flush_wq)
+ destroy_workqueue(bch_flush_wq);
+ bch_btree_exit();
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
@@ -2831,10 +2880,26 @@ static int __init bcache_init(void)
return bcache_major;
}
+ if (bch_btree_init())
+ goto err;
+
bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
if (!bcache_wq)
goto err;
+ /*
+ * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
+ *
+ * 1. It used `system_wq` before, which also does no memory reclaim.
+ * 2. With `WQ_MEM_RECLAIM`, desktop stalls, increased boot times, and
+ * reduced throughput can be observed.
+ *
+ * We still want to use our own queue so as not to congest the `system_wq`.
+ */
+ bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
+ if (!bch_flush_wq)
+ goto err;
+
bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
if (!bch_journal_wq)
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 00a520c03f41..cc89f3156d1a 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -117,10 +117,14 @@ rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
+rw_attribute(writeback_consider_fragment);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_fp_term_low);
+rw_attribute(writeback_rate_fp_term_mid);
+rw_attribute(writeback_rate_fp_term_high);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);
@@ -195,6 +199,7 @@ SHOW(__bch_cached_dev)
var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i");
+ var_printf(writeback_consider_fragment, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
sysfs_hprint(writeback_rate,
@@ -205,6 +210,9 @@ SHOW(__bch_cached_dev)
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_i_term_inverse);
var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_fp_term_low);
+ var_print(writeback_rate_fp_term_mid);
+ var_print(writeback_rate_fp_term_high);
var_print(writeback_rate_minimum);
if (attr == &sysfs_writeback_rate_debug) {
@@ -303,6 +311,7 @@ STORE(__cached_dev)
sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
sysfs_strtoul_bool(writeback_running, dc->writeback_running);
+ sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment);
sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
@@ -331,6 +340,16 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
dc->writeback_rate_p_term_inverse,
1, UINT_MAX);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_low,
+ dc->writeback_rate_fp_term_low,
+ 1, dc->writeback_rate_fp_term_mid - 1);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_mid,
+ dc->writeback_rate_fp_term_mid,
+ dc->writeback_rate_fp_term_low + 1,
+ dc->writeback_rate_fp_term_high - 1);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_high,
+ dc->writeback_rate_fp_term_high,
+ dc->writeback_rate_fp_term_mid + 1, UINT_MAX);
sysfs_strtoul_clamp(writeback_rate_minimum,
dc->writeback_rate_minimum,
1, UINT_MAX);
@@ -499,9 +518,13 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_delay,
&sysfs_writeback_percent,
&sysfs_writeback_rate,
+ &sysfs_writeback_consider_fragment,
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_fp_term_low,
+ &sysfs_writeback_rate_fp_term_mid,
+ &sysfs_writeback_rate_fp_term_high,
&sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
&sysfs_io_errors,
@@ -1071,8 +1094,10 @@ SHOW(__bch_cache)
--n;
while (cached < p + n &&
- *cached == BTREE_PRIO)
- cached++, n--;
+ *cached == BTREE_PRIO) {
+ cached++;
+ n--;
+ }
for (i = 0; i < n; i++)
sum += INITIAL_PRIO - cached[i];
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index a129e4d2707c..82d4e0880a99 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -88,6 +88,44 @@ static void __update_writeback_rate(struct cached_dev *dc)
int64_t integral_scaled;
uint32_t new_rate;
+ /*
+ * We need to consider the number of dirty buckets as well
+ * when calculating proportional_scaled. Otherwise we might
+ * end up with an unreasonably small writeback rate in a highly
+ * fragmented situation, where very few dirty sectors consume a
+ * lot of dirty buckets. The worst case is when the dirty buckets
+ * reach cutoff_writeback_sync while the dirty data has not even
+ * reached the writeback percent; the rate then stays at the
+ * minimum value, leaving writes stuck in non-writeback mode.
+ */
+ struct cache_set *c = dc->disk.c;
+
+ int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
+
+ if (dc->writeback_consider_fragment &&
+ c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
+ int64_t fragment =
+ div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
+ int64_t fp_term;
+ int64_t fps;
+
+ if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
+ fp_term = dc->writeback_rate_fp_term_low *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
+ } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
+ fp_term = dc->writeback_rate_fp_term_mid *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
+ } else {
+ fp_term = dc->writeback_rate_fp_term_high *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
+ }
+ fps = div_s64(dirty, dirty_buckets) * fp_term;
+ if (fragment > 3 && fps > proportional_scaled) {
+ /* Only overwrite the proportional term when fragment > 3 */
+ proportional_scaled = fps;
+ }
+ }
+
if ((error < 0 && dc->writeback_rate_integral > 0) ||
(error > 0 && time_before64(local_clock(),
dc->writeback_rate.next + NSEC_PER_MSEC))) {
@@ -977,6 +1015,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_metadata = true;
dc->writeback_running = false;
+ dc->writeback_consider_fragment = true;
dc->writeback_percent = 10;
dc->writeback_delay = 30;
atomic_long_set(&dc->writeback_rate.rate, 1024);
@@ -984,6 +1023,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
dc->writeback_rate_p_term_inverse = 40;
+ dc->writeback_rate_fp_term_low = 1;
+ dc->writeback_rate_fp_term_mid = 10;
+ dc->writeback_rate_fp_term_high = 1000;
dc->writeback_rate_i_term_inverse = 10000;
WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
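
To make the three-ramp fragment term concrete: with the defaults above (low=1, mid=10, high=1000), 60% of buckets in use and 20 dirty sectors per dirty bucket, fp_term = 10 * (60 - 57) = 30 and fps = 20 * 30 = 600, which replaces the proportional term whenever it is larger and the fragment ratio exceeds 3. A minimal sketch of the computation:

#include <linux/math64.h>
#include <linux/types.h>

/* Thresholds 50/57/64 and terms 1/10/1000 are the defaults above. */
static s64 fragment_fps(s64 dirty, s64 dirty_buckets, unsigned int in_use)
{
	s64 fp_term;

	if (in_use <= 57)
		fp_term = 1 * (in_use - 50);
	else if (in_use <= 64)
		fp_term = 10 * (in_use - 57);
	else
		fp_term = 1000 * (in_use - 64);

	/* e.g. in_use = 60, dirty/dirty_buckets = 20: 20 * 30 = 600 */
	return div_s64(dirty, dirty_buckets) * fp_term;
}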
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 3f1230e22de0..02b2f9df73f6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -16,6 +16,10 @@
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
+
#define BCH_DIRTY_INIT_THRD_MAX 64
/*
* 14 (16384ths) is chosen here as something that each backing device
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 2ea0360108e1..a3b71350eec8 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -18,8 +18,7 @@
*/
struct dm_bio_details {
- struct gendisk *bi_disk;
- u8 bi_partno;
+ struct block_device *bi_bdev;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
@@ -31,8 +30,7 @@ struct dm_bio_details {
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- bd->bi_disk = bio->bi_disk;
- bd->bi_partno = bio->bi_partno;
+ bd->bi_bdev = bio->bi_bdev;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
@@ -44,8 +42,7 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- bio->bi_disk = bd->bi_disk;
- bio->bi_partno = bd->bi_partno;
+ bio->bi_bdev = bd->bi_bdev;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c1a86bde658..fce4cbf9529d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+ return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index af6d4f898e4c..89a73204dbf4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -449,7 +449,7 @@ static int __check_incompat_features(struct cache_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(cmd->bdev->bd_disk))
+ if (bdev_read_only(cmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index bdb255edc200..a90bdf9b2ca6 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -85,12 +85,6 @@ struct clone {
struct dm_clone_metadata *cmd;
- /*
- * bio used to flush the destination device, before committing the
- * metadata.
- */
- struct bio flush_bio;
-
/* Region hydration hash table */
struct hash_table_bucket *ht;
@@ -1155,11 +1149,7 @@ static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
goto out;
}
- bio_reset(&clone->flush_bio);
- bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
- clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- r = submit_bio_wait(&clone->flush_bio);
+ r = blkdev_issue_flush(clone->dest_dev->bdev);
if (unlikely(r)) {
__metadata_operation_failed(clone, "flush destination device", r);
goto out;
@@ -1886,7 +1876,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&clone->deferred_flush_completions);
clone->hydration_offset = 0;
atomic_set(&clone->hydrations_in_flight, 0);
- bio_init(&clone->flush_bio, NULL, 0);
clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
if (!clone->wq) {
@@ -1958,7 +1947,6 @@ static void clone_dtr(struct dm_target *ti)
struct clone *clone = ti->private;
mutex_destroy(&clone->commit_lock);
- bio_uninit(&clone->flush_bio);
for (i = 0; i < clone->nr_ctr_args; i++)
kfree(clone->ctr_args[i]);
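
Dropping the embedded flush bio removes per-target state and the bio_reset()/submit_bio_wait() dance; blkdev_issue_flush() allocates an empty preflush write and waits for it internally. A minimal sketch of the replacement:

#include <linux/blkdev.h>

static int flush_dest(struct block_device *bdev)
{
	/* Issues an empty REQ_OP_WRITE | REQ_PREFLUSH bio and waits. */
	return blkdev_issue_flush(bdev);
}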
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 086d293c2b03..5953ff2bd260 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,6 +13,7 @@
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
+#include <linux/keyslot-manager.h>
#include <trace/events/block.h>
@@ -102,6 +103,10 @@ struct mapped_device {
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
+ int swap_bios;
+ struct semaphore swap_bios_semaphore;
+ struct mutex swap_bios_lock;
+
struct dm_stats stats;
/* for blk-mq request-based DM support */
@@ -162,6 +167,10 @@ struct dm_table {
void *event_context;
struct dm_md_mempools *mempools;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct blk_keyslot_manager *ksm;
+#endif
};
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53791138d78b..11c105ecd165 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -37,6 +37,7 @@
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
+#include <keys/trusted-type.h>
#include <linux/device-mapper.h>
@@ -133,7 +134,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
DM_CRYPT_WRITE_INLINE };
enum cipher_flags {
- CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */
+ CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
};
@@ -1454,13 +1455,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
- if (!ctx->r.req)
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req) {
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req)
+ return -ENOMEM;
+ }
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1471,13 +1475,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+ return 0;
}
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!ctx->r.req_aead)
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req_aead) {
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req_aead)
+ return -ENOMEM;
+ }
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1488,15 +1497,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+ return 0;
}
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
- crypt_alloc_req_aead(cc, ctx);
+ return crypt_alloc_req_aead(cc, ctx);
else
- crypt_alloc_req_skcipher(cc, ctx);
+ return crypt_alloc_req_skcipher(cc, ctx);
}
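
The gfp change is what lets these paths run from softirq context, where GFP_NOIO could sleep: fall back to GFP_ATOMIC there and let the caller turn an allocation failure into BLK_STS_DEV_RESOURCE plus a workqueue retry, as crypt_convert() does below. A minimal sketch, assuming a mempool-backed request:

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/preempt.h>

static void *alloc_crypt_req(mempool_t *pool)
{
	/* GFP_ATOMIC cannot sleep but may fail; callers must cope. */
	return mempool_alloc(pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
}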
static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1540,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
- struct convert_context *ctx, bool atomic)
+ struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
- atomic_set(&ctx->cc_pending, 1);
+ /*
+ * If reset_pending is set, we are dealing with the bio for the first
+ * time; otherwise we're continuing to work on the previous bio, so
+ * don't touch the cc_pending counter
+ */
+ if (reset_pending)
+ atomic_set(&ctx->cc_pending, 1);
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
- crypt_alloc_req(cc, ctx);
+ r = crypt_alloc_req(cc, ctx);
+ if (r) {
+ complete(&ctx->restart);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
@@ -1553,7 +1575,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
- wait_for_completion(&ctx->restart);
+ if (in_interrupt()) {
+ if (try_wait_for_completion(&ctx->restart)) {
+ /*
+ * we don't have to block to wait for completion,
+ * so proceed
+ */
+ } else {
+ /*
+ * we can't wait for completion without blocking
+ * exit and continue processing in a workqueue
+ */
+ ctx->r.req = NULL;
+ ctx->cc_sector += sector_step;
+ tag_offset++;
+ return BLK_STS_DEV_RESOURCE;
+ }
+ } else {
+ wait_for_completion(&ctx->restart);
+ }
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1731,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ bio_endio(io->base_bio);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1759,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);
base_bio->bi_status = error;
- bio_endio(base_bio);
+
+ /*
+ * If we are running this function from our tasklet,
+ * we can't call bio_endio() here, because it will call
+ * clone_endio() from dm.c, which in turn will
+ * free the current struct dm_crypt_io structure with
+ * our tasklet. In this case we need to delay bio_endio()
+ * execution to after the tasklet is done and dequeued.
+ */
+ if (tasklet_trylock(&io->tasklet)) {
+ tasklet_unlock(&io->tasklet);
+ bio_endio(base_bio);
+ return;
+ }
+
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
}
/*
@@ -1945,6 +2007,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ struct convert_context *ctx = &io->ctx;
+ int crypt_finished;
+ sector_t sector = io->sector;
+ blk_status_t r;
+
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+ /* Wait for completion signaled by kcryptd_async_done() */
+ wait_for_completion(&ctx->restart);
+ crypt_finished = 1;
+ }
+
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
+ kcryptd_crypt_write_io_submit(io, 0);
+ io->sector = sector;
+ }
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -1973,7 +2066,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
- test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ * (TODO: is it actually possible to be in softirq in the write path?)
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2101,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ blk_status_t r;
+
+ wait_for_completion(&io->ctx.restart);
+ reinit_completion(&io->ctx.restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
+ kcryptd_crypt_read_done(io);
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2009,7 +2131,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
@@ -2091,8 +2222,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
- if (in_irq()) {
- /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+ /*
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+ * it is being executed with irqs disabled.
+ */
+ if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
@@ -2302,7 +2437,6 @@ static int set_key_user(struct crypt_config *cc, struct key *key)
return 0;
}
-#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{
const struct encrypted_key_payload *ekp;
@@ -2318,7 +2452,22 @@ static int set_key_encrypted(struct crypt_config *cc, struct key *key)
return 0;
}
-#endif /* CONFIG_ENCRYPTED_KEYS */
+
+static int set_key_trusted(struct crypt_config *cc, struct key *key)
+{
+ const struct trusted_key_payload *tkp;
+
+ tkp = key->payload.data[0];
+ if (!tkp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != tkp->key_len)
+ return -EINVAL;
+
+ memcpy(cc->key, tkp->key, cc->key_size);
+
+ return 0;
+}
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
@@ -2348,11 +2497,14 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
type = &key_type_user;
set_key = set_key_user;
-#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
- } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
+ } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
+ !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
type = &key_type_encrypted;
set_key = set_key_encrypted;
-#endif
+ } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
+ !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+ type = &key_type_trusted;
+ set_key = set_key_trusted;
} else {
return -EINVAL;
}
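
Switching from #ifdef to IS_ENABLED() keeps both branches visible to the compiler, so the encrypted and trusted key paths stay type-checked even in configurations that disable them, while dead branches are still discarded; the DM_CRYPT Kconfig hunk earlier (depends on TRUSTED_KEYS || TRUSTED_KEYS=n) pairs with this by forbidding a built-in dm-crypt from referencing a modular trusted-keys. A minimal sketch of the idiom:

#include <linux/kconfig.h>
#include <linux/string.h>
#include <linux/types.h>

static bool is_trusted_prefix(const char *key_string)
{
	/* Compiled either way; folded to false if the option is off. */
	return IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
	       !strncmp(key_string, "trusted:", strlen("trusted:"));
}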
@@ -2982,7 +3134,6 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
}
#ifdef CONFIG_BLK_DEV_ZONED
-
static int crypt_report_zones(struct dm_target *ti,
struct dm_report_zones_args *args, unsigned int nr_zones)
{
@@ -2993,7 +3144,8 @@ static int crypt_report_zones(struct dm_target *ti,
return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
dm_report_zones_cb, args);
}
-
+#else
+#define crypt_report_zones NULL
#endif
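
The #else stub lets the target_type initializer below set .report_zones unconditionally instead of wrapping the member in #ifdef; with zoned support disabled, the pointer simply degrades to NULL. A minimal sketch of the idiom, with hypothetical names:

#include <stddef.h>

struct ops { int (*report_zones)(void); };

#ifdef CONFIG_BLK_DEV_ZONED
static int foo_report_zones(void) { return 0; }
#else
#define foo_report_zones NULL
#endif

static const struct ops foo_ops = {
	.report_zones = foo_report_zones,	/* no #ifdef around the table */
};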
/*
@@ -3190,6 +3342,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wake_up_process(cc->write_thread);
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
return 0;
@@ -3424,14 +3577,12 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 22, 0},
+ .version = {1, 23, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
-#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
.report_zones = crypt_report_zones,
-#endif
.map = crypt_map,
.status = crypt_status,
.postsuspend = crypt_postsuspend,
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 072ea913cebc..cbe1058ee589 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -130,7 +130,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block,
dd->badblock_count++;
if (!dd->quiet_mode) {
- DMINFO("%s: badblock added at block %llu with write fail count %hhu",
+ DMINFO("%s: badblock added at block %llu with write fail count %u",
__func__, block, wr_fail_cnt);
}
spin_unlock_irqrestore(&dd->dust_lock, flags);
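
The format-string fix reflects default argument promotion: an unsigned char passed to a variadic function travels as int, so %u prints it as-is while %hhu merely asks for a re-truncation. A minimal userspace illustration:

#include <stdio.h>

int main(void)
{
	unsigned char wr_fail_cnt = 200;

	/* wr_fail_cnt is promoted to int at the call site. */
	printf("write fail count %u\n", wr_fail_cnt);
	return 0;
}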
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index b24e3839bb3a..d9ac7372108c 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -47,6 +47,7 @@ struct writeset {
static void writeset_free(struct writeset *ws)
{
vfree(ws->bits);
+ ws->bits = NULL;
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
@@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits)
*/
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
- ws->md.nr_bits = nr_blocks;
- ws->md.root = INVALID_WRITESET_ROOT;
ws->bits = vzalloc(bitset_size(nr_blocks));
if (!ws->bits) {
DMERR("%s: couldn't allocate in memory bitset", __func__);
@@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
/*
* Wipes the in-core bitset, and creates a new on disk bitset.
*/
-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
+static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
+ dm_block_t nr_blocks)
{
int r;
- memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
+ memset(ws->bits, 0, bitset_size(nr_blocks));
+ ws->md.nr_bits = nr_blocks;
r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
if (r) {
DMERR("%s: setup_on_disk_bitset failed", __func__);
@@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
{
int r;
- if (!test_and_set_bit(block, ws->bits)) {
+ if (!test_bit(block, ws->bits)) {
r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
if (r) {
/* FIXME: fail mode */
@@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value)
static int ws_eq(void *context, const void *value1, const void *value2)
{
- return !memcmp(value1, value2, sizeof(struct writeset_metadata));
+ return !memcmp(value1, value2, sizeof(struct writeset_disk));
}
/*----------------------------------------------------------------*/
@@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md)
}
disk = dm_block_data(sblock);
+
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk->data_block_size) != md->block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk->data_block_size), md->block_size);
+ r = -EINVAL;
+ goto bad;
+ }
+
r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
disk->metadata_space_map_root,
sizeof(disk->metadata_space_map_root),
@@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md)
setup_infos(md);
- md->block_size = le32_to_cpu(disk->data_block_size);
md->nr_blocks = le32_to_cpu(disk->nr_blocks);
md->current_era = le32_to_cpu(disk->current_era);
+ ws_unpack(&disk->current_writeset, &md->current_writeset->md);
md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
md->era_array_root = le64_to_cpu(disk->era_array_root);
md->metadata_snap = le64_to_cpu(disk->metadata_snap);
@@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
ws_unpack(&disk, &d->writeset);
d->value = cpu_to_le32(key);
+ /*
+ * We initialise another bitset info to avoid any caching side effects
+ * with the previous one.
+ */
+ dm_disk_bitset_init(md->tm, &d->info);
+
d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
d->current_bit = 0;
d->step = metadata_digest_transcribe_writeset;
@@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
return 0;
memset(d, 0, sizeof(*d));
-
- /*
- * We initialise another bitset info to avoid any caching side
- * effects with the previous one.
- */
- dm_disk_bitset_init(md->tm, &d->info);
d->step = metadata_digest_lookup_writeset;
return 0;
@@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
static void metadata_close(struct era_metadata *md)
{
+ writeset_free(&md->writesets[0]);
+ writeset_free(&md->writesets[1]);
destroy_persistent_data_objects(md);
kfree(md);
}
@@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
r = writeset_alloc(&md->writesets[1], *new_size);
if (r) {
DMERR("%s: writeset_alloc failed for writeset 1", __func__);
+ writeset_free(&md->writesets[0]);
return r;
}
@@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
&value, &md->era_array_root);
if (r) {
DMERR("%s: dm_array_resize failed", __func__);
+ writeset_free(&md->writesets[0]);
+ writeset_free(&md->writesets[1]);
return r;
}
@@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md)
}
ws_pack(&md->current_writeset->md, &value);
- md->current_writeset->md.root = INVALID_WRITESET_ROOT;
keys[0] = md->current_era;
__dm_bless_for_disk(&value);
@@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md)
return r;
}
+ md->current_writeset->md.root = INVALID_WRITESET_ROOT;
md->archived_writesets = true;
return 0;
@@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md)
int r;
struct writeset *new_writeset = next_writeset(md);
- r = writeset_init(&md->bitset_info, new_writeset);
+ r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
if (r) {
DMERR("%s: writeset_init failed", __func__);
return r;
@@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md)
int r;
struct dm_block *sblock;
- if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
+ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
&md->current_writeset->md.root);
if (r) {
@@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era)
int r;
struct bio_list deferred_bios, marked_bios;
struct bio *bio;
+ struct blk_plug plug;
bool commit_needed = false;
bool failed = false;
+ struct writeset *ws = era->md->current_writeset;
bio_list_init(&deferred_bios);
bio_list_init(&marked_bios);
@@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era)
bio_list_init(&era->deferred_bios);
spin_unlock(&era->deferred_lock);
+ if (bio_list_empty(&deferred_bios))
+ return;
+
while ((bio = bio_list_pop(&deferred_bios))) {
- r = writeset_test_and_set(&era->md->bitset_info,
- era->md->current_writeset,
+ r = writeset_test_and_set(&era->md->bitset_info, ws,
get_block(era, bio));
if (r < 0) {
/*
@@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era)
* FIXME: finish.
*/
failed = true;
-
} else if (r == 0)
commit_needed = true;
@@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era)
if (failed)
while ((bio = bio_list_pop(&marked_bios)))
bio_io_error(bio);
- else
- while ((bio = bio_list_pop(&marked_bios)))
+ else {
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&marked_bios))) {
+ /*
+ * Only update the in-core writeset if the on-disk one
+ * was updated too.
+ */
+ if (commit_needed)
+ set_bit(get_block(era, bio), ws->bits);
submit_bio_noacct(bio);
+ }
+ blk_finish_plug(&plug);
+ }
}
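
The writeset change above splits test-and-set into two steps: writeset_test_and_set() now only tests the in-core bit while updating the on-disk bitset, and the in-core bit is set later, once the metadata commit is known to have happened, so a crash cannot leave a block marked in memory but unmarked on disk. A minimal sketch of the ordering:

#include <linux/bitops.h>
#include <linux/types.h>

static bool block_needs_commit(unsigned long *bits, unsigned long block)
{
	/* Test only; the on-disk bitset is updated separately. */
	return !test_bit(block, bits);
}

static void block_committed(unsigned long *bits, unsigned long block)
{
	/* Safe to mark in core now: the disk already agrees. */
	set_bit(block, bits);
}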
static void process_rpc_calls(struct era *era)
@@ -1473,15 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
era->md = md;
- era->nr_blocks = calc_nr_blocks(era);
-
- r = metadata_resize(era->md, &era->nr_blocks);
- if (r) {
- ti->error = "couldn't resize metadata";
- era_destroy(era);
- return -ENOMEM;
- }
-
era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
if (!era->wq) {
ti->error = "could not create workqueue for metadata object";
@@ -1556,16 +1575,24 @@ static int era_preresume(struct dm_target *ti)
dm_block_t new_size = calc_nr_blocks(era);
if (era->nr_blocks != new_size) {
- r = in_worker1(era, metadata_resize, &new_size);
- if (r)
+ r = metadata_resize(era->md, &new_size);
+ if (r) {
+ DMERR("%s: metadata_resize failed", __func__);
+ return r;
+ }
+
+ r = metadata_commit(era->md);
+ if (r) {
+ DMERR("%s: metadata_commit failed", __func__);
return r;
+ }
era->nr_blocks = new_size;
}
start_worker(era);
- r = in_worker0(era, metadata_new_era);
+ r = in_worker0(era, metadata_era_rollover);
if (r) {
DMERR("%s: metadata_era_rollover failed", __func__);
return r;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index a2cc9e45cbba..b7fee9936f05 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -469,6 +469,8 @@ static int flakey_report_zones(struct dm_target *ti,
return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
dm_report_zones_cb, args);
}
+#else
+#define flakey_report_zones NULL
#endif
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -481,10 +483,8 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
-#ifdef CONFIG_BLK_DEV_ZONED
- .features = DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
.report_zones = flakey_report_zones,
-#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 5a7a1b90e671..46b5d542b8fe 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -40,6 +40,7 @@
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
#define DISCARD_FILLER 0xf6
+#define SALT_SIZE 16
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -57,6 +58,7 @@
#define SB_VERSION_2 2
#define SB_VERSION_3 3
#define SB_VERSION_4 4
+#define SB_VERSION_5 5
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -72,12 +74,15 @@ struct superblock {
__u8 log2_blocks_per_bitmap_bit;
__u8 pad[2];
__u64 recalc_sector;
+ __u8 pad2[8];
+ __u8 salt[SALT_SIZE];
};
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
#define SB_FLAG_FIXED_PADDING 0x8
+#define SB_FLAG_FIXED_HMAC 0x10
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -257,8 +262,10 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
- bool fix_padding;
bool discard;
+ bool fix_padding;
+ bool fix_hmac;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -386,6 +393,17 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if (ic->legacy_recalculate)
+ return false;
+ if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
+ ic->internal_hash_alg.key || ic->journal_mac_alg.key :
+ ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
@@ -468,7 +486,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
+ ic->sb->version = SB_VERSION_5;
+ else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
ic->sb->version = SB_VERSION_4;
else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
ic->sb->version = SB_VERSION_3;
@@ -478,10 +498,58 @@ static void sb_set_version(struct dm_integrity_c *ic)
ic->sb->version = SB_VERSION_1;
}
+static int sb_mac(struct dm_integrity_c *ic, bool wr)
+{
+ SHASH_DESC_ON_STACK(desc, ic->journal_mac);
+ int r;
+ unsigned size = crypto_shash_digestsize(ic->journal_mac);
+
+ if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
+ dm_integrity_io_error(ic, "digest is too long", -EINVAL);
+ return -EINVAL;
+ }
+
+ desc->tfm = ic->journal_mac;
+
+ r = crypto_shash_init(desc);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_init", r);
+ return r;
+ }
+
+ r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_update", r);
+ return r;
+ }
+
+ if (likely(wr)) {
+ r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_final", r);
+ return r;
+ }
+ } else {
+ __u8 result[HASH_MAX_DIGESTSIZE];
+ r = crypto_shash_final(desc, result);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_final", r);
+ return r;
+ }
+ if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
+ dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
+ return -EILSEQ;
+ }
+ }
+
+ return 0;
+}
+
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
+ int r;
io_req.bi_op = op;
io_req.bi_op_flags = op_flags;
@@ -493,10 +561,28 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
io_loc.sector = ic->start;
io_loc.count = SB_SECTORS;
- if (op == REQ_OP_WRITE)
+ if (op == REQ_OP_WRITE) {
sb_set_version(ic);
+ if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ r = sb_mac(ic, true);
+ if (unlikely(r))
+ return r;
+ }
+ }
+
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r))
+ return r;
+
+ if (op == REQ_OP_READ) {
+ if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ r = sb_mac(ic, false);
+ if (unlikely(r))
+ return r;
+ }
+ }
- return dm_io(&io_req, 1, &io_loc, NULL);
+ return 0;
}
#define BITMAP_OP_TEST_ALL_SET 0
@@ -713,15 +799,32 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
desc->tfm = ic->journal_mac;
r = crypto_shash_init(desc);
- if (unlikely(r)) {
+ if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_init", r);
goto err;
}
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ uint64_t section_le;
+
+ r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_update", r);
+ goto err;
+ }
+
+ section_le = cpu_to_le64(section);
+ r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_update", r);
+ goto err;
+ }
+ }
+
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, section, j);
r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
- if (unlikely(r)) {
+ if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto err;
}
@@ -731,7 +834,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
if (likely(size <= JOURNAL_MAC_SIZE)) {
r = crypto_shash_final(desc, result);
- if (unlikely(r)) {
+ if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
goto err;
}
@@ -744,7 +847,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
goto err;
}
r = crypto_shash_final(desc, digest);
- if (unlikely(r)) {
+ if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
goto err;
}
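
Note the shape of the MAC input this gives each journal section once SB_FLAG_FIXED_HMAC is set: salt || le64(section) || the sector number of every journal entry. A MAC copied from another device (different random salt) or replayed from a different section of the same journal (different section index) therefore no longer verifies.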
@@ -1379,12 +1482,52 @@ thorough_test:
#undef MAY_BE_HASH
}
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+ struct dm_io_request io_req;
+ struct dm_io_region io_reg;
+ struct dm_integrity_c *ic;
+ struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+ struct flush_request *fr = fr_;
+ if (unlikely(error != 0))
+ dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+ complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
+
+ struct flush_request fr;
+
+ if (!ic->meta_dev)
+ flush_data = false;
+ if (flush_data) {
+ fr.io_req.bi_op = REQ_OP_WRITE;
+ fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
+ fr.io_req.notify.context = &fr;
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
+ fr.ic = ic;
+ init_completion(&fr.comp);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ BUG_ON(r);
+ }
+
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
+
+ if (flush_data)
+ wait_for_completion(&fr.comp);
}
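
The zero-length DM_IO_KMEM request above (sector 0, count 0, REQ_PREFLUSH) is simply an asynchronous "flush the disk cache" command; the tag writeback then overlaps with the flush, and the completion is reaped at the end. The same effect, synchronously and without dm-io, is a one-liner with an on-stack bio, as dm-snap's flush_data() further down in this series does; a minimal sketch:

/* Hypothetical helper, mirroring flush_data() in dm-snap.c below. */
static int flush_disk_cache(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	return submit_bio_wait(&bio);	/* 0 on success */
}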
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -1507,6 +1650,14 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
+ if (unlikely(r < 0)) {
+ dm_integrity_io_error(ic, "crypto_shash_update", r);
+ goto failed;
+ }
+ }
+
r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
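
With SB_FLAG_FIXED_HMAC, each per-sector tag thus becomes a MAC over salt || le64(sector) || data, binding every tag both to this particular device (via the random per-device salt written at format time) and to the sector it protects.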
@@ -2110,7 +2261,7 @@ offload_to_thread:
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
integrity_metadata(&dio->work);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
@@ -2195,7 +2346,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@@ -2409,7 +2560,7 @@ skip_io:
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2602,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@@ -2654,7 +2805,7 @@ static void bitmap_flush_work(struct work_struct *work)
unsigned long limit;
struct bio *bio;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
range.logical_sector = 0;
range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2814,7 @@ static void bitmap_flush_work(struct work_struct *work)
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
- if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+ dm_integrity_flush_buffers(ic, true);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +3083,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
if (ic->mode == 'B') {
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
#if 1
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
@@ -3102,6 +3251,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+ arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -3125,6 +3276,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
DMEMIT(" fix_padding");
+ if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
+ DMEMIT(" fix_hmac");
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
@@ -3260,6 +3415,11 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
if (!journal_sections)
journal_sections = 1;
+ if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
+ get_random_bytes(ic->sb->salt, SALT_SIZE);
+ }
+
if (!ic->meta_dev) {
if (ic->fix_padding)
ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
@@ -3754,7 +3914,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 9, "Invalid number of feature args"},
+ {0, 17, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -3892,7 +4052,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad;
} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
- r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
+ r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)
goto bad;
@@ -3902,6 +4062,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
+ } else if (!strcmp(opt_string, "fix_hmac")) {
+ ic->fix_hmac = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -4058,7 +4222,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -4197,6 +4361,20 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
@@ -4376,7 +4554,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4312007d2d34..2d3cda0acacb 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -341,8 +341,8 @@ static void do_region(int op, int op_flags, unsigned region,
num_bvecs = 1;
break;
default:
- num_bvecs = min_t(int, BIO_MAX_PAGES,
- dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+ num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
+ (PAGE_SIZE >> SECTOR_SHIFT)));
}
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 00774b5d7668..92db0f5e7f28 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -146,6 +146,8 @@ static int linear_report_zones(struct dm_target *ti,
return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
dm_report_zones_cb, args);
}
+#else
+#define linear_report_zones NULL
#endif
static int linear_iterate_devices(struct dm_target *ti,
@@ -227,13 +229,9 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
-#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
- DM_TARGET_ZONED_HM,
+ DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
.report_zones = linear_report_zones,
-#else
- .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
-#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index e3d35c6c9f71..57882654ffee 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -264,15 +264,14 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
size_t entrylen, void *data, size_t datalen,
sector_t sector)
{
- int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+ int bio_pages, pg_datalen, pg_sectorlen, i;
struct page *page;
struct bio *bio;
size_t ret;
void *ptr;
while (datalen) {
- num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
- bio_pages = min(num_pages, BIO_MAX_PAGES);
+ bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));
atomic_inc(&lc->io_blocks);
@@ -364,7 +363,7 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
@@ -386,7 +385,8 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL,
+ bio_max_segs(block->vec_cnt - i));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 23c38777e8f6..cab12b2251ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
+ * RAID0 and RAID10 personalities require bio splitting,
+ * RAID1/4/5/6 don't and process large discard bios properly.
*/
- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+ if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa09bc4e4c54..b0a82f29a2e4 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_disk == NULL, details were not saved */
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1190,7 +1190,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@@ -1257,7 +1257,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
- if (!bio_record->details.bi_disk) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1282,7 +1282,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@@ -1292,7 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
return DM_ENDIO_DONE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4668b2cd98f4..11890db71f3f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,6 +141,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
+
+ /*
+ * Flush data after merge.
+ */
+ struct bio flush_bio;
};
/*
@@ -1121,6 +1126,17 @@ shut:
static void error_bios(struct bio *bio);
+static int flush_data(struct dm_snapshot *s)
+{
+ struct bio *flush_bio = &s->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, s->origin->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
+ if (flush_data(s) < 0) {
+ DMERR("Flush after merge failed: shutting down merge");
+ goto shut;
+ }
+
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
+ bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ bio_uninit(&s->flush_bio);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 188f41287f18..95391f78b8d5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -187,6 +187,8 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
}
}
+static void dm_table_destroy_keyslot_manager(struct dm_table *t);
+
void dm_table_destroy(struct dm_table *t)
{
unsigned int i;
@@ -215,6 +217,8 @@ void dm_table_destroy(struct dm_table *t)
dm_free_md_mempools(t->mempools);
+ dm_table_destroy_keyslot_manager(t);
+
kfree(t);
}
@@ -363,14 +367,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
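
The "%u:%u%c" scanset accepts a bare major:minor string and rejects trailing garbage, because the extra %c only matches when something follows the numbers. A standalone sketch of the behavior (the helper name is hypothetical):

#include <stdio.h>

/* Hypothetical helper demonstrating the scanset. */
static int looks_like_dev_t(const char *path)
{
	unsigned int major, minor;
	char dummy;

	/* "8:16" -> 2 (accepted); "8:16x" -> 3, "8" -> 1 (both rejected) */
	return sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2;
}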
dd = find_device(&t->devices, dev);
if (!dd) {
@@ -811,24 +824,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
int blocksize = *(int *) data, id;
bool rc;
id = dax_read_lock();
- rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
dax_read_unlock(id);
return rc;
}
/* Check devices support synchronous DAX */
-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- return dev->dax_dev && dax_synchronous(dev->dax_dev);
+ return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}
bool dm_table_supports_dax(struct dm_table *t,
@@ -845,7 +858,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ ti->type->iterate_devices(ti, iterate_fn, blocksize))
return false;
}
@@ -916,7 +929,7 @@ static int dm_table_determine_type(struct dm_table *t)
verify_bio_based:
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
- if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
+ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
}
@@ -1194,6 +1207,210 @@ static int dm_table_register_integrity(struct dm_table *t)
return 0;
}
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+struct dm_keyslot_manager {
+ struct blk_keyslot_manager ksm;
+ struct mapped_device *md;
+};
+
+struct dm_keyslot_evict_args {
+ const struct blk_crypto_key *key;
+ int err;
+};
+
+static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_keyslot_evict_args *args = data;
+ int err;
+
+ err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
+ if (!args->err)
+ args->err = err;
+ /* Always try to evict the key from all devices. */
+ return 0;
+}
+
+/*
+ * When an inline encryption key is evicted from a device-mapper device, evict
+ * it from all the underlying devices.
+ */
+static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+ const struct blk_crypto_key *key, unsigned int slot)
+{
+ struct dm_keyslot_manager *dksm = container_of(ksm,
+ struct dm_keyslot_manager,
+ ksm);
+ struct mapped_device *md = dksm->md;
+ struct dm_keyslot_evict_args args = { key };
+ struct dm_table *t;
+ int srcu_idx;
+ int i;
+ struct dm_target *ti;
+
+ t = dm_get_live_table(md, &srcu_idx);
+ if (!t)
+ return 0;
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+ if (!ti->type->iterate_devices)
+ continue;
+ ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
+ }
+ dm_put_live_table(md, srcu_idx);
+ return args.err;
+}
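
dm_keyslot_evict_callback() deliberately always returns 0 so that iterate_devices() visits every underlying device even after a failure; only the first non-zero blk_crypto_evict_key() result is latched in args.err and reported back from dm_keyslot_evict().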
+
+static struct blk_ksm_ll_ops dm_ksm_ll_ops = {
+ .keyslot_evict = dm_keyslot_evict,
+};
+
+static int device_intersect_crypto_modes(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct blk_keyslot_manager *parent = data;
+ struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+
+ blk_ksm_intersect_modes(parent, child);
+ return 0;
+}
+
+void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+{
+ struct dm_keyslot_manager *dksm = container_of(ksm,
+ struct dm_keyslot_manager,
+ ksm);
+
+ if (!ksm)
+ return;
+
+ blk_ksm_destroy(ksm);
+ kfree(dksm);
+}
+
+static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+{
+ dm_destroy_keyslot_manager(t->ksm);
+ t->ksm = NULL;
+}
+
+/*
+ * Constructs and initializes t->ksm with a keyslot manager that
+ * represents the common set of crypto capabilities of the devices
+ * described by the dm_table. However, if the constructed keyslot
+ * manager does not support a superset of the crypto capabilities
+ * supported by the current keyslot manager of the mapped_device,
+ * it returns an error instead, since we don't support restricting
+ * crypto capabilities on table changes. Finally, if the constructed
+ * keyslot manager doesn't actually support any crypto modes at all,
+ * t->ksm is simply set to NULL and 0 is returned.
+ */
+static int dm_table_construct_keyslot_manager(struct dm_table *t)
+{
+ struct dm_keyslot_manager *dksm;
+ struct blk_keyslot_manager *ksm;
+ struct dm_target *ti;
+ unsigned int i;
+ bool ksm_is_empty = true;
+
+ dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
+ if (!dksm)
+ return -ENOMEM;
+ dksm->md = t->md;
+
+ ksm = &dksm->ksm;
+ blk_ksm_init_passthrough(ksm);
+ ksm->ksm_ll_ops = dm_ksm_ll_ops;
+ ksm->max_dun_bytes_supported = UINT_MAX;
+ memset(ksm->crypto_modes_supported, 0xFF,
+ sizeof(ksm->crypto_modes_supported));
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (!dm_target_passes_crypto(ti->type)) {
+ blk_ksm_intersect_modes(ksm, NULL);
+ break;
+ }
+ if (!ti->type->iterate_devices)
+ continue;
+ ti->type->iterate_devices(ti, device_intersect_crypto_modes,
+ ksm);
+ }
+
+ if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
+ DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
+ dm_destroy_keyslot_manager(ksm);
+ return -EINVAL;
+ }
+
+ /*
+ * If the new KSM doesn't actually support any crypto modes, we may as
+ * well represent it with a NULL ksm.
+ */
+ ksm_is_empty = true;
+ for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
+ if (ksm->crypto_modes_supported[i]) {
+ ksm_is_empty = false;
+ break;
+ }
+ }
+
+ if (ksm_is_empty) {
+ dm_destroy_keyslot_manager(ksm);
+ ksm = NULL;
+ }
+
+ /*
+ * t->ksm is only set temporarily while the table is being set
+ * up, and it gets set to NULL after the capabilities have
+ * been transferred to the request_queue.
+ */
+ t->ksm = ksm;
+
+ return 0;
+}
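
The construction starts from an all-capable passthrough keyslot manager and intersects it with each underlying device, so what survives is the common subset; if nothing survives, the table carries no KSM at all. Reduced to plain bitmasks, the shape of the computation looks roughly like this (illustrative only, not the real blk_keyslot_manager layout):

/* Illustrative only: the real code intersects struct blk_keyslot_manager
 * capabilities via blk_ksm_intersect_modes(), not raw bitmasks. */
static unsigned long common_crypto_modes(const unsigned long *dev_modes,
					 unsigned int ndevs)
{
	unsigned long modes = ~0UL;	/* start from "everything supported" */
	unsigned int i;

	for (i = 0; i < ndevs; i++)
		modes &= dev_modes[i];	/* one intersection per device */
	return modes;			/* 0 -> represent the KSM as NULL */
}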
+
+static void dm_update_keyslot_manager(struct request_queue *q,
+ struct dm_table *t)
+{
+ if (!t->ksm)
+ return;
+
+ /* Make the ksm less restrictive */
+ if (!q->ksm) {
+ blk_ksm_register(t->ksm, q);
+ } else {
+ blk_ksm_update_capabilities(q->ksm, t->ksm);
+ dm_destroy_keyslot_manager(t->ksm);
+ }
+ t->ksm = NULL;
+}
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static int dm_table_construct_keyslot_manager(struct dm_table *t)
+{
+ return 0;
+}
+
+void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+{
+}
+
+static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+{
+}
+
+static void dm_update_keyslot_manager(struct request_queue *q,
+ struct dm_table *t)
+{
+}
+
+#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
+
/*
* Prepares the table for use by building the indices,
* setting the type, and allocating mempools.
@@ -1220,6 +1437,12 @@ int dm_table_complete(struct dm_table *t)
return r;
}
+ r = dm_table_construct_keyslot_manager(t);
+ if (r) {
+ DMERR("could not construct keyslot manager.");
+ return r;
+ }
+
r = dm_table_alloc_md_mempools(t, t->md);
if (r)
DMERR("unable to allocate mempools");
@@ -1286,6 +1509,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, whether returned by the iterate_devices_callout_fn passed in or by
+ * iterate_devices() itself internally.
+ *
+ * For some target types (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * early.
+ *
+ * Cases requiring _any_ underlying device to support some kind of attribute
+ * should use an iteration structure like dm_table_any_dev_attr(), or call it
+ * directly. @func should handle the semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices to support some kind of attribute
+ * should use an iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(), or introduce a dm_table_all_devs_attr() that
+ * uses an @anti_func handling the semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+ iterate_devices_callout_fn func, void *data)
+{
+ struct dm_target *ti;
+ unsigned int i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, func, data))
+ return true;
+ }
+
+ return false;
+}
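
This one helper covers both quantifiers: "any device has X" is a direct call with a positive predicate, e.g. dm_table_any_dev_attr(t, device_is_rotational, NULL), while "all devices have X" is the negated call with a counterexample predicate, e.g. !dm_table_any_dev_attr(t, device_not_nowait_capable, NULL), exactly as the conversions below apply it.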
+
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1322,13 +1585,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
return true;
}
-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
enum blk_zoned_model *zoned_model = data;
- return q && blk_queue_zoned_model(q) == *zoned_model;
+ return blk_queue_zoned_model(q) != *zoned_model;
}
static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1345,37 +1608,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+ ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
return false;
}
return true;
}
-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
- return q && blk_queue_zone_sectors(q) == *zone_sectors;
-}
-
-static bool dm_table_matches_zone_sectors(struct dm_table *t,
- unsigned int zone_sectors)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
- return false;
- }
-
- return true;
+ return blk_queue_zone_sectors(q) != *zone_sectors;
}
static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1395,7 +1641,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
if (!zone_sectors || !is_power_of_2(zone_sectors))
return -EINVAL;
- if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+ if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
DMERR("%s: zone sectors is not consistent across all devices",
dm_device_name(table->md));
return -EINVAL;
@@ -1524,7 +1770,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
unsigned long flush = (unsigned long) data;
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && (q->queue_flags & flush);
+ return (q->queue_flags & flush);
}
static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
@@ -1569,29 +1815,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti,
- device_dax_write_cache_enabled, NULL))
- return true;
- }
-
- return false;
-}
-
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_nonrot(q);
+ return !blk_queue_nonrot(q);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1599,24 +1828,7 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !blk_queue_add_random(q);
-}
-
-static bool dm_table_all_devices_attribute(struct dm_table *t,
- iterate_devices_callout_fn func)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, func, NULL))
- return false;
- }
-
- return true;
+ return !blk_queue_add_random(q);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1624,7 +1836,7 @@ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *de
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !q->limits.max_write_same_sectors;
+ return !q->limits.max_write_same_sectors;
}
static bool dm_table_supports_write_same(struct dm_table *t)
@@ -1651,7 +1863,7 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !q->limits.max_write_zeroes_sectors;
+ return !q->limits.max_write_zeroes_sectors;
}
static bool dm_table_supports_write_zeroes(struct dm_table *t)
@@ -1678,7 +1890,7 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !blk_queue_nowait(q);
+ return !blk_queue_nowait(q);
}
static bool dm_table_supports_nowait(struct dm_table *t)
@@ -1705,7 +1917,7 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !blk_queue_discard(q);
+ return !blk_queue_discard(q);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1739,7 +1951,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !blk_queue_secure_erase(q);
+ return !blk_queue_secure_erase(q);
}
static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1767,28 +1979,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_stable_writes(q);
-}
-
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
- return true;
- }
-
- return false;
+ return blk_queue_stable_writes(q);
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1828,22 +2019,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
- if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
set_dax_synchronous(t->md->dax_dev);
}
else
blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax_write_cache(t))
+ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
/* Ensure that all underlying devices are non-rotational. */
- if (dm_table_all_devices_attribute(t, device_is_nonrot))
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
+ if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
if (!dm_table_supports_write_same(t))
q->limits.max_write_same_sectors = 0;
@@ -1855,8 +2046,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
*/
- if (dm_table_requires_stable_pages(t))
+ if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
@@ -1867,7 +2061,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
* have it set.
*/
- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+ if (blk_queue_add_random(q) &&
+ dm_table_any_dev_attr(t, device_is_not_random, NULL))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
/*
@@ -1882,6 +2077,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
#endif
+ dm_update_keyslot_manager(q, t);
blk_queue_update_readahead(q);
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ebb2127f3e2..e75b20480e46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -636,7 +636,7 @@ static int __check_incompat_features(struct thin_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(pmd->bdev->bd_disk))
+ if (bdev_read_only(pmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d5223a0e5cc5..844c4be11768 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -148,6 +148,7 @@ struct dm_writecache {
size_t metadata_sectors;
size_t n_blocks;
uint64_t seq_count;
+ sector_t data_device_sectors;
void *block_start;
struct wc_entry *entries;
unsigned block_size;
@@ -159,14 +160,22 @@ struct dm_writecache {
bool overwrote_committed:1;
bool memory_vmapped:1;
+ bool start_sector_set:1;
bool high_wm_percent_set:1;
bool low_wm_percent_set:1;
bool max_writeback_jobs_set:1;
bool autocommit_blocks_set:1;
bool autocommit_time_set:1;
+ bool max_age_set:1;
bool writeback_fua_set:1;
bool flush_on_suspend:1;
bool cleaner:1;
+ bool cleaner_set:1;
+
+ unsigned high_wm_percent_value;
+ unsigned low_wm_percent_value;
+ unsigned autocommit_time_value;
+ unsigned max_age_value;
unsigned writeback_all;
struct workqueue_struct *writeback_wq;
@@ -523,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
region.bdev = wc->ssd_dev->bdev;
region.sector = 0;
- region.count = PAGE_SIZE;
+ region.count = PAGE_SIZE >> SECTOR_SHIFT;
if (unlikely(region.sector + region.count > wc->metadata_sectors))
region.count = wc->metadata_sectors - region.sector;
@@ -969,6 +978,8 @@ static void writecache_resume(struct dm_target *ti)
wc_lock(wc);
+ wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev);
+
if (WC_MODE_PMEM(wc)) {
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
} else {
@@ -1638,6 +1649,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
void *address = memory_data(wc, e);
persistent_memory_flush_cache(address, block_size);
+
+ if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
+ return true;
+
return bio_add_page(&wb->bio, persistent_memory_page(address),
block_size, persistent_memory_page_offset(address)) != 0;
}
@@ -1709,6 +1724,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
+ } else if (unlikely(!bio_sectors(bio))) {
+ bio->bi_status = BLK_STS_OK;
+ bio_endio(bio);
} else {
submit_bio(bio);
}
@@ -1752,6 +1770,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
e = f;
}
+ if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
+ if (to.sector >= wc->data_device_sectors) {
+ writecache_copy_endio(0, 0, c);
+ continue;
+ }
+ from.count = to.count = wc->data_device_sectors - to.sector;
+ }
+
dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
__writeback_throttle(wc, wbl);
@@ -2004,8 +2030,7 @@ static void writecache_dtr(struct dm_target *ti)
if (wc->ssd_dev)
dm_put_device(ti, wc->ssd_dev);
- if (wc->entries)
- vfree(wc->entries);
+ vfree(wc->entries);
if (wc->memory_map) {
if (WC_MODE_PMEM(wc))
@@ -2020,8 +2045,7 @@ static void writecache_dtr(struct dm_target *ti)
if (wc->dm_io)
dm_io_client_destroy(wc->dm_io);
- if (wc->dirty_bitmap)
- vfree(wc->dirty_bitmap);
+ vfree(wc->dirty_bitmap);
kfree(wc);
}
@@ -2205,6 +2229,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
goto invalid_optional;
wc->start_sector = start_sector;
+ wc->start_sector_set = true;
if (wc->start_sector != start_sector ||
wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
goto invalid_optional;
@@ -2214,6 +2239,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
if (high_wm_percent < 0 || high_wm_percent > 100)
goto invalid_optional;
+ wc->high_wm_percent_value = high_wm_percent;
wc->high_wm_percent_set = true;
} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
string = dm_shift_arg(&as), opt_params--;
@@ -2221,6 +2247,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
if (low_wm_percent < 0 || low_wm_percent > 100)
goto invalid_optional;
+ wc->low_wm_percent_value = low_wm_percent;
wc->low_wm_percent_set = true;
} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
string = dm_shift_arg(&as), opt_params--;
@@ -2240,6 +2267,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (autocommit_msecs > 3600000)
goto invalid_optional;
wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
+ wc->autocommit_time_value = autocommit_msecs;
wc->autocommit_time_set = true;
} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
unsigned max_age_msecs;
@@ -2249,7 +2277,10 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (max_age_msecs > 86400000)
goto invalid_optional;
wc->max_age = msecs_to_jiffies(max_age_msecs);
+ wc->max_age_set = true;
+ wc->max_age_value = max_age_msecs;
} else if (!strcasecmp(string, "cleaner")) {
+ wc->cleaner_set = true;
wc->cleaner = true;
} else if (!strcasecmp(string, "fua")) {
if (WC_MODE_PMEM(wc)) {
@@ -2455,7 +2486,6 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
struct dm_writecache *wc = ti->private;
unsigned extra_args;
unsigned sz = 0;
- uint64_t x;
switch (type) {
case STATUSTYPE_INFO:
@@ -2467,11 +2497,11 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
wc->dev->name, wc->ssd_dev->name, wc->block_size);
extra_args = 0;
- if (wc->start_sector)
+ if (wc->start_sector_set)
extra_args += 2;
- if (wc->high_wm_percent_set && !wc->cleaner)
+ if (wc->high_wm_percent_set)
extra_args += 2;
- if (wc->low_wm_percent_set && !wc->cleaner)
+ if (wc->low_wm_percent_set)
extra_args += 2;
if (wc->max_writeback_jobs_set)
extra_args += 2;
@@ -2479,37 +2509,29 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args += 2;
if (wc->autocommit_time_set)
extra_args += 2;
- if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ if (wc->max_age_set)
extra_args += 2;
- if (wc->cleaner)
+ if (wc->cleaner_set)
extra_args++;
if (wc->writeback_fua_set)
extra_args++;
DMEMIT("%u", extra_args);
- if (wc->start_sector)
+ if (wc->start_sector_set)
DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
- if (wc->high_wm_percent_set && !wc->cleaner) {
- x = (uint64_t)wc->freelist_high_watermark * 100;
- x += wc->n_blocks / 2;
- do_div(x, (size_t)wc->n_blocks);
- DMEMIT(" high_watermark %u", 100 - (unsigned)x);
- }
- if (wc->low_wm_percent_set && !wc->cleaner) {
- x = (uint64_t)wc->freelist_low_watermark * 100;
- x += wc->n_blocks / 2;
- do_div(x, (size_t)wc->n_blocks);
- DMEMIT(" low_watermark %u", 100 - (unsigned)x);
- }
+ if (wc->high_wm_percent_set)
+ DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
+ if (wc->low_wm_percent_set)
+ DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
if (wc->max_writeback_jobs_set)
DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
if (wc->autocommit_blocks_set)
DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
if (wc->autocommit_time_set)
- DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
- if (wc->max_age != MAX_AGE_UNSPECIFIED)
- DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
- if (wc->cleaner)
+ DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
+ if (wc->max_age_set)
+ DMEMIT(" max_age %u", wc->max_age_value);
+ if (wc->cleaner_set)
DMEMIT(" cleaner");
if (wc->writeback_fua_set)
DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
@@ -2519,7 +2541,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index b298fefb022e..039d17b28938 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -819,7 +819,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -862,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -933,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
goto err;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3c3c8b4cb42..50b693d776d6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -28,6 +28,7 @@
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
+#include <linux/keyslot-manager.h>
#define DM_MSG_PREFIX "core"
@@ -105,12 +106,16 @@ struct dm_io {
struct dm_target_io tio;
};
+#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
+#define DM_IO_BIO_OFFSET \
+ (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
+
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
if (!tio->inside_dm_io)
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
- return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
+ return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
+ return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
@@ -118,9 +123,9 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
struct dm_io *io = (struct dm_io *)((char *)data + data_size);
if (io->magic == DM_IO_MAGIC)
- return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
+ return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
BUG_ON(io->magic != DM_TIO_MAGIC);
- return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
+ return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
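
The two offsets encode the front-pad layout that dm_per_bio_data() walks backwards from the clone bio: per-bio data first, then struct dm_io, whose tio member embeds the dm_target_io whose clone member is the bio itself. Hence "bio - DM_IO_BIO_OFFSET - data_size" lands at the start of the per-bio data in the full dm_io case, and "bio - DM_TARGET_IO_BIO_OFFSET - data_size" does the same when there is no enclosing dm_io.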
@@ -148,6 +153,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
+#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
+static int swap_bios = DEFAULT_SWAP_BIOS;
+static int get_swap_bios(void)
+{
+ int latch = READ_ONCE(swap_bios);
+ if (unlikely(latch <= 0))
+ latch = DEFAULT_SWAP_BIOS;
+ return latch;
+}
+
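
DEFAULT_SWAP_BIOS caps in-flight swap I/O at roughly 8 MiB worth of pages: with 4 KiB pages that is 8 * 1048576 / 4096 = 2048 concurrent swap bios. get_swap_bios() re-reads the module parameter on every use and falls back to the default when it has been set to a nonsensical value (<= 0).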
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -562,7 +577,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
- DMWARN_LIMIT(
+ DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
@@ -969,6 +984,11 @@ void disable_write_zeroes(struct mapped_device *md)
limits->max_write_zeroes_sectors = 0;
}
+static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
+{
+ return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
+}
+
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
@@ -977,16 +997,17 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
- !bio->bi_disk->queue->limits.max_discard_sectors)
+ !q->limits.max_discard_sectors)
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !q->limits.max_write_same_sectors)
disable_write_same(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !q->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -996,7 +1017,7 @@ static void clone_endio(struct bio *bio)
*/
if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
sector_t written_sector = bio->bi_iter.bi_sector;
- struct request_queue *q = orig_bio->bi_disk->queue;
+ struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
orig_bio->bi_iter.bi_sector += written_sector & mask;
@@ -1019,6 +1040,11 @@ static void clone_endio(struct bio *bio)
}
}
+ if (unlikely(swap_bios_limit(tio->ti, bio))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
+
free_tio(tio);
dec_pending(io, error);
}
@@ -1128,7 +1154,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
if (!map)
goto out;
- ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+ ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
out:
dm_put_live_table(md, srcu_idx);
@@ -1252,6 +1278,22 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
+{
+ mutex_lock(&md->swap_bios_lock);
+ while (latch < md->swap_bios) {
+ cond_resched();
+ down(&md->swap_bios_semaphore);
+ md->swap_bios--;
+ }
+ while (latch > md->swap_bios) {
+ cond_resched();
+ up(&md->swap_bios_semaphore);
+ md->swap_bios++;
+ }
+ mutex_unlock(&md->swap_bios_lock);
+}
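
__set_swap_bios_limit() resizes a counting semaphore in place: shrinking acquires (down) the surplus permits so they can never be handed out again, growing releases (up) new ones, the mutex serializes concurrent resizers, and cond_resched() keeps a large adjustment from monopolizing the CPU. Callers then simply bracket each swap bio with down()/up() on md->swap_bios_semaphore, as __map_bio() below and clone_endio() above do.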
+
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1271,6 +1313,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ int latch = get_swap_bios();
+ if (unlikely(latch != md->swap_bios))
+ __set_swap_bios_limit(md, latch);
+ down(&md->swap_bios_semaphore);
+ }
+
r = ti->type->map(ti, clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
@@ -1281,10 +1331,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
ret = submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
free_tio(tio);
dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
free_tio(tio);
dec_pending(io, BLK_STS_DM_REQUEUE);
break;
@@ -1422,8 +1480,7 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- flush_bio.bi_disk = ci->io->md->disk;
- bio_associate_blkg(&flush_bio);
+ bio_set_dev(&flush_bio, ci->io->md->disk->part0);
ci->bio = &flush_bio;
ci->sector_count = 0;
@@ -1626,7 +1683,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static blk_qc_t dm_submit_bio(struct bio *bio)
{
- struct mapped_device *md = bio->bi_disk->private_data;
+ struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1718,6 +1775,19 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+{
+ dm_destroy_keyslot_manager(q->ksm);
+}
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+{
+}
+#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
+
static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
@@ -1739,14 +1809,17 @@ static void cleanup_mapped_device(struct mapped_device *md)
put_disk(md->disk);
}
- if (md->queue)
+ if (md->queue) {
+ dm_queue_destroy_keyslot_manager(md->queue);
blk_cleanup_queue(md->queue);
+ }
cleanup_srcu_struct(&md->io_barrier);
mutex_destroy(&md->suspend_lock);
mutex_destroy(&md->type_lock);
mutex_destroy(&md->table_devices_lock);
+ mutex_destroy(&md->swap_bios_lock);
dm_mq_cleanup_mapped_device(md);
}
@@ -1814,6 +1887,10 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
+ md->swap_bios = get_swap_bios();
+ sema_init(&md->swap_bios_semaphore, md->swap_bios);
+ mutex_init(&md->swap_bios_lock);
+
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
@@ -2849,8 +2926,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
- front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
- io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
+ front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
+ io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
if (ret)
goto out;
@@ -3097,6 +3174,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+module_param(swap_bios, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index fffe1e289c53..b441ad772c18 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,7 +73,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
int *blocksize);
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data);
void dm_lock_md_type(struct mapped_device *md);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 68cac7d19278..63ed8329a98d 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -252,7 +252,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue))) {
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ca409428b4fc..21da0c48f6c2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -340,24 +340,6 @@ static int start_readonly;
*/
static bool create_on_open = true;
-struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->bio_set))
- return bio_alloc(gfp_mask, nr_iovecs);
-
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
-}
-EXPORT_SYMBOL_GPL(bio_alloc_mddev);
-
-static struct bio *md_bio_alloc_sync(struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->sync_set))
- return bio_alloc(GFP_NOIO, 1);
-
- return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
-}
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -463,8 +445,8 @@ struct md_io {
struct mddev *mddev;
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
+ struct block_device *orig_bi_bdev;
unsigned long start_time;
- struct block_device *part;
};
static void md_end_io(struct bio *bio)
@@ -472,7 +454,7 @@ static void md_end_io(struct bio *bio)
struct md_io *md_io = bio->bi_private;
struct mddev *mddev = md_io->mddev;
- part_end_io_acct(md_io->part, bio, md_io->start_time);
+ bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
bio->bi_end_io = md_io->orig_bi_end_io;
bio->bi_private = md_io->orig_bi_private;
@@ -486,7 +468,7 @@ static void md_end_io(struct bio *bio)
static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
- struct mddev *mddev = bio->bi_disk->private_data;
+ struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
@@ -514,12 +496,12 @@ static blk_qc_t md_submit_bio(struct bio *bio)
md_io->mddev = mddev;
md_io->orig_bi_end_io = bio->bi_end_io;
md_io->orig_bi_private = bio->bi_private;
+ md_io->orig_bi_bdev = bio->bi_bdev;
bio->bi_end_io = md_end_io;
bio->bi_private = md_io;
- md_io->start_time = part_start_io_acct(mddev->gendisk,
- &md_io->part, bio);
+ md_io->start_time = bio_start_io_acct(bio);
}
/* bio could be mergeable after passing to underlayer */
@@ -613,7 +595,7 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
+ bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bio_set_dev(bi, rdev->bdev);
@@ -639,8 +621,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+ spin_lock_irq(&mddev->lock);
mddev->prev_flush_start = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
@@ -997,7 +981,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = md_bio_alloc_sync(mddev);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
@@ -1029,29 +1013,29 @@ int md_super_wait(struct mddev *mddev)
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, bool metadata_op)
{
- struct bio *bio = md_bio_alloc_sync(rdev->mddev);
- int ret;
+ struct bio bio;
+ struct bio_vec bvec;
+
+ bio_init(&bio, &bvec, 1);
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(bio, rdev->meta_bdev);
+ bio_set_dev(&bio, rdev->meta_bdev);
else
- bio_set_dev(bio, rdev->bdev);
- bio_set_op_attrs(bio, op, op_flags);
+ bio_set_dev(&bio, rdev->bdev);
+ bio.bi_opf = op | op_flags;
if (metadata_op)
- bio->bi_iter.bi_sector = sector + rdev->sb_start;
+ bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
+ bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio_add_page(bio, page, size, 0);
+ bio.bi_iter.bi_sector = sector + rdev->data_offset;
+ bio_add_page(&bio, page, size, 0);
- submit_bio_wait(bio);
+ submit_bio_wait(&bio);
- ret = !bio->bi_status;
- bio_put(bio);
- return ret;
+ return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);
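sync_page_io() performs one synchronous single-segment transfer, so the heap-allocated bio and its bio_put() cleanup can be replaced by a bio on the stack: bio_init() wires up the single inline bio_vec and submit_bio_wait() blocks until bi_status is set. A minimal sketch of the same pattern (read_one_page_sync is a hypothetical name, assuming the bio_init(bio, table, max_vecs) signature of this kernel era):

	static int read_one_page_sync(struct block_device *bdev,
				      sector_t sector, struct page *page)
	{
		struct bio bio;
		struct bio_vec bvec;

		bio_init(&bio, &bvec, 1);	/* one inline segment, on the stack */
		bio_set_dev(&bio, bdev);
		bio.bi_opf = REQ_OP_READ;
		bio.bi_iter.bi_sector = sector;
		bio_add_page(&bio, page, PAGE_SIZE, 0);
		submit_bio_wait(&bio);		/* sleeps until completion */
		return bio.bi_status ? -EIO : 0; /* no bio_put() for stack bios */
	}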
@@ -2415,6 +2399,12 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_add_rdev);
+static bool rdev_read_only(struct md_rdev *rdev)
+{
+ return bdev_read_only(rdev->bdev) ||
+ (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
+}
+
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
@@ -2424,8 +2414,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
- if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
- mddev->pers)
+ if (rdev_read_only(rdev) && mddev->pers)
return -EROFS;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
@@ -5859,9 +5848,7 @@ int md_run(struct mddev *mddev)
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
- if (mddev->ro != 1 &&
- (bdev_read_only(rdev->bdev) ||
- bdev_read_only(rdev->meta_bdev))) {
+ if (mddev->ro != 1 && rdev_read_only(rdev)) {
mddev->ro = 1;
if (mddev->gendisk)
set_disk_ro(mddev->gendisk, 1);
@@ -6156,7 +6143,7 @@ static int restart_array(struct mddev *mddev)
if (test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
has_journal = true;
- if (bdev_read_only(rdev->bdev))
+ if (rdev_read_only(rdev))
has_readonly = true;
}
rcu_read_unlock();
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 34070ab30a8a..bcbba1b5ec4a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -556,7 +556,7 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
@@ -742,8 +742,6 @@ extern void md_rdev_clear(struct md_rdev *rdev);
extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
-extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
@@ -793,14 +791,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 564896659dd4..fe073d92f01e 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -100,7 +100,7 @@ struct dm_block *shadow_parent(struct shadow_spine *s);
int shadow_has_parent(struct shadow_spine *s);
-int shadow_root(struct shadow_spine *s);
+dm_block_t shadow_root(struct shadow_spine *s);
/*
* Some inlines.
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index e03cb9e48773..8a2bfbfb218b 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -235,7 +235,7 @@ int shadow_has_parent(struct shadow_spine *s)
return s->count >= 2;
}
-int shadow_root(struct shadow_spine *s)
+dm_block_t shadow_root(struct shadow_spine *s)
{
return s->root;
}
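shadow_root() returns s->root, a dm_block_t (a 64-bit block address); declaring the return type int truncated any block number that does not fit in 31 bits. A toy userspace illustration of the truncation the new signature prevents (pure demonstration, not dm code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t dm_block_t;

	static int        bad_root(dm_block_t b)  { return b; } /* truncates */
	static dm_block_t good_root(dm_block_t b) { return b; } /* preserves */

	int main(void)
	{
		dm_block_t b = 0x100000001ULL;	/* needs more than 32 bits */
		printf("%d vs %llu\n", bad_root(b),
		       (unsigned long long)good_root(b));
		return 0;
	}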
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c0347997f6ff..d2378765dc15 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -794,13 +794,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void *)bio->bi_disk;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1104,7 +1104,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+ behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1520,7 +1520,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
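Stashing the rdev pointer in mbio->bi_bdev is a deliberate reuse of the field: while the bio sits on the plug/pending list it is not yet a real I/O, so the pointer-sized slot carries the target rdev until flush_bio_list() recovers it and bio_set_dev() overwrites the field with the actual block_device. The cast pair above and in flush_bio_list() is the whole protocol:

	/* enqueue: remember which mirror this bio is destined for */
	mbio->bi_bdev = (void *)conf->mirrors[i].rdev;

	/* flush: recover the rdev, then make bi_bdev real again */
	struct md_rdev *rdev = (void *)bio->bi_bdev;
	bio_set_dev(bio, rdev->bdev);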
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c5d88ef6a45c..a9ae7d113492 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -882,13 +882,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1075,13 +1075,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1253,7 +1253,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)rdev;
+ mbio->bi_bdev = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@@ -3003,7 +3003,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
- * have bi_end_io, bi_sector, bi_disk set,
+ * have bi_end_io, bi_sector, bi_bdev set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.
@@ -4531,7 +4531,7 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
+ read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
@@ -4539,10 +4539,6 @@ read_more:
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
- read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
- read_bio->bi_status = 0;
- read_bio->bi_vcnt = 0;
- read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index d0f540296fe9..e8c118e05dfd 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(rdev->bdev);
out:
__free_page(page);
return ret;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3a90cc0e43ca..5d57a5bd171f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5310,7 +5310,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- WARN_ON_ONCE(bio->bi_partno);
+ WARN_ON_ONCE(bio->bi_bdev->bd_partno);
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
@@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi)
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
struct r5conf *conf = mddev->private;
- int dd_idx;
- struct bio* align_bi;
+ struct bio *align_bio;
struct md_rdev *rdev;
- sector_t end_sector;
+ sector_t sector, end_sector, first_bad;
+ int bad_sectors, dd_idx;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("%s: non aligned\n", __func__);
return 0;
}
- /*
- * use bio_clone_fast to make a copy of the bio
- */
- align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
- if (!align_bi)
- return 0;
- /*
- * set bi_end_io to a new function, and set bi_private to the
- * original bio.
- */
- align_bi->bi_end_io = raid5_align_endio;
- align_bi->bi_private = raid_bio;
- /*
- * compute position
- */
- align_bi->bi_iter.bi_sector =
- raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
- 0, &dd_idx, NULL);
- end_sector = bio_end_sector(align_bi);
+ sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+ &dd_idx, NULL);
+ end_sector = bio_end_sector(raid_bio);
+
rcu_read_lock();
+ if (r5c_big_stripe_cached(conf, sector))
+ goto out_rcu_unlock;
+
rdev = rcu_dereference(conf->disks[dd_idx].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset < end_sector) {
rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev &&
- (test_bit(Faulty, &rdev->flags) ||
+ if (!rdev)
+ goto out_rcu_unlock;
+ if (test_bit(Faulty, &rdev->flags) ||
!(test_bit(In_sync, &rdev->flags) ||
- rdev->recovery_offset >= end_sector)))
- rdev = NULL;
+ rdev->recovery_offset >= end_sector))
+ goto out_rcu_unlock;
}
- if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
- rcu_read_unlock();
- bio_put(align_bi);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+
+ align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+ bio_set_dev(align_bio, rdev->bdev);
+ align_bio->bi_end_io = raid5_align_endio;
+ align_bio->bi_private = raid_bio;
+ align_bio->bi_iter.bi_sector = sector;
+
+ raid_bio->bi_next = (void *)rdev;
+
+ if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+ &bad_sectors)) {
+ bio_put(align_bio);
+ rdev_dec_pending(rdev, mddev);
return 0;
}
- if (rdev) {
- sector_t first_bad;
- int bad_sectors;
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- raid_bio->bi_next = (void*)rdev;
- bio_set_dev(align_bi, rdev->bdev);
-
- if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
- bio_sectors(align_bi),
- &first_bad, &bad_sectors)) {
- bio_put(align_bi);
- rdev_dec_pending(rdev, mddev);
- return 0;
- }
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bio->bi_iter.bi_sector += rdev->data_offset;
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_iter.bi_sector += rdev->data_offset;
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+ conf->device_lock);
+ atomic_inc(&conf->active_aligned_reads);
+ spin_unlock_irq(&conf->device_lock);
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0,
- conf->device_lock);
- atomic_inc(&conf->active_aligned_reads);
- spin_unlock_irq(&conf->device_lock);
+ if (mddev->gendisk)
+ trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+ raid_bio->bi_iter.bi_sector);
+ submit_bio_noacct(align_bio);
+ return 1;
- if (mddev->gendisk)
- trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_iter.bi_sector);
- submit_bio_noacct(align_bi);
- return 1;
- } else {
- rcu_read_unlock();
- bio_put(align_bi);
- return 0;
- }
+out_rcu_unlock:
+ rcu_read_unlock();
+ return 0;
}
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
@@ -7661,7 +7643,7 @@ static int raid5_run(struct mddev *mddev)
}
/* device size must be a multiple of chunk size */
- mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
+ mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1);
mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > dirty_parity_disks &&
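The cast matters because of integer promotion: chunk_sectors is an unsigned int, so ~(chunk_sectors - 1) is computed in 32 bits and only then zero-extended to the 64-bit sector_t, which zeroes the top 32 bits of dev_sectors on devices past 2^32 sectors. Casting first widens the value before the complement. A runnable userspace demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dev_sectors   = 0x200000001ULL; /* > 2^32 sectors */
		unsigned chunk_sectors = 1024;

		uint64_t bad  = dev_sectors & ~(chunk_sectors - 1);
		uint64_t good = dev_sectors & ~((uint64_t)chunk_sectors - 1);

		/* prints bad=0 good=0x200000000 */
		printf("bad=%#llx good=%#llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}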
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index d5d5d28d0b36..79fa36de8a04 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1296,7 +1296,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/*
* If we are unable to get an OK or a NACK after max_retries attempts
* (and note that each attempt already consists of four polls), then
- * then we assume that something is really weird and that it is not a
+ * we assume that something is really weird and that it is not a
* good idea to try and claim this logical address.
*/
if (i == max_retries)
@@ -1735,7 +1735,7 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
u8 *features = log_addrs->features[i];
bool op_is_dev_features = false;
- unsigned j;
+ unsigned int j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index f922a2196b2b..769e6b4cddce 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -40,7 +40,7 @@ static __poll_t cec_poll(struct file *filp,
poll_wait(filp, &fh->wait, poll);
if (!cec_is_registered(adap))
- return EPOLLERR | EPOLLHUP;
+ return EPOLLERR | EPOLLHUP | EPOLLPRI;
mutex_lock(&adap->lock);
if (adap->is_configured &&
adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
index 3a947159b25a..ea6f8ee8161c 100644
--- a/drivers/media/cec/platform/Makefile
+++ b/drivers/media/cec/platform/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig
index edbc99ebba87..d2223a12c95f 100644
--- a/drivers/media/common/videobuf2/Kconfig
+++ b/drivers/media/common/videobuf2/Kconfig
@@ -9,7 +9,6 @@ config VIDEOBUF2_V4L2
config VIDEOBUF2_MEMOPS
tristate
- select FRAME_VECTOR
config VIDEOBUF2_DMA_CONTIG
tristate
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile
index 77bebe8b202f..54306f8d096c 100644
--- a/drivers/media/common/videobuf2/Makefile
+++ b/drivers/media/common/videobuf2/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
videobuf2-common-objs := videobuf2-core.o
+videobuf2-common-objs += frame_vector.o
ifeq ($(CONFIG_TRACEPOINTS),y)
videobuf2-common-objs += vb2-trace.o
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
new file mode 100644
index 000000000000..a0e65481a201
--- /dev/null
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+#include <media/frame_vector.h>
+
+/**
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start: starting user address
+ * @nr_frames: number of pages / pfns from start to map
+ * @vec:	structure which receives pages / pfns of the addresses mapped.
+ *		It should have space for at least @nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills the @vec
+ * structure with page frame numbers or page pointers to the corresponding
+ * pages (the choice depends on the type of the vma underlying the virtual
+ * address). If @start belongs to a normal vma, the function grabs a
+ * reference to each of the pages to pin them in memory. If @start belongs
+ * to a VM_IO | VM_PFNMAP vma, we don't touch page structures and the caller
+ * must make sure the pfns aren't reused for anything else while they are in
+ * use.
+ *
+ * The function returns the number of pages mapped, which may be less than
+ * @nr_frames. In particular, mapping stops at the first vma whose type
+ * differs from the one underlying the start of the range. When not even a
+ * single page can be mapped, an error is returned.
+ *
+ * This function takes care of grabbing mmap_lock as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+ struct frame_vector *vec)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ret = 0;
+ int err;
+
+ if (nr_frames == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+ nr_frames = vec->nr_allocated;
+
+ start = untagged_addr(start);
+
+ ret = pin_user_pages_fast(start, nr_frames,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ (struct page **)(vec->ptrs));
+ if (ret > 0) {
+ vec->got_ref = true;
+ vec->is_pfns = false;
+ goto out_unlocked;
+ }
+
+ mmap_read_lock(mm);
+ vec->got_ref = false;
+ vec->is_pfns = true;
+ ret = 0;
+ do {
+ unsigned long *nums = frame_vector_pfns(vec);
+
+ vma = find_vma_intersection(mm, start, start + 1);
+ if (!vma)
+ break;
+
+ while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
+ err = follow_pfn(vma, start, &nums[ret]);
+ if (err) {
+ if (ret == 0)
+ ret = err;
+ goto out;
+ }
+ start += PAGE_SIZE;
+ ret++;
+ }
+ /* Bail out if VMA doesn't completely cover the tail page. */
+ if (start < vma->vm_end)
+ break;
+ } while (ret < nr_frames);
+out:
+ mmap_read_unlock(mm);
+out_unlocked:
+ if (!ret)
+ ret = -EFAULT;
+ if (ret > 0)
+ vec->nr_frames = ret;
+ return ret;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
+ * them
+ * @vec: frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+ struct page **pages;
+
+ if (!vec->got_ref)
+ goto out;
+ pages = frame_vector_pages(vec);
+ /*
+	 * frame_vector_pages() may have needed to do a conversion when
+	 * get_vaddr_frames() got pages but vec was later converted to pfns.
+	 * But it shouldn't really fail to convert the pfns back...
+ */
+ if (WARN_ON(IS_ERR(pages)))
+ goto out;
+
+ unpin_user_pages(pages, vec->nr_frames);
+ vec->got_ref = false;
+out:
+ vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec: frame vector to convert
+ *
+ * Convert @vec to contain array of page pointers. If the conversion is
+ * successful, return 0. Otherwise return an error. Note that we do not grab
+ * page references for the page structures.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+ int i;
+ unsigned long *nums;
+ struct page **pages;
+
+ if (!vec->is_pfns)
+ return 0;
+ nums = frame_vector_pfns(vec);
+ for (i = 0; i < vec->nr_frames; i++)
+ if (!pfn_valid(nums[i]))
+ return -EINVAL;
+ pages = (struct page **)nums;
+ for (i = 0; i < vec->nr_frames; i++)
+ pages[i] = pfn_to_page(nums[i]);
+ vec->is_pfns = false;
+ return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec: frame vector to convert
+ *
+ * Convert @vec to contain array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+ int i;
+ unsigned long *nums;
+ struct page **pages;
+
+ if (vec->is_pfns)
+ return;
+ pages = (struct page **)(vec->ptrs);
+ nums = (unsigned long *)pages;
+ for (i = 0; i < vec->nr_frames; i++)
+ nums[i] = page_to_pfn(pages[i]);
+ vec->is_pfns = true;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames: number of pfns slots we should reserve
+ *
+ * Allocate and initialize a struct frame_vector able to hold @nr_frames
+ * pfns.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+ struct frame_vector *vec;
+ int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+ if (WARN_ON_ONCE(nr_frames == 0))
+ return NULL;
+ /*
+	 * This is absurdly high. It's here just to avoid strange effects when
+	 * the arithmetic overflows.
+ */
+ if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
+ return NULL;
+ /*
+ * Avoid higher order allocations, use vmalloc instead. It should
+ * be rare anyway.
+ */
+ vec = kvmalloc(size, GFP_KERNEL);
+ if (!vec)
+ return NULL;
+ vec->nr_allocated = nr_frames;
+ vec->nr_frames = 0;
+ return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec: Frame vector to free
+ *
+ * Free structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+ /* Make sure put_vaddr_frames() got called properly... */
+ VM_BUG_ON(vec->nr_frames > 0);
+ kvfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
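get_vaddr_frames() tries the fast path first: pin_user_pages_fast() with FOLL_LONGTERM takes long-term pins on ordinary pages, and only when that fails (typically for VM_IO/VM_PFNMAP mappings) does it fall back to walking VMAs with follow_pfn() under mmap_read_lock(). A hedged sketch of the create/pin/unpin lifecycle a videobuf2-style caller goes through (error handling trimmed):

	struct frame_vector *vec = frame_vector_create(nr); /* kvmalloc'd */
	int got = get_vaddr_frames(uaddr & PAGE_MASK, nr, vec);

	if (got == nr && frame_vector_to_pages(vec) == 0) {
		struct page **pages = frame_vector_pages(vec);
		/* ... use the pinned pages ... */
	}

	put_vaddr_frames(vec);     /* unpins only if got_ref was set */
	frame_vector_destroy(vec); /* kvfree; BUGs if frames still held */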
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 89e38392509c..02281d13505f 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2374,13 +2374,20 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
struct vb2_buffer *vb = NULL;
unsigned long flags;
+ /*
+ * poll_wait() MUST be called on the first invocation on all the
+ * potential queues of interest, even if we are not interested in their
+ * events during this first call. Failure to do so will result in
+ * queue's events to be ignored because the poll_table won't be capable
+ * of adding new wait queues thereafter.
+ */
+ poll_wait(file, &q->done_wq, wait);
+
if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
return 0;
if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
return 0;
- poll_wait(file, &q->done_wq, wait);
-
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
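Moving poll_wait() ahead of the early returns matters because a poll_table can only register wait queues on its first pass; if the handler returns before calling poll_wait(), the file is never added to q->done_wq and subsequent events on that queue are silently lost. The canonical shape, as a minimal sketch (data_ready() is a hypothetical predicate):

	static __poll_t my_poll(struct file *file, poll_table *wait)
	{
		struct my_queue *q = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &q->done_wq, wait); /* register first, always */

		if (data_ready(q))
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}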
diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c
index 6e9e05153f4e..9dd6c27162f4 100644
--- a/drivers/media/common/videobuf2/videobuf2-memops.c
+++ b/drivers/media/common/videobuf2/videobuf2-memops.c
@@ -40,7 +40,6 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
- unsigned int flags = FOLL_FORCE | FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -48,7 +47,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 96d3b2b2aa31..7e96f67c60ba 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return -EINVAL;
}
} else {
- length = (b->memory == VB2_MEMORY_USERPTR ||
- b->memory == VB2_MEMORY_DMABUF)
+ length = (b->memory == VB2_MEMORY_USERPTR)
? b->length : vb->planes[0].length;
if (b->bytesused > length)
@@ -488,11 +487,6 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
!q->ops->buf_out_validate))
return -EINVAL;
- if (b->request_fd < 0) {
- dprintk(q, 1, "%s: request_fd < 0\n", opname);
- return -EINVAL;
- }
-
req = media_request_get_by_fd(mdev, b->request_fd);
if (IS_ERR(req)) {
dprintk(q, 1, "%s: invalid request_fd\n", opname);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 06ea30a689d7..fb35697dd93c 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -984,6 +984,7 @@ static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
fe->ops.info.symbol_rate_max);
return -EINVAL;
}
+ break;
default:
break;
}
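This one-line break plugs an unintended fallthrough: the preceding case ran its symbol-rate check and then fell into the default label, which happens to be harmless here but trips -Wimplicit-fallthrough. Several frontend hunks below (cx24120, dib0090, drxk_hard, m88rs2000) apply the same cure. In miniature:

	switch (cmd) {
	case CMD_A:
		do_a();
		break;	/* without this, CMD_A silently runs do_b() too */
	case CMD_B:
		do_b();
		break;
	default:
		break;
	}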
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 643b851a6b60..3468b07b62fe 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -515,7 +515,7 @@ config DVB_RTL2830
config DVB_RTL2832
tristate "Realtek RTL2832 DVB-T"
depends on DVB_CORE && I2C && I2C_MUX
- select REGMAP
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
@@ -708,6 +708,15 @@ config DVB_S5H1411
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+config DVB_MXL692
+ tristate "MaxLinear MXL692 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ MaxLinear MxL692 is a combo tuner-demodulator that
+ supports ATSC 8VSB and QAM modes. Say Y when you want to
+ support this frontend.
+
comment "ISDB-T (terrestrial) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e9179162658c..b9f47d68e14e 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
+obj-$(CONFIG_DVB_MXL692) += mxl692.o
obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBH25) += lnbh25.o
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 6a8d78b6edac..785c49b3d307 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -125,6 +125,7 @@ static int af9033_init(struct dvb_frontend *fe)
if (i == ARRAY_SIZE(clock_adc_lut)) {
dev_err(&client->dev, "Couldn't find ADC config for clock %d\n",
dev->cfg.clock);
+ ret = -ENODEV;
goto err;
}
@@ -852,6 +853,7 @@ static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
*snr = *snr * 0xffff / 32;
break;
default:
+ ret = -EINVAL;
goto err;
}
}
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 2464b63fe0cf..d8acd582c711 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -363,6 +363,7 @@ static void cx24120_check_cmd(struct cx24120_state *state, u8 id)
case CMD_DISEQC_BURST:
cx24120_msg_mpeg_output_global_config(state, 0);
/* Old driver would do a msleep(100) here */
+ return;
default:
return;
}
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 758c95bc3b11..5431f922f55e 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3338,7 +3338,7 @@ static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
cxd2841er_tuner_set(fe);
cxd2841er_tune_done(priv);
- timeout = ((3000000 + (symbol_rate - 1)) / symbol_rate) + 150;
+ timeout = DIV_ROUND_UP(3000000, symbol_rate) + 150;
i = 0;
do {
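DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), exactly the open-coded ceiling division it replaces, so the timeout is unchanged in value and merely clearer:

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* e.g. symbol_rate = 7000000: ceil(3000000/7000000) = 1,
	 * so timeout = 1 + 150 = 151 with either spelling */
	timeout = DIV_ROUND_UP(3000000, symbol_rate) + 150;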
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 08a85831e917..903da33642df 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1765,6 +1765,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
dib0090_write_reg(state, 0x1f, 0x7);
*tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
state->calibrate &= ~DC_CAL;
+ break;
+
default:
break;
}
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index a57470bf71bf..d7fc2595f15b 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -3294,6 +3294,7 @@ static int dvbt_sc_command(struct drxk_state *state,
case OFDM_SC_RA_RAM_CMD_USER_IO:
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
status = read16(state, OFDM_SC_RA_RAM_PARAM0__A, &(param0));
+ break;
/* All commands yielding 0 results */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_SET_TIMER:
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 39cbb3ea1c9d..b294ba87e934 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -390,6 +390,7 @@ static int m88rs2000_tab_set(struct m88rs2000_state *state,
case 0xff:
if (tab[i].reg == 0xaa && tab[i].val == 0xff)
return 0;
+ break;
case 0x00:
break;
default:
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
new file mode 100644
index 000000000000..86af831c0176
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -0,0 +1,1378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the MaxLinear MxL69x family of combo tuners/demods
+ *
+ * Copyright (C) 2020 Brad Love <brad@nextdimension.cc>
+ *
+ * based on code:
+ * Copyright (c) 2016 MaxLinear, Inc. All rights reserved
+ * which was released under GPL V2
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/i2c-mux.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+
+#include "mxl692.h"
+#include "mxl692_defs.h"
+
+static const struct dvb_frontend_ops mxl692_ops;
+
+struct mxl692_dev {
+ struct dvb_frontend fe;
+ struct i2c_client *i2c_client;
+ struct mutex i2c_lock; /* i2c command mutex */
+ enum MXL_EAGLE_DEMOD_TYPE_E demod_type;
+ enum MXL_EAGLE_POWER_MODE_E power_mode;
+ u32 current_frequency;
+ int device_type;
+ int seqnum;
+ int init_done;
+};
+
+static int mxl692_i2c_write(struct mxl692_dev *dev, u8 *buffer, u16 buf_len)
+{
+ int ret = 0;
+ struct i2c_msg msg = {
+ .addr = dev->i2c_client->addr,
+ .flags = 0,
+ .buf = buffer,
+ .len = buf_len
+ };
+
+ ret = i2c_transfer(dev->i2c_client->adapter, &msg, 1);
+ if (ret != 1)
+ dev_dbg(&dev->i2c_client->dev, "i2c write error!\n");
+
+ return ret;
+}
+
+static int mxl692_i2c_read(struct mxl692_dev *dev, u8 *buffer, u16 buf_len)
+{
+ int ret = 0;
+ struct i2c_msg msg = {
+ .addr = dev->i2c_client->addr,
+ .flags = I2C_M_RD,
+ .buf = buffer,
+ .len = buf_len
+ };
+
+ ret = i2c_transfer(dev->i2c_client->adapter, &msg, 1);
+ if (ret != 1)
+ dev_dbg(&dev->i2c_client->dev, "i2c read error!\n");
+
+ return ret;
+}
+
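+/*
+ * Byte-swap @size bytes of @d in place, 32 bits at a time, using the
+ * XOR-swap idiom (no temporary): each aligned group d[i..i+3] becomes
+ * d[i+3..i].  The trailing switch reverses a 2- or 3-byte tail (a
+ * 1-byte tail needs no swap).  Returning @size lets callers advance
+ * their buffer cursor by the amount converted.
+ */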
+static int convert_endian(u32 size, u8 *d)
+{
+ u32 i;
+
+ for (i = 0; i < (size & ~3); i += 4) {
+ d[i + 0] ^= d[i + 3];
+ d[i + 3] ^= d[i + 0];
+ d[i + 0] ^= d[i + 3];
+
+ d[i + 1] ^= d[i + 2];
+ d[i + 2] ^= d[i + 1];
+ d[i + 1] ^= d[i + 2];
+ }
+
+ switch (size & 3) {
+ case 0:
+ case 1:
+ /* do nothing */
+ break;
+ case 2:
+ d[i + 0] ^= d[i + 1];
+ d[i + 1] ^= d[i + 0];
+ d[i + 0] ^= d[i + 1];
+ break;
+
+ case 3:
+ d[i + 0] ^= d[i + 2];
+ d[i + 2] ^= d[i + 0];
+ d[i + 0] ^= d[i + 2];
+ break;
+ }
+ return size;
+}
+
+static int convert_endian_n(int n, u32 size, u8 *d)
+{
+ int i, count = 0;
+
+ for (i = 0; i < n; i += size)
+ count += convert_endian(size, d + i);
+ return count;
+}
+
+static void mxl692_tx_swap(enum MXL_EAGLE_OPCODE_E opcode, u8 *buffer)
+{
+#ifdef __BIG_ENDIAN
+ return;
+#endif
+ buffer += MXL_EAGLE_HOST_MSG_HEADER_SIZE; /* skip API header */
+
+ switch (opcode) {
+ case MXL_EAGLE_OPCODE_DEVICE_INTR_MASK_SET:
+ case MXL_EAGLE_OPCODE_TUNER_CHANNEL_TUNE_SET:
+ case MXL_EAGLE_OPCODE_SMA_TRANSMIT_SET:
+ buffer += convert_endian(sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_PARAMS_SET:
+ buffer += 5;
+ buffer += convert_endian(2 * sizeof(u32), buffer);
+ break;
+ default:
+ /* no swapping - all get opcodes */
+ /* ATSC/OOB no swapping */
+ break;
+ }
+}
+
+static void mxl692_rx_swap(enum MXL_EAGLE_OPCODE_E opcode, u8 *buffer)
+{
+#ifdef __BIG_ENDIAN
+ return;
+#endif
+ buffer += MXL_EAGLE_HOST_MSG_HEADER_SIZE; /* skip API header */
+
+ switch (opcode) {
+ case MXL_EAGLE_OPCODE_TUNER_AGC_STATUS_GET:
+ buffer++;
+ buffer += convert_endian(2 * sizeof(u16), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_ATSC_STATUS_GET:
+ buffer += convert_endian_n(2, sizeof(u16), buffer);
+ buffer += convert_endian(sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_ATSC_ERROR_COUNTERS_GET:
+ buffer += convert_endian(3 * sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_ATSC_EQUALIZER_FILTER_FFE_TAPS_GET:
+ buffer += convert_endian_n(24, sizeof(u16), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_STATUS_GET:
+ buffer += 8;
+ buffer += convert_endian_n(2, sizeof(u16), buffer);
+ buffer += convert_endian(sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_ERROR_COUNTERS_GET:
+ buffer += convert_endian(7 * sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_CONSTELLATION_VALUE_GET:
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_START_GET:
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_MIDDLE_GET:
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_END_GET:
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_SPUR_START_GET:
+ buffer += convert_endian_n(24, sizeof(u16), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_SPUR_END_GET:
+ buffer += convert_endian_n(8, sizeof(u16), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_FFE_GET:
+ buffer += convert_endian_n(17, sizeof(u16), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_OOB_ERROR_COUNTERS_GET:
+ buffer += convert_endian(3 * sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_OOB_STATUS_GET:
+ buffer += convert_endian_n(2, sizeof(u16), buffer);
+ buffer += convert_endian(sizeof(u32), buffer);
+ break;
+ case MXL_EAGLE_OPCODE_SMA_RECEIVE_GET:
+ buffer += convert_endian(sizeof(u32), buffer);
+ break;
+ default:
+ /* no swapping - all set opcodes */
+ break;
+ }
+}
+
+static u32 mxl692_checksum(u8 *buffer, u32 size)
+{
+ u32 ix, div_size;
+ u32 cur_cksum = 0;
+ __be32 *buf;
+
+ div_size = DIV_ROUND_UP(size, 4);
+
+ buf = (__be32 *)buffer;
+ for (ix = 0; ix < div_size; ix++)
+ cur_cksum += be32_to_cpu(buf[ix]);
+
+ cur_cksum ^= 0xDEADBEEF;
+
+ return cur_cksum;
+}
+
+static int mxl692_validate_fw_header(struct mxl692_dev *dev,
+ const u8 *buffer, u32 buf_len)
+{
+ int status = 0;
+ u32 ix, temp;
+ __be32 *local_buf = NULL;
+ u8 temp_cksum = 0;
+ const u8 fw_hdr[] = { 0x4D, 0x31, 0x10, 0x02, 0x40, 0x00, 0x00, 0x80 };
+
+ if (memcmp(buffer, fw_hdr, 8) != 0) {
+ status = -EINVAL;
+ goto err_finish;
+ }
+
+ local_buf = (__be32 *)(buffer + 8);
+ temp = be32_to_cpu(*local_buf);
+
+ if ((buf_len - 16) != temp >> 8) {
+ status = -EINVAL;
+ goto err_finish;
+ }
+
+ for (ix = 16; ix < buf_len; ix++)
+ temp_cksum += buffer[ix];
+
+ if (temp_cksum != buffer[11])
+ status = -EINVAL;
+
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "failed\n");
+ return status;
+}
+
+static int mxl692_write_fw_block(struct mxl692_dev *dev, const u8 *buffer,
+ u32 buf_len, u32 *index)
+{
+ int status = 0;
+ u32 ix = 0, total_len = 0, addr = 0, chunk_len = 0, prevchunk_len = 0;
+ u8 local_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {}, *plocal_buf = NULL;
+ int payload_max = MXL_EAGLE_MAX_I2C_PACKET_SIZE - MXL_EAGLE_I2C_MHEADER_SIZE;
+
+ ix = *index;
+
+ if (buffer[ix] == 0x53) {
+ total_len = buffer[ix + 1] << 16 | buffer[ix + 2] << 8 | buffer[ix + 3];
+ total_len = (total_len + 3) & ~3;
+ addr = buffer[ix + 4] << 24 | buffer[ix + 5] << 16 |
+ buffer[ix + 6] << 8 | buffer[ix + 7];
+ ix += MXL_EAGLE_FW_SEGMENT_HEADER_SIZE;
+
+ while ((total_len > 0) && (status == 0)) {
+ plocal_buf = local_buf;
+ chunk_len = (total_len < payload_max) ? total_len : payload_max;
+
+ *plocal_buf++ = 0xFC;
+ *plocal_buf++ = chunk_len + sizeof(u32);
+
+ *(u32 *)plocal_buf = addr + prevchunk_len;
+#ifdef __BIG_ENDIAN
+ convert_endian(sizeof(u32), plocal_buf);
+#endif
+ plocal_buf += sizeof(u32);
+
+ memcpy(plocal_buf, &buffer[ix], chunk_len);
+ convert_endian(chunk_len, plocal_buf);
+ if (mxl692_i2c_write(dev, local_buf,
+ (chunk_len + MXL_EAGLE_I2C_MHEADER_SIZE)) < 0) {
+ status = -EREMOTEIO;
+ break;
+ }
+
+ prevchunk_len += chunk_len;
+ total_len -= chunk_len;
+ ix += chunk_len;
+ }
+ *index = ix;
+ } else {
+ status = -EINVAL;
+ }
+
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+
+ return status;
+}
+
+static int mxl692_memwrite(struct mxl692_dev *dev, u32 addr,
+ u8 *buffer, u32 size)
+{
+ int status = 0, total_len = 0;
+ u8 local_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {}, *plocal_buf = NULL;
+
+ total_len = size;
+ total_len = (total_len + 3) & ~3; /* 4 byte alignment */
+
+ if (total_len > (MXL_EAGLE_MAX_I2C_PACKET_SIZE - MXL_EAGLE_I2C_MHEADER_SIZE))
+ dev_dbg(&dev->i2c_client->dev, "hrmph?\n");
+
+ plocal_buf = local_buf;
+
+ *plocal_buf++ = 0xFC;
+ *plocal_buf++ = total_len + sizeof(u32);
+
+ *(u32 *)plocal_buf = addr;
+ plocal_buf += sizeof(u32);
+
+ memcpy(plocal_buf, buffer, total_len);
+#ifdef __BIG_ENDIAN
+ convert_endian(sizeof(u32) + total_len, local_buf + 2);
+#endif
+ if (mxl692_i2c_write(dev, local_buf,
+ (total_len + MXL_EAGLE_I2C_MHEADER_SIZE)) < 0) {
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+
+ return status;
+err_finish:
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_memread(struct mxl692_dev *dev, u32 addr,
+ u8 *buffer, u32 size)
+{
+ int status = 0;
+ u8 local_buf[MXL_EAGLE_I2C_MHEADER_SIZE] = {}, *plocal_buf = NULL;
+
+ plocal_buf = local_buf;
+
+ *plocal_buf++ = 0xFB;
+ *plocal_buf++ = sizeof(u32);
+ *(u32 *)plocal_buf = addr;
+#ifdef __BIG_ENDIAN
+ convert_endian(sizeof(u32), plocal_buf);
+#endif
+ mutex_lock(&dev->i2c_lock);
+
+ if (mxl692_i2c_write(dev, local_buf, MXL_EAGLE_I2C_MHEADER_SIZE) > 0) {
+ size = (size + 3) & ~3; /* 4 byte alignment */
+ status = mxl692_i2c_read(dev, buffer, (u16)size) < 0 ? -EREMOTEIO : 0;
+#ifdef __BIG_ENDIAN
+ if (status == 0)
+ convert_endian(size, buffer);
+#endif
+ } else {
+ status = -EREMOTEIO;
+ }
+
+ mutex_unlock(&dev->i2c_lock);
+
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+
+ return status;
+}
+
+static const char *mxl692_opcode_string(u8 opcode)
+{
+	if (opcode <= MXL_EAGLE_OPCODE_INTERNAL)
+ return MXL_EAGLE_OPCODE_STRING[opcode];
+
+ return "invalid opcode";
+}
+
+static int mxl692_opwrite(struct mxl692_dev *dev, u8 *buffer,
+ u32 size)
+{
+ int status = 0, total_len = 0;
+ u8 local_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {}, *plocal_buf = NULL;
+ struct MXL_EAGLE_HOST_MSG_HEADER_T *tx_hdr = (struct MXL_EAGLE_HOST_MSG_HEADER_T *)buffer;
+
+ total_len = size;
+ total_len = (total_len + 3) & ~3; /* 4 byte alignment */
+
+ if (total_len > (MXL_EAGLE_MAX_I2C_PACKET_SIZE - MXL_EAGLE_I2C_PHEADER_SIZE))
+ dev_dbg(&dev->i2c_client->dev, "hrmph?\n");
+
+ plocal_buf = local_buf;
+
+ *plocal_buf++ = 0xFE;
+ *plocal_buf++ = (u8)total_len;
+
+ memcpy(plocal_buf, buffer, total_len);
+ convert_endian(total_len, plocal_buf);
+
+ if (mxl692_i2c_write(dev, local_buf,
+ (total_len + MXL_EAGLE_I2C_PHEADER_SIZE)) < 0) {
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "opcode %s err %d\n",
+ mxl692_opcode_string(tx_hdr->opcode), status);
+ return status;
+}
+
+static int mxl692_opread(struct mxl692_dev *dev, u8 *buffer,
+ u32 size)
+{
+ int status = 0;
+ u32 ix = 0;
+ u8 local_buf[MXL_EAGLE_I2C_PHEADER_SIZE] = {};
+
+ local_buf[0] = 0xFD;
+ local_buf[1] = 0;
+
+ if (mxl692_i2c_write(dev, local_buf, MXL_EAGLE_I2C_PHEADER_SIZE) > 0) {
+ size = (size + 3) & ~3; /* 4 byte alignment */
+
+ /* Read in 4 byte chunks */
+ for (ix = 0; ix < size; ix += 4) {
+ if (mxl692_i2c_read(dev, buffer + ix, 4) < 0) {
+ dev_dbg(&dev->i2c_client->dev, "ix=%d size=%d\n", ix, size);
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+ }
+ convert_endian(size, buffer);
+ } else {
+ status = -EREMOTEIO;
+ }
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_i2c_writeread(struct mxl692_dev *dev,
+ u8 opcode,
+ u8 *tx_payload,
+ u8 tx_payload_size,
+ u8 *rx_payload,
+ u8 rx_payload_expected)
+{
+ int status = 0, timeout = 40;
+ u8 tx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ u8 rx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ u32 resp_checksum = 0, resp_checksum_tmp = 0;
+ struct MXL_EAGLE_HOST_MSG_HEADER_T *tx_header;
+ struct MXL_EAGLE_HOST_MSG_HEADER_T *rx_header;
+
+ mutex_lock(&dev->i2c_lock);
+
+ if ((tx_payload_size + MXL_EAGLE_HOST_MSG_HEADER_SIZE) >
+ (MXL_EAGLE_MAX_I2C_PACKET_SIZE - MXL_EAGLE_I2C_PHEADER_SIZE)) {
+ status = -EINVAL;
+ goto err_finish;
+ }
+
+ tx_header = (struct MXL_EAGLE_HOST_MSG_HEADER_T *)tx_buf;
+ tx_header->opcode = opcode;
+ tx_header->seqnum = dev->seqnum++;
+ tx_header->payload_size = tx_payload_size;
+ tx_header->checksum = 0;
+
+ if (dev->seqnum == 0)
+ dev->seqnum = 1;
+
+ if (tx_payload && tx_payload_size > 0)
+ memcpy(&tx_buf[MXL_EAGLE_HOST_MSG_HEADER_SIZE], tx_payload, tx_payload_size);
+
+ mxl692_tx_swap(opcode, tx_buf);
+
+ tx_header->checksum = 0;
+ tx_header->checksum = mxl692_checksum(tx_buf,
+ MXL_EAGLE_HOST_MSG_HEADER_SIZE + tx_payload_size);
+#ifdef __LITTLE_ENDIAN
+ convert_endian(4, (u8 *)&tx_header->checksum); /* cksum is big endian */
+#endif
+ /* send Tx message */
+ status = mxl692_opwrite(dev, tx_buf,
+ tx_payload_size + MXL_EAGLE_HOST_MSG_HEADER_SIZE);
+ if (status) {
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+
+ /* receive Rx message (polling) */
+ rx_header = (struct MXL_EAGLE_HOST_MSG_HEADER_T *)rx_buf;
+
+ do {
+ status = mxl692_opread(dev, rx_buf,
+ rx_payload_expected + MXL_EAGLE_HOST_MSG_HEADER_SIZE);
+ usleep_range(1000, 2000);
+ timeout--;
+ } while ((timeout > 0) && (status == 0) &&
+ (rx_header->seqnum == 0) &&
+ (rx_header->checksum == 0));
+
+ if (timeout == 0 || status) {
+ dev_dbg(&dev->i2c_client->dev, "timeout=%d status=%d\n",
+ timeout, status);
+ status = -ETIMEDOUT;
+ goto err_finish;
+ }
+
+ if (rx_header->status) {
+ dev_dbg(&dev->i2c_client->dev, "rx header status code: %d\n", rx_header->status);
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+
+ if (rx_header->seqnum != tx_header->seqnum ||
+ rx_header->opcode != tx_header->opcode ||
+ rx_header->payload_size != rx_payload_expected) {
+ dev_dbg(&dev->i2c_client->dev, "Something failed seq=%s opcode=%s pSize=%s\n",
+ rx_header->seqnum != tx_header->seqnum ? "X" : "0",
+ rx_header->opcode != tx_header->opcode ? "X" : "0",
+ rx_header->payload_size != rx_payload_expected ? "X" : "0");
+ if (rx_header->payload_size != rx_payload_expected)
+ dev_dbg(&dev->i2c_client->dev,
+ "rx_header->payloadSize=%d rx_payload_expected=%d\n",
+ rx_header->payload_size, rx_payload_expected);
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+
+ resp_checksum = rx_header->checksum;
+ rx_header->checksum = 0;
+
+ resp_checksum_tmp = mxl692_checksum(rx_buf,
+ MXL_EAGLE_HOST_MSG_HEADER_SIZE + rx_header->payload_size);
+#ifdef __LITTLE_ENDIAN
+ convert_endian(4, (u8 *)&resp_checksum_tmp); /* cksum is big endian */
+#endif
+ if (resp_checksum != resp_checksum_tmp) {
+ dev_dbg(&dev->i2c_client->dev, "rx checksum failure\n");
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+
+ mxl692_rx_swap(rx_header->opcode, rx_buf);
+
+ if (rx_header->payload_size > 0) {
+ if (!rx_payload) {
+ dev_dbg(&dev->i2c_client->dev, "no rx payload?!?\n");
+ status = -EREMOTEIO;
+ goto err_finish;
+ }
+ memcpy(rx_payload, rx_buf + MXL_EAGLE_HOST_MSG_HEADER_SIZE,
+ rx_header->payload_size);
+ }
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+
+ mutex_unlock(&dev->i2c_lock);
+ return status;
+}
+
+static int mxl692_fwdownload(struct mxl692_dev *dev,
+ const u8 *firmware_buf, u32 buf_len)
+{
+ int status = 0;
+ u32 ix, reg_val = 0x1;
+ u8 rx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ struct MXL_EAGLE_DEV_STATUS_T *dev_status;
+
+ if (buf_len < MXL_EAGLE_FW_HEADER_SIZE ||
+ buf_len > MXL_EAGLE_FW_MAX_SIZE_IN_KB * 1000)
+ return -EINVAL;
+
+ mutex_lock(&dev->i2c_lock);
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ status = mxl692_validate_fw_header(dev, firmware_buf, buf_len);
+ if (status)
+ goto err_finish;
+
+ ix = 16;
+ status = mxl692_write_fw_block(dev, firmware_buf, buf_len, &ix); /* DRAM */
+ if (status)
+ goto err_finish;
+
+ status = mxl692_write_fw_block(dev, firmware_buf, buf_len, &ix); /* IRAM */
+ if (status)
+ goto err_finish;
+
+ /* release CPU from reset */
+ status = mxl692_memwrite(dev, 0x70000018, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ mutex_unlock(&dev->i2c_lock);
+
+ if (status == 0) {
+ /* verify FW is alive */
+ usleep_range(MXL_EAGLE_FW_LOAD_TIME * 1000, (MXL_EAGLE_FW_LOAD_TIME + 5) * 1000);
+ dev_status = (struct MXL_EAGLE_DEV_STATUS_T *)&rx_buf;
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_DEVICE_STATUS_GET,
+ NULL,
+ 0,
+ (u8 *)dev_status,
+ sizeof(struct MXL_EAGLE_DEV_STATUS_T));
+ }
+
+ return status;
+err_finish:
+ mutex_unlock(&dev->i2c_lock);
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_get_versions(struct mxl692_dev *dev)
+{
+ int status = 0;
+ struct MXL_EAGLE_DEV_VER_T dev_ver = {};
+ static const char * const chip_id[] = {"N/A", "691", "248", "692"};
+
+ status = mxl692_i2c_writeread(dev, MXL_EAGLE_OPCODE_DEVICE_VERSION_GET,
+ NULL,
+ 0,
+ (u8 *)&dev_ver,
+ sizeof(struct MXL_EAGLE_DEV_VER_T));
+ if (status)
+ return status;
+
+ dev_info(&dev->i2c_client->dev, "MxL692_DEMOD Chip ID: %s\n",
+ chip_id[dev_ver.chip_id]);
+
+ dev_info(&dev->i2c_client->dev,
+ "MxL692_DEMOD FW Version: %d.%d.%d.%d_RC%d\n",
+ dev_ver.firmware_ver[0],
+ dev_ver.firmware_ver[1],
+ dev_ver.firmware_ver[2],
+ dev_ver.firmware_ver[3],
+ dev_ver.firmware_ver[4]);
+
+ return status;
+}
+
+static int mxl692_reset(struct mxl692_dev *dev)
+{
+ int status = 0;
+ u32 dev_type = MXL_EAGLE_DEVICE_MAX, reg_val = 0x2;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ /* legacy i2c override */
+ status = mxl692_memwrite(dev, 0x80000100, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* verify sku */
+ status = mxl692_memread(dev, 0x70000188, (u8 *)&dev_type, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ if (dev_type != dev->device_type)
+ goto err_finish;
+
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_config_regulators(struct mxl692_dev *dev,
+ enum MXL_EAGLE_POWER_SUPPLY_SOURCE_E power_supply)
+{
+ int status = 0;
+ u32 reg_val;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ /* configure main regulator according to the power supply source */
+ status = mxl692_memread(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val &= 0x00FFFFFF;
+ reg_val |= (power_supply == MXL_EAGLE_POWER_SUPPLY_SOURCE_SINGLE) ?
+ 0x14000000 : 0x10000000;
+
+ status = mxl692_memwrite(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* configure digital regulator to high current mode */
+ status = mxl692_memread(dev, 0x90000018, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val |= 0x800;
+
+ status = mxl692_memwrite(dev, 0x90000018, (u8 *)&reg_val, sizeof(u32));
+
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_config_xtal(struct mxl692_dev *dev,
+ struct MXL_EAGLE_DEV_XTAL_T *dev_xtal)
+{
+ int status = 0;
+ u32 reg_val, reg_val1;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ status = mxl692_memread(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* set XTAL capacitance */
+ reg_val &= 0xFFFFFFE0;
+ reg_val |= dev_xtal->xtal_cap;
+
+ /* set CLK OUT */
+ reg_val = dev_xtal->clk_out_enable ? (reg_val | 0x0100) : (reg_val & 0xFFFFFEFF);
+
+ status = mxl692_memwrite(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* set CLK OUT divider */
+ reg_val = dev_xtal->clk_out_div_enable ? (reg_val | 0x0200) : (reg_val & 0xFFFFFDFF);
+
+ status = mxl692_memwrite(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* set XTAL sharing */
+ reg_val = dev_xtal->xtal_sharing_enable ? (reg_val | 0x010400) : (reg_val & 0xFFFEFBFF);
+
+ status = mxl692_memwrite(dev, 0x90000000, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* enable/disable XTAL calibration, based on master/slave device */
+ status = mxl692_memread(dev, 0x90000030, (u8 *)&reg_val1, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ if (dev_xtal->xtal_calibration_enable) {
+ /* enable XTAL calibration and set XTAL amplitude to a higher value */
+ reg_val1 &= 0xFFFFFFFD;
+ reg_val1 |= 0x30;
+
+ status = mxl692_memwrite(dev, 0x90000030, (u8 *)&reg_val1, sizeof(u32));
+ if (status)
+ goto err_finish;
+ } else {
+ /* disable XTAL calibration */
+ reg_val1 |= 0x2;
+
+ status = mxl692_memwrite(dev, 0x90000030, (u8 *)&reg_val1, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ /* set XTAL bias value */
+ status = mxl692_memread(dev, 0x9000002c, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val &= 0xC0FFFFFF;
+ reg_val |= 0xA000000;
+
+ status = mxl692_memwrite(dev, 0x9000002c, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+ }
+
+ /* start XTAL calibration */
+ status = mxl692_memread(dev, 0x70000010, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val |= 0x8;
+
+ status = mxl692_memwrite(dev, 0x70000010, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ status = mxl692_memread(dev, 0x70000018, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val |= 0x10;
+
+ status = mxl692_memwrite(dev, 0x70000018, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ status = mxl692_memread(dev, 0x9001014c, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val &= 0xFFFFEFFF;
+
+ status = mxl692_memwrite(dev, 0x9001014c, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ reg_val |= 0x1000;
+
+ status = mxl692_memwrite(dev, 0x9001014c, (u8 *)&reg_val, sizeof(u32));
+ if (status)
+ goto err_finish;
+
+ usleep_range(45000, 55000);
+
+err_finish:
+ if (status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_powermode(struct mxl692_dev *dev,
+ enum MXL_EAGLE_POWER_MODE_E power_mode)
+{
+ int status = 0;
+ u8 mode = power_mode;
+
+ dev_dbg(&dev->i2c_client->dev, "%s\n",
+ power_mode == MXL_EAGLE_POWER_MODE_SLEEP ? "sleep" : "active");
+
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_DEVICE_POWERMODE_SET,
+ &mode,
+ sizeof(u8),
+ NULL,
+ 0);
+ if (status) {
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+ }
+
+ dev->power_mode = power_mode;
+
+ return status;
+}
+
+static int mxl692_init(struct dvb_frontend *fe)
+{
+ struct mxl692_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->i2c_client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int status = 0;
+ const struct firmware *firmware;
+ struct MXL_EAGLE_DEV_XTAL_T xtal_config = {};
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ if (dev->init_done)
+ goto warm;
+
+ dev->seqnum = 1;
+
+ status = mxl692_reset(dev);
+ if (status)
+ goto err;
+
+ usleep_range(50 * 1000, 60 * 1000); /* was 1000! */
+
+ status = mxl692_config_regulators(dev, MXL_EAGLE_POWER_SUPPLY_SOURCE_DUAL);
+ if (status)
+ goto err;
+
+ xtal_config.xtal_cap = 26;
+ xtal_config.clk_out_div_enable = 0;
+ xtal_config.clk_out_enable = 0;
+ xtal_config.xtal_calibration_enable = 0;
+ xtal_config.xtal_sharing_enable = 1;
+ status = mxl692_config_xtal(dev, &xtal_config);
+ if (status)
+ goto err;
+
+ status = request_firmware(&firmware, MXL692_FIRMWARE, &client->dev);
+ if (status) {
+ dev_dbg(&dev->i2c_client->dev, "firmware missing? %s\n",
+ MXL692_FIRMWARE);
+ goto err;
+ }
+
+ status = mxl692_fwdownload(dev, firmware->data, firmware->size);
+ if (status)
+ goto err_release_firmware;
+
+ release_firmware(firmware);
+
+ status = mxl692_get_versions(dev);
+ if (status)
+ goto err;
+
+ dev->power_mode = MXL_EAGLE_POWER_MODE_SLEEP;
+warm:
+ /* Config Device Power Mode */
+ if (dev->power_mode != MXL_EAGLE_POWER_MODE_ACTIVE) {
+ status = mxl692_powermode(dev, MXL_EAGLE_POWER_MODE_ACTIVE);
+ if (status)
+ goto err;
+
+ usleep_range(50 * 1000, 60 * 1000); /* was 500! */
+ }
+
+ /* Init stats here to indicate which stats are supported */
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ dev->init_done = 1;
+ return 0;
+err_release_firmware:
+ release_firmware(firmware);
+err:
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_sleep(struct dvb_frontend *fe)
+{
+ struct mxl692_dev *dev = fe->demodulator_priv;
+
+ if (dev->power_mode != MXL_EAGLE_POWER_MODE_SLEEP)
+ mxl692_powermode(dev, MXL_EAGLE_POWER_MODE_SLEEP);
+
+ return 0;
+}
+
+static int mxl692_set_frontend(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct mxl692_dev *dev = fe->demodulator_priv;
+
+ int status = 0;
+ enum MXL_EAGLE_DEMOD_TYPE_E demod_type;
+ struct MXL_EAGLE_MPEGOUT_PARAMS_T mpeg_params = {};
+ enum MXL_EAGLE_QAM_DEMOD_ANNEX_TYPE_E qam_annex = MXL_EAGLE_QAM_DEMOD_ANNEX_B;
+ struct MXL_EAGLE_QAM_DEMOD_PARAMS_T qam_params = {};
+ struct MXL_EAGLE_TUNER_CHANNEL_PARAMS_T tuner_params = {};
+ u8 op_param = 0;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ switch (p->modulation) {
+ case VSB_8:
+ demod_type = MXL_EAGLE_DEMOD_TYPE_ATSC;
+ break;
+ case QAM_AUTO:
+ case QAM_64:
+ case QAM_128:
+ case QAM_256:
+ demod_type = MXL_EAGLE_DEMOD_TYPE_QAM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dev->current_frequency == p->frequency && dev->demod_type == demod_type) {
+ dev_dbg(&dev->i2c_client->dev, "already set up\n");
+ return 0;
+ }
+
+ dev->current_frequency = -1;
+ dev->demod_type = -1;
+
+ op_param = demod_type;
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_DEVICE_DEMODULATOR_TYPE_SET,
+ &op_param,
+ sizeof(u8),
+ NULL,
+ 0);
+ if (status) {
+ dev_dbg(&dev->i2c_client->dev,
+ "DEVICE_DEMODULATOR_TYPE_SET...FAIL err 0x%x\n", status);
+ goto err;
+ }
+
+ usleep_range(20 * 1000, 30 * 1000); /* was 500! */
+
+ mpeg_params.mpeg_parallel = 0;
+ mpeg_params.msb_first = MXL_EAGLE_DATA_SERIAL_MSB_1ST;
+ mpeg_params.mpeg_sync_pulse_width = MXL_EAGLE_DATA_SYNC_WIDTH_BIT;
+ mpeg_params.mpeg_valid_pol = MXL_EAGLE_CLOCK_POSITIVE;
+ mpeg_params.mpeg_sync_pol = MXL_EAGLE_CLOCK_POSITIVE;
+ mpeg_params.mpeg_clk_pol = MXL_EAGLE_CLOCK_NEGATIVE;
+ mpeg_params.mpeg3wire_mode_enable = 0;
+ mpeg_params.mpeg_clk_freq = MXL_EAGLE_MPEG_CLOCK_27MHZ;
+
+ switch (demod_type) {
+ case MXL_EAGLE_DEMOD_TYPE_ATSC:
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_DEVICE_MPEG_OUT_PARAMS_SET,
+ (u8 *)&mpeg_params,
+ sizeof(struct MXL_EAGLE_MPEGOUT_PARAMS_T),
+ NULL,
+ 0);
+ if (status)
+ goto err;
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_QAM:
+ if (qam_annex == MXL_EAGLE_QAM_DEMOD_ANNEX_A)
+ mpeg_params.msb_first = MXL_EAGLE_DATA_SERIAL_LSB_1ST;
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_DEVICE_MPEG_OUT_PARAMS_SET,
+ (u8 *)&mpeg_params,
+ sizeof(struct MXL_EAGLE_MPEGOUT_PARAMS_T),
+ NULL,
+ 0);
+ if (status)
+ goto err;
+
+ qam_params.annex_type = qam_annex;
+ qam_params.qam_type = MXL_EAGLE_QAM_DEMOD_AUTO;
+ qam_params.iq_flip = MXL_EAGLE_DEMOD_IQ_AUTO;
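+ /*
+ * Nominal ITU-T J.83 Annex B symbol rates: ~5.057 Msym/s for
+ * 64-QAM and ~5.361 Msym/s for 256-QAM; QAM_AUTO and the other
+ * constellations default to the 256-QAM rate.
+ */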
+ if (p->modulation == QAM_64)
+ qam_params.symbol_rate_hz = 5057000;
+ else
+ qam_params.symbol_rate_hz = 5361000;
+
+ qam_params.symbol_rate_256qam_hz = 5361000;
+
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_QAM_PARAMS_SET,
+ (u8 *)&qam_params,
+ sizeof(struct MXL_EAGLE_QAM_DEMOD_PARAMS_T),
+ NULL, 0);
+ if (status)
+ goto err;
+
+ break;
+ default:
+ break;
+ }
+
+ usleep_range(20 * 1000, 30 * 1000); /* was 500! */
+
+ tuner_params.freq_hz = p->frequency;
+ tuner_params.bandwidth = MXL_EAGLE_TUNER_BW_6MHZ;
+ tuner_params.tune_mode = MXL_EAGLE_TUNER_CHANNEL_TUNE_MODE_VIEW;
+
+ dev_dbg(&dev->i2c_client->dev, " Tuning Freq: %d %s\n", tuner_params.freq_hz,
+ demod_type == MXL_EAGLE_DEMOD_TYPE_ATSC ? "ATSC" : "QAM");
+
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_TUNER_CHANNEL_TUNE_SET,
+ (u8 *)&tuner_params,
+ sizeof(struct MXL_EAGLE_TUNER_CHANNEL_PARAMS_T),
+ NULL,
+ 0);
+ if (status)
+ goto err;
+
+ usleep_range(20 * 1000, 30 * 1000); /* was 500! */
+
+ switch (demod_type) {
+ case MXL_EAGLE_DEMOD_TYPE_ATSC:
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_ATSC_INIT_SET,
+ NULL, 0, NULL, 0);
+ if (status)
+ goto err;
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_QAM:
+ status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_QAM_RESTART_SET,
+ NULL, 0, NULL, 0);
+ if (status)
+ goto err;
+ break;
+ default:
+ break;
+ }
+
+ dev->demod_type = demod_type;
+ dev->current_frequency = p->frequency;
+
+ return 0;
+err:
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", status);
+ return status;
+}
+
+static int mxl692_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ p->modulation = c->modulation;
+ p->frequency = c->frequency;
+
+ return 0;
+}
+
+static int mxl692_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct mxl692_dev *dev = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 rx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ struct MXL_EAGLE_ATSC_DEMOD_STATUS_T *atsc_status;
+ struct MXL_EAGLE_QAM_DEMOD_STATUS_T *qam_status;
+ enum MXL_EAGLE_DEMOD_TYPE_E demod_type = dev->demod_type;
+ int mxl_status = 0;
+
+ *snr = 0;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ atsc_status = (struct MXL_EAGLE_ATSC_DEMOD_STATUS_T *)&rx_buf;
+ qam_status = (struct MXL_EAGLE_QAM_DEMOD_STATUS_T *)&rx_buf;
+
+ switch (demod_type) {
+ case MXL_EAGLE_DEMOD_TYPE_ATSC:
+ mxl_status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_ATSC_STATUS_GET,
+ NULL,
+ 0,
+ rx_buf,
+ sizeof(struct MXL_EAGLE_ATSC_DEMOD_STATUS_T));
+ if (!mxl_status) {
+ *snr = (u16)(atsc_status->snr_db_tenths / 10);
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = *snr;
+ }
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_QAM:
+ mxl_status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_QAM_STATUS_GET,
+ NULL,
+ 0,
+ rx_buf,
+ sizeof(struct MXL_EAGLE_QAM_DEMOD_STATUS_T));
+ if (!mxl_status)
+ *snr = (u16)(qam_status->snr_db_tenths / 10);
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_OOB:
+ default:
+ break;
+ }
+
+ if (mxl_status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", mxl_status);
+ return mxl_status;
+}
+
+static int mxl692_read_ber_ucb(struct dvb_frontend *fe)
+{
+ struct mxl692_dev *dev = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 rx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ struct MXL_EAGLE_ATSC_DEMOD_ERROR_COUNTERS_T *atsc_errors;
+ enum MXL_EAGLE_DEMOD_TYPE_E demod_type = dev->demod_type;
+ int mxl_status = 0;
+ u32 utmp;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ atsc_errors = (struct MXL_EAGLE_ATSC_DEMOD_ERROR_COUNTERS_T *)&rx_buf;
+
+ switch (demod_type) {
+ case MXL_EAGLE_DEMOD_TYPE_ATSC:
+ mxl_status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_ATSC_ERROR_COUNTERS_GET,
+ NULL,
+ 0,
+ rx_buf,
+ sizeof(struct MXL_EAGLE_ATSC_DEMOD_ERROR_COUNTERS_T));
+ if (!mxl_status) {
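+ /*
+ * Approximate the total byte count for post_bit_count: the
+ * average byte count per errored packet, scaled by the total
+ * number of packets seen.
+ */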
+ if (atsc_errors->error_packets == 0)
+ utmp = 0;
+ else
+ utmp = ((atsc_errors->error_bytes / atsc_errors->error_packets) *
+ atsc_errors->total_packets);
+ /* ber */
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += atsc_errors->error_bytes;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += utmp;
+ /* ucb */
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue += atsc_errors->error_packets;
+
+ dev_dbg(&dev->i2c_client->dev, "%llu %llu\n",
+ c->post_bit_count.stat[0].uvalue, c->block_error.stat[0].uvalue);
+ }
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_QAM:
+ case MXL_EAGLE_DEMOD_TYPE_OOB:
+ default:
+ break;
+ }
+
+ if (mxl_status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", mxl_status);
+
+ return mxl_status;
+}
+
+static int mxl692_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct mxl692_dev *dev = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 rx_buf[MXL_EAGLE_MAX_I2C_PACKET_SIZE] = {};
+ struct MXL_EAGLE_ATSC_DEMOD_STATUS_T *atsc_status;
+ struct MXL_EAGLE_QAM_DEMOD_STATUS_T *qam_status;
+ enum MXL_EAGLE_DEMOD_TYPE_E demod_type = dev->demod_type;
+ int mxl_status = 0;
+
+ *status = 0;
+
+ dev_dbg(&dev->i2c_client->dev, "\n");
+
+ atsc_status = (struct MXL_EAGLE_ATSC_DEMOD_STATUS_T *)&rx_buf;
+ qam_status = (struct MXL_EAGLE_QAM_DEMOD_STATUS_T *)&rx_buf;
+
+ switch (demod_type) {
+ case MXL_EAGLE_DEMOD_TYPE_ATSC:
+ mxl_status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_ATSC_STATUS_GET,
+ NULL,
+ 0,
+ rx_buf,
+ sizeof(struct MXL_EAGLE_ATSC_DEMOD_STATUS_T));
+ if (!mxl_status && atsc_status->atsc_lock) {
+ *status |= FE_HAS_SIGNAL;
+ *status |= FE_HAS_CARRIER;
+ *status |= FE_HAS_VITERBI;
+ *status |= FE_HAS_SYNC;
+ *status |= FE_HAS_LOCK;
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = atsc_status->snr_db_tenths / 10;
+ }
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_QAM:
+ mxl_status = mxl692_i2c_writeread(dev,
+ MXL_EAGLE_OPCODE_QAM_STATUS_GET,
+ NULL,
+ 0,
+ rx_buf,
+ sizeof(struct MXL_EAGLE_QAM_DEMOD_STATUS_T));
+ if (!mxl_status && qam_status->qam_locked) {
+ *status |= FE_HAS_SIGNAL;
+ *status |= FE_HAS_CARRIER;
+ *status |= FE_HAS_VITERBI;
+ *status |= FE_HAS_SYNC;
+ *status |= FE_HAS_LOCK;
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = qam_status->snr_db_tenths / 10;
+ }
+ break;
+ case MXL_EAGLE_DEMOD_TYPE_OOB:
+ default:
+ break;
+ }
+
+ if ((*status & FE_HAS_LOCK) == 0) {
+ /* No lock, reset all statistics */
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return 0;
+ }
+
+ if (mxl_status)
+ dev_dbg(&dev->i2c_client->dev, "err %d\n", mxl_status);
+ else
+ mxl_status = mxl692_read_ber_ucb(fe);
+
+ return mxl_status;
+}
+
+static const struct dvb_frontend_ops mxl692_ops = {
+ .delsys = { SYS_ATSC },
+ .info = {
+ .name = "MaxLinear MxL692 VSB tuner-demodulator",
+ .frequency_min_hz = 54000000,
+ .frequency_max_hz = 858000000,
+ .frequency_stepsize_hz = 62500,
+ .caps = FE_CAN_8VSB
+ },
+
+ .init = mxl692_init,
+ .sleep = mxl692_sleep,
+ .set_frontend = mxl692_set_frontend,
+ .get_frontend = mxl692_get_frontend,
+
+ .read_status = mxl692_read_status,
+ .read_snr = mxl692_read_snr,
+};
+
+static int mxl692_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mxl692_config *config = client->dev.platform_data;
+ struct mxl692_dev *dev;
+ int ret = 0;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ dev_dbg(&client->dev, "kzalloc() failed\n");
+ goto err;
+ }
+
+ memcpy(&dev->fe.ops, &mxl692_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = dev;
+ dev->i2c_client = client;
+ *config->fe = &dev->fe;
+ mutex_init(&dev->i2c_lock);
+ i2c_set_clientdata(client, dev);
+
+ dev_info(&client->dev, "MaxLinear mxl692 successfully attached\n");
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return -ENODEV;
+}
+
+static int mxl692_remove(struct i2c_client *client)
+{
+ struct mxl692_dev *dev = i2c_get_clientdata(client);
+
+ dev->fe.demodulator_priv = NULL;
+ i2c_set_clientdata(client, NULL);
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id mxl692_id_table[] = {
+ {"mxl692", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mxl692_id_table);
+
+static struct i2c_driver mxl692_driver = {
+ .driver = {
+ .name = "mxl692",
+ },
+ .probe = mxl692_probe,
+ .remove = mxl692_remove,
+ .id_table = mxl692_id_table,
+};
+
+module_i2c_driver(mxl692_driver);
+
+MODULE_AUTHOR("Brad Love <brad@nextdimension.cc>");
+MODULE_DESCRIPTION("MaxLinear MxL692 demodulator/tuner driver");
+MODULE_FIRMWARE(MXL692_FIRMWARE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/mxl692.h b/drivers/media/dvb-frontends/mxl692.h
new file mode 100644
index 000000000000..45bc48f1f12f
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl692.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for the MaxLinear MxL69x family of tuners/demods
+ *
+ * Copyright (C) 2020 Brad Love <brad@nextdimension.cc>
+ *
+ * based on code:
+ * Copyright (c) 2016 MaxLinear, Inc. All rights reserved
+ * which was released under GPL V2
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MXL692_H_
+#define _MXL692_H_
+
+#include <media/dvb_frontend.h>
+
+#define MXL692_FIRMWARE "dvb-demod-mxl692.fw"
+
+struct mxl692_config {
+ unsigned char id;
+ u8 i2c_addr;
+ /*
+ * frontend
+ * returned by driver
+ */
+ struct dvb_frontend **fe;
+};
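+
+/*
+ * Attach sketch (illustration only, not part of this driver): a bridge
+ * driver registers the demod as an I2C client with the config as its
+ * platform data; on success, *config.fe points at the new frontend.
+ * The 0x60 address and the i2c/dvb pointers below are hypothetical.
+ *
+ *    struct mxl692_config config = {
+ *        .id = 0,
+ *        .i2c_addr = 0x60,
+ *        .fe = &dvb->frontend,
+ *    };
+ *    struct i2c_board_info info = {};
+ *
+ *    strscpy(info.type, "mxl692", I2C_NAME_SIZE);
+ *    info.platform_data = &config;
+ *    client = i2c_new_client_device(i2c, &info);
+ */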
+
+#endif /* _MXL692_H_ */
diff --git a/drivers/media/dvb-frontends/mxl692_defs.h b/drivers/media/dvb-frontends/mxl692_defs.h
new file mode 100644
index 000000000000..776ac407b4e7
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl692_defs.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for the MaxLinear MxL69x family of combo tuners/demods
+ *
+ * Copyright (C) 2020 Brad Love <brad@nextdimension.cc>
+ *
+ * based on code:
+ * Copyright (c) 2016 MaxLinear, Inc. All rights reserved
+ * which was released under GPL V2
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*****************************************************************************
+ * Defines
+ *****************************************************************************
+ */
+#define MXL_EAGLE_HOST_MSG_HEADER_SIZE 8
+#define MXL_EAGLE_FW_MAX_SIZE_IN_KB 76
+#define MXL_EAGLE_QAM_FFE_TAPS_LENGTH 16
+#define MXL_EAGLE_QAM_SPUR_TAPS_LENGTH 32
+#define MXL_EAGLE_QAM_DFE_TAPS_LENGTH 72
+#define MXL_EAGLE_ATSC_FFE_TAPS_LENGTH 4096
+#define MXL_EAGLE_ATSC_DFE_TAPS_LENGTH 384
+#define MXL_EAGLE_VERSION_SIZE 5 /* A.B.C.D-RCx */
+#define MXL_EAGLE_FW_LOAD_TIME 50
+
+#define MXL_EAGLE_FW_HEADER_SIZE 16
+#define MXL_EAGLE_FW_SEGMENT_HEADER_SIZE 8
+#define MXL_EAGLE_MAX_I2C_PACKET_SIZE 58
+#define MXL_EAGLE_I2C_MHEADER_SIZE 6
+#define MXL_EAGLE_I2C_PHEADER_SIZE 2
+
+/* Enum of Eagle family devices */
+enum MXL_EAGLE_DEVICE_E {
+ MXL_EAGLE_DEVICE_691 = 1, /* Device Mxl691 */
+ MXL_EAGLE_DEVICE_248 = 2, /* Device Mxl248 */
+ MXL_EAGLE_DEVICE_692 = 3, /* Device Mxl692 */
+ MXL_EAGLE_DEVICE_MAX, /* No such device */
+};
+
+#define VER_A 1
+#define VER_B 1
+#define VER_C 1
+#define VER_D 3
+#define VER_E 6
+
+/* Enum of Host to Eagle I2C protocol opcodes */
+enum MXL_EAGLE_OPCODE_E {
+ /* DEVICE */
+ MXL_EAGLE_OPCODE_DEVICE_DEMODULATOR_TYPE_SET,
+ MXL_EAGLE_OPCODE_DEVICE_MPEG_OUT_PARAMS_SET,
+ MXL_EAGLE_OPCODE_DEVICE_POWERMODE_SET,
+ MXL_EAGLE_OPCODE_DEVICE_GPIO_DIRECTION_SET,
+ MXL_EAGLE_OPCODE_DEVICE_GPO_LEVEL_SET,
+ MXL_EAGLE_OPCODE_DEVICE_INTR_MASK_SET,
+ MXL_EAGLE_OPCODE_DEVICE_IO_MUX_SET,
+ MXL_EAGLE_OPCODE_DEVICE_VERSION_GET,
+ MXL_EAGLE_OPCODE_DEVICE_STATUS_GET,
+ MXL_EAGLE_OPCODE_DEVICE_GPI_LEVEL_GET,
+
+ /* TUNER */
+ MXL_EAGLE_OPCODE_TUNER_CHANNEL_TUNE_SET,
+ MXL_EAGLE_OPCODE_TUNER_LOCK_STATUS_GET,
+ MXL_EAGLE_OPCODE_TUNER_AGC_STATUS_GET,
+
+ /* ATSC */
+ MXL_EAGLE_OPCODE_ATSC_INIT_SET,
+ MXL_EAGLE_OPCODE_ATSC_ACQUIRE_CARRIER_SET,
+ MXL_EAGLE_OPCODE_ATSC_STATUS_GET,
+ MXL_EAGLE_OPCODE_ATSC_ERROR_COUNTERS_GET,
+ MXL_EAGLE_OPCODE_ATSC_EQUALIZER_FILTER_DFE_TAPS_GET,
+ MXL_EAGLE_OPCODE_ATSC_EQUALIZER_FILTER_FFE_TAPS_GET,
+
+ /* QAM */
+ MXL_EAGLE_OPCODE_QAM_PARAMS_SET,
+ MXL_EAGLE_OPCODE_QAM_RESTART_SET,
+ MXL_EAGLE_OPCODE_QAM_STATUS_GET,
+ MXL_EAGLE_OPCODE_QAM_ERROR_COUNTERS_GET,
+ MXL_EAGLE_OPCODE_QAM_CONSTELLATION_VALUE_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_FFE_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_SPUR_START_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_SPUR_END_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_TAPS_NUMBER_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_START_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_MIDDLE_GET,
+ MXL_EAGLE_OPCODE_QAM_EQUALIZER_FILTER_DFE_END_GET,
+
+ /* OOB */
+ MXL_EAGLE_OPCODE_OOB_PARAMS_SET,
+ MXL_EAGLE_OPCODE_OOB_RESTART_SET,
+ MXL_EAGLE_OPCODE_OOB_ERROR_COUNTERS_GET,
+ MXL_EAGLE_OPCODE_OOB_STATUS_GET,
+
+ /* SMA */
+ MXL_EAGLE_OPCODE_SMA_INIT_SET,
+ MXL_EAGLE_OPCODE_SMA_PARAMS_SET,
+ MXL_EAGLE_OPCODE_SMA_TRANSMIT_SET,
+ MXL_EAGLE_OPCODE_SMA_RECEIVE_GET,
+
+ /* DEBUG */
+ MXL_EAGLE_OPCODE_INTERNAL,
+
+ MXL_EAGLE_OPCODE_MAX = 70,
+};
+
+/* Host to Eagle I2C protocol opcode names, used for debug output */
+static const char * const MXL_EAGLE_OPCODE_STRING[] = {
+ /* DEVICE */
+ "DEVICE_DEMODULATOR_TYPE_SET",
+ "DEVICE_MPEG_OUT_PARAMS_SET",
+ "DEVICE_POWERMODE_SET",
+ "DEVICE_GPIO_DIRECTION_SET",
+ "DEVICE_GPO_LEVEL_SET",
+ "DEVICE_INTR_MASK_SET",
+ "DEVICE_IO_MUX_SET",
+ "DEVICE_VERSION_GET",
+ "DEVICE_STATUS_GET",
+ "DEVICE_GPI_LEVEL_GET",
+
+ /* TUNER */
+ "TUNER_CHANNEL_TUNE_SET",
+ "TUNER_LOCK_STATUS_GET",
+ "TUNER_AGC_STATUS_GET",
+
+ /* ATSC */
+ "ATSC_INIT_SET",
+ "ATSC_ACQUIRE_CARRIER_SET",
+ "ATSC_STATUS_GET",
+ "ATSC_ERROR_COUNTERS_GET",
+ "ATSC_EQUALIZER_FILTER_DFE_TAPS_GET",
+ "ATSC_EQUALIZER_FILTER_FFE_TAPS_GET",
+
+ /* QAM */
+ "QAM_PARAMS_SET",
+ "QAM_RESTART_SET",
+ "QAM_STATUS_GET",
+ "QAM_ERROR_COUNTERS_GET",
+ "QAM_CONSTELLATION_VALUE_GET",
+ "QAM_EQUALIZER_FILTER_FFE_GET",
+ "QAM_EQUALIZER_FILTER_SPUR_START_GET",
+ "QAM_EQUALIZER_FILTER_SPUR_END_GET",
+ "QAM_EQUALIZER_FILTER_DFE_TAPS_NUMBER_GET",
+ "QAM_EQUALIZER_FILTER_DFE_START_GET",
+ "QAM_EQUALIZER_FILTER_DFE_MIDDLE_GET",
+ "QAM_EQUALIZER_FILTER_DFE_END_GET",
+
+ /* OOB */
+ "OOB_PARAMS_SET",
+ "OOB_RESTART_SET",
+ "OOB_ERROR_COUNTERS_GET",
+ "OOB_STATUS_GET",
+
+ /* SMA */
+ "SMA_INIT_SET",
+ "SMA_PARAMS_SET",
+ "SMA_TRANSMIT_SET",
+ "SMA_RECEIVE_GET",
+
+ /* DEBUG */
+ "INTERNAL",
+};
+
+/* Enum of callback function types */
+enum MXL_EAGLE_CB_TYPE_E {
+ MXL_EAGLE_CB_FW_DOWNLOAD = 0,
+};
+
+/* Enum of power supply types */
+enum MXL_EAGLE_POWER_SUPPLY_SOURCE_E {
+ MXL_EAGLE_POWER_SUPPLY_SOURCE_SINGLE, /* Single supply of 3.3V */
+ MXL_EAGLE_POWER_SUPPLY_SOURCE_DUAL, /* Dual supply, 1.8V & 3.3V */
+};
+
+/* Enum of I/O pad drive modes */
+enum MXL_EAGLE_IO_MUX_DRIVE_MODE_E {
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_1X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_2X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_3X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_4X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_5X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_6X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_7X,
+ MXL_EAGLE_IO_MUX_DRIVE_MODE_8X,
+};
+
+/* Enum of demodulator types. Used to select the demodulator type
+ * in devices that support more than one, e.g. ATSC vs. QAM in the
+ * Mxl692
+ */
+enum MXL_EAGLE_DEMOD_TYPE_E {
+ MXL_EAGLE_DEMOD_TYPE_QAM, /* Mxl248 or Mxl692 */
+ MXL_EAGLE_DEMOD_TYPE_OOB, /* Mxl248 only */
+ MXL_EAGLE_DEMOD_TYPE_ATSC /* Mxl691 or Mxl692 */
+};
+
+/* Enum of power modes. Used for initial
+ * activation, or for activating sleep mode
+ */
+enum MXL_EAGLE_POWER_MODE_E {
+ MXL_EAGLE_POWER_MODE_SLEEP,
+ MXL_EAGLE_POWER_MODE_ACTIVE
+};
+
+/* Enum of GPIOs, used in device GPIO APIs */
+enum MXL_EAGLE_GPIO_NUMBER_E {
+ MXL_EAGLE_GPIO_NUMBER_0,
+ MXL_EAGLE_GPIO_NUMBER_1,
+ MXL_EAGLE_GPIO_NUMBER_2,
+ MXL_EAGLE_GPIO_NUMBER_3,
+ MXL_EAGLE_GPIO_NUMBER_4,
+ MXL_EAGLE_GPIO_NUMBER_5,
+ MXL_EAGLE_GPIO_NUMBER_6
+};
+
+/* Enum of GPIO directions, used in GPIO direction configuration API */
+enum MXL_EAGLE_GPIO_DIRECTION_E {
+ MXL_EAGLE_GPIO_DIRECTION_INPUT,
+ MXL_EAGLE_GPIO_DIRECTION_OUTPUT
+};
+
+/* Enum of GPIO level, used in device GPIO APIs */
+enum MXL_EAGLE_GPIO_LEVEL_E {
+ MXL_EAGLE_GPIO_LEVEL_LOW,
+ MXL_EAGLE_GPIO_LEVEL_HIGH,
+};
+
+/* Enum of I/O Mux function, used in device I/O mux configuration API */
+enum MXL_EAGLE_IOMUX_FUNCTION_E {
+ MXL_EAGLE_IOMUX_FUNC_FEC_LOCK,
+ MXL_EAGLE_IOMUX_FUNC_MERR,
+};
+
+/* Enum of MPEG Data format, used in MPEG and OOB output configuration */
+enum MXL_EAGLE_MPEG_DATA_FORMAT_E {
+ MXL_EAGLE_DATA_SERIAL_LSB_1ST = 0,
+ MXL_EAGLE_DATA_SERIAL_MSB_1ST,
+
+ MXL_EAGLE_DATA_SYNC_WIDTH_BIT = 0,
+ MXL_EAGLE_DATA_SYNC_WIDTH_BYTE
+};
+
+/* Enum of MPEG Clock format, used in MPEG and OOB output configuration */
+enum MXL_EAGLE_MPEG_CLOCK_FORMAT_E {
+ MXL_EAGLE_CLOCK_ACTIVE_HIGH = 0,
+ MXL_EAGLE_CLOCK_ACTIVE_LOW,
+
+ MXL_EAGLE_CLOCK_POSITIVE = 0,
+ MXL_EAGLE_CLOCK_NEGATIVE,
+
+ MXL_EAGLE_CLOCK_IN_PHASE = 0,
+ MXL_EAGLE_CLOCK_INVERTED,
+};
+
+/* Enum of MPEG Clock speeds, used in MPEG output configuration */
+enum MXL_EAGLE_MPEG_CLOCK_RATE_E {
+ MXL_EAGLE_MPEG_CLOCK_54MHZ,
+ MXL_EAGLE_MPEG_CLOCK_40_5MHZ,
+ MXL_EAGLE_MPEG_CLOCK_27MHZ,
+ MXL_EAGLE_MPEG_CLOCK_13_5MHZ,
+};
+
+/* Enum of Interrupt mask bit, used in host interrupt configuration */
+enum MXL_EAGLE_INTR_MASK_BITS_E {
+ MXL_EAGLE_INTR_MASK_DEMOD = 0,
+ MXL_EAGLE_INTR_MASK_SMA_RX = 1,
+ MXL_EAGLE_INTR_MASK_WDOG = 31
+};
+
+/* Enum of QAM Demodulator annex type, used in QAM configuration */
+enum MXL_EAGLE_QAM_DEMOD_ANNEX_TYPE_E {
+ MXL_EAGLE_QAM_DEMOD_ANNEX_B, /* J.83B */
+ MXL_EAGLE_QAM_DEMOD_ANNEX_A, /* DVB-C */
+};
+
+/* Enum of QAM Demodulator modulation, used in QAM configuration and status */
+enum MXL_EAGLE_QAM_DEMOD_QAM_TYPE_E {
+ MXL_EAGLE_QAM_DEMOD_QAM16,
+ MXL_EAGLE_QAM_DEMOD_QAM64,
+ MXL_EAGLE_QAM_DEMOD_QAM256,
+ MXL_EAGLE_QAM_DEMOD_QAM1024,
+ MXL_EAGLE_QAM_DEMOD_QAM32,
+ MXL_EAGLE_QAM_DEMOD_QAM128,
+ MXL_EAGLE_QAM_DEMOD_QPSK,
+ MXL_EAGLE_QAM_DEMOD_AUTO,
+};
+
+/* Enum of Demodulator IQ setup, used in QAM, OOB configuration and status */
+enum MXL_EAGLE_IQ_FLIP_E {
+ MXL_EAGLE_DEMOD_IQ_NORMAL,
+ MXL_EAGLE_DEMOD_IQ_FLIPPED,
+ MXL_EAGLE_DEMOD_IQ_AUTO,
+};
+
+/* Enum of OOB Demodulator symbol rates, used in OOB configuration */
+enum MXL_EAGLE_OOB_DEMOD_SYMB_RATE_E {
+ MXL_EAGLE_OOB_DEMOD_SYMB_RATE_0_772MHZ, /* ANSI/SCTE 55-2 0.772 MHz */
+ MXL_EAGLE_OOB_DEMOD_SYMB_RATE_1_024MHZ, /* ANSI/SCTE 55-1 1.024 MHz */
+ MXL_EAGLE_OOB_DEMOD_SYMB_RATE_1_544MHZ, /* ANSI/SCTE 55-2 1.544 MHz */
+};
+
+/* Enum of tuner channel tuning mode */
+enum MXL_EAGLE_TUNER_CHANNEL_TUNE_MODE_E {
+ MXL_EAGLE_TUNER_CHANNEL_TUNE_MODE_VIEW, /* Normal "view" mode */
+ MXL_EAGLE_TUNER_CHANNEL_TUNE_MODE_SCAN, /* Fast "scan" mode */
+};
+
+/* Enum of tuner bandwidth */
+enum MXL_EAGLE_TUNER_BW_E {
+ MXL_EAGLE_TUNER_BW_6MHZ,
+ MXL_EAGLE_TUNER_BW_7MHZ,
+ MXL_EAGLE_TUNER_BW_8MHZ,
+};
+
+/* Enum of junction temperature ranges; values follow a Gray-code order */
+enum MXL_EAGLE_JUNCTION_TEMPERATURE_E {
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BELOW_0_CELSIUS = 0,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_0_TO_14_CELSIUS = 1,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_14_TO_28_CELSIUS = 3,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_28_TO_42_CELSIUS = 2,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_42_TO_57_CELSIUS = 6,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_57_TO_71_CELSIUS = 7,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_BETWEEN_71_TO_85_CELSIUS = 5,
+ MXL_EAGLE_JUNCTION_TEMPERATURE_ABOVE_85_CELSIUS = 4,
+};
+
+/* Struct passed to the optional callback used during FW download */
+struct MXL_EAGLE_FW_DOWNLOAD_CB_PAYLOAD_T {
+ u32 total_len;
+ u32 downloaded_len;
+};
+
+/* Struct used for the I2C protocol between host and Eagle, internal use only */
+struct __packed MXL_EAGLE_HOST_MSG_HEADER_T {
+ u8 opcode;
+ u8 seqnum;
+ u8 payload_size;
+ u8 status;
+ u32 checksum;
+};
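+/* sizeof() of the above packed struct equals MXL_EAGLE_HOST_MSG_HEADER_SIZE (8) */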
+
+/* Device version information struct */
+struct __packed MXL_EAGLE_DEV_VER_T {
+ u8 chip_id;
+ u8 firmware_ver[MXL_EAGLE_VERSION_SIZE];
+ u8 mxlware_ver[MXL_EAGLE_VERSION_SIZE];
+};
+
+/* Xtal configuration struct */
+struct __packed MXL_EAGLE_DEV_XTAL_T {
+ u8 xtal_cap; /* accepted range is 1..31 pF. Default is 26 */
+ u8 clk_out_enable;
+ u8 clk_out_div_enable; /* clock out freq is xtal freq / 6 */
+ u8 xtal_sharing_enable; /* if enabled set xtal_cap to 25 pF */
+ u8 xtal_calibration_enable; /* enable for master, disable for slave */
+};
+
+/* GPIO direction struct, internally used in GPIO configuration API */
+struct __packed MXL_EAGLE_DEV_GPIO_DIRECTION_T {
+ u8 gpio_number;
+ u8 gpio_direction;
+};
+
+/* GPO level struct, internally used in GPIO configuration API */
+struct __packed MXL_EAGLE_DEV_GPO_LEVEL_T {
+ u8 gpio_number;
+ u8 gpo_level;
+};
+
+/* Device Status struct */
+struct MXL_EAGLE_DEV_STATUS_T {
+ u8 temperature;
+ u8 demod_type;
+ u8 power_mode;
+ u8 cpu_utilization_percent;
+};
+
+/* Device interrupt configuration struct */
+struct __packed MXL_EAGLE_DEV_INTR_CFG_T {
+ u32 intr_mask;
+ u8 edge_trigger;
+ u8 positive_trigger;
+ u8 global_enable_interrupt;
+};
+
+/* MPEG pad drive parameters, used on MPEG output configuration */
+/* See MXL_EAGLE_IO_MUX_DRIVE_MODE_E */
+struct MXL_EAGLE_MPEG_PAD_DRIVE_T {
+ u8 pad_drv_mpeg_syn;
+ u8 pad_drv_mpeg_dat;
+ u8 pad_drv_mpeg_val;
+ u8 pad_drv_mpeg_clk;
+};
+
+/* MPEGOUT parameter struct, used in MPEG output configuration */
+struct MXL_EAGLE_MPEGOUT_PARAMS_T {
+ u8 mpeg_parallel;
+ u8 msb_first;
+ u8 mpeg_sync_pulse_width; /* See MXL_EAGLE_MPEG_DATA_FORMAT_E */
+ u8 mpeg_valid_pol;
+ u8 mpeg_sync_pol;
+ u8 mpeg_clk_pol;
+ u8 mpeg3wire_mode_enable;
+ u8 mpeg_clk_freq;
+ struct MXL_EAGLE_MPEG_PAD_DRIVE_T mpeg_pad_drv;
+};
+
+/* QAM Demodulator parameters struct, used in QAM params configuration */
+struct __packed MXL_EAGLE_QAM_DEMOD_PARAMS_T {
+ u8 annex_type;
+ u8 qam_type;
+ u8 iq_flip;
+ u8 search_range_idx;
+ u8 spur_canceller_enable;
+ u32 symbol_rate_hz;
+ u32 symbol_rate_256qam_hz;
+};
+
+/* QAM Demodulator status */
+struct MXL_EAGLE_QAM_DEMOD_STATUS_T {
+ u8 annex_type;
+ u8 qam_type;
+ u8 iq_flip;
+ u8 interleaver_depth_i;
+ u8 interleaver_depth_j;
+ u8 qam_locked;
+ u8 fec_locked;
+ u8 mpeg_locked;
+ u16 snr_db_tenths;
+ s16 timing_offset;
+ s32 carrier_offset_hz;
+};
+
+/* QAM Demodulator error counters */
+struct MXL_EAGLE_QAM_DEMOD_ERROR_COUNTERS_T {
+ u32 corrected_code_words;
+ u32 uncorrected_code_words;
+ u32 total_code_words_received;
+ u32 corrected_bits;
+ u32 error_mpeg_frames;
+ u32 mpeg_frames_received;
+ u32 erasures;
+};
+
+/* QAM Demodulator constellation point */
+struct MXL_EAGLE_QAM_DEMOD_CONSTELLATION_VAL_T {
+ s16 i_value[12];
+ s16 q_value[12];
+};
+
+/* QAM Demodulator equalizer filter taps */
+struct MXL_EAGLE_QAM_DEMOD_EQU_FILTER_T {
+ s16 ffe_taps[MXL_EAGLE_QAM_FFE_TAPS_LENGTH];
+ s16 spur_taps[MXL_EAGLE_QAM_SPUR_TAPS_LENGTH];
+ s16 dfe_taps[MXL_EAGLE_QAM_DFE_TAPS_LENGTH];
+ u8 ffe_leading_tap_index;
+ u8 dfe_taps_number;
+};
+
+/* OOB Demodulator parameters struct, used in OOB params configuration */
+struct __packed MXL_EAGLE_OOB_DEMOD_PARAMS_T {
+ u8 symbol_rate;
+ u8 iq_flip;
+ u8 clk_pol;
+};
+
+/* OOB Demodulator error counters */
+struct MXL_EAGLE_OOB_DEMOD_ERROR_COUNTERS_T {
+ u32 corrected_packets;
+ u32 uncorrected_packets;
+ u32 total_packets_received;
+};
+
+/* OOB status */
+struct __packed MXL_EAGLE_OOB_DEMOD_STATUS_T {
+ u16 snr_db_tenths;
+ s16 timing_offset;
+ s32 carrier_offset_hz;
+ u8 qam_locked;
+ u8 fec_locked;
+ u8 mpeg_locked;
+ u8 retune_required;
+ u8 iq_flip;
+};
+
+/* ATSC Demodulator status */
+struct __packed MXL_EAGLE_ATSC_DEMOD_STATUS_T {
+ s16 snr_db_tenths;
+ s16 timing_offset;
+ s32 carrier_offset_hz;
+ u8 frame_lock;
+ u8 atsc_lock;
+ u8 fec_lock;
+};
+
+/* ATSC Demodulator error counters */
+struct MXL_EAGLE_ATSC_DEMOD_ERROR_COUNTERS_T {
+ u32 error_packets;
+ u32 total_packets;
+ u32 error_bytes;
+};
+
+/* ATSC Demodulator equalizers filter taps */
+struct __packed MXL_EAGLE_ATSC_DEMOD_EQU_FILTER_T {
+ s16 ffe_taps[MXL_EAGLE_ATSC_FFE_TAPS_LENGTH];
+ s8 dfe_taps[MXL_EAGLE_ATSC_DFE_TAPS_LENGTH];
+};
+
+/* Tuner AGC Status */
+struct __packed MXL_EAGLE_TUNER_AGC_STATUS_T {
+ u8 locked;
+ u16 raw_agc_gain; /* AGC gain [dB] = raw_agc_gain / 2^6 */
+ s16 rx_power_db_hundredths;
+};
+
+/* Tuner channel tune parameters */
+struct __packed MXL_EAGLE_TUNER_CHANNEL_PARAMS_T {
+ u32 freq_hz;
+ u8 tune_mode;
+ u8 bandwidth;
+};
+
+/* Tuner channel lock indications */
+struct __packed MXL_EAGLE_TUNER_LOCK_STATUS_T {
+ u8 rf_pll_locked;
+ u8 ref_pll_locked;
+};
+
+/* Smart antenna parameters, used in smart antenna configuration */
+struct __packed MXL_EAGLE_SMA_PARAMS_T {
+ u8 full_duplex_enable;
+ u8 rx_disable;
+ u8 idle_logic_high;
+};
+
+/* Smart antenna message format */
+struct __packed MXL_EAGLE_SMA_MESSAGE_T {
+ u32 payload_bits;
+ u8 total_num_bits;
+};
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 01dcc7f1b9b2..dcbeb9f5e12a 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -698,6 +698,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
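+ /* Fail with a proper error code on an out-of-range constellation. */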
+ ret = -EINVAL;
if (constellation > CONSTELLATION_NUM - 1)
goto err;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 2b9d81e4794a..462c0e059754 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -813,6 +813,20 @@ config VIDEO_IMX319
To compile this driver as a module, choose M here: the
module will be called imx319.
+config VIDEO_IMX334
+ tristate "Sony IMX334 sensor support"
+ depends on OF_GPIO
+ depends on I2C && VIDEO_V4L2
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX334 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx334.
+
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
depends on I2C && VIDEO_V4L2
@@ -936,6 +950,19 @@ config VIDEO_OV5647
To compile this driver as a module, choose M here: the
module will be called ov5647.
+config VIDEO_OV5648
+ tristate "OmniVision OV5648 sensor support"
+ depends on I2C && PM && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV5648 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5648.
+
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
@@ -1000,6 +1027,7 @@ config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
depends on I2C && VIDEO_V4L2
select REGMAP_SCCB
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV772x camera.
@@ -1047,6 +1075,19 @@ config VIDEO_OV8856
To compile this driver as a module, choose M here: the
module will be called ov8856.
+config VIDEO_OV8865
+ tristate "OmniVision OV8865 sensor support"
+ depends on I2C && PM && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV8865 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov8865.
+
config VIDEO_OV9640
tristate "OmniVision OV9640 sensor support"
depends on I2C && VIDEO_V4L2
@@ -1199,12 +1240,16 @@ config VIDEO_NOON010PC30
source "drivers/media/i2c/m5mols/Kconfig"
+config VIDEO_MAX9271_LIB
+ tristate
+
config VIDEO_RDACM20
tristate "IMI RDACM20 camera support"
depends on I2C
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
+ select VIDEO_MAX9271_LIB
help
This driver supports the IMI RDACM20 GMSL camera, used in
ADAS systems.
@@ -1212,6 +1257,20 @@ config VIDEO_RDACM20
This camera should be used in conjunction with a GMSL
deserialiser such as the MAX9286.
+config VIDEO_RDACM21
+ tristate "IMI RDACM21 camera support"
+ depends on I2C
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select VIDEO_MAX9271_LIB
+ help
+ This driver supports the IMI RDACM21 GMSL camera, used in
+ ADAS systems.
+
+ This camera should be used in conjunction with a GMSL
+ deserialiser such as the MAX9286.
+
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index a3149dce21bb..0c067beca066 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
+obj-$(CONFIG_VIDEO_OV5648) += ov5648.o
obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV772X) += ov772x.o
obj-$(CONFIG_VIDEO_OV7740) += ov7740.o
obj-$(CONFIG_VIDEO_OV8856) += ov8856.o
+obj-$(CONFIG_VIDEO_OV8865) += ov8865.o
obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV9734) += ov9734.o
@@ -120,10 +122,12 @@ obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
+obj-$(CONFIG_VIDEO_IMX334) += imx334.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
-rdacm20-camera_module-objs := rdacm20.o max9271.o
-obj-$(CONFIG_VIDEO_RDACM20) += rdacm20-camera_module.o
+obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o
+obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o
+obj-$(CONFIG_VIDEO_RDACM21) += rdacm21.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index eb7b6f01f623..fcc39360cc50 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -17,20 +17,20 @@
#include "ccs-pll.h"
/* Return an even number or one. */
-static inline uint32_t clk_div_even(uint32_t a)
+static inline u32 clk_div_even(u32 a)
{
- return max_t(uint32_t, 1, a & ~1);
+ return max_t(u32, 1, a & ~1);
}
/* Return an even number or one. */
-static inline uint32_t clk_div_even_up(uint32_t a)
+static inline u32 clk_div_even_up(u32 a)
{
if (a == 1)
return 1;
return (a + 1) & ~1;
}
-static inline uint32_t is_one_or_even(uint32_t a)
+static inline u32 is_one_or_even(u32 a)
{
if (a == 1)
return 1;
@@ -40,13 +40,13 @@ static inline uint32_t is_one_or_even(uint32_t a)
return 1;
}
-static inline uint32_t one_or_more(uint32_t a)
+static inline u32 one_or_more(u32 a)
{
return a ?: 1;
}
-static int bounds_check(struct device *dev, uint32_t val,
- uint32_t min, uint32_t max, const char *prefix,
+static int bounds_check(struct device *dev, u32 val,
+ u32 min, u32 max, const char *prefix,
char *str)
{
if (val >= min && val <= max)
@@ -138,12 +138,12 @@ static void print_pll(struct device *dev, struct ccs_pll *pll)
pll->flags & PLL_FL(OP_PIX_DDR) ? " op-pix-ddr" : "");
}
-static uint32_t op_sys_ddr(uint32_t flags)
+static u32 op_sys_ddr(u32 flags)
{
return flags & CCS_PLL_FLAG_OP_SYS_DDR ? 1 : 0;
}
-static uint32_t op_pix_ddr(uint32_t flags)
+static u32 op_pix_ddr(u32 flags)
{
return flags & CCS_PLL_FLAG_OP_PIX_DDR ? 1 : 0;
}
@@ -250,8 +250,8 @@ static int check_ext_bounds(struct device *dev, struct ccs_pll *pll)
static void
ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim,
struct ccs_pll *pll, struct ccs_pll_branch_fr *pll_fr,
- uint16_t min_vt_div, uint16_t max_vt_div,
- uint16_t *min_sys_div, uint16_t *max_sys_div)
+ u16 min_vt_div, u16 max_vt_div,
+ u16 *min_sys_div, u16 *max_sys_div)
{
/*
* Find limits for sys_clk_div. Not all values are possible with all
@@ -259,11 +259,11 @@ ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim,
*/
*min_sys_div = lim->vt_bk.min_sys_clk_div;
dev_dbg(dev, "min_sys_div: %u\n", *min_sys_div);
- *min_sys_div = max_t(uint16_t, *min_sys_div,
+ *min_sys_div = max_t(u16, *min_sys_div,
DIV_ROUND_UP(min_vt_div,
lim->vt_bk.max_pix_clk_div));
dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %u\n", *min_sys_div);
- *min_sys_div = max_t(uint16_t, *min_sys_div,
+ *min_sys_div = max_t(u16, *min_sys_div,
pll_fr->pll_op_clk_freq_hz
/ lim->vt_bk.max_sys_clk_freq_hz);
dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %u\n", *min_sys_div);
@@ -272,11 +272,11 @@ ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim,
*max_sys_div = lim->vt_bk.max_sys_clk_div;
dev_dbg(dev, "max_sys_div: %u\n", *max_sys_div);
- *max_sys_div = min_t(uint16_t, *max_sys_div,
+ *max_sys_div = min_t(u16, *max_sys_div,
DIV_ROUND_UP(max_vt_div,
lim->vt_bk.min_pix_clk_div));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %u\n", *max_sys_div);
- *max_sys_div = min_t(uint16_t, *max_sys_div,
+ *max_sys_div = min_t(u16, *max_sys_div,
DIV_ROUND_UP(pll_fr->pll_op_clk_freq_hz,
lim->vt_bk.min_pix_clk_freq_hz));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %u\n", *max_sys_div);
@@ -289,15 +289,15 @@ ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim,
static inline int
__ccs_pll_calculate_vt_tree(struct device *dev,
const struct ccs_pll_limits *lim,
- struct ccs_pll *pll, uint32_t mul, uint32_t div)
+ struct ccs_pll *pll, u32 mul, u32 div)
{
const struct ccs_pll_branch_limits_fr *lim_fr = &lim->vt_fr;
const struct ccs_pll_branch_limits_bk *lim_bk = &lim->vt_bk;
struct ccs_pll_branch_fr *pll_fr = &pll->vt_fr;
struct ccs_pll_branch_bk *pll_bk = &pll->vt_bk;
- uint32_t more_mul;
- uint16_t best_pix_div = SHRT_MAX >> 1, best_div;
- uint16_t vt_div, min_sys_div, max_sys_div, sys_div;
+ u32 more_mul;
+ u16 best_pix_div = SHRT_MAX >> 1, best_div;
+ u16 vt_div, min_sys_div, max_sys_div, sys_div;
pll_fr->pll_ip_clk_freq_hz =
pll->ext_clk_freq_hz / pll_fr->pre_pll_clk_div;
@@ -331,7 +331,7 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
for (sys_div = min_sys_div; sys_div <= max_sys_div;
sys_div += 2 - (sys_div & 1)) {
- uint16_t pix_div;
+ u16 pix_div;
if (vt_div % sys_div)
continue;
@@ -379,9 +379,9 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
{
const struct ccs_pll_branch_limits_fr *lim_fr = &lim->vt_fr;
struct ccs_pll_branch_fr *pll_fr = &pll->vt_fr;
- uint16_t min_pre_pll_clk_div = lim_fr->min_pre_pll_clk_div;
- uint16_t max_pre_pll_clk_div = lim_fr->max_pre_pll_clk_div;
- uint32_t pre_mul, pre_div;
+ u16 min_pre_pll_clk_div = lim_fr->min_pre_pll_clk_div;
+ u16 max_pre_pll_clk_div = lim_fr->max_pre_pll_clk_div;
+ u32 pre_mul, pre_div;
pre_div = gcd(pll->pixel_rate_csi,
pll->ext_clk_freq_hz * pll->vt_lanes);
@@ -390,11 +390,11 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
/* Make sure PLL input frequency is within limits */
max_pre_pll_clk_div =
- min_t(uint16_t, max_pre_pll_clk_div,
+ min_t(u16, max_pre_pll_clk_div,
DIV_ROUND_UP(pll->ext_clk_freq_hz,
lim_fr->min_pll_ip_clk_freq_hz));
- min_pre_pll_clk_div = max_t(uint16_t, min_pre_pll_clk_div,
+ min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div,
pll->ext_clk_freq_hz /
lim_fr->max_pll_ip_clk_freq_hz);
@@ -406,7 +406,7 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
pll_fr->pre_pll_clk_div +=
(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER) ? 1 :
2 - (pll_fr->pre_pll_clk_div & 1)) {
- uint32_t mul, div;
+ u32 mul, div;
int rval;
div = gcd(pre_mul * pll_fr->pre_pll_clk_div, pre_div);
@@ -440,13 +440,13 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
const struct ccs_pll_branch_limits_bk *op_lim_bk,
struct ccs_pll *pll, struct ccs_pll_branch_fr *pll_fr,
struct ccs_pll_branch_bk *op_pll_bk, bool cphy,
- uint32_t phy_const)
+ u32 phy_const)
{
- uint16_t sys_div;
- uint16_t best_pix_div = SHRT_MAX >> 1;
- uint16_t vt_op_binning_div;
- uint16_t min_vt_div, max_vt_div, vt_div;
- uint16_t min_sys_div, max_sys_div;
+ u16 sys_div;
+ u16 best_pix_div = SHRT_MAX >> 1;
+ u16 vt_op_binning_div;
+ u16 min_vt_div, max_vt_div, vt_div;
+ u16 min_sys_div, max_sys_div;
if (pll->flags & CCS_PLL_FLAG_NO_OP_CLOCKS)
goto out_calc_pixel_rate;
@@ -500,18 +500,18 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
/* Find smallest and biggest allowed vt divisor. */
dev_dbg(dev, "min_vt_div: %u\n", min_vt_div);
- min_vt_div = max_t(uint16_t, min_vt_div,
+ min_vt_div = max_t(u16, min_vt_div,
DIV_ROUND_UP(pll_fr->pll_op_clk_freq_hz,
lim->vt_bk.max_pix_clk_freq_hz));
dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %u\n",
min_vt_div);
- min_vt_div = max_t(uint16_t, min_vt_div, lim->vt_bk.min_pix_clk_div
- * lim->vt_bk.min_sys_clk_div);
+ min_vt_div = max_t(u16, min_vt_div, lim->vt_bk.min_pix_clk_div
+ * lim->vt_bk.min_sys_clk_div);
dev_dbg(dev, "min_vt_div: min_vt_clk_div: %u\n", min_vt_div);
max_vt_div = lim->vt_bk.max_sys_clk_div * lim->vt_bk.max_pix_clk_div;
dev_dbg(dev, "max_vt_div: %u\n", max_vt_div);
- max_vt_div = min_t(uint16_t, max_vt_div,
+ max_vt_div = min_t(u16, max_vt_div,
DIV_ROUND_UP(pll_fr->pll_op_clk_freq_hz,
lim->vt_bk.min_pix_clk_freq_hz));
dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %u\n",
@@ -526,12 +526,12 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
* divisor.
*/
for (vt_div = min_vt_div; vt_div <= max_vt_div; vt_div++) {
- uint16_t __max_sys_div = vt_div & 1 ? 1 : max_sys_div;
+ u16 __max_sys_div = vt_div & 1 ? 1 : max_sys_div;
for (sys_div = min_sys_div; sys_div <= __max_sys_div;
sys_div += 2 - (sys_div & 1)) {
- uint16_t pix_div;
- uint16_t rounded_div;
+ u16 pix_div;
+ u16 rounded_div;
pix_div = DIV_ROUND_UP(vt_div, sys_div);
@@ -588,9 +588,9 @@ ccs_pll_calculate_op(struct device *dev, const struct ccs_pll_limits *lim,
const struct ccs_pll_branch_limits_fr *op_lim_fr,
const struct ccs_pll_branch_limits_bk *op_lim_bk,
struct ccs_pll *pll, struct ccs_pll_branch_fr *op_pll_fr,
- struct ccs_pll_branch_bk *op_pll_bk, uint32_t mul,
- uint32_t div, uint32_t op_sys_clk_freq_hz_sdr, uint32_t l,
- bool cphy, uint32_t phy_const)
+ struct ccs_pll_branch_bk *op_pll_bk, u32 mul,
+ u32 div, u32 op_sys_clk_freq_hz_sdr, u32 l,
+ bool cphy, u32 phy_const)
{
/*
* Higher multipliers (and divisors) are often required than
@@ -598,9 +598,9 @@ ccs_pll_calculate_op(struct device *dev, const struct ccs_pll_limits *lim,
* There are limits for all values in the clock tree. These
* are the minimum and maximum multiplier for mul.
*/
- uint32_t more_mul_min, more_mul_max;
- uint32_t more_mul_factor;
- uint32_t i;
+ u32 more_mul_min, more_mul_max;
+ u32 more_mul_factor;
+ u32 i;
/*
* Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
@@ -614,7 +614,7 @@ ccs_pll_calculate_op(struct device *dev, const struct ccs_pll_limits *lim,
more_mul_max);
/* Don't go above max pll op frequency. */
more_mul_max =
- min_t(uint32_t,
+ min_t(u32,
more_mul_max,
op_lim_fr->max_pll_op_clk_freq_hz
/ (pll->ext_clk_freq_hz /
@@ -706,14 +706,14 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
struct ccs_pll_branch_fr *op_pll_fr;
struct ccs_pll_branch_bk *op_pll_bk;
bool cphy = pll->bus_type == CCS_PLL_BUS_TYPE_CSI2_CPHY;
- uint32_t phy_const = cphy ? CPHY_CONST : DPHY_CONST;
- uint32_t op_sys_clk_freq_hz_sdr;
- uint16_t min_op_pre_pll_clk_div;
- uint16_t max_op_pre_pll_clk_div;
- uint32_t mul, div;
- uint32_t l = (!pll->op_bits_per_lane ||
- pll->op_bits_per_lane >= pll->bits_per_pixel) ? 1 : 2;
- uint32_t i;
+ u32 phy_const = cphy ? CPHY_CONST : DPHY_CONST;
+ u32 op_sys_clk_freq_hz_sdr;
+ u16 min_op_pre_pll_clk_div;
+ u16 max_op_pre_pll_clk_div;
+ u32 mul, div;
+ u32 l = (!pll->op_bits_per_lane ||
+ pll->op_bits_per_lane >= pll->bits_per_pixel) ? 1 : 2;
+ u32 i;
int rval = -EINVAL;
if (!(pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL)) {
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
switch (pll->bus_type) {
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- op_sys_clk_freq_hz_sdr = pll->link_freq * 2
- * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- 1 : pll->csi2.lanes);
- break;
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
- op_sys_clk_freq_hz_sdr =
- pll->link_freq
+ op_sys_clk_freq_hz_sdr = pll->link_freq * 2
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
1 : pll->csi2.lanes);
break;
@@ -797,11 +791,11 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
dev_dbg(dev, "min / max op_pre_pll_clk_div: %u / %u\n",
op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div);
max_op_pre_pll_clk_div =
- min_t(uint16_t, op_lim_fr->max_pre_pll_clk_div,
+ min_t(u16, op_lim_fr->max_pre_pll_clk_div,
clk_div_even(pll->ext_clk_freq_hz /
op_lim_fr->min_pll_ip_clk_freq_hz));
min_op_pre_pll_clk_div =
- max_t(uint16_t, op_lim_fr->min_pre_pll_clk_div,
+ max_t(u16, op_lim_fr->min_pre_pll_clk_div,
clk_div_even_up(
DIV_ROUND_UP(pll->ext_clk_freq_hz,
op_lim_fr->max_pll_ip_clk_freq_hz)));
@@ -815,7 +809,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
dev_dbg(dev, "mul %u / div %u\n", mul, div);
min_op_pre_pll_clk_div =
- max_t(uint16_t, min_op_pre_pll_clk_div,
+ max_t(u16, min_op_pre_pll_clk_div,
clk_div_even_up(
mul /
one_or_more(
@@ -883,4 +877,4 @@ EXPORT_SYMBOL_GPL(ccs_pll_calculate);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_DESCRIPTION("Generic MIPI CCS/SMIA/SMIA++ PLL calculator");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ccs-pll.h b/drivers/media/i2c/ccs-pll.h
index b97d7ff50ea5..6eb1b1c68e1e 100644
--- a/drivers/media/i2c/ccs-pll.h
+++ b/drivers/media/i2c/ccs-pll.h
@@ -44,10 +44,10 @@
* @pll_op_clk_freq_hz: PLL output clock frequency
*/
struct ccs_pll_branch_fr {
- uint16_t pre_pll_clk_div;
- uint16_t pll_multiplier;
- uint32_t pll_ip_clk_freq_hz;
- uint32_t pll_op_clk_freq_hz;
+ u16 pre_pll_clk_div;
+ u16 pll_multiplier;
+ u32 pll_ip_clk_freq_hz;
+ u32 pll_op_clk_freq_hz;
};
/**
@@ -61,10 +61,10 @@ struct ccs_pll_branch_fr {
* @pix_clk_freq_hz: Pixel clock frequency
*/
struct ccs_pll_branch_bk {
- uint16_t sys_clk_div;
- uint16_t pix_clk_div;
- uint32_t sys_clk_freq_hz;
- uint32_t pix_clk_freq_hz;
+ u16 sys_clk_div;
+ u16 pix_clk_div;
+ u32 sys_clk_freq_hz;
+ u32 pix_clk_freq_hz;
};
/**
@@ -97,21 +97,21 @@ struct ccs_pll_branch_bk {
*/
struct ccs_pll {
/* input values */
- uint8_t bus_type;
- uint8_t op_lanes;
- uint8_t vt_lanes;
+ u8 bus_type;
+ u8 op_lanes;
+ u8 vt_lanes;
struct {
- uint8_t lanes;
+ u8 lanes;
} csi2;
- uint8_t binning_horizontal;
- uint8_t binning_vertical;
- uint8_t scale_m;
- uint8_t scale_n;
- uint8_t bits_per_pixel;
- uint8_t op_bits_per_lane;
- uint16_t flags;
- uint32_t link_freq;
- uint32_t ext_clk_freq_hz;
+ u8 binning_horizontal;
+ u8 binning_vertical;
+ u8 scale_m;
+ u8 scale_n;
+ u8 bits_per_pixel;
+ u8 op_bits_per_lane;
+ u16 flags;
+ u32 link_freq;
+ u32 ext_clk_freq_hz;
/* output values */
struct ccs_pll_branch_fr vt_fr;
@@ -119,8 +119,8 @@ struct ccs_pll {
struct ccs_pll_branch_fr op_fr;
struct ccs_pll_branch_bk op_bk;
- uint32_t pixel_rate_csi;
- uint32_t pixel_rate_pixel_array;
+ u32 pixel_rate_csi;
+ u32 pixel_rate_pixel_array;
};
/**
@@ -136,14 +136,14 @@ struct ccs_pll {
* @max_pll_op_clk_freq_hz: Maximum PLL output clock frequency
*/
struct ccs_pll_branch_limits_fr {
- uint16_t min_pre_pll_clk_div;
- uint16_t max_pre_pll_clk_div;
- uint32_t min_pll_ip_clk_freq_hz;
- uint32_t max_pll_ip_clk_freq_hz;
- uint16_t min_pll_multiplier;
- uint16_t max_pll_multiplier;
- uint32_t min_pll_op_clk_freq_hz;
- uint32_t max_pll_op_clk_freq_hz;
+ u16 min_pre_pll_clk_div;
+ u16 max_pre_pll_clk_div;
+ u32 min_pll_ip_clk_freq_hz;
+ u32 max_pll_ip_clk_freq_hz;
+ u16 min_pll_multiplier;
+ u16 max_pll_multiplier;
+ u32 min_pll_op_clk_freq_hz;
+ u32 max_pll_op_clk_freq_hz;
};
/**
@@ -159,14 +159,14 @@ struct ccs_pll_branch_limits_fr {
* @max_pix_clk_freq_hz: Maximum pixel clock frequency
*/
struct ccs_pll_branch_limits_bk {
- uint16_t min_sys_clk_div;
- uint16_t max_sys_clk_div;
- uint32_t min_sys_clk_freq_hz;
- uint32_t max_sys_clk_freq_hz;
- uint16_t min_pix_clk_div;
- uint16_t max_pix_clk_div;
- uint32_t min_pix_clk_freq_hz;
- uint32_t max_pix_clk_freq_hz;
+ u16 min_sys_clk_div;
+ u16 max_sys_clk_div;
+ u32 min_sys_clk_freq_hz;
+ u32 max_sys_clk_freq_hz;
+ u16 min_pix_clk_div;
+ u16 max_pix_clk_div;
+ u32 min_pix_clk_freq_hz;
+ u32 max_pix_clk_freq_hz;
};
/**
@@ -183,8 +183,8 @@ struct ccs_pll_branch_limits_bk {
*/
struct ccs_pll_limits {
/* Strict PLL limits */
- uint32_t min_ext_clk_freq_hz;
- uint32_t max_ext_clk_freq_hz;
+ u32 min_ext_clk_freq_hz;
+ u32 max_ext_clk_freq_hz;
struct ccs_pll_branch_limits_fr vt_fr;
struct ccs_pll_branch_limits_bk vt_bk;
@@ -192,8 +192,8 @@ struct ccs_pll_limits {
struct ccs_pll_branch_limits_bk op_bk;
/* Other relevant limits */
- uint32_t min_line_length_pck_bin;
- uint32_t min_line_length_pck;
+ u32 min_line_length_pck_bin;
+ u32 min_line_length_pck;
};
struct device;
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index b39ae5f8446b..15afbb4f5b31 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -28,6 +28,7 @@
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-device.h>
+#include <uapi/linux/ccs.h>
#include "ccs.h"
@@ -382,15 +383,22 @@ static int ccs_pll_configure(struct ccs_sensor *sensor)
if (rval < 0)
return rval;
- /* Lane op clock ratio does not apply here. */
- rval = ccs_write(sensor, REQUESTED_LINK_RATE,
- DIV_ROUND_UP(pll->op_bk.sys_clk_freq_hz,
- 1000000 / 256 / 256) *
- (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- sensor->pll.csi2.lanes : 1) <<
- (pll->flags & CCS_PLL_FLAG_OP_SYS_DDR ? 1 : 0));
- if (rval < 0 || sensor->pll.flags & CCS_PLL_FLAG_NO_OP_CLOCKS)
- return rval;
+ if (!(CCS_LIM(sensor, PHY_CTRL_CAPABILITY) &
+ CCS_PHY_CTRL_CAPABILITY_AUTO_PHY_CTL)) {
+ /* Lane op clock ratio does not apply here. */
+ rval = ccs_write(sensor, REQUESTED_LINK_RATE,
+ DIV_ROUND_UP(pll->op_bk.sys_clk_freq_hz,
+ 1000000 / 256 / 256) *
+ (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
+ sensor->pll.csi2.lanes : 1) <<
+ (pll->flags & CCS_PLL_FLAG_OP_SYS_DDR ?
+ 1 : 0));
+ if (rval < 0)
+ return rval;
+ }
+
+ if (sensor->pll.flags & CCS_PLL_FLAG_NO_OP_CLOCKS)
+ return 0;
rval = ccs_write(sensor, OP_PIX_CLK_DIV, pll->op_bk.pix_clk_div);
if (rval < 0)
@@ -671,6 +679,49 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
rval = ccs_write(sensor, ANALOG_GAIN_CODE_GLOBAL, ctrl->val);
break;
+
+ case V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN:
+ rval = ccs_write(sensor, ANALOG_LINEAR_GAIN_GLOBAL, ctrl->val);
+
+ break;
+
+ case V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN:
+ rval = ccs_write(sensor, ANALOG_EXPONENTIAL_GAIN_GLOBAL,
+ ctrl->val);
+
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ if (CCS_LIM(sensor, DIGITAL_GAIN_CAPABILITY) ==
+ CCS_DIGITAL_GAIN_CAPABILITY_GLOBAL) {
+ rval = ccs_write(sensor, DIGITAL_GAIN_GLOBAL,
+ ctrl->val);
+ break;
+ }
+
+ rval = ccs_write_addr(sensor,
+ SMIAPP_REG_U16_DIGITAL_GAIN_GREENR,
+ ctrl->val);
+ if (rval)
+ break;
+
+ rval = ccs_write_addr(sensor,
+ SMIAPP_REG_U16_DIGITAL_GAIN_RED,
+ ctrl->val);
+ if (rval)
+ break;
+
+ rval = ccs_write_addr(sensor,
+ SMIAPP_REG_U16_DIGITAL_GAIN_BLUE,
+ ctrl->val);
+ if (rval)
+ break;
+
+ rval = ccs_write_addr(sensor,
+ SMIAPP_REG_U16_DIGITAL_GAIN_GREENB,
+ ctrl->val);
+
+ break;
case V4L2_CID_EXPOSURE:
rval = ccs_write(sensor, COARSE_INTEGRATION_TIME, ctrl->val);
@@ -713,6 +764,19 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
rval = ccs_write(sensor, TEST_DATA_GREENB, ctrl->val);
break;
+ case V4L2_CID_CCS_SHADING_CORRECTION:
+ rval = ccs_write(sensor, SHADING_CORRECTION_EN,
+ ctrl->val ? CCS_SHADING_CORRECTION_EN_ENABLE :
+ 0);
+
+ if (!rval && sensor->luminance_level)
+ v4l2_ctrl_activate(sensor->luminance_level, ctrl->val);
+
+ break;
+ case V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL:
+ rval = ccs_write(sensor, LUMINANCE_CORRECTION_LEVEL, ctrl->val);
+
+ break;
case V4L2_CID_PIXEL_RATE:
/* For v4l2_ctrl_s_ctrl_int64() used internally. */
rval = 0;
@@ -739,19 +803,144 @@ static int ccs_init_controls(struct ccs_sensor *sensor)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
- rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 12);
+ rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 17);
if (rval)
return rval;
sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
- sensor->analog_gain = v4l2_ctrl_new_std(
- &sensor->pixel_array->ctrl_handler, &ccs_ctrl_ops,
- V4L2_CID_ANALOGUE_GAIN,
- CCS_LIM(sensor, ANALOG_GAIN_CODE_MIN),
- CCS_LIM(sensor, ANALOG_GAIN_CODE_MAX),
- max(CCS_LIM(sensor, ANALOG_GAIN_CODE_STEP), 1U),
- CCS_LIM(sensor, ANALOG_GAIN_CODE_MIN));
+ switch (CCS_LIM(sensor, ANALOG_GAIN_CAPABILITY)) {
+ case CCS_ANALOG_GAIN_CAPABILITY_GLOBAL: {
+ struct {
+ const char *name;
+ u32 id;
+ s32 value;
+ } const gain_ctrls[] = {
+ { "Analogue Gain m0", V4L2_CID_CCS_ANALOGUE_GAIN_M0,
+ CCS_LIM(sensor, ANALOG_GAIN_M0), },
+ { "Analogue Gain c0", V4L2_CID_CCS_ANALOGUE_GAIN_C0,
+ CCS_LIM(sensor, ANALOG_GAIN_C0), },
+ { "Analogue Gain m1", V4L2_CID_CCS_ANALOGUE_GAIN_M1,
+ CCS_LIM(sensor, ANALOG_GAIN_M1), },
+ { "Analogue Gain c1", V4L2_CID_CCS_ANALOGUE_GAIN_C1,
+ CCS_LIM(sensor, ANALOG_GAIN_C1), },
+ };
+ struct v4l2_ctrl_config ctrl_cfg = {
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &ccs_ctrl_ops,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ .step = 1,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gain_ctrls); i++) {
+ ctrl_cfg.name = gain_ctrls[i].name;
+ ctrl_cfg.id = gain_ctrls[i].id;
+ ctrl_cfg.min = ctrl_cfg.max = ctrl_cfg.def =
+ gain_ctrls[i].value;
+
+ v4l2_ctrl_new_custom(&sensor->pixel_array->ctrl_handler,
+ &ctrl_cfg, NULL);
+ }
+
+ v4l2_ctrl_new_std(&sensor->pixel_array->ctrl_handler,
+ &ccs_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ CCS_LIM(sensor, ANALOG_GAIN_CODE_MIN),
+ CCS_LIM(sensor, ANALOG_GAIN_CODE_MAX),
+ max(CCS_LIM(sensor, ANALOG_GAIN_CODE_STEP),
+ 1U),
+ CCS_LIM(sensor, ANALOG_GAIN_CODE_MIN));
+ }
+ break;
+
+ case CCS_ANALOG_GAIN_CAPABILITY_ALTERNATE_GLOBAL: {
+ struct {
+ const char *name;
+ u32 id;
+ u16 min, max, step;
+ } const gain_ctrls[] = {
+ {
+ "Analogue Linear Gain",
+ V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN,
+ CCS_LIM(sensor, ANALOG_LINEAR_GAIN_MIN),
+ CCS_LIM(sensor, ANALOG_LINEAR_GAIN_MAX),
+ max(CCS_LIM(sensor,
+ ANALOG_LINEAR_GAIN_STEP_SIZE),
+ 1U),
+ },
+ {
+ "Analogue Exponential Gain",
+ V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN,
+ CCS_LIM(sensor, ANALOG_EXPONENTIAL_GAIN_MIN),
+ CCS_LIM(sensor, ANALOG_EXPONENTIAL_GAIN_MAX),
+ max(CCS_LIM(sensor,
+ ANALOG_EXPONENTIAL_GAIN_STEP_SIZE),
+ 1U),
+ },
+ };
+ struct v4l2_ctrl_config ctrl_cfg = {
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &ccs_ctrl_ops,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gain_ctrls); i++) {
+ ctrl_cfg.name = gain_ctrls[i].name;
+ ctrl_cfg.min = ctrl_cfg.def = gain_ctrls[i].min;
+ ctrl_cfg.max = gain_ctrls[i].max;
+ ctrl_cfg.step = gain_ctrls[i].step;
+ ctrl_cfg.id = gain_ctrls[i].id;
+
+ v4l2_ctrl_new_custom(&sensor->pixel_array->ctrl_handler,
+ &ctrl_cfg, NULL);
+ }
+ }
+ }
+
+ if (CCS_LIM(sensor, SHADING_CORRECTION_CAPABILITY) &
+ (CCS_SHADING_CORRECTION_CAPABILITY_COLOR_SHADING |
+ CCS_SHADING_CORRECTION_CAPABILITY_LUMINANCE_CORRECTION)) {
+ const struct v4l2_ctrl_config ctrl_cfg = {
+ .name = "Shading Correction",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .id = V4L2_CID_CCS_SHADING_CORRECTION,
+ .ops = &ccs_ctrl_ops,
+ .max = 1,
+ .step = 1,
+ };
+
+ v4l2_ctrl_new_custom(&sensor->pixel_array->ctrl_handler,
+ &ctrl_cfg, NULL);
+ }
+
+ if (CCS_LIM(sensor, SHADING_CORRECTION_CAPABILITY) &
+ CCS_SHADING_CORRECTION_CAPABILITY_LUMINANCE_CORRECTION) {
+ const struct v4l2_ctrl_config ctrl_cfg = {
+ .name = "Luminance Correction Level",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .id = V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL,
+ .ops = &ccs_ctrl_ops,
+ .max = 255,
+ .step = 1,
+ .def = 128,
+ };
+
+ sensor->luminance_level =
+ v4l2_ctrl_new_custom(&sensor->pixel_array->ctrl_handler,
+ &ctrl_cfg, NULL);
+ }
+
+ if (CCS_LIM(sensor, DIGITAL_GAIN_CAPABILITY) ==
+ CCS_DIGITAL_GAIN_CAPABILITY_GLOBAL ||
+ CCS_LIM(sensor, DIGITAL_GAIN_CAPABILITY) ==
+ SMIAPP_DIGITAL_GAIN_CAPABILITY_PER_CHANNEL)
+ v4l2_ctrl_new_std(&sensor->pixel_array->ctrl_handler,
+ &ccs_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ CCS_LIM(sensor, DIGITAL_GAIN_MIN),
+ CCS_LIM(sensor, DIGITAL_GAIN_MAX),
+ max(CCS_LIM(sensor, DIGITAL_GAIN_STEP_SIZE),
+ 1U),
+ 0x100);
/* Exposure limits will be updated soon, use just something here. */
sensor->exposure = v4l2_ctrl_new_std(
@@ -1001,7 +1190,7 @@ static void ccs_update_blanking(struct ccs_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
- uint16_t min_fll, max_fll, min_llp, max_llp, min_lbp;
+ u16 min_fll, max_fll, min_llp, max_llp, min_lbp;
int min, max;
if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
@@ -1322,6 +1511,28 @@ static int ccs_write_msr_regs(struct ccs_sensor *sensor)
sensor->mdata.num_module_manufacturer_regs);
}
+static int ccs_update_phy_ctrl(struct ccs_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ u8 val;
+
+ if (!sensor->ccs_limits)
+ return 0;
+
+ if (CCS_LIM(sensor, PHY_CTRL_CAPABILITY) &
+ CCS_PHY_CTRL_CAPABILITY_AUTO_PHY_CTL) {
+ val = CCS_PHY_CTRL_AUTO;
+ } else if (CCS_LIM(sensor, PHY_CTRL_CAPABILITY) &
+ CCS_PHY_CTRL_CAPABILITY_UI_PHY_CTL) {
+ val = CCS_PHY_CTRL_UI;
+ } else {
+ dev_err(&client->dev, "manual PHY control not supported\n");
+ return -EINVAL;
+ }
+
+ return ccs_write(sensor, PHY_CTRL, val);
+}
+
static int ccs_power_on(struct device *dev)
{
struct v4l2_subdev *subdev = dev_get_drvdata(dev);
@@ -1333,7 +1544,6 @@ static int ccs_power_on(struct device *dev)
struct ccs_sensor *sensor =
container_of(ssd, struct ccs_sensor, ssds[0]);
const struct ccs_device *ccsdev = device_get_match_data(dev);
- unsigned int sleep;
int rval;
rval = regulator_bulk_enable(ARRAY_SIZE(ccs_regulators),
@@ -1343,21 +1553,25 @@ static int ccs_power_on(struct device *dev)
return rval;
}
- rval = clk_prepare_enable(sensor->ext_clk);
- if (rval < 0) {
- dev_dbg(dev, "failed to enable xclk\n");
- goto out_xclk_fail;
- }
+ if (sensor->reset || sensor->xshutdown || sensor->ext_clk) {
+ unsigned int sleep;
+
+ rval = clk_prepare_enable(sensor->ext_clk);
+ if (rval < 0) {
+ dev_dbg(dev, "failed to enable xclk\n");
+ goto out_xclk_fail;
+ }
- gpiod_set_value(sensor->reset, 0);
- gpiod_set_value(sensor->xshutdown, 1);
+ gpiod_set_value(sensor->reset, 0);
+ gpiod_set_value(sensor->xshutdown, 1);
- if (ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA)
- sleep = SMIAPP_RESET_DELAY(sensor->hwcfg.ext_clk);
- else
- sleep = 5000;
+ if (ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA)
+ sleep = SMIAPP_RESET_DELAY(sensor->hwcfg.ext_clk);
+ else
+ sleep = 5000;
- usleep_range(sleep, sleep);
+ usleep_range(sleep, sleep);
+ }
/*
* Failures to respond to the address change command have been noticed.
@@ -1370,18 +1584,27 @@ static int ccs_power_on(struct device *dev)
* is found.
*/
- if (sensor->hwcfg.i2c_addr_alt) {
- rval = ccs_change_cci_addr(sensor);
- if (rval) {
- dev_err(dev, "cci address change error\n");
+ if (!sensor->reset && !sensor->xshutdown) {
+ u8 retry = 100;
+ u32 reset;
+
+ rval = ccs_write(sensor, SOFTWARE_RESET, CCS_SOFTWARE_RESET_ON);
+ if (rval < 0) {
+ dev_err(dev, "software reset failed\n");
goto out_cci_addr_fail;
}
- }
- rval = ccs_write(sensor, SOFTWARE_RESET, CCS_SOFTWARE_RESET_ON);
- if (rval < 0) {
- dev_err(dev, "software reset failed\n");
- goto out_cci_addr_fail;
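+		/* Poll until the sensor clears the software reset flag. */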
+ do {
+ rval = ccs_read(sensor, SOFTWARE_RESET, &reset);
+ reset = !rval && reset == CCS_SOFTWARE_RESET_OFF;
+ if (reset)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--retry);
+
+ if (!reset)
+ return -EIO;
}
if (sensor->hwcfg.i2c_addr_alt) {
@@ -1426,8 +1649,7 @@ static int ccs_power_on(struct device *dev)
goto out_cci_addr_fail;
}
- /* DPHY control done by sensor based on requested link rate */
- rval = ccs_write(sensor, PHY_CTRL, CCS_PHY_CTRL_UI);
+ rval = ccs_update_phy_ctrl(sensor);
if (rval < 0)
goto out_cci_addr_fail;
@@ -2908,7 +3130,8 @@ static int ccs_get_hwconfig(struct ccs_sensor *sensor, struct device *dev)
int i;
int rval;
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ ep = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
return -ENODEV;
@@ -3080,6 +3303,11 @@ static int ccs_probe(struct i2c_client *client)
return -EINVAL;
}
+ if (!sensor->hwcfg.ext_clk) {
+ dev_err(&client->dev, "cannot work with xclk frequency 0\n");
+ return -EINVAL;
+ }
+
sensor->reset = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(sensor->reset))
@@ -3148,6 +3376,10 @@ static int ccs_probe(struct i2c_client *client)
goto out_free_ccs_limits;
}
+ rval = ccs_update_phy_ctrl(sensor);
+ if (rval < 0)
+ goto out_free_ccs_limits;
+
/*
* Handle Sensor Module orientation on the board.
*
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 9a6097b088bd..45f2b2f55ec5 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -10,7 +10,6 @@
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/types.h>
#include "ccs-data-defs.h"
@@ -152,7 +151,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
v->static_data_version_major[1];
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
- v->static_data_version_major[1];
+ v->static_data_version_minor[1];
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
vv->date_month = v->month;
vv->date_day = v->day;
@@ -215,7 +214,7 @@ static int ccs_data_parse_regs(struct bin_container *bin,
size_t *__num_regs, const void *payload,
const void *endp, struct device *dev)
{
- struct ccs_reg *regs_base, *regs;
+ struct ccs_reg *regs_base = NULL, *regs = NULL;
size_t num_regs = 0;
u16 addr = 0;
@@ -286,6 +285,9 @@ static int ccs_data_parse_regs(struct bin_container *bin,
if (!bin->base) {
bin_reserve(bin, len);
} else if (__regs) {
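+			/* Malformed data may reach this point without a register entry. */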
+ if (!regs)
+ return -EIO;
+
regs->addr = addr;
regs->len = len;
regs->value = bin_alloc(bin, len);
@@ -306,8 +308,12 @@ static int ccs_data_parse_regs(struct bin_container *bin,
if (__num_regs)
*__num_regs = num_regs;
- if (bin->base && __regs)
+ if (bin->base && __regs) {
+ if (!regs_base)
+ return -EIO;
+
*__regs = regs_base;
+ }
return 0;
}
@@ -426,7 +432,7 @@ static int ccs_data_parse_rules(struct bin_container *bin,
size_t *__num_rules, const void *payload,
const void *endp, struct device *dev)
{
- struct ccs_rule *rules_base, *rules = NULL, *next_rule;
+ struct ccs_rule *rules_base = NULL, *rules = NULL, *next_rule = NULL;
size_t num_rules = 0;
const void *__next_rule = payload;
int rval;
@@ -484,6 +490,9 @@ static int ccs_data_parse_rules(struct bin_container *bin,
} else {
unsigned int i;
+ if (!next_rule)
+ return -EIO;
+
rules = next_rule;
next_rule++;
@@ -556,6 +565,9 @@ static int ccs_data_parse_rules(struct bin_container *bin,
bin_reserve(bin, sizeof(*rules) * num_rules);
*__num_rules = num_rules;
} else {
+ if (!rules_base)
+ return -EIO;
+
*__rules = rules_base;
}
@@ -691,7 +703,7 @@ static int ccs_data_parse_pdaf(struct bin_container *bin, struct ccs_pdaf_pix_lo
}
for (i = 0; i < max_block_type_id; i++) {
- struct ccs_pdaf_pix_loc_pixel_desc_group *pdgroup;
+ struct ccs_pdaf_pix_loc_pixel_desc_group *pdgroup = NULL;
unsigned int j;
if (!is_contained(__num_pixel_descs, endp))
@@ -722,6 +734,9 @@ static int ccs_data_parse_pdaf(struct bin_container *bin, struct ccs_pdaf_pix_lo
if (!bin->base)
continue;
+ if (!pdgroup)
+ return -EIO;
+
pdesc = &pdgroup->descs[j];
pdesc->pixel_type = __pixel_desc->pixel_type;
pdesc->small_offset_x = __pixel_desc->small_offset_x;
diff --git a/drivers/media/i2c/ccs/ccs-data.h b/drivers/media/i2c/ccs/ccs-data.h
index 50d6508b24f3..c75d480c8792 100644
--- a/drivers/media/i2c/ccs/ccs-data.h
+++ b/drivers/media/i2c/ccs/ccs-data.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
+struct device;
+
/**
* struct ccs_data_block_version - CCS static data version
* @version_major: Major version number
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.c b/drivers/media/i2c/ccs/ccs-reg-access.c
index b776af2a3c33..25993445f4fe 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.c
+++ b/drivers/media/i2c/ccs/ccs-reg-access.c
@@ -17,11 +17,10 @@
#include "ccs.h"
#include "ccs-limits.h"
-static uint32_t float_to_u32_mul_1000000(struct i2c_client *client,
- uint32_t phloat)
+static u32 float_to_u32_mul_1000000(struct i2c_client *client, u32 phloat)
{
- int32_t exp;
- uint64_t man;
+ s32 exp;
+ u64 man;
if (phloat >= 0x80000000) {
dev_err(&client->dev, "this is a negative number\n");
@@ -137,11 +136,11 @@ static int ____ccs_read_addr_8only(struct ccs_sensor *sensor, u16 reg,
unsigned int ccs_reg_width(u32 reg)
{
if (reg & CCS_FL_16BIT)
- return sizeof(uint16_t);
+ return sizeof(u16);
if (reg & CCS_FL_32BIT)
- return sizeof(uint32_t);
+ return sizeof(u32);
- return sizeof(uint8_t);
+ return sizeof(u8);
}
static u32 ireal32_to_u32_mul_1000000(struct i2c_client *client, u32 val)
@@ -205,7 +204,7 @@ static int __ccs_read_data(struct ccs_reg *regs, size_t num_regs,
size_t i;
for (i = 0; i < num_regs; i++, regs++) {
- uint8_t *data;
+ u8 *data;
if (regs->addr + regs->len < CCS_REG_ADDR(reg) + width)
continue;
@@ -216,13 +215,13 @@ static int __ccs_read_data(struct ccs_reg *regs, size_t num_regs,
data = &regs->value[CCS_REG_ADDR(reg) - regs->addr];
switch (width) {
- case sizeof(uint8_t):
+ case sizeof(u8):
*val = *data;
break;
- case sizeof(uint16_t):
+ case sizeof(u16):
*val = get_unaligned_be16(data);
break;
- case sizeof(uint32_t):
+ case sizeof(u32):
*val = get_unaligned_be32(data);
break;
default:
@@ -387,12 +386,20 @@ int ccs_write_data_regs(struct ccs_sensor *sensor, struct ccs_reg *regs,
for (j = 0; j < regs->len;
j += msg.len - 2, regdata += msg.len - 2) {
+ char printbuf[(MAX_WRITE_LEN << 1) +
+ 1 /* \0 */] = { 0 };
int rval;
msg.len = min(regs->len - j, MAX_WRITE_LEN);
+ bin2hex(printbuf, regdata, msg.len);
+ dev_dbg(&client->dev,
+ "writing msr reg 0x%4.4x value 0x%s\n",
+ regs->addr + j, printbuf);
+
put_unaligned_be16(regs->addr + j, buf);
memcpy(buf + 2, regdata, msg.len);
+
msg.len += 2;
rval = ccs_write_retry(client, &msg);
diff --git a/drivers/media/i2c/ccs/ccs.h b/drivers/media/i2c/ccs/ccs.h
index 356b87c33405..6beac375cc48 100644
--- a/drivers/media/i2c/ccs/ccs.h
+++ b/drivers/media/i2c/ccs/ccs.h
@@ -84,11 +84,11 @@ struct ccs_hwconfig {
unsigned short i2c_addr_dfl; /* Default i2c addr */
unsigned short i2c_addr_alt; /* Alternate i2c addr */
- uint32_t ext_clk; /* sensor external clk */
+ u32 ext_clk; /* sensor external clk */
unsigned int lanes; /* Number of CSI-2 lanes */
- uint32_t csi_signalling_mode; /* CCS_CSI_SIGNALLING_MODE_* */
- uint64_t *op_sys_clock;
+ u32 csi_signalling_mode; /* CCS_CSI_SIGNALLING_MODE_* */
+ u64 *op_sys_clock;
enum ccs_module_board_orient module_board_orient;
@@ -262,13 +262,13 @@ struct ccs_sensor {
unsigned long *valid_link_freqs;
/* Pixel array controls */
- struct v4l2_ctrl *analog_gain;
struct v4l2_ctrl *exposure;
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *pixel_rate_parray;
+ struct v4l2_ctrl *luminance_level;
/* src controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate_csi;
diff --git a/drivers/media/i2c/ccs/smiapp-reg-defs.h b/drivers/media/i2c/ccs/smiapp-reg-defs.h
index e80c110ebf3a..177e3e51207a 100644
--- a/drivers/media/i2c/ccs/smiapp-reg-defs.h
+++ b/drivers/media/i2c/ccs/smiapp-reg-defs.h
@@ -535,6 +535,8 @@
#define SMIAPP_DIGITAL_CROP_CAPABILITY_NONE 0
#define SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP 1
+#define SMIAPP_DIGITAL_GAIN_CAPABILITY_PER_CHANNEL 1
+
#define SMIAPP_BINNING_CAPABILITY_NO 0
#define SMIAPP_BINNING_CAPABILITY_YES 1
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index e7791a0848b3..6e3382b85a90 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -390,6 +390,10 @@ static const struct imx219_reg raw10_framefmt_regs[] = {
{0x0309, 0x0a},
};
+static const s64 imx219_link_freq_menu[] = {
+ IMX219_DEFAULT_LINK_FREQ,
+};
+
static const char * const imx219_test_pattern_menu[] = {
"Disabled",
"Color Bars",
@@ -547,6 +551,7 @@ struct imx219 {
struct v4l2_ctrl_handler ctrl_handler;
/* V4L2 Controls */
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *exposure;
struct v4l2_ctrl *vflip;
struct v4l2_ctrl *hflip;
@@ -806,7 +811,9 @@ static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index >= (ARRAY_SIZE(codes) / 4))
return -EINVAL;
+ mutex_lock(&imx219->mutex);
code->code = imx219_get_format_code(imx219, codes[code->index * 4]);
+ mutex_unlock(&imx219->mutex);
return 0;
}
@@ -816,11 +823,15 @@ static int imx219_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx219 *imx219 = to_imx219(sd);
+ u32 code;
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- if (fse->code != imx219_get_format_code(imx219, fse->code))
+ mutex_lock(&imx219->mutex);
+ code = imx219_get_format_code(imx219, fse->code);
+ mutex_unlock(&imx219->mutex);
+ if (fse->code != code)
return -EINVAL;
fse->min_width = supported_modes[fse->index].width;
@@ -1263,7 +1274,7 @@ static int imx219_init_controls(struct imx219 *imx219)
int i, ret;
ctrl_hdlr = &imx219->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 11);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
if (ret)
return ret;
@@ -1277,6 +1288,14 @@ static int imx219_init_controls(struct imx219 *imx219)
IMX219_PIXEL_RATE, 1,
IMX219_PIXEL_RATE);
+ imx219->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx219_link_freq_menu) - 1, 0,
+ imx219_link_freq_menu);
+ if (imx219->link_freq)
+ imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
/* Initial vblank/hblank/exposure parameters based on current mode */
imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index df62c69a48c0..61d74b794582 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -2,6 +2,7 @@
// Copyright (C) 2018 Intel Corporation
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -68,6 +69,9 @@
#define REG_CONFIG_MIRROR_FLIP 0x03
#define REG_CONFIG_FLIP_TEST_PATTERN 0x02
+/* Input clock frequency in Hz */
+#define IMX258_INPUT_CLOCK_FREQ 19200000
+
struct imx258_reg {
u16 address;
u8 val;
@@ -610,6 +614,8 @@ struct imx258 {
/* Streaming on/off */
bool streaming;
+
+ struct clk *clk;
};
static inline struct imx258 *to_imx258(struct v4l2_subdev *_sd)
@@ -972,6 +978,29 @@ static int imx258_stop_streaming(struct imx258 *imx258)
return 0;
}
+static int imx258_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx258 *imx258 = to_imx258(sd);
+ int ret;
+
+ ret = clk_prepare_enable(imx258->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static int imx258_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx258 *imx258 = to_imx258(sd);
+
+ clk_disable_unprepare(imx258->clk);
+
+ return 0;
+}
+
static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx258 *imx258 = to_imx258(sd);
@@ -1018,8 +1047,7 @@ err_unlock:
static int __maybe_unused imx258_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx258 *imx258 = to_imx258(sd);
if (imx258->streaming)
@@ -1030,8 +1058,7 @@ static int __maybe_unused imx258_suspend(struct device *dev)
static int __maybe_unused imx258_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx258 *imx258 = to_imx258(sd);
int ret;
@@ -1201,9 +1228,26 @@ static int imx258_probe(struct i2c_client *client)
int ret;
u32 val = 0;
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000)
+ imx258 = devm_kzalloc(&client->dev, sizeof(*imx258), GFP_KERNEL);
+ if (!imx258)
+ return -ENOMEM;
+
+	imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+	if (IS_ERR(imx258->clk))
+		return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+				     "error getting clock\n");
+
+	if (!imx258->clk) {
+		dev_dbg(&client->dev,
+			"no clock provided, using clock-frequency property\n");
+
+		device_property_read_u32(&client->dev, "clock-frequency", &val);
+		if (val != IMX258_INPUT_CLOCK_FREQ)
+			return -EINVAL;
+	} else if (clk_get_rate(imx258->clk) != IMX258_INPUT_CLOCK_FREQ) {
+		dev_err(&client->dev, "input clock frequency not supported\n");
+		return -EINVAL;
+	}
/*
* Check that the device is mounted upside down. The driver only
@@ -1213,24 +1257,25 @@ static int imx258_probe(struct i2c_client *client)
if (ret || val != 180)
return -EINVAL;
- imx258 = devm_kzalloc(&client->dev, sizeof(*imx258), GFP_KERNEL);
- if (!imx258)
- return -ENOMEM;
-
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx258->sd, client, &imx258_subdev_ops);
+ /* Will be powered off via pm_runtime_idle */
+ ret = imx258_power_on(&client->dev);
+ if (ret)
+ return ret;
+
/* Check module identity */
ret = imx258_identify_module(imx258);
if (ret)
- return ret;
+ goto error_identify;
/* Set default mode to max resolution */
imx258->cur_mode = &supported_modes[0];
ret = imx258_init_controls(imx258);
if (ret)
- return ret;
+ goto error_identify;
/* Initialize subdev */
imx258->sd.internal_ops = &imx258_internal_ops;
@@ -1260,6 +1305,9 @@ error_media_entity:
error_handler_free:
imx258_free_controls(imx258);
+error_identify:
+ imx258_power_off(&client->dev);
+
return ret;
}
@@ -1273,6 +1321,8 @@ static int imx258_remove(struct i2c_client *client)
imx258_free_controls(imx258);
pm_runtime_disable(&client->dev);
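+	/* Power the sensor off unless runtime PM has already suspended it. */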
+ if (!pm_runtime_status_suspended(&client->dev))
+ imx258_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
return 0;
@@ -1280,6 +1330,7 @@ static int imx258_remove(struct i2c_client *client)
static const struct dev_pm_ops imx258_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(imx258_suspend, imx258_resume)
+ SET_RUNTIME_PM_OPS(imx258_power_off, imx258_power_on, NULL)
};
#ifdef CONFIG_ACPI
@@ -1291,11 +1342,18 @@ static const struct acpi_device_id imx258_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, imx258_acpi_ids);
#endif
+static const struct of_device_id imx258_dt_ids[] = {
+ { .compatible = "sony,imx258" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx258_dt_ids);
+
static struct i2c_driver imx258_i2c_driver = {
.driver = {
.name = "imx258",
.pm = &imx258_pm_ops,
.acpi_match_table = ACPI_PTR(imx258_acpi_ids),
+ .of_match_table = imx258_dt_ids,
},
.probe_new = imx258_probe,
.remove = imx258_remove,
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
new file mode 100644
index 000000000000..ad530f0d338a
--- /dev/null
+++ b/drivers/media/i2c/imx334.c
@@ -0,0 +1,1132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sony imx334 sensor driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+#include <asm/unaligned.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Streaming Mode */
+#define IMX334_REG_MODE_SELECT 0x3000
+#define IMX334_MODE_STANDBY 0x01
+#define IMX334_MODE_STREAMING 0x00
+
+/* Lines per frame */
+#define IMX334_REG_LPFR 0x3030
+
+/* Chip ID */
+#define IMX334_REG_ID 0x3044
+#define IMX334_ID 0x1e
+
+/* Exposure control */
+#define IMX334_REG_SHUTTER 0x3058
+#define IMX334_EXPOSURE_MIN 1
+#define IMX334_EXPOSURE_OFFSET 5
+#define IMX334_EXPOSURE_STEP 1
+#define IMX334_EXPOSURE_DEFAULT 0x0648
+
+/* Analog gain control */
+#define IMX334_REG_AGAIN 0x30e8
+#define IMX334_AGAIN_MIN 0
+#define IMX334_AGAIN_MAX 240
+#define IMX334_AGAIN_STEP 1
+#define IMX334_AGAIN_DEFAULT 0
+
+/* Group hold register */
+#define IMX334_REG_HOLD 0x3001
+
+/* Input clock rate */
+#define IMX334_INCLK_RATE 24000000
+
+/* CSI2 HW configuration */
+#define IMX334_LINK_FREQ 891000000
+#define IMX334_NUM_DATA_LANES 4
+
+#define IMX334_REG_MIN 0x00
+#define IMX334_REG_MAX 0xfffff
+
+/**
+ * struct imx334_reg - imx334 sensor register
+ * @address: Register address
+ * @val: Register value
+ */
+struct imx334_reg {
+ u16 address;
+ u8 val;
+};
+
+/**
+ * struct imx334_reg_list - imx334 sensor register list
+ * @num_of_regs: Number of registers in the list
+ * @regs: Pointer to register list
+ */
+struct imx334_reg_list {
+ u32 num_of_regs;
+ const struct imx334_reg *regs;
+};
+
+/**
+ * struct imx334_mode - imx334 sensor mode structure
+ * @width: Frame width
+ * @height: Frame height
+ * @code: Format code
+ * @hblank: Horizontal blanking in lines
+ * @vblank: Vertical blanking in lines
+ * @vblank_min: Minimum vertical blanking in lines
+ * @vblank_max: Maximum vertical blanking in lines
+ * @pclk: Sensor pixel clock
+ * @link_freq_idx: Link frequency index
+ * @reg_list: Register list for sensor mode
+ */
+struct imx334_mode {
+ u32 width;
+ u32 height;
+ u32 code;
+ u32 hblank;
+ u32 vblank;
+ u32 vblank_min;
+ u32 vblank_max;
+ u64 pclk;
+ u32 link_freq_idx;
+ struct imx334_reg_list reg_list;
+};
+
+/**
+ * struct imx334 - imx334 sensor device structure
+ * @dev: Pointer to generic device
+ * @client: Pointer to i2c client
+ * @sd: V4L2 sub-device
+ * @pad: Media pad. Only one pad supported
+ * @reset_gpio: Sensor reset gpio
+ * @inclk: Sensor input clock
+ * @ctrl_handler: V4L2 control handler
+ * @link_freq_ctrl: Pointer to link frequency control
+ * @pclk_ctrl: Pointer to pixel clock control
+ * @hblank_ctrl: Pointer to horizontal blanking control
+ * @vblank_ctrl: Pointer to vertical blanking control
+ * @exp_ctrl: Pointer to exposure control
+ * @again_ctrl: Pointer to analog gain control
+ * @vblank: Vertical blanking in lines
+ * @cur_mode: Pointer to current selected sensor mode
+ * @mutex: Mutex for serializing sensor controls
+ * @streaming: Flag indicating streaming state
+ */
+struct imx334 {
+ struct device *dev;
+ struct i2c_client *client;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct gpio_desc *reset_gpio;
+ struct clk *inclk;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *link_freq_ctrl;
+ struct v4l2_ctrl *pclk_ctrl;
+ struct v4l2_ctrl *hblank_ctrl;
+ struct v4l2_ctrl *vblank_ctrl;
+ struct {
+ struct v4l2_ctrl *exp_ctrl;
+ struct v4l2_ctrl *again_ctrl;
+ };
+ u32 vblank;
+ const struct imx334_mode *cur_mode;
+ struct mutex mutex;
+ bool streaming;
+};
+
+static const s64 link_freq[] = {
+ IMX334_LINK_FREQ,
+};
+
+/* Sensor mode registers */
+static const struct imx334_reg mode_3840x2160_regs[] = {
+ {0x3000, 0x01},
+ {0x3002, 0x00},
+ {0x3018, 0x04},
+ {0x37b0, 0x36},
+ {0x304c, 0x00},
+ {0x300c, 0x3b},
+ {0x300d, 0x2a},
+ {0x3034, 0x26},
+ {0x3035, 0x02},
+ {0x314c, 0x29},
+ {0x314d, 0x01},
+ {0x315a, 0x02},
+ {0x3168, 0xa0},
+ {0x316a, 0x7e},
+ {0x3288, 0x21},
+ {0x328a, 0x02},
+ {0x302c, 0x3c},
+ {0x302e, 0x00},
+ {0x302f, 0x0f},
+ {0x3076, 0x70},
+ {0x3077, 0x08},
+ {0x3090, 0x70},
+ {0x3091, 0x08},
+ {0x30d8, 0x20},
+ {0x30d9, 0x12},
+ {0x3308, 0x70},
+ {0x3309, 0x08},
+ {0x3414, 0x05},
+ {0x3416, 0x18},
+ {0x35ac, 0x0e},
+ {0x3648, 0x01},
+ {0x364a, 0x04},
+ {0x364c, 0x04},
+ {0x3678, 0x01},
+ {0x367c, 0x31},
+ {0x367e, 0x31},
+ {0x3708, 0x02},
+ {0x3714, 0x01},
+ {0x3715, 0x02},
+ {0x3716, 0x02},
+ {0x3717, 0x02},
+ {0x371c, 0x3d},
+ {0x371d, 0x3f},
+ {0x372c, 0x00},
+ {0x372d, 0x00},
+ {0x372e, 0x46},
+ {0x372f, 0x00},
+ {0x3730, 0x89},
+ {0x3731, 0x00},
+ {0x3732, 0x08},
+ {0x3733, 0x01},
+ {0x3734, 0xfe},
+ {0x3735, 0x05},
+ {0x375d, 0x00},
+ {0x375e, 0x00},
+ {0x375f, 0x61},
+ {0x3760, 0x06},
+ {0x3768, 0x1b},
+ {0x3769, 0x1b},
+ {0x376a, 0x1a},
+ {0x376b, 0x19},
+ {0x376c, 0x18},
+ {0x376d, 0x14},
+ {0x376e, 0x0f},
+ {0x3776, 0x00},
+ {0x3777, 0x00},
+ {0x3778, 0x46},
+ {0x3779, 0x00},
+ {0x377a, 0x08},
+ {0x377b, 0x01},
+ {0x377c, 0x45},
+ {0x377d, 0x01},
+ {0x377e, 0x23},
+ {0x377f, 0x02},
+ {0x3780, 0xd9},
+ {0x3781, 0x03},
+ {0x3782, 0xf5},
+ {0x3783, 0x06},
+ {0x3784, 0xa5},
+ {0x3788, 0x0f},
+ {0x378a, 0xd9},
+ {0x378b, 0x03},
+ {0x378c, 0xeb},
+ {0x378d, 0x05},
+ {0x378e, 0x87},
+ {0x378f, 0x06},
+ {0x3790, 0xf5},
+ {0x3792, 0x43},
+ {0x3794, 0x7a},
+ {0x3796, 0xa1},
+ {0x3e04, 0x0e},
+ {0x3a00, 0x01},
+};
+
+/* Supported sensor mode configurations */
+static const struct imx334_mode supported_mode = {
+ .width = 3840,
+ .height = 2160,
+ .hblank = 560,
+ .vblank = 2340,
+ .vblank_min = 90,
+ .vblank_max = 132840,
+ .pclk = 594000000,
+ .link_freq_idx = 0,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3840x2160_regs),
+ .regs = mode_3840x2160_regs,
+ },
+};
+
+/**
+ * to_imx334() - imx334 V4L2 sub-device to imx334 device.
+ * @subdev: pointer to imx334 V4L2 sub-device
+ *
+ * Return: pointer to imx334 device
+ */
+static inline struct imx334 *to_imx334(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct imx334, sd);
+}
+
+/**
+ * imx334_read_reg() - Read registers.
+ * @imx334: pointer to imx334 device
+ * @reg: register address
+ * @len: number of bytes to read, 4 at most
+ * @val: pointer to register value to be filled.
+ *
+ * Big endian register addresses with little endian values.
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_read_reg(struct imx334 *imx334, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
+ struct i2c_msg msgs[2] = {0};
+ u8 addr_buf[2] = {0};
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (WARN_ON(len > 4))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
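+	/* data_buf is zero-initialised, so reads shorter than 4 bytes decode correctly. */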
+ *val = get_unaligned_le32(data_buf);
+
+ return 0;
+}
+
+/**
+ * imx334_write_reg() - Write register
+ * @imx334: pointer to imx334 device
+ * @reg: register address
+ * @len: number of bytes to write, 4 at most
+ * @val: register value
+ *
+ * Big endian register addresses with little endian values.
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_write_reg(struct imx334 *imx334, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
+ u8 buf[6] = {0};
+
+ if (WARN_ON(len > 4))
+ return -EINVAL;
+
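+	/* 2-byte big-endian register address followed by the little-endian value. */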
+ put_unaligned_be16(reg, buf);
+ put_unaligned_le32(val, buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * imx334_write_regs() - Write a list of registers
+ * @imx334: pointer to imx334 device
+ * @regs: list of registers to be written
+ * @len: length of registers array
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_write_regs(struct imx334 *imx334,
+ const struct imx334_reg *regs, u32 len)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < len; i++) {
+ ret = imx334_write_reg(imx334, regs[i].address, 1, regs[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * imx334_update_controls() - Update control ranges based on streaming mode
+ * @imx334: pointer to imx334 device
+ * @mode: pointer to imx334_mode sensor mode
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_update_controls(struct imx334 *imx334,
+ const struct imx334_mode *mode)
+{
+ int ret;
+
+ ret = __v4l2_ctrl_s_ctrl(imx334->link_freq_ctrl, mode->link_freq_idx);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(imx334->hblank_ctrl, mode->hblank);
+ if (ret)
+ return ret;
+
+ return __v4l2_ctrl_modify_range(imx334->vblank_ctrl, mode->vblank_min,
+ mode->vblank_max, 1, mode->vblank);
+}
+
+/**
+ * imx334_update_exp_gain() - Set updated exposure and gain
+ * @imx334: pointer to imx334 device
+ * @exposure: updated exposure value
+ * @gain: updated analog gain value
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_update_exp_gain(struct imx334 *imx334, u32 exposure, u32 gain)
+{
+ u32 lpfr, shutter;
+ int ret;
+
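+	/* The sensor expects the shutter value as lines per frame minus exposure. */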
+ lpfr = imx334->vblank + imx334->cur_mode->height;
+ shutter = lpfr - exposure;
+
+ dev_dbg(imx334->dev, "Set long exp %u analog gain %u sh0 %u lpfr %u",
+ exposure, gain, shutter, lpfr);
+
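+	/* Hold register updates so exposure, LPFR and gain apply in the same frame. */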
+ ret = imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 1);
+ if (ret)
+ return ret;
+
+ ret = imx334_write_reg(imx334, IMX334_REG_LPFR, 3, lpfr);
+ if (ret)
+ goto error_release_group_hold;
+
+ ret = imx334_write_reg(imx334, IMX334_REG_SHUTTER, 3, shutter);
+ if (ret)
+ goto error_release_group_hold;
+
+ ret = imx334_write_reg(imx334, IMX334_REG_AGAIN, 1, gain);
+
+error_release_group_hold:
+ imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 0);
+
+ return ret;
+}
+
+/**
+ * imx334_set_ctrl() - Set subdevice control
+ * @ctrl: pointer to v4l2_ctrl structure
+ *
+ * Supported controls:
+ * - V4L2_CID_VBLANK
+ * - cluster controls:
+ * - V4L2_CID_ANALOGUE_GAIN
+ * - V4L2_CID_EXPOSURE
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx334 *imx334 =
+ container_of(ctrl->handler, struct imx334, ctrl_handler);
+ u32 analog_gain;
+ u32 exposure;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ imx334->vblank = imx334->vblank_ctrl->val;
+
+ dev_dbg(imx334->dev, "Received vblank %u, new lpfr %u",
+ imx334->vblank,
+ imx334->vblank + imx334->cur_mode->height);
+
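+		/* A vblank change alters the frame length, so update the exposure range. */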
+ ret = __v4l2_ctrl_modify_range(imx334->exp_ctrl,
+ IMX334_EXPOSURE_MIN,
+ imx334->vblank +
+ imx334->cur_mode->height -
+ IMX334_EXPOSURE_OFFSET,
+ 1, IMX334_EXPOSURE_DEFAULT);
+ break;
+ case V4L2_CID_EXPOSURE:
+
+ /* Set controls only if sensor is in power on state */
+ if (!pm_runtime_get_if_in_use(imx334->dev))
+ return 0;
+
+ exposure = ctrl->val;
+ analog_gain = imx334->again_ctrl->val;
+
+ dev_dbg(imx334->dev, "Received exp %u analog gain %u",
+ exposure, analog_gain);
+
+ ret = imx334_update_exp_gain(imx334, exposure, analog_gain);
+
+ pm_runtime_put(imx334->dev);
+
+ break;
+ default:
+ dev_err(imx334->dev, "Invalid control %d", ctrl->id);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* V4L2 subdevice control ops */
+static const struct v4l2_ctrl_ops imx334_ctrl_ops = {
+ .s_ctrl = imx334_set_ctrl,
+};
+
+/**
+ * imx334_enum_mbus_code() - Enumerate V4L2 sub-device mbus codes
+ * @sd: pointer to imx334 V4L2 sub-device structure
+ * @cfg: V4L2 sub-device pad configuration
+ * @code: V4L2 sub-device code enumeration to be filled
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = supported_mode.code;
+
+ return 0;
+}
+
+/**
+ * imx334_enum_frame_size() - Enumerate V4L2 sub-device frame sizes
+ * @sd: pointer to imx334 V4L2 sub-device structure
+ * @cfg: V4L2 sub-device pad configuration
+ * @fsize: V4L2 sub-device size enumeration to be filled
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fsize)
+{
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ if (fsize->code != supported_mode.code)
+ return -EINVAL;
+
+ fsize->min_width = supported_mode.width;
+ fsize->max_width = fsize->min_width;
+ fsize->min_height = supported_mode.height;
+ fsize->max_height = fsize->min_height;
+
+ return 0;
+}
+
+/**
+ * imx334_fill_pad_format() - Fill subdevice pad format
+ * from selected sensor mode
+ * @imx334: pointer to imx334 device
+ * @mode: pointer to imx334_mode sensor mode
+ * @fmt: V4L2 sub-device format to be filled
+ */
+static void imx334_fill_pad_format(struct imx334 *imx334,
+ const struct imx334_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = mode->code;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ fmt->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->format.quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+/**
+ * imx334_get_pad_format() - Get subdevice pad format
+ * @sd: pointer to imx334 V4L2 sub-device structure
+ * @cfg: V4L2 sub-device pad configuration
+ * @fmt: V4L2 sub-device format to be set
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx334 *imx334 = to_imx334(sd);
+
+ mutex_lock(&imx334->mutex);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *framefmt;
+
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ imx334_fill_pad_format(imx334, imx334->cur_mode, fmt);
+ }
+
+ mutex_unlock(&imx334->mutex);
+
+ return 0;
+}
+
+/**
+ * imx334_set_pad_format() - Set subdevice pad format
+ * @sd: pointer to imx334 V4L2 sub-device structure
+ * @cfg: V4L2 sub-device pad configuration
+ * @fmt: V4L2 sub-device format to be set
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx334 *imx334 = to_imx334(sd);
+ const struct imx334_mode *mode;
+ int ret = 0;
+
+ mutex_lock(&imx334->mutex);
+
+ mode = &supported_mode;
+ imx334_fill_pad_format(imx334, mode, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *framefmt;
+
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ ret = imx334_update_controls(imx334, mode);
+ if (!ret)
+ imx334->cur_mode = mode;
+ }
+
+ mutex_unlock(&imx334->mutex);
+
+ return ret;
+}
+
+/**
+ * imx334_init_pad_cfg() - Initialize sub-device pad configuration
+ * @sd: pointer to imx334 V4L2 sub-device structure
+ * @cfg: V4L2 sub-device pad configuration
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_init_pad_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct imx334 *imx334 = to_imx334(sd);
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ imx334_fill_pad_format(imx334, &supported_mode, &fmt);
+
+ return imx334_set_pad_format(sd, cfg, &fmt);
+}
+
+/**
+ * imx334_start_streaming() - Start sensor stream
+ * @imx334: pointer to imx334 device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_start_streaming(struct imx334 *imx334)
+{
+ const struct imx334_reg_list *reg_list;
+ int ret;
+
+ /* Write sensor mode registers */
+ reg_list = &imx334->cur_mode->reg_list;
+ ret = imx334_write_regs(imx334, reg_list->regs,
+ reg_list->num_of_regs);
+ if (ret) {
+		dev_err(imx334->dev, "failed to write initial registers");
+ return ret;
+ }
+
+ /* Setup handler will write actual exposure and gain */
+ ret = __v4l2_ctrl_handler_setup(imx334->sd.ctrl_handler);
+ if (ret) {
+		dev_err(imx334->dev, "failed to set up handler");
+ return ret;
+ }
+
+ /* Start streaming */
+ ret = imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
+ 1, IMX334_MODE_STREAMING);
+ if (ret) {
+		dev_err(imx334->dev, "failed to start streaming");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * imx334_stop_streaming() - Stop sensor stream
+ * @imx334: pointer to imx334 device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_stop_streaming(struct imx334 *imx334)
+{
+ return imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
+ 1, IMX334_MODE_STANDBY);
+}
+
+/**
+ * imx334_set_stream() - Enable sensor streaming
+ * @sd: pointer to imx334 subdevice
+ * @enable: set to enable sensor streaming
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx334 *imx334 = to_imx334(sd);
+ int ret;
+
+ mutex_lock(&imx334->mutex);
+
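+	/* Nothing to do if the sensor is already in the requested state. */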
+ if (imx334->streaming == enable) {
+ mutex_unlock(&imx334->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx334->dev);
+		if (ret < 0)
+ goto error_power_off;
+
+ ret = imx334_start_streaming(imx334);
+ if (ret)
+ goto error_power_off;
+ } else {
+ imx334_stop_streaming(imx334);
+ pm_runtime_put(imx334->dev);
+ }
+
+ imx334->streaming = enable;
+
+ mutex_unlock(&imx334->mutex);
+
+ return 0;
+
+error_power_off:
+ pm_runtime_put(imx334->dev);
+ mutex_unlock(&imx334->mutex);
+
+ return ret;
+}
+
+/**
+ * imx334_detect() - Detect imx334 sensor
+ * @imx334: pointer to imx334 device
+ *
+ * Return: 0 if successful, -ENXIO if sensor id does not match
+ */
+static int imx334_detect(struct imx334 *imx334)
+{
+ int ret;
+ u32 val;
+
+ ret = imx334_read_reg(imx334, IMX334_REG_ID, 2, &val);
+ if (ret)
+ return ret;
+
+ if (val != IMX334_ID) {
+ dev_err(imx334->dev, "chip id mismatch: %x!=%x",
+ IMX334_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/**
+ * imx334_parse_hw_config() - Parse HW configuration and check if supported
+ * @imx334: pointer to imx334 device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_parse_hw_config(struct imx334 *imx334)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(imx334->dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ unsigned long rate;
+ int ret;
+ int i;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ /* Request optional reset pin */
+ imx334->reset_gpio = devm_gpiod_get_optional(imx334->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(imx334->reset_gpio)) {
+ dev_err(imx334->dev, "failed to get reset gpio %ld",
+ PTR_ERR(imx334->reset_gpio));
+ return PTR_ERR(imx334->reset_gpio);
+ }
+
+ /* Get sensor input clock */
+ imx334->inclk = devm_clk_get(imx334->dev, NULL);
+ if (IS_ERR(imx334->inclk)) {
+ dev_err(imx334->dev, "could not get inclk");
+ return PTR_ERR(imx334->inclk);
+ }
+
+ rate = clk_get_rate(imx334->inclk);
+ if (rate != IMX334_INCLK_RATE) {
+ dev_err(imx334->dev, "inclk frequency mismatch");
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX334_NUM_DATA_LANES) {
+ dev_err(imx334->dev,
+ "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto done_endpoint_free;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(imx334->dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto done_endpoint_free;
+ }
+
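+	/* The sensor supports a single link frequency; make sure it is listed. */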
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.link_frequencies[i] == IMX334_LINK_FREQ)
+ goto done_endpoint_free;
+
+ ret = -EINVAL;
+
+done_endpoint_free:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+/* V4L2 subdevice ops */
+static const struct v4l2_subdev_video_ops imx334_video_ops = {
+ .s_stream = imx334_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
+ .init_cfg = imx334_init_pad_cfg,
+ .enum_mbus_code = imx334_enum_mbus_code,
+ .enum_frame_size = imx334_enum_frame_size,
+ .get_fmt = imx334_get_pad_format,
+ .set_fmt = imx334_set_pad_format,
+};
+
+static const struct v4l2_subdev_ops imx334_subdev_ops = {
+ .video = &imx334_video_ops,
+ .pad = &imx334_pad_ops,
+};
+
+/**
+ * imx334_power_on() - Sensor power on sequence
+ * @dev: pointer to i2c device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx334 *imx334 = to_imx334(sd);
+ int ret;
+
+ gpiod_set_value_cansleep(imx334->reset_gpio, 1);
+
+ ret = clk_prepare_enable(imx334->inclk);
+ if (ret) {
+		dev_err(imx334->dev, "failed to enable inclk");
+ goto error_reset;
+ }
+
+ usleep_range(18000, 20000);
+
+ return 0;
+
+error_reset:
+ gpiod_set_value_cansleep(imx334->reset_gpio, 0);
+
+ return ret;
+}
+
+/**
+ * imx334_power_off() - Sensor power off sequence
+ * @dev: pointer to i2c device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx334 *imx334 = to_imx334(sd);
+
+ gpiod_set_value_cansleep(imx334->reset_gpio, 0);
+
+ clk_disable_unprepare(imx334->inclk);
+
+ return 0;
+}
+
+/**
+ * imx334_init_controls() - Initialize sensor subdevice controls
+ * @imx334: pointer to imx334 device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_init_controls(struct imx334 *imx334)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &imx334->ctrl_handler;
+ const struct imx334_mode *mode = imx334->cur_mode;
+ u32 lpfr;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 6);
+ if (ret)
+ return ret;
+
+ /* Serialize controls with sensor device */
+ ctrl_hdlr->lock = &imx334->mutex;
+
+ /* Initialize exposure and gain */
+ lpfr = mode->vblank + mode->height;
+ imx334->exp_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX334_EXPOSURE_MIN,
+ lpfr - IMX334_EXPOSURE_OFFSET,
+ IMX334_EXPOSURE_STEP,
+ IMX334_EXPOSURE_DEFAULT);
+
+ imx334->again_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN,
+ IMX334_AGAIN_MIN,
+ IMX334_AGAIN_MAX,
+ IMX334_AGAIN_STEP,
+ IMX334_AGAIN_DEFAULT);
+
+ v4l2_ctrl_cluster(2, &imx334->exp_ctrl);
+
+ imx334->vblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_VBLANK,
+ mode->vblank_min,
+ mode->vblank_max,
+ 1, mode->vblank);
+
+ /* Read only controls */
+ imx334->pclk_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ mode->pclk, mode->pclk,
+ 1, mode->pclk);
+
+ imx334->link_freq_ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq) -
+ 1,
+ mode->link_freq_idx,
+ link_freq);
+ if (imx334->link_freq_ctrl)
+ imx334->link_freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ imx334->hblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx334_ctrl_ops,
+ V4L2_CID_HBLANK,
+ IMX334_REG_MIN,
+ IMX334_REG_MAX,
+ 1, mode->hblank);
+ if (imx334->hblank_ctrl)
+ imx334->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (ctrl_hdlr->error) {
+ dev_err(imx334->dev, "control init failed: %d",
+ ctrl_hdlr->error);
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ return ctrl_hdlr->error;
+ }
+
+ imx334->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+/**
+ * imx334_probe() - I2C client device binding
+ * @client: pointer to i2c client device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_probe(struct i2c_client *client)
+{
+ struct imx334 *imx334;
+ int ret;
+
+ imx334 = devm_kzalloc(&client->dev, sizeof(*imx334), GFP_KERNEL);
+ if (!imx334)
+ return -ENOMEM;
+
+ imx334->dev = &client->dev;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&imx334->sd, client, &imx334_subdev_ops);
+
+ ret = imx334_parse_hw_config(imx334);
+ if (ret) {
+ dev_err(imx334->dev, "HW configuration is not supported");
+ return ret;
+ }
+
+ mutex_init(&imx334->mutex);
+
+ ret = imx334_power_on(imx334->dev);
+ if (ret) {
+		dev_err(imx334->dev, "failed to power on the sensor");
+ goto error_mutex_destroy;
+ }
+
+ /* Check module identity */
+ ret = imx334_detect(imx334);
+ if (ret) {
+ dev_err(imx334->dev, "failed to find sensor: %d", ret);
+ goto error_power_off;
+ }
+
+ /* Set default mode to max resolution */
+ imx334->cur_mode = &supported_mode;
+ imx334->vblank = imx334->cur_mode->vblank;
+
+ ret = imx334_init_controls(imx334);
+ if (ret) {
+ dev_err(imx334->dev, "failed to init controls: %d", ret);
+ goto error_power_off;
+ }
+
+ /* Initialize subdev */
+ imx334->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx334->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ imx334->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx334->sd.entity, 1, &imx334->pad);
+ if (ret) {
+ dev_err(imx334->dev, "failed to init entity pads: %d", ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx334->sd);
+ if (ret < 0) {
+ dev_err(imx334->dev,
+ "failed to register async subdev: %d", ret);
+ goto error_media_entity;
+ }
+
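+	/* The sensor is now powered on; let runtime PM track its state. */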
+ pm_runtime_set_active(imx334->dev);
+ pm_runtime_enable(imx334->dev);
+ pm_runtime_idle(imx334->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&imx334->sd.entity);
+error_handler_free:
+ v4l2_ctrl_handler_free(imx334->sd.ctrl_handler);
+error_power_off:
+ imx334_power_off(imx334->dev);
+error_mutex_destroy:
+ mutex_destroy(&imx334->mutex);
+
+ return ret;
+}
+
+/**
+ * imx334_remove() - I2C client device unbinding
+ * @client: pointer to I2C client device
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx334_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx334 *imx334 = to_imx334(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx334->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx334_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx334_power_off, imx334_power_on, NULL)
+};
+
+static const struct of_device_id imx334_of_match[] = {
+ { .compatible = "sony,imx334" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, imx334_of_match);
+
+static struct i2c_driver imx334_driver = {
+ .probe_new = imx334_probe,
+ .remove = imx334_remove,
+ .driver = {
+ .name = "imx334",
+ .pm = &imx334_pm_ops,
+ .of_match_table = imx334_of_match,
+ },
+};
+
+module_i2c_driver(imx334_driver);
+
+MODULE_DESCRIPTION("Sony imx334 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/max9271.c b/drivers/media/i2c/max9271.c
index c247db569bab..c495582dcff6 100644
--- a/drivers/media/i2c/max9271.c
+++ b/drivers/media/i2c/max9271.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include "max9271.h"
@@ -339,3 +340,7 @@ int max9271_set_translation(struct max9271_device *dev, u8 source, u8 dest)
return 0;
}
EXPORT_SYMBOL_GPL(max9271_set_translation);
+
+MODULE_DESCRIPTION("Maxim MAX9271 GMSL Serializer");
+MODULE_AUTHOR("Jacopo Mondi");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index c82c1493e099..6fd4d59fcc72 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -163,6 +163,8 @@ struct max9286_priv {
unsigned int mux_channel;
bool mux_open;
+ u32 reverse_channel_mv;
+
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixelrate;
@@ -336,6 +338,31 @@ static void max9286_configure_i2c(struct max9286_priv *priv, bool localack)
usleep_range(3000, 5000);
}
+static void max9286_reverse_channel_setup(struct max9286_priv *priv,
+ unsigned int chan_amplitude)
+{
+ /* Reverse channel transmission time: default to 1. */
+ u8 chan_config = MAX9286_REV_TRF(1);
+
+ /*
+ * Reverse channel setup.
+ *
+ * - Enable custom reverse channel configuration (through register 0x3f)
+ * and set the first pulse length to 35 clock cycles.
+ * - Adjust reverse channel amplitude: values > 130 are programmed
+ * using the additional +100mV REV_AMP_X boost flag
+ */
+ max9286_write(priv, 0x3f, MAX9286_EN_REV_CFG | MAX9286_REV_FLEN(35));
+
+ if (chan_amplitude > 100) {
+ /* It is not possible to express values (100 < x < 130) */
+ chan_amplitude = max(30U, chan_amplitude - 100);
+ chan_config |= MAX9286_REV_AMP_X;
+ }
+ max9286_write(priv, 0x3b, chan_config | MAX9286_REV_AMP(chan_amplitude));
+ usleep_range(2000, 2500);
+}
+
/*
* max9286_check_video_links() - Make sure video links are detected and locked
*
@@ -531,10 +558,14 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
* All enabled sources have probed and enabled their reverse control
* channels:
*
+ * - Increase the reverse channel amplitude to compensate for the
+ * remote ends high threshold, if not done already
* - Verify all configuration links are properly detected
* - Disable auto-ack as communication on the control channel are now
* stable.
*/
+ if (priv->reverse_channel_mv < 170)
+ max9286_reverse_channel_setup(priv, 170);
max9286_check_config_link(priv, priv->source_mask);
/*
@@ -576,19 +607,19 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
- struct v4l2_async_subdev *asd;
+ struct max9286_asd *mas;
- asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+ mas = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
source->fwnode,
- sizeof(*asd));
- if (IS_ERR(asd)) {
+ struct max9286_asd);
+ if (IS_ERR(mas)) {
dev_err(dev, "Failed to add subdev for source %u: %ld",
- i, PTR_ERR(asd));
+ i, PTR_ERR(mas));
v4l2_async_notifier_cleanup(&priv->notifier);
- return PTR_ERR(asd);
+ return PTR_ERR(mas);
}
- to_max9286_asd(asd)->source = source;
+ mas->source = source;
}
priv->notifier.ops = &max9286_notify_ops;
@@ -941,19 +972,7 @@ static int max9286_setup(struct max9286_priv *priv)
* only. This should be disabled after the mux is initialised.
*/
max9286_configure_i2c(priv, true);
-
- /*
- * Reverse channel setup.
- *
- * - Enable custom reverse channel configuration (through register 0x3f)
- * and set the first pulse length to 35 clock cycles.
- * - Increase the reverse channel amplitude to 170mV to accommodate the
- * high threshold enabled by the serializer driver.
- */
- max9286_write(priv, 0x3f, MAX9286_EN_REV_CFG | MAX9286_REV_FLEN(35));
- max9286_write(priv, 0x3b, MAX9286_REV_TRF(1) | MAX9286_REV_AMP(70) |
- MAX9286_REV_AMP_X);
- usleep_range(2000, 2500);
+ max9286_reverse_channel_setup(priv, priv->reverse_channel_mv);
/*
* Enable GMSL links, mask unused ones and autodetect link
@@ -1117,6 +1136,7 @@ static int max9286_parse_dt(struct max9286_priv *priv)
struct device_node *i2c_mux;
struct device_node *node = NULL;
unsigned int i2c_mux_mask = 0;
+ u32 reverse_channel_microvolt;
/* Balance the of_node_put() performed by of_find_node_by_name(). */
of_node_get(dev->of_node);
@@ -1207,6 +1227,20 @@ static int max9286_parse_dt(struct max9286_priv *priv)
}
of_node_put(node);
+ /*
+ * Parse the initial value of the reverse channel amplitude from
+ * the firmware interface and convert it to millivolts.
+ *
+ * Default it to 170mV for backward compatibility with DTBs that do not
+ * provide the property.
+ */
+ if (of_property_read_u32(dev->of_node,
+ "maxim,reverse-channel-microvolt",
+ &reverse_channel_microvolt))
+ priv->reverse_channel_mv = 170;
+ else
+ priv->reverse_channel_mv = reverse_channel_microvolt / 1000U;
+
priv->route_mask = priv->source_mask;
return 0;
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 69697386ffcd..0e11734f75aa 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
*/
+#include <linux/clk.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -16,7 +17,6 @@
#include <linux/property.h>
#include <media/v4l2-async.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -232,7 +232,7 @@ struct mt9m111 {
struct v4l2_ctrl *gain;
struct mt9m111_context *ctx;
struct v4l2_rect rect; /* cropping rectangle */
- struct v4l2_clk *clk;
+ struct clk *clk;
unsigned int width; /* output */
unsigned int height; /* sizes */
struct v4l2_fract frame_interval;
@@ -977,7 +977,7 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
- ret = v4l2_clk_enable(mt9m111->clk);
+ ret = clk_prepare_enable(mt9m111->clk);
if (ret < 0)
return ret;
@@ -995,7 +995,7 @@ out_regulator_disable:
regulator_disable(mt9m111->regulator);
out_clk_disable:
- v4l2_clk_disable(mt9m111->clk);
+ clk_disable_unprepare(mt9m111->clk);
dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
@@ -1006,7 +1006,7 @@ static void mt9m111_power_off(struct mt9m111 *mt9m111)
{
mt9m111_suspend(mt9m111);
regulator_disable(mt9m111->regulator);
- v4l2_clk_disable(mt9m111->clk);
+ clk_disable_unprepare(mt9m111->clk);
}
static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -1266,7 +1266,7 @@ static int mt9m111_probe(struct i2c_client *client)
return ret;
}
- mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
+ mt9m111->clk = devm_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
return PTR_ERR(mt9m111->clk);
@@ -1311,7 +1311,7 @@ static int mt9m111_probe(struct i2c_client *client)
mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
if (mt9m111->hdl.error) {
ret = mt9m111->hdl.error;
- goto out_clkput;
+ return ret;
}
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -1354,8 +1354,6 @@ out_entityclean:
out_hdlfree:
#endif
v4l2_ctrl_handler_free(&mt9m111->hdl);
-out_clkput:
- v4l2_clk_put(mt9m111->clk);
return ret;
}
@@ -1366,7 +1364,6 @@ static int mt9m111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&mt9m111->subdev);
media_entity_cleanup(&mt9m111->subdev.entity);
- v4l2_clk_put(mt9m111->clk);
v4l2_ctrl_handler_free(&mt9m111->hdl);
return 0;
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 61ae6a0d5679..97c7527b74ed 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1253,12 +1253,6 @@ static int mt9v111_remove(struct i2c_client *client)
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
- devm_gpiod_put(mt9v111->dev, mt9v111->oe);
- devm_gpiod_put(mt9v111->dev, mt9v111->standby);
- devm_gpiod_put(mt9v111->dev, mt9v111->reset);
-
- devm_clk_put(mt9v111->dev, mt9v111->clk);
-
return 0;
}
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 8683ffd3287a..60b4bc645334 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -388,7 +388,7 @@ static int ov02a10_check_sensor_id(struct ov02a10 *ov02a10)
if (ret < 0)
return ret;
- chip_id = le16_to_cpu(ret);
+ chip_id = le16_to_cpu((__force __le16)ret);
if ((chip_id & OV02A10_ID_MASK) != OV02A10_ID) {
dev_err(&client->dev, "unexpected sensor id(0x%04x)\n", chip_id);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index e7d2e5b4ad4b..1cefa15729ce 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A V4L2 driver for OmniVision OV5647 cameras.
*
@@ -8,119 +9,316 @@
* Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
*
* Copyright (C) 2016, Synopsys, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-mediabus.h>
-#define SENSOR_NAME "ov5647"
+/*
+ * From the datasheet, "20ms after PWDN goes low or 20ms after RESETB goes
+ * high if reset is inserted after PWDN goes high, host can access sensor's
+ * SCCB to initialize sensor."
+ */
+#define PWDN_ACTIVE_DELAY_MS 20
#define MIPI_CTRL00_CLOCK_LANE_GATE BIT(5)
+#define MIPI_CTRL00_LINE_SYNC_ENABLE BIT(4)
#define MIPI_CTRL00_BUS_IDLE BIT(2)
#define MIPI_CTRL00_CLOCK_LANE_DISABLE BIT(0)
#define OV5647_SW_STANDBY 0x0100
#define OV5647_SW_RESET 0x0103
-#define OV5647_REG_CHIPID_H 0x300A
-#define OV5647_REG_CHIPID_L 0x300B
-#define OV5640_REG_PAD_OUT 0x300D
+#define OV5647_REG_CHIPID_H 0x300a
+#define OV5647_REG_CHIPID_L 0x300b
+#define OV5640_REG_PAD_OUT 0x300d
+#define OV5647_REG_EXP_HI 0x3500
+#define OV5647_REG_EXP_MID 0x3501
+#define OV5647_REG_EXP_LO 0x3502
+#define OV5647_REG_AEC_AGC 0x3503
+#define OV5647_REG_GAIN_HI 0x350a
+#define OV5647_REG_GAIN_LO 0x350b
+#define OV5647_REG_VTS_HI 0x380e
+#define OV5647_REG_VTS_LO 0x380f
#define OV5647_REG_FRAME_OFF_NUMBER 0x4202
#define OV5647_REG_MIPI_CTRL00 0x4800
#define OV5647_REG_MIPI_CTRL14 0x4814
+#define OV5647_REG_AWB 0x5001
#define REG_TERM 0xfffe
#define VAL_TERM 0xfe
#define REG_DLY 0xffff
-#define OV5647_ROW_START 0x01
-#define OV5647_ROW_START_MIN 0
-#define OV5647_ROW_START_MAX 2004
-#define OV5647_ROW_START_DEF 54
+/* OV5647 native and active pixel array size */
+#define OV5647_NATIVE_WIDTH 2624U
+#define OV5647_NATIVE_HEIGHT 1956U
-#define OV5647_COLUMN_START 0x02
-#define OV5647_COLUMN_START_MIN 0
-#define OV5647_COLUMN_START_MAX 2750
-#define OV5647_COLUMN_START_DEF 16
+#define OV5647_PIXEL_ARRAY_LEFT 16U
+#define OV5647_PIXEL_ARRAY_TOP 16U
+#define OV5647_PIXEL_ARRAY_WIDTH 2592U
+#define OV5647_PIXEL_ARRAY_HEIGHT 1944U
-#define OV5647_WINDOW_HEIGHT 0x03
-#define OV5647_WINDOW_HEIGHT_MIN 2
-#define OV5647_WINDOW_HEIGHT_MAX 2006
-#define OV5647_WINDOW_HEIGHT_DEF 1944
+#define OV5647_VBLANK_MIN 4
+#define OV5647_VTS_MAX 32767
-#define OV5647_WINDOW_WIDTH 0x04
-#define OV5647_WINDOW_WIDTH_MIN 2
-#define OV5647_WINDOW_WIDTH_MAX 2752
-#define OV5647_WINDOW_WIDTH_DEF 2592
+#define OV5647_EXPOSURE_MIN 4
+#define OV5647_EXPOSURE_STEP 1
+#define OV5647_EXPOSURE_DEFAULT 1000
+#define OV5647_EXPOSURE_MAX 65535
struct regval_list {
u16 addr;
u8 data;
};
+struct ov5647_mode {
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ u64 pixel_rate;
+ int hts;
+ int vts;
+ const struct regval_list *reg_list;
+ unsigned int num_regs;
+};
+
struct ov5647 {
struct v4l2_subdev sd;
struct media_pad pad;
struct mutex lock;
- struct v4l2_mbus_framefmt format;
- unsigned int width;
- unsigned int height;
- int power_count;
struct clk *xclk;
+ struct gpio_desc *pwdn;
+ bool clock_ncont;
+ struct v4l2_ctrl_handler ctrls;
+ const struct ov5647_mode *mode;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *exposure;
+ bool streaming;
};
-static inline struct ov5647 *to_state(struct v4l2_subdev *sd)
+static inline struct ov5647 *to_sensor(struct v4l2_subdev *sd)
{
return container_of(sd, struct ov5647, sd);
}
-static struct regval_list sensor_oe_disable_regs[] = {
+static const struct regval_list sensor_oe_disable_regs[] = {
{0x3000, 0x00},
{0x3001, 0x00},
{0x3002, 0x00},
};
-static struct regval_list sensor_oe_enable_regs[] = {
+static const struct regval_list sensor_oe_enable_regs[] = {
{0x3000, 0x0f},
{0x3001, 0xff},
{0x3002, 0xe4},
};
-static struct regval_list ov5647_640x480[] = {
+static struct regval_list ov5647_2592x1944_10bpp[] = {
{0x0100, 0x00},
{0x0103, 0x01},
- {0x3034, 0x08},
+ {0x3034, 0x1a},
{0x3035, 0x21},
- {0x3036, 0x46},
+ {0x3036, 0x69},
+ {0x303c, 0x11},
+ {0x3106, 0xf5},
+ {0x3821, 0x06},
+ {0x3820, 0x00},
+ {0x3827, 0xec},
+ {0x370c, 0x03},
+ {0x3612, 0x5b},
+ {0x3618, 0x04},
+ {0x5000, 0x06},
+ {0x5002, 0x41},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3016, 0x08},
+ {0x3017, 0xe0},
+ {0x3018, 0x44},
+ {0x301c, 0xf8},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x0b},
+ {0x380d, 0x1c},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3708, 0x64},
+ {0x3709, 0x12},
+ {0x3808, 0x0a},
+ {0x3809, 0x20},
+ {0x380a, 0x07},
+ {0x380b, 0x98},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3811, 0x10},
+ {0x3813, 0x06},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x28},
+ {0x3a0a, 0x00},
+ {0x3a0b, 0xf6},
+ {0x3a0d, 0x08},
+ {0x3a0e, 0x06},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x19},
+ {0x4800, 0x24},
+ {0x3503, 0x03},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_1080p30_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3034, 0x1a},
+ {0x3035, 0x21},
+ {0x3036, 0x62},
+ {0x303c, 0x11},
+ {0x3106, 0xf5},
+ {0x3821, 0x06},
+ {0x3820, 0x00},
+ {0x3827, 0xec},
+ {0x370c, 0x03},
+ {0x3612, 0x5b},
+ {0x3618, 0x04},
+ {0x5000, 0x06},
+ {0x5002, 0x41},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3016, 0x08},
+ {0x3017, 0xe0},
+ {0x3018, 0x44},
+ {0x301c, 0xf8},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x09},
+ {0x380d, 0x70},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3708, 0x64},
+ {0x3709, 0x12},
+ {0x3808, 0x07},
+ {0x3809, 0x80},
+ {0x380a, 0x04},
+ {0x380b, 0x38},
+ {0x3800, 0x01},
+ {0x3801, 0x5c},
+ {0x3802, 0x01},
+ {0x3803, 0xb2},
+ {0x3804, 0x08},
+ {0x3805, 0xe3},
+ {0x3806, 0x05},
+ {0x3807, 0xf1},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x4b},
+ {0x3a0a, 0x01},
+ {0x3a0b, 0x13},
+ {0x3a0d, 0x04},
+ {0x3a0e, 0x03},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x19},
+ {0x4800, 0x34},
+ {0x3503, 0x03},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_2x2binned_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3034, 0x1a},
+ {0x3035, 0x21},
+ {0x3036, 0x62},
{0x303c, 0x11},
{0x3106, 0xf5},
- {0x3821, 0x07},
- {0x3820, 0x41},
{0x3827, 0xec},
- {0x370c, 0x0f},
+ {0x370c, 0x03},
{0x3612, 0x59},
{0x3618, 0x00},
{0x5000, 0x06},
- {0x5001, 0x01},
{0x5002, 0x41},
{0x5003, 0x08},
{0x5a00, 0x08},
@@ -136,32 +334,115 @@ static struct regval_list ov5647_640x480[] = {
{0x3a19, 0xf8},
{0x3c01, 0x80},
{0x3b07, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0xcc},
{0x380c, 0x07},
{0x380d, 0x68},
- {0x380e, 0x03},
- {0x380f, 0xd8},
+ {0x3811, 0x0c},
+ {0x3813, 0x06},
{0x3814, 0x31},
{0x3815, 0x31},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x28},
+ {0x3a0a, 0x00},
+ {0x3a0b, 0xf6},
+ {0x3a0d, 0x08},
+ {0x3a0e, 0x06},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x16},
+ {0x4800, 0x24},
+ {0x3503, 0x03},
+ {0x3820, 0x41},
+ {0x3821, 0x07},
+ {0x350a, 0x00},
+ {0x350b, 0x10},
+ {0x3500, 0x00},
+ {0x3501, 0x1a},
+ {0x3502, 0xf0},
+ {0x3212, 0xa0},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_640x480_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3035, 0x11},
+ {0x3036, 0x46},
+ {0x303c, 0x11},
+ {0x3821, 0x07},
+ {0x3820, 0x41},
+ {0x370c, 0x03},
+ {0x3612, 0x59},
+ {0x3618, 0x00},
+ {0x5000, 0x06},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0xff},
+ {0x3001, 0xff},
+ {0x3002, 0xff},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x07},
+ {0x380d, 0x3c},
+ {0x3814, 0x35},
+ {0x3815, 0x35},
{0x3708, 0x64},
{0x3709, 0x52},
{0x3808, 0x02},
{0x3809, 0x80},
{0x380a, 0x01},
- {0x380b, 0xE0},
- {0x3801, 0x00},
+ {0x380b, 0xe0},
+ {0x3800, 0x00},
+ {0x3801, 0x10},
{0x3802, 0x00},
{0x3803, 0x00},
{0x3804, 0x0a},
- {0x3805, 0x3f},
+ {0x3805, 0x2f},
{0x3806, 0x07},
- {0x3807, 0xa1},
- {0x3811, 0x08},
- {0x3813, 0x02},
+ {0x3807, 0x9f},
{0x3630, 0x2e},
{0x3632, 0xe2},
{0x3633, 0x23},
{0x3634, 0x44},
- {0x3636, 0x06},
{0x3620, 0x64},
{0x3621, 0xe0},
{0x3600, 0x37},
@@ -176,11 +457,11 @@ static struct regval_list ov5647_640x480[] = {
{0x3f06, 0x10},
{0x3f01, 0x0a},
{0x3a08, 0x01},
- {0x3a09, 0x27},
+ {0x3a09, 0x2e},
{0x3a0a, 0x00},
- {0x3a0b, 0xf6},
- {0x3a0d, 0x04},
- {0x3a0e, 0x03},
+ {0x3a0b, 0xfb},
+ {0x3a0d, 0x02},
+ {0x3a0e, 0x01},
{0x3a0f, 0x58},
{0x3a10, 0x50},
{0x3a1b, 0x58},
@@ -190,31 +471,152 @@ static struct regval_list ov5647_640x480[] = {
{0x4001, 0x02},
{0x4004, 0x02},
{0x4000, 0x09},
- {0x4837, 0x24},
- {0x4050, 0x6e},
- {0x4051, 0x8f},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3017, 0xe0},
+ {0x301c, 0xfc},
+ {0x3636, 0x06},
+ {0x3016, 0x08},
+ {0x3827, 0xec},
+ {0x3018, 0x44},
+ {0x3035, 0x21},
+ {0x3106, 0xf5},
+ {0x3034, 0x1a},
+ {0x301c, 0xf8},
+ {0x4800, 0x34},
+ {0x3503, 0x03},
{0x0100, 0x01},
};
-static int ov5647_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+static const struct ov5647_mode ov5647_modes[] = {
+ /* 2592x1944 full resolution full FOV 10-bit mode. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 2592,
+ .height = 1944
+ },
+ .crop = {
+ .left = OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2592,
+ .height = 1944
+ },
+ .pixel_rate = 87500000,
+ .hts = 2844,
+ .vts = 0x7b0,
+ .reg_list = ov5647_2592x1944_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_2592x1944_10bpp)
+ },
+ /* 1080p30 10-bit mode. Full resolution centre-cropped down to 1080p. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 1920,
+ .height = 1080
+ },
+ .crop = {
+ .left = 348 + OV5647_PIXEL_ARRAY_LEFT,
+ .top = 434 + OV5647_PIXEL_ARRAY_TOP,
+ .width = 1928,
+ .height = 1080,
+ },
+ .pixel_rate = 81666700,
+ .hts = 2416,
+ .vts = 0x450,
+ .reg_list = ov5647_1080p30_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_1080p30_10bpp)
+ },
+ /* 2x2 binned full FOV 10-bit mode. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 1296,
+ .height = 972
+ },
+ .crop = {
+ .left = OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2592,
+ .height = 1944,
+ },
+ .pixel_rate = 81666700,
+ .hts = 1896,
+ .vts = 0x59b,
+ .reg_list = ov5647_2x2binned_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_2x2binned_10bpp)
+ },
+ /* 10-bit VGA full FOV 60fps. 2x2 binned and subsampled down to VGA. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 640,
+ .height = 480
+ },
+ .crop = {
+ .left = 16 + OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2560,
+ .height = 1920,
+ },
+ .pixel_rate = 55000000,
+ .hts = 1852,
+ .vts = 0x1f8,
+ .reg_list = ov5647_640x480_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_640x480_10bpp)
+ },
+};
+
+/* Default sensor mode is 2x2 binned 640x480 SBGGR10_1X10. */
+#define OV5647_DEFAULT_MODE (&ov5647_modes[3])
+#define OV5647_DEFAULT_FORMAT (ov5647_modes[3].format)
+
+static int ov5647_write16(struct v4l2_subdev *sd, u16 reg, u16 val)
{
+ unsigned char data[4] = { reg >> 8, reg & 0xff, val >> 8, val & 0xff};
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
+
+ ret = i2c_master_send(client, data, 4);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+ return ret;
+ }
+
+ return 0;
+}
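+
+/*
+ * Illustrative example: the VBLANK handler below uses this to update VTS,
+ * e.g. ov5647_write16(sd, OV5647_REG_VTS_HI, 0x450) for the 1080p mode
+ * (1104 lines) sends 0x04 to 0x380e (VTS_HI) and 0x50 to 0x380f (VTS_LO),
+ * assuming the SCCB interface auto-increments the register address across
+ * the two data bytes, as the single 4-byte transfer above implies.
+ */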
+
+static int ov5647_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
unsigned char data[3] = { reg >> 8, reg & 0xff, val};
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
ret = i2c_master_send(client, data, 3);
- if (ret < 0)
+ if (ret < 0) {
dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
__func__, reg);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ov5647_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
{
- int ret;
unsigned char data_w[2] = { reg >> 8, reg & 0xff };
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
ret = i2c_master_send(client, data_w, 2);
if (ret < 0) {
@@ -224,15 +626,17 @@ static int ov5647_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
}
ret = i2c_master_recv(client, val, 1);
- if (ret < 0)
+ if (ret < 0) {
dev_dbg(&client->dev, "%s: i2c read error, reg: %x\n",
__func__, reg);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ov5647_write_array(struct v4l2_subdev *sd,
- struct regval_list *regs, int array_size)
+ const struct regval_list *regs, int array_size)
{
int i, ret;
@@ -255,161 +659,174 @@ static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
return ret;
channel_id &= ~(3 << 6);
- return ov5647_write(sd, OV5647_REG_MIPI_CTRL14, channel_id | (channel << 6));
+
+ return ov5647_write(sd, OV5647_REG_MIPI_CTRL14,
+ channel_id | (channel << 6));
}
-static int ov5647_stream_on(struct v4l2_subdev *sd)
+static int ov5647_set_mode(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ u8 resetval, rdval;
int ret;
- ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_BUS_IDLE);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_write_array(sd, sensor->mode->reg_list,
+ sensor->mode->num_regs);
+ if (ret < 0) {
+ dev_err(&client->dev, "write sensor default regs error\n");
+ return ret;
+ }
+
+ ret = ov5647_set_virtual_channel(sd, 0);
if (ret < 0)
return ret;
- ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
if (ret < 0)
return ret;
- return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
+ if (!(resetval & 0x01)) {
+ dev_err(&client->dev, "Device was in SW standby");
+ ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
-static int ov5647_stream_off(struct v4l2_subdev *sd)
+static int ov5647_stream_on(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ u8 val = MIPI_CTRL00_BUS_IDLE;
int ret;
- ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_CLOCK_LANE_GATE
- | MIPI_CTRL00_BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_DISABLE);
- if (ret < 0)
+ ret = ov5647_set_mode(sd);
+ if (ret) {
+ dev_err(&client->dev, "Failed to program sensor mode: %d\n", ret);
return ret;
+ }
- ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
- if (ret < 0)
+ /* Apply customized values from user when stream starts. */
+ ret = __v4l2_ctrl_handler_setup(sd->ctrl_handler);
+ if (ret)
return ret;
- return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
-}
-
-static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
-{
- int ret;
- u8 rdval;
+ if (sensor->clock_ncont)
+ val |= MIPI_CTRL00_CLOCK_LANE_GATE |
+ MIPI_CTRL00_LINE_SYNC_ENABLE;
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, val);
if (ret < 0)
return ret;
- if (standby)
- rdval &= ~0x01;
- else
- rdval |= 0x01;
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ if (ret < 0)
+ return ret;
- return ov5647_write(sd, OV5647_SW_STANDBY, rdval);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
}
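+
+/*
+ * For reference, with the bit definitions above: val is 0x04 (BUS_IDLE) on
+ * a continuous clock link, or 0x34 (BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_GATE |
+ * MIPI_CTRL00_LINE_SYNC_ENABLE) with a non-continuous clock; cf. the
+ * {0x4800, 0x24} and {0x4800, 0x34} entries in the mode tables above.
+ */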
-static int __sensor_init(struct v4l2_subdev *sd)
+static int ov5647_stream_off(struct v4l2_subdev *sd)
{
int ret;
- u8 resetval, rdval;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00,
+ MIPI_CTRL00_CLOCK_LANE_GATE | MIPI_CTRL00_BUS_IDLE |
+ MIPI_CTRL00_CLOCK_LANE_DISABLE);
if (ret < 0)
return ret;
- ret = ov5647_write_array(sd, ov5647_640x480,
- ARRAY_SIZE(ov5647_640x480));
- if (ret < 0) {
- dev_err(&client->dev, "write sensor default regs error\n");
- return ret;
- }
-
- ret = ov5647_set_virtual_channel(sd, 0);
- if (ret < 0)
- return ret;
-
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
if (ret < 0)
return ret;
- if (!(resetval & 0x01)) {
- dev_err(&client->dev, "Device was in SW standby");
- ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
- if (ret < 0)
- return ret;
- }
-
- /*
- * stream off to make the clock lane into LP-11 state.
- */
- return ov5647_stream_off(sd);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
}
-static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
+static int ov5647_power_on(struct device *dev)
{
- int ret = 0;
- struct ov5647 *ov5647 = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = dev_get_drvdata(dev);
+ int ret;
- mutex_lock(&ov5647->lock);
+ dev_dbg(dev, "OV5647 power on\n");
- if (on && !ov5647->power_count) {
- dev_dbg(&client->dev, "OV5647 power on\n");
+ if (sensor->pwdn) {
+ gpiod_set_value_cansleep(sensor->pwdn, 0);
+ msleep(PWDN_ACTIVE_DELAY_MS);
+ }
- ret = clk_prepare_enable(ov5647->xclk);
- if (ret < 0) {
- dev_err(&client->dev, "clk prepare enable failed\n");
- goto out;
- }
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret < 0) {
+ dev_err(dev, "clk prepare enable failed\n");
+ goto error_pwdn;
+ }
- ret = ov5647_write_array(sd, sensor_oe_enable_regs,
- ARRAY_SIZE(sensor_oe_enable_regs));
- if (ret < 0) {
- clk_disable_unprepare(ov5647->xclk);
- dev_err(&client->dev,
- "write sensor_oe_enable_regs error\n");
- goto out;
- }
+ ret = ov5647_write_array(&sensor->sd, sensor_oe_enable_regs,
+ ARRAY_SIZE(sensor_oe_enable_regs));
+ if (ret < 0) {
+ dev_err(dev, "write sensor_oe_enable_regs error\n");
+ goto error_clk_disable;
+ }
- ret = __sensor_init(sd);
- if (ret < 0) {
- clk_disable_unprepare(ov5647->xclk);
- dev_err(&client->dev,
- "Camera not available, check Power\n");
- goto out;
- }
- } else if (!on && ov5647->power_count == 1) {
- dev_dbg(&client->dev, "OV5647 power off\n");
+ /* Stream off to coax lanes into LP-11 state. */
+ ret = ov5647_stream_off(&sensor->sd);
+ if (ret < 0) {
+ dev_err(dev, "camera not available, check power\n");
+ goto error_clk_disable;
+ }
- ret = ov5647_write_array(sd, sensor_oe_disable_regs,
- ARRAY_SIZE(sensor_oe_disable_regs));
+ return 0;
- if (ret < 0)
- dev_dbg(&client->dev, "disable oe failed\n");
+error_clk_disable:
+ clk_disable_unprepare(sensor->xclk);
+error_pwdn:
+ gpiod_set_value_cansleep(sensor->pwdn, 1);
- ret = set_sw_standby(sd, true);
+ return ret;
+}
- if (ret < 0)
- dev_dbg(&client->dev, "soft stby failed\n");
+static int ov5647_power_off(struct device *dev)
+{
+ struct ov5647 *sensor = dev_get_drvdata(dev);
+ u8 rdval;
+ int ret;
- clk_disable_unprepare(ov5647->xclk);
- }
+ dev_dbg(dev, "OV5647 power off\n");
- /* Update the power count. */
- ov5647->power_count += on ? 1 : -1;
- WARN_ON(ov5647->power_count < 0);
+ ret = ov5647_write_array(&sensor->sd, sensor_oe_disable_regs,
+ ARRAY_SIZE(sensor_oe_disable_regs));
+ if (ret < 0)
+ dev_dbg(dev, "disable oe failed\n");
-out:
- mutex_unlock(&ov5647->lock);
+ /* Enter software standby. */
+ ret = ov5647_read(&sensor->sd, OV5647_SW_STANDBY, &rdval);
+ if (ret < 0)
+ dev_dbg(dev, "software standby failed\n");
- return ret;
+ rdval &= ~0x01;
+ ret = ov5647_write(&sensor->sd, OV5647_SW_STANDBY, rdval);
+ if (ret < 0)
+ dev_dbg(dev, "software standby failed\n");
+
+ clk_disable_unprepare(sensor->xclk);
+ gpiod_set_value_cansleep(sensor->pwdn, 1);
+
+ return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov5647_sensor_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
+ struct v4l2_dbg_register *reg)
{
- u8 val;
int ret;
+ u8 val;
ret = ov5647_read(sd, reg->reg & 0xff, &val);
if (ret < 0)
@@ -422,29 +839,77 @@ static int ov5647_sensor_get_register(struct v4l2_subdev *sd,
}
static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
+ const struct v4l2_dbg_register *reg)
{
return ov5647_write(sd, reg->reg & 0xff, reg->val & 0xff);
}
#endif
-/*
- * Subdev core operations registration
- */
+/* Subdev core operations registration */
static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
- .s_power = ov5647_sensor_power,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov5647_sensor_get_register,
.s_register = ov5647_sensor_set_register,
#endif
};
+static const struct v4l2_rect *
+__ov5647_get_pad_crop(struct ov5647 *ov5647, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&ov5647->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5647->mode->crop;
+ }
+
+ return NULL;
+}
+
static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
{
- if (enable)
- return ov5647_stream_on(sd);
- else
- return ov5647_stream_off(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ int ret;
+
+ mutex_lock(&sensor->lock);
+ if (sensor->streaming == enable) {
+ mutex_unlock(&sensor->lock);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0)
+ goto error_unlock;
+
+ ret = ov5647_stream_on(sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "stream start failed: %d\n", ret);
+ goto error_unlock;
+ }
+ } else {
+ ret = ov5647_stream_off(sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "stream stop failed: %d\n", ret);
+ goto error_unlock;
+ }
+ pm_runtime_put(&client->dev);
+ }
+
+ sensor->streaming = enable;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+
+error_unlock:
+ pm_runtime_put(&client->dev);
+ mutex_unlock(&sensor->lock);
+
+ return ret;
}
static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
@@ -452,19 +917,150 @@ static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
};
static int ov5647_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov5647_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct v4l2_mbus_framefmt *fmt;
+
+ if (fse->code != MEDIA_BUS_FMT_SBGGR10_1X10 ||
+ fse->index >= ARRAY_SIZE(ov5647_modes))
+ return -EINVAL;
+
+ fmt = &ov5647_modes[fse->index].format;
+ fse->min_width = fmt->width;
+ fse->max_width = fmt->width;
+ fse->min_height = fmt->height;
+ fse->max_height = fmt->height;
+
+ return 0;
+}
+
+static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ const struct v4l2_mbus_framefmt *sensor_format;
+ struct ov5647 *sensor = to_sensor(sd);
+
+ mutex_lock(&sensor->lock);
+ switch (format->which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ sensor_format = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ break;
+ default:
+ sensor_format = &sensor->mode->format;
+ break;
+ }
+
+ *fmt = *sensor_format;
+ mutex_unlock(&sensor->lock);
return 0;
}
+static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5647 *sensor = to_sensor(sd);
+ const struct ov5647_mode *mode;
+
+ mode = v4l2_find_nearest_size(ov5647_modes, ARRAY_SIZE(ov5647_modes),
+ format.width, format.height,
+ fmt->width, fmt->height);
+
+ /* Update the sensor mode and apply it at streamon time. */
+ mutex_lock(&sensor->lock);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, format->pad) = mode->format;
+ } else {
+ int exposure_max, exposure_def;
+ int hblank, vblank;
+
+ sensor->mode = mode;
+ __v4l2_ctrl_modify_range(sensor->pixel_rate, mode->pixel_rate,
+ mode->pixel_rate, 1, mode->pixel_rate);
+
+ hblank = mode->hts - mode->format.width;
+ __v4l2_ctrl_modify_range(sensor->hblank, hblank, hblank, 1,
+ hblank);
+
+ vblank = mode->vts - mode->format.height;
+ __v4l2_ctrl_modify_range(sensor->vblank, OV5647_VBLANK_MIN,
+ OV5647_VTS_MAX - mode->format.height,
+ 1, vblank);
+ __v4l2_ctrl_s_ctrl(sensor->vblank, vblank);
+
+ exposure_max = mode->vts - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ exposure_def);
+ }
+ *fmt = mode->format;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
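+
+/*
+ * Worked example of the blanking arithmetic above, using the 1080p30 entry
+ * of ov5647_modes[]: hblank = hts - width = 2416 - 1920 = 496, and the
+ * default vblank = vts - height = 0x450 (1104) - 1080 = 24, which is the
+ * value the VBLANK control is reset to when that mode is applied.
+ */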
+
+static int ov5647_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ struct ov5647 *sensor = to_sensor(sd);
+
+ mutex_lock(&sensor->lock);
+ sel->r = *__ov5647_get_pad_crop(sensor, cfg, sel->pad,
+ sel->which);
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+ }
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV5647_NATIVE_WIDTH;
+ sel->r.height = OV5647_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = OV5647_PIXEL_ARRAY_TOP;
+ sel->r.left = OV5647_PIXEL_ARRAY_LEFT;
+ sel->r.width = OV5647_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV5647_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static const struct v4l2_subdev_pad_ops ov5647_subdev_pad_ops = {
- .enum_mbus_code = ov5647_enum_mbus_code,
+ .enum_mbus_code = ov5647_enum_mbus_code,
+ .enum_frame_size = ov5647_enum_frame_size,
+ .set_fmt = ov5647_set_pad_fmt,
+ .get_fmt = ov5647_get_pad_fmt,
+ .get_selection = ov5647_get_selection,
};
static const struct v4l2_subdev_ops ov5647_subdev_ops = {
@@ -475,9 +1071,9 @@ static const struct v4l2_subdev_ops ov5647_subdev_ops = {
static int ov5647_detect(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 read;
int ret;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
ret = ov5647_write(sd, OV5647_SW_RESET, 0x01);
if (ret < 0)
@@ -508,20 +1104,14 @@ static int ov5647_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
v4l2_subdev_get_try_format(sd, fh->pad, 0);
- struct v4l2_rect *crop =
- v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
- crop->left = OV5647_COLUMN_START_DEF;
- crop->top = OV5647_ROW_START_DEF;
- crop->width = OV5647_WINDOW_WIDTH_DEF;
- crop->height = OV5647_WINDOW_HEIGHT_DEF;
+ crop->left = OV5647_PIXEL_ARRAY_LEFT;
+ crop->top = OV5647_PIXEL_ARRAY_TOP;
+ crop->width = OV5647_PIXEL_ARRAY_WIDTH;
+ crop->height = OV5647_PIXEL_ARRAY_HEIGHT;
- format->code = MEDIA_BUS_FMT_SBGGR8_1X8;
-
- format->width = OV5647_WINDOW_WIDTH_DEF;
- format->height = OV5647_WINDOW_HEIGHT_DEF;
- format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+ *format = OV5647_DEFAULT_FORMAT;
return 0;
}
@@ -530,11 +1120,220 @@ static const struct v4l2_subdev_internal_ops ov5647_subdev_internal_ops = {
.open = ov5647_open,
};
-static int ov5647_parse_dt(struct device_node *np)
+static int ov5647_s_auto_white_balance(struct v4l2_subdev *sd, u32 val)
{
- struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
- struct device_node *ep;
+ return ov5647_write(sd, OV5647_REG_AWB, val ? 1 : 0);
+}
+static int ov5647_s_autogain(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+ u8 reg;
+
+ /* Non-zero turns on AGC by clearing bit 1. */
+ ret = ov5647_read(sd, OV5647_REG_AEC_AGC, &reg);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_AEC_AGC, val ? reg & ~BIT(1)
+ : reg | BIT(1));
+}
+
+static int ov5647_s_exposure_auto(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+ u8 reg;
+
+ /*
+ * Everything except V4L2_EXPOSURE_MANUAL turns on AEC by
+ * clearing bit 0.
+ */
+ ret = ov5647_read(sd, OV5647_REG_AEC_AGC, &reg);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_AEC_AGC,
+ val == V4L2_EXPOSURE_MANUAL ? reg | BIT(0)
+ : reg & ~BIT(0));
+}
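+
+/*
+ * Both handlers above poke OV5647_REG_AEC_AGC (0x3503): the mode tables
+ * program it to 0x03 (AGC and AEC both manual), and these then clear bit 1
+ * or bit 0 respectively to hand gain or exposure back to the automatic
+ * loops.
+ */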
+
+static int ov5647_s_analogue_gain(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+
+ /* 10 bits of gain, 2 in the high register. */
+ ret = ov5647_write(sd, OV5647_REG_GAIN_HI, (val >> 8) & 3);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_GAIN_LO, val & 0xff);
+}
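+
+/*
+ * Gain is in 1/16 steps (16 = 1.0x, see the control setup below). For
+ * instance the default of 32 (2.0x) splits into GAIN_HI = (32 >> 8) & 3 = 0
+ * and GAIN_LO = 32 & 0xff = 0x20.
+ */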
+
+static int ov5647_s_exposure(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+
+ /*
+ * Sensor has 20 bits, but the bottom 4 bits are fractions of a line
+ * which we leave as zero (and don't receive in "val").
+ */
+ ret = ov5647_write(sd, OV5647_REG_EXP_HI, (val >> 12) & 0xf);
+ if (ret)
+ return ret;
+
+ ret = ov5647_write(sd, OV5647_REG_EXP_MID, (val >> 4) & 0xff);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_EXP_LO, (val & 0xf) << 4);
+}
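+
+/*
+ * For example, the default exposure of 1000 lines (0x3e8) is written as
+ * EXP_HI = (0x3e8 >> 12) & 0xf = 0x00, EXP_MID = (0x3e8 >> 4) & 0xff = 0x3e
+ * and EXP_LO = (0x3e8 & 0xf) << 4 = 0x80, i.e. the 20-bit register value
+ * 0x003e80 with its four fractional bits left at zero.
+ */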
+
+static int ov5647_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5647 *sensor = container_of(ctrl->handler,
+ struct ov5647, ctrls);
+ struct v4l2_subdev *sd = &sensor->sd;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ /* v4l2_ctrl_lock() locks our own mutex. */
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = sensor->mode->format.height + ctrl->val - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ exposure_def);
+ }
+
+ /*
+ * If the device is not powered up do not apply any controls
+ * to H/W at this time. Instead the controls will be restored
+ * at s_stream(1) time.
+ */
+ if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov5647_s_auto_white_balance(sd, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ ret = ov5647_s_autogain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov5647_s_exposure_auto(sd, ctrl->val);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov5647_s_analogue_gain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov5647_s_exposure(sd, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = ov5647_write16(sd, OV5647_REG_VTS_HI,
+ sensor->mode->format.height + ctrl->val);
+ break;
+
+ case V4L2_CID_PIXEL_RATE:
+ case V4L2_CID_HBLANK:
+ /* Read-only, but we adjust it based on mode. */
+ break;
+
+ default:
+ dev_info(&client->dev,
+ "Control (id:0x%x, val:0x%x) not supported\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov5647_ctrl_ops = {
+ .s_ctrl = ov5647_s_ctrl,
+};
+
+static int ov5647_init_controls(struct ov5647 *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->sd);
+ int hblank, exposure_max, exposure_def;
+
+ v4l2_ctrl_handler_init(&sensor->ctrls, 8);
+
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std_menu(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL,
+ 0, V4L2_EXPOSURE_MANUAL);
+
+ exposure_max = sensor->mode->vts - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ sensor->exposure = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV5647_EXPOSURE_MIN,
+ exposure_max, OV5647_EXPOSURE_STEP,
+ exposure_def);
+
+ /* min: 16 = 1.0x; max (10 bits); default: 32 = 2.0x. */
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, 16, 1023, 1, 32);
+
+ /* By default, PIXEL_RATE is read only, but it does change per mode. */
+ sensor->pixel_rate = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ sensor->mode->pixel_rate,
+ sensor->mode->pixel_rate, 1,
+ sensor->mode->pixel_rate);
+
+ /* By default, HBLANK is read only, but it does change per mode. */
+ hblank = sensor->mode->hts - sensor->mode->format.width;
+ sensor->hblank = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank, 1,
+ hblank);
+
+ sensor->vblank = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_VBLANK, OV5647_VBLANK_MIN,
+ OV5647_VTS_MAX -
+ sensor->mode->format.height, 1,
+ sensor->mode->vts -
+ sensor->mode->format.height);
+
+ if (sensor->ctrls.error)
+ goto handler_free;
+
+ sensor->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->sd.ctrl_handler = &sensor->ctrls;
+
+ return 0;
+
+handler_free:
+ dev_err(&client->dev, "%s Controls initialization failed (%d)\n",
+ __func__, sensor->ctrls.error);
+ v4l2_ctrl_handler_free(&sensor->ctrls);
+
+ return sensor->ctrls.error;
+}
+
+static int ov5647_parse_dt(struct ov5647 *sensor, struct device_node *np)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device_node *ep;
int ret;
ep = of_graph_get_next_endpoint(np, NULL);
@@ -542,33 +1341,39 @@ static int ov5647_parse_dt(struct device_node *np)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
+ if (ret)
+ goto out;
+ sensor->clock_ncont = bus_cfg.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+
+out:
of_node_put(ep);
+
return ret;
}
static int ov5647_probe(struct i2c_client *client)
{
+ struct device_node *np = client->dev.of_node;
struct device *dev = &client->dev;
struct ov5647 *sensor;
- int ret;
struct v4l2_subdev *sd;
- struct device_node *np = client->dev.of_node;
u32 xclk_freq;
+ int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return -ENOMEM;
if (IS_ENABLED(CONFIG_OF) && np) {
- ret = ov5647_parse_dt(np);
+ ret = ov5647_parse_dt(sensor, np);
if (ret) {
dev_err(dev, "DT parsing error: %d\n", ret);
return ret;
}
}
- /* get system clock (xclk) */
sensor->xclk = devm_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk)) {
dev_err(dev, "could not get xclk");
@@ -581,52 +1386,87 @@ static int ov5647_probe(struct i2c_client *client)
return -EINVAL;
}
+ /* Request the power-down GPIO, asserted by default. */
+ sensor->pwdn = devm_gpiod_get_optional(dev, "pwdn", GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->pwdn)) {
+ dev_err(dev, "Failed to get 'pwdn' gpio\n");
+ return -EINVAL;
+ }
+
mutex_init(&sensor->lock);
+ sensor->mode = OV5647_DEFAULT_MODE;
+
+ ret = ov5647_init_controls(sensor);
+ if (ret)
+ goto mutex_destroy;
+
sd = &sensor->sd;
v4l2_i2c_subdev_init(sd, client, &ov5647_subdev_ops);
- sensor->sd.internal_ops = &ov5647_subdev_internal_ops;
- sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &ov5647_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &sensor->pad);
if (ret < 0)
- goto mutex_remove;
+ goto ctrl_handler_free;
+
+ ret = ov5647_power_on(dev);
+ if (ret)
+ goto entity_cleanup;
ret = ov5647_detect(sd);
if (ret < 0)
- goto error;
+ goto power_off;
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
- goto error;
+ goto power_off;
+
+ /* Enable runtime PM and turn off the device. */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
dev_dbg(dev, "OmniVision OV5647 camera driver probed\n");
+
return 0;
-error:
+
+power_off:
+ ov5647_power_off(dev);
+entity_cleanup:
media_entity_cleanup(&sd->entity);
-mutex_remove:
+ctrl_handler_free:
+ v4l2_ctrl_handler_free(&sensor->ctrls);
+mutex_destroy:
mutex_destroy(&sensor->lock);
+
return ret;
}
static int ov5647_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov5647 *ov5647 = to_state(sd);
+ struct ov5647 *sensor = to_sensor(sd);
- v4l2_async_unregister_subdev(&ov5647->sd);
- media_entity_cleanup(&ov5647->sd.entity);
+ v4l2_async_unregister_subdev(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls);
v4l2_device_unregister_subdev(sd);
- mutex_destroy(&ov5647->lock);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&sensor->lock);
return 0;
}
+static const struct dev_pm_ops ov5647_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov5647_power_off, ov5647_power_on, NULL)
+};
+
static const struct i2c_device_id ov5647_id[] = {
{ "ov5647", 0 },
- { }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov5647_id);
@@ -641,7 +1481,8 @@ MODULE_DEVICE_TABLE(of, ov5647_of_match);
static struct i2c_driver ov5647_driver = {
.driver = {
.of_match_table = of_match_ptr(ov5647_of_match),
- .name = SENSOR_NAME,
+ .name = "ov5647",
+ .pm = &ov5647_pm_ops,
},
.probe_new = ov5647_probe,
.remove = ov5647_remove,
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
new file mode 100644
index 000000000000..dfe38ab8224d
--- /dev/null
+++ b/drivers/media/i2c/ov5648.c
@@ -0,0 +1,2624 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-mediabus.h>
+
+/* Clock rate */
+
+#define OV5648_XVCLK_RATE 24000000
+
+/* Register definitions */
+
+/* System */
+
+#define OV5648_SW_STANDBY_REG 0x100
+#define OV5648_SW_STANDBY_STREAM_ON BIT(0)
+
+#define OV5648_SW_RESET_REG 0x103
+#define OV5648_SW_RESET_RESET BIT(0)
+
+#define OV5648_PAD_OEN0_REG 0x3000
+#define OV5648_PAD_OEN1_REG 0x3001
+#define OV5648_PAD_OEN2_REG 0x3002
+#define OV5648_PAD_OUT0_REG 0x3008
+#define OV5648_PAD_OUT1_REG 0x3009
+
+#define OV5648_CHIP_ID_H_REG 0x300a
+#define OV5648_CHIP_ID_H_VALUE 0x56
+#define OV5648_CHIP_ID_L_REG 0x300b
+#define OV5648_CHIP_ID_L_VALUE 0x48
+
+#define OV5648_PAD_OUT2_REG 0x300d
+#define OV5648_PAD_SEL0_REG 0x300e
+#define OV5648_PAD_SEL1_REG 0x300f
+#define OV5648_PAD_SEL2_REG 0x3010
+#define OV5648_PAD_PK_REG 0x3011
+#define OV5648_PAD_PK_PD_DATO_EN BIT(7)
+#define OV5648_PAD_PK_DRIVE_STRENGTH_1X (0 << 5)
+#define OV5648_PAD_PK_DRIVE_STRENGTH_2X (2 << 5)
+#define OV5648_PAD_PK_FREX_N BIT(1)
+
+#define OV5648_A_PWC_PK_O0_REG 0x3013
+#define OV5648_A_PWC_PK_O0_BP_REGULATOR_N BIT(3)
+#define OV5648_A_PWC_PK_O1_REG 0x3014
+
+#define OV5648_MIPI_PHY0_REG 0x3016
+#define OV5648_MIPI_PHY1_REG 0x3017
+#define OV5648_MIPI_SC_CTRL0_REG 0x3018
+#define OV5648_MIPI_SC_CTRL0_MIPI_LANES(v) (((v) << 5) & GENMASK(7, 5))
+#define OV5648_MIPI_SC_CTRL0_PHY_HS_TX_PD BIT(4)
+#define OV5648_MIPI_SC_CTRL0_PHY_LP_RX_PD BIT(3)
+#define OV5648_MIPI_SC_CTRL0_MIPI_EN BIT(2)
+#define OV5648_MIPI_SC_CTRL0_MIPI_SUSP BIT(1)
+#define OV5648_MIPI_SC_CTRL0_LANE_DIS_OP BIT(0)
+#define OV5648_MIPI_SC_CTRL1_REG 0x3019
+#define OV5648_MISC_CTRL0_REG 0x3021
+#define OV5648_MIPI_SC_CTRL2_REG 0x3022
+#define OV5648_SUB_ID_REG 0x302a
+
+#define OV5648_PLL_CTRL0_REG 0x3034
+#define OV5648_PLL_CTRL0_PLL_CHARGE_PUMP(v) (((v) << 4) & GENMASK(6, 4))
+#define OV5648_PLL_CTRL0_BITS(v) ((v) & GENMASK(3, 0))
+#define OV5648_PLL_CTRL1_REG 0x3035
+#define OV5648_PLL_CTRL1_SYS_DIV(v) (((v) << 4) & GENMASK(7, 4))
+#define OV5648_PLL_CTRL1_MIPI_DIV(v) ((v) & GENMASK(3, 0))
+#define OV5648_PLL_MUL_REG 0x3036
+#define OV5648_PLL_MUL(v) ((v) & GENMASK(7, 0))
+#define OV5648_PLL_DIV_REG 0x3037
+#define OV5648_PLL_DIV_ROOT_DIV(v) ((((v) - 1) << 4) & BIT(4))
+#define OV5648_PLL_DIV_PLL_PRE_DIV(v) ((v) & GENMASK(3, 0))
+#define OV5648_PLL_DEBUG_REG 0x3038
+#define OV5648_PLL_BYPASS_REG 0x3039
+
+#define OV5648_PLLS_BYPASS_REG 0x303a
+#define OV5648_PLLS_MUL_REG 0x303b
+#define OV5648_PLLS_MUL(v) ((v) & GENMASK(4, 0))
+#define OV5648_PLLS_CTRL_REG 0x303c
+#define OV5648_PLLS_CTRL_PLL_CHARGE_PUMP(v) (((v) << 4) & GENMASK(6, 4))
+#define OV5648_PLLS_CTRL_SYS_DIV(v) ((v) & GENMASK(3, 0))
+#define OV5648_PLLS_DIV_REG 0x303d
+#define OV5648_PLLS_DIV_PLLS_PRE_DIV(v) (((v) << 4) & GENMASK(5, 4))
+#define OV5648_PLLS_DIV_PLLS_DIV_R(v) ((((v) - 1) << 2) & BIT(2))
+#define OV5648_PLLS_DIV_PLLS_SEL_DIV(v) ((v) & GENMASK(1, 0))
+
+#define OV5648_SRB_CTRL_REG 0x3106
+#define OV5648_SRB_CTRL_SCLK_DIV(v) (((v) << 2) & GENMASK(3, 2))
+#define OV5648_SRB_CTRL_RESET_ARBITER_EN BIT(1)
+#define OV5648_SRB_CTRL_SCLK_ARBITER_EN BIT(0)
+
+/* Group Hold */
+
+#define OV5648_GROUP_ADR0_REG 0x3200
+#define OV5648_GROUP_ADR1_REG 0x3201
+#define OV5648_GROUP_ADR2_REG 0x3202
+#define OV5648_GROUP_ADR3_REG 0x3203
+#define OV5648_GROUP_LEN0_REG 0x3204
+#define OV5648_GROUP_LEN1_REG 0x3205
+#define OV5648_GROUP_LEN2_REG 0x3206
+#define OV5648_GROUP_LEN3_REG 0x3207
+#define OV5648_GROUP_ACCESS_REG 0x3208
+
+/* Exposure/gain/banding */
+
+#define OV5648_EXPOSURE_CTRL_HH_REG 0x3500
+#define OV5648_EXPOSURE_CTRL_HH(v) (((v) & GENMASK(19, 16)) >> 16)
+#define OV5648_EXPOSURE_CTRL_HH_VALUE(v) (((v) << 16) & GENMASK(19, 16))
+#define OV5648_EXPOSURE_CTRL_H_REG 0x3501
+#define OV5648_EXPOSURE_CTRL_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV5648_EXPOSURE_CTRL_H_VALUE(v) (((v) << 8) & GENMASK(15, 8))
+#define OV5648_EXPOSURE_CTRL_L_REG 0x3502
+#define OV5648_EXPOSURE_CTRL_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_EXPOSURE_CTRL_L_VALUE(v) ((v) & GENMASK(7, 0))
+#define OV5648_MANUAL_CTRL_REG 0x3503
+#define OV5648_MANUAL_CTRL_FRAME_DELAY(v) (((v) << 4) & GENMASK(5, 4))
+#define OV5648_MANUAL_CTRL_AGC_MANUAL_EN BIT(1)
+#define OV5648_MANUAL_CTRL_AEC_MANUAL_EN BIT(0)
+#define OV5648_GAIN_CTRL_H_REG 0x350a
+#define OV5648_GAIN_CTRL_H(v) (((v) & GENMASK(9, 8)) >> 8)
+#define OV5648_GAIN_CTRL_H_VALUE(v) (((v) << 8) & GENMASK(9, 8))
+#define OV5648_GAIN_CTRL_L_REG 0x350b
+#define OV5648_GAIN_CTRL_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_GAIN_CTRL_L_VALUE(v) ((v) & GENMASK(7, 0))
+
+#define OV5648_ANALOG_CTRL0_REG_BASE 0x3600
+#define OV5648_ANALOG_CTRL1_REG_BASE 0x3700
+
+#define OV5648_AEC_CTRL0_REG 0x3a00
+#define OV5648_AEC_CTRL0_DEBUG BIT(6)
+#define OV5648_AEC_CTRL0_DEBAND_EN BIT(5)
+#define OV5648_AEC_CTRL0_DEBAND_LOW_LIMIT_EN BIT(4)
+#define OV5648_AEC_CTRL0_START_SEL_EN BIT(3)
+#define OV5648_AEC_CTRL0_NIGHT_MODE_EN BIT(2)
+#define OV5648_AEC_CTRL0_FREEZE_EN BIT(0)
+#define OV5648_EXPOSURE_MIN_REG 0x3a01
+#define OV5648_EXPOSURE_MAX_60_H_REG 0x3a02
+#define OV5648_EXPOSURE_MAX_60_L_REG 0x3a03
+#define OV5648_AEC_CTRL5_REG 0x3a05
+#define OV5648_AEC_CTRL6_REG 0x3a06
+#define OV5648_AEC_CTRL7_REG 0x3a07
+#define OV5648_BANDING_STEP_50_H_REG 0x3a08
+#define OV5648_BANDING_STEP_50_L_REG 0x3a09
+#define OV5648_BANDING_STEP_60_H_REG 0x3a0a
+#define OV5648_BANDING_STEP_60_L_REG 0x3a0b
+#define OV5648_AEC_CTRLC_REG 0x3a0c
+#define OV5648_BANDING_MAX_60_REG 0x3a0d
+#define OV5648_BANDING_MAX_50_REG 0x3a0e
+#define OV5648_WPT_REG 0x3a0f
+#define OV5648_BPT_REG 0x3a10
+#define OV5648_VPT_HIGH_REG 0x3a11
+#define OV5648_AVG_MANUAL_REG 0x3a12
+#define OV5648_PRE_GAIN_REG 0x3a13
+#define OV5648_EXPOSURE_MAX_50_H_REG 0x3a14
+#define OV5648_EXPOSURE_MAX_50_L_REG 0x3a15
+#define OV5648_GAIN_BASE_NIGHT_REG 0x3a17
+#define OV5648_AEC_GAIN_CEILING_H_REG 0x3a18
+#define OV5648_AEC_GAIN_CEILING_L_REG 0x3a19
+#define OV5648_DIFF_MAX_REG 0x3a1a
+#define OV5648_WPT2_REG 0x3a1b
+#define OV5648_LED_ADD_ROW_H_REG 0x3a1c
+#define OV5648_LED_ADD_ROW_L_REG 0x3a1d
+#define OV5648_BPT2_REG 0x3a1e
+#define OV5648_VPT_LOW_REG 0x3a1f
+#define OV5648_AEC_CTRL20_REG 0x3a20
+#define OV5648_AEC_CTRL21_REG 0x3a21
+
+#define OV5648_AVG_START_X_H_REG 0x5680
+#define OV5648_AVG_START_X_L_REG 0x5681
+#define OV5648_AVG_START_Y_H_REG 0x5682
+#define OV5648_AVG_START_Y_L_REG 0x5683
+#define OV5648_AVG_WINDOW_X_H_REG 0x5684
+#define OV5648_AVG_WINDOW_X_L_REG 0x5685
+#define OV5648_AVG_WINDOW_Y_H_REG 0x5686
+#define OV5648_AVG_WINDOW_Y_L_REG 0x5687
+#define OV5648_AVG_WEIGHT00_REG 0x5688
+#define OV5648_AVG_WEIGHT01_REG 0x5689
+#define OV5648_AVG_WEIGHT02_REG 0x568a
+#define OV5648_AVG_WEIGHT03_REG 0x568b
+#define OV5648_AVG_WEIGHT04_REG 0x568c
+#define OV5648_AVG_WEIGHT05_REG 0x568d
+#define OV5648_AVG_WEIGHT06_REG 0x568e
+#define OV5648_AVG_WEIGHT07_REG 0x568f
+#define OV5648_AVG_CTRL10_REG 0x5690
+#define OV5648_AVG_WEIGHT_SUM_REG 0x5691
+#define OV5648_AVG_READOUT_REG 0x5693
+
+#define OV5648_DIG_CTRL0_REG 0x5a00
+#define OV5648_DIG_COMP_MAN_H_REG 0x5a02
+#define OV5648_DIG_COMP_MAN_L_REG 0x5a03
+
+#define OV5648_GAINC_MAN_H_REG 0x5a20
+#define OV5648_GAINC_MAN_L_REG 0x5a21
+#define OV5648_GAINC_DGC_MAN_H_REG 0x5a22
+#define OV5648_GAINC_DGC_MAN_L_REG 0x5a23
+#define OV5648_GAINC_CTRL0_REG 0x5a24
+
+#define OV5648_GAINF_ANA_NUM_REG 0x5a40
+#define OV5648_GAINF_DIG_GAIN_REG 0x5a41
+
+/* Timing */
+
+#define OV5648_CROP_START_X_H_REG 0x3800
+#define OV5648_CROP_START_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_CROP_START_X_L_REG 0x3801
+#define OV5648_CROP_START_X_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_CROP_START_Y_H_REG 0x3802
+#define OV5648_CROP_START_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_CROP_START_Y_L_REG 0x3803
+#define OV5648_CROP_START_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_CROP_END_X_H_REG 0x3804
+#define OV5648_CROP_END_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_CROP_END_X_L_REG 0x3805
+#define OV5648_CROP_END_X_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_CROP_END_Y_H_REG 0x3806
+#define OV5648_CROP_END_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_CROP_END_Y_L_REG 0x3807
+#define OV5648_CROP_END_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_OUTPUT_SIZE_X_H_REG 0x3808
+#define OV5648_OUTPUT_SIZE_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_OUTPUT_SIZE_X_L_REG 0x3809
+#define OV5648_OUTPUT_SIZE_X_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_OUTPUT_SIZE_Y_H_REG 0x380a
+#define OV5648_OUTPUT_SIZE_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_OUTPUT_SIZE_Y_L_REG 0x380b
+#define OV5648_OUTPUT_SIZE_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_HTS_H_REG 0x380c
+#define OV5648_HTS_H(v) (((v) & GENMASK(12, 8)) >> 8)
+#define OV5648_HTS_L_REG 0x380d
+#define OV5648_HTS_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_VTS_H_REG 0x380e
+#define OV5648_VTS_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV5648_VTS_L_REG 0x380f
+#define OV5648_VTS_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_OFFSET_X_H_REG 0x3810
+#define OV5648_OFFSET_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_OFFSET_X_L_REG 0x3811
+#define OV5648_OFFSET_X_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_OFFSET_Y_H_REG 0x3812
+#define OV5648_OFFSET_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_OFFSET_Y_L_REG 0x3813
+#define OV5648_OFFSET_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_SUB_INC_X_REG 0x3814
+#define OV5648_SUB_INC_X_ODD(v) (((v) << 4) & GENMASK(7, 4))
+#define OV5648_SUB_INC_X_EVEN(v) ((v) & GENMASK(3, 0))
+#define OV5648_SUB_INC_Y_REG 0x3815
+#define OV5648_SUB_INC_Y_ODD(v) (((v) << 4) & GENMASK(7, 4))
+#define OV5648_SUB_INC_Y_EVEN(v) ((v) & GENMASK(3, 0))
+#define OV5648_HSYNCST_H_REG 0x3816
+#define OV5648_HSYNCST_H(v) (((v) >> 8) & 0xf)
+#define OV5648_HSYNCST_L_REG 0x3817
+#define OV5648_HSYNCST_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_HSYNCW_H_REG 0x3818
+#define OV5648_HSYNCW_H(v) (((v) >> 8) & 0xf)
+#define OV5648_HSYNCW_L_REG 0x3819
+#define OV5648_HSYNCW_L(v) ((v) & GENMASK(7, 0))
+
+#define OV5648_TC20_REG 0x3820
+#define OV5648_TC20_DEBUG BIT(6)
+#define OV5648_TC20_FLIP_VERT_ISP_EN BIT(2)
+#define OV5648_TC20_FLIP_VERT_SENSOR_EN BIT(1)
+#define OV5648_TC20_BINNING_VERT_EN BIT(0)
+#define OV5648_TC21_REG 0x3821
+#define OV5648_TC21_FLIP_HORZ_ISP_EN BIT(2)
+#define OV5648_TC21_FLIP_HORZ_SENSOR_EN BIT(1)
+#define OV5648_TC21_BINNING_HORZ_EN BIT(0)
+
+/* Strobe/exposure */
+
+#define OV5648_STROBE_REG 0x3b00
+#define OV5648_FREX_EXP_HH_REG 0x3b01
+#define OV5648_SHUTTER_DLY_H_REG 0x3b02
+#define OV5648_SHUTTER_DLY_L_REG 0x3b03
+#define OV5648_FREX_EXP_H_REG 0x3b04
+#define OV5648_FREX_EXP_L_REG 0x3b05
+#define OV5648_FREX_CTRL_REG 0x3b06
+#define OV5648_FREX_MODE_SEL_REG 0x3b07
+#define OV5648_FREX_MODE_SEL_FREX_SA1 BIT(4)
+#define OV5648_FREX_MODE_SEL_FX1_FM_EN BIT(3)
+#define OV5648_FREX_MODE_SEL_FREX_INV BIT(2)
+#define OV5648_FREX_MODE_SEL_MODE1 0x0
+#define OV5648_FREX_MODE_SEL_MODE2 0x1
+#define OV5648_FREX_MODE_SEL_ROLLING 0x2
+#define OV5648_FREX_EXP_REQ_REG 0x3b08
+#define OV5648_FREX_SHUTTER_DLY_REG 0x3b09
+#define OV5648_FREX_RST_LEN_REG 0x3b0a
+#define OV5648_STROBE_WIDTH_HH_REG 0x3b0b
+#define OV5648_STROBE_WIDTH_H_REG 0x3b0c
+
+/* OTP */
+
+#define OV5648_OTP_DATA_REG_BASE 0x3d00
+#define OV5648_OTP_PROGRAM_CTRL_REG 0x3d80
+#define OV5648_OTP_LOAD_CTRL_REG 0x3d81
+
+/* PSRAM */
+
+#define OV5648_PSRAM_CTRL1_REG 0x3f01
+#define OV5648_PSRAM_CTRLF_REG 0x3f0f
+
+/* Black Level */
+
+#define OV5648_BLC_CTRL0_REG 0x4000
+#define OV5648_BLC_CTRL1_REG 0x4001
+#define OV5648_BLC_CTRL1_START_LINE(v) ((v) & GENMASK(5, 0))
+#define OV5648_BLC_CTRL2_REG 0x4002
+#define OV5648_BLC_CTRL2_AUTO_EN BIT(6)
+#define OV5648_BLC_CTRL2_RESET_FRAME_NUM(v) ((v) & GENMASK(5, 0))
+#define OV5648_BLC_CTRL3_REG 0x4003
+#define OV5648_BLC_LINE_NUM_REG 0x4004
+#define OV5648_BLC_LINE_NUM(v) ((v) & GENMASK(7, 0))
+#define OV5648_BLC_CTRL5_REG 0x4005
+#define OV5648_BLC_CTRL5_UPDATE_EN BIT(1)
+#define OV5648_BLC_LEVEL_REG 0x4009
+
+/* Frame */
+
+#define OV5648_FRAME_CTRL_REG 0x4200
+#define OV5648_FRAME_ON_NUM_REG 0x4201
+#define OV5648_FRAME_OFF_NUM_REG 0x4202
+
+/* MIPI CSI-2 */
+
+#define OV5648_MIPI_CTRL0_REG 0x4800
+#define OV5648_MIPI_CTRL0_CLK_LANE_AUTOGATE BIT(5)
+#define OV5648_MIPI_CTRL0_LANE_SYNC_EN BIT(4)
+#define OV5648_MIPI_CTRL0_LANE_SELECT_LANE1 0
+#define OV5648_MIPI_CTRL0_LANE_SELECT_LANE2 BIT(3)
+#define OV5648_MIPI_CTRL0_IDLE_LP00 0
+#define OV5648_MIPI_CTRL0_IDLE_LP11 BIT(2)
+
+#define OV5648_MIPI_CTRL1_REG 0x4801
+#define OV5648_MIPI_CTRL2_REG 0x4802
+#define OV5648_MIPI_CTRL3_REG 0x4803
+#define OV5648_MIPI_CTRL4_REG 0x4804
+#define OV5648_MIPI_CTRL5_REG 0x4805
+#define OV5648_MIPI_MAX_FRAME_COUNT_H_REG 0x4810
+#define OV5648_MIPI_MAX_FRAME_COUNT_L_REG 0x4811
+#define OV5648_MIPI_CTRL14_REG 0x4814
+#define OV5648_MIPI_DT_SPKT_REG 0x4815
+#define OV5648_MIPI_HS_ZERO_MIN_H_REG 0x4818
+#define OV5648_MIPI_HS_ZERO_MIN_L_REG 0x4819
+#define OV5648_MIPI_HS_TRAIN_MIN_H_REG 0x481a
+#define OV5648_MIPI_HS_TRAIN_MIN_L_REG 0x481b
+#define OV5648_MIPI_CLK_ZERO_MIN_H_REG 0x481c
+#define OV5648_MIPI_CLK_ZERO_MIN_L_REG 0x481d
+#define OV5648_MIPI_CLK_PREPARE_MIN_H_REG 0x481e
+#define OV5648_MIPI_CLK_PREPARE_MIN_L_REG 0x481f
+#define OV5648_MIPI_CLK_POST_MIN_H_REG 0x4820
+#define OV5648_MIPI_CLK_POST_MIN_L_REG 0x4821
+#define OV5648_MIPI_CLK_TRAIL_MIN_H_REG 0x4822
+#define OV5648_MIPI_CLK_TRAIL_MIN_L_REG 0x4823
+#define OV5648_MIPI_LPX_P_MIN_H_REG 0x4824
+#define OV5648_MIPI_LPX_P_MIN_L_REG 0x4825
+#define OV5648_MIPI_HS_PREPARE_MIN_H_REG 0x4826
+#define OV5648_MIPI_HS_PREPARE_MIN_L_REG 0x4827
+#define OV5648_MIPI_HS_EXIT_MIN_H_REG 0x4828
+#define OV5648_MIPI_HS_EXIT_MIN_L_REG 0x4829
+#define OV5648_MIPI_HS_ZERO_MIN_UI_REG 0x482a
+#define OV5648_MIPI_HS_TRAIL_MIN_UI_REG 0x482b
+#define OV5648_MIPI_CLK_ZERO_MIN_UI_REG 0x482c
+#define OV5648_MIPI_CLK_PREPARE_MIN_UI_REG 0x482d
+#define OV5648_MIPI_CLK_POST_MIN_UI_REG 0x482e
+#define OV5648_MIPI_CLK_TRAIL_MIN_UI_REG 0x482f
+#define OV5648_MIPI_LPX_P_MIN_UI_REG 0x4830
+#define OV5648_MIPI_HS_PREPARE_MIN_UI_REG 0x4831
+#define OV5648_MIPI_HS_EXIT_MIN_UI_REG 0x4832
+#define OV5648_MIPI_REG_MIN_H_REG 0x4833
+#define OV5648_MIPI_REG_MIN_L_REG 0x4834
+#define OV5648_MIPI_REG_MAX_H_REG 0x4835
+#define OV5648_MIPI_REG_MAX_L_REG 0x4836
+#define OV5648_MIPI_PCLK_PERIOD_REG 0x4837
+#define OV5648_MIPI_WKUP_DLY_REG 0x4838
+#define OV5648_MIPI_LP_GPIO_REG 0x483b
+#define OV5648_MIPI_SNR_PCLK_DIV_REG 0x4843
+
+/* ISP */
+
+#define OV5648_ISP_CTRL0_REG 0x5000
+#define OV5648_ISP_CTRL0_BLACK_CORRECT_EN BIT(2)
+#define OV5648_ISP_CTRL0_WHITE_CORRECT_EN BIT(1)
+#define OV5648_ISP_CTRL1_REG 0x5001
+#define OV5648_ISP_CTRL1_AWB_EN BIT(0)
+#define OV5648_ISP_CTRL2_REG 0x5002
+#define OV5648_ISP_CTRL2_WIN_EN BIT(6)
+#define OV5648_ISP_CTRL2_OTP_EN BIT(1)
+#define OV5648_ISP_CTRL2_AWB_GAIN_EN BIT(0)
+#define OV5648_ISP_CTRL3_REG 0x5003
+#define OV5648_ISP_CTRL3_BUF_EN BIT(3)
+#define OV5648_ISP_CTRL3_BIN_MAN_SET BIT(2)
+#define OV5648_ISP_CTRL3_BIN_AUTO_EN BIT(1)
+#define OV5648_ISP_CTRL4_REG 0x5004
+#define OV5648_ISP_CTRL5_REG 0x5005
+#define OV5648_ISP_CTRL6_REG 0x5006
+#define OV5648_ISP_CTRL7_REG 0x5007
+#define OV5648_ISP_MAN_OFFSET_X_H_REG 0x5008
+#define OV5648_ISP_MAN_OFFSET_X_L_REG 0x5009
+#define OV5648_ISP_MAN_OFFSET_Y_H_REG 0x500a
+#define OV5648_ISP_MAN_OFFSET_Y_L_REG 0x500b
+#define OV5648_ISP_MAN_WIN_OFFSET_X_H_REG 0x500c
+#define OV5648_ISP_MAN_WIN_OFFSET_X_L_REG 0x500d
+#define OV5648_ISP_MAN_WIN_OFFSET_Y_H_REG 0x500e
+#define OV5648_ISP_MAN_WIN_OFFSET_Y_L_REG 0x500f
+#define OV5648_ISP_MAN_WIN_OUTPUT_X_H_REG 0x5010
+#define OV5648_ISP_MAN_WIN_OUTPUT_X_L_REG 0x5011
+#define OV5648_ISP_MAN_WIN_OUTPUT_Y_H_REG 0x5012
+#define OV5648_ISP_MAN_WIN_OUTPUT_Y_L_REG 0x5013
+#define OV5648_ISP_MAN_INPUT_X_H_REG 0x5014
+#define OV5648_ISP_MAN_INPUT_X_L_REG 0x5015
+#define OV5648_ISP_MAN_INPUT_Y_H_REG 0x5016
+#define OV5648_ISP_MAN_INPUT_Y_L_REG 0x5017
+#define OV5648_ISP_CTRL18_REG 0x5018
+#define OV5648_ISP_CTRL19_REG 0x5019
+#define OV5648_ISP_CTRL1A_REG 0x501a
+#define OV5648_ISP_CTRL1D_REG 0x501d
+#define OV5648_ISP_CTRL1F_REG 0x501f
+#define OV5648_ISP_CTRL1F_OUTPUT_EN 3
+#define OV5648_ISP_CTRL25_REG 0x5025
+
+#define OV5648_ISP_CTRL3D_REG 0x503d
+#define OV5648_ISP_CTRL3D_PATTERN_EN BIT(7)
+#define OV5648_ISP_CTRL3D_ROLLING_BAR_EN BIT(6)
+#define OV5648_ISP_CTRL3D_TRANSPARENT_MODE BIT(5)
+#define OV5648_ISP_CTRL3D_SQUARES_BW_MODE BIT(4)
+#define OV5648_ISP_CTRL3D_PATTERN_COLOR_BARS 0
+#define OV5648_ISP_CTRL3D_PATTERN_RANDOM_DATA 1
+#define OV5648_ISP_CTRL3D_PATTERN_COLOR_SQUARES 2
+#define OV5648_ISP_CTRL3D_PATTERN_INPUT 3
+
+#define OV5648_ISP_CTRL3E_REG 0x503e
+#define OV5648_ISP_CTRL4B_REG 0x504b
+#define OV5648_ISP_CTRL4B_POST_BIN_H_EN BIT(5)
+#define OV5648_ISP_CTRL4B_POST_BIN_V_EN BIT(4)
+#define OV5648_ISP_CTRL4C_REG 0x504c
+#define OV5648_ISP_CTRL57_REG 0x5057
+#define OV5648_ISP_CTRL58_REG 0x5058
+#define OV5648_ISP_CTRL59_REG 0x5059
+
+#define OV5648_ISP_WINDOW_START_X_H_REG 0x5980
+#define OV5648_ISP_WINDOW_START_X_L_REG 0x5981
+#define OV5648_ISP_WINDOW_START_Y_H_REG 0x5982
+#define OV5648_ISP_WINDOW_START_Y_L_REG 0x5983
+#define OV5648_ISP_WINDOW_WIN_X_H_REG 0x5984
+#define OV5648_ISP_WINDOW_WIN_X_L_REG 0x5985
+#define OV5648_ISP_WINDOW_WIN_Y_H_REG 0x5986
+#define OV5648_ISP_WINDOW_WIN_Y_L_REG 0x5987
+#define OV5648_ISP_WINDOW_MAN_REG 0x5988
+
+/* White Balance */
+
+#define OV5648_AWB_CTRL_REG 0x5180
+#define OV5648_AWB_CTRL_FAST_AWB BIT(6)
+#define OV5648_AWB_CTRL_GAIN_FREEZE_EN BIT(5)
+#define OV5648_AWB_CTRL_SUM_FREEZE_EN BIT(4)
+#define OV5648_AWB_CTRL_GAIN_MANUAL_EN BIT(3)
+
+#define OV5648_AWB_DELTA_REG 0x5181
+#define OV5648_AWB_STABLE_RANGE_REG 0x5182
+#define OV5648_AWB_STABLE_RANGE_WIDE_REG 0x5183
+#define OV5648_HSIZE_MAN_REG 0x5185
+
+#define OV5648_GAIN_RED_MAN_H_REG 0x5186
+#define OV5648_GAIN_RED_MAN_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_GAIN_RED_MAN_L_REG 0x5187
+#define OV5648_GAIN_RED_MAN_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_GAIN_GREEN_MAN_H_REG 0x5188
+#define OV5648_GAIN_GREEN_MAN_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_GAIN_GREEN_MAN_L_REG 0x5189
+#define OV5648_GAIN_GREEN_MAN_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_GAIN_BLUE_MAN_H_REG 0x518a
+#define OV5648_GAIN_BLUE_MAN_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV5648_GAIN_BLUE_MAN_L_REG 0x518b
+#define OV5648_GAIN_BLUE_MAN_L(v) ((v) & GENMASK(7, 0))
+#define OV5648_GAIN_RED_LIMIT_REG 0x518c
+#define OV5648_GAIN_GREEN_LIMIT_REG 0x518d
+#define OV5648_GAIN_BLUE_LIMIT_REG 0x518e
+#define OV5648_AWB_FRAME_COUNT_REG 0x518f
+#define OV5648_AWB_BASE_MAN_REG 0x51df
+
+/* Macros */
+
+#define ov5648_subdev_sensor(s) \
+ container_of(s, struct ov5648_sensor, subdev)
+
+#define ov5648_ctrl_subdev(c) \
+ (&container_of((c)->handler, struct ov5648_sensor, \
+ ctrls.handler)->subdev)
+
+/* Data structures */
+
+struct ov5648_register_value {
+ u16 address;
+ u8 value;
+ unsigned int delay_ms;
+};
+
+/*
+ * PLL1 Clock Tree:
+ *
+ * +-< XVCLK
+ * |
+ * +-+ pll_pre_div (0x3037 [3:0], special values: 5: 1.5, 7: 2.5)
+ * |
+ * +-+ pll_mul (0x3036 [7:0])
+ * |
+ * +-+ sys_div (0x3035 [7:4])
+ * |
+ * +-+ mipi_div (0x3035 [3:0])
+ * | |
+ * | +-> MIPI_SCLK
+ * | |
+ * | +-+ mipi_phy_div (2)
+ * | |
+ * | +-> MIPI_CLK
+ * |
+ * +-+ root_div (0x3037 [4])
+ * |
+ * +-+ bit_div (0x3034 [3:0], 8 bits: 2, 10 bits: 2.5, other: 1)
+ * |
+ * +-+ sclk_div (0x3106 [3:2])
+ * |
+ * +-> SCLK
+ * |
+ * +-+ mipi_div (0x3035, 1: PCLK = SCLK)
+ * |
+ * +-> PCLK
+ */
+
+struct ov5648_pll1_config {
+ unsigned int pll_pre_div;
+ unsigned int pll_mul;
+ unsigned int sys_div;
+ unsigned int root_div;
+ unsigned int sclk_div;
+ unsigned int mipi_div;
+};
+
+/*
+ * PLL2 Clock Tree:
+ *
+ * +-< XVCLK
+ * |
+ * +-+ plls_pre_div (0x303d [5:4], special values: 0: 1, 1: 1.5)
+ * |
+ * +-+ plls_div_r (0x303d [2])
+ * |
+ * +-+ plls_mul (0x303b [4:0])
+ * |
+ * +-+ sys_div (0x303c [3:0])
+ * |
+ * +-+ sel_div (0x303d [1:0], special values: 0: 1, 3: 2.5)
+ * |
+ * +-> ADCLK
+ */
+
+struct ov5648_pll2_config {
+ unsigned int plls_pre_div;
+ unsigned int plls_div_r;
+ unsigned int plls_mul;
+ unsigned int sys_div;
+ unsigned int sel_div;
+};
+
+/*
+ * General formulas for (array-centered) mode calculation:
+ * - photo_array_width = 2624
+ * - crop_start_x = (photo_array_width - output_size_x) / 2
+ * - crop_end_x = crop_start_x + offset_x + output_size_x - 1
+ *
+ * - photo_array_height = 1956
+ * - crop_start_y = (photo_array_height - output_size_y) / 2
+ * - crop_end_y = crop_start_y + offset_y + output_size_y - 1
+ */
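+
+/*
+ * For instance, plugging the native 2592x1944 mode below into these
+ * formulas gives crop_start_x = (2624 - 2592) / 2 = 16, crop_end_x =
+ * 16 + 0 + 2592 - 1 = 2607, crop_start_y = (1956 - 1944) / 2 = 6 and
+ * crop_end_y = 6 + 0 + 1944 - 1 = 1949, matching the first entry of
+ * ov5648_modes[].
+ */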
+
+struct ov5648_mode {
+ unsigned int crop_start_x;
+ unsigned int offset_x;
+ unsigned int output_size_x;
+ unsigned int crop_end_x;
+ unsigned int hts;
+
+ unsigned int crop_start_y;
+ unsigned int offset_y;
+ unsigned int output_size_y;
+ unsigned int crop_end_y;
+ unsigned int vts;
+
+ bool binning_x;
+ bool binning_y;
+
+ unsigned int inc_x_odd;
+ unsigned int inc_x_even;
+ unsigned int inc_y_odd;
+ unsigned int inc_y_even;
+
+ /* 8-bit frame interval followed by 10-bit frame interval. */
+ struct v4l2_fract frame_interval[2];
+
+ /* 8-bit config followed by 10-bit config. */
+ const struct ov5648_pll1_config *pll1_config[2];
+ const struct ov5648_pll2_config *pll2_config;
+
+ const struct ov5648_register_value *register_values;
+ unsigned int register_values_count;
+};
+
+struct ov5648_state {
+ const struct ov5648_mode *mode;
+ u32 mbus_code;
+
+ bool streaming;
+};
+
+struct ov5648_ctrls {
+ struct v4l2_ctrl *exposure_auto;
+ struct v4l2_ctrl *exposure;
+
+ struct v4l2_ctrl *gain_auto;
+ struct v4l2_ctrl *gain;
+
+ struct v4l2_ctrl *white_balance_auto;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct v4l2_ctrl_handler handler;
+} __packed;
+
+struct ov5648_sensor {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerdown;
+ struct regulator *avdd;
+ struct regulator *dvdd;
+ struct regulator *dovdd;
+ struct clk *xvclk;
+
+ struct v4l2_fwnode_endpoint endpoint;
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+
+ struct mutex mutex;
+
+ struct ov5648_state state;
+ struct ov5648_ctrls ctrls;
+};
+
+/* Static definitions */
+
+/*
+ * XVCLK = 24 MHz
+ * SCLK = 84 MHz
+ * PCLK = 84 MHz
+ */
+static const struct ov5648_pll1_config ov5648_pll1_config_native_8_bits = {
+ .pll_pre_div = 3,
+ .pll_mul = 84,
+ .sys_div = 2,
+ .root_div = 1,
+ .sclk_div = 1,
+ .mipi_div = 1,
+};
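+
+/*
+ * Worked example for the config above, following the PLL1 clock tree:
+ * PLL1 = 24 MHz / 3 * 84 = 672 MHz, MIPI_SCLK = 672 / 2 / 1 = 336 MHz and
+ * MIPI_CLK = 336 / 2 = 168 MHz, matching the second entry of the link
+ * frequency menu.
+ */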
+
+/*
+ * XVCLK = 24 MHz
+ * SCLK = 84 MHz
+ * PCLK = 84 MHz
+ */
+static const struct ov5648_pll1_config ov5648_pll1_config_native_10_bits = {
+ .pll_pre_div = 3,
+ .pll_mul = 105,
+ .sys_div = 2,
+ .root_div = 1,
+ .sclk_div = 1,
+ .mipi_div = 1,
+};
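+
+/*
+ * Same derivation with pll_mul = 105: PLL1 = 24 MHz / 3 * 105 = 840 MHz and
+ * MIPI_CLK = 840 / 2 / 1 / 2 = 210 MHz, the first entry of the link
+ * frequency menu.
+ */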
+
+/*
+ * XVCLK = 24 MHz
+ * ADCLK = 200 MHz
+ */
+static const struct ov5648_pll2_config ov5648_pll2_config_native = {
+ .plls_pre_div = 3,
+ .plls_div_r = 1,
+ .plls_mul = 25,
+ .sys_div = 1,
+ .sel_div = 1,
+};
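+
+/*
+ * Worked example for the config above, taking each field as a direct
+ * divider value: ADCLK = 24 MHz / 3 / 1 * 25 / 1 / 1 = 200 MHz.
+ */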
+
+static const struct ov5648_mode ov5648_modes[] = {
+ /* 2592x1944 */
+ {
+ /* Horizontal */
+ .crop_start_x = 16,
+ .offset_x = 0,
+ .output_size_x = 2592,
+ .crop_end_x = 2607,
+ .hts = 2816,
+
+ /* Vertical */
+ .crop_start_y = 6,
+ .offset_y = 0,
+ .output_size_y = 1944,
+ .crop_end_y = 1949,
+ .vts = 1984,
+
+ /* Subsample increase */
+ .inc_x_odd = 1,
+ .inc_x_even = 1,
+ .inc_y_odd = 1,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 15 },
+ { 1, 15 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+ /* 1600x1200 (UXGA) */
+ {
+ /* Horizontal */
+ .crop_start_x = 512,
+ .offset_x = 0,
+ .output_size_x = 1600,
+ .crop_end_x = 2111,
+ .hts = 2816,
+
+ /* Vertical */
+ .crop_start_y = 378,
+ .offset_y = 0,
+ .output_size_y = 1200,
+ .crop_end_y = 1577,
+ .vts = 1984,
+
+ /* Subsample increase */
+ .inc_x_odd = 1,
+ .inc_x_even = 1,
+ .inc_y_odd = 1,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 15 },
+ { 1, 15 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+ /* 1920x1080 (Full HD) */
+ {
+ /* Horizontal */
+ .crop_start_x = 352,
+ .offset_x = 0,
+ .output_size_x = 1920,
+ .crop_end_x = 2271,
+ .hts = 2816,
+
+ /* Vertical */
+ .crop_start_y = 438,
+ .offset_y = 0,
+ .output_size_y = 1080,
+ .crop_end_y = 1517,
+ .vts = 1984,
+
+ /* Subsample increase */
+ .inc_x_odd = 1,
+ .inc_x_even = 1,
+ .inc_y_odd = 1,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 15 },
+ { 1, 15 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+ /* 1280x960 */
+ {
+ /* Horizontal */
+ .crop_start_x = 16,
+ .offset_x = 8,
+ .output_size_x = 1280,
+ .crop_end_x = 2607,
+ .hts = 1912,
+
+ /* Vertical */
+ .crop_start_y = 6,
+ .offset_y = 6,
+ .output_size_y = 960,
+ .crop_end_y = 1949,
+ .vts = 1496,
+
+ /* Binning */
+ .binning_x = true,
+
+ /* Subsample increase */
+ .inc_x_odd = 3,
+ .inc_x_even = 1,
+ .inc_y_odd = 3,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 30 },
+ { 1, 30 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+ /* 1280x720 (HD) */
+ {
+ /* Horizontal */
+ .crop_start_x = 16,
+ .offset_x = 8,
+ .output_size_x = 1280,
+ .crop_end_x = 2607,
+ .hts = 1912,
+
+ /* Vertical */
+ .crop_start_y = 254,
+ .offset_y = 2,
+ .output_size_y = 720,
+ .crop_end_y = 1701,
+ .vts = 1496,
+
+ /* Binning */
+ .binning_x = true,
+
+ /* Subsample increase */
+ .inc_x_odd = 3,
+ .inc_x_even = 1,
+ .inc_y_odd = 3,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 30 },
+ { 1, 30 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+ /* 640x480 (VGA) */
+ {
+ /* Horizontal */
+ .crop_start_x = 0,
+ .offset_x = 8,
+ .output_size_x = 640,
+ .crop_end_x = 2623,
+ .hts = 1896,
+
+ /* Vertical */
+ .crop_start_y = 0,
+ .offset_y = 2,
+ .output_size_y = 480,
+ .crop_end_y = 1953,
+ .vts = 984,
+
+ /* Binning */
+ .binning_x = true,
+
+ /* Subsample increase */
+ .inc_x_odd = 7,
+ .inc_x_even = 1,
+ .inc_y_odd = 7,
+ .inc_y_even = 1,
+
+ /* Frame Interval */
+ .frame_interval = {
+ { 1, 30 },
+ { 1, 30 },
+ },
+
+ /* PLL */
+ .pll1_config = {
+ &ov5648_pll1_config_native_8_bits,
+ &ov5648_pll1_config_native_10_bits,
+ },
+ .pll2_config = &ov5648_pll2_config_native,
+ },
+};
+
+static const u32 ov5648_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+};
+
+static const struct ov5648_register_value ov5648_init_sequence[] = {
+ /* PSRAM */
+ { OV5648_PSRAM_CTRL1_REG, 0x0d },
+ { OV5648_PSRAM_CTRLF_REG, 0xf5 },
+};
+
+static const s64 ov5648_link_freq_menu[] = {
+ 210000000,
+ 168000000,
+};
+
+static const char *const ov5648_test_pattern_menu[] = {
+ "Disabled",
+ "Random data",
+ "Color bars",
+ "Color bars with rolling bar",
+ "Color squares",
+ "Color squares with rolling bar"
+};
+
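+/* Each entry below maps 1:1 to an entry of the test pattern menu above. */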
+static const u8 ov5648_test_pattern_bits[] = {
+ 0,
+ OV5648_ISP_CTRL3D_PATTERN_EN | OV5648_ISP_CTRL3D_PATTERN_RANDOM_DATA,
+ OV5648_ISP_CTRL3D_PATTERN_EN | OV5648_ISP_CTRL3D_PATTERN_COLOR_BARS,
+ OV5648_ISP_CTRL3D_PATTERN_EN | OV5648_ISP_CTRL3D_ROLLING_BAR_EN |
+ OV5648_ISP_CTRL3D_PATTERN_COLOR_BARS,
+ OV5648_ISP_CTRL3D_PATTERN_EN | OV5648_ISP_CTRL3D_PATTERN_COLOR_SQUARES,
+ OV5648_ISP_CTRL3D_PATTERN_EN | OV5648_ISP_CTRL3D_ROLLING_BAR_EN |
+ OV5648_ISP_CTRL3D_PATTERN_COLOR_SQUARES,
+};
+
+/* Input/Output */
+
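+/*
+ * Registers are accessed over I2C as a 16-bit big-endian address followed
+ * by an 8-bit value.
+ */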
+static int ov5648_read(struct ov5648_sensor *sensor, u16 address, u8 *value)
+{
+ unsigned char data[2] = { address >> 8, address & 0xff };
+ struct i2c_client *client = sensor->i2c_client;
+ int ret;
+
+ ret = i2c_master_send(client, data, sizeof(data));
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c send error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, value, 1);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c recv error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5648_write(struct ov5648_sensor *sensor, u16 address, u8 value)
+{
+ unsigned char data[3] = { address >> 8, address & 0xff, value };
+ struct i2c_client *client = sensor->i2c_client;
+ int ret;
+
+ ret = i2c_master_send(client, data, sizeof(data));
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c send error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5648_write_sequence(struct ov5648_sensor *sensor,
+ const struct ov5648_register_value *sequence,
+ unsigned int sequence_count)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < sequence_count; i++) {
+ ret = ov5648_write(sensor, sequence[i].address,
+ sequence[i].value);
+ if (ret)
+ break;
+
+ if (sequence[i].delay_ms)
+ msleep(sequence[i].delay_ms);
+ }
+
+ return ret;
+}
+
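+/*
+ * Read-modify-write helper. The sequence is not atomic on the bus, so
+ * callers are expected to serialize access, here through the sensor mutex
+ * (which also serves as the control handler lock).
+ */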
+static int ov5648_update_bits(struct ov5648_sensor *sensor, u16 address,
+ u8 mask, u8 bits)
+{
+ u8 value = 0;
+ int ret;
+
+ ret = ov5648_read(sensor, address, &value);
+ if (ret)
+ return ret;
+
+ value &= ~mask;
+ value |= bits;
+
+ ret = ov5648_write(sensor, address, value);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Sensor */
+
+static int ov5648_sw_reset(struct ov5648_sensor *sensor)
+{
+ return ov5648_write(sensor, OV5648_SW_RESET_REG, OV5648_SW_RESET_RESET);
+}
+
+static int ov5648_sw_standby(struct ov5648_sensor *sensor, int standby)
+{
+ u8 value = 0;
+
+ if (!standby)
+ value = OV5648_SW_STANDBY_STREAM_ON;
+
+ return ov5648_write(sensor, OV5648_SW_STANDBY_REG, value);
+}
+
+static int ov5648_chip_id_check(struct ov5648_sensor *sensor)
+{
+ u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
+ u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
+ unsigned int i;
+ u8 value;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = ov5648_read(sensor, regs[i], &value);
+ if (ret < 0)
+ return ret;
+
+ if (value != values[i]) {
+ dev_err(sensor->dev,
+ "chip id value mismatch: %#x instead of %#x\n",
+ value, values[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ov5648_avdd_internal_power(struct ov5648_sensor *sensor, int on)
+{
+ return ov5648_write(sensor, OV5648_A_PWC_PK_O0_REG,
+ on ? 0 : OV5648_A_PWC_PK_O0_BP_REGULATOR_N);
+}
+
+static int ov5648_pad_configure(struct ov5648_sensor *sensor)
+{
+ int ret;
+
+ /* Configure pads as input. */
+
+ ret = ov5648_write(sensor, OV5648_PAD_OEN1_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_PAD_OEN2_REG, 0);
+ if (ret)
+ return ret;
+
+ /* Disable FREX pin. */
+
+ return ov5648_write(sensor, OV5648_PAD_PK_REG,
+ OV5648_PAD_PK_DRIVE_STRENGTH_1X |
+ OV5648_PAD_PK_FREX_N);
+}
+
+static int ov5648_mipi_configure(struct ov5648_sensor *sensor)
+{
+ struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ &sensor->endpoint.bus.mipi_csi2;
+ unsigned int lanes_count = bus_mipi_csi2->num_data_lanes;
+ int ret;
+
+ ret = ov5648_write(sensor, OV5648_MIPI_CTRL0_REG,
+ OV5648_MIPI_CTRL0_CLK_LANE_AUTOGATE |
+ OV5648_MIPI_CTRL0_LANE_SELECT_LANE1 |
+ OV5648_MIPI_CTRL0_IDLE_LP11);
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_MIPI_SC_CTRL0_REG,
+ OV5648_MIPI_SC_CTRL0_MIPI_LANES(lanes_count) |
+ OV5648_MIPI_SC_CTRL0_PHY_LP_RX_PD |
+ OV5648_MIPI_SC_CTRL0_MIPI_EN);
+}
+
+static int ov5648_black_level_configure(struct ov5648_sensor *sensor)
+{
+ int ret;
+
+ /* Up to 6 lines are available for black level calibration. */
+
+ ret = ov5648_write(sensor, OV5648_BLC_CTRL1_REG,
+ OV5648_BLC_CTRL1_START_LINE(2));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_BLC_CTRL2_REG,
+ OV5648_BLC_CTRL2_AUTO_EN |
+ OV5648_BLC_CTRL2_RESET_FRAME_NUM(5));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_BLC_LINE_NUM_REG,
+ OV5648_BLC_LINE_NUM(4));
+ if (ret)
+ return ret;
+
+ return ov5648_update_bits(sensor, OV5648_BLC_CTRL5_REG,
+ OV5648_BLC_CTRL5_UPDATE_EN,
+ OV5648_BLC_CTRL5_UPDATE_EN);
+}
+
+static int ov5648_isp_configure(struct ov5648_sensor *sensor)
+{
+ u8 bits;
+ int ret;
+
+ /* Enable black and white level correction. */
+ bits = OV5648_ISP_CTRL0_BLACK_CORRECT_EN |
+ OV5648_ISP_CTRL0_WHITE_CORRECT_EN;
+
+ ret = ov5648_update_bits(sensor, OV5648_ISP_CTRL0_REG, bits, bits);
+ if (ret)
+ return ret;
+
+ /* Enable AWB. */
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL1_REG,
+ OV5648_ISP_CTRL1_AWB_EN);
+ if (ret)
+ return ret;
+
+ /* Enable AWB gain and windowing. */
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL2_REG,
+ OV5648_ISP_CTRL2_WIN_EN |
+ OV5648_ISP_CTRL2_AWB_GAIN_EN);
+ if (ret)
+ return ret;
+
+ /* Enable buffering and auto-binning. */
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL3_REG,
+ OV5648_ISP_CTRL3_BUF_EN |
+ OV5648_ISP_CTRL3_BIN_AUTO_EN);
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL4_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL1F_REG,
+ OV5648_ISP_CTRL1F_OUTPUT_EN);
+ if (ret)
+ return ret;
+
+ /* Enable post-binning filters. */
+ ret = ov5648_write(sensor, OV5648_ISP_CTRL4B_REG,
+ OV5648_ISP_CTRL4B_POST_BIN_H_EN |
+ OV5648_ISP_CTRL4B_POST_BIN_V_EN);
+ if (ret)
+ return ret;
+
+ /* Disable debanding and night mode. Debug bit seems necessary. */
+ ret = ov5648_write(sensor, OV5648_AEC_CTRL0_REG,
+ OV5648_AEC_CTRL0_DEBUG |
+ OV5648_AEC_CTRL0_START_SEL_EN);
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_MANUAL_CTRL_REG,
+ OV5648_MANUAL_CTRL_FRAME_DELAY(1));
+}
+
+static unsigned long ov5648_mode_pll1_rate(struct ov5648_sensor *sensor,
+ const struct ov5648_pll1_config *config)
+{
+ unsigned long xvclk_rate;
+ unsigned long pll1_rate;
+
+ xvclk_rate = clk_get_rate(sensor->xvclk);
+ pll1_rate = xvclk_rate * config->pll_mul;
+
+ switch (config->pll_pre_div) {
+ case 5:
+ pll1_rate *= 3;
+ pll1_rate /= 2;
+ break;
+ case 7:
+ pll1_rate *= 5;
+ pll1_rate /= 2;
+ break;
+ default:
+ pll1_rate /= config->pll_pre_div;
+ break;
+ }
+
+ return pll1_rate;
+}
+
+static int ov5648_mode_pll1_configure(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode,
+ u32 mbus_code)
+{
+ const struct ov5648_pll1_config *config;
+ u8 value;
+ int ret;
+
+ value = OV5648_PLL_CTRL0_PLL_CHARGE_PUMP(1);
+
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ config = mode->pll1_config[0];
+ value |= OV5648_PLL_CTRL0_BITS(8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ config = mode->pll1_config[1];
+ value |= OV5648_PLL_CTRL0_BITS(10);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ov5648_write(sensor, OV5648_PLL_CTRL0_REG, value);
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_PLL_DIV_REG,
+ OV5648_PLL_DIV_ROOT_DIV(config->root_div) |
+ OV5648_PLL_DIV_PLL_PRE_DIV(config->pll_pre_div));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_PLL_MUL_REG,
+ OV5648_PLL_MUL(config->pll_mul));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_PLL_CTRL1_REG,
+ OV5648_PLL_CTRL1_SYS_DIV(config->sys_div) |
+ OV5648_PLL_CTRL1_MIPI_DIV(config->mipi_div));
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_SRB_CTRL_REG,
+ OV5648_SRB_CTRL_SCLK_DIV(config->sclk_div) |
+ OV5648_SRB_CTRL_SCLK_ARBITER_EN);
+}
+
+static int ov5648_mode_pll2_configure(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode)
+{
+ const struct ov5648_pll2_config *config = mode->pll2_config;
+ int ret;
+
+ ret = ov5648_write(sensor, OV5648_PLLS_DIV_REG,
+ OV5648_PLLS_DIV_PLLS_PRE_DIV(config->plls_pre_div) |
+ OV5648_PLLS_DIV_PLLS_DIV_R(config->plls_div_r) |
+ OV5648_PLLS_DIV_PLLS_SEL_DIV(config->sel_div));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_PLLS_MUL_REG,
+ OV5648_PLLS_MUL(config->plls_mul));
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_PLLS_CTRL_REG,
+ OV5648_PLLS_CTRL_PLL_CHARGE_PUMP(1) |
+ OV5648_PLLS_CTRL_SYS_DIV(config->sys_div));
+}
+
+static int ov5648_mode_configure(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode, u32 mbus_code)
+{
+ int ret;
+
+ /* Crop Start X */
+
+ ret = ov5648_write(sensor, OV5648_CROP_START_X_H_REG,
+ OV5648_CROP_START_X_H(mode->crop_start_x));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_CROP_START_X_L_REG,
+ OV5648_CROP_START_X_L(mode->crop_start_x));
+ if (ret)
+ return ret;
+
+ /* Offset X */
+
+ ret = ov5648_write(sensor, OV5648_OFFSET_X_H_REG,
+ OV5648_OFFSET_X_H(mode->offset_x));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_OFFSET_X_L_REG,
+ OV5648_OFFSET_X_L(mode->offset_x));
+ if (ret)
+ return ret;
+
+ /* Output Size X */
+
+ ret = ov5648_write(sensor, OV5648_OUTPUT_SIZE_X_H_REG,
+ OV5648_OUTPUT_SIZE_X_H(mode->output_size_x));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_OUTPUT_SIZE_X_L_REG,
+ OV5648_OUTPUT_SIZE_X_L(mode->output_size_x));
+ if (ret)
+ return ret;
+
+ /* Crop End X */
+
+ ret = ov5648_write(sensor, OV5648_CROP_END_X_H_REG,
+ OV5648_CROP_END_X_H(mode->crop_end_x));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_CROP_END_X_L_REG,
+ OV5648_CROP_END_X_L(mode->crop_end_x));
+ if (ret)
+ return ret;
+
+ /* Horizontal Total Size */
+
+ ret = ov5648_write(sensor, OV5648_HTS_H_REG, OV5648_HTS_H(mode->hts));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_HTS_L_REG, OV5648_HTS_L(mode->hts));
+ if (ret)
+ return ret;
+
+ /* Crop Start Y */
+
+ ret = ov5648_write(sensor, OV5648_CROP_START_Y_H_REG,
+ OV5648_CROP_START_Y_H(mode->crop_start_y));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_CROP_START_Y_L_REG,
+ OV5648_CROP_START_Y_L(mode->crop_start_y));
+ if (ret)
+ return ret;
+
+ /* Offset Y */
+
+ ret = ov5648_write(sensor, OV5648_OFFSET_Y_H_REG,
+ OV5648_OFFSET_Y_H(mode->offset_y));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_OFFSET_Y_L_REG,
+ OV5648_OFFSET_Y_L(mode->offset_y));
+ if (ret)
+ return ret;
+
+ /* Output Size Y */
+
+ ret = ov5648_write(sensor, OV5648_OUTPUT_SIZE_Y_H_REG,
+ OV5648_OUTPUT_SIZE_Y_H(mode->output_size_y));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_OUTPUT_SIZE_Y_L_REG,
+ OV5648_OUTPUT_SIZE_Y_L(mode->output_size_y));
+ if (ret)
+ return ret;
+
+ /* Crop End Y */
+
+ ret = ov5648_write(sensor, OV5648_CROP_END_Y_H_REG,
+ OV5648_CROP_END_Y_H(mode->crop_end_y));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_CROP_END_Y_L_REG,
+ OV5648_CROP_END_Y_L(mode->crop_end_y));
+ if (ret)
+ return ret;
+
+ /* Vertical Total Size */
+
+ ret = ov5648_write(sensor, OV5648_VTS_H_REG, OV5648_VTS_H(mode->vts));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_VTS_L_REG, OV5648_VTS_L(mode->vts));
+ if (ret)
+ return ret;
+
+ /* Flip/Mirror/Binning */
+
+ /*
+ * A debug bit is enabled by default and needs to be cleared for
+ * subsampling to work.
+ */
+ ret = ov5648_update_bits(sensor, OV5648_TC20_REG,
+ OV5648_TC20_DEBUG |
+ OV5648_TC20_BINNING_VERT_EN,
+ mode->binning_y ? OV5648_TC20_BINNING_VERT_EN :
+ 0);
+ if (ret)
+ return ret;
+
+ ret = ov5648_update_bits(sensor, OV5648_TC21_REG,
+ OV5648_TC21_BINNING_HORZ_EN,
+ mode->binning_x ? OV5648_TC21_BINNING_HORZ_EN :
+ 0);
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_SUB_INC_X_REG,
+ OV5648_SUB_INC_X_ODD(mode->inc_x_odd) |
+ OV5648_SUB_INC_X_EVEN(mode->inc_x_even));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_SUB_INC_Y_REG,
+ OV5648_SUB_INC_Y_ODD(mode->inc_y_odd) |
+ OV5648_SUB_INC_Y_EVEN(mode->inc_y_even));
+ if (ret)
+ return ret;
+
+ /* PLLs */
+
+ ret = ov5648_mode_pll1_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+
+ ret = ov5648_mode_pll2_configure(sensor, mode);
+ if (ret)
+ return ret;
+
+ /* Extra registers */
+
+ if (mode->register_values) {
+ ret = ov5648_write_sequence(sensor, mode->register_values,
+ mode->register_values_count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned long ov5648_mode_mipi_clk_rate(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode,
+ u32 mbus_code)
+{
+ const struct ov5648_pll1_config *config;
+ unsigned long pll1_rate;
+
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ config = mode->pll1_config[0];
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ config = mode->pll1_config[1];
+ break;
+ default:
+ return 0;
+ }
+
+ pll1_rate = ov5648_mode_pll1_rate(sensor, config);
+
+ return pll1_rate / config->sys_div / config->mipi_div / 2;
+}
+
+/* Exposure */
+
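+/*
+ * Exposure is programmed as a 20-bit value split across three registers
+ * (bits 19:16, 15:8 and 7:0).
+ */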
+static int ov5648_exposure_auto_configure(struct ov5648_sensor *sensor,
+ bool enable)
+{
+ return ov5648_update_bits(sensor, OV5648_MANUAL_CTRL_REG,
+ OV5648_MANUAL_CTRL_AEC_MANUAL_EN,
+ enable ? 0 : OV5648_MANUAL_CTRL_AEC_MANUAL_EN);
+}
+
+static int ov5648_exposure_configure(struct ov5648_sensor *sensor, u32 exposure)
+{
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
+ if (ctrls->exposure_auto->val != V4L2_EXPOSURE_MANUAL)
+ return -EINVAL;
+
+ ret = ov5648_write(sensor, OV5648_EXPOSURE_CTRL_HH_REG,
+ OV5648_EXPOSURE_CTRL_HH(exposure));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_EXPOSURE_CTRL_H_REG,
+ OV5648_EXPOSURE_CTRL_H(exposure));
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_EXPOSURE_CTRL_L_REG,
+ OV5648_EXPOSURE_CTRL_L(exposure));
+}
+
+static int ov5648_exposure_value(struct ov5648_sensor *sensor,
+ u32 *exposure)
+{
+ u8 exposure_hh = 0, exposure_h = 0, exposure_l = 0;
+ int ret;
+
+ ret = ov5648_read(sensor, OV5648_EXPOSURE_CTRL_HH_REG, &exposure_hh);
+ if (ret)
+ return ret;
+
+ ret = ov5648_read(sensor, OV5648_EXPOSURE_CTRL_H_REG, &exposure_h);
+ if (ret)
+ return ret;
+
+ ret = ov5648_read(sensor, OV5648_EXPOSURE_CTRL_L_REG, &exposure_l);
+ if (ret)
+ return ret;
+
+ *exposure = OV5648_EXPOSURE_CTRL_HH_VALUE((u32)exposure_hh) |
+ OV5648_EXPOSURE_CTRL_H_VALUE((u32)exposure_h) |
+ OV5648_EXPOSURE_CTRL_L_VALUE((u32)exposure_l);
+
+ return 0;
+}
+
+/* Gain */
+
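+/*
+ * Gain is split across two registers. Judging by the 16..1023 control range
+ * with a default of 16, a value of 16 presumably encodes 1x gain (Q4
+ * format), though this is an assumption.
+ */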
+static int ov5648_gain_auto_configure(struct ov5648_sensor *sensor, bool enable)
+{
+ return ov5648_update_bits(sensor, OV5648_MANUAL_CTRL_REG,
+ OV5648_MANUAL_CTRL_AGC_MANUAL_EN,
+ enable ? 0 : OV5648_MANUAL_CTRL_AGC_MANUAL_EN);
+}
+
+static int ov5648_gain_configure(struct ov5648_sensor *sensor, u32 gain)
+{
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
+ if (ctrls->gain_auto->val)
+ return -EINVAL;
+
+ ret = ov5648_write(sensor, OV5648_GAIN_CTRL_H_REG,
+ OV5648_GAIN_CTRL_H(gain));
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_GAIN_CTRL_L_REG,
+ OV5648_GAIN_CTRL_L(gain));
+}
+
+static int ov5648_gain_value(struct ov5648_sensor *sensor, u32 *gain)
+{
+ u8 gain_h = 0, gain_l = 0;
+ int ret;
+
+ ret = ov5648_read(sensor, OV5648_GAIN_CTRL_H_REG, &gain_h);
+ if (ret)
+ return ret;
+
+ ret = ov5648_read(sensor, OV5648_GAIN_CTRL_L_REG, &gain_l);
+ if (ret)
+ return ret;
+
+ *gain = OV5648_GAIN_CTRL_H_VALUE((u32)gain_h) |
+ OV5648_GAIN_CTRL_L_VALUE((u32)gain_l);
+
+ return 0;
+}
+
+/* White Balance */
+
+static int ov5648_white_balance_auto_configure(struct ov5648_sensor *sensor,
+ bool enable)
+{
+ return ov5648_write(sensor, OV5648_AWB_CTRL_REG,
+ enable ? 0 : OV5648_AWB_CTRL_GAIN_MANUAL_EN);
+}
+
+static int ov5648_white_balance_configure(struct ov5648_sensor *sensor,
+ u32 red_balance, u32 blue_balance)
+{
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
+ if (ctrls->white_balance_auto->val)
+ return -EINVAL;
+
+ ret = ov5648_write(sensor, OV5648_GAIN_RED_MAN_H_REG,
+ OV5648_GAIN_RED_MAN_H(red_balance));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_GAIN_RED_MAN_L_REG,
+ OV5648_GAIN_RED_MAN_L(red_balance));
+ if (ret)
+ return ret;
+
+ ret = ov5648_write(sensor, OV5648_GAIN_BLUE_MAN_H_REG,
+ OV5648_GAIN_BLUE_MAN_H(blue_balance));
+ if (ret)
+ return ret;
+
+ return ov5648_write(sensor, OV5648_GAIN_BLUE_MAN_L_REG,
+ OV5648_GAIN_BLUE_MAN_L(blue_balance));
+}
+
+/* Flip */
+
+static int ov5648_flip_vert_configure(struct ov5648_sensor *sensor, bool enable)
+{
+ u8 bits = OV5648_TC20_FLIP_VERT_ISP_EN |
+ OV5648_TC20_FLIP_VERT_SENSOR_EN;
+
+ return ov5648_update_bits(sensor, OV5648_TC20_REG, bits,
+ enable ? bits : 0);
+}
+
+static int ov5648_flip_horz_configure(struct ov5648_sensor *sensor, bool enable)
+{
+ u8 bits = OV5648_TC21_FLIP_HORZ_ISP_EN |
+ OV5648_TC21_FLIP_HORZ_SENSOR_EN;
+
+ return ov5648_update_bits(sensor, OV5648_TC21_REG, bits,
+ enable ? bits : 0);
+}
+
+/* Test Pattern */
+
+static int ov5648_test_pattern_configure(struct ov5648_sensor *sensor,
+ unsigned int index)
+{
+ if (index >= ARRAY_SIZE(ov5648_test_pattern_bits))
+ return -EINVAL;
+
+ return ov5648_write(sensor, OV5648_ISP_CTRL3D_REG,
+ ov5648_test_pattern_bits[index]);
+}
+
+/* State */
+
+static int ov5648_state_mipi_configure(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode,
+ u32 mbus_code)
+{
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ &sensor->endpoint.bus.mipi_csi2;
+ unsigned long mipi_clk_rate;
+ unsigned int bits_per_sample;
+ unsigned int lanes_count;
+ unsigned int i, j;
+ s64 mipi_pixel_rate;
+
+ mipi_clk_rate = ov5648_mode_mipi_clk_rate(sensor, mode, mbus_code);
+ if (!mipi_clk_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ov5648_link_freq_menu); i++) {
+ s64 freq = ov5648_link_freq_menu[i];
+
+ if (freq == mipi_clk_rate)
+ break;
+ }
+
+ for (j = 0; j < sensor->endpoint.nr_of_link_frequencies; j++) {
+ u64 freq = sensor->endpoint.link_frequencies[j];
+
+ if (freq == mipi_clk_rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ov5648_link_freq_menu)) {
+ dev_err(sensor->dev,
+ "failed to find %lu clk rate in link freq\n",
+ mipi_clk_rate);
+ } else if (j == sensor->endpoint.nr_of_link_frequencies) {
+ dev_err(sensor->dev,
+ "failed to find %lu clk rate in endpoint link-frequencies\n",
+ mipi_clk_rate);
+ } else {
+ __v4l2_ctrl_s_ctrl(ctrls->link_freq, i);
+ }
+
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ bits_per_sample = 8;
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ bits_per_sample = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lanes_count = bus_mipi_csi2->num_data_lanes;
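+ /*
+ * The pixel rate is the link frequency * 2 (double data rate) * lanes,
+ * divided by the bits per sample; e.g. a hypothetical 2-lane 8-bit setup
+ * at 168 MHz would yield 168 MHz * 2 * 2 / 8 = 84 Mpx/s.
+ */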
+ mipi_pixel_rate = mipi_clk_rate * 2 * lanes_count / bits_per_sample;
+
+ __v4l2_ctrl_s_ctrl_int64(ctrls->pixel_rate, mipi_pixel_rate);
+
+ return 0;
+}
+
+static int ov5648_state_configure(struct ov5648_sensor *sensor,
+ const struct ov5648_mode *mode,
+ u32 mbus_code)
+{
+ int ret;
+
+ if (sensor->state.streaming)
+ return -EBUSY;
+
+ /*
+ * Configure the mode now if the sensor is powered up; otherwise this is
+ * done at first power on.
+ */
+ if (pm_runtime_enabled(sensor->dev) &&
+ !pm_runtime_suspended(sensor->dev)) {
+ ret = ov5648_mode_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov5648_state_mipi_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+
+ sensor->state.mode = mode;
+ sensor->state.mbus_code = mbus_code;
+
+ return 0;
+}
+
+static int ov5648_state_init(struct ov5648_sensor *sensor)
+{
+ return ov5648_state_configure(sensor, &ov5648_modes[0],
+ ov5648_mbus_codes[0]);
+}
+
+/* Sensor Base */
+
+static int ov5648_sensor_init(struct ov5648_sensor *sensor)
+{
+ int ret;
+
+ ret = ov5648_sw_reset(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to perform sw reset\n");
+ return ret;
+ }
+
+ ret = ov5648_sw_standby(sensor, 1);
+ if (ret) {
+ dev_err(sensor->dev, "failed to set sensor standby\n");
+ return ret;
+ }
+
+ ret = ov5648_chip_id_check(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to check sensor chip id\n");
+ return ret;
+ }
+
+ ret = ov5648_avdd_internal_power(sensor, !sensor->avdd);
+ if (ret) {
+ dev_err(sensor->dev, "failed to set internal avdd power\n");
+ return ret;
+ }
+
+ ret = ov5648_write_sequence(sensor, ov5648_init_sequence,
+ ARRAY_SIZE(ov5648_init_sequence));
+ if (ret) {
+ dev_err(sensor->dev, "failed to write init sequence\n");
+ return ret;
+ }
+
+ ret = ov5648_pad_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure pad\n");
+ return ret;
+ }
+
+ ret = ov5648_mipi_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure MIPI\n");
+ return ret;
+ }
+
+ ret = ov5648_isp_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure ISP\n");
+ return ret;
+ }
+
+ ret = ov5648_black_level_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure black level\n");
+ return ret;
+ }
+
+ /* Configure current mode. */
+ ret = ov5648_state_configure(sensor, sensor->state.mode,
+ sensor->state.mbus_code);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure state\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5648_sensor_power(struct ov5648_sensor *sensor, bool on)
+{
+ /* Keep initialized to zero for disable label. */
+ int ret = 0;
+
+ /*
+ * General notes about the power sequence:
+ * - power-down GPIO must be active (low) during power-on;
+ * - reset GPIO state does not matter during power-on;
+ * - XVCLK must be provided 1 ms before register access;
+ * - 10 ms are needed between power-down deassert and register access.
+ */
+
+ /* Note that regulator-and-GPIO-based power is untested. */
+ if (on) {
+ gpiod_set_value_cansleep(sensor->reset, 1);
+ gpiod_set_value_cansleep(sensor->powerdown, 1);
+
+ ret = regulator_enable(sensor->dovdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable DOVDD regulator\n");
+ goto disable;
+ }
+
+ if (sensor->avdd) {
+ ret = regulator_enable(sensor->avdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable AVDD regulator\n");
+ goto disable;
+ }
+ }
+
+ ret = regulator_enable(sensor->dvdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable DVDD regulator\n");
+ goto disable;
+ }
+
+ /* According to OV5648 power up diagram. */
+ usleep_range(5000, 10000);
+
+ ret = clk_prepare_enable(sensor->xvclk);
+ if (ret) {
+ dev_err(sensor->dev, "failed to enable XVCLK clock\n");
+ goto disable;
+ }
+
+ gpiod_set_value_cansleep(sensor->reset, 0);
+ gpiod_set_value_cansleep(sensor->powerdown, 0);
+
+ usleep_range(20000, 25000);
+ } else {
+disable:
+ gpiod_set_value_cansleep(sensor->powerdown, 1);
+ gpiod_set_value_cansleep(sensor->reset, 1);
+
+ clk_disable_unprepare(sensor->xvclk);
+
+ regulator_disable(sensor->dvdd);
+
+ if (sensor->avdd)
+ regulator_disable(sensor->avdd);
+
+ regulator_disable(sensor->dovdd);
+ }
+
+ return ret;
+}
+
+/* Controls */
+
+static int ov5648_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *subdev = ov5648_ctrl_subdev(ctrl);
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov5648_exposure_value(sensor, &ctrls->exposure->val);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_CID_AUTOGAIN:
+ ret = ov5648_gain_value(sensor, &ctrls->gain->val);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov5648_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *subdev = ov5648_ctrl_subdev(ctrl);
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ unsigned int index;
+ bool enable;
+ int ret;
+
+ /* Skip hardware access while off; controls are applied again at resume. */
+ if (pm_runtime_suspended(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ enable = ctrl->val == V4L2_EXPOSURE_AUTO;
+
+ ret = ov5648_exposure_auto_configure(sensor, enable);
+ if (ret)
+ return ret;
+
+ if (!enable && ctrls->exposure->is_new) {
+ ret = ov5648_exposure_configure(sensor,
+ ctrls->exposure->val);
+ if (ret)
+ return ret;
+ }
+ break;
+ case V4L2_CID_AUTOGAIN:
+ enable = !!ctrl->val;
+
+ ret = ov5648_gain_auto_configure(sensor, enable);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ ret = ov5648_gain_configure(sensor, ctrls->gain->val);
+ if (ret)
+ return ret;
+ }
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ enable = !!ctrl->val;
+
+ ret = ov5648_white_balance_auto_configure(sensor, enable);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ ret = ov5648_white_balance_configure(sensor,
+ ctrls->red_balance->val,
+ ctrls->blue_balance->val);
+ if (ret)
+ return ret;
+ }
+ break;
+ case V4L2_CID_HFLIP:
+ enable = !!ctrl->val;
+ return ov5648_flip_horz_configure(sensor, enable);
+ case V4L2_CID_VFLIP:
+ enable = !!ctrl->val;
+ return ov5648_flip_vert_configure(sensor, enable);
+ case V4L2_CID_TEST_PATTERN:
+ index = (unsigned int)ctrl->val;
+ return ov5648_test_pattern_configure(sensor, index);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ov5648_ctrl_ops = {
+ .g_volatile_ctrl = ov5648_g_volatile_ctrl,
+ .s_ctrl = ov5648_s_ctrl,
+};
+
+static int ov5648_ctrls_init(struct ov5648_sensor *sensor)
+{
+ struct ov5648_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+ const struct v4l2_ctrl_ops *ops = &ov5648_ctrl_ops;
+ int ret;
+
+ v4l2_ctrl_handler_init(handler, 32);
+
+ /* Use our mutex for ctrl locking. */
+ handler->lock = &sensor->mutex;
+
+ /* Exposure */
+
+ ctrls->exposure_auto = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ ctrls->exposure = v4l2_ctrl_new_std(handler, ops, V4L2_CID_EXPOSURE,
+ 16, 1048575, 16, 512);
+
+ v4l2_ctrl_auto_cluster(2, &ctrls->exposure_auto, 1, true);
+
+ /* Gain */
+
+ ctrls->gain_auto =
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+
+ ctrls->gain = v4l2_ctrl_new_std(handler, ops, V4L2_CID_GAIN, 16, 1023,
+ 16, 16);
+
+ v4l2_ctrl_auto_cluster(2, &ctrls->gain_auto, 0, true);
+
+ /* White Balance */
+
+ ctrls->white_balance_auto =
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0,
+ 1, 1, 1);
+
+ ctrls->red_balance = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_RED_BALANCE, 0, 4095,
+ 1, 1024);
+
+ ctrls->blue_balance = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_BLUE_BALANCE, 0, 4095,
+ 1, 1024);
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->white_balance_auto, 0, false);
+
+ /* Flip */
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ /* Test Pattern */
+
+ v4l2_ctrl_new_std_menu_items(handler, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov5648_test_pattern_menu) - 1,
+ 0, 0, ov5648_test_pattern_menu);
+
+ /* MIPI CSI-2 */
+
+ ctrls->link_freq =
+ v4l2_ctrl_new_int_menu(handler, NULL, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov5648_link_freq_menu) - 1,
+ 0, ov5648_link_freq_menu);
+
+ ctrls->pixel_rate =
+ v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1, 1);
+
+ if (handler->error) {
+ ret = handler->error;
+ goto error_ctrls;
+ }
+
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ sensor->subdev.ctrl_handler = handler;
+
+ return 0;
+
+error_ctrls:
+ v4l2_ctrl_handler_free(handler);
+
+ return ret;
+}
+
+/* Subdev Video Operations */
+
+static int ov5648_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct ov5648_state *state = &sensor->state;
+ int ret;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(sensor->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sensor->dev);
+ return ret;
+ }
+ }
+
+ mutex_lock(&sensor->mutex);
+ ret = ov5648_sw_standby(sensor, !enable);
+ mutex_unlock(&sensor->mutex);
+
+ if (ret)
+ return ret;
+
+ state->streaming = !!enable;
+
+ if (!enable)
+ pm_runtime_put(sensor->dev);
+
+ return 0;
+}
+
+static int ov5648_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ const struct ov5648_mode *mode;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ mode = sensor->state.mode;
+
+ switch (sensor->state.mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ interval->interval = mode->frame_interval[0];
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ interval->interval = mode->frame_interval[1];
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
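+/*
+ * Frame intervals are fixed per mode and mbus code, so the set operation
+ * below reuses the get handler, which reports the active interval back.
+ */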
+static const struct v4l2_subdev_video_ops ov5648_subdev_video_ops = {
+ .s_stream = ov5648_s_stream,
+ .g_frame_interval = ov5648_g_frame_interval,
+ .s_frame_interval = ov5648_g_frame_interval,
+};
+
+/* Subdev Pad Operations */
+
+static int ov5648_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(ov5648_mbus_codes))
+ return -EINVAL;
+
+ code_enum->code = ov5648_mbus_codes[code_enum->index];
+
+ return 0;
+}
+
+static void ov5648_mbus_format_fill(struct v4l2_mbus_framefmt *mbus_format,
+ u32 mbus_code,
+ const struct ov5648_mode *mode)
+{
+ mbus_format->width = mode->output_size_x;
+ mbus_format->height = mode->output_size_y;
+ mbus_format->code = mbus_code;
+
+ mbus_format->field = V4L2_FIELD_NONE;
+ mbus_format->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_format->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(mbus_format->colorspace);
+ mbus_format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ mbus_format->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(mbus_format->colorspace);
+}
+
+static int ov5648_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+
+ mutex_lock(&sensor->mutex);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, config,
+ format->pad);
+ else
+ ov5648_mbus_format_fill(mbus_format, sensor->state.mbus_code,
+ sensor->state.mode);
+
+ mutex_unlock(&sensor->mutex);
+
+ return 0;
+}
+
+static int ov5648_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ const struct ov5648_mode *mode;
+ u32 mbus_code = 0;
+ unsigned int index;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ if (sensor->state.streaming) {
+ ret = -EBUSY;
+ goto complete;
+ }
+
+ /* Try to find requested mbus code. */
+ for (index = 0; index < ARRAY_SIZE(ov5648_mbus_codes); index++) {
+ if (ov5648_mbus_codes[index] == mbus_format->code) {
+ mbus_code = mbus_format->code;
+ break;
+ }
+ }
+
+ /* Fallback to default. */
+ if (!mbus_code)
+ mbus_code = ov5648_mbus_codes[0];
+
+ /* Find the mode with nearest dimensions. */
+ mode = v4l2_find_nearest_size(ov5648_modes, ARRAY_SIZE(ov5648_modes),
+ output_size_x, output_size_y,
+ mbus_format->width, mbus_format->height);
+ if (!mode) {
+ ret = -EINVAL;
+ goto complete;
+ }
+
+ ov5648_mbus_format_fill(mbus_format, mbus_code, mode);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_get_try_format(subdev, config, format->pad) =
+ *mbus_format;
+ else if (sensor->state.mode != mode ||
+ sensor->state.mbus_code != mbus_code)
+ ret = ov5648_state_configure(sensor, mode, mbus_code);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov5648_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_frame_size_enum *size_enum)
+{
+ const struct ov5648_mode *mode;
+
+ if (size_enum->index >= ARRAY_SIZE(ov5648_modes))
+ return -EINVAL;
+
+ mode = &ov5648_modes[size_enum->index];
+
+ size_enum->min_width = size_enum->max_width = mode->output_size_x;
+ size_enum->min_height = size_enum->max_height = mode->output_size_y;
+
+ return 0;
+}
+
+static int ov5648_enum_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_frame_interval_enum *interval_enum)
+{
+ const struct ov5648_mode *mode = NULL;
+ unsigned int mode_index;
+ unsigned int interval_index;
+
+ /*
+ * Multiple modes with the same dimensions may have different frame
+ * intervals, so look up each relevant mode.
+ */
+ for (mode_index = 0, interval_index = 0;
+ mode_index < ARRAY_SIZE(ov5648_modes); mode_index++) {
+ mode = &ov5648_modes[mode_index];
+
+ if (mode->output_size_x == interval_enum->width &&
+ mode->output_size_y == interval_enum->height) {
+ if (interval_index == interval_enum->index)
+ break;
+
+ interval_index++;
+ }
+ }
+
+ if (mode_index == ARRAY_SIZE(ov5648_modes))
+ return -EINVAL;
+
+ switch (interval_enum->code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ interval_enum->interval = mode->frame_interval[0];
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ interval_enum->interval = mode->frame_interval[1];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov5648_subdev_pad_ops = {
+ .enum_mbus_code = ov5648_enum_mbus_code,
+ .get_fmt = ov5648_get_fmt,
+ .set_fmt = ov5648_set_fmt,
+ .enum_frame_size = ov5648_enum_frame_size,
+ .enum_frame_interval = ov5648_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov5648_subdev_ops = {
+ .video = &ov5648_subdev_video_ops,
+ .pad = &ov5648_subdev_pad_ops,
+};
+
+static int ov5648_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct ov5648_state *state = &sensor->state;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ if (state->streaming) {
+ ret = ov5648_sw_standby(sensor, true);
+ if (ret)
+ goto complete;
+ }
+
+ ret = ov5648_sensor_power(sensor, false);
+ if (ret)
+ ov5648_sw_standby(sensor, false);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov5648_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ struct ov5648_state *state = &sensor->state;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ ret = ov5648_sensor_power(sensor, true);
+ if (ret)
+ goto complete;
+
+ ret = ov5648_sensor_init(sensor);
+ if (ret)
+ goto error_power;
+
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ if (ret)
+ goto error_power;
+
+ if (state->streaming) {
+ ret = ov5648_sw_standby(sensor, false);
+ if (ret)
+ goto error_power;
+ }
+
+ goto complete;
+
+error_power:
+ ov5648_sensor_power(sensor, false);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov5648_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *handle;
+ struct ov5648_sensor *sensor;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ unsigned long rate;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->dev = dev;
+ sensor->i2c_client = client;
+
+ /* Graph Endpoint */
+
+ handle = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!handle) {
+ dev_err(dev, "unable to find endpoint node\n");
+ return -EINVAL;
+ }
+
+ sensor->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(handle, &sensor->endpoint);
+ fwnode_handle_put(handle);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node\n");
+ return ret;
+ }
+
+ /* GPIOs */
+
+ sensor->powerdown = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->powerdown)) {
+ ret = PTR_ERR(sensor->powerdown);
+ goto error_endpoint;
+ }
+
+ sensor->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset)) {
+ ret = PTR_ERR(sensor->reset);
+ goto error_endpoint;
+ }
+
+ /* Regulators */
+
+ /* DVDD: digital core */
+ sensor->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(sensor->dvdd)) {
+ dev_err(dev, "cannot get DVDD (digital core) regulator\n");
+ ret = PTR_ERR(sensor->dvdd);
+ goto error_endpoint;
+ }
+
+ /* DOVDD: digital I/O */
+ sensor->dovdd = devm_regulator_get(dev, "dovdd");
+ if (IS_ERR(sensor->dovdd)) {
+ dev_err(dev, "cannot get DOVDD (digital I/O) regulator\n");
+ ret = PTR_ERR(sensor->dovdd);
+ goto error_endpoint;
+ }
+
+ /* AVDD: analog */
+ sensor->avdd = devm_regulator_get_optional(dev, "avdd");
+ if (IS_ERR(sensor->avdd)) {
+ dev_info(dev, "no AVDD regulator provided, using internal\n");
+ sensor->avdd = NULL;
+ }
+
+ /* External Clock */
+
+ sensor->xvclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xvclk)) {
+ dev_err(dev, "failed to get external clock\n");
+ ret = PTR_ERR(sensor->xvclk);
+ goto error_endpoint;
+ }
+
+ rate = clk_get_rate(sensor->xvclk);
+ if (rate != OV5648_XVCLK_RATE) {
+ dev_err(dev, "clock rate %lu Hz is unsupported\n", rate);
+ ret = -EINVAL;
+ goto error_endpoint;
+ }
+
+ /* Subdev, entity and pad */
+
+ subdev = &sensor->subdev;
+ v4l2_i2c_subdev_init(subdev, client, &ov5648_subdev_ops);
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ pad = &sensor->pad;
+ pad->flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, 1, pad);
+ if (ret)
+ goto error_entity;
+
+ /* Mutex */
+
+ mutex_init(&sensor->mutex);
+
+ /* Sensor */
+
+ ret = ov5648_ctrls_init(sensor);
+ if (ret)
+ goto error_mutex;
+
+ ret = ov5648_state_init(sensor);
+ if (ret)
+ goto error_ctrls;
+
+ /* Runtime PM */
+
+ pm_runtime_enable(sensor->dev);
+ pm_runtime_set_suspended(sensor->dev);
+
+ /* V4L2 subdev register */
+
+ ret = v4l2_async_register_subdev_sensor_common(subdev);
+ if (ret)
+ goto error_pm;
+
+ return 0;
+
+error_pm:
+ pm_runtime_disable(sensor->dev);
+
+error_ctrls:
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+error_mutex:
+ mutex_destroy(&sensor->mutex);
+
+error_entity:
+ media_entity_cleanup(&sensor->subdev.entity);
+
+error_endpoint:
+ v4l2_fwnode_endpoint_free(&sensor->endpoint);
+
+ return ret;
+}
+
+static int ov5648_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+
+ v4l2_async_unregister_subdev(subdev);
+ pm_runtime_disable(sensor->dev);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->mutex);
+ media_entity_cleanup(&subdev->entity);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov5648_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov5648_suspend, ov5648_resume, NULL)
+};
+
+static const struct of_device_id ov5648_of_match[] = {
+ { .compatible = "ovti,ov5648" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ov5648_of_match);
+
+static struct i2c_driver ov5648_driver = {
+ .driver = {
+ .name = "ov5648",
+ .of_match_table = ov5648_of_match,
+ .pm = &ov5648_pm_ops,
+ },
+ .probe_new = ov5648_probe,
+ .remove = ov5648_remove,
+};
+
+module_i2c_driver(ov5648_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("V4L2 driver for the OmniVision OV5648 image sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 148fd4e05029..866c8c2e8f59 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2084,7 +2084,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
/* By default, V4L2_CID_PIXEL_RATE is read only */
ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
- V4L2_CID_PIXEL_RATE, 0,
+ V4L2_CID_PIXEL_RATE,
+ link_freq_configs[0].pixel_rate,
link_freq_configs[0].pixel_rate,
1,
link_freq_configs[0].pixel_rate);
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 5e35808037ad..ae00d717e599 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -624,7 +624,7 @@ static int ov5675_set_ctrl_hflip(struct ov5675 *ov5675, u32 ctrl_val)
return ov5675_write_reg(ov5675, OV5675_REG_FORMAT1,
OV5675_REG_VALUE_08BIT,
- ctrl_val ? val & ~BIT(3) : val);
+ ctrl_val ? val & ~BIT(3) : val | BIT(3));
}
static int ov5675_set_ctrl_vflip(struct ov5675 *ov5675, u8 ctrl_val)
@@ -639,7 +639,7 @@ static int ov5675_set_ctrl_vflip(struct ov5675 *ov5675, u8 ctrl_val)
ret = ov5675_write_reg(ov5675, OV5675_REG_FORMAT1,
OV5675_REG_VALUE_08BIT,
- ctrl_val ? val | BIT(4) | BIT(5) : val);
+ ctrl_val ? val | BIT(4) | BIT(5) : val & ~BIT(4) & ~BIT(5));
if (ret)
return ret;
@@ -652,7 +652,7 @@ static int ov5675_set_ctrl_vflip(struct ov5675 *ov5675, u8 ctrl_val)
return ov5675_write_reg(ov5675, OV5675_REG_FORMAT2,
OV5675_REG_VALUE_08BIT,
- ctrl_val ? val | BIT(1) : val);
+ ctrl_val ? val | BIT(1) : val & ~BIT(1));
}
static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index d73f9f540932..85dd13694bd2 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -22,13 +22,13 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/module.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -194,7 +194,7 @@ struct ov6650 {
struct v4l2_ctrl *blue;
struct v4l2_ctrl *red;
};
- struct v4l2_clk *clk;
+ struct clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
struct v4l2_fract tpf; /* as requested with s_frame_interval */
@@ -459,9 +459,9 @@ static int ov6650_s_power(struct v4l2_subdev *sd, int on)
int ret = 0;
if (on)
- ret = v4l2_clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
else
- v4l2_clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -821,14 +821,14 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
u8 pidh, pidl, midh, midl;
int i, ret = 0;
- priv->clk = v4l2_clk_get(&client->dev, NULL);
+ priv->clk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
- dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
+ dev_err(&client->dev, "clk request err: %d\n", ret);
return ret;
}
- rate = v4l2_clk_get_rate(priv->clk);
+ rate = clk_get_rate(priv->clk);
for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
if (rate != ov6650_xclk[i].rate)
continue;
@@ -839,8 +839,8 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
break;
}
for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
- ret = v4l2_clk_set_rate(priv->clk, ov6650_xclk[i].rate);
- if (ret || v4l2_clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
+ ret = clk_set_rate(priv->clk, ov6650_xclk[i].rate);
+ if (ret || clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
continue;
xclk = &ov6650_xclk[i];
@@ -852,12 +852,12 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
dev_err(&client->dev, "unable to get supported clock rate\n");
if (!ret)
ret = -EINVAL;
- goto eclkput;
+ return ret;
}
ret = ov6650_s_power(sd, 1);
if (ret < 0)
- goto eclkput;
+ return ret;
msleep(20);
@@ -899,11 +899,6 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
done:
ov6650_s_power(sd, 0);
- if (!ret)
- return 0;
-eclkput:
- v4l2_clk_put(priv->clk);
-
return ret;
}
@@ -1089,7 +1084,6 @@ static int ov6650_remove(struct i2c_client *client)
{
struct ov6650 *priv = to_ov6650(client);
- v4l2_clk_put(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
return 0;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index d8cefd3d4045..b337f729d5e3 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -428,7 +428,7 @@ static const struct ov8856_reg mode_3264x2448_regs[] = {
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
- {0x3813, 0x02},
+ {0x3813, 0x01},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -821,7 +821,7 @@ static const struct ov8856_reg mode_1632x1224_regs[] = {
{0x3810, 0x00},
{0x3811, 0x02},
{0x3812, 0x00},
- {0x3813, 0x02},
+ {0x3813, 0x01},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x00},
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
new file mode 100644
index 000000000000..36a60fbc211d
--- /dev/null
+++ b/drivers/media/i2c/ov8865.c
@@ -0,0 +1,2972 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
+ * Copyright 2020 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-mediabus.h>
+
+/* Clock rate */
+
+#define OV8865_EXTCLK_RATE 24000000
+
+/* Register definitions */
+
+/* System */
+
+#define OV8865_SW_STANDBY_REG 0x100
+#define OV8865_SW_STANDBY_STREAM_ON BIT(0)
+
+#define OV8865_SW_RESET_REG 0x103
+#define OV8865_SW_RESET_RESET BIT(0)
+
+#define OV8865_PLL_CTRL0_REG 0x300
+#define OV8865_PLL_CTRL0_PRE_DIV(v) ((v) & GENMASK(2, 0))
+#define OV8865_PLL_CTRL1_REG 0x301
+#define OV8865_PLL_CTRL1_MUL_H(v) (((v) & GENMASK(9, 8)) >> 8)
+#define OV8865_PLL_CTRL2_REG 0x302
+#define OV8865_PLL_CTRL2_MUL_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_PLL_CTRL3_REG 0x303
+#define OV8865_PLL_CTRL3_M_DIV(v) (((v) - 1) & GENMASK(3, 0))
+#define OV8865_PLL_CTRL4_REG 0x304
+#define OV8865_PLL_CTRL4_MIPI_DIV(v) ((v) & GENMASK(1, 0))
+#define OV8865_PLL_CTRL5_REG 0x305
+#define OV8865_PLL_CTRL5_SYS_PRE_DIV(v) ((v) & GENMASK(1, 0))
+#define OV8865_PLL_CTRL6_REG 0x306
+#define OV8865_PLL_CTRL6_SYS_DIV(v) (((v) - 1) & BIT(0))
+
+#define OV8865_PLL_CTRL8_REG 0x308
+#define OV8865_PLL_CTRL9_REG 0x309
+#define OV8865_PLL_CTRLA_REG 0x30a
+#define OV8865_PLL_CTRLA_PRE_DIV_HALF(v) (((v) - 1) & BIT(0))
+#define OV8865_PLL_CTRLB_REG 0x30b
+#define OV8865_PLL_CTRLB_PRE_DIV(v) ((v) & GENMASK(2, 0))
+#define OV8865_PLL_CTRLC_REG 0x30c
+#define OV8865_PLL_CTRLC_MUL_H(v) (((v) & GENMASK(9, 8)) >> 8)
+#define OV8865_PLL_CTRLD_REG 0x30d
+#define OV8865_PLL_CTRLD_MUL_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_PLL_CTRLE_REG 0x30e
+#define OV8865_PLL_CTRLE_SYS_DIV(v) ((v) & GENMASK(2, 0))
+#define OV8865_PLL_CTRLF_REG 0x30f
+#define OV8865_PLL_CTRLF_SYS_PRE_DIV(v) (((v) - 1) & GENMASK(3, 0))
+#define OV8865_PLL_CTRL10_REG 0x310
+#define OV8865_PLL_CTRL11_REG 0x311
+#define OV8865_PLL_CTRL12_REG 0x312
+#define OV8865_PLL_CTRL12_PRE_DIV_HALF(v) ((((v) - 1) << 4) & BIT(4))
+#define OV8865_PLL_CTRL12_DAC_DIV(v) (((v) - 1) & GENMASK(3, 0))
+
+#define OV8865_PLL_CTRL1B_REG 0x31b
+#define OV8865_PLL_CTRL1C_REG 0x31c
+
+#define OV8865_PLL_CTRL1E_REG 0x31e
+#define OV8865_PLL_CTRL1E_PLL1_NO_LAT BIT(3)
+
+#define OV8865_PAD_OEN0_REG 0x3000
+
+#define OV8865_PAD_OEN2_REG 0x3002
+
+#define OV8865_CLK_RST5_REG 0x3005
+
+#define OV8865_CHIP_ID_HH_REG 0x300a
+#define OV8865_CHIP_ID_HH_VALUE 0x00
+#define OV8865_CHIP_ID_H_REG 0x300b
+#define OV8865_CHIP_ID_H_VALUE 0x88
+#define OV8865_CHIP_ID_L_REG 0x300c
+#define OV8865_CHIP_ID_L_VALUE 0x65
+#define OV8865_PAD_OUT2_REG 0x300d
+
+#define OV8865_PAD_SEL2_REG 0x3010
+#define OV8865_PAD_PK_REG 0x3011
+#define OV8865_PAD_PK_DRIVE_STRENGTH_1X (0 << 5)
+#define OV8865_PAD_PK_DRIVE_STRENGTH_2X (1 << 5)
+#define OV8865_PAD_PK_DRIVE_STRENGTH_3X (2 << 5)
+#define OV8865_PAD_PK_DRIVE_STRENGTH_4X (3 << 5)
+
+#define OV8865_PUMP_CLK_DIV_REG 0x3015
+#define OV8865_PUMP_CLK_DIV_PUMP_N(v) (((v) << 4) & GENMASK(6, 4))
+#define OV8865_PUMP_CLK_DIV_PUMP_P(v) ((v) & GENMASK(2, 0))
+
+#define OV8865_MIPI_SC_CTRL0_REG 0x3018
+#define OV8865_MIPI_SC_CTRL0_LANES(v) ((((v) - 1) << 5) & \
+ GENMASK(7, 5))
+#define OV8865_MIPI_SC_CTRL0_MIPI_EN BIT(4)
+#define OV8865_MIPI_SC_CTRL0_UNKNOWN BIT(1)
+#define OV8865_MIPI_SC_CTRL0_LANES_PD_MIPI BIT(0)
+#define OV8865_MIPI_SC_CTRL1_REG 0x3019
+#define OV8865_CLK_RST0_REG 0x301a
+#define OV8865_CLK_RST1_REG 0x301b
+#define OV8865_CLK_RST2_REG 0x301c
+#define OV8865_CLK_RST3_REG 0x301d
+#define OV8865_CLK_RST4_REG 0x301e
+
+#define OV8865_PCLK_SEL_REG 0x3020
+#define OV8865_PCLK_SEL_PCLK_DIV_MASK BIT(3)
+#define OV8865_PCLK_SEL_PCLK_DIV(v) ((((v) - 1) << 3) & BIT(3))
+
+#define OV8865_MISC_CTRL_REG 0x3021
+#define OV8865_MIPI_SC_CTRL2_REG 0x3022
+#define OV8865_MIPI_SC_CTRL2_CLK_LANES_PD_MIPI BIT(1)
+#define OV8865_MIPI_SC_CTRL2_PD_MIPI_RST_SYNC BIT(0)
+
+#define OV8865_MIPI_BIT_SEL_REG 0x3031
+#define OV8865_MIPI_BIT_SEL(v) (((v) << 0) & GENMASK(4, 0))
+#define OV8865_CLK_SEL0_REG 0x3032
+#define OV8865_CLK_SEL0_PLL1_SYS_SEL(v) (((v) << 7) & BIT(7))
+#define OV8865_CLK_SEL1_REG 0x3033
+#define OV8865_CLK_SEL1_MIPI_EOF BIT(5)
+#define OV8865_CLK_SEL1_UNKNOWN BIT(2)
+#define OV8865_CLK_SEL1_PLL_SCLK_SEL_MASK BIT(1)
+#define OV8865_CLK_SEL1_PLL_SCLK_SEL(v) (((v) << 1) & BIT(1))
+
+#define OV8865_SCLK_CTRL_REG 0x3106
+#define OV8865_SCLK_CTRL_SCLK_DIV(v) (((v) << 4) & GENMASK(7, 4))
+#define OV8865_SCLK_CTRL_SCLK_PRE_DIV(v) (((v) << 2) & GENMASK(3, 2))
+#define OV8865_SCLK_CTRL_UNKNOWN BIT(0)
+
+/* Exposure/gain */
+
+#define OV8865_EXPOSURE_CTRL_HH_REG 0x3500
+#define OV8865_EXPOSURE_CTRL_HH(v) (((v) & GENMASK(19, 16)) >> 16)
+#define OV8865_EXPOSURE_CTRL_H_REG 0x3501
+#define OV8865_EXPOSURE_CTRL_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_EXPOSURE_CTRL_L_REG 0x3502
+#define OV8865_EXPOSURE_CTRL_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_EXPOSURE_GAIN_MANUAL_REG 0x3503
+
+#define OV8865_GAIN_CTRL_H_REG 0x3508
+#define OV8865_GAIN_CTRL_H(v) (((v) & GENMASK(12, 8)) >> 8)
+#define OV8865_GAIN_CTRL_L_REG 0x3509
+#define OV8865_GAIN_CTRL_L(v) ((v) & GENMASK(7, 0))
+
+/* Timing */
+
+#define OV8865_CROP_START_X_H_REG 0x3800
+#define OV8865_CROP_START_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_CROP_START_X_L_REG 0x3801
+#define OV8865_CROP_START_X_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_CROP_START_Y_H_REG 0x3802
+#define OV8865_CROP_START_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_CROP_START_Y_L_REG 0x3803
+#define OV8865_CROP_START_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_CROP_END_X_H_REG 0x3804
+#define OV8865_CROP_END_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_CROP_END_X_L_REG 0x3805
+#define OV8865_CROP_END_X_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_CROP_END_Y_H_REG 0x3806
+#define OV8865_CROP_END_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_CROP_END_Y_L_REG 0x3807
+#define OV8865_CROP_END_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_OUTPUT_SIZE_X_H_REG 0x3808
+#define OV8865_OUTPUT_SIZE_X_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_OUTPUT_SIZE_X_L_REG 0x3809
+#define OV8865_OUTPUT_SIZE_X_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_OUTPUT_SIZE_Y_H_REG 0x380a
+#define OV8865_OUTPUT_SIZE_Y_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_OUTPUT_SIZE_Y_L_REG 0x380b
+#define OV8865_OUTPUT_SIZE_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_HTS_H_REG 0x380c
+#define OV8865_HTS_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_HTS_L_REG 0x380d
+#define OV8865_HTS_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_VTS_H_REG 0x380e
+#define OV8865_VTS_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_VTS_L_REG 0x380f
+#define OV8865_VTS_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_OFFSET_X_H_REG 0x3810
+#define OV8865_OFFSET_X_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_OFFSET_X_L_REG 0x3811
+#define OV8865_OFFSET_X_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_OFFSET_Y_H_REG 0x3812
+#define OV8865_OFFSET_Y_H(v) (((v) & GENMASK(14, 8)) >> 8)
+#define OV8865_OFFSET_Y_L_REG 0x3813
+#define OV8865_OFFSET_Y_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_INC_X_ODD_REG 0x3814
+#define OV8865_INC_X_ODD(v) ((v) & GENMASK(4, 0))
+#define OV8865_INC_X_EVEN_REG 0x3815
+#define OV8865_INC_X_EVEN(v) ((v) & GENMASK(4, 0))
+#define OV8865_VSYNC_START_H_REG 0x3816
+#define OV8865_VSYNC_START_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_VSYNC_START_L_REG 0x3817
+#define OV8865_VSYNC_START_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_VSYNC_END_H_REG 0x3818
+#define OV8865_VSYNC_END_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_VSYNC_END_L_REG 0x3819
+#define OV8865_VSYNC_END_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_HSYNC_FIRST_H_REG 0x381a
+#define OV8865_HSYNC_FIRST_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_HSYNC_FIRST_L_REG 0x381b
+#define OV8865_HSYNC_FIRST_L(v) ((v) & GENMASK(7, 0))
+
+#define OV8865_FORMAT1_REG 0x3820
+#define OV8865_FORMAT1_FLIP_VERT_ISP_EN BIT(2)
+#define OV8865_FORMAT1_FLIP_VERT_SENSOR_EN BIT(1)
+#define OV8865_FORMAT2_REG 0x3821
+#define OV8865_FORMAT2_HSYNC_EN BIT(6)
+#define OV8865_FORMAT2_FST_VBIN_EN BIT(5)
+#define OV8865_FORMAT2_FST_HBIN_EN BIT(4)
+#define OV8865_FORMAT2_ISP_HORZ_VAR2_EN BIT(3)
+#define OV8865_FORMAT2_FLIP_HORZ_ISP_EN BIT(2)
+#define OV8865_FORMAT2_FLIP_HORZ_SENSOR_EN BIT(1)
+#define OV8865_FORMAT2_SYNC_HBIN_EN BIT(0)
+
+#define OV8865_INC_Y_ODD_REG 0x382a
+#define OV8865_INC_Y_ODD(v) ((v) & GENMASK(4, 0))
+#define OV8865_INC_Y_EVEN_REG 0x382b
+#define OV8865_INC_Y_EVEN(v) ((v) & GENMASK(4, 0))
+
+#define OV8865_ABLC_NUM_REG 0x3830
+#define OV8865_ABLC_NUM(v) ((v) & GENMASK(4, 0))
+
+#define OV8865_ZLINE_NUM_REG 0x3836
+#define OV8865_ZLINE_NUM(v) ((v) & GENMASK(4, 0))
+
+#define OV8865_AUTO_SIZE_CTRL_REG 0x3841
+#define OV8865_AUTO_SIZE_CTRL_OFFSET_Y_REG BIT(5)
+#define OV8865_AUTO_SIZE_CTRL_OFFSET_X_REG BIT(4)
+#define OV8865_AUTO_SIZE_CTRL_CROP_END_Y_REG BIT(3)
+#define OV8865_AUTO_SIZE_CTRL_CROP_END_X_REG BIT(2)
+#define OV8865_AUTO_SIZE_CTRL_CROP_START_Y_REG BIT(1)
+#define OV8865_AUTO_SIZE_CTRL_CROP_START_X_REG BIT(0)
+#define OV8865_AUTO_SIZE_X_OFFSET_H_REG 0x3842
+#define OV8865_AUTO_SIZE_X_OFFSET_L_REG 0x3843
+#define OV8865_AUTO_SIZE_Y_OFFSET_H_REG 0x3844
+#define OV8865_AUTO_SIZE_Y_OFFSET_L_REG 0x3845
+#define OV8865_AUTO_SIZE_BOUNDARIES_REG 0x3846
+#define OV8865_AUTO_SIZE_BOUNDARIES_Y(v) (((v) << 4) & GENMASK(7, 4))
+#define OV8865_AUTO_SIZE_BOUNDARIES_X(v) ((v) & GENMASK(3, 0))
+
+/* PSRAM */
+
+#define OV8865_PSRAM_CTRL8_REG 0x3f08
+
+/* Black Level */
+
+#define OV8865_BLC_CTRL0_REG 0x4000
+#define OV8865_BLC_CTRL0_TRIG_RANGE_EN BIT(7)
+#define OV8865_BLC_CTRL0_TRIG_FORMAT_EN BIT(6)
+#define OV8865_BLC_CTRL0_TRIG_GAIN_EN BIT(5)
+#define OV8865_BLC_CTRL0_TRIG_EXPOSURE_EN BIT(4)
+#define OV8865_BLC_CTRL0_TRIG_MANUAL_EN BIT(3)
+#define OV8865_BLC_CTRL0_FREEZE_EN BIT(2)
+#define OV8865_BLC_CTRL0_ALWAYS_EN BIT(1)
+#define OV8865_BLC_CTRL0_FILTER_EN BIT(0)
+#define OV8865_BLC_CTRL1_REG 0x4001
+#define OV8865_BLC_CTRL1_DITHER_EN BIT(7)
+#define OV8865_BLC_CTRL1_ZERO_LINE_DIFF_EN BIT(6)
+#define OV8865_BLC_CTRL1_COL_SHIFT_256 (0 << 4)
+#define OV8865_BLC_CTRL1_COL_SHIFT_128 (1 << 4)
+#define OV8865_BLC_CTRL1_COL_SHIFT_64 (2 << 4)
+#define OV8865_BLC_CTRL1_COL_SHIFT_32 (3 << 4)
+#define OV8865_BLC_CTRL1_OFFSET_LIMIT_EN BIT(2)
+#define OV8865_BLC_CTRL1_COLUMN_CANCEL_EN BIT(1)
+#define OV8865_BLC_CTRL2_REG 0x4002
+#define OV8865_BLC_CTRL3_REG 0x4003
+#define OV8865_BLC_CTRL4_REG 0x4004
+#define OV8865_BLC_CTRL5_REG 0x4005
+#define OV8865_BLC_CTRL6_REG 0x4006
+#define OV8865_BLC_CTRL7_REG 0x4007
+#define OV8865_BLC_CTRL8_REG 0x4008
+#define OV8865_BLC_CTRL9_REG 0x4009
+#define OV8865_BLC_CTRLA_REG 0x400a
+#define OV8865_BLC_CTRLB_REG 0x400b
+#define OV8865_BLC_CTRLC_REG 0x400c
+#define OV8865_BLC_CTRLD_REG 0x400d
+#define OV8865_BLC_CTRLD_OFFSET_TRIGGER(v) ((v) & GENMASK(7, 0))
+
+#define OV8865_BLC_CTRL1F_REG 0x401f
+#define OV8865_BLC_CTRL1F_RB_REVERSE BIT(3)
+#define OV8865_BLC_CTRL1F_INTERPOL_X_EN BIT(2)
+#define OV8865_BLC_CTRL1F_INTERPOL_Y_EN BIT(1)
+
+#define OV8865_BLC_ANCHOR_LEFT_START_H_REG 0x4020
+#define OV8865_BLC_ANCHOR_LEFT_START_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_BLC_ANCHOR_LEFT_START_L_REG 0x4021
+#define OV8865_BLC_ANCHOR_LEFT_START_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_BLC_ANCHOR_LEFT_END_H_REG 0x4022
+#define OV8865_BLC_ANCHOR_LEFT_END_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_BLC_ANCHOR_LEFT_END_L_REG 0x4023
+#define OV8865_BLC_ANCHOR_LEFT_END_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_BLC_ANCHOR_RIGHT_START_H_REG 0x4024
+#define OV8865_BLC_ANCHOR_RIGHT_START_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_BLC_ANCHOR_RIGHT_START_L_REG 0x4025
+#define OV8865_BLC_ANCHOR_RIGHT_START_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_BLC_ANCHOR_RIGHT_END_H_REG 0x4026
+#define OV8865_BLC_ANCHOR_RIGHT_END_H(v) (((v) & GENMASK(11, 8)) >> 8)
+#define OV8865_BLC_ANCHOR_RIGHT_END_L_REG 0x4027
+#define OV8865_BLC_ANCHOR_RIGHT_END_L(v) ((v) & GENMASK(7, 0))
+
+#define OV8865_BLC_TOP_ZLINE_START_REG 0x4028
+#define OV8865_BLC_TOP_ZLINE_START(v) ((v) & GENMASK(5, 0))
+#define OV8865_BLC_TOP_ZLINE_NUM_REG 0x4029
+#define OV8865_BLC_TOP_ZLINE_NUM(v) ((v) & GENMASK(4, 0))
+#define OV8865_BLC_TOP_BLKLINE_START_REG 0x402a
+#define OV8865_BLC_TOP_BLKLINE_START(v) ((v) & GENMASK(5, 0))
+#define OV8865_BLC_TOP_BLKLINE_NUM_REG 0x402b
+#define OV8865_BLC_TOP_BLKLINE_NUM(v) ((v) & GENMASK(4, 0))
+#define OV8865_BLC_BOT_ZLINE_START_REG 0x402c
+#define OV8865_BLC_BOT_ZLINE_START(v) ((v) & GENMASK(5, 0))
+#define OV8865_BLC_BOT_ZLINE_NUM_REG 0x402d
+#define OV8865_BLC_BOT_ZLINE_NUM(v) ((v) & GENMASK(4, 0))
+#define OV8865_BLC_BOT_BLKLINE_START_REG 0x402e
+#define OV8865_BLC_BOT_BLKLINE_START(v) ((v) & GENMASK(5, 0))
+#define OV8865_BLC_BOT_BLKLINE_NUM_REG 0x402f
+#define OV8865_BLC_BOT_BLKLINE_NUM(v) ((v) & GENMASK(4, 0))
+
+#define OV8865_BLC_OFFSET_LIMIT_REG 0x4034
+#define OV8865_BLC_OFFSET_LIMIT(v) ((v) & GENMASK(7, 0))
+
+/* VFIFO */
+
+#define OV8865_VFIFO_READ_START_H_REG 0x4600
+#define OV8865_VFIFO_READ_START_H(v) (((v) & GENMASK(15, 8)) >> 8)
+#define OV8865_VFIFO_READ_START_L_REG 0x4601
+#define OV8865_VFIFO_READ_START_L(v) ((v) & GENMASK(7, 0))
+
+/* MIPI */
+
+#define OV8865_MIPI_CTRL0_REG 0x4800
+#define OV8865_MIPI_CTRL1_REG 0x4801
+#define OV8865_MIPI_CTRL2_REG 0x4802
+#define OV8865_MIPI_CTRL3_REG 0x4803
+#define OV8865_MIPI_CTRL4_REG 0x4804
+#define OV8865_MIPI_CTRL5_REG 0x4805
+#define OV8865_MIPI_CTRL6_REG 0x4806
+#define OV8865_MIPI_CTRL7_REG 0x4807
+#define OV8865_MIPI_CTRL8_REG 0x4808
+
+#define OV8865_MIPI_FCNT_MAX_H_REG 0x4810
+#define OV8865_MIPI_FCNT_MAX_L_REG 0x4811
+
+#define OV8865_MIPI_CTRL13_REG 0x4813
+#define OV8865_MIPI_CTRL14_REG 0x4814
+#define OV8865_MIPI_CTRL15_REG 0x4815
+#define OV8865_MIPI_EMBEDDED_DT_REG 0x4816
+
+#define OV8865_MIPI_HS_ZERO_MIN_H_REG 0x4818
+#define OV8865_MIPI_HS_ZERO_MIN_L_REG 0x4819
+#define OV8865_MIPI_HS_TRAIL_MIN_H_REG 0x481a
+#define OV8865_MIPI_HS_TRAIL_MIN_L_REG 0x481b
+#define OV8865_MIPI_CLK_ZERO_MIN_H_REG 0x481c
+#define OV8865_MIPI_CLK_ZERO_MIN_L_REG 0x481d
+#define OV8865_MIPI_CLK_PREPARE_MAX_REG 0x481e
+#define OV8865_MIPI_CLK_PREPARE_MIN_REG 0x481f
+#define OV8865_MIPI_CLK_POST_MIN_H_REG 0x4820
+#define OV8865_MIPI_CLK_POST_MIN_L_REG 0x4821
+#define OV8865_MIPI_CLK_TRAIL_MIN_H_REG 0x4822
+#define OV8865_MIPI_CLK_TRAIL_MIN_L_REG 0x4823
+#define OV8865_MIPI_LPX_P_MIN_H_REG 0x4824
+#define OV8865_MIPI_LPX_P_MIN_L_REG 0x4825
+#define OV8865_MIPI_HS_PREPARE_MIN_REG 0x4826
+#define OV8865_MIPI_HS_PREPARE_MAX_REG 0x4827
+#define OV8865_MIPI_HS_EXIT_MIN_H_REG 0x4828
+#define OV8865_MIPI_HS_EXIT_MIN_L_REG 0x4829
+#define OV8865_MIPI_UI_HS_ZERO_MIN_REG 0x482a
+#define OV8865_MIPI_UI_HS_TRAIL_MIN_REG 0x482b
+#define OV8865_MIPI_UI_CLK_ZERO_MIN_REG 0x482c
+#define OV8865_MIPI_UI_CLK_PREPARE_REG 0x482d
+#define OV8865_MIPI_UI_CLK_POST_MIN_REG 0x482e
+#define OV8865_MIPI_UI_CLK_TRAIL_MIN_REG 0x482f
+#define OV8865_MIPI_UI_LPX_P_MIN_REG 0x4830
+#define OV8865_MIPI_UI_HS_PREPARE_REG 0x4831
+#define OV8865_MIPI_UI_HS_EXIT_MIN_REG 0x4832
+#define OV8865_MIPI_PKT_START_SIZE_REG 0x4833
+
+#define OV8865_MIPI_PCLK_PERIOD_REG 0x4837
+#define OV8865_MIPI_LP_GPIO0_REG 0x4838
+#define OV8865_MIPI_LP_GPIO1_REG 0x4839
+
+#define OV8865_MIPI_CTRL3C_REG 0x483c
+#define OV8865_MIPI_LP_GPIO4_REG 0x483d
+
+#define OV8865_MIPI_CTRL4A_REG 0x484a
+#define OV8865_MIPI_CTRL4B_REG 0x484b
+#define OV8865_MIPI_CTRL4C_REG 0x484c
+#define OV8865_MIPI_LANE_TEST_PATTERN_REG 0x484d
+#define OV8865_MIPI_FRAME_END_DELAY_REG 0x484e
+#define OV8865_MIPI_CLOCK_TEST_PATTERN_REG 0x484f
+#define OV8865_MIPI_LANE_SEL01_REG 0x4850
+#define OV8865_MIPI_LANE_SEL01_LANE0(v) (((v) << 0) & GENMASK(2, 0))
+#define OV8865_MIPI_LANE_SEL01_LANE1(v) (((v) << 4) & GENMASK(6, 4))
+#define OV8865_MIPI_LANE_SEL23_REG 0x4851
+#define OV8865_MIPI_LANE_SEL23_LANE2(v) (((v) << 0) & GENMASK(2, 0))
+#define OV8865_MIPI_LANE_SEL23_LANE3(v) (((v) << 4) & GENMASK(6, 4))
+
+/* ISP */
+
+#define OV8865_ISP_CTRL0_REG 0x5000
+#define OV8865_ISP_CTRL0_LENC_EN BIT(7)
+#define OV8865_ISP_CTRL0_WHITE_BALANCE_EN BIT(4)
+#define OV8865_ISP_CTRL0_DPC_BLACK_EN BIT(2)
+#define OV8865_ISP_CTRL0_DPC_WHITE_EN BIT(1)
+#define OV8865_ISP_CTRL1_REG 0x5001
+#define OV8865_ISP_CTRL1_BLC_EN BIT(0)
+#define OV8865_ISP_CTRL2_REG 0x5002
+#define OV8865_ISP_CTRL2_DEBUG BIT(3)
+#define OV8865_ISP_CTRL2_VARIOPIXEL_EN BIT(2)
+#define OV8865_ISP_CTRL2_VSYNC_LATCH_EN BIT(0)
+#define OV8865_ISP_CTRL3_REG 0x5003
+
+#define OV8865_ISP_GAIN_RED_H_REG 0x5018
+#define OV8865_ISP_GAIN_RED_H(v) (((v) & GENMASK(13, 6)) >> 6)
+#define OV8865_ISP_GAIN_RED_L_REG 0x5019
+#define OV8865_ISP_GAIN_RED_L(v) ((v) & GENMASK(5, 0))
+#define OV8865_ISP_GAIN_GREEN_H_REG 0x501a
+#define OV8865_ISP_GAIN_GREEN_H(v) (((v) & GENMASK(13, 6)) >> 6)
+#define OV8865_ISP_GAIN_GREEN_L_REG 0x501b
+#define OV8865_ISP_GAIN_GREEN_L(v) ((v) & GENMASK(5, 0))
+#define OV8865_ISP_GAIN_BLUE_H_REG 0x501c
+#define OV8865_ISP_GAIN_BLUE_H(v) (((v) & GENMASK(13, 6)) >> 6)
+#define OV8865_ISP_GAIN_BLUE_L_REG 0x501d
+#define OV8865_ISP_GAIN_BLUE_L(v) ((v) & GENMASK(5, 0))
+
+/* VarioPixel */
+
+#define OV8865_VAP_CTRL0_REG 0x5900
+#define OV8865_VAP_CTRL1_REG 0x5901
+#define OV8865_VAP_CTRL1_HSUB_COEF(v) ((((v) - 1) << 2) & \
+ GENMASK(3, 2))
+#define OV8865_VAP_CTRL1_VSUB_COEF(v) (((v) - 1) & GENMASK(1, 0))
+
+/* Pre-DSP */
+
+#define OV8865_PRE_CTRL0_REG 0x5e00
+#define OV8865_PRE_CTRL0_PATTERN_EN BIT(7)
+#define OV8865_PRE_CTRL0_ROLLING_BAR_EN BIT(6)
+#define OV8865_PRE_CTRL0_TRANSPARENT_MODE BIT(5)
+#define OV8865_PRE_CTRL0_SQUARES_BW_MODE BIT(4)
+#define OV8865_PRE_CTRL0_PATTERN_COLOR_BARS 0
+#define OV8865_PRE_CTRL0_PATTERN_RANDOM_DATA 1
+#define OV8865_PRE_CTRL0_PATTERN_COLOR_SQUARES 2
+#define OV8865_PRE_CTRL0_PATTERN_BLACK 3
+
+/* Macros */
+
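+/* Map an embedded subdev or control handler back to its ov8865_sensor. */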
+#define ov8865_subdev_sensor(s) \
+ container_of(s, struct ov8865_sensor, subdev)
+
+#define ov8865_ctrl_subdev(c) \
+ (&container_of((c)->handler, struct ov8865_sensor, \
+ ctrls.handler)->subdev)
+
+/* Data structures */
+
+struct ov8865_register_value {
+ u16 address;
+ u8 value;
+ unsigned int delay_ms;
+};
+
+/*
+ * PLL1 Clock Tree:
+ *
+ * +-< EXTCLK
+ * |
+ * +-+ pll_pre_div_half (0x30a [0])
+ * |
+ * +-+ pll_pre_div (0x300 [2:0], special values:
+ * | 0: 1, 1: 1.5, 3: 2.5, 4: 3, 5: 4, 7: 8)
+ * +-+ pll_mul (0x301 [1:0], 0x302 [7:0])
+ * |
+ * +-+ m_div (0x303 [3:0])
+ * | |
+ * | +-> PHY_SCLK
+ * | |
+ * | +-+ mipi_div (0x304 [1:0], special values: 0: 4, 1: 5, 2: 6, 3: 8)
+ * | |
+ * | +-+ pclk_div (0x3020 [3])
+ * | |
+ * | +-> PCLK
+ * |
+ * +-+ sys_pre_div (0x305 [1:0], special values: 0: 3, 1: 4, 2: 5, 3: 6)
+ * |
+ * +-+ sys_div (0x306 [0])
+ * |
+ * +-+ sys_sel (0x3032 [7], 0: PLL1, 1: PLL2)
+ * |
+ * +-+ sclk_sel (0x3033 [1], 0: sys_sel, 1: PLL2 DAC_CLK)
+ * |
+ * +-+ sclk_pre_div (0x3106 [3:2], special values:
+ * | 0: 1, 1: 2, 2: 4, 3: 1)
+ * |
+ * +-+ sclk_div (0x3106 [7:4], special values: 0: 1)
+ * |
+ * +-> SCLK
+ */
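+
+/*
+ * Worked example for the native configuration defined below, assuming
+ * EXTCLK = 24 MHz: with pll_pre_div_half = 1 (/1), pll_pre_div = 0 (/1)
+ * and pll_mul = 30, PLL1 = 24 MHz * 30 = 720 MHz; m_div = 1 keeps
+ * PHY_SCLK at 720 MHz and mipi_div = 3 (/8) yields MIPI_PCLK = 90 MHz.
+ */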
+
+struct ov8865_pll1_config {
+ unsigned int pll_pre_div_half;
+ unsigned int pll_pre_div;
+ unsigned int pll_mul;
+ unsigned int m_div;
+ unsigned int mipi_div;
+ unsigned int pclk_div;
+ unsigned int sys_pre_div;
+ unsigned int sys_div;
+};
+
+/*
+ * PLL2 Clock Tree:
+ *
+ * +-< EXTCLK
+ * |
+ * +-+ pll_pre_div_half (0x312 [4])
+ * |
+ * +-+ pll_pre_div (0x30b [2:0], special values:
+ * | 0: 1, 1: 1.5, 3: 2.5, 4: 3, 5: 4, 7: 8)
+ * +-+ pll_mul (0x30c [1:0], 0x30d [7:0])
+ * |
+ * +-+ dac_div (0x312 [3:0])
+ * | |
+ * | +-> DAC_CLK
+ * |
+ * +-+ sys_pre_div (0x30f [3:0])
+ * |
+ * +-+ sys_div (0x30e [2:0], special values:
+ * | 0: 1, 1: 1.5, 3: 2.5, 4: 3, 5: 3.5, 6: 4, 7: 5)
+ * |
+ * +-+ sys_sel (0x3032 [7], 0: PLL1, 1: PLL2)
+ * |
+ * +-+ sclk_sel (0x3033 [1], 0: sys_sel, 1: PLL2 DAC_CLK)
+ * |
+ * +-+ sclk_pre_div (0x3106 [3:2], special values:
+ * | 0: 1, 1: 2, 2: 4, 3: 1)
+ * |
+ * +-+ sclk_div (0x3106 [7:4], special values: 0: 1)
+ * |
+ * +-> SCLK
+ */
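+
+/*
+ * Worked example for the native configuration defined below, assuming
+ * EXTCLK = 24 MHz: pll_mul = 30 gives PLL2 = 720 MHz; dac_div = 2 yields
+ * DAC_CLK = 360 MHz, while sys_pre_div = 5 and sys_div = 0 (/1) yield
+ * SCLK = 720 / 5 = 144 MHz.
+ */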
+
+struct ov8865_pll2_config {
+ unsigned int pll_pre_div_half;
+ unsigned int pll_pre_div;
+ unsigned int pll_mul;
+ unsigned int dac_div;
+ unsigned int sys_pre_div;
+ unsigned int sys_div;
+};
+
+struct ov8865_sclk_config {
+ unsigned int sys_sel;
+ unsigned int sclk_sel;
+ unsigned int sclk_pre_div;
+ unsigned int sclk_div;
+};
+
+/*
+ * General formulas for (array-centered) mode calculation:
+ * - photo_array_width = 3296
+ * - crop_start_x = (photo_array_width - output_size_x) / 2
+ * - crop_end_x = crop_start_x + offset_x + output_size_x - 1
+ *
+ * - photo_array_height = 2480
+ * - crop_start_y = (photo_array_height - output_size_y) / 2
+ * - crop_end_y = crop_start_y + offset_y + output_size_y - 1
+ */
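+
+/*
+ * For instance, a centered 3264x2448 mode with offset_x = 0 gives
+ * crop_start_x = (3296 - 3264) / 2 = 16 and
+ * crop_end_x = 16 + 0 + 3264 - 1 = 3279.
+ */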
+
+struct ov8865_mode {
+ unsigned int crop_start_x;
+ unsigned int offset_x;
+ unsigned int output_size_x;
+ unsigned int crop_end_x;
+ unsigned int hts;
+
+ unsigned int crop_start_y;
+ unsigned int offset_y;
+ unsigned int output_size_y;
+ unsigned int crop_end_y;
+ unsigned int vts;
+
+ /* With auto size, only output and total sizes need to be set. */
+ bool size_auto;
+ unsigned int size_auto_boundary_x;
+ unsigned int size_auto_boundary_y;
+
+ bool binning_x;
+ bool binning_y;
+ bool variopixel;
+ unsigned int variopixel_hsub_coef;
+ unsigned int variopixel_vsub_coef;
+
+ /* Bits for the FORMAT2 register, used for binning. */
+ bool sync_hbin;
+ bool horz_var2;
+
+ unsigned int inc_x_odd;
+ unsigned int inc_x_even;
+ unsigned int inc_y_odd;
+ unsigned int inc_y_even;
+
+ unsigned int vfifo_read_start;
+
+ unsigned int ablc_num;
+ unsigned int zline_num;
+
+ unsigned int blc_top_zero_line_start;
+ unsigned int blc_top_zero_line_num;
+ unsigned int blc_top_black_line_start;
+ unsigned int blc_top_black_line_num;
+
+ unsigned int blc_bottom_zero_line_start;
+ unsigned int blc_bottom_zero_line_num;
+ unsigned int blc_bottom_black_line_start;
+ unsigned int blc_bottom_black_line_num;
+
+ u8 blc_col_shift_mask;
+
+ unsigned int blc_anchor_left_start;
+ unsigned int blc_anchor_left_end;
+ unsigned int blc_anchor_right_start;
+ unsigned int blc_anchor_right_end;
+
+ struct v4l2_fract frame_interval;
+
+ const struct ov8865_pll1_config *pll1_config;
+ const struct ov8865_pll2_config *pll2_config;
+ const struct ov8865_sclk_config *sclk_config;
+
+ const struct ov8865_register_value *register_values;
+ unsigned int register_values_count;
+};
+
+struct ov8865_state {
+ const struct ov8865_mode *mode;
+ u32 mbus_code;
+
+ bool streaming;
+};
+
+struct ov8865_ctrls {
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct v4l2_ctrl_handler handler;
+};
+
+struct ov8865_sensor {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerdown;
+ struct regulator *avdd;
+ struct regulator *dvdd;
+ struct regulator *dovdd;
+ struct clk *extclk;
+
+ struct v4l2_fwnode_endpoint endpoint;
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+
+ struct mutex mutex;
+
+ struct ov8865_state state;
+ struct ov8865_ctrls ctrls;
+};
+
+/* Static definitions */
+
+/*
+ * EXTCLK = 24 MHz
+ * PHY_SCLK = 720 MHz
+ * MIPI_PCLK = 90 MHz
+ */
+static const struct ov8865_pll1_config ov8865_pll1_config_native = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 0,
+ .pll_mul = 30,
+ .m_div = 1,
+ .mipi_div = 3,
+ .pclk_div = 1,
+ .sys_pre_div = 1,
+ .sys_div = 2,
+};
+
+/*
+ * EXTCLK = 24 MHz
+ * DAC_CLK = 360 MHz
+ * SCLK = 144 MHz
+ */
+
+static const struct ov8865_pll2_config ov8865_pll2_config_native = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 0,
+ .pll_mul = 30,
+ .dac_div = 2,
+ .sys_pre_div = 5,
+ .sys_div = 0,
+};
+
+/*
+ * EXTCLK = 24 MHz
+ * DAC_CLK = 360 MHz
+ * SCLK = 80 MHz
+ */
+
+static const struct ov8865_pll2_config ov8865_pll2_config_binning = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 0,
+ .pll_mul = 30,
+ .dac_div = 2,
+ .sys_pre_div = 10,
+ .sys_div = 0,
+};
+
+static const struct ov8865_sclk_config ov8865_sclk_config_native = {
+ .sys_sel = 1,
+ .sclk_sel = 0,
+ .sclk_pre_div = 0,
+ .sclk_div = 0,
+};
+
+static const struct ov8865_register_value ov8865_register_values_native[] = {
+ /* Sensor */
+
+ { 0x3700, 0x48 },
+ { 0x3701, 0x18 },
+ { 0x3702, 0x50 },
+ { 0x3703, 0x32 },
+ { 0x3704, 0x28 },
+ { 0x3706, 0x70 },
+ { 0x3707, 0x08 },
+ { 0x3708, 0x48 },
+ { 0x3709, 0x80 },
+ { 0x370a, 0x01 },
+ { 0x370b, 0x70 },
+ { 0x370c, 0x07 },
+ { 0x3718, 0x14 },
+ { 0x3712, 0x44 },
+ { 0x371e, 0x31 },
+ { 0x371f, 0x7f },
+ { 0x3720, 0x0a },
+ { 0x3721, 0x0a },
+ { 0x3724, 0x04 },
+ { 0x3725, 0x04 },
+ { 0x3726, 0x0c },
+ { 0x3728, 0x0a },
+ { 0x3729, 0x03 },
+ { 0x372a, 0x06 },
+ { 0x372b, 0xa6 },
+ { 0x372c, 0xa6 },
+ { 0x372d, 0xa6 },
+ { 0x372e, 0x0c },
+ { 0x372f, 0x20 },
+ { 0x3730, 0x02 },
+ { 0x3731, 0x0c },
+ { 0x3732, 0x28 },
+ { 0x3736, 0x30 },
+ { 0x373a, 0x04 },
+ { 0x373b, 0x18 },
+ { 0x373c, 0x14 },
+ { 0x373e, 0x06 },
+ { 0x375a, 0x0c },
+ { 0x375b, 0x26 },
+ { 0x375d, 0x04 },
+ { 0x375f, 0x28 },
+ { 0x3767, 0x1e },
+ { 0x3772, 0x46 },
+ { 0x3773, 0x04 },
+ { 0x3774, 0x2c },
+ { 0x3775, 0x13 },
+ { 0x3776, 0x10 },
+ { 0x37a0, 0x88 },
+ { 0x37a1, 0x7a },
+ { 0x37a2, 0x7a },
+ { 0x37a3, 0x02 },
+ { 0x37a5, 0x09 },
+ { 0x37a7, 0x88 },
+ { 0x37a8, 0xb0 },
+ { 0x37a9, 0xb0 },
+ { 0x37aa, 0x88 },
+ { 0x37ab, 0x5c },
+ { 0x37ac, 0x5c },
+ { 0x37ad, 0x55 },
+ { 0x37ae, 0x19 },
+ { 0x37af, 0x19 },
+ { 0x37b3, 0x84 },
+ { 0x37b4, 0x84 },
+ { 0x37b5, 0x66 },
+
+ /* PSRAM */
+
+ { OV8865_PSRAM_CTRL8_REG, 0x16 },
+
+ /* ADC Sync */
+
+ { 0x4500, 0x68 },
+};
+
+static const struct ov8865_register_value ov8865_register_values_binning[] = {
+ /* Sensor */
+
+ { 0x3700, 0x24 },
+ { 0x3701, 0x0c },
+ { 0x3702, 0x28 },
+ { 0x3703, 0x19 },
+ { 0x3704, 0x14 },
+ { 0x3706, 0x38 },
+ { 0x3707, 0x04 },
+ { 0x3708, 0x24 },
+ { 0x3709, 0x40 },
+ { 0x370a, 0x00 },
+ { 0x370b, 0xb8 },
+ { 0x370c, 0x04 },
+ { 0x3718, 0x12 },
+ { 0x3712, 0x42 },
+ { 0x371e, 0x19 },
+ { 0x371f, 0x40 },
+ { 0x3720, 0x05 },
+ { 0x3721, 0x05 },
+ { 0x3724, 0x02 },
+ { 0x3725, 0x02 },
+ { 0x3726, 0x06 },
+ { 0x3728, 0x05 },
+ { 0x3729, 0x02 },
+ { 0x372a, 0x03 },
+ { 0x372b, 0x53 },
+ { 0x372c, 0xa3 },
+ { 0x372d, 0x53 },
+ { 0x372e, 0x06 },
+ { 0x372f, 0x10 },
+ { 0x3730, 0x01 },
+ { 0x3731, 0x06 },
+ { 0x3732, 0x14 },
+ { 0x3736, 0x20 },
+ { 0x373a, 0x02 },
+ { 0x373b, 0x0c },
+ { 0x373c, 0x0a },
+ { 0x373e, 0x03 },
+ { 0x375a, 0x06 },
+ { 0x375b, 0x13 },
+ { 0x375d, 0x02 },
+ { 0x375f, 0x14 },
+ { 0x3767, 0x1c },
+ { 0x3772, 0x23 },
+ { 0x3773, 0x02 },
+ { 0x3774, 0x16 },
+ { 0x3775, 0x12 },
+ { 0x3776, 0x08 },
+ { 0x37a0, 0x44 },
+ { 0x37a1, 0x3d },
+ { 0x37a2, 0x3d },
+ { 0x37a3, 0x01 },
+ { 0x37a5, 0x08 },
+ { 0x37a7, 0x44 },
+ { 0x37a8, 0x58 },
+ { 0x37a9, 0x58 },
+ { 0x37aa, 0x44 },
+ { 0x37ab, 0x2e },
+ { 0x37ac, 0x2e },
+ { 0x37ad, 0x33 },
+ { 0x37ae, 0x0d },
+ { 0x37af, 0x0d },
+ { 0x37b3, 0x42 },
+ { 0x37b4, 0x42 },
+ { 0x37b5, 0x33 },
+
+ /* PSRAM */
+
+ { OV8865_PSRAM_CTRL8_REG, 0x0b },
+
+ /* ADC Sync */
+
+ { 0x4500, 0x40 },
+};
+
+static const struct ov8865_mode ov8865_modes[] = {
+ /* 3264x2448 */
+ {
+ /* Horizontal */
+ .output_size_x = 3264,
+ .hts = 1944,
+
+ /* Vertical */
+ .output_size_y = 2448,
+ .vts = 2470,
+
+ .size_auto = true,
+ .size_auto_boundary_x = 8,
+ .size_auto_boundary_y = 4,
+
+ /* Subsample increase */
+ .inc_x_odd = 1,
+ .inc_x_even = 1,
+ .inc_y_odd = 1,
+ .inc_y_even = 1,
+
+ /* VFIFO */
+ .vfifo_read_start = 16,
+
+ .ablc_num = 4,
+ .zline_num = 1,
+
+ /* Black Level */
+
+ .blc_top_zero_line_start = 0,
+ .blc_top_zero_line_num = 2,
+ .blc_top_black_line_start = 4,
+ .blc_top_black_line_num = 4,
+
+ .blc_bottom_zero_line_start = 2,
+ .blc_bottom_zero_line_num = 2,
+ .blc_bottom_black_line_start = 8,
+ .blc_bottom_black_line_num = 2,
+
+ .blc_anchor_left_start = 576,
+ .blc_anchor_left_end = 831,
+ .blc_anchor_right_start = 1984,
+ .blc_anchor_right_end = 2239,
+
+ /* Frame Interval */
+ .frame_interval = { 1, 30 },
+
+ /* PLL */
+ .pll1_config = &ov8865_pll1_config_native,
+ .pll2_config = &ov8865_pll2_config_native,
+ .sclk_config = &ov8865_sclk_config_native,
+
+ /* Registers */
+ .register_values = ov8865_register_values_native,
+ .register_values_count =
+ ARRAY_SIZE(ov8865_register_values_native),
+ },
+ /* 3264x1836 */
+ {
+ /* Horizontal */
+ .output_size_x = 3264,
+ .hts = 2582,
+
+ /* Vertical */
+ .output_size_y = 1836,
+ .vts = 2002,
+
+ .size_auto = true,
+ .size_auto_boundary_x = 8,
+ .size_auto_boundary_y = 4,
+
+ /* Subsample increase */
+ .inc_x_odd = 1,
+ .inc_x_even = 1,
+ .inc_y_odd = 1,
+ .inc_y_even = 1,
+
+ /* VFIFO */
+ .vfifo_read_start = 16,
+
+ .ablc_num = 4,
+ .zline_num = 1,
+
+ /* Black Level */
+
+ .blc_top_zero_line_start = 0,
+ .blc_top_zero_line_num = 2,
+ .blc_top_black_line_start = 4,
+ .blc_top_black_line_num = 4,
+
+ .blc_bottom_zero_line_start = 2,
+ .blc_bottom_zero_line_num = 2,
+ .blc_bottom_black_line_start = 8,
+ .blc_bottom_black_line_num = 2,
+
+ .blc_anchor_left_start = 576,
+ .blc_anchor_left_end = 831,
+ .blc_anchor_right_start = 1984,
+ .blc_anchor_right_end = 2239,
+
+ /* Frame Interval */
+ .frame_interval = { 1, 30 },
+
+ /* PLL */
+ .pll1_config = &ov8865_pll1_config_native,
+ .pll2_config = &ov8865_pll2_config_native,
+ .sclk_config = &ov8865_sclk_config_native,
+
+ /* Registers */
+ .register_values = ov8865_register_values_native,
+ .register_values_count =
+ ARRAY_SIZE(ov8865_register_values_native),
+ },
+ /* 1632x1224 */
+ {
+ /* Horizontal */
+ .output_size_x = 1632,
+ .hts = 1923,
+
+ /* Vertical */
+ .output_size_y = 1224,
+ .vts = 1248,
+
+ .size_auto = true,
+ .size_auto_boundary_x = 8,
+ .size_auto_boundary_y = 8,
+
+ /* Subsample increase */
+ .inc_x_odd = 3,
+ .inc_x_even = 1,
+ .inc_y_odd = 3,
+ .inc_y_even = 1,
+
+ /* Binning */
+ .binning_y = true,
+ .sync_hbin = true,
+
+ /* VFIFO */
+ .vfifo_read_start = 116,
+
+ .ablc_num = 8,
+ .zline_num = 2,
+
+ /* Black Level */
+
+ .blc_top_zero_line_start = 0,
+ .blc_top_zero_line_num = 2,
+ .blc_top_black_line_start = 4,
+ .blc_top_black_line_num = 4,
+
+ .blc_bottom_zero_line_start = 2,
+ .blc_bottom_zero_line_num = 2,
+ .blc_bottom_black_line_start = 8,
+ .blc_bottom_black_line_num = 2,
+
+ .blc_anchor_left_start = 288,
+ .blc_anchor_left_end = 415,
+ .blc_anchor_right_start = 992,
+ .blc_anchor_right_end = 1119,
+
+ /* Frame Interval */
+ .frame_interval = { 1, 30 },
+
+ /* PLL */
+ .pll1_config = &ov8865_pll1_config_native,
+ .pll2_config = &ov8865_pll2_config_binning,
+ .sclk_config = &ov8865_sclk_config_native,
+
+ /* Registers */
+ .register_values = ov8865_register_values_binning,
+ .register_values_count =
+ ARRAY_SIZE(ov8865_register_values_binning),
+ },
+ /* 800x600 (SVGA) */
+ {
+ /* Horizontal */
+ .output_size_x = 800,
+ .hts = 1250,
+
+ /* Vertical */
+ .output_size_y = 600,
+ .vts = 640,
+
+ .size_auto = true,
+ .size_auto_boundary_x = 8,
+ .size_auto_boundary_y = 8,
+
+ /* Subsample increase */
+ .inc_x_odd = 3,
+ .inc_x_even = 1,
+ .inc_y_odd = 5,
+ .inc_y_even = 3,
+
+ /* Binning */
+ .binning_y = true,
+ .variopixel = true,
+ .variopixel_hsub_coef = 2,
+ .variopixel_vsub_coef = 1,
+ .sync_hbin = true,
+ .horz_var2 = true,
+
+ /* VFIFO */
+ .vfifo_read_start = 80,
+
+ .ablc_num = 8,
+ .zline_num = 2,
+
+ /* Black Level */
+
+ .blc_top_zero_line_start = 0,
+ .blc_top_zero_line_num = 2,
+ .blc_top_black_line_start = 2,
+ .blc_top_black_line_num = 2,
+
+ .blc_bottom_zero_line_start = 0,
+ .blc_bottom_zero_line_num = 0,
+ .blc_bottom_black_line_start = 4,
+ .blc_bottom_black_line_num = 2,
+
+ .blc_col_shift_mask = OV8865_BLC_CTRL1_COL_SHIFT_128,
+
+ .blc_anchor_left_start = 288,
+ .blc_anchor_left_end = 415,
+ .blc_anchor_right_start = 992,
+ .blc_anchor_right_end = 1119,
+
+ /* Frame Interval */
+ .frame_interval = { 1, 90 },
+
+ /* PLL */
+ .pll1_config = &ov8865_pll1_config_native,
+ .pll2_config = &ov8865_pll2_config_binning,
+ .sclk_config = &ov8865_sclk_config_native,
+
+ /* Registers */
+ .register_values = ov8865_register_values_binning,
+ .register_values_count =
+ ARRAY_SIZE(ov8865_register_values_binning),
+ },
+};
+
+static const u32 ov8865_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+};
+
+static const struct ov8865_register_value ov8865_init_sequence[] = {
+ /* Analog */
+
+ { 0x3604, 0x04 },
+ { 0x3602, 0x30 },
+ { 0x3605, 0x00 },
+ { 0x3607, 0x20 },
+ { 0x3608, 0x11 },
+ { 0x3609, 0x68 },
+ { 0x360a, 0x40 },
+ { 0x360c, 0xdd },
+ { 0x360e, 0x0c },
+ { 0x3610, 0x07 },
+ { 0x3612, 0x86 },
+ { 0x3613, 0x58 },
+ { 0x3614, 0x28 },
+ { 0x3617, 0x40 },
+ { 0x3618, 0x5a },
+ { 0x3619, 0x9b },
+ { 0x361c, 0x00 },
+ { 0x361d, 0x60 },
+ { 0x3631, 0x60 },
+ { 0x3633, 0x10 },
+ { 0x3634, 0x10 },
+ { 0x3635, 0x10 },
+ { 0x3636, 0x10 },
+ { 0x3638, 0xff },
+ { 0x3641, 0x55 },
+ { 0x3646, 0x86 },
+ { 0x3647, 0x27 },
+ { 0x364a, 0x1b },
+
+ /* Sensor */
+
+ { 0x3700, 0x24 },
+ { 0x3701, 0x0c },
+ { 0x3702, 0x28 },
+ { 0x3703, 0x19 },
+ { 0x3704, 0x14 },
+ { 0x3705, 0x00 },
+ { 0x3706, 0x38 },
+ { 0x3707, 0x04 },
+ { 0x3708, 0x24 },
+ { 0x3709, 0x40 },
+ { 0x370a, 0x00 },
+ { 0x370b, 0xb8 },
+ { 0x370c, 0x04 },
+ { 0x3718, 0x12 },
+ { 0x3719, 0x31 },
+ { 0x3712, 0x42 },
+ { 0x3714, 0x12 },
+ { 0x371e, 0x19 },
+ { 0x371f, 0x40 },
+ { 0x3720, 0x05 },
+ { 0x3721, 0x05 },
+ { 0x3724, 0x02 },
+ { 0x3725, 0x02 },
+ { 0x3726, 0x06 },
+ { 0x3728, 0x05 },
+ { 0x3729, 0x02 },
+ { 0x372a, 0x03 },
+ { 0x372b, 0x53 },
+ { 0x372c, 0xa3 },
+ { 0x372d, 0x53 },
+ { 0x372e, 0x06 },
+ { 0x372f, 0x10 },
+ { 0x3730, 0x01 },
+ { 0x3731, 0x06 },
+ { 0x3732, 0x14 },
+ { 0x3733, 0x10 },
+ { 0x3734, 0x40 },
+ { 0x3736, 0x20 },
+ { 0x373a, 0x02 },
+ { 0x373b, 0x0c },
+ { 0x373c, 0x0a },
+ { 0x373e, 0x03 },
+ { 0x3755, 0x40 },
+ { 0x3758, 0x00 },
+ { 0x3759, 0x4c },
+ { 0x375a, 0x06 },
+ { 0x375b, 0x13 },
+ { 0x375c, 0x40 },
+ { 0x375d, 0x02 },
+ { 0x375e, 0x00 },
+ { 0x375f, 0x14 },
+ { 0x3767, 0x1c },
+ { 0x3768, 0x04 },
+ { 0x3769, 0x20 },
+ { 0x376c, 0xc0 },
+ { 0x376d, 0xc0 },
+ { 0x376a, 0x08 },
+ { 0x3761, 0x00 },
+ { 0x3762, 0x00 },
+ { 0x3763, 0x00 },
+ { 0x3766, 0xff },
+ { 0x376b, 0x42 },
+ { 0x3772, 0x23 },
+ { 0x3773, 0x02 },
+ { 0x3774, 0x16 },
+ { 0x3775, 0x12 },
+ { 0x3776, 0x08 },
+ { 0x37a0, 0x44 },
+ { 0x37a1, 0x3d },
+ { 0x37a2, 0x3d },
+ { 0x37a3, 0x01 },
+ { 0x37a4, 0x00 },
+ { 0x37a5, 0x08 },
+ { 0x37a6, 0x00 },
+ { 0x37a7, 0x44 },
+ { 0x37a8, 0x58 },
+ { 0x37a9, 0x58 },
+ { 0x3760, 0x00 },
+ { 0x376f, 0x01 },
+ { 0x37aa, 0x44 },
+ { 0x37ab, 0x2e },
+ { 0x37ac, 0x2e },
+ { 0x37ad, 0x33 },
+ { 0x37ae, 0x0d },
+ { 0x37af, 0x0d },
+ { 0x37b0, 0x00 },
+ { 0x37b1, 0x00 },
+ { 0x37b2, 0x00 },
+ { 0x37b3, 0x42 },
+ { 0x37b4, 0x42 },
+ { 0x37b5, 0x33 },
+ { 0x37b6, 0x00 },
+ { 0x37b7, 0x00 },
+ { 0x37b8, 0x00 },
+ { 0x37b9, 0xff },
+
+ /* ADC Sync */
+
+ { 0x4503, 0x10 },
+};
+
+static const s64 ov8865_link_freq_menu[] = {
+ 360000000,
+};
+
+static const char *const ov8865_test_pattern_menu[] = {
+ "Disabled",
+ "Random data",
+ "Color bars",
+ "Color bars with rolling bar",
+ "Color squares",
+ "Color squares with rolling bar"
+};
+
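+/* PRE_CTRL0 register bits matching each ov8865_test_pattern_menu entry. */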
+static const u8 ov8865_test_pattern_bits[] = {
+ 0,
+ OV8865_PRE_CTRL0_PATTERN_EN | OV8865_PRE_CTRL0_PATTERN_RANDOM_DATA,
+ OV8865_PRE_CTRL0_PATTERN_EN | OV8865_PRE_CTRL0_PATTERN_COLOR_BARS,
+ OV8865_PRE_CTRL0_PATTERN_EN | OV8865_PRE_CTRL0_ROLLING_BAR_EN |
+ OV8865_PRE_CTRL0_PATTERN_COLOR_BARS,
+ OV8865_PRE_CTRL0_PATTERN_EN | OV8865_PRE_CTRL0_PATTERN_COLOR_SQUARES,
+ OV8865_PRE_CTRL0_PATTERN_EN | OV8865_PRE_CTRL0_ROLLING_BAR_EN |
+ OV8865_PRE_CTRL0_PATTERN_COLOR_SQUARES,
+};
+
+/* Input/Output */
+
+static int ov8865_read(struct ov8865_sensor *sensor, u16 address, u8 *value)
+{
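+ /* Register addresses are 16 bits wide and transferred big-endian. */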
+ unsigned char data[2] = { address >> 8, address & 0xff };
+ struct i2c_client *client = sensor->i2c_client;
+ int ret;
+
+ ret = i2c_master_send(client, data, sizeof(data));
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c send error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, value, 1);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c recv error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov8865_write(struct ov8865_sensor *sensor, u16 address, u8 value)
+{
+ unsigned char data[3] = { address >> 8, address & 0xff, value };
+ struct i2c_client *client = sensor->i2c_client;
+ int ret;
+
+ ret = i2c_master_send(client, data, sizeof(data));
+ if (ret < 0) {
+ dev_dbg(&client->dev, "i2c send error at address %#04x\n",
+ address);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov8865_write_sequence(struct ov8865_sensor *sensor,
+ const struct ov8865_register_value *sequence,
+ unsigned int sequence_count)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < sequence_count; i++) {
+ ret = ov8865_write(sensor, sequence[i].address,
+ sequence[i].value);
+ if (ret)
+ break;
+
+ if (sequence[i].delay_ms)
+ msleep(sequence[i].delay_ms);
+ }
+
+ return ret;
+}
+
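+/* Read-modify-write helper: clear the mask bits, then set the given bits. */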
+static int ov8865_update_bits(struct ov8865_sensor *sensor, u16 address,
+ u8 mask, u8 bits)
+{
+ u8 value = 0;
+ int ret;
+
+ ret = ov8865_read(sensor, address, &value);
+ if (ret)
+ return ret;
+
+ value &= ~mask;
+ value |= bits;
+
+ return ov8865_write(sensor, address, value);
+}
+
+/* Sensor */
+
+static int ov8865_sw_reset(struct ov8865_sensor *sensor)
+{
+ return ov8865_write(sensor, OV8865_SW_RESET_REG, OV8865_SW_RESET_RESET);
+}
+
+static int ov8865_sw_standby(struct ov8865_sensor *sensor, int standby)
+{
+ u8 value = 0;
+
+ if (!standby)
+ value = OV8865_SW_STANDBY_STREAM_ON;
+
+ return ov8865_write(sensor, OV8865_SW_STANDBY_REG, value);
+}
+
+static int ov8865_chip_id_check(struct ov8865_sensor *sensor)
+{
+ u16 regs[] = { OV8865_CHIP_ID_HH_REG, OV8865_CHIP_ID_H_REG,
+ OV8865_CHIP_ID_L_REG };
+ u8 values[] = { OV8865_CHIP_ID_HH_VALUE, OV8865_CHIP_ID_H_VALUE,
+ OV8865_CHIP_ID_L_VALUE };
+ unsigned int i;
+ u8 value;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = ov8865_read(sensor, regs[i], &value);
+ if (ret < 0)
+ return ret;
+
+ if (value != values[i]) {
+ dev_err(sensor->dev,
+ "chip id value mismatch: %#x instead of %#x\n",
+ value, values[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ov8865_charge_pump_configure(struct ov8865_sensor *sensor)
+{
+ return ov8865_write(sensor, OV8865_PUMP_CLK_DIV_REG,
+ OV8865_PUMP_CLK_DIV_PUMP_P(1));
+}
+
+static int ov8865_mipi_configure(struct ov8865_sensor *sensor)
+{
+ struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ &sensor->endpoint.bus.mipi_csi2;
+ unsigned int lanes_count = bus_mipi_csi2->num_data_lanes;
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_MIPI_SC_CTRL0_REG,
+ OV8865_MIPI_SC_CTRL0_LANES(lanes_count) |
+ OV8865_MIPI_SC_CTRL0_MIPI_EN |
+ OV8865_MIPI_SC_CTRL0_UNKNOWN);
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_MIPI_SC_CTRL2_REG,
+ OV8865_MIPI_SC_CTRL2_PD_MIPI_RST_SYNC);
+ if (ret)
+ return ret;
+
+ if (lanes_count >= 2) {
+ ret = ov8865_write(sensor, OV8865_MIPI_LANE_SEL01_REG,
+ OV8865_MIPI_LANE_SEL01_LANE0(0) |
+ OV8865_MIPI_LANE_SEL01_LANE1(1));
+ if (ret)
+ return ret;
+ }
+
+ if (lanes_count >= 4) {
+ ret = ov8865_write(sensor, OV8865_MIPI_LANE_SEL23_REG,
+ OV8865_MIPI_LANE_SEL23_LANE2(2) |
+ OV8865_MIPI_LANE_SEL23_LANE3(3));
+ if (ret)
+ return ret;
+ }
+
+ ret = ov8865_update_bits(sensor, OV8865_CLK_SEL1_REG,
+ OV8865_CLK_SEL1_MIPI_EOF,
+ OV8865_CLK_SEL1_MIPI_EOF);
+ if (ret)
+ return ret;
+
+ /*
+ * This value might need to change depending on the PCLK rate, but it
+ * is unclear how. It seems to work in general, while the default
+ * value was found to cause transmission errors.
+ */
+ return ov8865_write(sensor, OV8865_MIPI_PCLK_PERIOD_REG, 0x16);
+}
+
+static int ov8865_black_level_configure(struct ov8865_sensor *sensor)
+{
+ int ret;
+
+ /* Trigger BLC on relevant events and enable filter. */
+ ret = ov8865_write(sensor, OV8865_BLC_CTRL0_REG,
+ OV8865_BLC_CTRL0_TRIG_RANGE_EN |
+ OV8865_BLC_CTRL0_TRIG_FORMAT_EN |
+ OV8865_BLC_CTRL0_TRIG_GAIN_EN |
+ OV8865_BLC_CTRL0_TRIG_EXPOSURE_EN |
+ OV8865_BLC_CTRL0_FILTER_EN);
+ if (ret)
+ return ret;
+
+ /* Lower BLC offset trigger threshold. */
+ ret = ov8865_write(sensor, OV8865_BLC_CTRLD_REG,
+ OV8865_BLC_CTRLD_OFFSET_TRIGGER(16));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_CTRL1F_REG, 0);
+ if (ret)
+ return ret;
+
+ /* Increase BLC offset maximum limit. */
+ return ov8865_write(sensor, OV8865_BLC_OFFSET_LIMIT_REG,
+ OV8865_BLC_OFFSET_LIMIT(63));
+}
+
+static int ov8865_isp_configure(struct ov8865_sensor *sensor)
+{
+ int ret;
+
+ /* Disable lens correction. */
+ ret = ov8865_write(sensor, OV8865_ISP_CTRL0_REG,
+ OV8865_ISP_CTRL0_WHITE_BALANCE_EN |
+ OV8865_ISP_CTRL0_DPC_BLACK_EN |
+ OV8865_ISP_CTRL0_DPC_WHITE_EN);
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_ISP_CTRL1_REG,
+ OV8865_ISP_CTRL1_BLC_EN);
+}
+
+static unsigned long ov8865_mode_pll1_rate(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ const struct ov8865_pll1_config *config = mode->pll1_config;
+ unsigned long extclk_rate;
+ unsigned long pll1_rate;
+
+ extclk_rate = clk_get_rate(sensor->extclk);
+ pll1_rate = extclk_rate * config->pll_mul / config->pll_pre_div_half;
+
+ switch (config->pll_pre_div) {
+ case 0:
+ break;
+ case 1:
+ pll1_rate *= 3;
+ pll1_rate /= 2;
+ break;
+ case 3:
+ pll1_rate *= 5;
+ pll1_rate /= 2;
+ break;
+ case 4:
+ pll1_rate /= 3;
+ break;
+ case 5:
+ pll1_rate /= 4;
+ break;
+ case 7:
+ pll1_rate /= 8;
+ break;
+ default:
+ pll1_rate /= config->pll_pre_div;
+ break;
+ }
+
+ return pll1_rate;
+}
+
+static int ov8865_mode_pll1_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode,
+ u32 mbus_code)
+{
+ const struct ov8865_pll1_config *config = mode->pll1_config;
+ u8 value;
+ int ret;
+
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ value = OV8865_MIPI_BIT_SEL(10);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ov8865_write(sensor, OV8865_MIPI_BIT_SEL_REG, value);
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRLA_REG,
+ OV8865_PLL_CTRLA_PRE_DIV_HALF(config->pll_pre_div_half));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL0_REG,
+ OV8865_PLL_CTRL0_PRE_DIV(config->pll_pre_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL1_REG,
+ OV8865_PLL_CTRL1_MUL_H(config->pll_mul));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL2_REG,
+ OV8865_PLL_CTRL2_MUL_L(config->pll_mul));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL3_REG,
+ OV8865_PLL_CTRL3_M_DIV(config->m_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL4_REG,
+ OV8865_PLL_CTRL4_MIPI_DIV(config->mipi_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_update_bits(sensor, OV8865_PCLK_SEL_REG,
+ OV8865_PCLK_SEL_PCLK_DIV_MASK,
+ OV8865_PCLK_SEL_PCLK_DIV(config->pclk_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL5_REG,
+ OV8865_PLL_CTRL5_SYS_PRE_DIV(config->sys_pre_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL6_REG,
+ OV8865_PLL_CTRL6_SYS_DIV(config->sys_div));
+ if (ret)
+ return ret;
+
+ return ov8865_update_bits(sensor, OV8865_PLL_CTRL1E_REG,
+ OV8865_PLL_CTRL1E_PLL1_NO_LAT,
+ OV8865_PLL_CTRL1E_PLL1_NO_LAT);
+}
+
+static int ov8865_mode_pll2_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ const struct ov8865_pll2_config *config = mode->pll2_config;
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRL12_REG,
+ OV8865_PLL_CTRL12_PRE_DIV_HALF(config->pll_pre_div_half) |
+ OV8865_PLL_CTRL12_DAC_DIV(config->dac_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRLB_REG,
+ OV8865_PLL_CTRLB_PRE_DIV(config->pll_pre_div));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRLC_REG,
+ OV8865_PLL_CTRLC_MUL_H(config->pll_mul));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRLD_REG,
+ OV8865_PLL_CTRLD_MUL_L(config->pll_mul));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_PLL_CTRLF_REG,
+ OV8865_PLL_CTRLF_SYS_PRE_DIV(config->sys_pre_div));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_PLL_CTRLE_REG,
+ OV8865_PLL_CTRLE_SYS_DIV(config->sys_div));
+}
+
+static int ov8865_mode_sclk_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ const struct ov8865_sclk_config *config = mode->sclk_config;
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_CLK_SEL0_REG,
+ OV8865_CLK_SEL0_PLL1_SYS_SEL(config->sys_sel));
+ if (ret)
+ return ret;
+
+ ret = ov8865_update_bits(sensor, OV8865_CLK_SEL1_REG,
+ OV8865_CLK_SEL1_PLL_SCLK_SEL_MASK,
+ OV8865_CLK_SEL1_PLL_SCLK_SEL(config->sclk_sel));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_SCLK_CTRL_REG,
+ OV8865_SCLK_CTRL_UNKNOWN |
+ OV8865_SCLK_CTRL_SCLK_DIV(config->sclk_div) |
+ OV8865_SCLK_CTRL_SCLK_PRE_DIV(config->sclk_pre_div));
+}
+
+static int ov8865_mode_binning_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ unsigned int variopixel_hsub_coef, variopixel_vsub_coef;
+ u8 value;
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_FORMAT1_REG, 0);
+ if (ret)
+ return ret;
+
+ value = OV8865_FORMAT2_HSYNC_EN;
+
+ if (mode->binning_x)
+ value |= OV8865_FORMAT2_FST_HBIN_EN;
+
+ if (mode->binning_y)
+ value |= OV8865_FORMAT2_FST_VBIN_EN;
+
+ if (mode->sync_hbin)
+ value |= OV8865_FORMAT2_SYNC_HBIN_EN;
+
+ if (mode->horz_var2)
+ value |= OV8865_FORMAT2_ISP_HORZ_VAR2_EN;
+
+ ret = ov8865_write(sensor, OV8865_FORMAT2_REG, value);
+ if (ret)
+ return ret;
+
+ ret = ov8865_update_bits(sensor, OV8865_ISP_CTRL2_REG,
+ OV8865_ISP_CTRL2_VARIOPIXEL_EN,
+ mode->variopixel ?
+ OV8865_ISP_CTRL2_VARIOPIXEL_EN : 0);
+ if (ret)
+ return ret;
+
+ if (mode->variopixel) {
+ /* VarioPixel coefs need to be at least 1. */
+ variopixel_hsub_coef = mode->variopixel_hsub_coef;
+ variopixel_vsub_coef = mode->variopixel_vsub_coef;
+ } else {
+ variopixel_hsub_coef = 1;
+ variopixel_vsub_coef = 1;
+ }
+
+ ret = ov8865_write(sensor, OV8865_VAP_CTRL1_REG,
+ OV8865_VAP_CTRL1_HSUB_COEF(variopixel_hsub_coef) |
+ OV8865_VAP_CTRL1_VSUB_COEF(variopixel_vsub_coef));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_INC_X_ODD_REG,
+ OV8865_INC_X_ODD(mode->inc_x_odd));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_INC_X_EVEN_REG,
+ OV8865_INC_X_EVEN(mode->inc_x_even));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_INC_Y_ODD_REG,
+ OV8865_INC_Y_ODD(mode->inc_y_odd));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_INC_Y_EVEN_REG,
+ OV8865_INC_Y_EVEN(mode->inc_y_even));
+}
+
+static int ov8865_mode_black_level_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ int ret;
+
+ /* Note that a zero value for blc_col_shift_mask is the default 256. */
+ ret = ov8865_write(sensor, OV8865_BLC_CTRL1_REG,
+ mode->blc_col_shift_mask |
+ OV8865_BLC_CTRL1_OFFSET_LIMIT_EN);
+ if (ret)
+ return ret;
+
+ /* BLC top zero line */
+
+ ret = ov8865_write(sensor, OV8865_BLC_TOP_ZLINE_START_REG,
+ OV8865_BLC_TOP_ZLINE_START(mode->blc_top_zero_line_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_TOP_ZLINE_NUM_REG,
+ OV8865_BLC_TOP_ZLINE_NUM(mode->blc_top_zero_line_num));
+ if (ret)
+ return ret;
+
+ /* BLC top black line */
+
+ ret = ov8865_write(sensor, OV8865_BLC_TOP_BLKLINE_START_REG,
+ OV8865_BLC_TOP_BLKLINE_START(mode->blc_top_black_line_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_TOP_BLKLINE_NUM_REG,
+ OV8865_BLC_TOP_BLKLINE_NUM(mode->blc_top_black_line_num));
+ if (ret)
+ return ret;
+
+ /* BLC bottom zero line */
+
+ ret = ov8865_write(sensor, OV8865_BLC_BOT_ZLINE_START_REG,
+ OV8865_BLC_BOT_ZLINE_START(mode->blc_bottom_zero_line_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_BOT_ZLINE_NUM_REG,
+ OV8865_BLC_BOT_ZLINE_NUM(mode->blc_bottom_zero_line_num));
+ if (ret)
+ return ret;
+
+ /* BLC bottom black line */
+
+ ret = ov8865_write(sensor, OV8865_BLC_BOT_BLKLINE_START_REG,
+ OV8865_BLC_BOT_BLKLINE_START(mode->blc_bottom_black_line_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_BOT_BLKLINE_NUM_REG,
+ OV8865_BLC_BOT_BLKLINE_NUM(mode->blc_bottom_black_line_num));
+ if (ret)
+ return ret;
+
+ /* BLC anchor */
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_LEFT_START_H_REG,
+ OV8865_BLC_ANCHOR_LEFT_START_H(mode->blc_anchor_left_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_LEFT_START_L_REG,
+ OV8865_BLC_ANCHOR_LEFT_START_L(mode->blc_anchor_left_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_LEFT_END_H_REG,
+ OV8865_BLC_ANCHOR_LEFT_END_H(mode->blc_anchor_left_end));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_LEFT_END_L_REG,
+ OV8865_BLC_ANCHOR_LEFT_END_L(mode->blc_anchor_left_end));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_RIGHT_START_H_REG,
+ OV8865_BLC_ANCHOR_RIGHT_START_H(mode->blc_anchor_right_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_RIGHT_START_L_REG,
+ OV8865_BLC_ANCHOR_RIGHT_START_L(mode->blc_anchor_right_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_BLC_ANCHOR_RIGHT_END_H_REG,
+ OV8865_BLC_ANCHOR_RIGHT_END_H(mode->blc_anchor_right_end));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_BLC_ANCHOR_RIGHT_END_L_REG,
+ OV8865_BLC_ANCHOR_RIGHT_END_L(mode->blc_anchor_right_end));
+}
+
+static int ov8865_mode_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode, u32 mbus_code)
+{
+ int ret;
+
+ /* Output Size X */
+
+ ret = ov8865_write(sensor, OV8865_OUTPUT_SIZE_X_H_REG,
+ OV8865_OUTPUT_SIZE_X_H(mode->output_size_x));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_OUTPUT_SIZE_X_L_REG,
+ OV8865_OUTPUT_SIZE_X_L(mode->output_size_x));
+ if (ret)
+ return ret;
+
+ /* Horizontal Total Size */
+
+ ret = ov8865_write(sensor, OV8865_HTS_H_REG, OV8865_HTS_H(mode->hts));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_HTS_L_REG, OV8865_HTS_L(mode->hts));
+ if (ret)
+ return ret;
+
+ /* Output Size Y */
+
+ ret = ov8865_write(sensor, OV8865_OUTPUT_SIZE_Y_H_REG,
+ OV8865_OUTPUT_SIZE_Y_H(mode->output_size_y));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_OUTPUT_SIZE_Y_L_REG,
+ OV8865_OUTPUT_SIZE_Y_L(mode->output_size_y));
+ if (ret)
+ return ret;
+
+ /* Vertical Total Size */
+
+ ret = ov8865_write(sensor, OV8865_VTS_H_REG, OV8865_VTS_H(mode->vts));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_VTS_L_REG, OV8865_VTS_L(mode->vts));
+ if (ret)
+ return ret;
+
+ if (mode->size_auto) {
+ /* Auto Size */
+
+ ret = ov8865_write(sensor, OV8865_AUTO_SIZE_CTRL_REG,
+ OV8865_AUTO_SIZE_CTRL_OFFSET_Y_REG |
+ OV8865_AUTO_SIZE_CTRL_OFFSET_X_REG |
+ OV8865_AUTO_SIZE_CTRL_CROP_END_Y_REG |
+ OV8865_AUTO_SIZE_CTRL_CROP_END_X_REG |
+ OV8865_AUTO_SIZE_CTRL_CROP_START_Y_REG |
+ OV8865_AUTO_SIZE_CTRL_CROP_START_X_REG);
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_AUTO_SIZE_BOUNDARIES_REG,
+ OV8865_AUTO_SIZE_BOUNDARIES_Y(mode->size_auto_boundary_y) |
+ OV8865_AUTO_SIZE_BOUNDARIES_X(mode->size_auto_boundary_x));
+ if (ret)
+ return ret;
+ } else {
+ /* Crop Start X */
+
+ ret = ov8865_write(sensor, OV8865_CROP_START_X_H_REG,
+ OV8865_CROP_START_X_H(mode->crop_start_x));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_CROP_START_X_L_REG,
+ OV8865_CROP_START_X_L(mode->crop_start_x));
+ if (ret)
+ return ret;
+
+ /* Offset X */
+
+ ret = ov8865_write(sensor, OV8865_OFFSET_X_H_REG,
+ OV8865_OFFSET_X_H(mode->offset_x));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_OFFSET_X_L_REG,
+ OV8865_OFFSET_X_L(mode->offset_x));
+ if (ret)
+ return ret;
+
+ /* Crop End X */
+
+ ret = ov8865_write(sensor, OV8865_CROP_END_X_H_REG,
+ OV8865_CROP_END_X_H(mode->crop_end_x));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_CROP_END_X_L_REG,
+ OV8865_CROP_END_X_L(mode->crop_end_x));
+ if (ret)
+ return ret;
+
+ /* Crop Start Y */
+
+ ret = ov8865_write(sensor, OV8865_CROP_START_Y_H_REG,
+ OV8865_CROP_START_Y_H(mode->crop_start_y));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_CROP_START_Y_L_REG,
+ OV8865_CROP_START_Y_L(mode->crop_start_y));
+ if (ret)
+ return ret;
+
+ /* Offset Y */
+
+ ret = ov8865_write(sensor, OV8865_OFFSET_Y_H_REG,
+ OV8865_OFFSET_Y_H(mode->offset_y));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_OFFSET_Y_L_REG,
+ OV8865_OFFSET_Y_L(mode->offset_y));
+ if (ret)
+ return ret;
+
+ /* Crop End Y */
+
+ ret = ov8865_write(sensor, OV8865_CROP_END_Y_H_REG,
+ OV8865_CROP_END_Y_H(mode->crop_end_y));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_CROP_END_Y_L_REG,
+ OV8865_CROP_END_Y_L(mode->crop_end_y));
+ if (ret)
+ return ret;
+ }
+
+ /* VFIFO */
+
+ ret = ov8865_write(sensor, OV8865_VFIFO_READ_START_H_REG,
+ OV8865_VFIFO_READ_START_H(mode->vfifo_read_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_VFIFO_READ_START_L_REG,
+ OV8865_VFIFO_READ_START_L(mode->vfifo_read_start));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_ABLC_NUM_REG,
+ OV8865_ABLC_NUM(mode->ablc_num));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_ZLINE_NUM_REG,
+ OV8865_ZLINE_NUM(mode->zline_num));
+ if (ret)
+ return ret;
+
+ /* Binning */
+
+ ret = ov8865_mode_binning_configure(sensor, mode);
+ if (ret)
+ return ret;
+
+ /* Black Level */
+
+ ret = ov8865_mode_black_level_configure(sensor, mode);
+ if (ret)
+ return ret;
+
+ /* PLLs */
+
+ ret = ov8865_mode_pll1_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+
+ ret = ov8865_mode_pll2_configure(sensor, mode);
+ if (ret)
+ return ret;
+
+ ret = ov8865_mode_sclk_configure(sensor, mode);
+ if (ret)
+ return ret;
+
+ /* Extra registers */
+
+ if (mode->register_values) {
+ ret = ov8865_write_sequence(sensor, mode->register_values,
+ mode->register_values_count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned long ov8865_mode_mipi_clk_rate(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode)
+{
+ const struct ov8865_pll1_config *config = mode->pll1_config;
+ unsigned long pll1_rate;
+
+ pll1_rate = ov8865_mode_pll1_rate(sensor, mode);
+
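+ /*
+ * PHY_SCLK appears to be the per-lane DDR bit rate, so the link
+ * clock rate is half of it.
+ */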
+ return pll1_rate / config->m_div / 2;
+}
+
+/* Exposure */
+
+static int ov8865_exposure_configure(struct ov8865_sensor *sensor, u32 exposure)
+{
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_EXPOSURE_CTRL_HH_REG,
+ OV8865_EXPOSURE_CTRL_HH(exposure));
+ if (ret)
+ return ret;
+
+ ret = ov8865_write(sensor, OV8865_EXPOSURE_CTRL_H_REG,
+ OV8865_EXPOSURE_CTRL_H(exposure));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_EXPOSURE_CTRL_L_REG,
+ OV8865_EXPOSURE_CTRL_L(exposure));
+}
+
+/* Gain */
+
+static int ov8865_gain_configure(struct ov8865_sensor *sensor, u32 gain)
+{
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_GAIN_CTRL_H_REG,
+ OV8865_GAIN_CTRL_H(gain));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_GAIN_CTRL_L_REG,
+ OV8865_GAIN_CTRL_L(gain));
+}
+
+/* White Balance */
+
+static int ov8865_red_balance_configure(struct ov8865_sensor *sensor,
+ u32 red_balance)
+{
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_ISP_GAIN_RED_H_REG,
+ OV8865_ISP_GAIN_RED_H(red_balance));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_ISP_GAIN_RED_L_REG,
+ OV8865_ISP_GAIN_RED_L(red_balance));
+}
+
+static int ov8865_blue_balance_configure(struct ov8865_sensor *sensor,
+ u32 blue_balance)
+{
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_ISP_GAIN_BLUE_H_REG,
+ OV8865_ISP_GAIN_BLUE_H(blue_balance));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_ISP_GAIN_BLUE_L_REG,
+ OV8865_ISP_GAIN_BLUE_L(blue_balance));
+}
+
+/* Flip */
+
+static int ov8865_flip_vert_configure(struct ov8865_sensor *sensor, bool enable)
+{
+ u8 bits = OV8865_FORMAT1_FLIP_VERT_ISP_EN |
+ OV8865_FORMAT1_FLIP_VERT_SENSOR_EN;
+
+ return ov8865_update_bits(sensor, OV8865_FORMAT1_REG, bits,
+ enable ? bits : 0);
+}
+
+static int ov8865_flip_horz_configure(struct ov8865_sensor *sensor, bool enable)
+{
+ u8 bits = OV8865_FORMAT2_FLIP_HORZ_ISP_EN |
+ OV8865_FORMAT2_FLIP_HORZ_SENSOR_EN;
+
+ return ov8865_update_bits(sensor, OV8865_FORMAT2_REG, bits,
+ enable ? bits : 0);
+}
+
+/* Test Pattern */
+
+static int ov8865_test_pattern_configure(struct ov8865_sensor *sensor,
+ unsigned int index)
+{
+ if (index >= ARRAY_SIZE(ov8865_test_pattern_bits))
+ return -EINVAL;
+
+ return ov8865_write(sensor, OV8865_PRE_CTRL0_REG,
+ ov8865_test_pattern_bits[index]);
+}
+
+/* State */
+
+static int ov8865_state_mipi_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode,
+ u32 mbus_code)
+{
+ struct ov8865_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ &sensor->endpoint.bus.mipi_csi2;
+ unsigned long mipi_clk_rate;
+ unsigned int bits_per_sample;
+ unsigned int lanes_count;
+ unsigned int i, j;
+ s64 mipi_pixel_rate;
+
+ mipi_clk_rate = ov8865_mode_mipi_clk_rate(sensor, mode);
+ if (!mipi_clk_rate)
+ return -EINVAL;
+
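+ /*
+ * Look up the computed rate both in the driver's link frequency menu
+ * and in the endpoint's link-frequencies property.
+ */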
+ for (i = 0; i < ARRAY_SIZE(ov8865_link_freq_menu); i++) {
+ s64 freq = ov8865_link_freq_menu[i];
+
+ if (freq == mipi_clk_rate)
+ break;
+ }
+
+ for (j = 0; j < sensor->endpoint.nr_of_link_frequencies; j++) {
+ u64 freq = sensor->endpoint.link_frequencies[j];
+
+ if (freq == mipi_clk_rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ov8865_link_freq_menu)) {
+ dev_err(sensor->dev,
+ "failed to find %lu clk rate in link freq\n",
+ mipi_clk_rate);
+ } else if (j == sensor->endpoint.nr_of_link_frequencies) {
+ dev_err(sensor->dev,
+ "failed to find %lu clk rate in endpoint link-frequencies\n",
+ mipi_clk_rate);
+ } else {
+ __v4l2_ctrl_s_ctrl(ctrls->link_freq, i);
+ }
+
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ bits_per_sample = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+
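+ /* MIPI CSI-2 is DDR: each lane carries two bits per link clock cycle. */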
+ lanes_count = bus_mipi_csi2->num_data_lanes;
+ mipi_pixel_rate = mipi_clk_rate * 2 * lanes_count / bits_per_sample;
+
+ __v4l2_ctrl_s_ctrl_int64(ctrls->pixel_rate, mipi_pixel_rate);
+
+ return 0;
+}
+
+static int ov8865_state_configure(struct ov8865_sensor *sensor,
+ const struct ov8865_mode *mode,
+ u32 mbus_code)
+{
+ int ret;
+
+ if (sensor->state.streaming)
+ return -EBUSY;
+
+ /* Otherwise, the state will be configured at first power-on. */
+ if (pm_runtime_enabled(sensor->dev) &&
+ !pm_runtime_suspended(sensor->dev)) {
+ ret = ov8865_mode_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov8865_state_mipi_configure(sensor, mode, mbus_code);
+ if (ret)
+ return ret;
+
+ sensor->state.mode = mode;
+ sensor->state.mbus_code = mbus_code;
+
+ return 0;
+}
+
+static int ov8865_state_init(struct ov8865_sensor *sensor)
+{
+ return ov8865_state_configure(sensor, &ov8865_modes[0],
+ ov8865_mbus_codes[0]);
+}
+
+/* Sensor Base */
+
+static int ov8865_sensor_init(struct ov8865_sensor *sensor)
+{
+ int ret;
+
+ ret = ov8865_sw_reset(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to perform sw reset\n");
+ return ret;
+ }
+
+ ret = ov8865_sw_standby(sensor, 1);
+ if (ret) {
+ dev_err(sensor->dev, "failed to set sensor standby\n");
+ return ret;
+ }
+
+ ret = ov8865_chip_id_check(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to check sensor chip id\n");
+ return ret;
+ }
+
+ ret = ov8865_write_sequence(sensor, ov8865_init_sequence,
+ ARRAY_SIZE(ov8865_init_sequence));
+ if (ret) {
+ dev_err(sensor->dev, "failed to write init sequence\n");
+ return ret;
+ }
+
+ ret = ov8865_charge_pump_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure charge pump\n");
+ return ret;
+ }
+
+ ret = ov8865_mipi_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure MIPI\n");
+ return ret;
+ }
+
+ ret = ov8865_isp_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure ISP\n");
+ return ret;
+ }
+
+ ret = ov8865_black_level_configure(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure black level\n");
+ return ret;
+ }
+
+ /* Configure current mode. */
+ ret = ov8865_state_configure(sensor, sensor->state.mode,
+ sensor->state.mbus_code);
+ if (ret) {
+ dev_err(sensor->dev, "failed to configure state\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov8865_sensor_power(struct ov8865_sensor *sensor, bool on)
+{
+ /* Keep ret initialized to zero for the disable label. */
+ int ret = 0;
+
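+ /*
+ * Power-up: keep reset and powerdown asserted while the supplies and
+ * EXTCLK are brought up, then release them.
+ */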
+ if (on) {
+ gpiod_set_value_cansleep(sensor->reset, 1);
+ gpiod_set_value_cansleep(sensor->powerdown, 1);
+
+ ret = regulator_enable(sensor->dovdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable DOVDD regulator\n");
+ goto disable;
+ }
+
+ ret = regulator_enable(sensor->avdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable AVDD regulator\n");
+ goto disable;
+ }
+
+ ret = regulator_enable(sensor->dvdd);
+ if (ret) {
+ dev_err(sensor->dev,
+ "failed to enable DVDD regulator\n");
+ goto disable;
+ }
+
+ ret = clk_prepare_enable(sensor->extclk);
+ if (ret) {
+ dev_err(sensor->dev, "failed to enable EXTCLK clock\n");
+ goto disable;
+ }
+
+ gpiod_set_value_cansleep(sensor->reset, 0);
+ gpiod_set_value_cansleep(sensor->powerdown, 0);
+
+ /* Time to enter streaming mode according to power timings. */
+ usleep_range(10000, 12000);
+ } else {
+disable:
+ gpiod_set_value_cansleep(sensor->powerdown, 1);
+ gpiod_set_value_cansleep(sensor->reset, 1);
+
+ clk_disable_unprepare(sensor->extclk);
+
+ regulator_disable(sensor->dvdd);
+ regulator_disable(sensor->avdd);
+ regulator_disable(sensor->dovdd);
+ }
+
+ return ret;
+}
+
+/* Controls */
+
+static int ov8865_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *subdev = ov8865_ctrl_subdev(ctrl);
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ unsigned int index;
+ int ret;
+
+ /* Only apply controls while the sensor is powered on. */
+ if (pm_runtime_suspended(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ret = ov8865_exposure_configure(sensor, ctrl->val);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_CID_GAIN:
+ ret = ov8865_gain_configure(sensor, ctrl->val);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ return ov8865_red_balance_configure(sensor, ctrl->val);
+ case V4L2_CID_BLUE_BALANCE:
+ return ov8865_blue_balance_configure(sensor, ctrl->val);
+ case V4L2_CID_HFLIP:
+ return ov8865_flip_horz_configure(sensor, !!ctrl->val);
+ case V4L2_CID_VFLIP:
+ return ov8865_flip_vert_configure(sensor, !!ctrl->val);
+ case V4L2_CID_TEST_PATTERN:
+ index = (unsigned int)ctrl->val;
+ return ov8865_test_pattern_configure(sensor, index);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ov8865_ctrl_ops = {
+ .s_ctrl = ov8865_s_ctrl,
+};
+
+static int ov8865_ctrls_init(struct ov8865_sensor *sensor)
+{
+ struct ov8865_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+ const struct v4l2_ctrl_ops *ops = &ov8865_ctrl_ops;
+ int ret;
+
+ v4l2_ctrl_handler_init(handler, 32);
+
+ /* Use our mutex for ctrl locking. */
+ handler->lock = &sensor->mutex;
+
+ /* Exposure */
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_EXPOSURE, 16, 1048575, 16,
+ 512);
+
+ /* Gain */
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_GAIN, 128, 8191, 128, 128);
+
+ /* White Balance */
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_RED_BALANCE, 1, 32767, 1,
+ 1024);
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_BLUE_BALANCE, 1, 32767, 1,
+ 1024);
+
+ /* Flip */
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ /* Test Pattern */
+
+ v4l2_ctrl_new_std_menu_items(handler, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov8865_test_pattern_menu) - 1,
+ 0, 0, ov8865_test_pattern_menu);
+
+ /* MIPI CSI-2 */
+
+ ctrls->link_freq =
+ v4l2_ctrl_new_int_menu(handler, NULL, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov8865_link_freq_menu) - 1,
+ 0, ov8865_link_freq_menu);
+
+ ctrls->pixel_rate =
+ v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1, 1);
+
+ if (handler->error) {
+ ret = handler->error;
+ goto error_ctrls;
+ }
+
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ sensor->subdev.ctrl_handler = handler;
+
+ return 0;
+
+error_ctrls:
+ v4l2_ctrl_handler_free(handler);
+
+ return ret;
+}
+
+/* Subdev Video Operations */
+
+static int ov8865_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ struct ov8865_state *state = &sensor->state;
+ int ret;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(sensor->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sensor->dev);
+ return ret;
+ }
+ }
+
+ mutex_lock(&sensor->mutex);
+ ret = ov8865_sw_standby(sensor, !enable);
+ mutex_unlock(&sensor->mutex);
+
+	if (ret) {
+		if (enable)
+			pm_runtime_put(sensor->dev);
+		return ret;
+	}
+
+ state->streaming = !!enable;
+
+ if (!enable)
+ pm_runtime_put(sensor->dev);
+
+ return 0;
+}
+
+static int ov8865_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ const struct ov8865_mode *mode;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ mode = sensor->state.mode;
+ interval->interval = mode->frame_interval;
+
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops ov8865_subdev_video_ops = {
+ .s_stream = ov8865_s_stream,
+ .g_frame_interval = ov8865_g_frame_interval,
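+	/*
+	 * The frame interval is fixed for a given mode, so the set handler
+	 * simply reports the active interval, like the get handler.
+	 */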
+ .s_frame_interval = ov8865_g_frame_interval,
+};
+
+/* Subdev Pad Operations */
+
+static int ov8865_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(ov8865_mbus_codes))
+ return -EINVAL;
+
+ code_enum->code = ov8865_mbus_codes[code_enum->index];
+
+ return 0;
+}
+
+static void ov8865_mbus_format_fill(struct v4l2_mbus_framefmt *mbus_format,
+ u32 mbus_code,
+ const struct ov8865_mode *mode)
+{
+ mbus_format->width = mode->output_size_x;
+ mbus_format->height = mode->output_size_y;
+ mbus_format->code = mbus_code;
+
+ mbus_format->field = V4L2_FIELD_NONE;
+ mbus_format->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_format->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(mbus_format->colorspace);
+ mbus_format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ mbus_format->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(mbus_format->colorspace);
+}
+
+static int ov8865_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_format *format)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+
+ mutex_lock(&sensor->mutex);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, config,
+ format->pad);
+ else
+ ov8865_mbus_format_fill(mbus_format, sensor->state.mbus_code,
+ sensor->state.mode);
+
+ mutex_unlock(&sensor->mutex);
+
+ return 0;
+}
+
+static int ov8865_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_format *format)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ const struct ov8865_mode *mode;
+ u32 mbus_code = 0;
+ unsigned int index;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ if (sensor->state.streaming) {
+ ret = -EBUSY;
+ goto complete;
+ }
+
+ /* Try to find requested mbus code. */
+ for (index = 0; index < ARRAY_SIZE(ov8865_mbus_codes); index++) {
+ if (ov8865_mbus_codes[index] == mbus_format->code) {
+ mbus_code = mbus_format->code;
+ break;
+ }
+ }
+
+ /* Fallback to default. */
+ if (!mbus_code)
+ mbus_code = ov8865_mbus_codes[0];
+
+ /* Find the mode with nearest dimensions. */
+ mode = v4l2_find_nearest_size(ov8865_modes, ARRAY_SIZE(ov8865_modes),
+ output_size_x, output_size_y,
+ mbus_format->width, mbus_format->height);
+ if (!mode) {
+ ret = -EINVAL;
+ goto complete;
+ }
+
+ ov8865_mbus_format_fill(mbus_format, mbus_code, mode);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_get_try_format(subdev, config, format->pad) =
+ *mbus_format;
+ else if (sensor->state.mode != mode ||
+ sensor->state.mbus_code != mbus_code)
+ ret = ov8865_state_configure(sensor, mode, mbus_code);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov8865_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_frame_size_enum *size_enum)
+{
+ const struct ov8865_mode *mode;
+
+ if (size_enum->index >= ARRAY_SIZE(ov8865_modes))
+ return -EINVAL;
+
+ mode = &ov8865_modes[size_enum->index];
+
+ size_enum->min_width = size_enum->max_width = mode->output_size_x;
+ size_enum->min_height = size_enum->max_height = mode->output_size_y;
+
+ return 0;
+}
+
+static int ov8865_enum_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_frame_interval_enum *interval_enum)
+{
+ const struct ov8865_mode *mode = NULL;
+ unsigned int mode_index;
+ unsigned int interval_index;
+
+ if (interval_enum->index > 0)
+ return -EINVAL;
+ /*
+ * Multiple modes with the same dimensions may have different frame
+ * intervals, so look up each relevant mode.
+ */
+ for (mode_index = 0, interval_index = 0;
+ mode_index < ARRAY_SIZE(ov8865_modes); mode_index++) {
+ mode = &ov8865_modes[mode_index];
+
+ if (mode->output_size_x == interval_enum->width &&
+ mode->output_size_y == interval_enum->height) {
+ if (interval_index == interval_enum->index)
+ break;
+
+ interval_index++;
+ }
+ }
+
+ if (mode_index == ARRAY_SIZE(ov8865_modes) || !mode)
+ return -EINVAL;
+
+ interval_enum->interval = mode->frame_interval;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov8865_subdev_pad_ops = {
+ .enum_mbus_code = ov8865_enum_mbus_code,
+ .get_fmt = ov8865_get_fmt,
+ .set_fmt = ov8865_set_fmt,
+ .enum_frame_size = ov8865_enum_frame_size,
+ .enum_frame_interval = ov8865_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov8865_subdev_ops = {
+ .video = &ov8865_subdev_video_ops,
+ .pad = &ov8865_subdev_pad_ops,
+};
+
+static int ov8865_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ struct ov8865_state *state = &sensor->state;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ if (state->streaming) {
+ ret = ov8865_sw_standby(sensor, true);
+ if (ret)
+ goto complete;
+ }
+
+ ret = ov8865_sensor_power(sensor, false);
+ if (ret)
+ ov8865_sw_standby(sensor, false);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov8865_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ struct ov8865_state *state = &sensor->state;
+ int ret = 0;
+
+ mutex_lock(&sensor->mutex);
+
+ ret = ov8865_sensor_power(sensor, true);
+ if (ret)
+ goto complete;
+
+ ret = ov8865_sensor_init(sensor);
+ if (ret)
+ goto error_power;
+
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ if (ret)
+ goto error_power;
+
+ if (state->streaming) {
+ ret = ov8865_sw_standby(sensor, false);
+ if (ret)
+ goto error_power;
+ }
+
+ goto complete;
+
+error_power:
+ ov8865_sensor_power(sensor, false);
+
+complete:
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
+static int ov8865_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *handle;
+ struct ov8865_sensor *sensor;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ unsigned long rate;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->dev = dev;
+ sensor->i2c_client = client;
+
+ /* Graph Endpoint */
+
+ handle = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!handle) {
+ dev_err(dev, "unable to find endpoint node\n");
+ return -EINVAL;
+ }
+
+ sensor->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(handle, &sensor->endpoint);
+ fwnode_handle_put(handle);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node\n");
+ return ret;
+ }
+
+ /* GPIOs */
+
+ sensor->powerdown = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->powerdown)) {
+ ret = PTR_ERR(sensor->powerdown);
+ goto error_endpoint;
+ }
+
+ sensor->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset)) {
+ ret = PTR_ERR(sensor->reset);
+ goto error_endpoint;
+ }
+
+ /* Regulators */
+
+ /* DVDD: digital core */
+ sensor->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(sensor->dvdd)) {
+ dev_err(dev, "cannot get DVDD (digital core) regulator\n");
+ ret = PTR_ERR(sensor->dvdd);
+ goto error_endpoint;
+ }
+
+ /* DOVDD: digital I/O */
+ sensor->dovdd = devm_regulator_get(dev, "dovdd");
+ if (IS_ERR(sensor->dovdd)) {
+ dev_err(dev, "cannot get DOVDD (digital I/O) regulator\n");
+ ret = PTR_ERR(sensor->dovdd);
+ goto error_endpoint;
+ }
+
+ /* AVDD: analog */
+ sensor->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(sensor->avdd)) {
+ dev_err(dev, "cannot get AVDD (analog) regulator\n");
+ ret = PTR_ERR(sensor->avdd);
+ goto error_endpoint;
+ }
+
+ /* External Clock */
+
+ sensor->extclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->extclk)) {
+ dev_err(dev, "failed to get external clock\n");
+ ret = PTR_ERR(sensor->extclk);
+ goto error_endpoint;
+ }
+
+ rate = clk_get_rate(sensor->extclk);
+ if (rate != OV8865_EXTCLK_RATE) {
+ dev_err(dev, "clock rate %lu Hz is unsupported\n", rate);
+ ret = -EINVAL;
+ goto error_endpoint;
+ }
+
+ /* Subdev, entity and pad */
+
+ subdev = &sensor->subdev;
+ v4l2_i2c_subdev_init(subdev, client, &ov8865_subdev_ops);
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ pad = &sensor->pad;
+ pad->flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, 1, pad);
+ if (ret)
+ goto error_entity;
+
+ /* Mutex */
+
+ mutex_init(&sensor->mutex);
+
+ /* Sensor */
+
+ ret = ov8865_ctrls_init(sensor);
+ if (ret)
+ goto error_mutex;
+
+ ret = ov8865_state_init(sensor);
+ if (ret)
+ goto error_ctrls;
+
+ /* Runtime PM */
+
+ pm_runtime_enable(sensor->dev);
+ pm_runtime_set_suspended(sensor->dev);
+
+ /* V4L2 subdev register */
+
+ ret = v4l2_async_register_subdev_sensor_common(subdev);
+ if (ret)
+ goto error_pm;
+
+ return 0;
+
+error_pm:
+ pm_runtime_disable(sensor->dev);
+
+error_ctrls:
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+error_mutex:
+ mutex_destroy(&sensor->mutex);
+
+error_entity:
+ media_entity_cleanup(&sensor->subdev.entity);
+
+error_endpoint:
+ v4l2_fwnode_endpoint_free(&sensor->endpoint);
+
+ return ret;
+}
+
+static int ov8865_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+
+ v4l2_async_unregister_subdev(subdev);
+ pm_runtime_disable(sensor->dev);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->mutex);
+ media_entity_cleanup(&subdev->entity);
+
+ v4l2_fwnode_endpoint_free(&sensor->endpoint);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov8865_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov8865_suspend, ov8865_resume, NULL)
+};
+
+static const struct of_device_id ov8865_of_match[] = {
+ { .compatible = "ovti,ov8865" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ov8865_of_match);
+
+static struct i2c_driver ov8865_driver = {
+ .driver = {
+ .name = "ov8865",
+ .of_match_table = ov8865_of_match,
+ .pm = &ov8865_pm_ops,
+ },
+ .probe_new = ov8865_probe,
+ .remove = ov8865_remove,
+};
+
+module_i2c_driver(ov8865_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("V4L2 driver for the OmniVision OV8865 image sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index e2a25240fc85..d36b04c49628 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -17,6 +17,7 @@
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*/
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -26,7 +27,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-async.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -333,13 +333,13 @@ static int ov9640_s_power(struct v4l2_subdev *sd, int on)
if (on) {
gpiod_set_value(priv->gpio_power, 1);
usleep_range(1000, 2000);
- ret = v4l2_clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
usleep_range(1000, 2000);
gpiod_set_value(priv->gpio_reset, 0);
} else {
gpiod_set_value(priv->gpio_reset, 1);
usleep_range(1000, 2000);
- v4l2_clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
usleep_range(1000, 2000);
gpiod_set_value(priv->gpio_power, 0);
}
@@ -719,7 +719,7 @@ static int ov9640_probe(struct i2c_client *client,
priv->subdev.ctrl_handler = &priv->hdl;
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
+ priv->clk = devm_clk_get(&client->dev, "mclk");
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto ectrlinit;
@@ -727,17 +727,15 @@ static int ov9640_probe(struct i2c_client *client,
ret = ov9640_video_probe(client);
if (ret)
- goto eprobe;
+ goto ectrlinit;
priv->subdev.dev = &client->dev;
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret)
- goto eprobe;
+ goto ectrlinit;
return 0;
-eprobe:
- v4l2_clk_put(priv->clk);
ectrlinit:
v4l2_ctrl_handler_free(&priv->hdl);
@@ -749,7 +747,6 @@ static int ov9640_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
- v4l2_clk_put(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
diff --git a/drivers/media/i2c/ov9640.h b/drivers/media/i2c/ov9640.h
index a8ed6992c1a8..c105594b2472 100644
--- a/drivers/media/i2c/ov9640.h
+++ b/drivers/media/i2c/ov9640.h
@@ -196,7 +196,7 @@ struct ov9640_reg {
struct ov9640_priv {
struct v4l2_subdev subdev;
struct v4l2_ctrl_handler hdl;
- struct v4l2_clk *clk;
+ struct clk *clk;
struct gpio_desc *gpio_power;
struct gpio_desc *gpio_reset;
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 16bcb764b0e0..90eb73f0e6e9 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -435,7 +435,7 @@ static int rdacm20_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops rdacm20_video_ops = {
+static const struct v4l2_subdev_video_ops rdacm20_video_ops = {
.s_stream = rdacm20_s_stream,
};
@@ -445,7 +445,7 @@ static const struct v4l2_subdev_pad_ops rdacm20_subdev_pad_ops = {
.set_fmt = rdacm20_get_fmt,
};
-static struct v4l2_subdev_ops rdacm20_subdev_ops = {
+static const struct v4l2_subdev_ops rdacm20_subdev_ops = {
.video = &rdacm20_video_ops,
.pad = &rdacm20_subdev_pad_ops,
};
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
new file mode 100644
index 000000000000..dcc21515e5a4
--- /dev/null
+++ b/drivers/media/i2c/rdacm21.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IMI RDACM21 GMSL Camera Driver
+ *
+ * Copyright (C) 2017-2020 Jacopo Mondi
+ * Copyright (C) 2017-2019 Kieran Bingham
+ * Copyright (C) 2017-2019 Laurent Pinchart
+ * Copyright (C) 2017-2019 Niklas Söderlund
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2015 Cogent Embedded, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/fwnode.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include "max9271.h"
+
+#define MAX9271_RESET_CYCLES 10
+
+#define OV490_I2C_ADDRESS 0x24
+
+#define OV490_PAGE_HIGH_REG 0xfffd
+#define OV490_PAGE_LOW_REG 0xfffe
+
+/*
+ * The SCCB slave handling is undocumented; the register naming scheme
+ * used here is entirely arbitrary.
+ */
+#define OV490_SCCB_SLAVE_WRITE 0x00
+#define OV490_SCCB_SLAVE_READ 0x01
+#define OV490_SCCB_SLAVE0_DIR 0x80195000
+#define OV490_SCCB_SLAVE0_ADDR_HIGH 0x80195001
+#define OV490_SCCB_SLAVE0_ADDR_LOW 0x80195002
+
+#define OV490_DVP_CTRL3 0x80286009
+
+#define OV490_ODS_CTRL_FRAME_OUTPUT_EN 0x0c
+#define OV490_ODS_CTRL 0x8029d000
+
+#define OV490_HOST_CMD 0x808000c0
+#define OV490_HOST_CMD_TRIGGER 0xc1
+
+#define OV490_ID_VAL 0x0490
+#define OV490_ID(_p, _v) ((((_p) & 0xff) << 8) | ((_v) & 0xff))
+#define OV490_PID 0x8080300a
+#define OV490_VER 0x8080300b
+#define OV490_PID_TIMEOUT 20
+#define OV490_OUTPUT_EN_TIMEOUT 300
+
+#define OV490_GPIO0 BIT(0)
+#define OV490_SPWDN0 BIT(0)
+#define OV490_GPIO_SEL0 0x80800050
+#define OV490_GPIO_SEL1 0x80800051
+#define OV490_GPIO_DIRECTION0 0x80800054
+#define OV490_GPIO_DIRECTION1 0x80800055
+#define OV490_GPIO_OUTPUT_VALUE0 0x80800058
+#define OV490_GPIO_OUTPUT_VALUE1 0x80800059
+
+#define OV490_ISP_HSIZE_LOW 0x80820060
+#define OV490_ISP_HSIZE_HIGH 0x80820061
+#define OV490_ISP_VSIZE_LOW 0x80820062
+#define OV490_ISP_VSIZE_HIGH 0x80820063
+
+#define OV10640_ID_HIGH 0xa6
+#define OV10640_CHIP_ID 0x300a
+#define OV10640_PIXEL_RATE 55000000
+
+struct rdacm21_device {
+ struct device *dev;
+ struct max9271_device serializer;
+ struct i2c_client *isp;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_ctrl_handler ctrls;
+ u32 addrs[2];
+ u16 last_page;
+};
+
+static inline struct rdacm21_device *sd_to_rdacm21(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rdacm21_device, sd);
+}
+
+static const struct ov490_reg {
+ u16 reg;
+ u8 val;
+} ov490_regs_wizard[] = {
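+	/* Entries writing 0xfffd/0xfffe select the register page (high/low byte). */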
+ {0xfffd, 0x80},
+ {0xfffe, 0x82},
+ {0x0071, 0x11},
+ {0x0075, 0x11},
+ {0xfffe, 0x29},
+ {0x6010, 0x01},
+ /*
+	 * Disable the OV490 embedded (EMB) line in YUV and RAW data.
+	 * NOTE: the EMB line is still used internally by the ISP and the sensor.
+ */
+ {0xe000, 0x14},
+ {0xfffe, 0x28},
+ {0x6000, 0x04},
+ {0x6004, 0x00},
+ /*
+	 * PCLK polarity: setting it here is ineffective due to a silicon bug.
+	 * Use register 0x808000bb instead.
+ */
+ {0x6008, 0x00},
+ {0xfffe, 0x80},
+ {0x0091, 0x00},
+ /* bit[3]=0 - PCLK polarity workaround. */
+ {0x00bb, 0x1d},
+	/* OV490 FSIN: app_fsin_from_fsync */
+ {0xfffe, 0x85},
+ {0x0008, 0x00},
+ {0x0009, 0x01},
+ /* FSIN0 source. */
+ {0x000A, 0x05},
+ {0x000B, 0x00},
+ /* FSIN0 delay. */
+ {0x0030, 0x02},
+ {0x0031, 0x00},
+ {0x0032, 0x00},
+ {0x0033, 0x00},
+ /* FSIN1 delay. */
+ {0x0038, 0x02},
+ {0x0039, 0x00},
+ {0x003A, 0x00},
+ {0x003B, 0x00},
+ /* FSIN0 length. */
+ {0x0070, 0x2C},
+ {0x0071, 0x01},
+ {0x0072, 0x00},
+ {0x0073, 0x00},
+ /* FSIN1 length. */
+ {0x0074, 0x64},
+ {0x0075, 0x00},
+ {0x0076, 0x00},
+ {0x0077, 0x00},
+ {0x0000, 0x14},
+ {0x0001, 0x00},
+ {0x0002, 0x00},
+ {0x0003, 0x00},
+ /*
+	 * Load FSIN0, FSIN1 and the other settings; the bits are cleared
+	 * automatically.
+ */
+ {0x0004, 0x32},
+ {0x0005, 0x00},
+ {0x0006, 0x00},
+ {0x0007, 0x00},
+ {0xfffe, 0x80},
+ /* Sensor FSIN. */
+ {0x0081, 0x00},
+ /* ov10640 FSIN enable */
+ {0xfffe, 0x19},
+ {0x5000, 0x00},
+ {0x5001, 0x30},
+ {0x5002, 0x8c},
+ {0x5003, 0xb2},
+ {0xfffe, 0x80},
+ {0x00c0, 0xc1},
+ /* ov10640 HFLIP=1 by default */
+ {0xfffe, 0x19},
+ {0x5000, 0x01},
+ {0x5001, 0x00},
+ {0xfffe, 0x80},
+ {0x00c0, 0xdc},
+};
+
+static int ov490_read(struct rdacm21_device *dev, u16 reg, u8 *val)
+{
+ u8 buf[2] = { reg >> 8, reg };
+ int ret;
+
+ ret = i2c_master_send(dev->isp, buf, 2);
+ if (ret == 2)
+ ret = i2c_master_recv(dev->isp, val, 1);
+
+ if (ret < 0) {
+ dev_dbg(dev->dev, "%s: register 0x%04x read failed (%d)\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov490_write(struct rdacm21_device *dev, u16 reg, u8 val)
+{
+ u8 buf[3] = { reg >> 8, reg, val };
+ int ret;
+
+ ret = i2c_master_send(dev->isp, buf, 3);
+ if (ret < 0) {
+ dev_err(dev->dev, "%s: register 0x%04x write failed (%d)\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
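+/*
+ * OV490 registers are addressed with 32 bits: the upper 16 bits select a
+ * page, programmed through the OV490_PAGE_HIGH/LOW registers, and the lower
+ * 16 bits address a register within that page. The last selected page is
+ * cached to avoid redundant page switches.
+ */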
+static int ov490_set_page(struct rdacm21_device *dev, u16 page)
+{
+ u8 page_high = page >> 8;
+ u8 page_low = page;
+ int ret;
+
+ if (page == dev->last_page)
+ return 0;
+
+ if (page_high != (dev->last_page >> 8)) {
+ ret = ov490_write(dev, OV490_PAGE_HIGH_REG, page_high);
+ if (ret)
+ return ret;
+ }
+
+ if (page_low != (u8)dev->last_page) {
+ ret = ov490_write(dev, OV490_PAGE_LOW_REG, page_low);
+ if (ret)
+ return ret;
+ }
+
+ dev->last_page = page;
+ usleep_range(100, 150);
+
+ return 0;
+}
+
+static int ov490_read_reg(struct rdacm21_device *dev, u32 reg, u8 *val)
+{
+ int ret;
+
+ ret = ov490_set_page(dev, reg >> 16);
+ if (ret)
+ return ret;
+
+ ret = ov490_read(dev, (u16)reg, val);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev, "%s: 0x%08x = 0x%02x\n", __func__, reg, *val);
+
+ return 0;
+}
+
+static int ov490_write_reg(struct rdacm21_device *dev, u32 reg, u8 val)
+{
+ int ret;
+
+ ret = ov490_set_page(dev, reg >> 16);
+ if (ret)
+ return ret;
+
+ ret = ov490_write(dev, (u16)reg, val);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev, "%s: 0x%08x = 0x%02x\n", __func__, reg, val);
+
+ return 0;
+}
+
+static int rdacm21_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rdacm21_device *dev = sd_to_rdacm21(sd);
+
+ /*
+ * Enable serial link now that the ISP provides a valid pixel clock
+ * to start serializing video data on the GMSL link.
+ */
+ return max9271_set_serial_link(&dev->serializer, enable);
+}
+
+static int rdacm21_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
+
+ return 0;
+}
+
+static int rdacm21_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct rdacm21_device *dev = sd_to_rdacm21(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ mf->width = dev->fmt.width;
+ mf->height = dev->fmt.height;
+ mf->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ mf->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ mf->xfer_func = V4L2_XFER_FUNC_NONE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops rdacm21_video_ops = {
+ .s_stream = rdacm21_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rdacm21_subdev_pad_ops = {
+ .enum_mbus_code = rdacm21_enum_mbus_code,
+ .get_fmt = rdacm21_get_fmt,
+ .set_fmt = rdacm21_get_fmt,
+};
+
+static const struct v4l2_subdev_ops rdacm21_subdev_ops = {
+ .video = &rdacm21_video_ops,
+ .pad = &rdacm21_subdev_pad_ops,
+};
+
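+/*
+ * The OV10640 image sensor is not on the I2C bus directly: it sits behind
+ * the OV490 ISP and is reached through the OV490 SCCB slave interface,
+ * hence the indirect register accesses below.
+ */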
+static int ov10640_initialize(struct rdacm21_device *dev)
+{
+ u8 val;
+
+ /* Power-up OV10640 by setting RESETB and PWDNB pins high. */
+ ov490_write_reg(dev, OV490_GPIO_SEL0, OV490_GPIO0);
+ ov490_write_reg(dev, OV490_GPIO_SEL1, OV490_SPWDN0);
+ ov490_write_reg(dev, OV490_GPIO_DIRECTION0, OV490_GPIO0);
+ ov490_write_reg(dev, OV490_GPIO_DIRECTION1, OV490_SPWDN0);
+ ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_GPIO0);
+	ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE1, OV490_SPWDN0);
+ usleep_range(3000, 5000);
+
+ /* Read OV10640 ID to test communications. */
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
+
+ /* Trigger SCCB slave transaction and give it some time to complete. */
+ ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
+ usleep_range(1000, 1500);
+
+ ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
+ if (val != OV10640_ID_HIGH) {
+ dev_err(dev->dev, "OV10640 ID mismatch: (0x%02x)\n", val);
+ return -ENODEV;
+ }
+
+	dev_dbg(dev->dev, "OV10640 ID = 0x%02x\n", val);
+
+ return 0;
+}
+
+static int ov490_initialize(struct rdacm21_device *dev)
+{
+ u8 pid, ver, val;
+ unsigned int i;
+ int ret;
+
+ /*
+	 * Read the OV490 ID to test communications. Give it up to 40 ms to
+	 * exit from reset.
+ */
+ for (i = 0; i < OV490_PID_TIMEOUT; ++i) {
+ ret = ov490_read_reg(dev, OV490_PID, &pid);
+ if (ret == 0)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (i == OV490_PID_TIMEOUT) {
+ dev_err(dev->dev, "OV490 PID read failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ov490_read_reg(dev, OV490_VER, &ver);
+ if (ret < 0)
+ return ret;
+
+ if (OV490_ID(pid, ver) != OV490_ID_VAL) {
+ dev_err(dev->dev, "OV490 ID mismatch (0x%04x)\n",
+ OV490_ID(pid, ver));
+ return -ENODEV;
+ }
+
+	/* Wait for the firmware to boot by polling the stream-on status. */
+ for (i = 0; i < OV490_OUTPUT_EN_TIMEOUT; ++i) {
+ ov490_read_reg(dev, OV490_ODS_CTRL, &val);
+ if (val == OV490_ODS_CTRL_FRAME_OUTPUT_EN)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (i == OV490_OUTPUT_EN_TIMEOUT) {
+ dev_err(dev->dev, "Timeout waiting for firmware boot\n");
+ return -ENODEV;
+ }
+
+ ret = ov10640_initialize(dev);
+ if (ret)
+ return ret;
+
+ /* Program OV490 with register-value table. */
+ for (i = 0; i < ARRAY_SIZE(ov490_regs_wizard); ++i) {
+ ret = ov490_write(dev, ov490_regs_wizard[i].reg,
+ ov490_regs_wizard[i].val);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "%s: register %u (0x%04x) write failed (%d)\n",
+ __func__, i, ov490_regs_wizard[i].reg, ret);
+
+ return -EIO;
+ }
+
+ usleep_range(100, 150);
+ }
+
+ /*
+ * The ISP is programmed with the content of a serial flash memory.
+ * Read the firmware configuration to reflect it through the V4L2 APIs.
+ */
+ ov490_read_reg(dev, OV490_ISP_HSIZE_HIGH, &val);
+ dev->fmt.width = (val & 0xf) << 8;
+ ov490_read_reg(dev, OV490_ISP_HSIZE_LOW, &val);
+ dev->fmt.width |= (val & 0xff);
+
+ ov490_read_reg(dev, OV490_ISP_VSIZE_HIGH, &val);
+ dev->fmt.height = (val & 0xf) << 8;
+ ov490_read_reg(dev, OV490_ISP_VSIZE_LOW, &val);
+ dev->fmt.height |= val & 0xff;
+
+ /* Set bus width to 12 bits with [0:11] ordering. */
+ ov490_write_reg(dev, OV490_DVP_CTRL3, 0x10);
+
+ dev_info(dev->dev, "Identified RDACM21 camera module\n");
+
+ return 0;
+}
+
+static int rdacm21_initialize(struct rdacm21_device *dev)
+{
+ int ret;
+
+	/* Verify communication with the MAX9271: ping it to wake it up. */
+ dev->serializer.client->addr = MAX9271_DEFAULT_ADDR;
+ i2c_smbus_read_byte(dev->serializer.client);
+ usleep_range(3000, 5000);
+
+ /* Enable reverse channel and disable the serial link. */
+ ret = max9271_set_serial_link(&dev->serializer, false);
+ if (ret)
+ return ret;
+
+	/* Configure the I2C bus at 105 kbps and configure the GMSL link. */
+ ret = max9271_configure_i2c(&dev->serializer,
+ MAX9271_I2CSLVSH_469NS_234NS |
+ MAX9271_I2CSLVTO_1024US |
+ MAX9271_I2CMSTBT_105KBPS);
+ if (ret)
+ return ret;
+
+ ret = max9271_verify_id(&dev->serializer);
+ if (ret)
+ return ret;
+
+ /* Enable GPIO1 and hold OV490 in reset during max9271 configuration. */
+ ret = max9271_enable_gpios(&dev->serializer, MAX9271_GPIO1OUT);
+ if (ret)
+ return ret;
+
+ ret = max9271_clear_gpios(&dev->serializer, MAX9271_GPIO1OUT);
+ if (ret)
+ return ret;
+
+ ret = max9271_configure_gmsl_link(&dev->serializer);
+ if (ret)
+ return ret;
+
+ ret = max9271_set_address(&dev->serializer, dev->addrs[0]);
+ if (ret)
+ return ret;
+ dev->serializer.client->addr = dev->addrs[0];
+
+ ret = max9271_set_translation(&dev->serializer, dev->addrs[1],
+ OV490_I2C_ADDRESS);
+ if (ret)
+ return ret;
+ dev->isp->addr = dev->addrs[1];
+
+ /* Release OV490 from reset and initialize it. */
+ ret = max9271_set_gpios(&dev->serializer, MAX9271_GPIO1OUT);
+ if (ret)
+ return ret;
+ usleep_range(3000, 5000);
+
+ ret = ov490_initialize(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Set reverse channel high threshold to increase noise immunity.
+ *
+ * This should be compensated by increasing the reverse channel
+ * amplitude on the remote deserializer side.
+ */
+ return max9271_set_high_threshold(&dev->serializer, true);
+}
+
+static int rdacm21_probe(struct i2c_client *client)
+{
+ struct rdacm21_device *dev;
+ struct fwnode_handle *ep;
+ int ret;
+
+ dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev = &client->dev;
+ dev->serializer.client = client;
+
+ ret = of_property_read_u32_array(client->dev.of_node, "reg",
+ dev->addrs, 2);
+ if (ret < 0) {
+ dev_err(dev->dev, "Invalid DT reg property: %d\n", ret);
+ return -EINVAL;
+ }
+
+ /* Create the dummy I2C client for the sensor. */
+ dev->isp = i2c_new_dummy_device(client->adapter, OV490_I2C_ADDRESS);
+ if (IS_ERR(dev->isp))
+ return PTR_ERR(dev->isp);
+
+ ret = rdacm21_initialize(dev);
+ if (ret < 0)
+ goto error;
+
+ /* Initialize and register the subdevice. */
+ v4l2_i2c_subdev_init(&dev->sd, client, &rdacm21_subdev_ops);
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ v4l2_ctrl_handler_init(&dev->ctrls, 1);
+ v4l2_ctrl_new_std(&dev->ctrls, NULL, V4L2_CID_PIXEL_RATE,
+ OV10640_PIXEL_RATE, OV10640_PIXEL_RATE, 1,
+ OV10640_PIXEL_RATE);
+ dev->sd.ctrl_handler = &dev->ctrls;
+
+ ret = dev->ctrls.error;
+ if (ret)
+ goto error_free_ctrls;
+
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+	dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret < 0)
+ goto error_free_ctrls;
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ if (!ep) {
+ dev_err(&client->dev,
+ "Unable to get endpoint in node %pOF\n",
+ client->dev.of_node);
+ ret = -ENOENT;
+ goto error_free_ctrls;
+ }
+ dev->sd.fwnode = ep;
+
+ ret = v4l2_async_register_subdev(&dev->sd);
+ if (ret)
+ goto error_put_node;
+
+ return 0;
+
+error_put_node:
+ fwnode_handle_put(dev->sd.fwnode);
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&dev->ctrls);
+error:
+ i2c_unregister_device(dev->isp);
+
+ return ret;
+}
+
+static int rdacm21_remove(struct i2c_client *client)
+{
+ struct rdacm21_device *dev = sd_to_rdacm21(i2c_get_clientdata(client));
+
+ v4l2_async_unregister_subdev(&dev->sd);
+ v4l2_ctrl_handler_free(&dev->ctrls);
+ i2c_unregister_device(dev->isp);
+ fwnode_handle_put(dev->sd.fwnode);
+
+ return 0;
+}
+
+static const struct of_device_id rdacm21_of_ids[] = {
+ { .compatible = "imi,rdacm21" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rdacm21_of_ids);
+
+static struct i2c_driver rdacm21_i2c_driver = {
+ .driver = {
+ .name = "rdacm21",
+ .of_match_table = rdacm21_of_ids,
+ },
+ .probe_new = rdacm21_probe,
+ .remove = rdacm21_remove,
+};
+
+module_i2c_driver(rdacm21_i2c_driver);
+
+MODULE_DESCRIPTION("GMSL Camera driver for RDACM21");
+MODULE_AUTHOR("Jacopo Mondi");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 003ba22334cd..7f07ef56fbbd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -92,7 +92,6 @@ struct mipid02_dev {
u64 link_frequency;
struct v4l2_fwnode_endpoint tx;
/* remote source */
- struct v4l2_async_subdev asd;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *s_subdev;
/* registers */
@@ -844,6 +843,7 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
{
struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct i2c_client *client = bridge->i2c_client;
+ struct v4l2_async_subdev *asd;
struct device_node *ep_node;
int ret;
@@ -875,18 +875,17 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
bridge->rx = ep;
/* register async notifier so we get noticed when sensor is connected */
- bridge->asd.match.fwnode =
- fwnode_graph_get_remote_port_parent(of_fwnode_handle(ep_node));
- bridge->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ v4l2_async_notifier_init(&bridge->notifier);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &bridge->notifier,
+ of_fwnode_handle(ep_node),
+ struct v4l2_async_subdev);
of_node_put(ep_node);
- v4l2_async_notifier_init(&bridge->notifier);
- ret = v4l2_async_notifier_add_subdev(&bridge->notifier, &bridge->asd);
- if (ret) {
- dev_err(&client->dev, "fail to register asd to notifier %d",
- ret);
- fwnode_handle_put(bridge->asd.match.fwnode);
- return ret;
+ if (IS_ERR(asd)) {
+ dev_err(&client->dev, "fail to register asd to notifier %ld",
+ PTR_ERR(asd));
+ return PTR_ERR(asd);
}
bridge->notifier.ops = &mipid02_notifier_ops;
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 6f8ffab8840f..07b6d0c49bbf 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -976,8 +976,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
__le32 *cpu;
dma_addr_t dma = 0;
- if (NULL != risc->cpu && risc->size < size)
+ if (risc->cpu && risc->size < size) {
pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
+ risc->cpu = NULL;
+ }
if (NULL == risc->cpu) {
cpu = pci_zalloc_consistent(pci, size, &dma);
if (NULL == cpu)
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index 82d7f17e6a02..dce8274c81e6 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -2,7 +2,8 @@
config VIDEO_IPU3_CIO2
tristate "Intel ipu3-cio2 driver"
depends on VIDEO_V4L2 && PCI
- depends on (X86 && ACPI) || COMPILE_TEST
+ depends on ACPI || COMPILE_TEST
+ depends on X86
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -16,3 +17,21 @@ config VIDEO_IPU3_CIO2
Say Y or M here if you have a Skylake/Kaby Lake SoC with MIPI CSI-2
connected camera.
The module will be called ipu3-cio2.
+
+config CIO2_BRIDGE
+ bool "IPU3 CIO2 Sensors Bridge"
+ depends on VIDEO_IPU3_CIO2 && ACPI
+ help
+	  This extension provides an API for the ipu3-cio2 driver to create
+	  connections to cameras whose connection information is hidden in
+	  the ACPI SSDB buffer instead of a firmware graph.
+ It can be used to enable support for cameras in detachable / hybrid
+ devices that ship with Windows.
+
+ Say Y here if your device is a detachable / hybrid laptop that comes
+ with Windows installed by the OEM, for example:
+
+ - Microsoft Surface models (except Surface Pro 3)
+ - The Lenovo Miix line (for example the 510, 520, 710 and 720)
+ - Dell 7285
+
+ If in doubt, say N here.
diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
index 98ddd5beafe0..933777e6ea8a 100644
--- a/drivers/media/pci/intel/ipu3/Makefile
+++ b/drivers/media/pci/intel/ipu3/Makefile
@@ -1,2 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
+
+ipu3-cio2-y += ipu3-cio2-main.o
+ipu3-cio2-$(CONFIG_CIO2_BRIDGE) += cio2-bridge.o
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
new file mode 100644
index 000000000000..c2199042d3db
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/property.h>
+#include <media/v4l2-fwnode.h>
+
+#include "cio2-bridge.h"
+
+/*
+ * Extend this array with ACPI Hardware IDs of devices known to be working
+ * plus the number of link-frequencies expected by their drivers, along with
+ * the frequency values in hertz. This is a somewhat opportunistic way of
+ * adding support for now, in the hope that a better source for the
+ * information (possibly some encoded value in the SSDB buffer that we're
+ * unaware of) becomes apparent in the future.
+ *
+ * Do not add an entry for a sensor that is not actually supported.
+ */
+static const struct cio2_sensor_config cio2_supported_sensors[] = {
+ /* Omnivision OV5693 */
+ CIO2_SENSOR_CONFIG("INT33BE", 0),
+ /* Omnivision OV2680 */
+ CIO2_SENSOR_CONFIG("OVTI2680", 0),
+};
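+
+/*
+ * For example, a hypothetical sensor with ACPI HID "INT1234" whose driver
+ * expects a single link frequency of 422.4 MHz would be declared as:
+ *
+ *	CIO2_SENSOR_CONFIG("INT1234", 1, 422400000),
+ */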
+
+static const struct cio2_property_names prop_names = {
+ .clock_frequency = "clock-frequency",
+ .rotation = "rotation",
+ .bus_type = "bus-type",
+ .data_lanes = "data-lanes",
+ .remote_endpoint = "remote-endpoint",
+ .link_frequencies = "link-frequencies",
+};
+
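+/*
+ * Evaluate a named ACPI buffer object (e.g. "SSDB") on the sensor's ACPI
+ * device and copy its contents into a caller-supplied structure, checking
+ * the object type and size first.
+ */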
+static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ void *data, u32 size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = buffer.pointer;
+ if (!obj) {
+ dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&adev->dev, "Not an ACPI buffer\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ if (obj->buffer.length > size) {
+ dev_err(&adev->dev, "Given buffer is too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+
+ memcpy(data, obj->buffer.pointer, obj->buffer.length);
+
+out_free_buff:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static void cio2_bridge_create_fwnode_properties(
+ struct cio2_sensor *sensor,
+ struct cio2_bridge *bridge,
+ const struct cio2_sensor_config *cfg)
+{
+ sensor->prop_names = prop_names;
+
+ sensor->local_ref[0].node = &sensor->swnodes[SWNODE_CIO2_ENDPOINT];
+ sensor->remote_ref[0].node = &sensor->swnodes[SWNODE_SENSOR_ENDPOINT];
+
+ sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.clock_frequency,
+ sensor->ssdb.mclkspeed);
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
+ sensor->prop_names.rotation,
+ sensor->ssdb.degree);
+
+ sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes,
+ sensor->ssdb.lanes);
+ sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->local_ref);
+
+ if (cfg->nr_link_freqs > 0)
+ sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
+ sensor->prop_names.link_frequencies,
+ cfg->link_freqs,
+ cfg->nr_link_freqs);
+
+ sensor->cio2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes,
+ sensor->ssdb.lanes);
+ sensor->cio2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->remote_ref);
+}
+
+static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor)
+{
+ snprintf(sensor->node_names.remote_port,
+ sizeof(sensor->node_names.remote_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link);
+ snprintf(sensor->node_names.port,
+ sizeof(sensor->node_names.port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
+ snprintf(sensor->node_names.endpoint,
+ sizeof(sensor->node_names.endpoint),
+ SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
+}
+
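+/*
+ * Build the software nodes describing one sensor-to-CIO2 connection,
+ * mirroring the usual firmware graph layout:
+ *
+ *	sensor HID -> port@0 -> endpoint@0 <-> endpoint@0 <- port@<link> <- CIO2
+ */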
+static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
+ struct cio2_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ cio2_bridge_init_swnode_names(sensor);
+
+ nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
+ sensor->dev_properties);
+ nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
+ &nodes[SWNODE_SENSOR_HID]);
+ nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_SENSOR_PORT],
+ sensor->ep_properties);
+ nodes[SWNODE_CIO2_PORT] = NODE_PORT(sensor->node_names.remote_port,
+ &bridge->cio2_hid_node);
+ nodes[SWNODE_CIO2_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_CIO2_PORT],
+ sensor->cio2_properties);
+}
+
+static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
+{
+ struct cio2_sensor *sensor;
+ unsigned int i;
+
+ for (i = 0; i < bridge->n_sensors; i++) {
+ sensor = &bridge->sensors[i];
+ software_node_unregister_nodes(sensor->swnodes);
+ acpi_dev_put(sensor->adev);
+ }
+}
+
+static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ struct cio2_bridge *bridge,
+ struct pci_dev *cio2)
+{
+ struct fwnode_handle *fwnode;
+ struct cio2_sensor *sensor;
+ struct acpi_device *adev;
+ int ret;
+
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+ if (!adev->status.enabled)
+ continue;
+
+ if (bridge->n_sensors >= CIO2_NUM_PORTS) {
+ dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
+ cio2_bridge_unregister_sensors(bridge);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ sensor = &bridge->sensors[bridge->n_sensors];
+ sensor->adev = adev;
+ strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
+
+ ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
+ &sensor->ssdb,
+ sizeof(sensor->ssdb));
+ if (ret)
+ goto err_put_adev;
+
+ if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
+ dev_err(&adev->dev,
+ "Number of lanes in SSDB is invalid\n");
+ ret = -EINVAL;
+ goto err_put_adev;
+ }
+
+ cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
+ cio2_bridge_create_connection_swnodes(bridge, sensor);
+
+ ret = software_node_register_nodes(sensor->swnodes);
+ if (ret)
+ goto err_put_adev;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[
+ SWNODE_SENSOR_HID]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto err_free_swnodes;
+ }
+
+ adev->fwnode.secondary = fwnode;
+
+ dev_info(&cio2->dev, "Found supported sensor %s\n",
+ acpi_dev_name(adev));
+
+ bridge->n_sensors++;
+ }
+
+ return 0;
+
+err_free_swnodes:
+ software_node_unregister_nodes(sensor->swnodes);
+err_put_adev:
+ acpi_dev_put(sensor->adev);
+err_out:
+ return ret;
+}
+
+static int cio2_bridge_connect_sensors(struct cio2_bridge *bridge,
+ struct pci_dev *cio2)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
+ const struct cio2_sensor_config *cfg =
+ &cio2_supported_sensors[i];
+
+ ret = cio2_bridge_connect_sensor(cfg, bridge, cio2);
+ if (ret)
+ goto err_unregister_sensors;
+ }
+
+ return 0;
+
+err_unregister_sensors:
+ cio2_bridge_unregister_sensors(bridge);
+ return ret;
+}
+
+int cio2_bridge_init(struct pci_dev *cio2)
+{
+ struct device *dev = &cio2->dev;
+ struct fwnode_handle *fwnode;
+ struct cio2_bridge *bridge;
+ unsigned int i;
+ int ret;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ strscpy(bridge->cio2_node_name, CIO2_HID,
+ sizeof(bridge->cio2_node_name));
+ bridge->cio2_hid_node.name = bridge->cio2_node_name;
+
+ ret = software_node_register(&bridge->cio2_hid_node);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register the CIO2 HID node\n");
+ goto err_free_bridge;
+ }
+
+ /*
+	 * Map the lane arrangement, which is fixed for the IPU3 (meaning we
+	 * only need one, rather than one per sensor). We include it as a
+	 * member of struct cio2_bridge rather than a global variable so that,
+	 * like the rest of the struct, it survives module unload while the
+	 * registered software nodes still reference it.
+ */
+ for (i = 0; i < CIO2_MAX_LANES; i++)
+ bridge->data_lanes[i] = i + 1;
+
+ ret = cio2_bridge_connect_sensors(bridge, cio2);
+ if (ret || bridge->n_sensors == 0)
+ goto err_unregister_cio2;
+
+ dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
+
+ fwnode = software_node_fwnode(&bridge->cio2_hid_node);
+ if (!fwnode) {
+ dev_err(dev, "Error getting fwnode from cio2 software_node\n");
+ ret = -ENODEV;
+ goto err_unregister_sensors;
+ }
+
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+
+err_unregister_sensors:
+ cio2_bridge_unregister_sensors(bridge);
+err_unregister_cio2:
+ software_node_unregister(&bridge->cio2_hid_node);
+err_free_bridge:
+ kfree(bridge);
+
+ return ret;
+}
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
new file mode 100644
index 000000000000..dd0ffcafa489
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+#ifndef __CIO2_BRIDGE_H
+#define __CIO2_BRIDGE_H
+
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include "ipu3-cio2.h"
+
+#define CIO2_HID "INT343E"
+#define CIO2_MAX_LANES 4
+#define MAX_NUM_LINK_FREQS 3
+
+#define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \
+ (const struct cio2_sensor_config) { \
+ .hid = _HID, \
+ .nr_link_freqs = _NR, \
+ .link_freqs = { __VA_ARGS__ } \
+ }
+
+#define NODE_SENSOR(_HID, _PROPS) \
+ (const struct software_node) { \
+ .name = _HID, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_PORT(_PORT, _SENSOR_NODE) \
+ (const struct software_node) { \
+ .name = _PORT, \
+ .parent = _SENSOR_NODE, \
+ }
+
+#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
+ (const struct software_node) { \
+ .name = _EP, \
+ .parent = _PORT, \
+ .properties = _PROPS, \
+ }
+
+enum cio2_sensor_swnodes {
+ SWNODE_SENSOR_HID,
+ SWNODE_SENSOR_PORT,
+ SWNODE_SENSOR_ENDPOINT,
+ SWNODE_CIO2_PORT,
+ SWNODE_CIO2_ENDPOINT,
+ SWNODE_COUNT
+};
+
+/* Data representation as it is in ACPI SSDB buffer */
+struct cio2_sensor_ssdb {
+ u8 version;
+ u8 sku;
+ u8 guid_csi2[16];
+ u8 devfunction;
+ u8 bus;
+ u32 dphylinkenfuses;
+ u32 clockdiv;
+ u8 link;
+ u8 lanes;
+ u32 csiparams[10];
+ u32 maxlanespeed;
+ u8 sensorcalibfileidx;
+ u8 sensorcalibfileidxInMBZ[3];
+ u8 romtype;
+ u8 vcmtype;
+ u8 platforminfo;
+ u8 platformsubinfo;
+ u8 flash;
+ u8 privacyled;
+ u8 degree;
+ u8 mipilinkdefined;
+ u32 mclkspeed;
+ u8 controllogicid;
+ u8 reserved1[3];
+ u8 mclkport;
+ u8 reserved2[13];
+} __packed;
+
+struct cio2_property_names {
+ char clock_frequency[16];
+ char rotation[9];
+ char bus_type[9];
+ char data_lanes[11];
+ char remote_endpoint[16];
+ char link_frequencies[17];
+};
+
+struct cio2_node_names {
+ char port[7];
+ char endpoint[11];
+ char remote_port[7];
+};
+
+struct cio2_sensor_config {
+ const char *hid;
+ const u8 nr_link_freqs;
+ const u64 link_freqs[MAX_NUM_LINK_FREQS];
+};
+
+struct cio2_sensor {
+ char name[ACPI_ID_LEN];
+ struct acpi_device *adev;
+
+	struct software_node swnodes[SWNODE_COUNT];
+ struct cio2_node_names node_names;
+
+ struct cio2_sensor_ssdb ssdb;
+ struct cio2_property_names prop_names;
+ struct property_entry ep_properties[5];
+ struct property_entry dev_properties[3];
+ struct property_entry cio2_properties[3];
+ struct software_node_ref_args local_ref[1];
+ struct software_node_ref_args remote_ref[1];
+};
+
+struct cio2_bridge {
+ char cio2_node_name[ACPI_ID_LEN];
+ struct software_node cio2_hid_node;
+ u32 data_lanes[4];
+ unsigned int n_sensors;
+ struct cio2_sensor sensors[CIO2_NUM_PORTS];
+};
+
+#endif
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 36e354ecf71e..6e8c0c230e11 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
if (!q->sensor)
return -ENODEV;
- freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
@@ -1094,12 +1094,9 @@ static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
mpix->pixelformat = fmt->fourcc;
mpix->colorspace = V4L2_COLORSPACE_RAW;
mpix->field = V4L2_FIELD_NONE;
- memset(mpix->reserved, 0, sizeof(mpix->reserved));
mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
mpix->height;
- memset(mpix->plane_fmt[0].reserved, 0,
- sizeof(mpix->plane_fmt[0].reserved));
/* use default */
mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
@@ -1269,7 +1266,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
fmt->format.code = formats[0].mbus_code;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- if (formats[i].mbus_code == fmt->format.code) {
+ if (formats[i].mbus_code == mbus_code) {
fmt->format.code = mbus_code;
break;
}
@@ -1467,7 +1464,7 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- struct sensor_async_subdev *s_asd = NULL;
+ struct sensor_async_subdev *s_asd;
struct fwnode_handle *ep;
ep = fwnode_graph_get_endpoint_by_id(
@@ -1481,27 +1478,22 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
if (ret)
goto err_parse;
- s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
- if (!s_asd) {
- ret = -ENOMEM;
+ s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &cio2->notifier, ep, struct sensor_async_subdev);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
goto err_parse;
}
s_asd->csi2.port = vep.base.port;
s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &cio2->notifier, ep, &s_asd->asd);
- if (ret)
- goto err_parse;
-
fwnode_handle_put(ep);
continue;
err_parse:
fwnode_handle_put(ep);
- kfree(s_asd);
return ret;
}
@@ -1702,11 +1694,28 @@ static void cio2_queues_exit(struct cio2_device *cio2)
cio2_queue_exit(cio2, &cio2->queue[i]);
}
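+
+/*
+ * Check whether the fwnode, or failing that its secondary fwnode, exposes
+ * at least one graph endpoint, i.e. whether any sensor connection is
+ * described either by firmware or by the cio2-bridge software nodes.
+ */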
+static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *endpoint;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (endpoint) {
+ fwnode_handle_put(endpoint);
+ return 0;
+ }
+
+ return cio2_check_fwnode_graph(fwnode->secondary);
+}
+
/**************** PCI interface ****************/
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
+ struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
struct cio2_device *cio2;
int r;
@@ -1715,6 +1724,23 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return -ENOMEM;
cio2->pci_dev = pci_dev;
+ /*
+	 * On some platforms no connections to sensors are defined in firmware.
+	 * If the device has no endpoints, we can try to build them as
+	 * software_nodes parsed from the ACPI SSDB buffer.
+ */
+ r = cio2_check_fwnode_graph(fwnode);
+ if (r) {
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
+ dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
+ return -EINVAL;
+ }
+
+ r = cio2_bridge_init(pci_dev);
+ if (r)
+ return r;
+ }
+
r = pcim_enable_device(pci_dev);
if (r) {
dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index ccf0b85ae36f..3806d7f04d69 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -4,8 +4,26 @@
#ifndef __IPU3_CIO2_H
#define __IPU3_CIO2_H
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <linux/types.h>
+#include <asm/page.h>
+
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+struct cio2_fbpt_entry;		/* defined below, after its first use */
+struct pci_dev;
+
#define CIO2_NAME "ipu3-cio2"
#define CIO2_DEVICE_NAME "Intel IPU3 CIO2"
#define CIO2_ENTITY_NAME "ipu3-csi2"
@@ -437,4 +455,10 @@ static inline struct cio2_queue *vb2q_to_cio2_queue(struct vb2_queue *vq)
return container_of(vq, struct cio2_queue, vbq);
}
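+/*
+ * When CIO2_BRIDGE is not enabled, the stub reports success so that probing
+ * carries on with whatever endpoints the firmware graph already provides.
+ */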
+#if IS_ENABLED(CONFIG_CIO2_BRIDGE)
+int cio2_bridge_init(struct pci_dev *cio2);
+#else
+static inline int cio2_bridge_init(struct pci_dev *cio2) { return 0; }
+#endif
+
#endif
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 28acb14490d5..6e448cb3b51c 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -873,6 +873,11 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_MASTER)) {
IVTV_ERR("Bus Mastering is not enabled\n");
+ if (itv->has_cx23415)
+ release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET,
+ IVTV_DECODER_SIZE);
+ release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
+ release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
return -ENXIO;
}
}
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 39e3c7f8c5b4..76a37fbd8458 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev)
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
- if (err)
+ if (err) {
+ video_device_release(dev->empress_dev);
+ dev->empress_dev = NULL;
return err;
+ }
dev->empress_dev->queue = q;
dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 5cc4ef21f9d3..aa0895d2d735 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -871,23 +871,24 @@ void saa7134_enable_i2s(struct saa7134_dev *dev)
switch (dev->pci->device) {
case PCI_DEVICE_ID_PHILIPS_SAA7133:
case PCI_DEVICE_ID_PHILIPS_SAA7135:
-		/* Set I2S format (SONY) */
- saa_writeb(SAA7133_I2S_AUDIO_CONTROL, 0x00);
- /* Start I2S */
- saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x11);
- break;
+		/* Set I2S format (SONY) */
+ saa_writeb(SAA7133_I2S_AUDIO_CONTROL, 0x00);
+ /* Start I2S */
+ saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x11);
+ break;
case PCI_DEVICE_ID_PHILIPS_SAA7134:
- i2s_format = (dev->input->amux == TV) ? 0x00 : 0x01;
+ i2s_format = (dev->input->amux == TV) ? 0x00 : 0x01;
- /* enable I2S audio output for the mpeg encoder */
- saa_writeb(SAA7134_I2S_OUTPUT_SELECT, 0x80);
- saa_writeb(SAA7134_I2S_OUTPUT_FORMAT, i2s_format);
- saa_writeb(SAA7134_I2S_OUTPUT_LEVEL, 0x0F);
- saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x01);
+ /* enable I2S audio output for the mpeg encoder */
+ saa_writeb(SAA7134_I2S_OUTPUT_SELECT, 0x80);
+ saa_writeb(SAA7134_I2S_OUTPUT_FORMAT, i2s_format);
+ saa_writeb(SAA7134_I2S_OUTPUT_LEVEL, 0x0F);
+ saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x01);
+ break;
default:
- break;
+ break;
}
}
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 245d9db280aa..89c5b79a5b24 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -103,13 +103,13 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
buf->pt_size = (SAA7164_PT_ENTRIES * sizeof(u64)) + 0x1000;
/* Allocate contiguous memory */
- buf->cpu = pci_alloc_consistent(port->dev->pci, buf->pci_size,
- &buf->dma);
+ buf->cpu = dma_alloc_coherent(&port->dev->pci->dev, buf->pci_size,
+ &buf->dma, GFP_KERNEL);
if (!buf->cpu)
goto fail1;
- buf->pt_cpu = pci_alloc_consistent(port->dev->pci, buf->pt_size,
- &buf->pt_dma);
+ buf->pt_cpu = dma_alloc_coherent(&port->dev->pci->dev, buf->pt_size,
+ &buf->pt_dma, GFP_KERNEL);
if (!buf->pt_cpu)
goto fail2;
@@ -137,7 +137,8 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
goto ret;
fail2:
- pci_free_consistent(port->dev->pci, buf->pci_size, buf->cpu, buf->dma);
+ dma_free_coherent(&port->dev->pci->dev, buf->pci_size, buf->cpu,
+ buf->dma);
fail1:
kfree(buf);
@@ -160,8 +161,9 @@ int saa7164_buffer_dealloc(struct saa7164_buffer *buf)
if (buf->flags != SAA7164_BUFFER_FREE)
log_warn(" freeing a non-free buffer\n");
- pci_free_consistent(dev->pci, buf->pci_size, buf->cpu, buf->dma);
- pci_free_consistent(dev->pci, buf->pt_size, buf->pt_cpu, buf->pt_dma);
+ dma_free_coherent(&dev->pci->dev, buf->pci_size, buf->cpu, buf->dma);
+ dma_free_coherent(&dev->pci->dev, buf->pt_size, buf->pt_cpu,
+ buf->pt_dma);
kfree(buf);
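The legacy pci_alloc_consistent()/pci_free_consistent() pair was a thin wrapper around the generic DMA API that hardcoded GFP_ATOMIC, so the conversion is mechanical: pass the struct device instead of the struct pci_dev and make the allocation flags explicit. A minimal sketch of the correspondence:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static void *example_alloc(struct pci_dev *pdev, size_t size,
				   dma_addr_t *dma)
	{
		/* was: pci_alloc_consistent(pdev, size, dma), which implied
		 * GFP_ATOMIC; GFP_KERNEL is fine in process context. */
		return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	}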
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index f3a4e575a782..7973ae42873a 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1273,7 +1273,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
/* TODO */
- err = pci_set_dma_mask(pci_dev, 0xffffffff);
+ err = dma_set_mask(&pci_dev->dev, 0xffffffff);
if (err) {
printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
goto fail_irq;
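dma_set_mask() covers only streaming mappings; a driver that also allocates coherent buffers normally sets both masks, and DMA_BIT_MASK(32) is the idiomatic spelling of the open-coded 0xffffffff. A sketch of what the TODO above might become, using the combined helper:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_set_masks(struct pci_dev *pci_dev)
	{
		/* sets both the streaming and the coherent mask */
		return dma_set_mask_and_coherent(&pci_dev->dev,
						 DMA_BIT_MASK(32));
	}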
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 2801a2b03fa0..4b4eb156e214 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -24,7 +24,7 @@
saa7164_bus..() : Manage a read/write memory ring buffer in the
| : PCIe Address space.
|
- | saa7164_fw...() : Load any frimware
+ | saa7164_fw...() : Load any firmware
| | : direct into the device
V V
<- ----------------- PCIe address space -------------------- ->
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index e6b74e161a05..c0604d9c7011 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -60,38 +60,44 @@ static void smi_ir_decode(struct smi_rc *ir)
{
struct smi_dev *dev = ir->dev;
struct rc_dev *rc_dev = ir->rc_dev;
- u32 dwIRControl, dwIRData;
- u8 index, ucIRCount, readLoop;
+ u32 control, data;
+ u8 index, ir_count, read_loop;
- dwIRControl = smi_read(IR_Init_Reg);
+ control = smi_read(IR_Init_Reg);
- if (dwIRControl & rbIRVld) {
- ucIRCount = (u8) smi_read(IR_Data_Cnt);
+ dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
- readLoop = ucIRCount/4;
- if (ucIRCount % 4)
- readLoop += 1;
- for (index = 0; index < readLoop; index++) {
- dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
+ if (control & rbIRVld) {
+ ir_count = (u8)smi_read(IR_Data_Cnt);
- ir->irData[index*4 + 0] = (u8)(dwIRData);
- ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
- ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
- ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
+ dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
+
+ read_loop = ir_count / 4;
+ if (ir_count % 4)
+ read_loop += 1;
+ for (index = 0; index < read_loop; index++) {
+ data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
+ dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
+
+ ir->irData[index * 4 + 0] = (u8)(data);
+ ir->irData[index * 4 + 1] = (u8)(data >> 8);
+ ir->irData[index * 4 + 2] = (u8)(data >> 16);
+ ir->irData[index * 4 + 3] = (u8)(data >> 24);
}
- smi_raw_process(rc_dev, ir->irData, ucIRCount);
- smi_set(IR_Init_Reg, rbIRVld);
+ smi_raw_process(rc_dev, ir->irData, ir_count);
}
- if (dwIRControl & rbIRhighidle) {
+ if (control & rbIRhighidle) {
struct ir_raw_event rawir = {};
+ dev_dbg(&rc_dev->dev, "high idle\n");
+
rawir.pulse = 0;
rawir.duration = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
ir_raw_event_store_with_filter(rc_dev, &rawir);
- smi_set(IR_Init_Reg, rbIRhighidle);
}
+ smi_set(IR_Init_Reg, rbIRVld);
ir_raw_event_handle(rc_dev);
}
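For reference, rc-core consumes raw IR as a stream of pulse/space durations, and the idle report above (SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN) is what lets the protocol decoders time out a frame. A minimal sketch of the store/handle pair, assuming microsecond durations as on current kernels:

	#include <media/rc-core.h>

	static void example_report_space(struct rc_dev *rc_dev, u32 usecs)
	{
		struct ir_raw_event ev = {};

		ev.pulse = false;		/* a space: carrier off */
		ev.duration = usecs;
		ir_raw_event_store_with_filter(rc_dev, &ev);
		ir_raw_event_handle(rc_dev);	/* wake the decoder thread */
	}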
@@ -150,7 +156,7 @@ int smi_ir_init(struct smi_dev *dev)
rc_dev->dev.parent = &dev->pci_dev->dev;
rc_dev->map_name = dev->info->rc_map;
- rc_dev->timeout = MS_TO_US(100);
+ rc_dev->timeout = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
rc_dev->rx_resolution = SMI_SAMPLE_PERIOD;
ir->rc_dev = rc_dev;
@@ -173,7 +179,7 @@ void smi_ir_exit(struct smi_dev *dev)
struct smi_rc *ir = &dev->ir;
struct rc_dev *rc_dev = ir->rc_dev;
- smi_ir_stop(ir);
rc_unregister_device(rc_dev);
+ smi_ir_stop(ir);
ir->rc_dev = NULL;
}
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index e7604b7ecc8d..0c300d019d9c 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -351,13 +351,15 @@ static void smi_dma_xfer(struct tasklet_struct *t)
static void smi_port_dma_free(struct smi_port *port)
{
if (port->cpu_addr[0]) {
- pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
- port->cpu_addr[0], port->dma_addr[0]);
+ dma_free_coherent(&port->dev->pci_dev->dev,
+ SMI_TS_DMA_BUF_SIZE, port->cpu_addr[0],
+ port->dma_addr[0]);
port->cpu_addr[0] = NULL;
}
if (port->cpu_addr[1]) {
- pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
- port->cpu_addr[1], port->dma_addr[1]);
+ dma_free_coherent(&port->dev->pci_dev->dev,
+ SMI_TS_DMA_BUF_SIZE, port->cpu_addr[1],
+ port->dma_addr[1]);
port->cpu_addr[1] = NULL;
}
}
@@ -398,9 +400,10 @@ static int smi_port_init(struct smi_port *port, int dmaChanUsed)
}
if (port->_dmaInterruptCH0) {
- port->cpu_addr[0] = pci_alloc_consistent(port->dev->pci_dev,
- SMI_TS_DMA_BUF_SIZE,
- &port->dma_addr[0]);
+ port->cpu_addr[0] = dma_alloc_coherent(&port->dev->pci_dev->dev,
+ SMI_TS_DMA_BUF_SIZE,
+ &port->dma_addr[0],
+ GFP_KERNEL);
if (!port->cpu_addr[0]) {
dev_err(&port->dev->pci_dev->dev,
"Port[%d] DMA CH0 memory allocation failed!\n",
@@ -410,9 +413,10 @@ static int smi_port_init(struct smi_port *port, int dmaChanUsed)
}
if (port->_dmaInterruptCH1) {
- port->cpu_addr[1] = pci_alloc_consistent(port->dev->pci_dev,
- SMI_TS_DMA_BUF_SIZE,
- &port->dma_addr[1]);
+ port->cpu_addr[1] = dma_alloc_coherent(&port->dev->pci_dev->dev,
+ SMI_TS_DMA_BUF_SIZE,
+ &port->dma_addr[1],
+ GFP_KERNEL);
if (!port->cpu_addr[1]) {
dev_err(&port->dev->pci_dev->dev,
"Port[%d] DMA CH1 memory allocation failed!\n",
@@ -963,7 +967,7 @@ static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* should we set to 32bit DMA? */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_iounmap;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 35a18d388f3f..fd1831e97b22 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -199,6 +199,21 @@ menuconfig V4L_MEM2MEM_DRIVERS
if V4L_MEM2MEM_DRIVERS
+config VIDEO_ALLEGRO_DVT
+ tristate "Allegro DVT Video IP Core"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ help
+ Support for the encoder video IP core by Allegro DVT. This core is
+ found for example on the Xilinx ZynqMP SoC in the EV family and is
+ called VCU in the reference manual.
+
+ To compile this driver as a module, choose M here: the module
+ will be called allegro.
+
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
depends on VIDEO_DEV && VIDEO_V4L2 && OF && (ARCH_MXC || COMPILE_TEST)
@@ -530,10 +545,9 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
- depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM if ARCH_QCOM
- select VIDEOBUF2_DMA_SG
+ select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
This is a V4L2 driver for Qualcomm Venus video accelerator
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 1d63aa956bcd..9d4d6370908d 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -3,6 +3,7 @@
# Makefile for the video capture/playback device drivers.
#
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
obj-$(CONFIG_VIDEO_ASPEED) += aspeed-video.o
obj-$(CONFIG_VIDEO_CADENCE) += cadence/
obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
diff --git a/drivers/media/platform/allegro-dvt/Makefile b/drivers/media/platform/allegro-dvt/Makefile
new file mode 100644
index 000000000000..66108a303774
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+allegro-objs := allegro-core.o allegro-mail.o
+allegro-objs += nal-rbsp.o nal-h264.o nal-hevc.o
+
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index 9f718f43282b..887b492e4ad1 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -30,6 +30,7 @@
#include "allegro-mail.h"
#include "nal-h264.h"
+#include "nal-hevc.h"
/*
* Support up to 4k video streams. The hardware actually supports higher
@@ -90,10 +91,16 @@
* because it needs to write SPS/PPS NAL units. The encoder writes the actual
* frame data after the offset.
*/
-#define ENCODER_STREAM_OFFSET SZ_64
+#define ENCODER_STREAM_OFFSET SZ_128
#define SIZE_MACROBLOCK 16
+/* Encoding options */
+#define LOG2_MAX_FRAME_NUM 4
+#define LOG2_MAX_PIC_ORDER_CNT 10
+#define BETA_OFFSET_DIV_2 -1
+#define TC_OFFSET_DIV_2 -1
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
@@ -167,13 +174,6 @@ static struct regmap_config allegro_sram_config = {
.cache_type = REGCACHE_NONE,
};
-enum allegro_state {
- ALLEGRO_STATE_ENCODING,
- ALLEGRO_STATE_DRAIN,
- ALLEGRO_STATE_WAIT_FOR_BUFFER,
- ALLEGRO_STATE_STOPPED,
-};
-
#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
struct allegro_channel {
@@ -196,22 +196,41 @@ struct allegro_channel {
unsigned int osequence;
u32 codec;
- enum v4l2_mpeg_video_h264_profile profile;
- enum v4l2_mpeg_video_h264_level level;
unsigned int sizeimage_encoded;
unsigned int csequence;
bool frame_rc_enable;
unsigned int bitrate;
unsigned int bitrate_peak;
- unsigned int cpb_size;
- unsigned int gop_size;
struct allegro_buffer config_blob;
+ unsigned int log2_max_frame_num;
+ bool temporal_mvp_enable;
+
+ bool enable_loop_filter_across_tiles;
+ bool enable_loop_filter_across_slices;
+ bool enable_deblocking_filter_override;
+ bool enable_reordering;
+ bool dbf_ovr_en;
+
unsigned int num_ref_idx_l0;
unsigned int num_ref_idx_l1;
+ /* Maximum range for motion estimation */
+ int b_hrz_me_range;
+ int b_vrt_me_range;
+ int p_hrz_me_range;
+ int p_vrt_me_range;
+ /* Size limits of coding unit */
+ int min_cu_size;
+ int max_cu_size;
+ /* Size limits of transform unit */
+ int min_tu_size;
+ int max_tu_size;
+ int max_transfo_depth_intra;
+ int max_transfo_depth_inter;
+
struct v4l2_ctrl *mpeg_video_h264_profile;
struct v4l2_ctrl *mpeg_video_h264_level;
struct v4l2_ctrl *mpeg_video_h264_i_frame_qp;
@@ -219,6 +238,16 @@ struct allegro_channel {
struct v4l2_ctrl *mpeg_video_h264_min_qp;
struct v4l2_ctrl *mpeg_video_h264_p_frame_qp;
struct v4l2_ctrl *mpeg_video_h264_b_frame_qp;
+
+ struct v4l2_ctrl *mpeg_video_hevc_profile;
+ struct v4l2_ctrl *mpeg_video_hevc_level;
+ struct v4l2_ctrl *mpeg_video_hevc_tier;
+ struct v4l2_ctrl *mpeg_video_hevc_i_frame_qp;
+ struct v4l2_ctrl *mpeg_video_hevc_max_qp;
+ struct v4l2_ctrl *mpeg_video_hevc_min_qp;
+ struct v4l2_ctrl *mpeg_video_hevc_p_frame_qp;
+ struct v4l2_ctrl *mpeg_video_hevc_b_frame_qp;
+
struct v4l2_ctrl *mpeg_video_frame_rc_enable;
struct { /* video bitrate mode control cluster */
struct v4l2_ctrl *mpeg_video_bitrate_mode;
@@ -246,21 +275,51 @@ struct allegro_channel {
struct completion completion;
unsigned int error;
- enum allegro_state state;
};
static inline int
-allegro_set_state(struct allegro_channel *channel, enum allegro_state state)
+allegro_channel_get_i_frame_qp(struct allegro_channel *channel)
{
- channel->state = state;
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_i_frame_qp);
+ else
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_i_frame_qp);
+}
- return 0;
+static inline int
+allegro_channel_get_p_frame_qp(struct allegro_channel *channel)
+{
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_p_frame_qp);
+ else
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_p_frame_qp);
}
-static inline enum allegro_state
-allegro_get_state(struct allegro_channel *channel)
+static inline int
+allegro_channel_get_b_frame_qp(struct allegro_channel *channel)
{
- return channel->state;
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_b_frame_qp);
+ else
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_b_frame_qp);
+}
+
+static inline int
+allegro_channel_get_min_qp(struct allegro_channel *channel)
+{
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_min_qp);
+ else
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_min_qp);
+}
+
+static inline int
+allegro_channel_get_max_qp(struct allegro_channel *channel)
+{
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_max_qp);
+ else
+ return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_max_qp);
}
struct allegro_m2m_buffer {
@@ -476,7 +535,7 @@ select_minimum_h264_level(unsigned int width, unsigned int height)
return level;
}
-static unsigned int maximum_bitrate(enum v4l2_mpeg_video_h264_level level)
+static unsigned int h264_maximum_bitrate(enum v4l2_mpeg_video_h264_level level)
{
switch (level) {
case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
@@ -515,7 +574,7 @@ static unsigned int maximum_bitrate(enum v4l2_mpeg_video_h264_level level)
}
}
-static unsigned int maximum_cpb_size(enum v4l2_mpeg_video_h264_level level)
+static unsigned int h264_maximum_cpb_size(enum v4l2_mpeg_video_h264_level level)
{
switch (level) {
case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
@@ -554,6 +613,86 @@ static unsigned int maximum_cpb_size(enum v4l2_mpeg_video_h264_level level)
}
}
+static enum v4l2_mpeg_video_hevc_level
+select_minimum_hevc_level(unsigned int width, unsigned int height)
+{
+ unsigned int luma_picture_size = width * height;
+ enum v4l2_mpeg_video_hevc_level level;
+
+ if (luma_picture_size <= 36864)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_1;
+ else if (luma_picture_size <= 122880)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_2;
+ else if (luma_picture_size <= 245760)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1;
+ else if (luma_picture_size <= 552960)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_3;
+ else if (luma_picture_size <= 983040)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1;
+ else if (luma_picture_size <= 2228224)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_4;
+ else if (luma_picture_size <= 8912896)
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_5;
+ else
+ level = V4L2_MPEG_VIDEO_HEVC_LEVEL_6;
+
+ return level;
+}
+
+static unsigned int hevc_maximum_bitrate(enum v4l2_mpeg_video_hevc_level level)
+{
+ /*
+ * See Rec. ITU-T H.265 v5 (02/2018), A.4.2 Profile-specific level
+ * limits for the video profiles.
+ */
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return 128;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return 1500;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return 3000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return 6000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return 10000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return 12000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return 20000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return 25000;
+ default:
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return 40000;
+ }
+}
+
+static unsigned int hevc_maximum_cpb_size(enum v4l2_mpeg_video_hevc_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return 350;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return 1500;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return 3000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return 6000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return 10000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return 12000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return 20000;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return 25000;
+ default:
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return 40000;
+ }
+}
+
static const struct fw_info *
allegro_get_firmware_info(struct allegro_dev *dev,
const struct firmware *fw,
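The thresholds above are the maximum luma picture sizes from the HEVC level definitions (Annex A). As a worked example, a 1920x1080 stream has 2073600 luma samples, which exceeds the 983040 allowed for Level 3.1 but fits within the 2228224 of Level 4:

	enum v4l2_mpeg_video_hevc_level level =
		select_minimum_hevc_level(1920, 1080);
	/* 1920 * 1080 == 2073600 <= 2228224  =>  V4L2_MPEG_VIDEO_HEVC_LEVEL_4 */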
@@ -877,6 +1016,55 @@ static u16 v4l2_level_to_mcu_level(enum v4l2_mpeg_video_h264_level level)
}
}
+static u8 hevc_profile_to_mcu_profile(enum v4l2_mpeg_video_hevc_profile profile)
+{
+ switch (profile) {
+ default:
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ return 1;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return 2;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return 3;
+ }
+}
+
+static u16 hevc_level_to_mcu_level(enum v4l2_mpeg_video_hevc_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return 10;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return 20;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return 30;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return 40;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return 50;
+ default:
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return 51;
+ }
+}
+
+static u8 hevc_tier_to_mcu_tier(enum v4l2_mpeg_video_hevc_tier tier)
+{
+ switch (tier) {
+ default:
+ case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
+ return 0;
+ case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
+ return 1;
+ }
+}
+
static u32
v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode)
{
@@ -913,13 +1101,26 @@ static s16 get_qp_delta(int minuend, int subtrahend)
return minuend - subtrahend;
}
+static u32 allegro_channel_get_entropy_mode(struct allegro_channel *channel)
+{
+#define ALLEGRO_ENTROPY_MODE_CAVLC 0
+#define ALLEGRO_ENTROPY_MODE_CABAC 1
+
+ /* HEVC always uses CABAC, but this has to be explicitly set */
+ if (channel->codec == V4L2_PIX_FMT_HEVC)
+ return ALLEGRO_ENTROPY_MODE_CABAC;
+
+ return ALLEGRO_ENTROPY_MODE_CAVLC;
+}
+
static int fill_create_channel_param(struct allegro_channel *channel,
struct create_channel_param *param)
{
- int i_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_i_frame_qp);
- int p_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_p_frame_qp);
- int b_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_b_frame_qp);
+ int i_frame_qp = allegro_channel_get_i_frame_qp(channel);
+ int p_frame_qp = allegro_channel_get_p_frame_qp(channel);
+ int b_frame_qp = allegro_channel_get_b_frame_qp(channel);
int bitrate_mode = v4l2_ctrl_g_ctrl(channel->mpeg_video_bitrate_mode);
+ unsigned int cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size);
param->width = channel->width;
param->height = channel->height;
@@ -927,38 +1128,61 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->colorspace =
v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
param->src_mode = 0x0;
- param->profile = v4l2_profile_to_mcu_profile(channel->profile);
- param->constraint_set_flags = BIT(1);
+
param->codec = channel->codec;
- param->level = v4l2_level_to_mcu_level(channel->level);
- param->tier = 0;
+ if (channel->codec == V4L2_PIX_FMT_H264) {
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+
+ profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
+ level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);
+
+ param->profile = v4l2_profile_to_mcu_profile(profile);
+ param->constraint_set_flags = BIT(1);
+ param->level = v4l2_level_to_mcu_level(level);
+ } else {
+ enum v4l2_mpeg_video_hevc_profile profile;
+ enum v4l2_mpeg_video_hevc_level level;
+ enum v4l2_mpeg_video_hevc_tier tier;
- param->log2_max_poc = 10;
- param->log2_max_frame_num = 4;
- param->temporal_mvp_enable = 1;
+ profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
+ level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
+ tier = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);
- param->dbf_ovr_en = 1;
+ param->profile = hevc_profile_to_mcu_profile(profile);
+ param->level = hevc_level_to_mcu_level(level);
+ param->tier = hevc_tier_to_mcu_tier(tier);
+ }
+
+ param->log2_max_poc = LOG2_MAX_PIC_ORDER_CNT;
+ param->log2_max_frame_num = channel->log2_max_frame_num;
+ param->temporal_mvp_enable = channel->temporal_mvp_enable;
+
+ param->dbf_ovr_en = channel->dbf_ovr_en;
+ param->override_lf = channel->enable_deblocking_filter_override;
+ param->enable_reordering = channel->enable_reordering;
+ param->entropy_mode = allegro_channel_get_entropy_mode(channel);
param->rdo_cost_mode = 1;
param->custom_lda = 1;
param->lf = 1;
- param->lf_x_tile = 1;
- param->lf_x_slice = 1;
+ param->lf_x_tile = channel->enable_loop_filter_across_tiles;
+ param->lf_x_slice = channel->enable_loop_filter_across_slices;
param->src_bit_depth = 8;
- param->beta_offset = -1;
- param->tc_offset = -1;
+ param->beta_offset = BETA_OFFSET_DIV_2;
+ param->tc_offset = TC_OFFSET_DIV_2;
param->num_slices = 1;
- param->me_range[0] = 8;
- param->me_range[1] = 8;
- param->me_range[2] = 16;
- param->me_range[3] = 16;
- param->max_cu_size = ilog2(SIZE_MACROBLOCK);
- param->min_cu_size = ilog2(8);
- param->max_tu_size = 2;
- param->min_tu_size = 2;
- param->max_transfo_depth_intra = 1;
- param->max_transfo_depth_inter = 1;
+ param->me_range[0] = channel->b_hrz_me_range;
+ param->me_range[1] = channel->b_vrt_me_range;
+ param->me_range[2] = channel->p_hrz_me_range;
+ param->me_range[3] = channel->p_vrt_me_range;
+ param->max_cu_size = channel->max_cu_size;
+ param->min_cu_size = channel->min_cu_size;
+ param->max_tu_size = channel->max_tu_size;
+ param->min_tu_size = channel->min_tu_size;
+ param->max_transfo_depth_intra = channel->max_transfo_depth_intra;
+ param->max_transfo_depth_inter = channel->max_transfo_depth_inter;
param->prefetch_auto = 0;
param->prefetch_mem_offset = 0;
@@ -967,8 +1191,7 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->rate_control_mode = channel->frame_rc_enable ?
v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;
- param->cpb_size = v4l2_cpb_size_to_mcu(channel->cpb_size,
- channel->bitrate_peak);
+ param->cpb_size = v4l2_cpb_size_to_mcu(cpb_size, channel->bitrate_peak);
/* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */
param->initial_rem_delay = param->cpb_size;
param->framerate = DIV_ROUND_UP(channel->framerate.numerator,
@@ -977,8 +1200,8 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->target_bitrate = channel->bitrate;
param->max_bitrate = channel->bitrate_peak;
param->initial_qp = i_frame_qp;
- param->min_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_min_qp);
- param->max_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_max_qp);
+ param->min_qp = allegro_channel_get_min_qp(channel);
+ param->max_qp = allegro_channel_get_max_qp(channel);
param->ip_delta = get_qp_delta(i_frame_qp, p_frame_qp);
param->pb_delta = get_qp_delta(p_frame_qp, b_frame_qp);
param->golden_ref = 0;
@@ -991,10 +1214,10 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->max_pixel_value = 255;
param->gop_ctrl_mode = 0x00000002;
- param->freq_idr = channel->gop_size;
+ param->freq_idr = v4l2_ctrl_g_ctrl(channel->mpeg_video_gop_size);
param->freq_lt = 0;
param->gdr_mode = 0x00000000;
- param->gop_length = channel->gop_size;
+ param->gop_length = v4l2_ctrl_g_ctrl(channel->mpeg_video_gop_size);
param->subframe_latency = 0x00000000;
param->lda_factors[0] = 51;
@@ -1060,7 +1283,7 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
struct allegro_channel *channel,
dma_addr_t paddr,
unsigned long size,
- u64 stream_id)
+ u64 dst_handle)
{
struct mcu_msg_put_stream_buffer msg;
@@ -1075,7 +1298,7 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
msg.size = size;
msg.offset = ENCODER_STREAM_OFFSET;
/* copied to mcu_msg_encode_frame_response */
- msg.stream_id = stream_id;
+ msg.dst_handle = dst_handle;
allegro_mbox_send(dev->mbox_command, &msg);
@@ -1274,23 +1497,30 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
/* Calculation of crop units in Rec. ITU-T H.264 (04/2017) p. 76 */
unsigned int crop_unit_x = 2;
unsigned int crop_unit_y = 2;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ unsigned int cpb_size;
+ unsigned int cpb_size_scale;
sps = kzalloc(sizeof(*sps), GFP_KERNEL);
if (!sps)
return -ENOMEM;
- sps->profile_idc = nal_h264_profile_from_v4l2(channel->profile);
+ profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
+ level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);
+
+ sps->profile_idc = nal_h264_profile_from_v4l2(profile);
sps->constraint_set0_flag = 0;
sps->constraint_set1_flag = 1;
sps->constraint_set2_flag = 0;
sps->constraint_set3_flag = 0;
sps->constraint_set4_flag = 0;
sps->constraint_set5_flag = 0;
- sps->level_idc = nal_h264_level_from_v4l2(channel->level);
+ sps->level_idc = nal_h264_level_from_v4l2(level);
sps->seq_parameter_set_id = 0;
- sps->log2_max_frame_num_minus4 = 0;
+ sps->log2_max_frame_num_minus4 = LOG2_MAX_FRAME_NUM - 4;
sps->pic_order_cnt_type = 0;
- sps->log2_max_pic_order_cnt_lsb_minus4 = 6;
+ sps->log2_max_pic_order_cnt_lsb_minus4 = LOG2_MAX_PIC_ORDER_CNT - 4;
sps->max_num_ref_frames = 3;
sps->gaps_in_frame_num_value_allowed_flag = 0;
sps->pic_width_in_mbs_minus1 =
@@ -1331,13 +1561,15 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui.vcl_hrd_parameters_present_flag = 1;
sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
- sps->vui.vcl_hrd_parameters.cpb_size_scale = 1;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
+ cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size);
+ cpb_size_scale = ffs(cpb_size) - 4;
+ sps->vui.vcl_hrd_parameters.cpb_size_scale = cpb_size_scale;
sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] =
- (channel->cpb_size * 1000) / (1 << (4 + sps->vui.vcl_hrd_parameters.cpb_size_scale)) - 1;
+ (cpb_size * 1000) / (1 << (4 + cpb_size_scale)) - 1;
sps->vui.vcl_hrd_parameters.cbr_flag[0] =
!v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31;
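Per E-54, the coded CPB size works out to (cpb_size_value_minus1[0] + 1) << (4 + cpb_size_scale), so deriving the scale from the control value's lowest set bit keeps the division exact. A worked example with cpb_size = 3000 as read from the control (the driver scales it by 1000):

	unsigned int cpb_size = 3000;
	unsigned int scale = ffs(cpb_size) - 4;		/* ffs(3000) == 4 -> 0 */
	unsigned int value_minus1 =
		(cpb_size * 1000) / (1 << (4 + scale)) - 1;	/* 187499 */
	/* (187499 + 1) << (4 + 0) == 3000000, the full 3000 * 1000 */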
@@ -1392,45 +1624,165 @@ static ssize_t allegro_h264_write_pps(struct allegro_channel *channel,
return size;
}
-static bool allegro_channel_is_at_eos(struct allegro_channel *channel)
+static void allegro_channel_eos_event(struct allegro_channel *channel)
{
- bool is_at_eos = false;
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
- switch (allegro_get_state(channel)) {
- case ALLEGRO_STATE_STOPPED:
- is_at_eos = true;
- break;
- case ALLEGRO_STATE_DRAIN:
- case ALLEGRO_STATE_WAIT_FOR_BUFFER:
- mutex_lock(&channel->shadow_list_lock);
- if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0 &&
- list_empty(&channel->source_shadow_list))
- is_at_eos = true;
- mutex_unlock(&channel->shadow_list_lock);
- break;
- default:
- break;
- }
+ v4l2_event_queue_fh(&channel->fh, &eos_event);
+}
- return is_at_eos;
+static ssize_t allegro_hevc_write_vps(struct allegro_channel *channel,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_hevc_vps *vps;
+ struct nal_hevc_profile_tier_level *ptl;
+ ssize_t size;
+ unsigned int num_ref_frames = channel->num_ref_idx_l0;
+ s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
+ s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
+ s32 tier = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);
+
+ vps = kzalloc(sizeof(*vps), GFP_KERNEL);
+ if (!vps)
+ return -ENOMEM;
+
+ vps->base_layer_internal_flag = 1;
+ vps->base_layer_available_flag = 1;
+ vps->temporal_id_nesting_flag = 1;
+
+ ptl = &vps->profile_tier_level;
+ ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
+ ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_progressive_source_flag = 1;
+ ptl->general_frame_only_constraint_flag = 1;
+ ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+
+ vps->sub_layer_ordering_info_present_flag = 0;
+ vps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
+ vps->max_num_reorder_pics[0] = num_ref_frames;
+
+ size = nal_hevc_write_vps(&dev->plat_dev->dev, dest, n, vps);
+
+ kfree(vps);
+
+ return size;
}
-static void allegro_channel_buf_done(struct allegro_channel *channel,
- struct vb2_v4l2_buffer *buf,
- enum vb2_buffer_state state)
+static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
+ void *dest, size_t n)
{
- const struct v4l2_event eos_event = {
- .type = V4L2_EVENT_EOS
- };
+ struct allegro_dev *dev = channel->dev;
+ struct nal_hevc_sps *sps;
+ struct nal_hevc_profile_tier_level *ptl;
+ ssize_t size;
+ unsigned int num_ref_frames = channel->num_ref_idx_l0;
+ s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
+ s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
+ s32 tier = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);
+
+ sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+ if (!sps)
+ return -ENOMEM;
+
+ sps->temporal_id_nesting_flag = 1;
+
+ ptl = &sps->profile_tier_level;
+ ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
+ ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_progressive_source_flag = 1;
+ ptl->general_frame_only_constraint_flag = 1;
+ ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+
+ sps->seq_parameter_set_id = 0;
+ sps->chroma_format_idc = 1; /* Only 4:2:0 sampling supported */
+ sps->pic_width_in_luma_samples = round_up(channel->width, 8);
+ sps->pic_height_in_luma_samples = round_up(channel->height, 8);
+ sps->conf_win_right_offset =
+ sps->pic_width_in_luma_samples - channel->width;
+ sps->conf_win_bottom_offset =
+ sps->pic_height_in_luma_samples - channel->height;
+ sps->conformance_window_flag =
+ sps->conf_win_right_offset || sps->conf_win_bottom_offset;
+
+ sps->log2_max_pic_order_cnt_lsb_minus4 = LOG2_MAX_PIC_ORDER_CNT - 4;
+
+ sps->sub_layer_ordering_info_present_flag = 1;
+ sps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
+ sps->max_num_reorder_pics[0] = num_ref_frames;
+
+ sps->log2_min_luma_coding_block_size_minus3 =
+ channel->min_cu_size - 3;
+ sps->log2_diff_max_min_luma_coding_block_size =
+ channel->max_cu_size - channel->min_cu_size;
+ sps->log2_min_luma_transform_block_size_minus2 =
+ channel->min_tu_size - 2;
+ sps->log2_diff_max_min_luma_transform_block_size =
+ channel->max_tu_size - channel->min_tu_size;
+ sps->max_transform_hierarchy_depth_intra =
+ channel->max_transfo_depth_intra;
+ sps->max_transform_hierarchy_depth_inter =
+ channel->max_transfo_depth_inter;
+
+ sps->sps_temporal_mvp_enabled_flag = channel->temporal_mvp_enable;
+ sps->strong_intra_smoothing_enabled_flag = channel->max_cu_size > 4;
+
+ size = nal_hevc_write_sps(&dev->plat_dev->dev, dest, n, sps);
+
+ kfree(sps);
+
+ return size;
+}
+
+static ssize_t allegro_hevc_write_pps(struct allegro_channel *channel,
+ struct mcu_msg_encode_frame_response *msg,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_hevc_pps *pps;
+ ssize_t size;
+ int i;
+
+ pps = kzalloc(sizeof(*pps), GFP_KERNEL);
+ if (!pps)
+ return -ENOMEM;
+
+ pps->pps_pic_parameter_set_id = 0;
+ pps->pps_seq_parameter_set_id = 0;
+
+ if (msg->num_column > 1 || msg->num_row > 1) {
+ pps->tiles_enabled_flag = 1;
+ pps->num_tile_columns_minus1 = msg->num_column - 1;
+ pps->num_tile_rows_minus1 = msg->num_row - 1;
- if (allegro_channel_is_at_eos(channel)) {
- buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_event_queue_fh(&channel->fh, &eos_event);
+ for (i = 0; i < msg->num_column; i++)
+ pps->column_width_minus1[i] = msg->tile_width[i] - 1;
- allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
+ for (i = 0; i < msg->num_row; i++)
+ pps->row_height_minus1[i] = msg->tile_height[i] - 1;
}
- v4l2_m2m_buf_done(buf, state);
+ pps->loop_filter_across_tiles_enabled_flag =
+ channel->enable_loop_filter_across_tiles;
+ pps->pps_loop_filter_across_slices_enabled_flag =
+ channel->enable_loop_filter_across_slices;
+ pps->deblocking_filter_control_present_flag = 1;
+ pps->deblocking_filter_override_enabled_flag =
+ channel->enable_deblocking_filter_override;
+ pps->pps_beta_offset_div2 = BETA_OFFSET_DIV_2;
+ pps->pps_tc_offset_div2 = TC_OFFSET_DIV_2;
+
+ pps->lists_modification_present_flag = channel->enable_reordering;
+
+ size = nal_hevc_write_pps(&dev->plat_dev->dev, dest, n, pps);
+
+ kfree(pps);
+
+ return size;
}
static u64 allegro_put_buffer(struct allegro_channel *channel,
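In the HEVC SPS above, the coded picture is rounded up to whole 8-sample blocks and the conformance window crops the padding back off. A worked example for a hypothetical 1366x768 stream:

	unsigned int width = 1366, height = 768;
	unsigned int coded_w = round_up(width, 8);	/* 1368 */
	unsigned int coded_h = round_up(height, 8);	/* 768 */
	unsigned int crop_right = coded_w - width;	/* 2 */
	unsigned int crop_bottom = coded_h - height;	/* 0 */
	/* conformance_window_flag is set because crop_right != 0 */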
@@ -1491,7 +1843,7 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
channel->mcu_channel_id);
dst_buf = allegro_get_buffer(channel, &channel->stream_shadow_list,
- msg->stream_id);
+ msg->dst_handle);
if (!dst_buf)
v4l2_warn(&dev->v4l2_dev,
"channel %d: invalid stream buffer\n",
@@ -1500,6 +1852,12 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
if (!src_buf || !dst_buf)
goto err;
+ if (v4l2_m2m_is_last_draining_src_buf(channel->fh.m2m_ctx, src_buf)) {
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ allegro_channel_eos_event(channel);
+ v4l2_m2m_mark_stopped(channel->fh.m2m_ctx);
+ }
+
dst_buf->sequence = channel->csequence++;
if (msg->error_code & AL_ERROR) {
@@ -1550,8 +1908,27 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
curr = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
free = partition->offset;
+
+ if (channel->codec == V4L2_PIX_FMT_HEVC && msg->is_idr) {
+ len = allegro_hevc_write_vps(channel, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough space for video parameter set: %zd left\n",
+ free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd byte VPS nal unit\n",
+ channel->mcu_channel_id, len);
+ }
+
if (msg->is_idr) {
- len = allegro_h264_write_sps(channel, curr, free);
+ if (channel->codec == V4L2_PIX_FMT_H264)
+ len = allegro_h264_write_sps(channel, curr, free);
+ else
+ len = allegro_hevc_write_sps(channel, curr, free);
if (len < 0) {
v4l2_err(&dev->v4l2_dev,
"not enough space for sequence parameter set: %zd left\n",
@@ -1566,7 +1943,10 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
}
if (msg->slice_type == AL_ENC_SLICE_TYPE_I) {
- len = allegro_h264_write_pps(channel, curr, free);
+ if (channel->codec == V4L2_PIX_FMT_H264)
+ len = allegro_h264_write_pps(channel, curr, free);
+ else
+ len = allegro_hevc_write_pps(channel, msg, curr, free);
if (len < 0) {
v4l2_err(&dev->v4l2_dev,
"not enough space for picture parameter set: %zd left\n",
@@ -1584,7 +1964,10 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
dst_buf->vb2_buf.planes[0].data_offset = free;
free = 0;
} else {
- len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
+ if (channel->codec == V4L2_PIX_FMT_H264)
+ len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
+ else
+ len = nal_hevc_write_filler(&dev->plat_dev->dev, curr, free);
if (len < 0) {
v4l2_err(&dev->v4l2_dev,
"failed to write %zd filler data\n", free);
@@ -1626,7 +2009,7 @@ err:
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
if (dst_buf)
- allegro_channel_buf_done(channel, dst_buf, state);
+ v4l2_m2m_buf_done(dst_buf, state);
}
static int allegro_handle_init(struct allegro_dev *dev,
@@ -1984,6 +2367,16 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, false);
v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, false);
v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, false);
+
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_profile, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_level, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_tier, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_i_frame_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_max_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_min_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_p_frame_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_b_frame_qp, false);
+
v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, false);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false);
v4l2_ctrl_grab(channel->mpeg_video_bitrate, false);
@@ -2011,7 +2404,6 @@ static int allegro_create_channel(struct allegro_channel *channel)
{
struct allegro_dev *dev = channel->dev;
unsigned long timeout;
- enum v4l2_mpeg_video_h264_level min_level;
if (channel_exists(channel)) {
v4l2_warn(&dev->v4l2_dev,
@@ -2034,16 +2426,6 @@ static int allegro_create_channel(struct allegro_channel *channel)
DIV_ROUND_UP(channel->framerate.numerator,
channel->framerate.denominator));
- min_level = select_minimum_h264_level(channel->width, channel->height);
- if (channel->level < min_level) {
- v4l2_warn(&dev->v4l2_dev,
- "user %d: selected Level %s too low: increasing to Level %s\n",
- channel->user_id,
- v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[channel->level],
- v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[min_level]);
- channel->level = min_level;
- }
-
v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true);
v4l2_ctrl_grab(channel->mpeg_video_h264_level, true);
v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, true);
@@ -2051,6 +2433,16 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, true);
v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, true);
v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, true);
+
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_profile, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_level, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_tier, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_i_frame_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_max_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_min_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_p_frame_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_hevc_b_frame_qp, true);
+
v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, true);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true);
v4l2_ctrl_grab(channel->mpeg_video_bitrate, true);
@@ -2079,6 +2471,124 @@ err:
return channel->error;
}
+/**
+ * allegro_channel_adjust() - Adjust channel parameters to current format
+ * @channel: the channel to adjust
+ *
+ * Various parameters of a channel and their limits depend on the currently
+ * set format. Adjust the parameters after a format change in one go.
+ */
+static void allegro_channel_adjust(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ u32 codec = channel->codec;
+ struct v4l2_ctrl *ctrl;
+ s64 min;
+ s64 max;
+
+ channel->sizeimage_encoded =
+ estimate_stream_size(channel->width, channel->height);
+
+ if (codec == V4L2_PIX_FMT_H264) {
+ ctrl = channel->mpeg_video_h264_level;
+ min = select_minimum_h264_level(channel->width, channel->height);
+ } else {
+ ctrl = channel->mpeg_video_hevc_level;
+ min = select_minimum_hevc_level(channel->width, channel->height);
+ }
+ if (ctrl->minimum > min)
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "%s.minimum: %lld -> %lld\n",
+ v4l2_ctrl_get_name(ctrl->id), ctrl->minimum, min);
+ v4l2_ctrl_lock(ctrl);
+ __v4l2_ctrl_modify_range(ctrl, min, ctrl->maximum,
+ ctrl->step, ctrl->default_value);
+ v4l2_ctrl_unlock(ctrl);
+
+ ctrl = channel->mpeg_video_bitrate;
+ if (codec == V4L2_PIX_FMT_H264)
+ max = h264_maximum_bitrate(v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level));
+ else
+ max = hevc_maximum_bitrate(v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level));
+ if (ctrl->maximum < max)
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "%s: maximum: %lld -> %lld\n",
+ v4l2_ctrl_get_name(ctrl->id), ctrl->maximum, max);
+ v4l2_ctrl_lock(ctrl);
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max,
+ ctrl->step, ctrl->default_value);
+ v4l2_ctrl_unlock(ctrl);
+
+ ctrl = channel->mpeg_video_bitrate_peak;
+ v4l2_ctrl_lock(ctrl);
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max,
+ ctrl->step, ctrl->default_value);
+ v4l2_ctrl_unlock(ctrl);
+
+ v4l2_ctrl_activate(channel->mpeg_video_h264_profile,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_level,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_i_frame_qp,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_max_qp,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_min_qp,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_p_frame_qp,
+ codec == V4L2_PIX_FMT_H264);
+ v4l2_ctrl_activate(channel->mpeg_video_h264_b_frame_qp,
+ codec == V4L2_PIX_FMT_H264);
+
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_profile,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_level,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_tier,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_i_frame_qp,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_max_qp,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_min_qp,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_p_frame_qp,
+ codec == V4L2_PIX_FMT_HEVC);
+ v4l2_ctrl_activate(channel->mpeg_video_hevc_b_frame_qp,
+ codec == V4L2_PIX_FMT_HEVC);
+
+ if (codec == V4L2_PIX_FMT_H264)
+ channel->log2_max_frame_num = LOG2_MAX_FRAME_NUM;
+ channel->temporal_mvp_enable = true;
+ channel->dbf_ovr_en = (codec == V4L2_PIX_FMT_H264);
+ channel->enable_deblocking_filter_override = (codec == V4L2_PIX_FMT_HEVC);
+ channel->enable_reordering = (codec == V4L2_PIX_FMT_HEVC);
+ channel->enable_loop_filter_across_tiles = true;
+ channel->enable_loop_filter_across_slices = true;
+
+ if (codec == V4L2_PIX_FMT_H264) {
+ channel->b_hrz_me_range = 8;
+ channel->b_vrt_me_range = 8;
+ channel->p_hrz_me_range = 16;
+ channel->p_vrt_me_range = 16;
+ channel->max_cu_size = ilog2(16);
+ channel->min_cu_size = ilog2(8);
+ channel->max_tu_size = ilog2(4);
+ channel->min_tu_size = ilog2(4);
+ } else {
+ channel->b_hrz_me_range = 16;
+ channel->b_vrt_me_range = 16;
+ channel->p_hrz_me_range = 32;
+ channel->p_vrt_me_range = 32;
+ channel->max_cu_size = ilog2(32);
+ channel->min_cu_size = ilog2(8);
+ channel->max_tu_size = ilog2(32);
+ channel->min_tu_size = ilog2(4);
+ }
+ channel->max_transfo_depth_intra = 1;
+ channel->max_transfo_depth_inter = 1;
+}
+
static void allegro_set_default_params(struct allegro_channel *channel)
{
channel->width = ALLEGRO_WIDTH_DEFAULT;
@@ -2095,16 +2605,6 @@ static void allegro_set_default_params(struct allegro_channel *channel)
channel->sizeimage_raw = channel->stride * channel->height * 3 / 2;
channel->codec = V4L2_PIX_FMT_H264;
- channel->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
- channel->level =
- select_minimum_h264_level(channel->width, channel->height);
- channel->sizeimage_encoded =
- estimate_stream_size(channel->width, channel->height);
-
- channel->bitrate = maximum_bitrate(channel->level);
- channel->bitrate_peak = maximum_bitrate(channel->level);
- channel->cpb_size = maximum_cpb_size(channel->level);
- channel->gop_size = ALLEGRO_GOP_SIZE_DEFAULT;
}
static int allegro_queue_setup(struct vb2_queue *vq,
@@ -2145,10 +2645,6 @@ static int allegro_buf_prepare(struct vb2_buffer *vb)
struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
struct allegro_dev *dev = channel->dev;
- if (allegro_get_state(channel) == ALLEGRO_STATE_DRAIN &&
- V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
- return -EBUSY;
-
if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
if (vbuf->field == V4L2_FIELD_ANY)
vbuf->field = V4L2_FIELD_NONE;
@@ -2167,10 +2663,21 @@ static void allegro_buf_queue(struct vb2_buffer *vb)
{
struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type) &&
+ vb2_is_streaming(q) &&
+ v4l2_m2m_dst_buf_is_last(channel->fh.m2m_ctx)) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ vb->planes[i].bytesused = 0;
- if (allegro_get_state(channel) == ALLEGRO_STATE_WAIT_FOR_BUFFER &&
- vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- allegro_channel_buf_done(channel, vbuf, VB2_BUF_STATE_DONE);
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = channel->csequence++;
+
+ v4l2_m2m_last_buffer_done(channel->fh.m2m_ctx, vbuf);
+ allegro_channel_eos_event(channel);
return;
}
@@ -2186,12 +2693,12 @@ static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
"%s: start streaming\n",
V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ v4l2_m2m_update_start_streaming_state(channel->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
channel->osequence = 0;
- allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
- } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ else
channel->csequence = 0;
- }
return 0;
}
@@ -2216,10 +2723,9 @@ static void allegro_stop_streaming(struct vb2_queue *q)
}
mutex_unlock(&channel->shadow_list_lock);
- allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx)))
v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
- } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ } else {
mutex_lock(&channel->shadow_list_lock);
list_for_each_entry_safe(shadow, tmp,
&channel->stream_shadow_list, head) {
@@ -2232,6 +2738,12 @@ static void allegro_stop_streaming(struct vb2_queue *q)
while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx)))
v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
}
+
+ v4l2_m2m_update_stop_streaming_state(channel->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
+ allegro_channel_eos_event(channel);
}
static const struct vb2_ops allegro_queue_ops = {
@@ -2337,9 +2849,6 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
"s_ctrl: %s = %d\n", v4l2_ctrl_get_name(ctrl->id), ctrl->val);
switch (ctrl->id) {
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- channel->level = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
channel->frame_rc_enable = ctrl->val;
break;
@@ -2349,12 +2858,6 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
v4l2_ctrl_activate(channel->mpeg_video_bitrate_peak,
ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
break;
- case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
- channel->cpb_size = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- channel->gop_size = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
@@ -2378,6 +2881,10 @@ static int allegro_open(struct file *file)
struct v4l2_ctrl_handler *handler;
u64 mask;
int ret;
+ unsigned int bitrate_max;
+ unsigned int bitrate_def;
+ unsigned int cpb_size_max;
+ unsigned int cpb_size_def;
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
@@ -2432,6 +2939,51 @@ static int allegro_open(struct file *file)
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
0, 51, 1, 30);
+
+ channel->mpeg_video_hevc_profile =
+ v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, 0x0,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
+ channel->mpeg_video_hevc_level =
+ v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 0x0,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1);
+ channel->mpeg_video_hevc_tier =
+ v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_TIER,
+ V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, 0x0,
+ V4L2_MPEG_VIDEO_HEVC_TIER_MAIN);
+ channel->mpeg_video_hevc_i_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
+ 0, 51, 1, 30);
+ channel->mpeg_video_hevc_max_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
+ 0, 51, 1, 51);
+ channel->mpeg_video_hevc_min_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ 0, 51, 1, 0);
+ channel->mpeg_video_hevc_p_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
+ 0, 51, 1, 30);
+ channel->mpeg_video_hevc_b_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
+ 0, 51, 1, 30);
+
channel->mpeg_video_frame_rc_enable =
v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
@@ -2443,26 +2995,35 @@ static int allegro_open(struct file *file)
V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+ if (channel->codec == V4L2_PIX_FMT_H264) {
+ bitrate_max = h264_maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ bitrate_def = h264_maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ cpb_size_max = h264_maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ cpb_size_def = h264_maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ } else {
+ bitrate_max = hevc_maximum_bitrate(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1);
+ bitrate_def = hevc_maximum_bitrate(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1);
+ cpb_size_max = hevc_maximum_cpb_size(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1);
+ cpb_size_def = hevc_maximum_cpb_size(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1);
+ }
channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE,
- 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
- 1, channel->bitrate);
+ 0, bitrate_max, 1, bitrate_def);
channel->mpeg_video_bitrate_peak = v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
- 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
- 1, channel->bitrate_peak);
+ 0, bitrate_max, 1, bitrate_def);
channel->mpeg_video_cpb_size = v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
- 0, maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
- 1, channel->cpb_size);
+ 0, cpb_size_max, 1, cpb_size_def);
channel->mpeg_video_gop_size = v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_GOP_SIZE,
0, ALLEGRO_GOP_SIZE_MAX,
- 1, channel->gop_size);
+ 1, ALLEGRO_GOP_SIZE_DEFAULT);
v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -2477,14 +3038,14 @@ static int allegro_open(struct file *file)
v4l2_ctrl_cluster(3, &channel->mpeg_video_bitrate_mode);
+ v4l2_ctrl_handler_setup(handler);
+
channel->mcu_channel_id = -1;
channel->user_id = -1;
INIT_LIST_HEAD(&channel->buffers_reference);
INIT_LIST_HEAD(&channel->buffers_intermediate);
- list_add(&channel->list, &dev->channels);
-
channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
allegro_queue_init);
@@ -2493,9 +3054,12 @@ static int allegro_open(struct file *file)
goto error;
}
+ list_add(&channel->list, &dev->channels);
file->private_data = &channel->fh;
v4l2_fh_add(&channel->fh);
+ allegro_channel_adjust(channel);
+
return 0;
error:
@@ -2539,14 +3103,19 @@ static int allegro_querycap(struct file *file, void *fh,
static int allegro_enum_fmt_vid(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- if (f->index)
- return -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (f->index >= 1)
+ return -EINVAL;
f->pixelformat = V4L2_PIX_FMT_NV12;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- f->pixelformat = V4L2_PIX_FMT_H264;
+ if (f->index >= 2)
+ return -EINVAL;
+ if (f->index == 0)
+ f->pixelformat = V4L2_PIX_FMT_H264;
+ if (f->index == 1)
+ f->pixelformat = V4L2_PIX_FMT_HEVC;
break;
default:
return -EINVAL;
@@ -2585,7 +3154,10 @@ static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+ if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_HEVC &&
+ f->fmt.pix.pixelformat != V4L2_PIX_FMT_H264)
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage =
estimate_stream_size(f->fmt.pix.width, f->fmt.pix.height);
@@ -2593,6 +3165,30 @@ static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
return 0;
}
+static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ struct vb2_queue *vq;
+ int err;
+
+ err = allegro_try_fmt_vid_cap(file, fh, f);
+ if (err)
+ return err;
+
+ vq = v4l2_m2m_get_vq(channel->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ channel->codec = f->fmt.pix.pixelformat;
+
+ allegro_channel_adjust(channel);
+
+ return 0;
+}
+
static int allegro_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
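With s_fmt wired up, userspace selects the codec by setting the capture pixelformat before allocating buffers; the vb2_is_busy() check above rejects a switch once buffers exist. A minimal userspace sketch (error handling elided, /dev/video0 assumed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);
		struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

		ioctl(fd, VIDIOC_G_FMT, &fmt);
		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_HEVC;
		ioctl(fd, VIDIOC_S_FMT, &fmt);	/* falls back to H.264 otherwise */
		return 0;
	}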
@@ -2660,72 +3256,23 @@ static int allegro_s_fmt_vid_out(struct file *file, void *fh,
channel->quantization = f->fmt.pix.quantization;
channel->xfer_func = f->fmt.pix.xfer_func;
- channel->level =
- select_minimum_h264_level(channel->width, channel->height);
- channel->sizeimage_encoded =
- estimate_stream_size(channel->width, channel->height);
+ allegro_channel_adjust(channel);
return 0;
}
static int allegro_channel_cmd_stop(struct allegro_channel *channel)
{
- struct allegro_dev *dev = channel->dev;
- struct vb2_v4l2_buffer *dst_buf;
-
- switch (allegro_get_state(channel)) {
- case ALLEGRO_STATE_DRAIN:
- case ALLEGRO_STATE_WAIT_FOR_BUFFER:
- return -EBUSY;
- case ALLEGRO_STATE_ENCODING:
- allegro_set_state(channel, ALLEGRO_STATE_DRAIN);
- break;
- default:
- return 0;
- }
-
- /* If there are output buffers, they must be encoded */
- if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) != 0) {
- v4l2_dbg(1, debug, &dev->v4l2_dev,
- "channel %d: CMD_STOP: continue encoding src buffers\n",
- channel->mcu_channel_id);
- return 0;
- }
-
- /* If there are capture buffers, use it to signal EOS */
- dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
- if (dst_buf) {
- v4l2_dbg(1, debug, &dev->v4l2_dev,
- "channel %d: CMD_STOP: signaling EOS\n",
- channel->mcu_channel_id);
- allegro_channel_buf_done(channel, dst_buf, VB2_BUF_STATE_DONE);
- return 0;
- }
-
- /*
- * If there are no capture buffers, we need to wait for the next
- * buffer to signal EOS.
- */
- v4l2_dbg(1, debug, &dev->v4l2_dev,
- "channel %d: CMD_STOP: wait for CAPTURE buffer to signal EOS\n",
- channel->mcu_channel_id);
- allegro_set_state(channel, ALLEGRO_STATE_WAIT_FOR_BUFFER);
+ if (v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
+ allegro_channel_eos_event(channel);
return 0;
}
static int allegro_channel_cmd_start(struct allegro_channel *channel)
{
- switch (allegro_get_state(channel)) {
- case ALLEGRO_STATE_DRAIN:
- case ALLEGRO_STATE_WAIT_FOR_BUFFER:
- return -EBUSY;
- case ALLEGRO_STATE_STOPPED:
- allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
- break;
- default:
- return 0;
- }
+ if (v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
+ vb2_clear_last_buffer_dequeued(&channel->fh.m2m_ctx->cap_q_ctx.q);
return 0;
}
@@ -2740,17 +3287,15 @@ static int allegro_encoder_cmd(struct file *file, void *fh,
if (err)
return err;
- switch (cmd->cmd) {
- case V4L2_ENC_CMD_STOP:
+ err = v4l2_m2m_ioctl_encoder_cmd(file, fh, cmd);
+ if (err)
+ return err;
+
+ if (cmd->cmd == V4L2_ENC_CMD_STOP)
err = allegro_channel_cmd_stop(channel);
- break;
- case V4L2_ENC_CMD_START:
+
+ if (cmd->cmd == V4L2_ENC_CMD_START)
err = allegro_channel_cmd_start(channel);
- break;
- default:
- err = -EINVAL;
- break;
- }
return err;
}
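The command handling now defers to the v4l2-mem2mem helpers, which implement the stateful-encoder drain protocol: userspace issues V4L2_ENC_CMD_STOP and dequeues capture buffers until one carries V4L2_BUF_FLAG_LAST. A hedged userspace sketch of that drain loop:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static void drain(int fd)
	{
		struct v4l2_encoder_cmd cmd = { .cmd = V4L2_ENC_CMD_STOP };
		struct v4l2_buffer buf = {
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};

		ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);
		do {
			if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
				break;
			/* consume the encoded data, then re-queue ... */
		} while (!(buf.flags & V4L2_BUF_FLAG_LAST));
	}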
@@ -2759,6 +3304,7 @@ static int allegro_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_HEVC:
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_NV12:
break;
@@ -2853,7 +3399,7 @@ static const struct v4l2_ioctl_ops allegro_ioctl_ops = {
.vidioc_enum_fmt_vid_out = allegro_enum_fmt_vid,
.vidioc_g_fmt_vid_cap = allegro_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = allegro_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = allegro_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = allegro_s_fmt_vid_cap,
.vidioc_g_fmt_vid_out = allegro_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = allegro_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = allegro_s_fmt_vid_out,
diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.c b/drivers/media/platform/allegro-dvt/allegro-mail.c
index 9286d2162377..7e08c5050f2e 100644
--- a/drivers/staging/media/allegro-dvt/allegro-mail.c
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.c
@@ -67,12 +67,16 @@ static inline u32 settings_get_mcu_codec(struct create_channel_param *param)
if (version < MCU_MSG_VERSION_2019_2) {
switch (pixelformat) {
+ case V4L2_PIX_FMT_HEVC:
+ return 2;
case V4L2_PIX_FMT_H264:
default:
return 1;
}
} else {
switch (pixelformat) {
+ case V4L2_PIX_FMT_HEVC:
+ return 1;
case V4L2_PIX_FMT_H264:
default:
return 0;
@@ -109,12 +113,17 @@ allegro_encode_config_blob(u32 *dst, struct create_channel_param *param)
val = 0;
val |= param->temporal_mvp_enable ? BIT(20) : 0;
- val |= FIELD_PREP(GENMASK(7, 4), param->log2_max_frame_num) |
- FIELD_PREP(GENMASK(3, 0), param->log2_max_poc);
+ val |= FIELD_PREP(GENMASK(7, 4), param->log2_max_frame_num);
+ if (version >= MCU_MSG_VERSION_2019_2)
+ val |= FIELD_PREP(GENMASK(3, 0), param->log2_max_poc - 1);
+ else
+ val |= FIELD_PREP(GENMASK(3, 0), param->log2_max_poc);
dst[i++] = val;
val = 0;
+ val |= param->enable_reordering ? BIT(0) : 0;
val |= param->dbf_ovr_en ? BIT(2) : 0;
+ val |= param->override_lf ? BIT(12) : 0;
dst[i++] = val;
if (version >= MCU_MSG_VERSION_2019_2) {
@@ -302,8 +311,8 @@ allegro_enc_put_stream_buffer(u32 *dst,
dst[i++] = msg->mcu_addr;
dst[i++] = msg->size;
dst[i++] = msg->offset;
- dst[i++] = lower_32_bits(msg->stream_id);
- dst[i++] = upper_32_bits(msg->stream_id);
+ dst[i++] = lower_32_bits(msg->dst_handle);
+ dst[i++] = upper_32_bits(msg->dst_handle);
return i * sizeof(*dst);
}
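
The mailbox encoding leans on the kernel's bitfield helpers: GENMASK(h, l) builds a contiguous bit mask, FIELD_PREP() shifts a value into that mask, and lower_32_bits()/upper_32_bits() split the 64-bit dst_handle across two consecutive 32-bit words. A self-contained illustration of the same pattern (the field layout here is invented, not the firmware's):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/kernel.h>

static void pack_example(u32 *dst, u64 handle, u32 frame_num, u32 poc)
{
	u32 val = 0;

	val |= FIELD_PREP(GENMASK(7, 4), frame_num);	/* bits 7..4 */
	val |= FIELD_PREP(GENMASK(3, 0), poc);		/* bits 3..0 */
	dst[0] = val;

	dst[1] = lower_32_bits(handle);		/* low word first */
	dst[2] = upper_32_bits(handle);
}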
@@ -406,8 +415,8 @@ allegro_dec_encode_frame(struct mcu_msg_encode_frame_response *msg, u32 *src)
msg->channel_id = src[i++];
- msg->stream_id = src[i++];
- msg->stream_id |= (((u64)src[i++]) << 32);
+ msg->dst_handle = src[i++];
+ msg->dst_handle |= (((u64)src[i++]) << 32);
msg->user_param = src[i++];
msg->user_param |= (((u64)src[i++]) << 32);
msg->src_handle = src[i++];
diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.h b/drivers/media/platform/allegro-dvt/allegro-mail.h
index 486ecb12b098..2c7bc509eac3 100644
--- a/drivers/staging/media/allegro-dvt/allegro-mail.h
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.h
@@ -65,6 +65,7 @@ struct create_channel_param {
u32 temporal_mvp_enable;
u32 enable_reordering;
u32 dbf_ovr_en;
+ u32 override_lf;
u32 num_ref_idx_l0;
u32 num_ref_idx_l1;
u32 custom_lda;
@@ -191,7 +192,7 @@ struct mcu_msg_put_stream_buffer {
u32 mcu_addr;
u32 size;
u32 offset;
- u64 stream_id;
+ u64 dst_handle;
};
struct mcu_msg_encode_frame {
@@ -233,7 +234,7 @@ struct mcu_msg_encode_frame {
struct mcu_msg_encode_frame_response {
struct mcu_msg_header header;
u32 channel_id;
- u64 stream_id; /* see mcu_msg_put_stream_buffer */
+ u64 dst_handle; /* see mcu_msg_put_stream_buffer */
u64 user_param; /* see mcu_msg_encode_frame */
u64 src_handle; /* see mcu_msg_encode_frame */
u16 skip;
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/media/platform/allegro-dvt/nal-h264.c
index bd48b8883572..94dd9266d850 100644
--- a/drivers/staging/media/allegro-dvt/nal-h264.c
+++ b/drivers/media/platform/allegro-dvt/nal-h264.c
@@ -22,6 +22,7 @@
#include <linux/log2.h>
#include "nal-h264.h"
+#include "nal-rbsp.h"
/*
* See Rec. ITU-T H.264 (04/2017) Table 7-1 – NAL unit type codes, syntax
@@ -33,54 +34,6 @@ enum nal_unit_type {
FILLER_DATA = 12,
};
-struct rbsp;
-
-struct nal_h264_ops {
- int (*rbsp_bit)(struct rbsp *rbsp, int *val);
- int (*rbsp_bits)(struct rbsp *rbsp, int n, unsigned int *val);
- int (*rbsp_uev)(struct rbsp *rbsp, unsigned int *val);
- int (*rbsp_sev)(struct rbsp *rbsp, int *val);
-};
-
-/**
- * struct rbsp - State object for handling a raw byte sequence payload
- * @data: pointer to the data of the rbsp
- * @size: maximum size of the data of the rbsp
- * @pos: current bit position inside the rbsp
- * @num_consecutive_zeros: number of zeros before @pos
- * @ops: per datatype functions for interacting with the rbsp
- * @error: an error occurred while handling the rbsp
- *
- * This struct is passed around the various parsing functions and tracks the
- * current position within the raw byte sequence payload.
- *
- * The @ops field allows to separate the operation, i.e., reading/writing a
- * value from/to that rbsp, from the structure of the NAL unit. This allows to
- * have a single function for iterating the NAL unit, while @ops has function
- * pointers for handling each type in the rbsp.
- */
-struct rbsp {
- u8 *data;
- size_t size;
- unsigned int pos;
- unsigned int num_consecutive_zeros;
- struct nal_h264_ops *ops;
- int error;
-};
-
-static void rbsp_init(struct rbsp *rbsp, void *addr, size_t size,
- struct nal_h264_ops *ops)
-{
- if (!rbsp)
- return;
-
- rbsp->data = addr;
- rbsp->size = size;
- rbsp->pos = 0;
- rbsp->ops = ops;
- rbsp->error = 0;
-}
-
/**
* nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
* @profile: the profile as &enum v4l2_mpeg_video_h264_profile
@@ -155,281 +108,6 @@ int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
}
}
-static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value);
-static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value);
-
-/*
- * When reading or writing, the emulation_prevention_three_byte is detected
- * only when the 2 one bits need to be inserted. Therefore, we are not
- * actually adding the 0x3 byte, but the 2 one bits and the six 0 bits of the
- * next byte.
- */
-#define EMULATION_PREVENTION_THREE_BYTE (0x3 << 6)
-
-static int add_emulation_prevention_three_byte(struct rbsp *rbsp)
-{
- rbsp->num_consecutive_zeros = 0;
- rbsp_write_bits(rbsp, 8, EMULATION_PREVENTION_THREE_BYTE);
-
- return 0;
-}
-
-static int discard_emulation_prevention_three_byte(struct rbsp *rbsp)
-{
- unsigned int tmp = 0;
-
- rbsp->num_consecutive_zeros = 0;
- rbsp_read_bits(rbsp, 8, &tmp);
- if (tmp != EMULATION_PREVENTION_THREE_BYTE)
- return -EINVAL;
-
- return 0;
-}
-
-static inline int rbsp_read_bit(struct rbsp *rbsp)
-{
- int shift;
- int ofs;
- int bit;
- int err;
-
- if (rbsp->num_consecutive_zeros == 22) {
- err = discard_emulation_prevention_three_byte(rbsp);
- if (err)
- return err;
- }
-
- shift = 7 - (rbsp->pos % 8);
- ofs = rbsp->pos / 8;
- if (ofs >= rbsp->size)
- return -EINVAL;
-
- bit = (rbsp->data[ofs] >> shift) & 1;
-
- rbsp->pos++;
-
- if (bit == 1 ||
- (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0)))
- rbsp->num_consecutive_zeros = 0;
- else
- rbsp->num_consecutive_zeros++;
-
- return bit;
-}
-
-static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
-{
- int shift;
- int ofs;
-
- if (rbsp->num_consecutive_zeros == 22)
- add_emulation_prevention_three_byte(rbsp);
-
- shift = 7 - (rbsp->pos % 8);
- ofs = rbsp->pos / 8;
- if (ofs >= rbsp->size)
- return -EINVAL;
-
- rbsp->data[ofs] &= ~(1 << shift);
- rbsp->data[ofs] |= value << shift;
-
- rbsp->pos++;
-
- if (value ||
- (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
- rbsp->num_consecutive_zeros = 0;
- } else {
- rbsp->num_consecutive_zeros++;
- }
-
- return 0;
-}
-
-static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value)
-{
- int i;
- int bit;
- unsigned int tmp = 0;
-
- if (n > 8 * sizeof(*value))
- return -EINVAL;
-
- for (i = n; i > 0; i--) {
- bit = rbsp_read_bit(rbsp);
- if (bit < 0)
- return bit;
- tmp |= bit << (i - 1);
- }
-
- if (value)
- *value = tmp;
-
- return 0;
-}
-
-static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value)
-{
- int ret;
-
- if (n > 8 * sizeof(value))
- return -EINVAL;
-
- while (n--) {
- ret = rbsp_write_bit(rbsp, (value >> n) & 1);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *value)
-{
- int leading_zero_bits = 0;
- unsigned int tmp = 0;
- int ret;
-
- while ((ret = rbsp_read_bit(rbsp)) == 0)
- leading_zero_bits++;
- if (ret < 0)
- return ret;
-
- if (leading_zero_bits > 0) {
- ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
- if (ret)
- return ret;
- }
-
- if (value)
- *value = (1 << leading_zero_bits) - 1 + tmp;
-
- return 0;
-}
-
-static int rbsp_write_uev(struct rbsp *rbsp, unsigned int *value)
-{
- int ret;
- int leading_zero_bits;
-
- if (!value)
- return -EINVAL;
-
- leading_zero_bits = ilog2(*value + 1);
-
- ret = rbsp_write_bits(rbsp, leading_zero_bits, 0);
- if (ret)
- return ret;
-
- return rbsp_write_bits(rbsp, leading_zero_bits + 1, *value + 1);
-}
-
-static int rbsp_read_sev(struct rbsp *rbsp, int *value)
-{
- int ret;
- unsigned int tmp;
-
- ret = rbsp_read_uev(rbsp, &tmp);
- if (ret)
- return ret;
-
- if (value) {
- if (tmp & 1)
- *value = (tmp + 1) / 2;
- else
- *value = -(tmp / 2);
- }
-
- return 0;
-}
-
-static int rbsp_write_sev(struct rbsp *rbsp, int *value)
-{
- unsigned int tmp;
-
- if (!value)
- return -EINVAL;
-
- if (*value > 0)
- tmp = (2 * (*value)) | 1;
- else
- tmp = -2 * (*value);
-
- return rbsp_write_uev(rbsp, &tmp);
-}
-
-static int __rbsp_write_bit(struct rbsp *rbsp, int *value)
-{
- return rbsp_write_bit(rbsp, *value);
-}
-
-static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value)
-{
- return rbsp_write_bits(rbsp, n, *value);
-}
-
-static struct nal_h264_ops write = {
- .rbsp_bit = __rbsp_write_bit,
- .rbsp_bits = __rbsp_write_bits,
- .rbsp_uev = rbsp_write_uev,
- .rbsp_sev = rbsp_write_sev,
-};
-
-static int __rbsp_read_bit(struct rbsp *rbsp, int *value)
-{
- int tmp = rbsp_read_bit(rbsp);
-
- if (tmp < 0)
- return tmp;
- *value = tmp;
-
- return 0;
-}
-
-static struct nal_h264_ops read = {
- .rbsp_bit = __rbsp_read_bit,
- .rbsp_bits = rbsp_read_bits,
- .rbsp_uev = rbsp_read_uev,
- .rbsp_sev = rbsp_read_sev,
-};
-
-static inline void rbsp_bit(struct rbsp *rbsp, int *value)
-{
- if (rbsp->error)
- return;
- rbsp->error = rbsp->ops->rbsp_bit(rbsp, value);
-}
-
-static inline void rbsp_bits(struct rbsp *rbsp, int n, int *value)
-{
- if (rbsp->error)
- return;
- rbsp->error = rbsp->ops->rbsp_bits(rbsp, n, value);
-}
-
-static inline void rbsp_uev(struct rbsp *rbsp, unsigned int *value)
-{
- if (rbsp->error)
- return;
- rbsp->error = rbsp->ops->rbsp_uev(rbsp, value);
-}
-
-static inline void rbsp_sev(struct rbsp *rbsp, int *value)
-{
- if (rbsp->error)
- return;
- rbsp->error = rbsp->ops->rbsp_sev(rbsp, value);
-}
-
-static void nal_h264_rbsp_trailing_bits(struct rbsp *rbsp)
-{
- unsigned int rbsp_stop_one_bit = 1;
- unsigned int rbsp_alignment_zero_bit = 0;
-
- rbsp_bit(rbsp, &rbsp_stop_one_bit);
- rbsp_bits(rbsp, round_up(rbsp->pos, 8) - rbsp->pos,
- &rbsp_alignment_zero_bit);
-}
-
static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
@@ -767,7 +445,7 @@ ssize_t nal_h264_write_sps(const struct device *dev,
nal_h264_rbsp_sps(&rbsp, sps);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
if (rbsp.error)
return rbsp.error;
@@ -814,7 +492,7 @@ ssize_t nal_h264_read_sps(const struct device *dev,
nal_h264_rbsp_sps(&rbsp, sps);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
if (rbsp.error)
return rbsp.error;
@@ -859,7 +537,7 @@ ssize_t nal_h264_write_pps(const struct device *dev,
nal_h264_rbsp_pps(&rbsp, pps);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
if (rbsp.error)
return rbsp.error;
@@ -896,7 +574,7 @@ ssize_t nal_h264_read_pps(const struct device *dev,
nal_h264_rbsp_pps(&rbsp, pps);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
if (rbsp.error)
return rbsp.error;
@@ -942,7 +620,7 @@ ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n)
nal_h264_write_filler_data(&rbsp);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
return DIV_ROUND_UP(rbsp.pos, 8);
}
@@ -991,7 +669,7 @@ ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n)
return -EINVAL;
nal_h264_read_filler_data(&rbsp);
- nal_h264_rbsp_trailing_bits(&rbsp);
+ rbsp_trailing_bits(&rbsp);
if (rbsp.error)
return rbsp.error;
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.h b/drivers/media/platform/allegro-dvt/nal-h264.h
index 2ba7cbced7a5..2ba7cbced7a5 100644
--- a/drivers/staging/media/allegro-dvt/nal-h264.h
+++ b/drivers/media/platform/allegro-dvt/nal-h264.h
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.c b/drivers/media/platform/allegro-dvt/nal-hevc.c
new file mode 100644
index 000000000000..5db540c69bfe
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs.
+ *
+ * The conversion is defined in "ITU-T Rec. H.265 (02/2018) high efficiency
+ * video coding". Decoder drivers may use the parser to parse RBSP from
+ * encoded streams and configure the hardware if the hardware is not able to
+ * parse RBSP itself. Encoder drivers may use the generator to generate the
+ * RBSP for VPS/SPS/PPS NAL units and add them to the encoded stream if the
+ * hardware does not generate the units.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include "nal-hevc.h"
+#include "nal-rbsp.h"
+
+/*
+ * See Rec. ITU-T H.265 (02/2018) Table 7-1 – NAL unit type codes and NAL unit
+ * type classes
+ */
+enum nal_unit_type {
+ VPS_NUT = 32,
+ SPS_NUT = 33,
+ PPS_NUT = 34,
+ FD_NUT = 38,
+};
+
+int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ return 1;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return 2;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(nal_hevc_profile_from_v4l2);
+
+int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier)
+{
+ switch (tier) {
+ case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
+ return 0;
+ case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(nal_hevc_tier_from_v4l2);
+
+int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level)
+{
+ /*
+ * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
+ * shall be set equal to a value of 30 times the level number
+ * specified in Table A.6.
+ */
+ int factor = 30 / 10;
+
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return factor * 10;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return factor * 20;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return factor * 21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return factor * 30;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return factor * 31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return factor * 40;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return factor * 41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return factor * 50;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return factor * 51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return factor * 52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return factor * 60;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return factor * 61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return factor * 62;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(nal_hevc_level_from_v4l2);
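
As a worked example: the V4L2 HEVC level constants encode ten times the level number, so factor = 30 / 10 = 3 and V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 yields general_level_idc = 3 * 51 = 153, i.e. 30 times level 5.1, exactly as the quoted rule requires.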
+
+static void nal_hevc_write_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p[0] = 0x00;
+ p[1] = 0x00;
+ p[2] = 0x00;
+ p[3] = 0x01;
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_hevc_read_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_hevc_write_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i;
+
+ /* Keep 1 byte extra for terminating the NAL unit */
+ i = rbsp->size - DIV_ROUND_UP(rbsp->pos, 8) - 1;
+ memset(p, 0xff, i);
+ rbsp->pos += i * 8;
+}
+
+static void nal_hevc_read_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+
+ while (*p == 0xff) {
+ if (DIV_ROUND_UP(rbsp->pos, 8) > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p++;
+ rbsp->pos += 8;
+ }
+}
+
+static void nal_hevc_rbsp_profile_tier_level(struct rbsp *rbsp,
+ struct nal_hevc_profile_tier_level *ptl)
+{
+ unsigned int i;
+ unsigned int max_num_sub_layers_minus_1 = 0;
+
+ rbsp_bits(rbsp, 2, &ptl->general_profile_space);
+ rbsp_bit(rbsp, &ptl->general_tier_flag);
+ rbsp_bits(rbsp, 5, &ptl->general_profile_idc);
+ for (i = 0; i < 32; i++)
+ rbsp_bit(rbsp, &ptl->general_profile_compatibility_flag[i]);
+ rbsp_bit(rbsp, &ptl->general_progressive_source_flag);
+ rbsp_bit(rbsp, &ptl->general_interlaced_source_flag);
+ rbsp_bit(rbsp, &ptl->general_non_packed_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_frame_only_constraint_flag);
+ if (ptl->general_profile_idc == 4 ||
+ ptl->general_profile_compatibility_flag[4] ||
+ ptl->general_profile_idc == 5 ||
+ ptl->general_profile_compatibility_flag[5] ||
+ ptl->general_profile_idc == 6 ||
+ ptl->general_profile_compatibility_flag[6] ||
+ ptl->general_profile_idc == 7 ||
+ ptl->general_profile_compatibility_flag[7] ||
+ ptl->general_profile_idc == 8 ||
+ ptl->general_profile_compatibility_flag[8] ||
+ ptl->general_profile_idc == 9 ||
+ ptl->general_profile_compatibility_flag[9] ||
+ ptl->general_profile_idc == 10 ||
+ ptl->general_profile_compatibility_flag[10]) {
+ rbsp_bit(rbsp, &ptl->general_max_12bit_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_max_10bit_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_max_8bit_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_max_422chroma_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_max_420chroma_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_max_monochrome_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_intra_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_one_picture_only_constraint_flag);
+ rbsp_bit(rbsp, &ptl->general_lower_bit_rate_constraint_flag);
+ if (ptl->general_profile_idc == 5 ||
+ ptl->general_profile_compatibility_flag[5] ||
+ ptl->general_profile_idc == 9 ||
+ ptl->general_profile_compatibility_flag[9] ||
+ ptl->general_profile_idc == 10 ||
+ ptl->general_profile_compatibility_flag[10]) {
+ rbsp_bit(rbsp, &ptl->general_max_14bit_constraint_flag);
+ rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_33bits);
+ rbsp_bits(rbsp, 33 - 32, &ptl->general_reserved_zero_33bits);
+ } else {
+ rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_34bits);
+ rbsp_bits(rbsp, 34 - 32, &ptl->general_reserved_zero_34bits);
+ }
+ } else if (ptl->general_profile_idc == 2 ||
+ ptl->general_profile_compatibility_flag[2]) {
+ rbsp_bits(rbsp, 7, &ptl->general_reserved_zero_7bits);
+ rbsp_bit(rbsp, &ptl->general_one_picture_only_constraint_flag);
+ rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_35bits);
+ rbsp_bits(rbsp, 35 - 32, &ptl->general_reserved_zero_35bits);
+ } else {
+ rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_43bits);
+ rbsp_bits(rbsp, 43 - 32, &ptl->general_reserved_zero_43bits);
+ }
+ if ((ptl->general_profile_idc >= 1 && ptl->general_profile_idc <= 5) ||
+ ptl->general_profile_idc == 9 ||
+ ptl->general_profile_compatibility_flag[1] ||
+ ptl->general_profile_compatibility_flag[2] ||
+ ptl->general_profile_compatibility_flag[3] ||
+ ptl->general_profile_compatibility_flag[4] ||
+ ptl->general_profile_compatibility_flag[5] ||
+ ptl->general_profile_compatibility_flag[9])
+ rbsp_bit(rbsp, &ptl->general_inbld_flag);
+ else
+ rbsp_bit(rbsp, &ptl->general_reserved_zero_bit);
+ rbsp_bits(rbsp, 8, &ptl->general_level_idc);
+ if (max_num_sub_layers_minus_1 > 0)
+ rbsp_unsupported(rbsp);
+}
+
+static void nal_hevc_rbsp_vps(struct rbsp *rbsp, struct nal_hevc_vps *vps)
+{
+ unsigned int i, j;
+ unsigned int reserved_0xffff_16bits = 0xffff;
+
+ rbsp_bits(rbsp, 4, &vps->video_parameter_set_id);
+ rbsp_bit(rbsp, &vps->base_layer_internal_flag);
+ rbsp_bit(rbsp, &vps->base_layer_available_flag);
+ rbsp_bits(rbsp, 6, &vps->max_layers_minus1);
+ rbsp_bits(rbsp, 3, &vps->max_sub_layers_minus1);
+ rbsp_bits(rbsp, 1, &vps->temporal_id_nesting_flag);
+ rbsp_bits(rbsp, 16, &reserved_0xffff_16bits);
+ nal_hevc_rbsp_profile_tier_level(rbsp, &vps->profile_tier_level);
+ rbsp_bit(rbsp, &vps->sub_layer_ordering_info_present_flag);
+ for (i = vps->sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers_minus1;
+ i <= vps->max_sub_layers_minus1; i++) {
+ rbsp_uev(rbsp, &vps->max_dec_pic_buffering_minus1[i]);
+ rbsp_uev(rbsp, &vps->max_num_reorder_pics[i]);
+ rbsp_uev(rbsp, &vps->max_latency_increase_plus1[i]);
+ }
+ rbsp_bits(rbsp, 6, &vps->max_layer_id);
+ rbsp_uev(rbsp, &vps->num_layer_sets_minus1);
+ for (i = 0; i <= vps->num_layer_sets_minus1; i++)
+ for (j = 0; j <= vps->max_layer_id; j++)
+ rbsp_bit(rbsp, &vps->layer_id_included_flag[i][j]);
+ rbsp_bit(rbsp, &vps->timing_info_present_flag);
+ if (vps->timing_info_present_flag)
+ rbsp_unsupported(rbsp);
+ rbsp_bit(rbsp, &vps->extension_flag);
+ if (vps->extension_flag)
+ rbsp_unsupported(rbsp);
+}
+
+static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
+{
+ unsigned int i;
+
+ rbsp_bits(rbsp, 4, &sps->video_parameter_set_id);
+ rbsp_bits(rbsp, 3, &sps->max_sub_layers_minus1);
+ rbsp_bit(rbsp, &sps->temporal_id_nesting_flag);
+ nal_hevc_rbsp_profile_tier_level(rbsp, &sps->profile_tier_level);
+ rbsp_uev(rbsp, &sps->seq_parameter_set_id);
+
+ rbsp_uev(rbsp, &sps->chroma_format_idc);
+ if (sps->chroma_format_idc == 3)
+ rbsp_bit(rbsp, &sps->separate_colour_plane_flag);
+ rbsp_uev(rbsp, &sps->pic_width_in_luma_samples);
+ rbsp_uev(rbsp, &sps->pic_height_in_luma_samples);
+ rbsp_bit(rbsp, &sps->conformance_window_flag);
+ if (sps->conformance_window_flag) {
+ rbsp_uev(rbsp, &sps->conf_win_left_offset);
+ rbsp_uev(rbsp, &sps->conf_win_right_offset);
+ rbsp_uev(rbsp, &sps->conf_win_top_offset);
+ rbsp_uev(rbsp, &sps->conf_win_bottom_offset);
+ }
+ rbsp_uev(rbsp, &sps->bit_depth_luma_minus8);
+ rbsp_uev(rbsp, &sps->bit_depth_chroma_minus8);
+
+ rbsp_uev(rbsp, &sps->log2_max_pic_order_cnt_lsb_minus4);
+
+ rbsp_bit(rbsp, &sps->sub_layer_ordering_info_present_flag);
+ for (i = (sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1);
+ i <= sps->max_sub_layers_minus1; i++) {
+ rbsp_uev(rbsp, &sps->max_dec_pic_buffering_minus1[i]);
+ rbsp_uev(rbsp, &sps->max_num_reorder_pics[i]);
+ rbsp_uev(rbsp, &sps->max_latency_increase_plus1[i]);
+ }
+ rbsp_uev(rbsp, &sps->log2_min_luma_coding_block_size_minus3);
+ rbsp_uev(rbsp, &sps->log2_diff_max_min_luma_coding_block_size);
+ rbsp_uev(rbsp, &sps->log2_min_luma_transform_block_size_minus2);
+ rbsp_uev(rbsp, &sps->log2_diff_max_min_luma_transform_block_size);
+ rbsp_uev(rbsp, &sps->max_transform_hierarchy_depth_inter);
+ rbsp_uev(rbsp, &sps->max_transform_hierarchy_depth_intra);
+
+ rbsp_bit(rbsp, &sps->scaling_list_enabled_flag);
+ if (sps->scaling_list_enabled_flag)
+ rbsp_unsupported(rbsp);
+
+ rbsp_bit(rbsp, &sps->amp_enabled_flag);
+ rbsp_bit(rbsp, &sps->sample_adaptive_offset_enabled_flag);
+ rbsp_bit(rbsp, &sps->pcm_enabled_flag);
+ if (sps->pcm_enabled_flag) {
+ rbsp_bits(rbsp, 4, &sps->pcm_sample_bit_depth_luma_minus1);
+ rbsp_bits(rbsp, 4, &sps->pcm_sample_bit_depth_chroma_minus1);
+ rbsp_uev(rbsp, &sps->log2_min_pcm_luma_coding_block_size_minus3);
+ rbsp_uev(rbsp, &sps->log2_diff_max_min_pcm_luma_coding_block_size);
+ rbsp_bit(rbsp, &sps->pcm_loop_filter_disabled_flag);
+ }
+
+ rbsp_uev(rbsp, &sps->num_short_term_ref_pic_sets);
+ if (sps->num_short_term_ref_pic_sets > 0)
+ rbsp_unsupported(rbsp);
+
+ rbsp_bit(rbsp, &sps->long_term_ref_pics_present_flag);
+ if (sps->long_term_ref_pics_present_flag)
+ rbsp_unsupported(rbsp);
+
+ rbsp_bit(rbsp, &sps->sps_temporal_mvp_enabled_flag);
+ rbsp_bit(rbsp, &sps->strong_intra_smoothing_enabled_flag);
+ rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
+ if (sps->vui_parameters_present_flag)
+ rbsp_unsupported(rbsp);
+
+ rbsp_bit(rbsp, &sps->extension_present_flag);
+ if (sps->extension_present_flag) {
+ rbsp_bit(rbsp, &sps->sps_range_extension_flag);
+ rbsp_bit(rbsp, &sps->sps_multilayer_extension_flag);
+ rbsp_bit(rbsp, &sps->sps_3d_extension_flag);
+ rbsp_bit(rbsp, &sps->sps_scc_extension_flag);
+ rbsp_bits(rbsp, 4, &sps->sps_extension_4bits);
+ }
+ if (sps->sps_range_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (sps->sps_multilayer_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (sps->sps_3d_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (sps->sps_scc_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (sps->sps_extension_4bits)
+ rbsp_unsupported(rbsp);
+}
+
+static void nal_hevc_rbsp_pps(struct rbsp *rbsp, struct nal_hevc_pps *pps)
+{
+ unsigned int i;
+
+ rbsp_uev(rbsp, &pps->pps_pic_parameter_set_id);
+ rbsp_uev(rbsp, &pps->pps_seq_parameter_set_id);
+ rbsp_bit(rbsp, &pps->dependent_slice_segments_enabled_flag);
+ rbsp_bit(rbsp, &pps->output_flag_present_flag);
+ rbsp_bits(rbsp, 3, &pps->num_extra_slice_header_bits);
+ rbsp_bit(rbsp, &pps->sign_data_hiding_enabled_flag);
+ rbsp_bit(rbsp, &pps->cabac_init_present_flag);
+ rbsp_uev(rbsp, &pps->num_ref_idx_l0_default_active_minus1);
+ rbsp_uev(rbsp, &pps->num_ref_idx_l1_default_active_minus1);
+ rbsp_sev(rbsp, &pps->init_qp_minus26);
+ rbsp_bit(rbsp, &pps->constrained_intra_pred_flag);
+ rbsp_bit(rbsp, &pps->transform_skip_enabled_flag);
+ rbsp_bit(rbsp, &pps->cu_qp_delta_enabled_flag);
+ if (pps->cu_qp_delta_enabled_flag)
+ rbsp_uev(rbsp, &pps->diff_cu_qp_delta_depth);
+ rbsp_sev(rbsp, &pps->pps_cb_qp_offset);
+ rbsp_sev(rbsp, &pps->pps_cr_qp_offset);
+ rbsp_bit(rbsp, &pps->pps_slice_chroma_qp_offsets_present_flag);
+ rbsp_bit(rbsp, &pps->weighted_pred_flag);
+ rbsp_bit(rbsp, &pps->weighted_bipred_flag);
+ rbsp_bit(rbsp, &pps->transquant_bypass_enabled_flag);
+ rbsp_bit(rbsp, &pps->tiles_enabled_flag);
+ rbsp_bit(rbsp, &pps->entropy_coding_sync_enabled_flag);
+ if (pps->tiles_enabled_flag) {
+ rbsp_uev(rbsp, &pps->num_tile_columns_minus1);
+ rbsp_uev(rbsp, &pps->num_tile_rows_minus1);
+ rbsp_bit(rbsp, &pps->uniform_spacing_flag);
+ if (!pps->uniform_spacing_flag) {
+ for (i = 0; i < pps->num_tile_columns_minus1; i++)
+ rbsp_uev(rbsp, &pps->column_width_minus1[i]);
+ for (i = 0; i < pps->num_tile_rows_minus1; i++)
+ rbsp_uev(rbsp, &pps->row_height_minus1[i]);
+ }
+ rbsp_bit(rbsp, &pps->loop_filter_across_tiles_enabled_flag);
+ }
+ rbsp_bit(rbsp, &pps->pps_loop_filter_across_slices_enabled_flag);
+ rbsp_bit(rbsp, &pps->deblocking_filter_control_present_flag);
+ if (pps->deblocking_filter_control_present_flag) {
+ rbsp_bit(rbsp, &pps->deblocking_filter_override_enabled_flag);
+ rbsp_bit(rbsp, &pps->pps_deblocking_filter_disabled_flag);
+ if (!pps->pps_deblocking_filter_disabled_flag) {
+ rbsp_sev(rbsp, &pps->pps_beta_offset_div2);
+ rbsp_sev(rbsp, &pps->pps_tc_offset_div2);
+ }
+ }
+ rbsp_bit(rbsp, &pps->pps_scaling_list_data_present_flag);
+ if (pps->pps_scaling_list_data_present_flag)
+ rbsp_unsupported(rbsp);
+ rbsp_bit(rbsp, &pps->lists_modification_present_flag);
+ rbsp_uev(rbsp, &pps->log2_parallel_merge_level_minus2);
+ rbsp_bit(rbsp, &pps->slice_segment_header_extension_present_flag);
+ rbsp_bit(rbsp, &pps->pps_extension_present_flag);
+ if (pps->pps_extension_present_flag) {
+ rbsp_bit(rbsp, &pps->pps_range_extension_flag);
+ rbsp_bit(rbsp, &pps->pps_multilayer_extension_flag);
+ rbsp_bit(rbsp, &pps->pps_3d_extension_flag);
+ rbsp_bit(rbsp, &pps->pps_scc_extension_flag);
+ rbsp_bits(rbsp, 4, &pps->pps_extension_4bits);
+ }
+ if (pps->pps_range_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (pps->pps_multilayer_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (pps->pps_3d_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (pps->pps_scc_extension_flag)
+ rbsp_unsupported(rbsp);
+ if (pps->pps_extension_4bits)
+ rbsp_unsupported(rbsp);
+}
+
+/**
+ * nal_hevc_write_vps() - Write VPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @vps: &struct nal_hevc_vps to convert to RBSP
+ *
+ * Convert @vps to RBSP data and write it into @dest.
+ *
+ * The size of the VPS NAL unit is not known in advance and this function will
+ * fail if @dest does not hold sufficient space for the VPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_hevc_write_vps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_vps *vps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_unit_type = VPS_NUT;
+ unsigned int nuh_layer_id = 0;
+ unsigned int nuh_temporal_id_plus1 = 1;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_hevc_write_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ nal_hevc_rbsp_vps(&rbsp, vps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_write_vps);
+
+/**
+ * nal_hevc_read_vps() - Read VPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @vps: the &struct nal_hevc_vps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @vps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_hevc_read_vps(const struct device *dev,
+ struct nal_hevc_vps *vps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_unit_type;
+ unsigned int nuh_layer_id;
+ unsigned int nuh_temporal_id_plus1;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_hevc_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ if (rbsp.error ||
+ forbidden_zero_bit != 0 ||
+ nal_unit_type != VPS_NUT)
+ return -EINVAL;
+
+ nal_hevc_rbsp_vps(&rbsp, vps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_read_vps);
+
+/**
+ * nal_hevc_write_sps() - Write SPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @sps: &struct nal_hevc_sps to convert to RBSP
+ *
+ * Convert @sps to RBSP data and write it into @dest.
+ *
+ * The size of the SPS NAL unit is not known in advance and this function will
+ * fail if @dest does not hold sufficient space for the SPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_hevc_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_sps *sps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_unit_type = SPS_NUT;
+ unsigned int nuh_layer_id = 0;
+ unsigned int nuh_temporal_id_plus1 = 1;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_hevc_write_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ nal_hevc_rbsp_sps(&rbsp, sps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_write_sps);
+
+/**
+ * nal_hevc_read_sps() - Read SPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @sps: the &struct nal_hevc_sps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @sps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_hevc_read_sps(const struct device *dev,
+ struct nal_hevc_sps *sps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_unit_type;
+ unsigned int nuh_layer_id;
+ unsigned int nuh_temporal_id_plus1;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_hevc_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ if (rbsp.error ||
+ forbidden_zero_bit != 0 ||
+ nal_unit_type != SPS_NUT)
+ return -EINVAL;
+
+ nal_hevc_rbsp_sps(&rbsp, sps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_read_sps);
+
+/**
+ * nal_hevc_write_pps() - Write PPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @pps: &struct nal_hevc_pps to convert to RBSP
+ *
+ * Convert @pps to RBSP data and write it into @dest.
+ *
+ * The size of the PPS NAL unit is not known in advance and this function will
+ * fail if @dest does not hold sufficient space for the PPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_hevc_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_pps *pps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_unit_type = PPS_NUT;
+ unsigned int nuh_layer_id = 0;
+ unsigned int nuh_temporal_id_plus1 = 1;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_hevc_write_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ nal_hevc_rbsp_pps(&rbsp, pps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_write_pps);
+
+/**
+ * nal_hevc_read_pps() - Read PPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @pps: the &struct nal_hevc_pps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @pps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_hevc_read_pps(const struct device *dev,
+ struct nal_hevc_pps *pps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_unit_type;
+ unsigned int nuh_layer_id;
+ unsigned int nuh_temporal_id_plus1;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_hevc_read_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ nal_hevc_rbsp_pps(&rbsp, pps);
+
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_read_pps);
+
+/**
+ * nal_hevc_write_filler() - Write filler data RBSP
+ * @dev: device pointer
+ * @dest: buffer to fill with filler data
+ * @n: size of the buffer to fill with filler data
+ *
+ * Write a filler data RBSP to @dest with a size of @n bytes and return the
+ * number of written filler data bytes.
+ *
+ * Use this function to generate dummy data in an RBSP data stream that can be
+ * safely ignored by HEVC decoders.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.265
+ * (02/2018) 7.3.2.8 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_hevc_write_filler(const struct device *dev, void *dest, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_unit_type = FD_NUT;
+ unsigned int nuh_layer_id = 0;
+ unsigned int nuh_temporal_id_plus1 = 1;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_hevc_write_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ nal_hevc_write_filler_data(&rbsp);
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_write_filler);
+
+/**
+ * nal_hevc_read_filler() - Read filler data RBSP
+ * @dev: device pointer
+ * @src: buffer with RBSP data that is read
+ * @n: maximum size of src that shall be read
+ *
+ * Read a filler data RBSP from @src up to a maximum size of @n bytes and
+ * return the size of the filler data in bytes including the marker.
+ *
+ * This function is used to parse filler data and skip the respective bytes in
+ * the RBSP data.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.265
+ * (02/2018) 7.3.2.8 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_hevc_read_filler(const struct device *dev, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_unit_type;
+ unsigned int nuh_layer_id;
+ unsigned int nuh_temporal_id_plus1;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_hevc_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 6, &nal_unit_type);
+ rbsp_bits(&rbsp, 6, &nuh_layer_id);
+ rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1);
+
+ if (rbsp.error)
+ return rbsp.error;
+ if (forbidden_zero_bit != 0 ||
+ nal_unit_type != FD_NUT)
+ return -EINVAL;
+
+ nal_hevc_read_filler_data(&rbsp);
+ rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_hevc_read_filler);
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.h b/drivers/media/platform/allegro-dvt/nal-hevc.h
new file mode 100644
index 000000000000..fc994d4242d8
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs.
+ */
+
+#ifndef __NAL_HEVC_H__
+#define __NAL_HEVC_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+struct nal_hevc_profile_tier_level {
+ unsigned int general_profile_space;
+ unsigned int general_tier_flag;
+ unsigned int general_profile_idc;
+ unsigned int general_profile_compatibility_flag[32];
+ unsigned int general_progressive_source_flag;
+ unsigned int general_interlaced_source_flag;
+ unsigned int general_non_packed_constraint_flag;
+ unsigned int general_frame_only_constraint_flag;
+ union {
+ struct {
+ unsigned int general_max_12bit_constraint_flag;
+ unsigned int general_max_10bit_constraint_flag;
+ unsigned int general_max_8bit_constraint_flag;
+ unsigned int general_max_422chroma_constraint_flag;
+ unsigned int general_max_420chroma_constraint_flag;
+ unsigned int general_max_monochrome_constraint_flag;
+ unsigned int general_intra_constraint_flag;
+ unsigned int general_one_picture_only_constraint_flag;
+ unsigned int general_lower_bit_rate_constraint_flag;
+ union {
+ struct {
+ unsigned int general_max_14bit_constraint_flag;
+ unsigned int general_reserved_zero_33bits;
+ };
+ unsigned int general_reserved_zero_34bits;
+ };
+ };
+ struct {
+ unsigned int general_reserved_zero_7bits;
+ /* unsigned int general_one_picture_only_constraint_flag; */
+ unsigned int general_reserved_zero_35bits;
+ };
+ unsigned int general_reserved_zero_43bits;
+ };
+ union {
+ unsigned int general_inbld_flag;
+ unsigned int general_reserved_zero_bit;
+ };
+ unsigned int general_level_idc;
+};
+
+/**
+ * struct nal_hevc_vps - Video parameter set
+ *
+ * C struct representation of the video parameter set NAL unit as defined by
+ * Rec. ITU-T H.265 (02/2018) 7.3.2.1 Video parameter set RBSP syntax
+ */
+struct nal_hevc_vps {
+ unsigned int video_parameter_set_id;
+ unsigned int base_layer_internal_flag;
+ unsigned int base_layer_available_flag;
+ unsigned int max_layers_minus1;
+ unsigned int max_sub_layers_minus1;
+ unsigned int temporal_id_nesting_flag;
+ struct nal_hevc_profile_tier_level profile_tier_level;
+ unsigned int sub_layer_ordering_info_present_flag;
+ struct {
+ unsigned int max_dec_pic_buffering_minus1[7];
+ unsigned int max_num_reorder_pics[7];
+ unsigned int max_latency_increase_plus1[7];
+ };
+ unsigned int max_layer_id;
+ unsigned int num_layer_sets_minus1;
+ unsigned int layer_id_included_flag[1024][64];
+ unsigned int timing_info_present_flag;
+ struct {
+ unsigned int num_units_in_tick;
+ unsigned int time_scale;
+ unsigned int poc_proportional_to_timing_flag;
+ unsigned int num_ticks_poc_diff_one_minus1;
+ unsigned int num_hrd_parameters;
+ struct {
+ unsigned int hrd_layer_set_idx[0];
+ unsigned int cprms_present_flag[0];
+ };
+ /* hrd_parameters( cprms_present_flag[ i ], max_sub_layers_minus1 ) */
+ };
+ unsigned int extension_flag;
+ unsigned int extension_data_flag;
+};
+
+struct nal_hevc_sub_layer_hrd_parameters {
+ unsigned int bit_rate_value_minus1[1];
+ unsigned int cpb_size_value_minus1[1];
+ unsigned int cbr_flag[1];
+};
+
+struct nal_hevc_hrd_parameters {
+ unsigned int nal_hrd_parameters_present_flag;
+ unsigned int vcl_hrd_parameters_present_flag;
+ struct {
+ unsigned int sub_pic_hrd_params_present_flag;
+ struct {
+ unsigned int tick_divisor_minus2;
+ unsigned int du_cpb_removal_delay_increment_length_minus1;
+ unsigned int sub_pic_cpb_params_in_pic_timing_sei_flag;
+ unsigned int dpb_output_delay_du_length_minus1;
+ };
+ unsigned int bit_rate_scale;
+ unsigned int cpb_size_scale;
+ unsigned int cpb_size_du_scale;
+ unsigned int initial_cpb_removal_delay_length_minus1;
+ unsigned int au_cpb_removal_delay_length_minus1;
+ unsigned int dpb_output_delay_length_minus1;
+ };
+ struct {
+ unsigned int fixed_pic_rate_general_flag[1];
+ unsigned int fixed_pic_rate_within_cvs_flag[1];
+ unsigned int elemental_duration_in_tc_minus1[1];
+ unsigned int low_delay_hrd_flag[1];
+ unsigned int cpb_cnt_minus1[1];
+ struct nal_hevc_sub_layer_hrd_parameters nal_hrd[1];
+ struct nal_hevc_sub_layer_hrd_parameters vcl_hrd[1];
+ };
+};
+
+/**
+ * struct nal_hevc_vui_parameters - VUI parameters
+ *
+ * C struct representation of the VUI parameters as defined by Rec. ITU-T
+ * H.265 (02/2018) E.2.1 VUI parameters syntax.
+ */
+struct nal_hevc_vui_parameters {
+ unsigned int aspect_ratio_info_present_flag;
+ struct {
+ unsigned int aspect_ratio_idc;
+ unsigned int sar_width;
+ unsigned int sar_height;
+ };
+ unsigned int overscan_info_present_flag;
+ unsigned int overscan_appropriate_flag;
+ unsigned int video_signal_type_present_flag;
+ struct {
+ unsigned int video_format;
+ unsigned int video_full_range_flag;
+ unsigned int colour_description_present_flag;
+ struct {
+ unsigned int colour_primaries;
+ unsigned int transfer_characteristics;
+ unsigned int matrix_coeffs;
+ };
+ };
+ unsigned int chroma_loc_info_present_flag;
+ struct {
+ unsigned int chroma_sample_loc_type_top_field;
+ unsigned int chroma_sample_loc_type_bottom_field;
+ };
+ unsigned int neutral_chroma_indication_flag;
+ unsigned int field_seq_flag;
+ unsigned int frame_field_info_present_flag;
+ unsigned int default_display_window_flag;
+ struct {
+ unsigned int def_disp_win_left_offset;
+ unsigned int def_disp_win_right_offset;
+ unsigned int def_disp_win_top_offset;
+ unsigned int def_disp_win_bottom_offset;
+ };
+ unsigned int vui_timing_info_present_flag;
+ struct {
+ unsigned int vui_num_units_in_tick;
+ unsigned int vui_time_scale;
+ unsigned int vui_poc_proportional_to_timing_flag;
+ unsigned int vui_num_ticks_poc_diff_one_minus1;
+ unsigned int vui_hrd_parameters_present_flag;
+ struct nal_hevc_hrd_parameters nal_hrd_parameters;
+ };
+ unsigned int bitstream_restriction_flag;
+ struct {
+ unsigned int tiles_fixed_structure_flag;
+ unsigned int motion_vectors_over_pic_boundaries_flag;
+ unsigned int restricted_ref_pic_lists_flag;
+ unsigned int min_spatial_segmentation_idc;
+ unsigned int max_bytes_per_pic_denom;
+ unsigned int max_bits_per_min_cu_denom;
+ unsigned int log2_max_mv_length_horizontal;
+ unsigned int log2_max_mv_length_vertical;
+ };
+};
+
+/**
+ * struct nal_hevc_sps - Sequence parameter set
+ *
+ * C struct representation of the sequence parameter set NAL unit as defined by
+ * Rec. ITU-T H.265 (02/2018) 7.3.2.2 Sequence parameter set RBSP syntax
+ */
+struct nal_hevc_sps {
+ unsigned int video_parameter_set_id;
+ unsigned int max_sub_layers_minus1;
+ unsigned int temporal_id_nesting_flag;
+ struct nal_hevc_profile_tier_level profile_tier_level;
+ unsigned int seq_parameter_set_id;
+ unsigned int chroma_format_idc;
+ unsigned int separate_colour_plane_flag;
+ unsigned int pic_width_in_luma_samples;
+ unsigned int pic_height_in_luma_samples;
+ unsigned int conformance_window_flag;
+ struct {
+ unsigned int conf_win_left_offset;
+ unsigned int conf_win_right_offset;
+ unsigned int conf_win_top_offset;
+ unsigned int conf_win_bottom_offset;
+ };
+
+ unsigned int bit_depth_luma_minus8;
+ unsigned int bit_depth_chroma_minus8;
+ unsigned int log2_max_pic_order_cnt_lsb_minus4;
+ unsigned int sub_layer_ordering_info_present_flag;
+ struct {
+ unsigned int max_dec_pic_buffering_minus1[7];
+ unsigned int max_num_reorder_pics[7];
+ unsigned int max_latency_increase_plus1[7];
+ };
+ unsigned int log2_min_luma_coding_block_size_minus3;
+ unsigned int log2_diff_max_min_luma_coding_block_size;
+ unsigned int log2_min_luma_transform_block_size_minus2;
+ unsigned int log2_diff_max_min_luma_transform_block_size;
+ unsigned int max_transform_hierarchy_depth_inter;
+ unsigned int max_transform_hierarchy_depth_intra;
+
+ unsigned int scaling_list_enabled_flag;
+ unsigned int scaling_list_data_present_flag;
+ unsigned int amp_enabled_flag;
+ unsigned int sample_adaptive_offset_enabled_flag;
+ unsigned int pcm_enabled_flag;
+ struct {
+ unsigned int pcm_sample_bit_depth_luma_minus1;
+ unsigned int pcm_sample_bit_depth_chroma_minus1;
+ unsigned int log2_min_pcm_luma_coding_block_size_minus3;
+ unsigned int log2_diff_max_min_pcm_luma_coding_block_size;
+ unsigned int pcm_loop_filter_disabled_flag;
+ };
+
+ unsigned int num_short_term_ref_pic_sets;
+ unsigned int long_term_ref_pics_present_flag;
+ unsigned int sps_temporal_mvp_enabled_flag;
+ unsigned int strong_intra_smoothing_enabled_flag;
+ unsigned int vui_parameters_present_flag;
+ struct nal_hevc_vui_parameters vui;
+ unsigned int extension_present_flag;
+ struct {
+ unsigned int sps_range_extension_flag;
+ unsigned int sps_multilayer_extension_flag;
+ unsigned int sps_3d_extension_flag;
+ unsigned int sps_scc_extension_flag;
+ unsigned int sps_extension_4bits;
+ };
+};
+
+struct nal_hevc_pps {
+ unsigned int pps_pic_parameter_set_id;
+ unsigned int pps_seq_parameter_set_id;
+ unsigned int dependent_slice_segments_enabled_flag;
+ unsigned int output_flag_present_flag;
+ unsigned int num_extra_slice_header_bits;
+ unsigned int sign_data_hiding_enabled_flag;
+ unsigned int cabac_init_present_flag;
+ unsigned int num_ref_idx_l0_default_active_minus1;
+ unsigned int num_ref_idx_l1_default_active_minus1;
+ int init_qp_minus26;
+ unsigned int constrained_intra_pred_flag;
+ unsigned int transform_skip_enabled_flag;
+ unsigned int cu_qp_delta_enabled_flag;
+ unsigned int diff_cu_qp_delta_depth;
+ int pps_cb_qp_offset;
+ int pps_cr_qp_offset;
+ unsigned int pps_slice_chroma_qp_offsets_present_flag;
+ unsigned int weighted_pred_flag;
+ unsigned int weighted_bipred_flag;
+ unsigned int transquant_bypass_enabled_flag;
+ unsigned int tiles_enabled_flag;
+ unsigned int entropy_coding_sync_enabled_flag;
+ struct {
+ unsigned int num_tile_columns_minus1;
+ unsigned int num_tile_rows_minus1;
+ unsigned int uniform_spacing_flag;
+ struct {
+ unsigned int column_width_minus1[1];
+ unsigned int row_height_minus1[1];
+ };
+ unsigned int loop_filter_across_tiles_enabled_flag;
+ };
+ unsigned int pps_loop_filter_across_slices_enabled_flag;
+ unsigned int deblocking_filter_control_present_flag;
+ struct {
+ unsigned int deblocking_filter_override_enabled_flag;
+ unsigned int pps_deblocking_filter_disabled_flag;
+ struct {
+ int pps_beta_offset_div2;
+ int pps_tc_offset_div2;
+ };
+ };
+ unsigned int pps_scaling_list_data_present_flag;
+ unsigned int lists_modification_present_flag;
+ unsigned int log2_parallel_merge_level_minus2;
+ unsigned int slice_segment_header_extension_present_flag;
+ unsigned int pps_extension_present_flag;
+ struct {
+ unsigned int pps_range_extension_flag;
+ unsigned int pps_multilayer_extension_flag;
+ unsigned int pps_3d_extension_flag;
+ unsigned int pps_scc_extension_flag;
+ unsigned int pps_extension_4bits;
+ };
+};
+
+int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile);
+int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier);
+int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level);
+
+int nal_range_from_v4l2(enum v4l2_quantization quantization);
+int nal_color_primaries_from_v4l2(enum v4l2_colorspace colorspace);
+int nal_transfer_characteristics_from_v4l2(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func);
+int nal_matrix_coeffs_from_v4l2(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding);
+
+ssize_t nal_hevc_write_vps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_vps *vps);
+ssize_t nal_hevc_read_vps(const struct device *dev,
+ struct nal_hevc_vps *vps, void *src, size_t n);
+
+ssize_t nal_hevc_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_sps *sps);
+ssize_t nal_hevc_read_sps(const struct device *dev,
+ struct nal_hevc_sps *sps, void *src, size_t n);
+
+ssize_t nal_hevc_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_hevc_pps *pps);
+ssize_t nal_hevc_read_pps(const struct device *dev,
+ struct nal_hevc_pps *pps, void *src, size_t n);
+
+ssize_t nal_hevc_write_filler(const struct device *dev, void *dest, size_t n);
+ssize_t nal_hevc_read_filler(const struct device *dev, void *src, size_t n);
+
+#endif /* __NAL_HEVC_H__ */
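
An encoder driver typically fills these structs from the negotiated format and controls, then serializes them in front of the first coded frame. A hedged sketch of the write path (the field values are illustrative only and error handling is omitted):

#include <linux/device.h>
#include "nal-hevc.h"

/* Illustration only: emit an SPS for a 1920x1080 Main-profile stream into
 * buf; returns the number of bytes written or a negative error code. */
static ssize_t example_write_sps(struct device *dev, void *buf, size_t size)
{
	struct nal_hevc_sps sps = {
		.chroma_format_idc = 1,			/* 4:2:0 */
		.pic_width_in_luma_samples = 1920,
		.pic_height_in_luma_samples = 1080,
		.log2_max_pic_order_cnt_lsb_minus4 = 6,
	};

	sps.profile_tier_level.general_profile_idc =
		nal_hevc_profile_from_v4l2(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
	sps.profile_tier_level.general_level_idc =
		nal_hevc_level_from_v4l2(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1);

	return nal_hevc_write_sps(dev, buf, size, &sps);
}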
diff --git a/drivers/media/platform/allegro-dvt/nal-rbsp.c b/drivers/media/platform/allegro-dvt/nal-rbsp.c
new file mode 100644
index 000000000000..d911322d0efa
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/nal-rbsp.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Helper functions to read values from and write values to a raw byte
+ * sequence payload (RBSP).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include "nal-rbsp.h"
+
+void rbsp_init(struct rbsp *rbsp, void *addr, size_t size,
+ struct nal_rbsp_ops *ops)
+{
+ if (!rbsp)
+ return;
+
+ rbsp->data = addr;
+ rbsp->size = size;
+ rbsp->pos = 0;
+ rbsp->ops = ops;
+ rbsp->error = 0;
+}
+
+void rbsp_unsupported(struct rbsp *rbsp)
+{
+ rbsp->error = -EINVAL;
+}
+
+static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value);
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value);
+
+/*
+ * When reading or writing, the emulation_prevention_three_byte is detected
+ * only when the 2 one bits need to be inserted. Therefore, we are not
+ * actually adding the 0x3 byte, but the 2 one bits and the six 0 bits of the
+ * next byte.
+ */
+#define EMULATION_PREVENTION_THREE_BYTE (0x3 << 6)
+
+static int add_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_write_bits(rbsp, 8, EMULATION_PREVENTION_THREE_BYTE);
+
+ return 0;
+}
+
+static int discard_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ unsigned int tmp = 0;
+
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_read_bits(rbsp, 8, &tmp);
+ if (tmp != EMULATION_PREVENTION_THREE_BYTE)
+ return -EINVAL;
+
+ return 0;
+}
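
The escaping exists because the byte patterns 0x000000 through 0x000003 would otherwise be ambiguous with start codes: whenever two zero bytes are followed by a byte less than or equal to 0x03, an emulation_prevention_three_byte (0x03) is inserted, so e.g. the payload bytes 00 00 01 travel as 00 00 03 01 and a decoder simply drops the 0x03 again. The bit-level variant above triggers after 22 consecutive zero bits, i.e. two zero bytes plus the six leading zero bits of a third byte: at that point six payload zeros already sit in the would-be escape byte, and writing 0x3 << 6 turns that byte into 0x03 while re-emitting the six pending zeros into the next byte. A byte-level view of the same rule (illustration only, not driver code):

#include <linux/types.h>

static size_t escape_ep3(u8 *dst, const u8 *src, size_t n)
{
	size_t i, o = 0;
	unsigned int zeros = 0;

	for (i = 0; i < n; i++) {
		if (zeros == 2 && src[i] <= 0x03) {
			dst[o++] = 0x03;	/* emulation prevention */
			zeros = 0;
		}
		zeros = (src[i] == 0x00) ? zeros + 1 : 0;
		dst[o++] = src[i];
	}

	return o;
}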
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift;
+ int ofs;
+ int bit;
+ int err;
+
+ if (rbsp->num_consecutive_zeros == 22) {
+ err = discard_emulation_prevention_three_byte(rbsp);
+ if (err)
+ return err;
+ }
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ bit = (rbsp->data[ofs] >> shift) & 1;
+
+ rbsp->pos++;
+
+ if (bit == 1 ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0)))
+ rbsp->num_consecutive_zeros = 0;
+ else
+ rbsp->num_consecutive_zeros++;
+
+ return bit;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
+{
+ int shift;
+ int ofs;
+
+ if (rbsp->num_consecutive_zeros == 22)
+ add_emulation_prevention_three_byte(rbsp);
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->data[ofs] &= ~(1 << shift);
+ rbsp->data[ofs] |= value << shift;
+
+ rbsp->pos++;
+
+ if (value ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
+ rbsp->num_consecutive_zeros = 0;
+ } else {
+ rbsp->num_consecutive_zeros++;
+ }
+
+ return 0;
+}
+
+static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ int i;
+ int bit;
+ unsigned int tmp = 0;
+
+ if (n > 8 * sizeof(*value))
+ return -EINVAL;
+
+ for (i = n; i > 0; i--) {
+ bit = rbsp_read_bit(rbsp);
+ if (bit < 0)
+ return bit;
+ tmp |= bit << (i - 1);
+ }
+
+ if (value)
+ *value = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value)
+{
+ int ret;
+
+ if (n > 8 * sizeof(value))
+ return -EINVAL;
+
+ while (n--) {
+ ret = rbsp_write_bit(rbsp, (value >> n) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (value)
+ *value = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int ret;
+ int leading_zero_bits;
+
+ if (!value)
+ return -EINVAL;
+
+ leading_zero_bits = ilog2(*value + 1);
+
+ ret = rbsp_write_bits(rbsp, leading_zero_bits, 0);
+ if (ret)
+ return ret;
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, *value + 1);
+}
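
For reference, ue(v) Exp-Golomb coding writes floor(log2(v + 1)) zero bits followed by the binary value of v + 1, which is exactly what the ilog2() arithmetic above implements: 0 encodes as "1", 1 as "010", 2 as "011", 3 as "00100" and 4 as "00101". The read side inverts this with (1 << leading_zero_bits) - 1 + tmp.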
+
+static int rbsp_read_sev(struct rbsp *rbsp, int *value)
+{
+ int ret;
+ unsigned int tmp;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (value) {
+ if (tmp & 1)
+ *value = (tmp + 1) / 2;
+ else
+ *value = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+static int rbsp_write_sev(struct rbsp *rbsp, int *value)
+{
+ unsigned int tmp;
+
+ if (!value)
+ return -EINVAL;
+
+ if (*value > 0)
+ tmp = (2 * (*value)) - 1;
+ else
+ tmp = -2 * (*value);
+
+ return rbsp_write_uev(rbsp, &tmp);
+}
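
se(v) maps signed values onto the same unsigned code: positive v becomes codeNum 2v - 1 and non-positive v becomes codeNum -2v, so the sequence 0, 1, -1, 2, -2 encodes as codeNum 0, 1, 2, 3, 4. This matches the read side above, which decodes odd codeNum k to (k + 1) / 2 and even k to -(k / 2).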
+
+static int __rbsp_write_bit(struct rbsp *rbsp, int *value)
+{
+ return rbsp_write_bit(rbsp, *value);
+}
+
+static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ return rbsp_write_bits(rbsp, n, *value);
+}
+
+struct nal_rbsp_ops write = {
+ .rbsp_bit = __rbsp_write_bit,
+ .rbsp_bits = __rbsp_write_bits,
+ .rbsp_uev = rbsp_write_uev,
+ .rbsp_sev = rbsp_write_sev,
+};
+
+static int __rbsp_read_bit(struct rbsp *rbsp, int *value)
+{
+ int tmp = rbsp_read_bit(rbsp);
+
+ if (tmp < 0)
+ return tmp;
+ *value = tmp;
+
+ return 0;
+}
+
+struct nal_rbsp_ops read = {
+ .rbsp_bit = __rbsp_read_bit,
+ .rbsp_bits = rbsp_read_bits,
+ .rbsp_uev = rbsp_read_uev,
+ .rbsp_sev = rbsp_read_sev,
+};
+
+void rbsp_bit(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bit(rbsp, value);
+}
+
+void rbsp_bits(struct rbsp *rbsp, int n, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bits(rbsp, n, value);
+}
+
+void rbsp_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_uev(rbsp, value);
+}
+
+void rbsp_sev(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_sev(rbsp, value);
+}
+
+void rbsp_trailing_bits(struct rbsp *rbsp)
+{
+ unsigned int rbsp_stop_one_bit = 1;
+ unsigned int rbsp_alignment_zero_bit = 0;
+
+ rbsp_bit(rbsp, &rbsp_stop_one_bit);
+ rbsp_bits(rbsp, round_up(rbsp->pos, 8) - rbsp->pos,
+ &rbsp_alignment_zero_bit);
+}
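
The point of the ops indirection is that each NAL unit is described once, as a plain sequence of rbsp_bit()/rbsp_bits()/rbsp_uev()/rbsp_sev() calls, and the same description drives both the parser and the generator depending on which ops table rbsp_init() received. A minimal sketch of the pattern (the example_header syntax is invented):

#include "nal-rbsp.h"

struct example_header {
	int version;		/* u(4)  */
	unsigned int count;	/* ue(v) */
};

/* One syntax description serves both directions. */
static void rbsp_example_header(struct rbsp *rbsp, struct example_header *hdr)
{
	rbsp_bits(rbsp, 4, &hdr->version);
	rbsp_uev(rbsp, &hdr->count);
}

static void example_roundtrip(void *buf, size_t size,
			      struct example_header *hdr)
{
	struct rbsp rbsp;

	rbsp_init(&rbsp, buf, size, &write);	/* serialize hdr into buf */
	rbsp_example_header(&rbsp, hdr);
	rbsp_trailing_bits(&rbsp);

	rbsp_init(&rbsp, buf, size, &read);	/* parse buf back into hdr */
	rbsp_example_header(&rbsp, hdr);
	rbsp_trailing_bits(&rbsp);
}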
diff --git a/drivers/media/platform/allegro-dvt/nal-rbsp.h b/drivers/media/platform/allegro-dvt/nal-rbsp.h
new file mode 100644
index 000000000000..c72f49fed8d3
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/nal-rbsp.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef __NAL_RBSP_H__
+#define __NAL_RBSP_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct rbsp;
+
+struct nal_rbsp_ops {
+ int (*rbsp_bit)(struct rbsp *rbsp, int *val);
+ int (*rbsp_bits)(struct rbsp *rbsp, int n, unsigned int *val);
+ int (*rbsp_uev)(struct rbsp *rbsp, unsigned int *val);
+ int (*rbsp_sev)(struct rbsp *rbsp, int *val);
+};
+
+/**
+ * struct rbsp - State object for handling a raw byte sequence payload
+ * @data: pointer to the data of the rbsp
+ * @size: maximum size of the data of the rbsp
+ * @pos: current bit position inside the rbsp
+ * @num_consecutive_zeros: number of consecutive zeros before @pos, tracked
+ *                         for emulation prevention
+ * @ops: per-datatype functions for interacting with the rbsp
+ * @error: first error encountered while handling the rbsp; sticky once set
+ *
+ * This struct is passed between the various parsing functions and tracks the
+ * current position within the raw byte sequence payload.
+ *
+ * The @ops field separates the operation, i.e., reading or writing a value
+ * from or to that rbsp, from the structure of the NAL unit. This makes it
+ * possible to iterate over a NAL unit with a single function, while @ops
+ * provides the function pointers that handle each data type in the rbsp.
+ */
+struct rbsp {
+ u8 *data;
+ size_t size;
+ unsigned int pos;
+ unsigned int num_consecutive_zeros;
+ struct nal_rbsp_ops *ops;
+ int error;
+};
+
+extern struct nal_rbsp_ops write;
+extern struct nal_rbsp_ops read;
+
+void rbsp_init(struct rbsp *rbsp, void *addr, size_t size,
+ struct nal_rbsp_ops *ops);
+void rbsp_unsupported(struct rbsp *rbsp);
+
+void rbsp_bit(struct rbsp *rbsp, int *value);
+void rbsp_bits(struct rbsp *rbsp, int n, unsigned int *value);
+void rbsp_uev(struct rbsp *rbsp, unsigned int *value);
+void rbsp_sev(struct rbsp *rbsp, int *value);
+
+void rbsp_trailing_bits(struct rbsp *rbsp);
+
+#endif /* __NAL_RBSP_H__ */
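As a usage sketch of the @ops indirection described above (the names
example_sps and nal_rbsp_example are illustrative, not taken from the
driver), a NAL unit description written against this API is
direction-agnostic:

struct example_sps {
	unsigned int profile_idc;               /* fixed-width field, u(8) */
	unsigned int log2_max_frame_num_minus4; /* ue(v) */
	int offset_for_non_ref_pic;             /* se(v) */
};

/* One description of the bitstream layout serves both directions. */
static void nal_rbsp_example(struct rbsp *rbsp, struct example_sps *sps)
{
	rbsp_bits(rbsp, 8, &sps->profile_idc);
	rbsp_uev(rbsp, &sps->log2_max_frame_num_minus4);
	rbsp_sev(rbsp, &sps->offset_for_non_ref_pic);
	rbsp_trailing_bits(rbsp);
}

/*
 * Serializing: rbsp_init(&rbsp, buf, size, &write);
 * Parsing:     rbsp_init(&rbsp, buf, size, &read);
 * then call nal_rbsp_example(&rbsp, &sps) and check rbsp.error afterwards.
 */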
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 0fb9f9ba1219..6cdc77dda0e4 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2365,7 +2365,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
&vpfe->notifier, of_fwnode_handle(rem),
- sizeof(struct v4l2_async_subdev));
+ struct v4l2_async_subdev);
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index c46a79eace98..f2c4dadd6a0e 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1551,12 +1551,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
V4L2_JPEG_CHROMA_SUBSAMPLING_444);
- if (video->ctrl_handler.error) {
+ rc = video->ctrl_handler.error;
+ if (rc) {
v4l2_ctrl_handler_free(&video->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
- dev_err(video->dev, "Failed to init controls: %d\n",
- video->ctrl_handler.error);
+ dev_err(video->dev, "Failed to init controls: %d\n", rc);
return rc;
}
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index 24b784b893d6..fab8eca58d93 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -41,6 +41,7 @@ struct isc_buffer {
struct isc_subdev_entity {
struct v4l2_subdev *sd;
struct v4l2_async_subdev *asd;
+ struct device_node *epn;
struct v4l2_async_notifier notifier;
u32 pfe_cfg0;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index d74aa73f26be..0514be6153df 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -70,7 +70,6 @@ struct frame_buffer {
struct isi_graph_entity {
struct device_node *node;
- struct v4l2_async_subdev asd;
struct v4l2_subdev *subdev;
};
@@ -1136,45 +1135,26 @@ static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
.complete = isi_graph_notify_complete,
};
-static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
-{
- struct device_node *ep = NULL;
- struct device_node *remote;
-
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
-
- remote = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- if (!remote)
- return -EINVAL;
-
- /* Remote node to connect */
- isi->entity.node = remote;
- isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
-}
-
static int isi_graph_init(struct atmel_isi *isi)
{
+ struct v4l2_async_subdev *asd;
+ struct device_node *ep;
int ret;
- /* Parse the graph to extract a list of subdevice DT nodes. */
- ret = isi_graph_parse(isi, isi->dev->of_node);
- if (ret < 0) {
- dev_err(isi->dev, "Graph parsing failed\n");
- return ret;
- }
+ ep = of_graph_get_next_endpoint(isi->dev->of_node, NULL);
+ if (!ep)
+ return -EINVAL;
v4l2_async_notifier_init(&isi->notifier);
- ret = v4l2_async_notifier_add_subdev(&isi->notifier, &isi->entity.asd);
- if (ret) {
- of_node_put(isi->entity.node);
- return ret;
- }
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &isi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
+ of_node_put(ep);
+
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
isi->notifier.ops = &isi_graph_notify_ops;
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index a3304f49e499..0b78fecfd2a8 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -57,7 +57,7 @@
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL, *rem;
+ struct device_node *epn = NULL;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
int ret;
@@ -71,17 +71,9 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
if (!epn)
return 0;
- rem = of_graph_get_remote_port_parent(epn);
- if (!rem) {
- dev_notice(dev, "Remote device at %pOF not found\n",
- epn);
- continue;
- }
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
- of_node_put(rem);
ret = -EINVAL;
dev_err(dev, "Could not parse the endpoint\n");
break;
@@ -90,21 +82,10 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
- of_node_put(rem);
- ret = -ENOMEM;
- break;
- }
-
- /* asd will be freed by the subsystem once it's added to the
- * notifier list
- */
- subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
- GFP_KERNEL);
- if (!subdev_entity->asd) {
- of_node_put(rem);
ret = -ENOMEM;
break;
}
+ subdev_entity->epn = epn;
flags = v4l2_epn.bus.parallel.flags;
@@ -121,12 +102,10 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
ISC_PFE_CFG0_CCIR656;
- subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- subdev_entity->asd->match.fwnode = of_fwnode_handle(rem);
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
-
of_node_put(epn);
+
return ret;
}
@@ -228,13 +207,20 @@ static int atmel_isc_probe(struct platform_device *pdev)
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ struct v4l2_async_subdev *asd;
+
v4l2_async_notifier_init(&subdev_entity->notifier);
- ret = v4l2_async_notifier_add_subdev(&subdev_entity->notifier,
- subdev_entity->asd);
- if (ret) {
- fwnode_handle_put(subdev_entity->asd->match.fwnode);
- kfree(subdev_entity->asd);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &subdev_entity->notifier,
+ of_fwnode_handle(subdev_entity->epn),
+ struct v4l2_async_subdev);
+
+ of_node_put(subdev_entity->epn);
+ subdev_entity->epn = NULL;
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
goto cleanup_subdev;
}
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index be9ec59774d6..c68a3eac62cd 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -81,7 +81,6 @@ struct csi2rx_priv {
struct media_pad pads[CSI2RX_PAD_MAX];
/* Remote source */
- struct v4l2_async_subdev asd;
struct v4l2_subdev *source_subdev;
int source_pad;
};
@@ -362,6 +361,7 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *fwh;
struct device_node *ep;
int ret;
@@ -395,17 +395,14 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return -EINVAL;
}
- csi2rx->asd.match.fwnode = fwnode_graph_get_remote_port_parent(fwh);
- csi2rx->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- of_node_put(ep);
-
v4l2_async_notifier_init(&csi2rx->notifier);
- ret = v4l2_async_notifier_add_subdev(&csi2rx->notifier, &csi2rx->asd);
- if (ret) {
- fwnode_handle_put(csi2rx->asd.match.fwnode);
- return ret;
- }
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi2rx->notifier,
+ fwh,
+ struct v4l2_async_subdev);
+ of_node_put(ep);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
csi2rx->notifier.ops = &csi2rx_notifier_ops;
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index fe9468b180e6..5f0aeb744e81 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -628,7 +628,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (err) {
v4l2_err(&vpbe_dev->v4l2_dev,
"unable to initialize the OSD device");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto fail_dev_unregister;
}
}
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 5e67994e62cc..f1ce10828b8e 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -41,7 +41,7 @@ MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
#define VPIF_CH2_MAX_MODES 15
#define VPIF_CH3_MAX_MODES 2
-spinlock_t vpif_lock;
+DEFINE_SPINLOCK(vpif_lock);
EXPORT_SYMBOL_GPL(vpif_lock);
void __iomem *vpif_base;
@@ -437,7 +437,6 @@ static int vpif_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get(&pdev->dev);
- spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
/*
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 72a0e94e2e21..8d2e165bf7de 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1584,7 +1584,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
&vpif_obj.notifier, of_fwnode_handle(rem),
- sizeof(struct v4l2_async_subdev));
+ struct v4l2_async_subdev);
if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 46afc029138f..e5f61d9b221d 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1117,23 +1117,6 @@ static void free_vpif_objs(void)
kfree(vpif_obj.dev[i]);
}
-static int vpif_async_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
-{
- int i;
-
- for (i = 0; i < vpif_obj.config->subdev_count; i++)
- if (!strcmp(vpif_obj.config->subdevinfo[i].name,
- subdev->name)) {
- vpif_obj.sd[i] = subdev;
- vpif_obj.sd[i]->grp_id = 1 << i;
- return 0;
- }
-
- return -EINVAL;
-}
-
static int vpif_probe_complete(void)
{
struct common_obj *common;
@@ -1230,16 +1213,6 @@ probe_out:
return err;
}
-static int vpif_async_complete(struct v4l2_async_notifier *notifier)
-{
- return vpif_probe_complete();
-}
-
-static const struct v4l2_async_notifier_operations vpif_async_ops = {
- .bound = vpif_async_bound,
- .complete = vpif_async_complete,
-};
-
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
@@ -1294,52 +1267,28 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_unregister;
}
- v4l2_async_notifier_init(&vpif_obj.notifier);
-
- if (!vpif_obj.config->asd_sizes) {
- i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id);
- for (i = 0; i < subdev_count; i++) {
- vpif_obj.sd[i] =
- v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
- i2c_adap,
- &subdevdata[i].
- board_info,
- NULL);
- if (!vpif_obj.sd[i]) {
- vpif_err("Error registering v4l2 subdevice\n");
- err = -ENODEV;
- goto probe_subdev_out;
- }
-
- if (vpif_obj.sd[i])
- vpif_obj.sd[i]->grp_id = 1 << i;
- }
- err = vpif_probe_complete();
- if (err) {
+ i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id);
+ for (i = 0; i < subdev_count; i++) {
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata[i].board_info,
+ NULL);
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
goto probe_subdev_out;
}
- } else {
- for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) {
- err = v4l2_async_notifier_add_subdev(
- &vpif_obj.notifier, vpif_obj.config->asd[i]);
- if (err)
- goto probe_cleanup;
- }
- vpif_obj.notifier.ops = &vpif_async_ops;
- err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
- &vpif_obj.notifier);
- if (err) {
- vpif_err("Error registering async notifier\n");
- err = -EINVAL;
- goto probe_cleanup;
- }
+ if (vpif_obj.sd[i])
+ vpif_obj.sd[i]->grp_id = 1 << i;
}
+ err = vpif_probe_complete();
+ if (err)
+ goto probe_subdev_out;
return 0;
-probe_cleanup:
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
@@ -1358,11 +1307,6 @@ static int vpif_remove(struct platform_device *device)
struct channel_obj *ch;
int i;
- if (vpif_obj.config->asd_sizes) {
- v4l2_async_notifier_unregister(&vpif_obj.notifier);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
- }
-
v4l2_device_unregister(&vpif_obj.v4l2_dev);
kfree(vpif_obj.sd);
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index f731a65eefd6..f98062e79167 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -118,7 +118,6 @@ struct vpif_device {
struct v4l2_device v4l2_dev;
struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
struct v4l2_subdev **sd;
- struct v4l2_async_notifier notifier;
struct vpif_display_config *config;
};
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index e636c33e847b..8e1e892085ec 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -401,6 +401,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
int index = fmd->num_sensors;
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
struct device_node *rem, *np;
+ struct v4l2_async_subdev *asd;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
int ret;
@@ -418,10 +419,10 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
pd->mux_id = (endpoint.base.port - 1) & 0x1;
rem = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
if (rem == NULL) {
v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n",
ep);
+ of_node_put(ep);
return 0;
}
@@ -450,6 +451,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
* checking parent's node name.
*/
np = of_get_parent(rem);
+ of_node_put(rem);
if (of_node_name_eq(np, "i2c-isp"))
pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
@@ -458,20 +460,20 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
of_node_put(np);
if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
- of_node_put(rem);
+ of_node_put(ep);
return -EINVAL;
}
- fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- fmd->sensor[index].asd.match.fwnode = of_fwnode_handle(rem);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &fmd->subdev_notifier, of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
- ret = v4l2_async_notifier_add_subdev(&fmd->subdev_notifier,
- &fmd->sensor[index].asd);
- if (ret) {
- of_node_put(rem);
- return ret;
- }
+ of_node_put(ep);
+
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+ fmd->sensor[index].asd = asd;
fmd->num_sensors++;
return 0;
@@ -1381,7 +1383,8 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
/* Find platform data for this sensor subdev */
for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
- if (fmd->sensor[i].asd.match.fwnode ==
+ if (fmd->sensor[i].asd &&
+ fmd->sensor[i].asd->match.fwnode ==
of_fwnode_handle(subdev->dev->of_node))
si = &fmd->sensor[i];
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index 9447fafe23c6..a3876d668ea6 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -83,7 +83,7 @@ struct fimc_camclk_info {
*/
struct fimc_sensor_info {
struct fimc_source_info pdata;
- struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev *asd;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
};
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 00f623d62c96..9c94a8b58b7c 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -489,6 +489,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
int ret;
struct cafe_camera *cam;
struct mcam_camera *mcam;
+ struct v4l2_async_subdev *asd;
/*
* Start putting together one of our big camera structures.
@@ -546,9 +547,16 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
- mcam->asd.match_type = V4L2_ASYNC_MATCH_I2C;
- mcam->asd.match.i2c.adapter_id = i2c_adapter_id(cam->i2c_adapter);
- mcam->asd.match.i2c.address = ov7670_info.addr;
+ v4l2_async_notifier_init(&mcam->notifier);
+
+ asd = v4l2_async_notifier_add_i2c_subdev(&mcam->notifier,
+ i2c_adapter_id(cam->i2c_adapter),
+ ov7670_info.addr,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out_smbus_shutdown;
+ }
ret = mccic_register(mcam);
if (ret)
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index c012fd2e1d29..141bf5d97a04 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -931,6 +931,7 @@ static int mclk_enable(struct clk_hw *hw)
mclk_div = 2;
}
+ pm_runtime_get_sync(cam->dev);
clk_enable(cam->clk[0]);
mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
mcam_ctlr_power_up(cam);
@@ -944,6 +945,7 @@ static void mclk_disable(struct clk_hw *hw)
mcam_ctlr_power_down(cam);
clk_disable(cam->clk[0]);
+ pm_runtime_put(cam->dev);
}
static unsigned long mclk_recalc_rate(struct clk_hw *hw,
@@ -1866,16 +1868,6 @@ int mccic_register(struct mcam_camera *cam)
cam->pix_format = mcam_def_pix_format;
cam->mbus_code = mcam_def_mbus_code;
- /*
- * Register sensor notifier.
- */
- v4l2_async_notifier_init(&cam->notifier);
- ret = v4l2_async_notifier_add_subdev(&cam->notifier, &cam->asd);
- if (ret) {
- cam_warn(cam, "failed to add subdev to a notifier");
- goto out;
- }
-
cam->notifier.ops = &mccic_notify_ops;
ret = v4l2_async_notifier_register(&cam->v4l2_dev, &cam->notifier);
if (ret < 0) {
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index b55545822fd2..f324d808d737 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -151,7 +151,6 @@ struct mcam_camera {
*/
struct video_device vdev;
struct v4l2_async_notifier notifier;
- struct v4l2_async_subdev asd;
struct v4l2_subdev *sensor;
/* Videobuf2 stuff */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 032fdddbbecc..f2f09cea751d 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -180,6 +180,7 @@ static int mmpcam_probe(struct platform_device *pdev)
struct resource *res;
struct fwnode_handle *ep;
struct mmp_camera_platform_data *pdata;
+ struct v4l2_async_subdev *asd;
int ret;
cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
@@ -238,10 +239,15 @@ static int mmpcam_probe(struct platform_device *pdev)
if (!ep)
return -ENODEV;
- mcam->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- mcam->asd.match.fwnode = fwnode_graph_get_remote_port_parent(ep);
+ v4l2_async_notifier_init(&mcam->notifier);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(&mcam->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out;
+ }
/*
* Register the device with the core.
@@ -278,7 +284,6 @@ static int mmpcam_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
out:
- fwnode_handle_put(mcam->asd.match.fwnode);
mccic_shutdown(mcam);
return ret;
diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
index f526501bd473..153612ca96fc 100644
--- a/drivers/media/platform/meson/ge2d/ge2d.c
+++ b/drivers/media/platform/meson/ge2d/ge2d.c
@@ -988,6 +988,7 @@ static int ge2d_probe(struct platform_device *pdev)
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(&ge2d->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
goto unreg_v4l2_dev;
}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 724c7333b6e5..ace4528cdc5e 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -199,7 +199,6 @@ static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
pix_mp->ycbcr_enc = ctx->ycbcr_enc;
pix_mp->quantization = ctx->quant;
}
- memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
max_w = variant->pix_max->target_rot_dis_w;
max_h = variant->pix_max->target_rot_dis_h;
@@ -247,8 +246,6 @@ static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
pix_mp->plane_fmt[i].bytesperline = bpl;
if (pix_mp->plane_fmt[i].sizeimage < sizeimage)
pix_mp->plane_fmt[i].sizeimage = sizeimage;
- memset(pix_mp->plane_fmt[i].reserved, 0,
- sizeof(pix_mp->plane_fmt[i].reserved));
mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%u (%u)", ctx->id,
i, bpl, pix_mp->plane_fmt[i].sizeimage, sizeimage);
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index c768a587a944..56d86e59421e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -657,7 +657,6 @@ static int vidioc_try_fmt(struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
- int i;
pix_fmt_mp->field = V4L2_FIELD_NONE;
@@ -715,12 +714,7 @@ static int vidioc_try_fmt(struct v4l2_format *f,
}
}
- for (i = 0; i < pix_fmt_mp->num_planes; i++)
- memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
- sizeof(pix_fmt_mp->plane_fmt[0].reserved));
-
pix_fmt_mp->flags = 0;
- memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
return 0;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 21de1431cfcb..8c917969c2f1 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -121,7 +121,6 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f,
return -EINVAL;
f->pixelformat = formats[f->index].fourcc;
- memset(f->reserved, 0, sizeof(f->reserved));
return 0;
}
@@ -252,7 +251,6 @@ static int vidioc_try_fmt(struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
- int i;
pix_fmt_mp->field = V4L2_FIELD_NONE;
@@ -320,13 +318,7 @@ static int vidioc_try_fmt(struct v4l2_format *f,
}
}
- for (i = 0; i < pix_fmt_mp->num_planes; i++)
- memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
- sizeof(pix_fmt_mp->plane_fmt[0].reserved));
-
pix_fmt_mp->flags = 0;
- memset(&pix_fmt_mp->reserved, 0x0,
- sizeof(pix_fmt_mp->reserved));
return 0;
}
@@ -532,8 +524,6 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
for (i = 0; i < pix->num_planes; i++) {
pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
- memset(&(pix->plane_fmt[i].reserved[0]), 0x0,
- sizeof(pix->plane_fmt[i].reserved));
}
pix->flags = 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index dfb42e19bf81..be3842e6ca47 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -303,7 +303,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = PTR_ERR((__force void *)dev->reg_base[VENC_SYS]);
goto err_res;
}
- mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_SYS]);
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_SYS, dev->reg_base[VENC_SYS]);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
@@ -332,7 +332,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = PTR_ERR((__force void *)dev->reg_base[VENC_LT_SYS]);
goto err_res;
}
- mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_LT_SYS]);
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_LT_SYS, dev->reg_base[VENC_LT_SYS]);
dev->enc_lt_irq = platform_get_irq(pdev, 1);
irq_set_status_flags(dev->enc_lt_irq, IRQ_NOAUTOEN);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
index a3c7a380c930..70580c2525ba 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -27,13 +27,13 @@ int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
if (!ret) {
status = -1; /* timeout */
- mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout time=%ums out %d %d!",
- ctx->id, ctx->type, command, timeout_ms,
- ctx->int_cond, ctx->int_type);
+ mtk_v4l2_err("[%d] ctx->type=%d, cmd=%d, wait_event_interruptible_timeout time=%ums out %d %d!",
+ ctx->id, ctx->type, command, timeout_ms,
+ ctx->int_cond, ctx->int_type);
} else if (-ERESTARTSYS == ret) {
- mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout interrupted by a signal %d %d",
- ctx->id, ctx->type, command, ctx->int_cond,
- ctx->int_type);
+ mtk_v4l2_err("[%d] ctx->type=%d, cmd=%d, wait_event_interruptible_timeout interrupted by a signal %d %d",
+ ctx->id, ctx->type, command, ctx->int_cond,
+ ctx->int_type);
status = -1;
}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5ea153a68522..d9880210b2ab 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -890,7 +890,8 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
if (vsi->show_frame & BIT(2)) {
- if (vpu_dec_start(&inst->vpu, NULL, 0)) {
+ ret = vpu_dec_start(&inst->vpu, NULL, 0);
+ if (ret) {
mtk_vcodec_err(inst, "vpu trig decoder failed");
goto DECODE_ERROR;
}
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index f73b5893220d..de16de46c0f4 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -12,6 +12,5 @@ config VIDEO_OMAP2_VOUT
depends on VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
- select FRAME_VECTOR
help
V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index b1fc4518e275..a6bb7d9bf75f 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2126,21 +2126,6 @@ static void isp_parse_of_csi1_endpoint(struct device *dev,
buscfg->bus.ccp2.crc = 1;
}
-static int isp_alloc_isd(struct isp_async_subdev **isd,
- struct isp_bus_cfg **buscfg)
-{
- struct isp_async_subdev *__isd;
-
- __isd = kzalloc(sizeof(*__isd), GFP_KERNEL);
- if (!__isd)
- return -ENOMEM;
-
- *isd = __isd;
- *buscfg = &__isd->bus;
-
- return 0;
-}
-
static struct {
u32 phy;
u32 csi2_if;
@@ -2156,7 +2141,6 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
{
struct fwnode_handle *ep;
struct isp_async_subdev *isd = NULL;
- struct isp_bus_cfg *buscfg;
unsigned int i;
ep = fwnode_graph_get_endpoint_by_id(
@@ -2174,20 +2158,13 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret) {
- ret = isp_alloc_isd(&isd, &buscfg);
- if (ret)
- return ret;
- }
-
- if (!ret) {
- isp_parse_of_parallel_endpoint(isp->dev, &vep, buscfg);
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, &isd->asd);
+ isd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &isp->notifier, ep, struct isp_async_subdev);
+ if (!IS_ERR(isd))
+ isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
}
fwnode_handle_put(ep);
- if (ret)
- kfree(isd);
}
for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) {
@@ -2206,15 +2183,8 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i,
to_of_node(ep));
- ret = isp_alloc_isd(&isd, &buscfg);
- if (ret)
- return ret;
-
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (!ret) {
- buscfg->interface = isp_bus_interfaces[i].csi2_if;
- isp_parse_of_csi2_endpoint(isp->dev, &vep, buscfg);
- } else if (ret == -ENXIO) {
+ if (ret == -ENXIO) {
vep = (struct v4l2_fwnode_endpoint)
{ .bus_type = V4L2_MBUS_CSI1 };
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
@@ -2224,21 +2194,33 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
{ .bus_type = V4L2_MBUS_CCP2 };
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
}
- if (!ret) {
- buscfg->interface =
- isp_bus_interfaces[i].csi1_if;
- isp_parse_of_csi1_endpoint(isp->dev, &vep,
- buscfg);
- }
}
- if (!ret)
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, &isd->asd);
+ if (!ret) {
+ isd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &isp->notifier, ep, struct isp_async_subdev);
+
+ if (!IS_ERR(isd)) {
+ switch (vep.bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ isd->bus.interface =
+ isp_bus_interfaces[i].csi2_if;
+ isp_parse_of_csi2_endpoint(isp->dev, &vep, &isd->bus);
+ break;
+ case V4L2_MBUS_CSI1:
+ case V4L2_MBUS_CCP2:
+ isd->bus.interface =
+ isp_bus_interfaces[i].csi1_if;
+ isp_parse_of_csi1_endpoint(isp->dev, &vep,
+ &isd->bus);
+ break;
+ default:
+ break;
+ }
+ }
+ }
fwnode_handle_put(ep);
- if (ret)
- kfree(isd);
}
return 0;
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index b664ce7558a1..14077797f5e1 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -31,7 +31,6 @@
#include <linux/dma/pxa-dma.h>
#include <media/v4l2-async.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -656,8 +655,6 @@ struct pxa_camera_dev {
const struct pxa_camera_format_xlate *current_fmt;
struct v4l2_pix_format current_pix;
- struct v4l2_async_subdev asd;
-
/*
* PXA27x is only supposed to handle one camera on its Quick Capture
* interface. If anyone ever builds hardware to enable more than
@@ -677,7 +674,6 @@ struct pxa_camera_dev {
unsigned long ciclk;
unsigned long mclk;
u32 mclk_divisor;
- struct v4l2_clk *mclk_clk;
u16 width_flags; /* max 10 bits */
struct list_head capture;
@@ -1386,6 +1382,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
int ret = 0;
+#ifdef DEBUG
+ int i;
+#endif
switch (pcdev->channels) {
case 1:
@@ -2030,9 +2029,6 @@ static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static const struct v4l2_clk_ops pxa_camera_mclk_ops = {
-};
-
static const struct video_device pxa_camera_videodev_template = {
.name = "pxa-camera",
.minor = -1,
@@ -2140,11 +2136,6 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
pxa_camera_destroy_formats(pcdev);
- if (pcdev->mclk_clk) {
- v4l2_clk_unregister(pcdev->mclk_clk);
- pcdev->mclk_clk = NULL;
- }
-
video_unregister_device(&pcdev->vdev);
pcdev->sensor = NULL;
@@ -2199,11 +2190,11 @@ static int pxa_camera_resume(struct device *dev)
}
static int pxa_camera_pdata_from_dt(struct device *dev,
- struct pxa_camera_dev *pcdev,
- struct v4l2_async_subdev *asd)
+ struct pxa_camera_dev *pcdev)
{
u32 mclk_rate;
- struct device_node *remote, *np = dev->of_node;
+ struct v4l2_async_subdev *asd;
+ struct device_node *np = dev->of_node;
struct v4l2_fwnode_endpoint ep = { .bus_type = 0 };
int err = of_property_read_u32(np, "clock-frequency",
&mclk_rate);
@@ -2255,13 +2246,12 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- remote = of_graph_get_remote_port_parent(np);
- if (remote)
- asd->match.fwnode = of_fwnode_handle(remote);
- else
- dev_notice(dev, "no remote for %pOF\n", np);
-
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &pcdev->notifier,
+ of_fwnode_handle(np),
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd))
+ err = PTR_ERR(asd);
out:
of_node_put(np);
@@ -2278,7 +2268,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
.src_maxburst = 8,
.direction = DMA_DEV_TO_MEM,
};
- char clk_name[V4L2_CLK_NAME_SIZE];
int irq;
int err = 0, i;
@@ -2297,18 +2286,23 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk))
return PTR_ERR(pcdev->clk);
+ v4l2_async_notifier_init(&pcdev->notifier);
pcdev->res = res;
-
pcdev->pdata = pdev->dev.platform_data;
if (pcdev->pdata) {
+ struct v4l2_async_subdev *asd;
+
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
- pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
- pcdev->asd.match.i2c.adapter_id =
- pcdev->pdata->sensor_i2c_adapter_id;
- pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
+ asd = v4l2_async_notifier_add_i2c_subdev(
+ &pcdev->notifier,
+ pcdev->pdata->sensor_i2c_adapter_id,
+ pcdev->pdata->sensor_i2c_address,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd))
+ err = PTR_ERR(asd);
} else if (pdev->dev.of_node) {
- err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+ err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev);
} else {
return -ENODEV;
}
@@ -2400,43 +2394,18 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (err)
goto exit_deactivate;
- v4l2_async_notifier_init(&pcdev->notifier);
-
- err = v4l2_async_notifier_add_subdev(&pcdev->notifier, &pcdev->asd);
- if (err) {
- fwnode_handle_put(pcdev->asd.match.fwnode);
- goto exit_free_v4l2dev;
- }
-
- pcdev->notifier.ops = &pxa_camera_sensor_ops;
-
- if (!of_have_populated_dt())
- pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
-
err = pxa_camera_init_videobuf2(pcdev);
if (err)
goto exit_notifier_cleanup;
- v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
- pcdev->asd.match.i2c.adapter_id,
- pcdev->asd.match.i2c.address);
-
- pcdev->mclk_clk = v4l2_clk_register(&pxa_camera_mclk_ops, clk_name, NULL);
- if (IS_ERR(pcdev->mclk_clk)) {
- err = PTR_ERR(pcdev->mclk_clk);
- goto exit_notifier_cleanup;
- }
-
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
if (err)
- goto exit_free_clk;
+ goto exit_notifier_cleanup;
return 0;
-exit_free_clk:
- v4l2_clk_unregister(pcdev->mclk_clk);
exit_notifier_cleanup:
v4l2_async_notifier_cleanup(&pcdev->notifier);
-exit_free_v4l2dev:
v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
@@ -2463,11 +2432,6 @@ static int pxa_camera_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&pcdev->notifier);
v4l2_async_notifier_cleanup(&pcdev->notifier);
- if (pcdev->mclk_clk) {
- v4l2_clk_unregister(pcdev->mclk_clk);
- pcdev->mclk_clk = NULL;
- }
-
v4l2_device_unregister(&pcdev->v4l2_dev);
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index bd9334af1c73..97cea7c4d769 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -579,7 +579,7 @@ static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
break;
}
- if (k < f->index)
+ if (k == -1 || k < f->index)
/*
* All the unique pixel formats matching the arguments
* have been enumerated (k >= 0 and f->index > 0), or
@@ -961,6 +961,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
video->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
} else {
+ ret = -EINVAL;
goto error_video_register;
}
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 8fefce57bc49..7c0f669f8aa6 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -655,7 +655,6 @@ static int camss_of_parse_ports(struct camss *camss)
for_each_endpoint_of_node(dev->of_node, node) {
struct camss_async_subdev *csd;
- struct v4l2_async_subdev *asd;
if (!of_device_is_available(node))
continue;
@@ -667,17 +666,15 @@ static int camss_of_parse_ports(struct camss *camss)
goto err_cleanup;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(
+ csd = v4l2_async_notifier_add_fwnode_subdev(
&camss->notifier, of_fwnode_handle(remote),
- sizeof(*csd));
+ struct camss_async_subdev);
of_node_put(remote);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
+ if (IS_ERR(csd)) {
+ ret = PTR_ERR(csd);
goto err_cleanup;
}
- csd = container_of(asd, struct camss_async_subdev, asd);
-
ret = camss_of_parse_endpoint_node(dev, node, csd);
if (ret < 0)
goto err_cleanup;
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index dfc636865709..91ee6be10292 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -3,7 +3,9 @@
venus-core-objs += core.o helpers.o firmware.o \
hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
- hfi_parser.o pm_helpers.o dbgfs.o
+ hfi_parser.o pm_helpers.o dbgfs.o \
+ hfi_platform.o hfi_platform_v4.o \
+ hfi_platform_v6.o hfi_plat_bufs_v6.o
venus-dec-objs += vdec.o vdec_ctrls.o
venus-enc-objs += venc.o venc_ctrls.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bdd293faaad0..f9896c121fd8 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -7,6 +7,7 @@
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -22,6 +23,33 @@
#include "firmware.h"
#include "pm_helpers.h"
+static void venus_coredump(struct venus_core *core)
+{
+ struct device *dev;
+ phys_addr_t mem_phys;
+ size_t mem_size;
+ void *mem_va;
+ void *data;
+
+ dev = core->dev;
+ mem_phys = core->fw.mem_phys;
+ mem_size = core->fw.mem_size;
+
+ mem_va = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_va)
+ return;
+
+ data = vmalloc(mem_size);
+ if (!data) {
+ memunmap(mem_va);
+ return;
+ }
+
+ memcpy(data, mem_va, mem_size);
+ memunmap(mem_va);
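+ /* dev_coredumpv() takes ownership of @data and frees it with vfree() */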
+ dev_coredumpv(dev, data, mem_size, GFP_KERNEL);
+}
+
static void venus_event_notify(struct venus_core *core, u32 event)
{
struct venus_inst *inst;
@@ -67,6 +95,8 @@ static void venus_sys_error_handler(struct work_struct *work)
venus_shutdown(core);
+ venus_coredump(core);
+
pm_runtime_put_sync(core->dev);
while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
@@ -349,8 +379,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
+ pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
@@ -486,17 +518,6 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
-static const struct codec_freq_data sdm845_codec_freq_data[] = {
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
-};
-
static const struct bw_tbl sdm845_bw_table_enc[] = {
{ 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
{ 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
@@ -518,8 +539,6 @@ static const struct venus_resources sdm845_res = {
.bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
.bw_tbl_dec = sdm845_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
- .codec_freq_data = sdm845_codec_freq_data,
- .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "core", "bus" },
@@ -541,8 +560,6 @@ static const struct venus_resources sdm845_res_v2 = {
.bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
.bw_tbl_dec = sdm845_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
- .codec_freq_data = sdm845_codec_freq_data,
- .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
@@ -592,8 +609,6 @@ static const struct venus_resources sc7180_res = {
.bw_tbl_enc_size = ARRAY_SIZE(sc7180_bw_table_enc),
.bw_tbl_dec = sc7180_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sc7180_bw_table_dec),
- .codec_freq_data = sdm845_codec_freq_data,
- .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index f03ed427accd..a252ed32cc14 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -14,6 +14,7 @@
#include "dbgfs.h"
#include "hfi.h"
+#include "hfi_platform.h"
#define VDBGL "VenusLow : "
#define VDBGM "VenusMed : "
@@ -36,13 +37,6 @@ struct reg_val {
u32 value;
};
-struct codec_freq_data {
- u32 pixfmt;
- u32 session_type;
- unsigned long vpp_freq;
- unsigned long vsp_freq;
-};
-
struct bw_tbl {
u32 mbs_per_sec;
u32 avg;
@@ -61,8 +55,6 @@ struct venus_resources {
unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
- const struct codec_freq_data *codec_freq_data;
- unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
@@ -91,30 +83,6 @@ struct venus_format {
u32 flags;
};
-#define MAX_PLANES 4
-#define MAX_FMT_ENTRIES 32
-#define MAX_CAP_ENTRIES 32
-#define MAX_ALLOC_MODE_ENTRIES 16
-#define MAX_CODEC_NUM 32
-
-struct raw_formats {
- u32 buftype;
- u32 fmt;
-};
-
-struct venus_caps {
- u32 codec;
- u32 domain;
- bool cap_bufs_mode_dynamic;
- unsigned int num_caps;
- struct hfi_capability caps[MAX_CAP_ENTRIES];
- unsigned int num_pl;
- struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
- unsigned int num_fmts;
- struct raw_formats fmts[MAX_FMT_ENTRIES];
- bool valid; /* used only for Venus v1xx */
-};
-
/**
* struct venus_core - holds core parameters valid for all instances
*
@@ -123,7 +91,6 @@ struct venus_caps {
* @clks: an array of struct clk pointers
* @vcodec0_clks: an array of vcodec0 struct clk pointers
* @vcodec1_clks: an array of vcodec1 struct clk pointers
- * @pd_dl_venus: pmdomain device-link for venus domain
* @pmdomains: an array of pmdomains struct device pointers
* @vdev_dec: a reference to video device structure for decoder instances
* @vdev_enc: a reference to video device structure for encoder instances
@@ -145,7 +112,6 @@ struct venus_caps {
* @enc_codecs: encoders supported by this core
* @dec_codecs: decoders supported by this core
* @max_sessions_supported: holds the maximum number of sessions
- * @core_caps: core capabilities
* @priv: a private filed for HFI operations
* @ops: the core HFI operations
* @work: a delayed work for handling system fatal error
@@ -161,7 +127,6 @@ struct venus_core {
struct icc_path *cpucfg_path;
struct opp_table *opp_table;
bool has_opp_table;
- struct device_link *pd_dl_venus;
struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX];
struct device_link *opp_dl_venus;
struct device *opp_pmdomain;
@@ -177,6 +142,8 @@ struct venus_core {
struct device *dev;
struct iommu_domain *iommu_domain;
size_t mapped_mem_size;
+ phys_addr_t mem_phys;
+ size_t mem_size;
} fw;
struct mutex lock;
struct list_head instances;
@@ -191,15 +158,10 @@ struct venus_core {
unsigned long enc_codecs;
unsigned long dec_codecs;
unsigned int max_sessions_supported;
-#define ENC_ROTATION_CAPABILITY 0x1
-#define ENC_SCALING_CAPABILITY 0x2
-#define ENC_DEINTERLACE_CAPABILITY 0x4
-#define DEC_MULTI_STREAM_CAPABILITY 0x8
- unsigned int core_caps;
void *priv;
const struct hfi_ops *ops;
struct delayed_work work;
- struct venus_caps caps[MAX_CODEC_NUM];
+ struct hfi_plat_caps caps[MAX_CODEC_NUM];
unsigned int codecs_count;
unsigned int core0_usage_count;
unsigned int core1_usage_count;
@@ -230,10 +192,28 @@ struct venc_controls {
u32 h264_b_qp;
u32 h264_min_qp;
u32 h264_max_qp;
+ u32 h264_i_min_qp;
+ u32 h264_i_max_qp;
+ u32 h264_p_min_qp;
+ u32 h264_p_max_qp;
+ u32 h264_b_min_qp;
+ u32 h264_b_max_qp;
u32 h264_loop_filter_mode;
s32 h264_loop_filter_alpha;
s32 h264_loop_filter_beta;
+ u32 hevc_i_qp;
+ u32 hevc_p_qp;
+ u32 hevc_b_qp;
+ u32 hevc_min_qp;
+ u32 hevc_max_qp;
+ u32 hevc_i_min_qp;
+ u32 hevc_i_max_qp;
+ u32 hevc_p_min_qp;
+ u32 hevc_p_max_qp;
+ u32 hevc_b_min_qp;
+ u32 hevc_b_max_qp;
+
u32 vp8_min_qp;
u32 vp8_max_qp;
@@ -256,6 +236,8 @@ struct venc_controls {
u32 hevc;
u32 vp9;
} level;
+
+ u32 base_priority_id;
};
struct venus_buffer {
@@ -271,7 +253,8 @@ struct venus_buffer {
struct clock_data {
u32 core_id;
unsigned long freq;
- const struct codec_freq_data *codec_freq_data;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
};
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
@@ -285,7 +268,6 @@ enum venus_dec_state {
VENUS_DEC_STATE_DRAIN = 5,
VENUS_DEC_STATE_DECODING = 6,
VENUS_DEC_STATE_DRC = 7,
- VENUS_DEC_STATE_DRC_FLUSH_DONE = 8,
};
struct venus_ts_metadata {
@@ -350,7 +332,7 @@ struct venus_ts_metadata {
* @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
- * @last_buf: last capture buffer for dynamic-resoluton-change
+ * @next_buf_last: a flag to mark next queued capture buffer as last
*/
struct venus_inst {
struct list_head list;
@@ -413,7 +395,9 @@ struct venus_inst {
union hfi_get_property hprop;
unsigned int core_acquired: 1;
unsigned int bit_depth;
- struct vb2_buffer *last_buf;
+ unsigned int pic_struct;
+ bool next_buf_last;
+ bool drain_active;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
@@ -433,7 +417,7 @@ static inline void *to_hfi_priv(struct venus_core *core)
return core->priv;
}
-static inline struct venus_caps *
+static inline struct hfi_plat_caps *
venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain)
{
unsigned int c;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index d03e2dd5808c..89defc21ea81 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -201,6 +201,9 @@ int venus_boot(struct venus_core *core)
return -EINVAL;
}
+ core->fw.mem_size = mem_size;
+ core->fw.mem_phys = mem_phys;
+
if (core->use_tz)
ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
else
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 50439eb1ffea..76ece2ff8d39 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -7,7 +7,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>
@@ -15,6 +15,8 @@
#include "helpers.h"
#include "hfi_helper.h"
#include "pm_helpers.h"
+#include "hfi_platform.h"
+#include "hfi_parser.h"
struct intbuf {
struct list_head list;
@@ -480,7 +482,7 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
static bool is_dynamic_bufmode(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
- struct venus_caps *caps;
+ struct hfi_plat_caps *caps;
/*
* v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
@@ -552,6 +554,51 @@ static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
return 0;
}
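+/*
+ * Ask the per-version HFI platform code for the buffer requirements; the
+ * caller falls back to querying the firmware if this fails.
+ */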
+static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
+ struct hfi_buffer_requirements *req)
+{
+ enum hfi_version version = inst->core->res->hfi_version;
+ const struct hfi_platform *hfi_plat;
+ struct hfi_plat_buffers_params params;
+ bool is_dec = inst->session_type == VIDC_SESSION_TYPE_DEC;
+ struct venc_controls *enc_ctr = &inst->controls.enc;
+
+ hfi_plat = hfi_platform_get(version);
+
+ if (!hfi_plat || !hfi_plat->bufreq)
+ return -EINVAL;
+
+ params.version = version;
+ params.num_vpp_pipes = hfi_platform_num_vpp_pipes(version);
+
+ if (is_dec) {
+ params.width = inst->width;
+ params.height = inst->height;
+ params.codec = inst->fmt_out->pixfmt;
+ params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_cap->pixfmt);
+ params.dec.max_mbs_per_frame = mbs_per_frame_max(inst);
+ params.dec.buffer_size_limit = 0;
+ params.dec.is_secondary_output =
+ inst->opb_buftype == HFI_BUFFER_OUTPUT2;
+ params.dec.is_interlaced =
+ inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE;
+ } else {
+ params.width = inst->out_width;
+ params.height = inst->out_height;
+ params.codec = inst->fmt_cap->pixfmt;
+ params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_out->pixfmt);
+ params.enc.work_mode = VIDC_WORK_MODE_2;
+ params.enc.rc_type = HFI_RATE_CONTROL_OFF;
+ if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ params.enc.rc_type = HFI_RATE_CONTROL_CQ;
+ params.enc.num_b_frames = enc_ctr->num_b_frames;
+ params.enc.is_tenbit = inst->bit_depth == VIDC_BITDEPTH_10;
+ }
+
+ return hfi_plat->bufreq(&params, inst->session_type, buftype, req);
+}
+
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req)
{
@@ -563,6 +610,10 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
if (req)
memset(req, 0, sizeof(*req));
+ ret = platform_get_bufreq(inst, type, req);
+ if (!ret)
+ return 0;
+
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
return ret;
@@ -986,6 +1037,8 @@ u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
if (compressed) {
sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
+ if (width < 1280 || height < 720)
+ sz *= 8;
return ALIGN(sz, SZ_4K);
}
@@ -1040,36 +1093,6 @@ int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
}
EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
-int venus_helper_init_codec_freq_data(struct venus_inst *inst)
-{
- const struct codec_freq_data *data;
- unsigned int i, data_size;
- u32 pixfmt;
- int ret = 0;
-
- if (!IS_V4(inst->core))
- return 0;
-
- data = inst->core->res->codec_freq_data;
- data_size = inst->core->res->codec_freq_data_size;
- pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
- inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
-
- for (i = 0; i < data_size; i++) {
- if (data[i].pixfmt == pixfmt &&
- data[i].session_type == inst->session_type) {
- inst->clk_data.codec_freq_data = &data[i];
- break;
- }
- }
-
- if (!inst->clk_data.codec_freq_data)
- ret = -EINVAL;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
-
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
@@ -1284,14 +1307,9 @@ int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct venus_buffer *buf = to_venus_buffer(vbuf);
- struct sg_table *sgt;
-
- sgt = vb2_dma_sg_plane_desc(vb, 0);
- if (!sgt)
- return -EFAULT;
buf->size = vb2_plane_size(vb, 0);
- buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
list_add_tail(&buf->reg_list, &inst->registeredbufs);
@@ -1343,28 +1361,29 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
int ret;
- mutex_lock(&inst->lock);
-
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+ /* Skip processing queued capture buffers after LAST flag */
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
+ V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ inst->codec_state == VENUS_DEC_STATE_DRC)
+ return;
+
cache_payload(inst, vb);
if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
!(inst->streamon_out && inst->streamon_cap))
- goto unlock;
+ return;
if (vb2_start_streaming_called(vb->vb2_queue)) {
ret = is_buf_refed(inst, vbuf);
if (ret)
- goto unlock;
+ return;
ret = session_process_buf(inst, vbuf);
if (ret)
return_buf_error(inst, vbuf);
}
-
-unlock:
- mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
@@ -1529,6 +1548,29 @@ void venus_helper_m2m_job_abort(void *priv)
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
+int venus_helper_session_init(struct venus_inst *inst)
+{
+ enum hfi_version version = inst->core->res->hfi_version;
+ u32 session_type = inst->session_type;
+ u32 codec;
+ int ret;
+
+ codec = inst->session_type == VIDC_SESSION_TYPE_DEC ?
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
+
+ ret = hfi_session_init(inst, codec);
+ if (ret)
+ return ret;
+
+ inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(version, codec,
+ session_type);
+ inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec,
+ session_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_session_init);
+
void venus_helper_init_instance(struct venus_inst *inst)
{
if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
@@ -1539,7 +1581,7 @@ void venus_helper_init_instance(struct venus_inst *inst)
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);
-static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
+static bool find_fmt_from_caps(struct hfi_plat_caps *caps, u32 buftype, u32 fmt)
{
unsigned int i;
@@ -1556,7 +1598,7 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
u32 *out_fmt, u32 *out2_fmt, bool ubwc)
{
struct venus_core *core = inst->core;
- struct venus_caps *caps;
+ struct hfi_plat_caps *caps;
u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
bool found, found_ubwc;
@@ -1620,3 +1662,21 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
return -EINVAL;
}
EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
+
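+/*
+ * Program the actual input plane layout; assumes a two-plane, NV12-like
+ * format with the chroma plane at half height.
+ */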
+int venus_helper_set_stride(struct venus_inst *inst,
+ unsigned int width, unsigned int height)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO;
+
+ struct hfi_uncompressed_plane_actual_info plane_actual_info;
+
+ plane_actual_info.buffer_type = HFI_BUFFER_INPUT;
+ plane_actual_info.num_planes = 2;
+ plane_actual_info.plane_format[0].actual_stride = width;
+ plane_actual_info.plane_format[0].actual_plane_buffer_height = height;
+ plane_actual_info.plane_format[1].actual_stride = width;
+ plane_actual_info.plane_format[1].actual_plane_buffer_height = height / 2;
+
+ return hfi_session_set_property(inst, ptype, &plane_actual_info);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_stride);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index a4a0562bc83f..351093845499 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,7 +33,6 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
-int venus_helper_init_codec_freq_data(struct venus_inst *inst);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs);
@@ -48,6 +47,7 @@ unsigned int venus_helper_get_opb_size(struct venus_inst *inst);
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf);
void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx);
void venus_helper_init_instance(struct venus_inst *inst);
+int venus_helper_session_init(struct venus_inst *inst);
int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt,
u32 *out2_fmt, bool ubwc);
int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
@@ -63,4 +63,6 @@ void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
struct vb2_v4l2_buffer *vbuf);
int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level);
int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level);
+int venus_helper_set_stride(struct venus_inst *inst, unsigned int aligned_width,
+ unsigned int aligned_height);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 638ed5cfe05e..0f2482367e06 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -175,6 +175,8 @@ static int wait_session_msg(struct venus_inst *inst)
int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
{
struct venus_core *core = inst->core;
+ bool max;
+ int ret;
if (!ops)
return -EINVAL;
@@ -184,11 +186,19 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
inst->ops = ops;
mutex_lock(&core->lock);
- list_add_tail(&inst->list, &core->instances);
- atomic_inc(&core->insts_count);
+
+ max = atomic_add_unless(&core->insts_count, 1,
+ core->max_sessions_supported);
+ if (!max) {
+ ret = -EAGAIN;
+ } else {
+ list_add_tail(&inst->list, &core->instances);
+ ret = 0;
+ }
+
mutex_unlock(&core->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hfi_session_create);
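
For review context, the hunk above replaces an unconditional increment with a bounded one: atomic_add_unless(v, 1, limit) increments only while the counter has not reached the limit and returns non-zero exactly when the add happened, so concurrent opens cannot push insts_count past max_sessions_supported. A minimal sketch of the idiom (helper name hypothetical, kernel context assumed):

    #include <linux/atomic.h>

    static int take_session_slot(atomic_t *count, int limit)
    {
            /* Succeeds only while *count < limit; never overshoots. */
            if (!atomic_add_unless(count, 1, limit))
                    return -EAGAIN; /* all session slots in use */
            return 0;
    }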
@@ -211,7 +221,7 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
mutex_unlock(&core->lock);
if (inst->state != INST_UNINIT)
- return -EINVAL;
+ return -EALREADY;
inst->hfi_codec = to_codec_type(pixfmt);
reinit_completion(&inst->done);
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 7022368c1e63..4f7565834469 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1205,6 +1205,18 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
break;
}
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
+ struct hfi_uncompressed_plane_actual_info *in = pdata;
+ struct hfi_uncompressed_plane_actual_info *info = prop_data;
+
+ info->buffer_type = in->buffer_type;
+ info->num_planes = in->num_planes;
+ info->plane_format[0] = in->plane_format[0];
+ if (in->num_planes > 1)
+ info->plane_format[1] = in->plane_format[1];
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
+ break;
+ }
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 60ee2479f7a6..6b524c7cde5f 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -364,6 +364,13 @@
#define HFI_HEVC_TIER_MAIN 0x1
#define HFI_HEVC_TIER_HIGH0 0x2
+#define HFI_VPX_PROFILE_MAIN 0x00000001
+
+#define HFI_VPX_LEVEL_VERSION_0 0x00000001
+#define HFI_VPX_LEVEL_VERSION_1 0x00000002
+#define HFI_VPX_LEVEL_VERSION_2 0x00000004
+#define HFI_VPX_LEVEL_VERSION_3 0x00000008
+
/* VP9 Profile 0, 8-bit */
#define HFI_VP9_PROFILE_P0 0x00000001
/* VP9 Profile 2, 10-bit */
@@ -571,7 +578,18 @@ struct hfi_bitrate {
#define HFI_CAPABILITY_LCU_SIZE 0x14
#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15
#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16
+#define HFI_CAPABILITY_I_FRAME_QP 0x20
+#define HFI_CAPABILITY_P_FRAME_QP 0x21
+#define HFI_CAPABILITY_B_FRAME_QP 0x22
+#define HFI_CAPABILITY_RATE_CONTROL_MODES 0x23
+#define HFI_CAPABILITY_BLUR_WIDTH 0x24
+#define HFI_CAPABILITY_BLUR_HEIGHT 0x25
+#define HFI_CAPABILITY_SLICE_BYTE 0x27
+#define HFI_CAPABILITY_SLICE_MB 0x28
#define HFI_CAPABILITY_MAX_VIDEOCORES 0x2b
+#define HFI_CAPABILITY_MAX_WORKMODES 0x2c
+#define HFI_CAPABILITY_ROTATION 0x2f
+#define HFI_CAPABILITY_COLOR_SPACE_CONVERSION 0x30
struct hfi_capability {
u32 capability_type;
@@ -908,13 +926,13 @@ struct hfi_uncompressed_plane_actual {
struct hfi_uncompressed_plane_actual_info {
u32 buffer_type;
u32 num_planes;
- struct hfi_uncompressed_plane_actual plane_format[1];
+ struct hfi_uncompressed_plane_actual plane_format[2];
};
struct hfi_uncompressed_plane_actual_constraints_info {
u32 buffer_type;
u32 num_planes;
- struct hfi_uncompressed_plane_constraints plane_format[1];
+ struct hfi_uncompressed_plane_constraints plane_format[2];
};
struct hfi_codec_supported {
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 363ee2a65453..7263c0c32695 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -11,12 +11,12 @@
#include "hfi_helper.h"
#include "hfi_parser.h"
-typedef void (*func)(struct venus_caps *cap, const void *data,
+typedef void (*func)(struct hfi_plat_caps *cap, const void *data,
unsigned int size);
static void init_codecs(struct venus_core *core)
{
- struct venus_caps *caps = core->caps, *cap;
+ struct hfi_plat_caps *caps = core->caps, *cap;
unsigned long bit;
for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
@@ -34,11 +34,11 @@ static void init_codecs(struct venus_core *core)
}
}
-static void for_each_codec(struct venus_caps *caps, unsigned int caps_num,
+static void for_each_codec(struct hfi_plat_caps *caps, unsigned int caps_num,
u32 codecs, u32 domain, func cb, void *data,
unsigned int size)
{
- struct venus_caps *cap;
+ struct hfi_plat_caps *cap;
unsigned int i;
for (i = 0; i < caps_num; i++) {
@@ -51,7 +51,7 @@ static void for_each_codec(struct venus_caps *caps, unsigned int caps_num,
}
static void
-fill_buf_mode(struct venus_caps *cap, const void *data, unsigned int num)
+fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num)
{
const u32 *type = data;
@@ -81,7 +81,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
}
}
-static void fill_profile_level(struct venus_caps *cap, const void *data,
+static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
unsigned int num)
{
const struct hfi_profile_level *pl = data;
@@ -107,7 +107,7 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
}
static void
-fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
+fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
{
const struct hfi_capability *caps = data;
@@ -132,7 +132,7 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
fill_caps, caps_arr, num_caps);
}
-static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
+static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
unsigned int num_fmts)
{
const struct raw_formats *formats = fmts;
@@ -211,7 +211,7 @@ static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
{
- struct venus_caps *caps, *cap;
+ struct hfi_plat_caps *caps, *cap;
unsigned int i;
u32 dom;
@@ -228,11 +228,49 @@ static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
}
}
+static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
+{
+ const struct hfi_platform *plat;
+ const struct hfi_plat_caps *caps = NULL;
+ u32 enc_codecs, dec_codecs, count = 0;
+ unsigned int entries;
+
+ if (inst)
+ return 0;
+
+ plat = hfi_platform_get(core->res->hfi_version);
+ if (!plat)
+ return -EINVAL;
+
+ if (plat->codecs)
+ plat->codecs(&enc_codecs, &dec_codecs, &count);
+
+ if (plat->capabilities)
+ caps = plat->capabilities(&entries);
+
+ if (!caps || !entries || !count)
+ return -EINVAL;
+
+ core->enc_codecs = enc_codecs;
+ core->dec_codecs = dec_codecs;
+ core->codecs_count = count;
+ core->max_sessions_supported = MAX_SESSIONS;
+ memset(core->caps, 0, sizeof(*caps) * MAX_CODEC_NUM);
+ memcpy(core->caps, caps, sizeof(*caps) * entries);
+
+ return 0;
+}
+
u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
u32 size)
{
unsigned int words_count = size >> 2;
u32 *word = buf, *data, codecs = 0, domain = 0;
+ int ret;
+
+ ret = hfi_platform_parser(core, inst);
+ if (!ret)
+ return HFI_ERR_NONE;
if (size % 4)
return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
@@ -276,6 +314,9 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
words_count--;
}
+ if (!core->max_sessions_supported)
+ core->max_sessions_supported = MAX_SESSIONS;
+
parser_fini(inst, codecs, domain);
return HFI_ERR_NONE;
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h
index 264e6dd2415f..5751d0140700 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.h
+++ b/drivers/media/platform/qcom/venus/hfi_parser.h
@@ -16,7 +16,7 @@ static inline u32 get_cap(struct venus_inst *inst, u32 type, u32 which)
{
struct venus_core *core = inst->core;
struct hfi_capability *cap = NULL;
- struct venus_caps *caps;
+ struct hfi_plat_caps *caps;
unsigned int i;
caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
@@ -112,4 +112,9 @@ static inline u32 core_num_max(struct venus_inst *inst)
return cap_max(inst, HFI_CAPABILITY_MAX_VIDEOCORES);
}
+static inline u32 mbs_per_frame_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_MBS_PER_FRAME);
+}
+
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs.h b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h
new file mode 100644
index 000000000000..52a51a3b964a
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HFI_PLATFORM_BUFFERS_H__
+#define __HFI_PLATFORM_BUFFERS_H__
+
+#include <linux/types.h>
+#include "hfi_helper.h"
+
+struct hfi_plat_buffers_params {
+ u32 width;
+ u32 height;
+ u32 codec;
+ u32 hfi_color_fmt;
+ enum hfi_version version;
+ u32 num_vpp_pipes;
+ union {
+ struct {
+ u32 max_mbs_per_frame;
+ u32 buffer_size_limit;
+ bool is_secondary_output;
+ bool is_interlaced;
+ } dec;
+ struct {
+ u32 work_mode;
+ u32 rc_type;
+ u32 num_b_frames;
+ bool is_tenbit;
+ } enc;
+ };
+};
+
+int hfi_plat_bufreq_v6(struct hfi_plat_buffers_params *params, u32 session_type,
+ u32 buftype, struct hfi_buffer_requirements *bufreq);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
new file mode 100644
index 000000000000..d43d1a53e72d
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -0,0 +1,1317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/videodev2.h>
+
+#include "hfi.h"
+#include "hfi_plat_bufs.h"
+#include "helpers.h"
+
+#define MIN_INPUT_BUFFERS 4
+#define MIN_ENC_OUTPUT_BUFFERS 4
+
+#define NV12_UBWC_Y_TILE_WIDTH 32
+#define NV12_UBWC_Y_TILE_HEIGHT 8
+#define NV12_UBWC_UV_TILE_WIDTH 16
+#define NV12_UBWC_UV_TILE_HEIGHT 8
+#define TP10_UBWC_Y_TILE_WIDTH 48
+#define TP10_UBWC_Y_TILE_HEIGHT 4
+#define METADATA_STRIDE_MULTIPLE 64
+#define METADATA_HEIGHT_MULTIPLE 16
+#define HFI_DMA_ALIGNMENT 256
+
+#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
+#define MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE 64
+#define MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE 64
+#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
+#define MAX_FE_NBR_DATA_CB_LINE_BUFFER_SIZE 320
+#define MAX_FE_NBR_DATA_CR_LINE_BUFFER_SIZE 320
+
+#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE (128 / 8)
+#define MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE (128 / 8)
+#define MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE (128 / 8)
+
+#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE (64 * 2 * 3)
+#define MAX_PE_NBR_DATA_LCU32_LINE_BUFFER_SIZE (32 * 2 * 3)
+#define MAX_PE_NBR_DATA_LCU16_LINE_BUFFER_SIZE (16 * 2 * 3)
+
+#define MAX_TILE_COLUMNS 32 /* 8K/256 */
+
+#define NUM_HW_PIC_BUF 10
+#define BIN_BUFFER_THRESHOLD (1280 * 736)
+#define H264D_MAX_SLICE 1800
+/* sizeof(h264d_buftab_t) aligned to 256 */
+#define SIZE_H264D_BUFTAB_T 256
+/* sizeof(h264d_hw_pic_t) aligned to 32 */
+#define SIZE_H264D_HW_PIC_T BIT(11)
+#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4)
+#define SIZE_H264D_VPP_CMD_PER_BUF 512
+
+/* Line buffer definitions: one for luma and 1/2 for each chroma */
+#define SIZE_H264D_LB_FE_TOP_DATA(width, height) \
+ (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN((width), 16) * 3)
+
+#define SIZE_H264D_LB_FE_TOP_CTRL(width, height) \
+ (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
+
+#define SIZE_H264D_LB_FE_LEFT_CTRL(width, height) \
+ (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4))
+
+#define SIZE_H264D_LB_SE_TOP_CTRL(width, height) \
+ (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
+
+#define SIZE_H264D_LB_SE_LEFT_CTRL(width, height) \
+ (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4))
+
+#define SIZE_H264D_LB_PE_TOP_DATA(width, height) \
+ (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
+
+#define SIZE_H264D_LB_VSP_TOP(width, height) (((((width) + 15) >> 4) << 7))
+
+#define SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) \
+ (ALIGN((height), 16) * 32)
+
+#define SIZE_H264D_QP(width, height) \
+ ((((width) + 63) >> 6) * (((height) + 63) >> 6) * 128)
+
+#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf))
+
+#define H264_CABAC_HDR_RATIO_HD_TOT 1
+#define H264_CABAC_RES_RATIO_HD_TOT 3
+
+/*
+ * Some content needs a larger bin buffer, but cap the buffer
+ * size for high resolutions.
+ */
+#define NUM_SLIST_BUF_H264 (256 + 32)
+#define SIZE_SLIST_BUF_H264 512
+#define LCU_MAX_SIZE_PELS 64
+#define LCU_MIN_SIZE_PELS 16
+
+#define H265D_MAX_SLICE 600
+#define SIZE_H265D_HW_PIC_T SIZE_H264D_HW_PIC_T
+#define SIZE_H265D_BSE_CMD_PER_BUF (16 * sizeof(u32))
+#define SIZE_H265D_VPP_CMD_PER_BUF 256
+
+#define SIZE_H265D_LB_FE_TOP_DATA(width, height) \
+ (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * (ALIGN(width, 64) + 8) * 2)
+
+#define SIZE_H265D_LB_FE_TOP_CTRL(width, height) \
+ (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \
+ (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS))
+
+#define SIZE_H265D_LB_FE_LEFT_CTRL(width, height) \
+ (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \
+ (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS))
+
+#define SIZE_H265D_LB_SE_TOP_CTRL(width, height) \
+ ((LCU_MAX_SIZE_PELS / 8 * (128 / 8)) * (((width) + 15) >> 4))
+
+static inline u32 size_h265d_lb_se_left_ctrl(u32 width, u32 height)
+{
+ u32 x, y, z;
+
+ x = ((height + 16 - 1) / 8) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
+ y = ((height + 32 - 1) / 8) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
+ z = ((height + 64 - 1) / 8) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
+
+ return max3(x, y, z);
+}
+
+#define SIZE_H265D_LB_PE_TOP_DATA(width, height) \
+ (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * \
+ (ALIGN(width, LCU_MIN_SIZE_PELS) / LCU_MIN_SIZE_PELS))
+
+#define SIZE_H265D_LB_VSP_TOP(width, height) ((((width) + 63) >> 6) * 128)
+
+#define SIZE_H265D_LB_VSP_LEFT(width, height) ((((height) + 63) >> 6) * 128)
+
+#define SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height) \
+ SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height)
+
+#define SIZE_H265D_QP(width, height) SIZE_H264D_QP(width, height)
+
+#define H265_CABAC_HDR_RATIO_HD_TOT 2
+#define H265_CABAC_RES_RATIO_HD_TOT 2
+
+/*
+ * Some content needs a larger bin buffer, but cap the buffer size
+ * for high resolutions.
+ */
+#define SIZE_SLIST_BUF_H265 BIT(10)
+#define NUM_SLIST_BUF_H265 (80 + 20)
+#define H265_NUM_TILE_COL 32
+#define H265_NUM_TILE_ROW 128
+#define H265_NUM_TILE (H265_NUM_TILE_ROW * H265_NUM_TILE_COL + 1)
+
+static inline u32 size_vpxd_lb_fe_left_ctrl(u32 width, u32 height)
+{
+ u32 x, y, z;
+
+ x = ((height + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
+ y = ((height + 31) >> 5) * MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
+ z = ((height + 63) >> 6) * MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
+
+ return max3(x, y, z);
+}
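
Worth noting for review: the three MAX_FE_NBR_CTRL_LCU*_LINE_BUFFER_SIZE constants are currently all 64, so the 16-pel LCU term always wins here; the max3() only matters if the per-LCU constants ever diverge. A worked example for height = 1080:

    x = ((1080 + 15) >> 4) * 64 = 68 * 64 = 4352
    y = ((1080 + 31) >> 5) * 64 = 34 * 64 = 2176
    z = ((1080 + 63) >> 6) * 64 = 17 * 64 = 1088
    max3(x, y, z) = 4352 bytes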
+
+#define SIZE_VPXD_LB_FE_TOP_CTRL(width, height) \
+ (((ALIGN(width, 64) + 8) * 10 * 2)) /* small line */
+#define SIZE_VPXD_LB_SE_TOP_CTRL(width, height) \
+ ((((width) + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE)
+
+static inline u32 size_vpxd_lb_se_left_ctrl(u32 width, u32 height)
+{
+ u32 x, y, z;
+
+ x = ((height + 15) >> 4) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
+ y = ((height + 31) >> 5) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
+ z = ((height + 63) >> 6) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
+
+ return max3(x, y, z);
+}
+
+#define SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height) \
+ ALIGN((ALIGN(height, 16) / (4 / 2)) * 64, 32)
+#define SIZE_VP8D_LB_FE_TOP_DATA(width, height) \
+ ((ALIGN(width, 16) + 8) * 10 * 2)
+#define SIZE_VP9D_LB_FE_TOP_DATA(width, height) \
+ ((ALIGN(ALIGN(width, 16), 64) + 8) * 10 * 2)
+#define SIZE_VP8D_LB_PE_TOP_DATA(width, height) \
+ ((ALIGN(width, 16) >> 4) * 64)
+#define SIZE_VP9D_LB_PE_TOP_DATA(width, height) \
+ ((ALIGN(ALIGN(width, 16), 64) >> 6) * 176)
+#define SIZE_VP8D_LB_VSP_TOP(width, height) \
+ (((ALIGN(width, 16) >> 4) * 64 / 2) + 256)
+#define SIZE_VP9D_LB_VSP_TOP(width, height) \
+ (((ALIGN(ALIGN(width, 16), 64) >> 6) * 64 * 8) + 256)
+
+#define HFI_IRIS2_VP9D_COMV_SIZE \
+ ((((8192 + 63) >> 6) * ((4320 + 63) >> 6) * 8 * 8 * 2 * 8))
+
+#define VPX_DECODER_FRAME_CONCURENCY_LVL 2
+#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM 1
+#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN 2
+#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM 3
+#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN 2
+
+#define VP8_NUM_FRAME_INFO_BUF (5 + 1)
+#define VP9_NUM_FRAME_INFO_BUF (8 + 2 + 1 + 8)
+#define VP8_NUM_PROBABILITY_TABLE_BUF VP8_NUM_FRAME_INFO_BUF
+#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4)
+#define VP8_PROB_TABLE_SIZE 3840
+#define VP9_PROB_TABLE_SIZE 3840
+
+#define VP9_UDC_HEADER_BUF_SIZE (3 * 128)
+#define MAX_SUPERFRAME_HEADER_LEN 34
+#define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, 32)
+
+#define QMATRIX_SIZE (sizeof(u32) * 128 + 256)
+#define MP2D_QPDUMP_SIZE 115200
+#define HFI_IRIS2_ENC_PERSIST_SIZE 102400
+#define HFI_MAX_COL_FRAME 6
+#define HFI_VENUS_VENC_TRE_WB_BUFF_SIZE (65 << 4) /* in Bytes */
+#define HFI_VENUS_VENC_DB_LINE_BUFF_PER_MB 512
+#define HFI_VENUS_VPPSG_MAX_REGISTERS 2048
+#define HFI_VENUS_WIDTH_ALIGNMENT 128
+#define HFI_VENUS_WIDTH_TEN_BIT_ALIGNMENT 192
+#define HFI_VENUS_HEIGHT_ALIGNMENT 32
+
+#define SYSTEM_LAL_TILE10 192
+#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
+#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define MB_SIZE_IN_PIXEL (16 * 16)
+#define HDR10PLUS_PAYLOAD_SIZE 1024
+#define HDR10_HIST_EXTRADATA_SIZE 4096
+
+static u32 size_vpss_lb(u32 width, u32 height, u32 num_vpp_pipes)
+{
+ u32 vpss_4tap_top_buffer_size, vpss_div2_top_buffer_size;
+ u32 vpss_4tap_left_buffer_size, vpss_div2_left_buffer_size;
+ u32 opb_wr_top_line_luma_buf_size, opb_wr_top_line_chroma_buf_size;
+ u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
+ u32 macrotiling_size;
+ u32 size = 0;
+
+ vpss_4tap_top_buffer_size = 0;
+ vpss_div2_top_buffer_size = 0;
+ vpss_4tap_left_buffer_size = 0;
+ vpss_div2_left_buffer_size = 0;
+
+ macrotiling_size = 32;
+ opb_wr_top_line_luma_buf_size =
+ ALIGN(width, macrotiling_size) / macrotiling_size * 256;
+ opb_wr_top_line_luma_buf_size =
+ ALIGN(opb_wr_top_line_luma_buf_size, HFI_DMA_ALIGNMENT) +
+ (MAX_TILE_COLUMNS - 1) * 256;
+ opb_wr_top_line_luma_buf_size =
+ max(opb_wr_top_line_luma_buf_size, (32 * ALIGN(height, 16)));
+ opb_wr_top_line_chroma_buf_size = opb_wr_top_line_luma_buf_size;
+ opb_lb_wr_llb_y_buffer_size = ALIGN((ALIGN(height, 16) / 2) * 64, 32);
+ opb_lb_wr_llb_uv_buffer_size = opb_lb_wr_llb_y_buffer_size;
+ size = num_vpp_pipes *
+ 2 * (vpss_4tap_top_buffer_size + vpss_div2_top_buffer_size) +
+ 2 * (vpss_4tap_left_buffer_size + vpss_div2_left_buffer_size) +
+ opb_wr_top_line_luma_buf_size +
+ opb_wr_top_line_chroma_buf_size +
+ opb_lb_wr_llb_uv_buffer_size +
+ opb_lb_wr_llb_y_buffer_size;
+
+ return size;
+}
+
+static u32 size_h264d_hw_bin_buffer(u32 width, u32 height)
+{
+ u32 size_yuv, size_bin_hdr, size_bin_res;
+ u32 size = 0;
+ u32 product;
+
+ product = width * height;
+ size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1);
+
+ size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT;
+ size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT;
+ size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT);
+ size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT);
+ size = size_bin_hdr + size_bin_res;
+
+ return size;
+}
+
+static u32 h264d_scratch_size(u32 width, u32 height, bool is_interlaced)
+{
+ u32 aligned_width = ALIGN(width, 16);
+ u32 aligned_height = ALIGN(height, 16);
+ u32 size = 0;
+
+ if (!is_interlaced)
+ size = size_h264d_hw_bin_buffer(aligned_width, aligned_height);
+
+ return size;
+}
+
+static u32 size_h265d_hw_bin_buffer(u32 width, u32 height)
+{
+ u32 size_yuv, size_bin_hdr, size_bin_res;
+ u32 size = 0;
+ u32 product;
+
+ product = width * height;
+ size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1);
+ size_bin_hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT;
+ size_bin_res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT;
+ size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT);
+ size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT);
+ size = size_bin_hdr + size_bin_res;
+
+ return size;
+}
+
+static u32 h265d_scratch_size(u32 width, u32 height, bool is_interlaced)
+{
+ u32 aligned_width = ALIGN(width, 16);
+ u32 aligned_height = ALIGN(height, 16);
+ u32 size = 0;
+
+ if (!is_interlaced)
+ size = size_h265d_hw_bin_buffer(aligned_width, aligned_height);
+
+ return size;
+}
+
+static u32 vpxd_scratch_size(u32 width, u32 height, bool is_interlaced)
+{
+ u32 aligned_width = ALIGN(width, 16);
+ u32 aligned_height = ALIGN(height, 16);
+ u32 size_yuv = aligned_width * aligned_height * 3 / 2;
+ u32 size = 0;
+
+ if (!is_interlaced) {
+ u32 binbuffer1_size, binbuffer2_size;
+
+ binbuffer1_size = max_t(u32, size_yuv,
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1));
+ binbuffer1_size *= VPX_DECODER_FRAME_CONCURENCY_LVL *
+ VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM /
+ VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN;
+ binbuffer2_size = max_t(u32, size_yuv,
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1));
+ binbuffer2_size *= VPX_DECODER_FRAME_CONCURENCY_LVL *
+ VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM /
+ VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN;
+ size = ALIGN(binbuffer1_size + binbuffer2_size,
+ HFI_DMA_ALIGNMENT);
+ }
+
+ return size;
+}
+
+static u32 mpeg2d_scratch_size(u32 width, u32 height, bool is_interlaced)
+{
+ return 0;
+}
+
+static u32 calculate_enc_output_frame_size(u32 width, u32 height, u32 rc_type)
+{
+ u32 aligned_width, aligned_height;
+ u32 mbs_per_frame;
+ u32 frame_size;
+
+ /*
+ * Encoder output size calculation: width/height aligned to 32.
+ * For resolution < 720p: YUVsize * 4
+ * For resolution >= 720p and <= 4K: YUVsize / 2
+ * For resolution > 4K: YUVsize / 4
+ * Initially frame_size = YUVsize * 2.
+ */
+ aligned_width = ALIGN(width, 32);
+ aligned_height = ALIGN(height, 32);
+ mbs_per_frame = (ALIGN(aligned_height, 16) *
+ ALIGN(aligned_width, 16)) / 256;
+ frame_size = width * height * 3;
+
+ if (mbs_per_frame < NUM_MBS_720P)
+ frame_size = frame_size << 1;
+ else if (mbs_per_frame <= NUM_MBS_4K)
+ frame_size = frame_size >> 2;
+ else
+ frame_size = frame_size >> 3;
+
+ if (rc_type == HFI_RATE_CONTROL_OFF || rc_type == HFI_RATE_CONTROL_CQ)
+ frame_size = frame_size << 1;
+
+ /*
+ * In case of an opaque color format the bitdepth will be known
+ * only with the first ETB, and buffers already allocated for
+ * 8-bit content won't be sufficient for 10-bit, so calculate
+ * the size considering 10-bit by default.
+ * For 10-bit cases: size = size * 1.25.
+ */
+ frame_size *= 5;
+ frame_size /= 4;
+
+ return ALIGN(frame_size, SZ_4K);
+}
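
A worked example of the branches above, for a hypothetical 1920x1080 session with a rate-control mode other than RC_OFF/CQ:

    ALIGN(1080, 32) = 1088; mbs_per_frame = (1088 * 1920) / 256 = 8160
    3600 (NUM_MBS_720P) <= 8160 <= 36864 (NUM_MBS_4K), so:
    frame_size = (1920 * 1080 * 3) >> 2 = 1555200
    10-bit headroom: 1555200 * 5 / 4 = 1944000
    ALIGN(1944000, SZ_4K) = 1945600 bytes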
+
+static u32 calculate_enc_scratch_size(u32 width, u32 height, u32 work_mode,
+ u32 lcu_size, u32 num_vpp_pipes,
+ u32 rc_type)
+{
+ u32 aligned_width, aligned_height, bitstream_size;
+ u32 total_bitbin_buffers, size_single_pipe, bitbin_size;
+ u32 sao_bin_buffer_size, padded_bin_size, size;
+
+ aligned_width = ALIGN(width, lcu_size);
+ aligned_height = ALIGN(height, lcu_size);
+ bitstream_size =
+ calculate_enc_output_frame_size(width, height, rc_type);
+
+ bitstream_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT);
+
+ if (work_mode == VIDC_WORK_MODE_2) {
+ total_bitbin_buffers = 3;
+ bitbin_size = bitstream_size * 17 / 10;
+ bitbin_size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT);
+ } else {
+ total_bitbin_buffers = 1;
+ bitstream_size = aligned_width * aligned_height * 3;
+ bitbin_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT);
+ }
+
+ if (num_vpp_pipes > 2)
+ size_single_pipe = bitbin_size / 2;
+ else
+ size_single_pipe = bitbin_size;
+
+ size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
+ sao_bin_buffer_size =
+ (64 * (((width + 32) * (height + 32)) >> 10)) + 384;
+ padded_bin_size = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
+ size_single_pipe = sao_bin_buffer_size + padded_bin_size;
+ size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
+ bitbin_size = size_single_pipe * num_vpp_pipes;
+ size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT) *
+ total_bitbin_buffers + 512;
+
+ return size;
+}
+
+static u32 h264e_scratch_size(u32 width, u32 height, u32 work_mode,
+ u32 num_vpp_pipes, u32 rc_type)
+{
+ return calculate_enc_scratch_size(width, height, work_mode, 16,
+ num_vpp_pipes, rc_type);
+}
+
+static u32 h265e_scratch_size(u32 width, u32 height, u32 work_mode,
+ u32 num_vpp_pipes, u32 rc_type)
+{
+ return calculate_enc_scratch_size(width, height, work_mode, 32,
+ num_vpp_pipes, rc_type);
+}
+
+static u32 vp8e_scratch_size(u32 width, u32 height, u32 work_mode,
+ u32 num_vpp_pipes, u32 rc_type)
+{
+ return calculate_enc_scratch_size(width, height, work_mode, 16,
+ num_vpp_pipes, rc_type);
+}
+
+static u32 hfi_iris2_h264d_comv_size(u32 width, u32 height,
+ u32 yuv_buf_min_count)
+{
+ u32 frame_width_in_mbs = ((width + 15) >> 4);
+ u32 frame_height_in_mbs = ((height + 15) >> 4);
+ u32 col_mv_aligned_width = (frame_width_in_mbs << 6);
+ u32 col_zero_aligned_width = (frame_width_in_mbs << 2);
+ u32 col_zero_size = 0, size_colloc = 0, comv_size = 0;
+
+ col_mv_aligned_width = ALIGN(col_mv_aligned_width, 16);
+ col_zero_aligned_width = ALIGN(col_zero_aligned_width, 16);
+ col_zero_size =
+ col_zero_aligned_width * ((frame_height_in_mbs + 1) >> 1);
+ col_zero_size = ALIGN(col_zero_size, 64);
+ col_zero_size <<= 1;
+ col_zero_size = ALIGN(col_zero_size, 512);
+ size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1);
+ size_colloc = ALIGN(size_colloc, 64);
+ size_colloc <<= 1;
+ size_colloc = ALIGN(size_colloc, 512);
+ size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2);
+ comv_size = size_colloc * yuv_buf_min_count;
+ comv_size += 512;
+
+ return comv_size;
+}
+
+static u32 size_h264d_bse_cmd_buf(u32 height)
+{
+ u32 aligned_height = ALIGN(height, 32);
+
+ return min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
+ H264D_MAX_SLICE) * SIZE_H264D_BSE_CMD_PER_BUF;
+}
+
+static u32 size_h264d_vpp_cmd_buf(u32 height)
+{
+ u32 aligned_height = ALIGN(height, 32);
+
+ return min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
+ H264D_MAX_SLICE) * SIZE_H264D_VPP_CMD_PER_BUF;
+}
+
+static u32 hfi_iris2_h264d_non_comv_size(u32 width, u32 height,
+ u32 num_vpp_pipes)
+{
+ u32 size_bse, size_vpp, size;
+
+ size_bse = size_h264d_bse_cmd_buf(height);
+ size_vpp = size_h264d_vpp_cmd_buf(height);
+ size =
+ ALIGN(size_bse, HFI_DMA_ALIGNMENT) +
+ ALIGN(size_vpp, HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_FE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_FE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_FE_LEFT_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_H264D_LB_SE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_SE_LEFT_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_H264D_LB_PE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height),
+ HFI_DMA_ALIGNMENT) * 2 +
+ ALIGN(SIZE_H264D_QP(width, height), HFI_DMA_ALIGNMENT);
+
+ return ALIGN(size, HFI_DMA_ALIGNMENT);
+}
+
+static u32 size_h265d_bse_cmd_buf(u32 width, u32 height)
+{
+ u32 size;
+
+ size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ NUM_HW_PIC_BUF;
+ size = min_t(u32, size, H265D_MAX_SLICE + 1);
+ size = 2 * size * SIZE_H265D_BSE_CMD_PER_BUF;
+
+ return ALIGN(size, HFI_DMA_ALIGNMENT);
+}
+
+static u32 size_h265d_vpp_cmd_buf(u32 width, u32 height)
+{
+ u32 size;
+
+ size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ NUM_HW_PIC_BUF;
+ size = min_t(u32, size, H265D_MAX_SLICE + 1);
+ size = ALIGN(size, 4);
+ size = 2 * size * SIZE_H265D_VPP_CMD_PER_BUF;
+
+ return ALIGN(size, HFI_DMA_ALIGNMENT);
+}
+
+static u32 hfi_iris2_h265d_comv_size(u32 width, u32 height,
+ u32 yuv_buf_count_min)
+{
+ u32 size;
+
+ size = ALIGN(((((width + 15) >> 4) * ((height + 15) >> 4)) << 8), 512);
+ size *= yuv_buf_count_min;
+ size += 512;
+
+ return size;
+}
+
+static u32 hfi_iris2_h265d_non_comv_size(u32 width, u32 height,
+ u32 num_vpp_pipes)
+{
+ u32 size_bse, size_vpp, size;
+
+ size_bse = size_h265d_bse_cmd_buf(width, height);
+ size_vpp = size_h265d_vpp_cmd_buf(width, height);
+ size =
+ ALIGN(size_bse, HFI_DMA_ALIGNMENT) +
+ ALIGN(size_vpp, HFI_DMA_ALIGNMENT) +
+ ALIGN(NUM_HW_PIC_BUF * 20 * 22 * 4, HFI_DMA_ALIGNMENT) +
+ ALIGN(2 * sizeof(u16) *
+ (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_HW_PIC(SIZE_H265D_HW_PIC_T), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_FE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_FE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_FE_LEFT_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h265d_lb_se_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_H265D_LB_SE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_PE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_H265D_LB_VSP_LEFT(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height),
+ HFI_DMA_ALIGNMENT)
+ * 4 +
+ ALIGN(SIZE_H265D_QP(width, height), HFI_DMA_ALIGNMENT);
+
+ return ALIGN(size, HFI_DMA_ALIGNMENT);
+}
+
+static u32 hfi_iris2_vp8d_comv_size(u32 width, u32 height,
+ u32 yuv_min_buf_count)
+{
+ return (((width + 15) >> 4) * ((height + 15) >> 4) * 8 * 2);
+}
+
+static u32 h264d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes)
+{
+ u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0;
+
+ co_mv_size = hfi_iris2_h264d_comv_size(width, height, min_buf_count);
+ nonco_mv_size = hfi_iris2_h264d_non_comv_size(width, height,
+ num_vpp_pipes);
+ if (split_mode_enabled)
+ vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
+
+ return co_mv_size + nonco_mv_size + vpss_lb_size;
+}
+
+static u32 h265d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes)
+{
+ u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0;
+
+ co_mv_size = hfi_iris2_h265d_comv_size(width, height, min_buf_count);
+ nonco_mv_size = hfi_iris2_h265d_non_comv_size(width, height,
+ num_vpp_pipes);
+ if (split_mode_enabled)
+ vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
+
+ return co_mv_size + nonco_mv_size + vpss_lb_size +
+ HDR10_HIST_EXTRADATA_SIZE;
+}
+
+static u32 vp8d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0, size;
+
+ size = hfi_iris2_vp8d_comv_size(width, height, 0);
+ size += ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT);
+ if (split_mode_enabled)
+ vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
+
+ size += vpss_lb_size;
+
+ return size;
+}
+
+static u32 vp9d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0;
+ u32 size;
+
+ size =
+ ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_VP9D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP9D_LB_PE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP9D_LB_FE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT);
+
+ if (split_mode_enabled)
+ vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
+
+ size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
+
+ return size;
+}
+
+static u32 mpeg2d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0;
+ u32 size;
+
+ size =
+ ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
+ HFI_DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
+ HFI_DMA_ALIGNMENT);
+
+ if (split_mode_enabled)
+ vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
+
+ size += vpss_lb_size;
+
+ return size;
+}
+
+static u32
+calculate_enc_scratch1_size(u32 width, u32 height, u32 lcu_size, u32 num_ref,
+ bool ten_bit, u32 num_vpp_pipes, bool is_h265)
+{
+ u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size;
+ u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE;
+ u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size;
+ u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size;
+ u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size;
+ u32 vpss_line_buf, leftline_buf_meta_recony, h265e_colrcbuf_size;
+ u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize;
+ u32 h265e_lcubitmap_bufsize, se_stats_bufsize;
+ u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize;
+ u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size;
+ u32 width_lcu_num, height_lcu_num, width_coded, height_coded;
+ u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao;
+ u32 size, bit_depth, num_lcu_mb;
+ u32 vpss_line_buffer_size_1;
+
+ width_lcu_num = (width + lcu_size - 1) / lcu_size;
+ height_lcu_num = (height + lcu_size - 1) / lcu_size;
+ frame_num_lcu = width_lcu_num * height_lcu_num;
+ width_coded = width_lcu_num * lcu_size;
+ height_coded = height_lcu_num * lcu_size;
+ num_lcu_mb = (height_coded / lcu_size) *
+ ((width_coded + lcu_size * 8) / lcu_size);
+ slice_info_bufsize = 256 + (frame_num_lcu << 4);
+ slice_info_bufsize = ALIGN(slice_info_bufsize, HFI_DMA_ALIGNMENT);
+ line_buf_ctrl_size = ALIGN(width_coded, HFI_DMA_ALIGNMENT);
+ line_buf_ctrl_size_buffid2 = ALIGN(width_coded, HFI_DMA_ALIGNMENT);
+
+ bit_depth = ten_bit ? 10 : 8;
+ line_buf_data_size =
+ (((((bit_depth * width_coded + 1024) +
+ (HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 1) +
+ (((((bit_depth * width_coded + 1024) >> 1) +
+ (HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 2));
+
+ leftline_buf_ctrl_size = is_h265 ?
+ ((height_coded + 32) / 32 * 4 * 16) :
+ ((height_coded + 15) / 16 * 5 * 16);
+
+ if (num_vpp_pipes > 1) {
+ leftline_buf_ctrl_size += 512;
+ leftline_buf_ctrl_size =
+ ALIGN(leftline_buf_ctrl_size, 512) * num_vpp_pipes;
+ }
+
+ leftline_buf_ctrl_size =
+ ALIGN(leftline_buf_ctrl_size, HFI_DMA_ALIGNMENT);
+ leftline_buf_recon_pix_size = (((ten_bit + 1) * 2 *
+ (height_coded) + HFI_DMA_ALIGNMENT) +
+ (HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
+ (~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1;
+
+ topline_buf_ctrl_size_FE = is_h265 ? (64 * (width_coded >> 5)) :
+ (HFI_DMA_ALIGNMENT + 16 * (width_coded >> 4));
+ topline_buf_ctrl_size_FE =
+ ALIGN(topline_buf_ctrl_size_FE, HFI_DMA_ALIGNMENT);
+ leftline_buf_ctrl_size_FE =
+ (((HFI_DMA_ALIGNMENT + 64 * (height_coded >> 4)) +
+ (HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
+ (~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1) *
+ num_vpp_pipes;
+ leftline_buf_meta_recony = (HFI_DMA_ALIGNMENT + 64 *
+ ((height_coded) / (8 * (ten_bit ? 4 : 8))));
+ leftline_buf_meta_recony =
+ ALIGN(leftline_buf_meta_recony, HFI_DMA_ALIGNMENT);
+ leftline_buf_meta_recony = leftline_buf_meta_recony * num_vpp_pipes;
+ linebuf_meta_recon_uv = (HFI_DMA_ALIGNMENT + 64 *
+ ((height_coded) / (4 * (ten_bit ? 4 : 8))));
+ linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv, HFI_DMA_ALIGNMENT);
+ linebuf_meta_recon_uv = linebuf_meta_recon_uv * num_vpp_pipes;
+ line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded);
+ line_buf_recon_pix_size =
+ ALIGN(line_buf_recon_pix_size, HFI_DMA_ALIGNMENT);
+ slice_cmd_buffer_size = ALIGN(20480, HFI_DMA_ALIGNMENT);
+ sps_pps_slice_hdr = 2048 + 4096;
+ col_mv_buf_size = is_h265 ? (16 * ((frame_num_lcu << 2) + 32)) :
+ (3 * 16 * (width_lcu_num * height_lcu_num + 32));
+ col_mv_buf_size =
+ ALIGN(col_mv_buf_size, HFI_DMA_ALIGNMENT) * (num_ref + 1);
+ h265e_colrcbuf_size =
+ (((width_lcu_num + 7) >> 3) * 16 * 2 * height_lcu_num);
+ if (num_vpp_pipes > 1)
+ h265e_colrcbuf_size =
+ ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) *
+ num_vpp_pipes;
+
+ h265e_colrcbuf_size = ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) *
+ HFI_MAX_COL_FRAME;
+ h265e_framerc_bufsize = (is_h265) ? (256 + 16 *
+ (14 + (((height_coded >> 5) + 7) >> 3))) :
+ (256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3)));
+ h265e_framerc_bufsize *= 6; /* multiply by max numtilescol */
+ if (num_vpp_pipes > 1)
+ h265e_framerc_bufsize =
+ ALIGN(h265e_framerc_bufsize, HFI_DMA_ALIGNMENT) *
+ num_vpp_pipes;
+
+ h265e_framerc_bufsize = ALIGN(h265e_framerc_bufsize, 512) *
+ HFI_MAX_COL_FRAME;
+ h265e_lcubitcnt_bufsize = 256 + 4 * frame_num_lcu;
+ h265e_lcubitcnt_bufsize =
+ ALIGN(h265e_lcubitcnt_bufsize, HFI_DMA_ALIGNMENT);
+ h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3);
+ h265e_lcubitmap_bufsize =
+ ALIGN(h265e_lcubitmap_bufsize, HFI_DMA_ALIGNMENT);
+ line_buf_sde_size = 256 + 16 * (width_coded >> 4);
+ line_buf_sde_size = ALIGN(line_buf_sde_size, HFI_DMA_ALIGNMENT);
+ if ((width_coded * height_coded) > (4096 * 2160))
+ se_stats_bufsize = 0;
+ else if ((width_coded * height_coded) > (1920 * 1088))
+ se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256);
+ else
+ se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256);
+
+ se_stats_bufsize = ALIGN(se_stats_bufsize, HFI_DMA_ALIGNMENT) * 2;
+ bse_slice_cmd_buffer_size = (((8192 << 2) + 7) & (~7)) * 6;
+ bse_reg_buffer_size = (((512 << 3) + 7) & (~7)) * 4;
+ vpp_reg_buffer_size =
+ (((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) & (~31)) * 10;
+ lambda_lut_size = 256 * 11;
+ override_buffer_size = 16 * ((num_lcu_mb + 7) >> 3);
+ override_buffer_size =
+ ALIGN(override_buffer_size, HFI_DMA_ALIGNMENT) * 2;
+ ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
+ vpss_line_buffer_size_1 = (((8192 >> 2) << 5) * num_vpp_pipes) + 64;
+ vpss_line_buf =
+ (((((max(width_coded, height_coded) + 3) >> 2) << 5) + 256) *
+ 16) + vpss_line_buffer_size_1;
+ topline_bufsize_fe_1stg_sao = 16 * (width_coded >> 5);
+ topline_bufsize_fe_1stg_sao =
+ ALIGN(topline_bufsize_fe_1stg_sao, HFI_DMA_ALIGNMENT);
+
+ size =
+ line_buf_ctrl_size + line_buf_data_size +
+ line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size +
+ vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE +
+ leftline_buf_ctrl_size_FE + line_buf_recon_pix_size +
+ leftline_buf_recon_pix_size +
+ leftline_buf_meta_recony + linebuf_meta_recon_uv +
+ h265e_colrcbuf_size + h265e_framerc_bufsize +
+ h265e_lcubitcnt_bufsize + h265e_lcubitmap_bufsize +
+ line_buf_sde_size +
+ topline_bufsize_fe_1stg_sao + override_buffer_size +
+ bse_reg_buffer_size + vpp_reg_buffer_size + sps_pps_slice_hdr +
+ slice_cmd_buffer_size + bse_slice_cmd_buffer_size +
+ ir_buffer_size + slice_info_bufsize + lambda_lut_size +
+ se_stats_bufsize + 1024;
+
+ return size;
+}
+
+static u32 h264e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
+ u32 num_vpp_pipes)
+{
+ return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit,
+ num_vpp_pipes, false);
+}
+
+static u32 h265e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
+ u32 num_vpp_pipes)
+{
+ return calculate_enc_scratch1_size(width, height, 32, num_ref, ten_bit,
+ num_vpp_pipes, true);
+}
+
+static u32 vp8e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
+ u32 num_vpp_pipes)
+{
+ return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit,
+ 1, false);
+}
+
+static u32 ubwc_metadata_plane_stride(u32 width, u32 metadata_stride_multi,
+ u32 tile_width_pels)
+{
+ return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels),
+ metadata_stride_multi);
+}
+
+static u32 ubwc_metadata_plane_bufheight(u32 height, u32 metadata_height_multi,
+ u32 tile_height_pels)
+{
+ return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels),
+ metadata_height_multi);
+}
+
+static u32 ubwc_metadata_plane_buffer_size(u32 metadata_stride,
+ u32 metadata_buf_height)
+{
+ return ALIGN(metadata_stride * metadata_buf_height, SZ_4K);
+}
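
A worked example for the three helpers above (hypothetical 1920x1080 NV12 UBWC luma plane, using the stride/height multiples of 64 and 16 passed by enc_scratch2_size() below):

    metadata_stride = ALIGN(1920 / 32, 64) = ALIGN(60, 64) = 64
    metadata_bufheight = ALIGN(ceil(1080 / 8), 16) = ALIGN(135, 16) = 144
    buffer_size = ALIGN(64 * 144, SZ_4K) = ALIGN(9216, 4096) = 12288 bytes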
+
+static u32 enc_scratch2_size(u32 width, u32 height, u32 num_ref, bool ten_bit)
+{
+ u32 aligned_width, aligned_height, chroma_height, ref_buf_height;
+ u32 luma_size, chroma_size;
+ u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c;
+ u32 ref_luma_stride_bytes, ref_chroma_height_bytes;
+ u32 ref_buf_size, ref_stride;
+ u32 size;
+
+ if (!ten_bit) {
+ aligned_height = ALIGN(height, HFI_VENUS_HEIGHT_ALIGNMENT);
+ chroma_height = height >> 1;
+ chroma_height = ALIGN(chroma_height,
+ HFI_VENUS_HEIGHT_ALIGNMENT);
+ aligned_width = ALIGN(width, HFI_VENUS_WIDTH_ALIGNMENT);
+ metadata_stride =
+ ubwc_metadata_plane_stride(width, 64,
+ NV12_UBWC_Y_TILE_WIDTH);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(height, 16,
+ NV12_UBWC_Y_TILE_HEIGHT);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = (aligned_height + chroma_height) * aligned_width +
+ meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
+ } else {
+ ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1))
+ & (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1));
+ ref_luma_stride_bytes =
+ ((width + SYSTEM_LAL_TILE10 - 1) / SYSTEM_LAL_TILE10) *
+ SYSTEM_LAL_TILE10;
+ ref_stride = 4 * (ref_luma_stride_bytes / 3);
+ ref_stride = (ref_stride + (128 - 1)) & (~(128 - 1));
+ luma_size = ref_buf_height * ref_stride;
+ ref_chroma_height_bytes = (((height + 1) >> 1) +
+ (32 - 1)) & (~(32 - 1));
+ chroma_size = ref_stride * ref_chroma_height_bytes;
+ luma_size = (luma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ chroma_size = (chroma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ ref_buf_size = luma_size + chroma_size;
+ metadata_stride =
+ ubwc_metadata_plane_stride(width,
+ METADATA_STRIDE_MULTIPLE,
+ TP10_UBWC_Y_TILE_WIDTH);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(height,
+ METADATA_HEIGHT_MULTIPLE,
+ TP10_UBWC_Y_TILE_HEIGHT);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = ref_buf_size + meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
+ }
+
+ return size;
+}
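
For the ten-bit branch above, the 4/3 factor in ref_stride reflects TP10 packing, three 10-bit samples per 32-bit word, so the byte stride is 4/3 of the tile-aligned pixel width. A worked example for width = 1920 (hypothetical):

    ref_luma_stride_bytes = ceil(1920 / 192) * 192 = 1920
    ref_stride = 4 * (1920 / 3) = 2560, already 128-aligned
    i.e. 2560 bytes per line for 1920 ten-bit luma samples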
+
+static u32 enc_persist_size(void)
+{
+ return HFI_IRIS2_ENC_PERSIST_SIZE;
+}
+
+static u32 h264d_persist1_size(void)
+{
+ return ALIGN((SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264),
+ HFI_DMA_ALIGNMENT);
+}
+
+static u32 h265d_persist1_size(void)
+{
+ return ALIGN((SIZE_SLIST_BUF_H265 * NUM_SLIST_BUF_H265 + H265_NUM_TILE
+ * sizeof(u32)), HFI_DMA_ALIGNMENT);
+}
+
+static u32 vp8d_persist1_size(void)
+{
+ return ALIGN(VP8_NUM_PROBABILITY_TABLE_BUF * VP8_PROB_TABLE_SIZE,
+ HFI_DMA_ALIGNMENT);
+}
+
+static u32 vp9d_persist1_size(void)
+{
+ return
+ ALIGN(VP9_NUM_PROBABILITY_TABLE_BUF * VP9_PROB_TABLE_SIZE,
+ HFI_DMA_ALIGNMENT) +
+ ALIGN(HFI_IRIS2_VP9D_COMV_SIZE, HFI_DMA_ALIGNMENT) +
+ ALIGN(MAX_SUPERFRAME_HEADER_LEN, HFI_DMA_ALIGNMENT) +
+ ALIGN(VP9_UDC_HEADER_BUF_SIZE, HFI_DMA_ALIGNMENT) +
+ ALIGN(VP9_NUM_FRAME_INFO_BUF * CCE_TILE_OFFSET_SIZE,
+ HFI_DMA_ALIGNMENT);
+}
+
+static u32 mpeg2d_persist1_size(void)
+{
+ return QMATRIX_SIZE + MP2D_QPDUMP_SIZE;
+}
+
+struct dec_bufsize_ops {
+ u32 (*scratch)(u32 width, u32 height, bool is_interlaced);
+ u32 (*scratch1)(u32 width, u32 height, u32 min_buf_count,
+ bool split_mode_enabled, u32 num_vpp_pipes);
+ u32 (*persist1)(void);
+};
+
+struct enc_bufsize_ops {
+ u32 (*scratch)(u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes,
+ u32 rc_type);
+ u32 (*scratch1)(u32 width, u32 height, u32 num_ref, bool ten_bit,
+ u32 num_vpp_pipes);
+ u32 (*scratch2)(u32 width, u32 height, u32 num_ref, bool ten_bit);
+ u32 (*persist)(void);
+};
+
+static struct dec_bufsize_ops dec_h264_ops = {
+ .scratch = h264d_scratch_size,
+ .scratch1 = h264d_scratch1_size,
+ .persist1 = h264d_persist1_size,
+};
+
+static struct dec_bufsize_ops dec_h265_ops = {
+ .scratch = h265d_scratch_size,
+ .scratch1 = h265d_scratch1_size,
+ .persist1 = h265d_persist1_size,
+};
+
+static struct dec_bufsize_ops dec_vp8_ops = {
+ .scratch = vpxd_scratch_size,
+ .scratch1 = vp8d_scratch1_size,
+ .persist1 = vp8d_persist1_size,
+};
+
+static struct dec_bufsize_ops dec_vp9_ops = {
+ .scratch = vpxd_scratch_size,
+ .scratch1 = vp9d_scratch1_size,
+ .persist1 = vp9d_persist1_size,
+};
+
+static struct dec_bufsize_ops dec_mpeg2_ops = {
+ .scratch = mpeg2d_scratch_size,
+ .scratch1 = mpeg2d_scratch1_size,
+ .persist1 = mpeg2d_persist1_size,
+};
+
+static struct enc_bufsize_ops enc_h264_ops = {
+ .scratch = h264e_scratch_size,
+ .scratch1 = h264e_scratch1_size,
+ .scratch2 = enc_scratch2_size,
+ .persist = enc_persist_size,
+};
+
+static struct enc_bufsize_ops enc_h265_ops = {
+ .scratch = h265e_scratch_size,
+ .scratch1 = h265e_scratch1_size,
+ .scratch2 = enc_scratch2_size,
+ .persist = enc_persist_size,
+};
+
+static struct enc_bufsize_ops enc_vp8_ops = {
+ .scratch = vp8e_scratch_size,
+ .scratch1 = vp8e_scratch1_size,
+ .scratch2 = enc_scratch2_size,
+ .persist = enc_persist_size,
+};
+
+static u32
+calculate_dec_input_frame_size(u32 width, u32 height, u32 codec,
+ u32 max_mbs_per_frame, u32 buffer_size_limit)
+{
+ u32 frame_size, num_mbs;
+ u32 div_factor = 1;
+ u32 base_res_mbs = NUM_MBS_4K;
+
+ /*
+ * Decoder input size calculation:
+ * If the clip is 8K, the buffer size is calculated for 8K: 8K mbs / 4.
+ * For 8K cases we expect width/height to always be set.
+ * In all other cases the size is calculated for 4K:
+ * 4K mbs for VP9 and 4K mbs / 2 for the remaining codecs.
+ */
+ num_mbs = (ALIGN(height, 16) * ALIGN(width, 16)) / 256;
+ if (num_mbs > NUM_MBS_4K) {
+ div_factor = 4;
+ base_res_mbs = max_mbs_per_frame;
+ } else {
+ base_res_mbs = NUM_MBS_4K;
+ if (codec == V4L2_PIX_FMT_VP9)
+ div_factor = 1;
+ else
+ div_factor = 2;
+ }
+
+ frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;
+
+ /* multiply by 10/8 (1.25) to get size for 10 bit case */
+ if (codec == V4L2_PIX_FMT_VP9 || codec == V4L2_PIX_FMT_HEVC)
+ frame_size = frame_size + (frame_size >> 2);
+
+ if (buffer_size_limit && buffer_size_limit < frame_size)
+ frame_size = buffer_size_limit;
+
+ return ALIGN(frame_size, SZ_4K);
+}
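
A worked example of the sizing above, for a hypothetical 1080p H.264 decode: num_mbs = (1088 * 1920) / 256 = 8160, which is below NUM_MBS_4K, so base_res_mbs = 36864 and div_factor = 2:

    frame_size = 36864 * 256 * 3 / 2 / 2 = 7077888 bytes (6.75 MiB)

Note the result is independent of the actual sub-4K resolution; the input buffer is intentionally sized so it can absorb any compressed frame up to 4K.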
+
+static int output_buffer_count(u32 session_type, u32 codec)
+{
+ u32 output_min_count;
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ switch (codec) {
+ case V4L2_PIX_FMT_MPEG2:
+ case V4L2_PIX_FMT_VP8:
+ output_min_count = 6;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ output_min_count = 9;
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_HEVC:
+ default:
+ output_min_count = 8;
+ break;
+ }
+ } else {
+ output_min_count = MIN_ENC_OUTPUT_BUFFERS;
+ }
+
+ return output_min_count;
+}
+
+static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
+ struct hfi_buffer_requirements *bufreq)
+{
+ enum hfi_version version = params->version;
+ u32 codec = params->codec;
+ u32 width = params->width, height = params->height, out_min_count;
+ struct dec_bufsize_ops *dec_ops;
+ bool is_secondary_output = params->dec.is_secondary_output;
+ bool is_interlaced = params->dec.is_interlaced;
+ u32 max_mbs_per_frame = params->dec.max_mbs_per_frame;
+ u32 buffer_size_limit = params->dec.buffer_size_limit;
+ u32 num_vpp_pipes = params->num_vpp_pipes;
+
+ switch (codec) {
+ case V4L2_PIX_FMT_H264:
+ dec_ops = &dec_h264_ops;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ dec_ops = &dec_h265_ops;
+ break;
+ case V4L2_PIX_FMT_VP8:
+ dec_ops = &dec_vp8_ops;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ dec_ops = &dec_vp9_ops;
+ break;
+ case V4L2_PIX_FMT_MPEG2:
+ dec_ops = &dec_mpeg2_ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
+
+ bufreq->type = buftype;
+ bufreq->region_size = 0;
+ bufreq->count_min = 1;
+ bufreq->count_actual = 1;
+ bufreq->hold_count = 1;
+ bufreq->contiguous = 1;
+ bufreq->alignment = 256;
+
+ if (buftype == HFI_BUFFER_INPUT) {
+ bufreq->count_min = MIN_INPUT_BUFFERS;
+ bufreq->size =
+ calculate_dec_input_frame_size(width, height, codec,
+ max_mbs_per_frame,
+ buffer_size_limit);
+ } else if (buftype == HFI_BUFFER_OUTPUT ||
+ buftype == HFI_BUFFER_OUTPUT2) {
+ bufreq->count_min = out_min_count;
+ bufreq->size =
+ venus_helper_get_framesz_raw(params->hfi_color_fmt,
+ width, height);
+ } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
+ bufreq->size = dec_ops->scratch(width, height, is_interlaced);
+ } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
+ bufreq->size = dec_ops->scratch1(width, height, out_min_count,
+ is_secondary_output,
+ num_vpp_pipes);
+ } else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) {
+ bufreq->size = dec_ops->persist1();
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
+ struct hfi_buffer_requirements *bufreq)
+{
+ enum hfi_version version = params->version;
+ struct enc_bufsize_ops *enc_ops;
+ u32 width = params->width;
+ u32 height = params->height;
+ bool is_tenbit = params->enc.is_tenbit;
+ u32 num_bframes = params->enc.num_b_frames;
+ u32 codec = params->codec;
+ u32 work_mode = params->enc.work_mode;
+ u32 rc_type = params->enc.rc_type;
+ u32 num_vpp_pipes = params->num_vpp_pipes;
+ u32 num_ref;
+
+ switch (codec) {
+ case V4L2_PIX_FMT_H264:
+ enc_ops = &enc_h264_ops;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ enc_ops = &enc_h265_ops;
+ break;
+ case V4L2_PIX_FMT_VP8:
+ enc_ops = &enc_vp8_ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ num_ref = num_bframes > 0 ? num_bframes + 1 : 1;
+
+ bufreq->type = buftype;
+ bufreq->region_size = 0;
+ bufreq->count_min = 1;
+ bufreq->count_actual = 1;
+ bufreq->hold_count = 1;
+ bufreq->contiguous = 1;
+ bufreq->alignment = 256;
+
+ if (buftype == HFI_BUFFER_INPUT) {
+ bufreq->count_min = MIN_INPUT_BUFFERS;
+ bufreq->size =
+ venus_helper_get_framesz_raw(params->hfi_color_fmt,
+ width, height);
+ } else if (buftype == HFI_BUFFER_OUTPUT ||
+ buftype == HFI_BUFFER_OUTPUT2) {
+ bufreq->count_min =
+ output_buffer_count(VIDC_SESSION_TYPE_ENC, codec);
+ bufreq->size = calculate_enc_output_frame_size(width, height,
+ rc_type);
+ } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
+ bufreq->size = enc_ops->scratch(width, height, work_mode,
+ num_vpp_pipes, rc_type);
+ } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
+ bufreq->size = enc_ops->scratch1(width, height, num_ref,
+ is_tenbit, num_vpp_pipes);
+ } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_2(version)) {
+ bufreq->size = enc_ops->scratch2(width, height, num_ref,
+ is_tenbit);
+ } else if (buftype == HFI_BUFFER_INTERNAL_PERSIST) {
+ bufreq->size = enc_ops->persist();
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hfi_plat_bufreq_v6(struct hfi_plat_buffers_params *params, u32 session_type,
+ u32 buftype, struct hfi_buffer_requirements *bufreq)
+{
+ if (session_type == VIDC_SESSION_TYPE_DEC)
+ return bufreq_dec(params, buftype, bufreq);
+ else
+ return bufreq_enc(params, buftype, bufreq);
+}
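
A minimal usage sketch for the entry point above (all values hypothetical; num_vpp_pipes would normally come from hfi_platform_num_vpp_pipes()): query the capture buffer requirements for a 1080p H.264 decode on a v6 core.

    struct hfi_plat_buffers_params params = {
            .width = 1920,
            .height = 1080,
            .codec = V4L2_PIX_FMT_H264,
            .hfi_color_fmt = HFI_COLOR_FORMAT_NV12,
            .version = HFI_VERSION_6XX,
            .num_vpp_pipes = 4,
            .dec = {
                    .is_secondary_output = false,
                    .is_interlaced = false,
            },
    };
    struct hfi_buffer_requirements bufreq;
    int ret;

    ret = hfi_plat_bufreq_v6(&params, VIDC_SESSION_TYPE_DEC,
                             HFI_BUFFER_OUTPUT, &bufreq);
    /* on success, bufreq.size and bufreq.count_min are filled in */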
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
new file mode 100644
index 000000000000..8f47804e973f
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include "hfi_platform.h"
+
+const struct hfi_platform *hfi_platform_get(enum hfi_version version)
+{
+ switch (version) {
+ case HFI_VERSION_4XX:
+ return &hfi_plat_v4;
+ case HFI_VERSION_6XX:
+ return &hfi_plat_v6;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+unsigned long
+hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session_type)
+{
+ const struct hfi_platform *plat;
+ unsigned long freq = 0;
+
+ plat = hfi_platform_get(version);
+ if (!plat)
+ return 0;
+
+ if (plat->codec_vpp_freq)
+ freq = plat->codec_vpp_freq(session_type, codec);
+
+ return freq;
+}
+
+unsigned long
+hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session_type)
+{
+ const struct hfi_platform *plat;
+ unsigned long freq = 0;
+
+ plat = hfi_platform_get(version);
+ if (!plat)
+ return 0;
+
+ if (plat->codec_vsp_freq)
+ freq = plat->codec_vsp_freq(session_type, codec);
+
+ return freq;
+}
+
+u8 hfi_platform_num_vpp_pipes(enum hfi_version version)
+{
+ const struct hfi_platform *plat;
+
+ plat = hfi_platform_get(version);
+ if (!plat)
+ return 0;
+
+ if (plat->num_vpp_pipes)
+ return plat->num_vpp_pipes();
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
new file mode 100644
index 000000000000..3819bb2b36bd
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HFI_PLATFORM_H__
+#define __HFI_PLATFORM_H__
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include "hfi.h"
+#include "hfi_plat_bufs.h"
+#include "hfi_helper.h"
+
+#define MAX_PLANES 4
+#define MAX_FMT_ENTRIES 32
+#define MAX_CAP_ENTRIES 32
+#define MAX_ALLOC_MODE_ENTRIES 16
+#define MAX_CODEC_NUM 32
+#define MAX_SESSIONS 16
+
+struct raw_formats {
+ u32 buftype;
+ u32 fmt;
+};
+
+struct hfi_plat_caps {
+ u32 codec;
+ u32 domain;
+ bool cap_bufs_mode_dynamic;
+ unsigned int num_caps;
+ struct hfi_capability caps[MAX_CAP_ENTRIES];
+ unsigned int num_pl;
+ struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
+ unsigned int num_fmts;
+ struct raw_formats fmts[MAX_FMT_ENTRIES];
+ bool valid; /* used only for Venus v1xx */
+};
+
+struct hfi_platform_codec_freq_data {
+ u32 pixfmt;
+ u32 session_type;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
+};
+
+struct hfi_platform {
+ unsigned long (*codec_vpp_freq)(u32 session_type, u32 codec);
+ unsigned long (*codec_vsp_freq)(u32 session_type, u32 codec);
+ void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
+ const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
+ u8 (*num_vpp_pipes)(void);
+ int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
+ u32 buftype, struct hfi_buffer_requirements *bufreq);
+};
+
+extern const struct hfi_platform hfi_plat_v4;
+extern const struct hfi_platform hfi_plat_v6;
+
+const struct hfi_platform *hfi_platform_get(enum hfi_version version);
+unsigned long hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec,
+ u32 session_type);
+unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec,
+ u32 session_type);
+u8 hfi_platform_num_vpp_pipes(enum hfi_version version);
+
+#endif /* __HFI_PLATFORM_H__ */
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v4.c b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
new file mode 100644
index 000000000000..3848bb6d7408
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include "hfi_platform.h"
+
+static const struct hfi_plat_caps caps[] = {
+{
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
+ .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10},
+ .num_fmts = 7,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP8,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
+ .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
+ .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
+ .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
+ .num_pl = 4,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP9,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_VP9_PROFILE_P0, 200},
+ .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
+ .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10},
+ .num_fmts = 7,
+}, {
+ .codec = HFI_VIDEO_CODEC_MPEG2,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 8160, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 244800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14},
+ .pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .num_caps = 21,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 63, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 63, 1},
+ .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 63, 1},
+ .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
+ .caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
+ .caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
+ .num_caps = 24,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP8,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 240, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1},
+ .caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
+ .caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
+ .caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
+ .num_caps = 23,
+ .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
+ .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
+ .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
+ .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
+ .num_pl = 4,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+} };
+
+static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+{
+ *entries = ARRAY_SIZE(caps);
+ return caps;
+}
+
+static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+{
+ *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
+ HFI_VIDEO_CODEC_VP8;
+ *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
+ HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
+ HFI_VIDEO_CODEC_MPEG2;
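+	/* three encoder codecs plus five decoder codecs */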
+ *count = 8;
+}
+
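+/* entries: { pixfmt, session type, vpp cycles/mb, vsp cycles/mb } */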
+static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+};
+
+static const struct hfi_platform_codec_freq_data *
+get_codec_freq_data(u32 session_type, u32 pixfmt)
+{
+ const struct hfi_platform_codec_freq_data *data = codec_freq_data;
+ unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
+ const struct hfi_platform_codec_freq_data *found = NULL;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
+ found = &data[i];
+ break;
+ }
+ }
+
+ return found;
+}
+
+static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->vpp_freq;
+
+ return 0;
+}
+
+static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->vsp_freq;
+
+ return 0;
+}
+
+const struct hfi_platform hfi_plat_v4 = {
+ .codec_vpp_freq = codec_vpp_freq,
+ .codec_vsp_freq = codec_vsp_freq,
+ .codecs = get_codecs,
+ .capabilities = get_capabilities,
+};
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
new file mode 100644
index 000000000000..2278be13cb90
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include "hfi_platform.h"
+
+static const struct hfi_plat_caps caps[] = {
+{
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 5760, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 5760, 1},
+ /* ((5760 * 2880) / 256) */
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 36, 64800, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 200000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 36, 1958400, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 9,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
+ .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10},
+ .num_fmts = 7,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP8,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
+ .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
+ .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
+ .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
+ .num_pl = 4,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP9,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_VP9_PROFILE_P0, 200},
+ .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
+ .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10},
+ .num_fmts = 7,
+}, {
+ .codec = HFI_VIDEO_CODEC_MPEG2,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 8160, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 244800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
+ .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1},
+ .num_caps = 10,
+ .pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14},
+ .pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .num_caps = 21,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 63, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 63, 1},
+ .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 63, 1},
+ .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
+ .caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
+ .caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
+ .num_caps = 24,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP8,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .cap_bufs_mode_dynamic = true,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
+ .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
+ .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
+ .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
+ .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
+ .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 240, 1},
+ .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
+ .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
+ .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1},
+ .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
+ .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
+ .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
+ .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
+ .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1},
+ .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1},
+ .caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
+ .caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
+ .caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
+ .caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
+ .caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
+ .caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
+ .num_caps = 23,
+ .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
+ .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
+ .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
+ .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
+ .num_pl = 4,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
+ .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
+ .num_fmts = 4,
+} };
+
+static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+{
+ *entries = ARRAY_SIZE(caps);
+ return caps;
+}
+
+static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+{
+ *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
+ HFI_VIDEO_CODEC_VP8;
+ *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
+ HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
+ HFI_VIDEO_CODEC_MPEG2;
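+	/* three encoder codecs plus five decoder codecs */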
+ *count = 8;
+}
+
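+/* entries: { pixfmt, session type, vpp cycles/mb, vsp cycles/mb } */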
+static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 25 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 25 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 60 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 25 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 25 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 25 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 60 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 60 },
+};
+
+static const struct hfi_platform_codec_freq_data *
+get_codec_freq_data(u32 session_type, u32 pixfmt)
+{
+ const struct hfi_platform_codec_freq_data *data = codec_freq_data;
+ unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
+ const struct hfi_platform_codec_freq_data *found = NULL;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
+ found = &data[i];
+ break;
+ }
+ }
+
+ return found;
+}
+
+static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->vpp_freq;
+
+ return 0;
+}
+
+static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->vsp_freq;
+
+ return 0;
+}
+
+static u8 num_vpp_pipes(void)
+{
+ return 4;
+}
+
+const struct hfi_platform hfi_plat_v6 = {
+ .codec_vpp_freq = codec_vpp_freq,
+ .codec_vsp_freq = codec_vsp_freq,
+ .codecs = get_codecs,
+ .capabilities = get_capabilities,
+ .num_vpp_pipes = num_vpp_pipes,
+ .bufreq = hfi_plat_bufreq_v6,
+};
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 4be4a75ddcb6..50e03f8fc278 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -372,7 +372,7 @@ static void venus_soft_int(struct venus_hfi_device *hdev)
}
static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
- void *pkt)
+ void *pkt, bool sync)
{
struct device *dev = hdev->core->dev;
struct hfi_pkt_hdr *cmd_packet;
@@ -394,18 +394,29 @@ static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
return ret;
}
+ if (sync) {
+		/*
+		 * Ask the video hardware to raise an interrupt for
+		 * synchronous commands
+		 */
+ queue = &hdev->queues[IFACEQ_MSG_IDX];
+ queue->qhdr->rx_req = 1;
+ /* ensure rx_req is updated in memory */
+ wmb();
+ }
+
if (rx_req)
venus_soft_int(hdev);
return 0;
}
-static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
+static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
{
int ret;
mutex_lock(&hdev->lock);
- ret = venus_iface_cmdq_write_nolock(hdev, pkt);
+ ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
mutex_unlock(&hdev->lock);
return ret;
@@ -428,7 +439,7 @@ static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
if (ret)
return ret;
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -778,7 +789,7 @@ static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -795,7 +806,7 @@ static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
pkt_sys_coverage_config(pkt, mode);
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -816,7 +827,7 @@ static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
pkt_sys_idle_indicator(pkt, enable);
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -834,7 +845,7 @@ static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
pkt_sys_power_control(pkt, enable);
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -885,14 +896,14 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
return ret;
}
-static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
+static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_pkt pkt;
pkt_session_cmd(&pkt, pkt_type, inst);
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, sync);
}
static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
@@ -922,7 +933,7 @@ static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
pkt_sys_pc_prep(&pkt);
- ret = venus_iface_cmdq_write(hdev, &pkt);
+ ret = venus_iface_cmdq_write(hdev, &pkt, false);
if (ret)
return ret;
@@ -1064,13 +1075,13 @@ static int venus_core_init(struct venus_core *core)
venus_set_state(hdev, VENUS_STATE_INIT);
- ret = venus_iface_cmdq_write(hdev, &pkt);
+ ret = venus_iface_cmdq_write(hdev, &pkt, false);
if (ret)
return ret;
pkt_sys_image_version(&version_pkt);
- ret = venus_iface_cmdq_write(hdev, &version_pkt);
+ ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
if (ret)
dev_warn(dev, "failed to send image version pkt to fw\n");
@@ -1099,7 +1110,7 @@ static int venus_core_ping(struct venus_core *core, u32 cookie)
pkt_sys_ping(&pkt, cookie);
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
@@ -1112,7 +1123,7 @@ static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_session_init(struct venus_inst *inst, u32 session_type,
@@ -1130,7 +1141,7 @@ static int venus_session_init(struct venus_inst *inst, u32 session_type,
if (ret)
goto err;
- ret = venus_iface_cmdq_write(hdev, &pkt);
+ ret = venus_iface_cmdq_write(hdev, &pkt, true);
if (ret)
goto err;
@@ -1151,7 +1162,7 @@ static int venus_session_end(struct venus_inst *inst)
dev_warn(dev, "fw coverage msg ON failed\n");
}
- return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
}
static int venus_session_abort(struct venus_inst *inst)
@@ -1160,7 +1171,7 @@ static int venus_session_abort(struct venus_inst *inst)
venus_flush_debug_queue(hdev);
- return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
}
static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
@@ -1173,22 +1184,22 @@ static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, true);
}
static int venus_session_start(struct venus_inst *inst)
{
- return venus_session_cmd(inst, HFI_CMD_SESSION_START);
+ return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
}
static int venus_session_stop(struct venus_inst *inst)
{
- return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
+ return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
}
static int venus_session_continue(struct venus_inst *inst)
{
- return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
+ return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
}
static int venus_session_etb(struct venus_inst *inst,
@@ -1205,7 +1216,7 @@ static int venus_session_etb(struct venus_inst *inst,
if (ret)
return ret;
- ret = venus_iface_cmdq_write(hdev, &pkt);
+ ret = venus_iface_cmdq_write(hdev, &pkt, false);
} else if (session_type == VIDC_SESSION_TYPE_ENC) {
struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
@@ -1213,7 +1224,7 @@ static int venus_session_etb(struct venus_inst *inst,
if (ret)
return ret;
- ret = venus_iface_cmdq_write(hdev, &pkt);
+ ret = venus_iface_cmdq_write(hdev, &pkt, false);
} else {
ret = -EINVAL;
}
@@ -1232,7 +1243,7 @@ static int venus_session_ftb(struct venus_inst *inst,
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_session_set_buffers(struct venus_inst *inst,
@@ -1252,7 +1263,7 @@ static int venus_session_set_buffers(struct venus_inst *inst,
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, pkt);
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_unset_buffers(struct venus_inst *inst,
@@ -1272,17 +1283,17 @@ static int venus_session_unset_buffers(struct venus_inst *inst,
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, pkt);
+ return venus_iface_cmdq_write(hdev, pkt, true);
}
static int venus_session_load_res(struct venus_inst *inst)
{
- return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
+ return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
}
static int venus_session_release_res(struct venus_inst *inst)
{
- return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
+ return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
}
static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
@@ -1299,7 +1310,7 @@ static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
if (ret)
return ret;
- ret = venus_iface_cmdq_write(hdev, pkt);
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
@@ -1320,7 +1331,7 @@ static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, pkt);
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
@@ -1339,7 +1350,7 @@ static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, pkt);
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
@@ -1352,7 +1363,7 @@ static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
if (ret)
return ret;
- return venus_iface_cmdq_write(hdev, &pkt);
+ return venus_iface_cmdq_write(hdev, &pkt, true);
}
static int venus_resume(struct venus_core *core)
@@ -1591,9 +1602,6 @@ int venus_hfi_create(struct venus_core *core)
hdev->suspended = true;
core->priv = hdev;
core->ops = &venus_hfi_ops;
- core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
- ENC_DEINTERLACE_CAPABILITY |
- DEC_MULTI_STREAM_CAPABILITY;
ret = venus_interface_queues_init(hdev);
if (ret)
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index a3850261d697..43c4e3d9e281 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -18,6 +18,7 @@
#include "hfi_parser.h"
#include "hfi_venus_io.h"
#include "pm_helpers.h"
+#include "hfi_platform.h"
static bool legacy_binding;
@@ -510,7 +511,7 @@ min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load)
if (inst_pos->state != INST_START)
continue;
- vpp_freq = inst_pos->clk_data.codec_freq_data->vpp_freq;
+ vpp_freq = inst_pos->clk_data.vpp_freq;
coreid = inst_pos->clk_data.core_id;
mbs_per_sec = load_per_instance(inst_pos);
@@ -559,7 +560,7 @@ static int decide_core(struct venus_inst *inst)
return 0;
inst_load = load_per_instance(inst);
- inst_load *= inst->clk_data.codec_freq_data->vpp_freq;
+ inst_load *= inst->clk_data.vpp_freq;
max_freq = core->res->freq_tbl[0].freq;
min_loaded_core(inst, &min_coreid, &min_load);
@@ -773,13 +774,6 @@ static int vcodec_domains_get(struct device *dev)
core->pmdomains[i] = pd;
}
- core->pd_dl_venus = device_link_add(dev, core->pmdomains[0],
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS |
- DL_FLAG_RPM_ACTIVE);
- if (!core->pd_dl_venus)
- return -ENODEV;
-
skip_pmdomains:
if (!core->has_opp_table)
return 0;
@@ -806,14 +800,12 @@ skip_pmdomains:
opp_dl_add_err:
dev_pm_opp_detach_genpd(core->opp_table);
opp_attach_err:
- if (core->pd_dl_venus) {
- device_link_del(core->pd_dl_venus);
- for (i = 0; i < res->vcodec_pmdomains_num; i++) {
- if (IS_ERR_OR_NULL(core->pmdomains[i]))
- continue;
- dev_pm_domain_detach(core->pmdomains[i], true);
- }
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ if (IS_ERR_OR_NULL(core->pmdomains[i]))
+ continue;
+ dev_pm_domain_detach(core->pmdomains[i], true);
}
+
return ret;
}
@@ -826,9 +818,6 @@ static void vcodec_domains_put(struct device *dev)
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
- if (core->pd_dl_venus)
- device_link_del(core->pd_dl_venus);
-
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
if (IS_ERR_OR_NULL(core->pmdomains[i]))
continue;
@@ -915,16 +904,30 @@ static void core_put_v4(struct device *dev)
static int core_power_v4(struct device *dev, int on)
{
struct venus_core *core = dev_get_drvdata(dev);
+ struct device *pmctrl = core->pmdomains[0];
int ret = 0;
if (on == POWER_ON) {
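+		/* drive the core power domain via runtime PM, replacing the removed always-on device link */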
+ if (pmctrl) {
+ ret = pm_runtime_get_sync(pmctrl);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pmctrl);
+ return ret;
+ }
+ }
+
ret = core_clks_enable(core);
+ if (ret < 0 && pmctrl)
+ pm_runtime_put_sync(pmctrl);
} else {
/* Drop the performance state vote */
if (core->opp_pmdomain)
dev_pm_opp_set_rate(dev, 0);
core_clks_disable(core);
+
+ if (pmctrl)
+ pm_runtime_put_sync(pmctrl);
}
return ret;
@@ -939,10 +942,13 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
mbs_per_sec = load_per_instance(inst);
- vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
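+	/* instances that are not started contribute no load */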
+ if (inst->state != INST_START)
+ return 0;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
- vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+ vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
/* 10 / 7 is overhead factor */
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 8488411204c3..e4dc97f00fc3 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -13,7 +13,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-dma-contig.h>
#include "hfi_venus_io.h"
#include "hfi_parser.h"
@@ -519,8 +519,10 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
ret = hfi_session_process_buf(inst, &fdata);
- if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING)
+ if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING) {
inst->codec_state = VENUS_DEC_STATE_DRAIN;
+ inst->drain_active = true;
+ }
}
unlock:
@@ -637,6 +639,7 @@ static int vdec_output_conf(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct hfi_enable en = { .enable = 1 };
+ struct hfi_buffer_requirements bufreq;
u32 width = inst->out_width;
u32 height = inst->out_height;
u32 out_fmt, out2_fmt;
@@ -712,6 +715,23 @@ static int vdec_output_conf(struct venus_inst *inst)
}
if (IS_V3(core) || IS_V4(core)) {
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (bufreq.size > inst->output_buf_size)
+ return -EINVAL;
+
+ if (inst->dpb_fmt) {
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT2,
+ &bufreq);
+ if (ret)
+ return ret;
+
+ if (bufreq.size > inst->output2_buf_size)
+ return -EINVAL;
+ }
+
if (inst->output2_buf_size) {
ret = venus_helper_set_bufsize(inst,
inst->output2_buf_size,
@@ -740,8 +760,8 @@ static int vdec_session_init(struct venus_inst *inst)
{
int ret;
- ret = hfi_session_init(inst, inst->fmt_out->pixfmt);
- if (ret == -EINVAL)
+ ret = venus_helper_session_init(inst);
+ if (ret == -EALREADY)
return 0;
else if (ret)
return ret;
@@ -751,10 +771,6 @@ static int vdec_session_init(struct venus_inst *inst)
if (ret)
goto deinit;
- ret = venus_helper_init_codec_freq_data(inst);
- if (ret)
- goto deinit;
-
return 0;
deinit:
hfi_session_deinit(inst);
@@ -917,10 +933,6 @@ static int vdec_start_capture(struct venus_inst *inst)
return 0;
reconfigure:
- ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
- if (ret)
- return ret;
-
ret = vdec_output_conf(inst);
if (ret)
return ret;
@@ -948,15 +960,21 @@ reconfigure:
venus_pm_load_scale(inst);
+ inst->next_buf_last = false;
+
ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
inst->codec_state = VENUS_DEC_STATE_DECODING;
+ if (inst->drain_active)
+ inst->codec_state = VENUS_DEC_STATE_DRAIN;
+
inst->streamon_cap = 1;
inst->sequence_cap = 0;
inst->reconfig = false;
+ inst->drain_active = false;
return 0;
@@ -972,7 +990,10 @@ static int vdec_start_output(struct venus_inst *inst)
if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
ret = venus_helper_process_initial_out_bufs(inst);
- inst->codec_state = VENUS_DEC_STATE_DECODING;
+ if (inst->next_buf_last)
+ inst->codec_state = VENUS_DEC_STATE_DRC;
+ else
+ inst->codec_state = VENUS_DEC_STATE_DECODING;
goto done;
}
@@ -988,6 +1009,7 @@ static int vdec_start_output(struct venus_inst *inst)
venus_helper_init_instance(inst);
inst->sequence_out = 0;
inst->reconfig = false;
+ inst->next_buf_last = false;
ret = vdec_set_properties(inst);
if (ret)
@@ -1077,13 +1099,14 @@ static int vdec_stop_capture(struct venus_inst *inst)
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
fallthrough;
case VENUS_DEC_STATE_DRAIN:
- vdec_cancel_dst_buffers(inst);
inst->codec_state = VENUS_DEC_STATE_STOPPED;
+ inst->drain_active = false;
+ fallthrough;
+ case VENUS_DEC_STATE_SEEK:
+ vdec_cancel_dst_buffers(inst);
break;
case VENUS_DEC_STATE_DRC:
- WARN_ON(1);
- fallthrough;
- case VENUS_DEC_STATE_DRC_FLUSH_DONE:
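+		/* flush the capture queue synchronously; the separate DRC_FLUSH_DONE state is gone */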
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
venus_helper_free_dpb_bufs(inst);
break;
@@ -1102,6 +1125,7 @@ static int vdec_stop_output(struct venus_inst *inst)
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
case VENUS_DEC_STATE_STOPPED:
+ case VENUS_DEC_STATE_DRC:
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
inst->codec_state = VENUS_DEC_STATE_SEEK;
break;
@@ -1207,10 +1231,28 @@ static void vdec_buf_cleanup(struct vb2_buffer *vb)
static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS };
vdec_pm_get_put(inst);
+ mutex_lock(&inst->lock);
+
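+	/*
+	 * While a resolution change is pending, return a newly queued
+	 * capture buffer immediately as the last one and signal EOS
+	 */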
+ if (inst->next_buf_last && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ inst->codec_state == VENUS_DEC_STATE_DRC) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vbuf->sequence = inst->sequence_cap++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_set_plane_payload(vb, 0, 0);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ v4l2_event_queue_fh(&inst->fh, &eos);
+ inst->next_buf_last = false;
+ mutex_unlock(&inst->lock);
+ return;
+ }
+
venus_helper_vb2_buf_queue(vb);
+ mutex_unlock(&inst->lock);
}
static const struct vb2_ops vdec_vb2_ops = {
@@ -1253,20 +1295,15 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
- if (inst->last_buf == vb) {
- inst->last_buf = NULL;
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- vb2_set_plane_payload(vb, 0, 0);
- vb->timestamp = 0;
- }
-
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
v4l2_event_queue_fh(&inst->fh, &ev);
- if (inst->codec_state == VENUS_DEC_STATE_DRAIN)
+ if (inst->codec_state == VENUS_DEC_STATE_DRAIN) {
+ inst->drain_active = false;
inst->codec_state = VENUS_DEC_STATE_STOPPED;
+ }
}
if (!bytesused)
@@ -1334,22 +1371,22 @@ static void vdec_event_change(struct venus_inst *inst,
if (inst->bit_depth != ev_data->bit_depth)
inst->bit_depth = ev_data->bit_depth;
+ if (inst->pic_struct != ev_data->pic_struct)
+ inst->pic_struct = ev_data->pic_struct;
+
dev_dbg(dev, VDBGM "event %s sufficient resources (%ux%u)\n",
sufficient ? "" : "not", ev_data->width, ev_data->height);
- if (sufficient) {
- hfi_session_continue(inst);
- } else {
- switch (inst->codec_state) {
- case VENUS_DEC_STATE_INIT:
- inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
- break;
- case VENUS_DEC_STATE_DECODING:
- inst->codec_state = VENUS_DEC_STATE_DRC;
- break;
- default:
- break;
- }
+ switch (inst->codec_state) {
+ case VENUS_DEC_STATE_INIT:
+ inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
+ break;
+ case VENUS_DEC_STATE_DECODING:
+ case VENUS_DEC_STATE_DRAIN:
+ inst->codec_state = VENUS_DEC_STATE_DRC;
+ break;
+ default:
+ break;
}
/*
@@ -1358,19 +1395,17 @@ static void vdec_event_change(struct venus_inst *inst,
* itself doesn't mark the last decoder output buffer with HFI EOS flag.
*/
- if (!sufficient && inst->codec_state == VENUS_DEC_STATE_DRC) {
- struct vb2_v4l2_buffer *last;
+ if (inst->codec_state == VENUS_DEC_STATE_DRC) {
int ret;
- last = v4l2_m2m_last_dst_buf(inst->m2m_ctx);
- if (last)
- inst->last_buf = &last->vb2_buf;
+ inst->next_buf_last = true;
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false);
if (ret)
dev_dbg(dev, VDBGH "flush output error %d\n", ret);
}
+ inst->next_buf_last = true;
inst->reconfig = true;
v4l2_event_queue_fh(&inst->fh, &ev);
wake_up(&inst->reconf_wait);
@@ -1413,8 +1448,7 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
static void vdec_flush_done(struct venus_inst *inst)
{
- if (inst->codec_state == VENUS_DEC_STATE_DRC)
- inst->codec_state = VENUS_DEC_STATE_DRC_FLUSH_DONE;
+ dev_dbg(inst->core->dev_dec, VDBGH "flush done\n");
}
static const struct hfi_inst_ops vdec_hfi_ops = {
@@ -1461,7 +1495,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &vdec_vb2_ops;
- src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
@@ -1475,7 +1509,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &vdec_vb2_ops;
- dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
@@ -1508,6 +1542,7 @@ static int vdec_open(struct file *file)
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
inst->bit_depth = VIDC_BITDEPTH_8;
+ inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE;
init_waitqueue_head(&inst->reconf_wait);
venus_helper_init_instance(inst);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 1c61602c5de1..6976ed553647 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
@@ -190,8 +190,10 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->height = clamp(pixmp->height, frame_height_min(inst),
frame_height_max(inst));
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = ALIGN(pixmp->width, 128);
pixmp->height = ALIGN(pixmp->height, 32);
+ }
pixmp->width = ALIGN(pixmp->width, 2);
pixmp->height = ALIGN(pixmp->height, 2);
@@ -335,13 +337,13 @@ venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
switch (s->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
- s->r.width = inst->width;
- s->r.height = inst->height;
- break;
- case V4L2_SEL_TGT_CROP:
s->r.width = inst->out_width;
s->r.height = inst->out_height;
break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.width = inst->width;
+ s->r.height = inst->height;
+ break;
default:
return -EINVAL;
}
@@ -360,12 +362,19 @@ venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
+ if (s->r.width > inst->out_width ||
+ s->r.height > inst->out_height)
+ return -EINVAL;
+
+ s->r.width = ALIGN(s->r.width, 2);
+ s->r.height = ALIGN(s->r.height, 2);
+
switch (s->target) {
case V4L2_SEL_TGT_CROP:
- if (s->r.width != inst->out_width ||
- s->r.height != inst->out_height ||
- s->r.top != 0 || s->r.left != 0)
- return -EINVAL;
+ s->r.top = 0;
+ s->r.left = 0;
+ inst->width = s->r.width;
+ inst->height = s->r.height;
break;
default:
return -EINVAL;
@@ -536,6 +545,7 @@ static int venc_set_properties(struct venus_inst *inst)
struct hfi_idr_period idrp;
struct hfi_quantization quant;
struct hfi_quantization_range quant_range;
+ struct hfi_enable en;
u32 ptype, rate_control, bitrate;
u32 profile, level;
int ret;
@@ -588,16 +598,19 @@ static int venc_set_properties(struct venus_inst *inst)
return ret;
}
- /* IDR periodicity, n:
- * n = 0 - only the first I-frame is IDR frame
- * n = 1 - all I-frames will be IDR frames
- * n > 1 - every n-th I-frame will be IDR frame
- */
- ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
- idrp.idr_period = 0;
- ret = hfi_session_set_property(inst, ptype, &idrp);
- if (ret)
- return ret;
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
+ inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ /* IDR periodicity, n:
+ * n = 0 - only the first I-frame is IDR frame
+ * n = 1 - all I-frames will be IDR frames
+ * n > 1 - every n-th I-frame will be IDR frame
+ */
+ ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+ idrp.idr_period = 0;
+ ret = hfi_session_set_property(inst, ptype, &idrp);
+ if (ret)
+ return ret;
+ }
if (ctr->num_b_frames) {
u32 max_num_b_frames = NUM_B_FRAMES_MAX;
@@ -655,6 +668,19 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
+ inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+ if (ctr->header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
+ en.enable = 0;
+ else
+ en.enable = 1;
+
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
if (!ctr->bitrate_peak)
bitrate *= 2;
else
@@ -669,17 +695,28 @@ static int venc_set_properties(struct venus_inst *inst)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP;
- quant.qp_i = ctr->h264_i_qp;
- quant.qp_p = ctr->h264_p_qp;
- quant.qp_b = ctr->h264_b_qp;
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ quant.qp_i = ctr->hevc_i_qp;
+ quant.qp_p = ctr->hevc_p_qp;
+ quant.qp_b = ctr->hevc_b_qp;
+ } else {
+ quant.qp_i = ctr->h264_i_qp;
+ quant.qp_p = ctr->h264_p_qp;
+ quant.qp_b = ctr->h264_b_qp;
+ }
quant.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &quant);
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
- quant_range.min_qp = ctr->h264_min_qp;
- quant_range.max_qp = ctr->h264_max_qp;
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ quant_range.min_qp = ctr->hevc_min_qp;
+ quant_range.max_qp = ctr->hevc_max_qp;
+ } else {
+ quant_range.min_qp = ctr->h264_min_qp;
+ quant_range.max_qp = ctr->h264_max_qp;
+ }
quant_range.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &quant_range);
if (ret)
@@ -724,10 +761,17 @@ static int venc_init_session(struct venus_inst *inst)
{
int ret;
- ret = hfi_session_init(inst, inst->fmt_cap->pixfmt);
- if (ret)
+ ret = venus_helper_session_init(inst);
+ if (ret == -EALREADY)
+ return 0;
+ else if (ret)
return ret;
+ ret = venus_helper_set_stride(inst, inst->out_width,
+ inst->out_height);
+ if (ret)
+ goto deinit;
+
ret = venus_helper_set_input_resolution(inst, inst->width,
inst->height);
if (ret)
@@ -743,10 +787,6 @@ static int venc_init_session(struct venus_inst *inst)
if (ret)
goto deinit;
- ret = venus_helper_init_codec_freq_data(inst);
- if (ret)
- goto deinit;
-
ret = venc_set_properties(inst);
if (ret)
goto deinit;
@@ -762,17 +802,13 @@ static int venc_out_num_buffers(struct venus_inst *inst, unsigned int *num)
struct hfi_buffer_requirements bufreq;
int ret;
- ret = venc_init_session(inst);
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
- ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
-
*num = bufreq.count_actual;
- hfi_session_deinit(inst);
-
- return ret;
+ return 0;
}
static int venc_queue_setup(struct vb2_queue *q,
@@ -781,7 +817,7 @@ static int venc_queue_setup(struct vb2_queue *q,
{
struct venus_inst *inst = vb2_get_drv_priv(q);
unsigned int num, min = 4;
- int ret = 0;
+ int ret;
if (*num_planes) {
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
@@ -803,6 +839,13 @@ static int venc_queue_setup(struct vb2_queue *q,
return 0;
}
+ mutex_lock(&inst->lock);
+ ret = venc_init_session(inst);
+ mutex_unlock(&inst->lock);
+
+ if (ret)
+ return ret;
+
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmt_out->num_planes;
@@ -816,8 +859,8 @@ static int venc_queue_setup(struct vb2_queue *q,
inst->num_input_bufs = *num_buffers;
sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
- inst->width,
- inst->height);
+ inst->out_width,
+ inst->out_height);
inst->input_buf_size = sizes[0];
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -838,6 +881,49 @@ static int venc_queue_setup(struct vb2_queue *q,
return ret;
}
+static int venc_buf_init(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ inst->buf_count++;
+
+ return venus_helper_vb2_buf_init(vb);
+}
+
+static void venc_release_session(struct venus_inst *inst)
+{
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ ret = hfi_session_deinit(inst);
+ if (ret || inst->session_error)
+ hfi_session_abort(inst);
+
+ mutex_unlock(&inst->lock);
+
+ venus_pm_load_scale(inst);
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ venus_pm_release_core(inst);
+}
+
+static void venc_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ mutex_lock(&inst->lock);
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (!list_empty(&inst->registeredbufs))
+ list_del_init(&buf->reg_list);
+ mutex_unlock(&inst->lock);
+
+ inst->buf_count--;
+ if (!inst->buf_count)
+ venc_release_session(inst);
+}
+
static int venc_verify_conf(struct venus_inst *inst)
{
enum hfi_version ver = inst->core->res->hfi_version;
@@ -888,38 +974,32 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
inst->sequence_cap = 0;
inst->sequence_out = 0;
- ret = venc_init_session(inst);
- if (ret)
- goto bufs_done;
-
ret = venus_pm_acquire_core(inst);
if (ret)
- goto deinit_sess;
+ goto error;
ret = venc_set_properties(inst);
if (ret)
- goto deinit_sess;
+ goto error;
ret = venc_verify_conf(inst);
if (ret)
- goto deinit_sess;
+ goto error;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
inst->num_output_bufs, 0);
if (ret)
- goto deinit_sess;
+ goto error;
ret = venus_helper_vb2_start_streaming(inst);
if (ret)
- goto deinit_sess;
+ goto error;
mutex_unlock(&inst->lock);
return 0;
-deinit_sess:
- hfi_session_deinit(inst);
-bufs_done:
+error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
@@ -929,13 +1009,23 @@ bufs_done:
return ret;
}
+static void venc_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ mutex_lock(&inst->lock);
+ venus_helper_vb2_buf_queue(vb);
+ mutex_unlock(&inst->lock);
+}
+
static const struct vb2_ops venc_vb2_ops = {
.queue_setup = venc_queue_setup,
- .buf_init = venus_helper_vb2_buf_init,
+ .buf_init = venc_buf_init,
+ .buf_cleanup = venc_buf_cleanup,
.buf_prepare = venus_helper_vb2_buf_prepare,
.start_streaming = venc_start_streaming,
.stop_streaming = venus_helper_vb2_stop_streaming,
- .buf_queue = venus_helper_vb2_buf_queue,
+ .buf_queue = venc_vb2_buf_queue,
};
static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
@@ -1001,7 +1091,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &venc_vb2_ops;
- src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
@@ -1017,7 +1107,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &venc_vb2_ops;
- dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
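
The venc.c hunks above restructure the encoder session lifecycle: the HFI session is now opened lazily in venc_queue_setup(), buffers are counted in venc_buf_init(), and the session is released from venc_buf_cleanup() once the last buffer is gone, instead of being created and torn down around start/stop streaming. A minimal userspace model of that refcount pattern (all fake_* names are illustrative, not driver symbols):

    #include <stdio.h>

    struct fake_inst {
            int buf_count;
            int session_active;
    };

    static void fake_queue_setup(struct fake_inst *inst)
    {
            if (!inst->session_active)
                    inst->session_active = 1;       /* venc_init_session() */
    }

    static void fake_buf_init(struct fake_inst *inst)
    {
            inst->buf_count++;
    }

    static void fake_buf_cleanup(struct fake_inst *inst)
    {
            if (!--inst->buf_count)
                    inst->session_active = 0;       /* venc_release_session() */
    }

    int main(void)
    {
            struct fake_inst inst = { 0, 0 };

            fake_queue_setup(&inst);
            fake_buf_init(&inst);
            fake_buf_init(&inst);
            fake_buf_cleanup(&inst);
            fake_buf_cleanup(&inst);
            printf("buf_count=%d session_active=%d\n",
                   inst.buf_count, inst.session_active);
            return 0;
    }
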
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index cf860e6446c0..a52b80055173 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -135,9 +135,60 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
ctr->h264_min_qp = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP:
+ ctr->h264_i_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP:
+ ctr->h264_p_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP:
+ ctr->h264_b_min_qp = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
ctr->h264_max_qp = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP:
+ ctr->h264_i_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP:
+ ctr->h264_p_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP:
+ ctr->h264_b_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ ctr->hevc_i_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+ ctr->hevc_p_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+ ctr->hevc_b_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ ctr->hevc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP:
+ ctr->hevc_i_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP:
+ ctr->hevc_p_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP:
+ ctr->hevc_b_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ ctr->hevc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP:
+ ctr->hevc_i_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP:
+ ctr->hevc_p_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP:
+ ctr->hevc_b_max_qp = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
ctr->multi_slice_mode = ctrl->val;
break;
@@ -158,6 +209,20 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
ctr->header_mode = ctrl->val;
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ if (ctrl->val == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
+ en.enable = 0;
+ else
+ en.enable = 1;
+ ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
break;
@@ -208,6 +273,9 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
ctr->frame_skip_mode = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID:
+ ctr->base_priority_id = ctrl->val;
+ break;
default:
return -EINVAL;
}
@@ -223,7 +291,7 @@ int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 33);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 51);
if (ret)
return ret;
@@ -289,7 +357,8 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEADER_MODE,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
- 1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ ~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
@@ -311,19 +380,70 @@ int venc_ctrl_init(struct venus_inst *inst)
BITRATE_STEP, BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1);
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51);
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP, 1, 51, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, 1, 63, 1, 26);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, 1, 63, 1, 28);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, 1, 63, 1, 30);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, 1, 63, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP, 1, 63, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP, 1, 63, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP, 1, 63, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, 1, 63, 1, 63);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP, 1, 63, 1, 63);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP, 1, 63, 1, 63);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP, 1, 63, 1, 63);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN,
@@ -374,6 +494,10 @@ int venc_ctrl_init(struct venus_inst *inst)
(1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT)),
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID, 0,
+ 6, 1, 0);
+
ret = inst->ctrl_handler.error;
if (ret)
goto err;
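
Two details in the venc_ctrls.c hunks are easy to misread. First, when V4L2_CID_MPEG_VIDEO_HEADER_MODE is set to SEPARATE while streaming, the driver now disables HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER (en.enable = 0), i.e. sequence headers are no longer prepended to sync frames. Second, the third argument of v4l2_ctrl_new_std_menu() is a skip mask: a set bit hides that menu item. The old mask hid JOINED_WITH_1ST_FRAME, leaving only SEPARATE selectable; the new mask hides everything except SEPARATE and JOINED_WITH_1ST_FRAME, so both modes become selectable. A compilable illustration of the mask semantics:

    #include <stdio.h>

    enum { MODE_SEPARATE = 0, MODE_JOINED_WITH_1ST_FRAME = 1 };

    int main(void)
    {
            unsigned long old_mask = 1UL << MODE_JOINED_WITH_1ST_FRAME;
            unsigned long new_mask = ~((1UL << MODE_SEPARATE) |
                                       (1UL << MODE_JOINED_WITH_1ST_FRAME));
            int i;

            for (i = MODE_SEPARATE; i <= MODE_JOINED_WITH_1ST_FRAME; i++)
                    printf("mode %d: old %s, new %s\n", i,
                           old_mask & (1UL << i) ? "hidden" : "selectable",
                           new_mask & (1UL << i) ? "hidden" : "selectable");
            return 0;
    }
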
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 98bff765b02e..cb3025992817 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -642,7 +642,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
}
asd = v4l2_async_notifier_add_fwnode_subdev(&vin->notifier, fwnode,
- sizeof(*asd));
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
out:
fwnode_handle_put(fwnode);
- return 0;
+ return ret;
}
static int rvin_parallel_init(struct rvin_dev *vin)
@@ -842,7 +842,8 @@ static int rvin_mc_parse_of(struct rvin_dev *vin, unsigned int id)
}
asd = v4l2_async_notifier_add_fwnode_subdev(&vin->group->notifier,
- fwnode, sizeof(*asd));
+ fwnode,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
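
This and the following rcar/rkisp1/sun4i/stm32 hunks all track the same v4l2-async API change: v4l2_async_notifier_add_fwnode_subdev() and its siblings now take the container type instead of a byte size, allocate the container themselves, and return a typed pointer (or ERR_PTR) rather than filling in a caller-provided struct v4l2_async_subdev. The wrapper is roughly the following sketch (see include/media/v4l2-async.h for the authoritative definition):

    #define v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, type)     \
            ((type *)                                                         \
             __v4l2_async_notifier_add_fwnode_subdev((notifier), (fwnode),    \
                                                     sizeof(type)))
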
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 945d2eb87233..e06cd512aba2 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -910,7 +910,7 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
priv->notifier.ops = &rcar_csi2_notify_ops;
asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
- sizeof(*asd));
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 48280ddb15b9..f30dafbdf61c 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1301,6 +1301,11 @@ void rvin_stop_streaming(struct rvin_dev *vin)
spin_lock_irqsave(&vin->qlock, flags);
+ if (vin->state == STOPPED) {
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ return;
+ }
+
vin->state = STOPPING;
/* Wait until only scratch buffer is used, max 3 interrupts. */
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index e6ea2b7991b8..457a65bf6b66 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -966,12 +966,9 @@ void rvin_v4l2_unregister(struct rvin_dev *vin)
video_unregister_device(&vin->vdev);
}
-static void rvin_notify(struct v4l2_subdev *sd,
- unsigned int notification, void *arg)
+static void rvin_notify_video_device(struct rvin_dev *vin,
+ unsigned int notification, void *arg)
{
- struct rvin_dev *vin =
- container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
-
switch (notification) {
case V4L2_DEVICE_NOTIFY_EVENT:
v4l2_event_queue(&vin->vdev, arg);
@@ -981,6 +978,41 @@ static void rvin_notify(struct v4l2_subdev *sd,
}
}
+static void rvin_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct v4l2_subdev *remote;
+ struct rvin_group *group;
+ struct media_pad *pad;
+ struct rvin_dev *vin =
+ container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
+ unsigned int i;
+
+ /* If no media controller, no need to route the event. */
+ if (!vin->info->use_mc) {
+ rvin_notify_video_device(vin, notification, arg);
+ return;
+ }
+
+ group = vin->group;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ vin = group->vin[i];
+ if (!vin)
+ continue;
+
+ pad = media_entity_remote_pad(&vin->pad);
+ if (!pad)
+ continue;
+
+ remote = media_entity_to_v4l2_subdev(pad->entity);
+ if (remote != sd)
+ continue;
+
+ rvin_notify_video_device(vin, notification, arg);
+ }
+}
+
int rvin_v4l2_register(struct rvin_dev *vin)
{
struct video_device *vdev = &vin->vdev;
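
rvin_notify() previously assumed a one-to-one mapping between the notifying subdev and a single VIN instance. With the media controller in use, several VINs share the group, so the event must be delivered only to those VINs whose remote pad resolves back to the notifying subdev. A standalone miniature of that routing loop:

    #include <stdio.h>

    #define NUM_VIN 4

    struct vin_model {
            int id;
            int remote_sd;  /* id of the connected subdev, -1 if none */
    };

    static void notify(struct vin_model *group[NUM_VIN], int notifying_sd)
    {
            int i;

            for (i = 0; i < NUM_VIN; i++) {
                    if (!group[i] || group[i]->remote_sd != notifying_sd)
                            continue;
                    printf("queue event on vin%d\n", group[i]->id);
            }
    }

    int main(void)
    {
            struct vin_model vin0 = { 0, 7 }, vin1 = { 1, 9 };
            struct vin_model *group[NUM_VIN] = { &vin0, &vin1, NULL, NULL };

            notify(group, 7);       /* reaches vin0 only */
            return 0;
    }
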
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index f318cd4b8086..83bd9a412a56 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1231,7 +1231,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
}
asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- sizeof(*asd));
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index c9448de885b6..01c1fbb97bf6 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1439,8 +1439,6 @@ static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
* pix->height / vsub;
- memset(pix->plane_fmt[i].reserved, 0,
- sizeof(pix->plane_fmt[i].reserved));
}
if (fmt->num_planes == 3) {
@@ -1448,8 +1446,6 @@ static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
- memset(pix->plane_fmt[2].reserved, 0,
- sizeof(pix->plane_fmt[2].reserved));
}
}
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 9b99ff368698..a7c198c17deb 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -648,6 +648,7 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
if (get_word_be(&jpeg_buffer, &word))
return 0;
skip(&jpeg_buffer, (long)word - 2);
+ break;
case 0:
break;
default:
@@ -793,7 +794,6 @@ static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
pix->colorspace = fmt->colorspace;
pix->field = V4L2_FIELD_NONE;
pix->num_planes = fmt->num_planes;
- memset(pix->reserved, 0, sizeof(pix->reserved));
jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
@@ -808,8 +808,6 @@ static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
(JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
pix->plane_fmt[0].bytesperline = 0;
- memset(pix->plane_fmt[0].reserved, 0,
- sizeof(pix->plane_fmt[0].reserved));
} else {
unsigned int i, bpl = 0;
@@ -822,8 +820,6 @@ static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
for (i = 0; i < pix->num_planes; ++i) {
pix->plane_fmt[i].bytesperline = bpl;
pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
- memset(pix->plane_fmt[i].reserved, 0,
- sizeof(pix->plane_fmt[i].reserved));
}
}
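
The rcar_jpu.c hunk adds a missing break in jpu_parse_hdr(): after skipping a marker's payload, control previously fell through into the next case label. A small compilable reproduction of that class of bug:

    #include <stdio.h>

    static void parse_marker(int marker, int fixed)
    {
            switch (marker) {
            case 1:
                    printf("skip payload\n");
                    if (fixed)
                            break;          /* the added 'break;' */
                    /* without it, control falls through into case 0 */
            case 0:
                    printf("zero-length marker handled\n");
                    break;
            default:
                    printf("unknown marker\n");
            }
    }

    int main(void)
    {
            parse_marker(1, 0);     /* buggy: prints both messages */
            parse_marker(1, 1);     /* fixed: prints only the first */
            return 0;
    }
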
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index 4a633ad0e8fa..cd137101d41e 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -152,8 +152,8 @@ static inline struct ceu_buffer *vb2_to_ceu(struct vb2_v4l2_buffer *vbuf)
* ceu_subdev - Wraps v4l2 sub-device and provides async subdevice.
*/
struct ceu_subdev {
- struct v4l2_subdev *v4l2_sd;
struct v4l2_async_subdev asd;
+ struct v4l2_subdev *v4l2_sd;
/* per-subdevice mbus configuration options */
unsigned int mbus_flags;
@@ -174,7 +174,7 @@ struct ceu_device {
struct v4l2_device v4l2_dev;
/* subdevices descriptors */
- struct ceu_subdev *subdevs;
+ struct ceu_subdev **subdevs;
/* the subdevice currently in use */
struct ceu_subdev *sd;
unsigned int sd_index;
@@ -1195,7 +1195,7 @@ static int ceu_enum_input(struct file *file, void *priv,
if (inp->index >= ceudev->num_sd)
return -EINVAL;
- ceusd = &ceudev->subdevs[inp->index];
+ ceusd = ceudev->subdevs[inp->index];
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = 0;
@@ -1230,7 +1230,7 @@ static int ceu_s_input(struct file *file, void *priv, unsigned int i)
return 0;
ceu_sd_old = ceudev->sd;
- ceudev->sd = &ceudev->subdevs[i];
+ ceudev->sd = ceudev->subdevs[i];
/*
* Make sure we can generate output image formats and apply
@@ -1423,7 +1423,7 @@ static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
* ceu formats.
*/
if (!ceudev->sd) {
- ceudev->sd = &ceudev->subdevs[0];
+ ceudev->sd = ceudev->subdevs[0];
ceudev->sd_index = 0;
}
@@ -1467,8 +1467,8 @@ static const struct v4l2_async_notifier_operations ceu_notify_ops = {
/*
* ceu_init_async_subdevs() - Initialize CEU subdevices and async_subdevs in
- * ceu device. Both DT and platform data parsing use
- * this routine.
+ * ceu device. Both DT and platform data parsing use
+ * this routine.
*
* Returns 0 for success, -ENOMEM for failure.
*/
@@ -1510,21 +1510,16 @@ static int ceu_parse_platform_data(struct ceu_device *ceudev,
/* Setup the ceu subdevice and the async subdevice. */
async_sd = &pdata->subdevs[i];
- ceu_sd = &ceudev->subdevs[i];
-
- INIT_LIST_HEAD(&ceu_sd->asd.list);
-
- ceu_sd->mbus_flags = async_sd->flags;
- ceu_sd->asd.match_type = V4L2_ASYNC_MATCH_I2C;
- ceu_sd->asd.match.i2c.adapter_id = async_sd->i2c_adapter_id;
- ceu_sd->asd.match.i2c.address = async_sd->i2c_address;
-
- ret = v4l2_async_notifier_add_subdev(&ceudev->notifier,
- &ceu_sd->asd);
- if (ret) {
+ ceu_sd = v4l2_async_notifier_add_i2c_subdev(&ceudev->notifier,
+ async_sd->i2c_adapter_id,
+ async_sd->i2c_address,
+ struct ceu_subdev);
+ if (IS_ERR(ceu_sd)) {
v4l2_async_notifier_cleanup(&ceudev->notifier);
- return ret;
+ return PTR_ERR(ceu_sd);
}
+ ceu_sd->mbus_flags = async_sd->flags;
+ ceudev->subdevs[i] = ceu_sd;
}
return pdata->num_subdevs;
@@ -1536,7 +1531,7 @@ static int ceu_parse_platform_data(struct ceu_device *ceudev,
static int ceu_parse_dt(struct ceu_device *ceudev)
{
struct device_node *of = ceudev->dev->of_node;
- struct device_node *ep, *remote;
+ struct device_node *ep;
struct ceu_subdev *ceu_sd;
unsigned int i;
int num_ep;
@@ -1578,20 +1573,15 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
}
/* Setup the ceu subdevice and the async subdevice. */
- ceu_sd = &ceudev->subdevs[i];
- INIT_LIST_HEAD(&ceu_sd->asd.list);
-
- remote = of_graph_get_remote_port_parent(ep);
- ceu_sd->mbus_flags = fw_ep.bus.parallel.flags;
- ceu_sd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- ceu_sd->asd.match.fwnode = of_fwnode_handle(remote);
-
- ret = v4l2_async_notifier_add_subdev(&ceudev->notifier,
- &ceu_sd->asd);
- if (ret) {
- of_node_put(remote);
+ ceu_sd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &ceudev->notifier, of_fwnode_handle(ep),
+ struct ceu_subdev);
+ if (IS_ERR(ceu_sd)) {
+ ret = PTR_ERR(ceu_sd);
goto error_cleanup;
}
+ ceu_sd->mbus_flags = fw_ep.bus.parallel.flags;
+ ceudev->subdevs[i] = ceu_sd;
of_node_put(ep);
}
@@ -1679,7 +1669,7 @@ static int ceu_probe(struct platform_device *pdev)
v4l2_async_notifier_init(&ceudev->notifier);
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- ceu_data = of_match_device(ceu_of_match, dev)->data;
+ ceu_data = of_device_get_match_data(dev);
num_subdevs = ceu_parse_dt(ceudev);
} else if (dev->platform_data) {
/* Assume SH4 if booting with platform data. */
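
The renesas-ceu.c changes illustrate a constraint of the new async helpers: the core allocates sizeof(container) and hands the same pointer back as both the container and the embedded struct v4l2_async_subdev, so the asd member must be the first field — which is why the diff reorders struct ceu_subdev and turns ceudev->subdevs into an array of pointers owned by the notifier. A minimal model of the offset-zero requirement (all *_model names are illustrative):

    #include <stddef.h>
    #include <stdlib.h>

    struct asd_model { int match_type; };   /* v4l2_async_subdev stand-in */

    struct ceu_subdev_model {
            struct asd_model asd;           /* must stay at offset 0 */
            unsigned int mbus_flags;
    };

    /* The core allocates the whole container but returns it typed as the
     * base struct; the add_*_subdev() macros cast it back to the container. */
    static struct asd_model *core_add_subdev(size_t container_size)
    {
            return calloc(1, container_size);
    }

    int main(void)
    {
            struct ceu_subdev_model *sd = (struct ceu_subdev_model *)
                    core_add_subdev(sizeof(*sd));

            if (!sd)
                    return 1;
            sd->mbus_flags = 0;
            free(sd);
            return offsetof(struct ceu_subdev_model, asd) == 0 ? 0 : 1;
    }
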
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 68da1eed753d..7474150b94ed 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -104,6 +104,7 @@
struct rkisp1_match_data {
const char * const *clks;
unsigned int size;
+ enum rkisp1_cif_isp_version isp_ver;
};
/* ----------------------------------------------------------------------------
@@ -251,7 +252,7 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- struct rkisp1_sensor_async *rk_asd = NULL;
+ struct rkisp1_sensor_async *rk_asd;
struct fwnode_handle *ep;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(rkisp1->dev),
@@ -264,9 +265,10 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
if (ret)
goto err_parse;
- rk_asd = kzalloc(sizeof(*rk_asd), GFP_KERNEL);
- if (!rk_asd) {
- ret = -ENOMEM;
+ rk_asd = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
+ struct rkisp1_sensor_async);
+ if (IS_ERR(rk_asd)) {
+ ret = PTR_ERR(rk_asd);
goto err_parse;
}
@@ -274,11 +276,6 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
- &rk_asd->asd);
- if (ret)
- goto err_parse;
-
dev_dbg(rkisp1->dev, "registered ep id %d with %d lanes\n",
vep.base.id, rk_asd->lanes);
@@ -289,7 +286,6 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
continue;
err_parse:
fwnode_handle_put(ep);
- kfree(rk_asd);
v4l2_async_notifier_cleanup(ntf);
return ret;
}
@@ -411,15 +407,16 @@ static const char * const rk3399_isp_clks[] = {
"hclk",
};
-static const struct rkisp1_match_data rk3399_isp_clk_data = {
+static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
.size = ARRAY_SIZE(rk3399_isp_clks),
+ .isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,rk3399-cif-isp",
- .data = &rk3399_isp_clk_data,
+ .data = &rk3399_isp_match_data,
},
{},
};
@@ -457,15 +454,15 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
static int rkisp1_probe(struct platform_device *pdev)
{
- const struct rkisp1_match_data *clk_data;
+ const struct rkisp1_match_data *match_data;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
- clk_data = of_device_get_match_data(&pdev->dev);
- if (!clk_data)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
@@ -494,15 +491,16 @@ static int rkisp1_probe(struct platform_device *pdev)
rkisp1->irq = irq;
- for (i = 0; i < clk_data->size; i++)
- rkisp1->clks[i].id = clk_data->clks[i];
- ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+ for (i = 0; i < match_data->size; i++)
+ rkisp1->clks[i].id = match_data->clks[i];
+ ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = clk_data->size;
+ rkisp1->clk_size = match_data->size;
pm_runtime_enable(&pdev->dev);
+ rkisp1->media_dev.hw_revision = match_data->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
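
With isp_ver carried in the match data and stored in media_dev.hw_revision, userspace can distinguish ISP revisions through the media controller. A hypothetical check (the /dev/media0 path is an assumption):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int main(void)
    {
            struct media_device_info info = { 0 };
            int fd = open("/dev/media0", O_RDWR);

            if (fd < 0 || ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info))
                    return 1;
            printf("model %s, hw_revision %#x\n", info.model,
                   (unsigned int)info.hw_revision);
            return 0;
    }
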
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 889982d8ca41..2e5b57e3aedc 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -600,6 +600,39 @@ static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
return -EINVAL;
}
+static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct rkisp1_isp_mbus_info *mbus_info;
+
+ if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS ||
+ fse->pad == RKISP1_ISP_PAD_SOURCE_STATS)
+ return -ENOTTY;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ mbus_info = rkisp1_isp_mbus_info_get(fse->code);
+ if (!mbus_info)
+ return -EINVAL;
+
+ if (!(mbus_info->direction & RKISP1_ISP_SD_SINK) &&
+ fse->pad == RKISP1_ISP_PAD_SINK_VIDEO)
+ return -EINVAL;
+
+ if (!(mbus_info->direction & RKISP1_ISP_SD_SRC) &&
+ fse->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
+ return -EINVAL;
+
+ fse->min_width = RKISP1_ISP_MIN_WIDTH;
+ fse->max_width = RKISP1_ISP_MAX_WIDTH;
+ fse->min_height = RKISP1_ISP_MIN_HEIGHT;
+ fse->max_height = RKISP1_ISP_MAX_HEIGHT;
+
+ return 0;
+}
+
static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg)
{
@@ -880,6 +913,7 @@ static int rkisp1_subdev_link_validate(struct media_link *link)
static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
.enum_mbus_code = rkisp1_isp_enum_mbus_code,
+ .enum_frame_size = rkisp1_isp_enum_frame_size,
.get_selection = rkisp1_isp_get_selection,
.set_selection = rkisp1_isp_set_selection,
.init_cfg = rkisp1_isp_init_config,
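
The new rkisp1_isp_enum_frame_size() handler makes VIDIOC_SUBDEV_ENUM_FRAME_SIZE usable on the ISP entity's video pads. A hypothetical userspace probe (device path, pad index, and mbus code are assumptions; pad 0 is taken to be RKISP1_ISP_PAD_SINK_VIDEO):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/media-bus-format.h>
    #include <linux/v4l2-subdev.h>

    int main(void)
    {
            struct v4l2_subdev_frame_size_enum fse;
            int fd = open("/dev/v4l-subdev0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&fse, 0, sizeof(fse));
            fse.index = 0;
            fse.pad = 0;                            /* sink video pad */
            fse.code = MEDIA_BUS_FMT_SRGGB10_1X10;  /* one supported code */
            fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            if (!ioctl(fd, VIDIOC_SUBDEV_ENUM_FRAME_SIZE, &fse))
                    printf("width %u..%u, height %u..%u\n",
                           fse.min_width, fse.max_width,
                           fse.min_height, fse.max_height);
            return 0;
    }
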
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 6af4d551ffb5..aa5f45749543 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -391,7 +391,7 @@ static void rkisp1_goc_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
- for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
}
@@ -589,7 +589,6 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
- RKISP1_CIF_ISP_HIST_WEIGHT_44,
};
const u8 *weight;
unsigned int i;
@@ -622,6 +621,8 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
weight[2],
weight[3]),
hist_weight_regs[i]);
+
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
}
static void
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 8a8d960a679c..fa33080f51db 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -365,6 +365,7 @@
#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index 3ddab8fa8f2d..c1d07a2e8839 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -203,7 +203,7 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
- for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
@@ -233,10 +233,11 @@ static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
- for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
- pbuf->params.hist.hist_bins[i] =
- (u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+ }
}
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
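
The rkisp1-stats.c hunk fixes a truncation: V10 histogram bins are 20 bits wide, but the old code cast the register value to u8. The new RKISP1_CIF_ISP_HIST_GET_BIN() mask keeps the full bin. A quick numeric check:

    #include <stdio.h>

    #define HIST_GET_BIN(x) ((x) & 0x000FFFFF)      /* keep the 20-bit bin */

    int main(void)
    {
            unsigned int reg = 0x12345678;

            printf("old (u8 cast): %u, new (masked): %u\n",
                   (unsigned int)(unsigned char)reg, HIST_GET_BIN(reg));
            return 0;
    }
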
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index b22dc1d72527..4ac48441f22c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1355,7 +1355,7 @@ static int sh_vou_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver __refdata sh_vou = {
+static struct platform_driver sh_vou = {
.remove = sh_vou_remove,
.driver = {
.name = "sh-vou",
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 5ceb366648b3..a7a6ea666740 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -826,6 +826,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
dev_err(dev,
"reset gpio for tsin%d not valid (gpio=%d)\n",
tsin->tsin_id, tsin->rst_gpio);
+ ret = -EINVAL;
goto err_node_put;
}
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 43f279e2a6a3..f59811e27f51 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -447,6 +447,7 @@ int hva_hw_runtime_resume(struct device *dev)
if (clk_set_rate(hva->clk, CLK_RATE)) {
dev_err(dev, "%s failed to set clock frequency\n",
HVA_PREFIX);
+ clk_disable_unprepare(hva->clk);
return -EINVAL;
}
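
The hva-hw.c fix closes a leak in the runtime-resume error path: once the clock is prepared, a later clk_set_rate() failure must unwind it before returning. The pattern in miniature (model functions only, not the real clk API):

    #include <stdio.h>

    static int clk_prepare_model(void)    { printf("clk prepared\n"); return 0; }
    static void clk_unprepare_model(void) { printf("clk unprepared\n"); }
    static int clk_set_rate_model(void)   { return -1; }   /* simulated failure */

    static int runtime_resume_model(void)
    {
            if (clk_prepare_model())
                    return -1;
            if (clk_set_rate_model()) {
                    clk_unprepare_model();  /* the added clk_disable_unprepare() */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            (void)runtime_resume_model();
            return 0;
    }
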
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index b745f1342c2e..bbcc2254fa2e 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -99,13 +99,6 @@ enum state {
#define OVERRUN_ERROR_THRESHOLD 3
-struct dcmi_graph_entity {
- struct v4l2_async_subdev asd;
-
- struct device_node *remote_node;
- struct v4l2_subdev *source;
-};
-
struct dcmi_format {
u32 fourcc;
u32 mbus_code;
@@ -139,7 +132,7 @@ struct stm32_dcmi {
struct v4l2_device v4l2_dev;
struct video_device *vdev;
struct v4l2_async_notifier notifier;
- struct dcmi_graph_entity entity;
+ struct v4l2_subdev *source;
struct v4l2_format fmt;
struct v4l2_rect crop;
bool do_crop;
@@ -610,7 +603,7 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
struct v4l2_subdev_pad_config *pad_cfg,
struct v4l2_subdev_format *format)
{
- struct media_entity *entity = &dcmi->entity.source->entity;
+ struct media_entity *entity = &dcmi->source->entity;
struct v4l2_subdev *subdev;
struct media_pad *sink_pad = NULL;
struct media_pad *src_pad = NULL;
@@ -1018,7 +1011,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
+ ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
return ret;
@@ -1152,7 +1145,7 @@ static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
};
int ret;
- ret = v4l2_subdev_call(dcmi->entity.source, pad, get_fmt, NULL, &fmt);
+ ret = v4l2_subdev_call(dcmi->source, pad, get_fmt, NULL, &fmt);
if (ret)
return ret;
@@ -1181,7 +1174,7 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
+ ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
return ret;
@@ -1204,7 +1197,7 @@ static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
/*
* Get sensor bounds first
*/
- ret = v4l2_subdev_call(dcmi->entity.source, pad, get_selection,
+ ret = v4l2_subdev_call(dcmi->source, pad, get_selection,
NULL, &bounds);
if (!ret)
*r = bounds.r;
@@ -1385,7 +1378,7 @@ static int dcmi_enum_framesizes(struct file *file, void *fh,
fse.code = sd_fmt->mbus_code;
- ret = v4l2_subdev_call(dcmi->entity.source, pad, enum_frame_size,
+ ret = v4l2_subdev_call(dcmi->source, pad, enum_frame_size,
NULL, &fse);
if (ret)
return ret;
@@ -1402,7 +1395,7 @@ static int dcmi_g_parm(struct file *file, void *priv,
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.source, p);
+ return v4l2_g_parm_cap(video_devdata(file), dcmi->source, p);
}
static int dcmi_s_parm(struct file *file, void *priv,
@@ -1410,7 +1403,7 @@ static int dcmi_s_parm(struct file *file, void *priv,
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.source, p);
+ return v4l2_s_parm_cap(video_devdata(file), dcmi->source, p);
}
static int dcmi_enum_frameintervals(struct file *file, void *fh,
@@ -1432,7 +1425,7 @@ static int dcmi_enum_frameintervals(struct file *file, void *fh,
fie.code = sd_fmt->mbus_code;
- ret = v4l2_subdev_call(dcmi->entity.source, pad,
+ ret = v4l2_subdev_call(dcmi->source, pad,
enum_frame_interval, NULL, &fie);
if (ret)
return ret;
@@ -1452,7 +1445,7 @@ MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
static int dcmi_open(struct file *file)
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- struct v4l2_subdev *sd = dcmi->entity.source;
+ struct v4l2_subdev *sd = dcmi->source;
int ret;
if (mutex_lock_interruptible(&dcmi->lock))
@@ -1483,7 +1476,7 @@ unlock:
static int dcmi_release(struct file *file)
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- struct v4l2_subdev *sd = dcmi->entity.source;
+ struct v4l2_subdev *sd = dcmi->source;
bool fh_singular;
int ret;
@@ -1616,7 +1609,7 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
{
const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
unsigned int num_fmts = 0, i, j;
- struct v4l2_subdev *subdev = dcmi->entity.source;
+ struct v4l2_subdev *subdev = dcmi->source;
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
@@ -1675,7 +1668,7 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
{
unsigned int num_fsize = 0;
- struct v4l2_subdev *subdev = dcmi->entity.source;
+ struct v4l2_subdev *subdev = dcmi->source;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = dcmi->sd_format->mbus_code,
@@ -1727,14 +1720,13 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
* we search for the source subdevice
* in order to expose it through V4L2 interface
*/
- dcmi->entity.source =
- media_entity_to_v4l2_subdev(dcmi_find_source(dcmi));
- if (!dcmi->entity.source) {
+ dcmi->source = media_entity_to_v4l2_subdev(dcmi_find_source(dcmi));
+ if (!dcmi->source) {
dev_err(dcmi->dev, "Source subdevice not found\n");
return -ENODEV;
}
- dcmi->vdev->ctrl_handler = dcmi->entity.source->ctrl_handler;
+ dcmi->vdev->ctrl_handler = dcmi->source->ctrl_handler;
ret = dcmi_formats_init(dcmi);
if (ret) {
@@ -1813,46 +1805,29 @@ static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
.complete = dcmi_graph_notify_complete,
};
-static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
-{
- struct device_node *ep = NULL;
- struct device_node *remote;
-
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
-
- remote = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- if (!remote)
- return -EINVAL;
-
- /* Remote node to connect */
- dcmi->entity.remote_node = remote;
- dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
-}
-
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
+ struct v4l2_async_subdev *asd;
+ struct device_node *ep;
int ret;
- /* Parse the graph to extract a list of subdevice DT nodes. */
- ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
- if (ret < 0) {
- dev_err(dcmi->dev, "Failed to parse graph\n");
- return ret;
+ ep = of_graph_get_next_endpoint(dcmi->dev->of_node, NULL);
+ if (!ep) {
+ dev_err(dcmi->dev, "Failed to get next endpoint\n");
+ return -EINVAL;
}
v4l2_async_notifier_init(&dcmi->notifier);
- ret = v4l2_async_notifier_add_subdev(&dcmi->notifier,
- &dcmi->entity.asd);
- if (ret) {
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &dcmi->notifier, of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
+
+ of_node_put(ep);
+
+ if (IS_ERR(asd)) {
dev_err(dcmi->dev, "Failed to add subdev notifier\n");
- of_node_put(dcmi->entity.remote_node);
- return ret;
+ return PTR_ERR(asd);
}
dcmi->notifier.ops = &dcmi_graph_notify_ops;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index ec46cff80fdb..8d40a7acba9c 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -118,6 +118,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_PARALLEL,
};
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *ep;
int ret;
@@ -134,10 +135,12 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
csi->bus = vep.bus.parallel;
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier,
- ep, &csi->asd);
- if (ret)
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier, ep,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
goto out;
+ }
csi->notifier.ops = &sun4i_csi_notify_ops;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
index 0f67ff652c2e..a5f61ee0ec4d 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -139,7 +139,6 @@ struct sun4i_csi {
struct v4l2_mbus_framefmt subdev_fmt;
/* V4L2 Async variables */
- struct v4l2_async_subdev asd;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *src_subdev;
int src_pad;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
index 1a2f65d83a6c..4785faddf630 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -113,8 +113,6 @@ static void _sun4i_csi_try_fmt(struct sun4i_csi *csi,
pix->num_planes = _fmt->num_planes;
pix->pixelformat = _fmt->fourcc;
- memset(pix->reserved, 0, sizeof(pix->reserved));
-
/* Align the width and height on the subsampling */
width = ALIGN(pix->width, _fmt->hsub);
height = ALIGN(pix->height, _fmt->vsub);
@@ -131,8 +129,6 @@ static void _sun4i_csi_try_fmt(struct sun4i_csi *csi,
bpl = pix->width / hsub * _fmt->bpp[i] / 8;
pix->plane_fmt[i].bytesperline = bpl;
pix->plane_fmt[i].sizeimage = bpl * pix->height / vsub;
- memset(pix->plane_fmt[i].reserved, 0,
- sizeof(pix->plane_fmt[i].reserved));
}
}
diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti-vpe/cal-camerarx.c
index 806cbf175d39..dd48017859cd 100644
--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
+++ b/drivers/media/platform/ti-vpe/cal-camerarx.c
@@ -116,8 +116,7 @@ void cal_camerarx_disable(struct cal_camerarx *phy)
#define TCLK_MISS 1
#define TCLK_SETTLE 14
-static void cal_camerarx_config(struct cal_camerarx *phy, s64 external_rate,
- const struct cal_fmt *fmt)
+static void cal_camerarx_config(struct cal_camerarx *phy, s64 external_rate)
{
unsigned int reg0, reg1;
unsigned int ths_term, ths_settle;
@@ -132,9 +131,9 @@ static void cal_camerarx_config(struct cal_camerarx *phy, s64 external_rate,
* CSI-2 is DDR and we only count used lanes.
*
* csi2_ddrclk_khz = external_rate / 1000
- * / (2 * num_lanes) * fmt->bpp;
+ * / (2 * num_lanes) * phy->fmtinfo->bpp;
*/
- csi2_ddrclk_khz = div_s64(external_rate * fmt->bpp,
+ csi2_ddrclk_khz = div_s64(external_rate * phy->fmtinfo->bpp,
2 * num_lanes * 1000);
phy_dbg(1, phy, "csi2_ddrclk_khz: %d\n", csi2_ddrclk_khz);
@@ -234,7 +233,42 @@ static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy)
phy_err(phy, "Timeout waiting for stop state\n");
}
-int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
+static void cal_camerarx_enable_irqs(struct cal_camerarx *phy)
+{
+ const u32 cio_err_mask =
+ CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
+
+ /* Enable CIO error IRQs. */
+ cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0),
+ CAL_HL_IRQ_CIO_MASK(phy->instance));
+ cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance),
+ cio_err_mask);
+}
+
+static void cal_camerarx_disable_irqs(struct cal_camerarx *phy)
+{
+ /* Disable CIO error irqs */
+ cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0),
+ CAL_HL_IRQ_CIO_MASK(phy->instance));
+ cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0);
+}
+
+static void cal_camerarx_ppi_enable(struct cal_camerarx *phy)
+{
+ cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
+ 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
+{
+ cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
+ 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static int cal_camerarx_start(struct cal_camerarx *phy)
{
s64 external_rate;
u32 sscounter;
@@ -251,6 +285,8 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
return ret;
}
+ cal_camerarx_enable_irqs(phy);
+
/*
* CSI-2 PHY Link Initialization Sequence, according to the DRA74xP /
* DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x /
@@ -289,7 +325,7 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
camerarx_read(phy, CAL_CSI2_PHY_REG0);
/* Program the PHY timing parameters. */
- cal_camerarx_config(phy, external_rate, fmt);
+ cal_camerarx_config(phy, external_rate);
/*
* b. Assert the FORCERXMODE signal.
@@ -340,6 +376,7 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
ret = v4l2_subdev_call(phy->sensor, video, s_stream, 1);
if (ret) {
v4l2_subdev_call(phy->sensor, core, s_power, 0);
+ cal_camerarx_disable_irqs(phy);
phy_err(phy, "stream on failed in subdev\n");
return ret;
}
@@ -359,14 +396,21 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
* implemented.
*/
+ /* Finally, enable the PHY Protocol Interface (PPI). */
+ cal_camerarx_ppi_enable(phy);
+
return 0;
}
-void cal_camerarx_stop(struct cal_camerarx *phy)
+static void cal_camerarx_stop(struct cal_camerarx *phy)
{
unsigned int i;
int ret;
+ cal_camerarx_ppi_disable(phy);
+
+ cal_camerarx_disable_irqs(phy);
+
cal_camerarx_power(phy, false);
/* Assert Complex IO Reset */
@@ -428,74 +472,6 @@ void cal_camerarx_i913_errata(struct cal_camerarx *phy)
camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10);
}
-/*
- * Enable the expected IRQ sources
- */
-void cal_camerarx_enable_irqs(struct cal_camerarx *phy)
-{
- u32 val;
-
- const u32 cio_err_mask =
- CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
-
- /* Enable CIO error irqs */
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0),
- CAL_HL_IRQ_CIO_MASK(phy->instance));
- cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance),
- cio_err_mask);
-
- /* Always enable OCPO error */
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
-
- /* Enable IRQ_WDMA_END 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(1), val);
- /* Enable IRQ_WDMA_START 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(2), val);
- /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
- cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(0), 0xFF000000);
-}
-
-void cal_camerarx_disable_irqs(struct cal_camerarx *phy)
-{
- u32 val;
-
- /* Disable CIO error irqs */
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0),
- CAL_HL_IRQ_CIO_MASK(phy->instance));
- cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0);
-
- /* Disable IRQ_WDMA_END 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(1), val);
- /* Disable IRQ_WDMA_START 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(2), val);
- /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
- cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(0), 0);
-}
-
-void cal_camerarx_ppi_enable(struct cal_camerarx *phy)
-{
- cal_write(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), BIT(3));
- cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
- 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
-}
-
-void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
-{
- cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
- 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
-}
-
static int cal_camerarx_regmap_init(struct cal_dev *cal,
struct cal_camerarx *phy)
{
@@ -533,8 +509,8 @@ static int cal_camerarx_regmap_init(struct cal_dev *cal,
static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
{
struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint;
- struct device_node *ep_node;
char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES * 2];
+ struct device_node *ep_node;
unsigned int i;
int ret;
@@ -582,9 +558,11 @@ static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
endpoint->bus.mipi_csi2.flags);
/* Retrieve the connected device and store it for later use. */
- phy->sensor_node = of_graph_get_remote_port_parent(ep_node);
+ phy->sensor_ep_node = of_graph_get_remote_endpoint(ep_node);
+ phy->sensor_node = of_graph_get_port_parent(phy->sensor_ep_node);
if (!phy->sensor_node) {
phy_dbg(3, phy, "Can't get remote parent\n");
+ of_node_put(phy->sensor_ep_node);
ret = -EINVAL;
goto done;
}
@@ -596,11 +574,227 @@ done:
return ret;
}
+/* ------------------------------------------------------------------
+ * V4L2 Subdev Operations
+ * ------------------------------------------------------------------
+ */
+
+static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct cal_camerarx, subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+cal_camerarx_get_pad_format(struct cal_camerarx *phy,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&phy->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &phy->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+
+ if (enable)
+ return cal_camerarx_start(phy);
+
+ cal_camerarx_stop(phy);
+ return 0;
+}
+
+static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+
+ /* No transcoding, source and sink codes must match. */
+ if (code->pad == CAL_CAMERARX_PAD_SOURCE) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ fmt = cal_camerarx_get_pad_format(phy, cfg,
+ CAL_CAMERARX_PAD_SINK,
+ code->which);
+ code->code = fmt->code;
+ return 0;
+ }
+
+ if (code->index >= cal_num_formats)
+ return -EINVAL;
+
+ code->code = cal_formats[code->index].code;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ const struct cal_format_info *fmtinfo;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ /* No transcoding, source and sink formats must match. */
+ if (fse->pad == CAL_CAMERARX_PAD_SOURCE) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = cal_camerarx_get_pad_format(phy, cfg,
+ CAL_CAMERARX_PAD_SINK,
+ fse->which);
+ if (fse->code != fmt->code)
+ return -EINVAL;
+
+ fse->min_width = fmt->width;
+ fse->max_width = fmt->width;
+ fse->min_height = fmt->height;
+ fse->max_height = fmt->height;
+
+ return 0;
+ }
+
+ fmtinfo = cal_format_by_code(fse->code);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
+ fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
+ fse->min_height = CAL_MIN_HEIGHT_LINES;
+ fse->max_height = CAL_MAX_HEIGHT_LINES;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = cal_camerarx_get_pad_format(phy, cfg, format->pad, format->which);
+ format->format = *fmt;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int bpp;
+
+ /* No transcoding, source and sink formats must match. */
+ if (format->pad == CAL_CAMERARX_PAD_SOURCE)
+ return cal_camerarx_sd_get_fmt(sd, cfg, format);
+
+ /*
+ * Default to the first format if the requested media bus code isn't
+ * supported.
+ */
+ fmtinfo = cal_format_by_code(format->format.code);
+ if (!fmtinfo)
+ fmtinfo = &cal_formats[0];
+
+ /*
+ * Clamp the size, update the code. The field and colorspace are
+ * accepted as-is.
+ */
+ bpp = ALIGN(fmtinfo->bpp, 8);
+
+ format->format.width = clamp_t(unsigned int, format->format.width,
+ CAL_MIN_WIDTH_BYTES * 8 / bpp,
+ CAL_MAX_WIDTH_BYTES * 8 / bpp);
+ format->format.height = clamp_t(unsigned int, format->format.height,
+ CAL_MIN_HEIGHT_LINES,
+ CAL_MAX_HEIGHT_LINES);
+ format->format.code = fmtinfo->code;
+
+ /* Store the format and propagate it to the source pad. */
+ fmt = cal_camerarx_get_pad_format(phy, cfg, CAL_CAMERARX_PAD_SINK,
+ format->which);
+ *fmt = format->format;
+
+ fmt = cal_camerarx_get_pad_format(phy, cfg, CAL_CAMERARX_PAD_SOURCE,
+ format->which);
+ *fmt = format->format;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ phy->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format format = {
+ .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CAL_CAMERARX_PAD_SINK,
+ .format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ },
+ };
+
+ return cal_camerarx_sd_set_fmt(sd, cfg, &format);
+}
+
+static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
+ .s_stream = cal_camerarx_sd_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
+ .init_cfg = cal_camerarx_sd_init_cfg,
+ .enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
+ .enum_frame_size = cal_camerarx_sd_enum_frame_size,
+ .get_fmt = cal_camerarx_sd_get_fmt,
+ .set_fmt = cal_camerarx_sd_set_fmt,
+};
+
+static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
+ .video = &cal_camerarx_video_ops,
+ .pad = &cal_camerarx_pad_ops,
+};
+
+static struct media_entity_operations cal_camerarx_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* ------------------------------------------------------------------
+ * Create and Destroy
+ * ------------------------------------------------------------------
+ */
+
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
unsigned int instance)
{
struct platform_device *pdev = to_platform_device(cal->dev);
struct cal_camerarx *phy;
+ struct v4l2_subdev *sd;
int ret;
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
@@ -632,9 +826,31 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
if (ret)
goto error;
+ /* Initialize the V4L2 subdev and media entity. */
+ sd = &phy->subdev;
+ v4l2_subdev_init(sd, &cal_camerarx_subdev_ops);
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance);
+ sd->dev = cal->dev;
+
+ phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ phy->pads[CAL_CAMERARX_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.ops = &cal_camerarx_media_ops;
+ ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads),
+ phy->pads);
+ if (ret)
+ goto error;
+
+ cal_camerarx_sd_init_cfg(sd, NULL);
+
+ ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
+ if (ret)
+ goto error;
+
return phy;
error:
+ media_entity_cleanup(&phy->subdev.entity);
kfree(phy);
return ERR_PTR(ret);
}
@@ -644,6 +860,9 @@ void cal_camerarx_destroy(struct cal_camerarx *phy)
if (!phy)
return;
+ v4l2_device_unregister_subdev(&phy->subdev);
+ media_entity_cleanup(&phy->subdev.entity);
+ of_node_put(phy->sensor_ep_node);
of_node_put(phy->sensor_node);
kfree(phy);
}
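
With CAMERARX registered as a V4L2 subdev, formats are configured per pad, and cal_camerarx_sd_set_fmt() propagates the sink format to the source pad (no transcoding). A hypothetical userspace round-trip over the subdev node (device path and pad numbering are assumptions; sink = pad 0, source = pad 1):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/media-bus-format.h>
    #include <linux/v4l2-subdev.h>

    int main(void)
    {
            struct v4l2_subdev_format fmt;
            int fd = open("/dev/v4l-subdev0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&fmt, 0, sizeof(fmt));
            fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            fmt.pad = 0;                            /* sink pad */
            fmt.format.width = 1280;
            fmt.format.height = 720;
            fmt.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
            if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt))
                    return 1;

            fmt.pad = 1;                            /* source pad */
            if (ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt))
                    return 1;
            printf("source pad: %ux%u code %#x\n",
                   fmt.format.width, fmt.format.height, fmt.format.code);
            return 0;
    }
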
diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
index df472a175e83..779f1e1bc529 100644
--- a/drivers/media/platform/ti-vpe/cal-video.c
+++ b/drivers/media/platform/ti-vpe/cal-video.c
@@ -9,7 +9,6 @@
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
-#include <linux/delay.h>
#include <linux/ioctl.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
@@ -26,107 +25,6 @@
#include "cal.h"
-/* ------------------------------------------------------------------
- * Format Handling
- * ------------------------------------------------------------------
- */
-
-static const struct cal_fmt cal_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_YVYU,
- .code = MEDIA_BUS_FMT_YVYU8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_VYUY,
- .code = MEDIA_BUS_FMT_VYUY8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
- .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
- .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .bpp = 32,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG8,
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG8,
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB8,
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR10,
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG10,
- .code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG10,
- .code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB10,
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR12,
- .code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG12,
- .code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG12,
- .code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB12,
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .bpp = 12,
- },
-};
-
/* Print Four-character-code (FOURCC) */
static char *fourcc_to_str(u32 fmt)
{
@@ -146,31 +44,31 @@ static char *fourcc_to_str(u32 fmt)
* ------------------------------------------------------------------
*/
-static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
- u32 pixelformat)
+static const struct cal_format_info *find_format_by_pix(struct cal_ctx *ctx,
+ u32 pixelformat)
{
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
unsigned int k;
for (k = 0; k < ctx->num_active_fmt; k++) {
- fmt = ctx->active_fmt[k];
- if (fmt->fourcc == pixelformat)
- return fmt;
+ fmtinfo = ctx->active_fmt[k];
+ if (fmtinfo->fourcc == pixelformat)
+ return fmtinfo;
}
return NULL;
}
-static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
- u32 code)
+static const struct cal_format_info *find_format_by_code(struct cal_ctx *ctx,
+ u32 code)
{
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
unsigned int k;
for (k = 0; k < ctx->num_active_fmt; k++) {
- fmt = ctx->active_fmt[k];
- if (fmt->code == code)
- return fmt;
+ fmtinfo = ctx->active_fmt[k];
+ if (fmtinfo->code == code)
+ return fmtinfo;
}
return NULL;
@@ -193,14 +91,14 @@ static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
if (f->index >= ctx->num_active_fmt)
return -EINVAL;
- fmt = ctx->active_fmt[f->index];
+ fmtinfo = ctx->active_fmt[f->index];
- f->pixelformat = fmt->fourcc;
+ f->pixelformat = fmtinfo->fourcc;
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
}
@@ -248,27 +146,23 @@ static int __subdev_set_format(struct cal_ctx *ctx,
return 0;
}
-static int cal_calc_format_size(struct cal_ctx *ctx,
- const struct cal_fmt *fmt,
- struct v4l2_format *f)
+static void cal_calc_format_size(struct cal_ctx *ctx,
+ const struct cal_format_info *fmtinfo,
+ struct v4l2_format *f)
{
u32 bpl, max_width;
- if (!fmt) {
- ctx_dbg(3, ctx, "No cal_fmt provided!\n");
- return -EINVAL;
- }
-
/*
* Maximum width is bound by the DMA max width in bytes.
 	 * We need to recalculate the actual maximum width depending on the
 	 * number of bytes per pixel required.
*/
- max_width = MAX_WIDTH_BYTES / (ALIGN(fmt->bpp, 8) >> 3);
+ max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(fmtinfo->bpp, 8) >> 3);
v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
- &f->fmt.pix.height, 32, MAX_HEIGHT_LINES, 0, 0);
+ &f->fmt.pix.height, 32, CAL_MAX_HEIGHT_LINES,
+ 0, 0);
- bpl = (f->fmt.pix.width * ALIGN(fmt->bpp, 8)) >> 3;
+ bpl = (f->fmt.pix.width * ALIGN(fmtinfo->bpp, 8)) >> 3;
f->fmt.pix.bytesperline = ALIGN(bpl, 16);
f->fmt.pix.sizeimage = f->fmt.pix.height *
@@ -278,8 +172,6 @@ static int cal_calc_format_size(struct cal_ctx *ctx,
__func__, fourcc_to_str(f->fmt.pix.pixelformat),
f->fmt.pix.width, f->fmt.pix.height,
f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
-
- return 0;
}
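A worked instance of the computation above may help (a sketch, assuming a 1280x720 request in a 16 bpp format such as YUYV):

	max_width    = CAL_MAX_WIDTH_BYTES / (ALIGN(16, 8) >> 3);	/* 65536 / 2 = 32768 pixels */
	bpl          = (1280 * ALIGN(16, 8)) >> 3;			/* 2560 bytes */
	bytesperline = ALIGN(2560, 16);					/* 2560, already aligned */
	sizeimage    = 720 * 2560;					/* 1843200 bytes */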
static int cal_g_fmt_vid_cap(struct file *file, void *priv,
@@ -296,18 +188,18 @@ static int cal_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse;
int ret, found;
- fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
- if (!fmt) {
+ fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+ if (!fmtinfo) {
ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
f->fmt.pix.pixelformat);
/* Just get the first one enumerated */
- fmt = ctx->active_fmt[0];
- f->fmt.pix.pixelformat = fmt->fourcc;
+ fmtinfo = ctx->active_fmt[0];
+ f->fmt.pix.pixelformat = fmtinfo->fourcc;
}
f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
@@ -316,7 +208,7 @@ static int cal_try_fmt_vid_cap(struct file *file, void *priv,
ret = 0;
found = false;
fse.pad = 0;
- fse.code = fmt->code;
+ fse.code = fmtinfo->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
for (fse.index = 0; ; fse.index++) {
ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_size,
@@ -348,7 +240,8 @@ static int cal_try_fmt_vid_cap(struct file *file, void *priv,
* updated properly during s_fmt
*/
f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
- return cal_calc_format_size(ctx, fmt, f);
+ cal_calc_format_size(ctx, fmtinfo, f);
+ return 0;
}
static int cal_s_fmt_vid_cap(struct file *file, void *priv,
@@ -356,8 +249,11 @@ static int cal_s_fmt_vid_cap(struct file *file, void *priv,
{
struct cal_ctx *ctx = video_drvdata(file);
struct vb2_queue *q = &ctx->vb_vidq;
- const struct cal_fmt *fmt;
- struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CAL_CAMERARX_PAD_SINK,
+ };
+ const struct cal_format_info *fmtinfo;
int ret;
if (vb2_is_busy(q)) {
@@ -369,28 +265,31 @@ static int cal_s_fmt_vid_cap(struct file *file, void *priv,
if (ret < 0)
return ret;
- fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+ fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
+ v4l2_fill_mbus_format(&sd_fmt.format, &f->fmt.pix, fmtinfo->code);
- ret = __subdev_set_format(ctx, &mbus_fmt);
+ ret = __subdev_set_format(ctx, &sd_fmt.format);
if (ret)
return ret;
/* Just double check nothing has gone wrong */
- if (mbus_fmt.code != fmt->code) {
+ if (sd_fmt.format.code != fmtinfo->code) {
ctx_dbg(3, ctx,
"%s subdev changed format on us, this should not happen\n",
__func__);
return -EINVAL;
}
- v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &sd_fmt.format);
ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
- cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
- ctx->fmt = fmt;
- ctx->m_fmt = mbus_fmt;
+ ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
+ ctx->v_fmt.fmt.pix.field = sd_fmt.format.field;
+ cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
+
+ v4l2_subdev_call(&ctx->phy->subdev, pad, set_fmt, NULL, &sd_fmt);
+
+ ctx->fmtinfo = fmtinfo;
*f = ctx->v_fmt;
return 0;
@@ -400,13 +299,13 @@ static int cal_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse;
int ret;
/* check for valid format */
- fmt = find_format_by_pix(ctx, fsize->pixel_format);
- if (!fmt) {
+ fmtinfo = find_format_by_pix(ctx, fsize->pixel_format);
+ if (!fmtinfo) {
ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
fsize->pixel_format);
return -EINVAL;
@@ -414,7 +313,7 @@ static int cal_enum_framesizes(struct file *file, void *fh,
fse.index = fsize->index;
fse.pad = 0;
- fse.code = fmt->code;
+ fse.code = fmtinfo->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_size, NULL,
@@ -460,7 +359,7 @@ static int cal_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_interval_enum fie = {
.index = fival->index,
.width = fival->width,
@@ -469,11 +368,11 @@ static int cal_enum_frameintervals(struct file *file, void *priv,
};
int ret;
- fmt = find_format_by_pix(ctx, fival->pixel_format);
- if (!fmt)
+ fmtinfo = find_format_by_pix(ctx, fival->pixel_format);
+ if (!fmtinfo)
return -EINVAL;
- fie.code = fmt->code;
+ fie.code = fmtinfo->code;
ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_interval,
NULL, &fie);
if (ret)
@@ -488,7 +387,6 @@ static const struct v4l2_file_operations cal_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
- .read = vb2_fop_read,
.poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = vb2_fop_mmap,
@@ -555,9 +453,6 @@ static int cal_buffer_prepare(struct vb2_buffer *vb)
vb.vb2_buf);
unsigned long size;
- if (WARN_ON(!ctx->fmt))
- return -EINVAL;
-
size = ctx->v_fmt.fmt.pix.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
ctx_err(ctx,
@@ -575,121 +470,88 @@ static void cal_buffer_queue(struct vb2_buffer *vb)
struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct cal_buffer *buf = container_of(vb, struct cal_buffer,
vb.vb2_buf);
- struct cal_dmaqueue *vidq = &ctx->vidq;
unsigned long flags;
/* recheck locking */
- spin_lock_irqsave(&ctx->slock, flags);
- list_add_tail(&buf->list, &vidq->active);
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->dma.lock, flags);
+ list_add_tail(&buf->list, &ctx->dma.queue);
+ spin_unlock_irqrestore(&ctx->dma.lock, flags);
}
-static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+static void cal_release_buffers(struct cal_ctx *ctx,
+ enum vb2_buffer_state state)
{
- struct cal_ctx *ctx = vb2_get_drv_priv(vq);
- struct cal_dmaqueue *dma_q = &ctx->vidq;
struct cal_buffer *buf, *tmp;
- unsigned long addr;
- unsigned long flags;
- int ret;
- spin_lock_irqsave(&ctx->slock, flags);
- if (list_empty(&dma_q->active)) {
- spin_unlock_irqrestore(&ctx->slock, flags);
- ctx_dbg(3, ctx, "buffer queue is empty\n");
- return -EIO;
+ /* Release all queued buffers. */
+ spin_lock_irq(&ctx->dma.lock);
+
+ list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+
+ if (ctx->dma.pending) {
+ vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
+ ctx->dma.pending = NULL;
+ }
+
+ if (ctx->dma.active) {
+ vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+ ctx->dma.active = NULL;
}
- buf = list_entry(dma_q->active.next, struct cal_buffer, list);
- ctx->cur_frm = buf;
- ctx->next_frm = buf;
+ spin_unlock_irq(&ctx->dma.lock);
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_buffer *buf;
+ dma_addr_t addr;
+ int ret;
+
+ spin_lock_irq(&ctx->dma.lock);
+ buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
+ ctx->dma.pending = buf;
list_del(&buf->list);
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irq(&ctx->dma.lock);
- addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
- ctx->sequence = 0;
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
pm_runtime_get_sync(ctx->cal->dev);
- cal_ctx_csi2_config(ctx);
- cal_ctx_pix_proc_config(ctx);
- cal_ctx_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
- ctx->v_fmt.fmt.pix.height);
+ cal_ctx_set_dma_addr(ctx, addr);
+ cal_ctx_start(ctx);
- cal_camerarx_enable_irqs(ctx->phy);
-
- ret = cal_camerarx_start(ctx->phy, ctx->fmt);
+ ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1);
if (ret)
goto err;
- cal_ctx_wr_dma_addr(ctx, addr);
- cal_camerarx_ppi_enable(ctx->phy);
-
if (cal_debug >= 4)
cal_quickdump_regs(ctx->cal);
return 0;
err:
- spin_lock_irqsave(&ctx->slock, flags);
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- ctx->cur_frm = NULL;
- ctx->next_frm = NULL;
- list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
- spin_unlock_irqrestore(&ctx->slock, flags);
+ cal_ctx_stop(ctx);
+ pm_runtime_put_sync(ctx->cal->dev);
+
+ cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED);
return ret;
}
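A design note on the error path above: vb2 expects a failing .start_streaming() to hand back every buffer it has received in the VB2_BUF_STATE_QUEUED state, which is what cal_release_buffers() guarantees here. A condensed sketch of that contract (hypothetical foo_* driver, not CAL code):

	static int foo_start_streaming(struct vb2_queue *vq, unsigned int count)
	{
		int ret = foo_hw_start(vq);	/* hypothetical hardware start */

		if (ret)
			foo_return_buffers(vq, VB2_BUF_STATE_QUEUED);
		return ret;
	}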
static void cal_stop_streaming(struct vb2_queue *vq)
{
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
- struct cal_dmaqueue *dma_q = &ctx->vidq;
- struct cal_buffer *buf, *tmp;
- unsigned long timeout;
- unsigned long flags;
- bool dma_act;
-
- cal_camerarx_ppi_disable(ctx->phy);
-
- /* wait for stream and dma to finish */
- dma_act = true;
- timeout = jiffies + msecs_to_jiffies(500);
- while (dma_act && time_before(jiffies, timeout)) {
- msleep(50);
-
- spin_lock_irqsave(&ctx->slock, flags);
- dma_act = ctx->dma_act;
- spin_unlock_irqrestore(&ctx->slock, flags);
- }
-
- if (dma_act)
- ctx_err(ctx, "failed to disable dma cleanly\n");
-
- cal_camerarx_disable_irqs(ctx->phy);
- cal_camerarx_stop(ctx->phy);
- /* Release all active buffers */
- spin_lock_irqsave(&ctx->slock, flags);
- list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
+ cal_ctx_stop(ctx);
- if (ctx->cur_frm == ctx->next_frm) {
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- } else {
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- ctx->cur_frm = NULL;
- ctx->next_frm = NULL;
- spin_unlock_irqrestore(&ctx->slock, flags);
+ v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0);
pm_runtime_put_sync(ctx->cal->dev);
+
+ cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops cal_video_qops = {
@@ -713,20 +575,19 @@ static const struct video_device cal_videodev = {
.ioctl_ops = &cal_ioctl_ops,
.minor = -1,
.release = video_device_release_empty,
- .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
};
static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
{
struct v4l2_subdev_mbus_code_enum mbus_code;
struct v4l2_mbus_framefmt mbus_fmt;
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
unsigned int i, j, k;
int ret = 0;
/* Enumerate sub device formats and enable all matching local formats */
- ctx->active_fmt = devm_kcalloc(ctx->cal->dev, ARRAY_SIZE(cal_formats),
+ ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats,
sizeof(*ctx->active_fmt), GFP_KERNEL);
ctx->num_active_fmt = 0;
@@ -744,15 +605,15 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
"subdev %s: code: %04x idx: %u\n",
ctx->phy->sensor->name, mbus_code.code, j);
- for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
- const struct cal_fmt *fmt = &cal_formats[k];
+ for (k = 0; k < cal_num_formats; k++) {
+ fmtinfo = &cal_formats[k];
- if (mbus_code.code == fmt->code) {
- ctx->active_fmt[i] = fmt;
+ if (mbus_code.code == fmtinfo->code) {
+ ctx->active_fmt[i] = fmtinfo;
ctx_dbg(2, ctx,
"matched fourcc: %s: code: %04x idx: %u\n",
- fourcc_to_str(fmt->fourcc),
- fmt->code, i);
+ fourcc_to_str(fmtinfo->fourcc),
+ fmtinfo->code, i);
ctx->num_active_fmt = ++i;
}
}
@@ -768,20 +629,20 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
if (ret)
return ret;
- fmt = find_format_by_code(ctx, mbus_fmt.code);
- if (!fmt) {
+ fmtinfo = find_format_by_code(ctx, mbus_fmt.code);
+ if (!fmtinfo) {
ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
mbus_fmt.code);
return -EINVAL;
}
- /* Save current subdev format */
+ /* Save current format */
v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
- cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
- ctx->fmt = fmt;
- ctx->m_fmt = mbus_fmt;
+ ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
+ ctx->v_fmt.fmt.pix.field = mbus_fmt.field;
+ cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
+ ctx->fmtinfo = fmtinfo;
return 0;
}
@@ -809,6 +670,18 @@ int cal_ctx_v4l2_register(struct cal_ctx *ctx)
return ret;
}
+ ret = media_create_pad_link(&ctx->phy->subdev.entity,
+ CAL_CAMERARX_PAD_SOURCE,
+ &vfd->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ ctx_err(ctx, "Failed to create media link for context %u\n",
+ ctx->index);
+ video_unregister_device(vfd);
+ return ret;
+ }
+
ctx_info(ctx, "V4L2 device registered as %s\n",
video_device_node_name(vfd));
@@ -830,13 +703,14 @@ int cal_ctx_v4l2_init(struct cal_ctx *ctx)
struct vb2_queue *q = &ctx->vb_vidq;
int ret;
- INIT_LIST_HEAD(&ctx->vidq.active);
- spin_lock_init(&ctx->slock);
+ INIT_LIST_HEAD(&ctx->dma.queue);
+ spin_lock_init(&ctx->dma.lock);
mutex_init(&ctx->mutex);
+ init_waitqueue_head(&ctx->dma.wait);
/* Initialize the vb2 queue. */
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
q->drv_priv = ctx;
q->buf_struct_size = sizeof(struct cal_buffer);
q->ops = &cal_video_qops;
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 59a0266b1f39..fa0931788040 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -44,6 +44,133 @@ module_param_named(debug, cal_debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
/* ------------------------------------------------------------------
+ * Format Handling
+ * ------------------------------------------------------------------
+ */
+
+const struct cal_format_info cal_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .bpp = 12,
+ },
+};
+
+const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats);
+
+const struct cal_format_info *cal_format_by_fourcc(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
+ if (cal_formats[i].fourcc == fourcc)
+ return &cal_formats[i];
+ }
+
+ return NULL;
+}
+
+const struct cal_format_info *cal_format_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
+ if (cal_formats[i].code == code)
+ return &cal_formats[i];
+ }
+
+ return NULL;
+}
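Usage sketch for the two lookup helpers exported above (hypothetical call site):

	const struct cal_format_info *info;

	info = cal_format_by_fourcc(V4L2_PIX_FMT_UYVY);
	if (info)
		pr_info("UYVY: mbus code 0x%04x, %u bpp\n", info->code, info->bpp);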
+
+/* ------------------------------------------------------------------
* Platform Data
* ------------------------------------------------------------------
*/
@@ -136,12 +263,9 @@ void cal_quickdump_regs(struct cal_dev *cal)
(__force const void *)cal->base,
resource_size(cal->res), false);
- for (i = 0; i < ARRAY_SIZE(cal->phy); ++i) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
struct cal_camerarx *phy = cal->phy[i];
- if (!phy)
- continue;
-
cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i,
&phy->res->start);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
@@ -156,7 +280,7 @@ void cal_quickdump_regs(struct cal_dev *cal)
* ------------------------------------------------------------------
*/
-void cal_ctx_csi2_config(struct cal_ctx *ctx)
+static void cal_ctx_csi2_config(struct cal_ctx *ctx)
{
u32 val;
@@ -181,11 +305,11 @@ void cal_ctx_csi2_config(struct cal_ctx *ctx)
cal_read(ctx->cal, CAL_CSI2_CTX0(ctx->index)));
}
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
+static void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
{
u32 val, extract, pack;
- switch (ctx->fmt->bpp) {
+ switch (ctx->fmtinfo->bpp) {
case 8:
extract = CAL_PIX_PROC_EXTRACT_B8;
pack = CAL_PIX_PROC_PACK_B8;
@@ -214,7 +338,7 @@ void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
*/
dev_warn_once(ctx->cal->dev,
"%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
- __FILE__, __LINE__, __func__, ctx->fmt->bpp);
+ __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp);
extract = CAL_PIX_PROC_EXTRACT_B8;
pack = CAL_PIX_PROC_PACK_B8;
break;
@@ -232,14 +356,15 @@ void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
cal_read(ctx->cal, CAL_PIX_PROC(ctx->index)));
}
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
- unsigned int height)
+static void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
{
+ unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline;
u32 val;
val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index));
cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK);
- cal_set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
+ cal_set_field(&val, ctx->v_fmt.fmt.pix.height,
+ CAL_WR_DMA_CTRL_YSIZE_MASK);
cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
CAL_WR_DMA_CTRL_DTAG_MASK);
cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
@@ -251,14 +376,8 @@ void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->index,
cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index)));
- /*
- * width/16 not sure but giving it a whirl.
- * zero does not work right
- */
- cal_write_field(ctx->cal,
- CAL_WR_DMA_OFST(ctx->index),
- (width / 16),
- CAL_WR_DMA_OFST_MASK);
+ cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->index),
+ stride / 16, CAL_WR_DMA_OFST_MASK);
ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->index,
cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->index)));
@@ -266,11 +385,11 @@ void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
/* 64 bit word means no skipping */
cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
/*
- * (width*8)/64 this should be size of an entire line
- * in 64bit word but 0 means all data until the end
- * is detected automagically
+ * The XSIZE field is expressed in 64-bit units and prevents overflows
+ * in case of synchronization issues by limiting the number of bytes
+ * written per line.
*/
- cal_set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
+ cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK);
cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->index), val);
ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->index,
cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->index)));
@@ -287,9 +406,74 @@ void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", cal_read(ctx->cal, CAL_CTRL));
}
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
+{
+ cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->index), addr);
+}
+
+static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
+{
+ u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index));
+
+ cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS,
+ CAL_WR_DMA_CTRL_MODE_MASK);
+ cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->index), val);
+}
+
+static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
{
- cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->index), dmaaddr);
+ bool stopped;
+
+ spin_lock_irq(&ctx->dma.lock);
+ stopped = ctx->dma.state == CAL_DMA_STOPPED;
+ spin_unlock_irq(&ctx->dma.lock);
+
+ return stopped;
+}
+
+void cal_ctx_start(struct cal_ctx *ctx)
+{
+ ctx->sequence = 0;
+ ctx->dma.state = CAL_DMA_RUNNING;
+
+ /* Configure the CSI-2, pixel processing and write DMA contexts. */
+ cal_ctx_csi2_config(ctx);
+ cal_ctx_pix_proc_config(ctx);
+ cal_ctx_wr_dma_config(ctx);
+
+ /* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
+ CAL_HL_IRQ_MASK(ctx->index));
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
+ CAL_HL_IRQ_MASK(ctx->index));
+}
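The IRQENABLE_SET/IRQENABLE_CLR pair used here follows the usual TI set/clear register idiom: writing a 1 to a bit in the SET register enables that interrupt, writing the same bit to the CLR register disables it, and bits written as 0 are untouched, so no read-modify-write or locking is needed. A sketch using the names from this patch:

	cal_write(cal, CAL_HL_IRQENABLE_SET(n), CAL_HL_IRQ_MASK(i));	/* enable */
	cal_write(cal, CAL_HL_IRQENABLE_CLR(n), CAL_HL_IRQ_MASK(i));	/* disable */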
+
+void cal_ctx_stop(struct cal_ctx *ctx)
+{
+ long timeout;
+
+ /*
+ * Request DMA stop and wait until it completes. If completion times
+ * out, forcefully disable the DMA.
+ */
+ spin_lock_irq(&ctx->dma.lock);
+ ctx->dma.state = CAL_DMA_STOP_REQUESTED;
+ spin_unlock_irq(&ctx->dma.lock);
+
+ timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
+ msecs_to_jiffies(500));
+ if (!timeout) {
+ ctx_err(ctx, "failed to disable dma cleanly\n");
+ cal_ctx_wr_dma_disable(ctx);
+ }
+
+ /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1),
+ CAL_HL_IRQ_MASK(ctx->index));
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2),
+ CAL_HL_IRQ_MASK(ctx->index));
+
+ ctx->dma.state = CAL_DMA_STOPPED;
}
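For reference, the DMA stop path implemented by cal_ctx_stop() together with the IRQ handlers below walks a small state machine (a summary derived from this patch):

	CAL_DMA_RUNNING
	  -> CAL_DMA_STOP_REQUESTED	cal_ctx_stop(), under dma.lock
	  -> CAL_DMA_STOP_PENDING	WDMA start IRQ disables the write DMA context
	  -> CAL_DMA_STOPPED		WDMA end IRQ, wakes ctx->dma.wait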
/* ------------------------------------------------------------------
@@ -297,35 +481,70 @@ void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
* ------------------------------------------------------------------
*/
-static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
- struct cal_dmaqueue *dma_q = &ctx->vidq;
- struct cal_buffer *buf;
- unsigned long addr;
+ spin_lock(&ctx->dma.lock);
+
+ if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) {
+ /*
+ * If a stop is requested, disable the write DMA context
+		 * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed,
+		 * so the current frame will complete and the DMA will then
+		 * stop.
+ */
+ cal_ctx_wr_dma_disable(ctx);
+ ctx->dma.state = CAL_DMA_STOP_PENDING;
+ } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
+ /*
+ * Otherwise, if a new buffer is available, queue it to the
+ * hardware.
+ */
+ struct cal_buffer *buf;
+ dma_addr_t addr;
+
+ buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
+ list);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ cal_ctx_set_dma_addr(ctx, addr);
- buf = list_entry(dma_q->active.next, struct cal_buffer, list);
- ctx->next_frm = buf;
- list_del(&buf->list);
+ ctx->dma.pending = buf;
+ list_del(&buf->list);
+ }
- addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
- cal_ctx_wr_dma_addr(ctx, addr);
+ spin_unlock(&ctx->dma.lock);
}
-static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
{
- ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
- ctx->cur_frm->vb.field = ctx->m_fmt.field;
- ctx->cur_frm->vb.sequence = ctx->sequence++;
+ struct cal_buffer *buf = NULL;
+
+ spin_lock(&ctx->dma.lock);
+
+ /* If the DMA context was stopping, it is now stopped. */
+ if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
+ ctx->dma.state = CAL_DMA_STOPPED;
+ wake_up(&ctx->dma.wait);
+ }
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
- ctx->cur_frm = ctx->next_frm;
+ /* If a new buffer was queued, complete the current buffer. */
+ if (ctx->dma.pending) {
+ buf = ctx->dma.active;
+ ctx->dma.active = ctx->dma.pending;
+ ctx->dma.pending = NULL;
+ }
+
+ spin_unlock(&ctx->dma.lock);
+
+ if (buf) {
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.field = ctx->v_fmt.fmt.pix.field;
+ buf->vb.sequence = ctx->sequence++;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
}
static irqreturn_t cal_irq(int irq_cal, void *data)
{
struct cal_dev *cal = data;
- struct cal_ctx *ctx;
- struct cal_dmaqueue *dma_q;
u32 status;
status = cal_read(cal, CAL_HL_IRQSTATUS(0));
@@ -337,7 +556,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
if (status & CAL_HL_IRQ_OCPO_ERR_MASK)
dev_err_ratelimited(cal->dev, "OCPO ERROR\n");
- for (i = 0; i < CAL_NUM_CSI2_PORTS; ++i) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
if (status & CAL_HL_IRQ_CIO_MASK(i)) {
u32 cio_stat = cal_read(cal,
CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
@@ -360,17 +579,8 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
cal_write(cal, CAL_HL_IRQSTATUS(1), status);
for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
- if (status & CAL_HL_IRQ_MASK(i)) {
- ctx = cal->ctx[i];
-
- spin_lock(&ctx->slock);
- ctx->dma_act = false;
-
- if (ctx->cur_frm != ctx->next_frm)
- cal_process_buffer_complete(ctx);
-
- spin_unlock(&ctx->slock);
- }
+ if (status & CAL_HL_IRQ_MASK(i))
+ cal_irq_wdma_end(cal->ctx[i]);
}
}
@@ -383,17 +593,8 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
cal_write(cal, CAL_HL_IRQSTATUS(2), status);
for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
- if (status & CAL_HL_IRQ_MASK(i)) {
- ctx = cal->ctx[i];
- dma_q = &ctx->vidq;
-
- spin_lock(&ctx->slock);
- ctx->dma_act = true;
- if (!list_empty(&dma_q->active) &&
- ctx->cur_frm == ctx->next_frm)
- cal_schedule_next_buffer(ctx);
- spin_unlock(&ctx->slock);
- }
+ if (status & CAL_HL_IRQ_MASK(i))
+ cal_irq_wdma_start(cal->ctx[i]);
}
}
@@ -406,7 +607,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
*/
struct cal_v4l2_async_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev asd; /* Must be first */
struct cal_camerarx *phy;
};
@@ -421,6 +622,8 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct cal_camerarx *phy = to_cal_asd(asd)->phy;
+ int pad;
+ int ret;
if (phy->sensor) {
phy_info(phy, "Rejecting subdev %s (Already set!!)",
@@ -431,6 +634,25 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
phy->sensor = subdev;
phy_dbg(1, phy, "Using sensor %s for capture\n", subdev->name);
+ pad = media_entity_get_fwnode_pad(&subdev->entity,
+ of_fwnode_handle(phy->sensor_ep_node),
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ phy_err(phy, "Sensor %s has no connected source pad\n",
+ subdev->name);
+ return pad;
+ }
+
+ ret = media_create_pad_link(&subdev->entity, pad,
+ &phy->subdev.entity, CAL_CAMERARX_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ phy_err(phy, "Failed to create media link for sensor %s\n",
+ subdev->name);
+ return ret;
+ }
+
return 0;
}
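Once both links exist, the media graph for one pipeline looks like this (illustrative; pad numbers follow the CAL_CAMERARX_PAD_* defines in cal.h, the video-node link being created in cal_ctx_v4l2_register()):

	sensor[src pad] -> 0[CAMERARX]1 -> 0[video device]

with both links flagged IMMUTABLE and ENABLED.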
@@ -460,26 +682,24 @@ static int cal_async_notifier_register(struct cal_dev *cal)
v4l2_async_notifier_init(&cal->notifier);
cal->notifier.ops = &cal_async_notifier_ops;
- for (i = 0; i < ARRAY_SIZE(cal->phy); ++i) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
struct cal_camerarx *phy = cal->phy[i];
struct cal_v4l2_async_subdev *casd;
- struct v4l2_async_subdev *asd;
struct fwnode_handle *fwnode;
- if (!phy || !phy->sensor_node)
+ if (!phy->sensor_node)
continue;
fwnode = of_fwnode_handle(phy->sensor_node);
- asd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
- fwnode,
- sizeof(*asd));
- if (IS_ERR(asd)) {
+ casd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
+ fwnode,
+ struct cal_v4l2_async_subdev);
+ if (IS_ERR(casd)) {
phy_err(phy, "Failed to add subdev to notifier\n");
- ret = PTR_ERR(asd);
+ ret = PTR_ERR(casd);
goto error;
}
- casd = to_cal_asd(asd);
casd->phy = phy;
}
@@ -797,6 +1017,11 @@ static int cal_probe(struct platform_device *pdev)
cal_get_hwinfo(cal);
pm_runtime_put_sync(&pdev->dev);
+ /* Initialize the media device. */
+ ret = cal_media_init(cal);
+ if (ret < 0)
+ goto error_pm_runtime;
+
/* Create CAMERARX PHYs. */
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
cal->phy[i] = cal_camerarx_create(cal, i);
@@ -816,11 +1041,6 @@ static int cal_probe(struct platform_device *pdev)
goto error_camerarx;
}
- /* Initialize the media device. */
- ret = cal_media_init(cal);
- if (ret < 0)
- goto error_camerarx;
-
/* Create contexts. */
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
if (!cal->phy[i]->sensor_node)
@@ -848,12 +1068,12 @@ error_context:
cal_ctx_v4l2_cleanup(ctx);
}
- cal_media_cleanup(cal);
-
error_camerarx:
- for (i = 0; i < ARRAY_SIZE(cal->phy); i++)
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_destroy(cal->phy[i]);
+ cal_media_cleanup(cal);
+
error_pm_runtime:
pm_runtime_disable(&pdev->dev);
@@ -878,7 +1098,7 @@ static int cal_remove(struct platform_device *pdev)
cal_media_cleanup(cal);
- for (i = 0; i < ARRAY_SIZE(cal->phy); i++)
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_destroy(cal->phy[i]);
pm_runtime_put_sync(&pdev->dev);
@@ -890,16 +1110,23 @@ static int cal_remove(struct platform_device *pdev)
static int cal_runtime_resume(struct device *dev)
{
struct cal_dev *cal = dev_get_drvdata(dev);
+ unsigned int i;
if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
/*
 		 * Apply the errata on both ports every time we (re-)enable
 		 * the clock.
*/
- cal_camerarx_i913_errata(cal->phy[0]);
- cal_camerarx_i913_errata(cal->phy[1]);
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
+ cal_camerarx_i913_errata(cal->phy[i]);
}
+ /*
+ * Enable global interrupts that are not related to a particular
+	 * CAMERARX or context.
+ */
+ cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
+
return 0;
}
diff --git a/drivers/media/platform/ti-vpe/cal.h b/drivers/media/platform/ti-vpe/cal.h
index 4123405ee0cf..d471b7f82519 100644
--- a/drivers/media/platform/ti-vpe/cal.h
+++ b/drivers/media/platform/ti-vpe/cal.h
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/media-device.h>
#include <media/v4l2-async.h>
@@ -24,21 +25,32 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#define CAL_MODULE_NAME "cal"
#define CAL_NUM_CONTEXT 2
#define CAL_NUM_CSI2_PORTS 2
-#define MAX_WIDTH_BYTES (8192 * 8)
-#define MAX_HEIGHT_LINES 16383
+/*
+ * The width is limited by the size of the CAL_WR_DMA_XSIZE_j.XSIZE field,
+ * expressed in multiples of 64 bits. The height is limited by the size of the
+ * CAL_CSI2_CTXi_j.CTXi_LINES and CAL_WR_DMA_CTRL_j.YSIZE fields, expressed in
+ * lines.
+ */
+#define CAL_MIN_WIDTH_BYTES 16
+#define CAL_MAX_WIDTH_BYTES (8192 * 8)
+#define CAL_MIN_HEIGHT_LINES 1
+#define CAL_MAX_HEIGHT_LINES 16383
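A quick consequence of these limits (worked example, assuming a 16 bpp format): CAL_MAX_WIDTH_BYTES is 8192 units of 64 bits, i.e. 65536 bytes per line, which is the bound that cal_calc_format_size() applies:

	max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(bpp, 8) >> 3);	/* 65536 / 2 = 32768 pixels for bpp = 16 */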
+
+#define CAL_CAMERARX_PAD_SINK 0
+#define CAL_CAMERARX_PAD_SOURCE 1
struct device;
struct device_node;
struct resource;
struct regmap;
 struct regmap_field;
-struct v4l2_subdev;
/* CTRL_CORE_CAMERRX_CONTROL register field id */
enum cal_camerarx_field {
@@ -49,7 +61,14 @@ enum cal_camerarx_field {
F_MAX_FIELDS,
};
-struct cal_fmt {
+enum cal_dma_state {
+ CAL_DMA_RUNNING,
+ CAL_DMA_STOP_REQUESTED,
+ CAL_DMA_STOP_PENDING,
+ CAL_DMA_STOPPED,
+};
+
+struct cal_format_info {
u32 fourcc;
u32 code;
/* Bits per pixel */
@@ -63,8 +82,38 @@ struct cal_buffer {
struct list_head list;
};
+/**
+ * struct cal_dmaqueue - Queue of DMA buffers
+ * @active: Buffer being DMA'ed to for the current frame
+ */
struct cal_dmaqueue {
- struct list_head active;
+ /**
+ * Protects all fields in the cal_dmaqueue.
+ */
+ spinlock_t lock;
+
+ /**
+ * Buffers queued to the driver and waiting for DMA processing.
+ * Buffers are added to the list by the vb2 .buffer_queue() operation,
+ * and move to @pending when they are scheduled for the next frame.
+ */
+ struct list_head queue;
+ /**
+ * Buffer provided to the hardware to DMA the next frame. Will move to
+ * @active at the end of the current frame.
+ */
+ struct cal_buffer *pending;
+ /**
+ * Buffer being DMA'ed to for the current frame. Will be retired and
+ * given back to vb2 at the end of the current frame if a @pending
+ * buffer has been scheduled to replace it.
+ */
+ struct cal_buffer *active;
+
+ /** State of the DMA engine. */
+ enum cal_dma_state state;
+ /** Wait queue to signal a @state transition to CAL_DMA_STOPPED. */
+ struct wait_queue_head wait;
};
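A note on the locking convention encoded by @lock (inferred from the call sites in cal.c and cal-video.c): process context takes the lock with interrupts disabled, while the IRQ handlers may use the plain variant:

	spin_lock_irq(&ctx->dma.lock);	/* process context: vb2 ops, cal_ctx_stop() */
	spin_lock(&ctx->dma.lock);	/* IRQ context: cal_irq_wdma_start/end() */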
struct cal_camerarx_data {
@@ -108,8 +157,14 @@ struct cal_camerarx {
unsigned int instance;
struct v4l2_fwnode_endpoint endpoint;
+ struct device_node *sensor_ep_node;
struct device_node *sensor_node;
struct v4l2_subdev *sensor;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ const struct cal_format_info *fmtinfo;
};
struct cal_dev {
@@ -149,33 +204,22 @@ struct cal_ctx {
/* v4l2_ioctl mutex */
struct mutex mutex;
- /* v4l2 buffers lock */
- spinlock_t slock;
- struct cal_dmaqueue vidq;
+ struct cal_dmaqueue dma;
/* video capture */
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
/* Used to store current pixel format */
- struct v4l2_format v_fmt;
- /* Used to store current mbus frame format */
- struct v4l2_mbus_framefmt m_fmt;
+ struct v4l2_format v_fmt;
/* Current subdev enumerated format */
- const struct cal_fmt **active_fmt;
+ const struct cal_format_info **active_fmt;
unsigned int num_active_fmt;
unsigned int sequence;
struct vb2_queue vb_vidq;
unsigned int index;
unsigned int cport;
-
- /* Pointer pointing to current v4l2_buffer */
- struct cal_buffer *cur_frm;
- /* Pointer pointing to next v4l2_buffer */
- struct cal_buffer *next_frm;
-
- bool dma_act;
};
extern unsigned int cal_debug;
@@ -215,7 +259,7 @@ static inline void cal_write(struct cal_dev *cal, u32 offset, u32 val)
iowrite32(val, cal->base + offset);
}
-static inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask)
+static __always_inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask)
{
return FIELD_GET(mask, cal_read(cal, offset));
}
@@ -239,25 +283,22 @@ static inline void cal_set_field(u32 *valp, u32 field, u32 mask)
*valp = val;
}
+extern const struct cal_format_info cal_formats[];
+extern const unsigned int cal_num_formats;
+const struct cal_format_info *cal_format_by_fourcc(u32 fourcc);
+const struct cal_format_info *cal_format_by_code(u32 code);
+
void cal_quickdump_regs(struct cal_dev *cal);
void cal_camerarx_disable(struct cal_camerarx *phy);
-int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt);
-void cal_camerarx_stop(struct cal_camerarx *phy);
-void cal_camerarx_enable_irqs(struct cal_camerarx *phy);
-void cal_camerarx_disable_irqs(struct cal_camerarx *phy);
-void cal_camerarx_ppi_enable(struct cal_camerarx *phy);
-void cal_camerarx_ppi_disable(struct cal_camerarx *phy);
void cal_camerarx_i913_errata(struct cal_camerarx *phy);
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
unsigned int instance);
void cal_camerarx_destroy(struct cal_camerarx *phy);
-void cal_ctx_csi2_config(struct cal_ctx *ctx);
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx);
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
- unsigned int height);
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr);
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr);
+void cal_ctx_start(struct cal_ctx *ctx);
+void cal_ctx_stop(struct cal_ctx *ctx);
int cal_ctx_v4l2_register(struct cal_ctx *ctx);
void cal_ctx_v4l2_unregister(struct cal_ctx *ctx);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 779dd74b82d0..10251b787674 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1683,7 +1683,6 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
}
}
- memset(pix->reserved, 0, sizeof(pix->reserved));
for (i = 0; i < pix->num_planes; i++) {
plane_fmt = &pix->plane_fmt[i];
depth = fmt->vpdma_fmt[i]->depth;
@@ -1713,7 +1712,6 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
plane_fmt->bytesperline *
depth) >> 3;
}
- memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
}
return 0;
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 53570250a25d..133122e38515 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -370,19 +370,13 @@ static int video_mux_async_register(struct video_mux *vmux,
if (!ep)
continue;
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- fwnode_handle_put(ep);
- return -ENOMEM;
- }
-
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &vmux->notifier, ep, asd);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &vmux->notifier, ep, struct v4l2_async_subdev);
fwnode_handle_put(ep);
- if (ret) {
- kfree(asd);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
/* OK if asd already exists */
if (ret != -EEXIST)
return ret;
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 56c62122a81a..37cf33c7e6ca 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -44,16 +44,16 @@ struct vsp1_uif;
#define VSP1_MAX_UIF 2
#define VSP1_MAX_WPF 4
-#define VSP1_HAS_LUT (1 << 1)
-#define VSP1_HAS_SRU (1 << 2)
-#define VSP1_HAS_BRU (1 << 3)
-#define VSP1_HAS_CLU (1 << 4)
-#define VSP1_HAS_WPF_VFLIP (1 << 5)
-#define VSP1_HAS_WPF_HFLIP (1 << 6)
-#define VSP1_HAS_HGO (1 << 7)
-#define VSP1_HAS_HGT (1 << 8)
-#define VSP1_HAS_BRS (1 << 9)
-#define VSP1_HAS_EXT_DL (1 << 10)
+#define VSP1_HAS_LUT BIT(1)
+#define VSP1_HAS_SRU BIT(2)
+#define VSP1_HAS_BRU BIT(3)
+#define VSP1_HAS_CLU BIT(4)
+#define VSP1_HAS_WPF_VFLIP BIT(5)
+#define VSP1_HAS_WPF_HFLIP BIT(6)
+#define VSP1_HAS_HGO BIT(7)
+#define VSP1_HAS_HGT BIT(8)
+#define VSP1_HAS_BRS BIT(9)
+#define VSP1_HAS_EXT_DL BIT(10)
struct vsp1_device_info {
u32 version;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index dc62533cf32c..aa66e4f5f3f3 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
}
done:
- if (ret)
+ if (ret) {
pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(vsp1->fcp);
+ }
return ret;
}
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index cc2856efea59..bf4015d852e3 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -359,7 +359,7 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
dev_dbg(xdev->dev, "parsing node %p\n", fwnode);
while (1) {
- struct v4l2_async_subdev *asd;
+ struct xvip_graph_entity *xge;
ep = fwnode_graph_get_next_endpoint(fwnode, ep);
if (ep == NULL)
@@ -382,12 +382,12 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
continue;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(
+ xge = v4l2_async_notifier_add_fwnode_subdev(
&xdev->notifier, remote,
- sizeof(struct xvip_graph_entity));
+ struct xvip_graph_entity);
fwnode_handle_put(remote);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
+ if (IS_ERR(xge)) {
+ ret = PTR_ERR(xge);
goto err_notifier_cleanup;
}
}
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index ad2ac16ff12d..c591c0851fa2 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -273,8 +273,8 @@ err_dev_reg:
return res;
}
-static int radio_isa_common_remove(struct radio_isa_card *isa,
- unsigned region_size)
+static void radio_isa_common_remove(struct radio_isa_card *isa,
+ unsigned region_size)
{
const struct radio_isa_ops *ops = isa->drv->ops;
@@ -285,7 +285,6 @@ static int radio_isa_common_remove(struct radio_isa_card *isa,
release_region(isa->io, region_size);
v4l2_info(&isa->v4l2_dev, "Removed radio card %s\n", isa->drv->card);
kfree(isa);
- return 0;
}
int radio_isa_probe(struct device *pdev, unsigned int dev)
@@ -338,11 +337,11 @@ int radio_isa_probe(struct device *pdev, unsigned int dev)
}
EXPORT_SYMBOL_GPL(radio_isa_probe);
-int radio_isa_remove(struct device *pdev, unsigned int dev)
+void radio_isa_remove(struct device *pdev, unsigned int dev)
{
struct radio_isa_card *isa = dev_get_drvdata(pdev);
- return radio_isa_common_remove(isa, isa->drv->region_size);
+ radio_isa_common_remove(isa, isa->drv->region_size);
}
EXPORT_SYMBOL_GPL(radio_isa_remove);
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index 2f0736edfda8..c9159958203e 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -91,7 +91,7 @@ struct radio_isa_driver {
int radio_isa_match(struct device *pdev, unsigned int dev);
int radio_isa_probe(struct device *pdev, unsigned int dev);
-int radio_isa_remove(struct device *pdev, unsigned int dev);
+void radio_isa_remove(struct device *pdev, unsigned int dev);
#ifdef CONFIG_PNP
int radio_isa_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id);
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 0388894cfe41..d0dde55b7930 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -293,11 +293,9 @@ static void fmr2_remove(struct fmr2 *fmr2)
kfree(fmr2);
}
-static int fmr2_isa_remove(struct device *pdev, unsigned int ndev)
+static void fmr2_isa_remove(struct device *pdev, unsigned int ndev)
{
fmr2_remove(dev_get_drvdata(pdev));
-
- return 0;
}
static void fmr2_pnp_remove(struct pnp_dev *pdev)
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 2c0ee2e5b446..8a4b4040be45 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -92,6 +92,7 @@ config IR_SONY_DECODER
config IR_SANYO_DECODER
tristate "Enable IR raw decoder for the Sanyo protocol"
depends on RC_CORE
+ select BITREVERSE
help
Enable this option if you have an infrared remote control which
@@ -101,6 +102,7 @@ config IR_SANYO_DECODER
config IR_SHARP_DECODER
tristate "Enable IR raw decoder for the Sharp protocol"
depends on RC_CORE
+ select BITREVERSE
help
Enable this option if you have an infrared remote control which
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index 5c0508f2719f..a80cfcd87a95 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -30,6 +30,7 @@ config IR_IMG_HW
config IR_IMG_NEC
bool "NEC protocol support"
depends on IR_IMG_HW
+ select BITREVERSE
help
Say Y here to enable support for the NEC, extended NEC, and 32-bit
NEC protocols in the ImgTec infrared decoder block.
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index be8f2756a444..1524dc0fc566 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -320,7 +320,7 @@ again:
data->body);
spin_lock(&data->keylock);
if (scancode) {
- delay = nsecs_to_jiffies(dev->timeout) +
+ delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index e0242c9b6aeb..3e729a17b35f 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -491,6 +491,7 @@ static void irtoy_disconnect(struct usb_interface *intf)
static const struct usb_device_id irtoy_table[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xfd08, USB_CLASS_CDC_DATA) },
+ { USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xf58b, USB_CLASS_CDC_DATA) },
{ }
};
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index a905113fef6e..0c6229592e13 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f1dbd059ed08..5642595a057e 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -701,11 +701,18 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
data[0], data[1]);
break;
case MCE_RSP_EQIRCFS:
+ if (!data[0] && !data[1]) {
+ dev_dbg(dev, "%s: no carrier", inout);
+ break;
+ }
+			// the prescaler must be sane (at most 8)
+ if (data[0] > 8)
+ break;
period = DIV_ROUND_CLOSEST((1U << data[0] * 2) *
(data[1] + 1), 10);
if (!period)
break;
- carrier = (1000 * 1000) / period;
+ carrier = USEC_PER_SEC / period;
dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
inout, carrier, period);
break;
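Worked example of the carrier decode above (a sketch, assuming data[0] = 1 and data[1] = 69):

	period  = DIV_ROUND_CLOSEST((1U << 1 * 2) * (69 + 1), 10);	/* 4 * 70 / 10 = 28 us */
	carrier = USEC_PER_SEC / 28;					/* 35714 Hz, a typical ~36 kHz IR carrier */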
@@ -1169,7 +1176,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
switch (subcmd) {
/* the one and only 5-byte return value command */
case MCE_RSP_GETPORTSTATUS:
- if (buf_in[5] == 0)
+ if (buf_in[5] == 0 && *hi < 8)
ir->txports_cabled |= 1 << *hi;
break;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1d811e5ffb55..1fd62c1dac76 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
- unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+ unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
- dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+ dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
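These conversions track the series-wide switch of rc_dev->timeout from nanoseconds to microseconds; only the unit helper changes. A quick sketch of the arithmetic (assuming a 125 ms timeout and HZ = 250):

	/* 125 ms = 125000 us; usecs_to_jiffies() rounds up. */
	unsigned long j = usecs_to_jiffies(125000);	/* 31.25 -> 32 jiffies at HZ = 250 */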
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
* once the the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8cc28c92d05d..96ae0294ac10 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
- jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+ jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 8555c7798706..168e1d2c876a 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -86,7 +86,6 @@ struct sunxi_ir_quirks {
};
struct sunxi_ir {
- spinlock_t ir_lock;
struct rc_dev *rc;
void __iomem *base;
int irq;
@@ -105,8 +104,6 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
struct sunxi_ir *ir = dev_id;
struct ir_raw_event rawir = {};
- spin_lock(&ir->ir_lock);
-
status = readl(ir->base + SUNXI_IR_RXSTA_REG);
/* clean all pending statuses */
@@ -137,8 +134,6 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
ir_raw_event_handle(ir->rc);
}
- spin_unlock(&ir->ir_lock);
-
return IRQ_HANDLED;
}
@@ -160,27 +155,102 @@ static int sunxi_ir_set_timeout(struct rc_dev *rc_dev, unsigned int timeout)
{
struct sunxi_ir *ir = rc_dev->priv;
unsigned int base_clk = clk_get_rate(ir->clk);
- unsigned long flags;
unsigned int ithr = sunxi_usec_to_ithr(base_clk, timeout);
dev_dbg(rc_dev->dev.parent, "setting idle threshold to %u\n", ithr);
- spin_lock_irqsave(&ir->ir_lock, flags);
/* Set noise threshold and idle threshold */
writel(REG_CIR_NTHR(SUNXI_IR_RXNOISE) | REG_CIR_ITHR(ithr),
ir->base + SUNXI_IR_CIR_REG);
- spin_unlock_irqrestore(&ir->ir_lock, flags);
rc_dev->timeout = sunxi_ithr_to_usec(base_clk, ithr);
return 0;
}
+static int sunxi_ir_hw_init(struct device *dev)
+{
+ struct sunxi_ir *ir = dev_get_drvdata(dev);
+ u32 tmp;
+ int ret;
+
+ ret = reset_control_deassert(ir->rst);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ir->apb_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable apb clk\n");
+ goto exit_assert_reset;
+ }
+
+ ret = clk_prepare_enable(ir->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ir clk\n");
+ goto exit_disable_apb_clk;
+ }
+
+ /* Enable CIR Mode */
+ writel(REG_CTL_MD, ir->base + SUNXI_IR_CTL_REG);
+
+ /* Set noise threshold and idle threshold */
+ sunxi_ir_set_timeout(ir->rc, ir->rc->timeout);
+
+ /* Invert Input Signal */
+ writel(REG_RXCTL_RPPI, ir->base + SUNXI_IR_RXCTL_REG);
+
+ /* Clear All Rx Interrupt Status */
+ writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
+
+ /*
+ * Enable IRQ on overflow, packet end, FIFO available with trigger
+ * level
+ */
+ writel(REG_RXINT_ROI_EN | REG_RXINT_RPEI_EN |
+ REG_RXINT_RAI_EN | REG_RXINT_RAL(ir->fifo_size / 2 - 1),
+ ir->base + SUNXI_IR_RXINT_REG);
+
+ /* Enable IR Module */
+ tmp = readl(ir->base + SUNXI_IR_CTL_REG);
+ writel(tmp | REG_CTL_GEN | REG_CTL_RXEN, ir->base + SUNXI_IR_CTL_REG);
+
+ return 0;
+
+exit_disable_apb_clk:
+ clk_disable_unprepare(ir->apb_clk);
+exit_assert_reset:
+ reset_control_assert(ir->rst);
+
+ return ret;
+}
+
+static void sunxi_ir_hw_exit(struct device *dev)
+{
+ struct sunxi_ir *ir = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ir->clk);
+ clk_disable_unprepare(ir->apb_clk);
+ reset_control_assert(ir->rst);
+}
+
+static int __maybe_unused sunxi_ir_suspend(struct device *dev)
+{
+ sunxi_ir_hw_exit(dev);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_ir_resume(struct device *dev)
+{
+ return sunxi_ir_hw_init(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(sunxi_ir_pm_ops, sunxi_ir_suspend, sunxi_ir_resume);
+
static int sunxi_ir_probe(struct platform_device *pdev)
{
int ret = 0;
- unsigned long tmp = 0;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
@@ -199,8 +269,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
return -ENODEV;
}
- spin_lock_init(&ir->ir_lock);
-
ir->fifo_size = quirks->fifo_size;
/* Clock */
@@ -223,43 +291,26 @@ static int sunxi_ir_probe(struct platform_device *pdev)
ir->rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(ir->rst))
return PTR_ERR(ir->rst);
- ret = reset_control_deassert(ir->rst);
- if (ret)
- return ret;
}
ret = clk_set_rate(ir->clk, b_clk_freq);
if (ret) {
dev_err(dev, "set ir base clock failed!\n");
- goto exit_reset_assert;
+ return ret;
}
dev_dbg(dev, "set base clock frequency to %d Hz.\n", b_clk_freq);
- if (clk_prepare_enable(ir->apb_clk)) {
- dev_err(dev, "try to enable apb_ir_clk failed\n");
- ret = -EINVAL;
- goto exit_reset_assert;
- }
-
- if (clk_prepare_enable(ir->clk)) {
- dev_err(dev, "try to enable ir_clk failed\n");
- ret = -EINVAL;
- goto exit_clkdisable_apb_clk;
- }
-
/* IO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ir->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ir->base)) {
- ret = PTR_ERR(ir->base);
- goto exit_clkdisable_clk;
+ return PTR_ERR(ir->base);
}
ir->rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate device\n");
- ret = -ENOMEM;
- goto exit_clkdisable_clk;
+ return -ENOMEM;
}
ir->rc->priv = ir;
@@ -275,6 +326,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* Frequency after IR internal divider with sample period in us */
ir->rc->rx_resolution = (USEC_PER_SEC / (b_clk_freq / 64));
+ ir->rc->timeout = IR_DEFAULT_TIMEOUT;
ir->rc->min_timeout = sunxi_ithr_to_usec(b_clk_freq, 0);
ir->rc->max_timeout = sunxi_ithr_to_usec(b_clk_freq, 255);
ir->rc->s_timeout = sunxi_ir_set_timeout;
@@ -301,67 +353,34 @@ static int sunxi_ir_probe(struct platform_device *pdev)
goto exit_free_dev;
}
- /* Enable CIR Mode */
- writel(REG_CTL_MD, ir->base+SUNXI_IR_CTL_REG);
-
- /* Set noise threshold and idle threshold */
- sunxi_ir_set_timeout(ir->rc, IR_DEFAULT_TIMEOUT);
-
- /* Invert Input Signal */
- writel(REG_RXCTL_RPPI, ir->base + SUNXI_IR_RXCTL_REG);
-
- /* Clear All Rx Interrupt Status */
- writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
-
- /*
- * Enable IRQ on overflow, packet end, FIFO available with trigger
- * level
- */
- writel(REG_RXINT_ROI_EN | REG_RXINT_RPEI_EN |
- REG_RXINT_RAI_EN | REG_RXINT_RAL(ir->fifo_size / 2 - 1),
- ir->base + SUNXI_IR_RXINT_REG);
-
- /* Enable IR Module */
- tmp = readl(ir->base + SUNXI_IR_CTL_REG);
- writel(tmp | REG_CTL_GEN | REG_CTL_RXEN, ir->base + SUNXI_IR_CTL_REG);
+ ret = sunxi_ir_hw_init(dev);
+ if (ret)
+ goto exit_free_dev;
dev_info(dev, "initialized sunXi IR driver\n");
return 0;
exit_free_dev:
rc_free_device(ir->rc);
-exit_clkdisable_clk:
- clk_disable_unprepare(ir->clk);
-exit_clkdisable_apb_clk:
- clk_disable_unprepare(ir->apb_clk);
-exit_reset_assert:
- reset_control_assert(ir->rst);
return ret;
}
static int sunxi_ir_remove(struct platform_device *pdev)
{
- unsigned long flags;
struct sunxi_ir *ir = platform_get_drvdata(pdev);
- clk_disable_unprepare(ir->clk);
- clk_disable_unprepare(ir->apb_clk);
- reset_control_assert(ir->rst);
-
- spin_lock_irqsave(&ir->ir_lock, flags);
- /* disable IR IRQ */
- writel(0, ir->base + SUNXI_IR_RXINT_REG);
- /* clear All Rx Interrupt Status */
- writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
- /* disable IR */
- writel(0, ir->base + SUNXI_IR_CTL_REG);
- spin_unlock_irqrestore(&ir->ir_lock, flags);
-
rc_unregister_device(ir->rc);
+ sunxi_ir_hw_exit(&pdev->dev);
+
return 0;
}
+static void sunxi_ir_shutdown(struct platform_device *pdev)
+{
+ sunxi_ir_hw_exit(&pdev->dev);
+}
+
static const struct sunxi_ir_quirks sun4i_a10_ir_quirks = {
.has_reset = false,
.fifo_size = 16,
@@ -397,9 +416,11 @@ MODULE_DEVICE_TABLE(of, sunxi_ir_match);
static struct platform_driver sunxi_ir_driver = {
.probe = sunxi_ir_probe,
.remove = sunxi_ir_remove,
+ .shutdown = sunxi_ir_shutdown,
.driver = {
.name = SUNXI_IR_DEV,
.of_match_table = sunxi_ir_match,
+ .pm = &sunxi_ir_pm_ops,
},
};
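
The net effect of the sunxi-cir changes above: all register programming now lives in sunxi_ir_hw_init()/sunxi_ir_hw_exit(), so probe, remove, shutdown, and system sleep share one code path, and resume simply reprograms the whole block rather than restoring saved state. The patch also drops the ir_lock spinlock around these single-register writes. A minimal sketch of the init/exit + dev_pm_ops pattern (the my_* names are hypothetical stand-ins, not from the driver):

        /* Full (re)programming on entry, full quiesce on exit. */
        static int my_hw_init(struct device *dev);   /* clocks on, all registers written */
        static void my_hw_exit(struct device *dev);  /* clocks off, reset asserted */

        static int __maybe_unused my_suspend(struct device *dev)
        {
                my_hw_exit(dev);
                return 0;
        }

        static int __maybe_unused my_resume(struct device *dev)
        {
                return my_hw_init(dev);
        }

        /* Expands to a struct dev_pm_ops; with CONFIG_PM_SLEEP=n the callbacks
         * become unreferenced, and __maybe_unused keeps the compiler quiet. */
        static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

Because resume rebuilds every register from scratch, nothing needs to be saved at suspend time.
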
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 025f3ff77302..33f1c893c1b6 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -811,9 +811,6 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix_mp->xfer_func = ctx->state.xfer_func;
pix_mp->ycbcr_enc = ctx->state.ycbcr_enc;
pix_mp->quantization = ctx->state.quantization;
- memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
- memset(pix_mp->plane_fmt[0].reserved, 0,
- sizeof(pix_mp->plane_fmt[0].reserved));
break;
default:
return -EINVAL;
@@ -886,8 +883,6 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
info->sizeimage_mult / info->sizeimage_div;
if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT)
plane->sizeimage += sizeof(struct fwht_cframe_hdr);
- memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
- memset(plane->reserved, 0, sizeof(plane->reserved));
break;
default:
return -EINVAL;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index fc64d0c8492a..75617709c8ce 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -17,6 +17,8 @@
#include <linux/time.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <media/dvbdev.h>
+#include <media/media-device.h>
#include "vidtv_bridge.h"
#include "vidtv_common.h"
@@ -414,6 +416,7 @@ static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb)
ret = vidtv_bridge_register_adap(dvb);
if (ret < 0)
goto fail_adapter;
+ dvb_register_media_controller(&dvb->adapter, &dvb->mdev);
for (i = 0; i < NUM_FE; ++i) {
ret = vidtv_bridge_probe_demod(dvb, i);
@@ -493,6 +496,15 @@ static int vidtv_bridge_probe(struct platform_device *pdev)
dvb->pdev = pdev;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ dvb->mdev.dev = &pdev->dev;
+
+ strscpy(dvb->mdev.model, "vidtv", sizeof(dvb->mdev.model));
+ strscpy(dvb->mdev.bus_info, "platform:vidtv", sizeof(dvb->mdev.bus_info));
+
+ media_device_init(&dvb->mdev);
+#endif
+
ret = vidtv_bridge_dvb_init(dvb);
if (ret < 0)
goto err_dvb;
@@ -501,9 +513,22 @@ static int vidtv_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dvb);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ ret = media_device_register(&dvb->mdev);
+ if (ret) {
+ dev_err(dvb->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ goto err_media_device_register;
+ }
+#endif /* CONFIG_MEDIA_CONTROLLER_DVB */
+
dev_info(&pdev->dev, "Successfully initialized vidtv!\n");
return ret;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+err_media_device_register:
+ media_device_cleanup(&dvb->mdev);
+#endif /* CONFIG_MEDIA_CONTROLLER_DVB */
err_dvb:
kfree(dvb);
return ret;
@@ -516,6 +541,11 @@ static int vidtv_bridge_remove(struct platform_device *pdev)
dvb = platform_get_drvdata(pdev);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ media_device_unregister(&dvb->mdev);
+ media_device_cleanup(&dvb->mdev);
+#endif /* CONFIG_MEDIA_CONTROLLER_DVB */
+
mutex_destroy(&dvb->feed_lock);
for (i = 0; i < NUM_FE; ++i) {
@@ -527,6 +557,7 @@ static int vidtv_bridge_remove(struct platform_device *pdev)
dvb_dmxdev_release(&dvb->dmx_dev);
dvb_dmx_release(&dvb->demux);
dvb_unregister_adapter(&dvb->adapter);
+ dev_info(&pdev->dev, "Successfully removed vidtv\n");
return 0;
}
@@ -536,14 +567,13 @@ static void vidtv_bridge_dev_release(struct device *dev)
}
static struct platform_device vidtv_bridge_dev = {
- .name = "vidtv_bridge",
+ .name = VIDTV_PDEV_NAME,
.dev.release = vidtv_bridge_dev_release,
};
static struct platform_driver vidtv_bridge_driver = {
.driver = {
- .name = "vidtv_bridge",
- .suppress_bind_attrs = true,
+ .name = VIDTV_PDEV_NAME,
},
.probe = vidtv_bridge_probe,
.remove = vidtv_bridge_remove,
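
The vidtv hunks above hang an optional media controller off the bridge, and the ordering is the whole point: init before the DVB side creates entities, register last, and mirror both in remove. A condensed sketch of that lifecycle under the same CONFIG_MEDIA_CONTROLLER_DVB guard (bus_info and error handling trimmed):

        #ifdef CONFIG_MEDIA_CONTROLLER_DVB
                /* 1. Identity + init, before any entities exist. */
                dvb->mdev.dev = &pdev->dev;
                strscpy(dvb->mdev.model, "vidtv", sizeof(dvb->mdev.model));
                media_device_init(&dvb->mdev);

                /* 2. Attach the adapter so frontends/demux register as entities. */
                dvb_register_media_controller(&dvb->adapter, &dvb->mdev);

                /* 3. Register only once everything is in place; on failure,
                 * media_device_cleanup() undoes the init. */
                ret = media_device_register(&dvb->mdev);
                if (ret)
                        media_device_cleanup(&dvb->mdev);
        #endif /* CONFIG_MEDIA_CONTROLLER_DVB */

In remove, the mirror image runs first: media_device_unregister() then media_device_cleanup(), before the DVB pieces are torn down.
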
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.h b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
index 2528adaee27d..de47ce6e7b14 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
@@ -16,6 +16,7 @@
* For now, only one frontend is supported. See vidtv_start_streaming()
*/
#define NUM_FE 1
+#define VIDTV_PDEV_NAME "vidtv"
#include <linux/i2c.h>
#include <linux/platform_device.h>
@@ -24,6 +25,7 @@
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
+#include <media/media-device.h>
#include "vidtv_mux.h"
@@ -42,6 +44,7 @@
* @feed_lock: Protects access to the start/stop stream logic/data.
* @streaming: Whether we are streaming now.
* @mux: The abstraction responsible for delivering MPEG TS packets to the bridge.
+ * @mdev: The media_device struct for media controller support.
*/
struct vidtv_dvb {
struct platform_device *pdev;
@@ -60,6 +63,10 @@ struct vidtv_dvb {
bool streaming;
struct vidtv_mux *mux;
+
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ struct media_device mdev;
+#endif /* CONFIG_MEDIA_CONTROLLER_DVB */
};
#endif // VIDTV_BRIDG_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
index 4511a2a98405..47ed7907db8d 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -506,10 +506,9 @@ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
case REGISTRATION_DESCRIPTOR:
default:
- curr = kzalloc(sizeof(*desc) + desc->length, GFP_KERNEL);
+ curr = kmemdup(desc, sizeof(*desc) + desc->length, GFP_KERNEL);
if (!curr)
return NULL;
- memcpy(curr, desc, sizeof(*desc) + desc->length);
}
if (!curr)
@@ -1164,6 +1163,8 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
struct vidtv_psi_desc *table_descriptor = args->pmt->descriptor;
struct vidtv_psi_table_pmt_stream *stream = args->pmt->stream;
struct vidtv_psi_desc *stream_descriptor;
+ u32 crc = INITIAL_CRC;
+ u32 nbytes = 0;
struct header_write_args h_args = {
.dest_buf = args->buf,
.dest_offset = args->offset,
@@ -1181,6 +1182,7 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
.new_psi_section = false,
.is_crc = false,
.dest_buf_sz = args->buf_sz,
+ .crc = &crc,
};
struct desc_write_args d_args = {
.dest_buf = args->buf,
@@ -1193,8 +1195,6 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
.pid = args->pid,
.dest_buf_sz = args->buf_sz,
};
- u32 crc = INITIAL_CRC;
- u32 nbytes = 0;
vidtv_psi_pmt_table_update_sec_len(args->pmt);
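
Two separate cleanups ride in the vidtv_psi.c hunks. First, kmemdup() folds an allocate-then-copy pair into one call and drops kzalloc()'s pointless zeroing of memory that is immediately overwritten:

        /* before: two steps, with a wasted memset inside kzalloc() */
        curr = kzalloc(sizeof(*desc) + desc->length, GFP_KERNEL);
        if (!curr)
                return NULL;
        memcpy(curr, desc, sizeof(*desc) + desc->length);

        /* after: one helper, identical result */
        curr = kmemdup(desc, sizeof(*desc) + desc->length, GFP_KERNEL);
        if (!curr)
                return NULL;

Second, the crc/nbytes declarations move above the struct initializers because the new `.crc = &crc` designated initializer requires crc to be declared before the initializer that takes its address.
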
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.h b/drivers/media/test-drivers/vidtv/vidtv_ts.h
index f5e8e1f37f05..09b4ffd02829 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_ts.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.h
@@ -44,7 +44,6 @@ struct vidtv_mpeg_ts {
u8 adaptation_field:1;
u8 scrambling:2;
} __packed;
- struct vidtv_mpeg_ts_adaption *adaption;
} __packed;
/**
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 11e3b5617f52..7957eadf3e2b 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -100,6 +100,14 @@
/* General User Controls */
+static void vivid_unregister_dev(bool valid, struct video_device *vdev)
+{
+ if (!valid)
+ return;
+ clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ v4l2_event_wake_all(vdev);
+}
+
static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_gen);
@@ -108,26 +116,16 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
case VIVID_CID_DISCONNECT:
v4l2_info(&dev->v4l2_dev, "disconnect\n");
dev->disconnect_error = true;
- if (dev->has_vid_cap)
- clear_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags);
- if (dev->has_vid_out)
- clear_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags);
- if (dev->has_vbi_cap)
- clear_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags);
- if (dev->has_vbi_out)
- clear_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags);
- if (dev->has_radio_rx)
- clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
- if (dev->has_radio_tx)
- clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
- if (dev->has_sdr_cap)
- clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
- if (dev->has_meta_cap)
- clear_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
- if (dev->has_meta_out)
- clear_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
- if (dev->has_touch_cap)
- clear_bit(V4L2_FL_REGISTERED, &dev->touch_cap_dev.flags);
+ vivid_unregister_dev(dev->has_vid_cap, &dev->vid_cap_dev);
+ vivid_unregister_dev(dev->has_vid_out, &dev->vid_out_dev);
+ vivid_unregister_dev(dev->has_vbi_cap, &dev->vbi_cap_dev);
+ vivid_unregister_dev(dev->has_vbi_out, &dev->vbi_out_dev);
+ vivid_unregister_dev(dev->has_radio_rx, &dev->radio_rx_dev);
+ vivid_unregister_dev(dev->has_radio_tx, &dev->radio_tx_dev);
+ vivid_unregister_dev(dev->has_sdr_cap, &dev->sdr_cap_dev);
+ vivid_unregister_dev(dev->has_meta_cap, &dev->meta_cap_dev);
+ vivid_unregister_dev(dev->has_meta_out, &dev->meta_out_dev);
+ vivid_unregister_dev(dev->has_touch_cap, &dev->touch_cap_dev);
break;
case VIVID_CID_BUTTON:
dev->button_pressed = 30;
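
The vivid hunk collapses ten copy-pasted if/clear_bit() blocks into one guard-clause helper. It is not purely mechanical: the helper additionally calls v4l2_event_wake_all(), so anything sleeping in poll() on a device node is woken when the simulated disconnect hides it. The shape, restated with descriptive comments:

        static void vivid_unregister_dev(bool valid, struct video_device *vdev)
        {
                if (!valid)
                        return;                              /* guard clause */
                clear_bit(V4L2_FL_REGISTERED, &vdev->flags); /* hide the node */
                v4l2_event_wake_all(vdev);                   /* kick pollers */
        }
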
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index e8e66390be41..7696a28fe407 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -62,6 +62,7 @@ static int it913x_init(struct dvb_frontend *fe)
break;
default:
dev_err(&pdev->dev, "unknown clock identifier %d\n", utmp);
+ ret = -EINVAL;
goto err;
}
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 0e26d22f0b26..53aa2558f71e 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
if (val == reg_initval[reg_index][0x00])
break;
}
- if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
+ if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
+ ret = -EINVAL;
goto failed;
+ }
memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
usleep_range(2000, 3000);
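
The qm1d1c0042 fix is the classic goto-cleanup pitfall: jumping to the error label while `ret` still holds 0 from an earlier successful call makes the function report success on a failure path. The rule is to assign the error code immediately before the jump:

        /* buggy shape: ret may still be 0 here */
        if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
                goto failed;            /* caller sees "success" */

        /* fixed shape */
        if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
                ret = -EINVAL;
                goto failed;
        }
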
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 2fe2b2d335ba..b80661b8375f 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -18,6 +18,7 @@ config VIDEO_CX231XX_RC
bool "Conexant cx231xx Remote Controller additional support"
depends on RC_CORE=y || RC_CORE=VIDEO_CX231XX
depends on VIDEO_CX231XX
+ select BITREVERSE
default y
help
cx231xx hardware has builtin RX/TX support. However, a few
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index c70b3cef3176..d33514acc2b5 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -51,6 +51,7 @@ static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
if (((req->addr & 0xff00) == 0xff00) ||
((req->addr & 0xff00) == 0xae00))
state->buf[0] = WRITE_VIRTUAL_MEMORY;
+ break;
case WRITE_VIRTUAL_MEMORY:
case COPY_FIRMWARE:
case DOWNLOAD_FIRMWARE:
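
The af9015 one-liner stops an unintended switch fall-through: without the `break`, execution continued into the WRITE_VIRTUAL_MEMORY handling below. Where fall-through *is* intended, current kernel practice is to say so with the `fallthrough;` pseudo-keyword so -Wimplicit-fallthrough can flag the accidental cases. A hypothetical sketch (case names invented):

        switch (cmd) {
        case FIXUP_THEN_WRITE:
                buf[0] = WRITE_MEM;     /* rewrite the command... */
                fallthrough;            /* ...and deliberately share the path */
        case WRITE_MEM:
                do_write(buf);
                break;
        }
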
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 5a7a9522d46d..1b6d4e4c52ca 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -336,6 +336,7 @@ static void lme2510_int_response(struct urb *lme_urb)
st->signal_level = ibuf[5];
st->signal_sn = ibuf[4];
st->time_key = ibuf[7];
+ break;
default:
break;
}
@@ -373,7 +374,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
struct lme2510_state *lme_int = adap_to_priv(adap);
struct usb_host_endpoint *ep;
- lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL);
if (lme_int->lme_urb == NULL)
return -ENOMEM;
@@ -391,9 +392,9 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
- lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
+ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
- usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
info("INT Interrupt Service Started");
return 0;
@@ -751,20 +752,6 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
return fw_lme;
}
-static int lme2510_kill_urb(struct usb_data_stream *stream)
-{
- int i;
-
- for (i = 0; i < stream->urbs_submitted; i++) {
- deb_info(3, "killing URB no. %d.", i);
- /* stop the URB */
- usb_kill_urb(stream->urb_list[i]);
- }
- stream->urbs_submitted = 0;
-
- return 0;
-}
-
static struct tda10086_config tda10086_config = {
.demod_address = 0x0e,
.invert = 0,
@@ -1198,11 +1185,6 @@ static int lme2510_get_rc_config(struct dvb_usb_device *d,
static void lme2510_exit(struct dvb_usb_device *d)
{
struct lme2510_state *st = d->priv;
- struct dvb_usb_adapter *adap = &d->adapter[0];
-
- if (adap != NULL) {
- lme2510_kill_urb(&adap->stream);
- }
if (st->lme_urb) {
usb_kill_urb(st->lme_urb);
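
Three fixes travel together in the lmedm04 hunks. This code runs in process context during adapter setup, so GFP_KERNEL is the right allocation class; the now-unneeded lme2510_kill_urb() goes away with its only caller; and a stray comma operator becomes a semicolon. The comma was not cosmetic — it glued the URB submission into the if body:

        /* before: the comma makes both expressions the if statement's body,
         * so the URB was only ever submitted for bulk endpoints */
        if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
                lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
        usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);

        /* after: assignment conditional, submission unconditional */
        if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
                lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
        usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
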
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 3952cc534b4a..97ed17a141bb 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -944,12 +944,6 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
if (dev->slave_demod) {
struct i2c_board_info info = {};
- /*
- * We continue on reduced mode, without DVB-T2/C, using master
- * demod, when slave demod fails.
- */
- ret = 0;
-
/* attach slave demodulator */
if (dev->slave_demod == SLAVE_DEMOD_MN88472) {
struct mn88472_config mn88472_config = {};
@@ -964,14 +958,11 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.platform_data = &mn88472_config;
request_module(info.type);
client = i2c_new_client_device(&d->i2c_adap, &info);
- if (!i2c_client_has_driver(client)) {
- dev->slave_demod = SLAVE_DEMOD_NONE;
+ if (!i2c_client_has_driver(client))
goto err_slave_demod_failed;
- }
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
- dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -986,14 +977,11 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.platform_data = &mn88473_config;
request_module(info.type);
client = i2c_new_client_device(&d->i2c_adap, &info);
- if (!i2c_client_has_driver(client)) {
- dev->slave_demod = SLAVE_DEMOD_NONE;
+ if (!i2c_client_has_driver(client))
goto err_slave_demod_failed;
- }
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
- dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -1009,10 +997,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe[1] = dvb_attach(cxd2841er_attach_t_c,
&cxd2837er_config,
&d->i2c_adap);
- if (!adap->fe[1]) {
- dev->slave_demod = SLAVE_DEMOD_NONE;
+ if (!adap->fe[1])
goto err_slave_demod_failed;
- }
adap->fe[1]->id = 1;
dev->i2c_client_slave_demod = NULL;
} else {
@@ -1029,14 +1015,11 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.platform_data = &si2168_config;
request_module(info.type);
client = i2c_new_client_device(&d->i2c_adap, &info);
- if (!i2c_client_has_driver(client)) {
- dev->slave_demod = SLAVE_DEMOD_NONE;
+ if (!i2c_client_has_driver(client))
goto err_slave_demod_failed;
- }
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
- dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -1047,10 +1030,18 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
}
}
return 0;
-err_slave_demod_failed:
+
err:
dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
+
+err_slave_demod_failed:
+ /*
+ * We continue on reduced mode, without DVB-T2/C, using master
+ * demod, when slave demod fails.
+ */
+ dev->slave_demod = SLAVE_DEMOD_NONE;
+ return 0;
}
static int rtl28xxu_frontend_attach(struct dvb_usb_adapter *adap)
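
The rtl28xxu rework replaces six duplicated `dev->slave_demod = SLAVE_DEMOD_NONE;` assignments with one error label, and in doing so makes the recovery policy explicit: a slave-demod attach failure is non-fatal and degrades to master-demod-only operation. The resulting two-label shape, condensed:

                ...
                return 0;

        err:                            /* hard failure: abort the attach */
                dev_dbg(&d->intf->dev, "failed=%d\n", ret);
                return ret;

        err_slave_demod_failed:         /* soft failure: run without DVB-T2/C */
                dev->slave_demod = SLAVE_DEMOD_NONE;
                return 0;
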
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index f2031a933e54..b3c472b8c5a9 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -67,6 +67,7 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MXL692 if MEDIA_SUBDRV_AUTOSELECT
help
This adds support for DVB cards based on the
Empiatech em28xx chips.
@@ -77,5 +78,6 @@ config VIDEO_EM28XX_RC
depends on VIDEO_EM28XX
depends on !(RC_CORE=m && VIDEO_EM28XX=y)
default VIDEO_EM28XX
+ select BITREVERSE
help
Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5144888ae36f..d6c8ae213914 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -549,6 +549,21 @@ static const struct em28xx_reg_seq hauppauge_dualhd_dvb[] = {
{-1, -1, -1, -1},
};
+/* Hauppauge USB QuadHD */
+static struct em28xx_reg_seq hauppauge_usb_quadhd_atsc_reg_seq[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
+ {0x0d, 0xff, 0xff, 200},
+ {0x50, 0x04, 0xff, 300},
+ {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xf0, 100}, /* demod 1 reset */
+ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xd0, 0xf0, 100}, /* demod 2 reset */
+ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100},
+ {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50},
+ {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50},
+ {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50},
+ {-1, -1, -1, -1},
+};
+
/*
* Button definitions
*/
@@ -644,6 +659,22 @@ static struct em28xx_led hauppauge_dualhd_leds[] = {
{-1, 0, 0, 0},
};
+static struct em28xx_led hauppauge_usb_quadhd_leds[] = {
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = EM_GPIO_2,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING_TS2,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = EM_GPIO_0,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -2539,6 +2570,19 @@ const struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
+ /* 2040:846d Hauppauge USB QuadHD
+ * Empia 28274, Max Linear 692 ATSC combo demod/tuner
+ */
+ [EM2874_BOARD_HAUPPAUGE_USB_QUADHD] = {
+ .name = "Hauppauge USB QuadHD ATSC",
+ .def_i2c_bus = 1,
+ .has_dual_ts = 1,
+ .has_dvb = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = hauppauge_usb_quadhd_atsc_reg_seq,
+ .leds = hauppauge_usb_quadhd_leds,
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2672,6 +2716,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
{ USB_DEVICE(0x2040, 0x826d),
.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
+ { USB_DEVICE(0x2040, 0x846d),
+ .driver_info = EM2874_BOARD_HAUPPAUGE_USB_QUADHD },
{ USB_DEVICE(0x0438, 0xb002),
.driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
{ USB_DEVICE(0x2001, 0xf112),
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index e6088b5d1b80..584fa400cd7d 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -698,8 +698,10 @@ int em28xx_capture_start(struct em28xx *dev, int start)
if (dev->mode == EM28XX_ANALOG_MODE)
led = em28xx_find_led(dev, EM28XX_LED_ANALOG_CAPTURING);
- else
+ else if (dev->ts == PRIMARY_TS)
led = em28xx_find_led(dev, EM28XX_LED_DIGITAL_CAPTURING);
+ else
+ led = em28xx_find_led(dev, EM28XX_LED_DIGITAL_CAPTURING_TS2);
if (led)
em28xx_write_reg_bits(dev, led->gpio_reg,
@@ -956,14 +958,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
if (!usb_bufs->buf[i]) {
- em28xx_uninit_usb_xfer(dev, mode);
-
for (i--; i >= 0; i--)
kfree(usb_bufs->buf[i]);
- kfree(usb_bufs->buf);
- usb_bufs->buf = NULL;
-
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
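
The em28xx-core hunk is an unwind-ordering fix. The old error path ran em28xx_uninit_usb_xfer() first, then walked usb_bufs->buf[i] and freed usb_bufs->buf again — a use-after-free plus double free. Cleanup must run in reverse order of construction, per-element before container:

        /* free the elements allocated so far... */
        for (i--; i >= 0; i--)
                kfree(usb_bufs->buf[i]);
        /* ...then let the generic teardown free usb_bufs->buf itself */
        em28xx_uninit_usb_xfer(dev, mode);
        return -ENOMEM;
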
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index fb9cbfa81a84..526424279637 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -62,6 +62,7 @@
#include "si2157.h"
#include "tc90522.h"
#include "qm1d1c0042.h"
+#include "mxl692.h"
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL v2");
@@ -1459,6 +1460,26 @@ static int em28174_dvb_init_hauppauge_wintv_dualhd_01595(struct em28xx *dev)
return 0;
}
+static int em2874_dvb_init_hauppauge_usb_quadhd(struct em28xx *dev)
+{
+ struct em28xx_dvb *dvb = dev->dvb;
+ struct mxl692_config mxl692_config = {};
+ unsigned char addr;
+
+ /* attach demod/tuner combo */
+ mxl692_config.id = (dev->ts == PRIMARY_TS) ? 0 : 1;
+ mxl692_config.fe = &dvb->fe[0];
+ addr = (dev->ts == PRIMARY_TS) ? 0x60 : 0x63;
+
+ dvb->i2c_client_demod = dvb_module_probe("mxl692", NULL,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ addr, &mxl692_config);
+ if (!dvb->i2c_client_demod)
+ return -ENODEV;
+
+ return 0;
+}
+
static int em28xx_dvb_init(struct em28xx *dev)
{
int result = 0, dvb_alt = 0;
@@ -1945,6 +1966,11 @@ static int em28xx_dvb_init(struct em28xx *dev)
if (result)
goto out_free;
break;
+ case EM2874_BOARD_HAUPPAUGE_USB_QUADHD:
+ result = em2874_dvb_init_hauppauge_usb_quadhd(dev);
+ if (result)
+ goto out_free;
+ break;
default:
dev_err(&dev->intf->dev,
"The frontend of your DVB/ATSC card isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 592b98b3643a..255395959255 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -294,6 +294,10 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
"reading from i2c device at 0x%x failed (error=%i)\n",
addr, ret);
return ret;
+ } else if (ret != len) {
+ dev_dbg(&dev->intf->dev,
+ "%i bytes read from i2c device at 0x%x requested, but %i bytes written\n",
+ ret, addr, len);
}
/*
* NOTE: some devices with two i2c buses have the bad habit to return 0
@@ -329,7 +333,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
}
dev_warn(&dev->intf->dev,
- "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ "read from i2c device at 0x%x failed with unknown error (status=%i)\n",
addr, ret);
return -EIO;
}
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 55a46faaf7b7..6648e11f1271 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -152,6 +152,7 @@
#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
#define EM28178_BOARD_PCTV_461E_V2 104
#define EM2860_BOARD_MYGICA_IGRABBER 105
+#define EM2874_BOARD_HAUPPAUGE_USB_QUADHD 106
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 61869636ec61..5e3339cc31c0 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -155,16 +155,17 @@ static const struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
-static void *pwc_alloc_urb_buffer(struct device *dev,
+static void *pwc_alloc_urb_buffer(struct usb_device *dev,
size_t size, dma_addr_t *dma_handle)
{
+ struct device *dmadev = dev->bus->sysdev;
void *buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
return NULL;
- *dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *dma_handle)) {
+ *dma_handle = dma_map_single(dmadev, buffer, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dmadev, *dma_handle)) {
kfree(buffer);
return NULL;
}
@@ -172,12 +173,14 @@ static void *pwc_alloc_urb_buffer(struct device *dev,
return buffer;
}
-static void pwc_free_urb_buffer(struct device *dev,
+static void pwc_free_urb_buffer(struct usb_device *dev,
size_t size,
void *buffer,
dma_addr_t dma_handle)
{
- dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE);
+ struct device *dmadev = dev->bus->sysdev;
+
+ dma_unmap_single(dmadev, dma_handle, size, DMA_FROM_DEVICE);
kfree(buffer);
}
@@ -282,6 +285,7 @@ static void pwc_frame_complete(struct pwc_device *pdev)
static void pwc_isoc_handler(struct urb *urb)
{
struct pwc_device *pdev = (struct pwc_device *)urb->context;
+ struct device *dmadev = urb->dev->bus->sysdev;
int i, fst, flen;
unsigned char *iso_buf = NULL;
@@ -328,7 +332,7 @@ static void pwc_isoc_handler(struct urb *urb)
/* Reset ISOC error counter. We did get here, after all. */
pdev->visoc_errors = 0;
- dma_sync_single_for_cpu(&urb->dev->dev,
+ dma_sync_single_for_cpu(dmadev,
urb->transfer_dma,
urb->transfer_buffer_length,
DMA_FROM_DEVICE);
@@ -379,7 +383,7 @@ static void pwc_isoc_handler(struct urb *urb)
pdev->vlast_packet_size = flen;
}
- dma_sync_single_for_device(&urb->dev->dev,
+ dma_sync_single_for_device(dmadev,
urb->transfer_dma,
urb->transfer_buffer_length,
DMA_FROM_DEVICE);
@@ -461,7 +465,7 @@ retry:
urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer_length = ISO_BUFFER_SIZE;
- urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev,
+ urb->transfer_buffer = pwc_alloc_urb_buffer(udev,
urb->transfer_buffer_length,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
@@ -524,7 +528,7 @@ static void pwc_iso_free(struct pwc_device *pdev)
if (urb) {
PWC_DEBUG_MEMORY("Freeing URB\n");
if (urb->transfer_buffer)
- pwc_free_urb_buffer(&urb->dev->dev,
+ pwc_free_urb_buffer(urb->dev,
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
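
The pwc conversion is about which `struct device` DMA mappings are created against. For USB, the memory is mastered by the host controller, reachable as udev->bus->sysdev; the usb_device's own struct device has no DMA ops, so mapping against it misbehaves once an IOMMU or non-trivial dma-direct setup is involved. The essential shape:

        /* The HCD, not the USB device, performs the DMA. */
        struct device *dmadev = udev->bus->sysdev;

        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return NULL;
        *dma_handle = dma_map_single(dmadev, buf, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dmadev, *dma_handle)) {
                kfree(buf);
                return NULL;
        }

The same dmadev must then be used for dma_sync_single_for_cpu()/_for_device() in the completion handler and for dma_unmap_single() when freeing, as the hunks above do.
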
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 19c90fa9e443..293a460f4616 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
if (ret < 0) {
printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
ret, __func__);
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return ret;
} else
printk(KERN_ERR "tm6000: pipe reset\n");
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 011e69427b7c..b3dde98499f4 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -347,6 +347,14 @@ static const struct uvc_control_info uvc_ctrls[] = {
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
+ {
+ .entity = UVC_GUID_EXT_GPIO_CONTROLLER,
+ .selector = UVC_CT_PRIVACY_CONTROL,
+ .index = 0,
+ .size = 1,
+ .flags = UVC_CTRL_FLAG_GET_CUR
+ | UVC_CTRL_FLAG_AUTO_UPDATE,
+ },
};
static const struct uvc_menu_info power_line_frequency_controls[] = {
@@ -735,6 +743,16 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
+ {
+ .id = V4L2_CID_PRIVACY,
+ .name = "Privacy",
+ .entity = UVC_GUID_EXT_GPIO_CONTROLLER,
+ .selector = UVC_CT_PRIVACY_CONTROL,
+ .size = 1,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
+ .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ },
};
/* ------------------------------------------------------------------------
@@ -826,31 +844,10 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping,
* Terminal and unit management
*/
-static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
-static const u8 uvc_media_transport_input_guid[16] =
- UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
-
static int uvc_entity_match_guid(const struct uvc_entity *entity,
- const u8 guid[16])
+ const u8 guid[16])
{
- switch (UVC_ENTITY_TYPE(entity)) {
- case UVC_ITT_CAMERA:
- return memcmp(uvc_camera_guid, guid, 16) == 0;
-
- case UVC_ITT_MEDIA_TRANSPORT_INPUT:
- return memcmp(uvc_media_transport_input_guid, guid, 16) == 0;
-
- case UVC_VC_PROCESSING_UNIT:
- return memcmp(uvc_processing_guid, guid, 16) == 0;
-
- case UVC_VC_EXTENSION_UNIT:
- return memcmp(entity->extension.guidExtensionCode,
- guid, 16) == 0;
-
- default:
- return 0;
- }
+ return memcmp(entity->guid, guid, sizeof(entity->guid)) == 0;
}
/* ------------------------------------------------------------------------
@@ -909,8 +906,8 @@ static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
}
if (ctrl == NULL && !next)
- uvc_trace(UVC_TRACE_CONTROL, "Control 0x%08x not found.\n",
- v4l2_id);
+ uvc_dbg(chain->dev, CONTROL, "Control 0x%08x not found\n",
+ v4l2_id);
return ctrl;
}
@@ -1001,10 +998,20 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
return -EACCES;
if (!ctrl->loaded) {
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info.selector,
+ if (ctrl->entity->get_cur) {
+ ret = ctrl->entity->get_cur(chain->dev,
+ ctrl->entity,
+ ctrl->info.selector,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
+ ctrl->info.size);
+ } else {
+ ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
+ ctrl->entity->id,
+ chain->dev->intfnum,
+ ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
+ }
if (ret < 0)
return ret;
@@ -1275,17 +1282,12 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
-static void uvc_ctrl_status_event_work(struct work_struct *work)
+void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
{
- struct uvc_device *dev = container_of(work, struct uvc_device,
- async_ctrl.work);
- struct uvc_ctrl_work *w = &dev->async_ctrl;
- struct uvc_video_chain *chain = w->chain;
struct uvc_control_mapping *mapping;
- struct uvc_control *ctrl = w->ctrl;
struct uvc_fh *handle;
unsigned int i;
- int ret;
mutex_lock(&chain->ctrl_mutex);
@@ -1293,7 +1295,7 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
ctrl->handle = NULL;
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
- s32 value = __uvc_ctrl_get_value(mapping, w->data);
+ s32 value = __uvc_ctrl_get_value(mapping, data);
/*
* handle may be NULL here if the device sends auto-update
@@ -1312,17 +1314,27 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
}
mutex_unlock(&chain->ctrl_mutex);
+}
+
+static void uvc_ctrl_status_event_work(struct work_struct *work)
+{
+ struct uvc_device *dev = container_of(work, struct uvc_device,
+ async_ctrl.work);
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+ int ret;
+
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
/* Resubmit the URB. */
w->urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(w->urb, GFP_KERNEL);
if (ret < 0)
- uvc_printk(KERN_ERR, "Failed to resubmit status URB (%d).\n",
- ret);
+ dev_err(&dev->udev->dev,
+ "Failed to resubmit status URB (%d).\n", ret);
}
-bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
- struct uvc_control *ctrl, const u8 *data)
+bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
{
struct uvc_device *dev = chain->dev;
struct uvc_ctrl_work *w = &dev->async_ctrl;
@@ -1708,8 +1720,12 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
if (data == NULL)
return -ENOMEM;
- ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
- info->selector, data, 1);
+ if (ctrl->entity->get_info)
+ ret = ctrl->entity->get_info(dev, ctrl->entity,
+ ctrl->info.selector, data);
+ else
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
+ dev->intfnum, info->selector, data, 1);
if (!ret)
info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
UVC_CTRL_FLAG_GET_CUR : 0)
@@ -1776,8 +1792,7 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
if (data == NULL)
return -ENOMEM;
- memcpy(info->entity, ctrl->entity->extension.guidExtensionCode,
- sizeof(info->entity));
+ memcpy(info->entity, ctrl->entity->guid, sizeof(info->entity));
info->index = ctrl->index;
info->selector = ctrl->index + 1;
@@ -1785,9 +1800,9 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum,
info->selector, data, 2);
if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "GET_LEN failed on control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
+ uvc_dbg(dev, CONTROL,
+ "GET_LEN failed on control %pUl/%u (%d)\n",
+ info->entity, info->selector, ret);
goto done;
}
@@ -1798,20 +1813,20 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
ret = uvc_ctrl_get_flags(dev, ctrl, info);
if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Failed to get flags for control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
+ uvc_dbg(dev, CONTROL,
+ "Failed to get flags for control %pUl/%u (%d)\n",
+ info->entity, info->selector, ret);
goto done;
}
uvc_ctrl_fixup_xu_info(dev, ctrl, info);
- uvc_trace(UVC_TRACE_CONTROL, "XU control %pUl/%u queried: len %u, "
- "flags { get %u set %u auto %u }.\n",
- info->entity, info->selector, info->size,
- (info->flags & UVC_CTRL_FLAG_GET_CUR) ? 1 : 0,
- (info->flags & UVC_CTRL_FLAG_SET_CUR) ? 1 : 0,
- (info->flags & UVC_CTRL_FLAG_AUTO_UPDATE) ? 1 : 0);
+ uvc_dbg(dev, CONTROL,
+ "XU control %pUl/%u queried: len %u, flags { get %u set %u auto %u }\n",
+ info->entity, info->selector, info->size,
+ (info->flags & UVC_CTRL_FLAG_GET_CUR) ? 1 : 0,
+ (info->flags & UVC_CTRL_FLAG_SET_CUR) ? 1 : 0,
+ (info->flags & UVC_CTRL_FLAG_AUTO_UPDATE) ? 1 : 0);
done:
kfree(data);
@@ -1836,9 +1851,10 @@ static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
ret = uvc_ctrl_add_info(dev, ctrl, &info);
if (ret < 0)
- uvc_trace(UVC_TRACE_CONTROL, "Failed to initialize control "
- "%pUl/%u on device %s entity %u\n", info.entity,
- info.selector, dev->udev->devpath, ctrl->entity->id);
+ uvc_dbg(dev, CONTROL,
+ "Failed to initialize control %pUl/%u on device %s entity %u\n",
+ info.entity, info.selector, dev->udev->devpath,
+ ctrl->entity->id);
return ret;
}
@@ -1866,7 +1882,7 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
}
if (!found) {
- uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
+ uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n",
xqry->unit);
return -ENOENT;
}
@@ -1882,8 +1898,8 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
}
if (!found) {
- uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u not found.\n",
- entity->extension.guidExtensionCode, xqry->selector);
+ uvc_dbg(chain->dev, CONTROL, "Control %pUl/%u not found\n",
+ entity->guid, xqry->selector);
return -ENOENT;
}
@@ -1995,10 +2011,10 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
-
- printk(KERN_INFO "restoring control %pUl/%u/%u\n",
- ctrl->info.entity, ctrl->info.index,
- ctrl->info.selector);
+ dev_info(&dev->udev->dev,
+ "restoring control %pUl/%u/%u\n",
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
@@ -2031,9 +2047,9 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
ctrl->initialized = 1;
- uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
- "entity %u\n", ctrl->info.entity, ctrl->info.selector,
- dev->udev->devpath, ctrl->entity->id);
+ uvc_dbg(dev, CONTROL, "Added control %pUl/%u to device %s entity %u\n",
+ ctrl->info.entity, ctrl->info.selector, dev->udev->devpath,
+ ctrl->entity->id);
return 0;
}
@@ -2070,8 +2086,7 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
map->set = uvc_set_le_value;
list_add_tail(&map->list, &ctrl->info.mappings);
- uvc_trace(UVC_TRACE_CONTROL,
- "Adding mapping '%s' to control %pUl/%u.\n",
+ uvc_dbg(dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
map->name, ctrl->info.entity, ctrl->info.selector);
return 0;
@@ -2088,9 +2103,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
int ret;
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
- uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control "
- "id 0x%08x is invalid.\n", mapping->name,
- mapping->id);
+ uvc_dbg(dev, CONTROL,
+ "Can't add mapping '%s', control id 0x%08x is invalid\n",
+ mapping->name, mapping->id);
return -EINVAL;
}
@@ -2135,8 +2150,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
- uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
- "control id 0x%08x already exists.\n",
+ uvc_dbg(dev, CONTROL,
+ "Can't add mapping '%s', control id 0x%08x already exists\n",
mapping->name, mapping->id);
ret = -EEXIST;
goto done;
@@ -2146,9 +2161,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
/* Prevent excess memory consumption */
if (atomic_inc_return(&dev->nmappings) > UVC_MAX_CONTROL_MAPPINGS) {
atomic_dec(&dev->nmappings);
- uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum "
- "mappings count (%u) exceeded.\n", mapping->name,
- UVC_MAX_CONTROL_MAPPINGS);
+ uvc_dbg(dev, CONTROL,
+ "Can't add mapping '%s', maximum mappings count (%u) exceeded\n",
+ mapping->name, UVC_MAX_CONTROL_MAPPINGS);
ret = -ENOMEM;
goto done;
}
@@ -2217,8 +2232,9 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
!uvc_test_bit(controls, blacklist[i].index))
continue;
- uvc_trace(UVC_TRACE_CONTROL, "%u/%u control is black listed, "
- "removing it.\n", entity->id, blacklist[i].index);
+ uvc_dbg(dev, CONTROL,
+ "%u/%u control is black listed, removing it\n",
+ entity->id, blacklist[i].index);
uvc_clear_bit(controls, blacklist[i].index);
}
@@ -2294,6 +2310,9 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
} else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) {
bmControls = entity->camera.bmControls;
bControlSize = entity->camera.bControlSize;
+ } else if (UVC_ENTITY_TYPE(entity) == UVC_EXT_GPIO_UNIT) {
+ bmControls = entity->gpio.bmControls;
+ bControlSize = entity->gpio.bControlSize;
}
/* Remove bogus/blacklisted controls */
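
The through-line of the uvc_ctrl.c changes: entities gain optional get_cur/get_info hooks, so a control query is dispatched to the entity itself when it can answer, and only falls back to a USB control request otherwise. That is what lets the GPIO unit added in uvc_driver.c below behave like a camera control without any UVC traffic. The dispatch, condensed:

        /* Prefer the entity's own accessor; else issue a UVC GET_CUR. */
        if (ctrl->entity->get_cur)
                ret = ctrl->entity->get_cur(chain->dev, ctrl->entity,
                                            ctrl->info.selector, data, size);
        else
                ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
                                     chain->dev->intfnum, ctrl->info.selector,
                                     data, size);

Splitting uvc_ctrl_status_event() out of the work handler serves the same unit: the GPIO IRQ thread can push a control-change event through the normal status path, while the USB status endpoint keeps its URB-resubmitting wrapper, renamed uvc_ctrl_status_event_async().
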
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index ddb9eaa11be7..30ef2a3110f7 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -7,6 +7,7 @@
*/
#include <linux/atomic.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -31,7 +32,7 @@ unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_hw_timestamps_param;
unsigned int uvc_no_drop_param;
static unsigned int uvc_quirks_param = -1;
-unsigned int uvc_trace_param;
+unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
/* ------------------------------------------------------------------------
@@ -519,10 +520,10 @@ static int uvc_parse_format(struct uvc_device *dev,
case UVC_VS_FORMAT_FRAME_BASED:
n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28;
if (buflen < n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d FORMAT error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FORMAT error\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -534,8 +535,8 @@ static int uvc_parse_format(struct uvc_device *dev,
sizeof(format->name));
format->fcc = fmtdesc->fcc;
} else {
- uvc_printk(KERN_INFO, "Unknown video format %pUl\n",
- &buffer[5]);
+ dev_info(&streaming->intf->dev,
+ "Unknown video format %pUl\n", &buffer[5]);
snprintf(format->name, sizeof(format->name), "%pUl\n",
&buffer[5]);
format->fcc = 0;
@@ -583,10 +584,10 @@ static int uvc_parse_format(struct uvc_device *dev,
case UVC_VS_FORMAT_MJPEG:
if (buflen < 11) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d FORMAT error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FORMAT error\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -599,10 +600,10 @@ static int uvc_parse_format(struct uvc_device *dev,
case UVC_VS_FORMAT_DV:
if (buflen < 9) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d FORMAT error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FORMAT error\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -617,10 +618,10 @@ static int uvc_parse_format(struct uvc_device *dev,
strscpy(format->name, "HD-DV", sizeof(format->name));
break;
default:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d: unknown DV format %u\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber, buffer[8]);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d: unknown DV format %u\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber, buffer[8]);
return -EINVAL;
}
@@ -646,14 +647,14 @@ static int uvc_parse_format(struct uvc_device *dev,
case UVC_VS_FORMAT_STREAM_BASED:
/* Not supported yet. */
default:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d unsupported format %u\n",
- dev->udev->devnum, alts->desc.bInterfaceNumber,
- buffer[2]);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d unsupported format %u\n",
+ dev->udev->devnum, alts->desc.bInterfaceNumber,
+ buffer[2]);
return -EINVAL;
}
- uvc_trace(UVC_TRACE_DESCR, "Found format %s.\n", format->name);
+ uvc_dbg(dev, DESCR, "Found format %s\n", format->name);
buflen -= buffer[0];
buffer += buffer[0];
@@ -672,9 +673,10 @@ static int uvc_parse_format(struct uvc_device *dev,
n = n ? n : 3;
if (buflen < 26 + 4*n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d FRAME error\n", dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FRAME error\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -736,10 +738,10 @@ static int uvc_parse_format(struct uvc_device *dev,
frame->dwDefaultFrameInterval;
}
- uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
+ uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
frame->wWidth, frame->wHeight,
- 10000000/frame->dwDefaultFrameInterval,
- (100000000/frame->dwDefaultFrameInterval)%10);
+ 10000000 / frame->dwDefaultFrameInterval,
+ (100000000 / frame->dwDefaultFrameInterval) % 10);
format->nframes++;
buflen -= buffer[0];
@@ -755,10 +757,10 @@ static int uvc_parse_format(struct uvc_device *dev,
if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
buffer[2] == UVC_VS_COLORFORMAT) {
if (buflen < 6) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d COLORFORMAT error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d COLORFORMAT error\n",
+ dev->udev->devnum,
+ alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -790,15 +792,17 @@ static int uvc_parse_streaming(struct uvc_device *dev,
if (intf->cur_altsetting->desc.bInterfaceSubClass
!= UVC_SC_VIDEOSTREAMING) {
- uvc_trace(UVC_TRACE_DESCR, "device %d interface %d isn't a "
- "video streaming interface\n", dev->udev->devnum,
+ uvc_dbg(dev, DESCR,
+ "device %d interface %d isn't a video streaming interface\n",
+ dev->udev->devnum,
intf->altsetting[0].desc.bInterfaceNumber);
return -EINVAL;
}
if (usb_driver_claim_interface(&uvc_driver.driver, intf, dev)) {
- uvc_trace(UVC_TRACE_DESCR, "device %d interface %d is already "
- "claimed\n", dev->udev->devnum,
+ uvc_dbg(dev, DESCR,
+ "device %d interface %d is already claimed\n",
+ dev->udev->devnum,
intf->altsetting[0].desc.bInterfaceNumber);
return -EINVAL;
}
@@ -821,8 +825,9 @@ static int uvc_parse_streaming(struct uvc_device *dev,
if (ep->extralen > 2 &&
ep->extra[1] == USB_DT_CS_INTERFACE) {
- uvc_trace(UVC_TRACE_DESCR, "trying extra data "
- "from endpoint %u.\n", i);
+ uvc_dbg(dev, DESCR,
+ "trying extra data from endpoint %u\n",
+ i);
buffer = alts->endpoint[i].extra;
buflen = alts->endpoint[i].extralen;
break;
@@ -837,8 +842,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
}
if (buflen <= 2) {
- uvc_trace(UVC_TRACE_DESCR, "no class-specific streaming "
- "interface descriptors found.\n");
+ uvc_dbg(dev, DESCR,
+ "no class-specific streaming interface descriptors found\n");
goto error;
}
@@ -855,9 +860,9 @@ static int uvc_parse_streaming(struct uvc_device *dev,
break;
default:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
- "%d HEADER descriptor not found.\n", dev->udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d HEADER descriptor not found\n",
+ dev->udev->devnum, alts->desc.bInterfaceNumber);
goto error;
}
@@ -865,8 +870,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
n = buflen >= size ? buffer[size-1] : 0;
if (buflen < size + p*n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d HEADER descriptor is invalid.\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d HEADER descriptor is invalid\n",
dev->udev->devnum, alts->desc.bInterfaceNumber);
goto error;
}
@@ -917,8 +922,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
case UVC_VS_FORMAT_MPEG2TS:
case UVC_VS_FORMAT_STREAM_BASED:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
- "interface %d FORMAT %u is not supported.\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FORMAT %u is not supported\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber, _buffer[2]);
break;
@@ -942,8 +947,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
}
if (nformats == 0) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
- "%d has no supported formats defined.\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d has no supported formats defined\n",
dev->udev->devnum, alts->desc.bInterfaceNumber);
goto error;
}
@@ -991,8 +996,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
}
if (buflen)
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
- "%d has %u bytes of trailing descriptor garbage.\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d has %u bytes of trailing descriptor garbage\n",
dev->udev->devnum, alts->desc.bInterfaceNumber, buflen);
/* Parse the alternate settings to find the maximum bandwidth. */
@@ -1019,7 +1024,13 @@ error:
return ret;
}
-static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
+static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
+static const u8 uvc_gpio_guid[16] = UVC_GUID_EXT_GPIO_CONTROLLER;
+static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+
+static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
unsigned int num_pads, unsigned int extra_size)
{
struct uvc_entity *entity;
@@ -1028,7 +1039,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
unsigned int i;
extra_size = roundup(extra_size, sizeof(*entity->pads));
- num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+ else
+ num_inputs = 0;
size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
@@ -1038,13 +1052,32 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
entity->id = id;
entity->type = type;
+ /*
+ * Set the GUID for standard entity types. For extension units, the GUID
+ * is initialized by the caller.
+ */
+ switch (type) {
+ case UVC_EXT_GPIO_UNIT:
+ memcpy(entity->guid, uvc_gpio_guid, 16);
+ break;
+ case UVC_ITT_CAMERA:
+ memcpy(entity->guid, uvc_camera_guid, 16);
+ break;
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
+ memcpy(entity->guid, uvc_media_transport_input_guid, 16);
+ break;
+ case UVC_VC_PROCESSING_UNIT:
+ memcpy(entity->guid, uvc_processing_guid, 16);
+ break;
+ }
+
entity->num_links = 0;
entity->num_pads = num_pads;
entity->pads = ((void *)(entity + 1)) + extra_size;
for (i = 0; i < num_inputs; ++i)
entity->pads[i].flags = MEDIA_PAD_FL_SINK;
- if (!UVC_ENTITY_IS_OTERM(entity))
+ if (!UVC_ENTITY_IS_OTERM(entity) && num_pads)
entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
entity->bNrInPins = num_inputs;
@@ -1098,8 +1131,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
n = buflen >= 25 + p ? buffer[22+p] : 0;
if (buflen < 25 + p + 2*n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d EXTENSION_UNIT error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d EXTENSION_UNIT error\n",
udev->devnum, alts->desc.bInterfaceNumber);
break;
}
@@ -1109,7 +1142,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
if (unit == NULL)
return -ENOMEM;
- memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
+ memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
memcpy(unit->baSourceID, &buffer[22], p);
unit->extension.bControlSize = buffer[22+p];
@@ -1147,9 +1180,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
n = buflen >= 12 ? buffer[11] : 0;
if (buflen < 12 + n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d HEADER error\n", udev->devnum,
- alts->desc.bInterfaceNumber);
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d HEADER error\n",
+ udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1160,8 +1193,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
for (i = 0; i < n; ++i) {
intf = usb_ifnum_to_if(udev, buffer[12+i]);
if (intf == NULL) {
- uvc_trace(UVC_TRACE_DESCR, "device %d "
- "interface %d doesn't exists\n",
+ uvc_dbg(dev, DESCR,
+ "device %d interface %d doesn't exists\n",
udev->devnum, i);
continue;
}
@@ -1172,8 +1205,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
case UVC_VC_INPUT_TERMINAL:
if (buflen < 8) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d INPUT_TERMINAL error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d INPUT_TERMINAL error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1191,10 +1224,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
*/
type = get_unaligned_le16(&buffer[4]);
if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d INPUT_TERMINAL %d has invalid "
- "type 0x%04x, skipping\n", udev->devnum,
- alts->desc.bInterfaceNumber,
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d INPUT_TERMINAL %d has invalid type 0x%04x, skipping\n",
+ udev->devnum, alts->desc.bInterfaceNumber,
buffer[3], type);
return 0;
}
@@ -1214,8 +1246,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
}
if (buflen < len + n + p) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d INPUT_TERMINAL error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d INPUT_TERMINAL error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1261,8 +1293,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
case UVC_VC_OUTPUT_TERMINAL:
if (buflen < 9) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d OUTPUT_TERMINAL error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d OUTPUT_TERMINAL error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1272,10 +1304,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
*/
type = get_unaligned_le16(&buffer[4]);
if ((type & 0xff00) == 0) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d OUTPUT_TERMINAL %d has invalid "
- "type 0x%04x, skipping\n", udev->devnum,
- alts->desc.bInterfaceNumber, buffer[3], type);
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d OUTPUT_TERMINAL %d has invalid type 0x%04x, skipping\n",
+ udev->devnum, alts->desc.bInterfaceNumber,
+ buffer[3], type);
return 0;
}
@@ -1299,8 +1331,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
p = buflen >= 5 ? buffer[4] : 0;
if (buflen < 5 || buflen < 6 + p) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d SELECTOR_UNIT error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d SELECTOR_UNIT error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1325,8 +1357,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
p = dev->uvc_version >= 0x0110 ? 10 : 9;
if (buflen < p + n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d PROCESSING_UNIT error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d PROCESSING_UNIT error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1358,8 +1390,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
n = buflen >= 24 + p ? buffer[22+p] : 0;
if (buflen < 24 + p + n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
- "interface %d EXTENSION_UNIT error\n",
+ uvc_dbg(dev, DESCR,
+ "device %d videocontrol interface %d EXTENSION_UNIT error\n",
udev->devnum, alts->desc.bInterfaceNumber);
return -EINVAL;
}
@@ -1368,7 +1400,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
if (unit == NULL)
return -ENOMEM;
- memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
+ memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
memcpy(unit->baSourceID, &buffer[22], p);
unit->extension.bControlSize = buffer[22+p];
@@ -1385,8 +1417,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
break;
default:
- uvc_trace(UVC_TRACE_DESCR, "Found an unknown CS_INTERFACE "
- "descriptor (%u)\n", buffer[2]);
+ uvc_dbg(dev, DESCR,
+ "Found an unknown CS_INTERFACE descriptor (%u)\n",
+ buffer[2]);
break;
}
@@ -1431,8 +1464,9 @@ next_descriptor:
if (usb_endpoint_is_int_in(desc) &&
le16_to_cpu(desc->wMaxPacketSize) >= 8 &&
desc->bInterval != 0) {
- uvc_trace(UVC_TRACE_DESCR, "Found a Status endpoint "
- "(addr %02x).\n", desc->bEndpointAddress);
+ uvc_dbg(dev, DESCR,
+ "Found a Status endpoint (addr %02x)\n",
+ desc->bEndpointAddress);
dev->int_ep = ep;
}
}
@@ -1440,6 +1474,108 @@ next_descriptor:
return 0;
}
+/* -----------------------------------------------------------------------------
+ * Privacy GPIO
+ */
+
+static void uvc_gpio_event(struct uvc_device *dev)
+{
+ struct uvc_entity *unit = dev->gpio_unit;
+ struct uvc_video_chain *chain;
+ u8 new_val;
+
+ if (!unit)
+ return;
+
+ new_val = gpiod_get_value_cansleep(unit->gpio.gpio_privacy);
+
+ /* GPIO entities are always on the first chain. */
+ chain = list_first_entry(&dev->chains, struct uvc_video_chain, list);
+ uvc_ctrl_status_event(chain, unit->controls, &new_val);
+}
+
+static int uvc_gpio_get_cur(struct uvc_device *dev, struct uvc_entity *entity,
+ u8 cs, void *data, u16 size)
+{
+ if (cs != UVC_CT_PRIVACY_CONTROL || size < 1)
+ return -EINVAL;
+
+ *(u8 *)data = gpiod_get_value_cansleep(entity->gpio.gpio_privacy);
+
+ return 0;
+}
+
+static int uvc_gpio_get_info(struct uvc_device *dev, struct uvc_entity *entity,
+ u8 cs, u8 *caps)
+{
+ if (cs != UVC_CT_PRIVACY_CONTROL)
+ return -EINVAL;
+
+ *caps = UVC_CONTROL_CAP_GET | UVC_CONTROL_CAP_AUTOUPDATE;
+ return 0;
+}
+
+static irqreturn_t uvc_gpio_irq(int irq, void *data)
+{
+ struct uvc_device *dev = data;
+
+ uvc_gpio_event(dev);
+ return IRQ_HANDLED;
+}
+
+static int uvc_gpio_parse(struct uvc_device *dev)
+{
+ struct uvc_entity *unit;
+ struct gpio_desc *gpio_privacy;
+ int irq;
+
+ gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
+ GPIOD_IN);
+ if (IS_ERR_OR_NULL(gpio_privacy))
+ return PTR_ERR_OR_ZERO(gpio_privacy);
+
+ unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (!unit)
+ return -ENOMEM;
+
+ irq = gpiod_to_irq(gpio_privacy);
+ if (irq < 0) {
+		if (irq != -EPROBE_DEFER)
+ dev_err(&dev->udev->dev,
+ "No IRQ for privacy GPIO (%d)\n", irq);
+ return irq;
+ }
+
+ unit->gpio.gpio_privacy = gpio_privacy;
+ unit->gpio.irq = irq;
+ unit->gpio.bControlSize = 1;
+ unit->gpio.bmControls = (u8 *)unit + sizeof(*unit);
+ unit->gpio.bmControls[0] = 1;
+ unit->get_cur = uvc_gpio_get_cur;
+ unit->get_info = uvc_gpio_get_info;
+ strscpy(unit->name, "GPIO", sizeof(unit->name));
+
+ list_add_tail(&unit->list, &dev->entities);
+
+ dev->gpio_unit = unit;
+
+ return 0;
+}
+
+static int uvc_gpio_init_irq(struct uvc_device *dev)
+{
+ struct uvc_entity *unit = dev->gpio_unit;
+
+ if (!unit || unit->gpio.irq < 0)
+ return 0;
+
+ return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
+ uvc_gpio_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "uvc_privacy_gpio", dev);
+}
+
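For context, a minimal sketch (not part of the patch) of how a pin can reach this lookup on a DT-less board: devm_gpiod_get_optional() matches the "privacy" con_id against a gpiod lookup table registered by platform code. The chip label, offset and dev_id below are hypothetical placeholders.

#include <linux/gpio/machine.h>

/* Hypothetical board wiring: dev_id must equal dev_name() of the USB
 * device (e.g. "1-4") so the "privacy" lookup above resolves. */
static struct gpiod_lookup_table uvc_privacy_lookup = {
	.dev_id = "1-4",
	.table = {
		GPIO_LOOKUP("gpiochip0", 12, "privacy", GPIO_ACTIVE_HIGH),
		{ }
	},
};

static int __init board_privacy_init(void)
{
	gpiod_add_lookup_table(&uvc_privacy_lookup);
	return 0;
}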
/* ------------------------------------------------------------------------
* UVC device scan
*/
@@ -1475,24 +1611,23 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
{
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_VC_EXTENSION_UNIT:
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- XU %d", entity->id);
+ uvc_dbg_cont(PROBE, " <- XU %d", entity->id);
if (entity->bNrInPins != 1) {
- uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has more "
- "than 1 input pin.\n", entity->id);
+ uvc_dbg(chain->dev, DESCR,
+ "Extension unit %d has more than 1 input pin\n",
+ entity->id);
return -1;
}
break;
case UVC_VC_PROCESSING_UNIT:
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- PU %d", entity->id);
+ uvc_dbg_cont(PROBE, " <- PU %d", entity->id);
if (chain->processing != NULL) {
- uvc_trace(UVC_TRACE_DESCR, "Found multiple "
- "Processing Units in chain.\n");
+ uvc_dbg(chain->dev, DESCR,
+ "Found multiple Processing Units in chain\n");
return -1;
}
@@ -1500,16 +1635,15 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
break;
case UVC_VC_SELECTOR_UNIT:
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- SU %d", entity->id);
+ uvc_dbg_cont(PROBE, " <- SU %d", entity->id);
/* Single-input selector units are ignored. */
if (entity->bNrInPins == 1)
break;
if (chain->selector != NULL) {
- uvc_trace(UVC_TRACE_DESCR, "Found multiple Selector "
- "Units in chain.\n");
+ uvc_dbg(chain->dev, DESCR,
+ "Found multiple Selector Units in chain\n");
return -1;
}
@@ -1519,33 +1653,29 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_ITT_VENDOR_SPECIFIC:
case UVC_ITT_CAMERA:
case UVC_ITT_MEDIA_TRANSPORT_INPUT:
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- IT %d\n", entity->id);
+ uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id);
break;
case UVC_OTT_VENDOR_SPECIFIC:
case UVC_OTT_DISPLAY:
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " OT %d", entity->id);
+ uvc_dbg_cont(PROBE, " OT %d", entity->id);
break;
case UVC_TT_STREAMING:
- if (UVC_ENTITY_IS_ITERM(entity)) {
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- IT %d\n", entity->id);
- } else {
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " OT %d", entity->id);
- }
+ if (UVC_ENTITY_IS_ITERM(entity))
+ uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id);
+ else
+ uvc_dbg_cont(PROBE, " OT %d", entity->id);
break;
default:
- uvc_trace(UVC_TRACE_DESCR, "Unsupported entity type "
- "0x%04x found in chain.\n", UVC_ENTITY_TYPE(entity));
+ uvc_dbg(chain->dev, DESCR,
+ "Unsupported entity type 0x%04x found in chain\n",
+ UVC_ENTITY_TYPE(entity));
return -1;
}
@@ -1571,28 +1701,27 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
if (forward == prev)
continue;
if (forward->chain.next || forward->chain.prev) {
- uvc_trace(UVC_TRACE_DESCR, "Found reference to "
- "entity %d already in chain.\n", forward->id);
+ uvc_dbg(chain->dev, DESCR,
+ "Found reference to entity %d already in chain\n",
+ forward->id);
return -EINVAL;
}
switch (UVC_ENTITY_TYPE(forward)) {
case UVC_VC_EXTENSION_UNIT:
if (forward->bNrInPins != 1) {
- uvc_trace(UVC_TRACE_DESCR, "Extension unit %d "
- "has more than 1 input pin.\n",
- entity->id);
+ uvc_dbg(chain->dev, DESCR,
+ "Extension unit %d has more than 1 input pin\n",
+					forward->id);
return -EINVAL;
}
list_add_tail(&forward->chain, &chain->entities);
- if (uvc_trace_param & UVC_TRACE_PROBE) {
- if (!found)
- printk(KERN_CONT " (->");
+ if (!found)
+ uvc_dbg_cont(PROBE, " (->");
- printk(KERN_CONT " XU %d", forward->id);
- found = 1;
- }
+ uvc_dbg_cont(PROBE, " XU %d", forward->id);
+ found = 1;
break;
case UVC_OTT_VENDOR_SPECIFIC:
@@ -1600,24 +1729,23 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
case UVC_TT_STREAMING:
if (UVC_ENTITY_IS_ITERM(forward)) {
- uvc_trace(UVC_TRACE_DESCR, "Unsupported input "
- "terminal %u.\n", forward->id);
+ uvc_dbg(chain->dev, DESCR,
+ "Unsupported input terminal %u\n",
+ forward->id);
return -EINVAL;
}
list_add_tail(&forward->chain, &chain->entities);
- if (uvc_trace_param & UVC_TRACE_PROBE) {
- if (!found)
- printk(KERN_CONT " (->");
+ if (!found)
+ uvc_dbg_cont(PROBE, " (->");
- printk(KERN_CONT " OT %d", forward->id);
- found = 1;
- }
+ uvc_dbg_cont(PROBE, " OT %d", forward->id);
+ found = 1;
break;
}
}
if (found)
- printk(KERN_CONT ")");
+ uvc_dbg_cont(PROBE, ")");
return 0;
}
@@ -1642,36 +1770,33 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
break;
}
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " <- IT");
+ uvc_dbg_cont(PROBE, " <- IT");
chain->selector = entity;
for (i = 0; i < entity->bNrInPins; ++i) {
id = entity->baSourceID[i];
term = uvc_entity_by_id(chain->dev, id);
if (term == NULL || !UVC_ENTITY_IS_ITERM(term)) {
- uvc_trace(UVC_TRACE_DESCR, "Selector unit %d "
- "input %d isn't connected to an "
- "input terminal\n", entity->id, i);
+ uvc_dbg(chain->dev, DESCR,
+ "Selector unit %d input %d isn't connected to an input terminal\n",
+ entity->id, i);
return -1;
}
if (term->chain.next || term->chain.prev) {
- uvc_trace(UVC_TRACE_DESCR, "Found reference to "
- "entity %d already in chain.\n",
+ uvc_dbg(chain->dev, DESCR,
+ "Found reference to entity %d already in chain\n",
term->id);
return -EINVAL;
}
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT " %d", term->id);
+ uvc_dbg_cont(PROBE, " %d", term->id);
list_add_tail(&term->chain, &chain->entities);
uvc_scan_chain_forward(chain, term, entity);
}
- if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(KERN_CONT "\n");
+ uvc_dbg_cont(PROBE, "\n");
id = 0;
break;
@@ -1694,8 +1819,8 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
entity = uvc_entity_by_id(chain->dev, id);
if (entity == NULL) {
- uvc_trace(UVC_TRACE_DESCR, "Found reference to "
- "unknown entity %d.\n", id);
+ uvc_dbg(chain->dev, DESCR,
+ "Found reference to unknown entity %d\n", id);
return -EINVAL;
}
@@ -1708,7 +1833,7 @@ static int uvc_scan_chain(struct uvc_video_chain *chain,
{
struct uvc_entity *entity, *prev;
- uvc_trace(UVC_TRACE_PROBE, "Scanning UVC chain:");
+ uvc_dbg(chain->dev, PROBE, "Scanning UVC chain:");
entity = term;
prev = NULL;
@@ -1716,8 +1841,9 @@ static int uvc_scan_chain(struct uvc_video_chain *chain,
while (entity != NULL) {
/* Entity must not be part of an existing chain */
if (entity->chain.next || entity->chain.prev) {
- uvc_trace(UVC_TRACE_DESCR, "Found reference to "
- "entity %d already in chain.\n", entity->id);
+ uvc_dbg(chain->dev, DESCR,
+ "Found reference to entity %d already in chain\n",
+ entity->id);
return -EINVAL;
}
@@ -1871,9 +1997,8 @@ static int uvc_scan_fallback(struct uvc_device *dev)
list_add_tail(&chain->list, &dev->chains);
- uvc_trace(UVC_TRACE_PROBE,
- "Found a video chain by fallback heuristic (%s).\n",
- uvc_print_chain(chain));
+ uvc_dbg(dev, PROBE, "Found a video chain by fallback heuristic (%s)\n",
+ uvc_print_chain(chain));
return 0;
@@ -1915,8 +2040,8 @@ static int uvc_scan_device(struct uvc_device *dev)
continue;
}
- uvc_trace(UVC_TRACE_PROBE, "Found a valid video chain (%s).\n",
- uvc_print_chain(chain));
+ uvc_dbg(dev, PROBE, "Found a valid video chain (%s)\n",
+ uvc_print_chain(chain));
list_add_tail(&chain->list, &dev->chains);
}
@@ -1925,10 +2050,17 @@ static int uvc_scan_device(struct uvc_device *dev)
uvc_scan_fallback(dev);
if (list_empty(&dev->chains)) {
- uvc_printk(KERN_INFO, "No valid video chain found.\n");
+ dev_info(&dev->udev->dev, "No valid video chain found.\n");
return -1;
}
+ /* Add GPIO entity to the first chain. */
+ if (dev->gpio_unit) {
+ chain = list_first_entry(&dev->chains,
+ struct uvc_video_chain, list);
+ list_add_tail(&dev->gpio_unit->chain, &chain->entities);
+ }
+
return 0;
}
@@ -2077,8 +2209,9 @@ int uvc_register_video_device(struct uvc_device *dev,
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
- uvc_printk(KERN_ERR, "Failed to register %s device (%d).\n",
- v4l2_type_names[type], ret);
+ dev_err(&stream->intf->dev,
+ "Failed to register %s device (%d).\n",
+ v4l2_type_names[type], ret);
return ret;
}
@@ -2094,8 +2227,8 @@ static int uvc_register_video(struct uvc_device *dev,
/* Initialize the streaming interface with default parameters. */
ret = uvc_video_init(stream);
if (ret < 0) {
- uvc_printk(KERN_ERR, "Failed to initialize the device (%d).\n",
- ret);
+ dev_err(&stream->intf->dev,
+ "Failed to initialize the device (%d).\n", ret);
return ret;
}
@@ -2129,8 +2262,9 @@ static int uvc_register_terms(struct uvc_device *dev,
stream = uvc_stream_by_id(dev, term->id);
if (stream == NULL) {
- uvc_printk(KERN_INFO, "No streaming interface found "
- "for terminal %u.", term->id);
+ dev_info(&dev->udev->dev,
+				 "No streaming interface found for terminal %u.\n",
+ term->id);
continue;
}
@@ -2163,8 +2297,8 @@ static int uvc_register_chains(struct uvc_device *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
ret = uvc_mc_register_entities(chain);
if (ret < 0)
- uvc_printk(KERN_INFO,
- "Failed to register entities (%d).\n", ret);
+ dev_info(&dev->udev->dev,
+ "Failed to register entities (%d).\n", ret);
#endif
}
@@ -2187,14 +2321,6 @@ static int uvc_probe(struct usb_interface *intf,
int function;
int ret;
- if (id->idVendor && id->idProduct)
- uvc_trace(UVC_TRACE_PROBE, "Probing known UVC device %s "
- "(%04x:%04x)\n", udev->devpath, id->idVendor,
- id->idProduct);
- else
- uvc_trace(UVC_TRACE_PROBE, "Probing generic UVC device %s\n",
- udev->devpath);
-
/* Allocate memory for the device and initialize it. */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
@@ -2214,6 +2340,13 @@ static int uvc_probe(struct usb_interface *intf,
dev->quirks = uvc_quirks_param == -1
? dev->info->quirks : uvc_quirks_param;
+ if (id->idVendor && id->idProduct)
+ uvc_dbg(dev, PROBE, "Probing known UVC device %s (%04x:%04x)\n",
+ udev->devpath, id->idVendor, id->idProduct);
+ else
+ uvc_dbg(dev, PROBE, "Probing generic UVC device %s\n",
+ udev->devpath);
+
if (udev->product != NULL)
strscpy(dev->name, udev->product, sizeof(dev->name));
else
@@ -2256,22 +2389,34 @@ static int uvc_probe(struct usb_interface *intf,
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
- uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
- "descriptors.\n");
+ uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n");
+ goto error;
+ }
+
+ /* Parse the associated GPIOs. */
+ if (uvc_gpio_parse(dev) < 0) {
+ uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n");
goto error;
}
- uvc_printk(KERN_INFO, "Found UVC %u.%02x device %s (%04x:%04x)\n",
- dev->uvc_version >> 8, dev->uvc_version & 0xff,
- udev->product ? udev->product : "<unnamed>",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
+ dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
+ dev->uvc_version >> 8, dev->uvc_version & 0xff,
+ udev->product ? udev->product : "<unnamed>",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
if (dev->quirks != dev->info->quirks) {
- uvc_printk(KERN_INFO, "Forcing device quirks to 0x%x by module "
- "parameter for testing purpose.\n", dev->quirks);
- uvc_printk(KERN_INFO, "Please report required quirks to the "
- "linux-uvc-devel mailing list.\n");
+ dev_info(&dev->udev->dev,
+			 "Forcing device quirks to 0x%x by module parameter for testing purposes.\n",
+ dev->quirks);
+ dev_info(&dev->udev->dev,
+ "Please report required quirks to the linux-uvc-devel mailing list.\n");
+ }
+
+ if (dev->info->uvc_version) {
+ dev->uvc_version = dev->info->uvc_version;
+ dev_info(&dev->udev->dev, "Forcing UVC version to %u.%02x\n",
+ dev->uvc_version >> 8, dev->uvc_version & 0xff);
}
/* Register the V4L2 device. */
@@ -2300,12 +2445,19 @@ static int uvc_probe(struct usb_interface *intf,
/* Initialize the interrupt URB. */
if ((ret = uvc_status_init(dev)) < 0) {
- uvc_printk(KERN_INFO, "Unable to initialize the status "
- "endpoint (%d), status interrupt will not be "
- "supported.\n", ret);
+ dev_info(&dev->udev->dev,
+ "Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n",
+ ret);
}
- uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ ret = uvc_gpio_init_irq(dev);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev,
+ "Unable to request privacy GPIO IRQ (%d)\n", ret);
+ goto error;
+ }
+
+ uvc_dbg(dev, PROBE, "UVC device initialized\n");
usb_enable_autosuspend(udev);
return 0;
@@ -2337,7 +2489,7 @@ static int uvc_suspend(struct usb_interface *intf, pm_message_t message)
struct uvc_device *dev = usb_get_intfdata(intf);
struct uvc_streaming *stream;
- uvc_trace(UVC_TRACE_SUSPEND, "Suspending interface %u\n",
+ uvc_dbg(dev, SUSPEND, "Suspending interface %u\n",
intf->cur_altsetting->desc.bInterfaceNumber);
/* Controls are cached on the fly so they don't need to be saved. */
@@ -2355,8 +2507,8 @@ static int uvc_suspend(struct usb_interface *intf, pm_message_t message)
return uvc_video_suspend(stream);
}
- uvc_trace(UVC_TRACE_SUSPEND, "Suspend: video streaming USB interface "
- "mismatch.\n");
+ uvc_dbg(dev, SUSPEND,
+ "Suspend: video streaming USB interface mismatch\n");
return -EINVAL;
}
@@ -2366,7 +2518,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
struct uvc_streaming *stream;
int ret = 0;
- uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n",
+ uvc_dbg(dev, SUSPEND, "Resuming interface %u\n",
intf->cur_altsetting->desc.bInterfaceNumber);
if (intf->cur_altsetting->desc.bInterfaceSubClass ==
@@ -2395,8 +2547,8 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
}
}
- uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
- "mismatch.\n");
+ uvc_dbg(dev, SUSPEND,
+ "Resume: video streaming USB interface mismatch\n");
return -EINVAL;
}
@@ -2446,7 +2598,7 @@ module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(quirks, "Forced device quirks");
-module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(trace, uvc_dbg_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(trace, "Trace level bitmask");
module_param_named(timeout, uvc_timeout_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
@@ -2923,6 +3075,17 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Shenzhen Aoni Electronic Co.,Ltd 2K FHD camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1bcf,
+ .idProduct = 0x0b40,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
+ .uvc_version = 0x010a,
+ } },
/* SiGma Micro USB Web Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3002,7 +3165,6 @@ static int __init uvc_init(void)
return ret;
}
- printk(KERN_INFO DRIVER_DESC " (" DRIVER_VERSION ")\n");
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index ca3a9c2eec27..7c4d2f93d351 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -105,6 +105,7 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
case UVC_OTT_DISPLAY:
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
case UVC_EXTERNAL_VENDOR_SPECIFIC:
+ case UVC_EXT_GPIO_UNIT:
default:
function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
break;
@@ -139,8 +140,9 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_init_entity(chain, entity);
if (ret < 0) {
- uvc_printk(KERN_INFO, "Failed to initialize entity for "
- "entity %u\n", entity->id);
+ dev_info(&chain->dev->udev->dev,
+ "Failed to initialize entity for entity %u\n",
+ entity->id);
return ret;
}
}
@@ -148,8 +150,9 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_create_links(chain, entity);
if (ret < 0) {
- uvc_printk(KERN_INFO, "Failed to create links for "
- "entity %u\n", entity->id);
+ dev_info(&chain->dev->udev->dev,
+ "Failed to create links for entity %u\n",
+ entity->id);
return ret;
}
}
diff --git a/drivers/media/usb/uvc/uvc_isight.c b/drivers/media/usb/uvc/uvc_isight.c
index 135fd7fe6852..2578d6ee4829 100644
--- a/drivers/media/usb/uvc/uvc_isight.c
+++ b/drivers/media/usb/uvc/uvc_isight.c
@@ -40,6 +40,7 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
0xde, 0xad, 0xfa, 0xce
};
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
unsigned int maxlen, nbytes;
u8 *mem;
int is_header = 0;
@@ -49,15 +50,15 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
(len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
- uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
+ uvc_dbg(stream->dev, FRAME, "iSight header found\n");
is_header = 1;
}
/* Synchronize to the input stream by waiting for a header packet. */
if (buf->state != UVC_BUF_STATE_ACTIVE) {
if (!is_header) {
- uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
- "sync).\n");
+ uvc_dbg(stream->dev, FRAME,
+ "Dropping packet (out of sync)\n");
return 0;
}
@@ -85,8 +86,8 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
buf->bytesused += nbytes;
if (len > maxlen || buf->bytesused == buf->length) {
- uvc_trace(UVC_TRACE_FRAME, "Frame complete "
- "(overflow).\n");
+ uvc_dbg(stream->dev, FRAME,
+ "Frame complete (overflow)\n");
buf->state = UVC_BUF_STATE_DONE;
}
}
@@ -103,9 +104,9 @@ void uvc_video_decode_isight(struct uvc_urb *uvc_urb, struct uvc_buffer *buf,
for (i = 0; i < urb->number_of_packets; ++i) {
if (urb->iso_frame_desc[i].status < 0) {
- uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
- "lost (%d).\n",
- urb->iso_frame_desc[i].status);
+ uvc_dbg(stream->dev, FRAME,
+ "USB isochronous frame lost (%d)\n",
+ urb->iso_frame_desc[i].status);
}
/* Decode the payload packet.
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index cd60c6c1749e..21a907d32bb7 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -32,12 +32,6 @@
* the driver.
*/
-static inline struct uvc_streaming *
-uvc_queue_to_stream(struct uvc_video_queue *queue)
-{
- return container_of(queue, struct uvc_streaming, queue);
-}
-
static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
return container_of(buf, struct uvc_buffer, buf);
@@ -109,7 +103,8 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+ uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
+ "[E] Bytes used out of bounds\n");
return -EINVAL;
}
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 2bdb0ff203f8..753c8226db70 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -93,22 +93,21 @@ static void uvc_event_streaming(struct uvc_device *dev,
struct uvc_streaming_status *status, int len)
{
if (len < 3) {
- uvc_trace(UVC_TRACE_STATUS, "Invalid streaming status event "
- "received.\n");
+ uvc_dbg(dev, STATUS,
+ "Invalid streaming status event received\n");
return;
}
if (status->bEvent == 0) {
if (len < 4)
return;
- uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
- status->bOriginator,
- status->bValue[0] ? "pressed" : "released", len);
+ uvc_dbg(dev, STATUS, "Button (intf %u) %s len %d\n",
+ status->bOriginator,
+ status->bValue[0] ? "pressed" : "released", len);
uvc_input_report_key(dev, KEY_CAMERA, status->bValue[0]);
} else {
- uvc_trace(UVC_TRACE_STATUS,
- "Stream %u error event %02x len %d.\n",
- status->bOriginator, status->bEvent, len);
+ uvc_dbg(dev, STATUS, "Stream %u error event %02x len %d\n",
+ status->bOriginator, status->bEvent, len);
}
}
@@ -163,14 +162,13 @@ static bool uvc_event_control(struct urb *urb,
if (len < 6 || status->bEvent != 0 ||
status->bAttribute >= ARRAY_SIZE(attrs)) {
- uvc_trace(UVC_TRACE_STATUS, "Invalid control status event "
- "received.\n");
+ uvc_dbg(dev, STATUS, "Invalid control status event received\n");
return false;
}
- uvc_trace(UVC_TRACE_STATUS, "Control %u/%u %s change len %d.\n",
- status->bOriginator, status->bSelector,
- attrs[status->bAttribute], len);
+ uvc_dbg(dev, STATUS, "Control %u/%u %s change len %d\n",
+ status->bOriginator, status->bSelector,
+ attrs[status->bAttribute], len);
/* Find the control. */
ctrl = uvc_event_find_ctrl(dev, status, &chain);
@@ -179,7 +177,8 @@ static bool uvc_event_control(struct urb *urb,
switch (status->bAttribute) {
case UVC_CTRL_VALUE_CHANGE:
- return uvc_ctrl_status_event(urb, chain, ctrl, status->bValue);
+ return uvc_ctrl_status_event_async(urb, chain, ctrl,
+ status->bValue);
case UVC_CTRL_INFO_CHANGE:
case UVC_CTRL_FAILURE_CHANGE:
@@ -208,8 +207,9 @@ static void uvc_status_complete(struct urb *urb)
return;
default:
- uvc_printk(KERN_WARNING, "Non-zero status (%d) in status "
- "completion handler.\n", urb->status);
+ dev_warn(&dev->udev->dev,
+ "Non-zero status (%d) in status completion handler.\n",
+ urb->status);
return;
}
@@ -235,18 +235,18 @@ static void uvc_status_complete(struct urb *urb)
}
default:
- uvc_trace(UVC_TRACE_STATUS, "Unknown status event "
- "type %u.\n", dev->status[0]);
+ uvc_dbg(dev, STATUS, "Unknown status event type %u\n",
+ dev->status[0]);
break;
}
}
/* Resubmit the URB. */
urb->interval = dev->int_ep->desc.bInterval;
- if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- uvc_printk(KERN_ERR, "Failed to resubmit status URB (%d).\n",
- ret);
- }
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0)
+ dev_err(&dev->udev->dev,
+ "Failed to resubmit status URB (%d).\n", ret);
}
int uvc_status_init(struct uvc_device *dev)
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index fa06bfa174ad..252136cc885c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -75,8 +75,8 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
break;
default:
- uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
- "%u.\n", xmap->v4l2_type);
+ uvc_dbg(chain->dev, CONTROL,
+ "Unsupported V4L2 control type %u\n", xmap->v4l2_type);
ret = -ENOTTY;
goto free_map;
}
@@ -164,10 +164,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
return -EINVAL;
fcc = (u8 *)&fmt->fmt.pix.pixelformat;
- uvc_trace(UVC_TRACE_FORMAT, "Trying format 0x%08x (%c%c%c%c): %ux%u.\n",
- fmt->fmt.pix.pixelformat,
- fcc[0], fcc[1], fcc[2], fcc[3],
- fmt->fmt.pix.width, fmt->fmt.pix.height);
+ uvc_dbg(stream->dev, FORMAT, "Trying format 0x%08x (%c%c%c%c): %ux%u\n",
+ fmt->fmt.pix.pixelformat,
+ fcc[0], fcc[1], fcc[2], fcc[3],
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
/* Check if the hardware supports the requested format, use the default
* format otherwise.
@@ -207,16 +207,17 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
}
if (frame == NULL) {
- uvc_trace(UVC_TRACE_FORMAT, "Unsupported size %ux%u.\n",
- fmt->fmt.pix.width, fmt->fmt.pix.height);
+ uvc_dbg(stream->dev, FORMAT, "Unsupported size %ux%u\n",
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
return -EINVAL;
}
/* Use the default frame interval. */
interval = frame->dwDefaultFrameInterval;
- uvc_trace(UVC_TRACE_FORMAT, "Using default frame interval %u.%u us "
- "(%u.%u fps).\n", interval/10, interval%10, 10000000/interval,
- (100000000/interval)%10);
+ uvc_dbg(stream->dev, FORMAT,
+ "Using default frame interval %u.%u us (%u.%u fps)\n",
+ interval / 10, interval % 10, 10000000 / interval,
+ (100000000 / interval) % 10);
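	/* Worked example (informational): dwDefaultFrameInterval is in
	 * 100 ns units, so interval = 333333 prints "33333.3 us (30.0 fps)":
	 * 333333 / 10 = 33333, remainder 3; 10000000 / 333333 = 30, and
	 * (100000000 / 333333) % 10 = 0 for the fractional fps digit. */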
/* Set the format index, frame index and frame interval. */
memset(probe, 0, sizeof(*probe));
@@ -248,7 +249,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
goto done;
/* After the probe, update fmt with the values returned from
- * negotiation with the device.
+ * negotiation with the device. Some devices return invalid bFormatIndex
+ * and bFrameIndex values, in which case we can only assume they have
+ * accepted the requested format as-is.
*/
for (i = 0; i < stream->nformats; ++i) {
if (probe->bFormatIndex == stream->format[i].index) {
@@ -257,11 +260,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
}
}
- if (i == stream->nformats) {
- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
- probe->bFormatIndex);
- return -EINVAL;
- }
+ if (i == stream->nformats)
+ uvc_dbg(stream->dev, FORMAT,
+ "Unknown bFormatIndex %u, using default\n",
+ probe->bFormatIndex);
for (i = 0; i < format->nframes; ++i) {
if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
@@ -270,11 +272,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
}
}
- if (i == format->nframes) {
- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
- probe->bFrameIndex);
- return -EINVAL;
- }
+ if (i == format->nframes)
+ uvc_dbg(stream->dev, FORMAT,
+ "Unknown bFrameIndex %u, using default\n",
+ probe->bFrameIndex);
fmt->fmt.pix.width = frame->wWidth;
fmt->fmt.pix.height = frame->wHeight;
@@ -416,7 +417,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
interval = uvc_fraction_to_interval(timeperframe.numerator,
timeperframe.denominator);
- uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n",
+ uvc_dbg(stream->dev, FORMAT, "Setting frame interval to %u/%u (%u)\n",
timeperframe.numerator, timeperframe.denominator, interval);
mutex_lock(&stream->mutex);
@@ -545,8 +546,8 @@ static int uvc_v4l2_open(struct file *file)
struct uvc_fh *handle;
int ret = 0;
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_open\n");
stream = video_drvdata(file);
+ uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
ret = usb_autopm_get_interface(stream->dev->intf);
if (ret < 0)
@@ -588,7 +589,7 @@ static int uvc_v4l2_release(struct file *file)
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n");
+ uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
/* Only free resources if this is a privileged handle. */
if (uvc_has_privileges(handle))
@@ -1461,7 +1462,10 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
static ssize_t uvc_v4l2_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_read: not implemented.\n");
+ struct uvc_fh *handle = file->private_data;
+ struct uvc_streaming *stream = handle->stream;
+
+ uvc_dbg(stream->dev, CALLS, "%s: not implemented\n", __func__);
return -EINVAL;
}
@@ -1470,7 +1474,7 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n");
+ uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
return uvc_queue_mmap(&stream->queue, vma);
}
@@ -1480,7 +1484,7 @@ static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_poll\n");
+ uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
return uvc_queue_poll(&stream->queue, file, wait);
}
@@ -1493,7 +1497,7 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
- uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_get_unmapped_area\n");
+ uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
return uvc_queue_get_unmapped_area(&stream->queue, pgoff);
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a6a441d92b94..f2f565281e63 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -76,9 +76,9 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (likely(ret == size))
return 0;
- uvc_printk(KERN_ERR,
- "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
- uvc_query_name(query), cs, unit, ret, size);
+ dev_err(&dev->udev->dev,
+ "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
+ uvc_query_name(query), cs, unit, ret, size);
if (ret != -EPIPE)
return ret;
@@ -95,7 +95,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (ret != 1)
return ret < 0 ? ret : -EPIPE;
- uvc_trace(UVC_TRACE_CONTROL, "Control error %u\n", error);
+ uvc_dbg(dev, CONTROL, "Control error %u\n", error);
switch (error) {
case 0:
@@ -254,9 +254,9 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ret = -EIO;
goto out;
} else if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to query (%u) UVC %s control : "
- "%d (exp. %u).\n", query, probe ? "probe" : "commit",
- ret, size);
+ dev_err(&stream->intf->dev,
+			"Failed to query (%u) UVC %s control: %d (exp. %u).\n",
+ query, probe ? "probe" : "commit", ret, size);
ret = -EIO;
goto out;
}
@@ -334,9 +334,9 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
size, uvc_timeout_param);
if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to set UVC %s control : "
- "%d (exp. %u).\n", probe ? "probe" : "commit",
- ret, size);
+ dev_err(&stream->intf->dev,
+			"Failed to set UVC %s control: %d (exp. %u).\n",
+ probe ? "probe" : "commit", ret, size);
ret = -EIO;
}
@@ -705,12 +705,12 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
sof = y;
- uvc_trace(UVC_TRACE_CLOCK, "%s: PTS %u y %llu.%06llu SOF %u.%06llu "
- "(x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
- stream->dev->name, buf->pts,
- y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
- sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
- x1, x2, y1, y2, clock->sof_offset);
+ uvc_dbg(stream->dev, CLOCK,
+ "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
+ stream->dev->name, buf->pts,
+ y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ x1, x2, y1, y2, clock->sof_offset);
/* Second step, SOF to host clock conversion. */
x1 = (uvc_video_clock_host_sof(first) + 2048) << 16;
@@ -740,13 +740,13 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
timestamp = ktime_to_ns(first->host_time) + y - y1;
- uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %llu "
- "buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
- stream->dev->name,
- sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
- y, timestamp, vbuf->vb2_buf.timestamp,
- x1, first->host_sof, first->dev_sof,
- x2, last->host_sof, last->dev_sof, y1, y2);
+ uvc_dbg(stream->dev, CLOCK,
+ "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
+ stream->dev->name,
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ y, timestamp, vbuf->vb2_buf.timestamp,
+ x1, first->host_sof, first->dev_sof,
+ x2, last->host_sof, last->dev_sof, y1, y2);
/* Update the V4L2 buffer. */
vbuf->vb2_buf.timestamp = timestamp;
@@ -875,16 +875,15 @@ static void uvc_video_stats_update(struct uvc_streaming *stream)
{
struct uvc_stats_frame *frame = &stream->stats.frame;
- uvc_trace(UVC_TRACE_STATS, "frame %u stats: %u/%u/%u packets, "
- "%u/%u/%u pts (%searly %sinitial), %u/%u scr, "
- "last pts/stc/sof %u/%u/%u\n",
- stream->sequence, frame->first_data,
- frame->nb_packets - frame->nb_empty, frame->nb_packets,
- frame->nb_pts_diffs, frame->last_pts_diff, frame->nb_pts,
- frame->has_early_pts ? "" : "!",
- frame->has_initial_pts ? "" : "!",
- frame->nb_scr_diffs, frame->nb_scr,
- frame->pts, frame->scr_stc, frame->scr_sof);
+ uvc_dbg(stream->dev, STATS,
+ "frame %u stats: %u/%u/%u packets, %u/%u/%u pts (%searly %sinitial), %u/%u scr, last pts/stc/sof %u/%u/%u\n",
+ stream->sequence, frame->first_data,
+ frame->nb_packets - frame->nb_empty, frame->nb_packets,
+ frame->nb_pts_diffs, frame->last_pts_diff, frame->nb_pts,
+ frame->has_early_pts ? "" : "!",
+ frame->has_initial_pts ? "" : "!",
+ frame->nb_scr_diffs, frame->nb_scr,
+ frame->pts, frame->scr_stc, frame->scr_sof);
stream->stats.stream.nb_frames++;
stream->stats.stream.nb_packets += stream->stats.frame.nb_packets;
@@ -1039,8 +1038,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
/* Mark the buffer as bad if the error bit is set. */
if (data[1] & UVC_STREAM_ERR) {
- uvc_trace(UVC_TRACE_FRAME, "Marking buffer as bad (error bit "
- "set).\n");
+ uvc_dbg(stream->dev, FRAME,
+ "Marking buffer as bad (error bit set)\n");
buf->error = 1;
}
@@ -1054,8 +1053,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
*/
if (buf->state != UVC_BUF_STATE_ACTIVE) {
if (fid == stream->last_fid) {
- uvc_trace(UVC_TRACE_FRAME, "Dropping payload (out of "
- "sync).\n");
+ uvc_dbg(stream->dev, FRAME,
+ "Dropping payload (out of sync)\n");
if ((stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) &&
(data[1] & UVC_STREAM_EOF))
stream->last_fid ^= UVC_STREAM_FID;
@@ -1086,8 +1085,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
* previous payload had the EOF bit set.
*/
if (fid != stream->last_fid && buf->bytesused != 0) {
- uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
- "toggled).\n");
+ uvc_dbg(stream->dev, FRAME,
+ "Frame complete (FID bit toggled)\n");
buf->state = UVC_BUF_STATE_READY;
return -EAGAIN;
}
@@ -1120,8 +1119,8 @@ static void uvc_video_copy_data_work(struct work_struct *work)
ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
if (ret < 0)
- uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
- ret);
+ dev_err(&uvc_urb->stream->intf->dev,
+ "Failed to resubmit video URB (%d).\n", ret);
}
static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
@@ -1148,7 +1147,8 @@ static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
/* Complete the current frame if the buffer size was exceeded. */
if (len > maxlen) {
- uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n");
+ uvc_dbg(uvc_urb->stream->dev, FRAME,
+ "Frame complete (overflow)\n");
buf->error = 1;
buf->state = UVC_BUF_STATE_READY;
}
@@ -1161,9 +1161,9 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
{
/* Mark the buffer as done if the EOF marker is set. */
if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) {
- uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
+ uvc_dbg(stream->dev, FRAME, "Frame complete (EOF found)\n");
if (data[0] == len)
- uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
+ uvc_dbg(stream->dev, FRAME, "EOF in empty payload\n");
buf->state = UVC_BUF_STATE_READY;
if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
stream->last_fid ^= UVC_STREAM_FID;
@@ -1279,13 +1279,13 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
memcpy(&meta->length, mem, length);
meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
- uvc_trace(UVC_TRACE_FRAME,
- "%s(): t-sys %lluns, SOF %u, len %u, flags 0x%x, PTS %u, STC %u frame SOF %u\n",
- __func__, ktime_to_ns(time), meta->sof, meta->length,
- meta->flags,
- has_pts ? *(u32 *)meta->buf : 0,
- has_scr ? *(u32 *)scr : 0,
- has_scr ? *(u32 *)(scr + 4) & 0x7ff : 0);
+ uvc_dbg(stream->dev, FRAME,
+ "%s(): t-sys %lluns, SOF %u, len %u, flags 0x%x, PTS %u, STC %u frame SOF %u\n",
+ __func__, ktime_to_ns(time), meta->sof, meta->length,
+ meta->flags,
+ has_pts ? *(u32 *)meta->buf : 0,
+ has_scr ? *(u32 *)scr : 0,
+ has_scr ? *(u32 *)(scr + 4) & 0x7ff : 0);
}
/* ------------------------------------------------------------------------
@@ -1339,8 +1339,9 @@ static void uvc_video_decode_isoc(struct uvc_urb *uvc_urb,
for (i = 0; i < urb->number_of_packets; ++i) {
if (urb->iso_frame_desc[i].status < 0) {
- uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
- "lost (%d).\n", urb->iso_frame_desc[i].status);
+ uvc_dbg(stream->dev, FRAME,
+ "USB isochronous frame lost (%d)\n",
+ urb->iso_frame_desc[i].status);
/* Mark the buffer as faulty. */
if (buf != NULL)
buf->error = 1;
@@ -1507,8 +1508,9 @@ static void uvc_video_complete(struct urb *urb)
break;
default:
- uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
- "completion handler.\n", urb->status);
+ dev_warn(&stream->intf->dev,
+ "Non-zero status (%d) in video completion handler.\n",
+ urb->status);
fallthrough;
case -ENOENT: /* usb_poison_urb() called. */
if (stream->frozen)
@@ -1545,9 +1547,8 @@ static void uvc_video_complete(struct urb *urb)
if (!uvc_urb->async_operations) {
ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
if (ret < 0)
- uvc_printk(KERN_ERR,
- "Failed to resubmit video URB (%d).\n",
- ret);
+ dev_err(&stream->intf->dev,
+ "Failed to resubmit video URB (%d).\n", ret);
return;
}
@@ -1628,15 +1629,16 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
}
if (i == UVC_URBS) {
- uvc_trace(UVC_TRACE_VIDEO, "Allocated %u URB buffers "
- "of %ux%u bytes each.\n", UVC_URBS, npackets,
- psize);
+ uvc_dbg(stream->dev, VIDEO,
+ "Allocated %u URB buffers of %ux%u bytes each\n",
+ UVC_URBS, npackets, psize);
return npackets;
}
}
- uvc_trace(UVC_TRACE_VIDEO, "Failed to allocate URB buffers (%u bytes "
- "per packet).\n", psize);
+ uvc_dbg(stream->dev, VIDEO,
+ "Failed to allocate URB buffers (%u bytes per packet)\n",
+ psize);
return 0;
}
@@ -1835,12 +1837,13 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
bandwidth = stream->ctrl.dwMaxPayloadTransferSize;
if (bandwidth == 0) {
- uvc_trace(UVC_TRACE_VIDEO, "Device requested null "
- "bandwidth, defaulting to lowest.\n");
+ uvc_dbg(stream->dev, VIDEO,
+ "Device requested null bandwidth, defaulting to lowest\n");
bandwidth = 1;
} else {
- uvc_trace(UVC_TRACE_VIDEO, "Device requested %u "
- "B/frame bandwidth.\n", bandwidth);
+ uvc_dbg(stream->dev, VIDEO,
+ "Device requested %u B/frame bandwidth\n",
+ bandwidth);
}
for (i = 0; i < intf->num_altsetting; ++i) {
@@ -1863,13 +1866,14 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
}
if (best_ep == NULL) {
- uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting "
- "for requested bandwidth.\n");
+ uvc_dbg(stream->dev, VIDEO,
+			"No alternate setting fast enough for the requested bandwidth\n");
return -EIO;
}
- uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
- "(%u B/frame bandwidth).\n", altsetting, best_psize);
+ uvc_dbg(stream->dev, VIDEO,
+ "Selecting alternate setting %u (%u B/frame bandwidth)\n",
+ altsetting, best_psize);
ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
@@ -1893,8 +1897,9 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
for_each_uvc_urb(uvc_urb, stream) {
ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
if (ret < 0) {
- uvc_printk(KERN_ERR, "Failed to submit URB %u (%d).\n",
- uvc_urb_index(uvc_urb), ret);
+ dev_err(&stream->intf->dev,
+ "Failed to submit URB %u (%d).\n",
+ uvc_urb_index(uvc_urb), ret);
uvc_video_stop_transfer(stream, 1);
return ret;
}
@@ -1989,7 +1994,8 @@ int uvc_video_init(struct uvc_streaming *stream)
int ret;
if (stream->nformats == 0) {
- uvc_printk(KERN_INFO, "No supported video formats found.\n");
+ dev_info(&stream->intf->dev,
+ "No supported video formats found.\n");
return -EINVAL;
}
@@ -2029,8 +2035,8 @@ int uvc_video_init(struct uvc_streaming *stream)
}
if (format->nframes == 0) {
- uvc_printk(KERN_INFO, "No frame descriptor found for the "
- "default format.\n");
+ dev_info(&stream->intf->dev,
+ "No frame descriptor found for the default format.\n");
return -EINVAL;
}
@@ -2064,8 +2070,8 @@ int uvc_video_init(struct uvc_streaming *stream)
if (stream->intf->num_altsetting == 1)
stream->decode = uvc_video_encode_bulk;
else {
- uvc_printk(KERN_INFO, "Isochronous endpoints are not "
- "supported for video output devices.\n");
+ dev_info(&stream->intf->dev,
+ "Isochronous endpoints are not supported for video output devices.\n");
return -EINVAL;
}
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index a3dfacf069c4..97df5ecd66c9 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -6,6 +6,7 @@
#error "The uvcvideo.h header is deprecated, use linux/uvcvideo.h instead."
#endif /* __KERNEL__ */
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/usb.h>
@@ -37,6 +38,8 @@
(UVC_ENTITY_IS_TERM(entity) && \
((entity)->type & 0x8000) == UVC_TERM_OUTPUT)
+#define UVC_EXT_GPIO_UNIT 0x7ffe
+#define UVC_EXT_GPIO_UNIT_ID 0x100
/* ------------------------------------------------------------------------
* GUIDs
@@ -56,6 +59,9 @@
#define UVC_GUID_UVC_SELECTOR \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
+#define UVC_GUID_EXT_GPIO_CONTROLLER \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
@@ -212,6 +218,7 @@
* Structures.
*/
+struct gpio_desc;
struct uvc_device;
/* TODO: Put the most frequently accessed fields at the beginning of
@@ -301,9 +308,15 @@ struct uvc_entity {
* chain. */
unsigned int flags;
- u8 id;
+ /*
+ * Entities exposed by the UVC device use IDs 0-255, extra entities
+ * implemented by the driver (such as the GPIO entity) use IDs 256 and
+ * up.
+ */
+ u16 id;
u16 type;
char name[64];
+ u8 guid[16];
/* Media controller-related fields. */
struct video_device *vdev;
@@ -342,17 +355,28 @@ struct uvc_entity {
} selector;
struct {
- u8 guidExtensionCode[16];
u8 bNumControls;
u8 bControlSize;
u8 *bmControls;
u8 *bmControlsType;
} extension;
+
+ struct {
+ u8 bControlSize;
+ u8 *bmControls;
+ struct gpio_desc *gpio_privacy;
+ int irq;
+ } gpio;
};
u8 bNrInPins;
u8 *baSourceID;
+ int (*get_info)(struct uvc_device *dev, struct uvc_entity *entity,
+ u8 cs, u8 *caps);
+ int (*get_cur)(struct uvc_device *dev, struct uvc_entity *entity,
+ u8 cs, void *data, u16 size);
+
unsigned int ncontrols;
struct uvc_control *controls;
};
@@ -635,6 +659,7 @@ static inline u32 uvc_urb_index(const struct uvc_urb *uvc_urb)
struct uvc_device_info {
u32 quirks;
u32 meta_format;
+ u16 uvc_version;
};
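The probe path added earlier in this patch copies a non-zero uvc_version over the bcdUVC parsed from the descriptors. A hypothetical quirk payload using the new field (the name is a placeholder):

/* Hypothetical: force UVC 1.0a parsing for a device with a bogus bcdUVC. */
static const struct uvc_device_info uvc_quirk_force_10a = {
	.uvc_version = 0x010a,
};
/* Referenced from a usb_device_id entry via:
 *	.driver_info = (kernel_ulong_t)&uvc_quirk_force_10a,
 */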
struct uvc_device {
@@ -680,6 +705,8 @@ struct uvc_device {
struct uvc_control *ctrl;
const void *data;
} async_ctrl;
+
+ struct uvc_entity *gpio_unit;
};
enum uvc_handle_state {
@@ -702,18 +729,18 @@ struct uvc_driver {
* Debugging, printing and logging
*/
-#define UVC_TRACE_PROBE (1 << 0)
-#define UVC_TRACE_DESCR (1 << 1)
-#define UVC_TRACE_CONTROL (1 << 2)
-#define UVC_TRACE_FORMAT (1 << 3)
-#define UVC_TRACE_CAPTURE (1 << 4)
-#define UVC_TRACE_CALLS (1 << 5)
-#define UVC_TRACE_FRAME (1 << 7)
-#define UVC_TRACE_SUSPEND (1 << 8)
-#define UVC_TRACE_STATUS (1 << 9)
-#define UVC_TRACE_VIDEO (1 << 10)
-#define UVC_TRACE_STATS (1 << 11)
-#define UVC_TRACE_CLOCK (1 << 12)
+#define UVC_DBG_PROBE (1 << 0)
+#define UVC_DBG_DESCR (1 << 1)
+#define UVC_DBG_CONTROL (1 << 2)
+#define UVC_DBG_FORMAT (1 << 3)
+#define UVC_DBG_CAPTURE (1 << 4)
+#define UVC_DBG_CALLS (1 << 5)
+#define UVC_DBG_FRAME (1 << 7)
+#define UVC_DBG_SUSPEND (1 << 8)
+#define UVC_DBG_STATUS (1 << 9)
+#define UVC_DBG_VIDEO (1 << 10)
+#define UVC_DBG_STATS (1 << 11)
+#define UVC_DBG_CLOCK (1 << 12)
#define UVC_WARN_MINMAX 0
#define UVC_WARN_PROBE_DEF 1
@@ -721,24 +748,28 @@ struct uvc_driver {
extern unsigned int uvc_clock_param;
extern unsigned int uvc_no_drop_param;
-extern unsigned int uvc_trace_param;
+extern unsigned int uvc_dbg_param;
extern unsigned int uvc_timeout_param;
extern unsigned int uvc_hw_timestamps_param;
-#define uvc_trace(flag, msg...) \
- do { \
- if (uvc_trace_param & flag) \
- printk(KERN_DEBUG "uvcvideo: " msg); \
- } while (0)
-
-#define uvc_warn_once(dev, warn, msg...) \
- do { \
- if (!test_and_set_bit(warn, &dev->warnings)) \
- printk(KERN_INFO "uvcvideo: " msg); \
- } while (0)
-
-#define uvc_printk(level, msg...) \
- printk(level "uvcvideo: " msg)
+#define uvc_dbg(_dev, flag, fmt, ...) \
+do { \
+ if (uvc_dbg_param & UVC_DBG_##flag) \
+ dev_printk(KERN_DEBUG, &(_dev)->udev->dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define uvc_dbg_cont(flag, fmt, ...) \
+do { \
+ if (uvc_dbg_param & UVC_DBG_##flag) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define uvc_warn_once(_dev, warn, fmt, ...) \
+do { \
+ if (!test_and_set_bit(warn, &(_dev)->warnings)) \
+ dev_info(&(_dev)->udev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
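A minimal usage sketch, assuming a struct uvc_device *dev in scope: the flag argument is the bare suffix, token-pasted onto UVC_DBG_ by the macro, and output stays gated by the module parameter still exposed as "trace".

/* Enabled with e.g. "modprobe uvcvideo trace=0x0003" (PROBE | DESCR): */
uvc_dbg(dev, PROBE, "Scanning UVC chain:");	/* tests UVC_DBG_PROBE */
uvc_dbg_cont(PROBE, " <- IT %d\n", 5);		/* same line via pr_cont() */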
/* --------------------------------------------------------------------------
* Internal functions.
@@ -787,6 +818,12 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
return vb2_is_streaming(&queue->queue);
}
+static inline struct uvc_streaming *
+uvc_queue_to_stream(struct uvc_video_queue *queue)
+{
+ return container_of(queue, struct uvc_streaming, queue);
+}
+
/* V4L2 interface */
extern const struct v4l2_ioctl_ops uvc_ioctl_ops;
extern const struct v4l2_file_operations uvc_fops;
@@ -838,7 +875,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
int uvc_ctrl_init_device(struct uvc_device *dev);
void uvc_ctrl_cleanup_device(struct uvc_device *dev);
int uvc_ctrl_restore_values(struct uvc_device *dev);
-bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data);
+void uvc_ctrl_status_event(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const u8 *data);
int uvc_ctrl_begin(struct uvc_video_chain *chain);
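As the two prototypes suggest, and the hunks above show, the status URB completion path now defers control events through the async variant, while process-context callers such as the privacy GPIO IRQ thread report synchronously:

/* Atomic URB completion: event completed later from a work queue. */
uvc_ctrl_status_event_async(urb, chain, ctrl, status->bValue);

/* Process context (GPIO IRQ thread): reported immediately. */
uvc_ctrl_status_event(chain, unit->controls, &new_val);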
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1e1c6b4d1874..d29b861367ea 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1181,15 +1181,11 @@ out:
return err;
}
-static void zr364xx_release(struct v4l2_device *v4l2_dev)
+static void zr364xx_board_uninit(struct zr364xx_camera *cam)
{
- struct zr364xx_camera *cam =
- container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
unsigned long i;
- v4l2_device_unregister(&cam->v4l2_dev);
-
- videobuf_mmap_free(&cam->vb_vidq);
+ zr364xx_stop_readpipe(cam);
/* release sys buffers */
for (i = 0; i < FRAMES; i++) {
@@ -1200,9 +1196,19 @@ static void zr364xx_release(struct v4l2_device *v4l2_dev)
cam->buffer.frame[i].lpvbits = NULL;
}
- v4l2_ctrl_handler_free(&cam->ctrl_handler);
/* release transfer buffer */
kfree(cam->pipe->transfer_buffer);
+}
+
+static void zr364xx_release(struct v4l2_device *v4l2_dev)
+{
+ struct zr364xx_camera *cam =
+ container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
+
+ videobuf_mmap_free(&cam->vb_vidq);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ zr364xx_board_uninit(cam);
+ v4l2_device_unregister(&cam->v4l2_dev);
kfree(cam);
}
@@ -1376,11 +1382,14 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
/* start read pipe */
err = zr364xx_start_readpipe(cam);
if (err)
- goto err_free;
+ goto err_free_frames;
DBG(": board initialized\n");
return 0;
+err_free_frames:
+ for (i = 0; i < FRAMES; i++)
+ vfree(cam->buffer.frame[i].lpvbits);
err_free:
kfree(cam->pipe->transfer_buffer);
cam->pipe->transfer_buffer = NULL;
@@ -1409,12 +1418,10 @@ static int zr364xx_probe(struct usb_interface *intf,
if (!cam)
return -ENOMEM;
- cam->v4l2_dev.release = zr364xx_release;
err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
if (err < 0) {
dev_err(&udev->dev, "couldn't register v4l2_device\n");
- kfree(cam);
- return err;
+ goto free_cam;
}
hdl = &cam->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 1);
@@ -1423,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
if (hdl->error) {
err = hdl->error;
dev_err(&udev->dev, "couldn't register control\n");
- goto fail;
+ goto unregister;
}
/* save the init method used by this camera */
cam->method = id->driver_info;
@@ -1496,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
if (!cam->read_endpoint) {
err = -ENOMEM;
dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
- goto fail;
+ goto unregister;
}
/* v4l */
@@ -1507,10 +1514,11 @@ static int zr364xx_probe(struct usb_interface *intf,
/* load zr364xx board specific */
err = zr364xx_board_init(cam);
- if (!err)
- err = v4l2_ctrl_handler_setup(hdl);
if (err)
- goto fail;
+ goto unregister;
+ err = v4l2_ctrl_handler_setup(hdl);
+ if (err)
+ goto board_uninit;
spin_lock_init(&cam->slock);
@@ -1525,16 +1533,21 @@ static int zr364xx_probe(struct usb_interface *intf,
err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (err) {
dev_err(&udev->dev, "video_register_device failed\n");
- goto fail;
+ goto free_handler;
}
+ cam->v4l2_dev.release = zr364xx_release;
dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
video_device_node_name(&cam->vdev));
return 0;
-fail:
+free_handler:
v4l2_ctrl_handler_free(hdl);
+board_uninit:
+ zr364xx_board_uninit(cam);
+unregister:
v4l2_device_unregister(&cam->v4l2_dev);
+free_cam:
kfree(cam);
return err;
}
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 2ef0c7c958a2..e4cd589b99a5 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -6,7 +6,7 @@
tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
- v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
+ v4l2-event.o v4l2-ctrls.o v4l2-subdev.o \
v4l2-async.o v4l2-common.o
videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index e3ab003a6c85..e638aa8aecb7 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
@@ -14,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -64,12 +66,6 @@ static bool match_i2c(struct v4l2_async_notifier *notifier,
#endif
}
-static bool match_devname(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
-{
- return !strcmp(asd->match.device_name, dev_name(sd->dev));
-}
-
static bool match_fwnode(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
@@ -88,6 +84,14 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
return true;
/*
+ * Check the same situation for any possible secondary assigned to the
+ * subdev's fwnode
+ */
+ if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
+ sd->fwnode->secondary == asd->match.fwnode)
+ return true;
+
+ /*
* Otherwise, check if the sd fwnode and the asd fwnode refer to an
* endpoint or a device. If they're of the same type, there's no match.
* Technically speaking this checks if the nodes refer to a connected
@@ -139,16 +143,6 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
return true;
}
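The secondary-fwnode check added above lets matching succeed when a platform hangs a software node off the subdev's primary fwnode. A minimal sketch under hypothetical assumptions (sensor_dev and the empty property list are placeholders):

#include <linux/device.h>
#include <linux/property.h>

static int sensor_attach_swnode(struct device *sensor_dev)
{
	static const struct property_entry props[] = { { } };
	struct fwnode_handle *swnode;

	swnode = fwnode_create_software_node(props, NULL);
	if (IS_ERR(swnode))
		return PTR_ERR(swnode);

	/* sensor_dev's primary fwnode->secondary now points at swnode, so
	 * an asd whose match.fwnode is swnode matches this subdev too. */
	set_secondary_fwnode(sensor_dev, swnode);
	return 0;
}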
-static bool match_custom(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
-{
- if (!asd->match.custom.match)
- /* Match always */
- return true;
-
- return asd->match.custom.match(sd->dev, asd);
-}
-
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
@@ -164,12 +158,6 @@ v4l2_async_find_match(struct v4l2_async_notifier *notifier,
list_for_each_entry(asd, &notifier->waiting, list) {
/* bus_type has been verified valid before */
switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_CUSTOM:
- match = match_custom;
- break;
- case V4L2_ASYNC_MATCH_DEVNAME:
- match = match_devname;
- break;
case V4L2_ASYNC_MATCH_I2C:
match = match_i2c;
break;
@@ -198,9 +186,6 @@ static bool asd_equal(struct v4l2_async_subdev *asd_x,
return false;
switch (asd_x->match_type) {
- case V4L2_ASYNC_MATCH_DEVNAME:
- return strcmp(asd_x->match.device_name,
- asd_y->match.device_name) == 0;
case V4L2_ASYNC_MATCH_I2C:
return asd_x->match.i2c.adapter_id ==
asd_y->match.i2c.adapter_id &&
@@ -467,8 +452,6 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
return -EINVAL;
switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_CUSTOM:
- case V4L2_ASYNC_MATCH_DEVNAME:
case V4L2_ASYNC_MATCH_I2C:
case V4L2_ASYNC_MATCH_FWNODE:
if (v4l2_async_notifier_has_async_subdev(notifier, asd,
@@ -628,7 +611,7 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
-int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
+int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
int ret;
@@ -645,12 +628,12 @@ unlock:
mutex_unlock(&list_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);
struct v4l2_async_subdev *
-v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size)
+__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -662,7 +645,7 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
asd->match.fwnode = fwnode_handle_get(fwnode);
- ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_notifier_add_subdev(notifier, asd);
if (ret) {
fwnode_handle_put(fwnode);
kfree(asd);
@@ -671,35 +654,35 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);
-int
-v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- struct v4l2_async_subdev *asd)
+struct v4l2_async_subdev *
+__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size)
{
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *remote;
- int ret;
remote = fwnode_graph_get_remote_port_parent(endpoint);
if (!remote)
- return -ENOTCONN;
-
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- asd->match.fwnode = remote;
+ return ERR_PTR(-ENOTCONN);
- ret = v4l2_async_notifier_add_subdev(notif, asd);
- if (ret)
- fwnode_handle_put(remote);
-
- return ret;
+ asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
+ asd_struct_size);
+ /*
+ * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
+ * so drop the one we got in fwnode_graph_get_remote_port_parent.
+ */
+ fwnode_handle_put(remote);
+ return asd;
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);
struct v4l2_async_subdev *
-v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size)
+__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -712,7 +695,7 @@ v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
asd->match.i2c.adapter_id = adapter_id;
asd->match.i2c.address = address;
- ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_notifier_add_subdev(notifier, asd);
if (ret) {
kfree(asd);
return ERR_PTR(ret);
@@ -720,32 +703,7 @@ v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);
-
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
- const char *device_name,
- unsigned int asd_struct_size)
-{
- struct v4l2_async_subdev *asd;
- int ret;
-
- asd = kzalloc(asd_struct_size, GFP_KERNEL);
- if (!asd)
- return ERR_PTR(-ENOMEM);
-
- asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
- asd->match.device_name = device_name;
-
- ret = v4l2_async_notifier_add_subdev(notifier, asd);
- if (ret) {
- kfree(asd);
- return ERR_PTR(ret);
- }
-
- return asd;
-}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
@@ -817,6 +775,9 @@ EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
+ if (!sd->async_list.next)
+ return;
+
mutex_lock(&list_lock);
__v4l2_async_notifier_unregister(sd->subdev_notifier);
@@ -837,3 +798,64 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
+
+static void print_waiting_subdev(struct seq_file *s,
+ struct v4l2_async_subdev *asd)
+{
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_I2C:
+ seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
+ asd->match.i2c.address);
+ break;
+ case V4L2_ASYNC_MATCH_FWNODE: {
+ struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;
+
+ devnode = fwnode_graph_is_endpoint(fwnode) ?
+ fwnode_graph_get_port_parent(fwnode) :
+ fwnode_handle_get(fwnode);
+
+ seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
+ devnode->dev ? dev_name(devnode->dev) : "nil",
+ fwnode);
+
+ fwnode_handle_put(devnode);
+ break;
+ }
+ }
+}
+
+static const char *
+v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->name;
+ else if (notifier->sd)
+ return notifier->sd->name;
+ else
+ return "nil";
+}
+
+static int pending_subdevs_show(struct seq_file *s, void *data)
+{
+ struct v4l2_async_notifier *notif;
+ struct v4l2_async_subdev *asd;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(notif, &notifier_list, list) {
+ seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
+ list_for_each_entry(asd, &notif->waiting, list)
+ print_waiting_subdev(s, asd);
+ }
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
+
+void v4l2_async_debug_init(struct dentry *debugfs_dir)
+{
+ debugfs_create_file("pending_async_subdevices", 0444, debugfs_dir, NULL,
+ &pending_subdevs_fops);
+}
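
With this rework the remote-endpoint helper allocates the async subdev itself and returns it, instead of filling in a caller-allocated one. A minimal caller sketch, assuming a hypothetical bridge driver whose struct sensor_asd embeds struct v4l2_async_subdev as its first member (as the kzalloc(asd_struct_size) allocation requires):

    struct sensor_asd {
    	struct v4l2_async_subdev base;	/* must be the first member */
    	int bus_id;
    };

    static int bridge_parse_endpoint(struct v4l2_async_notifier *notifier,
    				     struct fwnode_handle *ep)
    {
    	struct v4l2_async_subdev *asd;

    	asd = __v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep,
    					sizeof(struct sensor_asd));
    	return PTR_ERR_OR_ZERO(asd);
    }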
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
deleted file mode 100644
index 91274eee6977..000000000000
--- a/drivers/media/v4l2-core/v4l2-clk.c
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * V4L2 clock service
- *
- * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- */
-
-#include <linux/atomic.h>
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
-
-static DEFINE_MUTEX(clk_lock);
-static LIST_HEAD(clk_list);
-
-static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
-{
- struct v4l2_clk *clk;
-
- list_for_each_entry(clk, &clk_list, list)
- if (!strcmp(dev_id, clk->dev_id))
- return clk;
-
- return ERR_PTR(-ENODEV);
-}
-
-struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
-{
- struct v4l2_clk *clk;
- struct clk *ccf_clk = clk_get(dev, id);
- char clk_name[V4L2_CLK_NAME_SIZE];
-
- if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
-
- if (!IS_ERR_OR_NULL(ccf_clk)) {
- clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk) {
- clk_put(ccf_clk);
- return ERR_PTR(-ENOMEM);
- }
- clk->clk = ccf_clk;
-
- return clk;
- }
-
- mutex_lock(&clk_lock);
- clk = v4l2_clk_find(dev_name(dev));
-
- /* if dev_name is not found, try use the OF name to find again */
- if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
- v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
- clk = v4l2_clk_find(clk_name);
- }
-
- if (!IS_ERR(clk))
- atomic_inc(&clk->use_count);
- mutex_unlock(&clk_lock);
-
- return clk;
-}
-EXPORT_SYMBOL(v4l2_clk_get);
-
-void v4l2_clk_put(struct v4l2_clk *clk)
-{
- struct v4l2_clk *tmp;
-
- if (IS_ERR(clk))
- return;
-
- if (clk->clk) {
- clk_put(clk->clk);
- kfree(clk);
- return;
- }
-
- mutex_lock(&clk_lock);
-
- list_for_each_entry(tmp, &clk_list, list)
- if (tmp == clk)
- atomic_dec(&clk->use_count);
-
- mutex_unlock(&clk_lock);
-}
-EXPORT_SYMBOL(v4l2_clk_put);
-
-static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
-{
- struct v4l2_clk *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&clk_lock);
-
- list_for_each_entry(tmp, &clk_list, list)
- if (tmp == clk) {
- ret = !try_module_get(clk->ops->owner);
- if (ret)
- ret = -EFAULT;
- break;
- }
-
- mutex_unlock(&clk_lock);
-
- return ret;
-}
-
-static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
-{
- module_put(clk->ops->owner);
-}
-
-int v4l2_clk_enable(struct v4l2_clk *clk)
-{
- int ret;
-
- if (clk->clk)
- return clk_prepare_enable(clk->clk);
-
- ret = v4l2_clk_lock_driver(clk);
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
-
- if (++clk->enable == 1 && clk->ops->enable) {
- ret = clk->ops->enable(clk);
- if (ret < 0)
- clk->enable--;
- }
-
- mutex_unlock(&clk->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_enable);
-
-/*
- * You might Oops if you try to disabled a disabled clock, because then the
- * driver isn't locked and could have been unloaded by now, so, don't do that
- */
-void v4l2_clk_disable(struct v4l2_clk *clk)
-{
- int enable;
-
- if (clk->clk)
- return clk_disable_unprepare(clk->clk);
-
- mutex_lock(&clk->lock);
-
- enable = --clk->enable;
- if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
- clk->dev_id))
- clk->enable++;
- else if (!enable && clk->ops->disable)
- clk->ops->disable(clk);
-
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-}
-EXPORT_SYMBOL(v4l2_clk_disable);
-
-unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
-{
- int ret;
-
- if (clk->clk)
- return clk_get_rate(clk->clk);
-
- ret = v4l2_clk_lock_driver(clk);
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
- if (!clk->ops->get_rate)
- ret = -ENOSYS;
- else
- ret = clk->ops->get_rate(clk);
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_get_rate);
-
-int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
-{
- int ret;
-
- if (clk->clk) {
- long r = clk_round_rate(clk->clk, rate);
- if (r < 0)
- return r;
- return clk_set_rate(clk->clk, r);
- }
-
- ret = v4l2_clk_lock_driver(clk);
-
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
- if (!clk->ops->set_rate)
- ret = -ENOSYS;
- else
- ret = clk->ops->set_rate(clk, rate);
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_set_rate);
-
-struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
- const char *dev_id,
- void *priv)
-{
- struct v4l2_clk *clk;
- int ret;
-
- if (!ops || !dev_id)
- return ERR_PTR(-EINVAL);
-
- clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
- if (!clk)
- return ERR_PTR(-ENOMEM);
-
- clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
- if (!clk->dev_id) {
- ret = -ENOMEM;
- goto ealloc;
- }
- clk->ops = ops;
- clk->priv = priv;
- atomic_set(&clk->use_count, 0);
- mutex_init(&clk->lock);
-
- mutex_lock(&clk_lock);
- if (!IS_ERR(v4l2_clk_find(dev_id))) {
- mutex_unlock(&clk_lock);
- ret = -EEXIST;
- goto eexist;
- }
- list_add_tail(&clk->list, &clk_list);
- mutex_unlock(&clk_lock);
-
- return clk;
-
-eexist:
-ealloc:
- kfree(clk->dev_id);
- kfree(clk);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(v4l2_clk_register);
-
-void v4l2_clk_unregister(struct v4l2_clk *clk)
-{
- if (WARN(atomic_read(&clk->use_count),
- "%s(): Refusing to unregister ref-counted %s clock!\n",
- __func__, clk->dev_id))
- return;
-
- mutex_lock(&clk_lock);
- list_del(&clk->list);
- mutex_unlock(&clk_lock);
-
- kfree(clk->dev_id);
- kfree(clk);
-}
-EXPORT_SYMBOL(v4l2_clk_unregister);
-
-struct v4l2_clk_fixed {
- unsigned long rate;
- struct v4l2_clk_ops ops;
-};
-
-static unsigned long fixed_get_rate(struct v4l2_clk *clk)
-{
- struct v4l2_clk_fixed *priv = clk->priv;
- return priv->rate;
-}
-
-struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- unsigned long rate, struct module *owner)
-{
- struct v4l2_clk *clk;
- struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-
- if (!priv)
- return ERR_PTR(-ENOMEM);
-
- priv->rate = rate;
- priv->ops.get_rate = fixed_get_rate;
- priv->ops.owner = owner;
-
- clk = v4l2_clk_register(&priv->ops, dev_id, priv);
- if (IS_ERR(clk))
- kfree(priv);
-
- return clk;
-}
-EXPORT_SYMBOL(__v4l2_clk_register_fixed);
-
-void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
-{
- kfree(clk->priv);
- v4l2_clk_unregister(clk);
-}
-EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 78007dba4677..133d20e40f82 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 5cbe0ffbf501..016cf6204cbb 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -920,6 +920,15 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
@@ -941,6 +950,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
@@ -969,6 +979,12 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
@@ -2165,7 +2181,8 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_INTEGER_MENU:
if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
return -ERANGE;
- if (ctrl->menu_skip_mask & (1ULL << ptr.p_s32[idx]))
+ if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
+ (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
return -EINVAL;
if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
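
The added bound check matters because BIT_ULL(n) expands to 1ULL << n, and shifting a 64-bit integer by 64 or more positions is undefined behaviour in C; a menu index >= BITS_PER_LONG_LONG cannot be represented in the skip mask at all. A standalone (non-kernel) sketch of the safe pattern:

    #include <stdbool.h>
    #include <stdint.h>

    static bool menu_item_skipped(uint64_t skip_mask, int32_t idx)
    {
    	/* Reject the index before shifting: 1ULL << 64 is undefined. */
    	if (idx < 0 || idx >= 64)
    		return false;
    	return skip_mask & (1ULL << idx);
    }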
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a593ea0598b5..b6a72d297775 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -28,6 +29,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#define VIDEO_NUM_DEVICES 256
#define VIDEO_NAME "video4linux"
@@ -37,6 +39,7 @@
__func__, ##arg); \
} while (0)
+static struct dentry *v4l2_debugfs_dir;
/*
* sysfs stuff
@@ -338,12 +341,14 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- __poll_t res = EPOLLERR | EPOLLHUP;
+ __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI;
- if (!vdev->fops->poll)
- return DEFAULT_POLLMASK;
- if (video_is_registered(vdev))
- res = vdev->fops->poll(filp, poll);
+ if (video_is_registered(vdev)) {
+ if (!vdev->fops->poll)
+ res = DEFAULT_POLLMASK;
+ else
+ res = vdev->fops->poll(filp, poll);
+ }
if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
dprintk("%s: poll: %08x\n",
video_device_node_name(vdev), res);
@@ -1086,6 +1091,8 @@ void video_unregister_device(struct video_device *vdev)
*/
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
+ v4l2_event_wake_all(vdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
@@ -1113,6 +1120,8 @@ static int __init videodev_init(void)
return -EIO;
}
+ v4l2_debugfs_dir = debugfs_create_dir("video4linux", NULL);
+ v4l2_async_debug_init(v4l2_debugfs_dir);
return 0;
}
@@ -1120,6 +1129,7 @@ static void __exit videodev_exit(void)
{
dev_t dev = MKDEV(VIDEO_MAJOR, 0);
+ debugfs_remove_recursive(v4l2_debugfs_dir);
class_unregister(&video_class);
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
}
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 290c6b213179..caad58bde326 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -187,6 +187,23 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
+void v4l2_event_wake_all(struct video_device *vdev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+
+ if (!vdev)
+ return;
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ wake_up_all(&fh->wait);
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
+
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
struct v4l2_fh *fh = sev->fh;
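
Combined with the v4l2_poll() change above, this lets userspace that is blocked on an event queue notice device removal instead of sleeping forever. A hedged userspace-side sketch:

    #include <errno.h>
    #include <poll.h>

    static int wait_for_v4l2_event(int video_fd)
    {
    	struct pollfd pfd = { .fd = video_fd, .events = POLLPRI };
    	int ret = poll(&pfd, 1, -1);

    	/* After unregistration the core reports EPOLLERR | EPOLLHUP. */
    	if (ret > 0 && (pfd.revents & (POLLERR | POLLHUP)))
    		return -ENODEV;
    	return ret;
    }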
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 5353e37eb950..2283ff3b8e1d 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -28,17 +28,6 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-enum v4l2_fwnode_bus_type {
- V4L2_FWNODE_BUS_TYPE_GUESS = 0,
- V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
- V4L2_FWNODE_BUS_TYPE_CSI1,
- V4L2_FWNODE_BUS_TYPE_CCP2,
- V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
- V4L2_FWNODE_BUS_TYPE_PARALLEL,
- V4L2_FWNODE_BUS_TYPE_BT656,
- NR_OF_V4L2_FWNODE_BUS_TYPE,
-};
-
static const struct v4l2_fwnode_bus_conv {
enum v4l2_fwnode_bus_type fwnode_bus_type;
enum v4l2_mbus_type mbus_type;
@@ -833,7 +822,7 @@ v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
if (ret < 0)
goto out_err;
- ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_notifier_add_subdev(notifier, asd);
if (ret < 0) {
/* not an error if asd already exists */
if (ret == -EEXIST)
@@ -955,7 +944,7 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
args.fwnode,
- sizeof(*asd));
+ struct v4l2_async_subdev);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
@@ -1255,7 +1244,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
struct v4l2_async_subdev *asd;
asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- sizeof(*asd));
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 3198abdd538c..31d1342e61e8 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -518,9 +518,9 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
- pr_cont("index=%d, count=%d, memory=%s, ",
- p->index, p->count,
- prt_names(p->memory, v4l2_memory_names));
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, ",
+ p->index, p->count, prt_names(p->memory, v4l2_memory_names),
+ p->capabilities);
v4l_print_format(&p->format, write_only);
}
@@ -3283,7 +3283,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
- void *mbuf = NULL;
+ void *mbuf = NULL, *array_buf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
@@ -3300,7 +3300,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
parg = sbuf;
} else {
/* too big to allocate from stack */
- mbuf = kvmalloc(ioc_size, GFP_KERNEL);
+ mbuf = kmalloc(ioc_size, GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
@@ -3318,27 +3318,21 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
has_array_args = err;
if (has_array_args) {
- /*
- * When adding new types of array args, make sure that the
- * parent argument to ioctl (which contains the pointer to the
- * array) fits into sbuf (so that mbuf will still remain
- * unused up to here).
- */
- mbuf = kvmalloc(array_size, GFP_KERNEL);
+ array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
- if (NULL == mbuf)
+ if (array_buf == NULL)
goto out_array_args;
err = -EFAULT;
if (in_compat_syscall())
- err = v4l2_compat_get_array_args(file, mbuf, user_ptr,
- array_size, orig_cmd,
- parg);
+ err = v4l2_compat_get_array_args(file, array_buf,
+ user_ptr, array_size,
+ orig_cmd, parg);
else
- err = copy_from_user(mbuf, user_ptr, array_size) ?
+ err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
goto out_array_args;
- *kernel_ptr = mbuf;
+ *kernel_ptr = array_buf;
}
/* Handles IOCTL */
@@ -3360,12 +3354,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
if (in_compat_syscall()) {
int put_err;
- put_err = v4l2_compat_put_array_args(file, user_ptr, mbuf,
- array_size, orig_cmd,
- parg);
+ put_err = v4l2_compat_put_array_args(file, user_ptr,
+ array_buf,
+ array_size,
+ orig_cmd, parg);
if (put_err)
err = put_err;
- } else if (copy_to_user(user_ptr, mbuf, array_size)) {
+ } else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
goto out_array_args;
@@ -3381,7 +3376,8 @@ out_array_args:
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
- kvfree(mbuf);
+ kvfree(array_buf);
+ kfree(mbuf);
return err;
}
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index b221b4e438a1..e7f4bf5bc8dd 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -887,9 +887,6 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
- poll_wait(file, &src_q->done_wq, wait);
- poll_wait(file, &dst_q->done_wq, wait);
-
/*
* There has to be at least one buffer queued on each queued_list, which
* means either in driver already or waiting for driver to claim it
@@ -922,9 +919,21 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
__poll_t req_events = poll_requested_events(wait);
__poll_t rc = 0;
+ /*
+ * poll_wait() MUST be called on the first invocation on all the
+ * potential queues of interest, even if we are not interested in their
+ * events during this first call. Failure to do so will result in the
+ * queue's events being ignored, because the poll_table won't be
+ * capable of adding new wait queues thereafter.
+ */
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+
if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
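
The rule in the comment above applies to any ->poll() implementation, not just mem2mem. A generic sketch, with my_ctx, data_ready() and space_ready() as hypothetical stand-ins:

    #include <linux/poll.h>

    struct my_ctx {
    	wait_queue_head_t in_wq, out_wq;
    };

    static __poll_t my_poll(struct file *file, struct poll_table_struct *wait)
    {
    	struct my_ctx *ctx = file->private_data;
    	__poll_t mask = 0;

    	/* Register every queue of interest on every call, even if the
    	 * caller only asked for a subset of events this time. */
    	poll_wait(file, &ctx->in_wq, wait);
    	poll_wait(file, &ctx->out_wq, wait);

    	if (data_ready(ctx))
    		mask |= EPOLLIN | EPOLLRDNORM;
    	if (space_ready(ctx))
    		mask |= EPOLLOUT | EPOLLWRNORM;
    	return mask;
    }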
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 3ea6913df176..72c0df129d5c 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -137,6 +137,15 @@ config TI_EMIF_SRAM
sequence so this driver provides several relocatable PM functions
for the SoC PM code to use.
+config FPGA_DFL_EMIF
+ tristate "FPGA DFL EMIF Driver"
+ depends on FPGA_DFL && HAS_IOMEM
+ help
+ This driver is for the EMIF private feature implemented under the
+ FPGA Device Feature List (DFL) framework. It is used to expose
+ memory interface status information as well as memory clearing
+ control.
+
config MVEBU_DEVBUS
bool "Marvell EBU Device Bus Controller"
default y if PLAT_ORION
@@ -173,7 +182,7 @@ config JZ4780_NEMC
memory devices such as NAND and SRAM.
config MTK_SMI
- bool "Mediatek SoC Memory Controller driver" if COMPILE_TEST
+ tristate "MediaTek SoC Memory Controller driver" if COMPILE_TEST
depends on ARCH_MEDIATEK || COMPILE_TEST
help
This driver is for the Memory Controller module in MediaTek SoCs,
@@ -202,9 +211,9 @@ config RENESAS_RPCIF
depends on ARCH_RENESAS || COMPILE_TEST
select REGMAP_MMIO
help
- This supports Renesas R-Car Gen3 RPC-IF which provides either SPI
- host or HyperFlash. You'll have to select individual components
- under the corresponding menu.
+ This supports Renesas R-Car Gen3 or RZ/G2 RPC-IF which provides
+ either SPI host or HyperFlash. You'll have to select individual
+ components under the corresponding menu.
config STM32_FMC2_EBI
tristate "Support for FMC2 External Bus Interface on STM32MP SoCs"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index e71cf7b99641..bc7663ed1c25 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
obj-$(CONFIG_TI_EMIF_SRAM) += ti-emif-sram.o
+obj-$(CONFIG_FPGA_DFL_EMIF) += dfl-emif.o
+
ti-emif-sram-objs := ti-emif-pm.o ti-emif-sram-pm.o
AFLAGS_ti-emif-sram-pm.o :=-Wa,-march=armv7-a
diff --git a/drivers/memory/dfl-emif.c b/drivers/memory/dfl-emif.c
new file mode 100644
index 000000000000..3f719816771d
--- /dev/null
+++ b/drivers/memory/dfl-emif.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFL device driver for EMIF private feature
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define FME_FEATURE_ID_EMIF 0x9
+
+#define EMIF_STAT 0x8
+#define EMIF_STAT_INIT_DONE_SFT 0
+#define EMIF_STAT_CALC_FAIL_SFT 8
+#define EMIF_STAT_CLEAR_BUSY_SFT 16
+#define EMIF_CTRL 0x10
+#define EMIF_CTRL_CLEAR_EN_SFT 0
+#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(3, 0)
+
+#define EMIF_POLL_INVL 10000 /* us */
+#define EMIF_POLL_TIMEOUT 5000000 /* us */
+
+struct dfl_emif {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock; /* Serialises access to EMIF_CTRL reg */
+};
+
+struct emif_attr {
+ struct device_attribute attr;
+ u32 shift;
+ u32 index;
+};
+
+#define to_emif_attr(dev_attr) \
+ container_of(dev_attr, struct emif_attr, attr)
+
+static ssize_t emif_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct emif_attr *eattr = to_emif_attr(attr);
+ struct dfl_emif *de = dev_get_drvdata(dev);
+ u64 val;
+
+ val = readq(de->base + EMIF_STAT);
+
+ return sysfs_emit(buf, "%u\n",
+ !!(val & BIT_ULL(eattr->shift + eattr->index)));
+}
+
+static ssize_t emif_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct emif_attr *eattr = to_emif_attr(attr);
+ struct dfl_emif *de = dev_get_drvdata(dev);
+ u64 clear_busy_msk, clear_en_msk, val;
+ void __iomem *base = de->base;
+
+ if (!sysfs_streq(buf, "1"))
+ return -EINVAL;
+
+ clear_busy_msk = BIT_ULL(EMIF_STAT_CLEAR_BUSY_SFT + eattr->index);
+ clear_en_msk = BIT_ULL(EMIF_CTRL_CLEAR_EN_SFT + eattr->index);
+
+ spin_lock(&de->lock);
+ /* The CLEAR_EN field is WO, but other fields are RW */
+ val = readq(base + EMIF_CTRL);
+ val &= ~EMIF_CTRL_CLEAR_EN_MSK;
+ val |= clear_en_msk;
+ writeq(val, base + EMIF_CTRL);
+ spin_unlock(&de->lock);
+
+ if (readq_poll_timeout(base + EMIF_STAT, val,
+ !(val & clear_busy_msk),
+ EMIF_POLL_INVL, EMIF_POLL_TIMEOUT)) {
+ dev_err(de->dev, "timeout, fail to clear\n");
+ return -ETIMEDOUT;
+ }
+
+ return count;
+}
+
+#define emif_state_attr(_name, _shift, _index) \
+ static struct emif_attr emif_attr_##inf##_index##_##_name = \
+ { .attr = __ATTR(inf##_index##_##_name, 0444, \
+ emif_state_show, NULL), \
+ .shift = (_shift), .index = (_index) }
+
+#define emif_clear_attr(_index) \
+ static struct emif_attr emif_attr_##inf##_index##_clear = \
+ { .attr = __ATTR(inf##_index##_clear, 0200, \
+ NULL, emif_clear_store), \
+ .index = (_index) }
+
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 0);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 1);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 2);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 3);
+
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 0);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 1);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 2);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 3);
+
+emif_clear_attr(0);
+emif_clear_attr(1);
+emif_clear_attr(2);
+emif_clear_attr(3);
+
+static struct attribute *dfl_emif_attrs[] = {
+ &emif_attr_inf0_init_done.attr.attr,
+ &emif_attr_inf0_cal_fail.attr.attr,
+ &emif_attr_inf0_clear.attr.attr,
+
+ &emif_attr_inf1_init_done.attr.attr,
+ &emif_attr_inf1_cal_fail.attr.attr,
+ &emif_attr_inf1_clear.attr.attr,
+
+ &emif_attr_inf2_init_done.attr.attr,
+ &emif_attr_inf2_cal_fail.attr.attr,
+ &emif_attr_inf2_clear.attr.attr,
+
+ &emif_attr_inf3_init_done.attr.attr,
+ &emif_attr_inf3_cal_fail.attr.attr,
+ &emif_attr_inf3_clear.attr.attr,
+
+ NULL,
+};
+
+static umode_t dfl_emif_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct dfl_emif *de = dev_get_drvdata(kobj_to_dev(kobj));
+ struct emif_attr *eattr = container_of(attr, struct emif_attr,
+ attr.attr);
+ u64 val;
+
+ /*
+ * This device supports up to 4 memory interfaces, but not all
+ * interfaces are used on every platform. The value read back from the
+ * CLEAR_EN field (which is a bitmap) tells which interfaces are
+ * available.
+ */
+ val = FIELD_GET(EMIF_CTRL_CLEAR_EN_MSK, readq(de->base + EMIF_CTRL));
+
+ return (val & BIT_ULL(eattr->index)) ? attr->mode : 0;
+}
+
+static const struct attribute_group dfl_emif_group = {
+ .is_visible = dfl_emif_visible,
+ .attrs = dfl_emif_attrs,
+};
+
+static const struct attribute_group *dfl_emif_groups[] = {
+ &dfl_emif_group,
+ NULL,
+};
+
+static int dfl_emif_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct dfl_emif *de;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ de->base = devm_ioremap_resource(dev, &ddev->mmio_res);
+ if (IS_ERR(de->base))
+ return PTR_ERR(de->base);
+
+ de->dev = dev;
+ spin_lock_init(&de->lock);
+ dev_set_drvdata(dev, de);
+
+ return 0;
+}
+
+static const struct dfl_device_id dfl_emif_ids[] = {
+ { FME_ID, FME_FEATURE_ID_EMIF },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, dfl_emif_ids);
+
+static struct dfl_driver dfl_emif_driver = {
+ .drv = {
+ .name = "dfl-emif",
+ .dev_groups = dfl_emif_groups,
+ },
+ .id_table = dfl_emif_ids,
+ .probe = dfl_emif_probe,
+};
+module_dfl_driver(dfl_emif_driver);
+
+MODULE_DESCRIPTION("DFL EMIF driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
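
A hedged userspace sketch of driving the sysfs interface defined above; the /sys/bus/dfl/devices/dfl_dev.0 path is an assumption, while the attribute names follow the emif_state_attr()/emif_clear_attr() definitions:

    #include <stdio.h>

    int main(void)
    {
    	char v;
    	FILE *f = fopen("/sys/bus/dfl/devices/dfl_dev.0/inf0_init_done", "r");

    	if (f && fscanf(f, "%c", &v) == 1)
    		printf("inf0 init_done: %c\n", v);
    	if (f)
    		fclose(f);

    	/* Writing "1" starts a clear; the driver then polls EMIF_STAT. */
    	f = fopen("/sys/bus/dfl/devices/dfl_dev.0/inf0_clear", "w");
    	if (f) {
    		fputs("1", f);
    		fclose(f);
    	}
    	return 0;
    }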
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index ddb1879f07d3..f7825eef5894 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -70,7 +70,7 @@ struct emif_data {
};
static struct emif_data *emif1;
-static spinlock_t emif_lock;
+static DEFINE_SPINLOCK(emif_lock);
static unsigned long irq_state;
static u32 t_ck; /* DDR clock period in ps */
static LIST_HEAD(device_list);
@@ -1531,7 +1531,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
/* One-time actions taken on probing the first device */
if (!emif1) {
emif1 = emif;
- spin_lock_init(&emif_lock);
/*
* TODO: register notifiers for frequency and voltage
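
Switching to DEFINE_SPINLOCK() makes the lock valid from boot rather than from the first successful probe, closing the window in which another code path could take an uninitialized lock. A minimal sketch of the two patterns:

    /* Compile-time initialization: usable immediately. */
    static DEFINE_SPINLOCK(lock_a);

    /* Runtime initialization: only usable after probe has run. */
    static spinlock_t lock_b;

    static int some_probe(void)
    {
    	spin_lock_init(&lock_b);
    	return 0;
    }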
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index ac350f8d1e20..b396253fcf4b 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -15,6 +15,7 @@
#include <linux/pm_runtime.h>
#include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
/* mt8173 */
#define SMI_LARB_MMU_EN 0xf00
@@ -43,6 +44,10 @@
/* mt2712 */
#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
#define F_MMU_EN BIT(0)
+#define BANK_SEL(id) ({ \
+ u32 _id = (id) & 0x3; \
+ (_id << 8 | _id << 10 | _id << 12 | _id << 14); \
+})
/* SMI COMMON */
#define SMI_BUS_SEL 0x220
@@ -87,6 +92,7 @@ struct mtk_smi_larb { /* larb: local arbiter */
const struct mtk_smi_larb_gen *larb_gen;
int larbid;
u32 *mmu;
+ unsigned char *bank;
};
static int mtk_smi_clk_enable(const struct mtk_smi *smi)
@@ -130,7 +136,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi)
int mtk_smi_larb_get(struct device *larbdev)
{
- int ret = pm_runtime_get_sync(larbdev);
+ int ret = pm_runtime_resume_and_get(larbdev);
return (ret < 0) ? ret : 0;
}
@@ -153,6 +159,7 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
if (dev == larb_mmu[i].dev) {
larb->larbid = i;
larb->mmu = &larb_mmu[i].mmu;
+ larb->bank = larb_mmu[i].bank;
return 0;
}
}
@@ -171,6 +178,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
reg |= F_MMU_EN;
+ reg |= BANK_SEL(larb->bank[i]);
writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
}
}
@@ -374,7 +382,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
int ret;
/* Power on smi-common. */
- ret = pm_runtime_get_sync(larb->smi_common_dev);
+ ret = pm_runtime_resume_and_get(larb->smi_common_dev);
if (ret < 0) {
dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
return ret;
@@ -587,26 +595,22 @@ static struct platform_driver mtk_smi_common_driver = {
}
};
+static struct platform_driver * const smidrivers[] = {
+ &mtk_smi_common_driver,
+ &mtk_smi_larb_driver,
+};
+
static int __init mtk_smi_init(void)
{
- int ret;
-
- ret = platform_driver_register(&mtk_smi_common_driver);
- if (ret != 0) {
- pr_err("Failed to register SMI driver\n");
- return ret;
- }
-
- ret = platform_driver_register(&mtk_smi_larb_driver);
- if (ret != 0) {
- pr_err("Failed to register SMI-LARB driver\n");
- goto err_unreg_smi;
- }
- return ret;
+ return platform_register_drivers(smidrivers, ARRAY_SIZE(smidrivers));
+}
+module_init(mtk_smi_init);
-err_unreg_smi:
- platform_driver_unregister(&mtk_smi_common_driver);
- return ret;
+static void __exit mtk_smi_exit(void)
+{
+ platform_unregister_drivers(smidrivers, ARRAY_SIZE(smidrivers));
}
+module_exit(mtk_smi_exit);
-module_init(mtk_smi_init);
+MODULE_DESCRIPTION("MediaTek SMI driver");
+MODULE_LICENSE("GPL v2");
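
As a worked example of the BANK_SEL() encoding added above, the 2-bit bank id is replicated into the four fields at bit offsets 8, 10, 12 and 14 of SMI_LARB_NONSEC_CON; for bank 2:

    BANK_SEL(2) = (2 << 8) | (2 << 10) | (2 << 12) | (2 << 14)
                = 0x0200 | 0x0800 | 0x2000 | 0x8000
                = 0xaa00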
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index 575fadbffa30..9eb8cc7de494 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -273,14 +273,12 @@ err_clk_enable:
return ret;
}
-static int pl172_remove(struct amba_device *adev)
+static void pl172_remove(struct amba_device *adev)
{
struct pl172_data *pl172 = amba_get_drvdata(adev);
clk_disable_unprepare(pl172->clk);
amba_release_regions(adev);
-
- return 0;
}
static const struct amba_id pl172_ids[] = {
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 73bd3023202f..3b5b1045edd9 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -426,14 +426,12 @@ out_clk_dis_aper:
return err;
}
-static int pl353_smc_remove(struct amba_device *adev)
+static void pl353_smc_remove(struct amba_device *adev)
{
struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
clk_disable_unprepare(pl353_smc->memclk);
clk_disable_unprepare(pl353_smc->aclk);
-
- return 0;
}
static const struct amba_id pl353_ids[] = {
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index c5ee4121a4d2..1dabb509dec3 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -278,7 +278,7 @@ static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
}
/**
- * find_target_freq_id() - Finds requested frequency in local DMC configuration
+ * find_target_freq_idx() - Finds requested frequency in local DMC configuration
* @dmc: device for which the information is checked
* @target_rate: requested frequency in KHz
*
@@ -998,7 +998,7 @@ static struct devfreq_dev_profile exynos5_dmc_df_profile = {
};
/**
- * exynos5_dmc_align_initial_frequency() - Align initial frequency value
+ * exynos5_dmc_align_init_freq() - Align initial frequency value
* @dmc: device for which the frequency is going to be set
* @bootloader_init_freq: initial frequency set by the bootloader in KHz
*
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index ca7077a06f4c..a70967a56e52 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -32,9 +32,11 @@ config TEGRA30_EMC
external memory.
config TEGRA124_EMC
- bool "NVIDIA Tegra124 External Memory Controller driver"
+ tristate "NVIDIA Tegra124 External Memory Controller driver"
default y
depends on TEGRA_MC && ARCH_TEGRA_124_SOC
+ select TEGRA124_CLK_EMC
+ select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
Tegra124 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 44064de962c2..a21163ccadc4 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -176,6 +176,13 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
if (!rst_ops)
return -ENODEV;
+ /* DMA flushing will fail if reset is already asserted */
+ if (rst_ops->reset_status) {
+ /* check whether reset is asserted */
+ if (rst_ops->reset_status(mc, rst))
+ return 0;
+ }
+
if (rst_ops->block_dma) {
/* block clients DMA requests */
err = rst_ops->block_dma(mc, rst);
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index ee8ee39e98ed..bee8d9f79b04 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -9,22 +9,29 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/interconnect-provider.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/sort.h>
#include <linux/string.h>
-#include <soc/tegra/emc.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/mc.h>
+#include "mc.h"
+
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X64 BIT(4)
#define EMC_INTSTATUS 0x0
#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
@@ -460,6 +467,17 @@ struct emc_timing {
u32 emc_zcal_interval;
};
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
@@ -470,6 +488,7 @@ struct tegra_emc {
struct clk *clk;
enum emc_dram_type dram_type;
+ unsigned int dram_bus_width;
unsigned int dram_num;
struct emc_timing last_timing;
@@ -481,6 +500,17 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ struct icc_provider provider;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
};
/* Timing change sequence functions */
@@ -562,8 +592,8 @@ static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
return timing;
}
-int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
- unsigned long rate)
+static int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
{
struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
struct emc_timing *last = &emc->last_timing;
@@ -790,8 +820,8 @@ int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
return 0;
}
-void tegra_emc_complete_timing_change(struct tegra_emc *emc,
- unsigned long rate)
+static void tegra_emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
{
struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
struct emc_timing *last = &emc->last_timing;
@@ -869,6 +899,14 @@ static void emc_read_current_timing(struct tegra_emc *emc,
static int emc_init(struct tegra_emc *emc)
{
emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc->dram_type & EMC_FBIO_CFG5_DRAM_WIDTH_X64)
+ emc->dram_bus_width = 64;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
emc->dram_type &= EMC_FBIO_CFG5_DRAM_TYPE_MASK;
emc->dram_type >>= EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
@@ -987,6 +1025,7 @@ static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra132-emc" },
{}
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct device_node *
tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
@@ -1007,6 +1046,83 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
return NULL;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
/*
* debugfs interface
*
@@ -1079,7 +1195,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1109,7 +1225,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1127,15 +1243,6 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
unsigned int i;
int err;
- emc->clk = devm_clk_get(dev, "emc");
- if (IS_ERR(emc->clk)) {
- if (PTR_ERR(emc->clk) != -ENODEV) {
- dev_err(dev, "failed to get EMC clock: %ld\n",
- PTR_ERR(emc->clk));
- return;
- }
- }
-
emc->debugfs.min_rate = ULONG_MAX;
emc->debugfs.max_rate = 0;
@@ -1175,6 +1282,168 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+ * Tegra124 EMC runs at the SDRAM bus clock rate. This means the EMC
+ * clock rate is half the peak data rate, because data is sampled on
+ * both EMC clock edges.
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ struct opp_table *hw_opp_table;
+ int err;
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ return err;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra124_clk_set_emc_callbacks(NULL, NULL);
+}
+
static int tegra_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -1186,6 +1455,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (!emc)
return -ENOMEM;
+ mutex_init(&emc->rate_lock);
emc->dev = &pdev->dev;
emc->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -1199,23 +1469,15 @@ static int tegra_emc_probe(struct platform_device *pdev)
ram_code = tegra_read_ram_code();
np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code);
- if (!np) {
- dev_err(&pdev->dev,
- "no memory timings for RAM code %u found in DT\n",
- ram_code);
- return -ENOENT;
- }
-
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
-
- if (emc->num_timings == 0) {
- dev_err(&pdev->dev,
- "no memory timings for RAM code %u registered\n",
- ram_code);
- return -ENOENT;
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ } else {
+ dev_info(&pdev->dev,
+ "no memory timings for RAM code %u found in DT\n",
+ ram_code);
}
err = emc_init(emc);
@@ -1226,9 +1488,39 @@ static int tegra_emc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, emc);
+ tegra124_clk_set_emc_callbacks(tegra_emc_prepare_timing_change,
+ tegra_emc_complete_timing_change);
+
+ err = devm_add_action_or_reset(&pdev->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
+
+ tegra_emc_rate_requests_init(emc);
+
if (IS_ENABLED(CONFIG_DEBUG_FS))
emc_debugfs_init(&pdev->dev, emc);
+ tegra_emc_interconnect_init(emc);
+
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case of
+ * this driver.
+ */
+ try_module_get(THIS_MODULE);
+
return 0;
};
@@ -1238,11 +1530,11 @@ static struct platform_driver tegra_emc_driver = {
.name = "tegra-emc",
.of_match_table = tegra_emc_of_match,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra124 EMC driver");
+MODULE_LICENSE("GPL v2");
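
A worked example of the aggregation in emc_request_rate(), with hypothetical figures: if the debugfs source previously requested the window [204 MHz, 408 MHz] and the ICC source now asks for a minimum of 300 MHz, the loop computes

    min_rate = max(204 MHz, 300 MHz) = 300 MHz
    max_rate = min(408 MHz, ULONG_MAX) = 408 MHz

and dev_pm_opp_set_rate() is called with 300 MHz. Had the window inverted (min_rate > max_rate), the request would have failed with -ERANGE.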
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index e2389573d3c0..459211f50c08 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -4,7 +4,8 @@
*/
#include <linux/of.h>
-#include <linux/mm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
#include <dt-bindings/memory/tegra124-mc.h>
@@ -1010,6 +1011,83 @@ static const struct tegra_mc_reset tegra124_mc_resets[] = {
TEGRA124_MC_RESET(GPU, 0x970, 0x974, 2),
};
+static int tegra124_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /* TODO: program PTSA */
+ return 0;
+}
+
+static int tegra124_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to account for
+ * the inefficiencies of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra124_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra124_mc_icc_ops = {
+ .xlate_extended = tegra124_mc_of_icc_xlate_extended,
+ .aggregate = tegra124_mc_icc_aggreate,
+ .set = tegra124_mc_icc_set,
+};
+
#ifdef CONFIG_ARCH_TEGRA_124_SOC
static const unsigned long tegra124_mc_emem_regs[] = {
MC_EMEM_ARB_CFG,
@@ -1061,6 +1139,7 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
};
#endif /* CONFIG_ARCH_TEGRA_124_SOC */
@@ -1091,5 +1170,6 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
};
#endif /* CONFIG_ARCH_TEGRA_132_SOC */
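
A consumer-side sketch of using the interconnect provider registered above; the device, the "dma-mem" path name and the bandwidth figures are assumptions:

    #include <linux/interconnect.h>

    static int dc_request_memory_bw(struct device *dev)
    {
    	struct icc_path *path;

    	path = of_icc_get(dev, "dma-mem");
    	if (IS_ERR(path))
    		return PTR_ERR(path);

    	/* avg/peak bandwidth; ISO-tagged clients get the 4x headroom
    	 * applied by tegra124_mc_icc_aggreate() above. */
    	return icc_set_bw(path, MBps_to_icc(500), MBps_to_icc(1000));
    }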
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index fa8af17b0e2d..d65e7c2a580b 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -125,9 +125,9 @@ static int tegra186_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
- tegra186_emc_debug_min_rate_get,
- tegra186_emc_debug_min_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
+ tegra186_emc_debug_min_rate_get,
+ tegra186_emc_debug_min_rate_set, "%llu\n");
static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate)
{
@@ -155,9 +155,9 @@ static int tegra186_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
- tegra186_emc_debug_max_rate_get,
- tegra186_emc_debug_max_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
+ tegra186_emc_debug_max_rate_get,
+ tegra186_emc_debug_max_rate_set, "%llu\n");
static int tegra186_emc_probe(struct platform_device *pdev)
{
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 686aaf477d8a..d653a6be8d7f 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -911,21 +911,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_process_id);
- struct opp_table *clk_opp_table, *hw_opp_table;
+ struct opp_table *hw_opp_table;
int err;
- clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
- err = PTR_ERR_OR_ZERO(clk_opp_table);
- if (err) {
- dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
err = PTR_ERR_OR_ZERO(hw_opp_table);
if (err) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
- goto put_clk_table;
+ return err;
}
err = dev_pm_opp_of_add_table(emc->dev);
@@ -954,8 +947,6 @@ remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
dev_pm_opp_put_supported_hw(hw_opp_table);
-put_clk_table:
- dev_pm_opp_put_clkname(clk_opp_table);
return err;
}
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 44ac155936aa..6985da0ffb35 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1483,21 +1483,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *clk_opp_table, *hw_opp_table;
+ struct opp_table *hw_opp_table;
int err;
- clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
- err = PTR_ERR_OR_ZERO(clk_opp_table);
- if (err) {
- dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
err = PTR_ERR_OR_ZERO(hw_opp_table);
if (err) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
- goto put_clk_table;
+ return err;
}
err = dev_pm_opp_of_add_table(emc->dev);
@@ -1526,8 +1519,6 @@ remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
dev_pm_opp_put_supported_hw(hw_opp_table);
-put_clk_table:
- dev_pm_opp_put_clkname(clk_opp_table);
return err;
}
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 159a16f5e7d6..51d20c2ccb75 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev)
*/
for_each_available_child_of_node(np, child_np) {
ret = of_aemif_parse_abus_config(pdev, child_np);
- if (ret < 0)
+ if (ret < 0) {
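+			/* drop the child reference held by the loop iterator */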
+ of_node_put(child_np);
goto error;
+ }
}
} else if (pdata && pdata->num_abus_data > 0) {
for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
@@ -405,8 +407,10 @@ static int aemif_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, child_np) {
ret = of_platform_populate(child_np, NULL,
dev_lookup, dev);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child_np);
goto error;
+ }
}
} else if (pdata) {
for (i = 0; i < pdata->num_sub_devices; i++) {
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 6c747c1e98cb..179fec2da56d 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -340,7 +340,7 @@ static struct platform_driver ti_emif_driver = {
.remove = ti_emif_remove,
.driver = {
.name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(ti_emif_of_match),
+ .of_match_table = ti_emif_of_match,
.pm = &ti_emif_pm_ops,
},
};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index bdfce7b15621..b74efa469e90 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -659,15 +659,6 @@ config MFD_INTEL_LPSS_PCI
I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
PCH) in PCI mode.
-config MFD_INTEL_MSIC
- bool "Intel MSIC"
- depends on INTEL_SCU
- select MFD_CORE
- help
- Select this option to enable access to Intel MSIC (Avatele
- Passage) chip. This chip embeds audio, battery, GPIO, etc.
- devices used in Intel Medfield platforms.
-
config MFD_INTEL_PMC_BXT
tristate "Intel PMC Driver for Broxton"
depends on X86
@@ -2085,6 +2076,17 @@ config MFD_KHADAS_MCU
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_ACER_A500_EC
+ tristate "Support for Acer Iconia Tab A500 Embedded Controller"
+ depends on I2C
+ depends on (ARCH_TEGRA_2x_SOC && OF) || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP
+ help
+	  Support for the Embedded Controller found on the Acer Iconia Tab A500.
+	  The controller itself is an ENE KB930; it runs firmware customized
+	  for the specific needs of the Acer A500 hardware.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
@@ -2129,7 +2131,7 @@ config RAVE_SP_CORE
device found on several devices in RAVE line of hardware.
config SGI_MFD_IOC3
- tristate "SGI IOC3 core driver"
+ bool "SGI IOC3 core driver"
depends on PCI && MIPS && 64BIT
select MFD_CORE
help
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 14fdb188af02..834f5463af28 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -214,7 +214,6 @@ obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o
obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
-obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o
obj-$(CONFIG_MFD_INTEL_PMT) += intel_pmt.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
@@ -264,6 +263,7 @@ obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o
+obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index a3bac9da8cbb..a9037911162b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -21,7 +21,6 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/regulator/ab8500.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -610,52 +609,52 @@ int ab8500_suspend(struct ab8500 *ab8500)
}
static const struct mfd_cell ab8500_bm_devs[] = {
- OF_MFD_CELL("ab8500-charger", NULL, &ab8500_bm_data,
+ MFD_CELL_OF("ab8500-charger", NULL, &ab8500_bm_data,
sizeof(ab8500_bm_data), 0, "stericsson,ab8500-charger"),
- OF_MFD_CELL("ab8500-btemp", NULL, &ab8500_bm_data,
+ MFD_CELL_OF("ab8500-btemp", NULL, &ab8500_bm_data,
sizeof(ab8500_bm_data), 0, "stericsson,ab8500-btemp"),
- OF_MFD_CELL("ab8500-fg", NULL, &ab8500_bm_data,
+ MFD_CELL_OF("ab8500-fg", NULL, &ab8500_bm_data,
sizeof(ab8500_bm_data), 0, "stericsson,ab8500-fg"),
- OF_MFD_CELL("ab8500-chargalg", NULL, &ab8500_bm_data,
+ MFD_CELL_OF("ab8500-chargalg", NULL, &ab8500_bm_data,
sizeof(ab8500_bm_data), 0, "stericsson,ab8500-chargalg"),
};
static const struct mfd_cell ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
- OF_MFD_CELL("ab8500-debug",
+ MFD_CELL_OF("ab8500-debug",
NULL, NULL, 0, 0, "stericsson,ab8500-debug"),
#endif
- OF_MFD_CELL("ab8500-sysctrl",
+ MFD_CELL_OF("ab8500-sysctrl",
NULL, NULL, 0, 0, "stericsson,ab8500-sysctrl"),
- OF_MFD_CELL("ab8500-ext-regulator",
+ MFD_CELL_OF("ab8500-ext-regulator",
NULL, NULL, 0, 0, "stericsson,ab8500-ext-regulator"),
- OF_MFD_CELL("ab8500-regulator",
+ MFD_CELL_OF("ab8500-regulator",
NULL, NULL, 0, 0, "stericsson,ab8500-regulator"),
- OF_MFD_CELL("ab8500-clk",
+ MFD_CELL_OF("ab8500-clk",
NULL, NULL, 0, 0, "stericsson,ab8500-clk"),
- OF_MFD_CELL("ab8500-gpadc",
+ MFD_CELL_OF("ab8500-gpadc",
NULL, NULL, 0, 0, "stericsson,ab8500-gpadc"),
- OF_MFD_CELL("ab8500-rtc",
+ MFD_CELL_OF("ab8500-rtc",
NULL, NULL, 0, 0, "stericsson,ab8500-rtc"),
- OF_MFD_CELL("ab8500-acc-det",
+ MFD_CELL_OF("ab8500-acc-det",
NULL, NULL, 0, 0, "stericsson,ab8500-acc-det"),
- OF_MFD_CELL("ab8500-poweron-key",
+ MFD_CELL_OF("ab8500-poweron-key",
NULL, NULL, 0, 0, "stericsson,ab8500-poweron-key"),
- OF_MFD_CELL("ab8500-pwm",
+ MFD_CELL_OF("ab8500-pwm",
NULL, NULL, 0, 1, "stericsson,ab8500-pwm"),
- OF_MFD_CELL("ab8500-pwm",
+ MFD_CELL_OF("ab8500-pwm",
NULL, NULL, 0, 2, "stericsson,ab8500-pwm"),
- OF_MFD_CELL("ab8500-pwm",
+ MFD_CELL_OF("ab8500-pwm",
NULL, NULL, 0, 3, "stericsson,ab8500-pwm"),
- OF_MFD_CELL("ab8500-denc",
+ MFD_CELL_OF("ab8500-denc",
NULL, NULL, 0, 0, "stericsson,ab8500-denc"),
- OF_MFD_CELL("pinctrl-ab8500",
+ MFD_CELL_OF("pinctrl-ab8500",
NULL, NULL, 0, 0, "stericsson,ab8500-gpio"),
- OF_MFD_CELL("abx500-temp",
+ MFD_CELL_OF("abx500-temp",
NULL, NULL, 0, 0, "stericsson,abx500-temp"),
- OF_MFD_CELL("ab8500-usb",
+ MFD_CELL_OF("ab8500-usb",
NULL, NULL, 0, 0, "stericsson,ab8500-usb"),
- OF_MFD_CELL("ab8500-codec",
+ MFD_CELL_OF("ab8500-codec",
NULL, NULL, 0, 0, "stericsson,ab8500-codec"),
};
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
new file mode 100644
index 000000000000..80c2fdd14fc4
--- /dev/null
+++ b/drivers/mfd/acer-ec-a500.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Acer Iconia Tab A500 Embedded Controller Driver
+ *
+ * Copyright 2020 GRATE-driver project
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+#define A500_EC_I2C_ERR_TIMEOUT 500
+#define A500_EC_POWER_CMD_TIMEOUT 1000
+
+/*
+ * The controller's firmware expects specific command opcodes to be used for the
+ * corresponding registers. Unsupported commands are skipped by the firmware.
+ */
+#define CMD_SHUTDOWN 0x0
+#define CMD_WARM_REBOOT 0x0
+#define CMD_COLD_REBOOT 0x1
+
+enum {
+ REG_CURRENT_NOW = 0x03,
+ REG_SHUTDOWN = 0x52,
+ REG_WARM_REBOOT = 0x54,
+ REG_COLD_REBOOT = 0x55,
+};
+
+static struct i2c_client *a500_ec_client_pm_off;
+
+static int a500_ec_read(void *context, const void *reg_buf, size_t reg_size,
+			void *val_buf, size_t val_size)
+{
+ struct i2c_client *client = context;
+ unsigned int reg, retries = 5;
+ u16 *ret_val = val_buf;
+ s32 ret = 0;
+
+ reg = *(u8 *)reg_buf;
+
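+	/*
+	 * Retry a few times with a long back-off, on the assumption that
+	 * the EC may be temporarily unable to answer.
+	 */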
+ while (retries-- > 0) {
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret >= 0)
+ break;
+
+ msleep(A500_EC_I2C_ERR_TIMEOUT);
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev, "read 0x%x failed: %d\n", reg, ret);
+ return ret;
+ }
+
+ *ret_val = ret;
+
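+	/* reads of the current sensor apparently need ~10 ms of settling */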
+ if (reg == REG_CURRENT_NOW)
+ fsleep(10000);
+
+ return 0;
+}
+
+static int a500_ec_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ unsigned int reg, val, retries = 5;
+ s32 ret = 0;
+
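+	/* regmap packs the message as a u8 register followed by a le16 value */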
+ reg = *(u8 *)(data + 0);
+ val = *(u16 *)(data + 1);
+
+ while (retries-- > 0) {
+ ret = i2c_smbus_write_word_data(client, reg, val);
+ if (ret >= 0)
+ break;
+
+ msleep(A500_EC_I2C_ERR_TIMEOUT);
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev, "write 0x%x failed: %d\n", reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config a500_ec_regmap_config = {
+ .name = "KB930",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0xff,
+};
+
+static const struct regmap_bus a500_ec_regmap_bus = {
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .write = a500_ec_write,
+ .read = a500_ec_read,
+ .max_raw_read = 2,
+};
+
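+/*
+ * Sub-device drivers use plain regmap accessors on top of this bus. A
+ * minimal hypothetical read sketch:
+ *
+ *	unsigned int val;
+ *	int err = regmap_read(regmap, REG_CURRENT_NOW, &val);
+ */
+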
+static void a500_ec_poweroff(void)
+{
+ i2c_smbus_write_word_data(a500_ec_client_pm_off,
+ REG_SHUTDOWN, CMD_SHUTDOWN);
+
+ mdelay(A500_EC_POWER_CMD_TIMEOUT);
+}
+
+static int a500_ec_restart_notify(struct notifier_block *this,
+ unsigned long reboot_mode, void *data)
+{
+ if (reboot_mode == REBOOT_WARM)
+ i2c_smbus_write_word_data(a500_ec_client_pm_off,
+ REG_WARM_REBOOT, CMD_WARM_REBOOT);
+ else
+ i2c_smbus_write_word_data(a500_ec_client_pm_off,
+ REG_COLD_REBOOT, CMD_COLD_REBOOT);
+
+ mdelay(A500_EC_POWER_CMD_TIMEOUT);
+
+ return NOTIFY_DONE;
+}
+
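+/* priority above the default of 128 so this handler is preferred */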
+static struct notifier_block a500_ec_restart_handler = {
+ .notifier_call = a500_ec_restart_notify,
+ .priority = 200,
+};
+
+static const struct mfd_cell a500_ec_cells[] = {
+ { .name = "acer-a500-iconia-battery", },
+ { .name = "acer-a500-iconia-leds", },
+};
+
+static int a500_ec_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+ int err;
+
+ regmap = devm_regmap_init(&client->dev, &a500_ec_regmap_bus,
+ client, &a500_ec_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ err = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ a500_ec_cells, ARRAY_SIZE(a500_ec_cells),
+ NULL, 0, NULL);
+ if (err) {
+ dev_err(&client->dev, "failed to add sub-devices: %d\n", err);
+ return err;
+ }
+
+ if (of_device_is_system_power_controller(client->dev.of_node)) {
+ a500_ec_client_pm_off = client;
+
+ err = register_restart_handler(&a500_ec_restart_handler);
+ if (err)
+ return err;
+
+ if (!pm_power_off)
+ pm_power_off = a500_ec_poweroff;
+ }
+
+ return 0;
+}
+
+static int a500_ec_remove(struct i2c_client *client)
+{
+ if (of_device_is_system_power_controller(client->dev.of_node)) {
+ if (pm_power_off == a500_ec_poweroff)
+ pm_power_off = NULL;
+
+ unregister_restart_handler(&a500_ec_restart_handler);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id a500_ec_match[] = {
+ { .compatible = "acer,a500-iconia-ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, a500_ec_match);
+
+static struct i2c_driver a500_ec_driver = {
+ .driver = {
+ .name = "acer-a500-embedded-controller",
+ .of_match_table = a500_ec_match,
+ },
+ .probe_new = a500_ec_probe,
+ .remove = a500_ec_remove,
+};
+module_i2c_driver(a500_ec_driver);
+
+MODULE_DESCRIPTION("Acer Iconia Tab A500 Embedded Controller driver");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 193a96c8b1ea..20cb294c7512 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -145,7 +145,8 @@ static int sysmgr_probe(struct platform_device *pdev)
sysmgr_config.reg_write = s10_protected_reg_write;
/* Need physical address for SMCC call */
- regmap = devm_regmap_init(dev, NULL, (void *)res->start,
+ regmap = devm_regmap_init(dev, NULL,
+ (void *)(uintptr_t)res->start,
&sysmgr_config);
} else {
base = devm_ioremap(dev, res->start, resource_size(res));
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 000cb82023e3..75f1bc671d59 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -797,17 +797,6 @@ const struct dev_pm_ops arizona_pm_ops = {
EXPORT_SYMBOL_GPL(arizona_pm_ops);
#ifdef CONFIG_OF
-unsigned long arizona_of_get_type(struct device *dev)
-{
- const struct of_device_id *id = of_match_device(arizona_of_match, dev);
-
- if (id)
- return (unsigned long)id->data;
- else
- return 0;
-}
-EXPORT_SYMBOL_GPL(arizona_of_get_type);
-
static int arizona_of_get_core_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 4b58e3ad6eb6..5e83b730c4ce 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -23,14 +23,16 @@
static int arizona_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ const void *match_data;
struct arizona *arizona;
const struct regmap_config *regmap_config = NULL;
- unsigned long type;
+ unsigned long type = 0;
int ret;
- if (i2c->dev.of_node)
- type = arizona_of_get_type(&i2c->dev);
- else
+ match_data = device_get_match_data(&i2c->dev);
+ if (match_data)
+ type = (unsigned long)match_data;
+ else if (id)
type = id->driver_data;
switch (type) {
@@ -115,6 +117,7 @@ static struct i2c_driver arizona_i2c_driver = {
module_i2c_driver(arizona_i2c_driver);
+MODULE_SOFTDEP("pre: arizona_ldo1");
MODULE_DESCRIPTION("Arizona I2C bus interface");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 2633e147b76c..24a2c75d691a 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -7,7 +7,10 @@
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*/
+#include <linux/acpi.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -15,22 +18,141 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
+#include <uapi/linux/input-event-codes.h>
#include <linux/mfd/arizona/core.h>
#include "arizona.h"
+#ifdef CONFIG_ACPI
+static const struct acpi_gpio_params reset_gpios = { 1, 0, false };
+static const struct acpi_gpio_params ldoena_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping arizona_acpi_gpios[] = {
+ { "reset-gpios", &reset_gpios, 1, },
+ { "wlf,ldoena-gpios", &ldoena_gpios, 1 },
+ { }
+};
+
+/*
+ * The ACPI resources for the device only describe external GPIOs. They do
+ * not provide mappings for the GPIOs coming from the Arizona codec itself.
+ */
+static const struct gpiod_lookup arizona_soc_gpios[] = {
+ { "arizona", 2, "wlf,spkvdd-ena", 0, GPIO_ACTIVE_HIGH },
+ { "arizona", 4, "wlf,micd-pol", 0, GPIO_ACTIVE_LOW },
+};
+
+/*
+ * The AOSP 3.5 mm Headset: Accessory Specification gives the following values:
+ * Function A Play/Pause: 0 ohm
+ * Function D Voice assistant: 135 ohm
+ * Function B Volume Up 240 ohm
+ * Function C Volume Down 470 ohm
+ * Minimum Mic DC resistance 1000 ohm
+ * Minimum Ear speaker impedance 16 ohm
+ * Note the first max value below must be less than the min. speaker impedance,
+ * to allow CTIA/OMTP detection to work. The other max values are the closest
+ * values from extcon-arizona.c:arizona_micd_levels halfway between two button
+ * resistances.
+ */
+static const struct arizona_micd_range arizona_micd_aosp_ranges[] = {
+ { .max = 11, .key = KEY_PLAYPAUSE },
+ { .max = 186, .key = KEY_VOICECOMMAND },
+ { .max = 348, .key = KEY_VOLUMEUP },
+ { .max = 752, .key = KEY_VOLUMEDOWN },
+};
+
+static void arizona_spi_acpi_remove_lookup(void *lookup)
+{
+ gpiod_remove_lookup_table(lookup);
+}
+
+static int arizona_spi_acpi_probe(struct arizona *arizona)
+{
+ struct gpiod_lookup_table *lookup;
+ acpi_status status;
+ int ret;
+
+	/* Add mappings for the two ACPI-declared GPIOs used for reset and ldo-ena */
+ devm_acpi_dev_add_driver_gpios(arizona->dev, arizona_acpi_gpios);
+
+	/*
+	 * Add lookups for the SoC's own GPIOs used for micdet-polarity and
+	 * spkVDD-enable. The table is allocated with one extra zeroed entry
+	 * to terminate it.
+	 */
+ lookup = devm_kzalloc(arizona->dev,
+ struct_size(lookup, table, ARRAY_SIZE(arizona_soc_gpios) + 1),
+ GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ lookup->dev_id = dev_name(arizona->dev);
+ memcpy(lookup->table, arizona_soc_gpios, sizeof(arizona_soc_gpios));
+
+ gpiod_add_lookup_table(lookup);
+ ret = devm_add_action_or_reset(arizona->dev, arizona_spi_acpi_remove_lookup, lookup);
+ if (ret)
+ return ret;
+
+ /* Enable 32KHz clock from SoC to codec for jack-detect */
+ status = acpi_evaluate_object(ACPI_HANDLE(arizona->dev), "CLKE", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(arizona->dev, "Failed to enable 32KHz clk ACPI error %d\n", status);
+
+ /*
+	 * Some DSDTs wrongly declare the IRQ trigger-type as IRQF_TRIGGER_FALLING.
+ * The IRQ line will stay low when a new IRQ event happens between reading
+ * the IRQ status flags and acknowledging them. When the IRQ line stays
+ * low like this the IRQ will never trigger again when its type is set
+ * to IRQF_TRIGGER_FALLING. Correct the IRQ trigger-type to fix this.
+ *
+	 * Note that, theoretically, it is possible that some boards are not capable
+ * of handling active low level interrupts. In that case setting the
+ * flag to IRQF_TRIGGER_FALLING would not be a bug (and we would need
+ * to work around this) but so far all known usages of IRQF_TRIGGER_FALLING
+ * are a bug in the board's DSDT.
+ */
+ arizona->pdata.irq_flags = IRQF_TRIGGER_LOW;
+
+ /* Wait 200 ms after jack insertion */
+ arizona->pdata.micd_detect_debounce = 200;
+
+ /* Use standard AOSP values for headset-button mappings */
+ arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+
+ return 0;
+}
+
+static const struct acpi_device_id arizona_acpi_match[] = {
+ {
+ .id = "WM510204",
+ .driver_data = WM5102,
+ },
+ {
+ .id = "WM510205",
+ .driver_data = WM5102,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, arizona_acpi_match);
+#else
+static int arizona_spi_acpi_probe(struct arizona *arizona)
+{
+ return -ENODEV;
+}
+#endif
+
static int arizona_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ const void *match_data;
struct arizona *arizona;
const struct regmap_config *regmap_config = NULL;
- unsigned long type;
+ unsigned long type = 0;
int ret;
- if (spi->dev.of_node)
- type = arizona_of_get_type(&spi->dev);
- else
+ match_data = device_get_match_data(&spi->dev);
+ if (match_data)
+ type = (unsigned long)match_data;
+ else if (id)
type = id->driver_data;
switch (type) {
@@ -75,6 +197,12 @@ static int arizona_spi_probe(struct spi_device *spi)
arizona->dev = &spi->dev;
arizona->irq = spi->irq;
+ if (has_acpi_companion(&spi->dev)) {
+ ret = arizona_spi_acpi_probe(arizona);
+ if (ret)
+ return ret;
+ }
+
return arizona_dev_init(arizona);
}
@@ -102,6 +230,7 @@ static struct spi_driver arizona_spi_driver = {
.name = "arizona",
.pm = &arizona_pm_ops,
.of_match_table = of_match_ptr(arizona_of_match),
+ .acpi_match_table = ACPI_PTR(arizona_acpi_match),
},
.probe = arizona_spi_probe,
.remove = arizona_spi_remove,
@@ -110,6 +239,7 @@ static struct spi_driver arizona_spi_driver = {
module_spi_driver(arizona_spi_driver);
+MODULE_SOFTDEP("pre: arizona_ldo1");
MODULE_DESCRIPTION("Arizona SPI bus interface");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index 995efc6d7f32..801cbbcd71cb 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -50,13 +50,4 @@ int arizona_dev_exit(struct arizona *arizona);
int arizona_irq_init(struct arizona *arizona);
int arizona_irq_exit(struct arizona *arizona);
-#ifdef CONFIG_OF
-unsigned long arizona_of_get_type(struct device *dev);
-#else
-static inline unsigned long arizona_of_get_type(struct device *dev)
-{
- return 0;
-}
-#endif
-
#endif
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 2cfde81f5fbf..00ab48018d8d 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -54,7 +54,9 @@ static int axp20x_i2c_remove(struct i2c_client *i2c)
{
struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
- return axp20x_device_remove(axp20x);
+ axp20x_device_remove(axp20x);
+
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
index 4cdc79f5cc48..214bc0d84d44 100644
--- a/drivers/mfd/axp20x-rsb.c
+++ b/drivers/mfd/axp20x-rsb.c
@@ -49,11 +49,11 @@ static int axp20x_rsb_probe(struct sunxi_rsb_device *rdev)
return axp20x_device_probe(axp20x);
}
-static int axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
+static void axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
{
struct axp20x_dev *axp20x = sunxi_rsb_device_get_drvdata(rdev);
- return axp20x_device_remove(axp20x);
+ axp20x_device_remove(axp20x);
}
static const struct of_device_id axp20x_rsb_of_match[] = {
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index aa59496e4376..3eae04e24ac8 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -987,7 +987,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
}
EXPORT_SYMBOL(axp20x_device_probe);
-int axp20x_device_remove(struct axp20x_dev *axp20x)
+void axp20x_device_remove(struct axp20x_dev *axp20x)
{
if (axp20x == axp20x_pm_power_off) {
axp20x_pm_power_off = NULL;
@@ -996,8 +996,6 @@ int axp20x_device_remove(struct axp20x_dev *axp20x)
mfd_remove_devices(axp20x->dev);
regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
-
- return 0;
}
EXPORT_SYMBOL(axp20x_device_remove);
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index fab3cdc27ed6..e15b1acfb063 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * ROHM BD9571MWV-M MFD driver
+ * ROHM BD9571MWV-M and BD9574MVF-M core driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2020 Renesas Electronics Corporation
*
* Based on the TPS65086 driver
*/
@@ -18,6 +11,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
#include <linux/mfd/bd9571mwv.h>
@@ -110,13 +104,78 @@ static struct regmap_irq_chip bd9571mwv_irq_chip = {
.num_irqs = ARRAY_SIZE(bd9571mwv_irqs),
};
-static int bd9571mwv_identify(struct bd9571mwv *bd)
+static const struct mfd_cell bd9574mwf_cells[] = {
+ { .name = "bd9574mwf-regulator", },
+ { .name = "bd9574mwf-gpio", },
+};
+
+static const struct regmap_range bd9574mwf_readable_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_VENDOR_CODE, BD9571MWV_PRODUCT_REVISION),
+ regmap_reg_range(BD9571MWV_BKUP_MODE_CNT, BD9571MWV_BKUP_MODE_CNT),
+ regmap_reg_range(BD9571MWV_DVFS_VINIT, BD9571MWV_DVFS_SETVMAX),
+ regmap_reg_range(BD9571MWV_DVFS_SETVID, BD9571MWV_DVFS_MONIVDAC),
+ regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
+ regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INTMASK),
+ regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTMASK),
+};
+
+static const struct regmap_access_table bd9574mwf_readable_table = {
+ .yes_ranges = bd9574mwf_readable_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd9574mwf_readable_yes_ranges),
+};
+
+static const struct regmap_range bd9574mwf_writable_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_BKUP_MODE_CNT, BD9571MWV_BKUP_MODE_CNT),
+ regmap_reg_range(BD9571MWV_DVFS_SETVID, BD9571MWV_DVFS_SETVID),
+ regmap_reg_range(BD9571MWV_GPIO_DIR, BD9571MWV_GPIO_OUT),
+ regmap_reg_range(BD9571MWV_GPIO_INT_SET, BD9571MWV_GPIO_INTMASK),
+ regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTMASK),
+};
+
+static const struct regmap_access_table bd9574mwf_writable_table = {
+ .yes_ranges = bd9574mwf_writable_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd9574mwf_writable_yes_ranges),
+};
+
+static const struct regmap_range bd9574mwf_volatile_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
+ regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
+ regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
+ regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
+};
+
+static const struct regmap_access_table bd9574mwf_volatile_table = {
+ .yes_ranges = bd9574mwf_volatile_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd9574mwf_volatile_yes_ranges),
+};
+
+static const struct regmap_config bd9574mwf_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &bd9574mwf_readable_table,
+ .wr_table = &bd9574mwf_writable_table,
+ .volatile_table = &bd9574mwf_volatile_table,
+ .max_register = 0xff,
+};
+
+static struct regmap_irq_chip bd9574mwf_irq_chip = {
+ .name = "bd9574mwf",
+ .status_base = BD9571MWV_INT_INTREQ,
+ .mask_base = BD9571MWV_INT_INTMASK,
+ .ack_base = BD9571MWV_INT_INTREQ,
+ .init_ack_masked = true,
+ .num_regs = 1,
+ .irqs = bd9571mwv_irqs,
+ .num_irqs = ARRAY_SIZE(bd9571mwv_irqs),
+};
+
+static int bd957x_identify(struct device *dev, struct regmap *regmap)
{
- struct device *dev = bd->dev;
unsigned int value;
int ret;
- ret = regmap_read(bd->regmap, BD9571MWV_VENDOR_CODE, &value);
+ ret = regmap_read(regmap, BD9571MWV_VENDOR_CODE, &value);
if (ret) {
dev_err(dev, "Failed to read vendor code register (ret=%i)\n",
ret);
@@ -129,84 +188,82 @@ static int bd9571mwv_identify(struct bd9571mwv *bd)
return -EINVAL;
}
- ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_CODE, &value);
+ ret = regmap_read(regmap, BD9571MWV_PRODUCT_CODE, &value);
if (ret) {
dev_err(dev, "Failed to read product code register (ret=%i)\n",
ret);
return ret;
}
-
- if (value != BD9571MWV_PRODUCT_CODE_VAL) {
- dev_err(dev, "Invalid product code ID %02x (expected %02x)\n",
- value, BD9571MWV_PRODUCT_CODE_VAL);
- return -EINVAL;
- }
-
- ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_REVISION, &value);
+ ret = regmap_read(regmap, BD9571MWV_PRODUCT_REVISION, &value);
if (ret) {
dev_err(dev, "Failed to read revision register (ret=%i)\n",
ret);
return ret;
}
- dev_info(dev, "Device: BD9571MWV rev. %d\n", value & 0xff);
-
return 0;
}
static int bd9571mwv_probe(struct i2c_client *client,
- const struct i2c_device_id *ids)
+ const struct i2c_device_id *ids)
{
- struct bd9571mwv *bd;
- int ret;
-
- bd = devm_kzalloc(&client->dev, sizeof(*bd), GFP_KERNEL);
- if (!bd)
- return -ENOMEM;
+ const struct regmap_config *regmap_config;
+ const struct regmap_irq_chip *irq_chip;
+ const struct mfd_cell *cells;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ int ret, num_cells, irq = client->irq;
+
+	/*
+	 * Read the PMIC product code with a raw SMBus access: the regmap
+	 * config to use depends on which chip variant answers.
+	 */
+ ret = i2c_smbus_read_byte_data(client, BD9571MWV_PRODUCT_CODE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read product code\n");
+ return ret;
+ }
- i2c_set_clientdata(client, bd);
- bd->dev = &client->dev;
- bd->irq = client->irq;
+ switch (ret) {
+ case BD9571MWV_PRODUCT_CODE_BD9571MWV:
+ regmap_config = &bd9571mwv_regmap_config;
+ irq_chip = &bd9571mwv_irq_chip;
+ cells = bd9571mwv_cells;
+ num_cells = ARRAY_SIZE(bd9571mwv_cells);
+ break;
+ case BD9571MWV_PRODUCT_CODE_BD9574MWF:
+ regmap_config = &bd9574mwf_regmap_config;
+ irq_chip = &bd9574mwf_irq_chip;
+ cells = bd9574mwf_cells;
+ num_cells = ARRAY_SIZE(bd9574mwf_cells);
+ break;
+ default:
+ dev_err(dev, "Unsupported device 0x%x\n", ret);
+ return -ENODEV;
+ }
- bd->regmap = devm_regmap_init_i2c(client, &bd9571mwv_regmap_config);
- if (IS_ERR(bd->regmap)) {
- dev_err(bd->dev, "Failed to initialize register map\n");
- return PTR_ERR(bd->regmap);
+ regmap = devm_regmap_init_i2c(client, regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to initialize register map\n");
+ return PTR_ERR(regmap);
}
- ret = bd9571mwv_identify(bd);
+ ret = bd957x_identify(dev, regmap);
if (ret)
return ret;
- ret = regmap_add_irq_chip(bd->regmap, bd->irq, IRQF_ONESHOT, 0,
- &bd9571mwv_irq_chip, &bd->irq_data);
- if (ret) {
- dev_err(bd->dev, "Failed to register IRQ chip\n");
- return ret;
- }
-
- ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
- ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
- regmap_irq_get_domain(bd->irq_data));
+ ret = devm_regmap_add_irq_chip(dev, regmap, irq, IRQF_ONESHOT, 0,
+ irq_chip, &irq_data);
if (ret) {
- regmap_del_irq_chip(bd->irq, bd->irq_data);
+ dev_err(dev, "Failed to register IRQ chip\n");
return ret;
}
- return 0;
-}
-
-static int bd9571mwv_remove(struct i2c_client *client)
-{
- struct bd9571mwv *bd = i2c_get_clientdata(client);
-
- regmap_del_irq_chip(bd->irq, bd->irq_data);
-
- return 0;
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, num_cells,
+ NULL, 0, regmap_irq_get_domain(irq_data));
}
static const struct of_device_id bd9571mwv_of_match_table[] = {
{ .compatible = "rohm,bd9571mwv", },
+ { .compatible = "rohm,bd9574mwf", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bd9571mwv_of_match_table);
@@ -223,7 +280,6 @@ static struct i2c_driver bd9571mwv_driver = {
.of_match_table = bd9571mwv_of_match_table,
},
.probe = bd9571mwv_probe,
- .remove = bd9571mwv_remove,
.id_table = bd9571mwv_id_table,
};
module_i2c_driver(bd9571mwv_driver);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a5983d515db0..167faac9b75b 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2954,12 +2954,12 @@ static const struct mfd_cell common_prcmu_devs[] = {
};
static const struct mfd_cell db8500_prcmu_devs[] = {
- OF_MFD_CELL("db8500-prcmu-regulators", NULL,
+ MFD_CELL_OF("db8500-prcmu-regulators", NULL,
&db8500_regulators, sizeof(db8500_regulators), 0,
"stericsson,db8500-prcmu-regulator"),
- OF_MFD_CELL("cpuidle-dbx500",
+ MFD_CELL_OF("cpuidle-dbx500",
NULL, NULL, 0, 0, "stericsson,cpuidle-dbx500"),
- OF_MFD_CELL("db8500-thermal",
+ MFD_CELL_OF("db8500-thermal",
NULL, NULL, 0, 0, "stericsson,db8500-thermal"),
};
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index 576da62fbb0c..d87876747b91 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -234,7 +234,7 @@ static int gsc_probe(struct i2c_client *client)
ret = devm_regmap_add_irq_chip(dev, gsc->regmap, client->irq,
IRQF_ONESHOT | IRQF_SHARED |
- IRQF_TRIGGER_FALLING, 0,
+ IRQF_TRIGGER_LOW, 0,
&gsc_irq_chip, &irq_data);
if (ret)
return ret;
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 2d7c588ef1ed..1522c8afc540 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -277,6 +277,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&spt_info },
+ /* ADL-P */
+ { PCI_VDEVICE(INTEL, 0x51a8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x51a9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x51e8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51eb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&bxt_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
@@ -293,6 +306,21 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info },
+ /* ADL-S */
+ { PCI_VDEVICE(INTEL, 0x7aa8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7aa9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7acc), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7acd), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7ace), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7acf), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7adc), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
index b84579b7b4f0..06c977519479 100644
--- a/drivers/mfd/intel-m10-bmc.c
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -60,9 +60,52 @@ static ssize_t bmcfw_version_show(struct device *dev,
}
static DEVICE_ATTR_RO(bmcfw_version);
+static ssize_t mac_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *max10 = dev_get_drvdata(dev);
+ unsigned int macaddr_low, macaddr_high;
+ int ret;
+
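+	/* the MAC spans two registers: bytes 1-4 in LOW, bytes 5-6 in HIGH */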
+ ret = m10bmc_sys_read(max10, M10BMC_MAC_LOW, &macaddr_low);
+ if (ret)
+ return ret;
+
+ ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (u8)FIELD_GET(M10BMC_MAC_BYTE1, macaddr_low),
+ (u8)FIELD_GET(M10BMC_MAC_BYTE2, macaddr_low),
+ (u8)FIELD_GET(M10BMC_MAC_BYTE3, macaddr_low),
+ (u8)FIELD_GET(M10BMC_MAC_BYTE4, macaddr_low),
+ (u8)FIELD_GET(M10BMC_MAC_BYTE5, macaddr_high),
+ (u8)FIELD_GET(M10BMC_MAC_BYTE6, macaddr_high));
+}
+static DEVICE_ATTR_RO(mac_address);
+
+static ssize_t mac_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *max10 = dev_get_drvdata(dev);
+ unsigned int macaddr_high;
+ int ret;
+
+ ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n",
+ (u8)FIELD_GET(M10BMC_MAC_COUNT, macaddr_high));
+}
+static DEVICE_ATTR_RO(mac_count);
+
static struct attribute *m10bmc_attrs[] = {
&dev_attr_bmc_version.attr,
&dev_attr_bmcfw_version.attr,
+ &dev_attr_mac_address.attr,
+ &dev_attr_mac_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(m10bmc);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
deleted file mode 100644
index daa772f8146b..000000000000
--- a/drivers/mfd/intel_msic.c
+++ /dev/null
@@ -1,425 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#define MSIC_VENDOR(id) ((id >> 6) & 3)
-#define MSIC_VERSION(id) (id & 0x3f)
-#define MSIC_MAJOR(id) ('A' + ((id >> 3) & 7))
-#define MSIC_MINOR(id) (id & 7)
-
-/*
- * MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
- * Since IRQ block starts from address 0x002 we need to subtract that from
- * the actual IRQ status register address.
- */
-#define MSIC_IRQ_STATUS(x) (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
-#define MSIC_IRQ_STATUS_ACCDET MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET)
-
-/*
- * The SCU hardware has limitation of 16 bytes per read/write buffer on
- * Medfield.
- */
-#define SCU_IPC_RWBUF_LIMIT 16
-
-/**
- * struct intel_msic - an MSIC MFD instance
- * @pdev: pointer to the platform device
- * @vendor: vendor ID
- * @version: chip version
- * @irq_base: base address of the mapped MSIC SRAM interrupt tree
- */
-struct intel_msic {
- struct platform_device *pdev;
- unsigned vendor;
- unsigned version;
- void __iomem *irq_base;
-};
-
-static const struct resource msic_touch_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_adc_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_battery_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_gpio_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_audio_resources[] = {
- DEFINE_RES_IRQ_NAMED(0, "IRQ"),
- /*
- * We will pass IRQ_BASE to the driver now but this can be removed
- * when/if the driver starts to use intel_msic_irq_read().
- */
- DEFINE_RES_MEM_NAMED(MSIC_IRQ_STATUS_ACCDET, 1, "IRQ_BASE"),
-};
-
-static const struct resource msic_hdmi_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_thermal_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_power_btn_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-static const struct resource msic_ocd_resources[] = {
- DEFINE_RES_IRQ(0),
-};
-
-/*
- * Devices that are part of the MSIC and are available via firmware
- * populated SFI DEVS table.
- */
-static struct mfd_cell msic_devs[] = {
- [INTEL_MSIC_BLOCK_TOUCH] = {
- .name = "msic_touch",
- .num_resources = ARRAY_SIZE(msic_touch_resources),
- .resources = msic_touch_resources,
- },
- [INTEL_MSIC_BLOCK_ADC] = {
- .name = "msic_adc",
- .num_resources = ARRAY_SIZE(msic_adc_resources),
- .resources = msic_adc_resources,
- },
- [INTEL_MSIC_BLOCK_BATTERY] = {
- .name = "msic_battery",
- .num_resources = ARRAY_SIZE(msic_battery_resources),
- .resources = msic_battery_resources,
- },
- [INTEL_MSIC_BLOCK_GPIO] = {
- .name = "msic_gpio",
- .num_resources = ARRAY_SIZE(msic_gpio_resources),
- .resources = msic_gpio_resources,
- },
- [INTEL_MSIC_BLOCK_AUDIO] = {
- .name = "msic_audio",
- .num_resources = ARRAY_SIZE(msic_audio_resources),
- .resources = msic_audio_resources,
- },
- [INTEL_MSIC_BLOCK_HDMI] = {
- .name = "msic_hdmi",
- .num_resources = ARRAY_SIZE(msic_hdmi_resources),
- .resources = msic_hdmi_resources,
- },
- [INTEL_MSIC_BLOCK_THERMAL] = {
- .name = "msic_thermal",
- .num_resources = ARRAY_SIZE(msic_thermal_resources),
- .resources = msic_thermal_resources,
- },
- [INTEL_MSIC_BLOCK_POWER_BTN] = {
- .name = "msic_power_btn",
- .num_resources = ARRAY_SIZE(msic_power_btn_resources),
- .resources = msic_power_btn_resources,
- },
- [INTEL_MSIC_BLOCK_OCD] = {
- .name = "msic_ocd",
- .num_resources = ARRAY_SIZE(msic_ocd_resources),
- .resources = msic_ocd_resources,
- },
-};
-
-/*
- * Other MSIC related devices which are not directly available via SFI DEVS
- * table. These can be pseudo devices, regulators etc. which are needed for
- * different purposes.
- *
- * These devices appear only after the MSIC driver itself is initialized so
- * we can guarantee that the SCU IPC interface is ready.
- */
-static const struct mfd_cell msic_other_devs[] = {
- /* Audio codec in the MSIC */
- {
- .id = -1,
- .name = "sn95031",
- },
-};
-
-/**
- * intel_msic_reg_read - read a single MSIC register
- * @reg: register to read
- * @val: register value is placed here
- *
- * Read a single register from MSIC. Returns %0 on success and negative
- * errno in case of failure.
- *
- * Function may sleep.
- */
-int intel_msic_reg_read(unsigned short reg, u8 *val)
-{
- return intel_scu_ipc_ioread8(reg, val);
-}
-EXPORT_SYMBOL_GPL(intel_msic_reg_read);
-
-/**
- * intel_msic_reg_write - write a single MSIC register
- * @reg: register to write
- * @val: value to write to that register
- *
- * Write a single MSIC register. Returns 0 on success and negative
- * errno in case of failure.
- *
- * Function may sleep.
- */
-int intel_msic_reg_write(unsigned short reg, u8 val)
-{
- return intel_scu_ipc_iowrite8(reg, val);
-}
-EXPORT_SYMBOL_GPL(intel_msic_reg_write);
-
-/**
- * intel_msic_reg_update - update a single MSIC register
- * @reg: register to update
- * @val: value to write to the register
- * @mask: specifies which of the bits are updated (%0 = don't update,
- * %1 = update)
- *
- * Perform an update to a register @reg. @mask is used to specify which
- * bits are updated. Returns %0 in case of success and negative errno in
- * case of failure.
- *
- * Function may sleep.
- */
-int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask)
-{
- return intel_scu_ipc_update_register(reg, val, mask);
-}
-EXPORT_SYMBOL_GPL(intel_msic_reg_update);
-
-/**
- * intel_msic_bulk_read - read an array of registers
- * @reg: array of register addresses to read
- * @buf: array where the read values are placed
- * @count: number of registers to read
- *
- * Function reads @count registers from the MSIC using addresses passed in
- * @reg. Read values are placed in @buf. Reads are performed atomically
- * wrt. MSIC.
- *
- * Returns %0 in case of success and negative errno in case of failure.
- *
- * Function may sleep.
- */
-int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count)
-{
- if (WARN_ON(count > SCU_IPC_RWBUF_LIMIT))
- return -EINVAL;
-
- return intel_scu_ipc_readv(reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(intel_msic_bulk_read);
-
-/**
- * intel_msic_bulk_write - write an array of values to the MSIC registers
- * @reg: array of registers to write
- * @buf: values to write to each register
- * @count: number of registers to write
- *
- * Function writes @count registers in @buf to MSIC. Writes are performed
- * atomically wrt MSIC. Returns %0 in case of success and negative errno in
- * case of failure.
- *
- * Function may sleep.
- */
-int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count)
-{
- if (WARN_ON(count > SCU_IPC_RWBUF_LIMIT))
- return -EINVAL;
-
- return intel_scu_ipc_writev(reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(intel_msic_bulk_write);
-
-/**
- * intel_msic_irq_read - read a register from an MSIC interrupt tree
- * @msic: MSIC instance
- * @reg: interrupt register (between %INTEL_MSIC_IRQLVL1 and
- * %INTEL_MSIC_RESETIRQ2)
- * @val: value of the register is placed here
- *
- * This function can be used by an MSIC subdevice interrupt handler to read
- * a register value from the MSIC interrupt tree. In this way subdevice
- * drivers don't have to map in the interrupt tree themselves but can just
- * call this function instead.
- *
- * Function doesn't sleep and is callable from interrupt context.
- *
- * Returns %-EINVAL if @reg is outside of the allowed register region.
- */
-int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, u8 *val)
-{
- if (WARN_ON(reg < INTEL_MSIC_IRQLVL1 || reg > INTEL_MSIC_RESETIRQ2))
- return -EINVAL;
-
- *val = readb(msic->irq_base + (reg - INTEL_MSIC_IRQLVL1));
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_msic_irq_read);
-
-static int intel_msic_init_devices(struct intel_msic *msic)
-{
- struct platform_device *pdev = msic->pdev;
- struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret, i;
-
- if (pdata->gpio) {
- struct mfd_cell *cell = &msic_devs[INTEL_MSIC_BLOCK_GPIO];
-
- cell->platform_data = pdata->gpio;
- cell->pdata_size = sizeof(*pdata->gpio);
- }
-
- if (pdata->ocd) {
- unsigned gpio = pdata->ocd->gpio;
-
- ret = devm_gpio_request_one(&pdev->dev, gpio,
- GPIOF_IN, "ocd_gpio");
- if (ret) {
- dev_err(&pdev->dev, "failed to register OCD GPIO\n");
- return ret;
- }
-
- ret = gpio_to_irq(gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "no IRQ number for OCD GPIO\n");
- return ret;
- }
-
- /* Update the IRQ number for the OCD */
- pdata->irq[INTEL_MSIC_BLOCK_OCD] = ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(msic_devs); i++) {
- if (!pdata->irq[i])
- continue;
-
- ret = mfd_add_devices(&pdev->dev, -1, &msic_devs[i], 1, NULL,
- pdata->irq[i], NULL);
- if (ret)
- goto fail;
- }
-
- ret = mfd_add_devices(&pdev->dev, 0, msic_other_devs,
- ARRAY_SIZE(msic_other_devs), NULL, 0, NULL);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- mfd_remove_devices(&pdev->dev);
-
- return ret;
-}
-
-static void intel_msic_remove_devices(struct intel_msic *msic)
-{
- struct platform_device *pdev = msic->pdev;
-
- mfd_remove_devices(&pdev->dev);
-}
-
-static int intel_msic_probe(struct platform_device *pdev)
-{
- struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct intel_msic *msic;
- struct resource *res;
- u8 id0, id1;
- int ret;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data passed\n");
- return -EINVAL;
- }
-
- /* First validate that we have an MSIC in place */
- ret = intel_scu_ipc_ioread8(INTEL_MSIC_ID0, &id0);
- if (ret) {
- dev_err(&pdev->dev, "failed to identify the MSIC chip (ID0)\n");
- return -ENXIO;
- }
-
- ret = intel_scu_ipc_ioread8(INTEL_MSIC_ID1, &id1);
- if (ret) {
- dev_err(&pdev->dev, "failed to identify the MSIC chip (ID1)\n");
- return -ENXIO;
- }
-
- if (MSIC_VENDOR(id0) != MSIC_VENDOR(id1)) {
- dev_err(&pdev->dev, "invalid vendor ID: %x, %x\n", id0, id1);
- return -ENXIO;
- }
-
- msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
- if (!msic)
- return -ENOMEM;
-
- msic->vendor = MSIC_VENDOR(id0);
- msic->version = MSIC_VERSION(id0);
- msic->pdev = pdev;
-
- /*
- * Map in the MSIC interrupt tree area in SRAM. This is exposed to
- * the clients via intel_msic_irq_read().
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(msic->irq_base))
- return PTR_ERR(msic->irq_base);
-
- platform_set_drvdata(pdev, msic);
-
- ret = intel_msic_init_devices(msic);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
- MSIC_MAJOR(msic->version), MSIC_MINOR(msic->version),
- msic->vendor);
-
- return 0;
-}
-
-static int intel_msic_remove(struct platform_device *pdev)
-{
- struct intel_msic *msic = platform_get_drvdata(pdev);
-
- intel_msic_remove_devices(msic);
-
- return 0;
-}
-
-static struct platform_driver intel_msic_driver = {
- .probe = intel_msic_probe,
- .remove = intel_msic_remove,
- .driver = {
- .name = "intel_msic",
- },
-};
-builtin_platform_driver(intel_msic_driver);
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index 761b4ef3a381..d1fc38a78acb 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -36,7 +36,6 @@
#define IQS62X_PROD_NUM 0x00
#define IQS62X_SYS_FLAGS 0x10
-#define IQS62X_SYS_FLAGS_IN_ATI BIT(2)
#define IQS620_HALL_FLAGS 0x16
#define IQS621_HALL_FLAGS 0x19
@@ -57,10 +56,10 @@
#define IQS620_TEMP_CAL_OFFS 0xC4
#define IQS62X_SYS_SETTINGS 0xD0
-#define IQS62X_SYS_SETTINGS_SOFT_RESET BIT(7)
#define IQS62X_SYS_SETTINGS_ACK_RESET BIT(6)
#define IQS62X_SYS_SETTINGS_EVENT_MODE BIT(5)
#define IQS62X_SYS_SETTINGS_CLK_DIV BIT(4)
+#define IQS62X_SYS_SETTINGS_COMM_ATI BIT(3)
#define IQS62X_SYS_SETTINGS_REDO_ATI BIT(1)
#define IQS62X_PWR_SETTINGS 0xD2
@@ -82,9 +81,8 @@
#define IQS62X_FW_REC_TYPE_MASK 3
#define IQS62X_FW_REC_TYPE_DATA 4
-#define IQS62X_ATI_POLL_SLEEP_US 10000
-#define IQS62X_ATI_POLL_TIMEOUT_US 500000
-#define IQS62X_ATI_STABLE_DELAY_MS 150
+#define IQS62X_ATI_STARTUP_MS 350
+#define IQS62X_FILT_SETTLE_MS 250
struct iqs62x_fw_rec {
u8 type;
@@ -112,9 +110,16 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
struct iqs62x_fw_blk *fw_blk;
unsigned int val;
int ret;
- u8 clk_div = 1;
list_for_each_entry(fw_blk, &iqs62x->fw_blk_head, list) {
+ /*
+ * In case ATI is in progress, wait for it to complete before
+ * lowering the core clock frequency.
+ */
+ if (fw_blk->addr == IQS62X_SYS_SETTINGS &&
+ *fw_blk->data & IQS62X_SYS_SETTINGS_CLK_DIV)
+ msleep(IQS62X_ATI_STARTUP_MS);
+
if (fw_blk->mask)
ret = regmap_update_bits(iqs62x->regmap, fw_blk->addr,
fw_blk->mask, *fw_blk->data);
@@ -135,7 +140,6 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
if (val & IQS620_PROX_SETTINGS_4_SAR_EN)
iqs62x->ui_sel = IQS62X_UI_SAR1;
-
fallthrough;
case IQS621_PROD_NUM:
@@ -183,28 +187,32 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
return ret;
}
- ret = regmap_read(iqs62x->regmap, IQS62X_SYS_SETTINGS, &val);
- if (ret)
- return ret;
-
- if (val & IQS62X_SYS_SETTINGS_CLK_DIV)
- clk_div = iqs62x->dev_desc->clk_div;
-
- ret = regmap_write(iqs62x->regmap, IQS62X_SYS_SETTINGS, val |
- IQS62X_SYS_SETTINGS_ACK_RESET |
- IQS62X_SYS_SETTINGS_EVENT_MODE |
- IQS62X_SYS_SETTINGS_REDO_ATI);
- if (ret)
- return ret;
-
- ret = regmap_read_poll_timeout(iqs62x->regmap, IQS62X_SYS_FLAGS, val,
- !(val & IQS62X_SYS_FLAGS_IN_ATI),
- IQS62X_ATI_POLL_SLEEP_US,
- IQS62X_ATI_POLL_TIMEOUT_US * clk_div);
+ /*
+ * Place the device in streaming mode at first so as not to miss the
+ * limited number of interrupts that would otherwise occur after ATI
+ * completes. The device is subsequently placed in event mode by the
+ * interrupt handler.
+ *
+ * In the meantime, mask interrupts during ATI to prevent the device
+ * from soliciting I2C traffic until the noise-sensitive ATI process
+ * is complete.
+ */
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_SYS_SETTINGS,
+ IQS62X_SYS_SETTINGS_ACK_RESET |
+ IQS62X_SYS_SETTINGS_EVENT_MODE |
+ IQS62X_SYS_SETTINGS_COMM_ATI |
+ IQS62X_SYS_SETTINGS_REDO_ATI,
+ IQS62X_SYS_SETTINGS_ACK_RESET |
+ IQS62X_SYS_SETTINGS_REDO_ATI);
if (ret)
return ret;
- msleep(IQS62X_ATI_STABLE_DELAY_MS * clk_div);
+ /*
+ * The following delay gives the device time to deassert its RDY output
+ * in case a communication window was open while the REDO_ATI field was
+ * written. This prevents an interrupt from being serviced prematurely.
+ */
+ usleep_range(5000, 5100);
return 0;
}
@@ -435,6 +443,11 @@ const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS] = {
.mask = BIT(7),
.val = BIT(7),
},
+ [IQS62X_EVENT_SYS_ATI] = {
+ .reg = IQS62X_EVENT_SYS,
+ .mask = BIT(2),
+ .val = BIT(2),
+ },
};
EXPORT_SYMBOL_GPL(iqs62x_events);
@@ -469,7 +482,6 @@ static irqreturn_t iqs62x_irq(int irq, void *context)
switch (event_reg) {
case IQS62X_EVENT_UI_LO:
event_data.ui_data = get_unaligned_le16(&event_map[i]);
-
fallthrough;
case IQS62X_EVENT_UI_HI:
@@ -490,7 +502,6 @@ static irqreturn_t iqs62x_irq(int irq, void *context)
case IQS62X_EVENT_HYST:
event_map[i] <<= iqs62x->dev_desc->hyst_shift;
-
fallthrough;
case IQS62X_EVENT_WHEEL:
@@ -525,19 +536,46 @@ static irqreturn_t iqs62x_irq(int irq, void *context)
"Failed to re-initialize device: %d\n", ret);
return IRQ_NONE;
}
+
+ iqs62x->event_cache |= BIT(IQS62X_EVENT_SYS_RESET);
+ reinit_completion(&iqs62x->ati_done);
+ } else if (event_flags & BIT(IQS62X_EVENT_SYS_ATI)) {
+ iqs62x->event_cache |= BIT(IQS62X_EVENT_SYS_ATI);
+ reinit_completion(&iqs62x->ati_done);
+ } else if (!completion_done(&iqs62x->ati_done)) {
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_SYS_SETTINGS,
+ IQS62X_SYS_SETTINGS_EVENT_MODE, 0xFF);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to enable event mode: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ msleep(IQS62X_FILT_SETTLE_MS);
+ complete_all(&iqs62x->ati_done);
}
- ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags,
- &event_data);
- if (ret & NOTIFY_STOP_MASK)
- return IRQ_NONE;
+ /*
+ * Reset and ATI events are not broadcast to the sub-device drivers
+ * until ATI has completed. Any other events that may have occurred
+ * during ATI are ignored.
+ */
+ if (completion_done(&iqs62x->ati_done)) {
+ event_flags |= iqs62x->event_cache;
+ ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags,
+ &event_data);
+ if (ret & NOTIFY_STOP_MASK)
+ return IRQ_NONE;
+
+ iqs62x->event_cache = 0;
+ }
/*
* Once the communication window is closed, a small delay is added to
* ensure the device's RDY output has been deasserted by the time the
* interrupt handler returns.
*/
- usleep_range(50, 100);
+ usleep_range(150, 200);
return IRQ_HANDLED;
}
@@ -571,6 +609,12 @@ static void iqs62x_firmware_load(const struct firmware *fw, void *context)
goto err_out;
}
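+	/* ATI startup plus filter settling should finish well within 2 s */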
+ if (!wait_for_completion_timeout(&iqs62x->ati_done,
+ msecs_to_jiffies(2000))) {
+ dev_err(&client->dev, "Failed to complete ATI\n");
+ goto err_out;
+ }
+
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
iqs62x->dev_desc->sub_devs,
iqs62x->dev_desc->num_sub_devs,
@@ -752,22 +796,17 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs620at",
.sub_devs = iqs620at_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs620at_sub_devs),
-
.prod_num = IQS620_PROD_NUM,
.sw_num = 0x08,
.cal_regs = iqs620at_cal_regs,
.num_cal_regs = ARRAY_SIZE(iqs620at_cal_regs),
-
.prox_mask = BIT(0),
.sar_mask = BIT(1) | BIT(7),
.hall_mask = BIT(2),
.hyst_mask = BIT(3),
.temp_mask = BIT(4),
-
.prox_settings = IQS620_PROX_SETTINGS_4,
.hall_flags = IQS620_HALL_FLAGS,
-
- .clk_div = 4,
.fw_name = "iqs620a.bin",
.event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
},
@@ -775,20 +814,15 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs620a",
.sub_devs = iqs620a_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs620a_sub_devs),
-
.prod_num = IQS620_PROD_NUM,
.sw_num = 0x08,
-
.prox_mask = BIT(0),
.sar_mask = BIT(1) | BIT(7),
.hall_mask = BIT(2),
.hyst_mask = BIT(3),
.temp_mask = BIT(4),
-
.prox_settings = IQS620_PROX_SETTINGS_4,
.hall_flags = IQS620_HALL_FLAGS,
-
- .clk_div = 4,
.fw_name = "iqs620a.bin",
.event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
},
@@ -796,23 +830,18 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs621",
.sub_devs = iqs621_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs621_sub_devs),
-
.prod_num = IQS621_PROD_NUM,
.sw_num = 0x09,
.cal_regs = iqs621_cal_regs,
.num_cal_regs = ARRAY_SIZE(iqs621_cal_regs),
-
.prox_mask = BIT(0),
.hall_mask = BIT(1),
.als_mask = BIT(2),
.hyst_mask = BIT(3),
.temp_mask = BIT(4),
-
.als_flags = IQS621_ALS_FLAGS,
.hall_flags = IQS621_HALL_FLAGS,
.hyst_shift = 5,
-
- .clk_div = 2,
.fw_name = "iqs621.bin",
.event_regs = &iqs621_event_regs[IQS62X_UI_PROX],
},
@@ -820,21 +849,16 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs622",
.sub_devs = iqs622_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs622_sub_devs),
-
.prod_num = IQS622_PROD_NUM,
.sw_num = 0x06,
-
.prox_mask = BIT(0),
.sar_mask = BIT(1),
.hall_mask = BIT(2),
.als_mask = BIT(3),
.ir_mask = BIT(4),
-
.prox_settings = IQS622_PROX_SETTINGS_4,
.als_flags = IQS622_ALS_FLAGS,
.hall_flags = IQS622_HALL_FLAGS,
-
- .clk_div = 2,
.fw_name = "iqs622.bin",
.event_regs = &iqs622_event_regs[IQS62X_UI_PROX],
},
@@ -842,14 +866,10 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs624",
.sub_devs = iqs624_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs624_sub_devs),
-
.prod_num = IQS624_PROD_NUM,
.sw_num = 0x0B,
-
.interval = IQS624_INTERVAL_NUM,
.interval_div = 3,
-
- .clk_div = 2,
.fw_name = "iqs624.bin",
.event_regs = &iqs624_event_regs[IQS62X_UI_PROX],
},
@@ -857,20 +877,16 @@ static const struct iqs62x_dev_desc iqs62x_devs[] = {
.dev_name = "iqs625",
.sub_devs = iqs625_sub_devs,
.num_sub_devs = ARRAY_SIZE(iqs625_sub_devs),
-
.prod_num = IQS625_PROD_NUM,
.sw_num = 0x0B,
-
.interval = IQS625_INTERVAL_NUM,
.interval_div = 10,
-
- .clk_div = 2,
.fw_name = "iqs625.bin",
.event_regs = &iqs625_event_regs[IQS62X_UI_PROX],
},
};
-static const struct regmap_config iqs62x_map_config = {
+static const struct regmap_config iqs62x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = IQS62X_MAX_REG,
@@ -894,9 +910,11 @@ static int iqs62x_probe(struct i2c_client *client)
BLOCKING_INIT_NOTIFIER_HEAD(&iqs62x->nh);
INIT_LIST_HEAD(&iqs62x->fw_blk_head);
+
+ init_completion(&iqs62x->ati_done);
init_completion(&iqs62x->fw_done);
- iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_map_config);
+ iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_regmap_config);
if (IS_ERR(iqs62x->regmap)) {
ret = PTR_ERR(iqs62x->regmap);
dev_err(&client->dev, "Failed to initialize register map: %d\n",
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 68d8f2b95287..55d3a6f97783 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -29,9 +29,9 @@
static const struct mfd_cell max8997_devs[] = {
{ .name = "max8997-pmic", },
{ .name = "max8997-rtc", },
- { .name = "max8997-battery", },
+ { .name = "max8997-battery", .of_compatible = "maxim,max8997-battery", },
{ .name = "max8997-haptic", },
- { .name = "max8997-muic", },
+ { .name = "max8997-muic", .of_compatible = "maxim,max8997-muic", },
{ .name = "max8997-led", .id = 1 },
{ .name = "max8997-led", .id = 2 },
};
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 98fa0af0e56e..4629dff187cd 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -214,8 +214,7 @@ static int mcp_sa11x0_probe(struct platform_device *dev)
* rate. This is the period for 3 64-bit frames. Always
* round this time up.
*/
- mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
- mcp->sclk_rate;
+ mcp->rw_timeout = DIV_ROUND_UP(64 * 3 * 1000000, mcp->sclk_rate);
ret = mcp_host_add(mcp, data->codec_pdata);
if (ret == 0)
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index 4661c1b29a72..480722acf706 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -292,17 +292,17 @@ static const struct resource mt6360_ldo_resources[] = {
};
static const struct mfd_cell mt6360_devs[] = {
- OF_MFD_CELL("mt6360_adc", mt6360_adc_resources,
+ MFD_CELL_OF("mt6360_adc", mt6360_adc_resources,
NULL, 0, 0, "mediatek,mt6360_adc"),
- OF_MFD_CELL("mt6360_chg", mt6360_chg_resources,
+ MFD_CELL_OF("mt6360_chg", mt6360_chg_resources,
NULL, 0, 0, "mediatek,mt6360_chg"),
- OF_MFD_CELL("mt6360_led", mt6360_led_resources,
+ MFD_CELL_OF("mt6360_led", mt6360_led_resources,
NULL, 0, 0, "mediatek,mt6360_led"),
- OF_MFD_CELL("mt6360_pmic", mt6360_pmic_resources,
+ MFD_CELL_OF("mt6360_pmic", mt6360_pmic_resources,
NULL, 0, 0, "mediatek,mt6360_pmic"),
- OF_MFD_CELL("mt6360_ldo", mt6360_ldo_resources,
+ MFD_CELL_OF("mt6360_ldo", mt6360_ldo_resources,
NULL, 0, 0, "mediatek,mt6360_ldo"),
- OF_MFD_CELL("mt6360_tcpc", NULL,
+ MFD_CELL_OF("mt6360_tcpc", NULL,
NULL, 0, 0, "mediatek,mt6360_tcpc"),
};
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 8a7cc0f86958..65b98f3fbd92 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
mutex_lock(&wm831x->auxadc_lock);
-
- list_del(&req->list);
ret = req->val;
out:
+ list_del(&req->list);
mutex_unlock(&wm831x->auxadc_lock);
kfree(req);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0d8099..f532c59bb59b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -50,14 +50,6 @@ config AD525X_DPOT_SPI
To compile this driver as a module, choose M here: the
module will be called ad525x_dpot-spi.
-config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on ARCH_AT91
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
- these blocks by different drivers despite processor differences.
-
config DUMMY_IRQ
tristate "Dummy IRQ handler"
help
@@ -112,19 +104,6 @@ config PHANTOM
If you choose to build module, its name will be phantom. If unsure,
say N here.
-config INTEL_MID_PTI
- tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST)
- help
- The PTI (Parallel Trace Interface) driver directs
- trace data routed from various parts in the system out
- through an Intel Penwell PTI port and out of the mobile
- device for analysis with a debugging tool (Lauterbach or Fido).
-
- You should select this driver if the target kernel is meant for
- an Intel Atom (non-netbook) mobile device containing a MIPI
- P1149.7 standard implementation.
-
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
@@ -478,6 +457,7 @@ source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
+source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e73330..99b6f15a3c70 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -8,9 +8,7 @@ obj-$(CONFIG_IBMVMC) += ibmvmc.o
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
-obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
-obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm/
@@ -51,6 +49,7 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
+obj-$(CONFIG_BCM_VK) += bcm-vk/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
deleted file mode 100644
index 7de7840f613c..000000000000
--- a/drivers/misc/atmel_tclib.c
+++ /dev/null
@@ -1,200 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/of.h>
-#include <soc/at91/atmel_tcb.h>
-
-/*
- * This is a thin library to solve the problem of how to portably allocate
- * one of the TC blocks. For simplicity, it doesn't currently expect to
- * share individual timers between different drivers.
- */
-
-#if defined(CONFIG_AVR32)
-/* AVR32 has these divide PBB */
-const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, };
-EXPORT_SYMBOL(atmel_tc_divisors);
-
-#elif defined(CONFIG_ARCH_AT91)
-/* AT91 has these divide MCK */
-const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
-EXPORT_SYMBOL(atmel_tc_divisors);
-
-#endif
-
-static DEFINE_SPINLOCK(tc_list_lock);
-static LIST_HEAD(tc_list);
-
-/**
- * atmel_tc_alloc - allocate a specified TC block
- * @block: which block to allocate
- *
- * Caller allocates a block. If it is available, a pointer to a
- * pre-initialized struct atmel_tc is returned. The caller can access
- * the registers directly through the "regs" field.
- */
-struct atmel_tc *atmel_tc_alloc(unsigned block)
-{
- struct atmel_tc *tc;
- struct platform_device *pdev = NULL;
-
- spin_lock(&tc_list_lock);
- list_for_each_entry(tc, &tc_list, node) {
- if (tc->allocated)
- continue;
-
- if ((tc->pdev->dev.of_node && tc->id == block) ||
- (tc->pdev->id == block)) {
- pdev = tc->pdev;
- tc->allocated = true;
- break;
- }
- }
- spin_unlock(&tc_list_lock);
-
- return pdev ? tc : NULL;
-}
-EXPORT_SYMBOL_GPL(atmel_tc_alloc);
-
-/**
- * atmel_tc_free - release a specified TC block
- * @tc: Timer/counter block that was returned by atmel_tc_alloc()
- *
- * This reverses the effect of atmel_tc_alloc(), invalidating the resource
- * returned by that routine and making the TC available to other drivers.
- */
-void atmel_tc_free(struct atmel_tc *tc)
-{
- spin_lock(&tc_list_lock);
- if (tc->allocated)
- tc->allocated = false;
- spin_unlock(&tc_list_lock);
-}
-EXPORT_SYMBOL_GPL(atmel_tc_free);
-
-#if defined(CONFIG_OF)
-static struct atmel_tcb_config tcb_rm9200_config = {
- .counter_width = 16,
-};
-
-static struct atmel_tcb_config tcb_sam9x5_config = {
- .counter_width = 32,
-};
-
-static const struct of_device_id atmel_tcb_dt_ids[] = {
- {
- .compatible = "atmel,at91rm9200-tcb",
- .data = &tcb_rm9200_config,
- }, {
- .compatible = "atmel,at91sam9x5-tcb",
- .data = &tcb_sam9x5_config,
- }, {
- /* sentinel */
- }
-};
-
-MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids);
-#endif
-
-static int __init tc_probe(struct platform_device *pdev)
-{
- struct atmel_tc *tc;
- struct clk *clk;
- int irq;
- unsigned int i;
-
- if (of_get_child_count(pdev->dev.of_node))
- return -EBUSY;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
-
- tc = devm_kzalloc(&pdev->dev, sizeof(struct atmel_tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
-
- tc->pdev = pdev;
-
- clk = devm_clk_get(&pdev->dev, "t0_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- tc->slow_clk = devm_clk_get(&pdev->dev, "slow_clk");
- if (IS_ERR(tc->slow_clk))
- return PTR_ERR(tc->slow_clk);
-
- tc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tc->regs))
- return PTR_ERR(tc->regs);
-
- /* Now take SoC information if available */
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node);
- if (match)
- tc->tcb_config = match->data;
-
- tc->id = of_alias_get_id(tc->pdev->dev.of_node, "tcb");
- } else {
- tc->id = pdev->id;
- }
-
- tc->clk[0] = clk;
- tc->clk[1] = devm_clk_get(&pdev->dev, "t1_clk");
- if (IS_ERR(tc->clk[1]))
- tc->clk[1] = clk;
- tc->clk[2] = devm_clk_get(&pdev->dev, "t2_clk");
- if (IS_ERR(tc->clk[2]))
- tc->clk[2] = clk;
-
- tc->irq[0] = irq;
- tc->irq[1] = platform_get_irq(pdev, 1);
- if (tc->irq[1] < 0)
- tc->irq[1] = irq;
- tc->irq[2] = platform_get_irq(pdev, 2);
- if (tc->irq[2] < 0)
- tc->irq[2] = irq;
-
- for (i = 0; i < 3; i++)
- writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
-
- spin_lock(&tc_list_lock);
- list_add_tail(&tc->node, &tc_list);
- spin_unlock(&tc_list_lock);
-
- platform_set_drvdata(pdev, tc);
-
- return 0;
-}
-
-static void tc_shutdown(struct platform_device *pdev)
-{
- int i;
- struct atmel_tc *tc = platform_get_drvdata(pdev);
-
- for (i = 0; i < 3; i++)
- writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
-}
-
-static struct platform_driver tc_driver = {
- .driver = {
- .name = "atmel_tcb",
- .of_match_table = of_match_ptr(atmel_tcb_dt_ids),
- },
- .shutdown = tc_shutdown,
-};
-
-static int __init tc_init(void)
-{
- return platform_driver_probe(&tc_driver, tc_probe);
-}
-arch_initcall(tc_init);
diff --git a/drivers/misc/bcm-vk/Kconfig b/drivers/misc/bcm-vk/Kconfig
new file mode 100644
index 000000000000..68a972772b99
--- /dev/null
+++ b/drivers/misc/bcm-vk/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Broadcom VK device
+#
+config BCM_VK
+ tristate "Support for Broadcom VK Accelerators"
+ depends on PCI_MSI
+ help
+ Select this option to enable support for Broadcom
+ VK Accelerators. VK is used for performing
+ multiple specific offload processing tasks in parallel.
+	  Such offload tasks assist in operations such as video
+	  transcoding, compression, and crypto.
+ This driver enables userspace programs to access these
+ accelerators via /dev/bcm-vk.N devices.
+
+ If unsure, say N.
+
+config BCM_VK_TTY
+ bool "Enable tty ports on a Broadcom VK Accelerator device"
+ depends on TTY
+ depends on BCM_VK
+ help
+ Select this option to enable tty support to allow console
+ access to Broadcom VK Accelerator cards from host.
+
+	  Device nodes will be in the form /dev/bcm-vk.x_ttyVKy, where:
+ x is the instance of the VK card
+ y is the tty device number on the VK card.
diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile
new file mode 100644
index 000000000000..1df2ebe851ca
--- /dev/null
+++ b/drivers/misc/bcm-vk/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Broadcom VK driver
+#
+
+obj-$(CONFIG_BCM_VK) += bcm_vk.o
+bcm_vk-objs := \
+ bcm_vk_dev.o \
+ bcm_vk_msg.o \
+ bcm_vk_sg.o
+
+bcm_vk-$(CONFIG_BCM_VK_TTY) += bcm_vk_tty.o
diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h
new file mode 100644
index 000000000000..a1338f375589
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk.h
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_H
+#define BCM_VK_H
+
+#include <linux/atomic.h>
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+#include <linux/tty.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk_msg.h"
+
+#define DRV_MODULE_NAME "bcm-vk"
+
+/*
+ * Load Image is completed in two stages:
+ *
+ * 1) When the VK device boots up, the M7 CPU runs and executes the
+ * BootROM. The Secure Boot Loader (SBL), part of the BootROM, runs
+ * to open up ITCM for the host to push the BOOT1 image.
+ * SBL authenticates the image before jumping to it.
+ *
+ * 2) Because the BOOT1 image is a secured image, it is also called the
+ * Secure Boot Image (SBI). In the second stage, SBI initializes DDR
+ * and waits for the host to push the BOOT2 image to DDR.
+ * SBI authenticates the image before jumping to it.
+ *
+ */
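(A condensed sketch of the two-stage handshake described above, using the register definitions that follow; error handling and the chunked BOOT2 transfer are omitted, and the authoritative flow is bcm_vk_load_image_by_type() in bcm_vk_dev.c later in this patch:)

	static int demo_two_stage_load(struct bcm_vk *vk)
	{
		/* stage 1: request ITCM (SRAM) open, then push BOOT1 */
		vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, BAR_CODEPUSH_SBL);
		if (bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
				SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS))
			return -ETIMEDOUT;
		/* ... copy BOOT1 into ITCM, signal entry via BAR_CODEPUSH_SBL ... */

		/* stage 2: SBI opens DDR; wait, then push BOOT2 */
		if (bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
				DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS))
			return -ETIMEDOUT;
		/* ... DMA BOOT2 in chunks, signalling via BAR_CODEPUSH_SBI ... */
		return 0;
	}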
+/* Location of registers of interest in BAR0 */
+
+/* Request register for Secure Boot Loader (SBL) download */
+#define BAR_CODEPUSH_SBL 0x400
+/* Start of ITCM */
+#define CODEPUSH_BOOT1_ENTRY 0x00400000
+#define CODEPUSH_MASK 0xfffff000
+#define CODEPUSH_BOOTSTART BIT(0)
+
+/* Boot Status register */
+#define BAR_BOOT_STATUS 0x404
+
+#define SRAM_OPEN BIT(16)
+#define DDR_OPEN BIT(17)
+
+/* Firmware loader progress status definitions */
+#define FW_LOADER_ACK_SEND_MORE_DATA BIT(18)
+#define FW_LOADER_ACK_IN_PROGRESS BIT(19)
+#define FW_LOADER_ACK_RCVD_ALL_DATA BIT(20)
+
+/* Boot1/2 is running in standalone mode */
+#define BOOT_STDALONE_RUNNING BIT(21)
+
+/* definitions for boot status register */
+#define BOOT_STATE_MASK (0xffffffff & \
+ ~(FW_LOADER_ACK_SEND_MORE_DATA | \
+ FW_LOADER_ACK_IN_PROGRESS | \
+ BOOT_STDALONE_RUNNING))
+
+#define BOOT_ERR_SHIFT 4
+#define BOOT_ERR_MASK (0xf << BOOT_ERR_SHIFT)
+#define BOOT_PROG_MASK 0xf
+
+#define BROM_STATUS_NOT_RUN 0x2
+#define BROM_NOT_RUN (SRAM_OPEN | BROM_STATUS_NOT_RUN)
+#define BROM_STATUS_COMPLETE 0x6
+#define BROM_RUNNING (SRAM_OPEN | BROM_STATUS_COMPLETE)
+#define BOOT1_STATUS_COMPLETE 0x6
+#define BOOT1_RUNNING (DDR_OPEN | BOOT1_STATUS_COMPLETE)
+#define BOOT2_STATUS_COMPLETE 0x6
+#define BOOT2_RUNNING (FW_LOADER_ACK_RCVD_ALL_DATA | \
+ BOOT2_STATUS_COMPLETE)
+
+/* Boot request for Secure Boot Image (SBI) */
+#define BAR_CODEPUSH_SBI 0x408
+/* 64M mapped to BAR2 */
+#define CODEPUSH_BOOT2_ENTRY 0x60000000
+
+#define BAR_CARD_STATUS 0x410
+/* CARD_STATUS definitions */
+#define CARD_STATUS_TTYVK0_READY BIT(0)
+#define CARD_STATUS_TTYVK1_READY BIT(1)
+
+#define BAR_BOOT1_STDALONE_PROGRESS 0x420
+#define BOOT1_STDALONE_SUCCESS (BIT(13) | BIT(14))
+#define BOOT1_STDALONE_PROGRESS_MASK BOOT1_STDALONE_SUCCESS
+
+#define BAR_METADATA_VERSION 0x440
+#define BAR_OS_UPTIME 0x444
+#define BAR_CHIP_ID 0x448
+#define MAJOR_SOC_REV(_chip_id) (((_chip_id) >> 20) & 0xf)
+
+#define BAR_CARD_TEMPERATURE 0x45c
+/* defines for all temperature sensors */
+#define BCM_VK_TEMP_FIELD_MASK 0xff
+#define BCM_VK_CPU_TEMP_SHIFT 0
+#define BCM_VK_DDR0_TEMP_SHIFT 8
+#define BCM_VK_DDR1_TEMP_SHIFT 16
+
+#define BAR_CARD_VOLTAGE 0x460
+/* defines for voltage rail conversion */
+#define BCM_VK_VOLT_RAIL_MASK 0xffff
+#define BCM_VK_3P3_VOLT_REG_SHIFT 16
+
+#define BAR_CARD_ERR_LOG 0x464
+/* Error log register bit definition - register for error alerts */
+#define ERR_LOG_UECC BIT(0)
+#define ERR_LOG_SSIM_BUSY BIT(1)
+#define ERR_LOG_AFBC_BUSY BIT(2)
+#define ERR_LOG_HIGH_TEMP_ERR BIT(3)
+#define ERR_LOG_WDOG_TIMEOUT BIT(4)
+#define ERR_LOG_SYS_FAULT BIT(5)
+#define ERR_LOG_RAMDUMP BIT(6)
+#define ERR_LOG_COP_WDOG_TIMEOUT BIT(7)
+/* warnings */
+#define ERR_LOG_MEM_ALLOC_FAIL BIT(8)
+#define ERR_LOG_LOW_TEMP_WARN BIT(9)
+#define ERR_LOG_ECC BIT(10)
+#define ERR_LOG_IPC_DWN BIT(11)
+
+/* Alert bit definitions detected on the host */
+#define ERR_LOG_HOST_INTF_V_FAIL BIT(13)
+#define ERR_LOG_HOST_HB_FAIL BIT(14)
+#define ERR_LOG_HOST_PCIE_DWN BIT(15)
+
+#define BAR_CARD_ERR_MEM 0x468
+/* defines for mem err, all fields have same width */
+#define BCM_VK_MEM_ERR_FIELD_MASK 0xff
+#define BCM_VK_ECC_MEM_ERR_SHIFT 0
+#define BCM_VK_UECC_MEM_ERR_SHIFT 8
+/* threshold of event occurrences before logs start to come out */
+#define BCM_VK_ECC_THRESHOLD 10
+#define BCM_VK_UECC_THRESHOLD 1
+
+#define BAR_CARD_PWR_AND_THRE 0x46c
+/* defines for power and temp threshold, all fields have same width */
+#define BCM_VK_PWR_AND_THRE_FIELD_MASK 0xff
+#define BCM_VK_LOW_TEMP_THRE_SHIFT 0
+#define BCM_VK_HIGH_TEMP_THRE_SHIFT 8
+#define BCM_VK_PWR_STATE_SHIFT 16
+
+#define BAR_CARD_STATIC_INFO 0x470
+
+#define BAR_INTF_VER 0x47c
+#define BAR_INTF_VER_MAJOR_SHIFT 16
+#define BAR_INTF_VER_MASK 0xffff
+/*
+ * Major and minor semantic version numbers supported.
+ * Please update as required on interface changes.
+ */
+#define SEMANTIC_MAJOR 1
+#define SEMANTIC_MINOR 0
+
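(The interface version register packs the major number in the upper half-word and the minor in the lower; unpacking mirrors bcm_vk_intf_ver_chk() in bcm_vk_dev.c later in this patch:)

	u32 reg = vkread32(vk, BAR_0, BAR_INTF_VER);
	u16 major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
	u16 minor = reg & BAR_INTF_VER_MASK;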
+/*
+ * first doorbell reg, i.e. for queue = 0. Only the first one is
+ * needed, as the queue number is used to derive the others
+ */
+#define VK_BAR0_REGSEG_DB_BASE 0x484
+#define VK_BAR0_REGSEG_DB_REG_GAP 8 /*
+ * DB register gap,
+ * DB1 at 0x48c and DB2 at 0x494
+ */
+
+/* reset register and specific values */
+#define VK_BAR0_RESET_DB_NUM 3
+#define VK_BAR0_RESET_DB_SOFT 0xffffffff
+#define VK_BAR0_RESET_DB_HARD 0xfffffffd
+#define VK_BAR0_RESET_RAMPDUMP 0xa0000000
+
+#define VK_BAR0_Q_DB_BASE(q_num) (VK_BAR0_REGSEG_DB_BASE + \
+ ((q_num) * VK_BAR0_REGSEG_DB_REG_GAP))
+#define VK_BAR0_RESET_DB_BASE (VK_BAR0_REGSEG_DB_BASE + \
+ (VK_BAR0_RESET_DB_NUM * VK_BAR0_REGSEG_DB_REG_GAP))
+
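(A worked expansion of the doorbell layout above:)

	/*
	 * VK_BAR0_Q_DB_BASE(0)  = 0x484 + 0 * 8 = 0x484
	 * VK_BAR0_Q_DB_BASE(1)  = 0x484 + 1 * 8 = 0x48c
	 * VK_BAR0_Q_DB_BASE(2)  = 0x484 + 2 * 8 = 0x494
	 * VK_BAR0_RESET_DB_BASE = 0x484 + 3 * 8 = 0x49c
	 * which matches the gap comment above (DB1 at 0x48c, DB2 at 0x494).
	 */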
+#define BAR_BOOTSRC_SELECT 0xc78
+/* BOOTSRC definitions */
+#define BOOTSRC_SOFT_ENABLE BIT(14)
+
+/* Card OS Firmware version size */
+#define BAR_FIRMWARE_TAG_SIZE 50
+#define FIRMWARE_STATUS_PRE_INIT_DONE 0x1f
+
+/* VK MSG_ID defines */
+#define VK_MSG_ID_BITMAP_SIZE 4096
+#define VK_MSG_ID_BITMAP_MASK (VK_MSG_ID_BITMAP_SIZE - 1)
+#define VK_MSG_ID_OVERFLOW 0xffff
+
+/*
+ * BAR1
+ */
+
+/* BAR1 message q definition */
+
+/* indicate if msgq ctrl in BAR1 is populated */
+#define VK_BAR1_MSGQ_DEF_RDY 0x60c0
+/* ready marker value for the above location, normal boot2 */
+#define VK_BAR1_MSGQ_RDY_MARKER 0xbeefcafe
+/* ready marker value for the above location, diag mode boot2 */
+#define VK_BAR1_DIAG_RDY_MARKER 0xdeadcafe
+/* number of msgqs in BAR1 */
+#define VK_BAR1_MSGQ_NR 0x60c4
+/* BAR1 queue control structure offset */
+#define VK_BAR1_MSGQ_CTRL_OFF 0x60c8
+
+/* BAR1 ucode and boot1 version tag */
+#define VK_BAR1_UCODE_VER_TAG 0x6170
+#define VK_BAR1_BOOT1_VER_TAG 0x61b0
+#define VK_BAR1_VER_TAG_SIZE 64
+
+/* Memory to hold the DMA buffer memory address allocated for boot2 download */
+#define VK_BAR1_DMA_BUF_OFF_HI 0x61e0
+#define VK_BAR1_DMA_BUF_OFF_LO (VK_BAR1_DMA_BUF_OFF_HI + 4)
+#define VK_BAR1_DMA_BUF_SZ (VK_BAR1_DMA_BUF_OFF_HI + 8)
+
+/* Scratch memory allocated on host for VK */
+#define VK_BAR1_SCRATCH_OFF_HI 0x61f0
+#define VK_BAR1_SCRATCH_OFF_LO (VK_BAR1_SCRATCH_OFF_HI + 4)
+#define VK_BAR1_SCRATCH_SZ_ADDR (VK_BAR1_SCRATCH_OFF_HI + 8)
+#define VK_BAR1_SCRATCH_DEF_NR_PAGES 32
+
+/* BAR1 DAUTH info */
+#define VK_BAR1_DAUTH_BASE_ADDR 0x6200
+#define VK_BAR1_DAUTH_STORE_SIZE 0x48
+#define VK_BAR1_DAUTH_VALID_SIZE 0x8
+#define VK_BAR1_DAUTH_MAX 4
+#define VK_BAR1_DAUTH_STORE_ADDR(x) \
+ (VK_BAR1_DAUTH_BASE_ADDR + \
+ (x) * (VK_BAR1_DAUTH_STORE_SIZE + VK_BAR1_DAUTH_VALID_SIZE))
+#define VK_BAR1_DAUTH_VALID_ADDR(x) \
+ (VK_BAR1_DAUTH_STORE_ADDR(x) + VK_BAR1_DAUTH_STORE_SIZE)
+
+/* BAR1 SOTP AUTH and REVID info */
+#define VK_BAR1_SOTP_REVID_BASE_ADDR 0x6340
+#define VK_BAR1_SOTP_REVID_SIZE 0x10
+#define VK_BAR1_SOTP_REVID_MAX 2
+#define VK_BAR1_SOTP_REVID_ADDR(x) \
+ (VK_BAR1_SOTP_REVID_BASE_ADDR + (x) * VK_BAR1_SOTP_REVID_SIZE)
+
+/* VK device supports a maximum of 3 bars */
+#define MAX_BAR 3
+
+/* default number of msg blk for inband SGL */
+#define BCM_VK_DEF_IB_SGL_BLK_LEN 16
+#define BCM_VK_IB_SGL_BLK_MAX 24
+
+enum pci_barno {
+ BAR_0 = 0,
+ BAR_1,
+ BAR_2
+};
+
+#ifdef CONFIG_BCM_VK_TTY
+#define BCM_VK_NUM_TTY 2
+#else
+#define BCM_VK_NUM_TTY 0
+#endif
+
+struct bcm_vk_tty {
+ struct tty_port port;
+ u32 to_offset; /* bar offset to use */
+ u32 to_size; /* to VK buffer size */
+ u32 wr; /* write offset shadow */
+ u32 from_offset; /* bar offset to use */
+ u32 from_size; /* from VK buffer size */
+ u32 rd; /* read offset shadow */
+ pid_t pid;
+ bool irq_enabled;
+ bool is_opened; /* tracks tty open/close */
+};
+
+/* VK device power states; 3 are supported: full, reduced and low */
+#define MAX_OPP 3
+#define MAX_CARD_INFO_TAG_SIZE 64
+
+struct bcm_vk_card_info {
+ u32 version;
+ char os_tag[MAX_CARD_INFO_TAG_SIZE];
+ char cmpt_tag[MAX_CARD_INFO_TAG_SIZE];
+ u32 cpu_freq_mhz;
+ u32 cpu_scale[MAX_OPP];
+ u32 ddr_freq_mhz;
+ u32 ddr_size_MB;
+ u32 video_core_freq_mhz;
+};
+
+/* DAUTH related info */
+struct bcm_vk_dauth_key {
+ char store[VK_BAR1_DAUTH_STORE_SIZE];
+ char valid[VK_BAR1_DAUTH_VALID_SIZE];
+};
+
+struct bcm_vk_dauth_info {
+ struct bcm_vk_dauth_key keys[VK_BAR1_DAUTH_MAX];
+};
+
+/*
+ * Control structure of logging messages from the card. This
+ * buffer holds the log messages that come from the VK card
+ */
+struct bcm_vk_peer_log {
+ u32 rd_idx;
+ u32 wr_idx;
+ u32 buf_size;
+ u32 mask;
+ char data[0];
+};
+
+/* max buf size allowed */
+#define BCM_VK_PEER_LOG_BUF_MAX SZ_16K
+/* max size per line of peer log */
+#define BCM_VK_PEER_LOG_LINE_MAX 256
+
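(The 'mask' field above implies the peer log is a power-of-two ring buffer, so advancing an index is a single AND instead of a modulo; a minimal sketch with a hypothetical demo_ helper:)

	/* pop one byte from the ring; with buf_size = 0x1000, mask = 0xfff,
	 * index 0xfff + 1 wraps back to 0 */
	static char demo_log_pop(struct bcm_vk_peer_log *log, const char *data)
	{
		char c = data[log->rd_idx];

		log->rd_idx = (log->rd_idx + 1) & log->mask;
		return c;
	}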
+/*
+ * single entry for processing type + utilization
+ */
+#define BCM_VK_PROC_TYPE_TAG_LEN 8
+struct bcm_vk_proc_mon_entry_t {
+ char tag[BCM_VK_PROC_TYPE_TAG_LEN];
+ u32 used;
+ u32 max; /**< max capacity */
+};
+
+/**
+ * Structure for run time utilization
+ */
+#define BCM_VK_PROC_MON_MAX 8 /* max entries supported */
+struct bcm_vk_proc_mon_info {
+ u32 num; /**< no of entries */
+ u32 entry_size; /**< per entry size */
+ struct bcm_vk_proc_mon_entry_t entries[BCM_VK_PROC_MON_MAX];
+};
+
+struct bcm_vk_hb_ctrl {
+ struct timer_list timer;
+ u32 last_uptime;
+ u32 lost_cnt;
+};
+
+struct bcm_vk_alert {
+ u16 flags;
+ u16 notfs;
+};
+
+/* some alert counters that the driver will keep track of */
+struct bcm_vk_alert_cnts {
+ u16 ecc;
+ u16 uecc;
+};
+
+struct bcm_vk {
+ struct pci_dev *pdev;
+ void __iomem *bar[MAX_BAR];
+ int num_irqs;
+
+ struct bcm_vk_card_info card_info;
+ struct bcm_vk_proc_mon_info proc_mon_info;
+ struct bcm_vk_dauth_info dauth_info;
+
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
+ struct miscdevice miscdev;
+ int devid; /* dev id allocated */
+
+#ifdef CONFIG_BCM_VK_TTY
+ struct tty_driver *tty_drv;
+ struct timer_list serial_timer;
+ struct bcm_vk_tty tty[BCM_VK_NUM_TTY];
+ struct workqueue_struct *tty_wq_thread;
+ struct work_struct tty_wq_work;
+#endif
+
+ /* Reference-counting to handle file operations */
+ struct kref kref;
+
+ spinlock_t msg_id_lock; /* Spinlock for msg_id */
+ u16 msg_id;
+ DECLARE_BITMAP(bmap, VK_MSG_ID_BITMAP_SIZE);
+ spinlock_t ctx_lock; /* Spinlock for component context */
+ struct bcm_vk_ctx ctx[VK_CMPT_CTX_MAX];
+ struct bcm_vk_ht_entry pid_ht[VK_PID_HT_SZ];
+ pid_t reset_pid; /* process that issued reset */
+
+ atomic_t msgq_inited; /* indicate if info has been synced with vk */
+ struct bcm_vk_msg_chan to_v_msg_chan;
+ struct bcm_vk_msg_chan to_h_msg_chan;
+
+ struct workqueue_struct *wq_thread;
+ struct work_struct wq_work; /* work queue for deferred job */
+ unsigned long wq_offload[1]; /* various flags on wq requested */
+ void *tdma_vaddr; /* test dma segment virtual addr */
+ dma_addr_t tdma_addr; /* test dma segment bus addr */
+
+ struct notifier_block panic_nb;
+ u32 ib_sgl_size; /* size allocated for inband sgl insertion */
+
+ /* heart beat mechanism control structure */
+ struct bcm_vk_hb_ctrl hb_ctrl;
+ /* house-keeping variable of error logs */
+ spinlock_t host_alert_lock; /* protection to access host_alert struct */
+ struct bcm_vk_alert host_alert;
+ struct bcm_vk_alert peer_alert; /* bits set by the card */
+ struct bcm_vk_alert_cnts alert_cnts;
+
+ /* offset of the peer log control in BAR2 */
+ u32 peerlog_off;
+ struct bcm_vk_peer_log peerlog_info; /* record of peer log info */
+ /* offset of processing monitoring info in BAR2 */
+ u32 proc_mon_off;
+};
+
+/* wq offload work items bits definitions */
+enum bcm_vk_wq_offload_flags {
+ BCM_VK_WQ_DWNLD_PEND = 0,
+ BCM_VK_WQ_DWNLD_AUTO = 1,
+ BCM_VK_WQ_NOTF_PEND = 2,
+};
+
+/* a macro to get an individual field with mask and shift */
+#define BCM_VK_EXTRACT_FIELD(_field, _reg, _mask, _shift) \
+ (_field = (((_reg) >> (_shift)) & (_mask)))
+
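(Example use of the extractor above, assuming 'reg' holds a freshly read BAR_CARD_TEMPERATURE value:)

	u16 cpu_temp;

	BCM_VK_EXTRACT_FIELD(cpu_temp, reg, BCM_VK_TEMP_FIELD_MASK,
			     BCM_VK_CPU_TEMP_SHIFT);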
+struct bcm_vk_entry {
+ const u32 mask;
+ const u32 exp_val;
+ const char *str;
+};
+
+/* alerts that could be generated from peer */
+#define BCM_VK_PEER_ERR_NUM 12
+extern struct bcm_vk_entry const bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM];
+/* alerts detected by the host */
+#define BCM_VK_HOST_ERR_NUM 3
+extern struct bcm_vk_entry const bcm_vk_host_err[BCM_VK_HOST_ERR_NUM];
+
+/*
+ * check if PCIe interface is down on read. Use it when it is
+ * certain that _val should never be all ones.
+ */
+#define BCM_VK_INTF_IS_DOWN(val) ((val) == 0xffffffff)
+
+static inline u32 vkread32(struct bcm_vk *vk, enum pci_barno bar, u64 offset)
+{
+ return readl(vk->bar[bar] + offset);
+}
+
+static inline void vkwrite32(struct bcm_vk *vk,
+ u32 value,
+ enum pci_barno bar,
+ u64 offset)
+{
+ writel(value, vk->bar[bar] + offset);
+}
+
+static inline u8 vkread8(struct bcm_vk *vk, enum pci_barno bar, u64 offset)
+{
+ return readb(vk->bar[bar] + offset);
+}
+
+static inline void vkwrite8(struct bcm_vk *vk,
+ u8 value,
+ enum pci_barno bar,
+ u64 offset)
+{
+ writeb(value, vk->bar[bar] + offset);
+}
+
+static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk)
+{
+ u32 rdy_marker = 0;
+ u32 fw_status;
+
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+
+ if ((fw_status & VK_FWSTS_READY) == VK_FWSTS_READY)
+ rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+
+ return (rdy_marker == VK_BAR1_MSGQ_RDY_MARKER);
+}
+
+int bcm_vk_open(struct inode *inode, struct file *p_file);
+ssize_t bcm_vk_read(struct file *p_file, char __user *buf, size_t count,
+ loff_t *f_pos);
+ssize_t bcm_vk_write(struct file *p_file, const char __user *buf,
+ size_t count, loff_t *f_pos);
+__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait);
+int bcm_vk_release(struct inode *inode, struct file *p_file);
+void bcm_vk_release_data(struct kref *kref);
+irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id);
+irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id);
+irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id);
+int bcm_vk_msg_init(struct bcm_vk *vk);
+void bcm_vk_msg_remove(struct bcm_vk *vk);
+void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk);
+int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync);
+void bcm_vk_blk_drv_access(struct bcm_vk *vk);
+s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk);
+int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
+ const pid_t pid, const u32 q_num);
+void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val);
+int bcm_vk_auto_load_all_images(struct bcm_vk *vk);
+void bcm_vk_hb_init(struct bcm_vk *vk);
+void bcm_vk_hb_deinit(struct bcm_vk *vk);
+void bcm_vk_handle_notf(struct bcm_vk *vk);
+bool bcm_vk_drv_access_ok(struct bcm_vk *vk);
+void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask);
+
+#ifdef CONFIG_BCM_VK_TTY
+int bcm_vk_tty_init(struct bcm_vk *vk, char *name);
+void bcm_vk_tty_exit(struct bcm_vk *vk);
+void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk);
+void bcm_vk_tty_wq_exit(struct bcm_vk *vk);
+
+static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index)
+{
+ vk->tty[index].irq_enabled = true;
+}
+#else
+static inline int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
+{
+ return 0;
+}
+
+static inline void bcm_vk_tty_exit(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_wq_exit(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index)
+{
+}
+#endif /* CONFIG_BCM_VK_TTY */
+
+#endif
diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c
new file mode 100644
index 000000000000..6bfea3210389
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_dev.c
@@ -0,0 +1,1652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk.h"
+
+#define PCI_DEVICE_ID_VALKYRIE 0x5e87
+#define PCI_DEVICE_ID_VIPER 0x5e88
+
+static DEFINE_IDA(bcm_vk_ida);
+
+enum soc_idx {
+ VALKYRIE_A0 = 0,
+ VALKYRIE_B0,
+ VIPER,
+ VK_IDX_INVALID
+};
+
+enum img_idx {
+ IMG_PRI = 0,
+ IMG_SEC,
+ IMG_PER_TYPE_MAX
+};
+
+struct load_image_entry {
+ const u32 image_type;
+ const char *image_name[IMG_PER_TYPE_MAX];
+};
+
+#define NUM_BOOT_STAGES 2
+/* default firmware image names */
+static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = {
+ [VALKYRIE_A0] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
+ {VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}}
+ },
+ [VALKYRIE_B0] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}},
+ {VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}}
+ },
+
+ [VIPER] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}},
+ {VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}}
+ },
+};
+
+/* Location of memory base addresses of interest in BAR1 */
+/* Load Boot1 to start of ITCM */
+#define BAR1_CODEPUSH_BASE_BOOT1 0x100000
+
+/* Allow minimum 1s for Load Image timeout responses */
+#define LOAD_IMAGE_TIMEOUT_MS (1 * MSEC_PER_SEC)
+
+/* Image startup timeouts */
+#define BOOT1_STARTUP_TIMEOUT_MS (5 * MSEC_PER_SEC)
+#define BOOT2_STARTUP_TIMEOUT_MS (10 * MSEC_PER_SEC)
+
+/* 1ms wait for checking the transfer complete status */
+#define TXFR_COMPLETE_TIMEOUT_MS 1
+
+/* MSIX usages */
+#define VK_MSIX_MSGQ_MAX 3
+#define VK_MSIX_NOTF_MAX 1
+#define VK_MSIX_TTY_MAX BCM_VK_NUM_TTY
+#define VK_MSIX_IRQ_MAX (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \
+ VK_MSIX_TTY_MAX)
+#define VK_MSIX_IRQ_MIN_REQ (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX)
+
+/* Number of bits set in DMA mask */
+#define BCM_VK_DMA_BITS 64
+
+/* Ucode boot wait time */
+#define BCM_VK_UCODE_BOOT_US (100 * USEC_PER_MSEC)
+/* 50% margin */
+#define BCM_VK_UCODE_BOOT_MAX_US ((BCM_VK_UCODE_BOOT_US * 3) >> 1)
+
+/* deinit time for the card os after receiving doorbell */
+#define BCM_VK_DEINIT_TIME_MS (2 * MSEC_PER_SEC)
+
+/*
+ * module parameters
+ */
+static bool auto_load = true;
+module_param(auto_load, bool, 0444);
+MODULE_PARM_DESC(auto_load,
+ "Load images automatically at PCIe probe time.\n");
+static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
+module_param(nr_scratch_pages, uint, 0444);
+MODULE_PARM_DESC(nr_scratch_pages,
+ "Number of pre allocated DMAable coherent pages.\n");
+static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN;
+module_param(nr_ib_sgl_blk, uint, 0444);
+MODULE_PARM_DESC(nr_ib_sgl_blk,
+ "Number of in-band msg blks for short SGL.\n");
+
+/*
+ * alerts that could be generated from peer
+ */
+const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = {
+ {ERR_LOG_UECC, ERR_LOG_UECC, "uecc"},
+ {ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"},
+ {ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"},
+ {ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"},
+ {ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"},
+ {ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"},
+ {ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"},
+ {ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT,
+ "cop_wdog_timeout"},
+ {ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"},
+ {ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"},
+ {ERR_LOG_ECC, ERR_LOG_ECC, "ecc"},
+ {ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"},
+};
+
+/* alerts detected by the host */
+const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = {
+ {ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"},
+ {ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"},
+ {ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"},
+};
+
+irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ if (!bcm_vk_drv_access_ok(vk)) {
+ dev_err(&vk->pdev->dev,
+ "Interrupt %d received when msgq not inited\n", irq);
+ goto skip_schedule_work;
+ }
+
+ /* if notification is not pending, set bit and schedule work */
+ if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+skip_schedule_work:
+ return IRQ_HANDLED;
+}
+
+static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 reg;
+ u16 major, minor;
+ int ret = 0;
+
+ /* read interface register */
+ reg = vkread32(vk, BAR_0, BAR_INTF_VER);
+ major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
+ minor = reg & BAR_INTF_VER_MASK;
+
+ /*
+ * if the major number is 0, it is a pre-release and is allowed
+ * to continue; otherwise, check the versions accordingly
+ */
+ if (!major) {
+ dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ } else if (major != SEMANTIC_MAJOR) {
+ dev_err(dev,
+ "Intf major.minor=%d.%d rejected - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
+ ret = -EPFNOSUPPORT;
+ } else {
+ dev_dbg(dev,
+ "Intf major.minor=%d.%d passed - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ }
+ return ret;
+}
+
+static void bcm_vk_log_notf(struct bcm_vk *vk,
+ struct bcm_vk_alert *alert,
+ struct bcm_vk_entry const *entry_tab,
+ const u32 table_size)
+{
+ u32 i;
+ u32 masked_val, latched_val;
+ struct bcm_vk_entry const *entry;
+ u32 reg;
+ u16 ecc_mem_err, uecc_mem_err;
+ struct device *dev = &vk->pdev->dev;
+
+ for (i = 0; i < table_size; i++) {
+ entry = &entry_tab[i];
+ masked_val = entry->mask & alert->notfs;
+ latched_val = entry->mask & alert->flags;
+
+ if (masked_val == ERR_LOG_UECC) {
+ /*
+ * if the count differs from the stored one and is
+ * greater than the threshold, log it.
+ */
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
+ BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg,
+ BCM_VK_MEM_ERR_FIELD_MASK,
+ BCM_VK_UECC_MEM_ERR_SHIFT);
+ if ((uecc_mem_err != vk->alert_cnts.uecc) &&
+ (uecc_mem_err >= BCM_VK_UECC_THRESHOLD))
+ dev_info(dev,
+ "ALERT! %s.%d uecc RAISED - ErrCnt %d\n",
+ DRV_MODULE_NAME, vk->devid,
+ uecc_mem_err);
+ vk->alert_cnts.uecc = uecc_mem_err;
+ } else if (masked_val == ERR_LOG_ECC) {
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
+ BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg,
+ BCM_VK_MEM_ERR_FIELD_MASK,
+ BCM_VK_ECC_MEM_ERR_SHIFT);
+ if ((ecc_mem_err != vk->alert_cnts.ecc) &&
+ (ecc_mem_err >= BCM_VK_ECC_THRESHOLD))
+ dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n",
+ DRV_MODULE_NAME, vk->devid,
+ ecc_mem_err);
+ vk->alert_cnts.ecc = ecc_mem_err;
+ } else if (masked_val != latched_val) {
+ /* print a log as info */
+ dev_info(dev, "ALERT! %s.%d %s %s\n",
+ DRV_MODULE_NAME, vk->devid, entry->str,
+ masked_val ? "RAISED" : "CLEARED");
+ }
+ }
+}
+
+static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
+{
+ struct bcm_vk_peer_log log;
+ struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
+ char loc_buf[BCM_VK_PEER_LOG_LINE_MAX];
+ int cnt;
+ struct device *dev = &vk->pdev->dev;
+ unsigned int data_offset;
+
+ memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));
+
+ dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ log.buf_size, log.mask, log.rd_idx, log.wr_idx);
+
+ if (!log_info->buf_size) {
+ dev_err(dev, "Peer log dump disabled - skipped!\n");
+ return;
+ }
+
+ /* perform range checking for rd/wr idx */
+ if ((log.rd_idx > log_info->mask) ||
+ (log.wr_idx > log_info->mask) ||
+ (log.buf_size != log_info->buf_size) ||
+ (log.mask != log_info->mask)) {
+ dev_err(dev,
+ "Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n",
+ log_info->buf_size, log.buf_size,
+ log_info->mask, log.mask,
+ log.rd_idx, log.wr_idx);
+ return;
+ }
+
+ cnt = 0;
+ data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
+ loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0';
+ while (log.rd_idx != log.wr_idx) {
+ loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);
+
+ if ((loc_buf[cnt] == '\0') ||
+ (cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) {
+ dev_err(dev, "%s", loc_buf);
+ cnt = 0;
+ } else {
+ cnt++;
+ }
+ log.rd_idx = (log.rd_idx + 1) & log.mask;
+ }
+ /* update rd idx at the end */
+ vkwrite32(vk, log.rd_idx, BAR_2,
+ vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
+}
+
+void bcm_vk_handle_notf(struct bcm_vk *vk)
+{
+ u32 reg;
+ struct bcm_vk_alert alert;
+ bool intf_down;
+ unsigned long flags;
+
+ /* handle peer alerts and then locally detected ones */
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
+ intf_down = BCM_VK_INTF_IS_DOWN(reg);
+ if (!intf_down) {
+ vk->peer_alert.notfs = reg;
+ bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
+ ARRAY_SIZE(bcm_vk_peer_err));
+ vk->peer_alert.flags = vk->peer_alert.notfs;
+ } else {
+ /* turn off access */
+ bcm_vk_blk_drv_access(vk);
+ }
+
+ /* make a copy of the alert under the lock, then release the lock */
+ spin_lock_irqsave(&vk->host_alert_lock, flags);
+ if (intf_down)
+ vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
+
+ alert = vk->host_alert;
+ vk->host_alert.flags = vk->host_alert.notfs;
+ spin_unlock_irqrestore(&vk->host_alert_lock, flags);
+
+ /* call display with copy */
+ bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
+ ARRAY_SIZE(bcm_vk_host_err));
+
+ /*
+ * If it is a sys fault or heartbeat timeout, extract the log
+ * msgs from the card so that we know what the last fault was
+ */
+ if (!intf_down &&
+ ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
+ (vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
+ bcm_vk_dump_peer_log(vk);
+}
+
+static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
+ u64 offset, u32 mask, u32 value,
+ unsigned long timeout_ms)
+{
+ struct device *dev = &vk->pdev->dev;
+ unsigned long start_time;
+ unsigned long timeout;
+ u32 rd_val, boot_status;
+
+ start_time = jiffies;
+ timeout = start_time + msecs_to_jiffies(timeout_ms);
+
+ do {
+ rd_val = vkread32(vk, bar, offset);
+ dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n",
+ bar, offset, rd_val);
+
+ /* check for any boot err condition */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (boot_status & BOOT_ERR_MASK) {
+ dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n",
+ (boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT,
+ boot_status & BOOT_PROG_MASK,
+ jiffies_to_msecs(jiffies - start_time));
+ return -EFAULT;
+ }
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ cpu_relax();
+ cond_resched();
+ } while ((rd_val & mask) != value);
+
+ return 0;
+}
+
+static void bcm_vk_get_card_info(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 offset;
+ int i;
+ u8 *dst;
+ struct bcm_vk_card_info *info = &vk->card_info;
+
+ /* first read the offset from spare register */
+ offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
+ offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);
+
+ /* based on the offset, read info to internal card info structure */
+ dst = (u8 *)info;
+ for (i = 0; i < sizeof(*info); i++)
+ *dst++ = vkread8(vk, BAR_2, offset++);
+
+#define CARD_INFO_LOG_FMT "version : %x\n" \
+ "os_tag : %s\n" \
+ "cmpt_tag : %s\n" \
+ "cpu_freq : %d MHz\n" \
+ "cpu_scale : %d full, %d lowest\n" \
+ "ddr_freq : %d MHz\n" \
+ "ddr_size : %d MB\n" \
+ "video_freq: %d MHz\n"
+ dev_dbg(dev, CARD_INFO_LOG_FMT, info->version, info->os_tag,
+ info->cmpt_tag, info->cpu_freq_mhz, info->cpu_scale[0],
+ info->cpu_scale[MAX_OPP - 1], info->ddr_freq_mhz,
+ info->ddr_size_MB, info->video_core_freq_mhz);
+
+ /*
+ * Get the peer log pointer; only the offset is needed. Also keep
+ * a record of the log buffer information, which is used for
+ * checking before a dump in case the BAR2 memory has been corrupted.
+ */
+ vk->peerlog_off = offset;
+ memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off,
+ sizeof(vk->peerlog_info));
+
+ /*
+ * Do range checking; if out of bounds, the record is zeroed, which
+ * guarantees that nothing will be dumped. In other words, the peer
+ * dump is disabled.
+ */
+ if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
+ (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
+ (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
+ (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
+ dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ vk->peerlog_info.buf_size,
+ vk->peerlog_info.mask,
+ vk->peerlog_info.rd_idx,
+ vk->peerlog_info.wr_idx);
+ memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
+ } else {
+ dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ vk->peerlog_info.buf_size,
+ vk->peerlog_info.mask,
+ vk->peerlog_info.rd_idx,
+ vk->peerlog_info.wr_idx);
+ }
+}
+
+static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info;
+ u32 num, entry_size, offset, buf_size;
+ u8 *dst;
+
+ /* calculate offset which is based on peerlog offset */
+ buf_size = vkread32(vk, BAR_2,
+ vk->peerlog_off
+ + offsetof(struct bcm_vk_peer_log, buf_size));
+ offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
+ + buf_size;
+
+ /* first read the num and entry size */
+ num = vkread32(vk, BAR_2, offset);
+ entry_size = vkread32(vk, BAR_2, offset + sizeof(num));
+
+ /* check for max allowed */
+ if (num > BCM_VK_PROC_MON_MAX) {
+ dev_err(dev, "Processing monitoring entry %d exceeds max %d\n",
+ num, BCM_VK_PROC_MON_MAX);
+ return;
+ }
+ mon->num = num;
+ mon->entry_size = entry_size;
+
+ vk->proc_mon_off = offset;
+
+ /* read it once to capture the static info */
+ dst = (u8 *)&mon->entries[0];
+ offset += sizeof(num) + sizeof(entry_size);
+ memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
+}
+
+static int bcm_vk_sync_card_info(struct bcm_vk *vk)
+{
+ u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+
+ /* check for marker, but allow diags mode to skip sync */
+ if (!bcm_vk_msgq_marker_valid(vk))
+ return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);
+
+ /*
+ * Write down the scratch address used for DMA. For the
+ * signed part, BAR1 is accessible only after boot2 has
+ * come up
+ */
+ if (vk->tdma_addr) {
+ vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
+ VK_BAR1_SCRATCH_OFF_HI);
+ vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
+ VK_BAR1_SCRATCH_OFF_LO);
+ vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
+ VK_BAR1_SCRATCH_SZ_ADDR);
+ }
+
+ /* get static card info, only need to read once */
+ bcm_vk_get_card_info(vk);
+
+ /* get the proc mon info once */
+ bcm_vk_get_proc_mon_info(vk);
+
+ return 0;
+}
+
+void bcm_vk_blk_drv_access(struct bcm_vk *vk)
+{
+ int i;
+
+ /*
+ * kill all the apps except for the process that is resetting.
+ * If not called during reset, reset_pid will be 0, and all will be
+ * killed.
+ */
+ spin_lock(&vk->ctx_lock);
+
+ /* set msgq_inited to 0 so that all rd/wr will be blocked */
+ atomic_set(&vk->msgq_inited, 0);
+
+ for (i = 0; i < VK_PID_HT_SZ; i++) {
+ struct bcm_vk_ctx *ctx;
+
+ list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
+ if (ctx->pid != vk->reset_pid) {
+ dev_dbg(&vk->pdev->dev,
+ "Send kill signal to pid %d\n",
+ ctx->pid);
+ kill_pid(find_vpid(ctx->pid), SIGKILL, 1);
+ }
+ }
+ }
+ bcm_vk_tty_terminate_tty_user(vk);
+ spin_unlock(&vk->ctx_lock);
+}
+
+static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
+ dma_addr_t host_buf_addr, u32 buf_size)
+{
+ /* update the dma address to the card */
+ vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
+ VK_BAR1_DMA_BUF_OFF_HI);
+ vkwrite32(vk, (u32)host_buf_addr, BAR_1,
+ VK_BAR1_DMA_BUF_OFF_LO);
+ vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
+}
+
+static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
+ const char *filename)
+{
+ struct device *dev = &vk->pdev->dev;
+ const struct firmware *fw = NULL;
+ void *bufp = NULL;
+ size_t max_buf, offset;
+ int ret;
+ u64 offset_codepush;
+ u32 codepush;
+ u32 value;
+ dma_addr_t boot_dma_addr;
+ bool is_stdalone;
+
+ if (load_type == VK_IMAGE_TYPE_BOOT1) {
+ /*
+ * After POR, enable VK soft BOOTSRC so the BootROM does not clear
+ * the pushed image (the TCM memories).
+ */
+ value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
+ value |= BOOTSRC_SOFT_ENABLE;
+ vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);
+
+ codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY;
+ offset_codepush = BAR_CODEPUSH_SBL;
+
+ /* Write a 1 to request SRAM open bit */
+ vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);
+
+ /* Wait for VK to respond */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
+ SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret);
+ goto err_buf_out;
+ }
+
+ max_buf = SZ_256K;
+ bufp = dma_alloc_coherent(dev,
+ max_buf,
+ &boot_dma_addr, GFP_KERNEL);
+ if (!bufp) {
+ dev_err(dev, "Error allocating 0x%zx\n", max_buf);
+ ret = -ENOMEM;
+ goto err_buf_out;
+ }
+ } else if (load_type == VK_IMAGE_TYPE_BOOT2) {
+ codepush = CODEPUSH_BOOT2_ENTRY;
+ offset_codepush = BAR_CODEPUSH_SBI;
+
+ /* Wait for VK to respond */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
+ DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "boot2 wait DDR open error - ret(%d)\n",
+ ret);
+ goto err_buf_out;
+ }
+
+ max_buf = SZ_4M;
+ bufp = dma_alloc_coherent(dev,
+ max_buf,
+ &boot_dma_addr, GFP_KERNEL);
+ if (!bufp) {
+ dev_err(dev, "Error allocating 0x%zx\n", max_buf);
+ ret = -ENOMEM;
+ goto err_buf_out;
+ }
+
+ bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
+ } else {
+ dev_err(dev, "Error invalid image type 0x%x\n", load_type);
+ ret = -EINVAL;
+ goto err_buf_out;
+ }
+
+ offset = 0;
+ ret = request_partial_firmware_into_buf(&fw, filename, dev,
+ bufp, max_buf, offset);
+ if (ret) {
+ dev_err(dev, "Error %d requesting firmware file: %s\n",
+ ret, filename);
+ goto err_firmware_out;
+ }
+ dev_dbg(dev, "size=0x%zx\n", fw->size);
+ if (load_type == VK_IMAGE_TYPE_BOOT1)
+ memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
+ bufp,
+ fw->size);
+
+ dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush);
+ vkwrite32(vk, codepush, BAR_0, offset_codepush);
+
+ if (load_type == VK_IMAGE_TYPE_BOOT1) {
+ u32 boot_status;
+
+ /* wait until done */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
+ BOOT1_RUNNING,
+ BOOT1_RUNNING,
+ BOOT1_STARTUP_TIMEOUT_MS);
+
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) &&
+ (boot_status & BOOT_STDALONE_RUNNING);
+ if (ret && !is_stdalone) {
+ dev_err(dev,
+ "Timeout %ld ms waiting for boot1 to come up - ret(%d)\n",
+ BOOT1_STARTUP_TIMEOUT_MS, ret);
+ goto err_firmware_out;
+ } else if (is_stdalone) {
+ u32 reg;
+
+ reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
+ if ((reg & BOOT1_STDALONE_PROGRESS_MASK) ==
+ BOOT1_STDALONE_SUCCESS) {
+ dev_info(dev, "Boot1 standalone success\n");
+ ret = 0;
+ } else {
+ dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n",
+ BOOT1_STARTUP_TIMEOUT_MS);
+ ret = -EINVAL;
+ goto err_firmware_out;
+ }
+ }
+ } else if (load_type == VK_IMAGE_TYPE_BOOT2) {
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
+
+ /* Loop to send the image to the VK in chunks of at most max_buf */
+ do {
+ /*
+ * Check for an ack from the card. When the ack is
+ * received, all the data has been received by the
+ * card; exit the loop.
+ */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
+ FW_LOADER_ACK_RCVD_ALL_DATA,
+ FW_LOADER_ACK_RCVD_ALL_DATA,
+ TXFR_COMPLETE_TIMEOUT_MS);
+ if (ret == 0) {
+ dev_dbg(dev, "Exit boot2 download\n");
+ break;
+ } else if (ret == -EFAULT) {
+ dev_err(dev, "Error detected during ACK waiting");
+ goto err_firmware_out;
+ }
+
+ /* exit the loop if there is no response from the card */
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Error. No reply from card\n");
+ ret = -ETIMEDOUT;
+ goto err_firmware_out;
+ }
+
+ /* Wait for VK to open BAR space to copy new data */
+ ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
+ codepush, 0,
+ TXFR_COMPLETE_TIMEOUT_MS);
+ if (ret == 0) {
+ offset += max_buf;
+ ret = request_partial_firmware_into_buf
+ (&fw,
+ filename,
+ dev, bufp,
+ max_buf,
+ offset);
+ if (ret) {
+ dev_err(dev,
+ "Error %d requesting firmware file: %s offset: 0x%zx\n",
+ ret, filename, offset);
+ goto err_firmware_out;
+ }
+ dev_dbg(dev, "size=0x%zx\n", fw->size);
+ dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
+ codepush, offset_codepush);
+ vkwrite32(vk, codepush, BAR_0, offset_codepush);
+ /* reload timeout after every codepush */
+ timeout = jiffies +
+ msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
+ } else if (ret == -EFAULT) {
+ dev_err(dev, "Error detected waiting for transfer\n");
+ goto err_firmware_out;
+ }
+ } while (1);
+
+ /* wait for fw status bits to indicate app ready */
+ ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
+ VK_FWSTS_READY,
+ VK_FWSTS_READY,
+ BOOT2_STARTUP_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "Boot2 not ready - ret(%d)\n", ret);
+ goto err_firmware_out;
+ }
+
+ is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
+ BOOT_STDALONE_RUNNING;
+ if (!is_stdalone) {
+ ret = bcm_vk_intf_ver_chk(vk);
+ if (ret) {
+ dev_err(dev, "failure in intf version check\n");
+ goto err_firmware_out;
+ }
+
+ /*
+ * Next, initialize Message Q if we are loading boot2.
+ * Do a force sync
+ */
+ ret = bcm_vk_sync_msgq(vk, true);
+ if (ret) {
+ dev_err(dev, "Boot2 Error reading comm msg Q info\n");
+ ret = -EIO;
+ goto err_firmware_out;
+ }
+
+ /* sync & channel other info */
+ ret = bcm_vk_sync_card_info(vk);
+ if (ret) {
+ dev_err(dev, "Syncing Card Info failure\n");
+ goto err_firmware_out;
+ }
+ }
+ }
+
+err_firmware_out:
+ release_firmware(fw);
+
+err_buf_out:
+ if (bufp)
+ dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);
+
+ return ret;
+}
+
+static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
+{
+ u32 boot_status;
+ u32 fw_status;
+ u32 load_type = 0; /* default for unknown */
+
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+
+ if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN))
+ load_type = VK_IMAGE_TYPE_BOOT1;
+ else if (boot_status == BOOT1_RUNNING)
+ load_type = VK_IMAGE_TYPE_BOOT2;
+
+ /* Log status so that we know different stages */
+ dev_info(&vk->pdev->dev,
+ "boot-status value for next image: 0x%x : fw-status 0x%x\n",
+ boot_status, fw_status);
+
+ return load_type;
+}
+
+static enum soc_idx get_soc_idx(struct bcm_vk *vk)
+{
+ struct pci_dev *pdev = vk->pdev;
+ enum soc_idx idx = VK_IDX_INVALID;
+ u32 rev;
+ static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 };
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_VALKYRIE:
+ /* get the chip id to decide sub-class */
+ rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
+ if (rev < ARRAY_SIZE(vk_soc_tab)) {
+ idx = vk_soc_tab[rev];
+ } else {
+ /* Default to A0 firmware for all other chip revs */
+ idx = VALKYRIE_A0;
+ dev_warn(&pdev->dev,
+ "Rev %d not in image lookup table, default to idx=%d\n",
+ rev, idx);
+ }
+ break;
+
+ case PCI_DEVICE_ID_VIPER:
+ idx = VIPER;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
+ }
+ return idx;
+}
+
+static const char *get_load_fw_name(struct bcm_vk *vk,
+ const struct load_image_entry *entry)
+{
+ const struct firmware *fw;
+ struct device *dev = &vk->pdev->dev;
+ int ret;
+ unsigned long dummy;
+ int i;
+
+ for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
+ fw = NULL;
+ ret = request_partial_firmware_into_buf(&fw,
+ entry->image_name[i],
+ dev, &dummy,
+ sizeof(dummy),
+ 0);
+ release_firmware(fw);
+ if (!ret)
+ return entry->image_name[i];
+ }
+ return NULL;
+}
+
+int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
+{
+ int i, ret = -1;
+ enum soc_idx idx;
+ struct device *dev = &vk->pdev->dev;
+ u32 curr_type;
+ const char *curr_name;
+
+ idx = get_soc_idx(vk);
+ if (idx == VK_IDX_INVALID)
+ goto auto_load_all_exit;
+
+ /* log a message to know the relative loading order */
+ dev_dbg(dev, "Load All for device %d\n", vk->devid);
+
+ for (i = 0; i < NUM_BOOT_STAGES; i++) {
+ curr_type = image_tab[idx][i].image_type;
+ if (bcm_vk_next_boot_image(vk) == curr_type) {
+ curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
+ if (!curr_name) {
+ dev_err(dev, "No suitable firmware exists for type %d",
+ curr_type);
+ ret = -ENOENT;
+ goto auto_load_all_exit;
+ }
+ ret = bcm_vk_load_image_by_type(vk, curr_type,
+ curr_name);
+ dev_info(dev, "Auto load %s, ret %d\n",
+ curr_name, ret);
+
+ if (ret) {
+ dev_err(dev, "Error loading default %s\n",
+ curr_name);
+ goto auto_load_all_exit;
+ }
+ }
+ }
+
+auto_load_all_exit:
+ return ret;
+}
+
+static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
+{
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
+ return -EPERM;
+
+ set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+ return 0;
+}
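(The test_and_set_bit() call above atomically claims the "download pending" bit, so concurrent triggers fail fast instead of queueing duplicate work; the shape of the pattern, with a hypothetical demo_ helper:)

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static int demo_claim_once(unsigned long *offload)
	{
		/* nonzero return means someone else already set the bit */
		if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, offload))
			return -EPERM;
		/* ... queue the work; the worker clears the bit when done ... */
		return 0;
	}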
+
+/*
+ * deferred work queue for draining and auto download.
+ */
+static void bcm_vk_wq_handler(struct work_struct *work)
+{
+ struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
+ struct device *dev = &vk->pdev->dev;
+ s32 ret;
+
+ /* check wq offload bit map to perform various operations */
+ if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
+ /* clear the bit right away for notification */
+ clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
+ bcm_vk_handle_notf(vk);
+ }
+ if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
+ bcm_vk_auto_load_all_images(vk);
+
+ /*
+ * at the end of operation, clear AUTO bit and pending
+ * bit
+ */
+ clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+ }
+
+ /* next, try to drain */
+ ret = bcm_to_h_msg_dequeue(vk);
+
+ if (ret == 0)
+ dev_dbg(dev, "Spurious trigger for workqueue\n");
+ else if (ret < 0)
+ bcm_vk_blk_drv_access(vk);
+}
+
+static long bcm_vk_load_image(struct bcm_vk *vk,
+ const struct vk_image __user *arg)
+{
+ struct device *dev = &vk->pdev->dev;
+ const char *image_name;
+ struct vk_image image;
+ u32 next_loadable;
+ enum soc_idx idx;
+ int image_idx;
+ int ret = -EPERM;
+
+ if (copy_from_user(&image, arg, sizeof(image)))
+ return -EACCES;
+
+ if ((image.type != VK_IMAGE_TYPE_BOOT1) &&
+ (image.type != VK_IMAGE_TYPE_BOOT2)) {
+ dev_err(dev, "invalid image.type %u\n", image.type);
+ return ret;
+ }
+
+ next_loadable = bcm_vk_next_boot_image(vk);
+ if (next_loadable != image.type) {
+ dev_err(dev, "Next expected image %u, Loading %u\n",
+ next_loadable, image.type);
+ return ret;
+ }
+
+ /*
+ * Check if a download is already pending. For now this could only
+ * happen when the driver is being loaded, or if someone has issued
+ * another download command in another shell.
+ */
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
+ dev_err(dev, "Download operation already pending.\n");
+ return ret;
+ }
+
+ image_name = image.filename;
+ if (image_name[0] == '\0') {
+ /* use the default image name if the filename is empty */
+ idx = get_soc_idx(vk);
+ if (idx == VK_IDX_INVALID)
+ goto err_idx;
+
+ /* Image idx starts with boot1 */
+ image_idx = image.type - VK_IMAGE_TYPE_BOOT1;
+ image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]);
+ if (!image_name) {
+ dev_err(dev, "No suitable image found for type %d",
+ image.type);
+ ret = -ENOENT;
+ goto err_idx;
+ }
+ } else {
+ /* Ensure filename is NULL terminated */
+ image.filename[sizeof(image.filename) - 1] = '\0';
+ }
+ ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
+ dev_info(dev, "Load %s, ret %d\n", image_name, ret);
+err_idx:
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+
+ return ret;
+}
+
+static int bcm_vk_reset_successful(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 fw_status, reset_reason;
+ int ret = -EAGAIN;
+
+ /*
+ * A reset could be triggered while the card is in one of several
+ * states:
+ * i) in bootROM
+ * ii) after boot1
+ * iii) boot2 running
+ *
+ * i) & ii) - no status bits will be updated. If vkboot1
+ * runs automatically after reset, it will update the reason
+ * to unknown.
+ * iii) - reboot reason matches + deinit done.
+ */
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+ /* immediate exit if interface goes down */
+ if (BCM_VK_INTF_IS_DOWN(fw_status)) {
+ dev_err(dev, "PCIe Intf Down!\n");
+ goto reset_exit;
+ }
+
+ reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK);
+ if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) ||
+ (reset_reason == VK_FWSTS_RESET_UNKNOWN))
+ ret = 0;
+
+ /*
+ * If some of the deinit bits are set but the done bit is not,
+ * this is a failure when the reset was triggered while boot2
+ * was running.
+ */
+ if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) &&
+ !(fw_status & VK_FWSTS_RESET_DONE))
+ ret = -EAGAIN;
+
+reset_exit:
+ dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret);
+
+ return ret;
+}
+
+static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val)
+{
+ vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE);
+}
+
+static int bcm_vk_trigger_reset(struct bcm_vk *vk)
+{
+ u32 i;
+ u32 value, boot_status;
+ bool is_stdalone, is_boot2;
+ static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME,
+ BAR_INTF_VER,
+ BAR_CARD_VOLTAGE,
+ BAR_CARD_TEMPERATURE,
+ BAR_CARD_PWR_AND_THRE };
+
+ /* clean up before pressing the door bell */
+ bcm_vk_drain_msg_on_reset(vk);
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+ /* make tag '\0' terminated */
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);
+
+ for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) {
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
+ }
+ for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++)
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));
+
+ memset(&vk->card_info, 0, sizeof(vk->card_info));
+ memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
+ memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info));
+ memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts));
+
+ /*
+ * When a boot request fails, CODE_PUSH_OFFSET stays persistent,
+ * allowing us to debug the failure. When we trigger a reset,
+ * we should clear CODE_PUSH_OFFSET so the ROM does not execute
+ * the boot again (and fail again) and instead waits for a new
+ * codepush. Also, if the previous boot encountered an error,
+ * the entry values need to be cleared.
+ */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (boot_status & BOOT_ERR_MASK) {
+ dev_info(&vk->pdev->dev,
+ "Card in boot error 0x%x, clear CODEPUSH val\n",
+ boot_status);
+ value = 0;
+ } else {
+ value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
+ value &= CODEPUSH_MASK;
+ }
+ vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);
+
+ /* special reset handling */
+ is_stdalone = boot_status & BOOT_STDALONE_RUNNING;
+ is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING;
+ if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
+ /*
+ * If the card is in ramdump mode, it has hit an error. Don't
+ * reset the reboot reason, as it contains valid info that is
+ * important; simply use the special reset.
+ */
+ vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
+ return VK_BAR0_RESET_RAMPDUMP;
+ } else if (is_stdalone && !is_boot2) {
+ dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
+ return VK_BAR0_RESET_DB_HARD;
+ }
+
+ /* reset fw_status with proper reason, and press db */
+ vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
+
+ /* clear other necessary registers and alert records */
+ for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++)
+ vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
+ memset(&vk->host_alert, 0, sizeof(vk->host_alert));
+ memset(&vk->peer_alert, 0, sizeof(vk->peer_alert));
+ /* clear 4096 bits of bitmap */
+ bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);
+
+ return 0;
+}
+
+static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct vk_reset reset;
+ int ret = 0;
+ u32 ramdump_reset;
+ int special_reset;
+
+ if (copy_from_user(&reset, arg, sizeof(struct vk_reset)))
+ return -EFAULT;
+
+ /* check if any download is in progress; if so, return an error */
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
+ dev_err(dev, "Download operation pending - skip reset.\n");
+ return -EPERM;
+ }
+
+ ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP;
+ dev_info(dev, "Issue Reset %s\n",
+ ramdump_reset ? "in ramdump mode" : "");
+
+ /*
+ * The following is the sequence of reset:
+ * - send card level graceful shut down
+ * - wait enough time for VK to handle its business, stopping DMA etc
+ * - kill host apps
+ * - Trigger interrupt with DB
+ */
+ bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);
+
+ spin_lock(&vk->ctx_lock);
+ if (!vk->reset_pid) {
+ vk->reset_pid = task_pid_nr(current);
+ } else {
+ dev_err(dev, "Reset already launched by process pid %d\n",
+ vk->reset_pid);
+ ret = -EACCES;
+ }
+ spin_unlock(&vk->ctx_lock);
+ if (ret)
+ goto err_exit;
+
+ bcm_vk_blk_drv_access(vk);
+ special_reset = bcm_vk_trigger_reset(vk);
+
+ /*
+ * Wait enough time for card os to deinit
+ * and populate the reset reason.
+ */
+ msleep(BCM_VK_DEINIT_TIME_MS);
+
+ if (special_reset) {
+ /* if it is special ramdump reset, return the type to user */
+ reset.arg2 = special_reset;
+ if (copy_to_user(arg, &reset, sizeof(reset)))
+ ret = -EFAULT;
+ } else {
+ ret = bcm_vk_reset_successful(vk);
+ }
+
+err_exit:
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+ return ret;
+}
+
+static int bcm_vk_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct bcm_vk_ctx *ctx = file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ unsigned long pg_size;
+
+ /* only BAR2 can be mmapped; it is BAR number 4 due to 64-bit BARs */
+#define VK_MMAPABLE_BAR 4
+
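+ /* size of the mmapable BAR in pages, rounded up */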
+ pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1)
+ >> PAGE_SHIFT) + 1;
+ if (vma->vm_pgoff + vma_pages(vma) > pg_size)
+ return -EINVAL;
+
+ vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR)
+ >> PAGE_SHIFT);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -EINVAL;
+ struct bcm_vk_ctx *ctx = file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ void __user *argp = (void __user *)arg;
+
+ dev_dbg(&vk->pdev->dev,
+ "ioctl, cmd=0x%02x, arg=0x%02lx\n",
+ cmd, arg);
+
+ mutex_lock(&vk->mutex);
+
+ switch (cmd) {
+ case VK_IOCTL_LOAD_IMAGE:
+ ret = bcm_vk_load_image(vk, argp);
+ break;
+
+ case VK_IOCTL_RESET:
+ ret = bcm_vk_reset(vk, argp);
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&vk->mutex);
+
+ return ret;
+}
+
+static const struct file_operations bcm_vk_fops = {
+ .owner = THIS_MODULE,
+ .open = bcm_vk_open,
+ .read = bcm_vk_read,
+ .write = bcm_vk_write,
+ .poll = bcm_vk_poll,
+ .release = bcm_vk_release,
+ .mmap = bcm_vk_mmap,
+ .unlocked_ioctl = bcm_vk_ioctl,
+};
+
+static int bcm_vk_on_panic(struct notifier_block *nb,
+ unsigned long e, void *p)
+{
+ struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb);
+
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
+
+ return 0;
+}
+
+static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ int i;
+ int id;
+ int irq;
+ char name[20];
+ struct bcm_vk *vk;
+ struct device *dev = &pdev->dev;
+ struct miscdevice *misc_device;
+ u32 boot_status;
+
+ /* allocate vk structure which is tied to kref for freeing */
+ vk = kzalloc(sizeof(*vk), GFP_KERNEL);
+ if (!vk)
+ return -ENOMEM;
+
+ kref_init(&vk->kref);
+ if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) {
+ dev_warn(dev, "Inband SGL blk %d limited to max %d\n",
+ nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX);
+ nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX;
+ }
+ vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
+ mutex_init(&vk->mutex);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ goto err_free_exit;
+ }
+ vk->pdev = pci_dev_get(pdev);
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ /* make sure DMA is good */
+ err = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(BCM_VK_DMA_BITS));
+ if (err) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_disable_pdev;
+ }
+
+ /* The tdma is a scratch area for DMA testing. */
+ if (nr_scratch_pages) {
+ vk->tdma_vaddr = dma_alloc_coherent
+ (dev,
+ nr_scratch_pages * PAGE_SIZE,
+ &vk->tdma_addr, GFP_KERNEL);
+ if (!vk->tdma_vaddr) {
+ err = -ENOMEM;
+ goto err_disable_pdev;
+ }
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vk);
+
+ irq = pci_alloc_irq_vectors(pdev,
+ 1,
+ VK_MSIX_IRQ_MAX,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+
+ if (irq < VK_MSIX_IRQ_MIN_REQ) {
+ dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n",
+ VK_MSIX_IRQ_MIN_REQ, irq);
+ err = (irq >= 0) ? -EINVAL : irq;
+ goto err_disable_pdev;
+ }
+
+ if (irq != VK_MSIX_IRQ_MAX)
+ dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n",
+ irq, VK_MSIX_IRQ_MAX);
+
+ for (i = 0; i < MAX_BAR; i++) {
+ /* multiply by 2 for 64-bit BAR mapping */
+ vk->bar[i] = pci_ioremap_bar(pdev, i * 2);
+ if (!vk->bar[i]) {
+ dev_err(dev, "failed to remap BAR%d\n", i);
+ err = -ENOMEM;
+ goto err_iounmap;
+ }
+ }
+
+ for (vk->num_irqs = 0;
+ vk->num_irqs < VK_MSIX_MSGQ_MAX;
+ vk->num_irqs++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_msgq_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ }
+ /* one irq for notification from VK */
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_notf_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ vk->num_irqs++;
+
+ for (i = 0;
+ (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
+ i++, vk->num_irqs++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_tty_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed request tty IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ bcm_vk_tty_set_irq_enabled(vk, i);
+ }
+
+ id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+ dev_err(dev, "unable to get id\n");
+ goto err_irq;
+ }
+
+ vk->devid = id;
+ snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ misc_device = &vk->miscdev;
+ misc_device->minor = MISC_DYNAMIC_MINOR;
+ misc_device->name = kstrdup(name, GFP_KERNEL);
+ if (!misc_device->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
+ misc_device->fops = &bcm_vk_fops,
+
+ err = misc_register(misc_device);
+ if (err) {
+ dev_err(dev, "failed to register device\n");
+ goto err_kfree_name;
+ }
+
+ INIT_WORK(&vk->wq_work, bcm_vk_wq_handler);
+
+ /* create dedicated workqueue */
+ vk->wq_thread = create_singlethread_workqueue(name);
+ if (!vk->wq_thread) {
+ dev_err(dev, "Fail to create workqueue thread\n");
+ err = -ENOMEM;
+ goto err_misc_deregister;
+ }
+
+ err = bcm_vk_msg_init(vk);
+ if (err) {
+ dev_err(dev, "failed to init msg queue info\n");
+ goto err_destroy_workqueue;
+ }
+
+ /* sync other info */
+ bcm_vk_sync_card_info(vk);
+
+ /* register for panic notifier */
+ vk->panic_nb.notifier_call = bcm_vk_on_panic;
+ err = atomic_notifier_chain_register(&panic_notifier_list,
+ &vk->panic_nb);
+ if (err) {
+ dev_err(dev, "Fail to register panic notifier\n");
+ goto err_destroy_workqueue;
+ }
+
+ snprintf(name, sizeof(name), KBUILD_MODNAME ".%d_ttyVK", id);
+ err = bcm_vk_tty_init(vk, name);
+ if (err)
+ goto err_unregister_panic_notifier;
+
+ /*
+ * Let's trigger an auto download. We don't want to do it
+ * synchronously here, because probe is not supposed to block
+ * for a long time.
+ */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (auto_load) {
+ if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) {
+ err = bcm_vk_trigger_autoload(vk);
+ if (err)
+ goto err_bcm_vk_tty_exit;
+ } else {
+ dev_err(dev,
+ "Auto-load skipped - BROM not in proper state (0x%x)\n",
+ boot_status);
+ }
+ }
+
+ /* enable hb */
+ bcm_vk_hb_init(vk);
+
+ dev_dbg(dev, "BCM-VK:%u created\n", id);
+
+ return 0;
+
+err_bcm_vk_tty_exit:
+ bcm_vk_tty_exit(vk);
+
+err_unregister_panic_notifier:
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &vk->panic_nb);
+
+err_destroy_workqueue:
+ destroy_workqueue(vk->wq_thread);
+
+err_misc_deregister:
+ misc_deregister(misc_device);
+
+err_kfree_name:
+ kfree(misc_device->name);
+ misc_device->name = NULL;
+
+err_ida_remove:
+ ida_simple_remove(&bcm_vk_ida, id);
+
+err_irq:
+ for (i = 0; i < vk->num_irqs; i++)
+ devm_free_irq(dev, pci_irq_vector(pdev, i), vk);
+
+ pci_disable_msix(pdev);
+ pci_disable_msi(pdev);
+
+err_iounmap:
+ for (i = 0; i < MAX_BAR; i++) {
+ if (vk->bar[i])
+ pci_iounmap(pdev, vk->bar[i]);
+ }
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ if (vk->tdma_vaddr)
+ dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
+ vk->tdma_vaddr, vk->tdma_addr);
+
+ pci_free_irq_vectors(pdev);
+ pci_disable_device(pdev);
+ pci_dev_put(pdev);
+
+err_free_exit:
+ kfree(vk);
+
+ return err;
+}
+
+void bcm_vk_release_data(struct kref *kref)
+{
+ struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
+ struct pci_dev *pdev = vk->pdev;
+
+ dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk);
+ pci_dev_put(pdev);
+ kfree(vk);
+}
+
+static void bcm_vk_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct bcm_vk *vk = pci_get_drvdata(pdev);
+ struct miscdevice *misc_device = &vk->miscdev;
+
+ bcm_vk_hb_deinit(vk);
+
+ /*
+ * Trigger a reset to card and wait enough time for UCODE to rerun,
+ * which re-initialize the card into its default state.
+ * This ensures when driver is re-enumerated it will start from
+ * a completely clean state.
+ */
+ bcm_vk_trigger_reset(vk);
+ usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US);
+
+ /* unregister panic notifier */
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &vk->panic_nb);
+
+ bcm_vk_msg_remove(vk);
+ bcm_vk_tty_exit(vk);
+
+ if (vk->tdma_vaddr)
+ dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
+ vk->tdma_vaddr, vk->tdma_addr);
+
+ /* deregister only if name is set, which means the misc dev was registered */
+ if (misc_device->name) {
+ misc_deregister(misc_device);
+ kfree(misc_device->name);
+ ida_simple_remove(&bcm_vk_ida, vk->devid);
+ }
+ for (i = 0; i < vk->num_irqs; i++)
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
+
+ pci_disable_msix(pdev);
+ pci_disable_msi(pdev);
+
+ cancel_work_sync(&vk->wq_work);
+ destroy_workqueue(vk->wq_thread);
+ bcm_vk_tty_wq_exit(vk);
+
+ for (i = 0; i < MAX_BAR; i++) {
+ if (vk->bar[i])
+ pci_iounmap(pdev, vk->bar[i]);
+ }
+
+ dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid);
+
+ pci_release_regions(pdev);
+ pci_free_irq_vectors(pdev);
+ pci_disable_device(pdev);
+
+ kref_put(&vk->kref, bcm_vk_release_data);
+}
+
+static void bcm_vk_shutdown(struct pci_dev *pdev)
+{
+ struct bcm_vk *vk = pci_get_drvdata(pdev);
+ u32 reg, boot_stat;
+
+ reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ boot_stat = reg & BOOT_STATE_MASK;
+
+ if (boot_stat == BOOT1_RUNNING) {
+ /* simply trigger a reset interrupt to park it */
+ bcm_vk_trigger_reset(vk);
+ } else if (boot_stat == BROM_NOT_RUN) {
+ int err;
+ u16 lnksta;
+
+ /*
+ * The boot status only reflects the boot condition since the
+ * last reset. As the ucode runs only once to configure PCIe,
+ * we lose track of whether it has run when multiple resets
+ * happen. Here, read the current link speed and use that to
+ * sync up the boot status properly, so that on reboot-back-up
+ * it has the proper state to start autoload with.
+ */
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ if (!err &&
+ (lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) {
+ reg |= BROM_STATUS_COMPLETE;
+ vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);
+ }
+ }
+}
+
+static const struct pci_device_id bcm_vk_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, bcm_vk_ids);
+
+static struct pci_driver pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bcm_vk_ids,
+ .probe = bcm_vk_probe,
+ .remove = bcm_vk_remove,
+ .shutdown = bcm_vk_shutdown,
+};
+module_pci_driver(pci_driver);
+
+MODULE_DESCRIPTION("Broadcom VK Host Driver");
+MODULE_AUTHOR("Scott Branden <scott.branden@broadcom.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
new file mode 100644
index 000000000000..f40cf08a6192
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "bcm_vk.h"
+#include "bcm_vk_msg.h"
+#include "bcm_vk_sg.h"
+
+/* functions to manipulate the transport id in msg block */
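+/* trans_id layout: bits [3:0] carry the queue number, bits [15:4] the msg id */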
+#define BCM_VK_MSG_Q_SHIFT 4
+#define BCM_VK_MSG_Q_MASK 0xF
+#define BCM_VK_MSG_ID_MASK 0xFFF
+
+#define BCM_VK_DMA_DRAIN_MAX_MS 2000
+
+/* number x q_size will be the max number of msg processed per loop */
+#define BCM_VK_MSG_PROC_MAX_LOOP 2
+
+/* module parameter */
+static bool hb_mon = true;
+module_param(hb_mon, bool, 0444);
+MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
+static int batch_log = 1;
+module_param(batch_log, int, 0444);
+MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");
+
+static bool hb_mon_is_on(void)
+{
+ return hb_mon;
+}
+
+static u32 get_q_num(const struct vk_msg_blk *msg)
+{
+ u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;
+
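+ /* out-of-range queue numbers fall back to the default queue */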
+ if (q_num >= VK_MSGQ_PER_CHAN_MAX)
+ q_num = VK_MSGQ_NUM_DEFAULT;
+ return q_num;
+}
+
+static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
+{
+ u32 trans_q;
+
+ if (q_num >= VK_MSGQ_PER_CHAN_MAX)
+ trans_q = VK_MSGQ_NUM_DEFAULT;
+ else
+ trans_q = q_num;
+
+ msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
+}
+
+static u32 get_msg_id(const struct vk_msg_blk *msg)
+{
+ return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
+}
+
+static void set_msg_id(struct vk_msg_blk *msg, u32 val)
+{
+ msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
+}
+
+static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
+{
+ return ((idx + inc) & qinfo->q_mask);
+}
+
+static
+struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
+ u32 idx)
+{
+ return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
+}
+
+static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
+ const struct bcm_vk_sync_qinfo *qinfo)
+{
+ u32 wr_idx, rd_idx;
+
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+ rd_idx = readl_relaxed(&msgq->rd_idx);
+
+ return ((wr_idx - rd_idx) & qinfo->q_mask);
+}
+
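+/*
+ * One block is always left unused, so a full queue can be
+ * distinguished from an empty one.
+ */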
+static
+u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
+ const struct bcm_vk_sync_qinfo *qinfo)
+{
+ return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
+}
+
+/* number of retries when enqueue message fails before returning EAGAIN */
+#define BCM_VK_H2VK_ENQ_RETRY 10
+#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50
+
+bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
+{
+ return (!!atomic_read(&vk->msgq_inited));
+}
+
+void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
+{
+ struct bcm_vk_alert *alert = &vk->host_alert;
+ unsigned long flags;
+
+ /* use irqsave version as this may be called inside a timer interrupt */
+ spin_lock_irqsave(&vk->host_alert_lock, flags);
+ alert->notfs |= bit_mask;
+ spin_unlock_irqrestore(&vk->host_alert_lock, flags);
+
+ if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
+ queue_work(vk->wq_thread, &vk->wq_work);
+}
+
+/*
+ * Heartbeat related defines
+ * The heartbeat from the host is a last resort: if a stuck condition
+ * happens on the card, the firmware is supposed to detect it. Therefore,
+ * the heartbeat values used by the driver are more relaxed, and need to
+ * be bigger than the watchdog timeout on the card. The watchdog timeout
+ * on the card is 20 s, with a jitter of 2 s => 22 s. We use a value of
+ * 27 s here.
+ */
+#define BCM_VK_HB_TIMER_S 3
+#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
+#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
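+/* i.e. 27 s / 3 s per poll => up to 9 consecutive missed beats tolerated */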
+
+static void bcm_vk_hb_poll(struct timer_list *t)
+{
+ u32 uptime_s;
+ struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
+ timer);
+ struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
+
+ if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
+ /* read uptime from register and compare */
+ uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
+
+ if (uptime_s == hb->last_uptime)
+ hb->lost_cnt++;
+ else /* reset to avoid accumulation */
+ hb->lost_cnt = 0;
+
+ dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
+ hb->last_uptime, uptime_s, hb->lost_cnt);
+
+ /*
+ * If the interface goes down without any activity, a value
+ * of 0xFFFFFFFF will be read continuously, and the loss will
+ * eventually be detected.
+ */
+ hb->last_uptime = uptime_s;
+ } else {
+ /* reset heart beat lost cnt */
+ hb->lost_cnt = 0;
+ }
+
+ /* next, check if heartbeat exceeds limit */
+ if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
+ dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
+ BCM_VK_HB_LOST_MAX,
+ BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);
+
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
+ }
+ /* re-arm timer */
+ mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+}
+
+void bcm_vk_hb_init(struct bcm_vk *vk)
+{
+ struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
+
+ timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
+ mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+}
+
+void bcm_vk_hb_deinit(struct bcm_vk *vk)
+{
+ struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
+
+ del_timer(&hb->timer);
+}
+
+static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
+ unsigned int start,
+ unsigned int nbits)
+{
+ spin_lock(&vk->msg_id_lock);
+ bitmap_clear(vk->bmap, start, nbits);
+ spin_unlock(&vk->msg_id_lock);
+}
+
+/*
+ * allocate a ctx per file struct
+ */
+static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
+{
+ u32 i;
+ struct bcm_vk_ctx *ctx = NULL;
+ u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);
+
+ spin_lock(&vk->ctx_lock);
+
+ /* check if it is in reset, if so, don't allow */
+ if (vk->reset_pid) {
+ dev_err(&vk->pdev->dev,
+ "No context allowed during reset by pid %d\n",
+ vk->reset_pid);
+
+ goto in_reset_exit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
+ if (!vk->ctx[i].in_use) {
+ vk->ctx[i].in_use = true;
+ ctx = &vk->ctx[i];
+ break;
+ }
+ }
+
+ if (!ctx) {
+ dev_err(&vk->pdev->dev, "All context in use\n");
+
+ goto all_in_use_exit;
+ }
+
+ /* set the pid and insert it to hash table */
+ ctx->pid = pid;
+ ctx->hash_idx = hash_idx;
+ list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
+
+ /* increase kref */
+ kref_get(&vk->kref);
+
+ /* clear counter */
+ atomic_set(&ctx->pend_cnt, 0);
+ atomic_set(&ctx->dma_cnt, 0);
+ init_waitqueue_head(&ctx->rd_wq);
+
+all_in_use_exit:
+in_reset_exit:
+ spin_unlock(&vk->ctx_lock);
+
+ return ctx;
+}
+
+static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
+{
+ u16 rc = VK_MSG_ID_OVERFLOW;
+ u16 test_bit_count = 0;
+
+ spin_lock(&vk->msg_id_lock);
+ while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
+ /*
+ * The first time we enter this loop, msg_id will be 0
+ * and the first id tested will be 1. We skip
+ * VK_SIMPLEX_MSG_ID (0), which is reserved for one-way
+ * host2vk communication.
+ */
+ vk->msg_id++;
+ if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
+ vk->msg_id = 1;
+
+ if (test_bit(vk->msg_id, vk->bmap)) {
+ test_bit_count++;
+ continue;
+ }
+ rc = vk->msg_id;
+ bitmap_set(vk->bmap, vk->msg_id, 1);
+ break;
+ }
+ spin_unlock(&vk->msg_id_lock);
+
+ return rc;
+}
+
+static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
+{
+ u32 idx;
+ u32 hash_idx;
+ pid_t pid;
+ struct bcm_vk_ctx *entry;
+ int count = 0;
+
+ if (!ctx) {
+ dev_err(&vk->pdev->dev, "NULL context detected\n");
+ return -EINVAL;
+ }
+ idx = ctx->idx;
+ pid = ctx->pid;
+
+ spin_lock(&vk->ctx_lock);
+
+ if (!vk->ctx[idx].in_use) {
+ dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
+ } else {
+ vk->ctx[idx].in_use = false;
+ vk->ctx[idx].miscdev = NULL;
+
+ /* Remove it from hash list and see if it is the last one. */
+ list_del(&ctx->node);
+ hash_idx = ctx->hash_idx;
+ list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
+ if (entry->pid == pid)
+ count++;
+ }
+ }
+
+ spin_unlock(&vk->ctx_lock);
+
+ return count;
+}
+
+static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
+{
+ int proc_cnt;
+
+ bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
+ if (proc_cnt)
+ atomic_dec(&entry->ctx->dma_cnt);
+
+ kfree(entry->to_h_msg);
+ kfree(entry);
+}
+
+static void bcm_vk_drain_all_pend(struct device *dev,
+ struct bcm_vk_msg_chan *chan,
+ struct bcm_vk_ctx *ctx)
+{
+ u32 num;
+ struct bcm_vk_wkent *entry, *tmp;
+ struct bcm_vk *vk;
+ struct list_head del_q;
+
+ if (ctx)
+ vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+
+ INIT_LIST_HEAD(&del_q);
+ spin_lock(&chan->pendq_lock);
+ for (num = 0; num < chan->q_nr; num++) {
+ list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
+ if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
+ list_del(&entry->node);
+ list_add_tail(&entry->node, &del_q);
+ }
+ }
+ }
+ spin_unlock(&chan->pendq_lock);
+
+ /* batch clean up */
+ num = 0;
+ list_for_each_entry_safe(entry, tmp, &del_q, node) {
+ list_del(&entry->node);
+ num++;
+ if (ctx) {
+ struct vk_msg_blk *msg;
+ int bit_set;
+ bool responded;
+ u32 msg_id;
+
+ /* if it is specific ctx, log for any stuck */
+ msg = entry->to_v_msg;
+ msg_id = get_msg_id(msg);
+ bit_set = test_bit(msg_id, vk->bmap);
+ responded = entry->to_h_msg ? true : false;
+ if (num <= batch_log)
+ dev_info(dev,
+ "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
+ msg->function_id, msg->size,
+ msg_id, entry->seq_num,
+ msg->context_id, entry->ctx->idx,
+ msg->cmd, msg->arg,
+ responded ? "T" : "F", bit_set);
+ if (responded)
+ atomic_dec(&ctx->pend_cnt);
+ else if (bit_set)
+ bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
+ }
+ bcm_vk_free_wkent(dev, entry);
+ }
+ if (num && ctx)
+ dev_info(dev, "Total drained items %d [fd-%d]\n",
+ num, ctx->idx);
+}
+
+void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
+{
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
+}
+
+/*
+ * Function to sync up the messages queue info that is provided by BAR1
+ */
+int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
+{
+ struct bcm_vk_msgq __iomem *msgq;
+ struct device *dev = &vk->pdev->dev;
+ u32 msgq_off;
+ u32 num_q;
+ struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
+ &vk->to_h_msg_chan};
+ struct bcm_vk_msg_chan *chan;
+ int i, j;
+ int ret = 0;
+
+ /*
+ * If the driver is loaded at startup, when the vk OS is not up
+ * yet, the msgq info may not be available until a later time.
+ * In this case we skip, and the sync function is supposed to
+ * be called again.
+ */
+ if (!bcm_vk_msgq_marker_valid(vk)) {
+ dev_info(dev, "BAR1 msgq marker not initialized.\n");
+ return -EAGAIN;
+ }
+
+ msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
+
+ /* each side is always half the total */
+ num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
+ if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
+ dev_err(dev,
+ "Advertised msgq %d error - max %d allowed\n",
+ num_q, VK_MSGQ_PER_CHAN_MAX);
+ return -EINVAL;
+ }
+
+ vk->to_v_msg_chan.q_nr = num_q;
+ vk->to_h_msg_chan.q_nr = num_q;
+
+ /* first msgq location */
+ msgq = vk->bar[BAR_1] + msgq_off;
+
+ /*
+ * if this function is called when it is already inited,
+ * something is wrong
+ */
+ if (bcm_vk_drv_access_ok(vk) && !force_sync) {
+ dev_err(dev, "Msgq info already in sync\n");
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
+ chan = chan_list[i];
+ memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));
+
+ for (j = 0; j < num_q; j++) {
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 msgq_start;
+ u32 msgq_size;
+ u32 msgq_nxt;
+ u32 msgq_db_offset, q_db_offset;
+
+ chan->msgq[j] = msgq;
+ msgq_start = readl_relaxed(&msgq->start);
+ msgq_size = readl_relaxed(&msgq->size);
+ msgq_nxt = readl_relaxed(&msgq->nxt);
+ msgq_db_offset = readl_relaxed(&msgq->db_offset);
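+ /*
+ * the upper 16 bits hold the one's complement of the
+ * offset and are used as a validity check below
+ */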
+ q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
+ if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
+ msgq_db_offset = q_db_offset;
+ else
+ /* fall back to default */
+ msgq_db_offset = VK_BAR0_Q_DB_BASE(j);
+
+ dev_info(dev,
+ "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
+ j,
+ readw_relaxed(&msgq->type),
+ readw_relaxed(&msgq->num),
+ msgq_start,
+ msgq_db_offset,
+ readl_relaxed(&msgq->rd_idx),
+ readl_relaxed(&msgq->wr_idx),
+ msgq_size,
+ msgq_nxt);
+
+ qinfo = &chan->sync_qinfo[j];
+ /* formulate and record static info */
+ qinfo->q_start = vk->bar[BAR_1] + msgq_start;
+ qinfo->q_size = msgq_size;
+ /* set low threshold as 50% or 1/2 */
+ qinfo->q_low = qinfo->q_size >> 1;
+ qinfo->q_mask = qinfo->q_size - 1;
+ qinfo->q_db_offset = msgq_db_offset;
+
+ msgq++;
+ }
+ }
+ atomic_set(&vk->msgq_inited, 1);
+
+ return ret;
+}
+
+static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
+{
+ u32 i;
+
+ mutex_init(&chan->msgq_mutex);
+ spin_lock_init(&chan->pendq_lock);
+ for (i = 0; i < VK_MSGQ_MAX_NR; i++)
+ INIT_LIST_HEAD(&chan->pendq[i]);
+
+ return 0;
+}
+
+static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
+ struct bcm_vk_wkent *entry)
+{
+ struct bcm_vk_ctx *ctx;
+
+ spin_lock(&chan->pendq_lock);
+ list_add_tail(&entry->node, &chan->pendq[q_num]);
+ if (entry->to_h_msg) {
+ ctx = entry->ctx;
+ atomic_inc(&ctx->pend_cnt);
+ wake_up_interruptible(&ctx->rd_wq);
+ }
+ spin_unlock(&chan->pendq_lock);
+}
+
+static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
+ struct bcm_vk_wkent *entry,
+ struct _vk_data *data,
+ unsigned int num_planes)
+{
+ unsigned int i;
+ unsigned int item_cnt = 0;
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct vk_msg_blk *msg = &entry->to_v_msg[0];
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 ib_sgl_size = 0;
+ u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
+ u32 avail;
+ u32 q_num;
+
+ /* check if high watermark is hit, and if so, skip */
+ q_num = get_q_num(msg);
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+ avail = msgq_avail_space(msgq, qinfo);
+ if (avail < qinfo->q_low) {
+ dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
+ avail, qinfo->q_size);
+ return 0;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ if (data[i].address &&
+ (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
+ item_cnt++;
+ memcpy(buf, entry->dma[i].sglist, data[i].size);
+ ib_sgl_size += data[i].size;
+ buf += data[i].size;
+ }
+ }
+
+ dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
+ item_cnt, ib_sgl_size, vk->ib_sgl_size);
+
+ /* convert the size from bytes to message blocks, rounding up */
+ ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
+ >> VK_MSGQ_BLK_SZ_SHIFT;
+
+ return ib_sgl_size;
+}
+
+void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
+{
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];
+
+ vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
+}
+
+static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
+{
+ static u32 seq_num;
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct device *dev = &vk->pdev->dev;
+ struct vk_msg_blk *src = &entry->to_v_msg[0];
+
+ struct vk_msg_blk __iomem *dst;
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 q_num = get_q_num(src);
+ u32 wr_idx; /* local copy */
+ u32 i;
+ u32 avail;
+ u32 retry;
+
+ if (entry->to_v_blks != src->size + 1) {
+ dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
+ entry->to_v_blks,
+ src->size + 1,
+ get_msg_id(src),
+ src->function_id,
+ src->context_id);
+ return -EMSGSIZE;
+ }
+
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+
+ mutex_lock(&chan->msgq_mutex);
+
+ avail = msgq_avail_space(msgq, qinfo);
+
+ /* if not enough space, return EAGAIN and let the app handle it */
+ retry = 0;
+ while ((avail < entry->to_v_blks) &&
+ (retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
+ mutex_unlock(&chan->msgq_mutex);
+
+ msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
+ mutex_lock(&chan->msgq_mutex);
+ avail = msgq_avail_space(msgq, qinfo);
+ }
+ if (retry > BCM_VK_H2VK_ENQ_RETRY) {
+ mutex_unlock(&chan->msgq_mutex);
+ return -EAGAIN;
+ }
+
+ /* at this point, mutex is taken and there is enough space */
+ entry->seq_num = seq_num++; /* update debug seq number */
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+
+ if (wr_idx >= qinfo->q_size) {
+ dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
+ wr_idx, qinfo->q_size);
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
+ goto idx_err;
+ }
+
+ dst = msgq_blk_addr(qinfo, wr_idx);
+ for (i = 0; i < entry->to_v_blks; i++) {
+ memcpy_toio(dst, src, sizeof(*dst));
+
+ src++;
+ wr_idx = msgq_inc(qinfo, wr_idx, 1);
+ dst = msgq_blk_addr(qinfo, wr_idx);
+ }
+
+ /* flush the write pointer */
+ writel(wr_idx, &msgq->wr_idx);
+
+ /* log new info for debugging */
+ dev_dbg(dev,
+ "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
+ readl_relaxed(&msgq->num),
+ readl_relaxed(&msgq->rd_idx),
+ wr_idx,
+ entry->to_v_blks,
+ msgq_occupied(msgq, qinfo),
+ msgq_avail_space(msgq, qinfo),
+ readl_relaxed(&msgq->size));
+ /*
+ * Press the doorbell based on queue number. 1 is added to wr_idx
+ * so that the value 0 never appears on the VK side, keeping it
+ * distinguishable from the initial value.
+ */
+ bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
+idx_err:
+ mutex_unlock(&chan->msgq_mutex);
+ return 0;
+}
+
+int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
+ const pid_t pid, const u32 q_num)
+{
+ int rc = 0;
+ struct bcm_vk_wkent *entry;
+ struct device *dev = &vk->pdev->dev;
+
+ /*
+ * Check if the marker is still good. Sometimes, the PCIe interface
+ * may have gone down, and if we ship things down based on broken
+ * values, the kernel may panic.
+ */
+ if (!bcm_vk_msgq_marker_valid(vk)) {
+ dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
+ vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
+ return -EINVAL;
+ }
+
+ entry = kzalloc(sizeof(*entry) +
+ sizeof(struct vk_msg_blk), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* fill up necessary data */
+ entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
+ set_q_num(&entry->to_v_msg[0], q_num);
+ set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
+ entry->to_v_blks = 1; /* always 1 block */
+
+ entry->to_v_msg[0].cmd = shut_type;
+ entry->to_v_msg[0].arg = pid;
+
+ rc = bcm_to_v_msg_enqueue(vk, entry);
+ if (rc)
+ dev_err(dev,
+ "Sending shutdown message to q %d for pid %d fails.\n",
+ get_q_num(&entry->to_v_msg[0]), pid);
+
+ kfree(entry);
+
+ return rc;
+}
+
+static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
+ const u32 q_num)
+{
+ int rc = 0;
+ struct device *dev = &vk->pdev->dev;
+
+ /*
+ * Don't send anything down if the message queue is not initialized;
+ * if this is the reset session, clear it.
+ */
+ if (!bcm_vk_drv_access_ok(vk)) {
+ if (vk->reset_pid == pid)
+ vk->reset_pid = 0;
+ return -EPERM;
+ }
+
+ dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);
+
+ /* only need to do it if it is not the reset process */
+ if (vk->reset_pid != pid)
+ rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
+ else
+ /* put reset_pid to 0 if it is exiting last session */
+ vk->reset_pid = 0;
+
+ return rc;
+}
+
+static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
+ struct bcm_vk_msg_chan *chan,
+ u16 q_num,
+ u16 msg_id)
+{
+ bool found = false;
+ struct bcm_vk_wkent *entry;
+
+ spin_lock(&chan->pendq_lock);
+ list_for_each_entry(entry, &chan->pendq[q_num], node) {
+ if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
+ list_del(&entry->node);
+ found = true;
+ bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
+ break;
+ }
+ }
+ spin_unlock(&chan->pendq_lock);
+ return ((found) ? entry : NULL);
+}
+
+s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
+ struct vk_msg_blk *data;
+ struct vk_msg_blk __iomem *src;
+ struct vk_msg_blk *dst;
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ struct bcm_vk_wkent *entry;
+ u32 rd_idx, wr_idx;
+ u32 q_num, msg_id, j;
+ u32 num_blks;
+ s32 total = 0;
+ int cnt = 0;
+ int msg_processed = 0;
+ int max_msg_to_process;
+ bool exit_loop;
+
+ /*
+ * Drain all the messages from the queues, find each message's
+ * pending entry in the to_v queue based on msg_id & q_num, and
+ * move the entry to the to_h pending queue, where it waits for
+ * the user space program to extract it.
+ */
+ mutex_lock(&chan->msgq_mutex);
+
+ for (q_num = 0; q_num < chan->q_nr; q_num++) {
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+ max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;
+
+ rd_idx = readl_relaxed(&msgq->rd_idx);
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+ msg_processed = 0;
+ exit_loop = false;
+ while ((rd_idx != wr_idx) && !exit_loop) {
+ u8 src_size;
+
+ /*
+ * Make a local copy and get a pointer to the src blk.
+ * The rd_idx is masked before getting the pointer, to
+ * avoid out-of-bound access in case the interface goes
+ * down. It will end up pointing to the last block in
+ * the buffer, but the subsequent src->size check is
+ * able to catch this.
+ */
+ src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
+ src_size = readb(&src->size);
+
+ if ((rd_idx >= qinfo->q_size) ||
+ (src_size > (qinfo->q_size - 1))) {
+ dev_crit(dev,
+ "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
+ rd_idx, src_size, qinfo->q_size);
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk,
+ ERR_LOG_HOST_PCIE_DWN);
+ goto idx_err;
+ }
+
+ num_blks = src_size + 1;
+ data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
+ if (data) {
+ /* copy messages and linearize it */
+ dst = data;
+ for (j = 0; j < num_blks; j++) {
+ memcpy_fromio(dst, src, sizeof(*dst));
+
+ dst++;
+ rd_idx = msgq_inc(qinfo, rd_idx, 1);
+ src = msgq_blk_addr(qinfo, rd_idx);
+ }
+ total++;
+ } else {
+ /*
+ * if we could not allocate memory in kernel,
+ * that is fatal.
+ */
+ dev_crit(dev, "Kernel mem allocation failure.\n");
+ total = -ENOMEM;
+ goto idx_err;
+ }
+
+ /* flush rd pointer after a message is dequeued */
+ writel(rd_idx, &msgq->rd_idx);
+
+ /* log new info for debugging */
+ dev_dbg(dev,
+ "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
+ readl_relaxed(&msgq->num),
+ rd_idx,
+ wr_idx,
+ num_blks,
+ msgq_occupied(msgq, qinfo),
+ msgq_avail_space(msgq, qinfo),
+ readl_relaxed(&msgq->size));
+
+ /*
+ * No need to search if it is an autonomous one-way
+ * message from driver, as these messages do not bear
+ * a to_v pending item. Currently, only the shutdown
+ * message falls into this category.
+ */
+ if (data->function_id == VK_FID_SHUTDOWN) {
+ kfree(data);
+ continue;
+ }
+
+ msg_id = get_msg_id(data);
+ /* lookup original message in to_v direction */
+ entry = bcm_vk_dequeue_pending(vk,
+ &vk->to_v_msg_chan,
+ q_num,
+ msg_id);
+
+ /*
+ * If a response arrives that has no matching prior send,
+ * this is the location to handle it.
+ */
+ if (entry) {
+ entry->to_h_blks = num_blks;
+ entry->to_h_msg = data;
+ bcm_vk_append_pendq(&vk->to_h_msg_chan,
+ q_num, entry);
+
+ } else {
+ if (cnt++ < batch_log)
+ dev_info(dev,
+ "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
+ msg_id, data->function_id,
+ test_bit(msg_id, vk->bmap));
+ kfree(data);
+ }
+ /* Fetch wr_idx to handle more back-to-back events */
+ wr_idx = readl(&msgq->wr_idx);
+
+ /*
+ * Cap the maximum so that even when handling more back-to-back
+ * events we won't hold the CPU too long, or loop forever in case
+ * the rd/wr indexes are corrupted.
+ */
+ if (++msg_processed >= max_msg_to_process) {
+ dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
+ q_num, max_msg_to_process);
+ exit_loop = true;
+ }
+ }
+ }
+idx_err:
+ mutex_unlock(&chan->msgq_mutex);
+ dev_dbg(dev, "total %d drained from queues\n", total);
+
+ return total;
+}
+
+/*
+ * init routine for all required data structures
+ */
+static int bcm_vk_data_init(struct bcm_vk *vk)
+{
+ int i;
+
+ spin_lock_init(&vk->ctx_lock);
+ for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
+ vk->ctx[i].in_use = false;
+ vk->ctx[i].idx = i; /* self identity */
+ vk->ctx[i].miscdev = NULL;
+ }
+ spin_lock_init(&vk->msg_id_lock);
+ spin_lock_init(&vk->host_alert_lock);
+ vk->msg_id = 0;
+
+ /* initialize hash table */
+ for (i = 0; i < VK_PID_HT_SZ; i++)
+ INIT_LIST_HEAD(&vk->pid_ht[i].head);
+
+ return 0;
+}
+
+irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ if (!bcm_vk_drv_access_ok(vk)) {
+ dev_err(&vk->pdev->dev,
+ "Interrupt %d received when msgq not inited\n", irq);
+ goto skip_schedule_work;
+ }
+
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+skip_schedule_work:
+ return IRQ_HANDLED;
+}
+
+int bcm_vk_open(struct inode *inode, struct file *p_file)
+{
+ struct bcm_vk_ctx *ctx;
+ struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
+ struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+ int rc = 0;
+
+ /* get a context and set it up for file */
+ ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
+ if (!ctx) {
+ dev_err(dev, "Error allocating context\n");
+ rc = -ENOMEM;
+ } else {
+ /*
+ * Set up the context and replace private data with the context
+ * for other methods to use. The reason for the context is that
+ * multiple sessions are allowed to open the device node, and for
+ * each file open, when the upper layer queries the response, only
+ * those tied to that specific open should be returned. The
+ * context->idx is used for this binding.
+ */
+ ctx->miscdev = miscdev;
+ p_file->private_data = ctx;
+ dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
+ ctx->idx, ctx->pid);
+ }
+ return rc;
+}
+
+ssize_t bcm_vk_read(struct file *p_file,
+ char __user *buf,
+ size_t count,
+ loff_t *f_pos)
+{
+ ssize_t rc = -ENOMSG;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
+ miscdev);
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
+ struct bcm_vk_wkent *entry = NULL;
+ u32 q_num;
+ u32 rsp_length;
+ bool found = false;
+
+ if (!bcm_vk_drv_access_ok(vk))
+ return -EPERM;
+
+ dev_dbg(dev, "Buf count %zu\n", count);
+ found = false;
+
+ /*
+ * Search through the pendq on the to_h chan, and return only those
+ * that belong to the same context. The search always goes from the
+ * high to the low priority queues.
+ */
+ spin_lock(&chan->pendq_lock);
+ for (q_num = 0; q_num < chan->q_nr; q_num++) {
+ list_for_each_entry(entry, &chan->pendq[q_num], node) {
+ if (entry->ctx->idx == ctx->idx) {
+ if (count >=
+ (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
+ list_del(&entry->node);
+ atomic_dec(&ctx->pend_cnt);
+ found = true;
+ } else {
+ /* buffer not big enough */
+ rc = -EMSGSIZE;
+ }
+ goto read_loop_exit;
+ }
+ }
+ }
+read_loop_exit:
+ spin_unlock(&chan->pendq_lock);
+
+ if (found) {
+ /* retrieve the passed down msg_id */
+ set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
+ rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
+ if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
+ rc = rsp_length;
+
+ bcm_vk_free_wkent(dev, entry);
+ } else if (rc == -EMSGSIZE) {
+ struct vk_msg_blk tmp_msg = entry->to_h_msg[0];
+
+ /*
+ * in this case, return just the first block, so
+ * that the app knows what size it is looking for.
+ */
+ set_msg_id(&tmp_msg, entry->usr_msg_id);
+ tmp_msg.size = entry->to_h_blks - 1;
+ if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
+ dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+
+ssize_t bcm_vk_write(struct file *p_file,
+ const char __user *buf,
+ size_t count,
+ loff_t *f_pos)
+{
+ ssize_t rc;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
+ miscdev);
+ struct bcm_vk_msgq __iomem *msgq;
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_wkent *entry;
+ u32 sgl_extra_blks;
+ u32 q_num;
+ u32 msg_size;
+ u32 msgq_size;
+
+ if (!bcm_vk_drv_access_ok(vk))
+ return -EPERM;
+
+ dev_dbg(dev, "Msg count %zu\n", count);
+
+ /* first, sanity check that count is a multiple of the basic block size */
+ if (count & (VK_MSGQ_BLK_SIZE - 1)) {
+ dev_err(dev, "Failure with size %zu not multiple of %zu\n",
+ count, VK_MSGQ_BLK_SIZE);
+ rc = -EINVAL;
+ goto write_err;
+ }
+
+ /* allocate the work entry + buffer for size count and inband sgl */
+ entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
+ GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto write_err;
+ }
+
+ /* now copy msg from user space, and then formulate the work entry */
+ if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
+ rc = -EFAULT;
+ goto write_free_ent;
+ }
+
+ entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
+ entry->ctx = ctx;
+
+ /* check the block size, which must not exceed the queue space */
+ q_num = get_q_num(&entry->to_v_msg[0]);
+ msgq = vk->to_v_msg_chan.msgq[q_num];
+ msgq_size = readl_relaxed(&msgq->size);
+ if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
+ > (msgq_size - 1)) {
+ dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
+ entry->to_v_blks, msgq_size - 1);
+ rc = -EINVAL;
+ goto write_free_ent;
+ }
+
+ /* Use internal message id */
+ entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
+ rc = bcm_vk_get_msg_id(vk);
+ if (rc == VK_MSG_ID_OVERFLOW) {
+ dev_err(dev, "msg_id overflow\n");
+ rc = -EOVERFLOW;
+ goto write_free_ent;
+ }
+ set_msg_id(&entry->to_v_msg[0], rc);
+ ctx->q_num = q_num;
+
+ dev_dbg(dev,
+ "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
+ ctx->q_num, ctx->idx, entry->usr_msg_id,
+ get_msg_id(&entry->to_v_msg[0]));
+
+ if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
+ /* Convert any pointers to sg list */
+ unsigned int num_planes;
+ int dir;
+ struct _vk_data *data;
+
+ /*
+ * Check if we are in reset; if so, no buffer transfer is
+ * allowed, so return an error.
+ */
+ if (vk->reset_pid) {
+ dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
+ ctx->pid);
+ rc = -EACCES;
+ goto write_free_msgid;
+ }
+
+ num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
+ if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
+ dir = DMA_FROM_DEVICE;
+ else
+ dir = DMA_TO_DEVICE;
+
+ /* Calculate vk_data location */
+ /* Go to end of the message */
+ msg_size = entry->to_v_msg[0].size;
+ if (msg_size > entry->to_v_blks) {
+ rc = -EMSGSIZE;
+ goto write_free_msgid;
+ }
+
+ data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];
+
+ /* Now back up to the start of the pointers */
+ data -= num_planes;
+
+ /* Convert user addresses to DMA SG List */
+ rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
+ if (rc)
+ goto write_free_msgid;
+
+ atomic_inc(&ctx->dma_cnt);
+ /* try to embed inband sgl */
+ sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
+ num_planes);
+ entry->to_v_blks += sgl_extra_blks;
+ entry->to_v_msg[0].size += sgl_extra_blks;
+ } else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
+ entry->to_v_msg[0].context_id == VK_NEW_CTX) {
+ /*
+ * Init happens in 2 stages; only the first stage contains the
+ * pid that needs translating.
+ */
+ pid_t org_pid, pid;
+
+ /*
+ * translate the pid into the unique host space, as the user
+ * may run sessions inside containers or process
+ * namespaces.
+ */
+#define VK_MSG_PID_MASK 0xffffff00
+#define VK_MSG_PID_SH 8
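+ /* the pid occupies bits [31:8] of arg; the low byte is preserved */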
+ org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
+ >> VK_MSG_PID_SH;
+
+ pid = task_tgid_nr(current);
+ entry->to_v_msg[0].arg =
+ (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
+ (pid << VK_MSG_PID_SH);
+ if (org_pid != pid)
+ dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
+ org_pid, org_pid, pid, pid);
+ }
+
+ /*
+ * store work entry to pending queue until a response is received.
+ * This needs to be done before enqueuing the message
+ */
+ bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);
+
+ rc = bcm_to_v_msg_enqueue(vk, entry);
+ if (rc) {
+ dev_err(dev, "Fail to enqueue msg to to_v queue\n");
+
+ /* remove message from pending list */
+ entry = bcm_vk_dequeue_pending
+ (vk,
+ &vk->to_v_msg_chan,
+ q_num,
+ get_msg_id(&entry->to_v_msg[0]));
+ goto write_free_ent;
+ }
+
+ return count;
+
+write_free_msgid:
+ bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
+write_free_ent:
+ kfree(entry);
+write_err:
+ return rc;
+}
+
+__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
+{
+ __poll_t ret = 0;
+ int cnt;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+
+ poll_wait(p_file, &ctx->rd_wq, wait);
+
+ cnt = atomic_read(&ctx->pend_cnt);
+ if (cnt) {
+ ret = (__force __poll_t)(POLLIN | POLLRDNORM);
+ if (cnt < 0) {
+ dev_err(dev, "Error cnt %d, setting back to 0", cnt);
+ atomic_set(&ctx->pend_cnt, 0);
+ }
+ }
+
+ return ret;
+}
+
+int bcm_vk_release(struct inode *inode, struct file *p_file)
+{
+ int ret;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+ pid_t pid = ctx->pid;
+ int dma_cnt;
+ unsigned long timeout, start_time;
+
+ /*
+ * If there are outstanding DMA transactions, delay long enough to
+ * ensure that the card side has stopped touching the host buffer
+ * and its SGL list. A race condition could happen if the host app
+ * is killed abruptly, e.g. kill -9, while some DMA transfers are
+ * still in flight. Nothing can be done except delay, as the host
+ * side runs in a completely async fashion.
+ */
+ start_time = jiffies;
+ timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
+ dma_cnt, ctx->idx, pid);
+ break;
+ }
+ dma_cnt = atomic_read(&ctx->dma_cnt);
+ cpu_relax();
+ cond_resched();
+ } while (dma_cnt);
+ dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
+ ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
+
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
+
+ ret = bcm_vk_free_ctx(vk, ctx);
+ if (ret == 0)
+ ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
+ else
+ ret = 0;
+
+ kref_put(&vk->kref, bcm_vk_release_data);
+
+ return ret;
+}
+
+int bcm_vk_msg_init(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ int ret;
+
+ if (bcm_vk_data_init(vk)) {
+ dev_err(dev, "Error initializing internal data structures\n");
+ return -EINVAL;
+ }
+
+ if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
+ bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
+ dev_err(dev, "Error initializing communication channel\n");
+ return -EIO;
+ }
+
+ /* read msgq info if ready */
+ ret = bcm_vk_sync_msgq(vk, false);
+ if (ret && (ret != -EAGAIN)) {
+ dev_err(dev, "Error reading comm msg Q info\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void bcm_vk_msg_remove(struct bcm_vk *vk)
+{
+ bcm_vk_blk_drv_access(vk);
+
+ /* drain all pending items */
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
+}
+
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h
new file mode 100644
index 000000000000..4eaad84825d6
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_MSG_H
+#define BCM_VK_MSG_H
+
+#include <uapi/linux/misc/bcm_vk.h>
+#include "bcm_vk_sg.h"
+
+/* Single message queue control structure */
+struct bcm_vk_msgq {
+ u16 type; /* queue type */
+ u16 num; /* queue number */
+ u32 start; /* offset in BAR1 where the queue memory starts */
+
+ u32 rd_idx; /* read idx */
+ u32 wr_idx; /* write idx */
+
+ u32 size; /*
+ * size, in number of 16-byte blocks,
+ * to align with the message data structure.
+ */
+ u32 nxt; /*
+ * offset to the next msg queue struct.
+ * This provides flexibility for alignment purposes.
+ */
+
+/* Least significant 16 bits in below field hold doorbell register offset */
+#define DB_SHIFT 16
+
+ u32 db_offset; /* queue doorbell register offset in BAR0 */
+
+ u32 rsvd;
+};
+
+/*
+ * Structure to record static info from the msgq sync. We keep local
+ * copies of some of these variables for both performance and checking
+ * purposes.
+ */
+struct bcm_vk_sync_qinfo {
+ void __iomem *q_start;
+ u32 q_size;
+ u32 q_mask;
+ u32 q_low;
+ u32 q_db_offset;
+};
+
+#define VK_MSGQ_MAX_NR 4 /* Maximum number of message queues */
+
+/*
+ * message block - basic unit in the message where a message's size is always
+ * N x sizeof(basic_block)
+ */
+struct vk_msg_blk {
+ u8 function_id;
+#define VK_FID_TRANS_BUF 5
+#define VK_FID_SHUTDOWN 8
+#define VK_FID_INIT 9
+ u8 size; /* size of the message in number of vk_msg_blk's */
+ u16 trans_id; /* transport id, queue & msg_id */
+ u32 context_id;
+#define VK_NEW_CTX 0
+ u32 cmd;
+#define VK_CMD_PLANES_MASK 0x000f /* number of planes to up/download */
+#define VK_CMD_UPLOAD 0x0400 /* memory transfer to vk */
+#define VK_CMD_DOWNLOAD 0x0500 /* memory transfer from vk */
+#define VK_CMD_MASK 0x0f00 /* command mask */
+ u32 arg;
+};
+
+/* vk_msg_blk is 16 bytes fixed */
+#define VK_MSGQ_BLK_SIZE (sizeof(struct vk_msg_blk))
+/* shift for fast division of basic msg blk size */
+#define VK_MSGQ_BLK_SZ_SHIFT 4
+
+/* use msg_id 0 for any simplex host2vk communication */
+#define VK_SIMPLEX_MSG_ID 0
+
+/* context per session opening of sysfs */
+struct bcm_vk_ctx {
+ struct list_head node; /* use for linkage in Hash Table */
+ unsigned int idx;
+ bool in_use;
+ pid_t pid;
+ u32 hash_idx;
+ u32 q_num; /* queue number used by the stream */
+ struct miscdevice *miscdev;
+ atomic_t pend_cnt; /* number of items pending to be read from host */
+ atomic_t dma_cnt; /* any dma transaction outstanding */
+ wait_queue_head_t rd_wq;
+};
+
+/* pid hash table entry */
+struct bcm_vk_ht_entry {
+ struct list_head head;
+};
+
+#define VK_DMA_MAX_ADDRS 4 /* Max 4 DMA Addresses */
+/* structure for house keeping a single work entry */
+struct bcm_vk_wkent {
+ struct list_head node; /* for linking purpose */
+ struct bcm_vk_ctx *ctx;
+
+ /* Store up to 4 dma pointers */
+ struct bcm_vk_dma dma[VK_DMA_MAX_ADDRS];
+
+ u32 to_h_blks; /* response */
+ struct vk_msg_blk *to_h_msg;
+
+ /*
+	 * put the to_v_msg at the end so that we can simply append the
+	 * to_v msg to the end of the allocated block
+ */
+ u32 usr_msg_id;
+ u32 to_v_blks;
+ u32 seq_num;
+	struct vk_msg_blk to_v_msg[];
+};
+
+/* queue stats counters */
+struct bcm_vk_qs_cnts {
+ u32 cnt; /* general counter, used to limit output */
+ u32 acc_sum;
+ u32 max_occ; /* max during a sampling period */
+ u32 max_abs; /* the abs max since reset */
+};
+
+/* control channel structure for either to_v or to_h communication */
+struct bcm_vk_msg_chan {
+ u32 q_nr;
+ /* Mutex to access msgq */
+ struct mutex msgq_mutex;
+ /* pointing to BAR locations */
+ struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR];
+ /* Spinlock to access pending queue */
+ spinlock_t pendq_lock;
+ /* for temporary storing pending items, one for each queue */
+ struct list_head pendq[VK_MSGQ_MAX_NR];
+ /* static queue info from the sync */
+ struct bcm_vk_sync_qinfo sync_qinfo[VK_MSGQ_MAX_NR];
+};
+
+/* total number of message queues allowed by the driver */
+#define VK_MSGQ_PER_CHAN_MAX 3
+#define VK_MSGQ_NUM_DEFAULT (VK_MSGQ_PER_CHAN_MAX - 1)
+
+/* total number of supported ctx, 32 ctx each for 5 components */
+#define VK_CMPT_CTX_MAX (32 * 5)
+
+/* hash table defines to store the opened FDs */
+#define VK_PID_HT_SHIFT_BIT 7 /* 128 */
+#define VK_PID_HT_SZ BIT(VK_PID_HT_SHIFT_BIT)
+
+/* The following are offsets of DDR info provided by the vk card */
+#define VK_BAR0_SEG_SIZE (4 * SZ_1K) /* segment size for BAR0 */
+
+/* shutdown types supported */
+#define VK_SHUTDOWN_PID 1
+#define VK_SHUTDOWN_GRACEFUL 2
+
+#endif
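
The msgq header above packs bookkeeping into u32 fields: the low 16 bits of db_offset hold the doorbell register offset (per the DB_SHIFT comment), and queue sizes are counted in 16-byte blocks so VK_MSGQ_BLK_SZ_SHIFT turns byte counts into block counts without a division. A minimal userspace sketch of that arithmetic, assuming only the packing described in the comments (helper names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

#define DB_SHIFT             16
#define VK_MSGQ_BLK_SZ_SHIFT 4   /* 16-byte basic block */

/* Extract the doorbell register offset from the packed field
 * (low 16 bits, per the DB_SHIFT comment above).
 */
static uint32_t db_reg_offset(uint32_t db_offset)
{
	return db_offset & ((1u << DB_SHIFT) - 1);
}

/* Convert a message size in bytes to a count of 16-byte blocks,
 * using the shift instead of a division.
 */
static uint32_t msg_blks(uint32_t bytes)
{
	return bytes >> VK_MSGQ_BLK_SZ_SHIFT;
}

int main(void)
{
	printf("db reg offset: 0x%x\n", db_reg_offset(0x00030868));
	printf("48 bytes = %u blocks\n", msg_blks(48));
	return 0;
}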
diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.c b/drivers/misc/bcm-vk/bcm_vk_sg.c
new file mode 100644
index 000000000000..2e9daaf3e492
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_sg.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/pgtable.h>
+#include <linux/vmalloc.h>
+
+#include <asm/page.h>
+#include <asm/unaligned.h>
+
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk.h"
+#include "bcm_vk_msg.h"
+#include "bcm_vk_sg.h"
+
+/*
+ * Valkyrie has a hardware limitation of 16M transfer size.
+ * So limit the SGL chunks to 16M.
+ */
+#define BCM_VK_MAX_SGL_CHUNK SZ_16M
+
+static int bcm_vk_dma_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int dir,
+ struct _vk_data *vkdata);
+static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);
+
+/* Uncomment to dump SGLIST */
+/* #define BCM_VK_DUMP_SGLIST */
+
+static int bcm_vk_dma_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int direction,
+ struct _vk_data *vkdata)
+{
+ dma_addr_t addr, sg_addr;
+ int err;
+ int i;
+ int offset;
+ u32 size;
+ u32 remaining_size;
+ u32 transfer_size;
+ u64 data;
+ unsigned long first, last;
+ struct _vk_data *sgdata;
+
+ /* Get 64-bit user address */
+ data = get_unaligned(&vkdata->address);
+
+ /* offset into first page */
+ offset = offset_in_page(data);
+
+ /* Calculate number of pages */
+ first = (data & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ dma->nr_pages = last - first + 1;
+
+ /* Allocate DMA pages */
+ dma->pages = kmalloc_array(dma->nr_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!dma->pages)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
+ data, vkdata->size, dma->nr_pages);
+
+ dma->direction = direction;
+
+ /* Get user pages into memory */
+ err = get_user_pages_fast(data & PAGE_MASK,
+ dma->nr_pages,
+ direction == DMA_FROM_DEVICE,
+ dma->pages);
+ if (err != dma->nr_pages) {
+ dma->nr_pages = (err >= 0) ? err : 0;
+ dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
+ err, dma->nr_pages);
+ return err < 0 ? err : -EINVAL;
+ }
+
+	/* Max size of sg list is one entry per mapped page + fields at start */
+ dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
+ (sizeof(u32) * SGLIST_VKDATA_START);
+
+ /* Allocate sglist */
+ dma->sglist = dma_alloc_coherent(dev,
+ dma->sglen,
+ &dma->handle,
+ GFP_KERNEL);
+ if (!dma->sglist)
+ return -ENOMEM;
+
+ dma->sglist[SGLIST_NUM_SG] = 0;
+ dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
+ remaining_size = vkdata->size;
+ sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
+
+ /* Map all pages into DMA */
+ size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
+ remaining_size -= size;
+ sg_addr = dma_map_page(dev,
+ dma->pages[0],
+ offset,
+ size,
+ dma->direction);
+ transfer_size = size;
+ if (unlikely(dma_mapping_error(dev, sg_addr))) {
+ __free_page(dma->pages[0]);
+ return -EIO;
+ }
+
+ for (i = 1; i < dma->nr_pages; i++) {
+ size = min_t(size_t, PAGE_SIZE, remaining_size);
+ remaining_size -= size;
+ addr = dma_map_page(dev,
+ dma->pages[i],
+ 0,
+ size,
+ dma->direction);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ __free_page(dma->pages[i]);
+ return -EIO;
+ }
+
+ /*
+ * Compress SG list entry when pages are contiguous
+	 * and transfer size is less than or equal to BCM_VK_MAX_SGL_CHUNK
+ */
+ if ((addr == (sg_addr + transfer_size)) &&
+ ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
+ /* pages are contiguous, add to same sg entry */
+ transfer_size += size;
+ } else {
+ /* pages are not contiguous, write sg entry */
+ sgdata->size = transfer_size;
+ put_unaligned(sg_addr, (u64 *)&sgdata->address);
+ dma->sglist[SGLIST_NUM_SG]++;
+
+ /* start new sg entry */
+ sgdata++;
+ sg_addr = addr;
+ transfer_size = size;
+ }
+ }
+ /* Write last sg list entry */
+ sgdata->size = transfer_size;
+ put_unaligned(sg_addr, (u64 *)&sgdata->address);
+ dma->sglist[SGLIST_NUM_SG]++;
+
+ /* Update pointers and size field to point to sglist */
+ put_unaligned((u64)dma->handle, &vkdata->address);
+ vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
+ (sizeof(u32) * SGLIST_VKDATA_START);
+
+#ifdef BCM_VK_DUMP_SGLIST
+ dev_dbg(dev,
+ "sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
+ (u64)dma->sglist,
+ dma->handle,
+ dma->sglen,
+ vkdata->size);
+ for (i = 0; i < vkdata->size / sizeof(u32); i++)
+ dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
+#endif
+
+ return 0;
+}
+
+int bcm_vk_sg_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int dir,
+ struct _vk_data *vkdata,
+ int num)
+{
+ int i;
+ int rc = -EINVAL;
+
+ /* Convert user addresses to DMA SG List */
+ for (i = 0; i < num; i++) {
+ if (vkdata[i].size && vkdata[i].address) {
+ /*
+ * If both size and address are non-zero
+ * then DMA alloc.
+ */
+ rc = bcm_vk_dma_alloc(dev,
+ &dma[i],
+ dir,
+ &vkdata[i]);
+ } else if (vkdata[i].size ||
+ vkdata[i].address) {
+ /*
+			 * If only one of size and address is
+			 * zero, there is a problem.
+ */
+ dev_err(dev,
+ "Invalid vkdata %x 0x%x 0x%llx\n",
+ i, vkdata[i].size, vkdata[i].address);
+ rc = -EINVAL;
+ } else {
+ /*
+ * If size and address are both zero
+ * don't convert, but return success.
+ */
+ rc = 0;
+ }
+
+ if (rc)
+ goto fail_alloc;
+ }
+ return rc;
+
+fail_alloc:
+ while (i > 0) {
+ i--;
+ if (dma[i].sglist)
+ bcm_vk_dma_free(dev, &dma[i]);
+ }
+ return rc;
+}
+
+static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
+{
+ dma_addr_t addr;
+ int i;
+ int num_sg;
+ u32 size;
+ struct _vk_data *vkdata;
+
+ dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);
+
+ /* Unmap all pages in the sglist */
+ num_sg = dma->sglist[SGLIST_NUM_SG];
+ vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
+ for (i = 0; i < num_sg; i++) {
+ size = vkdata[i].size;
+ addr = get_unaligned(&vkdata[i].address);
+
+ dma_unmap_page(dev, addr, size, dma->direction);
+ }
+
+ /* Free allocated sglist */
+ dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);
+
+ /* Release lock on all pages */
+ for (i = 0; i < dma->nr_pages; i++)
+ put_page(dma->pages[i]);
+
+ /* Free allocated dma pages */
+ kfree(dma->pages);
+ dma->sglist = NULL;
+
+ return 0;
+}
+
+int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
+ int *proc_cnt)
+{
+ int i;
+
+ *proc_cnt = 0;
+ /* Unmap and free all pages and sglists */
+ for (i = 0; i < num; i++) {
+ if (dma[i].sglist) {
+ bcm_vk_dma_free(dev, &dma[i]);
+ *proc_cnt += 1;
+ }
+ }
+
+ return 0;
+}
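
The coalescing loop in bcm_vk_dma_alloc() above merges physically contiguous pages into a single SG entry until the 16M chunk limit is hit, then flushes the entry and starts a new one. A hedged standalone model of that merge rule over plain arrays (struct range and coalesce() are illustrative names, not driver API):

#include <stdint.h>
#include <stdio.h>

#define MAX_SGL_CHUNK (16u * 1024 * 1024)  /* mirrors BCM_VK_MAX_SGL_CHUNK */

struct range { uint64_t addr; uint32_t size; };

/* Merge contiguous input ranges into output entries, never letting a
 * merged entry exceed MAX_SGL_CHUNK; returns the number of entries
 * written.  This mirrors the loop structure in bcm_vk_dma_alloc().
 */
static int coalesce(const struct range *in, int n, struct range *out)
{
	int nout = 0, i;
	uint64_t cur_addr;
	uint32_t cur_size;

	if (n == 0)
		return 0;

	cur_addr = in[0].addr;
	cur_size = in[0].size;

	for (i = 1; i < n; i++) {
		if (in[i].addr == cur_addr + cur_size &&
		    cur_size + in[i].size <= MAX_SGL_CHUNK) {
			cur_size += in[i].size;	/* contiguous: extend entry */
		} else {
			out[nout].addr = cur_addr;	/* flush current entry */
			out[nout].size = cur_size;
			nout++;
			cur_addr = in[i].addr;	/* start a new entry */
			cur_size = in[i].size;
		}
	}
	out[nout].addr = cur_addr;	/* flush the last entry */
	out[nout].size = cur_size;
	return nout + 1;
}

int main(void)
{
	struct range in[] = { {0x1000, 0x1000}, {0x2000, 0x1000}, {0x9000, 0x1000} };
	struct range out[3];
	int n = coalesce(in, 3, out), i;

	for (i = 0; i < n; i++)
		printf("entry %d: 0x%llx +0x%x\n", i,
		       (unsigned long long)out[i].addr, out[i].size);
	return 0;
}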
diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.h b/drivers/misc/bcm-vk/bcm_vk_sg.h
new file mode 100644
index 000000000000..81b3d0976ddb
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_sg.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_SG_H
+#define BCM_VK_SG_H
+
+#include <linux/dma-mapping.h>
+
+struct bcm_vk_dma {
+ /* for userland buffer */
+ struct page **pages;
+ int nr_pages;
+
+ /* common */
+ dma_addr_t handle;
+ /*
+ * sglist is of the following LE format
+ * [U32] num_sg = number of sg addresses (N)
+ * [U32] totalsize = totalsize of data being transferred in sglist
+ * [U32] size[0] = size of data in address0
+ * [U32] addr_l[0] = lower 32-bits of address0
+ * [U32] addr_h[0] = higher 32-bits of address0
+ * ..
+ * [U32] size[N-1] = size of data in addressN-1
+ * [U32] addr_l[N-1] = lower 32-bits of addressN-1
+ * [U32] addr_h[N-1] = higher 32-bits of addressN-1
+ */
+ u32 *sglist;
+#define SGLIST_NUM_SG 0
+#define SGLIST_TOTALSIZE 1
+#define SGLIST_VKDATA_START 2
+
+ int sglen; /* Length (bytes) of sglist */
+ int direction;
+};
+
+struct _vk_data {
+ u32 size; /* data size in bytes */
+ u64 address; /* Pointer to data */
+} __packed;
+
+/*
+ * Scatter-gather DMA buffer API.
+ *
+ * These functions provide a simple way to create a page list and a
+ * scatter-gather list from userspace address and map the memory
+ * for DMA operation.
+ */
+int bcm_vk_sg_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int dir,
+ struct _vk_data *vkdata,
+ int num);
+
+int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
+ int *proc_cnt);
+
+#endif
+
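The comment in struct bcm_vk_dma above fixes the sglist wire format: two u32 header words (num_sg, totalsize) followed by 3-word {size, addr_lo, addr_hi} entries. A hedged userspace walker over a buffer already in that layout (dump_sglist() is an illustrative helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Word indices from the header: [0]=num_sg, [1]=totalsize, entries after. */
#define SGLIST_NUM_SG        0
#define SGLIST_TOTALSIZE     1
#define SGLIST_VKDATA_START  2

/* Walk an sglist laid out as described in struct bcm_vk_dma and print
 * each {size, address} pair.  Addresses are split across two u32 words,
 * low word first, matching the LE format in the comment.
 */
static void dump_sglist(const uint32_t *sgl)
{
	uint32_t num_sg = sgl[SGLIST_NUM_SG];
	uint32_t i, w = SGLIST_VKDATA_START;

	printf("num_sg=%u totalsize=%u\n", num_sg, sgl[SGLIST_TOTALSIZE]);
	for (i = 0; i < num_sg; i++) {
		uint32_t size   = sgl[w++];
		uint64_t addr_l = sgl[w++];
		uint64_t addr_h = sgl[w++];

		printf("  sg[%u]: size=%u addr=0x%llx\n", i, size,
		       (unsigned long long)(addr_l | (addr_h << 32)));
	}
}

int main(void)
{
	/* one entry: 4096 bytes at 0x1_0000_2000 */
	uint32_t sgl[] = { 1, 4096, 4096, 0x00002000, 0x1 };

	dump_sglist(sgl);
	return 0;
}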
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
new file mode 100644
index 000000000000..4d02692ecfc7
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include "bcm_vk.h"
+
+/* TTYVK base offset is 0x300000 into BAR1 */
+#define BAR1_TTYVK_BASE_OFFSET 0x300000
+/* Each TTYVK channel (TO or FROM) is 0x100000 */
+#define BAR1_TTYVK_CHAN_OFFSET 0x100000
+/* Each TTYVK channel has TO and FROM, hence the * 2 */
+#define BAR1_TTYVK_BASE(index) (BAR1_TTYVK_BASE_OFFSET + \
+ ((index) * BAR1_TTYVK_CHAN_OFFSET * 2))
+/* TO TTYVK channel base comes before FROM for each index */
+#define TO_TTYK_BASE(index) BAR1_TTYVK_BASE(index)
+#define FROM_TTYK_BASE(index) (BAR1_TTYVK_BASE(index) + \
+ BAR1_TTYVK_CHAN_OFFSET)
+
+struct bcm_vk_tty_chan {
+ u32 reserved;
+ u32 size;
+ u32 wr;
+ u32 rd;
+ u32 *data;
+};
+
+#define VK_BAR_CHAN(v, DIR, e) ((v)->DIR##_offset \
+ + offsetof(struct bcm_vk_tty_chan, e))
+#define VK_BAR_CHAN_SIZE(v, DIR) VK_BAR_CHAN(v, DIR, size)
+#define VK_BAR_CHAN_WR(v, DIR) VK_BAR_CHAN(v, DIR, wr)
+#define VK_BAR_CHAN_RD(v, DIR) VK_BAR_CHAN(v, DIR, rd)
+#define VK_BAR_CHAN_DATA(v, DIR, off) (VK_BAR_CHAN(v, DIR, data) + (off))
+
+#define VK_BAR0_REGSEG_TTY_DB_OFFSET 0x86c
+
+/* Poll every 1/10 of a second - temporary hack until we use the MSI interrupt */
+#define SERIAL_TIMER_VALUE (HZ / 10)
+
+static void bcm_vk_tty_poll(struct timer_list *t)
+{
+ struct bcm_vk *vk = from_timer(vk, t, serial_timer);
+
+ queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
+ mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
+}
+
+irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void bcm_vk_tty_wq_handler(struct work_struct *work)
+{
+ struct bcm_vk *vk = container_of(work, struct bcm_vk, tty_wq_work);
+ struct bcm_vk_tty *vktty;
+ int card_status;
+ int count;
+ unsigned char c;
+ int i;
+ int wr;
+
+ card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
+ if (BCM_VK_INTF_IS_DOWN(card_status))
+ return;
+
+ for (i = 0; i < BCM_VK_NUM_TTY; i++) {
+ count = 0;
+ /* Check the card status that the tty channel is ready */
+ if ((card_status & BIT(i)) == 0)
+ continue;
+
+ vktty = &vk->tty[i];
+
+ /* Don't increment read index if tty app is closed */
+ if (!vktty->is_opened)
+ continue;
+
+ /* Fetch the wr offset in buffer from VK */
+ wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, from));
+
+		/* safe to ignore until the BAR read gives a proper size */
+ if (vktty->from_size == 0)
+ continue;
+
+ if (wr >= vktty->from_size) {
+ dev_err(&vk->pdev->dev,
+ "ERROR: wq handler ttyVK%d wr:0x%x > 0x%x\n",
+ i, wr, vktty->from_size);
+ /* Need to signal and close device in this case */
+ continue;
+ }
+
+ /*
+ * Simple read of circular buffer and
+ * insert into tty flip buffer
+ */
+ while (vk->tty[i].rd != wr) {
+ c = vkread8(vk, BAR_1,
+ VK_BAR_CHAN_DATA(vktty, from, vktty->rd));
+ vktty->rd++;
+ if (vktty->rd >= vktty->from_size)
+ vktty->rd = 0;
+ tty_insert_flip_char(&vktty->port, c, TTY_NORMAL);
+ count++;
+ }
+
+ if (count) {
+ tty_flip_buffer_push(&vktty->port);
+
+ /* Update read offset from shadow register to card */
+ vkwrite32(vk, vktty->rd, BAR_1,
+ VK_BAR_CHAN_RD(vktty, from));
+ }
+ }
+}
+
+static int bcm_vk_tty_open(struct tty_struct *tty, struct file *file)
+{
+ int card_status;
+ struct bcm_vk *vk;
+ struct bcm_vk_tty *vktty;
+ int index;
+
+ /* initialize the pointer in case something fails */
+ tty->driver_data = NULL;
+
+ vk = (struct bcm_vk *)dev_get_drvdata(tty->dev);
+ index = tty->index;
+
+ if (index >= BCM_VK_NUM_TTY)
+ return -EINVAL;
+
+ vktty = &vk->tty[index];
+
+ vktty->pid = task_pid_nr(current);
+ vktty->to_offset = TO_TTYK_BASE(index);
+ vktty->from_offset = FROM_TTYK_BASE(index);
+
+	/* Do not allow tty device to be opened if tty on card is not ready */
+ card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
+ if (BCM_VK_INTF_IS_DOWN(card_status) || ((card_status & BIT(index)) == 0))
+ return -EBUSY;
+
+ /*
+ * Get shadow registers of the buffer sizes and the "to" write offset
+ * and "from" read offset
+ */
+ vktty->to_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, to));
+ vktty->wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, to));
+ vktty->from_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, from));
+ vktty->rd = vkread32(vk, BAR_1, VK_BAR_CHAN_RD(vktty, from));
+ vktty->is_opened = true;
+
+ if (tty->count == 1 && !vktty->irq_enabled) {
+ timer_setup(&vk->serial_timer, bcm_vk_tty_poll, 0);
+ mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ }
+ return 0;
+}
+
+static void bcm_vk_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct bcm_vk *vk = dev_get_drvdata(tty->dev);
+
+ if (tty->index >= BCM_VK_NUM_TTY)
+ return;
+
+ vk->tty[tty->index].is_opened = false;
+
+ if (tty->count == 1)
+ del_timer_sync(&vk->serial_timer);
+}
+
+static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val)
+{
+ vkwrite32(vk, db_val, BAR_0,
+ VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET);
+}
+
+static int bcm_vk_tty_write(struct tty_struct *tty,
+ const unsigned char *buffer,
+ int count)
+{
+ int index;
+ struct bcm_vk *vk;
+ struct bcm_vk_tty *vktty;
+ int i;
+
+ index = tty->index;
+ vk = dev_get_drvdata(tty->dev);
+ vktty = &vk->tty[index];
+
+	/* Simply write each byte to the circular buffer */
+ for (i = 0; i < count; i++) {
+ vkwrite8(vk, buffer[i], BAR_1,
+ VK_BAR_CHAN_DATA(vktty, to, vktty->wr));
+ vktty->wr++;
+ if (vktty->wr >= vktty->to_size)
+ vktty->wr = 0;
+ }
+ /* Update write offset from shadow register to card */
+ vkwrite32(vk, vktty->wr, BAR_1, VK_BAR_CHAN_WR(vktty, to));
+ bcm_vk_tty_doorbell(vk, 0);
+
+ return count;
+}
+
+static int bcm_vk_tty_write_room(struct tty_struct *tty)
+{
+ struct bcm_vk *vk = dev_get_drvdata(tty->dev);
+
+ return vk->tty[tty->index].to_size - 1;
+}
+
+static const struct tty_operations serial_ops = {
+ .open = bcm_vk_tty_open,
+ .close = bcm_vk_tty_close,
+ .write = bcm_vk_tty_write,
+ .write_room = bcm_vk_tty_write_room,
+};
+
+int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
+{
+ int i;
+ int err;
+ struct tty_driver *tty_drv;
+ struct device *dev = &vk->pdev->dev;
+
+ tty_drv = tty_alloc_driver
+ (BCM_VK_NUM_TTY,
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(tty_drv))
+ return PTR_ERR(tty_drv);
+
+ /* Save struct tty_driver for uninstalling the device */
+ vk->tty_drv = tty_drv;
+
+ /* initialize the tty driver */
+ tty_drv->driver_name = KBUILD_MODNAME;
+ tty_drv->name = kstrdup(name, GFP_KERNEL);
+ if (!tty_drv->name) {
+ err = -ENOMEM;
+ goto err_put_tty_driver;
+ }
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->init_termios = tty_std_termios;
+ tty_set_operations(tty_drv, &serial_ops);
+
+ /* register the tty driver */
+ err = tty_register_driver(tty_drv);
+ if (err) {
+ dev_err(dev, "tty_register_driver failed\n");
+ goto err_kfree_tty_name;
+ }
+
+ for (i = 0; i < BCM_VK_NUM_TTY; i++) {
+ struct device *tty_dev;
+
+ tty_port_init(&vk->tty[i].port);
+ tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
+ i, dev);
+ if (IS_ERR(tty_dev)) {
+ err = PTR_ERR(tty_dev);
+ goto unwind;
+ }
+ dev_set_drvdata(tty_dev, vk);
+ vk->tty[i].is_opened = false;
+ }
+
+ INIT_WORK(&vk->tty_wq_work, bcm_vk_tty_wq_handler);
+ vk->tty_wq_thread = create_singlethread_workqueue("tty");
+ if (!vk->tty_wq_thread) {
+		dev_err(dev, "Failed to create tty workqueue thread\n");
+ err = -ENOMEM;
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (--i >= 0)
+ tty_port_unregister_device(&vk->tty[i].port, tty_drv, i);
+ tty_unregister_driver(tty_drv);
+
+err_kfree_tty_name:
+ kfree(tty_drv->name);
+ tty_drv->name = NULL;
+
+err_put_tty_driver:
+ put_tty_driver(tty_drv);
+
+ return err;
+}
+
+void bcm_vk_tty_exit(struct bcm_vk *vk)
+{
+ int i;
+
+ del_timer_sync(&vk->serial_timer);
+ for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
+ tty_port_unregister_device(&vk->tty[i].port,
+ vk->tty_drv,
+ i);
+ tty_port_destroy(&vk->tty[i].port);
+ }
+ tty_unregister_driver(vk->tty_drv);
+
+ kfree(vk->tty_drv->name);
+ vk->tty_drv->name = NULL;
+
+ put_tty_driver(vk->tty_drv);
+}
+
+void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk)
+{
+ struct bcm_vk_tty *vktty;
+ int i;
+
+ for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
+ vktty = &vk->tty[i];
+ if (vktty->pid)
+ kill_pid(find_vpid(vktty->pid), SIGKILL, 1);
+ }
+}
+
+void bcm_vk_tty_wq_exit(struct bcm_vk *vk)
+{
+ cancel_work_sync(&vk->tty_wq_work);
+ destroy_workqueue(vk->tty_wq_thread);
+}
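
Both tty directions above use the same circular-buffer discipline: the producer advances wr, the consumer advances rd, each index wraps at the buffer size, and rd == wr means empty, which is why bcm_vk_tty_write_room() reports to_size - 1. A minimal in-memory sketch of that index arithmetic (all names illustrative):

#include <stdio.h>

#define BUF_SIZE 8	/* size need not be a power of two; wrap is by compare */

struct circ {
	unsigned char data[BUF_SIZE];
	unsigned int wr, rd;	/* producer / consumer indices */
};

/* Push one byte; mirrors bcm_vk_tty_write()'s increment-and-wrap. */
static void circ_put(struct circ *c, unsigned char b)
{
	c->data[c->wr] = b;
	if (++c->wr >= BUF_SIZE)
		c->wr = 0;
}

/* Drain everything up to wr; mirrors the wq handler's read loop. */
static void circ_drain(struct circ *c, unsigned int wr)
{
	while (c->rd != wr) {
		putchar(c->data[c->rd]);
		if (++c->rd >= BUF_SIZE)
			c->rd = 0;
	}
	putchar('\n');
}

int main(void)
{
	struct circ c = { .wr = 0, .rd = 0 };
	const char *msg = "hi vk";

	while (*msg)
		circ_put(&c, (unsigned char)*msg++);
	circ_drain(&c, c.wr);
	return 0;
}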
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 8859011672cb..8200af22b529 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -398,6 +398,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5227_extra_init_hw(pcr);
+ /* Power down OCP for power consumption */
+ if (!pcr->card_exist)
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 2aa6648fa41f..273311184669 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -59,12 +59,6 @@ static const struct pci_device_id rtsx_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
-static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
-{
- pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC, 0);
-}
-
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
rtsx_pci_write_register(pcr, MSGTXDATA0,
@@ -1512,6 +1506,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
+ u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1572,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
@@ -1800,7 +1799,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
- int ret = 0;
handle = pci_get_drvdata(pcidev);
pcr = handle->pcr;
@@ -1825,7 +1823,7 @@ static int rtsx_pci_runtime_resume(struct device *device)
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
mutex_unlock(&pcr->pcr_mutex);
- return ret;
+ return 0;
}
#else /* CONFIG_PM */
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 2a1783f32254..53b919856426 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -170,8 +170,6 @@ int cxllib_get_PE_attributes(struct task_struct *task,
unsigned long translation_mode,
struct cxllib_pe_attributes *attr)
{
- struct mm_struct *mm = NULL;
-
if (translation_mode != CXL_TRANSLATED_MODE &&
translation_mode != CXL_REAL_MODE)
return -EINVAL;
@@ -182,7 +180,7 @@ int cxllib_get_PE_attributes(struct task_struct *task,
true);
attr->lpid = mfspr(SPRN_LPID);
if (task) {
- mm = get_task_mm(task);
+ struct mm_struct *mm = get_task_mm(task);
if (mm == NULL)
return -EINVAL;
/*
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index d97a243ad30c..c173a5e88c91 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -178,7 +178,7 @@ static ssize_t perst_reloads_same_image_store(struct device *device,
if ((rc != 1) || !(val == 1 || val == 0))
return -EINVAL;
- adapter->perst_same_image = (val == 1 ? true : false);
+ adapter->perst_same_image = (val == 1);
return count;
}
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 7c45f82b4302..80114f4c80ad 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
};
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+ .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
static int eeprom_93xx46_read(void *priv, unsigned int off,
void *val, size_t count)
{
@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
cmd_addr, edev->spi->max_speed_hz);
+ if (has_quirk_extra_read_cycle(edev)) {
+ cmd_addr <<= 1;
+ bits += 1;
+ }
+
spi_message_init(&m);
t[0].tx_buf = (char *)&cmd_addr;
@@ -363,6 +377,7 @@ static void select_deassert(void *context)
static const struct of_device_id eeprom_93xx46_of_table[] = {
{ .compatible = "eeprom-93xx46", },
{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
@@ -512,3 +527,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_ALIAS("spi:93xx46");
+MODULE_ALIAS("spi:eeprom-93xx46");
+MODULE_ALIAS("spi:93lc46b");
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 70eb5ed942d0..f12e909034ac 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -520,12 +520,13 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
{
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
+ int ret;
table = &a->sgt;
- if (!dma_map_sgtable(attachment->dev, table, dir, 0))
- return ERR_PTR(-ENOMEM);
-
+ ret = dma_map_sgtable(attachment->dev, table, dir, 0);
+ if (ret)
+ table = ERR_PTR(ret);
return table;
}
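
The fastrpc fix above stops collapsing every dma_map_sgtable() failure into -ENOMEM and instead propagates the real error through the returned pointer. A hedged userspace model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() encoding that makes this possible:

#include <stdio.h>

/* Minimal stand-in for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR()
 * helpers: errno values in [-4095, -1] are encoded in the pointer
 * itself, so a single return value can carry either a valid pointer
 * or a precise error code.
 */
#define MAX_ERRNO 4095

static void *err_ptr(long error)
{
	return (void *)error;
}

static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
	return (long)ptr;
}

int main(void)
{
	void *table = err_ptr(-12);	/* e.g. -ENOMEM from a failed map */

	if (is_err(table))
		printf("mapping failed: %ld\n", ptr_err(table));
	return 0;
}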
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 1640340d3e62..293d79811372 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -6,7 +6,6 @@
config HABANA_AI
tristate "HabanaAI accelerators (habanalabs)"
depends on PCI && HAS_IOMEM
- select FRAME_VECTOR
select GENERIC_ALLOCATOR
select HWMON
help
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index eccd8c7dc62d..5d8b48288cf4 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -1,7 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+include $(src)/common/mmu/Makefile
+habanalabs-y += $(HL_COMMON_MMU_FILES)
+
+include $(src)/common/pci/Makefile
+habanalabs-y += $(HL_COMMON_PCI_FILES)
+
HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/mmu.o common/mmu_v1.o \
- common/firmware_if.o common/pci.o
+ common/command_submission.o common/firmware_if.o
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c
index a2fdf31cf27c..ede04c032b6e 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
@@ -50,8 +50,10 @@ unsigned long hl_asid_alloc(struct hl_device *hdev)
void hl_asid_free(struct hl_device *hdev, unsigned long asid)
{
- if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
- "Invalid ASID %lu", asid))
+ if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) {
+ dev_crit(hdev->dev, "Invalid ASID %lu", asid);
return;
+ }
+
clear_bit(asid, hdev->asid_bitmap);
}
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 6f6a904ab6ca..d9adb9a5e4d8 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -635,10 +635,12 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
cb_handle >>= PAGE_SHIFT;
cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
- if (!cb)
+ /* hl_cb_get should never fail here */
+ if (!cb) {
+ dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
+ (u32) cb_handle);
goto destroy_cb;
+ }
return cb;
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..7bd4a03b3429 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -48,8 +48,8 @@ void hl_sob_reset_error(struct kref *ref)
struct hl_device *hdev = hw_sob->hdev;
dev_crit(hdev->dev,
- "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
- hw_sob->q_idx, hw_sob->sob_id);
+ "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
+ hw_sob->q_idx, hw_sob->sob_id);
}
/**
@@ -149,9 +149,10 @@ void hl_fence_get(struct hl_fence *fence)
kref_get(&fence->refcount);
}
-static void hl_fence_init(struct hl_fence *fence)
+static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
kref_init(&fence->refcount);
+ fence->cs_sequence = sequence;
fence->error = 0;
fence->timestamp = ktime_set(0, 0);
init_completion(&fence->completion);
@@ -184,6 +185,28 @@ static void cs_job_put(struct hl_cs_job *job)
kref_put(&job->refcount, cs_job_do_release);
}
+bool cs_needs_completion(struct hl_cs *cs)
+{
+	/* In case this is a staged CS, only the last CS in the sequence should
+	 * get a completion; any non-staged CS will always get a completion
+ */
+ if (cs->staged_cs && !cs->staged_last)
+ return false;
+
+ return true;
+}
+
+bool cs_needs_timeout(struct hl_cs *cs)
+{
+	/* In case this is a staged CS, only the first CS in the sequence should
+	 * get a timeout; any non-staged CS will always get a timeout
+ */
+ if (cs->staged_cs && !cs->staged_first)
+ return false;
+
+ return true;
+}
+
static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
/*
@@ -225,6 +248,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.queue_type = job->queue_type;
parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
+ parser.completion = cs_needs_completion(job->cs);
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
@@ -290,13 +314,153 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW)
+ /* We decrement reference only for a CS that gets completion
+ * because the reference was incremented only for this kind of CS
+ * right before it was scheduled.
+ *
+ * In staged submission, only the last CS marked as 'staged_last'
+ * gets completion, hence its release function will be called from here.
+	 * As for all the other CS's in the staged submission which do not get
+ * completion, their CS reference will be decremented by the
+ * 'staged_last' CS during the CS release flow.
+ * All relevant PQ CI counters will be incremented during the CS release
+ * flow by calling 'hl_hw_queue_update_ci'.
+ */
+ if (cs_needs_completion(cs) &&
+ (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW))
cs_put(cs);
cs_job_put(job);
}
+/*
+ * hl_staged_cs_find_first - locate the first CS in this staged submission
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: staged submission sequence number
+ *
+ * @note: This function must be called under 'hdev->cs_mirror_lock'
+ *
+ * Find and return a CS pointer with the given sequence
+ */
+struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
+{
+ struct hl_cs *cs;
+
+ list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
+ if (cs->staged_cs && cs->staged_first &&
+ cs->sequence == cs_seq)
+ return cs;
+
+ return NULL;
+}
+
+/*
+ * is_staged_cs_last_exists - returns true if the last CS in sequence exists
+ *
+ * @hdev: pointer to device structure
+ * @cs: staged submission member
+ *
+ */
+bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs *last_entry;
+
+ last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
+ staged_cs_node);
+
+ if (last_entry->staged_last)
+ return true;
+
+ return false;
+}
+
+/*
+ * staged_cs_get - get CS reference if this CS is a part of a staged CS
+ *
+ * @hdev: pointer to device structure
+ * @cs: current CS
+ * @cs_seq: staged submission sequence number
+ *
+ * Increment CS reference for every CS in this staged submission except for
+ * the CS which gets completion.
+ */
+static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
+{
+ /* Only the last CS in this staged submission will get a completion.
+ * We must increment the reference for all other CS's in this
+ * staged submission.
+ * Once we get a completion we will release the whole staged submission.
+ */
+ if (!cs->staged_last)
+ cs_get(cs);
+}
+
+/*
+ * staged_cs_put - put a CS in case it is part of staged submission
+ *
+ * @hdev: pointer to device structure
+ * @cs: CS to put
+ *
+ * This function decrements a CS reference (for a non completion CS)
+ */
+static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
+{
+ /* We release all CS's in a staged submission except the last
+	 * CS, whose reference we never incremented.
+ */
+ if (!cs_needs_completion(cs))
+ cs_put(cs);
+}
+
+static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
+{
+ bool next_entry_found = false;
+ struct hl_cs *next;
+
+ if (!cs_needs_timeout(cs))
+ return;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ /* We need to handle tdr only once for the complete staged submission.
+	 * Hence, we choose the CS that reaches this function first, which is
+ * the CS marked as 'staged_last'.
+ */
+ if (cs->staged_cs && cs->staged_last)
+ cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+
+ spin_unlock(&hdev->cs_mirror_lock);
+
+	/* Don't cancel TDR in case this CS timed out, because we might be
+ * running from the TDR context
+ */
+ if (cs && (cs->timedout ||
+ hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
+ return;
+
+ if (cs && cs->tdr_active)
+ cancel_delayed_work_sync(&cs->work_tdr);
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ /* queue TDR for next CS */
+ list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
+ if (cs_needs_timeout(next)) {
+ next_entry_found = true;
+ break;
+ }
+
+ if (next_entry_found && !next->tdr_active) {
+ next->tdr_active = true;
+ schedule_delayed_work(&next->work_tdr,
+ hdev->timeout_jiffies);
+ }
+
+ spin_unlock(&hdev->cs_mirror_lock);
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
@@ -346,36 +510,37 @@ static void cs_do_release(struct kref *ref)
hdev->asic_funcs->hw_queues_unlock(hdev);
- /* Need to update CI for internal queues */
- hl_int_hw_queue_update_ci(cs);
+ /* Need to update CI for all queue jobs that does not get completion */
+ hl_hw_queue_update_ci(cs);
/* remove CS from CS mirror list */
spin_lock(&hdev->cs_mirror_lock);
list_del_init(&cs->mirror_node);
spin_unlock(&hdev->cs_mirror_lock);
- /* Don't cancel TDR in case this CS was timedout because we might be
- * running from the TDR context
- */
- if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- struct hl_cs *next;
-
- if (cs->tdr_active)
- cancel_delayed_work_sync(&cs->work_tdr);
+ cs_handle_tdr(hdev, cs);
- spin_lock(&hdev->cs_mirror_lock);
-
- /* queue TDR for next CS */
- next = list_first_entry_or_null(&hdev->cs_mirror_list,
- struct hl_cs, mirror_node);
+ if (cs->staged_cs) {
+ /* the completion CS decrements reference for the entire
+ * staged submission
+ */
+ if (cs->staged_last) {
+ struct hl_cs *staged_cs, *tmp;
- if (next && !next->tdr_active) {
- next->tdr_active = true;
- schedule_delayed_work(&next->work_tdr,
- hdev->timeout_jiffies);
+ list_for_each_entry_safe(staged_cs, tmp,
+ &cs->staged_cs_node, staged_cs_node)
+ staged_cs_put(hdev, staged_cs);
}
- spin_unlock(&hdev->cs_mirror_lock);
+ /* A staged CS will be a member in the list only after it
+ * was submitted. We used 'cs_mirror_lock' when inserting
+		 * it into the list, so we will use it again when removing it
+ */
+ if (cs->submitted) {
+ spin_lock(&hdev->cs_mirror_lock);
+ list_del(&cs->staged_cs_node);
+ spin_unlock(&hdev->cs_mirror_lock);
+ }
}
out:
@@ -461,7 +626,8 @@ static void cs_timedout(struct work_struct *work)
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_cs_type cs_type, struct hl_cs **cs_new)
+ enum hl_cs_type cs_type, u64 user_sequence,
+ struct hl_cs **cs_new)
{
struct hl_cs_counters_atomic *cntr;
struct hl_fence *other = NULL;
@@ -472,8 +638,14 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
+
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, ctx);
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +658,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -502,6 +676,18 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
(hdev->asic_prop.max_pending_cs - 1)];
if (other && !completion_done(&other->completion)) {
+ /* If the following statement is true, it means we have reached
+ * a point in which only part of the staged submission was
+ * submitted and we don't have enough room in the 'cs_pending'
+ * array for the rest of the submission.
+ * This causes a deadlock because this CS will never be
+ * completed as it depends on future CS's for completion.
+ */
+ if (other->cs_sequence == user_sequence)
+ dev_crit_ratelimited(hdev->dev,
+ "Staged CS %llu deadlock due to lack of resources",
+ user_sequence);
+
dev_dbg_ratelimited(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
@@ -513,12 +699,14 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
/* init hl_fence */
- hl_fence_init(&cs_cmpl->base_fence);
+ hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
cs->sequence = cs_cmpl->cs_seq;
@@ -542,6 +730,7 @@ free_fence:
kfree(cs_cmpl);
free_cs:
kfree(cs);
+ hl_ctx_put(ctx);
return rc;
}
@@ -549,6 +738,8 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs_job *job, *tmp;
+ staged_cs_put(hdev, cs);
+
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
complete_job(hdev, job);
}
@@ -558,21 +749,35 @@ void hl_cs_rollback_all(struct hl_device *hdev)
int i;
struct hl_cs *cs, *tmp;
- /* flush all completions */
+ /* flush all completions before iterating over the CS mirror list in
+ * order to avoid a race with the release functions
+ */
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
- cs->ctx->asid, cs->sequence);
+ cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
}
+void hl_pending_cb_list_flush(struct hl_ctx *ctx)
+{
+ struct hl_pending_cb *pending_cb, *tmp;
+
+ list_for_each_entry_safe(pending_cb, tmp,
+ &ctx->pending_cb_list, cb_node) {
+ list_del(&pending_cb->cb_node);
+ hl_cb_put(pending_cb->cb);
+ kfree(pending_cb);
+ }
+}
+
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -727,6 +932,12 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
return -EBUSY;
}
+ if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !hdev->supports_staged_submission) {
+ dev_err(hdev->dev, "staged submission not supported");
+ return -EPERM;
+ }
+
cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
@@ -764,11 +975,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +991,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -790,39 +1009,77 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
return 0;
}
+static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
+ u64 sequence, u32 flags)
+{
+ if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
+ return 0;
+
+ cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
+ cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
+
+ if (cs->staged_first) {
+ /* Staged CS sequence is the first CS sequence */
+ INIT_LIST_HEAD(&cs->staged_cs_node);
+ cs->staged_sequence = cs->sequence;
+ } else {
+ /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
+ * under the cs_mirror_lock
+ */
+ cs->staged_sequence = sequence;
+ }
+
+ /* Increment CS reference if needed */
+ staged_cs_get(hdev, cs);
+
+ cs->staged_cs = true;
+
+ return 0;
+}
+
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
- u32 num_chunks, u64 *cs_seq, bool timestamp)
+ u32 num_chunks, u64 *cs_seq, u32 flags)
{
- bool int_queues_only = true;
+ bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
+ u64 user_sequence;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
+ user_sequence = *cs_seq;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
- /* increment refcnt for context */
- hl_ctx_get(hdev, hpriv->ctx);
+ if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
+ staged_mid = true;
+ else
+ staged_mid = false;
- rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
- if (rc) {
- hl_ctx_put(hpriv->ctx);
+ rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
+ staged_mid ? user_sequence : ULLONG_MAX, &cs);
+ if (rc)
goto free_cs_chunk_array;
- }
- cs->timestamp = !!timestamp;
+ cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
*cs_seq = cs->sequence;
hl_debugfs_add_cs(cs);
+ rc = cs_staged_submission(hdev, cs, user_sequence, flags);
+ if (rc)
+ goto free_cs_object;
+
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -832,8 +1089,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +1098,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +1113,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -883,15 +1139,16 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
* Only increment for JOB on external or H/W queues, because
* only for those JOBs we get completion
*/
- if (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW)
+ if (cs_needs_completion(cs) &&
+ (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW))
cs_get(cs);
hl_debugfs_add_job(hdev, job);
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -900,11 +1157,14 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ /* We allow a CS with any queue type combination as long as it does
+ * not get a completion
+ */
+ if (int_queues_only && cs_needs_completion(cs)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
- "Reject CS %d.%llu because only internal queues jobs are present\n",
+ "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -938,6 +1198,129 @@ out:
return rc;
}
+static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
+{
+ struct hw_queue_properties *hw_queue_prop;
+ struct hl_cs_counters_atomic *cntr;
+ struct hl_cs_job *job;
+
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
+ cntr = &hdev->aggregated_cs_counters;
+
+ job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
+ if (!job) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ return -ENOMEM;
+ }
+
+ job->id = 0;
+ job->cs = cs;
+ job->user_cb = cb;
+ atomic_inc(&job->user_cb->cs_cnt);
+ job->user_cb_size = size;
+ job->hw_queue_id = hw_queue_id;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size;
+
+ /* increment refcount as for external queues we get completion */
+ cs_get(cs);
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ hl_debugfs_add_job(hdev, job);
+
+ return 0;
+}
+
+static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_pending_cb *pending_cb, *tmp;
+ struct list_head local_cb_list;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ u32 hw_queue_id;
+ u32 cb_size;
+ int process_list, rc = 0;
+
+ if (list_empty(&ctx->pending_cb_list))
+ return 0;
+
+ process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);
+
+ /* Only a single thread is allowed to process the list */
+ if (!process_list)
+ return 0;
+
+ if (list_empty(&ctx->pending_cb_list))
+ goto free_pending_cb_token;
+
+ /* move all list elements to a local list */
+ INIT_LIST_HEAD(&local_cb_list);
+ spin_lock(&ctx->pending_cb_lock);
+ list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
+ cb_node)
+ list_move_tail(&pending_cb->cb_node, &local_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
+
+ rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs);
+ if (rc)
+ goto add_list_elements;
+
+ hl_debugfs_add_cs(cs);
+
+ /* Iterate through pending cb list, create jobs and add to CS */
+ list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
+ cb = pending_cb->cb;
+ cb_size = pending_cb->cb_size;
+ hw_queue_id = pending_cb->hw_queue_id;
+
+ rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
+ hw_queue_id);
+ if (rc)
+ goto free_cs_object;
+ }
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu (%d)\n",
+ ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ /* pending cb was scheduled successfully */
+ list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
+ list_del(&pending_cb->cb_node);
+ kfree(pending_cb);
+ }
+
+ cs_put(cs);
+
+ goto free_pending_cb_token;
+
+free_cs_object:
+ cs_rollback(hdev, cs);
+ cs_put(cs);
+add_list_elements:
+ spin_lock(&ctx->pending_cb_lock);
+ list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
+ cb_node)
+ list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
+free_pending_cb_token:
+ atomic_set(&ctx->thread_pending_cb_token, 1);
+
+ return rc;
+}
+
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
@@ -987,7 +1370,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, false);
+ cs_seq, 0);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1042,7 +1425,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1435,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1445,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1543,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1551,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1563,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1576,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1587,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1599,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1624,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1239,15 +1642,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
}
- /* increment refcnt for context */
- hl_ctx_get(hdev, ctx);
-
- rc = allocate_cs(hdev, ctx, cs_type, &cs);
+ rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs);
if (rc) {
if (cs_type == CS_TYPE_WAIT ||
cs_type == CS_TYPE_COLLECTIVE_WAIT)
hl_fence_put(sig_fence);
- hl_ctx_put(ctx);
goto free_cs_chunk_array;
}
@@ -1270,8 +1669,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
@@ -1307,7 +1709,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
enum hl_cs_type cs_type;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks;
+ u32 num_chunks, flags;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1318,10 +1720,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
if (rc)
goto out;
+ rc = hl_submit_pending_cb(hpriv);
+ if (rc)
+ goto out;
+
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
num_chunks = args->in.num_chunks_execute;
+ flags = args->in.cs_flags;
+
+	/* In case this is a staged CS, the user should supply the CS sequence */
+ if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
+ cs_seq = args->in.seq;
switch (cs_type) {
case CS_TYPE_SIGNAL:
@@ -1332,7 +1744,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+ args->in.cs_flags);
break;
}
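
The staged-submission plumbing above reduces to two predicates: only the chain's last CS gets a completion, and only the chain's first CS arms the TDR timeout; a standalone CS does both. A hedged restatement of cs_needs_completion()/cs_needs_timeout() outside the driver (struct cs_flags is an illustrative stand-in for hl_cs):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the staged-submission flags carried by hl_cs. */
struct cs_flags {
	bool staged;	/* part of a staged submission */
	bool first;	/* first CS in the chain */
	bool last;	/* last CS in the chain */
};

/* Mirrors cs_needs_completion(): only the chain's last CS completes. */
static bool needs_completion(const struct cs_flags *cs)
{
	return !cs->staged || cs->last;
}

/* Mirrors cs_needs_timeout(): only the chain's first CS arms the TDR. */
static bool needs_timeout(const struct cs_flags *cs)
{
	return !cs->staged || cs->first;
}

int main(void)
{
	struct cs_flags mid = { .staged = true, .first = false, .last = false };
	struct cs_flags lone = { .staged = false };

	printf("mid: completion=%d timeout=%d\n",
	       needs_completion(&mid), needs_timeout(&mid));
	printf("lone: completion=%d timeout=%d\n",
	       needs_completion(&lone), needs_timeout(&lone));
	return 0;
}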
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index f65e6559149b..cda871afb8f4 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -12,9 +12,14 @@
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- u64 idle_mask = 0;
+ u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
int i;
+	/* Release all allocated pending cb's; those cb's were never
+	 * scheduled, so it is safe to release them here
+ */
+ hl_pending_cb_list_flush(ctx);
+
/*
* If we arrived here, there are no jobs waiting for this context
* on its queues so we can safely remove it.
@@ -50,12 +55,15 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
if ((!hdev->pldm) && (hdev->pdev) &&
(!hdev->asic_funcs->is_device_idle(hdev,
- &idle_mask, NULL)))
+ idle_mask,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)))
dev_notice(hdev->dev,
- "device not idle after user context is closed (0x%llx)\n",
- idle_mask);
+ "device not idle after user context is closed (0x%llx, 0x%llx)\n",
+ idle_mask[0], idle_mask[1]);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
+ hdev->asic_funcs->ctx_fini(ctx);
+ hl_vm_ctx_fini(ctx);
hl_mmu_ctx_fini(ctx);
}
}
@@ -140,8 +148,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
+ INIT_LIST_HEAD(&ctx->pending_cb_list);
+ spin_lock_init(&ctx->pending_cb_lock);
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
+ atomic_set(&ctx->thread_pending_cb_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
@@ -151,11 +162,18 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
- rc = hl_mmu_ctx_init(ctx);
+ rc = hl_vm_ctx_init(ctx);
if (rc) {
- dev_err(hdev->dev, "Failed to init mmu ctx module\n");
+ dev_err(hdev->dev, "Failed to init mem ctx module\n");
+ rc = -ENOMEM;
goto err_free_cs_pending;
}
+
+ rc = hdev->asic_funcs->ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "ctx_init failed\n");
+ goto err_vm_ctx_fini;
+ }
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
@@ -194,7 +212,8 @@ err_cb_va_pool_fini:
err_vm_ctx_fini:
hl_vm_ctx_fini(ctx);
err_asid_free:
- hl_asid_free(hdev, ctx->asid);
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ hl_asid_free(hdev, ctx->asid);
err_free_cs_pending:
kfree(ctx->cs_pending);
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index cef716643979..df847a6d19f4 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -310,8 +310,8 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
struct hl_ctx *ctx;
- struct hl_mmu_hop_info hops_info;
- u64 virt_addr = dev_entry->mmu_addr;
+ struct hl_mmu_hop_info hops_info = {0};
+ u64 virt_addr = dev_entry->mmu_addr, phys_addr;
int i;
if (!hdev->mmu_enable)
@@ -333,8 +333,19 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
- seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
- dev_entry->mmu_asid, dev_entry->mmu_addr);
+ phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
+
+ if (hops_info.scrambled_vaddr &&
+ (dev_entry->mmu_addr != hops_info.scrambled_vaddr))
+ seq_printf(s,
+ "asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr,
+ hops_info.scrambled_vaddr,
+ hops_info.unscrambled_paddr, phys_addr);
+ else
+ seq_printf(s,
+ "asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);
for (i = 0 ; i < hops_info.used_hops ; i++) {
seq_printf(s, "hop%d_addr: 0x%llx\n",
@@ -403,7 +414,7 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
- hdev->asic_funcs->is_device_idle(hdev, NULL, s);
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s);
return 0;
}
@@ -865,6 +876,17 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+
+ hdev->asic_funcs->ack_protection_bits_errors(hdev);
+
+ return 0;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -922,6 +944,11 @@ static const struct file_operations hl_stop_on_err_fops = {
.write = hl_stop_on_err_write
};
+static const struct file_operations hl_security_violations_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_security_violations_read
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1071,6 +1098,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_stop_on_err_fops);
+ debugfs_create_file("dump_security_violations",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_security_violations_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
ent = debugfs_create_file(hl_debugfs_list[i].name,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..15fcb5c31c4b 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
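Because disabled is also set while a reset is in flight, the check order above decides what transient states report; a minimal caller sketch (illustrative only):

	/* A device that is both disabled and mid-reset now reports
	 * IN_RESET rather than MALFUNCTION, so callers can retry.
	 */
	if (hl_device_status(hdev) == HL_DEVICE_STATUS_IN_RESET)
		dev_dbg(hdev->dev, "device is resetting, try again later\n");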
@@ -142,6 +142,9 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
case HL_MMAP_TYPE_CB:
return hl_cb_mmap(hpriv, vma);
+
+ case HL_MMAP_TYPE_BLOCK:
+ return hl_hw_block_mmap(hpriv, vma);
}
return -EINVAL;
@@ -373,7 +376,6 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock);
- mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list);
spin_lock_init(&hdev->cs_mirror_lock);
INIT_LIST_HEAD(&hdev->fpriv_list);
@@ -414,7 +416,6 @@ static void device_early_fini(struct hl_device *hdev)
{
int i;
- mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
@@ -1037,7 +1038,7 @@ kill_processes:
if (hard_reset) {
/* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
@@ -1092,6 +1093,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1105,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
@@ -1156,12 +1159,20 @@ kill_processes:
atomic_set(&hdev->in_reset, 0);
hdev->needs_reset = false;
- if (hard_reset)
+ dev_notice(hdev->dev, "Successfully finished resetting the device\n");
+
+ if (hard_reset) {
hdev->hard_reset_cnt++;
- else
- hdev->soft_reset_cnt++;
- dev_warn(hdev->dev, "Successfully finished resetting the device\n");
+ /* After reset is done, we are ready to receive events from
+	 * the F/W. We can't do it earlier because we would ignore events
+	 * and if those events are fatal, we won't know about them and
+	 * the device will appear operational although it shouldn't be
+ */
+ hdev->asic_funcs->enable_events_from_fw(hdev);
+ } else {
+ hdev->soft_reset_cnt++;
+ }
return 0;
@@ -1312,11 +1323,16 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->compute_ctx = NULL;
+ hl_debugfs_add_device(hdev);
+
+	/* debugfs nodes are created in hl_ctx_init, so it must be called after
+ * hl_debugfs_add_device.
+ */
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
kfree(hdev->kernel_ctx);
- goto mmu_fini;
+ goto remove_device_from_debugfs;
}
rc = hl_cb_pool_init(hdev);
@@ -1325,8 +1341,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
- hl_debugfs_add_device(hdev);
-
/*
* From this point, in case of an error, add char devices and create
* sysfs nodes as part of the error flow, to allow debugging.
@@ -1409,12 +1423,21 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->init_done = true;
+ /* After initialization is done, we are ready to receive events from
+	 * the F/W. We can't do it earlier because we would ignore events and if
+	 * those events are fatal, we won't know about them and the device will
+	 * appear operational although it shouldn't be
+ */
+ hdev->asic_funcs->enable_events_from_fw(hdev);
+
return 0;
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
+remove_device_from_debugfs:
+ hl_debugfs_remove_device(hdev);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
@@ -1480,11 +1503,21 @@ void hl_device_fini(struct hl_device *hdev)
usleep_range(50, 200);
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
if (ktime_compare(ktime_get(), timeout) > 0) {
- WARN(1, "Failed to remove device because reset function did not finish\n");
+ dev_crit(hdev->dev,
+ "Failed to remove device because reset function did not finish\n");
return;
}
}
+ /* Disable PCI access from device F/W so it won't send us additional
+ * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+ * can't have the F/W sending us interrupts after that. We need to
+	 * disable the access here because if the device is marked as disabled,
+	 * the message won't be sent. Also, in case of a heartbeat failure, the
+	 * device CPU is marked as disabled so this message won't be sent
+ */
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
/* Mark device as disabled */
hdev->disabled = true;
@@ -1504,8 +1537,6 @@ void hl_device_fini(struct hl_device *hdev)
device_late_fini(hdev);
- hl_debugfs_remove_device(hdev);
-
/*
* Halt the engines and disable interrupts so we won't get any more
* completions from H/W and we won't have any accesses from the
@@ -1537,6 +1568,8 @@ void hl_device_fini(struct hl_device *hdev)
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
dev_err(hdev->dev, "kernel ctx is still alive\n");
+ hl_debugfs_remove_device(hdev);
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..09706c571e95 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -90,9 +90,10 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result)
{
+ struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
- u32 tmp;
+ u32 tmp, expected_ack_val;
int rc = 0;
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
@@ -115,14 +116,23 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto out;
}
+	/* Set the fence to an invalid value */
+ pkt->fence = UINT_MAX;
+
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
if (rc) {
dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
goto out;
}
+ if (hdev->asic_prop.fw_app_security_map &
+ CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
+ expected_ack_val = queue->pi;
+ else
+ expected_ack_val = CPUCP_PACKET_FENCE_VAL;
+
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
- (tmp == CPUCP_PACKET_FENCE_VAL), 1000,
+ (tmp == expected_ack_val), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
@@ -279,8 +289,74 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
return rc;
}
+static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
+ u32 cpu_security_boot_status_reg)
+{
+ u32 err_val, security_val;
+
+ /* Some of the firmware status codes are deprecated in newer f/w
+ * versions. In those versions, the errors are reported
+ * in different registers. Therefore, we need to check those
+ * registers and print the exact errors. Moreover, there
+ * may be multiple errors, so we need to report on each error
+ * separately. Some of the error codes might indicate a state
+	 * that is not an error per se, but it is an error in a production
+	 * environment
+ */
+ err_val = RREG32(boot_err0_reg);
+ if (!(err_val & CPU_BOOT_ERR0_ENABLED))
+ return 0;
+
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+ dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+
+ if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
+ if (hdev->bmc_enable)
+ dev_warn(hdev->dev,
+ "Device boot error - Skipped waiting for BMC\n");
+ else
+ err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ dev_err(hdev->dev,
+ "Device boot error - Serdes data from BMC not available\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - NIC F/W initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+ dev_warn(hdev->dev,
+ "Device boot warning - security not ready\n");
+ if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
+ dev_err(hdev->dev, "Device boot error - security failure\n");
+ if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
+ dev_err(hdev->dev, "Device boot error - eFuse failure\n");
+ if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
+ dev_err(hdev->dev, "Device boot error - PLL failure\n");
+
+ security_val = RREG32(cpu_security_boot_status_reg);
+ if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
+ dev_dbg(hdev->dev, "Device security status %#x\n",
+ security_val);
+
+ if (err_val & ~CPU_BOOT_ERR0_ENABLED)
+ return -EIO;
+
+ return 0;
+}
+
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg)
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet pkt = {};
@@ -314,6 +390,12 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
goto out;
}
+ rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ if (rc) {
+ dev_err(hdev->dev, "Errors in device boot\n");
+ goto out;
+ }
+
memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
sizeof(prop->cpucp_info));
@@ -402,6 +484,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
}
counters->rx_throughput = result;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +500,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
counters->tx_throughput = result;
/* Fetch PCI replay counter */
+ memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
@@ -478,58 +565,6 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
return rc;
}
-static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
- u32 cpu_security_boot_status_reg)
-{
- u32 err_val, security_val;
-
- /* Some of the firmware status codes are deprecated in newer f/w
- * versions. In those versions, the errors are reported
- * in different registers. Therefore, we need to check those
- * registers and print the exact errors. Moreover, there
- * may be multiple errors, so we need to report on each error
- * separately. Some of the error codes might indicate a state
- * that is not an error per-se, but it is an error in production
- * environment
- */
- err_val = RREG32(boot_err0_reg);
- if (!(err_val & CPU_BOOT_ERR0_ENABLED))
- return;
-
- if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
- dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
- if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
- dev_warn(hdev->dev,
- "Device boot warning - Skipped DRAM initialization\n");
- if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
- dev_warn(hdev->dev,
- "Device boot error - Skipped waiting for BMC\n");
- if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
- dev_err(hdev->dev,
- "Device boot error - Serdes data from BMC not available\n");
- if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
- dev_err(hdev->dev,
- "Device boot error - NIC F/W initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
- dev_warn(hdev->dev,
- "Device boot warning - security not ready\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
- dev_err(hdev->dev, "Device boot error - security failure\n");
- if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
- dev_err(hdev->dev, "Device boot error - eFuse failure\n");
-
- security_val = RREG32(cpu_security_boot_status_reg);
- if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
- dev_dbg(hdev->dev, "Device security status %#x\n",
- security_val);
-}
-
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
/* Some of the status codes below are deprecated in newer f/w
@@ -627,25 +662,41 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot security status %#x\n",
+ security_status);
+
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +706,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +775,26 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+
+ dev_dbg(hdev->dev,
+ "Firmware boot CPU security status %#x\n",
+ prop->fw_boot_cpu_security_map);
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,21 +863,34 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ if (rc)
+ return rc;
+
+	/* Clear reset status since we need to read it again from the app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
+
+ dev_dbg(hdev->dev,
+ "Firmware application CPU security status %#x\n",
+ prop->fw_app_security_map);
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+ return 0;
+
out:
fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..d933878b24d1 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -28,17 +28,18 @@
#define HL_NAME "habanalabs"
/* Use upper bits of mmap offset to store habana driver specific information.
- * bits[63:62] - Encode mmap type
+ * bits[63:61] - Encode mmap type
* bits[45:0] - mmap offset value
*
* NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
*/
-#define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
-#define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT)
+#define HL_MMAP_TYPE_SHIFT (61 - PAGE_SHIFT)
+#define HL_MMAP_TYPE_MASK (0x7ull << HL_MMAP_TYPE_SHIFT)
+#define HL_MMAP_TYPE_BLOCK (0x4ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)
-#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT)
+#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
#define HL_PENDING_RESET_PER_SEC 10
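For reference, a minimal sketch of how a type-tagged offset round-trips through the widened masks above (the helper name is hypothetical; values are in pages, per the NOTE):

	/* Encode: tag a value with the HW-block type in bits[63:61]. */
	static inline u64 hl_example_block_pgoff(u64 value)
	{
		return HL_MMAP_TYPE_BLOCK | (value & HL_MMAP_OFFSET_VALUE_MASK);
	}

	/* Decode, as hl_mmap() does earlier in this patch:
	 *   type  = pgoff & HL_MMAP_TYPE_MASK;     -> HL_MMAP_TYPE_BLOCK
	 *   value = HL_MMAP_OFFSET_VALUE_GET(pgoff);
	 */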
@@ -408,6 +409,9 @@ struct hl_mmu_properties {
* @sync_stream_first_mon: first monitor available for sync stream use
* @first_available_user_sob: first sob available for the user
* @first_available_user_mon: first monitor available for the user
+ * @first_available_user_msix_interrupt: first available msix interrupt
+ * reserved for the user
+ * @first_available_cq: first available CQ for the user.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_disabled: true if security measures are disabled in firmware,
@@ -416,6 +420,7 @@ struct hl_mmu_properties {
* from BOOT_DEV_STS0
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
+ * @num_functional_hbms: number of functional HBMs in each DCORE.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -468,18 +473,22 @@ struct asic_fixed_properties {
u16 sync_stream_first_mon;
u16 first_available_user_sob[HL_MAX_DCORES];
u16 first_available_user_mon[HL_MAX_DCORES];
+ u16 first_available_user_msix_interrupt;
+ u16 first_available_cq[HL_MAX_DCORES];
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_disabled;
u8 fw_security_status_valid;
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
+ u8 num_functional_hbms;
};
/**
* struct hl_fence - software synchronization primitive
* @completion: fence is implemented using completion
* @refcount: refcount for this fence
+ * @cs_sequence: sequence of the corresponding command submission
* @error: mark this fence with error
* @timestamp: timestamp upon completion
*
@@ -487,6 +496,7 @@ struct asic_fixed_properties {
struct hl_fence {
struct completion completion;
struct kref refcount;
+ u64 cs_sequence;
int error;
ktime_t timestamp;
};
@@ -846,6 +856,19 @@ enum div_select_defs {
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
* @collective_wait_create_jobs: allocate collective wait cs jobs
+ * @scramble_addr: Routine to scramble the address prior to mapping it
+ *                 in the MMU.
+ * @descramble_addr: Routine to de-scramble the address prior to
+ *                   showing it to users.
+ * @ack_protection_bits_errors: ack and dump all security violations
+ * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
+ * also returns the size of the block if caller supplies
+ * a valid pointer for it
+ * @hw_block_mmap: mmap a HW block with a given id.
+ * @enable_events_from_fw: send interrupt to firmware to notify it that the
+ * driver is ready to receive asynchronous events. This
+ * function should be called during the first init and
+ * after every hard-reset of the device
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -918,8 +941,8 @@ struct hl_asic_funcs {
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, u64 *mask,
- struct seq_file *s);
+ bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s);
int (*soft_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -944,7 +967,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -955,6 +978,14 @@ struct hl_asic_funcs {
int (*collective_wait_create_jobs)(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
u32 collective_engine_id);
+ u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
+ u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
+ void (*ack_protection_bits_errors)(struct hl_device *hdev);
+ int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id);
+ int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ u32 block_id, u32 block_size);
+ void (*enable_events_from_fw)(struct hl_device *hdev);
};
@@ -1000,6 +1031,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1039,21 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
+};
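The new counter follows the accounting convention used throughout this patch; a sketch of the pattern (grounded in the ioctl paths above):

	/* Each dropped CS is counted twice: once per-context and once
	 * device-wide via hdev->aggregated_cs_counters.
	 */
	atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
	atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);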
+
+/**
+ * struct hl_pending_cb - pending command buffer structure
+ * @cb_node: cb node in pending cb list
+ * @cb: command buffer to send in next submission
+ * @cb_size: command buffer size
+ * @hw_queue_id: destination queue id
+ */
+struct hl_pending_cb {
+ struct list_head cb_node;
+ struct hl_cb *cb;
+ u32 cb_size;
+ u32 hw_queue_id;
};
/**
@@ -1024,6 +1071,8 @@ struct hl_cs_counters_atomic {
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
 *            MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
+ * @pending_cb_list: list of pending command buffers waiting to be sent upon
+ *                   the next user command submission.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
@@ -1032,11 +1081,17 @@ struct hl_cs_counters_atomic {
* index to cs_pending array.
* @dram_default_hops: array that holds all hops addresses needed for default
* DRAM mapping.
+ * @pending_cb_lock: spinlock to protect pending cb list
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
* @thread_ctx_switch_token: token to prevent multiple threads of the same
* context from running the context switch phase.
* Only a single thread should run it.
+ * @thread_pending_cb_token: token to prevent multiple threads from processing
+ * the pending CB list. Only a single thread should
+ * process the list since it is protected by a
+ * spinlock and we don't want to halt the entire
+ * command submission sequence.
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
* the context switch phase from moving to their
* execution phase before the context switch phase
@@ -1055,13 +1110,16 @@ struct hl_ctx {
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
+ struct list_head pending_cb_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
u64 cs_sequence;
u64 *dram_default_hops;
+ spinlock_t pending_cb_lock;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
atomic_t thread_ctx_switch_token;
+ atomic_t thread_pending_cb_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
u32 handle;
@@ -1087,7 +1145,8 @@ struct hl_ctx_mgr {
* struct hl_userptr - memory mapping chunk information
* @vm_type: type of the VM.
* @job_node: linked-list node for hanging the object on the Job's list.
- * @vec: pointer to the frame vector.
+ * @pages: pointer to struct page array
+ * @npages: size of @pages array
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
@@ -1098,7 +1157,8 @@ struct hl_ctx_mgr {
struct hl_userptr {
enum vm_type_t vm_type; /* must be first */
struct list_head job_node;
- struct frame_vector *vec;
+ struct page **pages;
+ unsigned int npages;
struct sg_table *sgt;
enum dma_data_direction dir;
struct list_head debugfs_list;
@@ -1120,8 +1180,11 @@ struct hl_userptr {
* @finish_work: workqueue object to run when CS is completed by H/W.
* @work_tdr: delayed work node for TDR.
* @mirror_node : node in device mirror list of command submissions.
+ * @staged_cs_node: node in the staged cs list.
* @debugfs_list: node in debugfs list of command submissions.
* @sequence: the sequence number of this CS.
+ * @staged_sequence: the sequence of the staged submission this CS is part of,
+ * relevant only if staged_cs is set.
* @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
@@ -1129,7 +1192,11 @@ struct hl_userptr {
* @tdr_active: true if TDR was activated for this CS (to prevent
* double TDR activation).
* @aborted: true if CS was aborted due to some device error.
- * @timestamp: true if a timestmap must be captured upon completion
+ * @timestamp: true if a timestamp must be captured upon completion.
+ * @staged_last: true if this is the last staged CS and needs completion.
+ * @staged_first: true if this is the first staged CS and we need to receive
+ * timeout for this CS.
+ * @staged_cs: true if this CS is part of a staged submission.
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1142,8 +1209,10 @@ struct hl_cs {
struct work_struct finish_work;
struct delayed_work work_tdr;
struct list_head mirror_node;
+ struct list_head staged_cs_node;
struct list_head debugfs_list;
u64 sequence;
+ u64 staged_sequence;
enum hl_cs_type type;
u8 submitted;
u8 completed;
@@ -1151,6 +1220,9 @@ struct hl_cs {
u8 tdr_active;
u8 aborted;
u8 timestamp;
+ u8 staged_last;
+ u8 staged_first;
+ u8 staged_cs;
};
/**
@@ -1221,6 +1293,7 @@ struct hl_cs_job {
* MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
* have streams so the engine can't be busy by another
* stream.
+ * @completion: true if we need completion for this CS.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
@@ -1235,6 +1308,7 @@ struct hl_cs_parser {
u8 job_id;
u8 is_kernel_allocated_cb;
u8 contains_dma_pkt;
+ u8 completion;
};
/*
@@ -1684,12 +1758,20 @@ struct hl_mmu_per_hop_info {
* struct hl_mmu_hop_info - A structure describing the TLB hops and their
* hop-entries that were created in order to translate a virtual address to a
* physical one.
+ * @scrambled_vaddr: The value of the virtual address after scrambling. This
+ * address replaces the original virtual-address when mapped
+ * in the MMU tables.
+ * @unscrambled_paddr: The un-scrambled physical address.
* @hop_info: Array holding the per-hop information used for the translation.
* @used_hops: The number of hops used for the translation.
+ * @range_type: virtual address range type.
*/
struct hl_mmu_hop_info {
+ u64 scrambled_vaddr;
+ u64 unscrambled_paddr;
struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
u32 used_hops;
+ enum hl_va_range_type range_type;
};
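The last used hop's PTE value doubles as the physical address, which is how the mmu_show() debugfs path earlier in this patch consumes it; a hedged sketch (ctx, virt_addr and phys_addr assumed from the caller):

	struct hl_mmu_hop_info hops = {0};

	if (!hl_mmu_get_tlb_info(ctx, virt_addr, &hops))
		phys_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val;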
/**
@@ -1762,7 +1844,6 @@ struct hl_mmu_funcs {
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
* @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
* @hwmon_dev: H/W monitor device.
* @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
@@ -1840,6 +1921,7 @@ struct hl_mmu_funcs {
* user processes
* @device_fini_pending: true if device_fini was called and might be
* waiting for the reset thread to finish
+ * @supports_staged_submission: true if staged submissions are supported
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1877,7 +1959,6 @@ struct hl_device {
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
struct hl_vm vm;
- struct mutex mmu_cache_lock;
struct device *hwmon_dev;
enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
@@ -1946,6 +2027,7 @@ struct hl_device {
u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
+ u8 supports_staged_submission;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2063,7 +2145,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
-void hl_int_hw_queue_update_ci(struct hl_cs *cs);
+void hl_hw_queue_update_ci(struct hl_cs *cs);
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
@@ -2119,6 +2201,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
u32 handle);
void hl_cb_put(struct hl_cb *cb);
@@ -2132,6 +2215,7 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
+void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
@@ -2139,6 +2223,10 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
+bool cs_needs_completion(struct hl_cs *cs);
+bool cs_needs_timeout(struct hl_cs *cs);
+bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
+struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -2180,6 +2268,9 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
+u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
@@ -2196,7 +2287,8 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg);
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..03af61cecd37 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -57,12 +57,23 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
hw_ip.sram_base_address = prop->sram_user_base_address;
- hw_ip.dram_base_address = prop->dram_user_base_address;
+ hw_ip.dram_base_address =
+ hdev->mmu_enable && prop->dram_supports_virtual_memory ?
+ prop->dmmu.start_addr : prop->dram_user_base_address;
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
- hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
+ if (hdev->mmu_enable)
+ hw_ip.dram_size =
+ DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
+ prop->dram_page_size) *
+ prop->dram_page_size;
+ else
+ hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
+ hw_ip.dram_page_size = prop->dram_page_size;
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
@@ -79,6 +90,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
+ hw_ip.first_available_interrupt_id =
+ prop->first_available_user_msix_interrupt;
return copy_to_user(out, &hw_ip,
min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
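A worked example of the rounding above, with assumed sizes:

	/* With 1000 MB of user DRAM and a 48 MB DRAM page size:
	 *   DIV_ROUND_DOWN_ULL(1000M, 48M) = 20 pages
	 *   dram_size reported = 20 * 48M = 960 MB
	 * so the size user-space sees always holds whole DRAM pages.
	 */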
@@ -132,7 +145,10 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
return -EINVAL;
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
- &hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask_ext,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
+ hw_idle.busy_engines_mask =
+ lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -335,6 +351,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +370,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -376,7 +397,8 @@ static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
prop->first_available_user_sob[args->dcore_id];
sm_info.first_available_monitor =
prop->first_available_user_mon[args->dcore_id];
-
+ sm_info.first_available_cq =
+ prop->first_available_cq[args->dcore_id];
return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
sizeof(sm_info))) ? -EFAULT : 0;
@@ -406,7 +428,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..0f335182267f 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -38,7 +38,7 @@ static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
return (abs(delta) - queue_len);
}
-void hl_int_hw_queue_update_ci(struct hl_cs *cs)
+void hl_hw_queue_update_ci(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
struct hl_hw_queue *q;
@@ -53,8 +53,13 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
return;
+ /* We must increment CI for every queue that will never get a
+	 * completion; there are 2 scenarios in which this can happen:
+	 * 1. All queues of a non-completion CS will never get a completion.
+	 * 2. Internal queues never get a completion.
+ */
for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_INT)
+ if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
}
@@ -292,6 +297,10 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
len = job->job_cb_size;
ptr = cb->bus_address;
+ /* Skip completion flow in case this is a non completion CS */
+ if (!cs_needs_completion(job->cs))
+ goto submit_bd;
+
cq_pkt.data = cpu_to_le32(
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK) |
@@ -318,6 +327,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
+submit_bd:
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
@@ -418,8 +428,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+	/* We set an EB since we must make sure all operations are done
+ * when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
@@ -522,6 +535,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
+ bool first_entry;
u32 max_queues;
cntr = &hdev->aggregated_cs_counters;
@@ -545,7 +559,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
+ cs->jobs_in_queue_cnt[i],
+ cs_needs_completion(cs) ?
+ true : false);
break;
case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
@@ -580,12 +596,38 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
hdev->asic_funcs->collective_wait_init_cs(cs);
spin_lock(&hdev->cs_mirror_lock);
+
+ /* Verify staged CS exists and add to the staged list */
+ if (cs->staged_cs && !cs->staged_first) {
+ struct hl_cs *staged_cs;
+
+ staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (!staged_cs) {
+ dev_err(hdev->dev,
+ "Cannot find staged submission sequence %llu",
+ cs->staged_sequence);
+ rc = -EINVAL;
+ goto unlock_cs_mirror;
+ }
+
+ if (is_staged_cs_last_exists(hdev, staged_cs)) {
+ dev_err(hdev->dev,
+ "Staged submission sequence %llu already submitted",
+ cs->staged_sequence);
+ rc = -EINVAL;
+ goto unlock_cs_mirror;
+ }
+
+ list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
+ }
+
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
/* Queue TDR if the CS is the first entry and if timeout is wanted */
+ first_entry = list_first_entry(&hdev->cs_mirror_list,
+ struct hl_cs, mirror_node) == cs;
if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
- (list_first_entry(&hdev->cs_mirror_list,
- struct hl_cs, mirror_node) == cs)) {
+ first_entry && cs_needs_timeout(cs)) {
cs->tdr_active = true;
schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
@@ -620,6 +662,8 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
goto out;
+unlock_cs_mirror:
+ spin_unlock(&hdev->cs_mirror_lock);
unroll_cq_resv:
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index cbe9da4e0211..1f5910517b0e 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -14,6 +14,9 @@
#define HL_MMU_DEBUG 0
+/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
+#define DRAM_POOL_PAGE_SIZE SZ_8M
+
/*
* The va ranges in context object contain a list with the available chunks of
* device virtual memory.
@@ -38,15 +41,14 @@
*/
/*
- * alloc_device_memory - allocate device memory
- *
- * @ctx : current context
- * @args : host parameters containing the requested size
- * @ret_handle : result handle
+ * alloc_device_memory() - allocate device memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters containing the requested size.
+ * @ret_handle: result handle.
*
* This function does the following:
- * - Allocate the requested size rounded up to 'dram_page_size' pages
- * - Return unique handle
+ * - Allocate the requested size rounded up to 'dram_page_size' pages.
+ * - Return unique handle for later map/unmap/free.
*/
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
u32 *ret_handle)
@@ -55,15 +57,14 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u64 paddr = 0, total_size, num_pgs, i;
- u32 num_curr_pgs, page_size, page_shift;
+ u32 num_curr_pgs, page_size;
int handle, rc;
bool contiguous;
num_curr_pgs = 0;
page_size = hdev->asic_prop.dram_page_size;
- page_shift = __ffs(page_size);
- num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
- total_size = num_pgs << page_shift;
+ num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
+ total_size = num_pgs * page_size;
if (!total_size) {
dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
@@ -182,17 +183,17 @@ pages_pack_err:
return rc;
}
-/*
- * dma_map_host_va - DMA mapping of the given host virtual address.
- * @hdev: habanalabs device structure
- * @addr: the host virtual address of the memory area
- * @size: the size of the memory area
- * @p_userptr: pointer to result userptr structure
+/**
+ * dma_map_host_va() - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure.
+ * @addr: the host virtual address of the memory area.
+ * @size: the size of the memory area.
+ * @p_userptr: pointer to result userptr structure.
*
* This function does the following:
- * - Allocate userptr structure
- * - Pin the given host memory using the userptr structure
- * - Perform DMA mapping to have the DMA addresses of the pages
+ * - Allocate userptr structure.
+ * - Pin the given host memory using the userptr structure.
+ * - Perform DMA mapping to have the DMA addresses of the pages.
*/
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr **p_userptr)
@@ -236,14 +237,14 @@ userptr_err:
return rc;
}
-/*
- * dma_unmap_host_va - DMA unmapping of the given host virtual address.
- * @hdev: habanalabs device structure
- * @userptr: userptr to free
+/**
+ * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure.
+ * @userptr: userptr to free.
*
* This function does the following:
- * - Unpins the physical pages
- * - Frees the userptr structure
+ * - Unpins the physical pages.
+ * - Frees the userptr structure.
*/
static void dma_unmap_host_va(struct hl_device *hdev,
struct hl_userptr *userptr)
@@ -252,14 +253,13 @@ static void dma_unmap_host_va(struct hl_device *hdev,
kfree(userptr);
}
-/*
- * dram_pg_pool_do_release - free DRAM pages pool
- *
- * @ref : pointer to reference object
+/**
+ * dram_pg_pool_do_release() - free DRAM pages pool
+ * @ref: pointer to reference object.
*
* This function does the following:
- * - Frees the idr structure of physical pages handles
- * - Frees the generic pool of DRAM physical pages
+ * - Frees the idr structure of physical pages handles.
+ * - Frees the generic pool of DRAM physical pages.
*/
static void dram_pg_pool_do_release(struct kref *ref)
{
@@ -274,15 +274,15 @@ static void dram_pg_pool_do_release(struct kref *ref)
gen_pool_destroy(vm->dram_pg_pool);
}
-/*
- * free_phys_pg_pack - free physical page pack
- * @hdev: habanalabs device structure
- * @phys_pg_pack: physical page pack to free
+/**
+ * free_phys_pg_pack() - free physical page pack.
+ * @hdev: habanalabs device structure.
+ * @phys_pg_pack: physical page pack to free.
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
- * structure by returning it to the general pool
- * - Free the hl_vm_phys_pg_pack structure
+ * structure by returning it to the general pool.
+ * - Free the hl_vm_phys_pg_pack structure.
*/
static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
@@ -313,20 +313,20 @@ static void free_phys_pg_pack(struct hl_device *hdev,
kfree(phys_pg_pack);
}
-/*
- * free_device_memory - free device memory
- *
- * @ctx : current context
- * @handle : handle of the memory chunk to free
+/**
+ * free_device_memory() - free device memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters containing the requested size.
*
* This function does the following:
- * - Free the device memory related to the given handle
+ * - Free the device memory related to the given handle.
*/
-static int free_device_memory(struct hl_ctx *ctx, u32 handle)
+static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u32 handle = args->free.handle;
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
@@ -361,16 +361,15 @@ static int free_device_memory(struct hl_ctx *ctx, u32 handle)
return 0;
}
-/*
- * clear_va_list_locked - free virtual addresses list
- *
- * @hdev : habanalabs device structure
- * @va_list : list of virtual addresses to free
+/**
+ * clear_va_list_locked() - free virtual addresses list.
+ * @hdev: habanalabs device structure.
+ * @va_list: list of virtual addresses to free.
*
* This function does the following:
- * - Iterate over the list and free each virtual addresses block
+ * - Iterate over the list and free each virtual addresses block.
*
- * This function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static void clear_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
@@ -383,16 +382,15 @@ static void clear_va_list_locked(struct hl_device *hdev,
}
}
-/*
- * print_va_list_locked - print virtual addresses list
- *
- * @hdev : habanalabs device structure
- * @va_list : list of virtual addresses to print
+/**
+ * print_va_list_locked() - print virtual addresses list.
+ * @hdev: habanalabs device structure.
+ * @va_list: list of virtual addresses to print.
*
* This function does the following:
- * - Iterate over the list and print each virtual addresses block
+ * - Iterate over the list and print each virtual addresses block.
*
- * This function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static void print_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
@@ -409,18 +407,17 @@ static void print_va_list_locked(struct hl_device *hdev,
#endif
}
-/*
- * merge_va_blocks_locked - merge a virtual block if possible
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @va_block : virtual block to merge with adjacent blocks
+/**
+ * merge_va_blocks_locked() - merge a virtual block if possible.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @va_block: virtual block to merge with adjacent blocks.
*
* This function does the following:
* - Merge the given blocks with the adjacent blocks if their virtual ranges
- * create a contiguous virtual range
+ * create a contiguous virtual range.
*
- * This Function should be called only when va_list lock is taken
+ * This function should be called only when the va_list lock is taken.
*/
static void merge_va_blocks_locked(struct hl_device *hdev,
struct list_head *va_list, struct hl_vm_va_block *va_block)
@@ -445,19 +442,18 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
}
}
-/*
- * add_va_block_locked - add a virtual block to the virtual addresses list
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @start : start virtual address
- * @end : end virtual address
+/**
+ * add_va_block_locked() - add a virtual block to the virtual addresses list.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Add the given block to the virtual blocks list and merge with other
- * blocks if a contiguous virtual block can be created
+ * - Add the given block to the virtual blocks list and merge with other blocks
+ * if a contiguous virtual block can be created.
*
- * This Function should be called only when va_list lock is taken
+ * This function should be called only when the va_list lock is taken.
*/
static int add_va_block_locked(struct hl_device *hdev,
struct list_head *va_list, u64 start, u64 end)
@@ -501,16 +497,15 @@ static int add_va_block_locked(struct hl_device *hdev,
return 0;
}
-/*
- * add_va_block - wrapper for add_va_block_locked
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @start : start virtual address
- * @end : end virtual address
+/**
+ * add_va_block() - wrapper for add_va_block_locked.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Takes the list lock and calls add_va_block_locked
+ * - Takes the list lock and calls add_va_block_locked.
*/
static inline int add_va_block(struct hl_device *hdev,
struct hl_va_range *va_range, u64 start, u64 end)
@@ -524,8 +519,9 @@ static inline int add_va_block(struct hl_device *hdev,
return rc;
}
-/*
+/**
* get_va_block() - get a virtual block for the given size and alignment.
+ *
* @hdev: pointer to the habanalabs device structure.
* @va_range: pointer to the virtual addresses range.
* @size: requested block size.
@@ -534,33 +530,51 @@ static inline int add_va_block(struct hl_device *hdev,
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
- * given size and alignment.
+ * given size, hint address and alignment.
* - Reserve the requested block and update the list.
* - Return the start address of the virtual block.
*/
-static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
- u64 size, u64 hint_addr, u32 va_block_align)
+static u64 get_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range,
+ u64 size, u64 hint_addr, u32 va_block_align)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
- u64 valid_start, valid_size, prev_start, prev_end, align_mask,
- res_valid_start = 0, res_valid_size = 0;
+ u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
+ align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
bool add_prev = false;
+ bool is_align_pow_2 = is_power_of_2(va_range->page_size);
- align_mask = ~((u64)va_block_align - 1);
+ if (is_align_pow_2)
+ align_mask = ~((u64)va_block_align - 1);
+ else
+ /*
+	 * With a non-power-of-2 range we work only with page granularity,
+	 * and the start address is page aligned, so there is no need for
+	 * alignment checking.
+ */
+ size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
+ va_range->page_size;
- /* check if hint_addr is aligned */
- if (hint_addr & (va_block_align - 1))
+ tmp_hint_addr = hint_addr;
+
+ /* Check if we need to ignore hint address */
+ if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
+ (!is_align_pow_2 &&
+ do_div(tmp_hint_addr, va_range->page_size))) {
+ dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n",
+ hint_addr);
hint_addr = 0;
+ }
mutex_lock(&va_range->lock);
print_va_list_locked(hdev, &va_range->list);
list_for_each_entry(va_block, &va_range->list, node) {
- /* calc the first possible aligned addr */
+ /* Calc the first possible aligned addr */
valid_start = va_block->start;
- if (valid_start & (va_block_align - 1)) {
+ if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
valid_start &= align_mask;
valid_start += va_block_align;
if (valid_start > va_block->end)
@@ -568,35 +582,41 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
}
valid_size = va_block->end - valid_start;
+ if (valid_size < size)
+ continue;
- if (valid_size >= size &&
- (!new_va_block || valid_size < res_valid_size)) {
+ /* Pick the minimal length block which has the required size */
+ if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
- res_valid_start = valid_start;
- res_valid_size = valid_size;
+ reserved_valid_start = valid_start;
+ reserved_valid_size = valid_size;
}
if (hint_addr && hint_addr >= valid_start &&
- ((hint_addr + size) <= va_block->end)) {
+ (hint_addr + size) <= va_block->end) {
new_va_block = va_block;
- res_valid_start = hint_addr;
- res_valid_size = valid_size;
+ reserved_valid_start = hint_addr;
+ reserved_valid_size = valid_size;
break;
}
}
if (!new_va_block) {
dev_err(hdev->dev, "no available va block for size %llu\n",
- size);
+ size);
goto out;
}
- if (res_valid_start > new_va_block->start) {
+ /*
+	 * If there is some leftover range due to reserving the new va block,
+	 * return it to the main virtual addresses list.
+ */
+ if (reserved_valid_start > new_va_block->start) {
prev_start = new_va_block->start;
- prev_end = res_valid_start - 1;
+ prev_end = reserved_valid_start - 1;
- new_va_block->start = res_valid_start;
- new_va_block->size = res_valid_size;
+ new_va_block->start = reserved_valid_start;
+ new_va_block->size = reserved_valid_size;
add_prev = true;
}
@@ -617,7 +637,7 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
out:
mutex_unlock(&va_range->lock);
- return res_valid_start;
+ return reserved_valid_start;
}
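A minimal userspace sketch of the two alignment strategies used in get_va_block() above; standard C only, all values illustrative. It shows why the bitmask form is valid only for power-of-2 sizes, while the divide/multiply form used for non-power-of-2 DRAM pages works for any page size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Align up with a bitmask -- valid only when 'align' is a power of 2. */
static uint64_t align_up_pow2(uint64_t addr, uint64_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

/* Round a size up to whole pages -- works for any page size. */
static uint64_t round_up_pages(uint64_t size, uint64_t page_size)
{
	return ((size + page_size - 1) / page_size) * page_size;
}

int main(void)
{
	/* Power-of-2 page size: both forms agree. */
	assert(align_up_pow2(0x1234, 0x1000) == 0x2000);
	assert(round_up_pages(0x1234, 0x1000) == 0x2000);

	/* Non-power-of-2 page (say a 3MB DRAM page): the bitmask form
	 * would silently produce garbage, so only the divide/multiply
	 * form is usable -- which is what the driver path above does.
	 */
	assert(round_up_pages(5ull << 20, 3ull << 20) == (6ull << 20));

	printf("alignment sketches OK\n");
	return 0;
}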
/*
@@ -644,9 +664,9 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
/**
* hl_get_va_range_type() - get va_range type for the given address and size.
- * @address: The start address of the area we want to validate.
- * @size: The size in bytes of the area we want to validate.
- * @type: returned va_range type
+ * @address: the start address of the area we want to validate.
+ * @size: the size in bytes of the area we want to validate.
+ * @type: returned va_range type.
*
* Return: true if the area is inside a valid range, false otherwise.
*/
@@ -667,16 +687,15 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
return -EINVAL;
}
-/*
- * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block
- *
+/**
+ * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
* @hdev: pointer to the habanalabs device structure
- * @ctx: current context
- * @start: start virtual address
- * @end: end virtual address
+ * @ctx: pointer to the context structure.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Takes the list lock and calls add_va_block_locked
+ * - Takes the list lock and calls add_va_block_locked.
*/
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size)
@@ -701,11 +720,10 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
return rc;
}
-/*
- * get_sg_info - get number of pages and the DMA address from SG list
- *
- * @sg : the SG list
- * @dma_addr : pointer to DMA address to return
+/**
+ * get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
*
* Calculate the number of consecutive pages described by the SG list. Take the
* offset of the address in the first page, add to it the length and round it up
@@ -719,17 +737,17 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
-/*
- * init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- * @ctx: current context
- * @userptr: userptr to initialize from
- * @pphys_pg_pack: result pointer
+/**
+ * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
+ * memory.
+ * @ctx: pointer to the context structure.
+ * @userptr: userptr to initialize from.
+ * @pphys_pg_pack: result pointer.
*
* This function does the following:
- * - Pin the physical pages related to the given virtual block
+ * - Pin the physical pages related to the given virtual block.
* - Create a physical page pack from the physical pages related to the given
- * virtual block
+ * virtual block.
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
@@ -821,16 +839,16 @@ page_pack_arr_mem_err:
return rc;
}
-/*
- * map_phys_pg_pack - maps the physical page pack.
- * @ctx: current context
- * @vaddr: start address of the virtual area to map from
- * @phys_pg_pack: the pack of physical pages to map to
+/**
+ * map_phys_pg_pack() - maps the physical page pack.
+ * @ctx: pointer to the context structure.
+ * @vaddr: start address of the virtual area to map from.
+ * @phys_pg_pack: the pack of physical pages to map to.
*
* This function does the following:
- * - Maps each chunk of virtual memory to matching physical chunk
- * - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise
+ * - Maps each chunk of virtual memory to matching physical chunk.
+ * - Stores number of successful mappings in the given argument.
+ * - Returns 0 on success, error code otherwise.
*/
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
@@ -875,19 +893,21 @@ err:
return rc;
}
-/*
- * unmap_phys_pg_pack - unmaps the physical page pack
- * @ctx: current context
- * @vaddr: start address of the virtual area to unmap
- * @phys_pg_pack: the pack of physical pages to unmap
+/**
+ * unmap_phys_pg_pack() - unmaps the physical page pack.
+ * @ctx: pointer to the context structure.
+ * @vaddr: start address of the virtual area to unmap.
+ * @phys_pg_pack: the pack of physical pages to unmap.
*/
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
+ bool is_host_addr;
u32 page_size;
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
@@ -900,14 +920,18 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
+ *
+ * In addition, when unmapping host memory we pass through
+		 * the Linux kernel to unpin the pages, and that takes a long
+		 * time. Therefore, sleep every 32K pages to avoid a soft lockup.
*/
- if (hdev->pldm)
- usleep_range(500, 1000);
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
}
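The (i & 0x7FFF) == 0 test above is a cheap "every 32K iterations" check; a small standalone sketch (standard C, illustrative counts only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i, hits = 0;

	/* (i & 0x7FFF) == 0 is equivalent to i % 32768 == 0,
	 * because 32768 is a power of 2.
	 */
	for (i = 0; i < (1u << 20); i++)
		if ((i & 0x7FFF) == 0)
			hits++;

	assert(hits == (1u << 20) / 32768);	/* 32 sleep points for 1M pages */
	printf("would sleep %llu times\n", (unsigned long long)hits);
	return 0;
}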
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
- u64 *paddr)
+ u64 *paddr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
@@ -930,19 +954,18 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
return 0;
}
-/*
- * map_device_va - map the given memory
- *
- * @ctx : current context
- * @args : host parameters with handle/host virtual address
- * @device_addr : pointer to result device virtual address
+/**
+ * map_device_va() - map the given memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters with handle/host virtual address.
+ * @device_addr: pointer to result device virtual address.
*
* This function does the following:
* - If given a physical device memory handle, map to a device virtual block
- * and return the start address of this block
+ * and return the start address of this block.
* - If given a host virtual address and size, find the related physical pages,
* map a device virtual block to this pages and return the start address of
- * this block
+ * this block.
*/
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *device_addr)
@@ -1028,7 +1051,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
hint_addr = args->map_device.hint_addr;
- /* DRAM VA alignment is the same as the DRAM page size */
+ /* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
va_block_align = hdev->asic_prop.dmmu.page_size;
}
@@ -1119,24 +1142,26 @@ init_page_pack_err:
return rc;
}
-/*
- * unmap_device_va - unmap the given device virtual address
- *
- * @ctx : current context
- * @vaddr : device virtual address to unmap
- * @ctx_free : true if in context free flow, false otherwise.
+/**
+ * unmap_device_va() - unmap the given device virtual address.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters with device virtual address to unmap.
+ * @ctx_free: true if in context free flow, false otherwise.
*
* This function does the following:
- * - Unmap the physical pages related to the given virtual address
- * - return the device virtual block to the virtual block list
+ * - unmap the physical pages related to the given virtual address.
+ * - return the device virtual block to the virtual block list.
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
+static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
+ bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
+ u64 vaddr = args->unmap.device_virt_addr;
enum vm_type_t *vm_type;
bool is_userptr;
int rc = 0;
@@ -1195,7 +1220,13 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
goto mapping_cnt_err;
}
- vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
+ if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
+ vaddr = prop->dram_base_address +
+ DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
+ phys_pg_pack->page_size) *
+ phys_pg_pack->page_size;
+ else
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
@@ -1258,12 +1289,90 @@ vm_type_err:
return rc;
}
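A standalone sketch of the non-power-of-2 rounding done in unmap_device_va() above; the DRAM base and page size are made-up values. A bitmask round-down would be wrong here, hence the divide/multiply relative to the DRAM base:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round a device VA down to the start of its (possibly non-power-of-2)
 * page, relative to the DRAM base, mirroring the logic above.
 */
static uint64_t page_start(uint64_t vaddr, uint64_t dram_base,
			   uint64_t page_size)
{
	return dram_base + ((vaddr - dram_base) / page_size) * page_size;
}

int main(void)
{
	uint64_t base = 0x20000000ull;		/* made-up DRAM base */
	uint64_t page = 3ull << 20;		/* 3MB DRAM page */
	uint64_t va = base + page * 5 + 0x1234;	/* somewhere inside page 5 */

	assert(page_start(va, base, page) == base + page * 5);
	printf("page_start OK\n");
	return 0;
}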
+static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
+ u32 *size)
+{
+ u32 block_id = 0;
+ int rc;
+
+ rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
+
+ *handle = block_id | HL_MMAP_TYPE_BLOCK;
+ *handle <<= PAGE_SHIFT;
+
+ return rc;
+}
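A userspace sketch of the handle encoding done by map_block() above. PAGE_SHIFT and the HL_MMAP_TYPE_BLOCK value here are placeholders, not the driver's real constants; the point is that mmap() divides the byte offset by the page size, so the kernel later sees the type-tagged block id in vma->vm_pgoff:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	   12			/* 4KB pages assumed */
#define HL_MMAP_TYPE_BLOCK (1ull << 32)		/* placeholder type bit */

/* Encode as map_block() does: type-tag the block id, then shift left so
 * the result can travel through mmap()'s byte offset.
 */
static uint64_t encode_handle(uint32_t block_id)
{
	return ((uint64_t)block_id | HL_MMAP_TYPE_BLOCK) << PAGE_SHIFT;
}

/* vma->vm_pgoff is handle >> PAGE_SHIFT; dropping the type bits
 * (assumed to sit above bit 31 here) recovers the block id.
 */
static uint32_t decode_block_id(uint64_t handle)
{
	return (uint32_t)(handle >> PAGE_SHIFT);
}

int main(void)
{
	uint64_t h = encode_handle(0x42);

	assert(decode_block_id(h) == 0x42);
	printf("handle 0x%llx -> block id 0x%x\n",
	       (unsigned long long)h, decode_block_id(h));
	return 0;
}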
+
+static void hw_block_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_ctx *ctx = (struct hl_ctx *) vma->vm_private_data;
+
+ hl_ctx_put(ctx);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct hw_block_vm_ops = {
+ .close = hw_block_vm_close
+};
+
+/**
+ * hl_hw_block_mmap() - mmap a hw block to user.
+ * @hpriv: pointer to the private data of the fd.
+ * @vma: pointer to the vm_area_struct of the process.
+ *
+ * The driver increments the context reference count for every HW block
+ * mapped in order to prevent the user from closing the FD without
+ * unmapping first.
+ */
+int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 block_id, block_size;
+ int rc;
+
+ /* We use the page offset to hold the block id and thus we need to clear
+ * it before doing the mmap itself
+ */
+ block_id = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+
+ /* Driver only allows mapping of a complete HW block */
+ block_size = vma->vm_end - vma->vm_start;
+
+#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (uintptr_t) vma->vm_start, block_size)) {
+#else
+ if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
+#endif
+ dev_err(hdev->dev,
+ "user pointer is invalid - 0x%lx\n",
+ vma->vm_start);
+
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &hw_block_vm_ops;
+ vma->vm_private_data = hpriv->ctx;
+
+ hl_ctx_get(hdev, hpriv->ctx);
+
+ rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
+ if (rc) {
+ hl_ctx_put(hpriv->ctx);
+ return rc;
+ }
+
+ vma->vm_pgoff = block_id;
+
+ return 0;
+}
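A hypothetical userspace counterpart to hl_hw_block_mmap() above; 'fd' and the handle value are placeholders (no real device is opened), shown only to illustrate passing the block handle as the mmap() offset:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* The ioctl is assumed to have returned block_handle and block_size;
 * the handle goes in as the mmap() offset, where the driver recovers
 * the block id from vm_pgoff.
 */
static void *map_hw_block(int fd, uint64_t block_handle, uint32_t block_size)
{
	return mmap(NULL, block_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, (off_t)block_handle);
}

int main(void)
{
	/* No real device here, so the call fails; this only shows the
	 * call shape and the error handling.
	 */
	void *p = map_hw_block(-1, 0x42ull << 12, 4096);

	if (p == MAP_FAILED)
		perror("mmap");
	return 0;
}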
+
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
- u64 device_addr = 0;
- u32 handle = 0;
+ u64 block_handle, device_addr = 0;
+ u32 handle = 0, block_size;
int rc;
switch (args->in.op) {
@@ -1286,7 +1395,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
break;
case HL_MEM_OP_FREE:
- rc = free_device_memory(ctx, args->in.free.handle);
+ rc = free_device_memory(ctx, &args->in);
break;
case HL_MEM_OP_MAP:
@@ -1295,7 +1404,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
rc = 0;
} else {
rc = get_paddr_from_handle(ctx, &args->in,
- &device_addr);
+ &device_addr);
}
memset(args, 0, sizeof(*args));
@@ -1306,6 +1415,13 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
rc = 0;
break;
+ case HL_MEM_OP_MAP_BLOCK:
+ rc = map_block(hdev, args->in.map_block.block_addr,
+ &block_handle, &block_size);
+ args->out.block_handle = block_handle;
+ args->out.block_size = block_size;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
@@ -1322,8 +1438,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
union hl_mem_args *args = data;
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
- u64 device_addr = 0;
- u32 handle = 0;
+ u64 block_handle, device_addr = 0;
+ u32 handle = 0, block_size;
int rc;
if (!hl_device_operational(hdev, &status)) {
@@ -1394,7 +1510,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
- rc = free_device_memory(ctx, args->in.free.handle);
+ rc = free_device_memory(ctx, &args->in);
break;
case HL_MEM_OP_MAP:
@@ -1405,8 +1521,14 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
- false);
+ rc = unmap_device_va(ctx, &args->in, false);
+ break;
+
+ case HL_MEM_OP_MAP_BLOCK:
+ rc = map_block(hdev, args->in.map_block.block_addr,
+ &block_handle, &block_size);
+ args->out.block_handle = block_handle;
+ args->out.block_size = block_size;
break;
default:
@@ -1430,58 +1552,53 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EFAULT;
}
- userptr->vec = frame_vector_create(npages);
- if (!userptr->vec) {
- dev_err(hdev->dev, "Failed to create frame vector\n");
+ userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
+ GFP_KERNEL);
+ if (!userptr->pages)
return -ENOMEM;
- }
- rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- userptr->vec);
+ rc = pin_user_pages_fast(start, npages,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ userptr->pages);
if (rc != npages) {
dev_err(hdev->dev,
"Failed to map host memory, user ptr probably wrong\n");
if (rc < 0)
- goto destroy_framevec;
- rc = -EFAULT;
- goto put_framevec;
- }
-
- if (frame_vector_to_pages(userptr->vec) < 0) {
- dev_err(hdev->dev,
- "Failed to translate frame vector to pages\n");
+ goto destroy_pages;
+ npages = rc;
rc = -EFAULT;
- goto put_framevec;
+ goto put_pages;
}
+ userptr->npages = npages;
rc = sg_alloc_table_from_pages(userptr->sgt,
- frame_vector_pages(userptr->vec),
- npages, offset, size, GFP_ATOMIC);
+ userptr->pages,
+ npages, offset, size, GFP_ATOMIC);
if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n");
- goto put_framevec;
+ goto put_pages;
}
return 0;
-put_framevec:
- put_vaddr_frames(userptr->vec);
-destroy_framevec:
- frame_vector_destroy(userptr->vec);
+put_pages:
+ unpin_user_pages(userptr->pages, npages);
+destroy_pages:
+ kvfree(userptr->pages);
return rc;
}
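A small sketch of the error-path ordering that get_user_memory() now follows: on a partial pin (rc < npages), only the pages actually pinned are released before the array is freed. malloc/free stand in for kvmalloc_array/kvfree, and the counts are made up:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int npages = 8, rc = 5;	/* pretend pinning stopped after 5 pages */
	void **pages = malloc(npages * sizeof(*pages));

	if (!pages)
		return 1;

	if (rc != npages) {
		if (rc > 0) {
			npages = rc;	/* release only what was pinned */
			printf("unpinning %d pages\n", npages);
		}
		free(pages);		/* then drop the page array */
		return 1;
	}

	free(pages);
	return 0;
}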
-/*
- * hl_pin_host_memory - pins a chunk of host memory.
- * @hdev: pointer to the habanalabs device structure
- * @addr: the host virtual address of the memory area
- * @size: the size of the memory area
- * @userptr: pointer to hl_userptr structure
+/**
+ * hl_pin_host_memory() - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure.
+ * @addr: the host virtual address of the memory area.
+ * @size: the size of the memory area.
+ * @userptr: pointer to hl_userptr structure.
*
* This function does the following:
- * - Pins the physical pages
- * - Create an SG list from those pages
+ * - Pins the physical pages.
+ * - Create an SG list from those pages.
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr)
@@ -1554,8 +1671,6 @@ free_sgt:
*/
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
- struct page **pages;
-
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
@@ -1563,15 +1678,8 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
userptr->sgt->nents,
userptr->dir);
- pages = frame_vector_pages(userptr->vec);
- if (!IS_ERR(pages)) {
- int i;
-
- for (i = 0; i < frame_vector_count(userptr->vec); i++)
- set_page_dirty_lock(pages[i]);
- }
- put_vaddr_frames(userptr->vec);
- frame_vector_destroy(userptr->vec);
+ unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
+ kvfree(userptr->pages);
list_del(&userptr->job_node);
@@ -1579,11 +1687,10 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
kfree(userptr->sgt);
}
-/*
- * hl_userptr_delete_list - clear userptr list
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
+/**
+ * hl_userptr_delete_list() - clear userptr list.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to clear.
*
* This function does the following:
* - Iterates over the list and unpins the host memory and frees the userptr
@@ -1602,12 +1709,11 @@ void hl_userptr_delete_list(struct hl_device *hdev,
INIT_LIST_HEAD(userptr_list);
}
-/*
- * hl_userptr_is_pinned - returns whether the given userptr is pinned
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
- * @userptr : pointer to userptr to check
+/**
+ * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to search in.
+ * @userptr: pointer to userptr to check.
*
* This function does the following:
* - Iterates over the list and checks if the given userptr is in it, means is
@@ -1625,12 +1731,12 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
return false;
}
-/*
- * va_range_init - initialize virtual addresses range
- * @hdev: pointer to the habanalabs device structure
- * @va_range: pointer to the range to initialize
- * @start: range start address
- * @end: range end address
+/**
+ * va_range_init() - initialize virtual addresses range.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_range: pointer to the range to initialize.
+ * @start: range start address.
+ * @end: range end address.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
@@ -1643,15 +1749,21 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
INIT_LIST_HEAD(&va_range->list);
- /* PAGE_SIZE alignment */
+ /*
+ * PAGE_SIZE alignment
+	 * It is the caller's responsibility to align the addresses if the
+	 * page size is not a power of 2.
+ */
- if (start & (PAGE_SIZE - 1)) {
- start &= PAGE_MASK;
- start += PAGE_SIZE;
- }
+ if (is_power_of_2(page_size)) {
+ if (start & (PAGE_SIZE - 1)) {
+ start &= PAGE_MASK;
+ start += PAGE_SIZE;
+ }
- if (end & (PAGE_SIZE - 1))
- end &= PAGE_MASK;
+ if (end & (PAGE_SIZE - 1))
+ end &= PAGE_MASK;
+ }
if (start >= end) {
dev_err(hdev->dev, "too small vm range for va list\n");
@@ -1672,13 +1784,13 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
return 0;
}
-/*
- * va_range_fini() - clear a virtual addresses range
- * @hdev: pointer to the habanalabs structure
- * va_range: pointer to virtual addresses range
+/**
+ * va_range_fini() - clear a virtual addresses range.
+ * @hdev: pointer to the habanalabs structure.
+ * @va_range: pointer to the virtual addresses range.
*
* This function does the following:
- * - Frees the virtual addresses block list and its lock
+ * - Frees the virtual addresses block list and its lock.
*/
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
@@ -1690,22 +1802,22 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
kfree(va_range);
}
-/*
- * vm_ctx_init_with_ranges() - initialize virtual memory for context
- * @ctx: pointer to the habanalabs context structure
+/**
+ * vm_ctx_init_with_ranges() - initialize virtual memory for context.
+ * @ctx: pointer to the habanalabs context structure.
* @host_range_start: host virtual addresses range start.
* @host_range_end: host virtual addresses range end.
* @host_huge_range_start: host virtual addresses range start for memory
- * allocated with huge pages.
+ * allocated with huge pages.
* @host_huge_range_end: host virtual addresses range end for memory allocated
* with huge pages.
* @dram_range_start: dram virtual addresses range start.
* @dram_range_end: dram virtual addresses range end.
*
* This function initializes the following:
- * - MMU for context
- * - Virtual address to area descriptor hashtable
- * - Virtual block list of available virtual memory
+ * - MMU for context.
+ * - Virtual address to area descriptor hashtable.
+ * - Virtual block list of available virtual memory.
*/
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
u64 host_range_start,
@@ -1826,7 +1938,8 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
dram_range_start = prop->dmmu.start_addr;
dram_range_end = prop->dmmu.end_addr;
- dram_page_size = prop->dmmu.page_size;
+ dram_page_size = prop->dram_page_size ?
+ prop->dram_page_size : prop->dmmu.page_size;
host_range_start = prop->pmmu.start_addr;
host_range_end = prop->pmmu.end_addr;
host_page_size = prop->pmmu.page_size;
@@ -1840,15 +1953,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
dram_range_start, dram_range_end, dram_page_size);
}
-/*
- * hl_vm_ctx_fini - virtual memory teardown of context
- *
- * @ctx : pointer to the habanalabs context structure
+/**
+ * hl_vm_ctx_fini() - virtual memory teardown of context.
+ * @ctx: pointer to the habanalabs context structure.
*
 * This function performs teardown of the following:
- * - Virtual block list of available virtual memory
- * - Virtual address to area descriptor hashtable
- * - MMU for context
+ * - Virtual block list of available virtual memory.
+ * - Virtual address to area descriptor hashtable.
+ * - MMU for context.
*
* In addition this function does the following:
* - Unmaps the existing hashtable nodes if the hashtable is not empty. The
@@ -1867,9 +1979,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
struct hl_vm_phys_pg_pack *phys_pg_list;
struct hl_vm_hash_node *hnode;
struct hlist_node *tmp_node;
+ struct hl_mem_in args;
int i;
- if (!ctx->hdev->mmu_enable)
+ if (!hdev->mmu_enable)
return;
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
@@ -1886,13 +1999,18 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr, true);
+ args.unmap.device_virt_addr = hnode->vaddr;
+ unmap_device_va(ctx, &args, true);
}
+ mutex_lock(&ctx->mmu_lock);
+
/* invalidate the cache once after the unmapping loop */
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+ mutex_unlock(&ctx->mmu_lock);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
@@ -1919,19 +2037,19 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* because the user notifies us on allocations. If the user is no more,
* all DRAM is available
*/
- if (!ctx->hdev->asic_prop.dram_supports_virtual_memory)
- atomic64_set(&ctx->hdev->dram_used_mem, 0);
+ if (ctx->asid != HL_KERNEL_ASID_ID &&
+ !hdev->asic_prop.dram_supports_virtual_memory)
+ atomic64_set(&hdev->dram_used_mem, 0);
}
-/*
- * hl_vm_init - initialize virtual memory module
- *
- * @hdev : pointer to the habanalabs device structure
+/**
+ * hl_vm_init() - initialize virtual memory module.
+ * @hdev: pointer to the habanalabs device structure.
*
* This function initializes the following:
- * - MMU module
- * - DRAM physical pages pool of 2MB
- * - Idr for device memory allocation handles
+ * - MMU module.
+ * - DRAM physical pages pool of 2MB.
+ * - Idr for device memory allocation handles.
*/
int hl_vm_init(struct hl_device *hdev)
{
@@ -1939,7 +2057,13 @@ int hl_vm_init(struct hl_device *hdev)
struct hl_vm *vm = &hdev->vm;
int rc;
- vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
+ if (is_power_of_2(prop->dram_page_size))
+ vm->dram_pg_pool =
+ gen_pool_create(__ffs(prop->dram_page_size), -1);
+ else
+ vm->dram_pg_pool =
+ gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
+
if (!vm->dram_pg_pool) {
dev_err(hdev->dev, "Failed to create dram page pool\n");
return -ENOMEM;
@@ -1972,15 +2096,14 @@ pool_add_err:
return rc;
}
-/*
- * hl_vm_fini - virtual memory module teardown
- *
- * @hdev : pointer to the habanalabs device structure
+/**
+ * hl_vm_fini() - virtual memory module teardown.
+ * @hdev: pointer to the habanalabs device structure.
*
 * This function performs teardown of the following:
- * - Idr for device memory allocation handles
- * - DRAM physical pages pool of 2MB
- * - MMU module
+ * - Idr for device memory allocation handles.
+ * - DRAM physical pages pool of 2MB.
+ * - MMU module.
*/
void hl_vm_fini(struct hl_device *hdev)
{
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/misc/habanalabs/common/mmu/Makefile
new file mode 100644
index 000000000000..d852c3874658
--- /dev/null
+++ b/drivers/misc/habanalabs/common/mmu/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 33ae953d3a36..71703a32350f 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -7,9 +7,9 @@
#include <linux/slab.h>
-#include "habanalabs.h"
+#include "../habanalabs.h"
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
@@ -166,7 +166,6 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
mmu_prop = &prop->pmmu;
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
-
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and unmap them separately.
@@ -174,11 +173,21 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't unmap\n",
- page_size, mmu_prop->page_size >> 10);
+ /*
+ * MMU page size may differ from DRAM page size.
+		 * In such a case, work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when calculating
+		 * the address to remove from the MMU page table.
+ */
+ if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
+ real_page_size = prop->dram_page_size;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
- return -EFAULT;
+ return -EFAULT;
+ }
}
npages = page_size / real_page_size;
@@ -236,7 +245,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
@@ -253,6 +262,17 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
*/
if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
+ } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
+ (prop->dram_page_size < mmu_prop->page_size)) {
+ /*
+ * MMU page size may differ from DRAM page size.
+		 * In such a case, work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when calculating
+		 * the address to place in the MMU page table (in that case,
+		 * also make sure that the dram_page_size is smaller than the
+		 * MMU page size).
+ */
+ real_page_size = prop->dram_page_size;
} else {
dev_err(hdev->dev,
"page size of %u is not %uKB aligned, can't map\n",
@@ -261,9 +281,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
return -EFAULT;
}
- WARN_ONCE((phys_addr & (real_page_size - 1)),
- "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
- phys_addr, real_page_size);
+ /*
+ * Verify that the phys and virt addresses are aligned with the
+	 * MMU page size (for DRAM this means checking the addresses
+	 * after scrambling).
+ */
+ if ((is_dram_addr &&
+ ((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
+ (mmu_prop->page_size - 1)) ||
+ (hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
+ (mmu_prop->page_size - 1)))) ||
+ (!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
+ (virt_addr & (real_page_size - 1)))))
+ dev_crit(hdev->dev,
+ "Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
+ phys_addr, virt_addr, real_page_size);
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
@@ -444,19 +476,53 @@ void hl_mmu_swap_in(struct hl_ctx *ctx)
hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
}
+static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
+ struct hl_mmu_hop_info *hops,
+ u64 *phys_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
+ u32 hop0_shift_off;
+ void *p;
+
+ /* last hop holds the phys address and flags */
+ if (hops->unscrambled_paddr)
+ tmp_phys_addr = hops->unscrambled_paddr;
+ else
+ tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
+
+ if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
+ p = &prop->pmmu_huge;
+ else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
+ p = &prop->pmmu;
+ else /* HL_VA_RANGE_TYPE_DRAM */
+ p = &prop->dmmu;
+
+ /*
+	 * Find the correct hop shift field in the hl_mmu_properties
+	 * structure in order to determine the right mask for the page
+	 * offset.
+ */
+ hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
+ p = (char *)p + hop0_shift_off;
+ p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
+ hop_shift = *(u64 *)p;
+ offset_mask = (1ull << hop_shift) - 1;
+ addr_mask = ~(offset_mask);
+ *phys_addr = (tmp_phys_addr & addr_mask) |
+ (virt_addr & offset_mask);
+}
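A userspace sketch of the mask arithmetic in hl_mmu_pa_page_with_offset() above, assuming a 2MB final hop for the example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Combine the physical page from the last hop PTE with the page offset
 * taken from the virtual address; hop_shift is the page shift of the
 * final hop (12 for 4KB, 21 for 2MB, ...).
 */
static uint64_t pa_with_offset(uint64_t pte_paddr, uint64_t vaddr,
			       unsigned int hop_shift)
{
	uint64_t offset_mask = (1ull << hop_shift) - 1;

	return (pte_paddr & ~offset_mask) | (vaddr & offset_mask);
}

int main(void)
{
	/* 2MB final hop: the low 21 bits come from the virtual address. */
	assert(pa_with_offset(0x80000000ull, 0x10123456ull, 21) ==
	       0x80123456ull);
	printf("pa_with_offset OK\n");
	return 0;
}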
+
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
struct hl_mmu_hop_info hops;
- u64 tmp_addr;
int rc;
rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
if (rc)
return rc;
- /* last hop holds the phys address and flags */
- tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val;
- *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK);
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);
return 0;
}
@@ -473,6 +539,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (!hdev->mmu_enable)
return -EOPNOTSUPP;
+ hops->scrambled_vaddr = virt_addr; /* assume no scrambling */
+
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
@@ -491,6 +559,11 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
mutex_unlock(&ctx->mmu_lock);
+ /* add page offset to physical address */
+ if (hops->unscrambled_paddr)
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
+ &hops->unscrambled_paddr);
+
return rc;
}
@@ -512,3 +585,28 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
return 0;
}
+
+/**
+ * hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
+ * @hdev: pointer to device data.
+ * @addr: The address to scramble.
+ *
+ * Return: The scrambled address.
+ */
+u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
+{
+ return addr;
+}
+
+/**
+ * hl_mmu_descramble_addr() - The generic mmu address descrambling
+ * routine.
+ * @hdev: pointer to device data.
+ * @addr: The address to descramble.
+ *
+ * Return: The un-scrambled address.
+ */
+u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
+{
+ return addr;
+}
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index 2ce6ea89d4fa..c5e93ff32586 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -5,8 +5,8 @@
* All Rights Reserved.
*/
-#include "habanalabs.h"
-#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
#include <linux/slab.h>
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ }
+
+	/* Make sure that if we arrive here again without init being called we
+	 * won't cause a kernel panic. This can happen, for example, if we fail
+	 * during the hard reset code at certain points
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/misc/habanalabs/common/pci/Makefile
index f3d599867619..dc922a686683 100644
--- a/drivers/net/ethernet/aurora/Makefile
+++ b/drivers/misc/habanalabs/common/pci/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AURORA_NB8800) += nb8800.o
+HL_COMMON_PCI_FILES := common/pci/pci.o
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index 923b2606e29f..b799f9258fb0 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -5,8 +5,8 @@
* All Rights Reserved.
*/
-#include "habanalabs.h"
-#include "../include/hw_ip/pci/pci_general.h"
+#include "../habanalabs.h"
+#include "../../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,48 +298,16 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
/**
- * hl_pci_set_dma_mask() - Set DMA masks for the device.
- * @hdev: Pointer to hl_device structure.
- *
- * This function sets the DMA masks (regular and consistent) for a specified
- * value. If it doesn't succeed, it tries to set it to a fall-back value
- *
- * Return: 0 on success, non-zero for failure.
- */
-static int hl_pci_set_dma_mask(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- int rc;
-
- /* set DMA mask */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
- return rc;
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci consistent dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
- return rc;
- }
-
- return 0;
-}
-
-/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
*
@@ -371,9 +343,14 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
- rc = hl_pci_set_dma_mask(hdev);
- if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(hdev->dma_mask));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to set dma mask to %d bits, error %d\n",
+ hdev->dma_mask, rc);
goto unmap_pci_bars;
+ }
return 0;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..9152242778f5 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -238,6 +225,12 @@ gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
"MSG AXI LBW returned with error"
};
+enum gaudi_sm_sei_cause {
+ GAUDI_SM_SEI_SO_OVERFLOW,
+ GAUDI_SM_SEI_LBW_4B_UNALIGNED,
+ GAUDI_SM_SEI_AXI_RESPONSE_ERR
+};
+
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
@@ -367,6 +360,10 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
struct hl_cs_job *job);
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
+static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
+ u32 num_regs, u32 val);
+static int gaudi_schedule_register_memset(struct hl_device *hdev,
+ u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -374,7 +371,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -530,6 +527,11 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->sync_stream_first_mon +
(num_sync_stream_queues * HL_RSVD_MONS);
+ prop->first_available_user_msix_interrupt = USHRT_MAX;
+
+ for (i = 0 ; i < HL_MAX_DCORES ; i++)
+ prop->first_available_cq[i] = USHRT_MAX;
+
/* disable fw security for now, set it in a later stage */
prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
@@ -667,12 +669,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +681,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +705,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
-
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
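A worked instance of the divider formula used above, as a standalone C program; the 50 MHz reference and the nr/nf/od/div_fctr values are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define PLL_REF_CLK 50	/* MHz -- assumed reference clock */

int main(void)
{
	uint32_t nr = 0, nf = 31, od = 1, div_fctr = 3;

	/* pll_clk = ref * (nf + 1) / ((nr + 1) * (od + 1)) */
	uint32_t pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
	/* divided PLL output: freq = pll_clk / (div_fctr + 1) */
	uint32_t freq = pll_clk / (div_fctr + 1);

	/* 50 * 32 / (1 * 2) = 800 MHz; 800 / 4 = 200 MHz */
	printf("pll_clk = %u MHz, output freq = %u MHz\n", pll_clk, freq);
	return 0;
}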
@@ -884,11 +853,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -953,11 +928,17 @@ static void gaudi_sob_group_hw_reset(struct kref *ref)
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- int i;
+ u64 base_addr;
+ int rc;
- for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- (hw_sob_group->base_sob_id + i) * 4, 0);
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob_group->base_sob_id * 4;
+ rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
+ base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
+ if (rc)
+ dev_err(hdev->dev,
+ "failed resetting sob group - sob base %u, count %u",
+ hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
kref_init(&hw_sob_group->kref);
}
@@ -1048,6 +1029,8 @@ static void gaudi_collective_master_init_job(struct hl_device *hdev,
cprop->hw_sob_group[sob_group_offset].base_sob_id;
master_monitor = prop->collective_mstr_mon_id[0];
+ cprop->hw_sob_group[sob_group_offset].queue_id = queue_id;
+
dev_dbg(hdev->dev,
"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
master_sob_base, cprop->mstr_sob_mask[0],
@@ -1110,7 +1093,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -1288,7 +1271,7 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
u32 queue_id, collective_queue, num_jobs;
u32 stream, nic_queue, nic_idx = 0;
bool skip;
- int i, rc;
+ int i, rc = 0;
/* Verify wait queue id is configured as master */
hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
@@ -1399,8 +1382,6 @@ static int gaudi_late_init(struct hl_device *hdev)
return rc;
}
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
-
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
@@ -1647,6 +1628,7 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
+ hdev->supports_staged_submission = true;
return 0;
@@ -2449,8 +2431,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3442,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3477,6 +3460,12 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
+		/* GC sends work to the DMA engine through the Upper CP in
+		 * DMA5, so we must not enable clock gating in that DMA
+ */
+ if (i == GAUDI_HBM_DMA_4)
+ enable = 0;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
enable ? QMAN_CGM1_PWR_GATE_EN : 0);
@@ -3513,7 +3502,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3743,6 +3732,7 @@ static int gaudi_init_cpu(struct hl_device *hdev)
static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_eq *eq;
u32 status;
struct hl_hw_queue *cpu_pq =
@@ -3799,6 +3789,10 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
+ /* update FW application security bits */
+ if (prop->fw_security_status_valid)
+ prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
@@ -3806,7 +3800,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3841,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+	/* In case clock gating was enabled in preboot, we need to disable
+	 * it here before touching the MME/TPC registers.
+	 * There is no need to take the clock gating mutex because when this
+	 * function runs, no other relevant code can run
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3886,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3928,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* I don't know what is the state of the CPU so make sure it is
* stopped in any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3975,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -4027,7 +4035,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -4441,9 +4450,12 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
- if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ)
+ if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) {
+ /* make sure device CPU will read latest data from host */
+ mb();
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GAUDI_EVENT_PI_UPDATE);
+ }
}
static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
@@ -4542,7 +4554,6 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 idle_mask = 0;
int rc = 0;
u64 val = 0;
@@ -4555,8 +4566,8 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
hdev,
mmDMA0_CORE_STS0/* dummy */,
val/* dummy */,
- (hdev->asic_funcs->is_device_idle(hdev,
- &idle_mask, NULL)),
+ (hdev->asic_funcs->is_device_idle(hdev, NULL,
+ 0, NULL)),
1000,
HBM_SCRUBBING_TIMEOUT_US);
if (rc) {
@@ -5084,7 +5095,8 @@ static int gaudi_validate_cb(struct hl_device *hdev,
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI-X interrupt
*/
- parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+ if (parser->completion)
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
return rc;
}
@@ -5311,8 +5323,11 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI interrupt
*/
- parser->patched_cb_size = parser->user_cb_size +
- sizeof(struct packet_msg_prot) * 2;
+ if (parser->completion)
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+ else
+ parser->patched_cb_size = parser->user_cb_size;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
@@ -5328,10 +5343,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -5400,10 +5415,10 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -5603,31 +5618,206 @@ release_cb:
return rc;
}
-static void gaudi_restore_sm_registers(struct hl_device *hdev)
+static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
+ u32 num_regs, u32 val)
+{
+ struct packet_msg_long *pkt;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int i, rc;
+
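+	/*
+	 * One MSG_LONG packet per register write, plus room for a trailing
+	 * MSG_PROT packet that serves as the completion indication.
+	 */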
+ cb_size = (sizeof(*pkt) * num_regs) + sizeof(struct packet_msg_prot);
+
+ if (cb_size > SZ_2M) {
+		dev_err(hdev->dev, "CB size must be smaller than %uMB",
+			SZ_2M / SZ_1M);
+ return -ENOMEM;
+ }
+
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
+ if (!cb)
+ return -EFAULT;
+
+ pkt = cb->kernel_address;
+
+ ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+
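+	/* target registers are contiguous, 4 bytes apart */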
+ for (i = 0; i < num_regs ; i++, pkt++) {
+ pkt->ctl = cpu_to_le32(ctl);
+ pkt->value = cpu_to_le32(val);
+ pkt->addr = cpu_to_le64(reg_base + (i * 4));
+ }
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ atomic_inc(&job->user_cb->cs_cnt);
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = cb_size;
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ atomic_dec(&cb->cs_cnt);
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int gaudi_schedule_register_memset(struct hl_device *hdev,
+ u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
{
+ struct hl_ctx *ctx = hdev->compute_ctx;
+ struct hl_pending_cb *pending_cb;
+ struct packet_msg_long *pkt;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
int i;
- for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) {
- WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
- WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
- WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+	/* If no compute context is available, or the context is going
+	 * down, memset the registers directly
+	 */
+ if (!ctx || kref_read(&ctx->refcount) == 0)
+ return gaudi_memset_registers(hdev, reg_base, num_regs, val);
+
+ cb_size = (sizeof(*pkt) * num_regs) +
+ sizeof(struct packet_msg_prot) * 2;
+
+ if (cb_size > SZ_2M) {
+		dev_err(hdev->dev, "CB size must be smaller than %uMB",
+			SZ_2M / SZ_1M);
+ return -ENOMEM;
+ }
+
+ pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
+ if (!pending_cb)
+ return -ENOMEM;
+
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
+ if (!cb) {
+ kfree(pending_cb);
+ return -EFAULT;
+ }
+
+ pkt = cb->kernel_address;
+
+ ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+
+ for (i = 0; i < num_regs ; i++, pkt++) {
+ pkt->ctl = cpu_to_le32(ctl);
+ pkt->value = cpu_to_le32(val);
+ pkt->addr = cpu_to_le64(reg_base + (i * 4));
+ }
+
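+	/*
+	 * Destroy the handle so it cannot be looked up anymore; the CB
+	 * object itself stays alive (reference taken at creation) until
+	 * the pending list entry is consumed.
+	 */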
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ pending_cb->cb = cb;
+ pending_cb->cb_size = cb_size;
+ /* The queue ID MUST be an external queue ID. Otherwise, we will
+ * have undefined behavior
+ */
+ pending_cb->hw_queue_id = hw_queue_id;
+
+ spin_lock(&ctx->pending_cb_lock);
+ list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
+
+ return 0;
+}
+
+static int gaudi_restore_sm_registers(struct hl_device *hdev)
+{
+ u64 base_addr;
+ u32 num_regs;
+ int rc;
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
}
- for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) {
- WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
- WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
- WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
}
- i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4;
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
- for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4);
+ num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
- i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4;
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 +
+ (GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4);
+ num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+		return rc;
+ }
- for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ return 0;
}
static void gaudi_restore_dma_registers(struct hl_device *hdev)
@@ -5684,18 +5874,23 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev)
}
}
-static void gaudi_restore_user_registers(struct hl_device *hdev)
+static int gaudi_restore_user_registers(struct hl_device *hdev)
{
- gaudi_restore_sm_registers(hdev);
+ int rc;
+
+ rc = gaudi_restore_sm_registers(hdev);
+ if (rc)
+ return rc;
+
gaudi_restore_dma_registers(hdev);
gaudi_restore_qm_registers(hdev);
+
+ return 0;
}
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
- gaudi_restore_user_registers(hdev);
-
- return 0;
+ return gaudi_restore_user_registers(hdev);
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
@@ -5754,8 +5949,6 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -5801,8 +5994,6 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -5852,8 +6043,6 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -5902,8 +6091,6 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -5948,7 +6135,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
return;
if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
- WARN(1, "asid %u is too big\n", asid);
+ dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
@@ -6251,7 +6438,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
dev_err_ratelimited(hdev->dev,
"Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
@@ -6682,6 +6869,34 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
}
+static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_sm_sei_data *sei_data)
+{
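+	/* SEI event IDs are contiguous, so the offset selects the SM instance */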
+ u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
+
+ switch (sei_data->sei_cause) {
+ case SM_SEI_SO_OVERFLOW:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: SO %u overflow/underflow",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ case SM_SEI_LBW_4B_UNALIGNED:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ case SM_SEI_AXI_RESPONSE_ERR:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: AXI ID %u response error",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ default:
+ dev_err(hdev->dev, "Unknown SM SEI cause %u",
+			sei_data->sei_cause);
+ break;
+ }
+}
+
static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
struct hl_eq_ecc_data *ecc_data)
{
@@ -6898,7 +7113,9 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
int err = 0;
- if (!hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_status_valid &&
+ (hdev->asic_prop.fw_app_security_map &
+ CPU_BOOT_DEV_STS0_HBM_ECC_EN)) {
if (!hbm_ecc_data) {
dev_err(hdev->dev, "No FW ECC data");
return 0;
@@ -6920,14 +7137,24 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
dev_err(hdev->dev,
- "HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
- device, ch, type, wr_par, rd_par, ca_par, serr, derr);
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch, wr_par, rd_par, ca_par, serr, derr);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%u, SEC_CNT=%d, DEC_CNT=%d\n",
+ device, ch, hbm_ecc_data->first_addr, type,
+ hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt,
+ hbm_ecc_data->dec_cnt);
err = 1;
return 0;
}
+ if (!hdev->asic_prop.fw_security_disabled) {
+ dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n");
+ return 0;
+ }
+
base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
@@ -7177,6 +7404,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
+ hl_fw_unmask_irq(hdev, event_type);
break;
case GAUDI_EVENT_TPC0_DEC:
@@ -7305,6 +7533,13 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
hl_fw_unmask_irq(hdev, event_type);
break;
+ case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_print_sm_sei_info(hdev, event_type,
+ &eq_entry->sm_sei_data);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
gaudi_print_clk_change_info(hdev, event_type);
hl_fw_unmask_irq(hdev, event_type);
@@ -7354,8 +7589,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_PS, 3);
WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
@@ -7371,8 +7604,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
WREG32(mmSTLB_INV_SET, 0);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -7395,8 +7626,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
hdev->hard_reset_pending)
return 0;
- mutex_lock(&hdev->mmu_cache_lock);
-
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
else
@@ -7424,8 +7653,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -7487,7 +7714,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+ rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -7507,13 +7734,14 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
- struct seq_file *s)
+static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s)
{
struct gaudi_device *gaudi = hdev->asic_specific;
const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
const char *nic_fmt = "%-5d%-9s%#-14x%#x\n";
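+	/* the caller's mask array is treated as an engine-ID bitmap */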
+ unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
bool is_idle = true, is_eng_idle, is_slave;
u64 offset;
@@ -7539,9 +7767,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_DMA_0 + dma_id);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
if (s)
seq_printf(s, fmt, dma_id,
is_eng_idle ? "Y" : "N", qm_glbl_sts0,
@@ -7562,9 +7789,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_TPC_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
if (s)
seq_printf(s, fmt, i,
is_eng_idle ? "Y" : "N",
@@ -7591,9 +7817,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_MME_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
if (s) {
if (!is_slave)
seq_printf(s, fmt, i,
@@ -7619,9 +7844,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_NIC_0 + port);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (s)
seq_printf(s, nic_fmt, port,
is_eng_idle ? "Y" : "N",
@@ -7635,9 +7859,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_NIC_0 + port);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (s)
seq_printf(s, nic_fmt, port,
is_eng_idle ? "Y" : "N",
@@ -7900,18 +8123,16 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
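+	/* the kernel context needs no MMU preparation or CB pool */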
+ if (ctx->asid == HL_KERNEL_ASID_ID)
+ return 0;
+
gaudi_mmu_prepare(ctx->hdev, ctx->asid);
return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
{
- struct hl_device *hdev = ctx->hdev;
-
- /* Gaudi will NEVER support more then a single compute context.
- * Therefore, don't clear anything unless it is the compute context
- */
- if (hdev->compute_ctx != ctx)
+ if (ctx->asid == HL_KERNEL_ASID_ID)
return;
gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
@@ -7936,7 +8157,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7952,10 +8173,10 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, eb);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -7972,10 +8193,10 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -8021,10 +8242,10 @@ static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -8042,10 +8263,10 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
- ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
@@ -8241,12 +8462,16 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+ int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4,
- 0);
+ rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
+ CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 1, 0);
+ if (rc)
+ dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
kref_init(&hw_sob->kref);
}
@@ -8270,6 +8495,24 @@ static u64 gaudi_get_device_time(struct hl_device *hdev)
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
+static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id)
+{
+ return -EPERM;
+}
+
+static int gaudi_block_mmap(struct hl_device *hdev,
+ struct vm_area_struct *vma,
+ u32 block_id, u32 block_size)
+{
+ return -EPERM;
+}
+
+static void gaudi_enable_events_from_fw(struct hl_device *hdev)
+{
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8346,7 +8589,13 @@ static const struct hl_asic_funcs gaudi_funcs = {
.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
.get_device_time = gaudi_get_device_time,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
- .collective_wait_create_jobs = gaudi_collective_wait_create_jobs
+ .collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
+ .scramble_addr = hl_mmu_scramble_addr,
+ .descramble_addr = hl_mmu_descramble_addr,
+ .ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
+ .get_hw_block_id = gaudi_get_hw_block_id,
+ .hw_block_mmap = gaudi_block_mmap,
+ .enable_events_from_fw = gaudi_enable_events_from_fw
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..50bb4ad570fd 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
@@ -258,11 +251,13 @@ enum gaudi_nic_mask {
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB group. group will reset once refcount is zero.
* @base_sob_id: base sob id of this SOB group.
+ * @queue_id: id of the queue that waits on this sob group.
*/
struct gaudi_hw_sob_group {
struct hl_device *hdev;
struct kref kref;
u32 base_sob_id;
+ u32 queue_id;
};
#define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS)
@@ -340,6 +335,7 @@ struct gaudi_device {
};
void gaudi_init_security(struct hl_device *hdev);
+void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..6e56fa1c6c69 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -633,9 +634,21 @@ static int gaudi_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- /* Workaround for H3 #HW-2075 bug: use small data chunks */
- WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) |
- PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ if (hdev->asic_prop.fw_security_disabled) {
+ /* make ETR not privileged */
+ val = FIELD_PREP(
+ PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
+ /* make ETR non-secured (inverted logic) */
+ val |= FIELD_PREP(
+ PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
+ /*
+ * Workaround for H3 #HW-2075 bug: use small data
+ * chunks
+ */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK,
+ is_host ? 0 : 7);
+ WREG32(mmPSOC_ETR_AXICTL, val);
+ }
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
@@ -874,7 +887,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index e10181692d0b..7085f45814ae 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -13052,3 +13052,8 @@ void gaudi_init_security(struct hl_device *hdev)
gaudi_init_protection_bits(hdev);
}
+
+void gaudi_ack_protection_bits_errors(struct hl_device *hdev)
+{
+}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..ed566c52ccaa 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -455,6 +455,11 @@ int goya_get_fixed_properties(struct hl_device *hdev)
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+ prop->first_available_user_msix_interrupt = USHRT_MAX;
+
+ for (i = 0 ; i < HL_MAX_DCORES ; i++)
+ prop->first_available_cq[i] = USHRT_MAX;
+
/* disable fw security for now, set it in a later stage */
prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
@@ -613,12 +618,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +630,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +699,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
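+		/* the PSOC timestamp clock is taken from PLL output index 1 */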
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
@@ -777,9 +797,6 @@ int goya_late_init(struct hl_device *hdev)
return rc;
}
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
-
return 0;
}
@@ -1171,6 +1188,7 @@ static int goya_stop_external_queues(struct hl_device *hdev)
int goya_init_cpu_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_eq *eq;
u32 status;
struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
@@ -1223,6 +1241,10 @@ int goya_init_cpu_queues(struct hl_device *hdev)
return -EIO;
}
+ /* update FW application security bits */
+ if (prop->fw_security_status_valid)
+ prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
goya->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
@@ -2704,7 +2726,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -2788,9 +2811,12 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
- if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
+ if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
+ /* make sure device CPU will read latest data from host */
+ mb();
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+ }
}
void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
@@ -2898,7 +2924,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
dev_err_ratelimited(hdev->dev,
"Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
@@ -3860,10 +3886,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -3932,10 +3958,10 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -4106,9 +4132,6 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
} else {
rc = -EFAULT;
}
@@ -4162,9 +4185,6 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
} else {
rc = -EFAULT;
}
@@ -4207,9 +4227,6 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
} else {
rc = -EFAULT;
}
@@ -4250,9 +4267,6 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
} else {
rc = -EFAULT;
}
@@ -4861,8 +4875,6 @@ int goya_context_switch(struct hl_device *hdev, u32 asid)
WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
- goya_mmu_prepare(hdev, asid);
-
goya_clear_sm_regs(hdev);
return 0;
@@ -5028,7 +5040,7 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
return;
if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
- WARN(1, "asid %u is too big\n", asid);
+ dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
@@ -5057,8 +5069,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_ALL_START, 1);
@@ -5070,8 +5080,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -5101,8 +5109,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/*
* TODO: currently invalidate entire L0 & L1 as in regular hard
* invalidation. Need to apply invalidation of specific cache lines with
@@ -5125,8 +5131,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -5156,7 +5160,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+ rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -5191,11 +5195,12 @@ static void goya_disable_clock_gating(struct hl_device *hdev)
/* clock gating not supported in Goya */
}
-static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
- struct seq_file *s)
+static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
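+	/* the caller's mask array is treated as an engine-ID bitmap */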
+ unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
mme_arch_sts;
bool is_idle = true, is_eng_idle;
@@ -5215,9 +5220,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GOYA_ENGINE_ID_DMA_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
if (s)
seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
@@ -5239,9 +5243,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GOYA_ENGINE_ID_TPC_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
if (s)
seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
@@ -5260,8 +5263,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_MME_0, mask);
if (s) {
seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
@@ -5305,6 +5308,9 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
static int goya_ctx_init(struct hl_ctx *ctx)
{
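+	/* the kernel context does not use the device MMU */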
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ goya_mmu_prepare(ctx->hdev, ctx->asid);
+
return 0;
}
@@ -5324,7 +5330,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
@@ -5383,6 +5389,24 @@ static void goya_ctx_fini(struct hl_ctx *ctx)
}
+static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id)
+{
+ return -EPERM;
+}
+
+static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u32 block_id, u32 block_size)
+{
+ return -EPERM;
+}
+
+static void goya_enable_events_from_fw(struct hl_device *hdev)
+{
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5459,7 +5483,13 @@ static const struct hl_asic_funcs goya_funcs = {
.set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
.get_device_time = goya_get_device_time,
.collective_wait_init_cs = goya_collective_wait_init_cs,
- .collective_wait_create_jobs = goya_collective_wait_create_jobs
+ .collective_wait_create_jobs = goya_collective_wait_create_jobs,
+ .scramble_addr = hl_mmu_scramble_addr,
+ .descramble_addr = hl_mmu_descramble_addr,
+ .ack_protection_bits_errors = goya_ack_protection_bits_errors,
+ .get_hw_block_id = goya_get_hw_block_id,
+ .hw_block_mmap = goya_block_mmap,
+ .enable_events_from_fw = goya_enable_events_from_fw
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 8b3408211af6..23fe099ed218 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -173,6 +173,7 @@ void goya_init_mme_qmans(struct hl_device *hdev);
void goya_init_tpc_qmans(struct hl_device *hdev);
int goya_init_cpu_queues(struct hl_device *hdev);
void goya_init_security(struct hl_device *hdev);
+void goya_ack_protection_bits_errors(struct hl_device *hdev);
int goya_late_init(struct hl_device *hdev);
void goya_late_fini(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 6fa03933b438..6b7445cca580 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -434,8 +434,15 @@ static int goya_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- WREG32(mmPSOC_ETR_AXICTL,
- 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ if (hdev->asic_prop.fw_security_disabled) {
+ /* make ETR not privileged */
+ val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
+ /* make ETR non-secured (inverted logic) */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
+ /* burst size 8 */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 7);
+ WREG32(mmPSOC_ETR_AXICTL, val);
+ }
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index 14701836f92b..14c3bae3ccdc 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -3120,3 +3120,8 @@ void goya_init_security(struct hl_device *hdev)
goya_init_protection_bits(hdev);
}
+
+void goya_ack_protection_bits_errors(struct hl_device *hdev)
+{
+}
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 00bd9b392f93..b77c1c16c32c 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -58,11 +58,25 @@ struct hl_eq_ecc_data {
__u8 pad[7];
};
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
struct hl_eq_ecc_data ecc_data;
struct hl_eq_hbm_ecc_data hbm_ecc_data;
+ struct hl_eq_sm_sei_data sm_sei_data;
__le64 data[7];
};
};
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..e87f5a98e193 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -70,6 +70,9 @@
* checksum. Trying to program image again
* might solve this.
*
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -88,6 +91,7 @@
#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9)
#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10)
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
/*
@@ -145,11 +149,27 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +191,10 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +228,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
index 9ccba8437ec9..49335e8334b4 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -212,6 +212,10 @@ enum gaudi_async_event_id {
GAUDI_EVENT_NIC_SEI_2 = 266,
GAUDI_EVENT_NIC_SEI_3 = 267,
GAUDI_EVENT_NIC_SEI_4 = 268,
+ GAUDI_EVENT_DMA_IF_SEI_0 = 277,
+ GAUDI_EVENT_DMA_IF_SEI_1 = 278,
+ GAUDI_EVENT_DMA_IF_SEI_2 = 279,
+ GAUDI_EVENT_DMA_IF_SEI_3 = 280,
GAUDI_EVENT_PCIE_FLR = 290,
GAUDI_EVENT_TPC0_BMON_SPMU = 300,
GAUDI_EVENT_TPC0_KRN_ERR = 301,
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index b9b90d079e23..b53aeda9a982 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -388,7 +388,10 @@ enum axi_id {
#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6)
#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6)
-#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2
+#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00
/* STLB_CACHE_INV */
#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index f30f2c0458d7..6e097ace2e96 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -78,6 +78,9 @@ struct packet_wreg_bulk {
__le64 values[0]; /* data starts here */
};
+#define GAUDI_PKT_LONG_CTL_OP_SHIFT 20
+#define GAUDI_PKT_LONG_CTL_OP_MASK 0x00300000
+
struct packet_msg_long {
__le32 value;
__le32 ctl;
@@ -111,18 +114,6 @@ struct packet_msg_long {
#define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22
#define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000
-#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24
-#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000
-
-#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29
-#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000
-
-#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30
-#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000
-
-#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31
-#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000
-
struct packet_msg_short {
__le32 value;
__le32 ctl;
@@ -146,18 +137,6 @@ struct packet_msg_prot {
#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
-#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24
-#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000
-
-#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29
-#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000
-
-#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30
-#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000
-
-#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31
-#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000
-
struct packet_fence {
__le32 cfg;
__le32 ctl;
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 067489bd048e..9ff3cb245580 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -259,6 +259,9 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
-#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2
+#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 6b888d04392d..aa12097668d3 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -16,6 +16,7 @@ lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 2907db260fba..935acc6bbf3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -44,7 +44,8 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
bus = cl->dev;
mutex_lock(&bus->device_lock);
- if (bus->dev_state != MEI_DEV_ENABLED) {
+ if (bus->dev_state != MEI_DEV_ENABLED &&
+ bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
@@ -60,6 +61,13 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
goto out;
}
+ if (vtag) {
+ /* Check if vtag is supported by client */
+ rets = mei_cl_vt_support_check(cl);
+ if (rets)
+ goto out;
+ }
+
if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
@@ -128,7 +136,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
bus = cl->dev;
mutex_lock(&bus->device_lock);
- if (bus->dev_state != MEI_DEV_ENABLED) {
+ if (bus->dev_state != MEI_DEV_ENABLED &&
+ bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
@@ -878,22 +887,17 @@ static int mei_cl_device_probe(struct device *dev)
static int mei_cl_device_remove(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- struct mei_cl_driver *cldrv;
- int ret = 0;
+ struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
- if (!cldev || !dev->driver)
- return 0;
-
- cldrv = to_mei_cl_driver(dev->driver);
if (cldrv->remove)
- ret = cldrv->remove(cldev);
+ cldrv->remove(cldev);
mei_cldev_unregister_callbacks(cldev);
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
- return ret;
+ return 0;
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index a56d41321f32..4378a9b25848 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
#include <linux/mei.h>
@@ -990,7 +991,8 @@ int mei_cl_disconnect(struct mei_cl *cl)
return 0;
}
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
+ dev->dev_state == MEI_DEV_POWER_DOWN) {
cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
mei_cl_set_disconnected(cl);
return 0;
@@ -1737,7 +1739,7 @@ static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
*
* @cb: message callback structure
*
- * Return: a pointer to initialized header
+ * Return: a pointer to initialized header or ERR_PTR on failure
*/
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
@@ -2113,6 +2115,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_DISCONNECT:
case MEI_FOP_NOTIFY_STOP:
case MEI_FOP_NOTIFY_START:
+ case MEI_FOP_DMA_MAP:
+ case MEI_FOP_DMA_UNMAP:
if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
@@ -2139,3 +2143,286 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
+
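+/* caller must hold dev->device_lock */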
+static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
+{
+ struct mei_cl *cl;
+
+ list_for_each_entry(cl, &dev->file_list, link)
+ if (cl->dma.buffer_id == buffer_id)
+ return cl;
+ return NULL;
+}
+
+/**
+ * mei_cl_irq_dma_map - send client dma map request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_dma_map_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
+ list_move_tail(&cb->list, cmpl_list);
+ return ret;
+ }
+
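+	/* request sent; park the cb until the dma map response arrives */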
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ return 0;
+}
+
+/**
+ * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_dma_unmap_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
+ list_move_tail(&cb->list, cmpl_list);
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ return 0;
+}
+
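+/* allocate a device-managed (devres) coherent buffer for client dma */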
+static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
+{
+ cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
+ &cl->dma.daddr, GFP_KERNEL);
+ if (!cl->dma.vaddr)
+ return -ENOMEM;
+
+ cl->dma.buffer_id = buf_id;
+ cl->dma.size = size;
+
+ return 0;
+}
+
+static void mei_cl_dma_free(struct mei_cl *cl)
+{
+ cl->dma.buffer_id = 0;
+ dmam_free_coherent(cl->dev->dev,
+ cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
+ cl->dma.size = 0;
+ cl->dma.vaddr = NULL;
+ cl->dma.daddr = 0;
+}
+
+/**
+ * mei_cl_dma_alloc_and_map - send client dma map request
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ * @buffer_id: id of the mapped buffer
+ * @size: size of the buffer
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return:
+ * * -ENODEV
+ * * -EINVAL
+ * * -EOPNOTSUPP
+ * * -EPROTO
+ * * -ENOMEM
+ */
+int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
+ u8 buffer_id, size_t size)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!dev->hbm_f_cd_supported) {
+ cl_dbg(dev, cl, "client dma is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (buffer_id == 0)
+ return -EINVAL;
+
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
+ if (cl->dma_mapped)
+ return -EPROTO;
+
+ if (mei_cl_dma_map_find(dev, buffer_id)) {
+ cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
+		       buffer_id);
+ return -EPROTO;
+ }
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ rets = mei_cl_dma_alloc(cl, buffer_id, size);
+ if (rets) {
+ pm_runtime_put_noidle(dev->dev);
+ return rets;
+ }
+
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ if (mei_hbuf_acquire(dev)) {
+ if (mei_hbm_cl_dma_map_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait,
+ cl->dma_mapped ||
+ cl->status ||
+ !mei_cl_is_connected(cl),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+
+ if (!cl->dma_mapped && !cl->status)
+ cl->status = -EFAULT;
+
+ rets = cl->status;
+
+out:
+ if (rets)
+ mei_cl_dma_free(cl);
+
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ mei_io_cb_free(cb);
+ return rets;
+}
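+
+/*
+ * Minimal usage sketch (hypothetical caller, under dev->device_lock):
+ *
+ *	rets = mei_cl_dma_alloc_and_map(cl, fp, buffer_id, size);
+ *	if (!rets) {
+ *		... use cl->dma.vaddr / cl->dma.daddr ...
+ *		rets = mei_cl_dma_unmap(cl, fp);
+ *	}
+ */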
+
+/**
+ * mei_cl_dma_unmap - send client dma unmap request
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!dev->hbm_f_cd_supported) {
+ cl_dbg(dev, cl, "client dma is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
+ if (!cl->dma_mapped)
+ return -EPROTO;
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ if (mei_hbuf_acquire(dev)) {
+ if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait,
+ !cl->dma_mapped ||
+ cl->status ||
+ !mei_cl_is_connected(cl),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+
+ if (cl->dma_mapped && !cl->status)
+ cl->status = -EFAULT;
+
+ rets = cl->status;
+
+ if (!rets)
+ mei_cl_dma_free(cl);
+out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ mei_io_cb_free(cb);
+ return rets;
+}
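
Taken together, mei_cl_dma_alloc_and_map() and mei_cl_dma_unmap() bracket a client DMA session. A minimal caller sketch, not part of the patch (hypothetical: `cl` and `fp` come from an established client connection, buffer id 1 and the 4 KiB size are arbitrary, and cl->dev->device_lock must be held as the kernel-doc above requires):

	/* sketch only: map a 4 KiB buffer for client 'cl', use it, unmap it */
	mutex_lock(&cl->dev->device_lock);
	ret = mei_cl_dma_alloc_and_map(cl, fp, 1 /* buffer id */, 4096);
	if (!ret) {
		/* host and firmware may now exchange data via cl->dma.vaddr */
		ret = mei_cl_dma_unmap(cl, fp);
	}
	mutex_unlock(&cl->dev->device_lock);
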
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9e08a9843bba..b12cdcde9436 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -265,6 +265,14 @@ void mei_cl_notify(struct mei_cl *cl);
void mei_cl_all_disconnect(struct mei_device *dev);
+int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list);
+int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list);
+int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
+ u8 buffer_id, size_t size);
+int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp);
+
#define MEI_CL_FMT "cl:host=%02d me=%02d "
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 3ab1a431d810..1ce61e9e24fc 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -106,6 +106,7 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported);
seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported);
seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported);
+ seq_printf(m, "\tCD: %01d\n", dev->hbm_f_cd_supported);
}
seq_printf(m, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 686e8b6a4c55..d0277c7fed10 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -339,7 +339,9 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD;
if (dev->hbm_f_vt_supported)
- req.capability_requested[0] = HBM_CAP_VT;
+ req.capability_requested[0] |= HBM_CAP_VT;
+ if (dev->hbm_f_cd_supported)
+ req.capability_requested[0] |= HBM_CAP_CD;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
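
The switch from `=` to `|=` matters once more than one capability bit can be requested: plain assignment would let the last `if` win and silently drop earlier bits. A reduced illustration, using the bit values defined in hw.h later in this patch:

	u8 caps = 0;
	caps = HBM_CAP_VT;    /* 0x01 */
	caps = HBM_CAP_CD;    /* bug: now 0x04, HBM_CAP_VT was lost */

	caps = 0;
	caps |= HBM_CAP_VT;   /* 0x01 */
	caps |= HBM_CAP_CD;   /* ok: 0x05, both capabilities requested */
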
@@ -593,6 +595,117 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
}
/**
+ * mei_hbm_cl_dma_map_req - send client dma map request
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_client_dma_map_request req;
+ int ret;
+
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+
+ memset(&req, 0, sizeof(req));
+
+ req.hbm_cmd = MEI_HBM_CLIENT_DMA_MAP_REQ_CMD;
+ req.client_buffer_id = cl->dma.buffer_id;
+ req.address_lsb = lower_32_bits(cl->dma.daddr);
+ req.address_msb = upper_32_bits(cl->dma.daddr);
+ req.size = cl->dma.size;
+
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+ if (ret)
+ dev_err(dev->dev, "dma map request failed: ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_hbm_cl_dma_unmap_req - send client dma unmap request
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_client_dma_unmap_request req;
+ int ret;
+
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+
+ memset(&req, 0, sizeof(req));
+
+ req.hbm_cmd = MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD;
+ req.client_buffer_id = cl->dma.buffer_id;
+
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+ if (ret)
+ dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret);
+
+ return ret;
+}
+
+static void mei_hbm_cl_dma_map_res(struct mei_device *dev,
+ struct hbm_client_dma_response *res)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb, *next;
+
+ cl = NULL;
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
+ if (cb->fop_type != MEI_FOP_DMA_MAP)
+ continue;
+ if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped)
+ continue;
+
+ cl = cb->cl;
+ break;
+ }
+ if (!cl)
+ return;
+
+ dev_dbg(dev->dev, "cl dma map result = %d\n", res->status);
+ cl->status = res->status;
+ if (!cl->status)
+ cl->dma_mapped = 1;
+ wake_up(&cl->wait);
+}
+
+static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev,
+ struct hbm_client_dma_response *res)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb, *next;
+
+ cl = NULL;
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
+ if (cb->fop_type != MEI_FOP_DMA_UNMAP)
+ continue;
+ if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped)
+ continue;
+
+ cl = cb->cl;
+ break;
+ }
+ if (!cl)
+ return;
+
+ dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status);
+ cl->status = res->status;
+ if (!cl->status)
+ cl->dma_mapped = 0;
+ wake_up(&cl->wait);
+}
+
+/**
* mei_hbm_prop_req - request property for a single client
*
* @dev: the device structure
@@ -1085,6 +1198,13 @@ static void mei_hbm_config_features(struct mei_device *dev)
(dev->version.major_version == HBM_MAJOR_VERSION_CAP &&
dev->version.minor_version >= HBM_MINOR_VERSION_CAP))
dev->hbm_f_cap_supported = 1;
+
+ /* Client DMA Support */
+ dev->hbm_f_cd_supported = 0;
+ if (dev->version.major_version > HBM_MAJOR_VERSION_CD ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_CD &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_CD))
+ dev->hbm_f_cd_supported = 1;
}
/**
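
mei_hbm_config_features() now repeats the same "major > X, or major == X and minor >= Y" comparison for a fifth feature. A hypothetical helper (not in the patch) makes the recurring pattern explicit:

	/* sketch: true if the firmware HBM version is at least major.minor */
	static bool mei_hbm_version_at_least(const struct mei_device *dev,
					     u8 major, u8 minor)
	{
		return dev->version.major_version > major ||
		       (dev->version.major_version == major &&
			dev->version.minor_version >= minor);
	}

With it, the client DMA gate above would read `dev->hbm_f_cd_supported = mei_hbm_version_at_least(dev, HBM_MAJOR_VERSION_CD, HBM_MINOR_VERSION_CD);`.
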
@@ -1124,6 +1244,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
struct mei_hbm_cl_cmd *cl_cmd;
struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *fctrl;
+ struct hbm_client_dma_response *client_dma_res;
/* read the message to our buffer */
BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
@@ -1177,6 +1298,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1215,7 +1340,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_CAP_SETUP) {
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_CAP_SETUP) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1224,6 +1354,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
capability_res = (struct hbm_capability_response *)mei_msg;
if (!(capability_res->capability_granted[0] & HBM_CAP_VT))
dev->hbm_f_vt_supported = 0;
+ if (!(capability_res->capability_granted[0] & HBM_CAP_CD))
+ dev->hbm_f_cd_supported = 0;
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
@@ -1247,7 +1379,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_DR_SETUP) {
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_DR_SETUP) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1311,6 +1448,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1349,6 +1490,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1373,7 +1518,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
return -EPROTO;
}
- dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
dev_info(dev->dev, "hbm: stop response: resetting.\n");
/* force the reset */
return -EPROTO;
@@ -1426,6 +1571,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_hbm_cl_notify(dev, cl_cmd);
break;
+ case MEI_HBM_CLIENT_DMA_MAP_RES_CMD:
+ dev_dbg(dev->dev, "hbm: client dma map response: message received.\n");
+ client_dma_res = (struct hbm_client_dma_response *)mei_msg;
+ mei_hbm_cl_dma_map_res(dev, client_dma_res);
+ break;
+
+ case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD:
+ dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n");
+ client_dma_res = (struct hbm_client_dma_response *)mei_msg;
+ mei_hbm_cl_dma_unmap_res(dev, client_dma_res);
+ break;
+
default:
WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd);
return -EPROTO;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 4d95e38e4ddf..cd5b08ca34b6 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -10,6 +10,7 @@
struct mei_device;
struct mei_msg_hdr;
struct mei_cl;
+struct mei_dma_data;
/**
* enum mei_hbm_state - host bus message protocol state
@@ -51,6 +52,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
void mei_hbm_pg_resume(struct mei_device *dev);
int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 request);
-
+int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl);
#endif /* _MEI_HBM_H_ */
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 9ae9669e46ea..ec2a4fce8581 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -569,8 +569,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
- verify_mprime_in->header.buffer_len =
- WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
+ verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
@@ -845,16 +844,19 @@ enable_err_exit:
return ret;
}
-static int mei_hdcp_remove(struct mei_cl_device *cldev)
+static void mei_hdcp_remove(struct mei_cl_device *cldev)
{
struct i915_hdcp_comp_master *comp_master =
mei_cldev_get_drvdata(cldev);
+ int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
kfree(comp_master);
mei_cldev_set_drvdata(cldev, NULL);
- return mei_cldev_disable(cldev);
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
}
#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9cf8d8f60cfe..14be76d4c2e6 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -101,6 +101,11 @@
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+#define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */
+
+#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
+#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index df2fb9520dd8..b10606550613 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -88,6 +88,12 @@
#define HBM_MINOR_VERSION_CAP 2
#define HBM_MAJOR_VERSION_CAP 2
+/*
+ * MEI version with client DMA support
+ */
+#define HBM_MINOR_VERSION_CD 2
+#define HBM_MAJOR_VERSION_CD 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -136,6 +142,12 @@
#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13
#define MEI_HBM_CAPABILITIES_RES_CMD 0x93
+#define MEI_HBM_CLIENT_DMA_MAP_REQ_CMD 0x14
+#define MEI_HBM_CLIENT_DMA_MAP_RES_CMD 0x94
+
+#define MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD 0x15
+#define MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD 0x95
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -648,6 +660,8 @@ struct hbm_dma_ring_ctrl {
/* virtual tag supported */
#define HBM_CAP_VT BIT(0)
+/* client dma supported */
+#define HBM_CAP_CD BIT(2)
/**
* struct hbm_capability_request - capability request from host to fw
@@ -671,4 +685,51 @@ struct hbm_capability_response {
u8 capability_granted[3];
} __packed;
+/**
+ * struct hbm_client_dma_map_request - client dma map request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @client_buffer_id: client buffer id
+ * @reserved: reserved
+ * @address_lsb: DMA address LSB
+ * @address_msb: DMA address MSB
+ * @size: DMA size
+ */
+struct hbm_client_dma_map_request {
+ u8 hbm_cmd;
+ u8 client_buffer_id;
+ u8 reserved[2];
+ u32 address_lsb;
+ u32 address_msb;
+ u32 size;
+} __packed;
+
+/**
+ * struct hbm_client_dma_unmap_request - client dma unmap request
+ * from the host to the firmware
+ *
+ * @hbm_cmd: bus message command header
+ * @status: unmap status
+ * @client_buffer_id: client buffer id
+ * @reserved: reserved
+ */
+struct hbm_client_dma_unmap_request {
+ u8 hbm_cmd;
+ u8 status;
+ u8 client_buffer_id;
+ u8 reserved;
+} __packed;
+
+/**
+ * struct hbm_client_dma_response - client dma map/unmap response
+ * from the firmware to the host
+ *
+ * @hbm_cmd: bus message command header
+ * @status: command status
+ */
+struct hbm_client_dma_response {
+ u8 hbm_cmd;
+ u8 status;
+} __packed;
+
#endif
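
The three structures above are wire formats, so their sizes are fixed by the protocol: 16 bytes for the map request (1 + 1 + 2 reserved + 3 * 4), 4 bytes for the unmap request, and 2 bytes for the response. A compile-time sketch (not part of the patch) that pins those expectations:

	/* sanity checks on the on-wire layout of the structs above */
	static_assert(sizeof(struct hbm_client_dma_map_request) == 16);
	static_assert(sizeof(struct hbm_client_dma_unmap_request) == 4);
	static_assert(sizeof(struct hbm_client_dma_response) == 2);

The `__packed` attribute is what guarantees this: without it the compiler would be free to pad the fields to natural alignment.
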
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index bcee77768b91..5c8cb679b997 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -303,9 +303,12 @@ void mei_stop(struct mei_device *dev)
dev_dbg(dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
- mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ mei_set_devstate(dev, MEI_DEV_POWERING_DOWN);
mutex_unlock(&dev->device_lock);
mei_cl_bus_remove_devices(dev);
+ mutex_lock(&dev->device_lock);
+ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ mutex_unlock(&dev->device_lock);
mei_cancel_work(dev);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 326955b04fda..a98f6b895af7 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -295,12 +295,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
static inline int hdr_is_valid(u32 msg_hdr)
{
struct mei_msg_hdr *mei_hdr;
+ u32 expected_len = 0;
mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
if (!msg_hdr || mei_hdr->reserved)
return -EBADMSG;
- if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
+ if (mei_hdr->dma_ring)
+ expected_len += MEI_SLOT_SIZE;
+ if (mei_hdr->extended)
+ expected_len += MEI_SLOT_SIZE;
+ if (mei_hdr->length < expected_len)
return -EBADMSG;
return 0;
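
hdr_is_valid() now validates with a running minimum instead of an exact match, because the `extended` and `dma_ring` flags can be set together and each one adds a slot (MEI_SLOT_SIZE, one 4-byte dword) of mandatory payload. A worked illustration of the new check:

	/* illustration: dma_ring = 1 and extended = 1 */
	u32 expected_len = 0;
	if (mei_hdr->dma_ring)
		expected_len += MEI_SLOT_SIZE;  /* 4 */
	if (mei_hdr->extended)
		expected_len += MEI_SLOT_SIZE;  /* 8 total */
	/* a header advertising length 6 is now rejected (6 < 8),
	 * where the old dma_ring-only test would not have applied */
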
@@ -324,6 +329,8 @@ int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl *cl;
int ret;
u32 ext_meta_hdr_u32;
+ u32 hdr_size_left;
+ u32 hdr_size_ext;
int i;
int ext_hdr_end;
@@ -353,6 +360,7 @@ int mei_irq_read_handler(struct mei_device *dev,
}
ext_hdr_end = 1;
+ hdr_size_left = mei_hdr->length;
if (mei_hdr->extended) {
if (!dev->rd_msg_hdr[1]) {
@@ -363,8 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_dbg(dev->dev, "extended header is %08x\n",
ext_meta_hdr_u32);
}
- meta_hdr = ((struct mei_ext_meta_hdr *)
- dev->rd_msg_hdr + 1);
+ meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
+ if (check_add_overflow((u32)sizeof(*meta_hdr),
+ mei_slots2data(meta_hdr->size),
+ &hdr_size_ext)) {
+ dev_err(dev->dev, "extended message size too big %d\n",
+ meta_hdr->size);
+ return -EBADMSG;
+ }
+ if (hdr_size_left < hdr_size_ext) {
+ dev_err(dev->dev, "corrupted message header len %d\n",
+ mei_hdr->length);
+ return -EBADMSG;
+ }
+ hdr_size_left -= hdr_size_ext;
+
ext_hdr_end = meta_hdr->size + 2;
for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
dev->rd_msg_hdr[i] = mei_read_hdr(dev);
@@ -376,6 +397,12 @@ int mei_irq_read_handler(struct mei_device *dev,
}
if (mei_hdr->dma_ring) {
+ if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
+ dev_err(dev->dev, "corrupted message header len %d\n",
+ mei_hdr->length);
+ return -EBADMSG;
+ }
+
dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
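
check_add_overflow() (from <linux/overflow.h>) is used here instead of an open-coded sum so that a firmware-controlled `meta_hdr->size` cannot wrap `hdr_size_ext` around U32_MAX and slip past the `hdr_size_left` comparison. A reduced sketch of the primitive:

	/* sketch: check_add_overflow() returns true when the sum wraps */
	u32 a = 0xfffffff0, b = 0x20, sum;
	if (check_add_overflow(a, b, &sum))
		return -EBADMSG;   /* would wrap past U32_MAX: reject */
	/* otherwise 'sum' holds the exact value of a + b */
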
@@ -520,6 +547,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
if (ret)
return ret;
break;
+ case MEI_FOP_DMA_MAP:
+ ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
+ case MEI_FOP_DMA_UNMAP:
+ ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
default:
BUG();
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9f6682033ed7..28937b6e7e0c 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1026,7 +1026,7 @@ static ssize_t tx_queue_limit_show(struct device *device,
size = dev->tx_queue_limit;
mutex_unlock(&dev->device_lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t tx_queue_limit_store(struct device *device,
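
sysfs_emit() is the preferred replacement for snprintf() in sysfs show() callbacks: it hard-codes the PAGE_SIZE bound and warns if the buffer is not the page-aligned buffer sysfs passes in, catching misuse that snprintf() silently accepts. The conversion is mechanical:

	/* before */ return snprintf(buf, PAGE_SIZE, "%u\n", size);
	/* after  */ return sysfs_emit(buf, "%u\n", size);
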
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 8c395bfdf6f3..b7b6ef344e80 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -57,6 +57,7 @@ enum mei_dev_state {
MEI_DEV_ENABLED,
MEI_DEV_RESETTING,
MEI_DEV_DISABLED,
+ MEI_DEV_POWERING_DOWN,
MEI_DEV_POWER_DOWN,
MEI_DEV_POWER_UP
};
@@ -78,6 +79,8 @@ enum mei_file_transaction_states {
* @MEI_FOP_DISCONNECT_RSP: disconnect response
* @MEI_FOP_NOTIFY_START: start notification
* @MEI_FOP_NOTIFY_STOP: stop notification
+ * @MEI_FOP_DMA_MAP: request client dma map
+ * @MEI_FOP_DMA_UNMAP: request client dma unmap
*/
enum mei_cb_file_ops {
MEI_FOP_READ = 0,
@@ -87,6 +90,8 @@ enum mei_cb_file_ops {
MEI_FOP_DISCONNECT_RSP,
MEI_FOP_NOTIFY_START,
MEI_FOP_NOTIFY_STOP,
+ MEI_FOP_DMA_MAP,
+ MEI_FOP_DMA_UNMAP,
};
/**
@@ -112,6 +117,13 @@ struct mei_msg_data {
unsigned char *data;
};
+struct mei_dma_data {
+ u8 buffer_id;
+ void *vaddr;
+ dma_addr_t daddr;
+ size_t size;
+};
+
/**
* struct mei_dma_dscr - dma address descriptor
*
@@ -235,6 +247,8 @@ struct mei_cl_vtag {
* @rd_pending: pending read credits
* @rd_completed_lock: protects rd_completed queue
* @rd_completed: completed read
+ * @dma: dma settings
+ * @dma_mapped: dma buffer is currently mapped.
*
* @cldev: device on the mei client bus
*/
@@ -262,6 +276,8 @@ struct mei_cl {
struct list_head rd_pending;
spinlock_t rd_completed_lock; /* protects rd_completed queue */
struct list_head rd_completed;
+ struct mei_dma_data dma;
+ u8 dma_mapped;
struct mei_cl_device *cldev;
};
@@ -450,6 +466,7 @@ struct mei_fw_version {
* @hbm_f_dr_supported : hbm feature dma ring supported
* @hbm_f_vt_supported : hbm feature vtag supported
* @hbm_f_cap_supported : hbm feature capabilities message supported
+ * @hbm_f_cd_supported : hbm feature client dma supported
*
* @fw_ver : FW versions
*
@@ -537,6 +554,7 @@ struct mei_device {
unsigned int hbm_f_dr_supported:1;
unsigned int hbm_f_vt_supported:1;
unsigned int hbm_f_cap_supported:1;
+ unsigned int hbm_f_cd_supported:1;
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1de9ef7a272b..a7e179626b63 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -107,6 +107,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 4d1b44de1492..e70525eedaae 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -15,7 +15,7 @@
static dev_t ocxl_dev;
static struct class *ocxl_class;
-static struct mutex minors_idr_lock;
+static DEFINE_MUTEX(minors_idr_lock);
static struct idr minors_idr;
static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
@@ -588,7 +588,6 @@ int ocxl_file_init(void)
{
int rc;
- mutex_init(&minors_idr_lock);
idr_init(&minors_idr);
rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl");
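
DEFINE_MUTEX() initializes the lock statically at build time, which is why the runtime mutex_init() call could be dropped from ocxl_file_init(). The two forms are equivalent; the static one cannot be forgotten or raced against early use:

	/* runtime form (removed by this patch) */
	static struct mutex minors_idr_lock;
	/* ... later, in init code: */
	mutex_init(&minors_idr_lock);

	/* static form (added): valid before any code runs */
	static DEFINE_MUTEX(minors_idr_lock);
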
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index eff481ce08ee..1b2868ca4f2a 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -68,7 +68,6 @@
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
-#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_LS1088A 0x80c0
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
deleted file mode 100644
index 7236ae527b19..000000000000
--- a/drivers/misc/pti.c
+++ /dev/null
@@ -1,978 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * pti.c - PTI driver for cJTAG data extration
- *
- * Copyright (C) Intel 2010
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/pci.h>
-#include <linux/mutex.h>
-#include <linux/miscdevice.h>
-#include <linux/intel-pti.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#define DRIVERNAME "pti"
-#define PCINAME "pciPTI"
-#define TTYNAME "ttyPTI"
-#define CHARNAME "pti"
-#define PTITTY_MINOR_START 0
-#define PTITTY_MINOR_NUM 2
-#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */
-#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */
-#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */
-#define MODEM_BASE_ID 71 /* modem master ID address */
-#define CONTROL_ID 72 /* control master ID address */
-#define CONSOLE_ID 73 /* console master ID address */
-#define OS_BASE_ID 74 /* base OS master ID address */
-#define APP_BASE_ID 80 /* base App master ID address */
-#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */
-#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */
-#define APERTURE_14 0x3800000 /* offset to first OS write addr */
-#define APERTURE_LEN 0x400000 /* address length */
-
-struct pti_tty {
- struct pti_masterchannel *mc;
-};
-
-struct pti_dev {
- struct tty_port port[PTITTY_MINOR_NUM];
- unsigned long pti_addr;
- unsigned long aperture_base;
- void __iomem *pti_ioaddr;
- u8 ia_app[MAX_APP_IDS];
- u8 ia_os[MAX_OS_IDS];
- u8 ia_modem[MAX_MODEM_IDS];
-};
-
-/*
- * This protects access to ia_app, ia_os, and ia_modem,
- * which keeps track of channels allocated in
- * an aperture write id.
- */
-static DEFINE_MUTEX(alloclock);
-
-static const struct pci_device_id pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
- {0}
-};
-
-static struct tty_driver *pti_tty_driver;
-static struct pti_dev *drv_data;
-
-static unsigned int pti_console_channel;
-static unsigned int pti_control_channel;
-
-/**
- * pti_write_to_aperture()- The private write function to PTI HW.
- *
- * @mc: The 'aperture'. It's part of a write address that holds
- * a master and channel ID.
- * @buf: Data being written to the HW that will ultimately be seen
- * in a debugging tool (Fido, Lauterbach).
- * @len: Size of buffer.
- *
- * Since each aperture is specified by a unique
- * master/channel ID, no two processes will be writing
- * to the same aperture at the same time so no lock is required. The
- * PTI-Output agent will send these out in the order that they arrived, and
- * thus, it will intermix these messages. The debug tool can then later
- * regroup the appropriate message segments together reconstituting each
- * message.
- */
-static void pti_write_to_aperture(struct pti_masterchannel *mc,
- u8 *buf,
- int len)
-{
- int dwordcnt;
- int final;
- int i;
- u32 ptiword;
- u32 __iomem *aperture;
- u8 *p = buf;
-
- /*
- * calculate the aperture offset from the base using the master and
- * channel id's.
- */
- aperture = drv_data->pti_ioaddr + (mc->master << 15)
- + (mc->channel << 8);
-
- dwordcnt = len >> 2;
- final = len - (dwordcnt << 2); /* final = trailing bytes */
- if (final == 0 && dwordcnt != 0) { /* always need a final dword */
- final += 4;
- dwordcnt--;
- }
-
- for (i = 0; i < dwordcnt; i++) {
- ptiword = be32_to_cpu(*(u32 *)p);
- p += 4;
- iowrite32(ptiword, aperture);
- }
-
- aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */
-
- ptiword = 0;
- for (i = 0; i < final; i++)
- ptiword |= *p++ << (24-(8*i));
-
- iowrite32(ptiword, aperture);
- return;
-}
-
-/**
- * pti_control_frame_built_and_sent()- control frame build and send function.
- *
- * @mc: The master / channel structure on which the function
- * built a control frame.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * To be able to post process the PTI contents on host side, a control frame
- * is added before sending any PTI content. So the host side knows on
- * each PTI frame the name of the thread using a dedicated master / channel.
- * The thread name is retrieved from 'current' global variable if 'thread_name'
- * is 'NULL', else it is retrieved from 'thread_name' parameter.
- * This function builds this frame and sends it to a master ID CONTROL_ID.
- * The overhead is only 32 bytes since the driver only writes to HW
- * in 32 byte chunks.
- */
-static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
- const char *thread_name)
-{
- /*
- * Since we access the comm member in current's task_struct, we only
- * need to be as large as what 'comm' in that structure is.
- */
- char comm[TASK_COMM_LEN];
- struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
- .channel = 0};
- const char *thread_name_p;
- const char *control_format = "%3d %3d %s";
- u8 control_frame[CONTROL_FRAME_LEN];
-
- if (!thread_name) {
- if (!in_interrupt())
- get_task_comm(comm, current);
- else
- strncpy(comm, "Interrupt", TASK_COMM_LEN);
-
- /* Absolutely ensure our buffer is zero terminated. */
- comm[TASK_COMM_LEN-1] = 0;
- thread_name_p = comm;
- } else {
- thread_name_p = thread_name;
- }
-
- mccontrol.channel = pti_control_channel;
- pti_control_channel = (pti_control_channel + 1) & 0x7f;
-
- snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master,
- mc->channel, thread_name_p);
- pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame));
-}
-
-/**
- * pti_write_full_frame_to_aperture()- high level function to
- * write to PTI.
- *
- * @mc: The 'aperture'. It's part of a write address that holds
- * a master and channel ID.
- * @buf: Data being written to the HW that will ultimately be seen
- * in a debugging tool (Fido, Lauterbach).
- * @len: Size of buffer.
- *
- * All threads sending data (either console, user space application, ...)
- * are calling the high level function to write to PTI meaning that it is
- * possible to add a control frame before sending the content.
- */
-static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
- const unsigned char *buf,
- int len)
-{
- pti_control_frame_built_and_sent(mc, NULL);
- pti_write_to_aperture(mc, (u8 *)buf, len);
-}
-
-/**
- * get_id()- Allocate a master and channel ID.
- *
- * @id_array: an array of bits representing what channel
- * id's are allocated for writing.
- * @max_ids: The max amount of available write IDs to use.
- * @base_id: The starting SW channel ID, based on the Intel
- * PTI arch.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * Returns:
- * pti_masterchannel struct with master, channel ID address
- * 0 for error
- *
- * Each bit in the arrays ia_app and ia_os correspond to a master and
- * channel id. The bit is one if the id is taken and 0 if free. For
- * every master there are 128 channel id's.
- */
-static struct pti_masterchannel *get_id(u8 *id_array,
- int max_ids,
- int base_id,
- const char *thread_name)
-{
- struct pti_masterchannel *mc;
- int i, j, mask;
-
- mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
- if (mc == NULL)
- return NULL;
-
- /* look for a byte with a free bit */
- for (i = 0; i < max_ids; i++)
- if (id_array[i] != 0xff)
- break;
- if (i == max_ids) {
- kfree(mc);
- return NULL;
- }
- /* find the bit in the 128 possible channel opportunities */
- mask = 0x80;
- for (j = 0; j < 8; j++) {
- if ((id_array[i] & mask) == 0)
- break;
- mask >>= 1;
- }
-
- /* grab it */
- id_array[i] |= mask;
- mc->master = base_id;
- mc->channel = ((i & 0xf)<<3) + j;
- /* write new master Id / channel Id allocation to channel control */
- pti_control_frame_built_and_sent(mc, thread_name);
- return mc;
-}
-
-/*
- * The following three functions:
- * pti_request_mastercahannel(), mipi_release_masterchannel()
- * and pti_writedata() are an API for other kernel drivers to
- * access PTI.
- */
-
-/**
- * pti_request_masterchannel()- Kernel API function used to allocate
- * a master, channel ID address
- * to write to PTI HW.
- *
- * @type: 0- request Application master, channel aperture ID
- * write address.
- * 1- request OS master, channel aperture ID write
- * address.
- * 2- request Modem master, channel aperture ID
- * write address.
- * Other values, error.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * Returns:
- * pti_masterchannel struct
- * 0 for error
- */
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name)
-{
- struct pti_masterchannel *mc;
-
- mutex_lock(&alloclock);
-
- switch (type) {
-
- case 0:
- mc = get_id(drv_data->ia_app, MAX_APP_IDS,
- APP_BASE_ID, thread_name);
- break;
-
- case 1:
- mc = get_id(drv_data->ia_os, MAX_OS_IDS,
- OS_BASE_ID, thread_name);
- break;
-
- case 2:
- mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
- MODEM_BASE_ID, thread_name);
- break;
- default:
- mc = NULL;
- }
-
- mutex_unlock(&alloclock);
- return mc;
-}
-EXPORT_SYMBOL_GPL(pti_request_masterchannel);
-
-/**
- * pti_release_masterchannel()- Kernel API function used to release
- * a master, channel ID address
- * used to write to PTI HW.
- *
- * @mc: master, channel apeture ID address to be released. This
- * will de-allocate the structure via kfree().
- */
-void pti_release_masterchannel(struct pti_masterchannel *mc)
-{
- u8 master, channel, i;
-
- mutex_lock(&alloclock);
-
- if (mc) {
- master = mc->master;
- channel = mc->channel;
-
- if (master == APP_BASE_ID) {
- i = channel >> 3;
- drv_data->ia_app[i] &= ~(0x80>>(channel & 0x7));
- } else if (master == OS_BASE_ID) {
- i = channel >> 3;
- drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7));
- } else {
- i = channel >> 3;
- drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7));
- }
-
- kfree(mc);
- }
-
- mutex_unlock(&alloclock);
-}
-EXPORT_SYMBOL_GPL(pti_release_masterchannel);
-
-/**
- * pti_writedata()- Kernel API function used to write trace
- * debugging data to PTI HW.
- *
- * @mc: Master, channel aperture ID address to write to.
- * Null value will return with no write occurring.
- * @buf: Trace debuging data to write to the PTI HW.
- * Null value will return with no write occurring.
- * @count: Size of buf. Value of 0 or a negative number will
- * return with no write occuring.
- */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
-{
- /*
- * since this function is exported, this is treated like an
- * API function, thus, all parameters should
- * be checked for validity.
- */
- if ((mc != NULL) && (buf != NULL) && (count > 0))
- pti_write_to_aperture(mc, buf, count);
- return;
-}
-EXPORT_SYMBOL_GPL(pti_writedata);
-
-/*
- * for the tty_driver_*() basic function descriptions, see tty_driver.h.
- * Specific header comments made for PTI-related specifics.
- */
-
-/**
- * pti_tty_driver_open()- Open an Application master, channel aperture
- * ID to the PTI device via tty device.
- *
- * @tty: tty interface.
- * @filp: filp interface pased to tty_port_open() call.
- *
- * Returns:
- * int, 0 for success
- * otherwise, fail value
- *
- * The main purpose of using the tty device interface is for
- * each tty port to have a unique PTI write aperture. In an
- * example use case, ttyPTI0 gets syslogd and an APP aperture
- * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route
- * modem messages into PTI. Modem trace data does not have to
- * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct
- * master IDs. These messages go through the PTI HW and out of
- * the handheld platform and to the Fido/Lauterbach device.
- */
-static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
-{
- /*
- * we actually want to allocate a new channel per open, per
- * system arch. HW gives more than plenty channels for a single
- * system task to have its own channel to write trace data. This
- * also removes a locking requirement for the actual write
- * procedure.
- */
- return tty_port_open(tty->port, tty, filp);
-}
-
-/**
- * pti_tty_driver_close()- close tty device and release Application
- * master, channel aperture ID to the PTI device via tty device.
- *
- * @tty: tty interface.
- * @filp: filp interface pased to tty_port_close() call.
- *
- * The main purpose of using the tty device interface is to route
- * syslog daemon messages to the PTI HW and out of the handheld platform
- * and to the Fido/Lauterbach device.
- */
-static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
-{
- tty_port_close(tty->port, tty, filp);
-}
-
-/**
- * pti_tty_install()- Used to set up specific master-channels
- * to tty ports for organizational purposes when
- * tracing viewed from debuging tools.
- *
- * @driver: tty driver information.
- * @tty: tty struct containing pti information.
- *
- * Returns:
- * 0 for success
- * otherwise, error
- */
-static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- int idx = tty->index;
- struct pti_tty *pti_tty_data;
- int ret = tty_standard_install(driver, tty);
-
- if (ret == 0) {
- pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
- if (pti_tty_data == NULL)
- return -ENOMEM;
-
- if (idx == PTITTY_MINOR_START)
- pti_tty_data->mc = pti_request_masterchannel(0, NULL);
- else
- pti_tty_data->mc = pti_request_masterchannel(2, NULL);
-
- if (pti_tty_data->mc == NULL) {
- kfree(pti_tty_data);
- return -ENXIO;
- }
- tty->driver_data = pti_tty_data;
- }
-
- return ret;
-}
-
-/**
- * pti_tty_cleanup()- Used to de-allocate master-channel resources
- * tied to tty's of this driver.
- *
- * @tty: tty struct containing pti information.
- */
-static void pti_tty_cleanup(struct tty_struct *tty)
-{
- struct pti_tty *pti_tty_data = tty->driver_data;
- if (pti_tty_data == NULL)
- return;
- pti_release_masterchannel(pti_tty_data->mc);
- kfree(pti_tty_data);
- tty->driver_data = NULL;
-}
-
-/**
- * pti_tty_driver_write()- Write trace debugging data through the char
- * interface to the PTI HW. Part of the misc device implementation.
- *
- * @tty: tty struct containing pti information.
- * @buf: trace data to be written.
- * @len: # of byte to write.
- *
- * Returns:
- * int, # of bytes written
- * otherwise, error
- */
-static int pti_tty_driver_write(struct tty_struct *tty,
- const unsigned char *buf, int len)
-{
- struct pti_tty *pti_tty_data = tty->driver_data;
- if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
- pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
- return len;
- }
- /*
- * we can't write to the pti hardware if the private driver_data
- * and the mc address is not there.
- */
- else
- return -EFAULT;
-}
-
-/**
- * pti_tty_write_room()- Always returns 2048.
- *
- * @tty: contains tty info of the pti driver.
- */
-static int pti_tty_write_room(struct tty_struct *tty)
-{
- return 2048;
-}
-
-/**
- * pti_char_open()- Open an Application master, channel aperture
- * ID to the PTI device. Part of the misc device implementation.
- *
- * @inode: not used.
- * @filp: Output- will have a masterchannel struct set containing
- * the allocated application PTI aperture write address.
- *
- * Returns:
- * int, 0 for success
- * otherwise, a fail value
- */
-static int pti_char_open(struct inode *inode, struct file *filp)
-{
- struct pti_masterchannel *mc;
-
- /*
- * We really do want to fail immediately if
- * pti_request_masterchannel() fails,
- * before assigning the value to filp->private_data.
- * Slightly easier to debug if this driver needs debugging.
- */
- mc = pti_request_masterchannel(0, NULL);
- if (mc == NULL)
- return -ENOMEM;
- filp->private_data = mc;
- return 0;
-}
-
-/**
- * pti_char_release()- Close a char channel to the PTI device. Part
- * of the misc device implementation.
- *
- * @inode: Not used in this implementaiton.
- * @filp: Contains private_data that contains the master, channel
- * ID to be released by the PTI device.
- *
- * Returns:
- * always 0
- */
-static int pti_char_release(struct inode *inode, struct file *filp)
-{
- pti_release_masterchannel(filp->private_data);
- filp->private_data = NULL;
- return 0;
-}
-
-/**
- * pti_char_write()- Write trace debugging data through the char
- * interface to the PTI HW. Part of the misc device implementation.
- *
- * @filp: Contains private data which is used to obtain
- * master, channel write ID.
- * @data: trace data to be written.
- * @len: # of byte to write.
- * @ppose: Not used in this function implementation.
- *
- * Returns:
- * int, # of bytes written
- * otherwise, error value
- *
- * Notes: From side discussions with Alan Cox and experimenting
- * with PTI debug HW like Nokia's Fido box and Lauterbach
- * devices, 8192 byte write buffer used by USER_COPY_SIZE was
- * deemed an appropriate size for this type of usage with
- * debugging HW.
- */
-static ssize_t pti_char_write(struct file *filp, const char __user *data,
- size_t len, loff_t *ppose)
-{
- struct pti_masterchannel *mc;
- void *kbuf;
- const char __user *tmp;
- size_t size = USER_COPY_SIZE;
- size_t n = 0;
-
- tmp = data;
- mc = filp->private_data;
-
- kbuf = kmalloc(size, GFP_KERNEL);
- if (kbuf == NULL) {
- pr_err("%s(%d): buf allocation failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- do {
- if (len - n > USER_COPY_SIZE)
- size = USER_COPY_SIZE;
- else
- size = len - n;
-
- if (copy_from_user(kbuf, tmp, size)) {
- kfree(kbuf);
- return n ? n : -EFAULT;
- }
-
- pti_write_to_aperture(mc, kbuf, size);
- n += size;
- tmp += size;
-
- } while (len > n);
-
- kfree(kbuf);
- return len;
-}
-
-static const struct tty_operations pti_tty_driver_ops = {
- .open = pti_tty_driver_open,
- .close = pti_tty_driver_close,
- .write = pti_tty_driver_write,
- .write_room = pti_tty_write_room,
- .install = pti_tty_install,
- .cleanup = pti_tty_cleanup
-};
-
-static const struct file_operations pti_char_driver_ops = {
- .owner = THIS_MODULE,
- .write = pti_char_write,
- .open = pti_char_open,
- .release = pti_char_release,
-};
-
-static struct miscdevice pti_char_driver = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = CHARNAME,
- .fops = &pti_char_driver_ops
-};
-
-/**
- * pti_console_write()- Write to the console that has been acquired.
- *
- * @c: Not used in this implementaiton.
- * @buf: Data to be written.
- * @len: Length of buf.
- */
-static void pti_console_write(struct console *c, const char *buf, unsigned len)
-{
- static struct pti_masterchannel mc = {.master = CONSOLE_ID,
- .channel = 0};
-
- mc.channel = pti_console_channel;
- pti_console_channel = (pti_console_channel + 1) & 0x7f;
-
- pti_write_full_frame_to_aperture(&mc, buf, len);
-}
-
-/**
- * pti_console_device()- Return the driver tty structure and set the
- * associated index implementation.
- *
- * @c: Console device of the driver.
- * @index: index associated with c.
- *
- * Returns:
- * always value of pti_tty_driver structure when this function
- * is called.
- */
-static struct tty_driver *pti_console_device(struct console *c, int *index)
-{
- *index = c->index;
- return pti_tty_driver;
-}
-
-/**
- * pti_console_setup()- Initialize console variables used by the driver.
- *
- * @c: Not used.
- * @opts: Not used.
- *
- * Returns:
- * always 0.
- */
-static int pti_console_setup(struct console *c, char *opts)
-{
- pti_console_channel = 0;
- pti_control_channel = 0;
- return 0;
-}
-
-/*
- * pti_console struct, used to capture OS printk()'s and shift
- * out to the PTI device for debugging. This cannot be
- * enabled upon boot because of the possibility of eating
- * any serial console printk's (race condition discovered).
- * The console should be enabled upon when the tty port is
- * used for the first time. Since the primary purpose for
- * the tty port is to hook up syslog to it, the tty port
- * will be open for a really long time.
- */
-static struct console pti_console = {
- .name = TTYNAME,
- .write = pti_console_write,
- .device = pti_console_device,
- .setup = pti_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = 0,
-};
-
-/**
- * pti_port_activate()- Used to start/initialize any items upon
- * first opening of tty_port().
- *
- * @port: The tty port number of the PTI device.
- * @tty: The tty struct associated with this device.
- *
- * Returns:
- * always returns 0
- *
- * Notes: The primary purpose of the PTI tty port 0 is to hook
- * the syslog daemon to it; thus this port will be open for a
- * very long time.
- */
-static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
-{
- if (port->tty->index == PTITTY_MINOR_START)
- console_start(&pti_console);
- return 0;
-}
-
-/**
- * pti_port_shutdown()- Used to stop/shutdown any items upon the
- * last tty port close.
- *
- * @port: The tty port number of the PTI device.
- *
- * Notes: The primary purpose of the PTI tty port 0 is to hook
- * the syslog daemon to it; thus this port will be open for a
- * very long time.
- */
-static void pti_port_shutdown(struct tty_port *port)
-{
- if (port->tty->index == PTITTY_MINOR_START)
- console_stop(&pti_console);
-}
-
-static const struct tty_port_operations tty_port_ops = {
- .activate = pti_port_activate,
- .shutdown = pti_port_shutdown,
-};
-
-/*
- * Note the _probe() call sets everything up and ties the char and tty
- * to successfully detecting the PTI device on the pci bus.
- */
-
-/**
- * pti_pci_probe()- Used to detect pti on the pci bus and set
- * things up in the driver.
- *
- * @pdev: pci_dev struct values for pti.
- * @ent: pci_device_id struct for pti driver.
- *
- * Returns:
- * 0 for success
- * otherwise, error
- */
-static int pti_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned int a;
- int retval;
- int pci_bar = 1;
-
- dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
- __func__, __LINE__, pdev->vendor, pdev->device);
-
- retval = misc_register(&pti_char_driver);
- if (retval) {
- pr_err("%s(%d): CHAR registration failed of pti driver\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
- goto err;
- }
-
- retval = pci_enable_device(pdev);
- if (retval != 0) {
- dev_err(&pdev->dev,
- "%s: pci_enable_device() returned error %d\n",
- __func__, retval);
- goto err_unreg_misc;
- }
-
- drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
- if (drv_data == NULL) {
- retval = -ENOMEM;
- dev_err(&pdev->dev,
- "%s(%d): kmalloc() returned NULL memory.\n",
- __func__, __LINE__);
- goto err_disable_pci;
- }
- drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
-
- retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
- if (retval != 0) {
- dev_err(&pdev->dev,
- "%s(%d): pci_request_region() returned error %d\n",
- __func__, __LINE__, retval);
- goto err_free_dd;
- }
- drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
- drv_data->pti_ioaddr =
- ioremap((u32)drv_data->aperture_base,
- APERTURE_LEN);
- if (!drv_data->pti_ioaddr) {
- retval = -ENOMEM;
- goto err_rel_reg;
- }
-
- pci_set_drvdata(pdev, drv_data);
-
- for (a = 0; a < PTITTY_MINOR_NUM; a++) {
- struct tty_port *port = &drv_data->port[a];
- tty_port_init(port);
- port->ops = &tty_port_ops;
-
- tty_port_register_device(port, pti_tty_driver, a, &pdev->dev);
- }
-
- register_console(&pti_console);
-
- return 0;
-err_rel_reg:
- pci_release_region(pdev, pci_bar);
-err_free_dd:
- kfree(drv_data);
-err_disable_pci:
- pci_disable_device(pdev);
-err_unreg_misc:
- misc_deregister(&pti_char_driver);
-err:
- return retval;
-}
-
-/**
- * pti_pci_remove()- Driver exit method to remove PTI from
- * PCI bus.
- * @pdev: variable containing pci info of PTI.
- */
-static void pti_pci_remove(struct pci_dev *pdev)
-{
- struct pti_dev *drv_data = pci_get_drvdata(pdev);
- unsigned int a;
-
- unregister_console(&pti_console);
-
- for (a = 0; a < PTITTY_MINOR_NUM; a++) {
- tty_unregister_device(pti_tty_driver, a);
- tty_port_destroy(&drv_data->port[a]);
- }
-
- iounmap(drv_data->pti_ioaddr);
- kfree(drv_data);
- pci_release_region(pdev, 1);
- pci_disable_device(pdev);
-
- misc_deregister(&pti_char_driver);
-}
-
-static struct pci_driver pti_pci_driver = {
- .name = PCINAME,
- .id_table = pci_ids,
- .probe = pti_pci_probe,
- .remove = pti_pci_remove,
-};
-
-/**
- * pti_init()- Overall entry/init call to the pti driver.
- * It starts the registration process with the kernel.
- *
- * Returns:
- * int __init, 0 for success
- * otherwise value is an error
- *
- */
-static int __init pti_init(void)
-{
- int retval;
-
- /* First register module as tty device */
-
- pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM);
- if (pti_tty_driver == NULL) {
- pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- pti_tty_driver->driver_name = DRIVERNAME;
- pti_tty_driver->name = TTYNAME;
- pti_tty_driver->major = 0;
- pti_tty_driver->minor_start = PTITTY_MINOR_START;
- pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
- pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS;
- pti_tty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
- pti_tty_driver->init_termios = tty_std_termios;
-
- tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
-
- retval = tty_register_driver(pti_tty_driver);
- if (retval) {
- pr_err("%s(%d): TTY registration failed of pti driver\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
-
- goto put_tty;
- }
-
- retval = pci_register_driver(&pti_pci_driver);
- if (retval) {
- pr_err("%s(%d): PCI registration failed of pti driver\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
- goto unreg_tty;
- }
-
- return 0;
-unreg_tty:
- tty_unregister_driver(pti_tty_driver);
-put_tty:
- put_tty_driver(pti_tty_driver);
- pti_tty_driver = NULL;
- return retval;
-}
-
-/**
- * pti_exit()- Unregisters this module as a tty and pci driver.
- */
-static void __exit pti_exit(void)
-{
- tty_unregister_driver(pti_tty_driver);
- pci_unregister_driver(&pti_pci_driver);
- put_tty_driver(pti_tty_driver);
-}
-
-module_init(pti_init);
-module_exit(pti_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ken Mills, Jay Freyensee");
-MODULE_DESCRIPTION("PTI Driver");
-
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..9f350e05ef68 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -19,6 +19,47 @@
#include <uapi/misc/pvpanic.h>
static void __iomem *base;
+static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+static unsigned int events;
+
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%x\n", capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%x\n", events);
+}
+
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int tmp;
+ int err;
+
+ err = kstrtouint(buf, 16, &tmp);
+ if (err)
+ return err;
+
+ if ((tmp & capability) != tmp)
+ return -EINVAL;
+
+ events = tmp;
+
+ return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_dev_attrs[] = {
+ &dev_attr_capability.attr,
+ &dev_attr_events.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_dev);
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
@@ -27,7 +68,8 @@ MODULE_LICENSE("GPL");
static void
pvpanic_send_event(unsigned int event)
{
- iowrite8(event, base);
+ if (event & capability & events)
+ iowrite8(event, base);
}
static int
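
With the new gating, an event reaches the device only if the hardware advertised it (`capability`) and user space has not masked it (`events`). A worked example with the bit values from <uapi/misc/pvpanic.h>, where PVPANIC_PANICKED is 0x1 and PVPANIC_CRASH_LOADED is 0x2:

	/* device advertises both events; user writes "1" to 'events' */
	capability = 0x3;
	events     = 0x1;

	pvpanic_send_event(PVPANIC_CRASH_LOADED);
	/* 0x2 & 0x3 & 0x1 == 0 -> iowrite8() is skipped */

	pvpanic_send_event(PVPANIC_PANICKED);
	/* 0x1 & 0x3 & 0x1 == 0x1 -> event is delivered */
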
@@ -55,15 +97,31 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* initialize capability by RDPT */
+ capability &= ioread8(base);
+ events = capability;
- atomic_notifier_chain_register(&panic_notifier_list,
- &pvpanic_panic_nb);
+ if (capability)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pvpanic_panic_nb);
return 0;
}
@@ -71,8 +129,9 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
static int pvpanic_mmio_remove(struct platform_device *pdev)
{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &pvpanic_panic_nb);
+ if (capability)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pvpanic_panic_nb);
return 0;
}
@@ -93,6 +152,7 @@ static struct platform_driver pvpanic_mmio_driver = {
.name = "pvpanic-mmio",
.of_match_table = pvpanic_mmio_match,
.acpi_match_table = pvpanic_device_ids,
+ .dev_groups = pvpanic_dev_groups,
},
.probe = pvpanic_mmio_probe,
.remove = pvpanic_mmio_remove,
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 23837d0d6f4a..2508f83bdc3f 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -208,7 +208,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
} else {
dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
- "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
+ "xp_remote_memcpy(0x%p, 0x%p, %u)\n", dst,
(void *)msg->buf_pa, msg->size);
ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
@@ -218,7 +218,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
* !!! appears in_use and we can't just call
* !!! dev_kfree_skb.
*/
- dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%x) "
"returned error=0x%x\n", dst,
(void *)msg->buf_pa, msg->size, ret);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index c49065887e8f..880c33ab9f47 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -237,7 +237,9 @@ static struct qp_list qp_guest_endpoints = {
#define QPE_NUM_PAGES(_QPE) ((u32) \
(DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
-
+#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
+ ((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
+ (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)
/*
* Frees kernel VA space for a given queue and its queue header, and
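
QP_SIZES_ARE_VALID() folds together the two checks that vmci_qpair_alloc() used to open-code: `a + b >= max(a, b)` is an unsigned-overflow test (a wrapped sum is necessarily smaller than the larger operand), and the second clause caps the total at VMCI_MAX_GUEST_QP_MEMORY. A reduced sketch of the overflow half:

	/* sketch: detecting unsigned wraparound without a wider type */
	u64 a = U64_MAX - 1, b = 4;
	u64 sum = a + b;              /* wraps to 2 */
	bool ok = sum >= max(a, b);   /* false -> the sizes are rejected */
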
@@ -528,7 +530,7 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
- if (size > SIZE_MAX - PAGE_SIZE)
+ if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
return NULL;
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
@@ -537,6 +539,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
+ return NULL;
+
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
@@ -630,7 +635,7 @@ static void qp_release_pages(struct page **pages,
for (i = 0; i < num_pages; i++) {
if (dirty)
- set_page_dirty(pages[i]);
+ set_page_dirty_lock(pages[i]);
put_page(pages[i]);
pages[i] = NULL;
@@ -1207,7 +1212,7 @@ static int qp_alloc_guest_work(struct vmci_handle *handle,
} else {
result = qp_alloc_hypercall(queue_pair_entry);
if (result < VMCI_SUCCESS) {
- pr_warn("qp_alloc_hypercall result = %d\n", result);
+ pr_devel("qp_alloc_hypercall result = %d\n", result);
goto error;
}
}
@@ -1929,6 +1934,9 @@ int vmci_qp_broker_alloc(struct vmci_handle handle,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context)
{
+ if (!QP_SIZES_ARE_VALID(produce_size, consume_size))
+ return VMCI_ERROR_NO_RESOURCES;
+
return qp_broker_alloc(handle, peer, flags, priv_flags,
produce_size, consume_size,
page_store, context, NULL, NULL, NULL, NULL);
@@ -2685,8 +2693,7 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
* used by the device is NO_RESOURCES, so use that here too.
*/
- if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
- produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+ if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize))
return VMCI_ERROR_NO_RESOURCES;
retval = vmci_route(&src, &dst, false, &route);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 00017fc29a52..c4e6e924d9be 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -104,7 +104,7 @@ struct vmci_qp_dtch_info {
struct vmci_qp_page_store {
/* Reference to pages backing the queue pair. */
u64 pages;
- /* Length of pageList/virtual addres range (in pages). */
+ /* Length of pageList/virtual address range (in pages). */
u32 len;
};
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index c12fe13e4b14..ae8b69aee619 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -81,3 +81,11 @@ config MMC_TEST
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
+config MMC_CRYPTO
+ bool "MMC Crypto Engine Support"
+ depends on BLK_INLINE_ENCRYPTION
+ help
+ Enable Crypto Engine Support in MMC.
+ Enabling this makes it possible for the kernel to use the crypto
+ capabilities of the MMC device (if present) to perform crypto
+ operations on data being transferred to/from the device.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 95ffe008ebdf..6a907736cd7a 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+mmc_core-$(CONFIG_MMC_CRYPTO) += crypto.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 42e27a298218..d666e24fbe0e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -51,6 +51,7 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
@@ -253,7 +254,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_put_request(req);
@@ -629,7 +630,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_put_request(req);
@@ -698,7 +699,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
@@ -1247,6 +1248,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
memset(brq, 0, sizeof(struct mmc_blk_request));
+ mmc_crypto_prepare_req(mqrq);
+
brq->mrq.data = &brq->data;
brq->mrq.tag = req->tag;
@@ -2722,7 +2725,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
@@ -2761,7 +2764,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_put_request(req);
if (err) {
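The repeated blk_execute_rq() conversions in this file track a block-layer API change: the request_queue argument was dropped because it is recoverable from the request itself (req->q); the remaining first argument is the optional gendisk, passed as NULL here. Schematically:

    blk_execute_rq(mq->queue, NULL, req, 0);  /* old: queue passed explicitly */
    blk_execute_rq(NULL, req, 0);             /* new: queue taken from req->q */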
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 19f1ee57fb34..1136b859ddd8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -37,6 +37,7 @@
#include "core.h"
#include "card.h"
+#include "crypto.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
@@ -547,10 +548,10 @@ int mmc_cqe_recovery(struct mmc_host *host)
host->cqe_ops->cqe_recovery_start(host);
memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = MMC_STOP_TRANSMISSION,
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, 0);
memset(&cmd, 0, sizeof(cmd));
@@ -558,7 +559,7 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.arg = 1; /* Discard entire queue */
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, 0);
host->cqe_ops->cqe_recovery_finish(host);
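The comma-to-semicolon fixes above are behavior-preserving: the comma operator fused the consecutive assignments into a single expression statement, so they all still executed. The hazard is purely structural; a contrived illustration of how the comma form can bite later:

    if (error)
    	cmd.opcode = MMC_STOP_TRANSMISSION,	/* with the comma, BOTH        */
    	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;	/* assignments are conditional */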
@@ -992,6 +993,8 @@ void mmc_set_initial_state(struct mmc_host *host)
host->ops->hs400_enhanced_strobe(host, &host->ios);
mmc_set_ios(host);
+
+ mmc_crypto_set_initial_state(host);
}
/**
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c
new file mode 100644
index 000000000000..419a368f8402
--- /dev/null
+++ b/drivers/mmc/core/crypto.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MMC crypto engine (inline encryption) support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "crypto.h"
+#include "queue.h"
+
+void mmc_crypto_set_initial_state(struct mmc_host *host)
+{
+ /* Reset might clear all keys, so reprogram all the keys. */
+ if (host->caps2 & MMC_CAP2_CRYPTO)
+ blk_ksm_reprogram_all_keys(&host->ksm);
+}
+
+void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
+{
+ if (host->caps2 & MMC_CAP2_CRYPTO)
+ blk_ksm_register(&host->ksm, q);
+}
+EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
+
+void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
+{
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+
+ if (!req->crypt_keyslot)
+ return;
+
+ mrq->crypto_enabled = true;
+ mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
+
+ /*
+ * For now we assume that all MMC drivers set max_dun_bytes_supported=4,
+ * which is the limit for CQHCI crypto. So all DUNs should be 32-bit.
+ */
+ WARN_ON_ONCE(req->crypt_ctx->bc_dun[0] > U32_MAX);
+
+ mrq->data_unit_num = req->crypt_ctx->bc_dun[0];
+}
+EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
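The three hooks exported above form the whole core-side contract. A hedged sketch of how a host driver opts in (the condition below is hypothetical and driver-specific; only the MMC_CAP2_CRYPTO flag comes from this series):

    /* Hypothetical excerpt from a host driver's probe path. */
    if (controller_has_inline_crypto)	/* hypothetical readiness test */
    	mmc->caps2 |= MMC_CAP2_CRYPTO;

The core then calls mmc_crypto_setup_queue() when the request queue is created, mmc_crypto_prepare_req() per request, and mmc_crypto_set_initial_state() after resets.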
diff --git a/drivers/mmc/core/crypto.h b/drivers/mmc/core/crypto.h
new file mode 100644
index 000000000000..fbe9a520bf90
--- /dev/null
+++ b/drivers/mmc/core/crypto.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMC crypto engine (inline encryption) support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _MMC_CORE_CRYPTO_H
+#define _MMC_CORE_CRYPTO_H
+
+struct mmc_host;
+struct mmc_queue_req;
+struct request_queue;
+
+#ifdef CONFIG_MMC_CRYPTO
+
+void mmc_crypto_set_initial_state(struct mmc_host *host);
+
+void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host);
+
+void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq);
+
+#else /* CONFIG_MMC_CRYPTO */
+
+static inline void mmc_crypto_set_initial_state(struct mmc_host *host)
+{
+}
+
+static inline void mmc_crypto_setup_queue(struct request_queue *q,
+ struct mmc_host *host)
+{
+}
+
+static inline void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
+{
+}
+
+#endif /* !CONFIG_MMC_CRYPTO */
+
+#endif /* _MMC_CORE_CRYPTO_H */
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 96b2ca1f1b06..9b89a91b6b47 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -25,6 +25,7 @@
#include <linux/mmc/slot-gpio.h>
#include "core.h"
+#include "crypto.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"
@@ -163,6 +164,50 @@ static void mmc_retune_timer(struct timer_list *t)
mmc_retune_needed(host);
}
+static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
+ struct mmc_clk_phase *phase)
+{
+ int degrees[2] = {0};
+ int rc;
+
+ rc = device_property_read_u32_array(dev, prop, degrees, 2);
+ phase->valid = !rc;
+ if (phase->valid) {
+ phase->in_deg = degrees[0];
+ phase->out_deg = degrees[1];
+ }
+}
+
+void
+mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map)
+{
+ struct device *dev = host->parent;
+
+ mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
+ &map->phase[MMC_TIMING_LEGACY]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
+ &map->phase[MMC_TIMING_MMC_HS]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-sd-hs",
+ &map->phase[MMC_TIMING_SD_HS]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr12",
+ &map->phase[MMC_TIMING_UHS_SDR12]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr25",
+ &map->phase[MMC_TIMING_UHS_SDR25]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr50",
+ &map->phase[MMC_TIMING_UHS_SDR50]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr104",
+ &map->phase[MMC_TIMING_UHS_SDR104]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-uhs-ddr50",
+ &map->phase[MMC_TIMING_UHS_DDR50]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-mmc-ddr52",
+ &map->phase[MMC_TIMING_MMC_DDR52]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs200",
+ &map->phase[MMC_TIMING_MMC_HS200]);
+ mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs400",
+ &map->phase[MMC_TIMING_MMC_HS400]);
+}
+EXPORT_SYMBOL(mmc_of_parse_clk_phase);
+
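Each clk-phase-* property carries two u32 cells, the input and output phase in degrees, and phase->valid records whether the property was present. A usage sketch from a hypothetical caller (my_apply_phase() is not a real function):

    struct mmc_clk_phase_map map;

    mmc_of_parse_clk_phase(host, &map);
    if (map.phase[MMC_TIMING_MMC_HS200].valid)
    	my_apply_phase(map.phase[MMC_TIMING_MMC_HS200].in_deg,
    		       map.phase[MMC_TIMING_MMC_HS200].out_deg);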
/**
* mmc_of_parse() - parse host's device-tree node
* @host: host whose node should be parsed.
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index ff3063ce2acd..0d80b72ddde8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1697,7 +1697,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
if (err) {
- err = 0;
/*
* Just disable enhanced area off & sz
* will try to enable ERASE_GROUP_DEF
@@ -1802,7 +1801,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
card->ext_csd.hpi_en = 0;
- err = 0;
} else {
card->ext_csd.hpi_en = 1;
}
@@ -1831,7 +1829,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
mmc_hostname(card->host), err);
card->ext_csd.cache_ctrl = 0;
- err = 0;
} else {
card->ext_csd.cache_ctrl = 1;
}
@@ -1851,7 +1848,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_hostname(card->host));
card->ext_csd.cmdq_support = false;
card->ext_csd.cmdq_depth = 0;
- err = 0;
}
}
/*
@@ -1899,7 +1895,7 @@ err:
static int mmc_can_sleep(struct mmc_card *card)
{
- return (card && card->ext_csd.rev >= 3);
+ return card->ext_csd.rev >= 3;
}
static int mmc_sleep(struct mmc_host *host)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index baa6314f69b4..265d95ec82ce 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -296,7 +296,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
return 0;
}
-static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
+static int mmc_spi_send_csd(struct mmc_host *host, u32 *csd)
{
int ret, i;
__be32 *csd_tmp;
@@ -305,7 +305,7 @@ static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
if (!csd_tmp)
return -ENOMEM;
- ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
+ ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CSD, csd_tmp, 16);
if (ret)
goto err;
@@ -320,7 +320,7 @@ err:
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
if (mmc_host_is_spi(card->host))
- return mmc_spi_send_csd(card, csd);
+ return mmc_spi_send_csd(card->host, csd);
return mmc_send_cxd_native(card->host, card->rca << 16, csd,
MMC_SEND_CSD);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 152e7525ed33..63524551a13a 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -624,7 +624,7 @@ static unsigned int mmc_test_capacity(struct mmc_card *card)
* Fill the first couple of sectors of the card with known data
* so that bad reads/writes can be detected
*/
-static int __mmc_test_prepare(struct mmc_test_card *test, int write)
+static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
int ret, i;
@@ -633,7 +633,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
return ret;
if (write)
- memset(test->buffer, 0xDF, 512);
+ memset(test->buffer, val, 512);
else {
for (i = 0; i < 512; i++)
test->buffer[i] = i;
@@ -650,31 +650,17 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
- return __mmc_test_prepare(test, 1);
+ return __mmc_test_prepare(test, 1, 0xDF);
}
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
- return __mmc_test_prepare(test, 0);
+ return __mmc_test_prepare(test, 0, 0);
}
static int mmc_test_cleanup(struct mmc_test_card *test)
{
- int ret, i;
-
- ret = mmc_test_set_blksize(test, 512);
- if (ret)
- return ret;
-
- memset(test->buffer, 0, 512);
-
- for (i = 0; i < BUFFER_SIZE / 512; i++) {
- ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
- if (ret)
- return ret;
- }
-
- return 0;
+ return __mmc_test_prepare(test, 1, 0);
}
/*******************************************************************/
@@ -2124,7 +2110,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test,
if (mmc_can_erase(test->card) &&
tdata->prepare & MMC_TEST_PREP_ERASE) {
ret = mmc_erase(test->card, dev_addr,
- size / 512, MMC_SECURE_ERASE_ARG);
+ size / 512, test->card->erase_arg);
if (ret)
ret = mmc_erase(test->card, dev_addr,
size / 512, MMC_ERASE_ARG);
@@ -3267,17 +3253,12 @@ static void mmc_test_remove(struct mmc_card *card)
mmc_test_free_dbgfs_file(card);
}
-static void mmc_test_shutdown(struct mmc_card *card)
-{
-}
-
static struct mmc_driver mmc_driver = {
.drv = {
.name = "mmc_test",
},
.probe = mmc_test_probe,
.remove = mmc_test_remove,
- .shutdown = mmc_test_shutdown,
};
static int __init mmc_test_init(void)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index de7cb0369c30..27d2b8ed9484 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>
@@ -19,6 +18,7 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "crypto.h"
#include "host.h"
#define MMC_DMA_MAP_MERGE_SEGMENTS 512
@@ -33,8 +33,6 @@ void mmc_cqe_check_busy(struct mmc_queue *mq)
{
if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
-
- mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}
static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
@@ -384,8 +382,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
/*
@@ -405,6 +405,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
mutex_init(&mq->complete_lock);
init_waitqueue_head(&mq->wait);
+
+ mmc_crypto_setup_queue(mq->queue, host);
}
static inline bool mmc_merge_capable(struct mmc_host *host)
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index fd11491ced9f..57c59b6cb1b9 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -81,7 +81,6 @@ struct mmc_queue {
int in_flight[MMC_ISSUE_MAX];
unsigned int cqe_busy;
#define MMC_CQE_DCMD_BUSY BIT(0)
-#define MMC_CQE_QUEUE_FULL BIT(1)
bool busy;
bool use_cqe;
bool recovery_needed;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6f054c449d46..6fa51a6ed058 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -860,7 +860,7 @@ try_again:
return err;
}
-int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card)
+int mmc_sd_get_csd(struct mmc_card *card)
{
int err;
@@ -1046,7 +1046,7 @@ retry:
}
if (!oldcard) {
- err = mmc_sd_get_csd(host, card);
+ err = mmc_sd_get_csd(card);
if (err)
goto free_card;
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 497c026a5c5a..1af5a038bae9 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -10,7 +10,7 @@ struct mmc_host;
struct mmc_card;
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
-int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
+int mmc_sd_get_csd(struct mmc_card *card);
void mmc_decode_cid(struct mmc_card *card);
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 694a212cbe25..0fda7784cab2 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -751,7 +751,7 @@ try_again:
* Read CSD, before selecting the card
*/
if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
- err = mmc_sd_get_csd(host, card);
+ err = mmc_sd_get_csd(card);
if (err)
goto remove;
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 44bea5e4aeda..b23773583179 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -20,6 +20,8 @@
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
do {
unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 596f32637315..b236dfe2e879 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -168,6 +168,20 @@ config MMC_SDHCI_OF_ASPEED
If unsure, say N.
+config MMC_SDHCI_OF_ASPEED_TEST
+ bool "Tests for the ASPEED SDHCI driver"
+ depends on MMC_SDHCI_OF_ASPEED && KUNIT=y
+ help
+ Enable KUnit tests for the ASPEED SDHCI driver. Select this
+ option only if you will boot the kernel for the purpose of running
+ unit tests (e.g. under UML or qemu).
+
+ The KUnit tests generally exercise parts of the driver that do not
+ directly touch the hardware, for example, the phase correction
+ calculations.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_AT91
tristate "SDHCI OF support for the Atmel SDMMC controller"
depends on MMC_SDHCI_PLTFM
@@ -312,18 +326,6 @@ config MMC_SDHCI_S3C
If unsure, say N.
-config MMC_SDHCI_SIRF
- tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
- depends on ARCH_SIRF || COMPILE_TEST
- depends on MMC_SDHCI_PLTFM
- select MMC_SDHCI_IO_ACCESSORS
- help
- This selects the SDHCI support for SiRF System-on-Chip devices.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
config MMC_SDHCI_PXAV3
tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
depends on CLKDEV_LOOKUP
@@ -544,6 +546,7 @@ config MMC_SDHCI_MSM
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
+ select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -608,13 +611,6 @@ config MMC_DAVINCI
If you have a DAVINCI board with a Multimedia Card slot,
say Y or M here. If unsure, say N.
-config MMC_GOLDFISH
- tristate "goldfish qemu Multimedia Card Interface support"
- depends on GOLDFISH || COMPILE_TEST
- help
- This selects the Goldfish Multimedia card Interface emulation
- found on the Goldfish Android virtual device emulation.
-
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
depends on SPI_MASTER
@@ -868,15 +864,6 @@ config MMC_DW_ROCKCHIP
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on RK3066, RK3188 and RK3288 SoC's.
-config MMC_DW_ZX
- tristate "ZTE specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW && ARCH_ZX
- select MMC_DW_PLTFM
- help
- This selects support for ZTE SoC specific extensions to the
- Synopsys DesignWare Memory Card Interface driver. Select this option
- for platforms based on ZX296718 SoC's.
-
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 451c25fc2c69..6df5c4774260 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
-obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
@@ -34,7 +33,6 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
-obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -61,7 +59,6 @@ obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
-obj-$(CONFIG_MMC_DW_ZX) += dw_mmc-zx.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
@@ -104,6 +101,8 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
+cqhci-y += cqhci-core.o
+cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
ifeq ($(CONFIG_CB710_DEBUG),y)
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
deleted file mode 100644
index e878fdf8f20a..000000000000
--- a/drivers/mmc/host/android-goldfish.c
+++ /dev/null
@@ -1,545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2007, Google Inc.
- * Copyright 2012, Intel Inc.
- *
- * based on omap.c driver, which was
- * Copyright (C) 2004 Nokia Corporation
- * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
- * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
- * Other hacks (DMA, SD, etc) by David Brownell
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/major.h>
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/hdreg.h>
-#include <linux/kdev_t.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/clk.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include <asm/types.h>
-#include <linux/uaccess.h>
-
-#define DRIVER_NAME "goldfish_mmc"
-
-#define BUFFER_SIZE 16384
-
-#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr))
-#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
-
-enum {
- /* status register */
- MMC_INT_STATUS = 0x00,
- /* set this to enable IRQ */
- MMC_INT_ENABLE = 0x04,
- /* set this to specify buffer address */
- MMC_SET_BUFFER = 0x08,
-
- /* MMC command number */
- MMC_CMD = 0x0C,
-
- /* MMC argument */
- MMC_ARG = 0x10,
-
- /* MMC response (or R2 bits 0 - 31) */
- MMC_RESP_0 = 0x14,
-
- /* MMC R2 response bits 32 - 63 */
- MMC_RESP_1 = 0x18,
-
- /* MMC R2 response bits 64 - 95 */
- MMC_RESP_2 = 0x1C,
-
- /* MMC R2 response bits 96 - 127 */
- MMC_RESP_3 = 0x20,
-
- MMC_BLOCK_LENGTH = 0x24,
- MMC_BLOCK_COUNT = 0x28,
-
- /* MMC state flags */
- MMC_STATE = 0x2C,
-
- /* MMC_INT_STATUS bits */
-
- MMC_STAT_END_OF_CMD = 1U << 0,
- MMC_STAT_END_OF_DATA = 1U << 1,
- MMC_STAT_STATE_CHANGE = 1U << 2,
- MMC_STAT_CMD_TIMEOUT = 1U << 3,
-
- /* MMC_STATE bits */
- MMC_STATE_INSERTED = 1U << 0,
- MMC_STATE_READ_ONLY = 1U << 1,
-};
-
-/*
- * Command types
- */
-#define OMAP_MMC_CMDTYPE_BC 0
-#define OMAP_MMC_CMDTYPE_BCR 1
-#define OMAP_MMC_CMDTYPE_AC 2
-#define OMAP_MMC_CMDTYPE_ADTC 3
-
-
-struct goldfish_mmc_host {
- struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
- struct device *dev;
- unsigned char id; /* 16xx chips have 2 MMC blocks */
- void *virt_base;
- unsigned int phys_base;
- int irq;
- unsigned char bus_mode;
- unsigned char hw_bus_mode;
-
- unsigned int sg_len;
- unsigned dma_done:1;
- unsigned dma_in_use:1;
-
- void __iomem *reg_base;
-};
-
-static inline int
-goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
-{
- return 0;
-}
-
-static ssize_t
-goldfish_mmc_show_cover_switch(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct goldfish_mmc_host *host = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
- "closed");
-}
-
-static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
-
-static void
-goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
-{
- u32 cmdreg;
- u32 resptype;
- u32 cmdtype;
-
- host->cmd = cmd;
-
- resptype = 0;
- cmdtype = 0;
-
- /* Our hardware needs to know exact type */
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_NONE:
- break;
- case MMC_RSP_R1:
- case MMC_RSP_R1B:
- /* resp 1, 1b, 6, 7 */
- resptype = 1;
- break;
- case MMC_RSP_R2:
- resptype = 2;
- break;
- case MMC_RSP_R3:
- resptype = 3;
- break;
- default:
- dev_err(mmc_dev(mmc_from_priv(host)),
- "Invalid response type: %04x\n", mmc_resp_type(cmd));
- break;
- }
-
- if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
- cmdtype = OMAP_MMC_CMDTYPE_ADTC;
- else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
- cmdtype = OMAP_MMC_CMDTYPE_BC;
- else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
- cmdtype = OMAP_MMC_CMDTYPE_BCR;
- else
- cmdtype = OMAP_MMC_CMDTYPE_AC;
-
- cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
-
- if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
- cmdreg |= 1 << 6;
-
- if (cmd->flags & MMC_RSP_BUSY)
- cmdreg |= 1 << 11;
-
- if (host->data && !(host->data->flags & MMC_DATA_WRITE))
- cmdreg |= 1 << 15;
-
- GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
- GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
-}
-
-static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
- struct mmc_data *data)
-{
- if (host->dma_in_use) {
- enum dma_data_direction dma_data_dir;
-
- dma_data_dir = mmc_get_dma_dir(data);
-
- if (dma_data_dir == DMA_FROM_DEVICE) {
- /*
- * We don't really have DMA, so we need
- * to copy from our platform driver buffer
- */
- sg_copy_from_buffer(data->sg, 1, host->virt_base,
- data->sg->length);
- }
- host->data->bytes_xfered += data->sg->length;
- dma_unmap_sg(mmc_dev(mmc_from_priv(host)), data->sg,
- host->sg_len, dma_data_dir);
- }
-
- host->data = NULL;
- host->sg_len = 0;
-
- /*
- * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
- * dozens of requests until the card finishes writing data.
- * It'd be cheaper to just wait till an EOFB interrupt arrives...
- */
-
- if (!data->stop) {
- host->mrq = NULL;
- mmc_request_done(mmc_from_priv(host), data->mrq);
- return;
- }
-
- goldfish_mmc_start_command(host, data->stop);
-}
-
-static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
- struct mmc_data *data)
-{
- if (!host->dma_in_use) {
- goldfish_mmc_xfer_done(host, data);
- return;
- }
- if (host->dma_done)
- goldfish_mmc_xfer_done(host, data);
-}
-
-static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
- struct mmc_command *cmd)
-{
- host->cmd = NULL;
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136) {
- /* response type 2 */
- cmd->resp[3] =
- GOLDFISH_MMC_READ(host, MMC_RESP_0);
- cmd->resp[2] =
- GOLDFISH_MMC_READ(host, MMC_RESP_1);
- cmd->resp[1] =
- GOLDFISH_MMC_READ(host, MMC_RESP_2);
- cmd->resp[0] =
- GOLDFISH_MMC_READ(host, MMC_RESP_3);
- } else {
- /* response types 1, 1b, 3, 4, 5, 6 */
- cmd->resp[0] =
- GOLDFISH_MMC_READ(host, MMC_RESP_0);
- }
- }
-
- if (host->data == NULL || cmd->error) {
- host->mrq = NULL;
- mmc_request_done(mmc_from_priv(host), cmd->mrq);
- }
-}
-
-static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
-{
- struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
- u16 status;
- int end_command = 0;
- int end_transfer = 0;
- int state_changed = 0;
- int cmd_timeout = 0;
-
- while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
- GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
-
- if (status & MMC_STAT_END_OF_CMD)
- end_command = 1;
-
- if (status & MMC_STAT_END_OF_DATA)
- end_transfer = 1;
-
- if (status & MMC_STAT_STATE_CHANGE)
- state_changed = 1;
-
- if (status & MMC_STAT_CMD_TIMEOUT) {
- end_command = 0;
- cmd_timeout = 1;
- }
- }
-
- if (cmd_timeout) {
- struct mmc_request *mrq = host->mrq;
- mrq->cmd->error = -ETIMEDOUT;
- host->mrq = NULL;
- mmc_request_done(mmc_from_priv(host), mrq);
- }
-
- if (end_command)
- goldfish_mmc_cmd_done(host, host->cmd);
-
- if (end_transfer) {
- host->dma_done = 1;
- goldfish_mmc_end_of_data(host, host->data);
- } else if (host->data != NULL) {
- /*
- * WORKAROUND -- after porting this driver from 2.6 to 3.4,
- * during device initialization, cases where host->data is
- * non-null but end_transfer is false would occur. Doing
- * nothing in such cases results in no further interrupts,
- * and initialization failure.
- * TODO -- find the real cause.
- */
- host->dma_done = 1;
- goldfish_mmc_end_of_data(host, host->data);
- }
-
- if (state_changed) {
- u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
- pr_info("%s: Card detect now %d\n", __func__,
- (state & MMC_STATE_INSERTED));
- mmc_detect_change(mmc_from_priv(host), 0);
- }
-
- if (!end_command && !end_transfer && !state_changed && !cmd_timeout) {
- status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
- dev_info(mmc_dev(mmc_from_priv(host)), "spurious irq 0x%04x\n",
- status);
- if (status != 0) {
- GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
- GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
- struct mmc_request *req)
-{
- struct mmc_data *data = req->data;
- int block_size;
- unsigned sg_len;
- enum dma_data_direction dma_data_dir;
-
- host->data = data;
- if (data == NULL) {
- GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
- GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
- host->dma_in_use = 0;
- return;
- }
-
- block_size = data->blksz;
-
- GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
- GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);
-
- /*
- * Cope with calling layer confusion; it issues "single
- * block" writes using multi-block scatterlists.
- */
- sg_len = (data->blocks == 1) ? 1 : data->sg_len;
-
- dma_data_dir = mmc_get_dma_dir(data);
-
- host->sg_len = dma_map_sg(mmc_dev(mmc_from_priv(host)), data->sg,
- sg_len, dma_data_dir);
- host->dma_done = 0;
- host->dma_in_use = 1;
-
- if (dma_data_dir == DMA_TO_DEVICE) {
- /*
- * We don't really have DMA, so we need to copy to our
- * platform driver buffer
- */
- sg_copy_to_buffer(data->sg, 1, host->virt_base,
- data->sg->length);
- }
-}
-
-static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
-{
- struct goldfish_mmc_host *host = mmc_priv(mmc);
-
- WARN_ON(host->mrq != NULL);
-
- host->mrq = req;
- goldfish_mmc_prepare_data(host, req);
- goldfish_mmc_start_command(host, req->cmd);
-}
-
-static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct goldfish_mmc_host *host = mmc_priv(mmc);
-
- host->bus_mode = ios->bus_mode;
- host->hw_bus_mode = host->bus_mode;
-}
-
-static int goldfish_mmc_get_ro(struct mmc_host *mmc)
-{
- uint32_t state;
- struct goldfish_mmc_host *host = mmc_priv(mmc);
-
- state = GOLDFISH_MMC_READ(host, MMC_STATE);
- return ((state & MMC_STATE_READ_ONLY) != 0);
-}
-
-static const struct mmc_host_ops goldfish_mmc_ops = {
- .request = goldfish_mmc_request,
- .set_ios = goldfish_mmc_set_ios,
- .get_ro = goldfish_mmc_get_ro,
-};
-
-static int goldfish_mmc_probe(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct goldfish_mmc_host *host = NULL;
- struct resource *res;
- int ret = 0;
- int irq;
- dma_addr_t buf_addr;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (res == NULL || irq < 0)
- return -ENXIO;
-
- mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
- if (mmc == NULL) {
- ret = -ENOMEM;
- goto err_alloc_host_failed;
- }
-
- host = mmc_priv(mmc);
-
- pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
- host->reg_base = ioremap(res->start, resource_size(res));
- if (host->reg_base == NULL) {
- ret = -ENOMEM;
- goto ioremap_failed;
- }
- host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
- &buf_addr, GFP_KERNEL);
-
- if (host->virt_base == 0) {
- ret = -ENOMEM;
- goto dma_alloc_failed;
- }
- host->phys_base = buf_addr;
-
- host->id = pdev->id;
- host->irq = irq;
-
- mmc->ops = &goldfish_mmc_ops;
- mmc->f_min = 400000;
- mmc->f_max = 24000000;
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- mmc->caps = MMC_CAP_4_BIT_DATA;
- mmc->caps2 = MMC_CAP2_NO_SDIO;
-
- /* Use scatterlist DMA to reduce per-transfer costs.
- * NOTE max_seg_size assumption that small blocks aren't
- * normally used (except e.g. for reading SD registers).
- */
- mmc->max_segs = 32;
- mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */
- mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */
- mmc->max_req_size = BUFFER_SIZE;
- mmc->max_seg_size = mmc->max_req_size;
-
- ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
- if (ret) {
- dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
- goto err_request_irq_failed;
- }
-
- host->dev = &pdev->dev;
- platform_set_drvdata(pdev, host);
-
- ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
- if (ret)
- dev_warn(mmc_dev(mmc), "Unable to create sysfs attributes\n");
-
- GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
- GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
- MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
- MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);
-
- mmc_add_host(mmc);
- return 0;
-
-err_request_irq_failed:
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
- host->phys_base);
-dma_alloc_failed:
- iounmap(host->reg_base);
-ioremap_failed:
- mmc_free_host(mmc);
-err_alloc_host_failed:
- return ret;
-}
-
-static int goldfish_mmc_remove(struct platform_device *pdev)
-{
- struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
- struct mmc_host *mmc = mmc_from_priv(host);
-
- BUG_ON(host == NULL);
-
- mmc_remove_host(mmc);
- free_irq(host->irq, host);
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
- iounmap(host->reg_base);
- mmc_free_host(mmc);
- return 0;
-}
-
-static struct platform_driver goldfish_mmc_driver = {
- .probe = goldfish_mmc_probe,
- .remove = goldfish_mmc_remove,
- .driver = {
- .name = DRIVER_NAME,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
-};
-
-module_platform_driver(goldfish_mmc_driver);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 444bd3a0a922..807177c953f3 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1719,9 +1719,9 @@ static void atmci_detect_change(struct timer_list *t)
}
}
-static void atmci_tasklet_func(unsigned long priv)
+static void atmci_tasklet_func(struct tasklet_struct *t)
{
- struct atmel_mci *host = (struct atmel_mci *)priv;
+ struct atmel_mci *host = from_tasklet(host, t, tasklet);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
enum atmel_mci_state state = host->state;
@@ -2401,45 +2401,45 @@ static void atmci_get_cap(struct atmel_mci *host)
dev_info(&host->pdev->dev,
"version: 0x%x\n", version);
- host->caps.has_dma_conf_reg = 0;
- host->caps.has_pdc = 1;
- host->caps.has_cfg_reg = 0;
- host->caps.has_cstor_reg = 0;
- host->caps.has_highspeed = 0;
- host->caps.has_rwproof = 0;
- host->caps.has_odd_clk_div = 0;
- host->caps.has_bad_data_ordering = 1;
- host->caps.need_reset_after_xfer = 1;
- host->caps.need_blksz_mul_4 = 1;
- host->caps.need_notbusy_for_read_ops = 0;
+ host->caps.has_dma_conf_reg = false;
+ host->caps.has_pdc = true;
+ host->caps.has_cfg_reg = false;
+ host->caps.has_cstor_reg = false;
+ host->caps.has_highspeed = false;
+ host->caps.has_rwproof = false;
+ host->caps.has_odd_clk_div = false;
+ host->caps.has_bad_data_ordering = true;
+ host->caps.need_reset_after_xfer = true;
+ host->caps.need_blksz_mul_4 = true;
+ host->caps.need_notbusy_for_read_ops = false;
/* keep only major version number */
switch (version & 0xf00) {
case 0x600:
case 0x500:
- host->caps.has_odd_clk_div = 1;
+ host->caps.has_odd_clk_div = true;
fallthrough;
case 0x400:
case 0x300:
- host->caps.has_dma_conf_reg = 1;
- host->caps.has_pdc = 0;
- host->caps.has_cfg_reg = 1;
- host->caps.has_cstor_reg = 1;
- host->caps.has_highspeed = 1;
+ host->caps.has_dma_conf_reg = true;
+ host->caps.has_pdc = false;
+ host->caps.has_cfg_reg = true;
+ host->caps.has_cstor_reg = true;
+ host->caps.has_highspeed = true;
fallthrough;
case 0x200:
- host->caps.has_rwproof = 1;
- host->caps.need_blksz_mul_4 = 0;
- host->caps.need_notbusy_for_read_ops = 1;
+ host->caps.has_rwproof = true;
+ host->caps.need_blksz_mul_4 = false;
+ host->caps.need_notbusy_for_read_ops = true;
fallthrough;
case 0x100:
- host->caps.has_bad_data_ordering = 0;
- host->caps.need_reset_after_xfer = 0;
+ host->caps.has_bad_data_ordering = false;
+ host->caps.need_reset_after_xfer = false;
fallthrough;
case 0x0:
break;
default:
- host->caps.has_pdc = 0;
+ host->caps.has_pdc = false;
dev_warn(&host->pdev->dev,
"Unmanaged mci version, set minimum capabilities\n");
break;
@@ -2496,7 +2496,7 @@ static int atmci_probe(struct platform_device *pdev)
host->mapbase = regs->start;
- tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
+ tasklet_setup(&host->tasklet, atmci_tasklet_func);
ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
if (ret) {
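This hunk and the au1xmmc and cb710 conversions below are the same mechanical migration: tasklet_setup() registers the tasklet_struct itself as the callback argument, and from_tasklet() (a container_of() wrapper) recovers the enclosing driver structure, replacing the old unsigned-long cast. The general shape, with a hypothetical driver struct:

    static void foo_tasklet_func(struct tasklet_struct *t)
    {
    	struct foo_host *host = from_tasklet(host, t, tasklet);
    	/* ... operate on host ... */
    }

    tasklet_setup(&host->tasklet, foo_tasklet_func);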
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bd00515fbaba..0acc237843f7 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -253,9 +253,9 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host)
mmc_request_done(host->mmc, mrq);
}
-static void au1xmmc_tasklet_finish(unsigned long param)
+static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
{
- struct au1xmmc_host *host = (struct au1xmmc_host *) param;
+ struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
au1xmmc_finish_request(host);
}
@@ -363,9 +363,9 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
au1xmmc_finish_request(host);
}
-static void au1xmmc_tasklet_data(unsigned long param)
+static void au1xmmc_tasklet_data(struct tasklet_struct *t)
{
- struct au1xmmc_host *host = (struct au1xmmc_host *)param;
+ struct au1xmmc_host *host = from_tasklet(host, t, data_task);
u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
@@ -1037,11 +1037,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
if (host->platdata)
mmc->caps &= ~(host->platdata->mask_host_caps);
- tasklet_init(&host->data_task, au1xmmc_tasklet_data,
- (unsigned long)host);
+ tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
- tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
- (unsigned long)host);
+ tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
if (has_dbdma()) {
ret = au1xmmc_dbdma_init(host);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index c5da3aaee334..4bb8f2800a2b 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -436,12 +436,11 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
struct cvm_mmc_host *host = dev_id;
struct mmc_request *req;
- unsigned long flags = 0;
u64 emm_int, rsp_sts;
bool host_done;
if (host->need_irq_handler_lock)
- spin_lock_irqsave(&host->irq_handler_lock, flags);
+ spin_lock(&host->irq_handler_lock);
else
__acquire(&host->irq_handler_lock);
@@ -504,7 +503,7 @@ no_req_done:
host->release_bus(host);
out:
if (host->need_irq_handler_lock)
- spin_unlock_irqrestore(&host->irq_handler_lock, flags);
+ spin_unlock(&host->irq_handler_lock);
else
__release(&host->irq_handler_lock);
return IRQ_RETVAL(emm_int != 0);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index e84ed84ea4cc..6d623b2681c3 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -646,14 +646,14 @@ static int cb710_mmc_irq_handler(struct cb710_slot *slot)
return 1;
}
-static void cb710_mmc_finish_request_tasklet(unsigned long data)
+static void cb710_mmc_finish_request_tasklet(struct tasklet_struct *t)
{
- struct mmc_host *mmc = (void *)data;
- struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ struct cb710_mmc_reader *reader = from_tasklet(reader, t,
+ finish_req_tasklet);
struct mmc_request *mrq = reader->mrq;
reader->mrq = NULL;
- mmc_request_done(mmc, mrq);
+ mmc_request_done(mmc_from_priv(reader), mrq);
}
static const struct mmc_host_ops cb710_mmc_host = {
@@ -718,8 +718,8 @@ static int cb710_mmc_init(struct platform_device *pdev)
reader = mmc_priv(mmc);
- tasklet_init(&reader->finish_req_tasklet,
- cb710_mmc_finish_request_tasklet, (unsigned long)mmc);
+ tasklet_setup(&reader->finish_req_tasklet,
+ cb710_mmc_finish_request_tasklet);
spin_lock_init(&reader->irq_lock);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci-core.c
index 697fe40756bf..93b0432bb601 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -18,6 +18,7 @@
#include <linux/mmc/card.h>
#include "cqhci.h"
+#include "cqhci-crypto.h"
#define DCMD_SLOT 31
#define NUM_SLOTS 32
@@ -258,6 +259,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
cqcfg |= CQHCI_TASK_DESC_SZ;
+ if (mmc->caps2 & MMC_CAP2_CRYPTO)
+ cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;
+
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
@@ -408,13 +412,15 @@ static void cqhci_disable(struct mmc_host *mmc)
}
static void cqhci_prep_task_desc(struct mmc_request *mrq,
- u64 *data, bool intr)
+ struct cqhci_host *cq_host, int tag)
{
+ __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
u32 req_flags = mrq->data->flags;
+ u64 desc0;
- *data = CQHCI_VALID(1) |
+ desc0 = CQHCI_VALID(1) |
CQHCI_END(1) |
- CQHCI_INT(intr) |
+ CQHCI_INT(1) |
CQHCI_ACT(0x5) |
CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
@@ -425,8 +431,19 @@ static void cqhci_prep_task_desc(struct mmc_request *mrq,
CQHCI_BLK_COUNT(mrq->data->blocks) |
CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
- pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
- mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+ task_desc[0] = cpu_to_le64(desc0);
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ u64 desc1 = cqhci_crypto_prep_task_desc(mrq);
+
+ task_desc[1] = cpu_to_le64(desc1);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
+ mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
+ } else {
+ pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
+ mmc_hostname(mrq->host), mrq->tag, desc0);
+ }
}
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
@@ -567,8 +584,6 @@ static inline int cqhci_tag(struct mmc_request *mrq)
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
int err = 0;
- u64 data = 0;
- u64 *task_desc = NULL;
int tag = cqhci_tag(mrq);
struct cqhci_host *cq_host = mmc->cqe_private;
unsigned long flags;
@@ -598,9 +613,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
if (mrq->data) {
- task_desc = (__le64 __force *)get_desc(cq_host, tag);
- cqhci_prep_task_desc(mrq, &data, 1);
- *task_desc = cpu_to_le64(data);
+ cqhci_prep_task_desc(mrq, cq_host, tag);
+
err = cqhci_prep_tran_desc(mrq, cq_host, tag);
if (err) {
pr_err("%s: cqhci: failed to setup tx desc: %d\n",
@@ -671,6 +685,7 @@ static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
struct cqhci_host *cq_host = mmc->cqe_private;
struct cqhci_slot *slot;
u32 terri;
+ u32 tdpe;
int tag;
spin_lock(&cq_host->lock);
@@ -709,6 +724,30 @@ static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
}
}
+ /*
+ * Handle ICCE ("Invalid Crypto Configuration Error"). This should
+ * never happen, since the block layer ensures that all crypto-enabled
+ * I/O requests have a valid keyslot before they reach the driver.
+ *
+ * Note that GCE ("General Crypto Error") is different; it already got
+ * handled above by checking TERRI.
+ */
+ if (status & CQHCI_IS_ICCE) {
+ tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
+ WARN_ONCE(1,
+ "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
+ mmc_hostname(mmc), status, tdpe);
+ while (tdpe != 0) {
+ tag = __ffs(tdpe);
+ tdpe &= ~(1 << tag);
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
if (!cq_host->recovery_halt) {
/*
* The only way to guarantee forward progress is to mark at
@@ -774,7 +813,8 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
- if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
+ cmd_error || data_error)
cqhci_error_irq(mmc, status, cmd_error, data_error);
if (status & CQHCI_IS_TCC) {
@@ -1141,6 +1181,13 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
goto out_err;
}
+ err = cqhci_crypto_init(cq_host);
+ if (err) {
+ pr_err("%s: CQHCI crypto initialization failed\n",
+ mmc_hostname(mmc));
+ goto out_err;
+ }
+
spin_lock_init(&cq_host->lock);
init_completion(&cq_host->halt_comp);
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
new file mode 100644
index 000000000000..6419cfbb4ab7
--- /dev/null
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CQHCI crypto engine (inline encryption) support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/keyslot-manager.h>
+#include <linux/mmc/host.h>
+
+#include "cqhci-crypto.h"
+
+/* Map from blk-crypto modes to CQHCI crypto algorithm IDs and key sizes */
+static const struct cqhci_crypto_alg_entry {
+ enum cqhci_crypto_alg alg;
+ enum cqhci_crypto_key_size key_size;
+} cqhci_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
+ [BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+ .alg = CQHCI_CRYPTO_ALG_AES_XTS,
+ .key_size = CQHCI_CRYPTO_KEY_SIZE_256,
+ },
+};
+
+static inline struct cqhci_host *
+cqhci_host_from_ksm(struct blk_keyslot_manager *ksm)
+{
+ struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm);
+
+ return mmc->cqe_private;
+}
+
+static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg,
+ int slot)
+{
+ u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg);
+ int i;
+
+ if (cq_host->ops->program_key)
+ return cq_host->ops->program_key(cq_host, cfg, slot);
+
+ /* Clear CFGE */
+ cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
+
+ /* Write the key */
+ for (i = 0; i < 16; i++) {
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[i]),
+ slot_offset + i * sizeof(cfg->reg_val[0]));
+ }
+ /* Write dword 17 */
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[17]),
+ slot_offset + 17 * sizeof(cfg->reg_val[0]));
+ /* Write dword 16, which includes the new value of CFGE */
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]),
+ slot_offset + 16 * sizeof(cfg->reg_val[0]));
+ return 0;
+}
+
+static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+
+{
+ struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ const union cqhci_crypto_cap_entry *ccap_array =
+ cq_host->crypto_cap_array;
+ const struct cqhci_crypto_alg_entry *alg =
+ &cqhci_crypto_algs[key->crypto_cfg.crypto_mode];
+ u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
+ int i;
+ int cap_idx = -1;
+ union cqhci_crypto_cfg_entry cfg = {};
+ int err;
+
+ BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
+ for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
+ if (ccap_array[i].algorithm_id == alg->alg &&
+ ccap_array[i].key_size == alg->key_size &&
+ (ccap_array[i].sdus_mask & data_unit_mask)) {
+ cap_idx = i;
+ break;
+ }
+ }
+ if (WARN_ON(cap_idx < 0))
+ return -EOPNOTSUPP;
+
+ cfg.data_unit_size = data_unit_mask;
+ cfg.crypto_cap_idx = cap_idx;
+ cfg.config_enable = CQHCI_CRYPTO_CONFIGURATION_ENABLE;
+
+ if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) {
+ /* In XTS mode, the blk_crypto_key's size is already doubled */
+ memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2,
+ key->raw + key->size/2, key->size/2);
+ } else {
+ memcpy(cfg.crypto_key, key->raw, key->size);
+ }
+
+ err = cqhci_crypto_program_key(cq_host, &cfg, slot);
+
+ memzero_explicit(&cfg, sizeof(cfg));
+ return err;
+}
+
+static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
+{
+ /*
+ * Clear the crypto cfg on the device. Clearing CFGE
+ * might not be sufficient, so just clear the entire cfg.
+ */
+ union cqhci_crypto_cfg_entry cfg = {};
+
+ return cqhci_crypto_program_key(cq_host, &cfg, slot);
+}
+
+static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+
+ return cqhci_crypto_clear_keyslot(cq_host, slot);
+}
+
+/*
+ * The keyslot management operations for CQHCI crypto.
+ *
+ * Note that the block layer ensures that these are never called while the host
+ * controller is runtime-suspended. However, the CQE won't necessarily be
+ * "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
+ * CQHCI_CFG register. But the hardware allows that.
+ */
+static const struct blk_ksm_ll_ops cqhci_ksm_ops = {
+ .keyslot_program = cqhci_crypto_keyslot_program,
+ .keyslot_evict = cqhci_crypto_keyslot_evict,
+};
+
+static enum blk_crypto_mode_num
+cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cqhci_crypto_algs); i++) {
+ BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
+ if (cqhci_crypto_algs[i].alg == cap.algorithm_id &&
+ cqhci_crypto_algs[i].key_size == cap.key_size)
+ return i;
+ }
+ return BLK_ENCRYPTION_MODE_INVALID;
+}
+
+/**
+ * cqhci_crypto_init - initialize CQHCI crypto support
+ * @cq_host: a cqhci host
+ *
+ * If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
+ * CQHCI_CAP_CS, initialize the crypto support. This involves reading the
+ * crypto capability registers, initializing the keyslot manager, clearing all
+ * keyslots, and enabling 128-bit task descriptors.
+ *
+ * Return: 0 if crypto was initialized or isn't supported; whether
+ * MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
+ * Also can return a negative errno value on unexpected error.
+ */
+int cqhci_crypto_init(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ struct device *dev = mmc_dev(mmc);
+ struct blk_keyslot_manager *ksm = &mmc->ksm;
+ unsigned int num_keyslots;
+ unsigned int cap_idx;
+ enum blk_crypto_mode_num blk_mode_num;
+ unsigned int slot;
+ int err = 0;
+
+ if (!(mmc->caps2 & MMC_CAP2_CRYPTO) ||
+ !(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
+ goto out;
+
+ cq_host->crypto_capabilities.reg_val =
+ cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
+
+ cq_host->crypto_cfg_register =
+ (u32)cq_host->crypto_capabilities.config_array_ptr * 0x100;
+
+ cq_host->crypto_cap_array =
+ devm_kcalloc(dev, cq_host->crypto_capabilities.num_crypto_cap,
+ sizeof(cq_host->crypto_cap_array[0]), GFP_KERNEL);
+ if (!cq_host->crypto_cap_array) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * CCAP.CFGC is off by one, so the actual number of crypto
+ * configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
+ */
+ num_keyslots = cq_host->crypto_capabilities.config_count + 1;
+
+ err = devm_blk_ksm_init(dev, ksm, num_keyslots);
+ if (err)
+ goto out;
+
+ ksm->ksm_ll_ops = cqhci_ksm_ops;
+ ksm->dev = dev;
+
+ /* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
+ ksm->max_dun_bytes_supported = 4;
+
+ /*
+ * Cache all the crypto capabilities and advertise the supported crypto
+ * modes and data unit sizes to the block layer.
+ */
+ for (cap_idx = 0; cap_idx < cq_host->crypto_capabilities.num_crypto_cap;
+ cap_idx++) {
+ cq_host->crypto_cap_array[cap_idx].reg_val =
+ cpu_to_le32(cqhci_readl(cq_host,
+ CQHCI_CRYPTOCAP +
+ cap_idx * sizeof(__le32)));
+ blk_mode_num = cqhci_find_blk_crypto_mode(
+ cq_host->crypto_cap_array[cap_idx]);
+ if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
+ continue;
+ ksm->crypto_modes_supported[blk_mode_num] |=
+ cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
+ }
+
+ /* Clear all the keyslots so that we start in a known state. */
+ for (slot = 0; slot < num_keyslots; slot++)
+ cqhci_crypto_clear_keyslot(cq_host, slot);
+
+ /* CQHCI crypto requires the use of 128-bit task descriptors. */
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ return 0;
+
+out:
+ mmc->caps2 &= ~MMC_CAP2_CRYPTO;
+ return err;
+}
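cqhci_crypto_program_key() defers to an optional ->program_key() host op before falling back to direct MMIO writes, which is what lets controllers whose keys must be programmed through firmware (the select QCOM_SCM added to MMC_SDHCI_MSM earlier in this series points at one such case) plug in. A hedged sketch of an override, with my_fw_program_key() standing in for whatever vendor mechanism applies:

    /* Hypothetical vendor hook wired into struct cqhci_host_ops. */
    static int my_cqhci_program_key(struct cqhci_host *cq_host,
    				const union cqhci_crypto_cfg_entry *cfg,
    				int slot)
    {
    	return my_fw_program_key(cfg, slot);	/* hypothetical firmware call */
    }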
diff --git a/drivers/mmc/host/cqhci-crypto.h b/drivers/mmc/host/cqhci-crypto.h
new file mode 100644
index 000000000000..60b58ee0e625
--- /dev/null
+++ b/drivers/mmc/host/cqhci-crypto.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CQHCI crypto engine (inline encryption) support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef LINUX_MMC_CQHCI_CRYPTO_H
+#define LINUX_MMC_CQHCI_CRYPTO_H
+
+#include <linux/mmc/host.h>
+
+#include "cqhci.h"
+
+#ifdef CONFIG_MMC_CRYPTO
+
+int cqhci_crypto_init(struct cqhci_host *host);
+
+/*
+ * Returns the crypto bits that should be set in bits 64-127 of the
+ * task descriptor.
+ */
+static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq)
+{
+ if (!mrq->crypto_enabled)
+ return 0;
+
+ return CQHCI_CRYPTO_ENABLE_BIT |
+ CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) |
+ mrq->data_unit_num;
+}
+
+#else /* CONFIG_MMC_CRYPTO */
+
+static inline int cqhci_crypto_init(struct cqhci_host *host)
+{
+ return 0;
+}
+
+static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_MMC_CRYPTO */
+
+#endif /* LINUX_MMC_CQHCI_CRYPTO_H */
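Combining this inline with the CQHCI_CRYPTO_* field macros added to cqhci.h below, the upper 64 bits of a 128-bit task descriptor work out as follows for, say, keyslot 3 and a DUN of 0x1000 (values chosen purely for illustration):

    u64 w = CQHCI_CRYPTO_ENABLE_BIT |	/* 1ULL << 47         */
    	CQHCI_CRYPTO_KEYSLOT(3) |	/* (u64)3 << 32       */
    	0x1000;				/* mrq->data_unit_num */
    /* w == 0x0000800300001000 */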
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index 89bf6adbce8c..ba9387ed90eb 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -22,10 +22,13 @@
/* capabilities */
#define CQHCI_CAP 0x04
+#define CQHCI_CAP_CS 0x10000000 /* Crypto Support */
+
/* configuration */
#define CQHCI_CFG 0x08
#define CQHCI_DCMD 0x00001000
#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_CRYPTO_GENERAL_ENABLE 0x00000002
#define CQHCI_ENABLE 0x00000001
/* control */
@@ -39,8 +42,11 @@
#define CQHCI_IS_TCC BIT(1)
#define CQHCI_IS_RED BIT(2)
#define CQHCI_IS_TCL BIT(3)
+#define CQHCI_IS_GCE BIT(4) /* General Crypto Error */
+#define CQHCI_IS_ICCE BIT(5) /* Invalid Crypto Config Error */
-#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED | \
+ CQHCI_IS_GCE | CQHCI_IS_ICCE)
/* interrupt status enable */
#define CQHCI_ISTE 0x14
@@ -78,6 +84,9 @@
/* task clear */
#define CQHCI_TCLR 0x38
+/* task descriptor processing error */
+#define CQHCI_TDPE 0x3c
+
/* send status config 1 */
#define CQHCI_SSC1 0x40
#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16)
@@ -107,6 +116,10 @@
/* command response argument */
#define CQHCI_CRA 0x5C
+/* crypto capabilities */
+#define CQHCI_CCAP 0x100
+#define CQHCI_CRYPTOCAP 0x104
+
#define CQHCI_INT_ALL 0xF
#define CQHCI_IC_DEFAULT_ICCTH 31
#define CQHCI_IC_DEFAULT_ICTOVAL 1
@@ -133,11 +146,70 @@
#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+/* crypto task descriptor fields (for bits 64-127 of task descriptor) */
+#define CQHCI_CRYPTO_ENABLE_BIT (1ULL << 47)
+#define CQHCI_CRYPTO_KEYSLOT(x) ((u64)(x) << 32)
+
/* transfer descriptor fields */
#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
+/* CCAP - Crypto Capability 100h */
+union cqhci_crypto_capabilities {
+ __le32 reg_val;
+ struct {
+ u8 num_crypto_cap;
+ u8 config_count;
+ u8 reserved;
+ u8 config_array_ptr;
+ };
+};
+
+enum cqhci_crypto_key_size {
+ CQHCI_CRYPTO_KEY_SIZE_INVALID = 0,
+ CQHCI_CRYPTO_KEY_SIZE_128 = 1,
+ CQHCI_CRYPTO_KEY_SIZE_192 = 2,
+ CQHCI_CRYPTO_KEY_SIZE_256 = 3,
+ CQHCI_CRYPTO_KEY_SIZE_512 = 4,
+};
+
+enum cqhci_crypto_alg {
+ CQHCI_CRYPTO_ALG_AES_XTS = 0,
+ CQHCI_CRYPTO_ALG_BITLOCKER_AES_CBC = 1,
+ CQHCI_CRYPTO_ALG_AES_ECB = 2,
+ CQHCI_CRYPTO_ALG_ESSIV_AES_CBC = 3,
+};
+
+/* x-CRYPTOCAP - Crypto Capability X */
+union cqhci_crypto_cap_entry {
+ __le32 reg_val;
+ struct {
+ u8 algorithm_id;
+ u8 sdus_mask; /* Supported data unit size mask */
+ u8 key_size;
+ u8 reserved;
+ };
+};
+
+#define CQHCI_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
+#define CQHCI_CRYPTO_KEY_MAX_SIZE 64
+/* x-CRYPTOCFG - Crypto Configuration X */
+union cqhci_crypto_cfg_entry {
+ __le32 reg_val[32];
+ struct {
+ u8 crypto_key[CQHCI_CRYPTO_KEY_MAX_SIZE];
+ u8 data_unit_size;
+ u8 crypto_cap_idx;
+ u8 reserved_1;
+ u8 config_enable;
+ u8 reserved_multi_host;
+ u8 reserved_2;
+ u8 vsb[2];
+ u8 reserved_3[56];
+ };
+};
+
struct cqhci_host_ops;
struct mmc_host;
struct mmc_request;
@@ -196,6 +268,12 @@ struct cqhci_host {
struct completion halt_comp;
wait_queue_head_t wait_queue;
struct cqhci_slot *slot;
+
+#ifdef CONFIG_MMC_CRYPTO
+ union cqhci_crypto_capabilities crypto_capabilities;
+ union cqhci_crypto_cap_entry *crypto_cap_array;
+ u32 crypto_cfg_register;
+#endif
};
struct cqhci_host_ops {
@@ -208,6 +286,10 @@ struct cqhci_host_ops {
u64 *data);
void (*pre_enable)(struct mmc_host *mmc);
void (*post_disable)(struct mmc_host *mmc);
+#ifdef CONFIG_MMC_CRYPTO
+ int (*program_key)(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg, int slot);
+#endif
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
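
A note on the layout of union cqhci_crypto_cfg_entry above: the struct view and the 32-word register view must both span exactly 128 bytes (64 key bytes plus 64 bytes of control and reserved fields). A standalone C sketch, mirroring only the definitions in this hunk, that verifies the arithmetic at compile time:

#include <stdint.h>
#include <stdio.h>

#define KEY_MAX_SIZE 64 /* CQHCI_CRYPTO_KEY_MAX_SIZE */

/* Userspace mirror of union cqhci_crypto_cfg_entry (illustration only) */
union crypto_cfg_entry {
	uint32_t reg_val[32];
	struct {
		uint8_t crypto_key[KEY_MAX_SIZE];
		uint8_t data_unit_size;
		uint8_t crypto_cap_idx;
		uint8_t reserved_1;
		uint8_t config_enable;
		uint8_t reserved_multi_host;
		uint8_t reserved_2;
		uint8_t vsb[2];
		uint8_t reserved_3[56];
	};
};

int main(void)
{
	/* 64 + 1 + 1 + 1 + 1 + 1 + 1 + 2 + 56 == 32 * 4 == 128 bytes */
	_Static_assert(sizeof(union crypto_cfg_entry) == 128,
		       "config entry must be 128 bytes");
	printf("cfg entry: %zu bytes\n", sizeof(union crypto_cfg_entry));
	return 0;
}
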
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
deleted file mode 100644
index 51bcc6332f3a..000000000000
--- a/drivers/mmc/host/dw_mmc-zx.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ZX Specific Extensions for Synopsys DW Multimedia Card Interface driver
- *
- * Copyright (C) 2016, Linaro Ltd.
- * Copyright (C) 2016, ZTE Corp.
- */
-
-#include <linux/clk.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#include "dw_mmc.h"
-#include "dw_mmc-pltfm.h"
-#include "dw_mmc-zx.h"
-
-struct dw_mci_zx_priv_data {
- struct regmap *sysc_base;
-};
-
-enum delay_type {
- DELAY_TYPE_READ, /* read dqs delay */
- DELAY_TYPE_CLK, /* clk sample delay */
-};
-
-static int dw_mci_zx_emmc_set_delay(struct dw_mci *host, unsigned int delay,
- enum delay_type dflag)
-{
- struct dw_mci_zx_priv_data *priv = host->priv;
- struct regmap *sysc_base = priv->sysc_base;
- unsigned int clksel;
- unsigned int loop = 1000;
- int ret;
-
- if (!sysc_base)
- return -EINVAL;
-
- ret = regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
- PARA_HALF_CLK_MODE | PARA_DLL_BYPASS_MODE |
- PARA_PHASE_DET_SEL_MASK |
- PARA_DLL_LOCK_NUM_MASK |
- DLL_REG_SET | PARA_DLL_START_MASK,
- PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4));
- if (ret)
- return ret;
-
- ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG1, &clksel);
- if (ret)
- return ret;
-
- if (dflag == DELAY_TYPE_CLK) {
- clksel &= ~CLK_SAMP_DELAY_MASK;
- clksel |= CLK_SAMP_DELAY(delay);
- } else {
- clksel &= ~READ_DQS_DELAY_MASK;
- clksel |= READ_DQS_DELAY(delay);
- }
-
- regmap_write(sysc_base, LB_AON_EMMC_CFG_REG1, clksel);
- regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
- PARA_DLL_START_MASK | PARA_DLL_LOCK_NUM_MASK |
- DLL_REG_SET,
- PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4) |
- DLL_REG_SET);
-
- do {
- ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG2, &clksel);
- if (ret)
- return ret;
-
- } while (--loop && !(clksel & ZX_DLL_LOCKED));
-
- if (!loop) {
- dev_err(host->dev, "Error: %s dll lock fail\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int dw_mci_zx_emmc_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
-{
- struct dw_mci *host = slot->host;
- struct mmc_host *mmc = slot->mmc;
- int ret, len = 0, start = 0, end = 0, delay, best = 0;
-
- for (delay = 1; delay < 128; delay++) {
- ret = dw_mci_zx_emmc_set_delay(host, delay, DELAY_TYPE_CLK);
- if (!ret && mmc_send_tuning(mmc, opcode, NULL)) {
- if (start >= 0) {
- end = delay - 1;
- /* check and update longest good range */
- if ((end - start) > len) {
- best = (start + end) >> 1;
- len = end - start;
- }
- }
- start = -1;
- end = 0;
- continue;
- }
- if (start < 0)
- start = delay;
- }
-
- if (start >= 0) {
- end = delay - 1;
- if ((end - start) > len) {
- best = (start + end) >> 1;
- len = end - start;
- }
- }
- if (best < 0)
- return -EIO;
-
- dev_info(host->dev, "%s best range: start %d end %d\n", __func__,
- start, end);
- return dw_mci_zx_emmc_set_delay(host, best, DELAY_TYPE_CLK);
-}
-
-static int dw_mci_zx_prepare_hs400_tuning(struct dw_mci *host,
- struct mmc_ios *ios)
-{
- int ret;
-
- /* config phase shift as 90 degree */
- ret = dw_mci_zx_emmc_set_delay(host, 32, DELAY_TYPE_READ);
- if (ret < 0)
- return -EIO;
-
- return 0;
-}
-
-static int dw_mci_zx_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
-{
- struct dw_mci *host = slot->host;
-
- if (host->verid == 0x290a) /* only for emmc */
- return dw_mci_zx_emmc_execute_tuning(slot, opcode);
- /* TODO: Add 0x210a dedicated tuning for sd/sdio */
-
- return 0;
-}
-
-static int dw_mci_zx_parse_dt(struct dw_mci *host)
-{
- struct device_node *np = host->dev->of_node;
- struct device_node *node;
- struct dw_mci_zx_priv_data *priv;
- struct regmap *sysc_base;
-
- /* syscon is needed only by emmc */
- node = of_parse_phandle(np, "zte,aon-syscon", 0);
- if (node) {
- sysc_base = syscon_node_to_regmap(node);
- of_node_put(node);
-
- if (IS_ERR(sysc_base))
- return dev_err_probe(host->dev, PTR_ERR(sysc_base),
- "Can't get syscon\n");
- } else {
- return 0;
- }
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->sysc_base = sysc_base;
- host->priv = priv;
-
- return 0;
-}
-
-static unsigned long zx_dwmmc_caps[3] = {
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
-};
-
-static const struct dw_mci_drv_data zx_drv_data = {
- .caps = zx_dwmmc_caps,
- .num_caps = ARRAY_SIZE(zx_dwmmc_caps),
- .execute_tuning = dw_mci_zx_execute_tuning,
- .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
- .parse_dt = dw_mci_zx_parse_dt,
-};
-
-static const struct of_device_id dw_mci_zx_match[] = {
- { .compatible = "zte,zx296718-dw-mshc", .data = &zx_drv_data},
- {},
-};
-MODULE_DEVICE_TABLE(of, dw_mci_zx_match);
-
-static int dw_mci_zx_probe(struct platform_device *pdev)
-{
- const struct dw_mci_drv_data *drv_data;
- const struct of_device_id *match;
-
- match = of_match_node(dw_mci_zx_match, pdev->dev.of_node);
- drv_data = match->data;
-
- return dw_mci_pltfm_register(pdev, drv_data);
-}
-
-static const struct dev_pm_ops dw_mci_zx_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
-};
-
-static struct platform_driver dw_mci_zx_pltfm_driver = {
- .probe = dw_mci_zx_probe,
- .remove = dw_mci_pltfm_remove,
- .driver = {
- .name = "dwmmc_zx",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = dw_mci_zx_match,
- .pm = &dw_mci_zx_dev_pm_ops,
- },
-};
-
-module_platform_driver(dw_mci_zx_pltfm_driver);
-
-MODULE_DESCRIPTION("ZTE emmc/sd driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h
deleted file mode 100644
index 09ac52766f14..000000000000
--- a/drivers/mmc/host/dw_mmc-zx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DW_MMC_ZX_H_
-#define _DW_MMC_ZX_H_
-
-/* ZX296718 SoC specific DLL register offset. */
-#define LB_AON_EMMC_CFG_REG0 0x1B0
-#define LB_AON_EMMC_CFG_REG1 0x1B4
-#define LB_AON_EMMC_CFG_REG2 0x1B8
-
-/* LB_AON_EMMC_CFG_REG0 register defines */
-#define PARA_DLL_START(x) ((x) & 0xFF)
-#define PARA_DLL_START_MASK 0xFF
-#define DLL_REG_SET BIT(8)
-#define PARA_DLL_LOCK_NUM(x) (((x) & 7) << 16)
-#define PARA_DLL_LOCK_NUM_MASK (7 << 16)
-#define PARA_PHASE_DET_SEL(x) (((x) & 7) << 20)
-#define PARA_PHASE_DET_SEL_MASK (7 << 20)
-#define PARA_DLL_BYPASS_MODE BIT(23)
-#define PARA_HALF_CLK_MODE BIT(24)
-
-/* LB_AON_EMMC_CFG_REG1 register defines */
-#define READ_DQS_DELAY(x) ((x) & 0x7F)
-#define READ_DQS_DELAY_MASK (0x7F)
-#define READ_DQS_BYPASS_MODE BIT(7)
-#define CLK_SAMP_DELAY(x) (((x) & 0x7F) << 8)
-#define CLK_SAMP_DELAY_MASK (0x7F << 8)
-#define CLK_SAMP_BYPASS_MODE BIT(15)
-
-/* LB_AON_EMMC_CFG_REG2 register defines */
-#define ZX_DLL_LOCKED BIT(2)
-
-#endif /* _DW_MMC_ZX_H_ */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a5244435556b..2f4de30f650b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1952,9 +1952,9 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
return true;
}
-static void dw_mci_tasklet_func(unsigned long priv)
+static void dw_mci_tasklet_func(struct tasklet_struct *t)
{
- struct dw_mci *host = (struct dw_mci *)priv;
+ struct dw_mci *host = from_tasklet(host, t, tasklet);
struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
@@ -3308,7 +3308,7 @@ int dw_mci_probe(struct dw_mci *host)
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
- tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+ tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
if (ret)
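
The dw_mmc change above follows the kernel-wide tasklet API conversion: tasklet_setup() replaces tasklet_init(), and the callback recovers its private data with from_tasklet(), a container_of() wrapper, instead of an unsigned long cast. A standalone C sketch of the recovery idiom, using stand-in types since the real tasklet API is kernel-only:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types, for illustration only */
struct tasklet_struct { int pending; };

struct fake_host {
	int id;
	struct tasklet_struct tasklet;
};

/* from_tasklet() is essentially container_of(): subtract the member's
 * offset from the member pointer to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void tasklet_func(struct tasklet_struct *t)
{
	struct fake_host *host = container_of(t, struct fake_host, tasklet);

	printf("recovered host id %d\n", host->id);
}

int main(void)
{
	struct fake_host host = { .id = 42 };

	/* The callback now receives the tasklet pointer, not a cast long */
	tasklet_func(&host.tasklet);
	return 0;
}
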
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index a1f92fed2a55..b3c636edbb46 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -152,7 +152,6 @@ struct jz4740_mmc_host {
enum jz4740_mmc_version version;
int irq;
- int card_detect_irq;
void __iomem *base;
struct resource *mem_res;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 13f6a2c0ed04..eb6c02bc4a02 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -227,7 +227,6 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
struct mmc_data *data = mrq->data;
struct scatterlist *sg;
int i;
- bool use_desc_chain_mode = true;
/*
* When Controller DMA cannot directly access DDR memory, disable
@@ -237,25 +236,33 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
if (host->dram_access_quirk)
return;
- /*
- * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
- * reported. For some strange reason this occurs in descriptor
- * chain mode only. So let's fall back to bounce buffer mode
- * for command SD_IO_RW_EXTENDED.
- */
- if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
- return;
+ if (data->blocks > 1) {
+ /*
+ * In the block mode DMA descriptor format, the "length" field
+ * indicates the number of blocks, and there is no way to pass a DMA
+ * size that is not a multiple of the SDIO block size, making it
+ * impossible to tie more than one memory buffer to a single SDIO
+ * block. Block mode sg buffer sizes should be aligned with the SDIO
+ * block size, otherwise chain mode cannot be used.
+ */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->length % data->blksz) {
+ WARN_ONCE(1, "unaligned sg len %u blksize %u\n",
+ sg->length, data->blksz);
+ return;
+ }
+ }
+ }
- for_each_sg(data->sg, sg, data->sg_len, i)
+ for_each_sg(data->sg, sg, data->sg_len, i) {
/* check for 8 byte alignment */
- if (sg->offset & 7) {
+ if (sg->offset % 8) {
WARN_ONCE(1, "unaligned scatterlist buffer\n");
- use_desc_chain_mode = false;
- break;
+ return;
}
+ }
- if (use_desc_chain_mode)
- data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
+ data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}
static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
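
The rules the rewritten meson_mmc_get_transfer_mode() enforces condense to two predicates: in multi-block transfers every segment length must be a multiple of the block size, and every segment must start 8-byte aligned. A standalone C sketch of that decision, with a simplified scatterlist type assumed purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a scatterlist entry */
struct sg_entry {
	unsigned int offset;
	unsigned int length;
};

/* Returns true if descriptor chain mode may be used for this transfer */
static bool chain_mode_ok(const struct sg_entry *sg, int nents,
			  unsigned int blocks, unsigned int blksz)
{
	int i;

	for (i = 0; i < nents; i++) {
		/* Block mode: segment sizes must align with the block size */
		if (blocks > 1 && (sg[i].length % blksz))
			return false;
		/* All modes: segments must be 8-byte aligned */
		if (sg[i].offset % 8)
			return false;
	}
	return true;
}

int main(void)
{
	struct sg_entry sg[2] = { { 0, 1024 }, { 8, 512 } };

	printf("chain mode: %s\n",
	       chain_mode_ok(sg, 2, 3, 512) ? "yes" : "no");
	return 0;
}
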
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b5a41a7ce165..17dbc81c221e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
+#include <linux/gpio/consumer.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -1888,6 +1889,65 @@ static struct mmc_host_ops mmci_ops = {
.start_signal_voltage_switch = mmci_sig_volt_switch,
};
+static void mmci_probe_level_translator(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct mmci_host *host = mmc_priv(mmc);
+ struct gpio_desc *cmd_gpio;
+ struct gpio_desc *ck_gpio;
+ struct gpio_desc *ckin_gpio;
+ int clk_hi, clk_lo;
+
+ /*
+ * Assume the level translator is present if st,use-ckin is set.
+ * This is to cater for DTs which do not implement this test.
+ */
+ host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
+
+ cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
+ if (IS_ERR(cmd_gpio))
+ goto exit_cmd;
+
+ ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
+ if (IS_ERR(ck_gpio))
+ goto exit_ck;
+
+ ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
+ if (IS_ERR(ckin_gpio))
+ goto exit_ckin;
+
+ /* All GPIOs are valid, test whether level translator works */
+
+ /* Sample CKIN */
+ clk_hi = !!gpiod_get_value(ckin_gpio);
+
+ /* Set CK low */
+ gpiod_set_value(ck_gpio, 0);
+
+ /* Sample CKIN */
+ clk_lo = !!gpiod_get_value(ckin_gpio);
+
+ /* Tristate all */
+ gpiod_direction_input(cmd_gpio);
+ gpiod_direction_input(ck_gpio);
+
+ /* Level translator is present if CK signal is propagated to CKIN */
+ if (!clk_hi || clk_lo) {
+ host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
+ dev_warn(dev,
+ "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
+ }
+
+ gpiod_put(ckin_gpio);
+
+exit_ckin:
+ gpiod_put(ck_gpio);
+exit_ck:
+ gpiod_put(cmd_gpio);
+exit_cmd:
+ pinctrl_select_default_state(dev);
+}
+
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1913,7 +1973,7 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
if (of_get_property(np, "st,neg-edge", NULL))
host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
if (of_get_property(np, "st,use-ckin", NULL))
- host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
+ mmci_probe_level_translator(mmc);
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
@@ -1949,15 +2009,15 @@ static int mmci_probe(struct amba_device *dev,
if (!mmc)
return -ENOMEM;
- ret = mmci_of_parse(np, mmc);
- if (ret)
- goto host_free;
-
host = mmc_priv(mmc);
host->mmc = mmc;
host->mmc_ops = &mmci_ops;
mmc->ops = &mmci_ops;
+ ret = mmci_of_parse(np, mmc);
+ if (ret)
+ goto host_free;
+
/*
* Some variant (STM32) doesn't have opendrain bit, nevertheless
* pins can be set accordingly using pinctrl
@@ -2195,7 +2255,7 @@ static int mmci_probe(struct amba_device *dev,
return ret;
}
-static int mmci_remove(struct amba_device *dev)
+static void mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
@@ -2223,8 +2283,6 @@ static int mmci_remove(struct amba_device *dev)
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
}
-
- return 0;
}
#ifdef CONFIG_PM
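
The detection logic in mmci_probe_level_translator() above reduces to a single predicate: the translator is operable only if CKIN tracks CK, i.e. CKIN samples high while CK is driven high and low while CK is driven low. A standalone C sketch of just that decision, with the GPIO plumbing omitted:

#include <stdbool.h>
#include <stdio.h>

/* Translator present iff the CK level propagates to CKIN in both states */
static bool level_translator_ok(int ckin_when_ck_high, int ckin_when_ck_low)
{
	return ckin_when_ck_high && !ckin_when_ck_low;
}

int main(void)
{
	/* CK followed on CKIN: keep MCI_STM32_CLK_SELCKIN set */
	printf("followed: %d\n", level_translator_ok(1, 0));
	/* CKIN stuck high: clear the bit and warn, as above */
	printf("stuck:    %d\n", level_translator_ok(1, 1));
	return 0;
}
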
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index de09c6347524..898ed1b023df 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1127,13 +1127,13 @@ static void msdc_track_cmd_data(struct msdc_host *host,
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
unsigned long flags;
- bool ret;
- ret = cancel_delayed_work(&host->req_timeout);
- if (!ret) {
- /* delay work already running */
- return;
- }
+ /*
+ * No need to check the return value of cancel_delayed_work(), as
+ * only ONE path will go here!
+ */
+ cancel_delayed_work(&host->req_timeout);
+
spin_lock_irqsave(&host->lock, flags);
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
@@ -1155,7 +1155,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
bool done = false;
bool sbc_error;
unsigned long flags;
- u32 *rsp = cmd->resp;
+ u32 *rsp;
if (mrq->sbc && cmd == mrq->cmd &&
(events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
@@ -1176,6 +1176,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
if (done)
return true;
+ rsp = cmd->resp;
sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
@@ -1363,7 +1364,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
struct mmc_request *mrq, struct mmc_data *data)
{
- struct mmc_command *stop = data->stop;
+ struct mmc_command *stop;
unsigned long flags;
bool done;
unsigned int check_data = events &
@@ -1379,6 +1380,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
if (done)
return true;
+ stop = data->stop;
if (check_data || (stop && stop->error)) {
dev_dbg(host->dev, "DMA status: 0x%8X\n",
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 56bbc6cd9c84..947581de7860 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -628,7 +628,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = mmc_of_parse(mmc);
if (ret)
- goto out_clk_disable;
+ goto out_free_dma;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 6aa0537f1f84..5e5af34090f1 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -878,9 +878,9 @@ static void mmc_omap_cover_timer(struct timer_list *t)
tasklet_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_handler(unsigned long param)
+static void mmc_omap_cover_handler(struct tasklet_struct *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
+ struct mmc_omap_slot *slot = from_tasklet(slot, t, cover_tasklet);
int cover_open = mmc_omap_cover_is_open(slot);
mmc_detect_change(slot->mmc, 0);
@@ -1269,8 +1269,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
if (slot->pdata->get_cover_state != NULL) {
timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
- tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
- (unsigned long)slot);
+ tasklet_setup(&slot->cover_tasklet, mmc_omap_cover_handler);
}
r = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index aa9cc49206d1..2f8038d69f67 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -177,7 +177,7 @@ struct omap_hsmmc_host {
struct regulator *pbias;
bool pbias_enabled;
void __iomem *base;
- int vqmmc_enabled;
+ bool vqmmc_enabled;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
unsigned int dma_len;
@@ -232,7 +232,7 @@ static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
goto err_vqmmc;
}
- host->vqmmc_enabled = 1;
+ host->vqmmc_enabled = true;
}
return 0;
@@ -256,7 +256,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
return ret;
}
- host->vqmmc_enabled = 0;
+ host->vqmmc_enabled = false;
}
if (!IS_ERR(mmc->supply.vmmc)) {
@@ -285,22 +285,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
return 0;
if (power_on) {
- if (host->pbias_enabled == 0) {
+ if (!host->pbias_enabled) {
ret = regulator_enable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg enable fail\n");
return ret;
}
- host->pbias_enabled = 1;
+ host->pbias_enabled = true;
}
} else {
- if (host->pbias_enabled == 1) {
+ if (host->pbias_enabled) {
ret = regulator_disable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg disable fail\n");
return ret;
}
- host->pbias_enabled = 0;
+ host->pbias_enabled = false;
}
}
@@ -1861,8 +1861,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->base = base + pdata->reg_offset;
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
- host->pbias_enabled = 0;
- host->vqmmc_enabled = 0;
+ host->pbias_enabled = false;
+ host->vqmmc_enabled = false;
platform_set_drvdata(pdev, host);
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 53b81582f1af..5490962dc8e5 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -640,7 +640,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
owl_host->irq = platform_get_irq(pdev, 0);
if (owl_host->irq < 0) {
ret = -EINVAL;
- goto err_free_host;
+ goto err_release_channel;
}
ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
@@ -648,19 +648,21 @@ static int owl_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %d\n",
owl_host->irq);
- goto err_free_host;
+ goto err_release_channel;
}
ret = mmc_add_host(mmc);
if (ret) {
dev_err(&pdev->dev, "Failed to add host\n");
- goto err_free_host;
+ goto err_release_channel;
}
dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
return 0;
+err_release_channel:
+ dma_release_channel(owl_host->dma);
err_free_host:
mmc_free_host(mmc);
@@ -674,6 +676,7 @@ static int owl_mmc_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
disable_irq(owl_host->irq);
+ dma_release_channel(owl_host->dma);
mmc_free_host(mmc);
return 0;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 38f028e70633..158c21e5a942 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -768,10 +768,12 @@ static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host)
return false;
}
-static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
+static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host,
+ struct mmc_request *mrq)
{
struct renesas_sdhi *priv = host_to_priv(host);
bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool ret = false;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -785,11 +787,19 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
if (mmc_doing_tune(host->mmc))
return false;
+ if (((mrq->cmd->error == -ETIMEDOUT) ||
+ (mrq->data && mrq->data->error == -ETIMEDOUT)) &&
+ ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ (host->ops.get_cd && host->ops.get_cd(host->mmc))))
+ ret |= true;
+
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN)
- return renesas_sdhi_auto_correction(host);
+ ret |= renesas_sdhi_auto_correction(host);
+ else
+ ret |= renesas_sdhi_manual_correction(host, use_4tap);
- return renesas_sdhi_manual_correction(host, use_4tap);
+ return ret;
}
static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit)
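
The reworked renesas_sdhi_check_scc_error() now ORs two retune triggers: a command or data timeout on a card that is still present, and an error reported by the automatic or manual SCC correction path. A standalone C sketch of the combined decision, with the correction result collapsed to one boolean for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool need_retune(bool cmd_timeout, bool data_timeout,
			bool card_present, bool scc_error)
{
	bool ret = false;

	/* Timeouts only count if the card cannot simply have been removed */
	if ((cmd_timeout || data_timeout) && card_present)
		ret = true;

	/* Correction errors are ORed in rather than returned directly */
	return ret || scc_error;
}

int main(void)
{
	printf("%d\n", need_retune(true, false, true, false));  /* 1 */
	printf("%d\n", need_retune(false, false, true, true));  /* 1 */
	printf("%d\n", need_retune(false, false, true, false)); /* 0 */
	return 0;
}
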
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index fe13e1ea22dc..ff97f15e317c 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -56,6 +56,12 @@
#define INFO2_DTRANERR1 BIT(17)
#define INFO2_DTRANERR0 BIT(16)
+enum renesas_sdhi_dma_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_PRE_MAPPED,
+ COOKIE_MAPPED,
+};
+
/*
* Specification of this driver:
* - host->chan_{rx,tx} will be used as a flag of enabling/disabling the dma
@@ -172,6 +178,50 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
tasklet_schedule(&priv->dma_priv.dma_complete);
}
+/*
+ * renesas_sdhi_internal_dmac_map() will be called with two different
+ * sg pointers in two mmc_data by .pre_req(), but the tmio host can
+ * have only a single sg_ptr. So, renesas_sdhi_internal_dmac_{un}map()
+ * should use the sg pointer in the mmc_data instead of host->sg_ptr.
+ */
+static void
+renesas_sdhi_internal_dmac_unmap(struct tmio_mmc_host *host,
+ struct mmc_data *data,
+ enum renesas_sdhi_dma_cookie cookie)
+{
+ bool unmap = cookie == COOKIE_UNMAPPED ? (data->host_cookie != cookie) :
+ (data->host_cookie == cookie);
+
+ if (unmap) {
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+}
+
+static bool
+renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
+ struct mmc_data *data,
+ enum renesas_sdhi_dma_cookie cookie)
+{
+ if (data->host_cookie == COOKIE_PRE_MAPPED)
+ return true;
+
+ if (!dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data)))
+ return false;
+
+ data->host_cookie = cookie;
+
+ /* This DMAC cannot handle buffers that are not 128-byte aligned */
+ if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
+ renesas_sdhi_internal_dmac_unmap(host, data, cookie);
+ return false;
+ }
+
+ return true;
+}
+
static void
renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
@@ -182,14 +232,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
if (!test_bit(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY, &global_flags))
dtran_mode |= DTRAN_MODE_ADDR_MODE;
- if (!dma_map_sg(&host->pdev->dev, sg, host->sg_len,
- mmc_get_dma_dir(data)))
+ if (!renesas_sdhi_internal_dmac_map(host, data, COOKIE_MAPPED))
goto force_pio;
- /* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg_dma_address(sg), 8))
- goto force_pio_with_unmap;
-
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
@@ -212,7 +257,7 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
return;
force_pio_with_unmap:
- dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+ renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
force_pio:
renesas_sdhi_internal_dmac_enable_dma(host, false);
@@ -245,7 +290,7 @@ static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
dir = DMA_TO_DEVICE;
renesas_sdhi_internal_dmac_enable_dma(host, false);
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
+ renesas_sdhi_internal_dmac_unmap(host, host->data, COOKIE_MAPPED);
if (dir == DMA_FROM_DEVICE)
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
@@ -274,6 +319,32 @@ static void renesas_sdhi_internal_dmac_end_dma(struct tmio_mmc_host *host)
renesas_sdhi_internal_dmac_complete(host);
}
+static void renesas_sdhi_internal_dmac_post_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
+}
+
+static void renesas_sdhi_internal_dmac_pre_req(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ data->host_cookie = COOKIE_UNMAPPED;
+ renesas_sdhi_internal_dmac_map(host, data, COOKIE_PRE_MAPPED);
+}
+
static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
@@ -295,6 +366,10 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
tasklet_init(&host->dma_issue,
renesas_sdhi_internal_dmac_issue_tasklet_fn,
(unsigned long)host);
+
+ /* Add pre_req and post_req */
+ host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
+ host->ops.post_req = renesas_sdhi_internal_dmac_post_req;
}
static void
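
The cookie handling above is the crux of the new pre_req/post_req support: renesas_sdhi_internal_dmac_unmap() treats COOKIE_UNMAPPED as "unmap whatever is still mapped" and any other cookie as "unmap only if mapped with exactly that cookie". A standalone C sketch of the predicate and how the call sites use it:

#include <stdbool.h>
#include <stdio.h>

enum dma_cookie { COOKIE_UNMAPPED, COOKIE_PRE_MAPPED, COOKIE_MAPPED };

/* Mirrors the "unmap" expression in renesas_sdhi_internal_dmac_unmap() */
static bool should_unmap(enum dma_cookie have, enum dma_cookie want)
{
	return want == COOKIE_UNMAPPED ? (have != want) : (have == want);
}

int main(void)
{
	/* post_req(): tear down anything still mapped */
	printf("%d\n", should_unmap(COOKIE_PRE_MAPPED, COOKIE_UNMAPPED)); /* 1 */
	/* completion path: pre-mapped buffers stay mapped for post_req() */
	printf("%d\n", should_unmap(COOKIE_PRE_MAPPED, COOKIE_MAPPED));   /* 0 */
	/* completion path: buffers mapped by start_dma() are released */
	printf("%d\n", should_unmap(COOKIE_MAPPED, COOKIE_MAPPED));       /* 1 */
	return 0;
}
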
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index e6f5bbce5685..4ca937415734 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -906,6 +906,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (host->power_state == SDMMC_POWER_ON)
return 0;
+ msleep(100);
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
@@ -1425,7 +1427,8 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
if (pcr->rtd3_en)
mmc->caps = mmc->caps | MMC_CAP_AGGRESSIVE_PM;
- mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
+ mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE |
+ MMC_CAP2_NO_SDIO;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
mmc->ops = &realtek_pci_sdmmc_ops;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index a33a7823c265..0ca6f6d30b75 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -540,9 +540,9 @@ static void do_pio_write(struct s3cmci_host *host)
enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
}
-static void pio_tasklet(unsigned long data)
+static void pio_tasklet(struct tasklet_struct *t)
{
- struct s3cmci_host *host = (struct s3cmci_host *) data;
+ struct s3cmci_host *host = from_tasklet(host, t, pio_tasklet);
s3cmci_disable_irq(host, true);
@@ -1532,7 +1532,7 @@ static int s3cmci_probe(struct platform_device *pdev)
host->pdata = pdev->dev.platform_data;
spin_lock_init(&host->complete_lock);
- tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
+ tasklet_setup(&host->pio_tasklet, pio_tasklet);
if (host->is2440) {
host->sdiimsk = S3C2440_SDIIMSK;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index bbf3496f4495..f9780c65ebe9 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -314,11 +314,7 @@ err_clk:
static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
{
- int ret;
-
- ret = sdhci_pltfm_unregister(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to shutdown\n");
+ sdhci_pltfm_suspend(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 16ed19f47939..a20459744d21 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1666,9 +1666,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ int dead;
pm_runtime_get_sync(&pdev->dev);
+ dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index c9434b461aab..ddeaf8e1f72f 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -296,9 +296,27 @@ static const struct of_device_id sdhci_iproc_of_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
#ifdef CONFIG_ACPI
+/*
+ * This is a duplicate of bcm2835_(pltfrm_)data without caps quirks
+ * which are provided by the ACPI table.
+ */
+static const struct sdhci_pltfm_data sdhci_bcm_arasan_data = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .ops = &sdhci_iproc_32only_ops,
+};
+
+static const struct sdhci_iproc_data bcm_arasan_data = {
+ .pdata = &sdhci_bcm_arasan_data,
+};
+
static const struct acpi_device_id sdhci_iproc_acpi_ids[] = {
{ .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data },
{ .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data },
+ { .id = "BCM2847", .driver_data = (kernel_ulong_t)&bcm_arasan_data },
+ { .id = "BRCME88C", .driver_data = (kernel_ulong_t)&bcm2711_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9c7927b03253..5e1da4df096f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -13,6 +13,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
+#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
@@ -255,10 +256,12 @@ struct sdhci_msm_variant_info {
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
+ void __iomem *ice_mem; /* MSM ICE mapped address (if available) */
int pwr_irq; /* power irq */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
- struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
+ /* core, iface, cal, sleep, and ice clocks */
+ struct clk_bulk_data bulk_clks[5];
unsigned long clk_rate;
struct mmc_host *mmc;
struct opp_table *opp_table;
@@ -327,8 +330,7 @@ static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
writel_relaxed(val, host->ioaddr + offset);
}
-static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
- unsigned int clock)
+static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
{
struct mmc_ios ios = host->mmc->ios;
/*
@@ -341,8 +343,8 @@ static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
ios.timing == MMC_TIMING_MMC_DDR52 ||
ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)
- clock *= 2;
- return clock;
+ return 2;
+ return 1;
}
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -352,20 +354,36 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
struct clk *core_clk = msm_host->bulk_clks[0].clk;
+ unsigned long achieved_rate;
+ unsigned int desired_rate;
+ unsigned int mult;
int rc;
- clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
+ mult = msm_get_clock_mult_for_bus_mode(host);
+ desired_rate = clock * mult;
+ rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
- mmc_hostname(host->mmc), clock,
- curr_ios.timing);
+ mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
return;
}
- msm_host->clk_rate = clock;
+
+ /*
+ * Qualcomm clock drivers by default round clock _up_ if they can't
+ * make the requested rate. This is not good for SD. Yell if we
+ * encounter it.
+ */
+ achieved_rate = clk_get_rate(core_clk);
+ if (achieved_rate > desired_rate)
+ pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
+ mmc_hostname(host->mmc), desired_rate, achieved_rate);
+ host->mmc->actual_clock = achieved_rate / mult;
+
+ /* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
+ msm_host->clk_rate = desired_rate;
+
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(core_clk),
- curr_ios.timing);
+ mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
}
/* Platform specific tuning */
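
To make the clock bookkeeping above concrete: DDR-style timings run the controller clock at twice the card clock, so the OPP request is clock * mult and mmc->actual_clock is the achieved rate divided back down by mult. A standalone C sketch with hypothetical rates:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 52000000;  /* requested card clock (DDR52) */
	unsigned int mult = 2;          /* doubled for DDR52/HS400 timings */
	unsigned long desired = (unsigned long)clock * mult;
	unsigned long achieved = 104000000; /* rate the clock driver set */

	/* Qualcomm clock drivers may round up; that would overclock the card */
	if (achieved > desired)
		printf("overclocked: req %lu Hz, actual %lu Hz\n",
		       desired, achieved);

	/* Card-visible rate, as stored in mmc->actual_clock above */
	printf("actual_clock = %lu Hz\n", achieved / mult);
	return 0;
}
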
@@ -1744,13 +1762,6 @@ static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
- /*
- * Keep actual_clock as zero -
- * - since there is no divider used so no need of having actual_clock.
- * - MSM controller uses SDCLK for data timeout calculation. If
- * actual_clock is zero, host->clock is taken for calculation.
- */
- host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
@@ -1773,7 +1784,7 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
if (!clock) {
- msm_host->clk_rate = clock;
+ host->mmc->actual_clock = msm_host->clk_rate = 0;
goto out;
}
@@ -1786,6 +1797,246 @@ out:
/*****************************************************************************\
* *
+ * Inline Crypto Engine (ICE) support *
+ * *
+\*****************************************************************************/
+
+#ifdef CONFIG_MMC_CRYPTO
+
+#define AES_256_XTS_KEY_SIZE 64
+
+/* QCOM ICE registers */
+
+#define QCOM_ICE_REG_VERSION 0x0008
+
+#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_FUSE_SETTING_MASK 0x1
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000
+
+#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
+
+#define sdhci_msm_ice_writel(host, val, reg) \
+ writel((val), (host)->ice_mem + (reg))
+#define sdhci_msm_ice_readl(host, reg) \
+ readl((host)->ice_mem + (reg))
+
+static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host)
+{
+ struct device *dev = mmc_dev(msm_host->mmc);
+ u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
+ int major = regval >> 24;
+ int minor = (regval >> 16) & 0xFF;
+ int step = regval & 0xFFFF;
+
+ /* For now this driver only supports ICE version 3. */
+ if (major != 3) {
+ dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
+ major, minor, step);
+ return false;
+ }
+
+ dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
+ major, minor, step);
+
+ /* If fuses are blown, ICE might not work in the standard way. */
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING);
+ if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
+ dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
+ return false;
+ }
+ return true;
+}
+
+static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
+{
+ return devm_clk_get(dev, "ice");
+}
+
+static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
+ struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = msm_host->mmc;
+ struct device *dev = mmc_dev(mmc);
+ struct resource *res;
+ int err;
+
+ if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
+ return 0;
+
+ res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM,
+ "ice");
+ if (!res) {
+ dev_warn(dev, "ICE registers not found\n");
+ goto disable;
+ }
+
+ if (!qcom_scm_ice_available()) {
+ dev_warn(dev, "ICE SCM interface not found\n");
+ goto disable;
+ }
+
+ msm_host->ice_mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(msm_host->ice_mem)) {
+ err = PTR_ERR(msm_host->ice_mem);
+ dev_err(dev, "Failed to map ICE registers; err=%d\n", err);
+ return err;
+ }
+
+ if (!sdhci_msm_ice_supported(msm_host))
+ goto disable;
+
+ mmc->caps2 |= MMC_CAP2_CRYPTO;
+ return 0;
+
+disable:
+ dev_warn(dev, "Disabling inline encryption support\n");
+ return 0;
+}
+
+static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host)
+{
+ u32 regval;
+
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
+ /*
+ * Enable low power mode sequence
+ * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+ */
+ regval |= 0x7000;
+ sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+}
+
+static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host)
+{
+ u32 regval;
+
+ /* ICE Optimizations Enable Sequence */
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
+ regval |= 0xD807100;
+ /* ICE HPG requires delay before writing */
+ udelay(5);
+ sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+ udelay(5);
+}
+
+/*
+ * Wait until the ICE BIST (built-in self-test) has completed.
+ *
+ * This may be necessary before ICE can be used.
+ *
+ * Note that we don't really care whether the BIST passed or failed; we really
+ * just want to make sure that it isn't still running. This is because (a) the
+ * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is
+ * documented to reject crypto requests if the BIST fails, so we needn't do it
+ * in software too, and (c) properly testing storage encryption requires testing
+ * the full storage stack anyway, and not relying on hardware-level self-tests.
+ */
+static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host)
+{
+ u32 regval;
+ int err;
+
+ err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS,
+ regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
+ 50, 5000);
+ if (err)
+ dev_err(mmc_dev(msm_host->mmc),
+ "Timed out waiting for ICE self-test to complete\n");
+ return err;
+}
+
+static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
+{
+ if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
+ return;
+ sdhci_msm_ice_low_power_mode_enable(msm_host);
+ sdhci_msm_ice_optimization_enable(msm_host);
+ sdhci_msm_ice_wait_bist_status(msm_host);
+}
+
+static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
+{
+ if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
+ return 0;
+ return sdhci_msm_ice_wait_bist_status(msm_host);
+}
+
+/*
+ * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
+ * vendor-specific SCM calls for this; it doesn't support the standard way.
+ */
+static int sdhci_msm_program_key(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg,
+ int slot)
+{
+ struct device *dev = mmc_dev(cq_host->mmc);
+ union cqhci_crypto_cap_entry cap;
+ union {
+ u8 bytes[AES_256_XTS_KEY_SIZE];
+ u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
+ } key;
+ int i;
+ int err;
+
+ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
+ return qcom_scm_ice_invalidate_key(slot);
+
+ /* Only AES-256-XTS has been tested so far. */
+ cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
+ if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
+ cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) {
+ dev_err_ratelimited(dev,
+ "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
+ cap.algorithm_id, cap.key_size);
+ return -EINVAL;
+ }
+
+ memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
+
+ /*
+ * The SCM call byte-swaps the 32-bit words of the key. So we have to
+ * do the same, in order for the final key to be correct.
+ */
+ for (i = 0; i < ARRAY_SIZE(key.words); i++)
+ __cpu_to_be32s(&key.words[i]);
+
+ err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ cfg->data_unit_size);
+ memzero_explicit(&key, sizeof(key));
+ return err;
+}
+#else /* CONFIG_MMC_CRYPTO */
+static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
+{
+ return NULL;
+}
+
+static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
+ struct cqhci_host *cq_host)
+{
+ return 0;
+}
+
+static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
+{
+}
+
+static inline int __maybe_unused
+sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
+{
+ return 0;
+}
+#endif /* !CONFIG_MMC_CRYPTO */
+
+/*****************************************************************************\
+ * *
* MSM Command Queue Engine (CQE) *
* *
\*****************************************************************************/
@@ -1802,6 +2053,16 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
+static void sdhci_msm_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_cqe_enable(mmc);
+ sdhci_msm_ice_enable(msm_host);
+}
+
static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1834,8 +2095,11 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
}
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
- .enable = sdhci_cqe_enable,
+ .enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
+#ifdef CONFIG_MMC_CRYPTO
+ .program_key = sdhci_msm_program_key,
+#endif
};
static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
@@ -1871,6 +2135,10 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ ret = sdhci_msm_ice_init(msm_host, cq_host);
+ if (ret)
+ goto cleanup;
+
ret = cqhci_init(cq_host, host->mmc, dma64);
if (ret) {
dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
@@ -2311,6 +2579,11 @@ static int sdhci_msm_probe(struct platform_device *pdev)
clk = NULL;
msm_host->bulk_clks[3].clk = clk;
+ clk = sdhci_msm_ice_get_clk(&pdev->dev);
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[4].clk = clk;
+
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
if (ret)
@@ -2524,12 +2797,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
* Whenever core-clock is gated dynamically, it's needed to
* restore the SDR DLL settings when the clock is ungated.
*/
- if (msm_host->restore_dll_config && msm_host->clk_rate)
+ if (msm_host->restore_dll_config && msm_host->clk_rate) {
ret = sdhci_msm_restore_sdr_dll_config(host);
+ if (ret)
+ return ret;
+ }
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return ret;
+ return sdhci_msm_ice_resume(msm_host);
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
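
The byte-swap in sdhci_msm_program_key() deserves a concrete illustration: each 32-bit word of the AES-256-XTS key is endian-swapped before the SCM call, because the SCM interface swaps the words again on the way through. A standalone C sketch using htonl() as a stand-in for __cpu_to_be32s() (equivalent only on little-endian hosts) and hypothetical key material:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 64 /* AES_256_XTS_KEY_SIZE */

int main(void)
{
	union {
		uint8_t bytes[KEY_SIZE];
		uint32_t words[KEY_SIZE / sizeof(uint32_t)];
	} key;
	size_t i;

	/* Hypothetical key material, for illustration only */
	for (i = 0; i < sizeof(key.bytes); i++)
		key.bytes[i] = (uint8_t)i;

	/* Swap each 32-bit word; the SCM call will swap them back */
	for (i = 0; i < sizeof(key.words) / sizeof(key.words[0]); i++)
		key.words[i] = htonl(key.words[i]);

	printf("first word after swap: 0x%08x\n", key.words[0]);

	/* Scrub key material when done; the kernel uses memzero_explicit() */
	memset(&key, 0, sizeof(key));
	return 0;
}
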
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 4f3774bcda94..839965f7c717 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1380,26 +1380,25 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
/**
* sdhci_arasan_update_support64b - Set SUPPORT_64B (64-bit System Bus Support)
+ * @host: The sdhci_host
+ * @value: The value to write
*
* This should be set based on the System Address Bus.
* 0: the Core supports only 32-bit System Address Bus.
* 1: the Core supports 64-bit System Address Bus.
*
- * NOTES:
- * - For Keem Bay, it is required to clear this bit. Its default value is 1'b1.
- * Keem Bay does not support 64-bit access.
- *
- * @host: The sdhci_host
- * @value: The value to write
+ * NOTE:
+ * For Keem Bay, it is required to clear this bit. Its default value is 1'b1.
+ * Keem Bay does not support 64-bit access.
*/
static void sdhci_arasan_update_support64b(struct sdhci_host *host, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
- sdhci_arasan->soc_ctl_map;
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
/* Having a map is optional */
+ soc_ctl_map = sdhci_arasan->soc_ctl_map;
if (!soc_ctl_map)
return;
@@ -1508,17 +1507,16 @@ cleanup:
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
- const struct of_device_id *match;
struct device_node *node;
struct clk *clk_xin;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct sdhci_arasan_data *sdhci_arasan;
- struct device_node *np = pdev->dev.of_node;
const struct sdhci_arasan_of_data *data;
- match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
- data = match->data;
+ data = of_device_get_match_data(dev);
host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan));
if (IS_ERR(host))
@@ -1531,42 +1529,41 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->soc_ctl_map = data->soc_ctl_map;
sdhci_arasan->clk_ops = data->clk_ops;
- node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
+ node = of_parse_phandle(np, "arasan,soc-ctl-syscon", 0);
if (node) {
sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
of_node_put(node);
if (IS_ERR(sdhci_arasan->soc_ctl_base)) {
- ret = dev_err_probe(&pdev->dev,
+ ret = dev_err_probe(dev,
PTR_ERR(sdhci_arasan->soc_ctl_base),
"Can't get syscon\n");
goto err_pltfm_free;
}
}
- sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
+ sdhci_arasan->clk_ahb = devm_clk_get(dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
- dev_err(&pdev->dev, "clk_ahb clock not found.\n");
- ret = PTR_ERR(sdhci_arasan->clk_ahb);
+ ret = dev_err_probe(dev, PTR_ERR(sdhci_arasan->clk_ahb),
+ "clk_ahb clock not found.\n");
goto err_pltfm_free;
}
- clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
+ clk_xin = devm_clk_get(dev, "clk_xin");
if (IS_ERR(clk_xin)) {
- dev_err(&pdev->dev, "clk_xin clock not found.\n");
- ret = PTR_ERR(clk_xin);
+ ret = dev_err_probe(dev, PTR_ERR(clk_xin), "clk_xin clock not found.\n");
goto err_pltfm_free;
}
ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
if (ret) {
- dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
+ dev_err(dev, "Unable to enable AHB clock.\n");
goto err_pltfm_free;
}
ret = clk_prepare_enable(clk_xin);
if (ret) {
- dev_err(&pdev->dev, "Unable to enable SD clock.\n");
+ dev_err(dev, "Unable to enable SD clock.\n");
goto clk_dis_ahb;
}
@@ -1580,8 +1577,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
pltfm_host->clk = clk_xin;
- if (of_device_is_compatible(pdev->dev.of_node,
- "rockchip,rk3399-sdhci-5.1"))
+ if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
@@ -1595,7 +1591,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_update_baseclkfreq(host);
- ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
+ ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, dev);
if (ret)
goto clk_disable_all;
@@ -1604,29 +1600,26 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
arasan_zynqmp_execute_tuning;
}
- arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+ arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);
ret = mmc_of_parse(host->mmc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+ ret = dev_err_probe(dev, ret, "parsing dt failed.\n");
goto unreg_clk;
}
sdhci_arasan->phy = ERR_PTR(-ENODEV);
- if (of_device_is_compatible(pdev->dev.of_node,
- "arasan,sdhci-5.1")) {
- sdhci_arasan->phy = devm_phy_get(&pdev->dev,
- "phy_arasan");
+ if (of_device_is_compatible(np, "arasan,sdhci-5.1")) {
+ sdhci_arasan->phy = devm_phy_get(dev, "phy_arasan");
if (IS_ERR(sdhci_arasan->phy)) {
- ret = PTR_ERR(sdhci_arasan->phy);
- dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n");
+ ret = dev_err_probe(dev, PTR_ERR(sdhci_arasan->phy),
+ "No phy for arasan,sdhci-5.1.\n");
goto unreg_clk;
}
ret = phy_init(sdhci_arasan->phy);
if (ret < 0) {
- dev_err(&pdev->dev, "phy_init err.\n");
+ dev_err(dev, "phy_init err.\n");
goto unreg_clk;
}
@@ -1651,7 +1644,7 @@ err_add_host:
if (!IS_ERR(sdhci_arasan->phy))
phy_exit(sdhci_arasan->phy);
unreg_clk:
- sdhci_arasan_unregister_sdclk(&pdev->dev);
+ sdhci_arasan_unregister_sdclk(dev);
clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
diff --git a/drivers/mmc/host/sdhci-of-aspeed-test.c b/drivers/mmc/host/sdhci-of-aspeed-test.c
new file mode 100644
index 000000000000..bb67d159b7d8
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-aspeed-test.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2020 IBM Corp. */
+
+#include <kunit/test.h>
+
+static void aspeed_sdhci_phase_ddr52(struct kunit *test)
+{
+ int rate = 52000000;
+
+ KUNIT_EXPECT_EQ(test, 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 0));
+ KUNIT_EXPECT_EQ(test, 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 1));
+ KUNIT_EXPECT_EQ(test, 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 2));
+ KUNIT_EXPECT_EQ(test, 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 3));
+ KUNIT_EXPECT_EQ(test, 2,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 4));
+ KUNIT_EXPECT_EQ(test, 3,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 5));
+ KUNIT_EXPECT_EQ(test, 14,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 23));
+ KUNIT_EXPECT_EQ(test, 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 24));
+ KUNIT_EXPECT_EQ(test, 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 25));
+
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 180));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 181));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 182));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 183));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 2,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 184));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 3,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 185));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 203));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 204));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 205));
+}
+
+static void aspeed_sdhci_phase_hs200(struct kunit *test)
+{
+ int rate = 200000000;
+
+ KUNIT_EXPECT_EQ(test, 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 0));
+ KUNIT_EXPECT_EQ(test, 0,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 5));
+ KUNIT_EXPECT_EQ(test, 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 6));
+ KUNIT_EXPECT_EQ(test, 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 7));
+ KUNIT_EXPECT_EQ(test, 14,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 89));
+ KUNIT_EXPECT_EQ(test, 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 90));
+ KUNIT_EXPECT_EQ(test, 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 91));
+ KUNIT_EXPECT_EQ(test, 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 96));
+
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 180));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 185));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 186));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 187));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 269));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 270));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 271));
+ KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ aspeed_sdhci_phase_to_tap(NULL, rate, 276));
+}
+
+static struct kunit_case aspeed_sdhci_test_cases[] = {
+ KUNIT_CASE(aspeed_sdhci_phase_ddr52),
+ KUNIT_CASE(aspeed_sdhci_phase_hs200),
+ {}
+};
+
+static struct kunit_suite aspeed_sdhci_test_suite = {
+ .name = "sdhci-of-aspeed",
+ .test_cases = aspeed_sdhci_test_cases,
+};
+
+static struct kunit_suite *aspeed_sdc_test_suite_array[] = {
+ &aspeed_sdhci_test_suite,
+ NULL,
+};
+
+static struct kunit_suite **aspeed_sdc_test_suites
+ __used __section(".kunit_test_suites") = aspeed_sdc_test_suite_array;
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 4f008ba3280e..7d8692e90996 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/math64.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -16,9 +17,19 @@
#include "sdhci-pltfm.h"
-#define ASPEED_SDC_INFO 0x00
-#define ASPEED_SDC_S1MMC8 BIT(25)
-#define ASPEED_SDC_S0MMC8 BIT(24)
+#define ASPEED_SDC_INFO 0x00
+#define ASPEED_SDC_S1_MMC8 BIT(25)
+#define ASPEED_SDC_S0_MMC8 BIT(24)
+#define ASPEED_SDC_PHASE 0xf4
+#define ASPEED_SDC_S1_PHASE_IN GENMASK(25, 21)
+#define ASPEED_SDC_S0_PHASE_IN GENMASK(20, 16)
+#define ASPEED_SDC_S1_PHASE_OUT GENMASK(15, 11)
+#define ASPEED_SDC_S1_PHASE_IN_EN BIT(10)
+#define ASPEED_SDC_S1_PHASE_OUT_EN GENMASK(9, 8)
+#define ASPEED_SDC_S0_PHASE_OUT GENMASK(7, 3)
+#define ASPEED_SDC_S0_PHASE_IN_EN BIT(2)
+#define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0)
+#define ASPEED_SDC_PHASE_MAX 31
struct aspeed_sdc {
struct clk *clk;
@@ -28,9 +39,37 @@ struct aspeed_sdc {
void __iomem *regs;
};
+struct aspeed_sdhci_tap_param {
+ bool valid;
+
+#define ASPEED_SDHCI_TAP_PARAM_INVERT_CLK BIT(4)
+ u8 in;
+ u8 out;
+};
+
+struct aspeed_sdhci_tap_desc {
+ u32 tap_mask;
+ u32 enable_mask;
+ u8 enable_value;
+};
+
+struct aspeed_sdhci_phase_desc {
+ struct aspeed_sdhci_tap_desc in;
+ struct aspeed_sdhci_tap_desc out;
+};
+
+struct aspeed_sdhci_pdata {
+ unsigned int clk_div_start;
+ const struct aspeed_sdhci_phase_desc *phase_desc;
+ size_t nr_phase_descs;
+};
+
struct aspeed_sdhci {
+ const struct aspeed_sdhci_pdata *pdata;
struct aspeed_sdc *parent;
u32 width_mask;
+ struct mmc_clk_phase_map phase_map;
+ const struct aspeed_sdhci_phase_desc *phase_desc;
};
static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
@@ -50,14 +89,125 @@ static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
spin_unlock(&sdc->lock);
}
+static u32
+aspeed_sdc_set_phase_tap(const struct aspeed_sdhci_tap_desc *desc,
+ u8 tap, bool enable, u32 reg)
+{
+ reg &= ~(desc->enable_mask | desc->tap_mask);
+ if (enable) {
+ reg |= tap << __ffs(desc->tap_mask);
+ reg |= desc->enable_value << __ffs(desc->enable_mask);
+ }
+
+ return reg;
+}
+
+static void
+aspeed_sdc_set_phase_taps(struct aspeed_sdc *sdc,
+ const struct aspeed_sdhci_phase_desc *desc,
+ const struct aspeed_sdhci_tap_param *taps)
+{
+ u32 reg;
+
+ spin_lock(&sdc->lock);
+ reg = readl(sdc->regs + ASPEED_SDC_PHASE);
+
+ reg = aspeed_sdc_set_phase_tap(&desc->in, taps->in, taps->valid, reg);
+ reg = aspeed_sdc_set_phase_tap(&desc->out, taps->out, taps->valid, reg);
+
+ writel(reg, sdc->regs + ASPEED_SDC_PHASE);
+ spin_unlock(&sdc->lock);
+}
+
+#define PICOSECONDS_PER_SECOND 1000000000000ULL
+#define ASPEED_SDHCI_NR_TAPS 15
+/* Measured value with *handwave* environmentals and static loading */
+#define ASPEED_SDHCI_MAX_TAP_DELAY_PS 1253
+static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
+ int phase_deg)
+{
+ u64 phase_period_ps;
+ u64 prop_delay_ps;
+ u64 clk_period_ps;
+ unsigned int tap;
+ u8 inverted;
+
+ phase_deg %= 360;
+
+ if (phase_deg >= 180) {
+ inverted = ASPEED_SDHCI_TAP_PARAM_INVERT_CLK;
+ phase_deg -= 180;
+ dev_dbg(dev,
+ "Inverting clock to reduce phase correction from %d to %d degrees\n",
+ phase_deg + 180, phase_deg);
+ } else {
+ inverted = 0;
+ }
+
+ prop_delay_ps = ASPEED_SDHCI_MAX_TAP_DELAY_PS / ASPEED_SDHCI_NR_TAPS;
+ clk_period_ps = div_u64(PICOSECONDS_PER_SECOND, (u64)rate_hz);
+ phase_period_ps = div_u64((u64)phase_deg * clk_period_ps, 360ULL);
+
+ tap = div_u64(phase_period_ps, prop_delay_ps);
+ if (tap > ASPEED_SDHCI_NR_TAPS) {
+ dev_warn(dev,
+ "Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
+ tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
+ tap = ASPEED_SDHCI_NR_TAPS;
+ }
+
+ return inverted | tap;
+}
+
+static void
+aspeed_sdhci_phases_to_taps(struct device *dev, unsigned long rate,
+ const struct mmc_clk_phase *phases,
+ struct aspeed_sdhci_tap_param *taps)
+{
+ taps->valid = phases->valid;
+
+ if (!phases->valid)
+ return;
+
+ taps->in = aspeed_sdhci_phase_to_tap(dev, rate, phases->in_deg);
+ taps->out = aspeed_sdhci_phase_to_tap(dev, rate, phases->out_deg);
+}
+
+static void
+aspeed_sdhci_configure_phase(struct sdhci_host *host, unsigned long rate)
+{
+ struct aspeed_sdhci_tap_param _taps = {0}, *taps = &_taps;
+ struct mmc_clk_phase *params;
+ struct aspeed_sdhci *sdhci;
+ struct device *dev;
+
+ dev = host->mmc->parent;
+ sdhci = sdhci_pltfm_priv(sdhci_priv(host));
+
+ if (!sdhci->phase_desc)
+ return;
+
+ params = &sdhci->phase_map.phase[host->timing];
+ aspeed_sdhci_phases_to_taps(dev, rate, params, taps);
+ aspeed_sdc_set_phase_taps(sdhci->parent, sdhci->phase_desc, taps);
+ dev_dbg(dev,
+ "Using taps [%d, %d] for [%d, %d] degrees of phase correction at %luHz (%d)\n",
+ taps->in & ASPEED_SDHCI_NR_TAPS,
+ taps->out & ASPEED_SDHCI_NR_TAPS,
+ params->in_deg, params->out_deg, rate, host->timing);
+}
+
static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host;
- unsigned long parent;
+ unsigned long parent, bus;
+ struct aspeed_sdhci *sdhci;
int div;
u16 clk;
pltfm_host = sdhci_priv(host);
+ sdhci = sdhci_pltfm_priv(pltfm_host);
+
parent = clk_get_rate(pltfm_host->clk);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
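
[Editor's note — not part of the patch] The tap arithmetic in aspeed_sdhci_phase_to_tap() above is easy to sanity-check on a host machine. Below is a minimal userspace sketch; the constants mirror the driver, while the helper name and the example rate/phase are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define NR_TAPS          15
#define MAX_TAP_DELAY_PS 1253
#define PS_PER_SECOND    1000000000000ULL
#define INVERT_CLK       (1 << 4)

static int phase_to_tap(unsigned long rate_hz, int phase_deg)
{
        uint64_t prop_delay_ps = MAX_TAP_DELAY_PS / NR_TAPS;
        uint64_t clk_period_ps, phase_period_ps;
        unsigned int tap;
        int inverted = 0;

        phase_deg %= 360;
        if (phase_deg >= 180) {         /* fold into [0, 180) by inverting */
                inverted = INVERT_CLK;
                phase_deg -= 180;
        }

        clk_period_ps = PS_PER_SECOND / rate_hz;
        phase_period_ps = (uint64_t)phase_deg * clk_period_ps / 360;

        tap = phase_period_ps / prop_delay_ps;
        if (tap > NR_TAPS)              /* clamp, as the driver does */
                tap = NR_TAPS;

        return inverted | tap;
}

int main(void)
{
        /* 95 degrees at 200MHz: 5000ps period, ~1319ps of phase -> tap 15 */
        printf("tap = 0x%x\n", phase_to_tap(200000000UL, 95));
        return 0;
}
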
@@ -68,14 +218,34 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
- for (div = 2; div < 256; div *= 2) {
- if ((parent / div) <= clock)
+ /*
+ * Regarding the AST2600:
+ *
+ * If (EMMC12C[7:6], EMMC12C[15:8] == 0) then
+ * period of SDCLK = period of SDMCLK.
+ *
+ * If (EMMC12C[7:6], EMMC12C[15:8] != 0) then
+ * period of SDCLK = period of SDMCLK * 2 * (EMMC12C[7:6], EMMC[15:8])
+ *
+ * If EMMC12C[7:6] is kept at 0 and EMMC12C[15:8] is programmed as a
+ * one-hot value (0x1/0x2/0x4/...), the behaviour is compatible with the
+ * AST2400 and AST2500.
+ *
+ * Keep the one-hot behaviour for backwards compatibility, except that
+ * the value 0 in (EMMC12C[7:6], EMMC12C[15:8]) is now supported; capture
+ * that 0-value capability in clk_div_start.
+ */
+ for (div = sdhci->pdata->clk_div_start; div < 256; div *= 2) {
+ bus = parent / div;
+ if (bus <= clock)
break;
}
+
div >>= 1;
clk = div << SDHCI_DIVIDER_SHIFT;
+ aspeed_sdhci_configure_phase(host, bus);
+
sdhci_enable_clk(host, clk);
}
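
[Editor's note — not part of the patch] A hedged sketch of the divisor search above, showing why clk_div_start matters: starting at 1 lets the AST2600 run SDCLK at the full SDMCLK rate, while starting at 2 reproduces the AST2400/AST2500 behaviour. The driver then shifts the chosen divisor right by one for the SDHCI register encoding.

#include <stdio.h>

static int pick_div(unsigned long parent, unsigned int clock, int start)
{
        int div;

        /* walk the one-hot divisors: start, 2*start, 4*start, ... */
        for (div = start; div < 256; div *= 2) {
                if (parent / div <= clock)
                        break;
        }
        return div;
}

int main(void)
{
        /* 200MHz input clock, 200MHz target */
        printf("ast2600: div=%d\n", pick_div(200000000UL, 200000000U, 1));
        printf("ast2400: div=%d\n", pick_div(200000000UL, 200000000U, 2));
        return 0;
}
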
@@ -157,6 +327,7 @@ static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev,
static int aspeed_sdhci_probe(struct platform_device *pdev)
{
+ const struct aspeed_sdhci_pdata *aspeed_pdata;
struct sdhci_pltfm_host *pltfm_host;
struct aspeed_sdhci *dev;
struct sdhci_host *host;
@@ -164,12 +335,19 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
int slot;
int ret;
+ aspeed_pdata = of_device_get_match_data(&pdev->dev);
+ if (!aspeed_pdata) {
+ dev_err(&pdev->dev, "Missing platform configuration data\n");
+ return -EINVAL;
+ }
+
host = sdhci_pltfm_init(pdev, &aspeed_sdhci_pdata, sizeof(*dev));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
dev = sdhci_pltfm_priv(pltfm_host);
+ dev->pdata = aspeed_pdata;
dev->parent = dev_get_drvdata(pdev->dev.parent);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -180,8 +358,17 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
else if (slot >= 2)
return -EINVAL;
- dev_info(&pdev->dev, "Configuring for slot %d\n", slot);
- dev->width_mask = !slot ? ASPEED_SDC_S0MMC8 : ASPEED_SDC_S1MMC8;
+ if (slot < dev->pdata->nr_phase_descs) {
+ dev->phase_desc = &dev->pdata->phase_desc[slot];
+ } else {
+ dev_info(&pdev->dev,
+ "Phase control not supported for slot %d\n", slot);
+ dev->phase_desc = NULL;
+ }
+
+ dev->width_mask = !slot ? ASPEED_SDC_S0_MMC8 : ASPEED_SDC_S1_MMC8;
+
+ dev_info(&pdev->dev, "Configured for slot %d\n", slot);
sdhci_get_of_property(pdev);
@@ -199,6 +386,9 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
if (ret)
goto err_sdhci_add;
+ if (dev->phase_desc)
+ mmc_of_parse_clk_phase(host->mmc, &dev->phase_map);
+
ret = sdhci_add_host(host);
if (ret)
goto err_sdhci_add;
@@ -230,10 +420,49 @@ static int aspeed_sdhci_remove(struct platform_device *pdev)
return 0;
}
+static const struct aspeed_sdhci_pdata ast2400_sdhci_pdata = {
+ .clk_div_start = 2,
+};
+
+static const struct aspeed_sdhci_phase_desc ast2600_sdhci_phase[] = {
+ /* SDHCI/Slot 0 */
+ [0] = {
+ .in = {
+ .tap_mask = ASPEED_SDC_S0_PHASE_IN,
+ .enable_mask = ASPEED_SDC_S0_PHASE_IN_EN,
+ .enable_value = 1,
+ },
+ .out = {
+ .tap_mask = ASPEED_SDC_S0_PHASE_OUT,
+ .enable_mask = ASPEED_SDC_S0_PHASE_OUT_EN,
+ .enable_value = 3,
+ },
+ },
+ /* SDHCI/Slot 1 */
+ [1] = {
+ .in = {
+ .tap_mask = ASPEED_SDC_S1_PHASE_IN,
+ .enable_mask = ASPEED_SDC_S1_PHASE_IN_EN,
+ .enable_value = 1,
+ },
+ .out = {
+ .tap_mask = ASPEED_SDC_S1_PHASE_OUT,
+ .enable_mask = ASPEED_SDC_S1_PHASE_OUT_EN,
+ .enable_value = 3,
+ },
+ },
+};
+
+static const struct aspeed_sdhci_pdata ast2600_sdhci_pdata = {
+ .clk_div_start = 1,
+ .phase_desc = ast2600_sdhci_phase,
+ .nr_phase_descs = ARRAY_SIZE(ast2600_sdhci_phase),
+};
+
static const struct of_device_id aspeed_sdhci_of_match[] = {
- { .compatible = "aspeed,ast2400-sdhci", },
- { .compatible = "aspeed,ast2500-sdhci", },
- { .compatible = "aspeed,ast2600-sdhci", },
+ { .compatible = "aspeed,ast2400-sdhci", .data = &ast2400_sdhci_pdata, },
+ { .compatible = "aspeed,ast2500-sdhci", .data = &ast2400_sdhci_pdata, },
+ { .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
{ }
};
@@ -327,6 +556,29 @@ static struct platform_driver aspeed_sdc_driver = {
.remove = aspeed_sdc_remove,
};
+#if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST)
+#include "sdhci-of-aspeed-test.c"
+
+static inline int aspeed_sdc_tests_init(void)
+{
+ return __kunit_test_suites_init(aspeed_sdc_test_suites);
+}
+
+static inline void aspeed_sdc_tests_exit(void)
+{
+ __kunit_test_suites_exit(aspeed_sdc_test_suites);
+}
+#else
+static inline int aspeed_sdc_tests_init(void)
+{
+ return 0;
+}
+
+static inline void aspeed_sdc_tests_exit(void)
+{
+}
+#endif
+
static int __init aspeed_sdc_init(void)
{
int rc;
@@ -337,7 +589,18 @@ static int __init aspeed_sdc_init(void)
rc = platform_driver_register(&aspeed_sdc_driver);
if (rc < 0)
- platform_driver_unregister(&aspeed_sdhci_driver);
+ goto cleanup_sdhci;
+
+ rc = aspeed_sdc_tests_init();
+ if (rc < 0) {
+ platform_driver_unregister(&aspeed_sdc_driver);
+ goto cleanup_sdhci;
+ }
+
+ return 0;
+
+cleanup_sdhci:
+ platform_driver_unregister(&aspeed_sdhci_driver);
return rc;
}
@@ -345,6 +608,8 @@ module_init(aspeed_sdc_init);
static void __exit aspeed_sdc_exit(void)
{
+ aspeed_sdc_tests_exit();
+
platform_driver_unregister(&aspeed_sdc_driver);
platform_driver_unregister(&aspeed_sdhci_driver);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4b673792b5a4..59d8d96ce206 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -16,6 +16,8 @@
#include "sdhci-pltfm.h"
+#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
+
/* DWCMSHC specific Mode Select value */
#define DWCMSHC_CTRL_HS400 0x7
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * Whether or not V4 mode is enabled, ARGUMENT2 is a 32-bit block
+ * count register that does not support the stuff bits of the CMD23
+ * argument on the dwcmshc host controller.
+ */
+ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+ else
+ host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ dwcmshc_check_auto_cmd23(mmc, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -87,6 +112,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.ops = &sdhci_dwcmshc_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static int dwcmshc_probe(struct platform_device *pdev)
@@ -133,6 +159,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ host->mmc_host_ops.request = dwcmshc_request;
+
err = sdhci_add_host(host);
if (err)
goto err_clk;
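
[Editor's note — not part of the patch] A small sketch of the Auto-CMD23 gate added above. ARGUMENT2 can only carry a block count, so any CMD23 stuff bit in [31:16] (e.g. the reliable-write request bit) forces a separate CMD23; the mask and sample arguments below are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARG2_STUFF 0xffff0000u          /* GENMASK(31, 16) */

static bool can_use_auto_cmd23(uint32_t sbc_arg)
{
        /* any stuff bit set means falling back to a separate CMD23 */
        return !(sbc_arg & ARG2_STUFF);
}

int main(void)
{
        printf("%d\n", can_use_auto_cmd23(0x00000200)); /* 512 blocks: 1 */
        printf("%d\n", can_use_auto_cmd23(0x80000200)); /* rel. write: 0 */
        return 0;
}
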
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index b85edd62e7f0..4a0f69b97a78 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -84,12 +84,21 @@
#define GLI_9763E_VHS_REV_W 0x2
#define PCIE_GLI_9763E_MB 0x888
#define GLI_9763E_MB_CMDQ_OFF BIT(19)
+#define GLI_9763E_MB_ERP_ON BIT(7)
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9763E_CFG2 0x8A4
+#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
+#define GLI_9763E_CFG2_L1DLY_MAX 0x3FF
+
#define PCIE_GLI_9763E_MMC_CTRL 0x960
#define GLI_9763E_HS400_SLOW BIT(3)
+#define PCIE_GLI_9763E_CLKRXDLY 0x934
+#define GLI_9763E_HS400_RXDLY GENMASK(31, 28)
+#define GLI_9763E_HS400_RXDLY_5 0x5
+
#define SDHCI_GLI_9763E_CQE_BASE_ADDR 0x200
#define GLI_9763E_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
SDHCI_TRNS_BLK_CNT_EN | \
@@ -791,6 +800,17 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
value &= ~GLI_9763E_HS400_SLOW;
pci_write_config_dword(pdev, PCIE_GLI_9763E_MMC_CTRL, value);
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG2, &value);
+ value &= ~GLI_9763E_CFG2_L1DLY;
+ /* set ASPM L1 entry delay to 260us */
+ value |= FIELD_PREP(GLI_9763E_CFG2_L1DLY, GLI_9763E_CFG2_L1DLY_MAX);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, &value);
+ value &= ~GLI_9763E_HS400_RXDLY;
+ value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
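
[Editor's note — not part of the patch] The L1DLY update above follows the usual clear-then-FIELD_PREP read-modify-write shape. A self-contained sketch with a stand-in for the kernel's FIELD_PREP() (the initial register value is invented):

#include <stdint.h>
#include <stdio.h>

#define CFG2_L1DLY_MASK 0x1ff80000u     /* GENMASK(28, 19) */
#define CFG2_L1DLY_MAX  0x3ffu

/* minimal stand-in for FIELD_PREP(): shift into place, then mask */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
        return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
        uint32_t value = 0x12345678;    /* pretend config-space readout */

        value &= ~CFG2_L1DLY_MASK;      /* clear the old delay field */
        value |= field_prep(CFG2_L1DLY_MASK, CFG2_L1DLY_MAX);

        printf("cfg2 = 0x%08x\n", value);
        return 0;
}
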
@@ -814,7 +834,8 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
pci_read_config_dword(pdev, PCIE_GLI_9763E_MB, &value);
if (!(value & GLI_9763E_MB_CMDQ_OFF))
- host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ if (value & GLI_9763E_MB_ERP_ON)
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index fa76748d8929..94e3f72f6405 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -33,6 +33,8 @@
#define O2_SD_ADMA2 0xE7
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_MISC_CTRL 0x1C0
+#define O2_SD_PWR_FORCE_L0 0x0002
#define O2_SD_TUNING_CTRL 0x300
#define O2_SD_PLL_SETTING 0x304
#define O2_SD_MISC_SETTING 0x308
@@ -300,6 +302,8 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
int current_bus_width = 0;
+ u32 scratch32 = 0;
+ u16 scratch = 0;
/*
* This handler only implements the eMMC tuning that is specific to
@@ -312,6 +316,17 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
(opcode != MMC_SEND_TUNING_BLOCK)))
return -EINVAL;
+
+ /* Force the power mode to enter L0 */
+ scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
+ scratch |= O2_SD_PWR_FORCE_L0;
+ sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
+
+ /* Wait for the DLL to lock; time out after 5ms */
+ if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
+ scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
+ pr_warn("%s: DLL can't lock in 5ms after force L0 during tuning.\n",
+ mmc_hostname(host->mmc));
/*
* Judge the tuning reason, whether caused by dll shift
* If cause by dll shift, should call sdhci_o2_dll_recovery
@@ -344,6 +359,11 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_set_bus_width(host, current_bus_width);
}
+ /* Stop forcing the power mode into L0 */
+ scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
+ scratch &= ~(O2_SD_PWR_FORCE_L0);
+ sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
+
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
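
[Editor's note — not part of the patch] The readx_poll_timeout() call above expands to roughly the loop below: read, test the condition, give up after the timeout. This plain-C sketch fakes the register read so it is runnable; all names are invented.

#include <stdint.h>
#include <stdio.h>

#define DLL_LOCK (1u << 0)

static uint32_t fake_read_status(int *calls)
{
        /* pretend the DLL locks on the third poll */
        return ++*calls >= 3 ? DLL_LOCK : 0;
}

static int poll_dll_lock(int max_polls)
{
        int calls = 0;

        while (max_polls--) {
                if (fake_read_status(&calls) & DLL_LOCK)
                        return 0;       /* locked */
                /* a real caller would sleep between polls */
        }
        return -1;                      /* -ETIMEDOUT analogue */
}

int main(void)
{
        printf("%s\n", poll_dll_lock(5000) ? "timed out" : "locked");
        return 0;
}
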
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 6301b81cf573..9bd717ff784b 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
return host->private;
}
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev);
int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
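
[Editor's note — not part of the patch] The header change above is the standard config-gated stub pattern: callers may reference the hooks unconditionally, and a !CONFIG_PM_SLEEP build collapses them to free no-ops. A sketch (it compiles as-is because the macro is undefined in userspace):

#include <stdio.h>

struct device;                          /* opaque, as in the header */

#ifdef CONFIG_PM_SLEEP
int example_suspend(struct device *dev);        /* real implementation */
int example_resume(struct device *dev);
#else
static inline int example_suspend(struct device *dev) { return 0; }
static inline int example_resume(struct device *dev) { return 0; }
#endif

int main(void)
{
        /* with CONFIG_PM_SLEEP undefined this is a zero-cost no-op */
        printf("suspend -> %d\n", example_suspend(NULL));
        return 0;
}
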
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
deleted file mode 100644
index e9b347b3af7e..000000000000
--- a/drivers/mmc/host/sdhci-sirf.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SDHCI support for SiRF primaII and marco SoCs
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/mmc/slot-gpio.h>
-#include "sdhci-pltfm.h"
-
-#define SDHCI_CLK_DELAY_SETTING 0x4C
-#define SDHCI_SIRF_8BITBUS BIT(3)
-#define SIRF_TUNING_COUNT 16384
-
-static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
-{
- u8 ctrl;
-
- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_SIRF_8BITBUS);
-
- /*
- * CSR atlas7 and prima2 SD host version is not 3.0
- * 8bit-width enable bit of CSR SD hosts is 3,
- * while stardard hosts use bit 5
- */
- if (width == MMC_BUS_WIDTH_8)
- ctrl |= SDHCI_SIRF_8BITBUS;
- else if (width == MMC_BUS_WIDTH_4)
- ctrl |= SDHCI_CTRL_4BITBUS;
-
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-}
-
-static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
-{
- u32 val = readl(host->ioaddr + reg);
-
- if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
- (host->mmc->caps & MMC_CAP_UHS_SDR50))) {
- /* fake CAP_1 register */
- val = SDHCI_SUPPORT_DDR50 |
- SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
- }
-
- if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
- u32 prss = val;
- /* fake chips as V3.0 host conreoller */
- prss &= ~(0xFF << 16);
- val = prss | (SDHCI_SPEC_300 << 16);
- }
- return val;
-}
-
-static u16 sdhci_sirf_readw_le(struct sdhci_host *host, int reg)
-{
- u16 ret = 0;
-
- ret = readw(host->ioaddr + reg);
-
- if (unlikely(reg == SDHCI_HOST_VERSION)) {
- ret = readw(host->ioaddr + SDHCI_HOST_VERSION);
- ret |= SDHCI_SPEC_300;
- }
-
- return ret;
-}
-
-static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
-{
- int tuning_seq_cnt = 3;
- int phase;
- u8 tuned_phase_cnt = 0;
- int rc = 0, longest_range = 0;
- int start = -1, end = 0, tuning_value = -1, range = 0;
- u16 clock_setting;
- struct mmc_host *mmc = host->mmc;
-
- clock_setting = sdhci_readw(host, SDHCI_CLK_DELAY_SETTING);
- clock_setting &= ~0x3fff;
-
-retry:
- phase = 0;
- tuned_phase_cnt = 0;
- do {
- sdhci_writel(host,
- clock_setting | phase,
- SDHCI_CLK_DELAY_SETTING);
-
- if (!mmc_send_tuning(mmc, opcode, NULL)) {
- /* Tuning is successful at this tuning point */
- tuned_phase_cnt++;
- dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
- mmc_hostname(mmc), phase);
- if (start == -1)
- start = phase;
- end = phase;
- range++;
- if (phase == (SIRF_TUNING_COUNT - 1)
- && range > longest_range)
- tuning_value = (start + end) / 2;
- } else {
- dev_dbg(mmc_dev(mmc), "%s: Found bad phase = %d\n",
- mmc_hostname(mmc), phase);
- if (range > longest_range) {
- tuning_value = (start + end) / 2;
- longest_range = range;
- }
- start = -1;
- end = range = 0;
- }
- } while (++phase < SIRF_TUNING_COUNT);
-
- if (tuned_phase_cnt && tuning_value > 0) {
- /*
- * Finally set the selected phase in delay
- * line hw block.
- */
- phase = tuning_value;
- sdhci_writel(host,
- clock_setting | phase,
- SDHCI_CLK_DELAY_SETTING);
-
- dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
- mmc_hostname(mmc), phase);
- } else {
- if (--tuning_seq_cnt)
- goto retry;
- /* Tuning failed */
- dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
- mmc_hostname(mmc));
- rc = -EIO;
- }
-
- return rc;
-}
-
-static const struct sdhci_ops sdhci_sirf_ops = {
- .read_l = sdhci_sirf_readl_le,
- .read_w = sdhci_sirf_readw_le,
- .platform_execute_tuning = sdhci_sirf_execute_tuning,
- .set_clock = sdhci_set_clock,
- .get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .set_bus_width = sdhci_sirf_set_bus_width,
- .reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
-static const struct sdhci_pltfm_data sdhci_sirf_pdata = {
- .ops = &sdhci_sirf_ops,
- .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static int sdhci_sirf_probe(struct platform_device *pdev)
-{
- struct sdhci_host *host;
- struct sdhci_pltfm_host *pltfm_host;
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get clock");
- return PTR_ERR(clk);
- }
-
- host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, 0);
- if (IS_ERR(host))
- return PTR_ERR(host);
-
- pltfm_host = sdhci_priv(host);
- pltfm_host->clk = clk;
-
- sdhci_get_of_property(pdev);
-
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret)
- goto err_clk_prepare;
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_sdhci_add;
-
- /*
- * We must request the IRQ after sdhci_add_host(), as the tasklet only
- * gets setup in sdhci_add_host() and we oops.
- */
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
- if (ret == -EPROBE_DEFER)
- goto err_request_cd;
- if (!ret)
- mmc_gpiod_request_cd_irq(host->mmc);
-
- return 0;
-
-err_request_cd:
- sdhci_remove_host(host, 0);
-err_sdhci_add:
- clk_disable_unprepare(pltfm_host->clk);
-err_clk_prepare:
- sdhci_pltfm_free(pdev);
- return ret;
-}
-
-static const struct of_device_id sdhci_sirf_of_match[] = {
- { .compatible = "sirf,prima2-sdhc" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match);
-
-static struct platform_driver sdhci_sirf_driver = {
- .driver = {
- .name = "sdhci-sirf",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = sdhci_sirf_of_match,
- .pm = &sdhci_pltfm_pmops,
- },
- .probe = sdhci_sirf_probe,
- .remove = sdhci_pltfm_unregister,
-};
-
-module_platform_driver(sdhci_sirf_driver);
-
-MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco");
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index f85171edabeb..5dc36efff47f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -708,14 +708,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
- struct mmc_host *mmc = host->mmc;
- mmc_remove_host(mmc);
+ sdhci_remove_host(host, 0);
+
clk_disable_unprepare(sprd_host->clk_sdio);
clk_disable_unprepare(sprd_host->clk_enable);
clk_disable_unprepare(sprd_host->clk_2x_enable);
- mmc_free_host(mmc);
+ sdhci_pltfm_free(pdev);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index c67611fdaa8a..666cee4c7f7c 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off early during init in order to avoid
+ * possible issues with 1.8V regulator stabilization. The feature is
+ * re-enabled at a later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
@@ -684,6 +689,7 @@ static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = {
static const struct of_device_id sdhci_xenon_dt_ids[] = {
{ .compatible = "marvell,armada-ap806-sdhci", .data = (void *)XENON_AP806},
+ { .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807},
{ .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110},
{ .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700},
{}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 646823ddd317..2d73407ee52e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
-#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -4582,12 +4581,8 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
} else if (host->flags & SDHCI_USE_SDMA) {
mmc->max_segs = 1;
- if (swiotlb_max_segment()) {
- unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
- IO_TLB_SEGSIZE;
- mmc->max_req_size = min(mmc->max_req_size,
- max_req_size);
- }
+ mmc->max_req_size = min_t(size_t, mmc->max_req_size,
+ dma_max_mapping_size(mmc_dev(mmc)));
} else { /* PIO */
mmc->max_segs = SDHCI_MAX_SEGS;
}
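
[Editor's note — not part of the patch] The sdhci.c hunk replaces the SWIOTLB-specific cap with a generic clamp against the DMA layer's mapping limit. The effect is just a min(), sketched below with invented sizes:

#include <stddef.h>
#include <stdio.h>

static size_t clamp_req_size(size_t max_req, size_t dma_limit)
{
        /* equivalent to min_t(size_t, max_req, dma_limit) */
        return max_req < dma_limit ? max_req : dma_limit;
}

int main(void)
{
        /* e.g. a 512KiB request against a 256KiB bounce-buffer limit */
        printf("%zu\n", clamp_req_size(524288, 262144));
        return 0;
}
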
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index a64ea143d185..7a34649b0754 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -514,6 +514,26 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
+static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = {
+ .ops = &sdhci_j721e_8bit_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
+static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = {
+ .pdata = &sdhci_am64_8bit_pdata,
+ .flags = DLL_PRESENT | DLL_CALIB,
+};
+
+static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = {
+ .ops = &sdhci_j721e_4bit_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
+static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = {
+ .pdata = &sdhci_am64_4bit_pdata,
+ .flags = IOMUX_PRESENT,
+};
+
static const struct soc_device_attribute sdhci_am654_devices[] = {
{ .family = "AM65X",
.revision = "SR1.0",
@@ -737,6 +757,14 @@ static const struct of_device_id sdhci_am654_of_match[] = {
.compatible = "ti,j721e-sdhci-4bit",
.data = &sdhci_j721e_4bit_drvdata,
},
+ {
+ .compatible = "ti,am64-sdhci-8bit",
+ .data = &sdhci_am64_8bit_drvdata,
+ },
+ {
+ .compatible = "ti,am64-sdhci-4bit",
+ .data = &sdhci_am64_4bit_drvdata,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_am654_of_match);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 6310693f2ac0..2702736a1c57 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -245,6 +245,7 @@ struct sunxi_idma_des {
struct sunxi_mmc_cfg {
u32 idma_des_size_bits;
+ u32 idma_des_shift;
const struct sunxi_mmc_clk_delay *clk_delays;
/* does the IP block support autocalibration? */
@@ -344,7 +345,7 @@ static int sunxi_mmc_init_host(struct sunxi_mmc_host *host)
/* Enable CEATA support */
mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
/* Set DMA descriptor list base address */
- mmc_writel(host, REG_DLBA, host->sg_dma);
+ mmc_writel(host, REG_DLBA, host->sg_dma >> host->cfg->idma_des_shift);
rval = mmc_readl(host, REG_GCTRL);
rval |= SDXC_INTERRUPT_ENABLE_BIT;
@@ -374,8 +375,10 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
next_desc += sizeof(struct sunxi_idma_des);
pdes[i].buf_addr_ptr1 =
- cpu_to_le32(sg_dma_address(&data->sg[i]));
- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc);
+ cpu_to_le32(sg_dma_address(&data->sg[i]) >>
+ host->cfg->idma_des_shift);
+ pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
+ host->cfg->idma_des_shift);
}
pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
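
[Editor's note — not part of the patch] The new idma_des_shift encodes descriptor addresses in words rather than bytes (shift = 2 on the A100), which extends the 32-bit descriptor fields to reach 34-bit physical addresses. A sketch with an invented address:

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_desc_addr(uint64_t byte_addr, unsigned int shift)
{
        /* the buffer must be aligned to 1 << shift for this to be exact */
        return (uint32_t)(byte_addr >> shift);
}

int main(void)
{
        /* a 34-bit physical address still fits once divided by 4 */
        printf("0x%08x\n", encode_desc_addr(0x300000000ULL, 2));
        return 0;
}
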
@@ -1179,6 +1182,23 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
.needs_new_timings = true,
};
+static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
+ .idma_des_size_bits = 16,
+ .idma_des_shift = 2,
+ .clk_delays = NULL,
+ .can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
+static const struct sunxi_mmc_cfg sun50i_a100_emmc_cfg = {
+ .idma_des_size_bits = 13,
+ .idma_des_shift = 2,
+ .clk_delays = NULL,
+ .can_calibrate = true,
+ .needs_new_timings = true,
+};
+
static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg },
@@ -1187,6 +1207,8 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
+ { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
+ { .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -1507,6 +1529,8 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
#endif
static const struct dev_pm_ops sunxi_mmc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend,
sunxi_mmc_runtime_resume,
NULL)
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index fd8b72d3e02c..9fdf7ea06e3f 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -731,9 +731,9 @@ err_out:
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd(unsigned long data)
+static void tifm_sd_end_cmd(struct tasklet_struct *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_tasklet(host, t, finish_tasklet);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
@@ -968,8 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
*/
mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;
- tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
- (unsigned long)host);
+ tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd);
timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
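
[Editor's note — not part of the patch] from_tasklet(), used throughout the tasklet conversions in this series, is a container_of() over the embedded tasklet member. A stand-alone sketch with a stand-in struct (names invented):

#include <stddef.h>
#include <stdio.h>

struct tasklet_struct { int dummy; };

struct host_like {
        int cmd_flags;
        struct tasklet_struct finish_tasklet;   /* embedded member */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct host_like host = { .cmd_flags = 42 };
        struct tasklet_struct *t = &host.finish_tasklet;

        /* from_tasklet(host, t, finish_tasklet) expands to this: */
        struct host_like *h = container_of(t, struct host_like,
                                           finish_tasklet);

        printf("%d\n", h->cmd_flags);           /* prints 42 */
        return 0;
}
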
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 784fa6ed5843..2d1db9396d4a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -181,7 +181,7 @@ struct tmio_mmc_host {
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*reset)(struct tmio_mmc_host *host);
- bool (*check_retune)(struct tmio_mmc_host *host);
+ bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 942b8375179c..49c2d406c48e 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -477,8 +477,10 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
if (!data)
goto out;
- if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
- stat & TMIO_STAT_TXUNDERRUN)
+ if (stat & TMIO_STAT_DATATIMEOUT)
+ data->error = -ETIMEDOUT;
+ else if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
+ stat & TMIO_STAT_TXUNDERRUN)
data->error = -EILSEQ;
if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
@@ -802,7 +804,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
}
/* Error means retune, but executed command was still successful */
- if (host->check_retune && host->check_retune(host))
+ if (host->check_retune && host->check_retune(host, mrq))
mmc_retune_needed(host->mmc);
/* If SET_BLOCK_COUNT, continue with main command */
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index a6cd16771d4e..2413b6750cec 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -81,9 +81,9 @@ static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
}
/* external DMA engine */
-static void uniphier_sd_external_dma_issue(unsigned long arg)
+static void uniphier_sd_external_dma_issue(struct tasklet_struct *t)
{
- struct tmio_mmc_host *host = (void *)arg;
+ struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
uniphier_sd_dma_endisable(host, 1);
@@ -190,8 +190,7 @@ static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
host->chan_rx = chan;
host->chan_tx = chan;
- tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
- (unsigned long)host);
+ tasklet_setup(&host->dma_issue, uniphier_sd_external_dma_issue);
}
static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
@@ -228,9 +227,9 @@ static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
.dataend = uniphier_sd_external_dma_dataend,
};
-static void uniphier_sd_internal_dma_issue(unsigned long arg)
+static void uniphier_sd_internal_dma_issue(struct tasklet_struct *t)
{
- struct tmio_mmc_host *host = (void *)arg;
+ struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -309,8 +308,7 @@ static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
- (unsigned long)host);
+ tasklet_setup(&host->dma_issue, uniphier_sd_internal_dma_issue);
}
static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index e2d5112d809d..615f3d008af1 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1858,10 +1858,12 @@ static int usdhi6_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret < 0)
- goto e_clk_off;
+ goto e_release_dma;
return 0;
+e_release_dma:
+ usdhi6_dma_release(host);
e_clk_off:
clk_disable_unprepare(host->clk);
e_free_mmc:
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9b755ea0fa03..4f4c0813f9fd 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -959,14 +959,12 @@ static void via_sdc_timeout(struct timer_list *t)
spin_unlock_irqrestore(&sdhost->lock, flags);
}
-static void via_sdc_tasklet_finish(unsigned long param)
+static void via_sdc_tasklet_finish(struct tasklet_struct *t)
{
- struct via_crdr_mmc_host *host;
+ struct via_crdr_mmc_host *host = from_tasklet(host, t, finish_tasklet);
unsigned long flags;
struct mmc_request *mrq;
- host = (struct via_crdr_mmc_host *)param;
-
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timer);
@@ -1050,8 +1048,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
INIT_WORK(&host->carddet_work, via_sdc_card_detect);
- tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
- (unsigned long)host);
+ tasklet_setup(&host->finish_tasklet, via_sdc_tasklet_finish);
addrbase = host->sdhc_mmiobase;
writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index cd63ea865b77..67ecd342fe5f 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -987,9 +987,9 @@ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
return host->mrq->cmd->data;
}
-static void wbsd_tasklet_card(unsigned long param)
+static void wbsd_tasklet_card(struct tasklet_struct *t)
{
- struct wbsd_host *host = (struct wbsd_host *)param;
+ struct wbsd_host *host = from_tasklet(host, t, card_tasklet);
u8 csr;
int delay = -1;
@@ -1036,9 +1036,9 @@ static void wbsd_tasklet_card(unsigned long param)
mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
-static void wbsd_tasklet_fifo(unsigned long param)
+static void wbsd_tasklet_fifo(struct tasklet_struct *t)
{
- struct wbsd_host *host = (struct wbsd_host *)param;
+ struct wbsd_host *host = from_tasklet(host, t, fifo_tasklet);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1067,9 +1067,9 @@ end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_crc(unsigned long param)
+static void wbsd_tasklet_crc(struct tasklet_struct *t)
{
- struct wbsd_host *host = (struct wbsd_host *)param;
+ struct wbsd_host *host = from_tasklet(host, t, crc_tasklet);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1091,9 +1091,9 @@ end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_timeout(unsigned long param)
+static void wbsd_tasklet_timeout(struct tasklet_struct *t)
{
- struct wbsd_host *host = (struct wbsd_host *)param;
+ struct wbsd_host *host = from_tasklet(host, t, timeout_tasklet);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1115,9 +1115,9 @@ end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_finish(unsigned long param)
+static void wbsd_tasklet_finish(struct tasklet_struct *t)
{
- struct wbsd_host *host = (struct wbsd_host *)param;
+ struct wbsd_host *host = from_tasklet(host, t, finish_tasklet);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1449,16 +1449,11 @@ static int wbsd_request_irq(struct wbsd_host *host, int irq)
/*
* Set up tasklets. Must be done before requesting interrupt.
*/
- tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
- (unsigned long)host);
- tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
- (unsigned long)host);
- tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
- (unsigned long)host);
- tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
- (unsigned long)host);
- tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
- (unsigned long)host);
+ tasklet_setup(&host->card_tasklet, wbsd_tasklet_card);
+ tasklet_setup(&host->fifo_tasklet, wbsd_tasklet_fifo);
+ tasklet_setup(&host->crc_tasklet, wbsd_tasklet_crc);
+ tasklet_setup(&host->timeout_tasklet, wbsd_tasklet_timeout);
+ tasklet_setup(&host->finish_tasklet, wbsd_tasklet_finish);
/*
* Allocate interrupt.
diff --git a/drivers/most/core.c b/drivers/most/core.c
index 353ab277cbc6..e4412c7d25b0 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -379,7 +379,7 @@ static struct attribute *channel_attrs[] = {
NULL,
};
-static struct attribute_group channel_attr_group = {
+static const struct attribute_group channel_attr_group = {
.attrs = channel_attrs,
.is_visible = channel_attr_is_visible,
};
@@ -436,7 +436,7 @@ static struct attribute *interface_attrs[] = {
NULL,
};
-static struct attribute_group interface_attr_group = {
+static const struct attribute_group interface_attr_group = {
.attrs = interface_attrs,
};
@@ -718,7 +718,7 @@ static struct attribute *mc_attrs[] = {
NULL,
};
-static struct attribute_group mc_attr_group = {
+static const struct attribute_group mc_attr_group = {
.attrs = mc_attrs,
};
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index cfd170946ba4..5b04ae6c3057 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -222,6 +222,7 @@ static int phram_setup(const char *val)
uint64_t start;
uint64_t len;
uint64_t erasesize = PAGE_SIZE;
+ uint32_t rem;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -263,8 +264,11 @@ static int phram_setup(const char *val)
}
}
+ if (erasesize)
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || do_div(len, (uint32_t)erasesize) != 0) {
+ || erasesize > UINT_MAX || rem) {
parse_err("illegal erasesize or len\n");
goto error;
}
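
[Editor's note — not part of the patch] The phram change matters because do_div() updates its dividend in place, so the old check clobbered len as a side effect; div_u64_rem() leaves len intact. Plain-C stand-ins for the two helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t do_div_like(uint64_t *dividend, uint32_t divisor)
{
        uint32_t rem = *dividend % divisor;

        *dividend /= divisor;           /* the dividend is clobbered! */
        return rem;
}

static uint64_t div_u64_rem_like(uint64_t dividend, uint32_t divisor,
                                 uint32_t *rem)
{
        *rem = dividend % divisor;
        return dividend / divisor;
}

int main(void)
{
        uint64_t len = 0x100000;        /* 1MiB region */
        uint32_t rem;

        div_u64_rem_like(len, 4096, &rem);
        printf("len=0x%llx rem=%u\n", (unsigned long long)len, rem);

        do_div_like(&len, 4096);        /* len is now 0x100, not 1MiB */
        printf("len=0x%llx\n", (unsigned long long)len);
        return 0;
}
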
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 1888523d9745..983999c020d6 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -924,7 +924,7 @@ static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
BUG_ON(bytes != 1 && bytes != 2);
seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(cmd)),
+ SEQ_OPC_OPCODE(cmd));
stfsm_load_seq(fsm, seq);
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 377ef0fc4e3e..ca00d211e73e 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -90,8 +90,8 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
u32 win_base;
map->map.bankwidth = 1;
- map->map.read = mtd_pci_read8,
- map->map.write = mtd_pci_write8,
+ map->map.read = mtd_pci_read8;
+ map->map.write = mtd_pci_write8;
map->map.size = 0x00800000;
map->base = ioremap(pci_resource_start(dev, 0),
@@ -185,8 +185,8 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
return -ENXIO;
map->map.bankwidth = 4;
- map->map.read = mtd_pci_read32,
- map->map.write = mtd_pci_write32,
+ map->map.read = mtd_pci_read32;
+ map->map.write = mtd_pci_write32;
map->map.size = len;
map->base = ioremap(base, len);
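
[Editor's note — not part of the patch] The maps/pci.c hunks fix a classic comma-for-semicolon slip. Because the comma operator chains expressions, the old code still compiled and even behaved identically here, which is why it survived so long:

#include <stdio.h>

int main(void)
{
        int a = 0, b = 0;

        a = 1,          /* comma operator: evaluate, discard, continue */
        b = 2;          /* ...so this assignment still executes */

        printf("a=%d b=%d\n", a, b);    /* a=1 b=2 either way */
        return 0;
}
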
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 58eefa43af14..795dec4483c2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1053,7 +1053,6 @@ static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
if (ret < 0)
return ret;
- eb = d->eb_data + (newblock / d->pages_per_eblk);
d->page_data[page] = newblock;
return 0;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 442a039b92f3..30f061939560 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -102,13 +102,6 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
-config MTD_NAND_TANGO
- tristate "Tango NAND controller"
- depends on ARCH_TANGO || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enables the NAND Flash controller on Tango chips.
-
config MTD_NAND_SHARPSL
tristate "Sharp SL Series (C7xx + others) NAND controller"
depends on ARCH_PXA || COMPILE_TEST
@@ -331,8 +324,7 @@ source "drivers/mtd/nand/raw/ingenic/Kconfig"
config MTD_NAND_FSMC
tristate "ST Micros FSMC NAND controller"
depends on OF && HAS_IOMEM
- depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
- COMPILE_TEST
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || COMPILE_TEST
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 32475a28d8f8..d011c6c53f8f 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
-obj-$(CONFIG_MTD_NAND_TANGO) += tango_nand.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 5cdf05bcbf8f..3fa8c22d3f36 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- nand_extract_bits(buf, step * eccsize, tmp_buf,
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
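
[Editor's note — not part of the patch] The gpmi-nand fix is a unit mismatch: nand_extract_bits() takes its destination offset in bits, so the step index must be scaled by eccsize * 8, not eccsize. A trivial check of the two scalings:

#include <stdio.h>

int main(void)
{
        const int eccsize = 512;        /* bytes per ECC chunk */
        int step;

        for (step = 0; step < 3; step++)
                printf("step %d: byte-off %d -> bit-off %d\n",
                       step, step * eccsize, step * eccsize * 8);
        return 0;
}
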
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index fdb112e8a90d..8b49fd56cf96 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -318,8 +318,10 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
}
tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
- if (!tx)
- return -ENXIO;
+ if (!tx) {
+ ret = -ENXIO;
+ goto err_unmap;
+ }
tx->callback = callback;
tx->callback_param = ebu_host;
@@ -579,7 +581,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
struct nand_chip *nand;
- struct mtd_info *mtd = NULL;
+ struct mtd_info *mtd;
struct resource *res;
char *resname;
int ret;
@@ -647,12 +649,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->ebu + EBU_ADDR_SEL(cs));
nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
- mtd = nand_to_mtd(&ebu_host->chip);
mtd->dev.parent = dev;
ebu_host->dev = dev;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 42d4881d598d..79da6b02e209 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2396,7 +2396,7 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
* be greater than that to be sure tCCS delay is respected.
*/
nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
- period_ns) - 2,
+ period_ns) - 2;
nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
period_ns);
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index fd705dd1768d..f78302e16b84 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1731,7 +1731,7 @@ static int mxcnd_probe(struct platform_device *pdev)
this->legacy.chip_delay = 5;
nand_set_controller_data(this, host);
- nand_set_flash_node(this, pdev->dev.of_node),
+ nand_set_flash_node(this, pdev->dev.of_node);
this->legacy.dev_ready = mxc_nand_dev_ready;
this->legacy.cmdfunc = mxc_nand_command;
this->legacy.read_byte = mxc_nand_read_byte;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f2b9250c0ea8..0750121ac371 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
if (!bch)
return 0;
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index fbb9955f2467..2c3e65cb68f3 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
- if (section >= chip->ecc.steps)
+ if (section >= engine_conf->nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- oobregion->offset = off + (section * (chip->ecc.bytes + 1));
- oobregion->length = chip->ecc.bytes;
+ oobregion->offset = off + (section * (engine_conf->code_size + 1));
+ oobregion->length = engine_conf->code_size;
return 0;
}
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 667e4bfe369f..fd4c318b520f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2821,6 +2821,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
return 0;
}
+static const char * const probes[] = { "qcomsmem", NULL };
+
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
@@ -2884,7 +2886,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
}
}
- ret = mtd_device_register(mtd, NULL, 0);
+ ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
deleted file mode 100644
index 359187b5a4be..000000000000
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ /dev/null
@@ -1,727 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2016 Sigma Designs
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-/* Offsets relative to chip->base */
-#define PBUS_CMD 0
-#define PBUS_ADDR 4
-#define PBUS_DATA 8
-
-/* Offsets relative to reg_base */
-#define NFC_STATUS 0x00
-#define NFC_FLASH_CMD 0x04
-#define NFC_DEVICE_CFG 0x08
-#define NFC_TIMING1 0x0c
-#define NFC_TIMING2 0x10
-#define NFC_XFER_CFG 0x14
-#define NFC_PKT_0_CFG 0x18
-#define NFC_PKT_N_CFG 0x1c
-#define NFC_BB_CFG 0x20
-#define NFC_ADDR_PAGE 0x24
-#define NFC_ADDR_OFFSET 0x28
-#define NFC_XFER_STATUS 0x2c
-
-/* NFC_STATUS values */
-#define CMD_READY BIT(31)
-
-/* NFC_FLASH_CMD values */
-#define NFC_READ 1
-#define NFC_WRITE 2
-
-/* NFC_XFER_STATUS values */
-#define PAGE_IS_EMPTY BIT(16)
-
-/* Offsets relative to mem_base */
-#define METADATA 0x000
-#define ERROR_REPORT 0x1c0
-
-/*
- * Error reports are split in two bytes:
- * byte 0 for the first packet in the page (PKT_0)
- * byte 1 for other packets in the page (PKT_N, for N > 0)
- * ERR_COUNT_PKT_N is the max error count over all but the first packet.
- */
-#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
-#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
-#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
-#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
-
-/* Offsets relative to pbus_base */
-#define PBUS_CS_CTRL 0x83c
-#define PBUS_PAD_MODE 0x8f0
-
-/* PBUS_CS_CTRL values */
-#define PBUS_IORDY BIT(31)
-
-/*
- * PBUS_PAD_MODE values
- * In raw mode, the driver communicates directly with the NAND chips.
- * In NFC mode, the NAND Flash controller manages the communication.
- * We use NFC mode for read and write; raw mode for everything else.
- */
-#define MODE_RAW 0
-#define MODE_NFC BIT(31)
-
-#define METADATA_SIZE 4
-#define BBM_SIZE 6
-#define FIELD_ORDER 15
-
-#define MAX_CS 4
-
-struct tango_nfc {
- struct nand_controller hw;
- void __iomem *reg_base;
- void __iomem *mem_base;
- void __iomem *pbus_base;
- struct tango_chip *chips[MAX_CS];
- struct dma_chan *chan;
- int freq_kHz;
-};
-
-#define to_tango_nfc(ptr) container_of(ptr, struct tango_nfc, hw)
-
-struct tango_chip {
- struct nand_chip nand_chip;
- void __iomem *base;
- u32 timing1;
- u32 timing2;
- u32 xfer_cfg;
- u32 pkt_0_cfg;
- u32 pkt_n_cfg;
- u32 bb_cfg;
-};
-
-#define to_tango_chip(ptr) container_of(ptr, struct tango_chip, nand_chip)
-
-#define XFER_CFG(cs, page_count, steps, metadata_size) \
- ((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))
-
-#define PKT_CFG(size, strength) ((size) << 16 | (strength))
-
-#define BB_CFG(bb_offset, bb_size) ((bb_offset) << 16 | (bb_size))
-
-#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
-
-static void tango_select_target(struct nand_chip *chip, unsigned int cs)
-{
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- struct tango_chip *tchip = to_tango_chip(chip);
-
- writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
- writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
- writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
- writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
- writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
- writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
-}
-
-static int tango_waitrdy(struct nand_chip *chip, unsigned int timeout_ms)
-{
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- u32 status;
-
- return readl_relaxed_poll_timeout(nfc->pbus_base + PBUS_CS_CTRL,
- status, status & PBUS_IORDY, 20,
- timeout_ms);
-}
-
-static int tango_exec_instr(struct nand_chip *chip,
- const struct nand_op_instr *instr)
-{
- struct tango_chip *tchip = to_tango_chip(chip);
- unsigned int i;
-
- switch (instr->type) {
- case NAND_OP_CMD_INSTR:
- writeb_relaxed(instr->ctx.cmd.opcode, tchip->base + PBUS_CMD);
- return 0;
- case NAND_OP_ADDR_INSTR:
- for (i = 0; i < instr->ctx.addr.naddrs; i++)
- writeb_relaxed(instr->ctx.addr.addrs[i],
- tchip->base + PBUS_ADDR);
- return 0;
- case NAND_OP_DATA_IN_INSTR:
- ioread8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.in,
- instr->ctx.data.len);
- return 0;
- case NAND_OP_DATA_OUT_INSTR:
- iowrite8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.out,
- instr->ctx.data.len);
- return 0;
- case NAND_OP_WAITRDY_INSTR:
- return tango_waitrdy(chip,
- instr->ctx.waitrdy.timeout_ms);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int tango_exec_op(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only)
-{
- unsigned int i;
- int ret = 0;
-
- if (check_only)
- return 0;
-
- tango_select_target(chip, op->cs);
- for (i = 0; i < op->ninstrs; i++) {
- ret = tango_exec_instr(chip, &op->instrs[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/*
- * The controller does not check for bitflips in erased pages,
- * therefore software must check instead.
- */
-static int check_erased_page(struct nand_chip *chip, u8 *buf)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- u8 *meta = chip->oob_poi + BBM_SIZE;
- u8 *ecc = chip->oob_poi + BBM_SIZE + METADATA_SIZE;
- const int ecc_size = chip->ecc.bytes;
- const int pkt_size = chip->ecc.size;
- int i, res, meta_len, bitflips = 0;
-
- for (i = 0; i < chip->ecc.steps; ++i) {
- meta_len = i ? 0 : METADATA_SIZE;
- res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
- meta, meta_len,
- chip->ecc.strength);
- if (res < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += res;
-
- bitflips = max(res, bitflips);
- buf += pkt_size;
- ecc += ecc_size;
- }
-
- return bitflips;
-}
-
-static int decode_error_report(struct nand_chip *chip)
-{
- u32 status, res;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
-
- status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
- if (status & PAGE_IS_EMPTY)
- return 0;
-
- res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
-
- if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
- return -EBADMSG;
-
- /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
- mtd->ecc_stats.corrected +=
- ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
-
- return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
-}
-
-static void tango_dma_callback(void *arg)
-{
- complete(arg);
-}
-
-static int do_dma(struct tango_nfc *nfc, enum dma_data_direction dir, int cmd,
- const void *buf, int len, int page)
-{
- void __iomem *addr = nfc->reg_base + NFC_STATUS;
- struct dma_chan *chan = nfc->chan;
- struct dma_async_tx_descriptor *desc;
- enum dma_transfer_direction tdir;
- struct scatterlist sg;
- struct completion tx_done;
- int err = -EIO;
- u32 res, val;
-
- sg_init_one(&sg, buf, len);
- if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
- return -EIO;
-
- tdir = dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
- desc = dmaengine_prep_slave_sg(chan, &sg, 1, tdir, DMA_PREP_INTERRUPT);
- if (!desc)
- goto dma_unmap;
-
- desc->callback = tango_dma_callback;
- desc->callback_param = &tx_done;
- init_completion(&tx_done);
-
- writel_relaxed(MODE_NFC, nfc->pbus_base + PBUS_PAD_MODE);
-
- writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
- writel_relaxed(0, nfc->reg_base + NFC_ADDR_OFFSET);
- writel_relaxed(cmd, nfc->reg_base + NFC_FLASH_CMD);
-
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
-
- res = wait_for_completion_timeout(&tx_done, HZ);
- if (res > 0)
- err = readl_poll_timeout(addr, val, val & CMD_READY, 0, 1000);
-
- writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
-
-dma_unmap:
- dma_unmap_sg(chan->device->dev, &sg, 1, dir);
-
- return err;
-}
-
-static int tango_read_page(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- int err, res, len = mtd->writesize;
-
- tango_select_target(chip, chip->cur_cs);
- if (oob_required)
- chip->ecc.read_oob(chip, page);
-
- err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
- if (err)
- return err;
-
- res = decode_error_report(chip);
- if (res < 0) {
- chip->ecc.read_oob_raw(chip, page);
- res = check_erased_page(chip, buf);
- }
-
- return res;
-}
-
-static int tango_write_page(struct nand_chip *chip, const u8 *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- const struct nand_sdr_timings *timings;
- int err, len = mtd->writesize;
- u8 status;
-
- /* Calling tango_write_oob() would send PAGEPROG twice */
- if (oob_required)
- return -ENOTSUPP;
-
- tango_select_target(chip, chip->cur_cs);
- writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
- err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
- if (err)
- return err;
-
- timings = nand_get_sdr_timings(nand_get_interface_config(chip));
- err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max));
- if (err)
- return err;
-
- err = nand_status_op(chip, &status);
- if (err)
- return err;
-
- return (status & NAND_STATUS_FAIL) ? -EIO : 0;
-}
-
-static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
-{
- *pos += len;
-
- if (!*buf) {
- /* skip over "len" bytes */
- nand_change_read_column_op(chip, *pos, NULL, 0, false);
- } else {
- struct tango_chip *tchip = to_tango_chip(chip);
-
- ioread8_rep(tchip->base + PBUS_DATA, *buf, len);
- *buf += len;
- }
-}
-
-static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
-{
- *pos += len;
-
- if (!*buf) {
- /* skip over "len" bytes */
- nand_change_write_column_op(chip, *pos, NULL, 0, false);
- } else {
- struct tango_chip *tchip = to_tango_chip(chip);
-
- iowrite8_rep(tchip->base + PBUS_DATA, *buf, len);
- *buf += len;
- }
-}
-
-/*
- * Physical page layout (not drawn to scale)
- *
- * NB: Bad Block Marker area splits PKT_N in two (N1, N2).
- *
- * +---+-----------------+-------+-----+-----------+-----+----+-------+
- * | M | PKT_0 | ECC_0 | ... | N1 | BBM | N2 | ECC_N |
- * +---+-----------------+-------+-----+-----------+-----+----+-------+
- *
- * Logical page layout:
- *
- * +-----+---+-------+-----+-------+
- * oob = | BBM | M | ECC_0 | ... | ECC_N |
- * +-----+---+-------+-----+-------+
- *
- * +-----------------+-----+-----------------+
- * buf = | PKT_0 | ... | PKT_N |
- * +-----------------+-----+-----------------+
- */
-static void raw_read(struct nand_chip *chip, u8 *buf, u8 *oob)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- u8 *oob_orig = oob;
- const int page_size = mtd->writesize;
- const int ecc_size = chip->ecc.bytes;
- const int pkt_size = chip->ecc.size;
- int pos = 0; /* position within physical page */
- int rem = page_size; /* bytes remaining until BBM area */
-
- if (oob)
- oob += BBM_SIZE;
-
- aux_read(chip, &oob, METADATA_SIZE, &pos);
-
- while (rem > pkt_size) {
- aux_read(chip, &buf, pkt_size, &pos);
- aux_read(chip, &oob, ecc_size, &pos);
- rem = page_size - pos;
- }
-
- aux_read(chip, &buf, rem, &pos);
- aux_read(chip, &oob_orig, BBM_SIZE, &pos);
- aux_read(chip, &buf, pkt_size - rem, &pos);
- aux_read(chip, &oob, ecc_size, &pos);
-}
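
A worked trace of the walk above, under an invented geometry (page_size = 4096, pkt_size = 1024, ecc_size = 56) and assuming the driver's 4-byte metadata and 6-byte BBM sizes:

/*
 * pos   transfer              destination
 *    0  metadata (4)          oob (past the BBM slot)
 *    4  PKT_0 (1024)          buf
 * 1028  ECC_0 (56)            oob
 *  ...  two more packet/ECC pairs, leaving rem = 4096 - 3244 = 852
 * 3244  N1 = 852 bytes        buf       (first part of PKT_N)
 * 4096  BBM (6)               oob_orig  (front of the OOB buffer)
 * 4102  N2 = 1024 - 852       buf       (rest of PKT_N)
 * 4274  ECC_N (56)            oob
 */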
-
-static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- const u8 *oob_orig = oob;
- const int page_size = mtd->writesize;
- const int ecc_size = chip->ecc.bytes;
- const int pkt_size = chip->ecc.size;
- int pos = 0; /* position within physical page */
- int rem = page_size; /* bytes remaining until BBM area */
-
- if (oob)
- oob += BBM_SIZE;
-
- aux_write(chip, &oob, METADATA_SIZE, &pos);
-
- while (rem > pkt_size) {
- aux_write(chip, &buf, pkt_size, &pos);
- aux_write(chip, &oob, ecc_size, &pos);
- rem = page_size - pos;
- }
-
- aux_write(chip, &buf, rem, &pos);
- aux_write(chip, &oob_orig, BBM_SIZE, &pos);
- aux_write(chip, &buf, pkt_size - rem, &pos);
- aux_write(chip, &oob, ecc_size, &pos);
-}
-
-static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
-{
- tango_select_target(chip, chip->cur_cs);
- nand_read_page_op(chip, page, 0, NULL, 0);
- raw_read(chip, buf, chip->oob_poi);
- return 0;
-}
-
-static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
- int oob_required, int page)
-{
- tango_select_target(chip, chip->cur_cs);
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- raw_write(chip, buf, chip->oob_poi);
- return nand_prog_page_end_op(chip);
-}
-
-static int tango_read_oob(struct nand_chip *chip, int page)
-{
- tango_select_target(chip, chip->cur_cs);
- nand_read_page_op(chip, page, 0, NULL, 0);
- raw_read(chip, NULL, chip->oob_poi);
- return 0;
-}
-
-static int tango_write_oob(struct nand_chip *chip, int page)
-{
- tango_select_target(chip, chip->cur_cs);
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- raw_write(chip, NULL, chip->oob_poi);
- return nand_prog_page_end_op(chip);
-}
-
-static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (idx >= ecc->steps)
- return -ERANGE;
-
- res->offset = BBM_SIZE + METADATA_SIZE + ecc->bytes * idx;
- res->length = ecc->bytes;
-
- return 0;
-}
-
-static int oob_free(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
-{
- return -ERANGE; /* no free space in spare area */
-}
-
-static const struct mtd_ooblayout_ops tango_nand_ooblayout_ops = {
- .ecc = oob_ecc,
- .free = oob_free,
-};
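
The logical OOB is therefore dense: equal-sized ECC sections packed behind the BBM and metadata bytes. Assuming the driver's BBM_SIZE of 6 and METADATA_SIZE of 4, plus an invented ecc->bytes of 56, querying section 1 through the generic helper gives:

struct mtd_oob_region r;
int err = mtd_ooblayout_ecc(mtd, 1, &r);
/* r.offset = 6 + 4 + 56 * 1 = 66, r.length = 56 */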
-
-static u32 to_ticks(int kHz, int ps)
-{
- return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
-}
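
to_ticks() converts a picosecond constraint into controller clock cycles, rounding up; with invented numbers, a 125000 kHz clock and tREA_max = 25000 ps:

/*
 * ticks = DIV_ROUND_UP(125000 * 25000, NSEC_PER_SEC)
 *       = DIV_ROUND_UP(3125000000, 1000000000) = 4
 *
 * i.e. a 25 ns requirement rounds up to 4 cycles of an 8 ns clock.
 */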
-
-static int tango_set_timings(struct nand_chip *chip, int csline,
- const struct nand_interface_config *conf)
-{
- const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
- struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- struct tango_chip *tchip = to_tango_chip(chip);
- u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
- int kHz = nfc->freq_kHz;
-
- if (IS_ERR(sdr))
- return PTR_ERR(sdr);
-
- if (csline == NAND_DATA_IFACE_CHECK_ONLY)
- return 0;
-
- Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
- Textw = to_ticks(kHz, sdr->tWB_max);
- Twc = to_ticks(kHz, sdr->tWC_min);
- Twpw = to_ticks(kHz, sdr->tWC_min - sdr->tWP_min);
-
- Tacc = to_ticks(kHz, sdr->tREA_max);
- Thold = to_ticks(kHz, sdr->tREH_min);
- Trpw = to_ticks(kHz, sdr->tRC_min - sdr->tREH_min);
- Textr = to_ticks(kHz, sdr->tRHZ_max);
-
- tchip->timing1 = TIMING(Trdy, Textw, Twc, Twpw);
- tchip->timing2 = TIMING(Tacc, Thold, Trpw, Textr);
-
- return 0;
-}
-
-static int tango_attach_chip(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- ecc->algo = NAND_ECC_ALGO_BCH;
- ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
-
- ecc->read_page_raw = tango_read_page_raw;
- ecc->write_page_raw = tango_write_page_raw;
- ecc->read_page = tango_read_page;
- ecc->write_page = tango_write_page;
- ecc->read_oob = tango_read_oob;
- ecc->write_oob = tango_write_oob;
-
- return 0;
-}
-
-static const struct nand_controller_ops tango_controller_ops = {
- .attach_chip = tango_attach_chip,
- .setup_interface = tango_set_timings,
- .exec_op = tango_exec_op,
-};
-
-static int chip_init(struct device *dev, struct device_node *np)
-{
- u32 cs;
- int err, res;
- struct mtd_info *mtd;
- struct nand_chip *chip;
- struct tango_chip *tchip;
- struct nand_ecc_ctrl *ecc;
- struct tango_nfc *nfc = dev_get_drvdata(dev);
-
- tchip = devm_kzalloc(dev, sizeof(*tchip), GFP_KERNEL);
- if (!tchip)
- return -ENOMEM;
-
- res = of_property_count_u32_elems(np, "reg");
- if (res < 0)
- return res;
-
- if (res != 1)
- return -ENOTSUPP; /* Multi-CS chips are not supported */
-
- err = of_property_read_u32_index(np, "reg", 0, &cs);
- if (err)
- return err;
-
- if (cs >= MAX_CS)
- return -EINVAL;
-
- chip = &tchip->nand_chip;
- ecc = &chip->ecc;
- mtd = nand_to_mtd(chip);
-
- chip->options = NAND_USES_DMA |
- NAND_NO_SUBPAGE_WRITE |
- NAND_WAIT_TCCS;
- chip->controller = &nfc->hw;
- tchip->base = nfc->pbus_base + (cs * 256);
-
- nand_set_flash_node(chip, np);
- mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
- mtd->dev.parent = dev;
-
- err = nand_scan(chip, 1);
- if (err)
- return err;
-
- tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE);
- tchip->pkt_0_cfg = PKT_CFG(ecc->size + METADATA_SIZE, ecc->strength);
- tchip->pkt_n_cfg = PKT_CFG(ecc->size, ecc->strength);
- tchip->bb_cfg = BB_CFG(mtd->writesize, BBM_SIZE);
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err) {
- nand_cleanup(chip);
- return err;
- }
-
- nfc->chips[cs] = tchip;
-
- return 0;
-}
-
-static int tango_nand_remove(struct platform_device *pdev)
-{
- struct tango_nfc *nfc = platform_get_drvdata(pdev);
- struct nand_chip *chip;
- int cs, ret;
-
- dma_release_channel(nfc->chan);
-
- for (cs = 0; cs < MAX_CS; ++cs) {
- if (nfc->chips[cs]) {
- chip = &nfc->chips[cs]->nand_chip;
- ret = mtd_device_unregister(nand_to_mtd(chip));
- WARN_ON(ret);
- nand_cleanup(chip);
- }
- }
-
- return 0;
-}
-
-static int tango_nand_probe(struct platform_device *pdev)
-{
- int err;
- struct clk *clk;
- struct resource *res;
- struct tango_nfc *nfc;
- struct device_node *np;
-
- nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
- if (!nfc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nfc->reg_base))
- return PTR_ERR(nfc->reg_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- nfc->mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nfc->mem_base))
- return PTR_ERR(nfc->mem_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- nfc->pbus_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nfc->pbus_base))
- return PTR_ERR(nfc->pbus_base);
-
- writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
- if (IS_ERR(nfc->chan))
- return PTR_ERR(nfc->chan);
-
- platform_set_drvdata(pdev, nfc);
- nand_controller_init(&nfc->hw);
- nfc->hw.ops = &tango_controller_ops;
- nfc->freq_kHz = clk_get_rate(clk) / 1000;
-
- for_each_child_of_node(pdev->dev.of_node, np) {
- err = chip_init(&pdev->dev, np);
- if (err) {
- tango_nand_remove(pdev);
- of_node_put(np);
- return err;
- }
- }
-
- return 0;
-}
-
-static const struct of_device_id tango_nand_ids[] = {
- { .compatible = "sigma,smp8758-nand" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, tango_nand_ids);
-
-static struct platform_driver tango_nand_driver = {
- .probe = tango_nand_probe,
- .remove = tango_nand_remove,
- .driver = {
- .name = "tango-nand",
- .of_match_table = tango_nand_ids,
- },
-};
-
-module_platform_driver(tango_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sigma Designs");
-MODULE_DESCRIPTION("Tango4 NAND Flash controller driver");
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 8ea545bb924d..61d932c1b718 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
- if (req->ooblen)
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
return 0;
}
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index e72354322f62..d90c30229052 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -160,3 +160,11 @@ config MTD_REDBOOT_PARTS_READONLY
'FIS directory' images, enable this option.
endif # MTD_REDBOOT_PARTS
+
+config MTD_QCOMSMEM_PARTS
+ tristate "Qualcomm SMEM NAND flash partition parser"
+ depends on MTD_NAND_QCOM || COMPILE_TEST
+ depends on QCOM_SMEM
+ help
+ This provides support for parsing partitions from Shared Memory (SMEM)
+ for NAND flash on Qualcomm platforms.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index b0c5f62f9e85..50eb0b0a2210 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
index 980e332bdac4..26116694c821 100644
--- a/drivers/mtd/parsers/afs.c
+++ b/drivers/mtd/parsers/afs.c
@@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd,
return i;
out_free_parts:
- while (i >= 0) {
+ while (--i >= 0)
kfree(parts[i].name);
- i--;
- }
kfree(parts);
*pparts = NULL;
return ret;
diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
index d69607b48227..fab0949aabba 100644
--- a/drivers/mtd/parsers/parser_imagetag.c
+++ b/drivers/mtd/parsers/parser_imagetag.c
@@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
pr_err("invalid rootfs address: %*ph\n",
(int)sizeof(buf->flash_image_start),
buf->flash_image_start);
+ ret = -EINVAL;
goto out;
}
@@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
pr_err("invalid kernel address: %*ph\n",
(int)sizeof(buf->kernel_address),
buf->kernel_address);
+ ret = -EINVAL;
goto out;
}
@@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
pr_err("invalid kernel length: %*ph\n",
(int)sizeof(buf->kernel_length),
buf->kernel_length);
+ ret = -EINVAL;
goto out;
}
@@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
pr_err("invalid total length: %*ph\n",
(int)sizeof(buf->total_length),
buf->total_length);
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
new file mode 100644
index 000000000000..808cb33d71f8
--- /dev/null
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm SMEM NAND flash partition parser
+ *
+ * Copyright (C) 2020, Linaro Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+#define SMEM_AARM_PARTITION_TABLE 9
+#define SMEM_APPS 0
+
+#define SMEM_FLASH_PART_MAGIC1 0x55ee73aa
+#define SMEM_FLASH_PART_MAGIC2 0xe35ebddb
+#define SMEM_FLASH_PTABLE_V3 3
+#define SMEM_FLASH_PTABLE_V4 4
+#define SMEM_FLASH_PTABLE_MAX_PARTS_V3 16
+#define SMEM_FLASH_PTABLE_MAX_PARTS_V4 48
+#define SMEM_FLASH_PTABLE_HDR_LEN (4 * sizeof(u32))
+#define SMEM_FLASH_PTABLE_NAME_SIZE 16
+
+/**
+ * struct smem_flash_pentry - SMEM Flash partition entry
+ * @name: Name of the partition
+ * @offset: Offset in blocks
+ * @length: Length of the partition in blocks
+ * @attr: Flags for this partition
+ */
+struct smem_flash_pentry {
+ char name[SMEM_FLASH_PTABLE_NAME_SIZE];
+ __le32 offset;
+ __le32 length;
+ u8 attr;
+} __packed __aligned(4);
+
+/**
+ * struct smem_flash_ptable - SMEM Flash partition table
+ * @magic1: Partition table Magic 1
+ * @magic2: Partition table Magic 2
+ * @version: Partition table version
+ * @numparts: Number of partitions in this ptable
+ * @pentry: Flash partition entries belonging to this ptable
+ */
+struct smem_flash_ptable {
+ __le32 magic1;
+ __le32 magic2;
+ __le32 version;
+ __le32 numparts;
+ struct smem_flash_pentry pentry[SMEM_FLASH_PTABLE_MAX_PARTS_V4];
+} __packed __aligned(4);
+
+static int parse_qcomsmem_part(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct smem_flash_pentry *pentry;
+ struct smem_flash_ptable *ptable;
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ struct mtd_partition *parts;
+ int ret, i, numparts;
+ char *name, *c;
+
+ pr_debug("Parsing partition table info from SMEM\n");
+ ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+ if (IS_ERR(ptable)) {
+ pr_err("Error reading partition table header\n");
+ return PTR_ERR(ptable);
+ }
+
+ /* Verify ptable magic */
+ if (le32_to_cpu(ptable->magic1) != SMEM_FLASH_PART_MAGIC1 ||
+ le32_to_cpu(ptable->magic2) != SMEM_FLASH_PART_MAGIC2) {
+ pr_err("Partition table magic verification failed\n");
+ return -EINVAL;
+ }
+
+ /* Ensure the number of partitions does not exceed the max we have allocated */
+ numparts = le32_to_cpu(ptable->numparts);
+ if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ pr_err("Partition numbers exceed the max limit\n");
+ return -EINVAL;
+ }
+
+ /* Find out length of partition data based on table version */
+ if (le32_to_cpu(ptable->version) <= SMEM_FLASH_PTABLE_V3) {
+ len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V3 *
+ sizeof(struct smem_flash_pentry);
+ } else if (le32_to_cpu(ptable->version) == SMEM_FLASH_PTABLE_V4) {
+ len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V4 *
+ sizeof(struct smem_flash_pentry);
+ } else {
+ pr_err("Unknown ptable version (%d)", le32_to_cpu(ptable->version));
+ return -EINVAL;
+ }
+
+ /*
+ * Now that the partition table header has been parsed, verified
+ * and the length of the partition table calculated, read the
+ * complete partition table
+ */
+ ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+ if (IS_ERR_OR_NULL(ptable)) {
+ pr_err("Error reading partition table\n");
+ return ptable ? PTR_ERR(ptable) : -ENOENT;
+ }
+
+ parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (i = 0; i < numparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] == '\0')
+ continue;
+
+ name = kstrdup(pentry->name, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out_free_parts;
+ }
+
+ /* Convert name to lower case */
+ for (c = name; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ parts[i].name = name;
+ parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[i].mask_flags = pentry->attr;
+ parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+ i, pentry->name, le32_to_cpu(pentry->offset),
+ le32_to_cpu(pentry->length), pentry->attr);
+ }
+
+ pr_debug("SMEM partition table found: ver: %d len: %d\n",
+ le32_to_cpu(ptable->version), numparts);
+ *pparts = parts;
+
+ return numparts;
+
+out_free_parts:
+ while (--i >= 0)
+ kfree(parts[i].name);
+ kfree(parts);
+ *pparts = NULL;
+
+ return ret;
+}
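
A worked view of the two-pass fetch and the block-to-byte conversion, using invented numbers:

/*
 * Pass 1: len = SMEM_FLASH_PTABLE_HDR_LEN = 16 bytes, just enough to
 * check magic1/magic2 and the version. Pass 2: len is regrown to
 * 16 + 48 * sizeof(struct smem_flash_pentry) for a v4 table so every
 * entry is mapped. An entry { .offset = 8, .length = 64 } on a device
 * with mtd->erasesize = 128 KiB then yields a partition at byte offset
 * 8 * 131072 = 0x100000 with size 64 * 131072 = 8 MiB.
 */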
+
+static const struct of_device_id qcomsmem_of_match_table[] = {
+ { .compatible = "qcom,smem-part" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
+
+static struct mtd_part_parser mtd_parser_qcomsmem = {
+ .parse_fn = parse_qcomsmem_part,
+ .name = "qcomsmem",
+ .of_match_table = qcomsmem_of_match_table,
+};
+module_mtd_part_parser(mtd_parser_qcomsmem);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm SMEM NAND flash partition parser");
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 7c26f8f565cb..47fbf1d1e557 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -399,8 +399,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
for_each_available_child_of_node(dev->of_node, np) {
ret = hisi_spi_nor_register(np, host);
- if (ret)
+ if (ret) {
+ of_node_put(np);
goto fail;
+ }
if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 555fe55d14ae..825610a2e9dc 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
@@ -73,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 20df44b753da..0522304f52fa 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -465,7 +465,7 @@ static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
int ret;
@@ -854,6 +854,43 @@ int spi_nor_wait_till_ready(struct spi_nor *nor)
}
/**
+ * spi_nor_global_block_unlock() - Unlock Global Block Protection.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_global_block_unlock(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
+ NULL, 0);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
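
For background, SPINOR_OP_GBULK is the SST26-style Global Block Protection Unlock opcode (0x98 per the SST26 datasheets; stated here as context, not taken from this hunk). On the wire the helper reduces to:

/*
 *   CS# low -> 0x06 -> CS# high      (WREN)
 *   CS# low -> 0x98 -> CS# high      (global block unlock)
 *   poll SR1 until WIP clears        (spi_nor_wait_till_ready)
 */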
+
+/**
* spi_nor_write_sr() - Write the Status Register.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to DMA-able buffer to write to the Status Register.
@@ -1364,14 +1401,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
erase = &map->erase_type[i];
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION &&
+ region->size <= len)
+ return erase;
+
/* Don't erase more than what the user has asked for. */
if (erase->size > len)
continue;
- /* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION)
- return erase;
-
spi_nor_div_by_erase_size(erase, addr, &rem);
if (rem)
continue;
@@ -1515,6 +1553,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
goto destroy_erase_cmd_list;
if (prev_erase != erase ||
+ erase->size != cmd->size ||
region->offset & SNOR_OVERLAID_REGION) {
cmd = spi_nor_init_erase_cmd(region, erase);
if (IS_ERR(cmd)) {
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index d631ee299de3..4a3f7f150b5d 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -434,12 +434,14 @@ int spi_nor_write_disable(struct spi_nor *nor);
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_global_block_unlock(struct spi_nor *nor);
int spi_nor_lock_and_prep(struct spi_nor *nor);
void spi_nor_unlock_and_unprep(struct spi_nor *nor);
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr);
+int spi_nor_read_cr(struct spi_nor *nor, u8 *cr);
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len);
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1);
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index 6ee7719e5903..25142ec4737b 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -788,7 +788,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
int i;
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (!(erase_type & BIT(i)))
+ if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
continue;
if (region->size & erase[i].size_mask) {
spi_nor_region_mark_overlay(region);
@@ -858,6 +858,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size;
}
+ spi_nor_region_mark_end(&region[i - 1]);
save_uniform_erase_type = map->uniform_erase_type;
map->uniform_erase_type = spi_nor_sort_erase_mask(map,
@@ -881,8 +882,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
if (!(regions_erase_type & BIT(erase[i].idx)))
spi_nor_set_erase_type(&erase[i], 0, 0xFF);
- spi_nor_region_mark_end(&region[i - 1]);
-
return 0;
}
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 00e48da0744a..980f4c09c91d 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -8,6 +8,53 @@
#include "core.h"
+#define SST26VF_CR_BPNV BIT(3)
+
+static int sst26vf_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static int sst26vf_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ /* We only support unlocking the entire flash array. */
+ if (ofs != 0 || len != nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (!(nor->bouncebuf[0] & SST26VF_CR_BPNV)) {
+ dev_dbg(nor->dev, "Any block has been permanently locked\n");
+ return -EINVAL;
+ }
+
+ return spi_nor_global_block_unlock(nor);
+}
+
+static int sst26vf_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct spi_nor_locking_ops sst26vf_locking_ops = {
+ .lock = sst26vf_lock,
+ .unlock = sst26vf_unlock,
+ .is_locked = sst26vf_is_locked,
+};
+
+static void sst26vf_default_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &sst26vf_locking_ops;
+}
+
+static const struct spi_nor_fixups sst26vf_fixups = {
+ .default_init = sst26vf_default_init,
+};
+
static const struct flash_info sst_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
@@ -39,8 +86,9 @@ static const struct flash_info sst_parts[] = {
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &sst26vf_fixups },
};
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 892494c8cb7c..ccc5979642b7 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -142,7 +142,6 @@ struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
return tbl;
err:
- kfree(tbl->entries);
kfree(tbl);
return ERR_PTR(err);
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 2f3312c31e51..8a7306cc1947 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -913,12 +913,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
ubi_err(ubi, "bad data_size");
goto bad;
}
- } else if (lnum == used_ebs - 1) {
- if (data_size == 0) {
- ubi_err(ubi, "bad data_size at last LEB");
- goto bad;
- }
- } else {
+ } else if (lnum > used_ebs - 1) {
ubi_err(ubi, "too high lnum");
goto bad;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 260f9f46668b..b09bed554f26 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -42,6 +42,7 @@ config BONDING
tristate "Bonding driver support"
depends on INET
depends on IPV6 || IPV6=n
+ depends on TLS || TLS_DEVICE=n
help
Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
Channels together. This is called 'Etherchannel' by Cisco,
@@ -87,9 +88,10 @@ config WIREGUARD
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
select ARM_CRYPTO if ARM
select ARM64_CRYPTO if ARM64
- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
+ select CRYPTO_BLAKE2S_ARM if ARM
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 36e2e41ed2aa..f4990ff32fa4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
-obj-$(CONFIG_MHI_NET) += mhi_net.o
+obj-$(CONFIG_MHI_NET) += mhi/
#
# Networking Drivers
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 98df38fe553c..12d085405bd0 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -332,7 +332,7 @@ static int __init arc_rimi_init(void)
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void)
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 22a49c6d7ae6..5d4a4c7efbbf 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -298,6 +298,10 @@ struct arcnet_local {
int excnak_pending; /* We just got an excessive nak interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
@@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index e04efc0a5c97..1bad1866ae46 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -387,15 +387,49 @@ static void arcnet_timer(struct timer_list *t)
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
}
-static void arcnet_reply_tasklet(unsigned long data)
+static void reset_device_work(struct work_struct *work)
{
- struct arcnet_local *lp = (struct arcnet_local *)data;
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
+}
+
+static void arcnet_reply_tasklet(struct tasklet_struct *t)
+{
+ struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet);
struct sk_buff *ackskb, *skb;
struct sock_exterr_skb *serr;
@@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name)
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
@@ -483,8 +530,7 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
- tasklet_init(&lp->reply_tasklet, arcnet_reply_tasklet,
- (unsigned long)lp);
+ tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet);
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
@@ -587,6 +633,10 @@ int arcnet_close(struct net_device *dev)
/* shut down the card */
lp->hw.close(dev);
+
+ /* reset counters */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
@@ -820,6 +870,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
@@ -852,11 +905,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
@@ -1052,6 +1108,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index f983c4ce6b07..be618e4b9ed5 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -169,7 +169,7 @@ static int __init com20020_init(void)
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -182,7 +182,7 @@ static void __exit com20020_exit(void)
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index eb7f76753c9c..8bdc44b7e09a 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -291,7 +291,7 @@ static void com20020pci_remove(struct pci_dev *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ffcf358..b88a109b3b15 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -67,7 +67,7 @@ static void regdump(struct net_device *dev)
/* set up the address register */
count = 0;
arcnet_outb((count >> 8) | RDDATAflag | AUTOINCflag,
- ioaddr, com20020_REG_W_ADDR_HI);
+ ioaddr, COM20020_REG_W_ADDR_HI);
arcnet_outb(count & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
for (count = 0; count < 256 + 32; count++) {
@@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link)
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index cf214b730671..3856b447d38e 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -396,7 +396,7 @@ static int __init com90io_init(void)
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
@@ -419,7 +419,7 @@ static void __exit com90io_exit(void)
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 3dc3d533cb19..d8dfb9ea0de8 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -554,7 +554,7 @@ err_free_irq:
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -672,7 +672,7 @@ static void __exit com90xx_exit(void)
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..7511bca9c15e 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -240,12 +240,6 @@ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
- /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the
- * socket type is v6 an explicit call to udp_encap_enable is needed.
- */
- if (sock->sk->sk_family == AF_INET6)
- udp_encap_enable();
-
rcu_assign_pointer(bareudp->sock, sock);
return 0;
}
@@ -380,7 +374,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -532,10 +526,12 @@ static void bareudp_setup(struct net_device *dev)
dev->netdev_ops = &bareudp_netdev_ops;
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -644,6 +640,14 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -661,17 +665,13 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
-
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, NULL);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
@@ -722,7 +722,6 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
- LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -746,8 +745,7 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
return dev;
err:
- bareudp_dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
+ bareudp_dellink(dev, NULL);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index aa001b16765a..6908822d9773 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -73,6 +73,8 @@ enum ad_link_speed_type {
AD_LINK_SPEED_50000MBPS,
AD_LINK_SPEED_56000MBPS,
AD_LINK_SPEED_100000MBPS,
+ AD_LINK_SPEED_200000MBPS,
+ AD_LINK_SPEED_400000MBPS,
};
/* compare MAC addresses */
@@ -245,6 +247,8 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_50000MBPS
* %AD_LINK_SPEED_56000MBPS
* %AD_LINK_SPEED_100000MBPS
+ * %AD_LINK_SPEED_200000MBPS
+ * %AD_LINK_SPEED_400000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -312,13 +316,21 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_100000MBPS;
break;
+ case SPEED_200000:
+ speed = AD_LINK_SPEED_200000MBPS;
+ break;
+
+ case SPEED_400000:
+ speed = AD_LINK_SPEED_400000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
- pr_warn_once("%s: (slave %s): unknown ethtool speed (%d) for port %d (set it to 0)\n",
- slave->bond->dev->name,
- slave->dev->name, slave->speed,
- port->actor_port_number);
+ pr_err_once("%s: (slave %s): unknown ethtool speed (%d) for port %d (set it to 0)\n",
+ slave->bond->dev->name,
+ slave->dev->name, slave->speed,
+ port->actor_port_number);
speed = 0;
break;
}
@@ -733,6 +745,12 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_100000MBPS:
bandwidth = nports * 100000;
break;
+ case AD_LINK_SPEED_200000MBPS:
+ bandwidth = nports * 200000;
+ break;
+ case AD_LINK_SPEED_400000MBPS:
+ bandwidth = nports * 400000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5fe5232cc3f3..74cbbb22470b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -83,6 +83,9 @@
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "bonding_priv.h"
@@ -164,7 +167,7 @@ module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
- "4 for encap layer 3+4");
+ "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -301,6 +304,19 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
+bool bond_sk_check(struct bonding *bond)
+{
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
/*---------------------------------- VLAN -----------------------------------*/
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -1212,6 +1228,13 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ features |= BOND_TLS_FEATURES;
+ else
+ features &= ~BOND_TLS_FEATURES;
+#endif
+
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
@@ -1434,6 +1457,8 @@ static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
+ case BOND_XMIT_POLICY_VLAN_SRCMAC:
+ return NETDEV_LAG_HASH_VLAN_SRCMAC;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
@@ -1922,6 +1947,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_unregister;
}
+ bond_lower_state_changed(new_slave);
+
res = bond_sysfs_slave_add(new_slave);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
@@ -3494,6 +3521,27 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
return true;
}
+static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u32 srcmac_vendor = 0, srcmac_dev = 0;
+ u16 vlan;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
+
+ for (i = 3; i < ETH_ALEN; i++)
+ srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
+
+ if (!skb_vlan_tag_present(skb))
+ return srcmac_vendor ^ srcmac_dev;
+
+ vlan = skb_vlan_tag_get(skb);
+
+ return vlan ^ srcmac_vendor ^ srcmac_dev;
+}
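
A worked example with an invented frame, source MAC 00:11:22:33:44:55 on VLAN 10:

/*
 * srcmac_vendor = 0x001122, srcmac_dev = 0x334455
 * hash = 10 ^ 0x001122 ^ 0x334455 = 0x33557d
 */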
+
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
struct flow_keys *fk)
@@ -3501,10 +3549,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
int noff, proto = -1;
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ switch (bond->params.xmit_policy) {
+ case BOND_XMIT_POLICY_ENCAP23:
+ case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
fk, NULL, 0, 0, 0, 0);
+ default:
+ break;
}
fk->ports.ports = 0;
@@ -3539,6 +3591,16 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
return true;
}
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+{
+ hash ^= (__force u32)flow_get_u32_dst(flow) ^
+ (__force u32)flow_get_u32_src(flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+ /* discard lowest hash bit to deal with the common even ports pattern */
+ return hash >> 1;
+}
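
Tracing the fold with an invented intermediate value of 0x12345678:

/*
 * hash ^= hash >> 16:  0x12345678 ^ 0x00001234 = 0x1234444c
 * hash ^= hash >>  8:  0x1234444c ^ 0x00123444 = 0x12267008
 * return hash >> 1:    0x09133804
 *
 * Discarding bit 0 keeps flows that differ only in port parity (the
 * common all-even source-port case) from landing on the same slave.
 */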
+
/**
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
@@ -3556,6 +3618,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return bond_vlan_srcmac_hash(skb);
+
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
return bond_eth_hash(skb);
@@ -3569,12 +3634,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
else
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- hash ^= (__force u32)flow_get_u32_dst(&flow) ^
- (__force u32)flow_get_u32_src(&flow);
- hash ^= (hash >> 16);
- hash ^= (hash >> 8);
- return hash >> 1;
+ return bond_ip_hash(hash, &flow);
}
/*-------------------------- Device entry points ----------------------------*/
@@ -4547,6 +4608,95 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
return NULL;
}
+static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
+{
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
+ flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
+ break;
+ }
+ fallthrough;
+#endif
+ default: /* AF_INET */
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
+ flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
+ break;
+ }
+
+ flow->ports.src = inet_sk(sk)->inet_sport;
+ flow->ports.dst = inet_sk(sk)->inet_dport;
+}
+
+/**
+ * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
+ * @sk: socket to use for headers
+ *
+ * This function will extract the necessary fields from the socket and use
+ * them to generate a hash based on the LAYER34 xmit_policy.
+ * Assumes that sk is a TCP or UDP socket.
+ */
+static u32 bond_sk_hash_l34(struct sock *sk)
+{
+ struct flow_keys flow;
+ u32 hash;
+
+ bond_sk_to_flow(sk, &flow);
+
+ /* L4 */
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ /* L3 */
+ return bond_ip_hash(hash, &flow);
+}
+
+static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
+ struct sock *sk)
+{
+ struct bond_up_slave *slaves;
+ struct slave *slave;
+ unsigned int count;
+ u32 hash;
+
+ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ hash = bond_sk_hash_l34(sk);
+ slave = slaves->arr[hash % count];
+
+ return slave->dev;
+}
+
+static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
+ struct sock *sk)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *lower = NULL;
+
+ rcu_read_lock();
+ if (bond_sk_check(bond))
+ lower = __bond_sk_get_lower_dev(bond, sk);
+ rcu_read_unlock();
+
+ return lower;
+}
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ return bond_tx_drop(dev, skb);
+}
+#endif
+
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4555,6 +4705,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+ return bond_tls_device_xmit(bond, skb, dev);
+#endif
+
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
@@ -4683,6 +4838,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
+ .ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
};
static const struct device_type bond_type = {
@@ -4754,6 +4910,10 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ bond_dev->features |= BOND_TLS_FEATURES;
+#endif
}
/* Destroy a bonding device.
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a4e4e15f574d..77d7c38bd435 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -96,12 +96,13 @@ static const struct bond_opt_value bond_pps_tbl[] = {
};
static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
- { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
- { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
- { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
- { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
- { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
- { NULL, -1, 0},
+ { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+ { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+ { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+ { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+ { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+ { "vlan+srcmac", BOND_XMIT_POLICY_VLAN_SRCMAC, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_arp_validate_tbl[] = {
@@ -745,17 +746,30 @@ const struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
-static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+static bool bond_set_xfrm_features(struct bonding *bond)
{
if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
- return;
+ return false;
+
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+ return true;
+}
+
+static bool bond_set_tls_features(struct bonding *bond)
+{
+ if (!IS_ENABLED(CONFIG_TLS_DEVICE))
+ return false;
- if (mode == BOND_MODE_ACTIVEBACKUP)
- bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+ if (bond_sk_check(bond))
+ bond->dev->wanted_features |= BOND_TLS_FEATURES;
else
- bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+ bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
- netdev_update_features(bond_dev);
+ return true;
}
static int bond_option_mode_set(struct bonding *bond,
@@ -780,13 +794,20 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- bond_set_xfrm_features(bond->dev, newval->value);
-
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED) {
+ bool update = false;
+
+ update |= bond_set_xfrm_features(bond);
+ update |= bond_set_tls_features(bond);
+
+ if (update)
+ netdev_update_features(bond->dev);
+ }
+
return 0;
}
@@ -1219,6 +1240,10 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED)
+ if (bond_set_tls_features(bond))
+ netdev_update_features(bond->dev);
+
return 0;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index bcc14c5875bf..8215cd77301f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -89,8 +89,7 @@ static inline void update_tty_status(struct ser_device *ser)
ser->tty_status =
ser->tty->stopped << 5 |
ser->tty->flow_stopped << 3 |
- ser->tty->packet << 2 |
- ser->tty->port->low_latency << 1;
+ ser->tty->packet << 2;
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 47a6d62b7511..106f089eb2a8 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -598,9 +598,9 @@ err:
return NETDEV_TX_OK;
}
-static void cfv_tx_release_tasklet(unsigned long drv)
+static void cfv_tx_release_tasklet(struct tasklet_struct *t)
{
- struct cfv_info *cfv = (struct cfv_info *)drv;
+ struct cfv_info *cfv = from_tasklet(cfv, t, tx_release_tasklet);
cfv_release_used_buf(cfv->vq_tx);
}
@@ -716,9 +716,7 @@ static int cfv_probe(struct virtio_device *vdev)
cfv->ctx.head = USHRT_MAX;
netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
- tasklet_init(&cfv->tx_release_tasklet,
- cfv_tx_release_tasklet,
- (unsigned long)cfv);
+ tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 22164300122d..a2b4463d8480 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += dev.o
-can-dev-y += rx-offload.o
-
-can-dev-$(CONFIG_CAN_LEDS) += led.o
-
+obj-y += dev/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 5284f0ab3b06..9ad9b39f480e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -484,7 +484,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
+ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
/*
* we have to stop the queue and deliver all messages in case
@@ -856,7 +856,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
+ can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 63f48b016ecd..ef474bae47a1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -476,7 +476,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
- can_put_echo_skb(skb, dev, idx);
+ can_put_echo_skb(skb, dev, idx, 0);
/* Update the active bits */
atomic_add((1 << idx), &priv->tx_active);
@@ -733,7 +733,7 @@ static void c_can_do_tx(struct net_device *dev)
pend &= ~(1 << idx);
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
c_can_inval_tx_object(dev, IF_RX, obj);
- can_get_echo_skb(dev, idx);
+ can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
pkts++;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 8d9f332c35e0..f8a130f594e2 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -702,8 +702,8 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
stats->tx_bytes += cf->len;
stats->tx_packets++;
- can_put_echo_skb(priv->tx_skb, dev, 0);
- can_get_echo_skb(dev, 0);
+ can_put_echo_skb(priv->tx_skb, dev, 0, 0);
+ can_get_echo_skb(dev, 0, NULL);
priv->tx_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
deleted file mode 100644
index 3486704c8a95..000000000000
--- a/drivers/net/can/dev.c
+++ /dev/null
@@ -1,1338 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/can-ml.h>
-#include <linux/can/dev.h>
-#include <linux/can/skb.h>
-#include <linux/can/netlink.h>
-#include <linux/can/led.h>
-#include <linux/of.h>
-#include <net/rtnetlink.h>
-
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
-/* CAN DLC to real data length conversion helpers */
-
-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
- 8, 12, 16, 20, 24, 32, 48, 64};
-
-/* get data length from raw data length code (DLC) */
-u8 can_fd_dlc2len(u8 dlc)
-{
- return dlc2len[dlc & 0x0F];
-}
-EXPORT_SYMBOL_GPL(can_fd_dlc2len);
-
-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
- 9, 9, 9, 9, /* 9 - 12 */
- 10, 10, 10, 10, /* 13 - 16 */
- 11, 11, 11, 11, /* 17 - 20 */
- 12, 12, 12, 12, /* 21 - 24 */
- 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
- 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
- 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_fd_len2dlc(u8 len)
-{
- if (unlikely(len > 64))
- return 0xF;
-
- return len2dlc[len];
-}
-EXPORT_SYMBOL_GPL(can_fd_len2dlc);
-
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800000)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500000)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- netdev_err(dev, "bit-timing calculation not available\n");
- return -EINVAL;
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
-/* Checks the validity of the specified bit-timing parameters prop_seg,
- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
- * prescaler value brp. You can find more information in the header
- * file linux/can/netlink.h.
- */
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
- u64 brp64;
-
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
-
- brp64 = (u64)priv->clock.freq * (u64)bt->tq;
- if (btc->brp_inc > 1)
- do_div(brp64, btc->brp_inc);
- brp64 += 500000000UL - 1;
- do_div(brp64, 1000000000UL); /* the practicable BRP */
- if (btc->brp_inc > 1)
- brp64 *= btc->brp_inc;
- bt->brp = (u32)brp64;
-
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
- return -EINVAL;
-
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
-
- return 0;
-}
-
-/* Checks the validity of predefined bitrate settings */
-static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- for (i = 0; i < bitrate_const_cnt; i++) {
- if (bt->bitrate == bitrate_const[i])
- break;
- }
-
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
-}
-
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- int err;
-
- /* Depending on the given can_bittiming parameter structure, the CAN
- * timing parameters are either calculated from the provided bitrate
- * OR the timing parameters (tq, prop_seg, etc.) are provided directly
- * and are then checked and fixed up.
- */
- if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static void can_update_state_error_stats(struct net_device *dev,
- enum can_state new_state)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (new_state <= priv->state)
- return;
-
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- priv->can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- priv->can_stats.error_passive++;
- break;
- case CAN_STATE_BUS_OFF:
- priv->can_stats.bus_off++;
- break;
- default:
- break;
- }
-}
-
-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_TX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_TX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_RX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_RX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static const char *can_get_state_str(const enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return "Error Active";
- case CAN_STATE_ERROR_WARNING:
- return "Error Warning";
- case CAN_STATE_ERROR_PASSIVE:
- return "Error Passive";
- case CAN_STATE_BUS_OFF:
- return "Bus Off";
- case CAN_STATE_STOPPED:
- return "Stopped";
- case CAN_STATE_SLEEPING:
- return "Sleeping";
- default:
- return "<unknown>";
- }
-
- return "<unknown>";
-}
-
-void can_change_state(struct net_device *dev, struct can_frame *cf,
- enum can_state tx_state, enum can_state rx_state)
-{
- struct can_priv *priv = netdev_priv(dev);
- enum can_state new_state = max(tx_state, rx_state);
-
- if (unlikely(new_state == priv->state)) {
- netdev_warn(dev, "%s: oops, state did not change", __func__);
- return;
- }
-
- netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
- can_get_state_str(priv->state), priv->state,
- can_get_state_str(new_state), new_state);
-
- can_update_state_error_stats(dev, new_state);
- priv->state = new_state;
-
- if (!cf)
- return;
-
- if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_BUSOFF;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= tx_state >= rx_state ?
- can_tx_state_to_frame(dev, tx_state) : 0;
- cf->data[1] |= tx_state <= rx_state ?
- can_rx_state_to_frame(dev, rx_state) : 0;
-}
-EXPORT_SYMBOL_GPL(can_change_state);
-
-/* Local echo of CAN messages
- *
- * CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.rst). To test the handling of CAN
- * interfaces that do not support the local echo, both driver types are
- * implemented. If the driver does not support the echo, IFF_ECHO
- * remains clear in dev->flags. This causes the PF_CAN core to perform
- * the echo as a fallback solution.
- */
-static void can_flush_echo_skb(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- int i;
-
- for (i = 0; i < priv->echo_skb_max; i++) {
- if (priv->echo_skb[i]) {
- kfree_skb(priv->echo_skb[i]);
- priv->echo_skb[i] = NULL;
- stats->tx_dropped++;
- stats->tx_aborted_errors++;
- }
- }
-}
-
-/* Put the skb on the stack to be looped back locally later on
- *
- * The function is typically called in the start_xmit function
- * of the device driver. The driver must protect access to
- * priv->echo_skb, if necessary.
- */
-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- /* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
- (skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
- kfree_skb(skb);
- return 0;
- }
-
- if (!priv->echo_skb[idx]) {
- skb = can_create_echo_skb(skb);
- if (!skb)
- return -ENOMEM;
-
- /* make settings for echo to reduce code in irq context */
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->dev = dev;
-
- /* save this skb for tx interrupt echo handling */
- priv->echo_skb[idx] = skb;
- } else {
- /* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
- kfree_skb(skb);
- return -EBUSY;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_put_echo_skb);
-
-struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (idx >= priv->echo_skb_max) {
- netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
- __func__, idx, priv->echo_skb_max);
- return NULL;
- }
-
- if (priv->echo_skb[idx]) {
- /* Using "struct canfd_frame::len" for the frame
- * length is supported on both CAN and CANFD frames.
- */
- struct sk_buff *skb = priv->echo_skb[idx];
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
-
- /* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
-
- priv->echo_skb[idx] = NULL;
-
- return skb;
- }
-
- return NULL;
-}
-
-/* Get the skb from the stack and loop it back locally
- *
- * The function is typically called when the TX done interrupt
- * is handled in the device driver. The driver must protect
- * access to priv->echo_skb, if necessary.
- */
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct sk_buff *skb;
- u8 len;
-
- skb = __can_get_echo_skb(dev, idx, &len);
- if (!skb)
- return 0;
-
- skb_get(skb);
- if (netif_rx(skb) == NET_RX_SUCCESS)
- dev_consume_skb_any(skb);
- else
- dev_kfree_skb_any(skb);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(can_get_echo_skb);
-
-/* Remove the skb from the stack and free it.
- *
- * The function is typically called when TX failed.
- */
-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- dev_kfree_skb_any(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(can_free_echo_skb);
-
-/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- struct can_frame *cf;
- int err;
-
- BUG_ON(netif_carrier_ok(dev));
-
- /* No synchronization needed because the device is bus-off and
- * no messages can come in or go out.
- */
- can_flush_echo_skb(dev);
-
- /* send restart message upstream */
- skb = alloc_can_err_skb(dev, &cf);
- if (!skb)
- goto restart;
-
- cf->can_id |= CAN_ERR_RESTARTED;
-
- netif_rx_ni(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
-restart:
- netdev_dbg(dev, "restarted\n");
- priv->can_stats.restarts++;
-
- /* Now restart the device */
- err = priv->do_set_mode(dev, CAN_MODE_START);
-
- netif_carrier_on(dev);
- if (err)
- netdev_err(dev, "Error %d during restart", err);
-}
-
-static void can_restart_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct can_priv *priv = container_of(dwork, struct can_priv,
- restart_work);
-
- can_restart(priv->dev);
-}
-
-int can_restart_now(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* A manual restart is only permitted if automatic restart is
- * disabled and the device is in the bus-off state
- */
- if (priv->restart_ms)
- return -EINVAL;
- if (priv->state != CAN_STATE_BUS_OFF)
- return -EBUSY;
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
-
- return 0;
-}
-
-/* CAN bus-off
- *
- * This function should be called when the device goes bus-off to
- * tell the netif layer that no more packets can be sent or received.
- * If enabled, a timer is started to trigger bus-off recovery.
- */
-void can_bus_off(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (priv->restart_ms)
- netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
- priv->restart_ms);
- else
- netdev_info(dev, "bus-off\n");
-
- netif_carrier_off(dev);
-
- if (priv->restart_ms)
- schedule_delayed_work(&priv->restart_work,
- msecs_to_jiffies(priv->restart_ms));
-}
-EXPORT_SYMBOL_GPL(can_bus_off);
-
-static void can_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct can_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cf = skb_put_zero(skb, sizeof(struct can_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_skb);
-
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct canfd_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
-
-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, cf);
- if (unlikely(!skb))
- return NULL;
-
- (*cf)->can_id = CAN_ERR_FLAG;
- (*cf)->len = CAN_ERR_DLC;
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
-
-/* Allocate and setup space for the CAN network device */
-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
- unsigned int txqs, unsigned int rxqs)
-{
- struct net_device *dev;
- struct can_priv *priv;
- int size;
-
- /* We put the driver's priv, the CAN mid layer priv and the
- * echo skb into the netdevice's priv. The memory layout for
- * the netdev_priv is like this:
- *
- * +-------------------------+
- * | driver's priv |
- * +-------------------------+
- * | struct can_ml_priv |
- * +-------------------------+
- * | array of struct sk_buff |
- * +-------------------------+
- */
-
- size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
-
- if (echo_skb_max)
- size = ALIGN(size, sizeof(struct sk_buff *)) +
- echo_skb_max * sizeof(struct sk_buff *);
-
- dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
- txqs, rxqs);
- if (!dev)
- return NULL;
-
- priv = netdev_priv(dev);
- priv->dev = dev;
-
- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
-
- if (echo_skb_max) {
- priv->echo_skb_max = echo_skb_max;
- priv->echo_skb = (void *)priv +
- (size - echo_skb_max * sizeof(struct sk_buff *));
- }
-
- priv->state = CAN_STATE_STOPPED;
-
- INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
-
-/* Free space of the CAN network device */
-void free_candev(struct net_device *dev)
-{
- free_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(free_candev);
-
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers cannot switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
-
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
-
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
-
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_change_mtu);
-
-/* Common open function when the device gets opened.
- *
- * This function should be called in the open function of the device
- * driver.
- */
-int open_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (!priv->bittiming.bitrate) {
- netdev_err(dev, "bit-timing not yet defined\n");
- return -EINVAL;
- }
-
- /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
- if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
- netdev_err(dev, "incorrect/missing data bit-timing\n");
- return -EINVAL;
- }
-
- /* Switch carrier on if device was stopped while in bus-off state */
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(open_candev);
-
-#ifdef CONFIG_OF
-/* Common function that can be used to determine the limitations of
- * a transceiver when it provides no means to report these limitations
- * at runtime.
- */
-void of_can_transceiver(struct net_device *dev)
-{
- struct device_node *dn;
- struct can_priv *priv = netdev_priv(dev);
- struct device_node *np = dev->dev.parent->of_node;
- int ret;
-
- dn = of_get_child_by_name(np, "can-transceiver");
- if (!dn)
- return;
-
- ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
- of_node_put(dn);
- if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
- netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
-}
-EXPORT_SYMBOL_GPL(of_can_transceiver);
-#endif
-
-/* Common close function for cleanup before the device gets closed.
- *
- * This function should be called in the close function of the device
- * driver.
- */
-void close_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_flush_echo_skb(dev);
-}
-EXPORT_SYMBOL_GPL(close_candev);
-
-/* CAN netlink interface */
-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- [IFLA_CAN_STATE] = { .type = NLA_U32 },
- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
- [IFLA_CAN_DATA_BITTIMING]
- = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
-};
-
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- bool is_can_fd = false;
-
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- */
-
- if (!data)
- return 0;
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
- }
-
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct can_priv *priv = netdev_priv(dev);
- int err;
-
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
-
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bittiming(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
-
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
- else
- dev->mtu = CAN_MTU;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
- /* Do not allow changing restart delay while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- }
-
- if (data[IFLA_CAN_RESTART]) {
- /* Do not allow a restart while not running */
- if (!(dev->flags & IFF_UP))
- return -EINVAL;
- err = can_restart_now(dev);
- if (err)
- return err;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_data_bittiming(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_TERMINATION]) {
- const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
- const unsigned int num_term = priv->termination_const_cnt;
- unsigned int i;
-
- if (!priv->do_set_termination)
- return -EOPNOTSUPP;
-
- /* check whether given value is supported by the interface */
- for (i = 0; i < num_term; i++) {
- if (termval == priv->termination_const[i])
- break;
- }
- if (i >= num_term)
- return -EINVAL;
-
- /* Finally, set the termination value */
- err = priv->do_set_termination(dev, termval);
- if (err)
- return err;
-
- priv->termination = termval;
- }
-
- return 0;
-}
-
-static size_t can_get_size(const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- size_t size = 0;
-
- if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- if (priv->termination_const) {
- size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
- size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
- priv->termination_const_cnt);
- }
- if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
- size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
-
- return size;
-}
-
-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
- enum can_state state = priv->state;
-
- if (priv->do_get_state)
- priv->do_get_state(dev, &state);
-
- if ((priv->bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
-
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
-
- nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-
- (priv->do_get_berr_counter &&
- !priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
-
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
-
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
-
- (priv->termination_const &&
- (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
- nla_put(skb, IFLA_CAN_TERMINATION_CONST,
- sizeof(*priv->termination_const) *
- priv->termination_const_cnt,
- priv->termination_const))) ||
-
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
-
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
-
- (nla_put(skb, IFLA_CAN_BITRATE_MAX,
- sizeof(priv->bitrate_max),
- &priv->bitrate_max))
- )
-
- return -EMSGSIZE;
-
- return 0;
-}
-
-static size_t can_get_xstats_size(const struct net_device *dev)
-{
- return sizeof(struct can_device_stats);
-}
-
-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (nla_put(skb, IFLA_INFO_XSTATS,
- sizeof(priv->can_stats), &priv->can_stats))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
-static void can_dellink(struct net_device *dev, struct list_head *head)
-{
-}
-
-static struct rtnl_link_ops can_link_ops __read_mostly = {
- .kind = "can",
- .maxtype = IFLA_CAN_MAX,
- .policy = can_policy,
- .setup = can_setup,
- .validate = can_validate,
- .newlink = can_newlink,
- .changelink = can_changelink,
- .dellink = can_dellink,
- .get_size = can_get_size,
- .fill_info = can_fill_info,
- .get_xstats_size = can_get_xstats_size,
- .fill_xstats = can_fill_xstats,
-};
-
-/* Register the CAN network device */
-int register_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Ensure termination_const, termination_const_cnt and
- * do_set_termination consistency. All must be either set or
- * unset.
- */
- if ((!priv->termination_const != !priv->termination_const_cnt) ||
- (!priv->termination_const != !priv->do_set_termination))
- return -EINVAL;
-
- if (!priv->bitrate_const != !priv->bitrate_const_cnt)
- return -EINVAL;
-
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
- return -EINVAL;
-
- dev->rtnl_link_ops = &can_link_ops;
- netif_carrier_off(dev);
-
- return register_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(register_candev);
-
-/* Unregister the CAN network device */
-void unregister_candev(struct net_device *dev)
-{
- unregister_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(unregister_candev);
-
-/* Test if a network device is a candev based device
- * and return the can_priv* if so.
- */
-struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
- return NULL;
-
- return netdev_priv(dev);
-}
-EXPORT_SYMBOL_GPL(safe_candev_priv);
-
-static __init int can_dev_init(void)
-{
- int err;
-
- can_led_notifier_init();
-
- err = rtnl_link_register(&can_link_ops);
- if (!err)
- pr_info(MOD_DESC "\n");
-
- return err;
-}
-module_init(can_dev_init);
-
-static __exit void can_dev_exit(void)
-{
- rtnl_link_unregister(&can_link_ops);
-
- can_led_notifier_exit();
-}
-module_exit(can_dev_exit);
-
-MODULE_ALIAS_RTNL_LINK("can");
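Note that dev.c is removed only to be split up: the bit-timing code, the DLC/length helpers, the netlink interface and the skb helpers each move into their own file under drivers/net/can/dev/, created below. As a reminder of how the relocated state helpers are consumed, here is a minimal sketch for a hypothetical foo driver's error interrupt (simplified to the classic error-counter thresholds, 96 for warning and 128 for passive; bus-off handling omitted):

	static void foo_handle_state_change(struct net_device *dev,
					    const struct can_berr_counter *bec)
	{
		enum can_state tx_state, rx_state;
		struct can_frame *cf;
		struct sk_buff *skb;

		tx_state = bec->txerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
			   bec->txerr >= 96 ? CAN_STATE_ERROR_WARNING :
			   CAN_STATE_ERROR_ACTIVE;
		rx_state = bec->rxerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
			   bec->rxerr >= 96 ? CAN_STATE_ERROR_WARNING :
			   CAN_STATE_ERROR_ACTIVE;

		/* can_change_state() tolerates cf == NULL on allocation failure */
		skb = alloc_can_err_skb(dev, &cf);
		can_change_state(dev, skb ? cf : NULL, tx_state, rx_state);
		if (skb)
			netif_rx(skb);
	}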
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
new file mode 100644
index 000000000000..3e2e207861fc
--- /dev/null
+++ b/drivers/net/can/dev/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+can-dev-y += bittiming.o
+can-dev-y += dev.o
+can-dev-y += length.o
+can-dev-y += netlink.o
+can-dev-y += rx-offload.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
new file mode 100644
index 000000000000..f7fe226bb395
--- /dev/null
+++ b/drivers/net/can/dev/bittiming.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800000)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500000)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+/* Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int tseg1, alltseg;
+ u64 brp64;
+
+ tseg1 = bt->prop_seg + bt->phase_seg1;
+ if (!bt->sjw)
+ bt->sjw = 1;
+ if (bt->sjw > btc->sjw_max ||
+ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+ return -ERANGE;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ return -EINVAL;
+
+ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+
+ return 0;
+}
+
+/* Checks the validity of predefined bitrate settings */
+static int
+can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+ if (i >= priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ int err;
+
+ /* Depending on the given can_bittiming parameter structure, the CAN
+ * timing parameters are either calculated from the provided bitrate
+ * OR the timing parameters (tq, prop_seg, etc.) are provided directly
+ * and are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate && btc)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate && btc)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
+ else
+ err = -EINVAL;
+
+ return err;
+}
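To make the rounding in can_update_sample_point() concrete, here is one worked data point (illustrative user-space C, not kernel code; the clamping against the btc limits is omitted): for the CiA-recommended nominal sample point of 87.5% (875 per mille) and tseg = tseg1 + tseg2 = 15 time quanta, the i = 0 iteration hits the target exactly.

	#include <assert.h>

	int main(void)
	{
		const unsigned int spt_nominal = 875;	/* sample point, per mille */
		const unsigned int sync_seg = 1;	/* CAN_SYNC_SEG */
		const unsigned int tseg = 15;		/* tseg1 + tseg2, in tq */

		/* the i = 0 variant of the loop in can_update_sample_point() */
		unsigned int tseg2 = tseg + sync_seg -
			(spt_nominal * (tseg + sync_seg)) / 1000;  /* 16 - 14 = 2 */
		unsigned int tseg1 = tseg - tseg2;                 /* 13 */
		unsigned int spt = 1000 * (tseg + sync_seg - tseg2) /
			(tseg + sync_seg);                         /* 875 = 87.5% */

		assert(tseg1 == 13 && tseg2 == 2 && spt == 875);
		return 0;
	}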
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
new file mode 100644
index 000000000000..311d8564d611
--- /dev/null
+++ b/drivers/net/can/dev/dev.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/can-ml.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/can/led.h>
+#include <linux/of.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
+ default:
+ break;
+ }
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+
+ return "<unknown>";
+}
+EXPORT_SYMBOL_GPL(can_get_state_str);
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (!cf)
+ return;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
+
+/* CAN device restart for bus-off recovery */
+static void can_restart(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ BUG_ON(netif_carrier_ok(dev));
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ goto restart;
+
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+
+ netif_rx_ni(skb);
+
+restart:
+ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+
+ netif_carrier_on(dev);
+ if (err)
+ netdev_err(dev, "Error %d during restart", err);
+}
+
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv,
+ restart_work);
+
+ can_restart(priv->dev);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
+
+ return 0;
+}
+
+/* CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+
+ if (priv->restart_ms)
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
+
+void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_HW_CSUM;
+}
+
+/* Allocate and setup space for the CAN network device */
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct can_ml_priv *can_ml;
+ struct net_device *dev;
+ struct can_priv *priv;
+ int size;
+
+ /* We put the driver's priv, the CAN mid layer priv and the
+ * echo skb into the netdevice's priv. The memory layout for
+ * the netdev_priv is like this:
+ *
+ * +-------------------------+
+ * | driver's priv |
+ * +-------------------------+
+ * | struct can_ml_priv |
+ * +-------------------------+
+ * | array of struct sk_buff |
+ * +-------------------------+
+ */
+
+ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+
+ if (echo_skb_max)
+ size = ALIGN(size, sizeof(struct sk_buff *)) +
+ echo_skb_max * sizeof(struct sk_buff *);
+
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
+
+ if (echo_skb_max) {
+ priv->echo_skb_max = echo_skb_max;
+ priv->echo_skb = (void *)priv +
+ (size - echo_skb_max * sizeof(struct sk_buff *));
+ }
+
+ priv->state = CAN_STATE_STOPPED;
+
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
+
+/* Free space of the CAN network device */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+/* changing MTU and control mode for CAN/CANFD devices */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ /* 'CANFD-only' controllers cannot switch to CAN_MTU */
+ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ return -EINVAL;
+
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ /* check for potential CANFD ability */
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
+
+/* Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.bitrate) {
+ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
+ /* Switch carrier on if device was stopped while in bus-off state */
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
+
+#ifdef CONFIG_OF
+/* Common function that can be used to determine the limitations of
+ * a transceiver when it provides no means to report these limitations
+ * at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
+/* Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+/* Register the CAN network device */
+int register_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
+
+ dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
+
+/* Unregister the CAN network device */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+/* Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ can_led_notifier_init();
+
+ err = can_netlink_register();
+ if (!err)
+ pr_info(MOD_DESC "\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ can_netlink_unregister();
+
+ can_led_notifier_exit();
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
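The helpers above form the usual candev life cycle in a driver's probe path. A minimal sketch under stated assumptions (hypothetical foo driver; foo_bittiming_const, foo_set_mode and FOO_ECHO_SKB_MAX are placeholders; error unwinding trimmed). struct can_priv must stay the first member of the driver's private struct, since the mid layer casts netdev_priv() to it:

	struct foo_priv {
		struct can_priv can;	/* must be the first member */
		void __iomem *regs;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct net_device *ndev;
		struct foo_priv *priv;

		ndev = alloc_candev(sizeof(*priv), FOO_ECHO_SKB_MAX);
		if (!ndev)
			return -ENOMEM;

		priv = netdev_priv(ndev);
		priv->can.clock.freq = 8000000;	/* controller clock in Hz */
		priv->can.bittiming_const = &foo_bittiming_const;
		priv->can.do_set_mode = foo_set_mode;

		of_can_transceiver(ndev);	/* optional DT max-bitrate limit */

		/* wires up can_link_ops and starts with carrier off */
		return register_candev(ndev);
	}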
diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c
new file mode 100644
index 000000000000..b48140b1102e
--- /dev/null
+++ b/drivers/net/can/dev/length.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2012, 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ */
+
+#include <linux/can/dev.h>
+
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64
+};
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc)
+{
+ return dlc2len[dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+
+static const u8 len2dlc[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15 /* 57 - 64 */
+};
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len)
+{
+ /* check for length mapping table size at build time */
+ BUILD_BUG_ON(ARRAY_SIZE(len2dlc) != CANFD_MAX_DLEN + 1);
+
+ if (unlikely(len > CANFD_MAX_DLEN))
+ return CANFD_MAX_DLC;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_fd_len2dlc);
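
A worked example of the mapping (not driver code): CAN FD has no on-wire frame with a 13 byte payload, so a 13 byte length is padded up to the next valid size:

	u8 dlc = can_fd_len2dlc(13);	/* -> 10 */
	u8 len = can_fd_dlc2len(dlc);	/* -> 16 */

This round trip is what the canfd_sanitize_len() helper performs.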
+
+/**
+ * can_skb_get_frame_len() - Calculate the CAN Frame length in bytes
+ * of a given skb.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Do a rough calculation: bit stuffing is ignored and length in bits
+ * is rounded up to a length in bytes.
+ *
+ * Rationale: this function is to be used for the BQL functions
+ * (netdev_sent_queue() and netdev_completed_queue()) which expect a
+ * value in bytes. Just using skb->len is insufficient because it will
+ * return the constant value of CAN(FD)_MTU. Doing the bit stuffing
+ * calculation would be too expensive in terms of computing resources
+ * for no noticeable gain.
+ *
+ * Remarks: The payload of CAN FD frames with the BRS flag set is
+ * sent at a different bitrate. Currently, the can-utils canbusload
+ * tool does not support CAN FD yet, so no benchmark could be run to
+ * measure the impact. There might be room for improvement here.
+ *
+ * Return: length in bytes.
+ */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
+{
+ const struct canfd_frame *cf = (const struct canfd_frame *)skb->data;
+ u8 len;
+
+ if (can_is_canfd_skb(skb))
+ len = canfd_sanitize_len(cf->len);
+ else if (cf->can_id & CAN_RTR_FLAG)
+ len = 0;
+ else
+ len = cf->len;
+
+ if (can_is_canfd_skb(skb)) {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CANFD_FRAME_OVERHEAD_EFF;
+ else
+ len += CANFD_FRAME_OVERHEAD_SFF;
+ } else {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CAN_FRAME_OVERHEAD_EFF;
+ else
+ len += CAN_FRAME_OVERHEAD_SFF;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
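
A sketch of the intended BQL pairing (fragments only; the idx slot handling and locking are driver specific): the TX path accounts the estimated length when queueing, the TX-complete path reports it back:

	/* in ndo_start_xmit() */
	unsigned int frame_len = can_skb_get_frame_len(skb);

	can_put_echo_skb(skb, dev, 0, frame_len);
	netdev_sent_queue(dev, frame_len);

	/* in the TX-complete handler */
	stats->tx_bytes += can_get_echo_skb(dev, 0, &frame_len);
	netdev_completed_queue(dev, 1, frame_len);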
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
new file mode 100644
index 000000000000..867f6be31230
--- /dev/null
+++ b/drivers/net/can/dev/netlink.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+#include <net/rtnetlink.h>
+
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+};
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ bool is_can_fd = false;
+
+ /* Make sure that valid CAN FD configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_FD set
+ */
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ }
+
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass the bitrate
+ * directly via do_set_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+ u32 maskedflags;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = priv->ctrlmode_static;
+ maskedflags = cm->flags & cm->mask;
+
+ /* check whether provided bits are allowed to be passed */
+ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ return -EOPNOTSUPP;
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure static options are provided by configuration */
+ if ((maskedflags & ctrlstatic) != ctrlstatic)
+ return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass the bitrate
+ * directly via do_set_data_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
+ return 0;
+}
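
From userspace these attributes correspond to ip-link(8) options. An illustrative sequence (the IFF_UP checks above are why the interface must be down first):

	ip link set can0 down
	ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on
	ip link set can0 up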
+
+static size_t can_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size = 0;
+
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
+ size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
+
+ return size;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec = { };
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
+ (priv->do_get_berr_counter &&
+ !priv->do_get_berr_counter(dev, &bec) &&
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const)) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max))
+ )
+
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+ return sizeof(struct can_device_stats);
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (nla_put(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+}
+
+struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .validate = can_validate,
+ .newlink = can_newlink,
+ .changelink = can_changelink,
+ .dellink = can_dellink,
+ .get_size = can_get_size,
+ .fill_info = can_fill_info,
+ .get_xstats_size = can_get_xstats_size,
+ .fill_xstats = can_fill_xstats,
+};
+
+int can_netlink_register(void)
+{
+ return rtnl_link_register(&can_link_ops);
+}
+
+void can_netlink_unregister(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+}
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 3c1912c0430b..ab2c1543786c 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -263,7 +263,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp)
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
@@ -271,7 +272,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
u8 len;
int err;
- skb = __can_get_echo_skb(dev, idx, &len);
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
new file mode 100644
index 000000000000..6a64fe410987
--- /dev/null
+++ b/drivers/net/can/dev/skb.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+/* Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.rst). A driver that does not
+ * support it leaves IFF_ECHO clear in dev->flags, which causes the
+ * PF_CAN core to perform the echo as a fallback solution; both
+ * driver types are implemented so that this handling can be tested.
+ */
+void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < priv->echo_skb_max; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/* Put the skb on the stack to be looped back locally later on
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ /* check whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return -ENOMEM;
+
+ /* adjust skb settings for the echo here to reduce code in IRQ context */
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save frame_len to reuse it when transmission is completed */
+ can_skb_prv(skb)->frame_len = frame_len;
+
+ skb_tx_timestamp(skb);
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+ kfree_skb(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
+
+struct sk_buff *
+__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
+ unsigned int *frame_len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct sk_buff *skb;
+ u8 len;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
+
+/* Remove the skb from the stack and free it.
+ *
+ * The function is typically called when TX failed.
+ */
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ if (priv->echo_skb[idx]) {
+ dev_kfree_skb_any(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_free_echo_skb);
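
Taken together, the echo helpers give a driver this life cycle (a sketch; the idx slot management is driver specific):

	/* ndo_start_xmit(): park the skb for the later echo */
	can_put_echo_skb(skb, dev, idx, 0);

	/* TX-done interrupt: loop it back and update statistics */
	stats->tx_bytes += can_get_echo_skb(dev, idx, NULL);
	stats->tx_packets++;

	/* transmission aborted or flushed: drop the parked skb instead */
	can_free_echo_skb(dev, idx);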
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
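
A sketch of the matching RX path (foo_rx() and its arguments are hypothetical): allocate the skb, fill the frame, and hand it to the stack:

	static void foo_rx(struct net_device *dev, canid_t id,
			   const u8 *data, u8 len)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_skb(dev, &cf);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}

		cf->can_id = id;
		cf->len = len;
		memcpy(cf->data, data, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_rx(skb);
	}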
+
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->len = CAN_ERR_DLC;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
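
Error frames follow the same pattern; a minimal sketch of a bus-error handler built on alloc_can_err_skb():

	static void foo_handle_bus_error(struct net_device *dev)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_err_skb(dev, &cf);
		if (!skb)
			return;

		/* CAN_ERR_FLAG and CAN_ERR_DLC are already set by the helper */
		cf->can_id |= CAN_ERR_BUSERROR;
		netif_rx(skb);
	}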
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 038fe1036df2..971ada36e37f 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,6 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -17,6 +18,7 @@
#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -242,6 +244,8 @@
#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
/* support memory detection and correction */
#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -347,6 +351,7 @@ struct flexcan_priv {
u8 mb_count;
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
u64 rx_mask;
u64 tx_mask;
@@ -358,6 +363,9 @@ struct flexcan_priv {
struct regulator *reg_xceiver;
struct flexcan_stop_mode stm;
+ /* IPC handle used when stop mode is set up via System Controller firmware (SCFW) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
/* Read and Write APIs */
u32 (*read)(void __iomem *addr);
void (*write)(u32 val, void __iomem *addr);
@@ -387,7 +395,7 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -546,18 +554,42 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
priv->write(reg_mcr, &regs->mcr);
}
+static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled)
+{
+ u8 idx = priv->scu_idx;
+ u32 rsrc_id, val;
+
+ rsrc_id = IMX_SC_R_CAN(idx);
+
+ if (enabled)
+ val = 1;
+ else
+ val = 0;
+
+ /* stop mode request via scu firmware */
+ return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id,
+ IMX_SC_C_IPG_STOP, val);
+}
+
static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
reg_mcr = priv->read(&regs->mcr);
reg_mcr |= FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, true);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ }
return flexcan_low_power_enter_ack(priv);
}
@@ -566,10 +598,17 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
/* remove stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 0);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, false);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+ }
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
@@ -776,7 +815,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
priv->write(can_id, &priv->tx_mb->can_id);
priv->write(ctrl, &priv->tx_mb->can_ctrl);
@@ -1083,8 +1122,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
- stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
- 0, reg_ctrl << 16);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload, 0,
+ reg_ctrl << 16, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
@@ -1867,7 +1907,7 @@ static void unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
-static int flexcan_setup_stop_mode(struct platform_device *pdev)
+static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -1912,11 +1952,6 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
"gpr %s req_gpr=0x02%x req_bit=%u\n",
gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
- device_set_wakeup_capable(&pdev->dev, true);
-
- if (of_property_read_bool(np, "wakeup-source"))
- device_set_wakeup_enable(&pdev->dev, true);
-
return 0;
out_put_node:
@@ -1924,6 +1959,58 @@ out_put_node:
return ret;
}
+static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ u8 scu_idx;
+ int ret;
+
+ ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "failed to get scu index\n");
+ return ret;
+ }
+
+ priv = netdev_priv(dev);
+ priv->scu_idx = scu_idx;
+
+ /* imx_scu_get_handle() may return -EPROBE_DEFER to request deferred probe */
+ return imx_scu_get_handle(&priv->sc_ipc_handle);
+}
+
+/* flexcan_setup_stop_mode - Set up stop mode for wakeup
+ *
+ * Return: 0 if stop mode was set up successfully or the feature is not
+ * supported; < 0 if setting up stop mode failed (may be -EPROBE_DEFER)
+ */
+static int flexcan_setup_stop_mode(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ int ret;
+
+ priv = netdev_priv(dev);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ ret = flexcan_setup_stop_mode_scfw(pdev);
+ else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ ret = flexcan_setup_stop_mode_gpr(pdev);
+ else
+ /* return 0 directly if the device doesn't support the stop mode feature */
+ return 0;
+
+ if (ret)
+ return ret;
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ if (of_property_read_bool(pdev->dev.of_node, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ return 0;
+}
+
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
@@ -2054,17 +2141,20 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ err = flexcan_setup_stop_mode(pdev);
+ if (err < 0) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "setup stop mode failed\n");
+ goto failed_setup_stop_mode;
+ }
+
of_can_transceiver(dev);
devm_can_led_init(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
- err = flexcan_setup_stop_mode(pdev);
- if (err)
- dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
- }
-
return 0;
+ failed_setup_stop_mode:
+ unregister_flexcandev(dev);
failed_register:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index f5d94a692576..4a8453290530 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -517,7 +517,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
stats->tx_packets++;
stats->tx_bytes += priv->txdlc[i];
priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i);
+ can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i);
@@ -1448,7 +1448,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* taken.
*/
priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
- can_put_echo_skb(skb, dev, slotindex);
+ can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
* read from the memory
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 86b0e1406a21..5bb957a26bc6 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -629,7 +629,7 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* TX IRQ */
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
- stats->tx_bytes += can_get_echo_skb(ndev, 0);
+ stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -922,7 +922,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
/* Start the transmission */
writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 969cedb9b0b6..37e05010ca91 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -778,7 +778,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx);
+ can_put_echo_skb(skb, netdev, can->echo_idx, 0);
/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
@@ -1467,7 +1467,7 @@ static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
struct net_device_stats *stats = &can->can.dev->stats;
stats->tx_bytes += dlc;
@@ -1533,7 +1533,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
index ef7963ff2006..d717bbc9e033 100644
--- a/drivers/net/can/m_can/Makefile
+++ b/drivers/net/can/m_can/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_CAN_M_CAN) += m_can.o
obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o
obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
+
+tcan4x5x-objs :=
+tcan4x5x-objs += tcan4x5x-core.o
+tcan4x5x-objs += tcan4x5x-regmap.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..3752520a7d4b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -930,7 +930,7 @@ static void m_can_echo_tx_event(struct net_device *dev)
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
stats->tx_packets++;
}
}
@@ -972,7 +972,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
@@ -1483,7 +1483,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1554,7 +1554,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx);
+ can_put_echo_skb(skb, dev, putidx, 0);
/* Enable TX FIFO element to start transfer */
m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 24c737c4fc44..b7caec769ddb 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -2,15 +2,8 @@
// SPI to CAN driver for the Texas Instruments TCAN4x5x
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
+#include "tcan4x5x.h"
-#include <linux/regulator/consumer.h>
-#include <linux/gpio/consumer.h>
-
-#include "m_can.h"
-
-#define DEVICE_NAME "tcan4x5x"
#define TCAN4X5X_EXT_CLK_DEF 40000000
#define TCAN4X5X_DEV_ID0 0x00
@@ -88,14 +81,10 @@
#define TCAN4X5X_MRAM_START 0x8000
#define TCAN4X5X_MCAN_OFFSET 0x1000
-#define TCAN4X5X_MAX_REGISTER 0x8fff
#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
#define TCAN4X5X_SET_ALL_INT 0xffffffff
-#define TCAN4X5X_WRITE_CMD (0x61 << 24)
-#define TCAN4X5X_READ_CMD (0x41 << 24)
-
#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
#define TCAN4X5X_MODE_SLEEP 0x00
#define TCAN4X5X_MODE_STANDBY BIT(6)
@@ -113,48 +102,12 @@
#define TCAN4X5X_WD_3_S_TIMER BIT(29)
#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
-struct tcan4x5x_priv {
- struct m_can_classdev cdev;
-
- struct regmap *regmap;
- struct spi_device *spi;
-
- struct gpio_desc *reset_gpio;
- struct gpio_desc *device_wake_gpio;
- struct gpio_desc *device_state_gpio;
- struct regulator *power;
-};
-
static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
return container_of(cdev, struct tcan4x5x_priv, cdev);
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -191,72 +144,6 @@ static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
return ret;
}
-static int regmap_spi_gather_write(void *context, const void *reg,
- size_t reg_len, const void *val,
- size_t val_len)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message m;
- u32 addr;
- struct spi_transfer t[2] = {
- { .tx_buf = &addr, .len = reg_len, .cs_change = 0,},
- { .tx_buf = val, .len = val_len, },
- };
-
- addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2;
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
-{
- u16 *reg = (u16 *)(data);
- const u32 *val = data + 4;
-
- return regmap_spi_gather_write(context, reg, 4, val, count - 4);
-}
-
-static int regmap_spi_async_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len,
- struct regmap_async *a)
-{
- return -ENOTSUPP;
-}
-
-static struct regmap_async *regmap_spi_async_alloc(void)
-{
- return NULL;
-}
-
-static int tcan4x5x_regmap_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
-
- u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2;
-
- return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size);
-}
-
-static struct regmap_bus tcan4x5x_bus = {
- .write = tcan4x5x_regmap_write,
- .gather_write = regmap_spi_gather_write,
- .async_write = regmap_spi_async_write,
- .async_alloc = regmap_spi_async_alloc,
- .read = tcan4x5x_regmap_read,
- .read_flag_mask = 0x00,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
{
struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
@@ -411,13 +298,6 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
return 0;
}
-static const struct regmap_config tcan4x5x_regmap = {
- .reg_bits = 32,
- .val_bits = 32,
- .cache_type = REGCACHE_NONE,
- .max_register = TCAN4X5X_MAX_REGISTER,
-};
-
static struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
.read_reg = tcan4x5x_read_reg,
@@ -469,24 +349,19 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
/* Configure the SPI bus */
- spi->bits_per_word = 32;
+ spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
goto out_m_can_class_free_dev;
- priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
- &spi->dev, &tcan4x5x_regmap);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
+ ret = tcan4x5x_regmap_init(priv);
+ if (ret)
goto out_m_can_class_free_dev;
- }
ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -528,23 +403,26 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
}
static const struct of_device_id tcan4x5x_of_match[] = {
- { .compatible = "ti,tcan4x5x", },
- { }
+ {
+ .compatible = "ti,tcan4x5x",
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);
static const struct spi_device_id tcan4x5x_id_table[] = {
{
- .name = "tcan4x5x",
- .driver_data = 0,
+ .name = "tcan4x5x",
+ }, {
+ /* sentinel */
},
- { }
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
- .name = DEVICE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
.pm = NULL,
},
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
new file mode 100644
index 000000000000..ca80dbaf7a3f
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2018-2019 Texas Instruments Incorporated
+// http://www.ti.com/
+
+#include "tcan4x5x.h"
+
+#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
+#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
+
+#define TCAN4X5X_MAX_REGISTER 0x8ffc
+
+static int tcan4x5x_regmap_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
+{
+ return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32),
+ data + sizeof(__be32),
+ count - sizeof(__be32));
+}
+
+static int tcan4x5x_regmap_read(void *context,
+ const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx;
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[2] = {
+ {
+ .tx_buf = buf_tx,
+ }
+ };
+ struct spi_message msg;
+ int err;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ if (TCAN4X5X_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range tcan4x5x_reg_table_yes_range[] = {
+ regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */
+ regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags */
+ regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
+ regmap_reg_range(0x8000, 0x87fc), /* MRAM */
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table = {
+ .yes_ranges = tcan4x5x_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range),
+};
+
+static const struct regmap_config tcan4x5x_regmap = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .pad_bits = 8,
+ .val_bits = 32,
+ .wr_table = &tcan4x5x_reg_table,
+ .rd_table = &tcan4x5x_reg_table,
+ .max_register = TCAN4X5X_MAX_REGISTER,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus tcan4x5x_bus = {
+ .write = tcan4x5x_regmap_write,
+ .gather_write = tcan4x5x_regmap_gather_write,
+ .read = tcan4x5x_regmap_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ .max_raw_read = 256,
+ .max_raw_write = 256,
+};
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv)
+{
+ priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus,
+ priv->spi, &tcan4x5x_regmap);
+ return PTR_ERR_OR_ZERO(priv->regmap);
+}
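
Once the regmap is set up, the rest of the driver can use plain regmap accessors; for instance (a sketch, error handling omitted, register name taken from the core file above):

	u32 id0;

	/* read the first device ID word through the regmap */
	regmap_read(priv->regmap, TCAN4X5X_DEV_ID0, &id0);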
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
new file mode 100644
index 000000000000..c66da829b795
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+ *
+ * Copyright (c) 2020 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _TCAN4X5X_H
+#define _TCAN4X5X_H
+
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "m_can.h"
+
+#define TCAN4X5X_SANITIZE_SPI 1
+
+struct __packed tcan4x5x_buf_cmd {
+ u8 cmd;
+ __be16 addr;
+ u8 len;
+};
+
+struct tcan4x5x_map_buf {
+ struct tcan4x5x_buf_cmd cmd;
+ u8 data[256 * sizeof(u32)];
+} ____cacheline_aligned;
+
+struct tcan4x5x_priv {
+ struct m_can_classdev cdev;
+
+ struct regmap *regmap;
+ struct spi_device *spi;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *device_wake_gpio;
+ struct gpio_desc *device_state_gpio;
+ struct regulator *power;
+
+ struct tcan4x5x_map_buf map_buf_rx;
+ struct tcan4x5x_map_buf map_buf_tx;
+};
+
+static inline void
+tcan4x5x_spi_cmd_set_len(struct tcan4x5x_buf_cmd *cmd, u8 len)
+{
+ /* convert the length in bytes into the number of u32 words */
+ cmd->len = len >> 2;
+}
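
A worked example of the wire format this layout implies (derived from the struct above, not from chip documentation): a 16 byte regmap write to register 0x1000 serializes as

	/* cmd  0x61        TCAN4X5X_SPI_INSTRUCTION_WRITE >> 24
	 * addr 0x10 0x00   big-endian register address
	 * len  0x04        16 bytes expressed as u32 words
	 */

followed by the 16 payload bytes.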
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv);
+
+#endif
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 5ed00a1558e1..fa32e418eb29 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -270,7 +270,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
- can_put_echo_skb(skb, dev, buf_id);
+ can_put_echo_skb(skb, dev, buf_id, 0);
/* Enable interrupt. */
priv->tx_active |= 1 << buf_id;
@@ -448,7 +448,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
out_8(&regs->cantbsel, mask);
stats->tx_bytes += in_8(&regs->tx.dlr);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id);
+ can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 4f9e7ec192aa..92a54a5fd4c5 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -711,7 +711,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
@@ -924,7 +924,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
&priv->regs->ifregs[1].data[i / 2]);
}
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
/* Set the size of the data. Update if2_mcont */
iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index c5334b0c3038..00847cbaf7b6 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -266,7 +266,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client);
+ can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
@@ -716,7 +716,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->echo_lock, flags);
/* prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, ndev, priv->echo_idx);
+ can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
/* move echo index to the next slot */
priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index c803327f8f79..4870c4ea190a 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -386,7 +386,7 @@ static void rcar_can_tx_done(struct net_device *ndev)
stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
RCAR_CAN_FIFO_DEPTH];
priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
priv->tx_tail++;
netif_wake_queue(ndev);
}
@@ -617,7 +617,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
* the CPU-side pointer for the transmit FIFO to the next
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 2778ed5c61d1..d8d233e62990 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1044,7 +1044,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
stats->tx_packets++;
stats->tx_bytes += priv->tx_len[sent];
priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent);
+ can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
@@ -1390,7 +1390,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b6a7003c51d2..9e86488ba55f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -318,7 +318,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < cf->len; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
cmd_reg_val |= CMD_AT;
@@ -531,7 +531,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_bytes +=
priv->read_reg(priv, SJA1000_FI) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 6ea802c66124..3dbba8d61afb 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -159,7 +159,7 @@ static int tscan1_probe(struct device *dev, unsigned id)
return -ENXIO;
}
-static int tscan1_remove(struct device *dev, unsigned id /*unused*/)
+static void tscan1_remove(struct device *dev, unsigned id /*unused*/)
{
struct net_device *netdev;
struct sja1000_priv *priv;
@@ -179,8 +179,6 @@ static int tscan1_remove(struct device *dev, unsigned id /*unused*/)
release_region(pld_base, TSCAN1_PLD_SIZE);
free_sja1000dev(netdev);
-
- return 0;
}
static struct isa_driver tscan1_isa_driver = {
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a1bd1be09548..30c8d53c9745 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -516,6 +516,7 @@ static struct slcan *slc_alloc(void)
int i;
char name[IFNAMSIZ];
struct net_device *dev = NULL;
+ struct can_ml_priv *can_ml;
struct slcan *sl;
int size;
@@ -538,7 +539,8 @@ static struct slcan *slc_alloc(void)
dev->base_addr = i;
sl = netdev_priv(dev);
- dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
/* Initialize channel control data */
sl->magic = SLCAN_MAGIC;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 40070c930202..c44f3411e561 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -104,7 +104,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
card->tx.last_bus = priv->index;
++card->tx.pending;
++priv->tx.pending;
- can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ can_put_echo_skb(skb, dev, priv->tx.echo_put, 0);
++priv->tx.echo_put;
if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
priv->tx.echo_put = 0;
@@ -284,7 +284,7 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get);
+ can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index f9455de94786..c3e020c90111 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -586,7 +586,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -725,7 +725,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 25859d16d06f..f69fb4238a65 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1002,7 +1002,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -1171,7 +1171,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..3c5b92911d46 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -335,6 +335,8 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
u8 len;
int i, j;
+ netdev_reset_queue(priv->ndev);
+
/* TEF */
tef_ring = priv->tef;
tef_ring->head = 0;
@@ -594,11 +596,9 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
"Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
osc, osc_reference);
return -ETIMEDOUT;
- } else if (err) {
- return err;
}
- return 0;
+ return err;
}
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -649,7 +649,7 @@ static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
if (osc != osc_reference) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
+ "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
osc, osc_reference);
return -ETIMEDOUT;
}
@@ -664,7 +664,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
if (i)
netdev_info(priv->ndev,
- "Retrying to reset Controller.\n");
+ "Retrying to reset controller.\n");
err = mcp251xfd_chip_softreset_do(priv);
if (err == -ETIMEDOUT)
@@ -1237,7 +1237,7 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
}
netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
"full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
"not empty" : "empty",
@@ -1249,7 +1249,8 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
u32 seq, seq_masked, tef_tail_masked;
@@ -1271,7 +1272,8 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
mcp251xfd_get_tef_tail(priv),
- hw_tef_obj->ts);
+ hw_tef_obj->ts,
+ frame_len_ptr);
stats->tx_packets++;
priv->tef->tail++;
@@ -1308,6 +1310,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
const u8 offset, const u8 len)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
(offset > tx_ring->obj_num ||
@@ -1322,12 +1325,13 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
return regmap_bulk_read(priv->map_rx,
mcp251xfd_get_tef_obj_addr(offset),
hw_tef_obj,
- sizeof(*hw_tef_obj) / sizeof(u32) * len);
+ sizeof(*hw_tef_obj) / val_bytes * len);
}
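
Note on the sizeof(u32) -> val_bytes changes in this file: regmap_bulk_read() takes a count of register values, not bytes, and regmap_get_val_bytes() reports how wide one value is for that regmap. A minimal sketch, assuming the driver's 32-bit regmap (so val_bytes is 4; 'addr' stands in for mcp251xfd_get_tef_obj_addr(offset)):

  const int val_bytes = regmap_get_val_bytes(priv->map_rx);  /* 4 here */
  /* values covering 'len' TEF objects; val_count * val_bytes equals the
   * byte length, whatever width the regmap is configured with */
  size_t val_count = sizeof(struct mcp251xfd_hw_tef_obj) / val_bytes * len;
  err = regmap_bulk_read(priv->map_rx, addr, hw_tef_obj, val_count);
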
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
u8 tef_tail, len, l;
int err, i;
@@ -1349,7 +1353,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
}
for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ unsigned int frame_len;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
/* -EAGAIN means the Sequence Number in the TEF
* doesn't match our tef_tail. This can happen if we
* read the TEF objects too early. Leave the loop and let the
@@ -1359,6 +1365,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
goto out_netif_wake_queue;
if (err)
return err;
+
+ total_frame_len += frame_len;
}
out_netif_wake_queue:
@@ -1368,13 +1376,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1396,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
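
Taken together, the netdev_reset_queue()/netdev_sent_queue()/netdev_completed_queue() calls added in this file hook the driver into Byte Queue Limits. A minimal sketch of the pairing (the frame length is stashed in the echo skb on TX and read back on TEF completion):

  /* TX path (mcp251xfd_start_xmit): account bytes handed to the chip */
  frame_len = can_skb_get_frame_len(skb);
  can_put_echo_skb(skb, ndev, tx_head, frame_len);
  netdev_sent_queue(ndev, frame_len);

  /* completion path (TEF handler): 'len' frames completed in one batch */
  netdev_completed_queue(ndev, len, total_frame_len);

  /* ring (re)init: both counters must restart from zero */
  netdev_reset_queue(ndev);
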
@@ -1439,6 +1447,7 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
struct sk_buff *skb)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u8 dlc;
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
u32 sid, eid;
@@ -1454,9 +1463,10 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
hw_rx_obj->id);
}
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
+
/* CANFD */
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
- u8 dlc;
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
cfd->flags |= CANFD_ESI;
@@ -1464,17 +1474,17 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
cfd->flags |= CANFD_BRS;
- dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
cfd->len = can_fd_dlc2len(dlc);
} else {
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
cfd->can_id |= CAN_RTR_FLAG;
- cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
- hw_rx_obj->flags));
+ can_frame_set_cc_len((struct can_frame *)cfd, dlc,
+ priv->can.ctrlmode);
}
- memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
}
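
For reference, the can_frame_set_cc_len()/can_get_cc_dlc() pair used in this patch implements CAN_CTRLMODE_CC_LEN8_DLC: a raw Classical-CAN DLC of 9..15 still carries 8 data bytes, but the original DLC value is preserved. A short illustration (semantics as in include/linux/can/dev.h):

  struct can_frame cf = {};

  /* RX: raw DLC 15 read from the chip, CC_LEN8_DLC enabled */
  can_frame_set_cc_len(&cf, 0xf, CAN_CTRLMODE_CC_LEN8_DLC);
  /* cf.len == 8 (payload bytes), cf.len8_dlc == 15 (raw DLC preserved) */

  /* TX: recover the raw DLC to write back into the TX object */
  u8 dlc = can_get_cc_dlc(&cf, CAN_CTRLMODE_CC_LEN8_DLC);  /* 15 */
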
static int
@@ -1492,7 +1502,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
else
skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
- if (!cfd) {
+ if (!skb) {
stats->rx_dropped++;
return 0;
}
@@ -1511,12 +1521,13 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
const u8 offset, const u8 len)
{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
int err;
err = regmap_bulk_read(priv->map_rx,
mcp251xfd_get_rx_obj_addr(ring, offset),
hw_rx_obj,
- len * ring->obj_size / sizeof(u32));
+ len * ring->obj_size / val_bytes);
return err;
}
@@ -1553,10 +1564,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1581,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
@@ -1878,7 +1889,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
"Controller changed into %s Mode (%u).\n",
mcp251xfd_get_mode_str(mode), mode);
- /* After the application requests Normal mode, the Controller
+ /* After the application requests Normal mode, the controller
* will automatically attempt to retransmit the message that
* caused the TX MAB underflow.
*
@@ -2138,6 +2149,7 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -2163,7 +2175,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
&priv->regs_status,
sizeof(priv->regs_status) /
- sizeof(u32));
+ val_bytes);
if (err)
goto out_fail;
@@ -2301,7 +2313,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
union mcp251xfd_tx_obj_load_buf *load_buf;
u8 dlc;
u32 id, flags;
- int offset, len;
+ int len_sanitized = 0, len;
if (cfd->can_id & CAN_EFF_FLAG) {
u32 sid, eid;
@@ -2322,12 +2334,12 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
* harm, only the lower 7 bits will be transferred into the
* TEF object.
*/
- dlc = can_fd_len2dlc(cfd->len);
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
- FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
if (cfd->can_id & CAN_RTR_FLAG)
flags |= MCP251XFD_OBJ_FLAGS_RTR;
+ else
+ len_sanitized = canfd_sanitize_len(cfd->len);
/* CANFD */
if (can_is_canfd_skb(skb)) {
@@ -2338,8 +2350,15 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
if (cfd->flags & CANFD_BRS)
flags |= MCP251XFD_OBJ_FLAGS_BRS;
+
+ dlc = can_fd_len2dlc(cfd->len);
+ } else {
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ priv->can.ctrlmode);
}
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
+
load_buf = &tx_obj->buf;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
hw_tx_obj = &load_buf->crc.hw_tx_obj;
@@ -2349,17 +2368,22 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
put_unaligned_le32(id, &hw_tx_obj->id);
put_unaligned_le32(flags, &hw_tx_obj->flags);
- /* Clear data at end of CAN frame */
- offset = round_down(cfd->len, sizeof(u32));
- len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
- if (MCP251XFD_SANITIZE_CAN && len)
- memset(hw_tx_obj->data + offset, 0x0, len);
+ /* Copy data */
memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+ /* Clear unused data at end of CAN frame */
+ if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
+ int pad_len;
+
+ pad_len = len_sanitized - cfd->len;
+ if (pad_len)
+ memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
+ }
+
/* Number of bytes to be written into the RAM of the controller */
len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
if (MCP251XFD_SANITIZE_CAN)
- len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
+ len += round_up(len_sanitized, sizeof(u32));
else
len += round_up(cfd->len, sizeof(u32));
@@ -2419,6 +2443,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct mcp251xfd_priv *priv = netdev_priv(ndev);
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
u8 tx_head;
int err;
@@ -2434,10 +2459,12 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
/* Stop queue if we occupy the complete TX FIFO */
tx_head = mcp251xfd_get_tx_head(tx_ring);
tx_ring->head++;
- if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
+ if (mcp251xfd_get_tx_free(tx_ring) == 0)
netif_stop_queue(ndev);
- can_put_echo_skb(skb, ndev, tx_head);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
@@ -2822,32 +2849,28 @@ static int mcp251xfd_probe(struct spi_device *spi)
rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
GPIOD_IN);
- if (PTR_ERR(rx_int) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- else if (IS_ERR(rx_int))
- return PTR_ERR(rx_int);
+ if (IS_ERR(rx_int))
+ return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
+ "Failed to get RX-INT!\n");
reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
- if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- else if (PTR_ERR(reg_vdd) == -ENODEV)
+ if (PTR_ERR(reg_vdd) == -ENODEV)
reg_vdd = NULL;
else if (IS_ERR(reg_vdd))
- return PTR_ERR(reg_vdd);
+ return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
+ "Failed to get VDD regulator!\n");
reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
- if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- else if (PTR_ERR(reg_xceiver) == -ENODEV)
+ if (PTR_ERR(reg_xceiver) == -ENODEV)
reg_xceiver = NULL;
else if (IS_ERR(reg_xceiver))
- return PTR_ERR(reg_xceiver);
+ return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
+ "Failed to get Transceiver regulator!\n");
clk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&spi->dev, PTR_ERR(clk),
+ "Failed to get Oscillator (clock)!\n");
freq = clk_get_rate(clk);
/* Sanity check */
@@ -2886,7 +2909,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO;
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_CC_LEN8_DLC;
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
@@ -2902,7 +2926,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
spi_get_device_id(spi)->driver_data;
/* Errata Reference:
- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
+ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
*
* The SPI can write corrupted data to the RAM at fast SPI
* speeds:
@@ -2915,18 +2939,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
* Ensure that FSCK is less than or equal to 0.85 *
* (FSYSCLK/2).
*
- * Known good and bad combinations are:
+ * Known good combinations are:
*
- * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk Status config
+ * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk config
*
- * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
- * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 9375000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
- * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
- * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 18750000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
- * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz good assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
- * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 9523809 Hz 95.34% 28571429 Hz bad assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
- * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
- * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx>
+ * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default
+ * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default
+ * 2518 40 MHz fsl,imx6dl fsl,imx51-ecspi 15000000 Hz 75.00% 30000000 Hz default
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
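
Worked example of the constraint above for a 40 MHz oscillator: FSCK <= 0.85 * (FSYSCLK / 2) = 0.85 * 20 MHz = 17 MHz, which is why the 16.4 MHz and 16.67 MHz rows in the table are good. A sketch of the clamp in kernel-friendly integer math (mcp251xfd_max_spi_hz() is a made-up name, illustration only):

  /* hypothetical helper, not part of the patch */
  static u32 mcp251xfd_max_spi_hz(u32 fsysclk)
  {
          /* FSCK <= 0.85 * (FSYSCLK / 2) without floating point */
          return fsysclk / 2 / 1000 * 850;
  }
  /* mcp251xfd_max_spi_hz(40000000) == 17000000 */
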
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index cb6398c2a560..480bd4480bdf 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -305,7 +305,7 @@
#define MCP251XFD_OBJ_FLAGS_BRS BIT(6)
#define MCP251XFD_OBJ_FLAGS_RTR BIT(5)
#define MCP251XFD_OBJ_FLAGS_IDE BIT(4)
-#define MCP251XFD_OBJ_FLAGS_DLC GENMASK(3, 0)
+#define MCP251XFD_OBJ_FLAGS_DLC_MASK GENMASK(3, 0)
#define MCP251XFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18)
#define MCP251XFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0)
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 783b63218b7b..54aa7c25c4de 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -448,7 +448,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ);
@@ -655,7 +655,7 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
readl(priv->base +
SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a6850ff0b55b..73245d8836a9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -513,7 +513,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
- can_put_echo_skb(skb, ndev, mbxno);
+ can_put_echo_skb(skb, ndev, mbxno, 0);
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
@@ -757,7 +757,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
stamp = hecc_read_stamp(priv, mbxno);
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
- mbxno, stamp);
+ mbxno, stamp, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 25eee4466364..18f40eb20360 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -518,7 +518,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -801,7 +801,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9eed75a4b678..562acbf454fd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -357,7 +357,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index);
@@ -783,7 +783,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_jobs);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 0487095e1fd0..a00dc1904415 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -370,7 +370,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id);
+ can_get_echo_skb(netdev, hf->echo_id, NULL);
gs_free_tx_context(txc);
@@ -525,7 +525,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, idx);
+ can_put_echo_skb(skb, netdev, idx, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e2d58846c40c..2b7efd296758 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -578,7 +578,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context->priv = priv;
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 480bd2ecb296..dcee8dc828ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1151,7 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 98c016ef0607..59ba7c7beec0 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -594,7 +594,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index df54eb7d4b36..1f649d178010 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -237,7 +237,7 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += ctx->dlc;
can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx);
+ can_get_echo_skb(netdev, ctx->ndx, NULL);
}
if (urb->status)
@@ -355,7 +355,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0);
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
@@ -466,7 +466,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
struct mcba_usb_msg_ka_usb *msg)
{
if (unlikely(priv->usb_ka_first_pass)) {
- netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n",
+ netdev_info(priv->netdev, "PIC USB version %u.%u\n",
msg->soft_ver_major, msg->soft_ver_minor);
priv->usb_ka_first_pass = false;
@@ -492,7 +492,7 @@ static void mcba_usb_process_ka_can(struct mcba_priv *priv,
struct mcba_usb_msg_ka_can *msg)
{
if (unlikely(priv->can_ka_first_pass)) {
- netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n",
+ netdev_info(priv->netdev, "PIC CAN version %u.%u\n",
msg->soft_ver_major, msg->soft_ver_minor);
priv->can_ka_first_pass = false;
@@ -554,7 +554,7 @@ static void mcba_usb_process_rx(struct mcba_priv *priv,
break;
default:
- netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)",
+ netdev_warn(priv->netdev, "Unsupported msg (0x%X)",
msg->cmd_id);
break;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 251835ea15aa..573b11559d73 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -309,7 +309,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
/* do wakeup tx queue in case of success only */
@@ -365,7 +365,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 61631f4fd92a..f347ecc79aef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 7d92da8954fe..fa403c080871 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -672,7 +672,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* update statistics */
up->netdev->stats.tx_packets++;
up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index);
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index);
@@ -1137,7 +1137,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
/* put the skb on can loopback stack */
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_put_echo_skb(skb, up->netdev, echo_index);
+ can_put_echo_skb(skb, up->netdev, echo_index, 0);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
/* transmit it */
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 44478304ff46..e8c42430a4fc 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -585,7 +585,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
@@ -664,7 +664,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_urbs);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 39ca14b0585d..067705e2850b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
- dev->ml_priv = netdev_priv(dev);
+ can_set_ml_priv(dev, netdev_priv(dev));
/* set flags according to driver capabilities */
if (echo)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index fa47bab510bb..8861a7d875e7 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
@@ -139,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
static void vxcan_setup(struct net_device *dev)
{
+ struct can_ml_priv *can_ml;
+
dev->type = ARPHRD_CAN;
dev->mtu = CANFD_MTU;
dev->hard_header_len = 0;
@@ -147,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
dev->flags = (IFF_NOARP|IFF_ECHO);
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
- dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+
+ can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
}
/* forward declaration for rtnl_create_link() */
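
The can_set_ml_priv() helper makes the long-standing layout trick explicit: vxcan reserves one private area large enough for both structures, so the CAN midlayer private data aliases the tail of netdev_priv(). Layout assumed by the ALIGN() arithmetic above, illustration only:

  /*   netdev_priv(dev)
   *   v
   *   [ struct vxcan_priv ][ pad to NETDEV_ALIGN ][ struct can_ml_priv ]
   *                                               ^
   *             priv + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN)
   */
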
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f54edee92eb..37fa19c62d73 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -592,9 +592,9 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
@@ -1292,7 +1292,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
while (frames_sent--) {
stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ priv->tx_max, NULL);
priv->tx_tail++;
stats->tx_packets++;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6a0488589fc..3af373e90806 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -60,6 +60,8 @@ source "drivers/net/dsa/qca/Kconfig"
source "drivers/net/dsa/sja1105/Kconfig"
+source "drivers/net/dsa/xrs700x/Kconfig"
+
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a84adb140a04..f3598c040994 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -24,3 +24,4 @@ obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
obj-y += sja1105/
+obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 288b5a5c3e0d..a162499bcafc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -510,6 +510,52 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
}
EXPORT_SYMBOL(b53_imp_vlan_setup);
+static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
+ bool unicast)
+{
+ u16 uc;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+}
+
+static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
+ bool multicast)
+{
+ u16 mc;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+}
+
+static void b53_port_set_learning(struct b53_device *dev, int port,
+ bool learning)
+{
+ u16 reg;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
+ if (learning)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -522,7 +568,9 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_br_egress_floods(ds, port, true, true);
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -655,7 +703,9 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_brcm_hdr_setup(dev->ds, port);
- b53_br_egress_floods(dev->ds, port, true, true);
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -1375,25 +1425,22 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
return -EOPNOTSUPP;
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
@@ -1404,47 +1451,51 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
return 0;
}
-EXPORT_SYMBOL(b53_vlan_prepare);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
- u16 vid;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
- b53_get_vlan_entry(dev, vid, vl);
+ vl = &dev->vlans[vlan->vid];
- if (vid == 0 && vid == b53_default_pvid(dev))
- untagged = true;
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members |= BIT(port);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag |= BIT(port);
- else
- vl->untag &= ~BIT(port);
+ if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+ untagged = true;
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ vl->members |= BIT(port);
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag |= BIT(port);
+ else
+ vl->untag &= ~BIT(port);
+
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid_end);
- b53_fast_age_vlan(dev, vid);
+ vlan->vid);
+ b53_fast_age_vlan(dev, vlan->vid);
}
+
+ return 0;
}
EXPORT_SYMBOL(b53_vlan_add);
@@ -1454,27 +1505,24 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct b53_vlan *vl;
- u16 vid;
u16 pvid;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ vl = &dev->vlans[vlan->vid];
- b53_get_vlan_entry(dev, vid, vl);
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members &= ~BIT(port);
+ vl->members &= ~BIT(port);
- if (pvid == vid)
- pvid = b53_default_pvid(dev);
+ if (pvid == vlan->vid)
+ pvid = b53_default_pvid(dev);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag &= ~(BIT(port));
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag &= ~(BIT(port));
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
b53_fast_age_vlan(dev, pvid);
@@ -1751,8 +1799,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct b53_device *priv = ds->priv;
@@ -1762,19 +1810,7 @@ int b53_mdb_prepare(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return 0;
-}
-EXPORT_SYMBOL(b53_mdb_prepare);
-
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct b53_device *priv = ds->priv;
- int ret;
-
- ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
- if (ret)
- dev_err(ds->dev, "failed to add MDB entry\n");
+ return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
}
EXPORT_SYMBOL(b53_mdb_add);
@@ -1932,37 +1968,43 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_br_fast_age);
-int b53_br_egress_floods(struct dsa_switch *ds, int port,
- bool unicast, bool multicast)
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
{
- struct b53_device *dev = ds->priv;
- u16 uc, mc;
-
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ return -EINVAL;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ return 0;
+}
+EXPORT_SYMBOL(b53_br_flags_pre);
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_FLOOD)
+ b53_port_set_ucast_flood(ds->priv, port,
+ !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_MCAST_FLOOD)
+ b53_port_set_mcast_flood(ds->priv, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+ if (flags.mask & BR_LEARNING)
+ b53_port_set_learning(ds->priv, port,
+ !!(flags.val & BR_LEARNING));
return 0;
+}
+EXPORT_SYMBOL(b53_br_flags);
+
+int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
+ struct netlink_ext_ack *extack)
+{
+ b53_port_set_mcast_flood(ds->priv, port, mrouter);
+ return 0;
}
-EXPORT_SYMBOL(b53_br_egress_floods);
+EXPORT_SYMBOL(b53_set_mrouter);
static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
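
The removed port_egress_floods hook is superseded by the two-step switchdev bridge-flags interface used above: the -pre callback validates flags.mask only (can every requested bit be offloaded?), and the commit callback applies flags.val. A minimal caller-side sketch:

  struct switchdev_brport_flags flags = {
          .mask = BR_FLOOD | BR_LEARNING, /* bits the bridge wants to change */
          .val  = BR_FLOOD,               /* new values: flood on, learning off */
  };

  err = ds->ops->port_pre_bridge_flags(ds, port, flags, extack);
  if (!err)       /* apply only if every bit in .mask is supported */
          err = ds->ops->port_bridge_flags(ds, port, flags, extack);
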
@@ -2203,11 +2245,12 @@ static const struct dsa_switch_ops b53_switch_ops = {
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
+ .port_pre_bridge_flags = b53_br_flags_pre,
+ .port_bridge_flags = b53_br_flags,
+ .port_set_mrouter = b53_set_mrouter,
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
- .port_egress_floods = b53_br_egress_floods,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -2215,7 +2258,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
.port_max_mtu = b53_get_max_mtu,
@@ -2459,6 +2501,20 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ /* Starfighter 2 */
+ {
+ .chip_id = BCM4908_DEVICE_ID,
+ .dev_name = "BCM4908",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 256,
+ .cpu_port = 8, /* TODO: ports 4, 5, 8 */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
{
.chip_id = BCM7445_DEVICE_ID,
.dev_name = "BCM7445",
@@ -2606,9 +2662,8 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
- ds->configure_vlan_while_not_filtering = true;
ds->untag_bridge_pvid = true;
- dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
+ dev->vlan_enabled = true;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 7c67409bb186..8419bb7f4505 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -64,6 +64,7 @@ struct b53_io_ops {
#define B53_INVALID_LANE 0xff
enum {
+ BCM4908_DEVICE_ID = 0x4908,
BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65,
BCM5389_DEVICE_ID = 0x89,
@@ -325,8 +326,14 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
-int b53_br_egress_floods(struct dsa_switch *ds, int port,
- bool unicast, bool multicast);
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
+ struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
@@ -348,11 +355,10 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
int speed, int duplex,
bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+ struct netlink_ext_ack *extack);
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -361,10 +367,8 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index c90985c294a2..b2c539a42154 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -115,6 +115,7 @@
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
#define B53_IPMC_FLOOD_MASK 0x36
+#define B53_DIS_LEARNING 0x3c
/*
* Override Ports 0-7 State on devices with xMII interfaces (8 bit)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1e9a0adda2d6..5ee8103b8e9c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -105,7 +105,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_IMP;
else
offset = CORE_STS_OVERRIDE_IMP2;
@@ -222,23 +223,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
- /* Enable learning */
- reg = core_readl(priv, CORE_DIS_LEARN);
- reg &= ~BIT(port);
- core_writel(priv, reg, CORE_DIS_LEARN);
-
/* Enable Broadcom tags for that port if requested */
- if (priv->brcm_tag_mask & BIT(port)) {
+ if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
- /* Disable learning on ASP port */
- if (port == 7) {
- reg = core_readl(priv, CORE_DIS_LEARN);
- reg |= BIT(port);
- core_writel(priv, reg, CORE_DIS_LEARN);
- }
- }
-
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
@@ -509,15 +497,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
@@ -715,7 +707,8 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
u32 reg, offset;
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -742,7 +735,8 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -1110,10 +1104,12 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
+ .port_pre_bridge_flags = b53_br_flags_pre,
+ .port_bridge_flags = b53_br_flags,
.port_stp_state_set = b53_br_set_stp_state,
+ .port_set_mrouter = b53_set_mrouter,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -1123,7 +1119,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
};
@@ -1135,6 +1130,30 @@ struct bcm_sf2_of_data {
unsigned int num_cfp_rules;
};
+static const u16 bcm_sf2_4908_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_CROSSBAR] = 0xc8,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
+ .type = BCM4908_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_4908_reg_offsets,
+ .num_cfp_rules = 0, /* FIXME */
+};
+
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
@@ -1183,6 +1202,9 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
};
static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm4908-switch",
+ .data = &bcm_sf2_4908_data
+ },
{ .compatible = "brcm,bcm7445-switch-v4.0",
.data = &bcm_sf2_7445_data
},
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d82cee5d9202..a7e2fcf2df2c 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -885,18 +885,15 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
return -EINVAL;
vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
- vlan.vid_begin = vid;
- vlan.vid_end = vid;
- if (cpu_to_be32(fs->h_ext.data[1]) & 1)
+ vlan.vid = vid;
+ if (be32_to_cpu(fs->h_ext.data[1]) & 1)
vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
else
vlan.flags = 0;
- ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
+ ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
if (ret)
return ret;
-
- ds->ops->port_vlan_add(ds, port_num, &vlan);
}
/*
@@ -942,8 +939,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
return -EINVAL;
if ((fs->flow_type & FLOW_EXT) &&
- !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
- ds->ops->port_vlan_del))
+ !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
return -EOPNOTSUPP;
if (fs->location != RX_CLS_LOC_ANY &&
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index d8a5e6269c0e..1d2d55c9f8aa 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -17,6 +17,7 @@ enum bcm_sf2_reg_offs {
REG_SWITCH_REVISION,
REG_PHY_REVISION,
REG_SPHY_CNTRL,
+ REG_CROSSBAR,
REG_RGMII_0_CNTRL,
REG_RGMII_1_CNTRL,
REG_RGMII_2_CNTRL,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e38906ae8f23..bfdf3324aac3 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -191,7 +191,7 @@ static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
@@ -199,53 +199,37 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct dsa_loop_priv *ps = ds->priv;
- struct mii_bus *bus = ps->bus;
-
- dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d",
- __func__, port, vlan->vid_begin, vlan->vid_end);
-
- /* Just do a sleeping operation to make lockdep checks effective */
- mdiobus_read(bus, ps->port_base + port, MII_BMSR);
-
- if (vlan->vid_end > ARRAY_SIZE(ps->vlans))
- return -ERANGE;
-
- return 0;
-}
-
-static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid;
+
+ if (vlan->vid >= ARRAY_SIZE(ps->vlans))
+ return -ERANGE;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members |= BIT(port);
- if (untagged)
- vl->untagged |= BIT(port);
- else
- vl->untagged &= ~BIT(port);
+ vl->members |= BIT(port);
+ if (untagged)
+ vl->untagged |= BIT(port);
+ else
+ vl->untagged &= ~BIT(port);
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
if (pvid)
- ps->ports[port].pvid = vid;
+ ps->ports[port].pvid = vlan->vid;
+
+ return 0;
}
static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
@@ -253,26 +237,24 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct dsa_loop_priv *ps = ds->priv;
+ u16 pvid = ps->ports[port].pvid;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid, pvid = ps->ports[port].pvid;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members &= ~BIT(port);
- if (untagged)
- vl->untagged &= ~BIT(port);
+ vl->members &= ~BIT(port);
+ if (untagged)
+ vl->untagged &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
ps->ports[port].pvid = pvid;
return 0;
@@ -307,7 +289,6 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.port_bridge_leave = dsa_loop_port_bridge_leave,
.port_stp_state_set = dsa_loop_port_stp_state_set,
.port_vlan_filtering = dsa_loop_port_vlan_filtering,
- .port_vlan_prepare = dsa_loop_port_vlan_prepare,
.port_vlan_add = dsa_loop_port_vlan_add,
.port_vlan_del = dsa_loop_port_vlan_del,
.port_change_mtu = dsa_loop_port_change_mtu,
@@ -344,7 +325,6 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
- ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..9ea2c643f8f8 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,8 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
+ depends on NET_SCH_TAPRIO
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 6420b76ea37c..463137c39db2 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -3,7 +3,7 @@
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
- * Copyright (C) 2019,2020 Linutronix GmbH
+ * Copyright (C) 2019-2021 Linutronix GmbH
* Author Kurt Kanzenbach <kurt@linutronix.de>
*/
@@ -153,6 +153,13 @@ static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
hellcreek_write(hellcreek, val, HR_VIDCFG);
}
+static void hellcreek_select_tgd(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << TR_TGDSEL_TDGSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, TR_TGDSEL);
+}
+
static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
{
u16 val;
@@ -214,12 +221,11 @@ static void hellcreek_feature_detect(struct hellcreek *hellcreek)
features = hellcreek_read(hellcreek, HR_FEABITS0);
- /* Currently we only detect the size of the FDB table */
+ /* Only detect the size of the FDB table. The size and current
+ * utilization can be queried via devlink.
+ */
hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
HR_FEABITS0_FDBBINS_SHIFT) * 32;
-
- dev_info(hellcreek->dev, "Feature detect: FDB entries=%zu\n",
- hellcreek->fdb_entries);
}
static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
@@ -335,7 +341,8 @@ static u16 hellcreek_private_vid(int port)
}
static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
int i;
@@ -348,14 +355,14 @@ static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
*/
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
const u16 restricted_vid = hellcreek_private_vid(i);
- u16 vid;
if (!dsa_is_user_port(ds, i))
continue;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (vid == restricted_vid)
- return -EBUSY;
+ if (vlan->vid == restricted_vid) {
+ NL_SET_ERR_MSG_MOD(extack, "VID restricted by driver");
+ return -EBUSY;
+ }
}
return 0;
@@ -440,34 +447,36 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
mutex_unlock(&hellcreek->reg_lock);
}
-static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
+ int err;
+
+ err = hellcreek_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
- dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
- vlan->vid_begin, vlan->vid_end, port,
- untagged ? "untagged" : "tagged",
+ dev_dbg(hellcreek->dev, "Add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+ hellcreek_apply_vlan(hellcreek, port, vlan->vid, pvid, untagged);
+
+ return 0;
}
static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
- dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
- vlan->vid_begin, vlan->vid_end, port);
+ dev_dbg(hellcreek->dev, "Remove VLAN %d on port %d\n", vlan->vid, port);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_unapply_vlan(hellcreek, port, vid);
+ hellcreek_unapply_vlan(hellcreek, port, vlan->vid);
return 0;
}
@@ -867,13 +876,10 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
vlan_filtering ? "Enable" : "Disable", port);
@@ -998,6 +1004,84 @@ out:
return ret;
}
+static u64 hellcreek_devlink_vlan_table_get(void *priv)
+{
+ struct hellcreek *hellcreek = priv;
+ u64 count = 0;
+ int i;
+
+ mutex_lock(&hellcreek->reg_lock);
+ for (i = 0; i < VLAN_N_VID; ++i)
+ if (hellcreek->vidmbrcfg[i])
+ count++;
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return count;
+}
+
+static u64 hellcreek_devlink_fdb_table_get(void *priv)
+{
+ struct hellcreek *hellcreek = priv;
+ u64 count = 0;
+
+ /* Reading this register has side effects. Synchronize against the other
+ * FDB operations.
+ */
+ mutex_lock(&hellcreek->reg_lock);
+ count = hellcreek_read(hellcreek, HR_FDBMAX);
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return count;
+}
+
+static int hellcreek_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_vlan_params;
+ struct devlink_resource_size_params size_fdb_params;
+ struct hellcreek *hellcreek = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_vlan_params, VLAN_N_VID,
+ VLAN_N_VID,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ devlink_resource_size_params_init(&size_fdb_params,
+ hellcreek->fdb_entries,
+ hellcreek->fdb_entries,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VLAN", VLAN_N_VID,
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_vlan_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "FDB", hellcreek->fdb_entries,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_fdb_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ hellcreek_devlink_vlan_table_get,
+ hellcreek);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+ hellcreek_devlink_fdb_table_get,
+ hellcreek);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+
+ return err;
+}
+
static int hellcreek_setup(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
@@ -1038,11 +1122,6 @@ static int hellcreek_setup(struct dsa_switch *ds)
/* Configure PCP <-> TC mapping */
hellcreek_setup_tc_identity_mapping(hellcreek);
- /* Allow VLAN configurations while not filtering which is the default
- * for new DSA drivers.
- */
- ds->configure_vlan_while_not_filtering = true;
-
/* The VLAN awareness is a global switch setting. Therefore, mixed vlan
* filtering setups are not supported.
*/
@@ -1056,9 +1135,22 @@ static int hellcreek_setup(struct dsa_switch *ds)
return ret;
}
+ /* Register devlink resources with DSA */
+ ret = hellcreek_setup_devlink_resources(ds);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to setup devlink resources!\n");
+ return ret;
+ }
+
return 0;
}
+static void hellcreek_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
+}
+
static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1135,6 +1227,296 @@ out:
return ret;
}
+static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ const struct tc_taprio_sched_entry *cur, *initial, *next;
+ size_t i;
+
+ cur = initial = &schedule->entries[0];
+ next = cur + 1;
+
+ for (i = 1; i <= schedule->num_entries; ++i) {
+ u16 data;
+ u8 gates;
+
+ if (i == schedule->num_entries)
+ gates = initial->gate_mask ^
+ cur->gate_mask;
+ else
+ gates = next->gate_mask ^
+ cur->gate_mask;
+
+ data = gates;
+
+ if (i == schedule->num_entries)
+ data |= TR_GCLDAT_GCLWRLAST;
+
+ /* Gate states */
+ hellcreek_write(hellcreek, data, TR_GCLDAT);
+
+ /* Time interval */
+ hellcreek_write(hellcreek,
+ cur->interval & 0x0000ffff,
+ TR_GCLTIL);
+ hellcreek_write(hellcreek,
+ (cur->interval & 0xffff0000) >> 16,
+ TR_GCLTIH);
+
+ /* Commit entry */
+ data = ((i - 1) << TR_GCLCMD_GCLWRADR_SHIFT) |
+ (initial->gate_mask <<
+ TR_GCLCMD_INIT_GATE_STATES_SHIFT);
+ hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+ cur++;
+ next++;
+ }
+}
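+
+/* Worked example (illustration only, derived from the loop above): for a
+ * two-entry schedule with gate masks 0b0011 and 0b0101, the hardware is
+ * programmed with the *toggled* gates per entry, not the absolute states:
+ *
+ *   entry 0:        0b0101 ^ 0b0011 = 0b0110  (queues 1 and 2 flip)
+ *   entry 1 (last): 0b0011 ^ 0b0101 = 0b0110  (flip back to the start)
+ *
+ * XOR-ing the next mask (or, for the last entry, the initial mask) with the
+ * current one yields exactly the gates that change state; the absolute
+ * initial state is latched via TR_GCLCMD_INIT_GATE_STATES_SHIFT.
+ */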
+
+static void hellcreek_set_cycle_time(struct hellcreek *hellcreek,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ u32 cycle_time = schedule->cycle_time;
+
+ hellcreek_write(hellcreek, cycle_time & 0x0000ffff, TR_CTWRL);
+ hellcreek_write(hellcreek, (cycle_time & 0xffff0000) >> 16, TR_CTWRH);
+}
+
+static void hellcreek_switch_schedule(struct hellcreek *hellcreek,
+ ktime_t start_time)
+{
+ struct timespec64 ts = ktime_to_timespec64(start_time);
+
+ /* Start schedule at this point of time */
+ hellcreek_write(hellcreek, ts.tv_nsec & 0x0000ffff, TR_ESTWRL);
+ hellcreek_write(hellcreek, (ts.tv_nsec & 0xffff0000) >> 16, TR_ESTWRH);
+
+ /* Arm timer, set seconds and switch schedule */
+ hellcreek_write(hellcreek, TR_ESTCMD_ESTARM | TR_ESTCMD_ESTSWCFG |
+ ((ts.tv_sec & TR_ESTCMD_ESTSEC_MASK) <<
+ TR_ESTCMD_ESTSEC_SHIFT), TR_ESTCMD);
+}
+
+static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ s64 base_time_ns, current_ns;
+
+ /* The switch allows a schedule to be started only up to eight seconds in
+ * the future. Therefore, check against the current PTP time whether the
+ * schedule is startable yet.
+ */
+
+ /* Use the "cached" time. That should be alright, as it's updated quite
+ * frequently in the PTP code.
+ */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Calculate difference to admin base time */
+ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
+
+ return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+}
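+
+/* Example (illustration only): with a cached PTP time of 100 s, a base time
+ * of 107 s is startable (delta 7 s < 8 s), while a base time of 120 s is
+ * not; in the latter case the caller re-checks from delayed work every
+ * HELLCREEK_SCHEDULE_PERIOD until the window is reached.
+ */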
+
+static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ ktime_t base_time, current_time;
+ s64 current_ns;
+ u32 cycle_time;
+
+ /* First select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Forward base time into the future if needed */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ current_time = ns_to_ktime(current_ns);
+ base_time = hellcreek_port->current_schedule->base_time;
+ cycle_time = hellcreek_port->current_schedule->cycle_time;
+
+ if (ktime_compare(current_time, base_time) > 0) {
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time, base_time),
+ cycle_time);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle_time);
+ }
+
+ /* Set admin base time and switch schedule */
+ hellcreek_switch_schedule(hellcreek, base_time);
+
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+
+ dev_dbg(hellcreek->dev, "Armed EST timer for port %d\n",
+ hellcreek_port->port);
+}
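+
+/* Example (illustration only): base_time = 10 s, cycle_time = 1 ms and
+ * current_time = 12.0005 s. Then n = 2000 full cycles have elapsed and the
+ * schedule is armed at 10 s + 2001 * 1 ms = 12.001 s, the first cycle
+ * boundary that still lies in the future.
+ */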
+
+static void hellcreek_check_schedule(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek_port *hellcreek_port;
+ struct hellcreek *hellcreek;
+ bool startable;
+
+ hellcreek_port = dw_to_hellcreek_port(dw);
+ hellcreek = hellcreek_port->hellcreek;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek,
+ hellcreek_port->port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, hellcreek_port->port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Reschedule */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+}
+
+static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ bool startable;
+ u16 ctrl;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Configure traffic schedule on port %d\n",
+ port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
+ hellcreek_port->current_schedule = taprio_offload_get(taprio);
+
+ /* Then select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Enable gating and keep defaults */
+ ctrl = (0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT) | TR_TGDCTRL_GATE_EN;
+ hellcreek_write(hellcreek, ctrl, TR_TGDCTRL);
+
+ /* Cancel pending schedule */
+ hellcreek_write(hellcreek, 0x00, TR_ESTCMD);
+
+ /* Setup a new schedule */
+ hellcreek_setup_gcl(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Configure cycle time */
+ hellcreek_set_cycle_time(hellcreek, hellcreek_port->current_schedule);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek, port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return 0;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Schedule periodic schedule check */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+
+ return 0;
+}
+
+static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Remove traffic schedule on port %d\n", port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
+
+ /* Then select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Disable gating and return to regular switching flow */
+ hellcreek_write(hellcreek, 0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT,
+ TR_TGDCTRL);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
+ struct tc_taprio_qopt_offload *schedule)
+{
+ size_t i;
+
+ /* Does this hellcreek version support Qbv in hardware? */
+ if (!hellcreek->pdata->qbv_support)
+ return false;
+
+ /* Cycle time is limited to 32 bit */
+ if (schedule->cycle_time > (u32)-1)
+ return false;
+
+ /* cycle time extension is not supported */
+ if (schedule->cycle_time_extension)
+ return false;
+
+ /* Only set command is supported */
+ for (i = 0; i < schedule->num_entries; ++i)
+ if (schedule->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ return true;
+}
+
+static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (type != TC_SETUP_QDISC_TAPRIO)
+ return -EOPNOTSUPP;
+
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
+
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
+
+ return hellcreek_port_del_schedule(ds, port);
+}
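+
+/* Illustration (not part of the driver): such an offloaded schedule is
+ * typically installed from user space with tc(8); a minimal sketch, with
+ * device name and schedule values purely illustrative:
+ *
+ *   tc qdisc replace dev lan0 parent root taprio \
+ *           num_tc 2 map 0 1 queues 1@0 1@1 \
+ *           base-time 1000000000 \
+ *           sched-entry S 0x1 50000 sched-entry S 0x2 50000 \
+ *           flags 0x2
+ *
+ * flags 0x2 requests full offload, which lands in the handler above.
+ */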
+
static const struct dsa_switch_ops hellcreek_ds_ops = {
.get_ethtool_stats = hellcreek_get_ethtool_stats,
.get_sset_count = hellcreek_get_sset_count,
@@ -1153,13 +1535,14 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_hwtstamp_get = hellcreek_port_hwtstamp_get,
.port_prechangeupper = hellcreek_port_prechangeupper,
.port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_setup_tc = hellcreek_port_setup_tc,
.port_stp_state_set = hellcreek_port_stp_state_set,
.port_txtstamp = hellcreek_port_txtstamp,
.port_vlan_add = hellcreek_vlan_add,
.port_vlan_del = hellcreek_vlan_del,
.port_vlan_filtering = hellcreek_vlan_filtering,
- .port_vlan_prepare = hellcreek_vlan_prepare,
.setup = hellcreek_setup,
+ .teardown = hellcreek_teardown,
};
static int hellcreek_probe(struct platform_device *pdev)
@@ -1208,6 +1591,9 @@ static int hellcreek_probe(struct platform_device *pdev)
port->hellcreek = hellcreek;
port->port = i;
+
+ INIT_DELAYED_WORK(&port->schedule_work,
+ hellcreek_check_schedule);
}
mutex_init(&hellcreek->reg_lock);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index e81781ebc31c..305e76dab34d 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -3,7 +3,7 @@
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
- * Copyright (C) 2019,2020 Linutronix GmbH
+ * Copyright (C) 2019-2021 Linutronix GmbH
* Author Kurt Kanzenbach <kurt@linutronix.de>
*/
@@ -21,6 +21,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <net/dsa.h>
+#include <net/pkt_sched.h>
/* Ports:
* - 0: CPU
@@ -246,6 +247,10 @@ struct hellcreek_port {
/* Per-port timestamping resources */
struct hellcreek_port_hwtstamp port_hwtstamp;
+
+ /* Per-port Qbv schedule information */
+ struct tc_taprio_qopt_offload *current_schedule;
+ struct delayed_work schedule_work;
};
struct hellcreek_fdb_entry {
@@ -283,4 +288,20 @@ struct hellcreek {
size_t fdb_entries;
};
+/* A Qbv schedule can only be started up to 8 seconds in the future. If the delta
+ * between the base time and the current PTP time is larger than 8 seconds, then
+ * use periodic work to check for the schedule to be started. The delayed work
+ * cannot be armed directly to $base_time - 8 + X, because for large deltas the
+ * PTP frequency matters.
+ */
+#define HELLCREEK_SCHEDULE_PERIOD (2 * HZ)
+#define dw_to_hellcreek_port(dw) \
+ container_of(dw, struct hellcreek_port, schedule_work)
+
+/* Devlink resources */
+enum hellcreek_devlink_resource_id {
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+};
+
#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index aa1142d6a9f5..344374025426 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1232,14 +1232,19 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
+ int err;
+
+ err = lan9303_port_mdb_prepare(ds, port, mdb);
+ if (err)
+ return err;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
- lan9303_alr_add_port(chip, mdb->addr, port, false);
+ return lan9303_alr_add_port(chip, mdb->addr, port, false);
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1274,7 +1279,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.port_fdb_add = lan9303_port_fdb_add,
.port_fdb_del = lan9303_port_fdb_del,
.port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_prepare = lan9303_port_mdb_prepare,
.port_mdb_add = lan9303_port_mdb_add,
.port_mdb_del = lan9303_port_mdb_del,
};
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..52e865a3912c 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
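+/* Example (illustration only): GSWIP_MII_CFGp(0) = 0x00, GSWIP_MII_CFGp(1) =
+ * 0x02 and GSWIP_MII_CFGp(5) = 0x0a -- one MII_CFG register per port,
+ * derived from the port number instead of the fixed per-port defines that
+ * this macro replaces.
+ */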
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -738,21 +728,16 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while in bridge */
- if (switchdev_trans_ph_prepare(trans)) {
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
-
- if (!bridge)
- return 0;
-
- if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
- return -EIO;
-
- return 0;
+ if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Dynamic toggling of vlan_filtering not supported");
+ return -EIO;
}
if (vlan_filtering) {
@@ -791,15 +776,8 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
- struct switchdev_trans trans;
-
- /* Skip the prepare phase, this shouldn't return an error
- * during setup.
- */
- trans.ph_prepare = false;
-
gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, &trans);
+ gswip_port_vlan_filtering(ds, i, false, NULL);
}
/* enable Switch */
@@ -822,9 +800,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -854,6 +831,9 @@ static int gswip_setup(struct dsa_switch *ds)
}
gswip_port_enable(ds, cpu_port, NULL);
+
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1152,61 +1132,64 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
- u16 vid;
- int i;
int pos = max_ports;
+ int i, idx = -1;
/* We only support VLAN filtering on bridges */
if (!dsa_is_cpu_port(ds, port) && !bridge)
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int idx = -1;
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vid) {
- idx = i;
+ /* If this VLAN is not programmed yet, we have to reserve
+ * one entry in the VLAN table. Make sure the search for a
+ * free slot resumes at the next position.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
break;
}
}
- /* If this VLAN is not programmed yet, we have to reserve
- * one entry in the VLAN table. Make sure we start at the
- * next position round.
- */
if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
+ return -ENOSPC;
}
}
return 0;
}
-static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
+ int err;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1214,10 +1197,10 @@ static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
* this.
*/
if (dsa_is_cpu_port(ds, port))
- return;
+ return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
+ return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
+ untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
@@ -1226,8 +1209,6 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
- int err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1237,13 +1218,7 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
if (dsa_is_cpu_port(ds, port))
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
- if (err)
- return err;
- }
-
- return 0;
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
}
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
@@ -1447,11 +1422,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1517,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
@@ -1623,7 +1597,6 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_prepare = gswip_port_vlan_prepare,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c973db101b72..b4b7de63ca79 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -784,54 +784,54 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
bool flag,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
}
-static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, new_pvid = 0;
+ u16 data, new_pvid = 0;
u8 fid, member, valid;
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- /* First time to setup the VLAN entry. */
- if (!valid) {
- /* Need to find a way to map VID to FID. */
- fid = 1;
- valid = 1;
- }
- member |= BIT(port);
+ /* First time to setup the VLAN entry. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- new_pvid = vid;
- }
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vlan->vid;
if (new_pvid) {
+ u16 vid;
+
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
vid &= 0xfff;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
}
+
+ return 0;
}
static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
@@ -839,7 +839,7 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, pvid, new_pvid = 0;
+ u16 data, pvid, new_pvid = 0;
u8 fid, member, valid;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -847,24 +847,22 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- member &= ~BIT(port);
+ member &= ~BIT(port);
- /* Invalidate the entry if no more member. */
- if (!member) {
- fid = 0;
- valid = 0;
- }
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
- if (pvid == vid)
- new_pvid = 1;
+ if (pvid == vlan->vid)
+ new_pvid = 1;
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
- }
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
if (new_pvid != pvid)
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
@@ -1098,6 +1096,8 @@ static int ksz8795_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1116,11 +1116,9 @@ static const struct dsa_switch_ops ksz8795_switch_ops = {
.port_stp_state_set = ksz8795_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz8795_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz8795_port_vlan_add,
.port_vlan_del = ksz8795_port_vlan_del,
.port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz_port_mdb_add,
.port_mdb_del = ksz_port_mdb_del,
.port_mirror_add = ksz8795_port_mirror_add,
@@ -1187,6 +1185,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.port_cnt = 5, /* total cpu and user ports */
},
{
+ /*
+ * WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except the port map
+ * contains a gap between external and CPU ports, the
+ * port map is NOT continuous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ */
.chip_id = 0x8794,
.dev_name = "KSZ8794",
.num_vlans = 4096,
@@ -1220,9 +1232,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
dev->num_vlans = chip->num_vlans;
dev->num_alus = chip->num_alus;
dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
+ dev->port_cnt = fls(chip->cpu_ports);
+ dev->cpu_port = fls(chip->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->port_cnt - 1;
dev->cpu_ports = chip->cpu_ports;
-
+ dev->host_mask = chip->cpu_ports;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+ chip->cpu_ports;
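+
+ /* Example (illustration only): with cpu_ports = 0x10 (CPU is
+ * port 4, as in the chip table above), this yields
+ * port_cnt = fls(0x10) = 5, cpu_port = 4, phy_port_cnt = 4
+ * and port_mask = (BIT(4) - 1) | 0x10 = 0x1f.
+ */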
break;
}
}
@@ -1231,17 +1247,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
if (!dev->cpu_ports)
return -ENODEV;
- dev->port_mask = BIT(dev->port_cnt) - 1;
- dev->port_mask |= dev->host_mask;
-
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
dev->mib_cnt = ARRAY_SIZE(mib_names);
- dev->phy_port_cnt = dev->port_cnt - 1;
-
- dev->cpu_port = dev->port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 42e647b67abd..55e5d479acce 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -494,13 +494,10 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
bool flag,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -514,38 +511,41 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
- u16 vid;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return;
- }
-
- vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
- if (untagged)
- vlan_table[1] |= BIT(port);
- else
- vlan_table[1] &= ~BIT(port);
- vlan_table[1] &= ~(BIT(dev->cpu_port));
+ err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
+ return err;
+ }
- vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+ vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return;
- }
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
+ return err;
}
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
+
+ return 0;
}
static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
@@ -554,30 +554,27 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
- u16 vid;
u16 pvid;
ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
pvid = pvid & 0xFFF;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
- vlan_table[2] &= ~BIT(port);
+ vlan_table[2] &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- if (untagged)
- vlan_table[1] &= ~BIT(port);
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
}
ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
@@ -784,14 +781,15 @@ exit:
return ret;
}
-static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
u32 data;
int index;
u32 mac_hi, mac_lo;
+ int err = 0;
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
@@ -806,7 +804,8 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- if (ksz9477_wait_alu_sta_ready(dev)) {
+ err = ksz9477_wait_alu_sta_ready(dev);
+ if (err) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
@@ -829,8 +828,10 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->num_statics) {
+ err = -ENOSPC;
goto exit;
+ }
/* add entry */
static_table[0] = ALU_V_STATIC_VALID;
@@ -852,6 +853,7 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
exit:
mutex_unlock(&dev->alu_mutex);
+ return err;
}
static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1381,6 +1383,8 @@ static int ksz9477_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1399,13 +1403,11 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_stp_state_set = ksz9477_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz9477_port_vlan_add,
.port_vlan_del = ksz9477_port_vlan_del,
.port_fdb_dump = ksz9477_port_fdb_dump,
.port_fdb_add = ksz9477_port_fdb_add,
.port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz9477_port_mdb_add,
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index cf743133b0b9..a7e5ac60baef 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -213,15 +213,6 @@ void ksz_port_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
-
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data)
{
@@ -253,16 +244,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* nothing to do */
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
-
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -284,7 +267,7 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
/* no available entry */
if (index == dev->num_statics && !empty)
- return;
+ return -ENOSPC;
/* add entry */
if (index == dev->num_statics) {
@@ -301,6 +284,8 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
alu.fid = mdb->vid;
}
dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
@@ -400,7 +385,7 @@ int ksz_switch_register(struct ksz_device *dev,
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
- usleep_range(100, 1000);
+ msleep(100);
}
mutex_init(&dev->dev_mutex);
@@ -434,7 +419,7 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (port_num >= dev->port_cnt)
+ if (!(dev->port_mask & BIT(port_num)))
return -EINVAL;
of_get_phy_mode(port,
&dev->ports[port_num].interface);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 720f22275c84..f212775372ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -161,14 +161,10 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a67cac15a724..c17de2bcf2fe 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,6 +18,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <net/dsa.h>
#include "mt7530.h"
@@ -1375,13 +1376,9 @@ mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
}
static int
-mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set. Otherwise, the
@@ -1397,15 +1394,6 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-
static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
@@ -1493,31 +1481,30 @@ mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
-static void
+static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mt7530_hw_vlan_entry new_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid;
mutex_lock(&priv->reg_mutex);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
- mt7530_hw_vlan_update(priv, vid, &new_entry,
- mt7530_hw_vlan_add);
- }
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
if (pvid) {
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
- G0_PORT_VID(vlan->vid_end));
- priv->ports[port].pvid = vlan->vid_end;
+ G0_PORT_VID(vlan->vid));
+ priv->ports[port].pvid = vlan->vid;
}
mutex_unlock(&priv->reg_mutex);
+
+ return 0;
}
static int
@@ -1526,22 +1513,20 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
{
struct mt7530_hw_vlan_entry target_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid, pvid;
+ u16 pvid;
mutex_lock(&priv->reg_mutex);
pvid = priv->ports[port].pvid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&target_entry, port, 0);
- mt7530_hw_vlan_update(priv, vid, &target_entry,
- mt7530_hw_vlan_del);
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
+ mt7530_hw_vlan_del);
- /* PVID is being restored to the default whenever the PVID port
- * is being removed from the VLAN.
- */
- if (pvid == vid)
- pvid = G0_PORT_VID_DEF;
- }
+ /* PVID is being restored to the default whenever the PVID port
+ * is being removed from the VLAN.
+ */
+ if (pvid == vlan->vid)
+ pvid = G0_PORT_VID_DEF;
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
priv->ports[port].pvid = pvid;
@@ -1639,6 +1624,109 @@ mtk_get_tag_protocol(struct dsa_switch *ds, int port,
}
}
+static inline u32
+mt7530_gpio_to_bit(unsigned int offset)
+{
+ /* Map GPIO offset to register bit
+ * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
+ * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
+ * [10: 8] port 2 LED 0..2 as GPIO 6..8
+ * [14:12] port 3 LED 0..2 as GPIO 9..11
+ * [18:16] port 4 LED 0..2 as GPIO 12..14
+ */
+ return BIT(offset + offset / 3);
+}
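+
+/* Example (illustration only): GPIO offset 4 is port 1, LED 1; the register
+ * bit is 4 + 4 / 3 = 5, inside the [6:4] field above. Offset 14 (port 4,
+ * LED 2) maps to bit 14 + 14 / 3 = 18, the top of the [18:16] field.
+ */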
+
+static int
+mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
+}
+
+static void
+mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+}
+
+static int
+mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int
+mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
+ mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
+
+ return 0;
+}
+
+static int
+mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
+
+ return 0;
+}
+
+static int
+mt7530_setup_gpio(struct mt7530_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct gpio_chip *gc;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
+ mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
+ mt7530_write(priv, MT7530_LED_IO_MODE, 0);
+
+ gc->label = "mt7530";
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->get_direction = mt7530_gpio_get_direction;
+ gc->direction_input = mt7530_gpio_direction_input;
+ gc->direction_output = mt7530_gpio_direction_output;
+ gc->get = mt7530_gpio_get;
+ gc->set = mt7530_gpio_set;
+ gc->base = -1;
+ gc->ngpio = 15;
+ gc->can_sleep = true;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
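+
+/* Illustration (not part of the driver): the GPIO chip is registered only
+ * when the switch's device tree node carries the standard "gpio-controller"
+ * property, e.g. (bindings abbreviated, unit address illustrative):
+ *
+ *   switch@1f {
+ *           compatible = "mediatek,mt7530";
+ *           gpio-controller;
+ *           #gpio-cells = <2>;
+ *   };
+ */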
+
static int
mt7530_setup(struct dsa_switch *ds)
{
@@ -1656,7 +1744,6 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
if (priv->id == ID_MT7530) {
@@ -1781,6 +1868,12 @@ mt7530_setup(struct dsa_switch *ds)
}
}
+ if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
+ ret = mt7530_setup_gpio(priv);
+ if (ret)
+ return ret;
+ }
+
mt7530_setup_port5(ds, interface);
/* Flush the FDB table */
@@ -1895,7 +1988,6 @@ mt7531_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
/* Flush the FDB table */
@@ -2618,7 +2710,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
.port_vlan_filtering = mt7530_port_vlan_filtering,
- .port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 32d8969b3ace..64a9bb377e15 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -554,6 +554,26 @@ enum mt7531_clk_skew {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+/* Registers for LED GPIO control (MT7530 only)
+ * All registers follow this pattern:
+ * [ 2: 0] port 0
+ * [ 6: 4] port 1
+ * [10: 8] port 2
+ * [14:12] port 3
+ * [18:16] port 4
+ */
+
+/* LED enable, 0: Disable, 1: Enable (Default) */
+#define MT7530_LED_EN 0x7d00
+/* LED mode, 0: GPIO mode, 1: PHY mode (Default) */
+#define MT7530_LED_IO_MODE 0x7d04
+/* GPIO direction, 0: Input, 1: Output */
+#define MT7530_LED_GPIO_DIR 0x7d10
+/* GPIO output enable, 0: Disable, 1: Enable */
+#define MT7530_LED_GPIO_OE 0x7d14
+/* GPIO value, 0: Low, 1: High */
+#define MT7530_LED_GPIO_DATA 0x7d18
+
#define MT7530_CREV 0x7ffc
#define CHIP_NAME_SHIFT 16
#define MT7530_ID 0x7530
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 51185e4d7d15..05af632b0f59 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -9,23 +9,10 @@ config NET_DSA_MV88E6XXX
This driver adds support for most of the Marvell 88E6xxx models of
Ethernet switch chips, except 88E6060.
-config NET_DSA_MV88E6XXX_GLOBAL2
- bool "Switch Global 2 Registers support"
- default y
- depends on NET_DSA_MV88E6XXX
- help
- This registers set at internal SMI address 0x1C provides extended
- features like EEPROM interface, trunking, cross-chip setup, etc.
-
- It is required on most chips. If the chip you compile the support for
- doesn't have such registers set, say N here. In doubt, say Y.
-
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on NET_DSA_MV88E6XXX_GLOBAL2
depends on PTP_1588_CLOCK
- imply NETWORK_PHY_TIMESTAMPING
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 4b080b448ce7..c8eca2b6f959 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -5,9 +5,9 @@ mv88e6xxx-objs += devlink.o
mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_avb.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o
+mv88e6xxx-objs += global2.o
+mv88e6xxx-objs += global2_avb.o
+mv88e6xxx-objs += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eafe6bedc692..903d619e08ed 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1396,15 +1396,32 @@ static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
+ struct dsa_switch_tree *dst = chip->ds->dst;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Skip the local source device, which uses in-chip port VLAN */
- if (dev != chip->ds->index)
+ if (dev != chip->ds->index) {
pvlan = mv88e6xxx_port_vlan(chip, dev, port);
+ ds = dsa_switch_find(dst->index, dev);
+ dp = ds ? dsa_to_port(ds, port) : NULL;
+ if (dp && dp->lag_dev) {
+ /* As the PVT is used to limit flooding of
+ * FORWARD frames, which use the LAG ID as the
+ * source port, we must translate dev/port to
+ * the special "LAG device" in the PVT, using
+ * the LAG ID as the port number.
+ */
+ dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK;
+ port = dsa_lag_id(dst, dp->lag_dev);
+ }
+ }
+
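+ /* Example (illustration only): if remote dev 1, port 2 is a LAG
+ * member, FORWARD frames from it carry the LAG ID as the source
+ * port, so the PVT entry is written at the trunk "device" with
+ * port = dsa_lag_id() rather than at (1, 2).
+ */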
return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
}
@@ -1529,72 +1546,70 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
- u16 vid_begin, u16 vid_end)
+ u16 vid)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ if (!vid)
+ return -EOPNOTSUPP;
+
/* DSA and CPU ports have to be members of multiple vlans */
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
return 0;
- if (!vid_begin)
- return -EOPNOTSUPP;
-
- vlan.vid = vid_begin - 1;
+ vlan.vid = vid - 1;
vlan.valid = false;
- do {
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
- if (err)
- return err;
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
- if (!vlan.valid)
- break;
+ if (!vlan.valid)
+ return 0;
- if (vlan.vid > vid_end)
- break;
+ if (vlan.vid != vid)
+ return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
- if (!dsa_to_port(ds, i)->slave)
- continue;
+ if (!dsa_to_port(ds, i)->slave)
+ continue;
- if (vlan.member[i] ==
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
- continue;
+ if (vlan.member[i] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
- break; /* same bridge, check next VLAN */
+ if (dsa_to_port(ds, i)->bridge_dev ==
+ dsa_to_port(ds, port)->bridge_dev)
+ break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
- continue;
+ if (!dsa_to_port(ds, i)->bridge_dev)
+ continue;
- dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
- return -EOPNOTSUPP;
- }
- } while (vlan.vid < vid_end);
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
+ port, vlan.vid, i,
+ netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ return -EOPNOTSUPP;
+ }
return 0;
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP;
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -1617,13 +1632,9 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
* members, do not support it (yet) and fallback to software VLAN.
*/
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
- vlan->vid_end);
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid);
mv88e6xxx_reg_unlock(chip);
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
return err;
}
@@ -1676,7 +1687,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!entry.portvec)
entry.state = 0;
} else {
- entry.portvec |= BIT(port);
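+ /* A static unicast entry pins the address to exactly this
+ * port, so overwrite the port vector instead of OR-ing into
+ * it (editorial note: intent inferred from the change, not
+ * stated in the patch).
+ */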
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
entry.state = state;
}
@@ -1923,9 +1938,6 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- if (!vid)
- return -EOPNOTSUPP;
-
vlan.vid = vid - 1;
vlan.valid = false;
@@ -1970,18 +1982,20 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
bool warn;
u8 member;
- u16 vid;
+ int err;
- if (!mv88e6xxx_max_vid(chip))
- return;
+ err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
@@ -1997,16 +2011,25 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn))
- dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
- vid, untagged ? 'u' : 't');
-
- if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
- dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
- vlan->vid_end);
+ err = mv88e6xxx_port_vlan_join(chip, port, vlan->vid, member, warn);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+ vlan->vid, untagged ? 'u' : 't');
+ goto out;
+ }
+ if (pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to set PVID %d\n",
+ port, vlan->vid);
+ goto out;
+ }
+ }
+out:
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
@@ -2055,8 +2078,8 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u16 pvid, vid;
int err = 0;
+ u16 pvid;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
@@ -2067,16 +2090,14 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (err)
goto unlock;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = mv88e6xxx_port_vlan_leave(chip, port, vid);
+ err = mv88e6xxx_port_vlan_leave(chip, port, vlan->vid);
+ if (err)
+ goto unlock;
+
+ if (vlan->vid == pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, 0);
if (err)
goto unlock;
-
- if (vid == pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, 0);
- if (err)
- goto unlock;
- }
}
unlock:
@@ -2415,12 +2436,20 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
bool flood;
+ int err;
/* Upstream ports flood frames with unknown unicast or multicast DA */
flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
- if (chip->info->ops->port_set_egress_floods)
- return chip->info->ops->port_set_egress_floods(chip, port,
- flood, flood);
+ if (chip->info->ops->port_set_ucast_flood) {
+ err = chip->info->ops->port_set_ucast_flood(chip, port, flood);
+ if (err)
+ return err;
+ }
+ if (chip->info->ops->port_set_mcast_flood) {
+ err = chip->info->ops->port_set_mcast_flood(chip, port, flood);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -2860,7 +2889,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- ds->configure_vlan_while_not_filtering = true;
mv88e6xxx_reg_lock(chip);
@@ -3221,7 +3249,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
@@ -3260,7 +3289,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3295,7 +3325,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
@@ -3339,7 +3370,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
@@ -3375,7 +3407,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
@@ -3419,7 +3452,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3469,7 +3503,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3547,7 +3582,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3591,7 +3627,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3642,7 +3679,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3686,7 +3724,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -3737,7 +3776,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_pause = mv88e6185_port_set_pause,
@@ -3782,7 +3822,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_pause_limit = mv88e6390_port_pause_limit,
@@ -3842,7 +3883,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_pause_limit = mv88e6390_port_pause_limit,
@@ -3901,7 +3943,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3961,7 +4004,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4019,7 +4063,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
@@ -4035,8 +4080,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6250_g1_reset,
- .vtu_getnext = mv88e6250_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
.phylink_validate = mv88e6065_phylink_validate,
@@ -4059,7 +4104,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -4118,7 +4164,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4161,7 +4208,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4204,7 +4252,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4257,7 +4306,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4298,7 +4348,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4344,7 +4395,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4406,7 +4458,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -4470,7 +4523,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
@@ -5213,10 +5267,6 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
/* Update the compatible info with the probed one */
chip->info = info;
- err = mv88e6xxx_g2_require(chip);
- if (err)
- return err;
-
dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
chip->info->prod_num, chip->info->name, rev);
@@ -5249,27 +5299,18 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
-static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
-
- return 0;
-}
-
-static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
- MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
- dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
- port);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
@@ -5359,22 +5400,346 @@ static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
mutex_unlock(&chip->reg_lock);
}
-static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
- bool unicast, bool multicast)
+static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ ops = chip->info->ops;
+
+ if ((flags.mask & BR_FLOOD) && !ops->port_set_ucast_flood)
+ return -EINVAL;
+
+ if ((flags.mask & BR_MCAST_FLOOD) && !ops->port_set_mcast_flood)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err = -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
- if (chip->info->ops->port_set_egress_floods)
- err = chip->info->ops->port_set_egress_floods(chip, port,
- unicast,
- multicast);
+
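+	/* flags.mask selects which bridge port flags to update;
+	 * flags.val carries the requested values for those flags.
+	 */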
+ if (flags.mask & BR_FLOOD) {
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ err = chip->info->ops->port_set_ucast_flood(chip, port,
+ unicast);
+ if (err)
+ goto out;
+ }
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+
+ err = chip->info->ops->port_set_mcast_flood(chip, port,
+ multicast);
+ if (err)
+ goto out;
+ }
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port,
+ bool mrouter,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!chip->info->ops->port_set_mcast_flood)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ int id, members = 0;
+
+ if (!mv88e6xxx_has_lag(chip))
+ return false;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ /* Includes the port joining the LAG */
+ members++;
+
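+	/* The trunk mask table has eight hash buckets, so the hardware
+	 * can spread traffic over at most eight ports per LAG.
+	 */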
+ if (members > 8)
+ return false;
+
+ /* We could potentially relax this to include active
+ * backup in the future.
+ */
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ /* Ideally we would also validate that the hash type matches
+ * the hardware. Alas, this is always set to unknown on team
+ * interfaces.
+ */
+ return true;
+}
+
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ u16 map = 0;
+ int id;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ /* Build the map of all ports to distribute flows destined for
+ * this LAG. This can be either a local user port, or a DSA
+ * port if the LAG port is on a remote chip.
+ */
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
+
+ return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
+}
+
+static const u8 mv88e6xxx_lag_mask_table[8][8] = {
+ /* Row number corresponds to the number of active members in a
+ * LAG. Each column states which of the eight hash buckets are
+	 * mapped to the port at that column index in the LAG.
+ *
+ * Example: In a LAG with three active ports, the second port
+ * ([2][1]) would be selected for traffic mapped to buckets
+ * 3,4,5 (0x38).
+ */
+ { 0xff, 0, 0, 0, 0, 0, 0, 0 },
+ { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
+ { 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
+ { 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
+ { 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
+ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
+};
+
+static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
+ int num_tx, int nth)
+{
+ u8 active = 0;
+ int i;
+
+	num_tx = min(num_tx, 8);
+ if (nth < num_tx)
+ active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
+
+ for (i = 0; i < 8; i++) {
+ if (BIT(i) & active)
+ mask[i] |= BIT(port);
+ }
+}
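+
+/* Worked example: for a LAG whose Tx set holds ports 1, 2 and 5
+ * (num_tx = 3, nth = 0, 1, 2), row [2] of the table above yields
+ * active = 0x07, 0x38 and 0xc0 respectively, so BIT(1) is set in
+ * mask[0..2], BIT(2) in mask[3..5] and BIT(5) in mask[6..7].
+ */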
+
+static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ unsigned int id, num_tx;
+ struct net_device *lag;
+ struct dsa_port *dp;
+ int i, err, nth;
+ u16 mask[8];
+ u16 ivec;
+
+ /* Assume no port is a member of any LAG. */
+ ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
+
+ /* Disable all masks for ports that _are_ members of a LAG. */
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (!dp->lag_dev || dp->ds != ds)
+ continue;
+
+ ivec &= ~BIT(dp->index);
+ }
+
+ for (i = 0; i < 8; i++)
+ mask[i] = ivec;
+
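+	/* E.g. on a 7-port chip where only port 3 is in a LAG, every
+	 * mask now reads 0x77: flooding reaches all non-LAG ports, and
+	 * the loop below re-enables LAG members only in the hash
+	 * buckets they serve.
+	 */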
+ /* Enable the correct subset of masks for all LAG ports that
+ * are in the Tx set.
+ */
+ dsa_lags_foreach_id(id, ds->dst) {
+ lag = dsa_lag_dev(ds->dst, id);
+ if (!lag)
+ continue;
+
+ num_tx = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (dp->lag_tx_enabled)
+ num_tx++;
+ }
+
+ if (!num_tx)
+ continue;
+
+ nth = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (!dp->lag_tx_enabled)
+ continue;
+
+ if (dp->ds == ds)
+ mv88e6xxx_lag_set_port_mask(mask, dp->index,
+ num_tx, nth);
+
+ nth++;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
+ struct net_device *lag)
+{
+ int err;
+
+ err = mv88e6xxx_lag_sync_masks(ds);
+
+ if (!err)
+ err = mv88e6xxx_lag_sync_map(ds, lag);
+
+ return err;
+}
+
+static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err, id;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ id = dsa_lag_id(ds->dst, lag);
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_set_trunk(chip, port, true, id);
+ if (err)
+ goto err_unlock;
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto err_clear_trunk;
+
+ mv88e6xxx_reg_unlock(chip);
+ return 0;
+
+err_clear_trunk:
+ mv88e6xxx_port_set_trunk(chip, port, false, 0);
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
return err;
}
+static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_trunk;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_trunk;
+}
+
+static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
+ int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_pvt;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_pvt;
+}
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
@@ -5404,17 +5769,17 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.set_ageing_time = mv88e6xxx_set_ageing_time,
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
- .port_egress_floods = mv88e6xxx_port_egress_floods,
+ .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
+ .port_bridge_flags = mv88e6xxx_port_bridge_flags,
+ .port_set_mrouter = mv88e6xxx_port_set_mrouter,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
@@ -5429,6 +5794,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
.devlink_info_get = mv88e6xxx_devlink_info_get,
+ .port_lag_change = mv88e6xxx_port_lag_change,
+ .port_lag_join = mv88e6xxx_port_lag_join,
+ .port_lag_leave = mv88e6xxx_port_lag_leave,
+ .crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
+ .crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
+ .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -5448,6 +5819,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
+ /* Some chips support up to 32, but that requires enabling the
+ * 5-bit port mode, which we do not support. 640k^W16 ought to
+ * be enough for anyone.
+ */
+ ds->num_lag_ids = mv88e6xxx_has_lag(chip) ? 16 : 0;
+
dev_set_drvdata(dev, ds);
return dsa_register_switch(ds);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 3543055bcb51..a57c8886f3ac 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -454,8 +454,10 @@ struct mv88e6xxx_ops {
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
- int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast);
+ int (*port_set_ucast_flood)(struct mv88e6xxx_chip *chip, int port,
+ bool unicast);
+ int (*port_set_mcast_flood)(struct mv88e6xxx_chip *chip, int port,
+ bool multicast);
int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
@@ -662,6 +664,11 @@ static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
return chip->info->pvt;
}
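+/* The trunk mask and mapping registers live in Global 2, so LAG
+ * offloading is available on any chip that has a Global 2 block.
+ */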
+static inline bool mv88e6xxx_has_lag(struct mv88e6xxx_chip *chip)
+{
+ return !!chip->info->global2_addr;
+}
+
static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
{
return chip->info->num_databases;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 80a182c5b98a..7c396964d0b2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -336,10 +336,6 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
-int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
-int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 66ddf67b8737..ae12c981923e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -336,35 +336,6 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_vtu_vid_read(chip, entry);
}
-int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 val;
- int err;
-
- err = mv88e6xxx_g1_vtu_getnext(chip, entry);
- if (err)
- return err;
-
- if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- /* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[5:4] are located in VTU Operation 9:8
- */
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
- if (err)
- return err;
-
- entry->fid = val & 0x000f;
- entry->fid |= (val & 0x0300) >> 4;
- }
-
- return 0;
-}
-
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -385,7 +356,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
/* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[7:4] are located in VTU Operation 11:8
+ * VTU DBNum[7:4] ([5:4] for 6250) are located in VTU Operation 11:8 (9:8)
*/
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
if (err)
@@ -393,6 +364,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
entry->fid = val & 0x000f;
entry->fid |= (val & 0x0f00) >> 4;
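+		/* Chips such as the 6250 implement fewer DBNum bits;
+		 * mask the FID down to the number of databases the
+		 * chip supports (e.g. 64 databases -> keep bits 5:0).
+		 */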
+ entry->fid &= mv88e6xxx_num_databases(chip) - 1;
}
return 0;
@@ -462,35 +434,6 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return 0;
}
-int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
- int err;
-
- err = mv88e6xxx_g1_vtu_op_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
- if (err)
- return err;
-
- if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- /* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[5:4] are located in VTU Operation 9:8
- */
- op |= entry->fid & 0x000f;
- op |= (entry->fid & 0x0030) << 4;
- }
-
- return mv88e6xxx_g1_vtu_op(chip, op);
-}
-
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -512,6 +455,10 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
+ *
+ * For the 6250/6220, the latter are really [5:4] and
+ * 9:8, but in those cases bits 7:6 of entry->fid are
+ * 0 since they have num_databases = 64.
*/
op |= entry->fid & 0x000f;
op |= (entry->fid & 0x00f0) << 4;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 75b227d0f73b..da8bac8813e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -126,8 +126,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
/* Offset 0x07: Trunk Mask Table register */
-static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hash, u16 mask)
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask)
{
u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
@@ -140,8 +140,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
/* Offset 0x08: Trunk Mapping Table register */
-static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
- u16 map)
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 1f42ee656816..4127f82275ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -101,6 +101,7 @@
#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+#define MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK	0x1f
/* Offset 0x0C: Cross-chip Port VLAN Data Register */
#define MV88E6XXX_G2_PVT_DATA 0x0c
@@ -295,13 +296,6 @@
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ 2
-#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
-
-static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
-{
- return 0;
-}
-
int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
@@ -345,6 +339,10 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask);
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map);
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
@@ -365,179 +363,4 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
-#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
-
-static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->global2_addr) {
- dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static inline int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip,
- int reg, int bit, int val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip,
- int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip,
- int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
- u8 *addr)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
- int src_dev, int src_port, u16 data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
-{
-}
-
-static inline int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus)
-{
- return 0;
-}
-
-static inline void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus)
-{
-}
-
-static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
-static const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {};
-static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
-
-static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
-static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
-static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
-
-static const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {};
-
-static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
- bool external)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
- int target, int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
- u16 kind, u16 bin)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
- u16 *stats)
-{
- return -EOPNOTSUPP;
-}
-
-#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
-
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 77a5fd1798cd..4561f289ab76 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -789,8 +789,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
-static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
- int port, bool unicast)
+int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast)
{
int err;
u16 reg;
@@ -807,8 +807,8 @@ static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
-int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast)
+int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool unicast)
{
int err;
u16 reg;
@@ -817,16 +817,28 @@ int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK;
+ if (unicast)
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
+ else
+ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool multicast)
+{
+ int err;
+ u16 reg;
- if (unicast && multicast)
- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA;
- else if (unicast)
- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
- else if (multicast)
- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ if (multicast)
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
else
- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA;
+ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
@@ -851,6 +863,27 @@ int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
+
+ if (trunk)
+ val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
+ (id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
+ else
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+}
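+
+/* For example, joining port 2 to trunk ID 5 rewrites Port Control 1 to
+ * (val & ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK) |
+ * MV88E6XXX_PORT_CTL1_TRUNK_PORT | (5 << 8), i.e. setting 0x4500 in
+ * the upper bits.
+ */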
+
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
@@ -992,8 +1025,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
};
-static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
- int port, bool multicast)
+int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast)
{
int err;
u16 reg;
@@ -1010,18 +1043,6 @@ static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
-int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast)
-{
- int err;
-
- err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
- if (err)
- return err;
-
- return mv88e6185_port_set_default_forward(chip, port, multicast);
-}
-
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 500e1d4896ff..e6d0eaa6aa1d 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -154,11 +154,8 @@
#define MV88E6185_PORT_CTL0_USE_IP 0x0020
#define MV88E6185_PORT_CTL0_USE_TAG 0x0010
#define MV88E6185_PORT_CTL0_FORWARD_UNKNOWN 0x0004
-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK 0x000c
-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA 0x0000
-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA 0x0004
-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA 0x0008
-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA 0x000c
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC 0x0004
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC 0x0008
#define MV88E6XXX_PORT_CTL0_STATE_MASK 0x0003
#define MV88E6XXX_PORT_CTL0_STATE_DISABLED 0x0000
#define MV88E6XXX_PORT_CTL0_STATE_BLOCKING 0x0001
@@ -168,6 +165,9 @@
/* Offset 0x05: Port Control 1 */
#define MV88E6XXX_PORT_CTL1 0x05
#define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
+#define MV88E6XXX_PORT_CTL1_TRUNK_PORT 0x4000
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK 0x0f00
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT 8
#define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
/* Offset 0x06: Port Based VLAN Map */
@@ -340,10 +340,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
-int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast);
-int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast);
+int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast);
+int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast);
+int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool unicast);
+int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool multicast);
int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action);
@@ -351,6 +355,8 @@ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port);
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id);
int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
size_t size);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index c110e82a7973..932b6b6fe817 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -6,6 +6,7 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
select PCS_LYNX
@@ -19,6 +20,7 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select PCS_LYNX
help
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 7dc230677b78..628afb47b579 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019-2021 NXP Semiconductors
*
* This is an umbrella module for all network switches that are
* register-compatible with Ocelot and that perform I/O to their host CPU
@@ -13,8 +13,10 @@
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
+#include <linux/dsa/8021q.h>
+#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
-#include <linux/packing.h>
+#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
@@ -24,11 +26,614 @@
#include <net/dsa.h>
#include "felix.h"
+static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot *ocelot = &felix->ocelot;
+ struct dsa_switch *ds = felix->ds;
+ int key_length, upstream, err;
+
+ /* We don't need to install the rxvlan into the other ports' filtering
+ * tables, because we're just pushing the rxvlan when sending towards
+	 * the CPU.
+ */
+ if (!pvid)
+ return 0;
+
+ key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
+ upstream = dsa_upstream_port(ds, port);
+
+ outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
+ GFP_KERNEL);
+ if (!outer_tagging_rule)
+ return -ENOMEM;
+
+ outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ outer_tagging_rule->prio = 1;
+ outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.tc_offload = false;
+ outer_tagging_rule->block_id = VCAP_ES0;
+ outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ outer_tagging_rule->lookup = 0;
+ outer_tagging_rule->ingress_port.value = port;
+ outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
+ outer_tagging_rule->egress_port.value = upstream;
+ outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
+ outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
+ outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
+ outer_tagging_rule->action.tag_a_vid_sel = 1;
+ outer_tagging_rule->action.vid_a_val = vid;
+
+ err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
+ if (err)
+ kfree(outer_tagging_rule);
+
+ return err;
+}
+
+static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ struct ocelot *ocelot = &felix->ocelot;
+ struct dsa_switch *ds = felix->ds;
+ int upstream, err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (ocelot->ports[port]->is_dsa_8021q_cpu)
+ return 0;
+
+ untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!untagging_rule)
+ return -ENOMEM;
+
+ redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!redirect_rule) {
+ kfree(untagging_rule);
+ return -ENOMEM;
+ }
+
+ upstream = dsa_upstream_port(ds, port);
+
+ untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ untagging_rule->ingress_port_mask = BIT(upstream);
+ untagging_rule->vlan.vid.value = vid;
+ untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
+ untagging_rule->prio = 1;
+ untagging_rule->id.cookie = port;
+ untagging_rule->id.tc_offload = false;
+ untagging_rule->block_id = VCAP_IS1;
+ untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ untagging_rule->lookup = 0;
+ untagging_rule->action.vlan_pop_cnt_ena = true;
+ untagging_rule->action.vlan_pop_cnt = 1;
+ untagging_rule->action.pag_override_mask = 0xff;
+ untagging_rule->action.pag_val = port;
+
+ err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
+ if (err) {
+ kfree(untagging_rule);
+ kfree(redirect_rule);
+ return err;
+ }
+
+ redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ redirect_rule->ingress_port_mask = BIT(upstream);
+ redirect_rule->pag = port;
+ redirect_rule->prio = 1;
+ redirect_rule->id.cookie = port;
+ redirect_rule->id.tc_offload = false;
+ redirect_rule->block_id = VCAP_IS2;
+ redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ redirect_rule->lookup = 0;
+ redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ redirect_rule->action.port_mask = BIT(port);
+
+ err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
+ if (err) {
+ ocelot_vcap_filter_del(ocelot, untagging_rule);
+ kfree(redirect_rule);
+ return err;
+ }
+
+ return 0;
+}
+
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct ocelot *ocelot = ds->priv;
+
+ if (vid_is_dsa_8021q_rxvlan(vid))
+ return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
+ port, vid, pvid, untagged);
+
+ if (vid_is_dsa_8021q_txvlan(vid))
+ return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
+ port, vid, pvid, untagged);
+
+ return 0;
+}
+
+static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = &felix->ocelot;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ port, false);
+ /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
+ * installing outer tagging ES0 rules where they weren't needed.
+ * But in rxvlan_del, the API doesn't give us the "flags" anymore,
+ * so that forces us to be slightly sloppy here, and just assume that
+ * if we didn't find an outer_tagging_rule it means that there was
+ * none in the first place, i.e. rxvlan_del is called on a non-pvid
+ * port. This is most probably true though.
+ */
+ if (!outer_tagging_rule)
+ return 0;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ struct ocelot_vcap_block *block_vcap_is1;
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot *ocelot = &felix->ocelot;
+ int err;
+
+ if (ocelot->ports[port]->is_dsa_8021q_cpu)
+ return 0;
+
+ block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
+ port, false);
+ if (!untagging_rule)
+ return 0;
+
+ err = ocelot_vcap_filter_del(ocelot, untagging_rule);
+ if (err)
+ return err;
+
+ redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
+ port, false);
+ if (!redirect_rule)
+ return 0;
+
+ return ocelot_vcap_filter_del(ocelot, redirect_rule);
+}
+
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ if (vid_is_dsa_8021q_rxvlan(vid))
+ return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
+ port, vid);
+
+ if (vid_is_dsa_8021q_txvlan(vid))
+ return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
+ port, vid);
+
+ return 0;
+}
+
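+/* Invoked by the dsa_8021q core (see dsa_8021q_setup() below) when it
+ * installs or removes the rx/tx VLANs of each port.
+ */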
+static const struct dsa_8021q_ops felix_tag_8021q_ops = {
+ .vlan_add = felix_tag_8021q_vlan_add,
+ .vlan_del = felix_tag_8021q_vlan_del,
+};
+
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->ports[port]->is_dsa_8021q_cpu = true;
+ ocelot->npi = -1;
+
+ /* Overwrite PGID_CPU with the non-tagging port */
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
+
+ ocelot_apply_bridge_fwd_mask(ocelot);
+}
+
+static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
+{
+ ocelot->ports[port]->is_dsa_8021q_cpu = false;
+
+ /* Restore PGID_CPU */
+ ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
+ PGID_CPU);
+
+ ocelot_apply_bridge_fwd_mask(ocelot);
+}
+
+/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
+ * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
+ * tag_8021q CPU port.
+ */
+static int felix_setup_mmio_filtering(struct felix *felix)
+{
+ unsigned long user_ports = 0, cpu_ports = 0;
+ struct ocelot_vcap_filter *redirect_rule;
+ struct ocelot_vcap_filter *tagging_rule;
+ struct ocelot *ocelot = &felix->ocelot;
+ struct dsa_switch *ds = felix->ds;
+ int port, ret;
+
+ tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!tagging_rule)
+ return -ENOMEM;
+
+ redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!redirect_rule) {
+ kfree(tagging_rule);
+ return -ENOMEM;
+ }
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (dsa_is_user_port(ds, port))
+ user_ports |= BIT(port);
+ if (dsa_is_cpu_port(ds, port))
+ cpu_ports |= BIT(port);
+ }
+
+ tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
+ tagging_rule->ingress_port_mask = user_ports;
+ tagging_rule->prio = 1;
+ tagging_rule->id.cookie = ocelot->num_phys_ports;
+ tagging_rule->id.tc_offload = false;
+ tagging_rule->block_id = VCAP_IS1;
+ tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ tagging_rule->lookup = 0;
+ tagging_rule->action.pag_override_mask = 0xff;
+ tagging_rule->action.pag_val = ocelot->num_phys_ports;
+
+ ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
+ if (ret) {
+ kfree(tagging_rule);
+ kfree(redirect_rule);
+ return ret;
+ }
+
+ redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ redirect_rule->ingress_port_mask = user_ports;
+ redirect_rule->pag = ocelot->num_phys_ports;
+ redirect_rule->prio = 1;
+ redirect_rule->id.cookie = ocelot->num_phys_ports;
+ redirect_rule->id.tc_offload = false;
+ redirect_rule->block_id = VCAP_IS2;
+ redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ redirect_rule->lookup = 0;
+ redirect_rule->action.cpu_copy_ena = true;
+ if (felix->info->quirk_no_xtr_irq) {
+ /* Redirect to the tag_8021q CPU but also copy PTP packets to
+ * the CPU port module
+ */
+ redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ redirect_rule->action.port_mask = cpu_ports;
+ } else {
+ /* Trap PTP packets only to the CPU port module (which is
+ * redirected to the NPI port)
+ */
+ redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ redirect_rule->action.port_mask = 0;
+ }
+
+ ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
+ if (ret) {
+ ocelot_vcap_filter_del(ocelot, tagging_rule);
+ kfree(redirect_rule);
+ return ret;
+ }
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
+ return 0;
+}
+
+static int felix_teardown_mmio_filtering(struct felix *felix)
+{
+ struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
+ struct ocelot_vcap_block *block_vcap_is1;
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot *ocelot = &felix->ocelot;
+ int err;
+
+ block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
+ ocelot->num_phys_ports,
+ false);
+ if (!tagging_rule)
+ return -ENOENT;
+
+ err = ocelot_vcap_filter_del(ocelot, tagging_rule);
+ if (err)
+ return err;
+
+ redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
+ ocelot->num_phys_ports,
+ false);
+ if (!redirect_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, redirect_rule);
+}
+
+static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long cpu_flood;
+ int port, err;
+
+ felix_8021q_cpu_port_init(ocelot, cpu);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+		/* This overwrites the configuration from ocelot_init():
+ * Do not forward BPDU frames to the CPU port module,
+ * for 2 reasons:
+ * - When these packets are injected from the tag_8021q
+ * CPU port, we want them to go out, not loop back
+ * into the system.
+ * - STP traffic ingressing on a user port should go to
+ * the tag_8021q CPU port, not to the hardware CPU
+ * port module.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
+ ANA_PORT_CPU_FWD_BPDU_CFG, port);
+ }
+
+ /* In tag_8021q mode, the CPU port module is unused, except for PTP
+ * frames. So we want to disable flooding of any kind to the CPU port
+	 * module, since packets going there will end up in a black hole.
+ */
+ cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
+
+ felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
+ GFP_KERNEL);
+ if (!felix->dsa_8021q_ctx)
+ return -ENOMEM;
+
+ felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops;
+ felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD);
+ felix->dsa_8021q_ctx->ds = ds;
+
+ err = dsa_8021q_setup(felix->dsa_8021q_ctx, true);
+ if (err)
+ goto out_free_dsa_8021_ctx;
+
+ err = felix_setup_mmio_filtering(felix);
+ if (err)
+ goto out_teardown_dsa_8021q;
+
+ return 0;
+
+out_teardown_dsa_8021q:
+ dsa_8021q_setup(felix->dsa_8021q_ctx, false);
+out_free_dsa_8021_ctx:
+ kfree(felix->dsa_8021q_ctx);
+ return err;
+}
+
+static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int err, port;
+
+ err = felix_teardown_mmio_filtering(felix);
+ if (err)
+ dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
+ err);
+
+ err = dsa_8021q_setup(felix->dsa_8021q_ctx, false);
+ if (err)
+ dev_err(ds->dev, "dsa_8021q_setup returned %d", err);
+
+ kfree(felix->dsa_8021q_ctx);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ /* Restore the logic from ocelot_init:
+ * do not forward BPDU frames to the front ports.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ port);
+ }
+
+ felix_8021q_cpu_port_deinit(ocelot, cpu);
+}
+
+/* The CPU port module is connected to the Node Processor Interface (NPI). This
+ * is the mode through which frames can be injected from and extracted to an
+ * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
+ * running Linux, and this forms a DSA setup together with the enetc or fman
+ * DSA master.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->npi_xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->npi_inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
+
+static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
+{
+ /* Restore hardware defaults */
+ int unused_port = ocelot->num_phys_ports + 2;
+
+ ocelot->npi = -1;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
+ QSYS_EXT_CPU_CFG);
+
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+
+ /* Enable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+}
+
+static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cpu_flood;
+
+ felix_npi_port_init(ocelot, cpu);
+
+ /* Include the CPU port module (and indirectly, the NPI port)
+ * in the forwarding mask for unknown unicast - the hardware
+ * default value for ANA_FLOODING_FLD_UNICAST excludes
+ * BIT(ocelot->num_phys_ports), and so does ocelot_init,
+ * since Ocelot relies on whitelisting MAC addresses towards
+ * PGID_CPU.
+ * We do this because DSA does not yet perform RX filtering,
+ * and the NPI port does not perform source address learning,
+ * so traffic sent to Linux is effectively unknown from the
+ * switch's perspective.
+ */
+ cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+ ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
+ ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+
+ return 0;
+}
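
ocelot_rmw_rix() is a read-modify-write on one register instance: only the bits set in the mask argument are replaced by the corresponding bits of the value. A self-contained sketch of that contract, which also shows why the same cpu_flood mask adds the CPU port module bit here and clears it in felix_setup_tag_8021q():

#include <stdint.h>
#include <stdio.h>

static uint32_t rmw(uint32_t old, uint32_t val, uint32_t mask)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t pgid = 0x3f;		/* flood mask: ports 0-5 */
	uint32_t cpu = 1u << 6;		/* CPU port module bit */

	pgid = rmw(pgid, cpu, cpu);	/* NPI mode: include the CPU */
	printf("0x%02x\n", pgid);	/* 0x7f */
	pgid = rmw(pgid, 0, cpu);	/* tag_8021q mode: exclude it */
	printf("0x%02x\n", pgid);	/* 0x3f */
	return 0;
}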
+
+static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_npi_port_deinit(ocelot, cpu);
+}
+
+static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
+ enum dsa_tag_protocol proto)
+{
+ int err;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ err = felix_setup_tag_npi(ds, cpu);
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ err = felix_setup_tag_8021q(ds, cpu);
+ break;
+ default:
+ err = -EPROTONOSUPPORT;
+ }
+
+ return err;
+}
+
+static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
+ enum dsa_tag_protocol proto)
+{
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ felix_teardown_tag_npi(ds, cpu);
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ felix_teardown_tag_8021q(ds, cpu);
+ break;
+ default:
+ break;
+ }
+}
+
+/* This always leaves the switch in a consistent state, because although the
+ * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
+ * or the restoration is guaranteed to work.
+ */
+static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+ enum dsa_tag_protocol proto)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ enum dsa_tag_protocol old_proto = felix->tag_proto;
+ int err;
+
+ if (proto != DSA_TAG_PROTO_SEVILLE &&
+ proto != DSA_TAG_PROTO_OCELOT &&
+ proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ return -EPROTONOSUPPORT;
+
+ felix_del_tag_protocol(ds, cpu, old_proto);
+
+ err = felix_set_tag_protocol(ds, cpu, proto);
+ if (err) {
+ felix_set_tag_protocol(ds, cpu, old_proto);
+ return err;
+ }
+
+ felix->tag_proto = proto;
+
+ return 0;
+}
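
The pattern here is teardown-first with guaranteed rollback: the old protocol is removed up front, and if installing the new one fails, re-installing the old one cannot fail. A generic sketch of the idiom, with hypothetical callbacks standing in for felix_set_tag_protocol()/felix_del_tag_protocol():

typedef int (*proto_set_t)(int proto);
typedef void (*proto_del_t)(int proto);

static int change_proto(proto_set_t set, proto_del_t del, int *cur, int next)
{
	int err;

	del(*cur);		/* tearing down the old protocol cannot fail */

	err = set(next);
	if (err) {
		set(*cur);	/* restoring the old one is guaranteed to work */
		return err;
	}

	*cur = next;
	return 0;
}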
+
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_OCELOT;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto;
}
static int felix_set_ageing_time(struct dsa_switch *ds,
@@ -65,19 +670,12 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
return ocelot_fdb_del(ocelot, port, addr, vid);
}
-/* This callback needs to be present */
-static int felix_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int felix_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
@@ -96,6 +694,26 @@ static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
return ocelot_bridge_stp_state_set(ocelot, port, state);
}
+static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags val,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_pre_bridge_flags(ocelot, port, val);
+}
+
+static int felix_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags val,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_bridge_flags(ocelot, port, val);
+
+ return 0;
+}
+
static int felix_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
@@ -112,12 +730,40 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
ocelot_port_bridge_leave(ocelot, port, br);
}
+static int felix_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_lag_join(ocelot, port, bond, info);
+}
+
+static int felix_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_lag_leave(ocelot, port, bond);
+
+ return 0;
+}
+
+static int felix_lag_change(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
+
+ return 0;
+}
+
static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid, flags = vlan->flags;
- int err;
+ u16 flags = vlan->flags;
/* Ocelot switches copy frames as-is to the CPU, so the flags:
* egress-untagged or not, pvid or not, make no difference. This
@@ -130,61 +776,42 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
if (port == ocelot->npi)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_prepare(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err)
- return err;
- }
-
- return 0;
+ return ocelot_vlan_prepare(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_vlan_filtering(ocelot, port, enabled, trans);
+ return ocelot_port_vlan_filtering(ocelot, port, enabled);
}
-static void felix_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
u16 flags = vlan->flags;
- u16 vid;
int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_add(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err) {
- dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
- vid, port, err);
- return;
- }
- }
+ err = felix_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
+ return ocelot_vlan_add(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int felix_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid;
- int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_del(ocelot, port, vid);
- if (err) {
- dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
- vid, port, err);
- return err;
- }
- }
- return 0;
+ return ocelot_vlan_del(ocelot, port, vlan->vid);
}
static int felix_port_enable(struct dsa_switch *ds, int port,
@@ -233,9 +860,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
-	ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+			 DEV_MAC_ENA_CFG);
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+
+ err = ocelot_port_flush(ocelot, port);
+ if (err)
+ dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+ port, err);
+
+ /* Put the port in reset. */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -328,7 +970,7 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
ANA_PORT_QOS_CFG,
port);
- for (i = 0; i < FELIX_NUM_TC * 2; i++) {
+ for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
ocelot_rmw_ix(ocelot,
(ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
@@ -451,12 +1093,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
- ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
- ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
- ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->devlink = felix->ds->devlink;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
@@ -501,7 +1143,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
struct regmap *target;
- u8 *template;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -527,22 +1168,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return PTR_ERR(target);
}
- template = devm_kzalloc(ocelot->dev, OCELOT_TOTAL_TAG_LEN,
- GFP_KERNEL);
- if (!template) {
- dev_err(ocelot->dev,
- "Failed to allocate memory for DSA tag\n");
- kfree(port_phy_modes);
- return -ENOMEM;
- }
-
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
- ocelot_port->xmit_template = template;
ocelot->ports[port] = ocelot_port;
-
- felix->info->xmit_template_populate(ocelot, port);
}
kfree(port_phy_modes);
@@ -556,28 +1185,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
-/* The CPU port module is connected to the Node Processor Interface (NPI). This
- * is the mode through which frames can be injected from and extracted to an
- * external CPU, over Ethernet.
- */
-static void felix_npi_port_init(struct ocelot *ocelot, int port)
-{
- ocelot->npi = port;
-
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
- QSYS_EXT_CPU_CFG);
-
- /* NPI port Injection/Extraction configuration */
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
- ocelot->xtr_prefix);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
- ocelot->inj_prefix);
-
- /* Disable transmission of pause frames */
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
-}
-
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -595,7 +1202,7 @@ static int felix_setup(struct dsa_switch *ds)
err = ocelot_init(ocelot);
if (err)
- return err;
+ goto out_mdiobus_free;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
@@ -607,10 +1214,10 @@ static int felix_setup(struct dsa_switch *ds)
}
for (port = 0; port < ds->num_ports; port++) {
- ocelot_init_port(ocelot, port);
+ if (dsa_is_unused_port(ds, port))
+ continue;
- if (dsa_is_cpu_port(ds, port))
- felix_npi_port_init(ocelot, port);
+ ocelot_init_port(ocelot, port);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
@@ -618,19 +1225,41 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, port);
}
- /* Include the CPU port module in the forwarding mask for unknown
- * unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
- * excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
- * Ocelot relies on whitelisting MAC addresses towards PGID_CPU.
- */
- ocelot_write_rix(ocelot,
- ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
- ANA_PGID_PGID, PGID_UC);
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_deinit_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_cpu_port(ds, port))
+ continue;
+
+ /* The initial tag protocol is NPI which always returns 0, so
+ * there's no real point in checking for errors.
+ */
+ felix_set_tag_protocol(ds, port, felix->tag_proto);
+ }
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
+ ds->assisted_learning_on_cpu_port = true;
return 0;
+
+out_deinit_ports:
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ ocelot_deinit_port(ocelot, port);
+ }
+
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
+
+out_mdiobus_free:
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
+
+ return err;
}
static void felix_teardown(struct dsa_switch *ds)
@@ -639,14 +1268,26 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
int port;
- if (felix->info->mdio_bus_free)
- felix->info->mdio_bus_free(ocelot);
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_cpu_port(ds, port))
+ continue;
- for (port = 0; port < ocelot->num_phys_ports; port++)
- ocelot_deinit_port(ocelot, port);
+ felix_del_tag_protocol(ds, port, felix->tag_proto);
+ }
+
+ ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
- /* stop workqueue thread */
ocelot_deinit(ocelot);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ ocelot_deinit_port(ocelot, port);
+ }
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
@@ -665,20 +1306,79 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
return ocelot_hwstamp_set(ocelot, port, ifr);
}
+static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+	int err = 0, grp = 0;
+
+ if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ return false;
+
+ if (!felix->info->quirk_no_xtr_irq)
+ return false;
+
+ if (ptp_type == PTP_CLASS_NONE)
+ return false;
+
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ struct sk_buff *skb;
+ unsigned int type;
+
+ err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
+ if (err)
+ goto out;
+
+ /* We trap to the CPU port module all PTP frames, but
+ * felix_rxtstamp() only gets called for event frames.
+ * So we need to avoid sending duplicate general
+ * message frames by running a second BPF classifier
+ * here and dropping those.
+ */
+ __skb_push(skb, ETH_HLEN);
+
+ type = ptp_classify_raw(skb);
+
+ __skb_pull(skb, ETH_HLEN);
+
+ if (type == PTP_CLASS_NONE) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ netif_rx(skb);
+ }
+
+out:
+ if (err < 0)
+ ocelot_drain_cpu_queue(ocelot, 0);
+
+ return true;
+}
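
For L2-encapsulated PTPv2 the classifier's decision reduces to the EtherType plus the messageType nibble of the PTP header: values 0x0-0x3 (Sync, Delay_Req, Pdelay_Req, Pdelay_Resp) are event messages, everything else is general. A rough user-space equivalent of that check — a sketch, not the kernel's BPF classifier:

#include <stdbool.h>
#include <stdint.h>

#define ETH_P_1588	0x88F7
#define ETH_HLEN	14

static bool ptp_l2_is_event(const uint8_t *frame, int len)
{
	uint16_t etype;

	if (len < ETH_HLEN + 1)
		return false;

	/* EtherType sits right after the two MAC addresses */
	etype = (uint16_t)(frame[12] << 8) | frame[13];
	if (etype != ETH_P_1588)
		return false;

	/* Low nibble of the first PTP header byte is the messageType */
	return (frame[ETH_HLEN] & 0x0f) < 0x4;
}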
+
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
+ u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
struct skb_shared_hwtstamps *shhwtstamps;
struct ocelot *ocelot = ds->priv;
- u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
u32 tstamp_lo, tstamp_hi;
struct timespec64 ts;
u64 tstamp, val;
+ /* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
+ * for RX timestamping. Then free it, and poll for its copy through
+ * MMIO in the CPU port module, and inject that into the stack from
+ * ocelot_xtr_poll().
+ */
+ if (felix_check_xtr_pkt(ocelot, type)) {
+ kfree_skb(skb);
+ return true;
+ }
+
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- packing(extraction, &val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+ ocelot_xfh_get_rew_val(extraction, &val);
tstamp_lo = (u32)val;
tstamp_hi = tstamp >> 32;
@@ -780,46 +1480,200 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
+static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int felix_sb_occ_snapshot(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int felix_sb_occ_max_clear(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_cur, p_max);
+}
+
+static int felix_mrp_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_add(ocelot, port, mrp);
+}
+
+static int felix_mrp_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+	return ocelot_mrp_del(ocelot, port, mrp);
+}
+
+static int
+felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_add_ring_role(ocelot, port, mrp);
+}
+
+static int
+felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_del_ring_role(ocelot, port, mrp);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
- .get_tag_protocol = felix_get_tag_protocol,
- .setup = felix_setup,
- .teardown = felix_teardown,
- .set_ageing_time = felix_set_ageing_time,
- .get_strings = felix_get_strings,
- .get_ethtool_stats = felix_get_ethtool_stats,
- .get_sset_count = felix_get_sset_count,
- .get_ts_info = felix_get_ts_info,
- .phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
- .port_enable = felix_port_enable,
- .port_disable = felix_port_disable,
- .port_fdb_dump = felix_fdb_dump,
- .port_fdb_add = felix_fdb_add,
- .port_fdb_del = felix_fdb_del,
- .port_mdb_prepare = felix_mdb_prepare,
- .port_mdb_add = felix_mdb_add,
- .port_mdb_del = felix_mdb_del,
- .port_bridge_join = felix_bridge_join,
- .port_bridge_leave = felix_bridge_leave,
- .port_stp_state_set = felix_bridge_stp_state_set,
- .port_vlan_prepare = felix_vlan_prepare,
- .port_vlan_filtering = felix_vlan_filtering,
- .port_vlan_add = felix_vlan_add,
- .port_vlan_del = felix_vlan_del,
- .port_hwtstamp_get = felix_hwtstamp_get,
- .port_hwtstamp_set = felix_hwtstamp_set,
- .port_rxtstamp = felix_rxtstamp,
- .port_txtstamp = felix_txtstamp,
- .port_change_mtu = felix_change_mtu,
- .port_max_mtu = felix_get_max_mtu,
- .port_policer_add = felix_port_policer_add,
- .port_policer_del = felix_port_policer_del,
- .cls_flower_add = felix_cls_flower_add,
- .cls_flower_del = felix_cls_flower_del,
- .cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .get_tag_protocol = felix_get_tag_protocol,
+ .change_tag_protocol = felix_change_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_mdb_add = felix_mdb_add,
+ .port_mdb_del = felix_mdb_del,
+ .port_pre_bridge_flags = felix_pre_bridge_flags,
+ .port_bridge_flags = felix_bridge_flags,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_lag_join = felix_lag_join,
+ .port_lag_leave = felix_lag_leave,
+ .port_lag_change = felix_lag_change,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
+ .devlink_sb_pool_get = felix_sb_pool_get,
+ .devlink_sb_pool_set = felix_sb_pool_set,
+ .devlink_sb_port_pool_get = felix_sb_port_pool_get,
+ .devlink_sb_port_pool_set = felix_sb_port_pool_set,
+ .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
+ .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
+ .devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
+ .devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
+ .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
+	.devlink_sb_occ_tc_port_bind_get = felix_sb_occ_tc_port_bind_get,
+ .port_mrp_add = felix_mrp_add,
+ .port_mrp_del = felix_mrp_del,
+ .port_mrp_add_ring_role = felix_mrp_add_ring_role,
+ .port_mrp_del_ring_role = felix_mrp_del_ring_role,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 4c717324ac2f..4d96cad815d5 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,7 +5,6 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
-#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
@@ -15,7 +14,6 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
- int shared_queue_sz;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
@@ -25,6 +23,19 @@ struct felix_info {
int switch_pci_bar;
int imdio_pci_bar;
const struct ptp_clock_info *ptp_caps;
+
+ /* Some Ocelot switches are integrated into the SoC without the
+ * extraction IRQ line connected to the ARM GIC. By enabling this
+ * workaround, the few packets that are delivered to the CPU port
+ * module (currently only PTP) are copied not only to the hardware CPU
+ * port module, but also to the 802.1Q Ethernet CPU port, and polling
+ * the extraction registers is triggered once the DSA tagger sees a PTP
+ * frame. The Ethernet frame is only used as a notification: it is
+ * dropped, and the original frame is extracted over MMIO and annotated
+ * with the RX timestamp.
+ */
+ bool quirk_no_xtr_irq;
+
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
void (*phylink_validate)(struct ocelot *ocelot, int port,
@@ -36,7 +47,6 @@ struct felix_info {
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
- void (*xmit_template_populate)(struct ocelot *ocelot, int port);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -50,6 +60,8 @@ struct felix {
struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
+ struct dsa_8021q_context *dsa_8021q_ctx;
+ enum dsa_tag_protocol tag_proto;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2e5bbdca5ea4..5ff623ee76a6 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -8,7 +8,7 @@
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
-#include <linux/packing.h>
+#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
@@ -1006,9 +1006,27 @@ static u16 vsc9959_wm_enc(u16 value)
return value;
}
+static u16 vsc9959_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(8, 0));
+
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
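
vsc9959_wm_dec() is the inverse of the driver's watermark encoder: the 9-bit field stores small values verbatim and larger ones divided by 16, with BIT(8) flagging the x16 multiplier. A stand-alone roundtrip showing the format and its granularity loss — the encoder below is reconstructed from the decoder above, and vsc9953 uses the same scheme one bit wider:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1))

static uint16_t wm_enc(uint16_t value)
{
	if (value >= BIT(8))
		return BIT(8) | (value / 16);
	return value;
}

static uint16_t wm_dec(uint16_t wm)
{
	if (wm & BIT(8))
		return (wm & GENMASK(7, 0)) * 16;
	return wm;
}

int main(void)
{
	assert(wm_dec(wm_enc(100)) == 100);	/* exact below 256 */
	assert(wm_dec(wm_enc(1000)) == 992);	/* rounded down to a x16 step */
	printf("watermark roundtrip OK\n");
	return 0;
}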
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
@@ -1321,31 +1339,6 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
-static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u8 *template = ocelot_port->xmit_template;
- u64 bypass, dest, src;
- __be32 *prefix;
- u8 *injection;
-
- /* Set the source port as the CPU port module and not the
- * NPI port
- */
- src = ocelot->num_phys_ports;
- dest = BIT(port);
- bypass = true;
-
- injection = template + OCELOT_SHORT_PREFIX_LEN;
- prefix = (__be32 *)template;
-
- packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
- packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
-
- *prefix = cpu_to_be32(0x8880000a);
-}
-
static const struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1356,12 +1349,12 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
- .shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
- .num_tx_queues = FELIX_NUM_TC,
+ .num_tx_queues = OCELOT_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
+ .quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
@@ -1369,7 +1362,6 @@ static const struct felix_info felix_info_vsc9959 = {
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
- .xmit_template_populate = vsc9959_xmit_template_populate,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -1408,17 +1400,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
goto err_pci_enable;
}
- /* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
- }
-
felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
if (!felix) {
err = -ENOMEM;
@@ -1429,7 +1410,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = FELIX_NUM_TC;
+ ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
@@ -1461,6 +1442,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
err = dsa_register_switch(ds);
if (err) {
@@ -1474,9 +1456,8 @@ err_register_ds:
kfree(ds);
err_alloc_ds:
err_alloc_irq:
-err_alloc_felix:
kfree(felix);
-err_dma:
+err_alloc_felix:
pci_disable_device(pdev);
err_pci_enable:
return err;
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ebbaf6817ec8..84f93a874d50 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -8,7 +8,7 @@
#include <soc/mscc/ocelot.h>
#include <linux/of_platform.h>
#include <linux/pcs-lynx.h>
-#include <linux/packing.h>
+#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
#include "felix.h"
@@ -1057,9 +1057,27 @@ static u16 vsc9953_wm_enc(u16 value)
return value;
}
+static u16 vsc9953_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(9, 0));
+
+ if (wm & BIT(9))
+ return (wm & GENMASK(8, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(25, 13)) >> 13;
+ *maxuse = val & GENMASK(12, 0);
+}
+
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
+ .wm_dec = vsc9953_wm_dec,
+ .wm_stat = vsc9953_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
@@ -1147,31 +1165,6 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdiobus_unregister(felix->imdio);
}
-static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u8 *template = ocelot_port->xmit_template;
- u64 bypass, dest, src;
- __be32 *prefix;
- u8 *injection;
-
- /* Set the source port as the CPU port module and not the
- * NPI port
- */
- src = ocelot->num_phys_ports;
- dest = BIT(port);
- bypass = true;
-
- injection = template + OCELOT_SHORT_PREFIX_LEN;
- prefix = (__be32 *)template;
-
- packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
- packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
-
- *prefix = cpu_to_be32(0x88800005);
-}
-
static const struct felix_info seville_info_vsc9953 = {
.target_io_res = vsc9953_target_io_res,
.port_io_res = vsc9953_port_io_res,
@@ -1181,14 +1174,13 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
- .shared_queue_sz = 256 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
+ .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
- .xmit_template_populate = vsc9953_xmit_template_populate,
};
static int seville_probe(struct platform_device *pdev)
@@ -1228,6 +1220,7 @@ static int seville_probe(struct platform_device *pdev)
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
err = dsa_register_switch(ds);
if (err) {
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 4d49c5f2b790..ca2ad77b71f1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -101,6 +101,9 @@
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)
+/* MIB registers */
+#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
+
/* Phy bypass mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
@@ -154,6 +157,66 @@
#define AR9331_SW_MDIO_POLL_SLEEP_US 1
#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+/* The interval should be small enough to avoid overflow of 32bit MIBs.
+ *
+ * FIXME: until we can read MIBs from the stats64 call directly (i.e. sleep
+ * there), we have to poll stats more frequently than is actually needed.
+ * For overflow protection, normally, a 100 sec interval should have been OK.
+ */
+#define STATS_INTERVAL_JIFFIES (3 * HZ)
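
A back-of-the-envelope check of that bound (assuming worst-case line rate): a 32-bit byte counter wraps after 2^32 bytes, so whether a 100 s interval is actually safe depends on the port speed, while the 3 s used here leaves a wide margin either way:

#include <stdio.h>

int main(void)
{
	/* Seconds until a 32-bit byte counter can wrap at line rate */
	double wrap_100m = 4294967296.0 / (100e6 / 8);	/* ~343.6 s */
	double wrap_1g = 4294967296.0 / (1000e6 / 8);	/* ~34.4 s */

	printf("100 Mbit/s: %.1f s, 1 Gbit/s: %.1f s\n", wrap_100m, wrap_1g);
	return 0;
}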
+
+struct ar9331_sw_stats_raw {
+ u32 rxbroad; /* 0x00 */
+ u32 rxpause; /* 0x04 */
+ u32 rxmulti; /* 0x08 */
+ u32 rxfcserr; /* 0x0c */
+ u32 rxalignerr; /* 0x10 */
+ u32 rxrunt; /* 0x14 */
+ u32 rxfragment; /* 0x18 */
+ u32 rx64byte; /* 0x1c */
+ u32 rx128byte; /* 0x20 */
+ u32 rx256byte; /* 0x24 */
+ u32 rx512byte; /* 0x28 */
+ u32 rx1024byte; /* 0x2c */
+ u32 rx1518byte; /* 0x30 */
+ u32 rxmaxbyte; /* 0x34 */
+ u32 rxtoolong; /* 0x38 */
+ u32 rxgoodbyte; /* 0x3c */
+ u32 rxgoodbyte_hi;
+ u32 rxbadbyte; /* 0x44 */
+ u32 rxbadbyte_hi;
+ u32 rxoverflow; /* 0x4c */
+ u32 filtered; /* 0x50 */
+ u32 txbroad; /* 0x54 */
+ u32 txpause; /* 0x58 */
+ u32 txmulti; /* 0x5c */
+ u32 txunderrun; /* 0x60 */
+ u32 tx64byte; /* 0x64 */
+ u32 tx128byte; /* 0x68 */
+ u32 tx256byte; /* 0x6c */
+ u32 tx512byte; /* 0x70 */
+ u32 tx1024byte; /* 0x74 */
+ u32 tx1518byte; /* 0x78 */
+ u32 txmaxbyte; /* 0x7c */
+ u32 txoversize; /* 0x80 */
+ u32 txbyte; /* 0x84 */
+ u32 txbyte_hi;
+ u32 txcollision; /* 0x8c */
+ u32 txabortcol; /* 0x90 */
+ u32 txmulticol; /* 0x94 */
+ u32 txsinglecol; /* 0x98 */
+ u32 txexcdefer; /* 0x9c */
+ u32 txdefer; /* 0xa0 */
+ u32 txlatecol; /* 0xa4 */
+};
+
+struct ar9331_sw_port {
+ int idx;
+ struct delayed_work mib_read;
+ struct rtnl_link_stats64 stats;
+	spinlock_t stats_lock;
+};
+
struct ar9331_sw_priv {
struct device *dev;
struct dsa_switch ds;
@@ -165,8 +228,17 @@ struct ar9331_sw_priv {
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
struct reset_control *sw_reset;
+ struct ar9331_sw_port port[AR9331_SW_PORTS];
};
+static struct ar9331_sw_priv *ar9331_sw_port_to_priv(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_port *p = port - port->idx;
+
+ return (struct ar9331_sw_priv *)((void *)p -
+ offsetof(struct ar9331_sw_priv, port));
+}
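
ar9331_sw_port_to_priv() recovers the containing private structure from a pointer into its embedded port array: it rewinds to port[0] via the stored index, then subtracts the member offset — the container_of() idiom done by hand across an array. A self-contained demonstration of the same pointer arithmetic:

#include <stddef.h>
#include <stdio.h>

struct port {
	int idx;
};

struct priv {
	int magic;
	struct port port[4];
};

static struct priv *port_to_priv(struct port *port)
{
	struct port *p = port - port->idx;	/* rewind to port[0] */

	return (struct priv *)((char *)p - offsetof(struct priv, port));
}

int main(void)
{
	struct priv priv = { .magic = 42 };
	int i;

	for (i = 0; i < 4; i++)
		priv.port[i].idx = i;

	printf("%d\n", port_to_priv(&priv.port[2])->magic);	/* prints 42 */
	return 0;
}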
+
/* Warning: switch reset will reset last AR9331_SW_MDIO_PHY_MODE_PAGE request
* If some kind of optimization is used, the request should be repeated.
*/
@@ -330,6 +402,8 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
if (ret)
goto error;
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
error:
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
@@ -424,6 +498,7 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
int ret;
@@ -431,6 +506,8 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
AR9331_SW_PORT_STATUS_MAC_MASK, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+
+ cancel_delayed_work_sync(&p->mib_read);
}
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -441,10 +518,13 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
+ schedule_delayed_work(&p->mib_read, 0);
+
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
case SPEED_1000:
@@ -477,6 +557,73 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
+static void ar9331_read_stats(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct rtnl_link_stats64 *stats = &port->stats;
+ struct ar9331_sw_stats_raw raw;
+ int ret;
+
+	/* Do the slowest part first, to avoid needless locking for a long time */
+ ret = regmap_bulk_read(priv->regmap, AR9331_MIB_COUNTER(port->idx),
+ &raw, sizeof(raw) / sizeof(u32));
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return;
+ }
+ /* All MIB counters are cleared automatically on read */
+
+ spin_lock(&port->stats_lock);
+
+ stats->rx_bytes += raw.rxgoodbyte;
+ stats->tx_bytes += raw.txbyte;
+
+ stats->rx_packets += raw.rx64byte + raw.rx128byte + raw.rx256byte +
+ raw.rx512byte + raw.rx1024byte + raw.rx1518byte + raw.rxmaxbyte;
+ stats->tx_packets += raw.tx64byte + raw.tx128byte + raw.tx256byte +
+ raw.tx512byte + raw.tx1024byte + raw.tx1518byte + raw.txmaxbyte;
+
+ stats->rx_length_errors += raw.rxrunt + raw.rxfragment + raw.rxtoolong;
+ stats->rx_crc_errors += raw.rxfcserr;
+ stats->rx_frame_errors += raw.rxalignerr;
+ stats->rx_missed_errors += raw.rxoverflow;
+ stats->rx_dropped += raw.filtered;
+ stats->rx_errors += raw.rxfcserr + raw.rxalignerr + raw.rxrunt +
+ raw.rxfragment + raw.rxoverflow + raw.rxtoolong;
+
+ stats->tx_window_errors += raw.txlatecol;
+ stats->tx_fifo_errors += raw.txunderrun;
+ stats->tx_aborted_errors += raw.txabortcol;
+ stats->tx_errors += raw.txoversize + raw.txabortcol + raw.txunderrun +
+ raw.txlatecol;
+
+ stats->multicast += raw.rxmulti;
+ stats->collisions += raw.txcollision;
+
+ spin_unlock(&port->stats_lock);
+}
+
+static void ar9331_do_stats_poll(struct work_struct *work)
+{
+ struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
+ mib_read.work);
+
+ ar9331_read_stats(port);
+
+ schedule_delayed_work(&port->mib_read, STATS_INTERVAL_JIFFIES);
+}
+
+static void ar9331_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
@@ -485,6 +632,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+ .get_stats64 = ar9331_get_stats64,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -796,7 +944,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv;
struct dsa_switch *ds;
- int ret;
+ int ret, i;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -831,6 +979,14 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->ops = &priv->ops;
dev_set_drvdata(&mdiodev->dev, priv);
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ port->idx = i;
+ spin_lock_init(&port->stats_lock);
+ INIT_DELAYED_WORK(&port->mib_read, ar9331_do_stats_poll);
+ }
+
ret = dsa_register_switch(ds);
if (ret)
goto err_remove_irq;
@@ -846,6 +1002,13 @@ err_remove_irq:
static void ar9331_sw_remove(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ cancel_delayed_work_sync(&port->mib_read);
+ }
irq_domain_remove(priv->irqdomain);
mdiobus_unregister(priv->mbus);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 5bdac669a339..cdaf9f85a2cb 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1295,13 +1295,10 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
static int
qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
@@ -1316,38 +1313,32 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
static int
-qca8k_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- return 0;
-}
-
-static void
qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_add(priv, port, vid, untagged);
-
- if (ret)
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
if (pvid) {
int shift = 16 * (port % 2);
qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift,
- vlan->vid_end << shift);
+ 0xfff << shift, vlan->vid << shift);
qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid_end) |
- QCA8K_PORT_VLAN_SVID(vlan->vid_end));
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
}
+
+ return 0;
}
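
The 16 * (port % 2) shift works because the hardware packs the 12-bit PVIDs of two adjacent ports into one 32-bit egress-VLAN register, one per 16-bit half. A stand-alone sketch of that packing (plain C, no register access):

#include <stdint.h>
#include <stdio.h>

static uint32_t set_pvid(uint32_t reg, int port, uint16_t vid)
{
	int shift = 16 * (port % 2);	/* parity selects the 16-bit half */

	reg &= ~((uint32_t)0xfff << shift);
	reg |= (uint32_t)(vid & 0xfff) << shift;
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_pvid(reg, 0, 100);	/* lower half */
	reg = set_pvid(reg, 1, 200);	/* upper half */
	printf("0x%08x\n", reg);	/* 0x00c80064 */
	return 0;
}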
static int
@@ -1356,11 +1347,8 @@ qca8k_port_vlan_del(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_del(priv, port, vid);
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
@@ -1393,7 +1381,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_prepare = qca8k_port_vlan_prepare,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_validate = qca8k_phylink_validate,
@@ -1446,7 +1433,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->configure_vlan_while_not_filtering = true;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 6b6a3dec0984..fcf465f7f922 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -130,13 +130,11 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
int rtl8366_init_vlan(struct realtek_smi *smi);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 83d481ef9273..75897a369096 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -341,19 +341,15 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct realtek_smi *smi = ds->priv;
struct rtl8366_vlan_4k vlan4k;
int ret;
/* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (switchdev_trans_ph_prepare(trans)) {
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- return 0;
- }
+ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
dev_info(smi->dev, "%s filtering on port %d\n",
vlan_filtering ? "enable" : "disable",
@@ -379,76 +375,61 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct realtek_smi *smi = ds->priv;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return -EINVAL;
-
- dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
- vlan->vid_begin, vlan->vid_end);
-
- /* Enable VLAN in the hardware
- * FIXME: what's with this 4k business?
- * Just rtl8366_enable_vlan() seems inconclusive.
- */
- return rtl8366_enable_vlan4k(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
-
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
struct realtek_smi *smi = ds->priv;
u32 member = 0;
u32 untag = 0;
- u16 vid;
int ret;
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return;
+ if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
+ return -EINVAL;
+ }
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(smi, true);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
+ return ret;
+ }
dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid_begin,
- port,
- untagged ? "untagged" : "tagged",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
dev_err(smi->dev, "port is DSA or CPU port\n");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- member |= BIT(port);
-
- if (untagged)
- untag |= BIT(port);
+ member |= BIT(port);
- ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
- if (ret)
- dev_err(smi->dev,
- "failed to set up VLAN %04x",
- vid);
+ if (untagged)
+ untag |= BIT(port);
- if (!pvid)
- continue;
+ ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ if (ret) {
+ dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ return ret;
+ }
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret)
- dev_err(smi->dev,
- "failed to set PVID on port %d to VLAN %04x",
- port, vid);
+ if (!pvid)
+ return 0;
- if (!ret)
- dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
- vid, port);
+ ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ if (ret) {
+ dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ port, vlan->vid);
+ return ret;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
@@ -456,46 +437,39 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_smi *smi = ds->priv;
- u16 vid;
- int ret;
-
- dev_info(smi->dev, "del VLAN on port %d\n", port);
+ int ret, i;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int i;
+ dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
- dev_info(smi->dev, "del VLAN %04x\n", vid);
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ if (vlan->vid == vlanmc.vid) {
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+ /*
+ * If no ports are members of this VLAN
+ * anymore then clear the whole member
+ * config so it can be reused.
+ */
+			if (!vlanmc.member) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to remove VLAN %04x\n",
+ vlan->vid);
return ret;
-
- if (vid == vlanmc.vid) {
- /* Remove this port from the VLAN */
- vlanmc.member &= ~BIT(port);
- vlanmc.untag &= ~BIT(port);
- /*
- * If no ports are members of this VLAN
- * anymore then clear the whole member
- * config so it can be reused.
- */
- if (!vlanmc.member && vlanmc.untag) {
- vlanmc.vid = 0;
- vlanmc.priority = 0;
- vlanmc.fid = 0;
- }
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret) {
- dev_err(smi->dev,
- "failed to remove VLAN %04x\n",
- vid);
- return ret;
- }
- break;
}
+ break;
}
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index cfe56960f44b..a89093bc6c6a 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -601,108 +601,114 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
/* Found in a vendor driver */
+/* Struct for handling the jam tables' entries */
+struct rtl8366rb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
/* For the "version 0" early silicon, appear in most source releases */
-static const u16 rtl8366rb_init_jam_ver_0[] = {
- 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
- 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
- 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
- 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
- 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
- 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
- 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
- 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
- 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
- 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
- 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
- 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
- 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
- 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
- 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
- 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
- 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
- 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
- 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
- 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
- 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
- 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
- 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
- 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
- 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
- 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
- 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
- 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
- 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
+ {0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
+ {0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
+ {0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
+ {0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
+ {0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
+ {0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
+ {0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
+ {0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
+ {0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
+ {0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
+ {0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
+ {0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
+ {0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
+ {0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
+ {0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
+ {0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
+ {0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
+ {0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
+ {0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
+ {0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
+ {0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
+ {0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
+ {0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
+ {0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
+ {0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
+ {0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
+ {0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
+ {0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
+ {0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
};
/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
-static const u16 rtl8366rb_init_jam_ver_1[] = {
- 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
- 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
- 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
- 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
- 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
- 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
- 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
- 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
- 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
- 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
- 0xBE79, 0x3C3C, 0xBE00, 0x1340,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
+ {0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
+ {0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
+ {0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
+ {0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
+ {0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
+ {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
+ {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
+ {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
+ {0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
+ {0xBE79, 0x3C3C}, {0xBE00, 0x1340},
};
/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
-static const u16 rtl8366rb_init_jam_ver_2[] = {
- 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
- 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
- 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
- 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
- 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
- 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
- 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
- 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
- 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
- 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
- 0xBE00, 0x1340, 0x0F51, 0x0010,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
+ {0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
+ {0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
+ {0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
+ {0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
+ {0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
+ {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
+ {0xBE00, 0x1340}, {0x0F51, 0x0010},
};
/* Appears in a DDWRT code dump */
-static const u16 rtl8366rb_init_jam_ver_3[] = {
- 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
- 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
- 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
- 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
- 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
- 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
- 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
- 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
- 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
- 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
- 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
- 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
- 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
- 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
- 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
- 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
- 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
+ {0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
+ {0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
+ {0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
+ {0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
+ {0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
+ {0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
+ {0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
+ {0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
+ {0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
+ {0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
+ {0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
};
/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
-static const u16 rtl8366rb_init_jam_f5d8235[] = {
- 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
- 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
- 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
- 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
- 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
- 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
- 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
- 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
- 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
+ {0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
+ {0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
+ {0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
+ {0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
+ {0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
+ {0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
+ {0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
+ {0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
+ {0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
};
/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
-static const u16 rtl8366rb_init_jam_dgn3500[] = {
- 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
- 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
- 0x0401, 0x0000, 0x0431, 0x0960,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
+ {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
+ {0x0401, 0x0000}, {0x0431, 0x0960},
};
/* This jam table activates "green ethernet", which means low power mode
@@ -710,16 +716,53 @@ static const u16 rtl8366rb_init_jam_dgn3500[] = {
* necessary, and the ports should enter power saving mode 10 seconds after
* a cable is disconnected. Seems to always be the same.
*/
-static const u16 rtl8366rb_green_jam[][2] = {
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
{0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
{0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
{0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
};
+/* Function that jams the table entries into the proper registers */
+static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ int jam_size, struct realtek_smi *smi,
+ bool write_dbg)
+{
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < jam_size; i++) {
+ if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ }
+ }
+ if (write_dbg)
+ dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ jam_table[i].val,
+ jam_table[i].reg);
+ ret = regmap_write(smi->map,
+ jam_table[i].reg,
+ jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
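The (reg & 0xBE00) == 0xBE00 test requires every bit of 0xBE00 to be set, which is how the helper recognizes internal PHY registers before checking the busy flag. A standalone sketch, not driver code, of how a few of the addresses above classify:

#include <stdio.h>

int main(void)
{
	/* Sample register addresses taken from the jam tables above */
	unsigned int regs[] = { 0xBE78, 0x0242, 0xC44C, 0x8000, 0xBE00 };
	int i;

	for (i = 0; i < 5; i++)
		printf("0x%04X -> %s\n", regs[i],
		       (regs[i] & 0xBE00) == 0xBE00 ?
		       "PHY access (needs busy check)" : "plain register write");
	return 0;
}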
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_smi *smi = ds->priv;
- const u16 *jam_table;
+ const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
@@ -788,54 +831,16 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- i = 0;
- while (i < jam_size) {
- if ((jam_table[i] & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
- RTL8366RB_PHY_ACCESS_BUSY_REG,
- &val);
- if (ret)
- return ret;
- if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
- if (ret)
- return ret;
- }
- }
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
- jam_table[i + 1],
- jam_table[i]);
- ret = regmap_write(smi->map,
- jam_table[i],
- jam_table[i + 1]);
- if (ret)
- return ret;
- i += 2;
- }
+ ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ if (ret)
+ return ret;
/* Set up the "green ethernet" feature */
- i = 0;
- while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
- &val);
- if (ret)
- return ret;
- if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
- if (ret)
- return ret;
- ret = regmap_write(smi->map,
- rtl8366rb_green_jam[i][0],
- rtl8366rb_green_jam[i][1]);
- if (ret)
- return ret;
- i++;
- }
- }
+ ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
+ ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ if (ret)
+ return ret;
+
ret = regmap_write(smi->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
@@ -972,6 +977,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1504,7 +1511,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
.port_vlan_filtering = rtl8366_vlan_filtering,
- .port_vlan_prepare = rtl8366_vlan_prepare,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 4ebc4a5a7b35..f9e87fb33da0 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -94,6 +94,7 @@ struct sja1105_info {
* pop it when it's equal to TPID2.
*/
u16 qinq_tpid;
+ bool can_limit_mcast_flood;
int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
@@ -204,6 +205,9 @@ struct sja1105_private {
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_NUM_PORTS];
bool best_effort_vlan_filtering;
+ unsigned long learn_ena;
+ unsigned long ucast_egress_floods;
+ unsigned long bcast_egress_floods;
const struct sja1105_info *info;
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
@@ -246,7 +250,7 @@ enum sja1105_reset_reason {
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans);
+ struct netlink_ext_ack *extack);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_devlink.c */
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 4a2ec395bcb0..b6a4a16b8c7e 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -135,7 +135,6 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
rtnl_lock();
for (port = 0; port < ds->num_ports; port++) {
- struct switchdev_trans trans;
struct dsa_port *dp;
if (!dsa_is_user_port(ds, port))
@@ -144,13 +143,7 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
dp = dsa_to_port(ds, port);
vlan_filtering = dsa_port_is_vlan_filtering(dp);
- trans.ph_prepare = true;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
- if (rc)
- break;
-
- trans.ph_prepare = false;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, NULL);
if (rc)
break;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4ca029650993..7692338730df 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -25,6 +25,8 @@
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
+#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+
static const struct dsa_switch_ops sja1105_switch_ops;
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
@@ -42,15 +44,16 @@ static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
int from, int to, bool allow)
{
- if (allow) {
- l2_fwd[from].bc_domain |= BIT(to);
+ if (allow)
l2_fwd[from].reach_port |= BIT(to);
- l2_fwd[from].fl_domain |= BIT(to);
- } else {
- l2_fwd[from].bc_domain &= ~BIT(to);
+ else
l2_fwd[from].reach_port &= ~BIT(to);
- l2_fwd[from].fl_domain &= ~BIT(to);
- }
+}
+
+static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to)
+{
+ return !!(l2_fwd[from].reach_port & BIT(to));
}
/* Structure used to temporarily transport device tree
@@ -220,17 +223,43 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
+ struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
+ int port;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
- /* We only populate the FDB table through dynamic
- * L2 Address Lookup entries
+ /* We only populate the FDB table through dynamic L2 Address Lookup
+ * entries, except for a special entry at the end which is a catch-all
+ * for unknown multicast and will be used to control flooding domain.
*/
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
+
+ if (!priv->info->can_limit_mcast_flood)
+ return 0;
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+ l2_lookup = table->entries;
+
+ /* All L2 multicast addresses have an odd first octet */
+ l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].lockeds = true;
+ l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
+
+ /* Flood multicast to every port by default */
+ for (port = 0; port < priv->ds->num_ports; port++)
+ if (!dsa_is_unused_port(priv->ds, port))
+ l2_lookup[0].destports |= BIT(port);
+
return 0;
}
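SJA1105_UNKNOWN_MULTICAST (0x010000000000) is bit 40 of the MAC address packed into a u64, i.e. the least significant bit of the first octet, which IEEE 802 defines as the multicast (I/G) bit. Matching with the same value as both address and mask therefore catches every multicast frame; a userspace sketch with example addresses:

#include <stdint.h>
#include <stdio.h>

#define UNKNOWN_MCAST 0x010000000000ull

int main(void)
{
	/* 01:80:c2:00:00:0e (multicast) and 00:04:9f:05:01:02 (unicast) */
	uint64_t mcast = 0x0180c200000eull;
	uint64_t ucast = 0x00049f050102ull;

	printf("mcast matches: %d\n", (mcast & UNKNOWN_MCAST) == UNKNOWN_MCAST);
	printf("ucast matches: %d\n", (ucast & UNKNOWN_MCAST) == UNKNOWN_MCAST);
	return 0;
}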
@@ -317,7 +346,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ table->entries = kzalloc(table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
@@ -385,11 +414,23 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
for (j = 0; j < SJA1105_NUM_TC; j++)
l2fwd[i].vlan_pmap[j] = j;
+ /* All ports start up with egress flooding enabled,
+ * including the CPU port.
+ */
+ priv->ucast_egress_floods |= BIT(i);
+ priv->bcast_egress_floods |= BIT(i);
+
if (i == upstream)
continue;
sja1105_port_allow_traffic(l2fwd, i, upstream, true);
sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+
+ l2fwd[i].bc_domain = BIT(upstream);
+ l2fwd[i].fl_domain = BIT(upstream);
+
+ l2fwd[upstream].bc_domain |= BIT(i);
+ l2fwd[upstream].fl_domain |= BIT(i);
}
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
@@ -1514,6 +1555,12 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
*/
if (!(l2_lookup.destports & BIT(port)))
continue;
+
+ /* We need to hide the FDB entry for unknown multicast */
+ if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ continue;
+
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
@@ -1524,17 +1571,10 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-/* This callback needs to be present */
-static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
- sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
@@ -1543,6 +1583,50 @@ static int sja1105_mdb_del(struct dsa_switch *ds, int port,
return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
+/* Common function for unicast and broadcast flood configuration.
+ * Flooding is configured between each {ingress, egress} port pair, and since
+ * the bridge's semantics are those of "egress flooding", it means we must
+ * enable flooding towards this port from all ingress ports that are in the
+ * same forwarding domain.
+ */
+static int sja1105_manage_flood_domains(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct dsa_switch *ds = priv->ds;
+ int from, to, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (from = 0; from < ds->num_ports; from++) {
+ u64 fl_domain = 0, bc_domain = 0;
+
+ for (to = 0; to < priv->ds->num_ports; to++) {
+ if (!sja1105_can_forward(l2_fwd, from, to))
+ continue;
+
+ if (priv->ucast_egress_floods & BIT(to))
+ fl_domain |= BIT(to);
+ if (priv->bcast_egress_floods & BIT(to))
+ bc_domain |= BIT(to);
+ }
+
+ /* Nothing changed, nothing to do */
+ if (l2_fwd[from].fl_domain == fl_domain &&
+ l2_fwd[from].bc_domain == bc_domain)
+ continue;
+
+ l2_fwd[from].fl_domain = fl_domain;
+ l2_fwd[from].bc_domain = bc_domain;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ from, &l2_fwd[from], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
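Per ingress port, the loop reduces to intersecting the forwarding domain with the set of egress-flood-enabled ports. A worked example with hypothetical bitmaps:

#include <stdio.h>

int main(void)
{
	/* Hypothetical bitmaps for a 5-port switch, ingress port 0 */
	unsigned int reach_port = 0x1e;		/* may forward to ports 1-4 */
	unsigned int ucast_floods = 0x06;	/* BR_FLOOD set on ports 1, 2 */

	/* The inner loop above reduces to a bitwise AND */
	printf("fl_domain = 0x%02x\n", reach_port & ucast_floods); /* 0x06 */
	return 0;
}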
+
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
struct net_device *br, bool member)
{
@@ -1580,8 +1664,12 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
return rc;
}
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
- port, &l2_fwd[port], true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+ if (rc)
+ return rc;
+
+ return sja1105_manage_flood_domains(priv);
}
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -1612,12 +1700,12 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
- mac[port].dyn_learn = true;
+ mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
case BR_STATE_FORWARDING:
mac[port].ingress = true;
mac[port].egress = true;
- mac[port].dyn_learn = true;
+ mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -2607,35 +2695,12 @@ out:
return rc;
}
-static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct sja1105_private *priv = ds->priv;
- u16 vid;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- return 0;
-
- /* If the user wants best-effort VLAN filtering (aka vlan_filtering
- * bridge plus tagging), be sure to at least deny alterations to the
- * configuration done by dsa_8021q.
- */
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (vid_is_dsa_8021q(vid)) {
- dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
@@ -2647,16 +2712,12 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
u16 tpid, tpid2;
int rc;
- if (switchdev_trans_ph_prepare(trans)) {
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type == SJA1105_RULE_VL) {
- dev_err(ds->dev,
- "Cannot change VLAN filtering with active VL rules\n");
- return -EBUSY;
- }
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change VLAN filtering with active VL rules");
+ return -EBUSY;
}
-
- return 0;
}
if (enabled) {
@@ -2736,7 +2797,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
- dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
/* Switch port identification based on 802.1Q is only possible
* if we are not under a vlan_filtering bridge. So make sure
@@ -2794,29 +2855,36 @@ static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
return 0;
}
-static void sja1105_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
- &priv->bridge_vlans);
- if (rc < 0)
- return;
- if (rc > 0)
- vlan_table_changed = true;
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
+ vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 1024-3071 reserved for dsa_8021q operation");
+ return -EBUSY;
}
+ rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
+ return rc;
+ if (rc > 0)
+ vlan_table_changed = true;
+
if (!vlan_table_changed)
- return;
+ return 0;
- rc = sja1105_build_vlan_table(priv, true);
- if (rc)
- dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
+ return sja1105_build_vlan_table(priv, true);
}
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
@@ -2824,14 +2892,11 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
- if (rc > 0)
- vlan_table_changed = true;
- }
+ rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
if (!vlan_table_changed)
return 0;
@@ -2934,7 +2999,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
+ priv->best_effort_vlan_filtering = true;
rc = sja1105_devlink_setup(ds);
if (rc < 0)
@@ -3274,6 +3339,142 @@ static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
+static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
+ bool enabled)
+{
+ struct sja1105_mac_config_entry *mac;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].dyn_learn = enabled;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+ if (rc)
+ return rc;
+
+ if (enabled)
+ priv->learn_ena |= BIT(port);
+ else
+ priv->learn_ena &= ~BIT(port);
+
+ return 0;
+}
+
+static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_FLOOD) {
+ if (flags.val & BR_FLOOD)
+ priv->ucast_egress_floods |= BIT(to);
+ else
+ priv->ucast_egress_floods &= ~BIT(to);
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ if (flags.val & BR_BCAST_FLOOD)
+ priv->bcast_egress_floods |= BIT(to);
+ else
+ priv->bcast_egress_floods &= ~BIT(to);
+ }
+
+ return sja1105_manage_flood_domains(priv);
+}
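struct switchdev_brport_flags carries both a mask (which flags the request touches) and a val (their new values), which is why the helper tests flags.mask before flags.val. From userspace these bits are driven by commands such as bridge link set dev swp0 flood off. A sketch of the convention; the flag values here are illustrative, the real BR_* constants live in include/linux/if_bridge.h:

#include <stdio.h>

#define BR_FLOOD	(1 << 0)	/* illustrative values only */
#define BR_BCAST_FLOOD	(1 << 1)

struct brport_flags { unsigned long val, mask; };

int main(void)
{
	/* "set flood on, bcast_flood off": both bits in the mask,
	 * only BR_FLOOD in val */
	struct brport_flags f = {
		.mask = BR_FLOOD | BR_BCAST_FLOOD,
		.val = BR_FLOOD,
	};

	if (f.mask & BR_FLOOD)
		printf("unicast flood -> %s\n",
		       (f.val & BR_FLOOD) ? "on" : "off");
	if (f.mask & BR_BCAST_FLOOD)
		printf("broadcast flood -> %s\n",
		       (f.val & BR_BCAST_FLOOD) ? "on" : "off");
	return 0;
}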
+
+static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (match = 0; match < table->entry_count; match++)
+ if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ break;
+
+ if (match == table->entry_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not find FDB entry for unknown multicast");
+ return -ENOSPC;
+ }
+
+ if (flags.val & BR_MCAST_FLOOD)
+ l2_lookup[match].destports |= BIT(to);
+ else
+ l2_lookup[match].destports &= ~BIT(to);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match],
+ true);
+}
+
+static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
+ !priv->info->can_limit_mcast_flood) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This chip cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ rc = sja1105_port_set_learning(priv, port, learn_ena);
+ if (rc)
+ return rc;
+ }
+
+ if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
+ rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
+ if (rc)
+ return rc;
+ }
+
+ /* For chips that can't offload BR_MCAST_FLOOD independently, there
+ * is nothing to do here, since we ensured the configuration is in
+ * sync by offloading BR_FLOOD.
+ */
+ if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
+ rc = sja1105_port_mcast_flood(priv, port, flags,
+ extack);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
@@ -3297,12 +3498,12 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_fdb_del = sja1105_fdb_del,
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
+ .port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
+ .port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
- .port_vlan_prepare = sja1105_vlan_prepare,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_vlan_add,
.port_vlan_del = sja1105_vlan_del,
- .port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 591c5734747d..f7a1514f81e8 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -512,6 +512,7 @@ const struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
@@ -529,6 +530,7 @@ const struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
@@ -546,6 +548,7 @@ const struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
@@ -564,6 +567,7 @@ const struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
@@ -582,6 +586,7 @@ const struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
@@ -601,6 +606,7 @@ const struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 139b7b4fbd0d..a8efb7fac395 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -85,7 +85,7 @@ u32 sja1105_crc32(const void *buf, size_t len)
/* seed */
crc = ~0;
for (i = 0; i < len; i += 4) {
- sja1105_unpack((void *)buf + i, &word, 31, 0, 4);
+ sja1105_unpack(buf + i, &word, 31, 0, 4);
crc = crc32_le(crc, (u8 *)&word, 4);
}
return ~crc;
diff --git a/drivers/net/dsa/xrs700x/Kconfig b/drivers/net/dsa/xrs700x/Kconfig
new file mode 100644
index 000000000000..d10a4dce1676
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_XRS700X
+ tristate
+ depends on NET_DSA
+ select NET_DSA_TAG_XRS700X
+ select REGMAP
+ help
+ This enables support for Arrow SpeedChips XRS7003/7004 gigabit
+ Ethernet switches.
+
+config NET_DSA_XRS700X_I2C
+ tristate "Arrow XRS7000X series switch in I2C mode"
+ depends on NET_DSA && I2C
+ select NET_DSA_XRS700X
+ select REGMAP_I2C
+ help
+ Enable I2C support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
+
+config NET_DSA_XRS700X_MDIO
+ tristate "Arrow XRS7000X series switch in MDIO mode"
+ depends on NET_DSA
+ select NET_DSA_XRS700X
+ help
+ Enable MDIO support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
diff --git a/drivers/net/dsa/xrs700x/Makefile b/drivers/net/dsa/xrs700x/Makefile
new file mode 100644
index 000000000000..51a3a7d9296a
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_XRS700X) += xrs700x.o
+obj-$(CONFIG_NET_DSA_XRS700X_I2C) += xrs700x_i2c.o
+obj-$(CONFIG_NET_DSA_XRS700X_MDIO) += xrs700x_mdio.o
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
new file mode 100644
index 000000000000..f025f968f96d
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+#include <linux/of_device.h>
+#include <linux/netdev_features.h>
+#include <linux/if_hsr.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
+
+#define XRS7000X_SUPPORTED_HSR_FEATURES \
+ (NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
+ NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
+
+#define XRS7003E_ID 0x100
+#define XRS7003F_ID 0x101
+#define XRS7004E_ID 0x200
+#define XRS7004F_ID 0x201
+
+const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
+EXPORT_SYMBOL(xrs7003e_info);
+
+const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
+EXPORT_SYMBOL(xrs7003f_info);
+
+const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
+EXPORT_SYMBOL(xrs7004e_info);
+
+const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
+EXPORT_SYMBOL(xrs7004f_info);
+
+struct xrs700x_regfield {
+ struct reg_field rf;
+ struct regmap_field **rmf;
+};
+
+struct xrs700x_mib {
+ unsigned int offset;
+ const char *name;
+ int stats64_offset;
+};
+
+#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
+#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
+
+static const struct xrs700x_mib xrs700x_mibs[] = {
+ XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
+ XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
+ XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
+ XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
+ XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
+ XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
+ XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
+ XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
+ XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
+ XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
+ XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
+ XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
+ XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
+ XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
+};
+
+static void xrs700x_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(xrs700x_mibs);
+}
+
+static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+ struct rtnl_link_stats64 stats;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&p->mib_mutex);
+
+ /* Capture counter values */
+ regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ unsigned int high = 0, low = 0, reg;
+
+ reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
+ regmap_read(priv->regmap, reg, &low);
+ regmap_read(priv->regmap, reg + 2, &high);
+
+ p->mib_data[i] += (high << 16) | low;
+
+ if (xrs700x_mibs[i].stats64_offset >= 0) {
+ u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
+ *(u64 *)s += p->mib_data[i];
+ }
+ }
+
+ /* multicast must be added to rx_packets (which already includes
+ * unicast and broadcast)
+ */
+ stats.rx_packets += stats.multicast;
+
+ u64_stats_update_begin(&p->syncp);
+ p->stats64 = stats;
+ u64_stats_update_end(&p->syncp);
+
+ mutex_unlock(&p->mib_mutex);
+}
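The stats64_offset field records, at table-definition time, a byte offset into struct rtnl_link_stats64 (or -1 for ethtool-only counters), so the loop above can accumulate each MIB into the right field through pointer arithmetic. A standalone sketch of the same pattern with a reduced stats struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_packets, rx_bytes; };

struct mib { const char *name; int stats_offset; };

static const struct mib mibs[] = {
	{ "rx_unicast",     offsetof(struct stats, rx_packets) },
	{ "rx_good_octets", offsetof(struct stats, rx_bytes) },
	{ "rx_bad_octets",  -1 },	/* ethtool-only: no stats64 field */
};

int main(void)
{
	uint64_t counters[] = { 10, 1500, 3 };	/* pretend hardware reads */
	struct stats s = { 0 };
	size_t i;

	for (i = 0; i < sizeof(mibs) / sizeof(mibs[0]); i++) {
		uint8_t *base = (uint8_t *)&s;

		if (mibs[i].stats_offset < 0)
			continue;
		/* accumulate into the field at the recorded offset */
		*(uint64_t *)(base + mibs[i].stats_offset) += counters[i];
	}
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)s.rx_packets,
	       (unsigned long long)s.rx_bytes);
	return 0;
}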
+
+static void xrs700x_mib_work(struct work_struct *work)
+{
+ struct xrs700x *priv = container_of(work, struct xrs700x,
+ mib_work.work);
+ int i;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ xrs700x_read_port_counters(priv, i);
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+}
+
+static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
+ u64 *data)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+
+ xrs700x_read_port_counters(priv, port);
+
+ mutex_lock(&p->mib_mutex);
+ memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ *s = p->stats64;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+}
+
+static int xrs700x_setup_regmap_range(struct xrs700x *priv)
+{
+ struct xrs700x_regfield regfields[] = {
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_forward
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_management
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_sel_speed
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_cur_speed
+ }
+ };
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(regfields); i++) {
+ *regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ regfields[i].rf);
+ if (IS_ERR(*regfields[i].rmf))
+ return PTR_ERR(*regfields[i].rmf);
+ }
+
+ return 0;
+}
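REG_FIELD_ID describes one bitfield replicated across ports: regmap_fields_write(field, port, val) targets base + port * XRS_PORT_OFFSET and masks in only the named bits. A sketch of the arithmetic, with assumed base and stride values (0x0 and 0x10 are placeholders, not the real register map):

#include <stdio.h>

int main(void)
{
	/* assumed: XRS_PORT_STATE(0) = 0x0, XRS_PORT_OFFSET = 0x10 */
	unsigned int base = 0x0, stride = 0x10;
	unsigned int lsb = 2, msb = 3;	/* the ps_management field above */
	unsigned int port = 2, val = 1;

	unsigned int reg = base + port * stride;
	unsigned int mask = ((1U << (msb - lsb + 1)) - 1) << lsb;

	printf("write 0x%x (mask 0x%x) to reg 0x%02x\n",
	       (val << lsb) & mask, mask, reg);
	return 0;
}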
+
+static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_XRS700X;
+}
+
+static int xrs700x_reset(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
+ val, !(val & XRS_GENERAL_RESET),
+ 10, 1000);
+error:
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int bpdus = 1;
+ unsigned int val;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ bpdus = 0;
+ fallthrough;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = XRS_PORT_DISABLED;
+ break;
+ case BR_STATE_LEARNING:
+ val = XRS_PORT_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = XRS_PORT_FORWARDING;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_fields_write(priv->ps_forward, port, val);
+
+ /* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
+ * which allows BPDU forwarding to the CPU port when the front facing
+ * port is in disabled/learning state.
+ */
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);
+
+ dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
+ __func__, port, state, val);
+}
+
+/* Add an inbound policy filter which matches the BPDU destination MAC
+ * and forwards to the CPU port. Leave the policy disabled, it will be
+ * enabled as needed.
+ */
+static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare all 48 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
+ if (ret)
+ return ret;
+
+ /* match BPDU destination 01:80:c2:00:00:00 */
+ for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
+ eth_stp_addr[i] |
+ (eth_stp_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror BPDU to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
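The loop packs the 6-byte BPDU address into 16-bit registers two octets at a time, low byte first. For 01:80:c2:00:00:00 the register values come out as 0x8001, 0x00c2 and 0x0000; a sketch (the register naming in the output is illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t eth_stp_addr[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
	int i;

	for (i = 0; i < 6; i += 2)
		printf("XRS_ETH_ADDR_%d <- 0x%04x\n", i / 2,
		       eth_stp_addr[i] | (eth_stp_addr[i + 1] << 8));
	return 0;
}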
+
+static int xrs700x_port_setup(struct dsa_switch *ds, int port)
+{
+ bool cpu_port = dsa_is_cpu_port(ds, port);
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int ret, i;
+
+ xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);
+
+ /* Disable forwarding to non-CPU ports */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+ if (ret)
+ return ret;
+
+ val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
+ ret = regmap_fields_write(priv->ps_management, port, val);
+ if (ret)
+ return ret;
+
+ if (!cpu_port) {
+ ret = xrs700x_port_add_bpdu_ipf(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_setup(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ int ret, i;
+
+ ret = xrs700x_reset(ds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = xrs700x_port_setup(ds, i);
+ if (ret)
+ return ret;
+ }
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+
+ return 0;
+}
+
+static void xrs700x_teardown(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+
+ cancel_delayed_work_sync(&priv->mib_work);
+}
+
+static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ case 3:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ /* The switch only supports full duplex. */
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = XRS_PORT_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = XRS_PORT_SPEED_100;
+ break;
+ case SPEED_10:
+ val = XRS_PORT_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ regmap_fields_write(priv->ps_sel_speed, port, val);
+
+ dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
+ __func__, port, mode, speed);
+}
+
+static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
+ struct net_device *bridge, bool join)
+{
+ unsigned int i, cpu_mask = 0, mask = 0;
+ struct xrs700x *priv = ds->priv;
+ int ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+
+ cpu_mask |= BIT(i);
+
+ if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ continue;
+
+ mask |= BIT(i);
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
+ if (ret)
+ return ret;
+ }
+
+ if (!join) {
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
+ cpu_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ return xrs700x_bridge_common(ds, port, bridge, true);
+}
+
+static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ xrs700x_bridge_common(ds, port, bridge, false);
+}
+
+static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ unsigned int val = XRS_HSR_CFG_HSR_PRP;
+ struct dsa_port *partner = NULL, *dp;
+ struct xrs700x *priv = ds->priv;
+ struct net_device *slave;
+ int ret, i, hsr_pair[2];
+ enum hsr_version ver;
+
+ ret = hsr_get_version(hsr, &ver);
+ if (ret)
+ return ret;
+
+ /* Only ports 1 and 2 can be HSR/PRP redundant ports. */
+ if (port != 1 && port != 2)
+ return -EOPNOTSUPP;
+
+ if (ver == HSR_V1)
+ val |= XRS_HSR_CFG_HSR;
+ else if (ver == PRP_V1)
+ val |= XRS_HSR_CFG_PRP;
+ else
+ return -EOPNOTSUPP;
+
+ dsa_hsr_foreach_port(dp, ds, hsr) {
+ partner = dp;
+ }
+
+ /* We can't enable redundancy on the switch until both
+ * redundant ports have signed up.
+ */
+ if (!partner)
+ return 0;
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_DISABLED);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
+
+ regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
+ val | XRS_HSR_CFG_LANID_A);
+ regmap_write(priv->regmap, XRS_HSR_CFG(port),
+ val | XRS_HSR_CFG_LANID_B);
+
+ /* Clear bits for both redundant ports (HSR only) and the CPU port to
+ * enable forwarding.
+ */
+ val = GENMASK(ds->num_ports - 1, 0);
+ if (ver == HSR_V1) {
+ val &= ~BIT(partner->index);
+ val &= ~BIT(port);
+ }
+ val &= ~BIT(dsa_upstream_port(ds, port));
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_FORWARDING);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+
+ hsr_pair[0] = port;
+ hsr_pair[1] = partner->index;
+ for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
+ slave = dsa_to_port(ds, hsr_pair[i])->slave;
+ slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
+ }
+
+ return 0;
+}
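In the forwarding mask a set bit disables forwarding, so for a hypothetical 4-port XRS7004 with CPU port 0 and ring ports 1 and 2, GENMASK(3, 0) = 0b1111 minus the two ring ports (HSR only) and the upstream port leaves 0b1000: forwarding stays blocked only toward port 3. A sketch under those assumptions:

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	int num_ports = 4, cpu_port = 0, port = 2, partner = 1;
	unsigned int val = GENMASK(num_ports - 1, 0);	/* 0b1111: block all */

	val &= ~BIT(partner);	/* allow the other ring port (HSR only) */
	val &= ~BIT(port);
	val &= ~BIT(cpu_port);	/* always allow the upstream CPU port */

	printf("XRS_PORT_FWD_MASK = 0x%x\n", val);	/* prints 0x8 */
	return 0;
}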
+
+static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct dsa_port *partner = NULL, *dp;
+ struct xrs700x *priv = ds->priv;
+ struct net_device *slave;
+ int i, hsr_pair[2];
+ unsigned int val;
+
+ dsa_hsr_foreach_port(dp, ds, hsr) {
+ partner = dp;
+ }
+
+ if (!partner)
+ return 0;
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_DISABLED);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
+
+ regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
+ regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);
+
+ /* Clear bit for the CPU port to enable forwarding. */
+ val = GENMASK(ds->num_ports - 1, 0);
+ val &= ~BIT(dsa_upstream_port(ds, port));
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_FORWARDING);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+
+ hsr_pair[0] = port;
+ hsr_pair[1] = partner->index;
+ for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
+ slave = dsa_to_port(ds, hsr_pair[i])->slave;
+ slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops xrs700x_ops = {
+ .get_tag_protocol = xrs700x_get_tag_protocol,
+ .setup = xrs700x_setup,
+ .teardown = xrs700x_teardown,
+ .port_stp_state_set = xrs700x_port_stp_state_set,
+ .phylink_validate = xrs700x_phylink_validate,
+ .phylink_mac_link_up = xrs700x_mac_link_up,
+ .get_strings = xrs700x_get_strings,
+ .get_sset_count = xrs700x_get_sset_count,
+ .get_ethtool_stats = xrs700x_get_ethtool_stats,
+ .get_stats64 = xrs700x_get_stats64,
+ .port_bridge_join = xrs700x_bridge_join,
+ .port_bridge_leave = xrs700x_bridge_leave,
+ .port_hsr_join = xrs700x_hsr_join,
+ .port_hsr_leave = xrs700x_hsr_leave,
+};
+
+static int xrs700x_detect(struct xrs700x *priv)
+{
+ const struct xrs700x_info *info;
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
+ if (ret) {
+ dev_err(priv->dev, "error %d while reading switch id.\n",
+ ret);
+ return ret;
+ }
+
+ info = of_device_get_match_data(priv->dev);
+ if (!info)
+ return -EINVAL;
+
+ if (info->id == id) {
+ priv->ds->num_ports = info->num_ports;
+ dev_info(priv->dev, "%s detected.\n", info->name);
+ return 0;
+ }
+
+ dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
+ info->id, id);
+
+ return -ENODEV;
+}
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
+{
+ struct dsa_switch *ds;
+ struct xrs700x *priv;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+
+ priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
+
+ ds->ops = &xrs700x_ops;
+ ds->priv = priv;
+ priv->dev = base;
+
+ priv->ds = ds;
+ priv->priv = devpriv;
+
+ return priv;
+}
+EXPORT_SYMBOL(xrs700x_switch_alloc);
+
+static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+
+ p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
+ sizeof(*p->mib_data), GFP_KERNEL);
+ if (!p->mib_data)
+ return -ENOMEM;
+
+ mutex_init(&p->mib_mutex);
+ u64_stats_init(&p->syncp);
+
+ return 0;
+}
+
+int xrs700x_switch_register(struct xrs700x *priv)
+{
+ int ret;
+ int i;
+
+ ret = xrs700x_detect(priv);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_setup_regmap_range(priv);
+ if (ret)
+ return ret;
+
+ priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
+ sizeof(*priv->ports), GFP_KERNEL);
+ if (!priv->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ ret = xrs700x_alloc_port_mib(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_register);
+
+void xrs700x_switch_remove(struct xrs700x *priv)
+{
+ dsa_unregister_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_remove);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
new file mode 100644
index 000000000000..ff62cf61b091
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+
+struct xrs700x_info {
+ unsigned int id;
+ const char *name;
+ size_t num_ports;
+};
+
+extern const struct xrs700x_info xrs7003e_info;
+extern const struct xrs700x_info xrs7003f_info;
+extern const struct xrs700x_info xrs7004e_info;
+extern const struct xrs700x_info xrs7004f_info;
+
+struct xrs700x_port {
+ struct mutex mib_mutex; /* protects mib_data */
+ u64 *mib_data;
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync syncp;
+};
+
+struct xrs700x {
+ struct dsa_switch *ds;
+ struct device *dev;
+ void *priv;
+ struct regmap *regmap;
+ struct regmap_field *ps_forward;
+ struct regmap_field *ps_management;
+ struct regmap_field *ps_sel_speed;
+ struct regmap_field *ps_cur_speed;
+ struct delayed_work mib_work;
+ struct xrs700x_port *ports;
+};
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
+int xrs700x_switch_register(struct xrs700x *priv);
+void xrs700x_switch_remove(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
new file mode 100644
index 000000000000..489d9385b4f0
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
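+/* I2C wire format: a 32-bit big-endian register address, followed by a
+ * 16-bit big-endian data word on writes (reads fetch the word back).
+ */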
+struct xrs700x_i2c_cmd {
+ __be32 reg;
+ __be16 val;
+} __packed;
+
+static int xrs700x_i2c_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct xrs700x_i2c_cmd cmd;
+ int ret;
+
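+ /* Setting the low address bit marks this as a read transaction. */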
+ cmd.reg = cpu_to_be32(reg | 1);
+
+ ret = i2c_master_send(i2c, (char *)&cmd.reg, sizeof(cmd.reg));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(i2c, (char *)&cmd.val, sizeof(cmd.val));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_recv returned %d\n", ret);
+ return ret;
+ }
+
+ *val = be16_to_cpu(cmd.val);
+ return 0;
+}
+
+static int xrs700x_i2c_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct xrs700x_i2c_cmd cmd;
+ int ret;
+
+ cmd.reg = cpu_to_be32(reg);
+ cmd.val = cpu_to_be16(val);
+
+ ret = i2c_master_send(i2c, (char *)&cmd, sizeof(cmd));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_i2c_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_i2c_reg_read,
+ .reg_write = xrs700x_i2c_reg_write,
+ .max_register = 0,
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&i2c->dev, i2c);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&i2c->dev, NULL, &i2c->dev,
+ &xrs700x_i2c_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* The DSA core may not be ready yet; propagate the error (e.g. -EPROBE_DEFER). */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xrs700x_i2c_remove(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ xrs700x_switch_remove(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id xrs700x_i2c_id[] = {
+ { "xrs700x-switch", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
+
+static const struct of_device_id __maybe_unused xrs700x_i2c_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_i2c_dt_ids);
+
+static struct i2c_driver xrs700x_i2c_driver = {
+ .driver = {
+ .name = "xrs700x-i2c",
+ .of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
+ },
+ .probe = xrs700x_i2c_probe,
+ .remove = xrs700x_i2c_remove,
+ .id_table = xrs700x_i2c_id,
+};
+
+module_i2c_driver(xrs700x_i2c_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
new file mode 100644
index 000000000000..44f58bee04a4
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/of.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
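+/* Switch registers are accessed indirectly through three PHY registers:
+ * the upper 16 bits of the 32-bit register address go into IBA1, the
+ * lower bits plus a read/write flag into IBA0, and data is transferred
+ * through IBD.
+ */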
+#define XRS_MDIO_IBA0 0x10
+#define XRS_MDIO_IBA1 0x11
+#define XRS_MDIO_IBD 0x14
+
+#define XRS_IB_READ 0x0
+#define XRS_IB_WRITE 0x1
+
+static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
+ return ret;
+ }
+
+ *val = (unsigned int)ret;
+
+ return 0;
+}
+
+static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
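+ /* Load the data word first; writing IBA0 last, with the write flag
+ * set, appears to commit the transaction.
+ */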
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_mdio_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_mdio_reg_read,
+ .reg_write = xrs700x_mdio_reg_write,
+ .max_register = XRS_VLAN(VLAN_N_VID - 1),
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&mdiodev->dev, mdiodev);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev,
+ &xrs700x_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* The DSA core may not be ready yet; propagate the error (e.g. -EPROBE_DEFER). */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ xrs700x_switch_remove(priv);
+}
+
+static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_mdio_dt_ids);
+
+static struct mdio_driver xrs700x_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "xrs700x-mdio",
+ .of_match_table = of_match_ptr(xrs700x_mdio_dt_ids),
+ },
+ .probe = xrs700x_mdio_probe,
+ .remove = xrs700x_mdio_remove,
+};
+
+mdio_module_driver(xrs700x_mdio_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA MDIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_reg.h b/drivers/net/dsa/xrs700x/xrs700x_reg.h
new file mode 100644
index 000000000000..470d00e07f15
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_reg.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Register Base Addresses */
+#define XRS_DEVICE_ID_BASE 0x0
+#define XRS_GPIO_BASE 0x10000
+#define XRS_PORT_OFFSET 0x10000
+#define XRS_PORT_BASE(x) (0x200000 + XRS_PORT_OFFSET * (x))
+#define XRS_RTC_BASE 0x280000
+#define XRS_TS_OFFSET 0x8000
+#define XRS_TS_BASE(x) (0x290000 + XRS_TS_OFFSET * (x))
+#define XRS_SWITCH_CONF_BASE 0x300000
+
+/* Device Identification Registers */
+#define XRS_DEV_ID0 (XRS_DEVICE_ID_BASE + 0)
+#define XRS_DEV_ID1 (XRS_DEVICE_ID_BASE + 2)
+#define XRS_INT_ID0 (XRS_DEVICE_ID_BASE + 4)
+#define XRS_INT_ID1 (XRS_DEVICE_ID_BASE + 6)
+#define XRS_REV_ID (XRS_DEVICE_ID_BASE + 8)
+
+/* GPIO Registers */
+#define XRS_CONFIG0 (XRS_GPIO_BASE + 0x1000)
+#define XRS_INPUT_STATUS0 (XRS_GPIO_BASE + 0x1002)
+#define XRS_CONFIG1 (XRS_GPIO_BASE + 0x1004)
+#define XRS_INPUT_STATUS1 (XRS_GPIO_BASE + 0x1006)
+#define XRS_CONFIG2 (XRS_GPIO_BASE + 0x1008)
+#define XRS_INPUT_STATUS2 (XRS_GPIO_BASE + 0x100a)
+
+/* Port Configuration Registers */
+#define XRS_PORT_GEN_BASE(x) (XRS_PORT_BASE(x) + 0x0)
+#define XRS_PORT_HSR_BASE(x) (XRS_PORT_BASE(x) + 0x2000)
+#define XRS_PORT_PTP_BASE(x) (XRS_PORT_BASE(x) + 0x4000)
+#define XRS_PORT_CNT_BASE(x) (XRS_PORT_BASE(x) + 0x6000)
+#define XRS_PORT_IPO_BASE(x) (XRS_PORT_BASE(x) + 0x8000)
+
+/* Port Configuration Registers - General and State */
+#define XRS_PORT_STATE(x) (XRS_PORT_GEN_BASE(x) + 0x0)
+#define XRS_PORT_FORWARDING 0
+#define XRS_PORT_LEARNING 1
+#define XRS_PORT_DISABLED 2
+#define XRS_PORT_MODE_NORMAL 0
+#define XRS_PORT_MODE_MANAGEMENT 1
+#define XRS_PORT_SPEED_1000 0x12
+#define XRS_PORT_SPEED_100 0x20
+#define XRS_PORT_SPEED_10 0x30
+#define XRS_PORT_VLAN(x) (XRS_PORT_GEN_BASE(x) + 0x10)
+#define XRS_PORT_VLAN0_MAPPING(x) (XRS_PORT_GEN_BASE(x) + 0x12)
+#define XRS_PORT_FWD_MASK(x) (XRS_PORT_GEN_BASE(x) + 0x14)
+#define XRS_PORT_VLAN_PRIO(x) (XRS_PORT_GEN_BASE(x) + 0x16)
+
+/* Port Configuration Registers - HSR/PRP */
+#define XRS_HSR_CFG(x) (XRS_PORT_HSR_BASE(x) + 0x0)
+#define XRS_HSR_CFG_HSR_PRP BIT(0)
+#define XRS_HSR_CFG_HSR 0
+#define XRS_HSR_CFG_PRP BIT(8)
+#define XRS_HSR_CFG_LANID_A 0
+#define XRS_HSR_CFG_LANID_B BIT(10)
+
+/* Port Configuration Registers - PTP */
+#define XRS_PTP_RX_SYNC_DELAY_NS_LO(x) (XRS_PORT_PTP_BASE(x) + 0x2)
+#define XRS_PTP_RX_SYNC_DELAY_NS_HI(x) (XRS_PORT_PTP_BASE(x) + 0x4)
+#define XRS_PTP_RX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0xa)
+#define XRS_PTP_TX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0x12)
+
+/* Port Configuration Registers - Counter */
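+/* The counter offsets below are defined relative to port 0; per-port
+ * addresses are presumably formed by adding XRS_PORT_OFFSET * (port).
+ */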
+#define XRS_CNT_CTRL(x) (XRS_PORT_CNT_BASE(x) + 0x0)
+#define XRS_RX_GOOD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x200)
+#define XRS_RX_GOOD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x202)
+#define XRS_RX_BAD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x204)
+#define XRS_RX_BAD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x206)
+#define XRS_RX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x208)
+#define XRS_RX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x20a)
+#define XRS_RX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x20c)
+#define XRS_RX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x20e)
+#define XRS_RX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x210)
+#define XRS_RX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x212)
+#define XRS_RX_UNDERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x214)
+#define XRS_RX_UNDERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x216)
+#define XRS_RX_FRAGMENTS_L (XRS_PORT_CNT_BASE(0) + 0x218)
+#define XRS_RX_FRAGMENTS_H (XRS_PORT_CNT_BASE(0) + 0x21a)
+#define XRS_RX_OVERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x21c)
+#define XRS_RX_OVERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x21e)
+#define XRS_RX_JABBER_L (XRS_PORT_CNT_BASE(0) + 0x220)
+#define XRS_RX_JABBER_H (XRS_PORT_CNT_BASE(0) + 0x222)
+#define XRS_RX_ERR_L (XRS_PORT_CNT_BASE(0) + 0x224)
+#define XRS_RX_ERR_H (XRS_PORT_CNT_BASE(0) + 0x226)
+#define XRS_RX_CRC_L (XRS_PORT_CNT_BASE(0) + 0x228)
+#define XRS_RX_CRC_H (XRS_PORT_CNT_BASE(0) + 0x22a)
+#define XRS_RX_64_L (XRS_PORT_CNT_BASE(0) + 0x22c)
+#define XRS_RX_64_H (XRS_PORT_CNT_BASE(0) + 0x22e)
+#define XRS_RX_65_127_L (XRS_PORT_CNT_BASE(0) + 0x230)
+#define XRS_RX_65_127_H (XRS_PORT_CNT_BASE(0) + 0x232)
+#define XRS_RX_128_255_L (XRS_PORT_CNT_BASE(0) + 0x234)
+#define XRS_RX_128_255_H (XRS_PORT_CNT_BASE(0) + 0x236)
+#define XRS_RX_256_511_L (XRS_PORT_CNT_BASE(0) + 0x238)
+#define XRS_RX_256_511_H (XRS_PORT_CNT_BASE(0) + 0x23a)
+#define XRS_RX_512_1023_L (XRS_PORT_CNT_BASE(0) + 0x23c)
+#define XRS_RX_512_1023_H (XRS_PORT_CNT_BASE(0) + 0x23e)
+#define XRS_RX_1024_1536_L (XRS_PORT_CNT_BASE(0) + 0x240)
+#define XRS_RX_1024_1536_H (XRS_PORT_CNT_BASE(0) + 0x242)
+#define XRS_RX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x244)
+#define XRS_RX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x246)
+#define XRS_RX_WRONGLAN_L (XRS_PORT_CNT_BASE(0) + 0x248)
+#define XRS_RX_WRONGLAN_H (XRS_PORT_CNT_BASE(0) + 0x24a)
+#define XRS_RX_DUPLICATE_L (XRS_PORT_CNT_BASE(0) + 0x24c)
+#define XRS_RX_DUPLICATE_H (XRS_PORT_CNT_BASE(0) + 0x24e)
+#define XRS_TX_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x280)
+#define XRS_TX_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x282)
+#define XRS_TX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x284)
+#define XRS_TX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x286)
+#define XRS_TX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x288)
+#define XRS_TX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x28a)
+#define XRS_TX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x28c)
+#define XRS_TX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x28e)
+#define XRS_TX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x290)
+#define XRS_TX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x292)
+#define XRS_PRIQ_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c0)
+#define XRS_PRIQ_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c2)
+#define XRS_EARLY_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c4)
+#define XRS_EARLY_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c6)
+
+/* Port Configuration Registers - Inbound Policy 0 - 15 */
+#define XRS_ETH_ADDR_CFG(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x0)
+#define XRS_ETH_ADDR_FWD_ALLOW(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x2)
+#define XRS_ETH_ADDR_FWD_MIRROR(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x4)
+#define XRS_ETH_ADDR_0(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x8)
+#define XRS_ETH_ADDR_1(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xa)
+#define XRS_ETH_ADDR_2(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xc)
+
+/* RTC Registers */
+#define XRS_CUR_NSEC0 (XRS_RTC_BASE + 0x1004)
+#define XRS_CUR_NSEC1 (XRS_RTC_BASE + 0x1006)
+#define XRS_CUR_SEC0 (XRS_RTC_BASE + 0x1008)
+#define XRS_CUR_SEC1 (XRS_RTC_BASE + 0x100a)
+#define XRS_CUR_SEC2 (XRS_RTC_BASE + 0x100c)
+#define XRS_TIME_CC0 (XRS_RTC_BASE + 0x1010)
+#define XRS_TIME_CC1 (XRS_RTC_BASE + 0x1012)
+#define XRS_TIME_CC2 (XRS_RTC_BASE + 0x1014)
+#define XRS_STEP_SIZE0 (XRS_RTC_BASE + 0x1020)
+#define XRS_STEP_SIZE1 (XRS_RTC_BASE + 0x1022)
+#define XRS_STEP_SIZE2 (XRS_RTC_BASE + 0x1024)
+#define XRS_ADJUST_NSEC0 (XRS_RTC_BASE + 0x1034)
+#define XRS_ADJUST_NSEC1 (XRS_RTC_BASE + 0x1036)
+#define XRS_ADJUST_SEC0 (XRS_RTC_BASE + 0x1038)
+#define XRS_ADJUST_SEC1 (XRS_RTC_BASE + 0x103a)
+#define XRS_ADJUST_SEC2 (XRS_RTC_BASE + 0x103c)
+#define XRS_TIME_CMD (XRS_RTC_BASE + 0x1040)
+
+/* Time Stamper Registers */
+#define XRS_TS_CTRL(x) (XRS_TS_BASE(x) + 0x1000)
+#define XRS_TS_INT_MASK(x) (XRS_TS_BASE(x) + 0x1008)
+#define XRS_TS_INT_STATUS(x) (XRS_TS_BASE(x) + 0x1010)
+#define XRS_TS_NSEC0(x) (XRS_TS_BASE(x) + 0x1104)
+#define XRS_TS_NSEC1(x) (XRS_TS_BASE(x) + 0x1106)
+#define XRS_TS_SEC0(x) (XRS_TS_BASE(x) + 0x1108)
+#define XRS_TS_SEC1(x) (XRS_TS_BASE(x) + 0x110a)
+#define XRS_TS_SEC2(x) (XRS_TS_BASE(x) + 0x110c)
+#define XRS_PNCT0(x) (XRS_TS_BASE(x) + 0x1110)
+#define XRS_PNCT1(x) (XRS_TS_BASE(x) + 0x1112)
+
+/* Switch Configuration Registers */
+#define XRS_SWITCH_GEN_BASE (XRS_SWITCH_CONF_BASE + 0x0)
+#define XRS_SWITCH_TS_BASE (XRS_SWITCH_CONF_BASE + 0x2000)
+#define XRS_SWITCH_VLAN_BASE (XRS_SWITCH_CONF_BASE + 0x4000)
+
+/* Switch Configuration Registers - General */
+#define XRS_GENERAL (XRS_SWITCH_GEN_BASE + 0x10)
+#define XRS_GENERAL_TIME_TRAILER BIT(9)
+#define XRS_GENERAL_MOD_SYNC BIT(10)
+#define XRS_GENERAL_CUT_THRU BIT(13)
+#define XRS_GENERAL_CLR_MAC_TBL BIT(14)
+#define XRS_GENERAL_RESET BIT(15)
+#define XRS_MT_CLEAR_MASK (XRS_SWITCH_GEN_BASE + 0x12)
+#define XRS_ADDRESS_AGING (XRS_SWITCH_GEN_BASE + 0x20)
+#define XRS_TS_CTRL_TX (XRS_SWITCH_GEN_BASE + 0x28)
+#define XRS_TS_CTRL_RX (XRS_SWITCH_GEN_BASE + 0x2a)
+#define XRS_INT_MASK (XRS_SWITCH_GEN_BASE + 0x2c)
+#define XRS_INT_STATUS (XRS_SWITCH_GEN_BASE + 0x2e)
+#define XRS_MAC_TABLE0 (XRS_SWITCH_GEN_BASE + 0x200)
+#define XRS_MAC_TABLE1 (XRS_SWITCH_GEN_BASE + 0x202)
+#define XRS_MAC_TABLE2 (XRS_SWITCH_GEN_BASE + 0x204)
+#define XRS_MAC_TABLE3 (XRS_SWITCH_GEN_BASE + 0x206)
+
+/* Switch Configuration Registers - Frame Timestamp */
+#define XRS_TX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x0)
+#define XRS_TX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x2)
+#define XRS_TX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x4)
+#define XRS_TX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x6)
+#define XRS_TX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+#define XRS_RX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x200)
+#define XRS_RX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x202)
+#define XRS_RX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x204)
+#define XRS_RX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x206)
+#define XRS_RX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+
+/* Switch Configuration Registers - VLAN */
+#define XRS_VLAN(v) (XRS_SWITCH_VLAN_BASE + 0x2 * (v))
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 667f38c9e4c6..53e1f7e07959 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -335,12 +335,11 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int el3_isa_remove(struct device *pdev,
+static void el3_isa_remove(struct device *pdev,
unsigned int ndev)
{
el3_device_remove(pdev);
dev_set_drvdata(pdev, NULL);
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index de50e8b9e656..ad04660b97b8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,7 +33,6 @@ source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/aurora/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index f8f38dcb5f8a..1e7dc8a7762d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 06596fa1f9fe..102f2c91fdb8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -404,6 +404,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(!xdpf)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
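+ /* Report the failure to the caller as XDP_ABORTED. */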
+ verdict = XDP_ABORTED;
break;
}
@@ -424,7 +425,10 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
xdp_stat = &rx_ring->rx_stats.xdp_redirect;
break;
}
- fallthrough;
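+ /* The redirect failed; count and report it as XDP_ABORTED. */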
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = XDP_ABORTED;
+ break;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
@@ -1585,10 +1589,9 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
int ret;
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- xdp->data = page_address(rx_info->page) + rx_info->page_offset;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_hard_start = page_address(rx_info->page);
- xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+ rx_info->page_offset,
+ rx_ring->ena_bufs[0].len, false);
/* If for some reason we received a bigger packet than
* we expect, then we simply drop it
*/
@@ -1634,8 +1637,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
- xdp.rxq = &rx_ring->xdp_rxq;
- xdp.frame_sz = ENA_PAGE_SIZE;
+ xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
xdp_verdict = XDP_PASS;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b40d4377cc71..b2cd3bdba9f8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1279,10 +1279,18 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
+#ifndef MDIO_PMA_RX_CTRL1
+#define MDIO_PMA_RX_CTRL1 0x8051
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
+#ifndef MDIO_PCS_DIGITAL_STAT
+#define MDIO_PCS_DIGITAL_STAT 0x8010
+#endif
+
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1358,6 +1366,8 @@
#define XGBE_KR_TRAINING_ENABLE BIT(1)
#define XGBE_PCS_CL37_BP BIT(12)
+#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
+#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
#define XGBE_AN_CL37_INT_CMPLT BIT(0)
#define XGBE_AN_CL37_INT_MASK 0x01
@@ -1375,6 +1385,10 @@
#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+#define XGBE_PMA_RX_RST_0_MASK BIT(4)
+#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
+#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2709a2db5657..4f714f874c4f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1368,6 +1368,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
return;
netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(pdata->netdev);
xgbe_stop_timers(pdata);
flush_workqueue(pdata->dev_workqueue);
@@ -2295,8 +2296,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_setup_tc = xgbe_setup_tc,
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = xgbe_features_check,
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 93ef5a30cb8d..4e97b4869522 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
&an_restart);
if (an_restart) {
xgbe_phy_config_aneg(pdata);
- return;
+ goto adjust_link;
}
if (pdata->phy.link) {
@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
- netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 859ded0c06b0..18e48b3bc402 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -922,6 +922,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
if ((phy_id & 0xfffffff0) != 0x03625d10)
return false;
+ /* Soft-reset the PHY; genphy_soft_reset() polls the self-clearing reset bit. */
+ genphy_soft_reset(phy_data->phydev);
+
/* Disable RGMII mode */
phy_write(phy_data->phydev, 0x18, 0x7007);
reg = phy_read(phy_data->phydev, 0x18);
@@ -1953,6 +1956,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
+ XGBE_PCS_PSEQ_STATE_MASK);
+ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
+ /* The mailbox command timed out, so the RX block must be reset.
+ * This is done by asserting the reset bit and waiting for the
+ * reset to complete.
+ */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
+ ndelay(20);
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
+ usleep_range(40, 50);
+ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
+ }
+}
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int cmd, unsigned int sub_cmd)
{
@@ -1960,9 +1984,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int wait;
/* Log if a previous command did not complete */
- if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox not ready for command\n");
+ xgbe_phy_rx_reset(pdata);
+ }
/* Construct the command */
XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
@@ -1984,6 +2010,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox command did not complete\n");
+
+ /* Reset on error */
+ xgbe_phy_rx_reset(pdata);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
@@ -2584,6 +2613,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
if (reg & MDIO_STAT1_LSTATUS)
return 1;
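+ /* No link: on backplane ports with autoneg enabled, force the
+ * carrier down and restart auto-negotiation unless the link is
+ * still in its initialization phase.
+ */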
+ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
+ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
+ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+ *an_restart = 1;
+ }
+ }
+
/* No link, attempt a receiver reset cycle */
if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 8f70a3909929..4af0cd9530de 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -71,8 +71,10 @@ static int aq_ndev_open(struct net_device *ndev)
goto err_exit;
err = aq_nic_start(aq_nic);
- if (err < 0)
+ if (err < 0) {
+ aq_nic_stop(aq_nic);
goto err_exit;
+ }
err_exit:
if (err < 0)
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index dd5c8a9038bb..a60ce9030581 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -223,8 +223,6 @@
#define AG71XX_REG_RX_SM 0x01b0
#define AG71XX_REG_TX_SM 0x01b4
-#define ETH_SWITCH_HEADER_LEN 2
-
#define AG71XX_DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV \
| NETIF_MSG_PROBE \
@@ -933,7 +931,7 @@ static void ag71xx_hw_setup(struct ag71xx *ag)
static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
- return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
+ return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
deleted file mode 100644
index 9ee30ea90bfa..000000000000
--- a/drivers/net/ethernet/aurora/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config NET_VENDOR_AURORA
- bool "Aurora VLSI devices"
- default y
- help
- If you have a network (Ethernet) device belonging to this class,
- say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- questions about Aurora devices. If you say Y, you will be asked
- for your specific device in the following questions.
-
-if NET_VENDOR_AURORA
-
-config AURORA_NB8800
- tristate "Aurora AU-NB8800 support"
- depends on HAS_DMA
- select PHYLIB
- help
- Support for the AU-NB8800 gigabit Ethernet controller.
-
-endif
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
deleted file mode 100644
index 5b20185cbd62..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ /dev/null
@@ -1,1520 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
- *
- * Mostly rewritten, based on driver from Sigma Designs. Original
- * copyright notice below.
- *
- * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
- *
- * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
- */
-
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/cache.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <asm/barrier.h>
-
-#include "nb8800.h"
-
-static void nb8800_tx_done(struct net_device *dev);
-static int nb8800_dma_stop(struct net_device *dev);
-
-static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
-{
- return readb_relaxed(priv->base + reg);
-}
-
-static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
-{
- return readl_relaxed(priv->base + reg);
-}
-
-static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
-{
- writeb_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
-{
- writew_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
-{
- writel_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readb(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writeb(priv, reg, new);
-}
-
-static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readl(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writel(priv, reg, new);
-}
-
-static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
- bool set)
-{
- nb8800_maskb(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, 0);
-}
-
-static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
- bool set)
-{
- nb8800_maskl(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, 0);
-}
-
-static int nb8800_mdio_wait(struct mii_bus *bus)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
-
- return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
- val, !(val & MDIO_CMD_GO), 1, 1000);
-}
-
-static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
-{
- struct nb8800_priv *priv = bus->priv;
- int err;
-
- err = nb8800_mdio_wait(bus);
- if (err)
- return err;
-
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
- udelay(10);
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
-
- return nb8800_mdio_wait(bus);
-}
-
-static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
- int err;
-
- err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
- if (err)
- return err;
-
- val = nb8800_readl(priv, NB8800_MDIO_STS);
- if (val & MDIO_STS_ERR)
- return 0xffff;
-
- return val & 0xffff;
-}
-
-static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
-{
- u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
- MDIO_CMD_DATA(val) | MDIO_CMD_WR;
-
- return nb8800_mdio_cmd(bus, cmd);
-}
-
-static void nb8800_mac_tx(struct net_device *dev, bool enable)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
- cpu_relax();
-
- nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
-}
-
-static void nb8800_mac_rx(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
-}
-
-static void nb8800_mac_af(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
-}
-
-static void nb8800_start_rx(struct net_device *dev)
-{
- nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
-}
-
-static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
- int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
- dma_addr_t dma_addr;
- struct page *page;
- unsigned long offset;
- void *data;
-
- data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
- if (!data)
- return -ENOMEM;
-
- page = virt_to_head_page(data);
- offset = data - page_address(page);
-
- dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- skb_free_frag(data);
- return -ENOMEM;
- }
-
- rxb->page = page;
- rxb->offset = offset;
- rxd->desc.s_addr = dma_addr;
-
- return 0;
-}
-
-static void nb8800_receive(struct net_device *dev, unsigned int i,
- unsigned int len)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct page *page = priv->rx_bufs[i].page;
- int offset = priv->rx_bufs[i].offset;
- void *data = page_address(page) + offset;
- dma_addr_t dma = rxd->desc.s_addr;
- struct sk_buff *skb;
- unsigned int size;
- int err;
-
- size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
-
- skb = napi_alloc_skb(&priv->napi, size);
- if (!skb) {
- netdev_err(dev, "rx skb allocation failed\n");
- dev->stats.rx_dropped++;
- return;
- }
-
- if (len <= RX_COPYBREAK) {
- dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
- skb_put_data(skb, data, len);
- dma_sync_single_for_device(&dev->dev, dma, len,
- DMA_FROM_DEVICE);
- } else {
- err = nb8800_alloc_rx(dev, i, true);
- if (err) {
- netdev_err(dev, "rx buffer allocation failed\n");
- dev->stats.rx_dropped++;
- dev_kfree_skb(skb);
- return;
- }
-
- dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
- skb_put_data(skb, data, RX_COPYHDR);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- offset + RX_COPYHDR, len - RX_COPYHDR,
- RX_BUF_SIZE);
- }
-
- skb->protocol = eth_type_trans(skb, dev);
- napi_gro_receive(&priv->napi, skb);
-}
-
-static void nb8800_rx_error(struct net_device *dev, u32 report)
-{
- if (report & RX_LENGTH_ERR)
- dev->stats.rx_length_errors++;
-
- if (report & RX_FCS_ERR)
- dev->stats.rx_crc_errors++;
-
- if (report & RX_FIFO_OVERRUN)
- dev->stats.rx_fifo_errors++;
-
- if (report & RX_ALIGNMENT_ERROR)
- dev->stats.rx_frame_errors++;
-
- dev->stats.rx_errors++;
-}
-
-static int nb8800_poll(struct napi_struct *napi, int budget)
-{
- struct net_device *dev = napi->dev;
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- unsigned int last = priv->rx_eoc;
- unsigned int next;
- int work = 0;
-
- nb8800_tx_done(dev);
-
-again:
- do {
- unsigned int len;
-
- next = (last + 1) % RX_DESC_COUNT;
-
- rxd = &priv->rx_descs[next];
-
- if (!rxd->report)
- break;
-
- len = RX_BYTES_TRANSFERRED(rxd->report);
-
- if (IS_RX_ERROR(rxd->report))
- nb8800_rx_error(dev, rxd->report);
- else
- nb8800_receive(dev, next, len);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- if (rxd->report & RX_MULTICAST_PKT)
- dev->stats.multicast++;
-
- rxd->report = 0;
- last = next;
- work++;
- } while (work < budget);
-
- if (work) {
- priv->rx_descs[last].desc.config |= DESC_EOC;
- wmb(); /* ensure new EOC is written before clearing old */
- priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
- priv->rx_eoc = last;
- nb8800_start_rx(dev);
- }
-
- if (work < budget) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- /* If a packet arrived after we last checked but
- * before writing RX_ITR, the interrupt will be
- * delayed, so we retrieve it now.
- */
- if (priv->rx_descs[next].report)
- goto again;
-
- napi_complete_done(napi, work);
- }
-
- return work;
-}
-
-static void __nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb;
- u32 txc_cr;
-
- txb = &priv->tx_bufs[priv->tx_queue];
- if (!txb->ready)
- return;
-
- txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
- if (txc_cr & TCR_EN)
- return;
-
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb(); /* ensure desc addr is written before starting DMA */
- nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
-
- priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
-}
-
-static void nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock_irq(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock_irq(&priv->tx_lock);
-}
-
-static void nb8800_tx_dma_start_irq(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock(&priv->tx_lock);
-}
-
-static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_desc *txd;
- struct nb8800_tx_buf *txb;
- struct nb8800_dma_desc *desc;
- dma_addr_t dma_addr;
- unsigned int dma_len;
- unsigned int align;
- unsigned int next;
- bool xmit_more;
-
- if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
-
- align = (8 - (uintptr_t)skb->data) & 7;
-
- dma_len = skb->len - align;
- dma_addr = dma_map_single(&dev->dev, skb->data + align,
- dma_len, DMA_TO_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- netdev_err(dev, "tx dma mapping error\n");
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- xmit_more = netdev_xmit_more();
- if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- xmit_more = false;
- }
-
- next = priv->tx_next;
- txb = &priv->tx_bufs[next];
- txd = &priv->tx_descs[next];
- desc = &txd->desc[0];
-
- next = (next + 1) % TX_DESC_COUNT;
-
- if (align) {
- memcpy(txd->buf, skb->data, align);
-
- desc->s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
- desc->config = DESC_BTS(2) | DESC_DS | align;
-
- desc++;
- }
-
- desc->s_addr = dma_addr;
- desc->n_addr = priv->tx_bufs[next].dma_desc;
- desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
-
- if (!xmit_more)
- desc->config |= DESC_EOC;
-
- txb->skb = skb;
- txb->dma_addr = dma_addr;
- txb->dma_len = dma_len;
-
- if (!priv->tx_chain) {
- txb->chain_len = 1;
- priv->tx_chain = txb;
- } else {
- priv->tx_chain->chain_len++;
- }
-
- netdev_sent_queue(dev, skb->len);
-
- priv->tx_next = next;
-
- if (!xmit_more) {
- smp_wmb();
- priv->tx_chain->ready = true;
- priv->tx_chain = NULL;
- nb8800_tx_dma_start(dev);
- }
-
- return NETDEV_TX_OK;
-}
-
-static void nb8800_tx_error(struct net_device *dev, u32 report)
-{
- if (report & TX_LATE_COLLISION)
- dev->stats.collisions++;
-
- if (report & TX_PACKET_DROPPED)
- dev->stats.tx_dropped++;
-
- if (report & TX_FIFO_UNDERRUN)
- dev->stats.tx_fifo_errors++;
-
- dev->stats.tx_errors++;
-}
-
-static void nb8800_tx_done(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int limit = priv->tx_next;
- unsigned int done = priv->tx_done;
- unsigned int packets = 0;
- unsigned int len = 0;
-
- while (done != limit) {
- struct nb8800_tx_desc *txd = &priv->tx_descs[done];
- struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
- struct sk_buff *skb;
-
- if (!txd->report)
- break;
-
- skb = txb->skb;
- len += skb->len;
-
- dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
- DMA_TO_DEVICE);
-
- if (IS_TX_ERROR(txd->report)) {
- nb8800_tx_error(dev, txd->report);
- kfree_skb(skb);
- } else {
- consume_skb(skb);
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
- dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
-
- txb->skb = NULL;
- txb->ready = false;
- txd->report = 0;
-
- done = (done + 1) % TX_DESC_COUNT;
- packets++;
- }
-
- if (packets) {
- smp_mb__before_atomic();
- atomic_add(packets, &priv->tx_free);
- netdev_completed_queue(dev, packets, len);
- netif_wake_queue(dev);
- priv->tx_done = done;
- }
-}
-
-static irqreturn_t nb8800_irq(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct nb8800_priv *priv = netdev_priv(dev);
- irqreturn_t ret = IRQ_NONE;
- u32 val;
-
- /* tx interrupt */
- val = nb8800_readl(priv, NB8800_TXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_TXC_SR, val);
-
- if (val & TSR_DI)
- nb8800_tx_dma_start_irq(dev);
-
- if (val & TSR_TI)
- napi_schedule_irqoff(&priv->napi);
-
- if (unlikely(val & TSR_DE))
- netdev_err(dev, "TX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & TSR_TO))
- netdev_err(dev, "TX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- /* rx interrupt */
- val = nb8800_readl(priv, NB8800_RXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_RXC_SR, val);
-
- if (likely(val & (RSR_RI | RSR_DI))) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
- napi_schedule_irqoff(&priv->napi);
- }
-
- if (unlikely(val & RSR_DE))
- netdev_err(dev, "RX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & RSR_RO))
- netdev_err(dev, "RX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
-static void nb8800_mac_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- bool gigabit = priv->speed == SPEED_1000;
- u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
- u32 mac_mode = 0;
- u32 slot_time;
- u32 phy_clk;
- u32 ict;
-
- if (!priv->duplex)
- mac_mode |= HALF_DUPLEX;
-
- if (gigabit) {
- if (phy_interface_is_rgmii(dev->phydev))
- mac_mode |= RGMII_MODE;
-
- mac_mode |= GMAC_MODE;
- phy_clk = 125000000;
-
- /* Should be 512 but register is only 8 bits */
- slot_time = 255;
- } else {
- phy_clk = 25000000;
- slot_time = 128;
- }
-
- ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
-
- nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
- nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
- nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
-}
-
-static void nb8800_pause_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- u32 rxcr;
-
- if (priv->pause_aneg) {
- if (!phydev || !phydev->link)
- return;
-
- priv->pause_rx = phydev->pause;
- priv->pause_tx = phydev->pause ^ phydev->asym_pause;
- }
-
- nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
-
- rxcr = nb8800_readl(priv, NB8800_RXC_CR);
- if (!!(rxcr & RCR_FL) == priv->pause_tx)
- return;
-
- if (netif_running(dev)) {
- napi_disable(&priv->napi);
- netif_tx_lock_bh(dev);
- nb8800_dma_stop(dev);
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- nb8800_start_rx(dev);
- netif_tx_unlock_bh(dev);
- napi_enable(&priv->napi);
- } else {
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- }
-}
-
-static void nb8800_link_reconfigure(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int change = 0;
-
- if (phydev->link) {
- if (phydev->speed != priv->speed) {
- priv->speed = phydev->speed;
- change = 1;
- }
-
- if (phydev->duplex != priv->duplex) {
- priv->duplex = phydev->duplex;
- change = 1;
- }
-
- if (change)
- nb8800_mac_config(dev);
-
- nb8800_pause_config(dev);
- }
-
- if (phydev->link != priv->link) {
- priv->link = phydev->link;
- change = 1;
- }
-
- if (change)
- phy_print_status(phydev);
-}
-
-static void nb8800_update_mac_addr(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
-}
-
-static int nb8800_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *sock = addr;
-
- if (netif_running(dev))
- return -EBUSY;
-
- ether_addr_copy(dev->dev_addr, sock->sa_data);
- nb8800_update_mac_addr(dev);
-
- return 0;
-}
-
-static void nb8800_mc_init(struct net_device *dev, int val)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_MC_INIT, val);
- readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
- 1, 1000);
-}
-
-static void nb8800_set_rx_mode(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
-
- if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- nb8800_mac_af(dev, false);
- return;
- }
-
- nb8800_mac_af(dev, true);
- nb8800_mc_init(dev, 0);
-
- netdev_for_each_mc_addr(ha, dev) {
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
-
- nb8800_mc_init(dev, 0xff);
- }
-}
-
-#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
-#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
-
-static void nb8800_dma_free(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- if (priv->rx_bufs) {
- for (i = 0; i < RX_DESC_COUNT; i++)
- if (priv->rx_bufs[i].page)
- put_page(priv->rx_bufs[i].page);
-
- kfree(priv->rx_bufs);
- priv->rx_bufs = NULL;
- }
-
- if (priv->tx_bufs) {
- for (i = 0; i < TX_DESC_COUNT; i++)
- kfree_skb(priv->tx_bufs[i].skb);
-
- kfree(priv->tx_bufs);
- priv->tx_bufs = NULL;
- }
-
- if (priv->rx_descs) {
- dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
- priv->rx_desc_dma);
- priv->rx_descs = NULL;
- }
-
- if (priv->tx_descs) {
- dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
- priv->tx_desc_dma);
- priv->tx_descs = NULL;
- }
-}
-
-static void nb8800_dma_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- struct nb8800_tx_desc *txd;
- unsigned int i;
-
- for (i = 0; i < RX_DESC_COUNT; i++) {
- dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
-
- rxd = &priv->rx_descs[i];
- rxd->desc.n_addr = rx_dma + sizeof(*rxd);
- rxd->desc.r_addr =
- rx_dma + offsetof(struct nb8800_rx_desc, report);
- rxd->desc.config = priv->rx_dma_config;
- rxd->report = 0;
- }
-
- rxd->desc.n_addr = priv->rx_desc_dma;
- rxd->desc.config |= DESC_EOC;
-
- priv->rx_eoc = RX_DESC_COUNT - 1;
-
- for (i = 0; i < TX_DESC_COUNT; i++) {
- struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
- dma_addr_t r_dma = txb->dma_desc +
- offsetof(struct nb8800_tx_desc, report);
-
- txd = &priv->tx_descs[i];
- txd->desc[0].r_addr = r_dma;
- txd->desc[1].r_addr = r_dma;
- txd->report = 0;
- }
-
- priv->tx_next = 0;
- priv->tx_queue = 0;
- priv->tx_done = 0;
- atomic_set(&priv->tx_free, TX_DESC_COUNT);
-
- nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
-
- wmb(); /* ensure all setup is written before starting */
-}
-
-static int nb8800_dma_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int n_rx = RX_DESC_COUNT;
- unsigned int n_tx = TX_DESC_COUNT;
- unsigned int i;
- int err;
-
- priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
- &priv->rx_desc_dma, GFP_KERNEL);
- if (!priv->rx_descs)
- goto err_out;
-
- priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
- if (!priv->rx_bufs)
- goto err_out;
-
- for (i = 0; i < n_rx; i++) {
- err = nb8800_alloc_rx(dev, i, false);
- if (err)
- goto err_out;
- }
-
- priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
- &priv->tx_desc_dma, GFP_KERNEL);
- if (!priv->tx_descs)
- goto err_out;
-
- priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
- if (!priv->tx_bufs)
- goto err_out;
-
- for (i = 0; i < n_tx; i++)
- priv->tx_bufs[i].dma_desc =
- priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
-
- nb8800_dma_reset(dev);
-
- return 0;
-
-err_out:
- nb8800_dma_free(dev);
-
- return -ENOMEM;
-}
-
-static int nb8800_dma_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
- struct nb8800_tx_desc *txd = &priv->tx_descs[0];
- int retry = 5;
- u32 txcr;
- u32 rxcr;
- int err;
- unsigned int i;
-
- /* wait for tx to finish */
- err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
- !(txcr & TCR_EN) &&
- priv->tx_done == priv->tx_next,
- 1000, 1000000);
- if (err)
- return err;
-
- /* The rx DMA only stops if it reaches the end of chain.
- * To make this happen, we set the EOC flag on all rx
- * descriptors, put the device in loopback mode, and send
- * a few dummy frames. The interrupt handler will ignore
- * these since NAPI is disabled and no real frames are in
- * the tx queue.
- */
-
- for (i = 0; i < RX_DESC_COUNT; i++)
- priv->rx_descs[i].desc.config |= DESC_EOC;
-
- txd->desc[0].s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
- memset(txd->buf, 0, sizeof(txd->buf));
-
- nb8800_mac_af(dev, false);
- nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
-
- do {
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb();
- nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
-
- err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
- rxcr, !(rxcr & RCR_EN),
- 1000, 100000);
- } while (err && --retry);
-
- nb8800_mac_af(dev, true);
- nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
- nb8800_dma_reset(dev);
-
- return retry ? 0 : -ETIMEDOUT;
-}
-
-static void nb8800_pause_adv(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return;
-
- phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx);
-}
-
-static int nb8800_open(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev;
- int err;
-
- /* clear any pending interrupts */
- nb8800_writel(priv, NB8800_RXC_SR, 0xf);
- nb8800_writel(priv, NB8800_TXC_SR, 0xf);
-
- err = nb8800_dma_init(dev);
- if (err)
- return err;
-
- err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
- if (err)
- goto err_free_dma;
-
- nb8800_mac_rx(dev, true);
- nb8800_mac_tx(dev, true);
-
- phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!phydev) {
- err = -ENODEV;
- goto err_free_irq;
- }
-
- nb8800_pause_adv(dev);
-
- netdev_reset_queue(dev);
- napi_enable(&priv->napi);
- netif_start_queue(dev);
-
- nb8800_start_rx(dev);
- phy_start(phydev);
-
- return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err_free_dma:
- nb8800_dma_free(dev);
-
- return err;
-}
-
-static int nb8800_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- phy_stop(phydev);
-
- netif_stop_queue(dev);
- napi_disable(&priv->napi);
-
- nb8800_dma_stop(dev);
- nb8800_mac_rx(dev, false);
- nb8800_mac_tx(dev, false);
-
- phy_disconnect(phydev);
-
- free_irq(dev->irq, dev);
-
- nb8800_dma_free(dev);
-
- return 0;
-}
-
-static const struct net_device_ops nb8800_netdev_ops = {
- .ndo_open = nb8800_open,
- .ndo_stop = nb8800_stop,
- .ndo_start_xmit = nb8800_xmit,
- .ndo_set_mac_address = nb8800_set_mac_address,
- .ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void nb8800_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- pp->autoneg = priv->pause_aneg;
- pp->rx_pause = priv->pause_rx;
- pp->tx_pause = priv->pause_tx;
-}
-
-static int nb8800_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- priv->pause_aneg = pp->autoneg;
- priv->pause_rx = pp->rx_pause;
- priv->pause_tx = pp->tx_pause;
-
- nb8800_pause_adv(dev);
-
- if (!priv->pause_aneg)
- nb8800_pause_config(dev);
- else if (phydev)
- phy_start_aneg(phydev);
-
- return 0;
-}
-
-static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
- "rx_bytes_ok",
- "rx_frames_ok",
- "rx_undersize_frames",
- "rx_fragment_frames",
- "rx_64_byte_frames",
- "rx_127_byte_frames",
- "rx_255_byte_frames",
- "rx_511_byte_frames",
- "rx_1023_byte_frames",
- "rx_max_size_frames",
- "rx_oversize_frames",
- "rx_bad_fcs_frames",
- "rx_broadcast_frames",
- "rx_multicast_frames",
- "rx_control_frames",
- "rx_pause_frames",
- "rx_unsup_control_frames",
- "rx_align_error_frames",
- "rx_overrun_frames",
- "rx_jabber_frames",
- "rx_bytes",
- "rx_frames",
-
- "tx_bytes_ok",
- "tx_frames_ok",
- "tx_64_byte_frames",
- "tx_127_byte_frames",
- "tx_255_byte_frames",
- "tx_511_byte_frames",
- "tx_1023_byte_frames",
- "tx_max_size_frames",
- "tx_oversize_frames",
- "tx_broadcast_frames",
- "tx_multicast_frames",
- "tx_control_frames",
- "tx_pause_frames",
- "tx_underrun_frames",
- "tx_single_collision_frames",
- "tx_multi_collision_frames",
- "tx_deferred_collision_frames",
- "tx_late_collision_frames",
- "tx_excessive_collision_frames",
- "tx_bytes",
- "tx_frames",
- "tx_collisions",
-};
-
-#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
-
-static int nb8800_get_sset_count(struct net_device *dev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return NB8800_NUM_STATS;
-
- return -EOPNOTSUPP;
-}
-
-static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
-{
- if (sset == ETH_SS_STATS)
- memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
-}
-
-static u32 nb8800_read_stat(struct net_device *dev, int index)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_STAT_INDEX, index);
-
- return nb8800_readl(priv, NB8800_STAT_DATA);
-}
-
-static void nb8800_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *st)
-{
- unsigned int i;
- u32 rx, tx;
-
- for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
- rx = nb8800_read_stat(dev, i);
- tx = nb8800_read_stat(dev, i | 0x80);
- st[i] = rx;
- st[i + NB8800_NUM_STATS / 2] = tx;
- }
-}
-
-static const struct ethtool_ops nb8800_ethtool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = nb8800_get_pauseparam,
- .set_pauseparam = nb8800_set_pauseparam,
- .get_sset_count = nb8800_get_sset_count,
- .get_strings = nb8800_get_strings,
- .get_ethtool_stats = nb8800_get_ethtool_stats,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static int nb8800_hw_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 val;
-
- val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
- nb8800_writeb(priv, NB8800_TX_CTL1, val);
-
- /* Collision retry count */
- nb8800_writeb(priv, NB8800_TX_CTL2, 5);
-
- val = RX_PAD_STRIP | RX_AF_EN;
- nb8800_writeb(priv, NB8800_RX_CTL, val);
-
- /* Chosen by fair dice roll */
- nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
-
- /* TX cycles per deferral period */
- nb8800_writeb(priv, NB8800_TX_SDP, 12);
-
- /* The following three threshold values have been
- * experimentally determined for good results.
- */
-
- /* RX/TX FIFO threshold for partial empty (64-bit entries) */
- nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
-
- /* RX/TX FIFO threshold for partial full (64-bit entries) */
- nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
-
- /* Buffer size for transmit (64-bit entries) */
- nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
-
- /* Configure tx DMA */
-
- val = nb8800_readl(priv, NB8800_TXC_CR);
- val &= TCR_LE; /* keep endian setting */
- val |= TCR_DM; /* DMA descriptor mode */
- val |= TCR_RS; /* automatically store tx status */
- val |= TCR_DIE; /* interrupt on DMA chain completion */
- val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
- val |= TCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_TXC_CR, val);
-
- /* TX complete interrupt after 10 ms or 7 frames (see above) */
- val = clk_get_rate(priv->clk) / 100;
- nb8800_writel(priv, NB8800_TX_ITR, val);
-
- /* Configure rx DMA */
-
- val = nb8800_readl(priv, NB8800_RXC_CR);
- val &= RCR_LE; /* keep endian setting */
- val |= RCR_DM; /* DMA descriptor mode */
- val |= RCR_RS; /* automatically store rx status */
- val |= RCR_DIE; /* interrupt at end of DMA chain */
- val |= RCR_RFI(7); /* interrupt after 7 frames received */
- val |= RCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_RXC_CR, val);
-
- /* The rx interrupt can fire before the DMA has completed
- * unless a small delay is added. 50 us is hopefully enough.
- */
- priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
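- /* (clk rate / 20000 cycles == 50 us, since 50 us = 1/20000 s) */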
-
- /* In NAPI poll mode we want to disable interrupts, but the
- * hardware does not permit this. Delay 10 ms instead.
- */
- priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
-
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
-
- /* Flow control settings */
-
- /* Pause time of 0.1 ms */
- val = 100000 / 512;
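- /* One pause quantum is 512 bit times (512 ns at 1 Gb/s),
-  * so 100000 ns / 512 = ~195 quanta.
-  */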
- nb8800_writeb(priv, NB8800_PQ1, val >> 8);
- nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
-
- /* Auto-negotiate by default */
- priv->pause_aneg = true;
- priv->pause_rx = true;
- priv->pause_tx = true;
-
- nb8800_mc_init(dev, 0);
-
- return 0;
-}
-
-static int nb8800_tangox_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 pad_mode = PAD_MODE_MII;
-
- switch (priv->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_GMII:
- pad_mode = PAD_MODE_MII;
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII;
- break;
-
- default:
- dev_err(dev->dev.parent, "unsupported phy mode %s\n",
- phy_modes(priv->phy_mode));
- return -EINVAL;
- }
-
- nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
-
- return 0;
-}
-
-static int nb8800_tangox_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int clk_div;
-
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
- usleep_range(1000, 10000);
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
-
- wmb(); /* ensure reset is cleared before proceeding */
-
- clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
- nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tangox_ops = {
- .init = nb8800_tangox_init,
- .reset = nb8800_tangox_reset,
-};
-
-static int nb8800_tango4_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int err;
-
- err = nb8800_tangox_init(dev);
- if (err)
- return err;
-
- /* On tango4 interrupt on DMA completion per frame works and gives
- * better performance despite generating more rx interrupts.
- */
-
- /* Disable unnecessary interrupt on rx completion */
- nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
-
- /* Request interrupt on descriptor DMA completion */
- priv->rx_dma_config |= DESC_ID;
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tango4_ops = {
- .init = nb8800_tango4_init,
- .reset = nb8800_tangox_reset,
-};
-
-static const struct of_device_id nb8800_dt_ids[] = {
- {
- .compatible = "aurora,nb8800",
- },
- {
- .compatible = "sigma,smp8642-ethernet",
- .data = &nb8800_tangox_ops,
- },
- {
- .compatible = "sigma,smp8734-ethernet",
- .data = &nb8800_tango4_ops,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
-
-static int nb8800_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- const struct nb8800_ops *ops = NULL;
- struct nb8800_priv *priv;
- struct resource *res;
- struct net_device *dev;
- struct mii_bus *bus;
- const unsigned char *mac;
- void __iomem *base;
- int irq;
- int ret;
-
- match = of_match_device(nb8800_dt_ids, &pdev->dev);
- if (match)
- ops = match->data;
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
-
- dev = alloc_etherdev(sizeof(*priv));
- if (!dev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- priv = netdev_priv(dev);
- priv->base = base;
-
- ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
- if (ret)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(priv->clk);
- goto err_free_dev;
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_free_dev;
-
- spin_lock_init(&priv->tx_lock);
-
- if (ops && ops->reset) {
- ret = ops->reset(dev);
- if (ret)
- goto err_disable_clk;
- }
-
- bus = devm_mdiobus_alloc(&pdev->dev);
- if (!bus) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
-
- bus->name = "nb8800-mii";
- bus->read = nb8800_mdio_read;
- bus->write = nb8800_mdio_write;
- bus->parent = &pdev->dev;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
- (unsigned long)res->start);
- bus->priv = priv;
-
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret) {
- dev_err(&pdev->dev, "failed to register MII bus\n");
- goto err_disable_clk;
- }
-
- if (of_phy_is_fixed_link(pdev->dev.of_node)) {
- ret = of_phy_register_fixed_link(pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "bad fixed-link spec\n");
- goto err_free_bus;
- }
- priv->phy_node = of_node_get(pdev->dev.of_node);
- }
-
- if (!priv->phy_node)
- priv->phy_node = of_parse_phandle(pdev->dev.of_node,
- "phy-handle", 0);
-
- if (!priv->phy_node) {
- dev_err(&pdev->dev, "no PHY specified\n");
- ret = -ENODEV;
- goto err_free_bus;
- }
-
- priv->mii_bus = bus;
-
- ret = nb8800_hw_init(dev);
- if (ret)
- goto err_deregister_fixed_link;
-
- if (ops && ops->init) {
- ret = ops->init(dev);
- if (ret)
- goto err_deregister_fixed_link;
- }
-
- dev->netdev_ops = &nb8800_netdev_ops;
- dev->ethtool_ops = &nb8800_ethtool_ops;
- dev->flags |= IFF_MULTICAST;
- dev->irq = irq;
-
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(dev->dev_addr, mac);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_hw_addr_random(dev);
-
- nb8800_update_mac_addr(dev);
-
- netif_carrier_off(dev);
-
- ret = register_netdev(dev);
- if (ret) {
- netdev_err(dev, "failed to register netdev\n");
- goto err_free_dma;
- }
-
- netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
-
- netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
-
- return 0;
-
-err_free_dma:
- nb8800_dma_free(dev);
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_free_bus:
- of_node_put(priv->phy_node);
- mdiobus_unregister(bus);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
-err_free_dev:
- free_netdev(dev);
-
- return ret;
-}
-
-static int nb8800_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct nb8800_priv *priv = netdev_priv(ndev);
-
- unregister_netdev(ndev);
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
- of_node_put(priv->phy_node);
-
- mdiobus_unregister(priv->mii_bus);
-
- clk_disable_unprepare(priv->clk);
-
- nb8800_dma_free(ndev);
- free_netdev(ndev);
-
- return 0;
-}
-
-static struct platform_driver nb8800_driver = {
- .driver = {
- .name = "nb8800",
- .of_match_table = nb8800_dt_ids,
- },
- .probe = nb8800_probe,
- .remove = nb8800_remove,
-};
-
-module_platform_driver(nb8800_driver);
-
-MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
-MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
deleted file mode 100644
index 40941fb6065b..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NB8800_H_
-#define _NB8800_H_
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/bitops.h>
-
-#define RX_DESC_COUNT 256
-#define TX_DESC_COUNT 256
-
-#define NB8800_DESC_LOW 4
-
-#define RX_BUF_SIZE 1552
-
-#define RX_COPYBREAK 256
-#define RX_COPYHDR 128
-
-#define MAX_MDC_CLOCK 2500000
-
-/* Stargate Solutions SSN8800 core registers */
-#define NB8800_TX_CTL1 0x000
-#define TX_TPD BIT(5)
-#define TX_APPEND_FCS BIT(4)
-#define TX_PAD_EN BIT(3)
-#define TX_RETRY_EN BIT(2)
-#define TX_EN BIT(0)
-
-#define NB8800_TX_CTL2 0x001
-
-#define NB8800_RX_CTL 0x004
-#define RX_BC_DISABLE BIT(7)
-#define RX_RUNT BIT(6)
-#define RX_AF_EN BIT(5)
-#define RX_PAUSE_EN BIT(3)
-#define RX_SEND_CRC BIT(2)
-#define RX_PAD_STRIP BIT(1)
-#define RX_EN BIT(0)
-
-#define NB8800_RANDOM_SEED 0x008
-#define NB8800_TX_SDP 0x14
-#define NB8800_TX_TPDP1 0x18
-#define NB8800_TX_TPDP2 0x19
-#define NB8800_SLOT_TIME 0x1c
-
-#define NB8800_MDIO_CMD 0x020
-#define MDIO_CMD_GO BIT(31)
-#define MDIO_CMD_WR BIT(26)
-#define MDIO_CMD_ADDR(x) ((x) << 21)
-#define MDIO_CMD_REG(x) ((x) << 16)
-#define MDIO_CMD_DATA(x) ((x) << 0)
-
-#define NB8800_MDIO_STS 0x024
-#define MDIO_STS_ERR BIT(31)
-
-#define NB8800_MC_ADDR(i) (0x028 + (i))
-#define NB8800_MC_INIT 0x02e
-#define NB8800_UC_ADDR(i) (0x03c + (i))
-
-#define NB8800_MAC_MODE 0x044
-#define RGMII_MODE BIT(7)
-#define HALF_DUPLEX BIT(4)
-#define BURST_EN BIT(3)
-#define LOOPBACK_EN BIT(2)
-#define GMAC_MODE BIT(0)
-
-#define NB8800_IC_THRESHOLD 0x050
-#define NB8800_PE_THRESHOLD 0x051
-#define NB8800_PF_THRESHOLD 0x052
-#define NB8800_TX_BUFSIZE 0x054
-#define NB8800_FIFO_CTL 0x056
-#define NB8800_PQ1 0x060
-#define NB8800_PQ2 0x061
-#define NB8800_SRC_ADDR(i) (0x06a + (i))
-#define NB8800_STAT_DATA 0x078
-#define NB8800_STAT_INDEX 0x07c
-#define NB8800_STAT_CLEAR 0x07d
-
-#define NB8800_SLEEP_MODE 0x07e
-#define SLEEP_MODE BIT(0)
-
-#define NB8800_WAKEUP 0x07f
-#define WAKEUP BIT(0)
-
-/* Aurora NB8800 host interface registers */
-#define NB8800_TXC_CR 0x100
-#define TCR_LK BIT(12)
-#define TCR_DS BIT(11)
-#define TCR_BTS(x) (((x) & 0x7) << 8)
-#define TCR_DIE BIT(7)
-#define TCR_TFI(x) (((x) & 0x7) << 4)
-#define TCR_LE BIT(3)
-#define TCR_RS BIT(2)
-#define TCR_DM BIT(1)
-#define TCR_EN BIT(0)
-
-#define NB8800_TXC_SR 0x104
-#define TSR_DE BIT(3)
-#define TSR_DI BIT(2)
-#define TSR_TO BIT(1)
-#define TSR_TI BIT(0)
-
-#define NB8800_TX_SAR 0x108
-#define NB8800_TX_DESC_ADDR 0x10c
-
-#define NB8800_TX_REPORT_ADDR 0x110
-#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
-#define TX_FIRST_DEFERRAL BIT(7)
-#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
-#define TX_LATE_COLLISION BIT(2)
-#define TX_PACKET_DROPPED BIT(1)
-#define TX_FIFO_UNDERRUN BIT(0)
-#define IS_TX_ERROR(r) ((r) & 0x07)
-
-#define NB8800_TX_FIFO_SR 0x114
-#define NB8800_TX_ITR 0x118
-
-#define NB8800_RXC_CR 0x200
-#define RCR_FL BIT(13)
-#define RCR_LK BIT(12)
-#define RCR_DS BIT(11)
-#define RCR_BTS(x) (((x) & 7) << 8)
-#define RCR_DIE BIT(7)
-#define RCR_RFI(x) (((x) & 7) << 4)
-#define RCR_LE BIT(3)
-#define RCR_RS BIT(2)
-#define RCR_DM BIT(1)
-#define RCR_EN BIT(0)
-
-#define NB8800_RXC_SR 0x204
-#define RSR_DE BIT(3)
-#define RSR_DI BIT(2)
-#define RSR_RO BIT(1)
-#define RSR_RI BIT(0)
-
-#define NB8800_RX_SAR 0x208
-#define NB8800_RX_DESC_ADDR 0x20c
-
-#define NB8800_RX_REPORT_ADDR 0x210
-#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
-#define RX_MULTICAST_PKT BIT(9)
-#define RX_BROADCAST_PKT BIT(8)
-#define RX_LENGTH_ERR BIT(7)
-#define RX_FCS_ERR BIT(6)
-#define RX_RUNT_PKT BIT(5)
-#define RX_FIFO_OVERRUN BIT(4)
-#define RX_LATE_COLLISION BIT(3)
-#define RX_ALIGNMENT_ERROR BIT(2)
-#define RX_ERROR_MASK 0xfc
-#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
-
-#define NB8800_RX_FIFO_SR 0x214
-#define NB8800_RX_ITR 0x218
-
-/* Sigma Designs SMP86xx additional registers */
-#define NB8800_TANGOX_PAD_MODE 0x400
-#define PAD_MODE_MASK 0x7
-#define PAD_MODE_MII 0x0
-#define PAD_MODE_RGMII 0x1
-#define PAD_MODE_GTX_CLK_INV BIT(3)
-#define PAD_MODE_GTX_CLK_DELAY BIT(4)
-
-#define NB8800_TANGOX_MDIO_CLKDIV 0x420
-#define NB8800_TANGOX_RESET 0x424
-
-/* Hardware DMA descriptor */
-struct nb8800_dma_desc {
- u32 s_addr; /* start address */
- u32 n_addr; /* next descriptor address */
- u32 r_addr; /* report address */
- u32 config;
-} __aligned(8);
-
-#define DESC_ID BIT(23)
-#define DESC_EOC BIT(22)
-#define DESC_EOF BIT(21)
-#define DESC_LK BIT(20)
-#define DESC_DS BIT(19)
-#define DESC_BTS(x) (((x) & 0x7) << 16)
-
-/* DMA descriptor and associated data for rx.
- * Allocated from coherent memory.
- */
-struct nb8800_rx_desc {
- /* DMA descriptor */
- struct nb8800_dma_desc desc;
-
- /* Status report filled in by hardware */
- u32 report;
-};
-
-/* Address of buffer on rx ring */
-struct nb8800_rx_buf {
- struct page *page;
- unsigned long offset;
-};
-
-/* DMA descriptors and associated data for tx.
- * Allocated from coherent memory.
- */
-struct nb8800_tx_desc {
- /* DMA descriptor. The second descriptor is used if packet
- * data is unaligned.
- */
- struct nb8800_dma_desc desc[2];
-
- /* Status report filled in by hardware */
- u32 report;
-
- /* Bounce buffer for initial unaligned part of packet */
- u8 buf[8] __aligned(8);
-};
-
-/* Packet in tx queue */
-struct nb8800_tx_buf {
- /* Currently queued skb */
- struct sk_buff *skb;
-
- /* DMA address of the first descriptor */
- dma_addr_t dma_desc;
-
- /* DMA address of packet data */
- dma_addr_t dma_addr;
-
- /* Length of DMA mapping, less than skb->len if alignment
- * buffer is used.
- */
- unsigned int dma_len;
-
- /* Number of packets in chain starting here */
- unsigned int chain_len;
-
- /* Packet chain ready to be submitted to hardware */
- bool ready;
-};
-
-struct nb8800_priv {
- struct napi_struct napi;
-
- void __iomem *base;
-
- /* RX DMA descriptors */
- struct nb8800_rx_desc *rx_descs;
-
- /* RX buffers referenced by DMA descriptors */
- struct nb8800_rx_buf *rx_bufs;
-
- /* Current end of chain */
- u32 rx_eoc;
-
- /* Value for rx interrupt time register in NAPI interrupt mode */
- u32 rx_itr_irq;
-
- /* Value for rx interrupt time register in NAPI poll mode */
- u32 rx_itr_poll;
-
- /* Value for config field of rx DMA descriptors */
- u32 rx_dma_config;
-
- /* TX DMA descriptors */
- struct nb8800_tx_desc *tx_descs;
-
- /* TX packet queue */
- struct nb8800_tx_buf *tx_bufs;
-
- /* Number of free tx queue entries */
- atomic_t tx_free;
-
- /* First free tx queue entry */
- u32 tx_next;
-
- /* Next buffer to transmit */
- u32 tx_queue;
-
- /* Start of current packet chain */
- struct nb8800_tx_buf *tx_chain;
-
- /* Next buffer to reclaim */
- u32 tx_done;
-
- /* Lock for DMA activation */
- spinlock_t tx_lock;
-
- struct mii_bus *mii_bus;
- struct device_node *phy_node;
-
- /* PHY connection type from DT */
- phy_interface_t phy_mode;
-
- /* Current link status */
- int speed;
- int duplex;
- int link;
-
- /* Pause settings */
- bool pause_aneg;
- bool pause_rx;
- bool pause_tx;
-
- /* DMA base address of rx descriptors, see rx_descs above */
- dma_addr_t rx_desc_dma;
-
- /* DMA base address of tx descriptors, see tx_descs above */
- dma_addr_t tx_desc_dma;
-
- struct clk *clk;
-};
-
-struct nb8800_ops {
- int (*init)(struct net_device *dev);
- int (*reset)(struct net_device *dev);
-};
-
-#endif /* _NB8800_H_ */
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7b79528d6eed..f8a168b73307 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -51,6 +51,14 @@ config B44_PCI
depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
default y
+config BCM4908_ENET
+ tristate "Broadcom BCM4908 internal mac support"
+ depends on ARCH_BCM4908 || COMPILE_TEST
+ default y
+ help
+ This driver supports the Ethernet controller integrated into Broadcom
+ BCM4908 family SoCs.
+
config BCM63XX_ENET
tristate "Broadcom 63xx internal mac support"
depends on BCM63XX
@@ -174,7 +182,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on OF
select BGMAC
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 7046ad6d3d0e..0ddfb5b5d53c 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM4908_ENET) += bcm4908_enet.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
new file mode 100644
index 000000000000..0b70e9e0ddad
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "bcm4908_enet.h"
+#include "unimac.h"
+
+#define ENET_DMA_CH_RX_CFG ENET_DMA_CH0_CFG
+#define ENET_DMA_CH_TX_CFG ENET_DMA_CH1_CFG
+#define ENET_DMA_CH_RX_STATE_RAM ENET_DMA_CH0_STATE_RAM
+#define ENET_DMA_CH_TX_STATE_RAM ENET_DMA_CH1_STATE_RAM
+
+#define ENET_TX_BDS_NUM 200
+#define ENET_RX_BDS_NUM 200
+#define ENET_RX_BDS_NUM_MAX 8192
+
+#define ENET_DMA_INT_DEFAULTS (ENET_DMA_CH_CFG_INT_DONE | \
+ ENET_DMA_CH_CFG_INT_NO_DESC | \
+ ENET_DMA_CH_CFG_INT_BUFF_DONE)
+#define ENET_DMA_MAX_BURST_LEN 8 /* in 64 bit words */
+
+#define ENET_MTU_MAX ETH_DATA_LEN /* Is it possible to support 2044? */
+#define BRCM_MAX_TAG_LEN 6
+#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ETH_FCS_LEN + 4) /* 14 + 6 + 4 + 4 + 4 = 32 */
+
+struct bcm4908_enet_dma_ring_bd {
+ __le32 ctl;
+ __le32 addr;
+} __packed;
+
+struct bcm4908_enet_dma_ring_slot {
+ struct sk_buff *skb;
+ unsigned int len;
+ dma_addr_t dma_addr;
+};
+
+struct bcm4908_enet_dma_ring {
+ int is_tx;
+ int read_idx;
+ int write_idx;
+ int length;
+ u16 cfg_block;
+ u16 st_ram_block;
+
+ union {
+ void *cpu_addr;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ };
+ dma_addr_t dma_addr;
+
+ struct bcm4908_enet_dma_ring_slot *slots;
+};
+
+struct bcm4908_enet {
+ struct device *dev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ void __iomem *base;
+
+ struct bcm4908_enet_dma_ring tx_ring;
+ struct bcm4908_enet_dma_ring rx_ring;
+};
+
+/***
+ * R/W ops
+ */
+
+static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return readl(enet->base + offset);
+}
+
+static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ writel(value, enet->base + offset);
+}
+
+static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN_ON(set & ~mask);
+
+ val = enet_read(enet, offset);
+ val = (val & ~mask) | (set & mask);
+ enet_write(enet, offset, val);
+}
+
+static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_maskset(enet, offset, set, set);
+}
+
+static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return enet_read(enet, ENET_UNIMAC + offset);
+}
+
+static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ enet_write(enet, ENET_UNIMAC + offset, value);
+}
+
+static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_set(enet, ENET_UNIMAC + offset, set);
+}
+
+/***
+ * Helpers
+ */
+
+static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+}
+
+static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+{
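+ /* UMAC_MAX_FRAME_LEN presumably takes the full on-wire frame
+  * size, hence the L2 overhead added to the L3 MTU.
+  */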
+ enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+}
+
+/***
+ * DMA
+ */
+
+static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ struct device *dev = enet->dev;
+
+ ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
+ if (!ring->cpu_addr)
+ return -ENOMEM;
+
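+ /* Rings presumably must be 64-byte (0x40) aligned for the DMA block;
+  * dma_alloc_coherent() returns at least page-aligned memory, so this
+  * is only a sanity check (editor's assumption).
+  */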
+ if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
+ dev_err(dev, "Invalid DMA ring alignment\n");
+ goto err_free_buf_descs;
+ }
+
+ ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
+ if (!ring->slots)
+ goto err_free_buf_descs;
+
+ ring->read_idx = 0;
+ ring->write_idx = 0;
+
+ return 0;
+
+err_free_buf_descs:
+ dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+ return -ENOMEM;
+}
+
+static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int size;
+
+ size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (rx_ring->cpu_addr)
+ dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
+ kfree(rx_ring->slots);
+
+ size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (tx_ring->cpu_addr)
+ dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
+ kfree(tx_ring->slots);
+}
+
+static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+
+ tx_ring->length = ENET_TX_BDS_NUM;
+ tx_ring->is_tx = 1;
+ tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
+ tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
+ return err;
+ }
+
+ rx_ring->length = ENET_RX_BDS_NUM;
+ rx_ring->is_tx = 0;
+ rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
+ rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
+ int i;
+
+ /* Disable the DMA controller and channel */
+ for (i = 0; i < ARRAY_SIZE(rings); i++)
+ enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
+
+ /* Reset channels state */
+ for (i = 0; i < ARRAY_SIZE(rings); i++) {
+ struct bcm4908_enet_dma_ring *ring = rings[i];
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
+ }
+}
+
+static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
+{
+ struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
+ struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
+ struct device *dev = enet->dev;
+ u32 tmp;
+ int err;
+
+ slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
+
+ slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
+ if (!slot->skb)
+ return -ENOMEM;
+
+ slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev, slot->dma_addr);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer: %d\n", err);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ return err;
+ }
+
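+ /* OWN hands the descriptor to the DMA engine; WRAP marks the last
+  * descriptor so the engine returns to the ring base after it.
+  */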
+ tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ if (idx == enet->rx_ring.length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+ buf_desc->ctl = cpu_to_le32(tmp);
+ buf_desc->addr = cpu_to_le32(slot->dma_addr);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
+ int reset_subch = ring->is_tx ? 1 : 0;
+
+ /* Reset the DMA channel */
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
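+ /* Pulse the per-subchannel reset bit (even bits RX, odd bits TX —
+  * inferred from the reset_subch arithmetic above).
+  */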
+
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+ (uint32_t)ring->dma_addr);
+}
+
+static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ int i;
+
+ for (i = rx_ring->length - 1; i >= 0; i--) {
+ slot = &rx_ring->slots[i];
+ if (!slot->skb)
+ continue;
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ }
+}
+
+static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+ int i;
+
+ for (i = 0; i < rx_ring->length; i++) {
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
+ bcm4908_enet_dma_uninit(enet);
+ return err;
+ }
+ }
+
+ bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
+ bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+}
+
+static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+
+ deadline = jiffies + usecs_to_jiffies(2000);
+ do {
+ tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
+ if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
+ return;
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+ usleep_range(10, 30);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
+}
+
+/***
+ * Ethernet driver
+ */
+
+static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
+{
+ u32 cmd;
+
+ bcm4908_enet_set_mtu(enet, enet->netdev->mtu);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
+ enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
+
+ enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
+ enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
+
+ enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
+ enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
+ cmd &= ~CMD_TX_EN;
+ cmd &= ~CMD_RX_EN;
+ cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
+ enet_umac_write(enet, UMAC_CMD, cmd);
+
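+ /* There is no PHY/MDIO handling in this driver yet; the link is
+  * presumably fixed by the SoC design, so force 1 Gb/s full duplex
+  * and link up below (editor's reading of the code).
+  */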
+ enet_maskset(enet, ENET_GMAC_STATUS,
+ ENET_GMAC_STATUS_ETH_SPEED_MASK |
+ ENET_GMAC_STATUS_HD |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP,
+ ENET_GMAC_STATUS_ETH_SPEED_1000 |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP);
+}
+
+static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
+{
+ struct bcm4908_enet *enet = dev_id;
+
+ bcm4908_enet_intrs_off(enet);
+ bcm4908_enet_intrs_ack(enet);
+
+ napi_schedule(&enet->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm4908_enet_open(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct device *dev = enet->dev;
+ int err;
+
+ err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
+ return err;
+ }
+
+ bcm4908_enet_gmac_init(enet);
+ bcm4908_enet_dma_reset(enet);
+ bcm4908_enet_dma_init(enet);
+
+ enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
+
+ enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
+ bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
+
+ napi_enable(&enet->napi);
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ bcm4908_enet_intrs_ack(enet);
+ bcm4908_enet_intrs_on(enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_stop(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ napi_disable(&enet->napi);
+
+ bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
+ bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
+
+ bcm4908_enet_dma_uninit(enet);
+
+ free_irq(enet->netdev->irq, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ int free_buf_descs;
+ u32 tmp;
+
+ /* Free transmitted skbs */
+ while (ring->read_idx != ring->write_idx) {
+ buf_desc = &ring->buf_desc[ring->read_idx];
+ if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+ break;
+ slot = &ring->slots[ring->read_idx];
+
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ if (++ring->read_idx == ring->length)
+ ring->read_idx = 0;
+ }
+
+ /* Don't use the last empty buf descriptor */
+ if (ring->read_idx <= ring->write_idx)
+ free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
+ else
+ free_buf_descs = ring->read_idx - ring->write_idx;
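+ /* Requiring two free entries keeps one descriptor permanently
+  * unused, so read_idx == write_idx can only mean "ring empty",
+  * never "ring full".
+  */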
+ if (free_buf_descs < 2)
+ return NETDEV_TX_BUSY;
+
+ /* Hardware removes OWN bit after sending data */
+ buf_desc = &ring->buf_desc[ring->write_idx];
+ if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->write_idx];
+ slot->skb = skb;
+ slot->len = skb->len;
+ slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
+ return NETDEV_TX_BUSY;
+
+ tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ tmp |= DMA_CTL_STATUS_SOP;
+ tmp |= DMA_CTL_STATUS_EOP;
+ tmp |= DMA_CTL_STATUS_APPEND_CRC;
+ if (ring->write_idx + 1 == ring->length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+
+ buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
+ buf_desc->ctl = cpu_to_le32(tmp);
+
+ bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);
+
+ if (++ring->write_idx == ring->length - 1)
+ ring->write_idx = 0;
+ enet->netdev->stats.tx_bytes += skb->len;
+ enet->netdev->stats.tx_packets++;
+
+ return NETDEV_TX_OK;
+}
+
+static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+{
+ struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+ struct device *dev = enet->dev;
+ int handled = 0;
+
+ while (handled < weight) {
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ struct bcm4908_enet_dma_ring_slot slot;
+ u32 ctl;
+ int len;
+ int err;
+
+ buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
+ ctl = le32_to_cpu(buf_desc->ctl);
+ if (ctl & DMA_CTL_STATUS_OWN)
+ break;
+
+ slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
+
+ /* Provide new buffer before unpinning the old one */
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
+ if (err)
+ break;
+
+ if (++enet->rx_ring.read_idx == enet->rx_ring.length)
+ enet->rx_ring.read_idx = 0;
+
+ len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+
+ if (len < ETH_ZLEN ||
+ (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+ kfree_skb(slot.skb);
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+
+ dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+
+ skb_put(slot.skb, len - ETH_FCS_LEN);
+ slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
+ netif_receive_skb(slot.skb);
+
+ enet->netdev->stats.rx_packets++;
+ enet->netdev->stats.rx_bytes += len;
+
+ handled++;
+ }
+
+ if (handled < weight) {
+ napi_complete_done(napi, handled);
+ bcm4908_enet_intrs_on(enet);
+ }
+
+ return handled;
+}
+
+static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ bcm4908_enet_set_mtu(enet, new_mtu);
+
+ return 0;
+}
+
+static const struct net_device_ops bcm4908_enet_netdev_ops = {
+ .ndo_open = bcm4908_enet_open,
+ .ndo_stop = bcm4908_enet_stop,
+ .ndo_start_xmit = bcm4908_enet_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = bcm4908_enet_change_mtu,
+};
+
+static int bcm4908_enet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct bcm4908_enet *enet;
+ int err;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(*enet));
+ if (!netdev)
+ return -ENOMEM;
+
+ enet = netdev_priv(netdev);
+ enet->dev = dev;
+ enet->netdev = netdev;
+
+ enet->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(enet->base)) {
+ dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
+ return PTR_ERR(enet->base);
+ }
+
+ netdev->irq = platform_get_irq_byname(pdev, "rx");
+ if (netdev->irq < 0)
+ return netdev->irq;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ err = bcm4908_enet_dma_alloc(enet);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &bcm4908_enet_netdev_ops;
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->mtu = ETH_DATA_LEN;
+ netdev->max_mtu = ENET_MTU_MAX;
+ netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+
+ err = register_netdev(netdev);
+ if (err) {
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_remove(struct platform_device *pdev)
+{
+ struct bcm4908_enet *enet = platform_get_drvdata(pdev);
+
+ unregister_netdev(enet->netdev);
+ netif_napi_del(&enet->napi);
+ bcm4908_enet_dma_free(enet);
+
+ return 0;
+}
+
+static const struct of_device_id bcm4908_enet_of_match[] = {
+ { .compatible = "brcm,bcm4908-enet"},
+ {},
+};
+
+static struct platform_driver bcm4908_enet_driver = {
+ .driver = {
+ .name = "bcm4908_enet",
+ .of_match_table = bcm4908_enet_of_match,
+ },
+ .probe = bcm4908_enet_probe,
+ .remove = bcm4908_enet_remove,
+};
+module_platform_driver(bcm4908_enet_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.h b/drivers/net/ethernet/broadcom/bcm4908_enet.h
new file mode 100644
index 000000000000..8a3ede2da537
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __BCM4908_ENET_H
+#define __BCM4908_ENET_H
+
+#define ENET_CONTROL 0x000
+#define ENET_MIB_CTRL 0x004
+#define ENET_MIB_CTRL_CLR_MIB 0x00000001
+#define ENET_RX_ERR_MASK 0x008
+#define ENET_MIB_MAX_PKT_SIZE 0x00C
+#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
+#define ENET_DIAG_OUT 0x01c
+#define ENET_ENABLE_DROP_PKT 0x020
+#define ENET_IRQ_ENABLE 0x024
+#define ENET_IRQ_ENABLE_OVFL 0x00000001
+#define ENET_GMAC_STATUS 0x028
+#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
+#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
+#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
+#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
+#define ENET_GMAC_STATUS_HD 0x00000004
+#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
+#define ENET_GMAC_STATUS_LINK_UP 0x00000010
+#define ENET_IRQ_STATUS 0x02c
+#define ENET_IRQ_STATUS_OVFL 0x00000001
+#define ENET_OVERFLOW_COUNTER 0x030
+#define ENET_FLUSH 0x034
+#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
+#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
+#define ENET_RSV_SELECT 0x038
+#define ENET_BP_FORCE 0x03c
+#define ENET_BP_FORCE_FORCE 0x00000001
+#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
+#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
+#define ENET_TX_CRC_CTRL 0x044
+#define ENET_MIB 0x200
+#define ENET_UNIMAC 0x400
+#define ENET_DMA 0x800
+#define ENET_DMA_CONTROLLER_CFG 0x800
+#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
+#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
+#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
+#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
+#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
+#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
+#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
+#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
+#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
+#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
+#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
+#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
+#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
+#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
+#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
+#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
+#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
+#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
+#define ENET_DMA_CH0_CFG 0xa00 /* RX */
+#define ENET_DMA_CH1_CFG 0xa10 /* TX */
+#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
+#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
+
+#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
+#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
+#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
+#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finishing the current memory burst */
+#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
+#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
+#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
+#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
+#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
+#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detect client protocol error */
+#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
+#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA Descriptor Size Selection */
+#define ENET_DMA_CH_CFG_SIZE 0x10
+
+#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
+#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
+#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
+#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descriptor currently being processed */
+#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
+
+#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
+#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
+#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
+#define DMA_CTL_STATUS_WRAP 0x00001000 /* last descriptor in ring */
+#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
+#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
+#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
+#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
+#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
+#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
+#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..977f097fc7bf 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
* refill rx queue
*/
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
struct bcm_enet_priv *priv;
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
- struct sk_buff *skb;
- dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
- if (!priv->rx_skb[desc_idx]) {
- skb = netdev_alloc_skb(dev, priv->rx_skb_size);
- if (!skb)
+ if (!priv->rx_buf[desc_idx]) {
+ void *buf;
+
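+ /* napi_alloc_frag() may only be used in softirq context;
+  * the deferred refill timer passes napi_mode == false.
+  */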
+ if (likely(napi_mode))
+ buf = napi_alloc_frag(priv->rx_frag_size);
+ else
+ buf = netdev_alloc_frag(priv->rx_frag_size);
+ if (unlikely(!buf))
break;
- priv->rx_skb[desc_idx] = skb;
- p = dma_map_single(&priv->pdev->dev, skb->data,
- priv->rx_skb_size,
- DMA_FROM_DEVICE);
- desc->address = p;
+ priv->rx_buf[desc_idx] = buf;
+ desc->address = dma_map_single(&priv->pdev->dev,
+ buf + priv->rx_buf_offset,
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
}
- len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, false);
spin_unlock(&priv->rx_lock);
}
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
+ struct list_head rx_list;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
+ INIT_LIST_HEAD(&rx_list);
kdev = &priv->pdev->dev;
processed = 0;
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
int desc_idx;
u32 len_stat;
unsigned int len;
+ void *buf;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
- priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
/* valid packet */
- skb = priv->rx_skb[desc_idx];
+ buf = priv->rx_buf[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
- struct sk_buff *nskb;
-
- nskb = napi_alloc_skb(&priv->napi, len);
- if (!nskb) {
+ skb = napi_alloc_skb(&priv->napi, len);
+ if (unlikely(!skb)) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
- memcpy(nskb->data, skb->data, len);
+ memcpy(skb->data, buf + priv->rx_buf_offset, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
- skb = nskb;
} else {
- dma_unmap_single(&priv->pdev->dev, desc->address,
- priv->rx_skb_size, DMA_FROM_DEVICE);
- priv->rx_skb[desc_idx] = NULL;
+ dma_unmap_single(kdev, desc->address,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ priv->rx_buf[desc_idx] = NULL;
+
+ skb = build_skb(buf, priv->rx_frag_size);
+ if (unlikely(!skb)) {
+ skb_free_frag(buf);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, priv->rx_buf_offset);
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_receive_skb(skb);
+ list_add_tail(&skb->list, &rx_list);
+
+ } while (processed < budget);
- } while (--budget > 0);
+ netif_receive_skb_list(&rx_list);
+ priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, true);
/* kick rx dma */
enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
+ unsigned int bytes;
int released;
priv = netdev_priv(dev);
+ bytes = 0;
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
+ bytes += skb->len;
dev_kfree_skb(skb);
released++;
}
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->len_stat = len_stat;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
- enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ if (!netdev_xmit_more() || !priv->tx_desc_count)
+ enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->tx_chan);
/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
priv->pause_tx ? "tx" : "off");
}
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_buf[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(priv->rx_buf[i]);
+ }
+ kfree(priv->rx_buf);
+}
+
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
+ if (!priv->rx_buf) {
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMAC_BUFALLOC, priv->rx_chan);
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -1159,7 +1187,6 @@ static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
@@ -1186,21 +1213,10 @@ static int bcm_enet_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1214,6 +1230,9 @@ static int bcm_enet_stop(struct net_device *dev)
if (priv->has_phy)
phy_disconnect(dev->phydev);
+ /* reset BQL after forced tx reclaim to prevent kernel panic */
+ netdev_reset_queue(dev);
+
return 0;
}
@@ -1622,9 +1641,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
* align rx buffer size to dma burst len, account FCS since
* it's appended
*/
- priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
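+ /* build_skb() expects struct skb_shared_info at the end of the
+  * buffer, hence the second aligned term.
+  */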
+ priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
dev->mtu = new_mtu;
return 0;
}
@@ -1709,6 +1731,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD;
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
@@ -2126,7 +2149,7 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -2136,11 +2159,11 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (!priv->rx_buf) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -2187,8 +2210,8 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -2287,18 +2310,7 @@ static int bcm_enetsw_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -2327,7 +2339,6 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
@@ -2348,21 +2359,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2372,6 +2372,9 @@ static int bcm_enetsw_stop(struct net_device *dev)
free_irq(priv->irq_tx, dev);
free_irq(priv->irq_rx, dev);
+ /* reset BQL after forced tx reclaim to prevent kernel panic */
+ netdev_reset_queue(dev);
+
return 0;
}
@@ -2659,6 +2662,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
pd = dev_get_platdata(&pdev->dev);
if (pd) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 1d3c917eb830..78f1830fb3cb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
- /* size of allocated rx skbs */
- unsigned int rx_skb_size;
+ /* size of allocated rx buffers */
+ unsigned int rx_buf_size;
- /* list of skb given to hw for rx */
- struct sk_buff **rx_skb;
+ /* allocated rx buffer offset */
+ unsigned int rx_buf_offset;
+
+ /* size of allocated rx frag */
+ unsigned int rx_frag_size;
+
+ /* list of buffer given to hw for rx */
+ void **rx_buf;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..777bbf6d2586 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -2310,33 +2311,22 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, qp, port;
- struct net_device *dev;
-
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
/* We can't be setting up queue inspection for non directly attached
* switches
*/
- if (info->switch_number)
+ if (dp->ds->index)
return 0;
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
* 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
@@ -2376,27 +2366,16 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_unmap_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_unmap_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
- struct net_device *dev;
unsigned int q, qp, port;
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
-
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
num_tx_queues = slave_dev->real_num_tx_queues;
@@ -2417,17 +2396,30 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bcm_sysport_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- int ret = NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct bcm_sysport_priv *priv;
+ int ret = 0;
+
+ priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
+ if (priv->netdev != dev)
+ return NOTIFY_DONE;
switch (event) {
- case DSA_PORT_REGISTER:
- ret = bcm_sysport_map_queues(nb, ptr);
- break;
- case DSA_PORT_UNREGISTER:
- ret = bcm_sysport_unmap_queues(nb, ptr);
+ case NETDEV_CHANGEUPPER:
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dsa_slave_dev_check(info->upper_dev))
+ return NOTIFY_DONE;
+
+ if (info->linking)
+ ret = bcm_sysport_map_queues(dev, info->upper_dev);
+ else
+ ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
break;
}
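For reference, a minimal sketch (illustration only, not driver code) of the lookup pattern that replaces the old notifier-supplied port_number and switch_number fields; dsa_port_from_netdev() resolves a DSA slave netdev back to its port and returns an ERR_PTR for anything that is not a DSA slave:

	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);

	if (IS_ERR(dp))		/* slave_dev is not a DSA slave */
		return -ENODEV;
	if (dp->ds->index)	/* only the directly attached switch */
		return 0;
	port = dp->index;	/* replaces info->port_number */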
@@ -2503,8 +2495,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2571,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
@@ -2599,9 +2594,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
- priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+ priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
- ret = register_dsa_notifier(&priv->dsa_notifier);
+ ret = register_netdevice_notifier(&priv->netdev_notifier);
if (ret) {
- dev_err(&pdev->dev, "failed to register DSA notifier\n");
+ dev_err(&pdev->dev, "failed to register netdevice notifier\n");
goto err_deregister_fixed_link;
@@ -2628,7 +2623,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_notifier:
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2646,7 +2641,7 @@ static int bcm_sysport_remove(struct platform_device *pdev)
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 3a5cb6f128f5..984f76e74b43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/dim.h>
+#include "unimac.h"
+
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
@@ -213,39 +215,6 @@ struct bcm_rsb {
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_10 0
-#define CMD_SPEED_100 1
-#define CMD_SPEED_1000 2
-#define CMD_SPEED_2500 3
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00c
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
@@ -787,7 +756,7 @@ struct bcm_sysport_priv {
struct u64_stats_sync syncp;
/* map information between switch port queues and local queues */
- struct notifier_block dsa_notifier;
+ struct notifier_block netdev_notifier;
unsigned int per_port_num_tx_queues;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8a7d8e..075f6e146b29 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -746,25 +746,25 @@ error:
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
* there is nothing to change? Try it after stabilizing the driver.
*/
-static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
- bool force)
+static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
{
- u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
u32 new_val = (cmdcfg & mask) | set;
u32 cmdcfg_sr;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
- bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+ bgmac_umac_write(bgmac, UMAC_CMD, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
udelay(2);
}
@@ -773,9 +773,9 @@ static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
u32 tmp;
tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
tmp = (addr[4] << 8) | addr[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
@@ -783,9 +783,9 @@ static void bgmac_set_rx_mode(struct net_device *net_dev)
struct bgmac *bgmac = netdev_priv(net_dev);
if (net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}
#if 0 /* We don't use that regs yet */
@@ -825,21 +825,21 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
- u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
u32 set = 0;
switch (bgmac->mac_speed) {
case SPEED_10:
- set |= BGMAC_CMDCFG_ES_10;
+ set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
break;
case SPEED_100:
- set |= BGMAC_CMDCFG_ES_100;
+ set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
break;
case SPEED_1000:
- set |= BGMAC_CMDCFG_ES_1000;
+ set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
break;
case SPEED_2500:
- set |= BGMAC_CMDCFG_ES_2500;
+ set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
break;
default:
dev_err(bgmac->dev, "Unsupported speed: %d\n",
@@ -847,9 +847,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
}
if (bgmac->mac_duplex == DUPLEX_HALF)
- set |= BGMAC_CMDCFG_HD;
+ set |= CMD_HD_EN;
- bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+ bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}
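The speed-field substitutions above are bit-for-bit equivalent: with CMD_SPEED_SHIFT = 2, CMD_SPEED_1000 (2) << 2 = 0x8 matches the old BGMAC_CMDCFG_ES_1000, and CMD_SPEED_MASK (3) << 2 = 0xc matches BGMAC_CMDCFG_ES_MASK, so the register layout is unchanged.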
static void bgmac_miiconfig(struct bgmac *bgmac)
@@ -917,7 +917,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
udelay(1);
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
@@ -986,34 +986,34 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
- * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
- * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
+ * Specs don't say anything about using the SW_RESET bit, but in this routine
+ * UMAC_CMD is read _after_ putting the chip in reset. So the bit has to
* be kept until taking the MAC out of reset.
*/
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
-
- bgmac_cmdcfg_maskset(bgmac,
- ~(BGMAC_CMDCFG_TE |
- BGMAC_CMDCFG_RE |
- BGMAC_CMDCFG_RPI |
- BGMAC_CMDCFG_TAI |
- BGMAC_CMDCFG_HD |
- BGMAC_CMDCFG_ML |
- BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_RL |
- BGMAC_CMDCFG_RED |
- BGMAC_CMDCFG_PE |
- BGMAC_CMDCFG_TPI |
- BGMAC_CMDCFG_PAD_EN |
- BGMAC_CMDCFG_PF),
- BGMAC_CMDCFG_PROM |
- BGMAC_CMDCFG_NLC |
- BGMAC_CMDCFG_CFE |
- cmdcfg_sr,
- false);
+ cmdcfg_sr = CMD_SW_RESET_OLD;
+
+ bgmac_umac_cmd_maskset(bgmac,
+ ~(CMD_TX_EN |
+ CMD_RX_EN |
+ CMD_RX_PAUSE_IGNORE |
+ CMD_TX_ADDR_INS |
+ CMD_HD_EN |
+ CMD_LCL_LOOP_EN |
+ CMD_CNTL_FRM_EN |
+ CMD_RMT_LOOP_EN |
+ CMD_RX_ERR_DISC |
+ CMD_PRBL_EN |
+ CMD_TX_PAUSE_IGNORE |
+ CMD_PAD_EN |
+ CMD_PAUSE_FWD),
+ CMD_PROMISC |
+ CMD_NO_LEN_CHK |
+ CMD_CNTL_FRM_EN |
+ cmdcfg_sr,
+ false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
@@ -1049,16 +1049,16 @@ static void bgmac_enable(struct bgmac *bgmac)
u32 mode;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
- bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- cmdcfg_sr, true);
+ cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
+ bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
+ cmdcfg_sr, true);
udelay(2);
- cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
- bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+ cmdcfg |= CMD_TX_EN | CMD_RX_EN;
+ bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
@@ -1078,7 +1078,7 @@ static void bgmac_enable(struct bgmac *bgmac)
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
- bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
@@ -1105,18 +1105,18 @@ static void bgmac_chip_init(struct bgmac *bgmac)
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);
bgmac_chip_intrs_on(bgmac);
@@ -1252,7 +1252,7 @@ static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 351c598a3ec6..110088e662ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -4,6 +4,8 @@
#include <linux/netdevice.h>
+#include "unimac.h"
+
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
@@ -169,47 +171,7 @@
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
-#define BGMAC_UNIMAC_VERSION 0x800
-#define BGMAC_HDBKP_CTL 0x804
-#define BGMAC_CMDCFG 0x808 /* Configuration */
-#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
-#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
-#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
-#define BGMAC_CMDCFG_ES_10 0x00000000
-#define BGMAC_CMDCFG_ES_100 0x00000004
-#define BGMAC_CMDCFG_ES_1000 0x00000008
-#define BGMAC_CMDCFG_ES_2500 0x0000000C
-#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
-#define BGMAC_CMDCFG_PAD_EN 0x00000020
-#define BGMAC_CMDCFG_CF 0x00000040
-#define BGMAC_CMDCFG_PF 0x00000080
-#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
-#define BGMAC_CMDCFG_TAI 0x00000200
-#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
-#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
-#define BGMAC_CMDCFG_AE 0x00400000
-#define BGMAC_CMDCFG_CFE 0x00800000
-#define BGMAC_CMDCFG_NLC 0x01000000
-#define BGMAC_CMDCFG_RL 0x02000000
-#define BGMAC_CMDCFG_RED 0x04000000
-#define BGMAC_CMDCFG_PE 0x08000000
-#define BGMAC_CMDCFG_TPI 0x10000000
-#define BGMAC_CMDCFG_AT 0x20000000
-#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
-#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
-#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
-#define BGMAC_PAUSEQUANTA 0x818
-#define BGMAC_MAC_MODE 0x844
-#define BGMAC_OUTERTAG 0x848
-#define BGMAC_INNERTAG 0x84c
-#define BGMAC_TXIPG 0x85c
-#define BGMAC_PAUSE_CTL 0xb30
-#define BGMAC_TX_FLUSH 0xb34
-#define BGMAC_RX_STATUS 0xb38
-#define BGMAC_TX_STATUS 0xb3c
+#define BGMAC_UNIMAC 0x800
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
@@ -556,6 +518,16 @@ static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
bgmac->write(bgmac, offset, value);
}
+static inline u32 bgmac_umac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac_read(bgmac, BGMAC_UNIMAC + offset);
+}
+
+static inline void bgmac_umac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac_write(bgmac, BGMAC_UNIMAC + offset, value);
+}
+
static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
return bgmac->idm_read(bgmac, offset);
@@ -609,6 +581,11 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
+static inline void bgmac_umac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, u32 set)
+{
+ bgmac_maskset(bgmac, BGMAC_UNIMAC + offset, mask, set);
+}
+
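A quick sanity check of the offset translation these helpers perform: the shared unimac.h registers are relative to the UniMAC block, so with BGMAC_UNIMAC at 0x800 and UMAC_CMD at 0x008,

	u32 cmd = bgmac_umac_read(bgmac, UMAC_CMD);	/* 0x800 + 0x008 = 0x808 */

reads the same register the old bgmac_read(bgmac, BGMAC_CMDCFG) did.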
static inline int bgmac_phy_connect(struct bgmac *bgmac)
{
return bgmac->phy_connect(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28069b290862..b652ed72a621 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13071,8 +13071,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..a680fd9c68ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -255,7 +255,9 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -1265,8 +1267,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
- if (netif_msg_rx_err(bp))
- netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -2021,10 +2022,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
- if (netif_msg_hw(bp))
- netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
- data1, data2);
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ char *fatal_str = "non-fatal";
+
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2036,14 +2036,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ fatal_str = "fatal";
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- } else {
- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
- bp->fw_reset_max_dsecs * 100);
}
+ netif_warn(bp, hw, bp->dev,
+ "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ fatal_str, data1, data2,
+ bp->fw_reset_min_dsecs * 100,
+ bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -2052,16 +2055,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled)
+ if (!fw_health->enabled) {
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[0]\n");
break;
-
- if (netif_msg_drv(bp))
- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp,
- BNXT_FW_HEALTH_REG));
+ }
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2070,8 +2068,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
+ fw_health->master, fw_health->last_fw_reset_cnt,
+ bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ goto async_event_process_exit;
case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
@@ -2094,6 +2101,20 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_sched_reset(bp, rxr);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ if (fw_health) {
+ fw_health->echo_req_data1 = data1;
+ fw_health->echo_req_data2 = data2;
+ set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2394,6 +2415,10 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
while (1) {
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
@@ -2468,6 +2493,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
int work_done = 0;
u32 cons;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
@@ -2675,6 +2704,23 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
+static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+{
+ u8 init_val = mem_init->init_val;
+ u16 offset = mem_init->offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNXT_MEM_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += mem_init->size)
+ *(p2 + i + offset) = init_val;
+}
+
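A worked example for bnxt_init_ctx_mem(), with invented values: given init_val = 0xff, offset = 8 and size = 64, the loop writes 0xff at byte 8 of every 64-byte context entry (bytes 8, 72, 136, ...), while an offset of BNXT_MEM_INVALID_OFFSET falls back to memset() over the whole block, matching the old rmem->init_val behaviour.

	struct bnxt_mem_init init = {	/* made-up values */
		.init_val = 0xff,
		.offset = 8,
		.size = 64,
	};

	bnxt_init_ctx_mem(&init, page, BNXT_PAGE_SIZE);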
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -2734,9 +2780,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i], rmem->init_val,
- rmem->page_size);
+ if (rmem->mem_init)
+ bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -4272,6 +4318,9 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
+ if (!bp->irq_tbl)
+ return;
+
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
@@ -4425,6 +4474,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* Limit timeout to an upper limit */
+ timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
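Worked numbers for the clamp: DFLT_HWRM_CMD_TIMEOUT is 500 and HWRM_CMD_MAX_TIMEOUT is 40000, both in milliseconds (see bnxt.h later in this patch), so a firmware-requested timeout of e.g. 60000 ms is capped to 40 seconds before the microsecond conversion; the default path is unaffected.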
@@ -6732,6 +6783,39 @@ func_qcfg_exit:
return rc;
}
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
+ struct hwrm_func_backing_store_qcaps_output *resp)
+{
+ struct bnxt_mem_init *mem_init;
+ u16 init_mask;
+ u8 init_val;
+ u8 *offset;
+ int i;
+
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+ offset = &resp->qp_init_offset;
+ mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
+ mem_init->init_val = init_val;
+ mem_init->offset = BNXT_MEM_INVALID_OFFSET;
+ if (!init_mask)
+ continue;
+ if (i == BNXT_CTX_MEM_INIT_STAT)
+ offset = &resp->stat_init_offset;
+ if (init_mask & (1 << i))
+ mem_init->offset = *offset * 4;
+ else
+ mem_init->init_val = 0;
+ }
+ ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+}
+
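A worked example of the decoding above, with invented response values: if ctx_init_mask has bit 0 (QP) set and resp->qp_init_offset reads 2, the QP entry gets mem_init->offset = 2 * 4 = 8, i.e. the initializer byte sits 8 bytes into each QP context entry; a type whose mask bit is clear has init_val forced to 0, so bnxt_init_ctx_mem() leaves that memory untouched. The pointer walk assumes the per-type offset bytes are laid out consecutively from qp_init_offset, with stat_init_offset re-anchored explicitly, presumably because of a gap in the response layout.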
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -6786,12 +6870,16 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+
+ bnxt_init_ctx_initializer(ctx, resp);
+
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6814,6 +6902,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
{
u8 pg_size = 0;
+ if (!rmem->nr_pages)
+ return;
+
if (BNXT_PAGE_SHIFT == 13)
pg_size = 1 << 4;
else if (BNXT_PAGE_SIZE == 16)
@@ -6843,6 +6934,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ u32 req_len = sizeof(req);
__le32 *num_entries;
__le64 *pg_dir;
u32 flags = 0;
@@ -6853,6 +6945,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (!ctx)
return 0;
+ if (req_len > bp->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
req.enables = cpu_to_le32(enables);
@@ -6925,7 +7019,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -6935,7 +7030,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -6954,7 +7049,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, bool use_init_val)
+ u8 depth, struct bnxt_mem_init *mem_init)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -6992,8 +7087,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7008,8 +7102,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7073,6 +7166,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_mem_init *init;
u32 mem_size, ena, entries;
u32 entries_sp, min;
u32 num_mr, num_ah;
@@ -7100,39 +7194,54 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->qp_entry_size) {
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->srq_entry_size) {
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->cq_entry_size) {
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->vnic_entry_size) {
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->stat_entry_size) {
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -7145,10 +7254,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
- if (rc)
- return rc;
+ if (ctx->mrav_entry_size) {
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+ if (rc)
+ return rc;
+ }
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
@@ -7157,10 +7269,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tim_entry_size) {
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
@@ -7174,10 +7288,13 @@ skip_rdma:
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = i ? entries : entries_sp;
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tqm_entry_size) {
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+ NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
@@ -7435,9 +7552,22 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
- return;
+ if (!bp->chip_num) {
+ __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
+ bp->chip_num = readl(bp->bar0 +
+ BNXT_FW_HEALTH_WIN_BASE +
+ BNXT_GRC_REG_CHIP_NUM);
+ }
+ if (!BNXT_CHIP_P5(bp)) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+ status_loc = BNXT_GRC_REG_STATUS_P5 |
+ BNXT_FW_HEALTH_REG_TYPE_BAR0;
+ } else {
+ status_loc = readl(hs + offsetof(struct hcomm_status,
+ fw_status_loc));
}
if (__bnxt_alloc_fw_health(bp)) {
@@ -7445,7 +7575,6 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
return;
}
- status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
@@ -8600,7 +8729,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -8808,7 +8937,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
{
int i;
- if (!bp->bnapi)
+ if (!bp->bnapi ||
+ test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8825,6 +8955,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
{
int i;
+ clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
@@ -8853,9 +8984,10 @@ void bnxt_tx_disable(struct bnxt *bp)
txr->dev_state = BNXT_DEV_STATE_CLOSING;
}
}
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
- netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
@@ -9331,13 +9463,60 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
static int bnxt_fw_init_one(struct bnxt *bp);
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
+{
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
+
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
+
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
+}
+
+static int bnxt_try_recover_fw(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ int retry = 0, rc;
+ u32 sts;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ do {
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ break;
+ retry++;
+ } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (!BNXT_FW_IS_HEALTHY(sts)) {
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ rc = -ENODEV;
+ }
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recovery via OP-TEE requested\n");
+ return bnxt_fw_reset_via_optee(bp);
+ }
+ return rc;
+ }
+
+ return -ENODEV;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
bool resc_reinit = false, fw_reset = false;
+ int rc, retry = 0;
u32 flags = 0;
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -9346,10 +9525,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ while (retry < BNXT_FW_IF_RETRY) {
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc != -EAGAIN)
+ break;
+
+ msleep(50);
+ retry++;
+ }
if (!rc)
flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc == -EAGAIN)
+ return rc;
+ if (rc && up) {
+ rc = bnxt_try_recover_fw(bp);
+ fw_reset = true;
+ }
if (rc)
return rc;
@@ -9689,6 +9883,25 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+static int bnxt_reinit_after_abort(struct bnxt *bp)
+{
+ int rc;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EBUSY;
+
+ rc = bnxt_fw_init_one(bp);
+ if (!rc) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (!rc) {
+ clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ }
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -9847,8 +10060,14 @@ static int bnxt_open(struct net_device *dev)
int rc;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
- netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
- return -ENODEV;
+ rc = bnxt_reinit_after_abort(bp);
+ if (rc) {
+ if (rc == -EBUSY)
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
+ else
+ netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
+ return -ENODEV;
+ }
}
rc = bnxt_hwrm_if_change(bp, true);
@@ -10785,11 +11004,23 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- /* When firmware is fatal state, disable PCI device to prevent
- * any potential bad DMAs before freeing kernel memory.
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ u16 val = 0;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff)
+ bp->fw_reset_min_dsecs = 0;
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
+ }
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -10994,6 +11225,17 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+static void bnxt_fw_echo_reply(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_func_echo_response_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
+ req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+ req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
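End to end, the echo path added by this patch: bnxt_async_event_process() stashes the event's data1/data2 in bp->fw_health and sets BNXT_FW_ECHO_REQUEST_SP_EVENT; bnxt_sp_task() picks the bit up and calls bnxt_fw_echo_reply(), which mirrors both words back to the firmware in a HWRM_FUNC_ECHO_RESPONSE message.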
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11061,6 +11303,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);
+ if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
+ bnxt_fw_echo_reply(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -11177,21 +11422,6 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static int bnxt_fw_reset_via_optee(struct bnxt *bp)
-{
-#ifdef CONFIG_TEE_BNXT_FW
- int rc = tee_bnxt_fw_load();
-
- if (rc)
- netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
-
- return rc;
-#else
- netdev_err(bp->dev, "OP-TEE not supported\n");
- return -ENODEV;
-#endif
-}
-
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -11200,19 +11430,10 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
rc = bnxt_hwrm_ver_get(bp);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
-
- netdev_err(bp->dev,
- "Firmware not responding, status: 0x%x\n",
- sts);
- if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
- netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
- rc = bnxt_fw_reset_via_optee(bp);
- if (!rc)
- rc = bnxt_hwrm_ver_get(bp);
- }
- }
+ rc = bnxt_try_recover_fw(bp);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_ver_get(bp);
if (rc)
return rc;
}
@@ -11412,6 +11633,12 @@ static void bnxt_reset_all(struct bnxt *bp)
bp->fw_reset_timestamp = jiffies;
}
+static bool bnxt_fw_reset_timeout(struct bnxt *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
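The arithmetic here: fw_reset_max_dsecs counts 100 ms ticks (cf. the *_DELAY_IN_100MS_TICKS fields in bnxt_hsi.h), so max_dsecs * HZ / 10 converts ticks to jiffies; e.g. 60 ticks gives 60 * HZ / 10 = 6 * HZ, a 6 second deadline past fw_reset_timestamp.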
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
@@ -11433,8 +11660,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_timestamp));
goto fw_reset_abort;
} else if (n > 0) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
@@ -11463,8 +11689,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
- !time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ !bnxt_fw_reset_timeout(bp)) {
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
@@ -11488,6 +11713,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;
+ if (!bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
+ &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
+ }
+ }
val = bnxt_fw_health_readl(bp,
BNXT_FW_RESET_INPROG_REG);
if (val)
@@ -11506,8 +11745,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
if (rc) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted\n");
goto fw_reset_abort_status;
}
@@ -12088,8 +12326,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -12541,9 +12777,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- if (BNXT_PF(bp))
- bnxt_vpd_read_info(bp);
-
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
@@ -12555,6 +12788,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
+
if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
if (BNXT_CHIP_SR2(bp))
@@ -12887,10 +13123,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +13155,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +13174,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..1259e68cba2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -656,6 +656,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_MAX_TIMEOUT 40000
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -713,6 +714,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
+struct bnxt_mem_init {
+ u8 init_val;
+ u16 offset;
+#define BNXT_MEM_INVALID_OFFSET 0xffff
+ u16 size;
+};
+
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -722,7 +730,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- u8 init_val;
+ struct bnxt_mem_init *mem_init;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -1345,9 +1353,14 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1436,6 +1449,13 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1461,7 +1481,6 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
u8 tqm_fp_rings_count;
u32 flags;
@@ -1474,7 +1493,16 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
+
+#define BNXT_CTX_MEM_INIT_QP 0
+#define BNXT_CTX_MEM_INIT_SRQ 1
+#define BNXT_CTX_MEM_INIT_CQ 2
+#define BNXT_CTX_MEM_INIT_VNIC 3
+#define BNXT_CTX_MEM_INIT_STAT 4
+#define BNXT_CTX_MEM_INIT_MRAV 5
+#define BNXT_CTX_MEM_INIT_MAX 6
+ struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
struct bnxt_fw_health {
@@ -1503,6 +1531,8 @@ struct bnxt_fw_health {
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
+ u32 echo_req_data1;
+ u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
@@ -1527,9 +1557,22 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
((reg) & BNXT_GRC_OFFSET_MASK))
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+
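By example, the encoding these macros test (register value invented): the health code lives in the low 16 bits, with 0x8000 meaning healthy, lower values still booting, and higher values an error state.

	u32 sts = 0x00018001;		/* made-up readout */

	BNXT_FW_IS_HEALTHY(sts);	/* false: low word is 0x8001 */
	BNXT_FW_IS_BOOTING(sts);	/* false: 0x8001 is not < 0x8000 */
	BNXT_FW_IS_ERR(sts);		/* true: 0x8001 > 0x8000 */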
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1783,6 +1826,7 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1898,6 +1942,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
int fw_reset_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b7b69ed62db..64381be935a8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -44,21 +44,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val, health_status;
+ u32 val;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- health_status = val & 0xffff;
- if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ if (BNXT_FW_IS_BOOTING(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Not yet completed initialization");
if (rc)
return rc;
- } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ } else if (BNXT_FW_IS_ERR(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Encountered fatal error and cannot recover");
if (rc)
@@ -472,8 +471,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;
- sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
- ver & 0xF);
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+ ver & 0xf);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ff79d5d14c4..2f8b193a772d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |=
+ install.flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
+ install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2d3e962bdac3..6199f125bc13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2020 Broadcom Inc.
+ * Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
@@ -164,6 +165,7 @@ struct cmd_nums {
#define HWRM_VNIC_PLCMODES_CFG 0x48UL
#define HWRM_VNIC_PLCMODES_QCFG 0x49UL
#define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
#define HWRM_RING_ALLOC 0x50UL
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
@@ -184,6 +186,9 @@ struct cmd_nums {
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
#define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
#define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -217,6 +222,8 @@ struct cmd_nums {
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -347,6 +354,8 @@ struct cmd_nums {
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -359,6 +368,11 @@ struct cmd_nums {
#define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
+ #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
+ #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -384,6 +398,7 @@ struct cmd_nums {
#define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
#define HWRM_TF_TCAM_SET 0x2f8UL
#define HWRM_TF_TCAM_GET 0x2f9UL
#define HWRM_TF_TCAM_MOVE 0x2faUL
@@ -486,9 +501,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.1.68"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 16
+#define HWRM_VERSION_STR "1.10.2.16"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -563,8 +578,9 @@ struct hwrm_ver_get_output {
__le16 max_resp_len;
__le16 def_req_timeout;
u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
u8 unused_0[2];
u8 always_1;
__le16 hwrm_intf_major;
@@ -708,6 +724,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -815,6 +833,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
@@ -832,7 +852,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1033,6 +1054,26 @@ struct hwrm_async_event_cmpl_deferred_response {
__le32 event_data1;
};
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1271,6 +1312,14 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1315,6 +1364,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1450,6 +1501,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1731,6 +1784,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1993,7 +2047,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
+/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2024,13 +2078,49 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
- __le32 rsvd;
- __le16 rsvd1;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
u8 tqm_fp_rings_count;
- u8 valid;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 rsvd[5];
+ u8 valid;
+};
+
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
};
-/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2041,22 +2131,25 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2358,6 +2451,63 @@ struct hwrm_func_backing_store_cfg_input {
__le16 tqm_entry_size;
__le16 mrav_entry_size;
__le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2463,6 +2613,27 @@ struct hwrm_error_recovery_qcfg_output {
u8 valid;
};
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
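Together with the hwrm_async_event_cmpl_echo_request completion added above, these two structures describe a liveness handshake: firmware raises an ECHO_REQUEST async event carrying two opaque words, and the driver reflects them back unmodified. A hedged sketch of the response side, assuming a hypothetical hwrm_send() helper that fills the common request header (req_type, seq_id, resp_addr) and issues the command, and that the HWRM_FUNC_ECHO_RESPONSE opcode is defined in the unshown part of cmd_nums:

	/* Sketch only: echo event_data1/2 from the async event back verbatim. */
	static int echo_response(struct bnxt *bp, u32 data1, u32 data2)
	{
		struct hwrm_func_echo_response_input req = {0};

		req.event_data1 = cpu_to_le32(data1);
		req.event_data2 = cpu_to_le32(data2);
		return hwrm_send(bp, HWRM_FUNC_ECHO_RESPONSE, &req, sizeof(req));
	}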
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -2930,6 +3101,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -3528,8 +3700,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4119,7 +4291,10 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 unused_0;
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
char qid0_name[16];
char qid1_name[16];
char qid2_name[16];
@@ -4128,7 +4303,34 @@ struct hwrm_queue_qportcfg_output {
char qid5_name[16];
char qid6_name[16];
char qid7_name[16];
- u8 unused_1[7];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
u8 valid;
};
@@ -5142,8 +5344,10 @@ struct hwrm_vnic_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- u8 unused_0[4];
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ __le16 virtio_net_fid;
+ u8 unused_0[2];
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -5260,6 +5464,9 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5585,11 +5792,15 @@ struct hwrm_ring_alloc_output {
__le16 resp_len;
__le16 ring_id;
__le16 logical_ring_id;
- u8 unused_0[3];
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
u8 valid;
};
-/* hwrm_ring_free_input (size:192b/24B) */
+/* hwrm_ring_free_input (size:256b/32B) */
struct hwrm_ring_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5604,9 +5815,13 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
#define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
__le16 ring_id;
- u8 unused_1[4];
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
};
/* hwrm_ring_free_output (size:128b/16B) */
@@ -5644,7 +5859,11 @@ struct hwrm_ring_reset_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[4];
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
u8 consumer_idx[3];
u8 valid;
};
@@ -6988,21 +7207,23 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
u8 unused_0[3];
u8 valid;
};
@@ -7420,7 +7641,13 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 unused_0[6];
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
u8 valid;
};
@@ -7472,7 +7699,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
__le16 len;
u8 version;
u8 count;
@@ -8000,6 +8228,9 @@ struct hwrm_dbg_coredump_initiate_output {
struct coredump_data_hdr {
__le32 address;
__le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
__le32 instance;
__le32 next_offset;
};
@@ -8669,7 +8900,6 @@ struct hcomm_status {
#define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
#define HCOMM_STATUS_TRUE_OFFSET_SFT 2
};
-
#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8c8368c2f335..64dbbb04b043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fcc262064766..641303894341 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
+ xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
rcu_read_lock();
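This hunk and the nicvf conversion further down follow the same pattern: the open-coded xdp_buff field assignments are replaced by the xdp_init_buff()/xdp_prepare_buff() helpers from <net/xdp.h>. The general shape, with placeholder values:

	struct xdp_buff xdp;

	xdp_init_buff(&xdp, frame_sz, rxq);		/* frame size + rxq binding */
	xdp_prepare_buff(&xdp, hard_start,		/* start of the buffer */
			 data - hard_start,		/* headroom before payload */
			 len,				/* payload length */
			 false);			/* metadata not valid */

Passing false for meta_valid is what subsumes the old explicit xdp_set_data_meta_invalid() call.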
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index f6ca01da141d..0a6d91b0f0aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,8 @@
#include <linux/dim.h>
#include <linux/ethtool.h>
+#include "../unimac.h"
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -150,63 +152,6 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
-#define UMAC_HD_BKP_CTRL 0x004
-#define HD_FC_EN (1 << 0)
-#define HD_FC_BKOFF_OK (1 << 1)
-#define IPG_CONFIG_RX_SHIFT 2
-#define IPG_CONFIG_RX_MASK 0x1F
-
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define UMAC_SPEED_10 0
-#define UMAC_SPEED_100 1
-#define UMAC_SPEED_1000 2
-#define UMAC_SPEED_2500 3
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00C
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_MODE 0x44
-#define MODE_LINK_STATUS (1 << 5)
-
-#define UMAC_EEE_CTRL 0x064
-#define EN_LPI_RX_PAUSE (1 << 0)
-#define EN_LPI_TX_PFC (1 << 1)
-#define EN_LPI_TX_PAUSE (1 << 2)
-#define EEE_EN (1 << 3)
-#define RX_FIFO_CHECK (1 << 4)
-#define EEE_TX_CLK_DIS (1 << 5)
-#define DIS_EEE_10M (1 << 6)
-#define LP_IDLE_PREDICTION_MODE (1 << 7)
-
-#define UMAC_EEE_LPI_TIMER 0x068
-#define UMAC_EEE_WAKE_TIMER 0x06C
-#define UMAC_EEE_REF_COUNT 0x070
-#define EEE_REFERENCE_COUNT_MASK 0xffff
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..5335244e4577 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -63,11 +63,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
/* speed */
if (phydev->speed == SPEED_1000)
- cmd_bits = UMAC_SPEED_1000;
+ cmd_bits = CMD_SPEED_1000;
else if (phydev->speed == SPEED_100)
- cmd_bits = UMAC_SPEED_100;
+ cmd_bits = CMD_SPEED_100;
else
- cmd_bits = UMAC_SPEED_10;
+ cmd_bits = CMD_SPEED_10;
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
@@ -359,7 +359,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* those versions of GENET.
*/
if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd0eeca..d2381929931b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1580,12 +1580,6 @@ static int tg3_mdio_init(struct tg3 *tp)
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tg3_flag(tp, RGMII_INBAND_DISABLE))
- phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fallthrough;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
@@ -12826,11 +12820,13 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = tg3_nvram_logical_addr(tp, offset);
}
- }
- if (!offset || !len) {
- offset = TG3_NVM_VPD_OFF;
- len = TG3_NVM_VPD_LEN;
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+ } else {
+ len = TG3_NVM_PCI_VPD_MAX_LEN;
}
buf = kmalloc(len, GFP_KERNEL);
@@ -12846,26 +12842,16 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
goto error;
}
+ *vpdlen = len;
} else {
- u8 *ptr;
ssize_t cnt;
- unsigned int pos = 0;
-
- ptr = (u8 *)&buf[0];
- for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
- cnt = pci_read_vpd(tp->pdev, pos,
- len - pos, ptr);
- if (cnt == -ETIMEDOUT || cnt == -EINTR)
- cnt = 0;
- else if (cnt < 0)
- goto error;
- }
- if (pos != len)
+
+ cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
+ if (cnt < 0)
goto error;
+ *vpdlen = cnt;
}
- *vpdlen = len;
-
return buf;
error:
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..46ec4fdfd16a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,6 +2101,7 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
+#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/broadcom/unimac.h b/drivers/net/ethernet/broadcom/unimac.h
new file mode 100644
index 000000000000..585a85286257
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/unimac.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __UNIMAC_H
+#define __UNIMAC_H
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET_OLD (1 << 11)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_RX_ERR_DISC (1 << 26)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_PAUSE_QUANTA 0x018
+#define UMAC_MODE 0x044
+#define MODE_LINK_STATUS (1 << 5)
+#define UMAC_FRM_TAG0 0x048 /* outer tag */
+#define UMAC_FRM_TAG1 0x04c /* inner tag */
+#define UMAC_TX_IPG_LEN 0x05c
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+#define UMAC_RX_IPG_INV 0x078
+#define UMAC_MACSEC_PROG_TX_CRC 0x310
+#define UMAC_MACSEC_CTRL 0x314
+#define UMAC_PAUSE_CTRL 0x330
+#define UMAC_TX_FLUSH 0x334
+#define UMAC_RX_FIFO_STATUS 0x338
+#define UMAC_TX_FIFO_STATUS 0x33c
+
+#endif
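Note that the CMD_SPEED_* values are field values rather than single-bit flags: they occupy a two-bit field at CMD_SPEED_SHIFT inside UMAC_CMD, which is why the bcmmii.c hunk below shifts them before writing. A read-modify-write sketch, assuming hypothetical umac_readl()/umac_writel() register accessors:

	u32 cmd = umac_readl(priv, UMAC_CMD);

	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);	/* clear the old speed field */
	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;	/* select gigabit */
	umac_writel(priv, cmd, UMAC_CMD);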
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..472bf8f220bc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
+ return;
+
+ /* In case of MII, the PHY is the clock master */
+ if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
switch (speed) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 37d064193f0f..2a0d64e5797c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1163,7 +1163,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
oct->flags |= LIO_FLAG_MSI_ENABLED;
/* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7d00d3a8ded4..7c5af4beedc6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3219,8 +3219,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_set_vf_mac = liquidio_set_vf_mac,
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 103440f97bc8..516f166ceff8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1879,8 +1879,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 387a57cbfb73..e159194d0aef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -545,7 +545,7 @@ static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
-static spinlock_t octeon_devices_lock;
+static DEFINE_SPINLOCK(octeon_devices_lock);
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -563,7 +563,6 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
- spin_lock_init(&octeon_devices_lock);
}
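DEFINE_SPINLOCK() declares the lock with a static initializer, so it is valid from load time and the runtime spin_lock_init() call above becomes redundant. The macro is roughly equivalent to:

	static spinlock_t octeon_devices_lock =
		__SPIN_LOCK_UNLOCKED(octeon_devices_lock);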
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 5e50bb19bf26..ecffebd513be 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1556,18 +1556,7 @@ static struct platform_driver octeon_mgmt_driver = {
.remove = octeon_mgmt_remove,
};
-static int __init octeon_mgmt_mod_init(void)
-{
- return platform_driver_register(&octeon_mgmt_driver);
-}
-
-static void __exit octeon_mgmt_mod_exit(void)
-{
- platform_driver_unregister(&octeon_mgmt_driver);
-}
-
-module_init(octeon_mgmt_mod_init);
-module_exit(octeon_mgmt_mod_exit);
+module_platform_driver(octeon_mgmt_driver);
MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
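module_platform_driver() generates exactly the init/exit boilerplate deleted above; conceptually it expands to:

	/* Rough expansion; the real macro goes through module_driver(). */
	static int __init octeon_mgmt_driver_init(void)
	{
		return platform_driver_register(&octeon_mgmt_driver);
	}
	module_init(octeon_mgmt_driver_init);

	static void __exit octeon_mgmt_driver_exit(void)
	{
		platform_driver_unregister(&octeon_mgmt_driver);
	}
	module_exit(octeon_mgmt_driver_exit);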
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f3b7b443f964..c33b4e837515 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -530,6 +530,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
struct rcv_queue *rq, struct sk_buff **skb)
{
+ unsigned char *hard_start, *data;
struct xdp_buff xdp;
struct page *page;
u32 action;
@@ -547,12 +548,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
- xdp.data = (void *)cpu_addr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ &rq->xdp_rxq);
+ hard_start = page_address(page);
+ data = (unsigned char *)cpu_addr;
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 6475060649e9..0321be77366c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -238,7 +238,6 @@ struct adapter {
int msg_enable;
u32 mmio_len;
- struct work_struct ext_intr_handler_task;
struct adapter_params params;
/* Terminator modules. */
@@ -257,6 +256,7 @@ struct adapter {
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
+ u32 pending_thread_intr;
u32 slow_intr_mask;
int t1powersave;
};
@@ -334,8 +334,7 @@ void t1_interrupts_enable(adapter_t *adapter);
void t1_interrupts_disable(adapter_t *adapter);
void t1_interrupts_clear(adapter_t *adapter);
int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-void t1_elmer0_ext_intr(adapter_t *adapter);
-int t1_slow_intr_handler(adapter_t *adapter);
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
@@ -347,7 +346,6 @@ int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
int t1_init_hw_modules(adapter_t *adapter);
int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
void t1_free_sw_modules(adapter_t *adapter);
-void t1_fatal_err(adapter_t *adapter);
void t1_link_changed(adapter_t *adapter, int port_id);
void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0e4a0f413960..512da98019c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -211,9 +211,10 @@ static int cxgb_up(struct adapter *adapter)
t1_interrupts_clear(adapter);
adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
- err = request_irq(adapter->pdev->irq, t1_interrupt,
- adapter->params.has_msi ? 0 : IRQF_SHARED,
- adapter->name, adapter);
+ err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
+ t1_interrupt_thread,
+ adapter->params.has_msi ? 0 : IRQF_SHARED,
+ adapter->name, adapter);
if (err) {
if (adapter->params.has_msi)
pci_disable_msi(adapter->pdev);
@@ -916,51 +917,6 @@ static void mac_stats_task(struct work_struct *work)
spin_unlock(&adapter->work_lock);
}
-/*
- * Processes elmer0 external interrupts in process context.
- */
-static void ext_intr_task(struct work_struct *work)
-{
- struct adapter *adapter =
- container_of(work, struct adapter, ext_intr_handler_task);
-
- t1_elmer0_ext_intr_handler(adapter);
-
- /* Now reenable external interrupts */
- spin_lock_irq(&adapter->async_lock);
- adapter->slow_intr_mask |= F_PL_INTR_EXT;
- writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
- writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
- spin_unlock_irq(&adapter->async_lock);
-}
-
-/*
- * Interrupt-context handler for elmer0 external interrupts.
- */
-void t1_elmer0_ext_intr(struct adapter *adapter)
-{
- /*
- * Schedule a task to handle external interrupts as we require
- * a process context. We disable EXT interrupts in the interim
- * and let the task reenable them when it's done.
- */
- adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
- writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
- schedule_work(&adapter->ext_intr_handler_task);
-}
-
-void t1_fatal_err(struct adapter *adapter)
-{
- if (adapter->flags & FULL_INIT_DONE) {
- t1_sge_stop(adapter->sge);
- t1_interrupts_disable(adapter);
- }
- pr_alert("%s: encountered fatal error, operation suspended\n",
- adapter->name);
-}
-
static const struct net_device_ops cxgb_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -1062,8 +1018,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->async_lock);
spin_lock_init(&adapter->mac_lock);
- INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task);
INIT_DELAYED_WORK(&adapter->stats_update_task,
mac_stats_task);
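The shape being adopted throughout this cxgb conversion is the kernel's threaded-IRQ split: the hard handler acknowledges the device, masks whatever needs process context, and returns IRQ_WAKE_THREAD; the thread handler then does the sleepable work and unmasks. Stripped of driver specifics, and with hypothetical foo_* helpers:

	static irqreturn_t foo_hardirq(int irq, void *data)
	{
		struct foo_dev *dev = data;

		if (!foo_irq_pending(dev))	/* hypothetical pending check */
			return IRQ_NONE;
		foo_mask_slow_sources(dev);	/* defer the sleepable work */
		return IRQ_WAKE_THREAD;		/* runs foo_thread() next */
	}

	static irqreturn_t foo_thread(int irq, void *data)
	{
		struct foo_dev *dev = data;

		foo_handle_slow_sources(dev);	/* may sleep here */
		foo_unmask_slow_sources(dev);
		return IRQ_HANDLED;
	}

	/* err = request_threaded_irq(irq, foo_hardirq, foo_thread,
	 *			      IRQF_SHARED, "foo", dev);
	 */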
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 2d9c2b5a690a..cda01f22c71c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -940,10 +940,11 @@ void t1_sge_intr_clear(struct sge *sge)
/*
* SGE 'Error' interrupt handler
*/
-int t1_sge_intr_error_handler(struct sge *sge)
+bool t1_sge_intr_error_handler(struct sge *sge)
{
struct adapter *adapter = sge->adapter;
u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+ bool wake = false;
if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
cause &= ~F_PACKET_TOO_BIG;
@@ -967,11 +968,14 @@ int t1_sge_intr_error_handler(struct sge *sge)
sge->stats.pkt_mismatch++;
pr_alert("%s: SGE packet mismatch\n", adapter->name);
}
- if (cause & SGE_INT_FATAL)
- t1_fatal_err(adapter);
+ if (cause & SGE_INT_FATAL) {
+ t1_interrupts_disable(adapter);
+ adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+ wake = true;
+ }
writel(cause, adapter->regs + A_SG_INT_CAUSE);
- return 0;
+ return wake;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
@@ -1619,11 +1623,46 @@ int t1_poll(struct napi_struct *napi, int budget)
return work_done;
}
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+ struct adapter *adapter = data;
+ u32 pending_thread_intr;
+
+ spin_lock_irq(&adapter->async_lock);
+ pending_thread_intr = adapter->pending_thread_intr;
+ adapter->pending_thread_intr = 0;
+ spin_unlock_irq(&adapter->async_lock);
+
+ if (!pending_thread_intr)
+ return IRQ_NONE;
+
+ if (pending_thread_intr & F_PL_INTR_EXT)
+ t1_elmer0_ext_intr_handler(adapter);
+
+ /* This error is fatal; interrupts remain off */
+ if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
+ pr_alert("%s: encountered fatal error, operation suspended\n",
+ adapter->name);
+ t1_sge_stop(adapter->sge);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock_irq(&adapter->async_lock);
+ adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+ writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ spin_unlock_irq(&adapter->async_lock);
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t t1_interrupt(int irq, void *data)
{
struct adapter *adapter = data;
struct sge *sge = adapter->sge;
- int handled;
+ irqreturn_t handled;
if (likely(responses_pending(adapter))) {
writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
@@ -1645,10 +1684,10 @@ irqreturn_t t1_interrupt(int irq, void *data)
handled = t1_slow_intr_handler(adapter);
spin_unlock(&adapter->async_lock);
- if (!handled)
+ if (handled == IRQ_NONE)
sge->stats.unhandled_irqs++;
- return IRQ_RETVAL(handled != 0);
+ return handled;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index a1ba591b3431..716705b96f26 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -74,6 +74,7 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
int t1_sge_configure(struct sge *, struct sge_params *);
int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt_thread(int irq, void *data);
irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
@@ -81,7 +82,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
-int t1_sge_intr_error_handler(struct sge *);
+bool t1_sge_intr_error_handler(struct sge *sge);
void t1_sge_intr_enable(struct sge *);
void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index ea0f8741d7cf..310add28fcf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -170,7 +170,7 @@ void t1_link_changed(adapter_t *adapter, int port_id)
t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
}
-static int t1_pci_intr_handler(adapter_t *adapter)
+static bool t1_pci_intr_handler(adapter_t *adapter)
{
u32 pcix_cause;
@@ -179,9 +179,13 @@ static int t1_pci_intr_handler(adapter_t *adapter)
if (pcix_cause) {
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
pcix_cause);
- t1_fatal_err(adapter); /* PCI errors are fatal */
+ /* PCI errors are fatal */
+ t1_interrupts_disable(adapter);
+ adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+ pr_alert("%s: PCI error encountered.\n", adapter->name);
+ return true;
}
- return 0;
+ return false;
}
#ifdef CONFIG_CHELSIO_T1_1G
@@ -210,13 +214,16 @@ static int fpga_phy_intr_handler(adapter_t *adapter)
/*
* Slow path interrupt handler for FPGAs.
*/
-static int fpga_slow_intr(adapter_t *adapter)
+static irqreturn_t fpga_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
+ irqreturn_t ret = IRQ_NONE;
cause &= ~F_PL_INTR_SGE_DATA;
- if (cause & F_PL_INTR_SGE_ERR)
- t1_sge_intr_error_handler(adapter->sge);
+ if (cause & F_PL_INTR_SGE_ERR) {
+ if (t1_sge_intr_error_handler(adapter->sge))
+ ret = IRQ_WAKE_THREAD;
+ }
if (cause & FPGA_PCIX_INTERRUPT_GMAC)
fpga_phy_intr_handler(adapter);
@@ -231,14 +238,19 @@ static int fpga_slow_intr(adapter_t *adapter)
/* Clear TP interrupt */
writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
}
- if (cause & FPGA_PCIX_INTERRUPT_PCIX)
- t1_pci_intr_handler(adapter);
+ if (cause & FPGA_PCIX_INTERRUPT_PCIX) {
+ if (t1_pci_intr_handler(adapter))
+ ret = IRQ_WAKE_THREAD;
+ }
/* Clear the interrupts just processed. */
if (cause)
writel(cause, adapter->regs + A_PL_CAUSE);
- return cause != 0;
+ if (ret != IRQ_NONE)
+ return ret;
+
+ return cause == 0 ? IRQ_NONE : IRQ_HANDLED;
}
#endif
@@ -842,31 +854,45 @@ void t1_interrupts_clear(adapter_t* adapter)
/*
* Slow path interrupt handler for ASICs.
*/
-static int asic_slow_intr(adapter_t *adapter)
+static irqreturn_t asic_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
+ irqreturn_t ret = IRQ_HANDLED;
cause &= adapter->slow_intr_mask;
if (!cause)
- return 0;
- if (cause & F_PL_INTR_SGE_ERR)
- t1_sge_intr_error_handler(adapter->sge);
+ return IRQ_NONE;
+ if (cause & F_PL_INTR_SGE_ERR) {
+ if (t1_sge_intr_error_handler(adapter->sge))
+ ret = IRQ_WAKE_THREAD;
+ }
if (cause & F_PL_INTR_TP)
t1_tp_intr_handler(adapter->tp);
if (cause & F_PL_INTR_ESPI)
t1_espi_intr_handler(adapter->espi);
- if (cause & F_PL_INTR_PCIX)
- t1_pci_intr_handler(adapter);
- if (cause & F_PL_INTR_EXT)
- t1_elmer0_ext_intr(adapter);
+ if (cause & F_PL_INTR_PCIX) {
+ if (t1_pci_intr_handler(adapter))
+ ret = IRQ_WAKE_THREAD;
+ }
+ if (cause & F_PL_INTR_EXT) {
+ /* Wake the threaded interrupt to handle external interrupts as
+ * we require a process context. We disable EXT interrupts in
+ * the interim and let the thread reenable them when it's done.
+ */
+ adapter->pending_thread_intr |= F_PL_INTR_EXT;
+ adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ ret = IRQ_WAKE_THREAD;
+ }
/* Clear the interrupts just processed. */
writel(cause, adapter->regs + A_PL_CAUSE);
readl(adapter->regs + A_PL_CAUSE); /* flush writes */
- return 1;
+ return ret;
}
-int t1_slow_intr_handler(adapter_t *adapter)
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter)
{
#ifdef CONFIG_CHELSIO_T1_1G
if (!t1_is_asic(adapter))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 876f90e5795e..d5218e74284c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -220,9 +220,6 @@ struct cudbg_mps_tcam {
u8 reserved[2];
};
-#define CUDBG_VPD_PF_SIZE 0x800
-#define CUDBG_SCFG_VER_ADDR 0x06
-#define CUDBG_SCFG_VER_LEN 4
#define CUDBG_VPD_VER_ADDR 0x18c7
#define CUDBG_VPD_VER_LEN 2
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 75474f810249..6c85a10f465c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -2686,10 +2686,10 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
char vpd_str[CUDBG_VPD_VER_LEN + 1];
- u32 scfg_vers, vpd_vers, fw_vers;
struct cudbg_vpd_data *vpd_data;
struct vpd_params vpd = { 0 };
- int rc, ret;
+ u32 vpd_vers, fw_vers;
+ int rc;
rc = t4_get_raw_vpd_params(padap, &vpd);
if (rc)
@@ -2699,24 +2699,6 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- /* Serial Configuration Version is located beyond the PF's vpd size.
- * Temporarily give access to entire EEPROM to get it.
- */
- rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
- if (rc < 0)
- return rc;
-
- ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
- &scfg_vers);
-
- /* Restore back to original PF's vpd size */
- rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
- if (rc < 0)
- return rc;
-
- if (ret)
- return ret;
-
rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
vpd_str);
if (rc)
@@ -2737,7 +2719,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
- vpd_data->scfg_vers = scfg_vers;
+ vpd_data->scfg_vers = t4_read_reg(padap, PCIE_STATIC_SPARE2_A);
vpd_data->vpd_vers = vpd_vers;
vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 8e681ce72d62..314f8d806723 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -414,7 +414,6 @@ struct pf_resources {
};
struct pci_params {
- unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7fd264a6d085..6264bc66a4fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3201,8 +3201,6 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
int err;
u8 *na;
- adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
- PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
if (err)
return;
@@ -3882,8 +3880,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
@@ -5139,7 +5135,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* See if FW supports FW_FILTER2 work request */
if (is_t4(adap->params.chip)) {
- adap->params.filter2_wr_support = 0;
+ adap->params.filter2_wr_support = false;
} else {
params[0] = FW_PARAM_DEV(FILTER2_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b49f2fa9b18..34546f5312ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -46,6 +46,9 @@
#define MAX_ULD_QSETS 16
#define MAX_ULD_NPORTS 4
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 196652a114c5..256fae15e032 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct adapter *adapter;
int qidx, credits, ret;
size_t fw_hdr_copy_len;
+ unsigned int chip_ver;
u64 cntrl, *end;
u32 wr_mid;
@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
goto out_free;
}
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/* After we're done injecting the Work Request for this
@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
cpl = (void *)(lso + 1);
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ if (chip_ver <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
@@ -2842,17 +2846,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
* @skb: the packet
*
* Returns true if a packet can be sent as an offload WR with immediate
- * data. We currently use the same limit as for Ethernet packets.
+ * data.
+ * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to an 8-bit
+ * field, whereas FW_ULPTX_WR commands have a 256-byte immediate-only
+ * payload limit.
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
- if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ if (unlikely(opcode == FW_ULPTX_WR))
+ return skb->len <= MAX_IMM_ULPTX_WR_LEN;
+ else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
return skb->len <= SGE_MAX_WR_LEN;
else
- return skb->len <= MAX_IMM_TX_PKT_LEN;
+ return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
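For reference, the ULPTX ceiling introduced in cxgb4_uld.h works out as:

	/* MAX_IMM_ULPTX_WR_LEN = 32 (ulp_mem_io) + 8 (ulptx_idata)
	 *                      + 256 (immediate payload) + 8 (padding)
	 *                      = 304 bytes
	 */

so FW_ULPTX_WR skbs are judged against 304 bytes of total WR, while plain offload WRs keep the 255-byte FW_OFLD_TX_DATA_WR payload limit described above.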
/**
@@ -3598,6 +3607,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. These Egress Update messages will be our sole CIDX Updates
+ * we get since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, The code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
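
The endianness claim in the comment holds because both the Egress Update message and the Status Page keep the CIDX big-endian, so a raw copy needs no byte swap. A minimal userspace check (struct layouts simplified; not the driver's real types):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

struct egr_update  { uint16_t cidx; }; /* big-endian, as on the wire */
struct status_page { uint16_t cidx; }; /* big-endian, as in adapter memory */

int main(void)
{
	struct egr_update egr = { .cidx = htons(42) };
	struct status_page stat;

	/* byte-for-byte copy, no conversion, value preserved */
	memcpy(&stat.cidx, &egr.cidx, sizeof(stat.cidx));
	printf("cidx = %d\n", ntohs(stat.cidx)); /* prints 42 */
	return 0;
}
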
@@ -4583,11 +4611,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
+ FW_EQ_ETH_CMD_AUTOEQUIQE_F :
+ FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
+ HOSTFCMODE_INGRESS_QUEUE_X :
+ HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
@@ -4598,6 +4630,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
: FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 98d01a7497ec..98829e482bfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2689,7 +2689,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
-#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_eeprom_ptov - translate a physical EEPROM address to virtual
@@ -2745,7 +2744,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret = 0, addr;
int ec, sn, pn, na;
- u8 *vpd, csum;
+ u8 *vpd, csum, base_val = 0;
unsigned int vpdr_len, kw_offset, id_len;
vpd = vmalloc(VPD_LEN);
@@ -2755,17 +2754,11 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
if (ret < 0)
goto out;
- /* The VPD shall have a unique identifier specified by the PCI SIG.
- * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
- * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
- * is expected to automatically put this entry at the
- * beginning of the VPD.
- */
- addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
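
The probe now reads a single byte and compares it against the standard PCI VPD large-resource ID-string tag rather than a Chelsio-private constant; PCI_VPD_LRDT_ID_STRING is 0x82, the same value the removed define carried, so compliant cards behave as before. A sketch of the address selection:

#include <stdint.h>
#include <stdio.h>

#define VPD_BASE               0x400
#define VPD_BASE_OLD           0
#define PCI_VPD_LRDT_ID_STRING 0x82 /* large-resource ID-string tag */

/* Pick the VPD start address from the byte at VPD_BASE, as the
 * rewritten t4_get_raw_vpd_params() does.
 */
static int vpd_start(uint8_t first_byte)
{
	return first_byte == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
}

int main(void)
{
	printf("tagged card: 0x%x\n", vpd_start(0x82)); /* 0x400 */
	printf("early card:  0x%x\n", vpd_start(0x00)); /* 0x0 */
	return 0;
}
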
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 0c5373462ced..0b1b5f9c67d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index b11a172b5174..695916ba0405 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -884,6 +884,12 @@
#define TDUE_V(x) ((x) << TDUE_S)
#define TDUE_F TDUE_V(1U)
+/* The SPARE2 register holds the 32-bit value found at offset 0x6 of the
+ * Serial INIT Configuration flashed on the EEPROM, i.e. the 32-bit
+ * Serial Configuration Version information.
+ */
+#define PCIE_STATIC_SPARE2_A 0x5bfc
+
/* registers for module MC */
#define MC_INT_CAUSE_A 0x7518
#define MC_P_INT_CAUSE_A 0x41318
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 92473dda55d9..22a0220123ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -40,6 +40,13 @@
#define TCB_L2T_IX_M 0xfffULL
#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
index bc06e83fd3c6..521955e1f894 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
@@ -42,6 +42,7 @@ config CHELSIO_TLS_DEVICE
depends on CHELSIO_T4
depends on TLS
depends on TLS_DEVICE
+ select CRYPTO_LIB_AES
help
This flag enables support for kernel tls offload over Chelsio T6
crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE flag can be enabled
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 47d9268a7e3c..585590520076 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -92,9 +92,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
static struct cxgb4_uld_info ch_ipsec_uld_info = {
.name = CHIPSEC_DRV_MODULE_NAME,
- .nrxq = MAX_ULD_QSETS,
- /* Max ntxq will be derived from fw config file*/
- .rxq_size = 1024,
.add = ch_ipsec_uld_add,
.state_change = ch_ipsec_uld_state_change,
.tx_handler = ch_ipsec_xmit,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 1b7e8c91b541..46a809f2aeca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -9,6 +9,7 @@
#include <linux/ip.h>
#include <net/ipv6.h>
#include <linux/netdevice.h>
+#include <crypto/aes.h>
#include "chcr_ktls.h"
static LIST_HEAD(uld_ctx_list);
@@ -74,7 +75,7 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
struct ktls_key_ctx *kctx = &tx_info->key_ctx;
- struct crypto_cipher *cipher;
+ struct crypto_aes_ctx aes_ctx;
unsigned char *key, *salt;
switch (crypto_info->cipher_type) {
@@ -135,18 +136,14 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
*/
- cipher = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(cipher)) {
- ret = -ENOMEM;
- goto out;
- }
- ret = crypto_cipher_setkey(cipher, key, keylen);
+ ret = aes_expandkey(&aes_ctx, key, keylen);
if (ret)
- goto out1;
+ goto out;
memset(ghash_h, 0, ghash_size);
- crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ aes_encrypt(&aes_ctx, ghash_h, ghash_h);
+ memzero_explicit(&aes_ctx, sizeof(aes_ctx));
/* fill the Key context */
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
@@ -155,7 +152,7 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
key_ctx_size >> 4);
} else {
ret = -EINVAL;
- goto out1;
+ goto out;
}
memcpy(kctx->salt, salt, tx_info->salt_size);
@@ -163,8 +160,6 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
memcpy(kctx->key + keylen, ghash_h, ghash_size);
tx_info->key_ctx_len = key_ctx_size;
-out1:
- crypto_free_cipher(cipher);
out:
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 72bb123d53db..9e2378013642 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index a0e0d8a83681..19dc7dc054a2 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -32,6 +32,7 @@
#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
+#include "t4_tcb.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
if (sk->sk_state != TCP_SYN_RECV)
chtls_send_abort(sk, mode, skb);
else
- goto out;
+ chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+ TCB_FIELD_COOKIE_TFLAG, 1);
return;
out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,15 +1157,20 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
- goto free_sk;
+ if (!n || !n->dev)
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
- goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1399,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1415,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1599,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* the listening server has closed */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1936,6 +1951,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
else if (tcp_sk(sk)->linger2 < 0 &&
!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
chtls_abort_conn(sk, skb);
+ else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+ chtls_set_quiesce_ctrl(sk, 0);
break;
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2014,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2062,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2094,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2113,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2139,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2296,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(rpl);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+
+ /* return EINVAL if socket doesn't exist */
+ if (!sk)
+ return -EINVAL;
+
+ /* Reuse the skb, since struct cpl_set_tcb_field is larger
+ * than struct cpl_abort_req
+ */
+ if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+ chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+ kfree_skb(skb);
+ return 0;
+}
+
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2330,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
[CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
[CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
- [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_SET_TCB_RPL] = chtls_set_tcb_rpl,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index 47ba81e42f5d..b1161bdeda4d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -50,9 +50,6 @@
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
-/* ulp_mem_io + ulptx_idata + payload + padding */
-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
-
/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index a4fb463af22a..1e67140b0f80 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
return ret < 0 ? ret : 0;
}
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t)
+{
+ struct sk_buff *skb;
+ unsigned int wrlen;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return;
+
+ __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+ send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
/*
* Set one of the t_flags bits in the TCB.
*/
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
TF_RX_QUIESCE_V(val));
}
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+}
+
/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index fb269d587b74..f04ec53544ae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2509,8 +2509,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
@@ -2535,8 +2533,6 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d402d83d9edd..b6eba29d8e99 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5179,8 +5179,6 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = be_features_check,
.ndo_get_phys_port_id = be_get_phys_port_id,
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4360ce4d3fb6..720dc99bd1fc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
struct xdp_frame **init_xdpf)
{
struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
- void *new_buff;
+ void *new_buff, *aligned_data;
struct page *p;
+ u32 data_shift;
+ int headroom;
/* Check the data alignment and make sure the headroom is large
* enough to store the xdpf backpointer. Use an aligned headroom
@@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
* byte frame headroom. If the XDP program uses all of it, copy the
* data to a new buffer and make room for storing the backpointer.
*/
- if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
xdpf->headroom >= priv->tx_headroom) {
xdpf->headroom = priv->tx_headroom;
return 0;
}
+ /* Try to move the data inside the buffer just enough to align it and
+ * store the xdpf backpointer. If the available headroom isn't large
+ * enough, resort to allocating a new buffer and copying the data.
+ */
+ aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+ data_shift = xdpf->data - aligned_data;
+
+ /* The XDP frame's headroom needs to be large enough to accommodate
+ * shifting the data as well as storing the xdpf backpointer.
+ */
+ if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+ memmove(aligned_data, xdpf->data, xdpf->len);
+ xdpf->data = aligned_data;
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* The new xdp_frame is stored in the new buffer. Reserve enough space
+ * in the headroom for storing it along with the driver's private
+ * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+ * guarantee the data's alignment in the buffer.
+ */
+ headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+ DPAA_FD_DATA_ALIGNMENT);
+
+ /* Ensure the extended headroom and data don't overflow the buffer,
+ * while maintaining the mandatory tailroom.
+ */
+ if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ return -ENOMEM;
+
p = dev_alloc_pages(0);
if (unlikely(!p))
return -ENOMEM;
/* Copy the data to the new buffer at a properly aligned offset */
new_buff = page_address(p);
- memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+ memcpy(new_buff + headroom, xdpf->data, xdpf->len);
/* Create an XDP frame around the new buffer in a similar fashion
* to xdp_convert_buff_to_frame.
*/
new_xdpf = new_buff;
- new_xdpf->data = new_buff + priv->tx_headroom;
+ new_xdpf->data = new_buff + headroom;
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
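
The workaround is three-tiered pointer arithmetic: keep the buffer if the data is already aligned, shift the payload down when the frame's headroom can absorb the move plus the driver headroom, and only then copy into a fresh page with an aligned headroom. The arithmetic in isolation (alignment, headroom, and struct sizes below are illustrative stand-ins, not the driver's values):

#include <stdint.h>
#include <stdio.h>

#define FD_ALIGN    64u  /* stands in for DPAA_FD_DATA_ALIGNMENT */
#define TX_HEADROOM 208u /* stands in for priv->tx_headroom */
#define XDPF_SIZE   32u  /* stands in for sizeof(struct xdp_frame) */

#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t data = 0x10000 + 100;    /* misaligned frame data */
	unsigned int frame_headroom = 180; /* headroom left in the frame */

	uintptr_t aligned_data = ALIGN_DOWN(data, FD_ALIGN);
	unsigned int data_shift = data - aligned_data;

	if (frame_headroom >= data_shift + TX_HEADROOM) {
		/* cheap path: memmove() the payload down onto aligned_data */
		printf("shift in place by %u bytes\n", data_shift);
	} else {
		/* slow path: new page, aligned room for the xdpf backpointer
		 * plus the driver's private headroom
		 */
		unsigned int headroom = ALIGN_UP(XDPF_SIZE + TX_HEADROOM, FD_ALIGN);
		printf("copy to a new buffer at offset %u\n", headroom);
	}
	return 0;
}
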
@@ -2532,12 +2566,10 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
return XDP_PASS;
}
- xdp.data = vaddr + fd_off;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + qm_fd_get_length(fd);
- xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
- xdp.rxq = &dpaa_fq->xdp_rxq;
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
/* We reserve a fixed headroom of 256 bytes under the erratum and we
* offer it all to XDP programs to use. If no room is left for the
@@ -2638,7 +2670,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
u32 hash;
u64 ns;
- np = container_of(&portal, struct dpaa_napi_portal, p);
dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2653,6 +2684,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
+ np = &percpu_priv->np;
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index fb0bcd18ec0c..492943bb9c48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
- int err;
+ int err, offset;
rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
if (!xdp_prog)
goto out;
- xdp.data = vaddr + dpaa2_fd_get_offset(fd);
- xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.rxq = &ch->xdp_rxq;
-
- xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
- (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -399,10 +395,20 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
- if (unlikely(err))
+ if (unlikely(err)) {
+ addr = dma_map_page(priv->net_dev->dev.parent,
+ virt_to_page(vaddr), 0,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ ch->buf_count++;
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ }
ch->stats.xdp_drop++;
- else
+ } else {
ch->stats.xdp_redirect++;
+ }
break;
}
@@ -768,12 +774,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -1262,6 +1267,22 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1648,7 +1669,7 @@ set_cgtd:
* CG taildrop threshold, so it won't interfere with it; we also
* want frames in non-PFC enabled traffic classes to be kept in check)
*/
- td.enable = !tx_pause || (tx_pause && pfc);
+ td.enable = !tx_pause || pfc;
if (priv->rx_cgtd_enabled == td.enable)
return;
@@ -1691,7 +1712,7 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
*/
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
goto out;
/* Check link state; speed / duplex changes are not handled yet */
@@ -1730,7 +1751,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- if (!priv->mac) {
+ if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
* make sure we don't race against the link up notification,
* which may come immediately after dpni_enable();
@@ -1752,7 +1773,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
phylink_start(priv->mac->phylink);
return 0;
@@ -1826,11 +1847,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- if (!priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ } else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- } else {
- phylink_stop(priv->mac->phylink);
}
/* On dpni_disable(), the MC firmware will:
@@ -1952,6 +1973,43 @@ static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
}
}
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2058,6 +2116,13 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
bool enable;
int err;
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
err = dpaa2_eth_set_rx_csum(priv, enable);
@@ -2115,7 +2180,7 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
return -EOPNOTSUPP;
@@ -2507,6 +2572,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
.ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -4015,6 +4082,9 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC;
net_dev->hw_features = net_dev->features;
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
return 0;
}
@@ -4042,10 +4112,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
- return 0;
- if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
@@ -4056,23 +4127,38 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
mac->mc_io = priv->mc_io;
mac->net_dev = priv->net_dev;
- err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
- kfree(mac);
- return err;
- }
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
priv->mac = mac;
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ goto err_close_mac;
+ }
+ }
+
return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (!priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
return;
- dpaa2_mac_disconnect(priv->mac);
+ dpaa2_mac_close(priv->mac);
kfree(priv->mac);
priv->mac = NULL;
}
@@ -4101,7 +4187,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d236b8695c39..9b6a89709ce1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -693,6 +693,21 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac &&
+ (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index f981a523e13a..bf59708b869e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,7 +85,7 @@ static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_nway_reset(priv->mac->phylink);
return -EOPNOTSUPP;
@@ -97,7 +97,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_ksettings_get(priv->mac->phylink,
link_settings);
@@ -115,7 +115,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (!priv->mac)
+ if (!dpaa2_eth_is_type_phy(priv))
return -ENOTSUPP;
return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
@@ -127,7 +127,7 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
- if (priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
return;
}
@@ -150,7 +150,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_set_pauseparam(priv->mac->phylink,
pause);
if (pause->autoneg)
@@ -198,7 +198,7 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_strings(p);
break;
}
@@ -211,7 +211,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
num_ss_stats += dpaa2_mac_get_sset_count();
return num_ss_stats;
default:
@@ -313,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
*(data + i++) = buf_cnt;
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 828c177df03d..ccaf7e35abeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -79,10 +79,20 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ /* We can switch between SGMII and 1000BASE-X at runtime with
+ * pcs-lynx
+ */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (mac->pcs &&
+ (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
+ return false;
+ return interface != mac->if_mode;
+
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -122,13 +132,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
fallthrough;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
break;
default:
goto empty_set;
@@ -174,30 +188,22 @@ static void dpaa2_mac_link_up(struct phylink_config *config,
dpmac_state->up = 1;
- if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
- /* If the DPMAC is configured for PHY mode, we need
- * to pass the link parameters to the MC firmware.
- */
- dpmac_state->rate = speed;
-
- if (duplex == DUPLEX_HALF)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else if (duplex == DUPLEX_FULL)
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
-
- /* This is lossy; the firmware really should take the pause
- * enablement status rather than pause/asym pause status.
- */
- if (rx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (rx_pause ^ tx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
- }
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
@@ -228,32 +234,6 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.mac_link_down = dpaa2_mac_link_down,
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io)
-{
- struct dpmac_attr attr;
- bool fixed = false;
- u16 mc_handle = 0;
- int err;
-
- err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
- &mc_handle);
- if (err || !mc_handle)
- return false;
-
- err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
- if (err)
- goto out;
-
- if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
- fixed = true;
-
-out:
- dpmac_close(mc_io, 0, mc_handle);
-
- return fixed;
-}
-
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct device_node *dpmac_node, int id)
{
@@ -302,36 +282,20 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
- struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
struct device_node *dpmac_node;
struct phylink *phylink;
- struct dpmac_attr attr;
int err;
- err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
- &dpmac_dev->mc_handle);
- if (err || !dpmac_dev->mc_handle) {
- netdev_err(net_dev, "dpmac_open() = %d\n", err);
- return -ENODEV;
- }
-
- err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
- if (err) {
- netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
- goto err_close_dpmac;
- }
-
- mac->if_link_type = attr.link_type;
+ mac->if_link_type = mac->attr.link_type;
- dpmac_node = dpaa2_mac_get_node(attr.id);
+ dpmac_node = dpaa2_mac_get_node(mac->attr.id);
if (!dpmac_node) {
- netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
- err = -ENODEV;
- goto err_close_dpmac;
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
}
- err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
if (err < 0) {
err = -EINVAL;
goto err_put_node;
@@ -351,9 +315,10 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
- if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
- attr.eth_if != DPMAC_ETH_IF_RGMII) {
- err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
goto err_put_node;
}
@@ -389,8 +354,7 @@ err_pcs_destroy:
dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
-err_close_dpmac:
- dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+
return err;
}
@@ -402,8 +366,40 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ return 0;
- dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
}
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 955a52856210..13d42dd58ec9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,7 @@ struct dpaa2_mac {
struct dpmac_link_state state;
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -28,6 +29,10 @@ struct dpaa2_mac {
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
struct fsl_mc_io *mc_io);
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 90453dc7baef..9f80bdfeedec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -62,6 +62,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
@@ -662,4 +666,17 @@ struct dpni_rsp_single_step_cfg {
__le32 peer_delay;
};
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6ea7db66a632..aa429c17c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1225,6 +1225,99 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored.
+ * Packets with this vlan_id will be passed to the next
+ * classification stages.
+ * DPNI_VLAN_SET_QUEUE_ACTION -
+ * Packets with this vlan_id will be forwarded directly to the
+ * queue defined by the tc_id and flow_id
+ *
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ * selected tc_id. Value must be in the range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpni_add_mac_addr() - Add MAC address filter
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index e7b9e195b534..4e96d9362dd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1114,4 +1114,13 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_single_step_cfg *ptp_cfg);
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index d99ea0f4e4a6..ab92382c399a 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -27,7 +27,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI
+ depends on PCI && MDIO_DEVRES && MDIO_BUS
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index e1e950d48c92..c71fe8d751d5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
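
ENETC_PRSSCAPR_GET_NUM_RSS() decodes a log2-encoded capability: the register's low nibble n means 2^n groups of 32 RSS table entries, which is what enetc_init_port_rss_memory() later in this patch sizes its table by. A quick decode check:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)

int main(void)
{
	/* low nibble n => 2^n * 32 entries: 32, 64, 128, 256 */
	for (uint32_t n = 0; n <= 3; n++)
		printf("capability 0x%x -> %u entries\n", n,
		       ENETC_PRSSCAPR_GET_NUM_RSS(n));
	return 0;
}
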
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index ee0116ed4738..70e6d97b380f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -14,23 +14,6 @@
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
-{
- return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
-}
-
-static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
- u32 val)
-{
- enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
-}
-
-#define enetc_mdio_rd(mdio_priv, off) \
- _enetc_mdio_rd(mdio_priv, ENETC_##off)
-#define enetc_mdio_wr(mdio_priv, off, val) \
- _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
-
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
@@ -47,15 +30,29 @@ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
-#define MDIO_DATA(x) ((x) & 0xffff)
-#define TIMEOUT 1000
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
- u32 val;
+ bool is_busy;
- return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
- !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
}
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
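
The refactor replaces a macro-built register accessor with a plain predicate so readx_poll_timeout() can call it directly with the private data as its argument. The polling shape, reproduced standalone (the fake register and poll counts are local to this sketch):

#include <stdbool.h>
#include <stdio.h>

#define MDIO_CFG_BSY 0x1u

/* Fake MDIO_CFG register: busy for the first few reads, then idle. */
static unsigned int fake_mdio_cfg(void)
{
	static int reads;
	return reads++ < 3 ? MDIO_CFG_BSY : 0;
}

static bool mdio_is_busy(void)
{
	return fake_mdio_cfg() & MDIO_CFG_BSY;
}

/* Bounded busy-wait, the userspace shape of readx_poll_timeout(). */
static int mdio_wait_complete(int max_polls)
{
	while (max_polls--) {
		if (!mdio_is_busy())
			return 0;
		/* the driver sleeps ~10us between reads here */
	}
	return -1; /* -ETIMEDOUT in the kernel */
}

int main(void)
{
	printf("wait result: %d\n", mdio_wait_complete(1000)); /* 0 */
	return 0;
}
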
@@ -75,7 +72,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -83,11 +80,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -95,7 +92,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
}
/* write the value */
- enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -121,7 +118,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -129,11 +126,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -141,21 +138,21 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
/* initiate the read */
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
return value;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed8fcb8b486e..515c5b29d7aa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1079,6 +1136,8 @@ err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
@@ -1098,14 +1157,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- enetc_phylink_destroy(priv);
- enetc_mdiobus_destroy(pf);
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
unregister_netdev(si->ndev);
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
enetc_free_msix(priv);
enetc_free_si_resources(priv);
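The reordering matters because unregister_netdev() can still run the driver's close path, which uses phylink; destroying phylink and the MDIO bus only afterwards keeps those callbacks safe. A sketch of the constraint (assuming ndo_stop reaches phylink helpers):

    unregister_netdev(si->ndev);  /* may invoke ndo_stop -> phylink */
    enetc_phylink_destroy(priv);  /* safe: no ndo callbacks remain  */
    enetc_mdiobus_destroy(pf);    /* phylink no longer uses the bus */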
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c527f4ee1d3a..0602d5d5d2ee 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -462,6 +462,11 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 04f24c66cf36..3db882322b2b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -944,7 +945,6 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
@@ -953,7 +953,8 @@ fec_restart(struct net_device *ndev)
* For the i.MX6SX SoC, enet uses the AXI bus, so we disable the MAC
* instead of resetting it.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
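The branch above can be read as a single predicate: AVB parts never hard-reset, and NO_HARD_RESET parts skip the hard reset only while the link is up, so REF_CLK never glitches under an established link. Restated as a helper (for clarity only, not added by the patch):

    static bool fec_soft_disable_only(u32 quirks, bool link)
    {
        /* disable the MAC via ECNTRL instead of hard-resetting it */
        return (quirks & FEC_QUIRK_HAS_AVB) ||
               ((quirks & FEC_QUIRK_NO_HARD_RESET) && link);
    }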
@@ -995,7 +996,8 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- val = readl(fep->hwp + FEC_RACC);
+ u32 val = readl(fep->hwp + FEC_RACC);
+
/* align IP header */
val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -1662,7 +1664,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1693,6 +1694,8 @@ static void fec_get_mac(struct net_device *ndev)
if (FEC_FLASHMAC)
iap = (unsigned char *)FEC_FLASHMAC;
#else
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+
if (pdata)
iap = (unsigned char *)&pdata->mac;
#endif
@@ -2165,9 +2168,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
@@ -2180,6 +2183,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bb9887f98841..62f42921933d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -111,6 +111,7 @@ do { \
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -442,6 +443,9 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_10G;
break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d391a45cebb6..541de32ea662 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -58,7 +58,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..ef4e2febeb5b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -70,9 +70,32 @@ static struct {
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
-static struct ucc_geth_info ugeth_primary_info = {
+static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
+{
+ static const u8 count[] = {
+ [UCC_GETH_NUM_OF_THREADS_1] = 1,
+ [UCC_GETH_NUM_OF_THREADS_2] = 2,
+ [UCC_GETH_NUM_OF_THREADS_4] = 4,
+ [UCC_GETH_NUM_OF_THREADS_6] = 6,
+ [UCC_GETH_NUM_OF_THREADS_8] = 8,
+ };
+ if (idx >= ARRAY_SIZE(count))
+ return 0;
+ return count[idx];
+}
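The lookup table collapses several identical switch statements into one place; out-of-range enum values map to 0, which every caller treats as invalid. For example:

    int n = ucc_geth_thread_count(UCC_GETH_NUM_OF_THREADS_4);  /* n == 4 */
    int bad = ucc_geth_thread_count((enum ucc_geth_num_of_threads)42);
                                    /* bad == 0: rejected by callers */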
+
+static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static const struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
- .bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */
@@ -90,8 +113,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
- .numQueuesTx = 1,
- .numQueuesRx = 1,
.extendedFilteringChainPointer = ((uint32_t) NULL),
.typeorlen = 3072 /*1536 */ ,
.nonBackToBackIfgPart1 = 0x40,
@@ -157,8 +178,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
-static struct ucc_geth_info ugeth_info[8];
-
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
@@ -558,7 +577,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
int i;
int length;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
@@ -567,7 +586,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
@@ -671,32 +690,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
- int numThreadsTxNumerical;
- switch (ugeth->ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
- numThreadsTxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
pr_info("Thread data TXs:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_tx);
- for (i = 0; i < numThreadsTxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data TX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_tx[i]);
@@ -705,32 +704,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
}
if (ugeth->p_thread_data_rx) {
- int numThreadsRxNumerical;
- switch (ugeth->ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
- numThreadsRxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
pr_info("Thread data RX:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_rx);
- for (i = 0; i < numThreadsRxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data RX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_rx[i]);
@@ -905,7 +884,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_send_q_mem_reg) {
pr_info("Send Q memory registers:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
pr_info("SQQD[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
@@ -937,7 +916,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
pr_info("RX IRQ coalescing tables:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_irq_coalescing_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX IRQ coalescing table entry[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
@@ -959,7 +938,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_rx_bd_qs_tbl) {
pr_info("RX BD QS tables:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX BD QS table[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i]);
@@ -1835,7 +1814,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
/* Return existing data buffers in ring */
bd = ugeth->p_rx_bd_ring[i];
@@ -1856,12 +1835,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
kfree(ugeth->rx_skbuff[i]);
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->rx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ kfree(ugeth->p_rx_bd_ring[i]);
ugeth->p_rx_bd_ring[i] = NULL;
}
}
@@ -1880,7 +1854,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
bd = ugeth->p_tx_bd_ring[i];
if (!bd)
continue;
@@ -1898,15 +1872,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
kfree(ugeth->tx_skbuff[i]);
- if (ugeth->p_tx_bd_ring[i]) {
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->tx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->tx_bd_ring_offset[i]);
- ugeth->p_tx_bd_ring[i] = NULL;
- }
+ kfree(ugeth->p_tx_bd_ring[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
}
}
@@ -1921,50 +1888,39 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
ugeth->uccf = NULL;
}
- if (ugeth->p_thread_data_tx) {
- qe_muram_free(ugeth->thread_dat_tx_offset);
- ugeth->p_thread_data_tx = NULL;
- }
- if (ugeth->p_thread_data_rx) {
- qe_muram_free(ugeth->thread_dat_rx_offset);
- ugeth->p_thread_data_rx = NULL;
- }
- if (ugeth->p_exf_glbl_param) {
- qe_muram_free(ugeth->exf_glbl_param_offset);
- ugeth->p_exf_glbl_param = NULL;
- }
- if (ugeth->p_rx_glbl_pram) {
- qe_muram_free(ugeth->rx_glbl_pram_offset);
- ugeth->p_rx_glbl_pram = NULL;
- }
- if (ugeth->p_tx_glbl_pram) {
- qe_muram_free(ugeth->tx_glbl_pram_offset);
- ugeth->p_tx_glbl_pram = NULL;
- }
- if (ugeth->p_send_q_mem_reg) {
- qe_muram_free(ugeth->send_q_mem_reg_offset);
- ugeth->p_send_q_mem_reg = NULL;
- }
- if (ugeth->p_scheduler) {
- qe_muram_free(ugeth->scheduler_offset);
- ugeth->p_scheduler = NULL;
- }
- if (ugeth->p_tx_fw_statistics_pram) {
- qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
- ugeth->p_tx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_fw_statistics_pram) {
- qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
- ugeth->p_rx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_irq_coalescing_tbl) {
- qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
- ugeth->p_rx_irq_coalescing_tbl = NULL;
- }
- if (ugeth->p_rx_bd_qs_tbl) {
- qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
- ugeth->p_rx_bd_qs_tbl = NULL;
- }
+ qe_muram_free_addr(ugeth->p_thread_data_tx);
+ ugeth->p_thread_data_tx = NULL;
+
+ qe_muram_free_addr(ugeth->p_thread_data_rx);
+ ugeth->p_thread_data_rx = NULL;
+
+ qe_muram_free_addr(ugeth->p_exf_glbl_param);
+ ugeth->p_exf_glbl_param = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_glbl_pram);
+ ugeth->p_rx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_glbl_pram);
+ ugeth->p_tx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_send_q_mem_reg);
+ ugeth->p_send_q_mem_reg = NULL;
+
+ qe_muram_free_addr(ugeth->p_scheduler);
+ ugeth->p_scheduler = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+
if (ugeth->p_init_enet_param_shadow) {
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -2073,15 +2029,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
- (uf_info->bd_mem_part == MEM_PART_MURAM))) {
- if (netif_msg_probe(ugeth))
- pr_err("Bad memory partition value\n");
- return -EINVAL;
- }
-
/* Rx BD lengths */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
@@ -2092,7 +2041,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* Tx BD lengths */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
pr_err("Tx BD ring length must be no smaller than 2\n");
@@ -2109,14 +2058,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* num Tx queues */
- if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
- if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of rx queues too large\n");
return -EINVAL;
@@ -2124,7 +2073,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
- if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2133,7 +2082,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
- if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2156,10 +2105,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
@@ -2198,53 +2147,32 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Allocate in multiple of
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
- according to spec */
- length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
- / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_TX_BD_RING_ALIGNMENT;
- ugeth->tx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
- if (ugeth->tx_bd_ring_offset[j] != 0)
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->tx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_TX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- tx_bd_ring_offset[j]);
- }
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
+ u32 alloc;
+
+ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
- memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
- length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
}
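The rewritten allocation relies on kmalloc() returning size-aligned buffers for power-of-two sizes, so rounding the ring size up to a power of two (never below the BD-ring alignment) replaces the old over-allocate-and-align-by-hand arithmetic. A worked sizing, assuming 8-byte BDs and a 32-byte alignment (hypothetical values):

    length = 100 * 8;                   /* 100 BDs -> 800 bytes        */
    alloc  = round_up(length, 32);      /* 800, already 32-aligned     */
    alloc  = roundup_pow_of_two(alloc); /* 1024; kmalloc(1024) returns */
                                        /* a 1024-byte-aligned buffer  */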
/* Init Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->tx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2252,9 +2180,6 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
- ugeth->tx_skbuff[j][i] = NULL;
-
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
@@ -2284,27 +2209,15 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ u32 alloc;
+
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_RX_BD_RING_ALIGNMENT;
- ugeth->rx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
- if (ugeth->rx_bd_ring_offset[j] != 0)
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->rx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_RX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- rx_bd_ring_offset[j]);
- }
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n");
@@ -2313,11 +2226,11 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
}
/* Init Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->rx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2325,9 +2238,6 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
- ugeth->rx_skbuff[j][i] = NULL;
-
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
@@ -2359,10 +2269,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
- u16 test;
u8 function_code = 0;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
+ s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
@@ -2371,45 +2281,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
uf_regs = uccf->uf_regs;
ug_regs = ugeth->ug_regs;
- switch (ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
+ numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
+ if (!numThreadsRxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Rx threads value\n");
return -EINVAL;
}
- switch (ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
+ numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
+ if (!numThreadsTxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Tx threads value\n");
return -EINVAL;
@@ -2507,20 +2387,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
*/
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
- ugeth->tx_glbl_pram_offset =
+ tx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (tx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_tx_glbl_pram =
- (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
- tx_glbl_pram_offset);
- /* Zero out p_tx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
-
+ ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
/* Fill global PRAM */
/* TQPTR */
@@ -2554,7 +2429,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* SQPTR */
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
- qe_muram_alloc(ug_info->numQueuesTx *
+ qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
@@ -2570,29 +2445,20 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd);
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32) virt_to_phys(endOfRing));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32)qe_muram_dma(endOfRing));
- }
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
}
/* schedulerbasepointer */
- if (ug_info->numQueuesTx > 1) {
+ if (ucc_geth_tx_queues(ug_info) > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
@@ -2608,8 +2474,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
- /* Zero out p_scheduler */
- memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
@@ -2652,23 +2516,18 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_tx_fw_statistics_pram =
(struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
- /* Zero out p_tx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
- 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
/* Already has speed set */
- if (ug_info->numQueuesTx > 1)
+ if (ucc_geth_tx_queues(ug_info) > 1)
temoder |= TEMODER_SCHEDULER_ENABLE;
if (ug_info->ipCheckSumGenerate)
temoder |= TEMODER_IP_CHECKSUM_GENERATE;
- temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
- test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
-
/* Function code register value to be used later */
function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
/* Required for QE */
@@ -2678,20 +2537,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
- ugeth->rx_glbl_pram_offset =
+ rx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (rx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_rx_glbl_pram =
- (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
- rx_glbl_pram_offset);
- /* Zero out p_rx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
-
+ ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
/* Fill global PRAM */
/* RQPTR */
@@ -2729,16 +2583,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_rx_fw_statistics_pram =
(struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
- /* Zero out p_rx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
- sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
@@ -2754,7 +2605,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->rx_irq_coalescing_tbl_offset);
/* Fill interrupt coalescing table */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingmaxvalue,
ug_info->interruptcoalescingmaxvalue[i]);
@@ -2803,7 +2654,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* RBDQPTR */
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
(sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
@@ -2817,23 +2668,12 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
- /* Zero out p_rx_bd_qs_tbl */
- memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
- 0,
- ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
- sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
- }
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
/* rest of fields handled by QE */
}
@@ -2854,7 +2694,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->
vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
- remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
if (ug_info->ipCheckSumCheck)
remoder |= REMODER_IP_CHECKSUM_CHECK;
if (ug_info->ipAddressAlignment)
@@ -2937,14 +2777,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
- kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
- /* Zero out *p_init_enet_param_shadow */
- memset((char *)ugeth->p_init_enet_param_shadow,
- 0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
@@ -2964,7 +2801,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
- ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ rx_glbl_pram_offset | ug_info->riscRx;
if ((ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
(ug_info->largestexternallookupkeysize !=
@@ -3002,7 +2839,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_init_enet_param_shadow->txglobal =
- ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -3016,7 +2853,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
/* Load Rx bds with buffers */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill Rx bds with buffers\n");
@@ -3287,12 +3124,12 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
/* Tx event processing */
spin_lock(&ugeth->lock);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
ucc_geth_tx(ugeth->ndev, i);
spin_unlock(&ugeth->lock);
howmany = 0;
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
@@ -3685,6 +3522,36 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
+static int ucc_geth_parse_clock(struct device_node *np, const char *which,
+ enum qe_clock *out)
+{
+ const char *sprop;
+ char buf[24];
+
+ snprintf(buf, sizeof(buf), "%s-clock-name", which);
+ sprop = of_get_property(np, buf, NULL);
+ if (sprop) {
+ *out = qe_clock_source(sprop);
+ } else {
+ u32 val;
+
+ snprintf(buf, sizeof(buf), "%s-clock", which);
+ if (of_property_read_u32(np, buf, &val)) {
+ /* If both *-clock-name and *-clock are missing,
+ * we want to tell people to use *-clock-name.
+ */
+ pr_err("missing %s-clock-name property\n", buf);
+ return -EINVAL;
+ }
+ *out = val;
+ }
+ if (*out < QE_CLK_NONE || *out > QE_CLK24) {
+ pr_err("invalid %s property\n", buf);
+ return -EINVAL;
+ }
+ return 0;
+}
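The helper accepts either device-tree form, preferring the named property; values outside QE_CLK_NONE..QE_CLK24 are rejected. A usage sketch with hypothetical property values:

    /* rx-clock-name = "clk21";  preferred, parsed by qe_clock_source() */
    /* rx-clock = <4>;           legacy numeric fallback                */
    err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
    if (err)
        return err;  /* neither property present, or value out of range */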
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3695,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const char *sprop;
const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
@@ -3725,62 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = &ugeth_info[ucc_num];
- if (ug_info == NULL) {
- if (netif_msg_probe(&debug))
- pr_err("[%d] Missing additional data!\n", ucc_num);
- return -ENODEV;
- }
+ ug_info = kmalloc(sizeof(*ug_info), GFP_KERNEL);
+ if (ug_info == NULL)
+ return -ENOMEM;
+ memcpy(ug_info, &ugeth_primary_info, sizeof(*ug_info));
ug_info->uf_info.ucc_num = ucc_num;
- sprop = of_get_property(np, "rx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.rx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.rx_clock > QE_CLK24)) {
- pr_err("invalid rx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "rx-clock", NULL);
- if (!prop) {
- /* If both rx-clock-name and rx-clock are missing,
- we want to tell people to use rx-clock-name. */
- pr_err("missing rx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.rx_clock = *prop;
- }
-
- sprop = of_get_property(np, "tx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.tx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.tx_clock > QE_CLK24)) {
- pr_err("invalid tx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "tx-clock", NULL);
- if (!prop) {
- pr_err("missing tx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid tx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.tx_clock = *prop;
- }
+ err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
+ if (err)
+ goto err_free_info;
+ err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
+ if (err)
+ goto err_free_info;
err = of_address_to_resource(np, 0, &res);
if (err)
- return -EINVAL;
+ goto err_free_info;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
@@ -3793,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
*/
err = of_phy_register_fixed_link(np);
if (err)
- return err;
+ goto err_free_info;
ug_info->phy_node = of_node_get(np);
}
@@ -3889,6 +3716,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3923,6 +3751,8 @@ err_deregister_fixed_link:
of_phy_deregister_fixed_link(np);
of_node_put(ug_info->tbi_node);
of_node_put(ug_info->phy_node);
+err_free_info:
+ kfree(ug_info);
return err;
}
@@ -3934,12 +3764,13 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ kfree(ugeth->ug_info);
+ free_netdev(dev);
return 0;
}
@@ -3967,17 +3798,10 @@ static struct platform_driver ucc_geth_driver = {
static int __init ucc_geth_init(void)
{
- int i, ret;
-
if (netif_msg_drv(&debug))
pr_info(DRV_DESC "\n");
- for (i = 0; i < 8; i++)
- memcpy(&(ugeth_info[i]), &ugeth_primary_info,
- sizeof(ugeth_primary_info));
-
- ret = platform_driver_register(&ucc_geth_driver);
- return ret;
+ return platform_driver_register(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..4294ed096ebb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
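With the packed layout, the split reserved regions put the new fields at fixed PRAM offsets: snums_en at 0x78, l2l3baseptr at 0x80, mtu[] at 0x84, wrrtablebase at 0xa8, with res4 padding back out to 0xc0. Hypothetical compile-time checks (not in the patch):

    BUILD_BUG_ON(offsetof(struct ucc_geth_tx_global_pram, snums_en) != 0x78);
    BUILD_BUG_ON(offsetof(struct ucc_geth_tx_global_pram, l2l3baseptr) != 0x80);
    BUILD_BUG_ON(offsetof(struct ucc_geth_tx_global_pram, mtu) != 0x84);
    BUILD_BUG_ON(offsetof(struct ucc_geth_tx_global_pram, wrrtablebase) != 0xa8);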
/* structure representing Extended Filtering Global Parameters in PRAM */
@@ -1069,8 +1076,6 @@ struct ucc_geth_tad_params {
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
- u8 numQueuesTx;
- u8 numQueuesRx;
int ipCheckSumCheck;
int ipCheckSumGenerate;
int rxExtendedFiltering;
@@ -1158,9 +1163,7 @@ struct ucc_geth_private {
struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
u32 exf_glbl_param_offset;
struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
- u32 rx_glbl_pram_offset;
struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
- u32 tx_glbl_pram_offset;
struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
@@ -1178,9 +1181,7 @@ struct ucc_geth_private {
struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
- u32 tx_bd_ring_offset[NUM_TX_QUEUES];
u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
- u32 rx_bd_ring_offset[NUM_RX_QUEUES];
u8 __iomem *confBd[NUM_TX_QUEUES];
u8 __iomem *txBd[NUM_TX_QUEUES];
u8 __iomem *rxBd[NUM_RX_QUEUES];
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 858cb293152a..5d7824d2b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1502,7 +1502,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- assert(skb->queue_mapping < ndev->ae_handle->q_num);
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for multi buffer */
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
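The two macros advance ring indices, so they must wrap at the ring depth (message count), not at the per-message word size; the old constant confined the index to a fraction of the ring. Illustration with hypothetical sizes (1024 slots, 8 words per message):

    u32 tail = 7;
    tail = (tail + 1) % 1024;  /* MSG_NUM: advances to slot 8, as intended */
    /* (7 + 1) % 8 == 0 -- wrapping at MSG_SIZE confines the index to      */
    /* 0..7, so slots 8..1023 would never be visited.                      */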
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a7daf6d4511e..e9e60a935f40 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -272,7 +272,7 @@ struct hnae3_ring_chain_node {
};
#define HNAE3_IS_TX_RING(node) \
- (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+ (((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX)
/* device specification info from firmware */
struct hnae3_dev_specs {
@@ -284,13 +284,14 @@ struct hnae3_dev_specs {
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+ u16 max_frm_size;
+ u16 max_qset_num;
};
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
void (*link_status_change)(struct hnae3_handle *handle, bool state);
- int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
void (*process_hw_error)(struct hnae3_handle *handle,
@@ -410,8 +411,6 @@ struct hnae3_ae_dev {
* Get the len of the regs dump
* get_rss_key_size()
* Get rss key size
- * get_rss_indir_size()
- * Get rss indirection table size
* get_rss()
* Get rss table
* set_rss()
@@ -465,6 +464,8 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
+ * dbg_read_cmd
+ * Execute debugfs read command.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -553,7 +554,6 @@ struct hnae3_ae_ops {
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
- u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
@@ -620,6 +620,8 @@ struct hnae3_ae_ops {
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+ int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -773,9 +775,13 @@ struct hnae3_handle {
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
- hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+ hnae3_set_field(origin, 0x1 << (shift), shift, val)
#define hnae3_get_bit(origin, shift) \
- hnae3_get_field((origin), (0x1 << (shift)), (shift))
+ hnae3_get_field(origin, 0x1 << (shift), shift)
+
+#define HNAE3_DBG_TM_NODES "tm_nodes"
+#define HNAE3_DBG_TM_PRI "tm_priority"
+#define HNAE3_DBG_TM_QSET "tm_qset"
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d4e9c053a8f..dd11c57027bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -7,7 +7,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -162,7 +162,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
continue;
dev_info(&h->pdev->dev,
- " %4d %4d %4d\n",
+ " %4d %4u %4d\n",
i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
@@ -389,6 +389,9 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
kinfo->tc_info.num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
+ dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
+ dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
@@ -420,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
return (*ppos = len);
}
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ int ret = 0;
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -427,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
- int ret = 0;
+ int ret;
if (*ppos != 0)
return 0;
@@ -458,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
count = cmd_buf_tmp - cmd_buf + 1;
}
- if (strncmp(cmd_buf, "help", 4) == 0)
- hns3_dbg_help(handle);
- else if (strncmp(cmd_buf, "queue info", 10) == 0)
- ret = hns3_dbg_queue_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "queue map", 9) == 0)
- ret = hns3_dbg_queue_map(handle);
- else if (strncmp(cmd_buf, "bd info", 7) == 0)
- ret = hns3_dbg_bd_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "dev capability", 14) == 0)
- hns3_dbg_dev_caps(handle);
- else if (strncmp(cmd_buf, "dev spec", 8) == 0)
- hns3_dbg_dev_specs(handle);
- else if (handle->ae_algo->ops->dbg_run_cmd)
- ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
- else
- ret = -EOPNOTSUPP;
-
+ ret = hns3_dbg_check_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
@@ -484,6 +495,39 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
return count;
}
+static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *read_buf;
+ ssize_t size = 0;
+ int ret = 0;
+
+ read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ cmd_buf = filp->f_path.dentry->d_iname;
+
+ if (ops->dbg_read_cmd)
+ ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
+ HNS3_DBG_READ_LEN);
+
+ if (ret) {
+ dev_info(priv->dev, "unknown command\n");
+ goto out;
+ }
+
+ size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+ strlen(read_buf));
+
+out:
+ kfree(read_buf);
+ return size;
+}
+
static const struct file_operations hns3_dbg_cmd_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -491,14 +535,31 @@ static const struct file_operations hns3_dbg_cmd_fops = {
.write = hns3_dbg_cmd_write,
};
+static const struct file_operations hns3_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_read,
+};
+
void hns3_dbg_init(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const char *name = pci_name(handle->pdev);
+ struct dentry *entry_dir;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
&hns3_dbg_cmd_fops);
+
+ entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
+ debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
}
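Because hns3_dbg_read() keys off filp->f_path.dentry->d_iname, every file registered above shares one fops and is distinguished purely by its name, which ops->dbg_read_cmd() must recognize. Adding another node is then a one-liner (hypothetical name, not in the patch):

    #define HNAE3_DBG_TM_MAP "tm_map"  /* hypothetical new dump */
    debugfs_create_file(HNAE3_DBG_TM_MAP, 0600, entry_dir, handle,
                        &hns3_dbg_fops);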
void hns3_dbg_uninit(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 405e49033417..bf4302a5cf95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -32,7 +32,7 @@
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"
-#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
#define hns3_rl_err(fmt, ...) \
@@ -1070,7 +1070,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
* HW checksum of the non-IP packets and GSO packets is handled at
* different place in the following code
*/
- if (skb->csum_not_inet || skb_is_gso(skb) ||
+ if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
!test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
return false;
@@ -2329,7 +2329,7 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
pci_ers_result_t ret;
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
+ dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -2800,12 +2800,6 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-static bool hns3_page_is_reusable(struct page *page)
-{
- return page_to_nid(page) == numa_mem_id() &&
- !page_is_pfmemalloc(page);
-}
-
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
@@ -2823,10 +2817,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages, or the stack is still using the page
- * when page_offset rollback to zero, flag default unreuse
+ /* Avoid re-using remote and pfmemalloc pages, or the stack is still
+ * using the page when page_offset rolls back to zero, flag default
+ * unreuse
*/
- if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
+ if (!dev_page_is_reusable(desc_cb->priv) ||
(!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
@@ -3083,8 +3078,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(hns3_page_is_reusable(desc_cb->priv)))
+ /* We can reuse buffer as-is, just make sure it is reusable */
+ if (dev_page_is_reusable(desc_cb->priv))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
__page_frag_cache_drain(desc_cb->priv,
@@ -4089,7 +4084,7 @@ out_when_alloc_ring_memory:
return -ENOMEM;
}
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -4098,7 +4093,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
hns3_fini_ring(&priv->ring[i]);
hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
- return 0;
}
/* Set mac addr if it is configured. or leave it to the AE driver */
@@ -4286,8 +4280,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dbg_init(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
- netdev->max_mtu = HNS3_MAX_MTU;
+ netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
@@ -4327,7 +4320,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4353,9 +4345,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
@@ -4384,20 +4374,6 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
}
}
-static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct net_device *ndev = kinfo->netdev;
-
- if (tc > HNAE3_MAX_TC)
- return -EINVAL;
-
- if (!ndev)
- return -ENODEV;
-
- return hns3_nic_set_real_num_queue(ndev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4664,7 +4640,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
@@ -4682,13 +4657,11 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
- return ret;
+ return 0;
}
static int hns3_reset_notify(struct hnae3_handle *handle,
@@ -4834,7 +4807,6 @@ static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
- .setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
.process_hw_error = hns3_process_hw_error,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 0a7b606e7c93..d069b04ee587 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -56,9 +56,8 @@ enum hns3_nic_state {
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
-#define HNS3_MAC_MAX_FRAME 9728
-#define HNS3_MAX_MTU \
- (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
+#define HNS3_MAX_MTU(max_frm_size) \
+ ((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
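
As a quick check of the new macro's arithmetic, here is a standalone sketch; the header lengths are hard-coded to the usual kernel values, and 9728 is the former HNS3_MAC_MAX_FRAME default:

#include <assert.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* one VLAN tag */
#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

int main(void)
{
	/* 9728 - (14 + 4 + 8) = 9702, the figure the old comment cited */
	assert(HNS3_MAX_MTU(9728) == 9702);
	return 0;
}
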
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -555,7 +554,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hns3_read_dev(a, reg) \
- hns3_read_reg((a)->io_base, (reg))
+ hns3_read_reg((a)->io_base, reg)
static inline bool hns3_nic_resetting(struct net_device *netdev)
{
@@ -565,7 +564,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
}
#define hns3_write_dev(a, reg, value) \
- hns3_write_reg((a)->io_base, (reg), (value))
+ hns3_write_reg((a)->io_base, reg, value)
#define ring_to_dev(ring) ((ring)->dev)
@@ -589,15 +588,15 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+ for (pos = (head).ring; (pos); pos = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
-#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
-#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
void hns3_ethtool_set_ops(struct net_device *netdev);
@@ -606,7 +605,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index e2fc443fe92c..adcec4ea7cb9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -456,7 +456,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -859,11 +859,9 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- if (!h->ae_algo->ops->get_rss_indir_size)
- return 0;
-
- return h->ae_algo->ops->get_rss_indir_size(h);
+ return ae_dev->dev_specs.rss_ind_tbl_size;
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index b728be4737f8..1bd0ddfaec4d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num)
{
- switch (desc_ret) {
- case HCLGE_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGE_CMD_NO_AUTH:
- return -EPERM;
- case HCLGE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGE_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGE_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGE_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGE_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGE_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGE_CMD_TIMEOUT:
- return -ETIME;
- case HCLGE_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGE_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGE_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
}
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ struct errcode hclge_cmd_errcode[] = {
+ {HCLGE_CMD_EXEC_SUCCESS, 0},
+ {HCLGE_CMD_NO_AUTH, -EPERM},
+ {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGE_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGE_CMD_NEXT_ERR, -ENOSR},
+ {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGE_CMD_PARA_ERR, -EINVAL},
+ {HCLGE_CMD_RESULT_ERR, -ERANGE},
+ {HCLGE_CMD_TIMEOUT, -ETIME},
+ {HCLGE_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGE_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
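
The switch-based converter above becomes a table walk; a self-contained sketch of the same lookup idiom, using stand-in codes rather than the real IMP values:

#include <stdio.h>

struct errcode {
	unsigned int imp_errcode;
	int common_errno;
};

static int convert_err_code(unsigned int desc_ret)
{
	/* stand-in pairs; the driver maps the real IMP codes to errnos */
	static const struct errcode map[] = {
		{ 0,  0 },	/* success */
		{ 2, -95 },	/* e.g. not supported -> -EOPNOTSUPP */
		{ 3, -22 },	/* e.g. bad parameter -> -EINVAL */
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].imp_errcode == desc_ret)
			return map[i].common_errno;

	return -5;	/* unknown code -> -EIO, as in the default above */
}

int main(void)
{
	printf("%d %d\n", convert_err_code(3), convert_err_code(99));
	return 0;
}

A table keeps one code-to-errno pair per line, so future IMP codes become one-line additions instead of new case labels.
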
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
return hclge_cmd_convert_err_code(desc_ret);
}
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are sent, use the first one to check
+ * completion.
+ */
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclge_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
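
hclge_cmd_check_result() polls completion in 1-microsecond steps bounded by tx_timeout; a generic userspace sketch of that bounded-poll idiom (the completion condition and tick limit here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* stands in for hclge_cmd_csq_done(); completes at tick 40 here */
static bool hw_done(unsigned int tick)
{
	return tick >= 40;
}

static int poll_until_done(unsigned int max_ticks)
{
	unsigned int timeout = 0;

	do {
		if (hw_done(timeout))
			return 0;
		/* the driver sleeps with udelay(1) at this point */
		timeout++;
	} while (timeout < max_ticks);

	return -1;	/* timed out; the driver returns -EBADE */
}

int main(void)
{
	printf("%d %d\n", poll_until_done(100), poll_until_done(10));
	return 0;
}
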
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- struct hclge_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int retval;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be used for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclge_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!complete)
- retval = -EBADE;
- else
- retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- retval = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclge_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return retval;
+ return ret;
}
static void hclge_set_default_capability(struct hclge_dev *hdev)
@@ -363,6 +380,15 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
+static __le32 hclge_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
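
hclge_build_api_caps() assembles a capability word bit by bit before byte-swapping it for the firmware; a minimal sketch of the pattern (the second flag is hypothetical):

#include <stdint.h>
#include <stdio.h>

enum api_cap_bits {
	API_CAP_FLEX_RSS_TBL_B,	/* mirrors HCLGE_API_CAP_FLEX_RSS_TBL_B */
	API_CAP_EXAMPLE_B,	/* hypothetical flag, illustration only */
};

static uint32_t build_api_caps(void)
{
	uint32_t api_caps = 0;

	api_caps |= 1U << API_CAP_FLEX_RSS_TBL_B;
	/* the driver converts with cpu_to_le32() before sending */
	return api_caps;
}

int main(void)
{
	printf("caps = 0x%x\n", build_api_caps());
	return 0;
}
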
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
@@ -373,6 +399,7 @@ hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_build_api_caps();
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index edfadb5cb1c3..ff52a65b4cff 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -160,6 +160,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
@@ -385,11 +386,15 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGE_API_CAP_BITS {
+ HCLGE_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -1126,7 +1131,8 @@ struct hclge_dev_specs_0_cmd {
#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
struct hclge_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 max_qset_num;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -1138,9 +1144,9 @@ static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, (reg), (value))
+ hclge_write_reg((a)->io_base, reg, value)
#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, (reg))
+ hclge_read_reg((a)->io_base, reg)
static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e08d11b8ecf1..5bf5db91d16c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -176,29 +176,6 @@ static int hclge_map_update(struct hclge_dev *hdev)
return hclge_rss_init_hw(hdev);
}
-static int hclge_client_setup_tc(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = hdev->vport;
- struct hnae3_client *client;
- struct hnae3_handle *handle;
- int ret;
- u32 i;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &vport[i].nic;
- client = handle->client;
-
- if (!client || !client->ops || !client->ops->setup_tc)
- continue;
-
- ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
int ret;
@@ -257,10 +234,6 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_client_setup_tc(hdev);
- if (ret)
- goto err_out;
-
ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 8f6dea5198cf..6b1d197df881 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -696,17 +696,16 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_tqp_tx_queue_tc_cmd *tc;
+ u16 group_id, queue_id, qset_id;
enum hclge_opcode_type cmd;
+ u8 grp_num, pri_id, tc_id;
struct hclge_desc desc;
- int queue_id, group_id;
- int tc_id, qset_id;
- int pri_id, ret;
u16 qs_id_l;
u16 qs_id_h;
- u8 grp_num;
+ int ret;
u32 i;
- ret = kstrtouint(cmd_buf, 0, &queue_id);
+ ret = kstrtou16(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -754,7 +753,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
tc_id = tc->tc_id & 0x7;
dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
- dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ dev_info(&hdev->pdev->dev, "%04u | %04u | %02u | %02u\n",
queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -800,6 +799,140 @@ err_tm_map_cmd_send:
cmd, ret);
}
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tm nodes, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+ pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+ nodes->pg_base_id, nodes->pg_num);
+ pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+ nodes->pri_base_id, nodes->pri_num);
+ pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
+
+ return 0;
+}
+
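
The new dump helpers append to a caller-provided buffer with pos += scnprintf(buf + pos, len - pos, ...); a userspace approximation of scnprintf(), which, unlike snprintf(), returns the number of characters actually stored:

#include <stdarg.h>
#include <stdio.h>

/* clamps like the kernel's scnprintf(): returns chars stored */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!size)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	char buf[64];
	int pos = 0;

	pos += scnprintf_like(buf + pos, sizeof(buf) - pos, "PG %4u\n", 1);
	pos += scnprintf_like(buf + pos, sizeof(buf) - pos, "PRI %4u\n", 8);
	fputs(buf, stdout);
	return 0;
}

Because the return value never exceeds the remaining space, pos can be accumulated without ever running past len.
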
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_pri_shaper_para c_shaper_para;
+ struct hclge_pri_shaper_para p_shaper_para;
+ u8 pri_num, sch_mode, weight;
+ char *sch_mode_str;
+ int pos = 0;
+ int ret;
+ u8 i;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ pos += scnprintf(buf + pos, len - pos,
+ "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U ");
+ pos += scnprintf(buf + pos, len - pos,
+ "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
+
+ for (i = 0; i < pri_num; i++) {
+ ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4s %3u %3u %3u %3u ",
+ i, sch_mode_str, weight, c_shaper_para.ir_b,
+ c_shaper_para.ir_u, c_shaper_para.ir_s);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %1u %6u ",
+ c_shaper_para.bs_b, c_shaper_para.bs_s,
+ c_shaper_para.flag, c_shaper_para.rate);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u ",
+ p_shaper_para.ir_b, p_shaper_para.ir_u,
+ p_shaper_para.ir_s, p_shaper_para.bs_b,
+ p_shaper_para.bs_s);
+ pos += scnprintf(buf + pos, len - pos, "%1u %6u\n",
+ p_shaper_para.flag, p_shaper_para.rate);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 priority, link_vld, sch_mode, weight;
+ char *sch_mode_str;
+ int ret, pos;
+ u16 qset_num;
+ u16 i;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n");
+
+ for (i = 0; i < qset_num; i++) {
+ ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %1u %4s %3u\n",
+ i, priority, link_vld, sch_mode_str, weight);
+ }
+
+ return 0;
+}
+
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
struct hclge_cfg_pause_param_cmd *pause_param;
@@ -851,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
- struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
- struct hclge_rx_priv_wl_buf *rx_priv_wl;
- struct hclge_rx_com_wl *rx_packet_cnt;
- struct hclge_rx_com_thrd *rx_com_thrd;
- struct hclge_rx_com_wl *rx_com_wl;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc[2];
+ struct hclge_desc desc;
int i, ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
-
- tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
- rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -891,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
le16_to_cpu(rx_buf_cmd->shared_buf));
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_com_wl->com_wl.high),
le16_to_cpu(rx_com_wl->com_wl.low));
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_packet_cnt->com_wl.high),
le16_to_cpu(rx_packet_cnt->com_wl.low));
- dev_info(&hdev->pdev->dev, "\n");
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -944,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -967,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ enum hclge_opcode_type cmd;
+ int ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
return;
err_qos_cmd_send:
@@ -1465,8 +1670,6 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
const char *cmd_buf)
{
-#define HCLGE_MAX_QSET_NUM 1024
-
u16 qsid;
int ret;
@@ -1476,9 +1679,9 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
return;
}
- if (qsid >= HCLGE_MAX_QSET_NUM) {
- dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
- qsid);
+ if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
+ qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);
return;
}
@@ -1591,3 +1794,22 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
return 0;
}
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
+ strlen(HNAE3_DBG_TM_NODES)) == 0)
+ return hclge_dbg_dump_tm_nodes(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
+ strlen(HNAE3_DBG_TM_PRI)) == 0)
+ return hclge_dbg_dump_tm_pri(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
+ strlen(HNAE3_DBG_TM_QSET)) == 0)
+ return hclge_dbg_dump_tm_qset(hdev, buf, len);
+
+ return -EINVAL;
+}
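
hclge_dbg_read_cmd() routes on a command-name prefix; a minimal sketch of the same strncmp() dispatch (the command strings are assumed to mirror the HNAE3_DBG_* definitions, which are not shown here):

#include <stdio.h>
#include <string.h>

/* assumed values; the real HNAE3_DBG_* strings are defined elsewhere */
#define DBG_TM_NODES	"tm_nodes"
#define DBG_TM_PRI	"tm_priority"
#define DBG_TM_QSET	"tm_qset"

static int dbg_read_cmd(const char *cmd_buf)
{
	if (!strncmp(cmd_buf, DBG_TM_NODES, strlen(DBG_TM_NODES)))
		return 1;	/* would run the tm-nodes dump */
	if (!strncmp(cmd_buf, DBG_TM_PRI, strlen(DBG_TM_PRI)))
		return 2;	/* would run the tm-pri dump */
	if (!strncmp(cmd_buf, DBG_TM_QSET, strlen(DBG_TM_QSET)))
		return 3;	/* would run the tm-qset dump */
	return -22;	/* unrecognized command -> -EINVAL */
}

int main(void)
{
	printf("%d %d\n", dbg_read_cmd("tm_qset"), dbg_read_cmd("bogus"));
	return 0;
}
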
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 9ee55ee0487d..0ca7f1b984bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1073,7 +1073,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
* This function queries the number of mpf and pf buffer descriptors.
*/
static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
- int *mpf_bd_num, int *pf_bd_num)
+ u32 *mpf_bd_num, u32 *pf_bd_num)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_min_bd_num, pf_min_bd_num;
@@ -1102,7 +1102,7 @@ static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
*pf_bd_num = le32_to_cpu(desc_bd.data[1]);
if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
- dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n",
*mpf_bd_num, *pf_bd_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..34b744df6709 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -24,7 +25,7 @@
#include "hnae3.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -55,8 +56,6 @@
#define HCLGE_LINK_STATUS_MS 10
-#define HCLGE_VF_VPORT_START_NUM 1
-
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -628,7 +627,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -636,7 +635,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -752,7 +751,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -929,7 +929,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return 0;
}
-static int hclge_parse_speed(int speed_cmd, int *speed)
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
case 6:
@@ -1372,6 +1372,8 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1390,7 +1392,9 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1405,8 +1409,12 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -4236,11 +4244,6 @@ static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_RSS_KEY_SIZE;
}
-static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_IND_TBL_SIZE;
-}
-
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
@@ -4282,6 +4285,7 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
+ int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
@@ -4290,8 +4294,10 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
@@ -4397,6 +4403,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
@@ -4421,7 +4428,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
/* Get indirect table */
if (indir)
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
return 0;
@@ -4430,6 +4437,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
@@ -4461,7 +4469,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* Update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
@@ -4493,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+ struct ethtool_rxnfc *nfc,
+ struct hclge_rss_input_tuple_cmd *req)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
u8 tuple_sets;
- int ret;
-
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4537,8 +4535,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4553,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4572,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+ u8 *tuple_sets)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- u8 tuple_sets;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -4702,14 +4743,15 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
int i, j;
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport[j].rss_indirection_tbl[i] =
i % vport[j].alloc_rss_size;
}
}
-static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
@@ -4717,6 +4759,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ u16 *rss_ind_tbl;
+
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv4_udp_en =
@@ -4730,17 +4774,27 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_algo = rss_algo;
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ vport[i].rss_indirection_tbl = rss_ind_tbl;
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
+
+ return 0;
}
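
The per-vport indirection table is now sized at runtime from dev_specs and allocated with devm_kcalloc(); a userspace sketch of the allocate-and-seed step, with calloc() standing in for the managed allocator:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vport {
	uint16_t *rss_indirection_tbl;
	uint16_t alloc_rss_size;
};

static int rss_init_cfg(struct vport *vport, uint16_t tbl_size)
{
	uint16_t i;

	/* the driver uses devm_kcalloc(), released with the device */
	vport->rss_indirection_tbl = calloc(tbl_size, sizeof(uint16_t));
	if (!vport->rss_indirection_tbl)
		return -12;	/* -ENOMEM */

	/* spread table entries evenly over the allocated queues */
	for (i = 0; i < tbl_size; i++)
		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
	return 0;
}

int main(void)
{
	struct vport v = { .alloc_rss_size = 4 };

	if (!rss_init_cfg(&v, 512))
		printf("entry[5] = %u\n", v.rss_indirection_tbl[5]);
	free(v.rss_indirection_tbl);
	return 0;
}
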
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
@@ -5488,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->psrc)
@@ -5518,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->l4_proto)
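
ipv6_addr_any() replaces the open-coded four-word comparison; a standalone sketch of the equivalent test (the kernel helper takes a struct in6_addr rather than a raw array):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the old open-coded test: all four 32-bit words zero */
static bool addr6_any(const uint32_t a[4])
{
	return (a[0] | a[1] | a[2] | a[3]) == 0;
}

int main(void)
{
	uint32_t any[4] = { 0, 0, 0, 0 };
	uint32_t set[4] = { 0, 0, 0, 1 };

	printf("%d %d\n", addr6_any(any), addr6_any(set));
	return 0;
}
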
@@ -5575,7 +5625,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
if (fs->m_ext.vlan_tci &&
be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
dev_err(&hdev->pdev->dev,
- "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
return -EINVAL;
}
@@ -8303,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
}
}
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
{
- int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_cfg, *tmp;
- struct hclge_dev *hdev = vport->back;
- struct list_head tmp_del_list, *list;
- int ret;
-
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- list = &vport->uc_mac_list;
- unsync = hclge_rm_uc_addr_common;
- } else {
- list = &vport->mc_mac_list;
- unsync = hclge_rm_mc_addr_common;
- }
-
- INIT_LIST_HEAD(&tmp_del_list);
-
- if (!is_del_list)
- set_bit(vport->vport_id, hdev->vport_config_block);
-
- spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, &tmp_del_list);
+ list_add_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -8342,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
break;
}
}
+}
- spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
- list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ret = unsync(vport, mac_cfg->mac_addr);
if (!ret || ret == -ENOENT) {
/* clear all mac addr from hardware, but remain these
@@ -8363,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
spin_lock_bh(&vport->mac_list_lock);
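
The split keeps mac_list_lock held only while entries are moved onto a private list; the slow hardware unsync then runs unlocked. A sketch of that collect-then-process idiom, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;

static void process_all(void)
{
	struct node *del_list, *n;

	/* short critical section: detach everything in O(1) */
	pthread_mutex_lock(&list_lock);
	del_list = shared_list;
	shared_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* the slow work (hardware commands in the driver) runs unlocked */
	while ((n = del_list)) {
		del_list = n->next;
		printf("unsync %d\n", n->val);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->val = i;
		n->next = shared_list;
		shared_list = n;
	}
	process_all();
	return 0;
}
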
@@ -8769,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -8824,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -8838,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -8849,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
* print logs when unload.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* If the vf vlan table is full, firmware will close the vf vlan
+ * filter; it is then neither possible nor necessary to add a new
+ * vlan id. If spoof check is enabled while the vf vlan table is
+ * full, no new vlan should be added either, because tx packets
+ * with these vlan ids would be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9661,7 +9753,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* HW supports 2 layers of vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
@@ -9810,12 +9902,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
@@ -10578,7 +10677,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- hclge_rss_init_cfg(hdev);
+ ret = hclge_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -10806,7 +10910,7 @@ static void hclge_reset_vf_rate(struct hclge_dev *hdev)
}
}
-static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
@@ -10827,7 +10931,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
if (ret)
return ret;
@@ -11069,6 +11173,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
@@ -11112,11 +11217,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
@@ -11796,7 +11902,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
- .get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -11847,6 +11952,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..19d7f28773f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -17,6 +17,8 @@
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_VF_VPORT_START_NUM 1
+
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
@@ -44,15 +46,12 @@
#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
-#define HCLGE_RAS_OTHER_STS_REG 0x20B00
-#define HCLGE_FUNC_RESET_STS_REG 0x20C00
#define HCLGE_GRO_EN_REG 0x28000
/* bar registers for rcb */
@@ -97,8 +96,6 @@
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-#define HCLGE_RSS_CFG_TBL_NUM \
- (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
@@ -107,6 +104,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
@@ -146,6 +145,8 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_MAX_QSET_NUM 1024
+
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -724,7 +725,7 @@ struct hclge_vf_vlan_cfg {
* x = (~k) & v
* y = (k ^ ~v) & k
*/
-#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_x(x, k, v) (x = ~(k) & (v))
#define calc_y(y, k, v) \
do { \
const typeof(k) _k_ = (k); \
@@ -920,7 +921,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
- u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
@@ -1004,6 +1005,8 @@ int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 754c09ada901..51a36e74f088 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -56,7 +56,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->msg.resp_status = resp;
} else {
dev_warn(&hdev->pdev->dev,
- "failed to send response to VF, response status %d is out-of-bound\n",
+ "failed to send response to VF, response status %u is out-of-bound\n",
resp);
resp_pf_to_vf->msg.resp_status = EIO;
}
@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
- int i = 0;
+ int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
- req->msg.param[i].ring_type);
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg.param[i].tqp_index]);
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
index = mbx_req->msg.data[0];
+ /* Check the query index of rss_hash_key from VF; make sure it
+ * does not exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return;
+ }
+
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 82742a64f3b7..151afd1f0688 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -41,8 +41,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
struct hclge_shaper_ir_para *ir_para,
u32 max_tm_rate)
{
+#define DEFAULT_SHAPER_IR_B 126
#define DIVISOR_CLK (1000 * 8)
-#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
@@ -69,10 +70,10 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
+ ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
ir_para->ir_u = 0;
ir_para->ir_s = 0;
@@ -81,7 +82,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc >= ir && ir) {
ir_s_calc++;
- ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
+ ir_calc = DEFAULT_DIVISOR_IR_B /
+ (tick * (1 << ir_s_calc));
}
ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
@@ -92,12 +94,12 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
while (ir_calc < ir) {
ir_u_calc++;
- numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
+ numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
ir_para->ir_b = (ir * tick + (denominator >> 1)) /
@@ -640,13 +642,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
- kinfo->tc_info.num_tc = vport->vport_id ? 1 :
+ if (vport->vport_id) {
+ kinfo->tc_info.num_tc = 1;
+ vport->qs_offset = HNAE3_MAX_TC +
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM;
+ vport_max_rss_size = hdev->vf_rss_size_max;
+ } else {
+ kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
- (vport->vport_id ? (vport->vport_id - 1) : 0);
+ vport->qs_offset = 0;
+ vport_max_rss_size = hdev->pf_rss_size_max;
+ }
- vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
- hdev->pf_rss_size_max;
max_rss_size = min_t(u16, vport_max_rss_size,
hclge_vport_get_max_rss_size(vport));
@@ -1616,3 +1623,189 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
return hclge_tm_bp_setup(hdev);
}
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ /* Each PF has 8 qsets and each VF has 1 qset */
+ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *qset_num = le16_to_cpu(nodes->qset_num);
+ return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pri num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *pri_num = nodes->pri_num;
+ return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset map priority, ret = %d\n", ret);
+ return ret;
+ }
+
+ *priority = map->priority;
+ *link_vld = map->link_vld;
+ return 0;
+}
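
hclge_tm_get_qset_map_pri and the other new getters in this hunk all share one shape: set up a read descriptor for the opcode, stamp in the object id, send, and unpack one or two reply fields. A condensed user-space sketch of that round trip (the descriptor layout and cmd_send() are stand-ins, not the real firmware interface):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint16_t opcode;
	uint8_t data[24];	/* command-specific payload */
};

/* Stand-in for hclge_cmd_send(): fills d->data on success. */
static int cmd_send(struct desc *d) { (void)d; return 0; }

/* One query is one descriptor round trip: set up, send, unpack. */
static int tm_query_u8(uint16_t opcode, uint16_t id, size_t out_off,
		       uint8_t *out)
{
	struct desc d = { .opcode = opcode };
	int ret;

	d.data[0] = (uint8_t)(id & 0xff);	/* id placement illustrative */
	d.data[1] = (uint8_t)(id >> 8);

	ret = cmd_send(&d);
	if (ret) {
		fprintf(stderr, "query %#x failed: %d\n", opcode, ret);
		return ret;
	}
	*out = d.data[out_off];		/* reply field offset illustrative */
	return 0;
}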
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+ struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+ qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+ qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = qs_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+ struct hclge_qs_weight_cmd *qs_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ qs_weight->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = qs_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+ struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+ pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+ pri_sch_mode->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = pri_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ priority_weight->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = priority_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+ return 0;
+}
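
Earlier in this file, the shaper rework replaces the bare 126 with DEFAULT_SHAPER_IR_B but keeps the search itself: start from the default IR_b, grow the 2^IR_s divisor while the computed rate is too high or the 2^IR_u multiplier while it is too low, then solve for IR_b with round-to-nearest division. A compilable sketch of that search under the same constants (tick is assumed non-zero, as the driver guarantees):

#include <stdint.h>

#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

/* Solve ir ~= ir_b * DIVISOR_CLK * 2^ir_u / (tick * 2^ir_s), mirroring
 * the structure of hclge_shaper_para_calc().
 */
static void shaper_para_calc(uint32_t ir, uint32_t tick,
			     uint8_t *ir_b, uint8_t *ir_u, uint8_t *ir_s)
{
	uint32_t ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
	uint32_t ir_u_calc = 0;
	uint32_t ir_s_calc = 0;

	*ir_u = 0;
	*ir_s = 0;
	if (ir_calc == ir) {
		*ir_b = DEFAULT_SHAPER_IR_B;
	} else if (ir_calc > ir) {
		/* Rate too high: increase the denominator via ir_s. */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1u << ir_s_calc));
		}
		*ir_b = (uint8_t)((ir * tick * (1u << ir_s_calc) +
				   (DIVISOR_CLK >> 1)) / DIVISOR_CLK);
		*ir_s = (uint8_t)ir_s_calc;
	} else {
		/* Rate too low: increase the numerator via ir_u. */
		while (ir_calc < ir) {
			ir_u_calc++;
			ir_calc = (DEFAULT_DIVISOR_IR_B * (1u << ir_u_calc) +
				   (tick >> 1)) / tick;
		}
		if (ir_calc == ir) {
			*ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			uint32_t den = DIVISOR_CLK * (1u << --ir_u_calc);

			*ir_b = (uint8_t)((ir * tick + (den >> 1)) / den);
		}
		*ir_u = (uint8_t)ir_u_calc;
	}
}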
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 5498d73ed34b..b25d76023af0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -17,10 +17,13 @@
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
-#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE
#define HCLGE_ETHER_MAX_RATE 100000
+#define HCLGE_TM_PF_MAX_PRI_NUM 8
+#define HCLGE_TM_PF_MAX_QSET_NUM 8
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -65,6 +68,18 @@ struct hclge_priority_weight_cmd {
u8 dwrr;
};
+struct hclge_pri_sch_mode_cfg_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ u8 sch_mode;
+};
+
struct hclge_qs_weight_cmd {
__le16 qs_id;
u8 dwrr;
@@ -173,13 +188,34 @@ struct hclge_shaper_ir_para {
u8 ir_s; /* IR_S parameter of IR shaper */
};
+struct hclge_tm_nodes_cmd {
+ u8 pg_base_id;
+ u8 pri_base_id;
+ __le16 qset_base_id;
+ __le16 queue_base_id;
+ u8 pg_num;
+ u8 pri_num;
+ __le16 qset_num;
+ __le16 queue_num;
+};
+
+struct hclge_pri_shaper_para {
+ u8 ir_b;
+ u8 ir_u;
+ u8 ir_s;
+ u8 bs_b;
+ u8 bs_s;
+ u8 flag;
+ u32 rate;
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH))
+ hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
+ HCLGE_TM_SHAP_##string##_LSH)
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
@@ -195,5 +231,15 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
-
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para);
#endif
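
The tweaked hclge_tm_get_field pairs with hclge_tm_set_field through token pasting: the field name selects its _MSK/_LSH constants at preprocessing time. A self-contained sketch of the idiom (the IR_B mask and shift below are illustrative, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

#define SHAP_IR_B_MSK	0x000000ffu	/* illustrative: bits 0-7 */
#define SHAP_IR_B_LSH	0

#define shap_set_field(dest, name, val) \
	((dest) = ((dest) & ~SHAP_##name##_MSK) | \
		  (((uint32_t)(val) << SHAP_##name##_LSH) & SHAP_##name##_MSK))
#define shap_get_field(src, name) \
	(((src) & SHAP_##name##_MSK) >> SHAP_##name##_LSH)

int main(void)
{
	uint32_t para = 0;

	shap_set_field(para, IR_B, 126);
	printf("ir_b = %u\n", (unsigned int)shap_get_field(para, IR_B));
	return 0;
}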
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index e04c0cfeb95c..46700c427849 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+struct vf_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
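
hclgevf_cmd_copy_desc is a plain circular-buffer write: each descriptor lands at next_to_use, which wraps to zero when it reaches desc_num. The same wraparound in standalone form (the descriptor type is simplified):

struct desc { unsigned char data[32]; };

struct ring {
	struct desc *desc;	/* ring storage, desc_num entries */
	int desc_num;
	int next_to_use;
};

/* Copy num descriptors into the ring, wrapping at desc_num. */
static void ring_copy_desc(struct ring *csq, const struct desc *src, int num)
{
	int handle;

	for (handle = 0; handle < num; handle++) {
		csq->desc[csq->next_to_use] = src[handle];
		if (++csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
	}
}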
+
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
- switch (desc_ret) {
- case HCLGEVF_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGEVF_CMD_NO_AUTH:
- return -EPERM;
- case HCLGEVF_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGEVF_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGEVF_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGEVF_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGEVF_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGEVF_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGEVF_CMD_TIMEOUT:
- return -ETIME;
- case HCLGEVF_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGEVF_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGEVF_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct vf_errcode hclgevf_cmd_errcode[] = {
+ {HCLGEVF_CMD_EXEC_SUCCESS, 0},
+ {HCLGEVF_CMD_NO_AUTH, -EPERM},
+ {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+ {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGEVF_CMD_PARA_ERR, -EINVAL},
+ {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+ {HCLGEVF_CMD_TIMEOUT, -ETIME},
+ {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGEVF_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclgevf_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
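
The conversion above trades a twelve-arm switch for a table walk, so adding a new IMP status costs one array entry instead of a new case. The same shape, standalone (the status values are illustrative):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct errcode_map {
	uint16_t fw_status;	/* status word from firmware */
	int errno_val;		/* matching -errno */
};

static int convert_err_code(uint16_t fw_status)
{
	static const struct errcode_map map[] = {
		{ 0, 0 },		/* success */
		{ 1, -EPERM },		/* no authority */
		{ 2, -EOPNOTSUPP },	/* opcode not supported */
		{ 3, -EINVAL },		/* bad parameter */
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].fw_status == fw_status)
			return map[i].errno_val;

	return -EIO;		/* unknown status */
}

Declaring the table static const, as here, also keeps it out of the stack frame; the patch builds it on the stack on every call.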
+
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ /* Get the result of hardware write back */
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
}
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+ hw->cmq.last_status = desc_ret;
+
+ return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check.
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
}
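
hclgevf_cmd_check_result polls the CSQ with a one-microsecond delay until the firmware catches up or tx_timeout iterations elapse, and only then inspects the write-back. The skeleton of that bounded wait (done() is a stand-in for hclgevf_cmd_csq_done(), and usleep() stands in for the kernel's udelay() busy-wait):

#include <stdbool.h>
#include <unistd.h>

/* Poll done() once per microsecond, for at most max_us microseconds;
 * return true on completion, false on timeout.
 */
static bool wait_for_completion_us(bool (*done)(void), unsigned int max_us)
{
	unsigned int elapsed = 0;

	do {
		if (done())
			return true;
		usleep(1);
		elapsed++;
	} while (elapsed < max_us);

	return false;
}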
/* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- struct hclgevf_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int status = 0;
- u16 retval;
- u16 opcode;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
* which will be used for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = le16_to_cpu(desc[0].opcode);
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclgevf_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use);
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw))
- break;
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (hclgevf_cmd_csq_done(hw)) {
- complete = true;
- handle = 0;
-
- while (handle < num) {
- /* Get the result of hardware write back */
- desc_to_use = &hw->cmq.csq.desc[ntc];
- desc[handle] = *desc_to_use;
-
- if (likely(!hclgevf_is_special_opcode(opcode)))
- retval = le16_to_cpu(desc[handle].retval);
- else
- retval = le16_to_cpu(desc[0].retval);
-
- status = hclgevf_cmd_convert_err_code(retval);
- hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
- ntc++;
- handle++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- }
-
- if (!complete)
- status = -EBADE;
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return status;
+ return ret;
}
static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
@@ -342,6 +362,15 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
+static __le32 hclgevf_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -352,6 +381,7 @@ static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ resp->api_caps = hclgevf_build_api_caps();
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
return status;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 82eed258e8c1..8a37a22a176b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -161,11 +161,15 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGEVF_API_CAP_BITS {
+ HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -212,8 +216,8 @@ struct hclgevf_rss_input_tuple_cmd {
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
- u16 start_table_index;
- u16 rss_set_bitmap;
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
@@ -225,7 +229,7 @@ struct hclgevf_rss_indirection_table_cmd {
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
struct hclgevf_rss_tc_mode_cmd {
- u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
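
Marking these fields __le16 matters on big-endian hosts: the firmware consumes little-endian words, so the driver now funnels every store through cpu_to_le16() (see the hclgevf_main.c hunks below). A portable sketch of the same discipline (struct layout and the byte-swap helper are simplified):

#include <stdint.h>

/* Firmware-facing descriptor: 16-bit fields are little-endian on the wire. */
struct rss_indir_cmd {
	uint16_t start_table_index;	/* __le16 in the driver */
	uint16_t rss_set_bitmap;	/* __le16 in the driver */
	uint8_t  rsv[4];
	uint8_t  rss_result[16];
};

static inline uint16_t cpu_to_le16_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return (uint16_t)((v << 8) | (v >> 8));	/* swap on big-endian */
#else
	return v;				/* little-endian: identity */
#endif
}

static void fill_cmd(struct rss_indir_cmd *req, uint16_t index,
		     uint16_t bitmap)
{
	req->start_table_index = cpu_to_le16_sketch(index);
	req->rss_set_bitmap = cpu_to_le16_sketch(bitmap);
}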
@@ -274,7 +278,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
-#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
@@ -292,7 +295,8 @@ struct hclgevf_dev_specs_0_cmd {
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -310,9 +314,9 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
}
#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, (reg), (value))
+ hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, (reg))
+ hclgevf_read_reg((a)->io_base, reg)
#define HCLGEVF_SEND_SYNC(flag) \
((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..700e068764c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -180,7 +180,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -188,7 +188,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -642,26 +642,25 @@ static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
return HCLGEVF_RSS_KEY_SIZE;
}
-static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_IND_TBL_SIZE;
-}
-
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
struct hclgevf_rss_indirection_table_cmd *req;
struct hclgevf_desc desc;
+ int rss_cfg_tbl_num;
int status;
int i, j;
req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGEVF_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
false);
- req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
- req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ req->start_table_index =
+ cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
@@ -702,12 +701,16 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
(tc_valid[i] & 0x1));
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -795,7 +798,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
}
if (indir)
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
return 0;
@@ -838,7 +841,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
@@ -870,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc,
+ struct hclgevf_rss_input_tuple_cmd *req)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
u8 tuple_sets;
- int ret;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -917,8 +908,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -933,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -951,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+ int flow_type, u8 *tuple_sets)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGEVF_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGEVF_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGEVF_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGEVF_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
return 0;
}
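
hclgevf_convert_rss_tuple is a pure bit translation from hardware tuple bits to the ethtool RXH_* flags, which is what lets the refactored getter stay free of ethtool plumbing until its last line. The mapping in standalone form (the HW_* bit positions are illustrative; the RXH_* values mirror ethtool.h):

#include <stdint.h>

#define HW_D_PORT_BIT	(1u << 0)	/* illustrative hardware bits */
#define HW_S_PORT_BIT	(1u << 1)
#define HW_D_IP_BIT	(1u << 2)
#define HW_S_IP_BIT	(1u << 3)

#define RXH_IP_SRC	(1ull << 4)
#define RXH_IP_DST	(1ull << 5)
#define RXH_L4_B_0_1	(1ull << 6)
#define RXH_L4_B_2_3	(1ull << 7)

static uint64_t convert_rss_tuple(uint8_t tuple_sets)
{
	uint64_t data = 0;

	if (tuple_sets & HW_D_PORT_BIT)
		data |= RXH_L4_B_2_3;
	if (tuple_sets & HW_S_PORT_BIT)
		data |= RXH_L4_B_0_1;
	if (tuple_sets & HW_D_IP_BIT)
		data |= RXH_IP_DST;
	if (tuple_sets & HW_S_IP_BIT)
		data |= RXH_IP_SRC;
	return data;
}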
@@ -2482,8 +2519,9 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
-static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
+static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
@@ -2492,7 +2530,16 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ u8 *rss_ind_tbl;
+
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
@@ -2502,13 +2549,18 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
/* Initialize RSS indirect table */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+
+ return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
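
The rss_init_cfg change above replaces a fixed-size shadow table with one sized at probe time from dev_specs.rss_ind_tbl_size; devm_kcalloc ties the allocation's lifetime to the device, so no matching free is needed. A plain-C sketch of the sizing and the round-robin default fill (calloc stands in for devm_kcalloc):

#include <stdint.h>
#include <stdlib.h>

/* Allocate the indirection table and spread queues round-robin over
 * it, as the loop at the end of hclgevf_rss_init_cfg() does.
 */
static uint8_t *rss_indir_tbl_init(uint16_t tbl_size, uint32_t rss_size)
{
	uint8_t *tbl = calloc(tbl_size, sizeof(*tbl));
	uint16_t i;

	if (!tbl)
		return NULL;	/* -ENOMEM path in the driver */
	for (i = 0; i < tbl_size; i++)
		tbl[i] = i % rss_size;
	return tbl;
}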
@@ -3045,6 +3097,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
@@ -3063,6 +3116,7 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
@@ -3077,6 +3131,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
@@ -3263,7 +3319,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- hclgevf_rss_init_cfg(hdev);
+ ret = hclgevf_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_config;
+ }
+
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3441,11 +3502,12 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
+ sizeof(u32), GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
hdev->rss_cfg.rss_size = kinfo->rss_size;
@@ -3684,7 +3746,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
.get_rss_key_size = hclgevf_get_rss_key_size,
- .get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..8c27ecd819af 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -113,8 +113,7 @@
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-#define HCLGEVF_RSS_CFG_TBL_NUM \
- (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
@@ -122,6 +121,10 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+
+#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
@@ -215,7 +218,8 @@ struct hclgevf_rss_cfg {
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
- u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ /* shadow table */
+ u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index a0bfb509e002..c612ef526d16 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -20,7 +20,7 @@
* 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
* 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
* Should prevent lockup.
- * 1.04 RMK 17/09/1997 Added more info when initialsation of chip goes wrong.
+ * 1.04 RMK 17/09/1997 Added more info when initialisation of chip goes wrong.
* TDR now only reports failure when chip reports non-zero
* TDR time-distance.
* 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
@@ -117,7 +117,7 @@ ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
* Some inline assembler to allow fast transfers on to/off of the card.
* Since this driver depends on some features presented by the ARM
* specific architecture, and that you can't configure this driver
- * without specifiing ARM mode, this is not a problem.
+ * without specifying ARM mode, this is not a problem.
*
* This routine is essentially an optimised memcpy from the card's
* onboard RAM to kernel memory.
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index c00b9097eeea..471be6ec7e8a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -38,6 +38,7 @@
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/processor.h>
@@ -2390,11 +2391,11 @@ static int emac_check_deps(struct emac_instance *dev,
static void emac_put_deps(struct emac_instance *dev)
{
- of_dev_put(dev->mal_dev);
- of_dev_put(dev->zmii_dev);
- of_dev_put(dev->rgmii_dev);
- of_dev_put(dev->mdio_dev);
- of_dev_put(dev->tah_dev);
+ platform_device_put(dev->mal_dev);
+ platform_device_put(dev->zmii_dev);
+ platform_device_put(dev->rgmii_dev);
+ platform_device_put(dev->mdio_dev);
+ platform_device_put(dev->tah_dev);
}
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
@@ -2435,7 +2436,7 @@ static int emac_wait_deps(struct emac_instance *dev)
for (i = 0; i < EMAC_DEP_COUNT; i++) {
of_node_put(deps[i].node);
if (err)
- of_dev_put(deps[i].ofdev);
+ platform_device_put(deps[i].ofdev);
}
if (err == 0) {
dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
@@ -2444,7 +2445,7 @@ static int emac_wait_deps(struct emac_instance *dev)
dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
}
- of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
+ platform_device_put(deps[EMAC_DEP_PREV_IDX].ofdev);
return err;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f302504faa8a..118a4bd3f877 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -115,7 +115,7 @@ struct ibmvnic_stat {
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
offsetof(struct ibmvnic_statistics, stat))
-#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
static const struct ibmvnic_stat ibmvnic_stats[] = {
{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -247,46 +247,23 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
if (!ltb->buff)
return;
+ /* VIOS automatically unmaps the long term buffer at remote
+ * end for the following resets:
+ * FAILOVER, MOBILITY, TIMEOUT.
+ */
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
- adapter->reset_reason != VNIC_RESET_MOBILITY)
+ adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
{
- struct device *dev = &adapter->vdev->dev;
- int rc;
+ if (!ltb->buff)
+ return -EINVAL;
memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -508,8 +485,7 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
rx_pool->size *
rx_pool->buff_size);
} else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
+ rc = reset_long_term_buff(&rx_pool->long_term_buff);
}
if (rc)
@@ -632,12 +608,11 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
{
int rc, i;
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ rc = reset_long_term_buff(&tx_pool->long_term_buff);
if (rc)
return rc;
@@ -664,10 +639,10 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tso_pool[i]);
if (rc)
return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tx_pool[i]);
if (rc)
return rc;
}
@@ -955,6 +930,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -1196,12 +1172,25 @@ static int ibmvnic_open(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
- /* If device failover is pending, just set device state and return.
- * Device operation will be handled by reset routine.
+ ASSERT_RTNL();
+
+ /* If device failover is pending or we are about to reset, just set
+ * device state and return. Device operation will be handled by reset
+ * routine.
+ *
+ * It should be safe to overwrite the adapter->state here. Since
+ * we hold the rtnl, either the reset has not actually started or
+ * the rtnl got dropped during the set_link_state() in do_reset().
+ * In the former case, no one else is changing the state (again we
+ * have the rtnl) and in the latter case, do_reset() will detect and
+ * honor our setting below.
*/
- if (adapter->failover_pending) {
+ if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
+ netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n",
+ adapter->state, adapter->failover_pending);
adapter->state = VNIC_OPEN;
- return 0;
+ rc = 0;
+ goto out;
}
if (adapter->state != VNIC_CLOSED) {
@@ -1220,11 +1209,12 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
out:
- /*
- * If open fails due to a pending failover, set device state and
- * return. Device operation will be handled by reset routine.
+ /* If open failed and there is a pending failover or in-progress reset,
+ * set device state and return. Device operation will be handled by
+ * reset routine. See also comments above regarding rtnl.
*/
- if (rc && adapter->failover_pending) {
+ if (rc &&
+ (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
adapter->state = VNIC_OPEN;
rc = 0;
}
@@ -1352,10 +1342,8 @@ static int __ibmvnic_close(struct net_device *netdev)
adapter->state = VNIC_CLOSING;
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- if (rc)
- return rc;
adapter->state = VNIC_CLOSED;
- return 0;
+ return rc;
}
static int ibmvnic_close(struct net_device *netdev)
@@ -1383,10 +1371,10 @@ static int ibmvnic_close(struct net_device *netdev)
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
- * @hdr_field - bitfield determining needed headers
- * @skb - socket buffer
- * @hdr_len - array of header lengths
- * @tot_len - total length of data
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer to write the header to
*
* Reads hdr_field to determine which headers are needed by firmware.
* Builds a buffer containing these headers. Saves individual header
@@ -1443,11 +1431,11 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
/**
* create_hdr_descs - create header and header extension descriptors
- * @hdr_field - bitfield determining needed headers
- * @data - buffer containing header data
- * @len - length of data buffer
- * @hdr_len - array of individual header lengths
- * @scrq_arr - descriptor array
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
@@ -1495,10 +1483,9 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @skb - socket buffer
- * @num_entries - number of descriptors to be sent
- * @subcrq - first TX descriptor
- * @hdr_field - bit field determining which headers will be sent
+ * @txbuff: tx buffer
+ * @num_entries: number of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
*
* This function will build a TX descriptor array with applicable
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
@@ -1701,6 +1688,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
+ /* post changes to long_term_buff *dst before VIOS accesses it */
+ dma_wmb();
+
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -1924,93 +1914,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return rc;
}
-/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- goto out;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
- goto out;
- }
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- goto out;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
- goto out;
- }
-
- rc = init_resources(adapter);
- if (rc)
- goto out;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc) {
- rc = IBMVNIC_OPEN_FAILED;
- goto out;
- }
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
-out:
- if (rc)
- adapter->state = reset_state;
- return rc;
-}
-
-/**
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2027,17 +1931,27 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->state, adapter->failover_pending,
rwi->reset_reason, reset_state);
- rtnl_lock();
- /*
- * Now that we have the rtnl lock, clear any pending failover.
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
+
+ /* Now that we have the rtnl lock, clear any pending failover.
* This will ensure ibmvnic_open() has either completed or will
* block until failover is complete.
*/
if (rwi->reset_reason == VNIC_RESET_FAILOVER)
adapter->failover_pending = false;
+ /* read the state and check (again) after getting rtnl */
+ reset_state = adapter->state;
+
+ if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
+ rc = -EBUSY;
+ goto out;
+ }
+
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -2049,25 +1963,53 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state == VNIC_OPEN) {
+ /* When we dropped rtnl, ibmvnic_open() got
+ * it and noticed that we are resetting and
+ * set the adapter state to OPEN. Update our
+ * new "target" state, and resume the reset
+ * from VNIC_CLOSING state.
+ */
+ netdev_dbg(netdev,
+ "Open changed state from %d, updating.\n",
+ reset_state);
+ reset_state = VNIC_OPEN;
+ adapter->state = VNIC_CLOSING;
+ }
+
+ if (adapter->state != VNIC_CLOSING) {
+ /* If someone else changed the adapter state
+ * when we dropped the rtnl, fail the reset
+ */
+ rc = -1;
+ goto out;
+ }
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -2076,7 +2018,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -2111,11 +2055,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_login(netdev);
- if (rc) {
+ if (rc)
goto out;
- }
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2137,14 +2084,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = reset_tx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
rc = reset_rx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
}
@@ -2180,7 +2127,9 @@ out:
/* restore the adapter state if reset failed */
if (rc)
adapter->state = reset_state;
- rtnl_unlock();
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
adapter->state, adapter->failover_pending, rc);
@@ -2196,6 +2145,14 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
rwi->reset_reason);
+ /* read the state and check (again) after getting rtnl */
+ reset_state = adapter->state;
+
+ if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
+ rc = -EBUSY;
+ goto out;
+ }
+
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@@ -2311,12 +2268,8 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
- /*
- * Since we are doing a hard reset now, clear the
+ if (adapter->force_reset_recovery) {
+ /* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
*/
@@ -2341,8 +2294,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2389,8 +2341,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
- /*
- * If failover is pending don't schedule any other reset.
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+
+ /* If failover is pending don't schedule any other reset.
* Instead let the failover complete. If there is already
* a failover reset scheduled, we will detect and drop the
* duplicate reset when walking the ->rwi_list below.
@@ -2405,18 +2358,16 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- ret = adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = EAGAIN;
+ ret = EAGAIN;
goto err;
}
- spin_lock_irqsave(&adapter->rwi_lock, flags);
-
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
reason);
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
@@ -2424,8 +2375,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
- ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
}
@@ -2438,12 +2387,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
- return 0;
+ ret = 0;
err:
+ /* ibmvnic_close() below can block, so drop the lock first */
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
+ if (ret == ENOMEM)
+ ibmvnic_close(netdev);
+
return -ret;
}
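
The reshuffle above takes rwi_lock once at the top of ibmvnic_reset() and, crucially, drops it before the possibly blocking ibmvnic_close() on the ENOMEM path; sleeping while holding a spinlock with interrupts off would be a bug. The resulting shape, sketched with a pthread mutex standing in for spin_lock_irqsave():

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t rwi_lock = PTHREAD_MUTEX_INITIALIZER;

static int enqueue_reset(void) { return 0; }	/* stand-in: queue the rwi */
static void blocking_close(void) { }		/* stand-in: may sleep */

static int schedule_reset(void)
{
	int ret;

	pthread_mutex_lock(&rwi_lock);
	ret = enqueue_reset();		/* all list checks under the lock */
	pthread_mutex_unlock(&rwi_lock);

	/* Blocking work only after the lock is dropped. */
	if (ret == ENOMEM)
		blocking_close();

	return -ret;			/* driver convention: negative errno */
}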
@@ -2511,16 +2465,9 @@ restart_poll:
if (!pending_scrq(adapter, rx_scrq))
break;
- /* The queue entry at the current index is peeked at above
- * to determine that there is a valid descriptor awaiting
- * processing. We want to be sure that the current slot
- * holds a valid descriptor before reading its contents.
- */
- dma_rmb();
next = ibmvnic_next_scrq(adapter, rx_scrq);
- rx_buff =
- (struct ibmvnic_rx_buff *)be64_to_cpu(next->
- rx_comp.correlator);
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(next->rx_comp.correlator);
/* do error checking */
if (next->rx_comp.rc) {
netdev_dbg(netdev, "rx buffer returned with rc %x\n",
@@ -2541,6 +2488,8 @@ restart_poll:
offset = be16_to_cpu(next->rx_comp.off_frame_data);
flags = next->rx_comp.flags;
skb = rx_buff->skb;
+ /* load long_term_buff before copying to skb */
+ dma_rmb();
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
@@ -2583,7 +2532,6 @@ restart_poll:
if (napi_complete_done(napi, frames_processed)) {
enable_scrq_irq(adapter, rx_scrq);
if (pending_scrq(adapter, rx_scrq)) {
- rmb();
if (napi_reschedule(napi)) {
disable_scrq_irq(adapter, rx_scrq);
goto restart_poll;
@@ -2712,9 +2660,9 @@ static void ibmvnic_get_drvinfo(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
- strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, adapter->fw_version,
+ strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
+ strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, adapter->fw_version,
sizeof(info->fw_version));
}
@@ -2826,7 +2774,6 @@ static int ibmvnic_set_channels(struct net_device *netdev,
channels->rx_count, channels->tx_count,
adapter->req_rx_queues, adapter->req_tx_queues);
return ret;
-
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2915,8 +2862,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
- data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
- ibmvnic_stats[i].offset));
+ data[i] = be64_to_cpu(IBMVNIC_GET_STAT
+ (adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2956,6 +2903,7 @@ static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
return 0;
}
+
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2981,9 +2929,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3227,7 +3173,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && (rc != H_FUNCTION))
+ if (rc && rc != H_FUNCTION)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -3258,13 +3204,6 @@ restart_loop:
int total_bytes = 0;
int num_packets = 0;
- /* The queue entry at the current index is peeked at above
- * to determine that there is a valid descriptor awaiting
- * processing. We want to be sure that the current slot
- * holds a valid descriptor before reading its contents.
- */
- dma_rmb();
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
if (next->tx_comp.rcs[i])
@@ -3638,11 +3577,16 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
+ int rc;
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
- return 1;
- else
- return 0;
+ rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
+
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
+ */
+ dma_rmb();
+
+ return rc;
}
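
Folding dma_rmb() into pending_scrq() pairs it with the dma_wmb() added on the transmit path: the writer publishes the buffer contents before the descriptor becomes visible, and the reader checks the valid flag before trusting the contents. The same pairing with C11 acquire/release atomics standing in for the kernel barriers (the flag layout is illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct scrq_entry {
	uint8_t payload[30];
	_Atomic uint8_t flags;	/* bit 0: descriptor valid */
};

/* Producer: publish the payload, then set the valid bit; the release
 * store plays the role of dma_wmb().
 */
static void publish(struct scrq_entry *e, const uint8_t *data)
{
	memcpy(e->payload, data, sizeof(e->payload));
	atomic_store_explicit(&e->flags, 1, memory_order_release);
}

/* Consumer: the acquire load plays the role of dma_rmb(), ordering the
 * valid-flag check before any later read of the payload.
 */
static bool pending(struct scrq_entry *e)
{
	return atomic_load_explicit(&e->flags, memory_order_acquire) & 1;
}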
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
@@ -3661,8 +3605,8 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
}
spin_unlock_irqrestore(&scrq->lock, flags);
- /* Ensure that the entire buffer descriptor has been
- * loaded before reading its contents
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
*/
dma_rmb();
@@ -3712,7 +3656,7 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
int rc;
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
cpu_to_be64(remote_handle),
ioba, num_entries);
@@ -3732,8 +3676,8 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
int rc;
netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
if (!adapter->crq.active &&
crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
@@ -3742,7 +3686,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
}
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_CRQ, ua,
cpu_to_be64(u64_crq[0]),
@@ -3873,7 +3817,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
@@ -3936,15 +3882,15 @@ static int send_login(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_tx_queues; i++) {
if (adapter->tx_scrq[i]) {
- tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
- crq_num);
+ tx_list_p[i] =
+ cpu_to_be64(adapter->tx_scrq[i]->crq_num);
}
}
for (i = 0; i < adapter->req_rx_queues; i++) {
if (adapter->rx_scrq[i]) {
- rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
- crq_num);
+ rx_list_p[i] =
+ cpu_to_be64(adapter->rx_scrq[i]->crq_num);
}
}
@@ -3960,7 +3906,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Login Buffer:\n");
for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_buf))[i]);
+ ((unsigned long *)(adapter->login_buf))[i]);
}
memset(&crq, 0, sizeof(crq));
@@ -4328,7 +4274,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(buf))[i]);
+ ((unsigned long *)(buf))[i]);
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -4487,8 +4433,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be64_to_cpu(crq->request_capability_rsp.
- number), name);
+ (long)be64_to_cpu(crq->request_capability_rsp.number),
+ name);
if (be16_to_cpu(crq->request_capability_rsp.capability) ==
REQ_MTU) {
@@ -4558,7 +4504,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_rsp_buf))[i]);
+ ((unsigned long *)(adapter->login_rsp_buf))[i]);
}
/* Sanity checks */
@@ -4901,8 +4847,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
@@ -4918,7 +4864,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ if (rc && rc != -EBUSY) {
+ /* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (e.g. during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+ netdev_err(netdev,
+ "Error %ld scheduling failover reset\n",
+ rc);
+ adapter->failover_pending = false;
+ }
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
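The new error path separates "the reset could not be scheduled at all" from -EBUSY, which here means a FAILOVER reset is already queued or the adapter is being removed, so someone else owns the flag. A compact, self-contained model of that tolerate-EBUSY idiom (the scheduling function is a stand-in, not the driver's):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool failover_pending = true;

/* Stand-in for ibmvnic_reset(): 0 on success, -EBUSY if a reset is
 * already queued or the adapter is going away, another -errno on a
 * real scheduling failure (e.g. -ENOMEM). */
static int schedule_failover_reset(void)
{
	return -EBUSY;	/* pretend one is already queued */
}

static void handle_failover_event(void)
{
	int rc = schedule_failover_reset();

	if (rc && rc != -EBUSY) {
		/* No reset will run, so nothing else will clear the
		 * flag; clear it here. -EBUSY is deliberately left
		 * alone. */
		fprintf(stderr, "error %d scheduling failover reset\n", rc);
		failover_pending = false;
	}
}

int main(void) { handle_failover_event(); return !failover_pending; }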
@@ -5084,6 +5045,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
@@ -5355,8 +5322,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- spin_lock_init(&adapter->stats_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
__ibmvnic_delayed_reset);
@@ -5438,12 +5403,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (test_bit(0, &adapter->resetting)) {
- spin_unlock_irqrestore(&adapter->state_lock, flags);
- return -EBUSY;
- }
+ /* If ibmvnic_reset() is scheduling a reset, wait for it to
+ * finish. Then, set the state to REMOVING to prevent it from
+ * scheduling any more work and to have reset functions ignore
+ * any resets that have already been scheduled. Drop the lock
+ * after setting state, so __ibmvnic_reset() which is called
+ * from the flush_work() below, can make progress.
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
adapter->state = VNIC_REMOVING;
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
spin_unlock_irqrestore(&adapter->state_lock, flags);
flush_work(&adapter->ibmvnic_reset);
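The comment pins down a fixed acquisition order, state_lock before rwi_lock, matching the ordering rules documented in ibmvnic.h below; taking both locks in one global order is what keeps remove and reset scheduling from deadlocking against each other. A reduced model with pthread mutexes standing in for the kernel's IRQ-safe spinlocks:

#include <pthread.h>

/* Two locks with a documented order: always state before rwi. */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rwi_lock   = PTHREAD_MUTEX_INITIALIZER;

enum vnic_state { VNIC_OPEN, VNIC_REMOVING };
static enum vnic_state state = VNIC_OPEN;

static void mark_removing(void)
{
	pthread_mutex_lock(&state_lock);	/* state lock first ... */
	pthread_mutex_lock(&rwi_lock);		/* ... then rwi lock */
	state = VNIC_REMOVING;
	pthread_mutex_unlock(&rwi_lock);	/* drop rwi so the reset
						 * worker can drain */
	pthread_mutex_unlock(&state_lock);
}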
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index c09c3f6bba9f..806aa75a4e86 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,7 +31,7 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_MAX_IND_DESCS 16
#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
@@ -845,6 +845,7 @@ struct ibmvnic_crq_queue {
union ibmvnic_crq *msgs;
int size, cur;
dma_addr_t msg_token;
+ /* Used for serialization of msgs, cur */
spinlock_t lock;
bool active;
char name[32];
@@ -876,6 +877,7 @@ struct ibmvnic_sub_crq_queue {
unsigned int irq;
unsigned int pool_index;
int scrq_num;
+ /* Used for serialization of msgs, cur */
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
@@ -985,7 +987,6 @@ struct ibmvnic_adapter {
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
struct completion stats_done;
- spinlock_t stats_lock;
int replenish_no_mem;
int replenish_add_buff_success;
int replenish_add_buff_failure;
@@ -1080,9 +1081,16 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
+ /* Used for serialization of state field. When taking both state
+ * and rwi locks, take state lock first.
+ */
+ spinlock_t state_lock;
enum ibmvnic_reset_reason reset_reason;
- spinlock_t rwi_lock;
struct list_head rwi_list;
+ /* Used for serialization of rwi_list. When taking both state
+ * and rwi locks, take state lock first.
+ */
+ spinlock_t rwi_lock;
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
@@ -1096,7 +1104,4 @@ struct ibmvnic_adapter {
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
-
- /* Used for serializatin of state field */
- spinlock_t state_lock;
};
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 8cc651d37a7f..f8d78af76d7d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1739,10 +1739,10 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr_t dma_addr;
cb->command = nic->tx_command;
- dma_addr = pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
dev_kfree_skb_any(skb);
skb = NULL;
return -ENOMEM;
@@ -1828,10 +1828,10 @@ static int e100_tx_clean(struct nic *nic)
dev->stats.tx_packets++;
dev->stats.tx_bytes += cb->skb->len;
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(cb->skb);
cb->skb = NULL;
tx_cleaned = 1;
@@ -1855,10 +1855,10 @@ static void e100_clean_cbs(struct nic *nic)
while (nic->cbs_avail != nic->params.cbs.count) {
struct cb *cb = nic->cb_to_clean;
if (cb->skb) {
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb(cb->skb);
}
nic->cb_to_clean = nic->cb_to_clean->next;
@@ -1925,10 +1925,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
/* Init, and map the RFD. */
skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
- rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
@@ -1941,8 +1941,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
if (rx->prev->skb) {
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
- pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ rx->prev->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
return 0;
@@ -1961,8 +1963,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
return -EAGAIN;
/* Need to sync before taking a peek at cb_complete bit */
- pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
@@ -1981,9 +1983,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_FROM_DEVICE);
return -ENODATA;
}
@@ -1995,8 +1997,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
actual_size = RFD_BUF_LEN - sizeof(struct rfd);
/* Get data */
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
/* If this buffer has the el bit, but we think the receiver
* is still running, check to see if it really stopped while
@@ -2097,22 +2099,25 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
(struct rfd *)new_before_last_rx->skb->data;
new_before_last_rfd->size = 0;
new_before_last_rfd->command |= cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- new_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ new_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
/* Now that we have a new stopping point, we can clear the old
* stopping point. We must sync twice to get the proper
* ordering on the hardware side of things. */
old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
if (restart_required) {
@@ -2134,8 +2139,9 @@ static void e100_rx_clean_list(struct nic *nic)
if (nic->rxs) {
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
if (rx->skb) {
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev,
+ rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
dev_kfree_skb(rx->skb);
}
}
@@ -2177,8 +2183,8 @@ static int e100_rx_alloc_list(struct nic *nic)
before_last = (struct rfd *)rx->skb->data;
before_last->command |= cpu_to_le16(cb_el);
before_last->size = 0;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
@@ -2377,8 +2383,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
msleep(10);
- pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb->data, ETH_DATA_LEN))
@@ -2751,16 +2757,16 @@ static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e100_alloc(struct nic *nic)
{
- nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
- &nic->dma_addr);
+ nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
+ &nic->dma_addr, GFP_KERNEL);
return nic->mem ? 0 : -ENOMEM;
}
static void e100_free(struct nic *nic)
{
if (nic->mem) {
- pci_free_consistent(nic->pdev, sizeof(struct mem),
- nic->mem, nic->dma_addr);
+ dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
+ nic->mem, nic->dma_addr);
nic->mem = NULL;
}
}
@@ -2853,7 +2859,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
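The e100 hunks are a mechanical migration from the deprecated pci_* DMA wrappers to the generic DMA API: the pci_dev argument becomes &pdev->dev, PCI_DMA_{TO,FROM}DEVICE and PCI_DMA_BIDIRECTIONAL become the DMA_* directions, and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP_KERNEL. A minimal sketch of the converted mapping path (the helper and its 0-on-failure convention are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Old style (removed by the hunks above):
 *   addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *   if (pci_dma_mapping_error(pdev, addr)) ...
 *
 * New style, as used throughout e100 after this patch: */
static dma_addr_t map_tx_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, addr))
		return 0;	/* callers treat 0 as failure in this sketch */
	return addr;
}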
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5e28cf4fa2cd..042de276e632 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2632,7 +2632,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (unlikely(adapter->link_speed != SPEED_1000)) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
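Once these callbacks are wired into e1000_ethtool_ops below, the flag is reachable through the standard ethtool private-flags interface: assuming an interface named eth0, "ethtool --show-priv-flags eth0" lists s0ix-enabled and "ethtool --set-priv-flags eth0 s0ix-enabled on" toggles it, with the set rejected (-EINVAL) on pre-CNP MACs per the check above.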
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..0ac8d79a7987 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
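The loop is a bounded poll: the Management Engine gets up to 2.5 s (250 steps of ~10 ms each), but anything past the 1 s mark is flagged as a firmware bug rather than silently tolerated. A generic, self-contained sketch of that poll-with-warn-threshold pattern (the hardware condition and sleep are faked):

#include <stdbool.h>

static unsigned int fake_hw_counter;

/* Illustrative poll target: pretend the bit clears after 120 steps. */
static bool condition_cleared(void)
{
	return ++fake_hw_counter > 120;
}

static void sleep_ms(unsigned int ms) { (void)ms; /* stub */ }

/* Poll up to 'limit' steps of 'step_ms'; returns -1 on hard timeout,
 * 1 if the condition cleared but only after the 'warn' threshold
 * (the "firmware bug" case in the hunk above), 0 otherwise. */
static int poll_with_warn(unsigned int limit, unsigned int warn,
			  unsigned int step_ms, unsigned int *elapsed_ms)
{
	unsigned int i = 0;

	while (!condition_cleared()) {
		if (i++ == limit)
			return -1;
		sleep_ms(step_ms);
	}
	*elapsed_ms = i * step_ms;
	return i > warn ? 1 : 0;
}

int main(void)
{
	unsigned int ms;
	int rc = poll_with_warn(250, 100, 10, &ms); /* e1000e's values */

	/* here rc == 1 and ms == 1200: cleared, but past the warn mark */
	return rc < 0;
}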
@@ -3875,13 +3886,6 @@ static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* And invalidate the previously valid segment by setting
- * its signature word (0x13) high_byte to 0b. This can be
- * done without an erase because flash erase sets all bits
- * to 1's. We can write 1's to 0's without an erase
- */
- act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
-
/* offset in words but we read dword */
act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 99b8252eb969..247f44f4cb30 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -194,17 +194,12 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
DMA_FROM_DEVICE);
}
-static inline bool fm10k_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
struct page *page,
unsigned int __maybe_unused truesize)
{
- /* avoid re-using remote pages */
- if (unlikely(fm10k_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -265,8 +260,8 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
if (likely(size <= FM10K_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!fm10k_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (dev_page_is_reusable(page))
return true;
/* this page cannot be reused so discard it */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 5c19ff452558..2fb52bd6fc0e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1531,8 +1531,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..cd53981fa5e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_H_
#define _I40E_H_
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
@@ -210,14 +213,18 @@ struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
- u8 ip4_proto;
+ u8 ipl4_proto;
/* TX packet view of src and dst */
__be32 dst_ip;
__be32 src_ip;
+ __be32 dst_ip6[4];
+ __be32 src_ip6[4];
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
+ __be16 vlan_etype;
+ __be16 vlan_tag;
/* Flexible data to match within the packet payload */
__be16 flex_word;
u16 flex_offset;
@@ -286,6 +293,9 @@ struct i40e_cloud_filter {
u8 tunnel_type;
};
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -471,6 +481,11 @@ struct i40e_pf {
u16 fd_sctp4_filter_cnt;
u16 fd_ip4_filter_cnt;
+ u16 fd_tcp6_filter_cnt;
+ u16 fd_udp6_filter_cnt;
+ u16 fd_sctp6_filter_cnt;
+ u16 fd_ip6_filter_cnt;
+
/* Flexible filter table values that need to be programmed into
* hardware, which expects L3 and L4 to be programmed separately. We
* need to ensure that the values are in ascended order and don't have
@@ -623,6 +638,8 @@ struct i40e_pf {
u16 dcbx_cap;
struct i40e_filter_control_settings filter_settings;
+ struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */
+ struct i40e_dcbx_config tmp_cfg;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
@@ -1119,6 +1136,12 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
int i40e_count_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
+{
+ return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+}
+
+void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
@@ -1128,6 +1151,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi);
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg);
+int i40e_dcb_sw_default_config(struct i40e_pf *pf);
#endif /* CONFIG_I40E_DCB */
void i40e_ptp_rx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hang(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 1e960c3c7ef0..ce626eace692 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
@@ -1080,6 +1080,7 @@ struct i40e_aqc_add_remove_control_packet_filter {
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
__le16 seid;
__le16 queue;
u8 reserved[2];
@@ -2184,6 +2185,14 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index adc9e4fa4789..ec19e18305ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include "i40e.h"
#include "i40e_type.h"
@@ -3662,6 +3662,46 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
}
/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code
+i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_lldp_set_local_mib *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buff_size);
+ cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
+ cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
* i40e_aq_cfg_lldp_mib_change_event
* @hw: pointer to the hw struct
* @enable_update: Enable or Disable event posting
@@ -4480,6 +4520,29 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
}
/**
+ * i40e_aq_suspend_port_tx
+ * @hw: pointer to the hardware structure
+ * @seid: port seid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Suspend port's Tx traffic
+ **/
+i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
+ cmd->vsi_seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_resume_port_tx
* @hw: pointer to the hardware structure
* @cmd_details: pointer to command details structure or NULL
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9de503c5f99b..243b0d2b7b72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include "i40e_adminq.h"
#include "i40e_prototype.h"
@@ -933,6 +933,953 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
}
/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+ struct i40e_virt_mem mem;
+ i40e_status ret;
+ u8 *lldpmib;
+
+ if (!lldp_status)
+ return I40E_ERR_PARAM;
+
+ /* Allocate buffer for the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ /* MIB is not available yet but the agent is running */
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ ret = 0;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+ ret = 0;
+ }
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
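The Priority Assignment Table packs two 4-bit priorities per octet, even-indexed priority in the high nibble (I40E_IEEE_ETS_PRIO_1_SHIFT is the high-nibble shift). A tiny runnable illustration of that packing with made-up priority values:

#include <stdint.h>
#include <stdio.h>

/* Pack eight 4-bit priorities into four octets, high nibble first,
 * exactly as the Priority Assignment Table loops above do. */
int main(void)
{
	uint8_t prio[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };	/* example values */
	uint8_t buf[4];
	int i;

	for (i = 0; i < 4; i++)
		buf[i] = (uint8_t)(((prio[2 * i] & 0xF) << 4) |
				   (prio[2 * i + 1] & 0xF));

	for (i = 0; i < 4; i++)
		printf("%02x ", buf[i]);	/* prints: 01 23 45 67 */
	printf("\n");
	return 0;
}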
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = htons(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: pointer to modified dcbx config structure
+ * @tlvid: tlv id to be added
+ *
+ * Add tlv information
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ i40e_status ret;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
+ * @lldpmib: pointer to mib to be output
+ * @miblen: pointer to u16 for length of lldpmib
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Convert the DCB configuration to LLDP MIB format, ready to be sent to FW
+ **/
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid, typelength;
+ struct i40e_lldp_org_tlv *tlv;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ tlvid = I40E_TLV_ID_START;
+ do {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= I40E_TLV_ID_END_OF_LLDPPDU ||
+ offset >= I40E_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ } while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU);
+ *miblen = offset;
+ return I40E_SUCCESS;
+}
+
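Each LLDP TLV header packs a 7-bit type and a 9-bit length into one big-endian 16-bit word; that is what the walker above decodes (via ntohs and the LEN mask) to find the start of the next TLV. A small standalone decoder for the same layout (the constants mirror the I40E_LLDP_TLV_* shift/mask values, the sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* LLDP TLV header: | type:7 | length:9 |, big-endian on the wire. */
#define LLDP_TLV_TYPE_SHIFT	9
#define LLDP_TLV_LEN_MASK	0x01FF

static void decode_typelength(const uint8_t *hdr)
{
	uint16_t tl = (uint16_t)((hdr[0] << 8) | hdr[1]);
	unsigned int type = tl >> LLDP_TLV_TYPE_SHIFT;
	unsigned int len  = tl & LLDP_TLV_LEN_MASK;

	/* The next TLV starts 2 (header) + len bytes further on, the
	 * same arithmetic i40e_dcb_config_to_lldp() uses to advance. */
	printf("type=%u len=%u next=+%u\n", type, len, 2 + len);
}

int main(void)
{
	const uint8_t org_tlv[] = { 0xFE, 0x19 };	/* type 127, len 25 */

	decode_typelength(org_tlv);
	return 0;
}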
+/**
+ * i40e_dcb_hw_rx_fifo_config
+ * @hw: pointer to the hw struct
+ * @ets_mode: Strict Priority or Round Robin mode
+ * @non_ets_mode: Strict Priority or Round Robin
+ * @max_exponent: Exponent to calculate max refill credits
+ * @lltc_map: Low latency TC bitmap
+ *
+ * Configure HW Rx FIFO as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent,
+ u8 lltc_map)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
+ I40E_PRTDCB_RETSC_LLTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSC, reg);
+}
+
+/**
+ * i40e_dcb_hw_rx_cmd_monitor_config
+ * @hw: pointer to the hw struct
+ * @num_tc: Total number of traffic class
+ * @num_ports: Total number of ports on device
+ *
+ * Configure HW Rx command monitor as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports)
+{
+ u32 threshold;
+ u32 fifo_size;
+ u32 reg;
+
+ /* Set the threshold and fifo_size based on number of ports */
+ switch (num_ports) {
+ case 1:
+ threshold = I40E_DCB_1_PORT_THRESHOLD;
+ fifo_size = I40E_DCB_1_PORT_FIFO_SIZE;
+ break;
+ case 2:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ case 4:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ default:
+ i40e_debug(hw, I40E_DEBUG_DCB, "Invalid num_ports %u.\n",
+ (u32)num_ports);
+ return;
+ }
+
+ /* The hardware manual describes setting up of I40E_PRT_SWR_PM_THR
+ * based on the number of ports and traffic classes for a given port as
+ * part of DCB configuration.
+ */
+ reg = rd32(hw, I40E_PRT_SWR_PM_THR);
+ reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
+ I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ wr32(hw, I40E_PRT_SWR_PM_THR, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RPPMC);
+ reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
+ I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ wr32(hw, I40E_PRTDCB_RPPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_pfc_config
+ * @hw: pointer to the hw struct
+ * @pfc_en: Bitmap of PFC enabled priorities
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Priority Flow Controller as part of DCB configuration.
+ **/
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc)
+{
+ u16 refresh_time = (u16)I40E_DEFAULT_PAUSE_TIME / 2;
+ u32 link_speed = hw->phy.link_info.link_speed;
+ u8 first_pfc_prio = 0;
+ u8 num_pfc_tc = 0;
+ u8 tc2pfc = 0;
+ u32 reg;
+ u8 i;
+
+ /* Get Number of PFC TCs and TC2PFC map */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (pfc_en & BIT(i)) {
+ if (!first_pfc_prio)
+ first_pfc_prio = i;
+ /* Set bit for the PFC TC */
+ tc2pfc |= BIT(prio_tc[i]);
+ num_pfc_tc++;
+ }
+ }
+
+ switch (link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ reg = rd32(hw, I40E_PRTDCB_MFLCN);
+ reg |= BIT(I40E_PRTDCB_MFLCN_DPF_SHIFT) &
+ I40E_PRTDCB_MFLCN_DPF_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ if (pfc_en) {
+ reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCM_MASK;
+ reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_MFLCN, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_FCCFG);
+ reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
+ if (pfc_en)
+ reg |= (I40E_DCB_PFC_ENABLED <<
+ I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
+ I40E_PRTDCB_FCCFG_TFCE_MASK;
+ wr32(hw, I40E_PRTDCB_FCCFG, reg);
+
+ /* FCTTV and FCRTV to be set by default */
+ break;
+ case I40E_LINK_SPEED_40GB:
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ reg |= BIT(I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
+
+ for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ if (pfc_en) {
+ reg |= ((u32)refresh_time <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ }
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
+ }
+ /* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA default value is 0xFFFF
+ * for all user priorities
+ */
+ break;
+ }
+
+ reg = rd32(hw, I40E_PRTDCB_TC2PFC);
+ reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
+ I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ wr32(hw, I40E_PRTDCB_TC2PFC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RUP);
+ reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
+ I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ wr32(hw, I40E_PRTDCB_RUP, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TDPMC);
+ reg &= ~I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TDPMC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TCPMC);
+ reg &= ~I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TCPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_set_num_tc
+ * @hw: pointer to the hw struct
+ * @num_tc: number of traffic classes
+ *
+ * Configure number of traffic classes in HW
+ **/
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
+ I40E_PRTDCB_GENC_NUMTC_MASK;
+ wr32(hw, I40E_PRTDCB_GENC, reg);
+}
+
+/**
+ * i40e_dcb_hw_get_num_tc
+ * @hw: pointer to the hw struct
+ *
+ * Returns number of traffic classes configured in HW
+ **/
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
+ I40E_PRTDCB_GENC_NUMTC_SHIFT);
+}
+
+/**
+ * i40e_dcb_hw_rx_ets_bw_config
+ * @hw: pointer to the hw struct
+ * @bw_share: Bandwidth share indexed per traffic class
+ * @mode: Strict Priority or Round Robin mode between UP sharing same
+ * traffic class
+ * @prio_type: TC is ETS enabled or strict priority
+ *
+ * Configure HW Rx ETS bandwidth as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type)
+{
+ u32 reg;
+ u8 i;
+
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
+ reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+ I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
+ reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
+ }
+}
+
+/**
+ * i40e_dcb_hw_rx_up2tc_config
+ * @hw: pointer to the hw struct
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Rx UP2TC map as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RUP2TC);
+#define I40E_UP2TC_REG(val, i) \
+ (((val) << I40E_PRTDCB_RUP2TC_UP##i##TC_SHIFT) & \
+ I40E_PRTDCB_RUP2TC_UP##i##TC_MASK)
+
+ reg |= I40E_UP2TC_REG(prio_tc[0], 0);
+ reg |= I40E_UP2TC_REG(prio_tc[1], 1);
+ reg |= I40E_UP2TC_REG(prio_tc[2], 2);
+ reg |= I40E_UP2TC_REG(prio_tc[3], 3);
+ reg |= I40E_UP2TC_REG(prio_tc[4], 4);
+ reg |= I40E_UP2TC_REG(prio_tc[5], 5);
+ reg |= I40E_UP2TC_REG(prio_tc[6], 6);
+ reg |= I40E_UP2TC_REG(prio_tc[7], 7);
+
+ wr32(hw, I40E_PRTDCB_RUP2TC, reg);
+}
+
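I40E_UP2TC_REG leans on preprocessor token pasting (##) so a single macro can reach all eight per-priority SHIFT/MASK name pairs. A reduced, runnable model of the trick with two illustrative field definitions (the real driver defines eight I40E_PRTDCB_RUP2TC_UPnTC_{SHIFT,MASK} pairs the same way):

#include <stdio.h>

/* Illustrative 3-bit fields at different offsets in one register. */
#define REG_UP0TC_SHIFT	0
#define REG_UP0TC_MASK	(0x7 << REG_UP0TC_SHIFT)
#define REG_UP1TC_SHIFT	3
#define REG_UP1TC_MASK	(0x7 << REG_UP1TC_SHIFT)

/* Token pasting builds REG_UP<i>TC_SHIFT / _MASK from the literal i. */
#define UP2TC_REG(val, i) \
	(((val) << REG_UP##i##TC_SHIFT) & REG_UP##i##TC_MASK)

int main(void)
{
	unsigned int reg = 0;

	reg |= UP2TC_REG(2u, 0);	/* priority 0 -> TC 2 */
	reg |= UP2TC_REG(5u, 1);	/* priority 1 -> TC 5 */
	printf("reg=0x%x\n", reg);	/* 0x2 | (0x5 << 3) = 0x2a */
	return 0;
}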
+/**
+ * i40e_dcb_hw_calculate_pool_sizes - configure dcb pool sizes
+ * @hw: pointer to the hw struct
+ * @num_ports: Number of available ports on the device
+ * @eee_enabled: EEE enabled for the given port
+ * @pfc_en: Bit map of PFC enabled traffic classes
+ * @mfs_tc: Array of max frame size for each traffic class
+ * @pb_cfg: pointer to packet buffer configuration
+ *
+ * Calculate the shared and dedicated per TC pool sizes,
+ * watermarks and threshold values.
+ **/
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg)
+{
+ u32 pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 low_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 total_pool_size = 0;
+ int shared_pool_size; /* Need signed variable */
+ u32 port_pb_size;
+ u32 mfs_max = 0;
+ u32 pcirtt;
+ u8 i;
+
+ /* Get the MFS(max) for the port */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (mfs_tc[i] > mfs_max)
+ mfs_max = mfs_tc[i];
+ }
+
+ pcirtt = I40E_BT2B(I40E_PCIRTT_LINK_SPEED_10G);
+
+ /* Calculate effective Rx PB size per port */
+ port_pb_size = I40E_DEVICE_RPB_SIZE / num_ports;
+ if (eee_enabled)
+ port_pb_size -= I40E_BT2B(I40E_EEE_TX_LPI_EXIT_TIME);
+ port_pb_size -= mfs_max;
+
+ /* Step 1 Calculating tc pool/shared pool sizes and watermarks */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (pfc_en & BIT(i)) {
+ low_wm[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = low_wm[i];
+ high_wm[i] += ((mfs_max > I40E_MAX_FRAME_SIZE)
+ ? mfs_max : I40E_MAX_FRAME_SIZE);
+ pool_size[i] = high_wm[i];
+ pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max,
+ mfs_tc[i]));
+ } else {
+ low_wm[i] = 0;
+ pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = pool_size[i];
+ }
+ total_pool_size += pool_size[i];
+ }
+
+ shared_pool_size = port_pb_size - total_pool_size;
+ if (shared_pool_size > 0) {
+ pb_cfg->shared_pool_size = shared_pool_size;
+ pb_cfg->shared_pool_high_wm = shared_pool_size;
+ pb_cfg->shared_pool_low_wm = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pb_cfg->shared_pool_low_thresh[i] = 0;
+ pb_cfg->shared_pool_high_thresh[i] = shared_pool_size;
+ pb_cfg->tc_pool_size[i] = pool_size[i];
+ pb_cfg->tc_pool_high_wm[i] = high_wm[i];
+ pb_cfg->tc_pool_low_wm[i] = low_wm[i];
+ }
+
+ } else {
+ i40e_debug(hw, I40E_DEBUG_DCB,
+ "The shared pool size for the port is negative %d.\n",
+ shared_pool_size);
+ }
+}
+
+/**
+ * i40e_dcb_hw_rx_pb_config
+ * @hw: pointer to the hw struct
+ * @old_pb_cfg: Existing Rx Packet buffer configuration
+ * @new_pb_cfg: New Rx Packet buffer configuration
+ *
+ * Program the Rx Packet Buffer registers.
+ **/
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg)
+{
+ u32 old_val;
+ u32 new_val;
+ u32 reg;
+ u8 i;
+
+ /* The Rx Packet buffer register programming needs to be done in a
+ * certain order and the following code is based on that
+ * requirement.
+ */
+
+ /* Program the shared pool low water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+
+ /* Write Dedicated Pool Sizes per TC */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ new_val = new_pb_cfg->tc_pool_size[i];
+ reg = rd32(hw, I40E_PRTRPB_DPS(i));
+ reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
+ I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DPS(i), reg);
+ }
+
+ /* Write Shared Pool Size per port */
+ new_val = new_pb_cfg->shared_pool_size;
+ reg = rd32(hw, I40E_PRTRPB_SPS);
+ reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
+ I40E_PRTRPB_SPS_SPS_MASK;
+ wr32(hw, I40E_PRTRPB_SPS, reg);
+
+ /* Program the shared pool low water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+}
+
+/**
* _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
* @hw: pointer to the HW structure
* @lldp_cfg: pointer to hold lldp configuration variables
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2b1a2e81ac73..2370ceecb061 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
#include "i40e_type.h"
+#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
#define I40E_DCBX_STATUS_DISABLED 7
#define I40E_TLV_TYPE_END 0
@@ -22,6 +24,7 @@
#define I40E_CEE_DCBX_OUI 0x001b21
#define I40E_CEE_DCBX_TYPE 2
+#define I40E_CEE_SUBTYPE_CTRL 1
#define I40E_CEE_SUBTYPE_PG_CFG 2
#define I40E_CEE_SUBTYPE_PFC_CFG 3
#define I40E_CEE_SUBTYPE_APP_PRI 4
@@ -64,6 +67,8 @@
#define I40E_IEEE_TSA_ETS 2
/* Defines for IEEE PFC TLV */
+#define I40E_DCB_PFC_ENABLED 2
+#define I40E_DCB_PFC_FORCED_NUM_TC 2
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
@@ -77,9 +82,30 @@
#define I40E_IEEE_APP_PRIO_SHIFT 5
#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
-#pragma pack(1)
+#define I40E_IEEE_TLV_HEADER_LENGTH 2
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+/* Defines for default SW DCB config */
+#define I40E_IEEE_DEFAULT_ETS_TCBW 100
+#define I40E_IEEE_DEFAULT_ETS_WILLING 1
+#define I40E_IEEE_DEFAULT_PFC_WILLING 1
+#define I40E_IEEE_DEFAULT_NUM_APPS 1
+#define I40E_IEEE_DEFAULT_APP_PRIO 3
+#pragma pack(1)
/* IEEE 802.1AB LLDP Organization specific TLV */
struct i40e_lldp_org_tlv {
__be16 typelength;
@@ -102,7 +128,9 @@ struct i40e_cee_ctrl_tlv {
struct i40e_cee_feat_tlv {
struct i40e_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
u8 subtype;
u8 tlvinfo[1];
};
@@ -116,13 +144,140 @@ struct i40e_cee_app_prio {
};
#pragma pack()
+enum i40e_get_fw_lldp_status_resp {
+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
+/* Data structures to pass for SW DCBX */
+struct i40e_rx_pb_config {
+ u32 shared_pool_size;
+ u32 shared_pool_high_wm;
+ u32 shared_pool_low_wm;
+ u32 shared_pool_high_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 shared_pool_low_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_low_wm[I40E_MAX_TRAFFIC_CLASS];
+};
+
+enum i40e_dcb_arbiter_mode {
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY = 0,
+ I40E_DCB_ARB_MODE_ROUND_ROBIN = 1
+};
+
+#define I40E_DCB_DEFAULT_MAX_EXPONENT 0xB
+#define I40E_DEFAULT_PAUSE_TIME 0xffff
+#define I40E_MAX_FRAME_SIZE 4608 /* 4.5 KB */
+
+#define I40E_DEVICE_RPB_SIZE 968000 /* 968 KB */
+
+/* BitTimes (BT) conversion */
+#define I40E_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define I40E_B2BT(BT) ((BT) * 8)
+#define I40E_BT2B(BT) (((BT) + (8 - 1)) / 8)
+
+/* Max Frame(TC) = MFS(max) + MFS(TC) */
+#define I40E_MAX_FRAME_TC(mfs_max, mfs_tc) I40E_B2BT((mfs_max) + (mfs_tc))
+
+/* EEE Tx LPI Exit time in Bit Times */
+#define I40E_EEE_TX_LPI_EXIT_TIME 142500
+
+/* PCI Round Trip Time in Bit Times */
+#define I40E_PCIRTT_LINK_SPEED_10G 20000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_20G 40000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_40G 80000
+
+/* PFC Frame Delay Bit Times */
+#define I40E_PFC_FRAME_DELAY 672
+
+/* Worst case Cable (10GBase-T) Delay Bit Times */
+#define I40E_CABLE_DELAY 5556
+
+/* Higher Layer Delay @10G Bit Times */
+#define I40E_HIGHER_LAYER_DELAY_10G 6144
+
+/* Interface Delays in Bit Times */
+/* TODO: Add for other link speeds 20G/40G/etc. */
+#define I40E_INTERFACE_DELAY_10G_MAC_CONTROL 8192
+#define I40E_INTERFACE_DELAY_10G_MAC 8192
+#define I40E_INTERFACE_DELAY_10G_RS 8192
+
+#define I40E_INTERFACE_DELAY_XGXS 2048
+#define I40E_INTERFACE_DELAY_XAUI 2048
+
+#define I40E_INTERFACE_DELAY_10G_BASEX_PCS 2048
+#define I40E_INTERFACE_DELAY_10G_BASER_PCS 3584
+#define I40E_INTERFACE_DELAY_LX4_PMD 512
+#define I40E_INTERFACE_DELAY_CX4_PMD 512
+#define I40E_INTERFACE_DELAY_SERIAL_PMA 512
+#define I40E_INTERFACE_DELAY_PMD 512
+
+#define I40E_INTERFACE_DELAY_10G_BASET 25600
+
+/* Hardware RX DCB config related defines */
+#define I40E_DCB_1_PORT_THRESHOLD 0xF
+#define I40E_DCB_1_PORT_FIFO_SIZE 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC 0xF
+#define I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC 0xC
+#define I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC 0x9
+#define I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC 0x6
+#define I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC 0x4
+#define I40E_DCB_WATERMARK_START_FACTOR 0x2
+
+/* delay values for 10G BaseT in Bit Times */
+#define I40E_INTERFACE_DELAY_10G_COPPER \
+ (I40E_INTERFACE_DELAY_10G_MAC + (2 * I40E_INTERFACE_DELAY_XAUI) \
+ + I40E_INTERFACE_DELAY_10G_BASET)
+#define I40E_DV_TC(mfs_max, mfs_tc) \
+ ((2 * I40E_MAX_FRAME_TC(mfs_max, mfs_tc)) \
+ + I40E_PFC_FRAME_DELAY \
+ + (2 * I40E_CABLE_DELAY) \
+ + (2 * I40E_INTERFACE_DELAY_10G_COPPER) \
+ + I40E_HIGHER_LAYER_DELAY_10G)
+/* Standard DV(TC): DV(TC) plus one max-size frame, in Bit Times */
+#define I40E_STD_DV_TC(mfs_max, mfs_tc) \
+	(I40E_DV_TC(mfs_max, mfs_tc) + I40E_B2BT(mfs_max))
+
+/* APIs for SW DCBX */
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent, u8 lltc_map);
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports);
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc);
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type);
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg);
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg);
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
+ u16 *status);
i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
+ struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
+i40e_status i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status);
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 9deae9a35423..0345132a0ef5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifdef CONFIG_I40E_DCB
#include "i40e.h"
#include <net/dcbnl.h>
+#define I40E_DCBNL_STATUS_SUCCESS 0
+#define I40E_DCBNL_STATUS_ERROR 1
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_dcb_app_priority_table *app);
/**
* i40e_get_pfc_delay - retrieve PFC Link Delay
* @hw: pointer to hardware struct
@@ -33,14 +37,13 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
struct i40e_dcbx_config *dcbxcfg;
- struct i40e_hw *hw = &pf->hw;
if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
- dcbxcfg = &hw->local_dcbx_config;
+ dcbxcfg = &pf->hw.local_dcbx_config;
ets->willing = dcbxcfg->etscfg.willing;
- ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->ets_cap = I40E_MAX_TRAFFIC_CLASS;
ets->cbs = dcbxcfg->etscfg.cbs;
memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
sizeof(ets->tc_tx_bw));
@@ -84,7 +87,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
pfc->mbc = dcbxcfg->pfc.mbc;
i40e_get_pfc_delay(hw, &pfc->delay);
- /* Get Requests/Indicatiosn */
+ /* Get Requests/Indications */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
pfc->requests[i] = pf->stats.priority_xoff_tx[i];
pfc->indications[i] = pf->stats.priority_xoff_rx[i];
@@ -94,6 +97,713 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
}
/**
+ * i40e_dcbnl_ieee_setets - set IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Set IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Update the ETS configuration for temp */
+ pf->tmp_cfg.etscfg.willing = ets->willing;
+ pf->tmp_cfg.etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.etscfg.cbs = ets->cbs;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pf->tmp_cfg.etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ pf->tmp_cfg.etscfg.tsatable[i] = ets->tc_tsa[i];
+ pf->tmp_cfg.etscfg.prioritytable[i] = ets->prio_tc[i];
+ pf->tmp_cfg.etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ pf->tmp_cfg.etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ pf->tmp_cfg.etsrec.prioritytable[i] = ets->reco_prio_tc[i];
+ }
+
+ /* Commit changes to HW */
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB ETS configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setpfc - set local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Sets local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ if (pfc->pfc_cap)
+ pf->tmp_cfg.pfc.pfccap = pfc->pfc_cap;
+ else
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.pfc.pfcenable = pfc->pfc_en;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB PFC configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setapp - set local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Sets local IEEE App configuration
+ **/
+static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcb_app_priority_table new_app;
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ if (old_cfg->numapps == I40E_DCBX_MAX_APPS)
+ return -EINVAL;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ new_app.selector = app->selector;
+ new_app.protocolid = app->protocol;
+ new_app.priority = app->priority;
+	/* App already present in the internal config, nothing to add */
+ if (i40e_dcbnl_find_app(old_cfg, &new_app))
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ /* Add the app */
+ pf->tmp_cfg.app[pf->tmp_cfg.numapps++] = new_app;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_delapp - delete local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Deletes local IEEE App configuration other than the first application
+ * required by firmware
+ **/
+static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, j, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Need one app for FW so keep it */
+ if (old_cfg->numapps == 1)
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Find and reset the app */
+ for (i = 1; i < pf->tmp_cfg.numapps; i++) {
+ if (app->selector == pf->tmp_cfg.app[i].selector &&
+ app->protocol == pf->tmp_cfg.app[i].protocolid &&
+ app->priority == pf->tmp_cfg.app[i].priority) {
+ /* Reset the app data */
+ pf->tmp_cfg.app[i].selector = 0;
+ pf->tmp_cfg.app[i].protocolid = 0;
+ pf->tmp_cfg.app[i].priority = 0;
+ break;
+ }
+ }
+
+	/* The specified DCB app was not found */
+ if (i == pf->tmp_cfg.numapps)
+ return -EINVAL;
+
+ pf->tmp_cfg.numapps--;
+	/* Shift the remaining apps down to fill the gap */
+ for (j = i; j < pf->tmp_cfg.numapps; j++)
+ pf->tmp_cfg.app[j] = old_cfg->app[j + 1];
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getstate - Get DCB enabled state
+ * @netdev: the corresponding netdev
+ *
+ * Get the current DCB enabled state
+ **/
+static u8 i40e_dcbnl_getstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
+ !!(pf->flags & I40E_FLAG_DCB_ENABLED));
+ return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+}
+
+/**
+ * i40e_dcbnl_setstate - Set DCB state
+ * @netdev: the corresponding netdev
+ * @state: enable or disable
+ *
+ * Set the DCB state
+ **/
+static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int ret = I40E_DCBNL_STATUS_SUCCESS;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return ret;
+
+ dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
+ state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ /* Nothing to do */
+ if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return ret;
+
+ if (i40e_is_sw_dcb(pf)) {
+ if (state) {
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
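+			/* Preserve the current config as the desired config */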
+ memcpy(&pf->hw.desired_dcbx_config,
+ &pf->hw.local_dcbx_config,
+ sizeof(struct i40e_dcbx_config));
+ } else {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ } else {
+ /* Cannot directly manipulate FW LLDP Agent */
+ ret = I40E_DCBNL_STATUS_ERROR;
+ }
+ return ret;
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 prio_type, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (tc >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ /* Use only up_map to map tc */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (up_map & BIT(i))
+ pf->tmp_cfg.etscfg.prioritytable[i] = tc;
+ }
+ pf->tmp_cfg.etscfg.tsatable[tc] = I40E_IEEE_TSA_ETS;
+ dev_dbg(&pf->pdev->dev,
+ "Set PG config tc=%d bwg_id=%d prio_type=%d bw_pct=%d up_map=%d\n",
+ tc, bwg_id, prio_type, bw_pct, up_map);
+}
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Tx BW settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ pf->tmp_cfg.etscfg.tcbwtable[pgid] = bw_pct;
+ dev_dbg(&pf->pdev->dev, "Set PG BW config tc=%d bw_pct=%d\n",
+ pgid, bw_pct);
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx TC PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_tx - Get CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type,
+ u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+ dev_dbg(&pf->pdev->dev, "Get PG config prio=%d tc=%d\n",
+ prio, *pgid);
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_tx - Get CEE PG BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Tx BW settings for given TC in CEE mode
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pf->hw.local_dcbx_config.etscfg.tcbwtable[pgid];
+ dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Rx PG settings for CEE mode. The UP2TC map is applied in the
+ * same manner for Tx and Rx (symmetrical), so return the TC
+ * information for the given priority accordingly.
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Rx BW settings for the given TC in CEE mode.
+ * The adapter doesn't support Rx ETS and runs in strict priority
+ * mode on the Rx path, hence just return 0.
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+ *bw_pct = 0;
+}
+
+/**
+ * i40e_dcbnl_set_pfc_cfg - Set CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Set the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ if (setting)
+ pf->tmp_cfg.pfc.pfcenable |= BIT(prio);
+ else
+ pf->tmp_cfg.pfc.pfcenable &= ~BIT(prio);
+ dev_dbg(&pf->pdev->dev,
+ "Set PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, setting, pf->tmp_cfg.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_get_pfc_cfg - Get CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Get the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pf->hw.local_dcbx_config.pfc.pfcenable >> prio) & 0x1;
+ dev_dbg(&pf->pdev->dev,
+ "Get PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, *setting, pf->hw.local_dcbx_config.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_cee_set_all - Commit CEE DCB settings to hardware
+ * @netdev: the corresponding netdev
+ *
+ * Commit the current DCB configuration to hardware
+ **/
+static u8 i40e_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int err;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ dev_dbg(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
+ err = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+
+ return err ? I40E_DCBNL_STATUS_ERROR : I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: the corresponding netdev
+ * @capid: the capability type
+ * @cap: the capability value
+ *
+ * Return the capability value for a given capability type
+ **/
+static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
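+		/* 0x80 advertises support for 8 traffic classes */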
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ case DCB_CAP_ATTR_BCN:
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(&pf->pdev->dev, "Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs supported by the device
+ *
+ * Return the total number of TCs supported by the adapter
+ **/
+static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return -EINVAL;
+
+ *num = I40E_MAX_TRAFFIC_CLASS;
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setnumtcs - Set CEE number of traffic classes
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs
+ *
+ * Set the total number of TCs (Unsupported)
+ **/
+static int i40e_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ return -EINVAL;
+}
+
+/**
+ * i40e_dcbnl_getpfcstate - Get CEE PFC mode
+ * @netdev: the corresponding netdev
+ *
+ * Get the current PFC enabled state
+ **/
+static u8 i40e_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+	/* Return enabled if PFC is enabled on any UP */
+ if (pf->hw.local_dcbx_config.pfc.pfcenable)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setpfcstate - Set CEE PFC mode
+ * @netdev: the corresponding netdev
+ * @state: required state
+ *
+ * The PFC state is enabled/disabled via the per-priority PFC
+ * settings and not via this call in the i40e driver
+ **/
+static void i40e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "PFC State is modified via PFC config.\n");
+}
+
+/**
+ * i40e_dcbnl_getapp - Get CEE APP
+ * @netdev: the corresponding netdev
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ *
+ * Return the CEE mode app for the given idtype and id
+ **/
+static int i40e_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * i40e_dcbnl_setdcbx - set required DCBx capability
+ * @netdev: the corresponding netdev
+ * @mode: new DCB mode managed or CEE+IEEE
+ *
+ * Set DCBx capability features
+ **/
+static u8 i40e_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+	/* Do not allow mode changes while managed by firmware */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return I40E_DCBNL_STATUS_ERROR;
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return I40E_DCBNL_STATUS_ERROR;
+
+	/* Already set to the given mode, no change needed */
+ if (mode == pf->dcbx_cap)
+ return I40E_DCBNL_STATUS_SUCCESS;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ else
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+
+ dev_dbg(&pf->pdev->dev, "mode=%d\n", mode);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
* i40e_dcbnl_getdcbx - retrieve current DCBx capability
* @dev: the corresponding netdev
*
@@ -132,7 +842,31 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = i40e_dcbnl_ieee_getets,
.ieee_getpfc = i40e_dcbnl_ieee_getpfc,
.getdcbx = i40e_dcbnl_getdcbx,
- .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+ .ieee_setets = i40e_dcbnl_ieee_setets,
+ .ieee_setpfc = i40e_dcbnl_ieee_setpfc,
+ .ieee_setapp = i40e_dcbnl_ieee_setapp,
+ .ieee_delapp = i40e_dcbnl_ieee_delapp,
+ .getstate = i40e_dcbnl_getstate,
+ .setstate = i40e_dcbnl_setstate,
+ .setpgtccfgtx = i40e_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = i40e_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = i40e_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = i40e_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = i40e_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = i40e_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = i40e_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = i40e_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = i40e_dcbnl_set_pfc_cfg,
+ .getpfccfg = i40e_dcbnl_get_pfc_cfg,
+ .setall = i40e_dcbnl_cee_set_all,
+ .getcap = i40e_dcbnl_get_cap,
+ .getnumtcs = i40e_dcbnl_getnumtcs,
+ .setnumtcs = i40e_dcbnl_setnumtcs,
+ .getpfcstate = i40e_dcbnl_getpfcstate,
+ .setpfcstate = i40e_dcbnl_setpfcstate,
+ .getapp = i40e_dcbnl_getapp,
+ .setdcbx = i40e_dcbnl_setdcbx,
};
/**
@@ -152,12 +886,16 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
u8 prio, tc_map;
int i;
+	/* SW DCB is taken care of by the DCBNL set calls */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
/* DCB not enabled */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
return;
dcbxcfg = &hw->local_dcbx_config;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 26ba1f3eb2d8..c70dec65a572 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3222,13 +3222,30 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
fsp->m_u.usr_ip4_spec.proto = 0;
}
- /* Reverse the src and dest notion, since the HW views them from
- * Tx perspective where as the user expects it from Rx filter view.
- */
- fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+ if (fsp->flow_type == IPV6_USER_FLOW ||
+ fsp->flow_type == UDP_V6_FLOW ||
+ fsp->flow_type == TCP_V6_FLOW ||
+ fsp->flow_type == SCTP_V6_FLOW) {
+ /* Reverse the src and dest notion, since the HW views them
+		 * from the Tx perspective whereas the user expects it from the
+ * Rx filter view.
+ */
+ fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->src_port;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6,
+ sizeof(__be32) * 4);
+ } else {
+ /* Reverse the src and dest notion, since the HW views them
+		 * from the Tx perspective whereas the user expects it from the
+ * Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+ }
switch (rule->flow_type) {
case SCTP_V4_FLOW:
@@ -3240,9 +3257,21 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
case UDP_V4_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
case IP_USER_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ break;
default:
/* If we have stored a filter with a flow type not listed here
* it is almost certainly a driver bug. WARN(), and then
@@ -3258,6 +3287,20 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
input_set = i40e_read_fd_input_set(pf, index);
no_input_set:
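+	/* Report a full mask for each IPv6 address field in the input set */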
+ if (input_set & I40E_L3_V6_SRC_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF);
+ }
+
+ if (input_set & I40E_L3_V6_DST_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF);
+ }
+
if (input_set & I40E_L3_SRC_MASK)
fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
@@ -3275,6 +3318,14 @@ no_input_set:
else
fsp->ring_cookie = rule->q_index;
+ if (rule->vlan_tag) {
+ fsp->h_ext.vlan_etype = rule->vlan_etype;
+ fsp->m_ext.vlan_etype = htons(0xFFFF);
+ fsp->h_ext.vlan_tci = rule->vlan_tag;
+ fsp->m_ext.vlan_tci = htons(0xFFFF);
+ fsp->flow_type |= FLOW_EXT;
+ }
+
if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
struct i40e_vsi *vsi;
@@ -3921,6 +3972,14 @@ static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
return "sctp4";
case IP_USER_FLOW:
return "ip4";
+ case TCP_V6_FLOW:
+ return "tcp6";
+ case UDP_V6_FLOW:
+ return "udp6";
+ case SCTP_V6_FLOW:
+ return "sctp6";
+ case IPV6_USER_FLOW:
+ return "ip6";
default:
return "unknown";
}
@@ -4056,9 +4115,14 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
struct ethtool_rx_flow_spec *fsp,
struct i40e_rx_flow_userdef *userdef)
{
- struct i40e_pf *pf = vsi->back;
+ static const __be32 ipv6_full_mask[4] = {cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff)};
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
struct ethtool_tcpip4_spec *tcp_ip4_spec;
struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct i40e_pf *pf = vsi->back;
u64 current_mask, new_mask;
bool new_flex_offset = false;
bool flex_l3 = false;
@@ -4080,11 +4144,28 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ fdir_filter_count = &pf->fd_sctp6_filter_cnt;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ fdir_filter_count = &pf->fd_tcp6_filter_cnt;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ fdir_filter_count = &pf->fd_udp6_filter_cnt;
+ break;
case IP_USER_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ fdir_filter_count = &pf->fd_ip6_filter_cnt;
+ flex_l3 = true;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -4147,6 +4228,53 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
break;
+ case SCTP_V6_FLOW:
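+		/* SCTP verification tag matching is not supported, so clear it */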
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ fallthrough;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip6_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip6_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic Classes is not supported. */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ break;
case IP_USER_FLOW:
usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
@@ -4187,10 +4315,62 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EINVAL;
break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fsp->m_u.usr_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &usr_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+				       &usr_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+ else if (!usr_ip6_spec->l4_4_bytes)
+ new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic class is not supported. */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EINVAL;
+
+ break;
default:
return -EOPNOTSUPP;
}
+ if (fsp->flow_type & FLOW_EXT) {
+		/* Allow only 802.1Q or no etype at all, since the etype is
+		 * later forced to 0x8100 (802.1Q)
+ */
+ if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) &&
+ fsp->h_ext.vlan_etype != 0)
+ return -EOPNOTSUPP;
+ if (fsp->m_ext.vlan_tci == htons(0xFFFF))
+ new_mask |= I40E_VLAN_SRC_MASK;
+ else
+ new_mask &= ~I40E_VLAN_SRC_MASK;
+ }
+
/* First, clear all flexible filter entries */
new_mask &= ~I40E_FLEX_INPUT_MASK;
@@ -4370,7 +4550,9 @@ static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
a->dst_port != b->dst_port ||
a->src_port != b->src_port ||
a->flow_type != b->flow_type ||
- a->ip4_proto != b->ip4_proto)
+ a->ipl4_proto != b->ipl4_proto ||
+ a->vlan_tag != b->vlan_tag ||
+ a->vlan_etype != b->vlan_etype)
return false;
return true;
@@ -4528,15 +4710,38 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
input->flow_type = fsp->flow_type & ~FLOW_EXT;
- input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
- /* Reverse the src and dest notion, since the HW expects them to be from
- * Tx perspective where as the input from user is from Rx filter view.
- */
- input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
- input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
- input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->vlan_etype = fsp->h_ext.vlan_etype;
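+	/* A VLAN tag given without an etype mask implies 802.1Q */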
+ if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci)
+ input->vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ if (fsp->m_ext.vlan_tci && input->vlan_etype)
+ input->vlan_tag = fsp->h_ext.vlan_tci;
+ if (input->flow_type == IPV6_USER_FLOW ||
+ input->flow_type == UDP_V6_FLOW ||
+ input->flow_type == TCP_V6_FLOW ||
+ input->flow_type == SCTP_V6_FLOW) {
+ /* Reverse the src and dest notion, since the HW expects them
+		 * to be from the Tx perspective whereas the input from the
+		 * user is from the Rx filter view.
+ */
+ input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ input->dst_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(__be32) * 4);
+ memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(__be32) * 4);
+ } else {
+ /* Reverse the src and dest notion, since the HW expects them
+		 * to be from the Tx perspective whereas the input from the
+		 * user is from the Rx filter view.
+ */
+ input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto;
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ }
if (userdef.flex_filter) {
input->flex_filter = true;
@@ -4878,7 +5083,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- bool is_reset_needed;
+ u32 reset_needed = 0;
i40e_status status;
u32 i, j;
@@ -4923,9 +5128,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
flags_complete:
changed_flags = orig_flags ^ new_flags;
- is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
- I40E_FLAG_DISABLE_FW_LLDP));
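+	/* Toggling the FW LLDP agent requires a full PF reset and rebuild;
+	 * the remaining flags only require a PF reset.
+	 */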
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
/* Before we finalize any flag changes, we need to perform some
* checks to ensure that the changes are supported and safe.
@@ -5033,23 +5240,13 @@ flags_complete:
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- struct i40e_dcbx_config *dcbcfg;
-
+#ifdef CONFIG_I40E_DCB
+ i40e_dcb_sw_default_config(pf);
+#endif /* CONFIG_I40E_DCB */
+ i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
- i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
- /* reset local_dcbx_config to default */
- dcbcfg = &pf->hw.local_dcbx_config;
- dcbcfg->etscfg.willing = 1;
- dcbcfg->etscfg.maxtcs = 0;
- dcbcfg->etscfg.tcbwtable[0] = 100;
- for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
- dcbcfg->etscfg.tcbwtable[i] = 0;
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
- dcbcfg->etscfg.prioritytable[i] = 0;
- dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
- dcbcfg->pfc.willing = 1;
- dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
+ i40e_set_lldp_forwarding(pf, false);
status = i40e_aq_start_lldp(&pf->hw, false, NULL);
if (status) {
adq_err = pf->hw.aq.asq_last_status;
@@ -5057,7 +5254,7 @@ flags_complete:
case I40E_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
- is_reset_needed = false;
+ reset_needed = 0;
break;
case I40E_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
@@ -5086,8 +5283,8 @@ flags_complete:
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if (is_reset_needed)
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+ if (reset_needed)
+ i40e_do_reset(pf, reset_needed, true);
return 0;
}
@@ -5252,12 +5449,131 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp phy_cfg;
+ enum i40e_status_code status = 0;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (phy_cfg.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ edata->supported = SUPPORTED_Autoneg;
+ edata->lp_advertised = edata->supported;
+
+ /* Get current configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
+ edata->eee_enabled = !!edata->advertised;
+ edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
+
+ edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
+
+ return 0;
+}
+
+static int i40e_is_eee_param_supported(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
+ {edata->tx_lpi_timer, "tx-timer"},
+ {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev,
+ "EEE setting %s not supported\n",
+ param[i].name);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
}
static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_aq_set_phy_config config;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ __le16 eee_capability;
+
+ /* Deny parameters we don't support */
+ if (i40e_is_eee_param_supported(netdev, edata))
+ return -EOPNOTSUPP;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (abilities.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ /* Cache initial EEE capability */
+ eee_capability = abilities.eee_capability;
+
+ /* Get current PHY configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Cache current PHY configuration */
+ config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ /* Set desired EEE state */
+ if (edata->eee_enabled) {
+ config.eee_capability = eee_capability;
+ config.eeer |= cpu_to_le32(I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ } else {
+ config.eee_capability = 0;
+ config.eeer &= cpu_to_le32(~I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ }
+
+ /* Apply modified PHY configuration */
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status)
+ return -EAGAIN;
+
+ return 0;
}
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..353deae139f9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include <linux/etherdevice.h>
#include <linux/of_net.h>
@@ -35,7 +35,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_prep_for_reset(struct i40e_pf *pf);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -2614,7 +2616,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
return;
}
@@ -2632,7 +2634,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
}
}
- clear_bit(__I40E_VF_DISABLE, pf->state);
}
/**
@@ -3494,6 +3495,24 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
}
/**
+ * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
+ * @pf: Pointer to the targeted PF
+ *
+ * Set all flow director counters to 0.
+ */
+static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
+{
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+ pf->fd_tcp6_filter_cnt = 0;
+ pf->fd_udp6_filter_cnt = 0;
+ pf->fd_sctp6_filter_cnt = 0;
+ pf->fd_ip6_filter_cnt = 0;
+}
+
+/**
* i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
* @vsi: Pointer to the targeted VSI
*
@@ -3510,10 +3529,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
return;
/* Reset FDir counters as we're replaying all existing filters */
- pf->fd_tcp4_filter_cnt = 0;
- pf->fd_udp4_filter_cnt = 0;
- pf->fd_sctp4_filter_cnt = 0;
- pf->fd_ip4_filter_cnt = 0;
+ i40e_reset_fdir_filter_cnt(pf);
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
@@ -5289,6 +5305,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
vsi->seid);
return ret;
}
+ memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
@@ -5919,7 +5936,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
ch->seid = ctxt.seid;
ch->vsi_number = ctxt.vsi_number;
- ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
+ ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
/* copy just the sections touched not the entire info
* since not all sections are valid as returned by
@@ -5941,6 +5958,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
i40e_status ret;
int i;
+ memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = ch->enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
@@ -6396,6 +6414,9 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
+ if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
+ return;
+
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
continue;
@@ -6463,6 +6484,316 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
+ * i40e_suspend_port_tx - Suspend port Tx
+ * @pf: PF struct
+ *
+ * Suspend a port's Tx and issue a PF reset in case of failure.
+ **/
+static int i40e_suspend_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Suspend Port Tx failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
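A sketch of the pairing this helper is built for, as i40e_hw_dcb_config() below uses it: port Tx is suspended before the DCB registers are rewritten and resumed afterwards. The wrapper function here is hypothetical:

	static int example_dcb_rewrite(struct i40e_pf *pf)
	{
		int ret;

		ret = i40e_suspend_port_tx(pf);	/* schedules a PF reset on failure */
		if (ret)
			return ret;

		/* ... program ETS/PFC/packet-buffer registers here ... */

		return i40e_resume_port_tx(pf);	/* restart port Tx */
	}
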
+/**
+ * i40e_hw_set_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
+ **/
+static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
+ int ret;
+
+	/* Check if reconfiguration is needed */
+	if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
+ return 0;
+ }
+
+	/* Config change: disable all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Set DCB Config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* In case of reset do not try to resume anything */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
+ /* Re-start the VSIs if disabled */
+ ret = i40e_resume_port_tx(pf);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+ i40e_pf_unquiesce_all_vsi(pf);
+ }
+err:
+ return ret;
+}
+
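The change detection above compares the two configurations through pointers; a self-contained illustration of why the dereference matters, using a hypothetical struct in plain C:

	#include <string.h>

	struct cfg { int prio; };

	static int cfg_unchanged(const struct cfg *a, const struct cfg *b)
	{
		/* memcmp(&a, &b, sizeof(a)) would compare the pointer
		 * values themselves; dereferencing compares the
		 * pointed-to configuration contents.
		 */
		return memcmp(a, b, sizeof(*a)) == 0;
	}
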
+/**
+ * i40e_hw_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF.
+ **/
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
+ u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_dcbx_config *old_cfg;
+ u8 mode[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_rx_pb_config pb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u8 num_ports = hw->num_ports;
+ bool need_reconfig;
+ int ret = -EINVAL;
+ u8 lltc_map = 0;
+ u8 tc_map = 0;
+ u8 new_numtc;
+ u8 i;
+
+ dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
+	/* Unpack information to program the ETS HW via the shared API:
+ * numtc, tcmap
+ * LLTC map
+ * ETS/NON-ETS arbiter mode
+ * max exponent (credit refills)
+ * Total number of ports
+ * PFC priority bit-map
+ * Priority Table
+ * BW % per TC
+ * Arbiter mode between UPs sharing same TC
+ * TSA table (ETS or non-ETS)
+ * EEE enabled or not
+ * MFS TC table
+ */
+
+ new_numtc = i40e_dcb_get_num_tc(new_cfg);
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ for (i = 0; i < new_numtc; i++) {
+ tc_map |= BIT(i);
+ switch (new_cfg->etscfg.tsatable[i]) {
+ case I40E_IEEE_TSA_ETS:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
+ ets_data.tc_bw_share_credits[i] =
+ new_cfg->etscfg.tcbwtable[i];
+ break;
+ case I40E_IEEE_TSA_STRICT:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
+ lltc_map |= BIT(i);
+ ets_data.tc_bw_share_credits[i] =
+ I40E_DCB_STRICT_PRIO_CREDITS;
+ break;
+ default:
+ /* Invalid TSA type */
+ need_reconfig = false;
+ goto out;
+ }
+ }
+
+ old_cfg = &hw->local_dcbx_config;
+	/* Check if reconfiguration is needed */
+ need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
+
+ /* If needed, enable/disable frame tagging, disable all VSIs
+ * and suspend port tx
+ */
+ if (need_reconfig) {
+ /* Enable DCB tagging only when more than one TC */
+ if (new_numtc > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
+		/* Reconfiguration needed: quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+ ret = i40e_suspend_port_tx(pf);
+ if (ret)
+ goto err;
+ }
+
+ /* Configure Port ETS Tx Scheduler */
+ ets_data.tc_valid_bits = tc_map;
+ ets_data.tc_strict_priority_flags = lltc_map;
+ ret = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Modify Port ETS failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Configure Rx ETS HW */
+ memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
+ i40e_dcb_hw_set_num_tc(hw, new_numtc);
+ i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY,
+ I40E_DCB_DEFAULT_MAX_EXPONENT,
+ lltc_map);
+ i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
+ i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
+ prio_type);
+ i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
+ new_cfg->etscfg.prioritytable);
+ i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
+
+ /* Configure Rx Packet Buffers in HW */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ mfs_tc[i] += I40E_PACKET_HDR_PAD;
+ }
+
+ i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
+ false, new_cfg->pfc.pfcenable,
+ mfs_tc, &pb_cfg);
+ i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
+
+ /* Update the local Rx Packet buffer config */
+ pf->pb_cfg = pb_cfg;
+
+ /* Inform the FW about changes to DCB configuration */
+ ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "DCB Updated failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Update the port DCBx configuration */
+ *old_cfg = *new_cfg;
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* Re-start the VSIs if disabled */
+ if (need_reconfig) {
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+
+ /* Wait for the PF's queues to be disabled */
+ ret = i40e_pf_wait_queues_disabled(pf);
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ goto err;
+ } else {
+ i40e_pf_unquiesce_all_vsi(pf);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ }
+		/* registers are set, let's apply */
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ ret = i40e_hw_set_dcb_config(pf, new_cfg);
+ }
+
+err:
+ return ret;
+}
+
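A condensed sketch of how the function derives its two bitmaps from the TSA table, with BIT() and I40E_IEEE_TSA_STRICT as used above; the helper itself is hypothetical:

	static void example_build_tc_maps(const u8 *tsatable, u8 num_tc,
					  u8 *tc_map, u8 *lltc_map)
	{
		u8 i;

		for (i = 0; i < num_tc; i++) {
			*tc_map |= BIT(i);		/* TC i participates */
			if (tsatable[i] == I40E_IEEE_TSA_STRICT)
				*lltc_map |= BIT(i);	/* strict-priority TC */
		}
	}
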
+/**
+ * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
+ * @pf: PF being queried
+ *
+ * Set default DCB configuration in case DCB is to be done in SW.
+ **/
+int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+{
+	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+	struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
+ struct i40e_hw *hw = &pf->hw;
+ int err;
+
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ /* Update the local cached instance with TC0 ETS */
+ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
+ pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ pf->tmp_cfg.etscfg.maxtcs = 0;
+ pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+ pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
+ pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
+ pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
+ pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
+ ets_data.tc_strict_priority_flags = 0; /* ETS */
+ ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
+
+ /* Enable ETS on the Physical port */
+ err = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "Enable Port ETS failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Update the local cached instance with TC0 ETS */
+ dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ dcb_cfg->etscfg.cbs = 0;
+ dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+
+out:
+ return err;
+}
+
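Restating the non-MIB branch with literal values may help; the concrete numbers below (the TC0 bit and the 100% share) are assumptions about the macro definitions, not taken from this hunk:

	/* Hypothetical equivalent of the defaults programmed above */
	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = BIT(0);		/* assume only TC0 is valid */
	ets_data.tc_strict_priority_flags = 0;	/* ETS arbitration, no strict TCs */
	ets_data.tc_bw_share_credits[0] = 100;	/* assume 100% bandwidth to TC0 */
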
+/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6472,18 +6803,31 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int err = 0;
+ int err;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
- if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
- dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
+ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ dev_info(&pf->pdev->dev, "DCB is not supported.\n");
err = I40E_NOT_SUPPORTED;
goto out;
}
-
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
+ goto out;
+ }
+ dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ /* at init capable but disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto out;
+ }
err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
@@ -6523,6 +6867,40 @@ out:
#endif /* CONFIG_I40E_DCB */
/**
+ * i40e_set_lldp_forwarding - set forwarding of LLDP frames
+ * @pf: PF being configured
+ * @enable: if forwarding to OS shall be enabled
+ *
+ * Toggle the forwarding of LLDP frames. When passing DCB control
+ * from firmware to software, LLDP frames must be forwarded to the
+ * software-based LLDP agent.
+ */
+void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
+{
+ if (pf->lan_vsi == I40E_NO_VSI)
+ return;
+
+ if (!pf->vsi[pf->lan_vsi])
+ return;
+
+ /* No need to check the outcome, commands may fail
+ * if desired value is already set
+ */
+ i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+ pf->vsi[pf->lan_vsi]->seid, 0,
+ enable, NULL, NULL);
+
+ i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+ pf->vsi[pf->lan_vsi]->seid, 0,
+ enable, NULL, NULL);
+}
+
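For context, this is how the patch wires the helper up in the probe and rebuild paths further down; forwarding is enabled only when the firmware LLDP agent is turned off:

	#ifdef CONFIG_I40E_DCB
		if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
			i40e_set_lldp_forwarding(pf, true);	/* hand LLDP frames to the OS */
	#endif /* CONFIG_I40E_DCB */
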
+/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
* @isup: true of link is up, false otherwise
@@ -7598,8 +7976,8 @@ static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
struct i40e_aqc_cloud_filters_element_data *cld)
{
- int i, j;
u32 ipa;
+ int i;
memset(cld, 0, sizeof(*cld));
ether_addr_copy(cld->outer_mac, filter->dst_mac);
@@ -7610,14 +7988,14 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
- for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
- i++, j += 2) {
+ for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
- ipa = cpu_to_le32(ipa);
- memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
+
+ *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
}
} else {
ipa = be32_to_cpu(filter->dst_ipv4);
+
memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
}
@@ -7665,6 +8043,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
if (filter->flags >= ARRAY_SIZE(flag_table))
return I40E_ERR_CONFIG;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter);
@@ -7728,10 +8108,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
- if (filter->src_port || filter->src_ipv4 ||
+ if (filter->src_port ||
+ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EOPNOTSUPP;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -7755,7 +8138,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
}
- } else if (filter->dst_ipv4 ||
+ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
@@ -8285,7 +8668,6 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_FIN |
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
-
udp_tunnel_get_rx_info(netdev);
return 0;
@@ -8400,32 +8782,51 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
pf->fdir_pf_active_filters = 0;
- pf->fd_tcp4_filter_cnt = 0;
- pf->fd_udp4_filter_cnt = 0;
- pf->fd_sctp4_filter_cnt = 0;
- pf->fd_ip4_filter_cnt = 0;
+ i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for TCP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for UDP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for UDP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for SCTP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for SCTP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
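Each input-set write above ORs together per-field key masks; a minimal sketch of composing one for TCP/IPv6, with the mask macros exactly as used in this hunk:

	/* Key TCP/IPv6 flows on both IPv6 addresses and both L4 ports */
	u64 tcp6_input_set = I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
			     I40E_L4_SRC_MASK | I40E_L4_DST_MASK;

	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
				tcp6_input_set);
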
/**
@@ -8531,6 +8932,13 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+	 * Resets the PF and reinitializes the PF's VSI.
+ */
+ i40e_prep_for_reset(pf);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
dev_info(&pf->pdev->dev,
pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
"FW LLDP is disabled\n" :
@@ -8643,6 +9051,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
int ret = 0;
u8 type;
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
+ !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ /* let firmware decide if the DCB should be disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+
/* Not DCB capable or capability disabled */
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
@@ -8674,10 +9090,20 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
goto exit;
}
@@ -8881,8 +9307,17 @@ static void i40e_delete_invalid_filter(struct i40e_pf *pf,
case SCTP_V4_FLOW:
pf->fd_sctp4_filter_cnt--;
break;
+ case TCP_V6_FLOW:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case UDP_V6_FLOW:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case SCTP_V6_FLOW:
+		pf->fd_sctp6_filter_cnt--;
+ break;
case IP_USER_FLOW:
- switch (filter->ip4_proto) {
+ switch (filter->ipl4_proto) {
case IPPROTO_TCP:
pf->fd_tcp4_filter_cnt--;
break;
@@ -8897,6 +9332,22 @@ static void i40e_delete_invalid_filter(struct i40e_pf *pf,
break;
}
break;
+ case IPV6_USER_FLOW:
+ switch (filter->ipl4_proto) {
+ case IPPROTO_TCP:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case IPPROTO_UDP:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case IPPROTO_SCTP:
+ pf->fd_sctp6_filter_cnt--;
+ break;
+ case IPPROTO_IP:
+ pf->fd_ip6_filter_cnt--;
+ break;
+ }
+ break;
}
/* Remove the filter from the list and free memory */
@@ -8930,7 +9381,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
* rules active.
*/
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
- (pf->fd_tcp4_filter_cnt == 0))
+ pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
i40e_reenable_fdir_atr(pf);
/* if hw had a problem adding a filter, delete it */
@@ -9099,6 +9550,9 @@ static void i40e_link_event(struct i40e_pf *pf)
u8 new_link_speed, old_link_speed;
i40e_status status;
bool new_link, old_link;
+#ifdef CONFIG_I40E_DCB
+ int err;
+#endif /* CONFIG_I40E_DCB */
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
@@ -9142,6 +9596,31 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_PTP)
i40e_ptp_set_increment(pf);
+#ifdef CONFIG_I40E_DCB
+ if (new_link == old_link)
+ return;
+ /* Not SW DCB so firmware will take care of default settings */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return;
+
+	/* Only the link-down case is covered here; after link up, in the
+	 * SW DCB case, the SW LLDP agent takes care of the setup
+	 */
+ if (!new_link) {
+ dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
+ memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
+ } else {
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ }
+#endif /* CONFIG_I40E_DCB */
}
/**
@@ -9218,7 +9697,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
* precedence before starting a new reset sequence.
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
i40e_reset(pf);
i40e_rebuild(pf, false, false);
}
@@ -9350,7 +9829,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
switch (opcode) {
case i40e_aqc_opc_get_link_status:
+ rtnl_lock();
i40e_handle_link_event(pf, &event);
+ rtnl_unlock();
break;
case i40e_aqc_opc_send_msg_to_pf:
ret = i40e_vc_process_vf_msg(pf,
@@ -9364,7 +9845,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
rtnl_lock();
- ret = i40e_handle_lldp_event(pf, &event);
+ i40e_handle_lldp_event(pf, &event);
rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
break;
@@ -9850,12 +10331,10 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
+static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
@@ -9870,12 +10349,7 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
- /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
- if (!lock_acquired)
- rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
- if (!lock_acquired)
- rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -9991,7 +10465,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 val;
int v;
@@ -10087,24 +10560,41 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
- /* Enable FW to write a default DCB config on link-up */
- i40e_aq_set_dcb_parameters(hw, true, NULL);
-
-#ifdef CONFIG_I40E_DCB
- ret = i40e_init_pf_dcb(pf);
- if (ret) {
- dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
- /* Continue without DCB enabled */
- }
-#endif /* CONFIG_I40E_DCB */
- /* do basic switch setup */
if (!lock_acquired)
rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_unlock;
+#ifdef CONFIG_I40E_DCB
+ /* Enable FW to write a default DCB config on link-up
+ * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
+	 * is not supported at the new link speed
+ */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ } else {
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
+ ret);
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
+ }
+ }
+ }
+
+#endif /* CONFIG_I40E_DCB */
+
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
*/
@@ -10117,13 +10607,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- /* make sure our flow control settings are restored */
- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
- if (ret)
- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -10241,6 +10724,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
+#ifdef CONFIG_I40E_DCB
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+ i40e_set_lldp_forwarding(pf, true);
+#endif /* CONFIG_I40E_DCB */
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
@@ -10307,7 +10794,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
**/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf, lock_acquired);
+ i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, false, lock_acquired);
}
@@ -11641,7 +12128,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
u16 qcount;
vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
pf->alloc_rss_size = new_rss_size;
@@ -11699,6 +12186,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
+ memset(&bw_data, 0, sizeof(bw_data));
+
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
@@ -12438,9 +12927,10 @@ out_err:
* i40e_xdp_setup - add/remove an XDP program
* @vsi: VSI to changed
* @prog: XDP program
+ * @extack: netlink extended ack
**/
-static int i40e_xdp_setup(struct i40e_vsi *vsi,
- struct bpf_prog *prog)
+static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_pf *pf = vsi->back;
@@ -12449,17 +12939,16 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
int i;
/* Don't allow frames that span over multiple buffers */
- if (frame_size > vsi->rx_buf_len)
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
return -EINVAL;
-
- if (!i40e_enabled_xdp_vsi(vsi) && !prog)
- return 0;
+ }
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
if (need_reset)
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
old_prog = xchg(&vsi->xdp_prog, prog);
@@ -12759,7 +13248,7 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
- return i40e_xdp_setup(vsi, xdp->prog);
+ return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
case XDP_SETUP_XSK_POOL:
return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
@@ -12794,8 +13283,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -14696,6 +15183,10 @@ err_switch_setup:
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct i40e_aq_get_phy_abilities_resp abilities;
+#ifdef CONFIG_I40E_DCB
+ enum i40e_get_fw_lldp_status_resp lldp_status;
+ i40e_status status;
+#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
@@ -14704,7 +15195,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
u32 val;
u32 i;
- u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -14952,6 +15442,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
+	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
+		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
+	else
+		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
dev_info(&pdev->dev,
(pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
"FW LLDP is disabled\n" :
@@ -14960,7 +15456,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Enable FW to write default DCB config on link-up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
-#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
@@ -15038,24 +15533,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
- /* Make sure flow control is set according to current settings */
- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_phy_cap\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on set_phy_config\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_link_info\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
-
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -15112,6 +15589,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_info(&pdev->dev,
"setup of misc vector failed: %d\n", err);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
goto err_vsis;
}
}
@@ -15253,6 +15732,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
+#ifdef CONFIG_I40E_DCB
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+ i40e_set_lldp_forwarding(pf, true);
+#endif /* CONFIG_I40E_DCB */
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
@@ -15455,7 +15938,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -15505,7 +15988,7 @@ static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
}
/**
@@ -15609,7 +16092,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -15668,7 +16151,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/
rtnl_lock();
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5c1378641b3b..aaea297640e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
@@ -200,6 +200,10 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
@@ -289,6 +293,9 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u8 filter_count);
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code
+i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 564df22f3f46..36f7b27a04ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
@@ -34,12 +34,137 @@
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PRT_SWR_PM_THR 0x0026CD00 /* Reset: CORER */
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT 0
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_MASK I40E_MASK(0xFF, I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
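Every register added above carries SHIFT/MASK pairs generated by I40E_MASK; a short sketch of reading one field with them, where rd32 is the driver's register-read helper and the surrounding code is hypothetical:

	/* Extract the number of enabled TCs from PRTDCB_GENC */
	u32 reg = rd32(hw, I40E_PRTDCB_GENC);
	u8 numtc = (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
			I40E_PRTDCB_GENC_NUMTC_SHIFT);
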
#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
@@ -359,6 +484,27 @@
#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
@@ -398,8 +544,34 @@
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4aca637d4a23..627794b31e33 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -42,9 +42,6 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
(fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
- flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
- (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
-
/* Use LAN VSI Id if not programmed by user */
flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
@@ -160,59 +157,180 @@ dma_fail:
return -1;
}
-#define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @add: true adds a filter, false removes it
+ * i40e_create_dummy_packet - Constructs dummy packet for HW
+ * @dummy_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next-level protocol carried in the L3 payload
+ * @data: filter data
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns the address of the layer 4 header within the dummy packet.
+ **/
+static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ bool is_vlan = !!data->vlan_tag;
+ struct vlan_hdr vlan;
+ struct ipv6hdr ipv6;
+ struct ethhdr eth;
+ struct iphdr ip;
+ u8 *tmp;
+
+ if (ipv4) {
+ eth.h_proto = cpu_to_be16(ETH_P_IP);
+ ip.protocol = l4proto;
+ ip.version = 0x4;
+ ip.ihl = 0x5;
+
+ ip.daddr = data->dst_ip;
+ ip.saddr = data->src_ip;
+ } else {
+ eth.h_proto = cpu_to_be16(ETH_P_IPV6);
+ ipv6.nexthdr = l4proto;
+ ipv6.version = 0x6;
+
+ memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
+ sizeof(__be32) * 4);
+ }
+
+ if (is_vlan) {
+ vlan.h_vlan_TCI = data->vlan_tag;
+ vlan.h_vlan_encapsulated_proto = eth.h_proto;
+ eth.h_proto = data->vlan_etype;
+ }
+
+ tmp = dummy_packet;
+ memcpy(tmp, &eth, sizeof(eth));
+ tmp += sizeof(eth);
+
+ if (is_vlan) {
+ memcpy(tmp, &vlan, sizeof(vlan));
+ tmp += sizeof(vlan);
+ }
+
+ if (ipv4) {
+ memcpy(tmp, &ip, sizeof(ip));
+ tmp += sizeof(ip);
+ } else {
+ memcpy(tmp, &ipv6, sizeof(ipv6));
+ tmp += sizeof(ipv6);
+ }
+
+ return tmp;
+}
+
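A sketch of how the callers below consume the returned pointer; the allocation pattern is taken from the UDP/TCP helpers in this patch, and `data` stands in for a real filter:

	u8 *raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	char *l4hdr;

	if (!raw_packet)
		return -ENOMEM;
	/* Writes eth [+ vlan] + IP/IPv6 headers, returns start of L4 header */
	l4hdr = i40e_create_dummy_packet(raw_packet, false, IPPROTO_TCP, data);
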
+/**
+ * i40e_create_dummy_udp_packet - helper function to create UDP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next-level protocol carried in the L3 payload
+ * @data: filter data
+ *
+ * Helper function to populate the UDP fields.
**/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
{
- struct i40e_pf *pf = vsi->back;
struct udphdr *udp;
- struct iphdr *ip;
- u8 *raw_packet;
- int ret;
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ u8 *tmp;
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
- if (!raw_packet)
- return -ENOMEM;
- memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
+ udp = (struct udphdr *)(tmp);
+ udp->dest = data->dst_port;
+ udp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_tcp_packet - helper function to create TCP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next-level protocol carried in the L3 payload
+ * @data: filter data
+ *
+ * Helper function to populate the TCP fields.
+ **/
+static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct tcphdr *tcp;
+ u8 *tmp;
+ /* Dummy tcp packet */
+ static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
- ip->daddr = fd_data->dst_ip;
- udp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- udp->source = fd_data->src_port;
+ tcp = (struct tcphdr *)tmp;
+ memcpy(tcp, tcp_packet, sizeof(tcp_packet));
+ tcp->dest = data->dst_port;
+ tcp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next-level protocol carried in the L3 payload
+ * @data: filter data
+ *
+ * Helper function to populate the SCTP fields.
+ **/
+static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
+ u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct sctphdr *sctp;
+ u8 *tmp;
+
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
+
+ sctp = (struct sctphdr *)tmp;
+ sctp->dest = data->dst_port;
+ sctp->source = data->src_port;
+}
+
+/**
+ * i40e_prepare_fdir_filter - Prepare and program fdir filter
+ * @pf: physical function to attach filter to
+ * @fd_data: filter data
+ * @add: add or delete filter
+ * @packet_addr: address of dummy packet, used in filtering
+ * @payload_offset: offset from dummy packet address to user defined data
+ * @pctype: Packet type for which filter is used
+ *
+ * Helper function to apply the flex payload offset to the dummy
+ * packet, program the filter and handle errors.
+ **/
+static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
+ struct i40e_fdir_filter *fd_data,
+ bool add, char *packet_addr,
+ int payload_offset, u8 pctype)
+{
+ int ret;
if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
+ u8 *payload;
__be16 pattern = fd_data->flex_word;
u16 off = fd_data->flex_offset;
+ payload = packet_addr + payload_offset;
+
+		/* If the user provided a VLAN, offset the payload by VLAN_HLEN */
+		if (fd_data->vlan_tag)
+ payload += VLAN_HLEN;
+
*((__force __be16 *)(payload + off)) = pattern;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ fd_data->pctype = pctype;
+ ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
/* Free the packet buffer since it wasn't added to the ring */
- kfree(raw_packet);
return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
@@ -225,238 +343,243 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
- if (add)
- pf->fd_udp4_filter_cnt++;
- else
- pf->fd_udp4_filter_cnt--;
+ return ret;
+}
- return 0;
+/**
+ * i40e_change_filter_num - Update the active FDir filter count
+ * @ipv4: true if the filter is IPv4, false for IPv6
+ * @add: add or delete filter
+ * @ipv4_filter_num: field to update
+ * @ipv6_filter_num: field to update
+ *
+ * Update filter number field for pf.
+ **/
+static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
+ u16 *ipv6_filter_num)
+{
+ if (add) {
+ if (ipv4)
+ (*ipv4_filter_num)++;
+ else
+ (*ipv6_filter_num)++;
+ } else {
+ if (ipv4)
+ (*ipv4_filter_num)--;
+ else
+ (*ipv6_filter_num)--;
+ }
}
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * i40e_add_del_fdir_udp - Add/Remove UDP filters
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true for IPv4, false for IPv6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct tcphdr *tcp;
- struct iphdr *ip;
u8 *raw_packet;
int ret;
- /* Dummy packet */
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
- 0x0, 0x72, 0, 0, 0, 0};
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip;
- tcp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- tcp->source = fd_data->src_port;
+ i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
- *((__force __be16 *)(payload + off)) = pattern;
+ if (ret) {
+ kfree(raw_packet);
+ return ret;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
+ &pf->fd_udp6_filter_cnt);
+
+ return 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
+/**
+ * i40e_add_del_fdir_tcp - Add/Remove TCP filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true if v4, false if v6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+
if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* Free the packet buffer since it wasn't added to the ring */
kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
+ return ret;
}
+ i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
+ &pf->fd_tcp6_filter_cnt);
+
if (add) {
- pf->fd_tcp4_filter_cnt++;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
- } else {
- pf->fd_tcp4_filter_cnt--;
}
-
return 0;
}
-#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
* a specific flow spec
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true if v4, false if v6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct sctphdr *sctp;
- struct iphdr *ip;
u8 *raw_packet;
int ret;
- /* Dummy packet */
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->daddr = fd_data->dst_ip;
- sctp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- sctp->source = fd_data->src_port;
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
+ i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
- *((__force __be16 *)(payload + off)) = pattern;
- }
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* Free the packet buffer since it wasn't added to the ring */
kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
+ return ret;
}
- if (add)
- pf->fd_sctp4_filter_cnt++;
- else
- pf->fd_sctp4_filter_cnt--;
+ i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
+ &pf->fd_sctp6_filter_cnt);
return 0;
}
-#define I40E_IP_DUMMY_PACKET_LEN 34
+#define I40E_IP_DUMMY_PACKET_LEN 34
+#define I40E_IP6_DUMMY_PACKET_LEN 54
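The dummy-packet length macros are plain header sums: ETH_HLEN (14) plus an IPv4 (20) or IPv6 (40) header, plus the L4 header where one is present (TCP 20, UDP 8, SCTP 12); a VLAN tag additionally shifts the flex payload by VLAN_HLEN (4), as handled in i40e_prepare_fdir_filter() above. An illustrative compile-time check (not part of this patch, assuming static_assert() from <linux/build_bug.h>) would be:

	static_assert(I40E_UDPIP_DUMMY_PACKET_LEN   == 14 + 20 + 8);  /* 42 */
	static_assert(I40E_UDPIP6_DUMMY_PACKET_LEN  == 14 + 40 + 8);  /* 62 */
	static_assert(I40E_TCPIP_DUMMY_PACKET_LEN   == 14 + 20 + 20); /* 54 */
	static_assert(I40E_TCPIP6_DUMMY_PACKET_LEN  == 14 + 40 + 20); /* 74 */
	static_assert(I40E_SCTPIP_DUMMY_PACKET_LEN  == 14 + 20 + 12); /* 46 */
	static_assert(I40E_SCTPIP6_DUMMY_PACKET_LEN == 14 + 40 + 12); /* 66 */
	static_assert(I40E_IP_DUMMY_PACKET_LEN      == 14 + 20);      /* 34 */
	static_assert(I40E_IP6_DUMMY_PACKET_LEN     == 14 + 40);      /* 54 */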
/**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * i40e_add_del_fdir_ip - Add/Remove IP Flow Director filters for
* a specific flow spec
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true if v4, false if v6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct iphdr *ip;
+ int payload_offset;
u8 *raw_packet;
+ int iter_start;
+ int iter_end;
int ret;
int i;
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
- for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ if (ipv4) {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ } else {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ }
+
+ for (i = iter_start; i <= iter_end; i++) {
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
-
- ip->saddr = fd_data->src_ip;
- ip->daddr = fd_data->dst_ip;
- ip->protocol = 0;
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
-
- *((__force __be16 *)(payload + off)) = pattern;
- }
+ /* IPv4 uses IPPROTO_IP here, while IPv6 uses IPPROTO_NONE (no next header) */
+ (void)i40e_create_dummy_packet
+ (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
+ fd_data);
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* The packet buffer wasn't added to the ring so we
- * need to free it now.
- */
- kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- }
+ payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
+ I40E_IP6_DUMMY_PACKET_LEN;
+ ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
+ payload_offset, i);
+ if (ret)
+ goto err;
}
- if (add)
- pf->fd_ip4_filter_cnt++;
- else
- pf->fd_ip4_filter_cnt--;
+ i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
+ &pf->fd_ip6_filter_cnt);
return 0;
+err:
+ kfree(raw_packet);
+ return ret;
}
/**
@@ -469,37 +592,68 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add)
{
+ enum ip_ver { ipv6 = 0, ipv4 = 1 };
struct i40e_pf *pf = vsi->back;
int ret;
switch (input->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
break;
case UDP_V4_FLOW:
- ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
break;
case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
+ break;
+ case TCP_V6_FLOW:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case UDP_V6_FLOW:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case SCTP_V6_FLOW:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
break;
case IP_USER_FLOW:
- switch (input->ip4_proto) {
+ switch (input->ipl4_proto) {
case IPPROTO_TCP:
- ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
break;
case IPPROTO_UDP:
- ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
break;
case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
break;
case IPPROTO_IP:
- ret = i40e_add_del_fdir_ipv4(vsi, input, add);
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
break;
default:
/* We cannot support masking based on protocol */
dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
- input->ip4_proto);
+ input->ipl4_proto);
+ return -EINVAL;
+ }
+ break;
+ case IPV6_USER_FLOW:
+ switch (input->ipl4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_IP:
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
+ break;
+ default:
+ /* We cannot support masking based on protocol */
+ dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
+ input->ipl4_proto);
return -EINVAL;
}
break;
@@ -1416,6 +1570,17 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for the given ring.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1443,6 +1608,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->rx_offset = i40e_rx_offset(rx_ring);
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -1478,17 +1644,6 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
unsigned int size)
{
@@ -1497,8 +1652,8 @@ static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = i40e_rx_offset(rx_ring) ?
- SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -1549,7 +1704,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = i40e_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -1793,7 +1948,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
skb_record_rx_queue(skb, rx_ring->queue_index);
if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+ __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(vlan_tag));
@@ -1809,9 +1964,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
* @skb: pointer to current skb being fixed
* @rx_desc: pointer to the EOP Rx descriptor
*
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
* In addition if skb is not at least 60 bytes we need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
*
@@ -1844,46 +1996,15 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
}
/**
- * i40e_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool i40e_page_is_reusable(struct page *page)
-{
- return (page_to_nid(page) == numa_mem_id()) &&
- !page_is_pfmemalloc(page);
-}
-
-/**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
+ * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
+ * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * the page freed.
+ */
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
@@ -1891,7 +2012,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!i40e_page_is_reusable(page)))
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -1937,7 +2058,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2151,25 +2272,13 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
* i40e_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
- **/
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
+ */
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+ union i40e_rx_desc *rx_desc)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
@@ -2344,17 +2453,18 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
**/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
+ struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
bool failure = false;
struct xdp_buff xdp;
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+ frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
@@ -2406,12 +2516,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- i40e_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
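xdp_init_buff()/xdp_prepare_buff() replace the open-coded field assignments removed above: the ring-constant parts (frame_sz, rxq) are set once per NAPI poll, the per-frame layout once per descriptor. A sketch of what the net/xdp.h helpers roughly expand to:

	/* xdp_init_buff(): per-poll invariants */
	xdp.frame_sz = frame_sz;
	xdp.rxq = &rx_ring->xdp_rxq;

	/* xdp_prepare_buff(): per-frame layout */
	xdp.data_hard_start = hard_start;
	xdp.data = hard_start + offset;
	xdp.data_end = xdp.data + size;
	/* meta_valid == true makes data_meta start at data */
	xdp.data_meta = xdp.data;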
@@ -2448,7 +2557,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
- if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+ i40e_inc_ntc(rx_ring);
+ if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
@@ -3113,13 +3223,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_proto = ip.v4->protocol;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 5f531b195959..86fed05b4f19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -387,6 +387,7 @@ struct i40e_ring {
*/
struct i40e_channel *ch;
+ u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c0bdc666f557..5c10faaca790 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -517,6 +517,7 @@ struct i40e_dcbx_config {
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
@@ -1420,6 +1421,8 @@ struct i40e_lldp_variables {
#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
#define I40E_VERIFY_TAG_SHIFT 31
#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+#define I40E_VLAN_SRC_SHIFT 55
+#define I40E_VLAN_SRC_MASK (0x1ULL << I40E_VLAN_SRC_SHIFT)
#define I40E_FLEX_50_SHIFT 13
#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..1b6ec9be155a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->queues_enabled) {
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
- } else if (vf->link_forced) {
+ if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -1772,7 +1766,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1775,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- vf->queues_enabled = true;
-
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- /* Immediately mark queues as disabled */
- vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -4046,20 +4035,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4053,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5491215d81de..091e32c1bb46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,7 +98,6 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
- bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 47eb9c584a12..fc32c5019b0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -215,9 +215,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
-
- count--;
- } while (count);
+ } while (--count);
no_buffers:
if (rx_ring->next_to_use != ntu) {
@@ -250,27 +248,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
- return NULL;
+ goto out;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
+out:
xsk_buff_free(xdp);
return skb;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp_buff,
+ union i40e_rx_desc *rx_desc,
+ unsigned int *rx_packets,
+ unsigned int *rx_bytes,
+ unsigned int size,
+ unsigned int xdp_res)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct sk_buff *skb;
+
+ *rx_packets = 1;
+ *rx_bytes = size;
+
+ if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+ return;
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
+ if (xdp_res == I40E_XDP_CONSUMED) {
+ xsk_buff_free(xdp_buff);
+ return;
+ }
+
+ if (xdp_res == I40E_XDP_PASS) {
+ /* NB! We are not checking for errors using
+ * i40e_test_staterr() with
+ * BIT(I40E_RXD_QW1_ERROR_SHIFT) because SBP is *not*
+ * set in PRT_SBPVSI (it is not set by default).
+ */
+ skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ if (eth_skb_pad(skb)) {
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ *rx_bytes = skb->len;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ return;
+ }
+
+ /* Should never get here, as all valid cases have been handled already.
+ */
+ WARN_ON_ONCE(1);
}
/**
@@ -284,17 +323,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
- struct sk_buff *skb;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi;
+ unsigned int rx_packets;
+ unsigned int rx_bytes;
+ struct xdp_buff *bi;
unsigned int size;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -307,11 +349,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- xsk_buff_free(*bi);
- *bi = NULL;
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ xsk_buff_free(bi);
+ next_to_clean = (next_to_clean + 1) & count_mask;
continue;
}
@@ -320,53 +360,22 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- (*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
-
- xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
- if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(*bi);
-
- *bi = NULL;
- total_rx_bytes += size;
- total_rx_packets++;
-
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
- continue;
- }
-
- /* XDP_PASS path */
-
- /* NB! We are not checking for errors using
- * i40e_test_staterr with
- * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
- * SBP is *not* set in PRT_SBPVSI (default not set).
- */
- skb = i40e_construct_skb_zc(rx_ring, *bi);
- *bi = NULL;
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- break;
- }
-
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
-
- if (eth_skb_pad(skb))
- continue;
-
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- i40e_process_skb_fields(rx_ring, rx_desc, skb);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi->data_end = bi->data + size;
+ xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+ &rx_bytes, size, xdp_res);
+ total_rx_packets += rx_packets;
+ total_rx_bytes += rx_bytes;
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+ next_to_clean = (next_to_clean + 1) & count_mask;
}
+ rx_ring->next_to_clean = next_to_clean;
+ cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
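The zero-copy clean loop now assumes the descriptor count is a power of two: next_to_clean advances with a mask instead of a compare-and-reset, and the free-slot count at the end is the same modular arithmetic. A worked sketch with a hypothetical ring of 512 descriptors (count_mask = 511):

	next_to_clean = (511 + 1) & 511;	/* wraps to 0 without a branch */

	/* the free-slot count also survives the wrap, e.g. with
	 * next_to_clean = 3 and next_to_use = 509:
	 * (3 - 509 - 1) & 511 == (-507) & 511 == 5
	 */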
@@ -374,7 +383,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ if (failure || next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
@@ -444,7 +453,7 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
struct i40e_tx_desc *tx_desc;
tx_desc = I40E_TX_DESC(xdp_ring, ntu);
- tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+ tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}
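The cpu_to_le64() added here is an endianness fix: cmd_type_offset_bsz is a little-endian (__le64) descriptor field, so OR-ing in a host-order constant would set the wrong bits on big-endian hosts, while on little-endian the conversion compiles to nothing. Schematically:

	/* convert the constant to the field's byte order before the OR */
	__le64 rs = cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);

	tx_desc->cmd_type_offset_bsz |= rs;	/* correct on LE and BE */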
/**
@@ -604,16 +613,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!rx_bi)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
- rx_bi = NULL;
}
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 256fa07d54d5..ffaf2742a2e0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1142,19 +1142,6 @@ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
}
/**
- * iavf_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool iavf_page_is_reusable(struct page *page)
-{
- return (page_to_nid(page) == numa_mem_id()) &&
- !page_is_pfmemalloc(page);
-}
-
-/**
* iavf_can_reuse_rx_page - Determine if this page can be reused by
* the adapter for another receive
*
@@ -1187,7 +1174,7 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!iavf_page_is_reusable(page)))
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index ed08ace4f05a..647e7fde11b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -911,7 +911,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
return;
}
- speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+ speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
if (!speed)
return;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 6da4f43f2348..73da4f71f530 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -24,6 +24,7 @@ ice-y := ice_main.o \
ice_flow.o \
ice_devlink.o \
ice_fw_update.o \
+ ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 56725356a17b..357706444dd5 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -39,6 +39,7 @@
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
@@ -55,6 +56,7 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_lag.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -68,7 +70,9 @@
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MIN_MSIX 2
+#define ICE_MIN_LAN_TXRX_MSIX 1
+#define ICE_MIN_LAN_OICR_MSIX 1
+#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
@@ -164,7 +168,7 @@ struct ice_tc_cfg {
struct ice_res_tracker {
u16 num_entries;
u16 end;
- u16 list[1];
+ u16 list[];
};
struct ice_qs_cfg {
@@ -324,9 +328,11 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xsk_buff_pool **xsk_pools;
- u16 num_xsk_pools_used;
- u16 num_xsk_pools;
+
+ /* back reference to the aggregator node this VSI
+ * corresponds to
+ */
+ struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -375,6 +381,13 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_agg_node {
+ u32 agg_id;
+#define ICE_MAX_VSIS_IN_AGG_NODE 64
+ u32 num_vsis;
+ u8 valid;
+};
+
struct ice_pf {
struct pci_dev *pdev;
@@ -441,9 +454,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
-#ifdef CONFIG_DCB
u16 dcbx_cap;
-#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -453,6 +464,15 @@ struct ice_pf {
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
+ struct ice_lag *lag; /* Link Aggregation information */
+
+#define ICE_INVALID_AGG_NODE_ID 0
+#define ICE_PF_AGG_NODE_ID_START 1
+#define ICE_MAX_PF_AGG_NODES 32
+ struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
+#define ICE_VF_AGG_NODE_ID_START 65
+#define ICE_MAX_VF_AGG_NODES 32
+ struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
struct ice_netdev_priv {
@@ -515,17 +535,15 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
- !ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return pools[qid];
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
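With <net/xdp_sock_drv.h> pulled in, the driver can drop its per-VSI shadow array of pools: the core already records the buffer pool per queue id when the AF_XDP socket is bound. A minimal usage sketch:

	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(netdev, queue_id);
	if (!pool)
		return NULL;	/* no zero-copy socket bound to this queue */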
/**
@@ -555,11 +573,31 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
return pf->vsi[pf->ctrl_vsi_idx];
}
+/**
+ * ice_set_sriov_cap - enable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_set_sriov_cap(struct ice_pf *pf)
+{
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1)
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
+/**
+ * ice_clear_sriov_cap - disable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_clear_sriov_cap(struct ice_pf *pf)
+{
+ clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b06fbe99d8e9..80186589153b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -695,6 +695,18 @@ struct ice_aqc_sched_elem_cmd {
__le32 addr_low;
};
+struct ice_aqc_txsched_move_grp_info_hdr {
+ __le32 src_parent_teid;
+ __le32 dest_parent_teid;
+ __le16 num_elems;
+ __le16 reserved;
+};
+
+struct ice_aqc_move_elem {
+ struct ice_aqc_txsched_move_grp_info_hdr hdr;
+ __le32 teid[];
+};
+
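ice_aqc_move_elem ends in a flexible array, so callers size the command buffer at allocation time. A plausible allocation sketch using struct_size() from <linux/overflow.h> (the count here is illustrative, not from this patch):

	struct ice_aqc_move_elem *buf;
	u16 num_teids = 1;	/* hypothetical element count */

	buf = kzalloc(struct_size(buf, teid, num_teids), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;
	buf->hdr.num_elems = cpu_to_le16(num_teids);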
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -1334,33 +1346,6 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
-/* The result of netlist NVM read comes in a TLV format. The actual data
- * (netlist header) starts from word offset 1 (byte 2). The FW strips
- * out the type field from the TLV header so all the netlist fields
- * should adjust their offset value by 1 word (2 bytes) in order to map
- * their correct location.
- */
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
-#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
-#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
-
-/* netlist ID block field offsets (word offsets) */
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
-#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
-#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
-#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
-#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
-#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
-#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
-
/* Used for NVM Set Package Data command - 0x070A */
struct ice_aqc_nvm_pkg_data {
u8 reserved[3];
@@ -1555,6 +1540,16 @@ struct ice_aqc_lldp_stop_start_specific_agent {
u8 reserved[15];
};
+/* LLDP Filter Control (direct 0x0A0A) */
+struct ice_aqc_lldp_filter_ctrl {
+ u8 cmd_flags;
+#define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0
+#define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1
+ u8 reserved1;
+ __le16 vsi_num;
+ u8 reserved2[12];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1878,6 +1873,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
+ struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1977,6 +1973,7 @@ enum ice_adminq_opc {
ice_aqc_opc_add_sched_elems = 0x0401,
ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
+ ice_aqc_opc_move_sched_elems = 0x0408,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
@@ -2018,6 +2015,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
+ ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 6d7e7dd0ebe2..3d9475e222cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -110,7 +110,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (status)
return status;
- resp = (struct ice_aqc_manage_mac_read_resp *)buf;
+ resp = buf;
flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
@@ -907,6 +907,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
goto err_unroll_alloc;
}
+ ice_sched_get_psm_clk_freq(hw);
/* Initialize port_info struct with scheduler data */
status = ice_sched_init_port(hw->port_info);
@@ -1653,7 +1654,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
if (!buf)
return ICE_ERR_PARAM;
- if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ if (buf_size < flex_array_size(buf, elem, num_entries))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
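flex_array_size() makes the buffer check overflow-safe: it computes num_entries * sizeof(buf->elem[0]) but saturates to SIZE_MAX if the multiplication would wrap, so an absurdly large num_entries can never slip past the comparison. Roughly:

	size_t need = flex_array_size(buf, elem, num_entries);

	/* need == num_entries * sizeof(buf->elem[0]), or SIZE_MAX on
	 * overflow -- either way "buf_size < need" rejects bad input
	 */
	if (buf_size < need)
		return ICE_ERR_PARAM;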
@@ -1979,7 +1980,7 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+ cap_resp = buf;
memset(func_p, 0, sizeof(*func_p));
@@ -2109,7 +2110,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+ cap_resp = buf;
memset(dev_p, 0, sizeof(*dev_p));
@@ -4078,6 +4079,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
for (i = 0; i < ICE_SW_LKUP_LAST; i++)
list_replace_init(&sw->recp_list[i].filt_rules,
&sw->recp_list[i].filt_replay_rules);
+ ice_sched_replay_agg_vsi_preinit(hw);
return 0;
}
@@ -4109,6 +4111,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ if (!status)
+ status = ice_replay_vsi_agg(hw, vsi_handle);
return status;
}
@@ -4122,6 +4126,7 @@ void ice_replay_post(struct ice_hw *hw)
{
/* Delete old entries from replay filter list head */
ice_rm_all_sw_replay_rule_info(hw);
+ ice_sched_replay_agg(hw);
}
/**
@@ -4366,3 +4371,50 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
+
+/**
+ * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl
+ * @hw: pointer to HW struct
+ */
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
+{
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
+ hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
+ * @hw: pointer to HW struct
+ * @vsi_num: absolute HW index for VSI
+ * @add: true to add a filter, false to remove it
+ */
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+{
+ struct ice_aqc_lldp_filter_ctrl *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_filter_ctrl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
+
+ if (add)
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
+ else
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
+
+ cmd->vsi_num = cpu_to_le16(vsi_num);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
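A hypothetical caller sketch (not part of this patch): since older firmware does not implement opcode 0x0A0A, the command should be gated on the capability check above:

	if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
		enum ice_status status;

		status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, true);
		if (status)
			dev_dbg(ice_pf_to_dev(pf), "adding LLDP Rx filter failed\n");
	}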
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3ebb973878c7..baf4064fcbfe 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -175,4 +175,7 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 4db12d1f5808..b2d8a5932b1d 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -838,7 +838,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*/
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ struct ice_aq_desc *cq_desc = desc;
u16 len;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
@@ -868,7 +868,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (buf_len < len)
len = buf_len;
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 2a3147ee0bbb..e42727941ef5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -850,9 +850,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
return ICE_ERR_PARAM;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
else if (dcbx_mode == ICE_DCBX_MODE_CEE)
- dcbx_cfg = &pi->desired_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
* or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
@@ -863,7 +863,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
goto out;
/* Get Remote DCB Config */
- dcbx_cfg = &pi->remote_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
@@ -892,14 +892,14 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
/* CEE mode */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
}
@@ -916,26 +916,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*/
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret = 0;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
- pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
- ret = ice_get_dcb_cfg(pi);
+ ret = ice_get_dcb_cfg(hw->port_info);
if (ret)
return ret;
- pi->is_sw_lldp = false;
- } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ qos_cfg->is_sw_lldp = false;
+ } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -943,7 +943,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
if (ret)
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
}
return ret;
@@ -958,21 +958,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*/
enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
- pi->is_sw_lldp = !ena_mib;
+ qos_cfg->is_sw_lldp = !ena_mib;
return ret;
}
@@ -1270,7 +1270,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
hw = pi->hw;
/* update the HW local config */
- dcbcfg = &pi->local_dcbx_cfg;
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 36abd6b7280c..1e8f71ffc8ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -28,7 +28,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
- dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
@@ -134,7 +134,7 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
else
mode = DCB_CAP_DCBX_LLD_MANAGED;
- if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
return mode | DCB_CAP_DCBX_VER_CEE;
else
return mode | DCB_CAP_DCBX_VER_IEEE;
@@ -277,10 +277,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
int ret = ICE_DCB_NO_HW_CHG;
struct ice_vsi *pf_vsi;
- curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* FW does not care if change happened */
- if (!pf->hw.port_info->is_sw_lldp)
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ret = ICE_DCB_HW_CHG_RST;
/* Enable DCB tagging only when more than one TC */
@@ -327,7 +327,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
*/
- if (pf->hw.port_info->is_sw_lldp) {
+ if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Set DCB Config failed\n");
@@ -360,7 +360,7 @@ free_cfg:
*/
static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
{
- struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
u8 i;
/* Ensure ETS recommended DCB configuration is not already set */
@@ -446,7 +446,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
mutex_lock(&pf->tc_mutex);
- if (!pf->hw.port_info->is_sw_lldp)
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
@@ -455,9 +455,9 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- if (!pf->hw.port_info->is_sw_lldp) {
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_cfg_lldp_mib_change(&pf->hw, true);
- if (ret && !pf->hw.port_info->is_sw_lldp) {
+ if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
dev_err(dev, "Failed to register for MIB changes\n");
goto dcb_error;
}
@@ -510,11 +510,12 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
int ret = 0;
pi = pf->hw.port_info;
- newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
+ newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
+ GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg, locked))
@@ -545,7 +546,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
if (!dcbcfg)
return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
@@ -608,7 +609,7 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
*/
static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct device *dev = ice_pf_to_dev(pf);
int ret;
@@ -638,7 +639,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
*/
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
u8 tc_map = 0;
int v, ret;
@@ -691,7 +692,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
port_info = hw->port_info;
err = ice_init_dcb(hw, false);
- if (err && !port_info->is_sw_lldp) {
+ if (err && !port_info->qos_cfg.is_sw_lldp) {
dev_err(dev, "Error initializing DCB %d\n", err);
goto dcb_init_err;
}
@@ -858,7 +859,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
- &pi->remote_dcbx_cfg);
+ &pi->qos_cfg.remote_dcbx_cfg);
if (ret) {
dev_err(dev, "Failed to get remote DCB config\n");
return;
@@ -868,10 +869,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
- tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
+ tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
- memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0,
+ sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info);
@@ -881,7 +883,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
/* No change detected in DCBX configs */
- if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg,
+ sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
goto out;
}
@@ -889,13 +892,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pf->dcbx_cap = ice_dcb_get_mode(pi, false);
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
- &pi->local_dcbx_cfg);
- ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
+ &pi->qos_cfg.local_dcbx_cfg);
+ ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg);
if (!need_reconfig)
goto out;
/* Enable DCB tagging only when more than one TC */
- if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+ if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 87f91b750d59..468a63f7eff9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -34,12 +34,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
{
struct ice_dcbx_cfg *dcbxcfg;
- struct ice_port_info *pi;
struct ice_pf *pf;
pf = ice_netdev_to_pf(netdev);
- pi = pf->hw.port_info;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ets->willing = dcbxcfg->etscfg.willing;
ets->ets_cap = dcbxcfg->etscfg.maxtcs;
@@ -74,7 +72,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -136,7 +134,7 @@ ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
return -EINVAL;
- *num = IEEE_8021QAZ_MAX_TCS;
+ *num = pf->hw.func_caps.common_cap.maxtc;
return 0;
}
@@ -159,6 +157,11 @@ static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_qos_cfg *qos_cfg;
+
+	/* if the FW LLDP agent is running, DCBNL is not allowed to change mode */
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return ICE_DCB_NO_HW_CHG;
/* No support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -171,10 +174,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
return ICE_DCB_NO_HW_CHG;
pf->dcbx_cap = mode;
+ qos_cfg = &pf->hw.port_info->qos_cfg;
if (mode & DCB_CAP_DCBX_VER_CEE)
- pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
else
- pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -225,7 +229,7 @@ static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
struct ice_dcbx_cfg *dcbxcfg;
int i;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
pfc->pfc_cap = dcbxcfg->pfc.pfccap;
pfc->pfc_en = dcbxcfg->pfc.pfcena;
pfc->mbc = dcbxcfg->pfc.mbc;
@@ -256,7 +260,7 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
if (pfc->pfc_cap)
new_cfg->pfc.pfccap = pfc->pfc_cap;
@@ -293,9 +297,9 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ *setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
- prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+ prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
}
/**
@@ -316,7 +320,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
if (set)
@@ -338,7 +342,7 @@ static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
struct ice_port_info *pi = pf->hw.port_info;
/* Return enabled if any UP enabled for PFC */
- if (pi->local_dcbx_cfg.pfc.pfcena)
+ if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
return 1;
return 0;
@@ -378,8 +382,8 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
if (state) {
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
- memcpy(&pf->hw.port_info->desired_dcbx_cfg,
- &pf->hw.port_info->local_dcbx_cfg,
+ memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
+ &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
sizeof(struct ice_dcbx_cfg));
} else {
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
@@ -413,7 +417,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
*pgid);
}
@@ -444,7 +448,7 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -474,7 +478,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
- *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ *bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
pgid, *bw_pct);
}
@@ -498,7 +502,7 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
}
@@ -528,7 +532,7 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
}
/**
@@ -699,9 +703,9 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
ret = -EINVAL;
@@ -751,7 +755,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
mutex_lock(&pf->tc_mutex);
- old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps <= 1)
goto delapp_out;
@@ -760,7 +764,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
if (ret)
goto delapp_out;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
for (i = 1; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
@@ -813,7 +817,7 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -884,7 +888,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
for (i = 0; i < dcbxcfg->numapps; i++) {
u8 prio, tc_map;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 29d6192b15f3..cf685eeea198 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,132 +6,226 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+/* context for devlink info version reporting */
+struct ice_info_ctx {
+ char buf[128];
+ struct ice_orom_info pending_orom;
+ struct ice_nvm_info pending_nvm;
+ struct ice_netlist_info pending_netlist;
+ struct ice_hw_dev_caps dev_caps;
+};
+
+/* The following functions are used to format specific strings for various
+ * devlink info versions. The ctx parameter is used to provide the storage
+ * buffer, as well as any ancillary information calculated when the info
+ * request was made.
+ *
+ * If a version does not exist, for example when attempting to get the
+ * inactive version of flash when there is no pending update, the function
+ * should leave the buffer in the ctx structure empty and return 0.
+ */
+
+static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%8phD", dsn);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
- status = ice_read_pba_string(hw, (u8 *)buf, len);
+ status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
return -EIO;
return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
hw->fw_patch);
return 0;
}
-static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
return 0;
}
-static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "0x%08x", hw->fw_build);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+
+ return 0;
+}
+
+static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_orom_info *orom = &pf->hw.flash.orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
+
+ return 0;
+}
+
+static int
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_orom_info *orom = &ctx->pending_orom;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_orom_info *orom = &pf->hw.nvm.orom;
+ struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
- snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
+static int
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
{
- struct ice_nvm_info *nvm = &pf->hw.nvm;
+ struct ice_nvm_info *nvm = &ctx->pending_nvm;
- snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
return 0;
}
-static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_nvm_info *nvm = &pf->hw.nvm;
+ struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
- snprintf(buf, len, "0x%08x", nvm->eetrack);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
+static int
+ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_nvm_info *nvm = &ctx->pending_nvm;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%s", hw->active_pkg_name);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
pkg->draft);
return 0;
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+ struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
netlist->cust_ver);
return 0;
}
-static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+
+ return 0;
+}
+
+static int
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
{
- struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+ struct ice_netlist_info *netlist = &ctx->pending_netlist;
- snprintf(buf, len, "0x%08x", netlist->hash);
+ /* The netlist version fields are BCD formatted */
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+ netlist->cust_ver);
return 0;
}
-#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
-#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
+static int
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_netlist_info *netlist = &ctx->pending_netlist;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+
+ return 0;
+}
+
+#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
+#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
+#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
+
+/* The combined() macro inserts both a running entry and a stored entry. The
+ * running entry always reports the version from the active handler. The
+ * stored entry first tries the pending handler, and falls back to the active
+ * handler if the pending function does not report a version. The pending
+ * handler should check the status of a pending update for the relevant flash
+ * component, and fill in the buffer only when a valid pending version is
+ * available. This keeps the stored and running versions in sync whenever no
+ * update is pending.
+ */
+#define combined(key, active, pending) \
+ running(key, active), \
+ stored(key, pending, active)
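
For clarity, here is how one combined() line expands given the three macro definitions above (an illustration implied by the macros, not literal patch content):

    /* combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
     *          ice_info_orom_ver, ice_info_pending_orom_ver)
     * expands to the two table entries:
     */
    { ICE_VERSION_RUNNING, DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
      ice_info_orom_ver, NULL },
    { ICE_VERSION_STORED, DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
      ice_info_pending_orom_ver, ice_info_orom_ver },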
enum ice_version_type {
ICE_VERSION_FIXED,
@@ -142,20 +236,21 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, char *buf, size_t len);
+ int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
running("fw.mgmt.api", ice_info_fw_api),
running("fw.mgmt.build", ice_info_fw_build),
- running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
- running("fw.psid.api", ice_info_nvm_ver),
- running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
+ combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
+ combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
+ combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
- running("fw.netlist", ice_info_netlist_ver),
- running("fw.netlist.build", ice_info_netlist_build),
+ combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
+ combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};
/**
@@ -174,60 +269,128 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
- char buf[100];
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_info_ctx *ctx;
+ enum ice_status status;
size_t i;
int err;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* discover capabilities first */
+ status = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (status) {
+ err = -EIO;
+ goto out_free_ctx;
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
+ status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+ /* disable display of pending Option ROM */
+ ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
+ }
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
+ status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+			/* disable display of pending NVM version */
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
+ }
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
+ status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+			/* disable display of pending Netlist version */
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
+ }
+ }
+
err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
- return err;
+ goto out_free_ctx;
}
- ice_info_get_dsn(pf, buf, sizeof(buf));
+ ice_info_get_dsn(pf, ctx);
- err = devlink_info_serial_number_put(req, buf);
+ err = devlink_info_serial_number_put(req, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
- return err;
+ goto out_free_ctx;
}
for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
enum ice_version_type type = ice_devlink_versions[i].type;
const char *key = ice_devlink_versions[i].key;
- err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
+ memset(ctx->buf, 0, sizeof(ctx->buf));
+
+ err = ice_devlink_versions[i].getter(pf, ctx);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- return err;
+ goto out_free_ctx;
+ }
+
+ /* If the default getter doesn't report a version, use the
+ * fallback function. This is primarily useful in the case of
+ * "stored" versions that want to report the same value as the
+ * running version in the normal case of no pending update.
+ */
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
+ err = ice_devlink_versions[i].fallback(pf, ctx);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
+ goto out_free_ctx;
+ }
}
+ /* Do not report missing versions */
+ if (ctx->buf[0] == '\0')
+ continue;
+
switch (type) {
case ICE_VERSION_FIXED:
- err = devlink_info_version_fixed_put(req, key, buf);
+ err = devlink_info_version_fixed_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
- return err;
+ goto out_free_ctx;
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, buf);
+ err = devlink_info_version_running_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
- return err;
+ goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, buf);
+ err = devlink_info_version_stored_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
- return err;
+ goto out_free_ctx;
}
break;
}
}
- return 0;
+out_free_ctx:
+ kfree(ctx);
+ return err;
}
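
With this fallback scheme, devlink dev info reports identical running and stored versions when no update is pending, and diverging ones once a new image is staged in flash. A hypothetical output sketch (device address and version numbers are illustrative only):

    $ devlink dev info pci/0000:18:00.0
    pci/0000:18:00.0:
      driver ice
      versions:
          running:
            fw.undi 1.2589.0
          stored:
            fw.undi 1.2620.0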
/**
@@ -433,7 +596,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
void *nvm_data;
u32 nvm_size;
- nvm_size = hw->nvm.flash_size;
+ nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
if (!nvm_data)
return -ENOMEM;
@@ -533,7 +696,7 @@ void ice_devlink_init_regions(struct ice_pf *pf)
struct device *dev = ice_pf_to_dev(pf);
u64 nvm_size;
- nvm_size = pf->hw.nvm.flash_size;
+ nvm_size = pf->hw.flash.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
nvm_size);
if (IS_ERR(pf->nvm_region)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9e8e9531cd87..2dcfa685b763 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include <net/dcbnl.h>
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -179,8 +180,8 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct ice_orom_info *orom;
struct ice_nvm_info *nvm;
- nvm = &hw->nvm;
- orom = &nvm->orom;
+ nvm = &hw->flash.nvm;
+ orom = &hw->flash.orom;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
@@ -188,7 +189,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
* determined) which contains more pertinent information.
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver,
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
@@ -250,7 +251,7 @@ static int ice_get_eeprom_len(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
- return (int)pf->hw.nvm.flash_size;
+ return (int)pf->hw.flash.flash_size;
}
static int
@@ -1238,10 +1239,18 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(dev, "Fail to init DCB\n");
+
+ pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
+ pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
enum ice_status status;
bool dcbx_agent_status;
+ /* Remove rule to direct LLDP packets to default VSI.
+ * The FW LLDP engine will now be consuming them.
+ */
+ ice_cfg_sw_lldp(vsi, false, false);
+
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
*/
@@ -1270,16 +1279,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (status)
dev_dbg(dev, "Fail to init DCB\n");
- /* Remove rule to direct LLDP packets to default VSI.
- * The FW LLDP engine will now be consuming them.
- */
- ice_cfg_sw_lldp(vsi, false, false);
-
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
+ pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
+ pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
+
ice_nway_reset(netdev);
}
}
@@ -2979,7 +2986,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = 0;
pause->tx_pause = 0;
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
@@ -3031,7 +3038,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
@@ -3258,8 +3265,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_txq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3268,8 +3275,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_rxq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3322,6 +3329,18 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
}
/**
+ * ice_get_valid_rss_size - return valid number of RSS queues
+ * @hw: pointer to the HW structure
+ * @new_size: requested RSS queues
+ */
+static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
+{
+ struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+
+ return min_t(int, new_size, BIT(caps->rss_table_entry_width));
+}
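
A quick worked example of the clamp (assuming a hypothetical capability value): with rss_table_entry_width of 7, the LUT can address BIT(7) = 128 queues, so oversized requests are capped while smaller ones pass through:

    /* assuming hw->func_caps.common_cap.rss_table_entry_width == 7 */
    ice_get_valid_rss_size(hw, 256); /* returns 128 */
    ice_get_valid_rss_size(hw, 64);  /* returns 64 */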
+
+/**
* ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
* @vsi: VSI to reconfigure RSS LUT on
* @req_rss_size: requested range of queue numbers for hashing
@@ -3348,14 +3367,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
return -ENOMEM;
/* set RSS LUT parameters */
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
vsi->rss_size = 1;
- } else {
- struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
-
- vsi->rss_size = min_t(int, req_rss_size,
- BIT(caps->rss_table_entry_width));
- }
+ else
+ vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
/* create/set RSS LUT */
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
@@ -3434,9 +3449,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
ice_vsi_recfg_qs(vsi, new_rx, new_tx);
- if (new_rx && !netif_is_rxfh_configured(dev))
+ if (!netif_is_rxfh_configured(dev))
return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ /* Update rss_size due to change in Rx queues */
+ vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 2d27f66ac853..192729546bbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
- input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+ /* if no protocol requested, use IPPROTO_NONE */
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ input->ip.v6.proto = IPPROTO_NONE;
+ else
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index f5e81b555353..5e1fd30c0a0f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1525,7 +1525,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) +
- (count * sizeof(buf->section_entry[0]));
+ flex_array_size(buf, section_entry, count);
buf->data_end = cpu_to_le16(data_end);
return 0;
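
The flex_array_size() helper from <linux/overflow.h> computes the same count-times-element-size product as the removed expression, but saturates to SIZE_MAX on overflow instead of wrapping, so a corrupt section count cannot yield a small bogus size. Roughly (a simplification; the real macro also type-checks the flexible-array member):

    /* flex_array_size(buf, section_entry, count) is approximately: */
    size_t sz = array_size(count, sizeof(buf->section_entry[0]));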
@@ -2727,7 +2727,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_XLT1_RSS:
case ICE_SID_XLT1_ACL:
case ICE_SID_XLT1_PE:
- xlt1 = (struct ice_xlt1_section *)sect;
+ xlt1 = sect;
src = xlt1->value;
sect_len = le16_to_cpu(xlt1->count) *
sizeof(*hw->blk[block_id].xlt1.t);
@@ -2740,7 +2740,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_XLT2_RSS:
case ICE_SID_XLT2_ACL:
case ICE_SID_XLT2_PE:
- xlt2 = (struct ice_xlt2_section *)sect;
+ xlt2 = sect;
src = (__force u8 *)xlt2->value;
sect_len = le16_to_cpu(xlt2->count) *
sizeof(*hw->blk[block_id].xlt2.t);
@@ -2753,7 +2753,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_PROFID_TCAM_RSS:
case ICE_SID_PROFID_TCAM_ACL:
case ICE_SID_PROFID_TCAM_PE:
- pid = (struct ice_prof_id_section *)sect;
+ pid = sect;
src = (u8 *)pid->entry;
sect_len = le16_to_cpu(pid->count) *
sizeof(*hw->blk[block_id].prof.t);
@@ -2766,7 +2766,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_PROFID_REDIR_RSS:
case ICE_SID_PROFID_REDIR_ACL:
case ICE_SID_PROFID_REDIR_PE:
- pr = (struct ice_prof_redir_section *)sect;
+ pr = sect;
src = pr->redir_value;
sect_len = le16_to_cpu(pr->count) *
sizeof(*hw->blk[block_id].prof_redir.t);
@@ -2779,7 +2779,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_FLD_VEC_RSS:
case ICE_SID_FLD_VEC_ACL:
case ICE_SID_FLD_VEC_PE:
- es = (struct ice_sw_fv_section *)sect;
+ es = sect;
src = (u8 *)es->fv;
sect_len = (u32)(le16_to_cpu(es->count) *
hw->blk[block_id].es.fvw) *
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8f81b95e679c..dcec0360ce55 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -417,6 +417,11 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
return err;
}
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define ICE_FW_ERASE_TIMEOUT 300
+
/**
* ice_erase_nvm_module - Erase an NVM module and await firmware completion
* @pf: the PF data structure
@@ -449,7 +454,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink = priv_to_devlink(pf);
- devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
status = ice_aq_erase_nvm(hw, module, NULL);
if (status) {
@@ -461,8 +466,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- /* Yes, this really can take minutes to complete */
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
if (err) {
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
component, module, err);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 90abc8612a6a..093a1818a392 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -86,6 +86,9 @@
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
#define QRXFLXP_CNTXT_TS_M BIT(11)
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M ICE_M(0x3, 4)
+#define GLGEN_CLKSTAT_SRC 0x000B826C
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
new file mode 100644
index 000000000000..4599fc3b4ed8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+/* Link Aggregation code */
+
+#include "ice.h"
+#include "ice_lag.h"
+
+/**
+ * ice_lag_nop_handler - no-op Rx handler to disable LAG
+ * @pskb: pointer to skb pointer
+ */
+rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
+{
+ return RX_HANDLER_PASS;
+}
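
The no-op handler exists to occupy the netdev's single rx_handler slot: bonding claims that slot when enslaving a device, so holding it first makes enslavement fail. A minimal sketch of the check that takes effect (simplified, not literal bonding code):

    /* enslavement is refused while another rx_handler is registered;
     * netdev_rx_handler_register() returns -EBUSY in that case
     */
    if (netdev_is_rx_handler_busy(slave_dev))
            return -EBUSY;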
+
+/**
+ * ice_lag_set_primary - set PF LAG state as Primary
+ * @lag: LAG info struct
+ */
+static void ice_lag_set_primary(struct ice_lag *lag)
+{
+ struct ice_pf *pf = lag->pf;
+
+ if (!pf)
+ return;
+
+ if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
+ dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
+ netdev_name(lag->netdev));
+ return;
+ }
+
+ lag->role = ICE_LAG_PRIMARY;
+}
+
+/**
+ * ice_lag_set_backup - set PF LAG state to Backup
+ * @lag: LAG info struct
+ */
+static void ice_lag_set_backup(struct ice_lag *lag)
+{
+ struct ice_pf *pf = lag->pf;
+
+ if (!pf)
+ return;
+
+ if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
+ dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
+ netdev_name(lag->netdev));
+ return;
+ }
+
+ lag->role = ICE_LAG_BACKUP;
+}
+
+/**
+ * ice_display_lag_info - print LAG info
+ * @lag: LAG info struct
+ */
+static void ice_display_lag_info(struct ice_lag *lag)
+{
+ const char *name, *peer, *upper, *role, *bonded, *master;
+ struct device *dev = &lag->pf->pdev->dev;
+
+ name = lag->netdev ? netdev_name(lag->netdev) : "unset";
+ peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
+ upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
+ master = lag->master ? "TRUE" : "FALSE";
+ bonded = lag->bonded ? "BONDED" : "UNBONDED";
+
+ switch (lag->role) {
+ case ICE_LAG_NONE:
+ role = "NONE";
+ break;
+ case ICE_LAG_PRIMARY:
+ role = "PRIMARY";
+ break;
+ case ICE_LAG_BACKUP:
+ role = "BACKUP";
+ break;
+ case ICE_LAG_UNSET:
+ role = "UNSET";
+ break;
+ default:
+ role = "ERROR";
+ }
+
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
+ bonded, peer, upper, role, master);
+}
+
+/**
+ * ice_lag_info_event - handle NETDEV_BONDING_INFO event
+ * @lag: LAG info struct
+ * @ptr: opaque data pointer
+ *
+ * ptr is to be cast to (netdev_notifier_bonding_info *)
+ */
+static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *netdev_tmp;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ const char *lag_netdev_name;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ info = ptr;
+ lag_netdev_name = netdev_name(lag->netdev);
+ bonding_info = &info->bonding_info;
+
+ if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
+ return;
+
+ if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
+ netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
+ goto lag_out;
+ }
+
+ if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
+ netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ goto lag_out;
+ }
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
+ if (!netif_is_ice(netdev_tmp))
+ continue;
+
+ if (netdev_tmp && netdev_tmp != lag->netdev &&
+ lag->peer_netdev != netdev_tmp) {
+ dev_hold(netdev_tmp);
+ lag->peer_netdev = netdev_tmp;
+ }
+ }
+ rcu_read_unlock();
+
+ if (bonding_info->slave.state)
+ ice_lag_set_backup(lag);
+ else
+ ice_lag_set_primary(lag);
+
+lag_out:
+ ice_display_lag_info(lag);
+}
+
+/**
+ * ice_lag_link - handle LAG link event
+ * @lag: LAG info struct
+ * @info: info from the netdev notifier
+ */
+static void
+ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *netdev_tmp, *upper = info->upper_dev;
+ struct ice_pf *pf = lag->pf;
+ int peers = 0;
+
+ if (lag->bonded)
+ dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
+ netdev_name(lag->netdev));
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, netdev_tmp)
+ peers++;
+ rcu_read_unlock();
+
+ if (lag->upper_netdev != upper) {
+ dev_hold(upper);
+ lag->upper_netdev = upper;
+ }
+
+ ice_clear_sriov_cap(pf);
+
+ lag->bonded = true;
+ lag->role = ICE_LAG_UNSET;
+
+	/* if this is the first element in a LAG, mark it as master */
+ lag->master = !!(peers == 1);
+}
+
+/**
+ * ice_lag_unlink - handle unlink event
+ * @lag: LAG info struct
+ * @info: info from netdev notification
+ */
+static void
+ice_lag_unlink(struct ice_lag *lag,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *netdev_tmp, *upper = info->upper_dev;
+ struct ice_pf *pf = lag->pf;
+ bool found = false;
+
+ if (!lag->bonded) {
+ netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
+ return;
+ }
+
+ /* determine if we are in the new LAG config or not */
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
+ if (netdev_tmp == lag->netdev) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (found)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ }
+
+ if (lag->peer_netdev) {
+ dev_put(lag->peer_netdev);
+ lag->peer_netdev = NULL;
+ }
+
+ ice_set_sriov_cap(pf);
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
+ * ice_lag_changeupper_event - handle LAG changeupper event
+ * @lag: LAG info struct
+ * @ptr: opaque pointer data
+ *
+ * ptr is to be cast into netdev_notifier_changeupper_info
+ */
+static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *netdev;
+
+ info = ptr;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ /* not for this netdev */
+ if (netdev != lag->netdev)
+ return;
+
+ if (!info->upper_dev) {
+ netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
+ return;
+ }
+
+ netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
+
+ if (!netif_is_lag_master(info->upper_dev)) {
+ netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ return;
+ }
+
+ if (info->linking)
+ ice_lag_link(lag, info);
+ else
+ ice_lag_unlink(lag, info);
+
+ ice_display_lag_info(lag);
+}
+
+/**
+ * ice_lag_changelower_event - handle LAG changelower event
+ * @lag: LAG info struct
+ * @ptr: opaque data pointer
+ *
+ * ptr to be cast to netdev_notifier_changelowerstate_info
+ */
+static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev)
+ return;
+
+ netdev_dbg(netdev, "bonding info\n");
+
+ if (!netif_is_lag_port(netdev))
+ netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
+}
+
+/**
+ * ice_lag_event_handler - handle LAG events from netdev
+ * @notif_blk: notifier block registered by this netdev
+ * @event: event type
+ * @ptr: opaque data containing notifier event
+ */
+static int
+ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct ice_lag *lag;
+
+ lag = container_of(notif_blk, struct ice_lag, notif_block);
+
+ if (!lag->netdev)
+ return NOTIFY_DONE;
+
+ /* Check that the netdev is in the working namespace */
+ if (!net_eq(dev_net(netdev), &init_net))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ice_lag_changeupper_event(lag, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ ice_lag_changelower_event(lag, ptr);
+ break;
+ case NETDEV_BONDING_INFO:
+ ice_lag_info_event(lag, ptr);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * ice_register_lag_handler - register LAG handler on netdev
+ * @lag: LAG struct
+ */
+static int ice_register_lag_handler(struct ice_lag *lag)
+{
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct notifier_block *notif_blk;
+
+ notif_blk = &lag->notif_block;
+
+ if (!notif_blk->notifier_call) {
+ notif_blk->notifier_call = ice_lag_event_handler;
+ if (register_netdevice_notifier(notif_blk)) {
+ notif_blk->notifier_call = NULL;
+ dev_err(dev, "FAIL register LAG event handler!\n");
+ return -EINVAL;
+ }
+ dev_dbg(dev, "LAG event handler registered\n");
+ }
+ return 0;
+}
+
+/**
+ * ice_unregister_lag_handler - unregister LAG handler on netdev
+ * @lag: LAG struct
+ */
+static void ice_unregister_lag_handler(struct ice_lag *lag)
+{
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct notifier_block *notif_blk;
+
+ notif_blk = &lag->notif_block;
+ if (notif_blk->notifier_call) {
+ unregister_netdevice_notifier(notif_blk);
+ dev_dbg(dev, "LAG event handler unregistered\n");
+ }
+}
+
+/**
+ * ice_init_lag - initialize support for LAG
+ * @pf: PF struct
+ *
+ * Alloc memory for LAG structs and initialize the elements.
+ * Memory will be freed in ice_deinit_lag
+ */
+int ice_init_lag(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+ int err;
+
+ pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
+ if (!pf->lag)
+ return -ENOMEM;
+ lag = pf->lag;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi) {
+ dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
+ err = -EIO;
+ goto lag_error;
+ }
+
+ lag->pf = pf;
+ lag->netdev = vsi->netdev;
+ lag->role = ICE_LAG_NONE;
+ lag->bonded = false;
+ lag->peer_netdev = NULL;
+ lag->upper_netdev = NULL;
+ lag->notif_block.notifier_call = NULL;
+
+ err = ice_register_lag_handler(lag);
+ if (err) {
+ dev_warn(dev, "INIT LAG: Failed to register event handler\n");
+ goto lag_error;
+ }
+
+ ice_display_lag_info(lag);
+
+ dev_dbg(dev, "INIT LAG complete\n");
+ return 0;
+
+lag_error:
+ kfree(lag);
+ pf->lag = NULL;
+ return err;
+}
+
+/**
+ * ice_deinit_lag - Clean up LAG
+ * @pf: PF struct
+ *
+ * Clean up kernel LAG info and free memory
+ * This function is meant to only be called on driver remove/shutdown
+ */
+void ice_deinit_lag(struct ice_pf *pf)
+{
+ struct ice_lag *lag;
+
+ lag = pf->lag;
+
+ if (!lag)
+ return;
+
+ if (lag->pf)
+ ice_unregister_lag_handler(lag);
+
+ if (lag->upper_netdev)
+ dev_put(lag->upper_netdev);
+
+ if (lag->peer_netdev)
+ dev_put(lag->peer_netdev);
+
+ kfree(lag);
+
+ pf->lag = NULL;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
new file mode 100644
index 000000000000..c2e3688dd8fd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_LAG_H_
+#define _ICE_LAG_H_
+
+#include <linux/netdevice.h>
+
+/* LAG roles for netdev */
+enum ice_lag_role {
+ ICE_LAG_NONE,
+ ICE_LAG_PRIMARY,
+ ICE_LAG_BACKUP,
+ ICE_LAG_UNSET
+};
+
+struct ice_pf;
+
+/* LAG info struct */
+struct ice_lag {
+ struct ice_pf *pf; /* backlink to PF struct */
+ struct net_device *netdev; /* this PF's netdev */
+ struct net_device *peer_netdev;
+ struct net_device *upper_netdev; /* upper bonding netdev */
+ struct notifier_block notif_block;
+ u8 bonded:1; /* currently bonded */
+ u8 master:1; /* this is a master */
+ u8 handler:1; /* did we register a rx_netdev_handler */
+	/* each feature blocking bonding will increment this value by one.
+	 * If this value is zero, then bonding is allowed.
+	 */
+ u16 dis_lag;
+ u8 role;
+};
+
+int ice_init_lag(struct ice_pf *pf);
+void ice_deinit_lag(struct ice_pf *pf);
+rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb);
+
+/**
+ * ice_disable_lag - increment LAG disable count
+ * @lag: LAG struct
+ */
+static inline void ice_disable_lag(struct ice_lag *lag)
+{
+	/* If LAG on this PF is not already disabled, disable it */
+ rtnl_lock();
+ if (!netdev_is_rx_handler_busy(lag->netdev)) {
+ if (!netdev_rx_handler_register(lag->netdev,
+ ice_lag_nop_handler,
+ NULL))
+ lag->handler = true;
+ }
+ rtnl_unlock();
+ lag->dis_lag++;
+}
+
+/**
+ * ice_enable_lag - decrement disable count for a PF
+ * @lag: LAG struct
+ *
+ * Decrement the disable counter for a port, and if that count reaches
+ * zero, then remove the no-op Rx handler from that netdev
+ */
+static inline void ice_enable_lag(struct ice_lag *lag)
+{
+ if (lag->dis_lag)
+ lag->dis_lag--;
+ if (!lag->dis_lag && lag->handler) {
+ rtnl_lock();
+ netdev_rx_handler_unregister(lag->netdev);
+ rtnl_unlock();
+ lag->handler = false;
+ }
+}
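
The counter lets several features veto bonding independently; each caller brackets its conflicting state with a disable/enable pair. A hedged usage sketch (hypothetical call site, mirroring how the patch pairs ice_clear_sriov_cap() and ice_set_sriov_cap()):

    /* block bonding while a conflicting feature is active */
    ice_disable_lag(pf->lag);
    /* ... configure the feature that cannot coexist with LAG ... */
    ice_enable_lag(pf->lag);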
+
+/**
+ * ice_is_lag_dis - is LAG disabled
+ * @lag: LAG struct
+ *
+ * Return true if bonding is disabled
+ */
+static inline bool ice_is_lag_dis(struct ice_lag *lag)
+{
+ return !!(lag->dis_lag);
+}
+#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 3df67486d42d..8d4e2ad4328d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
- num_online_cpus());
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
- num_online_cpus());
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+ vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+ max_t(int, vsi->alloc_rxq,
+ vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
@@ -2074,7 +2078,7 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
@@ -2141,11 +2145,18 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
- if (tx)
+ if (tx) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
- else
- status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
+ } else {
+ if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
+ create);
+ } else {
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
+ ICE_FWD_TO_VSI);
+ }
+ }
if (status)
dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
@@ -2154,6 +2165,126 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
/**
+ * ice_set_agg_vsi - set up a scheduler aggregator node and move the VSI into it
+ * @vsi: pointer to the VSI
+ *
+ * This function will allocate a new scheduler aggregator node if needed and
+ * will move the specified VSI into it.
+ */
+static void ice_set_agg_vsi(struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_agg_node *agg_node_iter = NULL;
+ u32 agg_id = ICE_INVALID_AGG_NODE_ID;
+ struct ice_agg_node *agg_node = NULL;
+ int node_offset, max_agg_nodes = 0;
+ struct ice_port_info *port_info;
+ struct ice_pf *pf = vsi->back;
+ u32 agg_node_id_start = 0;
+ enum ice_status status;
+
+	/* create (as needed) a scheduler aggregator node and move the VSI into
+	 * the corresponding aggregator node
+	 * - the PF aggregator node contains VSIs of type _PF and _CTRL
+	 * - VF aggregator nodes will contain VF VSIs
+ */
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return;
+
+ switch (vsi->type) {
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ case ICE_VSI_PF:
+ max_agg_nodes = ICE_MAX_PF_AGG_NODES;
+ agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
+ agg_node_iter = &pf->pf_agg_node[0];
+ break;
+ case ICE_VSI_VF:
+		/* a user can create 'n' VFs on a given PF, but the max number
+		 * of children per aggregator node is 64. The following code
+		 * handles aggregator(s) for VF VSIs: it either selects an
+		 * agg_node that was already created, provided its num_vsis is
+		 * below 64, or selects the next available node, which will be
+		 * created on demand
+ */
+ max_agg_nodes = ICE_MAX_VF_AGG_NODES;
+ agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
+ agg_node_iter = &pf->vf_agg_node[0];
+ break;
+ default:
+ /* other VSI type, handle later if needed */
+ dev_dbg(dev, "unexpected VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
+ return;
+ }
+
+ /* find the appropriate aggregator node */
+ for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
+		/* see if we can find space in a previously created node
+		 * if num_vsis < 64, otherwise skip
+ */
+ if (agg_node_iter->num_vsis &&
+ agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ agg_node_iter++;
+ continue;
+ }
+
+ if (agg_node_iter->valid &&
+ agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
+ agg_id = agg_node_iter->agg_id;
+ agg_node = agg_node_iter;
+ break;
+ }
+
+ /* find unclaimed agg_id */
+ if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
+ agg_id = node_offset + agg_node_id_start;
+ agg_node = agg_node_iter;
+ break;
+ }
+ /* move to next agg_node */
+ agg_node_iter++;
+ }
+
+ if (!agg_node)
+ return;
+
+ /* if selected aggregator node was not created, create it */
+ if (!agg_node->valid) {
+ status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
+ (u8)vsi->tc_cfg.ena_tc);
+ if (status) {
+ dev_err(dev, "unable to create aggregator node with agg_id %u\n",
+ agg_id);
+ return;
+ }
+		/* aggregator node is created, store the needed info */
+ agg_node->valid = true;
+ agg_node->agg_id = agg_id;
+ }
+
+ /* move VSI to corresponding aggregator node */
+ status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
+ (u8)vsi->tc_cfg.ena_tc);
+ if (status) {
+ dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, agg_id);
+ return;
+ }
+
+ /* keep active children count for aggregator node */
+ agg_node->num_vsis++;
+
+	/* cache the aggregator node in the VSI so that after a reset the VSI
+	 * will be moved back into its aggregator node
+	 */
+ vsi->agg_node = agg_node;
+ dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
+ vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
+ vsi->agg_node->num_vsis);
+}
+
+/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2323,6 +2454,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_cfg_sw_lldp(vsi, true, true);
}
+ if (!vsi->agg_node)
+ ice_set_agg_vsi(vsi);
return vsi;
unroll_clear_rings:
@@ -2338,6 +2471,8 @@ unroll_vsi_init:
unroll_get_qs:
ice_vsi_put_qs(vsi);
unroll_vsi_alloc:
+ if (vsi_type == ICE_VSI_VF)
+ ice_enable_lag(pf->lag);
ice_vsi_clear(vsi);
return NULL;
@@ -2665,6 +2800,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c52b9bb0e3ab..2c23c8f468a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -44,6 +44,11 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+bool netif_is_ice(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ice_netdev_ops);
+}
+
/**
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
@@ -430,11 +435,19 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
*/
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
+ int node;
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], locked);
+
+ for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
+ pf->pf_agg_node[node].num_vsis = 0;
+
+ for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
+ pf->vf_agg_node[node].num_vsis = 0;
+
}
/**
@@ -785,15 +798,9 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
u8 mib_type, *buf, *lldpmib = NULL;
u16 len, typelen, offset = 0;
struct ice_lldp_org_tlv *tlv;
- struct ice_hw *hw;
+ struct ice_hw *hw = &pf->hw;
u32 ouisubtype;
- if (!pf) {
- dev_dbg(dev, "%s NULL pf pointer\n", __func__);
- return;
- }
-
- hw = &pf->hw;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib) {
@@ -2482,6 +2489,22 @@ free_qmap:
}
/**
+ * ice_vsi_rx_napi_schedule - schedule NAPI on the VSI's Rx queues
+ * @vsi: VSI to schedule napi on
+ */
+static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_pool)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2525,16 +2548,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_pools) {
- int i;
-
- ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
-
- if (rx_ring->xsk_pool)
- napi_schedule(&rx_ring->q_vector->napi);
- }
- }
+ if (!ret && prog)
+ ice_vsi_rx_napi_schedule(vsi);
return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
@@ -3376,28 +3391,20 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
+ int v_left, v_actual, v_other, v_budget = 0;
struct device *dev = ice_pf_to_dev(pf);
- int v_left, v_actual, v_budget = 0;
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
- /* reserve one vector for miscellaneous handler */
- needed = 1;
+ /* reserve for LAN miscellaneous handler */
+ needed = ICE_MIN_LAN_OICR_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
v_budget += needed;
v_left -= needed;
- /* reserve vectors for LAN traffic */
- needed = min_t(int, num_online_cpus(), v_left);
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve one vector for flow director */
+ /* reserve for flow director */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
needed = ICE_FDIR_MSIX;
if (v_left < needed)
@@ -3406,9 +3413,19 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* total used for non-traffic vectors */
+ v_other = v_budget;
+
+ /* reserve vectors for LAN traffic */
+ needed = min_t(int, num_online_cpus(), v_left);
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_lan_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
-
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
@@ -3420,7 +3437,6 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, v_budget);
-
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
@@ -3430,18 +3446,23 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
- if (v_actual < ICE_MIN_LAN_VECS) {
+ if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_VECS;
+ int v_traffic = v_actual - v_other;
+
+ if (v_actual == ICE_MIN_MSIX ||
+ v_traffic < ICE_MIN_LAN_TXRX_MSIX)
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ else
+ pf->num_lan_msix = v_traffic;
+
+ dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
}
}
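A hedged worked example of the fallback arithmetic above (the concrete
vector counts are hypothetical):

/* Suppose the non-traffic reservation v_other == 2 and the OS grants
 * v_actual == 6 vectors. Then v_traffic = 6 - 2 = 4, and, assuming
 * v_actual exceeds ICE_MIN_MSIX and 4 >= ICE_MIN_LAN_TXRX_MSIX,
 * num_lan_msix becomes 4. If the grant were only ICE_MIN_MSIX, or
 * v_traffic fell below ICE_MIN_LAN_TXRX_MSIX, num_lan_msix would be
 * clamped to ICE_MIN_LAN_TXRX_MSIX instead.
 */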
@@ -3499,9 +3520,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors;
/* set up vector assignment tracking */
- pf->irq_tracker =
- devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
- (sizeof(u16) * vectors), GFP_KERNEL);
+ pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
+ struct_size(pf->irq_tracker, list, vectors),
+ GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
@@ -4237,6 +4258,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_cfg_lldp_mib_change(&pf->hw, true);
}
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
@@ -4359,6 +4383,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+ ice_deinit_lag(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
@@ -4884,9 +4909,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
goto err_update_filters;
}
- /* Add filter for new MAC. If filter exists, just return success */
+ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Although this MAC filter is already present in hardware, it is
+ * possible in some cases (e.g. bonding) that dev_addr was modified
+ * outside of the driver and needs to be restored to this value.
+ */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
@@ -6123,15 +6154,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < (int)netdev->min_mtu) {
- netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
- netdev->min_mtu);
- return -EINVAL;
- } else if (new_mtu > (int)netdev->max_mtu) {
- netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
- netdev->min_mtu);
- return -EINVAL;
- }
/* if a reset is in progress, wait for some time for it to complete */
do {
if (ice_is_reset_in_progress(pf->state)) {
@@ -6156,7 +6178,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
- netdev_err(netdev, "change MTU if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_down err %d\n", err);
return err;
}
@@ -6248,6 +6270,8 @@ const char *ice_stat_str(enum ice_status stat_err)
return "ICE_ERR_OUT_OF_RANGE";
case ICE_ERR_ALREADY_EXISTS:
return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM:
+ return "ICE_ERR_NVM";
case ICE_ERR_NVM_CHECKSUM:
return "ICE_ERR_NVM_CHECKSUM";
case ICE_ERR_BUF_TOO_SHORT:
@@ -6790,6 +6814,4 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index f729cd0c6224..75ccbfc07f99 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -72,7 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
- if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
+ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
@@ -213,7 +213,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
@@ -227,13 +227,186 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
*/
void ice_release_nvm(struct ice_hw *hw)
{
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return;
ice_release_res(hw, ICE_NVM_RES_ID);
}
/**
+ * ice_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the offset of the module from the beginning
+ * of the flash.
+ *
+ * Returns the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank, u16 module)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ enum ice_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case ICE_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case ICE_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case ICE_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n", module);
+ return 0;
+ }
+
+ switch (active_bank) {
+ case ICE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case ICE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n",
+ active_bank);
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case ICE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n", bank);
+ return 0;
+}
+
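A worked example of the bank selection above (all offsets hypothetical):

/* Assume the module pointer is 0x10000 and the bank size is 0x8000,
 * with the control word reporting the second bank as active:
 *
 *   ICE_ACTIVE_FLASH_BANK   -> 0x10000 + 0x8000 = 0x18000 (2nd copy)
 *   ICE_INACTIVE_FLASH_BANK -> 0x10000 + 0x0000 = 0x10000 (1st copy)
 *
 * With the first bank active, the two results are swapped.
 */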
+/**
+ * ice_read_flash_module - Read data from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the hw->flash.banks
+ * data being set up by ice_determine_active_flash_banks() during
+ * initialization.
+ */
+static enum ice_status
+ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
+ u32 offset, u8 *data, u32 length)
+{
+ enum ice_status status;
+ u32 start;
+
+ start = ice_get_flash_bank_offset(hw, bank, module);
+ if (!start) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
+ module);
+ return ICE_ERR_PARAM;
+ }
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ status = ice_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ice_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ice_read_nvm_module - Read from the requested NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the requested NVM module. This includes the
+ * CSS header at the start of the NVM module.
+ */
+static enum ice_status
+ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ enum ice_status status;
+ __le16 data_local;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
+ (__force u8 *)&data_local, sizeof(u16));
+ if (!status)
+ *data = le16_to_cpu(data_local);
+
+ return status;
+}
+
+/**
+ * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ */
+static enum ice_status
+ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+}
+
+/**
+ * ice_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from, in words
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ */
+static enum ice_status
+ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ enum ice_status status;
+ __le16 data_local;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
+ (__force u8 *)&data_local, sizeof(u16));
+ if (!status)
+ *data = le16_to_cpu(data_local);
+
+ return status;
+}
+
+/**
* ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -380,138 +553,246 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
}
/**
- * ice_get_orom_ver_info - Read Option ROM version information
+ * ice_get_nvm_ver_info - Read NVM version information
* @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
*
- * Read the Combo Image version data from the Boot Configuration TLV and fill
- * in the option ROM version data.
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the NVM info structure.
*/
-static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+static enum ice_status
+ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
- u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
- struct ice_orom_info *orom = &hw->nvm.orom;
+ u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status;
- u32 combo_ver;
- status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
- ICE_SR_BOOT_CFG_PTR);
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n");
return status;
}
- /* Boot Configuration Block must have length at least 2 words
- * (Combo Image Version High and Combo Image Version Low)
- */
- if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
- }
+ nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
- &combo_hi);
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n");
+ return status;
+ }
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n");
return status;
}
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
- &combo_lo);
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ return 0;
+}
+
+/**
+ * ice_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ */
+enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+{
+ return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ice_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ */
+static enum ice_status
+ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ struct ice_orom_civd_info *civd)
+{
+ struct ice_orom_civd_info tmp;
+ enum ice_status status;
+ u32 offset;
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ u8 sum = 0, i;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&tmp, sizeof(tmp));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
+ return status;
+ }
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(tmp); i++)
+ sum += ((u8 *)&tmp)[i];
+
+ if (sum) {
+ ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
+ sum);
+ return ICE_ERR_NVM;
+ }
+
+ *civd = tmp;
+ return 0;
+ }
+
+ return ICE_ERR_NVM;
+}
+
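The validity rule (the modulo-256 sum of every byte, including the
stored checksum byte, must be zero) can be expressed in isolation as a
small self-contained sketch (illustrative only, assuming the
ice_orom_civd_info layout from ice_nvm.h):

static bool civd_checksum_ok(const struct ice_orom_civd_info *civd)
{
	const u8 *bytes = (const u8 *)civd;
	u8 sum = 0;
	size_t i;

	/* Sum every byte of the packed structure; a valid CIVD section
	 * stores a checksum byte that makes the total wrap to zero.
	 */
	for (i = 0; i < sizeof(*civd); i++)
		sum += bytes[i];

	return sum == 0;
}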
+/**
+ * ice_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ */
+static enum ice_status
+ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
+{
+ struct ice_orom_civd_info civd;
+ enum ice_status status;
+ u32 combo_ver;
+
+ status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n");
return status;
}
- combo_ver = ((u32)combo_hi << 16) | combo_lo;
+ combo_ver = le32_to_cpu(civd.combo_ver);
- orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
- ICE_OROM_VER_SHIFT);
+ orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
- orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
- ICE_OROM_VER_BUILD_SHIFT);
+ orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);
return 0;
}
/**
- * ice_get_netlist_ver_info
+ * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Reads the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ */
+enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+{
+ return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ice_get_netlist_info - Read the netlist version information
* @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
*
- * Get the netlist version information
- */
-static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
-{
- struct ice_netlist_ver_info *ver = &hw->netlist_ver;
- enum ice_status ret;
- u32 id_blk_start;
- __le16 raw_data;
- u16 data, i;
- u16 *buff;
-
- ret = ice_acquire_nvm(hw, ICE_RES_READ);
- if (ret)
- return ret;
- buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
- GFP_KERNEL);
- if (!buff) {
- ret = ICE_ERR_NO_MEMORY;
- goto exit_no_mem;
- }
-
- /* read module length */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ */
+static enum ice_status
+ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
+ struct ice_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ enum ice_status status;
+ u16 *id_blk;
- data = le16_to_cpu(raw_data);
- /* exit if length is = 0 */
- if (!data)
- goto exit_error;
+ status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
+ if (status)
+ return status;
- /* read node count */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
- data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+ if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
+ ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
+ ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
+ return ICE_ERR_NVM;
+ }
+
+ status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the netlist ID block */
+ if (length < ICE_NETLIST_ID_BLK_SIZE) {
+ ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
+ ICE_NETLIST_ID_BLK_SIZE, length);
+ return ICE_ERR_NVM;
+ }
- /* netlist ID block starts from offset 4 + node count * 2 */
- id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+ status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
+ if (status)
+ return status;
+ node_count &= ICE_LINK_TOPO_NODE_COUNT_M;
- /* read the entire netlist ID block */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- id_blk_start * 2,
- ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
- false, NULL);
- if (ret)
+ id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
+ ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk, ICE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
goto exit_error;
- for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
- buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
-
- ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
- ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
- ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
- ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
- ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__force __le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER];
/* Read the left most 4 bytes of SHA */
- ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+ netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
exit_error:
- kfree(buff);
-exit_no_mem:
- ice_release_nvm(hw);
- return ret;
+ kfree(id_blk);
+
+ return status;
+}
+
+/**
+ * ice_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display it.
+ */
+enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+{
+ return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
/**
@@ -555,7 +836,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
- hw->nvm.flash_size = max_size;
+ hw->flash.flash_size = max_size;
err_read_flat_nvm:
ice_release_nvm(hw);
@@ -564,6 +845,151 @@ err_read_flat_nvm:
}
/**
+ * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & ICE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return 0;
+}
+
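Two hedged examples of the pointer-word decoding (values hypothetical;
per the comment above, the high bit selects 4KB units):

/* value = 0x0123 (high bit clear): pointer = 0x0123 * 2    = 0x0246 bytes
 * value = 0x8002 (high bit set):   pointer = 0x0002 * 4096 = 0x2000 bytes
 */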
+/**
+ * ice_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return 0;
+}
+
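A matching example for area-size words, which are always in 4KB units:

/* value = 0x0003: size = 3 * 4096 = 12288 bytes (0x3000) */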
+/**
+ * ice_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ice_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ */
+static enum ice_status
+ice_determine_active_flash_banks(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ enum ice_status status;
+ u16 ctrl_word;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n");
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
+ return ICE_ERR_CFG;
+ }
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = ICE_2ND_FLASH_BANK;
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n");
+ return status;
+ }
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -572,8 +998,7 @@ err_read_flat_nvm:
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
- struct ice_nvm_info *nvm = &hw->nvm;
- u16 eetrack_lo, eetrack_hi, ver;
+ struct ice_flash_info *flash = &hw->flash;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -585,54 +1010,43 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
/* Switching to words (sr_size contains power of 2) */
- nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
+ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
- nvm->blank_nvm_mode = false;
+ flash->blank_nvm_mode = false;
} else {
/* Blank programming mode */
- nvm->blank_nvm_mode = true;
+ flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
+ status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
return status;
}
- nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
- return status;
- }
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
+ status = ice_determine_active_flash_banks(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n");
return status;
}
- nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
-
- status = ice_discover_flash_size(hw);
+ status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
- ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
return status;
}
- status = ice_get_orom_ver_info(hw);
- if (status) {
+ status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom);
+ if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
- return status;
- }
/* read the netlist version information */
- status = ice_get_netlist_ver_info(hw);
+ status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 8d430909f846..c6f05f43d593 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -4,6 +4,14 @@
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
+struct ice_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+} __packed;
+
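Because the structure is __packed, its size is fixed by the field widths
alone: 4 + 1 + 4 + 1 + 32 * 2 = 74 bytes. A compile-time check along
these lines could document that assumption (illustrative only, not part
of this patch):

static_assert(sizeof(struct ice_orom_civd_info) == 74,
	      "CIVD structure must match the on-flash layout");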
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
@@ -14,6 +22,12 @@ enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status
+ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+enum ice_status
+ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+enum ice_status
+ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
+enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f0912e44d4ad..2403cb38b93c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -431,6 +431,27 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
+ * ice_aq_move_sched_elems - move scheduler elements
+ * @hw: pointer to the HW struct
+ * @grps_req: number of groups to move
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_movd: returns total number of groups moved
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move scheduling elements (0x0408)
+ */
+static enum ice_status
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_movd, cd);
+}
+
+/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
@@ -1022,6 +1043,28 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
}
/**
+ * ice_sched_get_agg_layer - get the current aggregator layer number
+ * @hw: pointer to the HW struct
+ *
+ * This function returns the current aggregator layer number
+ */
+static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+{
+ /* Num Layers aggregator layer
+ * 9 4
+ * 7 or less sw_entry_point_layer
+ */
+ /* calculate the aggregator layer based on number of layers. */
+ if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
+ u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+
+ if (layer > hw->sw_entry_point_layer)
+ return layer;
+ }
+ return hw->sw_entry_point_layer;
+}
+
+/**
* ice_rm_dflt_leaf_node - remove the default leaf node in the tree
* @pi: port information structure
*
@@ -1239,6 +1282,46 @@ sched_query_out:
}
/**
+ * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
+ * @hw: pointer to the HW struct
+ *
+ * Determine the PSM clock frequency and store in HW struct
+ */
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
+{
+ u32 val, clk_src;
+
+ val = rd32(hw, GLGEN_CLKSTAT_SRC);
+ clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
+ GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+
+#define PSM_CLK_SRC_367_MHZ 0x0
+#define PSM_CLK_SRC_416_MHZ 0x1
+#define PSM_CLK_SRC_446_MHZ 0x2
+#define PSM_CLK_SRC_390_MHZ 0x3
+
+ switch (clk_src) {
+ case PSM_CLK_SRC_367_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_416_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_446_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_390_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
+ clk_src);
+ /* fall back to a safe default */
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ }
+}
+
+/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the HW struct
* @base: pointer to the base node
@@ -1364,7 +1447,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/**
* ice_sched_get_vsi_node - Get a VSI node based on VSI ID
- * @hw: pointer to the HW struct
+ * @pi: pointer to the port information structure
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
@@ -1372,14 +1455,14 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* TC branch
*/
static struct ice_sched_node *
-ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
+ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 vsi_handle)
{
struct ice_sched_node *node;
u8 vsi_layer;
- vsi_layer = ice_sched_get_vsi_layer(hw);
- node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
+ node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
/* Check whether it already exists */
while (node) {
@@ -1392,6 +1475,38 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
}
/**
+ * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
+ * @pi: pointer to the port information structure
+ * @tc_node: pointer to the TC node
+ * @agg_id: aggregator ID
+ *
+ * This function retrieves an aggregator node for a given aggregator ID from
+ * a given TC branch
+ */
+static struct ice_sched_node *
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u32 agg_id)
+{
+ struct ice_sched_node *node;
+ struct ice_hw *hw = pi->hw;
+ u8 agg_layer;
+
+ if (!hw)
+ return NULL;
+ agg_layer = ice_sched_get_agg_layer(hw);
+ node = ice_sched_get_first_node(pi, tc_node, agg_layer);
+
+ /* Check whether it already exists */
+ while (node) {
+ if (node->agg_id == agg_id)
+ return node;
+ node = node->sibling;
+ }
+
+ return node;
+}
+
+/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
* @num_qs: number of queues
@@ -1444,7 +1559,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
@@ -1477,7 +1592,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
- * @hw: pointer to the HW struct
+ * @pi: pointer to the port info structure
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1486,15 +1601,15 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* layers
*/
static void
-ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
+ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *node;
u8 vsil;
int i;
- vsil = ice_sched_get_vsi_layer(hw);
- for (i = vsil; i >= hw->sw_entry_point_layer; i--)
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
/* Add intermediate nodes if TC has no children and
* need at least one node for VSI
*/
@@ -1504,11 +1619,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw->port_info, tc_node,
- (u8)i);
+ node = ice_sched_get_first_node(pi, tc_node, (u8)i);
/* scan all the siblings */
while (node) {
- if (node->num_children < hw->max_children[i])
+ if (node->num_children < pi->hw->max_children[i])
break;
node = node->sibling;
}
@@ -1588,14 +1702,13 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *tc_node;
- struct ice_hw *hw = pi->hw;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
/* calculate number of supported nodes needed for this VSI */
- ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
+ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
/* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
@@ -1628,7 +1741,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!tc_node)
return ICE_ERR_CFG;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
@@ -1691,7 +1804,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return ICE_ERR_PARAM;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
if (!enable) {
@@ -1712,7 +1825,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
if (status)
return status;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
@@ -1821,7 +1934,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!tc_node)
continue;
- vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
continue;
@@ -1874,6 +1987,720 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_get_agg_info - get the aggregator info
+ * @hw: pointer to the hardware structure
+ * @agg_id: aggregator ID
+ *
+ * This function validates the aggregator ID. It returns the aggregator info
+ * if the aggregator ID is present in the list, otherwise it returns NULL.
+ */
+static struct ice_sched_agg_info *
+ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry)
+ if (agg_info->agg_id == agg_id)
+ return agg_info;
+
+ return NULL;
+}
+
+/**
+ * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
+ * @hw: pointer to the HW struct
+ * @node: pointer to a child node
+ * @num_nodes: num nodes count array
+ *
+ * This function walks through the aggregator subtree to find a free parent
+ * node
+ */
+static struct ice_sched_node *
+ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+ u16 *num_nodes)
+{
+ u8 l = node->tx_sched_layer;
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ /* Is it the VSI parent layer? */
+ if (l == vsil - 1)
+ return (node->num_children < hw->max_children[l]) ? node : NULL;
+
+ /* We have intermediate nodes. Let's walk through the subtree. If the
+ * intermediate node has space to add a new node then clear the count
+ */
+ if (node->num_children < hw->max_children[l])
+ num_nodes[l] = 0;
+ /* The recursive call below is intentional and won't go deeper than
+ * 2 or 3 levels.
+ */
+
+ for (i = 0; i < node->num_children; i++) {
+ struct ice_sched_node *parent;
+
+ parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
+ num_nodes);
+ if (parent)
+ return parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_sched_update_parent - update the new parent in SW DB
+ * @new_parent: pointer to a new parent node
+ * @node: pointer to a child node
+ *
+ * This function removes the child from the old parent and adds it to a new
+ * parent
+ */
+static void
+ice_sched_update_parent(struct ice_sched_node *new_parent,
+ struct ice_sched_node *node)
+{
+ struct ice_sched_node *old_parent;
+ u8 i, j;
+
+ old_parent = node->parent;
+
+ /* update the old parent children */
+ for (i = 0; i < old_parent->num_children; i++)
+ if (old_parent->children[i] == node) {
+ for (j = i + 1; j < old_parent->num_children; j++)
+ old_parent->children[j - 1] =
+ old_parent->children[j];
+ old_parent->num_children--;
+ break;
+ }
+
+ /* now move the node to a new parent */
+ new_parent->children[new_parent->num_children++] = node;
+ node->parent = new_parent;
+ node->info.parent_teid = new_parent->info.node_teid;
+}
+
+/**
+ * ice_sched_move_nodes - move child nodes to a given parent
+ * @pi: port information structure
+ * @parent: pointer to parent node
+ * @num_items: number of child nodes to be moved
+ * @list: pointer to child node teids
+ *
+ * This function moves the child nodes to a given parent.
+ */
+static enum ice_status
+ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ u16 num_items, u32 *list)
+{
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *node;
+ enum ice_status status = 0;
+ u16 i, grps_movd = 0;
+ struct ice_hw *hw;
+ u16 buf_len;
+
+ hw = pi->hw;
+
+ if (!parent || !num_items)
+ return ICE_ERR_PARAM;
+
+ /* Does the parent have enough space? */
+ if (parent->num_children + num_items >
+ hw->max_children[parent->tx_sched_layer])
+ return ICE_ERR_AQ_FULL;
+
+ buf_len = struct_size(buf, teid, 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < num_items; i++) {
+ node = ice_sched_find_node_by_teid(pi->root, list[i]);
+ if (!node) {
+ status = ICE_ERR_PARAM;
+ goto move_err_exit;
+ }
+
+ buf->hdr.src_parent_teid = node->info.parent_teid;
+ buf->hdr.dest_parent_teid = parent->info.node_teid;
+ buf->teid[0] = node->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
+ &grps_movd, NULL);
+ if (status && grps_movd != 1) {
+ status = ICE_ERR_CFG;
+ goto move_err_exit;
+ }
+
+ /* update the SW DB */
+ ice_sched_update_parent(parent, node);
+ }
+
+move_err_exit:
+ kfree(buf);
+ return status;
+}
+
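On the buffer sizing above: each move command sent here carries exactly
one element, so the struct_size() helper reserves the header plus a
single TEID.

/* With a flexible teid[] array, struct_size(buf, teid, 1) expands to
 * sizeof(*buf) + 1 * sizeof(buf->teid[0]), with overflow checking,
 * i.e. the command header plus room for exactly one TEID entry.
 */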
+/**
+ * ice_sched_move_vsi_to_agg - move VSI to aggregator node
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function moves a VSI to an aggregator node or its subtree.
+ * Intermediate nodes may be created if required.
+ */
+static enum ice_status
+ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
+ u8 tc)
+{
+ struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ u32 first_node_teid, vsi_teid;
+ enum ice_status status;
+ u16 num_nodes_added;
+ u8 aggl, vsil, i;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Is this VSI already part of given aggregator? */
+ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
+ return 0;
+
+ aggl = ice_sched_get_agg_layer(pi->hw);
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+
+ /* set intermediate node count to 1 between aggregator and VSI layers */
+ for (i = aggl + 1; i < vsil; i++)
+ num_nodes[i] = 1;
+
+ /* Check if the aggregator subtree has any free node to add the VSI */
+ for (i = 0; i < agg_node->num_children; i++) {
+ parent = ice_sched_get_free_vsi_parent(pi->hw,
+ agg_node->children[i],
+ num_nodes);
+ if (parent)
+ goto move_nodes;
+ }
+
+ /* add new nodes */
+ parent = agg_node;
+ for (i = aggl + 1; i < vsil; i++) {
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ else
+ parent = parent->children[0];
+
+ if (!parent)
+ return ICE_ERR_CFG;
+ }
+
+move_nodes:
+ vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
+ return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
+}
+
+/**
+ * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
+ * @pi: port information structure
+ * @agg_info: aggregator info
+ * @tc: traffic class number
+ * @rm_vsi_info: whether to also remove the VSI info entries
+ *
+ * This function moves all the VSI(s) to the default aggregator and deletes
+ * the aggregator VSI info based on the passed-in boolean parameter
+ * rm_vsi_info. The caller holds the scheduler lock.
+ */
+static enum ice_status
+ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
+ struct ice_sched_agg_info *agg_info, u8 tc,
+ bool rm_vsi_info)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *tmp;
+ enum ice_status status = 0;
+
+ list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
+ list_entry) {
+ u16 vsi_handle = agg_vsi_info->vsi_handle;
+
+ /* Move VSI to default aggregator */
+ if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
+ continue;
+
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
+ ICE_DFLT_AGG_ID, tc);
+ if (status)
+ break;
+
+ clear_bit(tc, agg_vsi_info->tc_bitmap);
+ if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
+ * @pi: port information structure
+ * @node: node pointer
+ *
+ * This function checks whether the aggregator has any VSI attached to it.
+ */
+static bool
+ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ if (node->tx_sched_layer < vsil - 1) {
+ for (i = 0; i < node->num_children; i++)
+ if (ice_sched_is_agg_inuse(pi, node->children[i]))
+ return true;
+ return false;
+ } else {
+ return node->num_children ? true : false;
+ }
+}
+
+/**
+ * ice_sched_rm_agg_cfg - remove the aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function removes the aggregator node and intermediate nodes if any
+ * from the given TC
+ */
+static enum ice_status
+ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *tc_node, *agg_node;
+ struct ice_hw *hw = pi->hw;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Can't remove the aggregator node if it has children */
+ if (ice_sched_is_agg_inuse(pi, agg_node))
+ return ICE_ERR_IN_USE;
+
+ /* need to remove the whole subtree if aggregator node is the
+ * only child.
+ */
+ while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
+ struct ice_sched_node *parent = agg_node->parent;
+
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ if (parent->num_children > 1)
+ break;
+
+ agg_node = parent;
+ }
+
+ ice_free_sched_node(pi, agg_node);
+ return 0;
+}
+
+/**
+ * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
+ * @pi: port information structure
+ * @agg_info: aggregator info
+ * @tc: TC number
+ * @rm_vsi_info: whether to also remove the VSI info entries
+ *
+ * This function removes the aggregator's references to VSIs of the given TC
+ * and removes the aggregator configuration completely for the requested TC.
+ * The caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
+ u8 tc, bool rm_vsi_info)
+{
+ enum ice_status status = 0;
+
+ /* If nothing to remove - return success */
+ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ goto exit_rm_agg_cfg_tc;
+
+ status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ /* Delete aggregator node(s) */
+ status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ clear_bit(tc, agg_info->tc_bitmap);
+exit_rm_agg_cfg_tc:
+ return status;
+}
+
+/**
+ * ice_save_agg_tc_bitmap - save aggregator TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc_bitmap: 8 bits TC bitmap
+ *
+ * Save aggregator TC bitmap. This function needs to be called with scheduler
+ * lock held.
+ */
+static enum ice_status
+ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
+ unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return 0;
+}
+
+/**
+ * ice_sched_add_agg_cfg - create an aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function creates an aggregator node and intermediate nodes if required
+ * for the given TC
+ */
+static enum ice_status
+ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *parent, *agg_node, *tc_node;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u32 first_node_teid;
+ u16 num_nodes_added;
+ u8 i, aggl;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ /* Does Agg node already exist ? */
+ if (agg_node)
+ return status;
+
+ aggl = ice_sched_get_agg_layer(hw);
+
+ /* need one node in Agg layer */
+ num_nodes[aggl] = 1;
+
+ /* Check whether the intermediate nodes have space to add the
+ * new aggregator. If they are full, then SW needs to allocate a new
+ * intermediate node on those layers
+ */
+ for (i = hw->sw_entry_point_layer; i < aggl; i++) {
+ parent = ice_sched_get_first_node(pi, tc_node, i);
+
+ /* scan all the siblings */
+ while (parent) {
+ if (parent->num_children < hw->max_children[i])
+ break;
+ parent = parent->sibling;
+ }
+
+ /* all the nodes are full, reserve one for this layer */
+ if (!parent)
+ num_nodes[i]++;
+ }
+
+ /* add the aggregator node */
+ parent = tc_node;
+ for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added) {
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ /* register aggregator ID with the aggregator node */
+ if (parent && i == aggl)
+ parent->agg_id = agg_id;
+ } else {
+ parent = parent->children[0];
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sched_cfg_agg - configure aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type queue, VSI, or aggregator group
+ * @tc_bitmap: bits TC bitmap
+ *
+ * It registers a unique aggregator node into scheduler services. It
+ * allows a user to register with a unique ID to track its resources.
+ * The aggregator type determines if this is a queue group, VSI group
+ * or aggregator group. It then creates the aggregator node(s) for requested
+ * TC(s) or removes an existing aggregator node including its configuration
+ * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
+ * resources and remove aggregator ID.
+ * This function needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info) {
+ /* Create new entry for new aggregator ID */
+ agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
+ GFP_KERNEL);
+ if (!agg_info)
+ return ICE_ERR_NO_MEMORY;
+
+ agg_info->agg_id = agg_id;
+ agg_info->agg_type = agg_type;
+ agg_info->tc_bitmap[0] = 0;
+
+ /* Initialize the aggregator VSI list head */
+ INIT_LIST_HEAD(&agg_info->agg_vsi_list);
+
+ /* Add new entry in aggregator list */
+ list_add(&agg_info->list_entry, &hw->agg_list);
+ }
+ /* Create aggregator node(s) for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc)) {
+ /* Delete aggregator cfg for this TC if it existed previously */
+ status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
+ if (status)
+ break;
+ continue;
+ }
+
+ /* Check if aggregator node for TC already exists */
+ if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ continue;
+
+ /* Create new aggregator node for TC */
+ status = ice_sched_add_agg_cfg(pi, agg_id, tc);
+ if (status)
+ break;
+
+ /* Save aggregator node's TC information */
+ set_bit(tc, agg_info->tc_bitmap);
+ }
+
+ return status;
+}
+
+/**
+ * ice_cfg_agg - config aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type queue, VSI, or aggregator group
+ * @tc_bitmap: bits TC bitmap
+ *
+ * This function configures aggregator node(s).
+ */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
+ u8 tc_bitmap)
+{
+ unsigned long bitmap = tc_bitmap;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type,
+ (unsigned long *)&bitmap);
+ if (!status)
+ status = ice_save_agg_tc_bitmap(pi, agg_id,
+ (unsigned long *)&bitmap);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
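A hypothetical caller (not part of this patch) creating aggregator ID 1
covering TC 0 only, assuming the ICE_AGG_TYPE_AGG aggregator-group type
from ice_type.h:

enum ice_status status;

/* BIT(0) enables TC 0 in the 8-bit TC bitmap */
status = ice_cfg_agg(pf->hw.port_info, 1, ICE_AGG_TYPE_AGG, BIT(0));
if (status)
	dev_err(ice_pf_to_dev(pf), "aggregator config failed: %s\n",
		ice_stat_str(status));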
+/**
+ * ice_get_agg_vsi_info - get the aggregator VSI info
+ * @agg_info: aggregator info
+ * @vsi_handle: software VSI handle
+ *
+ * The function returns aggregator VSI info based on VSI handle. This function
+ * needs to be called with scheduler lock held.
+ */
+static struct ice_sched_agg_vsi_info *
+ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle)
+ return agg_vsi_info;
+
+ return NULL;
+}
+
+/**
+ * ice_get_vsi_agg_info - get the aggregator info of VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * The function returns the aggregator info of the VSI represented by
+ * vsi_handle, which in this case has an aggregator other than the default
+ * one. This function needs to be called with the scheduler lock held.
+ */
+static struct ice_sched_agg_info *
+ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (agg_vsi_info)
+ return agg_info;
+ }
+ return NULL;
+}
+
+/**
+ * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Save the VSI to aggregator TC bitmap. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ /* check if entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return ICE_ERR_PARAM;
+ bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return 0;
+}
+
+/**
+ * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * This function moves VSI to a new or default aggregator node. If VSI is
+ * already associated to the aggregator node then no operation is performed on
+ * the tree. This function needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ u16 vsi_handle, unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ /* check if entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info) {
+ /* Create new entry for VSI under aggregator list */
+ agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*agg_vsi_info), GFP_KERNEL);
+ if (!agg_vsi_info)
+ return ICE_ERR_NO_MEMORY;
+
+ /* add VSI ID into the aggregator list */
+ agg_vsi_info->vsi_handle = vsi_handle;
+ list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
+ }
+ /* Move VSI node to new aggregator node for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc))
+ continue;
+
+ /* Move VSI to new aggregator */
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
+ if (status)
+ break;
+
+ set_bit(tc, agg_vsi_info->tc_bitmap);
+ }
+ return status;
+}
+
+/**
* ice_sched_rm_unused_rl_prof - remove unused RL profile
* @pi: port information structure
*
@@ -1955,7 +2782,6 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
buf = node->info;
data = &buf.data;
@@ -1970,7 +2796,32 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
}
/* Configure element */
- status = ice_sched_update_elem(hw, node, &buf);
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_move_vsi_to_agg - moves VSI to new or default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Move or associate VSI to a new or default aggregator node.
+ */
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap)
+{
+ unsigned long bitmap = tc_bitmap;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
+ (unsigned long *)&bitmap);
+ if (!status)
+ status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
+ (unsigned long *)&bitmap);
+ mutex_unlock(&pi->sched_lock);
return status;
}
@@ -2045,11 +2896,12 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
*
* This function calculates the wakeup parameter of RL profile.
*/
-static u16 ice_sched_calc_wakeup(s32 bw)
+static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
s32 wakeup_f_int;
@@ -2057,7 +2909,7 @@ static u16 ice_sched_calc_wakeup(s32 bw)
/* Get the wakeup integer value */
bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
- wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
+ wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
} else {
@@ -2066,8 +2918,7 @@ static u16 ice_sched_calc_wakeup(s32 bw)
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
- ICE_RL_PROF_FREQUENCY,
- bytes_per_sec);
+ hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
wakeup_f = wakeup_a - wakeup_b;
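Worked example of the wakeup-integer branch above, as a standalone userspace sketch (assumes the 446 MHz PSM clock constant introduced by this patch; kernel helpers like div64_long are replaced by plain 64-bit division):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t psm_clk_freq = 446428571;	/* ICE_PSM_CLK_446MHZ_IN_HZ */
	int64_t bw = 1000;			/* profile BW in Kbps (1 Mbps) */
	int64_t bytes_per_sec = bw * 1000 / 8;	/* 125000 B/s */
	int64_t wakeup_int = psm_clk_freq / bytes_per_sec;	/* 3571 */

	/* wakeup_int > 63, so the granularity bit (BIT(15)) is set */
	uint16_t wakeup = (uint16_t)((1 << 15) | wakeup_int);

	printf("wakeup = 0x%04x\n", wakeup);	/* prints wakeup = 0x8df3 */
	return 0;
}
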
@@ -2087,13 +2938,15 @@ static u16 ice_sched_calc_wakeup(s32 bw)
/**
* ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
* @profile: profile parameters to return
*
* This function converts the BW to profile structure format.
*/
static enum ice_status
-ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
+ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
+ struct ice_aqc_rl_profile_elem *profile)
{
enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
@@ -2113,7 +2966,7 @@ ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
- ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
+ ts_rate = div64_long((s64)hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
@@ -2137,7 +2990,7 @@ ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
if (found) {
u16 wm;
- wm = ice_sched_calc_wakeup(bw);
+ wm = ice_sched_calc_wakeup(hw, bw);
profile->rl_multiply = cpu_to_le16(mv);
profile->wake_up_calc = cpu_to_le16(wm);
profile->rl_encode = cpu_to_le16(encode);
@@ -2206,7 +3059,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
if (!rl_prof_elem)
return NULL;
- status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
+ status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
if (status)
goto exit_add_rl_prof;
@@ -2941,6 +3794,156 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
}
/**
+ * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
+ * @pi: port info struct
+ * @tc_bitmap: 8-bit TC bitmap to check
+ * @ena_tc_bitmap: 8-bit enabled TC bitmap to return
+ *
+ * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TC
+ * nodes may be missing after a reset; only TCs whose nodes exist are
+ * reported as enabled. This function needs to be called with the scheduler
+ * lock held.
+ */
+static void
+ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
+ unsigned long *tc_bitmap,
+ unsigned long *ena_tc_bitmap)
+{
+ u8 tc;
+
+ /* Some TC(s) may be missing after reset, adjust for replay */
+ ice_for_each_traffic_class(tc)
+ if (ice_is_tc_ena(*tc_bitmap, tc) &&
+ (ice_sched_get_tc_node(pi, tc)))
+ set_bit(tc, ena_tc_bitmap);
+}
+
+/**
+ * ice_sched_replay_agg - recreate aggregator node(s)
+ * @hw: pointer to the HW struct
+ *
+ * This function recreates aggregator type nodes that were not replayed
+ * earlier. It also replays aggregator BW information. These aggregator
+ * nodes are not yet associated with VSI type nodes.
+ */
+void ice_sched_replay_agg(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ mutex_lock(&pi->sched_lock);
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry)
+ /* replay aggregator (re-create aggregator node) */
+ if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS)) {
+ DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ enum ice_status status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi,
+ agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ status = ice_sched_cfg_agg(hw->port_info,
+ agg_info->agg_id,
+ ICE_AGG_TYPE_AGG,
+ replay_bitmap);
+ if (status) {
+ dev_info(ice_hw_to_dev(hw),
+ "Replay agg id[%d] failed\n",
+ agg_info->agg_id);
+ /* Move on to next one */
+ continue;
+ }
+ }
+ mutex_unlock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre-initialization
+ * @hw: pointer to the HW struct
+ *
+ * This function initializes the aggregator(s) TC bitmap to zero, a required
+ * preinit step for replaying aggregators.
+ */
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ mutex_lock(&pi->sched_lock);
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_info->tc_bitmap[0] = 0;
+ list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
+ list_entry)
+ agg_vsi_info->tc_bitmap[0] = 0;
+ }
+ mutex_unlock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the aggregator node, the VSI to aggregator type
+ * nodes, and their node bandwidth information. This function needs to be
+ * called with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+ if (!agg_info)
+ return 0; /* Not present in list - default Agg case */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return 0; /* Not present in list - default Agg case */
+ ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Replay aggregator node associated to vsi_handle */
+ status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
+ ICE_AGG_TYPE_AGG, replay_bitmap);
+ if (status)
+ return status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Move this VSI (vsi_handle) to above aggregator */
+ return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
+ replay_bitmap);
+}
+
+/**
+ * ice_replay_vsi_agg - replay VSI to aggregator node
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the association of a VSI to aggregator type nodes
+ * and their node bandwidth information.
+ */
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_replay_vsi_agg(hw, vsi_handle);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
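A hedged sketch of how a post-reset rebuild path could order the three replay entry points introduced above (caller-side names are illustrative; return values are ignored for brevity):

static void example_replay_sched(struct ice_hw *hw, u16 *vsi_handles, int n)
{
	int i;

	/* 1) zero the cached TC bitmaps so replay starts from a clean slate */
	ice_sched_replay_agg_vsi_preinit(hw);

	/* 2) re-create each VSI's aggregator and move the VSI under it */
	for (i = 0; i < n; i++)
		ice_replay_vsi_agg(hw, vsi_handles[i]);

	/* 3) re-create any remaining aggregator nodes not replayed via a VSI */
	ice_sched_replay_agg(hw);
}
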
+/**
* ice_sched_replay_q_bw - replay queue type node BW
* @pi: port information structure
* @q_ctx: queue context structure
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 0e55ae0d446f..9beef8f0ec76 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -8,6 +8,7 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_AGG_LAYER_OFFSET 6
#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
/* Burst size is a 12-bit register that is configured while creating the RL
* profile(s). MSB is a granularity bit and tells the granularity type
@@ -23,12 +24,16 @@
((BIT(11) - 1) * 64) /* In Bytes */
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
-#define ICE_RL_PROF_FREQUENCY 446000000
#define ICE_RL_PROF_ACCURACY_BYTES 128
#define ICE_RL_PROF_MULTIPLIER 10000
#define ICE_RL_PROF_TS_MULTIPLIER 32
#define ICE_RL_PROF_FRACTION 512
+#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
+#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
+#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
+#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
+
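These four constants are consumed through hw->psm_clk_freq; a minimal sketch of how ice_sched_get_psm_clk_freq() (declared below, body not in this hunk) might select one. The clock-status register and mask names here are assumptions, not taken from the patch:

void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	/* GLGEN_CLKSTAT_SRC / PSM_CLK_SRC_* names are assumed */
	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & PSM_CLK_SRC_M) >> PSM_CLK_SRC_S;

	switch (clk_src) {
	case 0:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case 1:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case 2:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	default:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	}
}
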
/* BW rate limit profile parameters list entry along
* with bandwidth maintained per layer in port info
*/
@@ -43,6 +48,8 @@ struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u16 vsi_handle;
+ /* save aggregator VSI TC bitmap */
+ DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
struct ice_sched_agg_info {
@@ -51,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* save aggregator TC bitmap */
+ DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
/* FW AQ command calls */
@@ -60,6 +69,8 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
+
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
void ice_sched_clear_agg(struct ice_hw *hw);
@@ -78,6 +89,14 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+
+/* Tx scheduler rate limiter functions */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, u8 tc_bitmap);
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap);
enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
@@ -85,6 +104,9 @@ enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
+void ice_sched_replay_agg(struct ice_hw *hw);
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 4028c6365172..dbf66057371d 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -29,6 +29,7 @@ enum ice_status {
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
+ ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index c33612132ddf..67c965a3f5d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -603,7 +603,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
+ devm_kfree(ice_hw_to_dev(hw), rbuf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a2d0aad8cfdd..b7dc25da1202 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -375,6 +375,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
@@ -384,10 +389,6 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (rx_buf->skb) {
- dev_kfree_skb(rx_buf->skb);
- rx_buf->skb = NULL;
- }
if (!rx_buf->page)
continue;
@@ -443,6 +444,22 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
}
/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+/**
* ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the Rx ring to set up
*
@@ -476,6 +493,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->rx_offset = ice_rx_offset(rx_ring);
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -493,22 +511,6 @@ err:
return -ENOMEM;
}
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
@@ -517,8 +519,8 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
#if (PAGE_SIZE < 8192)
truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = ice_rx_offset(rx_ring) ?
- SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -537,22 +539,20 @@ static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- int err, result = ICE_XDP_PASS;
struct ice_ring *xdp_ring;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- break;
+ return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- break;
+ return ice_xmit_xdp_buff(xdp, xdp_ring);
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- break;
+ return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -560,11 +560,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- result = ICE_XDP_CONSUMED;
- break;
+ return ICE_XDP_CONSUMED;
}
-
- return result;
}
/**
@@ -656,7 +653,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
bi->dma = dma;
bi->page = page;
- bi->page_offset = ice_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -729,15 +726,6 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
}
/**
- * ice_page_is_reserved - check if reuse is possible
- * @page: page struct to check
- */
-static bool ice_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
-/**
* ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
* @rx_buf: Rx buffer to adjust
* @size: Size of adjustment
@@ -775,8 +763,8 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
- /* avoid re-using remote pages */
- if (unlikely(ice_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -818,7 +806,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
@@ -864,7 +852,6 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
/**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @skb: skb to be used
* @size: size of buffer to add to skb
* @rx_buf_pgcnt: rx_buf page refcount
*
@@ -872,8 +859,8 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size, int *rx_buf_pgcnt)
+ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -885,7 +872,6 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
0;
#endif
prefetchw(rx_buf->page);
- *skb = rx_buf->skb;
if (!size)
return rx_buf;
@@ -1047,29 +1033,24 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* clear contents of buffer_info */
rx_buf->page = NULL;
- rx_buf->skb = NULL;
}
/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
*
* If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1089,24 +1070,26 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*/
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_res, xdp_xmit = 0;
+ struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
bool failure;
- xdp.rxq = &rx_ring->xdp_rxq;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
+ unsigned char *hard_start;
unsigned int size;
u16 stat_err_bits;
int rx_buf_pgcnt;
@@ -1141,7 +1124,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1151,10 +1134,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
goto construct_skb;
}
- xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
- xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
- xdp.data_meta = xdp.data;
- xdp.data_end = xdp.data + size;
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on frame length */
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
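The hunk above replaces open-coded xdp_buff field writes with the xdp_init_buff()/xdp_prepare_buff() helpers; the driver-side pattern is roughly (a sketch, ring/buffer names as in this file):

struct xdp_buff xdp;

/* once per NAPI poll: bind the buff to the ring's rxq info and truesize */
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

/* per packet: the headroom (offset) sits before the payload in the page */
hard_start = page_address(rx_buf->page) + rx_buf->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true /* meta valid */);
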
@@ -1204,7 +1186,7 @@ construct_skb:
cleaned_count++;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc, skb))
+ if (ice_is_non_eop(rx_ring, rx_desc))
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
@@ -1234,6 +1216,7 @@ construct_skb:
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_pkts++;
@@ -1244,6 +1227,7 @@ construct_skb:
if (xdp_prog)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1505,22 +1489,11 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
struct ice_vsi *vsi = q_vector->vsi;
u32 itr_val;
- /* when exiting WB_ON_ITR lets set a low ITR value and trigger
- * interrupts to expire right away in case we have more work ready to go
- * already
+ /* when exiting WB_ON_ITR just reset the countdown and let ITR
+ * resume its normal "interrupts-enabled" path
*/
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
- itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
- /* set target back to last user set value */
- rx->target_itr = rx->itr_setting;
- /* set current to what we just wrote and dynamic if needed */
- rx->current_itr = ICE_WB_ON_ITR_USECS |
- (rx->itr_setting & ICE_ITR_DYNAMIC);
- /* allow normal interrupt flow to start */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
q_vector->itr_countdown = 0;
- return;
- }
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
@@ -1560,10 +1533,8 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
- wr32(&q_vector->vsi->back->hw,
- GLINT_DYN_CTL(q_vector->reg_idx),
- itr_val);
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}
/**
@@ -1573,30 +1544,29 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
* We need to tell hardware to write-back completed descriptors even when
* interrupts are disabled. Descriptors will be written back on cache line
* boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
- * descriptors may not be written back if they don't fill a cache line until the
- * next interrupt.
+ * descriptors may not be written back if they don't fill a cache line until
+ * the next interrupt.
*
- * This sets the write-back frequency to 2 microseconds as that is the minimum
- * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
- * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ * This sets the write-back frequency to whatever was set previously for the
+ * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
+ * aren't meddling with the INTENA_M bit.
*/
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
- /* already in WB_ON_ITR mode no need to change it */
+ /* already in wb_on_itr mode no need to change it */
if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
return;
- if (q_vector->num_ring_rx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_RX_ITR));
-
- if (q_vector->num_ring_tx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_TX_ITR));
+ /* use previously set ITR values for all of the ITR indices by
+ * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
+ * be static in non-adaptive mode (user configured)
+ */
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}
@@ -1663,8 +1633,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
}
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ /* Set the writeback on ITR so partial completions of
+ * cache-lines will still continue even if we're polling.
+ */
+ ice_set_wb_on_itr(q_vector);
return budget;
+ }
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
@@ -1923,12 +1898,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
@@ -2418,7 +2396,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
if (unlikely(skb->priority == TC_PRIO_CONTROL &&
vsi->type == ICE_VSI_PF &&
- vsi->port_info->is_sw_lldp))
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ff1a1cbd078e..5dab77504fa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -165,7 +165,6 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
union {
struct {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -240,7 +239,6 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
-#define ICE_WB_ON_ITR_USECS 2
#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
* setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
@@ -296,8 +294,10 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
struct xsk_buff_pool *xsk_pool;
+ u16 rx_offset;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
+ struct sk_buff *skb;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index bc2f4390b51d..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,12 +191,7 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
- /* this is tracked separately to help us debug stack drops */
- rx_ring->rx_stats.gro_dropped++;
- netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
- rx_ring->q_index);
- }
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 2226a291a394..a6cb0c35748c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -313,14 +313,62 @@ struct ice_orom_info {
u16 build; /* Build version of OROM */
};
-/* NVM Information */
+/* NVM version information */
struct ice_nvm_info {
+ u32 eetrack;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ice_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ice_flash_bank {
+ ICE_INVALID_FLASH_BANK,
+ ICE_1ST_FLASH_BANK,
+ ICE_2ND_FLASH_BANK,
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ice_bank_select {
+ ICE_ACTIVE_FLASH_BANK,
+ ICE_INACTIVE_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ice_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ice_flash_bank nvm_bank; /* Active NVM bank */
+ enum ice_flash_bank orom_bank; /* Active OROM bank */
+ enum ice_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ice_flash_info {
struct ice_orom_info orom; /* Option ROM version info */
- u32 eetrack; /* NVM data version */
+ struct ice_nvm_info nvm; /* NVM version information */
+ struct ice_netlist_info netlist;/* Netlist version info */
+ struct ice_bank_info banks; /* Flash Bank information */
u16 sr_words; /* Shadow RAM size in words */
u32 flash_size; /* Size of available flash in bytes */
- u8 major_ver; /* major version of NVM package */
- u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
@@ -348,16 +396,6 @@ struct ice_link_default_override_tlv {
#define ICE_NVM_VER_LEN 32
-/* netlist version information */
-struct ice_netlist_ver_info {
- u32 major; /* major high/low */
- u32 minor; /* minor high/low */
- u32 type; /* type high/low */
- u32 rev; /* revision high/low */
- u32 hash; /* SHA-1 hash word */
- u16 cust_ver; /* customer version */
-};
-
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -365,7 +403,11 @@ struct ice_netlist_ver_info {
#define ice_for_each_traffic_class(_i) \
for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+/* ICE_DFLT_AGG_ID means that all new VM(s)/VSI nodes connect to the
+ * driver-defined default aggregator policy
+ */
#define ICE_INVAL_TEID 0xFFFFFFFF
+#define ICE_DFLT_AGG_ID 0
struct ice_sched_node {
struct ice_sched_node *parent;
@@ -514,6 +556,14 @@ struct ice_dcbx_cfg {
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
+struct ice_qos_cfg {
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ u8 dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp : 1;
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to HW instance */
@@ -537,13 +587,7 @@ struct ice_port_info {
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* List contain profile ID(s) and other params per layer */
struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
- struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
- /* DCBX info */
- struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
- struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
- /* LLDP/DCBX Status */
- u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
- u8 is_sw_lldp:1;
+ struct ice_qos_cfg qos_cfg;
u8 is_vf:1;
};
@@ -576,6 +620,8 @@ struct ice_hw {
void *back;
struct ice_aqc_layer_props *layer_info;
struct ice_port_info *port_info;
+ /* PSM clock frequency for calculating RL profile params */
+ u32 psm_clk_freq;
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
@@ -605,10 +651,9 @@ struct ice_hw {
u8 evb_veb; /* true for VEB, false for VEPA */
u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
- struct ice_nvm_info nvm;
+ struct ice_flash_info flash;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
- struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
@@ -765,6 +810,7 @@ struct ice_hw_port_stats {
};
/* Checksum and Shadow RAM pointers */
+#define ICE_SR_NVM_CTRL_WORD 0x00
#define ICE_SR_BOOT_CFG_PTR 0x132
#define ICE_SR_NVM_WOL_CFG 0x19
#define ICE_NVM_OROM_VER_OFF 0x02
@@ -784,10 +830,71 @@ struct ice_hw_port_stats {
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_NVM_BANK_SIZE 0x43
#define ICE_SR_1ST_OROM_BANK_PTR 0x44
+#define ICE_SR_OROM_BANK_SIZE 0x45
#define ICE_SR_NETLIST_BANK_PTR 0x46
+#define ICE_SR_NETLIST_BANK_SIZE 0x47
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+/* CSS Header words */
+#define ICE_NVM_CSS_SREV_L 0x14
+#define ICE_NVM_CSS_SREV_H 0x15
+
+/* Length of CSS header section in words */
+#define ICE_CSS_HEADER_LENGTH 330
+
+/* Offset of Shadow RAM copy in the NVM bank area. */
+#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
+
+/* Size in bytes of Option ROM trailer */
+#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define ICE_NETLIST_TYPE_OFFSET 0x0000
+#define ICE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ice_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+
+#define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+
+#define ICE_LINK_TOPO_NODE_COUNT_M ICE_M(0x3FF, 0)
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define ICE_NETLIST_ID_BLK_SIZE 0x30
+#define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define ICE_NETLIST_ID_BLK_REV_LOW 0x08
+#define ICE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define ICE_NETLIST_ID_BLK_CUST_VER 0x2F
+
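The ID-block words are split into LOW/HIGH halves; a sketch of how a reader would assemble the u32 fields of struct ice_netlist_info from a word buffer (the id_blk buffer and the surrounding read helper are illustrative):

/* sketch: id_blk[] holds ICE_NETLIST_ID_BLK_SIZE u16 words read from
 * flash; netlist points at the struct ice_netlist_info being filled
 */
netlist->major = ((u32)id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		 id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
netlist->minor = ((u32)id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		 id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW];
netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER];
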
+/* Auxiliary field, mask, and shift definition for Shadow RAM and NVM Flash */
+#define ICE_SR_CTRL_WORD_1_S 0x06
+#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
+#define ICE_SR_CTRL_WORD_VALID 0x1
+#define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
+
+#define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
/* Link override related */
#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
@@ -803,4 +910,9 @@ struct ice_hw_port_stats {
/* Hash redirection LUT for VSI - maximum array size */
#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+/* AQ API version for LLDP_FILTER_CONTROL */
+#define ICE_FW_API_LLDP_FLTR_MAJ 1
+#define ICE_FW_API_LLDP_FLTR_MIN 7
+#define ICE_FW_API_LLDP_FLTR_PATCH 1
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index ec7f6c64132e..1f38a8d0c525 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1057,11 +1057,45 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves the VSI into the corresponding scheduler aggregator
+ * node, based on the cached per-VSI "aggregator node info".
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
* ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
* @vf: VF to rebuild host configuration on
*/
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
ice_vf_set_host_trust_cfg(vf);
@@ -1073,6 +1107,8 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
}
/**
@@ -1677,6 +1713,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
return 0;
}
@@ -1688,6 +1726,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (err)
return err;
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
return num_vfs;
}
@@ -1879,6 +1919,29 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ */
+static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct ice_port_info *pi = vsi->port_info;
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_info)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* ice_vc_get_vf_res_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1960,6 +2023,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->max_vectors = pf->num_msix_per_vf;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
@@ -2312,12 +2376,12 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
- bool rm_promisc;
int ret = 0;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2344,8 +2408,13 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
- !(info->flags & FLAG_VF_MULTICAST_PROMISC);
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+
+ rm_promisc = !allmulti && !alluni;
if (vsi->num_vlan || vf->port_vlan_info) {
struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
@@ -2375,7 +2444,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
+ bool set_dflt_vsi = alluni || allmulti;
if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
/* only attempt to set the default forwarding VSI if
@@ -2399,12 +2468,12 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
enum ice_status status;
u8 promisc_m;
- if (info->flags & FLAG_VF_UNICAST_PROMISC) {
+ if (alluni) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
+ } else if (allmulti) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -2432,15 +2501,16 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
}
- if (info->flags & FLAG_VF_MULTICAST_PROMISC)
- set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- else
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
+ else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
- if (info->flags & FLAG_VF_UNICAST_PROMISC)
- set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- else
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
+ else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
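The test_and_set_bit()/test_and_clear_bit() pairs above log only on real state transitions, not on every repeated request; the idiom in isolation (a sketch with illustrative names):

unsigned long flags = 0;
bool enable = true;

/* log once per transition; the atomic test-and-modify returns the old bit */
if (enable && !test_and_set_bit(0, &flags))
	pr_info("feature enabled\n");
else if (!enable && test_and_clear_bit(0, &flags))
	pr_info("feature disabled\n");
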
@@ -2952,6 +3022,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+
num_rxq++;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -2964,7 +3036,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
- if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2972,6 +3044,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is not
+ * expected to account for it in the MTU calculation
+ */
+ if (vf->port_vlan_info)
+ vsi->max_frame += VLAN_HLEN;
}
/* VF can request to configure less than allocated queues or default
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1782146db644..83f3c9574ed1 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -260,45 +260,6 @@ free_buf:
}
/**
- * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
- * @vsi: VSI to allocate the buffer pool on
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
-{
- if (vsi->xsk_pools)
- return 0;
-
- vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
- GFP_KERNEL);
-
- if (!vsi->xsk_pools) {
- vsi->num_xsk_pools = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * ice_xsk_remove_pool - Remove an buffer pool for a certain ring/qid
- * @vsi: VSI from which the VSI will be removed
- * @qid: Ring/qid associated with the buffer pool
- */
-static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
-{
- vsi->xsk_pools[qid] = NULL;
- vsi->num_xsk_pools_used--;
-
- if (vsi->num_xsk_pools_used == 0) {
- kfree(vsi->xsk_pools);
- vsi->xsk_pools = NULL;
- vsi->num_xsk_pools = 0;
- }
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
@@ -307,12 +268,12 @@ static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
*/
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
- !vsi->xsk_pools[qid])
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!pool)
return -EINVAL;
- xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_pool(vsi, qid);
+ xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
}
@@ -333,22 +294,11 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_pools)
- vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_pools)
+ if (qid >= vsi->netdev->real_num_rx_queues ||
+ qid >= vsi->netdev->real_num_tx_queues)
return -EINVAL;
- err = ice_xsk_alloc_pools(vsi);
- if (err)
- return err;
-
- if (vsi->xsk_pools && vsi->xsk_pools[qid])
- return -EBUSY;
-
- vsi->xsk_pools[qid] = pool;
- vsi->num_xsk_pools_used++;
-
- err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -517,11 +467,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
u32 act;
rcu_read_lock();
+ /* ZC path is enabled only when the XDP program is set,
+ * so here it cannot be NULL
+ */
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
- return ICE_XDP_PASS;
- }
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
@@ -842,11 +791,8 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_pools)
- return false;
-
- for (i = 0; i < vsi->num_xsk_pools; i++) {
- if (vsi->xsk_pools[i])
+ ice_for_each_rxq(vsi, i) {
+ if (xsk_get_pool_from_qid(vsi->netdev, i))
return true;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03f78fdb0dcd..878b31d534ec 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -316,7 +316,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
break;
case E1000_TDBAL(0):
for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDBAL(n));
+ regs[n] = rd32(E1000_TDBAL(n));
break;
case E1000_TDBAH(0):
for (n = 0; n < 4; n++)
@@ -3156,7 +3156,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the PCIe SR-IOV capability.
*/
if (pdev->is_virtfn) {
- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+ WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
pci_name(pdev), pdev->vendor, pdev->device);
return -EINVAL;
}
@@ -4482,8 +4482,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
else
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
- if (hw->mac.type != e1000_i211)
- mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
+ mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
}
igb_vmm_control(adapter);
@@ -5959,15 +5958,6 @@ static int igb_tso(struct igb_ring *tx_ring,
return 1;
}
-static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -5990,10 +5980,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- igb_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -8227,18 +8214,13 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool igb_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(igb_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -8681,13 +8663,13 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
+ u32 frame_sz = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+ frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8715,12 +8697,12 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- igb_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = igb_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 30fdea24e94a..fb3fbcb13331 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2072,15 +2072,6 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
return 1;
}
-static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, __be16 protocol)
{
@@ -2102,10 +2093,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((protocol == htons(ETH_P_IPV6)) &&
- igbvf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 35baae900c1f..5d2809dfd06a 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -217,6 +217,8 @@ struct igc_adapter {
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
+
+ char fw_version[32];
};
void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 32f5fd684139..b909f00a79e6 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -129,7 +129,6 @@
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
@@ -160,6 +159,7 @@
#define IGC_NVM_RW_REG_START 1 /* Start operation */
#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */
/* NVM Word Offsets */
#define NVM_CHECKSUM_REG 0x003F
@@ -179,7 +179,6 @@
#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define IGC_STATUS_FUNC_SHIFT 2
-#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
@@ -284,7 +283,6 @@
#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 4b9ec7d0b727..495bed47ed0a 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -75,7 +75,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
break;
case IGC_TDBAL(0):
for (n = 0; n < 4; n++)
- regs[n] = rd32(IGC_RDBAL(n));
+ regs[n] = rd32(IGC_TDBAL(n));
break;
case IGC_TDBAH(0):
for (n = 0; n < 4; n++)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 61d331ce38cd..824a6c454bca 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -129,11 +129,28 @@ static void igc_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u16 nvm_version = 0;
+ u16 gphy_version;
+
+ strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+
+ /* NVM image version is reported as firmware version for i225 device */
+ hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version);
- strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+ /* gPHY firmware version is reported as PHY FW version */
+ gphy_version = igc_read_phy_fw_version(hw);
- /* add fw_version here */
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ scnprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%x:%x",
+ nvm_version,
+ gphy_version);
+
+ strscpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
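With fw_version populated, both halves become visible through the standard ethtool interface. Illustrative output with made-up values; the "%x:%x" format prints the NVM image version first and the gPHY firmware version second, both in hex:

# ethtool -i enp2s0
driver: igc
firmware-version: 1057:8754
bus-info: 0000:02:00.0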
@@ -544,7 +561,6 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
if (ret_val == 0)
hw->nvm.ops.update(hw);
- /* check if need: igc_set_fw_version(adapter); */
kfree(eeprom_buff);
return ret_val;
}
@@ -1675,12 +1691,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
@@ -1708,7 +1730,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Asym_Pause);
}
- status = rd32(IGC_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {
@@ -1792,6 +1815,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
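The explicit test is needed because a legacy u32 advertising mask can only carry link-mode bits 0-31. Assuming the current UAPI bit numbering (quoted for illustration only):

/* ETHTOOL_LINK_MODE_2500baseT_Full_BIT is bit 47 of the link-mode bitmap,
 * above what a u32 can hold, so ethtool_convert_link_mode_to_legacy_u32()
 * silently drops it; the manual OR restores the 2.5G request as
 * ADVERTISE_2500_FULL.
 */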
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 9da5f83ce456..4461f8b9a864 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -113,7 +113,6 @@ struct igc_nvm_operations {
s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
s32 (*update)(struct igc_hw *hw);
s32 (*validate)(struct igc_hw *hw);
- s32 (*valid_led_default)(struct igc_hw *hw, u16 *data);
};
struct igc_phy_operations {
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 8b67d9b49a83..7ec04e48860c 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
- s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -IGC_ERR_NVM;
goto out;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 09cd0ec7ee87..67b8ffd21d8a 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
}
out:
- return 0;
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index afd6a62da29d..7ac9597ddb84 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,15 +949,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
}
}
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -980,10 +971,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if ((first->protocol == htons(ETH_P_IP) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- (first->protocol == htons(ETH_P_IPV6) &&
- igc_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -1660,18 +1648,13 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool igc_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(igc_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -3686,6 +3669,7 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc1522 += rd32(IGC_PRC1522);
adapter->stats.tlpic += rd32(IGC_TLPIC);
adapter->stats.rlpic += rd32(IGC_RLPIC);
+ adapter->stats.hgptc += rd32(IGC_HGPTC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 8e1799508edc..83aeb5e7076f 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -791,3 +791,21 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
return ret_val;
}
+
+/**
+ * igc_read_phy_fw_version - Read gPHY firmware version
+ * @hw: pointer to the HW structure
+ */
+u16 igc_read_phy_fw_version(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 gphy_version = 0;
+ u16 ret_val;
+
+ /* gPHY firmware version is reported as the PHY FW version for the i225 device */
+ ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version);
+ if (ret_val)
+ hw_dbg("igc_phy: failed to read gPHY version\n");
+
+ return gphy_version;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
index 25cba33de7e2..1b031372d206 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.h
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -17,5 +17,6 @@ void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
+u16 igc_read_phy_fw_version(struct igc_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index b52dd9d737e8..3e5cb7aef9da 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -13,6 +13,7 @@
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
+#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index de0fc6ecf491..a604552fa634 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -349,6 +349,7 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
+ u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 393d1c2cd853..fae84202d870 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1520,7 +1520,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
}
}
-static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}
@@ -1561,7 +1561,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = ixgbe_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
rx_ring->rx_stats.alloc_rx_page++;
@@ -1940,19 +1940,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool ixgbe_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(ixgbe_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -2006,8 +2001,8 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ unsigned int truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2254,8 +2249,8 @@ static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
#if (PAGE_SIZE < 8192)
truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -2291,22 +2286,22 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
#ifdef IXGBE_FCOE
int ddp_bytes;
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -2336,12 +2331,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbe_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -6584,6 +6578,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
@@ -8040,15 +8035,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
-static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
@@ -8074,10 +8060,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbe_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -10278,8 +10261,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4061cd7db5dd..449d7d5b280d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -781,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool ixgbevf_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -1121,19 +1116,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
@@ -1161,12 +1155,12 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbevf_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbevf_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
@@ -3844,15 +3838,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
@@ -3873,10 +3858,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbevf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 41815b609569..7fe15a3286f4 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -94,7 +94,6 @@ config MVPP2
config MVPP2_PTP
bool "Marvell Armada 8K Enable PTP support"
- depends on NETWORK_PHY_TIMESTAMPING
depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
(PTP_1588_CLOCK && MVPP2 = m)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..a635cf84608a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -102,6 +102,8 @@
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_VLAN_PRIO_TO_RXQ 0x2440
+#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
@@ -490,6 +492,7 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
+ u8 prio_tc_map[8];
phy_interface_t phy_interface;
struct device_node *dn;
@@ -2263,11 +2266,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
-
- xdp->data_hard_start = data;
- xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
- xdp->data_end = xdp->data + data_len;
- xdp_set_data_meta_invalid(xdp);
+ xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+ data_len, false);
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
@@ -2363,9 +2363,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
u32 desc_status, frame_sz;
struct xdp_buff xdp_buf;
+ xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- xdp_buf.frame_sz = PAGE_SIZE;
- xdp_buf.rxq = &rxq->xdp_rxq;
sinfo.nr_frags = 0;
@@ -3432,7 +3431,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
return -ENOMEM;
/* Setup XPS mapping */
- if (txq_number > 1)
+ if (pp->neta_armada3700)
+ cpu = 0;
+ else if (txq_number > 1)
cpu = txq->id % num_present_cpus();
else
cpu = pp->rxq_def % num_present_cpus();
@@ -4210,6 +4211,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
node_online);
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ /* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
+ * are routed to CPU 0, so we don't need the cpu-hotplug support
+ */
+ if (pp->neta_armada3700)
+ return 0;
spin_lock(&pp->lock);
/*
@@ -4432,7 +4438,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -4919,6 +4925,63 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
return phylink_ethtool_set_eee(pp->phylink, eee);
}
+static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
+}
+
+static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+{
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+}
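MVNETA_VLAN_PRIO_RXQ_MAP() packs one 3-bit rxq number per VLAN priority into the single MVNETA_VLAN_PRIO_TO_RXQ register, so the loop above just ORs eight fields together. A worked example, assuming priorities 0-3 map to rxq 0 and priorities 4-7 to rxq 1:

/* prio_tc_map = {0, 0, 0, 0, 1, 1, 1, 1}:
 * val = (1 << 12) | (1 << 15) | (1 << 18) | (1 << 21) = 0x00249000
 * (the four prio 0-3 fields are zero and contribute nothing)
 */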
+
+static int mvneta_setup_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt *qopt)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u8 num_tc;
+ int i;
+
+ qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = qopt->num_tc;
+
+ if (num_tc > rxq_number)
+ return -EINVAL;
+
+ if (!num_tc) {
+ mvneta_clear_rx_prio_map(pp);
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
+
+ mvneta_setup_rx_prio_map(pp);
+
+ netdev_set_num_tc(dev, qopt->num_tc);
+ for (i = 0; i < qopt->num_tc; i++)
+ netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+
+ return 0;
+}
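The offload is driven from userspace through the mqprio qdisc; an illustrative invocation (device name and mapping are examples only) that steers priorities 0-3 to traffic class 0 and 4-7 to traffic class 1:

# tc qdisc add dev eth0 root mqprio num_tc 2 \
      map 0 0 0 0 1 1 1 1 queues 1@0 1@1 hw 1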
+
+static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return mvneta_setup_mqprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -4931,6 +4994,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_do_ioctl = mvneta_ioctl,
.ndo_bpf = mvneta_xdp,
.ndo_xdp_xmit = mvneta_xdp_xmit,
+ .ndo_setup_tc = mvneta_setup_tc,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -5255,7 +5319,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 6bd7e405e830..8edba5ea90f0 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -60,6 +60,9 @@
/* Top Registers */
#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
#define MVPP2_DSA_EXTENDED BIT(5)
+#define MVPP2_VER_ID_REG 0x50b0
+#define MVPP2_VER_PP22 0x10
+#define MVPP2_VER_PP23 0x11
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
@@ -292,6 +295,8 @@
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
@@ -319,6 +324,10 @@
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_BPPI_HIGH_THRESH 0x1E
+#define MVPP2_BM_BPPI_LOW_THRESH 0x1C
+#define MVPP23_BM_BPPI_HIGH_THRESH 0x34
+#define MVPP23_BM_BPPI_LOW_THRESH 0x28
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
@@ -347,6 +356,10 @@
#define MVPP2_OVERRUN_ETH_DROP 0x7000
#define MVPP2_CLS_ETH_DROP 0x7020
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+#define MVPP23_BM_8POOL_MODE BIT(8)
+
/* Hit counters registers */
#define MVPP2_CTRS_IDX 0x7040
#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
@@ -469,7 +482,7 @@
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
-/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
*/
#define MVPP22_XLG_CTRL0_REG 0x100
@@ -506,7 +519,7 @@
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
-/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+/* SMI registers. PPv2.2 and PPv2.3, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
@@ -582,7 +595,7 @@
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
-/* XPCS registers. PPv2.2 only */
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
@@ -593,7 +606,16 @@
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
-/* XPCS registers. PPv2.2 only */
+/* FCA registers. PPv2.2 and PPv2.3 */
+#define MVPP22_FCA_BASE(port) (0x7600 + (port) * 0x1000)
+#define MVPP22_FCA_REG_SIZE 16
+#define MVPP22_FCA_REG_MASK 0xFFFF
+#define MVPP22_FCA_CONTROL_REG 0x0
+#define MVPP22_FCA_ENABLE_PERIODIC BIT(11)
+#define MVPP22_PERIODIC_COUNTER_LSB_REG (0x110)
+#define MVPP22_PERIODIC_COUNTER_MSB_REG (0x114)
+
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
@@ -651,9 +673,9 @@
#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
#define GENCONF_CTRL0 0x1120
-#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
-#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
-#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
+#define GENCONF_CTRL0_PORT2_RGMII BIT(0)
+#define GENCONF_CTRL0_PORT3_RGMII_MII BIT(1)
+#define GENCONF_CTRL0_PORT3_RGMII BIT(2)
/* Various constants */
@@ -712,8 +734,8 @@
#define MVPP2_PORT_MAX_RXQ 32
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD_MAX 1024
-#define MVPP2_MAX_RXD_DFLT 128
+#define MVPP2_MAX_RXD_MAX 2048
+#define MVPP2_MAX_RXD_DFLT 1024
/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX 2048
@@ -741,13 +763,73 @@
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_16KB 16
+#define MVPP22_TX_FIFO_DATA_SIZE_18KB 18
#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 3
+#define MVPP22_TX_FIFO_DATA_SIZE_1KB 1
#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
#define MVPP2_TX_FIFO_THRESHOLD(kb) \
((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+/* RX FIFO threshold in 1KB granularity */
+#define MVPP23_PORT0_FIFO_TRSH (9 * 1024)
+#define MVPP23_PORT1_FIFO_TRSH (4 * 1024)
+#define MVPP23_PORT2_FIFO_TRSH (2 * 1024)
+
+/* RX Flow Control Registers */
+#define MVPP2_RX_FC_REG(port) (0x150 + 4 * (port))
+#define MVPP2_RX_FC_EN BIT(24)
+#define MVPP2_RX_FC_TRSH_OFFS 16
+#define MVPP2_RX_FC_TRSH_MASK (0xFF << MVPP2_RX_FC_TRSH_OFFS)
+#define MVPP2_RX_FC_TRSH_UNIT 256
+
+/* MSS Flow control */
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT BIT(31)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 100
+
+#define MSS_RXQ_TRESH_BASE 0x200
+#define MSS_RXQ_TRESH_OFFS 4
+#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+ * MSS_RXQ_TRESH_OFFS))
+
+#define MSS_BUF_POOL_BASE 0x40
+#define MSS_BUF_POOL_OFFS 4
+#define MSS_BUF_POOL_REG(id) (MSS_BUF_POOL_BASE \
+ + (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK 0xFFF
+#define MSS_BUF_POOL_START_MASK (0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS 12
+#define MSS_BUF_POOL_PORTS_MASK (0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS 24
+#define MSS_BUF_POOL_PORT_OFFS(id) (0x1 << \
+ ((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK 0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK (0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS 16
+
+#define MSS_RXQ_ASS_BASE 0x80
+#define MSS_RXQ_ASS_OFFS 4
+#define MSS_RXQ_ASS_PER_REG 4
+#define MSS_RXQ_ASS_PER_OFFS 8
+#define MSS_RXQ_ASS_PORTID_OFFS 0
+#define MSS_RXQ_ASS_PORTID_MASK 0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS 2
+#define MSS_RXQ_ASS_HOSTID_MASK 0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP 768
+#define MSS_THRESHOLD_START 1024
+#define MSS_FC_MAX_TIMEOUT 5000
+
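Each MSS_RXQ_ASS register packs the assignment of four queues, 8 bits apiece (a 2-bit port ID plus a 6-bit host ID). A worked example of the address arithmetic, for a global queue number q + fq = 9:

/* q + fq = 9:
 * register offset = MSS_RXQ_ASS_BASE + (9 / 4) * MSS_RXQ_ASS_OFFS
 *                 = 0x80 + 2 * 4 = 0x88
 * field bit base  = (9 % 4) * MSS_RXQ_ASS_PER_OFFS = 8
 * so the port ID lands in bits 9:8 and the host ID in bits 15:10
 * of the register at offset 0x88.
 */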
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -845,8 +927,8 @@ enum mvpp22_ptp_packet_format {
#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
/* BM constants */
-#define MVPP2_BM_JUMBO_BUF_NUM 512
-#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_JUMBO_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 2048
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
@@ -925,16 +1007,18 @@ struct mvpp2 {
/* Shared registers' base addresses */
void __iomem *lms_base;
void __iomem *iface_base;
+ void __iomem *cm3_base;
- /* On PPv2.2, each "software thread" can access the base
+ /* On PPv2.2 and PPv2.3, each "software thread" can access the base
* register through a separate address space, each 64 KB apart
* from each other. Typically, such address spaces will be
* used per CPU.
*/
void __iomem *swth_base[MVPP2_MAX_THREADS];
- /* On PPv2.2, some port control registers are located into the system
- * controller space. These registers are accessible through a regmap.
+ /* On PPv2.2 and PPv2.3, some port control registers are located in
+ * the system controller space. These registers are accessible
+ * through a regmap.
*/
struct regmap *sysctrl_base;
@@ -976,7 +1060,7 @@ struct mvpp2 {
u32 tclk;
/* HW version */
- enum { MVPP21, MVPP22 } hw_version;
+ enum { MVPP21, MVPP22, MVPP23 } hw_version;
/* Maximum number of RXQs per port */
unsigned int max_port_rxqs;
@@ -996,6 +1080,12 @@ struct mvpp2 {
/* page_pool allocator */
struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
+
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
+
+ /* Spinlocks for CM3 shared memory configuration */
+ spinlock_t mss_spinlock;
};
struct mvpp2_pcpu_stats {
@@ -1158,6 +1248,9 @@ struct mvpp2_port {
bool rx_hwtstamp;
enum hwtstamp_tx_types tx_hwtstamp_type;
struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
+
+ /* Firmware TX flow control */
+ bool tx_fc;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1220,7 +1313,7 @@ struct mvpp21_rx_desc {
__le32 reserved8;
};
-/* HW TX descriptor for PPv2.2 */
+/* HW TX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_tx_desc {
__le32 command;
u8 packet_offset;
@@ -1232,7 +1325,7 @@ struct mvpp22_tx_desc {
__le64 buf_cookie_misc;
};
-/* HW RX descriptor for PPv2.2 */
+/* HW RX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_rx_desc {
__le32 status;
__le16 reserved1;
@@ -1418,6 +1511,8 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
+
#ifdef CONFIG_MVPP2_PTP
int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
@@ -1450,4 +1545,5 @@ static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
{
return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
}
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..663157dc8062 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -143,7 +143,7 @@ struct mvpp2_cls_c2_entry {
/* Number of per-port dedicated entries in the C2 TCAM */
#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
-/* Each port has oen range per flow type + one entry controling the global RSS
+/* Each port has one range per flow type + one entry controlling the global RSS
* setting and the default rx queue
*/
#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..1767c60056c5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -91,6 +91,16 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
return cpu % priv->nthreads;
}
+static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
enum dma_data_direction dma_dir)
@@ -319,7 +329,7 @@ static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
unsigned int nrxqs;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
return 1;
/* According to the PPv2.2 datasheet and our experiments on
@@ -384,7 +394,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED(size, 16))
return -EINVAL;
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
* bytes per buffer pointer
*/
if (priv->hw_version == MVPP21)
@@ -413,6 +423,19 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+ /* Set 8 Pools BPPI threshold for MVPP23 */
+ if (priv->hw_version == MVPP23) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
bm_pool->size = size;
@@ -446,7 +469,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
u32 val;
u32 dma_addr_highbits, phys_addr_highbits;
@@ -581,6 +604,16 @@ err_unroll_pools:
return err;
}
+/* Enable the PPv23 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
@@ -634,6 +667,9 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
if (!priv->bm_pools)
return -ENOMEM;
+ if (priv->hw_version == MVPP23)
+ mvpp23_bm_set_8pool_mode(priv);
+
err = mvpp2_bm_pools_init(dev, priv);
if (err < 0)
return err;
@@ -731,6 +767,210 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
return data;
}
+/* Enable flow control for the port's RXQs */
+static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW and
+ * kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set the same flow control config for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ /* Calculate the RXQ host ID:
+ * In single queue mode: the host ID equals the host ID used
+ * for the shared RX interrupt
+ * In multi queue mode: the host ID equals the RXQ ID divided
+ * by the number of CoS queues
+ * In single resource mode: the host ID is always 0
+ */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = port->nqvecs;
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify the firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Disable flow control for the port's RXQs */
+static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW and
+ * kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify the firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable/disable flow control for a BM pool */
+static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW and
+ * kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Check if the BM pool should be enabled or disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero the BM pool start and stop thresholds to disable pool
+ * flow control while the pool is empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ }
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+ /* Notify the firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable/disable BM pool flow control on all ports */
+static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+{
+ struct mvpp2_port *port;
+ int i, j;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->priv->percpu_pools) {
+ /* Use a separate counter here: reusing the outer index would
+ * cut the port walk short
+ */
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ port->tx_fc & en);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
+ }
+ }
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+ /* Enable global flow control. At this stage global
+ * flow control is enabled, but still disabled per port.
+ */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ /* Check if the firmware is running and disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
+ }
+
+ priv->global_tx_fc = false;
+ return -EOPNOTSUPP;
+}
+
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
@@ -742,7 +982,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
u32 val = 0;
if (sizeof(dma_addr_t) == 8)
@@ -1061,6 +1301,16 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
new_long_pool = MVPP2_BM_LONG;
if (new_long_pool != port->pool_long->id) {
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
+
/* Remove port from old short & long pool */
port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
port->pool_long->pkt_size);
@@ -1078,6 +1328,25 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
mvpp2_swf_bm_pool_init(port);
mvpp2_set_hw_csum(port, new_long_pool);
+
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+ /* Update L4 checksum when jumbo enable/disable on port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
out_set:
@@ -1133,14 +1402,19 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
+ int cpu = smp_processor_id();
+ u32 thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
/* Unmask the current thread's Rx/Tx interrupts.
@@ -1150,20 +1424,25 @@ static void mvpp2_interrupts_mask(void *arg)
static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- u32 val;
+ int cpu = smp_processor_id();
+ u32 val, thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
@@ -1172,7 +1451,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
u32 val;
int i;
- if (port->priv->hw_version != MVPP22)
+ if (port->priv->hw_version == MVPP21)
return;
if (mask)
@@ -1188,6 +1467,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
}
@@ -1199,7 +1481,7 @@ static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
- return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
+ return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}
/* Port configuration routines */
@@ -1231,9 +1513,9 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ val |= GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1250,9 +1532,9 @@ static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
if (port->gop_id > 1) {
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val &= ~GENCONF_CTRL0_PORT0_RGMII;
+ val &= ~GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
+ val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
}
@@ -1280,6 +1562,49 @@ static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set the Flow Control timer x100 faster than the pause quanta to ensure
+ * that the link partner won't send traffic while the port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
+}
+
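To get a feel for the magnitude, plug a tclk of 333 MHz into the formula above (a typical clock for these SoCs, chosen here purely for illustration):

/* timer = (tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) * FC_QUANTA
 *       = (333000000 / (1000000 * 100)) * 0xFFFF
 *       = 3 * 65535 = 196605 (0x2FFFD)
 * written to the counter pair as LSB = 0xFFFD, MSB = 0x2.
 */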
static int mvpp22_gop_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
@@ -1324,6 +1649,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
val |= GENCONF_SOFT_RESET1_GOP;
regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+ mvpp22_gop_fca_set_periodic_timer(port);
+
unsupported_conf:
return 0;
@@ -1817,7 +2144,7 @@ static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
@@ -1830,7 +2157,7 @@ static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -1851,7 +2178,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -2287,7 +2614,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
int queue;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (smp_processor_id() >= port->priv->nthreads)
return;
for (queue = 0; queue < port->ntxqs; queue++) {
@@ -2348,6 +2675,20 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
}
}
+/* Set the threshold for the number of non-occupied descriptors */
+static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
/* Set the number of packets that will be received before Rx interrupt
* will be generated by HW.
*/
@@ -2370,17 +2711,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -2610,6 +2952,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rx_pkts_coal_set(port, rxq);
mvpp2_rx_time_coal_set(port, rxq);
+ /* Set the threshold for the number of non-occupied descriptors */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -2927,6 +3272,9 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
for (queue = 0; queue < port->nrxqs; queue++)
mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
}
/* Init all Rx queues for port */
@@ -2939,6 +3287,10 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port)
if (err)
goto err_cleanup;
}
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
return 0;
err_cleanup:
@@ -3562,17 +3914,17 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
frag_size = bm_pool->frag_size;
if (xdp_prog) {
- xdp.data_hard_start = data;
- xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
- xdp.data_end = xdp.data + rx_bytes;
- xdp.frame_sz = PAGE_SIZE;
+ struct xdp_rxq_info *xdp_rxq;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
- xdp.rxq = &rxq->xdp_rxq_short;
+ xdp_rxq = &rxq->xdp_rxq_short;
else
- xdp.rxq = &rxq->xdp_rxq_long;
+ xdp_rxq = &rxq->xdp_rxq_long;
- xdp_set_data_meta_invalid(&xdp);
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
@@ -3683,7 +4035,7 @@ static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
struct mvpp2_tx_desc *desc)
{
/* We only need to clear the low bits */
- if (port->priv->hw_version != MVPP21)
+ if (port->priv->hw_version >= MVPP22)
desc->pp22.ptp_descriptor &=
cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
}
@@ -4195,7 +4547,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
/* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22)
+ if (port->priv->hw_version >= MVPP22)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
@@ -4238,6 +4590,8 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
new_rx_pending = MVPP2_MAX_RXD_MAX;
+ else if (ring->rx_pending < MSS_THRESHOLD_START)
+ new_rx_pending = MSS_THRESHOLD_START;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
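The new lower bound ties the ring size to the flow-control thresholds. Judging
by the names alone (an assumption; the actual logic lives in the CM3/MSS
firmware), STOP/START form a classic hysteresis pair, roughly:

	/* illustrative sketch, not driver code */
	if (free_descs < MSS_THRESHOLD_STOP)
		assert_pause = true;
	else if (free_descs > MSS_THRESHOLD_START)
		assert_pause = false;

A ring smaller than MSS_THRESHOLD_START could then never clear the pause
condition, hence the clamp above.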
@@ -4345,9 +4699,10 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
-static bool mvpp22_rss_is_supported(void)
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
- return queue_mode == MVPP2_QDIST_MULTI_MODE;
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK);
}
static int mvpp2_open(struct net_device *dev)
@@ -4411,7 +4766,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->port_irq) {
+ if (priv->hw_version >= MVPP22 && port->port_irq) {
err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
@@ -4578,6 +4933,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
*/
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
+ bool change_percpu = (percpu != priv->percpu_pools);
int numbufs = MVPP2_BM_POOLS_NUM, i;
struct mvpp2_port *port = NULL;
bool status[MVPP2_MAX_PORTS];
@@ -4593,6 +4949,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
if (priv->percpu_pools)
numbufs = port->nrxqs * 2;
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, false);
+
for (i = 0; i < numbufs; i++)
mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
@@ -4607,6 +4966,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
mvpp2_open(port->dev);
}
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, true);
+
return 0;
}
@@ -5152,7 +5514,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0, i, loc = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5187,7 +5549,7 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5208,7 +5570,9 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
- return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
@@ -5217,7 +5581,7 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
@@ -5235,7 +5599,7 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5256,7 +5620,7 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
@@ -5278,7 +5642,7 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5463,7 +5827,7 @@ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
return;
}
- /* Handle the more complicated PPv2.2 case */
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
@@ -5487,7 +5851,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5865,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5583,7 +5959,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
- if (mvpp22_rss_is_supported())
+ if (mvpp22_rss_is_supported(port))
mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
@@ -5628,7 +6004,7 @@ static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
/* Checks if the port dt description has the required Tx interrupts:
* - PPv2.1: there are no such interrupts.
- * - PPv2.2:
+ * - PPv2.2 and PPv2.3:
* - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
* - The new ones have: "hifX" with X in [0..8]
*
@@ -5869,8 +6245,11 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+
+ if (port->priv->global_tx_fc) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ }
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
@@ -5962,7 +6341,7 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -6049,7 +6428,7 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
MVPP2_GMAC_PORT_RESET_MASK,
MVPP2_GMAC_PORT_RESET_MASK);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
@@ -6103,7 +6482,7 @@ static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- if (port->priv->hw_version == MVPP22 &&
+ if (port->priv->hw_version >= MVPP22 &&
port->phy_interface != interface) {
port->phy_interface = interface;
@@ -6151,6 +6530,7 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
+ int i;
if (mvpp2_is_xlg(interface)) {
if (!phylink_autoneg_inband(mode)) {
@@ -6201,6 +6581,23 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
val);
}
+ if (port->priv->global_tx_fc) {
+ port->tx_fc = tx_pause;
+ if (tx_pause)
+ mvpp2_rxq_enable_fc(port);
+ else
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->percpu_pools) {
+ for (i = 0; i < port->nrxqs; i++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
+ }
+
mvpp2_port_enable(port);
mvpp2_egress_enable(port);
@@ -6467,7 +6864,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
- if (mvpp22_rss_is_supported()) {
+ if (mvpp22_rss_is_supported(port)) {
dev->hw_features |= NETIF_F_RXHASH;
dev->features |= NETIF_F_NTUPLE;
}
@@ -6618,7 +7015,7 @@ static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}
-/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2.
+/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
* 4kB fixed space must be assigned for the loopback port.
 * Redistribute remaining available 44kB space among all active ports.
* Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
@@ -6667,6 +7064,55 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
+{
+ int port, val;
+
+ /* Port 0: maximum speed 10Gb/s; spec requires a 9KB RX FIFO threshold
+ * Port 1: maximum speed 5Gb/s; spec requires a 4KB RX FIFO threshold
+ * Port 2: maximum speed 1Gb/s; spec requires a 2KB RX FIFO threshold
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
+ if (port == 0) {
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else {
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ }
+ }
+}
+
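The three branches above differ only in the threshold constant; a table-driven
equivalent (an illustrative sketch using the same constants, not what the
patch does) could read:

	static const u32 fifo_trsh[] = {
		MVPP23_PORT0_FIFO_TRSH,	/* port 0: 10Gb/s, 9KB */
		MVPP23_PORT1_FIFO_TRSH,	/* port 1: 5Gb/s, 4KB */
		MVPP23_PORT2_FIFO_TRSH,	/* port 2: 1Gb/s, 2KB */
	};

	for (port = 0; port < ARRAY_SIZE(fifo_trsh); port++) {
		val = (fifo_trsh[port] / MVPP2_RX_FC_TRSH_UNIT)
			<< MVPP2_RX_FC_TRSH_OFFS;
		val &= MVPP2_RX_FC_TRSH_MASK;
		mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
	}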
+/* Enable/disable Rx FIFO Flow control */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
+
static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
@@ -6675,9 +7121,9 @@ static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}
-/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2.
- * 3kB fixed space must be assigned for the loopback port.
- * Redistribute remaining avialable 16kB space among all active ports.
+/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB fixed space must be assigned for the loopback port.
+ * Redistribute remaining available 18kB space among all active ports.
* The 10G interface should use 10kB (which is maximum possible size
* per single port).
*/
@@ -6688,9 +7134,9 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
int size_remainder;
int port, size;
- /* The loopback requires fixed 3kB of the FIFO space assignment. */
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
- MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
/* Set TX FIFO size to 0 for inactive ports. */
@@ -6698,7 +7144,7 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
mvpp22_tx_fifo_set_hw(priv, port, 0);
/* Assign remaining TX FIFO space among all active ports. */
- size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB;
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
remaining_ports_count = hweight_long(port_map);
for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
@@ -6783,7 +7229,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
mvpp2_axi_init(priv);
/* Disable HW PHY polling */
@@ -6818,6 +7264,8 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
} else {
mvpp22_rx_fifo_init(priv);
mvpp22_tx_fifo_init(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
}
if (priv->hw_version == MVPP21)
@@ -6843,6 +7291,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ if (has_acpi_companion(&pdev->dev))
+ dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
+ else
+ dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
+ return 0;
+ }
+
+ priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+
+ return PTR_ERR_OR_ZERO(priv->cm3_base);
+}
+
static int mvpp2_probe(struct platform_device *pdev)
{
const struct acpi_device_id *acpi_id;
@@ -6899,9 +7366,18 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the CM3 SRAM handle (cm3_base) is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -6914,7 +7390,7 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
- if (priv->hw_version == MVPP22 &&
+ if (priv->hw_version >= MVPP22 &&
mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
priv->percpu_pools = 1;
@@ -6959,7 +7435,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
if (IS_ERR(priv->mg_clk)) {
err = PTR_ERR(priv->mg_clk);
@@ -7000,7 +7476,7 @@ static int mvpp2_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
goto err_axi_clk;
@@ -7020,6 +7496,12 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->port_map |= BIT(i);
}
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
@@ -7059,6 +7541,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
@@ -7075,10 +7563,10 @@ err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..4812cdb4609e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
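+ /* SRAM goes first, presumably so an entry cannot match before its action data is in place */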
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
return 0;
}
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
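+ /* 01-80-C2-00-00-01 is the IEEE 802.3 MAC Control (PAUSE frame) destination address */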
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -882,15 +914,15 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -899,7 +931,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -967,12 +1000,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1134,6 +1172,21 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set MH entry that skips the parser */
+ pe.index = MVPP2_PE_MH_SKIP_PRS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
}
 /* Set default entries (placeholder) for promiscuous, non-promiscuous and
@@ -1162,6 +1215,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1392,8 +1446,9 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1597,8 +1652,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1647,8 +1703,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1727,19 +1784,20 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1752,14 +1810,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..c16e5b9947bd 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -103,10 +103,11 @@
#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
-#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 32)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_MH_SKIP_PRS (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -129,7 +130,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index eb535c98ca38..1a3455620b38 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -4,10 +4,10 @@
#
ccflags-y += -I$(src)
-obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
-obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
-octeontx2_mbox-y := mbox.o rvu_trace.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
- rvu_cpt.o rvu_devlink.o
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..9caa375d01b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -14,53 +14,18 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
-#define DRV_NAME "octeontx2-cgx"
-#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
-
-/**
- * struct lmac
- * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
- * @cmd_lock: Lock to serialize the command interface
- * @resp: command response
- * @link_info: link related information
- * @event_cb: callback for linkchange events
- * @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
- * @cgx: parent cgx port
- * @lmac_id: lmac port id
- * @name: lmac port name
- */
-struct lmac {
- wait_queue_head_t wq_cmd_cmplt;
- struct mutex cmd_lock;
- u64 resp;
- struct cgx_link_user_info link_info;
- struct cgx_event_cb event_cb;
- spinlock_t event_cb_lock;
- bool cmd_pend;
- struct cgx *cgx;
- u8 lmac_id;
- char *name;
-};
-
-struct cgx {
- void __iomem *reg_base;
- struct pci_dev *pdev;
- u8 cgx_id;
- u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
- struct work_struct cgx_cmd_work;
- struct workqueue_struct *cgx_cmd_workq;
- struct list_head cgx_list;
-};
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
static LIST_HEAD(cgx_list);
@@ -76,22 +41,45 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, cgx_id_table);
-static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+static bool is_dev_rpm(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ return cgx && test_bit(lmac_id, &cgx->lmac_bmap);
+}
+
+struct mac_ops *get_mac_ops(void *cgxd)
{
- writeq(val, cgx->reg_base + (lmac << 18) + offset);
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
}
-static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
- return readq(cgx->reg_base + (lmac << 18) + offset);
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
}
-static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
return NULL;
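A note on the addressing (values per the cgx_mac_ops table later in this
patch): each LMAC owns a register window of 2^lmac_offset bytes, so a CSR
address decomposes as base + (lmac << lmac_offset) + offset. With the CGX
value of 18 that is a 256KB window per LMAC:

	/* LMAC 1, register CGXX_CMRX_CFG, on a CGX device (lmac_offset = 18) */
	void __iomem *addr = cgx->reg_base + (1ULL << 18) + CGXX_CMRX_CFG;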
@@ -135,6 +123,20 @@ void *cgx_get_pdata(int cgx_id)
return NULL;
}
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
int cgx_get_cgxid(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -185,8 +187,10 @@ static u64 mac2u64 (u8 *mac_addr)
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg;
+ mac_ops = cgx_dev->mac_ops;
 /* copy 6 bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
@@ -205,8 +209,11 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg;
+ mac_ops = cgx_dev->mac_ops;
+
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -215,15 +222,16 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -237,10 +245,10 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
u8 lmac_type;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
@@ -262,11 +270,13 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg = 0;
if (!cgx)
return;
+ mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
@@ -324,7 +334,7 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
@@ -334,18 +344,77 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
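+ /* each lane exposes its own corrected/uncorrected counter pair, spaced 8 bytes apart */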
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -362,7 +431,7 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
struct cgx *cgx = cgxd;
u64 cfg, last;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -377,13 +446,16 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
@@ -394,13 +466,16 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause)
+static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
@@ -424,11 +499,12 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return;
if (enable) {
/* Enable receive pause frames */
@@ -486,6 +562,9 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
+ if (is_dev_rpm(cgx))
+ return;
+
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -508,7 +587,7 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
}
/* CGX Firmware interface low level support */
-static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
struct cgx *cgx = lmac->cgx;
struct device *dev;
@@ -556,8 +635,7 @@ unlock:
return err;
}
-static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
- struct cgx *cgx, int lmac_id)
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
struct lmac *lmac;
int err;
@@ -592,6 +670,7 @@ static inline void cgx_link_usertable_init(void)
cgx_speed_mbps[CGX_LINK_25G] = 25000;
cgx_speed_mbps[CGX_LINK_40G] = 40000;
cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_80G] = 80000;
cgx_speed_mbps[CGX_LINK_100G] = 100000;
cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
@@ -606,6 +685,143 @@ static inline void cgx_link_usertable_init(void)
cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
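+ /* ethtool.h defines no SPEED_80000 macro, hence the bare literal below */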
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
+}
+
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ /* Fill in default values in case the user did not
+ * pass valid parameters
+ */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+ args->mode = mode;
+ args->ports = 0;
+}
+
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
+
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
@@ -615,6 +831,8 @@ static inline void link_status_user_format(u64 lstat,
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
@@ -642,6 +860,9 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
+
/* Ensure callback doesn't get unregistered until we finish it */
spin_lock(&lmac->event_cb_lock);
@@ -670,7 +891,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event)
id = FIELD_GET(EVTREG_ID, event);
if (id == CGX_CMD_LINK_BRING_UP ||
- id == CGX_CMD_LINK_BRING_DOWN)
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
return true;
else
return false;
@@ -686,12 +908,16 @@ static inline bool cgx_event_is_linkevent(u64 event)
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
+ u64 event, offset, clear_bit;
struct lmac *lmac = data;
struct cgx *cgx;
- u64 event;
cgx = lmac->cgx;
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
if (!FIELD_GET(EVTREG_ACK, event))
@@ -727,7 +953,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
* Ack the interrupt register as well.
*/
cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
- cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
return IRQ_HANDLED;
}
@@ -771,20 +997,79 @@ int cgx_get_fwdata_base(u64 *base)
{
u64 req = 0, resp;
struct cgx *cgx;
+ int first_lmac;
int err;
cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
if (!cgx)
return -ENXIO;
+ first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
*base = FIELD_GET(RESP_FWD_BASE, resp);
return err;
}
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ if (args.mode)
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -800,10 +1085,11 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
- return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
@@ -836,8 +1122,8 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
struct device *dev = &cgx->pdev->dev;
int i, err;
- /* Do Link up for all the lmacs */
- for (i = 0; i < cgx->lmac_count; i++) {
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -857,44 +1143,118 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
+static void cgx_lmac_get_fifolen(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
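+ /* e.g. for CGX: vector = CGX_LMAC_FWI + lmac_id * 9, matching the old open-coded request_irq() call */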
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
+ u64 lmac_list;
int i, err;
- cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ cgx_lmac_get_fifolen(cgx);
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ /* lmac_list specifies which lmacs are enabled:
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane)
+ lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
- lmac->lmac_id = i;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
lmac->cgx = cgx;
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
- err = request_irq(pci_irq_vector(cgx->pdev,
- CGX_LMAC_FWI + i * 9),
- cgx_fwi_event_handler, 0, lmac->name, lmac);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
if (err)
- return err;
-
- /* Enable interrupt */
- cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
- FW_CGX_INT);
+ goto err_irq;
/* Add reference */
- cgx->lmac_idmap[i] = lmac;
- cgx_lmac_pause_frm_config(cgx, i, true);
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
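For non-contiguous serdes lanes the loop above recovers LMAC ids by peeling
bits off lmac_list, lowest first. A standalone sketch of the pattern:

	u64 lmac_list = 0xd;	/* example: LMACs 0, 2 and 3 enabled */

	while (lmac_list) {
		int id = __ffs64(lmac_list);

		lmac_list &= ~BIT_ULL(id);
		/* ...set up LMAC 'id'... */
	}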
static int cgx_lmac_exit(struct cgx *cgx)
@@ -909,12 +1269,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for (i = 0; i < cgx->lmac_count; i++) {
- cgx_lmac_pause_frm_config(cgx, i, false);
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
- free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->name);
kfree(lmac);
}
@@ -922,6 +1282,37 @@ static int cgx_lmac_exit(struct cgx *cgx)
return 0;
}
+static void cgx_populate_features(struct cgx *cgx)
+{
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+}
+
+static struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+};
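The ops table is what keeps the shared code MAC-agnostic: callers fetch it
with get_mac_ops() and dispatch indirectly instead of branching on the PCI
device id. A minimal sketch of such a call (query_lmac_type() is a made-up
name for illustration):

	static u8 query_lmac_type(void *cgxd, int lmac_id)
	{
		struct mac_ops *ops = get_mac_ops(cgxd);	/* CGX or RPM */

		return ops ? ops->get_lmac_type(cgxd, lmac_id) : 0;
	}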
+
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -935,6 +1326,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
+ /* Use mac_ops to get MAC specific features */
+ if (pdev->device == PCI_DEVID_CN10K_RPM)
+ cgx->mac_ops = rpm_get_mac_ops();
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
@@ -956,7 +1353,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- nvec = CGX_NVEC;
+ nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
@@ -980,6 +1377,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cgx_link_usertable_init();
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
+
err = cgx_lmac_init(cgx);
if (err)
goto err_release_lmac;
@@ -1003,8 +1404,10 @@ static void cgx_remove(struct pci_dev *pdev)
{
struct cgx *cgx = pci_get_drvdata(pdev);
- cgx_lmac_exit(cgx);
- list_del(&cgx->cgx_list);
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index bcfc3e5f66bb..12521262164a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -13,6 +13,7 @@
#include "mbox.h"
#include "cgx_fw_if.h"
+#include "rpm.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_CGX 0xA059
@@ -42,12 +43,12 @@
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
-#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
-#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
@@ -55,7 +56,13 @@
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
@@ -81,7 +88,6 @@
#define CGX_CMD_TIMEOUT 2200 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
-#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
enum cgx_nix_stat_type {
@@ -147,5 +153,16 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
-
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index c3702fa58b6b..aa4e42f78f13 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -43,7 +43,13 @@ enum cgx_error_type {
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
- CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
};
/* LINK speed types */
@@ -59,10 +65,41 @@ enum cgx_link_speed {
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
+ CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_SPEED_MAX,
};
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_MAX /* = 27 */
+};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE,
@@ -75,12 +112,25 @@ enum cgx_cmd_id {
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
- CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_LINK_STAT_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
@@ -154,6 +204,7 @@ enum cgx_cmd_own {
* CGX_STAT_SUCCESS
*/
#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
@@ -171,13 +222,19 @@ struct cgx_lnk_sts {
uint64_t full_duplex:1;
uint64_t speed:4; /* cgx_link_speed */
uint64_t err_type:10;
- uint64_t reserved2:39;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, else 0 */
+ uint64_t port:8;
+ uint64_t reserved2:28;
};
#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28)
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
@@ -199,4 +256,12 @@ struct cgx_lnk_sts {
#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
#endif /* __CGX_FW_INTF_H__ */
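
The firmware reports link state as one packed 64-bit word, and the RESP_LINKSTAT_* masks above carve out the new AN, FEC and port fields. Below is a self-contained sketch of the decode, with GENMASK_ULL and a hand-rolled stand-in for the kernel's FIELD_GET(); the example status word is made up:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Masks as defined above */
#define RESP_LINKSTAT_UP    GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_AN    GENMASK_ULL(25, 25)
#define RESP_LINKSTAT_FEC   GENMASK_ULL(27, 26)

/* Stand-in for the kernel's FIELD_GET(): shift by the mask's LSB */
static uint64_t field_get(uint64_t mask, uint64_t word)
{
	return (word & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t sts = (1ULL << 9) | (6ULL << 11) | (1ULL << 25) | (2ULL << 26);

	printf("up=%llu speed_idx=%llu an=%llu fec=%llu\n",
	       (unsigned long long)field_get(RESP_LINKSTAT_UP, sts),
	       (unsigned long long)field_get(RESP_LINKSTAT_SPEED, sts),
	       (unsigned long long)field_get(RESP_LINKSTAT_AN, sts),
	       (unsigned long long)field_get(RESP_LINKSTAT_FEC, sts));
	return 0;
}
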
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 17f6f42f4453..e66109367487 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -155,6 +155,8 @@ enum nix_scheduler {
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
@@ -191,6 +193,9 @@ enum nix_scheduler {
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+
+#define SDP_CHANNELS 256
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000000..45706fd87120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef LMAC_COMMON_H
+#define LMAC_COMMON_H
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+/* CGX and RPM have different feature sets;
+ * update these fields per MAC block type
+ */
+struct mac_ops {
+ char *name;
+ /* Feature CSRs like RXSTAT, TXSTAT and DMAC FILTER differ between
+ * the blocks by a fixed offset, for example
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+ /* For ATF to send events to the kernel there is no dedicated
+ * interrupt, hence CGX uses the OVERFLOW bit in CMR_INT. The RPM
+ * block supports SW_INT, so ATF triggers that interrupt after
+ * processing the requested command
+ */
+ u64 int_register;
+ u64 int_set_reg;
+ /* lmac offset differs in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+ /* RPM & CGX differs in number of Receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+ /* In case of RPM, get the lmac count from RPMX_CMR_RX_LMACS[LMAC_EXIST];
+ * the number of set bits in LMAC_EXIST gives the number of lmacs
+ */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(void);
+
+#endif /* LMAC_COMMON_H */
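
struct mac_ops is the indirection point that lets CGX and RPM share the RVU-facing code: callers hold an opaque MAC handle plus an ops table and never branch on block type. A trimmed sketch of the dispatch; the two backends and their lmac counts are stand-ins, the real implementations read CMR_RX_LMACS:

#include <stdio.h>

struct mac_ops {
	const char *name;
	int (*get_nr_lmacs)(void *mac);
};

/* Stand-in backends */
static int cgx_nr_lmacs(void *mac) { (void)mac; return 4; }
static int rpm_nr_lmacs(void *mac) { (void)mac; return 2; }

static struct mac_ops cgx_ops = { "cgx", cgx_nr_lmacs };
static struct mac_ops rpm_ops = { "rpm", rpm_nr_lmacs };

/* Shared code: no CGX-vs-RPM branches, only the ops pointer */
static void report(struct mac_ops *ops, void *mac)
{
	printf("%s: %d lmacs\n", ops->name, ops->get_nr_lmacs(mac));
}

int main(void)
{
	report(&cgx_ops, NULL);
	report(&rpm_ops, NULL);
	return 0;
}
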
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index bbabb8e64201..0a37ca96aab8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -20,9 +20,9 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -56,12 +56,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
-int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
- void *reg_base, int direction, int ndevs)
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
{
- struct otx2_mbox_dev *mdev;
- int devid;
-
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -121,7 +118,6 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
mbox->reg_base = reg_base;
- mbox->hwbase = hwbase;
mbox->pdev = pdev;
mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
@@ -129,11 +125,27 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
otx2_mbox_destroy(mbox);
return -ENOMEM;
}
-
mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
for (devid = 0; devid < ndevs; devid++) {
mdev = &mbox->dev[devid];
mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
spin_lock_init(&mdev->mbox_lock);
/* Init header to reset value */
otx2_mbox_reset(mbox, devid);
@@ -143,6 +155,35 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
EXPORT_SYMBOL(otx2_mbox_init);
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
+
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
@@ -175,9 +216,9 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
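
The mbox.c change above replaces `mbox->hwbase + devid * MBOX_SIZE` with a per-device `mdev->hwbase`, so mailbox regions no longer have to form one contiguous mapping. A sketch of the addressing difference; MBOX_SIZE is 64K in mbox.h, and the bus addresses here are made up:

#include <stdio.h>
#include <stdint.h>

#define MBOX_SIZE 0x10000ULL /* 64K, as in mbox.h */
#define NDEVS 3

int main(void)
{
	int devid;

	/* Old layout: single base, fixed stride per device */
	uint64_t hwbase = 0x840000000ULL;
	for (devid = 0; devid < NDEVS; devid++)
		printf("contiguous dev%d: 0x%llx\n", devid,
		       (unsigned long long)(hwbase + devid * MBOX_SIZE));

	/* New layout: each device carries its own base, so regions
	 * discovered via per-PF registers may be scattered */
	uint64_t per_dev[NDEVS] = {
		0x840000000ULL, 0x850000000ULL, 0x9c0000000ULL };
	for (devid = 0; devid < NDEVS; devid++)
		printf("per-device dev%d: 0x%llx\n", devid,
		       (unsigned long long)per_dev[devid]);
	return 0;
}
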
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f919283ddc34..ea456099b33c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -52,6 +52,7 @@
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
+ void *hwbase; /* Mapped address of this dev's mbox region */
spinlock_t mbox_lock;
u16 msg_size; /* Total msg size to be sent */
u16 rsp_size; /* Total rsp size to be sure the reply is ok */
@@ -98,6 +99,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -149,6 +153,16 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -237,6 +251,9 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -354,12 +371,17 @@ struct get_hw_cap_rsp {
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
-#define CGX_RX_STATS_COUNT 13
+#define CGX_RX_STATS_COUNT 9
#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
@@ -373,6 +395,8 @@ struct cgx_link_user_info {
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
@@ -391,6 +415,98 @@ struct cgx_pause_frm_cfg {
u8 tx_pause;
};
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 ports;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
+#define RVU_MAC_VERSION BIT_ULL(2)
+#define RVU_MAC_CGX BIT_ULL(3)
+#define RVU_MAC_RPM BIT_ULL(4)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -545,6 +661,39 @@ struct nix_lf_free_req {
u64 flags;
};
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+};
+
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
@@ -717,6 +866,8 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -807,6 +958,12 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 max_mtu;
+ u16 min_mtu;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1071,6 +1228,7 @@ struct cpt_rd_wr_reg_msg {
u64 *ret_val;
u64 val;
u8 is_write;
+ int blkaddr;
};
struct cpt_lf_alloc_req_msg {
@@ -1078,6 +1236,7 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
+ int blkaddr;
};
#endif /* MBOX_H */
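
The new CGX_FEC_SET/RPM_STATS entries slot into the M() X-macro table, which mbox.h expands several times to keep message IDs, handlers and request/response types in sync. A reduced two-field sketch of the pattern; the real M() also carries the handler function and the two message struct types:

#include <stdio.h>

/* One table, expanded twice below; a tiny subset of the real list */
#define MBOX_MESSAGES       \
M(CGX_FEC_SET,   0x210)     \
M(CGX_FEC_STATS, 0x211)     \
M(RPM_STATS,     0x216)

/* Expansion 1: message ID enum */
#define M(name, id) MBOX_MSG_ ## name = id,
enum { MBOX_MESSAGES };
#undef M

/* Expansion 2: name/ID lookup table */
#define M(name, id) { #name, id },
static const struct { const char *name; int id; } msg_tbl[] = {
	MBOX_MESSAGES
};
#undef M

int main(void)
{
	for (unsigned i = 0; i < sizeof(msg_tbl) / sizeof(msg_tbl[0]); i++)
		printf("%s = 0x%x\n", msg_tbl[i].name, msg_tbl[i].id);
	printf("enum check: 0x%x\n", MBOX_MSG_RPM_STATS);
	return 0;
}
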
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a1f79445db71..3c640f6aba92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -162,6 +162,11 @@ enum key_fields {
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index f69f4f35ae48..1ee37853f338 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -21,6 +21,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_PTP_BAR_NO 0
@@ -234,6 +237,15 @@ static const struct pci_device_id ptp_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CN10K_A_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_A_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_B_PTP) },
{ 0, }
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000000..a91ccdc59403
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+};
+
+struct mac_ops *rpm_get_mac_ops(void)
+{
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ return 0;
+}
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (enable) {
+ /* Enable 802.3 pause frame mode */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable receive pause frames */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Set pause time and interval */
+ cfg = rpm_read(rpm, lmac_id,
+ RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
+ cfg &= ~0xFFFFULL;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
+ cfg | RPM_DEFAULT_PAUSE_TIME);
+ /* Set pause interval as the hardware default is too short */
+ cfg = rpm_read(rpm, lmac_id,
+ RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
+ cfg &= ~0xFFFFULL;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
+ cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
+
+ } else {
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-lmac Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+ /* Upon reading the lower 32 bits, the higher 32 bits are written
+ * to RPMX_MTI_STAT_DATA_HI_CDC
+ */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-lmac Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+ lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
+ if (lmac_type == LMAC_MODE_100G_R) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+ }
+
+ return 0;
+}
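
rpm_get_rx_stats() and rpm_get_tx_stats() rely on a hardware side effect: reading the low 32 bits of a counter latches the high half into RPMX_MTI_STAT_DATA_HI_CDC, and because that CDC register is shared across lmacs the whole sequence sits under rpm->lock. A sketch of the combine step, with the latch simulated in software and a made-up counter value:

#include <stdio.h>
#include <stdint.h>

static uint64_t hw_counter = 0x123456789abULL; /* made-up counter */
static uint64_t hi_cdc;                        /* models STAT_DATA_HI_CDC */

static uint64_t read_stat_lo(void)
{
	hi_cdc = hw_counter >> 32;      /* hw latches the high half here */
	return hw_counter & 0xFFFFFFFFULL;
}

static uint64_t read_hi_cdc(void)
{
	return hi_cdc;
}

int main(void)
{
	uint64_t val_lo = read_stat_lo();
	uint64_t val_hi = read_hi_cdc();

	/* Same combine as the driver: (val_hi << 32 | val_lo) */
	printf("stat = 0x%llx\n",
	       (unsigned long long)(val_hi << 32 | val_lo));
	return 0;
}
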
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000000..d32e74bd5964
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+
+/* Registers */
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_LPCSX_CONTROL1 0x30000
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+
+#define RPM_LMAC_FWI 0xa
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+#endif /* RPM_H */
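
RPMX_CMR_RX_OVR_BP packs one override-enable and one backpressure bit per LMAC into a single register, which is why rpm_lmac_enadis_pause_frm() reads, edits and writes it through lmac 0. A sketch of the bit plumbing with the macros copied from above; the lmac selection is illustrative:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))
/* As defined above: per-LMAC bits inside one shared register */
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)

int main(void)
{
	uint64_t cfg = 0;
	int lmac;

	/* Mirror the tx_pause-off branch for lmacs 0 and 2:
	 * set the override enable, clear the backpressure bit */
	for (lmac = 0; lmac < 4; lmac += 2) {
		cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac);
		cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac);
	}
	printf("RX_OVR_BP = 0x%llx\n", (unsigned long long)cfg);
	return 0;
}
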
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e8fd712860a1..d9a1a71c7ccc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -22,7 +22,7 @@
#include "rvu_trace.h"
-#define DRV_NAME "octeontx2-af"
+#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -78,6 +78,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (is_rvu_96xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -646,7 +649,7 @@ setup_vfmsix:
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
- * create a IOMMU mapping for the physcial address configured by
+ * create an IOMMU mapping for the physical address configured by
* firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
@@ -852,6 +855,31 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
return rvu_alloc_bitmap(&block->lf);
}
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* cache fifo size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1003,6 +1031,10 @@ cpt:
rvu_scan_block(rvu, block);
}
+ err = rvu_set_channels_base(rvu);
+ if (err)
+ goto msix_err;
+
err = rvu_npc_init(rvu);
if (err)
goto npc_err;
@@ -1018,10 +1050,14 @@ cpt:
if (err)
goto npa_err;
+ rvu_get_lbk_bufsize(rvu);
+
err = rvu_nix_init(rvu);
if (err)
goto nix_err;
+ rvu_program_channels(rvu);
+
return 0;
nix_err:
@@ -1323,7 +1359,7 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
break;
default:
return rvu_get_blkaddr(rvu, blktype, 0);
- };
+ }
if (is_block_implemented(rvu->hw, blkaddr))
return blkaddr;
@@ -1936,41 +1972,105 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+ /* For the cn10k platform, VF mailbox regions of a PF follow the
+ * PF <-> AF mailbox region, whereas on Octeontx2 the base is read
+ * from the RVU_PF_VF_BAR4_ADDR register.
+ */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+ /* For the cn10k platform, the AF <-> PF mailbox region of a PF is
+ * read from per-PF registers, whereas on Octeontx2 it is read from
+ * the RVU_AF_PF_BAR4_ADDR register.
+ */
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- void __iomem *hwbase = NULL, *reg_base;
- int err, i, dir, dir_up;
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
struct rvu_work *mwork;
+ void **mbox_regions;
const char *name;
- u64 bar4_addr;
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions)
+ return -ENOMEM;
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ if (err)
+ goto free_regions;
break;
case TYPE_AFVF:
name = "rvu_afvf_mailbox";
- bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ if (err)
+ goto free_regions;
break;
default:
- return -EINVAL;
+ return err;
}
mw->mbox_wq = alloc_workqueue(name,
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
num);
- if (!mw->mbox_wq)
- return -ENOMEM;
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
sizeof(struct rvu_work), GFP_KERNEL);
@@ -1986,23 +2086,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto exit;
}
- /* Mailbox is a reserved memory (in RAM) region shared between
- * RVU devices, shouldn't be mapped as device memory to allow
- * unaligned accesses.
- */
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
- if (!hwbase) {
- dev_err(rvu->dev, "Unable to map mailbox region\n");
- err = -ENOMEM;
- goto exit;
- }
-
- err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
- reg_base, dir_up, num);
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
@@ -2015,25 +2105,36 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
-
+ kfree(mbox_regions);
return 0;
+
exit:
- if (hwbase)
- iounmap((void __iomem *)hwbase);
destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
if (mw->mbox_wq) {
flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
- if (mw->mbox.hwbase)
- iounmap((void __iomem *)mw->mbox.hwbase);
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
otx2_mbox_destroy(&mw->mbox);
otx2_mbox_destroy(&mw->mbox_up);
@@ -2150,6 +2251,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
else if (block->addr == BLKADDR_NPA)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2650,8 +2754,6 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
-#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-
int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
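
rvu_get_mbox_regions() above uses the `while (region--)` unwind idiom: on a mid-loop ioremap failure it unmaps exactly the regions already mapped, in reverse order. A userspace sketch of the same control flow, with malloc/free standing in for ioremap_wc/iounmap and a simulated failure:

#include <stdio.h>
#include <stdlib.h>

#define NUM 4

static void *map_region(int region)
{
	if (region == 2)        /* simulate the third mapping failing */
		return NULL;
	return malloc(16);
}

int main(void)
{
	void *addr[NUM];
	int region;

	for (region = 0; region < NUM; region++) {
		addr[region] = map_region(region);
		if (!addr[region])
			goto error;
	}
	return 0;

error:
	/* region indexes the failed slot; walk back over 0..region-1 */
	while (region--) {
		printf("unmapping region %d\n", region);
		free(addr[region]);
	}
	return 1;
}
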
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b1a6ecfd563e..fa6e46e36ae4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -19,12 +19,15 @@
#include "common.h"
#include "mbox.h"
#include "npc.h"
+#include "rvu_reg.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -33,6 +36,7 @@
#define NAME_SIZE 32
#define MAX_NIX_BLKS 2
+#define MAX_CPT_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -47,6 +51,11 @@ struct dump_ctx {
bool all;
};
+struct cpt_ctx {
+ int blkaddr;
+ struct rvu *rvu;
+};
+
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
@@ -61,6 +70,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
};
@@ -296,6 +306,8 @@ struct hw_cap {
bool nix_shaping; /* Is shaping and coloring supported */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
};
struct rvu_hwinfo {
@@ -304,14 +316,20 @@ struct rvu_hwinfo {
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
u8 npc_kpus; /* No of parser units */
u8 npc_pkinds; /* No of port kinds */
u8 npc_intfs; /* No of interfaces */
u8 npc_kpu_entries; /* No of KPU entries */
u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
bool npc_ext_set; /* Extended register set */
struct hw_cap cap;
@@ -350,6 +368,10 @@ struct rvu_fwdata {
u64 msixtr_base;
#define FWDATA_RESERVED_MEM 1023
u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 5
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ /* Do not add new fields below this line */
};
struct ptp;
@@ -465,6 +487,59 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bits 3..2: major pass,
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM);
+}
+
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
+}
+
/* Function Prototypes
* RVU
*/
@@ -601,6 +676,15 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+
+/* CPT APIs */
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
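
With programmable channels, rvu_nix_chan_cgx() above computes each LMAC's base as cgx_chan_base + (cgxid * lmac_per_cgx + lmacid) * cgx_chans, so every (cgx, lmac) pair owns a contiguous block. A sketch of that arithmetic with example constants; the real values come from NIX_AF_CONST and the programmed bases:

#include <stdio.h>

int main(void)
{
	int cgx_chan_base = 0x800; /* example; the hw value is programmed */
	int cgx_chans = 16;        /* channels per LMAC, example */
	int lmac_per_cgx = 4;
	int cgx, lmac;

	for (cgx = 0; cgx < 2; cgx++)
		for (lmac = 0; lmac < lmac_per_cgx; lmac++)
			printf("cgx%d/lmac%d -> chan base 0x%x\n", cgx, lmac,
			       cgx_chan_base +
			       (cgx * lmac_per_cgx + lmac) * cgx_chans);
	return 0;
}
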
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d298b9357177..e668e482383a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
@@ -42,6 +43,20 @@ static struct _req_type __maybe_unused \
MBOX_UP_CGX_MESSAGES
#undef M
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
@@ -92,9 +107,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
int cgx_cnt_max = rvu->cgx_cnt_max;
- int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
int size, free_pkind;
+ int cgx, lmac, iter;
if (!cgx_cnt_max)
return 0;
@@ -125,14 +141,17 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
- lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ pf++;
}
}
return 0;
@@ -154,8 +173,10 @@ static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
- if (err)
+ if (err) {
+ kfree(qentry);
goto skip_add;
+ }
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
@@ -251,6 +272,7 @@ static void cgx_evhandler_task(struct work_struct *work)
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
struct cgx_event_cb cb;
int cgx, lmac, err;
void *cgxd;
@@ -271,7 +293,8 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -349,6 +372,7 @@ int rvu_cgx_init(struct rvu *rvu)
int rvu_cgx_exit(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
int cgx, lmac;
void *cgxd;
@@ -356,7 +380,8 @@ int rvu_cgx_exit(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -381,6 +406,7 @@ static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -390,11 +416,12 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
if (enable)
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
else
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
@@ -426,10 +453,11 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp)
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
@@ -440,28 +468,63 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Rx stats */
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
- rsp->rx_stats[stat] = rx_stat;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
- rsp->tx_stats[stat] = tx_stat;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ return cgx_get_fec_stats(cgxd, lmac, rsp);
+}
+
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -469,6 +532,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +551,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
int rc = 0, i;
u64 cfg;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
@@ -532,6 +601,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
u8 cgx_id, lmac_id;
void *cgxd;
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -618,17 +690,49 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
return err;
}
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ int rvu_def_cgx_id = 0;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
- return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
@@ -651,7 +755,12 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
@@ -660,16 +769,32 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
if (req->set)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- req->tx_pause, req->rx_pause);
+ mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
+ req->tx_pause, req->rx_pause);
else
- cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- &rsp->tx_pause, &rsp->rx_pause);
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
+ &rsp->tx_pause,
+ &rsp->rx_pause);
return 0;
}
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
@@ -761,3 +886,56 @@ exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return -ENXIO;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
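
rvu_cgx.c now walks cgx_get_lmac_bmap() with for_each_set_bit() instead of a dense 0..lmac_cnt loop, since RPM's non-contiguous serdes lanes can leave holes in the valid LMAC IDs. A sketch of an equivalent walk using the clear-lowest-set-bit trick; the bitmap value is made up:

#include <stdio.h>

int main(void)
{
	unsigned long long lmac_bmap = 0xB; /* lmacs 0, 1 and 3 enabled */

	/* Visits only set bits, like the kernel's for_each_set_bit() */
	while (lmac_bmap) {
		int lmac = __builtin_ctzll(lmac_bmap);

		printf("configuring lmac %d\n", lmac);
		lmac_bmap &= lmac_bmap - 1; /* clear the lowest set bit */
	}
	return 0;
}
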
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000000..7d9e71c6965f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 cpt_chan_base;
+ u64 nix_const;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+ /* If programmable channels are present then configure
+ * channels such that all channel numbers are contiguous
+ * leaving no holes. This way the new CPT channels can be
+ * accommodated. The order of channel numbers assigned is
+ * LBK, SDP, CGX and CPT.
+ */
+ hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
+ ((nix_const >> 16) & 0xFFULL);
+ hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
+ (nix_const & 0xFFULL);
+
+ /* Out of 4096 channels start CPT from 2048 so
+ * that MSB for CPT channels is always set
+ */
+ if (cpt_chan_base <= 0x800) {
+ hw->cpt_chan_base = 0x800;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
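
__rvu_lbk_set_chans() leans on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A hand-rolled standalone model of what those helpers do, using a made-up mask layout rather than the real LBK register fields:

#include <stdint.h>
#include <stdio.h>

/* FIELD_PREP() shifts a value up to the mask's lowest set bit;
 * FIELD_GET() extracts it again. __builtin_ctzll() is a GCC/Clang
 * builtin standing in for the kernel's __bf_shf().
 */
#define MY_FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define MY_FIELD_GET(mask, reg) \
	(((reg) & (mask)) >> __builtin_ctzll(mask))

#define CFG_BASE_MASK  0x0000FFFFull /* invented layout */
#define CFG_RANGE_MASK 0x000F0000ull

int main(void)
{
	uint64_t cfg = 0;

	cfg |= MY_FIELD_PREP(CFG_BASE_MASK, 0x800); /* channel base */
	cfg |= MY_FIELD_PREP(CFG_RANGE_MASK, 4);    /* ilog2(16 channels) */

	printf("cfg=%#llx base=%#llx range=%llu\n",
	       (unsigned long long)cfg,
	       (unsigned long long)MY_FIELD_GET(CFG_BASE_MASK, cfg),
	       (unsigned long long)MY_FIELD_GET(CFG_RANGE_MASK, cfg));
	return 0;
}
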
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+ /* To loopback packets between multiple NIX blocks,
+ * multiple LBK blocks are needed. With two NIX blocks,
+ * four LBK blocks are needed and each LBK block's
+ * source and destination are as follows:
+ * LBK0 - source NIX0 and destination NIX0
+ * LBK1 - source NIX0 and destination NIX1
+ * LBK2 - source NIX1 and destination NIX0
+ * LBK3 - source NIX1 and destination NIX1
+ * As per the HRM, channel numbers should be programmed as:
+ * P2X and X2P of LBK0 as same
+ * P2X and X2P of LBK3 as same
+ * P2X of LBK1 and X2P of LBK2 as same
+ * P2X of LBK2 and X2P of LBK1 as same
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
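
Read together, the src/dst branches above collapse to a single rule: each LBK programs its X2P side with the source NIX id and its P2X side with the destination NIX id. A tiny sketch that enumerates all four LBK cases to make that explicit:

#include <stdio.h>

static void lbk_link_ids(int src, int dst, int *x2p, int *p2x)
{
	*x2p = src;
	*p2x = dst;
}

int main(void)
{
	int src, dst, x2p, p2x;

	for (src = 0; src < 2; src++)
		for (dst = 0; dst < 2; dst++) {
			lbk_link_ids(src, dst, &x2p, &p2x);
			printf("src NIX%d dst NIX%d -> X2P id %d, P2X id %d\n",
			       src, dst, x2p, p2x);
		}
	return 0;
}
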
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = SDP_CHANNELS;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+ /* There is no read-only constant register advertising the
+ * per-LMAC channel count; it is always 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 35261d52c997..0945c3a3b180 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -65,13 +65,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
int num_lfs, slot;
u64 val;
+ blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+
if (req->eng_grpmsk == 0x0)
return CPT_AF_ERR_GRP_INVALID;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
-
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
@@ -114,23 +114,17 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot;
struct rvu_block *block;
- int cptlf, blkaddr;
- int num_lfs, slot;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
if (!num_lfs)
- return CPT_AF_ERR_LF_INVALID;
+ return 0;
for (slot = 0; slot < num_lfs; slot++) {
cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
@@ -146,6 +140,21 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
u64 offset = req->reg_offset;
@@ -208,9 +217,9 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
{
int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
+ blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
@@ -231,3 +240,92 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
return 0;
}
+
+#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
+#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
+#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
+#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int i = 0, hard_lp_ctr = 100000;
+ u64 inprog, grp_ptr;
+ u16 nq_ptr, dq_ptr;
+
+ /* Disable instruction enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ /* Disable executions in the LF's queue */
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ if (INPROG_GRB_PARTIAL(inprog)) {
+ i = 0;
+ hard_lp_ctr--;
+ } else {
+ i++;
+ }
+
+ grp_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot,
+ CPT_LF_Q_GRP_PTR));
+ nq_ptr = (grp_ptr >> 32) & 0x7FFF;
+ dq_ptr = grp_ptr & 0x7FFF;
+
+ } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+
+ i = 0;
+ hard_lp_ctr = 100000;
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((INPROG_INFLIGHT(inprog) == 0) &&
+ (INPROG_GWB(inprog) < 40) &&
+ ((INPROG_GRB(inprog) == 0) ||
+ (INPROG_GRB(inprog) == 40))) {
+ i++;
+ } else {
+ i = 0;
+ hard_lp_ctr--;
+ }
+ } while (hard_lp_ctr && (i < 10));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+}
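
Both polling loops above share the same shape: declare the queue quiescent only after a run of consecutive idle samples, bounded by a hard counter so a wedged engine cannot hang the FLR path. A simplified standalone version of that core (read_idle() stands in for the CPT_LF_INPROG decode):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for decoding CPT_LF_INPROG; always idle in this sketch */
static bool read_idle(void)
{
	return true;
}

/* Returns true if the queue went quiet before the hard counter ran out */
static bool wait_quiescent(void)
{
	int consecutive = 0, hard_lp_ctr = 100000;

	while (hard_lp_ctr && consecutive < 10) {
		if (read_idle()) {
			consecutive++;
		} else {
			consecutive = 0;
			hard_lp_ctr--;
		}
	}
	return hard_lp_ctr != 0;
}

int main(void)
{
	printf("quiescent: %s\n", wait_quiescent() ? "yes" : "no");
	return 0;
}
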
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ /* Set group drop to help clear out hardware */
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ reg |= BIT_ULL(17);
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d27543c1a166..aa2ca8780b9c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -19,6 +19,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -109,6 +110,89 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets without error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with in-range length errors",
+ "Received packets",
+ "Packets received with frame check sequence errors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC (class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC (class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
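
These name tables are indexed by the raw hardware stat id in the dump code further below; the walk stays safe only while the advertised counter count never exceeds the table size. A trivial standalone model of that guarded iteration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Abbreviated, illustrative name table */
static const char *rx_names[] = {
	"Octets of received packets",
	"Octets of received packets without error",
};

int main(void)
{
	unsigned int stat;

	/* mac_ops->rx_stats_cnt must stay <= ARRAY_SIZE(rx_names) */
	for (stat = 0; stat < ARRAY_SIZE(rx_names); stat++)
		printf("stat %u: %s\n", stat, rx_names[stat]);
	return 0;
}
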
+
enum cpt_eng_type {
CPT_AE_TYPE = 1,
CPT_SE_TYPE = 2,
@@ -234,6 +318,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ int rvu_def_cgx_id = 0;
char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
@@ -241,7 +327,9 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
u16 pcifunc;
domain = 2;
- seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
@@ -262,7 +350,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
&lmac_id);
- sprintf(cgx, "CGX%d", cgx_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
@@ -385,7 +473,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
@@ -449,6 +537,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
@@ -468,6 +557,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
@@ -485,12 +577,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
@@ -512,6 +607,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
@@ -525,8 +622,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
- seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
/* Reads aura/pool's ctx from admin queue */
@@ -910,11 +1009,78 @@ static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
@@ -974,10 +1140,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
(u64)sq_ctx->dropped_pkts);
}
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+ seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+ seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+ rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
+
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
@@ -1439,6 +1689,7 @@ static void rvu_dbg_npa_init(struct rvu *rvu)
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
@@ -1450,6 +1701,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (!rvu)
return -ENODEV;
+ mac_ops = get_mac_ops(cgxd);
+
+ if (!mac_ops)
+ return 0;
+
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
@@ -1459,7 +1715,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
- seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
@@ -1481,7 +1738,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Tx stats */
- seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
@@ -1500,24 +1758,35 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Rx stats */
- seq_puts(s, "\n=======CGX RX_STATS======\n\n");
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
- seq_puts(s, "\n=======CGX TX_STATS======\n\n");
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
- stat++;
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
}
return err;
@@ -1547,21 +1816,34 @@ RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
+ int rvu_def_cgx_id = 0;
int i, lmac_id;
char dname[20];
void *cgx;
- rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
/* cgx debugfs dir */
- sprintf(dname, "cgx%d", i);
+ sprintf(dname, "%s%d", mac_ops->name, i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+
+ for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
@@ -1757,6 +2039,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
default:
+ seq_puts(s, "\n");
break;
}
}
@@ -1785,7 +2068,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
} else {
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
@@ -1806,7 +2089,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
}
}
@@ -1903,20 +2186,16 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_rx_miss_act_fops);
}
-/* CPT debugfs APIs */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
u64 busy_sts = 0, free_sts = 0;
u32 e_min = 0, e_max = 0, e, i;
u16 max_ses, max_ies, max_aes;
- int blkaddr;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
@@ -1976,16 +2255,13 @@ RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u32 e_max, e;
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
@@ -2013,17 +2289,15 @@ RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
- struct rvu_hwinfo *hw = rvu->hw;
+ struct cpt_ctx *ctx = filp->private;
+ int blkaddr = ctx->blkaddr;
+ struct rvu *rvu = ctx->rvu;
struct rvu_block *block;
- int blkaddr;
+ struct rvu_hwinfo *hw;
u64 reg;
u32 lf;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
+ hw = rvu->hw;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
return -ENODEV;
@@ -2048,13 +2322,10 @@ RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg0, reg1;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
@@ -2078,15 +2349,11 @@ RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu;
- int blkaddr;
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg;
- rvu = filp->private;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
seq_printf(filp, "CPT instruction requests %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
@@ -2107,45 +2374,76 @@ static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
-static void rvu_dbg_cpt_init(struct rvu *rvu)
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
{
- if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ struct cpt_ctx *ctx;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
return;
- rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[0];
+ ctx->blkaddr = BLKADDR_CPT0;
+ ctx->rvu = rvu;
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[1];
+ ctx->blkaddr = BLKADDR_CPT1;
+ ctx->rvu = rvu;
+ }
- debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_pc_fops);
- debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ae_sts_fops);
- debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_se_sts_fops);
- debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ie_sts_fops);
- debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_engines_info_fops);
- debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_lfs_info_fops);
- debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_err_info_fops);
}
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (is_rvu_otx2(rvu))
+ return "octeontx2";
+
+ return "cn10k";
+}
+
void rvu_dbg_init(struct rvu *rvu)
{
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
- debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rvu_pf_cgx_map_fops);
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
rvu_dbg_npa_init(rvu);
rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
- rvu_dbg_cpt_init(rvu);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bc0e4113370e..10a98bcb7c54 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -52,6 +52,650 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
return rvu->irq_allocated[offset];
}
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
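
Each of these work handlers recovers its context with container_of() on the embedded work_struct. A standalone re-derivation of the pointer arithmetic involved:

#include <stddef.h>
#include <stdio.h>

/* Minimal equivalent of the kernel's container_of() */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct nix_reporters {
	int id;
	struct work_struct intr_work;
};

int main(void)
{
	struct nix_reporters r = { .id = 7 };
	struct work_struct *w = &r.intr_work;
	struct nix_reporters *back =
		my_container_of(w, struct nix_reporters, intr_work);

	printf("recovered id=%d\n", back->id);
	return 0;
}
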
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+ return -EINVAL;
+
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_ras;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
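
The reporter above is a linear chain of bit tests against the latched interrupt value. The same decode expressed as a table walk; only bit 0 of NIX_AF_RVU_INT is filled in, mirroring the case shown in this patch:

#include <stdint.h>
#include <stdio.h>

struct bit_desc {
	int bit;
	const char *msg;
};

static const struct bit_desc rvu_int_bits[] = {
	{ 0, "Unmap Slot Error" },
	/* a real table would continue down the register layout */
};

static void decode(uint64_t val, const struct bit_desc *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (val & (1ULL << tbl[i].bit))
			printf("\t%s\n", tbl[i].msg);
}

int main(void)
{
	decode(0x1, rvu_int_bits, 1);
	return 0;
}
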
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+err:
+ rvu_nix_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err=%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
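
The destroy path above must tolerate a half-built object, hence the IS_ERR_OR_NULL() guard on every reporter. A minimal standalone model of that idiom, with free(NULL)'s no-op semantics playing the role of the guards:

#include <stdio.h>
#include <stdlib.h>

struct reporters {
	void *intr, *gen;
};

/* Safe to call on a partially constructed object */
static void destroy(struct reporters *r)
{
	free(r->gen);
	free(r->intr);
}

int main(void)
{
	struct reporters r = { .intr = malloc(8), .gen = NULL };

	destroy(&r); /* safe even though .gen was never created */
	return 0;
}
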
+
static void rvu_npa_intr_work(struct work_struct *work)
{
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
@@ -698,9 +1342,14 @@ static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
static int rvu_health_reporters_create(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
+ int err;
rvu_dl = rvu->rvu_dl;
- return rvu_npa_health_reporters_create(rvu_dl);
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
}
static void rvu_health_reporters_destroy(struct rvu *rvu)
@@ -712,6 +1361,7 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_dl = rvu->rvu_dl;
rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
}
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
index d7578fa92ac1..471e57dedb20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -41,11 +41,38 @@ struct rvu_npa_health_reporters {
struct work_struct ras_work;
};
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
struct rvu_devlink {
struct devlink *dl;
struct rvu *rvu;
struct workqueue_struct *devlink_wq;
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
};
/* Devlink APIs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a8dfbb6d1774..d3000194e2d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -16,6 +16,7 @@
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
+#include "lmac_common.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -214,6 +215,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct mac_ops *mac_ops;
int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
int err;
@@ -233,17 +235,19 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
"PF_Func 0x%x: Invalid pkind\n", pcifunc);
return -EINVAL;
}
- pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true, true);
+ mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
+ rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -262,10 +266,10 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
* loopback channels. Therefore, if an odd number of AF VFs is
* enabled, the last VF remains with no pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
pfvf->tx_chan_base = vf & 0x1 ?
- NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
- NIX_CHAN_LBK_CHX(lbkid, vf + 1);
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
@@ -1000,6 +1004,14 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -2535,6 +2547,43 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* RPM supports FIFO len 128 KB */
+ if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+ return 0;
+}
+
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -2580,6 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
int l4_key_offset = 0;
if (!alg)
return -EINVAL;
@@ -2712,6 +2762,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field_marker = false;
keyoff_marker = false;
}
+
+ /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+ * remember the TCP key offset within the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
@@ -2783,11 +2839,31 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_mask = 0xF;
field->fn_mask = 1; /* Mask out the first nibble */
break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
}
field->ena = 1;
/* Found a valid flow key type */
if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
field->key_offset = key_off;
memcpy(&alg[nr_field], field, sizeof(*field));
max_key_off = max(max_key_off, field->bytesm1 + 1);
@@ -3072,6 +3148,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
u64 cfg, lmac_fifo_len;
struct nix_hw *nix_hw;
u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -3081,7 +3158,12 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return -EINVAL;
- if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
+
+ if (!req->sdp_link && req->maxlen > max_mtu)
return NIX_AF_ERR_FRS_INVALID;
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
@@ -3141,7 +3223,8 @@ linkcfg:
/* Update transmit credits for CGX links */
lmac_fifo_len =
- CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ rvu_cgx_get_fifolen(rvu) /
+ cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
@@ -3181,23 +3264,40 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
return 0;
}
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ /* CN10K supports a 72 KB LBK FIFO and a max packet size of 64 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
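A worked instance of the CN10K branch above (the 65535-byte value for
lbk_max_frs is an assumption based on the "max packet size of 64k" comment):

	u64 lbk_bufsize = 0x12000; /* 73728 bytes, the 72 KB CN10K LBK FIFO */
	u64 lbk_max_frs = 65535;   /* assumed CN10K_LBK_LINK_MAX_FRS */
	u64 tx_credits  = (lbk_bufsize - lbk_max_frs) / 16; /* == 512 */
	/* nix_link_config() below then packs this as
	 * (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1)
	 */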
+
static void nix_link_config(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
+ u16 lbk_max_frs, lmac_max_frs;
u64 tx_credits;
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+
/* Set default min/max packet lengths allowed on NIX Rx links.
*
* With the HW-reset minlen value of 60 bytes, HW will treat ARP pkts
* as undersize and report them to SW as error pkts, hence
* setting it to 40 bytes.
*/
- for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ for (link = 0; link < hw->cgx_links; link++) {
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
}
+ for (link = hw->cgx_links; link < hw->lbk_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
@@ -3209,7 +3309,8 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
*/
for (cgx = 0; cgx < hw->cgx; cgx++) {
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
+ lmac_max_frs) / 16;
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
@@ -3223,7 +3324,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
/* Set Tx credits for LBK link */
slink = hw->cgx_links;
for (link = slink; link < (slink + hw->lbk_links); link++) {
- tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
@@ -3354,14 +3455,6 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
- /* Set num of links of each type */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- hw->cgx = (cfg >> 12) & 0xF;
- hw->lmac_per_cgx = (cfg >> 8) & 0xF;
- hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = (cfg >> 24) & 0xF;
- hw->sdp_links = 1;
-
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
if (err)
@@ -3596,10 +3689,14 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int blkaddr;
+ int blkaddr, pf;
int nixlf;
u64 cfg;
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5cf9b7a907ae..04bb0803a5c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -102,9 +102,9 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
return -EINVAL;
} else {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
+ base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0);
/* CGX mapped functions has maximum of 16 channels */
- end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
+ end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF);
}
if (channel < base || channel > end)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 14832b66d1fe..4ba9d54ce4e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,6 +26,11 @@ static const char * const npc_flow_names[] = {
[NPC_DIP_IPV4] = "ipv4 destination ip",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port",
[NPC_DPORT_TCP] = "tcp destination port",
[NPC_SPORT_UDP] = "udp source port",
@@ -212,13 +217,13 @@ static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
return false;
}
-static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
- u8 intf)
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
{
if (!npc_is_field_present(rvu, type, intf) ||
npc_check_overlap(rvu, blkaddr, type, 0, intf))
- return -EOPNOTSUPP;
- return 0;
+ return false;
+ return true;
}
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
@@ -269,7 +274,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
break;
default:
return;
- };
+ }
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -448,14 +453,13 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
u64 tcp_udp_sctp;
- int err, hdr;
+ int hdr;
if (is_npc_intf_tx(intf))
features = &mcam->tx_features;
for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
- err = npc_check_field(rvu, blkaddr, hdr, intf);
- if (!err)
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
@@ -464,13 +468,26 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp)
- if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
/* for vlan corresponding layer type should be in the key */
if (*features & BIT_ULL(NPC_OUTER_VID))
- if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
}
@@ -743,13 +760,13 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
return;
/* For tcp/udp/sctp LTYPE should be present in entry */
- if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf);
@@ -758,6 +775,15 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 0fb2aa909a23..3e401fd8ac63 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -44,6 +44,11 @@
#define RVU_AF_PFME_INT_W1S (0x28c8)
#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -100,6 +105,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -399,12 +406,16 @@
#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
#define NIX_PRIV_LFX_INT_CFG (0x8000020)
#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
@@ -484,9 +495,17 @@
#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
#define CPT_AF_LF_CTL2_SHIFT 3
#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_GRP_PTR 0x120
+
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
@@ -629,4 +648,17 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
#endif /* RVU_REG_H */
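These GENMASK masks pair with the FIELD_GET() helper from <linux/bitfield.h>;
a minimal decoding sketch (the register read producing 'val' is assumed):

#include <linux/bitfield.h>

static void lbk_decode_const(u64 val)
{
	u16 chans    = FIELD_GET(LBK_CONST_CHANS, val);
	u8  src      = FIELD_GET(LBK_CONST_SRC, val);
	u8  dst      = FIELD_GET(LBK_CONST_DST, val);
	u32 buf_size = FIELD_GET(LBK_CONST_BUF_SIZE, val);

	/* buf_size == 0x12000 identifies the 72 KB CN10K LBK FIFO */
	pr_debug("lbk: %u chans, src %u, dst %u, fifo %u bytes\n",
		 chans, src, dst, buf_size);
}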
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index e2153d47c373..5e5f45c7eab0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -74,6 +74,16 @@ enum npa_af_int_vec_e {
NPA_AF_INT_VEC_CNT = 0x5,
};
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -129,63 +139,29 @@ enum npa_inpq {
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_17_23 : 7;
- u64 lf : 9;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 lf : 9;
u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 avg_level : 8;
- u64 reserved_118_119 : 2;
- u64 shift : 6;
- u64 aura_drop : 8;
- u64 reserved_98_103 : 6;
- u64 bp_ena : 2;
- u64 aura_drop_ena : 1;
- u64 pool_drop_ena : 1;
- u64 reserved_93 : 1;
- u64 avg_con : 9;
- u64 pool_way_mask : 16;
- u64 pool_caching : 1;
- u64 reserved_65 : 2;
- u64 ena : 1;
-#else
- u64 ena : 1;
+ u64 ena : 1; /* W1 */
u64 reserved_65 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
@@ -199,59 +175,24 @@ struct npa_aura_s {
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 reserved_189_191 : 3;
- u64 nix1_bpid : 9;
- u64 reserved_177_179 : 3;
- u64 nix0_bpid : 9;
- u64 reserved_164_167 : 4;
- u64 count : 36;
-#else
- u64 count : 36;
+ u64 count : 36; /* W2 */
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_252_255 : 4;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_up_crossing : 1;
- u64 fc_ena : 1;
- u64 reserved_240_243 : 4;
- u64 bp : 8;
- u64 reserved_228_231 : 4;
- u64 limit : 36;
-#else
- u64 limit : 36;
+ u64 limit : 36; /* W3 */
u64 reserved_228_231 : 4;
u64 bp : 8;
- u64 reserved_240_243 : 4;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
-#endif
u64 fc_addr; /* W4 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 reserved_379_383 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_371 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_363 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 update_time : 16;
- u64 pool_drop : 8;
-#else
- u64 pool_drop : 8;
+ u64 pool_drop : 8; /* W5 */
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -263,31 +204,15 @@ struct npa_aura_s {
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 reserved_420_447 : 28;
- u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_420_447 : 28;
-#endif
+ u64 thresh : 36; /* W6 */
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 reserved_115_127 : 13;
- u64 buf_size : 11;
- u64 reserved_100_103 : 4;
- u64 buf_offset : 12;
- u64 stack_way_mask : 16;
- u64 reserved_70_71 : 3;
- u64 stack_caching : 1;
- u64 reserved_66_67 : 2;
- u64 nat_align : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
@@ -298,36 +223,10 @@ struct npa_pool_s {
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 stack_pages : 32;
- u64 stack_max_pages : 32;
-#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_240_255 : 16;
- u64 op_pc : 48;
-#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 reserved_316_319 : 4;
- u64 update_time : 16;
- u64 reserved_297_299 : 3;
- u64 fc_up_crossing : 1;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_ena : 1;
- u64 avg_con : 9;
- u64 avg_level : 8;
- u64 reserved_270_271 : 2;
- u64 shift : 6;
- u64 reserved_260_263 : 4;
- u64 stack_offset : 4;
-#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
@@ -338,26 +237,13 @@ struct npa_pool_s {
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
- u64 reserved_297_299 : 3;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
u64 update_time : 16;
u64 reserved_316_319 : 4;
-#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
- u64 reserved_571_575 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_563 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_555 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 reserved_512_535 : 24;
-#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -369,14 +255,10 @@ struct npa_pool_s {
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 reserved_612_639 : 28;
u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_612_639 : 28;
-#endif
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
@@ -404,6 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BAND_PROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -418,59 +301,29 @@ enum nix_aq_instop {
/* NIX admin queue instruction structure */
struct nix_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_15_23 : 9;
- u64 lf : 7;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
- u64 lf : 7;
- u64 reserved_15_23 : 9;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NIX admin queue result structure */
struct nix_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 wrptr : 20;
- u64 avg_con : 9;
- u64 cint_idx : 7;
- u64 cq_err : 1;
- u64 qint_idx : 7;
- u64 rsvd_81_83 : 3;
- u64 bpid : 9;
- u64 rsvd_69_71 : 3;
- u64 bp_ena : 1;
- u64 rsvd_64_67 : 4;
-#else
u64 rsvd_64_67 : 4;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
@@ -481,31 +334,10 @@ struct nix_cq_ctx_s {
u64 cint_idx : 7;
u64 avg_con : 9;
u64 wrptr : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 update_time : 16;
- u64 avg_level : 8;
- u64 head : 20;
- u64 tail : 20;
-#else
u64 tail : 20;
u64 head : 20;
u64 avg_level : 8;
u64 update_time : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 cq_err_int_ena : 8;
- u64 cq_err_int : 8;
- u64 qsize : 4;
- u64 rsvd_233_235 : 3;
- u64 caching : 1;
- u64 substream : 20;
- u64 rsvd_210_211 : 2;
- u64 ena : 1;
- u64 drop_ena : 1;
- u64 drop : 8;
- u64 bp : 8;
-#else
u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
@@ -517,20 +349,161 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
-#endif
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
};
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 wqe_aura : 20;
- u64 substream : 20;
- u64 cq : 20;
- u64 ena_wqwd : 1;
- u64 ipsech_ena : 1;
- u64 sso_ena : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 sso_ena : 1;
u64 ipsech_ena : 1;
@@ -538,19 +511,6 @@ struct nix_rq_ctx_s {
u64 cq : 20;
u64 substream : 20;
u64 wqe_aura : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 rsvd_127_122 : 6;
- u64 lpb_drop_ena : 1;
- u64 spb_drop_ena : 1;
- u64 xqe_drop_ena : 1;
- u64 wqe_caching : 1;
- u64 pb_caching : 2;
- u64 sso_tt : 2;
- u64 sso_grp : 10;
- u64 lpb_aura : 20;
- u64 spb_aura : 20;
-#else
u64 spb_aura : 20;
u64 lpb_aura : 20;
u64 sso_grp : 10;
@@ -561,23 +521,7 @@ struct nix_rq_ctx_s {
u64 spb_drop_ena : 1;
u64 lpb_drop_ena : 1;
u64 rsvd_127_122 : 6;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 xqe_hdr_split : 1;
- u64 xqe_imm_copy : 1;
- u64 rsvd_189_184 : 6;
- u64 xqe_imm_size : 6;
- u64 later_skip : 6;
- u64 rsvd_171 : 1;
- u64 first_skip : 7;
- u64 lpb_sizem1 : 12;
- u64 spb_ena : 1;
- u64 rsvd_150_148 : 3;
- u64 wqe_skip : 2;
- u64 spb_sizem1 : 6;
- u64 rsvd_139_128 : 12;
-#else
- u64 rsvd_139_128 : 12;
+ u64 rsvd_139_128 : 12; /* W2 */
u64 spb_sizem1 : 6;
u64 wqe_skip : 2;
u64 rsvd_150_148 : 3;
@@ -590,18 +534,7 @@ struct nix_rq_ctx_s {
u64 rsvd_189_184 : 6;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 spb_pool_pass : 8;
- u64 spb_pool_drop : 8;
- u64 spb_aura_pass : 8;
- u64 spb_aura_drop : 8;
- u64 wqe_pool_pass : 8;
- u64 wqe_pool_drop : 8;
- u64 xqe_pass : 8;
- u64 xqe_drop : 8;
-#else
- u64 xqe_drop : 8;
+ u64 xqe_drop : 8; /* W3 */
u64 xqe_pass : 8;
u64 wqe_pool_drop : 8;
u64 wqe_pool_pass : 8;
@@ -609,19 +542,7 @@ struct nix_rq_ctx_s {
u64 spb_aura_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_pool_pass : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 rsvd_319_315 : 5;
- u64 qint_idx : 7;
- u64 rq_int_ena : 8;
- u64 rq_int : 8;
- u64 rsvd_291_288 : 4;
- u64 lpb_pool_pass : 8;
- u64 lpb_pool_drop : 8;
- u64 lpb_aura_pass : 8;
- u64 lpb_aura_drop : 8;
-#else
- u64 lpb_aura_drop : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
u64 lpb_aura_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_pool_pass : 8;
@@ -630,55 +551,21 @@ struct nix_rq_ctx_s {
u64 rq_int_ena : 8;
u64 qint_idx : 7;
u64 rsvd_319_315 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 rsvd_383_366 : 18;
- u64 flow_tagw : 6;
- u64 bad_utag : 8;
- u64 good_utag : 8;
- u64 ltag : 24;
-#else
- u64 ltag : 24;
+ u64 ltag : 24; /* W5 */
u64 good_utag : 8;
u64 bad_utag : 8;
u64 flow_tagw : 6;
u64 rsvd_383_366 : 18;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 rsvd_447_432 : 16;
- u64 octs : 48;
-#else
- u64 octs : 48;
+ u64 octs : 48; /* W6 */
u64 rsvd_447_432 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
- u64 rsvd_511_496 : 16;
- u64 pkts : 48;
-#else
- u64 pkts : 48;
+ u64 pkts : 48; /* W7 */
u64 rsvd_511_496 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 drop_octs : 48; /* W8 */
u64 rsvd_575_560 : 16;
- u64 drop_octs : 48;
-#else
- u64 drop_octs : 48;
- u64 rsvd_575_560 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_624 : 16;
- u64 drop_pkts : 48;
-#else
- u64 drop_pkts : 48;
+ u64 drop_pkts : 48; /* W9 */
u64 rsvd_639_624 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 re_pkts : 48; /* W10 */
u64 rsvd_703_688 : 16;
- u64 re_pkts : 48;
-#else
- u64 re_pkts : 48;
- u64 rsvd_703_688 : 16;
-#endif
u64 rsvd_767_704; /* W11 */
u64 rsvd_831_768; /* W12 */
u64 rsvd_895_832; /* W13 */
@@ -701,30 +588,12 @@ enum nix_stype {
/* NIX Send queue context structure */
struct nix_sq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 sqe_way_mask : 16;
- u64 cq : 20;
- u64 sdp_mcast : 1;
- u64 substream : 20;
- u64 qint_idx : 6;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 qint_idx : 6;
u64 substream : 20;
u64 sdp_mcast : 1;
u64 cq : 20;
u64 sqe_way_mask : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 sqb_count : 16;
- u64 default_chan : 12;
- u64 smq_rr_quantum : 24;
- u64 sso_ena : 1;
- u64 xoff : 1;
- u64 cq_ena : 1;
- u64 smq : 9;
-#else
u64 smq : 9;
u64 cq_ena : 1;
u64 xoff : 1;
@@ -732,37 +601,12 @@ struct nix_sq_ctx_s {
u64 smq_rr_quantum : 24;
u64 default_chan : 12;
u64 sqb_count : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 rsvd_191 : 1;
- u64 sqe_stype : 2;
- u64 sq_int_ena : 8;
- u64 sq_int : 8;
- u64 sqb_aura : 20;
- u64 smq_rr_count : 25;
-#else
u64 smq_rr_count : 25;
u64 sqb_aura : 20;
u64 sq_int : 8;
u64 sq_int_ena : 8;
u64 sqe_stype : 2;
u64 rsvd_191 : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 rsvd_255_253 : 3;
- u64 smq_next_sq_vld : 1;
- u64 smq_pend : 1;
- u64 smenq_next_sqb_vld : 1;
- u64 head_offset : 6;
- u64 smenq_offset : 6;
- u64 tail_offset : 6;
- u64 smq_lso_segnum : 8;
- u64 smq_next_sq : 20;
- u64 mnq_dis : 1;
- u64 lmt_dis : 1;
- u64 cq_limit : 8;
- u64 max_sqe_size : 2;
-#else
u64 max_sqe_size : 2;
u64 cq_limit : 8;
u64 lmt_dis : 1;
@@ -776,23 +620,11 @@ struct nix_sq_ctx_s {
u64 smq_pend : 1;
u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3;
-#endif
u64 next_sqb : 64;/* W4 */
u64 tail_sqb : 64;/* W5 */
u64 smenq_sqb : 64;/* W6 */
u64 smenq_next_sqb : 64;/* W7 */
u64 head_sqb : 64;/* W8 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_630 : 10;
- u64 vfi_lso_vld : 1;
- u64 vfi_lso_vlan1_ins_ena : 1;
- u64 vfi_lso_vlan0_ins_ena : 1;
- u64 vfi_lso_mps : 14;
- u64 vfi_lso_sb : 8;
- u64 vfi_lso_sizem1 : 3;
- u64 vfi_lso_total : 18;
- u64 rsvd_583_576 : 8;
-#else
u64 rsvd_583_576 : 8;
u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3;
@@ -802,68 +634,28 @@ struct nix_sq_ctx_s {
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_658 : 46;
- u64 scm_lso_rem : 18;
-#else
u64 scm_lso_rem : 18;
u64 rsvd_703_658 : 46;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
- u64 rsvd_767_752 : 16;
- u64 octs : 48;
-#else
u64 octs : 48;
u64 rsvd_767_752 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
- u64 rsvd_831_816 : 16;
- u64 pkts : 48;
-#else
u64 pkts : 48;
u64 rsvd_831_816 : 16;
-#endif
u64 rsvd_895_832 : 64;/* W13 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
- u64 rsvd_959_944 : 16;
- u64 dropped_octs : 48;
-#else
u64 dropped_octs : 48;
u64 rsvd_959_944 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
- u64 rsvd_1023_1008 : 16;
- u64 dropped_pkts : 48;
-#else
u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16;
-#endif
};
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- uint32_t reserved_20_31 : 12;
- uint32_t rq : 20;
-#else
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-#endif
};
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- uint64_t next : 16;
- uint64_t pf_func : 16;
- uint64_t rsvd_31_24 : 8;
- uint64_t index : 20;
- uint64_t eol : 1;
- uint64_t rsvd_2 : 1;
- uint64_t op : 2;
-#else
uint64_t op : 2;
uint64_t rsvd_2 : 1;
uint64_t eol : 1;
@@ -871,7 +663,6 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
-#endif
};
enum nix_lsoalg {
@@ -890,15 +681,6 @@ enum nix_txlayer {
};
struct nix_lso_format {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd_19_63 : 45;
- u64 alg : 3;
- u64 rsvd_14_15 : 2;
- u64 sizem1 : 2;
- u64 rsvd_10_11 : 2;
- u64 layer : 2;
- u64 offset : 8;
-#else
u64 offset : 8;
u64 layer : 2;
u64 rsvd_10_11 : 2;
@@ -906,24 +688,9 @@ struct nix_lso_format {
u64 rsvd_14_15 : 2;
u64 alg : 3;
u64 rsvd_19_63 : 45;
-#endif
};
struct nix_rx_flowkey_alg {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_35_63 :29;
- u64 ltype_match :4;
- u64 ltype_mask :4;
- u64 sel_chan :1;
- u64 ena :1;
- u64 reserved_24_24 :1;
- u64 lid :3;
- u64 bytesm1 :5;
- u64 hdr_offset :8;
- u64 fn_mask :1;
- u64 ln_mask :1;
- u64 key_offset :6;
-#else
u64 key_offset :6;
u64 ln_mask :1;
u64 fn_mask :1;
@@ -936,7 +703,6 @@ struct nix_rx_flowkey_alg {
u64 ltype_mask :4;
u64 ltype_match :4;
u64 reserved_35_63 :29;
-#endif
};
/* NIX VTAG size */
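With the big-endian variants removed, these layouts describe only the
little-endian register image, filling each 64-bit word from bit 0 upward in
declaration order. A quick size sanity check one could add (a sketch, not
part of the patch):

#include <linux/build_bug.h>

static_assert(sizeof(struct nix_aq_inst_s) == 2 * sizeof(u64)); /* W0 + W1 */
static_assert(sizeof(struct nix_aq_res_s)  == 2 * sizeof(u64)); /* W0 + W1 */
static_assert(sizeof(struct nix_rsse_s)    == sizeof(u32));     /* one word */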
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 4193ae3bde6b..745aa8a19499 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's OcteonTX2 ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o
-octeontx2_nicvf-y := otx2_vf.o
+rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o otx2_flows.o cn10k.o
+rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
new file mode 100644
index 000000000000..9ec0313f13fc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+
+static struct dev_hw_ops otx2_hw_ops = {
+ .sq_aq_init = otx2_sq_aq_init,
+ .sqe_flush = otx2_sqe_flush,
+ .aura_freeptr = otx2_aura_freeptr,
+ .refill_pool_ptrs = otx2_refill_pool_ptrs,
+};
+
+static struct dev_hw_ops cn10k_hw_ops = {
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+};
+
+int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+{
+ int size, num_lines;
+ u64 base;
+
+ if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
+ pf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ pf->hw_ops = &cn10k_hw_ops;
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ (MBOX_SIZE * (pf->total_vfs + 1));
+
+ size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
+ (MBOX_SIZE * (pf->total_vfs + 1));
+
+ pf->hw.lmt_base = ioremap(base, size);
+
+ if (!pf->hw.lmt_base) {
+ dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ return -ENOMEM;
+ }
+
+ /* FIXME: Get the num of LMTST lines from LMT table */
+ pf->tot_lmt_lines = size / LMT_LINE_SIZE;
+ num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
+ pf->hw.tx_queues;
+ /* Number of LMT lines per SQ */
+ pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
+
+ pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
+ return 0;
+}
+
+int cn10k_vf_lmtst_init(struct otx2_nic *vf)
+{
+ int size, num_lines;
+
+ if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
+ vf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ vf->hw_ops = &cn10k_hw_ops;
+ size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
+ vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ size);
+ if (!vf->hw.lmt_base) {
+ dev_err(vf->dev, "Unable to map VF LMTST region\n");
+ return -ENOMEM;
+ }
+
+ vf->tot_lmt_lines = size / LMT_LINE_SIZE;
+ /* LMTST lines per SQ */
+ num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
+ vf->hw.tx_queues;
+ vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
+ vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+
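The PF and VF paths above share the same line accounting; a worked sketch
with assumed inputs (a 2 MB LMTST region and 16 TX queues; LMT_LINE_SIZE is
128 and NIX_LMTID_BASE is 72, per otx2_common.h below):

static int lmt_lines_per_sq(u64 region_len, u16 tx_queues)
{
	int tot_lines = region_len / LMT_LINE_SIZE;  /* 2 MB / 128 = 16384 */
	int num_lines = (tot_lines - NIX_LMTID_BASE) / tx_queues; /* 1019 */

	return num_lines > 32 ? 32 : num_lines;      /* capped at 32 per SQ */
}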
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ (qidx * pfvf->nix_lmt_size));
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ /* FIXME: set based on NIX_AF_DWRR_RPM_MTU */
+ aq->sq.smq_rr_weight = pfvf->netdev->mtu;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining, a minimum of 2000 unused SQ CQEs must be
+ * maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+#define NPA_MAX_BURST 16
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[NPA_MAX_BURST];
+ int num_ptrs = 1;
+ dma_addr_t bufptr;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
+ if (num_ptrs--)
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs,
+ cq->rbpool->lmt_addr);
+ break;
+ }
+ cq->pool_ptrs--;
+ ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ num_ptrs++;
+ if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs,
+ cq->rbpool->lmt_addr);
+ num_ptrs = 1;
+ }
+ }
+}
+
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+{
+ struct otx2_nic *pfvf = dev;
+ int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
+ u64 val = 0, tar_addr = 0;
+
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ cn10k_lmt_flush(val, tar_addr);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
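Concretely, for a single full-size SQE the encoding above works out as
follows (a worked sketch; the bit positions come from the FIXME comments and
remain assumptions until those are resolved):

	int lmt_id = 72, size = 128;  /* NIX_LMTID_BASE, one 128-byte SQE */
	u64 io_addr = 0;              /* stands in for sq->io_addr */
	u64 val = lmt_id & 0x7FF;     /* LMT_ID in [10:0]; a single LMTST */
	u64 tar_addr = io_addr | (((size / 16) - 1) & 0x7) << 4;
	/* (128 / 16) - 1 == 7, so the low bits are 0x70: the first
	 * LMTST carries 8 x 128-bit words.
	 */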
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
new file mode 100644
index 000000000000..e0bc595cbb78
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef CN10K_H
+#define CN10K_H
+
+#include "otx2_common.h"
+
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_pf_lmtst_init(struct otx2_nic *pf);
+int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 73fb94dd5fbc..cf7875d51d87 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -15,6 +15,7 @@
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
+#include "cn10k.h"
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
@@ -60,6 +61,19 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
}
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (req)
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
@@ -216,7 +230,6 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -270,14 +283,17 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -297,10 +313,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
@@ -335,9 +351,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -345,13 +362,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
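Each RSS context group now occupies its own slice of the NIX RSS table; the
qidx arithmetic in otx2_set_rss_table() above linearizes as follows (the
256-entry group size is assumed from MAX_RSS_INDIR_TBL_SIZE):

static int rss_table_qidx(int rss_size, int ctx_id, int idx)
{
	/* group 0 -> qidx 0..255, group 1 -> 256..511, and so on */
	return rss_size * ctx_id + idx; /* e.g. (256, 1, 5) -> 261 */
}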
@@ -473,33 +496,54 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize);
+ buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
- iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
page_frag_free(buf);
return -ENOMEM;
}
- return iova;
+ return 0;
}
-static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t addr;
+ int ret;
local_bh_disable();
- addr = __otx2_alloc_rbuf(pfvf, pool);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
local_bh_enable();
- return addr;
+ return ret;
+}
+
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma)
+{
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ return -ENOMEM;
+ }
+ return 0;
}
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
@@ -561,8 +605,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
- OTX2_MIN_MTU;
+ req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
+ | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
@@ -704,9 +748,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
-#define SEND_CQ_SKID 2000
-
static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -740,11 +781,48 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining, a minimum of 2000 unused SQ CQEs must be
+ * maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
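The cq_limit field is expressed in units of sqe_cnt/256 CQEs, so for the
4K-CQE queue the SEND_CQ_SKID comment refers to, the arithmetic above is:

	int sqe_cnt  = 4096;                           /* assumed 4K CQEs */
	int cq_limit = (SEND_CQ_SKID * 256) / sqe_cnt; /* 512000/4096 == 125 */
	/* HW scales it back by sqe_cnt/256: 125 * 16 == 2000 CQEs of skid */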
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
- struct nix_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -781,40 +859,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
sq->aura_id = sqb_aura;
sq->aura_fc_addr = pool->fc_addr->base;
- sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- /* Get memory to put this msg */
- aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
- if (!aq)
- return -ENOMEM;
-
- aq->sq.cq = pfvf->hw.rx_queues + qidx;
- aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
- aq->sq.cq_ena = 1;
- aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = DFLT_RR_QTM;
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
- aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
- aq->sq.sqb_aura = sqb_aura;
- aq->sq.sq_int_ena = NIX_SQINT_BITS;
- aq->sq.qint_idx = 0;
- /* Due pipelining impact minimum 2000 unused SQ CQE's
- * need to maintain to avoid CQ overflow.
- */
- aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
-
- /* Fill AQ info */
- aq->qidx = qidx;
- aq->ctype = NIX_AQ_CTYPE_SQ;
- aq->op = NIX_AQ_INSTOP_INIT;
+ return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
- return otx2_sync_mbox_msg(&pfvf->mbox);
}
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
@@ -893,7 +944,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- s64 bufptr;
+ dma_addr_t bufptr;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
@@ -903,8 +954,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool);
- if (bufptr <= 0) {
+ if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fail to free at least half of the
* pointers, else enable NAPI for this RQ.
*/
@@ -919,7 +969,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
}
return;
}
- otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
@@ -986,7 +1036,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->sq_cnt = pfvf->hw.tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->rss_grps = MAX_RSS_GROUPS;
nixlf->xqe_sz = NIX_XQESZ_W16;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
@@ -1163,6 +1213,11 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->rbsize = buf_size;
+ /* Set LMTST addr for NPA batch free */
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
+ pool->lmt_addr = (__force u64 *)((u64)pfvf->hw.npa_lmt_base +
+ (pool_id * LMT_LINE_SIZE));
+
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!aq) {
@@ -1203,8 +1258,8 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ dma_addr_t bufptr;
int err, ptr;
- s64 bufptr;
/* Calculate number of SQBs needed.
*
@@ -1249,10 +1304,9 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
@@ -1270,7 +1324,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
int stack_pages, pool_id, rq;
struct otx2_pool *pool;
int err, ptr, num_ptrs;
- s64 bufptr;
+ dma_addr_t bufptr;
num_ptrs = pfvf->qset.rqe_cnt;
@@ -1300,11 +1354,10 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id,
- bufptr + OTX2_HEAD_ROOM);
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
}
}
@@ -1481,6 +1534,13 @@ void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -1576,6 +1636,46 @@ void otx2_set_cints_affinity(struct otx2_nic *pfvf)
}
}
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+{
+ struct nix_hw_info *rsp;
+ struct msg_req *req;
+ u16 max_mtu;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!rc) {
+ rsp = (struct nix_hw_info *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ /* HW counts VLAN insertion bytes (8 for a double tag)
+ * irrespective of whether the SQE requests VLAN insertion
+ * in the packet or not. Hence these 8 bytes have to be
+ * discounted from the max packet size, otherwise HW will
+ * throw SMQ errors.
+ */
+ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to get MTU from hardware setting default value(1500)\n");
+ max_mtu = 1500;
+ }
+ return max_mtu;
+}
+EXPORT_SYMBOL(otx2_get_max_mtu);
+
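
A worked instance of the discount above, with an assumed AF-reported rsp->max_mtu of 9212 bytes (the number is illustrative only):

	/* OTX2_ETH_HLEN = VLAN_ETH_HLEN + VLAN_HLEN = 18 + 4 = 22 */
	max_mtu = 9212 - 8	/* HW-counted double VLAN tag bytes */
		       - 22;	/* OTX2_ETH_HLEN; result = 9182 */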
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 103430400a8a..a518c2283f18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -50,14 +50,21 @@ enum arua_mapped_qtypes {
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
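
For reference, a worked example of how this skid maps onto the SQ context's cq_limit field (formula taken from the otx2 SQ init path above), assuming a 4K-entry CQ:

	/* cq_limit is expressed in 1/256ths of the CQ ring size:
	 * cq_limit = (SEND_CQ_SKID * 256) / sqe_cnt
	 *          = (2000 * 256) / 4096 = 125
	 * i.e. roughly 125/256 * 4096 ~= 2000 CQEs stay in reserve.
	 */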
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -186,7 +193,6 @@ struct otx2_hw {
u8 lso_tsov6_idx;
u8 lso_udpv4_idx;
u8 lso_udpv6_idx;
- u8 hw_tso;
/* MSI-X */
u8 cint_cnt; /* CQ interrupt count */
@@ -200,8 +206,20 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
u8 cgx_links; /* No. of CGX links present in HW */
u8 lbk_links; /* No. of LBK links present in HW */
+#define HW_TSO 0
+#define CN10K_MBOX 1
+#define CN10K_LMTST 2
+ unsigned long cap_flag;
+
+#define LMT_LINE_SIZE 128
+#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
+ void __iomem *lmt_base;
+ u64 *npa_lmt_base;
+ u64 *nix_lmt_base;
};
struct otx2_vf_config {
@@ -260,9 +278,18 @@ struct otx2_flow_config {
struct list_head flow_list;
};
+struct dev_hw_ops {
+ int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+ void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ void (*aura_freeptr)(void *dev, int aura, u64 buf);
+};
+
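
A minimal sketch of how these callbacks might be bound per silicon at probe time; the two op tables and the helper below are assumptions for illustration, only the callback names come from this patch:

	static struct dev_hw_ops example_otx2_ops = {
		.sq_aq_init	  = otx2_sq_aq_init,
		.sqe_flush	  = otx2_sqe_flush,
		.refill_pool_ptrs = otx2_refill_pool_ptrs,
		.aura_freeptr	  = otx2_aura_freeptr,
	};

	static struct dev_hw_ops example_cn10k_ops = {
		.sq_aq_init	  = cn10k_sq_aq_init,
		.sqe_flush	  = cn10k_sqe_flush,
		.refill_pool_ptrs = cn10k_refill_pool_ptrs,
		.aura_freeptr	  = cn10k_aura_freeptr,
	};

	static void example_bind_hw_ops(struct otx2_nic *pfvf)
	{
		/* CN10K parts get the LMTST-based variants. */
		pfvf->hw_ops = is_dev_otx2(pfvf->pdev) ?
			       &example_otx2_ops : &example_cn10k_ops;
	}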
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
+ struct dev_hw_ops *hw_ops;
void *iommu_domain;
u16 max_frs;
u16 rbsize; /* Receive buffer size */
@@ -311,6 +338,10 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+ /* LMTST Lines info */
+ u16 tot_lmt_lines;
+ u16 nix_lmt_lines;
+ u32 nix_lmt_size;
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
@@ -335,6 +366,25 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+/* REVID for PCIe devices.
+ * Bits 1..0: minor pass, bits 3..2: major pass,
+ * bits 7..4: MIDR id.
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM);
+}
+
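
A small decode example for the revision byte, using a made-up sample value:

	/* pdev->revision == 0x14 (sample):
	 *   midr  =  0x14 & 0xF0      = 0x10 -> PCI_REVISION_ID_95XX
	 *   major = (0x14 >> 2) & 0x3 = 1
	 *   minor =  0x14 & 0x3       = 0    -> pass 1.0
	 */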
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -343,10 +393,10 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
- hw->hw_tso = true;
+ __set_bit(HW_TSO, &hw->cap_flag);
if (is_96xx_A0(pfvf->pdev)) {
- hw->hw_tso = false;
+ __clear_bit(HW_TSO, &hw->cap_flag);
/* Time based irq coalescing is not supported */
pfvf->hw.cq_qcount_wait = 0x0;
@@ -357,6 +407,13 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.rq_skid = 600;
pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
}
+ if (is_96xx_B0(pfvf->pdev))
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ if (!is_dev_otx2(pfvf->pdev)) {
+ __set_bit(CN10K_MBOX, &hw->cap_flag);
+ __set_bit(CN10K_LMTST, &hw->cap_flag);
+ }
}
/* Register read/write APIs */
@@ -465,10 +522,51 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
}
#else
-#define otx2_write128(lo, hi, addr)
+#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
#endif
+static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
+ u64 *ptrs, u64 num_ptrs,
+ u64 *lmt_addr)
+{
+ u64 size = 0, count_eot = 0;
+ u64 tar_addr, val = 0;
+
+ tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
+ /* LMTID is same as AURA Id */
+ val = (aura & 0x7FF) | BIT_ULL(63);
+ /* Set if [127:64] of last 128bit word has a valid pointer */
+ count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Set AURA ID to free pointer */
+ ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are valid from NPA_LF_AURA_BATCH_FREE0.
+ *
+ * tar_addr[6:4] is LMTST size-1 in units of 128b.
+ */
+ if (num_ptrs > 2) {
+ size = (sizeof(u64) * num_ptrs) / 16;
+ if (!count_eot)
+ size++;
+ tar_addr |= ((size - 1) & 0x7) << 4;
+ }
+ memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ /* Perform LMTST flush */
+ cn10k_lmt_flush(val, tar_addr);
+}
+
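
A worked example of the tar_addr size encoding above; num_ptrs counts the header word written to ptrs[0] plus the buffer pointers:

	/* num_ptrs == 2: single 128b LMTST word, the if-branch is skipped,
	 *                so tar_addr[6:4] stays 0 (see the helper below).
	 * num_ptrs == 5: count_eot = 0, size = (8 * 5) / 16 + 1 = 3,
	 *                tar_addr[6:4] = size - 1 = 2 -> three 128b words.
	 */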
+static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_pool *pool;
+ u64 ptrs[2];
+
+ pool = &pfvf->qset.pool[aura];
+ ptrs[1] = buf;
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
+}
+
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
@@ -480,11 +578,12 @@ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
}
/* Free pointer to a pool/aura */
-static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
- int aura, s64 buf)
+static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
- otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
- otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+ struct otx2_nic *pfvf = dev;
+ void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
+
+ otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
@@ -632,18 +731,23 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -656,6 +760,9 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
@@ -664,6 +771,7 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
@@ -684,13 +792,14 @@ int otx2_get_flow(struct otx2_nic *pfvf,
int otx2_get_all_flows(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
- struct ethtool_rx_flow_spec *fsp);
+ struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 67171b66a56c..f4962a97a075 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
+#include <linux/linkmode.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
@@ -32,6 +33,14 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
+/* Physical link config */
+#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111
+
+enum link_mode {
+ OTX2_MODE_SUPPORTED,
+ OTX2_MODE_ADVERTISED
+};
+
static const struct otx2_stat otx2_dev_stats[] = {
OTX2_DEV_STAT(rx_ucast_frames),
OTX2_DEV_STAT(rx_bcast_frames),
@@ -66,6 +75,8 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -128,6 +139,10 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -160,11 +175,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -183,6 +217,32 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
*(data++) = pfvf->hw.cgx_tx_stats[stat];
*(data++) = pfvf->reset_count;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
@@ -195,9 +255,11 @@ static int otx2_get_sset_count(struct net_device *netdev, int sset)
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
+ + 1;
}
/* Get no of queues device supports and current queue count */
@@ -448,10 +510,14 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- case AH_ESP_V6_FLOW:
+ break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -459,6 +525,7 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
default:
return -EINVAL;
}
+
return 0;
}
@@ -527,6 +594,36 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
return -EINVAL;
}
break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ /* If VLAN hashing is also requested for ESP then do not
+ * allow because of hardware 40 bytes flow key limit.
+ */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+ /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
+ * and ESP SPI+sequence(8 bytes) uses hardware maximum
+ * limit of 40 byte flow key.
+ */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
@@ -581,7 +678,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
- ret = otx2_add_flow(pfvf, &nfc->fs);
+ ret = otx2_add_flow(pfvf, nfc);
break;
case ETHTOOL_SRXCLSRLDEL:
if (netif_running(dev) && ntuple)
@@ -641,42 +738,50 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -688,20 +793,85 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
+ otx2_set_rss_table(pfvf, *rss_context);
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
}
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
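
For reference, a hedged userspace view of these hooks using standard ethtool syntax (the interface name and rule parameters are placeholders):

	/*   ethtool -X eth0 context new equal 2
	 *       - allocates a fresh RSS context (ETH_RXFH_CONTEXT_ALLOC path)
	 *   ethtool -N eth0 flow-type tcp4 dst-port 80 context 1
	 *       - steers a flow to that context (FLOW_RSS rule path)
	 *   ethtool -X eth0 context 1 delete
	 *       - tears the context down (otx2_rss_ctx_delete path)
	 */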
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -751,6 +921,304 @@ static int otx2_get_ts_info(struct net_device *netdev,
return 0;
}
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+ const int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
+#define FEC_MAX_INDEX 4
+ if (pfvf->linfo.fec < FEC_MAX_INDEX)
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
+ return 0;
+}
+
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct mbox *mbox = &pfvf->mbox;
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+ /* Firmware does not support AUTO mode; treat it as FEC_OFF */
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_AUTO:
+ fec = OTX2_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0)
+ pfvf->linfo.fec = rsp->fec;
+ else
+ err = rsp->fec;
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static void otx2_get_fec_info(u64 index, int req_mode,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
+
+ switch (index) {
+ case OTX2_FEC_NONE:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ }
+
+ /* Add fec modes to existing modes */
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_fec_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_fec_modes);
+}
+
+static void otx2_get_link_mode_info(u64 link_mode_bmap,
+ bool req_mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
+ const int otx2_sgmii_features[6] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ };
+ /* CGX link modes to Ethtool link mode mapping */
+ const int cgx_link_mode[27] = {
+ 0, /* SGMII Mode */
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ 0,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
+ };
+ u8 bit;
+
+ link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;
+
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
+ /* SGMII mode is set */
+ if (bit == 0)
+ linkmode_set_bit_array(otx2_sgmii_features,
+ ARRAY_SIZE(otx2_sgmii_features),
+ otx2_link_modes);
+ else
+ linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
+
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ otx2_link_modes);
+ else
+ linkmode_copy(link_ksettings->link_modes.supported,
+ otx2_link_modes);
+}
+
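
A worked decode of the mapping above, with a made-up firmware bitmap:

	/* link_mode_bmap == 0x9 (sample): bits 0 and 3 set.
	 *   bit 0 -> expand to all six SGMII 10/100/1000 modes
	 *   bit 3 -> ETHTOOL_LINK_MODE_10000baseSR_Full_BIT
	 * Bits outside OTX2_ETHTOOL_SUPPORTED_MODES are masked off first.
	 */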
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ ethtool_link_ksettings_add_link_mode(cmd,
+ supported,
+ Autoneg);
+
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
+ OTX2_MODE_SUPPORTED, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec,
+ OTX2_MODE_SUPPORTED, cmd);
+ return 0;
+}
+
+static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
+ u64 *mode)
+{
+ u32 bit_pos;
+
+ /* Firmware does not support requesting multiple advertised modes,
+ * so return the first set bit.
+ */
+ bit_pos = find_first_bit(cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
+ *mode = bit_pos;
+}
+
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct ethtool_link_ksettings cur_ks;
+ struct cgx_set_link_mode_req *req;
+ struct mbox *mbox = &pf->mbox;
+ int err = 0;
+
+ memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
+
+ if (!ethtool_validate_speed(cmd->base.speed) ||
+ !ethtool_validate_duplex(cmd->base.duplex))
+ return -EINVAL;
+
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ otx2_get_link_ksettings(netdev, &cur_ks);
+
+ /* Check requested modes against supported modes by hardware */
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+
+ req->args.speed = cmd->base.speed;
+ /* Firmware expects 1 for half duplex and 0 for full duplex,
+ * hence the inversion.
+ */
+ req->args.duplex = cmd->base.duplex ^ 0x1;
+ req->args.an = cmd->base.autoneg;
+ otx2_get_advertised_mode(cmd, &req->args.mode);
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -771,11 +1239,17 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
@@ -850,6 +1324,20 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -866,6 +1354,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
@@ -874,6 +1364,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index be8ccfce1848..0dbbf38e0597 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell.
*/
@@ -16,6 +16,7 @@ struct otx2_flow {
u32 location;
u16 entry;
bool is_vf;
+ u8 rss_ctx_id;
int vf;
};
@@ -245,6 +246,7 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
if (iter->location == location) {
nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
return 0;
}
}
@@ -270,14 +272,16 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
return err;
}
-static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -297,10 +301,16 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (ipv4_l4_mask->ip4src) {
memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
sizeof(pkt->ip4src));
@@ -339,20 +349,60 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tos & ah_esp_hdr->tos))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
break;
default:
break;
}
+
+ return 0;
}
-static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -372,10 +422,16 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
sizeof(pkt->ip6src));
@@ -414,10 +470,47 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
+
+ return 0;
}
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
@@ -428,8 +521,9 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
u32 flow_type;
+ int ret;
- flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
switch (flow_type) {
/* bits not set in mask are don't care */
case ETHER_FLOW:
@@ -455,13 +549,21 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
- otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
case IPV6_USER_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
- otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
default:
return -EOPNOTSUPP;
@@ -532,9 +634,13 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* change to unicast only if action of default entry is not
* requested by user
*/
- if (req->op != NIX_RX_ACTION_DEFAULT)
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ } else {
req->op = NIX_RX_ACTIONOP_UCAST;
- req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
if (vf > pci_num_vf(pfvf->pdev)) {
mutex_unlock(&pfvf->mbox.lock);
@@ -555,14 +661,16 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
-int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
- u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
bool new = false;
+ u32 ring;
int err;
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
@@ -585,6 +693,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
/* struct copy */
flow->flow_spec = *fsp;
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
err = otx2_add_flow_msg(pfvf, flow);
if (err) {
if (new)
@@ -647,6 +758,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
return 0;
}
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 634d60655a74..53ab1814d74b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell International Ltd.
*
@@ -22,10 +22,11 @@
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#include <rvu_trace.h>
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_NAME "rvu_nicpf"
+#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -585,9 +586,17 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ /* On the CN10K platform, the PF <-> VF mailbox region follows
+ * the PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq((void __iomem *)((u64)pf->reg_base +
+ RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
if (!hwbase) {
err = -ENOMEM;
goto free_wq;
@@ -779,6 +788,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
default:
if (msg->rc)
dev_err(pf->dev,
@@ -1039,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
* device memory to allow unaligned accesses.
*/
hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1276,6 +1288,33 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
}
}
+static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
+{
+ int frame_size;
+ int total_size;
+ int rbuf_size;
+
+ /* The data transferred by NIX to memory consists of actual packet
+ * plus additional data which has timestamp and/or EDSA/HIGIG2
+ * headers if interface is configured in corresponding modes.
+ * NIX transfers entire data using 6 segments/buffers and writes
+ * a CQE_RX descriptor with those segment addresses. First segment
+ * has additional data prepended to packet. Also software reserves a
+ * headroom of 128 bytes and sizeof(struct skb_shared_info) in
+ * each segment. Hence the total size of memory needed
+ * to receive a packet with 'mtu' is:
+ * frame size = mtu + additional data;
+ * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * each receive buffer size = memory / 6;
+ */
+ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ total_size = frame_size + (OTX2_HEAD_ROOM +
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ rbuf_size = total_size / 6;
+
+ return ALIGN(rbuf_size, 2048);
+}
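
A worked instance of the sizing above, assuming mtu == 1500 and that OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)) rounds to 384 bytes on the target config:

	/* frame_size = 1500 + 22 (OTX2_ETH_HLEN) + 8 (timestamp) = 1530
	 * total_size = 1530 + (128 + 384) * 6                    = 4602
	 * rbuf_size  = 4602 / 6                                  =  767
	 * ALIGN(767, 2048)                                       = 2048
	 */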
+
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
struct nix_lf_free_req *free_req;
@@ -1292,9 +1331,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = hw->tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
- OTX2_ETH_HLEN);
+ pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1487,6 +1526,14 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
+ /* Reserve LMT lines for NPA AURA batch free */
+ pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ /* Reserve LMT lines for NIX TX */
+ pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
+ (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ }
+
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -2325,6 +2372,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_netdev;
}
+ otx2_setup_dev_hw_settings(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
@@ -2350,7 +2399,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(pf);
+ err = cn10k_pf_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -2405,7 +2456,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(pf);
err = register_netdev(netdev);
if (err) {
@@ -2435,6 +2486,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (hw->lmt_base)
+ iounmap(hw->lmt_base);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2594,6 +2647,9 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_ptp_destroy(pf);
otx2_mcam_flow_del(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_base)
+ iounmap(pf->hw.lmt_base);
+
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 867f646e0802..21b811c6ee0f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -57,6 +59,7 @@
#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_VF_MBOX_REGION (0xC0000)
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -91,6 +94,7 @@
#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+#define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400)
/* NIX LF registers */
#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index cba59ddf71bb..1f49b3caf5d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -142,7 +142,9 @@ struct nix_rx_parse_s {
u64 vtag0_ptr : 8; /* W5 */
u64 vtag1_ptr : 8;
u64 flow_key_alg : 5;
- u64 rsvd_383_341 : 43;
+ u64 rsvd_359_341 : 19;
+ u64 color : 2;
+ u64 rsvd_383_362 : 22;
u64 rsvd_447_384; /* W6 */
};
@@ -218,7 +220,8 @@ struct nix_sqe_ext_s {
u64 vlan1_ins_tci : 16;
u64 vlan0_ins_ena : 1;
u64 vlan1_ins_ena : 1;
- u64 rsvd_127_114 : 14;
+ u64 init_color : 2;
+ u64 rsvd_127_116 : 12;
};
struct nix_sqe_sg_s {
@@ -237,7 +240,8 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
u64 offset : 16; /* W0 */
- u64 rsvd_52_16 : 37;
+ u64 rsvd_51_16 : 36;
+ u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
u64 alg : 4;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d0e25414f1a1..22ec03a618b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -17,6 +17,7 @@
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -199,7 +200,8 @@ static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
sg = (struct nix_rx_sg_s *)start;
seg_addr = &sg->seg_addr;
for (seg = 0; seg < sg->segs; seg++, seg_addr++)
- otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx,
+ *seg_addr & ~0x07ULL);
start += sizeof(*sg);
}
}
@@ -255,12 +257,11 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- if (cqe->sg.segs == 1)
- return false;
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (pfvf->netdev->features & NETIF_F_RXALL)
return false;
/* Free buffer back to pool */
@@ -275,9 +276,14 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe)
{
struct nix_rx_parse_s *parse = &cqe->parse;
+ struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ void *end, *start;
+ u64 *seg_addr;
+ u16 *seg_size;
+ int seg;
- if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
@@ -286,9 +292,19 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
- cq->pool_ptrs++;
-
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ seg_size = (void *)sg;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
+ otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
+ parse);
+ cq->pool_ptrs++;
+ }
+ start += sizeof(*sg);
+ }
otx2_set_rxhash(pfvf, cqe, skb);
skb_record_rx_queue(skb, cq->cq_idx);
@@ -304,7 +320,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- s64 bufptr;
while (likely(processed_cqe < budget)) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
@@ -330,29 +345,23 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
if (unlikely(!cq->pool_ptrs))
return 0;
-
/* Refill pool with new buffers */
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
+
+ return processed_cqe;
+}
+
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ dma_addr_t bufptr;
+
while (cq->pool_ptrs) {
- bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
- if (unlikely(bufptr <= 0)) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr))
break;
- }
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
-
- return processed_cqe;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -439,7 +448,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
return workdone;
}
-static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx)
{
u64 status;
@@ -797,7 +807,7 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
}
}
@@ -806,15 +816,17 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
{
int payload_len, last_seg_size;
- if (!pfvf->hw.hw_tso)
+ if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
+ return true;
+
+ /* On 96xx A0, HW TSO not supported */
+ if (!is_96xx_B0(pfvf->pdev))
return false;
/* HW has an issue due to which when the payload of the last LSO
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
*/
- if (!is_96xx_B0(pfvf->pdev))
- return true;
payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
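
A worked instance of the errata check above, with illustrative numbers (the final comparison against the 16-byte minimum sits outside this hunk):

	/* payload_len = 3005, gso_size = 1000:
	 *   last_seg_size = 3005 % 1000 = 5 bytes (< 16)
	 * -> fall back to software GSO rather than HW TSO on 96xx B0.
	 */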
@@ -915,7 +927,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 73af15685657..52486c1f0973 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -24,7 +24,6 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 64
-#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -114,6 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
+ u64 *lmt_addr;
u16 rbsize;
};
@@ -156,4 +156,10 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index d3e4cfd244e2..085be90a03eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -7,9 +7,10 @@
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "cn10k.h"
-#define DRV_NAME "octeontx2-nicvf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
@@ -277,7 +278,7 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
vf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -297,16 +298,25 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e PF0) and this VF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
- */
- hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
- if (!hwbase) {
- dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
- err = -ENOMEM;
- goto exit;
+ if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* On the CN10K platform, the VF mailbox region is in its BAR2
+ * register space.
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e. PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
}
err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
@@ -329,6 +339,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
return 0;
exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
destroy_workqueue(vf->mbox_wq);
return err;
}
@@ -525,6 +537,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
}
+ otx2_setup_dev_hw_settings(vf);
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -548,7 +561,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(vf);
+ err = cn10k_vf_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -571,7 +586,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(vf);
INIT_WORK(&vf->reset_task, otx2vf_reset_task);
@@ -600,6 +615,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_detach_rsrc:
+ if (hw->lmt_base)
+ iounmap(hw->lmt_base);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -628,8 +645,11 @@ static void otx2vf_remove(struct pci_dev *pdev)
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
-
otx2_detach_resources(&vf->mbox);
+
+ if (vf->hw.lmt_base)
+ iounmap(vf->hw.lmt_base);
+
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
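
The CN10K branch above relies on the VF mailbox living inside the already-mapped BAR2 register space, so only the legacy path owns a mapping that must be torn down. A minimal sketch of that gated map/unmap pattern; has_bar2_mbox, MBOX_BAR and MBOX_OFFSET are hypothetical stand-ins for the driver's capability flag and resource numbers:

/* Sketch only: has_bar2_mbox, MBOX_BAR and MBOX_OFFSET are hypothetical. */
static void __iomem *mbox_map(struct pci_dev *pdev, void __iomem *reg_base,
			      bool has_bar2_mbox)
{
	if (has_bar2_mbox)		/* window inside the existing BAR2 mapping */
		return reg_base + MBOX_OFFSET;

	/* Shared RAM region: map write-combined to permit unaligned access */
	return ioremap_wc(pci_resource_start(pdev, MBOX_BAR),
			  pci_resource_len(pdev, MBOX_BAR));
}

static void mbox_unmap(void __iomem *hwbase, bool has_bar2_mbox)
{
	if (hwbase && !has_bar2_mbox)	/* unmap only what we ioremap()ed */
		iounmap(hwbase);
}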
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 7d83e1f91ef1..49e052273f30 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -580,63 +580,55 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
}
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = prestera_hw_port_learning_set(port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
- memcpy(&br_port->flags, &flags, sizeof(flags));
+ memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
struct prestera_switch *sw = port->sw;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
- ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
}
static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
bool vlan_enabled)
{
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge = prestera_bridge_by_dev(sw->swdev, dev);
if (WARN_ON(!bridge))
return -EINVAL;
@@ -665,19 +657,15 @@ static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
return 0;
}
-static int presterar_port_attr_stp_state_set(struct prestera_port *port,
- struct switchdev_trans *trans,
- struct net_device *dev,
- u8 state)
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct net_device *dev,
+ u8 state)
{
struct prestera_bridge_port *br_port;
struct prestera_bridge_vlan *br_vlan;
int err;
u16 vid;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -713,34 +701,31 @@ err_port_stp_set:
static int prestera_port_obj_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = presterar_port_attr_stp_state_set(port, trans,
- attr->orig_dev,
- attr->u.stp_state);
+ err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- if (attr->u.brport_flags &
+ if (attr->u.brport_flags.mask &
~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = prestera_port_attr_br_flags_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = prestera_port_attr_br_ageing_set(port, trans,
+ err = prestera_port_attr_br_ageing_set(port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = prestera_port_attr_br_vlan_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
default:
@@ -1020,7 +1005,6 @@ prestera_bridge_port_vlan_del(struct prestera_port *port,
static int prestera_port_vlans_add(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1029,14 +1013,10 @@ static int prestera_port_vlans_add(struct prestera_port *port,
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- u16 vid;
if (netif_is_bridge_master(dev))
return 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1045,22 +1025,13 @@ static int prestera_port_vlans_add(struct prestera_port *port,
if (!bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = prestera_bridge_port_vlan_add(port, br_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return prestera_bridge_port_vlan_add(port, br_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static int prestera_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
@@ -1069,7 +1040,7 @@ static int prestera_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- return prestera_port_vlans_add(port, vlan, trans, extack);
+ return prestera_port_vlans_add(port, vlan, extack);
default:
return -EOPNOTSUPP;
}
@@ -1081,7 +1052,6 @@ static int prestera_port_vlans_del(struct prestera_port *port,
struct net_device *dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- u16 vid;
if (netif_is_bridge_master(dev))
return -EOPNOTSUPP;
@@ -1093,8 +1063,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
if (!br_port->bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- prestera_bridge_port_vlan_del(port, br_port, vid);
+ prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
return 0;
}
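
The prestera conversion tracks a switchdev API change: bridge port flags now arrive as struct switchdev_brport_flags, whose mask says which flags the caller is touching and whose val carries their new values, and the prepare/commit transaction (switchdev_trans) is gone, so validation runs inline. A condensed sketch of the resulting handler shape; my_port, hw_flood_set and hw_learning_set are placeholders for the driver's own types and hooks (e.g. prestera_hw_port_flood_set):

/* Sketch: program only the flags named in mask, reading new values
 * from val; flags outside the mask keep their current hardware state.
 */
static int port_brport_flags_set(struct my_port *port,
				 struct switchdev_brport_flags flags)
{
	int err;

	if (flags.mask & BR_FLOOD) {
		err = hw_flood_set(port, flags.val & BR_FLOOD);
		if (err)
			return err;
	}
	if (flags.mask & BR_LEARNING) {
		err = hw_learning_set(port, flags.val & BR_LEARNING);
		if (err)
			return err;
	}
	return 0;
}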
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ebe1406c6e64..dbec8e187a68 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4806,12 +4806,11 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
if (!is_valid_ether_addr(dev->dev_addr)) {
struct sockaddr sa = { AF_UNSPEC };
- netdev_warn(dev,
- "Invalid MAC address, defaulting to random\n");
+ dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n");
eth_hw_addr_random(dev);
memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
if (sky2_set_mac_address(dev, &sa))
- netdev_warn(dev, "Failed to set MAC address.\n");
+ dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n");
}
return dev;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6d2d60675ffd..01d3ee4b5829 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -353,7 +353,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
/* Setup gmac */
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
/* Only update control register when needed! */
@@ -759,8 +759,8 @@ static void mtk_get_stats64(struct net_device *dev,
static inline int mtk_max_frag_size(int mtu)
{
/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
- mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
+ mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -771,7 +771,7 @@ static inline int mtk_max_buf_size(int frag_size)
int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
return buf_size;
}
@@ -2499,6 +2499,35 @@ static void mtk_uninit(struct net_device *dev)
mtk_rx_irq_disable(eth, ~0);
}
+static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int length = new_mtu + MTK_RX_ETH_HLEN;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
+ if (length <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (length <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (length <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -2795,6 +2824,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
@@ -2896,7 +2926,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
- eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ else
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 454cfcd465fd..fd3cec8f06ba 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,12 +17,13 @@
#include <linux/phylink.h>
#define MTK_QDMA_PAGE_SIZE 2048
-#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_DMA_SIZE 256
#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -320,7 +321,12 @@
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
-#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
+#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
+#define MAC_MCR_MAX_RX_1518 0x0
+#define MAC_MCR_MAX_RX_1536 0x1
+#define MAC_MCR_MAX_RX_1552 0x2
+#define MAC_MCR_MAX_RX_2048 0x3
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
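
The new MAC_MCR_MAX_RX field replaces the single 1536 bit with a 2-bit selector in bits 25:24. A small sketch showing how a frame length maps onto the field, matching the thresholds used by mtk_change_mtu() above:

/* Sketch: pick the MAC_MCR_MAX_RX encoding for a frame length, as
 * mtk_change_mtu() does. GENMASK(25, 24) holds a 2-bit selector.
 */
static u32 mtk_mcr_max_rx_bits(int frame_len)
{
	u32 sel;

	if (frame_len <= 1518)
		sel = MAC_MCR_MAX_RX_1518;	/* 0x0 */
	else if (frame_len <= 1536)
		sel = MAC_MCR_MAX_RX_1536;	/* 0x1 */
	else if (frame_len <= 1552)
		sel = MAC_MCR_MAX_RX_1552;	/* 0x2 */
	else
		sel = MAC_MCR_MAX_RX_2048;	/* 0x3 */

	return MAC_MCR_MAX_RX(sel);		/* shifted into bits 25:24 */
}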
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32aad4d32b88..51b9700fce83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2839,8 +2839,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
@@ -2873,8 +2871,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1c9118a66c9..e35e4d7ef4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -682,8 +682,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
- xdp.rxq = &ring->xdp_rxq;
- xdp.frame_sz = priv->frag_info[0].frag_stride;
+ xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -777,10 +776,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = va - frags[0].page_offset;
- xdp.data = va;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + length;
+ xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
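
xdp_init_buff() and xdp_prepare_buff() are the generic helpers this hunk adopts; they fold the previous field-by-field setup into two calls. A paraphrase of what the helpers do internally (illustrative my_ variants, not the authoritative include/net/xdp.h bodies):

/* Illustrative expansion of the two helpers used above. */
static inline void my_xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz,
				    struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;	/* per-buffer truesize for XDP */
	xdp->rxq = rxq;			/* set once per NAPI cycle */
}

static inline void my_xdp_prepare_buff(struct xdp_buff *xdp,
					unsigned char *hard_start,
					int headroom, int data_len,
					bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1; /* data + 1 marks meta invalid */
}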
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 394f43add85c..a99e71bc7b3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
if (!fs_rule->mirr_mbox) {
mlx4_err(dev, "rule mirroring mailbox is null\n");
+ mlx4_free_cmd_mailbox(dev, mailbox);
return -EINVAL;
}
memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6e4d7bb7fea2..9d623e38d783 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -149,14 +149,14 @@ config MLX5_IPSEC
IPsec support for the Connect-X family.
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload accelaration"
+ bool "IPSec XFRM cryptography-offload acceleration"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
default n
help
- Build support for IPsec cryptography-offload accelaration in the NIC.
+ Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
@@ -166,7 +166,6 @@ config MLX5_FPGA_TLS
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on MLX5_FPGA
- depends on XPS
select MLX5_EN_TLS
default n
help
@@ -181,7 +180,6 @@ config MLX5_TLS
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- depends on XPS
select MLX5_ACCEL
select MLX5_EN_TLS
default n
@@ -192,7 +190,7 @@ config MLX5_TLS
config MLX5_EN_TLS
bool
help
- Build support for TLS cryptography-offload accelaration in the NIC.
+ Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
@@ -203,3 +201,22 @@ config MLX5_SW_STEERING
default y
help
Build support for software-managed steering in the NIC.
+
+config MLX5_SF
+ bool "Mellanox Technologies subfunction device support using auxiliary device"
+ depends on MLX5_CORE && MLX5_CORE_EN
+ default n
+ help
+ Build support for subfunction device in the NIC. A Mellanox subfunction
+ device can support RDMA, netdevice and vdpa devices.
+ It is similar to an SRIOV VF but it doesn't require SRIOV support.
+
+config MLX5_SF_MANAGER
+ bool
+ depends on MLX5_SF && MLX5_ESWITCH
+ default y
+ help
+ Build support for subfunction port in the NIC. A Mellanox subfunction
+ port is managed through devlink. A subfunction supports RDMA, netdevice
+ and vdpa devices. It is similar to an SRIOV VF but it doesn't require
+ SRIOV support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 77961643d5a9..8cb2625472c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
+ fw_reset.o qos.o
#
# Netdev basic
@@ -25,7 +26,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
+ en/qos.o en/trap.o
#
# Netdev extra
@@ -38,6 +40,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/mapping.o lib/fs_chains.o en/tc_tun.o \
+ esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
@@ -83,5 +86,15 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
+ steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
+#
+# SF device
+#
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+
+#
+# SF manager
+#
+mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 50c7b9ee80c3..e8cecd50558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -333,6 +333,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_DEALLOC_SF:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,6 +465,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_QUERY_VHCA_STATE:
+ case MLX5_CMD_OP_MODIFY_VHCA_STATE:
+ case MLX5_CMD_OP_ALLOC_SF:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -657,6 +661,10 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(ALLOC_SF);
+ MLX5_COMMAND_STR_CASE(DEALLOC_SF);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3261d0dc1104..d7d8a68ef23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,8 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -127,6 +129,22 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool sf_dev_allocated;
+
+ sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+ if (sf_dev_allocated) {
+ /* Reload results in deleting the SF device, which further results
+ * in unregistering the devlink instance while holding devlink_mutex.
+ * Hence, do not support reload.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlx5_lag_is_active(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
+ return -EOPNOTSUPP;
+ }
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -168,6 +186,91 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
return 0;
}
+static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.id == trap_id)
+ return dl_trap;
+
+ return NULL;
+}
+
+static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
+ if (!dl_trap)
+ return -ENOMEM;
+
+ dl_trap->trap.id = trap->id;
+ dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
+ dl_trap->item = trap_ctx;
+
+ if (mlx5_find_trap_by_id(dev, trap->id)) {
+ kfree(dl_trap);
+ mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
+ return -EEXIST;
+ }
+
+ list_add_tail(&dl_trap->list, &dev->priv.traps);
+ return 0;
+}
+
+static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
+ return;
+ }
+ list_del(&dl_trap->list);
+ kfree(dl_trap);
+}
+
+static int mlx5_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum devlink_trap_action action_orig;
+ struct mlx5_devlink_trap *dl_trap;
+ int err = 0;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (action == dl_trap->trap.action)
+ goto out;
+
+ action_orig = dl_trap->trap.action;
+ dl_trap->trap.action = action;
+ err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
+ &dl_trap->trap);
+ if (err)
+ dl_trap->trap.action = action_orig;
+out:
+ return err;
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -179,6 +282,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ .port_new = mlx5_devlink_sf_port_new,
+ .port_del = mlx5_devlink_sf_port_del,
+ .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
+#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -186,8 +295,59 @@ static const struct devlink_ops mlx5_devlink_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
+ .trap_init = mlx5_devlink_trap_init,
+ .trap_fini = mlx5_devlink_trap_fini,
+ .trap_action_set = mlx5_devlink_trap_action_set,
};
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
+ return;
+ }
+
+ if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
+ mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
+ dl_trap->trap.action);
+ return;
+ }
+ devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
+}
+
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devlink_trap *dl_trap;
+ int count = 0;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
+ count++;
+
+ return count;
+}
+
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
+ trap_id);
+ return -EINVAL;
+ }
+
+ *action = dl_trap->trap.action;
+ return 0;
+}
+
struct devlink *mlx5_devlink_alloc(void)
{
return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
@@ -273,6 +433,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multi-port slave/Lag device can't configure RoCE");
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -358,6 +522,49 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
#endif
}
+#define MLX5_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
+
+static const struct devlink_trap mlx5_traps_arr[] = {
+ MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
+};
+
+static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+};
+
+static int mlx5_devlink_traps_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *core_dev = devlink_priv(devlink);
+ int err;
+
+ err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ if (err)
+ return err;
+
+ err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
+ if (err)
+ goto err_trap_group;
+ return 0;
+
+err_trap_group:
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ return err;
+}
+
+static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+{
+ devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+}
+
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
int err;
@@ -372,8 +579,16 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+
+ err = mlx5_devlink_traps_register(devlink);
+ if (err)
+ goto traps_reg_err;
+
return 0;
+traps_reg_err:
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
params_reg_err:
devlink_unregister(devlink);
return err;
@@ -381,6 +596,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ mlx5_devlink_traps_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
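
mlx5_devlink_trap_action_set() above uses a revert-on-veto idiom when fanning the new action out through the blocking notifier chain. The same shape, reduced to its essentials; traplike and notify_consumers() are hypothetical stand-ins for the driver's trap context and mlx5_blocking_notifier_call_chain():

/* Sketch: publish new state tentatively, notify consumers, roll back
 * if any notifier in the chain rejects the change.
 */
struct traplike { int action; };

static int notify_consumers(struct traplike *t);	/* hypothetical */

static int set_action_notified(struct traplike *t, int new_action)
{
	int old = t->action;
	int err;

	if (new_action == old)
		return 0;

	t->action = new_action;		/* publish tentatively */
	err = notify_consumers(t);	/* consumers may veto */
	if (err)
		t->action = old;	/* restore previous action */
	return err;
}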
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index f0de327a59be..eff107dad922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -12,6 +12,24 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
};
+struct mlx5_trap_ctx {
+ int id;
+ int action;
+};
+
+struct mlx5_devlink_trap {
+ struct mlx5_trap_ctx trap;
+ void *item;
+ struct list_head list;
+};
+
+struct mlx5_core_dev;
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port);
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action);
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
index 1177860a2ee4..f15718db5d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -15,7 +15,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
bool neigh_connected),
TP_ARGS(nhe, ha, neigh_connected),
- TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
__array(u8, ha, ETH_ALEN)
__array(u8, v4, 4)
__array(u8, v6, 16)
@@ -25,7 +25,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, mn->dev->name);
+ __assign_str(devname, nhe->neigh_dev->name);
__entry->neigh_connected = neigh_connected;
memcpy(__entry->ha, ha, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
index d4e6cfaaade3..ac52ef37f38a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -77,7 +77,7 @@ TRACE_EVENT(mlx5e_stats_flower,
TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
TP_ARGS(nhe, neigh_used),
- TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
__array(u8, v4, 4)
__array(u8, v6, 16)
__field(bool, neigh_used)
@@ -86,7 +86,7 @@ TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, mn->dev->name);
+ __assign_str(devname, nhe->neigh_dev->name);
__entry->neigh_used = neigh_used;
p32 = (__be32 *)__entry->v4;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 055baf3b6cb1..7435fe6829b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,7 +55,9 @@
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
+#include "en/qos.h"
#include "lib/hv_vhca.h"
+#include "lib/clock.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -136,10 +138,10 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
-#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x8
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -161,6 +163,9 @@ do { \
##__VA_ARGS__); \
} while (0)
+#define mlx5e_state_dereference(priv, p) \
+ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+
enum mlx5e_rq_group {
MLX5E_RQ_GROUP_REGULAR,
MLX5E_RQ_GROUP_XSK,
@@ -389,6 +394,7 @@ struct mlx5e_txqsq {
u32 rate_limit;
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
+ cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -560,6 +566,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT,
@@ -650,6 +657,7 @@ struct mlx5e_rq {
/* XDP read-mostly */
struct xdp_rxq_info xdp_rxq;
+ cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
@@ -663,11 +671,13 @@ struct mlx5e_channel {
struct mlx5e_xdpsq rq_xdpsq;
struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
+ struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
__be32 mkey_be;
+ u16 qos_sqs_size;
u8 num_tc;
u8 lag_port;
@@ -756,6 +766,8 @@ struct mlx5e_modify_sq_param {
int next_state;
int rl_update;
int rl_index;
+ bool qos_update;
+ u16 qos_queue_group_id;
};
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
@@ -788,10 +800,22 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5e_sq_stats **qos_sq_stats;
+ u16 max_qos_sqs;
+ u16 maj_id;
+ u16 defcls;
+};
+
+struct mlx5e_trap;
+
struct mlx5e_priv {
/* priv data path fields - start */
/* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
+ MLX5E_QOS_MAX_LEAF_NODES];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -826,8 +850,10 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
+ struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats trap_stats;
struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch;
u8 max_opened_tc;
@@ -836,6 +862,7 @@ struct mlx5e_priv {
u16 q_counter;
u16 drop_rq_q_counter;
struct notifier_block events_nb;
+ struct notifier_block blocking_events_nb;
int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
@@ -859,6 +886,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
+ struct mlx5e_htb htb;
};
struct mlx5e_rx_handlers {
@@ -870,8 +898,7 @@ extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile, void *ppriv);
+ struct net_device *netdev);
void (*cleanup)(struct mlx5e_priv *priv);
int (*init_rx)(struct mlx5e_priv *priv);
void (*cleanup_rx)(struct mlx5e_priv *priv);
@@ -942,6 +969,8 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -986,6 +1015,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context);
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
@@ -1010,6 +1040,9 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
@@ -1020,8 +1053,10 @@ struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
@@ -1047,6 +1082,8 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
+void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
@@ -1120,24 +1157,25 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
-int mlx5e_netdev_init(struct net_device *netdev,
- struct mlx5e_priv *priv,
- struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
-void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
-struct net_device*
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- int nch, void *ppriv);
+static inline unsigned int
+mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
+{
+ return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+}
+
+int mlx5e_priv_init(struct mlx5e_priv *priv,
+ struct net_device *netdev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
+struct net_device *
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5e_priv *priv,
- struct mlx5e_xsk *xsk,
- struct mlx5e_rss_params *rss_params,
- struct mlx5e_params *params,
- u16 mtu);
+void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
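
The new mlx5e_state_dereference() macro wraps rcu_dereference_protected() with a lockdep check that priv->state_lock is held, so configuration-path code can read RCU pointers (such as the per-channel qos_sqs array added above) without rcu_read_lock. A usage sketch under those assumptions; example_walk_qos_sqs is illustrative only:

/* Sketch: control-path read of an RCU pointer that is only ever
 * updated under priv->state_lock. lockdep verifies the lock is held.
 */
static void example_walk_qos_sqs(struct mlx5e_priv *priv,
				 struct mlx5e_channel *c)
{
	struct mlx5e_txqsq __rcu **qos_sqs;

	mutex_lock(&priv->state_lock);
	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	if (qos_sqs) {
		/* entries may be dereferenced here, still under the lock */
	}
	mutex_unlock(&priv->state_lock);
}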
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 5749557749b0..a16297e7e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -44,6 +44,11 @@ struct mlx5e_l2_rule {
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+struct mlx5e_promisc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rule;
+};
+
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
@@ -53,6 +58,7 @@ struct mlx5e_vlan_table {
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
+ struct mlx5_flow_handle *trap_rule;
bool cvlan_filter_disabled;
};
@@ -62,7 +68,7 @@ struct mlx5e_l2_table {
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
+ struct mlx5_flow_handle *trap_rule;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
@@ -126,7 +132,8 @@ struct mlx5e_ttc_table {
/* NIC prio FTS */
enum {
- MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_PROMISC_FT_LEVEL,
+ MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
@@ -241,6 +248,7 @@ struct mlx5e_flow_steering {
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
+ struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
@@ -288,6 +296,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 718f8c0a4f6b..84e501e057b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
- return err;
+ goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 43271a3856ca..36381a2ed5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -179,7 +179,7 @@ int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
if (stop_room >= sq_size) {
- netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+ netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
stop_room, sq_size);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 807147d97a0f..ea2cfb04b31a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -118,6 +118,8 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param);
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param);
void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 2a2bac30daaa..d57b6f06382f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -3,7 +3,6 @@
#include "en/ptp.h"
#include "en/txrx.h"
-#include "lib/clock.h"
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
@@ -70,6 +69,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
int budget)
{
struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -77,7 +77,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out;
}
- hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
+ hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
@@ -183,6 +183,9 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
+ sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
node = dev_to_node(mlx5_core_dma_dev(mdev));
@@ -261,7 +264,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
- err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
goto err_free_txqsq;
@@ -428,16 +431,13 @@ static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
if (err)
return err;
- napi_enable(&c->napi);
-
err = mlx5e_ptp_open_txqsqs(c, cparams);
if (err)
- goto disable_napi;
+ goto close_cqs;
return 0;
-disable_napi:
- napi_disable(&c->napi);
+close_cqs:
mlx5e_ptp_close_cqs(c);
return err;
@@ -446,7 +446,6 @@ disable_napi:
static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
{
mlx5e_ptp_close_txqsqs(c);
- napi_disable(&c->napi);
mlx5e_ptp_close_cqs(c);
}
@@ -515,6 +514,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
{
int tc;
+ napi_enable(&c->napi);
+
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
}
@@ -525,4 +526,6 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+
+ napi_disable(&c->napi);
}
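
The ptp_cyc2time member added to the SQ is a cqe_ts_to_ns function pointer resolved once at queue creation: mlx5_real_time_cyc2time when the device timestamps CQEs in real-time mode, mlx5_timecounter_cyc2time otherwise, so the hot path converts without re-checking the capability. A generic sketch of this select-once, call-per-completion pattern (all my_ names hypothetical):

/* Sketch: resolve the conversion strategy once, outside the datapath. */
typedef u64 (*ts_to_ns_fn)(void *clock, u64 raw_ts);

struct my_sq {
	void *clock;
	ts_to_ns_fn cyc2time;	/* set at open time, read-only afterwards */
};

static void my_sq_init(struct my_sq *sq, bool real_time_mode,
		       ts_to_ns_fn real_time, ts_to_ns_fn timecounter)
{
	sq->cyc2time = real_time_mode ? real_time : timecounter;
}

static u64 my_sq_cqe_to_ns(struct my_sq *sq, u64 cqe_ts)
{
	return sq->cyc2time(sq->clock, cqe_ts);	/* no branch per completion */
}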
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
new file mode 100644
index 000000000000..12d7ad061237
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en.h"
+#include "params.h"
+#include "../qos.h"
+
+#define BYTES_IN_MBIT 125000
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
+}
+
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
+{
+ int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
+
+ return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
+}
+
+/* Software representation of the QoS tree (internal to this file) */
+
+static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
+{
+ int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(priv->htb.qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+static struct mlx5e_qos_node *
+mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, priv->htb.qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, priv->htb.qos_used_qids);
+ mlx5e_update_tx_netdev_queues(priv);
+ }
+ kfree_rcu(node, rcu);
+}
+
+/* TX datapath API */
+
+static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+{
+ /* These channel params are safe to access from the datapath, because:
+ * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * and the number of queues can't change while HTB offload is active.
+ * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * mlx5e_select_queue to finish while holding priv->state_lock,
+ * preventing other code from changing the number of queues.
+ */
+ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
+
+ return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+}
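+/* Worked example (illustrative): with num_channels = 4, num_tc = 2 and
+ * no PTP channel, the regular txqs occupy indices 0..7, so QoS qid 5
+ * lands on txq (4 + 0) * 2 + 5 = 13. With MLX5E_PFLAG_TX_PORT_TS set,
+ * the PTP channel shifts the QoS range up by one channel's worth of tcs.
+ */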
+
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_sw_node_find_rcu(priv, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_channel *c;
+ int ix;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ return mlx5e_state_dereference(priv, qos_sqs[qid]);
+}
+
+/* SQ lifecycle */
+
+static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ struct mlx5e_qos_node *node)
+{
+ struct mlx5e_create_cq_param ccp = {};
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_sq_param param_sq;
+ struct mlx5e_cq_param param_cq;
+ int txq_ix, ix, qid, err = 0;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+
+ params = &chs->params;
+
+ txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+
+ WARN_ON(node->qid > priv->htb.max_qos_sqs);
+ if (node->qid == priv->htb.max_qos_sqs) {
+ struct mlx5e_sq_stats *stats, **stats_list = NULL;
+
+ if (priv->htb.max_qos_sqs == 0) {
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list),
+ GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+ }
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ kvfree(stats_list);
+ return -ENOMEM;
+ }
+ if (stats_list)
+ WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
+ /* Order max_qos_sqs increment after writing the array pointer.
+ * Pairs with smp_load_acquire in en_stats.c.
+ */
+ smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ }
+
+ ix = node->qid % params->num_channels;
+ qid = node->qid / params->num_channels;
+ c = chs->c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+
+ if (!sq)
+ return -ENOMEM;
+
+ mlx5e_build_create_cq_param(&ccp, c);
+
+ memset(&param_sq, 0, sizeof(param_sq));
+ memset(&param_cq, 0, sizeof(param_cq));
+ mlx5e_build_sq_param(priv, params, &param_sq);
+ mlx5e_build_tx_cq_param(priv, params, &param_cq);
+ err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ if (err)
+ goto err_free_sq;
+ err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
+ &param_sq, sq, 0, node->hw_id, node->qid);
+ if (err)
+ goto err_close_cq;
+
+ rcu_assign_pointer(qos_sqs[qid], sq);
+
+ return 0;
+
+err_close_cq:
+ mlx5e_close_cq(&sq->cq);
+err_free_sq:
+ kfree(sq);
+ return err;
+}
+
+static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, node->qid);
+
+ WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
+
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ mlx5e_activate_txqsq(sq);
+}
+
+static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, qid);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+}
+
+static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int ix;
+
+ params = &priv->channels.params;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+}
+
+void mlx5e_qos_close_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
+ if (!qos_sqs)
+ return;
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+ }
+
+ kvfree(qos_sqs);
+}
+
+static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_close_queues(chs->c[i]);
+}
+
+static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ u16 qos_sqs_size;
+ int i;
+
+ qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);
+
+ for (i = 0; i < chs->num; i++) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
+ if (!sqs)
+ goto err_free;
+
+ WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
+ smp_wmb(); /* Pairs with mlx5e_napi_poll. */
+ rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
+ }
+
+ return 0;
+
+err_free:
+ while (--i >= 0) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
+ lockdep_is_held(&priv->state_lock));
+
+ synchronize_rcu(); /* Sync with NAPI. */
+ kvfree(sqs);
+ }
+ return -ENOMEM;
+}
+
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ if (!priv->htb.maj_id)
+ return 0;
+
+ err = mlx5e_qos_alloc_queues(priv, chs);
+ if (err)
+ return err;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = mlx5e_open_qos_sq(priv, chs, node);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ mlx5e_activate_qos_sq(priv, node);
+ }
+}
+
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_params *params = &c->priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
+ if (!qos_sqs)
+ return;
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ u16 qid = params->num_channels * i + c->ix;
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ }
+}
+
+static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_deactivate_queues(chs->c[i]);
+}
+
+/* HTB API */
+
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "Missing QoS capabilities. Try disabling SRIOV or using a supported device.");
+ return -EOPNOTSUPP;
+ }
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ return err;
+ }
+
+ root = mlx5e_sw_node_create_root(priv);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ WRITE_ONCE(priv->htb.defcls, htb_defcls);
+ /* Order maj_id after defcls - pairs with
+ * mlx5e_select_queue/mlx5e_select_htb_queues.
+ */
+ smp_store_release(&priv->htb.maj_id, htb_maj_id);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_sw_node_delete(priv, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+ return err;
+}
+
+int mlx5e_htb_root_del(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+
+ WRITE_ONCE(priv->htb.maj_id, 0);
+ synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
+
+ root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
+ if (err)
+ qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_sw_node_delete(priv, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
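+	/* share is the rate expressed as a percentage of the nearest
+	 * ancestor's ceil (101 when no ancestor sets a ceil). Clamp it to
+	 * the 1..100 range: rates above the ceil map to 0, and non-zero
+	 * rates below 1% round up to 1.
+	 */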
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
+{
+ *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+
+ qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ int qid;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_find_unused_qos_qid(priv);
+ if (qid < 0) {
+		NL_SET_ERR_MSG_MOD(extack, "Maximum number of leaf classes reached.");
+ return qid;
+ }
+
+ parent = mlx5e_sw_node_find(priv, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_sw_node_delete(priv, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+	/* No fail point: any errors from here on are non-fatal. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_sw_node_delete(priv, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+{
+ qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
+ netdev_tx_reset_queue(txq);
+ netif_tx_start_queue(txq);
+}
+
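+/* Flush the qdisc attached to this txq, so that packets queued for the
+ * old class cannot leak into whatever class reuses the queue id.
+ */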
+static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
+
+ *old_qid = *new_qid = 0;
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ mlx5e_sw_node_delete(priv, node);
+
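+	/* Keep qids contiguous: if the deleted leaf did not hold the highest
+	 * qid, move the node that holds the highest qid into the freed slot.
+	 */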
+ moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+		WARN(moved_qid != qid - 1, "Gaps in queue numbering: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb.qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, moved_qid);
+
+ __set_bit(qid, priv->htb.qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
+ *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
+ return 0;
+}
+
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
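+	/* The parent is about to become a leaf again: create its replacement
+	 * leaf scheduling element under the grandparent first, so that the
+	 * qid can be handed over to the parent without leaving a gap.
+	 */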
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, qid);
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_sw_node_delete(priv, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_qos_update_children(priv, node, extack);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
new file mode 100644
index 000000000000..5af7991fcd19
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_QOS_H
+#define __MLX5E_EN_QOS_H
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_priv;
+struct mlx5e_channels;
+struct mlx5e_channel;
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
+
+/* TX datapath API */
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
+struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
+
+/* SQ lifecycle */
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+
+/* HTB API */
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_root_del(struct mlx5e_priv *priv);
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index 58e27038c947..be0ee03de721 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,10 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ bool neigh_connected, same_dev;
struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
struct mlx5e_priv *priv;
- bool neigh_connected;
u8 nud_state, dead;
rtnl_lock();
@@ -146,12 +146,16 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
memcpy(ha, n->ha, ETH_ALEN);
nud_state = n->nud_state;
dead = n->dead;
+ same_dev = READ_ONCE(nhe->neigh_dev) == n->dev;
read_unlock_bh(&n->lock);
neigh_connected = (nud_state & NUD_VALID) && !dead;
trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+ if (!same_dev)
+ goto out;
+
list_for_each_entry(e, &nhe->encap_list, encap_list) {
if (!mlx5e_encap_take(e))
continue;
@@ -160,6 +164,7 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
mlx5e_encap_put(priv, e);
}
+out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
}
@@ -175,7 +180,6 @@ static struct neigh_update_work *mlx5e_alloc_neigh_update_work(struct mlx5e_priv
if (WARN_ON(!update_work))
return NULL;
- m_neigh.dev = n->dev;
m_neigh.family = n->ops->family;
memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
@@ -246,7 +250,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
rcu_read_lock();
list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
neigh_list) {
- if (p->dev == nhe->m_neigh.dev) {
+ if (p->dev == READ_ONCE(nhe->neigh_dev)) {
found = true;
break;
}
@@ -279,7 +283,7 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
if (err)
- return err;
+ goto out_err;
INIT_LIST_HEAD(&neigh_update->neigh_list);
mutex_init(&neigh_update->encap_lock);
@@ -287,14 +291,19 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_neigh_stats_work);
mlx5e_rep_neigh_update_init_interval(rpriv);
- rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
- err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ neigh_update->netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&neigh_update->netevent_nb);
if (err)
- goto out_err;
+ goto out_notifier;
return 0;
-out_err:
+out_notifier:
+ neigh_update->netevent_nb.notifier_call = NULL;
rhashtable_destroy(&neigh_update->neigh_ht);
+out_err:
+ netdev_warn(rpriv->netdev,
+		    "Failed to initialize neighbour handling for vport %d\n",
+ rpriv->rep->vport);
return err;
}
@@ -303,6 +312,9 @@ void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ if (!rpriv->neigh_update.netevent_nb.notifier_call)
+ return;
+
unregister_netevent_notifier(&neigh_update->netevent_nb);
flush_workqueue(priv->wq); /* flush neigh update works */
@@ -361,7 +373,8 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
}
int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev,
struct mlx5e_neigh_hash_entry **nhe)
{
int err;
@@ -371,10 +384,11 @@ int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
return -ENOMEM;
(*nhe)->priv = priv;
- memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ memcpy(&(*nhe)->m_neigh, m_neigh, sizeof(*m_neigh));
spin_lock_init(&(*nhe)->encap_list_lock);
INIT_LIST_HEAD(&(*nhe)->encap_list);
refcount_set(&(*nhe)->refcnt, 1);
+ WRITE_ONCE((*nhe)->neigh_dev, neigh_dev);
err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
index 32b239189c95..6fe0ab970943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
@@ -16,7 +16,8 @@ struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev,
struct mlx5e_neigh_hash_entry **nhe);
void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..065126370acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -26,7 +26,9 @@ struct mlx5e_rep_indr_block_priv {
};
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
@@ -39,9 +41,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
return err;
mutex_lock(&rpriv->neigh_update.encap_lock);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
if (!nhe) {
- err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
if (err) {
mutex_unlock(&rpriv->neigh_update.encap_lock);
mlx5_tun_entropy_refcount_dec(tun_entropy,
@@ -122,7 +124,7 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
}
unlock:
mutex_unlock(&esw->offloads.encap_tbl_lock);
- mlx5e_put_encap_flow_list(priv, &flow_list);
+ mlx5e_put_flow_list(priv, &flow_list);
}
static int
@@ -626,6 +628,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+	/* If reg_c0 is not equal to the default flow tag, then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
@@ -646,7 +653,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
@@ -655,7 +662,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
return false;
}
- tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);
+ tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index fdf9702c2d7d..d0661578467b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -27,7 +27,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
unsigned char ha[ETH_ALEN]);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev);
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..f3f6eb081948 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -12,6 +12,7 @@
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <linux/workqueue.h>
+#include <linux/refcount.h>
#include <linux/xarray.h>
#include "lib/fs_chains.h"
@@ -27,6 +28,7 @@
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
+#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -51,11 +53,11 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *ct_nat;
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
- struct mutex shared_counter_lock;
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_fs_chains *chains;
+ spinlock_t ht_lock; /* protects ft entries */
};
struct mlx5_ct_flow {
@@ -118,21 +120,32 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
+};
+
+enum {
+ MLX5_CT_ENTRY_FLAG_VALID,
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
struct mlx5_ct_tuple tuple_nat;
struct mlx5_ct_zone_rule zone_rules[2];
+
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct work_struct work;
+
+ refcount_t refcnt;
+ unsigned long flags;
};
static const struct rhashtable_params cts_ht_params = {
@@ -166,6 +179,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
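+/* The NAT tuple node is hashed only when the NAT tuple differs from the
+ * original one; a linked node (non-NULL next) marks it as inserted.
+ */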
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+ return !!(entry->tuple_nat_node.next);
+}
+
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -394,13 +413,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -633,6 +653,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
}
ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
+ ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
meta->ct_metadata.mark,
@@ -699,13 +720,13 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
- mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
- entry->tuple.zone & MLX5_CT_ZONE_MASK,
- MLX5_CT_ZONE_MASK);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
@@ -732,16 +753,117 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static bool
+mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+}
+
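+/* Look up @tuple in both the plain and the NAT tuple tables. Returns a
+ * referenced entry on success, NULL when nothing matches, or
+ * ERR_PTR(-EINVAL) when a matching entry exists but is not yet valid or
+ * is already being released.
+ */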
+static struct mlx5_ct_entry *
+mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
+{
+ struct mlx5_ct_entry *entry;
+
+ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
+ tuples_ht_params);
+ if (entry && mlx5_tc_ct_entry_valid(entry) &&
+ refcount_inc_not_zero(&entry->refcnt)) {
+ return entry;
+ } else if (!entry) {
+ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+ tuple, tuples_nat_ht_params);
+ if (entry && mlx5_tc_ct_entry_valid(entry) &&
+ refcount_inc_not_zero(&entry->refcnt))
+ return entry;
+ }
+
+ return entry ? ERR_PTR(-EINVAL) : NULL;
+}
+
+static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
+{
+ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
+}
+
+static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
+{
+ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+
+ spin_lock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_counter_put(ct_priv, entry);
+ kfree(entry);
+}
+
+static void
+mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+ if (!refcount_dec_and_test(&entry->refcnt))
+ return;
+
+ mlx5_tc_ct_entry_del(entry);
+}
+
+static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
+{
+ struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
+
+ mlx5_tc_ct_entry_del(entry);
+}
+
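+/* Put variant for atomic context: entry teardown issues blocking firmware
+ * commands, so the final release is deferred to a workqueue when the last
+ * reference may be dropped from the datapath.
+ */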
+static void
+__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+ struct mlx5e_priv *priv;
+
+ if (!refcount_dec_and_test(&entry->refcnt))
+ return;
+
+ priv = netdev_priv(entry->ct_priv->netdev);
+ INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
+ queue_work(priv->wq, &entry->work);
+}
+
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
- int ret;
/* get the reversed tuple */
tmp_port = rev_tuple.port.src;
@@ -763,29 +885,32 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
}
/* Use the same counter as the reverse direction */
- mutex_lock(&ct_priv->shared_counter_lock);
- rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
- tuples_ht_params);
- if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
- mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
- }
+ spin_lock_bh(&ct_priv->ht_lock);
+ rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+
+ if (IS_ERR(rev_entry)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ goto create_counter;
}
- mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
+ if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ shared_counter = rev_entry->counter;
+ spin_unlock_bh(&ct_priv->ht_lock);
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
- return ERR_PTR(ret);
+ mlx5_tc_ct_entry_put(rev_entry);
+ return shared_counter;
}
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+create_counter:
+
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter))
+ return shared_counter;
+
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +923,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +948,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -839,10 +967,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (!meta_action)
return -EOPNOTSUPP;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (entry)
- return 0;
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&ct_priv->ht_lock);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -851,6 +983,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->tuple.zone = ft->zone;
entry->cookie = flow->cookie;
entry->restore_cookie = meta_action->ct_metadata.cookie;
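+	/* One reference for the hashtables and one for this function; the
+	 * latter is dropped once the entry is fully set up and marked valid.
+	 */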
+ refcount_set(&entry->refcnt, 2);
+ entry->ct_priv = ct_priv;
err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
if (err)
@@ -861,84 +995,85 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_set;
- err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ spin_lock_bh(&ct_priv->ht_lock);
+
+ err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
+ cts_ht_params);
+ if (err)
+ goto err_entries;
+
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
if (err)
goto err_tuple;
if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
- err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
}
+ spin_unlock_bh(&ct_priv->ht_lock);
err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
ft->zone_restore_id);
if (err)
goto err_rules;
- err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
- cts_ht_params);
- if (err)
- goto err_insert;
+ set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+ mlx5_tc_ct_entry_put(entry); /* this function reference */
return 0;
-err_insert:
- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
+ spin_lock_bh(&ct_priv->ht_lock);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
err_tuple:
+ rhashtable_remove_fast(&ft->ct_entries_ht,
+ &entry->node,
+ cts_ht_params);
+err_entries:
+ spin_unlock_bh(&ct_priv->ht_lock);
err_set:
kfree(entry);
- netdev_warn(ct_priv->netdev,
- "Failed to offload ct entry, err: %d\n", err);
+ if (err != -EEXIST)
+ netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
return err;
}
-static void
-mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_ct_entry *entry)
-{
- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
- mutex_lock(&ct_priv->shared_counter_lock);
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
- mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
-
-}
-
static int
mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
struct flow_cls_offload *flow)
{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
unsigned long cookie = flow->cookie;
struct mlx5_ct_entry *entry;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (!entry)
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (!entry) {
+ spin_unlock_bh(&ct_priv->ht_lock);
return -ENOENT;
+ }
- mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
- WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params));
- kfree(entry);
+ if (!mlx5_tc_ct_entry_valid(entry)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ return -EINVAL;
+ }
+
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_entry_put(entry);
return 0;
}
@@ -947,19 +1082,30 @@ static int
mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
struct flow_cls_offload *f)
{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
unsigned long cookie = f->cookie;
struct mlx5_ct_entry *entry;
u64 lastuse, packets, bytes;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (!entry)
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (!entry) {
+ spin_unlock_bh(&ct_priv->ht_lock);
return -ENOENT;
+ }
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ return -EINVAL;
+ }
+
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5_tc_ct_entry_put(entry);
return 0;
}
@@ -1061,8 +1207,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
- bool trk, est, untrk, unest, new;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
u16 ct_state, ct_state_mask;
@@ -1088,9 +1234,10 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
- TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ TCA_FLOWER_KEY_CT_FLAGS_NEW |
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
NL_SET_ERR_MSG_MOD(extack,
- "only ct_state trk, est and new are supported for offload");
+ "only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
}
@@ -1099,13 +1246,17 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
if (new) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1220,9 +1371,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
pre_ct->flow_rule = rule;
/* add miss rule */
- memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1451,11 +1601,9 @@ err_mapping:
static void
mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
{
- struct mlx5_tc_ct_priv *ct_priv = arg;
struct mlx5_ct_entry *entry = ptr;
- mlx5_tc_ct_del_ft_entry(ct_priv, entry);
- kfree(entry);
+ mlx5_tc_ct_entry_put(entry);
}
static void
@@ -1733,7 +1881,6 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
goto err_set_registers;
}
- dealloc_mod_hdr_actions(mod_acts);
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
@@ -1933,6 +2080,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_mapping_labels;
}
+ spin_lock_init(&ct_priv->ht_lock);
ct_priv->ns_type = ns_type;
ct_priv->chains = chains;
ct_priv->netdev = priv->netdev;
@@ -1967,7 +2115,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
idr_init(&ct_priv->fte_ids);
mutex_init(&ct_priv->control_lock);
- mutex_init(&ct_priv->shared_counter_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
@@ -2010,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
- mutex_destroy(&ct_priv->shared_counter_lock);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
}
@@ -2032,14 +2178,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
return false;
- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
- tuples_ht_params);
- if (!entry)
- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
- &tuple, tuples_nat_ht_params);
- if (!entry)
+ spin_lock(&ct_priv->ht_lock);
+
+ entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
+ if (!entry) {
+ spin_unlock(&ct_priv->ht_lock);
return false;
+ }
+
+ if (IS_ERR(entry)) {
+ spin_unlock(&ct_priv->ht_lock);
+ return false;
+ }
+ spin_unlock(&ct_priv->ht_lock);
tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+ __mlx5_tc_ct_entry_put(entry);
+
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 6503b614337c..69e618d17071 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -73,7 +73,7 @@ struct mlx5_ct_attr {
#define zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
.moffset = 0,\
- .mlen = 1,\
+ .mlen = (ESW_ZONE_ID_BITS / 8),\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_1) + 3,\
}
@@ -81,14 +81,12 @@ struct mlx5_ct_attr {
#define nic_zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
.moffset = 2,\
- .mlen = 1,\
+ .mlen = (ESW_ZONE_ID_BITS / 8),\
}
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
-#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
-#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
new file mode 100644
index 000000000000..c223591ffc22
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_PRIV_H__
+#define __MLX5_EN_TC_PRIV_H__
+
+#include "en_tc.h"
+
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
+
+#define MLX5E_TC_MAX_SPLITS 1
+
+enum {
+ MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
+ MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
+ MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
+ MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
+ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
+ MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
+ MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
+ MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
+};
+
+struct mlx5e_tc_flow_parse_attr {
+ const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct net_device *filter_dev;
+ struct mlx5_flow_spec spec;
+ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
+ int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct ethhdr eth;
+};
+
+/* Helper struct for accessing a struct containing list_head array.
+ * Containing struct
+ * |- Helper array
+ * [0] Helper item 0
+ * |- list_head item 0
+ * |- index (0)
+ * [1] Helper item 1
+ * |- list_head item 1
+ * |- index (1)
+ * To access the containing struct from one of the list_head items:
+ * 1. Get the helper item from the list_head item using
+ * helper item =
+ * container_of(list_head item, helper struct type, list_head field)
+ * 2. Get the containing struct from the helper item and its index in the array:
+ * containing struct =
+ * container_of(helper item, containing struct type, helper field[index])
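+ * For example, with the encap_flow_item array embedded in struct
+ * mlx5e_tc_flow below (the list_head variable name is illustrative):
+ *   efi = container_of(list_item, struct encap_flow_item, list);
+ *   flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);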
+ */
+struct encap_flow_item {
+ struct mlx5e_encap_entry *e; /* attached encap instance */
+ struct list_head list;
+ int index;
+};
+
+struct encap_route_flow_item {
+ struct mlx5e_route_entry *r; /* attached route instance */
+ int index;
+};
+
+struct mlx5e_tc_flow {
+ struct rhash_head node;
+ struct mlx5e_priv *priv;
+ u64 cookie;
+ unsigned long flags;
+ struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
+
+ /* flows sharing the same reformat object - currently mpls decap */
+ struct list_head l3_to_l2_reformat;
+ struct mlx5e_decap_entry *decap_reformat;
+
+ /* flows sharing same route entry */
+ struct list_head decap_routes;
+ struct mlx5e_route_entry *decap_route;
+ struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];
+
+ /* Flow can be associated with multiple encap IDs.
+ * The number of encaps is bounded by the number of supported
+ * destinations.
+ */
+ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
+ struct list_head hairpin; /* flows sharing the same hairpin */
+ struct list_head peer; /* flows with peer flow */
+	struct list_head unready; /* flows not ready to be offloaded (e.g.
+ * due to missing route)
+ */
+ struct net_device *orig_dev; /* netdev adding flow first */
+ int tmp_entry_index;
+ struct list_head tmp_list; /* temporary flow list used by neigh update */
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+ struct completion init_done;
+ int tunnel_id; /* the mapped tunnel id of this flow */
+ struct mlx5_flow_attr *attr;
+};
+
+u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);
+
+struct mlx5_flow_handle *
+mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
+
+static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before setting bit. */
+ smp_mb__before_atomic();
+ set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+ clear_bit(flag, &flow->flags);
+}
+
+#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ bool ret = test_bit(flag, &flow->flags);
+
+ /* Read fields of flow structure only after checking flags. */
+ smp_mb__after_atomic();
+ return ret;
+}
+
+#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow);
+struct mlx5_flow_handle *
+mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec);
+void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
+void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);
+
+struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
+
+#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 90930e54b6f2..f8075a604605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -6,10 +6,32 @@
#include <net/geneve.h>
#include <net/bareudp.h>
#include "en/tc_tun.h"
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+struct mlx5e_tc_tun_route_attr {
+ struct net_device *out_dev;
+ struct net_device *route_dev;
+ union {
+ struct flowi4 fl4;
+ struct flowi6 fl6;
+ } fl;
+ struct neighbour *n;
+ u8 ttl;
+};
+
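+/* Declare a zero-initialized route attr, so that
+ * mlx5e_tc_tun_route_attr_cleanup() is safe to call on any exit path,
+ * including ones where only some of the fields were filled in.
+ */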
+#define TC_TUN_ROUTE_ATTR_INIT(name) struct mlx5e_tc_tun_route_attr name = {}
+
+static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr)
+{
+ if (attr->n)
+ neigh_release(attr->n);
+ if (attr->route_dev)
+ dev_put(attr->route_dev);
+}
+
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
{
if (netif_is_vxlan(tunnel_dev))
@@ -79,12 +101,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+ struct mlx5e_tc_tun_route_attr *attr)
{
+ struct net_device *route_dev;
+ struct net_device *out_dev;
struct neighbour *n;
struct rtable *rt;
@@ -97,46 +117,50 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = mdev->priv.eswitch;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- fl4->flowi4_oif = uplink_dev->ifindex;
+ attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
}
- rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+ rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
- ip_rt_put(rt);
- return -ENETUNREACH;
+ ret = -ENETUNREACH;
+ goto err_rt_release;
}
#else
return -EOPNOTSUPP;
#endif
- ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
- if (ret < 0) {
- ip_rt_put(rt);
- return ret;
- }
- dev_hold(*route_dev);
+ ret = get_route_and_out_devs(priv, rt->dst.dev, &route_dev, &out_dev);
+ if (ret < 0)
+ goto err_rt_release;
+ dev_hold(route_dev);
- if (!(*out_ttl))
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
- n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
- ip_rt_put(rt);
+ if (!attr->ttl)
+ attr->ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &attr->fl.fl4.daddr);
if (!n) {
- dev_put(*route_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_dev_release;
}
- *out_n = n;
+ ip_rt_put(rt);
+ attr->route_dev = route_dev;
+ attr->out_dev = out_dev;
+ attr->n = n;
return 0;
+
+err_dev_release:
+ dev_put(route_dev);
+err_rt_release:
+ ip_rt_put(rt);
+ return ret;
}
-static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
- struct neighbour *n)
+static void mlx5e_route_lookup_ipv4_put(struct mlx5e_tc_tun_route_attr *attr)
{
- neigh_release(n);
- dev_put(route_dev);
+ mlx5e_tc_tun_route_attr_cleanup(attr);
}
static const char *mlx5e_netdev_kind(struct net_device *dev)
@@ -188,28 +212,26 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
- struct net_device *out_dev, *route_dev;
- struct flowi4 fl4 = {};
- struct neighbour *n;
+ struct mlx5e_neigh m_neigh = {};
+ TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
char *encap_header;
- u8 nud_state, ttl;
struct iphdr *ip;
+ u8 nud_state;
int err;
/* add the IP fields */
- fl4.flowi4_tos = tun_key->tos;
- fl4.daddr = tun_key->u.ipv4.dst;
- fl4.saddr = tun_key->u.ipv4.src;
- ttl = tun_key->ttl;
+ attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
+ attr.fl.fl4.saddr = tun_key->u.ipv4.src;
+ attr.ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr);
if (err)
return err;
ipv4_encap_size =
- (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) +
e->tunnel->calc_hlen(e);
@@ -226,40 +248,36 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto release_neigh;
}
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
- e->route_dev_ifindex = route_dev->ifindex;
+ m_neigh.family = attr.n->ops->family;
+ memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len);
+ e->out_dev = attr.out_dev;
+ e->route_dev_ifindex = attr.route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. That way, if we get a notification because
* the neigh changed its validity state, we will find the relevant neigh
* in the hash.
*/
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev);
if (err)
goto free_encap;
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ read_unlock_bh(&attr.n->lock);
/* add ethernet header */
- ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IP);
/* add ip header */
ip->tos = tun_key->tos;
ip->version = 0x4;
ip->ihl = 0x5;
- ip->ttl = ttl;
- ip->daddr = fl4.daddr;
- ip->saddr = fl4.saddr;
+ ip->ttl = attr.ttl;
+ ip->daddr = attr.fl.fl4.daddr;
+ ip->saddr = attr.fl.fl4.saddr;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
@@ -271,7 +289,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
+ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
@@ -287,8 +305,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- mlx5e_route_lookup_ipv4_put(route_dev, n);
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
return err;
destroy_neigh_entry:
@@ -296,55 +314,155 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- mlx5e_route_lookup_ipv4_put(route_dev, n);
+ mlx5e_route_lookup_ipv4_put(&attr);
+ return err;
+}
+
+int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ int ipv4_encap_size;
+ char *encap_header;
+ struct iphdr *ip;
+ u8 nud_state;
+ int err;
+
+ /* add the IP fields */
+ attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
+ attr.fl.fl4.saddr = tun_key->u.ipv4.src;
+ attr.ttl = tun_key->ttl;
+
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr);
+ if (err)
+ return err;
+
+ ipv4_encap_size =
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct iphdr) +
+ e->tunnel->calc_hlen(e);
+
+ if (max_encap_size < ipv4_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv4_encap_size, max_encap_size);
+ err = -EOPNOTSUPP;
+ goto release_neigh;
+ }
+
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
+
+ e->route_dev_ifindex = attr.route_dev->ifindex;
+
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev);
+ read_unlock_bh(&attr.n->lock);
+
+ /* add ethernet header */
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
+ ETH_P_IP);
+
+ /* add ip header */
+ ip->tos = tun_key->tos;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->ttl = attr.ttl;
+ ip->daddr = attr.fl.fl4.daddr;
+ ip->saddr = attr.fl.fl4.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
+ &ip->protocol, e);
+ if (err)
+ goto free_encap;
+
+ e->encap_size = ipv4_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+ goto release_neigh;
+ }
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
+ goto free_encap;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+ return err;
+
+free_encap:
+ kfree(encap_header);
+release_neigh:
+ mlx5e_route_lookup_ipv4_put(&attr);
return err;
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+ struct mlx5e_tc_tun_route_attr *attr)
{
+ struct net_device *route_dev;
+ struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
-
int ret;
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!attr->ttl)
+ attr->ttl = ip6_dst_hoplimit(dst);
- ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0) {
- dst_release(dst);
- return ret;
- }
+ ret = get_route_and_out_devs(priv, dst->dev, &route_dev, &out_dev);
+ if (ret < 0)
+ goto err_dst_release;
- dev_hold(*route_dev);
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
+ dev_hold(route_dev);
+ n = dst_neigh_lookup(dst, &attr->fl.fl6.daddr);
if (!n) {
- dev_put(*route_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_dev_release;
}
- *out_n = n;
+ dst_release(dst);
+ attr->out_dev = out_dev;
+ attr->route_dev = route_dev;
+ attr->n = n;
return 0;
+
+err_dev_release:
+ dev_put(route_dev);
+err_dst_release:
+ dst_release(dst);
+ return ret;
}
-static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
- struct neighbour *n)
+static void mlx5e_route_lookup_ipv6_put(struct mlx5e_tc_tun_route_attr *attr)
{
- neigh_release(n);
- dev_put(route_dev);
+ mlx5e_tc_tun_route_attr_cleanup(attr);
}
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
@@ -353,28 +471,25 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
- struct net_device *out_dev, *route_dev;
- struct flowi6 fl6 = {};
+ struct mlx5e_neigh m_neigh = {};
+ TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
- struct neighbour *n = NULL;
int ipv6_encap_size;
char *encap_header;
- u8 nud_state, ttl;
+ u8 nud_state;
int err;
- ttl = tun_key->ttl;
+ attr.ttl = tun_key->ttl;
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
+ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
- fl6.daddr = tun_key->u.ipv6.dst;
- fl6.saddr = tun_key->u.ipv6.src;
-
- err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
if (err)
return err;
ipv6_encap_size =
- (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct ipv6hdr) +
e->tunnel->calc_hlen(e);
@@ -391,39 +506,35 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
- e->route_dev_ifindex = route_dev->ifindex;
+ m_neigh.family = attr.n->ops->family;
+ memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len);
+ e->out_dev = attr.out_dev;
+ e->route_dev_ifindex = attr.route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. That way, if we get a notification because
* the neigh changed its validity state, we will find the relevant neigh
* in the hash.
*/
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev);
if (err)
goto free_encap;
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ read_unlock_bh(&attr.n->lock);
/* add ethernet header */
- ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IPV6);
/* add ip header */
ip6_flow_hdr(ip6h, tun_key->tos, 0);
/* the HW fills in the ipv6 payload len */
- ip6h->hop_limit = ttl;
- ip6h->daddr = fl6.daddr;
- ip6h->saddr = fl6.saddr;
+ ip6h->hop_limit = attr.ttl;
+ ip6h->daddr = attr.fl.fl6.daddr;
+ ip6h->saddr = attr.fl.fl6.saddr;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
@@ -435,7 +546,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
+ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
@@ -452,8 +563,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- mlx5e_route_lookup_ipv6_put(route_dev, n);
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
return err;
destroy_neigh_entry:
@@ -461,10 +572,160 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- mlx5e_route_lookup_ipv6_put(route_dev, n);
+ mlx5e_route_lookup_ipv6_put(&attr);
return err;
}
+
+int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ struct ipv6hdr *ip6h;
+ int ipv6_encap_size;
+ char *encap_header;
+ u8 nud_state;
+ int err;
+
+ attr.ttl = tun_key->ttl;
+
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
+ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
+ if (err)
+ return err;
+
+ ipv6_encap_size =
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct ipv6hdr) +
+ e->tunnel->calc_hlen(e);
+
+ if (max_encap_size < ipv6_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv6_encap_size, max_encap_size);
+ err = -EOPNOTSUPP;
+ goto release_neigh;
+ }
+
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
+
+ e->route_dev_ifindex = attr.route_dev->ifindex;
+
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev);
+ read_unlock_bh(&attr.n->lock);
+
+ /* add ethernet header */
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
+ ETH_P_IPV6);
+
+ /* add ip header */
+ ip6_flow_hdr(ip6h, tun_key->tos, 0);
+ /* the HW fills up ipv6 payload len */
+ ip6h->hop_limit = attr.ttl;
+ ip6h->daddr = attr.fl.fl6.daddr;
+ ip6h->saddr = attr.fl.fl6.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
+ &ip6h->nexthdr, e);
+ if (err)
+ goto free_encap;
+
+ e->encap_size = ipv6_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+ goto release_neigh;
+ }
+
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
+ goto free_encap;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+ return err;
+
+free_encap:
+ kfree(encap_header);
+release_neigh:
+ mlx5e_route_lookup_ipv6_put(&attr);
+ return err;
+}
+#endif
+
+int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *flow_attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ u16 vport_num;
+ int err = 0;
+
+ if (flow_attr->ip_version == 4) {
+ /* Addresses are swapped for decap */
+ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
+ attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
+ err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+ }
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (flow_attr->ip_version == 6) {
+ /* Addresses are swapped for decap */
+ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
+ attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
+ err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+ }
#endif
+ else
+ return 0;
+
+ if (err)
+ return err;
+
+ if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev))
+ goto out;
+
+ err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.vxlan_vni);
+ esw_attr->rx_tun_attr->decap_vport = vport_num;
+
+out:
+ if (flow_attr->ip_version == 4)
+ mlx5e_route_lookup_ipv4_put(&attr);
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (flow_attr->ip_version == 6)
+ mlx5e_route_lookup_ipv6_put(&attr);
+#endif
+ return err;
+}
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
@@ -625,14 +886,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
}
}
- /* Enforce DMAC when offloading incoming tunneled flows.
- * Flow counters require a match on the DMAC.
- */
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16), priv->netdev->dev_addr);
-
/* let software handle IP fragments */
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 704359df6095..67de2bf36861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -11,6 +11,8 @@
#include "en.h"
#include "en_rep.h"
+#ifdef CONFIG_MLX5_ESWITCH
+
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
@@ -59,17 +61,30 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
#else
static inline int
mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+static inline int
+mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{ return -EOPNOTSUPP; }
#endif
+int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
@@ -86,4 +101,6 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v);
+#endif /* CONFIG_MLX5_ESWITCH */
+
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
new file mode 100644
index 000000000000..6a116335bb21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -0,0 +1,1653 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <net/fib_notifier.h>
+#include "tc_tun_encap.h"
+#include "en_tc.h"
+#include "tc_tun.h"
+#include "rep/tc.h"
+#include "diag/en_tc_tracepoint.h"
+
+enum {
+ MLX5E_ROUTE_ENTRY_VALID = BIT(0),
+};
+
+struct mlx5e_route_key {
+ int ip_version;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } endpoint_ip;
+};
+
+struct mlx5e_route_entry {
+ struct mlx5e_route_key key;
+ struct list_head encap_entries;
+ struct list_head decap_flows;
+ u32 flags;
+ struct hlist_node hlist;
+ refcount_t refcnt;
+ int tunnel_dev_index;
+ struct rcu_head rcu;
+};
+
+struct mlx5e_tc_tun_encap {
+ struct mlx5e_priv *priv;
+ struct notifier_block fib_nb;
+ spinlock_t route_lock; /* protects route_tbl */
+ unsigned long route_tbl_last_update;
+ DECLARE_HASHTABLE(route_tbl, 8);
+};
+
+static bool mlx5e_route_entry_valid(struct mlx5e_route_entry *r)
+{
+ return r->flags & MLX5E_ROUTE_ENTRY_VALID;
+}
+
+int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ struct mlx5_rx_tun_attr *tun_attr;
+ void *daddr, *saddr;
+ u8 ip_version;
+
+ tun_attr = kvzalloc(sizeof(*tun_attr), GFP_KERNEL);
+ if (!tun_attr)
+ return -ENOMEM;
+
+ esw_attr->rx_tun_attr = tun_attr;
+ ip_version = mlx5e_tc_get_ip_version(spec, true);
+
+ if (ip_version == 4) {
+ daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ tun_attr->dst_ip.v4 = *(__be32 *)daddr;
+ tun_attr->src_ip.v4 = *(__be32 *)saddr;
+ if (!tun_attr->dst_ip.v4 || !tun_attr->src_ip.v4)
+ return 0;
+ }
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (ip_version == 6) {
+ int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+ struct in6_addr zerov6 = {};
+
+ daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
+ memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
+ if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
+ !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
+ return 0;
+ }
+#endif
+ /* Only set the flag if both src and dst ip addresses exist. They are
+ * required to establish routing.
+ */
+ flow_flag_set(flow, TUN_RX);
+ return 0;
+}
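+
A hedged sketch of how the TUN_RX flag set above is consumed (the wrapper below is illustrative, not taken from the patch): decap route tracking is attempted only for flows where both tunnel endpoints were matched.

static int example_maybe_attach_route(struct mlx5e_priv *priv,
				      struct mlx5e_tc_flow *flow)
{
	/* no resolved endpoint pair means nothing to track for this flow */
	if (!flow_flag_test(flow, TUN_RX))
		return 0;
	return mlx5e_attach_decap_route(priv, flow);
}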
+
+static bool mlx5e_tc_flow_all_encaps_valid(struct mlx5_esw_flow_attr *esw_attr)
+{
+ bool all_flow_encaps_valid = true;
+ int i;
+
+ /* Flow can be associated with multiple encap entries.
+ * Before offloading the flow verify that all of them have
+ * a valid neighbour.
+ */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
+ all_flow_encaps_valid = false;
+ break;
+ }
+ }
+
+ return all_flow_encaps_valid;
+}
+
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
+ return;
+
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
+ PTR_ERR(e->pkt_reformat));
+ return;
+ }
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
+
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
+ esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+
+ /* Do not offload flows with unresolved neighbors */
+ if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
+ continue;
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ continue;
+ }
+
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
+ flow->rule[0] = rule;
+ /* was unset when slow path rule removed */
+ flow_flag_set(flow, OFFLOADED);
+ }
+}
+
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
+
+ /* update from encap rule to slow path rule */
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+ /* mark the flow's encap dest as non-valid */
+ esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ continue;
+ }
+
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ flow->rule[0] = rule;
+ /* was unset when fast path rule removed */
+ flow_flag_set(flow, OFFLOADED);
+ }
+
+ /* we know that the encap is valid */
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+}
+
+static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
+ struct list_head *flow_list,
+ int index)
+{
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ return;
+ wait_for_completion(&flow->init_done);
+
+ flow->tmp_entry_index = index;
+ list_add(&flow->tmp_list, flow_list);
+}
+
+/* Takes reference to all flows attached to encap and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
+{
+ struct encap_flow_item *efi;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ mlx5e_take_tmp_flow(flow, flow_list, efi->index);
+ }
+}
+
+/* Takes reference to all flows attached to route and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
+ struct list_head *flow_list)
+{
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, &r->decap_flows, decap_routes)
+ mlx5e_take_tmp_flow(flow, flow_list, 0);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_encap_entry *next = NULL;
+
+retry:
+ rcu_read_lock();
+
+ /* find encap with non-zero reference counter value */
+ for (next = e ?
+ list_next_or_null_rcu(&nhe->encap_list,
+ &e->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list) :
+ list_first_or_null_rcu(&nhe->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list);
+ next;
+ next = list_next_or_null_rcu(&nhe->encap_list,
+ &next->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list))
+ if (mlx5e_encap_take(next))
+ break;
+
+ rcu_read_unlock();
+
+ /* release starting encap */
+ if (e)
+ mlx5e_encap_put(netdev_priv(e->out_dev), e);
+ if (!next)
+ return next;
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&next->res_ready);
+ /* continue searching if encap entry is not in valid state after completion */
+ if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ e = next;
+ goto retry;
+ }
+
+ return next;
+}
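+
The iterator above combines RCU list traversal with refcount_inc_not_zero() so an entry is only handed out while something still owns it. A distilled, generic sketch of the same pattern (struct item and next_pinned are stand-in names):

struct item {
	struct list_head node;
	refcount_t refcnt;
};

static struct item *next_pinned(struct list_head *head, struct item *prev)
{
	struct item *it, *found = NULL;

	rcu_read_lock();
	it = prev ? list_next_or_null_rcu(head, &prev->node, struct item, node) :
		    list_first_or_null_rcu(head, struct item, node);
	for (; it; it = list_next_or_null_rcu(head, &it->node, struct item, node)) {
		/* skip entries whose last reference is already gone */
		if (refcount_inc_not_zero(&it->refcnt)) {
			found = it;
			break;
		}
	}
	rcu_read_unlock();
	return found;	/* caller releases 'prev' and, later, 'found' */
}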
+
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ struct mlx5e_encap_entry *e = NULL;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
+ u64 lastuse;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (m_neigh->family == AF_INET6)
+ tbl = ipv6_stub->nd_tbl;
+#endif
+ else
+ return;
+
+ /* mlx5e_get_next_valid_encap() releases the previous encap before
+ * returning the next one.
+ */
+ while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+ struct mlx5e_priv *priv = netdev_priv(e->out_dev);
+ struct encap_flow_item *efi, *tmp;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_for_each_entry_safe(efi, tmp, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow,
+ encaps[efi->index]);
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ list_add(&flow->tmp_list, &flow_list);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
+ counter = mlx5e_tc_get_counter(flow);
+ lastuse = mlx5_fc_query_lastuse(counter);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_put_flow_list(priv, &flow_list);
+ if (neigh_used) {
+ /* release current encap before breaking the loop */
+ mlx5e_encap_put(priv, e);
+ break;
+ }
+ }
+
+ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
+
+ if (neigh_used) {
+ nhe->reported_lastuse = jiffies;
+
+ /* find the relevant neigh according to the cached device and
+ * dst ip pair
+ */
+ n = neigh_lookup(tbl, &m_neigh->dst_ip, READ_ONCE(nhe->neigh_dev));
+ if (!n)
+ return;
+
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
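+
The loop above is effectively a neigh keep-alive driven by hardware counters: if an offloaded flow's counter advanced since the last report, the neighbour is still in use even though the stack sees no traffic. The core of that check, reduced to a sketch (example_keepalive is an assumed name):

static void example_keepalive(struct mlx5e_neigh_hash_entry *nhe,
			      struct neigh_table *tbl, struct mlx5_fc *counter)
{
	u64 lastuse = mlx5_fc_query_lastuse(counter);
	struct neighbour *n;

	if (!time_after((unsigned long)lastuse, nhe->reported_lastuse))
		return;		/* no hardware traffic since last report */

	nhe->reported_lastuse = jiffies;
	n = neigh_lookup(tbl, &nhe->m_neigh.dst_ip, READ_ONCE(nhe->neigh_dev));
	if (!n)
		return;
	neigh_event_send(n, NULL);	/* counts as activity, prevents expiry */
	neigh_release(n);
}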
+
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ WARN_ON(!list_empty(&e->flows));
+
+ if (e->compl_result > 0) {
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ }
+
+ kfree(e->tun_info);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+}
+
+static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_decap_entry *d)
+{
+ WARN_ON(!list_empty(&d->flows));
+
+ if (!d->compl_result)
+ mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
+
+ kfree_rcu(d, rcu);
+}
+
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
+ return;
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
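+
Both put helpers above use the standard kernel idiom for refcounted objects published in a locked hash table: take the table lock only when the drop is final, unpublish under the lock, and defer the free past any RCU readers. A generic sketch (struct obj and obj_put are assumed names):

struct obj {
	struct hlist_node node;
	refcount_t refcnt;
	struct rcu_head rcu;
};

static void obj_put(struct obj *o, struct mutex *tbl_lock)
{
	/* acquires tbl_lock only if the count actually reached zero */
	if (!refcount_dec_and_mutex_lock(&o->refcnt, tbl_lock))
		return;
	hash_del_rcu(&o->node);	/* unpublish while holding the lock */
	mutex_unlock(tbl_lock);
	kfree_rcu(o, rcu);	/* RCU readers may still hold the pointer */
}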
+
+static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index);
+
+void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index)
+{
+ struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (flow->attr->esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5e_detach_encap_route(priv, flow, out_index);
+
+ /* flow wasn't fully initialized */
+ if (!e)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->encaps[out_index].list);
+ flow->encaps[out_index].e = NULL;
+ if (!refcount_dec_and_test(&e->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_entry *d = flow->decap_reformat;
+
+ if (!d)
+ return;
+
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ list_del(&flow->l3_to_l2_reformat);
+ flow->decap_reformat = NULL;
+
+ if (!refcount_dec_and_test(&d->refcnt)) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
+struct encap_key {
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
+};
+
+static int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
+{
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+}
+
+static int cmp_decap_info(struct mlx5e_decap_key *a,
+ struct mlx5e_decap_key *b)
+{
+ return memcmp(&a->key, &b->key, sizeof(b->key));
+}
+
+static int hash_encap_info(struct encap_key *key)
+{
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tc_tunnel->tunnel_type);
+}
+
+static int hash_decap_info(struct mlx5e_decap_key *key)
+{
+ return jhash(&key->key, sizeof(key->key), 0);
+}
+
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e;
+ struct encap_key e_key;
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
+ if (!cmp_encap_info(&e_key, key) &&
+ mlx5e_encap_take(e))
+ return e;
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_decap_entry *
+mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_key r_key;
+ struct mlx5e_decap_entry *e;
+
+ hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
+ hlist, hash_key) {
+ r_key = e->key;
+ if (!cmp_decap_info(&r_key, key) &&
+ mlx5e_decap_take(e))
+ return e;
+ }
+ return NULL;
+}
+
+struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+ size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+ return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < out_index; i++) {
+ if (flow->encaps[i].e != e)
+ continue;
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+ netdev_err(priv->netdev, "can't duplicate encap action\n");
+ return true;
+ }
+
+ return false;
+}
+
+static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ struct net_device *out_dev,
+ int route_dev_ifindex,
+ int out_index)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *route_dev;
+ u16 vport_num;
+ int err = 0;
+ u32 data;
+
+ route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
+
+ if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(out_dev, route_dev))
+ goto out;
+
+ err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ attr->dest_chain = 0;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+ data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch,
+ vport_num);
+ err = mlx5e_tc_match_to_reg_set_and_get_id(esw->dev, mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB,
+ VPORT_TO_REG, data);
+ if (err >= 0) {
+ esw_attr->dests[out_index].src_port_rewrite_act_id = err;
+ err = 0;
+ }
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+ return err;
+}
+
+static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ struct net_device *out_dev,
+ int route_dev_ifindex,
+ int out_index)
+{
+ int act_id = attr->dests[out_index].src_port_rewrite_act_id;
+ struct net_device *route_dev;
+ u16 vport_num;
+ int err = 0;
+ u32 data;
+
+ route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
+
+ if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ data = mlx5_eswitch_get_vport_metadata_for_set(attr->in_mdev->priv.eswitch,
+ vport_num);
+ mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+ return err;
+}
+
+static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_tun_encap *encap;
+ unsigned int ret;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ encap = uplink_priv->encap;
+
+ spin_lock_bh(&encap->route_lock);
+ ret = encap->route_tbl_last_update;
+ spin_unlock_bh(&encap->route_lock);
+ return ret;
+}
+
+static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_encap_entry *e,
+ bool new_encap_entry,
+ unsigned long tbl_time_before,
+ int out_index);
+
+int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
+ struct netlink_ext_ack *extack,
+ struct net_device **encap_dev,
+ bool *encap_valid)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ const struct ip_tunnel_info *tun_info;
+ unsigned long tbl_time_before = 0;
+ struct encap_key key;
+ struct mlx5e_encap_entry *e;
+ bool entry_created = false;
+ unsigned short family;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = attr->parse_attr;
+ tun_info = parse_attr->tun_info[out_index];
+ family = ip_tunnel_info_af(tun_info);
+ key.ip_tun_key = &tun_info->key;
+ key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
+ if (!key.tc_tunnel) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
+ return -EOPNOTSUPP;
+ }
+
+ hash_key = hash_encap_info(&key);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ e = mlx5e_encap_get(priv, &key, hash_key);
+
+ /* must verify if encap is valid or not */
+ if (e) {
+ /* Check that entry was not already attached to this flow */
+ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ wait_for_completion(&e->res_ready);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (e->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
+ goto attach_flow;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
+ entry_created = true;
+ INIT_LIST_HEAD(&e->route_list);
+
+ tun_info = mlx5e_dup_tun_info(tun_info);
+ if (!tun_info) {
+ err = -ENOMEM;
+ goto out_err_init;
+ }
+ e->tun_info = tun_info;
+ err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
+ if (err)
+ goto out_err_init;
+
+ INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ if (family == AF_INET)
+ err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
+ else if (family == AF_INET6)
+ err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ complete_all(&e->res_ready);
+ if (err) {
+ e->compl_result = err;
+ goto out_err;
+ }
+ e->compl_result = 1;
+
+attach_flow:
+ err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before,
+ out_index);
+ if (err)
+ goto out_err;
+
+ flow->encaps[out_index].e = e;
+ list_add(&flow->encaps[out_index].list, &e->flows);
+ flow->encaps[out_index].index = out_index;
+ *encap_dev = e->out_dev;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
+ attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ *encap_valid = true;
+ } else {
+ *encap_valid = false;
+ }
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ if (e)
+ mlx5e_encap_put(priv, e);
+ return err;
+
+out_err_init:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ kfree(tun_info);
+ kfree(e);
+ return err;
+}
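+
mlx5e_attach_encap() drops encap_tbl_lock around the sleeping header build and hands the outcome to concurrent waiters through the res_ready completion and compl_result. A condensed sketch of that handshake, with stand-in names (entry, tbl_lock, lookup_and_take_ref, alloc_entry, publish, slow_init and entry_put are all assumptions):

static struct entry *get_or_create(struct key *k)
{
	struct entry *e;

	mutex_lock(&tbl_lock);
	e = lookup_and_take_ref(k);
	if (e) {
		mutex_unlock(&tbl_lock);
		wait_for_completion(&e->res_ready);	/* creator done? */
		mutex_lock(&tbl_lock);
		if (e->compl_result < 0)
			goto err_put;
		goto out;
	}

	e = alloc_entry(k);	/* refcount 1, init_completion(&e->res_ready) */
	publish(e);		/* hash_add_rcu: visible to other lookups now */
	mutex_unlock(&tbl_lock);

	e->compl_result = slow_init(e);	/* sleeping work, no lock held */

	mutex_lock(&tbl_lock);
	complete_all(&e->res_ready);	/* wake the waiters that found us */
	if (e->compl_result < 0)
		goto err_put;
out:
	mutex_unlock(&tbl_lock);
	return e;

err_put:
	mutex_unlock(&tbl_lock);
	entry_put(e);
	return NULL;
}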
+
+int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_decap_entry *d;
+ struct mlx5e_decap_key key;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = flow->attr->parse_attr;
+ if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "encap header larger than max supported");
+ return -EOPNOTSUPP;
+ }
+
+ key.key = parse_attr->eth;
+ hash_key = hash_decap_info(&key);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ d = mlx5e_decap_get(priv, &key, hash_key);
+ if (d) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ wait_for_completion(&d->res_ready);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ if (d->compl_result) {
+ err = -EREMOTEIO;
+ goto out_free;
+ }
+ goto found;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ d->key = key;
+ refcount_set(&d->refcnt, 1);
+ init_completion(&d->res_ready);
+ INIT_LIST_HEAD(&d->flows);
+ hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
+ sizeof(parse_attr->eth),
+ &parse_attr->eth,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(d->pkt_reformat)) {
+ err = PTR_ERR(d->pkt_reformat);
+ d->compl_result = err;
+ }
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ complete_all(&d->res_ready);
+ if (err)
+ goto out_free;
+
+found:
+ flow->decap_reformat = d;
+ attr->decap_pkt_reformat = d->pkt_reformat;
+ list_add(&flow->l3_to_l2_reformat, &d->flows);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return 0;
+
+out_free:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ mlx5e_decap_put(priv, d);
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return err;
+}
+
+static int cmp_route_info(struct mlx5e_route_key *a,
+ struct mlx5e_route_key *b)
+{
+ if (a->ip_version == 4 && b->ip_version == 4)
+ return memcmp(&a->endpoint_ip.v4, &b->endpoint_ip.v4,
+ sizeof(a->endpoint_ip.v4));
+ else if (a->ip_version == 6 && b->ip_version == 6)
+ return memcmp(&a->endpoint_ip.v6, &b->endpoint_ip.v6,
+ sizeof(a->endpoint_ip.v6));
+ return 1;
+}
+
+static u32 hash_route_info(struct mlx5e_route_key *key)
+{
+ if (key->ip_version == 4)
+ return jhash(&key->endpoint_ip.v4, sizeof(key->endpoint_ip.v4), 0);
+ return jhash(&key->endpoint_ip.v6, sizeof(key->endpoint_ip.v6), 0);
+}
+
+static void mlx5e_route_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r)
+{
+ WARN_ON(!list_empty(&r->decap_flows));
+ WARN_ON(!list_empty(&r->encap_entries));
+
+ kfree_rcu(r, rcu);
+}
+
+static void mlx5e_route_put(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&r->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_route_put_locked(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
+ if (!refcount_dec_and_test(&r->refcnt))
+ return;
+ hash_del_rcu(&r->hlist);
+ mlx5e_route_dealloc(priv, r);
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_get(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key,
+ u32 hash_key)
+{
+ struct mlx5e_route_key r_key;
+ struct mlx5e_route_entry *r;
+
+ hash_for_each_possible(encap->route_tbl, r, hlist, hash_key) {
+ r_key = r->key;
+ if (!cmp_route_info(&r_key, key) &&
+ refcount_inc_not_zero(&r->refcnt))
+ return r;
+ }
+ return NULL;
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_get_create(struct mlx5e_priv *priv,
+ struct mlx5e_route_key *key,
+ int tunnel_dev_index,
+ unsigned long *route_tbl_change_time)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_tun_encap *encap;
+ struct mlx5e_route_entry *r;
+ u32 hash_key;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ encap = uplink_priv->encap;
+
+ hash_key = hash_route_info(key);
+ spin_lock_bh(&encap->route_lock);
+ r = mlx5e_route_get(encap, key, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+ if (r) {
+ if (!mlx5e_route_entry_valid(r)) {
+ mlx5e_route_put_locked(priv, r);
+ return ERR_PTR(-EINVAL);
+ }
+ return r;
+ }
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return ERR_PTR(-ENOMEM);
+
+ r->key = *key;
+ r->flags |= MLX5E_ROUTE_ENTRY_VALID;
+ r->tunnel_dev_index = tunnel_dev_index;
+ refcount_set(&r->refcnt, 1);
+ INIT_LIST_HEAD(&r->decap_flows);
+ INIT_LIST_HEAD(&r->encap_entries);
+
+ spin_lock_bh(&encap->route_lock);
+ *route_tbl_change_time = encap->route_tbl_last_update;
+ hash_add(encap->route_tbl, &r->hlist, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+
+ return r;
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_lookup_for_update(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key)
+{
+ u32 hash_key = hash_route_info(key);
+ struct mlx5e_route_entry *r;
+
+ spin_lock_bh(&encap->route_lock);
+ encap->route_tbl_last_update = jiffies;
+ r = mlx5e_route_get(encap, key, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+
+ return r;
+}
+
+struct mlx5e_tc_fib_event_data {
+ struct work_struct work;
+ unsigned long event;
+ struct mlx5e_route_entry *r;
+ struct net_device *ul_dev;
+};
+
+static void mlx5e_tc_fib_event_work(struct work_struct *work);
+static struct mlx5e_tc_fib_event_data *
+mlx5e_tc_init_fib_work(unsigned long event, struct net_device *ul_dev, gfp_t flags)
+{
+ struct mlx5e_tc_fib_event_data *fib_work;
+
+ fib_work = kzalloc(sizeof(*fib_work), flags);
+ if (WARN_ON(!fib_work))
+ return NULL;
+
+ INIT_WORK(&fib_work->work, mlx5e_tc_fib_event_work);
+ fib_work->event = event;
+ fib_work->ul_dev = ul_dev;
+
+ return fib_work;
+}
+
+static int
+mlx5e_route_enqueue_update(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ unsigned long event)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct net_device *ul_dev;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ ul_dev = uplink_rpriv->netdev;
+
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_KERNEL);
+ if (!fib_work)
+ return -ENOMEM;
+
+ dev_hold(ul_dev);
+ refcount_inc(&r->refcnt);
+ fib_work->r = r;
+ queue_work(priv->wq, &fib_work->work);
+
+ return 0;
+}
+
+int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ unsigned long tbl_time_before, tbl_time_after;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ int err = 0;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (!esw_attr->rx_tun_attr)
+ goto out;
+
+ tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
+ tbl_time_after = tbl_time_before;
+ err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+ if (err || !esw_attr->rx_tun_attr->decap_vport)
+ goto out;
+
+ key.ip_version = attr->ip_version;
+ if (key.ip_version == 4)
+ key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
+ else
+ key.endpoint_ip.v6 = esw_attr->rx_tun_attr->dst_ip.v6;
+
+ r = mlx5e_route_get_create(priv, &key, parse_attr->filter_dev->ifindex,
+ &tbl_time_after);
+ if (IS_ERR(r)) {
+ err = PTR_ERR(r);
+ goto out;
+ }
+ /* Routing changed concurrently. The FIB event handler might have
+ * missed the new entry, so schedule an update.
+ */
+ if (tbl_time_before != tbl_time_after) {
+ err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
+ if (err) {
+ mlx5e_route_put_locked(priv, r);
+ goto out;
+ }
+ }
+
+ flow->decap_route = r;
+ list_add(&flow->decap_routes, &r->decap_flows);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return 0;
+
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return err;
+}
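+
The tbl_time_before/tbl_time_after comparison above closes a race with the FIB notifier: every FIB event bumps the route table timestamp under route_lock, so an unchanged snapshot proves no event ran between the route lookup and the entry's insertion. Schematically (a sketch with simplified arguments, not driver code; 0 stands in for the real tunnel dev index):

static int example_attach_route(struct mlx5e_priv *priv,
				struct mlx5e_route_key *key,
				struct mlx5e_route_entry **out)
{
	unsigned long before, after;
	struct mlx5e_route_entry *r;
	int err;

	before = mlx5e_route_tbl_get_last_update(priv);	/* pre-lookup snapshot */
	after = before;
	r = mlx5e_route_get_create(priv, key, 0, &after); /* refreshed on insert */
	if (IS_ERR(r))
		return PTR_ERR(r);

	if (before != after) {
		/* a FIB event raced with creation and may not have seen the
		 * new entry; replay a REPLACE through the work queue
		 */
		err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
		if (err) {
			mlx5e_route_put_locked(priv, r);
			return err;
		}
	}
	*out = r;
	return 0;
}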
+
+static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_encap_entry *e,
+ bool new_encap_entry,
+ unsigned long tbl_time_before,
+ int out_index)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ unsigned long tbl_time_after = tbl_time_before;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ const struct ip_tunnel_info *tun_info;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ unsigned short family;
+ int err = 0;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ tun_info = parse_attr->tun_info[out_index];
+ family = ip_tunnel_info_af(tun_info);
+
+ if (family == AF_INET) {
+ key.endpoint_ip.v4 = tun_info->key.u.ipv4.src;
+ key.ip_version = 4;
+ } else if (family == AF_INET6) {
+ key.endpoint_ip.v6 = tun_info->key.u.ipv6.src;
+ key.ip_version = 6;
+ }
+
+ err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev,
+ e->route_dev_ifindex, out_index);
+ if (err || !(esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return err;
+
+ r = mlx5e_route_get_create(priv, &key, parse_attr->mirred_ifindex[out_index],
+ &tbl_time_after);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ /* Routing changed concurrently. The FIB event handler might have
+ * missed the new entry, so schedule an update.
+ */
+ if (tbl_time_before != tbl_time_after) {
+ err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
+ if (err) {
+ mlx5e_route_put_locked(priv, r);
+ return err;
+ }
+ }
+
+ flow->encap_routes[out_index].r = r;
+ if (new_encap_entry)
+ list_add(&e->route_list, &r->encap_entries);
+ flow->encap_routes[out_index].index = out_index;
+ return 0;
+}
+
+void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_route_entry *r = flow->decap_route;
+
+ if (!r)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->decap_routes);
+ flow->decap_route = NULL;
+
+ if (!refcount_dec_and_test(&r->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index)
+{
+ struct mlx5e_route_entry *r = flow->encap_routes[out_index].r;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e, *tmp;
+
+ if (!r)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ flow->encap_routes[out_index].r = NULL;
+
+ if (!refcount_dec_and_test(&r->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ list_for_each_entry_safe(e, tmp, &r->encap_entries, route_list)
+ list_del_init(&e->route_list);
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *encap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, encap_flows, tmp_list) {
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ esw_attr = attr->esw_attr;
+
+ if (flow_flag_test(flow, SLOW))
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
+ else
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+ attr->modify_hdr = NULL;
+
+ esw_attr->dests[flow->tmp_entry_index].flags &=
+ ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_NO_ROUTE;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ e->pkt_reformat = NULL;
+ }
+}
+
+static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
+ struct net_device *tunnel_dev,
+ struct mlx5e_encap_entry *e,
+ struct list_head *encap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = ip_tunnel_info_af(e->tun_info) == AF_INET ?
+ mlx5e_tc_tun_update_header_ipv4(priv, tunnel_dev, e) :
+ mlx5e_tc_tun_update_header_ipv6(priv, tunnel_dev, e);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update encap header, %d", err);
+ e->flags &= ~MLX5_ENCAP_ENTRY_NO_ROUTE;
+
+ list_for_each_entry(flow, encap_flows, tmp_list) {
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+
+ if (flow_flag_test(flow, FAILED))
+ continue;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ spec = &parse_attr->spec;
+
+ err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
+ e->out_dev, e->route_dev_ifindex,
+ flow->tmp_entry_index);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update VF tunnel err=%d", err);
+ continue;
+ }
+
+ err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
+ err);
+ continue;
+ }
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
+ esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
+ goto offload_to_slow_path;
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ }
+ } else {
+offload_to_slow_path:
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+ /* mark the flow's encap dest as non-valid */
+ esw_attr->dests[flow->tmp_entry_index].flags &=
+ ~MLX5_ESW_DEST_ENCAP_VALID;
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ }
+ }
+ flow_flag_set(flow, OFFLOADED);
+ }
+}
+
+static int mlx5e_update_route_encaps(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ struct list_head *flow_list,
+ bool replace)
+{
+ struct net_device *tunnel_dev;
+ struct mlx5e_encap_entry *e;
+
+ tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
+ if (!tunnel_dev)
+ return -ENODEV;
+
+ list_for_each_entry(e, &r->encap_entries, route_list) {
+ LIST_HEAD(encap_flows);
+
+ mlx5e_take_all_encap_flows(e, &encap_flows);
+ if (list_empty(&encap_flows))
+ continue;
+
+ if (mlx5e_route_entry_valid(r))
+ mlx5e_invalidate_encap(priv, e, &encap_flows);
+
+ if (!replace) {
+ list_splice(&encap_flows, flow_list);
+ continue;
+ }
+
+ mlx5e_reoffload_encap(priv, tunnel_dev, e, &encap_flows);
+ list_splice(&encap_flows, flow_list);
+ }
+
+ return 0;
+}
+
+static void mlx5e_unoffload_flow_list(struct mlx5e_priv *priv,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, flow_list, tmp_list)
+ if (mlx5e_is_offloaded_flow(flow))
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+}
+
+static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
+ struct list_head *decap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, decap_flows, tmp_list) {
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ if (flow_flag_test(flow, FAILED))
+ continue;
+
+ parse_attr = attr->parse_attr;
+ spec = &parse_attr->spec;
+ err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
+ err);
+ continue;
+ }
+
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached decap flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ flow_flag_set(flow, OFFLOADED);
+ }
+ }
+}
+
+static int mlx5e_update_route_decap_flows(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ struct list_head *flow_list,
+ bool replace)
+{
+ struct net_device *tunnel_dev;
+ LIST_HEAD(decap_flows);
+
+ tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
+ if (!tunnel_dev)
+ return -ENODEV;
+
+ mlx5e_take_all_route_decap_flows(r, &decap_flows);
+ if (mlx5e_route_entry_valid(r))
+ mlx5e_unoffload_flow_list(priv, &decap_flows);
+ if (replace)
+ mlx5e_reoffload_decap(priv, &decap_flows);
+
+ list_splice(&decap_flows, flow_list);
+
+ return 0;
+}
+
+static void mlx5e_tc_fib_event_work(struct work_struct *work)
+{
+ struct mlx5e_tc_fib_event_data *event_data =
+ container_of(work, struct mlx5e_tc_fib_event_data, work);
+ struct net_device *ul_dev = event_data->ul_dev;
+ struct mlx5e_priv *priv = netdev_priv(ul_dev);
+ struct mlx5e_route_entry *r = event_data->r;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+ bool replace;
+ int err;
+
+ /* sync with concurrent neigh updates */
+ rtnl_lock();
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ replace = event_data->event == FIB_EVENT_ENTRY_REPLACE;
+
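+	/* Nothing was offloaded for a route that never became valid, so a
+	 * delete event requires no work.
+	 */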
+ if (!mlx5e_route_entry_valid(r) && !replace)
+ goto out;
+
+ err = mlx5e_update_route_encaps(priv, r, &flow_list, replace);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update route encaps, %d\n",
+ err);
+
+ err = mlx5e_update_route_decap_flows(priv, r, &flow_list, replace);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update route decap flows, %d\n",
+ err);
+
+ if (replace)
+ r->flags |= MLX5E_ROUTE_ENTRY_VALID;
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ rtnl_unlock();
+
+ mlx5e_put_flow_list(priv, &flow_list);
+ mlx5e_route_put(priv, event_data->r);
+ dev_put(event_data->ul_dev);
+ kfree(event_data);
+}
+
+static struct mlx5e_tc_fib_event_data *
+mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
+ struct net_device *ul_dev,
+ struct mlx5e_tc_tun_encap *encap,
+ unsigned long event,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ struct net_device *fib_dev;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+ fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
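+	/* Only host routes (/32) on mlx5 uplink netdevs are of interest here,
+	 * since tunnel endpoints are unicast host addresses.
+	 */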
+ if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ fen_info->dst_len != 32)
+ return NULL;
+
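+	/* FIB notifiers may run in atomic context, hence GFP_ATOMIC and the
+	 * deferral of all sleeping work to the driver workqueue.
+	 */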
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
+ if (!fib_work)
+ return ERR_PTR(-ENOMEM);
+
+ key.endpoint_ip.v4 = htonl(fen_info->dst);
+ key.ip_version = 4;
+
+	/* Can't fail after this point: releasing the reference to r requires
+	 * taking a sleeping mutex, which we can't do in atomic context.
+	 */
+ r = mlx5e_route_lookup_for_update(encap, &key);
+ if (!r)
+ goto out;
+ fib_work->r = r;
+ dev_hold(ul_dev);
+
+ return fib_work;
+
+out:
+ kfree(fib_work);
+ return NULL;
+}
+
+static struct mlx5e_tc_fib_event_data *
+mlx5e_init_fib_work_ipv6(struct mlx5e_priv *priv,
+ struct net_device *ul_dev,
+ struct mlx5e_tc_tun_encap *encap,
+ unsigned long event,
+ struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen_info;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ struct net_device *fib_dev;
+
+ fen_info = container_of(info, struct fib6_entry_notifier_info, info);
+ fib_dev = fib6_info_nh_dev(fen_info->rt);
+ if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ fen_info->rt->fib6_dst.plen != 128)
+ return NULL;
+
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
+ if (!fib_work)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&key.endpoint_ip.v6, &fen_info->rt->fib6_dst.addr,
+ sizeof(fen_info->rt->fib6_dst.addr));
+ key.ip_version = 6;
+
+	/* Can't fail after this point: releasing the reference to r requires
+	 * taking a sleeping mutex, which we can't do in atomic context.
+	 */
+ r = mlx5e_route_lookup_for_update(encap, &key);
+ if (!r)
+ goto out;
+ fib_work->r = r;
+ dev_hold(ul_dev);
+
+ return fib_work;
+
+out:
+ kfree(fib_work);
+ return NULL;
+}
+
+static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct fib_notifier_info *info = ptr;
+ struct mlx5e_tc_tun_encap *encap;
+ struct net_device *ul_dev;
+ struct mlx5e_priv *priv;
+
+ encap = container_of(nb, struct mlx5e_tc_tun_encap, fib_nb);
+ priv = encap->priv;
+ ul_dev = priv->netdev;
+ priv = netdev_priv(ul_dev);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ if (info->family == AF_INET)
+ fib_work = mlx5e_init_fib_work_ipv4(priv, ul_dev, encap, event, info);
+ else if (info->family == AF_INET6)
+ fib_work = mlx5e_init_fib_work_ipv6(priv, ul_dev, encap, event, info);
+ else
+ return NOTIFY_DONE;
+
+ if (!IS_ERR_OR_NULL(fib_work)) {
+ queue_work(priv->wq, &fib_work->work);
+ } else if (IS_ERR(fib_work)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
+ PTR_ERR(fib_work));
+ }
+
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_tun_encap *encap;
+ int err;
+
+ encap = kvzalloc(sizeof(*encap), GFP_KERNEL);
+ if (!encap)
+ return ERR_PTR(-ENOMEM);
+
+ encap->priv = priv;
+ encap->fib_nb.notifier_call = mlx5e_tc_tun_fib_event;
+ spin_lock_init(&encap->route_lock);
+ hash_init(encap->route_tbl);
+ err = register_fib_notifier(dev_net(priv->netdev), &encap->fib_nb,
+ NULL, NULL);
+ if (err) {
+ kvfree(encap);
+ return ERR_PTR(err);
+ }
+
+ return encap;
+}
+
+void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap)
+{
+ if (!encap)
+ return;
+
+ unregister_fib_notifier(dev_net(encap->priv->netdev), &encap->fib_nb);
+	flush_workqueue(encap->priv->wq); /* flush pending fib event work */
+ kvfree(encap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
new file mode 100644
index 000000000000..3391504d9a08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_TUN_ENCAP_H__
+#define __MLX5_EN_TC_TUN_ENCAP_H__
+
+#include "tc_priv.h"
+
+void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index);
+
+int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
+ struct netlink_ext_ack *extack,
+ struct net_device **encap_dev,
+ bool *encap_valid);
+int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info);
+
+int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec);
+
+struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv);
+void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap);
+
+#endif /* __MLX5_EN_TC_TUN_ENCAP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 1f9526244222..3479672e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv,
if (!enc_keyid.mask->keyid)
return 0;
- if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
+ !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
flow_rule_match_mpls(rule, &match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
new file mode 100644
index 000000000000..37fc1d77ded7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies */
+
+#include <net/page_pool.h>
+#include "en/txrx.h"
+#include "en/params.h"
+#include "en/trap.h"
+
+static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi);
+ struct mlx5e_ch_stats *ch_stats = trap_ctx->stats;
+ struct mlx5e_rq *rq = &trap_ctx->rq;
+ bool busy = false;
+ int work_done = 0;
+
+ ch_stats->poll++;
+
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ busy |= work_done == budget;
+ busy |= rq->post_wqes(rq);
+
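+	/* Budget exhausted or WQEs still pending: return the full budget so
+	 * the NAPI core schedules another poll.
+	 */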
+ if (busy)
+ return budget;
+
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ return work_done;
+
+ mlx5e_cq_arm(&rq->cq);
+ return work_done;
+}
+
+static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct page_pool_params pp_params = {};
+ int node = dev_to_node(mdev->device);
+ u32 pool_size;
+ int wq_sz;
+ int err;
+ int i;
+
+ rqp->wq.db_numa_node = node;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->mdev = mdev;
+ rq->priv = priv;
+ rq->stats = stats;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
+ pool_size = 1 << params->log_rq_mtu_frames;
+
+ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
+
+ rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+ rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
+ (wq_sz << rq->wqe.info.log_num_frags)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.frags) {
+ err = -ENOMEM;
+ goto err_wq_cyc_destroy;
+ }
+
+ err = mlx5e_init_di_list(rq, wq_sz, node);
+ if (err)
+ goto err_free_frags;
+
+ rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+
+ mlx5e_rq_set_trap_handlers(rq, params);
+
+	/* Create a page_pool (the trap RQ does not register it with an XDP rxq) */
+ pp_params.order = 0;
+	pp_params.flags = 0; /* no internal DMA mapping in page_pool */
+ pp_params.pool_size = pool_size;
+ pp_params.nid = node;
+ pp_params.dev = mdev->device;
+ pp_params.dma_dir = rq->buff.map_dir;
+
+	/* The page_pool can be used even when there is no rq->xdp_prog:
+	 * since it does not handle DMA mapping here, there is no state to
+	 * clear, and it gracefully handles elevated refcounts.
+	 */
+ rq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->page_pool)) {
+ err = PTR_ERR(rq->page_pool);
+ rq->page_pool = NULL;
+ goto err_free_di_list;
+ }
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe_cyc *wqe =
+ mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
+ int f;
+
+ for (f = 0; f < rq->wqe.info.num_frags; f++) {
+ u32 frag_size = rq->wqe.info.arr[f].frag_size |
+ MLX5_HW_START_PADDING;
+
+ wqe->data[f].byte_count = cpu_to_be32(frag_size);
+ wqe->data[f].lkey = rq->mkey_be;
+ }
+		/* check if num_frags is not a power of two */
+ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
+ wqe->data[f].byte_count = 0;
+ wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ wqe->data[f].addr = 0;
+ }
+ }
+ return 0;
+
+err_free_di_list:
+ mlx5e_free_di_list(rq);
+err_free_frags:
+ kvfree(rq->wqe.frags);
+err_wq_cyc_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
+{
+ page_pool_destroy(rq->page_pool);
+ mlx5e_free_di_list(rq);
+ kvfree(rq->wqe.frags);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder trap_moder = {};
+ struct mlx5e_cq *cq = &rq->cq;
+ int err;
+
+ ccp.node = dev_to_node(mdev->device);
+ ccp.ch_stats = ch_stats;
+ ccp.napi = napi;
+ ccp.ix = 0;
+ err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_rq(rq, rq_param);
+ if (err)
+ goto err_free_rq;
+
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+err_free_rq:
+ mlx5e_free_trap_rq(rq);
+err_destroy_cq:
+ mlx5e_close_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
+{
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+ mlx5e_free_trap_rq(rq);
+ mlx5e_close_cq(&rq->cq);
+}
+
+static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 rqn)
+{
+ void *tirc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
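+	/* A direct TIR bypasses RSS: no hash function, packets are dispatched
+	 * inline to the single trap RQ.
+	 */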
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+ err = mlx5e_create_tir(mdev, tir, in);
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
+{
+ mlx5e_destroy_tir(mdev, tir);
+}
+
+static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
+{
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
+{
+ struct mlx5e_params *params = &t->params;
+
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
+ mlx5e_init_rq_type_params(priv->mdev, params);
+ params->sw_mtu = priv->netdev->max_mtu;
+ mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
+}
+
+static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
+{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_trap *t;
+ int err;
+
+ t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5e_build_trap_params(priv, t);
+
+ t->priv = priv;
+ t->mdev = priv->mdev;
+ t->tstamp = &priv->tstamp;
+ t->pdev = mlx5_core_dma_dev(priv->mdev);
+ t->netdev = priv->netdev;
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ t->stats = &priv->trap_stats.ch;
+
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+
+ err = mlx5e_open_trap_rq(priv, &t->napi,
+ &priv->trap_stats.rq,
+ &t->params, &t->rq_param,
+ &priv->trap_stats.ch,
+ &t->rq);
+ if (unlikely(err))
+ goto err_napi_del;
+
+ err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
+ if (err)
+ goto err_close_trap_rq;
+
+ return t;
+
+err_close_trap_rq:
+ mlx5e_close_trap_rq(&t->rq);
+err_napi_del:
+ netif_napi_del(&t->napi);
+ kvfree(t);
+ return ERR_PTR(err);
+}
+
+void mlx5e_close_trap(struct mlx5e_trap *trap)
+{
+ mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_close_trap_rq(&trap->rq);
+ netif_napi_del(&trap->napi);
+ kvfree(trap);
+}
+
+static void mlx5e_activate_trap(struct mlx5e_trap *trap)
+{
+ napi_enable(&trap->napi);
+ mlx5e_activate_trap_rq(&trap->rq);
+ napi_schedule(&trap->napi);
+}
+
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap = priv->en_trap;
+
+ mlx5e_deactivate_trap_rq(&trap->rq);
+ napi_disable(&trap->napi);
+}
+
+static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap;
+
+ trap = mlx5e_open_trap(priv);
+ if (IS_ERR(trap))
+ goto out;
+
+ mlx5e_activate_trap(trap);
+out:
+ return trap;
+}
+
+static void mlx5e_del_trap_queue(struct mlx5e_priv *priv)
+{
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+}
+
+static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap)
+{
+ return en_trap->tir.tirn;
+}
+
+static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
+{
+ bool open_queue = !priv->en_trap;
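+	/* The trap RQ is created lazily for the first trapped trap_id and is
+	 * shared by all subsequently enabled traps.
+	 */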
+ struct mlx5e_trap *trap;
+ int err;
+
+ if (open_queue) {
+ trap = mlx5e_add_trap_queue(priv);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ priv->en_trap = trap;
+ }
+
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ if (open_queue)
+ mlx5e_del_trap_queue(priv);
+ return err;
+}
+
+static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
+{
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ mlx5e_remove_vlan_trap(priv);
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ mlx5e_remove_mac_trap(priv);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ return -EINVAL;
+ }
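+	/* Tear down the shared trap queue once the last active trap is dropped. */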
+ if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev))
+ mlx5e_del_trap_queue(priv);
+
+ return 0;
+}
+
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx)
+{
+ int err = 0;
+
+	/* Traps are unarmed when the interface is down, so there is no need
+	 * to update them. The configuration is saved in the core driver and
+	 * is queried and applied upon interface up in mlx5e_open_locked().
+	 */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ switch (trap_ctx->action) {
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err = mlx5e_handle_action_trap(priv, trap_ctx->id);
+ break;
+ case DEVLINK_TRAP_ACTION_DROP:
+ err = mlx5e_handle_action_drop(priv, trap_ctx->id);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__,
+ trap_ctx->action);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable)
+{
+ enum devlink_trap_action action;
+ int err;
+
+ err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action);
+ if (err)
+ return err;
+ if (action == DEVLINK_TRAP_ACTION_TRAP)
+ err = enable ? mlx5e_handle_action_trap(priv, trap_id) :
+ mlx5e_handle_action_drop(priv, trap_id);
+ return err;
+}
+
+static const int mlx5e_traps_arr[] = {
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
+};
+
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) {
+ err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable);
+ if (err)
+ return err;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
new file mode 100644
index 000000000000..aa3f17658c6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies */
+
+#ifndef __MLX5E_TRAP_H__
+#define __MLX5E_TRAP_H__
+
+#include "../en.h"
+#include "../devlink.h"
+
+struct mlx5e_trap {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_tir tir;
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+
+ /* data path - accessed per napi poll */
+ struct mlx5e_ch_stats *stats;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct hwtstamp_config *tstamp;
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
+
+ struct mlx5e_params params;
+ struct mlx5e_rq_param rq_param;
+};
+
+void mlx5e_close_trap(struct mlx5e_trap *trap);
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv);
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx);
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..2371b83dad9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -26,6 +26,13 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
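+/* INDIRECT_CALL_2() lets the compiler compare against the two known
+ * converters and call them directly, avoiding a retpoline-penalized
+ * indirect call on the data path.
+ */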
+static inline
+ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
+{
+ return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
+ clock, cqe_ts);
+}
+
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
@@ -371,6 +378,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+	/* SWP offsets are in 2-byte words; a VLAN tag shifts each header by VLAN_HLEN / 2 words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index d487e5e37162..8d991c3b7a50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
/* Let other devices' NAPIs and XSK wakeups see our new state. */
- synchronize_rcu();
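+	/* synchronize_net() expedites the RCU grace period when rtnl is held. */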
+ synchronize_net();
}
static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index d87c345878d3..f4bce1365639 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -111,7 +111,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
+ synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..cc0efac7b812 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -142,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta
{
#ifdef CONFIG_MLX5_EN_IPSEC
return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#endif
-
+#else
return false;
+#endif
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -171,8 +173,8 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#endif
#if IS_ENABLED(CONFIG_GENEVE)
- if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index a9b45606dbdb..a97e8d205094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
}
}
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features)
-{
- struct sec_path *sp = skb_sec_path(skb);
- struct xfrm_state *x;
-
- if (sp && sp->len) {
- x = sp->xvec[0];
- if (x && x->xso.offload_handle)
- return true;
- }
- return false;
-}
-
void mlx5e_ipsec_build_inverse_table(void)
{
u16 mss_inv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9df9b9a8e09b..3e80742a3caf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_inverse_table_init(void);
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips
return ipsec_st->x;
}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (sp && sp->len) {
+ struct xfrm_state *x = sp->xvec[0];
+
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6c5c54bcd9be..5cb936541b9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- return NUM_IPSEC_SW_COUNTERS;
+ return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+ return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 1b392696280d..95293ee0d38d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,6 +2,7 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
+#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
@@ -86,16 +87,33 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err;
- if (priv->netdev->features & NETIF_F_HW_TLS_RX)
+ if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ return 0;
+
+ priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
+ if (!priv->tls->rx_wq)
+ return -ENOMEM;
+
+ if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
err = mlx5e_accel_fs_tcp_create(priv);
+ if (err) {
+ destroy_workqueue(priv->tls->rx_wq);
+ return err;
+ }
+ }
- return err;
+ return 0;
}
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
+ if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ return;
+
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
mlx5e_accel_fs_tcp_destroy(priv);
+
+ destroy_workqueue(priv->tls->rx_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 6a1d82503ef8..d06532d0baa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_ktls_rx_resync_ctx resync;
};
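+/* The resync refcount owns the lifetime of priv_rx: the last put frees it,
+ * whether that happens in tls_dev_del() or in a late GET_PSV completion.
+ */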
+static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+ if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
+ return false;
+
+ kfree(priv_rx);
+ return true;
+}
+
+static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+ refcount_inc(&priv_rx->resync.refcnt);
+}
+
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
int err, inlen;
@@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work)
priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
return;
}
@@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work)
sq = &c->async_icosq;
if (resync_post_get_progress_params(sq, priv_rx))
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -377,7 +391,11 @@ unlock:
return err;
}
-/* Function is called with elevated refcount, it decreases it. */
+/* The function can be called with the refcount either elevated or not.
+ * It decreases the refcount and may free the kTLS priv context.
+ * The refcount is non-elevated only when tls_dev_del() has already been
+ * called while a GET_PSV was still in flight.
+ */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
struct mlx5e_icosq *sq)
{
@@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
priv_rx->stats->tls_resync_req_end++;
out:
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
kfree(buf);
}
@@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk)
return false;
resync = &priv_rx->resync;
- refcount_inc(&resync->refcnt);
+ mlx5e_ktls_priv_rx_get(priv_rx);
if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
return true;
}
@@ -625,31 +643,6 @@ err_create_key:
return err;
}
-/* Elevated refcount on the resync object means there are
- * outstanding operations (uncompleted GET_PSV WQEs) that
- * will read the resync / priv_rx objects once completed.
- * Wait for them to avoid use-after-free.
- */
-static void wait_for_resync(struct net_device *netdev,
- struct mlx5e_ktls_rx_resync_ctx *resync)
-{
-#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
- unsigned int refcnt;
-
- do {
- refcnt = refcount_read(&resync->refcnt);
- if (refcnt == 1)
- return;
-
- msleep(20);
- } while (time_before(jiffies, exp_time));
-
- netdev_warn(netdev,
- "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
- refcnt);
-}
-
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
- synchronize_rcu(); /* Sync with NAPI */
+ synchronize_net(); /* Sync with NAPI */
if (!cancel_work_sync(&priv_rx->rule.work))
/* completion is needed, as the priv_rx in the add flow
* is maintained on the wqe info (wi), not on the socket.
@@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
wait_for_completion(&priv_rx->add_ctx);
resync = &priv_rx->resync;
if (cancel_work_sync(&resync->work))
- refcount_dec(&resync->refcnt);
- wait_for_resync(netdev, resync);
+ mlx5e_ktls_priv_rx_put(priv_rx);
priv_rx->stats->tls_del++;
if (priv_rx->rule.rule)
@@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
mlx5_core_destroy_tir(mdev, priv_rx->tirn);
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
- kfree(priv_rx);
+ /* priv_rx should normally be freed here, but if there is an outstanding
+ * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
+ * processed.
+ */
+ mlx5e_ktls_priv_rx_put(priv_rx);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index fee991f5ee7c..d6b21b899dbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -231,12 +231,6 @@ int mlx5e_tls_init(struct mlx5e_priv *priv)
if (!tls)
return -ENOMEM;
- tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
- if (!tls->rx_wq) {
- kfree(tls);
- return -ENOMEM;
- }
-
priv->tls = tls;
return 0;
}
@@ -248,7 +242,6 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
if (!tls)
return;
- destroy_workqueue(tls->rx_wq);
kfree(tls);
priv->tls = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d20243d6a032..f23c67575073 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
+ bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!opened)
reset_channels = false;
- }
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
- if (reset_channels)
+ if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
- else
+ } else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
+ if (!err && !opened)
+ priv->channels.params = new_channels.params;
+ }
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..abdf721bb264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -447,12 +447,29 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = priv->channels.params;
+ /* Don't allow changing the number of channels if HTB offload is active,
+	 * because the numbering of the QoS SQs will change while per-queue
+	 * qdiscs are attached.
+ */
+ if (priv->htb.maj_id) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
+ __func__);
+ goto out;
+ }
+
+ new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
+ if (err)
+ *cur_params = old_params;
+
goto out;
}
@@ -519,7 +536,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
struct mlx5_core_dev *mdev = priv->mdev;
int tc;
@@ -534,6 +551,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
coal->tx_coalesce_usecs,
coal->tx_max_coalesced_frames);
}
+ }
+}
+
+static void
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int i;
+
+ for (i = 0; i < priv->channels.num; ++i) {
+ struct mlx5e_channel *c = priv->channels.c[i];
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
@@ -580,21 +608,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
tx_moder->pkts = coal->tx_max_coalesced_frames;
new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto out;
- }
- /* we are opened */
-
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
- if (!reset_rx && !reset_tx) {
- mlx5e_set_priv_channels_coalesce(priv, coal);
- priv->channels.params = new_channels.params;
- goto out;
- }
-
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -608,6 +624,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
mlx5e_reset_tx_moderation(&new_channels.params, mode);
}
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ if (!reset_rx && !reset_tx) {
+ if (!coal->use_adaptive_rx_coalesce)
+ mlx5e_set_priv_channels_rx_coalesce(priv, coal);
+ if (!coal->use_adaptive_tx_coalesce)
+ mlx5e_set_priv_channels_tx_coalesce(priv, coal);
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
@@ -1010,6 +1040,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1149,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
@@ -1954,6 +1996,16 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
return -EOPNOTSUPP;
+ /* Don't allow changing the PTP state if HTB offload is active, because
+	 * the numbering of the QoS SQs will change while per-queue qdiscs are
+ * attached.
+ */
+ if (priv->htb.maj_id) {
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
+ __func__);
+ return -EINVAL;
+ }
+
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..16ce7756ac43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -46,7 +46,6 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
enum {
MLX5E_FULLMATCH = 0,
MLX5E_ALLMULTI = 1,
- MLX5E_PROMISC = 2,
};
enum {
@@ -306,6 +305,79 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
+static struct mlx5_flow_handle *
+mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
+{
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = trap_id;
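+	/* The flow tag is delivered in the CQE, letting the receive path
+	 * report which devlink trap matched.
+	 */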
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+ return rule;
+}
+
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.vlan.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.vlan.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.vlan.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
+ priv->fs.vlan.trap_rule = NULL;
+ }
+}
+
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.l2.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.l2.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.l2.trap_rule);
+ priv->fs.l2.trap_rule = NULL;
+ }
+}
+
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.cvlan_filter_disabled)
@@ -419,6 +491,8 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ mlx5e_remove_vlan_trap(priv);
+
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
@@ -596,6 +670,83 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_apply_netdev_addr(priv);
}
+#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
+#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
+
+static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_handle **rule_p;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ rule_p = &priv->fs.promisc.rule;
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(*rule_p)) {
+ err = PTR_ERR(*rule_p);
+ *rule_p = NULL;
+ netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ }
+ kvfree(spec);
+ return err;
+}
+
+static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ return err;
+ }
+
+ err = mlx5e_add_promisc_rule(priv);
+ if (err)
+ goto err_destroy_promisc_table;
+
+ return 0;
+
+err_destroy_promisc_table:
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+
+ return err;
+}
+
+static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+{
+	if (WARN(!priv->fs.promisc.rule, "Trying to remove a nonexistent promiscuous rule"))
+ return;
+ mlx5_del_flow_rules(priv->fs.promisc.rule);
+ priv->fs.promisc.rule = NULL;
+}
+
+static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+{
+	if (WARN(!priv->fs.promisc.ft.t, "Trying to remove a nonexistent promiscuous table"))
+ return;
+ mlx5e_del_promisc_rule(priv);
+ mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
+ priv->fs.promisc.ft.t = NULL;
+}
+
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -615,14 +766,15 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+ int err;
if (enable_promisc) {
- if (!priv->channels.params.vlan_strip_disable)
+ err = mlx5e_create_promisc_table(priv);
+ if (err)
+ enable_promisc = false;
+ if (!priv->channels.params.vlan_strip_disable && !err)
netdev_warn_once(ndev,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
- mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -635,11 +787,8 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
- if (disable_promisc) {
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
- mlx5e_del_l2_flow_rule(priv, &ea->promisc);
- }
+ if (disable_promisc)
+ mlx5e_destroy_promisc_table(priv);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
@@ -942,6 +1091,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1237,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1304,9 +1455,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
-
- case MLX5E_PROMISC:
- break;
}
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -1323,12 +1471,12 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
}
#define MLX5E_NUM_L2_GROUPS 3
-#define MLX5E_L2_GROUP1_SIZE BIT(0)
-#define MLX5E_L2_GROUP2_SIZE BIT(15)
-#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_GROUP1_SIZE BIT(15)
+#define MLX5E_L2_GROUP2_SIZE BIT(0)
+#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
MLX5E_L2_GROUP2_SIZE +\
- MLX5E_L2_GROUP3_SIZE)
+ MLX5E_L2_GROUP_TRAP_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1351,7 +1499,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
- /* Flow Group for promiscuous */
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1360,9 +1510,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for full match */
- eth_broadcast_addr(mc_dmac);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1371,11 +1521,10 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for allmulti */
- eth_zero_addr(mc_dmac);
- mc_dmac[0] = 0x01;
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_L2_GROUP3_SIZE;
+ ix += MLX5E_L2_GROUP_TRAP_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
@@ -1390,6 +1539,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
@@ -1432,15 +1582,17 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 4
+#define MLX5E_NUM_VLAN_GROUPS 5
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
MLX5E_VLAN_GROUP2_SIZE +\
- MLX5E_VLAN_GROUP3_SIZE)
+ MLX5E_VLAN_GROUP3_SIZE +\
+ MLX5E_VLAN_GROUP_TRAP_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -1495,6 +1647,15 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7a79d330c075..ec2fcb2a2977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -65,6 +65,9 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "qos.h"
+#include "en/trap.h"
+#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -106,7 +109,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
- if (MLX5_IPSEC_DEV(mdev))
+ if (mlx5_fpga_is_ipsec_device(mdev))
return false;
if (params->xdp_prog) {
@@ -211,6 +214,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
+static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ int err;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_TYPE_TRAP:
+ err = mlx5e_handle_trap_event(priv, data);
+ break;
+ default:
+ netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
+{
+ priv->blocking_events_nb.notifier_call = blocking_event;
+ mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
+}
+
+static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
+{
+ mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
+}
+
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
@@ -342,13 +372,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-static int mlx5e_init_di_list(struct mlx5e_rq *rq,
- int wq_sz, int cpu)
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
- GFP_KERNEL, cpu_to_node(cpu));
+ rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
if (!rq->wqe.di)
return -ENOMEM;
@@ -357,7 +385,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.di);
}
@@ -422,6 +450,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = xsk_pool;
+ rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
@@ -499,7 +530,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
+ err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
if (err)
goto err_rq_frags;
@@ -650,11 +681,10 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-static int mlx5e_create_rq(struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
-
+ u8 ts_format;
void *in;
void *rqc;
void *wq;
@@ -667,6 +697,9 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
if (!in)
return -ENOMEM;
+ ts_format = mlx5_is_real_time_rq(mdev) ?
+ MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -674,6 +707,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
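The hunks above are paired: the RQ caches a ptp_cyc2time conversion callback chosen once at allocation, and mlx5e_create_rq() tells firmware which timestamp format the queue uses (real-time vs. free-running); the same pair of changes is applied to SQs further down. Selecting the converter once avoids a per-CQE branch on the clock mode. A sketch of that idea, with hypothetical names and trivialized conversions:

#include <stdbool.h>
#include <stdint.h>

struct clk;	/* opaque clock state (hypothetical) */

typedef uint64_t (*cyc2time_fn)(struct clk *clk, uint64_t cqe_ts);

static uint64_t real_time_ns(struct clk *clk, uint64_t cqe_ts)
{
	return cqe_ts;	/* HW stamps wall-clock time directly (simplified) */
}

static uint64_t free_running_ns(struct clk *clk, uint64_t cqe_ts)
{
	return cqe_ts;	/* would convert cycles via a timecounter here */
}

struct rxq {
	cyc2time_fn cyc2time;	/* picked once at queue creation */
};

static void rxq_init_clock(struct rxq *q, bool real_time_clock)
{
	q->cyc2time = real_time_clock ? real_time_ns : free_running_ns;
}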
@@ -774,7 +808,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
@@ -914,7 +948,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}
void mlx5e_close_rq(struct mlx5e_rq *rq)
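synchronize_rcu() becomes synchronize_net() in all four deactivation paths in this patch. synchronize_net() gives the same guarantee — it waits for an RCU grace period, so any NAPI poller that observed the ENABLED bit has finished — but uses the expedited variant when the RTNL lock is held, which shortens channel teardown. The reader/writer pairing, sketched with hypothetical names:

#include <linux/netdevice.h>
#include <linux/bitops.h>

struct q { unsigned long state; };
#define Q_ENABLED 0

/* NAPI/softirq side: checks the bit under RCU-like protection */
static bool q_try_post(struct q *q)
{
	return test_bit(Q_ENABLED, &q->state);	/* then post work */
}

/* control path: after this returns, no poller can still be posting */
static void q_deactivate(struct q *q)
{
	clear_bit(Q_ENABLED, &q->state);
	synchronize_net();
}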
@@ -1143,7 +1177,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1154,6 +1187,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
+ sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1187,6 +1223,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_create_sq_param *csp,
u32 *sqn)
{
+ u8 ts_format;
void *in;
void *sqc;
void *wq;
@@ -1199,6 +1236,9 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
if (!in)
return -ENOMEM;
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1207,6 +1247,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn);
MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
@@ -1233,6 +1275,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p)
{
+ u64 bitmask = 0;
void *in;
void *sqc;
int inlen;
@@ -1248,9 +1291,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
MLX5_SET(sqc, sqc, state, p->next_state);
if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
- MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ bitmask |= 1;
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
+ if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
+ bitmask |= 1 << 2;
+ MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
+ }
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
err = mlx5_core_modify_sq(mdev, sqn, in);
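mlx5e_modify_sq() previously hard-coded modify_bitmask to 1 because only the rate-limit index could change. With QoS updates added, the valid-field bitmask is accumulated and committed with a single MLX5_SET64: per this hunk, bit 0 covers the packet-pacing rate-limit index and bit 2 the QoS queue group id. The accumulate-then-commit shape, as a sketch (bit names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MOD_BIT_RL	(1ULL << 0)	/* rate-limit index is valid */
#define MOD_BIT_QOS	(1ULL << 2)	/* qos queue group id is valid */

struct sq_mod { bool rl_update, qos_update; };

static uint64_t build_modify_bitmask(const struct sq_mod *p)
{
	uint64_t bitmask = 0;

	if (p->rl_update)
		bitmask |= MOD_BIT_RL;
	if (p->qos_update)
		bitmask |= MOD_BIT_QOS;
	return bitmask;	/* written once, alongside the field values */
}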
@@ -1267,6 +1315,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
@@ -1278,6 +1327,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
msp.curr_state = MLX5_SQC_STATE_RST;
msp.next_state = MLX5_SQC_STATE_RDY;
+ if (qos_queue_group_id) {
+ msp.qos_update = true;
+ msp.qos_queue_group_id = qos_queue_group_id;
+ }
err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
mlx5e_destroy_sq(mdev, *sqn);
@@ -1288,13 +1341,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate);
-static int mlx5e_open_txqsq(struct mlx5e_channel *c,
- u32 tisn,
- int txq_ix,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq,
- int tc)
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1304,12 +1353,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (err)
return err;
+ if (qos_queue_group_id)
+ sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
+ else
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+
csp.tisn = tisn;
csp.tis_lst_sz = 1;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -1348,7 +1402,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+ synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
mlx5e_tx_disable_queue(sq->txq);
@@ -1366,7 +1420,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
}
}
-static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
@@ -1403,7 +1457,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1423,7 +1477,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
- synchronize_rcu(); /* Sync with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
}
void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1452,7 +1506,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
@@ -1502,7 +1556,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
struct mlx5e_channel *c = sq->channel;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- synchronize_rcu(); /* Sync with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_xdpsq_descs(sq);
@@ -1703,7 +1757,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
int txq_ix = c->ix + tc * params->num_channels;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc);
+ params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
if (err)
goto err_close_sqs;
}
@@ -1826,12 +1880,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
@@ -1855,13 +1909,11 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_rx_cq;
- napi_enable(&c->napi);
-
spin_lock_init(&c->async_icosq_lock);
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
if (err)
- goto err_disable_napi;
+ goto err_close_xdpsq_cq;
err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
if (err)
@@ -1904,9 +1956,7 @@ err_close_icosq:
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
-err_disable_napi:
- napi_disable(&c->napi);
-
+err_close_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -1937,7 +1987,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mlx5e_close_icosq(&c->async_icosq);
- napi_disable(&c->napi);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
@@ -2022,6 +2071,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
+ napi_enable(&c->napi);
+
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
@@ -2044,6 +2095,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
+ mlx5e_qos_deactivate_queues(c);
+
+ napi_disable(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
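Across the last few hunks, napi_enable() moves from mlx5e_open_queues() to mlx5e_activate_channel(), and napi_disable() from mlx5e_close_queues() to mlx5e_deactivate_channel(). NAPI is therefore live only between activate and deactivate: queues can be created or torn down without a polling window, and open_queues no longer needs an err_disable_napi unwind label. The resulting bracketing, sketched:

#include <linux/netdevice.h>

struct ch { struct napi_struct napi; };
static void activate_queues(struct ch *c) { }
static void deactivate_queues(struct ch *c) { }

static void channel_activate(struct ch *c)
{
	napi_enable(&c->napi);		/* first: pollers may now run */
	activate_queues(c);
}

static void channel_deactivate(struct ch *c)
{
	deactivate_queues(c);		/* stop producers, sync with NAPI */
	napi_disable(&c->napi);		/* last: no poll after this returns */
}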
@@ -2051,6 +2105,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
+ mlx5e_qos_close_queues(c);
netif_napi_del(&c->napi);
kvfree(c);
@@ -2068,10 +2123,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
u32 buf_size = 0;
int i;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev))
+ if (mlx5_fpga_is_ipsec_device(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -2200,9 +2253,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2381,10 +2433,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
+err_close_ptp:
+ if (chs->port_ptp)
+ mlx5e_port_ptp_close(chs->port_ptp);
+
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
@@ -2917,11 +2977,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
+{
+ int qos_queues, nch, ntc, num_txqs, err;
+
+ qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc + qos_queues;
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+ num_txqs += ntc;
+
+ mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+ err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
+ if (err)
+ netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+
+ return err;
+}
+
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
- int num_txqs, num_rxqs, nch, ntc;
int old_num_txqs, old_ntc;
+ int num_rxqs, nch, ntc;
int err;
old_num_txqs = netdev->real_num_tx_queues;
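The new helper centralizes real TX queue accounting: regular SQs (channels times TCs), plus the currently active HTB leaf queues, plus one PTP SQ per TC when the TX_PORT_TS flag is set. A worked example under assumed sizes (all numbers hypothetical):

#include <stdbool.h>

static int example_num_txqs(void)
{
	int nch = 8, ntc = 2, qos_queues = 4;	/* hypothetical sizes */
	bool tx_port_ts = true;
	int num_txqs = nch * ntc + qos_queues;	/* 16 + 4 */

	if (tx_port_ts)
		num_txqs += ntc;		/* + one PTP SQ per TC */
	return num_txqs;			/* 22 */
}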
@@ -2929,18 +3009,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
- num_txqs = nch * ntc;
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
- num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
- err = netif_set_real_num_tx_queues(netdev, num_txqs);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ err = mlx5e_update_tx_netdev_queues(priv);
+ if (err)
goto err_tcs;
- }
err = netif_set_real_num_rx_queues(netdev, num_rxqs);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
@@ -3044,6 +3119,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
@@ -3161,7 +3237,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
@@ -3185,6 +3262,7 @@ int mlx5e_open_locked(struct net_device *netdev)
priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
+ mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
@@ -3220,6 +3298,7 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ mlx5e_apply_traps(priv, false);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@@ -3609,32 +3688,88 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ /* MQPRIO is another top-level qdisc that can't be attached
+ * simultaneously with the offloaded HTB.
+ */
+ if (WARN_ON(priv->htb.maj_id)) {
+ err = -EINVAL;
+ goto out;
+ }
+
new_channels.params = priv->channels.params;
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ if (err)
+ priv->channels.params = old_params;
+
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
- if (err)
- goto out;
- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
- new_channels.params.num_tc);
out:
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ priv->channels.params.num_tc);
mutex_unlock(&priv->state_lock);
return err;
}
+static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+ int res;
+
+ switch (htb->command) {
+ case TC_HTB_CREATE:
+ return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
+ htb->extack);
+ case TC_HTB_DESTROY:
+ return mlx5e_htb_root_del(priv);
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
+ htb->rate, htb->ceil, htb->extack);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
+ htb->rate, htb->ceil, htb->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
+ htb->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(priv, htb->classid,
+ htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
+ htb->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_get_txq_by_classid(priv, htb->classid);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
switch (type) {
case TC_SETUP_BLOCK: {
@@ -3648,6 +3783,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
+ case TC_SETUP_QDISC_HTB:
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_setup_tc_htb(priv, type_data);
+ mutex_unlock(&priv->state_lock);
+ return err;
default:
return -EOPNOTSUPP;
}
@@ -3756,21 +3896,21 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- struct mlx5e_params *old_params;
+ struct mlx5e_params *cur_params;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
err = -EINVAL;
goto out;
}
- old_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ cur_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
@@ -3778,18 +3918,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = *old_params;
+ new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
- *old_params = new_channels.params;
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
+ *cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ *cur_params = old_params;
goto out;
}
@@ -3812,20 +3957,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+#endif
+
+ if (!enable && priv->htb.maj_id) {
+ netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
return 0;
}
-#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3923,9 +4073,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
-#endif
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3958,6 +4106,7 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (!params->vlan_strip_disable)
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
+
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
@@ -4005,7 +4154,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
max_mtu = min(max_mtu_frame, max_mtu_page);
- netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
+ netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}
@@ -4066,9 +4215,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (!reset) {
+ unsigned int old_mtu = params->sw_mtu;
+
params->sw_mtu = new_mtu;
- if (preactivate)
- preactivate(priv, NULL);
+ if (preactivate) {
+ err = preactivate(priv, NULL);
+ if (err) {
+ params->sw_mtu = old_mtu;
+ goto out;
+ }
+ }
netdev->mtu = params->sw_mtu;
goto out;
}
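This is the third place in the patch (after mqprio-with-closed-channels and the LRO toggle) that gains the same fix: save the parameters before overwriting them, and restore them on failure, so a failed reconfiguration no longer leaves the software params out of sync with the device. The shape of the fix, reduced to its essentials (apply() stands in for preactivate()/mlx5e_modify_tirs_lro()/mlx5e_num_channels_changed()):

struct params { int mtu; };

static int apply(const struct params *p) { return 0; }	/* stand-in */

static int change_cfg(struct params *cur, const struct params *next)
{
	struct params old = *cur;	/* save before overwriting */
	int err;

	*cur = *next;
	err = apply(cur);
	if (err)
		*cur = old;		/* roll back on failure */
	return err;
}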
@@ -4375,10 +4531,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
-#ifdef CONFIG_MLX5_EN_IPSEC
if (mlx5e_ipsec_feature_check(skb, netdev, features))
return features;
-#endif
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4437,8 +4591,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (MLX5_IPSEC_DEV(priv->mdev)) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
+ netdev_warn(netdev,
+ "XDP is not available on Innova cards with IPsec support\n");
return -EINVAL;
}
@@ -4621,8 +4776,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
@@ -4790,15 +4943,15 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
-void mlx5e_build_nic_params(struct mlx5e_priv *priv,
- struct mlx5e_xsk *xsk,
- struct mlx5e_rss_params *rss_params,
- struct mlx5e_params *params,
- u16 mtu)
+void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
+ struct mlx5e_rss_params *rss_params = &priv->rss_params;
+ struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
+ priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
+
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
@@ -4856,6 +5009,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
/* AF_XDP */
params->xsk = xsk;
+
+ /* Do not update netdev->features directly here;
+ * mlx5e_update_features() is called from mlx5e_attach_netdev().
+ * To change netdev->features, modify mlx5e_fix_features() instead.
+ */
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4957,8 +5115,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- mlx5e_vxlan_set_netdev_info(priv);
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5008,31 +5164,27 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXFCS;
netdev->features = netdev->hw_features;
- if (!priv->channels.params.lro_en)
- netdev->features &= ~NETIF_F_LRO;
+ /* Defaults */
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
-
- if (!priv->channels.params.scatter_fcs_en)
- netdev->features &= ~NETIF_F_RXFCS;
-
- /* prefere CQE compression over rxhash */
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
- netdev->features &= ~NETIF_F_RXHASH;
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
+ if (mlx5_qos_is_supported(mdev))
+ netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -5083,33 +5235,28 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
int err;
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
-
- mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
- netdev->mtu);
+ mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
+ mlx5e_vxlan_set_netdev_info(priv);
mlx5e_timestamp_init(priv);
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
+
err = mlx5e_tls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- mlx5e_build_nic_netdev(netdev);
+
err = mlx5e_devlink_port_register(priv);
if (err)
mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+
mlx5e_health_create_reporters(priv);
return 0;
@@ -5121,7 +5268,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_devlink_port_unregister(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5250,6 +5396,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
+ mlx5e_enable_blocking_events(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
@@ -5287,6 +5434,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_disable_blocking_events(priv);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5317,27 +5470,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
};
/* mlx5e generic netdev management API (move to en_common.c) */
-
-/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
-int mlx5e_netdev_init(struct net_device *netdev,
- struct mlx5e_priv *priv,
- struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5e_priv_init(struct mlx5e_priv *priv,
+ struct net_device *netdev,
+ struct mlx5_core_dev *mdev)
{
+ memset(priv, 0, sizeof(*priv));
+
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
return -ENOMEM;
mutex_init(&priv->state_lock);
+ hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5347,9 +5496,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
if (!priv->wq)
goto err_free_cpumask;
- /* netdev init */
- netif_carrier_off(netdev);
-
return 0;
err_free_cpumask:
@@ -5358,38 +5504,39 @@ err_free_cpumask:
return -ENOMEM;
}
-void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
+void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
+ int i;
+
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb.max_qos_sqs; i++)
+ kfree(priv->htb.qos_sq_stats[i]);
+ kvfree(priv->htb.qos_sq_stats);
}
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- int nch,
- void *ppriv)
+struct net_device *
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
{
struct net_device *netdev;
- unsigned int ptp_txqs = 0;
int err;
- if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
- ptp_txqs = profile->max_tc;
-
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * profile->max_tc + ptp_txqs,
- nch * profile->rq_groups);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- err = profile->init(mdev, netdev, profile, ppriv);
+ err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
if (err) {
- mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
+ mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
goto err_free_netdev;
}
+ netif_carrier_off(netdev);
+ dev_net_set(netdev, mlx5_core_net(mdev));
+
return netdev;
err_free_netdev:
@@ -5398,14 +5545,23 @@ err_free_netdev:
return NULL;
}
+static void mlx5e_update_features(struct net_device *netdev)
+{
+ if (netdev->reg_state != NETREG_REGISTERED)
+ return; /* features will be updated on netdev registration */
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
- const struct mlx5e_profile *profile;
+ const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
- profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
@@ -5445,6 +5601,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (profile->enable)
profile->enable(priv);
+ mlx5e_update_features(priv->netdev);
+
return 0;
err_cleanup_tx:
@@ -5471,13 +5629,76 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
cancel_work_sync(&priv->update_stats_work);
}
+static int
+mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5e_priv_init(priv, netdev, mdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
+ return err;
+ }
+ netif_carrier_off(netdev);
+ priv->profile = new_profile;
+ priv->ppriv = new_ppriv;
+ err = new_profile->init(priv->mdev, priv->netdev);
+ if (err)
+ return err;
+ err = mlx5e_attach_netdev(priv);
+ if (err)
+ new_profile->cleanup(priv);
+ return err;
+}
+
+int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
+ const struct mlx5e_profile *orig_profile = priv->profile;
+ void *orig_ppriv = priv->ppriv;
+ int err, rollback_err;
+
+ /* sanity */
+ if (new_max_nch != priv->max_nch) {
+ netdev_warn(priv->netdev,
+ "%s: Replacing profile with different max channels\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* cleanup old profile */
+ mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
+
+ err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+ if (err) { /* roll back to original profile */
+ netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
+ __func__, err);
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
+ if (rollback_err) {
+ netdev_err(priv->netdev,
+ "%s: failed to rollback to orig profile, %d\n",
+ __func__, rollback_err);
+ }
+ return err;
+}
+
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
- const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- if (profile->cleanup)
- profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}
@@ -5523,28 +5744,48 @@ static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
- void *priv;
+ unsigned int txqs, rxqs, ptp_txqs = 0;
+ struct mlx5e_priv *priv;
+ int qos_sqs = 0;
int err;
int nch;
+ if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ ptp_txqs = profile->max_tc;
+
+ if (mlx5_qos_is_supported(mdev))
+ qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
+
nch = mlx5e_get_max_num_channels(mdev);
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
+ txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
+ rxqs = nch * profile->rq_groups;
+ netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
}
- dev_net_set(netdev, mlx5_core_net(mdev));
+ mlx5e_build_nic_netdev(netdev);
+
priv = netdev_priv(netdev);
dev_set_drvdata(&adev->dev, priv);
+ priv->profile = profile;
+ priv->ppriv = NULL;
+ err = profile->init(mdev, netdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
err = mlx5e_resume(adev);
if (err) {
mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
- goto err_destroy_netdev;
+ goto err_profile_cleanup;
}
err = register_netdev(netdev);
@@ -5560,6 +5801,8 @@ static int mlx5e_probe(struct auxiliary_device *adev,
err_resume:
mlx5e_suspend(adev, state);
+err_profile_cleanup:
+ profile->cleanup(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
return err;
@@ -5573,6 +5816,7 @@ static void mlx5e_remove(struct auxiliary_device *adev)
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
+ priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 989c70c1eda3..a132fff7a980 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
@@ -653,8 +652,6 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
@@ -686,7 +683,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
params = &priv->channels.params;
+
+ params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = netdev->mtu;
@@ -712,20 +712,16 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
-static void mlx5e_build_rep_netdev(struct net_device *netdev)
+static void mlx5e_build_rep_netdev(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_core_dev *mdev = priv->mdev;
-
SET_NETDEV_DEV(netdev, mdev->device);
if (rep->vport == MLX5_VPORT_UPLINK) {
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
- mlx5e_vxlan_set_netdev_info(priv);
mlx5e_dcbnl_build_rep_netdev(netdev);
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
@@ -737,7 +733,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_NETNS_LOCAL;
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
@@ -755,30 +753,27 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err;
-
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
-
- priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
mlx5e_build_rep_params(netdev);
- mlx5e_build_rep_netdev(netdev);
-
mlx5e_timestamp_init(priv);
return 0;
}
+static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_vxlan_set_netdev_info(priv);
+ return mlx5e_init_rep(mdev, netdev);
+}
+
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
- mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1055,7 +1050,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
mlx5e_set_netdev_mtu_boundaries(priv);
+ mlx5e_rep_neigh_init(rpriv);
+}
+
+static void mlx5e_rep_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ mlx5e_rep_neigh_cleanup(rpriv);
}
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
@@ -1090,6 +1095,7 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
@@ -1108,12 +1114,15 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5_notifier_register(mdev, &priv->events_nb);
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
+ mlx5e_rep_neigh_init(rpriv);
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
+ mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
@@ -1165,6 +1174,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
+ .disable = mlx5e_rep_disable,
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
@@ -1175,7 +1185,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
- .init = mlx5e_init_rep,
+ .init = mlx5e_init_ul_rep,
.cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_ul_rep_rx,
.cleanup_rx = mlx5e_cleanup_ul_rep_rx,
@@ -1201,6 +1211,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
struct devlink_port *dl_port;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ unsigned int txqs, rxqs;
int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
@@ -1210,10 +1222,13 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
/* rpriv->rep to be looked up when profile->init() is called */
rpriv->rep = rep;
- nch = mlx5e_get_max_num_channels(dev);
profile = (rep->vport == MLX5_VPORT_UPLINK) ?
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
- netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
+
+ nch = mlx5e_get_max_num_channels(dev);
+ txqs = nch * profile->max_tc;
+ rxqs = nch * profile->rq_groups;
+ netdev = mlx5e_create_netdev(dev, txqs, rxqs);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
@@ -1222,7 +1237,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- dev_net_set(netdev, mlx5_core_net(dev));
+ mlx5e_build_rep_netdev(netdev, dev, rep);
+
rpriv->netdev = netdev;
rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
@@ -1233,20 +1249,21 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_destroy_netdev;
}
- err = mlx5e_attach_netdev(netdev_priv(netdev));
+ priv = netdev_priv(netdev);
+ priv->profile = profile;
+ priv->ppriv = rpriv;
+ err = profile->init(dev, netdev);
if (err) {
- netdev_warn(netdev,
- "Failed to attach representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev, "rep profile init failed, %d\n", err);
goto err_destroy_mdev_resources;
}
- err = mlx5e_rep_neigh_init(rpriv);
+ err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
netdev_warn(netdev,
- "Failed to initialized neighbours handling for vport %d\n",
+ "Failed to attach representor netdev for vport %d\n",
rep->vport);
- goto err_detach_netdev;
+ goto err_cleanup_profile;
}
err = register_netdev(netdev);
@@ -1254,7 +1271,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_detach_netdev;
}
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
@@ -1262,12 +1279,12 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
devlink_port_type_eth_set(dl_port, netdev);
return 0;
-err_neigh_cleanup:
- mlx5e_rep_neigh_cleanup(rpriv);
-
err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
+err_cleanup_profile:
+ priv->profile->cleanup(priv);
+
err_destroy_mdev_resources:
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(dev);
@@ -1292,8 +1309,8 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
if (dl_port)
devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
- mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
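The rework of mlx5e_vport_rep_load() splits profile->init() from mlx5e_attach_netdev() and moves neigh init/cleanup into the profile enable/disable callbacks, so the error labels again unwind in exact reverse order of setup (register_netdev() is last to succeed, so it is first to unwind). The invariant, sketched generically:

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:			/* strictly the reverse of setup order */
	undo_step_b();
undo_a:
	undo_step_a();
	return err;
}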
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 988195ab1c54..d1696404cca9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -59,6 +59,8 @@ struct mlx5e_neigh_update_table {
struct mlx5_tc_ct_priv;
struct mlx5e_rep_bond;
+struct mlx5e_tc_tun_encap;
+
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -90,6 +92,9 @@ struct mlx5_rep_uplink_priv {
/* support eswitch vports bonding */
struct mlx5e_rep_bond *bond;
+
+ /* tc tunneling encapsulation private data */
+ struct mlx5e_tc_tun_encap *encap;
};
struct mlx5e_rep_priv {
@@ -110,7 +115,6 @@ struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
}
struct mlx5e_neigh {
- struct net_device *dev;
union {
__be32 v4;
struct in6_addr v6;
@@ -122,6 +126,7 @@ struct mlx5e_neigh_hash_entry {
struct rhash_head rhash_node;
struct mlx5e_neigh m_neigh;
struct mlx5e_priv *priv;
+ struct net_device *neigh_dev;
/* Save the neigh hash entry in a list on the representor in
* addition to the hash table. In order to iterate easily over the
@@ -153,6 +158,7 @@ enum {
/* set when the encap entry is successfully offloaded into HW */
MLX5_ENCAP_ENTRY_VALID = BIT(0),
MLX5_REFORMAT_DECAP = BIT(1),
+ MLX5_ENCAP_ENTRY_NO_ROUTE = BIT(2),
};
struct mlx5e_decap_key {
@@ -175,12 +181,12 @@ struct mlx5e_encap_entry {
struct mlx5e_neigh_hash_entry *nhe;
/* neigh hash entry list of encaps sharing the same neigh */
struct list_head encap_list;
- struct mlx5e_neigh m_neigh;
/* a node of the eswitch encap hash table which keeping all the encap
* entries
*/
struct hlist_node encap_hlist;
struct list_head flows;
+ struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7f5851c61218..1b6ad94ebb10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -47,11 +47,11 @@
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
-#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
+#include "devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -212,11 +212,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
-static inline bool mlx5e_page_is_reserved(struct page *page)
-{
- return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
-}
-
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
@@ -229,7 +224,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
- if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+ if (!dev_page_is_reusable(dma_info->page)) {
stats->cache_waive++;
return false;
}
@@ -1066,9 +1061,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
}
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
- skb_hwtstamps(skb)->hwtstamp =
- mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
-
+ skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
+ rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1126,12 +1120,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
- xdp->data_hard_start = va;
- xdp->data = va + headroom;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &rq->xdp_rxq;
- xdp->frame_sz = rq->buff.frame0_sz;
+ xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, va, headroom, len, false);
}
static struct sk_buff *
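The open-coded xdp_buff field writes are replaced with the xdp_init_buff()/xdp_prepare_buff() helpers: frame size and rxq are per-queue invariants, while hard_start/headroom/length vary per packet, and the final argument says whether the metadata area is valid. Usage following the call site above (the wrapper name is illustrative):

#include <net/xdp.h>

static void fill_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
		     u32 frame_sz, void *va, u16 headroom, u32 len)
{
	xdp_init_buff(xdp, frame_sz, rxq);		 /* per-queue setup */
	xdp_prepare_buff(xdp, va, headroom, len, false); /* per-packet, no meta */
}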
@@ -1262,8 +1252,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
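This hunk and the three matching ones below fix an skb leak: when the TC chain-restore update fails, the code jumped to free_wqe/mpwrq_cqe_out, which only recycles the WQE and page state, so the already-built skb was never freed. The corrected error-path shape:

#include <linux/netdevice.h>

/* tc_update_failed mirrors !mlx5e_tc_update_skb() (illustrative) */
static void rx_complete(struct sk_buff *skb, bool tc_update_failed)
{
	if (tc_update_failed) {
		dev_kfree_skb_any(skb);	/* explicit: the WQE path won't free it */
		return;			/* then recycle the WQE as before */
	}
	/* success path: napi_gro_receive(napi, skb) */
}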
@@ -1316,8 +1308,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1371,8 +1365,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1528,8 +1524,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1667,9 +1665,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
}
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- skb_hwtstamps(skb)->hwtstamp =
- mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
-
+ skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
+ rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1786,12 +1783,10 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev)) {
- netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
+ if (mlx5_fpga_is_ipsec_device(mdev)) {
+ netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
-#endif
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
@@ -1821,3 +1816,48 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
return 0;
}
+
+static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_priv *priv = netdev_priv(rq->netdev);
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct mlx5e_wqe_frag_info *wi;
+ struct sk_buff *skb;
+ u32 cqe_bcnt;
+ u16 trap_id;
+ u16 ci;
+
+ trap_id = get_cqe_flow_tag(cqe);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+ if (!skb)
+ goto free_wqe;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ skb_push(skb, ETH_HLEN);
+
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
+ dev_kfree_skb_any(skb);
+
+free_wqe:
+ mlx5e_free_rx_wqe(rq, wi, false);
+ mlx5_wq_cyc_pop(wq);
+}
+
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
+{
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_wqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+ rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2cf2042b37c7..92c5b81427b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -420,6 +420,25 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
}
}
+static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
+ struct mlx5e_sw_stats *s)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (i = 0; i < max_qos_sqs; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -449,6 +468,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
}
}
mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
+ mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
static const struct counter_desc q_stats_desc[] = {
@@ -1740,6 +1760,41 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
+static const struct counter_desc qos_sq_stats_desc[] = {
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+#endif
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
@@ -1750,6 +1805,49 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ int i, qid;
+
+ for (qid = 0; qid < max_qos_sqs; qid++)
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ qos_sq_stats_desc[i].format, qid);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i, qid;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (qid = 0; qid < max_qos_sqs; qid++) {
+ struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
+
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
@@ -1932,6 +2030,7 @@ MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
@@ -1955,6 +2054,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
+ &MLX5E_STATS_GRP(qos),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
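The QoS SQ stats are read locklessly by the stats/ethtool paths, so the array length is published with release semantics only after the slots are initialized, and consumed with acquire semantics here: a reader that observes max_qos_sqs == N is then guaranteed to see initialized stats[0..N-1] (the comments note this pairs with smp_store_release() in mlx5e_open_qos_sq()). The pairing, sketched with hypothetical names:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct stats { u64 packets; };

struct htb {
	struct stats **sq_stats;
	u16 max_sqs;
};

/* writer (open path): fill the slot completely, then release the count */
static void publish(struct htb *h, int i, struct stats *s)
{
	WRITE_ONCE(h->sq_stats[i], s);
	smp_store_release(&h->max_sqs, i + 1);
}

/* reader (stats path): acquire the count; slots below it are now safe */
static u64 total_packets(struct htb *h)
{
	u16 n = smp_load_acquire(&h->max_sqs);
	struct stats **stats = READ_ONCE(h->sq_stats);
	u64 sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += READ_ONCE(stats[i])->packets;
	return sum;
}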
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e41fc11f2ce7..93c41312fb03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -55,6 +55,8 @@
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
+
struct counter_desc {
char format[ETH_GSTRING_LEN];
size_t offset; /* Byte offset */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4cdf834fa74a..0da69b98f38f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -63,97 +63,16 @@
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
+#include "en/tc_priv.h"
+#include "en/tc_tun_encap.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
-
-enum {
- MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
- MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
- MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
- MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
- MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
- MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
- MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
- MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
- MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
- MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
- MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
-};
-
-#define MLX5E_TC_MAX_SPLITS 1
-
-/* Helper struct for accessing a struct containing list_head array.
- * Containing struct
- * |- Helper array
- * [0] Helper item 0
- * |- list_head item 0
- * |- index (0)
- * [1] Helper item 1
- * |- list_head item 1
- * |- index (1)
- * To access the containing struct from one of the list_head items:
- * 1. Get the helper item from the list_head item using
- * helper item =
- * container_of(list_head item, helper struct type, list_head field)
- * 2. Get the contining struct from the helper item and its index in the array:
- * containing struct =
- * container_of(helper item, containing struct type, helper field[index])
- */
-struct encap_flow_item {
- struct mlx5e_encap_entry *e; /* attached encap instance */
- struct list_head list;
- int index;
-};
-
-struct mlx5e_tc_flow {
- struct rhash_head node;
- struct mlx5e_priv *priv;
- u64 cookie;
- unsigned long flags;
- struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
-
- /* flows sharing the same reformat object - currently mpls decap */
- struct list_head l3_to_l2_reformat;
- struct mlx5e_decap_entry *decap_reformat;
-
- /* Flow can be associated with multiple encap IDs.
- * The number of encaps is bounded by the number of supported
- * destinations.
- */
- struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5e_tc_flow *peer_flow;
- struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
- struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
- struct list_head hairpin; /* flows sharing the same hairpin */
- struct list_head peer; /* flows with peer flow */
- struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
- struct net_device *orig_dev; /* netdev adding flow first */
- int tmp_efi_index;
- struct list_head tmp_list; /* temporary flow list used by neigh update */
- refcount_t refcnt;
- struct rcu_head rcu_head;
- struct completion init_done;
- int tunnel_id; /* the mapped tunnel id of this flow */
- struct mlx5_flow_attr *attr;
-};
-
-struct mlx5e_tc_flow_parse_attr {
- const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
- struct net_device *filter_dev;
- struct mlx5_flow_spec spec;
- struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
- int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
- struct ethhdr eth;
-};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -164,10 +83,15 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.moffset = 0,
.mlen = 2,
},
+ [VPORT_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
+ .moffset = 2,
+ .mlen = 2,
+ },
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
.moffset = 1,
- .mlen = 3,
+ .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
@@ -189,6 +113,14 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
+/* To avoid a false lock dependency warning, give the tc_ht lock a
+ * class different from that of the ht used inside flow steering: when
+ * deleting the last flow from a group and then deleting the group, we
+ * get into del_sw_flow_group(), which calls rhashtable_destroy() on
+ * fg->ftes_hash. That takes ht->mutex, but it is a different ht->mutex
+ * than the one here.
+ */
+static struct lock_class_key tc_ht_lock_key;
+
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
@@ -238,11 +170,11 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
}
int
-mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
- enum mlx5_flow_namespace_type ns,
- enum mlx5e_tc_attr_to_reg type,
- u32 data)
+mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
{
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
@@ -266,9 +198,10 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
MLX5_SET(set_action_in, modact, offset, moffset * 8);
MLX5_SET(set_action_in, modact, length, mlen * 8);
MLX5_SET(set_action_in, modact, data, data);
+ err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
- return 0;
+ return err;
}
static struct mlx5_tc_ct_priv *
@@ -317,6 +250,41 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+int
+mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
+{
+ int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);
+
+ return ret < 0 ? ret : 0;
+}
+
+void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ int act_id, u32 data)
+{
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
+ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
+ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ char *modact;
+
+ modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+
+ /* The firmware length field is 5 bits wide, so 0 encodes 32 bits */
+ if (mlen == 4)
+ mlen = 0;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field, mfield);
+ MLX5_SET(set_action_in, modact, offset, moffset * 8);
+ MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, data, data);
+}
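+/* Hedged usage sketch: the action id returned by
+ * mlx5e_tc_match_to_reg_set_and_get_id() can later be passed back to
+ * mlx5e_tc_match_to_reg_mod_hdr_change() to rewrite that action in
+ * place once the final value is known, e.g. (illustrative only):
+ *
+ * act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts,
+ *                                               ns, VPORT_TO_REG, 0);
+ * if (act_id < 0)
+ *         return act_id;
+ * ...
+ * mlx5e_tc_match_to_reg_mod_hdr_change(mdev, mod_hdr_acts,
+ *                                      VPORT_TO_REG, act_id, vport);
+ */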
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -354,15 +322,14 @@ struct mlx5e_hairpin_entry {
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
-static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
if (!flow || !refcount_inc_not_zero(&flow->refcnt))
return ERR_PTR(-EINVAL);
return flow;
}
-static void mlx5e_flow_put(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
if (refcount_dec_and_test(&flow->refcnt)) {
mlx5e_tc_del_flow(priv, flow);
@@ -370,48 +337,6 @@ static void mlx5e_flow_put(struct mlx5e_priv *priv,
}
}
-static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- /* Complete all memory stores before setting bit. */
- smp_mb__before_atomic();
- set_bit(flag, &flow->flags);
-}
-
-#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
-
-static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
- unsigned long flag)
-{
- /* test_and_set_bit() provides all necessary barriers */
- return test_and_set_bit(flag, &flow->flags);
-}
-
-#define flow_flag_test_and_set(flow, flag) \
- __flow_flag_test_and_set(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
-static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- /* Complete all memory stores before clearing bit. */
- smp_mb__before_atomic();
- clear_bit(flag, &flow->flags);
-}
-
-#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
-static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- bool ret = test_bit(flag, &flow->flags);
-
- /* Read fields of flow structure only after checking flags. */
- smp_mb__after_atomic();
- return ret;
-}
-
-#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
@@ -422,7 +347,7 @@ static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, FT);
}
-static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
}
@@ -1137,23 +1062,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
kfree(flow->attr);
}
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index);
-
-static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *mirred_dev,
- int out_index,
- struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid);
-static int mlx5e_attach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack);
-static void mlx5e_detach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
-
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -1162,6 +1071,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
@@ -1185,13 +1097,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
return rule;
}
-static void
-mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
+void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
@@ -1200,10 +1114,11 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
@@ -1229,9 +1144,8 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
return rule;
}
-static void
-mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow)
+void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *slow_attr;
@@ -1299,6 +1213,63 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
+
+bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
+{
+ struct mlx5_core_dev *out_mdev, *route_mdev;
+ struct mlx5e_priv *out_priv, *route_priv;
+
+ out_priv = netdev_priv(out_dev);
+ out_mdev = out_priv->mdev;
+ route_priv = netdev_priv(route_dev);
+ route_mdev = route_priv->mdev;
+
+ if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
+ route_mdev->coredev_type != MLX5_COREDEV_VF)
+ return false;
+
+ return same_hw_devs(out_priv, route_priv);
+}
+
+int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
+{
+ struct mlx5e_priv *out_priv, *route_priv;
+ struct mlx5_core_dev *route_mdev;
+ struct mlx5_eswitch *esw;
+ u16 vhca_id;
+ int err;
+
+ out_priv = netdev_priv(out_dev);
+ esw = out_priv->mdev->priv.eswitch;
+ route_priv = netdev_priv(route_dev);
+ route_mdev = route_priv->mdev;
+
+ vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
+ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ return err;
+}
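+/* Hedged usage sketch (illustrative caller, not from this hunk): the
+ * two helpers above let tunnel decap code resolve the VF vport behind
+ * a route device on the uplink's eswitch:
+ *
+ * u16 vport;
+ *
+ * if (mlx5e_tc_is_vf_tunnel(out_dev, route_dev) &&
+ *     !mlx5e_tc_query_route_vport(out_dev, route_dev, &vport))
+ *         attr->esw_attr->rx_tun_attr->decap_vport = vport;
+ */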
+
+int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
+ struct mlx5_modify_hdr *mod_hdr;
+
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev,
+ get_flow_name_space(flow),
+ mod_hdr_acts->num_actions,
+ mod_hdr_acts->actions);
+ if (IS_ERR(mod_hdr))
+ return PTR_ERR(mod_hdr);
+
+ WARN_ON(flow->attr->modify_hdr);
+ flow->attr->modify_hdr = mod_hdr;
+
+ return 0;
+}
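+/* Note: unlike the mlx5e_attach_mod_hdr() path, this allocates a
+ * dedicated modify header for the flow rather than going through the
+ * shared mod_hdr table, which is why teardown in the vf_tun case below
+ * calls mlx5_modify_header_dealloc() directly instead of
+ * mlx5e_detach_mod_hdr().
+ */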
+
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
@@ -1308,21 +1279,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
+ bool vf_tun = false, encap_valid = true;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
- bool encap_valid = true;
u32 max_prio, max_chain;
int err = 0;
int out_index;
- if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
- NL_SET_ERR_MSG_MOD(extack,
- "E-switch priorities unsupported, upgrade FW");
- return -EOPNOTSUPP;
- }
-
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
@@ -1332,20 +1297,28 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ if (flow_flag_test(flow, TUN_RX)) {
+ err = mlx5e_attach_decap_route(priv, flow);
+ if (err)
+ goto err_out;
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
err = mlx5e_attach_decap(priv, flow, extack);
if (err)
- return err;
+ goto err_out;
}
parse_attr = attr->parse_attr;
@@ -1363,8 +1336,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
- return err;
+ goto err_out;
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ vf_tun = true;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[out_index].rep = rpriv->rep;
@@ -1373,20 +1349,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
- return err;
+ goto err_out;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (err)
- return err;
+ if (vf_tun) {
+ err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ if (err)
+ goto err_out;
+ } else {
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+ if (err)
+ goto err_out;
+ }
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw_attr->counter_dev, true);
- if (IS_ERR(counter))
- return PTR_ERR(counter);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_out;
+ }
attr->counter = counter;
}
@@ -1400,12 +1383,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
- if (IS_ERR(flow->rule[0]))
- return PTR_ERR(flow->rule[0]);
- else
- flow_flag_set(flow, OFFLOADED);
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
+ goto err_out;
+ }
+ flow_flag_set(flow, OFFLOADED);
return 0;
+
+err_out:
+ flow_flag_set(flow, FAILED);
+ return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@ -1426,8 +1414,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ bool vf_tun = false;
int out_index;
+ esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
if (flow_flag_test(flow, NOT_READY))
@@ -1445,20 +1436,33 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ if (flow->decap_route)
+ mlx5e_detach_decap_route(priv, flow);
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ vf_tun = true;
+ if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
- kvfree(attr->parse_attr);
+ }
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+ if (vf_tun && attr->modify_hdr)
+ mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+ else
+ mlx5e_detach_mod_hdr(priv, flow);
+ }
+ kvfree(attr->parse_attr);
+ kvfree(attr->esw_attr->rx_tun_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
+ mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
@@ -1466,141 +1470,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
kfree(flow->attr);
}
-void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct list_head *flow_list)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_attr *attr;
- struct mlx5_flow_spec *spec;
- struct mlx5e_tc_flow *flow;
- int err;
-
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
- MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
- return;
- }
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(priv);
-
- list_for_each_entry(flow, flow_list, tmp_list) {
- bool all_flow_encaps_valid = true;
- int i;
-
- if (!mlx5e_is_offloaded_flow(flow))
- continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
-
- esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
- esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- /* Flow can be associated with multiple encap entries.
- * Before offloading the flow verify that all of them have
- * a valid neighbour.
- */
- for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
- continue;
- if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
- all_flow_encaps_valid = false;
- break;
- }
- }
- /* Do not offload flows with unresolved neighbors */
- if (!all_flow_encaps_valid)
- continue;
- /* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
- err);
- continue;
- }
-
- mlx5e_tc_unoffload_from_slow_path(esw, flow);
- flow->rule[0] = rule;
- /* was unset when slow path rule removed */
- flow_flag_set(flow, OFFLOADED);
- }
-}
-
-void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct list_head *flow_list)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_attr *attr;
- struct mlx5_flow_spec *spec;
- struct mlx5e_tc_flow *flow;
- int err;
-
- list_for_each_entry(flow, flow_list, tmp_list) {
- if (!mlx5e_is_offloaded_flow(flow))
- continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
-
- /* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
- /* mark the flow's encap dest as non-valid */
- esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
-
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
- err);
- continue;
- }
-
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
- flow->rule[0] = rule;
- /* was unset when fast path rule removed */
- flow_flag_set(flow, OFFLOADED);
- }
-
- /* we know that the encap is valid */
- e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
-}
-
-static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
return flow->attr->counter;
}
-/* Takes reference to all flows attached to encap and adds the flows to
- * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
- */
-void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
-{
- struct encap_flow_item *efi;
- struct mlx5e_tc_flow *flow;
-
- list_for_each_entry(efi, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
- if (IS_ERR(mlx5e_flow_get(flow)))
- continue;
- wait_for_completion(&flow->init_done);
-
- flow->tmp_efi_index = efi->index;
- list_add(&flow->tmp_list, flow_list);
- }
-}
-
/* Iterate over tmp_list of flows attached to flow_list head. */
-void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
struct mlx5e_tc_flow *flow, *tmp;
@@ -1608,222 +1484,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l
mlx5e_flow_put(priv, flow);
}
-static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_encap_entry *next = NULL;
-
-retry:
- rcu_read_lock();
-
- /* find encap with non-zero reference counter value */
- for (next = e ?
- list_next_or_null_rcu(&nhe->encap_list,
- &e->encap_list,
- struct mlx5e_encap_entry,
- encap_list) :
- list_first_or_null_rcu(&nhe->encap_list,
- struct mlx5e_encap_entry,
- encap_list);
- next;
- next = list_next_or_null_rcu(&nhe->encap_list,
- &next->encap_list,
- struct mlx5e_encap_entry,
- encap_list))
- if (mlx5e_encap_take(next))
- break;
-
- rcu_read_unlock();
-
- /* release starting encap */
- if (e)
- mlx5e_encap_put(netdev_priv(e->out_dev), e);
- if (!next)
- return next;
-
- /* wait for encap to be fully initialized */
- wait_for_completion(&next->res_ready);
- /* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
- e = next;
- goto retry;
- }
-
- return next;
-}
-
-void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
- struct mlx5e_encap_entry *e = NULL;
- struct mlx5e_tc_flow *flow;
- struct mlx5_fc *counter;
- struct neigh_table *tbl;
- bool neigh_used = false;
- struct neighbour *n;
- u64 lastuse;
-
- if (m_neigh->family == AF_INET)
- tbl = &arp_tbl;
-#if IS_ENABLED(CONFIG_IPV6)
- else if (m_neigh->family == AF_INET6)
- tbl = ipv6_stub->nd_tbl;
-#endif
- else
- return;
-
- /* mlx5e_get_next_valid_encap() releases previous encap before returning
- * next one.
- */
- while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
- struct mlx5e_priv *priv = netdev_priv(e->out_dev);
- struct encap_flow_item *efi, *tmp;
- struct mlx5_eswitch *esw;
- LIST_HEAD(flow_list);
-
- esw = priv->mdev->priv.eswitch;
- mutex_lock(&esw->offloads.encap_tbl_lock);
- list_for_each_entry_safe(efi, tmp, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow,
- encaps[efi->index]);
- if (IS_ERR(mlx5e_flow_get(flow)))
- continue;
- list_add(&flow->tmp_list, &flow_list);
-
- if (mlx5e_is_offloaded_flow(flow)) {
- counter = mlx5e_tc_get_counter(flow);
- lastuse = mlx5_fc_query_lastuse(counter);
- if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
- neigh_used = true;
- break;
- }
- }
- }
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_put_encap_flow_list(priv, &flow_list);
- if (neigh_used) {
- /* release current encap before breaking the loop */
- mlx5e_encap_put(priv, e);
- break;
- }
- }
-
- trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
-
- if (neigh_used) {
- nhe->reported_lastuse = jiffies;
-
- /* find the relevant neigh according to the cached device and
- * dst ip pair
- */
- n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
- if (!n)
- return;
-
- neigh_event_send(n, NULL);
- neigh_release(n);
- }
-}
-
-static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
-{
- WARN_ON(!list_empty(&e->flows));
-
- if (e->compl_result > 0) {
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-
- if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
- }
-
- kfree(e->tun_info);
- kfree(e->encap_header);
- kfree_rcu(e, rcu);
-}
-
-static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
- struct mlx5e_decap_entry *d)
-{
- WARN_ON(!list_empty(&d->flows));
-
- if (!d->compl_result)
- mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
-
- kfree_rcu(d, rcu);
-}
-
-void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
- return;
- hash_del_rcu(&e->encap_hlist);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_encap_dealloc(priv, e);
-}
-
-static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
- return;
- hash_del_rcu(&d->hlist);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- mlx5e_decap_dealloc(priv, d);
-}
-
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
-{
- struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- /* flow wasn't fully initialized */
- if (!e)
- return;
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- list_del(&flow->encaps[out_index].list);
- flow->encaps[out_index].e = NULL;
- if (!refcount_dec_and_test(&e->refcnt)) {
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- return;
- }
- hash_del_rcu(&e->encap_hlist);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_encap_dealloc(priv, e);
-}
-
-static void mlx5e_detach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_decap_entry *d = flow->decap_reformat;
-
- if (!d)
- return;
-
- mutex_lock(&esw->offloads.decap_tbl_lock);
- list_del(&flow->l3_to_l2_reformat);
- flow->decap_reformat = NULL;
-
- if (!refcount_dec_and_test(&d->refcnt)) {
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return;
- }
- hash_del_rcu(&d->hlist);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- mlx5e_decap_dealloc(priv, d);
-}
-
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
@@ -2081,6 +1741,29 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
}
}
+u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
+{
+ void *headers_v;
+ u16 ethertype;
+ u8 ip_version;
+
+ if (outer)
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ else
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+
+ ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
+ /* Fall back to deriving ip_version from the matched ethertype */
+ if (!ip_version) {
+ ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+ if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
+ ip_version = 4;
+ else if (ethertype == ETH_P_IPV6)
+ ip_version = 6;
+ }
+ return ip_version;
+}
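+/* Worked example: an ARP match carries no ip_version in the packet
+ * headers, so the ethertype fallback above maps ETH_P_ARP (0x0806) to
+ * ip_version 4, letting ARP share the IPv4 handling.
+ */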
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2089,6 +1772,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
u8 *match_level,
bool *match_inner)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
bool needs_mapping, sets_mapping;
@@ -2126,6 +1810,31 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
*/
if (!netif_is_bareudp(filter_dev))
flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ err = mlx5e_tc_set_attr_rx_tun(flow, spec);
+ if (err)
+ return err;
+ } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ struct mlx5_flow_spec *tmp_spec;
+
+ tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
+ if (!tmp_spec) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
+ netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+ return -ENOMEM;
+ }
+ memcpy(tmp_spec, spec, sizeof(*tmp_spec));
+
+ err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
+ if (err) {
+ kvfree(tmp_spec);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
+ netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
+ return err;
+ }
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ kvfree(tmp_spec);
+ if (err)
+ return err;
}
if (!needs_mapping && !sets_mapping)
@@ -2269,8 +1978,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -3574,35 +3283,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-struct encap_key {
- const struct ip_tunnel_key *ip_tun_key;
- struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static inline int cmp_encap_info(struct encap_key *a,
- struct encap_key *b)
-{
- return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
-}
-
-static inline int cmp_decap_info(struct mlx5e_decap_key *a,
- struct mlx5e_decap_key *b)
-{
- return memcmp(&a->key, &b->key, sizeof(b->key));
-}
-
-static inline int hash_encap_info(struct encap_key *key)
-{
- return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
- key->tc_tunnel->tunnel_type);
-}
-
-static inline int hash_decap_info(struct mlx5e_decap_key *key)
-{
- return jhash(&key->key, sizeof(key->key), 0);
-}
-
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
@@ -3616,277 +3296,6 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
same_hw_devs(priv, peer_priv));
}
-bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
-{
- return refcount_inc_not_zero(&e->refcnt);
-}
-
-static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
-{
- return refcount_inc_not_zero(&e->refcnt);
-}
-
-static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
- uintptr_t hash_key)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_encap_entry *e;
- struct encap_key e_key;
-
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
- encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info->key;
- e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, key) &&
- mlx5e_encap_take(e))
- return e;
- }
-
- return NULL;
-}
-
-static struct mlx5e_decap_entry *
-mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
- uintptr_t hash_key)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_decap_key r_key;
- struct mlx5e_decap_entry *e;
-
- hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
- hlist, hash_key) {
- r_key = e->key;
- if (!cmp_decap_info(&r_key, key) &&
- mlx5e_decap_take(e))
- return e;
- }
- return NULL;
-}
-
-static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
-{
- size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
-
- return kmemdup(tun_info, tun_size, GFP_KERNEL);
-}
-
-static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- int out_index,
- struct mlx5e_encap_entry *e,
- struct netlink_ext_ack *extack)
-{
- int i;
-
- for (i = 0; i < out_index; i++) {
- if (flow->encaps[i].e != e)
- continue;
- NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
- netdev_err(priv->netdev, "can't duplicate encap action\n");
- return true;
- }
-
- return false;
-}
-
-static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *mirred_dev,
- int out_index,
- struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
- const struct ip_tunnel_info *tun_info;
- struct encap_key key;
- struct mlx5e_encap_entry *e;
- unsigned short family;
- uintptr_t hash_key;
- int err = 0;
-
- parse_attr = attr->parse_attr;
- tun_info = parse_attr->tun_info[out_index];
- family = ip_tunnel_info_af(tun_info);
- key.ip_tun_key = &tun_info->key;
- key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
- if (!key.tc_tunnel) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
- return -EOPNOTSUPP;
- }
-
- hash_key = hash_encap_info(&key);
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- e = mlx5e_encap_get(priv, &key, hash_key);
-
- /* must verify if encap is valid or not */
- if (e) {
- /* Check that entry was not already attached to this flow */
- if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
- err = -EOPNOTSUPP;
- goto out_err;
- }
-
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- wait_for_completion(&e->res_ready);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- if (e->compl_result < 0) {
- err = -EREMOTEIO;
- goto out_err;
- }
- goto attach_flow;
- }
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e) {
- err = -ENOMEM;
- goto out_err;
- }
-
- refcount_set(&e->refcnt, 1);
- init_completion(&e->res_ready);
-
- tun_info = dup_tun_info(tun_info);
- if (!tun_info) {
- err = -ENOMEM;
- goto out_err_init;
- }
- e->tun_info = tun_info;
- err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err)
- goto out_err_init;
-
- INIT_LIST_HEAD(&e->flows);
- hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- if (family == AF_INET)
- err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
- else if (family == AF_INET6)
- err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- complete_all(&e->res_ready);
- if (err) {
- e->compl_result = err;
- goto out_err;
- }
- e->compl_result = 1;
-
-attach_flow:
- flow->encaps[out_index].e = e;
- list_add(&flow->encaps[out_index].list, &e->flows);
- flow->encaps[out_index].index = out_index;
- *encap_dev = e->out_dev;
- if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
- attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- *encap_valid = true;
- } else {
- *encap_valid = false;
- }
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- return err;
-
-out_err:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- if (e)
- mlx5e_encap_put(priv, e);
- return err;
-
-out_err_init:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- kfree(tun_info);
- kfree(e);
- return err;
-}
-
-static int mlx5e_attach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_decap_entry *d;
- struct mlx5e_decap_key key;
- uintptr_t hash_key;
- int err = 0;
-
- parse_attr = flow->attr->parse_attr;
- if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
- NL_SET_ERR_MSG_MOD(extack,
- "encap header larger than max supported");
- return -EOPNOTSUPP;
- }
-
- key.key = parse_attr->eth;
- hash_key = hash_decap_info(&key);
- mutex_lock(&esw->offloads.decap_tbl_lock);
- d = mlx5e_decap_get(priv, &key, hash_key);
- if (d) {
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- wait_for_completion(&d->res_ready);
- mutex_lock(&esw->offloads.decap_tbl_lock);
- if (d->compl_result) {
- err = -EREMOTEIO;
- goto out_free;
- }
- goto found;
- }
-
- d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d) {
- err = -ENOMEM;
- goto out_err;
- }
-
- d->key = key;
- refcount_set(&d->refcnt, 1);
- init_completion(&d->res_ready);
- INIT_LIST_HEAD(&d->flows);
- hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
- sizeof(parse_attr->eth),
- &parse_attr->eth,
- MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(d->pkt_reformat)) {
- err = PTR_ERR(d->pkt_reformat);
- d->compl_result = err;
- }
- mutex_lock(&esw->offloads.decap_tbl_lock);
- complete_all(&d->res_ready);
- if (err)
- goto out_free;
-
-found:
- flow->decap_reformat = d;
- attr->decap_pkt_reformat = d->pkt_reformat;
- list_add(&flow->l3_to_l2_reformat, &d->flows);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return 0;
-
-out_free:
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- mlx5e_decap_put(priv, d);
- return err;
-
-out_err:
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return err;
-}
-
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
@@ -4239,7 +3648,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (encap) {
parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
+ parse_attr->tun_info[esw_attr->out_count] =
+ mlx5e_dup_tun_info(info);
if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
@@ -4376,6 +3786,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
}
}
+ /* Always set the IP version: indirect table handling relies on it */
+ attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
@@ -4656,7 +4069,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4801,6 +4213,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
return 0;
err_free:
+ flow_flag_set(flow, FAILED);
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
@@ -5007,13 +4420,13 @@ errout:
return err;
}
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
+ u32 rate_mbps = 0;
u16 vport_num;
- u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
@@ -5030,7 +4443,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
- rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+ do_div(rate, 1000000);
+ rate_mbps = max_t(u32, rate, 1);
+ }
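+ /* The computation above rounds to the nearest Mbit; do_div() leaves
+ * the quotient in rate (its return value is the remainder). E.g. a
+ * rate of 125000 B/s is 1000000 bit/s; adding 500000 and dividing by
+ * 10^6 gives rate_mbps = 1.
+ */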
+
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
@@ -5209,6 +4626,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err)
return err;
+ lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
@@ -5316,7 +4735,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
+ /* 0xFFF is reserved for the stack-device slow path table mark */
+ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5327,8 +4747,18 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
if (err)
goto err_ht_init;
- return err;
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ uplink_priv->encap = mlx5e_tc_tun_init(priv);
+ if (IS_ERR(uplink_priv->encap)) {
+ err = PTR_ERR(uplink_priv->encap);
+ goto err_register_fib_notifier;
+ }
+ return 0;
+
+err_register_fib_notifier:
+ rhashtable_destroy(tc_ht);
err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
@@ -5345,10 +4775,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
struct mlx5_rep_uplink_priv *uplink_priv;
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
-
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+ mlx5e_tc_tun_cleanup(uplink_priv->encap);
+
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
@@ -5448,7 +4879,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
- ZONE_RESTORE_MAX;
+ ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
zone_restore_id))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 4a2ce241522e..89003ae7775a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -37,6 +37,8 @@
#include "en.h"
#include "eswitch.h"
#include "en/tc_ct.h"
+#include "en/tc_tun.h"
+#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -76,6 +78,7 @@ struct mlx5_flow_attr {
struct mlx5_flow_table *dest_ft;
u8 inner_match_level;
u8 outer_match_level;
+ u8 ip_version;
u32 flags;
union {
struct mlx5_esw_flow_attr esw_attr[0];
@@ -83,6 +86,19 @@ struct mlx5_flow_attr {
};
};
+struct mlx5_rx_tun_attr {
+ u16 decap_vport;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } src_ip; /* Valid if decap_vport is not zero */
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip; /* Valid if decap_vport is not zero */
+ u32 vni;
+};
+
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
@@ -158,7 +174,7 @@ bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
-void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
+void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
@@ -167,6 +183,7 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
enum mlx5e_tc_attr_to_reg {
CHAIN_TO_REG,
+ VPORT_TO_REG,
TUNNEL_TO_REG,
CTSTATE_TO_REG,
ZONE_TO_REG,
@@ -197,6 +214,11 @@ int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
enum mlx5e_tc_attr_to_reg type,
u32 data);
+void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ int act_id, u32 data);
+
void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 data,
@@ -207,6 +229,16 @@ void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
u32 *data,
u32 *mask);
+int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data);
+
+int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow);
+
int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
int namespace,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
@@ -242,6 +274,10 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr);
+bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
+int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
+ u16 *vport);
+
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -283,7 +319,7 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
reg_b = be32_to_cpu(cqe->ft_metadata);
- if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
return false;
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..bdbffe484fce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,7 +39,6 @@
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
-#include "lib/clock.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -106,28 +105,53 @@ return_txq:
return priv->port_ptp_tc2realtxq[up];
}
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ u16 htb_maj_id)
+{
+ u16 classid;
+
+ if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
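+/* Worked example (hedged): with an HTB root of major 1:, a packet whose
+ * skb->priority is TC_H_MAKE(1 << 16, 3) selects classid 3; a packet
+ * with a non-matching major falls back to priv->htb.defcls, and a
+ * classid of 0 means "use the regular queues".
+ */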
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int num_tc_x_num_ch;
int txq_ix;
int up = 0;
int ch_ix;
- if (unlikely(priv->channels.port_ptp)) {
- int num_tc_x_num_ch;
+ /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- mlx5e_use_ptpsq(skb))
- return mlx5e_select_ptpsq(dev, skb);
+ if (unlikely(htb_maj_id)) {
+ txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
+ if (txq_ix > 0)
+ return txq_ix;
+ }
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(priv->channels.port_ptp))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel txqs.
+ /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
* If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq().
+ * The driver selects these queues only in mlx5e_select_ptpsq()
+ * and mlx5e_select_htb_queue().
*/
if (unlikely(txq_ix >= num_tc_x_num_ch))
txq_ix %= num_tc_x_num_ch;
@@ -241,9 +265,8 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-
} else
sq->stats->csum_none++;
}
@@ -682,9 +705,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -703,6 +726,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ if (unlikely(!sq)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
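+ /* The NULL check above guards HTB queues: a txq can be picked while
+ * its SQ is not (or no longer) allocated, so txq2sq may legitimately
+ * hold NULL here (hedged: inferred from this patch).
+ */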
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
@@ -714,7 +741,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +759,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -773,7 +801,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct skb_shared_hwtstamps hwts = {};
u64 ts = get_cqe_ts(cqe);
- hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
+ hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq)
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a3cfe06d5116..d54da3797c30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -115,17 +115,21 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
+ struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
+ u16 qos_sqs_size;
bool xsk_open;
int i;
rcu_read_lock();
+ qos_sqs = rcu_dereference(c->qos_sqs);
+
xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
ch_stats->poll++;
@@ -133,6 +137,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ if (unlikely(qos_sqs)) {
+ smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
+ qos_sqs_size = READ_ONCE(c->qos_sqs_size);
+
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq)
+ busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
+ }
+ }
+
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
@@ -186,6 +202,16 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
}
+ if (unlikely(qos_sqs)) {
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq) {
+ mlx5e_handle_tx_dim(sq);
+ mlx5e_cq_arm(&sq->cq);
+ }
+ }
+ }
mlx5e_handle_rx_dim(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc0afa03d407..174dfbc996c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -467,7 +467,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
- eq_table->irq_table = dev->priv.irq_table;
+ eq_table->irq_table = mlx5_irq_table_get(dev);
return 0;
}
@@ -595,6 +595,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
+ if (MLX5_CAP_GEN_MAX(dev, vhca_state))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 4c74e2690d57..26b37a0f8762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -150,7 +150,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
{
- return mlx5_eswitch_is_vf_vport(esw, vport_num);
+ return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index ffff11baa3d0..cb1e181f4c6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -122,3 +122,44 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
vport = mlx5_eswitch_get_vport(esw, vport_num);
return vport->dl_port;
}
+
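+/* Register a devlink port for an SF vport: derive the PCI PF number and
+ * switch id from the parent device and expose the vport as a PCI SF port.
+ */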
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct netdev_phys_item_id ppid = {};
+ unsigned int dl_port_index;
+ struct mlx5_vport *vport;
+ struct devlink *devlink;
+ u16 pfnum;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ pfnum = PCI_FUNC(dev->pdev->devfn);
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+ devlink = priv_to_devlink(dev);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ err = devlink_port_register(devlink, dl_port, dl_port_index);
+ if (err)
+ return err;
+
+ vport->dl_port = dl_port;
+ return 0;
+}
+
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+ devlink_port_unregister(vport->dl_port);
+ vport->dl_port = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
new file mode 100644
index 000000000000..6f6772bf61a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/etherdevice.h>
+#include <linux/idr.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+#include "fs_core.h"
+#include "esw/indir_table.h"
+#include "lib/fs_chains.h"
+
+#define MLX5_ESW_INDIR_TABLE_SIZE 128
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
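+/* Table layout: recirculation rules occupy the lower indices; the last
+ * index is reserved for the single forward rule.
+ */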
+
+struct mlx5_esw_indir_table_rule {
+ struct list_head list;
+ struct mlx5_flow_handle *handle;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip;
+ u32 vni;
+ struct mlx5_modify_hdr *mh;
+ refcount_t refcnt;
+};
+
+struct mlx5_esw_indir_table_entry {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *recirc_grp;
+ struct mlx5_flow_group *fwd_grp;
+ struct mlx5_flow_handle *fwd_rule;
+ struct list_head recirc_rules;
+ int recirc_cnt;
+ int fwd_ref;
+
+ u16 vport;
+ u8 ip_version;
+};
+
+struct mlx5_esw_indir_table {
+ struct mutex lock; /* protects table */
+ DECLARE_HASHTABLE(table, 8);
+};
+
+struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void)
+{
+ struct mlx5_esw_indir_table *indir = kvzalloc(sizeof(*indir), GFP_KERNEL);
+
+ if (!indir)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&indir->lock);
+ hash_init(indir->table);
+ return indir;
+}
+
+void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+{
+ mutex_destroy(&indir->lock);
+ kvfree(indir);
+}
+
+bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ /* Use the indirect table for all IP traffic from the uplink to a VF
+ * vport destination when the source rewrite flag is set.
+ */
+ return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
+ mlx5_eswitch_is_vf_vport(esw, vport_num) &&
+ esw->dev == dest_mdev &&
+ attr->ip_version &&
+ attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+}
+
+u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
+}
+
+static struct mlx5_esw_indir_table_rule *
+mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_esw_indir_table_rule *rule;
+
+ list_for_each_entry(rule, &e->recirc_rules, list)
+ if (rule->vni == attr->rx_tun_attr->vni &&
+ !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
+ sizeof(attr->rx_tun_attr->dst_ip)))
+ goto found;
+ return NULL;
+
+found:
+ refcount_inc(&rule->refcnt);
+ return rule;
+}
+
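+/* Look up or create a recirculation rule for a given tunnel (VXLAN VNI and
+ * outer destination IP). The rule matches tunnel traffic arriving from the
+ * uplink, rewrites the source port metadata to the decap vport and
+ * recirculates the packet to the chain 0, prio 1 table.
+ */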
+static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_esw_indir_table_rule *rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ int err = 0;
+ u32 data;
+
+ rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
+ if (rule)
+ return 0;
+
+ if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
+ return -EINVAL;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return -ENOMEM;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS |
+ MLX5_MATCH_MISC_PARAMETERS_2;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ outer_headers.ip_version, 0xf);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
+ attr->ip_version);
+ } else if (attr->ip_version) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
+ (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_ethertype;
+ }
+
+ if (attr->ip_version == 4) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ MLX5_SET(fte_match_param, rule_spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
+ } else if (attr->ip_version == 6) {
+ int len = sizeof(struct in6_addr);
+
+ memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, len);
+ memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &esw_attr->rx_tun_attr->dst_ip.v6, len);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ misc_parameters.vxlan_vni);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
+ MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
+ MLX5_VPORT_UPLINK));
+
+ /* Modify flow source to recirculate packet */
+ data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ VPORT_TO_REG, data);
+ if (err)
+ goto err_mod_hdr_regc0;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ TUNNEL_TO_REG, ESW_TUN_SLOW_TABLE_GOTO_VPORT);
+ if (err)
+ goto err_mod_hdr_regc1;
+
+ flow_act.modify_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions, mod_acts.actions);
+ if (IS_ERR(flow_act.modify_hdr)) {
+ err = PTR_ERR(flow_act.modify_hdr);
+ goto err_mod_hdr_alloc;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(dest.ft)) {
+ err = PTR_ERR(dest.ft);
+ goto err_table;
+ }
+ handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_handle;
+ }
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ rule->handle = handle;
+ rule->vni = esw_attr->rx_tun_attr->vni;
+ rule->mh = flow_act.modify_hdr;
+ memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+ sizeof(esw_attr->rx_tun_attr->dst_ip));
+ refcount_set(&rule->refcnt, 1);
+ list_add(&rule->list, &e->recirc_rules);
+ e->recirc_cnt++;
+ goto out;
+
+err_handle:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+err_table:
+ mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
+err_mod_hdr_alloc:
+err_mod_hdr_regc1:
+ dealloc_mod_hdr_actions(&mod_acts);
+err_mod_hdr_regc0:
+err_ethertype:
+ kfree(rule);
+out:
+ kvfree(rule_spec);
+ return err;
+}
+
+static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5_esw_indir_table_rule *rule;
+
+ list_for_each_entry(rule, &e->recirc_rules, list)
+ if (rule->vni == esw_attr->rx_tun_attr->vni &&
+ !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+ sizeof(esw_attr->rx_tun_attr->dst_ip)))
+ goto found;
+
+ return;
+
+found:
+ if (!refcount_dec_and_test(&rule->refcnt))
+ return;
+
+ mlx5_del_flow_rules(rule->handle);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_modify_header_dealloc(esw->dev, rule->mh);
+ list_del(&rule->list);
+ kfree(rule);
+ e->recirc_cnt--;
+}
+
+static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
+ MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
+
+ if (attr->ip_version == 4) {
+ MLX5_SET_TO_ONES(fte_match_param, match,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else if (attr->ip_version == 6) {
+ memset(MLX5_ADDR_OF(fte_match_param, match,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, sizeof(struct in6_addr));
+ } else {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+ e->recirc_grp = mlx5_create_flow_group(e->ft, in);
+ if (IS_ERR(e->recirc_grp)) {
+ err = PTR_ERR(e->recirc_grp);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&e->recirc_rules);
+ e->recirc_cnt = 0;
+
+out:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ /* The fwd group holds the single last entry of the table */
+ MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+ e->fwd_grp = mlx5_create_flow_group(e->ft, in);
+ if (IS_ERR(e->fwd_grp)) {
+ err = PTR_ERR(e->fwd_grp);
+ goto err_out;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = e->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(e->fwd_rule)) {
+ mlx5_destroy_flow_group(e->fwd_grp);
+ err = PTR_ERR(e->fwd_rule);
+ }
+
+err_out:
+ kvfree(spec);
+ kvfree(in);
+ return err;
+}
+
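+/* Create an indirection table entry for (vport, ip_version): an unmanaged
+ * level 1 FDB table holding a recirculation group and a forward group with
+ * a default rule that forwards to the target vport.
+ */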
+static struct mlx5_esw_indir_table_entry *
+mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec, u16 vport, bool decap)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_esw_indir_table_entry *e;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ root_ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns)
+ return ERR_PTR(-ENOENT);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.max_fte = MLX5_ESW_INDIR_TABLE_SIZE;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.level = 1;
+
+ ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto tbl_err;
+ }
+ e->ft = ft;
+ e->vport = vport;
+ e->ip_version = attr->ip_version;
+ e->fwd_ref = !decap;
+
+ err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+ if (err)
+ goto recirc_grp_err;
+
+ if (decap) {
+ err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ if (err)
+ goto recirc_rule_err;
+ }
+
+ err = mlx5_create_indir_fwd_group(esw, e);
+ if (err)
+ goto fwd_grp_err;
+
+ hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
+ vport << 16 | attr->ip_version);
+
+ return e;
+
+fwd_grp_err:
+ if (decap)
+ mlx5_esw_indir_table_rule_put(esw, attr, e);
+recirc_rule_err:
+ mlx5_destroy_flow_group(e->recirc_grp);
+recirc_grp_err:
+ mlx5_destroy_flow_table(e->ft);
+tbl_err:
+ kfree(e);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_esw_indir_table_entry *
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+{
+ struct mlx5_esw_indir_table_entry *e;
+ u32 key = vport << 16 | ip_version;
+
+ hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
+ if (e->vport == vport && e->ip_version == ip_version)
+ return e;
+
+ return NULL;
+}
+
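+/* Take a reference on the indirection table of (vport, ip_version),
+ * creating it on first use. Non-decap users hold a fwd reference; decap
+ * users hold recirculation rules. mlx5_esw_indir_table_put() destroys the
+ * entry once both counts drop to zero.
+ */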
+struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+{
+ struct mlx5_esw_indir_table_entry *e;
+ int err;
+
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ if (e) {
+ if (!decap) {
+ e->fwd_ref++;
+ } else {
+ err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ if (err)
+ goto out_err;
+ }
+ } else {
+ e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+ if (IS_ERR(e)) {
+ err = PTR_ERR(e);
+ esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
+ goto out_err;
+ }
+ }
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+ return e->ft;
+
+out_err:
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+ return ERR_PTR(err);
+}
+
+void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+{
+ struct mlx5_esw_indir_table_entry *e;
+
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ if (!e)
+ goto out;
+
+ if (!decap)
+ e->fwd_ref--;
+ else
+ mlx5_esw_indir_table_rule_put(esw, attr, e);
+
+ if (e->fwd_ref || e->recirc_cnt)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_group(e->recirc_grp);
+ mlx5_del_flow_rules(e->fwd_rule);
+ mlx5_destroy_flow_group(e->fwd_grp);
+ mlx5_destroy_flow_table(e->ft);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
new file mode 100644
index 000000000000..cb9eafd1b4ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_FT_H__
+#define __MLX5_ESW_FT_H__
+
+#ifdef CONFIG_MLX5_CLS_ACT
+
+struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void);
+void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
+
+struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap);
+void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap);
+
+bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev);
+
+u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
+
+#else
+/* indir API stubs */
+static inline struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void)
+{
+ return NULL;
+}
+
+static inline void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+{
+}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+{
+}
+
+static inline bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev)
+{
+ return false;
+}
+
+static inline u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+{
+ return 0;
+}
+#endif
+
+#endif /* __MLX5_ESW_FT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da901e364656..aba17835465b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1042,8 +1042,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
void *vport_elem;
int err = 0;
- if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
- !MLX5_CAP_QOS(dev, esw_scheduling))
+ if (!esw->qos.enabled)
return 0;
if (vport->qos.enabled)
@@ -1273,8 +1272,8 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
-static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
- enum mlx5_eswitch_vport_event enabled_events)
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int ret;
@@ -1301,6 +1300,13 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ if (ret)
+ goto err_vhca_mapping;
+ }
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1308,9 +1314,14 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
done:
mutex_unlock(&esw->state_lock);
return ret;
+
+err_vhca_mapping:
+ esw_vport_cleanup(esw, vport);
+ mutex_unlock(&esw->state_lock);
+ return ret;
}
-static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
@@ -1326,6 +1337,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1366,9 +1382,15 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ u16 max_sf_vports;
u32 *out;
int err;
+ max_sf_vports = mlx5_sf_max_functions(dev);
+ /* The device reports SF state as an array of 64-bit entries */
+ if (max_sf_vports)
+ outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
+
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1376,7 +1398,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (!err)
return out;
@@ -1426,7 +1448,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
{
int err;
- err = esw_enable_vport(esw, vport_num, enabled_events);
+ err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
if (err)
return err;
@@ -1437,14 +1459,14 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1594,6 +1616,15 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
kvfree(out);
}
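+/* Notify subscribers on the eswitch notifier chain that the eswitch mode
+ * is changing.
+ */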
+static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
+{
+ struct mlx5_esw_event_info info = {};
+
+ info.new_mode = mode;
+
+ blocking_notifier_call_chain(&esw->n_head, 0, &info);
+}
+
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
@@ -1654,6 +1685,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ mlx5_esw_mode_change_notify(esw, mode);
+
return 0;
abort:
@@ -1710,6 +1743,11 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ /* Notify eswitch users that the eswitch is exiting its current mode,
+ * so they can do any necessary cleanup before it is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+
mlx5_eswitch_event_handlers_unregister(esw);
if (esw->mode == MLX5_ESWITCH_LEGACY)
@@ -1794,6 +1832,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
atomic64_set(&esw->offloads.num_flows, 0);
ida_init(&esw->offloads.vport_metadata_ida);
+ xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
mutex_init(&esw->mode_lock);
@@ -1810,6 +1849,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw;
+ BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
return 0;
abort:
if (esw->work_queue)
@@ -1832,6 +1872,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
+ WARN_ON(!xa_empty(&esw->offloads.vhca_map));
+ xa_destroy(&esw->offloads.vhca_map);
ida_destroy(&esw->offloads.vport_metadata_ida);
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
@@ -1899,7 +1941,8 @@ static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num);
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
}
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
@@ -2500,4 +2543,12 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&esw->n_head, nb);
+}
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&esw->n_head, nb);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index cf87de94418f..fdf5c8c05c1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,6 +36,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
+#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
@@ -43,6 +44,7 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
+#include "sf/sf.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -159,6 +161,8 @@ struct mlx5_vport {
struct devlink_port *dl_port;
};
+struct mlx5_esw_indir_table;
+
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -175,9 +179,11 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *send_to_vport_meta_grp;
struct mlx5_flow_group *peer_miss_grp;
struct mlx5_flow_handle **peer_miss_rules;
struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
@@ -189,6 +195,8 @@ struct mlx5_eswitch_fdb {
struct mutex lock;
} vports;
+ struct mlx5_esw_indir_table *indir;
+
} offloads;
};
u32 flags;
@@ -211,6 +219,7 @@ struct mlx5_esw_offload {
struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
+ struct xarray vhca_map;
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
atomic64_t num_flows;
@@ -277,6 +286,7 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
+ struct blocking_notifier_head n_head;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -385,12 +395,14 @@ enum mlx5_flow_match_level {
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
};
struct mlx5_esw_flow_attr {
@@ -411,7 +423,9 @@ struct mlx5_esw_flow_attr {
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
+ int src_port_rewrite_act_id;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5_rx_tun_attr *rx_tun_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
};
@@ -499,6 +513,40 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
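+/* Vport index layout: PF and VF vports occupy indices 0..max_vfs, SF
+ * vports follow immediately after, and the ECPF and uplink come last.
+ */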
+static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
+{
+ /* PF and VF vport indices occupy the range 0..max_vfs */
+ return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
+}
+
+static inline int
+mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num - mlx5_sf_start_function_id(esw->dev) +
+ MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline u16
+mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
+{
+ return mlx5_sf_start_function_id(esw->dev) + idx -
+ (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
+}
+
+static inline bool
+mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_sf_supported(esw->dev) &&
+ vport_num >= mlx5_sf_start_function_id(esw->dev) &&
+ (vport_num < (mlx5_sf_start_function_id(esw->dev) +
+ mlx5_sf_max_functions(esw->dev)));
+}
+
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
@@ -527,6 +575,10 @@ static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
if (vport_num == MLX5_VPORT_UPLINK)
return mlx5_eswitch_uplink_idx(esw);
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
+
+ /* PF and VF vport indices occupy the range 0..max_vfs */
return vport_num;
}
@@ -540,6 +592,12 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
if (index == mlx5_eswitch_uplink_idx(esw))
return MLX5_VPORT_UPLINK;
+ /* SF vport indices come after the VFs and before the ECPF */
+ if (mlx5_sf_supported(esw->dev) &&
+ index > mlx5_core_max_vfs(esw->dev))
+ return mlx5_esw_sf_vport_index_to_num(esw, index);
+
+ /* PF and VF vport indices occupy the range 0..max_vfs */
return index;
}
@@ -625,6 +683,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+ for ((i) = mlx5_esw_sf_start_idx(esw); \
+ (rep) = &(esw)->offloads.vport_reps[(i)], \
+ (i) < mlx5_esw_sf_end_idx(esw); (i++))
+
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -638,6 +701,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
@@ -656,6 +723,9 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -667,6 +737,30 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+
+/**
+ * struct mlx5_esw_event_info - Indicates that the eswitch mode has
+ * changed or is changing.
+ *
+ * @new_mode: New mode of the eswitch.
+ */
+struct mlx5_esw_event_info {
+ u16 new_mode;
+};
+
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2f6a0ae20650..94cb0217b4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -38,7 +38,9 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
+#include "esw/indir_table.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -257,7 +259,9 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr,
+ struct mlx5_eswitch *src_esw,
+ u16 vport)
{
void *misc2;
void *misc;
@@ -266,10 +270,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
* VHCA in dual-port RoCE mode, and matching on source vport may fail.
*/
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ vport = mlx5_esw_indir_table_decap_vport(attr);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
- attr->in_rep->vport));
+ mlx5_eswitch_get_vport_metadata_for_match(src_esw,
+ vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
@@ -278,12 +284,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+ MLX5_CAP_GEN(src_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -295,6 +301,299 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
}
}
+static int
+esw_setup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5_flow_table *ft;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ mlx5_esw_indir_table_decap_vport(attr), true);
+ return PTR_ERR_OR_ZERO(ft);
+}
+
+static void
+esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_decap_vport(attr),
+ true);
+}
+
+static int
+esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ return esw_setup_decap_indir(esw, attr, spec);
+ return 0;
+}
+
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
+}
+
+static int
+esw_setup_chain_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level,
+ int i)
+{
+ struct mlx5_flow_table *ft;
+
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_chains_get_table(chains, chain, prio, level);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = ft;
+ return 0;
+}
+
+static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+ int from, int to)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int i;
+
+ for (i = from; i < to; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ false);
+}
+
+static bool
+esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ return true;
+ return false;
+}
+
+static int
+esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_fs_chains *chains,
+ struct mlx5_flow_attr *attr,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
+ if (err)
+ goto err_setup_chain;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+ }
+ return 0;
+
+err_setup_chain:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+}
+
+static bool
+esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ return true;
+ return false;
+}
+
+static int
+esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ bool ignore_flow_lvl,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ if (ignore_flow_lvl)
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ esw_attr->dests[j].rep->vport, false);
+ if (IS_ERR(dest[*i].ft)) {
+ err = PTR_ERR(dest[*i].ft);
+ goto err_indir_tbl_get;
+ }
+ }
+
+ if (mlx5_esw_indir_table_decap_vport(attr)) {
+ err = esw_setup_decap_indir(esw, attr, spec);
+ if (err)
+ goto err_indir_tbl_get;
+ }
+
+ return 0;
+
+err_indir_tbl_get:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+ esw_cleanup_decap_indir(esw, attr);
+}
+
+static void
+esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
+{
+ mlx5_chains_put_table(chains, chain, prio, level);
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+ if (pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
+static int
+esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int i)
+{
+ int j;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
+ esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
+ return i;
+}
+
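+/* Set up rule destinations in priority order: an explicit destination
+ * table, the slow path table, a goto-chain table, indirection tables,
+ * chain destinations with source port rewrite, or plain vport
+ * destinations.
+ */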
+static int
+esw_setup_dests(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int err = 0;
+
+ if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
+ MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw))
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+
+ if (attr->dest_ft) {
+ esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ } else if (esw_is_indir_table(esw, attr)) {
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+ err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+ } else {
+ *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+ }
+
+ return err;
+}
+
+static void
+esw_cleanup_dests(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+
+ if (attr->dest_ft) {
+ esw_cleanup_decap_indir(esw, attr);
+ } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (attr->dest_chain)
+ esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
+ else if (esw_is_indir_table(esw, attr))
+ esw_cleanup_indir_table(esw, attr);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ esw_cleanup_chain_src_port_rewrite(esw, attr);
+ }
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -308,7 +607,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
- int j, i = 0;
+ int i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
@@ -329,50 +628,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- struct mlx5_flow_table *ft;
-
- if (attr->dest_ft) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = attr->dest_ft;
- i++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
- i++;
- } else if (attr->dest_chain) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_chains_get_table(chains, attr->dest_chain,
- 1, 0);
- if (IS_ERR(ft)) {
- rule = ERR_CAST(ft);
- goto err_create_goto_table;
- }
-
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = ft;
- i++;
- } else {
- for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[j].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |=
- MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- }
- i++;
- }
+ int err;
+
+ err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_goto_table;
}
}
@@ -407,15 +671,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
-
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
@@ -434,8 +698,7 @@ err_add_rule:
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
err_create_goto_table:
return rule;
}
@@ -453,7 +716,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
- int i;
+ int i, err = 0;
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
@@ -472,22 +735,26 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[i].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
+ if (esw_is_indir_table(esw, attr))
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+ &i);
+ else
+ esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_chain_src_rewrite;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -495,13 +762,16 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
- if (IS_ERR(rule))
- goto add_err;
+ if (IS_ERR(rule)) {
+ i = esw_attr->split_count;
+ goto err_chain_src_rewrite;
+ }
atomic64_inc(&esw->offloads.num_flows);
return rule;
-add_err:
+err_chain_src_rewrite:
+ esw_put_dest_tables_loop(esw, attr, 0, i);
esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
@@ -542,13 +812,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (fwd_rule) {
esw_vport_tbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+ esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
- if (attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
}
}
@@ -810,6 +1080,81 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
+ int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+
+ if (!num_vfs || !flows)
+ return;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
+ mlx5_del_flow_rules(flows[i++]);
+
+ kvfree(flows);
+}
+
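+/* Install one rule per VF that matches the VF's source port metadata
+ * (reg_c_0) and the slow table goto-vport mark (reg_c_1), and forwards
+ * the packet to that VF vport.
+ */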
+static int
+mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ int num_vfs, vport_num, rule_idx = 0, err = 0;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_handle **flows;
+ struct mlx5_flow_spec *spec;
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
+ if (!flows)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
+
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
+ rule_idx, PTR_ERR(flow_rule));
+ goto rule_err;
+ }
+ flows[rule_idx++] = flow_rule;
+ }
+
+ esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
+ kvfree(spec);
+ return 0;
+
+rule_err:
+ while (--rule_idx >= 0)
+ mlx5_del_flow_rules(flows[rule_idx]);
+ kvfree(spec);
+alloc_err:
+ kvfree(flows);
+ return err;
+}
+
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
@@ -1292,11 +1637,11 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
+ int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
u32 flags = 0, *flow_group_in;
- int table_size, ix, err = 0;
struct mlx5_flow_group *g;
void *match_criteria;
u8 *dmac;
@@ -1322,7 +1667,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
- MLX5_ESW_MISS_FLOWS + esw->total_vports;
+ MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -1370,6 +1715,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
+ /* group for send-to-vport rules that match on metadata */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ if (num_vfs) {
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
+ ix += num_vfs;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+ err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+ if (err)
+ goto meta_rule_err;
+ }
+
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
/* create peer esw miss group */
memset(flow_group_in, 0, inlen);
@@ -1437,6 +1814,11 @@ miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
+meta_rule_err:
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
+send_vport_meta_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
@@ -1458,7 +1840,10 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -1800,11 +2185,22 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
+static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int i;
+
+ mlx5_esw_for_each_sf_rep(esw, i, rep)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
+ __unload_reps_sf_vport(esw, rep_type);
+
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -1822,7 +2218,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -1846,7 +2242,7 @@ err_reps:
return err;
}
-static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2171,12 +2567,20 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
+ struct mlx5_esw_indir_table *indir;
int err;
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
+ indir = mlx5_esw_indir_table_init();
+ if (IS_ERR(indir)) {
+ err = PTR_ERR(indir);
+ goto create_indir_err;
+ }
+ esw->fdb_table.offloads.indir = indir;
+
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -2208,6 +2612,8 @@ create_restore_err:
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
+create_indir_err:
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
@@ -2219,6 +2625,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
@@ -2824,3 +3231,126 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ int err;
+
+ err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ return err;
+
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ if (err)
+ goto devlink_err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto rep_err;
+ return 0;
+
+rep_err:
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+devlink_err:
+ mlx5_esw_vport_disable(esw, vport_num);
+ return err;
+}
+
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
+}
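The new SF enable/disable pair is a textbook goto-unwind: vport enable, devlink port registration, then rep load, with each error label undoing only the steps that already succeeded, and the disable path repeating the same teardown in reverse order. A minimal sketch of the idiom, using hypothetical step_a/step_b/step_c helpers rather than the mlx5 calls:

static int bring_up(struct ctx *c)	/* struct ctx is hypothetical */
{
	int err;

	err = step_a(c);
	if (err)
		return err;

	err = step_b(c);
	if (err)
		goto undo_a;

	err = step_c(c);
	if (err)
		goto undo_b;
	return 0;

undo_b:
	undo_step_b(c);
undo_a:
	undo_step_a(c);
	return err;
}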
+
+static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
+ !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return -EPERM;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *old_entry, *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err) {
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
+ vport_num, err);
+ return err;
+ }
+
+ vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
+ if (!vhca_map_entry)
+ return -ENOMEM;
+
+ *vhca_map_entry = vport_num;
+ old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
+ if (xa_is_err(old_entry)) {
+ kfree(vhca_map_entry);
+ return xa_err(old_entry);
+ }
+ kfree(old_entry);
+ return 0;
+}
+
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err)
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
+ vport_num, err);
+
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ kfree(vhca_map_entry);
+}
+
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
+{
+ u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
+
+ if (!res)
+ return -ENOENT;
+
+ *vport_num = *res;
+ return 0;
+}
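The vhca_id set/clear/lookup trio above keys an xarray by vhca_id and stores heap-allocated vport numbers; note that xa_store() returns either the displaced entry (which must be freed) or an xa_err()-encoded pointer. A self-contained sketch of that ownership pattern, with a hypothetical demo_map standing in for esw->offloads.vhca_map:

#include <linux/xarray.h>
#include <linux/slab.h>

static DEFINE_XARRAY(demo_map);	/* hypothetical stand-in for offloads.vhca_map */

static int demo_map_set(u32 key, u16 val)
{
	u16 *entry, *old;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	*entry = val;
	old = xa_store(&demo_map, key, entry, GFP_KERNEL);
	if (xa_is_err(old)) {	/* store failed; old encodes the errno */
		kfree(entry);
		return xa_err(old);
	}
	kfree(old);		/* xa_store() hands back any replaced entry */
	return 0;
}

static void demo_map_clear(u32 key)
{
	kfree(xa_erase(&demo_map, key));	/* xa_erase() returns the removed entry */
}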
+
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport)))
+ return 0;
+
+ return vport->metadata;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 3ce17c3d7a00..d713ae24d6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -23,7 +23,7 @@ static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
static int pcie_core(struct notifier_block *, unsigned long, void *);
-/* handler which forwards the event to events->nh, driver notifiers */
+/* handler which forwards the event to events->fw_nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
static struct mlx5_nb events_nbs_ref[] = {
@@ -55,12 +55,14 @@ struct mlx5_events {
struct mlx5_core_dev *dev;
struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
- /* driver notifier chain */
- struct atomic_notifier_head nh;
+ /* driver notifier chain for fw events */
+ struct atomic_notifier_head fw_nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
/*pcie_core*/
struct work_struct pcie_core_work;
+ /* driver notifier chain for sw events */
+ struct blocking_notifier_head sw_nh;
};
static const char *eqe_type_str(u8 type)
@@ -110,6 +112,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
+ case MLX5_EVENT_TYPE_VHCA_STATE_CHANGE:
+ return "MLX5_EVENT_TYPE_VHCA_STATE_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -331,7 +335,7 @@ static int forward_event(struct notifier_block *nb, unsigned long event, void *d
mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
eqe_type_str(eqe->type), eqe->sub_type);
- atomic_notifier_call_chain(&events->nh, event, data);
+ atomic_notifier_call_chain(&events->fw_nh, event, data);
return NOTIFY_OK;
}
@@ -342,7 +346,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
if (!events)
return -ENOMEM;
- ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+ ATOMIC_INIT_NOTIFIER_HEAD(&events->fw_nh);
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
@@ -351,6 +355,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+ BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
return 0;
}
@@ -383,11 +388,14 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
flush_workqueue(events->wq);
}
+/* This API is used only for processing and forwarding firmware
+ * events to mlx5 consumers.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_register(&events->nh, nb);
+ return atomic_notifier_chain_register(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_register);
@@ -395,11 +403,41 @@ int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *n
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_unregister(&events->nh, nb);
+ return atomic_notifier_chain_unregister(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_unregister);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
- return atomic_notifier_call_chain(&events->nh, event, data);
+ return atomic_notifier_call_chain(&events->fw_nh, event, data);
+}
+
+/* This API is used only for processing and forwarding driver-specific
+ * events to mlx5 consumers.
+ */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_register(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_unregister(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_call_chain(&events->sw_nh, event, data);
+}
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
+{
+ queue_work(dev->priv.events->wq, work);
}
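With the chain split, firmware EQE consumers keep using mlx5_notifier_register() while software-generated events travel over the new blocking chain, whose handlers may sleep. A hedged sketch of a consumer, assuming the matching declarations are exported from the mlx5 driver header:

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

/* Hypothetical consumer of the new sw event chain. */
static int demo_sw_event(struct notifier_block *nb, unsigned long event, void *data)
{
	/* react to a driver-defined sw event; data is the event payload */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_sw_event,
};

static int demo_attach(struct mlx5_core_dev *dev)
{
	return mlx5_blocking_notifier_register(dev, &demo_nb);
}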
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index cc67366495b0..22bee4990232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
struct ida halloc;
};
-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index db88eb4c49e3..8931b5584477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
#else
static inline
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
+static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */
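Exporting mlx5_fpga_is_ipsec_device() follows the header's existing idiom: a real declaration under the config symbol, paired with a static-inline stub so callers never need their own #ifdef. The shape of that idiom, with a hypothetical feature and config symbol:

#ifdef CONFIG_FEATURE_X			/* hypothetical config symbol */
bool feature_x_active(struct ctx *c);
#else
static inline bool feature_x_active(struct ctx *c) { return false; }
#endif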
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b899539a0786..66ad599bd488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -105,8 +105,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 6
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -572,7 +572,7 @@ static void del_hw_fte(struct fs_node *node)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
- node->active = 0;
+ node->active = false;
}
}
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -1759,6 +1760,7 @@ search_again_locked:
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ /* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
@@ -1811,6 +1813,8 @@ skip_search:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1909,6 +1913,8 @@ search_again_locked:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
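Both fs_core hunks enforce the same invariant: the fte reference taken while searching or inserting must be dropped on every exit path, including when add_rule_fg() fails, or the node leaks. A hedged sketch of the balancing rule, with hypothetical names:

static struct rule *attach_rule(struct group *g, struct fte *fte)
{
	struct rule *rule;

	rule = add_rule(g, fte);	/* may return ERR_PTR() */
	if (IS_ERR(rule))
		put_node(&fte->node);	/* balance the lookup ref on failure too */
	return rule;
}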
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 54523bed16cd..0c32c485eb58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
return true;
}
+static void enter_error_state(struct mlx5_core_dev *dev, bool force)
+{
+ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_cmd_flush(dev);
+ }
+
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+}
+
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
bool err_detected = false;
@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
goto unlock;
}
- if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
- dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- mlx5_cmd_flush(dev);
- }
-
- mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+ enter_error_state(dev, force);
unlock:
mutex_unlock(&dev->intf_state_mutex);
}
@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_enter_error_state(dev, false);
+ enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
dev->priv.health.fatal_error = fatal_error;
print_health_info(dev);
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_trigger_health_work(dev);
- goto out;
+ return;
}
count = ioread32be(health->health_counter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 97b5fcb1f406..1eeca45cfcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -72,23 +72,15 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-int mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- int err;
-
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
+ netif_carrier_off(netdev);
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
- netdev->mtu);
+ mlx5e_build_nic_params(priv, NULL, netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -112,7 +104,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
- mlx5e_netdev_cleanup(priv->netdev, priv);
+ mlx5e_priv_cleanup(priv);
}
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
@@ -753,7 +745,14 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
goto destroy_ht;
}
- prof->init(mdev, netdev, prof, ipriv);
+ err = mlx5e_priv_init(epriv, netdev, mdev);
+ if (err)
+ goto destroy_mdev_resources;
+
+ epriv->profile = prof;
+ epriv->ppriv = ipriv;
+
+ prof->init(mdev, netdev);
err = mlx5e_attach_netdev(epriv);
if (err)
@@ -777,6 +776,7 @@ detach:
prof->cleanup(epriv);
if (ipriv->sub_interface)
return err;
+destroy_mdev_resources:
mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index b79dc1e28c41..99d46fda9f82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -87,10 +87,7 @@ void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
-int mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
+int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5i_cleanup(struct mlx5e_priv *priv);
int mlx5i_update_nic_rx(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 7163d9f6c4a6..3d0a18a0bed4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -276,14 +276,12 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
/* Called directly after IPoIB netdevice was created to initialize SW structs */
static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
int err;
- err = mlx5i_init(mdev, netdev, profile, ppriv);
+ err = mlx5i_init(mdev, netdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index c70c1f0ca0c1..b0e129d0f6d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -67,39 +67,68 @@ enum {
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
-static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts)
+static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
+}
+
+static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
+}
+
+static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
+{
+ u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!MLX5_CAP_MCAM_REG(dev, mtutc))
+ return -EOPNOTSUPP;
+
+ return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
+ MLX5_REG_MTUTC, 0, 1);
+}
+
+static u64 mlx5_read_time(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts,
+ bool real_time)
{
u32 timer_h, timer_h1, timer_l;
- timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
+ &dev->iseg->internal_timer_h);
ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
+ &dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
- timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+ timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
+ &dev->iseg->internal_timer_h);
if (timer_h != timer_h1) {
/* wrap around */
ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
+ &dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
}
- return (u64)timer_l | (u64)timer_h1 << 32;
+ return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
+ (u64)timer_l | (u64)timer_h1 << 32;
}
static u64 read_internal_timer(const struct cyclecounter *cc)
{
- struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+ struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
+ struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
- return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
+ return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_timer *timer;
u32 sign;
if (!clock_info)
@@ -109,10 +138,11 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
smp_store_mb(clock_info->sign,
sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
- clock_info->cycles = clock->tc.cycle_last;
- clock_info->mult = clock->cycles.mult;
- clock_info->nsec = clock->tc.nsec;
- clock_info->frac = clock->tc.frac;
+ timer = &clock->timer;
+ clock_info->cycles = timer->tc.cycle_last;
+ clock_info->mult = timer->cycles.mult;
+ clock_info->nsec = timer->tc.nsec;
+ clock_info->frac = timer->tc.frac;
smp_store_release(&clock_info->sign,
sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
@@ -151,92 +181,184 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
+ struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ timer = container_of(dwork, struct mlx5_timer, overflow_work);
+ clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_read(&clock->tc);
+ timecounter_read(&timer->tc);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+ schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+}
+
+static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
+ const struct timespec64 *ts)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
+ ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
+ return -EINVAL;
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
+ MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
+ MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- u64 ns = timespec64_to_ns(ts);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ err = mlx5_ptp_settime_real_time(mdev, ts);
+ if (err)
+ return err;
+
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_init(&clock->tc, &clock->cycles, ns);
+ timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
+static
+struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
+ struct ptp_system_timestamp *sts)
+{
+ struct timespec64 ts;
+ u64 time;
+
+ time = mlx5_read_time(mdev, sts, true);
+ ts = ns_to_timespec64(time);
+ return ts;
+}
+
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ if (mlx5_real_time_mode(mdev)) {
+ *ts = mlx5_ptp_gettimex_real_time(mdev, sts);
+ goto out;
+ }
+
write_seqlock_irqsave(&clock->lock, flags);
- cycles = mlx5_read_internal_timer(mdev, sts);
- ns = timecounter_cyc2time(&clock->tc, cycles);
+ cycles = mlx5_read_time(mdev, sts, false);
+ ns = timecounter_cyc2time(&timer->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
-
*ts = ns_to_timespec64(ns);
-
+out:
return 0;
}
+static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ /* HW time adjustment range is s16. If out of range, fall back to settime */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ struct timespec64 ts;
+ s64 ns;
+
+ ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
+ ns = timespec64_to_ns(&ts) + delta;
+ ts = ns_to_timespec64(ns);
+ return mlx5_ptp_settime_real_time(mdev, &ts);
+ }
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
+ MLX5_SET(mtutc_reg, in, time_adjustment, delta);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
+}
+
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ if (err)
+ return err;
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_adjtime(&clock->tc, delta);
+ timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
+static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
+}
+
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
u32 diff;
u64 adj;
+ int err;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+ err = mlx5_ptp_adjfreq_real_time(mdev, delta);
+ if (err)
+ return err;
if (delta < 0) {
neg_adj = 1;
delta = -delta;
}
- adj = clock->nominal_c_mult;
+ adj = timer->nominal_c_mult;
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_read(&clock->tc);
- clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
- clock->nominal_c_mult + diff;
+ timecounter_read(&timer->tc);
+ timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
+ timer->nominal_c_mult + diff;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
@@ -305,6 +427,45 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
MLX5_EVENT_MODE_REPETETIVE & on);
}
+static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u64 cycles_now, cycles_delta;
+ u64 nsec_now, nsec_delta;
+ struct mlx5_timer *timer;
+ unsigned long flags;
+
+ timer = &clock->timer;
+
+ cycles_now = mlx5_read_time(mdev, NULL, false);
+ write_seqlock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
+ nsec_delta = target_ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+ timer->cycles.mult);
+ write_sequnlock_irqrestore(&clock->lock, flags);
+
+ return cycles_now + cycles_delta;
+}
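find_target_cycles() inverts the cyclecounter's forward mapping ns = (cycles * mult) >> shift by computing cycles_delta = (nsec_delta << shift) / mult under the clock seqlock. A standalone round-trip check of that arithmetic (hypothetical mult/shift values, userspace C):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t mult = 4194304, shift = 23;	/* hypothetical cyclecounter params */
	uint64_t cycles = 1000000;
	uint64_t ns = (cycles * mult) >> shift;	/* forward: cycles -> ns */
	uint64_t back = (ns << shift) / mult;	/* inverse: ns -> cycles */

	assert(back == cycles);	/* exact here; may truncate by 1 in general */
	return 0;
}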
+
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
+ s64 sec, u32 nsec)
+{
+ struct timespec64 ts;
+ s64 target_ns;
+
+ ts.tv_sec = sec;
+ ts.tv_nsec = nsec;
+ target_ns = timespec64_to_ns(&ts);
+
+ return find_target_cycles(mdev, target_ns);
+}
+
+static u64 perout_conf_real_time(s64 sec, u32 nsec)
+{
+ return (u64)nsec | (u64)sec << 32;
+}
+
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
@@ -314,11 +475,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp = 0;
- u64 cycles_now, cycles_delta;
struct timespec64 ts;
- unsigned long flags;
u32 field_select = 0;
+ u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
@@ -335,12 +494,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
-
+ field_select = MLX5_MTPPS_FS_ENABLE;
if (on) {
+ bool rt_mode = mlx5_real_time_mode(mdev);
+ u32 nsec;
+ s64 sec;
+
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -350,23 +513,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(mdev, NULL);
- write_seqlock_irqsave(&clock->lock, flags);
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
- clock->cycles.mult);
- write_sequnlock_irqrestore(&clock->lock, flags);
- time_stamp = cycles_now + cycles_delta;
- field_select = MLX5_MTPPS_FS_PIN_MODE |
- MLX5_MTPPS_FS_PATTERN |
- MLX5_MTPPS_FS_ENABLE |
- MLX5_MTPPS_FS_TIME_STAMP;
- } else {
- field_select = MLX5_MTPPS_FS_ENABLE;
+ nsec = rq->perout.start.nsec;
+ sec = rq->perout.start.sec;
+
+ if (rt_mode && sec > U32_MAX)
+ return -EINVAL;
+
+ time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
+ perout_conf_internal_timer(mdev, sec, nsec);
+
+ field_select |= MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_TIME_STAMP;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -537,25 +695,50 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
+static void ts_next_sec(struct timespec64 *ts)
+{
+ ts->tv_sec += 1;
+ ts->tv_nsec = 0;
+}
+
+static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
+ struct mlx5_clock *clock)
+{
+ bool rt_mode = mlx5_real_time_mode(mdev);
+ struct timespec64 ts;
+ s64 target_ns;
+
+ if (rt_mode)
+ ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
+ else
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
+
+ ts_next_sec(&ts);
+ target_ns = timespec64_to_ns(&ts);
+
+ return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
+ find_target_cycles(mdev, target_ns);
+}
+
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
struct ptp_clock_event ptp_event;
- u64 cycles_now, cycles_delta;
- u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
struct mlx5_core_dev *mdev;
- struct timespec64 ts;
unsigned long flags;
+ u64 ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp =
+ ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
+ mlx5_real_time_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp)) :
mlx5_timecounter_cyc2time(clock,
be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
@@ -569,17 +752,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
- mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
- cycles_now = mlx5_read_internal_timer(mdev, NULL);
- ts.tv_sec += 1;
- ts.tv_nsec = 0;
- ns = timespec64_to_ns(&ts);
+ ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
- clock->cycles.mult);
- clock->pps_info.start[pin] = cycles_now + cycles_delta;
+ clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_work(&clock->pps_info.out_work);
break;
@@ -591,29 +766,32 @@ static int mlx5_pps_event(struct notifier_block *nb,
return NOTIFY_OK;
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
- u64 overflow_cycles;
- u64 ns;
- u64 frac = 0;
+ struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
- if (!dev_freq) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
- seqlock_init(&clock->lock);
- clock->cycles.read = read_internal_timer;
- clock->cycles.shift = MLX5_CYCLES_SHIFT;
- clock->cycles.mult = clocksource_khz2mult(dev_freq,
- clock->cycles.shift);
- clock->nominal_c_mult = clock->cycles.mult;
- clock->cycles.mask = CLOCKSOURCE_MASK(41);
-
- timecounter_init(&clock->tc, &clock->cycles,
+ timer->cycles.read = read_internal_timer;
+ timer->cycles.shift = MLX5_CYCLES_SHIFT;
+ timer->cycles.mult = clocksource_khz2mult(dev_freq,
+ timer->cycles.shift);
+ timer->nominal_c_mult = timer->cycles.mult;
+ timer->cycles.mask = CLOCKSOURCE_MASK(41);
+
+ timecounter_init(&timer->tc, &timer->cycles,
ktime_to_ns(ktime_get_real()));
+}
+
+static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+{
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_timer *timer = &clock->timer;
+ u64 overflow_cycles;
+ u64 frac = 0;
+ u64 ns;
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -622,32 +800,77 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
- overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
- overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+ overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
+ overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
- ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
+ ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
frac, &frac);
do_div(ns, NSEC_PER_SEC / HZ);
- clock->overflow_period = ns;
+ timer->overflow_period = ns;
- mdev->clock_info =
- (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
- if (mdev->clock_info) {
- mdev->clock_info->nsec = clock->tc.nsec;
- mdev->clock_info->cycles = clock->tc.cycle_last;
- mdev->clock_info->mask = clock->cycles.mask;
- mdev->clock_info->mult = clock->nominal_c_mult;
- mdev->clock_info->shift = clock->cycles.shift;
- mdev->clock_info->frac = clock->tc.frac;
- mdev->clock_info->overflow_period = clock->overflow_period;
+ INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+ if (timer->overflow_period)
+ schedule_delayed_work(&timer->overflow_work, 0);
+ else
+ mlx5_core_warn(mdev,
+ "invalid overflow period, overflow_work is not scheduled\n");
+
+ if (clock_info)
+ clock_info->overflow_period = timer->overflow_period;
+}
+
+static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_ib_clock_info *info;
+ struct mlx5_timer *timer;
+
+ mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+ if (!mdev->clock_info) {
+ mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
+ return;
+ }
+
+ info = mdev->clock_info;
+ timer = &clock->timer;
+
+ info->nsec = timer->tc.nsec;
+ info->cycles = timer->tc.cycle_last;
+ info->mask = timer->cycles.mask;
+ info->mult = timer->nominal_c_mult;
+ info->shift = timer->cycles.shift;
+ info->frac = timer->tc.frac;
+}
+
+static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ mlx5_timecounter_init(mdev);
+ mlx5_init_clock_info(mdev);
+ mlx5_init_overflow_period(clock);
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ if (mlx5_real_time_mode(mdev)) {
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ mlx5_ptp_settime(&clock->ptp_info, &ts);
+ }
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return;
}
+ seqlock_init(&clock->lock);
+ mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
- INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
- if (clock->overflow_period)
- schedule_delayed_work(&clock->overflow_work, 0);
- else
- mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -684,7 +907,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->overflow_work);
+ cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index 31600924bdc3..a12c7da618a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,24 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
+{
+ u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
+
+ return (rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
+}
+
+static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
+{
+ u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format);
+
+ return (sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
+}
+
+typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
+
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
@@ -45,17 +63,27 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
+ struct mlx5_timer *timer = &clock->timer;
unsigned int seq;
u64 nsec;
do {
seq = read_seqbegin(&clock->lock);
- nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ nsec = timecounter_cyc2time(&timer->tc, timestamp);
} while (read_seqretry(&clock->lock, seq));
return ns_to_ktime(nsec);
}
+#define REAL_TIME_TO_NS(hi, low) (((u64)hi) * NSEC_PER_SEC + ((u64)low))
+
+static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ u64 time = REAL_TIME_TO_NS(timestamp >> 32, timestamp & 0xFFFFFFFF);
+
+ return ns_to_ktime(time);
+}
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
@@ -69,6 +97,12 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
{
return 0;
}
+
+static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ return 0;
+}
#endif
#endif
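The real-time timestamp format used throughout this series packs seconds into the high 32 bits and nanoseconds into the low 32 (perout_conf_real_time() on the write side, REAL_TIME_TO_NS()/mlx5_real_time_cyc2time() on the read side). A userspace-style round-trip check of the pack/unpack pair:

#include <stdint.h>
#include <assert.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t sec = 1600000000, nsec = 123456789;
	uint64_t ts = nsec | (sec << 32);	/* pack, as perout_conf_real_time() */
	uint64_t ns = (ts >> 32) * NSEC_PER_SEC + (ts & 0xFFFFFFFF);	/* REAL_TIME_TO_NS() */

	assert(ns == sec * NSEC_PER_SEC + nsec);
	return 0;
}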
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 947f346bdc2d..381325b4a863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -141,9 +141,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
@@ -541,13 +538,13 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *ft;
- struct prio *prio_s = NULL;
struct fs_chain *chain_s;
struct list_head *pos;
+ struct prio *prio_s;
u32 *flow_group_in;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 3a9fa629503f..d046db7bb047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
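mlx5_core_net() simply resolves the net namespace through the device's devlink instance. A hypothetical caller showing the intended use:

#include <net/net_namespace.h>

/* Hypothetical helper: does the device currently live in @net? */
static bool mlx5_dev_in_net(struct mlx5_core_dev *dev, struct net *net)
{
	return net_eq(mlx5_core_net(dev), net);
}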
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..c568896cfb23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -73,6 +73,9 @@
#include "ecpf.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
+#include "sf/vhca_event.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -82,7 +85,6 @@ unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
-#define MLX5_DEFAULT_PROF 2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
@@ -235,8 +237,8 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
- (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
- (u16)(LINUX_VERSION_CODE & 0xffff));
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
@@ -567,6 +569,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);
+ mlx5_vhca_state_cap_handle(dev, set_hca_cap);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -884,6 +888,24 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eswitch_cleanup;
}
+ err = mlx5_vhca_event_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
+ goto err_fpga_cleanup;
+ }
+
+ err = mlx5_sf_hw_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
+ goto err_sf_hw_table_cleanup;
+ }
+
+ err = mlx5_sf_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF table %d\n", err);
+ goto err_sf_table_cleanup;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -894,6 +916,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_sf_table_cleanup:
+ mlx5_sf_hw_table_cleanup(dev);
+err_sf_hw_table_cleanup:
+ mlx5_vhca_event_cleanup(dev);
+err_fpga_cleanup:
+ mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
@@ -925,6 +953,9 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_sf_table_cleanup(dev);
+ mlx5_sf_hw_table_cleanup(dev);
+ mlx5_vhca_event_cleanup(dev);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_sriov_cleanup(dev);
@@ -1129,6 +1160,14 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_vhca_event_start(dev);
+
+ err = mlx5_sf_hw_table_create(dev);
+ if (err) {
+ mlx5_core_err(dev, "sf table create failed %d\n", err);
+ goto err_vhca;
+ }
+
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1141,11 +1180,16 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_sf_dev_table_create(dev);
+
return 0;
err_sriov:
mlx5_ec_cleanup(dev);
err_ec:
+ mlx5_sf_hw_table_destroy(dev);
+err_vhca:
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1171,8 +1215,11 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
@@ -1283,7 +1330,7 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
-static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
@@ -1305,6 +1352,8 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
+ INIT_LIST_HEAD(&priv->traps);
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1333,7 +1382,7 @@ err_health_init:
return err;
}
-static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1368,8 +1417,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1394,7 +1445,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- devlink_reload_enable(devlink);
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_reload_enable(devlink);
return 0;
err_load_one:
@@ -1403,6 +1455,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
@@ -1673,6 +1726,10 @@ static int __init init(void)
if (err)
goto err_debug;
+ err = mlx5_sf_driver_register();
+ if (err)
+ goto err_sf;
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
if (err) {
@@ -1683,6 +1740,8 @@ static int __init init(void)
return 0;
+err_sf:
+ pci_unregister_driver(&mlx5_core_driver);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1693,6 +1752,7 @@ static void __exit cleanup(void)
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
#endif
+ mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0a0302ce7144..efe403c7e354 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -117,6 +117,8 @@ enum mlx5_semaphore_space_address {
MLX5_SEMAPHORE_SW_RESET = 0x20,
};
+#define MLX5_DEFAULT_PROF 2
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -176,6 +178,7 @@ struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
@@ -257,6 +260,17 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_SF;
+}
+
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 9eb51f06d3ae..50af84e76fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -56,6 +56,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->key |= mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
+ init_waitqueue_head(&mkey->wait);
mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index eb956ce904bc..c0656d4782e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
- u16 func_id;
+ u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
@@ -74,12 +74,17 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+ return (u32)func_id | (ec_function << 16);
+}
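get_function() widens the page-tracking key so that a host function id and the same id on the embedded CPU no longer collide: the 16-bit func_id sits in the low bits and the ec_function flag at bit 16. A standalone check of the packing (userspace replica of the helper above):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_function(uint16_t func_id, bool ec_function)
{
	return (uint32_t)func_id | ((uint32_t)ec_function << 16);
}

int main(void)
{
	assert(get_function(5, false) == 0x00005);	/* host function 5 */
	assert(get_function(5, true)  == 0x10005);	/* same id, ECPF side */
	return 0;
}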
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
if (!root)
return ERR_PTR(-ENOMEM);
- err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;
- root = page_root_per_func_id(dev, func_id);
+ root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
nfp->addr = addr;
nfp->page = page;
- nfp->func_id = func_id;
+ nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
- u32 func_id)
+ u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
- if (iter->func_id != func_id)
+ if (iter->function != function)
continue;
fp = iter;
}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;
- root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
goto map;
}
- err = insert_page(dev, addr, page, func_id);
+ err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr, func_id);
+ err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
- err = alloc_system_page(dev, func_id);
+ err = alloc_system_page(dev, function);
if (err)
goto out_4k;
@@ -384,7 +390,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
return err;
}
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
+ bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
+ ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
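The pagealloc conversion above keys every xarray lookup on a single u32 that folds the 16-bit function id together with the embedded-CPU bit. The get_function() helper itself is introduced earlier in this patch; a plausible reconstruction, shown only to make the keying scheme explicit:

/* Reconstruction for illustration: combine the function id and the
 * embedded-CPU indication into one lookup key, keeping the ec bit
 * just above the 16-bit function id range.
 */
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}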
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6fd974920394..a61e09aff152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -30,6 +30,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
if (!irq_table)
return -ENOMEM;
@@ -40,6 +43,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
+ if (mlx5_core_is_sf(dev))
+ return;
+
kvfree(dev->priv.irq_table);
}
@@ -268,6 +274,9 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
int nvec;
int err;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_IRQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
@@ -319,6 +328,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
struct mlx5_irq_table *table = dev->priv.irq_table;
int i;
+ if (mlx5_core_is_sf(dev))
+ return;
+
/* free_irq requires that affinity and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
@@ -332,3 +344,11 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
kfree(table->irq);
}
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.irq_table;
+#endif
+ return dev->priv.irq_table;
+}
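With the early returns above, an SF never allocates an IRQ table of its own; mlx5_irq_table_get() is the accessor the rest of the driver uses to reach the parent PF's table. A minimal sketch of a call site, with the wrapper name being hypothetical:

/* Sketch: any EQ/IRQ setup path can stay oblivious to whether dev is
 * a PF or an SF; the accessor hides the parent indirection.
 */
static struct mlx5_irq_table *pick_irq_table(struct mlx5_core_dev *dev)
{
	return mlx5_irq_table_get(dev); /* parent's table when dev is an SF */
}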
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
new file mode 100644
index 000000000000..0777be24a307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "qos.h"
+
+#define MLX5_QOS_DEFAULT_DWRR_UID 0
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, qos))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_bw_share))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_rate_limit))
+ return false;
+ return true;
+}
+
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);
+}
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ void *attr;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
+{
+ return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
+}
+
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ u32 bitmask = 0;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+ return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id, bitmask);
+}
+
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id)
+{
+ return mlx5_destroy_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, id);
+}
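Together these helpers expose a small NIC scheduling hierarchy: a DWRR root TSAR, optional inner TSAR nodes, and queue-group leaves. A hedged usage sketch, assuming a valid mdev and treating a max_average_bw of 0 as "no cap" (an assumption, not stated by this file):

/* Illustrative only: build a one-level tree of root + single leaf. */
static int build_minimal_qos_tree(struct mlx5_core_dev *mdev)
{
	u32 root_id, leaf_id;
	int err;

	if (!mlx5_qos_is_supported(mdev))
		return -EOPNOTSUPP;

	err = mlx5_qos_create_root_node(mdev, &root_id);
	if (err)
		return err;

	err = mlx5_qos_create_leaf_node(mdev, root_id, 1, 0, &leaf_id);
	if (err)
		mlx5_qos_destroy_node(mdev, root_id);
	return err;
}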
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
new file mode 100644
index 000000000000..125e4e47e6f7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_QOS_H
+#define __MLX5_QOS_H
+
+#include "mlx5_core.h"
+
+#define MLX5_DEBUG_QOS_MASK BIT(4)
+
+#define qos_err(mdev, fmt, ...) \
+ mlx5_core_err(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_warn(mdev, fmt, ...) \
+ mlx5_core_warn(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_dbg(mdev, fmt, ...) \
+ mlx5_core_dbg_mask(mdev, MLX5_DEBUG_QOS_MASK, "QoS: " fmt, ##__VA_ARGS__)
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev);
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+ u32 max_avg_bw, u32 id);
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
new file mode 100644
index 000000000000..a8d75c2f0275
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "priv.h"
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {};
+
+ MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF);
+ MLX5_SET(alloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_sf_in)] = {};
+
+ MLX5_SET(dealloc_sf_in, in, opcode, MLX5_CMD_OP_DEALLOC_SF);
+ MLX5_SET(dealloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ MLX5_SET(enable_hca_in, in, function_id, func_id);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, func_id);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
new file mode 100644
index 000000000000..b265f27b2166
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "sf/vhca_event.h"
+#include "sf/sf.h"
+#include "sf/mlx5_ifc_vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_dev_table {
+ struct xarray devices;
+ unsigned int max_sfs;
+ phys_addr_t base_address;
+ u64 sf_bar_length;
+ struct notifier_block nb;
+ struct mlx5_core_dev *dev;
+};
+
+static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
+}
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!mlx5_sf_dev_supported(dev))
+ return false;
+
+ return !xa_empty(&table->devices);
+}
+
+static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
+}
+static DEVICE_ATTR_RO(sfnum);
+
+static struct attribute *sf_device_attrs[] = {
+ &dev_attr_sfnum.attr,
+ NULL,
+};
+
+static const struct attribute_group sf_attr_group = {
+ .attrs = sf_device_attrs,
+};
+
+static const struct attribute_group *sf_attr_groups[2] = {
+ &sf_attr_group,
+ NULL
+};
+
+static void mlx5_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_adev_idx_free(adev->id);
+ kfree(sf_dev);
+}
+
+static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
+{
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
+
+static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+ struct mlx5_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ int id;
+
+ id = mlx5_adev_idx_alloc();
+ if (id < 0) {
+ err = id;
+ goto add_err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ mlx5_adev_idx_free(id);
+ err = -ENOMEM;
+ goto add_err;
+ }
+ pdev = dev->pdev;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = MLX5_SF_DEV_ID_NAME;
+ sf_dev->adev.dev.release = mlx5_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+ sf_dev->adev.dev.groups = sf_attr_groups;
+ sf_dev->sfnum = sfnum;
+ sf_dev->parent_mdev = dev;
+
+ if (!table->max_sfs) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ err = -EOPNOTSUPP;
+ goto add_err;
+ }
+ sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ goto add_err;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ put_device(&sf_dev->adev.dev);
+ goto add_err;
+ }
+
+ err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
+ if (err)
+ goto xa_err;
+ return;
+
+xa_err:
+ mlx5_sf_dev_remove(sf_dev);
+add_err:
+ mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
+ sf_index, sfnum, err);
+}
+
+static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ xa_erase(&table->devices, sf_index);
+ mlx5_sf_dev_remove(sf_dev);
+}
+
+static int
+mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
+{
+ struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_dev *sf_dev;
+ u16 sf_index;
+
+ sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ sf_dev = xa_load(&table->devices, sf_index);
+ switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_ALLOCATED:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ break;
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ else
+ mlx5_core_err(table->dev,
+ "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
+ sf_index, event->sw_function_id);
+ break;
+ case MLX5_VHCA_STATE_ACTIVE:
+ if (!sf_dev)
+ mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_core_dev *dev = table->dev;
+ u16 max_functions;
+ u16 function_id;
+ int err = 0;
+ bool ecpu;
+ int i;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ ecpu = mlx5_read_embedded_cpu(dev);
+ /* Arm each vhca context so that vhca state change events are delivered. */
+ for (i = 0; i < max_functions; i++) {
+ err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+ if (err)
+ return err;
+
+ function_id++;
+ }
+ return 0;
+}
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table;
+ unsigned int max_sfs;
+ int err;
+
+ if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_err;
+ }
+
+ table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ table->dev = dev;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
+ table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
+ table->base_address = pci_resource_start(dev->pdev, 2);
+ table->max_sfs = max_sfs;
+ xa_init(&table->devices);
+ dev->priv.sf_dev_table = table;
+
+ err = mlx5_vhca_event_notifier_register(dev, &table->nb);
+ if (err)
+ goto vhca_err;
+ err = mlx5_sf_dev_vhca_arm_all(table);
+ if (err)
+ goto arm_err;
+ mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
+ return;
+
+arm_err:
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+vhca_err:
+ table->max_sfs = 0;
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+table_err:
+ mlx5_core_err(dev, "SF DEV table create err = %d\n", err);
+}
+
+static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_sf_dev *sf_dev;
+ unsigned long index;
+
+ xa_for_each(&table->devices, index, sf_dev) {
+ xa_erase(&table->devices, index);
+ mlx5_sf_dev_remove(sf_dev);
+ }
+}
+
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+
+ /* Now that the event handler is not running, it is safe to destroy
+ * the SF devices without racing against it.
+ */
+ mlx5_sf_dev_destroy_all(table);
+
+ WARN_ON(!xa_empty(&table->devices));
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
new file mode 100644
index 000000000000..4de02902aef1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_DEV_H__
+#define __MLX5_SF_DEV_H__
+
+#ifdef CONFIG_MLX5_SF
+
+#include <linux/auxiliary_bus.h>
+
+#define MLX5_SF_DEV_ID_NAME "sf"
+
+struct mlx5_sf_dev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *parent_mdev;
+ struct mlx5_core_dev *mdev;
+ phys_addr_t bar_base_addr;
+ u32 sfnum;
+};
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_driver_register(void);
+void mlx5_sf_driver_unregister(void);
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
+
+#else
+
+static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_driver_register(void)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_driver_unregister(void)
+{
+}
+
+static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
new file mode 100644
index 000000000000..c4bf555c25ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "devlink.h"
+
+static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = mlx5_devlink_alloc();
+ if (!devlink)
+ return -ENOMEM;
+
+ mdev = devlink_priv(devlink);
+ mdev->device = &adev->dev;
+ mdev->pdev = sf_dev->parent_mdev->pdev;
+ mdev->bar_addr = sf_dev->bar_base_addr;
+ mdev->iseg_base = sf_dev->bar_base_addr;
+ mdev->coredev_type = MLX5_COREDEV_SF;
+ mdev->priv.parent_mdev = sf_dev->parent_mdev;
+ mdev->priv.adev_idx = adev->id;
+ sf_dev->mdev = mdev;
+
+ err = mlx5_mdev_init(mdev, MLX5_DEFAULT_PROF);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_mdev_init on err=%d\n", err);
+ goto mdev_err;
+ }
+
+ mdev->iseg = ioremap(mdev->iseg_base, sizeof(*mdev->iseg));
+ if (!mdev->iseg) {
+ mlx5_core_warn(mdev, "remap error\n");
+ err = -ENOMEM;
+ goto remap_err;
+ }
+
+ err = mlx5_load_one(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
+ goto load_one_err;
+ }
+ return 0;
+
+load_one_err:
+ iounmap(mdev->iseg);
+remap_err:
+ mlx5_mdev_uninit(mdev);
+mdev_err:
+ mlx5_devlink_free(devlink);
+ return err;
+}
+
+static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, true);
+ iounmap(sf_dev->mdev->iseg);
+ mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_devlink_free(devlink);
+}
+
+static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_unload_one(sf_dev->mdev, false);
+}
+
+static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+ { .name = MLX5_ADEV_NAME "." MLX5_SF_DEV_ID_NAME, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_sf_dev_id_table);
+
+static struct auxiliary_driver mlx5_sf_driver = {
+ .name = MLX5_SF_DEV_ID_NAME,
+ .probe = mlx5_sf_dev_probe,
+ .remove = mlx5_sf_dev_remove,
+ .shutdown = mlx5_sf_dev_shutdown,
+ .id_table = mlx5_sf_dev_id_table,
+};
+
+int mlx5_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&mlx5_sf_driver);
+}
+
+void mlx5_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlx5_sf_driver);
+}
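The id table is what ties this driver to the devices created in dev.c: dev.c sets adev.name to "sf", the auxiliary bus prefixes it with the registering module's name, and the resulting "mlx5_core.sf" string is matched against the entry above. A simplified sketch of that match rule (the real comparison lives in the auxiliary bus core):

/* Simplified illustration of auxiliary bus name matching. */
static bool sf_id_matches(const char *composed_dev_name)
{
	return !strcmp(composed_dev_name,
		       MLX5_ADEV_NAME "." MLX5_SF_DEV_ID_NAME);
}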
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
new file mode 100644
index 000000000000..c2ba41bb7a70
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "eswitch.h"
+#include "priv.h"
+#include "sf/dev/dev.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf {
+ struct devlink_port dl_port;
+ unsigned int port_index;
+ u16 id;
+ u16 hw_fn_id;
+ u16 hw_state;
+};
+
+struct mlx5_sf_table {
+ struct mlx5_core_dev *dev; /* To refer from notifier context. */
+ struct xarray port_indices; /* port index based lookup. */
+ refcount_t refcount;
+ struct completion disable_complete;
+ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
+ struct notifier_block esw_nb;
+ struct notifier_block vhca_nb;
+ u8 ecpu: 1;
+};
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
+{
+ return xa_load(&table->port_indices, port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
+{
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ xa_for_each(&table->port_indices, index, sf) {
+ if (sf->hw_fn_id == fn_id)
+ return sf;
+ }
+ return NULL;
+}
+
+static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+}
+
+static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ xa_erase(&table->port_indices, sf->port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+{
+ unsigned int dl_port_index;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int id_err;
+ int err;
+
+ id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+ if (id_err < 0) {
+ err = id_err;
+ goto id_err;
+ }
+
+ sf = kzalloc(sizeof(*sf), GFP_KERNEL);
+ if (!sf) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ sf->id = id_err;
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
+ sf->port_index = dl_port_index;
+ sf->hw_fn_id = hw_fn_id;
+ sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+
+ err = mlx5_sf_id_insert(table, sf);
+ if (err)
+ goto insert_err;
+
+ return sf;
+
+insert_err:
+ kfree(sf);
+alloc_err:
+ mlx5_sf_hw_table_sf_free(table->dev, id_err);
+id_err:
+ if (err == -EEXIST)
+ NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
+ return ERR_PTR(err);
+}
+
+static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+ kfree(sf);
+}
+
+static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return NULL;
+
+ return refcount_inc_not_zero(&table->refcount) ? table : NULL;
+}
+
+static void mlx5_sf_table_put(struct mlx5_sf_table *table)
+{
+ if (refcount_dec_and_test(&table->refcount))
+ complete(&table->disable_complete);
+}
+
+static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_ACTIVE:
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_STATE_ACTIVE;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ default:
+ return DEVLINK_PORT_FN_STATE_INACTIVE;
+ }
+}
+
+static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_ACTIVE:
+ default:
+ return DEVLINK_PORT_FN_OPSTATE_DETACHED;
+ }
+}
+
+static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
+{
+ return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
+}
+
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table)
+ return -EOPNOTSUPP;
+
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -EOPNOTSUPP;
+ goto sf_err;
+ }
+ mutex_lock(&table->sf_state_lock);
+ *state = mlx5_sf_to_devlink_state(sf->hw_state);
+ *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (mlx5_sf_is_active(sf))
+ return 0;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
+ return -EINVAL;
+
+ err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
+ return 0;
+}
+
+static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (!mlx5_sf_is_active(sf))
+ return 0;
+
+ err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
+ return 0;
+}
+
+static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ struct mlx5_sf *sf,
+ enum devlink_port_fn_state state)
+{
+ int err = 0;
+
+ mutex_lock(&table->sf_state_lock);
+ if (state == mlx5_sf_to_devlink_state(sf->hw_state))
+ goto out;
+ if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
+ err = mlx5_sf_activate(dev, sf);
+ else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
+ err = mlx5_sf_deactivate(dev, sf);
+ else
+ err = -EINVAL;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return err;
+}
+
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = mlx5_sf_state_set(dev, table, sf, state);
+out:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int err;
+
+ sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+ if (IS_ERR(sf))
+ return PTR_ERR(sf);
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
+ err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+ if (err)
+ goto esw_err;
+ *new_port_index = sf->port_index;
+ return 0;
+
+esw_err:
+ mlx5_sf_free(table, sf);
+ return err;
+}
+
+static int
+mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+ if (!new_attr->sfnum_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "User must provide unique sfnum. Driver does not support auto assignment");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->controller_valid && new_attr->controller) {
+ NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ int err;
+
+ err = mlx5_sf_new_check_attr(dev, new_attr, extack);
+ if (err)
+ return err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_sf_free(table, sf);
+ } else if (mlx5_sf_is_active(sf)) {
+ /* Even if it is active, it is treated as in_use because by the time
+ * it is disabled here, it may already be in use. So it is safe to
+ * always wait for the event to ensure that it is recycled only after
+ * firmware confirms that the driver has detached it.
+ */
+ mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ } else {
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ }
+}
+
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, port_index);
+ if (!sf) {
+ err = -ENODEV;
+ goto sf_err;
+ }
+
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+
+ mutex_lock(&table->sf_state_lock);
+ mlx5_sf_dealloc(table, sf);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
+ new_state == MLX5_VHCA_STATE_ALLOCATED)
+ return true;
+
+ return false;
+}
+
+static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ bool update = false;
+ struct mlx5_sf *sf;
+
+ table = mlx5_sf_table_try_get(table->dev);
+ if (!table)
+ return 0;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
+ if (!sf)
+ goto sf_err;
+
+ /* When a driver attaches to or detaches from a function, an event
+ * notifies the state change.
+ */
+ update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
+ if (update)
+ sf->hw_state = event->new_vhca_state;
+sf_err:
+ mutex_unlock(&table->sf_state_lock);
+ mlx5_sf_table_put(table);
+ return 0;
+}
+
+static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ init_completion(&table->disable_complete);
+ refcount_set(&table->refcount, 1);
+}
+
+static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ /* At this point, no new user commands can start and no vhca event can
+ * arrive. It is safe to destroy all user created SFs.
+ */
+ xa_for_each(&table->port_indices, index, sf) {
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_dealloc(table, sf);
+ }
+}
+
+static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ if (!refcount_read(&table->refcount))
+ return;
+
+ /* Balances with refcount_set; drop the reference so that no new user
+ * command can start and no new vhca event handler can run.
+ */
+ mlx5_sf_table_put(table);
+ wait_for_completion(&table->disable_complete);
+
+ mlx5_sf_deactivate_all(table);
+}
+
+static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ const struct mlx5_esw_event_info *mode = data;
+
+ switch (mode->new_mode) {
+ case MLX5_ESWITCH_OFFLOADS:
+ mlx5_sf_table_enable(table);
+ break;
+ case MLX5_ESWITCH_NONE:
+ mlx5_sf_table_disable(table);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+}
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table;
+ int err;
+
+ if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ mutex_init(&table->sf_state_lock);
+ table->dev = dev;
+ xa_init(&table->port_indices);
+ dev->priv.sf_table = table;
+ refcount_set(&table->refcount, 0);
+ table->esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
+ if (err)
+ goto reg_err;
+
+ table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ return 0;
+
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+reg_err:
+ mutex_destroy(&table->sf_state_lock);
+ kfree(table);
+ dev->priv.sf_table = NULL;
+ return err;
+}
+
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+ WARN_ON(refcount_read(&table->refcount));
+ mutex_destroy(&table->sf_state_lock);
+ WARN_ON(!xa_empty(&table->port_indices));
+ kfree(table);
+}
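The refcount-plus-completion pair in mlx5_sf_table is the synchronization backbone of this file: every user command and the vhca notifier enter through mlx5_sf_table_try_get() and leave through mlx5_sf_table_put(), while disable drops the initial reference and waits. A condensed, generic sketch of the pattern, detached from the mlx5 types:

#include <linux/refcount.h>
#include <linux/completion.h>

/* Generic sketch of the try-get/put teardown pattern used above. */
struct guarded {
	refcount_t refcount;
	struct completion done;
};

static void guarded_enable(struct guarded *g)
{
	init_completion(&g->done);
	refcount_set(&g->refcount, 1);	/* the "initial" reference */
}

static struct guarded *guarded_try_get(struct guarded *g)
{
	return refcount_inc_not_zero(&g->refcount) ? g : NULL;
}

static void guarded_put(struct guarded *g)
{
	if (refcount_dec_and_test(&g->refcount))
		complete(&g->done);
}

static void guarded_disable(struct guarded *g)
{
	guarded_put(g);			/* drop the initial reference */
	wait_for_completion(&g->done);	/* wait out in-flight users */
}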
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
new file mode 100644
index 000000000000..58b6be0b03d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+#include <linux/mlx5/driver.h>
+#include "vhca_event.h"
+#include "priv.h"
+#include "sf.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_hw {
+ u32 usr_sfnum;
+ u8 allocated: 1;
+ u8 pending_delete: 1;
+};
+
+struct mlx5_sf_hw_table {
+ struct mlx5_core_dev *dev;
+ struct mlx5_sf_hw *sfs;
+ int max_local_functions;
+ u8 ecpu: 1;
+ struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
+ struct notifier_block vhca_nb;
+};
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+{
+ return sw_id + mlx5_sf_start_function_id(dev);
+}
+
+static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+{
+ return hw_id - mlx5_sf_start_function_id(dev);
+}
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ int sw_id = -ENOSPC;
+ u16 hw_fn_id;
+ int err;
+ int i;
+
+ if (!table->max_local_functions)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&table->table_lock);
+ /* Check whether an SF with the same sfnum already exists. */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
+ err = -EEXIST;
+ goto exist_err;
+ }
+ }
+
+ /* Find a free entry in the array and allocate it. */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (!table->sfs[i].allocated) {
+ table->sfs[i].usr_sfnum = usr_sfnum;
+ table->sfs[i].allocated = true;
+ sw_id = i;
+ break;
+ }
+ }
+ if (sw_id == -ENOSPC) {
+ err = -ENOSPC;
+ goto err;
+ }
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
+ err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+ if (err)
+ goto err;
+
+ err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+ if (err)
+ goto vhca_err;
+
+ mutex_unlock(&table->table_lock);
+ return sw_id;
+
+vhca_err:
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+err:
+ table->sfs[i].allocated = false;
+exist_err:
+ mutex_unlock(&table->table_lock);
+ return err;
+}
+
+static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u16 hw_fn_id;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ table->sfs[id].pending_delete = false;
+}
+
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ mutex_lock(&table->table_lock);
+ _mlx5_sf_hw_id_free(dev, id);
+ mutex_unlock(&table->table_lock);
+}
+
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ u16 hw_fn_id;
+ u8 state;
+ int err;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+ mutex_lock(&table->table_lock);
+ err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+ if (err)
+ goto err;
+ state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+ if (state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ } else {
+ table->sfs[id].pending_delete = true;
+ }
+err:
+ mutex_unlock(&table->table_lock);
+}
+
+static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+ int i;
+
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated)
+ _mlx5_sf_hw_id_free(table->dev, i);
+ }
+}
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table;
+ struct mlx5_sf_hw *sfs;
+ int max_functions;
+
+ if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
+ if (!sfs)
+ goto table_err;
+
+ mutex_init(&table->table_lock);
+ table->dev = dev;
+ table->sfs = sfs;
+ table->max_local_functions = max_functions;
+ table->ecpu = mlx5_read_embedded_cpu(dev);
+ dev->priv.sf_hw_table = table;
+ mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+ return 0;
+
+table_err:
+ kfree(table);
+ return -ENOMEM;
+}
+
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mutex_destroy(&table->table_lock);
+ kfree(table->sfs);
+ kfree(table);
+}
+
+static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_hw *sf_hw;
+ u16 sw_id;
+
+ if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ return 0;
+
+ sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
+ sf_hw = &table->sfs[sw_id];
+
+ mutex_lock(&table->table_lock);
+ /* Firmware has confirmed that the SF driver is finally detached;
+ * hence recycle the SF hardware id for reuse.
+ */
+ if (sf_hw->allocated && sf_hw->pending_delete)
+ _mlx5_sf_hw_id_free(table->dev, sw_id);
+ mutex_unlock(&table->table_lock);
+ return 0;
+}
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return 0;
+
+ table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+}
+
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ /* Dealloc SFs whose firmware event has been missed. */
+ mlx5_sf_hw_dealloc_all(table);
+}
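The sw_id/hw_id helpers at the top of this file are a plain offset translation against the sf_base_id capability; a worked example under an assumed base id:

/* Assume MLX5_CAP_GEN(dev, sf_base_id) == 100 (illustrative value):
 *   mlx5_sf_sw_to_hw_id(dev, 0)   == 100   first SF
 *   mlx5_sf_sw_to_hw_id(dev, 5)   == 105
 *   mlx5_sf_hw_to_sw_id(dev, 105) == 5     inverse mapping
 */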
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
new file mode 100644
index 000000000000..1daf5a122ba3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_IFC_VHCA_EVENT_H__
+#define __MLX5_IFC_VHCA_EVENT_H__
+
+enum mlx5_ifc_vhca_state {
+ MLX5_VHCA_STATE_INVALID = 0x0,
+ MLX5_VHCA_STATE_ALLOCATED = 0x1,
+ MLX5_VHCA_STATE_ACTIVE = 0x2,
+ MLX5_VHCA_STATE_IN_USE = 0x3,
+ MLX5_VHCA_STATE_TEARDOWN_REQUEST = 0x4,
+};
+
+struct mlx5_ifc_vhca_state_context_bits {
+ u8 arm_change_event[0x1];
+ u8 reserved_at_1[0xb];
+ u8 vhca_state[0x4];
+ u8 reserved_at_10[0x10];
+
+ u8 sw_function_id[0x20];
+
+ u8 reserved_at_40[0x80];
+};
+
+struct mlx5_ifc_query_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+struct mlx5_ifc_query_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_vhca_state_field_select_bits {
+ u8 reserved_at_0[0x1e];
+ u8 sw_function_id[0x1];
+ u8 arm_change_event[0x1];
+};
+
+struct mlx5_ifc_modify_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ struct mlx5_ifc_vhca_state_field_select_bits vhca_state_field_select;
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+#endif
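None of these _bits structs are dereferenced directly; each u8 member's array length is a field width in bits, and the MLX5_SET/MLX5_GET accessors compute offsets from them. A short sketch of decoding a QUERY_VHCA_STATE response, mirroring the calls in hw_table.c (the helper name is hypothetical and error handling is elided):

static u8 read_vhca_state(struct mlx5_core_dev *dev, u16 fn_id, bool ecpu)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};

	mlx5_cmd_query_vhca_state(dev, fn_id, ecpu, out, sizeof(out));
	return MLX5_GET(query_vhca_state_out, out,
			vhca_state_context.vhca_state);
}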
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
new file mode 100644
index 000000000000..cb02a51d0986
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_PRIV_H__
+#define __MLX5_SF_PRIV_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
new file mode 100644
index 000000000000..0b6aea1e6a94
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_H__
+#define __MLX5_SF_H__
+
+#include <linux/mlx5/driver.h>
+
+static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
+#ifdef CONFIG_MLX5_SF
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf);
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ if (!mlx5_sf_supported(dev))
+ return 0;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ return MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ return 1 << MLX5_CAP_GEN(dev, log_max_sf);
+}
+
+#else
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_MLX5_SF_MANAGER
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *add_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index);
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+#else
+
+static inline int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
new file mode 100644
index 000000000000..af2f2dd9db25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_ifc_vhca_event.h"
+#include "mlx5_core.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_vhca_state_notifier {
+ struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
+ struct blocking_notifier_head n_head;
+};
+
+struct mlx5_vhca_event_work {
+ struct work_struct work;
+ struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_vhca_state_event event;
+};
+
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
+
+ MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
+ MLX5_SET(query_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *in, u32 inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
+
+ return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
+}
+
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
+
+ return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+}
+
+static void
+mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ int err;
+
+ err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+ if (err)
+ return;
+
+ event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.sw_function_id);
+ event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.vhca_state);
+
+ mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+
+ blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+}
+
+static void mlx5_vhca_state_work_handler(struct work_struct *_work)
+{
+ struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
+ struct mlx5_vhca_state_notifier *notifier = work->notifier;
+ struct mlx5_core_dev *dev = notifier->dev;
+
+ mlx5_vhca_event_notify(dev, &work->event);
+}
+
+static int
+mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_vhca_state_notifier *notifier =
+ mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_vhca_event_work *work;
+ struct mlx5_eqe *eqe = data;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+ INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
+ work->notifier = notifier;
+ work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
+ work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
+ mlx5_events_work_enqueue(notifier->dev, &work->work);
+ return NOTIFY_OK;
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
+}
+
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!mlx5_vhca_event_supported(dev))
+ return 0;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ dev->priv.vhca_state_notifier = notifier;
+ notifier->dev = dev;
+ BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
+ MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
+ return 0;
+}
+
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ kfree(dev->priv.vhca_state_notifier);
+ dev->priv.vhca_state_notifier = NULL;
+}
+
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_register(dev, &notifier->nb);
+}
+
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_unregister(dev, &notifier->nb);
+}
+
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ if (!dev->priv.vhca_state_notifier)
+ return -EOPNOTSUPP;
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+}
+
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+}
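Consumers of this file interact only with the blocking notifier chain: register a notifier_block, receive a struct mlx5_vhca_state_event, return 0. A minimal consumer sketch modeled on the handlers in dev.c and devlink.c; the handler and variable names are hypothetical:

static int my_vhca_handler(struct notifier_block *nb, unsigned long opcode,
			   void *data)
{
	const struct mlx5_vhca_state_event *event = data;

	/* function_id, sw_function_id and new_vhca_state were filled in
	 * by mlx5_vhca_event_notify() before the chain was invoked.
	 */
	if (event->new_vhca_state == MLX5_VHCA_STATE_ACTIVE)
		pr_debug("SF fn 0x%x became active\n", event->function_id);
	return 0;
}

static struct notifier_block my_nb = { .notifier_call = my_vhca_handler };
/* mlx5_vhca_event_notifier_register(dev, &my_nb) returns -EOPNOTSUPP when
 * vhca events are unsupported on this device.
 */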
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
new file mode 100644
index 000000000000..1fe1ec6f4d4b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_VHCA_EVENT_H__
+#define __MLX5_VHCA_EVENT_H__
+
+#ifdef CONFIG_MLX5_SF
+
+struct mlx5_vhca_state_event {
+ u16 function_id;
+ u16 sw_function_id;
+ u8 new_vhca_state;
+ bool ecpu;
+};
+
+static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN_MAX(dev, vhca_state);
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen);
+#else
+
+static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+}
+
+static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index df1363a34a42..28a7971cac6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
};
-struct dr_action_modify_field_conv {
- u16 hw_field;
- u8 start;
- u8 end;
- u8 l3_type;
- u8 l4_type;
-};
-
-static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
- },
-};
-
-#define MAX_VLANS 2
-struct dr_action_vlan_info {
- int count;
- u32 headers[MAX_VLANS];
-};
-
-struct dr_action_apply_attr {
- u32 modify_index;
- u16 modify_actions;
- u32 decap_index;
- u16 decap_actions;
- u8 decap_with_vlan:1;
- u64 final_icm_addr;
- u32 flow_tag;
- u32 ctr_id;
- u16 gvmi;
- u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
- struct dr_action_vlan_info vlans;
-};
-
static int
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
enum mlx5dr_action_type *action_type)
@@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
return 0;
}
-static void dr_actions_init_next_ste(u8 **last_ste,
- u32 *added_stes,
- enum mlx5dr_ste_entry_type entry_type,
- u16 gvmi)
-{
- (*added_stes)++;
- *last_ste += DR_STE_SIZE;
- mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
-}
-
-static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
-
- /* We want to make sure the modify header comes before L2
- * encapsulation. The reason for that is that we support
- * modify headers for outer headers only
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_push_vlan(last_ste,
- attr->vlans.headers[i],
- encap);
- }
- }
-
- if (encap) {
- /* Modify header and encapsulation require a different STEs.
- * Since modify header STE format doesn't support encapsulation
- * tunneling_action.
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
- action_type_set[DR_ACTION_TYP_PUSH_VLAN])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
- /* Whenever prio_tag_required enabled, we can be sure that the
- * previous table (ACL) already push vlan to our packet,
- * And due to HW limitation we need to set this bit, otherwise
- * push vlan + reformat will not work.
- */
- if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
- mlx5dr_ste_set_go_back_bit(last_ste);
- }
-
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-}
-
-static void dr_actions_apply_rx(u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-
- if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->decap_actions,
- attr->decap_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
- mlx5dr_ste_set_rx_decap(last_ste);
-
- if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i ||
- action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
- action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_set_rx_pop_vlan(last_ste);
- }
- }
-
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_MODIFY_PKT,
- attr->gvmi);
- else
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
-
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TAG]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
- }
-}
-
/* Apply the actions on the rule STE array starting from the last_ste.
* Actions might require more than one STE, new_num_stes will return
* the new size of the STEs array, rule with actions.
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
enum mlx5dr_ste_entry_type ste_type,
u8 *action_type_set,
u8 *last_ste,
- struct dr_action_apply_attr *attr,
+ struct mlx5dr_ste_actions_attr *attr,
u32 *new_num_stes)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
if (ste_type == MLX5DR_STE_TYPE_RX)
- dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
else
- dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
- last_ste += added_stes * DR_STE_SIZE;
*new_num_stes += added_stes;
-
- mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
- mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static enum dr_action_domain
@@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_ste_actions_attr attr = {};
struct mlx5dr_action *dest_action = NULL;
u32 state = DR_ACTION_STATE_NO_ACTION;
- struct dr_action_apply_attr attr = {};
enum dr_action_domain action_domain;
bool recalc_cs_required = false;
u8 *last_ste;
@@ -735,7 +447,8 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite.index;
attr.modify_actions = action->rewrite.num_of_actions;
- recalc_cs_required = action->rewrite.modify_ttl;
+ recalc_cs_required = action->rewrite.modify_ttl &&
+ !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -756,12 +469,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
break;
case DR_ACTION_TYP_POP_VLAN:
- max_actions_type = MAX_VLANS;
+ max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- max_actions_type = MAX_VLANS;
- if (attr.vlans.count == MAX_VLANS)
+ max_actions_type = MLX5DR_MAX_VLANS;
+ if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
@@ -789,9 +502,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
- /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect
- * checksum calculation. In this case we will use a FW table to
- * recalculate.
+ /* Due to a HW bug in some devices, modifying TTL on RX flows will
+ * cause an incorrect checksum calculation. In this case we will
+ * use a FW table to recalculate.
*/
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
rx_rule && recalc_cs_required && dest_action) {
@@ -817,132 +530,6 @@ out_invalid_arg:
return -EINVAL;
}
-#define CVLAN_ETHERTYPE 0x8100
-#define SVLAN_ETHERTYPE 0x88a8
-#define HDR_LEN_L2_ONLY 14
-#define HDR_LEN_L2_VLAN 18
-#define REWRITE_HW_ACTION_NUM 6
-
-static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
- struct mlx5dr_action *action,
- void *data, size_t data_sz)
-{
- struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
- u64 ops[REWRITE_HW_ACTION_NUM] = {};
- u32 hdr_fld_4b;
- u16 hdr_fld_2b;
- u16 vlan_type;
- bool vlan;
- int i = 0;
- int ret;
-
- vlan = (data_sz != HDR_LEN_L2_ONLY);
-
- /* dmac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* smac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
- MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* dmac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- /* ethertype + (optional) vlan */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 32);
- if (!vlan) {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
- } else {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
- hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
- }
- i++;
-
- /* smac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- if (vlan) {
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- i++;
- }
-
- action->rewrite.data = (void *)ops;
- action->rewrite.num_of_actions = i;
-
- ret = mlx5dr_send_postsend_action(dmn, action);
- if (ret) {
- mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
- return ret;
- }
-
- return 0;
-}
-
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
@@ -1217,21 +804,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
- if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
- return -EINVAL;
+ u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+ int ret;
+
+ ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+ data, data_sz,
+ hw_actions,
+ ACTION_CACHE_LINE_SIZE,
+ &action->rewrite.num_of_actions);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+ return ret;
+ }
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk)
+ if (!action->rewrite.chunk) {
+ mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
+ }
+ action->rewrite.data = (void *)hw_actions;
action->rewrite.index = (action->rewrite.chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
- ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
+ mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite.chunk);
return ret;
}
@@ -1243,6 +843,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
}
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
{
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
@@ -1315,31 +918,13 @@ dec_ref:
return NULL;
}
-static const struct dr_action_modify_field_conv *
-dr_action_modify_get_hw_info(u16 sw_field)
-{
- const struct dr_action_modify_field_conv *hw_action_info;
-
- if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
- goto not_found;
-
- hw_action_info = &dr_action_conv_arr[sw_field];
- if (!hw_action_info->end && !hw_action_info->start)
- goto not_found;
-
- return hw_action_info;
-
-not_found:
- return NULL;
-}
-
static int
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 max_length;
u16 sw_field;
u32 data;
@@ -1349,7 +934,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
@@ -1357,20 +942,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
max_length = hw_action_info->end - hw_action_info->start + 1;
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start);
-
- /* PRM defines that length zero specific length of 32bits */
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- max_length == 32 ? 0 : max_length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_add(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start,
+ max_length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1381,9 +958,9 @@ static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
@@ -1395,7 +972,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
@@ -1411,19 +988,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start + offset);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_set(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start + offset,
+ length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1434,12 +1004,12 @@ static int
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
@@ -1450,8 +1020,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
- hw_src_action_info = dr_action_modify_get_hw_info(src_field);
- hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
+ hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
@@ -1471,23 +1041,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_copy, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
- hw_dst_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
- hw_dst_action_info->start + dst_offset);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
- hw_src_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
- hw_src_action_info->start + dst_offset);
+ mlx5dr_ste_set_action_copy(dmn->ste_ctx,
+ hw_action,
+ hw_dst_action_info->hw_field,
+ hw_dst_action_info->start + dst_offset,
+ length,
+ hw_src_action_info->hw_field,
+ hw_src_action_info->start + src_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
@@ -1499,8 +1059,8 @@ static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 action;
int ret;
@@ -1677,15 +1237,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
- u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
- u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
- u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
+ u16 hw_field = 0;
+ u32 l3_type = 0;
+ u32 l4_type = 0;
*modify_ttl = false;
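
With dr_action_modify_get_hw_info() and the open-coded MLX5_SET() sequences gone, all three sw-to-hw converters resolve fields via mlx5dr_ste_conv_modify_hdr_sw_field() and emit through mlx5dr_ste_set_action_{set,add,copy}(). A sketch of the wrapper shape these call sites imply; the ops-table member name is an assumption, with the per-format bodies landing in the new STE context code:

    /* Assumed thin dispatch, mirroring the call site in
     * dr_action_modify_sw_to_hw_set() above.
     */
    void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
                                   __be64 *hw_action, u8 hw_field,
                                   u8 shifter, u8 length, u32 data)
    {
            ste_ctx->set_action_set((u8 *)hw_action, hw_field,
                                    shifter, length, data);
    }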
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index ba65ec406cfa..30b0136b5bc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
caps->uplink_icm_address_tx =
MLX5_CAP64_ESW_FLOWTABLE(mdev,
sw_steering_uplink_icm_address_tx);
- caps->sw_owner =
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
- sw_owner);
+ caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+ if (!caps->sw_owner_v2)
+ caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
return 0;
}
@@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->nic_tx_allow_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
- caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
- caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+ caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
+ caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
+
+ if (!caps->rx_sw_owner_v2)
+ caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+ if (!caps->tx_sw_owner_v2)
+ caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
- caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+ caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
caps->hdr_modify_icm_addr =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index aa2c2d6c44e6..7091b1be84ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -4,6 +4,11 @@
#include <linux/mlx5/eswitch.h>
#include "dr_types.h"
+#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
+ ((dmn)->info.caps.dmn_type##_sw_owner || \
+ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+
static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
@@ -57,6 +62,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
+ dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+ if (!dmn->ste_ctx) {
+ mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
@@ -181,6 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
return ret;
dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+ dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
@@ -223,18 +235,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (ret)
return ret;
- if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
- mlx5dr_err(dmn, "SW steering is not supported on this device\n");
- return -EOPNOTSUPP;
- }
-
ret = dr_domain_query_fdb_caps(mdev, dmn);
if (ret)
return ret;
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
- if (!dmn->info.caps.rx_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -243,7 +250,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
- if (!dmn->info.caps.tx_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -255,7 +262,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (!dmn->info.caps.eswitch_manager)
return -ENOTSUPP;
- if (!dmn->info.caps.fdb_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
return -ENOTSUPP;
dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
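
Because DR_DOMAIN_SW_STEERING_SUPPORTED() builds the capability names by token pasting, the three domain checks above read identically. Hand-expanding the rx case makes the precedence explicit; this is just the macro spelled out, not new code:

    /* DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx) expands to: */
    dmn->info.caps.rx_sw_owner ||
            (dmn->info.caps.rx_sw_owner_v2 &&
             dmn->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)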
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 6527eb4df153..15673cd10039 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -113,7 +113,8 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
static bool
@@ -135,7 +136,8 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
static bool
@@ -148,12 +150,14 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
@@ -221,6 +225,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
struct mlx5dr_ste_build *sb;
bool inner, rx;
@@ -259,80 +264,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
- mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
- mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
- mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
+ mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+ &mask, dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
- mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn))
- mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc))
- mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Inner */
@@ -343,50 +357,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = true;
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
- mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
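
Each builder in this function now receives the ste_ctx resolved once at domain init, replacing direct MLX5_SET() calls with a per-format ops table. A sketch of the shape, limited to the members this diff actually exercises; the dispatch body and table names are assumptions:

    struct mlx5dr_ste_ctx {
            /* Subset visible in this diff; the real struct is larger. */
            void (*set_next_lu_type)(u8 *hw_ste_p, u8 lu_type);
            void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
            void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
            void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
    };

    struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
    {
            if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
                    return &ste_ctx_v0;     /* hypothetical table name */
            return NULL;                    /* other formats: see series */
    }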
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6d73719db1f4..b337d6626bff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
@@ -25,11 +26,11 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
- mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
+ mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
0, last_ste->hw_ste,
ste_info_last, send_list, true);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -103,14 +106,19 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
int ret;
list_del(&ste_info->send_list);
+
+	/* Copy the data to the STE, either the reduced size or only the
+	 * control, since the last 16B (the mask) is already written to HW.
+	 */
+ if (ste_info->size == DR_STE_SIZE_CTRL)
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+ else
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
if (ret)
goto out;
- /* Copy data to ste, only reduced size, the last 16B (mask)
- * is already written to the hw.
- */
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
out:
kfree(ste_info);
@@ -169,6 +177,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste *col_ste,
u8 *hw_ste)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste *new_ste;
int ret;
@@ -180,11 +189,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(new_ste,
+ ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+		mlx5dr_dbg(dmn, "Failed to update dup entry\n");
goto err_exit;
}
@@ -224,6 +233,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *new_htbl,
struct list_head *update_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +247,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
/* Copy STE control and tag */
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +264,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_ste,
hw_ste);
if (!new_ste) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
new_idx);
return NULL;
}
@@ -391,7 +402,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
new_htbl,
formatted_ste,
@@ -436,18 +448,20 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
* (48B len) which works only on first 32B
*/
- mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
+ prev_htbl->ste_arr[0].hw_ste,
new_htbl->chunk->icm_addr,
new_htbl->chunk->num_of_entries);
ste_to_update = &prev_htbl->ste_arr[0];
} else {
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ cur_htbl->pointing_ste->hw_ste,
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
- mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
+ mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
0, ste_to_update->hw_ste, ste_info,
update_list, false);
@@ -496,6 +510,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
@@ -507,8 +523,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
+ miss_list, send_list)) {
+ mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -659,6 +676,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
u8 num_of_builders = nic_matcher->num_of_builders;
struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
@@ -692,10 +710,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
goto err_exit;
/* Point current ste to the new action */
- mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ prev_hw_ste,
+ action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +742,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
/* Take ref on table, only on first time this ste is used */
@@ -730,7 +751,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
ste->ste_chain_location = ste_location;
@@ -743,7 +765,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste,
hw_ste,
DR_CHUNK_SIZE_1)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
goto clean_ste_info;
}
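
The switch from DR_STE_SIZE_REDUCED to DR_STE_SIZE_CTRL for miss-list and rehash updates narrows those writes to the control area alone. The slicing this relies on is the existing dr_hw_ste_format layout, restated below with the driver's constants (a 64B STE; the comments are descriptive, not from the patch):

    struct dr_hw_ste_format {
            u8 ctrl[DR_STE_SIZE_CTRL];   /* hit/miss addresses, lu_type */
            u8 tag[DR_STE_SIZE_TAG];     /* match value */
            u8 mask[DR_STE_SIZE_MASK];   /* match mask, written once */
    };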
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 24dede1b0a20..83c4c877d558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -431,6 +431,8 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
{
struct postsend_info send_info = {};
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
+
send_info.write.addr = (uintptr_t)data;
send_info.write.length = size;
send_info.write.lkey = 0;
@@ -457,6 +459,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
if (ret)
return ret;
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
+
/* Send the data iteration times */
for (i = 0; i < iterations; i++) {
u32 ste_index = i * (byte_size / DR_STE_SIZE);
@@ -480,6 +484,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
mask, DR_STE_SIZE_MASK);
+			/* Only when we have a mask do we need to re-arrange the STE */
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
+ data + (j * DR_STE_SIZE),
+ DR_STE_SIZE);
}
}
@@ -509,6 +517,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u32 byte_size = htbl->chunk->byte_size;
int iterations;
int num_stes;
+ u8 *copy_dst;
u8 *data;
int ret;
int i;
@@ -518,20 +527,22 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
if (ret)
return ret;
- for (i = 0; i < num_stes; i++) {
- u8 *copy_dst;
-
- /* Copy the same ste on the data buffer */
- copy_dst = data + i * DR_STE_SIZE;
- memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
-
- if (update_hw_ste) {
- /* Copy the reduced ste to hash table ste_arr */
+ if (update_hw_ste) {
+ /* Copy the reduced STE to hash table ste_arr */
+ for (i = 0; i < num_stes; i++) {
copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
+
+	/* Copy the same STE into the data buffer */
+ for (i = 0; i < num_stes; i++) {
+ copy_dst = data + i * DR_STE_SIZE;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+ }
+
/* Send the data iteration times */
for (i = 0; i < iterations; i++) {
u8 ste_index = i * (byte_size / DR_STE_SIZE);
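
mlx5dr_ste_prepare_for_postsend() gives the per-format STE context a chance to shuffle an STE from its CPU layout into the wire layout just before DMA, which is why the loop above now fills hw_ste_arr before the transform: the shadow copy must keep the CPU layout. The resulting order, sketched with the locals above:

    /* 1. CPU-side shadow keeps the untransformed layout. */
    memcpy(htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED,
           ste_init_data, DR_STE_SIZE_REDUCED);

    /* 2. One in-place transform to wire format... */
    mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);

    /* 3. ...then replicate into the DMA buffer for each entry. */
    memcpy(data + i * DR_STE_SIZE, ste_init_data, DR_STE_SIZE);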
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index d275823bff2f..f49abc7a4b9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -3,104 +3,7 @@
#include <linux/types.h>
#include <linux/crc32.h>
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-#define STE_IPV4 0x1
-#define STE_IPV6 0x2
-#define STE_TCP 0x1
-#define STE_UDP 0x2
-#define STE_SPI 0x3
-#define IP_VERSION_IPV4 0x4
-#define IP_VERSION_IPV6 0x6
-#define STE_SVLAN 0x1
-#define STE_CVLAN 0x2
-
-#define DR_STE_ENABLE_FLOW_TAG BIT(31)
-
-/* Set to STE a specific value using DR_STE_SET */
-#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
- if ((spec)->s_fname) { \
- MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
- (spec)->s_fname = 0; \
- } \
-} while (0)
-
-/* Set to STE spec->s_fname to tag->t_fname */
-#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
-
-/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
-
-/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
-
-#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
- MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
-} while (0)
-
-#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
- (_misc)->outer_first_mpls_over_gre_label || \
- (_misc)->outer_first_mpls_over_gre_exp || \
- (_misc)->outer_first_mpls_over_gre_s_bos || \
- (_misc)->outer_first_mpls_over_gre_ttl)
-#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
- (_misc)->outer_first_mpls_over_udp_label || \
- (_misc)->outer_first_mpls_over_udp_exp || \
- (_misc)->outer_first_mpls_over_udp_s_bos || \
- (_misc)->outer_first_mpls_over_udp_ttl)
-
-#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
- ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
- (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
- MLX5DR_STE_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_tunl_action {
- DR_STE_TUNL_ACTION_NONE = 0,
- DR_STE_TUNL_ACTION_ENABLE = 1,
- DR_STE_TUNL_ACTION_DECAP = 2,
- DR_STE_TUNL_ACTION_L3_DECAP = 3,
- DR_STE_TUNL_ACTION_POP_VLAN = 4,
-};
-
-enum dr_ste_action_type {
- DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
- DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
- DR_STE_ACTION_TYPE_ENCAP = 4,
-};
+#include "dr_ste.h"
struct dr_hw_ste_format {
u8 ctrl[DR_STE_SIZE_CTRL];
@@ -115,6 +18,11 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
return (__force u32)htonl(crc);
}
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
+}
+
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -142,7 +50,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
return index;
}
-static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
u16 byte_mask = 0;
int i;
@@ -155,7 +63,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
-static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -169,104 +77,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
- DR_STE_ENABLE_FLOW_TAG | flow_tag);
-}
-
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
- /* This can be used for both rx_steering_mult and for sx_transmit */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
-}
-
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
-}
-
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
- bool go_back)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- DR_STE_ACTION_TYPE_PUSH_VLAN);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to HW limitation we need to set this bit, otherwise reforamt +
- * push vlan will not work.
- */
- if (go_back)
- mlx5dr_ste_set_go_back_bit(hw_ste_p);
-}
-
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
- /* The hardware expects here size in words (2 byte) */
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
-}
-
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_DECAP);
-}
-
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_POP_VLAN);
-}
-
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_L3_DECAP);
- MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
-}
-
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
-}
-
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
-{
- return MLX5_GET(ste_general, hw_ste_p, entry_type);
-}
-
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index)
-{
- MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
- num_of_actions);
- MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
- re_write_index);
-}
-
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
- u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
- MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
- MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
- /* Set GVMI once, this is the same for RX/TX
- * bits 63_48 of next table base / miss address encode the next GVMI
- */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
-}
-
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
@@ -279,21 +89,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u64 miss_addr)
{
- u64 index =
- (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
- MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
-
- return index << 6;
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste, u64 miss_addr)
{
- u64 index = (icm_addr >> 5) | ht_size;
+ u8 *hw_ste_p = ste->hw_ste;
+
+ ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
- MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
- MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
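For context, the per-format callbacks that these new wrappers dispatch to pack addresses exactly as the deleted helpers did. A minimal sketch of a v0 implementation of the miss-address pair, reconstructed from the removed lines above (the function names are assumptions, not part of this patch):

static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	/* Miss addresses are 64-byte aligned: drop the low 6 bits and
	 * split the index across the two miss_address fields.
	 */
	u64 index = miss_addr >> 6;

	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}

static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
{
	/* Inverse of the setter: reassemble the index, restore alignment */
	u64 index =
		(MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
		 MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32) << 26);

	return index << 6;
}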
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
@@ -317,15 +132,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
return &ste->htbl->miss_list[index];
}
-static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
u8 *hw_ste = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
- MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
+ ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
@@ -363,7 +179,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
/* Free ste which is the head and the only one in miss_list */
static void
-dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
@@ -380,7 +197,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
*/
memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -399,13 +216,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
* |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
*/
static void
-dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
+dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ struct mlx5dr_ste *next_ste,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
{
struct mlx5dr_ste_htbl *next_miss_htbl;
+ u8 hw_ste[DR_STE_SIZE] = {};
+ int sb_idx;
next_miss_htbl = next_ste->htbl;
@@ -418,13 +239,19 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Move data from next into ste */
dr_ste_replace(ste, next_ste);
+ /* Copy all 64 hw_ste bytes */
+ memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ sb_idx = ste->ste_chain_location - 1;
+ mlx5dr_ste_set_bit_mask(hw_ste,
+ nic_matcher->ste_builder[sb_idx].bit_mask);
+
/* Del the htbl that contains the next_ste.
* The original htbl stays with the same number of entries.
*/
mlx5dr_htbl_put(next_miss_htbl);
- mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
- 0, ste->hw_ste,
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+ 0, hw_ste,
ste_info_head,
send_ste_list,
true /* Copy data */);
@@ -436,7 +263,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Free ste that is located in the middle of the miss list:
* |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
*/
-static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_send_info *ste_info,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
@@ -448,10 +276,10 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
if (WARN_ON(!prev_ste))
return;
- miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
- mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
+ ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
- mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
+ mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
prev_ste->hw_ste, ste_info,
send_ste_list, true /* Copy data*/);
@@ -467,6 +295,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
{
struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info ste_info_head;
struct mlx5dr_ste *next_ste, *first_ste;
bool put_on_origin_table = true;
@@ -495,18 +324,22 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
if (!next_ste) {
/* One and only entry in the list */
- dr_ste_remove_head_ste(ste, nic_matcher,
+ dr_ste_remove_head_ste(ste_ctx, ste,
+ nic_matcher,
&ste_info_head,
&send_ste_list,
stats_tbl);
} else {
/* First but not only entry in the list */
- dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
+ dr_ste_replace_head_ste(nic_matcher, ste,
+ next_ste, &ste_info_head,
&send_ste_list, stats_tbl);
put_on_origin_table = false;
}
} else { /* Ste in the middle of the list */
- dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ dr_ste_remove_middle_ste(ste_ctx, ste,
+ &ste_info_head, &send_ste_list,
+ stats_tbl);
}
/* Update HW */
@@ -530,34 +363,25 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst)
return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u32 ste_size)
{
- u64 index = miss_addr >> 6;
-
- /* The miss address is located at the same offset for TX and RX STEs */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
-}
-
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
-{
- u8 *hw_ste = ste->hw_ste;
-
- MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
- mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ if (ste_ctx->prepare_for_postsend)
+ ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
/* Init one ste as a pattern for ste data array */
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -565,13 +389,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi,
{
struct mlx5dr_ste ste = {};
- mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
else
- mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -582,7 +406,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
{
u8 formatted_ste[DR_STE_SIZE] = {};
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
htbl,
formatted_ste,
@@ -597,18 +422,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
u8 *cur_hw_ste,
enum mlx5dr_icm_chunk_size log_table_size)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u8 next_lu_type;
+ u16 next_lu_type;
u16 byte_mask;
- next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
- byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+ next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
+ byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
@@ -628,7 +453,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
goto free_table;
}
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
+ cur_hw_ste, next_htbl);
ste->next_htbl = next_htbl;
next_htbl->pointing_ste = ste;
}
@@ -657,7 +483,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask)
+ u16 lu_type, u16 byte_mask)
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
@@ -709,6 +535,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
return 0;
}
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
+{
+ const struct mlx5dr_ste_action_modify_field *hw_field;
+
+ if (sw_field >= ste_ctx->modify_field_arr_sz)
+ return NULL;
+
+ hw_field = &ste_ctx->modify_field_arr[sw_field];
+ if (!hw_field->end && !hw_field->start)
+ return NULL;
+
+ return hw_field;
+}
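A hypothetical caller sketch (names assumed) showing how the NULL return above is meant to be consumed before building a modify-header action:

	const struct mlx5dr_ste_action_modify_field *hw_field;

	/* Translate the SW field ID; bail out if this STE format cannot modify it */
	hw_field = mlx5dr_ste_conv_modify_hdr_sw_field(ste_ctx, sw_field);
	if (!hw_field)
		return -EOPNOTSUPP;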
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_set((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_add((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ ste_ctx->set_action_copy((u8 *)hw_action,
+ dst_hw_field, dst_shifter, dst_len,
+ src_hw_field, src_shifter);
+}
+
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ /* Only Ethernet frames are supported, with a VLAN tag (18 bytes) or without (14) */
+ if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
+ return -EINVAL;
+
+ return ste_ctx->set_action_decap_l3_list(data, data_sz,
+ hw_action, hw_action_sz,
+ used_hw_action_num);
+}
+
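HDR_LEN_L2 (14: two 6-byte MACs plus a 2-byte ethertype) and HDR_LEN_L2_W_VLAN (18: one extra 802.1Q tag) are defined in the new dr_ste.h added below. A hypothetical caller sketch (buffer names and sizes are illustrative only):

	u8 l2_hdr[HDR_LEN_L2];		/* assumed to hold the rebuilt L2 header */
	u8 hw_actions[256] = {};	/* arbitrary buffer size for the sketch */
	u16 used_actions = 0;
	int ret;

	/* Re-insert an untagged 14-byte L2 header as part of an L3 decap */
	ret = mlx5dr_ste_set_action_decap_l3_list(ste_ctx, l2_hdr, HDR_LEN_L2,
						  hw_actions, sizeof(hw_actions),
						  &used_actions);
	if (ret)
		return ret;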
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
@@ -738,6 +650,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
int ret, i;
@@ -748,14 +661,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
sb = nic_matcher->ste_builder;
for (i = 0; i < nic_matcher->num_of_builders; i++) {
- mlx5dr_ste_init(ste_arr,
- sb->lu_type,
- nic_dmn->ste_type,
- dmn->info.caps.gvmi);
+ ste_ctx->ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
+ ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -765,45 +678,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
* not relevant for the last ste in the chain.
*/
sb++;
- MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
- MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
+ ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
}
ste_arr += DR_STE_SIZE;
}
return 0;
}
-static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- if (mask->smac_47_16 || mask->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
- mask->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
- mask->smac_47_16 << 16 | mask->smac_15_0);
- mask->smac_47_16 = 0;
- mask->smac_15_0 = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
-
- if (mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- } else if (mask->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->svlan_tag = 0;
- }
-}
-
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
@@ -1045,566 +927,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
}
-static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- if (spec->smac_47_16 || spec->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
- spec->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
- spec->smac_47_16 << 16 | spec->smac_15_0);
- spec->smac_47_16 = 0;
- spec->smac_15_0 = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
+ ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
- bool inner,
- u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_address, mask, dst_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_address, mask, src_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- ecn, mask, ip_ecn);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
- mask->tcp_flags = 0;
- }
-}
-
-static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void
-dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_mask->inner_second_cvlan_tag ||
- misc_mask->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->inner_second_cvlan_tag = 0;
- misc_mask->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, inner_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, inner_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, inner_second_prio);
- } else {
- if (misc_mask->outer_second_cvlan_tag ||
- misc_mask->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->outer_second_cvlan_tag = 0;
- misc_mask->outer_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, outer_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, outer_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, outer_second_prio);
- }
+ ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
-static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *tag)
-{
- struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_spec = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_spec->inner_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->inner_second_cvlan_tag = 0;
- } else if (misc_spec->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
- } else {
- if (misc_spec->outer_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->outer_second_cvlan_tag = 0;
- } else if (misc_spec->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->outer_second_svlan_tag = 0;
- }
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
- }
-
- return 0;
-}
-
-static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
- DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
-}
-
-static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l2_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
-}
-
-static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask,
- l2_tunneling_network_id, (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
+ ste_ctx->build_eth_l2_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
- (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
- dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
+ ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
-static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
-}
-
-static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
- mask->tcp_flags = 0;
- }
+ ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
-static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
- DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+ ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
@@ -1622,653 +1031,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
-static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (inner)
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
- else
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
-}
-
-static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (sb->inner)
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
- else
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
-
- return 0;
-}
-
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
-}
-
-static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+ ste_ctx->build_mpls_init(sb, mask);
}
-static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
-
- DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
- DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
- DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
-
- DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
-
- DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
-{
- dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_gre_tag;
-}
-
-static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
-}
-
-static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
- return 0;
+ ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
-}
-
-#define ICMP_TYPE_OFFSET_FIRST_DW 24
-#define ICMP_CODE_OFFSET_FIRST_DW 16
-#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
-
-static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- u8 *bit_mask)
-{
- bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
- struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- u32 icmp_header_data_mask;
- u32 icmp_type_mask;
- u32 icmp_code_mask;
- int dw0_location;
- int dw1_location;
-
- if (is_ipv4_mask) {
- icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
- icmp_type_mask = misc_3_mask->icmpv4_type;
- icmp_code_mask = misc_3_mask->icmpv4_code;
- dw0_location = caps->flex_parser_id_icmp_dw0;
- dw1_location = caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
- icmp_type_mask = misc_3_mask->icmpv6_type;
- icmp_code_mask = misc_3_mask->icmpv6_code;
- dw0_location = caps->flex_parser_id_icmpv6_dw0;
- dw1_location = caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_type = 0;
- else
- misc_3_mask->icmpv6_type = 0;
- }
- if (icmp_code_mask) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_code = 0;
- else
- misc_3_mask->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
- (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_header_data = 0;
- else
- misc_3_mask->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u32 icmp_header_data;
- int dw0_location;
- int dw1_location;
- u32 icmp_type;
- u32 icmp_code;
- bool is_ipv4;
-
- is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
- if (is_ipv4) {
- icmp_header_data = misc_3->icmpv4_header_data;
- icmp_type = misc_3->icmpv4_type;
- icmp_code = misc_3->icmpv4_code;
- dw0_location = sb->caps->flex_parser_id_icmp_dw0;
- dw1_location = sb->caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data = misc_3->icmpv6_header_data;
- icmp_type = misc_3->icmpv6_type;
- icmp_code = misc_3->icmpv6_code;
- dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
- dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_type = 0;
- else
- misc_3->icmpv6_type = 0;
- }
-
- if (icmp_code) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_code = 0;
- else
- misc_3->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4)
- misc_3->icmpv4_header_data = 0;
- else
- misc_3->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ ste_ctx->build_tnl_mpls_init(sb, mask);
}
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
- if (ret)
- return ret;
-
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
-
- return 0;
-}
-
-static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(general_purpose, bit_mask,
- general_purpose_lookup_field, misc_2_mask,
- metadata_reg_a);
-}
-
-static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
- misc_2_mask, metadata_reg_a);
-
- return 0;
+ return ste_ctx->build_icmp_init(sb, mask);
}
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
-}
-
-static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- if (inner) {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- inner_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- inner_tcp_ack_num);
- } else {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- outer_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- outer_tcp_ack_num);
- }
-}
-
-static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- if (sb->inner) {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
- } else {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
- }
-
- return 0;
+ ste_ctx->build_general_purpose_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+ ste_ctx->build_eth_l4_misc_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_flags,
- misc_3_mask, outer_vxlan_gpe_flags);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_next_protocol,
- misc_3_mask, outer_vxlan_gpe_next_protocol);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_vni,
- misc_3_mask, outer_vxlan_gpe_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_flags, misc3,
- outer_vxlan_gpe_flags);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_next_protocol, misc3,
- outer_vxlan_gpe_next_protocol);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_vni, misc3,
- outer_vxlan_gpe_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
- sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+ ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_protocol_type,
- misc_mask, geneve_protocol_type);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_oam,
- misc_mask, geneve_oam);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_opt_len,
- misc_mask, geneve_opt_len);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_vni,
- misc_mask, geneve_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_protocol_type, misc, geneve_protocol_type);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_oam, misc, geneve_oam);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_opt_len, misc, geneve_opt_len);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_vni, misc, geneve_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
+ ste_ctx->build_tnl_geneve_init(sb, mask);
}
-static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
- misc_2_mask, metadata_reg_c_0);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
- misc_2_mask, metadata_reg_c_1);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
- misc_2_mask, metadata_reg_c_2);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
- misc_2_mask, metadata_reg_c_3);
-}
-
-static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
- DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
- DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
- DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
+ ste_ctx->build_register_0_init(sb, mask);
}
-static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
- misc_2_mask, metadata_reg_c_4);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
- misc_2_mask, metadata_reg_c_5);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
- misc_2_mask, metadata_reg_c_6);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
- misc_2_mask, metadata_reg_c_7);
-}
-
-static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
- DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
- DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
- DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+ ste_ctx->build_register_1_init(sb, mask);
}
-static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
- misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
- struct mlx5dr_cmd_vport_cap *vport_cap;
- struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
- u8 *bit_mask = sb->bit_mask;
- bool source_gvmi_set;
-
- DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
-
- if (sb->vhca_id_valid) {
- /* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
- else
- return -EINVAL;
- } else {
- caps = &dmn->info.caps;
- }
-
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
- if (!vport_cap)
- return -EINVAL;
-
- source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
- if (vport_cap->vport_gvmi && source_gvmi_set)
- MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
-
- misc->source_eswitch_owner_vhca_id = 0;
- misc->source_port = 0;
-
- return 0;
-}
-
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
@@ -2276,12 +1142,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->dmn = dmn;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+ ste_ctx->build_src_gvmi_qpn_init(sb, mask);
+}
+
+static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
+ [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
+ [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
+{
+ if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return NULL;
+
+ return mlx5dr_ste_ctx_arr[version];
}
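
The context array above is the whole dispatch mechanism: each supported steering format maps to one ops table, and unknown (newer) formats are rejected. A minimal usage sketch, assuming the caller caches the result on its domain (dr_domain_ste_ctx_init and the dmn->ste_ctx field are illustrative; only mlx5dr_ste_get_ctx and the MLX5_STEERING_FORMAT_* indices come from this patch):

	/* Illustrative only: resolve the per-version STE ops once at domain init */
	static int dr_domain_ste_ctx_init(struct mlx5dr_domain *dmn,
					  u8 steering_format_version)
	{
		dmn->ste_ctx = mlx5dr_ste_get_ctx(steering_format_version);
		if (!dmn->ste_ctx)
			return -EOPNOTSUPP; /* format newer than CONNECTX_6DX */
		return 0;
	}
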
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
new file mode 100644
index 000000000000..06bcb0ee8f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef _DR_STE_
+#define _DR_STE_
+
+#include "dr_types.h"
+
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define HDR_LEN_L2_MACS 0xC
+#define HDR_LEN_L2_VLAN 0x4
+#define HDR_LEN_L2_ETHER 0x2
+#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
+#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
+
+/* Set a specific value to the STE using DR_STE_SET_VAL */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
+
+/* Set tag->t_fname from spec->s_fname and mark spec->s_fname as used */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set tag->t_fname to all ones and mark spec->s_fname as used */
+#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
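
These helpers both program the STE field and zero the matching spec field, which lets the framework later check that every requested match criterion was consumed by some builder. A sketch of what one invocation expands to (field names borrowed from the eth_l2_src builders in dr_ste_v0.c below):

	/* DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid)
	 * expands roughly to:
	 */
	if (spec->first_vid) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_id, spec->first_vid);
		spec->first_vid = 0;	/* mark the spec field as consumed */
	}
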
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
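
The spec's tcp_flags follows the TCP header bit order, FIN at bit 0 up through NS at bit 8, so a worked example for a SYN-ACK value of 0x12 lights up exactly two of the nine fields:

	/* spec->tcp_flags = 0x12 (SYN | ACK):
	 *   tcp_ack = !!(0x12 & (1 << 4)) = 1
	 *   tcp_syn = !!(0x12 & (1 << 1)) = 1
	 * all other tcp_* flag fields are written as 0
	 */
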
+
+#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
+ struct mlx5dr_match_misc2 *_mask = mask; \
+ u8 *_tag = tag; \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+enum dr_ste_action_modify_type_l3 {
+ DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
+};
+
+enum dr_ste_action_modify_type_l4 {
+ DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
+};
+
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
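
The byte mask is the per-bit mask compressed to one bit per byte, so the lookup hardware only compares the bytes that are fully masked. A sketch of the conversion under that assumption (the real body stays in dr_ste.c; DR_STE_SIZE_MASK is taken to be the mask length in bytes):

	u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
	{
		u16 byte_mask = 0;
		int i;

		/* MSB first: one output bit per mask byte, set only when
		 * the whole byte is masked
		 */
		for (i = 0; i < DR_STE_SIZE_MASK; i++) {
			byte_mask <<= 1;
			if (bit_mask[i] == 0xff)
				byte_mask |= 1;
		}
		return byte_mask;
	}
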
+
+#define DR_STE_CTX_BUILDER(fname) \
+ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
+ struct mlx5dr_match_param *mask))
+
+struct mlx5dr_ste_ctx {
+ /* Builders */
+ void DR_STE_CTX_BUILDER(eth_l2_src_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
+ void DR_STE_CTX_BUILDER(eth_l2_src);
+ void DR_STE_CTX_BUILDER(eth_l2_dst);
+ void DR_STE_CTX_BUILDER(eth_l2_tnl);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
+ void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
+ void DR_STE_CTX_BUILDER(mpls);
+ void DR_STE_CTX_BUILDER(tnl_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls);
+ int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(general_purpose);
+ void DR_STE_CTX_BUILDER(eth_l4_misc);
+ void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
+ void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(register_0);
+ void DR_STE_CTX_BUILDER(register_1);
+ void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+
+ /* Getters and Setters */
+ void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi);
+ void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
+ u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
+ u64 (*get_miss_addr)(u8 *hw_ste_p);
+ void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+ void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
+ u16 (*get_byte_mask)(u8 *hw_ste_p);
+
+ /* Actions */
+ void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ u32 modify_field_arr_sz;
+ const struct mlx5dr_ste_action_modify_field *modify_field_arr;
+ void (*set_action_set)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_add)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_copy)(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+ int (*set_action_decap_l3_list)(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+ /* Send */
+ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
+};
+
+extern struct mlx5dr_ste_ctx ste_ctx_v0;
+extern struct mlx5dr_ste_ctx ste_ctx_v1;
+
+#endif /* _DR_STE_ */
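
Each format version then exports one instance of these ops. A trimmed sketch of how dr_ste_v0.c is expected to fill the table (the full initializer covers every op; the handlers named here are the v0 helpers defined in the next file):

	struct mlx5dr_ste_ctx ste_ctx_v0 = {
		/* Builders */
		.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
		.build_register_0_init	   = &dr_ste_v0_build_register_0_init,
		/* ... one entry per builder ... */

		/* Getters and Setters */
		.ste_init	= &dr_ste_v0_init,
		.set_miss_addr	= &dr_ste_v0_set_miss_addr,
		.get_miss_addr	= &dr_ste_v0_get_miss_addr,

		/* Actions */
		.set_actions_rx	= &dr_ste_v0_set_actions_rx,
		.set_actions_tx	= &dr_ste_v0_set_actions_tx,
	};
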
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
new file mode 100644
index 000000000000..9ec079247c4b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+#define SVLAN_ETHERTYPE 0x88a8
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+enum dr_ste_v0_action_tunl {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_v0_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+enum dr_ste_v0_action_mdfy_op {
+ DR_STE_ACTION_MDFY_OP_COPY = 0x1,
+ DR_STE_ACTION_MDFY_OP_SET = 0x2,
+ DR_STE_ACTION_MDFY_OP_ADD = 0x3,
+};
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
+ (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
+ DR_STE_V0_LU_TYPE_##lookup_type##_O)
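
Inner placement wins over RX, and a TX outer lookup falls through to the _O variant. For example, with the enum values just below:

	/* DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner) resolves to:
	 *   inner          -> DR_STE_V0_LU_TYPE_ETHL2_DST_I (0x07)
	 *   !inner &&  rx  -> DR_STE_V0_LU_TYPE_ETHL2_DST_D (0x1b)
	 *   !inner && !rx  -> DR_STE_V0_LU_TYPE_ETHL2_DST_O (0x06)
	 */
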
+
+enum {
+ DR_STE_V0_LU_TYPE_NOP = 0x00,
+ DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ DR_STE_V0_LU_TYPE_ETHL4_O = 0x13,
+ DR_STE_V0_LU_TYPE_ETHL4_I = 0x14,
+ DR_STE_V0_LU_TYPE_ETHL4_D = 0x21,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25,
+ DR_STE_V0_LU_TYPE_GRE = 0x16,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum {
+ DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9,
+ DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21,
+ DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22,
+ DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+	/* The miss address is at the same offsets for TX and RX STEs */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
+ ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
+
+ return index << 6;
+}
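
Miss addresses are 64-byte aligned, so the low six bits are dropped and the remaining index is split across the 39_32 and 31_6 fields. A quick arithmetic self-check of the round trip, in plain C without the MLX5 accessors:

	u64 miss_addr = 0x1234567880ULL;		/* 64-byte aligned */
	u64 index = miss_addr >> 6;			/* 0x48d159e2 */
	u32 hi = index >> 26;				/* miss_address_39_32 = 0x12 */
	u32 lo = index & GENMASK(25, 0);		/* miss_address_31_6 = 0xd159e2 */
	u64 back = ((u64)lo | ((u64)hi << 26)) << 6;	/* == miss_addr */
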
+
+static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, byte_mask);
+}
+
+static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+}
+
+static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
+}
+
+static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
+}
+
+static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
+}
+
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
+ dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+	/* Set the GVMI once, it is the same for RX/TX.
+	 * Bits 63_48 of the next table base / miss address encode the next GVMI.
+	 */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+	/* This can be used for both rx_steering_mult and sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+	/* Due to a HW limitation we need to set this bit, otherwise reformat +
+	 * push vlan will not work.
+	 */
+ if (go_back)
+ dr_ste_v0_set_go_back_bit(hw_ste_p);
+}
+
+static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+	/* The hardware expects the size here in 2-byte words */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
+static void dr_ste_v0_arr_init_next(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+ entry_type, gvmi);
+}
+
+static void
+dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+	/* Make sure the modify header comes before L2 encapsulation,
+	 * since modify headers are supported for outer headers only.
+	 */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+		/* Modify header and encapsulation require different STEs,
+		 * since the modify header STE format doesn't support the
+		 * encapsulation tunneling_action.
+		 */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+		/* Whenever prio_tag_required is enabled, we can be sure that the
+		 * previous table (ACL) already pushed a vlan onto our packet,
+		 * and due to a HW limitation we need to set this bit, otherwise
+		 * push vlan + reformat will not work.
+		 */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ dr_ste_v0_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
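
A walk-through of the STE chain this produces for a rule that carries a modify header, one pushed vlan, and L2 encap (derived from the branches above):

	/*
	 * action_type_set = { MODIFY_HDR, PUSH_VLAN (count = 1), L2_TO_TNL_L2 }
	 *
	 *   STE0: entry type MODIFY_PKT, header rewrite actions
	 *   STE1: push vlan headers[0], go_back set because encap follows
	 *   STE2: encap with reformat_id, then hit_gvmi / final_icm_addr
	 *
	 * added_stes ends up as 2, telling the caller two extra STEs were used.
	 */
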
+
+static void
+dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ dr_ste_v0_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
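
The RX side has the extra wrinkle that L3 decap already claims the MODIFY_PKT entry type, so a following modify header, and then a flow tag, each force a fresh STE:

	/*
	 * action_type_set = { TNL_L3_TO_L2, MODIFY_HDR, TAG }
	 *
	 *   STE0: MODIFY_PKT, L3 decap + decap rewrite actions
	 *   STE1: fresh MODIFY_PKT for the header modify actions
	 *   STE2: plain RX STE carrying the flow tag
	 *
	 * added_stes ends up as 2 here as well.
	 */
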
+
+static void dr_ste_v0_set_action_set(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_add(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_copy(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
+}
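
Tying these setters to the modify-field table above, a sketch of encoding "set outer IPv4 TTL to 64" (deriving shifter and length from start/end is an assumption about the caller in dr_action.c, not something this patch shows):

	const struct mlx5dr_ste_action_modify_field *fld =
		&dr_ste_v0_action_modify_field_arr[MLX5_ACTION_IN_FIELD_OUT_IP_TTL];
	u8 hw_action[MLX5_ST_SZ_BYTES(dr_action_hw_set)] = {};

	/* TTL lives in L3_1 bits 8..15 -> shifter 8, length 8 */
	dr_ste_v0_set_action_set(hw_action, fld->hw_field, fld->start,
				 fld->end - fld->start + 1, 64);
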
+
+#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
+
+static int
+dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u32 hw_action_num;
+ int required_actions;
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+
+ vlan = (data_sz != HDR_LEN_L2);
+ hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
+ required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
+
+ if (hw_action_num < required_actions)
+ return -ENOMEM;
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
+ }
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ }
+
+ *used_hw_action_num = required_actions;
+
+ return 0;
+}
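
A usage sketch: rebuilding an L2 header that still carries a vlan takes six SET actions, so the caller must provide at least that much action buffer (the l2_hdr_data pointer is hypothetical):

	u8 hw_actions[6 * MLX5_ST_SZ_BYTES(dr_action_hw_set)] = {};
	u16 used = 0;
	int err;

	err = dr_ste_v0_set_action_decap_l3_list(l2_hdr_data, HDR_LEN_L2_W_VLAN,
						 hw_actions, sizeof(hw_actions),
						 &used);
	/* on success: used == 6 (DR_STE_DECAP_L3_MIN_ACTION_NUM + 1 for the vlan) */
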
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
+}
+
+static int
+dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls, misc2, outer, tag);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+
+static int
+dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u32 *icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u8 *icmp_type;
+ u8 *icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = &misc_3->icmpv4_header_data;
+ icmp_type = &misc_3->icmpv4_type;
+ icmp_code = &misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = &misc_3->icmpv6_header_data;
+ icmp_type = &misc_3->icmpv6_type;
+ icmp_code = &misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+			 (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+
+ *icmp_type = 0;
+ *icmp_code = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ *icmp_header_data);
+ *icmp_header_data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
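
With the offsets above, ICMP type and code share the first flex-parser dword; for an ICMPv4 echo request (type 8, code 0) this packs as:

	/* flex_parser_4 = (8 << ICMP_TYPE_OFFSET_FIRST_DW) |
	 *		   (0 << ICMP_CODE_OFFSET_FIRST_DW)
	 *		 = 0x08000000
	 */
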
+
+static int
+dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ int ret;
+
+ ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2, metadata_reg_a);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
+}
+
+static int
+dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
+}
+
+static int
+dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+static int
+dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+ bool source_gvmi_set;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+ if (source_gvmi_set) {
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ }
+
+ return 0;
+}
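+
+/* For example, with vhca_id_valid set, a source_eswitch_owner_vhca_id that
+ * matches the peer domain's gvmi resolves the vport GVMI from
+ * dmn->peer_dmn->info.caps; an owner that matches neither the local nor the
+ * peer gvmi fails with -EINVAL.
+ */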
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
+}
+
+struct mlx5dr_ste_ctx ste_ctx_v0 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v0_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v0_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v0_build_register_0_init,
+ .build_register_1_init = &dr_ste_v0_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v0_init,
+ .set_next_lu_type = &dr_ste_v0_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v0_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v0_set_miss_addr,
+ .get_miss_addr = &dr_ste_v0_get_miss_addr,
+ .set_hit_addr = &dr_ste_v0_set_hit_addr,
+ .set_byte_mask = &dr_ste_v0_set_byte_mask,
+ .get_byte_mask = &dr_ste_v0_get_byte_mask,
+
+ /* Actions */
+ .set_actions_rx = &dr_ste_v0_set_actions_rx,
+ .set_actions_tx = &dr_ste_v0_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v0_action_modify_field_arr,
+ .set_action_set = &dr_ste_v0_set_action_set,
+ .set_action_add = &dr_ste_v0_set_action_add,
+ .set_action_copy = &dr_ste_v0_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
new file mode 100644
index 000000000000..4088d6e51508
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -0,0 +1,1633 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include "mlx5_ifc_dr_ste_v1.h"
+#include "dr_ste.h"
+
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
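+
+/* e.g. DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102 is definer mode 0x01 with definer
+ * index 0x02; dr_ste_v1_set_lu_type() below writes the mode byte to
+ * entry_format and the index byte to match_definer_ctx_idx.
+ */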
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* used for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
+}
+
+static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+ MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+
+ return index << 6;
+}
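+
+/* Miss addresses are 64-byte aligned, so only bits 39:6 are stored:
+ * e.g. miss_addr 0x23456789c0 gives index 0x8d159e27, which is split into
+ * miss_address_39_32 = 0x23 and miss_address_31_6 = 0x1159e27;
+ * dr_ste_v1_get_miss_addr() reassembles the same 40-bit address.
+ */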
+
+static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
+}
+
+static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
+}
+
+static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
+}
+
+static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+{
+ u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
+ u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
+
+ return (mode << 8 | index);
+}
+
+static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
+}
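+
+/* ht_size is folded into the low bits of the 32-byte-aligned table address
+ * (the hash table ICM chunk is size-aligned, so those bits of icm_addr >> 5
+ * are zero), which is why both destination fields carry the "_size" suffix.
+ */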
+
+static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
+ u32 ste_size)
+{
+ u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
+ u8 *mask = tag + DR_STE_SIZE_TAG;
+ u8 tmp_tag[DR_STE_SIZE_TAG] = {};
+
+ if (ste_size == DR_STE_SIZE_CTRL)
+ return;
+
+ WARN_ON(ste_size != DR_STE_SIZE);
+
+ /* Back up the tag */
+ memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
+
+ /* Swap the mask and the tag; both are the same size */
+ memcpy(tag, mask, DR_STE_SIZE_MASK);
+ memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
+}
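+
+/* i.e. software builds the STE as [ CTRL | TAG | MASK ] while the STEv1
+ * hardware consumes [ CTRL | MASK | TAG ], so the two equally sized regions
+ * are swapped in place just before the STE is posted.
+ */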
+
+static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
+{
+ MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_FLOW_TAG);
+ MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
+}
+
+static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
+}
+
+static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
+}
+
+static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the vlan header in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ start_offset, HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ inline_data, vlan_hdr);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
+ u8 *s_action,
+ u16 decap_actions,
+ u32 decap_index)
+{
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
+ decap_actions);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
+ decap_index);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
+ u8 *s_action,
+ u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
+ num_of_actions);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
+ re_write_index);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
+ u32 *added_stes,
+ u16 gvmi)
+{
+ u8 *action;
+
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
+ dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);
+
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
+ memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
+}
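+
+/* The first (BWC) STE only has room for a single double action (8 bytes);
+ * each match STE appended here provides a fresh 12-byte (triple) action
+ * budget, which is why callers reset action_sz to DR_STE_ACTION_TRIPLE_SZ
+ * after calling this helper.
+ */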
+
+static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+ u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+ bool allow_encap = true;
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
+ attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+ last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->modify_actions,
+ attr->modify_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_encap = false;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_encap = true;
+ }
+ dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+ if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_encap = true;
+ }
+ dr_ste_v1_set_tx_encap(last_ste, action,
+ attr->reformat_id,
+ attr->reformat_size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+ u8 *d_action;
+
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+ dr_ste_v1_set_tx_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat_id,
+ attr->reformat_size);
+ action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+ action += DR_STE_ACTION_TRIPLE_SZ;
+ }
+
+ dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
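+
+/* For example, MODIFY_HDR consumes the initial STE's only double-action
+ * slot and clears allow_encap, so a following PUSH_VLAN or L2_TO_TNL_L2
+ * encap always starts a new match STE via dr_ste_v1_arr_init_next_match().
+ */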
+
+static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+ u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+ bool allow_modify_hdr = true;
+ bool allow_ctr = true;
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v1_set_rx_decap_l3(last_ste, action,
+ attr->decap_actions,
+ attr->decap_index);
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->decap_actions,
+ attr->decap_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
+ dr_ste_v1_set_rx_decap(last_ste, action);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = true;
+ }
+ dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
+ !allow_modify_hdr) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ }
+
+ dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ /* Modify header and decapsulation must use different STEs */
+ if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = true;
+ }
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->modify_actions,
+ attr->modify_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR]) {
+ /* Counter action is set after decap to exclude the decapped header */
+ if (!allow_ctr) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = false;
+ }
+ dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+ }
+
+ dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v1_set_action_set(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
+ MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
+}
+
+static void dr_ste_v1_set_action_add(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
+ MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
+}
+
+static void dr_ste_v1_set_action_copy(u8 *d_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
+ MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
+ MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
+}
+
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+
+static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ memcpy(padded_data, data, data_sz);
+
+ /* Remove the outer L2 and L3 headers */
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* the remove single action plus a NOP occupy one double-action slot */
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Add the new header inline + 2 extra bytes */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
+ i * 2);
+
+ /* Copy bytes one by one to avoid endianness problems */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ data_ptr += inline_data_sz;
+ used_actions++;
+ }
+
+ /* Remove 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+ /* The hardware expects here size in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
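+
+/* Worked example for a standard 14-byte L2 header (data_sz = 14): the
+ * inline_data field is 4 bytes, so the insert loop runs 14 / 4 + 1 = 4 times
+ * and writes 16 bytes (the header plus 2 bytes of padding); the final action
+ * then removes 1 word (2 bytes) at word offset 14 / 2 = 7, for a total of
+ * 1 + 4 + 1 = 6 used actions, within DR_STE_DECAP_L3_ACTION_NUM (8).
+ */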
+
+static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
+}
+
+static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (sb->inner)
+ DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
+ else
+ DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
+}
+
+static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
+}
+
+static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
+ DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
+ DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
+}
+
+static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+ misc2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+ misc2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+ misc2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+ misc2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+ misc2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+ misc2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+ misc2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+ misc2, outer_first_mpls_over_udp_ttl);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
+}
+
+static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
+ u32 *icmp_header_data;
+ u8 *icmp_type;
+ u8 *icmp_code;
+
+ if (is_ipv4) {
+ icmp_header_data = &misc3->icmpv4_header_data;
+ icmp_type = &misc3->icmpv4_type;
+ icmp_code = &misc3->icmpv4_code;
+ } else {
+ icmp_header_data = &misc3->icmpv6_header_data;
+ icmp_type = &misc3->icmpv6_type;
+ icmp_code = &misc3->icmpv6_code;
+ }
+
+ MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
+ MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
+ MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
+
+ *icmp_header_data = 0;
+ *icmp_type = 0;
+ *icmp_code = 0;
+
+ return 0;
+}
+
+static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
+
+ return 0;
+}
+
+static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc2, metadata_reg_a);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
+}
+
+static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
+}
+
+static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
+}
+
+static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
+}
+
+static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+}
+
+static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+
+ DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
+ return 0;
+
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ return 0;
+}
+
+static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
+}
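
[editor's note, not part of the patch] mlx5dr_get_vport_cap() used above is expected to reject out-of-range or disabled vports, which the tag builder turns into -EINVAL. A simplified sketch (the real helper also special-cases the uplink/wire port):

static struct mlx5dr_cmd_vport_cap *
sketch_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
{
	if (!caps->vports_caps || vport >= caps->num_vports)
		return NULL;	/* disabled or invalid vport */
	return &caps->vports_caps[vport];
}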
+
+struct mlx5dr_ste_ctx ste_ctx_v1 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+ /* Actions */
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v1_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
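
[editor's note, not part of the patch] With both STE flavors exposed through struct mlx5dr_ste_ctx, callers pick a context by device steering format. An illustrative sketch of that selection (the real mlx5dr_ste_get_ctx() in dr_ste.c indexes a version array):

static struct mlx5dr_ste_ctx *sketch_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return &ste_ctx_v0;	/* legacy sw_owner devices */
	if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return &ste_ctx_v1;	/* sw_owner_v2 devices */
	return NULL;
}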
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 51880df26724..4af0e4e6a13c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
+struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
u8 *hw_ste;
@@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
};
struct mlx5dr_ste_htbl {
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
@@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
u8 vhca_id_valid:1;
struct mlx5dr_domain *dmn;
struct mlx5dr_cmd_caps *caps;
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
@@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask);
+ u16 lu_type, u16 byte_mask);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
@@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
-void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
- struct mlx5dr_ste_htbl *next_htbl);
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 miss_addr);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
- int size, bool encap_l3);
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
- bool go_back);
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index);
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+#define MLX5DR_MAX_VLANS 2
+
+struct mlx5dr_ste_actions_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct {
+ int count;
+ u32 headers[MLX5DR_MAX_VLANS];
+ } vlans;
+};
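
[editor's note, not part of the patch] Action building is now driven by this attribute block rather than by per-action setters. A hedged illustration of filling it before calling mlx5dr_ste_set_actions_tx(); the values below are placeholders:

static void sketch_fill_actions_attr(struct mlx5dr_domain *dmn,
				     u64 default_icm_addr,
				     struct mlx5dr_ste_actions_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->gvmi = dmn->info.caps.gvmi;
	attr->hit_gvmi = dmn->info.caps.gvmi;
	attr->final_icm_addr = default_icm_addr;	/* miss destination */
}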
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher);
@@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
return !ste->refcount;
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
- struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
@@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
u32 outer_vxlan_gpe_next_protocol:8;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u32 icmpv6_code:8;
- u32 icmpv6_type:8;
- u32 icmpv4_code:8;
- u32 icmpv4_type:8;
+ u8 icmpv6_code;
+ u8 icmpv6_type;
+ u8 icmpv4_code;
+ u8 icmpv4_type;
u8 reserved_auto3[0x1c];
};
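
[editor's note, not part of the patch] The bitfield-to-u8 change above is needed because dr_ste_v1_build_icmp_tag() takes the address of these members, and C forbids taking the address of a bitfield. Illustration only:

	u32 icmpv4_type:8;	/* &misc3->icmpv4_type: compile error */
	u8  icmpv4_type;	/* &misc3->icmpv4_type: valid u8 *   */

The wire layout is unchanged since each field was already byte-sized.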
@@ -598,7 +666,8 @@ struct mlx5dr_esw_caps {
u64 drop_icm_address_tx;
u64 uplink_icm_address_rx;
u64 uplink_icm_address_tx;
- bool sw_owner;
+ u8 sw_owner:1;
+ u8 sw_owner_v2:1;
};
struct mlx5dr_cmd_vport_cap {
@@ -631,6 +700,9 @@ struct mlx5dr_cmd_caps {
bool rx_sw_owner;
bool tx_sw_owner;
bool fdb_sw_owner;
+ u8 rx_sw_owner_v2:1;
+ u8 tx_sw_owner_v2:1;
+ u8 fdb_sw_owner_v2:1;
u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps;
@@ -671,6 +743,7 @@ struct mlx5dr_domain {
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct mlx5dr_domain_cache cache;
+ struct mlx5dr_ste_ctx *ste_ctx;
};
struct mlx5dr_table_rx_tx {
@@ -725,6 +798,14 @@ struct mlx5dr_rule_member {
struct list_head use_ste_list;
};
+struct mlx5dr_ste_action_modify_field {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
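[editor's note, not part of the patch] Each STE context carries a table of these descriptors mapping software modify-header fields to hardware locations. A hypothetical entry for illustration; the hw_field id and bit range are placeholders, not real values:

static const struct mlx5dr_ste_action_modify_field sketch_modify_fld = {
	.hw_field = 0x48,	/* placeholder HW field id */
	.start = 0,
	.end = 15,		/* 16-bit field */
	.l4_type = 0x1,		/* e.g. valid for TCP only */
};
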
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -995,12 +1076,16 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u32 ste_size);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_htbl_connect_info *connect_info,
bool update_hw_ste);
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -1126,6 +1211,8 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
u32 group_id,
struct mlx5dr_cmd_fte_info *fte);
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);
+
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index e01c3766c7de..83df6df6b459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H
enum {
- MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
- MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
- MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
- MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
- MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
- MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
-};
-
-enum {
- MLX5DR_STE_LU_TYPE_NOP = 0x00,
- MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
- MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
- MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
- MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
- MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
- MLX5DR_STE_LU_TYPE_GRE = 0x16,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
- MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
new file mode 100644
index 000000000000..34c2bd17a8b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef MLX5_IFC_DR_STE_V1_H
+#define MLX5_IFC_DR_STE_V1_H
+
+enum mlx5_ifc_ste_v1_modify_hdr_offset {
+ MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20,
+};
+
+struct mlx5_ifc_ste_single_action_flow_tag_v1_bits {
+ u8 action_id[0x8];
+ u8 flow_tag[0x18];
+};
+
+struct mlx5_ifc_ste_single_action_modify_list_v1_bits {
+ u8 action_id[0x8];
+ u8 num_of_modify_actions[0x8];
+ u8 modify_actions_ptr[0x10];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 end_anchor[0x6];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_11[0x1];
+ u8 start_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_copy_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_17[0x2];
+ u8 destination_length[0x6];
+
+ u8 reserved_at_20[0x8];
+ u8 source_dw_offset[0x8];
+ u8 reserved_at_30[0x2];
+ u8 source_right_shifter[0x6];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_ste_double_action_set_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_add_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 add_value[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 start_offset[0x7];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 start_offset[0x7];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits {
+ u8 action_id[0x8];
+ u8 modify_actions_pattern_pointer[0x18];
+
+ u8 number_of_modify_actions[0x8];
+ u8 modify_actions_argument_pointer[0x18];
+};
+
+struct mlx5_ifc_ste_match_bwc_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 byte_mask[0x10];
+ u8 next_entry_format[0x1];
+ u8 mask_mode[0x1];
+ u8 gvmi[0xe];
+
+ u8 action[0x40];
+};
+
+struct mlx5_ifc_ste_mask_and_match_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 action[0x60];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_v1_bits {
+ u8 reserved_at_0[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_loopback[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encapsulation_type[0x2];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x6];
+ u8 tcp_syn[0x1];
+ u8 reserved_at_67[0x3];
+ u8 force_loopback[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_v1_bits {
+ u8 reserved_at_0[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encapsulation_type[0x2];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x6];
+ u8 tcp_syn[0x1];
+ u8 reserved_at_67[0x3];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 smac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 reserved_at_50[0x2];
+ u8 functional_lb[0x1];
+ u8 reserved_at_53[0x5];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 reserved_at_5c[0x2];
+ u8 first_vlan_qualifier[0x2];
+
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 smac_15_0[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits {
+ u8 source_address[0x20];
+
+ u8 destination_address[0x20];
+
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+
+ u8 reserved_at_60[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 fragmented[0x1];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ u8 protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_v1_bits {
+ u8 l2_tunneling_network_id[0x20];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x3];
+ u8 ip_fragmented[0x1];
+ u8 reserved_at_64[0x2];
+ u8 encp_type[0x2];
+ u8 reserved_at_68[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits {
+ u8 identification[0x10];
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+
+ u8 total_length[0x10];
+ u8 checksum[0x10];
+
+ u8 version[0x4];
+ u8 ihl[0x4];
+ u8 time_to_live[0x8];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x1c];
+ u8 voq_internal_prio[0x4];
+};
+
+struct mlx5_ifc_ste_eth_l4_v1_bits {
+ u8 ipv6_version[0x4];
+ u8 reserved_at_4[0x4];
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ u8 ipv6_hop_limit[0x8];
+ u8 protocol[0x8];
+
+ u8 src_port[0x10];
+ u8 dst_port[0x10];
+
+ u8 first_fragment[0x1];
+ u8 reserved_at_41[0xb];
+ u8 flow_label[0x14];
+
+ u8 tcp_data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 fragmented[0x1];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 ipv6_paylen[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_v1_bits {
+ u8 window_size[0x10];
+ u8 urgent_pointer[0x10];
+
+ u8 ack_num[0x20];
+
+ u8 seq_num[0x20];
+
+ u8 length[0x10];
+ u8 checksum[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_v1_bits {
+ u8 reserved_at_0[0x15];
+ u8 mpls_ok[0x1];
+ u8 mpls4_s_bit[0x1];
+ u8 mpls4_qualifier[0x1];
+ u8 mpls3_s_bit[0x1];
+ u8 mpls3_qualifier[0x1];
+ u8 mpls2_s_bit[0x1];
+ u8 mpls2_qualifier[0x1];
+ u8 mpls1_s_bit[0x1];
+ u8 mpls1_qualifier[0x1];
+ u8 mpls0_s_bit[0x1];
+ u8 mpls0_qualifier[0x1];
+
+ u8 mpls0_label[0x14];
+ u8 mpls0_exp[0x3];
+ u8 mpls0_s_bos[0x1];
+ u8 mpls0_ttl[0x8];
+
+ u8 mpls1_label[0x20];
+
+ u8 mpls2_label[0x20];
+};
+
+struct mlx5_ifc_ste_gre_v1_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 strict_src_route[0x1];
+ u8 recur[0x3];
+ u8 flags[0x5];
+ u8 version[0x3];
+ u8 gre_protocol[0x10];
+
+ u8 reserved_at_20[0x20];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_v1_bits {
+ u8 loopback_synd[0x8];
+ u8 reserved_at_8[0x7];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+
+ u8 force_lb[0x1];
+ u8 reserved_at_21[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_icmp_v1_bits {
+ u8 icmp_payload_data[0x20];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+#endif /* MLX5_IFC_DR_STE_V1_H */
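
[editor's note, not part of the patch] A brief usage sketch for the layouts above: they are consumed through the standard mlx5_ifc accessors, mirroring what dr_ste_v1_build_icmp_tag() does earlier in this series:

static void sketch_set_icmp_tag(u8 *tag, u8 type, u8 code, u32 header_data)
{
	MLX5_SET(ste_icmp_v1, tag, icmp_type, type);
	MLX5_SET(ste_icmp_v1, tag, icmp_code, code);
	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, header_data);
}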
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 4177786b8eaf..612b0ac31db2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -124,7 +124,10 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bdafc85fd874..e05c5c0f3ae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
@@ -1160,6 +1161,18 @@ EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
- return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
+ return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
+
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
+{
+ u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
+}
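
[editor's note, not part of the patch] A hedged caller-side sketch for the new helper: the output buffer must be sized for query_hca_cap_out and is typically heap-allocated:

static int sketch_query_other_func_gen_cap(struct mlx5_core_dev *dev,
					   u16 func_id)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out;
	int err;

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(dev, func_id, out);
	kvfree(out);	/* a real caller would parse the caps before freeing */
	return err;
}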
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 5d9ddf36fb4e..e6f677e42007 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -267,7 +267,7 @@ struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
const void *first_tlv_ptr;
const void *cb_top_ptr;
- mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ mfa2_file = kzalloc(sizeof(*mfa2_file), GFP_KERNEL);
if (!mfa2_file)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 685037e052af..52fdc34251ba 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -84,6 +84,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ atomic_t active_ports_count;
bool fw_flash_in_progress;
struct {
struct devlink_health_reporter *fw_fatal;
@@ -96,8 +97,36 @@ struct mlxsw_core {
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
-static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+static u64 mlxsw_ports_occ_get(void *priv)
{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ return atomic_read(&mlxsw_core->active_ports_count);
+}
+
+static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params ports_num_params;
+ u32 max_ports;
+
+ max_ports = mlxsw_core->max_ports - 1;
+ devlink_resource_size_params_init(&ports_num_params, max_ports,
+ max_ports, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
+}
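
[editor's note, not part of the patch] The size params above pin the generic "ports" resource to a fixed size of max_ports - 1 entries (local port 0 is the CPU port and is not exposed), with min == max and a granularity of one, so the size is reported but not user-resizable. A condensed sketch of that idiom:

static void sketch_fixed_size_params(struct devlink_resource_size_params *p,
				     u32 size)
{
	/* min == max == size => occupancy-only, non-adjustable resource */
	devlink_resource_size_params_init(p, size, size, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}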
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ int err;
+
/* Switch ports are numbered from 1 to queried value */
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
@@ -110,11 +139,30 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
if (!mlxsw_core->ports)
return -ENOMEM;
+ if (!reload) {
+ err = mlxsw_core_resources_ports_register(mlxsw_core);
+ if (err)
+ goto err_resources_ports_register;
+ }
+ atomic_set(&mlxsw_core->active_ports_count, 0);
+ devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
+
return 0;
+
+err_resources_ports_register:
+ kfree(mlxsw_core->ports);
+ return err;
}
-static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+
+ devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ if (!reload)
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+
kfree(mlxsw_core->ports);
}
@@ -1897,7 +1945,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_register_resources;
}
- err = mlxsw_ports_init(mlxsw_core);
+ err = mlxsw_ports_init(mlxsw_core, reload);
if (err)
goto err_ports_init;
@@ -1986,7 +2034,7 @@ err_devlink_register:
err_emad_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
devlink_resources_unregister(devlink, NULL);
@@ -2056,7 +2104,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -2755,16 +2803,25 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
const unsigned char *switch_id,
unsigned char switch_id_len)
{
- return __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber,
- splittable, lanes,
- switch_id, switch_id_len);
+ int err;
+
+ err = __mlxsw_core_port_init(mlxsw_core, local_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ splittable, lanes,
+ switch_id, switch_id_len);
+ if (err)
+ return err;
+
+ atomic_inc(&mlxsw_core->active_ports_count);
+ return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
+ atomic_dec(&mlxsw_core->active_ports_count);
+
__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6b3ccbf6b238..8af7d9d03475 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -19,6 +19,11 @@
#include "cmd.h"
#include "resources.h"
+enum mlxsw_core_resource_id {
+ MLXSW_CORE_RESOURCE_PORTS = 1,
+ MLXSW_CORE_RESOURCE_MAX,
+};
+
struct mlxsw_core;
struct mlxsw_core_port;
struct mlxsw_driver;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 8fa286ccdd6b..bf85ce9835d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,7 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
+ if (crit_temp > emerg_temp) {
+ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+ tz->tzdev->type, crit_temp, emerg_temp);
+ return 0;
+ }
+
/* According to the system thermal requirements, the thermal zones are
* defined with four trip points. The critical and emergency
* temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- if (emerg_temp > crit_temp)
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
MLXSW_THERMAL_MODULE_TEMP_SHIFT;
- else
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
return 0;
}
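
[editor's note, not part of the patch] A worked example of the resulting trip layout, with hypothetical module thresholds crit = 70000 and emerg = 80000 (millidegrees): NORM = HIGH = 70000, HOT = 80000, and CRIT = 80000 + MLXSW_THERMAL_MODULE_TEMP_SHIFT = 90000. A module reporting crit above emerg now triggers a warning and leaves the trip points unchanged, instead of silently clamping CRIT down to emerg as the removed else-branch did.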
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4eeae8d78006..d0052537e627 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -323,8 +323,8 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
struct pci_dev *pdev = mlxsw_pci->pdev;
dma_addr_t mapaddr;
- mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
- if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
@@ -342,7 +342,7 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
if (!frag_len)
return;
- pci_unmap_single(pdev, mapaddr, frag_len, direction);
+ dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
@@ -858,9 +858,9 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size, &mem_item->mapaddr,
+ GFP_KERNEL);
if (!mem_item->buf)
return -ENOMEM;
@@ -890,8 +890,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err_q_ops_init:
kfree(q->elem_info);
err_elem_info_alloc:
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
return err;
}
@@ -903,8 +903,8 @@ static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
q_ops->fini(mlxsw_pci, q);
kfree(q->elem_info);
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1273,9 +1273,9 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mem_item = &mlxsw_pci->fw_area.items[i];
mem_item->size = MLXSW_PCI_PAGE_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size,
+ &mem_item->mapaddr, GFP_KERNEL);
if (!mem_item->buf) {
err = -ENOMEM;
goto err_alloc;
@@ -1304,8 +1304,8 @@ err_alloc:
for (i--; i >= 0; i--) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
return err;
@@ -1321,8 +1321,8 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
}
@@ -1347,8 +1347,8 @@ static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
int err = 0;
mbox->size = MLXSW_CMD_MBOX_SIZE;
- mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
- &mbox->mapaddr);
+ mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr, GFP_KERNEL);
if (!mbox->buf) {
dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
err = -ENOMEM;
@@ -1362,8 +1362,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
- pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
- mbox->mapaddr);
+ dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
}
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
@@ -1848,17 +1848,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
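
[editor's note, not part of the patch] The hunk above adopts the consolidated DMA API idiom in place of the four-call pci_set_*_dma_mask dance. As a standalone sketch:

static int sketch_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit streaming + coherent masks in one call */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}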
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a6956cfc9cb1..d9d9e1f488f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -52,7 +52,7 @@
#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
enum mlxsw_sp_resource_id {
- MLXSW_SP_RESOURCE_KVD = 1,
+ MLXSW_SP_RESOURCE_KVD = MLXSW_CORE_RESOURCE_MAX,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
@@ -328,15 +328,16 @@ struct mlxsw_sp_port_type_speed_ops {
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode);
+ unsigned long *mode);
u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
- void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
- bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd);
+ void (*from_ptys_link_mode)(struct mlxsw_sp *mlxsw_sp,
+ bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
- u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd);
+ u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 540616469e28..bd7f873f6290 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -858,7 +858,7 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
- u8 width, struct ethtool_link_ksettings *cmd)
+ struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -869,13 +869,13 @@ mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
- ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap,
cmd->link_modes.supported);
}
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
- u32 eth_proto_admin, bool autoneg, u8 width,
+ u32 eth_proto_admin, bool autoneg,
struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -886,7 +886,7 @@ mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
return;
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
cmd->link_modes.advertising);
}
@@ -960,16 +960,14 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
ops = mlxsw_sp->port_type_speed_ops;
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
- mlxsw_sp_port->mapping.width, cmd);
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
- mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
- mlxsw_sp_port->mapping.width, cmd);
+ mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
- ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
- eth_proto_oper, cmd);
+ ops->from_ptys_link_mode(mlxsw_sp, netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
return 0;
}
@@ -997,14 +995,13 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
eth_proto_new = autoneg ?
- ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
- cmd) :
- ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
- cmd->base.speed);
+ ops->to_ptys_advert_link(mlxsw_sp, cmd) :
+ ops->to_ptys_speed_lanes(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "No supported speed requested\n");
+ netdev_err(dev, "No supported speed or lanes requested\n");
return -EINVAL;
}
@@ -1063,20 +1060,21 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
- .get_drvinfo = mlxsw_sp_port_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_link_ext_state = mlxsw_sp_port_get_link_ext_state,
- .get_pauseparam = mlxsw_sp_port_get_pauseparam,
- .set_pauseparam = mlxsw_sp_port_set_pauseparam,
- .get_strings = mlxsw_sp_port_get_strings,
- .set_phys_id = mlxsw_sp_port_set_phys_id,
- .get_ethtool_stats = mlxsw_sp_port_get_stats,
- .get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
- .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
- .get_module_info = mlxsw_sp_get_module_info,
- .get_module_eeprom = mlxsw_sp_get_module_eeprom,
- .get_ts_info = mlxsw_sp_get_ts_info,
+ .cap_link_lanes_supported = true,
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_state = mlxsw_sp_port_get_link_ext_state,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .set_phys_id = mlxsw_sp_port_set_phys_id,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
+ .get_module_info = mlxsw_sp_get_module_info,
+ .get_module_eeprom = mlxsw_sp_get_module_eeprom,
+ .get_ts_info = mlxsw_sp_get_ts_info,
};
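
[editor's note, not part of the patch] Setting cap_link_lanes_supported above opts the driver into the lanes-aware ksettings path, so a lanes-capable ethtool can request a specific lane count, e.g. ethtool -s swp1 autoneg off speed 50000 lanes 1 (swp1 is a hypothetical port name). That request flows into to_ptys_speed_lanes() below, which rejects lane counts exceeding the port's configured width.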
struct mlxsw_sp1_port_link_mode {
@@ -1198,7 +1196,7 @@ mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode)
+ unsigned long *mode)
{
int i;
@@ -1223,19 +1221,21 @@ mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
}
static void
-mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
- u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ cmd->link_mode = -1;
if (!carrier_ok)
return;
- cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
- if (cmd->base.speed != SPEED_UNKNOWN)
- cmd->base.duplex = DUPLEX_FULL;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+ }
}
static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
@@ -1260,7 +1260,7 @@ static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_
}
static u32
-mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
@@ -1274,14 +1274,17 @@ mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
return ptys_proto;
}
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
- u32 speed)
+static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
+ if (cmd->lanes > width)
+ return ptys_proto;
+
for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sp1_port_link_mode[i].speed)
+ if (cmd->base.speed == mlxsw_sp1_port_link_mode[i].speed)
ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1321,10 +1324,10 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
- .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .from_ptys_link_mode = mlxsw_sp1_from_ptys_link_mode,
.ptys_max_speed = mlxsw_sp1_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
- .to_ptys_speed = mlxsw_sp1_to_ptys_speed,
+ .to_ptys_speed_lanes = mlxsw_sp1_to_ptys_speed_lanes,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
.ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
@@ -1486,7 +1489,8 @@ struct mlxsw_sp2_port_link_mode {
int m_ethtool_len;
u32 mask;
u32 speed;
- u8 mask_width;
+ u32 width;
+ u8 mask_sup_width;
};
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
@@ -1494,105 +1498,117 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_1000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_5000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_10000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_40000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_25000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_50000,
+ .width = 2,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
.speed = SPEED_50000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
.speed = SPEED_100000,
+ .width = 2,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_200000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_400000,
+ .width = 8,
},
};
@@ -1619,14 +1635,12 @@ mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode)
+ unsigned long *mode)
{
- u8 mask_width = mlxsw_sp_port_mask_width_get(width);
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
- (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
mode);
}
@@ -1646,19 +1660,24 @@ mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
}
static void
-mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
- u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
+ struct mlxsw_sp2_port_link_mode link;
+ int i;
+
+ cmd->link_mode = -1;
if (!carrier_ok)
return;
- cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
- if (cmd->base.speed != SPEED_UNKNOWN)
- cmd->base.duplex = DUPLEX_FULL;
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
+ link = mlxsw_sp2_port_link_mode[i];
+ cmd->link_mode = link.mask_ethtool[1];
+ }
+ }
}
static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
@@ -1698,33 +1717,50 @@ mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
}
static u32
-mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
const struct ethtool_link_ksettings *cmd)
{
- u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
- mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
- u8 width, u32 speed)
+static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
{
u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+ struct mlxsw_sp2_port_link_mode link_mode;
u32 ptys_proto = 0;
int i;
+ if (cmd->lanes > width)
+ return ptys_proto;
+
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
- (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
- ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+ if (cmd->base.speed == mlxsw_sp2_port_link_mode[i].speed) {
+ link_mode = mlxsw_sp2_port_link_mode[i];
+
+ if (!cmd->lanes) {
+ /* If the number of lanes was not set by user space,
+ * choose a link mode that supports the width of the
+ * port.
+ */
+ if (mask_width & link_mode.mask_sup_width)
+ ptys_proto |= link_mode.mask;
+ } else if (cmd->lanes == link_mode.width) {
+ /* Otherwise, if the number of lanes was set, choose
+ * the link mode whose actual width matches it.
+ */
+ ptys_proto |= link_mode.mask;
+ }
+ }
}
return ptys_proto;
}
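
[editor's note] The selection rule above is two-tier: with cmd->lanes unset, any mode whose supported-width mask covers the port width is accepted; with lanes set, only modes whose actual width equals the request match. A condensed sketch of that predicate (names hypothetical, widths as plain integers):

#include <stdbool.h>
#include <stdint.h>

struct mode {
	uint32_t speed;
	uint32_t width;		/* actual lane count of this mode */
	uint8_t sup_width_mask;	/* port widths the mode can run on */
};

static bool mode_matches(const struct mode *m, uint32_t req_speed,
			 uint32_t req_lanes, uint8_t port_width_mask)
{
	if (req_speed != m->speed)
		return false;
	if (!req_lanes)				/* lanes unset: width-compatible */
		return m->sup_width_mask & port_width_mask;
	return req_lanes == m->width;		/* lanes set: exact width */
}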
@@ -1764,10 +1800,10 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
- .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .from_ptys_link_mode = mlxsw_sp2_from_ptys_link_mode,
.ptys_max_speed = mlxsw_sp2_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
- .to_ptys_speed = mlxsw_sp2_to_ptys_speed,
+ .to_ptys_speed_lanes = mlxsw_sp2_to_ptys_speed_lanes,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
.ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 41424ee909a0..9ce90841f92d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4309,11 +4309,18 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
if (event != NEXTHOP_EVENT_REPLACE)
return 0;
- if (!info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
info->extack);
- return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp,
- info->extack);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
+ info->nh_grp,
+ info->extack);
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ return -EOPNOTSUPP;
+ }
}
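
[editor's note] Switching on info->type instead of the old is_grp boolean leaves room for future nexthop object kinds while rejecting them explicitly with an extack message. A hedged sketch of the dispatch shape (enum values are stand-ins for the real NH_NOTIFIER_INFO_TYPE_* constants):

#include <errno.h>

enum nh_type { NH_SINGLE, NH_GROUP };

static int nh_validate(int type)
{
	switch (type) {
	case NH_SINGLE:
		return 0;	/* would call the single-nexthop validator */
	case NH_GROUP:
		return 0;	/* would call the group validator */
	default:
		/* unknown kind: refuse rather than misprogram hardware;
		 * the driver also sets an extack message here
		 */
		return -EOPNOTSUPP;
	}
}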
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
@@ -4321,13 +4328,17 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
{
const struct net_device *dev;
- if (info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
/* Already validated earlier. */
return true;
-
- dev = info->nh->dev;
- return info->nh->gw_family || info->nh->is_reject ||
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ default:
+ return false;
+ }
}
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
@@ -4410,11 +4421,22 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct nh_notifier_info *info)
{
- unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
+ unsigned int nhs;
int err, i;
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ nhs = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nhs = info->nh_grp->num_nh;
+ break;
+ default:
+ return -EINVAL;
+ }
+
nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
if (!nhgi)
return -ENOMEM;
@@ -4427,12 +4449,18 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
int weight;
nh = &nhgi->nexthops[i];
- if (info->is_grp) {
- nh_obj = &info->nh_grp->nh_entries[i].nh;
- weight = info->nh_grp->nh_entries[i].weight;
- } else {
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
nh_obj = info->nh;
weight = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_nexthop_obj_init;
}
err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
weight);
@@ -4915,6 +4943,25 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
}
static void
+mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
+{
+ u32 *p_dst = (u32 *) &fen_info->dst;
+ struct fib_rt_info fri;
+
+ fri.fi = fen_info->fi;
+ fri.tb_id = fen_info->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = fen_info->dst_len;
+ fri.tos = fen_info->tos;
+ fri.type = fen_info->type;
+ fri.offload = false;
+ fri.trap = false;
+ fri.offload_failed = true;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
+}
+
+static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
@@ -4935,6 +4982,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
+ fri.offload_failed = false;
fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
@@ -4957,9 +5005,35 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
+ fri.offload_failed = false;
fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ int i;
+
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
+ for (i = 0; i < nrt6; i++)
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
+ false, false, true);
+}
+#else
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+}
+#endif
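
[editor's note] The paired #if IS_ENABLED(CONFIG_IPV6) definitions keep the callers unconditional: when IPv6 is compiled out, the same symbol resolves to an empty stub. A minimal sketch of the pattern with a hypothetical config symbol (in the kernel, IS_ENABLED() also covers the =m case):

#if defined(CONFIG_FOO)
static void foo_flag_set(int *flags) { *flags |= 1; }
#else
static void foo_flag_set(int *flags) { }	/* compiled-out stub */
#endif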
+
+#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
@@ -4976,10 +5050,18 @@ mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
- fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
- !should_offload);
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ should_offload, !should_offload, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
}
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
@@ -4990,8 +5072,16 @@ mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
- fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ false, false, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
}
+#endif
static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
@@ -6976,6 +7066,8 @@ static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
+ &fib_event->fen_info);
}
fib_info_put(fib_event->fen_info.fi);
break;
@@ -6997,6 +7089,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_event *fib_event)
{
+ struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
int err;
mlxsw_sp_span_respin(mlxsw_sp);
@@ -7008,6 +7101,9 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_event->rt_arr,
+ fib6_event->nrt6);
}
mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
@@ -7017,6 +7113,9 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_event->rt_arr,
+ fib6_event->nrt6);
}
mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index c6c5826aba41..1892cea05ee7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
if (!refcount_read(&curr->ref_count))
continue;
+ if (curr->ops->is_static)
+ continue;
+
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;
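
[editor's note] Static SPAN entries (CPU and physical-port mirrors) have fixed parameters, so the respin worker now skips them before calling parms_set(). As a sketch, the filter reduces to early returns keyed off an ops flag (structure names hypothetical):

struct span_ops { int is_static; int (*parms_set)(void *entry); };
struct span_entry { const struct span_ops *ops; int refs; };

static void respin_one(struct span_entry *e)
{
	if (!e->refs)
		return;			/* unused entry */
	if (e->ops->is_static)
		return;			/* nothing can have changed */
	e->ops->parms_set(e);		/* re-resolve dynamic parameters */
}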
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index d907718bc8c5..aa1cd409c0e2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
};
struct mlxsw_sp_span_entry_ops {
+ bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index cea42f6ed89b..23b7e8d6386b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -527,7 +527,6 @@ mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u8 state)
{
@@ -535,9 +534,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -657,56 +653,59 @@ err_port_bridge_vlan_learning_set:
return err;
}
-static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
- *mlxsw_sp_port,
- struct switchdev_trans *trans,
- unsigned long brport_flags)
+static int
+mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_brport_flags flags)
{
- if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
return 0;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_UC,
- brport_flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
- brport_flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
+ bridge_port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
if (bridge_port->bridge_device->multicast_enabled)
goto out;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- brport_flags &
- BR_MCAST_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ flags.val & BR_MCAST_FLOOD);
+ if (err)
+ return err;
+ }
out:
- memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
+ memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
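
[editor's note] struct switchdev_brport_flags carries both a mask (which bits the request touches) and a val (their new values), so each hardware update above is gated on the mask bit. A compact sketch of the val/mask idiom with hypothetical flag bits and setter callbacks:

#include <stdbool.h>

#define F_FLOOD		(1u << 0)
#define F_LEARNING	(1u << 1)

struct brport_flags { unsigned long val, mask; };

static int apply_flags(struct brport_flags f,
		       int (*set_flood)(bool), int (*set_learning)(bool))
{
	int err;

	if (f.mask & F_FLOOD) {			/* only touch requested bits */
		err = set_flood(f.val & F_FLOOD);
		if (err)
			return err;
	}
	if (f.mask & F_LEARNING) {
		err = set_learning(f.val & F_LEARNING);
		if (err)
			return err;
	}
	return 0;
}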
@@ -724,35 +723,26 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
}
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
- ageing_time > MLXSW_SP_MAX_AGEING_TIME)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -765,16 +755,12 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u16 vlan_proto)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -784,16 +770,12 @@ static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_p
}
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -825,7 +807,6 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool mc_disabled)
{
@@ -834,9 +815,6 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -896,16 +874,12 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_mrouter)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -922,53 +896,52 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
- trans,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
- err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_protocol);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
- err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
@@ -977,8 +950,7 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
- if (switchdev_trans_ph_commit(trans))
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1211,23 +1183,20 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_port_vlan *vlan)
{
u16 pvid;
- u16 vid;
pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
if (!pvid)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vlan->vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vlan->vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
}
}
@@ -1236,7 +1205,6 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1244,14 +1212,12 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev) &&
- switchdev_trans_ph_prepare(trans))
+ br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -1259,9 +1225,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1269,17 +1232,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
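
[editor's note] With the vid_begin/vid_end range gone, each switchdev VLAN notification carries exactly one VID, so the per-VID loop collapses into a single call. A sketch of the resulting shape (the flag values match if_bridge.h; the add callback is hypothetical):

#include <stdbool.h>

#define BRIDGE_VLAN_INFO_PVID		(1u << 1)
#define BRIDGE_VLAN_INFO_UNTAGGED	(1u << 2)

struct port_vlan { unsigned short vid; unsigned short flags; };

/* one notification == one VID; flags decoded once, no range loop */
static int vlans_add(const struct port_vlan *vlan,
		     int (*add)(unsigned short vid, bool untagged, bool pvid))
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	return add(vlan->vid, untagged, pvid);
}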
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
@@ -1716,8 +1671,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -1729,9 +1683,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
return 0;
@@ -1813,7 +1764,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1823,22 +1773,19 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
- extack);
- if (switchdev_trans_ph_prepare(trans)) {
- /* The event is emitted before the changes are actually
- * applied to the bridge. Therefore schedule the respin
- * call for later, so that the respin logic sees the
- * updated bridge state.
- */
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
- }
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
+
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1873,7 +1820,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
@@ -1885,8 +1831,7 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
return 0;
}
@@ -3406,12 +3351,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct switchdev_trans *trans = port_obj_info->trans;
struct mlxsw_sp_bridge_device *bridge_device;
struct netlink_ext_ack *extack;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
br_dev = netdev_master_upper_dev_get(vxlan_dev);
@@ -3424,9 +3367,6 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
port_obj_info->handled = true;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
@@ -3434,18 +3374,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
- vxlan_dev, vid,
- flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vlan->vid,
+ flag_untagged,
+ flag_pvid, extack);
}
static void
@@ -3458,7 +3390,6 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
br_dev = netdev_master_upper_dev_get(vxlan_dev);
if (!br_dev)
@@ -3477,9 +3408,8 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
- vxlan_dev, vid);
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
+ vlan->vid);
}
static int
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 42bc014136fe..93df3049cdc0 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -31,6 +31,8 @@ config KS8851
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
SPI driver for Micrel KS8851 SPI attached network chip.
@@ -40,6 +42,8 @@ config KS8851_MLL
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 2b319e451121..e2eb0caeac82 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -358,6 +358,7 @@ union ks8851_tx_hdr {
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
* @gpio: Optional reset_n gpio
+ * @mii_bus: Pointer to MII bus structure
* @lock: Bus access lock callback
* @unlock: Bus access unlock callback
* @rdreg16: 16bit register read callback
@@ -403,6 +404,7 @@ struct ks8851_net {
struct regulator *vdd_reg;
struct regulator *vdd_io;
int gpio;
+ struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
unsigned long *flags);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 6fc7483aea03..2feed6ce19d3 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -23,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "ks8851.h"
@@ -932,7 +931,25 @@ static int ks8851_phy_reg(int reg)
return KS_P1ANLPR;
}
- return 0x0;
+ return -EOPNOTSUPP;
+}
+
+static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ int result;
+ int ksreg;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (ksreg < 0)
+ return ksreg;
+
+ ks8851_lock(ks, &flags);
+ result = ks8851_rdreg16(ks, ksreg);
+ ks8851_unlock(ks, &flags);
+
+ return result;
}
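
[editor's note] Returning -EOPNOTSUPP from ks8851_phy_reg() (instead of the old ambiguous 0, which is itself a valid register offset) lets both the mii and mdiobus paths share one locked read helper. A sketch of the mapper-plus-helper split (chip register offset hypothetical):

#include <errno.h>

#define REG_BMCR	0x34	/* hypothetical chip offset */

static int map_phy_reg(int mii_reg)
{
	switch (mii_reg) {
	case 0:			/* MII_BMCR */
		return REG_BMCR;
	default:
		return -EOPNOTSUPP;	/* unmapped: a real errno, not 0 */
	}
}

static int phy_read_common(int mii_reg, int (*rdreg16)(int))
{
	int ksreg = map_phy_reg(mii_reg);

	if (ksreg < 0)
		return ksreg;		/* propagate the error */
	return rdreg16(ksreg);		/* caller serializes bus access */
}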
/**
@@ -952,20 +969,13 @@ static int ks8851_phy_reg(int reg)
*/
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
- int ksreg;
- int result;
+ int ret;
- ksreg = ks8851_phy_reg(reg);
- if (!ksreg)
+ ret = ks8851_phy_read_common(dev, phy_addr, reg);
+ if (ret < 0)
return 0x0; /* no error return allowed, so use zero */
- ks8851_lock(ks, &flags);
- result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
-
- return result;
+ return ret;
}
static void ks8851_phy_write(struct net_device *dev,
@@ -976,13 +986,37 @@ static void ks8851_phy_write(struct net_device *dev,
int ksreg;
ksreg = ks8851_phy_reg(reg);
- if (ksreg) {
+ if (ksreg >= 0) {
ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
ks8851_unlock(ks, &flags);
}
}
+static int ks8851_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ if (phy_id != 0)
+ return -EOPNOTSUPP;
+
+ /* KS8851 PHY ID registers are swapped in HW, swap them back. */
+ if (reg == MII_PHYSID1)
+ reg = MII_PHYSID2;
+ else if (reg == MII_PHYSID2)
+ reg = MII_PHYSID1;
+
+ return ks8851_phy_read_common(ks->netdev, phy_id, reg);
+}
+
+static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ ks8851_phy_write(ks->netdev, phy_id, reg, val);
+ return 0;
+}
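
[editor's note] The mdiobus callbacks are thin wrappers: only PHY address 0 exists on this bus, and the chip stores the two PHY ID registers swapped, so they are exchanged before the common read. A sketch of the swap (MII register numbers as in mii.h; the read callback is a hypothetical stand-in):

#include <errno.h>

#define MII_PHYSID1	0x02
#define MII_PHYSID2	0x03

static int mdio_read(int phy_id, int reg, int (*read_common)(int reg))
{
	if (phy_id != 0)
		return -EOPNOTSUPP;	/* single-PHY bus */

	/* hardware swaps the ID registers; undo it here */
	if (reg == MII_PHYSID1)
		reg = MII_PHYSID2;
	else if (reg == MII_PHYSID2)
		reg = MII_PHYSID1;

	return read_common(reg);
}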
+
/**
* ks8851_read_selftest - read the selftest memory info.
* @ks: The device state
@@ -1046,6 +1080,42 @@ int ks8851_resume(struct device *dev)
}
#endif
+static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ks8851_eth_mii";
+ mii_bus->read = ks8851_mdio_read;
+ mii_bus->write = ks8851_mdio_write;
+ mii_bus->priv = ks;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)BIT(0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = mdiobus_register(mii_bus);
+ if (ret)
+ goto err_mdiobus_register;
+
+ ks->mii_bus = mii_bus;
+
+ return 0;
+
+err_mdiobus_register:
+ mdiobus_free(mii_bus);
+ return ret;
+}
+
+static void ks8851_unregister_mdiobus(struct ks8851_net *ks)
+{
+ mdiobus_unregister(ks->mii_bus);
+ mdiobus_free(ks->mii_bus);
+}
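
[editor's note] Registration follows the usual alloc/configure/register sequence with a free on the error path; phy_mask = ~BIT(0) restricts probing to address 0. A hedged kernel-context sketch of that lifecycle against the real mdiobus API (helper name and callback wiring are illustrative):

#include <linux/bits.h>
#include <linux/phy.h>

/* sketch: register a single-address MDIO bus; error path frees the bus */
static int register_bus(struct device *dev, void *priv,
			int (*rd)(struct mii_bus *, int, int),
			int (*wr)(struct mii_bus *, int, int, u16),
			struct mii_bus **out)
{
	struct mii_bus *bus = mdiobus_alloc();
	int ret;

	if (!bus)
		return -ENOMEM;
	bus->name = "example_mii";
	bus->read = rd;
	bus->write = wr;
	bus->priv = priv;
	bus->parent = dev;
	bus->phy_mask = ~((u32)BIT(0));		/* probe address 0 only */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	ret = mdiobus_register(bus);
	if (ret) {
		mdiobus_free(bus);		/* undo the allocation */
		return ret;
	}
	*out = bus;
	return 0;
}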
+
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en)
{
@@ -1104,6 +1174,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+ SET_NETDEV_DEV(netdev, dev);
+
/* setup EEPROM state */
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
@@ -1120,6 +1192,10 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
dev_info(dev, "message enable is %d\n", msg_en);
+ ret = ks8851_register_mdiobus(ks, dev);
+ if (ret)
+ goto err_mdio;
+
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1128,7 +1204,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
skb_queue_head_init(&ks->txq);
netdev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(netdev, dev);
dev_set_drvdata(dev, ks);
@@ -1156,7 +1231,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "failed to register network device\n");
- goto err_netdev;
+ goto err_id;
}
netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
@@ -1165,8 +1240,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
return 0;
-err_netdev:
err_id:
+ ks8851_unregister_mdiobus(ks);
+err_mdio:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
regulator_disable(ks->vdd_reg);
@@ -1180,6 +1256,8 @@ int ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
+ ks8851_unregister_mdiobus(priv);
+
if (netif_msg_drv(priv))
dev_info(dev, "remove\n");
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 3bab0cb2b1a5..2e8fcce50f9d 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 4ec7f1615977..479406ecbaa3 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 3804310c853a..dbdfabff3b00 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1253,7 +1253,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
goto done;
- descriptor_type = (descriptor->data0) &
+ descriptor_type = le32_to_cpu(descriptor->data0) &
TX_DESC_DATA0_DTYPE_MASK_;
if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
goto clean_up_data_descriptor;
@@ -1313,7 +1313,7 @@ static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
- while ((*tx->head_cpu_ptr) != (tx->last_head)) {
+ while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
lan743x_tx_release_desc(tx, tx->last_head, false);
tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
}
@@ -1399,10 +1399,10 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
if (dma_mapping_error(dev, dma_ptr))
return -ENOMEM;
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1443,7 +1443,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1487,7 +1487,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1513,10 +1513,10 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
return -ENOMEM;
}
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1560,7 +1560,7 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -1926,15 +1926,6 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
return ((++index) % rx->ring_size);
}
-static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
-{
- int length = 0;
-
- length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
- return __netdev_alloc_skb(rx->adapter->netdev,
- length, GFP_ATOMIC | GFP_DMA);
-}
-
static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
{
/* update the tail once per 8 descriptors */
@@ -1943,36 +1934,57 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
- struct sk_buff *skb)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
{
+ struct net_device *netdev = rx->adapter->netdev;
+ struct device *dev = &rx->adapter->pdev->dev;
struct lan743x_rx_buffer_info *buffer_info;
+ unsigned int buffer_length, used_length;
struct lan743x_rx_descriptor *descriptor;
- int length = 0;
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+
+ buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
- length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- buffer_info->skb = skb;
- if (!(buffer_info->skb))
+ skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ if (!skb)
return -ENOMEM;
- buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
- buffer_info->skb->data,
- length,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&rx->adapter->pdev->dev,
- buffer_info->dma_ptr)) {
- buffer_info->dma_ptr = 0;
+ dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_ptr)) {
+ dev_kfree_skb_any(skb);
return -ENOMEM;
}
+ if (buffer_info->dma_ptr) {
+ /* sync used area of buffer only */
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
+ /* frame length is valid only if LS bit is set.
+ * it's a safe upper bound for the used area in this
+ * buffer.
+ */
+ used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
+ (le32_to_cpu(descriptor->data0)),
+ buffer_info->buffer_length);
+ else
+ used_length = buffer_info->buffer_length;
+ dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
+ used_length,
+ DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
- buffer_info->buffer_length = length;
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ buffer_info->skb = skb;
+ buffer_info->dma_ptr = dma_ptr;
+ buffer_info->buffer_length = buffer_length;
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
- (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
- skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
lan743x_rx_update_tail(rx, index);
return 0;
@@ -1986,12 +1998,12 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
((buffer_info->buffer_length) &
- RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ RX_DESC_DATA0_BUF_LENGTH_MASK_)));
lan743x_rx_update_tail(rx, index);
}
@@ -2021,16 +2033,32 @@ static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
memset(buffer_info, 0, sizeof(*buffer_info));
}
-static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+static struct sk_buff *
+lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
+{
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_irq(skb);
+ return NULL;
+ }
+ frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 2);
+ if (skb->len > frame_length) {
+ skb->tail -= skb->len - frame_length;
+ skb->len = frame_length;
+ }
+ return skb;
+}
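
[editor's note] lan743x_rx_trim_skb() linearizes the assembled frag-list skb and cuts it down to the wire frame length, less the head padding and a 2-byte tail adjustment. The open-coded tail/len update is equivalent to skb_trim() on a linear skb; a kernel-context sketch under that assumption:

#include <linux/skbuff.h>

/* sketch: trim an assembled frame to its real length; PAD and the
 * 2-byte tail adjustment mirror the driver's arithmetic
 */
static struct sk_buff *trim_to_frame(struct sk_buff *skb, int frame_length,
				     int pad)
{
	int want = frame_length - pad - 2;

	if (skb_linearize(skb)) {	/* pull frag_list into one buffer */
		dev_kfree_skb_any(skb);
		return NULL;
	}
	if (want < 0)
		want = 0;
	if (skb->len > (unsigned int)want)
		skb_trim(skb, want);	/* drops tail bytes, keeps headroom */
	return skb;
}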
+
+static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
{
- struct skb_shared_hwtstamps *hwtstamps = NULL;
+ int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
+ struct lan743x_rx_descriptor *descriptor, *desc_ext;
+ struct net_device *netdev = rx->adapter->netdev;
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
- int current_head_index = *rx->head_cpu_ptr;
struct lan743x_rx_buffer_info *buffer_info;
- struct lan743x_rx_descriptor *descriptor;
+ int frame_length, buffer_length;
int extension_index = -1;
- int first_index = -1;
- int last_index = -1;
+ bool is_last, is_first;
+ struct sk_buff *skb;
if (current_head_index < 0 || current_head_index >= rx->ring_size)
goto done;
@@ -2038,163 +2066,120 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
goto done;
- if (rx->last_head != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[rx->last_head];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
- goto done;
+ if (rx->last_head == current_head_index)
+ goto done;
- if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
- goto done;
+ descriptor = &rx->ring_cpu_ptr[rx->last_head];
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
+ goto done;
+ buffer_info = &rx->buffer_info[rx->last_head];
- first_index = rx->last_head;
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
- last_index = rx->last_head;
- } else {
- int index;
-
- index = lan743x_rx_next_index(rx, first_index);
- while (index != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
- goto done;
-
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
- last_index = index;
- break;
- }
- index = lan743x_rx_next_index(rx, index);
- }
- }
- if (last_index >= 0) {
- descriptor = &rx->ring_cpu_ptr[last_index];
- if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
- /* extension is expected to follow */
- int index = lan743x_rx_next_index(rx,
- last_index);
- if (index != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 &
- RX_DESC_DATA0_OWN_) {
- goto done;
- }
- if (descriptor->data0 &
- RX_DESC_DATA0_EXT_) {
- extension_index = index;
- } else {
- goto done;
- }
- } else {
- /* extension is not yet available */
- /* prevent processing of this packet */
- first_index = -1;
- last_index = -1;
- }
- }
- }
- }
- if (first_index >= 0 && last_index >= 0) {
- int real_last_index = last_index;
- struct sk_buff *skb = NULL;
- u32 ts_sec = 0;
- u32 ts_nsec = 0;
-
- /* packet is available */
- if (first_index == last_index) {
- /* single buffer packet */
- struct sk_buff *new_skb = NULL;
- int packet_length;
-
- new_skb = lan743x_rx_allocate_skb(rx);
- if (!new_skb) {
- /* failed to allocate next skb.
- * Memory is very low.
- * Drop this packet and reuse buffer.
- */
- lan743x_rx_reuse_ring_element(rx, first_index);
- goto process_extension;
- }
+ is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
+ is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
- buffer_info = &rx->buffer_info[first_index];
- skb = buffer_info->skb;
- descriptor = &rx->ring_cpu_ptr[first_index];
-
- /* unmap from dma */
- if (buffer_info->dma_ptr) {
- dma_unmap_single(&rx->adapter->pdev->dev,
- buffer_info->dma_ptr,
- buffer_info->buffer_length,
- DMA_FROM_DEVICE);
- buffer_info->dma_ptr = 0;
- buffer_info->buffer_length = 0;
- }
- buffer_info->skb = NULL;
- packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
- (descriptor->data0);
- skb_put(skb, packet_length - 4);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
- lan743x_rx_init_ring_element(rx, first_index, new_skb);
- } else {
- int index = first_index;
+ if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
+ /* extension is expected to follow */
+ int index = lan743x_rx_next_index(rx, rx->last_head);
- /* multi buffer packet not supported */
- /* this should not happen since
- * buffers are allocated to be at least jumbo size
- */
+ if (index == current_head_index)
+ /* extension not yet available */
+ goto done;
+ desc_ext = &rx->ring_cpu_ptr[index];
+ if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
+ /* extension not yet available */
+ goto done;
+ if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
+ goto move_forward;
+ extension_index = index;
+ }
- /* clean up buffers */
- if (first_index <= last_index) {
- while ((index >= first_index) &&
- (index <= last_index)) {
- lan743x_rx_reuse_ring_element(rx,
- index);
- index = lan743x_rx_next_index(rx,
- index);
- }
- } else {
- while ((index >= first_index) ||
- (index <= last_index)) {
- lan743x_rx_reuse_ring_element(rx,
- index);
- index = lan743x_rx_next_index(rx,
- index);
- }
- }
- }
+ /* Only the last buffer in a multi-buffer frame contains the total frame
+ * length. The chip occasionally sends more buffers than strictly
+ * required to reach the total frame length.
+ * Handle this by adding all buffers to the skb in their entirety.
+ * Once the real frame length is known, trim the skb.
+ */
+ frame_length =
+ RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
+ buffer_length = buffer_info->buffer_length;
+
+ netdev_dbg(netdev, "%s%schunk: %d/%d",
+ is_first ? "first " : " ",
+ is_last ? "last " : " ",
+ frame_length, buffer_length);
+
+ /* save existing skb, allocate new skb and map to dma */
+ skb = buffer_info->skb;
+ if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+ */
+ lan743x_rx_reuse_ring_element(rx, rx->last_head);
+ /* drop packet that was being assembled */
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = NULL;
+ goto process_extension;
+ }
+
+ /* add buffers to skb via skb->frag_list */
+ if (is_first) {
+ skb_reserve(skb, RX_HEAD_PADDING);
+ skb_put(skb, buffer_length - RX_HEAD_PADDING);
+ if (rx->skb_head)
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = skb;
+ } else if (rx->skb_head) {
+ skb_put(skb, buffer_length);
+ if (skb_shinfo(rx->skb_head)->frag_list)
+ rx->skb_tail->next = skb;
+ else
+ skb_shinfo(rx->skb_head)->frag_list = skb;
+ rx->skb_tail = skb;
+ rx->skb_head->len += skb->len;
+ rx->skb_head->data_len += skb->len;
+ rx->skb_head->truesize += skb->truesize;
+ } else {
+ /* packet to assemble has already been dropped because one or
+ * more of its buffers could not be allocated
+ */
+ netdev_dbg(netdev, "drop buffer intended for dropped packet");
+ dev_kfree_skb_irq(skb);
+ }
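
[editor's note] Multi-buffer frames are assembled by hanging follow-on skbs off the head skb's frag_list and keeping the head's len/data_len/truesize totals in sync, which is what the branch above does. A sketch of the append step with the same bookkeeping (names shortened):

#include <linux/skbuff.h>

/* sketch: append one buffer skb to the frame being assembled */
static void frag_list_append(struct sk_buff *head, struct sk_buff **tail,
			     struct sk_buff *skb)
{
	if (skb_shinfo(head)->frag_list)
		(*tail)->next = skb;		   /* link after current tail */
	else
		skb_shinfo(head)->frag_list = skb; /* first fragment */
	*tail = skb;

	/* head's totals must cover the whole chain */
	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;
}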
process_extension:
- if (extension_index >= 0) {
- descriptor = &rx->ring_cpu_ptr[extension_index];
- buffer_info = &rx->buffer_info[extension_index];
-
- ts_sec = descriptor->data1;
- ts_nsec = (descriptor->data2 &
- RX_DESC_DATA2_TS_NS_MASK_);
- lan743x_rx_reuse_ring_element(rx, extension_index);
- real_last_index = extension_index;
- }
+ if (extension_index >= 0) {
+ u32 ts_sec;
+ u32 ts_nsec;
- if (!skb) {
- result = RX_PROCESS_RESULT_PACKET_DROPPED;
- goto move_forward;
- }
+ ts_sec = le32_to_cpu(desc_ext->data1);
+ ts_nsec = (le32_to_cpu(desc_ext->data2) &
+ RX_DESC_DATA2_TS_NS_MASK_);
+ if (rx->skb_head)
+ skb_hwtstamps(rx->skb_head)->hwtstamp =
+ ktime_set(ts_sec, ts_nsec);
+ lan743x_rx_reuse_ring_element(rx, extension_index);
+ rx->last_head = extension_index;
+ netdev_dbg(netdev, "process extension");
+ }
- if (extension_index < 0)
- goto pass_packet_to_os;
- hwtstamps = skb_hwtstamps(skb);
- if (hwtstamps)
- hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
+ if (is_last && rx->skb_head)
+ rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
-pass_packet_to_os:
- /* pass packet to OS */
- napi_gro_receive(&rx->napi, skb);
- result = RX_PROCESS_RESULT_PACKET_RECEIVED;
+ if (is_last && rx->skb_head) {
+ rx->skb_head->protocol = eth_type_trans(rx->skb_head,
+ rx->adapter->netdev);
+ netdev_dbg(netdev, "sending %d byte frame to OS",
+ rx->skb_head->len);
+ napi_gro_receive(&rx->napi, rx->skb_head);
+ rx->skb_head = NULL;
+ }
move_forward:
- /* push tail and head forward */
- rx->last_tail = real_last_index;
- rx->last_head = lan743x_rx_next_index(rx, real_last_index);
- }
+ /* push tail and head forward */
+ rx->last_tail = rx->last_head;
+ rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
+ result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
done:
return result;
}
@@ -2213,12 +2198,12 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
DMAC_INT_BIT_RXFRM_(rx->channel_number));
}
for (count = 0; count < weight; count++) {
- result = lan743x_rx_process_packet(rx);
+ result = lan743x_rx_process_buffer(rx);
if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
break;
}
rx->frame_count += count;
- if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
+ if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
return weight;
if (!napi_complete_done(napi, count))
@@ -2330,9 +2315,7 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
-
- ret = lan743x_rx_init_ring_element(rx, index, new_skb);
+ ret = lan743x_rx_init_ring_element(rx, index);
if (ret)
goto cleanup;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 404af3f4635e..6080028c1df2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -661,7 +661,7 @@ struct lan743x_tx {
struct lan743x_tx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
int last_head;
int last_tail;
@@ -691,7 +691,7 @@ struct lan743x_rx {
struct lan743x_rx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
u32 last_head;
u32 last_tail;
@@ -699,6 +699,8 @@ struct lan743x_rx {
struct napi_struct napi;
u32 frame_count;
+
+ struct sk_buff *skb_head, *skb_tail;
};
struct lan743x_adapter {
@@ -775,10 +777,10 @@ struct lan743x_adapter {
#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000)
struct lan743x_tx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
@@ -813,10 +815,10 @@ struct lan743x_tx_buffer_info {
#define RX_HEAD_PADDING NET_IP_ALIGN
struct lan743x_rx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
@@ -831,8 +833,7 @@ struct lan743x_rx_buffer_info {
#define LAN743X_RX_RING_SIZE (65)
#define RX_PROCESS_RESULT_NOTHING_TO_DO (0)
-#define RX_PROCESS_RESULT_PACKET_RECEIVED (1)
-#define RX_PROCESS_RESULT_PACKET_DROPPED (2)
+#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1)
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index ee7bb7e24e8e..c0ede0ca7115 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -14,6 +14,7 @@ if NET_VENDOR_MICROSEMI
# Users should depend on NET_SWITCHDEV, HAS_IOMEM
config MSCC_OCELOT_SWITCH_LIB
select REGMAP_MMIO
+ select PACKING
select PHYLIB
tristate
help
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 58f94c3d80f9..722c27694b21 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -6,7 +6,9 @@ mscc_ocelot_switch_lib-y := \
ocelot_police.o \
ocelot_vcap.o \
ocelot_flower.o \
- ocelot_ptp.o
+ ocelot_ptp.o \
+ ocelot_devlink.o
+mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_vsc7514.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0b9992bd6626..46e5c9136bac 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Microsemi Corporation
*/
+#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
@@ -60,14 +61,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
- ANA_TABLES_MACACCESS_DEST_IDX(port) |
- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
- ANA_TABLES_MACACCESS);
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot);
}
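
For clarity, the CPU-copy test above can be restated standalone; the enum and helper names here are hypothetical, and only the byte extraction plus the BIT(num_phys_ports) test mirror the patch:

#include <stdbool.h>
#include <stdint.h>

enum entry_type { ENTRYTYPE_NORMAL, ENTRYTYPE_MACV4, ENTRYTYPE_MACV6 };

/* MACv4/MACv6 multicast entries encode their destination port mask in the
 * leading MAC address bytes; the CPU port module is the bit just above the
 * last front port.
 */
static bool needs_cpu_copy(const uint8_t mac[6], enum entry_type type,
			   int num_phys_ports)
{
	unsigned int mc_ports;

	if (type == ENTRYTYPE_MACV4)
		mc_ports = (mac[1] << 8) | mac[2];
	else if (type == ENTRYTYPE_MACV6)
		mc_ports = (mac[0] << 8) | mac[1];
	else
		mc_ports = 0;

	return mc_ports & (1u << num_phys_ports);
}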
@@ -208,25 +222,20 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware, struct switchdev_trans *trans)
+ bool vlan_aware)
{
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
u32 val;
- if (switchdev_trans_ph_prepare(trans)) {
- struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
- struct ocelot_vcap_filter *filter;
-
- list_for_each_entry(filter, &block->rules, list) {
- if (filter->ingress_port_mask & BIT(port) &&
- filter->action.vid_replace_ena) {
- dev_err(ocelot->dev,
- "Cannot change VLAN state with vlan modify rules active\n");
- return -EBUSY;
- }
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
}
-
- return 0;
}
ocelot_port->vlan_aware = vlan_aware;
@@ -362,6 +371,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
}
}
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+ return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+ int err, val;
+
+ /* Disable dequeuing from the egress queues */
+ ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE, port);
+
+ /* Disable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+ /* Disable priority flow control */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+ /* Wait at least the time it takes to receive a frame of maximum length
+ * at the port.
+ * Worst-case delays for 10 kilobyte jumbo frames are:
+ * 8 ms on a 10M port
+ * 800 μs on a 100M port
+ * 80 μs on a 1G port
+ * 32 μs on a 2.5G port
+ */
+ usleep_range(8000, 10000);
+
+ /* Disable half duplex backpressure. */
+ ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+ SYS_FRONT_PORT_MODE, port);
+
+ /* Flush the queues associated with the port. */
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+ REW_PORT_CFG, port);
+
+ /* Enable dequeuing from the egress queues. */
+ ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+ port);
+
+ /* Wait until flushing is complete. */
+ err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+ 100, 2000000, false, ocelot, port);
+
+ /* Clear flushing again. */
+ ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
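
The delays quoted in the comment follow directly from serialization time; a throwaway userspace check of the arithmetic (illustrative only):

#include <stdio.h>

/* Serialization time of one frame in microseconds:
 * bits / (Mbit/s) == (bytes * 8) / mbps
 */
static unsigned int frame_time_us(unsigned int frame_bytes, unsigned int mbps)
{
	return frame_bytes * 8 / mbps;
}

int main(void)
{
	printf("%u\n", frame_time_us(10000, 10));   /* 8000 us on a 10M port */
	printf("%u\n", frame_time_us(10000, 100));  /* 800 us on a 100M port */
	printf("%u\n", frame_time_us(10000, 1000)); /* 80 us on a 1G port */
	printf("%u\n", frame_time_us(10000, 2500)); /* 32 us on a 2.5G port */
	return 0;
}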
+
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{
@@ -566,6 +629,229 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+ u32 *rval)
+{
+ u32 bytes_valid, val;
+
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_NOT_READY) {
+ if (ifh)
+ return -EIO;
+
+ do {
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ } while (val == XTR_NOT_READY);
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_ESCAPE)
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
+{
+ int i, err = 0;
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
+ err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);
+ if (err != 4)
+ return (err < 0) ? err : -EIO;
+ }
+
+ return 0;
+}
+
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 tod_in_ns, full_ts_in_ns, cpuq;
+ u64 timestamp, src_port, len;
+ u32 xfh[OCELOT_TAG_LEN / 4];
+ struct net_device *dev;
+ struct timespec64 ts;
+ struct sk_buff *skb;
+ int sz, buf_len;
+ u32 val, *buf;
+ int err;
+
+ err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
+ if (err)
+ return err;
+
+ ocelot_xfh_get_src_port(xfh, &src_port);
+ ocelot_xfh_get_len(xfh, &len);
+ ocelot_xfh_get_rew_val(xfh, &timestamp);
+ ocelot_xfh_get_cpuq(xfh, &cpuq);
+
+ if (WARN_ON(src_port >= ocelot->num_phys_ports))
+ return -EINVAL;
+
+ dev = ocelot->ops->port_to_netdev(ocelot, src_port);
+ if (!dev)
+ return -EINVAL;
+
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ return -ENOMEM;
+ }
+
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ if (sz < 0) {
+ err = sz;
+ goto out_free_skb;
+ }
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ if (sz < 0) {
+ err = sz;
+ goto out_free_skb;
+ }
+
+ /* Update the length, as part of the FCS may already have been read */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded.
+ */
+ if (ocelot->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if IS_ENABLED(CONFIG_BRIDGE_MRP)
+ if (skb->protocol == cpu_to_be16(ETH_P_MRP) &&
+ cpuq & BIT(OCELOT_MRP_CPUQ))
+ skb->offload_fwd_mark = 0;
+#endif
+
+ *nskb = skb;
+
+ return 0;
+
+out_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_xtr_poll_frame);
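
A caller would typically drain an extraction group from its RX interrupt handler along these lines; this is a hedged sketch, with the IRQ registration and statistics handling assumed and omitted:

static irqreturn_t example_xtr_irq_handler(int irq, void *arg)
{
	struct ocelot *ocelot = arg;
	int grp = 0;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;

		if (ocelot_xtr_poll_frame(ocelot, grp, &skb)) {
			/* On error, flush whatever is left in the queue */
			ocelot_drain_cpu_queue(ocelot, grp);
			break;
		}

		netif_rx(skb);
	}

	return IRQ_HANDLED;
}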
+
+bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+{
+ u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
+ return false;
+ if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ocelot_can_inject);
+
+void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb)
+{
+ u32 ifh[OCELOT_TAG_LEN / 4] = {0};
+ unsigned int i, count, last;
+
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+ ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+ ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+ ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; i++)
+ ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+ /* Add padding */
+ while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ i++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+ QS_INJ_CTRL_EOF,
+ QS_INJ_CTRL, grp);
+
+ /* Add dummy CRC */
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ skb_tx_timestamp(skb);
+
+ skb->dev->stats.tx_packets++;
+ skb->dev->stats.tx_bytes += skb->len;
+}
+EXPORT_SYMBOL(ocelot_port_inject_frame);
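
Injection is meant to be used in tandem with the FIFO-readiness check; a sketch of a transmit path built on the two helpers (passing rew_op 0, i.e. no rewriter action, is an assumption here):

static int example_inject(struct ocelot *ocelot, int port, struct sk_buff *skb)
{
	int grp = 0;

	if (!ocelot_can_inject(ocelot, grp))
		return -EBUSY;  /* caller may stop the queue and retry later */

	ocelot_port_inject_frame(ocelot, port, grp, 0, skb);
	/* The frame was copied word by word into the FIFO registers, so the
	 * skb itself can be freed; TX statistics were already updated.
	 */
	consume_skb(skb);

	return 0;
}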
+
+void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
+{
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+}
+EXPORT_SYMBOL(ocelot_drain_cpu_queue);
+
int ocelot_fdb_add(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid)
{
@@ -881,10 +1167,103 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
+static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
+ bool only_active_ports)
+{
+ u32 mask = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->bond == bond) {
+ if (only_active_ports && !ocelot_port->lag_tx_active)
+ continue;
+
+ mask |= BIT(port);
+ }
+ }
+
+ return mask;
+}
+
+static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+{
+ u32 mask = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->is_dsa_8021q_cpu)
+ mask |= BIT(port);
+ }
+
+ return mask;
+}
+
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+{
+ unsigned long cpu_fwd_mask;
+ int port;
+
+ /* If a DSA tag_8021q CPU exists, it needs to be included in the
+ * regular forwarding path of the front ports regardless of whether
+ * those are bridged or standalone.
+ * If DSA tag_8021q is not used, this returns 0, which is fine because
+ * the hardware-based CPU port module can be a destination for packets
+ * even if it isn't part of PGID_SRC.
+ */
+ cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
+
+ /* Apply FWD mask. The loop is needed to add/remove the current port as
+ * a source for the other ports.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long mask;
+
+ if (!ocelot_port) {
+ /* Unused ports can't send anywhere */
+ mask = 0;
+ } else if (ocelot_port->is_dsa_8021q_cpu) {
+ /* The DSA tag_8021q CPU ports need to be able to
+ * forward packets to all other ports except for
+ * themselves
+ */
+ mask = GENMASK(ocelot->num_phys_ports - 1, 0);
+ mask &= ~cpu_fwd_mask;
+ } else if (ocelot->bridge_fwd_mask & BIT(port)) {
+ struct net_device *bond = ocelot_port->bond;
+
+ mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ if (bond) {
+ mask &= ~ocelot_get_bond_mask(ocelot, bond,
+ false);
+ }
+ } else {
+ /* Standalone ports forward only to DSA tag_8021q CPU
+ * ports (if those exist), or to the hardware CPU port
+ * module otherwise.
+ */
+ mask = cpu_fwd_mask;
+ }
+
+ ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
+ }
+}
+EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
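
As an illustration of the resulting PGID_SRC configuration (hypothetical 4-port switch, no tag_8021q CPU port): if ports 0 and 1 are in the bridge forwarding mask, port 2 is standalone and port 3 is unused, then port 0 may forward to {1}, port 1 to {0}, and ports 2 and 3 get an empty mask. The standalone and unused ports can still reach the hardware CPU port module, since that destination does not depend on PGID_SRC.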
+
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 port_cfg;
- int p, i;
if (!(BIT(port) & ocelot->bridge_mask))
return;
@@ -896,7 +1275,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot->bridge_fwd_mask |= BIT(port);
fallthrough;
case BR_STATE_LEARNING:
- port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ if (ocelot_port->learn_ena)
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
default:
@@ -907,32 +1287,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
- /* Apply FWD mask. The loop is needed to add/remove the current port as
- * a source for the other ports.
- */
- for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (ocelot->bridge_fwd_mask & BIT(p)) {
- unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
-
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- unsigned long bond_mask = ocelot->lags[i];
-
- if (!bond_mask)
- continue;
-
- if (bond_mask & BIT(p)) {
- mask &= ~bond_mask;
- break;
- }
- }
-
- ocelot_write_rix(ocelot, mask,
- ANA_PGID_PGID, PGID_SRC + p);
- } else {
- ocelot_write_rix(ocelot, 0,
- ANA_PGID_PGID, PGID_SRC + p);
- }
- }
+ ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
@@ -1179,7 +1534,6 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_vlan pvid = {0}, native_vlan = {0};
- struct switchdev_trans trans;
int ret;
ocelot->bridge_mask &= ~BIT(port);
@@ -1187,13 +1541,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- trans.ph_prepare = true;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
- if (ret)
- return ret;
-
- trans.ph_prepare = false;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ ret = ocelot_port_vlan_filtering(ocelot, port, false);
if (ret)
return ret;
@@ -1206,6 +1554,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_leave);
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
+ unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
int i, port, lag;
/* Reset destination and aggregation PGIDS */
@@ -1216,22 +1565,40 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
ANA_PGID_PGID, i);
- /* Now, set PGIDs for each LAG */
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->bond)
+ continue;
+
+ visited &= ~BIT(port);
+ }
+
+ /* Now, set PGIDs for each active LAG */
for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ struct net_device *bond;
+ int num_active_ports = 0;
unsigned long bond_mask;
- int aggr_count = 0;
u8 aggr_idx[16];
- bond_mask = ocelot->lags[lag];
- if (!bond_mask)
+ /* Unused ports have no ocelot_port and no bond; skip them to
+ * avoid a NULL pointer dereference
+ */
+ if (!ocelot->ports[lag] || (visited & BIT(lag)))
+ continue;
+
+ bond = ocelot->ports[lag]->bond;
+ if (!bond)
continue;
+ bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
+
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
// Destination mask
ocelot_write_rix(ocelot, bond_mask,
ANA_PGID_PGID, port);
- aggr_idx[aggr_count] = port;
- aggr_count++;
+ aggr_idx[num_active_ports++] = port;
}
for_each_aggr_pgid(ocelot, i) {
@@ -1239,63 +1606,74 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
ac &= ~bond_mask;
- ac |= BIT(aggr_idx[i % aggr_count]);
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
}
- }
-}
-static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
-{
- unsigned long bond_mask = ocelot->lags[lag];
- unsigned int p;
-
- for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
- u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (port = lag; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ if (!ocelot_port)
+ continue;
- /* Use lag port as logical port for port i */
- ocelot_write_gix(ocelot, port_cfg |
- ANA_PORT_PORT_CFG_PORTID_VAL(lag),
- ANA_PORT_PORT_CFG, p);
+ if (ocelot_port->bond == bond)
+ visited |= BIT(port);
+ }
}
}
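
For example (assuming 16 aggregation codes): a bond whose active ports are {0, 1} yields bond_mask = 0x3 and aggr_idx = [0, 1], so even-numbered aggregation PGIDs get BIT(0) and odd-numbered ones get BIT(1), splitting hashed traffic evenly across the two ports. If the bond has no active port, its bits are simply cleared from every aggregation code and it receives no traffic.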
-int ocelot_port_lag_join(struct ocelot *ocelot, int port,
- struct net_device *bond)
+/* When offloading a bonding interface, the switch ports configured under the
+ * same bond must have the same logical port ID, equal to the physical port ID
+ * of the lowest numbered physical port in that bond. Otherwise, in standalone/
+ * bridged mode, each port has a logical port ID equal to its physical port ID.
+ */
+static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
{
- struct net_device *ndev;
- u32 bond_mask = 0;
- int lag, lp;
+ int port;
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(bond, ndev) {
- struct ocelot_port_private *priv = netdev_priv(ndev);
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct net_device *bond;
- bond_mask |= BIT(priv->chip_port);
- }
- rcu_read_unlock();
+ if (!ocelot_port)
+ continue;
- lp = __ffs(bond_mask);
+ bond = ocelot_port->bond;
+ if (bond) {
+ int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
+ false));
- /* If the new port is the lowest one, use it as the logical port from
- * now on
- */
- if (port == lp) {
- lag = port;
- ocelot->lags[port] = bond_mask;
- bond_mask &= ~BIT(port);
- if (bond_mask) {
- lp = __ffs(bond_mask);
- ocelot->lags[lp] = 0;
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG_PORTID_VAL_M,
+ ANA_PORT_PORT_CFG, port);
+ } else {
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG_PORTID_VAL_M,
+ ANA_PORT_PORT_CFG, port);
}
- } else {
- lag = lp;
- ocelot->lags[lp] |= BIT(port);
}
+}
+
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info)
+{
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return -EOPNOTSUPP;
+
+ ocelot->ports[port]->bond = bond;
- ocelot_setup_lag(ocelot, lag);
+ ocelot_setup_logical_port_ids(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot);
ocelot_set_aggr_pgids(ocelot);
return 0;
@@ -1305,33 +1683,24 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- u32 port_cfg;
- int i;
-
- /* Remove port from any lag */
- for (i = 0; i < ocelot->num_phys_ports; i++)
- ocelot->lags[i] &= ~BIT(port);
+ ocelot->ports[port]->bond = NULL;
- /* if it was the logical port of the lag, move the lag config to the
- * next port
- */
- if (ocelot->lags[port]) {
- int n = __ffs(ocelot->lags[port]);
+ ocelot_setup_logical_port_ids(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_set_aggr_pgids(ocelot);
+}
+EXPORT_SYMBOL(ocelot_port_lag_leave);
- ocelot->lags[n] = ocelot->lags[port];
- ocelot->lags[port] = 0;
+void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- ocelot_setup_lag(ocelot, n);
- }
-
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
- port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
- ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
- ANA_PORT_PORT_CFG, port);
+ ocelot_port->lag_tx_active = lag_tx_active;
+ /* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
}
-EXPORT_SYMBOL(ocelot_port_lag_leave);
+EXPORT_SYMBOL(ocelot_port_lag_change);
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
@@ -1349,9 +1718,9 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
if (port == ocelot->npi) {
maxlen += OCELOT_TAG_LEN;
- if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
maxlen += OCELOT_SHORT_PREFIX_LEN;
- else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
maxlen += OCELOT_LONG_PREFIX_LEN;
}
@@ -1366,7 +1735,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
pause_stop);
/* Tail dropping watermarks */
- atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+ atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
@@ -1381,9 +1750,9 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
if (port == ocelot->npi) {
max_mtu -= OCELOT_TAG_LEN;
- if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
max_mtu -= OCELOT_SHORT_PREFIX_LEN;
- else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
max_mtu -= OCELOT_LONG_PREFIX_LEN;
}
@@ -1391,6 +1760,86 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_get_max_mtu);
+static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ if (enabled)
+ val = ANA_PORT_PORT_CFG_LEARN_ENA;
+
+ ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
+ ANA_PORT_PORT_CFG, port);
+
+ ocelot_port->learn_ena = enabled;
+}
+
+static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
+}
+
+static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+}
+
+static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
+}
+
+int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
+
+void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_LEARNING)
+ ocelot_port_set_learning(ocelot, port,
+ !!(flags.val & BR_LEARNING));
+
+ if (flags.mask & BR_FLOOD)
+ ocelot_port_set_ucast_flood(ocelot, port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ ocelot_port_set_mcast_flood(ocelot, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ ocelot_port_set_bcast_flood(ocelot, port,
+ !!(flags.val & BR_BCAST_FLOOD));
+}
+EXPORT_SYMBOL(ocelot_port_bridge_flags);
+
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1440,6 +1889,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
REW_PORT_VLAN_CFG_PORT_TPID_M,
REW_PORT_VLAN_CFG, port);
+ /* Disable source address learning for standalone mode */
+ ocelot_port_set_learning(ocelot, port, false);
+
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
@@ -1468,9 +1920,9 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
/* CPU port Injection/Extraction configuration */
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
- ocelot->xtr_prefix);
+ OCELOT_TAG_PREFIX_NONE);
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
- ocelot->inj_prefix);
+ OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
@@ -1479,6 +1931,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ANA_PORT_VLAN_CFG, cpu);
}
+static void ocelot_detect_features(struct ocelot *ocelot)
+{
+ int mmgt, eq_ctrl;
+
+ /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
+ * the number of 240-byte free memory words (aka 4-cell chunks) and not
+ * 192 bytes as the documentation incorrectly says.
+ */
+ mmgt = ocelot_read(ocelot, SYS_MMGT);
+ ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
+
+ eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
+ ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
+}
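
For instance, a FREECNT readout of 1024 words would correspond to 1024 * 240 = 245760 bytes (240 KiB) of packet buffer; the number is illustrative, the actual value is whatever the hardware reports.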
+
int ocelot_init(struct ocelot *ocelot)
{
char queue_name[32];
@@ -1493,11 +1960,6 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
- sizeof(u32), GFP_KERNEL);
- if (!ocelot->lags)
- return -ENOMEM;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
@@ -1521,6 +1983,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
@@ -1540,7 +2003,10 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
ANA_AGGR_CFG_AC_DMAC_ENA |
ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
- ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
+ ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
+ ANA_AGGR_CFG);
/* Set MAC age time to default value. The entry is aged after
* 2*AGE_PERIOD
@@ -1559,7 +2025,7 @@ int ocelot_init(struct ocelot *ocelot)
/* Setup flooding PGIDs */
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
@@ -1580,15 +2046,18 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Allow broadcast MAC frames. */
for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
- ocelot_write_rix(ocelot,
- ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
- ANA_PGID_PGID, PGID_MC);
+ /* Allow broadcast and unknown L2 multicast to the CPU. */
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_BC);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 291d39d49c4e..db6b1a4c3926 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -32,15 +32,6 @@
#define OCELOT_PTP_QUEUE_SZ 128
-struct frame_info {
- u32 len;
- u16 port;
- u16 vid;
- u8 tag_type;
- u16 rew_op;
- u32 timestamp; /* rew_val */
-};
-
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -109,10 +100,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
unsigned int vid, enum macaccess_entry_type type);
int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid);
-int ocelot_port_lag_join(struct ocelot *ocelot, int port,
- struct net_device *bond);
-void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
- struct net_device *bond);
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
int ocelot_netdev_to_port(struct net_device *dev);
@@ -121,13 +108,16 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct phy_device *phy);
-
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+void ocelot_release_port(struct ocelot_port *ocelot_port);
+int ocelot_devlink_init(struct ocelot *ocelot);
+void ocelot_devlink_teardown(struct ocelot *ocelot);
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour);
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+extern const struct devlink_ops ocelot_devlink_ops;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
new file mode 100644
index 000000000000..edafbd37d12c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2020-2021 NXP Semiconductors
+ */
+#include <net/devlink.h>
+#include "ocelot.h"
+
+/* The queue system tracks four resource consumptions:
+ * Resource 0: Memory tracked per source port
+ * Resource 1: Frame references tracked per source port
+ * Resource 2: Memory tracked per destination port
+ * Resource 3: Frame references tracked per destination port
+ */
+#define OCELOT_RESOURCE_SZ 256
+#define OCELOT_NUM_RESOURCES 4
+
+#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
+#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
+
+/* For each resource type there are 4 types of watermarks:
+ * Q_RSRV: reservation per QoS class per port
+ * PRIO_SHR: sharing watermark per QoS class across all ports
+ * P_RSRV: reservation per port
+ * COL_SHR: sharing watermark per color (drop precedence) across all ports
+ */
+#define xxx_Q_RSRV_x 0
+#define xxx_PRIO_SHR_x 216
+#define xxx_P_RSRV_x 224
+#define xxx_COL_SHR_x 254
+
+/* Reservation Watermarks
+ * ----------------------
+ *
+ * For setting up the reserved areas, reservation watermarks exist per port
+ * and per QoS class, on both the ingress and the egress side.
+ */
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_E
+ */
+#define BUF_Q_RSRV_E(port, prio) \
+ (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_E
+ */
+#define BUF_P_RSRV_E(port) \
+ (BUF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_I
+ */
+#define BUF_Q_RSRV_I(port, prio) \
+ (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_I
+ */
+#define BUF_P_RSRV_I(port) \
+ (BUF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_E
+ */
+#define REF_Q_RSRV_E(port, prio) \
+ (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_E
+ */
+#define REF_P_RSRV_E(port) \
+ (REF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_I
+ */
+#define REF_Q_RSRV_I(port, prio) \
+ (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_I
+ */
+#define REF_P_RSRV_I(port) \
+ (REF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Sharing Watermarks
+ * ------------------
+ *
+ * The shared memory area is shared between all ports.
+ */
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_E
+ */
+#define BUF_PRIO_SHR_E(prio) \
+ (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_E
+ */
+#define BUF_COL_SHR_E(dp) \
+ (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_I
+ */
+#define BUF_PRIO_SHR_I(prio) \
+ (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_I
+ */
+#define BUF_COL_SHR_I(dp) \
+ (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_E
+ */
+#define REF_PRIO_SHR_E(prio) \
+ (REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_E
+ */
+#define REF_COL_SHR_E(dp) \
+ (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_I
+ */
+#define REF_PRIO_SHR_I(prio) \
+ (REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_I
+ */
+#define REF_COL_SHR_I(dp) \
+ (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
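
Putting the offsets together, a worked example of the index arithmetic (assuming OCELOT_NUM_TC == 8):

/* Egress buffer reservation for port 2, traffic class 3:
 *   BUF_Q_RSRV_E(2, 3) = BUF_xxxx_E + xxx_Q_RSRV_x + 8 * 2 + 3
 *                      = 2 * 256 + 0 + 19 = 531
 * Ingress per-port frame reference reservation for port 5:
 *   REF_P_RSRV_I(5) = REF_xxxx_I + xxx_P_RSRV_x + 5
 *                   = 1 * 256 + 224 + 5 = 485
 */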
+
+static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
+{
+ int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
+
+ return ocelot->ops->wm_dec(wm);
+}
+
+static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
+{
+ u32 wm = ocelot->ops->wm_enc(val);
+
+ ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
+}
+
+static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
+ u32 *maxuse)
+{
+ int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
+
+ ocelot->ops->wm_stat(res_stat, inuse, maxuse);
+}
+
+/* The hardware comes out of reset with strange defaults: the sum of all
+ * reservations for frame memory is larger than the total buffer size.
+ * One has to wonder how the reservation watermarks can still guarantee
+ * anything under congestion.
+ * Bring some sense into the hardware by changing the defaults to disable all
+ * reservations and rely only on the sharing watermark for frames with drop
+ * precedence 0. The user can still explicitly request reservations per port
+ * and per port-tc through devlink-sb.
+ */
+static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
+ int port)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
+ }
+
+ ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
+}
+
+/* We want the sharing watermarks to consume all nonreserved resources, for
+ * efficient resource utilization (a single traffic flow should be able to use
+ * up the entire buffer space and frame resources as long as there's no
+ * interference).
+ * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
+ * per color (drop precedence).
+ * The trouble with configuring these sharing watermarks is that:
+ * (1) There's a risk that we overcommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to the max
+ * (b) all 2 per-color sharing watermarks to the max
+ * (2) There's a risk that we undercommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to "max / 8"
+ * (b) all 2 per-color sharing watermarks to "max / 2"
+ * For Linux, we therefore disable the per-traffic-class sharing watermarks
+ * (setting them to 0 makes them always exceeded) and rely only on the
+ * sharing watermark for drop priority 0. Frames with drop priority 1, as set
+ * by QoS classification or policing, are then still allowed, but only as
+ * long as the port and port-TC reservations are not exceeded.
+ */
+static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
+ }
+}
+
+static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
+ u32 *buf_rsrv_e)
+{
+ int port, prio;
+
+ *buf_rsrv_i = 0;
+ *buf_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *buf_rsrv_i += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_I(port, prio));
+ *buf_rsrv_e += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_E(port, prio));
+ }
+
+ *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
+ *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
+ }
+
+ *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
+ *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
+}
+
+static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
+ u32 *ref_rsrv_e)
+{
+ int port, prio;
+
+ *ref_rsrv_i = 0;
+ *ref_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *ref_rsrv_i += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_I(port, prio));
+ *ref_rsrv_e += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_E(port, prio));
+ }
+
+ *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
+ *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
+ }
+}
+
+/* Calculate all reservations, then set up the sharing watermark for DP=0 to
+ * consume the remaining resources up to the pool's configured size.
+ */
+static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+ u32 buf_shr_i, buf_shr_e;
+ u32 ref_shr_i, ref_shr_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
+ buf_rsrv_i;
+ buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
+ buf_rsrv_e;
+ ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
+ ref_rsrv_i;
+ ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
+ ref_rsrv_e;
+
+ buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
+ buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
+
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
+}
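
A numeric sketch (values illustrative, assuming the 60-byte OCELOT_BUFFER_CELL_SZ): with an ingress buffer pool of 229440 bytes and 4800 bytes of summed ingress reservations, 224640 bytes remain shared, so BUF_COL_SHR_I(0) is written as 224640 / 60 = 3744 cells, while the DP=1 watermark stays at 0 and admits drop-precedence-1 traffic only out of the reservations.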
+
+/* Ensure that all reservations can be enforced */
+static int ocelot_watermark_validate(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/* The hardware works like this:
+ *
+ * Frame forwarding decision taken
+ * |
+ * v
+ * +--------------------+--------------------+--------------------+
+ * | | | |
+ * v v v v
+ * Ingress memory Egress memory Ingress frame Egress frame
+ * check check reference check reference check
+ * | | | |
+ * v v v v
+ * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
+ *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
+ * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
+ * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
+ * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v v v v v v v v
+ * fail success fail success fail success fail success
+ * | | | | | | | |
+ * v v v v v v v v
+ * +-----+----+ +-----+----+ +-----+----+ +-----+-----+
+ * | | | |
+ * +-------> OR <-------+ +-------> OR <-------+
+ * | |
+ * v v
+ * +----------------> AND <-----------------+
+ * |
+ * v
+ * FIFO drop / accept
+ *
+ * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
+ * At least one (ingress or egress) memory pool and one (ingress or egress)
+ * frame reference pool need to have resources for frame acceptance to succeed.
+ *
+ * The following watermarks are controlled explicitly through devlink-sb:
+ * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
+ * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
+ * The following watermarks are controlled implicitly through devlink-sb:
+ * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
+ * The following watermarks are unused and disabled:
+ * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
+ *
+ * This function overrides the hardware defaults with more sane ones (no
+ * reservations by default, let sharing use all resources) and disables the
+ * unused watermarks.
+ */
+static void ocelot_watermark_init(struct ocelot *ocelot)
+{
+ int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
+ int port;
+
+ ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++)
+ ocelot_disable_reservation_watermarks(ocelot, port);
+
+ ocelot_disable_tc_sharing_watermarks(ocelot);
+ ocelot_setup_sharing_watermarks(ocelot);
+}
+
+/* Pool size and type are fixed up at runtime. Keeping this structure to
+ * look up the cell size multipliers.
+ */
+static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
+ [OCELOT_SB_BUF] = {
+ .cell_size = OCELOT_BUFFER_CELL_SZ,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+ [OCELOT_SB_REF] = {
+ .cell_size = 1,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+};
+
+/* Returns the pool size configured through ocelot_sb_pool_set */
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ if (sb_index >= OCELOT_SB_NUM)
+ return -ENODEV;
+ if (pool_index >= OCELOT_SB_POOL_NUM)
+ return -ENODEV;
+
+ *pool_info = ocelot_sb_pool[sb_index];
+ pool_info->size = ocelot->pool_size[sb_index][pool_index];
+ if (pool_index == OCELOT_SB_POOL_ING)
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
+ else
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_get);
+
+/* The pool size received here configures the total amount of resources used on
+ * ingress (or on egress, depending upon the pool index). The pool size, minus
+ * the values for the port and port-tc reservations, is written into the
+ * COL_SHR(dp=0) sharing watermark.
+ */
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ u32 old_pool_size;
+ int err;
+
+ if (sb_index >= OCELOT_SB_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid sb, use 0 for buffers and 1 for frame references");
+ return -ENODEV;
+ }
+ if (pool_index >= OCELOT_SB_POOL_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pool, use 0 for ingress and 1 for egress");
+ return -ENODEV;
+ }
+ if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only static threshold supported");
+ return -EOPNOTSUPP;
+ }
+
+ old_pool_size = ocelot->pool_size[sb_index][pool_index];
+ ocelot->pool_size[sb_index][pool_index] = size;
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot->pool_size[sb_index][pool_index] = old_pool_size;
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_set);
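
A usage sketch from userspace (device name and size hypothetical): a request such as "devlink sb pool set pci/0000:00:00.5 sb 0 pool 0 size 220000 thtype static" ends up here, is validated against the summed reservations, and on success is folded into the COL_SHR(dp=0) sharing watermark.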
+
+/* This retrieves the configuration made with ocelot_sb_port_pool_set */
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_get);
+
+/* This configures the P_RSRV per-port reserved resource watermark */
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_set);
+
+/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ *p_pool_index = 0;
+ else
+ *p_pool_index = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
+
+/* This configures the Q_RSRV per-port-tc reserved resource watermark */
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ /* Sanity check: the pool index must match the requested pool type */
+ if (pool_index == OCELOT_SB_POOL_ING &&
+ pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
+ return -EINVAL;
+ if (pool_index == OCELOT_SB_POOL_EGR &&
+ pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
+ return -EINVAL;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
+
+/* The hardware does not support atomic snapshots, we'll read out the
+ * occupancy registers individually and have this as just a stub.
+ */
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
+
+/* The watermark occupancy registers are cleared upon read,
+ * so let's read them.
+ */
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
+{
+ u32 inuse, maxuse;
+ int port, prio;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ case OCELOT_SB_REF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
+
+/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
+
+/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
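+
+/* The watermark registers count in units of ocelot_sb_pool[].cell_size:
+ * 60-byte buffer cells for OCELOT_SB_BUF and single frame references for
+ * OCELOT_SB_REF (an assumption consistent with the threshold division in
+ * ocelot_sb_tc_pool_bind_set() above).
+ */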
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot)
+{
+ int err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
+ ocelot->packet_buffer_size, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err)
+ return err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
+ ocelot->num_frame_refs, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err) {
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ return err;
+ }
+
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
+
+ ocelot_watermark_init(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_register);
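+
+/* For reference: a frame must find room in both shared buffers registered
+ * above. OCELOT_SB_BUF is sized in bytes of packet memory and
+ * OCELOT_SB_REF in frame references; per the devlink_sb_register()
+ * arguments, each has one ingress pool, one egress pool and OCELOT_NUM_TC
+ * threshold bindings per direction.
+ */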
+
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
+{
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 729495a1a77e..c3ac026f6aea 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -622,7 +622,8 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
int ret;
filter->prio = f->common.prio;
- filter->id = f->cookie;
+ filter->id.cookie = f->cookie;
+ filter->id.tc_offload = true;
ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
if (ret)
@@ -717,7 +718,7 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
block = &ocelot->block[block_id];
- filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter)
return 0;
@@ -741,7 +742,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
block = &ocelot->block[block_id];
- filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 0acb45948418..ea4e83410fe4 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+ u32 cur = ocelot_port_readl(port, reg);
+
+ ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL(ocelot_port_rmwl);
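+
+/* Usage sketch for the new helper (not part of this patch): update one
+ * field of a per-port register while preserving the others. The
+ * DEV_MAC_ENA_CFG register and its TX_ENA field already exist in
+ * soc/mscc/ocelot_dev.h; the wrapper name is hypothetical.
+ */
+static inline void ocelot_port_tx_enable(struct ocelot_port *port)
+{
+ /* val == mask here: set TX_ENA, leave the remaining bits untouched */
+ ocelot_port_rmwl(port, DEV_MAC_ENA_CFG_TX_ENA,
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+}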
+
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
new file mode 100644
index 000000000000..683da320bfd8
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
+ * Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/mrp_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <uapi/linux/mrp_bridge.h>
+#include "ocelot.h"
+#include "ocelot_vcap.h"
+
+static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port)
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *filter;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port,
+ false);
+ if (!filter)
+ return 0;
+
+ return ocelot_vcap_filter_del(ocelot, filter);
+}
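+
+/* MRP trap filters live in the same VCAP IS2 block as tc-flower rules, but
+ * in a disjoint cookie namespace: they use the chip port number as cookie
+ * with id.tc_offload == false, while flower rules use the tc cookie with
+ * id.tc_offload == true (see the ocelot_flower.c change in this patch).
+ */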
+
+int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (mrp->p_port != dev && mrp->s_port != dev)
+ return 0;
+
+ if (ocelot->mrp_ring_id != 0 &&
+ ocelot->mrp_s_port &&
+ ocelot->mrp_p_port)
+ return -EINVAL;
+
+ if (mrp->p_port == dev)
+ ocelot->mrp_p_port = dev;
+
+ if (mrp->s_port == dev)
+ ocelot->mrp_s_port = dev;
+
+ ocelot->mrp_ring_id = mrp->ring_id;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mrp_add);
+
+int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ if (ocelot->mrp_ring_id == 0 &&
+ !ocelot->mrp_s_port &&
+ !ocelot->mrp_p_port)
+ return -EINVAL;
+
+ if (ocelot_mrp_del_vcap(ocelot, priv->chip_port))
+ return -EINVAL;
+
+ if (ocelot->mrp_p_port == dev)
+ ocelot->mrp_p_port = NULL;
+
+ if (ocelot->mrp_s_port == dev)
+ ocelot->mrp_s_port = NULL;
+
+ ocelot->mrp_ring_id = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mrp_del);
+
+int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+ int err;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_ring_id != mrp->ring_id)
+ return -EINVAL;
+
+ if (!mrp->sw_backup)
+ return -EOPNOTSUPP;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ filter->prio = 1;
+ filter->id.cookie = priv->chip_port;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS2;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->ingress_port_mask = BIT(priv->chip_port);
+ *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP);
+ *(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0x0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_mrp_add_ring_role);
+
+int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_ring_id != mrp->ring_id)
+ return -EINVAL;
+
+ if (!mrp->sw_backup)
+ return -EOPNOTSUPP;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ return ocelot_mrp_del_vcap(ocelot, priv->chip_port);
+}
+EXPORT_SYMBOL(ocelot_mrp_del_ring_role);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2bd2840d88bd..12cb6867a2d0 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1,13 +1,191 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
* Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
+#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
+{
+ return devlink_priv(dlp->devlink);
+}
+
+static int devlink_port_to_port(struct devlink_port *dlp)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+
+ return dlp - ocelot->devlink_ports;
+}
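+
+/* This relies on ocelot->devlink_ports being one contiguous array indexed
+ * by chip port (it is allocated that way in mscc_ocelot_init_ports()), so
+ * simple pointer arithmetic recovers the port index.
+ */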
+
+static int ocelot_devlink_sb_pool_get(struct devlink *dl,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index,
+ u16 pool_index, u32 *p_cur,
+ u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int
+ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
+ tc_index, pool_type,
+ p_cur, p_max);
+}
+
+const struct devlink_ops ocelot_devlink_ops = {
+ .sb_pool_get = ocelot_devlink_sb_pool_get,
+ .sb_pool_set = ocelot_devlink_sb_pool_set,
+ .sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
+ .sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
+};
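+
+/* All of the callbacks above are thin adapters: they only translate the
+ * devlink handle back into an (ocelot, port) pair and call into the switch
+ * library, so e.g. a "devlink sb" request from userspace lands in the
+ * corresponding ocelot_sb_*() implementation.
+ */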
+
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+ int id_len = sizeof(ocelot->base_mac);
+ struct devlink *dl = ocelot->devlink;
+ struct devlink_port_attrs attrs = {};
+
+ memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
+ attrs.switch_id.id_len = id_len;
+ attrs.phys.port_number = port;
+ attrs.flavour = flavour;
+
+ devlink_port_attrs_set(dlp, &attrs);
+
+ return devlink_port_register(dl, dlp, port);
+}
+
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+
+ devlink_port_unregister(dlp);
+}
+
+static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return &ocelot->devlink_ports[port];
+}
+
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
@@ -310,53 +488,20 @@ static int ocelot_port_stop(struct net_device *dev)
return 0;
}
-/* Generate the IFH for frame injection
- *
- * The IFH is a 128bit-value
- * bit 127: bypass the analyzer processing
- * bit 56-67: destination mask
- * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
- * bit 20-27: cpu extraction queue mask
- * bit 16: tag type 0: C-tag, 1: S-tag
- * bit 0-11: VID
- */
-static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
-{
- ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
- ifh[1] = (0xf00 & info->port) >> 8;
- ifh[2] = (0xff & info->port) << 24;
- ifh[3] = (info->tag_type << 16) | info->vid;
-
- return 0;
-}
-
-static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct skb_shared_info *shinfo = skb_shinfo(skb);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- u32 val, ifh[OCELOT_TAG_LEN / 4];
- struct frame_info info = {};
- u8 grp = 0; /* Send everything on CPU group 0 */
- unsigned int i, count, last;
int port = priv->chip_port;
+ u32 rew_op = 0;
- val = ocelot_read(ocelot, QS_INJ_STATUS);
- if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
- (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+ if (!ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
- ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
- QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
-
- info.port = BIT(port);
- info.tag_type = IFH_TAG_TYPE_C;
- info.vid = skb_vlan_tag_get(skb);
-
/* Check if timestamping is needed */
- if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
- info.rew_op = ocelot_port->ptp_cmd;
+ if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ rew_op = ocelot_port->ptp_cmd;
if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
struct sk_buff *clone;
@@ -369,45 +514,11 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
ocelot_port_add_txtstamp_skb(ocelot, port, clone);
- info.rew_op |= clone->cb[0] << 3;
+ rew_op |= clone->cb[0] << 3;
}
}
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = ocelot_port->ptp_cmd;
- if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= skb->cb[0] << 3;
- }
-
- ocelot_gen_ifh(ifh, &info);
-
- for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
- ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
- QS_INJ_WR, grp);
-
- count = (skb->len + 3) / 4;
- last = skb->len % 4;
- for (i = 0; i < count; i++)
- ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
-
- /* Add padding */
- while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
- ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
- i++;
- }
-
- /* Indicate EOF and valid bytes in last word */
- ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
- QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
- QS_INJ_CTRL_EOF,
- QS_INJ_CTRL, grp);
-
- /* Add dummy CRC */
- ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
- skb_tx_timestamp(skb);
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
kfree_skb(skb);
@@ -457,7 +568,7 @@ static void ocelot_mact_work(struct work_struct *work)
break;
default:
break;
- };
+ }
kfree(w);
}
@@ -525,20 +636,6 @@ static void ocelot_set_rx_mode(struct net_device *dev)
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
-static int ocelot_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- int port = priv->chip_port;
- int ret;
-
- ret = snprintf(buf, len, "p%d", port);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -689,18 +786,6 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
-static int ocelot_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
-
- ppid->id_len = sizeof(ocelot->base_mac);
- memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
-
- return 0;
-}
-
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -727,7 +812,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_set_rx_mode = ocelot_set_rx_mode,
- .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
@@ -736,9 +820,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
- .ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_do_ioctl = ocelot_ioctl,
+ .ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -825,12 +909,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
- struct switchdev_trans *trans,
u8 state)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
-
ocelot_bridge_stp_state_set(ocelot, port, state);
}
@@ -859,7 +939,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -868,19 +948,24 @@ static int ocelot_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot, port, trans,
- attr->u.stp_state);
+ ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering, trans);
+ ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = ocelot_port_pre_bridge_flags(ocelot, port,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -890,73 +975,89 @@ static int ocelot_port_attr_set(struct net_device *dev,
}
static int ocelot_port_obj_add_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- if (switchdev_trans_ph_prepare(trans))
- ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
- untagged);
- else
- ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
- if (ret)
- return ret;
- }
- return 0;
+ ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+ return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged);
}
-static int ocelot_port_vlan_del_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
{
- int ret;
- u16 vid;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_del(dev, vid);
+ return ocelot_port_mdb_add(ocelot, port, mdb);
+}
- if (ret)
- return ret;
- }
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
- return 0;
+ return ocelot_port_mdb_del(ocelot, port, mdb);
}
-static int ocelot_port_obj_add_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+static int ocelot_port_obj_mrp_add(struct net_device *dev,
+ const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ return ocelot_mrp_add(ocelot, port, mrp);
+}
- return ocelot_port_mdb_add(ocelot, port, mdb);
+static int ocelot_port_obj_mrp_del(struct net_device *dev,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_mrp_del(ocelot, port, mrp);
}
-static int ocelot_port_obj_del_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb)
+static int
+ocelot_port_obj_mrp_add_ring_role(struct net_device *dev,
+ const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ return ocelot_mrp_add_ring_role(ocelot, port, mrp);
+}
+
+static int
+ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -964,12 +1065,17 @@ static int ocelot_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_MRP:
+ ret = ocelot_port_obj_mrp_add(dev, SWITCHDEV_OBJ_MRP(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+ ret = ocelot_port_obj_mrp_add_ring_role(dev,
+ SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
return -EOPNOTSUPP;
@@ -985,12 +1091,19 @@ static int ocelot_port_obj_del(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- ret = ocelot_port_vlan_del_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ ret = ocelot_vlan_vid_del(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
+ case SWITCHDEV_OBJ_ID_MRP:
+ ret = ocelot_port_obj_mrp_del(dev, SWITCHDEV_OBJ_MRP(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+ ret = ocelot_port_obj_mrp_del_ring_role(dev,
+ SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
+ break;
default:
return -EOPNOTSUPP;
}
@@ -998,9 +1111,42 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_netdevice_port_event(struct net_device *dev,
- unsigned long event,
- struct netdev_notifier_changeupper_info *info)
+static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+
+ err = ocelot_port_bridge_join(ocelot, port, bridge);
+ if (err)
+ return err;
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return 0;
+}
+
+static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+
+ err = ocelot_port_bridge_leave(ocelot, port, bridge);
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return err;
+}
+
+static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
@@ -1008,73 +1154,103 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
int port = priv->chip_port;
int err = 0;
- switch (event) {
- case NETDEV_CHANGEUPPER:
- if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking) {
- err = ocelot_port_bridge_join(ocelot, port,
- info->upper_dev);
- } else {
- err = ocelot_port_bridge_leave(ocelot, port,
- info->upper_dev);
- }
- }
- if (netif_is_lag_master(info->upper_dev)) {
- if (info->linking)
- err = ocelot_port_lag_join(ocelot, port,
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking) {
+ err = ocelot_netdevice_bridge_join(ocelot, port,
info->upper_dev);
- else
- ocelot_port_lag_leave(ocelot, port,
- info->upper_dev);
+ } else {
+ err = ocelot_netdevice_bridge_leave(ocelot, port,
+ info->upper_dev);
+ }
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking) {
+ err = ocelot_port_lag_join(ocelot, port,
+ info->upper_dev,
+ info->upper_info);
+ if (err == -EOPNOTSUPP) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Offloading not supported");
+ err = 0;
+ }
+ } else {
+ ocelot_port_lag_leave(ocelot, port,
+ info->upper_dev);
}
- break;
- default:
- break;
}
- return err;
+ return notifier_from_errno(err);
+}
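+
+/* notifier_from_errno() maps err == 0 to NOTIFY_OK and folds a negative
+ * errno into a NOTIFY_STOP_MASK'ed value, which notifier_to_errno() on the
+ * caller side can turn back into the original error.
+ */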
+
+static int
+ocelot_netdevice_lag_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+ int err = NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ err = ocelot_netdevice_changeupper(lower, info);
+ /* ocelot_netdevice_changeupper() already returns a notifier
+ * code (NOTIFY_OK is nonzero); unwrap it instead of wrapping
+ * it a second time
+ */
+ if (notifier_to_errno(err))
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int
+ocelot_netdevice_changelowerstate(struct net_device *dev,
+ struct netdev_lag_lower_state_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ bool is_active = info->link_up && info->tx_enabled;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ if (!ocelot_port->bond)
+ return NOTIFY_DONE;
+
+ if (ocelot_port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ ocelot_port_lag_change(ocelot, port, is_active);
+
+ return NOTIFY_OK;
}
static int ocelot_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
+ switch (event) {
+ case NETDEV_CHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
- if (event == NETDEV_PRECHANGEUPPER &&
- netif_is_lag_master(info->upper_dev)) {
- struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
- struct netlink_ext_ack *extack;
+ if (ocelot_netdevice_dev_check(dev))
+ return ocelot_netdevice_changeupper(dev, info);
- if (lag_upper_info &&
- lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
- extack = netdev_notifier_info_to_extack(&info->info);
- NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+ if (netif_is_lag_master(dev))
+ return ocelot_netdevice_lag_changeupper(dev, info);
- ret = -EINVAL;
- goto notify;
- }
+ break;
}
+ case NETDEV_CHANGELOWERSTATE: {
+ struct netdev_notifier_changelowerstate_info *info = ptr;
- if (netif_is_lag_master(dev)) {
- struct net_device *slave;
- struct list_head *iter;
+ if (!ocelot_netdevice_dev_check(dev))
+ break;
- netdev_for_each_lower_dev(dev, slave, iter) {
- ret = ocelot_netdevice_port_event(slave, event, info);
- if (ret)
- goto notify;
- }
- } else {
- ret = ocelot_netdevice_port_event(dev, event, info);
+ return ocelot_netdevice_changelowerstate(dev,
+ info->lower_state_info);
+ }
+ default:
+ break;
}
-notify:
- return notifier_from_errno(ret);
+ return NOTIFY_DONE;
}
struct notifier_block ocelot_netdevice_nb __read_mostly = {
@@ -1173,7 +1349,19 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
free_netdev(dev);
+ ocelot->ports[port] = NULL;
+ return err;
}
- return err;
+ return 0;
+}
+
+void ocelot_release_port(struct ocelot_port *ocelot_port)
+{
+ struct ocelot_port_private *priv = container_of(ocelot_port,
+ struct ocelot_port_private,
+ port);
+
+ unregister_netdev(priv->dev);
+ free_netdev(priv->dev);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d8c778ee6f1b..37a232911395 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -959,6 +959,12 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
list_add(&filter->list, pos->prev);
}
+static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
+ const struct ocelot_vcap_filter *b)
+{
+ return !memcmp(&a->id, &b->id, sizeof(struct ocelot_vcap_id));
+}
+
static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
@@ -966,7 +972,7 @@ static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
int index = 0;
list_for_each_entry(tmp, &block->rules, list) {
- if (filter->id == tmp->id)
+ if (ocelot_vcap_filter_equal(filter, tmp))
return index;
index++;
}
@@ -991,16 +997,19 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
}
struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
+ bool tc_offload)
{
struct ocelot_vcap_filter *filter;
list_for_each_entry(filter, &block->rules, list)
- if (filter->id == id)
+ if (filter->id.tc_offload == tc_offload &&
+ filter->id.cookie == cookie)
return filter;
return NULL;
}
+EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id);
/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
* on destination and source MAC addresses, but only on higher-level protocol
@@ -1150,6 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
vcap_entry_set(ocelot, index, filter);
return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_filter_add);
static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
@@ -1160,7 +1170,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_vcap_filter, list);
- if (tmp->id == filter->id) {
+ if (ocelot_vcap_filter_equal(filter, tmp)) {
if (tmp->block_id == VCAP_IS2 &&
tmp->action.police_ena)
ocelot_vcap_policer_del(ocelot, block,
@@ -1204,6 +1214,7 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_filter_del);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
index 82fd10581a14..523611ccc48f 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -7,304 +7,13 @@
#define _MSCC_OCELOT_VCAP_H_
#include "ocelot.h"
-#include "ocelot_police.h"
-#include <net/sch_generic.h>
-#include <net/pkt_cls.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <net/flow_offload.h>
#define OCELOT_POLICER_DISCARD 0x17f
-struct ocelot_ipv4 {
- u8 addr[4];
-};
-
-enum ocelot_vcap_bit {
- OCELOT_VCAP_BIT_ANY,
- OCELOT_VCAP_BIT_0,
- OCELOT_VCAP_BIT_1
-};
-
-struct ocelot_vcap_u8 {
- u8 value[1];
- u8 mask[1];
-};
-
-struct ocelot_vcap_u16 {
- u8 value[2];
- u8 mask[2];
-};
-
-struct ocelot_vcap_u24 {
- u8 value[3];
- u8 mask[3];
-};
-
-struct ocelot_vcap_u32 {
- u8 value[4];
- u8 mask[4];
-};
-
-struct ocelot_vcap_u40 {
- u8 value[5];
- u8 mask[5];
-};
-
-struct ocelot_vcap_u48 {
- u8 value[6];
- u8 mask[6];
-};
-
-struct ocelot_vcap_u64 {
- u8 value[8];
- u8 mask[8];
-};
-
-struct ocelot_vcap_u128 {
- u8 value[16];
- u8 mask[16];
-};
-
-struct ocelot_vcap_vid {
- u16 value;
- u16 mask;
-};
-
-struct ocelot_vcap_ipv4 {
- struct ocelot_ipv4 value;
- struct ocelot_ipv4 mask;
-};
-
-struct ocelot_vcap_udp_tcp {
- u16 value;
- u16 mask;
-};
-
-struct ocelot_vcap_port {
- u8 value;
- u8 mask;
-};
-
-enum ocelot_vcap_key_type {
- OCELOT_VCAP_KEY_ANY,
- OCELOT_VCAP_KEY_ETYPE,
- OCELOT_VCAP_KEY_LLC,
- OCELOT_VCAP_KEY_SNAP,
- OCELOT_VCAP_KEY_ARP,
- OCELOT_VCAP_KEY_IPV4,
- OCELOT_VCAP_KEY_IPV6
-};
-
-struct ocelot_vcap_key_vlan {
- struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
- struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
- enum ocelot_vcap_bit dei; /* DEI */
- enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
-};
-
-struct ocelot_vcap_key_etype {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
- struct ocelot_vcap_u16 etype;
- struct ocelot_vcap_u16 data; /* MAC data */
-};
-
-struct ocelot_vcap_key_llc {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
-
- /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
- struct ocelot_vcap_u32 llc;
-};
-
-struct ocelot_vcap_key_snap {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
-
- /* SNAP header: Organization Code at byte 0, Type at byte 3 */
- struct ocelot_vcap_u40 snap;
-};
-
-struct ocelot_vcap_key_arp {
- struct ocelot_vcap_u48 smac;
- enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
- enum ocelot_vcap_bit req; /* Opcode request/reply */
- enum ocelot_vcap_bit unknown; /* Opcode unknown */
- enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
- enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
-
- /**< Protocol addr. length 4, hardware length 6 */
- enum ocelot_vcap_bit length;
-
- enum ocelot_vcap_bit ip; /* Protocol address type IP */
- enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
- struct ocelot_vcap_ipv4 sip; /* Sender IP address */
- struct ocelot_vcap_ipv4 dip; /* Target IP address */
-};
-
-struct ocelot_vcap_key_ipv4 {
- enum ocelot_vcap_bit ttl; /* TTL zero */
- enum ocelot_vcap_bit fragment; /* Fragment */
- enum ocelot_vcap_bit options; /* Header options */
- struct ocelot_vcap_u8 ds;
- struct ocelot_vcap_u8 proto; /* Protocol */
- struct ocelot_vcap_ipv4 sip; /* Source IP address */
- struct ocelot_vcap_ipv4 dip; /* Destination IP address */
- struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
- struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
- struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
- enum ocelot_vcap_bit tcp_fin;
- enum ocelot_vcap_bit tcp_syn;
- enum ocelot_vcap_bit tcp_rst;
- enum ocelot_vcap_bit tcp_psh;
- enum ocelot_vcap_bit tcp_ack;
- enum ocelot_vcap_bit tcp_urg;
- enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
- enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
- enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
-};
-
-struct ocelot_vcap_key_ipv6 {
- struct ocelot_vcap_u8 proto; /* IPv6 protocol */
- struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
- struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
- enum ocelot_vcap_bit ttl; /* TTL zero */
- struct ocelot_vcap_u8 ds;
- struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
- struct ocelot_vcap_udp_tcp sport;
- struct ocelot_vcap_udp_tcp dport;
- enum ocelot_vcap_bit tcp_fin;
- enum ocelot_vcap_bit tcp_syn;
- enum ocelot_vcap_bit tcp_rst;
- enum ocelot_vcap_bit tcp_psh;
- enum ocelot_vcap_bit tcp_ack;
- enum ocelot_vcap_bit tcp_urg;
- enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
- enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
- enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
-};
-
-enum ocelot_mask_mode {
- OCELOT_MASK_MODE_NONE,
- OCELOT_MASK_MODE_PERMIT_DENY,
- OCELOT_MASK_MODE_POLICY,
- OCELOT_MASK_MODE_REDIRECT,
-};
-
-enum ocelot_es0_tag {
- OCELOT_NO_ES0_TAG,
- OCELOT_ES0_TAG,
- OCELOT_FORCE_PORT_TAG,
- OCELOT_FORCE_UNTAG,
-};
-
-enum ocelot_tag_tpid_sel {
- OCELOT_TAG_TPID_SEL_8021Q,
- OCELOT_TAG_TPID_SEL_8021AD,
-};
-
-struct ocelot_vcap_action {
- union {
- /* VCAP ES0 */
- struct {
- enum ocelot_es0_tag push_outer_tag;
- enum ocelot_es0_tag push_inner_tag;
- enum ocelot_tag_tpid_sel tag_a_tpid_sel;
- int tag_a_vid_sel;
- int tag_a_pcp_sel;
- u16 vid_a_val;
- u8 pcp_a_val;
- u8 dei_a_val;
- enum ocelot_tag_tpid_sel tag_b_tpid_sel;
- int tag_b_vid_sel;
- int tag_b_pcp_sel;
- u16 vid_b_val;
- u8 pcp_b_val;
- u8 dei_b_val;
- };
-
- /* VCAP IS1 */
- struct {
- bool vid_replace_ena;
- u16 vid;
- bool vlan_pop_cnt_ena;
- int vlan_pop_cnt;
- bool pcp_dei_ena;
- u8 pcp;
- u8 dei;
- bool qos_ena;
- u8 qos_val;
- u8 pag_override_mask;
- u8 pag_val;
- };
-
- /* VCAP IS2 */
- struct {
- bool cpu_copy_ena;
- u8 cpu_qu_num;
- enum ocelot_mask_mode mask_mode;
- unsigned long port_mask;
- bool police_ena;
- struct ocelot_policer pol;
- u32 pol_ix;
- };
- };
-};
-
-struct ocelot_vcap_stats {
- u64 bytes;
- u64 pkts;
- u64 used;
-};
-
-enum ocelot_vcap_filter_type {
- OCELOT_VCAP_FILTER_DUMMY,
- OCELOT_VCAP_FILTER_PAG,
- OCELOT_VCAP_FILTER_OFFLOAD,
-};
-
-struct ocelot_vcap_filter {
- struct list_head list;
-
- enum ocelot_vcap_filter_type type;
- int block_id;
- int goto_target;
- int lookup;
- u8 pag;
- u16 prio;
- u32 id;
-
- struct ocelot_vcap_action action;
- struct ocelot_vcap_stats stats;
- /* For VCAP IS1 and IS2 */
- unsigned long ingress_port_mask;
- /* For VCAP ES0 */
- struct ocelot_vcap_port ingress_port;
- struct ocelot_vcap_port egress_port;
-
- enum ocelot_vcap_bit dmac_mc;
- enum ocelot_vcap_bit dmac_bc;
- struct ocelot_vcap_key_vlan vlan;
-
- enum ocelot_vcap_key_type key_type;
- union {
- /* OCELOT_VCAP_KEY_ANY: No specific fields */
- struct ocelot_vcap_key_etype etype;
- struct ocelot_vcap_key_llc llc;
- struct ocelot_vcap_key_snap snap;
- struct ocelot_vcap_key_arp arp;
- struct ocelot_vcap_key_ipv4 ipv4;
- struct ocelot_vcap_key_ipv6 ipv6;
- } key;
-};
-
-int ocelot_vcap_filter_add(struct ocelot *ocelot,
- struct ocelot_vcap_filter *rule,
- struct netlink_ext_ack *extack);
-int ocelot_vcap_filter_del(struct ocelot *ocelot,
- struct ocelot_vcap_filter *rule);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
-struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id);
void ocelot_detect_vcap_constants(struct ocelot *ocelot);
int ocelot_vcap_init(struct ocelot *ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 9cf2bc5f4289..4bd7e9d9ec61 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Microsemi Corporation
*/
+#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_net.h>
@@ -18,8 +19,6 @@
#include <soc/mscc/ocelot_hsio.h>
#include "ocelot.h"
-#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
-
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
@@ -517,7 +516,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
- ocelot->shared_queue_sz = 224 * 1024;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -533,181 +531,28 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
return 0;
}
-static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
-{
- u8 llen, wlen;
- u64 ifh[2];
-
- ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
- ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
-
- wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
- llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
-
- info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
-
- info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
-
- info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
-
- info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
- info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
-
- return 0;
-}
-
-static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
- u32 *rval)
-{
- u32 val;
- u32 bytes_valid;
-
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- if (val == XTR_NOT_READY) {
- if (ifh)
- return -EIO;
-
- do {
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- } while (val == XTR_NOT_READY);
- }
-
- switch (val) {
- case XTR_ABORT:
- return -EIO;
- case XTR_EOF_0:
- case XTR_EOF_1:
- case XTR_EOF_2:
- case XTR_EOF_3:
- case XTR_PRUNED:
- bytes_valid = XTR_VALID_BYTES(val);
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- if (val == XTR_ESCAPE)
- *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- else
- *rval = val;
-
- return bytes_valid;
- case XTR_ESCAPE:
- *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-
- return 4;
- default:
- *rval = val;
-
- return 4;
- }
-}
-
static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
{
struct ocelot *ocelot = arg;
- int i = 0, grp = 0;
- int err = 0;
-
- if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
- return IRQ_NONE;
+ int grp = 0, err;
- do {
- struct skb_shared_hwtstamps *shhwtstamps;
- struct ocelot_port_private *priv;
- struct ocelot_port *ocelot_port;
- u64 tod_in_ns, full_ts_in_ns;
- struct frame_info info = {};
- struct net_device *dev;
- u32 ifh[4], val, *buf;
- struct timespec64 ts;
- int sz, len, buf_len;
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
- for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
- err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
- if (err != 4)
- break;
- }
-
- if (err != 4)
- break;
-
- /* At this point the IFH was read correctly, so it is safe to
- * presume that there is no error. The err needs to be reset
- * otherwise a frame could come in CPU queue between the while
- * condition and the check for error later on. And in that case
- * the new frame is just removed and not processed.
- */
- err = 0;
-
- ocelot_parse_ifh(ifh, &info);
-
- ocelot_port = ocelot->ports[info.port];
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
- dev = priv->dev;
-
- skb = netdev_alloc_skb(dev, info.len);
-
- if (unlikely(!skb)) {
- netdev_err(dev, "Unable to allocate sk_buff\n");
- err = -ENOMEM;
- break;
- }
- buf_len = info.len - ETH_FCS_LEN;
- buf = (u32 *)skb_put(skb, buf_len);
-
- len = 0;
- do {
- sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
- *buf++ = val;
- len += sz;
- } while (len < buf_len);
-
- /* Read the FCS */
- sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
- /* Update the statistics if part of the FCS was read before */
- len -= ETH_FCS_LEN - sz;
-
- if (unlikely(dev->features & NETIF_F_RXFCS)) {
- buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
- *buf = val;
- }
-
- if (sz < 0) {
- err = sz;
- break;
- }
-
- if (ocelot->ptp) {
- ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
- tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
- if ((tod_in_ns & 0xffffffff) < info.timestamp)
- full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
- info.timestamp;
- else
- full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
- info.timestamp;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = full_ts_in_ns;
- }
+ err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
+ if (err)
+ goto out;
- /* Everything we see on an interface that is in the HW bridge
- * has already been forwarded.
- */
- if (ocelot->bridge_mask & BIT(info.port))
- skb->offload_fwd_mark = 1;
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
- skb->protocol = eth_type_trans(skb, dev);
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
- dev->stats.rx_bytes += len;
- dev->stats.rx_packets++;
- } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+ }
- if (err)
- while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
- ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+out:
+ if (err < 0)
+ ocelot_drain_cpu_queue(ocelot, 0);
return IRQ_HANDLED;
}
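+
+/* The IFH parsing, RX timestamp reconstruction and FCS handling that used
+ * to live in this handler were moved to ocelot_xtr_poll_frame() in the
+ * common switch library (hence the new <linux/dsa/ocelot.h> include),
+ * leaving only per-netdev statistics and netif_rx() here.
+ */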
@@ -764,9 +609,25 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
+static u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
+static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
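+
+/* Round-trip note for the codec above, assuming ocelot_wm_enc() encodes
+ * values >= 256 as BIT(8) | (value / 16): the decoded watermark is then
+ * rounded down to a multiple of 16, e.g. 300 encodes to BIT(8) | 18 and
+ * decodes back to 288.
+ */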
+
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
@@ -1036,12 +897,19 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_devlink_teardown(ocelot, port);
+}
+
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
ocelot_port = ocelot->ports[port];
@@ -1049,12 +917,7 @@ static void mscc_ocelot_release_ports(struct ocelot *ocelot)
continue;
ocelot_deinit_port(ocelot, port);
-
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
-
- unregister_netdev(priv->dev);
- free_netdev(priv->dev);
+ ocelot_release_port(ocelot_port);
}
}
@@ -1062,36 +925,55 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
struct device_node *ports)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ u32 devlink_ports_registered = 0;
struct device_node *portnp;
- int err;
+ int port, err;
+ u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
+ ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports,
+ sizeof(*ocelot->devlink_ports),
+ GFP_KERNEL);
+ if (!ocelot->devlink_ports)
+ return -ENOMEM;
+
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ struct devlink_port *dlp;
phy_interface_t phy_mode;
struct phy_device *phy;
struct regmap *target;
struct resource *res;
struct phy *serdes;
char res_name[8];
- u32 port;
- if (of_property_read_u32(portnp, "reg", &port))
+ if (of_property_read_u32(portnp, "reg", &reg))
+ continue;
+
+ port = reg;
+ if (port < 0 || port >= ocelot->num_phys_ports) {
+ dev_err(ocelot->dev,
+ "invalid port number: %d >= %d\n", port,
+ ocelot->num_phys_ports);
continue;
+ }
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
res_name);
target = ocelot_regmap_init(ocelot, res);
- if (IS_ERR(target))
- continue;
+ if (IS_ERR(target)) {
+ err = PTR_ERR(target);
+ goto out_teardown;
+ }
phy_node = of_parse_phandle(portnp, "phy-handle", 0);
if (!phy_node)
@@ -1102,15 +984,25 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!phy)
continue;
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL);
+ if (err) {
+ of_node_put(portnp);
+ goto out_teardown;
+ }
+ devlink_ports_registered |= BIT(port);
+
err = ocelot_probe_port(ocelot, port, target, phy);
if (err) {
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
ocelot_port = ocelot->ports[port];
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
+ dlp = &ocelot->devlink_ports[port];
+ devlink_port_type_eth_set(dlp, priv->dev);
of_get_phy_mode(portnp, &phy_mode);
@@ -1135,7 +1027,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_teardown;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -1149,13 +1042,36 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
priv->serdes = serdes;
}
+ /* Initialize unused devlink ports at the end */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (devlink_ports_registered & BIT(port))
+ continue;
+
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_UNUSED);
+ if (err)
+ goto out_teardown;
+
+ devlink_ports_registered |= BIT(port);
+ }
+
return 0;
+
+out_teardown:
+ /* Unregister the network interfaces */
+ mscc_ocelot_release_ports(ocelot);
+ /* Tear down devlink ports for the registered network interfaces */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (devlink_ports_registered & BIT(port))
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+ return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
@@ -1163,6 +1079,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
+ struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -1186,10 +1103,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
- ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
- if (!ocelot)
+ devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
+ if (!devlink)
return -ENOMEM;
+ ocelot = devlink_priv(devlink);
+ ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
@@ -1206,7 +1125,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = NULL;
continue;
}
- return PTR_ERR(target);
+ err = PTR_ERR(target);
+ goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
@@ -1215,24 +1135,27 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
- return PTR_ERR(hsio);
+ err = PTR_ERR(hsio);
+ goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
- return err;
+ goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
- if (irq_xtr < 0)
- return -ENODEV;
+ if (irq_xtr < 0) {
+ err = irq_xtr;
+ goto out_free_devlink;
+ }
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
- return err;
+ goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
@@ -1241,7 +1164,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
- return err;
+ goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
@@ -1250,25 +1173,32 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
- ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
- ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE;
ocelot->npi = -1;
err = ocelot_init(ocelot);
if (err)
goto out_put_ports;
- err = mscc_ocelot_init_ports(pdev, ports);
+ err = devlink_register(devlink, ocelot->dev);
if (err)
goto out_ocelot_deinit;
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_ocelot_devlink_unregister;
+
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_ocelot_release_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1288,10 +1218,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
return 0;
+out_ocelot_release_ports:
+ mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+out_ocelot_devlink_unregister:
+ devlink_unregister(devlink);
out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
+out_free_devlink:
+ devlink_free(devlink);
return err;
}
@@ -1300,11 +1237,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
+ devlink_free(ocelot->devlink);
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0a721f6e8676..e31f8fbbc696 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
return 0;
}
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, false);
}
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, true);
}
@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
- [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
- [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+ [BPF_STX | BPF_ATOMIC | BPF_W] = mem_atomic4,
+ [BPF_STX | BPF_ATOMIC | BPF_DW] = mem_atomic8,
[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index fac9c6f9e197..d0e17eebddd9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
- return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index e92ee510fd52..9d235c0ce46a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
pr_vlog(env, "map writes not supported\n");
return -EOPNOTSUPP;
}
- if (is_mbpf_xadd(meta)) {
+ if (is_mbpf_atomic(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_ATOMIC_CNT);
if (err)
@@ -523,12 +523,17 @@ exit_check_ptr:
}
static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
{
const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
+ if (meta->insn.imm != BPF_ADD) {
+ pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+ return -EOPNOTSUPP;
+ }
+
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type);
@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
if (is_mbpf_store(meta))
return nfp_bpf_check_store(nfp_prog, meta, env);
- if (is_mbpf_xadd(meta))
- return nfp_bpf_check_xadd(nfp_prog, meta, env);
+ if (is_mbpf_atomic(meta))
+ return nfp_bpf_check_atomic(nfp_prog, meta, env);
if (is_mbpf_alu(meta))
return nfp_bpf_check_alu(nfp_prog, meta, env);
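
Background for the xadd-to-atomic rename: with BPF_STX | BPF_ATOMIC the specific operation is encoded in insn->imm, and the old BPF_XADD behaviour is just the imm == BPF_ADD case. A minimal sketch of how an offload JIT gates on that; emit_atomic_add() is a hypothetical emitter:

#include <linux/bpf.h>
#include <linux/errno.h>

static int emit_atomic_add(const struct bpf_insn *insn);  /* stub */

static int jit_atomic(const struct bpf_insn *insn)
{
	/* Only fetch-less atomic add is implemented; BPF_AND, BPF_XCHG,
	 * BPF_CMPXCHG and the other imm-encoded ops must be rejected.
	 */
	if (insn->imm != BPF_ADD)
		return -EOPNOTSUPP;

	return emit_atomic_add(insn);
}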
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f21fb573ea3e..eeb30680b4dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1822,8 +1822,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1914,10 +1914,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int dma_off;
int act;
- xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- xdp.data = orig_data;
- xdp.data_meta = orig_data;
- xdp.data_end = orig_data + pkt_len;
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -3656,8 +3656,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
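
For reference, the xdp_init_buff()/xdp_prepare_buff() helpers used in the Rx conversion above split xdp_buff setup into a per-poll part (frame size and rxq info) and a per-frame part (pointers and lengths). A sketch with the driver-specific values stubbed out; the final 'true' marks the metadata area valid, as in the nfp hunk:

#include <linux/bpf.h>
#include <net/xdp.h>

static u32 run_xdp_once(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
			void *hard_start, u32 headroom, u32 data_len)
{
	struct xdp_buff xdp;

	xdp_init_buff(&xdp, PAGE_SIZE, rxq);	/* once per NAPI poll */
	xdp_prepare_buff(&xdp, hard_start, headroom, data_len, true);

	return bpf_prog_run_xdp(prog, &xdp);
}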
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9bed42719ae5..563dba384a53 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -249,8 +249,6 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
return (usecs * mult) / div;
}
-typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
-
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..162a1ff1e9d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
@@ -979,7 +979,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
stats->vlan_inserted++;
}
- if (skb->csum_not_inet)
+ if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
stats->csum++;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 5e9f8ee99800..2fcbcecb41d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -113,7 +113,8 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
return NX_RCODE_INVALID_ARGS;
}
- addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &md_template_addr, GFP_KERNEL);
if (!addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
return -ENOMEM;
@@ -133,7 +134,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
cmd.rsp.cmd, size, cmd.rsp.arg2);
}
- pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
return 0;
}
@@ -281,14 +282,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
rsp_size =
SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
- addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &hostrq_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
- addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &cardrsp_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
@@ -387,9 +388,10 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
recv_ctx->virt_port = prsp->virt_port;
out_free_rsp:
- pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
}
@@ -429,14 +431,14 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
struct netxen_cmd_args cmd;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
- rq_addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &rq_phys_addr);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
- rsp_addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &rsp_phys_addr);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -491,10 +493,11 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
@@ -745,9 +748,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
recv_ctx = &adapter->recv_ctx;
tx_ring = adapter->tx_ring;
- addr = pci_alloc_consistent(pdev,
- sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
- &recv_ctx->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate hw context\n");
return -ENOMEM;
@@ -762,8 +765,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -776,9 +779,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate rds ring [%d]\n",
@@ -797,9 +800,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate sds ring [%d]\n",
@@ -874,19 +877,17 @@ done:
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->hwctx != NULL) {
- pci_free_consistent(adapter->pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- recv_ctx->hwctx,
- recv_ctx->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ recv_ctx->hwctx, recv_ctx->phys_addr);
recv_ctx->hwctx = NULL;
}
tx_ring = adapter->tx_ring;
if (tx_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
tx_ring->desc_head = NULL;
}
@@ -894,10 +895,10 @@ done:
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- rds_ring->desc_head,
- rds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
rds_ring->desc_head = NULL;
}
}
@@ -906,10 +907,10 @@ done:
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- sds_ring->desc_head,
- sds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
sds_ring->desc_head = NULL;
}
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 94546ed5f867..08f9477d2ee8 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -102,10 +102,8 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
- pci_unmap_single(adapter->pdev,
- rx_buf->dma,
- rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
@@ -124,16 +122,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
- pci_unmap_single(adapter->pdev, buffrag->dma,
- buffrag->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
- pci_unmap_page(adapter->pdev, buffrag->dma,
- buffrag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
}
@@ -1250,9 +1248,10 @@ int netxen_init_dummy_dma(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
- adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- &adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr,
+ GFP_KERNEL);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
@@ -1304,10 +1303,10 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
}
if (i) {
- pci_free_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- adapter->dummy_dma.addr,
- adapter->dummy_dma.phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
@@ -1467,10 +1466,10 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
- dma = pci_map_single(pdev, skb->data,
- rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(pdev, dma)) {
+ if (dma_mapping_error(&pdev->dev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
@@ -1491,8 +1490,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
skb = buffer->skb;
if (!skb)
@@ -1754,13 +1753,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
- pci_unmap_single(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
- pci_unmap_page(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
frag->dma = 0ULL;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f21847739ef1..7e6bac85495d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -243,8 +243,8 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter)
cmask = mask;
}
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ if (dma_set_mask(&pdev->dev, mask) == 0 &&
+ dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
adapter->pci_using_dac = 1;
return 0;
}
@@ -277,13 +277,13 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
mask = DMA_BIT_MASK(32+shift);
- err = pci_set_dma_mask(pdev, mask);
+ err = dma_set_mask(&pdev->dev, mask);
if (err)
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- err = pci_set_consistent_dma_mask(pdev, mask);
+ err = dma_set_coherent_mask(&pdev->dev, mask);
if (err)
goto err_out;
}
@@ -293,8 +293,8 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
return 0;
err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
+ dma_set_mask(&pdev->dev, old_mask);
+ dma_set_coherent_mask(&pdev->dev, old_cmask);
return err;
}
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_features = netxen_set_features,
};
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
netxen_initialize_interrupt_registers(adapter);
netxen_set_msix_bit(pdev, 0);
- if (netxen_function_zero(pdev)) {
+ if (adapter->portnum == 0) {
if (!netxen_setup_msi_interrupts(adapter, num_msix))
netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
else
@@ -1983,9 +1978,9 @@ netxen_map_tx_skb(struct pci_dev *pdev,
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
- map = pci_map_single(pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto out_err;
nf->dma = map;
@@ -2009,12 +2004,12 @@ netxen_map_tx_skb(struct pci_dev *pdev,
unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
nf->dma = 0ULL;
out_err:
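
The netxen hunks above (and the qla3xxx hunks further down) are a mechanical conversion from the deprecated pci_* DMA wrappers to the generic DMA API: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP_KERNEL, the map/unmap calls move to &pdev->dev, and PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE. The coherent-allocation pairing in isolation, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *ring_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, size, dma) */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void ring_free(struct pci_dev *pdev, size_t size, void *va,
		      dma_addr_t dma)
{
	/* was: pci_free_consistent(pdev, size, va, dma) */
	dma_free_coherent(&pdev->dev, size, va, dma);
}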
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 3efc5899f656..2e62a2c4eb63 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,12 @@ struct qede_dump_info {
u32 args[QEDE_DUMP_MAX_ARGS];
};
+struct qede_coalesce {
+ bool isvalid;
+ u16 rxc;
+ u16 txc;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -194,6 +200,7 @@ struct qede_dev {
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
+ struct qede_coalesce *coal_entry;
u8 req_num_tx;
u8 fp_num_tx;
u8 req_num_rx;
@@ -581,6 +588,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f);
void qede_forced_speed_maps_init(void);
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal);
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index bedbb85a179a..1560ad3d9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -819,8 +819,7 @@ out:
return rc;
}
-static int qede_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
@@ -855,6 +854,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set RX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].rxc = rxc;
+ edev->coal_entry[i].isvalid = true;
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -874,6 +875,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set TX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].txc = txc;
+ edev->coal_entry[i].isvalid = true;
}
}
@@ -2105,6 +2108,129 @@ err:
return rc;
}
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rxc, txc;
+ int rc = 0;
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ rxc, 0,
+ fp->rxq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set RX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].rxc = rxc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ 0, txc,
+ fp->txq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set TX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].txc = txc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+out:
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_get_per_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ void *rx_handle = NULL, *tx_handle = NULL;
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rx_coal, tx_coal;
+ int rc = 0;
+
+ rx_coal = QED_DEFAULT_RX_USECS;
+ tx_coal = QED_DEFAULT_TX_USECS;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ rx_handle = fp->rxq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
+ rx_handle);
+ if (rc) {
+ DP_INFO(edev, "Read Rx coalesce error\n");
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+ if (fp->type & QEDE_FASTPATH_TX)
+ tx_handle = fp->txq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
+ tx_handle);
+ if (rc)
+ DP_INFO(edev, "Read Tx coalesce error\n");
+
+out:
+ __qede_unlock(edev);
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
@@ -2148,6 +2274,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_fecparam = qede_set_fecparam,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
.flash_device = qede_flash_device,
.get_dump_flag = qede_get_dump_flag,
.get_dump_data = qede_get_dump_data,
@@ -2177,6 +2305,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
};
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..8c47a9d2a965 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1090,12 +1090,9 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data_hard_start = page_address(bd->data);
- xdp.data = xdp.data_hard_start + *data_offset;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.rxq = &rxq->xdp_rxq;
- xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1453,7 +1450,8 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
- if (rx_work_done < budget) {
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
napi_complete_done(napi, rx_work_done);
@@ -1799,6 +1797,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least not supported
+ * natively, so offloads can't be done trivially; disable them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
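
The budget check above encodes the netpoll contract: netpoll calls the NAPI poll routine with a budget of 0 and expects only Tx completion work, so the napi_complete_done() path must stay reachable in that case. Schematically, with hypothetical example_clean_* helpers:

#include <linux/netdevice.h>

static void example_clean_tx(struct napi_struct *napi);		/* stub */
static int example_clean_rx(struct napi_struct *napi, int budget);	/* stub */

static int example_poll(struct napi_struct *napi, int budget)
{
	int rx_done;

	example_clean_tx(napi);		/* always allowed, even budget 0 */
	rx_done = budget ? example_clean_rx(napi, budget) : 0;

	/* Under budget, or netpoll's budget==0 case: report completion. */
	if (rx_done < budget || !budget)
		napi_complete_done(napi, rx_done);

	return rx_done;
}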
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9cf960a6d007..4d952036ba82 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -663,8 +663,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +686,6 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
};
@@ -707,8 +703,6 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
.ndo_xdp_xmit = qede_xdp_transmit,
@@ -910,6 +904,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
{
u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
+ void *mem;
int i;
edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
@@ -919,6 +914,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
+ mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+ sizeof(*edev->coal_entry), GFP_KERNEL);
+ if (!mem) {
+ DP_ERR(edev, "coalesce entry allocation failed\n");
+ kfree(edev->coal_entry);
+ goto err;
+ }
+ edev->coal_entry = mem;
+
fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
/* Allocate the FP elements for Rx queues followed by combined and then
@@ -1326,8 +1330,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- if (mode != QEDE_REMOVE_RECOVERY)
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ kfree(edev->coal_entry);
free_netdev(ndev);
+ }
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -2334,8 +2340,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ struct ethtool_coalesce coal = {};
u8 num_tc;
- int rc;
+ int rc, i;
DP_INFO(edev, "Starting qede load\n");
@@ -2396,6 +2403,18 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
edev->state = QEDE_STATE_OPEN;
+ coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+ coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
+
+ for_each_queue(i) {
+ if (edev->coal_entry[i].isvalid) {
+ coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
+ coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
+ }
+ __qede_unlock(edev);
+ qede_set_per_coalesce(edev->ndev, i, &coal);
+ __qede_lock(edev);
+ }
DP_INFO(edev, "Ending successfully qede load\n");
goto out;
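
One subtlety in the coal_entry allocation earlier in this file: krealloc() leaves the old buffer allocated when it fails, so assigning its result directly to edev->coal_entry would leak the previous array. The temporary-pointer idiom in isolation, with a stand-in element type:

#include <linux/slab.h>
#include <linux/types.h>

struct coal_elem { u16 rxc, txc; bool isvalid; };

static int grow_entries(struct coal_elem **entries, size_t count)
{
	void *mem;

	mem = krealloc(*entries, count * sizeof(**entries), GFP_KERNEL);
	if (!mem) {
		kfree(*entries);  /* old buffer survives a failed krealloc */
		*entries = NULL;
		return -ENOMEM;
	}
	*entries = mem;
	return 0;
}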
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 27740c027681..214e347097a7 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -315,12 +315,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
* buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1802,13 +1801,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
* first buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1943,18 +1941,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
goto invalid_seg_count;
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[i],
- mapaddr),
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
dma_unmap_len(&tx_cb->map[i], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
qdev->ndev->stats.tx_packets++;
@@ -2021,10 +2017,9 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev->ndev->stats.rx_bytes += length;
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb->data);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
@@ -2067,10 +2062,9 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb2 = lrg_buf_cb2->skb;
skb_put(skb2, length); /* Just the second buffer length here. */
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb2->data);
skb_checksum_none_assert(skb2);
@@ -2319,9 +2313,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
err);
@@ -2357,11 +2351,11 @@ static int ql_send_map(struct ql3_adapter *qdev,
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- map = pci_map_single(qdev->pdev, oal,
+ map = dma_map_single(&qdev->pdev->dev, oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -2423,24 +2417,24 @@ map_error:
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[seg], mapaddr),
- dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
oal++;
seg++;
}
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
dma_unmap_addr(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return NETDEV_TX_BUSY;
@@ -2525,9 +2519,8 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
wmb();
qdev->req_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->req_q_size,
- &qdev->req_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
if ((qdev->req_q_virt_addr == NULL) ||
LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
@@ -2536,16 +2529,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
}
qdev->rsp_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->rsp_q_size,
- &qdev->rsp_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
if ((qdev->rsp_q_virt_addr == NULL) ||
LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
netdev_err(qdev->ndev, "rspQ allocation failed\n");
- pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
- qdev->req_q_virt_addr,
- qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
return -ENOMEM;
}
@@ -2561,15 +2552,13 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
return;
}
- pci_free_consistent(qdev->pdev,
- qdev->req_q_size,
- qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev->req_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->rsp_q_size,
- qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev->rsp_q_virt_addr = NULL;
@@ -2593,9 +2582,9 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
return -ENOMEM;
qdev->lrg_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- &qdev->lrg_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
@@ -2613,15 +2602,16 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev->small_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- &qdev->small_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->small_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
- pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
return -ENOMEM;
}
@@ -2638,17 +2628,15 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
return;
}
kfree(qdev->lrg_buf);
- pci_free_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
qdev->lrg_buf_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- qdev->small_buf_q_alloc_virt_addr,
- qdev->small_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
qdev->small_buf_q_virt_addr = NULL;
@@ -2666,9 +2654,9 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
QL_SMALL_BUFFER_SIZE);
qdev->small_buf_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- &qdev->small_buf_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
if (qdev->small_buf_virt_addr == NULL) {
netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
@@ -2701,10 +2689,10 @@ static void ql_free_small_buffers(struct ql3_adapter *qdev)
return;
}
if (qdev->small_buf_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- qdev->small_buf_virt_addr,
- qdev->small_buf_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
qdev->small_buf_virt_addr = NULL;
}
@@ -2719,10 +2707,10 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb, mapaddr),
dma_unmap_len(lrg_buf_cb, maplen),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
break;
@@ -2774,13 +2762,11 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
* buffer
*/
skb_reserve(skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
- skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -2865,8 +2851,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
* Network Completion Queue Producer Index Register
*/
qdev->shadow_reg_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
if (qdev->shadow_reg_virt_addr != NULL) {
qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
@@ -2921,10 +2907,9 @@ err_small_buffers:
err_buffer_queues:
ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
return -ENOMEM;
}
@@ -2937,10 +2922,9 @@ static void ql_free_mem_resources(struct ql3_adapter *qdev)
ql_free_buffer_queues(qdev);
ql_free_net_req_rsp_queues(qdev);
if (qdev->shadow_reg_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
qdev->shadow_reg_virt_addr = NULL;
}
}
@@ -3641,18 +3625,15 @@ static void ql_reset_work(struct work_struct *work)
if (tx_cb->skb) {
netdev_printk(KERN_DEBUG, ndev,
"Freeing lost SKB\n");
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[0],
- mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
for (j = 1; j < tx_cb->seg_count; j++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[j],
- mapaddr),
- dma_unmap_len(&tx_cb->map[j],
- maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
}
dev_kfree_skb(tx_cb->skb);
tx_cb->skb = NULL;
@@ -3784,13 +3765,10 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
pci_using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c2faf96fcade..96b947fde646 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -520,8 +520,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qlcnic_features_check,
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3d7d3ab383f8..3d00b3232308 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -183,6 +183,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ goto done;
+ }
+
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
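
The linearize fix above hinges on ownership: skb_linearize() can fail with -ENOMEM, and on failure the rx handler still owns the skb, so it must free it rather than leak it. The shape of the check, as a sketch:

#include <linux/skbuff.h>

/* Returns NULL (after freeing the skb) if linearization fails. */
static struct sk_buff *linearize_or_drop(struct sk_buff *skb)
{
	if (skb_linearize(skb)) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}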
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 7be86ef5a584..2728df46ec41 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -63,6 +63,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
+ RTL_GIGA_MAC_VER_53,
RTL_GIGA_MAC_VER_60,
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..f704da3f214c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,6 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
+#include <asm/unaligned.h>
#include <net/ip6_checksum.h>
#include "r8169.h"
@@ -145,6 +146,7 @@ static const struct {
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
+ [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
[RTL_GIGA_MAC_VER_60] = {"RTL8125A" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
@@ -260,6 +262,9 @@ enum rtl8168_8101_registers {
#define CSIAR_BYTE_ENABLE 0x0000f000
#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
+#define D3COLD_NO_PLL_DOWN BIT(7)
+#define D3HOT_NO_PLL_DOWN BIT(6)
+#define D3_NO_PLL_DOWN (BIT(7) | BIT(6))
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
#define EPHYAR_WRITE_CMD 0x80000000
@@ -529,6 +534,9 @@ enum rtl_rx_desc_bit {
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
+
+#define RxCSFailMask (IPFail | UDPFail | TCPFail)
+
RxVlanTag = (1 << 16), /* VLAN tag available */
};
@@ -584,6 +592,12 @@ enum rtl_flag {
RTL_FLAG_MAX
};
+enum rtl_dash_type {
+ RTL_DASH_NONE,
+ RTL_DASH_DP,
+ RTL_DASH_EP,
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -591,6 +605,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
enum mac_version mac_version;
+ enum rtl_dash_type dash_type;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -682,7 +697,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_52;
+ tp->mac_version <= RTL_GIGA_MAC_VER_53;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -746,14 +761,77 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
-static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
- if (reg & 0xffff0001) {
- if (net_ratelimit())
- netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
- return true;
- }
- return false;
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (type == ERIAR_OOB &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_53))
+ *cmd |= 0x7f0 << 18;
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
+}
+
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
+{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
+ if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
+ return;
+
+ RTL_W32(tp, ERIDR, val);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+}
+
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+{
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(tp, ERIDR) : ~0;
+}
+
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
+{
+ u32 val = rtl_eri_read(tp, addr);
+
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
+{
+ rtl_w0w1_eri(tp, addr, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
+{
+ rtl_w0w1_eri(tp, addr, 0, m);
+}
+
+static bool rtl_ocp_reg_failure(u32 reg)
+{
+ return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
}
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
@@ -763,7 +841,7 @@ DECLARE_RTL_COND(rtl_ocp_gphy_cond)
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
@@ -773,7 +851,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, GPHY_OCP, reg << 15);
@@ -784,7 +862,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
@@ -792,7 +870,7 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, OCPDR, reg << 15);
@@ -808,6 +886,25 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
+/* Work around a hw issue with RTL8168g PHY, the quirk disables
+ * PHY MCU interrupts before PHY power-down.
+ */
+static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_49:
+ if (value & BMCR_RESET || !(value & BMCR_PDOWN))
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
+ else
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -818,6 +915,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
if (tp->ocp_base != OCP_STD_PHY_BASE)
reg -= 0x10;
+ if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
+ rtl8168g_phy_suspend_quirk(tp, value);
+
r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
@@ -1009,70 +1109,6 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
-{
- /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
- *cmd |= 0x7f0 << 18;
-}
-
-DECLARE_RTL_COND(rtl_eriar_cond)
-{
- return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
-}
-
-static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
-{
- u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
-
- BUG_ON((addr & 3) || (mask == 0));
- RTL_W32(tp, ERIDR, val);
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
-}
-
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val)
-{
- _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
-}
-
-static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
-{
- u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
-
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
- RTL_R32(tp, ERIDR) : ~0;
-}
-
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
-{
- return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
-}
-
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
-{
- u32 val = rtl_eri_read(tp, addr);
-
- rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
-}
-
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
-{
- rtl_w0w1_eri(tp, addr, p, 0);
-}
-
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
-{
- rtl_w0w1_eri(tp, addr, 0, m);
-}
-
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
@@ -1158,19 +1194,10 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_start(tp);
- break;
- default:
- BUG();
- break;
- }
}
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
@@ -1189,44 +1216,52 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_stop(tp);
- break;
- default:
- BUG();
- break;
- }
}
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
+ return r8168dp_ocp_read(tp, reg) & BIT(15);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
-static bool r8168_check_dash(struct rtl8169_private *tp)
+static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- return r8168dp_check_dash(tp);
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
- return r8168ep_check_dash(tp);
+ return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
- return false;
+ return RTL_DASH_NONE;
+ }
+}
+
+static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ if (enable)
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
+ else
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ break;
+ default:
+ break;
}
}
@@ -1396,6 +1431,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@@ -1930,6 +1966,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
/* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
{ 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
/* 8168EP family. */
@@ -1962,7 +1999,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ /* It seems this early RTL8168dp version never made it to
+ * the wild. Let's see whether somebody complains; if not,
+ * we'll remove support for this chip version completely.
+ * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ */
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
{ 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
@@ -1996,9 +2037,12 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR */
- { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
- { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
+ /* FIXME: where did these entries come from? -- FR
+ * Not even the r8101 vendor driver knows these IDs,
+ * so let's disable detection for now. -- HK
+ * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
+ * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
+ */
/* 8110 family. */
{ 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2081,18 +2125,12 @@ static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
-static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
- const u16 w[] = {
- addr[0] | (addr[1] << 8),
- addr[2] | (addr[3] << 8),
- addr[4] | (addr[5] << 8)
- };
-
- rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
- rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
- rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
- rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
}
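A quick illustration of the byte order involved (a sketch, assuming the usual <asm/unaligned.h> semantics):

	#include <asm/unaligned.h>

	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	u32 lo = get_unaligned_le32(mac);     /* 0x33221100 — same as addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24 */
	u16 hi = get_unaligned_le16(mac + 4); /* 0x5544 — same as addr[4] | addr[5] << 8 */

The helpers therefore produce exactly the values the removed open-coded shifts assembled, with the unaligned access handled per-architecture.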
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
@@ -2142,14 +2180,14 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
genphy_soft_reset(tp->phydev);
}
-static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_unlock_config_regs(tp);
- RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
+ RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
rtl_pci_commit(tp);
- RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ RTL_W32(tp, MAC0, get_unaligned_le32(addr));
rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
@@ -2172,28 +2210,16 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
- break;
- default:
- break;
- }
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@@ -2202,64 +2228,10 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
- rtl_wol_suspend_quirk(tp);
- return;
- }
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- default:
- break;
+ rtl_wol_enable_rx(tp);
}
}
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- break;
- default:
- break;
- }
-
- phy_resume(tp->phydev);
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -2272,7 +2244,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
@@ -2313,14 +2285,14 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, MaxTxPacketSize, 0x24);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x0c);
+ RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
}
@@ -2338,13 +2310,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
+ int readrq = 4096;
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168b_1_hw_jumbo_enable(tp);
} else {
r8168b_1_hw_jumbo_disable(tp);
@@ -2352,7 +2325,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168c_hw_jumbo_enable(tp);
} else {
r8168c_hw_jumbo_disable(tp);
@@ -2365,20 +2338,18 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ if (jumbo)
r8168e_hw_jumbo_enable(tp);
- } else {
+ else
r8168e_hw_jumbo_disable(tp);
- }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
- pcie_set_readrq(tp->pci_dev, 4096);
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, readrq);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -2447,7 +2418,7 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
@@ -3706,6 +3677,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
+ [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
@@ -4044,17 +4016,72 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
{
+ int no = skb_network_offset(skb);
+ struct ipv6hdr *i6h, _i6h;
+ struct iphdr *ih, _ih;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+ return ih && ih->protocol == IPPROTO_UDP;
+ case htons(ETH_P_IPV6):
+ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+ return i6h && i6h->nexthdr == IPPROTO_UDP;
+ default:
+ return false;
+ }
+}
+
+#define RTL_MIN_PATCH_LEN 47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto = 0, len = skb->len;
+
+ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+ rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ unsigned int trans_data_len = skb_tail_pointer(skb) -
+ skb_transport_header(skb);
+
+ if (trans_data_len >= offsetof(struct udphdr, len) &&
+ trans_data_len < RTL_MIN_PATCH_LEN) {
+ u16 dest = ntohs(udp_hdr(skb)->dest);
+
+ /* dest is a standard PTP port */
+ if (dest == 319 || dest == 320)
+ padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+ }
+
+ if (trans_data_len < sizeof(struct udphdr))
+ padto = max_t(unsigned int, padto,
+ len + sizeof(struct udphdr) - trans_data_len);
+ }
+
+ return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto;
+
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
- return true;
+ padto = max_t(unsigned int, padto, ETH_ZLEN);
default:
- return false;
+ break;
}
+
+ return padto;
}
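A worked example under the constants above (illustrative numbers): for a PTP event message to UDP port 319 consisting of 14 bytes Ethernet + 20 bytes IPv4 + 8 bytes UDP + 34 bytes PTP header, skb->len is 76 (< 128 + RTL_MIN_PATCH_LEN) and trans_data_len is 8 + 34 = 42, which is >= offsetof(struct udphdr, len) (4) and < RTL_MIN_PATCH_LEN (47). The quirk therefore returns padto = 76 + 47 - 42 = 81, i.e. the frame is padded until at least 47 bytes follow the transport header.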
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4126,9 +4153,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
- /* eth_skb_pad would free the skb on error */
- return !__skb_put_padto(skb, ETH_ZLEN, false);
+ unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+ /* skb_padto would free the skb on error */
+ return !__skb_put_padto(skb, padto, false);
}
return true;
@@ -4305,6 +4333,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (skb->len < ETH_ZLEN)
features &= ~NETIF_F_CSUM_MASK;
+ if (rtl_quirk_packet_padto(tp, skb))
+ features &= ~NETIF_F_CSUM_MASK;
+
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
@@ -4406,10 +4437,9 @@ static inline int rtl8169_fragmented_frame(u32 status)
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 status = opts1 & RxProtoMask;
+ u32 status = opts1 & (RxProtoMask | RxCSFailMask);
- if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ if (status == RxProtoTCP || status == RxProtoUDP)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
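The simplification works because the checksum-fail bits are folded into the comparison mask: (opts1 & (RxProtoMask | RxCSFailMask)) can only equal RxProtoTCP (or RxProtoUDP) when the protocol field decodes to TCP (or UDP) and every bit under RxCSFailMask is clear — which, assuming RxCSFailMask covers TCPFail and UDPFail as the names suggest, is exactly the condition the two explicit !(opts1 & ...Fail) tests expressed before.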
@@ -4523,8 +4553,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- rtl_irq_disable(tp);
- napi_schedule(&tp->napi);
+ if (napi_schedule_prep(&tp->napi)) {
+ rtl_irq_disable(tp);
+ __napi_schedule(&tp->napi);
+ }
out:
rtl_ack_events(tp, status);
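The interrupt handler now follows the canonical NAPI prep/schedule split. A self-contained sketch of the pattern (the example_* names are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	struct example_priv {
		struct napi_struct napi;
		/* device state... */
	};

	static void example_irq_disable(struct example_priv *p)
	{
		/* mask the device's interrupt sources */
	}

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct example_priv *p = dev_id;

		/* napi_schedule_prep() atomically claims the NAPI instance and
		 * fails if poll is already scheduled or running; masking the
		 * device IRQ only on success keeps it balanced with the unmask
		 * performed when the poll routine completes.
		 */
		if (napi_schedule_prep(&p->napi)) {
			example_irq_disable(p);
			__napi_schedule(&p->napi);
		}

		return IRQ_HANDLED;
	}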
@@ -4556,10 +4588,10 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct net_device *dev = tp->dev;
int work_done;
- work_done = rtl_rx(dev, tp, budget);
-
rtl_tx(dev, tp, budget);
+ work_done = rtl_rx(dev, tp, budget);
+
if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
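Reaping TX completions before RX is presumably meant to free transmit descriptors (and possibly wake a stopped queue) before RX processing can trigger new transmissions; the budget accounting is unaffected, since as the code shows only rtl_rx() contributes to work_done.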
@@ -4616,12 +4648,12 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp, true);
- rtl_pll_power_down(tp);
+ rtl_prepare_power_down(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
- rtl_pll_power_up(tp);
+ phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -4643,10 +4675,10 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- phy_disconnect(tp->phydev);
-
free_irq(pci_irq_vector(pdev, 0), tp);
+ phy_disconnect(tp->phydev);
+
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
@@ -4776,9 +4808,12 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)
#ifdef CONFIG_PM
-static int rtl8169_net_resume(struct rtl8169_private *tp)
+static int rtl8169_runtime_resume(struct device *dev)
{
+ struct rtl8169_private *tp = dev_get_drvdata(dev);
+
rtl_rar_set(tp, tp->dev->dev_addr);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
if (tp->TxDescArray)
rtl8169_up(tp);
@@ -4812,7 +4847,7 @@ static int __maybe_unused rtl8169_resume(struct device *device)
if (tp->mac_version == RTL_GIGA_MAC_VER_37)
rtl_init_rxcfg(tp);
- return rtl8169_net_resume(tp);
+ return rtl8169_runtime_resume(device);
}
static int rtl8169_runtime_suspend(struct device *device)
@@ -4832,15 +4867,6 @@ static int rtl8169_runtime_suspend(struct device *device)
return 0;
}
-static int rtl8169_runtime_resume(struct device *device)
-{
- struct rtl8169_private *tp = dev_get_drvdata(device);
-
- __rtl8169_set_wol(tp, tp->saved_wolopts);
-
- return rtl8169_net_resume(tp);
-}
-
static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4888,12 +4914,10 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl_rar_set(tp, tp->dev->perm_addr);
if (system_state == SYSTEM_POWER_OFF) {
- if (tp->saved_wolopts) {
- rtl_wol_suspend_quirk(tp);
+ if (tp->saved_wolopts)
rtl_wol_shutdown_quirk(tp);
- }
- pci_wake_from_d3(pdev, true);
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
}
@@ -4907,7 +4931,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(tp->dev);
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
@@ -4975,16 +4999,12 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
{
/* Get MAC address */
if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
- u32 value = rtl_eri_read(tp, 0xe0);
-
- mac_addr[0] = (value >> 0) & 0xff;
- mac_addr[1] = (value >> 8) & 0xff;
- mac_addr[2] = (value >> 16) & 0xff;
- mac_addr[3] = (value >> 24) & 0xff;
+ u32 value;
+ value = rtl_eri_read(tp, 0xe0);
+ put_unaligned_le32(value, mac_addr);
value = rtl_eri_read(tp, 0xe4);
- mac_addr[4] = (value >> 0) & 0xff;
- mac_addr[5] = (value >> 8) & 0xff;
+ put_unaligned_le16(value, mac_addr + 4);
} else if (rtl_is_8125(tp)) {
rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
@@ -5036,7 +5056,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->name = "r8169";
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
- new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ new_bus->irq[0] = PHY_MAC_INTERRUPT;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
@@ -5099,7 +5119,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
@@ -5265,12 +5285,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
if (chipset == RTL_GIGA_MAC_NONE) {
- dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
+ dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid);
return -ENODEV;
}
tp->mac_version = chipset;
+ tp->dash_type = rtl_check_dash(tp);
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
@@ -5340,6 +5362,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
+ rtl_set_d3_pll_down(tp, true);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5360,9 +5384,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* chip gets powered up in rtl_open() */
- rtl_pll_power_down(tp);
-
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5376,7 +5397,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
"ok" : "ko");
- if (r8168_check_dash(tp)) {
+ if (tp->dash_type != RTL_DASH_NONE) {
netdev_info(dev, "DASH enabled\n");
rtl8168_driver_start(tp);
}
@@ -5393,9 +5414,7 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
-#ifdef CONFIG_PM
- .driver.pm = &rtl8169_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&rtl8169_pm_ops),
};
module_pci_driver(rtl8169_pci_driver);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 913d030d73eb..50f0f621b1aa 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1358,6 +1358,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
+ [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 7453b17a37a2..cb47e68c1a3e 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -165,7 +165,7 @@ enum ravb_reg {
GTO2 = 0x03A8,
GIC = 0x03AC,
GIS = 0x03B0,
- GCPT = 0x03B4, /* Undocumented? */
+ GCPT = 0x03B4, /* Documented for R-Car Gen3 only */
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
@@ -225,7 +225,7 @@ enum CSR_BIT {
CSR_OPS_RESET = 0x00000001,
CSR_OPS_CONFIG = 0x00000002,
CSR_OPS_OPERATION = 0x00000004,
- CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */
CSR_DTS = 0x00000100,
CSR_TPO0 = 0x00010000,
CSR_TPO1 = 0x00020000,
@@ -241,13 +241,12 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
-/* APSR */
+/* APSR (R-Car Gen3 only) */
enum APSR_BIT {
- APSR_MEMS = 0x00000002,
- APSR_CMSW = 0x00000010,
- APSR_DM = 0x00006000, /* Undocumented? */
- APSR_DM_RDM = 0x00002000,
- APSR_DM_TDM = 0x00004000,
+ APSR_MEMS = 0x00000002, /* Undocumented */
+ APSR_CMSW = 0x00000010,
+ APSR_RDM = 0x00002000,
+ APSR_TDM = 0x00004000,
};
/* RCR */
@@ -530,16 +529,16 @@ enum RIS2_BIT {
/* TIC */
enum TIC_BIT {
- TIC_FTE0 = 0x00000001, /* Undocumented? */
- TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIC_TFUE = 0x00000100,
TIC_TFWE = 0x00000200,
};
/* TIS */
enum TIS_BIT {
- TIS_FTF0 = 0x00000001, /* Undocumented? */
- TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
@@ -547,8 +546,8 @@ enum TIS_BIT {
/* ISS */
enum ISS_BIT {
- ISS_FRS = 0x00000001, /* Undocumented? */
- ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */
+ ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */
ISS_ES = 0x00000040,
ISS_MS = 0x00000080,
ISS_TFUS = 0x00000100,
@@ -608,13 +607,13 @@ enum GTI_BIT {
/* GIC */
enum GIC_BIT {
- GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */
GIC_PTME = 0x00000004,
};
/* GIS */
enum GIS_BIT {
- GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
@@ -808,10 +807,10 @@ enum ECMR_BIT {
ECMR_TE = 0x00000020,
ECMR_RE = 0x00000040,
ECMR_MPDE = 0x00000200,
- ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
@@ -830,7 +829,7 @@ enum ECSR_BIT {
enum ECSIPR_BIT {
ECSIPR_ICDIP = 0x00000001,
ECSIPR_MPDIP = 0x00000002,
- ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+ ECSIPR_LCHNGIP = 0x00000004,
};
/* PIR */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index bd30505fbc57..eb0c03bdb12d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2034,10 +2034,10 @@ static void ravb_set_delay_mode(struct net_device *ndev)
u32 set = 0;
if (priv->rxcidm)
- set |= APSR_DM_RDM;
+ set |= APSR_RDM;
if (priv->txcidm)
- set |= APSR_DM_TDM;
- ravb_modify(ndev, APSR, APSR_DM, set);
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
static int ravb_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c63304632935..590b088bc4c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
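The bit-banged accessors are wrapped at the mii_bus level, rather than in the callers, because phylib can touch the MDIO bus while the interface is down (e.g. from the PHY state machine or ethtool). Taking a runtime-PM reference around each mdiobb_read()/mdiobb_write() from the mdio-bitbang library keeps the MAC clocked for exactly the duration of the bus access.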
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 6fad25321dc5..315a6e5c0f59 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -103,15 +103,13 @@ struct rocker_world_ops {
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans);
+ unsigned long brport_flags);
int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
rocker_port,
unsigned long *
p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans);
+ u32 ageing_time);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index dd0bc7f0aaee..3473d296b2e2 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1550,17 +1550,13 @@ static void rocker_world_port_stop(struct rocker_port *rocker_port)
}
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_attr_stp_state_set(rocker_port, state);
}
@@ -1580,8 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
@@ -1595,7 +1590,7 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
if (err)
return err;
- if (brport_flags & ~brport_flags_s)
+ if (flags.mask & ~brport_flags_s)
return -EINVAL;
return 0;
@@ -1603,52 +1598,37 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
- trans);
+ return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}
static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
-
+ u32 ageing_time)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
- trans);
+ return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_obj_vlan_add(rocker_port, vlan);
}
@@ -2066,8 +2046,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
********************/
static int rocker_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2075,23 +2054,19 @@ static int rocker_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = rocker_world_port_attr_stp_state_set(rocker_port,
- attr->u.stp_state,
- trans);
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
- attr->u.ageing_time,
- trans);
+ attr->u.ageing_time);
break;
default:
err = -EOPNOTSUPP;
@@ -2102,8 +2077,7 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2111,8 +2085,7 @@ static int rocker_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = rocker_world_port_obj_vlan_add(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -2725,8 +2698,7 @@ rocker_switchdev_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = rocker_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = rocker_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -2847,8 +2819,7 @@ rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = rocker_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = rocker_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = rocker_port_obj_del(netdev, port_obj_info->obj);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7072b249c8bd..967a634ee9ac 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -923,7 +923,7 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
struct ofdpa_flow_tbl_entry *entry;
u32 priority;
bool vlan_bridging = !!vlan_id;
- bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
+ bool dflt = !eth_dst || eth_dst_mask;
bool wild = false;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -2488,8 +2488,7 @@ static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
unsigned long orig_flags;
@@ -2497,14 +2496,11 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
orig_flags = ofdpa_port->brport_flags;
ofdpa_port->brport_flags = brport_flags;
- if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
- !switchdev_trans_ph_prepare(trans))
+
+ if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(ofdpa_port->rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_port->brport_flags = orig_flags;
-
return err;
}
@@ -2520,18 +2516,15 @@ ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
+ u32 ageing_time)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- if (!switchdev_trans_ph_prepare(trans)) {
- ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
- if (ofdpa_port->ageing_time < ofdpa->ageing_time)
- ofdpa->ageing_time = ofdpa_port->ageing_time;
- mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
- }
+ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (ofdpa_port->ageing_time < ofdpa->ageing_time)
+ ofdpa->ageing_time = ofdpa_port->ageing_time;
+ mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
return 0;
}
@@ -2540,32 +2533,16 @@ static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
}
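The loops disappear because the switchdev core no longer passes VLAN ranges: struct switchdev_obj_port_vlan now carries a single vid, with one notification emitted per VLAN, so the driver handles exactly one VID per add/del call instead of iterating vid_begin..vid_end.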
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index b1e7f7ab281c..fceb6d637235 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -203,8 +203,8 @@ int sxgbe_mdio_register(struct net_device *ndev)
case PHY_POLL:
irq_str = "POLL";
break;
- case PHY_IGNORE_INTERRUPT:
- irq_str = "IGNORE";
+ case PHY_MAC_INTERRUPT:
+ irq_str = "MAC";
break;
default:
sprintf(irq_num, "%d", phy->irq);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 718308076341..36c8625a6fd7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -612,8 +612,6 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
};
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a4a626e9cd9a..1bfeee283ea9 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -17,6 +17,7 @@
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
+#include "workarounds.h"
/* This is the first interrupt mode to try out of:
* 0 => MSI-X
@@ -137,6 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
{
unsigned int n_channels = parallelism;
int vec_count;
+ int tx_per_ev;
int n_xdp_tx;
int n_xdp_ev;
@@ -149,9 +151,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* multiple tx queues, assuming tx and ev queues are both
* maximum size.
*/
-
+ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
vec_count = pci_msix_vec_count(efx->pci_dev);
if (vec_count < 0)
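A worked example with illustrative sizes (the real constants live in the sfc headers): if EFX_MAX_EVQ_SIZE were 32768 and EFX_TXQ_MAX_ENT() returned 4096, then tx_per_ev = 8, and a system with 12 possible CPUs would need n_xdp_ev = DIV_ROUND_UP(12, 8) = 2 XDP event queues — sized by actual event-queue capacity rather than by EFX_MAX_TXQ_PER_CHANNEL.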
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index aaa112877561..89c5c75f479f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -293,14 +293,10 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
efx->rx_prefix_size);
- xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
+ xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
/* No support yet for XDP metadata */
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + rx_buf->len;
- xdp.rxq = &rx_queue->xdp_rxq_info;
- xdp.frame_sz = efx->rx_page_buf_step;
+ xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+ rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
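Both XDP conversions in this series (here and in netsec below) use the same two helpers from <net/xdp.h>; a minimal sketch of their intended use, where rxq, frame_sz, hard_start, headroom and pkt_len are placeholders for the driver's own values:

	struct xdp_buff xdp;

	/* Once per NAPI poll: bind the buffer to its rxq info and the
	 * per-buffer frame size used for truesize calculations.
	 */
	xdp_init_buff(&xdp, frame_sz, &rxq->xdp_rxq_info);

	/* Once per packet: sets data_hard_start, data and data_end in one
	 * call; passing false invalidates the metadata area, matching the
	 * old explicit xdp_set_data_meta_invalid().
	 */
	xdp_prepare_buff(&xdp, hard_start, headroom, pkt_len, false);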
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 742a1f7a838c..891b49281bc6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2191,7 +2191,7 @@ static const struct of_device_id smc91x_match[] = {
MODULE_DEVICE_TABLE(of, smc91x_match);
/**
- * of_try_set_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
* @desc: where to store the GPIO descriptor, if it exists
* @name: name of the GPIO in DT
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 823d9a7184fe..606c79de93a6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -557,6 +557,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -582,6 +583,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -594,6 +596,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -623,6 +626,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -1589,6 +1593,8 @@ static int smsc911x_open(struct net_device *dev)
int retval;
int irq_flags;
+ pm_runtime_get_sync(dev->dev.parent);
+
/* find and start the given phy */
if (!dev->phydev) {
retval = smsc911x_mii_probe(dev);
@@ -1735,6 +1741,7 @@ mii_free_out:
phy_disconnect(dev->phydev);
dev->phydev = NULL;
out:
+ pm_runtime_put(dev->dev.parent);
return retval;
}
@@ -1766,6 +1773,7 @@ static int smsc911x_stop(struct net_device *dev)
dev->phydev = NULL;
}
netif_carrier_off(dev);
+ pm_runtime_put(dev->dev.parent);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -2334,7 +2342,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -2540,6 +2547,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
}
spin_unlock_irq(&pdata->mac_lock);
+ pm_runtime_put(&pdev->dev);
netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 19d20a6d0d44..3c53051bdacf 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -956,8 +956,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u32 xdp_act = 0;
int done = 0;
- xdp.rxq = &dring->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE;
+ xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
@@ -1016,10 +1015,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
dma_dir);
prefetch(desc->addr);
- xdp.data_hard_start = desc->addr;
- xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + pkt_len;
+ xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+ pkt_len, false);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 53f14c5a9e02..e675ba12fde2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -219,6 +219,14 @@ config DWMAC_INTEL_PLAT
This selects the Intel platform specific glue layer support for
the stmmac device driver. This driver is used for the Intel Keem Bay
SoC.
+
+config DWMAC_VISCONTI
+ tristate "Toshiba Visconti DWMAC support"
+ default ARCH_VISCONTI
+ depends on OF && COMMON_CLK && (ARCH_VISCONTI || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on Toshiba Visconti SoCs.
+
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 24e6145d4eae..366740ab9c5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
+obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 82b1c7a5a7a9..6c19fcc76c6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -74,8 +74,6 @@ MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
static int intel_eth_plat_probe(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
const struct of_device_id *match;
@@ -83,7 +81,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
unsigned long rate;
int ret;
- plat_dat = priv->plat;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
@@ -129,7 +126,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
@@ -143,7 +140,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..751dfdeec41c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
int ret;
int i;
+ plat->pdev = pdev;
plat->phy_addr = -1;
plat->clk_csr = 5;
plat->has_gmac = 0;
@@ -375,6 +376,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -406,6 +408,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -457,6 +460,21 @@ static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
+static int adls_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ /* SerDes power up and power down are done in BIOS for ADL */
+
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adls_sgmii1g_info = {
+ .setup = adls_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -721,7 +739,11 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
@@ -735,6 +757,10 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..848e5c37746b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -68,10 +68,21 @@
*/
#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+#define PRG_ETH1 0x4
+
+/* Adds a delay to the input RX_CLK for better timing.
+ * Each step is 200ps. These bits are used with external RGMII PHYs
+ * because the RGMII RX path only has a small timing window. cfg_rxclk_dly
+ * adjusts the window between RX_CLK and RX_DATA and improves the stability
+ * of "rx data valid".
+ */
+#define PRG_ETH1_CFG_RXCLK_DLY GENMASK(19, 16)
+
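GENMASK(19, 16) gives four bits, i.e. 0..15 steps of 200ps, which matches the 0..3000ps range the probe code enforces below. A worked example: the legacy "amlogic,rx-delay-ns" value of 2 becomes rx_delay_ps = 2000, and FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY, 2000 / 200) programs 10 steps into the field.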
struct meson8b_dwmac;
struct meson8b_dwmac_data {
int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+ bool has_prg_eth1_rgmii_rx_delay;
};
struct meson8b_dwmac {
@@ -82,7 +93,7 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
- u32 rx_delay_ns;
+ u32 rx_delay_ps;
struct clk *timing_adj_clk;
};
@@ -135,7 +146,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
@@ -268,32 +279,37 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
return 0;
}
-static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
{
- u32 tx_dly_config, rx_dly_config, delay_config;
+ u32 tx_dly_config, rx_adj_config, cfg_rxclk_dly, delay_config;
int ret;
+ rx_adj_config = 0;
+ cfg_rxclk_dly = 0;
tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
dwmac->tx_delay_ns >> 1);
- if (dwmac->rx_delay_ns == 2)
- rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
- else
- rx_dly_config = 0;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay)
+ cfg_rxclk_dly = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY,
+ dwmac->rx_delay_ps / 200);
+ else if (dwmac->rx_delay_ps == 2000)
+ rx_adj_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
- delay_config = tx_dly_config | rx_dly_config;
+ delay_config = tx_dly_config | rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
delay_config = tx_dly_config;
+ cfg_rxclk_dly = 0;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- delay_config = rx_dly_config;
+ delay_config = rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RMII:
delay_config = 0;
+ cfg_rxclk_dly = 0;
break;
default:
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
@@ -301,7 +317,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
dev_err(dwmac->dev,
"The timing-adjustment clock is mandatory for the RX delay re-timing\n");
@@ -323,6 +339,16 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
delay_config);
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH1, PRG_ETH1_CFG_RXCLK_DLY,
+ cfg_rxclk_dly);
+
+ return 0;
+}
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+
if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
@@ -406,16 +432,30 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- /* use 0ns as fallback since this is what most boards actually use */
- if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
- &dwmac->rx_delay_ns))
- dwmac->rx_delay_ns = 0;
+ /* RX delay defaults to 0ps since this is what many boards use */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-internal-delay-ps",
+ &dwmac->rx_delay_ps)) {
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ps))
+ /* convert ns to ps */
+ dwmac->rx_delay_ps *= 1000;
+ }
- if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
- dev_err(&pdev->dev,
- "The only allowed RX delays values are: 0ns, 2ns");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay) {
+ if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
+ dev_err(dwmac->dev,
+ "The RGMII RX delay range is 0..3000ps in 200ps steps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ } else {
+ if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
+ dev_err(dwmac->dev,
+ "The only allowed RGMII RX delays values are: 0ps, 2000ps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
@@ -425,6 +465,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ ret = meson8b_init_rgmii_delays(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -453,10 +497,17 @@ err_remove_config_dt:
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
.set_phy_mode = meson8b_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
};
static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
.set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_g12a_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = true,
};
static const struct of_device_id meson8b_dwmac_match[] = {
@@ -478,7 +529,7 @@ static const struct of_device_id meson8b_dwmac_match[] = {
},
{
.compatible = "amlogic,meson-g12a-dwmac",
- .data = &meson_axg_dwmac_data,
+ .data = &meson_g12a_dwmac_data,
},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..6b75cf2603ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Is the internal PHY powered on
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -788,12 +805,12 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
/* Make sure the EPHY is properly reset, as U-Boot may leave
* it in a deasserted state, in which case resetting the EMAC may fail.
+ *
+ * This assumes the driver has exclusive access to the EPHY reset.
*/
- reset_control_assert(gmac->rst_ephy);
-
- ret = reset_control_deassert(gmac->rst_ephy);
+ ret = reset_control_reset(gmac->rst_ephy);
if (ret) {
- dev_err(priv->device, "Cannot deassert internal phy\n");
+ dev_err(priv->device, "Cannot reset internal PHY\n");
clk_disable_unprepare(gmac->ephy_clk);
return ret;
}
@@ -803,15 +820,14 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
return 0;
}
-static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+static void sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
{
if (!gmac->internal_phy_powered)
- return 0;
+ return;
clk_disable_unprepare(gmac->ephy_clk);
reset_control_assert(gmac->rst_ephy);
gmac->internal_phy_powered = false;
- return 0;
}
/* MDIO multiplexing switch function
@@ -831,7 +847,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +854,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +867,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +897,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +926,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +944,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +962,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +983,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +997,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1003,17 +1018,8 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
- if (gmac->internal_phy_powered)
- sun8i_dwmac_unpower_internal_phy(gmac);
- }
-
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
+ if (gmac->variant->soc_has_internal_phy)
+ sun8i_dwmac_unpower_internal_phy(gmac);
clk_disable_unprepare(gmac->tx_clk);
@@ -1049,16 +1055,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1135,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1198,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1215,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1224,13 +1229,14 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
+
/* The mux must be registered after parent MDIO
* so after stmmac_dvr_probe()
*/
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1245,52 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
- return ret;
+ return 0;
+
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
-return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
+}
+
+static void sun8i_dwmac_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ sun8i_dwmac_exit(pdev, gmac);
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1312,8 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
+ .shutdown = sun8i_dwmac_shutdown,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
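
The error-path rework in the probe hunks above is the classic LIFO goto-unwind ladder: each label releases exactly one resource, and a failure at step N falls through the labels for steps N-1 down to 1, so teardown always mirrors acquisition order. A minimal user-space sketch of the idiom (stand-in functions, not the kernel APIs used by the driver):

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
	int ret;

	ret = acquire("config");	/* stands in for stmmac_probe_config_dt() */
	if (ret)
		return ret;
	ret = acquire("syscon");	/* stands in for sun8i_dwmac_set_syscon() */
	if (ret)
		goto err_config;
	ret = acquire("clocks");	/* stands in for sun8i_dwmac_init() */
	if (ret)
		goto err_syscon;
	return 0;

err_syscon:
	release("syscon");	/* undo step 2 */
err_config:
	release("config");	/* undo step 1 */
	return ret;
}

int main(void) { return probe(); }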
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
new file mode 100644
index 000000000000..d23be45a64e5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Toshiba Visconti Ethernet Support
+ *
+ * (C) Copyright 2020 TOSHIBA CORPORATION
+ * (C) Copyright 2020 Toshiba Electronic Devices & Storage Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+#define REG_ETHER_CONTROL 0x52D4
+#define ETHER_ETH_CONTROL_RESET BIT(17)
+
+#define REG_ETHER_CLOCK_SEL 0x52D0
+#define ETHER_CLK_SEL_TX_CLK_EN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EN BIT(1)
+#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
+#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
+#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
+#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
+#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
+#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
+#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
+
+#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
+
+#define ETHER_CONFIG_INTF_MII 0
+#define ETHER_CONFIG_INTF_RGMII BIT(0)
+#define ETHER_CONFIG_INTF_RMII BIT(2)
+
+struct visconti_eth {
+ void __iomem *reg;
+ u32 phy_intf_sel;
+ struct clk *phy_ref_clk;
+ spinlock_t lock; /* lock to protect register update */
+};
+
+static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct visconti_eth *dwmac = priv;
+ unsigned int val, clk_sel_val = 0; /* keep defined for unsupported speed/interface pairs */
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwmac->lock, flags);
+
+ /* adjust link */
+ val = readl(dwmac->reg + MAC_CTRL_REG);
+ val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+
+ switch (speed) {
+ case SPEED_1000:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+ case SPEED_100:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
+ val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
+ break;
+ case SPEED_10:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
+ val |= GMAC_CONFIG_PS;
+ break;
+ default:
+ /* No bit control */
+ break;
+ }
+
+ writel(val, dwmac->reg + MAC_CTRL_REG);
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ switch (dwmac->phy_intf_sel) {
+ case ETHER_CONFIG_INTF_RGMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ break;
+ case ETHER_CONFIG_INTF_RMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ break;
+ case ETHER_CONFIG_INTF_MII:
+ default:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_EN;
+ break;
+ }
+
+ /* Start clock */
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+}
+
+static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ unsigned int reg_val, clk_sel_val;
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ reg_val = dwmac->phy_intf_sel;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ /* Enable TX/RX clock */
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ writel(clk_sel_val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ writel((clk_sel_val | ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN),
+ dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* release internal-reset */
+ reg_val |= ETHER_ETH_CONTROL_RESET;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ return 0;
+}
+
+static int visconti_eth_clock_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ int err;
+
+ dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(dwmac->phy_ref_clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ return PTR_ERR(dwmac->phy_ref_clk);
+ }
+
+ err = clk_prepare_enable(dwmac->phy_ref_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int visconti_eth_clock_remove(struct platform_device *pdev)
+{
+ struct visconti_eth *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(dwmac->phy_ref_clk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+static int visconti_eth_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct visconti_eth *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto remove_config;
+ }
+
+ spin_lock_init(&dwmac->lock);
+ dwmac->reg = stmmac_res.addr;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+
+ ret = visconti_eth_clock_probe(pdev, plat_dat);
+ if (ret)
+ goto remove_config;
+
+ ret = visconti_eth_init_hw(pdev, plat_dat);
+ if (ret)
+ goto remove;
+
+ plat_dat->dma_cfg->aal = 1;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto remove;
+
+ return ret;
+
+remove:
+ visconti_eth_clock_remove(pdev);
+remove_config:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int visconti_eth_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = stmmac_pltfr_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = visconti_eth_clock_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove clock: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
+}
+
+static const struct of_device_id visconti_eth_dwmac_match[] = {
+ { .compatible = "toshiba,visconti-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match);
+
+static struct platform_driver visconti_eth_dwmac_driver = {
+ .probe = visconti_eth_dwmac_probe,
+ .remove = visconti_eth_dwmac_remove,
+ .driver = {
+ .name = "visconti-eth-dwmac",
+ .of_match_table = visconti_eth_dwmac_match,
+ },
+};
+module_platform_driver(visconti_eth_dwmac_driver);
+
+MODULE_AUTHOR("Toshiba");
+MODULE_DESCRIPTION("Toshiba Visconti Ethernet DWMAC glue driver");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp");
+MODULE_LICENSE("GPL v2");
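
The FREQ_SEL values chosen in visconti_eth_fix_mac_speed() above follow the standard RGMII reference-clock rule: 125 MHz at 1 Gb/s, 25 MHz at 100 Mb/s, 2.5 MHz at 10 Mb/s. A runnable restatement of the mapping (illustrative helper, not part of the driver):

#include <stdio.h>

static double rgmii_clk_mhz(int speed_mbps)
{
	switch (speed_mbps) {
	case 1000: return 125.0; /* ETHER_CLK_SEL_FREQ_SEL_125M */
	case 100:  return 25.0;  /* ETHER_CLK_SEL_FREQ_SEL_25M */
	case 10:   return 2.5;   /* ETHER_CLK_SEL_FREQ_SEL_2P5M */
	default:   return 0.0;   /* unsupported speed */
	}
}

int main(void)
{
	const int speeds[] = { 1000, 100, 10 };

	for (int i = 0; i < 3; i++)
		printf("%4d Mb/s -> %.1f MHz\n", speeds[i], rgmii_clk_mhz(speeds[i]));
	return 0;
}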
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 03e79a677c8b..8f7ac24545ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate)
{
- u32 speed, total_offset, offset, ctrl, ctr_low;
- u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
- u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
int i, ret = 0x0;
- u64 total_ctr;
-
- if (extcfg & GMAC_CONFIG_EIPG_EN) {
- offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
- offset = 104 + (offset * 8);
- } else {
- offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
- offset = 96 - (offset * 8);
- }
-
- speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
- speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
- switch (speed) {
- case 0x0:
- offset = offset * 1000; /* 1G */
- break;
- case 0x1:
- offset = offset * 400; /* 2.5G */
- break;
- case 0x2:
- offset = offset * 100000; /* 10M */
- break;
- case 0x3:
- offset = offset * 10000; /* 100M */
- break;
- default:
- return -EINVAL;
- }
-
- offset = offset / 1000;
+ u32 ctrl;
ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
if (ret)
return ret;
- total_offset = 0;
for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
if (ret)
return ret;
-
- total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
- total_ctr += total_offset;
-
- ctr_low = do_div(total_ctr, 1000000000);
-
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
- if (ret)
- return ret;
-
ctrl = readl(ioaddr + MTL_EST_CONTROL);
ctrl &= ~PTOV;
ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
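
The PTOV programming retained above expresses the PTP time offset as six PTP clock periods in nanoseconds. A quick arithmetic check, assuming a 250 MHz PTP clock (the actual rate is platform-dependent):

#include <stdio.h>

int main(void)
{
	unsigned int ptp_rate = 250000000;		      /* Hz, assumed */
	unsigned int ptov_ns = (1000000000u / ptp_rate) * 6;  /* 4 ns * 6 */
	printf("PTOV = %u ns\n", ptov_ns);		      /* prints 24 */
	return 0;
}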
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9e54f953634b..c5642985ef95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -268,6 +268,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
+ if (priv->plat->pdev) {
+ strlcpy(info->bus_info, pci_name(priv->plat->pdev),
+ sizeof(info->bus_info));
+ }
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5b1c12ff98c0..26b971cd4da5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->rx_napi);
+ __napi_schedule(&ch->rx_napi);
}
}
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->tx_napi);
+ __napi_schedule(&ch->tx_napi);
}
}
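
Context for the two __napi_schedule() hunks above: __napi_schedule_irqoff() is only valid when hard interrupts are already disabled, which holds in hard-IRQ context but not when the handler runs threaded (forced interrupt threading or PREEMPT_RT); __napi_schedule() saves and restores the interrupt state itself, so it is the safe variant in both cases.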
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = new_mtu;
+ dev->mtu = mtu;
netdev_update_features(dev);
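
The mtu snapshot above preserves the value requested by the caller, presumably so dev->mtu keeps the user-visible MTU even if new_mtu is adjusted elsewhere in the function (for example, aligned for buffer sizing) between validation and assignment.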
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f5bed4d26e80..44bb133c3000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -316,6 +316,32 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
+ /* Port Transmit Rate and Speed Divider */
+ switch (priv->speed) {
+ case SPEED_10000:
+ ptr = 32;
+ speed_div = 10000000;
+ break;
+ case SPEED_5000:
+ ptr = 32;
+ speed_div = 5000000;
+ break;
+ case SPEED_2500:
+ ptr = 8;
+ speed_div = 2500000;
+ break;
+ case SPEED_1000:
+ ptr = 8;
+ speed_div = 1000000;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ speed_div = 100000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
@@ -324,12 +350,13 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
- }
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
- /* Port Transmit Rate and Speed Divider */
- ptr = (priv->speed == SPEED_100) ? 4 : 8;
- speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
/* Final adjustments for HW */
value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
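
Worked example of the shaper math above: at SPEED_1000 the new switch selects ptr = 8 and speed_div = 1000000, and idleslope arrives from tc-cbs in kbit/s. Illustrative numbers, runnable stand-alone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t idleslope = 20000;	      /* 20 Mbit/s reservation, assumed */
	int64_t ptr = 8, speed_div = 1000000; /* SPEED_1000 row of the switch */
	int64_t value = idleslope * 1024 * ptr / speed_div;

	printf("idle slope register value = %lld\n", (long long)value); /* 163 */
	return 0;
}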
@@ -599,7 +626,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time;
+ struct timespec64 time, current_time;
+ ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -694,7 +722,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
}
/* Adjust for real system time */
- time = ktime_to_timespec64(qopt->base_time);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ if (ktime_after(qopt->base_time, current_time_ns)) {
+ time = ktime_to_timespec64(qopt->base_time);
+ } else {
+ ktime_t base_time;
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+ qopt->cycle_time);
+ base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
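
The base-time adjustment above rolls a base_time that lies in the past forward by whole cycle_time steps so the schedule starts strictly in the future. The same arithmetic with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time = 1000, now = 10500, cycle = 2000; /* ns, assumed */
	int64_t n = (now - base_time) / cycle;		     /* 4 */
	int64_t new_base = base_time + (n + 1) * cycle;	     /* 11000 > now */

	printf("n = %lld, new base = %lld\n", (long long)n, (long long)new_base);
	return 0;
}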
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index 8c4195a9a2cc..589797bad1f9 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -634,7 +634,7 @@ err_out:
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
- desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+ desc_ops->alloc_channels_and_rings = xlgmac_alloc_channels_and_rings;
desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
desc_ops->map_tx_skb = xlgmac_map_tx_skb;
desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 26aa7f32151f..26d178f8616b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -654,7 +654,7 @@ static int xlgmac_open(struct net_device *netdev)
pdata->rx_buf_size = ret;
/* Allocate the channels and rings */
- ret = desc_ops->alloc_channles_and_rings(pdata);
+ ret = desc_ops->alloc_channels_and_rings(pdata);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index cab3e40a86b9..8598aaf3ec99 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -379,7 +379,7 @@ struct xlgmac_channel {
} ____cacheline_aligned;
struct xlgmac_desc_ops {
- int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
+ int (*alloc_channels_and_rings)(struct xlgmac_pdata *pdata);
void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
int (*map_tx_skb)(struct xlgmac_channel *channel,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index abfc4c435d59..affcf92cd3aa 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -92,6 +92,7 @@ config TI_CPTS
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select NET_DEVLINK
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -105,6 +106,15 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPSW_SWITCHDEV
+ bool "TI K3 AM654x/J721E CPSW Switch mode support"
+ depends on TI_K3_AM65_CPSW_NUSS
+ depends on NET_SWITCHDEV
+ help
+ This enables switchdev support for the TI K3 CPSWxG Ethernet
+ Switch. Enable this option to add hardware switch support to the
+ AM65 CPSW NUSS driver.
+
config TI_K3_AM65_CPTS
tristate "TI K3 AM65x CPTS"
depends on ARCH_K3 && OF
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 6e779292545d..75f761efbea7 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -26,4 +26,5 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 766e8866bbef..638d7b03be4b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -31,6 +31,7 @@
#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"
@@ -228,6 +229,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
u32 port_mask, unreg_mcast = 0;
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -255,6 +259,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -277,6 +284,11 @@ static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
{
struct am65_cpsw_common *common = port->common;
+ if (promisc && !common->is_emac_mode) {
+ dev_dbg(common->dev, "promisc mode requested in switch mode");
+ return;
+ }
+
if (promisc) {
/* Enable promiscuous mode */
cpsw_ale_control_set(common->ale, port->port_id,
@@ -366,8 +378,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_err(dev, "Failed to map rx skb buffer\n");
return -EINVAL;
@@ -375,6 +388,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -406,6 +420,11 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
netdev_features_t features)
{
@@ -452,9 +471,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
- cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
- ALE_PORT_NOLEARN, 1);
/* switch to vlan unaware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
@@ -468,6 +484,11 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
port_mask, port_mask,
port_mask & ~ALE_PORT_HOST);
+ if (common->is_emac_mode)
+ am65_cpsw_init_host_port_emac(common);
+ else
+ am65_cpsw_init_host_port_switch(common);
+
for (i = 0; i < common->rx_chns.descs_num; i++) {
skb = __netdev_alloc_skb_ip_align(NULL,
AM65_CPSW_MAX_PACKET_SIZE,
@@ -596,7 +617,6 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- u32 port_mask;
int ret, i;
ret = pm_runtime_get_sync(common->dev);
@@ -629,19 +649,10 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
- if (port->slave.mac_only) {
- /* enable mac-only mode on port */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_MACONLY, 1);
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_NOLEARN, 1);
- }
-
- port_mask = BIT(port->port_id) | ALE_PORT_HOST;
- cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
- HOST_PORT_NUM, ALE_SECURE, 0);
- cpsw_ale_add_mcast(common->ale, ndev->broadcast,
- port_mask, 0, 0, ALE_MCAST_FWD_2);
+ if (common->is_emac_mode)
+ am65_cpsw_init_port_emac_ale(port);
+ else
+ am65_cpsw_init_port_switch_ale(port);
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
@@ -691,8 +702,9 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_kfree_skb_any(skb);
@@ -779,6 +791,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
@@ -793,18 +806,19 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
- dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
if (new_skb) {
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
napi_gro_receive(&common->napi_rx, skb);
- ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -864,7 +878,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct device *dev,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
@@ -875,20 +888,23 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
next_desc = first_desc;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_single(dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
while (next_desc_dma) {
next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
next_desc_dma);
cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_page(dev, buf_dma, buf_dma_len,
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
@@ -906,7 +922,7 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
dev_kfree_skb_any(skb);
}
@@ -926,7 +942,7 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
ndev = skb->dev;
@@ -1119,9 +1135,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
netif_txq = netdev_get_tx_queue(ndev, q_idx);
/* Map the linear buffer */
- buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb buffer\n");
ndev->stats.tx_errors++;
goto err_free_skb;
@@ -1130,7 +1146,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
dev_dbg(dev, "Failed to allocate descriptor\n");
- dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
goto busy_stop_q;
}
@@ -1140,6 +1157,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
cppi5_hdesc_set_pkttype(first_desc, 0x7);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
*(swdata) = skb;
@@ -1175,9 +1193,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
- buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ndev->stats.tx_errors++;
@@ -1185,11 +1203,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
}
cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(next_desc,
buf_dma, frag_size, buf_dma, frag_size);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
pkt_len += frag_size;
@@ -1237,14 +1257,14 @@ done_tx:
return NETDEV_TX_OK;
err_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
busy_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
@@ -1441,6 +1461,13 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
+static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return &port->devlink_port;
+}
+
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1454,6 +1481,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
+ .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1545,16 +1573,6 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->tx_chn_name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- dev_err(dev, "Failed to create poll %d\n", ret);
- goto err;
- }
tx_chn->tx_chn =
k3_udma_glue_request_tx_chn(dev,
@@ -1565,6 +1583,17 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
"Failed to request tx dma channel\n");
goto err;
}
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq <= 0) {
@@ -1622,14 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
/* init all flows */
rx_chn->dev = dev;
rx_chn->descs_num = max_desc_num;
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
- rx_chn->descs_num,
- hdesc_size, "rx");
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- dev_err(dev, "Failed to create rx poll %d\n", ret);
- goto err;
- }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -1637,6 +1658,16 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
"Failed to request rx dma channel\n");
goto err;
}
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
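
The reordering in the two hunks above follows one rule: request the UDMA channel first, ask it which device performs the DMA, then create the descriptor pool against that device so descriptors are mapped for the engine that actually accesses them. A user-space sketch of the dependency (stand-in types, not the k3_udma_glue API):

#include <stdio.h>

struct chan { const char *dma_dev; };

static struct chan request_channel(void)
{
	/* stands in for k3_udma_glue_request_*_chn() plus
	 * k3_udma_glue_*_get_dma_device() */
	return (struct chan){ .dma_dev = "udma0" };
}

static void create_pool(const char *dma_dev)
{
	/* stands in for k3_cppi_desc_pool_create_name(); mappings are
	 * made against dma_dev, so it must be known first */
	printf("mapping descriptor pool for %s\n", dma_dev);
}

int main(void)
{
	struct chan c = request_channel();

	create_pool(c.dma_dev);
	return 0;
}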
@@ -2018,6 +2049,441 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
}
}
+static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
+{
+ int set_val = 0;
+ int i;
+
+ if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
+ set_val = 1;
+
+ dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 1; i <= common->port_num; i++) {
+ struct am65_cpsw_port *port = am65_common_get_port(common, i);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+bool am65_cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ return !common->is_emac_mode;
+ }
+
+ return false;
+}
+
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ if (!common->br_members) {
+ common->hw_bridge_dev = br_ndev;
+ } else {
+ /* The port is already a member of another bridge; joining a
+ * second bridge is unsupported
+ */
+ if (common->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ common->br_members |= BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ return NOTIFY_DONE;
+}
+
+static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ common->br_members &= ~BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ if (!common->br_members)
+ common->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int am65_cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
+ else
+ am65_cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return 0;
+
+ cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
+ ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = am65_cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return;
+
+ am65_cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops am65_cpsw_devlink_ops = {};
+
+static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
+{
+ cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ am65_cpsw_init_stp_ale_entry(common);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
+
+ /* learning makes no sense in multi-mac (EMAC) mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *common = dl_priv->common;
+
+ dev_dbg(common->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !common->is_emac_mode;
+
+ return 0;
+}
+
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *common = port->common;
+ u32 port_mask;
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ if (slave->mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
+}
+
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *cpsw = port->common;
+ u32 port_mask;
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_NOLEARN, 0);
+
+ cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
+ slave->port_vlan);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_MACONLY, 0);
+}
+
+static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *cpsw = dl_priv->common;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->is_emac_mode)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ cpsw->is_emac_mode = !switch_en;
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+
+ if (!sl_ndev)
+ continue;
+
+ slave = am65_ndev_to_slave(sl_ndev);
+ if (switch_en)
+ slave->port_vlan = cpsw->default_vlan;
+ else
+ slave->port_vlan = 0;
+ }
+
+ goto exit;
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ am65_cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ slave = am65_ndev_to_slave(sl_ndev);
+ slave->port_vlan = cpsw->default_vlan;
+
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_switch_ale(port);
+ }
+
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ am65_cpsw_init_host_port_emac(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ port->slave.port_vlan = 0;
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_emac_ale(port);
+ }
+ }
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static const struct devlink_param am65_cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ am65_cpsw_dl_switch_mode_get,
+ am65_cpsw_dl_switch_mode_set, NULL),
+};
+
+static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
+{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
+ }
+}
+
+static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
+{
+ struct devlink_port_attrs attrs = {};
+ struct am65_cpsw_devlink *dl_priv;
+ struct device *dev = common->dev;
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int ret = 0;
+ int i;
+
+ common->devlink =
+ devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!common->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(common->devlink);
+ dl_priv->common = common;
+
+ ret = devlink_register(common->devlink, dev);
+ if (ret) {
+ dev_err(dev, "devlink reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ /* Provide devlink hook to switch mode when multiple external ports
+ * are present and the NUSS switchdev driver is enabled.
+ */
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ ret = devlink_params_register(common->devlink,
+ am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "devlink params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+ devlink_params_publish(common->devlink);
+ }
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->port_id;
+ attrs.switch_id.id_len = sizeof(resource_size_t);
+ memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
+ devlink_port_attrs_set(dl_port, &attrs);
+
+ ret = devlink_port_register(common->devlink, dl_port, port->port_id);
+ if (ret) {
+ dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
+ port->port_id, ret);
+ goto dl_port_unreg;
+ }
+ devlink_port_type_eth_set(dl_port, port->ndev);
+ }
+
+ return ret;
+
+dl_port_unreg:
+ am65_cpsw_unregister_devlink_ports(common);
+dl_unreg:
+ devlink_unregister(common->devlink);
+dl_free:
+ devlink_free(common->devlink);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+{
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ devlink_params_unpublish(common->devlink);
+ devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ }
+
+ am65_cpsw_unregister_devlink_ports(common);
+ devlink_unregister(common->devlink);
+ devlink_free(common->devlink);
+}
+
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
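
The offload_fwd_mark condition in the big hunk above sets the mark only when every enabled external port is a member of the one hardware bridge. The bit arithmetic, runnable with illustrative values (user-space 32-bit GENMASK stand-in):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int port_num = 2;			 /* external ports 1..2 */
	unsigned int disabled_ports_mask = 0;		 /* all ports enabled */
	unsigned int br_members = (1u << 1) | (1u << 2); /* both ports bridged */
	int set_val = br_members ==
		      (GENMASK(port_num, 1) & ~disabled_ports_mask);

	printf("offload_fwd_mark = %d\n", set_val);	 /* prints 1 */
	return 0;
}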
@@ -2051,14 +2517,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
}
}
+ ret = am65_cpsw_register_notifiers(common);
+ if (ret)
+ goto err_cleanup_ndev;
+
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ goto clean_unregister_notifiers;
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
- return 0;
+ return 0;
+clean_unregister_notifiers:
+ am65_cpsw_unregister_notifiers(common);
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
+
return ret;
}
@@ -2102,9 +2578,16 @@ static const struct am65_cpsw_pdata j721e_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2131,6 +2614,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ u64 id_temp;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
@@ -2150,6 +2634,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (IS_ERR(common->ss_base))
return PTR_ERR(common->ss_base);
common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+ /* Use device's physical base address as switch id */
+ id_temp = cpu_to_be64(res->start);
+ memcpy(common->switch_id, &id_temp, sizeof(res->start));
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
if (!node)
@@ -2163,12 +2650,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
-
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "error setting dma mask: %d\n", ret);
- return ret;
- }
+ common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
@@ -2248,6 +2730,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
+ common->is_emac_mode = true;
+
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
goto err_of_clear;
@@ -2281,6 +2765,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_unregister_devlink(common);
+ am65_cpsw_unregister_notifiers(common);
+
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
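
With the devlink pieces registered above, the switch/EMAC mode becomes a runtime devlink parameter; toggling it would look like, e.g., "devlink dev param set platform/<cpsw-device> name switch_mode value true cmode runtime" (the device name is platform-specific). The set callback refuses the transition back to EMAC mode while ports are still bridged, and reconfigures ALE and port state only under rtnl_lock().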
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 02aed4c0ceba..5d93e346f05e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -6,12 +6,14 @@
#ifndef AM65_CPSW_NUSS_H_
#define AM65_CPSW_NUSS_H_
+#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
+#include <net/devlink.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -22,6 +24,8 @@ struct am65_cpts;
#define AM65_CPSW_MAX_RX_QUEUES 1
#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
+
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
@@ -32,6 +36,7 @@ struct am65_cpsw_slave_data {
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
+ int port_vlan;
};
struct am65_cpsw_port {
@@ -47,6 +52,7 @@ struct am65_cpsw_port {
bool tx_ts_enabled;
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
+ struct devlink_port devlink_port;
};
struct am65_cpsw_host {
@@ -56,6 +62,7 @@ struct am65_cpsw_host {
};
struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
struct napi_struct napi_tx;
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
@@ -69,6 +76,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_rx_chn {
struct device *dev;
+ struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
@@ -83,6 +91,15 @@ struct am65_cpsw_pdata {
const char *ale_dev_id;
};
+enum cpsw_devlink_param_id {
+ AM65_CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ AM65_CPSW_DL_PARAM_SWITCH_MODE,
+};
+
+struct am65_cpsw_devlink {
+ struct am65_cpsw_common *common;
+};
+
struct am65_cpsw_common {
struct device *dev;
struct device *mdio_dev;
@@ -115,6 +132,14 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
+
+ bool is_emac_mode;
+ u16 br_members;
+ int default_vlan;
+ struct devlink *devlink;
+ struct net_device *hw_bridge_dev;
+ struct notifier_block am65_cpsw_netdevice_nb;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
};
struct am65_cpsw_ndev_stats {
@@ -129,6 +154,7 @@ struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
+ bool offload_fwd_mark;
};
#define am65_ndev_to_priv(ndev) \
@@ -156,4 +182,6 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+bool am65_cpsw_port_dev_check(const struct net_device *dev);
+
#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 3bdd4dbcd2ff..ebcc6386cc34 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -356,7 +356,7 @@ static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
-/**
+/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
new file mode 100644
index 000000000000..d93ffd8a08b0
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
+#include "cpsw_ale.h"
+
+struct am65_cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct am65_cpsw_port *port;
+ unsigned long event;
+};
+
+static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ u8 cpsw_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_STATE, cpsw_state);
+ netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags flags)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
+
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, port->port_id);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
+ unreg_mcast_add);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
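+ /* port_id 0 is the host (CPU) port; its VLAN register lives in the
+ * host port register block rather than the per-port one.
+ */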
+ if (port->port_id)
+ pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
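+ /* Port VLAN register layout: VID in bits [11:0], CFI in bit 12,
+ * priority (COS) in bits [15:13].
+ */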
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (port->port_id)
+ writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+}
+
+static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
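+ /* Requests against the bridge device itself target the host (CPU)
+ * port; otherwise operate on the external port's bit.
+ */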
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(port->port_id);
+ flags = port->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ netdev_err(port->ndev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
+ if (!pvid)
+ return ret;
+
+ am65_cpsw_set_pvid(port, vid, 0, 0);
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is returned only
+ * if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == am65_cpsw_get_pvid(port))
+ am65_cpsw_set_pvid(port, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is returned only
+ * if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
+ ALE_VLAN, vid);
+ netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
+ port->ndev->name, vlan->vid, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
+}
+
+static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
+}
+
+static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int err;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int del_mask;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(port->port_id);
+
+ /* Ignore the error; an error code is returned only when the entry is already removed */
+ cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return 0;
+}
+
+static int am65_cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_add(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_add(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int am65_cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_del(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_del(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void am65_cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct am65_cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct am65_cpsw_switchdev_event_work, work);
+ struct am65_cpsw_port *port = switchdev_work->port;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_id = port->port_id;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
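+ /* An FDB entry matching the port's own MAC address describes the
+ * host itself, so install it on the host (CPU) port instead.
+ */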
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ am65_cpsw_fdb_offload_notify(port->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int am65_cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct am65_cpsw_switchdev_event_work *switchdev_work;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
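+ /* FDB notifications arrive in atomic (RCU) context; defer the ALE
+ * programming to a worker that can sleep and take rtnl_lock.
+ */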
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
+ switchdev_work->port = port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
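+ /* fdb_info->addr points at caller-owned storage, so take a private
+ * copy for the deferred work; it is freed in the work handler.
+ */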
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = am65_cpsw_switchdev_event,
+};
+
+static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = am65_cpsw_switchdev_blocking_event,
+};
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.h b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
new file mode 100644
index 000000000000..a67a7606bc80
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+ skb->offload_fwd_mark = val;
+}
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw);
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw);
+#else
+static inline int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+}
+
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+}
+
+#endif
+
+#endif /* DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 5dc60ecabe56..9caaae79fc95 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -727,7 +727,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
/**
* am65_cpts_rx_enable - enable rx timestamping
* @cpts: cpts handle
- * @skb: packet
+ * @en: enable
*
* This function enables rx packet timestamping. The CPTS can timestamp all
* rx packets.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b0f00b4edd94..fd966567464c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -392,29 +392,21 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
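+ /* xdp_init_buff()/xdp_prepare_buff() replace the open-coded field
+ * setup removed above: they fill data_hard_start, data and data_end,
+ * and mark the metadata area invalid.
+ */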
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
port = priv->emac_port + cpsw->data.dual_emac;
- ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cdc308a2aa3e..d828f856237a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1256,6 +1256,13 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.major_ver_mask = 0x7,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
{ },
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2f5e0ad23ad7..58a64313ac00 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -335,28 +335,20 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
- ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 99f44563e10f..bb59e768915e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1323,7 +1323,7 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
}
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port)
+ struct page *page, int port, int *len)
{
struct cpsw_common *cpsw = priv->cpsw;
struct net_device *ndev = priv->ndev;
@@ -1341,10 +1341,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
}
act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
switch (act) {
case XDP_PASS:
ret = CPSW_XDP_PASS;
- break;
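+ /* XDP_PASS hands the buffer to the stack, which does its own
+ * accounting; only verdicts consumed here bump the rx counters
+ * below.
+ */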
+ goto out;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -1370,8 +1373,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
trace_xdp_exception(ndev, prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
goto drop;
}
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
out:
rcu_read_unlock();
return ret;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 7b7f3596b20d..a323bea54faa 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -438,7 +438,7 @@ int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
struct page *page, int port);
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port);
+ struct page *page, int port, int *len);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 29747da5c514..a72bb570756f 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -24,16 +24,12 @@ struct cpsw_switchdev_event_work {
unsigned long event;
};
-static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans, u8 state)
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
{
struct cpsw_common *cpsw = priv->cpsw;
u8 cpsw_state;
int ret = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
switch (state) {
case BR_STATE_FORWARDING:
cpsw_state = ALE_PORT_STATE_FORWARD;
@@ -60,32 +56,31 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct cpsw_common *cpsw = priv->cpsw;
- bool unreg_mcast_add = false;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
- if (brport_flags & BR_MCAST_FLOOD)
- unreg_mcast_add = true;
- dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
- unreg_mcast_add, priv->emac_port);
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
- cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
- unreg_mcast_add);
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+ }
return 0;
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
- if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
@@ -93,7 +88,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
static int cpsw_port_attr_set(struct net_device *ndev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
@@ -102,15 +97,15 @@ static int cpsw_port_attr_set(struct net_device *ndev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ ret = cpsw_port_attr_br_flags_pre_set(ndev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ ret = cpsw_port_stp_state_set(priv, attr->u.stp_state);
dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ ret = cpsw_port_attr_br_flags_set(priv, attr->orig_dev,
attr->u.brport_flags);
break;
default:
@@ -253,56 +248,24 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
}
static int cpsw_port_vlans_add(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
- priv->ndev->name, vlan->vid_begin, vlan->flags);
+ priv->ndev->name, vlan->vid, vlan->flags);
if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
return 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int cpsw_port_vlans_del(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan)
-
-{
- struct net_device *orig_dev = vlan->obj.orig_dev;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_del(priv, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
+ return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
static int cpsw_port_mdb_add(struct cpsw_priv *priv,
- struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -311,9 +274,6 @@ static int cpsw_port_mdb_add(struct cpsw_priv *priv,
int port_mask;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
@@ -352,7 +312,6 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
static int cpsw_port_obj_add(struct net_device *ndev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
@@ -365,11 +324,11 @@ static int cpsw_port_obj_add(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_add(priv, vlan, trans);
+ err = cpsw_port_vlans_add(priv, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
- err = cpsw_port_mdb_add(priv, mdb, trans);
+ err = cpsw_port_mdb_add(priv, mdb);
break;
default:
err = -EOPNOTSUPP;
@@ -392,7 +351,7 @@ static int cpsw_port_obj_del(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_del(priv, vlan);
+ err = cpsw_port_vlan_del(priv, vlan->vid, vlan->obj.orig_dev);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cfff3d48807a..a4efd5e35158 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -358,20 +358,16 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- const struct of_device_id *of_id;
+ const struct davinci_mdio_of_param *of_mdio_data;
ret = davinci_mdio_probe_dt(&data->pdata, pdev);
if (ret)
return ret;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
- of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
- if (of_id) {
- const struct davinci_mdio_of_param *of_mdio_data;
-
- of_mdio_data = of_id->data;
- if (of_mdio_data)
- autosuspend_delay_ms =
+ of_mdio_data = of_device_get_match_data(&pdev->dev);
+ if (of_mdio_data) {
+ autosuspend_delay_ms =
of_mdio_data->autosuspend_delay_ms;
}
} else {
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3d1fc8d2ca66..55e652624bd7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1100,7 +1100,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
return packets_done;
}
-/**
+/*
* gelic_card_interrupt - event handler for gelic_net
*/
static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
@@ -1400,6 +1400,7 @@ out:
/**
* gelic_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called if tx hangs. Schedules a task that resets the interface
*/
@@ -1431,6 +1432,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
/**
* gelic_ether_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
+ * @napi: napi structure
*
* fills out function pointers in the net_device structure
*/
@@ -1632,7 +1634,7 @@ static void gelic_card_get_vlan_info(struct gelic_card *card)
dev_info(ctodev(card), "internal vlan %s\n",
card->vlan_required? "enabled" : "disabled");
}
-/**
+/*
* ps3_gelic_driver_probe - add a device to the control of this driver
*/
static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
@@ -1787,7 +1789,7 @@ fail_open:
return result;
}
-/**
+/*
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5f5b33e6653b..d5a75ef7e3ca 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -254,7 +254,7 @@ spider_net_set_promisc(struct spider_net_card *card)
/**
* spider_net_get_descr_status -- returns the status of a descriptor
- * @descr: descriptor to look at
+ * @hwdescr: descriptor to look at
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
@@ -542,6 +542,7 @@ error:
/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
+ * @netdev: interface device structure
* @addr: multicast address
*
* returns the hash value.
@@ -890,7 +891,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
+ * @t: timer context used to obtain the pointer to net card data structure
*
* spider_net_cleanup_tx_ring is called by either the tx_timer
* or from the NAPI polling routine.
@@ -1063,6 +1064,7 @@ static void show_rx_chain(struct spider_net_card *card)
/**
* spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ * @card: card structure
*
* If the driver fails to keep up and empty the queue, then the
* hardware will run out of room to put incoming packets. This
@@ -1220,7 +1222,7 @@ bad_desc:
/**
* spider_net_poll - NAPI poll function called by the stack to return packets
- * @netdev: interface device structure
+ * @napi: napi device structure
* @budget: number of packets we can pass to the stack at most
*
* returns 0 if no more packets available to the driver/stack. Returns 1,
@@ -1268,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
- * @ptr: pointer to new MAC address
+ * @p: pointer to new MAC address
*
* Returns 0 on success, <0 on failure. Currently, we don't support this
* and will always return EOPNOTSUPP.
@@ -1340,6 +1342,8 @@ spider_net_link_reset(struct net_device *netdev)
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt status register 1 (GHIINT1STS)
+ * @error_reg2: interrupt status register 2 (GHIINT2STS)
*
* spider_net_handle_error_irq treats or ignores all error conditions
* found when an interrupt is presented
@@ -1961,8 +1965,7 @@ init_firmware_failed:
/**
* spider_net_link_phy
- * @data: used for pointer to card structure
- *
+ * @t: timer context used to obtain the pointer to net card data structure
*/
static void spider_net_link_phy(struct timer_list *t)
{
@@ -2140,7 +2143,7 @@ spider_net_stop(struct net_device *netdev)
/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
- * @data: data, is interface device structure
+ * @work: work context used to obtain the pointer to net card data structure
*
* called as task when tx hangs, resets interface (if interface is up)
*/
@@ -2174,6 +2177,7 @@ out:
/**
* spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called if tx hangs. Schedules a task that resets the interface
*/
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 3b2137d1f4c6..c6eb7f2368aa 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -18,7 +18,6 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || COMPILE_TEST
select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a03c3ca1b28d..1e966a39967e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -339,6 +339,10 @@
#define DELAY_OF_ONE_MILLISEC 1000
+/* Xilinx PCS/PMA PHY register for switching 1000BaseX or SGMII */
+#define XLNX_MII_STD_SELECT_REG 0x11
+#define XLNX_MII_STD_SELECT_SGMII BIT(0)
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -377,22 +381,29 @@ struct axidma_bd {
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
+ * @phylink: Pointer to phylink instance
+ * @phylink_config: phylink configuration settings
+ * @pcs_phy: Reference to PCS/PMA PHY if used
+ * @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
+ * @clk: Clock for AXI bus
* @mii_bus: Pointer to MII bus structure
* @mii_clk_div: MII bus clock divider value
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
- * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
+ * @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
+ * @eth_irq: Ethernet core IRQ number
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
- * @last_link: Phy link state in which the PHY was negotiated earlier
* @features: Stores the extended features supported by the axienet hw
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @tx_bd_num: Size of TX buffer descriptor ring
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @rx_bd_num: Size of RX buffer descriptor ring
* @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
* accessed currently. Used while alloc. BDs before a TX starts
* @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
@@ -414,23 +425,20 @@ struct axienet_local {
struct net_device *ndev;
struct device *dev;
- /* Connection to PHY device */
struct device_node *phy_node;
struct phylink *phylink;
struct phylink_config phylink_config;
- /* Reference to PCS/PMA PHY if used */
struct mdio_device *pcs_phy;
- /* Clock for AXI bus */
+ bool switch_x_sgmii;
+
struct clk *clk;
- /* MDIO bus data */
- struct mii_bus *mii_bus; /* MII bus reference */
- u8 mii_clk_div; /* MII bus clock divider value */
+ struct mii_bus *mii_bus;
+ u8 mii_clk_div;
- /* IO registers, dma functions and IRQs */
resource_size_t regs_start;
void __iomem *regs;
void __iomem *dma_regs;
@@ -442,10 +450,9 @@ struct axienet_local {
int eth_irq;
phy_interface_t phy_mode;
- u32 options; /* Current options word */
+ u32 options;
u32 features;
- /* Buffer descriptors */
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6fea980acf64..3a8775e0ca55 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1469,6 +1469,13 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}
+static int axienet_ethtools_nway_reset(struct net_device *dev)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(lp->phylink);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
@@ -1483,6 +1490,7 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.set_coalesce = axienet_ethtools_set_coalesce,
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
+ .nway_reset = axienet_ethtools_nway_reset,
};
static void axienet_validate(struct phylink_config *config,
@@ -1494,13 +1502,22 @@ static void axienet_validate(struct phylink_config *config,
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* Only support the mode we are configured for */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (lp->switch_x_sgmii)
+ break;
+ fallthrough;
+ default:
+ if (state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
}
phylink_set(mask, Autoneg);
@@ -1560,6 +1577,33 @@ static void axienet_mac_an_restart(struct phylink_config *config)
phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}
+static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
+
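+ /* phylink calls mac_prepare() before reconfiguring the link, making
+ * it a suitable point to flip the PCS/PMA core between SGMII and
+ * 1000BaseX through the vendor-specific MII register.
+ */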
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (!lp->switch_x_sgmii)
+ return 0;
+
+ ret = mdiobus_write(lp->pcs_phy->bus,
+ lp->pcs_phy->addr,
+ XLNX_MII_STD_SELECT_REG,
+ iface == PHY_INTERFACE_MODE_SGMII ?
+ XLNX_MII_STD_SELECT_SGMII : 0);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ ret);
+ return ret;
+ default:
+ return 0;
+ }
+}
+
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -1637,6 +1681,7 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
+ .mac_prepare = axienet_mac_prepare,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1817,6 +1862,18 @@ static int axienet_probe(struct platform_device *pdev)
lp->options = XAE_OPTION_DEFAULTS;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+
+ lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ ret = PTR_ERR(lp->clk);
+ goto free_netdev;
+ }
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
+ goto free_netdev;
+ }
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
@@ -1876,6 +1933,9 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,switch-x-sgmii");
+
/* Start with the proprietary, and broken phy_type */
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
if (!ret) {
@@ -1905,6 +1965,12 @@ static int axienet_probe(struct platform_device *pdev)
if (ret)
goto free_netdev;
}
+ if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
+ dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
+ ret = -EINVAL;
+ goto free_netdev;
+ }
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
@@ -1992,20 +2058,6 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
- lp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lp->clk)) {
- dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
- PTR_ERR(lp->clk));
- lp->clk = NULL;
- } else {
- ret = clk_prepare_enable(lp->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock: %d\n",
- ret);
- goto free_netdev;
- }
- }
-
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 008b9a40faad..007840d4a807 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1193,8 +1193,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
- (unsigned int __force)ndev->mem_start,
+ "Xilinx EmacLite at 0x%08lX mapped to 0x%08lX, irq=%d\n",
+ (unsigned long __force)ndev->mem_start,
(unsigned long __force)lp->base_addr, ndev->irq);
return 0;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 2e5202923510..0152f1e70783 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -247,7 +247,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
}
#endif
-static spinlock_t mdio_lock;
+static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
@@ -528,7 +528,6 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
- spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5523f069b9a5..4ac0373326ef 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1197,11 +1197,12 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
/* MTU range: 68 - (something less than 65535) */
@@ -1851,16 +1852,10 @@ static int geneve_netdevice_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- } else if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, false);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, true);
- }
+ if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ geneve_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ geneve_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4c04e271f184..39c00f050fbd 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -189,8 +189,10 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
/* Get rid of the GTP + UDP headers. */
if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
- !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
- return -1;
+ !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
+ pctx->dev->stats.rx_length_errors++;
+ goto err;
+ }
netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
@@ -206,6 +208,10 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
netif_rx(skb);
return 0;
+
+err:
+ pctx->dev->stats.rx_dropped++;
+ return -1;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
@@ -515,8 +521,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
goto err_rt;
}
- skb_dst_drop(skb);
-
/* This is similar to tnl_update_pmtu(). */
df = iph->frag_off;
if (df) {
@@ -539,7 +543,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
@@ -592,7 +595,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
ip4_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
- true, false);
+ !net_eq(sock_net(pktinfo.pctx->sk),
+ dev_net(dev)),
+ false);
break;
}
@@ -610,13 +615,23 @@ static const struct net_device_ops gtp_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
};
+static const struct device_type gtp_type = {
+ .name = "gtp",
+};
+
static void gtp_link_setup(struct net_device *dev)
{
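+ /* GTPv0 carries the largest header of the supported variants, so
+ * size both the headroom and the default MTU for it.
+ */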
+ unsigned int max_gtp_header_len = sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ sizeof(struct gtp0_header);
+
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &gtp_type);
dev->hard_header_len = 0;
dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN - max_gtp_header_len;
/* Zero header length. */
dev->type = ARPHRD_NONE;
@@ -626,11 +641,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
- /* Assume largest header, ie. GTPv0. */
- dev->needed_headroom = LL_MAX_HEADER +
- sizeof(struct iphdr) +
- sizeof(struct udphdr) +
- sizeof(struct gtp0_header);
+ dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
@@ -727,7 +738,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
static size_t gtp_get_size(const struct net_device *dev)
{
- return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
+ return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
+ nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -736,6 +748,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
+ goto nla_put_failure;
return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2a87cfa27ac0..e1a497d3c9ba 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -105,9 +105,43 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
u32 processor_masks_entry_size;
};
-/* Fwd declaration */
-struct ndis_tcp_ip_checksum_info;
-struct ndis_pkt_8021q_info;
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
+};
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -194,7 +228,8 @@ int netvsc_send(struct net_device *net,
struct sk_buff *skb,
bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp);
+ struct rndis_message *resp,
+ void *data);
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan);
@@ -884,9 +919,10 @@ struct multi_recv_comp {
#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */
struct nvsc_rsc {
- const struct ndis_pkt_8021q_info *vlan;
- const struct ndis_tcp_ip_checksum_info *csum_info;
- const u32 *hash_info;
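+ /* PPIs are copied out of the receive buffer by value instead of
+ * being referenced in place; ppi_flags below records which of them
+ * were actually present in the packet.
+ */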
+ struct ndis_pkt_8021q_info vlan;
+ struct ndis_tcp_ip_checksum_info csum_info;
+ u32 hash_info;
+ u8 ppi_flags; /* valid/present bits for the above PPIs */
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
@@ -894,6 +930,10 @@ struct nvsc_rsc {
u32 len[NVSP_RSC_MAX];
};
+#define NVSC_RSC_VLAN BIT(0) /* valid/present bit for 'vlan' */
+#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
+#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
+
struct netvsc_stats {
u64 packets;
u64 bytes;
@@ -1002,6 +1042,7 @@ struct net_device_context {
struct netvsc_channel {
struct vmbus_channel *channel;
struct netvsc_device *net_device;
+ void *recv_buf; /* buffer to copy packets out from the receive buffer */
const struct vmpacket_descriptor *desc;
struct napi_struct napi;
struct multi_send_data msd;
@@ -1234,18 +1275,6 @@ struct rndis_pktinfo_id {
u16 pkt_id;
};
-struct ndis_pkt_8021q_info {
- union {
- struct {
- u32 pri:3; /* User Priority */
- u32 cfi:1; /* Canonical Format ID */
- u32 vlanid:12; /* VLAN ID */
- u32 reserved:16;
- };
- u32 value;
- };
-};
-
struct ndis_object_header {
u8 type;
u8 revision;
@@ -1436,32 +1465,6 @@ struct ndis_offload_params {
};
};
-struct ndis_tcp_ip_checksum_info {
- union {
- struct {
- u32 is_ipv4:1;
- u32 is_ipv6:1;
- u32 tcp_checksum:1;
- u32 udp_checksum:1;
- u32 ip_header_checksum:1;
- u32 reserved:11;
- u32 tcp_header_offset:10;
- } transmit;
- struct {
- u32 tcp_checksum_failed:1;
- u32 udp_checksum_failed:1;
- u32 ip_checksum_failed:1;
- u32 tcp_checksum_succeeded:1;
- u32 udp_checksum_succeeded:1;
- u32 ip_checksum_succeeded:1;
- u32 loopback:1;
- u32 tcp_checksum_value_invalid:1;
- u32 ip_checksum_value_invalid:1;
- } receive;
- u32 value;
- };
-};
-
struct ndis_tcp_lso_info {
union {
struct {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2350342b961f..c64cc7639c39 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -22,6 +22,7 @@
#include <linux/prefetch.h>
#include <asm/sync_bitops.h>
+#include <asm/mshyperv.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
@@ -37,6 +38,10 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ /* Block sending traffic to the VF if it is about to be removed */
+ if (!vf)
+ net_device_ctx->data_path_is_vf = vf;
+
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
if (vf)
@@ -50,8 +55,11 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- VMBUS_RQST_ID_NO_RESPONSE,
- VM_PKT_DATA_INBAND, 0);
+ (unsigned long)init_pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
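+ /* A completion is requested and awaited so that the data path switch
+ * has taken effect on the host before data_path_is_vf is updated.
+ */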
+ wait_for_completion(&nv_dev->channel_init_wait);
+ net_device_ctx->data_path_is_vf = vf;
}
/* Worker to setup sub channels on initial setup
@@ -124,6 +132,7 @@ static void free_netvsc_device(struct rcu_head *head)
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
+ kfree(nvdev->chan_table[i].recv_buf);
vfree(nvdev->chan_table[i].mrc.slots);
}
@@ -303,7 +312,7 @@ static int netvsc_init_buf(struct hv_device *device,
struct nvsp_message *init_packet;
unsigned int buf_size;
size_t map_words;
- int ret = 0;
+ int i, ret = 0;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -397,6 +406,16 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
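+ /* Pre-allocate a per-channel buffer; incoming packets are copied out
+ * of the shared receive buffer into it before being processed.
+ */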
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
+ if (nvchan->recv_buf == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
/* Setup receive completion ring.
* Add 1 to the recv_section_cnt because at least one entry in a
* ring buffer has to be empty.
@@ -544,7 +563,10 @@ static int negotiate_nvsp_ver(struct hv_device *device,
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
- init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+ if (hv_is_isolation_supported())
+ netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
+ else
+ init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
/* Teaming bit is needed to receive link speed updates */
init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
@@ -591,6 +613,13 @@ static int netvsc_connect_vsp(struct hv_device *device,
goto cleanup;
}
+ if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
+ netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
+ net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
+ ret = -EPROTO;
+ goto cleanup;
+ }
+
pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
/* Send the ndis version */
@@ -754,8 +783,31 @@ static void netvsc_send_completion(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+ const struct nvsp_message *nvsp_packet;
u32 msglen = hv_pkt_datalen(desc);
+ struct nvsp_message *pkt_rqst;
+ u64 cmd_rqst;
+
+ /* First check if this is a VMBUS completion without data payload */
+ if (!msglen) {
+ cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
+ (u64)desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction id\n");
+ return;
+ }
+
+ pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
+ switch (pkt_rqst->hdr.msg_type) {
+ case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
+ complete(&net_device->channel_init_wait);
+ break;
+
+ default:
+ netdev_err(ndev, "Unexpected VMBUS completion!!\n");
+ }
+ return;
+ }
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
@@ -763,6 +815,7 @@ static void netvsc_send_completion(struct net_device *ndev,
return;
}
+ nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
@@ -887,6 +940,7 @@ static inline int netvsc_send_pkt(
int ret;
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
+ memset(&nvmsg, 0, sizeof(struct nvsp_message));
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
rpkt->channel_type = 0; /* 0 is RMC_DATA */
@@ -1252,6 +1306,19 @@ static int netvsc_receive(struct net_device *ndev,
continue;
}
+ /* We're going to copy (sections of) the packet into nvchan->recv_buf;
+ * make sure that nvchan->recv_buf is large enough to hold the packet.
+ */
+ if (unlikely(buflen > net_device->recv_section_size)) {
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Packet too big: buflen=%u recv_section_size=%u\n",
+ buflen, net_device->recv_section_size);
+
+ continue;
+ }
+
data = recv_buf + offset;
nvchan->rsc.is_last = (i == count - 1);
@@ -1262,8 +1329,11 @@ static int netvsc_receive(struct net_device *ndev,
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
- if (unlikely(ret != NVSP_STAT_SUCCESS))
+ if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
+ }
}
enq_receive_complete(ndev, net_device, q_idx,
@@ -1306,7 +1376,7 @@ static void netvsc_send_table(struct net_device *ndev,
sizeof(union nvsp_6_message_uber);
/* Boundary check for all versions */
- if (offset > msglen - count * sizeof(u32)) {
+ if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
netdev_err(ndev, "Received send-table offset too big:%u\n",
offset);
return;
@@ -1357,7 +1427,10 @@ static void netvsc_receive_inband(struct net_device *ndev,
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(ndev, nvmsg, msglen);
+ if (hv_is_isolation_supported())
+ netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
+ else
+ netvsc_send_vf(ndev, nvmsg, msglen);
break;
}
}
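The reworked send-table bounds check above has a subtlety worth spelling out: with u32 arithmetic, "msglen < count * sizeof(u32)" must be tested before computing "msglen - count * sizeof(u32)", or an oversized count would wrap the subtraction and let a bad offset pass. A minimal sketch of the same predicate, with a hypothetical helper name:

#include <linux/types.h>

/* Sketch only; table_fits() is not part of the patch.  count is
 * assumed small enough that the multiplication cannot overflow.
 */
static bool table_fits(u32 msglen, u32 offset, u32 count)
{
	if (msglen < count * sizeof(u32))
		return false;	/* table alone exceeds the message */
	if (offset > msglen - count * sizeof(u32))
		return false;	/* table would run past the end */
	return true;
}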
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 440486d9c999..aa877da113f8 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -37,6 +37,12 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
if (!prog)
goto out;
+ /* Ensure that the below memcpy() won't overflow the page buffer. */
+ if (len > ndev->mtu + ETH_HLEN) {
+ act = XDP_DROP;
+ goto out;
+ }
+
/* allocate page buffer for data */
page = alloc_page(GFP_ATOMIC);
if (!page) {
@@ -44,12 +50,8 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
goto out;
}
- xdp->data_hard_start = page_address(page);
- xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &nvchan->xdp_rxq;
- xdp->frame_sz = PAGE_SIZE;
+ xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
memcpy(xdp->data, data, len);
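The netvsc_bpf.c hunk converts six open-coded xdp_buff field assignments to the xdp_init_buff()/xdp_prepare_buff() helpers. A minimal sketch of that pattern for a page-backed buffer; everything except the two helpers is an illustrative stand-in:

#include <net/xdp.h>

static void demo_prepare_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
			     void *page_addr, unsigned int headroom,
			     unsigned int len)
{
	/* Frame size and rxq are set once per buffer */
	xdp_init_buff(xdp, PAGE_SIZE, rxq);
	/* Sets data_hard_start, data (= start + headroom) and data_end
	 * (= data + len); the final false means no metadata support.
	 */
	xdp_prepare_buff(xdp, page_addr, headroom, len, false);
}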
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f32f28311d57..8176fa0c8b16 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -539,7 +539,8 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
- netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
+ net_device_ctx->data_path_is_vf)
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will at most need two pages to describe the rndis
@@ -742,7 +743,8 @@ static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
* netvsc_linkstatus_callback - Link up/down notification
*/
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -756,12 +758,24 @@ void netvsc_linkstatus_callback(struct net_device *net,
return;
}
+ /* Copy the RNDIS indicate status into nvchan->recv_buf */
+ memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
+
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
- speed = *(u32 *)((void *)indicate
- + indicate->status_buf_offset) / 10000;
+ /* Validate status_buf_offset */
+ if (indicate->status_buflen < sizeof(speed) ||
+ indicate->status_buf_offset < sizeof(*indicate) ||
+ resp->msg_len - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
+ resp->msg_len - RNDIS_HEADER_SIZE - indicate->status_buf_offset
+ < indicate->status_buflen) {
+ netdev_err(net, "invalid rndis_indicate_status packet\n");
+ return;
+ }
+
+ speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
return;
}
@@ -816,10 +830,11 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
- const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
+ const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
- nvchan->rsc.csum_info;
- const u32 *hash_info = nvchan->rsc.hash_info;
+ &nvchan->rsc.csum_info;
+ const u32 *hash_info = &nvchan->rsc.hash_info;
+ u8 ppi_flags = nvchan->rsc.ppi_flags;
struct sk_buff *skb;
void *xbuf = xdp->data_hard_start;
int i;
@@ -863,22 +878,28 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* We compute it here if the flags are set, because on Linux, the IP
* checksum is always checked.
*/
- if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
- skb->protocol == htons(ETH_P_IP))
+ skb->protocol == htons(ETH_P_IP)) {
+ /* Check that there is enough space to hold the IP header. */
+ if (skb_headlen(skb) < sizeof(struct iphdr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
netvsc_comp_ipcsum(skb);
+ }
/* Do L4 checksum offload if enabled and present. */
- if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (hash_info && (net->features & NETIF_F_RXHASH))
+ if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
- if (vlan) {
+ if (ppi_flags & NVSC_RSC_VLAN) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
@@ -2381,12 +2402,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
* interface, there is only the CHANGE event and no UP or DOWN event.
*/
-static int netvsc_vf_changed(struct net_device *vf_netdev)
+static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
- bool vf_is_up = netif_running(vf_netdev);
+ bool vf_is_up = false;
+
+ if (event != NETDEV_GOING_DOWN)
+ vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
@@ -2399,7 +2423,6 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
- net_device_ctx->data_path_is_vf = vf_is_up;
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
@@ -2716,7 +2739,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
- return netvsc_vf_changed(event_dev);
+ case NETDEV_GOING_DOWN:
+ return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}
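Several netvsc_drv.c hunks replace nullable PPI pointers with by-value copies plus a validity bitmask (ppi_flags), since the pointed-to data does not survive across RSC fragments. A trimmed sketch of the test-then-use pattern; the flag value and struct here are illustrative, the real definitions live in hyperv_net.h:

#include <linux/bits.h>
#include <linux/skbuff.h>

#define DEMO_RSC_HASH_INFO	BIT(2)	/* stand-in for NVSC_RSC_HASH_INFO */

struct demo_rsc_meta {
	u8 ppi_flags;		/* records which members below are valid */
	u32 hash_info;		/* valid iff DEMO_RSC_HASH_INFO is set */
};

static void demo_use_hash(struct sk_buff *skb, const struct demo_rsc_meta *m)
{
	/* A cleared flag bit replaces the old NULL-pointer test */
	if (m->ppi_flags & DEMO_RSC_HASH_INFO)
		skb_set_hash(skb, m->hash_info, PKT_HASH_TYPE_L4);
}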
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 598713c0d5a8..123cc9d25f5e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -127,70 +127,89 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct net_device *netdev,
- const struct rndis_message *rndis_msg)
+ const struct rndis_message *rndis_msg,
+ const void *data)
{
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
- "data offset %u data len %u, # oob %u, "
- "oob offset %u, oob len %u, pkt offset %u, "
- "pkt len %u\n",
- rndis_msg->msg_len,
- rndis_msg->msg.pkt.data_offset,
- rndis_msg->msg.pkt.data_len,
- rndis_msg->msg.pkt.num_oob_data_elements,
- rndis_msg->msg.pkt.oob_data_offset,
- rndis_msg->msg.pkt.oob_data_len,
- rndis_msg->msg.pkt.per_pkt_info_offset,
- rndis_msg->msg.pkt.per_pkt_info_len);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
+ const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
+ "data offset %u data len %u, # oob %u, "
+ "oob offset %u, oob len %u, pkt offset %u, "
+ "pkt len %u\n",
+ rndis_msg->msg_len,
+ pkt->data_offset,
+ pkt->data_len,
+ pkt->num_oob_data_elements,
+ pkt->oob_data_offset,
+ pkt->oob_data_len,
+ pkt->per_pkt_info_offset,
+ pkt->per_pkt_info_len);
+ }
break;
case RNDIS_MSG_INIT_C:
- netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
- "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
- "device flags %d, max xfer size 0x%x, max pkts %u, "
- "pkt aligned %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.init_complete.req_id,
- rndis_msg->msg.init_complete.status,
- rndis_msg->msg.init_complete.major_ver,
- rndis_msg->msg.init_complete.minor_ver,
- rndis_msg->msg.init_complete.dev_flags,
- rndis_msg->msg.init_complete.max_xfer_size,
- rndis_msg->msg.init_complete.
- max_pkt_per_msg,
- rndis_msg->msg.init_complete.
- pkt_alignment_factor);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_initialize_complete)) {
+ const struct rndis_initialize_complete *init_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
+ "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
+ "device flags %d, max xfer size 0x%x, max pkts %u, "
+ "pkt aligned %u)\n",
+ rndis_msg->msg_len,
+ init_complete->req_id,
+ init_complete->status,
+ init_complete->major_ver,
+ init_complete->minor_ver,
+ init_complete->dev_flags,
+ init_complete->max_xfer_size,
+ init_complete->max_pkt_per_msg,
+ init_complete->pkt_alignment_factor);
+ }
break;
case RNDIS_MSG_QUERY_C:
- netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
- "(len %u, id 0x%x, status 0x%x, buf len %u, "
- "buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.query_complete.req_id,
- rndis_msg->msg.query_complete.status,
- rndis_msg->msg.query_complete.
- info_buflen,
- rndis_msg->msg.query_complete.
- info_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_query_complete)) {
+ const struct rndis_query_complete *query_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
+ "(len %u, id 0x%x, status 0x%x, buf len %u, "
+ "buf offset %u)\n",
+ rndis_msg->msg_len,
+ query_complete->req_id,
+ query_complete->status,
+ query_complete->info_buflen,
+ query_complete->info_buf_offset);
+ }
break;
case RNDIS_MSG_SET_C:
- netdev_dbg(netdev,
- "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.set_complete.req_id,
- rndis_msg->msg.set_complete.status);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
+ const struct rndis_set_complete *set_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->msg_len,
+ set_complete->req_id,
+ set_complete->status);
+ }
break;
case RNDIS_MSG_INDICATE:
- netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
- "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.indicate_status.status,
- rndis_msg->msg.indicate_status.status_buflen,
- rndis_msg->msg.indicate_status.status_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_indicate_status)) {
+ const struct rndis_indicate_status *indicate_status =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
+ rndis_msg->msg_len,
+ indicate_status->status,
+ indicate_status->status_buflen,
+ indicate_status->status_buf_offset);
+ }
break;
default:
@@ -246,11 +265,20 @@ static void rndis_set_link_state(struct rndis_device *rdev,
{
u32 link_status;
struct rndis_query_complete *query_complete;
+ u32 msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
+ return;
query_complete = &request->response_msg.msg.query_complete;
if (query_complete->status == RNDIS_STATUS_SUCCESS &&
- query_complete->info_buflen == sizeof(u32)) {
+ query_complete->info_buflen >= sizeof(u32) &&
+ query_complete->info_buf_offset >= sizeof(*query_complete) &&
+ msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ >= query_complete->info_buflen) {
memcpy(&link_status, (void *)((unsigned long)query_complete +
query_complete->info_buf_offset), sizeof(u32));
rdev->link_state = link_status != 0;
@@ -259,8 +287,10 @@ static void rndis_set_link_state(struct rndis_device *rdev,
static void rndis_filter_receive_response(struct net_device *ndev,
struct netvsc_device *nvdev,
- const struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data)
{
+ u32 *req_id = &resp->msg.init_complete.req_id;
struct rndis_device *dev = nvdev->extension;
struct rndis_request *request = NULL;
bool found = false;
@@ -285,14 +315,16 @@ static void rndis_filter_receive_response(struct net_device *ndev,
return;
}
+ /* Copy the request ID into nvchan->recv_buf */
+ *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
+
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
/*
* All request/response message contains RequestId as the 1st
* field
*/
- if (request->request_msg.msg.init_req.req_id
- == resp->msg.init_complete.req_id) {
+ if (request->request_msg.msg.init_req.req_id == *req_id) {
found = true;
break;
}
@@ -302,8 +334,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (found) {
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
- memcpy(&request->response_msg, resp,
- resp->msg_len);
+ memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
+ memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ data + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
@@ -332,7 +366,7 @@ static void rndis_filter_receive_response(struct net_device *ndev,
netdev_err(ndev,
"no rndis request found for this response "
"(id 0x%x res type 0x%x)\n",
- resp->msg.init_complete.req_id,
+ *req_id,
resp->ndis_msg_type);
}
}
@@ -343,7 +377,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
*/
static inline void *rndis_get_ppi(struct net_device *ndev,
struct rndis_packet *rpkt,
- u32 rpkt_len, u32 type, u8 internal)
+ u32 rpkt_len, u32 type, u8 internal,
+ u32 ppi_size, void *data)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -359,7 +394,8 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
return NULL;
}
- if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
+ rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
rpkt->per_pkt_info_len);
return NULL;
@@ -367,6 +403,8 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
rpkt->per_pkt_info_offset);
+ /* Copy the PPIs into nvchan->recv_buf */
+ memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
len = rpkt->per_pkt_info_len;
while (len > 0) {
@@ -381,8 +419,15 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
continue;
}
- if (ppi->type == type && ppi->internal == internal)
+ if (ppi->type == type && ppi->internal == internal) {
+ /* ppi->size should be big enough to hold the returned object. */
+ if (ppi->size - ppi->ppi_offset < ppi_size) {
+ netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
+ ppi->size, ppi->ppi_offset);
+ continue;
+ }
return (void *)((ulong)ppi + ppi->ppi_offset);
+ }
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
}
@@ -402,10 +447,29 @@ void rsc_add_data(struct netvsc_channel *nvchan,
if (cnt) {
nvchan->rsc.pktlen += len;
} else {
- nvchan->rsc.vlan = vlan;
- nvchan->rsc.csum_info = csum_info;
+ /* The data/values pointed to by vlan, csum_info and hash_info are shared
+ * across the different 'fragments' of the RSC packet; store them into
+ * the packet itself.
+ */
+ if (vlan != NULL) {
+ memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
+ }
+ if (csum_info != NULL) {
+ memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
+ }
nvchan->rsc.pktlen = len;
- nvchan->rsc.hash_info = hash_info;
+ if (hash_info != NULL) {
+ nvchan->rsc.hash_info = *hash_info;
+ nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
+ }
}
nvchan->rsc.data[cnt] = data;
@@ -417,7 +481,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan,
struct rndis_message *msg,
- u32 data_buflen)
+ void *data, u32 data_buflen)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
@@ -425,7 +489,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct rndis_pktinfo_id *pktinfo_id;
const u32 *hash_info;
u32 data_offset, rpkt_len;
- void *data;
bool rsc_more = false;
int ret;
@@ -436,6 +499,9 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
+ /* Copy the RNDIS packet into nvchan->recv_buf */
+ memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
+
/* Validate rndis_pkt offset */
if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
netdev_err(ndev, "invalid rndis packet offset: %u\n",
@@ -461,15 +527,17 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
-
- csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
+ data);
- hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
+ sizeof(*csum_info), data);
- pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
+ sizeof(*hash_info), data);
- data = (void *)msg + data_offset;
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
+ sizeof(*pktinfo_id), data);
/* Identify RSC frags, drop erroneous packets */
if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
@@ -498,7 +566,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* the data packet to the stack, without the rndis trailer padding
*/
rsc_add_data(nvchan, vlan, csum_info, hash_info,
- data, rndis_pkt->data_len);
+ data + data_offset, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
@@ -509,8 +577,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return ret;
drop:
- /* Drop incomplete packet */
- nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}
@@ -520,33 +586,41 @@ int rndis_filter_receive(struct net_device *ndev,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct rndis_message *rndis_msg = data;
+ struct rndis_message *rndis_msg = nvchan->recv_buf;
- if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(ndev, rndis_msg);
+ if (buflen < RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS msg header into nvchan->recv_buf */
+ memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
/* Validate incoming rndis_message packet */
- if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
+ if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
buflen < rndis_msg->msg_len) {
netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
buflen, rndis_msg->msg_len);
return NVSP_STAT_FAIL;
}
+ if (netif_msg_rx_status(net_device_ctx))
+ dump_rndis_message(ndev, rndis_msg, data);
+
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
- rndis_msg, buflen);
+ rndis_msg, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
/* completion msgs */
- rndis_filter_receive_response(ndev, net_dev, rndis_msg);
+ rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
break;
case RNDIS_MSG_INDICATE:
/* notification msgs */
- netvsc_linkstatus_callback(ndev, rndis_msg);
+ netvsc_linkstatus_callback(ndev, rndis_msg, data);
break;
default:
netdev_err(ndev,
@@ -567,6 +641,7 @@ static int rndis_filter_query_device(struct rndis_device *dev,
u32 inresult_size = *result_size;
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
+ u32 msg_len;
int ret = 0;
if (!result)
@@ -634,8 +709,19 @@ static int rndis_filter_query_device(struct rndis_device *dev,
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
+ msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
+ ret = -1;
+ goto cleanup;
+ }
- if (query_complete->info_buflen > inresult_size) {
+ if (query_complete->info_buflen > inresult_size ||
+ query_complete->info_buf_offset < sizeof(*query_complete) ||
+ msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ < query_complete->info_buflen) {
ret = -1;
goto cleanup;
}
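The same four-clause offset/length validation now appears in both rndis_set_link_state() and rndis_filter_query_device(). A sketch of it factored into one helper (hypothetical name; every operand is u32, so each subtraction is guarded by the comparison before it):

#include <linux/types.h>

static bool rndis_info_buf_ok(u32 msg_len, u32 hdr_size, u32 fixed_size,
			      u32 buf_offset, u32 buflen, u32 want)
{
	if (msg_len < hdr_size || msg_len - hdr_size < fixed_size)
		return false;	/* fixed part of the completion missing */
	if (buflen < want || buf_offset < fixed_size)
		return false;	/* buffer too small, or inside the header */
	if (msg_len - hdr_size < buf_offset)
		return false;	/* offset itself beyond the message */
	return msg_len - hdr_size - buf_offset >= buflen;
}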
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index fa63d4dee0ba..ab7022582154 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -59,9 +59,9 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
-static void ifb_ri_tasklet(unsigned long _txp)
+static void ifb_ri_tasklet(struct tasklet_struct *t)
{
- struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
+ struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
struct netdev_queue *txq;
struct sk_buff *skb;
@@ -170,8 +170,7 @@ static int ifb_dev_init(struct net_device *dev)
__skb_queue_head_init(&txp->tq);
u64_stats_init(&txp->rsync);
u64_stats_init(&txp->tsync);
- tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
- (unsigned long)txp);
+ tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
netif_tx_start_queue(netdev_get_tx_queue(dev, i));
}
return 0;
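The ifb.c change is the standard tasklet API conversion: tasklet_setup() stores the callback, and from_tasklet() (a container_of() wrapper keyed on the member name) recovers the enclosing struct, removing the unsigned long cookie and its cast. The same conversion for a hypothetical driver struct:

#include <linux/interrupt.h>

struct demo_priv {
	struct tasklet_struct tl;
	int pending;
};

static void demo_tasklet(struct tasklet_struct *t)
{
	struct demo_priv *p = from_tasklet(p, t, tl);

	p->pending = 0;
}

static void demo_init(struct demo_priv *p)
{
	tasklet_setup(&p->tl, demo_tasklet);	/* no cast, no cookie */
}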
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 9f0d2a93379c..b68f1289b89e 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,9 +1,10 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on ARCH_QCOM && 64BIT && NET
- depends on QCOM_Q6V5_MSS
+ depends on 64BIT && NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_QMI_HELPERS
- select QCOM_MDT_LOADER
help
Choose Y or M here to include support for the Qualcomm
IP Accelerator (IPA), a hardware block present in some
@@ -11,7 +12,8 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 SoC.
+ between the AP and modem, on the Qualcomm SDM845 and SC7180
+ SoCs.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..390d3403386a 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -89,9 +89,9 @@
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
-#define GSI_CMD_TIMEOUT 5 /* seconds */
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
-#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
@@ -175,6 +175,12 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
return channel - &channel->gsi->channel[0];
}
+/* An initialized channel has a non-null GSI pointer */
+static bool gsi_channel_initialized(struct gsi_channel *channel)
+{
+ return !!channel->gsi;
+}
+
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
@@ -195,8 +201,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
- u32 adjust;
-
/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);
@@ -206,10 +210,9 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- /* Reverse the offset adjustment for inter-EE register offsets */
- adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+ /* The inter-EE registers are in the non-adjusted address range */
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
@@ -220,7 +223,59 @@ static void gsi_irq_teardown(struct gsi *gsi)
/* Nothing to do */
}
-static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
@@ -234,11 +289,11 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
gsi_irq_type_enable(gsi, GSI_IEOB);
}
-static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
u32 val;
- gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+ gsi->ieob_enabled_bitmap &= ~event_mask;
/* Disable the interrupt type if this was the last enabled channel */
if (!gsi->ieob_enabled_bitmap)
@@ -248,6 +303,11 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
+static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
+}
+
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
@@ -307,11 +367,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
- return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+ return !!wait_for_completion_timeout(completion, timeout);
}
/* Return the hardware's notion of the current state of an event ring */
@@ -326,110 +388,110 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one event ring command at a time, and event
- * control interrupts should only occur when such a command
- * is issued here. Only permit *this* event ring to trigger
- * an interrupt, and only enable the event control IRQ type
- * when we expect it to occur.
- */
- val = BIT(evt_ring_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ev_ctrl_disable(gsi);
- if (success)
- return 0;
+ if (!timeout)
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
- opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
+ opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
+ enum gsi_evt_ring_state state;
/* Get initial event ring state */
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
- if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
+ enum gsi_evt_ring_state state;
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
+ enum gsi_evt_ring_state state;
- if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
u32 channel_id = gsi_channel_id(channel);
- void *virt = channel->gsi->virt;
+ void __iomem *virt = channel->gsi->virt;
u32 val;
val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
@@ -438,41 +500,30 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one channel command at a time, and channel
- * control interrupts should only occur when such a command is
- * issued here. So we only permit *this* channel to trigger
- * an interrupt and only enable the channel control IRQ type
- * when we expect it to occur.
- */
- val = BIT(channel_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ch_ctrl_disable(gsi);
- if (success)
- return 0;
+ if (!timeout)
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +532,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +541,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +559,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +568,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +586,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +602,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,9 +624,9 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
- msleep(1); /* A short delay is required before a RESET command */
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
@@ -590,11 +638,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +653,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +661,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
@@ -684,22 +732,38 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}
-/* Return the last (most recent) transaction completed on a channel. */
+/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
+ const struct list_head *list;
struct gsi_trans *trans;
spin_lock_bh(&trans_info->spinlock);
- if (!list_empty(&trans_info->complete))
- trans = list_last_entry(&trans_info->complete,
- struct gsi_trans, links);
- else if (!list_empty(&trans_info->polled))
- trans = list_last_entry(&trans_info->polled,
- struct gsi_trans, links);
- else
- trans = NULL;
+ /* There is a small chance a TX transaction got allocated just
+ * before we disabled transmits, so check for that.
+ */
+ if (channel->toward_ipa) {
+ list = &trans_info->alloc;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->pending;
+ if (!list_empty(list))
+ goto done;
+ }
+
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ */
+ list = &trans_info->complete;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->polled;
+ if (list_empty(list))
+ list = NULL;
+done:
+ trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
/* Caller will wait for this, so take a reference */
if (trans)
@@ -723,24 +787,6 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
-/* Stop channel activity. Transactions may not be allocated until thawed. */
-static void gsi_channel_freeze(struct gsi_channel *channel)
-{
- gsi_channel_trans_quiesce(channel);
-
- napi_disable(&channel->napi);
-
- gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
-}
-
-/* Allow transactions to be used on the channel again. */
-static void gsi_channel_thaw(struct gsi_channel *channel)
-{
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
-
- napi_enable(&channel->napi);
-}
-
/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
@@ -832,51 +878,92 @@ static void gsi_channel_deprogram(struct gsi_channel *channel)
/* Nothing to do */
}
-/* Start an allocated GSI channel */
-int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
- struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi *gsi = channel->gsi;
int ret;
+ if (!start)
+ return 0;
+
mutex_lock(&gsi->mutex);
ret = gsi_channel_start_command(channel);
mutex_unlock(&gsi->mutex);
- gsi_channel_thaw(channel);
-
return ret;
}
-/* Stop a started channel */
-int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 retries;
int ret;
- gsi_channel_freeze(channel);
+ /* Enable NAPI and the completion interrupt */
+ napi_enable(&channel->napi);
+ gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
- /* RX channels might require a little time to enter STOPPED state */
- retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+ ret = __gsi_channel_start(channel, true);
+ if (ret) {
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+ }
- mutex_lock(&gsi->mutex);
+ return ret;
+}
+
+static int gsi_channel_stop_retry(struct gsi_channel *channel)
+{
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
+ int ret;
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
- msleep(1);
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
+ return ret;
+}
+
+static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
+{
+ struct gsi *gsi = channel->gsi;
+ int ret;
+
+ /* Wait for any underway transactions to complete before stopping. */
+ gsi_channel_trans_quiesce(channel);
+
+ if (!stop)
+ return 0;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_stop_retry(channel);
+
mutex_unlock(&gsi->mutex);
- /* Thaw the channel if we need to retry (or on error) */
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ ret = __gsi_channel_stop(channel, true);
if (ret)
- gsi_channel_thaw(channel);
+ return ret;
- return ret;
+ /* Disable the completion interrupt and NAPI if successful */
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+
+ return 0;
}
/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
@@ -901,11 +988,14 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
- if (stop)
- return gsi_channel_stop(gsi, channel_id);
+ ret = __gsi_channel_stop(channel, stop);
+ if (ret)
+ return ret;
- gsi_channel_freeze(channel);
+ /* Ensure NAPI polling has finished. */
+ napi_synchronize(&channel->napi);
return 0;
}
@@ -915,12 +1005,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- if (start)
- return gsi_channel_start(gsi, channel_id);
-
- gsi_channel_thaw(channel);
-
- return 0;
+ return __gsi_channel_start(channel, start);
}
/**
@@ -1029,7 +1114,6 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
evt_ring = &gsi->evt_ring[evt_ring_id];
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
complete(&evt_ring->completion);
}
@@ -1167,6 +1251,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
u32 event_mask;
event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_irq_ieob_disable(gsi, event_mask);
iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
while (event_mask) {
@@ -1174,7 +1259,6 @@ static void gsi_isr_ieob(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
- gsi_irq_ieob_disable(gsi, evt_ring_id);
napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
}
}
@@ -1362,7 +1446,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, ring->addr);
+ dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */
@@ -1419,7 +1503,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
}
/* Consult hardware, move any newly completed transactions to completed list */
-static void gsi_channel_update(struct gsi_channel *channel)
+static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1438,7 +1522,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return;
+ return NULL;
/* Get the transaction for the latest completed event. Take a
* reference to keep it from completing before we give the events
@@ -1463,6 +1547,8 @@ static void gsi_channel_update(struct gsi_channel *channel)
gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
gsi_trans_free(trans);
+
+ return gsi_channel_trans_complete(channel);
}
/**
@@ -1483,11 +1569,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
/* Get the first transaction from the completed list */
trans = gsi_channel_trans_complete(channel);
- if (!trans) {
- /* List is empty; see if there's more to do */
- gsi_channel_update(channel);
- trans = gsi_channel_trans_complete(channel);
- }
+ if (!trans) /* List is empty; see if there's more to do */
+ trans = gsi_channel_update(channel);
if (trans)
gsi_trans_move_polled(trans);
@@ -1510,23 +1593,20 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
struct gsi_channel *channel;
- int count = 0;
+ int count;
channel = container_of(napi, struct gsi_channel, napi);
- while (count < budget) {
+ for (count = 0; count < budget; count++) {
struct gsi_trans *trans;
- count++;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
gsi_trans_complete(trans);
}
- if (count < budget) {
- napi_complete(&channel->napi);
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
- }
+ if (count < budget && napi_complete(napi))
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
return count;
}
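The two hunks above move IEOB interrupt masking into the ISR (for every ring in event_mask at once) and re-enable a ring's interrupt only when its poll finishes under budget. A hedged sketch of that handshake, with illustrative helper names standing in for the GSI functions:

#include <linux/netdevice.h>

static bool demo_process_one(struct napi_struct *napi);
static void demo_irq_enable(struct napi_struct *napi);

static int demo_poll(struct napi_struct *napi, int budget)
{
	int count;

	for (count = 0; count < budget; count++)
		if (!demo_process_one(napi))
			break;

	/* Under budget means the ring is drained: stop polling and let
	 * the masked interrupt rearm NAPI later.  napi_complete()
	 * returns false if polling was rescheduled meanwhile, in which
	 * case the interrupt must stay masked.
	 */
	if (count < budget && napi_complete(napi))
		demo_irq_enable(napi);

	return count;
}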
@@ -1564,8 +1644,8 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
u32 evt_ring_id = channel->evt_ring_id;
int ret;
- if (!channel->gsi)
- return 0; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return 0;
ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
if (ret)
@@ -1601,8 +1681,8 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
- if (!channel->gsi)
- return; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return;
netif_napi_del(&channel->napi);
@@ -1616,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
- bool success;
+ bool timeout;
u32 val;
/* The error global interrupt type is always enabled (until we
@@ -1639,12 +1719,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- if (success)
+ if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
@@ -1696,9 +1776,10 @@ static int gsi_channel_setup(struct gsi *gsi)
while (channel_id < GSI_CHANNEL_COUNT_MAX) {
struct gsi_channel *channel = &gsi->channel[channel_id++];
- if (!channel->gsi)
- continue; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ continue;
+ ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;
@@ -2014,8 +2095,8 @@ err_clear_gsi:
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
- if (!channel->gsi)
- return; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return;
if (channel->command)
ipa_cmd_pool_exit(channel);
@@ -2103,9 +2184,8 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
gsi->dev = dev;
gsi->version = version;
- /* The GSI layer performs NAPI on all endpoints. NAPI requires a
- * network device structure, but the GSI layer does not have one,
- * so we must create a dummy network device for this purpose.
+ /* GSI uses NAPI on all channels. Create a dummy network device
+ * for the channel NAPI contexts to be associated with.
*/
init_dummy_netdev(&gsi->dummy_dev);
@@ -2130,13 +2210,13 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
return -EINVAL;
}
- gsi->virt = ioremap(res->start, size);
- if (!gsi->virt) {
+ gsi->virt_raw = ioremap(res->start, size);
+ if (!gsi->virt_raw) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
return -ENOMEM;
}
- /* Adjust register range pointer downward for newer IPA versions */
- gsi->virt -= adjust;
+ /* Most registers are accessed using an adjusted register range */
+ gsi->virt = gsi->virt_raw - adjust;
init_completion(&gsi->completion);
@@ -2155,7 +2235,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
err_irq_exit:
gsi_irq_exit(gsi);
err_iounmap:
- iounmap(gsi->virt);
+ iounmap(gsi->virt_raw);
return ret;
}
@@ -2166,7 +2246,7 @@ void gsi_exit(struct gsi *gsi)
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
gsi_irq_exit(gsi);
- iounmap(gsi->virt);
+ iounmap(gsi->virt_raw);
}
/* The maximum number of outstanding TREs on a channel. This limits
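All GSI commands share the issue-and-wait shape shown in the gsi_command() hunk, now with a millisecond-scale timeout. A minimal sketch of that shape (names other than the kernel primitives are illustrative):

#include <linux/completion.h>
#include <linux/io.h>
#include <linux/jiffies.h>

#define DEMO_CMD_TIMEOUT	50	/* milliseconds, as in the hunk */

static bool demo_command(void __iomem *reg, u32 val, struct completion *done)
{
	unsigned long timeout = msecs_to_jiffies(DEMO_CMD_TIMEOUT);

	reinit_completion(done);
	iowrite32(val, reg);	/* the ISR calls complete(done) */

	/* Nonzero return means the completion fired before the timeout */
	return !!wait_for_completion_timeout(done, timeout);
}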
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 96c9aed397aa..efc980f96109 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -142,7 +142,6 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
struct completion completion; /* signals event ring state changes */
- enum gsi_evt_ring_state state;
struct gsi_ring ring;
};
@@ -150,7 +149,8 @@ struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
- void __iomem *virt;
+ void __iomem *virt_raw; /* I/O mapped address range */
+ void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 0e138bbd8205..1622d8cf8dea 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -38,17 +38,21 @@
* (though the actual limit is hardware-dependent).
*/
-/* GSI EE registers as a group are shifted downward by a fixed
- * constant amount for IPA versions 4.5 and beyond. This applies
- * to all GSI registers we use *except* the ones that disable
- * inter-EE interrupts for channels and event channels.
+/* GSI EE registers as a group are shifted downward by a fixed constant amount
+ * for IPA versions 4.5 and beyond. This applies to all GSI registers we use
+ * *except* the ones that disable inter-EE interrupts for channels and event
+ * channels.
*
- * We handle this by adjusting the pointer to the mapped GSI memory
- * region downward. Then in the one place we use them (gsi_irq_setup())
- * we undo that adjustment for the inter-EE interrupt registers.
+ * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
+ * the mapped range is held in gsi->virt_raw. The inter-EE interrupt
+ * registers are accessed using that pointer.
+ *
+ * Most registers are accessed using gsi->virt, which is a copy of the "raw"
+ * pointer, adjusted downward by the fixed amount.
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
+/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
@@ -59,16 +63,7 @@
#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
(0x0000c01c + 0x1000 * (ee))
-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c028 + 0x1000 * (ee))
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c02c + 0x1000 * (ee))
-
+/* All other register offsets are relative to gsi->virt */
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
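The comment block above describes the two-pointer scheme: one ioremap() of the real register range, kept in virt_raw for the inter-EE IRQ registers, and a copy shifted down by GSI_EE_REG_ADJUST (IPA v4.5+, else 0) for everything else. A sketch with the struct fields passed explicitly:

#include <linux/errno.h>
#include <linux/io.h>

static int demo_map_gsi(phys_addr_t start, size_t size, u32 adjust,
			void __iomem **virt_raw, void __iomem **virt)
{
	*virt_raw = ioremap(start, size);	/* the real mapped range */
	if (!*virt_raw)
		return -ENOMEM;

	/* Most register offsets are defined relative to the shifted
	 * base, so subtract once here instead of per access; the two
	 * inter-EE IRQ registers keep using *virt_raw.
	 */
	*virt = *virt_raw - adjust;

	return 0;
}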
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 4d4606b5fa95..3a4ab8a94d82 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -13,6 +13,7 @@
#include "ipa_cmd.h"
+struct page;
struct scatterlist;
struct device;
struct sk_buff;
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 6c2371084c55..802077631371 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -43,7 +43,7 @@ enum ipa_flag {
* @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
- * @modem_rproc: Remoteproc handle for modem subsystem
+ * @completion: Used to signal pipeline clear transfer complete
* @smp2p: SMP2P information
* @clock: IPA clocking information
* @table_addr: DMA address of filter/route table content
@@ -83,7 +83,7 @@ struct ipa {
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
- struct rproc *modem_rproc;
+ struct completion completion;
struct notifier_block nb;
void *notifier;
struct ipa_smp2p *smp2p;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..69ef6ea41e61 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/refcount.h>
@@ -31,142 +31,154 @@
*/
/**
+ * struct ipa_interconnect - IPA interconnect information
+ * @path: Interconnect path
+ * @average_bandwidth: Average interconnect bandwidth (KB/second)
+ * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
+ */
+struct ipa_interconnect {
+ struct icc_path *path;
+ u32 average_bandwidth;
+ u32 peak_bandwidth;
+};
+
+/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
- * @memory_path: Memory interconnect
- * @imem_path: Internal memory interconnect
- * @config_path: Configuration space interconnect
- * @interconnect_data: Interconnect configuration data
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
- struct icc_path *memory_path;
- struct icc_path *imem_path;
- struct icc_path *config_path;
- const struct ipa_interconnect_data *interconnect_data;
+ u32 interconnect_count;
+ struct ipa_interconnect *interconnect;
};
-static struct icc_path *
-ipa_interconnect_init_one(struct device *dev, const char *name)
+static int ipa_interconnect_init_one(struct device *dev,
+ struct ipa_interconnect *interconnect,
+ const struct ipa_interconnect_data *data)
{
struct icc_path *path;
- path = of_icc_get(dev, name);
- if (IS_ERR(path))
- dev_err(dev, "error %ld getting %s interconnect\n",
- PTR_ERR(path), name);
+ path = of_icc_get(dev, data->name);
+ if (IS_ERR(path)) {
+ int ret = PTR_ERR(path);
+
+ dev_err_probe(dev, ret, "error getting %s interconnect\n",
+ data->name);
- return path;
+ return ret;
+ }
+
+ interconnect->path = path;
+ interconnect->average_bandwidth = data->average_bandwidth;
+ interconnect->peak_bandwidth = data->peak_bandwidth;
+
+ return 0;
}
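For context, dev_err_probe() is the standard driver-core helper (not introduced by this patch): it logs the message only at debug level when the error is -EPROBE_DEFER, records the deferral reason, and returns the error code, so the error branch above could even be collapsed to:

	if (IS_ERR(path))
		return dev_err_probe(dev, PTR_ERR(path),
				     "error getting %s interconnect\n",
				     data->name);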
-/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
- struct icc_path *path;
-
- path = ipa_interconnect_init_one(dev, "memory");
- if (IS_ERR(path))
- goto err_return;
- clock->memory_path = path;
+ icc_put(interconnect->path);
+ memset(interconnect, 0, sizeof(*interconnect));
+}
- path = ipa_interconnect_init_one(dev, "imem");
- if (IS_ERR(path))
- goto err_memory_path_put;
- clock->imem_path = path;
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
+ const struct ipa_interconnect_data *data)
+{
+ struct ipa_interconnect *interconnect;
+ u32 count;
+ int ret;
- path = ipa_interconnect_init_one(dev, "config");
- if (IS_ERR(path))
- goto err_imem_path_put;
- clock->config_path = path;
+ count = clock->interconnect_count;
+ interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
+ if (!interconnect)
+ return -ENOMEM;
+ clock->interconnect = interconnect;
+
+ while (count--) {
+ ret = ipa_interconnect_init_one(dev, interconnect, data++);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_put:
- icc_put(clock->imem_path);
-err_memory_path_put:
- icc_put(clock->memory_path);
-err_return:
- return PTR_ERR(path);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
+
+ return ret;
}
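The out_unwind path walks backward over only the entries that were successfully initialized. As a standalone illustration of the idiom (a generic sketch; init_one()/exit_one() are hypothetical names):

int init_array(struct item *items, size_t count)
{
	struct item *item = items;
	int ret;

	while (count--) {
		ret = init_one(item);
		if (ret)
			goto out_unwind;
		item++;
	}

	return 0;

out_unwind:
	/* item points one past the last successfully initialized entry */
	while (item-- > items)
		exit_one(item);

	return ret;
}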
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
- icc_put(clock->config_path);
- icc_put(clock->imem_path);
- icc_put(clock->memory_path);
+ struct ipa_interconnect *interconnect;
+
+ interconnect = clock->interconnect + clock->interconnect_count;
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
int ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- return ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_memory_path_disable;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_imem_path_disable;
+ u32 i;
+
+ interconnect = clock->interconnect;
+ for (i = 0; i < clock->interconnect_count; i++) {
+ ret = icc_set_bw(interconnect->path,
+ interconnect->average_bandwidth,
+ interconnect->peak_bandwidth);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_disable:
- (void)icc_set_bw(clock->imem_path, 0, 0);
-err_memory_path_disable:
- (void)icc_set_bw(clock->memory_path, 0, 0);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ (void)icc_set_bw(interconnect->path, 0, 0);
return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+static void ipa_interconnect_disable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ int result = 0;
+ u32 count;
int ret;
- ret = icc_set_bw(clock->memory_path, 0, 0);
- if (ret)
- return ret;
-
- ret = icc_set_bw(clock->imem_path, 0, 0);
- if (ret)
- goto err_memory_path_reenable;
-
- ret = icc_set_bw(clock->config_path, 0, 0);
- if (ret)
- goto err_imem_path_reenable;
-
- return 0;
-
-err_imem_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- (void)icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
-err_memory_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- (void)icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
+ count = clock->interconnect_count;
+ interconnect = clock->interconnect + count;
+ while (count--) {
+ interconnect--;
+ ret = icc_set_bw(interconnect->path, 0, 0);
+ if (ret && !result)
+ result = ret;
+ }
- return ret;
+ if (result)
+ dev_err(&ipa->pdev->dev,
+ "error %d disabling IPA interconnects\n", ret);
}
/* Turn on IPA clocks, including interconnects */
@@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- (void)ipa_interconnect_disable(ipa);
+ ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@@ -269,7 +281,8 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
clk = clk_get(dev, "core");
if (IS_ERR(clk)) {
- dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
+ dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
+
return ERR_CAST(clk);
}
@@ -286,9 +299,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
goto err_clk_put;
}
clock->core = clk;
- clock->interconnect_data = data->interconnect;
+ clock->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(clock, dev);
+ ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
if (ret)
goto err_kfree;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 002e51448510..35e35852c25c 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -244,11 +244,15 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
if (ipa->version != IPA_VERSION_3_5_1)
bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
BUILD_BUG_ON(bit_count > 32);
- offset_max = ~0 >> (32 - bit_count);
+ offset_max = ~0U >> (32 - bit_count);
+ /* Make sure the offset can be represented by the field(s)
+ * that holds it. Also make sure the offset is not outside
+ * the overall IPA memory range.
+ */
if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
- ipa->mem_offset + offset, offset_max);
+ name, ipa->mem_offset, offset, offset_max);
return false;
}
@@ -261,12 +265,24 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
const char *name;
u32 offset;
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
- name = "filter/route hash flush";
- if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
- return false;
+ /* If hashed tables are supported, ensure the hash flush register
+ * offset will fit in a register write IPA immediate command.
+ */
+ if (ipa_table_hash_support(ipa)) {
+ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+ }
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
+ /* Each endpoint can have a status endpoint associated with it,
+ * and this is recorded in an endpoint register. If the modem
+ * crashes, we reset the status endpoint for all modem endpoints
+ * using a register write IPA immediate command. Make sure the
+ * worst case (highest endpoint number) offset of that endpoint
+ * fits in the register write command field(s) that must hold it.
+ */
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -529,7 +545,7 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
direction, opcode);
}
-static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
@@ -543,14 +559,14 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
- payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+ payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
}
/* Issue a small command TX data transfer */
-static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
@@ -558,8 +574,6 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
- /* assert(size <= sizeof(*payload)); */
-
/* Just transfer a zero-filled payload structure */
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
@@ -567,34 +581,53 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
direction, opcode);
}
-void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+/* Add immediate commands to a transaction to clear the hardware pipeline */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ /* This will complete when the transfer is received */
+ reinit_completion(&ipa->completion);
+ /* Issue a no-op register write command (mask 0 means no write) */
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+
+ /* Send a data packet through the IPA pipeline. The packet_init
+ * command says to send the next packet directly to the exception
+ * endpoint without any other IPA processing. The tag_status
+ * command requests that status be generated on completion of
+ * that transfer, and that the status include a tag value.
+ * Finally, the transfer command sends a small packet of data
+ * (instead of a command) using the command endpoint.
+ */
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
- ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
- ipa_cmd_transfer_add(trans, 4);
+ ipa_cmd_ip_tag_status_add(trans);
+ ipa_cmd_transfer_add(trans);
}
-/* Returns the number of commands required for the tag process */
-u32 ipa_cmd_tag_process_count(void)
+/* Returns the number of commands required to clear the pipeline */
+u32 ipa_cmd_pipeline_clear_count(void)
{
return 4;
}
-void ipa_cmd_tag_process(struct ipa *ipa)
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
+{
+ wait_for_completion(&ipa->completion);
+}
+
+void ipa_cmd_pipeline_clear(struct ipa *ipa)
{
- u32 count = ipa_cmd_tag_process_count();
+ u32 count = ipa_cmd_pipeline_clear_count();
struct gsi_trans *trans;
trans = ipa_cmd_trans_alloc(ipa, count);
if (trans) {
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
} else {
dev_err(&ipa->pdev->dev,
"error allocating %u entry tag transaction\n", count);
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4ed09c486abc..6dd3d35cf315 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -157,26 +157,30 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
u16 size, dma_addr_t addr, bool toward_ipa);
/**
- * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction
* @trans: GSI transaction
*/
-void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans);
/**
- * ipa_cmd_tag_process_add_count() - Number of commands in a tag process
+ * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline
*
* Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * to hold commands to clear the pipeline
*/
-u32 ipa_cmd_tag_process_count(void);
+u32 ipa_cmd_pipeline_clear_count(void);
/**
- * ipa_cmd_tag_process() - Perform a tag process
- *
- * @Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * ipa_cmd_pipeline_clear_wait() - Wait for pipeline clear to complete
+ * @ipa: - IPA pointer
+ */
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
+
+/**
+ * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
+ * @ipa: - IPA pointer
*/
-void ipa_cmd_tag_process(struct ipa *ipa);
+void ipa_cmd_pipeline_clear(struct ipa *ipa);
/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 5cc0ed77edb9..997b51ceb7d7 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 465000, /* 465 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 68570, /* 68.570 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 30000, /* 30 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SC7180 SoC. */
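A hypothetical illustration (the name and numbers below are invented): extending the table is now a data-only change, with no ipa_clock.c modification needed, because the count is taken from ARRAY_SIZE(ipa_interconnect_data):

	{
		.name = "example",
		.peak_bandwidth = 100000,	/* 100 MBps */
		.average_bandwidth = 0,		/* unused */
	},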
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index f8fee8d3ca42..88c9c3562ab7 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 600000, /* 600 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 350000, /* 350 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 40000, /* 40 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SDM845 SoC. */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 0ed5ffe2b8da..b476fc373f7f 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -258,32 +258,28 @@ struct ipa_mem_data {
u32 smem_size;
};
-/** enum ipa_interconnect_id - IPA interconnect identifier */
-enum ipa_interconnect_id {
- IPA_INTERCONNECT_MEMORY,
- IPA_INTERCONNECT_IMEM,
- IPA_INTERCONNECT_CONFIG,
- IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
-};
-
/**
- * struct ipa_interconnect_data - description of IPA interconnect rates
- * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
- * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
*/
struct ipa_interconnect_data {
- u32 peak_rate;
- u32 average_rate;
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
};
/**
* struct ipa_clock_data - description of IPA clock and interconnect rates
* @core_clock_rate: Core clock rate (Hz)
- * @interconnect: Array of interconnect bandwidth parameters
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
*/
struct ipa_clock_data {
u32 core_clock_rate;
- struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f4be9812a1f..7209ee3c3124 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -69,8 +69,11 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
+#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
#ifdef IPA_VALIDATE
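For reference, the new field masks are used with the generic <linux/bitfield.h> accessors; extraction from a status element (mirroring ipa_endpoint_status_tag() below) looks like:

	u8 src_idx = u8_get_bits(status->endp_src_idx,
				 IPA_STATUS_SRC_IDX_FMASK);
	bool tag_valid = le16_get_bits(status->mask,
				       IPA_STATUS_MASK_TAG_VALID_FMASK);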
@@ -171,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 limit;
- /* Not sure where this constraint come from... */
- BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
@@ -399,7 +399,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
* That won't happen, and we could be more precise, but this is fine
* for now. We need to end the transaction with a "pipeline clear."
*/
- count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -428,11 +428,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
/* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
+
return 0;
}
@@ -588,7 +590,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
- val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -1015,31 +1017,34 @@ err_free_pages:
}
/**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
*
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
struct gsi *gsi;
u32 backlog;
if (!endpoint->replenish_enabled) {
- if (count)
- atomic_add(count, &endpoint->replenish_saved);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_saved);
return;
}
-
while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
return;
@@ -1047,8 +1052,8 @@ try_again_later:
/* The last one didn't succeed, so fix the backlog */
backlog = atomic_inc_return(&endpoint->replenish_backlog);
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
@@ -1075,7 +1080,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
/* Start replenishing if hardware currently has no buffers */
max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
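Not shown in this hunk: when replenishing is re-enabled, any "saved" count must be folded back into the backlog. A sketch of how that transfer can be done atomically (an assumption based on the semantics described above, not code from this patch):

	u32 saved;

	endpoint->replenish_enabled = true;
	/* Move any saved count over to the backlog */
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);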
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1094,7 +1099,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1164,19 +1169,53 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
return true;
if (!status->pkt_len)
return true;
- endpoint_id = u32_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+ endpoint_id = u8_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
if (endpoint_id != endpoint->endpoint_id)
return true;
return false; /* Don't skip this packet, process it */
}
+static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ struct ipa_endpoint *command_endpoint;
+ struct ipa *ipa = endpoint->ipa;
+ u32 endpoint_id;
+
+ if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ return false; /* No valid tag */
+
+ /* The status contains a valid tag. We know the packet was sent to
+ * this endpoint (already verified by ipa_endpoint_status_skip()).
+ * If the packet came from the AP->command TX endpoint we know
+ * this packet was sent as part of the pipeline clear process.
+ */
+ endpoint_id = u8_get_bits(status->endp_src_idx,
+ IPA_STATUS_SRC_IDX_FMASK);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (endpoint_id == command_endpoint->endpoint_id) {
+ complete(&ipa->completion);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "unexpected tagged packet from endpoint %u\n",
+ endpoint_id);
+ }
+
+ return true;
+}
+
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_status_drop_packet(const struct ipa_status *status)
+static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
{
u32 val;
+ /* If the status indicates a tagged transfer, we'll drop the packet */
+ if (ipa_endpoint_status_tag(endpoint, status))
+ return true;
+
/* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1213,12 +1252,11 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
continue;
}
- /* Compute the amount of buffer space consumed by the
- * packet, including the status element. If the hardware
- * is configured to pad packet data to an aligned boundary,
- * account for that. And if checksum offload is is enabled
- * a trailer containing computed checksum information will
- * be appended.
+ /* Compute the amount of buffer space consumed by the packet,
+ * including the status element. If the hardware is configured
+ * to pad packet data to an aligned boundary, account for that.
+ * And if checksum offload is enabled a trailer containing
+ * computed checksum information will be appended.
*/
align = endpoint->data->rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
@@ -1226,16 +1264,21 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
if (endpoint->data->checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- /* Charge the new packet with a proportional fraction of
- * the unused space in the original receive buffer.
- * XXX Charge a proportion of the *whole* receive buffer?
- */
- if (!ipa_status_drop_packet(status)) {
- u32 extra = unused * len / total_len;
- void *data2 = data + sizeof(*status);
- u32 len2 = le16_to_cpu(status->pkt_len);
+ if (!ipa_endpoint_status_drop(endpoint, status)) {
+ void *data2;
+ u32 extra;
+ u32 len2;
/* Client receives only packet data (no status) */
+ data2 = data + sizeof(*status);
+ len2 = le16_to_cpu(status->pkt_len);
+
+ /* Have the true size reflect the extra unused space in
+ * the original receive buffer. Distribute the "cost"
+ * proportionately across all aggregated packets in the
+ * buffer.
+ */
+ extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
}
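A quick worked example of the proportional truesize charge, with illustrative numbers:

	/* unused = 2048, len = 1100, total_len = 6144:
	 *	extra = DIV_ROUND_CLOSEST(2048 * 1100, 6144) = 367
	 * (the previous open-coded division truncated, giving 366)
	 */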
@@ -1257,7 +1300,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
{
struct page *page;
- ipa_endpoint_replenish(endpoint, 1);
+ ipa_endpoint_replenish(endpoint, true);
if (trans->cancelled)
return;
@@ -1378,7 +1421,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
@@ -1399,7 +1442,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
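Background on the substitution (per Documentation/timers/timers-howto.rst): msleep(1) is jiffy-based and can sleep for 10-20 ms on a HZ=100 kernel, while usleep_range() uses hrtimers and bounds the wait to the requested window:

	/* sleep roughly 1-2 ms, without tick-granularity overshoot */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);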
@@ -1564,7 +1607,7 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_tag_process(ipa);
+ ipa_cmd_pipeline_clear(ipa);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 84bb8ae92725..97c1b55405cb 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/remoteproc.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -581,10 +580,10 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
return -EINVAL;
for (i = 0; i < data->resource_src_count; i++)
- ipa_resource_config_src(ipa, data->resource_src);
+ ipa_resource_config_src(ipa, &data->resource_src[i]);
for (i = 0; i < data->resource_dst_count; i++)
- ipa_resource_config_dst(ipa, data->resource_dst);
+ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
return 0;
}
@@ -729,19 +728,6 @@ static const struct of_device_id ipa_match[] = {
};
MODULE_DEVICE_TABLE(of, ipa_match);
-static phandle of_property_read_phandle(const struct device_node *np,
- const char *name)
-{
- struct property *prop;
- int len = 0;
-
- prop = of_find_property(np, name, &len);
- if (!prop || len != sizeof(__be32))
- return 0;
-
- return be32_to_cpup(prop->value);
-}
-
/* Check things that can be validated at build time. This just
* groups these things so BUILD_BUG_ON() calls don't clutter the rest
* of the code.
@@ -807,10 +793,8 @@ static int ipa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
- struct rproc *rproc;
bool modem_init;
struct ipa *ipa;
- phandle ph;
int ret;
ipa_validate_build();
@@ -829,25 +813,12 @@ static int ipa_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- /* We rely on remoteproc to tell us about modem state changes */
- ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!ph) {
- dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
- return -EINVAL;
- }
-
- rproc = rproc_get_by_phandle(ph);
- if (!rproc)
- return -EPROBE_DEFER;
-
/* The clock and interconnects might not be ready when we're
* probed, so this might return -EPROBE_DEFER.
*/
clock = ipa_clock_init(dev, data->clock_data);
- if (IS_ERR(clock)) {
- ret = PTR_ERR(clock);
- goto err_rproc_put;
- }
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -858,9 +829,9 @@ static int ipa_probe(struct platform_device *pdev)
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
- ipa->modem_rproc = rproc;
ipa->clock = clock;
ipa->version = data->version;
+ init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
if (ret)
@@ -935,8 +906,6 @@ err_kfree_ipa:
kfree(ipa);
err_clock_exit:
ipa_clock_exit(clock);
-err_rproc_put:
- rproc_put(rproc);
return ret;
}
@@ -944,7 +913,6 @@ err_rproc_put:
static int ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -970,7 +938,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_reg_exit(ipa);
kfree(ipa);
ipa_clock_exit(clock);
- rproc_put(rproc);
return 0;
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 0cc3a3374caa..f25029b9ec85 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
- dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
@@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
- dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index e34fe2d77324..9b08eb823984 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e6b0827a244e..732e691e9aa6 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
static inline u32 ipa_header_size_encoded(enum ipa_version version,
u32 header_size)
{
+ u32 size = header_size & field_mask(HDR_LEN_FMASK);
u32 val;
- val = u32_encode_bits(header_size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(size, HDR_LEN_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(header_size == size); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- header_size >>= hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+ size = header_size >> hweight32(HDR_LEN_FMASK);
+ val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
return val;
}
@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
u32 offset)
{
+ u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
u32 val;
- val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(offset == off); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- offset >>= hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+ off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
+ val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
return val;
}
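A worked example of the split encoding, assuming HDR_LEN_FMASK is GENMASK(5, 0) (6 bits; the actual mask width is defined elsewhere in this header): for header_size = 0x47 on IPA v4.5+,

	size = 0x47 & 0x3f;	/* = 0x07, low bits via HDR_LEN_FMASK */
	msb  = 0x47 >> 6;	/* = 0x01, high bits via HDR_LEN_MSB_FMASK */

On earlier versions only the low bits exist, hence the (commented-out) assertion that nothing was truncated.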
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 32e2d3e052d5..baaab3dd0e63 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -239,6 +239,11 @@ static void ipa_table_validate_build(void)
#endif /* !IPA_VALIDATE */
+bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
+
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -412,8 +417,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
struct gsi_trans *trans;
u32 val;
- /* IPA version 4.2 does not support hashed tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return 0;
trans = ipa_cmd_trans_alloc(ipa, 1);
@@ -531,8 +535,7 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
u32 ep_mask = ipa->filter_map;
- /* IPA version 4.2 has no hashed route tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return;
while (ep_mask) {
@@ -582,8 +585,7 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
{
u32 route_id;
- /* IPA version 4.2 has no hashed route tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return;
for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 78038d14fcea..1a68d20f19d6 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -52,6 +52,12 @@ static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
#endif /* !IPA_VALIDATE */
/**
+ * ipa_table_hash_support() - Return true if hashed tables are supported
+ * @ipa: IPA pointer
+ */
+bool ipa_table_hash_support(struct ipa *ipa);
+
+/**
* ipa_table_reset() - Reset filter and route tables entries to "none"
* @ipa: IPA pointer
* @modem: Whether to reset modem or AP entries
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8801d093135c..6cd50106e611 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -651,8 +651,7 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Should not reach here */
- WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
- port->mode);
+ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
kfree_skb(skb);
return NET_XMIT_DROP;
@@ -749,8 +748,7 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
}
/* Should not reach here */
- WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
- port->mode);
+ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fb51329f8964..9a9a5cf36a4b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1385,7 +1385,7 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return ret;
}
- if (!data || !data[IFLA_MACVLAN_MACADDR_DATA])
+ if (!data[IFLA_MACVLAN_MACADDR_DATA])
return 0;
head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..d3915f831854 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
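A hypothetical use of the newly exported accessors (names invented): a driver that wraps bit-banged MDIO transfers with its own bus handling can call them directly instead of duplicating the bit-bang protocol:

static int demo_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	int ret;

	/* e.g. take a bus-specific lock or enable bus access here */
	ret = mdiobb_read(bus, phy, reg);
	/* e.g. release it here */

	return ret;
}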
diff --git a/drivers/net/mdio/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c
index b72c6d185175..f0cff584e176 100644
--- a/drivers/net/mdio/mdio-moxart.c
+++ b/drivers/net/mdio/mdio-moxart.c
@@ -125,7 +125,7 @@ static int moxart_mdio_probe(struct platform_device *pdev)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id);
bus->parent = &pdev->dev;
- /* Setting PHY_IGNORE_INTERRUPT here even if it has no effect,
+ /* Setting PHY_MAC_INTERRUPT here even if it has no effect,
* of_mdiobus_register() sets these to PHY_POLL.
* Ideally, the interrupt from the MAC controller could be used to
* detect link state changes, not polling, i.e. if there was
@@ -133,7 +133,7 @@ static int moxart_mdio_probe(struct platform_device *pdev)
* interrupt handled in ethernet driver code.
*/
for (i = 0; i < PHY_MAX_ADDR; i++)
- bus->irq[i] = PHY_IGNORE_INTERRUPT;
+ bus->irq[i] = PHY_MAC_INTERRUPT;
data = bus->priv;
data->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 4daf94bb56a5..ea9d5855fb52 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -462,36 +462,6 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
}
EXPORT_SYMBOL(of_phy_get_and_connect);
-/**
- * of_phy_attach - Attach to a PHY without starting the state machine
- * @dev: pointer to net_device claiming the phy
- * @phy_np: Node pointer for the PHY
- * @flags: flags to pass to the PHY
- * @iface: PHY data interface type
- *
- * If successful, returns a pointer to the phy_device with the embedded
- * struct device refcount incremented by one, or NULL on failure. The
- * refcount must be dropped by calling phy_disconnect() or phy_detach().
- */
-struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np, u32 flags,
- phy_interface_t iface)
-{
- struct phy_device *phy = of_phy_find_device(phy_np);
- int ret;
-
- if (!phy)
- return NULL;
-
- ret = phy_attach_direct(dev, phy, flags, iface);
-
- /* refcount is held by phy_attach_direct() on success */
- put_device(&phy->mdio.dev);
-
- return ret ? NULL : phy;
-}
-EXPORT_SYMBOL(of_phy_attach);
-
/*
* of_phy_is_fixed_link() and of_phy_register_fixed_link() must
* support two DT bindings:
diff --git a/drivers/net/mhi/Makefile b/drivers/net/mhi/Makefile
new file mode 100644
index 000000000000..f71b9f8f3c4f
--- /dev/null
+++ b/drivers/net/mhi/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MHI_NET) += mhi_net.o
+
+mhi_net-y := net.o proto_mbim.o
diff --git a/drivers/net/mhi/mhi.h b/drivers/net/mhi/mhi.h
new file mode 100644
index 000000000000..12e7407d712a
--- /dev/null
+++ b/drivers/net/mhi/mhi.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* MHI Network driver - Network over MHI bus
+ *
+ * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t rx_length_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ const struct mhi_net_proto *proto;
+ void *proto_data;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+ int msg_enable;
+};
+
+struct mhi_net_proto {
+ int (*init)(struct mhi_net_dev *mhi_netdev);
+ struct sk_buff * (*tx_fixup)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb);
+ void (*rx)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb);
+};
+
+extern const struct mhi_net_proto proto_mbim;
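A hypothetical skeleton of a protocol backend (all names invented), mirroring how proto_mbim plugs into these hooks; note the driver checks each hook for NULL, so partial implementations are fine:

static int demo_init(struct mhi_net_dev *mhi_netdev)
{
	/* allocate mhi_netdev->proto_data, adjust MTU, etc. */
	return 0;
}

static struct sk_buff *demo_tx_fixup(struct mhi_net_dev *mhi_netdev,
				     struct sk_buff *skb)
{
	/* prepend protocol framing; return NULL to drop the packet */
	return skb;
}

static void demo_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
{
	/* de-aggregate and deliver; here just pass through */
	netif_rx(skb);
}

const struct mhi_net_proto proto_demo = {
	.init = demo_init,
	.tx_fixup = demo_tx_fixup,
	.rx = demo_rx,
};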
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi/net.c
index fa41d8c42f05..f59960876083 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi/net.c
@@ -12,30 +12,15 @@
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
+#include "mhi.h"
+
#define MHI_NET_MIN_MTU ETH_MIN_MTU
#define MHI_NET_MAX_MTU 0xffff
#define MHI_NET_DEFAULT_MTU 0x4000
-struct mhi_net_stats {
- u64_stats_t rx_packets;
- u64_stats_t rx_bytes;
- u64_stats_t rx_errors;
- u64_stats_t rx_dropped;
- u64_stats_t tx_packets;
- u64_stats_t tx_bytes;
- u64_stats_t tx_errors;
- u64_stats_t tx_dropped;
- atomic_t rx_queued;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync rx_syncp;
-};
-
-struct mhi_net_dev {
- struct mhi_device *mdev;
- struct net_device *ndev;
- struct delayed_work rx_refill;
- struct mhi_net_stats stats;
- u32 rx_queue_sz;
+struct mhi_device_info {
+ const char *netname;
+ const struct mhi_net_proto *proto;
};
static int mhi_ndo_open(struct net_device *ndev)
@@ -67,26 +52,35 @@ static int mhi_ndo_stop(struct net_device *ndev)
static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ const struct mhi_net_proto *proto = mhi_netdev->proto;
struct mhi_device *mdev = mhi_netdev->mdev;
int err;
+ if (proto && proto->tx_fixup) {
+ skb = proto->tx_fixup(mhi_netdev, skb);
+ if (unlikely(!skb))
+ goto exit_drop;
+ }
+
err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
if (unlikely(err)) {
net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
ndev->name, err);
-
- u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
- u64_stats_inc(&mhi_netdev->stats.tx_dropped);
- u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
-
- /* drop the packet */
dev_kfree_skb_any(skb);
+ goto exit_drop;
}
if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
+
+exit_drop:
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ return NETDEV_TX_OK;
}
static void mhi_ndo_get_stats64(struct net_device *ndev,
@@ -101,6 +95,7 @@ static void mhi_ndo_get_stats64(struct net_device *ndev,
stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
+ stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
do {
@@ -122,7 +117,7 @@ static const struct net_device_ops mhi_netdev_ops = {
static void mhi_net_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
- ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->type = ARPHRD_RAWIP;
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -133,38 +128,101 @@ static void mhi_net_setup(struct net_device *ndev)
ndev->tx_queue_len = 1000;
}
+static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *head = mhi_netdev->skbagg_head;
+ struct sk_buff *tail = mhi_netdev->skbagg_tail;
+
+ /* This is non-paged skb chaining using frag_list */
+ if (!head) {
+ mhi_netdev->skbagg_head = skb;
+ return skb;
+ }
+
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ tail->next = skb;
+
+ head->len += skb->len;
+ head->data_len += skb->len;
+ head->truesize += skb->truesize;
+
+ mhi_netdev->skbagg_tail = skb;
+
+ return mhi_netdev->skbagg_head;
+}
+
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ const struct mhi_net_proto *proto = mhi_netdev->proto;
struct sk_buff *skb = mhi_res->buf_addr;
- int remaining;
+ int free_desc_count;
- remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+ free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
if (unlikely(mhi_res->transaction_status)) {
- dev_kfree_skb_any(skb);
-
- /* MHI layer stopping/resetting the DL channel */
- if (mhi_res->transaction_status == -ENOTCONN)
+ switch (mhi_res->transaction_status) {
+ case -EOVERFLOW:
+ * Packet cannot fit in one MHI buffer and has been
+ * split over multiple MHI transfers, do re-aggregation.
+ * That usually means the device side MTU is larger than
+ * the host side MTU/MRU. Since this is not optimal,
+ * print a warning (once).
+ */
+ netdev_warn_once(mhi_netdev->ndev,
+ "Fragmented packets received, fix MTU?\n");
+ skb_put(skb, mhi_res->bytes_xferd);
+ mhi_net_skb_agg(mhi_netdev, skb);
+ break;
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the DL channel */
+ dev_kfree_skb_any(skb);
return;
-
- u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
- u64_stats_inc(&mhi_netdev->stats.rx_errors);
- u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ }
} else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ if (mhi_netdev->skbagg_head) {
+ /* Aggregate the final fragment */
+ skb = mhi_net_skb_agg(mhi_netdev, skb);
+ mhi_netdev->skbagg_head = NULL;
+ }
+
u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
u64_stats_inc(&mhi_netdev->stats.rx_packets);
- u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- skb->protocol = htons(ETH_P_MAP);
- skb_put(skb, mhi_res->bytes_xferd);
- netif_rx(skb);
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ if (proto && proto->rx)
+ proto->rx(mhi_netdev, skb);
+ else
+ netif_rx(skb);
}
/* Refill if the RX buffer queue becomes low */
- if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
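Putting the -EOVERFLOW handling together, the reassembly lifecycle implied by the code above is:

	/* A device-side packet larger than one MHI buffer arrives as:
	 *	buf 0 .. N-2: transaction_status == -EOVERFLOW
	 *		-> chained onto skbagg_head via mhi_net_skb_agg()
	 *	buf N-1:      transaction_status == 0
	 *		-> final fragment appended, skbagg_head reset,
	 *		   whole skb delivered via proto->rx() or netif_rx()
	 */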
@@ -211,7 +269,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
struct sk_buff *skb;
int err;
- while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+ while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
skb = netdev_alloc_skb(ndev, size);
if (unlikely(!skb))
break;
@@ -224,8 +282,6 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
break;
}
- atomic_inc(&mhi_netdev->stats.rx_queued);
-
/* Do not hog the CPU if rx buffers are consumed faster than
* queued (unlikely).
*/
@@ -233,21 +289,25 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
}
/* If we're still starved of rx buffers, reschedule later */
- if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
+static struct device_type wwan_type = {
+ .name = "wwan",
+};
+
static int mhi_net_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
- const char *netname = (char *)id->driver_data;
+ const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
struct device *dev = &mhi_dev->dev;
struct mhi_net_dev *mhi_netdev;
struct net_device *ndev;
int err;
- ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
- mhi_net_setup);
+ ndev = alloc_netdev(sizeof(*mhi_netdev), info->netname,
+ NET_NAME_PREDICTABLE, mhi_net_setup);
if (!ndev)
return -ENOMEM;
@@ -255,10 +315,10 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
dev_set_drvdata(dev, mhi_netdev);
mhi_netdev->ndev = ndev;
mhi_netdev->mdev = mhi_dev;
+ mhi_netdev->skbagg_head = NULL;
+ mhi_netdev->proto = info->proto;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
-
- /* All MHI net channels have 128 ring elements (at least for now) */
- mhi_netdev->rx_queue_sz = 128;
+ SET_NETDEV_DEVTYPE(ndev, &wwan_type);
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
@@ -269,12 +329,23 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
if (err)
goto out_err;
+ /* Number of transfer descriptors determines size of the queue */
+ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
err = register_netdev(ndev);
if (err)
goto out_err;
+ if (mhi_netdev->proto) {
+ err = mhi_netdev->proto->init(mhi_netdev);
+ if (err)
+ goto out_err_proto;
+ }
+
return 0;
+out_err_proto:
+ unregister_netdev(ndev);
out_err:
free_netdev(ndev);
return err;
@@ -288,12 +359,32 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
mhi_unprepare_from_transfer(mhi_netdev->mdev);
+ if (mhi_netdev->skbagg_head)
+ kfree_skb(mhi_netdev->skbagg_head);
+
free_netdev(mhi_netdev->ndev);
}
+static const struct mhi_device_info mhi_hwip0 = {
+ .netname = "mhi_hwip%d",
+};
+
+static const struct mhi_device_info mhi_swip0 = {
+ .netname = "mhi_swip%d",
+};
+
+static const struct mhi_device_info mhi_hwip0_mbim = {
+ .netname = "mhi_mbim%d",
+ .proto = &proto_mbim,
+};
+
static const struct mhi_device_id mhi_net_id_table[] = {
- { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
- { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ /* Hardware accelerated data path (to modem IPA), protocol agnostic */
+ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
+ /* Software data path (to modem CPU) */
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
+ /* Hardware accelerated data path (to modem IPA), MBIM protocol */
+ { .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c
new file mode 100644
index 000000000000..75b5484c40d5
--- /dev/null
+++ b/drivers/net/mhi/proto_mbim.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI Network driver - Network over MHI bus
+ *
+ * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
+ *
+ * This driver copies some code from cdc_ncm, which is:
+ * Copyright (C) ST-Ericsson 2010-2012
+ * and cdc_mbim, which is:
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc_ncm.h>
+
+#include "mhi.h"
+
+#define MBIM_NDP16_SIGN_MASK 0x00ffffff
+
+struct mbim_context {
+ u16 rx_seq;
+ u16 tx_seq;
+};
+
+static void __mbim_length_errors_inc(struct mhi_net_dev *dev)
+{
+ u64_stats_update_begin(&dev->stats.rx_syncp);
+ u64_stats_inc(&dev->stats.rx_length_errors);
+ u64_stats_update_end(&dev->stats.rx_syncp);
+}
+
+static void __mbim_errors_inc(struct mhi_net_dev *dev)
+{
+ u64_stats_update_begin(&dev->stats.rx_syncp);
+ u64_stats_inc(&dev->stats.rx_errors);
+ u64_stats_update_end(&dev->stats.rx_syncp);
+}
+
+static int mbim_rx_verify_nth16(struct sk_buff *skb)
+{
+ struct mhi_net_dev *dev = netdev_priv(skb->dev);
+ struct mbim_context *ctx = dev->proto_data;
+ struct usb_cdc_ncm_nth16 *nth16;
+ int len;
+
+ if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
+ sizeof(struct usb_cdc_ncm_ndp16)) {
+ netif_dbg(dev, rx_err, dev->ndev, "frame too short\n");
+ __mbim_length_errors_inc(dev);
+ return -EINVAL;
+ }
+
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;
+
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ netif_dbg(dev, rx_err, dev->ndev,
+ "invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
+ __mbim_errors_inc(dev);
+ return -EINVAL;
+ }
+
+ /* No limit on the block length, except the size of the data pkt */
+ len = le16_to_cpu(nth16->wBlockLength);
+ if (len > skb->len) {
+ netif_dbg(dev, rx_err, dev->ndev,
+ "NTB does not fit into the skb %u/%u\n", len,
+ skb->len);
+ __mbim_length_errors_inc(dev);
+ return -EINVAL;
+ }
+
+ if (ctx->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !(ctx->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
+ netif_dbg(dev, rx_err, dev->ndev,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth16->wSequence);
+
+ return le16_to_cpu(nth16->wNdpIndex);
+}
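The sequence check just above is dense; restated, an NTB sequence number is unremarkable when it advances by exactly one (including the 16-bit wrap from 0xffff to 0) or when both values are zero after a reset, and anything else is logged as a glitch. A standalone sketch of the same predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Restatement of the NTH16 sequence check above: OK on +1 (including
 * the 16-bit wrap) or on a reset to 0/0, a glitch otherwise.
 */
static bool seq_ok(uint16_t prev, uint16_t curr)
{
        if ((uint16_t)(prev + 1) == curr)       /* normal increment, incl. wrap */
                return true;
        if (prev == 0 && curr == 0)             /* stream reset */
                return true;
        return false;
}

int main(void)
{
        printf("%d %d %d %d\n",
               seq_ok(5, 6),            /* 1 */
               seq_ok(0xffff, 0),       /* 1: wrap */
               seq_ok(0, 0),            /* 1: reset */
               seq_ok(5, 9));           /* 0: glitch */
        return 0;
}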
+
+static int mbim_rx_verify_ndp16(struct sk_buff *skb, int ndpoffset)
+{
+ struct mhi_net_dev *dev = netdev_priv(skb->dev);
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ int ret;
+
+ if (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16) > skb->len) {
+ netif_dbg(dev, rx_err, dev->ndev, "invalid NDP offset <%u>\n",
+ ndpoffset);
+ return -EINVAL;
+ }
+
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
+
+ if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ netif_dbg(dev, rx_err, dev->ndev, "invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
+ return -EINVAL;
+ }
+
+ ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
+ / sizeof(struct usb_cdc_ncm_dpe16));
+ ret--; /* Last entry is always a NULL terminator */
+
+ if (sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
+ netif_dbg(dev, rx_err, dev->ndev,
+ "Invalid nframes = %d\n", ret);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
+{
+ struct net_device *ndev = mhi_netdev->ndev;
+ int ndpoffset;
+
+ if (skb_linearize(skb))
+ goto error;
+
+ /* Check NTB header and retrieve first NDP offset */
+ ndpoffset = mbim_rx_verify_nth16(skb);
+ if (ndpoffset < 0) {
+ net_err_ratelimited("%s: Incorrect NTB header\n", ndev->name);
+ goto error;
+ }
+
+ /* Process each NDP */
+ while (1) {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ int nframes, n;
+
+ /* Check NDP header and retrieve number of datagrams */
+ nframes = mbim_rx_verify_ndp16(skb, ndpoffset);
+ if (nframes < 0) {
+ net_err_ratelimited("%s: Incorrect NDP16\n", ndev->name);
+ __mbim_length_errors_inc(mhi_netdev);
+ goto error;
+ }
+
+ /* Only IP data type supported, no DSS in MHI context */
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
+ if ((ndp16->dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
+ != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
+ net_err_ratelimited("%s: Unsupported NDP type\n", ndev->name);
+ __mbim_errors_inc(mhi_netdev);
+ goto next_ndp;
+ }
+
+ /* Only primary IP session 0 (0x00) supported for now */
+ if (ndp16->dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) {
+ net_err_ratelimited("%s: bad packet session\n", ndev->name);
+ __mbim_errors_inc(mhi_netdev);
+ goto next_ndp;
+ }
+
+ /* de-aggregate and deliver IP packets */
+ dpe16 = ndp16->dpe16;
+ for (n = 0; n < nframes; n++, dpe16++) {
+ u16 dgram_offset = le16_to_cpu(dpe16->wDatagramIndex);
+ u16 dgram_len = le16_to_cpu(dpe16->wDatagramLength);
+ struct sk_buff *skbn;
+
+ if (!dgram_offset || !dgram_len)
+ break; /* null terminator */
+
+ skbn = netdev_alloc_skb(ndev, dgram_len);
+ if (!skbn)
+ continue;
+
+ skb_put(skbn, dgram_len);
+ memcpy(skbn->data, skb->data + dgram_offset, dgram_len);
+
+ switch (skbn->data[0] & 0xf0) {
+ case 0x40:
+ skbn->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skbn->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ net_err_ratelimited("%s: unknown protocol\n",
+ ndev->name);
+ __mbim_errors_inc(mhi_netdev);
+ dev_kfree_skb_any(skbn);
+ continue;
+ }
+
+ netif_rx(skbn);
+ }
+next_ndp:
+ /* Other NDP to process? */
+ ndpoffset = (int)le16_to_cpu(ndp16->wNextNdpIndex);
+ if (!ndpoffset)
+ break;
+ }
+
+ /* free skb */
+ dev_consume_skb_any(skb);
+ return;
+error:
+ dev_kfree_skb_any(skb);
+}
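Since MHI frames carry raw IP with no Ethernet header, the RX loop above has to sniff the protocol from the IP version nibble of the first byte. The same check in a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* First-nibble protocol detection, as in the switch on
 * skbn->data[0] & 0xf0 above.
 */
static const char *guess_proto(const uint8_t *pkt)
{
        switch (pkt[0] & 0xf0) {
        case 0x40: return "IPv4";
        case 0x60: return "IPv6";
        default:   return "unknown";
        }
}

int main(void)
{
        uint8_t v4 = 0x45, v6 = 0x60;   /* typical first bytes */

        printf("%s %s\n", guess_proto(&v4), guess_proto(&v6));
        return 0;
}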
+
+struct mbim_tx_hdr {
+ struct usb_cdc_ncm_nth16 nth16;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[2];
+} __packed;
+
+static struct sk_buff *mbim_tx_fixup(struct mhi_net_dev *mhi_netdev,
+ struct sk_buff *skb)
+{
+ struct mbim_context *ctx = mhi_netdev->proto_data;
+ unsigned int dgram_size = skb->len;
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct mbim_tx_hdr *mbim_hdr;
+
+ /* For now, this is a partial implementation of CDC MBIM: only one NDP
+ * is sent, containing the IP packet (no aggregation).
+ */
+
+ /* Ensure we have enough headroom for crafting the MBIM header */
+ if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
+
+ /* Fill NTB header */
+ nth16 = &mbim_hdr->nth16;
+ nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ nth16->wBlockLength = cpu_to_le16(skb->len);
+ nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+
+ /* Fill the unique NDP */
+ ndp16 = &mbim_hdr->ndp16;
+ ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+ ndp16->wNextNdpIndex = 0;
+
+ /* The datagram follows the MBIM header */
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
+ ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
+
+ /* null termination */
+ ndp16->dpe16[1].wDatagramIndex = 0;
+ ndp16->dpe16[1].wDatagramLength = 0;
+
+ return skb;
+}
+
+static int mbim_init(struct mhi_net_dev *mhi_netdev)
+{
+ struct net_device *ndev = mhi_netdev->ndev;
+
+ mhi_netdev->proto_data = devm_kzalloc(&ndev->dev,
+ sizeof(struct mbim_context),
+ GFP_KERNEL);
+ if (!mhi_netdev->proto_data)
+ return -ENOMEM;
+
+ ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
+
+ return 0;
+}
+
+const struct mhi_net_proto proto_mbim = {
+ .init = mbim_init,
+ .rx = mbim_rx,
+ .tx_fixup = mbim_tx_fixup,
+};
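To make the TX path concrete, here is a worked example of the fixed 28-byte header that mbim_tx_fixup() prepends to a single 1500-byte datagram. The struct layouts are simplified stand-ins for usb_cdc_ncm_nth16/ndp16 (whose kernel versions end in a flexible dpe16[] array), and a little-endian host is assumed where the driver would use cpu_to_le16()/cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

struct nth16 { uint32_t sign; uint16_t hdr_len, seq, blk_len, ndp_idx; } __attribute__((packed));
struct dpe16 { uint16_t idx, len; } __attribute__((packed));
struct ndp16 { uint32_t sign; uint16_t len, next; struct dpe16 dpe[2]; } __attribute__((packed));
struct tx_hdr { struct nth16 nth; struct ndp16 ndp; } __attribute__((packed));

int main(void)
{
        unsigned int dgram_size = 1500; /* hypothetical IP packet length */
        struct tx_hdr h = {
                .nth = {
                        .sign    = 0x484d434e,  /* USB_CDC_NCM_NTH16_SIGN, "NCMH" */
                        .hdr_len = sizeof(struct nth16),
                        .seq     = 0,
                        .blk_len = sizeof(h) + dgram_size,
                        .ndp_idx = sizeof(struct nth16),
                },
                .ndp = {
                        .sign = 0x00535049,     /* USB_CDC_MBIM_NDP16_IPS_SIGN, "IPS" + session 0 */
                        .len  = sizeof(struct ndp16),
                        .next = 0,              /* no further NDPs */
                        .dpe  = {
                                { sizeof(h), dgram_size },      /* datagram right after the header */
                                { 0, 0 },                       /* NULL terminator */
                        },
                },
        };

        printf("wBlockLength = %u\n", (unsigned int)h.nth.blk_len);
        printf("datagram at offset %u, length %u\n",
               (unsigned int)h.ndp.dpe[0].idx, (unsigned int)h.ndp.dpe[0].len);
        return 0;
}

So wBlockLength comes out to 28 + 1500 = 1528, the single DPE points at offset 28, and the second, all-zero DPE acts as the terminator.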
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 816af1f55e2c..dbeb29fa16e8 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1012,23 +1012,25 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
- nsim_dev->fib_data = nsim_fib_create(devlink, extack);
- if (IS_ERR(nsim_dev->fib_data))
- return PTR_ERR(nsim_dev->fib_data);
-
nsim_devlink_param_load_driverinit_values(devlink);
err = nsim_dev_dummy_region_init(nsim_dev, devlink);
if (err)
- goto err_fib_destroy;
+ return err;
err = nsim_dev_traps_init(devlink);
if (err)
goto err_dummy_region_exit;
+ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_traps_exit;
+ }
+
err = nsim_dev_health_init(nsim_dev, devlink);
if (err)
- goto err_traps_exit;
+ goto err_fib_destroy;
err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
if (err)
@@ -1043,12 +1045,12 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
err_health_exit:
nsim_dev_health_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_traps_exit:
nsim_dev_traps_exit(devlink);
err_dummy_region_exit:
nsim_dev_dummy_region_exit(nsim_dev);
-err_fib_destroy:
- nsim_fib_destroy(devlink, nsim_dev->fib_data);
return err;
}
@@ -1080,15 +1082,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_devlink_free;
- nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
- if (IS_ERR(nsim_dev->fib_data)) {
- err = PTR_ERR(nsim_dev->fib_data);
- goto err_resources_unregister;
- }
-
err = devlink_register(devlink, &nsim_bus_dev->dev);
if (err)
- goto err_fib_destroy;
+ goto err_resources_unregister;
err = devlink_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
@@ -1108,9 +1104,15 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_traps_exit;
+ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_debugfs_exit;
+ }
+
err = nsim_dev_health_init(nsim_dev, devlink);
if (err)
- goto err_debugfs_exit;
+ goto err_fib_destroy;
err = nsim_bpf_dev_init(nsim_dev);
if (err)
@@ -1128,6 +1130,8 @@ err_bpf_dev_exit:
nsim_bpf_dev_exit(nsim_dev);
err_health_exit:
nsim_dev_health_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_debugfs_exit:
nsim_dev_debugfs_exit(nsim_dev);
err_traps_exit:
@@ -1139,8 +1143,6 @@ err_params_unregister:
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
devlink_unregister(devlink);
-err_fib_destroy:
- nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
err_devlink_free:
@@ -1157,10 +1159,10 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
debugfs_remove(nsim_dev->take_snapshot);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_health_exit(nsim_dev);
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
mutex_destroy(&nsim_dev->port_list_lock);
- nsim_fib_destroy(devlink, nsim_dev->fib_data);
}
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
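The churn in nsim_dev_reload_create() and nsim_dev_probe() above is all about one invariant of the kernel's goto-unwind idiom: when nsim_fib_create() moves later in the setup sequence, its destroy call must move correspondingly earlier, both in the error labels and in the teardown path. A toy sketch of the idiom with illustrative names only:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

/* Acquire in order; on failure, unwind in exactly the reverse order. */
static int example_probe(void)
{
        void *a, *b, *c;

        a = acquire("regions");
        if (!a)
                return -1;
        b = acquire("traps");
        if (!b)
                goto err_regions;
        c = acquire("fib");     /* moved later in setup => freed earlier in unwind */
        if (!c)
                goto err_traps;

        /* Success: in real code these stay live until remove(). */
        release("fib", c);
        release("traps", b);
        release("regions", a);
        return 0;

err_traps:
        release("traps", b);
err_regions:
        release("regions", a);
        return -1;
}

int main(void) { return example_probe() ? 1 : 0; }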
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 45d8a7790bd5..46fb414f7ca6 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -26,12 +26,13 @@
#include <net/fib_rules.h>
#include <net/net_namespace.h>
#include <net/nexthop.h>
+#include <linux/debugfs.h>
#include "netdevsim.h"
struct nsim_fib_entry {
u64 max;
- u64 num;
+ atomic64_t num;
};
struct nsim_per_fib_data {
@@ -46,10 +47,15 @@ struct nsim_fib_data {
struct nsim_fib_entry nexthops;
struct rhashtable fib_rt_ht;
struct list_head fib_rt_list;
- spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct mutex fib_lock; /* Protects hashtable and list */
struct notifier_block nexthop_nb;
struct rhashtable nexthop_ht;
struct devlink *devlink;
+ struct work_struct fib_event_work;
+ struct list_head fib_event_queue;
+ spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
+ struct dentry *ddir;
+ bool fail_route_offload;
};
struct nsim_fib_rt_key {
@@ -83,6 +89,22 @@ struct nsim_fib6_rt_nh {
struct fib6_info *rt;
};
+struct nsim_fib6_event {
+ struct fib6_info **rt_arr;
+ unsigned int nrt6;
+};
+
+struct nsim_fib_event {
+ struct list_head list; /* node in fib queue */
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct nsim_fib6_event fib6_event;
+ };
+ struct nsim_fib_data *data;
+ unsigned long event;
+ int family;
+};
+
static const struct rhashtable_params nsim_fib_rt_ht_params = {
.key_offset = offsetof(struct nsim_fib_rt, key),
.head_offset = offsetof(struct nsim_fib_rt, ht_node),
@@ -128,7 +150,7 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
return 0;
}
- return max ? entry->max : entry->num;
+ return max ? entry->max : atomic64_read(&entry->num);
}
static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
@@ -165,14 +187,12 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
int err = 0;
if (add) {
- if (entry->num < entry->max) {
- entry->num++;
- } else {
+ if (!atomic64_add_unless(&entry->num, 1, entry->max)) {
err = -ENOSPC;
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries");
}
} else {
- entry->num--;
+ atomic64_dec_if_positive(&entry->num);
}
return err;
@@ -196,20 +216,15 @@ static int nsim_fib_rule_event(struct nsim_fib_data *data,
return err;
}
-static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
- struct netlink_ext_ack *extack)
+static int nsim_fib_account(struct nsim_fib_entry *entry, bool add)
{
int err = 0;
if (add) {
- if (entry->num < entry->max) {
- entry->num++;
- } else {
+ if (!atomic64_add_unless(&entry->num, 1, entry->max))
err = -ENOSPC;
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
- }
} else {
- entry->num--;
+ atomic64_dec_if_positive(&entry->num);
}
return err;
@@ -254,7 +269,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data,
{
struct nsim_fib4_rt *fib4_rt;
- fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_ATOMIC);
+ fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_KERNEL);
if (!fib4_rt)
return NULL;
@@ -291,6 +306,25 @@ nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
return container_of(fib_rt, struct nsim_fib4_rt, common);
}
+static void
+nsim_fib4_rt_offload_failed_flag_set(struct net *net,
+ struct fib_entry_notifier_info *fen_info)
+{
+ u32 *p_dst = (u32 *)&fen_info->dst;
+ struct fib_rt_info fri;
+
+ fri.fi = fen_info->fi;
+ fri.tb_id = fen_info->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = fen_info->dst_len;
+ fri.tos = fen_info->tos;
+ fri.type = fen_info->type;
+ fri.offload = false;
+ fri.trap = false;
+ fri.offload_failed = true;
+ fib_alias_hw_flags_set(net, &fri);
+}
+
static void nsim_fib4_rt_hw_flags_set(struct net *net,
const struct nsim_fib4_rt *fib4_rt,
bool trap)
@@ -307,55 +341,57 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net,
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
+ fri.offload_failed = false;
fib_alias_hw_flags_set(net, &fri);
}
static int nsim_fib4_rt_add(struct nsim_fib_data *data,
- struct nsim_fib4_rt *fib4_rt,
- struct netlink_ext_ack *extack)
+ struct nsim_fib4_rt *fib4_rt)
{
struct net *net = devlink_net(data->devlink);
int err;
- err = nsim_fib_account(&data->ipv4.fib, true, extack);
- if (err)
- return err;
-
err = rhashtable_insert_fast(&data->fib_rt_ht,
&fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv4 route");
+ if (err)
goto err_fib_dismiss;
- }
+ /* Simulate hardware programming latency. */
+ msleep(1);
nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
return 0;
err_fib_dismiss:
- nsim_fib_account(&data->ipv4.fib, false, extack);
+ /* Drop the accounting that was increased from the notification
+ * context when FIB_EVENT_ENTRY_REPLACE was triggered.
+ */
+ nsim_fib_account(&data->ipv4.fib, false);
return err;
}
static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
struct nsim_fib4_rt *fib4_rt,
- struct nsim_fib4_rt *fib4_rt_old,
- struct netlink_ext_ack *extack)
+ struct nsim_fib4_rt *fib4_rt_old)
{
struct net *net = devlink_net(data->devlink);
int err;
- /* We are replacing a route, so no need to change the accounting. */
+ /* We are replacing a route, so we need to remove the accounting that
+ * was increased when FIB_EVENT_ENTRY_REPLACE was triggered.
+ */
+ err = nsim_fib_account(&data->ipv4.fib, false);
+ if (err)
+ return err;
err = rhashtable_replace_fast(&data->fib_rt_ht,
&fib4_rt_old->common.ht_node,
&fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv4 route");
+ if (err)
return err;
- }
+ msleep(1);
nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
@@ -367,19 +403,27 @@ static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
struct fib_entry_notifier_info *fen_info)
{
- struct netlink_ext_ack *extack = fen_info->info.extack;
struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
int err;
+ if (data->fail_route_offload) {
+ /* For testing purposes, the user has set the debugfs
+ * fail_route_offload value to true. Simulate hardware programming
+ * latency and then fail.
+ */
+ msleep(1);
+ return -EINVAL;
+ }
+
fib4_rt = nsim_fib4_rt_create(data, fen_info);
if (!fib4_rt)
return -ENOMEM;
fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
if (!fib4_rt_old)
- err = nsim_fib4_rt_add(data, fib4_rt, extack);
+ err = nsim_fib4_rt_add(data, fib4_rt);
else
- err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old, extack);
+ err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old);
if (err)
nsim_fib4_rt_destroy(fib4_rt);
@@ -390,31 +434,31 @@ static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
const struct fib_entry_notifier_info *fen_info)
{
- struct netlink_ext_ack *extack = fen_info->info.extack;
struct nsim_fib4_rt *fib4_rt;
fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
- if (WARN_ON_ONCE(!fib4_rt))
+ if (!fib4_rt)
return;
rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
- nsim_fib_account(&data->ipv4.fib, false, extack);
nsim_fib4_rt_destroy(fib4_rt);
}
static int nsim_fib4_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info,
+ struct fib_entry_notifier_info *fen_info,
unsigned long event)
{
- struct fib_entry_notifier_info *fen_info;
int err = 0;
- fen_info = container_of(info, struct fib_entry_notifier_info, info);
-
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib4_rt_insert(data, fen_info);
+ if (err) {
+ struct net *net = devlink_net(data->devlink);
+
+ nsim_fib4_rt_offload_failed_flag_set(net, fen_info);
+ }
break;
case FIB_EVENT_ENTRY_DEL:
nsim_fib4_rt_remove(data, fen_info);
@@ -445,7 +489,7 @@ static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
{
struct nsim_fib6_rt_nh *fib6_rt_nh;
- fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_ATOMIC);
+ fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_KERNEL);
if (!fib6_rt_nh)
return -ENOMEM;
@@ -457,33 +501,42 @@ static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
return 0;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void nsim_rt6_release(struct fib6_info *rt)
+{
+ fib6_info_release(rt);
+}
+#else
+static void nsim_rt6_release(struct fib6_info *rt)
+{
+}
+#endif
+
static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
const struct fib6_info *rt)
{
struct nsim_fib6_rt_nh *fib6_rt_nh;
fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
- if (WARN_ON_ONCE(!fib6_rt_nh))
+ if (!fib6_rt_nh)
return;
fib6_rt->nhs--;
list_del(&fib6_rt_nh->list);
-#if IS_ENABLED(CONFIG_IPV6)
- fib6_info_release(fib6_rt_nh->rt);
-#endif
+ nsim_rt6_release(fib6_rt_nh->rt);
kfree(fib6_rt_nh);
}
static struct nsim_fib6_rt *
nsim_fib6_rt_create(struct nsim_fib_data *data,
- struct fib6_entry_notifier_info *fen6_info)
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
- struct fib6_info *iter, *rt = fen6_info->rt;
+ struct fib6_info *rt = rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
int i = 0;
int err;
- fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC);
+ fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_KERNEL);
if (!fib6_rt)
return ERR_PTR(-ENOMEM);
@@ -497,32 +550,18 @@ nsim_fib6_rt_create(struct nsim_fib_data *data,
*/
INIT_LIST_HEAD(&fib6_rt->nh_list);
- err = nsim_fib6_rt_nh_add(fib6_rt, rt);
- if (err)
- goto err_fib_rt_fini;
-
- if (!fen6_info->nsiblings)
- return fib6_rt;
-
- list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
- if (i == fen6_info->nsiblings)
- break;
-
- err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ for (i = 0; i < nrt6; i++) {
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt_arr[i]);
if (err)
goto err_fib6_rt_nh_del;
- i++;
}
- WARN_ON_ONCE(i != fen6_info->nsiblings);
return fib6_rt;
err_fib6_rt_nh_del:
- list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
- fib6_siblings)
- nsim_fib6_rt_nh_del(fib6_rt, iter);
- nsim_fib6_rt_nh_del(fib6_rt, rt);
-err_fib_rt_fini:
+ for (i--; i >= 0; i--) {
+ nsim_fib6_rt_nh_del(fib6_rt, rt_arr[i]);
+ }
nsim_fib_rt_fini(&fib6_rt->common);
kfree(fib6_rt);
return ERR_PTR(err);
@@ -555,127 +594,163 @@ nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
}
static int nsim_fib6_rt_append(struct nsim_fib_data *data,
- struct fib6_entry_notifier_info *fen6_info)
+ struct nsim_fib6_event *fib6_event)
{
- struct fib6_info *iter, *rt = fen6_info->rt;
+ struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
- int i = 0;
- int err;
+ int i, err;
+
+ if (data->fail_route_offload) {
+ /* For testing purposes, the user has set the debugfs
+ * fail_route_offload value to true. Simulate hardware programming
+ * latency and then fail.
+ */
+ msleep(1);
+ return -EINVAL;
+ }
fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
- if (WARN_ON_ONCE(!fib6_rt))
+ if (!fib6_rt)
return -EINVAL;
- err = nsim_fib6_rt_nh_add(fib6_rt, rt);
- if (err)
- return err;
- rt->trap = true;
-
- if (!fen6_info->nsiblings)
- return 0;
-
- list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
- if (i == fen6_info->nsiblings)
- break;
-
- err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ for (i = 0; i < fib6_event->nrt6; i++) {
+ err = nsim_fib6_rt_nh_add(fib6_rt, fib6_event->rt_arr[i]);
if (err)
goto err_fib6_rt_nh_del;
- iter->trap = true;
- i++;
+
+ fib6_event->rt_arr[i]->trap = true;
}
- WARN_ON_ONCE(i != fen6_info->nsiblings);
return 0;
err_fib6_rt_nh_del:
- list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
- fib6_siblings) {
- iter->trap = false;
- nsim_fib6_rt_nh_del(fib6_rt, iter);
+ for (i--; i >= 0; i--) {
+ fib6_event->rt_arr[i]->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
- rt->trap = false;
- nsim_fib6_rt_nh_del(fib6_rt, rt);
return err;
}
-static void nsim_fib6_rt_hw_flags_set(const struct nsim_fib6_rt *fib6_rt,
+#if IS_ENABLED(CONFIG_IPV6)
+static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct net *net = devlink_net(data->devlink);
+ int i;
+
+ for (i = 0; i < nrt6; i++)
+ fib6_info_hw_flags_set(net, rt_arr[i], false, false, true);
+}
+#else
+static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data,
+ const struct nsim_fib6_rt *fib6_rt,
bool trap)
{
+ struct net *net = devlink_net(data->devlink);
struct nsim_fib6_rt_nh *fib6_rt_nh;
list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
- fib6_info_hw_flags_set(fib6_rt_nh->rt, false, trap);
+ fib6_info_hw_flags_set(net, fib6_rt_nh->rt, false, trap, false);
}
+#else
+static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data,
+ const struct nsim_fib6_rt *fib6_rt,
+ bool trap)
+{
+}
+#endif
static int nsim_fib6_rt_add(struct nsim_fib_data *data,
- struct nsim_fib6_rt *fib6_rt,
- struct netlink_ext_ack *extack)
+ struct nsim_fib6_rt *fib6_rt)
{
int err;
- err = nsim_fib_account(&data->ipv6.fib, true, extack);
- if (err)
- return err;
-
err = rhashtable_insert_fast(&data->fib_rt_ht,
&fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv6 route");
+
+ if (err)
goto err_fib_dismiss;
- }
- nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+ msleep(1);
+ nsim_fib6_rt_hw_flags_set(data, fib6_rt, true);
return 0;
err_fib_dismiss:
- nsim_fib_account(&data->ipv6.fib, false, extack);
+ /* Drop the accounting that was increased from the notification
+ * context when FIB_EVENT_ENTRY_REPLACE was triggered.
+ */
+ nsim_fib_account(&data->ipv6.fib, false);
return err;
}
static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
struct nsim_fib6_rt *fib6_rt,
- struct nsim_fib6_rt *fib6_rt_old,
- struct netlink_ext_ack *extack)
+ struct nsim_fib6_rt *fib6_rt_old)
{
int err;
- /* We are replacing a route, so no need to change the accounting. */
+ /* We are replacing a route, so we need to remove the accounting that
+ * was increased when FIB_EVENT_ENTRY_REPLACE was triggered.
+ */
+ err = nsim_fib_account(&data->ipv6.fib, false);
+ if (err)
+ return err;
+
err = rhashtable_replace_fast(&data->fib_rt_ht,
&fib6_rt_old->common.ht_node,
&fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv6 route");
+
+ if (err)
return err;
- }
- nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+ msleep(1);
+ nsim_fib6_rt_hw_flags_set(data, fib6_rt, true);
- nsim_fib6_rt_hw_flags_set(fib6_rt_old, false);
+ nsim_fib6_rt_hw_flags_set(data, fib6_rt_old, false);
nsim_fib6_rt_destroy(fib6_rt_old);
return 0;
}
static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
- struct fib6_entry_notifier_info *fen6_info)
+ struct nsim_fib6_event *fib6_event)
{
- struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
int err;
- fib6_rt = nsim_fib6_rt_create(data, fen6_info);
+ if (data->fail_route_offload) {
+ /* For testing purposes, the user has set the debugfs
+ * fail_route_offload value to true. Simulate hardware programming
+ * latency and then fail.
+ */
+ msleep(1);
+ return -EINVAL;
+ }
+
+ fib6_rt = nsim_fib6_rt_create(data, fib6_event->rt_arr,
+ fib6_event->nrt6);
if (IS_ERR(fib6_rt))
return PTR_ERR(fib6_rt);
- fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
if (!fib6_rt_old)
- err = nsim_fib6_rt_add(data, fib6_rt, extack);
+ err = nsim_fib6_rt_add(data, fib6_rt);
else
- err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old, extack);
+ err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old);
if (err)
nsim_fib6_rt_destroy(fib6_rt);
@@ -683,112 +758,276 @@ static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
return err;
}
-static void
-nsim_fib6_rt_remove(struct nsim_fib_data *data,
- const struct fib6_entry_notifier_info *fen6_info)
+static void nsim_fib6_rt_remove(struct nsim_fib_data *data,
+ struct nsim_fib6_event *fib6_event)
{
- struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
+ int i;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
* notification for a route we do not have. Therefore, do not warn if
* route was not found.
*/
- fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
if (!fib6_rt)
return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
- if (fen6_info->nsiblings + 1 != fib6_rt->nhs) {
- nsim_fib6_rt_nh_del(fib6_rt, fen6_info->rt);
+ if (fib6_event->nrt6 != fib6_rt->nhs) {
+ for (i = 0; i < fib6_event->nrt6; i++)
+ nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
return;
}
rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
- nsim_fib_account(&data->ipv6.fib, false, extack);
nsim_fib6_rt_destroy(fib6_rt);
}
-static int nsim_fib6_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info,
- unsigned long event)
+static int nsim_fib6_event_init(struct nsim_fib6_event *fib6_event,
+ struct fib6_entry_notifier_info *fen6_info)
{
- struct fib6_entry_notifier_info *fen6_info;
- int err = 0;
+ struct fib6_info *rt = fen6_info->rt;
+ struct fib6_info **rt_arr;
+ struct fib6_info *iter;
+ unsigned int nrt6;
+ int i = 0;
+
+ nrt6 = fen6_info->nsiblings + 1;
+
+ rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
+ if (!rt_arr)
+ return -ENOMEM;
- fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+ fib6_event->rt_arr = rt_arr;
+ fib6_event->nrt6 = nrt6;
- if (fen6_info->rt->fib6_src.plen) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
+ rt_arr[0] = rt;
+ fib6_info_hold(rt);
+
+ if (!fen6_info->nsiblings)
return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ rt_arr[i + 1] = iter;
+ fib6_info_hold(iter);
+ i++;
}
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+}
+
+static void nsim_fib6_event_fini(struct nsim_fib6_event *fib6_event)
+{
+ int i;
+
+ for (i = 0; i < fib6_event->nrt6; i++)
+ nsim_rt6_release(fib6_event->rt_arr[i]);
+ kfree(fib6_event->rt_arr);
+}
+
+static int nsim_fib6_event(struct nsim_fib_data *data,
+ struct nsim_fib6_event *fib6_event,
+ unsigned long event)
+{
+ int err;
+
+ if (fib6_event->rt_arr[0]->fib6_src.plen)
+ return 0;
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = nsim_fib6_rt_insert(data, fen6_info);
+ err = nsim_fib6_rt_insert(data, fib6_event);
+ if (err)
+ goto err_rt_offload_failed_flag_set;
break;
case FIB_EVENT_ENTRY_APPEND:
- err = nsim_fib6_rt_append(data, fen6_info);
+ err = nsim_fib6_rt_append(data, fib6_event);
+ if (err)
+ goto err_rt_offload_failed_flag_set;
break;
case FIB_EVENT_ENTRY_DEL:
- nsim_fib6_rt_remove(data, fen6_info);
+ nsim_fib6_rt_remove(data, fib6_event);
break;
default:
break;
}
+ return 0;
+
+err_rt_offload_failed_flag_set:
+ nsim_fib6_rt_offload_failed_flag_set(data, fib6_event->rt_arr,
+ fib6_event->nrt6);
return err;
}
-static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, unsigned long event)
+static int nsim_fib_event(struct nsim_fib_event *fib_event)
{
int err = 0;
- switch (info->family) {
+ switch (fib_event->family) {
case AF_INET:
- err = nsim_fib4_event(data, info, event);
+ nsim_fib4_event(fib_event->data, &fib_event->fen_info,
+ fib_event->event);
+ fib_info_put(fib_event->fen_info.fi);
break;
case AF_INET6:
- err = nsim_fib6_event(data, info, event);
+ nsim_fib6_event(fib_event->data, &fib_event->fib6_event,
+ fib_event->event);
+ nsim_fib6_event_fini(&fib_event->fib6_event);
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
+ struct nsim_fib_event *fib_event,
+ unsigned long event)
+{
+ struct nsim_fib_data *data = fib_event->data;
+ struct fib_entry_notifier_info *fen_info;
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ fib_event->fen_info = *fen_info;
+ extack = info->extack;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib_account(&data->ipv4.fib, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
+ return err;
+ }
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib_account(&data->ipv4.fib, false);
+ break;
+ }
+
+ /* Take a reference on fib_info to prevent it from being
+ * freed while the event is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_event->fen_info.fi);
+
+ return 0;
+}
+
+static int nsim_fib6_prepare_event(struct fib_notifier_info *info,
+ struct nsim_fib_event *fib_event,
+ unsigned long event)
+{
+ struct nsim_fib_data *data = fib_event->data;
+ struct fib6_entry_notifier_info *fen6_info;
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+
+ err = nsim_fib6_event_init(&fib_event->fib6_event, fen6_info);
+ if (err)
+ return err;
+
+ extack = info->extack;
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib_account(&data->ipv6.fib, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
+ goto err_fib6_event_fini;
+ }
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib_account(&data->ipv6.fib, false);
break;
}
+ return 0;
+
+err_fib6_event_fini:
+ nsim_fib6_event_fini(&fib_event->fib6_event);
return err;
}
+static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct nsim_fib_event *fib_event;
+ int err;
+
+ if (info->family != AF_INET && info->family != AF_INET6)
+ /* netdevsim does not support 'RTNL_FAMILY_IP6MR' and
+ * 'RTNL_FAMILY_IPMR' and should ignore them.
+ */
+ return NOTIFY_DONE;
+
+ fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
+ if (!fib_event)
+ return NOTIFY_BAD;
+
+ fib_event->data = data;
+ fib_event->event = event;
+ fib_event->family = info->family;
+
+ switch (info->family) {
+ case AF_INET:
+ err = nsim_fib4_prepare_event(info, fib_event, event);
+ break;
+ case AF_INET6:
+ err = nsim_fib6_prepare_event(info, fib_event, event);
+ break;
+ }
+
+ if (err)
+ goto err_fib_prepare_event;
+
+ /* Enqueue the event and trigger the work */
+ spin_lock_bh(&data->fib_event_queue_lock);
+ list_add_tail(&fib_event->list, &data->fib_event_queue);
+ spin_unlock_bh(&data->fib_event_queue_lock);
+ schedule_work(&data->fib_event_work);
+
+ return NOTIFY_DONE;
+
+err_fib_prepare_event:
+ kfree(fib_event);
+ return NOTIFY_BAD;
+}
+
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
struct fib_notifier_info *info = ptr;
- int err = 0;
-
- /* IPv6 routes can be added via RAs from softIRQ. */
- spin_lock_bh(&data->fib_lock);
+ int err;
switch (event) {
case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = nsim_fib_rule_event(data, info,
event == FIB_EVENT_RULE_ADD);
- break;
-
+ return notifier_from_errno(err);
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info, event);
- break;
+ return nsim_fib_event_schedule_work(data, info, event);
}
- spin_unlock_bh(&data->fib_lock);
-
- return notifier_from_errno(err);
+ return NOTIFY_DONE;
}
static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
@@ -799,7 +1038,7 @@ static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
- nsim_fib_account(&data->ipv4.fib, false, NULL);
+ nsim_fib_account(&data->ipv4.fib, false);
nsim_fib4_rt_destroy(fib4_rt);
}
@@ -809,8 +1048,8 @@ static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
struct nsim_fib6_rt *fib6_rt;
fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
- nsim_fib6_rt_hw_flags_set(fib6_rt, false);
- nsim_fib_account(&data->ipv6.fib, false, NULL);
+ nsim_fib6_rt_hw_flags_set(data, fib6_rt, false);
+ nsim_fib_account(&data->ipv6.fib, false);
nsim_fib6_rt_destroy(fib6_rt);
}
@@ -838,6 +1077,9 @@ static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
fib_nb);
struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+ /* Flush the work to make sure there is no race with notifications. */
+ flush_work(&data->fib_event_work);
+
/* The notifier block is still not registered, so we do not need to
* take any locks here.
*/
@@ -847,8 +1089,8 @@ static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
nsim_fib_rt_free(fib_rt, data);
}
- data->ipv4.rules.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
+ atomic64_set(&data->ipv4.rules.num, 0ULL);
+ atomic64_set(&data->ipv6.rules.num, 0ULL);
}
static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
@@ -860,7 +1102,7 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL);
if (!nexthop)
- return NULL;
+ return ERR_PTR(-ENOMEM);
nexthop->id = info->id;
@@ -868,15 +1110,20 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
* occupy.
*/
- if (!info->is_grp) {
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
occ = 1;
- goto out;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ for (i = 0; i < info->nh_grp->num_nh; i++)
+ occ += info->nh_grp->nh_entries[i].weight;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ kfree(nexthop);
+ return ERR_PTR(-EOPNOTSUPP);
}
- for (i = 0; i < info->nh_grp->num_nh; i++)
- occ += info->nh_grp->nh_entries[i].weight;
-
-out:
nexthop->occ = occ;
return nexthop;
}
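For clarity, the occupancy rule nsim_nexthop_create() now implements: a single nexthop consumes one slot, while a group consumes the sum of its members' weights, since a weight-N member expands to N entries. A tiny sketch with made-up numbers:

#include <stdio.h>

/* Group occupancy = sum of member weights, as in the
 * NH_NOTIFIER_INFO_TYPE_GRP case above. Values are hypothetical.
 */
struct nh_grp_entry { unsigned int weight; };

int main(void)
{
        struct nh_grp_entry grp[] = { { .weight = 3 }, { .weight = 1 } };
        unsigned int occ = 0;
        unsigned int i;

        for (i = 0; i < sizeof(grp) / sizeof(grp[0]); i++)
                occ += grp[i].weight;

        printf("group occupancy: %u slots\n", occ);     /* 4 */
        return 0;
}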
@@ -889,22 +1136,28 @@ static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop)
static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ,
bool add, struct netlink_ext_ack *extack)
{
- int err = 0;
+ int i, err = 0;
if (add) {
- if (data->nexthops.num + occ <= data->nexthops.max) {
- data->nexthops.num += occ;
- } else {
- err = -ENOSPC;
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops");
- }
+ for (i = 0; i < occ; i++)
+ if (!atomic64_add_unless(&data->nexthops.num, 1,
+ data->nexthops.max)) {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops");
+ goto err_num_decrease;
+ }
} else {
- if (WARN_ON(occ > data->nexthops.num))
+ if (WARN_ON(occ > atomic64_read(&data->nexthops.num)))
return -EINVAL;
- data->nexthops.num -= occ;
+ atomic64_sub(occ, &data->nexthops.num);
}
return err;
+
+err_num_decrease:
+ atomic64_sub(i, &data->nexthops.num);
+ return err;
}
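The accounting above leans on atomic64_add_unless(), a lock-free "increment only while below the limit" primitive. A userspace C11 restatement of the idea, illustrative rather than the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bounded counter: reserve a slot only while below the limit,
 * using a CAS retry loop instead of a lock.
 */
static bool add_unless_at_limit(atomic_long *num, long limit)
{
        long cur = atomic_load(num);

        while (cur < limit) {
                if (atomic_compare_exchange_weak(num, &cur, cur + 1))
                        return true;    /* slot reserved */
                /* cur was reloaded by the failed CAS; retry */
        }
        return false;                   /* at capacity */
}

int main(void)
{
        atomic_long nexthops = 0;
        long max = 2;
        int i;

        for (i = 0; i < 3; i++)
                printf("add %d: %s\n", i,
                       add_unless_at_limit(&nexthops, max) ? "ok" : "ENOSPC");
        return 0;
}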
static int nsim_nexthop_add(struct nsim_fib_data *data,
@@ -972,8 +1225,8 @@ static int nsim_nexthop_insert(struct nsim_fib_data *data,
int err;
nexthop = nsim_nexthop_create(data, info);
- if (!nexthop)
- return -ENOMEM;
+ if (IS_ERR(nexthop))
+ return PTR_ERR(nexthop);
nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
nsim_nexthop_ht_params);
@@ -1097,10 +1350,52 @@ static void nsim_fib_set_max_all(struct nsim_fib_data *data,
}
}
+static void nsim_fib_event_work(struct work_struct *work)
+{
+ struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
+ fib_event_work);
+ struct nsim_fib_event *fib_event, *next_fib_event;
+
+ LIST_HEAD(fib_event_queue);
+
+ spin_lock_bh(&data->fib_event_queue_lock);
+ list_splice_init(&data->fib_event_queue, &fib_event_queue);
+ spin_unlock_bh(&data->fib_event_queue_lock);
+
+ mutex_lock(&data->fib_lock);
+ list_for_each_entry_safe(fib_event, next_fib_event, &fib_event_queue,
+ list) {
+ nsim_fib_event(fib_event);
+ list_del(&fib_event->list);
+ kfree(fib_event);
+ cond_resched();
+ }
+ mutex_unlock(&data->fib_lock);
+}
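The work function above is the classic splice-and-drain shape: detach the entire producer-visible queue in O(1) under the spinlock, then process it at leisure under the mutex, where msleep() and friends are legal. A toy single-threaded sketch of the queue handoff, with no real locking and a hypothetical event type:

#include <stdio.h>

struct event { int id; struct event *next; };

static struct event *queue;             /* producer-visible queue */

static void enqueue(struct event *e)    /* would run in atomic context */
{
        e->next = queue;
        queue = e;
}

static void drain(void)                 /* would run in the work item */
{
        struct event *local;

        /* "spin_lock": detach the whole queue in one step,
         * as list_splice_init() does above.
         */
        local = queue;
        queue = NULL;
        /* "spin_unlock" */

        while (local) {
                struct event *e = local;

                local = e->next;
                printf("processing event %d\n", e->id); /* may sleep here */
        }
}

int main(void)
{
        struct event a = { .id = 1 }, b = { .id = 2 };

        enqueue(&a);
        enqueue(&b);
        drain();
        return 0;
}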
+
+static int
+nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
+{
+ data->ddir = debugfs_create_dir("fib", nsim_dev->ddir);
+ if (IS_ERR(data->ddir))
+ return PTR_ERR(data->ddir);
+
+ data->fail_route_offload = false;
+ debugfs_create_bool("fail_route_offload", 0600, data->ddir,
+ &data->fail_route_offload);
+ return 0;
+}
+
+static void nsim_fib_debugfs_exit(struct nsim_fib_data *data)
+{
+ debugfs_remove_recursive(data->ddir);
+}
+
struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct nsim_fib_data *data;
+ struct nsim_dev *nsim_dev;
int err;
data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -1108,16 +1403,25 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
return ERR_PTR(-ENOMEM);
data->devlink = devlink;
- err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
+ nsim_dev = devlink_priv(devlink);
+ err = nsim_fib_debugfs_init(data, nsim_dev);
if (err)
goto err_data_free;
- spin_lock_init(&data->fib_lock);
+ err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
+ if (err)
+ goto err_debugfs_exit;
+
+ mutex_init(&data->fib_lock);
INIT_LIST_HEAD(&data->fib_rt_list);
err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
if (err)
goto err_rhashtable_nexthop_destroy;
+ INIT_WORK(&data->fib_event_work, nsim_fib_event_work);
+ INIT_LIST_HEAD(&data->fib_event_queue);
+ spin_lock_init(&data->fib_event_queue_lock);
+
nsim_fib_set_max_all(data, devlink);
data->nexthop_nb.notifier_call = nsim_nexthop_event_nb;
@@ -1161,11 +1465,15 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
err_nexthop_nb_unregister:
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
err_rhashtable_fib_destroy:
+ flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
err_rhashtable_nexthop_destroy:
rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
data);
+ mutex_destroy(&data->fib_lock);
+err_debugfs_exit:
+ nsim_fib_debugfs_exit(data);
err_data_free:
kfree(data);
return ERR_PTR(err);
@@ -1185,10 +1493,14 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+ flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
data);
+ WARN_ON_ONCE(!list_empty(&data->fib_event_queue));
WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
+ mutex_destroy(&data->fib_lock);
+ nsim_fib_debugfs_exit(data);
kfree(data);
}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 7178468302c8..aec92440eef1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -258,8 +258,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_devlink_port = nsim_get_devlink_port,
};
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 62bb9272dcb2..af36cd647bf5 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -11,6 +11,7 @@
#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
+#define IEEE8023_LINK_TIMER_NS 10000000
#define LINK_TIMER_LO 0x12
#define LINK_TIMER_HI 0x13
@@ -83,6 +84,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
switch (state->interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
phylink_mii_c22_pcs_get_state(lynx->mdio, state);
@@ -108,6 +110,30 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
state->link, state->an_enabled, state->an_complete);
}
+static int lynx_pcs_config_1000basex(struct mdio_device *pcs,
+ unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u32 link_timer;
+ int err;
+
+ link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+
+ err = mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
+ 0);
+ if (err)
+ return err;
+
+ return phylink_mii_c22_pcs_config(pcs, mode,
+ PHY_INTERFACE_MODE_1000BASEX,
+ advertising);
+}
+
static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
const unsigned long *advertising)
{
@@ -163,6 +189,8 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
switch (ifmode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return lynx_pcs_config_1000basex(lynx->mdio, mode, advertising);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
@@ -185,6 +213,13 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
return 0;
}
+static void lynx_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ phylink_mii_c22_pcs_an_restart(lynx->mdio);
+}
+
static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
int speed, int duplex)
{
@@ -290,6 +325,7 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
.pcs_get_state = lynx_pcs_get_state,
.pcs_config = lynx_pcs_config,
+ .pcs_an_restart = lynx_pcs_an_restart,
.pcs_link_up = lynx_pcs_link_up,
};
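A quick worked example of the link timer math used by lynx_pcs_config_1000basex() above, assuming the 8 ns SGMII clock period (125 MHz) that pcs-lynx.c defines near the top of the file, which is not visible in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the driver's macros, under the 8 ns assumption. */
#define SGMII_CLOCK_PERIOD_NS   8
#define LINK_TIMER_VAL(ns)      ((uint32_t)((ns) / SGMII_CLOCK_PERIOD_NS))
#define IEEE8023_LINK_TIMER_NS  10000000

int main(void)
{
        uint32_t link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);

        /* 10 ms / 8 ns = 1250000 = 0x1312d0 */
        printf("link_timer = %#x\n", (unsigned int)link_timer);
        printf("LINK_TIMER_LO <- %#06x\n", (unsigned int)(link_timer & 0xffff));
        printf("LINK_TIMER_HI <- %#06x\n", (unsigned int)(link_timer >> 16));
        return 0;
}

So 10 ms becomes 1250000 ticks, split as 0x12d0 into LINK_TIMER_LO and 0x13 into LINK_TIMER_HI.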
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index d0b36fd6c265..c2aa4c92edde 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -132,6 +132,11 @@
#define AT803X_MIN_DOWNSHIFT 2
#define AT803X_MAX_DOWNSHIFT 9
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
@@ -146,8 +151,11 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
int flags;
#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+#define AT803X_DISABLE_SMARTEEE BIT(1)
u16 clk_25m_reg;
u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -411,13 +419,32 @@ static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
struct at803x_priv *priv = phydev->priv;
- u32 freq, strength;
+ u32 freq, strength, tw;
unsigned int sel;
int ret;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return 0;
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
if (!ret) {
switch (freq) {
@@ -526,22 +553,47 @@ static void at803x_remove(struct phy_device *phydev)
regulator_disable(priv->vddio);
}
-static int at803x_clk_out_config(struct phy_device *phydev)
+static int at803x_smarteee_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
- int val;
+ u16 mask = 0, val = 0;
+ int ret;
- if (!priv->clk_25m_mask)
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
return 0;
- val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
- if (val < 0)
- return val;
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
- val &= ~priv->clk_25m_mask;
- val |= priv->clk_25m_reg;
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
- return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
}
static int at8031_pll_config(struct phy_device *phydev)
@@ -584,6 +636,10 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
ret = at803x_clk_out_config(phydev);
if (ret < 0)
return ret;
@@ -594,7 +650,13 @@ static int at803x_config_init(struct phy_device *phydev)
return ret;
}
- return 0;
+ /* Ar803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
}
static int at803x_ack_interrupt(struct phy_device *phydev)
@@ -1128,6 +1190,7 @@ static struct phy_driver at803x_driver[] = {
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
+ .config_aneg = at803x_config_aneg,
.soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
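To illustrate the register packing in at803x_smarteee_config() above: the 1G LPI wake timer occupies the high byte of SMARTEEE_CTL1 and the 100M timer the low byte, and only the bytes actually supplied via devicetree enter the modify mask. A sketch with hypothetical timer values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t tw_1g = 24, tw_100m = 12;       /* hypothetical qca,smarteee-tw-us-* values */
        uint16_t mask = 0, val = 0;

        if (tw_1g) {
                mask |= 0xff00;                 /* 1G timer: high byte */
                val |= (uint16_t)tw_1g << 8;
        }
        if (tw_100m) {
                mask |= 0x00ff;                 /* 100M timer: low byte */
                val |= tw_100m;
        }
        printf("phy_modify_mmd mask=%#06x val=%#06x\n",
               (unsigned int)mask, (unsigned int)val);  /* 0xffff 0x180c */
        return 0;
}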
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 15812001b3ff..e79297a4bae8 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -612,6 +612,7 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -633,6 +634,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
+ { PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 8a4ec3222168..fa0be591ae79 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -26,7 +26,46 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
-static int bcm54xx_config_clock_delay(struct phy_device *phydev);
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
+{
+ int rc, val;
+
+ /* handling PHY's internal RX clock delay */
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Disable RGMII RXC-RXD skew */
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ }
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ /* Enable RGMII RXC-RXD skew */
+ val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ }
+ rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ val);
+ if (rc < 0)
+ return rc;
+
+ /* handling PHY's internal TX clock delay */
+ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ /* Disable internal TX clock delay */
+ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ }
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Enable internal TX clock delay */
+ val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ }
+ rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
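The now-inlined bcm54xx_config_clock_delay() encodes the usual RGMII contract: the -id/-rxid/-txid interface suffix says which internal delays the PHY itself must add, with the MAC covering the rest. A summary of that mapping as a sketch; the driver programs it through the AUXCTL skew and shadow clock-control registers:

#include <stdbool.h>
#include <stdio.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

struct delay { bool rx_skew; bool tx_delay; };

static const struct delay delays[] = {
        [RGMII]      = { false, false },        /* MAC adds both delays */
        [RGMII_ID]   = { true,  true  },        /* PHY adds both delays */
        [RGMII_RXID] = { true,  false },        /* PHY adds RX delay only */
        [RGMII_TXID] = { false, true  },        /* PHY adds TX delay only */
};

int main(void)
{
        static const char *names[] = { "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid" };
        int i;

        for (i = 0; i < 4; i++)
                printf("%-10s rx_skew=%d tx_delay=%d\n",
                       names[i], delays[i].rx_skew, delays[i].tx_delay);
        return 0;
}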
static int bcm54210e_config_init(struct phy_device *phydev)
{
@@ -64,45 +103,62 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
-static int bcm54xx_config_clock_delay(struct phy_device *phydev)
+static int bcm54616s_config_init(struct phy_device *phydev)
{
int rc, val;
- /* handling PHY's internal RX clock delay */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_1000BASEX)
+ return 0;
+
+ /* Ensure the proper interface mode is selected: disable RGMII mode. */
val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ if (val < 0)
+ return val;
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN;
val |= MII_BCM54XX_AUXCTL_MISC_WREN;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- /* Disable RGMII RXC-RXD skew */
- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- }
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- /* Enable RGMII RXC-RXD skew */
- val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- }
rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
val);
if (rc < 0)
return rc;
- /* handling PHY's internal TX clock delay */
- val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- /* Disable internal TX clock delay */
- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
- }
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- /* Enable internal TX clock delay */
- val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
- }
- rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ /* Select 1000BASE-X register set (primary SerDes) */
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ if (val < 0)
+ return val;
+ val |= BCM54XX_SHD_MODE_1000BX;
+ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
if (rc < 0)
return rc;
- return 0;
+ /* Power down SerDes interface */
+ rc = phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
+ if (rc < 0)
+ return rc;
+
+ /* Select proper interface mode */
+ val &= ~BCM54XX_SHD_INTF_SEL_MASK;
+ val |= phydev->interface == PHY_INTERFACE_MODE_SGMII ?
+ BCM54XX_SHD_INTF_SEL_SGMII :
+ BCM54XX_SHD_INTF_SEL_GBIC;
+ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
+ if (rc < 0)
+ return rc;
+
+ /* Power up SerDes interface */
+ rc = phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
+ if (rc < 0)
+ return rc;
+
+ /* Select copper register set */
+ val &= ~BCM54XX_SHD_MODE_1000BX;
+ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
+ if (rc < 0)
+ return rc;
+
+ /* Power up copper interface */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
}
/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
@@ -195,6 +251,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54210E &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
return;
@@ -229,9 +286,10 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
- val |= BCM54810_SHD_SCR3_TRDDAPD;
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
}
@@ -283,15 +341,17 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm54xx_adjust_rxrefclk(phydev);
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
+ switch (BRCM_PHY_MODEL(phydev)) {
+ case PHY_ID_BCM54210E:
err = bcm54210e_config_init(phydev);
- if (err)
- return err;
- } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) {
+ break;
+ case PHY_ID_BCM54612E:
err = bcm54612e_config_init(phydev);
- if (err)
- return err;
- } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
+ break;
+ case PHY_ID_BCM54616S:
+ err = bcm54616s_config_init(phydev);
+ break;
+ case PHY_ID_BCM54810:
/* For BCM54810, we need to disable BroadR-Reach function */
val = bcm_phy_read_exp(phydev,
BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
@@ -299,24 +359,31 @@ static int bcm54xx_config_init(struct phy_device *phydev)
err = bcm_phy_write_exp(phydev,
BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
val);
- if (err < 0)
- return err;
+ break;
}
+ if (err)
+ return err;
bcm54xx_phydsp_config(phydev);
- /* Encode link speed into LED1 and LED3 pair (green/amber).
+ /* For non-SFP setups, encode link speed into LED1 and LED3 pair
+ * (green/amber).
* Also flash these two LEDs on activity. This means configuring
* them for MULTICOLOR and encoding link/activity into them.
+ * Don't do this for devices on an SFP module, since some of these
+ * use the LED outputs to control the SFP LOS signal, and changing
+ * these settings will cause LOS to malfunction.
*/
- val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
- BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
- bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
-
- val = BCM_LED_MULTICOLOR_IN_PHASE |
- BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
- BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
- bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+ if (!phy_on_sfp(phydev)) {
+ val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+ bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+ val = BCM_LED_MULTICOLOR_IN_PHASE |
+ BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+ bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+ }
return 0;
}
@@ -361,96 +428,6 @@ static int bcm54811_config_init(struct phy_device *phydev)
return err;
}
-static int bcm5482_config_init(struct phy_device *phydev)
-{
- int err, reg;
-
- err = bcm54xx_config_init(phydev);
-
- if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
- /*
- * Enable secondary SerDes and its use as an LED source
- */
- reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_SSD);
- bcm_phy_write_shadow(phydev, BCM5482_SHD_SSD,
- reg |
- BCM5482_SHD_SSD_LEDM |
- BCM5482_SHD_SSD_EN);
-
- /*
- * Enable SGMII slave mode and auto-detection
- */
- reg = BCM5482_SSD_SGMII_SLAVE | MII_BCM54XX_EXP_SEL_SSD;
- err = bcm_phy_read_exp(phydev, reg);
- if (err < 0)
- return err;
- err = bcm_phy_write_exp(phydev, reg, err |
- BCM5482_SSD_SGMII_SLAVE_EN |
- BCM5482_SSD_SGMII_SLAVE_AD);
- if (err < 0)
- return err;
-
- /*
- * Disable secondary SerDes powerdown
- */
- reg = BCM5482_SSD_1000BX_CTL | MII_BCM54XX_EXP_SEL_SSD;
- err = bcm_phy_read_exp(phydev, reg);
- if (err < 0)
- return err;
- err = bcm_phy_write_exp(phydev, reg,
- err & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
- if (err < 0)
- return err;
-
- /*
- * Select 1000BASE-X register set (primary SerDes)
- */
- reg = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
- bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE,
- reg | BCM54XX_SHD_MODE_1000BX);
-
- /*
- * LED1=ACTIVITYLED, LED3=LINKSPD[2]
- * (Use LED1 as secondary SerDes ACTIVITY LED)
- */
- bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1,
- BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) |
- BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2));
-
- /*
- * Auto-negotiation doesn't seem to work quite right
- * in this mode, so we disable it and force it to the
- * right speed/duplex setting. Only 'link status'
- * is important.
- */
- phydev->autoneg = AUTONEG_DISABLE;
- phydev->speed = SPEED_1000;
- phydev->duplex = DUPLEX_FULL;
- }
-
- return err;
-}
-
-static int bcm5482_read_status(struct phy_device *phydev)
-{
- int err;
-
- err = genphy_read_status(phydev);
-
- if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
- /*
- * Only link status matters for 1000Base-X mode, so force
- * 1000 Mbit/s full-duplex status
- */
- if (phydev->link) {
- phydev->speed = SPEED_1000;
- phydev->duplex = DUPLEX_FULL;
- }
- }
-
- return err;
-}
-
static int bcm5481_config_aneg(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
@@ -473,9 +450,20 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
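+/* mode_1000bx_en is set when the PHY operates in 1000BASE-X mode; it
+ * replaces the old PHY_BCM_FLAGS_MODE_1000BX dev_flags bit with
+ * driver-private state.
+ */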
+struct bcm54616s_phy_priv {
+ bool mode_1000bx_en;
+};
+
static int bcm54616s_probe(struct phy_device *phydev)
{
- int val, intf_sel;
+ struct bcm54616s_phy_priv *priv;
+ int val;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
if (val < 0)
@@ -487,8 +475,7 @@ static int bcm54616s_probe(struct phy_device *phydev)
* RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
* support is still missing as of now.
*/
- intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
- if (intf_sel == 1) {
+ if ((val & BCM54XX_SHD_INTF_SEL_MASK) == BCM54XX_SHD_INTF_SEL_RGMII) {
val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
if (val < 0)
return val;
@@ -499,7 +486,9 @@ static int bcm54616s_probe(struct phy_device *phydev)
* 1000BASE-X configuration.
*/
if (!(val & BCM54616S_100FX_MODE))
- phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
+ priv->mode_1000bx_en = true;
+
+ phydev->port = PORT_FIBRE;
}
return 0;
@@ -507,10 +496,11 @@ static int bcm54616s_probe(struct phy_device *phydev)
static int bcm54616s_config_aneg(struct phy_device *phydev)
{
+ struct bcm54616s_phy_priv *priv = phydev->priv;
int ret;
/* Aneg firstly. */
- if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ if (priv->mode_1000bx_en)
ret = genphy_c37_config_aneg(phydev);
else
ret = genphy_config_aneg(phydev);
@@ -523,9 +513,10 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
static int bcm54616s_read_status(struct phy_device *phydev)
{
+ struct bcm54616s_phy_priv *priv = phydev->priv;
int err;
- if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ if (priv->mode_1000bx_en)
err = genphy_c37_read_status(phydev);
else
err = genphy_read_status(phydev);
@@ -800,8 +791,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
/* PHY_GBIT_FEATURES */
- .config_init = bcm5482_config_init,
- .read_status = bcm5482_read_status,
+ .config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
}, {
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index fff371ca1086..be1224b4447b 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -554,6 +554,9 @@ static int dp83822_probe(struct phy_device *phydev)
dp83822_of_init(phydev);
+ if (dp83822->fx_enabled)
+ phydev->port = PORT_FIBRE;
+
return 0;
}
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index b30bc142d82e..755220c6451f 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -855,6 +855,10 @@ static int dp83869_probe(struct phy_device *phydev)
if (ret)
return ret;
+ if (dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_1000_BASE)
+ phydev->port = PORT_FIBRE;
+
return dp83869_config_init(phydev);
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index b632947cbcdf..a00a667454a9 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -37,16 +37,35 @@ MODULE_LICENSE("GPL");
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON BIT(1) /* IP101A/G APS Mode bit */
+#define IP101A_G_AUTO_MDIX_DIS BIT(11)
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */
#define IP101A_G_IRQ_ALL_MASK BIT(11) /* IRQ's inactive */
#define IP101A_G_IRQ_SPEED_CHANGE BIT(2)
#define IP101A_G_IRQ_DUPLEX_CHANGE BIT(1)
#define IP101A_G_IRQ_LINK_CHANGE BIT(0)
+#define IP101A_G_PHY_STATUS 18
+#define IP101A_G_MDIX BIT(9)
+#define IP101A_G_PHY_SPEC_CTRL 30
+#define IP101A_G_FORCE_MDIX BIT(3)
+#define IP101G_PAGE_CONTROL 0x14
+#define IP101G_PAGE_CONTROL_MASK GENMASK(4, 0)
#define IP101G_DIGITAL_IO_SPEC_CTRL 0x1d
#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32 BIT(2)
+#define IP101G_DEFAULT_PAGE 16
+
+#define IP101G_P1_CNT_CTRL 17
+#define CNT_CTRL_RX_EN BIT(13)
+#define IP101G_P8_CNT_CTRL 17
+#define CNT_CTRL_RDCLR_EN BIT(15)
+#define IP101G_CNT_REG 18
+
+#define IP175C_PHY_ID 0x02430d80
+#define IP1001_PHY_ID 0x02430d90
+#define IP101A_PHY_ID 0x02430c54
+
/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin
* (pin number 21). The hardware default is RXER (receive error) mode. But it
* can be configured to interrupt mode manually.
@@ -57,8 +76,19 @@ enum ip101gr_sel_intr32 {
IP101GR_SEL_INTR32_RXER,
};
+struct ip101g_hw_stat {
+ const char *name;
+ int page;
+};
+
+static struct ip101g_hw_stat ip101g_hw_stats[] = {
+ { "phy_crc_errors", 1 },
+ { "phy_symbol_errors", 11, },
+};
+
struct ip101a_g_phy_priv {
enum ip101gr_sel_intr32 sel_intr32;
+ u64 stats[ARRAY_SIZE(ip101g_hw_stats)];
};
static int ip175c_config_init(struct phy_device *phydev)
@@ -116,36 +146,10 @@ static int ip175c_config_init(struct phy_device *phydev)
return 0;
}
-static int ip1xx_reset(struct phy_device *phydev)
-{
- int bmcr;
-
- /* Software Reset PHY */
- bmcr = phy_read(phydev, MII_BMCR);
- if (bmcr < 0)
- return bmcr;
- bmcr |= BMCR_RESET;
- bmcr = phy_write(phydev, MII_BMCR, bmcr);
- if (bmcr < 0)
- return bmcr;
-
- do {
- bmcr = phy_read(phydev, MII_BMCR);
- if (bmcr < 0)
- return bmcr;
- } while (bmcr & BMCR_RESET);
-
- return 0;
-}
-
static int ip1001_config_init(struct phy_device *phydev)
{
int c;
- c = ip1xx_reset(phydev);
- if (c < 0)
- return c;
-
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP1001_SPEC_CTRL_STATUS_2);
if (c < 0)
@@ -184,7 +188,7 @@ static int ip175c_read_status(struct phy_device *phydev)
genphy_read_status(phydev);
else
/* Don't need to read status for switch ports */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ phydev->irq = PHY_MAC_INTERRUPT;
return 0;
}
@@ -228,30 +232,30 @@ static int ip101a_g_probe(struct phy_device *phydev)
return 0;
}
-static int ip101a_g_config_init(struct phy_device *phydev)
+static int ip101a_g_config_intr_pin(struct phy_device *phydev)
{
struct ip101a_g_phy_priv *priv = phydev->priv;
- int err, c;
+ int oldpage, err = 0;
- c = ip1xx_reset(phydev);
- if (c < 0)
- return c;
+ oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
+ if (oldpage < 0)
+ goto out;
/* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
switch (priv->sel_intr32) {
case IP101GR_SEL_INTR32_RXER:
- err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
- IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
+ err = __phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
if (err < 0)
- return err;
+ goto out;
break;
case IP101GR_SEL_INTR32_INTR:
- err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
- IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
- IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
+ err = __phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
if (err < 0)
- return err;
+ goto out;
break;
default:
@@ -265,17 +269,136 @@ static int ip101a_g_config_init(struct phy_device *phydev)
break;
}
+out:
+ return phy_restore_page(phydev, oldpage, err);
+}
+
+static int ip101a_config_init(struct phy_device *phydev)
+{
+ int ret;
+
/* Enable Auto Power Saving mode */
- c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
- c |= IP101A_G_APS_ON;
+ ret = phy_set_bits(phydev, IP10XX_SPEC_CTRL_STATUS, IP101A_G_APS_ON);
+ if (ret)
+ return ret;
+
+ return ip101a_g_config_intr_pin(phydev);
+}
+
+static int ip101g_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable the PHY counters */
+ ret = phy_modify_paged(phydev, 1, IP101G_P1_CNT_CTRL,
+ CNT_CTRL_RX_EN, CNT_CTRL_RX_EN);
+ if (ret)
+ return ret;
+
+ /* Clear error counters on read */
+ ret = phy_modify_paged(phydev, 8, IP101G_P8_CNT_CTRL,
+ CNT_CTRL_RDCLR_EN, CNT_CTRL_RDCLR_EN);
+ if (ret)
+ return ret;
+
+ return ip101a_g_config_intr_pin(phydev);
+}
+
+static int ip101a_g_read_status(struct phy_device *phydev)
+{
+ int oldpage, ret, stat1, stat2;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
+ if (oldpage < 0)
+ goto out;
+
+ ret = __phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ if (ret < 0)
+ goto out;
+ stat1 = ret;
+
+ ret = __phy_read(phydev, IP101A_G_PHY_SPEC_CTRL);
+ if (ret < 0)
+ goto out;
+ stat2 = ret;
+
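+ /* IP101A_G_AUTO_MDIX_DIS reports whether auto MDI/MDI-X is disabled;
+ * if it is, IP101A_G_FORCE_MDIX tells whether MDI-X is forced
+ */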
+ if (stat1 & IP101A_G_AUTO_MDIX_DIS) {
+ if (stat2 & IP101A_G_FORCE_MDIX)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ if (stat2 & IP101A_G_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ ret = 0;
+
+out:
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int ip101a_g_config_mdix(struct phy_device *phydev)
+{
+ u16 ctrl = 0, ctrl2 = 0;
+ int oldpage;
+ int ret = 0;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ ctrl = IP101A_G_AUTO_MDIX_DIS;
+ break;
+ case ETH_TP_MDI_X:
+ ctrl = IP101A_G_AUTO_MDIX_DIS;
+ ctrl2 = IP101A_G_FORCE_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ break;
+ default:
+ return 0;
+ }
+
+ oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
+ if (oldpage < 0)
+ goto out;
- return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ ret = __phy_modify(phydev, IP10XX_SPEC_CTRL_STATUS,
+ IP101A_G_AUTO_MDIX_DIS, ctrl);
+ if (ret)
+ goto out;
+
+ ret = __phy_modify(phydev, IP101A_G_PHY_SPEC_CTRL,
+ IP101A_G_FORCE_MDIX, ctrl2);
+
+out:
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int ip101a_g_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ip101a_g_config_mdix(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
}
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
{
- int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+ int err;
+ err = phy_read_paged(phydev, IP101G_DEFAULT_PAGE,
+ IP101A_G_IRQ_CONF_STATUS);
if (err < 0)
return err;
@@ -294,10 +417,12 @@ static int ip101a_g_config_intr(struct phy_device *phydev)
/* INTR pin used: Speed/link/duplex will cause an interrupt */
val = IP101A_G_IRQ_PIN_USED;
- err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+ err = phy_write_paged(phydev, IP101G_DEFAULT_PAGE,
+ IP101A_G_IRQ_CONF_STATUS, val);
} else {
val = IP101A_G_IRQ_ALL_MASK;
- err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+ err = phy_write_paged(phydev, IP101G_DEFAULT_PAGE,
+ IP101A_G_IRQ_CONF_STATUS, val);
if (err)
return err;
@@ -311,7 +436,8 @@ static irqreturn_t ip101a_g_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
- irq_status = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+ irq_status = phy_read_paged(phydev, IP101G_DEFAULT_PAGE,
+ IP101A_G_IRQ_CONF_STATUS);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
@@ -327,34 +453,171 @@ static irqreturn_t ip101a_g_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+/* The IP101A doesn't really have a page register. We just pretend to have one
+ * so we can use the paged versions of the callbacks of the IP101G.
+ */
+static int ip101a_read_page(struct phy_device *phydev)
+{
+ return IP101G_DEFAULT_PAGE;
+}
+
+static int ip101a_write_page(struct phy_device *phydev, int page)
+{
+ WARN_ONCE(page != IP101G_DEFAULT_PAGE, "wrong page selected\n");
+
+ return 0;
+}
+
+static int ip101g_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, IP101G_PAGE_CONTROL);
+}
+
+static int ip101g_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, IP101G_PAGE_CONTROL, page);
+}
+
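+/* Detect the page register by saving its value, writing all ones and
+ * reading back: on an IP101G only the page-select bits
+ * (IP101G_PAGE_CONTROL_MASK) are implemented and read back as ones,
+ * while on an IP101A the register does not exist and the check fails.
+ */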
+static int ip101a_g_has_page_register(struct phy_device *phydev)
+{
+ int oldval, val, ret;
+
+ oldval = phy_read(phydev, IP101G_PAGE_CONTROL);
+ if (oldval < 0)
+ return oldval;
+
+ ret = phy_write(phydev, IP101G_PAGE_CONTROL, 0xffff);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, IP101G_PAGE_CONTROL);
+ if (val < 0)
+ return val;
+
+ ret = phy_write(phydev, IP101G_PAGE_CONTROL, oldval);
+ if (ret)
+ return ret;
+
+ return val == IP101G_PAGE_CONTROL_MASK;
+}
+
+static int ip101a_g_match_phy_device(struct phy_device *phydev, bool ip101a)
+{
+ int ret;
+
+ if (phydev->phy_id != IP101A_PHY_ID)
+ return 0;
+
+ /* The IP101A and the IP101G share the same PHY identifier. The IP101G
+ * seems to be a successor of the IP101A and implements more functions.
+ * Amongst other things there is a page select register, which is not
+ * available on the IP101A. Use this to distinguish these two.
+ */
+ ret = ip101a_g_has_page_register(phydev);
+ if (ret < 0)
+ return ret;
+
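+ /* ret == 1 means a page register exists, i.e. this is an IP101G */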
+ return ip101a == !ret;
+}
+
+static int ip101a_match_phy_device(struct phy_device *phydev)
+{
+ return ip101a_g_match_phy_device(phydev, true);
+}
+
+static int ip101g_match_phy_device(struct phy_device *phydev)
+{
+ return ip101a_g_match_phy_device(phydev, false);
+}
+
+static int ip101g_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(ip101g_hw_stats);
+}
+
+static void ip101g_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip101g_hw_stats); i++)
+ strscpy(data + i * ETH_GSTRING_LEN,
+ ip101g_hw_stats[i].name, ETH_GSTRING_LEN);
+}
+
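+/* The counter registers are cleared on read (CNT_CTRL_RDCLR_EN is set in
+ * ip101g_config_init()), so accumulate each snapshot into priv->stats to
+ * report a monotonically increasing value.
+ */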
+static u64 ip101g_get_stat(struct phy_device *phydev, int i)
+{
+ struct ip101g_hw_stat stat = ip101g_hw_stats[i];
+ struct ip101a_g_phy_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ val = phy_read_paged(phydev, stat.page, IP101G_CNT_REG);
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void ip101g_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip101g_hw_stats); i++)
+ data[i] = ip101g_get_stat(phydev, i);
+}
+
static struct phy_driver icplus_driver[] = {
{
- .phy_id = 0x02430d80,
+ PHY_ID_MATCH_MODEL(IP175C_PHY_ID),
.name = "ICPlus IP175C",
- .phy_id_mask = 0x0ffffff0,
/* PHY_BASIC_FEATURES */
- .config_init = &ip175c_config_init,
- .config_aneg = &ip175c_config_aneg,
- .read_status = &ip175c_read_status,
+ .config_init = ip175c_config_init,
+ .config_aneg = ip175c_config_aneg,
+ .read_status = ip175c_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x02430d90,
+ PHY_ID_MATCH_MODEL(IP1001_PHY_ID),
.name = "ICPlus IP1001",
- .phy_id_mask = 0x0ffffff0,
/* PHY_GBIT_FEATURES */
- .config_init = &ip1001_config_init,
+ .config_init = ip1001_config_init,
+ .soft_reset = genphy_soft_reset,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x02430c54,
- .name = "ICPlus IP101A/G",
- .phy_id_mask = 0x0ffffff0,
- /* PHY_BASIC_FEATURES */
+ .name = "ICPlus IP101A",
+ .match_phy_device = ip101a_match_phy_device,
+ .probe = ip101a_g_probe,
+ .read_page = ip101a_read_page,
+ .write_page = ip101a_write_page,
+ .config_intr = ip101a_g_config_intr,
+ .handle_interrupt = ip101a_g_handle_interrupt,
+ .config_init = ip101a_config_init,
+ .config_aneg = ip101a_g_config_aneg,
+ .read_status = ip101a_g_read_status,
+ .soft_reset = genphy_soft_reset,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+}, {
+ .name = "ICPlus IP101G",
+ .match_phy_device = ip101g_match_phy_device,
.probe = ip101a_g_probe,
+ .read_page = ip101g_read_page,
+ .write_page = ip101g_write_page,
.config_intr = ip101a_g_config_intr,
.handle_interrupt = ip101a_g_handle_interrupt,
- .config_init = &ip101a_g_config_init,
+ .config_init = ip101g_config_init,
+ .config_aneg = ip101a_g_config_aneg,
+ .read_status = ip101a_g_read_status,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = ip101g_get_sset_count,
+ .get_strings = ip101g_get_strings,
+ .get_stats = ip101g_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
@@ -362,9 +625,9 @@ static struct phy_driver icplus_driver[] = {
module_phy_driver(icplus_driver);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
- { 0x02430d80, 0x0ffffff0 },
- { 0x02430d90, 0x0ffffff0 },
- { 0x02430c54, 0x0ffffff0 },
+ { PHY_ID_MATCH_MODEL(IP175C_PHY_ID) },
+ { PHY_ID_MATCH_MODEL(IP1001_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(IP101A_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 0ee23d29c0d4..bde3356a2f86 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -292,6 +292,7 @@ static int lxt973_probe(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, val);
/* Remember that the port is in fiber mode. */
phydev->priv = lxt973_probe;
+ phydev->port = PORT_FIBRE;
} else {
phydev->priv = NULL;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 620052c023a5..e26a5d663f8a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -684,16 +684,19 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
if (err < 0)
goto error;
- /* Do not touch the fiber page if we're in copper->sgmii mode */
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- return 0;
-
/* Then the fiber link */
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
- err = marvell_config_aneg_fiber(phydev);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ /* Do not touch the fiber advertisement if we're in copper->sgmii mode.
+ * Just ensure that SGMII-side autonegotiation is enabled.
+ * If we switched from some other mode to SGMII it may not be.
+ */
+ err = genphy_check_and_restart_aneg(phydev, false);
+ else
+ err = marvell_config_aneg_fiber(phydev);
if (err < 0)
goto error;
@@ -1552,6 +1555,7 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
phydev->asym_pause = 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->port = fiber ? PORT_FIBRE : PORT_TP;
if (phydev->autoneg == AUTONEG_ENABLE)
err = marvell_read_status_page_an(phydev, fiber, status);
@@ -2852,7 +2856,6 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1145_config_init,
.config_aneg = m88e1101_config_aneg,
- .read_status = genphy_read_status,
.config_intr = marvell_config_intr,
.handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1901ba277413..b1bb9b8e1e4e 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -631,6 +631,7 @@ static int mv3310_read_status_10gbaser(struct phy_device *phydev)
phydev->link = 1;
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
+ phydev->port = PORT_FIBRE;
return 0;
}
@@ -690,6 +691,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
phydev->duplex = cssr1 & MV_PCS_CSSR1_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
+ phydev->port = PORT_TP;
phydev->mdix = cssr1 & MV_PCS_CSSR1_MDIX ?
ETH_TP_MDI_X : ETH_TP_MDI;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2b42e46066b4..823518554079 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -543,8 +543,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
mutex_init(&bus->mdio_lock);
mutex_init(&bus->shared_lock);
- /* de-assert bus level PHY GPIO reset */
- gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW);
+ /* assert bus level PHY GPIO reset */
+ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
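+ /* GPIOD_OUT_HIGH hands the line back already driven to its active
+ * state, so the reset is asserted from the moment it is requested
+ */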
if (IS_ERR(gpiod)) {
err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
"mii_bus %s couldn't get reset GPIO\n",
@@ -553,8 +553,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
return err;
} else if (gpiod) {
bus->reset_gpiod = gpiod;
-
- gpiod_set_value_cansleep(gpiod, 1);
fsleep(bus->reset_delay_us);
gpiod_set_value_cansleep(gpiod, 0);
if (bus->reset_post_delay_us > 0)
@@ -740,7 +738,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
retval = bus->read(bus, addr, regnum);
@@ -766,7 +764,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
err = bus->write(bus, addr, regnum, val);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 54e0d75203da..a14a00328fa3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -341,14 +341,19 @@ static int kszphy_config_init(struct phy_device *phydev)
return kszphy_config_reset(phydev);
}
+static int ksz8041_fiber_mode(struct phy_device *phydev)
+{
+ struct device_node *of_node = phydev->mdio.dev.of_node;
+
+ return of_property_read_bool(of_node, "micrel,fiber-mode");
+}
+
static int ksz8041_config_init(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct device_node *of_node = phydev->mdio.dev.of_node;
-
/* Limit supported and advertised modes in fiber mode */
- if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
+ if (ksz8041_fiber_mode(phydev)) {
phydev->dev_flags |= MICREL_PHY_FXEN;
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
@@ -1176,6 +1181,9 @@ static int kszphy_probe(struct phy_device *phydev)
}
}
+ if (ksz8041_fiber_mode(phydev))
+ phydev->port = PORT_FIBRE;
+
/* Support legacy board-file configuration */
if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
priv->rmii_ref_clk_sel = true;
@@ -1295,6 +1303,7 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = ksz8081_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
@@ -1368,7 +1377,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
- .read_status = genphy_read_status,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
@@ -1389,7 +1397,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ886X Switch",
+ .name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile
index d8e22a4eeeff..78d84194f79a 100644
--- a/drivers/net/phy/mscc/Makefile
+++ b/drivers/net/phy/mscc/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_MICROSEMI_PHY) := mscc.o
mscc-objs := mscc_main.o
+mscc-objs += mscc_serdes.o
ifdef CONFIG_MACSEC
mscc-objs += mscc_macsec.o
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 9481bce94c2e..a50235fdf7d9 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -102,6 +102,7 @@ enum rgmii_clock_delay {
#define PHY_MCB_S6G_READ BIT(30)
#define PHY_S6G_PLL5G_CFG0 0x06
+#define PHY_S6G_PLL5G_CFG2 0x08
#define PHY_S6G_LCPLL_CFG 0x11
#define PHY_S6G_PLL_CFG 0x2b
#define PHY_S6G_COMMON_CFG 0x2c
@@ -121,6 +122,9 @@ enum rgmii_clock_delay {
#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
#define PHY_S6G_PLL_FSM_ENA_POS 7
+#define PHY_S6G_CFG2_FSM_DIS 1
+#define PHY_S6G_CFG2_FSM_CLK_BP 23
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
@@ -136,6 +140,10 @@ enum rgmii_clock_delay {
#define MSCC_PHY_PAGE_1588 0x1588 /* PTP (1588) */
#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
+#define MSCC_PHY_GPIO_CONTROL_2 14
+
+#define MSCC_PHY_COMA_MODE 0x2000 /* input(1) / output(0) */
+#define MSCC_PHY_COMA_OUTPUT 0x1000 /* value to output */
/* Extended Page 1 Registers */
#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
@@ -335,6 +343,10 @@ enum rgmii_clock_delay {
#define VSC8584_REVB 0x0001
#define MSCC_DEV_REV_MASK GENMASK(3, 0)
+#define MSCC_ROM_TRAP_SERDES_6G_CFG 0x1E48
+#define MSCC_RAM_TRAP_SERDES_6G_CFG 0x1E4F
+#define PATCH_VEC_ZERO_EN 0x0100
+
struct reg_val {
u16 reg;
u32 val;
@@ -412,6 +424,22 @@ struct vsc8531_edge_rate_table {
};
#endif /* CONFIG_OF_MDIO */
+enum csr_target {
+ MACRO_CTRL = 0x07,
+};
+
+u32 vsc85xx_csr_read(struct phy_device *phydev,
+ enum csr_target target, u32 reg);
+
+int vsc85xx_csr_write(struct phy_device *phydev,
+ enum csr_target target, u32 reg, u32 val);
+
+int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val);
+int phy_base_read(struct phy_device *phydev, u32 regnum);
+int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
+int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
+int vsc8584_cmd(struct phy_device *phydev, u16 val);
+
#if IS_ENABLED(CONFIG_MACSEC)
int vsc8584_macsec_init(struct phy_device *phydev);
void vsc8584_handle_macsec_interrupt(struct phy_device *phydev);
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 2f2157e3deab..3a7705228ed5 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -17,7 +17,7 @@
#include <linux/of.h>
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
-
+#include "mscc_serdes.h"
#include "mscc.h"
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
@@ -689,7 +689,7 @@ out_unlock:
}
/* phydev->bus->mdio_lock should be locked when using this function */
-static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
@@ -700,7 +700,7 @@ static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/* phydev->bus->mdio_lock should be locked when using this function */
-static int phy_base_read(struct phy_device *phydev, u32 regnum)
+int phy_base_read(struct phy_device *phydev, u32 regnum)
{
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
@@ -710,6 +710,113 @@ static int phy_base_read(struct phy_device *phydev, u32 regnum)
return __phy_package_read(phydev, regnum);
}
+u32 vsc85xx_csr_read(struct phy_device *phydev,
+ enum csr_target target, u32 reg)
+{
+ unsigned long deadline;
+ u32 val, val_l, val_h;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ */
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
+ /* non-MACsec access */
+ target &= 0x3;
+ else
+ target = 0;
+
+ /* Trigger CSR Action - Read into the CSR's */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
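+ /* timed out: return all-ones, which callers treat as a failed read */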
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return 0xffffffff;
+
+ /* Read the Least Significant Word (LSW) (17) */
+ val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
+
+ /* Read the Most Significant Word (MSW) (18) */
+ val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return (val_h << 16) | val_l;
+}
+
+int vsc85xx_csr_write(struct phy_device *phydev,
+ enum csr_target target, u32 reg, u32 val)
+{
+ unsigned long deadline;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ */
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ /* Write the Least Significant Word (LSW) (17) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
+
+ /* Write the Most Significant Word (MSW) (18) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
+
+ if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
+ /* non-MACsec access */
+ target &= 0x3;
+ else
+ target = 0;
+
+ /* Trigger CSR Action - Write into the CSR's */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return -ETIMEDOUT;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
/* bus->mdio_lock should be locked when using this function */
static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
{
@@ -719,7 +826,7 @@ static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
}
/* bus->mdio_lock should be locked when using this function */
-static int vsc8584_cmd(struct phy_device *phydev, u16 val)
+int vsc8584_cmd(struct phy_device *phydev, u16 val)
{
unsigned long deadline;
u16 reg_val;
@@ -1131,6 +1238,92 @@ out:
return ret;
}
+/* Access LCPLL Cfg_2 */
+static void vsc8584_pll5g_cfg2_wr(struct phy_device *phydev,
+ bool disable_fsm)
+{
+ u32 rd_dat;
+
+ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
+ rd_dat &= ~BIT(PHY_S6G_CFG2_FSM_DIS);
+ rd_dat |= (disable_fsm << PHY_S6G_CFG2_FSM_DIS);
+ vsc85xx_csr_write(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2, rd_dat);
+}
+
+/* Trigger a read to the specified MCB */
+static int vsc8584_mcb_rd_trig(struct phy_device *phydev,
+ u32 mcb_reg_addr, u8 mcb_slave_num)
+{
+ u32 rd_dat = 0;
+
+ /* read MCB */
+ vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
+ (0x40000000 | (1L << mcb_slave_num)));
+
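+ /* bit 30 is the read trigger; the hardware clears it once the MCB
+ * contents have been copied into the CSRs, which the poll below
+ * waits for
+ */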
+ return read_poll_timeout(vsc85xx_csr_read, rd_dat,
+ !(rd_dat & 0x40000000),
+ 4000, 200000, 0,
+ phydev, MACRO_CTRL, mcb_reg_addr);
+}
+
+/* Trigger a write to the specified MCB */
+static int vsc8584_mcb_wr_trig(struct phy_device *phydev,
+ u32 mcb_reg_addr,
+ u8 mcb_slave_num)
+{
+ u32 rd_dat = 0;
+
+ /* write back MCB */
+ vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
+ (0x80000000 | (1L << mcb_slave_num)));
+
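+ /* bit 31 is the write trigger; it is cleared by the hardware once
+ * the CSR contents have been committed back to the MCB slave
+ */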
+ return read_poll_timeout(vsc85xx_csr_read, rd_dat,
+ !(rd_dat & 0x80000000),
+ 4000, 200000, 0,
+ phydev, MACRO_CTRL, mcb_reg_addr);
+}
+
+/* Sequence to Reset LCPLL for the VIPER and ELISE PHY */
+static int vsc8584_pll5g_reset(struct phy_device *phydev)
+{
+ bool dis_fsm;
+ int ret = 0;
+
+ ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
+ if (ret < 0)
+ goto done;
+ dis_fsm = 1;
+
+ /* Reset LCPLL */
+ vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
+
+ /* write back LCPLL MCB */
+ ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
+ if (ret < 0)
+ goto done;
+
+ /* Sleep for 10 ms while the LCPLL is held in reset */
+ usleep_range(10000, 20000);
+
+ /* read LCPLL MCB into CSRs */
+ ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
+ if (ret < 0)
+ goto done;
+ dis_fsm = 0;
+
+ /* Release the Reset of LCPLL */
+ vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
+
+ /* write back LCPLL MCB */
+ ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
+ if (ret < 0)
+ goto done;
+
+ usleep_range(110000, 200000);
+done:
+ return ret;
+}
+
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_config_pre_init(struct phy_device *phydev)
{
@@ -1323,6 +1516,21 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
vsc8531->addr = addr;
}
+static void vsc85xx_coma_mode_release(struct phy_device *phydev)
+{
+ /* The coma mode (pin or reg) provides an optional feature that
+ * may be used to control when the PHYs become active.
+ * Alternatively the COMA_MODE pin may be connected low
+ * so that the PHYs are fully active once out of reset.
+ */
+
+ /* Enable output (mode=0) and write zero to it */
+ vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_EXTENDED_GPIO);
+ __phy_modify(phydev, MSCC_PHY_GPIO_CONTROL_2,
+ MSCC_PHY_COMA_MODE | MSCC_PHY_COMA_OUTPUT, 0);
+ vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD);
+}
+
static int vsc8584_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
@@ -1541,6 +1749,100 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return 0;
}
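+
+/* Kick an MCB transfer (op is PHY_MCB_S6G_READ or PHY_MCB_S6G_WRITE) for
+ * the given slave and poll until the hardware clears the trigger bit.
+ */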
+static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
+ u32 op)
+{
+ unsigned long deadline;
+ u32 val;
+ int ret;
+
+ ret = vsc85xx_csr_write(phydev, PHY_MCB_TARGET, reg,
+ op | (1 << mcb));
+ if (ret)
+ return -EINVAL;
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = vsc85xx_csr_read(phydev, PHY_MCB_TARGET, reg);
+
+ if (val == 0xffffffff)
+ return -EIO;
+
+ } while (time_before(jiffies, deadline) && (val & op));
+
+ if (val & op)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Trigger a read to the specified MCB */
+int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
+}
+
+/* Trigger a write to the specified MCB */
+int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
+}
+
+static int vsc8514_config_host_serdes(struct phy_device *phydev)
+{
+ int ret;
+ u16 val;
+
+ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+ if (ret)
+ return ret;
+
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+ val &= ~MAC_CFG_MASK;
+ val |= MAC_CFG_QSGMII;
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+ if (ret)
+ return ret;
+
+ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+ if (ret)
+ return ret;
+
+ ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
+ if (ret)
+ return ret;
+
+ ret = vsc8584_cmd(phydev,
+ PROC_CMD_MCB_ACCESS_MAC_CONF |
+ PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
+ if (ret) {
+ dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Apply 6G SerDes FOJI Algorithm
+ * Initial condition requirement:
+ * 1. hold 8051 in reset
+ * 2. disable patch vector 0, in order to allow IB cal poll during FoJi
+ * 3. deassert 8051 reset after change patch vector status
+ * 4. proceed with FoJi (vsc85xx_sd6g_config_v2)
+ */
+ vsc8584_micro_assert_reset(phydev);
+ val = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ /* clear bit 8, to disable patch vector 0 */
+ val &= ~PATCH_VEC_ZERO_EN;
+ ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, val);
+ /* Enable 8051 clock, don't set patch present, disable PRAM clock override */
+ vsc8584_micro_deassert_reset(phydev, false);
+
+ return vsc85xx_sd6g_config_v2(phydev);
+}
+
static int vsc8514_config_pre_init(struct phy_device *phydev)
{
/* These are the settings to override the silicon default
@@ -1569,8 +1871,16 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
+ struct device *dev = &phydev->mdio.dev;
unsigned int i;
u16 reg;
+ int ret;
+
+ ret = vsc8584_pll5g_reset(phydev);
+ if (ret < 0) {
+ dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
+ return ret;
+ }
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
@@ -1602,151 +1912,48 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
reg &= ~SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
- return 0;
-}
-
-static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev,
- u32 target, u32 reg)
-{
- unsigned long deadline;
- u32 val, val_l, val_h;
-
- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
-
- /* CSR registers are grouped under different Target IDs.
- * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
- * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
- * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
- * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
- */
-
- /* Setup the Target ID */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
- MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
-
- /* Trigger CSR Action - Read into the CSR's */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
- MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
- MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
- MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
-
- /* Wait for register access*/
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- usleep_range(500, 1000);
- val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
- } while (time_before(jiffies, deadline) &&
- !(val & MSCC_PHY_CSR_CNTL_19_CMD));
-
- if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
- return 0xffffffff;
-
- /* Read the Least Significant Word (LSW) (17) */
- val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
-
- /* Read the Most Significant Word (MSW) (18) */
- val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
-
- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_STANDARD);
-
- return (val_h << 16) | val_l;
-}
-
-static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev,
- u32 target, u32 reg, u32 val)
-{
- unsigned long deadline;
-
- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
-
- /* CSR registers are grouped under different Target IDs.
- * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
- * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
- * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
- * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ /* Add pre-patching commands to:
+ * 1. enable 8051 clock, operate 8051 clock at 125 MHz
+ * instead of HW default 62.5 MHz
+ * 2. write patch vector 0, to skip IB cal polling executed
+ * as part of the 0x80E0 ROM command
*/
+ vsc8584_micro_deassert_reset(phydev, false);
- /* Setup the Target ID */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
- MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
-
- /* Write the Least Significant Word (LSW) (17) */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
-
- /* Write the Most Significant Word (MSW) (18) */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
-
- /* Trigger CSR Action - Write into the CSR's */
- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
- MSCC_PHY_CSR_CNTL_19_CMD |
- MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
- MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
-
- /* Wait for register access */
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- usleep_range(500, 1000);
- val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
- } while (time_before(jiffies, deadline) &&
- !(val & MSCC_PHY_CSR_CNTL_19_CMD));
-
- if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
- return -ETIMEDOUT;
-
+ vsc8584_micro_assert_reset(phydev);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_STANDARD);
-
- return 0;
-}
-
-static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
- u32 op)
-{
- unsigned long deadline;
- u32 val;
- int ret;
-
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg,
- op | (1 << mcb));
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+ /* ROM address to trap, for patch vector 0 */
+ reg = MSCC_ROM_TRAP_SERDES_6G_CFG;
+ ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
if (ret)
- return -EINVAL;
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- usleep_range(500, 1000);
- val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg);
-
- if (val == 0xffffffff)
- return -EIO;
-
- } while (time_before(jiffies, deadline) && (val & op));
-
- if (val & op)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-/* Trigger a read to the specified MCB */
-static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
-{
- return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
-}
+ goto err;
+ /* RAM address to jump to, when patch vector 0 enabled */
+ reg = MSCC_RAM_TRAP_SERDES_6G_CFG;
+ ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
+ if (ret)
+ goto err;
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
+ ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+ if (ret)
+ goto err;
-/* Trigger a write to the specified MCB */
-static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
-{
- return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
+ /* Enable 8051 clock, don't set patch present
+ * yet, disable PRAM clock override
+ */
+ vsc8584_micro_deassert_reset(phydev, false);
+ return ret;
+ err:
+ /* restore the 8051 and bail out with the error */
+ vsc8584_micro_deassert_reset(phydev, false);
+ return ret;
}
static int vsc8514_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
- unsigned long deadline;
int ret, i;
- u16 val;
- u32 reg;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
@@ -1763,123 +1970,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
* do the correct init sequence for all PHYs that are package-critical
* in this pre-init function.
*/
- if (phy_package_init_once(phydev))
- vsc8514_config_pre_init(phydev);
-
- ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_EXTENDED_GPIO);
- if (ret)
- goto err;
-
- val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
-
- val &= ~MAC_CFG_MASK;
- val |= MAC_CFG_QSGMII;
- ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
- if (ret)
- goto err;
-
- ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_STANDARD);
- if (ret)
- goto err;
-
- ret = vsc8584_cmd(phydev,
- PROC_CMD_MCB_ACCESS_MAC_CONF |
- PROC_CMD_RST_CONF_PORT |
- PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
- if (ret)
- goto err;
-
- /* 6g mcb */
- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
- /* lcpll mcb */
- phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
- /* pll5gcfg0 */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_PLL5G_CFG0, 0x7036f145);
- if (ret)
- goto err;
-
- phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
- /* pllcfg */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_PLL_CFG,
- (3 << PHY_S6G_PLL_ENA_OFFS_POS) |
- (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS)
- | (0 << PHY_S6G_PLL_FSM_ENA_POS));
- if (ret)
- goto err;
-
- /* commoncfg */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_COMMON_CFG,
- (0 << PHY_S6G_SYS_RST_POS) |
- (0 << PHY_S6G_ENA_LANE_POS) |
- (0 << PHY_S6G_ENA_LOOP_POS) |
- (0 << PHY_S6G_QRATE_POS) |
- (3 << PHY_S6G_IF_MODE_POS));
- if (ret)
- goto err;
-
- /* misccfg */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_MISC_CFG, 1);
- if (ret)
- goto err;
-
- /* gpcfg */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_GPC_CFG, 768);
- if (ret)
- goto err;
-
- phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0);
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- usleep_range(500, 1000);
- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
- 0); /* read 6G MCB into CSRs */
- reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
- PHY_S6G_PLL_STATUS);
- if (reg == 0xffffffff) {
- phy_unlock_mdio_bus(phydev);
- return -EIO;
- }
-
- } while (time_before(jiffies, deadline) && (reg & BIT(12)));
-
- if (reg & BIT(12)) {
- phy_unlock_mdio_bus(phydev);
- return -ETIMEDOUT;
- }
-
- /* misccfg */
- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
- PHY_S6G_MISC_CFG, 0);
- if (ret)
- goto err;
-
- phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- usleep_range(500, 1000);
- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
- 0); /* read 6G MCB into CSRs */
- reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
- PHY_S6G_IB_STATUS0);
- if (reg == 0xffffffff) {
- phy_unlock_mdio_bus(phydev);
- return -EIO;
- }
-
- } while (time_before(jiffies, deadline) && !(reg & BIT(8)));
-
- if (!(reg & BIT(8))) {
- phy_unlock_mdio_bus(phydev);
- return -ETIMEDOUT;
+ if (phy_package_init_once(phydev)) {
+ ret = vsc8514_config_pre_init(phydev);
+ if (ret)
+ goto err;
+ ret = vsc8514_config_host_serdes(phydev);
+ if (ret)
+ goto err;
+ vsc85xx_coma_mode_release(phydev);
}
phy_unlock_mdio_bus(phydev);
diff --git a/drivers/net/phy/mscc/mscc_serdes.c b/drivers/net/phy/mscc/mscc_serdes.c
new file mode 100644
index 000000000000..b3e854f53d67
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc_serdes.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Author: Bjarni Jonasson <bjarni.jonasson@microchip.com>
+ * License: Dual MIT/GPL
+ * Copyright (c) 2021 Microsemi Corporation
+ */
+
+#include <linux/phy.h>
+#include "mscc_serdes.h"
+#include "mscc.h"
+
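+/* Detune/unlock the 5G LCPLL by clearing its gain field and setting the
+ * gain-enable bit; pll5g_tune() clears the gain-enable bit again once the
+ * SerDes configuration is done.
+ */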
+static int pll5g_detune(struct phy_device *phydev)
+{
+ u32 rd_dat;
+ int ret;
+
+ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
+ rd_dat &= ~PHY_S6G_PLL5G_CFG2_GAIN_MASK;
+ rd_dat |= PHY_S6G_PLL5G_CFG2_ENA_GAIN;
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_PLL5G_CFG2, rd_dat);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int pll5g_tune(struct phy_device *phydev)
+{
+ u32 rd_dat;
+ int ret;
+
+ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
+ rd_dat &= ~PHY_S6G_PLL5G_CFG2_ENA_GAIN;
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_PLL5G_CFG2, rd_dat);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_pll_cfg_wr(struct phy_device *phydev,
+ const u32 pll_ena_offs,
+ const u32 pll_fsm_ctrl_data,
+ const u32 pll_fsm_ena)
+{
+ int ret;
+
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_PLL_CFG,
+ (pll_fsm_ena << PHY_S6G_PLL_ENA_OFFS_POS) |
+ (pll_fsm_ctrl_data << PHY_S6G_PLL_FSM_CTRL_DATA_POS) |
+ (pll_ena_offs << PHY_S6G_PLL_FSM_ENA_POS));
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_common_cfg_wr(struct phy_device *phydev,
+ const u32 sys_rst,
+ const u32 ena_lane,
+ const u32 ena_loop,
+ const u32 qrate,
+ const u32 if_mode,
+ const u32 pwd_tx)
+{
+ /* ena_loop = 8 for eloop */
+ /* = 4 for floop */
+ /* = 2 for iloop */
+ /* = 1 for ploop */
+ /* qrate = 1 for SGMII, 0 for QSGMII */
+ /* if_mode = 1 for SGMII, 3 for QSGMII */
+
+ int ret;
+
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_COMMON_CFG,
+ (sys_rst << PHY_S6G_SYS_RST_POS) |
+ (ena_lane << PHY_S6G_ENA_LANE_POS) |
+ (ena_loop << PHY_S6G_ENA_LOOP_POS) |
+ (qrate << PHY_S6G_QRATE_POS) |
+ (if_mode << PHY_S6G_IF_MODE_POS));
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_des_cfg_wr(struct phy_device *phydev,
+ const u32 des_phy_ctrl,
+ const u32 des_mbtr_ctrl,
+ const u32 des_bw_hyst,
+ const u32 des_bw_ana,
+ const u32 des_cpmd_sel)
+{
+ u32 reg_val;
+ int ret;
+
+ /* configurable terms */
+ reg_val = (des_phy_ctrl << PHY_S6G_DES_PHY_CTRL_POS) |
+ (des_mbtr_ctrl << PHY_S6G_DES_MBTR_CTRL_POS) |
+ (des_cpmd_sel << PHY_S6G_DES_CPMD_SEL_POS) |
+ (des_bw_hyst << PHY_S6G_DES_BW_HYST_POS) |
+ (des_bw_ana << PHY_S6G_DES_BW_ANA_POS);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_DES_CFG,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_ib_cfg0_wr(struct phy_device *phydev,
+ const u32 ib_rtrm_adj,
+ const u32 ib_sig_det_clk_sel,
+ const u32 ib_reg_pat_sel_offset,
+ const u32 ib_cal_ena)
+{
+ u32 base_val;
+ u32 reg_val;
+ int ret;
+
+ /* constant terms */
+ base_val = 0x60a85837;
+ /* configurable terms */
+ reg_val = base_val | (ib_rtrm_adj << 25) |
+ (ib_sig_det_clk_sel << 16) |
+ (ib_reg_pat_sel_offset << 8) |
+ (ib_cal_ena << 3);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_CFG0,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_ib_cfg1_wr(struct phy_device *phydev,
+ const u32 ib_tjtag,
+ const u32 ib_tsdet,
+ const u32 ib_scaly,
+ const u32 ib_frc_offset,
+ const u32 ib_filt_offset)
+{
+ u32 ib_filt_val;
+ u32 reg_val = 0;
+ int ret;
+
+ /* constant terms */
+ ib_filt_val = 0xe0;
+ /* configurable terms */
+ reg_val = (ib_tjtag << 17) + (ib_tsdet << 12) + (ib_scaly << 8) +
+ ib_filt_val + (ib_filt_offset << 4) + (ib_frc_offset << 0);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_CFG1,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_ib_cfg2_wr(struct phy_device *phydev,
+ const u32 ib_tinfv,
+ const u32 ib_tcalv,
+ const u32 ib_ureg)
+{
+ u32 ib_cfg2_val;
+ u32 base_val;
+ int ret;
+
+ /* constant terms */
+ base_val = 0x0f878010;
+ /* configurable terms */
+ ib_cfg2_val = base_val | ((ib_tinfv) << 28) | ((ib_tcalv) << 5) |
+ (ib_ureg << 0);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_CFG2,
+ ib_cfg2_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_ib_cfg3_wr(struct phy_device *phydev,
+ const u32 ib_ini_hp,
+ const u32 ib_ini_mid,
+ const u32 ib_ini_lp,
+ const u32 ib_ini_offset)
+{
+ u32 reg_val;
+ int ret;
+
+ reg_val = (ib_ini_hp << 24) + (ib_ini_mid << 16) +
+ (ib_ini_lp << 8) + (ib_ini_offset << 0);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_CFG3,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_ib_cfg4_wr(struct phy_device *phydev,
+ const u32 ib_max_hp,
+ const u32 ib_max_mid,
+ const u32 ib_max_lp,
+ const u32 ib_max_offset)
+{
+ u32 reg_val;
+ int ret;
+
+ reg_val = (ib_max_hp << 24) + (ib_max_mid << 16) +
+ (ib_max_lp << 8) + (ib_max_offset << 0);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_CFG4,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_misc_cfg_wr(struct phy_device *phydev,
+ const u32 lane_rst)
+{
+ int ret;
+
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_MISC_CFG,
+ lane_rst);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_gp_cfg_wr(struct phy_device *phydev, const u32 gp_cfg_val)
+{
+ int ret;
+
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_GP_CFG,
+ gp_cfg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_dft_cfg2_wr(struct phy_device *phydev,
+ const u32 rx_ji_ampl,
+ const u32 rx_step_freq,
+ const u32 rx_ji_ena,
+ const u32 rx_waveform_sel,
+ const u32 rx_freqoff_dir,
+ const u32 rx_freqoff_ena)
+{
+ u32 reg_val;
+ int ret;
+
+ /* configurable terms */
+ reg_val = (rx_ji_ampl << 8) | (rx_step_freq << 4) |
+ (rx_ji_ena << 3) | (rx_waveform_sel << 2) |
+ (rx_freqoff_dir << 1) | rx_freqoff_ena;
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_IB_DFT_CFG2,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+static int vsc85xx_sd6g_dft_cfg0_wr(struct phy_device *phydev,
+ const u32 prbs_sel,
+ const u32 test_mode,
+ const u32 rx_dft_ena)
+{
+ u32 reg_val;
+ int ret;
+
+ /* configurable terms */
+ reg_val = (prbs_sel << 20) | (test_mode << 16) | (rx_dft_ena << 2);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_DFT_CFG0,
+ reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
+/* Access LCPLL Cfg_0 */
+static int vsc85xx_pll5g_cfg0_wr(struct phy_device *phydev,
+ const u32 selbgv820)
+{
+ u32 base_val;
+ u32 reg_val;
+ int ret;
+
+ /* constant terms */
+ base_val = 0x7036f145;
+ /* configurable terms */
+ reg_val = base_val | (selbgv820 << 23);
+ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
+ PHY_S6G_PLL5G_CFG0, reg_val);
+ if (ret)
+ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
+ return ret;
+}
+
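+/* Configure the 6G SerDes for SGMII: reset the RCPLL, run the input
+ * buffer calibration with a software-stepped clock, then restore the
+ * mission-mode settings and re-enable the transmitter.
+ */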
+int vsc85xx_sd6g_config_v2(struct phy_device *phydev)
+{
+ u32 ib_sig_det_clk_sel_cal = 0;
+ u32 ib_sig_det_clk_sel_mm = 7;
+ u32 pll_fsm_ctrl_data = 60;
+ unsigned long deadline;
+ u32 des_bw_ana_val = 3;
+ u32 ib_tsdet_cal = 16;
+ u32 ib_tsdet_mm = 5;
+ u32 ib_rtrm_adj;
+ u32 if_mode = 1;
+ u32 gp_iter = 5;
+ u32 val32 = 0;
+ u32 qrate = 1;
+ u32 iter;
+ int val = 0;
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* Detune/Unlock LCPLL */
+ ret = pll5g_detune(phydev);
+ if (ret)
+ return ret;
+
+ /* 0. Reset RCPLL */
+ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 0, 0, qrate, if_mode, 0);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
+ if (ret)
+ return ret;
+
+ /* 1. Configure sd6g for SGMII prior to sd6g_IB_CAL */
+ ib_rtrm_adj = 13;
+ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg3_wr(phydev, 0, 31, 1, 31);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 2. Start rcpll_fsm */
+ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 3. Wait for PLL cal to complete */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
+ PHY_S6G_PLL_STATUS);
+ /* wait for bit 12 to clear */
+ } while (time_before(jiffies, deadline) && (val32 & BIT(12)));
+
+ if (val32 & BIT(12))
+ return -ETIMEDOUT;
+
+ /* 4. Release digital reset and disable transmitter */
+ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 5. Apply a frequency offset on RX-side (using internal FoJi logic) */
+ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 2, 0, 0, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 2);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 6. Prepare required settings for IBCAL */
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_cal, 0, 0);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 7. Start IB_CAL */
+ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj,
+ ib_sig_det_clk_sel_cal, 0, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ /* 11 cycles (for ViperA) or 5 cycles (for ViperB & Elise) w/ SW clock */
+ for (iter = 0; iter < gp_iter; iter++) {
+ /* set gp(0) */
+ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 769);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ /* clear gp(0) */
+ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 0, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 8. Wait for IB cal to complete */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
+ PHY_S6G_IB_STATUS0);
+ /* wait for bit 8 to set */
+ } while (time_before(jiffies, deadline) && (~val32 & BIT(8)));
+
+ if (~val32 & BIT(8))
+ return -ETIMEDOUT;
+
+ /* 9. Restore cfg values for mission mode */
+ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 10. Re-enable transmitter */
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 11. Disable frequency offset generation (using internal FoJi logic) */
+ ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 0, 0, 0, 0, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* Tune/Re-lock LCPLL */
+ ret = pll5g_tune(phydev);
+ if (ret)
+ return ret;
+
+ /* 12. Configure for Final Configuration and Settings */
+ /* a. Reset RCPLL */
+ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 1, 0, qrate, if_mode, 0);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* b. Configure sd6g for desired operating mode */
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO);
+ ret = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+ if ((ret & MAC_CFG_MASK) == MAC_CFG_QSGMII) {
+ /* QSGMII */
+ pll_fsm_ctrl_data = 120;
+ qrate = 0;
+ if_mode = 3;
+ des_bw_ana_val = 5;
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC;
+
+ ret = vsc8584_cmd(phydev, val);
+ if (ret) {
+ dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ } else if ((ret & MAC_CFG_MASK) == MAC_CFG_SGMII) {
+ /* SGMII */
+ pll_fsm_ctrl_data = 60;
+ qrate = 1;
+ if_mode = 1;
+ des_bw_ana_val = 3;
+
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_SGMII_MAC;
+
+ ret = vsc8584_cmd(phydev, val);
+ if (ret) {
+ dev_err(&phydev->mdio.dev, "%s: SGMII error: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ } else {
+ dev_err(&phydev->mdio.dev, "%s: invalid mac_if: %x\n",
+ __func__, ret);
+ }
+
+ ret = phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ if (ret)
+ return ret;
+ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_pll5g_cfg0_wr(phydev, 4);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg3_wr(phydev, 0, 31, 1, 31);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
+ if (ret)
+ return ret;
+ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 13. Start rcpll_fsm */
+ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
+ if (ret)
+ return ret;
+ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+
+ /* 14. Wait for PLL cal to complete */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ if (ret)
+ return ret;
+ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
+ PHY_S6G_PLL_STATUS);
+ /* wait for bit 12 to clear */
+ } while (time_before(jiffies, deadline) && (val32 & BIT(12)));
+
+ if (val32 & BIT(12))
+ return -ETIMEDOUT;
+
+ /* release lane reset */
+ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
+ if (ret)
+ return ret;
+
+ return phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+}
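
The three status-poll loops above (steps 3, 8 and 14) all share one jiffies-deadline idiom. A minimal sketch of that idiom, with poll_status() and DONE_BIT as hypothetical stand-ins for the CSR read-back and the bit being watched:

    unsigned long deadline = jiffies + msecs_to_jiffies(TIMEOUT_MS);
    u32 val;

    do {
            usleep_range(500, 1000);        /* sleep between polls, don't spin */
            val = poll_status();            /* hypothetical CSR read-back */
    } while (time_before(jiffies, deadline) && (val & DONE_BIT));

    if (val & DONE_BIT)     /* re-test: the loop also exits on timeout */
            return -ETIMEDOUT;

The final re-test matters: the loop exits either on success or when the deadline passes, so only the flag itself distinguishes the two.
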
diff --git a/drivers/net/phy/mscc/mscc_serdes.h b/drivers/net/phy/mscc/mscc_serdes.h
new file mode 100644
index 000000000000..2a6371322af9
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc_serdes.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Copyright (c) 2021 Microsemi Corporation
+ */
+
+#ifndef _MSCC_SERDES_PHY_H_
+#define _MSCC_SERDES_PHY_H_
+
+#define PHY_S6G_PLL5G_CFG2_GAIN_MASK GENMASK(9, 5)
+#define PHY_S6G_PLL5G_CFG2_ENA_GAIN 1
+
+#define PHY_S6G_DES_PHY_CTRL_POS 13
+#define PHY_S6G_DES_MBTR_CTRL_POS 10
+#define PHY_S6G_DES_CPMD_SEL_POS 8
+#define PHY_S6G_DES_BW_HYST_POS 5
+#define PHY_S6G_DES_BW_ANA_POS 1
+#define PHY_S6G_DES_CFG 0x21
+#define PHY_S6G_IB_CFG0 0x22
+#define PHY_S6G_IB_CFG1 0x23
+#define PHY_S6G_IB_CFG2 0x24
+#define PHY_S6G_IB_CFG3 0x25
+#define PHY_S6G_IB_CFG4 0x26
+#define PHY_S6G_GP_CFG 0x2E
+#define PHY_S6G_DFT_CFG0 0x35
+#define PHY_S6G_IB_DFT_CFG2 0x37
+
+int vsc85xx_sd6g_config_v2(struct phy_device *phydev);
+
+#endif /* _MSCC_SERDES_PHY_H_ */
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 5a8c8eb18582..46160baaafe3 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -19,8 +19,6 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
-#define DEBUG
-
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 45f75533c47c..1be07e45d314 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -308,7 +308,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
- cmd->base.port = PORT_MII;
+ cmd->base.port = phydev->port;
cmd->base.transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
@@ -724,7 +724,7 @@ static int phy_check_link_status(struct phy_device *phydev)
{
int err;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
/* Keep previous state if loopback is enabled because some PHYs
* report that Link is Down when loopback is enabled.
@@ -1145,7 +1145,7 @@ void phy_state_machine(struct work_struct *work)
}
/* Only re-schedule a PHY state machine change if we are polling the
- * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
+ * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
* between states from phy_mac_interrupt().
*
* In state PHY_HALTED the PHY gets suspended, so rescheduling the
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 80c2e646c093..ce495473cd5d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -300,50 +300,22 @@ static int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- ret = phy_resume(phydev);
+ ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
-no_resume:
- if (phydev->attached_dev && phydev->adjust_link)
- phy_start_machine(phydev);
-
- return 0;
-}
-
-static int mdio_bus_phy_restore(struct device *dev)
-{
- struct phy_device *phydev = to_phy_device(dev);
- struct net_device *netdev = phydev->attached_dev;
- int ret;
-
- if (!netdev)
- return 0;
-
- ret = phy_init_hw(phydev);
+ ret = phy_resume(phydev);
if (ret < 0)
return ret;
-
+no_resume:
if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev);
return 0;
}
-static const struct dev_pm_ops mdio_bus_phy_pm_ops = {
- .suspend = mdio_bus_phy_suspend,
- .resume = mdio_bus_phy_resume,
- .freeze = mdio_bus_phy_suspend,
- .thaw = mdio_bus_phy_resume,
- .restore = mdio_bus_phy_restore,
-};
-
-#define MDIO_BUS_PHY_PM_OPS (&mdio_bus_phy_pm_ops)
-
-#else
-
-#define MDIO_BUS_PHY_PM_OPS NULL
-
+static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
+ mdio_bus_phy_resume);
#endif /* CONFIG_PM */
/**
@@ -554,7 +526,7 @@ static const struct device_type mdio_bus_phy_type = {
.name = "PHY",
.groups = phy_dev_groups,
.release = phy_device_release,
- .pm = MDIO_BUS_PHY_PM_OPS,
+ .pm = pm_ptr(&mdio_bus_phy_pm_ops),
};
static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
@@ -606,6 +578,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
dev->pause = 0;
dev->asym_pause = 0;
dev->link = 0;
+ dev->port = PORT_TP;
dev->interface = PHY_INTERFACE_MODE_GMII;
dev->autoneg = AUTONEG_ENABLE;
@@ -1143,10 +1116,19 @@ int phy_init_hw(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->drv->config_init)
+ if (phydev->drv->config_init) {
ret = phydev->drv->config_init(phydev);
+ if (ret < 0)
+ return ret;
+ }
- return ret;
+ if (phydev->drv->config_intr) {
+ ret = phydev->drv->config_intr(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(phy_init_hw);
@@ -1166,8 +1148,8 @@ char *phy_attached_info_irq(struct phy_device *phydev)
case PHY_POLL:
irq_str = "POLL";
break;
- case PHY_IGNORE_INTERRUPT:
- irq_str = "IGNORE";
+ case PHY_MAC_INTERRUPT:
+ irq_str = "MAC";
break;
default:
snprintf(irq_num, sizeof(irq_num), "%d", phydev->irq);
@@ -1376,6 +1358,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->sfp_bus_attached)
dev->sfp_bus = phydev->sfp_bus;
+ else if (dev->sfp_bus)
+ phydev->is_on_sfp_module = true;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1403,6 +1387,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->state = PHY_READY;
+ /* Port is set to PORT_TP by default and the actual PHY driver will set
+ * it to a different value depending on the PHY configuration. If we have
+ * the generic PHY driver we can't figure it out, thus set the old
+ * legacy PORT_MII value.
+ */
+ if (using_genphy)
+ phydev->port = PORT_MII;
+
/* Initial carrier state is off as the phy is about to be
* (re)initialized.
*/
@@ -1740,7 +1732,7 @@ int __phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = phydev->drv;
int ret;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
if (!phydrv || !phydrv->resume)
return 0;
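
The hunk above collapses the hand-rolled dev_pm_ops table and its #ifdef CONFIG_PM fallback into stock helpers. A minimal sketch of the pattern, with foo_suspend()/foo_resume() as hypothetical callbacks:

    #include <linux/pm.h>

    /* fills suspend/resume/freeze/thaw/poweroff/restore from one pair */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static const struct device_type foo_type = {
            .name = "foo",
            .pm   = pm_ptr(&foo_pm_ops),    /* NULL when CONFIG_PM=n */
    };

pm_ptr() evaluates to NULL on !CONFIG_PM builds, which is why the explicit MDIO_BUS_PHY_PM_OPS/NULL dance is no longer needed.
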
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 84f6e197f965..053c92e02cd8 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -306,6 +306,10 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 2500baseX_Full);
break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ phylink_set(pl->supported, 5000baseT_Full);
+ break;
+
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 99ecd6c4c15a..821e85a97367 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -60,6 +60,9 @@
#define RTL_LPADV_5000FULL BIT(6)
#define RTL_LPADV_2500FULL BIT(5)
+#define RTL9000A_GINMR 0x14
+#define RTL9000A_GINMR_LINK_STATUS BIT(4)
+
#define RTLGEN_SPEED_MASK 0x0630
#define RTL_GENERIC_PHYID 0x001cc800
@@ -655,6 +658,122 @@ static int rtlgen_resume(struct phy_device *phydev)
return ret;
}
+static int rtl9000a_config_init(struct phy_device *phydev)
+{
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int rtl9000a_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ if (ret == 1)
+ ret = genphy_soft_reset(phydev);
+
+ return ret;
+}
+
+static int rtl9000a_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+ if (ret & CTL1000_AS_MASTER)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ if (ret & LPA_1000MSRES)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+ else
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int rtl9000a_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL8211F_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl9000a_config_intr(struct phy_device *phydev)
+{
+ u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl9000a_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ val = (u16)~RTL9000A_GINMR_LINK_STATUS;
+ err = phy_write_paged(phydev, 0xa42, RTL9000A_GINMR, val);
+ } else {
+ val = ~0;
+ err = phy_write_paged(phydev, 0xa42, RTL9000A_GINMR, val);
+ if (err)
+ return err;
+
+ err = rtl9000a_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl9000a_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, RTL8211F_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8211F_INER_LINK_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -823,6 +942,19 @@ static struct phy_driver realtek_drvs[] = {
.handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001ccb00),
+ .name = "RTL9000AA_RTL9000AN Ethernet",
+ .features = PHY_BASIC_T1_FEATURES,
+ .config_init = rtl9000a_config_init,
+ .config_aneg = rtl9000a_config_aneg,
+ .read_status = rtl9000a_read_status,
+ .config_intr = rtl9000a_config_intr,
+ .handle_interrupt = rtl9000a_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
},
};
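
rtl9000a_config_aneg() above leans on the three-way return of phy_modify_changed(): a negative errno on MDIO failure, 0 when the masked bits already held the requested value, and 1 when the register was actually rewritten. A hedged sketch of the idiom:

    ret = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
    if (ret < 0)
            return ret;                      /* MDIO error */
    if (ret == 1)                            /* value really changed */
            ret = genphy_soft_reset(phydev); /* make the PHY re-negotiate */
    return ret;

Gating the soft reset on ret == 1 avoids needlessly restarting the link when the master/slave role is already set as requested.
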
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 20b91f5dfc6e..2e11176c6b94 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -44,6 +44,17 @@ static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
phylink_set(modes, 2500baseX_Full);
}
+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ unsigned long *modes)
+{
+ /* The Ubiquiti U-Fiber Instant module claims to support all transceiver
+ * types, including 10G Ethernet, which is not true. So clear all claimed
+ * modes and set only the one mode the module supports: 1000baseX_Full.
+ */
+ phylink_zero(modes);
+ phylink_set(modes, 1000baseX_Full);
+}
+
static const struct sfp_quirk sfp_quirks[] = {
{
// Alcatel Lucent G-010S-P can operate at 2500base-X, but
@@ -63,6 +74,10 @@ static const struct sfp_quirk sfp_quirks[] = {
.vendor = "HUAWEI",
.part = "MA5671A",
.modes = sfp_quirk_2500basex,
+ }, {
+ .vendor = "UBNT",
+ .part = "UF-INSTANT",
+ .modes = sfp_quirk_ubnt_uf_instant,
},
};
@@ -265,6 +280,12 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ /* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
+ if (id->base.e100_base_fx || id->base.e100_base_lx)
+ phylink_set(modes, 100baseFX_Full);
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ phylink_set(modes, 100baseFX_Full);
+
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
@@ -337,11 +358,16 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* the bitrate to determine supported modes. Some BiDi modules (eg,
* 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing
* wavelengths, so do not set any transceiver bits.
+ *
+ * Do the same for modules supporting 2500BASE-X. Note that some
+ * modules use 2500Mbaud rather than 3100 or 3200Mbaud for
+ * 2500BASE-X, so we allow some slack here.
*/
- if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
- /* If the bit rate allows 1000baseX */
- if (br_nom && br_min <= 1300 && br_max >= 1200)
+ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
+ if (br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ if (br_min <= 3200 && br_max >= 2500)
+ phylink_set(modes, 2500baseX_Full);
}
if (bus->sfp_quirk)
@@ -374,6 +400,9 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
phylink_test(link_modes, 10000baseT_Full))
return PHY_INTERFACE_MODE_10GBASER;
+ if (phylink_test(link_modes, 5000baseT_Full))
+ return PHY_INTERFACE_MODE_5GBASER;
+
if (phylink_test(link_modes, 2500baseX_Full))
return PHY_INTERFACE_MODE_2500BASEX;
@@ -384,6 +413,9 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
if (phylink_test(link_modes, 1000baseX_Full))
return PHY_INTERFACE_MODE_1000BASEX;
+ if (phylink_test(link_modes, 100baseFX_Full))
+ return PHY_INTERFACE_MODE_100BASEX;
+
dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n");
return PHY_INTERFACE_MODE_NA;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 91d74c1a920a..7998acc689b7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/hwmon.h>
@@ -258,6 +259,9 @@ struct sfp {
char *hwmon_name;
#endif
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dir;
+#endif
};
static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -273,8 +277,21 @@ static const struct sff_data sff_data = {
static bool sfp_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFF8024_ID_SFP &&
- id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+ if (id->base.phys_id == SFF8024_ID_SFP &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP)
+ return true;
+
+ /* The Ubiquiti U-Fiber Instant SFP GPON module stores phys id SFF
+ * instead of SFP in its EEPROM. Therefore mark this module explicitly
+ * as supported based on a vendor name and part number match.
+ */
+ if (id->base.phys_id == SFF8024_ID_SFF_8472 &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP &&
+ !memcmp(id->base.vendor_name, "UBNT            ", 16) &&
+ !memcmp(id->base.vendor_pn, "UF-INSTANT      ", 16))
+ return true;
+
+ return false;
}
static const struct sff_data sfp_data = {
@@ -336,19 +353,11 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
size_t len)
{
struct i2c_msg msgs[2];
- size_t block_size;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ size_t block_size = sfp->i2c_block_size;
size_t this_len;
- u8 bus_addr;
int ret;
- if (a2) {
- block_size = 16;
- bus_addr = 0x51;
- } else {
- block_size = sfp->i2c_block_size;
- bus_addr = 0x50;
- }
-
msgs[0].addr = bus_addr;
msgs[0].flags = 0;
msgs[0].len = 1;
@@ -1282,6 +1291,20 @@ static void sfp_hwmon_probe(struct work_struct *work)
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
int err, i;
+ /* The hwmon interface needs to access 16bit registers atomically to
+ * guarantee coherency of the diagnostic monitoring data. If coherency
+ * cannot be guaranteed because the EEPROM is broken in a way that does
+ * not support atomic 16bit reads, then registration of the hwmon
+ * device has to be skipped.
+ */
+ if (sfp->i2c_block_size < 2) {
+ dev_info(sfp->dev,
+ "skipping hwmon device registration due to broken EEPROM\n");
+ dev_info(sfp->dev,
+ "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
+ return;
+ }
+
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
if (err < 0) {
if (sfp->hwmon_tries--) {
@@ -1390,6 +1413,54 @@ static void sfp_module_tx_enable(struct sfp *sfp)
sfp_set_state(sfp, sfp->state);
}
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int sfp_debug_state_show(struct seq_file *s, void *data)
+{
+ struct sfp *sfp = s->private;
+
+ seq_printf(s, "Module state: %s\n",
+ mod_state_to_str(sfp->sm_mod_state));
+ seq_printf(s, "Module probe attempts: %d %d\n",
+ R_PROBE_RETRY_INIT - sfp->sm_mod_tries_init,
+ R_PROBE_RETRY_SLOW - sfp->sm_mod_tries);
+ seq_printf(s, "Device state: %s\n",
+ dev_state_to_str(sfp->sm_dev_state));
+ seq_printf(s, "Main state: %s\n",
+ sm_state_to_str(sfp->sm_state));
+ seq_printf(s, "Fault recovery remaining retries: %d\n",
+ sfp->sm_fault_retries);
+ seq_printf(s, "PHY probe remaining retries: %d\n",
+ sfp->sm_phy_retries);
+ seq_printf(s, "moddef0: %d\n", !!(sfp->state & SFP_F_PRESENT));
+ seq_printf(s, "rx_los: %d\n", !!(sfp->state & SFP_F_LOS));
+ seq_printf(s, "tx_fault: %d\n", !!(sfp->state & SFP_F_TX_FAULT));
+ seq_printf(s, "tx_disable: %d\n", !!(sfp->state & SFP_F_TX_DISABLE));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sfp_debug_state);
+
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+ sfp->debugfs_dir = debugfs_create_dir(dev_name(sfp->dev), NULL);
+
+ debugfs_create_file("state", 0600, sfp->debugfs_dir, sfp,
+ &sfp_debug_state_fops);
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+ debugfs_remove_recursive(sfp->debugfs_dir);
+}
+#else
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+}
+#endif
+
static void sfp_module_tx_fault_reset(struct sfp *sfp)
{
unsigned int state = sfp->state;
@@ -1482,15 +1553,19 @@ static void sfp_sm_link_down(struct sfp *sfp)
static void sfp_sm_link_check_los(struct sfp *sfp)
{
- unsigned int los = sfp->state & SFP_F_LOS;
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+ bool los = false;
/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
- * are set, we assume that no LOS signal is available.
+ * are set, we assume that no LOS signal is available. If both are
+ * set, we assume LOS is not implemented (and is meaningless.)
*/
- if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
- los ^= SFP_F_LOS;
- else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
- los = 0;
+ if (los_options == los_inverted)
+ los = !(sfp->state & SFP_F_LOS);
+ else if (los_options == los_normal)
+ los = !!(sfp->state & SFP_F_LOS);
if (los)
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1500,18 +1575,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_LOW) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_HIGH);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_LOW) ||
+ (los_options == los_normal && event == SFP_E_LOS_HIGH);
}
static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_HIGH) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_LOW);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_HIGH) ||
+ (los_options == los_normal && event == SFP_E_LOS_LOW);
}
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
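
The rewrite above reduces the two EEPROM option bits to a four-case decode. A host-byte-order sketch (the driver itself compares __be16 values directly to avoid byte-swapping on every call); sfp_los_asserted() is a hypothetical helper for illustration:

    static bool sfp_los_asserted(u16 options, bool pin_high)
    {
            u16 los = options & (SFP_OPTIONS_LOS_INVERTED |
                                 SFP_OPTIONS_LOS_NORMAL);

            if (los == SFP_OPTIONS_LOS_NORMAL)
                    return pin_high;        /* pin high = loss of signal */
            if (los == SFP_OPTIONS_LOS_INVERTED)
                    return !pin_high;       /* pin low  = loss of signal */
            return false;   /* neither bit, or both: LOS not usable */
    }

The behavioural change is the "both bits set" case: it now falls through to "ignore LOS", where the old code treated it as inverted.
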
@@ -1642,26 +1721,30 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
return 0;
}
-/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a
- * single read. Switch back to reading 16 byte blocks unless we have
- * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly,
- * some VSOL V2801F have the vendor name changed to OEM.
+/* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL
+ * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do
+ * not support multibyte reads from the EEPROM. Each multi-byte read
+ * operation returns just one byte of EEPROM followed by zeros. There is
+ * no way to identify which modules are using Realtek RTL8672 and RTL9601C
+ * chips. Moreover, every OEM of the V-SOL V2801F module puts its own
+ * vendor name and vendor id into the EEPROM, so there is no way to even
+ * detect whether a module is a V-SOL V2801F. Therefore check for those
+ * zeros in the read data and, based on that check, switch to reading
+ * the EEPROM one byte at a time.
*/
-static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base)
+static bool sfp_id_needs_byte_io(struct sfp *sfp, void *buf, size_t len)
{
- if (!memcmp(base->vendor_name, "VSOL ", 16))
- return 1;
- if (!memcmp(base->vendor_name, "OEM ", 16) &&
- !memcmp(base->vendor_pn, "V2801F ", 16))
- return 1;
+ size_t i, block_size = sfp->i2c_block_size;
- /* Some modules can't cope with long reads */
- return 16;
-}
+ /* Already using byte IO */
+ if (block_size == 1)
+ return false;
-static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base)
-{
- sfp->i2c_block_size = sfp_quirk_i2c_block_size(base);
+ for (i = 1; i < len; i += block_size) {
+ if (memchr_inv(buf + i, '\0', min(block_size - 1, len - i)))
+ return false;
+ }
+ return true;
}
static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
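
A hedged, self-contained sketch of the zero-tail heuristic implemented by sfp_id_needs_byte_io() above: with block_size-byte reads, a broken emulated EEPROM returns one real byte followed by block_size - 1 zeros per block, so every block tail is scanned and byte IO is chosen only if no tail contains data:

    static bool needs_byte_io(const u8 *buf, size_t len, size_t block_size)
    {
            size_t i;

            for (i = 1; i < len; i += block_size)
                    /* any non-zero byte in a tail means reads are good */
                    if (memchr_inv(buf + i, 0, min(block_size - 1, len - i)))
                            return false;
            return true;
    }

Note that the scan starts at offset 1 and strides by block_size, so byte 0 of each block - the one byte the broken EEPROMs do return - is never inspected.
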
@@ -1705,11 +1788,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- /* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte
- * reads from the EEPROM, so start by reading the base identifying
- * information one byte at a time.
+ /* Some SFP modules and also some Linux I2C drivers do not like reads
+ * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
+ * a time.
*/
- sfp->i2c_block_size = 1;
+ sfp->i2c_block_size = 16;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -1723,6 +1806,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
return -EAGAIN;
}
+ /* Some SFP modules (e.g. Nokia 3FE46541AA) lock up if the read from
+ * address 0x51 is done just one byte at a time. Also, SFF-8472 requires
+ * that the EEPROM support atomic 16bit reads for the diagnostic
+ * fields, so do not switch to one-byte-at-a-time reading unless it
+ * is really required and there is no other option.
+ */
+ if (sfp_id_needs_byte_io(sfp, &id.base, sizeof(id.base))) {
+ dev_info(sfp->dev,
+ "Detected broken RTL8672/RTL9601C emulated EEPROM\n");
+ dev_info(sfp->dev,
+ "Switching to reading EEPROM to one byte at a time\n");
+ sfp->i2c_block_size = 1;
+
+ ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ if (ret < 0) {
+ if (report)
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n",
+ ret);
+ return -EAGAIN;
+ }
+
+ if (ret != sizeof(id.base)) {
+ dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ return -EAGAIN;
+ }
+ }
+
/* Cotsworks do not seem to update the checksums when they
* do the final programming with the final module part number,
* serial number and date code.
@@ -1757,9 +1867,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
}
}
- /* Apply any early module-specific quirks */
- sfp_quirks_base(sfp, &id.base);
-
ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
if (ret < 0) {
if (report)
@@ -2483,6 +2590,8 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ sfp_debugfs_init(sfp);
+
return 0;
}
@@ -2490,6 +2599,7 @@ static int sfp_remove(struct platform_device *pdev)
{
struct sfp *sfp = platform_get_drvdata(pdev);
+ sfp_debugfs_exit(sfp);
sfp_unregister_socket(sfp->sfp_bus);
rtnl_lock();
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 33372756a451..ddb78fb4d6dc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
/* Make clk optional to keep DTB backward compatibility. */
priv->refclk = clk_get_optional(dev, NULL);
if (IS_ERR(priv->refclk))
- dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
ret = clk_prepare_enable(priv->refclk);
if (ret)
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 29a0917a81e6..2a91caa4f37b 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -101,7 +101,7 @@ static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
-static void ppp_async_process(unsigned long arg);
+static void ppp_async_process(struct tasklet_struct *t);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
int len, int inbound);
@@ -179,7 +179,7 @@ ppp_asynctty_open(struct tty_struct *tty)
ap->lcp_fcs = -1;
skb_queue_head_init(&ap->rqueue);
- tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
+ tasklet_setup(&ap->tsk, ppp_async_process);
refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead);
@@ -259,7 +259,8 @@ static int ppp_asynctty_hangup(struct tty_struct *tty)
*/
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t count)
+ unsigned char *buf, size_t count,
+ void **cookie, unsigned long offset)
{
return -EAGAIN;
}
@@ -488,9 +489,9 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
-static void ppp_async_process(unsigned long arg)
+static void ppp_async_process(struct tasklet_struct *t)
{
- struct asyncppp *ap = (struct asyncppp *) arg;
+ struct asyncppp *ap = from_tasklet(ap, t, tsk);
struct sk_buff *skb;
/* process received packets */
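
The same tasklet_setup()/from_tasklet() conversion recurs across this series (ppp_async, ppp_synctty, cdc_ncm, hso, lan78xx, pegasus, r8152). A minimal sketch of the pattern; struct foo and its members are hypothetical:

    #include <linux/interrupt.h>

    struct foo {
            struct tasklet_struct tsk;
            /* ... */
    };

    static void foo_process(struct tasklet_struct *t)
    {
            /* recover the containing object; container_of() wrapper */
            struct foo *ap = from_tasklet(ap, t, tsk);
            /* ... work on ap ... */
    }

    /* at init time, replacing tasklet_init(..., (unsigned long)ap): */
            tasklet_setup(&ap->tsk, foo_process);

The callback now receives the tasklet itself rather than an opaque unsigned long, so the cast-laden indirection disappears.
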
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 09c27f7773f9..d445ecb1d0c7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pch->upl);
return -EALREADY;
}
+ refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pchb->upl);
goto err_unset;
}
+ refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
write_unlock_bh(&pchb->upl);
- refcount_inc(&pch->file.refcnt);
- refcount_inc(&pchb->file.refcnt);
-
return 0;
err_unset:
write_lock_bh(&pch->upl);
+ /* Re-read pch->bridge with upl held in case it was modified concurrently */
+ pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
+
+ if (pchb)
+ if (refcount_dec_and_test(&pchb->file.refcnt))
+ ppp_destroy_channel(pchb);
+
return -EALREADY;
}
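
The fix above moves each refcount_inc() before the corresponding rcu_assign_pointer(), so no reader can ever observe a bridge pointer whose reference has not yet been taken. A hedged sketch of the general publish-after-ref pattern, with obj/slot/destroy() as hypothetical names:

    refcount_inc(&obj->refcnt);        /* 1: pin the object first */
    rcu_assign_pointer(slot, obj);     /* 2: only then publish it */

    /* teardown runs in the opposite order */
    RCU_INIT_POINTER(slot, NULL);      /* unpublish */
    synchronize_rcu();                 /* wait out existing readers */
    if (refcount_dec_and_test(&obj->refcnt))
            destroy(obj);              /* drop the pin taken at publish */

The error path above follows the same teardown shape, which is why it re-reads pch->bridge under the lock and drops the reference it may have taken.
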
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 0f338752c38b..d8890923a9e3 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -90,7 +90,7 @@ static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
-static void ppp_sync_process(unsigned long arg);
+static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
@@ -177,7 +177,7 @@ ppp_sync_open(struct tty_struct *tty)
ap->raccm = ~0U;
skb_queue_head_init(&ap->rqueue);
- tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
+ tasklet_setup(&ap->tsk, ppp_sync_process);
refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead_cmp);
@@ -257,7 +257,8 @@ static int ppp_sync_hangup(struct tty_struct *tty)
*/
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t count)
+ unsigned char *buf, size_t count,
+ void **cookie, unsigned long offset)
{
return -EAGAIN;
}
@@ -480,9 +481,9 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
-static void ppp_sync_process(unsigned long arg)
+static void ppp_sync_process(struct tasklet_struct *t)
{
- struct syncppp *ap = (struct syncppp *) arg;
+ struct syncppp *ap = from_tasklet(ap, t, tsk);
struct sk_buff *skb;
/* process received packets */
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index ee5058445d06..0fe78826c8fa 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -278,10 +278,8 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
header = (struct pptp_gre_header *)(skb->data);
/* ack in different place if S = 0 */
- ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;
-
- ack = ntohl(ack);
-
+ ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
+ ntohl(header->seq);
if (ack > opt->ack_recv)
opt->ack_recv = ack;
/* also handle sequence number wrap-around */
@@ -355,7 +353,7 @@ static int pptp_rcv(struct sk_buff *skb)
/* if invalid, discard this packet */
goto drop;
- po = lookup_chan(htons(header->call_id), iph->saddr);
+ po = lookup_chan(ntohs(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 1f4bdd94407a..8e3a28ba6b28 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -713,8 +713,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_probe_transport_header(skb);
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
@@ -722,12 +721,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
if (tap) {
@@ -1093,10 +1090,9 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
}
ret = 0;
- u = tap->dev->type;
+ dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
- copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
- put_user(u, &ifr->ifr_hwaddr.sa_family))
+ copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
ret = -EFAULT;
tap_put_tap_dev(tap);
rtnl_unlock();
@@ -1111,7 +1107,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return -ENOLINK;
}
- ret = dev_set_mac_address(tap->dev, &sa, NULL);
+ ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
@@ -1166,8 +1162,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
}
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c19dac21c468..dd7917cab2b1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
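
team_compute_features() above no longer takes team->lock, so the port-list walk switches to the RCU variant. A minimal sketch of the required pairing:

    rcu_read_lock();
    list_for_each_entry_rcu(port, &team->port_list, list) {
            /* read-only accumulation of port->dev feature bits */
    }
    rcu_read_unlock();

list_for_each_entry_rcu() alone is not enough: the read-side critical section is what guarantees a concurrently removed port stays valid until the walk has moved past it.
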
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..fc86da7f1628 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
@@ -1599,12 +1599,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct xdp_buff xdp;
u32 act;
- xdp.data_hard_start = buf;
- xdp.data = buf + pad;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &tfile->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf, pad, len, false);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1814,12 +1810,10 @@ drop:
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2344,9 +2338,9 @@ static int tun_xdp_one(struct tun_struct *tun,
skb_xdp = true;
goto build;
}
+
+ xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
xdp_set_data_meta_invalid(xdp);
- xdp->rxq = &tfile->xdp_rxq;
- xdp->frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2741,7 +2735,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
- /* free_netdev() won't check refcnt, to aovid race
+ /* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need publish tun after registration.
*/
rcu_assign_pointer(tfile->tun, tun);
@@ -3113,15 +3107,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCGIFHWADDR:
/* Get hw address */
- memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
- ifr.ifr_hwaddr.sa_family = tun->dev->type;
+ dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case SIOCSIFHWADDR:
/* Set hw address */
- ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
+ ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
break;
case TUNGETSNDBUF:
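
The tun.c hunks above replace open-coded xdp_buff field assignments with the helper pair introduced for that purpose. A minimal sketch:

    struct xdp_buff xdp;

    /* bind the buffer to its frame size and receive queue once */
    xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
    /* lay out headroom/data/data_end; false = no metadata area */
    xdp_prepare_buff(&xdp, buf, pad, len, false);

    act = bpf_prog_run_xdp(xdp_prog, &xdp);

Passing false for meta_valid expresses what the removed xdp_set_data_meta_invalid() call used to do.
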
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1e3719028780..fbbe78643631 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -631,7 +631,6 @@ config USB_NET_AQC111
config USB_RTL8153_ECM
tristate "RTL8153 ECM support"
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
- default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8c1d61c2cbac..a9b551028659 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -793,6 +793,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
@@ -962,6 +969,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..4087c9e33781 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -61,7 +61,7 @@ static bool prefer_mbim;
module_param(prefer_mbim, bool, 0644);
MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
-static void cdc_ncm_txpath_bh(unsigned long param);
+static void cdc_ncm_txpath_bh(struct tasklet_struct *t);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
@@ -813,9 +813,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
if (!ctx)
return -ENOMEM;
+ ctx->dev = dev;
+
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev);
+ tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
@@ -1199,7 +1201,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
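
A worked example of the worst case reserved above, under assumed values (not taken from any particular device): aligning the delayed NDP can cost up to (alignment - 1) bytes of padding wherever it lands, so the reservation is the NDP size plus the worst-case padding - the larger of the NDP alignment (tx_ndp_modulus) and the datagram alignment (tx_modulus + tx_remainder) - rather than ALIGN()ing the size itself, which only covers an NDP that starts at offset 0.

    max_ndp_size              = 32
    tx_ndp_modulus            = 256
    tx_modulus + tx_remainder = 516

    delayed_ndp_size = 32 + max(256, 516) - 1 = 547 bytes

    (the old ALIGN(32, 256) = 256 under-reserves in this case)
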
@@ -1410,7 +1415,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1468,10 +1474,10 @@ static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void cdc_ncm_txpath_bh(unsigned long param)
+static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
{
- struct usbnet *dev = (struct usbnet *)param;
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
+ struct usbnet *dev = ctx->dev;
spin_lock_bh(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
@@ -1823,6 +1829,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+ /* If the speed hasn't changed, don't report it.
+ * RTL8156 units shipped before 2021 send a notification about every 32ms.
+ */
+ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+ return;
+
+ dev->rx_speed = rx_speed;
+ dev->tx_speed = tx_speed;
+
/*
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
@@ -1863,10 +1878,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 2bb28db89432..31d51346786a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -370,7 +370,7 @@ static struct usb_driver hso_driver;
static struct tty_driver *tty_drv;
static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS];
static struct hso_device *network_table[HSO_MAX_NET_DEVICES];
-static spinlock_t serial_table_lock;
+static DEFINE_SPINLOCK(serial_table_lock);
static const s32 default_port_spec[] = {
HSO_INTF_MUX | HSO_PORT_NETWORK,
@@ -1213,9 +1213,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
* This needs to be a tasklet otherwise we will
* end up recursively calling this function.
*/
-static void hso_unthrottle_tasklet(unsigned long data)
+static void hso_unthrottle_tasklet(struct tasklet_struct *t)
{
- struct hso_serial *serial = (struct hso_serial *)data;
+ struct hso_serial *serial = from_tasklet(serial, t,
+ unthrottle_tasklet);
unsigned long flags;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1264,9 +1265,8 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
serial->rx_state = RX_IDLE;
/* Force default termio settings */
_hso_serial_set_termios(tty, NULL);
- tasklet_init(&serial->unthrottle_tasklet,
- hso_unthrottle_tasklet,
- (unsigned long)serial);
+ tasklet_setup(&serial->unthrottle_tasklet,
+ hso_unthrottle_tasklet);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
hso_stop_serial_device(serial->parent);
@@ -3236,7 +3236,6 @@ static int __init hso_init(void)
pr_info("%s\n", version);
/* Initialise the serial table semaphore and table */
- spin_lock_init(&serial_table_lock);
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
serial_table[i] = NULL;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index bf243edeb064..e81c5699c952 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3375,9 +3375,9 @@ static void lan78xx_rx_bh(struct lan78xx_net *dev)
netif_wake_queue(dev->net);
}
-static void lan78xx_bh(unsigned long param)
+static void lan78xx_bh(struct tasklet_struct *t)
{
- struct lan78xx_net *dev = (struct lan78xx_net *)param;
+ struct lan78xx_net *dev = from_tasklet(dev, t, bh);
struct sk_buff *skb;
struct skb_data *entry;
@@ -3655,7 +3655,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->txq_pend);
mutex_init(&dev->phy_mutex);
- tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+ tasklet_setup(&dev->bh, lan78xx_bh);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 32e1335c94ad..9a907182569c 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -553,12 +553,11 @@ tl_sched:
tasklet_schedule(&pegasus->rx_tl);
}
-static void rx_fixup(unsigned long data)
+static void rx_fixup(struct tasklet_struct *t)
{
- pegasus_t *pegasus;
+ pegasus_t *pegasus = from_tasklet(pegasus, t, rx_tl);
int status;
- pegasus = (pegasus_t *) data;
if (pegasus->flags & PEGASUS_UNPLUG)
return;
@@ -1129,7 +1128,7 @@ static int pegasus_probe(struct usb_interface *intf,
goto out1;
}
- tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
+ tasklet_setup(&pegasus->rx_tl, rx_fixup);
INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..17a050521b86 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -57,6 +57,7 @@ struct qmi_wwan_state {
enum qmi_wwan_flags {
QMI_WWAN_FLAG_RAWIP = 1 << 0,
QMI_WWAN_FLAG_MUX = 1 << 1,
+ QMI_WWAN_FLAG_PASS_THROUGH = 1 << 2,
};
enum qmi_wwan_quirks {
@@ -186,7 +187,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
net = qmimux_find_dev(dev, hdr->mux_id);
if (!net)
goto skip;
- skbn = netdev_alloc_skb(net, pkt_len);
+ skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
skbn->dev = net;
@@ -203,6 +204,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
goto skip;
}
+ skb_reserve(skbn, LL_MAX_HEADER);
skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
if (netif_rx(skbn) != NET_RX_SUCCESS) {
net->stats.rx_errors++;
@@ -217,6 +219,28 @@ skip:
return 1;
}
+static ssize_t mux_id_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct qmimux_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ return sysfs_emit(buf, "0x%02x\n", priv->mux_id);
+}
+
+static DEVICE_ATTR_RO(mux_id);
+
+static struct attribute *qmi_wwan_sysfs_qmimux_attrs[] = {
+ &dev_attr_mux_id.attr,
+ NULL,
+};
+
+static struct attribute_group qmi_wwan_sysfs_qmimux_attr_group = {
+ .name = "qmap",
+ .attrs = qmi_wwan_sysfs_qmimux_attrs,
+};
+
static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
{
struct net_device *new_dev;
@@ -239,6 +263,8 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
goto out_free_newdev;
}
+ new_dev->sysfs_groups[0] = &qmi_wwan_sysfs_qmimux_attr_group;
+
err = register_netdevice(new_dev);
if (err < 0)
goto out_free_newdev;
@@ -325,6 +351,13 @@ static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, co
if (enable == (info->flags & QMI_WWAN_FLAG_RAWIP))
return len;
+ /* ip mode cannot be cleared when pass through mode is set */
+ if (!enable && (info->flags & QMI_WWAN_FLAG_PASS_THROUGH)) {
+ netdev_err(dev->net,
+ "Cannot clear ip mode on pass through device\n");
+ return -EINVAL;
+ }
+
if (!rtnl_trylock())
return restart_syscall();
@@ -455,14 +488,59 @@ err:
return ret;
}
+static ssize_t pass_through_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info;
+
+ info = (void *)&dev->data;
+ return sprintf(buf, "%c\n",
+ info->flags & QMI_WWAN_FLAG_PASS_THROUGH ? 'Y' : 'N');
+}
+
+static ssize_t pass_through_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info;
+ bool enable;
+
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ info = (void *)&dev->data;
+
+ /* no change? */
+ if (enable == (info->flags & QMI_WWAN_FLAG_PASS_THROUGH))
+ return len;
+
+ /* pass through mode can be set for raw ip devices only */
+ if (!(info->flags & QMI_WWAN_FLAG_RAWIP)) {
+ netdev_err(dev->net,
+ "Cannot set pass through mode on non ip device\n");
+ return -EINVAL;
+ }
+
+ if (enable)
+ info->flags |= QMI_WWAN_FLAG_PASS_THROUGH;
+ else
+ info->flags &= ~QMI_WWAN_FLAG_PASS_THROUGH;
+
+ return len;
+}
+
static DEVICE_ATTR_RW(raw_ip);
static DEVICE_ATTR_RW(add_mux);
static DEVICE_ATTR_RW(del_mux);
+static DEVICE_ATTR_RW(pass_through);
static struct attribute *qmi_wwan_sysfs_attrs[] = {
&dev_attr_raw_ip.attr,
&dev_attr_add_mux.attr,
&dev_attr_del_mux.attr,
+ &dev_attr_pass_through.attr,
NULL,
};
@@ -509,6 +587,11 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (info->flags & QMI_WWAN_FLAG_MUX)
return qmimux_rx_fixup(dev, skb);
+ if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
+ skb->protocol = htons(ETH_P_MAP);
+ return (netif_rx(skb) == NET_RX_SUCCESS);
+ }
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -1013,6 +1096,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
@@ -1234,6 +1318,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
+ {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -1301,12 +1386,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
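Reviewer note: the new pass_through attribute follows the standard sysfs boolean-flag idiom — parse the user string with strtobool(), reject transitions that conflict with other mode flags, then set or clear a bit. A minimal sketch of the same idiom for a hypothetical driver flag; struct mydrv_state, MYDRV_FLAG_FOO and the drvdata layout are illustrative, not qmi_wwan's, and the rtnl locking the real driver takes is omitted:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

#define MYDRV_FLAG_FOO	BIT(0)

struct mydrv_state {
	unsigned long flags;		/* illustrative private state */
};

static ssize_t foo_show(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct mydrv_state *st = dev_get_drvdata(d);

	return sprintf(buf, "%c\n", st->flags & MYDRV_FLAG_FOO ? 'Y' : 'N');
}

static ssize_t foo_store(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	struct mydrv_state *st = dev_get_drvdata(d);
	bool enable;

	if (strtobool(buf, &enable))	/* accepts 0/1, y/n, on/off */
		return -EINVAL;

	if (enable)
		st->flags |= MYDRV_FLAG_FOO;
	else
		st->flags &= ~MYDRV_FLAG_FOO;

	return len;			/* consume the whole write */
}

/* expands to dev_attr_foo, wired to foo_show/foo_store */
static DEVICE_ATTR_RW(foo);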
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c448d6089821..b246817f3405 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1371,6 +1371,10 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
static int
r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+static int
+rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
+ u32 advertising);
+
static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -2393,11 +2397,9 @@ static void tx_bottom(struct r8152 *tp)
} while (res == 0);
}
-static void bottom_half(unsigned long data)
+static void bottom_half(struct tasklet_struct *t)
{
- struct r8152 *tp;
-
- tp = (struct r8152 *)data;
+ struct r8152 *tp = from_tasklet(tp, t, tx_tl);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -2630,21 +2632,24 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
-static void rtl_set_eee_plus(struct r8152 *tp)
+static void rtl_eee_plus_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
- u8 speed;
- speed = rtl8152_get_speed(tp);
- if (speed & _10bps) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ if (enable)
ocp_data |= EEEP_CR_EEEP_TX;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
- } else {
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ else
ocp_data &= ~EEEP_CR_EEEP_TX;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
+}
+
+static void rtl_set_eee_plus(struct r8152 *tp)
+{
+ if (rtl8152_get_speed(tp) & _10bps)
+ rtl_eee_plus_en(tp, true);
+ else
+ rtl_eee_plus_en(tp, false);
}
static void rxdy_gated_en(struct r8152 *tp, bool enable)
@@ -3148,10 +3153,22 @@ static void r8153b_ups_flags(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags);
}
-static void r8153b_green_en(struct r8152 *tp, bool enable)
+static void rtl_green_en(struct r8152 *tp, bool enable)
{
u16 data;
+ data = sram_read(tp, SRAM_GREEN_CFG);
+ if (enable)
+ data |= GREEN_ETH_EN;
+ else
+ data &= ~GREEN_ETH_EN;
+ sram_write(tp, SRAM_GREEN_CFG, data);
+
+ tp->ups_info.green = enable;
+}
+
+static void r8153b_green_en(struct r8152 *tp, bool enable)
+{
if (enable) {
sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */
sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */
@@ -3162,11 +3179,7 @@ static void r8153b_green_en(struct r8152 *tp, bool enable)
sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */
}
- data = sram_read(tp, SRAM_GREEN_CFG);
- data |= GREEN_ETH_EN;
- sram_write(tp, SRAM_GREEN_CFG, data);
-
- tp->ups_info.green = enable;
+ rtl_green_en(tp, true);
}
static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
@@ -3207,8 +3220,6 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
ocp_data |= BIT(0);
ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
} else {
- u16 data;
-
ocp_data &= ~(UPS_EN | USP_PREWAKE);
ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
@@ -3216,31 +3227,20 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
ocp_data &= ~BIT(0);
ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- ocp_data &= ~PCUT_STATUS;
- ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
-
- data = r8153_phy_status(tp, 0);
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) {
+ int i;
- switch (data) {
- case PHY_STAT_PWRDN:
- case PHY_STAT_EXT_INIT:
- r8153b_green_en(tp,
- test_bit(GREEN_ETHERNET, &tp->flags));
-
- data = r8152_mdio_read(tp, MII_BMCR);
- data &= ~BMCR_PDOWN;
- data |= BMCR_RESET;
- r8152_mdio_write(tp, MII_BMCR, data);
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
- data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
- fallthrough;
+ tp->rtl_ops.hw_phy_cfg(tp);
- default:
- if (data != PHY_STAT_LAN_ON)
- netif_warn(tp, link, tp->netdev,
- "PHY not ready");
- break;
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed,
+ tp->duplex, tp->advertising);
}
}
}
@@ -3371,7 +3371,7 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
}
}
@@ -3470,59 +3470,76 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
ocp_write_word(tp, type, PLA_BP_BA, 0);
}
-static int r8153_patch_request(struct r8152 *tp, bool request)
+static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
{
- u16 data;
+ u16 data, check;
int i;
data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
- if (request)
+ if (request) {
data |= PATCH_REQUEST;
- else
+ check = 0;
+ } else {
data &= ~PATCH_REQUEST;
+ check = PATCH_READY;
+ }
ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
- for (i = 0; request && i < 5000; i++) {
+ for (i = 0; wait && i < 5000; i++) {
+ u32 ocp_data;
+
usleep_range(1000, 2000);
- if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
+ if ((ocp_data & PATCH_READY) ^ check)
break;
}
- if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
- netif_err(tp, drv, tp->netdev, "patch request fail\n");
- r8153_patch_request(tp, false);
+ if (request && wait &&
+ !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
+ dev_err(&tp->intf->dev, "PHY patch request fail\n");
+ rtl_phy_patch_request(tp, false, false);
return -ETIME;
} else {
return 0;
}
}
-static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key)
+static void rtl_patch_key_set(struct r8152 *tp, u16 key_addr, u16 patch_key)
{
- if (r8153_patch_request(tp, true)) {
- dev_err(&tp->intf->dev, "patch request fail\n");
- return -ETIME;
- }
+ if (patch_key && key_addr) {
+ sram_write(tp, key_addr, patch_key);
+ sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK);
+ } else if (key_addr) {
+ u16 data;
- sram_write(tp, key_addr, patch_key);
- sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK);
+ sram_write(tp, 0x0000, 0x0000);
- return 0;
+ data = ocp_reg_read(tp, OCP_PHY_LOCK);
+ data &= ~PATCH_LOCK;
+ ocp_reg_write(tp, OCP_PHY_LOCK, data);
+
+ sram_write(tp, key_addr, 0x0000);
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
-static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr)
+static int
+rtl_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key, bool wait)
{
- u16 data;
+ if (rtl_phy_patch_request(tp, true, wait))
+ return -ETIME;
- sram_write(tp, 0x0000, 0x0000);
+ rtl_patch_key_set(tp, key_addr, patch_key);
- data = ocp_reg_read(tp, OCP_PHY_LOCK);
- data &= ~PATCH_LOCK;
- ocp_reg_write(tp, OCP_PHY_LOCK, data);
+ return 0;
+}
- sram_write(tp, key_addr, 0x0000);
+static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
+{
+ rtl_patch_key_set(tp, key_addr, 0);
- r8153_patch_request(tp, false);
+ rtl_phy_patch_request(tp, false, wait);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
@@ -4007,7 +4024,7 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info);
}
-static void rtl8152_apply_firmware(struct r8152 *tp)
+static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut)
{
struct rtl_fw *rtl_fw = &tp->rtl_fw;
const struct firmware *fw;
@@ -4038,12 +4055,11 @@ static void rtl8152_apply_firmware(struct r8152 *tp)
case RTL_FW_PHY_START:
key = (struct fw_phy_patch_key *)block;
key_addr = __le16_to_cpu(key->key_reg);
- r8153_pre_ram_code(tp, key_addr,
- __le16_to_cpu(key->key_data));
+ rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut);
break;
case RTL_FW_PHY_STOP:
WARN_ON(!key_addr);
- r8153_post_ram_code(tp, key_addr);
+ rtl_post_ram_code(tp, key_addr, !power_cut);
break;
case RTL_FW_PHY_NC:
rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block);
@@ -4248,7 +4264,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- rtl8152_apply_firmware(tp);
+ rtl8152_apply_firmware(tp, false);
rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -4530,7 +4546,7 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
- rtl8152_apply_firmware(tp);
+ rtl8152_apply_firmware(tp, false);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -4598,13 +4614,37 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
u32 ocp_data;
u16 data;
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if (ocp_data & PCUT_STATUS) {
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+ }
+
/* disable ALDPS before updating the PHY parameters */
r8153_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
- rtl8152_apply_firmware(tp);
+ /* U1/U2/L1 idle timer. 500 us */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);
+
+ data = r8153_phy_status(tp, 0);
+
+ switch (data) {
+ case PHY_STAT_PWRDN:
+ case PHY_STAT_EXT_INIT:
+ rtl8152_apply_firmware(tp, true);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ break;
+ case PHY_STAT_LAN_ON:
+ default:
+ rtl8152_apply_firmware(tp, false);
+ break;
+ }
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
@@ -4645,7 +4685,7 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
 /* Advance EEE */
- if (!r8153_patch_request(tp, true)) {
+ if (!rtl_phy_patch_request(tp, true, true)) {
data = ocp_reg_read(tp, OCP_POWER_CFG);
data |= EEE_CLKDIV_EN;
ocp_reg_write(tp, OCP_POWER_CFG, data);
@@ -4662,7 +4702,7 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5));
tp->ups_info._250m_ckdiv = true;
- r8153_patch_request(tp, false);
+ rtl_phy_patch_request(tp, false, true);
}
if (tp->eee_en)
@@ -5027,7 +5067,7 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_aldps_en(tp, true);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
}
@@ -5530,9 +5570,6 @@ static void r8153b_init(struct r8152 *tp)
/* MSC timer = 0xfff * 8ms = 32760 ms */
ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
- /* U1/U2/L1 idle timer. 500 us */
- ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);
-
r8153b_power_cut_en(tp, false);
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
@@ -5546,8 +5583,9 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= POLL_LINK_CHG;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
+
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
@@ -5730,6 +5768,9 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
int ret = 0;
+ if (!tp->rtl_ops.autosuspend_en)
+ return -EBUSY;
+
set_bit(SELECTIVE_SUSPEND, &tp->flags);
smp_mb__after_atomic();
@@ -6129,6 +6170,11 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
struct r8152 *tp = netdev_priv(net);
int ret;
+ if (!tp->rtl_ops.eee_get) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out;
@@ -6151,6 +6197,11 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
struct r8152 *tp = netdev_priv(net);
int ret;
+ if (!tp->rtl_ops.eee_set) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out;
@@ -6550,7 +6601,7 @@ static int rtl_ops_init(struct r8152 *tp)
default:
ret = -ENODEV;
- netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+ dev_err(&tp->intf->dev, "Unknown Device\n");
break;
}
@@ -6714,7 +6765,7 @@ static int rtl8152_probe(struct usb_interface *intf,
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
- tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp);
+ tasklet_setup(&tp->tx_tl, bottom_half);
tasklet_disable(&tp->tx_tl);
netdev->netdev_ops = &rtl8152_netdev_ops;
@@ -6807,7 +6858,7 @@ static int rtl8152_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
- netif_err(tp, probe, netdev, "couldn't register the device\n");
+ dev_err(&intf->dev, "couldn't register the device\n");
goto out1;
}
@@ -6877,6 +6928,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
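Reviewer note: bottom_half() above, and rx_fixup()/usbnet_bh_tasklet() further down, are all the same mechanical conversion: the old tasklet callback took an unsigned long cookie set up by tasklet_init(), the new one takes the tasklet_struct itself and recovers its container with from_tasklet(), a container_of() wrapper keyed on the tasklet member. A minimal sketch (struct mydrv and its members are illustrative):

#include <linux/interrupt.h>

struct mydrv {
	struct tasklet_struct tl;
	int pending;
};

/* new-style callback: gets the tasklet, not an unsigned long */
static void mydrv_bh(struct tasklet_struct *t)
{
	/* container_of(t, struct mydrv, tl) under the hood */
	struct mydrv *md = from_tasklet(md, t, tl);

	md->pending = 0;
}

static void mydrv_setup(struct mydrv *md)
{
	/* replaces tasklet_init(&md->tl, mydrv_bh, (unsigned long)md) */
	tasklet_setup(&md->tl, mydrv_bh);
}

For statically allocated tasklets the same shift is DECLARE_TASKLET_OLD() to DECLARE_TASKLET(), as the farsync change below shows.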
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 2c3fabd38b16..20b2df8d74ae 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
};
static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6609d21ef894..f813ca9dec53 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
reply_len = sizeof *phym;
retval = rndis_query(dev, intf, u.buf,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
- 0, (void **) &phym, &reply_len);
+ reply_len, (void **)&phym, &reply_len);
if (retval != 0 || !phym) {
/* OID is optional so don't fail here. */
phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index bf8a60533f3e..7656f2a3afd9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -577,9 +577,9 @@ static void free_skb_pool(rtl8150_t *dev)
dev_kfree_skb(dev->rx_skb_pool[i]);
}
-static void rx_fixup(unsigned long data)
+static void rx_fixup(struct tasklet_struct *t)
{
- struct rtl8150 *dev = (struct rtl8150 *)data;
+ struct rtl8150 *dev = from_tasklet(dev, t, tl);
struct sk_buff *skb;
int status;
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
return -ENOMEM;
}
- tasklet_init(&dev->tl, rx_fixup, (unsigned long)dev);
+ tasklet_setup(&dev->tl, rx_fixup);
spin_lock_init(&dev->rx_pool_lock);
dev->udev = udev;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1447da1d5729..b4c8080e6f87 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1539,11 +1539,11 @@ static void usbnet_bh (struct timer_list *t)
}
}
-static void usbnet_bh_tasklet(unsigned long data)
+static void usbnet_bh_tasklet(struct tasklet_struct *t)
{
- struct timer_list *t = (struct timer_list *)data;
+ struct usbnet *dev = from_tasklet(dev, t, bh);
- usbnet_bh(t);
+ usbnet_bh(&dev->delay);
}
@@ -1673,8 +1673,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = usbnet_bh_tasklet;
- dev->bh.data = (unsigned long)&dev->delay;
+ tasklet_setup(&dev->bh, usbnet_bh_tasklet);
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1964,12 +1963,12 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, buf, size,
USB_CTRL_GET_TIMEOUT);
if (err > 0 && err <= size) {
- if (data)
- memcpy(data, buf, err);
- else
- netdev_dbg(dev->net,
- "Huh? Data requested but thrown away.\n");
- }
+ if (data)
+ memcpy(data, buf, err);
+ else
+ netdev_dbg(dev->net,
+ "Huh? Data requested but thrown away.\n");
+ }
kfree(buf);
out:
return err;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 02bfcdf50a7a..aa1a66ad2ce5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -35,6 +35,7 @@
#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
#define VETH_XDP_TX_BULK_SIZE 16
+#define VETH_XDP_BATCH 16
struct veth_stats {
u64 rx_drops;
@@ -562,20 +563,13 @@ static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
return 0;
}
-static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
- struct xdp_frame *frame,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
+ struct xdp_frame *frame,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
- void *hard_start = frame->data - frame->headroom;
- int len = frame->len, delta = 0;
struct xdp_frame orig_frame;
struct bpf_prog *xdp_prog;
- unsigned int headroom;
- struct sk_buff *skb;
-
- /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
- hard_start -= sizeof(struct xdp_frame);
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -590,8 +584,8 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
switch (act) {
case XDP_PASS:
- delta = frame->data - xdp.data;
- len = xdp.data_end - xdp.data;
+ if (xdp_update_frame_from_buff(&xdp, frame))
+ goto err_xdp;
break;
case XDP_TX:
orig_frame = *frame;
@@ -629,19 +623,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
}
rcu_read_unlock();
- headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
- skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
- if (!skb) {
- xdp_return_frame(frame);
- stats->rx_drops++;
- goto err;
- }
-
- xdp_release_frame(frame);
- xdp_scrub_frame(frame);
- skb->protocol = eth_type_trans(skb, rq->dev);
-err:
- return skb;
+ return frame;
err_xdp:
rcu_read_unlock();
xdp_return_frame(frame);
@@ -649,12 +631,43 @@ xdp_xmit:
return NULL;
}
+/* frames array contains at most VETH_XDP_BATCH entries */
+static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
+ int n_xdpf, struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+ void *skbs[VETH_XDP_BATCH];
+ int i;
+
+ if (xdp_alloc_skb_bulk(skbs, n_xdpf,
+ GFP_ATOMIC | __GFP_ZERO) < 0) {
+ for (i = 0; i < n_xdpf; i++)
+ xdp_return_frame(frames[i]);
+ stats->rx_drops += n_xdpf;
+
+ return;
+ }
+
+ for (i = 0; i < n_xdpf; i++) {
+ struct sk_buff *skb = skbs[i];
+
+ skb = __xdp_build_skb_from_frame(frames[i], skb,
+ rq->dev);
+ if (!skb) {
+ xdp_return_frame(frames[i]);
+ stats->rx_drops++;
+ continue;
+ }
+ napi_gro_receive(&rq->xdp_napi, skb);
+ }
+}
+
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
struct sk_buff *skb,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- u32 pktlen, headroom, act, metalen;
+ u32 pktlen, headroom, act, metalen, frame_sz;
void *orig_data, *orig_data_end;
struct bpf_prog *xdp_prog;
int mac_len, delta, off;
@@ -710,15 +723,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
skb = nskb;
}
- xdp.data_hard_start = skb->head;
- xdp.data = skb_mac_header(skb);
- xdp.data_end = xdp.data + pktlen;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
-
/* SKB "head" area always have tailroom for skb_shared_info */
- xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
- xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frame_sz = skb_end_pointer(skb) - skb->head;
+ frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -800,32 +809,45 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- int i, done = 0;
+ int i, done = 0, n_xdpf = 0;
+ void *xdpf[VETH_XDP_BATCH];
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
- struct sk_buff *skb;
if (!ptr)
break;
if (veth_is_xdp_frame(ptr)) {
+ /* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
stats->xdp_bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, bq, stats);
+ frame = veth_xdp_rcv_one(rq, frame, bq, stats);
+ if (frame) {
+ /* XDP_PASS */
+ xdpf[n_xdpf++] = frame;
+ if (n_xdpf == VETH_XDP_BATCH) {
+ veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
+ bq, stats);
+ n_xdpf = 0;
+ }
+ }
} else {
- skb = ptr;
+ /* ndo_start_xmit */
+ struct sk_buff *skb = ptr;
+
stats->xdp_bytes += skb->len;
skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
+ if (skb)
+ napi_gro_receive(&rq->xdp_napi, skb);
}
-
- if (skb)
- napi_gro_receive(&rq->xdp_napi, skb);
-
done++;
}
+ if (n_xdpf)
+ veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
+
u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.xdp_redirect += stats->xdp_redirect;
rq->stats.vs.xdp_bytes += stats->xdp_bytes;
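Reviewer note: veth_xdp_rcv_bulk_skb() pairs two helpers — xdp_alloc_skb_bulk(), which pulls n skb heads out of the skbuff slab cache in a single bulk call, and __xdp_build_skb_from_frame(), which dresses one of those preallocated skbs around an xdp_frame. A condensed sketch of the consume-build-receive loop; rx_bulk() and its parameters are illustrative stand-ins for the veth receive-queue context, with the per-frame drop accounting trimmed:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

#define XDP_BATCH 16	/* mirrors VETH_XDP_BATCH */

static void rx_bulk(struct napi_struct *napi, struct net_device *dev,
		    struct xdp_frame **frames, int n)
{
	void *skbs[XDP_BATCH];
	int i;

	/* one bulk allocation instead of n separate kmem_cache hits */
	if (xdp_alloc_skb_bulk(skbs, n, GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n; i++)
			xdp_return_frame(frames[i]);
		return;
	}

	for (i = 0; i < n; i++) {
		/* wrap the frame's data and headroom in the fresh skb */
		struct sk_buff *skb =
			__xdp_build_skb_from_frame(frames[i], skbs[i], dev);

		if (!skb) {
			xdp_return_frame(frames[i]);
			continue;
		}
		napi_gro_receive(napi, skb);
	}
}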
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4c41df624dbb..82e520d2cb12 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -689,12 +689,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
page = xdp_page;
}
- xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
- xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp.data_end = xdp.data + len;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+ xdp_headroom, len, true);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -732,6 +729,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
+ goto err_xdp;
case XDP_DROP:
goto err_xdp;
}
@@ -859,12 +857,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* the descriptor on if we get an XDP_TX return code.
*/
data = page_address(xdp_page) + offset;
- xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
- xdp.data = data + vi->hdr_len;
- xdp.data_end = xdp.data + (len - vi->hdr_len);
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = frame_sz - vi->hdr_len;
+ xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
+ VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -2093,14 +2088,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
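Reviewer note: both receive_small() and receive_mergeable() drop their open-coded xdp_buff field assignments for the xdp_init_buff()/xdp_prepare_buff() helper pair. The split is deliberate: init binds the buffer to a frame size and an rxq once, prepare lays out hard_start/headroom/data length (and whether data_meta is valid) per packet. A generic sketch of the idiom; run_xdp() and its parameters are illustrative:

#include <linux/filter.h>
#include <net/xdp.h>

/* hard_start is the buffer head, the packet sits headroom bytes in,
 * and frame_sz spans the whole buffer incl. skb_shared_info tailroom.
 */
static u32 run_xdp(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
		   unsigned char *hard_start, int headroom, int len,
		   u32 frame_sz)
{
	struct xdp_buff xdp;

	xdp_init_buff(&xdp, frame_sz, rxq);
	/* final 'true': data_meta starts out valid at xdp.data */
	xdp_prepare_buff(&xdp, hard_start, headroom, len, true);

	return bpf_prog_run_xdp(prog, &xdp);
}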
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 336504b7531d..6e87f1fc4874 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -451,12 +451,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
- if (tq->buf_info) {
- dma_free_coherent(&adapter->pdev->dev,
- tq->tx_ring.size * sizeof(tq->buf_info[0]),
- tq->buf_info, tq->buf_info_pa);
- tq->buf_info = NULL;
- }
+ kfree(tq->buf_info);
+ tq->buf_info = NULL;
}
@@ -505,8 +501,6 @@ static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
- size_t sz;
-
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
@@ -534,9 +528,9 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
goto err;
}
- sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
- tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &tq->buf_info_pa, GFP_KERNEL);
+ tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
+ GFP_KERNEL,
+ dev_to_node(&adapter->pdev->dev));
if (!tq->buf_info)
goto err;
@@ -1737,13 +1731,9 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->comp_ring.base = NULL;
}
- if (rq->buf_info[0]) {
- size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
- (rq->rx_ring[0].size + rq->rx_ring[1].size);
- dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
- rq->buf_info_pa);
- rq->buf_info[0] = rq->buf_info[1] = NULL;
- }
+ kfree(rq->buf_info[0]);
+ rq->buf_info[0] = NULL;
+ rq->buf_info[1] = NULL;
}
static void
@@ -1883,10 +1873,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
goto err;
}
- sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
- rq->rx_ring[1].size);
- bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
- GFP_KERNEL);
+ bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
+ sizeof(rq->buf_info[0][0]), GFP_KERNEL,
+ dev_to_node(&adapter->pdev->dev));
if (!bi)
goto err;
@@ -2522,14 +2511,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
+ tqc->ddPA = cpu_to_le64(~0ULL);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
- tqc->ddLen = cpu_to_le32(
- sizeof(struct vmxnet3_tx_buf_info) *
- tqc->txRingSize);
+ tqc->ddLen = cpu_to_le32(0);
tqc->intrIdx = tq->comp_ring.intr_idx;
}
@@ -2541,14 +2528,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
+ rqc->ddPA = cpu_to_le64(~0ULL);
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
- rqc->ddLen = cpu_to_le32(
- sizeof(struct vmxnet3_rx_buf_info) *
- (rqc->rxRingSize[0] +
- rqc->rxRingSize[1]));
+ rqc->ddLen = cpu_to_le32(0);
rqc->intrIdx = rq->comp_ring.intr_idx;
if (VMXNET3_VERSION_GE_3(adapter)) {
rqc->rxDataRingBasePA =
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index d958b92c9429..e910596b79cf 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -240,7 +240,6 @@ struct vmxnet3_tx_queue {
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
- dma_addr_t buf_info_pa;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
@@ -298,7 +297,6 @@ struct vmxnet3_rx_queue {
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
u32 dataRingQid; /* rqID in RCD for buffer from data ring */
struct vmxnet3_rx_buf_info *buf_info[2];
- dma_addr_t buf_info_pa;
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
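Reviewer note: the buf_info arrays are pure driver bookkeeping — the device never reads or writes them — so handing their physical address to hardware via ddPA/ddLen was needless, and so was dma_alloc_coherent(). Plain kcalloc_node() keeps the array on the adapter's NUMA node with no DMA mapping at all. The shape of the replacement, reduced to a sketch (struct ring_meta is illustrative):

#include <linux/pci.h>
#include <linux/slab.h>

struct ring_meta {		/* stand-in for vmxnet3_{tx,rx}_buf_info */
	void *cookie;
	unsigned int len;
};

static struct ring_meta *alloc_ring_meta(struct pci_dev *pdev, size_t n)
{
	/* host-only state: zeroed, NUMA-local, freed with plain kfree() */
	return kcalloc_node(n, sizeof(struct ring_meta), GFP_KERNEL,
			    dev_to_node(&pdev->dev));
}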
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8ad710629e6..666dd201c3d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3283,12 +3283,13 @@ static void vxlan_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
@@ -4521,17 +4522,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
- if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, false);
+ if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, true);
- } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- }
+ else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ vxlan_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ vxlan_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
@@ -4725,7 +4721,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
- unsigned int h;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
@@ -4739,14 +4734,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(vxlan->dev, head);
}
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
{
struct net *net;
LIST_HEAD(list);
+ unsigned int h;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
@@ -4759,6 +4753,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
unregister_netdevice_many(&list);
rtnl_unlock();
+
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
}
static struct pernet_operations vxlan_net_ops = {
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index b50cf11d197d..686a25d3b512 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -566,11 +566,11 @@ MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
static void do_bottom_half_tx(struct fst_card_info *card);
static void do_bottom_half_rx(struct fst_card_info *card);
-static void fst_process_tx_work_q(unsigned long work_q);
-static void fst_process_int_work_q(unsigned long work_q);
+static void fst_process_tx_work_q(struct tasklet_struct *unused);
+static void fst_process_int_work_q(struct tasklet_struct *unused);
-static DECLARE_TASKLET_OLD(fst_tx_task, fst_process_tx_work_q);
-static DECLARE_TASKLET_OLD(fst_int_task, fst_process_int_work_q);
+static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q);
+static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
static spinlock_t fst_work_q_lock;
@@ -600,7 +600,7 @@ fst_q_work_item(u64 * queue, int card_index)
}
static void
-fst_process_tx_work_q(unsigned long /*void **/work_q)
+fst_process_tx_work_q(struct tasklet_struct *unused)
{
unsigned long flags;
u64 work_txq;
@@ -630,7 +630,7 @@ fst_process_tx_work_q(unsigned long /*void **/work_q)
}
static void
-fst_process_int_work_q(unsigned long /*void **/work_q)
+fst_process_int_work_q(struct tasklet_struct *unused)
{
unsigned long flags;
u64 work_intq;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
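Reviewer note: the timer_pending() check closes a classic timer race. ppp_timer() can expire and start spinning on ppp->lock while another CPU, already inside the lock, re-arms the timer with mod_timer(). By the time the callback gets the lock its expiry is stale — and the re-arm means timer_pending() is now true, which is exactly the condition the new code bails on. The guard in generic form (struct proto_state and the state update are illustrative):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct proto_state {
	spinlock_t lock;
	struct timer_list timer;
	int retries;
};

static void proto_timeout(struct timer_list *t)
{
	struct proto_state *ps = from_timer(ps, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	/* re-armed between expiry and lock acquisition: whoever called
	 * mod_timer() already handled the state, so this run is stale
	 */
	if (timer_pending(&ps->timer)) {
		spin_unlock_irqrestore(&ps->lock, flags);
		return;
	}
	ps->retries++;			/* act on a genuine timeout */
	spin_unlock_irqrestore(&ps->lock, flags);
}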
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index bb164805804e..4aaa6388b9ee 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -169,11 +169,11 @@ static int x25_open(struct net_device *dev)
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
- return result;
+ return -ENOMEM;
result = lapb_getparms(dev, &params);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
@@ -188,7 +188,7 @@ static int x25_open(struct net_device *dev)
result = lapb_setparms(dev, &params);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 7c5cf77e9ef1..ecea09fd21cb 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -323,7 +323,7 @@ struct desc {
static int ports_open;
static struct dma_pool *dma_pool;
-static spinlock_t npe_lock;
+static DEFINE_SPINLOCK(npe_lock);
static const struct {
int tx, txdone, rx, rxfree;
@@ -1402,8 +1402,6 @@ static int __init hss_init_module(void)
(IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
return -ENODEV;
- spin_lock_init(&npe_lock);
-
return platform_driver_register(&ixp4xx_hss_driver);
}
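Reviewer note: a statically allocated lock can be born initialized — DEFINE_SPINLOCK() does at compile time what the removed spin_lock_init() call did at module init, eliminating any window where the lock exists uninitialized. In miniature (my_lock is illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* no init call needed */

static void touch_shared_state(void)
{
	spin_lock(&my_lock);
	/* ... critical section ... */
	spin_unlock(&my_lock);
}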
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 93c7e8502845..6c163db52835 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -854,7 +854,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&sc->lmc_lock);
pci_set_master(pdev);
- printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
+ printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
dev->base_addr, dev->irq);
err = register_hdlc_device(dev);
@@ -899,6 +899,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
+ unregister_hdlc_device(dev);
+ return -EIO;
break;
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 2fde439543fb..3092a09d3eaa 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1535,7 +1535,7 @@ sbni_setup( char *p )
goto bad_param;
for( n = 0, parm = 0; *p && n < 8; ) {
- (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
+ (*dest[ parm ])[ n ] = simple_strtoul( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
if( *p == ';' ) {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a3ed49cd95c3..551ddaaaf540 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
else if (skb->protocol == htons(ETH_P_IPV6))
net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
dev->name, &ipv6_hdr(skb)->daddr);
- goto err;
+ goto err_icmp;
}
family = READ_ONCE(peer->endpoint.addr.sa_family);
@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
struct sk_buff *segs = skb_gso_segment(skb, 0);
- if (unlikely(IS_ERR(segs))) {
+ if (IS_ERR(segs)) {
ret = PTR_ERR(segs);
goto err_peer;
}
@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
err_peer:
wg_peer_put(peer);
-err:
- ++dev->stats.tx_errors;
+err_icmp:
if (skb->protocol == htons(ETH_P_IP))
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+err:
+ ++dev->stats.tx_errors;
kfree_skb(skb);
return ret;
}
@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- wg_packet_queue_free(&wg->decrypt_queue, true);
- wg_packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue);
+ wg_packet_queue_free(&wg->encrypt_queue);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
goto err_destroy_handshake_send;
ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
- true, MAX_QUEUED_PACKETS);
+ MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_destroy_packet_crypt;
ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
- true, MAX_QUEUED_PACKETS);
+ MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_free_encrypt_queue;
@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
err_free_decrypt_queue:
- wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue);
err_free_encrypt_queue:
- wg_packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index 4d0144e16947..854bc3d97150 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -27,13 +27,14 @@ struct multicore_worker {
struct crypt_queue {
struct ptr_ring ring;
- union {
- struct {
- struct multicore_worker __percpu *worker;
- int last_cpu;
- };
- struct work_struct work;
- };
+ struct multicore_worker __percpu *worker;
+ int last_cpu;
+};
+
+struct prev_queue {
+ struct sk_buff *head, *tail, *peeked;
+ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
+ atomic_t count;
};
struct wg_device {
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index b3b6370e6b95..cd5cb0292cb6 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
peer = kzalloc(sizeof(*peer), GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
- peer->device = wg;
+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ goto err;
+ peer->device = wg;
wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
public_key, preshared_key, peer);
- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
- goto err_1;
- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
- MAX_QUEUED_PACKETS))
- goto err_2;
- if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
- MAX_QUEUED_PACKETS))
- goto err_3;
-
peer->internal_id = atomic64_inc_return(&peer_counter);
peer->serial_work_cpu = nr_cpumask_bits;
wg_cookie_init(&peer->latest_cookie);
wg_timers_init(peer);
wg_cookie_checker_precompute_peer_keys(peer);
spin_lock_init(&peer->keypairs.keypair_update_lock);
- INIT_WORK(&peer->transmit_handshake_work,
- wg_packet_handshake_send_worker);
+ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
+ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
+ wg_prev_queue_init(&peer->tx_queue);
+ wg_prev_queue_init(&peer->rx_queue);
rwlock_init(&peer->endpoint_lock);
kref_init(&peer->refcount);
skb_queue_head_init(&peer->staged_packet_queue);
@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
return peer;
-err_3:
- wg_packet_queue_free(&peer->tx_queue, false);
-err_2:
- dst_cache_destroy(&peer->endpoint_cache);
-err_1:
+err:
kfree(peer);
return ERR_PTR(ret);
}
@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
dst_cache_destroy(&peer->endpoint_cache);
- wg_packet_queue_free(&peer->rx_queue, false);
- wg_packet_queue_free(&peer->tx_queue, false);
+ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 23af40922997..8d53b687a1d1 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -36,16 +36,17 @@ struct endpoint {
struct wg_peer {
struct wg_device *device;
- struct crypt_queue tx_queue, rx_queue;
+ struct prev_queue tx_queue, rx_queue;
struct sk_buff_head staged_packet_queue;
int serial_work_cpu;
+ bool is_dead;
struct noise_keypairs keypairs;
struct endpoint endpoint;
struct dst_cache endpoint_cache;
rwlock_t endpoint_lock;
struct noise_handshake handshake;
atomic64_t last_sent_handshake;
- struct work_struct transmit_handshake_work, clear_peer_work;
+ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
struct cookie latest_cookie;
struct hlist_node pubkey_hash;
u64 rx_bytes, tx_bytes;
@@ -61,9 +62,8 @@ struct wg_peer {
struct rcu_head rcu;
struct list_head peer_list;
struct list_head allowedips_list;
- u64 internal_id;
struct napi_struct napi;
- bool is_dead;
+ u64 internal_id;
};
struct wg_peer *wg_peer_create(struct wg_device *wg,
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 71b8e80b58e1..48e7b982a307 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -9,8 +9,7 @@ struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
int cpu;
- struct multicore_worker __percpu *worker =
- alloc_percpu(struct multicore_worker);
+ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
if (!worker)
return NULL;
@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
}
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len)
+ unsigned int len)
{
int ret;
@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
if (ret)
return ret;
- if (function) {
- if (multicore) {
- queue->worker = wg_packet_percpu_multicore_worker_alloc(
- function, queue);
- if (!queue->worker) {
- ptr_ring_cleanup(&queue->ring, NULL);
- return -ENOMEM;
- }
- } else {
- INIT_WORK(&queue->work, function);
- }
+ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+ if (!queue->worker) {
+ ptr_ring_cleanup(&queue->ring, NULL);
+ return -ENOMEM;
}
return 0;
}
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue)
{
- if (multicore)
- free_percpu(queue->worker);
+ free_percpu(queue->worker);
WARN_ON(!__ptr_ring_empty(&queue->ring));
ptr_ring_cleanup(&queue->ring, NULL);
}
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+ NEXT(STUB(queue)) = NULL;
+ queue->head = queue->tail = STUB(queue);
+ queue->peeked = NULL;
+ atomic_set(&queue->count, 0);
+ BUILD_BUG_ON(
+ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+ offsetof(struct prev_queue, empty) ||
+ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+ offsetof(struct prev_queue, empty));
+}
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+ WRITE_ONCE(NEXT(skb), NULL);
+ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+ return false;
+ __wg_prev_queue_enqueue(queue, skb);
+ return true;
+}
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+ struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+ if (tail == STUB(queue)) {
+ if (!next)
+ return NULL;
+ queue->tail = next;
+ tail = next;
+ next = smp_load_acquire(&NEXT(next));
+ }
+ if (next) {
+ queue->tail = next;
+ atomic_dec(&queue->count);
+ return tail;
+ }
+ if (tail != READ_ONCE(queue->head))
+ return NULL;
+ __wg_prev_queue_enqueue(queue, STUB(queue));
+ next = smp_load_acquire(&NEXT(tail));
+ if (next) {
+ queue->tail = next;
+ atomic_dec(&queue->count);
+ return tail;
+ }
+ return NULL;
+}
+
+#undef NEXT
+#undef STUB
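Reviewer note: wg_prev_queue is a bounded take on Vyukov's intrusive MPSC queue. Producers touch only head (one xchg_release() plus one store each), the single consumer touches only tail, and the embedded stub — the 'empty' member laid out to alias an sk_buff's next/prev — means an empty queue still holds one node, so neither side ever allocates. The dequeue subtlety is the window between a producer's xchg() and its NEXT() store: tail != head yet NEXT(tail) == NULL, so the consumer re-enqueues the stub and retries once before reporting empty. The core algorithm on a plain node type, without WireGuard's count bound (illustrative, not the driver code):

#include <linux/atomic.h>

struct mpsc_node {
	struct mpsc_node *next;
};

struct mpsc_queue {
	struct mpsc_node *head;		/* producers, via xchg_release() */
	struct mpsc_node *tail;		/* consumer only */
	struct mpsc_node stub;		/* permanent placeholder */
};

static void mpsc_init(struct mpsc_queue *q)
{
	q->stub.next = NULL;
	q->head = q->tail = &q->stub;
}

/* multi-producer: wait-free, two stores per enqueue */
static void mpsc_enqueue(struct mpsc_queue *q, struct mpsc_node *n)
{
	struct mpsc_node *prev;

	WRITE_ONCE(n->next, NULL);
	prev = xchg_release(&q->head, n);	/* publish as new head */
	WRITE_ONCE(prev->next, n);		/* link old head to it */
}

/* single consumer; may return NULL transiently while a producer sits
 * between its two stores above, even though the queue is non-empty
 */
static struct mpsc_node *mpsc_dequeue(struct mpsc_queue *q)
{
	struct mpsc_node *tail = q->tail;
	struct mpsc_node *next = smp_load_acquire(&tail->next);

	if (tail == &q->stub) {			/* skip the placeholder */
		if (!next)
			return NULL;
		q->tail = next;
		tail = next;
		next = smp_load_acquire(&next->next);
	}
	if (next) {
		q->tail = next;
		return tail;
	}
	if (tail != READ_ONCE(q->head))
		return NULL;		/* producer mid-publish, retry later */
	mpsc_enqueue(q, &q->stub);	/* recycle stub so tail can advance */
	next = smp_load_acquire(&tail->next);
	if (next) {
		q->tail = next;
		return tail;
	}
	return NULL;
}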
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index dfb674e03076..4ef2944a68bc 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -17,12 +17,13 @@ struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
+struct prev_queue;
struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+ unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
return cpu;
}
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+ if (queue->peeked)
+ return queue->peeked;
+ queue->peeked = wg_prev_queue_dequeue(queue);
+ return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+ queue->peeked = NULL;
+}
+
static inline int wg_queue_enqueue_per_device_and_peer(
- struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct crypt_queue *device_queue, struct prev_queue *peer_queue,
struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
int cpu;
@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
/* We first queue this up for the peer ingestion, but the consumer
* will wait for the state to change to CRYPTED or DEAD before.
*/
- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
return -ENOSPC;
+
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
return 0;
}
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
- struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
atomic_set_release(&PACKET_CB(skb)->state, state);
- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
- peer->internal_id),
- peer->device->packet_crypt_wq, &queue->work);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+ peer->device->packet_crypt_wq, &peer->transmit_packet_work);
wg_peer_put(peer);
}
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 2c9551ea6dc7..7dc84bcca261 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -444,7 +444,6 @@ packet_processed:
int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
- struct crypt_queue *queue = &peer->rx_queue;
struct noise_keypair *keypair;
struct endpoint endpoint;
enum packet_state state;
@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(budget <= 0))
return 0;
- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
PACKET_STATE_UNCRYPTED) {
- __ptr_ring_discard_one(&queue->ring);
- peer = PACKET_PEER(skb);
+ wg_prev_queue_drop_peeked(&peer->rx_queue);
keypair = PACKET_CB(skb)->keypair;
free = true;
@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct work_struct *work)
enum packet_state state =
likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
- wg_queue_enqueue_per_peer_napi(skb, state);
+ wg_queue_enqueue_per_peer_rx(skb, state);
if (need_resched())
cond_resched();
}
@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
- &peer->rx_queue, skb,
- wg->packet_crypt_wq,
- &wg->decrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
+ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
rcu_read_unlock_bh();
return;
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index f74b9341ab0f..5368f7c35b4b 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
wg_packet_send_staged_packets(peer);
}
-static void wg_packet_create_data_done(struct sk_buff *first,
- struct wg_peer *peer)
+static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
{
struct sk_buff *skb, *next;
bool is_keepalive, data_sent = false;
@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(struct sk_buff *first,
void wg_packet_tx_worker(struct work_struct *work)
{
- struct crypt_queue *queue = container_of(work, struct crypt_queue,
- work);
+ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
struct noise_keypair *keypair;
enum packet_state state;
struct sk_buff *first;
- struct wg_peer *peer;
- while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
PACKET_STATE_UNCRYPTED) {
- __ptr_ring_discard_one(&queue->ring);
- peer = PACKET_PEER(first);
+ wg_prev_queue_drop_peeked(&peer->tx_queue);
keypair = PACKET_CB(first)->keypair;
if (likely(state == PACKET_STATE_CRYPTED))
- wg_packet_create_data_done(first, peer);
+ wg_packet_create_data_done(peer, first);
else
kfree_skb_list(first);
@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct work_struct *work)
break;
}
}
- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
- state);
+ wg_queue_enqueue_per_peer_tx(first, state);
if (need_resched())
cond_resched();
}
}
-static void wg_packet_create_data(struct sk_buff *first)
+static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
{
- struct wg_peer *peer = PACKET_PEER(first);
struct wg_device *wg = peer->device;
int ret = -EINVAL;
@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct sk_buff *first)
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
- &peer->tx_queue, first,
- wg->packet_crypt_wq,
- &wg->encrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
+ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- wg_queue_enqueue_per_peer(&peer->tx_queue, first,
- PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
rcu_read_unlock_bh();
if (likely(!ret || ret == -EPIPE))
@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
packets.prev->next = NULL;
wg_peer_get(keypair->entry.peer);
PACKET_CB(packets.next)->keypair = keypair;
- wg_packet_create_data(packets.next);
+ wg_packet_create_data(peer, packets.next);
return;
out_invalid:
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 410b318e57fb..d9ad850daa79 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
- *(__force __be32 *)&endpoint->src_if4 = 0;
+ endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
@@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
rt->dst.dev->ifindex != endpoint->src_if4)))) {
endpoint->src4.s_addr = 0;
- *(__force __be32 *)&endpoint->src_if4 = 0;
+ endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
@@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
ip_rt_put(rt);
rt = ip_route_output_flow(sock_net(sock), &fl, sock);
}
- if (unlikely(IS_ERR(rt))) {
+ if (IS_ERR(rt)) {
ret = PTR_ERR(rt);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
@@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
}
dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
NULL);
- if (unlikely(IS_ERR(dst))) {
+ if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7a364eca46d6..f083fb9038c3 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
void ath_hw_setbssidmask(struct ath_common *common);
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
int ath_key_config(struct ath_common *common,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
bool ath_hw_keyreset(struct ath_common *common, u16 entry);
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
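Changing ath_key_delete() to take the hardware key index, together with exporting ath_hw_keysetmac(), lets drivers defer key-cache deletion without holding on to the ieee80211_key_conf, which mac80211 may free first. A hedged caller sketch (the caching step is an assumption drawn from the new prototype):

    /* Sketch: remember the index while the key conf is still valid,
     * then delete by index later without touching the conf again.
     */
    u8 hw_key_idx = key->hw_key_idx;

    ath_key_delete(common, hw_key_idx);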
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 05a61975c83f..869524852fba 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -626,7 +626,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
- napi_enable(&ar->napi);
+ ath10k_core_napi_enable(ar);
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
@@ -644,8 +644,7 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
ath10k_ahb_irq_disable(ar);
synchronize_irq(ar_ahb->irq);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
+ ath10k_core_napi_sync_disable(ar);
ath10k_pci_flush(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index eeb6ff6aa2e1..2f9be182fbfb 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -90,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = true,
+ .dynamic_sar_support = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -124,6 +125,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = true,
+ .dynamic_sar_support = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -159,6 +161,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -189,6 +192,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
+ .dynamic_sar_support = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -223,6 +227,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -257,6 +262,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -291,6 +297,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -329,6 +336,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
+ .dynamic_sar_support = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -369,6 +377,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -416,6 +425,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -460,6 +470,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -494,6 +505,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -530,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -557,6 +570,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
+ .dynamic_sar_support = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -598,6 +612,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -625,6 +640,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = false,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
+ .dynamic_sar_support = true,
},
};
@@ -2305,6 +2321,31 @@ void ath10k_core_start_recovery(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_start_recovery);
+void ath10k_core_napi_enable(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
+ return;
+
+ napi_enable(&ar->napi);
+ set_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
+}
+EXPORT_SYMBOL(ath10k_core_napi_enable);
+
+void ath10k_core_napi_sync_disable(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
+ return;
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+ clear_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
+}
+EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
+
static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
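The two helpers make NAPI enable/disable idempotent by tracking state in ATH10K_FLAG_NAPI_ENABLED under conf_mutex, so every bus backend (AHB, PCI, SDIO and SNOC below) can call them from stop paths that may run twice or after a failed start. Usage sketch, with locking per the lockdep asserts above:

    mutex_lock(&ar->conf_mutex);
    ath10k_core_napi_enable(ar);        /* no-op if already enabled */
    /* ... device runs ... */
    ath10k_core_napi_sync_disable(ar);  /* waits out in-flight polls, then
                                         * disables; no-op if already off */
    mutex_unlock(&ar->conf_mutex);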
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 51f7e960e297..648ed36f845f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -868,6 +868,9 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during recovery process and not complete */
ATH10K_FLAG_RESTARTING,
+
+ /* protected by conf_mutex */
+ ATH10K_FLAG_NAPI_ENABLED,
};
enum ath10k_cal_mode {
@@ -1016,7 +1019,6 @@ struct ath10k {
enum ath10k_hw_rev hw_rev;
u16 dev_id;
u32 chip_id;
- enum ath10k_dev_type dev_type;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -1293,6 +1295,9 @@ struct ath10k {
bool coex_support;
int coex_gpio_pin;
+ s32 tx_power_2g_limit;
+ s32 tx_power_5g_limit;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1308,6 +1313,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
extern unsigned long ath10k_coredump_mask;
+void ath10k_core_napi_sync_disable(struct ath10k *ar);
+void ath10k_core_napi_enable(struct ath10k *ar);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 997c1c80aba7..0af787f49b33 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -34,6 +34,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_QMI = 0x00200000,
+ ATH10K_DBG_STA = 0x00400000,
ATH10K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 31df6dd04bf6..0a37be6a7d33 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -449,6 +449,10 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
}
ep = &htc->endpoint[eid];
+ if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
+ ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
+ goto out;
+ }
payload_len = __le16_to_cpu(hdr->len);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index cad59494f175..956157946106 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -2241,7 +2241,7 @@ struct htt_rx_chan_info {
* Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
* rounded up to a cache line size.
*/
-#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_BUF_SIZE 2048
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
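Raising HTT_RX_BUF_SIZE from 1920 to 2048 makes the buffer a power of two with extra headroom for the largest MSDU after the RX descriptor, while staying a whole number of cache lines. A hypothetical sanity check, not part of the patch:

    /* Hypothetical: the buffer must be a whole number of cache lines. */
    static_assert(HTT_RX_BUF_SIZE % L1_CACHE_BYTES == 0);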
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 9c4e6cf2137a..1a08156d5011 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2781,13 +2781,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx addba tid %hu peer_id %hu size %hhu\n",
+ "htt rx addba tid %u peer_id %u size %u\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@@ -2802,7 +2802,7 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+ "htt rx start rx ba session sta %pM tid %u size %u\n",
peer->addr, tid, ev->window_size);
ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
@@ -2821,13 +2821,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx delba tid %hu peer_id %hu\n",
+ "htt rx delba tid %u peer_id %u\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@@ -2842,7 +2842,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx stop rx ba session sta %pM tid %hu\n",
+ "htt rx stop rx ba session sta %pM tid %u\n",
peer->addr, tid);
ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
@@ -3102,7 +3102,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
num_records, num_resp_ids,
le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
@@ -3127,12 +3127,12 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
max_num_msdus = le16_to_cpu(record->num_msdus);
max_num_bytes = le32_to_cpu(record->num_bytes);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
i, peer_id, tid, max_num_msdus, max_num_bytes);
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
- ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@@ -3146,7 +3146,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
*/
if (unlikely(!txq)) {
- ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@@ -3259,7 +3259,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+ "htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
info0, info1, enable, num_records, mode, threshold);
len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
@@ -3296,7 +3296,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
- ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@@ -3310,7 +3310,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
*/
if (unlikely(!txq)) {
- ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@@ -3348,7 +3348,7 @@ static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
return i;
}
- ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
+ ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
return -EINVAL;
}
@@ -3502,13 +3502,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
return;
if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
- ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
+ ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
return;
}
if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
(txrate.mcs > 7 || txrate.nss < 1)) {
- ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
+ ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
txrate.mcs, txrate.nss);
return;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 1fc0a312ab58..d6b8bdcef416 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -72,7 +72,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
- ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+ ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
peer_id, tid);
return;
}
@@ -81,7 +81,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
peer_id, tid, count);
}
@@ -213,7 +213,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
lockdep_assert_held(&htt->tx_lock);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);
idr_remove(&htt->pending_tx, msdu_id);
}
@@ -507,7 +507,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
- ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);
tx_done.msdu_id = msdu_id;
tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
@@ -569,6 +569,8 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
desc_hdr = (struct htt_data_tx_desc *)
(skb->data + sizeof(*htt_hdr));
flags1 = __le16_to_cpu(desc_hdr->flags1);
+ skb_pull(skb, sizeof(struct htt_cmd_hdr));
+ skb_pull(skb, sizeof(struct htt_data_tx_desc));
}
}
@@ -1557,7 +1559,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
@@ -1766,7 +1768,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index c6ded21f5ed6..6b03c7787e36 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -623,6 +623,8 @@ struct ath10k_hw_params {
/* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
bool supports_peer_stats_info;
+
+ bool dynamic_sar_support;
};
struct htt_rx_desc;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 7d98250380ec..bb6c5ee43ac0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -81,6 +81,17 @@ static struct ieee80211_rate ath10k_rates_rev2[] = {
{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
+static const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = {
+ { .start_freq = 2402, .end_freq = 2494 },
+ { .start_freq = 5170, .end_freq = 5875 },
+};
+
+static const struct cfg80211_sar_capa ath10k_sar_capa = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(ath10k_sar_freq_ranges),
+ .freq_ranges = &ath10k_sar_freq_ranges[0],
+};
+
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
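Registering ath10k_sar_capa (done in ath10k_mac_register() near the end of this file's diff) advertises which frequency ranges accept SAR power limits; the position of each entry becomes the freq_range_index that userspace hands back in its SAR specs. A tiny helper sketch of that convention (not in the patch):

    /* Hypothetical: index 0 is the 2402-2494 MHz entry, index 1 the
     * 5170-5875 MHz entry, matching ath10k_sar_freq_ranges[] order.
     */
    static bool ath10k_sar_index_is_2g(u32 freq_range_index)
    {
            return freq_range_index == 0;
    }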
@@ -2177,7 +2188,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
return;
- bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
+ info->ssid_len ? info->ssid : NULL, info->ssid_len,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -2880,6 +2892,158 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
return 0;
}
+static bool ath10k_mac_is_connected(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+ int tx_power_2g, tx_power_5g;
+ bool connected;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* ath10k internally uses unit of 0.5 dBm so multiply by 2 */
+ tx_power_2g = txpower * 2;
+ tx_power_5g = txpower * 2;
+
+ connected = ath10k_mac_is_connected(ar);
+
+ if (connected && ar->tx_power_2g_limit)
+ if (tx_power_2g > ar->tx_power_2g_limit)
+ tx_power_2g = ar->tx_power_2g_limit;
+
+ if (connected && ar->tx_power_5g_limit)
+ if (tx_power_5g > ar->tx_power_5g_limit)
+ tx_power_5g = ar->tx_power_5g_limit;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n",
+ tx_power_2g, tx_power_5g);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ tx_power_2g, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ tx_power_5g, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ /* txpower not initialized yet? */
+ if (arvif->txpower == INT_MIN)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_set_sar_power(struct ath10k *ar)
+{
+ if (!ar->hw_params.dynamic_sar_support)
+ return -EOPNOTSUPP;
+
+ if (!ath10k_mac_is_connected(ar))
+ return 0;
+
+ /* if connected, then arvif->txpower must be valid */
+ return ath10k_mac_txpower_recalc(ar);
+}
+
+static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ const struct cfg80211_sar_sub_specs *sub_specs;
+ struct ath10k *ar = hw->priv;
+ u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!ar->hw_params.dynamic_sar_support) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sub_specs = sar->sub_specs;
+
+ /* 0 dBm is not a practical value for ath10k, so treat 0
+ * as meaning no SAR limitation.
+ */
+ ar->tx_power_2g_limit = 0;
+ ar->tx_power_5g_limit = 0;
+
+ /* note: the SAR power is in 0.25 dBm units, while ath10k
+ * uses 0.5 dBm units.
+ */
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sub_specs->freq_range_index == 0)
+ ar->tx_power_2g_limit = sub_specs->power / 2;
+ else if (sub_specs->freq_range_index == 1)
+ ar->tx_power_5g_limit = sub_specs->power / 2;
+
+ sub_specs++;
+ }
+
+ ret = ath10k_mac_set_sar_power(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sar power: %d", ret);
+ goto err;
+ }
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
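To make the unit juggling concrete, a worked example with hypothetical numbers (none of these values appear in the patch):

    /* Userspace requests an 11 dBm SAR cap on 2.4 GHz:
     *   sub_specs->power  = 44          (0.25 dBm units: 44 / 4 = 11 dBm)
     *   tx_power_2g_limit = 44 / 2 = 22 (0.5 dBm units)
     * mac80211 later asks for 20 dBm while a STA vif is connected:
     *   tx_power_2g = 20 * 2 = 40       (0.5 dBm units)
     *   clamped to the limit: 40 -> 22, i.e. 11 dBm on the air
     */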
/* can be called only in mac80211 callbacks due to `key_count` usage */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2968,6 +3132,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->is_up = true;
+ ath10k_mac_set_sar_power(ar);
+
/* Workaround: Some firmware revisions (tested with qca6174
* WLAN.RM.2.0-00073) have buggy powersave state machine and must be
* poked with peer param command.
@@ -3010,6 +3176,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false;
+ ath10k_mac_txpower_recalc(ar);
+
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
@@ -3763,23 +3931,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
- int ret = 0;
- spin_lock_bh(&ar->data_lock);
-
- if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+ if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
ath10k_warn(ar, "wmi mgmt tx queue is full\n");
- ret = -ENOSPC;
- goto unlock;
+ return -ENOSPC;
}
- __skb_queue_tail(q, skb);
+ skb_queue_tail(q, skb);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
-unlock:
- spin_unlock_bh(&ar->data_lock);
-
- return ret;
+ return 0;
}
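The rewrite can drop ar->data_lock because skb_queue_tail() serializes on the queue's own spinlock, and the occupancy test uses skb_queue_len_lockless(), a READ_ONCE-based reader that tolerates concurrent updates. Switching the comparison to >= keeps the bound honest even when a racing enqueue slips past the check; the queue can overshoot only briefly and by at most the number of racing CPUs. The pattern in isolation (generic sketch, not ath10k-specific):

    /* Bounded producer onto an skb queue without a caller-side lock. */
    static int bounded_enqueue(struct sk_buff_head *q, struct sk_buff *skb,
                               u32 limit)
    {
            if (skb_queue_len_lockless(q) >= limit)
                    return -ENOSPC;     /* approximate check, may race */

            skb_queue_tail(q, skb);     /* takes q->lock internally */
            return 0;
    }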
static enum ath10k_mac_tx_path
@@ -3954,9 +4115,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
spin_unlock_bh(&ar->data_lock);
if (peer)
- /* FIXME: should this use ath10k_warn()? */
- ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
- peer_addr, vdev_id);
+ ath10k_warn(ar, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
if (!peer) {
ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
@@ -5207,65 +5367,6 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
-static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
-{
- int ret;
- u32 param;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
-
- param = ar->wmi.pdev_param->txpower_limit2g;
- ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
- if (ret) {
- ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
- txpower, ret);
- return ret;
- }
-
- param = ar->wmi.pdev_param->txpower_limit5g;
- ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
- if (ret) {
- ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
- txpower, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ath10k_mac_txpower_recalc(struct ath10k *ar)
-{
- struct ath10k_vif *arvif;
- int ret, txpower = -1;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- list_for_each_entry(arvif, &ar->arvifs, list) {
- /* txpower not initialized yet? */
- if (arvif->txpower == INT_MIN)
- continue;
-
- if (txpower == -1)
- txpower = arvif->txpower;
- else
- txpower = min(txpower, arvif->txpower);
- }
-
- if (txpower == -1)
- return 0;
-
- ret = ath10k_mac_txpower_setup(ar, txpower);
- if (ret) {
- ath10k_warn(ar, "failed to setup tx power %d: %d\n",
- txpower, ret);
- return ret;
- }
-
- return 0;
-}
-
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
@@ -6565,7 +6666,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
enum wmi_phy_mode mode;
mode = chan_to_phymode(&def);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n",
sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -6584,7 +6685,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n",
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -6595,7 +6696,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n",
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -6606,7 +6707,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -7302,7 +7403,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
u32 num_tdls_stations;
- ath10k_dbg(ar, ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_STA,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
arvif->vdev_id, sta->addr,
ar->num_stations + 1, ar->max_num_stations,
@@ -7402,7 +7503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Existing station deletion.
*/
- ath10k_dbg(ar, ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_STA,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
@@ -7474,7 +7575,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, vif, sta, false);
@@ -7487,7 +7588,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Tdls station authorized.
*/
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n",
sta->addr);
ret = ath10k_station_assoc(ar, vif, sta, false);
@@ -7510,7 +7611,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, vif, sta);
@@ -8069,7 +8170,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
vdev_param = ar->wmi.vdev_param->fixed_rate;
@@ -8327,7 +8428,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
return;
}
- ath10k_dbg(ar, ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_STA,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->bandwidth, sta->rx_nss,
sta->smps_mode);
@@ -8426,7 +8527,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n",
arvif->vdev_id, sta->addr, tid, action);
switch (action) {
@@ -8522,7 +8623,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
arvif = (void *)vifs[i].vif->drv_priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
@@ -8596,7 +8697,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %hu width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8620,7 +8721,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %hu width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8685,7 +8786,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -9117,7 +9218,9 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
if (!ath10k_peer_stats_enabled(ar))
return;
+ mutex_lock(&ar->conf_mutex);
ath10k_debug_fw_stats_request(ar);
+ mutex_unlock(&ar->conf_mutex);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -9272,6 +9375,7 @@ static const struct ieee80211_ops ath10k_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
+ .set_sar_specs = ath10k_mac_set_sar_specs,
};
#define CHAN2G(_channel, _freq, _flags) { \
@@ -10009,6 +10113,9 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
+ if (ar->hw_params.dynamic_sar_support)
+ ar->hw->wiphy->sar_capa = &ath10k_sar_capa;
+
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 2328df09875c..e7fde635e0ee 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1958,7 +1958,7 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
- napi_enable(&ar->napi);
+ ath10k_core_napi_enable(ar);
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
@@ -2075,8 +2075,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
+
+ ath10k_core_napi_sync_disable(ar);
+
cancel_work_sync(&ar_pci->dump_work);
/* Most likely the device has HTT Rx ring configured. The only way to
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index c415090d1f37..b746052737e0 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1859,7 +1859,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
int ret;
- napi_enable(&ar->napi);
+ ath10k_core_napi_enable(ar);
/* Sleep 20 ms before HIF interrupts are disabled.
* This will give target plenty of time to process the BMI done
@@ -1992,8 +1992,7 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
spin_unlock_bh(&ar_sdio->wr_async_lock);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
+ ath10k_core_napi_sync_disable(ar);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index bf9a8cb713dc..d66593f0950f 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -915,8 +915,7 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
ath10k_snoc_irq_disable(ar);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
+ ath10k_core_napi_sync_disable(ar);
ath10k_snoc_buffer_cleanup(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
@@ -926,7 +925,8 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
- napi_enable(&ar->napi);
+
+ ath10k_core_napi_enable(ar);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1003,6 +1003,39 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar,
NULL);
}
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
+
+ clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
+
+ return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+}
+
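Relocating the power helpers (they are deleted from their old position further down) moves regulator/clock sequencing from probe/remove into the HIF power path, so the SoC is only powered while the interface is actually up. The resulting order, reconstructed from this diff:

    ath10k_snoc_hif_power_up()
        ath10k_hw_power_on()        /* regulators first, then clocks */
        ath10k_snoc_wlan_enable()
        ath10k_ce_alloc_rri() + ath10k_snoc_init_pipes()

    ath10k_snoc_hif_power_down()
        ath10k_snoc_wlan_disable()
        ath10k_ce_free_rri()
        ath10k_hw_power_off()       /* clocks off, then regulators */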
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1024,6 +1057,7 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar)
ath10k_snoc_wlan_disable(ar);
ath10k_ce_free_rri(ar);
+ ath10k_hw_power_off(ar);
}
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
@@ -1034,10 +1068,16 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
__func__, ar->state);
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ return ret;
+ }
+
ret = ath10k_snoc_wlan_enable(ar, fw_mode);
if (ret) {
ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
- return ret;
+ goto err_hw_power_off;
}
ath10k_ce_alloc_rri(ar);
@@ -1045,14 +1085,18 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
ret = ath10k_snoc_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
- goto err_wlan_enable;
+ goto err_free_rri;
}
return 0;
-err_wlan_enable:
+err_free_rri:
+ ath10k_ce_free_rri(ar);
ath10k_snoc_wlan_disable(ar);
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
return ret;
}
@@ -1369,39 +1413,6 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_hw_power_on(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
-
- ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
- if (ret)
- goto vreg_off;
-
- return ret;
-
-vreg_off:
- regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
- return ret;
-}
-
-static int ath10k_hw_power_off(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
-
- clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
-
- return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
-}
-
static void ath10k_msa_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
@@ -1711,22 +1722,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
if (ret)
goto err_free_irq;
- ret = ath10k_hw_power_on(ar);
- if (ret) {
- ath10k_err(ar, "failed to power on device: %d\n", ret);
- goto err_free_irq;
- }
-
ret = ath10k_setup_msa_resources(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
- goto err_power_off;
+ goto err_free_irq;
}
ret = ath10k_fw_init(ar);
if (ret) {
ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
- goto err_power_off;
+ goto err_free_irq;
}
ret = ath10k_qmi_init(ar, msa_size);
@@ -1742,9 +1747,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
err_fw_deinit:
ath10k_fw_deinit(ar);
-err_power_off:
- ath10k_hw_power_off(ar);
-
err_free_irq:
ath10k_snoc_free_irq(ar);
@@ -1772,7 +1774,6 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
ath10k_core_unregister(ar);
- ath10k_hw_power_off(ar);
ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 842e42ec814f..4714c86bb501 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -283,7 +283,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
),
TP_printk(
- "%s %s %d size %hu",
+ "%s %s %d size %u",
__get_str(driver),
__get_str(device),
__entry->hw_type,
@@ -488,7 +488,7 @@ TRACE_EVENT(ath10k_wmi_diag_container,
),
TP_printk(
- "%s %s diag container type %hhu timestamp %u code %u len %d",
+ "%s %s diag container type %u timestamp %u code %u len %d",
__get_str(driver),
__get_str(device),
__entry->type,
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index aefe1f7f906c..7c9ea0c073d8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -211,7 +211,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
- "received htt peer map event with idx out of bounds: %hu\n",
+ "received htt peer map event with idx out of bounds: %u\n",
ev->peer_id);
return;
}
@@ -247,7 +247,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
- "received htt peer unmap event with idx out of bounds: %hu\n",
+ "received htt peer unmap event with idx out of bounds: %u\n",
ev->peer_id);
return;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7b5834157fe5..d97b33f789e4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -93,7 +93,7 @@ ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
if (tlv_len > len) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@@ -102,7 +102,7 @@ ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
wmi_tlv_policies[tlv_tag].min_len &&
wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
@@ -240,8 +240,10 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
__le32_to_cpu(stat->last_tx_rate_code),
__le32_to_cpu(stat->last_tx_bitrate_kbps));
+ rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
if (!sta) {
+ rcu_read_unlock();
ath10k_warn(ar, "not found station for peer stats\n");
return -EINVAL;
}
@@ -251,6 +253,7 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
+ rcu_read_unlock();
return 0;
}
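ieee80211_find_sta_by_ifaddr() must be called under rcu_read_lock(), and the returned station is only valid inside that critical section, hence the copies into arsta before unlocking. The canonical pattern:

    rcu_read_lock();
    sta = ieee80211_find_sta_by_ifaddr(ar->hw, addr, NULL);
    if (sta) {
            /* read or copy sta-derived state here; the pointer must
             * not be used after rcu_read_unlock()
             */
    }
    rcu_read_unlock();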
@@ -421,7 +424,7 @@ static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
vdev_id = __le32_to_cpu(ev->vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
vdev_id, noa->num_descriptors);
ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
@@ -573,13 +576,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
case WMI_TDLS_TEARDOWN_REASON_TX:
case WMI_TDLS_TEARDOWN_REASON_RSSI:
case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ rcu_read_lock();
station = ieee80211_find_sta_by_ifaddr(ar->hw,
ev->peer_macaddr.addr,
NULL);
if (!station) {
ath10k_warn(ar, "did not find station from tdls peer event");
- kfree(tb);
- return;
+ goto exit;
}
arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
ieee80211_tdls_oper_request(
@@ -590,6 +593,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
);
break;
}
+
+exit:
+ rcu_read_unlock();
kfree(tb);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 1f33947e2088..d48b922215eb 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3497,7 +3497,7 @@ void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
return;
}
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
arg.mac_addr);
rcu_read_lock();
@@ -7506,7 +7506,7 @@ ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(value);
- ath10k_dbg(ar, ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_STA,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
return skb;
@@ -9551,7 +9551,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
struct sk_buff *msdu;
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "force cleanup mgmt msdu_id %hu\n", msdu_id);
+ "force cleanup mgmt msdu_id %u\n", msdu_id);
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 799bf3de1117..8d29845774df 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -200,6 +200,7 @@ struct ath11k_vif {
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
+ u16 ast_idx;
u16 tcl_metadata;
u8 hal_addr_search_flags;
u8 search_type;
@@ -875,14 +876,6 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
-void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
-void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
- u8 *mac_addr, u16 ast_hash);
-struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
- const u8 *addr);
-struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
- const u8 *addr);
-struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 9191ffa081c2..e13684343ec3 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -3845,6 +3845,18 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
htt_stats_buf->num_obss_tx_ppdu_success);
len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ htt_stats_buf->num_non_srg_opportunities);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_tried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunies = %u\n",
+ htt_stats_buf->num_srg_opportunities);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ htt_stats_buf->num_srg_ppdu_tried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n",
+ htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
buf[buf_len - 1] = 0;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 74b2086eed9d..567a26d485a9 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1656,8 +1656,19 @@ struct htt_tx_sounding_stats_tlv {
};
struct htt_pdev_obss_pd_stats_tlv {
- u32 num_obss_tx_ppdu_success;
- u32 num_obss_tx_ppdu_failure;
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+ u32 num_sr_tx_transmissions;
+ u32 num_spatial_reuse_opportunities;
+ u32 num_non_srg_opportunities;
+ u32 num_non_srg_ppdu_tried;
+ u32 num_non_srg_ppdu_success;
+ u32 num_srg_opportunities;
+ u32 num_srg_ppdu_tried;
+ u32 num_srg_ppdu_success;
+ u32 num_psr_opportunities;
+ u32 num_psr_ppdu_tried;
+ u32 num_psr_ppdu_success;
};
struct htt_ring_backpressure_stats_tlv {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..850ad38b888f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1163,7 +1163,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
}
}
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&ab->base_lock);
return ret;
}
@@ -1292,7 +1292,7 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
len -= sizeof(*tlv);
if (tlv_len > len) {
- ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@@ -1381,22 +1381,22 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
*/
if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
- ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
+ ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
- ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
+ ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
- ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
+ ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
- ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
+ ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
mcs, nss);
return;
}
@@ -1652,6 +1652,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
u8 mac_addr[ETH_ALEN];
u16 peer_mac_h16;
u16 ast_hash;
+ u16 hw_peer_id;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
@@ -1672,7 +1673,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
- ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
@@ -1685,7 +1686,10 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
resp->peer_map_ev.info2);
- ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
+ hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
+ resp->peer_map_ev.info1);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -2294,6 +2298,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2319,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 6a3fcea6c233..1a0b9be9ce6a 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -165,6 +165,7 @@ tcl_ring_sel:
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.bss_ast_hash = arvif->ast_hash;
+ ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL &&
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index a755aa86c5de..569e790d83a1 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -71,6 +71,8 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
ti->dscp_tid_tbl_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
ti->bss_ast_hash);
tcl_cmd->info4 = 0;
}
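Before this fix the AST hash was packed into the SEARCH_INDEX field and CACHE_SET_NUM was left empty; the TCL data command now carries the index and the hash in their proper fields. A self-contained illustration of the packing (the mask widths are hypothetical, only the FIELD_PREP pattern matters):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* Hypothetical field layout for illustration: */
    #define INFO3_SEARCH_INDEX   GENMASK(19, 0)
    #define INFO3_CACHE_SET_NUM  GENMASK(23, 20)

    static u32 pack_info3(u16 bss_ast_idx, u16 bss_ast_hash)
    {
            return FIELD_PREP(INFO3_SEARCH_INDEX, bss_ast_idx) |
                   FIELD_PREP(INFO3_CACHE_SET_NUM, bss_ast_hash);
    }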
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index d4760a20fdac..c291e59c3ca6 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -29,6 +29,7 @@ struct hal_tx_info {
u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
u16 bss_ast_hash;
+ u16 bss_ast_idx;
u8 tid;
u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
u8 lmac_id;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..b391169576e2 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1871,6 +1871,158 @@ static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
return ret;
}
+static int ath11k_mac_config_obss_pd(struct ath11k *ar,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ u32 bitmap[2], param_id, param_val, pdev_id;
+ int ret;
+ s8 non_srg_th = 0, srg_th = 0;
+
+ pdev_id = ar->pdev->pdev_id;
+
+ /* Set and enable SRG/non-SRG OBSS PD Threshold */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
+ if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n",
+ he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
+ he_obss_pd->max_offset);
+
+ param_val = 0;
+
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
+ non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD;
+ } else {
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
+ he_obss_pd->non_srg_max_offset);
+ else
+ non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
+
+ param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
+ }
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+ srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
+ param_val |= ATH11K_OBSS_PD_SRG_EN;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
+ param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
+ } else {
+ non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
+ /* SRG not supported and threshold in dB */
+ param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
+ ATH11K_OBSS_PD_THRESHOLD_IN_DBM);
+ }
+
+ param_val |= (non_srg_th & GENMASK(7, 0));
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable OBSS PD for all access categories */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
+ param_val = 0xf;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_per_ac for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SR Prohibit */
+ param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
+ param_val = !!(he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ /* Set SRG BSS Color Bitmap */
+ memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set bss_color_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SRG Partial BSSID Bitmap */
+ memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set partial_bssid_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ memset(bitmap, 0xff, sizeof(bitmap));
+
+ /* Enable all BSS Colors for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID masks for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all BSS Colors for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID masks for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
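A worked pass through the threshold math, using assumed values for constants that are not shown in this diff (-82 dBm max threshold and a -95 dBm default noise floor are illustrative assumptions):

    /* Beacon advertises non_srg_max_offset = 10, no SRG info:
     *   non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + 10
     *              = -82 + 10 = -72 dBm
     * Firmware without SRG/SRP support expects dB above noise floor:
     *   non_srg_th = -72 - (-95) = 23 dB
     * param_val then carries 23 in bits 7:0 (or -72 in dBm mode),
     * with srg_th in bits 15:8 when SRG is enabled.
     */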
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -2114,8 +2266,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
- ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
- &info->he_obss_pd);
+ ath11k_mac_config_obss_pd(ar, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if (vif->type == NL80211_IFTYPE_AP) {
@@ -3021,6 +3172,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -4247,11 +4399,6 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* Configure the hash seed for hash based reo dest ring selection */
ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
- mutex_unlock(&ar->conf_mutex);
-
- rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
- &ab->pdevs[ar->pdev_idx]);
-
/* allow device to enter IMPS */
if (ab->hw_params.idle_ps) {
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
@@ -4261,6 +4408,12 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
}
+
+ mutex_unlock(&ar->conf_mutex);
+
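+ /* Publish the pdev as active only once configuration under
+ * conf_mutex has completed.
+ */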
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
return 0;
err:
@@ -4848,7 +5001,7 @@ static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac chanctx add freq %hu width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -4872,7 +5025,7 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac chanctx remove freq %hu width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -5116,7 +5269,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
arvif = (void *)vifs[i].vif->drv_priv;
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
@@ -5213,7 +5366,7 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -5284,7 +5437,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
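+ /* no bss peer yet: defer vdev start until the peer is created */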
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5449,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
@@ -5579,7 +5735,7 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
@@ -6356,17 +6512,20 @@ static int __ath11k_mac_register(struct ath11k *ar)
ret = ath11k_regd_update(ar, true);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
- goto err_free_if_combs;
+ goto err_unregister_hw;
}
ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
- goto err_free_if_combs;
+ goto err_unregister_hw;
}
return 0;
+err_unregister_hw:
+ ieee80211_unregister_hw(ar->hw);
+
err_free_if_combs:
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 597104a9078d..455577905505 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -116,6 +116,12 @@ struct ath11k_generic_iter {
#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
+#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
+#define ATH11K_OBSS_PD_THRESHOLD_IN_DBM BIT(29)
+#define ATH11K_OBSS_PD_SRG_EN BIT(30)
+#define ATH11K_OBSS_PD_NON_SRG_EN BIT(31)
+
extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
void ath11k_mac_destroy(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..d14416816acc 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -328,7 +328,7 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
- val |= GCC_GCC_PCIE_HOT_RST_VAL | 0x10;
+ val |= GCC_GCC_PCIE_HOT_RST_VAL;
ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
@@ -1050,8 +1086,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
int ret;
- dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
-
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
&ath11k_pci_bus_params);
if (!ab) {
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..f49abefa9618 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
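+/* Find any peer bound to vdev_id. Takes ab->base_lock internally, so
+ * callers that dereference the returned peer must re-validate it under
+ * the lock.
+ */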
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
@@ -101,7 +118,7 @@ exit:
}
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
- u8 *mac_addr, u16 ast_hash)
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
struct ath11k_peer *peer;
@@ -115,6 +132,7 @@ void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
peer->vdev_id = vdev_id;
peer->peer_id = peer_id;
peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
ether_addr_copy(peer->addr, mac_addr);
list_add(&peer->list, &ab->peers);
wake_up(&ab->peer_mapping_wq);
@@ -292,7 +310,11 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
- arvif->ast_hash = peer->ast_hash;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..619db001be8e 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -14,6 +14,7 @@ struct ath11k_peer {
int peer_id;
u16 ast_hash;
u8 pdev_idx;
+ u16 hw_peer_id;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
@@ -31,7 +32,7 @@ struct ath11k_peer {
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
- u8 *mac_addr, u16 ast_hash);
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
@@ -43,5 +44,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..1aca841cd147 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,17 +1673,24 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
req->mem_seg[i].size = ab->qmi.target_mem[i].size;
req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi req mem_seg[%d] 0x%llx %u %u\n", i,
+ ab->qmi.target_mem[i].paddr,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].type);
}
}
@@ -1708,6 +1716,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1756,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1765,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
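+ /* A small firmware request (at most two large segments) that
+ * fails to allocate is deferred so firmware can re-request the
+ * memory as multiple smaller chunks.
+ */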
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2542,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b876fec7fa1b..e1a1df169034 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -247,7 +247,9 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
}
rtnl_lock();
- ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
+ wiphy_lock(ar->hw->wiphy);
+ ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
+ wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
kfree(regd_copy);
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 66d0aae7816c..d2d2a3cb0826 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -43,7 +43,7 @@ TRACE_EVENT(ath11k_htt_pktlog,
),
TP_printk(
- "%s %s size %hu pktlog_checksum %d",
+ "%s %s size %u pktlog_checksum %d",
__get_str(driver),
__get_str(device),
__entry->buf_len,
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..cccfd3bd4d27 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -169,7 +169,7 @@ ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
len -= sizeof(*tlv);
if (tlv_len > len) {
- ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@@ -177,7 +177,7 @@ ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
wmi_tlv_policies[tlv_tag].min_len &&
wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
- ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
@@ -2971,6 +2971,233 @@ ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
}
int
+ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd pdev_id %d bss color bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd pdev_id %d partial bssid bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable)
@@ -3460,6 +3687,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 993674228c9e..3ade1ddd35c9 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -257,6 +257,16 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_RAP_CONFIG_CMDID,
+ WMI_PDEV_DSM_FILTER_CMDID,
+ WMI_PDEV_FRAME_INJECT_CMDID,
+ WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -919,6 +929,9 @@ enum wmi_tlv_pdev_param {
WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+ WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
};
enum wmi_tlv_vdev_param {
@@ -1812,10 +1825,15 @@ enum wmi_tlv_tag {
WMI_TAG_NDP_CHANNEL_INFO,
WMI_TAG_NDP_CMD,
WMI_TAG_NDP_EVENT,
- /* TODO add all the missing cmds */
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_MAX
};
@@ -2039,6 +2057,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
WMI_MAX_EXT_SERVICE
};
@@ -4781,6 +4800,12 @@ struct wmi_obss_spatial_reuse_params_cmd {
u32 vdev_id;
} __packed;
+struct wmi_pdev_obss_pd_bitmap_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 bitmap[2];
+} __packed;
+
#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
@@ -5316,6 +5341,16 @@ int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
+int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 8f2719ff463c..532eeac9e83e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -522,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 9c83e9a4299b..29527e8dcced 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3648,7 +3648,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
kfree(mc_filter);
}
- unregister_netdevice(vif->ndev);
+ cfg80211_unregister_netdevice(vif->ndev);
ar->num_vif--;
}
@@ -3821,7 +3821,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
netdev_set_default_ethtool_ops(ndev, &ath6kl_ethtool_ops);
- if (register_netdevice(ndev))
+ if (cfg80211_register_netdevice(ndev))
goto err;
ar->avail_idx_map &= ~BIT(fw_vif_idx);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index ebb9f163710f..4f0a7a185fc9 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -212,11 +212,13 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
ar->avail_idx_map |= BIT(i);
rtnl_lock();
+ wiphy_lock(ar->wiphy);
/* Add an initial station interface */
wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, 0, INFRA_NETWORK);
+ wiphy_unlock(ar->wiphy);
rtnl_unlock();
if (!wdev) {
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 39bf19686175..9b5c7d8f2b95 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1904,7 +1904,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
spin_unlock_bh(&ar->list_lock);
ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
+ wiphy_lock(ar->wiphy);
ath6kl_cfg80211_vif_cleanup(vif);
+ wiphy_unlock(ar->wiphy);
rtnl_unlock();
spin_lock_bh(&ar->list_lock);
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index a84bb9b6573f..e150d82eddb6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 017a43bc400c..4c81b1d7f417 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
ah->nf_override = val;
- if (ah->curchan)
+ if (ah->curchan) {
+ ath9k_ps_wakeup(sc);
ath9k_hw_loadnf(ah, ah->curchan);
+ ath9k_ps_restore(sc);
+ }
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 2b7832b1c800..72ef319feeda 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1461,7 +1461,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 023599e10dd5..b7b65b1c90e8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -820,6 +820,7 @@ struct ath_hw {
struct ath9k_pacal_info pacal_info;
struct ar5416Stats stats;
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
+ DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);
enum ath9k_int imask;
u32 imrs2_reg;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index caebe3fd6869..45f6402478b5 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -821,12 +821,80 @@ exit:
ieee80211_free_txskb(hw, skb);
}
+static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_info *txinfo;
+ struct ath_frame_info *fi;
+
+ list_for_each_entry(bf, txq_list, list) {
+ if (bf->bf_state.stale || !bf->bf_mpdu)
+ continue;
+
+ txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
+ fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ if (fi->keyix == keyix)
+ return true;
+ }
+
+ return false;
+}
+
+static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int i;
+ struct ath_txq *txq;
+ bool key_in_use = false;
+
+ for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+ txq = &sc->tx.txq[i];
+ if (!txq->axq_depth)
+ continue;
+ if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
+ continue;
+
+ ath_txq_lock(sc, txq);
+ key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ int idx = txq->txq_tailidx;
+
+ while (!key_in_use &&
+ !list_empty(&txq->txq_fifo[idx])) {
+ key_in_use = ath9k_txq_list_has_key(
+ &txq->txq_fifo[idx], keyix);
+ INCR(idx, ATH_TXFIFO_DEPTH);
+ }
+ }
+ ath_txq_unlock(sc, txq);
+ }
+
+ return key_in_use;
+}
+
+static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!test_bit(keyix, ah->pending_del_keymap) ||
+ ath9k_txq_has_key(sc, keyix))
+ return;
+
+ /* No more TXQ frames point to this key cache entry, so delete it. */
+ clear_bit(keyix, ah->pending_del_keymap);
+ ath_key_delete(common, keyix);
+}
+
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
bool prev_idle;
+ int i;
ath9k_deinit_channel_context(sc);
@@ -894,6 +962,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
+ for (i = 0; i < ATH_KEYMAX; i++)
+ ath9k_pending_key_del(sc, i);
+
+ /* Clear key cache entries explicitly to get rid of any potentially
+ * remaining keys.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
ath9k_ps_restore(sc);
sc->ps_idle = prev_idle;
@@ -1538,12 +1614,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_node *an = (struct ath_node *) sta->drv_priv;
- struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
if (!an->ps_key)
return;
- ath_key_delete(common, &ps_key);
+ ath_key_delete(common, an->ps_key);
an->ps_key = 0;
an->key_idx[0] = 0;
}
@@ -1714,6 +1789,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (sta)
an = (struct ath_node *)sta->drv_priv;
+ /* Delete pending key cache entries if no more frames are pointing to
+ * them in TXQs.
+ */
+ for (i = 0; i < ATH_KEYMAX; i++)
+ ath9k_pending_key_del(sc, i);
+
switch (cmd) {
case SET_KEY:
if (sta)
@@ -1743,7 +1824,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
+ /* Delay key cache entry deletion until there are no
+ * remaining TXQ frames pointing to this entry.
+ */
+ set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
+ ath_hw_keysetmac(common, key->hw_key_idx, NULL);
+ } else {
+ ath_key_delete(common, key->hw_key_idx);
+ }
if (an) {
for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
if (an->key_idx[i] != key->hw_key_idx)
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 56999a3b9d3b..4a500095555c 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -240,7 +240,7 @@ struct carl9170_cmd {
struct carl9170_bcn_ctrl_cmd bcn_ctrl;
struct carl9170_rx_filter_cmd rx_filter;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
- } __packed;
+ } __packed __aligned(4);
} __packed __aligned(4);
#define CARL9170_TX_STATUS_QUEUE 3
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index ea17995b32f4..bb73553fd7c2 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -367,27 +367,27 @@ struct ar9170_rx_macstatus {
struct ar9170_rx_frame_single {
struct ar9170_rx_head phy_head;
- struct ieee80211_hdr i3e;
+ struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_phystatus phy_tail;
struct ar9170_rx_macstatus macstatus;
-} __packed;
+};
struct ar9170_rx_frame_head {
struct ar9170_rx_head phy_head;
- struct ieee80211_hdr i3e;
+ struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_macstatus macstatus;
-} __packed;
+};
struct ar9170_rx_frame_middle {
- struct ieee80211_hdr i3e;
+ struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_macstatus macstatus;
-} __packed;
+};
struct ar9170_rx_frame_tail {
- struct ieee80211_hdr i3e;
+ struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_phystatus phy_tail;
struct ar9170_rx_macstatus macstatus;
-} __packed;
+};
struct ar9170_rx_frame {
union {
@@ -395,8 +395,8 @@ struct ar9170_rx_frame {
struct ar9170_rx_frame_head head;
struct ar9170_rx_frame_middle middle;
struct ar9170_rx_frame_tail tail;
- } __packed;
-} __packed;
+ };
+};
static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
{
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 1816b4e7dc26..61b59a804e30 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
}
EXPORT_SYMBOL(ath_hw_keyreset);
-static bool ath_hw_keysetmac(struct ath_common *common,
- u16 entry, const u8 *mac)
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,
return true;
}
+EXPORT_SYMBOL(ath_hw_keysetmac);
static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
const struct ath_keyval *k,
@@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
/*
* Delete Key.
*/
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
{
- ath_hw_keyreset(common, key->hw_key_idx);
- if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ /* Leave CCMP and TKIP (main key) configured to avoid disabling
+ * encryption for potentially pending frames already in a TXQ with the
+ * keyix pointing to this key entry. Instead, only clear the MAC address
+ * to prevent RX processing from using this key cache entry.
+ */
+ if (test_bit(hw_key_idx, common->ccmp_keymap) ||
+ test_bit(hw_key_idx, common->tkip_keymap))
+ ath_hw_keysetmac(common, hw_key_idx, NULL);
+ else
+ ath_hw_keyreset(common, hw_key_idx);
+ if (hw_key_idx < IEEE80211_WEP_NKID)
return;
- clear_bit(key->hw_key_idx, common->keymap);
- clear_bit(key->hw_key_idx, common->ccmp_keymap);
- if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ clear_bit(hw_key_idx, common->keymap);
+ clear_bit(hw_key_idx, common->ccmp_keymap);
+ if (!test_bit(hw_key_idx, common->tkip_keymap))
return;
- clear_bit(key->hw_key_idx + 64, common->keymap);
+ clear_bit(hw_key_idx + 64, common->keymap);
- clear_bit(key->hw_key_idx, common->tkip_keymap);
- clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
+ clear_bit(hw_key_idx, common->tkip_keymap);
+ clear_bit(hw_key_idx + 64, common->tkip_keymap);
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
- ath_hw_keyreset(common, key->hw_key_idx + 32);
- clear_bit(key->hw_key_idx + 32, common->keymap);
- clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ ath_hw_keyreset(common, hw_key_idx + 32);
+ clear_bit(hw_key_idx + 32, common->keymap);
+ clear_bit(hw_key_idx + 64 + 32, common->keymap);
- clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
- clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
+ clear_bit(hw_key_idx + 32, common->tkip_keymap);
+ clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath_key_delete);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 5867bd9c2f64..afb4877eaad8 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1140,7 +1140,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
session);
break;
case IEEE80211_AMPDU_RX_STOP:
- wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_START:
spin_lock_bh(&sta_priv->ampdu_lock);
@@ -1164,6 +1164,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
spin_unlock_bh(&sta_priv->ampdu_lock);
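+ /* tear down the TX (originator) side of the session; the RX
+ * teardown above passes direction 0
+ */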
+ wcn36xx_smd_del_ba(wcn, tid, 1, get_sta_index(vif, sta_priv));
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
default:
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 5445277dd8de..d0c3a1557e8d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -485,7 +485,6 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
#define PREPARE_HAL_PTT_MSG_BUF(send_buf, p_msg_body) \
do { \
- memset(send_buf, 0, p_msg_body->header.len); \
memcpy(send_buf, p_msg_body, p_msg_body->header.len); \
} while (0)
@@ -2467,7 +2466,7 @@ out:
return ret;
}
-int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
int ret;
@@ -2477,7 +2476,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
msg_body.sta_index = sta_index;
msg_body.tid = tid;
- msg_body.direction = 0;
+ msg_body.direction = direction;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index b1d8083d9d9d..462860572e1f 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -135,7 +135,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 direction,
u8 sta_index);
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
-int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 1c42410d68e1..6746fd206d2a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -441,7 +441,9 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
int rc;
- u8 txflag = RATE_INFO_FLAGS_DMG;
+ u8 tx_mcs, rx_mcs;
+ u8 tx_rate_flag = RATE_INFO_FLAGS_DMG;
+ u8 rx_rate_flag = RATE_INFO_FLAGS_DMG;
memset(&reply, 0, sizeof(reply));
@@ -451,13 +453,15 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
if (rc)
return rc;
+ tx_mcs = le16_to_cpu(reply.evt.bf_mcs);
+
wil_dbg_wmi(wil, "Link status for CID %d MID %d: {\n"
- " MCS %d TSF 0x%016llx\n"
+ " MCS %s TSF 0x%016llx\n"
" BF status 0x%08x RSSI %d SQI %d%%\n"
" Tx Tpt %d goodput %d Rx goodput %d\n"
" Sectors(rx:tx) my %d:%d peer %d:%d\n"
" Tx mode %d}\n",
- cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs),
+ cid, vif->mid, WIL_EXTENDED_MCS_CHECK(tx_mcs),
le64_to_cpu(reply.evt.tsf), reply.evt.status,
reply.evt.rssi,
reply.evt.sqi,
@@ -481,12 +485,30 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG)
- txflag = RATE_INFO_FLAGS_EDMG;
+ if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG) {
+ tx_rate_flag = RATE_INFO_FLAGS_EDMG;
+ rx_rate_flag = RATE_INFO_FLAGS_EDMG;
+ }
+
+ rx_mcs = stats->last_mcs_rx;
+
+ /* check extended MCS (12.1) and convert it into
+ * base MCS (7) + EXTENDED_SC_DMG flag
+ */
+ if (tx_mcs == WIL_EXTENDED_MCS_26) {
+ tx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
+ tx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
+ }
+ if (rx_mcs == WIL_EXTENDED_MCS_26) {
+ rx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
+ rx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
+ }
+
+ sinfo->txrate.flags = tx_rate_flag;
+ sinfo->rxrate.flags = rx_rate_flag;
+ sinfo->txrate.mcs = tx_mcs;
+ sinfo->rxrate.mcs = rx_mcs;
- sinfo->txrate.flags = txflag;
- sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
- sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->txrate.n_bonded_ch =
wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
sinfo->rxrate.n_bonded_ch =
@@ -2820,7 +2842,9 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
if (p2p_wdev) {
+ wiphy_lock(wil->wiphy);
cfg80211_unregister_wdev(p2p_wdev);
+ wiphy_unlock(wil->wiphy);
kfree(p2p_wdev);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 2d618f90afa7..4c944e595978 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1294,6 +1294,7 @@ static int bf_show(struct seq_file *s, void *data)
for (i = 0; i < wil->max_assoc_sta; i++) {
u32 status;
+ u8 bf_mcs;
cmd.cid = i;
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
@@ -1305,9 +1306,10 @@ static int bf_show(struct seq_file *s, void *data)
continue;
status = le32_to_cpu(reply.evt.status);
+ bf_mcs = le16_to_cpu(reply.evt.bf_mcs);
seq_printf(s, "CID %d {\n"
" TSF = 0x%016llx\n"
- " TxMCS = %2d TxTpt = %4d\n"
+ " TxMCS = %s TxTpt = %4d\n"
" SQI = %4d\n"
" RSSI = %4d\n"
" Status = 0x%08x %s\n"
@@ -1316,7 +1318,7 @@ static int bf_show(struct seq_file *s, void *data)
"}\n",
i,
le64_to_cpu(reply.evt.tsf),
- le16_to_cpu(reply.evt.bf_mcs),
+ WIL_EXTENDED_MCS_CHECK(bf_mcs),
le32_to_cpu(reply.evt.tx_tpt),
reply.evt.sqi,
reply.evt.rssi,
@@ -1443,8 +1445,10 @@ static int link_show(struct seq_file *s, void *data)
if (rc)
goto out;
- seq_printf(s, " Tx_mcs = %d\n", sinfo->txrate.mcs);
- seq_printf(s, " Rx_mcs = %d\n", sinfo->rxrate.mcs);
+ seq_printf(s, " Tx_mcs = %s\n",
+ WIL_EXTENDED_MCS_CHECK(sinfo->txrate.mcs));
+ seq_printf(s, " Rx_mcs = %s\n",
+ WIL_EXTENDED_MCS_CHECK(sinfo->rxrate.mcs));
seq_printf(s, " SQ = %d\n", sinfo->signal);
} else {
seq_puts(s, " INVALID MID\n");
@@ -1848,7 +1852,7 @@ static void wil_link_stats_print_basic(struct wil6210_vif *vif,
snprintf(per, sizeof(per), "%d%%", basic->per_average);
seq_printf(s, "CID %d {\n"
- "\tTxMCS %d TxTpt %d\n"
+ "\tTxMCS %s TxTpt %d\n"
"\tGoodput(rx:tx) %d:%d\n"
"\tRxBcastFrames %d\n"
"\tRSSI %d SQI %d SNR %d PER %s\n"
@@ -1856,7 +1860,8 @@ static void wil_link_stats_print_basic(struct wil6210_vif *vif,
"\tSectors(rx:tx) my %d:%d peer %d:%d\n"
"}\n",
basic->cid,
- basic->bf_mcs, le32_to_cpu(basic->tx_tpt),
+ WIL_EXTENDED_MCS_CHECK(basic->bf_mcs),
+ le32_to_cpu(basic->tx_tpt),
le32_to_cpu(basic->rx_goodput),
le32_to_cpu(basic->tx_goodput),
le32_to_cpu(basic->rx_bcast_frames),
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 07b4a252a23c..0913f0bf60e7 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -424,7 +424,7 @@ int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif)
if (rc)
return rc;
}
- rc = register_netdevice(ndev);
+ rc = cfg80211_register_netdevice(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
if (any_active && vif->mid != 0)
@@ -473,7 +473,9 @@ int wil_if_add(struct wil6210_priv *wil)
wil_update_net_queues_bh(wil, vif, NULL, true);
rtnl_lock();
+ wiphy_lock(wiphy);
rc = wil_vif_add(wil, vif);
+ wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
goto out_wiphy;
@@ -511,7 +513,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
/* during unregister_netdevice cfg80211_leave may perform operations
* such as stop AP, disconnect, so we only clear the VIF afterwards
*/
- unregister_netdevice(ndev);
+ cfg80211_unregister_netdevice(ndev);
if (any_active && vif->mid != 0)
wmi_port_delete(wil, vif->mid);
@@ -543,15 +545,18 @@ void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
wil_dbg_misc(wil, "if_remove\n");
rtnl_lock();
+ wiphy_lock(wiphy);
wil_vif_remove(wil, 0);
+ wiphy_unlock(wiphy);
rtnl_unlock();
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
- wiphy_unregister(wdev->wiphy);
+ wiphy_unregister(wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c174323c5c0b..ce40d94909ad 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -473,8 +473,10 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil6210_debugfs_remove(wil);
rtnl_lock();
+ wiphy_lock(wil->wiphy);
wil_p2p_wdev_free(wil);
wil_remove_all_additional_vifs(wil);
+ wiphy_unlock(wil->wiphy);
rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 8ca2ce51c83e..201c8c35e0c9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1026,6 +1026,8 @@ skipping:
stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
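+ /* extended MCS 12.1 is accounted under its base MCS (7) bucket */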
+ else if (stats->last_mcs_rx == WIL_EXTENDED_MCS_26)
+ stats->rx_per_mcs[WIL_BASE_MCS_FOR_EXTENDED_26]++;
stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5dc881d3c057..30392eb1cbbd 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -89,6 +89,9 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
#define WIL6210_MAX_STATUS_RINGS (8)
#define WIL_WMI_CALL_GENERAL_TO_MS 100
+#define WIL_EXTENDED_MCS_26 (26) /* FW reports MCS 12.1 to driver as "26" */
+#define WIL_BASE_MCS_FOR_EXTENDED_26 (7) /* MCS 7 is base MCS for MCS 12.1 */
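+/* Note: WIL_EXTENDED_MCS_CHECK() stringifies its argument in the fallback
+ * arm, so a non-extended value prints as the argument token rather than
+ * its numeric value unless a literal is passed.
+ */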
+#define WIL_EXTENDED_MCS_CHECK(x) (((x) == WIL_EXTENDED_MCS_26) ? "12.1" : #x)
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 8699f8279a8b..823ec6e78a22 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -851,9 +851,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
d_status = le16_to_cpu(data->info.status);
fc = rx_mgmt_frame->frame_control;
- wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n",
- data->info.channel, data->info.mcs, data->info.rssi,
- data->info.sqi);
+ wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %s RSSI %d SQI %d%%\n",
+ data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
+ data->info.rssi, data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@@ -1422,8 +1422,9 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
else
signal = data->info.sqi;
- wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
- data->info.channel, data->info.mcs, data->info.rssi);
+ wil_dbg_wmi(wil, "sched scan result: channel %d MCS %s RSSI %d\n",
+ data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
+ data->info.rssi);
wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
d_len, data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 404257800033..7582761c61e2 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -101,7 +101,7 @@ do { \
static uint at76_debug = DBG_DEFAULTS;
/* Protect against concurrent firmware loading and parsing */
-static struct mutex fw_mutex;
+static DEFINE_MUTEX(fw_mutex);
static struct fwentry firmwares[] = {
[0] = { "" },
@@ -2572,8 +2572,6 @@ static int __init at76_mod_init(void)
printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " loading\n");
- mutex_init(&fw_mutex);
-
/* register this driver with the USB subsystem */
result = usb_register(&at76_driver);
if (result < 0)
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index b669dff24b6e..665b737fbb0d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -5311,7 +5311,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
for (i = 0; i < 4; i++) {
if (dev->phy.rev >= 3)
- table[i] = coef[i];
+ coef[i] = table[i];
else
coef[i] = 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0ee421f30aa2..f4405d7861b6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5196,6 +5196,48 @@ exit:
return err;
}
+static int brcmf_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
+ struct net_device *ndev,
+ s32 rssi_low, s32 rssi_high)
+{
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
+ int err = 0;
+
+ brcmf_dbg(TRACE, "low=%d high=%d", rssi_low, rssi_high);
+
+ ifp = netdev_priv(ndev);
+ vif = ifp->vif;
+
+ if (rssi_low != vif->cqm_rssi_low || rssi_high != vif->cqm_rssi_high) {
+ /* The firmware will send an event when the RSSI is less than or
+ * equal to a configured level and the previous RSSI event was
+ * less than or equal to a different level. Set a third level
+ * so that we also detect the transition from rssi <= rssi_high
+ * to rssi > rssi_high.
+ */
+ struct brcmf_rssi_event_le config = {
+ .rate_limit_msec = cpu_to_le32(0),
+ .rssi_level_num = 3,
+ .rssi_levels = {
+ clamp_val(rssi_low, S8_MIN, S8_MAX - 2),
+ clamp_val(rssi_high, S8_MIN + 1, S8_MAX - 1),
+ S8_MAX,
+ },
+ };
+
+ err = brcmf_fil_iovar_data_set(ifp, "rssi_event", &config,
+ sizeof(config));
+ if (err) {
+ err = -EINVAL;
+ } else {
+ vif->cqm_rssi_low = rssi_low;
+ vif->cqm_rssi_high = rssi_high;
+ }
+ }
+
+ return err;
+}
+
static int
brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -5502,6 +5544,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.update_mgmt_frame_registrations =
brcmf_cfg80211_update_mgmt_frame_registrations,
.mgmt_tx = brcmf_cfg80211_mgmt_tx,
+ .set_cqm_rssi_range_config = brcmf_cfg80211_set_cqm_rssi_range_config,
.remain_on_channel = brcmf_p2p_remain_on_channel,
.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
.get_channel = brcmf_cfg80211_get_channel,
@@ -5611,7 +5654,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
return false;
}
-static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
+ const struct brcmf_event_msg *e)
{
u32 event = e->event_code;
u16 flags = e->flags;
@@ -5620,6 +5664,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
(event == BRCMF_E_DISASSOC_IND) ||
((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
brcmf_dbg(CONN, "Processing link down\n");
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
return true;
}
return false;
@@ -6067,7 +6113,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
brcmf_net_setcarrier(ifp, true);
- } else if (brcmf_is_linkdown(e)) {
+ } else if (brcmf_is_linkdown(ifp->vif, e)) {
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif) &&
test_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -6137,6 +6183,47 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
return 0;
}
+static s32 brcmf_notify_rssi(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ struct brcmf_rssi_be *info = data;
+ s32 rssi, snr, noise;
+ s32 low, high, last;
+
+ if (e->datalen < sizeof(*info)) {
+ brcmf_err("insufficient RSSI event data\n");
+ return 0;
+ }
+
+ rssi = be32_to_cpu(info->rssi);
+ snr = be32_to_cpu(info->snr);
+ noise = be32_to_cpu(info->noise);
+
+ low = vif->cqm_rssi_low;
+ high = vif->cqm_rssi_high;
+ last = vif->cqm_rssi_last;
+
+ brcmf_dbg(TRACE, "rssi=%d snr=%d noise=%d low=%d high=%d last=%d\n",
+ rssi, snr, noise, low, high, last);
+
+ vif->cqm_rssi_last = rssi;
+
+ if (rssi <= low || rssi == 0) {
+ brcmf_dbg(INFO, "LOW rssi=%d\n", rssi);
+ cfg80211_cqm_rssi_notify(ifp->ndev,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ rssi, GFP_KERNEL);
+ } else if (rssi > high) {
+ brcmf_dbg(INFO, "HIGH rssi=%d\n", rssi);
+ cfg80211_cqm_rssi_notify(ifp->ndev,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ rssi, GFP_KERNEL);
+ }
+
+ return 0;
+}
+
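brcmf_notify_rssi() above maps the firmware's big-endian level-crossing events onto the two nl80211 CQM event types: a reading at or below the low threshold (or a reading of 0, which the handler also treats as LOW, presumably a no-signal marker) raises NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, a reading above the high threshold raises the HIGH event, and readings inside the configured range are deliberately silent. A small gcc-compilable walk-through of that dispatch with made-up samples:

#include <stdio.h>

int main(void)
{
        const int low = -85, high = -60;        /* hypothetical CQM range */
        const int samples[] = { -70, -90, -72, -55, 0 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                int rssi = samples[i];

                if (rssi <= low || rssi == 0)
                        printf("rssi=%4d -> CQM LOW event\n", rssi);
                else if (rssi > high)
                        printf("rssi=%4d -> CQM HIGH event\n", rssi);
                else
                        printf("rssi=%4d -> inside range, no event\n", rssi);
        }
        return 0;
}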
static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
@@ -6235,6 +6322,7 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_p2p_notify_action_tx_complete);
brcmf_fweh_register(cfg->pub, BRCMF_E_PSK_SUP,
brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_RSSI, brcmf_notify_rssi);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -7169,6 +7257,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_DFS_OFFLOAD);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
wiphy_read_of_freq_limits(wiphy);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 17817cdb5de2..e90a30808c22 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -213,6 +213,9 @@ struct vif_saved_ie {
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
+ * @cqm_rssi_low: Lower RSSI limit for CQM monitoring
+ * @cqm_rssi_high: Upper RSSI limit for CQM monitoring
+ * @cqm_rssi_last: Last RSSI reading for CQM monitoring
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -224,6 +227,9 @@ struct brcmf_cfg80211_vif {
u16 mgmt_rx_reg;
bool mbss;
int is_11d;
+ s32 cqm_rssi_low;
+ s32 cqm_rssi_high;
+ s32 cqm_rssi_last;
};
/* association inform */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 5bf11e46fc49..45037decba40 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -720,6 +720,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
case BRCM_CC_43664_CHIP_ID:
+ case BRCM_CC_43666_CHIP_ID:
return 0x200000;
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 3dd28f5fef19..ea78fe527c5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -633,7 +633,7 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
+int brcmf_net_attach(struct brcmf_if *ifp, bool locked)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
@@ -656,8 +656,8 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
- if (rtnl_locked)
- err = register_netdevice(ndev);
+ if (locked)
+ err = cfg80211_register_netdevice(ndev);
else
err = register_netdev(ndev);
if (err != 0) {
@@ -677,11 +677,11 @@ fail:
return -EBADE;
}
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+void brcmf_net_detach(struct net_device *ndev, bool locked)
{
if (ndev->reg_state == NETREG_REGISTERED) {
- if (rtnl_locked)
- unregister_netdevice(ndev);
+ if (locked)
+ cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
} else {
@@ -758,7 +758,7 @@ int brcmf_net_mon_attach(struct brcmf_if *ifp)
ndev = ifp->ndev;
ndev->netdev_ops = &brcmf_netdev_ops_mon;
- err = register_netdevice(ndev);
+ err = cfg80211_register_netdevice(ndev);
if (err)
bphy_err(drvr, "Failed to register %s device\n", ndev->name);
@@ -909,7 +909,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
}
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
- bool rtnl_locked)
+ bool locked)
{
struct brcmf_if *ifp;
int ifidx;
@@ -938,7 +938,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
- brcmf_net_detach(ifp->ndev, rtnl_locked);
+ brcmf_net_detach(ifp->ndev, locked);
} else {
/* Only p2p device interfaces which get dynamically created
* end up here. In this case the p2p module should be informed
@@ -947,7 +947,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
- brcmf_p2p_ifp_removed(ifp, rtnl_locked);
+ brcmf_p2p_ifp_removed(ifp, locked);
kfree(ifp);
}
@@ -956,14 +956,14 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
}
-void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
+void brcmf_remove_interface(struct brcmf_if *ifp, bool locked)
{
if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp))
return;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx,
ifp->ifidx);
brcmf_proto_del_if(ifp->drvr, ifp);
- brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked);
+ brcmf_del_if(ifp->drvr, ifp->bsscfgidx, locked);
}
static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 5767d665cee5..8212c9de14f1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -201,16 +201,16 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
char *brcmf_ifname(struct brcmf_if *ifp);
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable);
-int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+int brcmf_net_attach(struct brcmf_if *ifp, bool locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
bool is_p2pdev, const char *name, u8 *mac_addr);
-void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
+void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
+void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 4aa2561934d7..6d5188b78f2d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -40,6 +40,18 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
};
+static const struct brcmf_dmi_data predia_basic_data = {
+ BRCM_CC_43341_CHIP_ID, 2, "predia-basic"
+};
+
+/* Note the Voyo winpad A15 tablet uses the same Ampak AP6330 module, with the
+ * exact same nvram file as the Prowise-PT301 tablet. Since the nvram for the
+ * Prowise-PT301 is already in linux-firmware we just point to that here.
+ */
+static const struct brcmf_dmi_data voyo_winpad_a15_data = {
+ BRCM_CC_4330_CHIP_ID, 4, "Prowise-PT301"
+};
+
static const struct dmi_system_id dmi_platform_data[] = {
{
/* ACEPC T8 Cherry Trail Z8350 mini PC */
@@ -111,6 +123,26 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&pov_tab_p1006w_data,
},
+ {
+ /* Predia Basic tablet (+ with keyboard dock) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ /* Mx.WT107.KUBNGEA02 with the version-nr dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
+ },
+ .driver_data = (void *)&predia_basic_data,
+ },
+ {
+ /* Voyo winpad A15 tablet */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"),
+ },
+ .driver_data = (void *)&voyo_winpad_a15_data,
+ },
{}
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d821a4758f8c..d40104b8df55 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -319,8 +319,10 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
u8 *nvram;
nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
- if (!nvram)
- goto fail;
+ if (!nvram) {
+ nvp->nvram_len = 0;
+ return;
+ }
/* Copy all valid entries, release old nvram and assign new one.
* Valid entries are of type pcie/X/Y/ where X = domain_nr and
@@ -350,10 +352,6 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
kfree(nvp->nvram);
nvp->nvram = nvram;
nvp->nvram_len = j;
- return;
-fail:
- kfree(nvram);
- nvp->nvram_len = 0;
}
static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
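The firmware.c change above is a control-flow cleanup rather than a bug fix: kfree(NULL) is harmless, but the fail label existed solely for the allocation-failure case, where there is nothing to free. Returning early with nvram_len zeroed states the error handling directly at the point of failure. The general shape of the pattern (hypothetical names):

buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
        parser->len = 0;        /* mark the parse as failed */
        return;                 /* nothing allocated, nothing to unwind */
}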
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 2e31cc10c195..ff2ef557f0ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -753,6 +753,34 @@ struct brcmf_assoclist_le {
};
/**
+ * struct brcmf_rssi_be - RSSI threshold event format
+ *
+ * @rssi: receive signal strength (in dBm)
+ * @snr: signal-to-noise ratio
+ * @noise: noise (in dBm)
+ */
+struct brcmf_rssi_be {
+ __be32 rssi;
+ __be32 snr;
+ __be32 noise;
+};
+
+#define BRCMF_MAX_RSSI_LEVELS 8
+
+/**
+ * struct brcmf_rssi_event_le - rssi_event IOVAR format
+ *
+ * @rate_limit_msec: RSSI event rate limit
+ * @rssi_level_num: number of supplied RSSI levels
+ * @rssi_levels: RSSI levels in ascending order
+ */
+struct brcmf_rssi_event_le {
+ __le32 rate_limit_msec;
+ s8 rssi_level_num;
+ s8 rssi_levels[BRCMF_MAX_RSSI_LEVELS];
+};
+
+/**
* struct brcmf_wowl_wakeind_le - Wakeup indicators
* Note: note both fields contain same information.
*
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index ec6fc7a150a6..6d30a0fcecea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2430,7 +2430,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return err;
}
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
@@ -2439,11 +2439,15 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- if (!rtnl_locked)
+ if (!locked) {
rtnl_lock();
- cfg80211_unregister_wdev(&vif->wdev);
- if (!rtnl_locked)
+ wiphy_lock(cfg->wiphy);
+ cfg80211_unregister_wdev(&vif->wdev);
+ wiphy_unlock(cfg->wiphy);
rtnl_unlock();
+ } else {
+ cfg80211_unregister_wdev(&vif->wdev);
+ }
brcmf_free_vif(vif);
}
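The rework of brcmf_p2p_ifp_removed() above follows the cfg80211 locking change this series rides on: cfg80211_unregister_wdev() now has to run with both the RTNL and the per-wiphy lock held, taken in that order, and the locked flag means "the caller already holds them". A schematic of the pattern as fixed (kernel context assumed; the wrapper name is hypothetical):

#include <linux/rtnetlink.h>
#include <net/cfg80211.h>

static void demo_unregister_wdev(struct wiphy *wiphy,
                                 struct wireless_dev *wdev, bool locked)
{
        if (!locked) {
                rtnl_lock();            /* RTNL first... */
                wiphy_lock(wiphy);      /* ...then the per-wiphy lock */
                cfg80211_unregister_wdev(wdev);
                wiphy_unlock(wiphy);
                rtnl_unlock();
        } else {
                /* caller already holds RTNL + wiphy lock */
                cfg80211_unregister_wdev(wdev);
        }
}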
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 45bc502fcb34..ad79e3b7e74a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -77,6 +77,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
+ BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h
index 9035cc4d6ff3..0c685eeaed33 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h
@@ -783,7 +783,7 @@ struct d11txh {
u8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
struct ieee80211_rts rts_frame; /* 0x2f - 0x36 */
u16 PAD; /* 0x37 */
-} __packed;
+} __packed __aligned(2);
#define D11_TXH_LEN 112 /* bytes */
@@ -1469,7 +1469,7 @@ struct d11rxhdr {
/* htphy PhyRxStatus_1: */
/* core enables for {3..0}, 0=disabled, 1=enabled */
#define PRXS1_HTPHY_CORE_MASK 0x000F
-/* antenna configation */
+/* antenna configuration */
#define PRXS1_HTPHY_ANTCFG_MASK 0x00F0
/* Mixmode PLCP Length low byte mask */
#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index c6c4be05159d..00309b272a0e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -48,6 +48,7 @@
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
#define BRCM_CC_43664_CHIP_ID 43664
+#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 28675a4ad861..341d6a2bc690 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2593,8 +2593,7 @@ out:
*/
if (ret != IL_INVALID_STATION &&
(!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
- ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
- (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS))) {
IL_ERR("Requested station info for sta %d before ready.\n",
ret);
ret = IL_INVALID_STATION;
@@ -2813,8 +2812,10 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
spin_lock_irqsave(&il->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
- struct il_ht_agg *agg = NULL;
- WARN_ON(!qc);
+ struct il_ht_agg *agg;
+
+ if (WARN_ON(!qc))
+ goto out;
agg = &il->stations[sta_id].tid[tid].agg;
@@ -2830,9 +2831,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
D_TX_REPLY("Retry scheduler reclaim scd_ssn "
"%d idx %d\n", scd_ssn, idx);
freed = il4965_tx_queue_reclaim(il, txq_id, idx);
- if (qc)
- il4965_free_tfds_in_queue(il, sta_id, tid,
- freed);
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
if (il->mac80211_registered &&
il_queue_space(&txq->q) > txq->q.low_mark &&
@@ -2862,6 +2861,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
il_queue_space(&txq->q) > txq->q.low_mark)
il_wake_queue(il, txq);
}
+out:
if (qc && likely(sta_id != IL_INVALID_STATION))
il4965_txq_check_empty(il, sta_id, tid, txq_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 8a4579bb10d3..44c4fe975390 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -76,8 +76,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
- .led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -102,8 +101,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .rx_with_siso_diversity = true
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 7140a5f3ea8b..df6ac00340b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -102,8 +102,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -129,8 +128,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -150,8 +148,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .rx_with_siso_diversity = true
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -177,8 +174,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .rx_with_siso_diversity = true
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 7220fc8fd9b0..0a0e25a3c681 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 59
+#define IWL_22000_UCODE_API_MAX 62
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -42,7 +42,10 @@
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
+#define IWL_SNJ_A_JF_B_FW_PRE "iwlwifi-SoSnj-a0-jf-b0-"
+#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0-"
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
+#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
@@ -76,8 +79,14 @@
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
+#define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
@@ -133,7 +142,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mac_addr_from_csr = true, \
.ht_params = &iwl_22000_ht_params, \
.nvm_ver = IWL_22000_NVM_VERSION, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.trans.use_tfh = true, \
.trans.rf_id = true, \
.trans.gen2 = true, \
@@ -238,6 +246,44 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_snj_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+};
+
+const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+ .integrated = true,
+ /* TODO: the following values need to be checked */
+ .xtal_latency = 500,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
+};
+
+const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+ .integrated = true,
+ /* TODO: the following values need to be checked */
+ .xtal_latency = 12000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -314,6 +360,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +387,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +425,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
@@ -581,9 +652,22 @@ const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
-const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = {
- .name = iwl_ax201_name,
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+const struct iwl_cfg iwl_cfg_snj_hr_b0 = {
+ .fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = {
+ .fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_ma_a0_hr_b0 = {
+ .fw_name_pre = IWL_MA_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@@ -596,6 +680,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0 = {
+ .fw_name_pre = IWL_MA_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
.uhb_supported = true,
@@ -610,6 +701,24 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
+ .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -625,6 +734,9 @@ MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 3591336dc644..6cdd7d983bda 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -74,8 +74,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
- .led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -138,8 +137,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .internal_wimax_coex = true
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index b4a8a6804c39..541a3ec85777 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -123,8 +123,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -177,8 +176,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -213,8 +211,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -268,8 +265,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -301,8 +297,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .internal_wimax_coex = true
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -327,8 +322,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .internal_wimax_coex = true
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index c542140e534e..b24dc5523a52 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -95,7 +95,6 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.dccm_offset = IWL7000_DCCM_OFFSET
#define IWL_DEVICE_7000 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 4ff8a56414a3..a6454287d506 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2014, 2018-2020 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -125,7 +125,6 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8265_2ac_cfg = {
@@ -134,7 +133,6 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@@ -144,7 +142,6 @@ const struct iwl_cfg iwl8275_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@@ -154,7 +151,6 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index eb5db204d84b..c4164bf508e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -97,7 +97,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.mon_smem_regs = { \
.write_ptr = { \
.addr = LDBG_M2S_BUF_WPTR, \
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 461af5831156..c01523f64bfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -406,7 +406,6 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
u32 i;
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
if (mode == 0)
ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
@@ -414,7 +413,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags))
+ if (!iwl_trans_grab_nic_access(priv->trans))
return;
/* Set starting address; reads will auto-increment */
@@ -446,7 +445,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
}
}
/* Allow device to power down */
- iwl_trans_release_nic_access(priv->trans, &reg_flags);
+ iwl_trans_release_nic_access(priv->trans);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -1694,7 +1693,6 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
struct iwl_trans *trans = priv->trans;
@@ -1718,7 +1716,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- if (!iwl_trans_grab_nic_access(trans, &reg_flags))
+ if (!iwl_trans_grab_nic_access(trans))
return pos;
/* Set starting address; reads will auto-increment */
@@ -1757,7 +1755,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
/* Allow device to power down */
- iwl_trans_release_nic_access(trans, &reg_flags);
+ iwl_trans_release_nic_access(trans);
return pos;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 8181ba573110..2684a924ba57 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2020 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -155,7 +155,6 @@ static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
struct iwl_priv *priv = from_timer(priv, t,
thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -171,8 +170,8 @@ static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
- if (iwl_trans_grab_nic_access(priv->trans, &flags))
- iwl_trans_release_nic_access(priv->trans, &flags);
+ if (iwl_trans_grab_nic_access(priv->trans))
+ iwl_trans_release_nic_access(priv->trans);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 15248b064380..82a4f7e8ba54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -9,9 +9,15 @@
#include "acpi.h"
#include "fw/runtime.h"
-static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
- 0xA5, 0xB3, 0x1F, 0x73,
- 0x8E, 0x28, 0x5A, 0xDE);
+const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
+IWL_EXPORT_SYMBOL(iwl_guid);
+
+const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+ 0x81, 0x4F, 0x75, 0xE4,
+ 0xDD, 0x26, 0xB5, 0xFD);
+IWL_EXPORT_SYMBOL(iwl_rfi_guid);
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
@@ -64,11 +70,12 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
* function.
*/
static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args)
+ union acpi_object *args,
+ const guid_t *guid)
{
union acpi_object *obj;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func,
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func,
args);
if (!obj) {
IWL_DEBUG_DEV_RADIO(dev,
@@ -80,19 +87,46 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
}
/*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value
+ * (either a plain integer object or one packed inside a buffer object);
+ * verify it and assign it to the "value" parameter.
+ * Returns 0 on success and the appropriate errno otherwise.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ const guid_t *guid, u64 *value,
+ size_t expected_size)
{
union acpi_object *obj;
- int ret;
+ int ret = 0;
- obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
- if (IS_ERR(obj))
+ obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Failed to get DSM object. func= %d\n",
+ func);
return -ENOENT;
+ }
- if (obj->type != ACPI_TYPE_BUFFER) {
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
+
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+ /* if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
@@ -100,15 +134,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
goto out;
}
- if (obj->buffer.length != sizeof(u8)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method returned invalid buffer, length=%d\n",
- obj->buffer.length);
- ret = -EINVAL;
- goto out;
- }
-
- ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
@@ -116,6 +141,26 @@ out:
ACPI_FREE(obj);
return ret;
}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value,
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+ guid, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) to be u8 */
+ *value = (u8)val;
+ return 0;
+}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
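With the GUID passed in by the caller, the same DSM plumbing now serves both the Wi-Fi GUID and the new RFI GUID. A hedged sketch of how a caller might query the RFI-enable function declared in the acpi.h hunk below (the wrapper function and its fallback policy are illustrative, not taken from the driver):

static bool demo_rfi_enabled_by_bios(struct device *dev)
{
        u8 value;
        int ret;

        /* revision 0; DSM_RFI_FUNC_ENABLE and iwl_rfi_guid from fw/acpi.h */
        ret = iwl_acpi_get_dsm_u8(dev, 0, DSM_RFI_FUNC_ENABLE,
                                  &iwl_rfi_guid, &value);
        if (ret < 0)
                return false;   /* no usable DSM entry: keep the default */

        return value == DSM_VALUE_RFI_ENABLE;
}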
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
@@ -443,11 +488,16 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
@@ -481,11 +531,16 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
@@ -541,11 +596,17 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev > 1) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
fwrt->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
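The three SAR-table hunks above untangle the same anti-pattern: OR-ing an IS_ERR() check together with an unrelated validity test and then calling PTR_ERR() on what may be a perfectly valid pointer, which turns a bad table revision into a meaningless (and likely positive) "error" value. The shape of the bug and of the fix (hypothetical names):

/* buggy: when obj is valid but rev is wrong, PTR_ERR(obj) is garbage */
if (IS_ERR(obj) || rev != 0)
        return PTR_ERR(obj);

/* fixed: each failure gets its own meaningful errno */
if (IS_ERR(obj))
        return PTR_ERR(obj);
if (rev != 0)
        return -EINVAL;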
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 042dd247d387..030c50082568 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -54,9 +54,9 @@
#define ACPI_WGDS_TABLE_SIZE 3
#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \
- IWL_NUM_SUB_BANDS) + 3)
+ IWL_NUM_SUB_BANDS) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
- IWL_NUM_SUB_BANDS_V2) + 3)
+ IWL_NUM_SUB_BANDS_V2) + 2)
/* PPAG gain value bounds in 1/8 dBm */
#define ACPI_PPAG_MIN_LB -16
@@ -93,13 +93,27 @@ enum iwl_dsm_values_indonesia {
DSM_VALUE_INDONESIA_MAX
};
+/* DSM RFI uses a different GUID, so need separate definitions */
+
+#define DSM_RFI_FUNC_ENABLE 3
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_ENABLE,
+ DSM_VALUE_RFI_DISABLE,
+ DSM_VALUE_RFI_MAX
+};
+
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
+extern const guid_t iwl_guid;
+extern const guid_t iwl_rfi_guid;
+
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
@@ -159,7 +173,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value)
{
return -ENOENT;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index b916b38b3092..c625d319142e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -284,7 +284,7 @@ enum iwl_legacy_cmds {
/* Phy */
/**
- * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
+ * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd_v1 or &struct iwl_phy_cfg_cmd_v3
*/
PHY_CONFIGURATION_CMD = 0x6a,
@@ -606,6 +606,16 @@ enum iwl_system_subcmd_ids {
* @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd
*/
FW_ERROR_RECOVERY_CMD = 0x7,
+
+ /**
+ * @RFI_CONFIG_CMD: &struct iwl_rfi_config_cmd
+ */
+ RFI_CONFIG_CMD = 0xb,
+
+ /**
+ * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
+ */
+ RFI_GET_FREQ_TABLE_CMD = 0xc,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index b472f08b06e6..d299bba3aa54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -53,6 +53,12 @@ enum iwl_data_path_subcmd_ids {
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
+ * @MONITOR_NOTIF: Datapath monitoring notification, using
+ * &struct iwl_datapath_monitor_notif
+ */
+ MONITOR_NOTIF = 0xF4,
+
+ /**
* @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
*/
RX_NO_DATA_NOTIF = 0xF5,
@@ -153,4 +159,14 @@ struct iwl_channel_estimation_cfg {
__le64 frame_types;
} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
+enum iwl_datapath_monitor_notif_type {
+ IWL_DP_MON_NOTIF_TYPE_EXT_CCA,
+};
+
+struct iwl_datapath_monitor_notif {
+ __le32 type;
+ u8 mac_id;
+ u8 reserved[3];
+} __packed; /* MONITOR_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index ace0ef46001a..8adccd5da095 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -186,6 +186,21 @@ struct iwl_shared_mem_cfg {
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
/**
+ * struct iwl_mfuart_load_notif_v1 - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+*/
+struct iwl_mfuart_load_notif_v1 {
+ __le32 installed_ver;
+ __le32 external_ver;
+ __le32 status;
+ __le32 duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
+/**
* struct iwl_mfuart_load_notif - mfuart image version & status
* ( MFUART_LOAD_NOTIFICATION = 0xb1 )
* @installed_ver: installed image version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 28aa28138908..ceeef8749765 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -12,7 +12,12 @@
enum iwl_location_subcmd_ids {
/**
* @TOF_RANGE_REQ_CMD: TOF ranging request,
- * uses &struct iwl_tof_range_req_cmd
+ * uses one of &struct iwl_tof_range_req_cmd_v5,
+ * &struct iwl_tof_range_req_cmd_v7,
+ * &struct iwl_tof_range_req_cmd_v8,
+ * &struct iwl_tof_range_req_cmd_v9,
+ * &struct iwl_tof_range_req_cmd_v11
*/
TOF_RANGE_REQ_CMD = 0x0,
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 2d03d7bb5da5..93084bbad534 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -452,6 +452,10 @@ struct iwl_he_pkt_ext {
* enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
* @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
* parameter set, i.e. the backoff counters for trig-based ACs
+ * @STA_CTXT_HE_NIC_NOT_ACK_ENABLED: mark that the NIC doesn't support receiving
+ * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
+ * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
+ * len delim to determine if AGG or single.
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
@@ -466,6 +470,7 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
STA_CTXT_HE_ACK_ENABLED = BIT(11),
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+ STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index b2706209b7d7..fbca9dd872e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -415,15 +415,26 @@ enum iwl_lari_config_masks {
};
/**
- * struct iwl_lari_config_change_cmd - change LARI configuration
+ * struct iwl_lari_config_change_cmd_v1 - change LARI configuration
* @config_bitmap: bit map of the config commands. each bit will trigger a
* different predefined FW config operation
*/
-struct iwl_lari_config_change_cmd {
+struct iwl_lari_config_change_cmd_v1 {
__le32 config_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
/**
+ * struct iwl_lari_config_change_cmd_v2 - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ * different predefined FW config operation
+ * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets
+ */
+struct iwl_lari_config_change_cmd_v2 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
new file mode 100644
index 000000000000..c678b9aa9b55
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+#ifndef __iwl_fw_api_rfi_h__
+#define __iwl_fw_api_rfi_h__
+
+#define IWL_RFI_LUT_ENTRY_CHANNELS_NUM 15
+#define IWL_RFI_LUT_SIZE 24
+#define IWL_RFI_LUT_INSTALLED_SIZE 4
+
+/**
+ * struct iwl_rfi_lut_entry - an entry in the RFI frequency LUT.
+ *
+ * @freq: frequency
+ * @channels: channels that can suffer interference at frequency freq (at most 15)
+ * @bands: the corresponding bands
+ */
+struct iwl_rfi_lut_entry {
+ __le16 freq;
+ u8 channels[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
+ u8 bands[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
+} __packed;
+
+/**
+ * struct iwl_rfi_config_cmd - RFI configuration table
+ *
+ * @table: a table of up to 24 frequency/channel mappings
+ * @oem: specifies if this is the default table or set by OEM
+ */
+struct iwl_rfi_config_cmd {
+ struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE];
+ u8 oem;
+ u8 reserved[3];
+} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * enum iwl_rfi_freq_table_status - status of the frequency table query
+ * @RFI_FREQ_TABLE_OK: can be used
+ * @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later
+ * @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW
+ */
+enum iwl_rfi_freq_table_status {
+ RFI_FREQ_TABLE_OK,
+ RFI_FREQ_TABLE_DVFS_NOT_READY,
+ RFI_FREQ_TABLE_DISABLED,
+};
+
+/**
+ * struct iwl_rfi_freq_table_resp_cmd - get the rfi freq table used by FW
+ *
+ * @table: table used by FW
+ * @status: see &iwl_rfi_freq_table_status
+ */
+struct iwl_rfi_freq_table_resp_cmd {
+ struct iwl_rfi_lut_entry table[IWL_RFI_LUT_INSTALLED_SIZE];
+ __le32 status;
+} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_rfi_h__ */
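The status word in iwl_rfi_freq_table_resp_cmd is what tells the driver whether the returned table may be consumed at all. A hedged sketch of validating a response in kernel context (the function and its errno mapping are illustrative; only the structures and enum come from the header above):

static int demo_check_rfi_resp(const struct iwl_rfi_freq_table_resp_cmd *resp)
{
        switch (le32_to_cpu(resp->status)) {
        case RFI_FREQ_TABLE_OK:
                return 0;               /* resp->table is valid */
        case RFI_FREQ_TABLE_DVFS_NOT_READY:
                return -EBUSY;          /* transient: try again later */
        case RFI_FREQ_TABLE_DISABLED:
        default:
                return -EOPNOTSUPP;     /* feature is off in this firmware */
        }
}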
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 821ed472ccff..2c74db823778 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -140,7 +140,8 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
* @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension
* algorithm
- * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: this frame is protected using
+ * CMAC or GMAC
* @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
* @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
* @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
@@ -167,7 +168,7 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
- RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+ RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
@@ -239,6 +240,8 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+ /* overlayed since IWL_UCODE_TLV_API_DEPRECATE_TTAK */
+ IWL_RX_MPDU_STATUS_REPLAY_ERROR = BIT(7),
IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK,
IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 931c0f48de99..6b8ca35cec1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -542,7 +542,8 @@ struct iwl_scan_config_v2 {
* struct iwl_scan_config
* @enable_cam_mode: whether to enable CAM mode.
* @enable_promiscouos_mode: whether to enable promiscuous mode
- * @bcast_sta_id: the index of the station in the fw
+ * @bcast_sta_id: the index of the station in the fw. Deprecated starting with
+ * API version 5.
* @reserved: reserved
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@@ -554,7 +555,7 @@ struct iwl_scan_config {
u8 reserved;
__le32 tx_chains;
__le32 rx_chains;
-} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */
/**
* enum iwl_umac_scan_flags - UMAC scan flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index b2d8ccf5f5dd..24e4a82a55da 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -20,6 +20,7 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_MASK: BT priority value
* @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
* on old firmwares).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
@@ -51,6 +52,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
TX_CMD_FLG_BT_PRIO_POS = 11,
+ TX_CMD_FLG_BT_PRIO_MASK = BIT(11) | BIT(12),
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),
@@ -177,7 +179,7 @@ enum iwl_tx_offload_assist_flags_pos {
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
- * @tx_flags: combination of TX_CMD_FLG_*
+ * @tx_flags: combination of TX_CMD_FLG_*, see &enum iwl_tx_flags
* @scratch: scratch buffer used by the device
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
@@ -238,7 +240,7 @@ struct iwl_tx_cmd {
__le16 pm_frame_timeout;
__le16 reserved4;
u8 payload[0];
- struct ieee80211_hdr hdr[];
+ struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@@ -855,6 +857,32 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+#define IWL_TX_FLUSH_QUEUE_RSP 16
+
+/**
+ * struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the TID of the flushed queue
+ * @queue_num: virtual queue id
+ * @read_before_flush: read pointer before flush
+ * @read_after_flush: read pointer after flush
+ */
+struct iwl_flush_queue_info {
+ __le16 tid;
+ __le16 queue_num;
+ __le16 read_before_flush;
+ __le16 read_after_flush;
+} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd_rsp - queue/FIFO flush command response
+ * @sta_id: the flushed station id
+ * @num_flushed_queues: number of queues in the @queues array
+ * @queues: all flushed queues
+ */
+struct iwl_tx_path_flush_cmd_rsp {
+ __le16 sta_id;
+ __le16 num_flushed_queues;
+ struct iwl_flush_queue_info queues[IWL_TX_FLUSH_QUEUE_RSP];
+} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
+
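A hedged sketch of walking the new flush response. The notification plumbing that delivers the payload is omitted, and the length clamp guards against malformed firmware data:

static void iwl_print_flush_rsp(const struct iwl_tx_path_flush_cmd_rsp *rsp)
{
	int i, num = le16_to_cpu(rsp->num_flushed_queues);

	if (num > IWL_TX_FLUSH_QUEUE_RSP)
		num = IWL_TX_FLUSH_QUEUE_RSP;

	for (i = 0; i < num; i++) {
		const struct iwl_flush_queue_info *info = &rsp->queues[i];

		pr_debug("sta %u tid %u queue %u: read ptr %u -> %u\n",
			 le16_to_cpu(rsp->sta_id),
			 le16_to_cpu(info->tid),
			 le16_to_cpu(info->queue_num),
			 le16_to_cpu(info->read_before_flush),
			 le16_to_cpu(info->read_after_flush));
	}
}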
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 0f0a6727701b..504729663c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -33,12 +33,11 @@ static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
u8 *pos = (void *)(*dump_data)->data;
- unsigned long flags;
int i;
IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
@@ -56,7 +55,7 @@ static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
*dump_data = iwl_fw_error_next_data(*dump_data);
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
}
static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
@@ -172,11 +171,10 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
- unsigned long flags;
IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
@@ -194,7 +192,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
}
static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
@@ -204,12 +202,11 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
- unsigned long flags;
int i, j;
IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
@@ -299,7 +296,7 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
}
}
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
}
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
@@ -527,7 +524,6 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_error_dump_data **data =
(struct iwl_fw_error_dump_data **)ptr;
- unsigned long flags;
u32 i;
if (!data)
@@ -535,7 +531,7 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
for (i = 0; i < range_len; i++) {
@@ -558,7 +554,7 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
*data = iwl_fw_error_next_data(*data);
}
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
/*
@@ -1048,7 +1044,6 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
u32 addr = le32_to_cpu(reg->addrs[idx]);
u32 dphy_state;
u32 dphy_addr;
- unsigned long flags;
int i;
range->internal_base_addr = cpu_to_le32(addr);
@@ -1060,7 +1055,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset);
indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset);
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW :
@@ -1082,7 +1077,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
*val++ = cpu_to_le32(prph_val);
}
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1157,10 +1152,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
{
- /* increase idx by 1 since the pages are from 1 to
- * fwrt->num_of_paging_blk + 1
- */
- struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block;
+ struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
@@ -1183,6 +1175,9 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
+ /* all page indices start from 1 to skip the CSS section */
+ idx++;
+
if (!fwrt->trans->trans_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
@@ -1297,13 +1292,12 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
- unsigned long flags;
int i;
if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
return -EIO;
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
@@ -1345,7 +1339,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1429,14 +1423,13 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
- unsigned long flags;
int i;
iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
if (!rxf_data.size)
return -EIO;
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
@@ -1479,7 +1472,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1596,9 +1589,8 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
- unsigned long flags;
- if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
+ if (!iwl_trans_grab_nic_access(fwrt->trans)) {
IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
}
@@ -1615,7 +1607,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
&addrs->cur_frag);
- iwl_trans_release_nic_access(fwrt->trans, &flags);
+ iwl_trans_release_nic_access(fwrt->trans);
data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
@@ -1684,8 +1676,12 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
- if (fwrt->trans->trans_cfg->gen2)
- return fwrt->trans->init_dram.paging_cnt;
+ if (fwrt->trans->trans_cfg->gen2) {
+ if (fwrt->trans->init_dram.paging_cnt)
+ return fwrt->trans->init_dram.paging_cnt - 1;
+ else
+ return 0;
+ }
return fwrt->num_of_paging_blk;
}
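The indexing convention after this change, sketched for clarity: paging block 0 holds the CSS section and is never dumped, so dump range i maps to paging block i + 1 and the gen2 range count shrinks by one accordingly:

/* range index -> paging block index, skipping the CSS block at 0 */
static inline int iwl_dump_paging_block_idx(int range_idx)
{
	return range_idx + 1;
}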
@@ -1750,15 +1746,13 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
u32 size = sizeof(struct iwl_fw_ini_error_dump);
- if (fwrt->trans->trans_cfg->gen2) {
- for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
- size += range_header_len +
- fwrt->trans->init_dram.paging[i].size;
- } else {
- for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
- i++)
- size += range_header_len +
- fwrt->fw_paging_db[i].fw_paging_size;
+ /* start from 1 to skip CSS section */
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
+ size += range_header_len;
+ if (fwrt->trans->trans_cfg->gen2)
+ size += fwrt->trans->init_dram.paging[i].size;
+ else
+ size += fwrt->fw_paging_db[i].fw_paging_size;
}
return size;
@@ -2071,7 +2065,8 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
- dump->regions_mask = trigger->regions_mask;
+ dump->regions_mask = trigger->regions_mask &
+ ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk);
dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
memcpy(dump->build_tag, fwrt->fw->human_readable,
@@ -2200,7 +2195,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
};
int i;
u32 size = 0;
- u64 regions_mask = le64_to_cpu(trigger->regions_mask);
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
+ ~(fwrt->trans->dbg.unsupported_region_msk);
BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
@@ -2451,7 +2447,8 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
return -EIO;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
+ if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT &&
+ trig_type != FW_DBG_TRIGGER_DRIVER)
return -EIO;
iwl_dbg_tlv_time_point(fwrt,
@@ -2758,7 +2755,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
- unsigned long flags;
int i;
struct {
u32 addr;
@@ -2778,7 +2774,7 @@ void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
};
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
IWL_ERR(fwrt, "Fseq Registers:\n");
@@ -2788,7 +2784,7 @@ void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
fseq_regs[i].str);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 597bc88479ba..35dffcaf5aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,6 +93,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_HW_TYPE = 58,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+ IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61,
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
@@ -439,6 +440,9 @@ enum iwl_ucode_tlv_capa {
*/
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
+ IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,
+
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@@ -866,7 +870,7 @@ struct iwl_fw_dbg_trigger_time_event {
* tx_bar: tid bitmap to configure on what tid the trigger should occur
* when a BAR is sent (for an Rx BlockAck session).
* frame_timeout: tid bitmap to configure on what tid the trigger should occur
- * when a frame times out in the reodering buffer.
+ * when a frame times out in the reordering buffer.
*/
struct iwl_fw_dbg_trigger_ba {
__le16 rx_ba_start;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index c93d247621ec..1dee4714e505 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -219,6 +219,9 @@ struct iwl_fw {
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
+
+ u8 *phy_integration_ver;
+ u32 phy_integration_ver_len;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index e317b051b8ed..986913f2fbd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -36,11 +36,13 @@ IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
iwl_fw_suspend_timestamp(fwrt);
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
{
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, NULL);
iwl_fw_resume_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 6d8f7bff1243..fd070ca5e517 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/******************************************************************************
- *
- * Copyright(c) 2020 Intel Corporation
- *
- *****************************************************************************/
+/*
+ * Copyright(c) 2020-2021 Intel Corporation
+ */
#include "iwl-drv.h"
#include "pnvm.h"
@@ -12,6 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/alive.h"
+#include <linux/efi.h>
struct iwl_pnvm_section {
__le32 offset;
@@ -198,14 +197,14 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
le32_to_cpu(sku_id->data[1]),
le32_to_cpu(sku_id->data[2]));
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
int ret;
- data += sizeof(*tlv) + ALIGN(tlv_len, 4);
- len -= ALIGN(tlv_len, 4);
-
ret = iwl_pnvm_handle_section(trans, data, len);
if (!ret)
return 0;
@@ -221,24 +220,89 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
-int iwl_pnvm_load(struct iwl_trans *trans,
- struct iwl_notif_wait_data *notif_wait)
+#if defined(CONFIG_EFI)
+
+#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
+ 0xb2, 0xec, 0xf5, 0xa3, \
+ 0x59, 0x4f, 0x4a, 0xea)
+
+#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
+
+#define IWL_HARDCODED_PNVM_SIZE 4096
+
+struct pnvm_sku_package {
+ u8 rev;
+ u8 reserved1[3];
+ u32 total_size;
+ u8 n_skus;
+ u8 reserved2[11];
+ u8 data[];
+};
+
+static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
+ u8 **data, size_t *len)
+{
+ struct efivar_entry *pnvm_efivar;
+ struct pnvm_sku_package *package;
+ unsigned long package_size;
+ int err;
+
+ pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
+ if (!pnvm_efivar)
+ return -ENOMEM;
+
+ memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
+ sizeof(IWL_UEFI_OEM_PNVM_NAME));
+ pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+ /*
+ * TODO: we hardcode a maximum length here, because reading the
+ * variable size from UEFI doesn't work yet. To implement this
+ * properly, we would have to call efivar_entry_size() first.
+ */
+ package_size = IWL_HARDCODED_PNVM_SIZE;
+
+ package = kmalloc(package_size, GFP_KERNEL);
+ if (!package) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
+ if (err) {
+ IWL_DEBUG_FW(trans,
+ "PNVM UEFI variable not found %d (len %zd)\n",
+ err, package_size);
+ goto out;
+ }
+
+ IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %zd\n", package_size);
+
+ /* compute the payload length before using it for the copy */
+ *len = package_size - sizeof(*package);
+
+ *data = kmemdup(package->data, *len, GFP_KERNEL);
+ if (!*data)
+ err = -ENOMEM;
+
+out:
+ kfree(package);
+ kfree(pnvm_efivar);
+
+ return err;
+}
+#else /* CONFIG_EFI */
+static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
+ u8 **data, size_t *len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_EFI */
+
+static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
- struct iwl_notification_wait pnvm_wait;
- static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
- PNVM_INIT_COMPLETE_NTFY) };
char pnvm_name[64];
int ret;
- /* if the SKU_ID is empty, there's nothing to do */
- if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
- return 0;
-
- /* if we already have it, nothing to do either */
- if (trans->pnvm_loaded)
- return 0;
-
/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
@@ -254,12 +318,67 @@ int iwl_pnvm_load(struct iwl_trans *trans,
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
- } else {
- iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
+ return ret;
+ }
- release_firmware(pnvm);
+ *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ *len = pnvm->size;
+
+ return 0;
+}
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait)
+{
+ u8 *data;
+ size_t len;
+ struct iwl_notification_wait pnvm_wait;
+ static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ PNVM_INIT_COMPLETE_NTFY) };
+ int ret;
+
+ /* if the SKU_ID is empty, there's nothing to do */
+ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ return 0;
+
+ /*
+ * If we already loaded (or tried to load) it before, we just
+ * need to set it again.
+ */
+ if (trans->pnvm_loaded) {
+ ret = iwl_trans_set_pnvm(trans, NULL, 0);
+ if (ret)
+ return ret;
+ goto skip_parse;
+ }
+
+ /* First attempt to get the PNVM from BIOS */
+ ret = iwl_pnvm_get_from_efi(trans, &data, &len);
+ if (!ret)
+ goto parse;
+
+ /* If it's not available, try from the filesystem */
+ ret = iwl_pnvm_get_from_fs(trans, &data, &len);
+ if (ret) {
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+
+ goto skip_parse;
}
+parse:
+ iwl_pnvm_parse(trans, data, len);
+
+ kfree(data);
+
+skip_parse:
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
iwl_pnvm_complete_fn, trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 27cb0406ba9a..75f99ff7f908 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
@@ -325,10 +325,6 @@ struct iwl_fw_mon_regs {
* @features: hw features, any combination of feature_passlist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
- * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
- * station can receive in HT
- * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
- * station can receive in VHT
* @dccm_offset: offset from which DCCM begins
* @dccm_len: length of DCCM (including runtime stack CCM)
* @dccm2_offset: offset from which the second DCCM begins
@@ -395,8 +391,6 @@ struct iwl_cfg {
u8 non_shared_ant;
u8 nvm_hw_section_num;
u8 max_tx_agg_size;
- u8 max_ht_ampdu_exponent;
- u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
u16 num_rbds;
@@ -418,6 +412,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QU 0x33
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_QNJ 0x36
+#define IWL_CFG_MAC_TYPE_SO 0x37
#define IWL_CFG_MAC_TYPE_SNJ 0x42
#define IWL_CFG_MAC_TYPE_MA 0x44
@@ -444,8 +439,11 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
+#define IWL_CFG_NO_CDB 0x0
+#define IWL_CFG_CDB 0x1
+
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
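A worked example of why the mask changed: the shift extracts bit 9 of the subdevice id, but the old mask selected bit 8, so the result was always zero.

/* subdevice 0x0200 has the "no 160 MHz" bit set:
 *   old: (0x0200 & 0x0100) >> 9 == 0   -- bit dropped by the shift
 *   new: (0x0200 & 0x0200) >> 9 == 1   -- mask and shift agree on bit 9
 */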
@@ -457,6 +455,7 @@ struct iwl_dev_info {
u8 rf_id;
u8 no_160;
u8 cores;
+ u8 cdb;
const struct iwl_cfg *cfg;
const char *name;
};
@@ -473,6 +472,9 @@ extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
@@ -491,6 +493,7 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +577,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
@@ -597,10 +602,15 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
-extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
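A hypothetical sketch of a device-table entry using the new cdb field; only fields visible in this hunk are shown, the values are illustrative, and the real table in pcie/drv.c is built with helper macros:

static const struct iwl_dev_info example_dev_info = {
	.rf_id = 0,
	.no_160 = 0,
	.cores = IWL_CFG_CORES_BT,
	.cdb = IWL_CFG_CDB,	/* match CDB RF modules only */
	.cfg = &iwlax411_2ax_cfg_so_gf4_a0,
	.name = "example entry",
};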
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index fe0c03c8d390..6ccde7e30211 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -277,6 +277,8 @@
#define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
#define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
#define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
+#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
+#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
/**
* hw_rev values
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index a654147d3cd6..579bc81cc0ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -61,7 +61,8 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
+static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
@@ -76,9 +77,9 @@ static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
return 0;
}
-static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
+static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
+ const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 ver = le32_to_cpu(hdr->version);
@@ -91,9 +92,9 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
}
static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv)
+ const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
+ const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
@@ -105,9 +106,9 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv)
+ const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
+ const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
u32 buf_location;
u32 alloc_id;
@@ -145,9 +146,9 @@ err:
}
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv)
+ const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
+ const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
u32 tp = le32_to_cpu(hcmd->time_point);
if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
@@ -169,9 +170,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv)
+ const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
+ const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
struct iwl_ucode_tlv **active_reg;
u32 id = le32_to_cpu(reg->id);
u32 type = le32_to_cpu(reg->type);
@@ -180,13 +181,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
- /* For safe using a string from FW make sure we have a
- * null terminator
- */
- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
@@ -221,9 +215,10 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv)
+ const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
+ struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
struct iwl_ucode_tlv *dup = NULL;
int ret;
@@ -244,8 +239,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
GFP_KERNEL);
if (!dup)
return -ENOMEM;
- trig = (void *)dup->data;
- trig->occurrences = cpu_to_le32(-1);
+ dup_trig = (void *)dup->data;
+ dup_trig->occurrences = cpu_to_le32(-1);
tlv = dup;
}
@@ -256,7 +251,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
- struct iwl_ucode_tlv *tlv) = {
+ const struct iwl_ucode_tlv *tlv) = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
@@ -264,10 +259,10 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
};
-void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
+void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext)
{
- struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
+ const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 domain = le32_to_cpu(hdr->domain);
@@ -403,7 +398,7 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans)
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u32 tlv_len;
while (len >= sizeof(*tlv)) {
@@ -744,12 +739,12 @@ static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
}
}
-static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
- struct iwl_ucode_tlv *old)
+static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
+ const struct iwl_ucode_tlv *old)
{
- struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
- struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
- __le32 *new_data = new_trig->data, *old_data = old_trig->data;
+ const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
+ const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
+ const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
int i, j;
@@ -964,6 +959,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
+ u32 failed_alloc = 0;
if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
return;
@@ -995,10 +991,43 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
continue;
ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
- if (ret)
+
+ if (ret) {
IWL_WARN(fwrt,
"WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
i, ret);
+ failed_alloc |= BIT(i);
+ }
+ }
+
+ if (!failed_alloc)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
+ struct iwl_fw_ini_region_tlv *reg;
+ struct iwl_ucode_tlv **active_reg =
+ &fwrt->trans->dbg.active_regions[i];
+ u32 reg_type;
+
+ if (!*active_reg)
+ continue;
+
+ reg = (void *)(*active_reg)->data;
+ reg_type = le32_to_cpu(reg->type);
+
+ if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
+ !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
+ continue;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: removing allocation id %d from region id %d\n",
+ le32_to_cpu(reg->dram_alloc_id), i);
+
+ failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
+ fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
+
+ kfree(*active_reg);
+ *active_reg = NULL;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 246823878281..92c720527946 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
#include <linux/device.h>
#include <linux/types.h>
+#include <fw/file.h>
+#include <fw/api/dbg-tlv.h>
/**
* struct iwl_dbg_tlv_node - debug TLV node
@@ -43,7 +45,7 @@ struct iwl_fw_runtime;
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
-void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
+void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext);
void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d44bc61c34f5..eb168dc535d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -127,6 +127,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
kfree(drv->fw.ucode_capa.cmd_versions);
+ kfree(drv->fw.phy_integration_ver);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -558,7 +559,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
bool *usniffer_images)
{
struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
u32 tlv_len;
@@ -1143,6 +1144,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_cmd_versions =
tlv_len / sizeof(struct iwl_fw_cmd_version);
break;
+ case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION:
+ if (drv->fw.phy_integration_ver) {
+ IWL_ERR(drv,
+ "phy integration str ignored, already exists\n");
+ break;
+ }
+
+ drv->fw.phy_integration_ver =
+ kmemdup(tlv_data, tlv_len, GFP_KERNEL);
+ if (!drv->fw.phy_integration_ver)
+ return -ENOMEM;
+ drv->fw.phy_integration_ver_len = tlv_len;
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c21062777caf..f12b86563728 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#include <linux/types.h>
@@ -711,12 +711,11 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (cfg->ht_params->ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
- if ((trans->trans_cfg->mq_rx_supported &&
- iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
- iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+ if (trans->trans_cfg->mq_rx_supported ||
+ iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 2ac20d0a30eb..33d42e08d5b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -66,10 +66,10 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
- unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+
+ if (iwl_trans_grab_nic_access(trans)) {
value = iwl_read32(trans, reg);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
return value;
@@ -78,22 +78,18 @@ IWL_EXPORT_SYMBOL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
- unsigned long flags;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write32(trans, reg, value);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
{
- unsigned long flags;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write64(trans, reg, value);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct64);
@@ -139,27 +135,25 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
- unsigned long flags;
u32 val = 0x5a5a5a5a;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
return val;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
- unsigned long flags;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
+ mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -178,13 +172,11 @@ int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
- unsigned long flags;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write_prph_no_grab(trans, ofs,
iwl_read_prph_no_grab(trans, ofs) |
mask);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
@@ -192,26 +184,23 @@ IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask)
{
- unsigned long flags;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write_prph_no_grab(trans, ofs,
(iwl_read_prph_no_grab(trans, ofs) &
mask) | bits);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
- unsigned long flags;
u32 val;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
@@ -219,8 +208,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
- iwl_write_prph(trans, DEVICE_SET_NMI_REG,
- DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
@@ -445,3 +434,39 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
return err < 0 ? err : 0;
}
IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
+
+void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
+ u32 sw_err_bit)
+{
+ unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+ bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
+
+ /* if the interrupts were already disabled, there is no point in
+ * disabling them again
+ */
+ if (interrupts_enabled)
+ iwl_trans_interrupts(trans, false);
+
+ iwl_force_nmi(trans);
+ while (time_after(timeout, jiffies)) {
+ u32 inta_hw = iwl_read32(trans, inta_addr);
+
+ /* Error detected by uCode */
+ if (inta_hw & sw_err_bit) {
+ /* Clear causes register */
+ iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
+ break;
+ }
+
+ mdelay(1);
+ }
+
+ /* re-enable interrupts only if they were enabled before this
+ * function ran, to avoid a case where the driver enables interrupts
+ * before the proper configuration has been made
+ */
+ if (interrupts_enabled)
+ iwl_trans_interrupts(trans, true);
+
+ iwl_trans_fw_error(trans);
+}
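A sketch of how a transport back end might implement its .sync_nmi hook with the new helper. The cause-register address and SW-error bit are device-specific; the names below come from the PCIe layer and are assumptions here:

static void iwl_example_sync_nmi(struct iwl_trans *trans)
{
	/* MSIX case: poll the HW interrupt cause register for SW error */
	iwl_trans_sync_nmi_with_addr(trans, CSR_MSIX_HW_INT_CAUSES_AD,
				     MSIX_HW_INT_CAUSES_REG_SW_ERR);
}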
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 39bceee4e9e7..3c21c0e081f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+ u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 720193d16539..af684f80b0cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -453,8 +453,6 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
const struct iwl_cfg *cfg = trans->cfg;
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
- unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
- IEEE80211_VHT_MAX_AMPDU_1024K);
vht_cap->vht_supported = true;
@@ -462,7 +460,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
- max_ampdu_exponent <<
+ IEEE80211_VHT_MAX_AMPDU_1024K <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (data->vht160_supported)
@@ -585,6 +583,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index aca1ccdd1aa4..e1f5a9741850 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -80,4 +80,5 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
*/
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
+
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 9097fe310693..868da7e79a45 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/debugfs.h>
+#include "iwl-dbg-tlv.h"
struct iwl_op_mode;
struct iwl_trans;
@@ -83,6 +84,7 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
+ * @time_point: called when transport layer wants to collect debug data
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -104,6 +106,9 @@ struct iwl_op_mode_ops {
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
+ void (*time_point)(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -196,4 +201,11 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
+static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data)
+{
+ op_mode->ops->time_point(op_mode, tp_id, tp_data);
+}
+
#endif /* __iwl_op_mode_h__ */
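A minimal sketch of wiring the new op-mode hook. Note that the inline wrapper above calls the hook unconditionally, so an op mode registering these ops must provide a non-NULL .time_point; the mvm implementation is added elsewhere in this series:

static void example_time_point(struct iwl_op_mode *op_mode,
			       enum iwl_fw_ini_time_point tp_id,
			       union iwl_dbg_tlv_tp_data *tp_data)
{
	/* typically forwards to iwl_dbg_tlv_time_point() */
}

static const struct iwl_op_mode_ops example_ops = {
	/* mandatory hooks elided for brevity */
	.time_point = example_time_point,
};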
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 0b03fdedc1f7..3ce77e4eb7e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -301,6 +301,12 @@
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR 0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR 0xa03480
+
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
@@ -359,6 +365,7 @@ enum {
/* device family 22000 WPROT register */
#define PREG_PRPH_WPROT_22000 0xA04D00
+#define SB_MODIFY_CFG_FLAG 0xA03088
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index cc76826da5d5..60e0db4a5e20 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -13,6 +13,7 @@
#include "iwl-fh.h"
#include "queue/tx.h"
#include <linux/dmapool.h>
+#include "fw/api/commands.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
@@ -102,6 +103,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
return NULL;
}
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans->wait_command_queue);
+
return trans;
}
@@ -130,6 +134,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
+ /*
+ * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
+ * bit is set early in the D3 flow, before we send all the commands
+ * that configure the firmware for D3 operation (power, patterns, ...)
+ * and we don't want to flag all those with CMD_SEND_IN_D3.
+ * So use the system_pm_mode instead. The only command sent after
+ * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
+ * CMD_SEND_IN_D3.
+ */
+ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+ !(cmd->flags & CMD_SEND_IN_D3)))
+ return -EHOSTDOWN;
+
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -145,10 +162,12 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
- if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
- cmd->id = DEF_ID(cmd->id);
+ if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
+ if (cmd->id != REPLY_ERROR)
+ cmd->id = DEF_ID(cmd->id);
+ }
- ret = trans->ops->send_cmd(trans, cmd);
+ ret = iwl_trans_txq_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
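Concretely, once system_pm_mode is set to IWL_PLAT_PM_MODE_D3, only commands flagged as below get through iwl_trans_send_cmd(); everything else is rejected with -EHOSTDOWN. This mirrors the D3_CONFIG_CMD change in mvm/d3.c later in this patch:

struct iwl_host_cmd d3_cfg_cmd = {
	.id = D3_CONFIG_CMD,
	.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,	/* allowed while in D3 */
};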
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 2d65bb82f7fe..4a5822c1be13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -107,12 +107,16 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
+ * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
+ * SUSPEND and RESUME commands. We are in D3 mode when we set
+ * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_WANT_ASYNC_CALLBACK = BIT(3),
+ CMD_SEND_IN_D3 = BIT(4),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -514,6 +518,7 @@ struct iwl_trans_rxq_dma_data {
* of the trans debugfs
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
+ * @interrupts: disable/enable interrupts to transport
*/
struct iwl_trans_ops {
@@ -574,19 +579,17 @@ struct iwl_trans_ops {
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
void (*sw_reset)(struct iwl_trans *trans);
- bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
- void (*release_nic_access)(struct iwl_trans *trans,
- unsigned long *flags);
+ bool (*grab_nic_access)(struct iwl_trans *trans);
+ void (*release_nic_access)(struct iwl_trans *trans);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
- int (*suspend)(struct iwl_trans *trans);
- void (*resume)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
+ void (*interrupts)(struct iwl_trans *trans, bool enable);
};
/**
@@ -742,6 +745,7 @@ struct iwl_trans_debug {
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
+ u64 unsupported_region_msk;
struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
struct list_head debug_info_tlv_list;
struct iwl_dbg_tlv_time_point_data
@@ -914,6 +918,7 @@ struct iwl_trans_txqs {
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @wide_cmd_header: true when ucode supports wide command header format
+ * @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @iml_len: the length of the image loader
@@ -957,6 +962,7 @@ struct iwl_trans {
int command_groups_size;
bool wide_cmd_header;
+ wait_queue_head_t wait_command_queue;
u8 num_rx_queues;
size_t iml_len;
@@ -1073,20 +1079,6 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test, reset);
}
-static inline int iwl_trans_suspend(struct iwl_trans *trans)
-{
- if (!trans->ops->suspend)
- return 0;
-
- return trans->ops->suspend(trans);
-}
-
-static inline void iwl_trans_resume(struct iwl_trans *trans)
-{
- if (trans->ops->resume)
- trans->ops->resume(trans);
-}
-
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
@@ -1375,14 +1367,14 @@ iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
trans->ops->set_bits_mask(trans, reg, mask, value);
}
-#define iwl_trans_grab_nic_access(trans, flags) \
+#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans, flags)))
+ likely((trans)->ops->grab_nic_access(trans)))
static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
{
- trans->ops->release_nic_access(trans, flags);
+ trans->ops->release_nic_access(trans);
__release(nic_access);
}
@@ -1409,6 +1401,9 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
trans->ops->sync_nmi(trans);
}
+void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
+ u32 sw_err_bit);
+
static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len)
{
@@ -1430,6 +1425,12 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
+static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
+{
+ if (trans->ops->interrupts)
+ trans->ops->interrupts(trans, enable);
+}
+
/*****************************************************
* transport helper functions
*****************************************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index dd268c4bd371..75fc2d935e5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -6,6 +6,7 @@ iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-y += ftm-responder.o ftm-initiator.o
+iwlmvm-y += rfi.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c025188fa9bc..a7dc85c704a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -975,7 +975,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@@ -997,6 +997,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ synchronize_net();
+
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) {
ret = 1;
@@ -1065,6 +1067,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
@@ -1103,19 +1107,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_trans *trans = mvm->trans;
- int ret;
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
- ret = iwl_trans_suspend(trans);
- if (ret)
- return ret;
-
- trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
return __iwl_mvm_suspend(hw, wowlan, false);
}
@@ -2032,8 +2028,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
- clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
@@ -2049,12 +2043,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
goto err;
}
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
- NULL);
-
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
if (ret)
goto err;
@@ -2067,7 +2059,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (d0i3_first) {
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
};
int len;
@@ -2100,6 +2092,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
}
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
/*
* Query the current location and source from the D3 firmware so we
 * can play it back when we re-initialize the D0 firmware
@@ -2143,11 +2137,13 @@ err:
out_iterate:
if (!test)
- ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ ieee80211_iterate_active_interfaces_mtx(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
/* nothing else to do if we already sent D0I3_END_CMD */
@@ -2169,21 +2165,12 @@ out:
return 1;
}
-static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
-{
- iwl_trans_resume(mvm->trans);
-
- return __iwl_mvm_resume(mvm, false);
-}
-
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = iwl_mvm_resume_d3(mvm);
-
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ ret = __iwl_mvm_resume(mvm, false);
iwl_mvm_resume_tcm(mvm);
@@ -2210,10 +2197,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
- synchronize_net();
-
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
@@ -2283,8 +2266,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
iwl_fw_runtime_resume(&mvm->fwrt);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
iwl_abort_notification_waits(&mvm->notif_wait);
if (!unified_image) {
int remaining_time = 10;
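
Taken together, the d3.c hunks move ownership of system_pm_mode into
__iwl_mvm_suspend()/__iwl_mvm_resume() and tag the D3 entry/exit commands
with CMD_SEND_IN_D3, which the transport is assumed to require for any
command issued while system_pm_mode is IWL_PLAT_PM_MODE_D3. A condensed
sketch of the new suspend ordering (not the literal driver code; the
WoWLAN configuration steps are elided):

static int mvm_suspend_ordering(struct iwl_mvm *mvm,
				struct iwl_host_cmd *d3_cfg_cmd)
{
	/* block the TX path first ... */
	set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
	/* ... and wait out any packets already in flight */
	synchronize_net();

	/* (WoWLAN/offload configuration goes here) */

	/* enter D3 mode before sending the flagged command */
	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	/* must be last -- this switches firmware state */
	return iwl_mvm_send_cmd(mvm, d3_cfg_cmd);
}
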
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 573e46956c14..38d0bfb649cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 80f848a9ee13..130760572262 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -91,7 +91,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
"FLUSHING all tids queues on sta_id = %d\n",
flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF)
? : count;
mutex_unlock(&mvm->mutex);
return ret;
@@ -101,7 +101,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
+ ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
@@ -712,6 +712,30 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
return ret;
}
+static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char *buf;
+ size_t bufsz;
+ int pos;
+ ssize_t ret;
+
+ bufsz = mvm->fw->phy_integration_ver_len + 2;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len,
+ mvm->fw->phy_integration_ver);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+ return ret;
+}
+
#define PRINT_STATS_LE32(_struct, _memb) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, #_memb, \
@@ -1117,24 +1141,22 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
+ struct iwl_op_mode *opmode = container_of((void *)mvm,
+ struct iwl_op_mode,
+ op_mode_specific);
struct iwl_rx_cmd_buffer rxb = {
._rx_page_order = 0,
.truesize = 0, /* not used */
._offset = 0,
};
struct iwl_rx_packet *pkt;
- struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
- size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(struct iwl_rx_mpdu_desc) :
- IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- /* supporting only 9000 descriptor */
+ /* supporting only MQ RX */
if (!mvm->trans->trans_cfg->mq_rx_supported)
return -ENOTSUPP;
@@ -1147,23 +1169,13 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
if (ret)
goto out;
- /* avoid invalid memory access */
- if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
- goto out;
-
- /* check this is RX packet */
- if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
- WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
- goto out;
-
- /* check the length in metadata matches actual received length */
- desc = (void *)pkt->data;
- if (le16_to_cpu(desc->mpdu_len) !=
- (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
+ /* avoid invalid memory access and malformed packet */
+ if (bin_len < sizeof(*pkt) ||
+ bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
goto out;
local_bh_disable();
- iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+ iwl_mvm_rx_mq(opmode, NULL, &rxb);
local_bh_enable();
ret = 0;
@@ -1337,6 +1349,24 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
+static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ u32 timepoint;
+
+ if (kstrtou32(buf, 0, &timepoint))
+ return -EINVAL;
+
+ if (timepoint == IWL_FW_INI_TIME_POINT_INVALID ||
+ timepoint >= IWL_FW_INI_TIME_POINT_NUM)
+ return -EINVAL;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL);
+
+ return count;
+}
+
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1746,6 +1776,69 @@ iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret = 0;
+ u16 op_id;
+
+ if (kstrtou16(buf, 10, &op_id))
+ return -EINVAL;
+
+ /* value zero triggers re-sending the default table to the device */
+ if (!op_id)
+ ret = iwl_rfi_send_config_cmd(mvm, NULL);
+ else
+ ret = -EOPNOTSUPP; /* in the future a new table will be added */
+
+ return ret ?: count;
+}
+
+/* The size computation is as follows:
+ * each number needs at most 3 characters, and the number of rows is the
+ * installed table size. Each row needs 5 characters for the "freq: "
+ * prefix (up to 3 digits plus ": "), and each (channel, band) tuple
+ * after it needs 6 characters for the two numbers and 5 for the
+ * punctuation around them.
+ */
+#define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5)))
+
+static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_rfi_freq_table_resp_cmd *resp;
+ u32 status;
+ char buf[IWL_RFI_BUF_SIZE];
+ int i, j, pos = 0;
+
+ resp = iwl_rfi_get_freq_table(mvm);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ status = le32_to_cpu(resp->status);
+ if (status != RFI_FREQ_TABLE_OK) {
+ pos += scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(resp->table); i++) {
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ",
+ resp->table[i].freq);
+
+ for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++)
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos,
+ "(%d, %d) ",
+ resp->table[i].channels[j],
+ resp->table[i].bands[j]);
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n");
+ }
+
+out:
+ kfree(resp);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@@ -1766,6 +1859,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
+MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
@@ -1773,6 +1867,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
@@ -1795,6 +1890,7 @@ MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16);
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1941,26 +2037,24 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600);
}
-void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
- mvm->debugfs_dir = dbgfs_dir;
-
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
- MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
@@ -1978,8 +2072,12 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600);
+
+ if (mvm->fw->phy_integration_ver)
+ MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400);
#ifdef CONFIG_ACPI
- MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400);
#endif
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
@@ -2031,12 +2129,13 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
debugfs_create_blob("nvm_reg", S_IRUSR,
mvm->debugfs_dir, &mvm->nvm_reg_blob);
- debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
+ debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
+ &iwl_dbgfs_mem_ops);
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
*/
- snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index d7ca1f98883b..73a82f07dc59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -36,5 +36,6 @@
#include "fw/api/stats.h"
#include "fw/api/location.h"
#include "fw/api/tx.h"
+#include "fw/api/rfi.h"
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0637eb1cff4e..15e2773ce7e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -6,6 +6,7 @@
*/
#include <net/mac80211.h>
#include <linux/netdevice.h>
+#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@@ -475,9 +476,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
/* Load NVM to NIC if needed */
if (mvm->nvm_file_name) {
- iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
- mvm->nvm_sections);
- iwl_mvm_load_nvm_to_nic(mvm);
+ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
+ if (ret)
+ goto error;
+ ret = iwl_mvm_load_nvm_to_nic(mvm);
+ if (ret)
+ goto error;
}
if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
@@ -633,6 +638,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
iwl_wait_phy_db_entry,
mvm->phy_db);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
/* Will also start the device */
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
if (ret) {
@@ -656,8 +663,11 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
}
/* In case we read the NVM from external file, load it to the NIC */
- if (mvm->nvm_file_name)
- iwl_mvm_load_nvm_to_nic(mvm);
+ if (mvm->nvm_file_name) {
+ ret = iwl_mvm_load_nvm_to_nic(mvm);
+ if (ret)
+ goto remove_notif;
+ }
WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
"Too old NVM version (0x%0x, required = 0x%0x)",
@@ -859,12 +869,10 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
@@ -884,6 +892,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
+ /*
+ * Set the revision on versions that contain it.
+ * This must be done after calling iwl_sar_geo_init().
+ */
+ if (cmd_ver == 3)
+ cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER))
+ cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
0, len, &cmd);
@@ -892,7 +910,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data, *enabled;
- union iwl_ppag_table_cmd ppag_table;
int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
s8 *gain;
@@ -946,8 +963,8 @@ read_table:
goto out_free;
}
- ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
- if (!ppag_table.v1.enabled) {
+ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
+ if (!mvm->fwrt.ppag_table.v1.enabled) {
ret = 0;
goto out_free;
}
@@ -962,16 +979,23 @@ read_table:
union acpi_object *ent;
ent = &wifi_pkg->package.elements[idx++];
- if (ent->type != ACPI_TYPE_INTEGER ||
- (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
- (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
- (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
- (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- ppag_table.v1.enabled = cpu_to_le32(0);
+ if (ent->type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
+
gain[i * num_sub_bands + j] = ent->integer.value;
+
+ if ((j == 0 &&
+ (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
+ gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
+ (j != 0 &&
+ (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
+ gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
+ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+ ret = -EINVAL;
+ goto out_free;
+ }
}
}
ret = 0;
@@ -984,7 +1008,6 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
u8 cmd_ver;
int i, j, ret, num_sub_bands, cmd_size;
- union iwl_ppag_table_cmd ppag_table;
s8 *gain;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
@@ -1003,7 +1026,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS;
gain = mvm->fwrt.ppag_table.v1.gain[0];
- cmd_size = sizeof(ppag_table.v1);
+ cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
if (mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v2 but FW supports v1, sending truncated table\n");
@@ -1011,7 +1034,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
} else if (cmd_ver == 2) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = mvm->fwrt.ppag_table.v2.gain[0];
- cmd_size = sizeof(ppag_table.v2);
+ cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
if (mvm->fwrt.ppag_ver == 1) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v1 but FW supports v2, sending padded table\n");
@@ -1031,7 +1054,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, cmd_size, &ppag_table);
+ 0, cmd_size, &mvm->fwrt.ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -1039,6 +1062,29 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return ret;
}
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+};
+
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
int ret;
@@ -1050,6 +1096,15 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
ret);
return 0;
}
+
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(mvm,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+ return 0;
+ }
+
return iwl_mvm_ppag_send_cmd(mvm);
}
@@ -1090,20 +1145,23 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
+ u8 value;
+
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2);
+ DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &iwl_guid, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ else if (value >= DSM_VALUE_INDONESIA_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
- ret);
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ else if (value == DSM_VALUE_INDONESIA_ENABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
return DSM_VALUE_INDONESIA_ENABLE;
@@ -1112,27 +1170,53 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
return DSM_VALUE_INDONESIA_DISABLE;
}
+static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+{
+ u8 value;
+ int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, DSM_RFI_FUNC_ENABLE,
+ &iwl_rfi_guid, &value);
+
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+
+ } else if (value >= DSM_VALUE_RFI_MAX) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
+ value);
+
+ } else if (value == DSM_VALUE_RFI_ENABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
+ return DSM_VALUE_RFI_ENABLE;
+ }
+
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
+
+ /* default behaviour is disabled */
+ return DSM_VALUE_RFI_DISABLE;
+}
+
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
+ u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD);
+ DSM_FUNC_DISABLE_SRD,
+ &iwl_guid, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_SRD_MAX)
+ else if (value >= DSM_VALUE_SRD_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, ret=%d\n",
- ret);
+ "DSM function DISABLE_SRD return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ else if (value == DSM_VALUE_SRD_PASSIVE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
return DSM_VALUE_SRD_PASSIVE;
- } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ } else if (value == DSM_VALUE_SRD_DISABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: disabling SRD\n");
return DSM_VALUE_SRD_DISABLE;
@@ -1145,7 +1229,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
u8 ret;
int cmd_ret;
- struct iwl_lari_config_change_cmd cmd = {};
+ struct iwl_lari_config_change_cmd_v2 cmd = {};
if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
cmd.config_bitmap |=
@@ -1163,11 +1247,18 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
/* apply more config masks here */
if (cmd.config_bitmap) {
- IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n");
+ size_t cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw,
+ REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE, 1) == 2 ?
+ sizeof(struct iwl_lari_config_change_cmd_v2) :
+ sizeof(struct iwl_lari_config_change_cmd_v1);
+ IWL_DEBUG_RADIO(mvm,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
+ le32_to_cpu(cmd.config_bitmap));
cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
- 0, sizeof(cmd), &cmd);
+ 0, cmd_size, &cmd);
if (cmd_ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
@@ -1209,6 +1300,11 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
}
+
+static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+{
+ return DSM_VALUE_RFI_DISABLE;
+}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@ -1312,8 +1408,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret)
return ret;
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
-
mvm->rfkill_safe_init_done = false;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
@@ -1547,6 +1641,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_ftm_initiator_smooth_config(mvm);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
+ if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ iwl_rfi_send_config_cmd(mvm, NULL);
+ }
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
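
The LARI hunk above sizes the command by the firmware's advertised command
version rather than by sizeof(cmd). The same selection as a standalone
helper (a sketch using the names from the diff, assuming the v1 layout is
a prefix of v2 so the shorter size is safe to send to older firmware):

static size_t iwl_mvm_lari_cmd_size(struct iwl_mvm *mvm)
{
	u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
				       LARI_CONFIG_CHANGE, 1);

	/* default to the v1 size unless the firmware reports v2 */
	return ver == 2 ? sizeof(struct iwl_lari_config_change_cmd_v2)
			: sizeof(struct iwl_lari_config_change_cmd_v1);
}
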
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9078fcb5286c..fd5e08961651 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1289,6 +1289,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
struct ieee80211_vif *csa_vif;
@@ -1304,6 +1305,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr =
&beacon_v5->beacon_notify_hdr;
+ if (unlikely(pkt_len < sizeof(*beacon_v5)))
+ return;
+
mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
@@ -1314,6 +1318,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate));
} else {
+ if (unlikely(pkt_len < sizeof(*beacon)))
+ return;
+
mvm->ibss_manager = beacon->ibss_mgr_status != 0;
status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
@@ -1419,12 +1426,13 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb;
u32 size = le32_to_cpu(sb->byte_count);
- if (size == 0)
+ if (size == 0 || pkt_len < struct_size(sb, data, size))
return;
skb = alloc_skb(size, GFP_ATOMIC);
@@ -1460,14 +1468,10 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
struct iwl_probe_resp_data *old_data, *new_data;
- int len = iwl_rx_packet_payload_len(pkt);
u32 id = le32_to_cpu(notif->mac_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
- if (WARN_ON_ONCE(len < sizeof(*notif)))
- return;
-
IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
notif->noa_active, notif->csa_counter);
@@ -1514,12 +1518,8 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
struct ieee80211_vif *csa_vif, *vif;
struct iwl_mvm_vif *mvmvif;
- int len = iwl_rx_packet_payload_len(pkt);
u32 id_n_color, csa_id, mac_id;
- if (WARN_ON_ONCE(len < sizeof(*notif)))
- return;
-
id_n_color = le32_to_cpu(notif->id_and_color);
mac_id = id_n_color & FW_CTXT_ID_MSK;
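
The stored-beacon hunk replaces the bare byte_count check with a
struct_size() bound, so the notification's trailing data is validated
against the received payload length before it is copied. A self-contained
sketch of the same check against a hypothetical notification layout:

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_beacon_notif {
	__le32 byte_count;
	u8 data[];	/* flexible array, standing in for the FW layout */
};

static bool demo_notif_len_ok(unsigned int pkt_len,
			      const struct demo_beacon_notif *sb, u32 size)
{
	/* struct_size(sb, data, size) == sizeof(*sb) + size,
	 * with integer-overflow checking folded in */
	return size != 0 && pkt_len >= struct_size(sb, data, size);
}
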
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index da32937ba9a7..baf7404c137d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -260,7 +260,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
int ret;
bool changed;
const struct ieee80211_regdomain *r =
- rtnl_dereference(mvm->hw->wiphy->regd);
+ wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd);
if (!r)
return -ENOENT;
@@ -282,7 +282,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
/* update cfg80211 if the regdomain was changed */
if (changed)
- ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
else
ret = 0;
@@ -472,6 +472,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -816,8 +821,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
rcu_read_lock();
do {
while (likely(!mvmtxq->stopped &&
- (mvm->trans->system_pm_mode ==
- IWL_PLAT_PM_MODE_DISABLED))) {
+ !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
if (!skb) {
@@ -1368,15 +1372,13 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
{
- struct iwl_mvm *mvm;
struct iwl_mvm_vif *mvmvif;
struct ieee80211_vif *vif;
mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
- mvm = mvmvif->mvm;
- iwl_mvm_abort_channel_switch(mvm->hw, vif);
+ /* Trigger disconnect (should clear the CSA state) */
ieee80211_chswitch_done(vif, false);
}
@@ -2005,9 +2007,21 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u32 flags;
int i;
+ const struct ieee80211_sta_he_cap *own_he_cap = NULL;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ const struct ieee80211_supported_band *sband;
rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band];
+ own_he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
if (IS_ERR_OR_NULL(sta)) {
rcu_read_unlock();
@@ -2194,6 +2208,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
}
+ if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN))
+ flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED;
+
if (vif->bss_conf.nontransmitted) {
flags |= STA_CTXT_HE_REF_BSSID_VALID;
ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
@@ -2404,12 +2422,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update power mode\n");
}
- if (changes & BSS_CHANGED_TXPOWER) {
- IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
- bss_conf->txpower);
- iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
- }
-
if (changes & BSS_CHANGED_CQM) {
IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
/* reset cqm events tracking */
@@ -2641,12 +2653,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
IWL_WARN(mvm, "Failed updating beacon data\n");
- if (changes & BSS_CHANGED_TXPOWER) {
- IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
- bss_conf->txpower);
- iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
- }
-
if (changes & BSS_CHANGED_FTM_RESPONDER) {
int ret = iwl_mvm_ftm_start_responder(mvm, vif);
@@ -2686,6 +2692,12 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON_ONCE(1);
}
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+ }
+
mutex_unlock(&mvm->mutex);
}
@@ -3009,6 +3021,39 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
mvmvif->he_ru_2mhz_block = !iter_data.tolerated;
}
+static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!mvm->cca_40mhz_workaround)
+ return;
+
+ /* decrement and check that we reached zero */
+ mvm->cca_40mhz_workaround--;
+ if (mvm->cca_40mhz_workaround)
+ return;
+
+ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(vif));
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)he_cap;
+
+ he->he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3048,6 +3093,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* No need to make sure deferred TX indication is off since the
* worker will already remove it if it was on
*/
+
+ /*
+ * Additionally, reset the 40 MHz capability if we disconnected
+ * from the AP now.
+ */
+ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
}
mutex_lock(&mvm->mutex);
@@ -3389,6 +3440,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
+ if (keyidx == 6 || keyidx == 7)
+ rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
+ key);
+
if ((vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP) && !sta) {
/*
@@ -3497,6 +3552,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case DISABLE_KEY:
+ if (keyidx == 6 || keyidx == 7)
+ RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6],
+ NULL);
+
ret = -ENOENT;
for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
if (mvmvif->ap_early_keys[i] == key) {
@@ -4194,6 +4253,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
iwl_mvm_binding_remove_vif(mvm, vif);
out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
mvmvif->phy_ctxt = NULL;
iwl_mvm_power_update_mac(mvm);
}
@@ -4628,7 +4690,8 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (mvmvif->csa_failed)
goto out_unlock;
- IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
+ IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d mode = %d\n",
+ mvmvif->id, chsw->count, chsw->block_tx);
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
@@ -4645,7 +4708,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
if (drop) {
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm,
- iwl_mvm_flushable_queues(mvm) & queues, 0);
+ iwl_mvm_flushable_queues(mvm) & queues);
mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
@@ -4663,7 +4726,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
continue;
if (drop)
- iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
@@ -4945,6 +5008,34 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_mlme_event *mlme)
+{
+ if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED ||
+ mlme->status == MLME_TIMEOUT)) {
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
+ NULL);
+ return;
+ }
+
+ if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED ||
+ mlme->status == MLME_TIMEOUT)) {
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
+ NULL);
+ return;
+ }
+
+ if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_DEASSOC,
+ NULL);
+ return;
+ }
+}
+
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
@@ -4959,6 +5050,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+ if (iwl_trans_dbg_ini_valid(mvm->trans)) {
+ iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme);
+ return;
+ }
+
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MLME);
if (!trig)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ed0e8b751737..0a963d01b825 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -419,6 +419,10 @@ struct iwl_mvm_vif {
/* 26-tone RU OFDMA transmissions should be blocked */
bool he_ru_2mhz_block;
+
+ struct {
+ struct ieee80211_key_conf __rcu *keys[2];
+ } bcn_prot;
};
static inline struct iwl_mvm_vif *
@@ -796,6 +800,8 @@ struct iwl_mvm {
bool hw_registered;
bool rfkill_safe_init_done;
+ u8 cca_40mhz_workaround;
+
u32 ampdu_ref;
bool ampdu_toggle;
@@ -887,8 +893,12 @@ struct iwl_mvm {
/* last smart fifo state that was successfully sent to firmware */
enum iwl_sf_state sf_state;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /*
+ * Leave this pointer outside the ifdef below so that it can be
+ * assigned without ifdef in the source code.
+ */
struct dentry *debugfs_dir;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 dbgfs_sram_offset, dbgfs_sram_len;
u32 dbgfs_prph_reg_addr;
bool disable_power_off;
@@ -1471,10 +1481,9 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
-int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
- u16 tids, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
@@ -1547,6 +1556,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
*/
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
@@ -1692,12 +1704,11 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
-static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
- struct dentry *dbgfs_dir)
+static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
}
static inline void
@@ -1898,7 +1909,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
-void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
@@ -1995,6 +2005,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 size);
void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
#define MVM_TCM_PERIOD_MSEC 500
@@ -2029,6 +2040,10 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
+ struct iwl_rfi_lut_entry *rfi_table);
+struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
switch (band) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index abb8c1088c2f..7fb4e618f76e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -545,7 +545,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
}
- retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
kfree(regd);
return retval;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 98f62d78cf9c..ebed82c590e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -146,6 +146,70 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
+static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_vif *vif;
+
+ if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+ return;
+
+ vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
+ if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!vif->bss_conf.chandef.chan ||
+ vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ return;
+
+ if (!vif->bss_conf.assoc)
+ return;
+
+ /* this shouldn't happen *again*, ignore it */
+ if (mvm->cca_40mhz_workaround)
+ return;
+
+ /*
+ * We'll decrement this on disconnect - so set to 2 since we'll
+ * still have to disconnect from the current AP first.
+ */
+ mvm->cca_40mhz_workaround = 2;
+
+ /*
+ * This capability manipulation isn't really ideal, but it's the
+ * easiest choice - otherwise we'd have to do some major changes
+ * in mac80211 to support this, which isn't worth it. This does
+ * mean that userspace may have outdated information, but that's
+ * actually not an issue at all.
+ */
+ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(vif));
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)he_cap;
+
+ WARN_ON(!he->has_he);
+ WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+ he->he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ ieee80211_disconnect(vif, true);
+}
+
/**
* enum iwl_rx_handler_context context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@@ -169,15 +233,21 @@ enum iwl_rx_handler_context {
* @fn: the function is called when notification is received
*/
struct iwl_rx_handlers {
- u16 cmd_id;
+ u16 cmd_id, min_size; /* min_size: shortest valid notification payload */
enum iwl_rx_handler_context context;
void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
-#define RX_HANDLER(_cmd_id, _fn, _context) \
- { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
-#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
- { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
+#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \
+ { .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
+#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
+#define RX_HANDLER(_cmd_id, _fn, _context, _struct) \
+ { .cmd_id = _cmd_id, .fn = _fn, \
+ .context = _context, .min_size = sizeof(_struct), }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \
+ .context = _context, .min_size = sizeof(_struct), }
/*
* Handlers for fw notifications
@@ -187,85 +257,107 @@ struct iwl_rx_handlers {
* The handler can be one from three contexts, see &iwl_rx_handler_context
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
- RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
- RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
+ struct iwl_mvm_tx_resp),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_ba_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
- iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
+ iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
+ struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
- RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+ RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
- iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
+ iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
+ struct iwl_ba_window_status_notif),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
- RX_HANDLER_SYNC),
+ RX_HANDLER_SYNC, struct iwl_time_event_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
- iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
+ iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_session_prot_notif),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
- RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_eosp_notification),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
- iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+ struct iwl_lmac_scan_complete_notif),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_lmac_scan_complete_notif,
- RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
- RX_HANDLER_SYNC),
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
+ RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
+ iwl_mvm_rx_scan_match_found,
+ RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
- iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+ iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+ struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
- RX_HANDLER_SYNC),
+ RX_HANDLER_SYNC, struct iwl_card_state_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
- RX_HANDLER_SYNC),
+ RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
- RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
+ struct iwl_error_resp),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
- iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
- RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
- RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
- iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
+ struct iwl_uapsd_misbehaving_ap_notif),
+ RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
- iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
+ iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
+ struct ct_kill_notif),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_tdls_channel_switch_notif),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
- RX_HANDLER_SYNC),
+ RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
- iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
+ iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_ftm_responder_stats),
- RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
- iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
- iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+ iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
+ iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
- iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
+ iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
+ struct iwl_mfu_assert_dump_notif),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
- iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
+ iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
+ struct iwl_stored_beacon_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
- iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+ iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
+ struct iwl_mu_group_mgmt_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
- iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
+ iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_pm_state_notification),
RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
iwl_mvm_probe_resp_data_notif,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_probe_resp_data_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
iwl_mvm_channel_switch_noa_notif,
- RX_HANDLER_SYNC),
+ RX_HANDLER_SYNC, struct iwl_channel_switch_noa_notif),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
+ iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_datapath_monitor_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
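
For reference, one entry of the table under the new four-argument macro
expands as below; iwl_mvm_rx_common() later drops any notification whose
payload is shorter than min_size (illustration only, mirroring the TX_CMD
entry above):

static const struct iwl_rx_handlers example =
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
		   struct iwl_mvm_tx_resp);
/* ... which expands to:
 * { .cmd_id = TX_CMD, .fn = iwl_mvm_rx_tx_cmd,
 *   .context = RX_HANDLER_SYNC,
 *   .min_size = sizeof(struct iwl_mvm_tx_resp), }
 */
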
@@ -410,6 +502,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+ HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -552,6 +645,44 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
+static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ ret = iwl_run_init_mvm_ucode(mvm);
+
+ if (ret && ret != -ERFKILL)
+ iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+
+ if (!iwlmvm_mod_params.init_dbg || !ret)
+ iwl_mvm_stop_device(mvm);
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+ return ret;
+}
+
+static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
+ ret = iwl_mvm_mac_setup_register(mvm);
+ if (ret)
+ return ret;
+ mvm->hw_registered = true;
+
+ iwl_mvm_dbgfs_register(mvm);
+
+ return 0;
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -773,47 +904,40 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_free;
- mutex_lock(&mvm->mutex);
- err = iwl_run_init_mvm_ucode(mvm);
- if (err && err != -ERFKILL)
- iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
- if (!iwlmvm_mod_params.init_dbg || !err)
- iwl_mvm_stop_device(mvm);
- mutex_unlock(&mvm->mutex);
- if (err < 0) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
-
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
goto out_free;
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
- err = iwl_mvm_mac_setup_register(mvm);
- if (err)
- goto out_free;
- mvm->hw_registered = true;
-
min_backoff = iwl_mvm_min_backoff(mvm);
iwl_mvm_thermal_initialize(mvm, min_backoff);
- iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
-
if (!iwl_mvm_has_new_rx_stats_api(mvm))
memset(&mvm->rx_stats_v3, 0,
sizeof(struct mvm_statistics_rx_v3));
else
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
- iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+ mvm->debugfs_dir = dbgfs_dir;
+
+ if (iwl_mvm_start_get_nvm(mvm))
+ goto out_thermal_exit;
+
+ if (iwl_mvm_start_post_nvm(mvm))
+ goto out_thermal_exit;
return op_mode;
+ out_thermal_exit:
+ iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@@ -960,6 +1084,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt)
{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
int i;
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
@@ -981,6 +1106,9 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
continue;
+ if (unlikely(pkt_len < rx_h->min_size))
+ return;
+
if (rx_h->context == RX_HANDLER_SYNC) {
rx_h->fn(mvm, rxb);
return;
@@ -1020,9 +1148,9 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -1205,6 +1333,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
@@ -1255,7 +1384,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
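
The two reprobe hunks close a use-after-free window: the work item now
owns a device reference from get_device() until the deferred put_device().
The general pattern as a self-contained sketch (demo names, not the
driver's):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void demo_reprobe_wk(struct work_struct *wk)
{
	struct demo_reprobe *r = container_of(wk, struct demo_reprobe, work);

	if (device_reprobe(r->dev))
		dev_err(r->dev, "reprobe failed!\n");
	put_device(r->dev);	/* balances get_device() at schedule time */
	kfree(r);
}

static int demo_schedule_reprobe(struct device *dev)
{
	struct demo_reprobe *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;
	r->dev = get_device(dev);	/* keep dev alive until the work runs */
	INIT_WORK(&r->work, demo_reprobe_wk);
	schedule_work(&r->work);
	return 0;
}
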
@@ -1310,6 +1439,15 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm, true);
}
+static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
+}
+
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
.async_cb = iwl_mvm_async_cb, \
@@ -1322,7 +1460,8 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
.nic_config = iwl_mvm_nic_config, \
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
- .stop = iwl_op_mode_mvm_stop
+ .stop = iwl_op_mode_mvm_stop, \
+ .time_point = iwl_op_mode_mvm_time_point
static const struct iwl_op_mode_ops iwl_mvm_ops = {
IWL_MVM_COMMON_OPS,
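The new .time_point member slots into the same ops-table dispatch the driver already uses for .start/.stop: lower layers call through the iwl_op_mode vtable without knowing which operating mode is loaded. A minimal sketch of that shape; all names here (example_ops, example_time_point) are hypothetical, not iwlwifi symbols:

/* sketch only: the dispatch pattern behind the new .time_point op */
struct example_ops {
	void (*time_point)(void *ctx, int tp_id);
};

static void example_time_point(void *ctx, int tp_id)
{
	/* the MVM implementation forwards to iwl_dbg_tlv_time_point() */
}

static const struct example_ops example_ops_table = {
	.time_point = example_time_point,
};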
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
new file mode 100644
index 000000000000..873919048143
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include "mvm.h"
+#include "fw/api/commands.h"
+#include "fw/api/phy-ctxt.h"
+
+/*
+ * DDR needs the frequency in units of 16.666 MHz, so provide the FW with
+ * the frequency values in the adjusted format.
+ */
+static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
+ /* LPDDR4 */
+
+ /* frequency 3733MHz */
+ {cpu_to_le16(223), {114, 116, 118, 120, 122,},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 4267MHz */
+ {cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+
+ /* DDR5ePOR */
+
+ /* frequency 4000MHz */
+ {cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 4400MHz */
+ {cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
+
+ /* LPDDR5iPOR */
+
+ /* frequency 5200MHz */
+ {cpu_to_le16(312), {36, 38, 40, 42, 50,},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 6000MHz */
+ {cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 6400MHz */
+ {cpu_to_le16(384), {79, 83, 85, 87, 89, 91, 93,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+};
+
+int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
+{
+ int ret;
+ struct iwl_rfi_config_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, RFI_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+ return -EOPNOTSUPP;
+
+ /* in case no table is passed, use the default one */
+ if (!rfi_table) {
+ memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
+ } else {
+ memcpy(cmd.table, rfi_table, sizeof(cmd.table));
+ /* notify FW the table is not the default one */
+ cmd.oem = 1;
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
+
+ return ret;
+}
+
+struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
+{
+ struct iwl_rfi_freq_table_resp_cmd *resp;
+ int resp_size = sizeof(*resp);
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, RFI_GET_FREQ_TABLE_CMD),
+ .flags = CMD_WANT_SKB,
+ };
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ mutex_unlock(&mvm->mutex);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ return ERR_PTR(-EIO);
+
+ resp = kzalloc(resp_size, GFP_KERNEL);
+ if (!resp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(resp, cmd.resp_pkt->data, resp_size);
+
+ iwl_free_resp(&cmd);
+ return resp;
+}
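Two properties of the new rfi.c API are easy to miss: the LUT entries store the DDR frequency divided by 16.666 MHz (e.g. 3733 / 16.666 ≈ 223, matching the first row above), and iwl_rfi_get_freq_table() hands back either an ERR_PTR() or a kzalloc'd copy that the caller owns and must free. A hedged caller sketch; example_dump_rfi is not a driver function:

static void example_dump_rfi(struct iwl_mvm *mvm)
{
	struct iwl_rfi_freq_table_resp_cmd *resp;

	resp = iwl_rfi_get_freq_table(mvm);
	if (IS_ERR(resp)) {
		IWL_ERR(mvm, "failed to read RFI table: %ld\n",
			PTR_ERR(resp));
		return;
	}

	/* ... inspect the returned table ... */

	kfree(resp);	/* iwl_rfi_get_freq_table() returns a kzalloc'd copy */
}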
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 490a561c71db..8772b65c9dab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -248,14 +248,13 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct iwl_tlc_config_cmd *cmd)
{
int i;
- unsigned long tmp;
- unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ u16 supp = 0;
+ unsigned long tmp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
- supp = 0;
tmp = sta->supp_rates[sband->band];
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);
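The hunk above narrows supp to u16 (matching the command field) while keeping tmp as unsigned long, because for_each_set_bit() only walks unsigned long bitmaps. A standalone illustration of the widen-then-iterate idiom; collect_rates and hw_value are placeholders, not driver symbols:

#include <linux/bitops.h>

static u16 collect_rates(u32 bitmap, const u8 *hw_value)
{
	unsigned long tmp = bitmap;	/* widen: the iterator needs long */
	u16 supp = 0;
	int i;

	for_each_set_bit(i, &tmp, BITS_PER_LONG)
		supp |= BIT(hw_value[i]);

	return supp;
}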
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index f0364add85f9..8ef5399ad9be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -20,6 +20,10 @@
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (unlikely(pkt_len < sizeof(mvm->last_phy_info)))
+ return;
memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
mvm->ampdu_ref++;
@@ -874,12 +878,11 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
int i;
- u32 pkt_len = iwl_rx_packet_payload_len(pkt);
- if (WARN_ONCE(pkt_len != sizeof(*notif),
- "Received window status notification of wrong size (%u)\n",
- pkt_len))
- return;
+ BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX);
rcu_read_lock();
for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
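Both rx.c hunks follow the hardening rule applied throughout this patchset: check iwl_rx_packet_payload_len() against the expected structure size before reading pkt->data, and use BUILD_BUG_ON() where the invariant is already checkable at compile time. The runtime half of the pattern, sketched with a hypothetical notification type:

static void example_rx_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct example_notif *notif;	/* hypothetical layout */

	/* drop short/malformed packets before touching the payload */
	if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*notif)))
		return;

	notif = (void *)pkt->data;
	/* ... fields of *notif are now safe to read ... */
}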
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 4dc7c65a1130..c21736f80c29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -272,7 +272,72 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
rx_status->chain_signal[2] = S8_MIN;
}
-static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 status)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+ u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY);
+ u8 keyid;
+ struct ieee80211_key_conf *key;
+ u32 len = le16_to_cpu(desc->mpdu_len);
+ const u8 *frame = (void *)hdr;
+
+ /*
+ * For non-beacon frames we don't really care. But beacons may
+ * be filtered out by the firmware, so we need its replay
+ * detection; otherwise beacons the firmware previously
+ * filtered could be replayed without being noticed. The
+ * firmware can filter a lot, though usually only while
+ * nothing has changed.
+ */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+
+ /* good cases */
+ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+ !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
+ return 0;
+
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* only the two beacon protection key slots (6/7) are valid */
+ if (fwkeyid != 6 && fwkeyid != 7)
+ return -1;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]);
+ if (!key)
+ return -1;
+
+ if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+ return -1;
+
+ /*
+ * See if the key ID matches - if not this may be due to a
+ * switch and the firmware may erroneously report !MIC_OK.
+ */
+ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+ if (keyid != fwkeyid)
+ return -1;
+
+ /* Report status to mac80211 */
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ ieee80211_key_mic_failure(key);
+ else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ ieee80211_key_replay(key);
+
+ return -1;
+}
+
+static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats, u16 phy_info,
struct iwl_rx_mpdu_desc *desc,
u32 pkt_flags, int queue, u8 *crypt_len)
@@ -345,6 +410,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return -1;
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
+ case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
+ return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
default:
/*
* Sometimes we can get frames that were not decrypted
@@ -1567,6 +1634,23 @@ static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
}
}
+struct iwl_rx_sta_csa {
+ bool all_sta_unblocked;
+ struct ieee80211_vif *vif;
+};
+
+static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_rx_sta_csa *rx_sta_csa = data;
+
+ if (mvmsta->vif != rx_sta_csa->vif)
+ return;
+
+ if (mvmsta->disable_tx)
+ rx_sta_csa->all_sta_unblocked = false;
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1682,15 +1766,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_decode_lsig(skb, &phy_data);
- rx_status = IEEE80211_SKB_RXCB(skb);
-
- if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
- le32_to_cpu(pkt->len_n_flags), queue,
- &crypt_len)) {
- kfree_skb(skb);
- return;
- }
-
/*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
@@ -1774,6 +1849,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
}
+ if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
+ le32_to_cpu(pkt->len_n_flags), queue,
+ &crypt_len)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
@@ -1798,10 +1880,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+ struct iwl_rx_sta_csa rx_sta_csa = {
+ .all_sta_unblocked = true,
+ .vif = tx_blocked_vif,
+ };
if (mvmvif->csa_target_freq == rx_status->freq)
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
false);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_rx_get_sta_block_tx,
+ &rx_sta_csa);
+
+ if (rx_sta_csa.all_sta_unblocked) {
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ /* Unblock BCAST / MCAST station */
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+ }
}
rs_update_last_rssi(mvm, mvmsta, rx_status);
@@ -1938,6 +2034,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
+ if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
+ return;
+
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -2067,6 +2166,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data;
+ if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+ return;
+
iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
le16_to_cpu(release->nssn),
queue, 0);
@@ -2087,6 +2189,9 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
IWL_BAR_FRAME_RELEASE_TID_MASK);
struct iwl_mvm_baid_data *baid_data;
+ if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+ return;
+
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
baid >= ARRAY_SIZE(mvm->baid_map)))
return;
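The key-ID read in iwl_mvm_rx_mgmt_crypto() relies on the MMIE layout at the tail of a protected beacon: a 2-byte little-endian key ID, then a 6-byte IPN (IEEE80211_GMAC_PN_LEN), then the MIC (icv_len bytes), which is exactly what the earlier length check guards. Sketched as a helper; the function name is hypothetical:

/* tail of a BIP-protected beacon:
 *   ... | KeyID (2, LE) | IPN (6) | MIC (icv_len bytes) |
 */
static u8 example_beacon_keyid(const u8 *frame, u32 len, u8 icv_len)
{
	/* key IDs 6/7 fit in the low byte of the LE key ID field */
	return frame[len - icv_len - IEEE80211_GMAC_PN_LEN - 2];
}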
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 97d2de8f1582..caf87f320094 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1253,14 +1253,16 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) < 12)
+ ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
- /*
- * Fw doesn't use this sta anymore, pending deprecation via HOST API
- * change.
- */
- else
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ SCAN_CFG_CMD, 0) < 5) {
+ /*
+ * The FW no longer uses this sta; it was deprecated in
+ * SCAN_CFG_CMD version 5.
+ */
cfg.bcast_sta_id = 0xff;
+ }
cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
@@ -2854,12 +2856,19 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
.aborted = true,
};
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_uid_status[uid] = 0;
}
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
- if (uid >= 0 && !mvm->fw_restart) {
- ieee80211_sched_scan_stopped(mvm->hw);
+ if (uid >= 0) {
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if (!mvm->fw_restart)
+ ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
@@ -2889,6 +2898,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
.aborted = true,
};
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
ieee80211_scan_completed(mvm->hw, &info);
}
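The reworked branch above keys behaviour off the firmware-advertised command version rather than a capability flag; iwl_fw_lookup_cmd_ver() returns the version the loaded image supports for a given command. A minimal sketch of the idiom (the helper name is made up):

static bool example_needs_legacy_bcast_sta(struct iwl_mvm *mvm)
{
	/* older SCAN_CFG_CMD versions still consume the bcast sta id */
	return iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
				     SCAN_CFG_CMD, 0) < 5;
}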
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index dc174410bf9c..3a411bbda5fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
@@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
@@ -3105,11 +3111,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_has_new_tx_api(mvm)) {
if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
- BIT(tid), 0))
+ BIT(tid)))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
iwl_trans_wait_txq_empty(mvm->trans, txq_id);
} else {
- if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
}
@@ -3304,7 +3310,8 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
/* verify the key details match the required command's expectations */
if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
- (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
+ keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
(keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
@@ -3353,9 +3360,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
((u64) pn[0] << 40));
}
- IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+ IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
remove_key ? "removing" : "installing",
- igtk_cmd.sta_id);
+ keyconf->keyidx >= 6 ? "B" : "",
+ keyconf->keyidx, igtk_cmd.sta_id);
if (!iwl_mvm_has_new_rx_api(mvm)) {
struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
@@ -3809,7 +3817,7 @@ static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -3823,12 +3831,11 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta;
int i;
- lockdep_assert_held(&mvm->mutex);
+ rcu_read_lock();
/* Block/unblock all the stations of the given mvmvif */
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
- lockdep_is_held(&mvm->mutex));
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
if (IS_ERR_OR_NULL(sta))
continue;
@@ -3840,6 +3847,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
}
+ rcu_read_unlock();
+
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
return;
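iwl_mvm_modify_all_sta_disable_tx() now iterates under rcu_read_lock() instead of requiring mvm->mutex, which is also why the ADD_STA send above became CMD_ASYNC: nothing inside an RCU read-side critical section may sleep. The bare pattern, with table/n standing in for fw_id_to_mac_id/num_stations:

static void example_iterate(struct ieee80211_sta __rcu **table, int n)
{
	struct ieee80211_sta *sta;
	int i;

	rcu_read_lock();
	for (i = 0; i < n; i++) {
		sta = rcu_dereference(table[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;
		/* ... non-sleeping work only ... */
	}
	rcu_read_unlock();
}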
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 4e1bdf13e5e7..0b012f8c9eb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -999,9 +999,6 @@ void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (!te_data->running)
- return;
-
spin_lock_bh(&mvm->time_event_lock);
id = te_data->id;
spin_unlock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 507625f96dd7..2a7339b12b13 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -44,7 +44,7 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
iwl_mvm_set_hw_ctkill_state(mvm, false);
}
-void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+static void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
{
/* ignore the notification if we are in test mode */
if (mvm->temperature_test)
@@ -156,12 +156,6 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct ct_kill_notif *notif;
- int len = iwl_rx_packet_payload_len(pkt);
-
- if (WARN_ON_ONCE(len != sizeof(*notif))) {
- IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
- return;
- }
notif = (struct ct_kill_notif *)pkt->data;
IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
@@ -267,7 +261,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
if (ret)
- IWL_ERR(mvm, "Getting the temperature timed out\n");
+ IWL_WARN(mvm, "Getting the temperature timed out\n");
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a983c215df31..1ad621d13ad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -263,19 +263,26 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, __le16 fc)
{
- int rate_idx;
+ int rate_idx = -1;
u8 rate_plcp;
u32 rate_flags = 0;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- /* HT rate doesn't make sense for a non data frame */
- WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
- "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
- info->control.rates[0].flags,
- info->control.rates[0].idx);
+ /* info->control is only relevant for non HW rate control */
+ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ /* HT rate doesn't make sense for a non data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_data(fc),
+ "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx,
+ le16_to_cpu(fc), mvmsta->sta_state);
+
+ rate_idx = info->control.rates[0].idx;
+ }
- rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(
@@ -305,7 +312,7 @@ static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
- return iwl_mvm_get_tx_rate(mvm, info, sta) |
+ return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}
@@ -773,6 +780,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
@@ -795,6 +803,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
} else {
if (qos) {
u8 *qc;
@@ -1321,12 +1331,24 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
- u32 status)
+ u32 status, __le16 frame_control)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_tx_status *status_trig;
int i;
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
+ enum iwl_fw_ini_time_point tp =
+ IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+ if (ieee80211_is_action(frame_control))
+ tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ tp, NULL);
+ return;
+ }
+
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
FW_DBG_TRIGGER_TX_STATUS);
if (!trig)
@@ -1444,7 +1466,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (skb_freed > 1)
info->flags |= IEEE80211_TX_STAT_ACK;
- iwl_mvm_tx_status_check_trigger(mvm, status);
+ iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
info->status.rates[0].count = tx_resp->failure_frame + 1;
iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
@@ -1628,10 +1650,13 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
+ bool trigger_timepoint = false;
for (i = 0; i < tx_resp->frame_count; i++) {
u16 fstatus = le16_to_cpu(frame_status[i].status);
-
+ /* if any frame wasn't transmitted, trigger the time point */
+ trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
+ AGG_TX_STATE_TRANSMITTED);
IWL_DEBUG_TX_REPLY(mvm,
"status %s (0x%04x), try-count (%d) seq (0x%x)\n",
iwl_get_agg_tx_status(fstatus),
@@ -1640,6 +1665,11 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
AGG_TX_STATE_TRY_CNT_POS,
le16_to_cpu(frame_status[i].sequence));
}
+
+ if (trigger_timepoint)
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
+
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
@@ -1701,7 +1731,8 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int txq, int index,
- struct ieee80211_tx_info *ba_info, u32 rate)
+ struct ieee80211_tx_info *tx_info, u32 rate,
+ bool is_flush)
{
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data = NULL;
@@ -1744,7 +1775,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
* frames because before failing a frame the firmware transmits
* it without aggregation at least once.
*/
- info->flags |= IEEE80211_TX_STAT_ACK;
+ if (!is_flush)
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
/*
@@ -1763,7 +1795,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
- "invalid BA notification: Q %d, tid %d\n",
+ "invalid reclaim request: Q %d, tid %d\n",
tid_data->txq_id, tid);
rcu_read_unlock();
return;
@@ -1778,26 +1810,28 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
freed = 0;
/* pack lq color from tid_data along the reduced txp */
- ba_info->status.status_driver_data[0] =
+ tx_info->status.status_driver_data[0] =
RS_DRV_DATA_PACK(tid_data->lq_color,
- ba_info->status.status_driver_data[0]);
- ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+ tx_info->status.status_driver_data[0]);
+ tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (ieee80211_is_data_qos(hdr->frame_control))
- freed++;
- else
- WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+ if (!is_flush) {
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+ }
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
if (freed == 1) {
info->flags |= IEEE80211_TX_STAT_AMPDU;
- memcpy(&info->status, &ba_info->status,
- sizeof(ba_info->status));
+ memcpy(&info->status, &tx_info->status,
+ sizeof(tx_info->status));
iwl_mvm_hwrate_to_tx_status(rate, info);
}
}
@@ -1808,7 +1842,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
* possible (i.e. first MPDU in the aggregation wasn't acked)
* Still it's important to update RS about sent vs. acked.
*/
- if (skb_queue_empty(&reclaimed_skbs)) {
+ if (!is_flush && skb_queue_empty(&reclaimed_skbs)) {
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
@@ -1818,13 +1852,13 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
- ba_info->band = chanctx_conf->def.chan->band;
- iwl_mvm_hwrate_to_tx_status(rate, ba_info);
+ tx_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, tx_info);
if (!iwl_mvm_has_tlc_offload(mvm)) {
IWL_DEBUG_TX_REPLY(mvm,
"No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
}
}
@@ -1840,6 +1874,7 @@ out:
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
int sta_id, tid, txq, index;
struct ieee80211_tx_info ba_info = {};
struct iwl_mvm_ba_notif *ba_notif;
@@ -1852,8 +1887,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+ u16 tfd_cnt;
int i;
+ if (unlikely(sizeof(*ba_res) > pkt_len))
+ return;
+
sta_id = ba_res->sta_id;
ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
@@ -1862,8 +1901,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
- if (!le16_to_cpu(ba_res->tfd_cnt))
- goto out;
+ tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+ if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len)
+ return;
rcu_read_lock();
@@ -1878,7 +1918,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
*/
/* Free per TID */
- for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+ for (i = 0; i < tfd_cnt; i++) {
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
@@ -1893,14 +1933,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
- le32_to_cpu(ba_res->tx_rate));
+ le32_to_cpu(ba_res->tx_rate), false);
}
if (mvmsta)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-out:
+
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
@@ -1936,7 +1976,7 @@ out:
rcu_read_unlock();
iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags);
+ tid_data->rate_n_flags, false);
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
@@ -1960,7 +2000,7 @@ out:
* 2) flush the Tx path
* 3) wait for the transport queues to be empty
*/
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
{
int ret;
struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
@@ -1969,29 +2009,89 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
};
WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
- ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
sizeof(flush_cmd), &flush_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
return ret;
}
-int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
- u16 tids, u32 flags)
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
{
int ret;
+ struct iwl_tx_path_flush_cmd_rsp *rsp;
struct iwl_tx_path_flush_cmd flush_cmd = {
.sta_id = cpu_to_le32(sta_id),
.tid_mask = cpu_to_le16(tids),
};
+ struct iwl_host_cmd cmd = {
+ .id = TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ };
+
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
- ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
- sizeof(flush_cmd), &flush_cmd);
- if (ret)
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+ cmd.flags |= CMD_WANT_SKB;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ sta_id, tids);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+ }
+
+ if (cmd.flags & CMD_WANT_SKB) {
+ int i;
+ int num_flushed_queues;
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ rsp = (void *)cmd.resp_pkt->data;
+
+ if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
+ "sta_id %d != rsp_sta_id %d",
+ sta_id, le16_to_cpu(rsp->sta_id))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+ if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+ "num_flushed_queues %d", num_flushed_queues)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ for (i = 0; i < num_flushed_queues; i++) {
+ struct ieee80211_tx_info tx_info = {};
+ struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+ int tid = le16_to_cpu(queue_info->tid);
+ int read_before = le16_to_cpu(queue_info->read_before_flush);
+ int read_after = le16_to_cpu(queue_info->read_after_flush);
+ int queue_num = le16_to_cpu(queue_info->queue_num);
+
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "tid %d queue_id %d read-before %d read-after %d\n",
+ tid, queue_num, read_before, read_after);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
+ &tx_info, 0, true);
+ }
+free_rsp:
+ iwl_free_resp(&cmd);
+ }
return ret;
}
@@ -2004,10 +2104,10 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
offsetof(struct iwl_mvm_sta, sta_id));
if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff);
if (internal)
- return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk);
- return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index ee2e0cb47584..b6b481ff1518 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -45,8 +45,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
if (cmd->flags & CMD_WANT_SKB)
return ret;
- /* Silently ignore failures if RFKILL is asserted */
- if (!ret || ret == -ERFKILL)
+ /*
+ * Silently ignore failures if RFKILL is asserted or
+ * we are in the suspend/resume process
+ */
+ if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
return 0;
return ret;
}
@@ -496,18 +499,33 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
- u32 error;
+ u32 error, data1;
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ error = UMAG_SB_CPU_2_STATUS;
+ data1 = UMAG_SB_CPU_1_STATUS;
+ } else if (mvm->trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ error = SB_CPU_2_STATUS;
+ data1 = SB_CPU_1_STATUS;
+ } else {
+ return;
+ }
error = iwl_read_umac_prph(trans, error);
IWL_ERR(trans, "IML/ROM dump:\n");
if (error & 0xFFFF0000)
- IWL_ERR(trans, "IML/ROM SYSASSERT:\n");
+ IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
- iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS));
+ iwl_read_umac_prph(trans, data1));
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+ iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
@@ -525,8 +543,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_mvm_dump_iml_error_log(mvm);
+ iwl_mvm_dump_iml_error_log(mvm);
iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}
@@ -832,6 +849,36 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
return bss_iter_data.vif;
}
+struct iwl_bss_find_iter_data {
+ struct ieee80211_vif *vif;
+ u32 macid;
+};
+
+static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_find_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->id == data->macid)
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
+{
+ struct iwl_bss_find_iter_data data = {
+ .macid = macid,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_find_iface_iterator, &data);
+
+ return data.vif;
+}
+
struct iwl_sta_iter_data {
bool assoc;
};
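iwl_mvm_get_vif_by_macid() must be called with mvm->mutex held (hence the lockdep assertion) and returns NULL when no active interface carries the given firmware MAC id. A hedged caller sketch; example_handle_macid is not a driver function:

static void example_handle_macid(struct iwl_mvm *mvm, u32 macid)
{
	struct ieee80211_vif *vif;

	lockdep_assert_held(&mvm->mutex);

	vif = iwl_mvm_get_vif_by_macid(mvm, macid);
	if (!vif)
		return;	/* no active interface with this MAC id */

	/* ... act on vif ... */
}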
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 36bf414a388a..8fba190e84cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
@@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /*
- * The firmware initializes this again later (to a smaller
- * value), but for the boot process initialize the LTR to
- * ~250 usec.
- */
- u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ /*
+ * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
@@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
@@ -286,12 +298,18 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
- &trans_pcie->pnvm_dram);
- if (ret < 0) {
- IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
- ret);
- return ret;
+ /* only allocate the DRAM if not allocated yet */
+ if (!trans->pnvm_loaded) {
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+ &trans_pcie->pnvm_dram);
+ if (ret < 0) {
+ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
+ ret);
+ return ret;
+ }
}
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
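ltr_val above is assembled with u32_encode_bits() from <linux/bitfield.h>, which shifts a value into the field selected by a GENMASK-style constant; the LTR word packs a scale and a value for both the snoop and no-snoop requirements. A standalone illustration with hypothetical field masks:

#include <linux/bitfield.h>

#define EX_SCALE	GENMASK(11, 10)	/* hypothetical field */
#define EX_VALUE	GENMASK(9, 0)	/* hypothetical field */

static u32 example_pack_ltr(u32 scale, u32 value)
{
	return u32_encode_bits(scale, EX_SCALE) |
	       u32_encode_bits(value, EX_VALUE);
}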
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 965982612e74..314fec4a89ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -478,44 +478,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
- {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
- {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x2726, 0x0078, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x2726, 0x007C, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
- {IWL_PCI_DEVICE(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0)},
- {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
- {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
- {IWL_PCI_DEVICE(0x2726, 0x2074, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x2726, 0x4070, iwlax201_cfg_snj_hr_b0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
- {IWL_PCI_DEVICE(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long)},
- {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
- {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
- {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
- {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
- {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+/* So devices */
+ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
+ {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7E80, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
#endif /* CONFIG_IWLMVM */
@@ -524,16 +497,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _no_160, _cores, _cfg, _name) \
+ _rf_id, _no_160, _cores, _cdb, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \
.no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
- .mac_step = _mac_step }
+ .mac_step = _mac_step, .cdb = _cdb }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- _cfg, _name)
+ IWL_CFG_NO_CDB, _cfg, _name)
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -555,15 +528,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
- /* QnJ with Hr */
- IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
-
- /* SnJ with HR*/
- IWL_DEV_INFO(0x2726, 0x0244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
- IWL_DEV_INFO(0x2726, 0x1651, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
- IWL_DEV_INFO(0x2726, 0x1652, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
- IWL_DEV_INFO(0x2726, 0x4244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
-
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
@@ -629,101 +593,137 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ /* So with HR */
+ IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+ IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+ IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
+ IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+ IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+ IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
+
+ /* SnJ with HR */
+ IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x00B4, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL),
+ IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
+ IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
+
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9461_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9461_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9462_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9462_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9560_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS,
+ IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@@ -731,176 +731,176 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QnJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@@ -908,40 +908,139 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
+
+/* QnJ with Hr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+
+/* SnJ with Jf */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_a0_jf_b0, iwl9560_name),
+
+/* SnJ with Hr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_hr_b0, iwl_ax101_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_snj_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_ma_a0_hr_b0, iwl_ax201_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma_a0_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
+ iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma_a0_mr_a0, iwl_ma_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_snj_a0_mr_a0, iwl_ma_name),
+/* So with Hr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
#endif /* CONFIG_IWLMVM */
};
@@ -991,6 +1090,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(dev_info->rf_type == (u16)IWL_CFG_ANY ||
dev_info->rf_type ==
CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id)) &&
+ (dev_info->cdb == IWL_CFG_NO_CDB ||
+ CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id)) &&
(dev_info->rf_id == (u8)IWL_CFG_ANY ||
dev_info->rf_id ==
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device)) &&
@@ -1005,6 +1106,16 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
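/*
 * Illustration only, distilled from the matching hunk above: each
 * dev_info entry now carries a CDB column, and the added check accepts
 * an entry when it is marked IWL_CFG_NO_CDB or when the RF ID reports
 * a CDB (dual-radio) device.  Sketch, not driver code:
 */
static bool example_cdb_match(u8 entry_cdb, u32 hw_rf_id)
{
	return entry_cdb == IWL_CFG_NO_CDB ||
	       CSR_HW_RFID_IS_CDB(hw_rf_id);
}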
+ /*
+ * Workaround for problematic SnJ device: sometimes when
+ * certain RF modules are connected to SnJ, the device ID
+ * changes to QnJ's ID. So we are using QnJ's trans_cfg until
+ * here. But if we detect that the MAC type is actually SnJ,
+ * we should switch to it here to avoid problems later.
+ */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ)
+ iwl_trans->trans_cfg = &iwl_so_trans_cfg;
+
#if IS_ENABLED(CONFIG_IWLMVM)
/*
* special-case 7265D, it has the same PCI IDs.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a528d3d99c5a..d9688c7bed07 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -363,7 +363,6 @@ struct iwl_trans_pcie {
bool ucode_write_complete;
bool sx_complete;
wait_queue_head_t ucode_write_waitq;
- wait_queue_head_t wait_command_queue;
wait_queue_head_t sx_waitq;
u8 def_rx_queue;
@@ -418,8 +417,7 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
return (void *)trans->trans_specific;
}
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
- struct msix_entry *entry)
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
/*
* Before sending the interrupt the HW disables it to prevent
@@ -429,7 +427,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
* write 1 clear (W1C) register, meaning that it's cleared
* by writing 1 to the bit.
*/
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
static inline struct iwl_trans *
@@ -462,7 +460,6 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct iwl_rxq *rxq);
@@ -569,9 +566,9 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
_iwl_disable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
@@ -601,9 +598,9 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
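/*
 * Context for the spin_lock() -> spin_lock_bh() conversions throughout
 * this patch: RX handling moves into NAPI poll routines, i.e. softirq
 * context.  A lock shared with that path must disable bottom halves
 * while held in process context, or the softirq could preempt the
 * holder on the same CPU and spin on the lock forever.  Simplified
 * sketch, not driver code:
 */
static void example_shared_state_update(spinlock_t *lock)
{
	spin_lock_bh(lock);	/* keeps local softirqs (NAPI) out */
	/* ... modify state the poll routine also touches ... */
	spin_unlock_bh(lock);
}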
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
@@ -762,7 +759,6 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -800,4 +796,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
bool test, bool reset);
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 37bbd9a07f36..42426e25cac6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -207,10 +207,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
if (!rxq->need_update)
continue;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq->need_update = false;
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -255,7 +255,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
while (rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
@@ -269,16 +269,16 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
/*
* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8.
*/
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -301,7 +301,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
__le32 *bd = (__le32 *)rxq->bd;
/* The overwritten rxb must be a used one */
@@ -320,14 +320,14 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -433,28 +433,28 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
while (1) {
unsigned int offset;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
return;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -466,19 +466,19 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -514,10 +514,10 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
/* If we were scheduled - there is at least one request */
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* swap out the rba->rbd_empty to a local list */
list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
while (pending) {
int i;
@@ -577,21 +577,21 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
pending);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
/* get more empty RBDs for current pending requests */
list_splice_tail_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
atomic_inc(&rba->req_ready);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* return unused rbds to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
@@ -834,8 +834,11 @@ err:
trans_pcie->base_rb_stts_dma = 0;
}
kfree(trans_pcie->rx_pool);
+ trans_pcie->rx_pool = NULL;
kfree(trans_pcie->global_table);
+ trans_pcie->global_table = NULL;
kfree(trans_pcie->rxq);
+ trans_pcie->rxq = NULL;
return ret;
}
@@ -844,7 +847,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
- unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@@ -862,7 +864,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
/* Stop Rx DMA */
@@ -899,7 +901,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -913,7 +915,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
- unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@@ -934,7 +935,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
/* Stop Rx DMA */
@@ -992,7 +993,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Enable the relevant rx queues */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -1008,10 +1009,76 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+
+static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
- WARN_ON(1);
- return 0;
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, rxq->id);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, 0);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
}
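/*
 * Skeleton the three poll routines above share (simplified;
 * example_rx_handle() and example_rearm() are stand-ins for
 * iwl_pcie_rx_handle() and the per-mode interrupt re-arm step):
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = example_rx_handle(napi, budget);

	if (done < budget) {
		/* ring drained: re-arm the interrupt, then complete */
		example_rearm(napi);
		napi_complete_done(napi, done);
	}

	/* done == budget leaves NAPI scheduled for another pass */
	return done;
}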
static int _iwl_pcie_rx_init(struct iwl_trans *trans)
@@ -1030,12 +1097,12 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
cancel_work_sync(&rba->rx_alloc);
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
INIT_LIST_HEAD(&rba->rbd_allocated);
INIT_LIST_HEAD(&rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
@@ -1046,7 +1113,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
/*
* Set read write pointer to reflect that we have processed
* and used all buffers, but have not restocked the Rx queue
@@ -1062,11 +1129,27 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rx_init_rxb_lists(rxq);
- if (!rxq->napi.poll)
+ if (!rxq->napi.poll) {
+ int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
+
+ if (trans_pcie->msix_enabled) {
+ poll = iwl_pcie_napi_poll_msix;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX &&
+ i == 0)
+ poll = iwl_pcie_napi_poll_msix_shared;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+ i == 1)
+ poll = iwl_pcie_napi_poll_msix_shared;
+ }
+
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- iwl_pcie_dummy_napi_poll, 64);
+ poll, NAPI_POLL_WEIGHT);
+ napi_enable(&rxq->napi);
+ }
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
/* move the pool to the default queue and allocator ownerships */
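/*
 * Lifecycle sketch for the registration above: each RX queue gets a
 * NAPI instance added and enabled once at init; iwl_pcie_rx_free()
 * later pairs this with napi_disable()/netif_napi_del().
 */
static void example_napi_setup(struct net_device *napi_dev,
			       struct iwl_rxq *rxq,
			       int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(napi_dev, &rxq->napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&rxq->napi);
}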
@@ -1108,9 +1191,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
- spin_lock(&trans_pcie->rxq->lock);
+ spin_lock_bh(&trans_pcie->rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
- spin_unlock(&trans_pcie->rxq->lock);
+ spin_unlock_bh(&trans_pcie->rxq->lock);
return 0;
}
@@ -1163,8 +1246,10 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
iwl_pcie_free_rxq_dma(trans, rxq);
- if (rxq->napi.poll)
+ if (rxq->napi.poll) {
+ napi_disable(&rxq->napi);
netif_napi_del(&rxq->napi);
+ }
}
kfree(trans_pcie->rx_pool);
kfree(trans_pcie->global_table);
@@ -1417,16 +1502,15 @@ out_err:
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct napi_struct *napi;
struct iwl_rxq *rxq;
- u32 r, i, count = 0;
+ u32 r, i, count = 0, handled = 0;
bool emergency = false;
if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
- return;
+ return budget;
rxq = &trans_pcie->rxq[queue];
@@ -1444,7 +1528,7 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
- while (i != r) {
+ while (i != r && ++handled < budget) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
/* number of RBDs still waiting for page allocation */
@@ -1545,18 +1629,9 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- napi = &rxq->napi;
- if (napi->poll) {
- napi_gro_flush(napi, false);
-
- if (napi->rx_count) {
- netif_receive_skb_list(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
- }
- }
-
iwl_pcie_rxq_restock(trans, rxq);
+
+ return handled;
}
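/*
 * Shape of the budget handling above (simplified): count completed
 * descriptors and stop at the NAPI budget, so the caller can tell
 * "ring drained" (handled < budget) from "budget exhausted".
 */
static int example_rx_handle(unsigned int r, unsigned int w,
			     unsigned int qsize, int budget)
{
	int handled = 0;

	while (r != w && ++handled < budget)
		r = (r + 1) & (qsize - 1);	/* consume one descriptor */

	return handled;
}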
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
@@ -1576,20 +1651,25 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
+ struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
+ if (WARN_ONCE(!rxq, "Got MSI-X interrupt before we have Rx queues"))
+ return IRQ_NONE;
+
lock_map_acquire(&trans->sync_cmd_lockdep_map);
local_bh_disable();
- iwl_pcie_rx_handle(trans, entry->entry);
+ if (napi_schedule_prep(&rxq->napi))
+ __napi_schedule(&rxq->napi);
+ else
+ iwl_pcie_clear_irq(trans, entry->entry);
local_bh_enable();
- iwl_pcie_clear_irq(trans, entry);
-
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
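/*
 * The interrupt-to-poll handoff above, in isolation (simplified):
 * napi_schedule_prep() fails when the poll is already scheduled, in
 * which case only the vector needs to be acked here; otherwise the
 * scheduled poll routine re-arms the IRQ when it completes.
 */
static void example_rx_irq(struct iwl_trans *trans, struct iwl_rxq *rxq,
			   int vector)
{
	if (napi_schedule_prep(&rxq->napi))
		__napi_schedule(&rxq->napi);
	else
		iwl_pcie_clear_irq(trans, vector);
}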
@@ -1600,7 +1680,6 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1612,7 +1691,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMG_PS_CTRL_VAL_RESET_REQ))) {
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
return;
}
@@ -1627,7 +1706,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
@@ -1742,7 +1821,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
&trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
} else {
clear_bit(STATUS_RFKILL_HW, &trans->status);
if (trans_pcie->opmode_down)
@@ -1757,10 +1836,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
/* dram interrupt table not set yet,
* use legacy interrupt.
@@ -1797,7 +1877,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
*/
if (test_bit(STATUS_INT_ENABLED, &trans->status))
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_NONE;
}
@@ -1808,7 +1888,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* already raised an interrupt.
*/
IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
goto out;
}
@@ -1829,7 +1909,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1949,7 +2029,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rx++;
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
@@ -1974,20 +2057,22 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
inta & ~trans_pcie->inta_mask);
}
- spin_lock(&trans_pcie->irq_lock);
- /* only Re-enable all interrupt if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- /* we are loading the firmware, enable FH_TX interrupt only */
- else if (handled & CSR_INT_BIT_FH_TX)
- iwl_enable_fw_load_int(trans);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(trans);
- /* Re-enable the ALIVE / Rx interrupt if it occurred */
- else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ if (!polling) {
+ spin_lock_bh(&trans_pcie->irq_lock);
+ /* only Re-enable all interrupt if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ /* we are loading the firmware, enable FH_TX interrupt only */
+ else if (handled & CSR_INT_BIT_FH_TX)
+ iwl_enable_fw_load_int(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ }
out:
lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -2049,7 +2134,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
if (!trans_pcie->ict_tbl)
return;
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
_iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -2067,7 +2152,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
@@ -2075,9 +2160,9 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
@@ -2109,10 +2194,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct iwl_trans *trans = trans_pcie->trans;
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
@@ -2120,7 +2206,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
@@ -2146,14 +2232,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
inta_fh & MSIX_FH_INT_CAUSES_Q1) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 1);
+ if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[1].napi);
+ }
local_bh_enable();
}
@@ -2248,7 +2340,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
wake_up(&trans_pcie->fw_reset_waitq);
}
- iwl_pcie_clear_irq(trans, entry);
+ if (!polling)
+ iwl_pcie_clear_irq(trans, entry->entry);
lock_map_release(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c602b815dcc2..497ef3405da3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -10,6 +10,8 @@
#include "internal.h"
#include "fw/dbg.h"
+#define FW_RESET_TIMEOUT (HZ / 5)
+
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -104,7 +106,7 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
/* wait 200ms */
ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
- trans_pcie->fw_reset_done, HZ / 5);
+ trans_pcie->fw_reset_done, FW_RESET_TIMEOUT);
if (!ret)
IWL_ERR(trans,
"firmware didn't ACK the reset - continue anyway\n");
@@ -198,6 +200,10 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
+ NULL);
+
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
@@ -213,9 +219,9 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
trans->cfg->min_txq_size);
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
iwl_pcie_gen2_apm_init(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
iwl_op_mode_nic_config(trans->op_mode);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 285e0d586021..1bf4c37fe960 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -511,9 +511,9 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
int ret;
/* nic_init */
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
ret = iwl_pcie_apm_init(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
if (ret)
return ret;
@@ -523,11 +523,15 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
iwl_op_mode_nic_config(trans->op_mode);
/* Allocate the RX queue, or reset if it is already allocated */
- iwl_pcie_rx_init(trans);
+ ret = iwl_pcie_rx_init(trans);
+ if (ret)
+ return ret;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_tx_init(trans))
+ if (iwl_pcie_tx_init(trans)) {
+ iwl_pcie_rx_free(trans);
return -ENOMEM;
+ }
if (trans->trans_cfg->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
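/*
 * The error handling added above follows the usual unwind rule: a
 * failing step frees whatever the previous step set up.  Simplified:
 */
static int example_nic_init(struct iwl_trans *trans)
{
	int ret = iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);	/* undo rx_init */
		return -ENOMEM;
	}

	return 0;
}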
@@ -636,17 +640,16 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
int ret;
trans_pcie->ucode_write_complete = false;
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return -EIO;
iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
byte_cnt);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
trans_pcie->ucode_write_complete, 5 * HZ);
@@ -1376,6 +1379,10 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
+ NULL);
+
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
@@ -1966,13 +1973,12 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
- unsigned long *flags)
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+ spin_lock_bh(&trans_pcie->reg_lock);
if (trans_pcie->cmd_hold_nic_awake)
goto out;
@@ -2057,7 +2063,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
}
err:
- spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+ spin_unlock_bh(&trans_pcie->reg_lock);
return false;
}
@@ -2070,8 +2076,7 @@ out:
return true;
}
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
- unsigned long *flags)
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2095,21 +2100,21 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
out:
- spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+ spin_unlock_bh(&trans_pcie->reg_lock);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
- unsigned long flags;
int offs = 0;
u32 *vals = buf;
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
addr + 4 * offs);
@@ -2118,14 +2123,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
HBUS_TARG_MEM_RDAT);
offs++;
- /* calling ktime_get is expensive so
- * do it once in 128 reads
- */
- if (offs % 128 == 0 && ktime_after(ktime_get(),
- timeout))
+ if (time_after(jiffies, end)) {
+ resched = true;
break;
+ }
}
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
+
+ if (resched)
+ cond_resched();
} else {
return -EBUSY;
}
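/*
 * Pattern behind the ktime -> jiffies change above (simplified, with
 * hypothetical example_* helpers): bound the time spent holding NIC
 * access to about half a second, then drop it and voluntarily
 * reschedule so a long read cannot stall other tasks.
 */
static void example_bounded_read(struct iwl_trans *trans)
{
	while (example_have_more_words()) {
		unsigned long end = jiffies + HZ / 2;
		bool resched = false;

		if (!iwl_trans_grab_nic_access(trans))
			return;

		while (example_have_more_words()) {
			example_read_one_word(trans);
			if (time_after(jiffies, end)) {
				resched = true;
				break;
			}
		}
		iwl_trans_release_nic_access(trans);

		if (resched)
			cond_resched();	/* let others run, then re-grab */
	}
}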
@@ -2137,16 +2143,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
{
- unsigned long flags;
int offs, ret = 0;
const u32 *vals = buf;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
+ if (iwl_trans_grab_nic_access(trans)) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT,
vals ? vals[offs] : 0);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
} else {
ret = -EBUSY;
}
@@ -2294,11 +2299,10 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ spin_lock_bh(&trans_pcie->reg_lock);
__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ spin_unlock_bh(&trans_pcie->reg_lock);
}
static const char *get_csr_string(int cmd)
@@ -2943,11 +2947,10 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data)
{
u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
- unsigned long flags;
__le32 *val;
int i;
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return 0;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
@@ -2965,7 +2968,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
i));
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
*data = iwl_fw_error_next_data(*data);
@@ -2979,10 +2982,9 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
{
u32 buf_size_in_dwords = (monitor_len >> 2);
u32 *buffer = (u32 *)fw_mon_data->data;
- unsigned long flags;
u32 i;
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return 0;
iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
@@ -2991,7 +2993,7 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
MON_DMARB_RD_DATA_ADDR);
iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
return monitor_len;
}
@@ -3284,16 +3286,29 @@ static struct iwl_trans_dump_data
return dump_data;
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
- return 0;
+ if (enable)
+ iwl_enable_interrupts(trans);
+ else
+ iwl_disable_interrupts(trans);
}
-static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
+ u32 inta_addr, sw_err_bit;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->msix_enabled) {
+ inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ } else {
+ inta_addr = CSR_INT;
+ sw_err_bit = CSR_INT_BIT_SW_ERR;
+ }
+
+ iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-#endif /* CONFIG_PM_SLEEP */
#define IWL_TRANS_COMMON_OPS \
.op_mode_leave = iwl_trans_pcie_op_mode_leave, \
@@ -3314,25 +3329,17 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
- .sync_nmi = iwl_trans_pcie_sync_nmi
-
-#ifdef CONFIG_PM_SLEEP
-#define IWL_TRANS_PM_OPS \
- .suspend = iwl_trans_pcie_suspend, \
- .resume = iwl_trans_pcie_resume,
-#else
-#define IWL_TRANS_PM_OPS
-#endif /* CONFIG_PM_SLEEP */
+ .interrupts = iwl_trans_pci_interrupts, \
+ .sync_nmi = iwl_trans_pcie_sync_nmi \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
- IWL_TRANS_PM_OPS
.start_hw = iwl_trans_pcie_start_hw,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .send_cmd = iwl_trans_pcie_send_hcmd,
+ .send_cmd = iwl_pcie_enqueue_hcmd,
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_txq_reclaim,
@@ -3353,13 +3360,12 @@ static const struct iwl_trans_ops trans_ops_pcie = {
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
IWL_TRANS_COMMON_OPS,
- IWL_TRANS_PM_OPS
.start_hw = iwl_trans_pcie_start_hw,
.fw_alive = iwl_trans_pcie_gen2_fw_alive,
.start_fw = iwl_trans_pcie_gen2_start_fw,
.stop_device = iwl_trans_pcie_gen2_stop_device,
- .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+ .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
.tx = iwl_txq_gen2_tx,
.reclaim = iwl_txq_reclaim,
@@ -3494,9 +3500,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
- /* Initialize the wait queue for commands */
- init_waitqueue_head(&trans_pcie->wait_command_queue);
-
init_waitqueue_head(&trans_pcie->sx_waitq);
@@ -3536,48 +3539,3 @@ out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
-
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
- bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
- u32 inta_addr, sw_err_bit;
-
- if (trans_pcie->msix_enabled) {
- inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
- sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
- } else {
- inta_addr = CSR_INT;
- sw_err_bit = CSR_INT_BIT_SW_ERR;
- }
-
- /* if the interrupts were already disabled, there is no point in
- * calling iwl_disable_interrupts
- */
- if (interrupts_enabled)
- iwl_disable_interrupts(trans);
-
- iwl_force_nmi(trans);
- while (time_after(timeout, jiffies)) {
- u32 inta_hw = iwl_read32(trans, inta_addr);
-
- /* Error detected by uCode */
- if (inta_hw & sw_err_bit) {
- /* Clear causes register */
- iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
- break;
- }
-
- mdelay(1);
- }
-
- /* enable interrupts only if they were already enabled before this
- * function to avoid a case where the driver enables interrupts before
- * proper configurations were made
- */
- if (interrupts_enabled)
- iwl_enable_interrupts(trans);
-
- iwl_trans_fw_error(trans);
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8757246a90d5..4456abb9a074 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -24,14 +24,13 @@
* failed. On success, it returns the index (>= 0) of command in the
* command queue.
*/
-static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
- unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int i, cmd_pos, idx;
@@ -244,11 +243,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ spin_lock(&trans_pcie->reg_lock);
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_txq_inc_wr_ptr(trans, txq);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ spin_unlock(&trans_pcie->reg_lock);
out:
spin_unlock_bh(&txq->lock);
@@ -257,124 +256,3 @@ free_dup_buf:
kfree(dup_buf);
return idx;
}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n", cmd_str))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
- cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
- cmd_str, ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- cmd_str);
- ret = -ETIMEDOUT;
-
- iwl_trans_pcie_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
-
-int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (cmd->flags & CMD_ASYNC) {
- int ret;
-
- /* An asynchronous command can not expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
- }
-
- return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
-}
-
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5dda0015522d..381e8f90b6f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -201,6 +201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
@@ -218,12 +223,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
- unsigned long flags;
-
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ spin_lock(&trans_pcie->reg_lock);
if (txq_id == trans->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ spin_unlock(&trans_pcie->reg_lock);
}
}
@@ -389,13 +392,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
int ch, ret;
u32 mask = 0;
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
goto out;
/* Stop each Tx DMA channel */
@@ -411,10 +413,10 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
out:
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
/*
@@ -571,7 +573,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_scd_deactivate_fifos(trans);
@@ -580,7 +582,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
@@ -674,7 +676,6 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
- unsigned long flags;
int nfreed = 0;
u16 r;
@@ -705,9 +706,10 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
if (txq->read_ptr == txq->write_ptr) {
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ /* BHs are also disabled due to txq->lock */
+ spin_lock(&trans_pcie->reg_lock);
iwl_pcie_clear_cmd_in_flight(trans);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ spin_unlock(&trans_pcie->reg_lock);
}
iwl_txq_progress(txq);
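/*
 * The locking comment above, spelled out (sketch): the reg_lock
 * critical section is nested inside spin_lock_bh(&txq->lock), so
 * bottom halves are already disabled and the inner lock can use the
 * plain spin_lock() variant instead of _irqsave.
 */
static void example_reclaim_tail(struct iwl_trans_pcie *trans_pcie,
				 struct iwl_txq *txq)
{
	spin_lock_bh(&txq->lock);
	spin_lock(&trans_pcie->reg_lock);	/* BHs already off */
	iwl_pcie_clear_cmd_in_flight(trans_pcie->trans);
	spin_unlock(&trans_pcie->reg_lock);
	spin_unlock_bh(&txq->lock);
}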
@@ -909,14 +911,13 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* failed. On success, it returns the index (>= 0) of command in the
* command queue.
*/
-static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
- unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
@@ -1159,20 +1160,19 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ spin_lock(&trans_pcie->reg_lock);
ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
if (ret < 0) {
idx = ret;
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
- goto out;
+ goto unlock_reg;
}
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
-
+ unlock_reg:
+ spin_unlock(&trans_pcie->reg_lock);
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
@@ -1244,7 +1244,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd_id));
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
}
meta->flags = 0;
@@ -1252,142 +1252,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_unlock_bh(&txq->lock);
}
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- int ret;
-
- /* An asynchronous command can not expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- ret = iwl_pcie_enqueue_hcmd(trans, cmd);
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
- iwl_get_cmd_string(trans, cmd->id));
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n",
- iwl_get_cmd_string(trans, cmd->id)))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
- iwl_get_cmd_string(trans, cmd->id));
-
- cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- iwl_get_cmd_string(trans, cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_get_cmd_string(trans, cmd->id));
- ret = -ETIMEDOUT;
-
- iwl_trans_pcie_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- iwl_trans_pcie_dump_regs(trans);
- IWL_ERR(trans, "FW error in SYNC CMD %s\n",
- iwl_get_cmd_string(trans, cmd->id));
- dump_stack();
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n",
- iwl_get_cmd_string(trans, cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
-
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
-{
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (cmd->flags & CMD_ASYNC)
- return iwl_pcie_send_hcmd_async(trans, cmd);
-
- /* We still can fail on RFKILL that can be asserted while we wait */
- return iwl_pcie_send_hcmd_sync(trans, cmd);
-}
-
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta)
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 27eea909e32d..833f43d1ca7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
* idx is bounded by n_window
*/
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, idx));
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
@@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
+ if (!WARN_ON_ONCE(!skb))
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_txq_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
*/
int rd_ptr = txq->read_ptr;
int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
@@ -1580,6 +1577,10 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__func__, txq_id, last_to_free,
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ NULL);
goto out;
}
@@ -1725,3 +1726,132 @@ next_queue:
}
}
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ cmd_idx = trans->ops->send_cmd(trans, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_trans_sync_nmi(trans);
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+ !(cmd->flags & CMD_SEND_IN_D3))) {
+ IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
+ return -EHOSTDOWN;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ /* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ ret = trans->ops->send_cmd(trans, cmd);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_trans_txq_send_hcmd_sync(trans, cmd);
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
index cff694c25ccc..af1dbdf5617a 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -181,4 +181,5 @@ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3b3fc7c9c91d..fa7d4c20dc13 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -311,6 +311,12 @@ static struct net_device *hwsim_mon; /* global monitor netdev */
.hw_value = (_freq), \
}
+#define CHAN6G(_freq) { \
+ .band = NL80211_BAND_6GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
static const struct ieee80211_channel hwsim_channels_2ghz[] = {
CHAN2G(2412), /* Channel 1 */
CHAN2G(2417), /* Channel 2 */
@@ -377,6 +383,68 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5925), /* Channel 185 */
};
+static const struct ieee80211_channel hwsim_channels_6ghz[] = {
+ CHAN6G(5955), /* Channel 1 */
+ CHAN6G(5975), /* Channel 5 */
+ CHAN6G(5995), /* Channel 9 */
+ CHAN6G(6015), /* Channel 13 */
+ CHAN6G(6035), /* Channel 17 */
+ CHAN6G(6055), /* Channel 21 */
+ CHAN6G(6075), /* Channel 25 */
+ CHAN6G(6095), /* Channel 29 */
+ CHAN6G(6115), /* Channel 33 */
+ CHAN6G(6135), /* Channel 37 */
+ CHAN6G(6155), /* Channel 41 */
+ CHAN6G(6175), /* Channel 45 */
+ CHAN6G(6195), /* Channel 49 */
+ CHAN6G(6215), /* Channel 53 */
+ CHAN6G(6235), /* Channel 57 */
+ CHAN6G(6255), /* Channel 61 */
+ CHAN6G(6275), /* Channel 65 */
+ CHAN6G(6295), /* Channel 69 */
+ CHAN6G(6315), /* Channel 73 */
+ CHAN6G(6335), /* Channel 77 */
+ CHAN6G(6355), /* Channel 81 */
+ CHAN6G(6375), /* Channel 85 */
+ CHAN6G(6395), /* Channel 89 */
+ CHAN6G(6415), /* Channel 93 */
+ CHAN6G(6435), /* Channel 97 */
+ CHAN6G(6455), /* Channel 101 */
+ CHAN6G(6475), /* Channel 105 */
+ CHAN6G(6495), /* Channel 109 */
+ CHAN6G(6515), /* Channel 113 */
+ CHAN6G(6535), /* Channel 117 */
+ CHAN6G(6555), /* Channel 121 */
+ CHAN6G(6575), /* Channel 125 */
+ CHAN6G(6595), /* Channel 129 */
+ CHAN6G(6615), /* Channel 133 */
+ CHAN6G(6635), /* Channel 137 */
+ CHAN6G(6655), /* Channel 141 */
+ CHAN6G(6675), /* Channel 145 */
+ CHAN6G(6695), /* Channel 149 */
+ CHAN6G(6715), /* Channel 153 */
+ CHAN6G(6735), /* Channel 157 */
+ CHAN6G(6755), /* Channel 161 */
+ CHAN6G(6775), /* Channel 165 */
+ CHAN6G(6795), /* Channel 169 */
+ CHAN6G(6815), /* Channel 173 */
+ CHAN6G(6835), /* Channel 177 */
+ CHAN6G(6855), /* Channel 181 */
+ CHAN6G(6875), /* Channel 185 */
+ CHAN6G(6895), /* Channel 189 */
+ CHAN6G(6915), /* Channel 193 */
+ CHAN6G(6935), /* Channel 197 */
+ CHAN6G(6955), /* Channel 201 */
+ CHAN6G(6975), /* Channel 205 */
+ CHAN6G(6995), /* Channel 209 */
+ CHAN6G(7015), /* Channel 213 */
+ CHAN6G(7035), /* Channel 217 */
+ CHAN6G(7055), /* Channel 221 */
+ CHAN6G(7075), /* Channel 225 */
+ CHAN6G(7095), /* Channel 229 */
+ CHAN6G(7115), /* Channel 233 */
+};
+
#define NUM_S1G_CHANS_US 51
static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US];
@@ -548,6 +616,7 @@ struct mac80211_hwsim_data {
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
+ struct ieee80211_channel channels_6ghz[ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
@@ -578,7 +647,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
unsigned long next_start, start, end;
} survey_data[ARRAY_SIZE(hwsim_channels_2ghz) +
- ARRAY_SIZE(hwsim_channels_5ghz)];
+ ARRAY_SIZE(hwsim_channels_5ghz) +
+ ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
@@ -3149,6 +3219,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_2ghz));
memcpy(data->channels_5ghz, hwsim_channels_5ghz,
sizeof(hwsim_channels_5ghz));
+ memcpy(data->channels_6ghz, hwsim_channels_6ghz,
+ sizeof(hwsim_channels_6ghz));
memcpy(data->channels_s1g, hwsim_channels_s1g,
sizeof(hwsim_channels_s1g));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 44fbd0acb87a..a63c5e622ee3 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -981,7 +981,7 @@ out:
static int if_sdio_enter_deep_sleep(struct lbs_private *priv)
{
- int ret = -1;
+ int ret;
struct cmd_header cmd;
memset(&cmd, 0, sizeof(cmd));
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a6b9dc6700b1..a2ed268ce0da 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2097,7 +2097,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2173,7 +2173,8 @@ static int
mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
const u8 *ssid, const u8 *bssid, int mode,
struct ieee80211_channel *channel,
- struct cfg80211_connect_params *sme, bool privacy)
+ struct cfg80211_connect_params *sme, bool privacy,
+ struct cfg80211_bss **sel_bss)
{
struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
@@ -2307,17 +2308,31 @@ done:
}
}
+ if (bss)
+ cfg80211_ref_bss(priv->adapter->wiphy, bss);
+
ret = mwifiex_bss_start(priv, bss, &req_ssid);
if (ret)
- return ret;
+ goto cleanup;
if (mode == NL80211_IFTYPE_ADHOC) {
/* Inform the BSS information to kernel, otherwise
* kernel will give a panic after successful assoc */
- if (mwifiex_cfg80211_inform_ibss_bss(priv))
- return -EFAULT;
+ if (mwifiex_cfg80211_inform_ibss_bss(priv)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
}
+ /* Pass the selected BSS entry to caller. */
+ if (sel_bss) {
+ *sel_bss = bss;
+ bss = NULL;
+ }
+
+cleanup:
+ if (bss)
+ cfg80211_put_bss(priv->adapter->wiphy, bss);
return ret;
}
@@ -2334,6 +2349,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_adapter *adapter = priv->adapter;
+ struct cfg80211_bss *bss = NULL;
int ret;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
@@ -2366,14 +2382,15 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
(int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
- priv->bss_mode, sme->channel, sme, 0);
+ priv->bss_mode, sme->channel, sme, 0,
+ &bss);
if (!ret) {
- cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
- NULL, 0, WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
+ cfg80211_connect_bss(priv->netdev, priv->cfg_bssid, bss, NULL,
+ 0, NULL, 0, WLAN_STATUS_SUCCESS,
+ GFP_KERNEL, NL80211_TIMEOUT_UNSPECIFIED);
mwifiex_dbg(priv->adapter, MSG,
"info: associated to bssid %pM successfully\n",
priv->cfg_bssid);
@@ -2504,7 +2521,7 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->chandef.chan, NULL,
- params->privacy);
+ params->privacy, NULL);
done:
if (!ret) {
cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
@@ -2576,7 +2593,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_block = false;
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
if (!user_scan_cfg)
@@ -3081,7 +3098,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mutex_init(&priv->async_mutex);
/* Register network device */
- if (register_netdevice(dev)) {
+ if (cfg80211_register_netdevice(dev)) {
mwifiex_dbg(adapter, ERROR, "cannot register network device\n");
ret = -EFAULT;
goto err_reg_netdev;
@@ -3160,7 +3177,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
netif_carrier_off(priv->netdev);
if (wdev->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(wdev->netdev);
+ cfg80211_unregister_netdevice(wdev->netdev);
if (priv->dfs_cac_workqueue) {
flush_workqueue(priv->dfs_cac_workqueue);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index ee52fb839ef7..529dfd8b7ae8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -598,12 +598,14 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
}
rtnl_lock();
+ wiphy_lock(adapter->wiphy);
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create default STA interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
@@ -614,6 +616,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create AP interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
@@ -625,10 +628,12 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create p2p client interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
}
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -1440,9 +1445,11 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (!priv)
continue;
rtnl_lock();
+ wiphy_lock(adapter->wiphy);
if (priv->netdev &&
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
}
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 5f0a61b974ee..94228b316df1 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -571,7 +571,7 @@ static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
#endif
/* PCI Device Driver */
-static struct pci_driver __refdata mwifiex_pcie = {
+static struct pci_driver mwifiex_pcie = {
.name = "mwifiex_pcie",
.id_table = mwifiex_ids,
.probe = mwifiex_pcie_probe,
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index abf3b0233ccc..c9f8c056aa51 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -808,7 +808,7 @@ struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
char data[];
-} __packed;
+} __packed __aligned(2);
/* Routines to add/remove DMA header from skb. */
static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
@@ -1208,9 +1208,8 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
- int refilled;
+ int refilled = 0;
- refilled = 0;
while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
struct sk_buff *skb;
dma_addr_t addr;
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 31015d2a8e7d..9ff43f1fc50d 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -24,8 +24,13 @@ config MT76x02_USB
tristate
select MT76_USB
+config MT76_CONNAC_LIB
+ tristate
+ select MT76_CORE
+
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index e53584db0756..94efe3c29053 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_MT76_USB) += mt76-usb.o
obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o
obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
+obj-$(CONFIG_MT76_CONNAC_LIB) += mt76-connac-lib.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
@@ -26,8 +27,11 @@ mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
+mt76-connac-lib-y := mt76_connac_mcu.o mt76_connac_mac.o
+
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
+obj-$(CONFIG_MT7921E) += mt7921/
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 73eeb00d5aa6..19098b852d0a 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -411,8 +411,12 @@ unmap:
free:
#ifdef CONFIG_NL80211_TESTMODE
/* fix tx_done accounting on queue overflow */
- if (tx_info.skb == dev->test.tx_skb)
- dev->test.tx_done--;
+ if (mt76_is_testmode_skb(dev, skb, &hw)) {
+ struct mt76_phy *phy = hw->priv;
+
+ if (tx_info.skb == phy->test.tx_skb)
+ phy->test.tx_done--;
+ }
#endif
e.skb = tx_info.skb;
@@ -509,15 +513,17 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
- struct page *page = virt_to_head_page(data);
- int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
- offset += q->buf_offset;
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page) + q->buf_offset;
+
skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
+ } else {
+ skb_free_frag(data);
}
if (more)
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 90278aeb6721..665b54c5c8ae 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -75,8 +75,8 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
#ifdef CONFIG_NL80211_TESTMODE
- dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
- dev->test.mtd_offset = offset;
+ dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
+ dev->test_mtd.offset = offset;
#endif
out_put_node:
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index a840396f2c74..696d00d1976c 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -387,9 +387,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
- dev->phy2 = NULL;
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(phy->hw);
+ dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
@@ -519,10 +519,10 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->test.state == MT76_TM_STATE_RX_FRAMES) {
- dev->test.rx_stats.packets[q]++;
+ if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
+ phy->test.rx_stats.packets[q]++;
if (status->flag & RX_FLAG_FAILED_FCS_CRC)
- dev->test.rx_stats.fcs_error[q]++;
+ phy->test.rx_stats.fcs_error[q]++;
}
#endif
__skb_queue_tail(&dev->rx_skb[q], skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 3e496a188bf0..8bf45497cfca 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -81,6 +81,7 @@ enum mt76_rxq_id {
MT_RXQ_MCU,
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
+ MT_RXQ_EXT_WA,
__MT_RXQ_MAX
};
@@ -515,10 +516,10 @@ struct mt76_rx_status {
};
struct mt76_testmode_ops {
- int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state);
- int (*set_params)(struct mt76_dev *dev, struct nlattr **tb,
+ int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
+ int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
enum mt76_testmode_state new_state);
- int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg);
+ int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};
struct mt76_testmode_data {
@@ -539,17 +540,20 @@ struct mt76_testmode_data {
u8 tx_ltf;
u8 tx_antenna_mask;
+ u8 tx_spe_idx;
+
+ u8 tx_duty_cycle;
+ u32 tx_time;
+ u32 tx_ipg;
u32 freq_offset;
u8 tx_power[4];
u8 tx_power_control;
- const char *mtd_name;
- u32 mtd_offset;
-
u32 tx_pending;
u32 tx_queued;
+ u16 tx_queued_limit;
u32 tx_done;
struct {
u64 packets[__MT_RXQ_MAX];
@@ -557,6 +561,14 @@ struct mt76_testmode_data {
} rx_stats;
};
+struct mt76_vif {
+ u8 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+ u8 scan_seq_num;
+};
+
struct mt76_phy {
struct ieee80211_hw *hw;
struct mt76_dev *dev;
@@ -578,10 +590,16 @@ struct mt76_phy {
u8 macaddr[ETH_ALEN];
- u32 vif_mask;
-
int txpower_cur;
u8 antenna_mask;
+ u16 chainmask;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct mt76_testmode_data test;
+#endif
+
+ struct delayed_work mac_work;
+ u8 mac_work_count;
};
struct mt76_dev {
@@ -622,7 +640,6 @@ struct mt76_dev {
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
- struct delayed_work mac_work;
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -630,6 +647,8 @@ struct mt76_dev {
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
+ u32 vif_mask;
+
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
@@ -646,6 +665,7 @@ struct mt76_dev {
struct mt76_rate_power rate_power;
+ char alpha2[3];
enum nl80211_dfs_regions region;
u32 debugfs_reg;
@@ -661,9 +681,11 @@ struct mt76_dev {
#ifdef CONFIG_NL80211_TESTMODE
const struct mt76_testmode_ops *test_ops;
- struct mt76_testmode_data test;
+ struct {
+ const char *name;
+ u32 offset;
+ } test_mtd;
#endif
-
struct workqueue_struct *wq;
union {
@@ -931,10 +953,27 @@ static inline u8 mt76_tx_power_nss_delta(u8 nss)
return nss_delta[nss - 1];
}
-static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
+static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
- return dev->test.state != MT76_TM_STATE_OFF;
+ return phy->test.state != MT76_TM_STATE_OFF;
+#else
+ return false;
+#endif
+}
+
+static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ struct ieee80211_hw **hw)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ if (skb == dev->phy.test.tx_skb)
+ *hw = dev->phy.hw;
+ else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
+ *hw = dev->phy2->hw;
+ else
+ return false;
+ return true;
#else
return false;
#endif
@@ -1016,17 +1055,17 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb, void *data, int len);
-int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state);
+int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
-static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable)
+static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
enum mt76_testmode_state state = MT76_TM_STATE_IDLE;
- if (disable || dev->test.state == MT76_TM_STATE_OFF)
+ if (disable || phy->test.state == MT76_TM_STATE_OFF)
state = MT76_TM_STATE_OFF;
- mt76_testmode_set_state(dev, state);
+ mt76_testmode_set_state(phy, state);
#endif
}
@@ -1052,7 +1091,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
-void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index b14e08046e20..f0b879c3eba8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -532,7 +532,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
spin_lock_init(&dev->sta_poll_lock);
spin_lock_init(&dev->ps_lock);
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7603_mac_work);
tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet);
dev->slottime = 9;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 55095e66f2ef..cc4e7bc48294 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1631,9 +1631,13 @@ mt7603_edcca_check(struct mt7603_dev *dev)
if (rssi0 > 128)
rssi0 -= 256;
- rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
- if (rssi1 > 128)
- rssi1 -= 256;
+ if (dev->mphy.antenna_mask & BIT(1)) {
+ rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
+ if (rssi1 > 128)
+ rssi1 -= 256;
+ } else {
+ rssi1 = rssi0;
+ }
if (max(rssi0, rssi1) >= -40 &&
dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
@@ -1788,7 +1792,7 @@ out:
void mt7603_mac_work(struct work_struct *work)
{
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
- mt76.mac_work.work);
+ mphy.mac_work.work);
bool reset = false;
int i, idx;
@@ -1796,7 +1800,7 @@ void mt7603_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
- dev->mac_work_count++;
+ dev->mphy.mac_work_count++;
mt76_update_survey(&dev->mt76);
mt7603_edcca_check(dev);
@@ -1807,7 +1811,7 @@ void mt7603_mac_work(struct work_struct *work)
dev->mt76.aggr_stats[idx++] += val >> 16;
}
- if (dev->mac_work_count == 10)
+ if (dev->mphy.mac_work_count == 10)
mt7603_false_cca_check(dev);
if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
@@ -1838,17 +1842,17 @@ void mt7603_mac_work(struct work_struct *work)
dev->rx_dma_idx = ~0;
memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
reset = true;
- dev->mac_work_count = 0;
+ dev->mphy.mac_work_count = 0;
}
- if (dev->mac_work_count >= 10)
- dev->mac_work_count = 0;
+ if (dev->mphy.mac_work_count >= 10)
+ dev->mphy.mac_work_count = 0;
mutex_unlock(&dev->mt76.mutex);
if (reset)
mt7603_mac_watchdog_reset(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 6d47b57cbc39..8edea1e7a602 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -17,7 +17,7 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_start(dev);
dev->mphy.survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
- mt7603_mac_work(&dev->mt76.mac_work.work);
+ mt7603_mac_work(&dev->mphy.mac_work.work);
return 0;
}
@@ -28,7 +28,7 @@ mt7603_stop(struct ieee80211_hw *hw)
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
mt7603_mac_stop(dev);
}
@@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
- mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
+ mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
- dev->mphy.vif_mask |= BIT(mvif->idx);
+ dev->mt76.vif_mask |= BIT(mvif->idx);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -105,7 +105,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&dev->sta_poll_lock);
mutex_lock(&dev->mt76.mutex);
- dev->mphy.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
}
@@ -137,7 +137,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
u8 bw = MT_BW_20;
bool failed = false;
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
@@ -178,7 +178,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_txq_schedule_all(&dev->mphy);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
/* reset channel stats */
@@ -200,7 +200,7 @@ out:
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
if (failed)
- mt7603_mac_work(&dev->mt76.mac_work.work);
+ mt7603_mac_work(&dev->mphy.mac_work.work);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 6e0a92a28b1c..b787c56fd8d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -132,8 +132,6 @@ struct mt7603_dev {
spinlock_t ps_lock;
- u8 mac_work_count;
-
u8 mcu_running;
u8 ed_monitor_enabled;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
index f372fb629caf..30fba36ff46b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -2,7 +2,8 @@
config MT7615_COMMON
tristate
- select MT76_CORE
+ select WANT_DEV_COREDUMP
+ select MT76_CONNAC_LIB
config MT7615E
tristate "MediaTek MT7615E and MT7663E (PCIe) support"
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 4d5e3f8b2a62..7ae48b4fa564 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -21,6 +21,20 @@ mt7615_radar_pattern_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
mt7615_radar_pattern_set, "%lld\n");
+static int mt7615_config(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ int ret;
+
+ mt7615_mutex_acquire(dev);
+ ret = mt76_connac_mcu_chip_config(&dev->mt76);
+ mt7615_mutex_release(dev);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7615_config, "%lld\n");
+
static int
mt7615_scs_set(void *data, u64 val)
{
@@ -525,6 +539,9 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg);
debugfs_create_file_unsafe("rf_regval", 0600, dir, dev,
&fops_rf_reg);
+ if (is_mt7663(&dev->mt76))
+ debugfs_create_file("chip_config", 0600, dir, dev,
+ &fops_config);
if (mt76_is_sdio(&dev->mt76))
debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir,
mt7663s_sched_quota_read);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 3232ebd5eda6..2eab23898c77 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -161,7 +161,7 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
dev->chainmask = BIT(tx_mask) - 1;
dev->mphy.antenna_mask = dev->chainmask;
- dev->phy.chainmask = dev->chainmask;
+ dev->mphy.chainmask = dev->chainmask;
}
static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index a73b76e57c7f..571390fa4de7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -10,15 +10,16 @@
#include <linux/etherdevice.h>
#include "mt7615.h"
#include "mac.h"
+#include "mcu.h"
#include "eeprom.h"
-void mt7615_phy_init(struct mt7615_dev *dev)
+static void
+mt7615_phy_init(struct mt7615_dev *dev)
{
/* disable rf low power beacon mode */
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
-EXPORT_SYMBOL_GPL(mt7615_phy_init);
static void
mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
@@ -79,7 +80,8 @@ mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
}
}
-void mt7615_mac_init(struct mt7615_dev *dev)
+static void
+mt7615_mac_init(struct mt7615_dev *dev)
{
int i;
@@ -95,7 +97,7 @@ void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
MT_TMAC_CTCR0_INS_DDLMT_EN);
- mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
+ mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
mt7615_mac_set_scs(&dev->phy, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
@@ -128,9 +130,9 @@ void mt7615_mac_init(struct mt7615_dev *dev)
mt7615_init_mac_chain(dev, 1);
}
}
-EXPORT_SYMBOL_GPL(mt7615_mac_init);
-void mt7615_check_offload_capability(struct mt7615_dev *dev)
+static void
+mt7615_check_offload_capability(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
@@ -162,7 +164,6 @@ void mt7615_check_offload_capability(struct mt7615_dev *dev)
wiphy->max_sched_scan_reqs = 0;
}
}
-EXPORT_SYMBOL_GPL(mt7615_check_offload_capability);
bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
{
@@ -286,6 +287,16 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
}
EXPORT_SYMBOL_GPL(mt7615_init_txpower);
+void mt7615_init_work(struct mt7615_dev *dev)
+{
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+}
+EXPORT_SYMBOL_GPL(mt7615_init_work);
+
static void
mt7615_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
@@ -296,13 +307,16 @@ mt7615_regd_notifier(struct wiphy *wiphy,
struct mt7615_phy *phy = mphy->priv;
struct cfg80211_chan_def *chandef = &mphy->chandef;
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
mt7615_mutex_acquire(dev);
- mt7615_dfs_init_radar_detector(phy);
+
+ if (chandef->chan->flags & IEEE80211_CHAN_RADAR)
+ mt7615_dfs_init_radar_detector(phy);
+ if (mt7615_firmware_offload(phy->dev))
+ mt76_connac_mcu_set_channel_domain(mphy);
+
mt7615_mutex_release(dev);
}
@@ -331,11 +345,12 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
}
wiphy->reg_notifier = mt7615_regd_notifier;
- wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
+ wiphy->max_sched_scan_plan_interval =
+ MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- wiphy->max_scan_ie_len = MT7615_SCAN_IE_LEN;
- wiphy->max_sched_scan_ssids = MT7615_MAX_SCHED_SCAN_SSID;
- wiphy->max_match_sets = MT7615_MAX_SCAN_MATCH;
+ wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
+ wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
+ wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
wiphy->max_sched_scan_reqs = 1;
wiphy->max_scan_ssids = 4;
@@ -362,9 +377,9 @@ mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
dev->mphy.antenna_mask = dev->chainmask >> 2;
else
dev->mphy.antenna_mask = dev->chainmask >> 1;
- dev->phy.chainmask = dev->mphy.antenna_mask;
- dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
mt76_set_stream_caps(&dev->mphy, true);
}
@@ -375,7 +390,7 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
dev->mphy.antenna_mask = dev->chainmask;
- dev->phy.chainmask = dev->chainmask;
+ dev->mphy.chainmask = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
mt76_set_stream_caps(&dev->mphy, true);
@@ -404,11 +419,11 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
- mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
mt7615_init_wiphy(mphy->hw);
- INIT_DELAYED_WORK(&phy->mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&mphy->mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work);
skb_queue_head_init(&phy->scan_event_list);
@@ -471,9 +486,11 @@ void mt7615_init_device(struct mt7615_dev *dev)
init_completion(&dev->pm.wake_cmpl);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
- INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
+ INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
skb_queue_head_init(&dev->phy.scan_event_list);
+ skb_queue_head_init(&dev->coredump.msg_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
init_waitqueue_head(&dev->reset_wait);
@@ -488,7 +505,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 0f360be0b885..59fdd0fc2ad4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -7,6 +7,7 @@
* Lorenzo Bianconi <lorenzo@kernel.org>
*/
+#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
@@ -14,6 +15,7 @@
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"
+#include "mcu.h"
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
@@ -186,7 +188,7 @@ mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}
-static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
+static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
u32 rxv1 = le32_to_cpu(rxv[0]);
@@ -210,13 +212,13 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
foe = (foe * foe_const) >> 15;
}
- dev->test.last_freq_offset = foe;
- dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
- dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
- dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
- dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
- dev->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
- dev->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
+ phy->test.last_freq_offset = foe;
+ phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
+ phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
+ phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
+ phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
+ phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
+ phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}
@@ -326,7 +328,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
* that PHY.
*/
if (phy_idx < 0) {
- int first_chain = ffs(phy2->chainmask) - 1;
+ int first_chain = ffs(phy2->mt76->chainmask) - 1;
phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
}
@@ -435,7 +437,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- mt7615_mac_fill_tm_rx(dev, rxd);
+ mt7615_mac_fill_tm_rx(mphy->priv, rxd);
rxd += 6;
if ((u8 *)rxd - skb->data >= skb->len)
@@ -544,7 +546,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
u16 seqno = 0;
if (vif) {
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -1465,7 +1467,7 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
- mt7615_pm_power_save_sched(dev);
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
mt76_worker_schedule(&dev->mt76.tx_worker);
}
@@ -1789,11 +1791,11 @@ void mt7615_update_channel(struct mt76_dev *mdev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- if (mt7615_pm_wake(dev))
+ if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
return;
__mt7615_update_channel(dev);
- mt7615_pm_power_save_sched(dev);
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);
@@ -1862,97 +1864,20 @@ void mt7615_pm_wake_work(struct work_struct *work)
{
struct mt7615_dev *dev;
struct mt76_phy *mphy;
- int i;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
pm.wake_work);
mphy = dev->phy.mt76;
- if (mt7615_mcu_set_drv_ctrl(dev)) {
+ if (!mt7615_mcu_set_drv_ctrl(dev))
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ else
dev_err(mphy->dev->dev, "failed to wake device\n");
- goto out;
- }
-
- spin_lock_bh(&dev->pm.txq_lock);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid;
-
- if (!dev->pm.tx_q[i].skb)
- continue;
-
- wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
- if (msta && wcid->sta)
- sta = container_of((void *)msta, struct ieee80211_sta,
- drv_priv);
-
- mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb);
- dev->pm.tx_q[i].skb = NULL;
- }
- spin_unlock_bh(&dev->pm.txq_lock);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
-out:
ieee80211_wake_queues(mphy->hw);
complete_all(&dev->pm.wake_cmpl);
}
-int mt7615_pm_wake(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = dev->phy.mt76;
-
- if (!mt7615_firmware_offload(dev))
- return 0;
-
- if (!mt76_is_mmio(mphy->dev))
- return 0;
-
- if (!test_bit(MT76_STATE_PM, &mphy->state))
- return 0;
-
- if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
- return 0;
-
- if (queue_work(dev->mt76.wq, &dev->pm.wake_work))
- reinit_completion(&dev->pm.wake_cmpl);
-
- if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) {
- ieee80211_wake_queues(mphy->hw);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt7615_pm_wake);
-
-void mt7615_pm_power_save_sched(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = dev->phy.mt76;
-
- if (!mt7615_firmware_offload(dev))
- return;
-
- if (!mt76_is_mmio(mphy->dev))
- return;
-
- if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state))
- return;
-
- dev->pm.last_activity = jiffies;
-
- if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
- return;
-
- if (!test_bit(MT76_STATE_PM, &mphy->state))
- queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work,
- dev->pm.idle_timeout);
-}
-EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched);
-
void mt7615_pm_power_save_work(struct work_struct *work)
{
struct mt7615_dev *dev;
@@ -1976,17 +1901,17 @@ out:
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_phy *phy;
- struct mt76_dev *mdev;
+ struct mt76_phy *mphy;
- phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
- mac_work.work);
- mdev = &phy->dev->mt76;
+ mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
+ mac_work.work);
+ phy = mphy->priv;
mt7615_mutex_acquire(phy->dev);
mt7615_update_survey(phy->dev);
- if (++phy->mac_work_count == 5) {
- phy->mac_work_count = 0;
+ if (++mphy->mac_work_count == 5) {
+ mphy->mac_work_count = 0;
mt7615_mac_update_mib_stats(phy);
mt7615_mac_scs_check(phy);
@@ -1994,8 +1919,8 @@ void mt7615_mac_work(struct work_struct *work)
mt7615_mutex_release(phy->dev);
- mt76_tx_status_check(mdev, NULL, false);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ mt76_tx_status_check(mphy->dev, NULL, false);
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7615_WATCHDOG_TIME);
}
@@ -2017,7 +1942,16 @@ mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct ieee80211_hw *hw = priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
- mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ mt7615_mcu_add_beacon(dev, hw, vif,
+ vif->bss_conf.enable_beacon);
+ break;
+ default:
+ break;
+ }
}
static void
@@ -2058,6 +1992,23 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);
+void mt7615_tx_token_put(struct mt7615_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7615_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb)
+ dev_kfree_skb_any(txwi->skb);
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+}
+EXPORT_SYMBOL_GPL(mt7615_tx_token_put);
+
void mt7615_mac_reset_work(struct work_struct *work)
{
struct mt7615_phy *phy2;
@@ -2078,11 +2029,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->phy.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
del_timer_sync(&dev->phy.roc_timer);
cancel_work_sync(&dev->phy.roc_work);
if (phy2) {
- cancel_delayed_work_sync(&phy2->mac_work);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
del_timer_sync(&phy2->roc_timer);
cancel_work_sync(&phy2->roc_work);
}
@@ -2101,6 +2052,9 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+ mt7615_tx_token_put(dev);
+ idr_init(&dev->token);
+
if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
mt7615_dma_reset(dev);
@@ -2134,10 +2088,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt7615_mutex_release(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT7615_WATCHDOG_TIME);
if (phy2)
- ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &phy2->mt76->mac_work,
MT7615_WATCHDOG_TIME);
}
@@ -2319,3 +2274,44 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
return 0;
}
+
+void mt7615_coredump_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+ char *dump, *data;
+
+ dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+ coredump.work.work);
+
+ if (time_is_after_jiffies(dev->coredump.last_activity +
+ 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
+ queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
+ MT76_CONNAC_COREDUMP_TIMEOUT);
+ return;
+ }
+
+ dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
+ data = dump;
+
+ while (true) {
+ struct sk_buff *skb;
+
+ spin_lock_bh(&dev->mt76.lock);
+ skb = __skb_dequeue(&dev->coredump.msg_list);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ if (!skb)
+ break;
+
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
+ break;
+
+ memcpy(data, skb->data, skb->len);
+ data += skb->len;
+
+ dev_kfree_skb(skb);
+ }
+ dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+ GFP_KERNEL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 56dd0b4e4460..25faf486d279 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -24,22 +24,6 @@ static bool mt7615_dev_running(struct mt7615_dev *dev)
return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
}
-static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev,
- struct mt7615_sta *msta)
-{
- int i;
-
- spin_lock_bh(&dev->pm.txq_lock);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- if (msta && dev->pm.tx_q[i].msta != msta)
- continue;
-
- dev_kfree_skb(dev->pm.tx_q[i].skb);
- dev->pm.tx_q[i].skb = NULL;
- }
- spin_unlock_bh(&dev->pm.txq_lock);
-}
-
static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -55,22 +39,24 @@ static int mt7615_start(struct ieee80211_hw *hw)
if (!running) {
mt7615_mcu_set_pm(dev, 0, 0);
- mt7615_mcu_set_mac_enable(dev, 0, true);
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
mt7615_mac_enable_nf(dev, 0);
}
if (phy != &dev->phy) {
mt7615_mcu_set_pm(dev, 1, 0);
- mt7615_mcu_set_mac_enable(dev, 1, true);
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false);
mt7615_mac_enable_nf(dev, 1);
}
- mt7615_mcu_set_channel_domain(phy);
+ if (mt7615_firmware_offload(dev))
+ mt76_connac_mcu_set_channel_domain(phy->mt76);
+
mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
if (!running)
@@ -86,30 +72,30 @@ static void mt7615_stop(struct ieee80211_hw *hw)
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
- mt7615_free_pending_tx_skbs(dev, NULL);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7615_mutex_acquire(dev);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
if (phy != &dev->phy) {
mt7615_mcu_set_pm(dev, 1, 1);
- mt7615_mcu_set_mac_enable(dev, 1, false);
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, false, false);
}
if (!mt7615_dev_running(dev)) {
mt7615_mcu_set_pm(dev, 0, 1);
- mt7615_mcu_set_mac_enable(dev, 0, false);
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
}
mt7615_mutex_release(dev);
@@ -181,14 +167,14 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mt7615_mutex_acquire(dev);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
if (vif->type == NL80211_IFTYPE_MONITOR &&
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
- if (mvif->idx >= MT7615_MAX_INTERFACES) {
+ mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
@@ -198,26 +184,26 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
ret = -ENOSPC;
goto out;
}
- mvif->omac_idx = idx;
+ mvif->mt76.omac_idx = idx;
- mvif->band_idx = ext_phy;
+ mvif->mt76.band_idx = ext_phy;
if (mt7615_ext_phy(dev))
- mvif->wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
- mvif->idx % (MT7615_MAX_WMM_SETS / 2);
+ mvif->mt76.wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
+ mvif->mt76.idx % (MT7615_MAX_WMM_SETS / 2);
else
- mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+ mvif->mt76.wmm_idx = mvif->mt76.idx % MT7615_MAX_WMM_SETS;
- dev->mphy.vif_mask |= BIT(mvif->idx);
- dev->omac_mask |= BIT_ULL(mvif->omac_idx);
- phy->omac_mask |= BIT_ULL(mvif->omac_idx);
+ dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
mt7615_mcu_set_dbdc(dev);
- idx = MT7615_WTBL_RESERVED - mvif->idx;
+ idx = MT7615_WTBL_RESERVED - mvif->mt76.idx;
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->band_idx;
+ mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -228,7 +214,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mtxq->wcid = &mvif->sta.wcid;
}
- ret = mt7615_mcu_add_dev_info(dev, vif, true);
+ ret = mt7615_mcu_add_dev_info(phy, vif, true);
if (ret)
goto out;
@@ -252,20 +238,20 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt7615_mutex_acquire(dev);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
if (vif == phy->monitor_vif)
phy->monitor_vif = NULL;
- mt7615_free_pending_tx_skbs(dev, msta);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
mt7615_mac_set_beacon_filter(phy, vif, false);
- mt7615_mcu_add_dev_info(dev, vif, false);
+ mt7615_mcu_add_dev_info(phy, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- dev->mphy.vif_mask &= ~BIT(mvif->idx);
- dev->omac_mask &= ~BIT_ULL(mvif->omac_idx);
- phy->omac_mask &= ~BIT_ULL(mvif->omac_idx);
+ dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt7615_mutex_release(dev);
@@ -300,7 +286,7 @@ int mt7615_set_channel(struct mt7615_phy *phy)
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mt7615_mutex_acquire(dev);
@@ -321,7 +307,7 @@ int mt7615_set_channel(struct mt7615_phy *phy)
mt7615_mac_set_timing(phy);
ret = mt7615_dfs_init_radar_detector(phy);
mt7615_mac_cca_stats_reset(phy);
- mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76));
+ mt7615_mcu_set_sku_en(phy, true);
mt7615_mac_reset_counters(dev);
phy->noise = 0;
@@ -334,8 +320,9 @@ out:
mt76_txq_schedule_all(phy->mt76);
- if (!mt76_testmode_enabled(&dev->mt76))
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ if (!mt76_testmode_enabled(phy->mt76))
+ ieee80211_queue_delayed_work(phy->mt76->hw,
+ &phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
return ret;
@@ -411,9 +398,9 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER)) {
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->mt76.test.state != MT76_TM_STATE_OFF) {
+ if (phy->mt76->test.state != MT76_TM_STATE_OFF) {
mt7615_mutex_acquire(dev);
- mt76_testmode_reset(&dev->mt76, false);
+ mt76_testmode_reset(phy->mt76, false);
mt7615_mutex_release(dev);
}
#endif
@@ -425,7 +412,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt7615_mutex_acquire(dev);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
@@ -444,7 +431,7 @@ static int
mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
int err;
@@ -480,7 +467,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
phy->rxfilter &= ~(_hw); \
- if (!mt76_testmode_enabled(&dev->mt76)) \
+ if (!mt76_testmode_enabled(phy->mt76)) \
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\
} while (0)
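
The multiply on the last macro line is a branchless conditional: !(flags & FIF_##_flag) evaluates to 0 or 1, so the hardware drop bits in _hw are set exactly when the corresponding mac80211 filter flag is absent and testmode is off. An equivalent, more explicit form:

	phy->rxfilter &= ~(_hw);
	if (!mt76_testmode_enabled(phy->mt76) && !(flags & FIF_##_flag))
		phy->rxfilter |= (_hw);
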
@@ -541,7 +528,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon);
- mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
+ mt7615_mcu_sta_add(phy, vif, NULL, info->enable_beacon);
if (vif->p2p && info->enable_beacon)
mt7615_mcu_set_p2p_oppps(hw, vif);
@@ -552,7 +539,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
if (changed & BSS_CHANGED_PS)
- mt7615_mcu_set_vif_ps(dev, vif);
+ mt76_connac_mcu_set_vif_ps(&dev->mt76, vif);
if (changed & BSS_CHANGED_ARP_FILTER)
mt7615_mcu_update_arp_filter(hw, vif, info);
@@ -578,6 +565,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_phy *phy;
int idx, err;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
@@ -588,23 +576,20 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->band_idx;
+ msta->wcid.ext_phy = mvif->mt76.band_idx;
- err = mt7615_pm_wake(dev);
+ phy = mvif->mt76.band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ err = mt76_connac_pm_wake(phy->mt76, &dev->pm);
if (err)
return err;
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
- struct mt7615_phy *phy;
-
- phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mt7615_mcu_add_bss_info(phy, vif, sta, true);
- }
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7615_mcu_sta_add(dev, vif, sta, true);
+ mt7615_mcu_sta_add(&dev->phy, vif, sta, true);
- mt7615_pm_power_save_sched(dev);
+ mt76_connac_power_save_sched(phy->mt76, &dev->pm);
return 0;
}
@@ -615,27 +600,26 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_phy *phy;
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
- mt7615_free_pending_tx_skbs(dev, msta);
- mt7615_pm_wake(dev);
+ phy = mvif->mt76.band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ mt76_connac_pm_wake(phy->mt76, &dev->pm);
- mt7615_mcu_sta_add(dev, vif, sta, false);
+ mt7615_mcu_sta_add(&dev->phy, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_phy *phy;
-
- phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mt7615_mcu_add_bss_info(phy, vif, sta, false);
- }
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- mt7615_pm_power_save_sched(dev);
+ mt76_connac_power_save_sched(phy->mt76, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
@@ -720,25 +704,17 @@ static void mt7615_tx(struct ieee80211_hw *hw,
skb_set_queue_mapping(skb, qid);
}
- spin_lock_bh(&dev->pm.txq_lock);
- if (!dev->pm.tx_q[qid].skb) {
- ieee80211_stop_queues(hw);
- dev->pm.tx_q[qid].msta = msta;
- dev->pm.tx_q[qid].skb = skb;
- queue_work(dev->mt76.wq, &dev->pm.wake_work);
- } else {
- dev_kfree_skb(skb);
- }
- spin_unlock_bh(&dev->pm.txq_lock);
+ mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb);
}
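
The open-coded block removed here kept at most one pending frame per hardware queue while the device slept, stopping the mac80211 queues and scheduling the wake worker on the first frame. mt76_connac_pm_queue_skb consolidates that logic; a sketch mirroring the removed lines, with field and workqueue names assumed:

	void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
				      struct mt76_connac_pm *pm,
				      struct mt76_wcid *wcid,
				      struct sk_buff *skb)
	{
		struct mt76_phy *phy = hw->priv;
		int qid = skb_get_queue_mapping(skb);

		spin_lock_bh(&pm->txq_lock);
		if (!pm->tx_q[qid].skb) {
			ieee80211_stop_queues(hw);
			pm->tx_q[qid].wcid = wcid;
			pm->tx_q[qid].skb = skb;
			queue_work(phy->dev->wq, &pm->wake_work);
		} else {
			dev_kfree_skb(skb);
		}
		spin_unlock_bh(&pm->txq_lock);
	}
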
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int band = phy != &dev->phy;
mt7615_mutex_acquire(dev);
- mt7615_mcu_set_rts_thresh(phy, val);
+ mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band);
mt7615_mutex_release(dev);
return 0;
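
The pointer comparison phy != &dev->phy yields 0 for the primary phy and 1 for the secondary (DBDC) phy, so it doubles as the firmware band index; the same idiom appears in mt7615_set_channel and mt7615_mcu_set_chan_info:

	int band = phy != &dev->phy;	/* 0 = main band, 1 = ext/DBDC band */
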
@@ -910,7 +886,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
else
tx_ant <<= 1;
}
- phy->chainmask = tx_ant;
+ phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
@@ -993,8 +969,12 @@ mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_phy *mphy = hw->priv;
int err;
+ /* fall-back to sw-scan */
+ if (!mt7615_firmware_offload(dev))
+ return 1;
+
mt7615_mutex_acquire(dev);
- err = mt7615_mcu_hw_scan(mphy->priv, vif, req);
+ err = mt76_connac_mcu_hw_scan(mphy, vif, req);
mt7615_mutex_release(dev);
return err;
@@ -1007,7 +987,7 @@ mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76_phy *mphy = hw->priv;
mt7615_mutex_acquire(dev);
- mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
+ mt76_connac_mcu_cancel_hw_scan(mphy, vif);
mt7615_mutex_release(dev);
}
@@ -1020,13 +1000,16 @@ mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_phy *mphy = hw->priv;
int err;
+ if (!mt7615_firmware_offload(dev))
+ return -EOPNOTSUPP;
+
mt7615_mutex_acquire(dev);
- err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
+ err = mt76_connac_mcu_sched_scan_req(mphy, vif, req);
if (err < 0)
goto out;
- err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
+ err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true);
out:
mt7615_mutex_release(dev);
@@ -1040,8 +1023,11 @@ mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76_phy *mphy = hw->priv;
int err;
+ if (!mt7615_firmware_offload(dev))
+ return -EOPNOTSUPP;
+
mt7615_mutex_acquire(dev);
- err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
+ err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false);
mt7615_mutex_release(dev);
return err;
@@ -1101,26 +1087,27 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
static int mt7615_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
int err = 0;
cancel_delayed_work_sync(&dev->pm.ps_work);
- mt7615_free_pending_tx_skbs(dev, NULL);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7615_mutex_acquire(dev);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_mcu_set_suspend_iter, phy);
+ mt76_connac_mcu_set_suspend_iter,
+ phy->mt76);
if (!mt7615_dev_running(dev))
- err = mt7615_mcu_set_hif_suspend(dev, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
mt7615_mutex_release(dev);
@@ -1129,8 +1116,8 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
static int mt7615_resume(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
bool running;
mt7615_mutex_acquire(dev);
@@ -1141,7 +1128,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
if (!running) {
int err;
- err = mt7615_mcu_set_hif_suspend(dev, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
if (err < 0) {
mt7615_mutex_release(dev);
return err;
@@ -1151,9 +1138,10 @@ static int mt7615_resume(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_mcu_set_suspend_iter, phy);
+ mt76_connac_mcu_set_suspend_iter,
+ phy->mt76);
- ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
mt7615_mutex_release(dev);
@@ -1176,7 +1164,7 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
mt7615_mutex_acquire(dev);
- mt7615_mcu_update_gtk_rekey(hw, vif, data);
+ mt76_connac_mcu_update_gtk_rekey(hw, vif, data);
mt7615_mutex_release(dev);
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index a44b7766dec6..631596fc2f36 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- enum mt76_txq_id qid;
+ enum mt76_mcuq_id qid;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
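
Changing qid from enum mt76_txq_id to enum mt76_mcuq_id is a type-correctness fix: MCU control queues are indexed by their own enum, separate from the data TX queues. Assumed shape of the two enums, for illustration only:

	enum mt76_txq_id  { MT_TXQ_VO, MT_TXQ_VI, MT_TXQ_BE, MT_TXQ_BK, /* ... */ };
	enum mt76_mcuq_id { MT_MCUQ_WM, MT_MCUQ_WA, MT_MCUQ_FWDL, /* ... */ };
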
@@ -481,29 +481,14 @@ mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
}
static void
-mt7615_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_beacon_loss_event *event = priv;
-
- if (mvif->idx != event->bss_idx)
- return;
-
- if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
- return;
-
- ieee80211_beacon_loss(vif);
-}
-
-static void
mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
- struct mt7615_beacon_loss_event *event;
+ struct mt76_connac_beacon_loss_event *event;
struct mt76_phy *mphy;
u8 band_idx = 0; /* DBDC support */
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
- event = (struct mt7615_beacon_loss_event *)skb->data;
+ event = (struct mt76_connac_beacon_loss_event *)skb->data;
if (band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
else
@@ -511,18 +496,19 @@ mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_mcu_beacon_loss_iter, event);
+ mt76_connac_mcu_beacon_loss_iter,
+ event);
}
static void
mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
- struct mt7615_mcu_bss_event *event;
+ struct mt76_connac_mcu_bss_event *event;
struct mt76_phy *mphy;
u8 band_idx = 0; /* DBDC support */
- event = (struct mt7615_mcu_bss_event *)(skb->data +
- sizeof(struct mt7615_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ event = (struct mt76_connac_mcu_bss_event *)skb->data;
if (band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
@@ -557,6 +543,10 @@ mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
case MCU_EVENT_BSS_ABSENCE:
mt7615_mcu_bss_event(dev, skb);
break;
+ case MCU_EVENT_COREDUMP:
+ mt76_connac_mcu_coredump_event(&dev->mt76, skb,
+ &dev->coredump);
+ return;
default:
break;
}
@@ -575,6 +565,7 @@ void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
rxd->eid == MCU_EVENT_BSS_ABSENCE ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_COREDUMP ||
rxd->eid == MCU_EVENT_ROC ||
!rxd->seq)
mt7615_mcu_rx_unsolicited_event(dev, skb);
@@ -582,29 +573,12 @@ void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_mcu_rx_event(&dev->mt76, skb);
}
-static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
- u32 len, u32 mode)
-{
- struct {
- __le32 addr;
- __le32 len;
- __le32 mode;
- } req = {
- .addr = cpu_to_le32(addr),
- .len = cpu_to_le32(len),
- .mode = cpu_to_le32(mode),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
- &req, sizeof(req), true);
-}
-
static int
mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool bssid, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u32 idx = mvif->omac_idx - REPEATER_BSSID_START;
+ u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
u32 mask = dev->omac_mask >> 32 & ~BIT(idx);
const u8 *addr = vif->addr;
struct {
@@ -636,10 +610,11 @@ mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif,
}
static int
-mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+mt7615_mcu_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
struct {
struct req_hdr {
u8 omac_idx;
@@ -657,8 +632,8 @@ mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
} __packed tlv;
} data = {
.hdr = {
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
+ .omac_idx = mvif->mt76.omac_idx,
+ .band_idx = mvif->mt76.band_idx,
.tlv_num = cpu_to_le16(1),
.is_tlv_append = 1,
},
@@ -666,11 +641,11 @@ mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
- .band_idx = mvif->band_idx,
+ .band_idx = mvif->mt76.band_idx,
},
};
- if (mvif->omac_idx >= REPEATER_BSSID_START)
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
return mt7615_mcu_muar_config(dev, vif, false, enable);
memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
@@ -703,10 +678,10 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
u8 bcc_cnt;
__le16 bcc_ie_pos;
} __packed req = {
- .omac_idx = mvif->omac_idx,
+ .omac_idx = mvif->mt76.omac_idx,
.enable = enable,
.wlan_idx = wcid->idx,
- .band_idx = mvif->band_idx,
+ .band_idx = mvif->mt76.band_idx,
};
struct sk_buff *skb;
@@ -720,7 +695,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
return -EINVAL;
}
- if (mvif->band_idx) {
+ if (mvif->mt76.band_idx) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
}
@@ -774,86 +749,6 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
sizeof(req), true);
}
-static struct sk_buff *
-mt7615_mcu_alloc_sta_req(struct mt7615_dev *dev, struct mt7615_vif *mvif,
- struct mt7615_sta *msta)
-{
- struct sta_req_hdr hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta ? msta->wcid.idx : 0,
- .muar_idx = msta ? mvif->omac_idx : 0,
- .is_tlv_append = 1,
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7615_STA_UPDATE_MAX_SIZE);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- return skb;
-}
-
-static struct wtbl_req_hdr *
-mt7615_mcu_alloc_wtbl_req(struct mt7615_dev *dev, struct mt7615_sta *msta,
- int cmd, void *sta_wtbl, struct sk_buff **skb)
-{
- struct tlv *sta_hdr = sta_wtbl;
- struct wtbl_req_hdr hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = cmd,
- };
- struct sk_buff *nskb = *skb;
-
- if (!nskb) {
- nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT7615_WTBL_UPDATE_BA_SIZE);
- if (!nskb)
- return ERR_PTR(-ENOMEM);
-
- *skb = nskb;
- }
-
- if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
-
- return skb_put_data(nskb, &hdr, sizeof(hdr));
-}
-
-static struct tlv *
-mt7615_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
- void *sta_ntlv, void *sta_wtbl)
-{
- struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
- struct tlv *sta_hdr = sta_wtbl;
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
- u16 ntlv;
-
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
-
- ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
- ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
-
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
-
- return ptlv;
-}
-
-static struct tlv *
-mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
-{
- return mt7615_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
-}
-
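
The three builders deleted above (station-record allocation, wtbl-request allocation, nested-TLV append) move essentially verbatim into mt76-connac-lib as mt76_connac_mcu_alloc_sta_req(), mt76_connac_mcu_alloc_wtbl_req() and mt76_connac_mcu_add_nested_tlv(). They all share one framing scheme, visible in the removed code: each record is a 16-bit tag plus 16-bit length followed by the payload, and appending a nested record bumps the parent's tlv_num and, for wtbl-in-sta records, the enclosing STA_REC_WTBL length:

	struct tlv {
		__le16 tag;
		__le16 len;
	} __packed;
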
static int
mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
@@ -864,7 +759,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
u8 wlan_idx = mvif->sta.wcid.idx;
struct tlv *tlv;
- tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
@@ -893,7 +788,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
bss->network_type = cpu_to_le32(type);
bss->dtim_period = vif->bss_conf.dtim_period;
bss->bmc_tx_wlan_idx = wlan_idx;
- bss->wmm_idx = mvif->wmm_idx;
+ bss->wmm_idx = mvif->mt76.wmm_idx;
bss->active = enable;
return 0;
@@ -903,12 +798,12 @@ static void
mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u8 omac_idx = mvif->mt76.omac_idx;
struct bss_info_omac *omac;
struct tlv *tlv;
u32 type = 0;
- u8 idx;
- tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
@@ -933,11 +828,10 @@ mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
}
omac = (struct bss_info_omac *)tlv;
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->omac_idx;
- omac->band_idx = mvif->band_idx;
- omac->hw_bss_idx = idx;
+ omac->omac_idx = mvif->mt76.omac_idx;
+ omac->band_idx = mvif->mt76.band_idx;
+ omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
}
/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
@@ -949,304 +843,17 @@ mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
int ext_bss_idx, tsf_offset;
struct tlv *tlv;
- ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
if (ext_bss_idx < 0)
return;
- tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
ext = (struct bss_info_ext_bss *)tlv;
tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
}
-static void
-mt7615_mcu_sta_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
-{
- struct sta_rec_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7615_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
-
- ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT,
- ba->winsize = cpu_to_le16(params->buf_size);
- ba->ssn = cpu_to_le16(params->ssn);
- ba->ba_en = enable << params->tid;
- ba->amsdu = params->amsdu;
- ba->tid = params->tid;
-}
-
-static void
-mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
- struct sta_rec_basic *basic;
- struct tlv *tlv;
- int conn_type;
-
- tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
-
- basic = (struct sta_rec_basic *)tlv;
- basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
-
- if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
-
- if (!sta) {
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
- return;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- conn_type = CONNECTION_P2P_GC;
- else
- conn_type = CONNECTION_INFRA_STA;
- basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(sta->aid);
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- conn_type = CONNECTION_P2P_GO;
- else
- conn_type = CONNECTION_INFRA_AP;
- basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(vif->bss_conf.aid);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- basic->aid = cpu_to_le16(sta->aid);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->qos = sta->wme;
-}
-
-static void
-mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
-{
- struct tlv *tlv;
-
- if (sta->ht_cap.ht_supported) {
- struct sta_rec_ht *ht;
-
- tlv = mt7615_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
- ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
- }
- if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *vht;
-
- tlv = mt7615_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
- vht = (struct sta_rec_vht *)tlv;
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- }
-}
-
-static void
-mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct sta_rec_uapsd *uapsd;
- struct tlv *tlv;
-
- if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
- return;
-
- tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
- uapsd = (struct sta_rec_uapsd *)tlv;
-
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
- uapsd->dac_map |= BIT(3);
- uapsd->tac_map |= BIT(3);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
- uapsd->dac_map |= BIT(2);
- uapsd->tac_map |= BIT(2);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
- uapsd->dac_map |= BIT(1);
- uapsd->tac_map |= BIT(1);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
- uapsd->dac_map |= BIT(0);
- uapsd->tac_map |= BIT(0);
- }
- uapsd->max_sp = sta->max_sp;
-}
-
-static void
-mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct wtbl_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
- wtbl_tlv, sta_wtbl);
-
- ba = (struct wtbl_ba *)tlv;
- ba->tid = params->tid;
-
- if (tx) {
- ba->ba_type = MT_BA_TYPE_ORIGINATOR;
- ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
- ba->ba_winsize = cpu_to_le16(params->buf_size);
- ba->ba_en = enable;
- } else {
- memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
- ba->ba_type = MT_BA_TYPE_RECIPIENT;
- ba->rst_ba_tid = params->tid;
- ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
- ba->rst_ba_sb = 1;
- }
-
- if (enable && tx) {
- u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
- int i;
-
- for (i = 7; i > 0; i--) {
- if (params->buf_size >= ba_range[i])
- break;
- }
- ba->ba_winsize_idx = i;
- }
-}
-
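
The window-size loop at the end of the removed mt7615_mcu_wtbl_ba_tlv() picks the largest table entry that still fits the requested reorder-buffer size. A worked example:

	/* ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 }
	 * buf_size = 40: entries at i = 7..5 all exceed 40, i = 4 gives
	 * 36 <= 40, so ba_winsize_idx = 4.
	 */

The shared mt76_connac_mcu_wtbl_ba_tlv() presumably keeps the same mapping.
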
-static void
-mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_generic *generic;
- struct wtbl_rx *rx;
- struct wtbl_spe *spe;
- struct tlv *tlv;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
- wtbl_tlv, sta_wtbl);
-
- generic = (struct wtbl_generic *)tlv;
-
- if (sta) {
- if (vif->type == NL80211_IFTYPE_STATION)
- generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
- else
- generic->partial_aid = cpu_to_le16(sta->aid);
- memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->muar_idx = mvif->omac_idx;
- generic->qos = sta->wme;
- } else {
- eth_broadcast_addr(generic->peer_addr);
- generic->muar_idx = 0xe;
- }
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
- wtbl_tlv, sta_wtbl);
-
- rx = (struct wtbl_rx *)tlv;
- rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
- rx->rca2 = 1;
- rx->rv = 1;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
- wtbl_tlv, sta_wtbl);
- spe = (struct wtbl_spe *)tlv;
- spe->spe_idx = 24;
-}
-
-static void
-mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct tlv *tlv;
- struct wtbl_ht *ht = NULL;
- u32 flags = 0;
-
- if (sta->ht_cap.ht_supported) {
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
- wtbl_tlv, sta_wtbl);
- ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
- ht->ht = 1;
-
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- flags |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- flags |= MT_WTBL_W5_SHORT_GI_40;
- }
-
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *vht;
- u8 af;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
- wtbl_tlv, sta_wtbl);
- vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- vht->vht = 1;
-
- af = (sta->vht_cap.cap &
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
-
- if (ht)
- ht->af = max(ht->af, af);
-
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
- flags |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
- flags |= MT_WTBL_W5_SHORT_GI_160;
- }
-
- /* wtbl smps */
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
- struct wtbl_smps *smps;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
- wtbl_tlv, sta_wtbl);
- smps = (struct wtbl_smps *)tlv;
- smps->smps = 1;
- }
-
- if (sta->ht_cap.ht_supported) {
- /* sgi */
- u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
- MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
- struct wtbl_raw *raw;
-
- tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
- sizeof(*raw), wtbl_tlv,
- sta_wtbl);
- raw = (struct wtbl_raw *)tlv;
- raw->val = cpu_to_le32(flags);
- raw->msk = cpu_to_le32(~msk);
- raw->wtbl_idx = 1;
- raw->dw = 5;
- }
-}
-
static int
mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
@@ -1255,10 +862,10 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct mt7615_dev *dev = phy->dev;
struct sk_buff *skb;
- if (mvif->omac_idx >= REPEATER_BSSID_START)
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
mt7615_mcu_muar_config(dev, vif, true, enable);
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, NULL);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1267,8 +874,8 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable);
- if (enable && mvif->omac_idx >= EXT_BSSID_START &&
- mvif->omac_idx < REPEATER_BSSID_START)
+ if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
+ mvif->mt76.omac_idx < REPEATER_BSSID_START)
mt7615_mcu_bss_ext_tlv(skb, mvif);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
@@ -1286,22 +893,25 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
struct sk_buff *skb = NULL;
int err;
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, NULL, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, true,
+ NULL, wtbl_hdr);
err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
true);
if (err < 0)
return err;
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, true);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
@@ -1318,11 +928,12 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
struct sk_buff *skb;
int err;
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
@@ -1330,47 +941,52 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
return err;
skb = NULL;
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, NULL, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false,
+ NULL, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
true);
}
static int
-mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct mt7615_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
- sskb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(sskb))
return PTR_ERR(sskb);
- mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
- if (enable && sta) {
- mt7615_mcu_sta_ht_tlv(sskb, sta);
- mt7615_mcu_sta_uapsd(sskb, vif, sta);
- }
+ mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+ if (enable && sta)
+ mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif);
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- NULL, &wskb);
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, NULL,
+ &wskb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
if (enable) {
- mt7615_mcu_wtbl_generic_tlv(wskb, vif, sta, NULL, wtbl_hdr);
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, wskb, vif, sta,
+ NULL, wtbl_hdr);
if (sta)
- mt7615_mcu_wtbl_ht_tlv(wskb, sta, NULL, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
+ NULL, wtbl_hdr);
}
cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@@ -1413,17 +1029,19 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
struct tlv *sta_wtbl;
struct sk_buff *skb;
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7615_mcu_sta_ba_tlv(skb, params, enable, tx);
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
+ mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
+ sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
@@ -1446,46 +1064,22 @@ mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
}
static int
-mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, int cmd)
+__mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable, int cmd)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct mt7615_sta *msta;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
-
- msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
-
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ struct mt76_wcid *wcid;
- mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
- if (enable && sta) {
- mt7615_mcu_sta_ht_tlv(skb, sta);
- mt7615_mcu_sta_uapsd(skb, vif, sta);
- }
-
- sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (enable) {
- mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
- if (sta)
- mt7615_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
- }
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
+ return mt76_connac_mcu_add_sta_cmd(phy, vif, sta, wcid, enable, cmd);
}
static int
-mt7615_mcu_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
- return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
- MCU_EXT_CMD_STA_REC_UPDATE);
+ return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
+ MCU_EXT_CMD_STA_REC_UPDATE);
}
static const struct mt7615_mcu_ops sta_update_ops = {
@@ -1501,247 +1095,12 @@ static const struct mt7615_mcu_ops sta_update_ops = {
};
static int
-mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct {
- struct {
- u8 omac_idx;
- u8 band_idx;
- __le16 pad;
- } __packed hdr;
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 pad;
- u8 omac_addr[ETH_ALEN];
- } __packed tlv;
- } dev_req = {
- .hdr = {
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- },
- .tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(struct req_tlv)),
- .active = enable,
- },
- };
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_bss_basic_tlv basic;
- } basic_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .basic = {
- .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
- .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- .wmm_idx = mvif->wmm_idx,
- .active = enable,
- .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx),
- .sta_idx = cpu_to_le16(mvif->sta.wcid.idx),
- .conn_state = 1,
- },
- };
- int err, idx, cmd, len;
- void *data;
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_STATION:
- basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
- basic_req.basic.hw_bss_idx = idx;
-
- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
-
- cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
- data = enable ? (void *)&dev_req : (void *)&basic_req;
- len = enable ? sizeof(dev_req) : sizeof(basic_req);
-
- err = mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
- if (err < 0)
- return err;
-
- cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
- data = enable ? (void *)&basic_req : (void *)&dev_req;
- len = enable ? sizeof(basic_req) : sizeof(dev_req);
-
- return mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
-}
-
-static int
mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
return 0;
}
static int
-mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
- struct mt7615_dev *dev = phy->dev;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_bss_basic_tlv basic;
- struct mt7615_bss_qos_tlv qos;
- } basic_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .basic = {
- .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
- .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
- .dtim_period = vif->bss_conf.dtim_period,
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- .wmm_idx = mvif->wmm_idx,
- .active = true, /* keep bss deactivated */
- .phymode = 0x38,
- },
- .qos = {
- .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
- .len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)),
- .qos = vif->bss_conf.qos,
- },
- };
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct rlm_tlv {
- __le16 tag;
- __le16 len;
- u8 control_channel;
- u8 center_chan;
- u8 center_chan2;
- u8 bw;
- u8 tx_streams;
- u8 rx_streams;
- u8 short_st;
- u8 ht_op_info;
- u8 sco;
- u8 pad[3];
- } __packed rlm;
- } __packed rlm_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .rlm = {
- .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
- .len = cpu_to_le16(sizeof(struct rlm_tlv)),
- .control_channel = chandef->chan->hw_value,
- .center_chan = ieee80211_frequency_to_channel(freq1),
- .center_chan2 = ieee80211_frequency_to_channel(freq2),
- .tx_streams = hweight8(phy->mt76->antenna_mask),
- .rx_streams = phy->chainmask,
- .short_st = true,
- },
- };
- int err, conn_type;
- u8 idx;
-
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
- basic_req.basic.hw_bss_idx = idx;
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- conn_type = CONNECTION_P2P_GO;
- else
- conn_type = CONNECTION_INFRA_AP;
- basic_req.basic.conn_type = cpu_to_le32(conn_type);
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- conn_type = CONNECTION_P2P_GC;
- else
- conn_type = CONNECTION_INFRA_STA;
- basic_req.basic.conn_type = cpu_to_le32(conn_type);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
- basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx);
- basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx);
- basic_req.basic.conn_state = !enable;
-
- err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &basic_req, sizeof(basic_req), true);
- if (err < 0)
- return err;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_40:
- rlm_req.rlm.bw = CMD_CBW_40MHZ;
- break;
- case NL80211_CHAN_WIDTH_80:
- rlm_req.rlm.bw = CMD_CBW_80MHZ;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- rlm_req.rlm.bw = CMD_CBW_8080MHZ;
- break;
- case NL80211_CHAN_WIDTH_160:
- rlm_req.rlm.bw = CMD_CBW_160MHZ;
- break;
- case NL80211_CHAN_WIDTH_5:
- rlm_req.rlm.bw = CMD_CBW_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
- rlm_req.rlm.bw = CMD_CBW_10MHZ;
- break;
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- default:
- rlm_req.rlm.bw = CMD_CBW_20MHZ;
- break;
- }
-
- if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 1; /* SCA */
- else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 3; /* SCB */
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &rlm_req, sizeof(rlm_req), true);
-}
-
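
The removed rlm block also encoded the secondary-channel offset: sco = 1 (SCA, secondary above) when the control channel sits below the center channel, and sco = 3 (SCB, secondary below) when it sits above. For example, a 40 MHz BSS with control channel 36 and center channel 38 yields 36 < 38, hence sco = 1.
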
-static int
mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1775,7 +1134,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
} __packed beacon_tlv;
} req = {
.hdr = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
@@ -1814,44 +1173,42 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
}
static int
-mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable)
+mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ bool enable)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
- struct mt7615_vif *mvif = msta->vif;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
- int err;
-
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
+ return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid,
+ enable);
+}
- mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, sta_wtbl,
- wtbl_hdr);
+static int
+mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
- if (err < 0)
- return err;
+ return mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+ enable);
+}
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+static inline int
+mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
- mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+static int
+mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
+ enable, true);
}
static int
@@ -1866,43 +1223,38 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
struct sk_buff *skb;
int err;
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD_STA_REC_UPDATE, true);
if (err < 0 || !enable)
return err;
- skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, sta_wtbl,
- wtbl_hdr);
+ mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false,
+ sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD_STA_REC_UPDATE, true);
}
-static int
-mt7615_mcu_uni_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
- return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
- MCU_UNI_CMD_STA_REC_UPDATE);
-}
-
static const struct mt7615_mcu_ops uni_update_ops = {
.add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
.set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
@@ -1915,59 +1267,19 @@ static const struct mt7615_mcu_ops uni_update_ops = {
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
-static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
- u32 option)
-{
- struct {
- __le32 option;
- __le32 addr;
- } req = {
- .option = cpu_to_le32(option),
- .addr = cpu_to_le32(addr),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ, &req,
- sizeof(req), true);
-}
-
int mt7615_mcu_restart(struct mt76_dev *dev)
{
return mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
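
mt7615_mcu_restart() stays in the driver, while the surrounding patch-download primitives move into mt76-connac-lib. The protocol itself, visible in mt7615_load_patch() below, is unchanged: acquire the patch semaphore, issue the download-address request, push the firmware body, send the patch-finish request, then release the semaphore. Sketched driver-side flow using the shared helpers introduced by this diff:

	sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); /* get */
	if (sem == PATCH_IS_DL)		/* another core already loaded it */
		return 0;

	mt76_connac_mcu_init_download(&dev->mt76, addr, len, DL_MODE_NEED_RSP);
	/* ... stream the patch image ... */
	mt76_connac_mcu_start_patch(&dev->mt76);

	mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); /* release */
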
-static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
-{
- struct {
- __le32 op;
- } req = {
- .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL, &req,
- sizeof(req), true);
-}
-
-static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
-{
- struct {
- u8 check_crc;
- u8 reserved[3];
- } req = {
- .check_crc = 0,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ, &req,
- sizeof(req), true);
-}
-
static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
const struct firmware *fw = NULL;
int len, ret, sem;
- sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
switch (sem) {
case PATCH_IS_DL:
return 0;
@@ -1995,7 +1307,8 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
len = fw->size - sizeof(*hdr);
- ret = mt7615_mcu_init_download(dev, addr, len, DL_MODE_NEED_RSP);
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -2008,14 +1321,14 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
goto out;
}
- ret = mt7615_mcu_start_patch(dev);
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
if (ret)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
release_firmware(fw);
- sem = mt7615_mcu_patch_sem_ctrl(dev, 0);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
break;
@@ -2056,7 +1369,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
- err = mt7615_mcu_init_download(dev, addr, len, mode);
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
if (err) {
dev_err(dev->mt76.dev, "Download request failed\n");
return err;
@@ -2075,15 +1389,6 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return 0;
}
-static const struct wiphy_wowlan_support mt7615_wowlan_support = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
- .n_patterns = 1,
- .pattern_min_len = 1,
- .pattern_max_len = MT7615_WOW_PATTEN_MAX_LEN,
- .max_nd_match_sets = 10,
-};
-
static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
{
const struct mt7615_fw_trailer *hdr;
@@ -2110,8 +1415,9 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
if (ret)
goto out;
- ret = mt7615_mcu_start_firmware(dev, le32_to_cpu(hdr->addr),
- FW_START_OVERRIDE);
+ ret = mt76_connac_mcu_start_firmware(&dev->mt76,
+ le32_to_cpu(hdr->addr),
+ FW_START_OVERRIDE);
if (ret) {
dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
goto out;
@@ -2161,7 +1467,8 @@ static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
if (ret)
goto out;
- ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
+ ret = mt76_connac_mcu_start_firmware(&dev->mt76, 0,
+ FW_START_WORKING_PDA_CR4);
if (ret) {
dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
goto out;
@@ -2298,7 +1605,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
- ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -2325,7 +1633,7 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
override_addr, flag);
- ret = mt7615_mcu_start_firmware(dev, override_addr, flag);
+ ret = mt76_connac_mcu_start_firmware(&dev->mt76, override_addr, flag);
if (ret) {
dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
goto out;
@@ -2410,7 +1718,7 @@ int __mt7663_load_firmware(struct mt7615_dev *dev)
#ifdef CONFIG_PM
if (mt7615_firmware_offload(dev))
- dev->mt76.hw->wiphy->wowlan = &mt7615_wowlan_support;
+ dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */
dev_dbg(dev->mt76.dev, "Firmware init done\n");
@@ -2522,42 +1830,6 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
}
-EXPORT_SYMBOL_GPL(mt7615_mcu_set_eeprom);
-
-int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
-{
- struct {
- u8 enable;
- u8 band;
- u8 rsv[2];
- } __packed req = {
- .enable = enable,
- .band = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL, &req,
- sizeof(req), true);
-}
-
-int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
-{
- struct mt7615_dev *dev = phy->dev;
- struct {
- u8 prot_idx;
- u8 band;
- u8 rsv[2];
- __le32 len_thresh;
- __le32 pkt_thresh;
- } __packed req = {
- .prot_idx = 1,
- .band = phy != &dev->phy,
- .len_thresh = cpu_to_le32(val),
- .pkt_thresh = cpu_to_le32(0x2),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, &req,
- sizeof(req), true);
-}
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params)
@@ -2664,7 +1936,6 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, &req,
sizeof(req), true);
}
-EXPORT_SYMBOL_GPL(mt7615_mcu_del_wtbl_all);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
@@ -2880,7 +2151,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.control_chan = chandef->chan->hw_value,
.center_chan = ieee80211_frequency_to_channel(freq1),
.tx_streams = hweight8(phy->mt76->antenna_mask),
- .rx_streams_mask = phy->chainmask,
+ .rx_streams_mask = phy->mt76->chainmask,
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
@@ -2895,7 +2166,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
req.band_idx = phy != &dev->phy;
req.bw = mt7615_mcu_chan_bw(chandef);
- if (mt76_testmode_enabled(&dev->mt76))
+ if (mt76_testmode_enabled(phy->mt76))
memset(req.txpower_sku, 0x3f, 49);
else
mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
@@ -2956,296 +2227,6 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
sizeof(req), true);
}
-int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct {
- u8 bss_idx;
- u8 ps_state; /* 0: device awake
- * 1: static power save
- * 2: dynamic power saving
- */
- } req = {
- .bss_idx = mvif->idx,
- .ps_state = vif->bss_conf.ps ? 2 : 0,
- };
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return -ENOTSUPP;
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE, &req,
- sizeof(req), false);
-}
-
-int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct mt7615_dev *dev = phy->dev;
- struct mt7615_mcu_channel_domain {
- __le32 country_code; /* regulatory_request.alpha2 */
- u8 bw_2g; /* BW_20_40M 0
- * BW_20M 1
- * BW_20_40_80M 2
- * BW_20_40_80_160M 3
- * BW_20_40_80_8080M 4
- */
- u8 bw_5g;
- __le16 pad;
- u8 n_2ch;
- u8 n_5ch;
- __le16 pad2;
- } __packed hdr = {
- .bw_2g = 0,
- .bw_5g = 3,
- .n_2ch = mphy->sband_2g.sband.n_channels,
- .n_5ch = mphy->sband_5g.sband.n_channels,
- };
- struct mt7615_mcu_chan {
- __le16 hw_value;
- __le16 pad;
- __le32 flags;
- } __packed;
- int i, n_channels = hdr.n_2ch + hdr.n_5ch;
- int len = sizeof(hdr) + n_channels * sizeof(struct mt7615_mcu_chan);
- struct sk_buff *skb;
-
- if (!mt7615_firmware_offload(dev))
- return 0;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- for (i = 0; i < n_channels; i++) {
- struct ieee80211_channel *chan;
- struct mt7615_mcu_chan channel;
-
- if (i < hdr.n_2ch)
- chan = &mphy->sband_2g.sband.channels[i];
- else
- chan = &mphy->sband_5g.sband.channels[i - hdr.n_2ch];
-
- channel.hw_value = cpu_to_le16(chan->hw_value);
- channel.flags = cpu_to_le32(chan->flags);
- channel.pad = 0;
-
- skb_put_data(skb, &channel, sizeof(channel));
- }
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_SET_CHAN_DOMAIN,
- false);
-}
-
-#define MT7615_SCAN_CHANNEL_TIME 60
-int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *scan_req)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct cfg80211_scan_request *sreq = &scan_req->req;
- int n_ssids = 0, err, i, duration = MT7615_SCAN_CHANNEL_TIME;
- int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
- struct ieee80211_channel **scan_list = sreq->channels;
- struct mt7615_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- struct mt7615_mcu_scan_channel *chan;
- struct mt7615_hw_scan_req *req;
- struct sk_buff *skb;
-
- /* fall-back to sw-scan */
- if (!mt7615_firmware_offload(dev))
- return 1;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(*req));
- if (!skb)
- return -ENOMEM;
-
- set_bit(MT76_HW_SCANNING, &phy->mt76->state);
- mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
-
- req = (struct mt7615_hw_scan_req *)skb_put(skb, sizeof(*req));
-
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
- req->bss_idx = mvif->idx;
- req->scan_type = sreq->n_ssids ? 1 : 0;
- req->probe_req_num = sreq->n_ssids ? 2 : 0;
- req->version = 1;
-
- for (i = 0; i < sreq->n_ssids; i++) {
- if (!sreq->ssids[i].ssid_len)
- continue;
-
- req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
- memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
- sreq->ssids[i].ssid_len);
- n_ssids++;
- }
- req->ssid_type = n_ssids ? BIT(2) : BIT(0);
- req->ssid_type_ext = n_ssids ? BIT(0) : 0;
- req->ssids_num = n_ssids;
-
- /* increase channel time for passive scan */
- if (!sreq->n_ssids)
- duration *= 2;
- req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
- req->channel_min_dwell_time = cpu_to_le16(duration);
- req->channel_dwell_time = cpu_to_le16(duration);
-
- req->channels_num = min_t(u8, sreq->n_channels, 32);
- req->ext_channels_num = min_t(u8, ext_channels_num, 32);
- for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
- if (i >= 32)
- chan = &req->ext_channels[i - 32];
- else
- chan = &req->channels[i];
-
- chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
- chan->channel_num = scan_list[i]->hw_value;
- }
- req->channel_type = sreq->n_channels ? 4 : 0;
-
- if (sreq->ie_len > 0) {
- memcpy(req->ies, sreq->ie, sreq->ie_len);
- req->ies_len = cpu_to_le16(sreq->ie_len);
- }
-
- memcpy(req->bssid, sreq->bssid, ETH_ALEN);
- if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- get_random_mask_addr(req->random_mac, sreq->mac_addr,
- sreq->mac_addr_mask);
- req->scan_func = 1;
- }
-
- err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN,
- false);
- if (err < 0)
- clear_bit(MT76_HW_SCANNING, &phy->mt76->state);
-
- return err;
-}
-
-int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
- struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = phy->dev;
- struct {
- u8 seq_num;
- u8 is_ext_channel;
- u8 rsv[2];
- } __packed req = {
- .seq_num = mvif->scan_seq_num,
- };
-
- if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- ieee80211_scan_completed(phy->mt76->hw, &info);
- }
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req,
- sizeof(req), false);
-}
-
-int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *sreq)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct ieee80211_channel **scan_list = sreq->channels;
- struct mt7615_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- struct mt7615_mcu_scan_channel *chan;
- struct mt7615_sched_scan_req *req;
- struct cfg80211_match_set *match;
- struct cfg80211_ssid *ssid;
- struct sk_buff *skb;
- int i;
-
- if (!mt7615_firmware_offload(dev))
- return -ENOTSUPP;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(*req) + sreq->ie_len);
- if (!skb)
- return -ENOMEM;
-
- mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
-
- req = (struct mt7615_sched_scan_req *)skb_put(skb, sizeof(*req));
- req->version = 1;
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
-
- if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- get_random_mask_addr(req->random_mac, sreq->mac_addr,
- sreq->mac_addr_mask);
- req->scan_func = 1;
- }
-
- req->ssids_num = sreq->n_ssids;
- for (i = 0; i < req->ssids_num; i++) {
- ssid = &sreq->ssids[i];
- memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
- req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
- }
-
- req->match_num = sreq->n_match_sets;
- for (i = 0; i < req->match_num; i++) {
- match = &sreq->match_sets[i];
- memcpy(req->match[i].ssid, match->ssid.ssid,
- match->ssid.ssid_len);
- req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
- req->match[i].ssid_len = match->ssid.ssid_len;
- }
-
- req->channel_type = sreq->n_channels ? 4 : 0;
- req->channels_num = min_t(u8, sreq->n_channels, 64);
- for (i = 0; i < req->channels_num; i++) {
- chan = &req->channels[i];
- chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
- chan->channel_num = scan_list[i]->hw_value;
- }
-
- req->intervals_num = sreq->n_scan_plans;
- for (i = 0; i < req->intervals_num; i++)
- req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);
-
- if (sreq->ie_len > 0) {
- req->ie_len = cpu_to_le16(sreq->ie_len);
- memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
- }
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_SCHED_SCAN_REQ,
- false);
-}
-
-int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
- struct ieee80211_vif *vif,
- bool enable)
-{
- struct mt7615_dev *dev = phy->dev;
- struct {
- u8 active; /* 0: enabled 1: disabled */
- u8 rsv[3];
- } __packed req = {
- .active = !enable,
- };
-
- if (!mt7615_firmware_offload(dev))
- return -ENOTSUPP;
-
- if (enable)
- set_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
- else
- clear_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE, &req,
- sizeof(req), false);
-}
-
static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
{
int i;
@@ -3531,7 +2512,7 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
u8 bmc_triggered_ac;
u8 pad;
} req = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
.aid = cpu_to_le16(vif->bss_conf.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
@@ -3540,7 +2521,7 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
u8 bss_idx;
u8 pad[3];
} req_hdr = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
};
int err;
@@ -3556,307 +2537,13 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
sizeof(req), false);
}
-#ifdef CONFIG_PM
-int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
-{
- struct {
- struct {
- u8 hif_type; /* 0x0: HIF_SDIO
- * 0x1: HIF_USB
- * 0x2: HIF_PCIE
- */
- u8 pad[3];
- } __packed hdr;
- struct hif_suspend_tlv {
- __le16 tag;
- __le16 len;
- u8 suspend;
- } __packed hif_suspend;
- } req = {
- .hif_suspend = {
- .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
- .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
- .suspend = suspend,
- },
- };
-
- if (mt76_is_mmio(&dev->mt76))
- req.hdr.hif_type = 2;
- else if (mt76_is_usb(&dev->mt76))
- req.hdr.hif_type = 1;
- else if (mt76_is_sdio(&dev->mt76))
- req.hdr.hif_type = 0;
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, &req,
- sizeof(req), true);
-}
-EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
-
-static int
-mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
- bool suspend, struct cfg80211_wowlan *wowlan)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = phy->dev;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_wow_ctrl_tlv wow_ctrl_tlv;
- struct mt7615_wow_gpio_param_tlv gpio_tlv;
- } req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .wow_ctrl_tlv = {
- .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
- .len = cpu_to_le16(sizeof(struct mt7615_wow_ctrl_tlv)),
- .cmd = suspend ? 1 : 2,
- },
- .gpio_tlv = {
- .tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM),
- .len = cpu_to_le16(sizeof(struct mt7615_wow_gpio_param_tlv)),
-			.gpio_pin = 0xff, /* let the firmware choose the GPIO pin */
- },
- };
-
- if (wowlan->magic_pkt)
- req.wow_ctrl_tlv.trigger |= BIT(0);
- if (wowlan->disconnect)
- req.wow_ctrl_tlv.trigger |= BIT(2);
- if (wowlan->nd_config) {
- mt7615_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
- req.wow_ctrl_tlv.trigger |= BIT(5);
- mt7615_mcu_sched_scan_enable(phy, vif, suspend);
- }
-
- if (mt76_is_mmio(&dev->mt76))
- req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
- else if (mt76_is_usb(&dev->mt76))
- req.wow_ctrl_tlv.wakeup_hif = WOW_USB;
- else if (mt76_is_sdio(&dev->mt76))
- req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND, &req,
- sizeof(req), true);
-}
-
-static int
-mt7615_mcu_set_wow_pattern(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
- u8 index, bool enable,
- struct cfg80211_pkt_pattern *pattern)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_wow_pattern_tlv *ptlv;
- struct sk_buff *skb;
- struct req_hdr {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr = {
- .bss_idx = mvif->idx,
- };
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(hdr) + sizeof(*ptlv));
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, &hdr, sizeof(hdr));
- ptlv = (struct mt7615_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
- ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
- ptlv->len = cpu_to_le16(sizeof(*ptlv));
- ptlv->data_len = pattern->pattern_len;
- ptlv->enable = enable;
- ptlv->index = index;
-
- memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
- memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_SUSPEND,
- true);
-}
-
-static int
-mt7615_mcu_set_suspend_mode(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
- bool enable, u8 mdtim, bool wow_suspend)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_suspend_tlv suspend_tlv;
- } req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .suspend_tlv = {
- .tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
- .len = cpu_to_le16(sizeof(struct mt7615_suspend_tlv)),
- .enable = enable,
- .mdtim = mdtim,
- .wow_suspend = wow_suspend,
- },
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND, &req,
- sizeof(req), true);
-}
-
-static int
-mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
- bool suspend)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_gtk_rekey_tlv gtk_tlv;
- } __packed req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .gtk_tlv = {
- .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
- .len = cpu_to_le16(sizeof(struct mt7615_gtk_rekey_tlv)),
- .rekey_mode = !suspend,
- },
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, &req,
- sizeof(req), true);
-}
-
-static int
-mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool suspend)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt7615_arpns_tlv arpns;
- } req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .arpns = {
- .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
- .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
- .mode = suspend,
- },
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, &req,
- sizeof(req), true);
-}
-
-void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mt7615_phy *phy = priv;
- bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
- struct ieee80211_hw *hw = phy->mt76->hw;
- struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
- int i;
-
- mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
- mt7615_mcu_set_arp_filter(phy->dev, vif, suspend);
-
- mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
-
- for (i = 0; i < wowlan->n_patterns; i++)
- mt7615_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
- &wowlan->patterns[i]);
- mt7615_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
-}
-
-static void
-mt7615_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
- void *data)
-{
- struct mt7615_gtk_rekey_tlv *gtk_tlv = data;
- u32 cipher;
-
- if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
- key->cipher != WLAN_CIPHER_SUITE_CCMP &&
- key->cipher != WLAN_CIPHER_SUITE_TKIP)
- return;
-
- if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
- gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
- cipher = BIT(3);
- } else {
- gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
- cipher = BIT(4);
- }
-
-	/* we assume a single pairwise key here */
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
- gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
- gtk_tlv->group_cipher = cpu_to_le32(cipher);
- gtk_tlv->keyid = key->keyidx;
- }
-}
-
-int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *key)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
- struct mt7615_gtk_rekey_tlv *gtk_tlv;
- struct sk_buff *skb;
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr = {
- .bss_idx = mvif->idx,
- };
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(hdr) + sizeof(*gtk_tlv));
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, &hdr, sizeof(hdr));
- gtk_tlv = (struct mt7615_gtk_rekey_tlv *)skb_put(skb,
- sizeof(*gtk_tlv));
- gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
- gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
- gtk_tlv->rekey_mode = 2;
- gtk_tlv->option = 1;
-
- rcu_read_lock();
- ieee80211_iter_keys_rcu(hw, vif, mt7615_mcu_key_iter, gtk_tlv);
- rcu_read_unlock();
-
- memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
- memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
- memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
- true);
-}
-#endif /* CONFIG_PM */
-
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = phy->dev;
struct mt7615_roc_tlv req = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
.active = !chan,
.max_interval = cpu_to_le32(duration),
.primary_chan = chan ? chan->hw_value : 0,
@@ -3884,14 +2571,14 @@ int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
u8 bss_idx;
u8 pad[3];
} __packed hdr;
- struct mt7615_arpns_tlv arp;
+ struct mt76_connac_arpns_tlv arp;
} req_hdr = {
.hdr = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
- .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
.ips_num = len,
.mode = 2, /* update */
.option = 1,
@@ -3929,7 +2616,7 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
u8 rsv[3];
} __packed req = {
.ct_win = cpu_to_le32(ct_window),
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
};
if (!mt7615_firmware_offload(dev))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 6ef5670211d1..3874f45da9eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -4,6 +4,8 @@
#ifndef __MT7615_MCU_H
#define __MT7615_MCU_H
+#include "../mt76_connac_mcu.h"
+
struct mt7615_mcu_txd {
__le32 txd[8];
@@ -90,6 +92,7 @@ enum {
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_EXT = 0xed,
MCU_EVENT_RESTART_DL = 0xef,
+ MCU_EVENT_COREDUMP = 0xf0,
};
/* ext event table */
@@ -236,64 +239,6 @@ enum {
MCU_S2D_H2CN
};
-#define MCU_FW_PREFIX BIT(31)
-#define MCU_UNI_PREFIX BIT(30)
-#define MCU_CE_PREFIX BIT(29)
-#define MCU_QUERY_PREFIX BIT(28)
-#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
- MCU_CE_PREFIX | MCU_QUERY_PREFIX)
-
-#define MCU_QUERY_MASK BIT(16)
-
-enum {
- MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
- MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
- MCU_CMD_INIT_ACCESS_REG = 0x3,
- MCU_CMD_PATCH_START_REQ = 0x05,
- MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
- MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
- MCU_CMD_EXT_CID = 0xED,
- MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xEE,
- MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xEF,
-};
-
-enum {
- MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
- MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
- MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
- MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
- MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
- MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
- MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
- MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
- MCU_EXT_CMD_EDCA_UPDATE = 0x27,
- MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
- MCU_EXT_CMD_GET_TEMP = 0x2c,
- MCU_EXT_CMD_WTBL_UPDATE = 0x32,
- MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
- MCU_EXT_CMD_ATE_CTRL = 0x3d,
- MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
- MCU_EXT_CMD_DBDC_CTRL = 0x45,
- MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
- MCU_EXT_CMD_MUAR_UPDATE = 0x48,
- MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
- MCU_EXT_CMD_SET_RX_PATH = 0x4e,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
- MCU_EXT_CMD_RXDCOC_CAL = 0x59,
- MCU_EXT_CMD_TXDPD_CAL = 0x60,
- MCU_EXT_CMD_SET_RDD_TH = 0x7c,
- MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
-};
-
-enum {
- MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
- MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
- MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
- MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
- MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
- MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
-};
-
enum {
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
@@ -305,269 +250,11 @@ struct mt7615_mcu_uni_event {
__le32 status; /* 0: success, others: fail */
} __packed;
-struct mt7615_beacon_loss_event {
- u8 bss_idx;
- u8 reason;
- u8 pad[2];
-} __packed;
-
-struct mt7615_mcu_scan_ssid {
- __le32 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
-} __packed;
-
-struct mt7615_mcu_scan_channel {
- u8 band; /* 1: 2.4GHz
- * 2: 5.0GHz
- * Others: Reserved
- */
- u8 channel_num;
-} __packed;
-
-struct mt7615_mcu_scan_match {
- __le32 rssi_th;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
- u8 rsv[3];
-} __packed;
-
-struct mt7615_hw_scan_req {
- u8 seq_num;
- u8 bss_idx;
- u8 scan_type; /* 0: PASSIVE SCAN
- * 1: ACTIVE SCAN
- */
- u8 ssid_type; /* BIT(0) wildcard SSID
- * BIT(1) P2P wildcard SSID
- * BIT(2) specified SSID + wildcard SSID
- * BIT(2) + ssid_type_ext BIT(0) specified SSID only
- */
- u8 ssids_num;
-	u8 probe_req_num;	/* Number of probe requests for each SSID */
- u8 scan_func; /* BIT(0) Enable random MAC scan
- * BIT(1) Disable DBDC scan type 1~3.
-				 * BIT(2) Use DBDC scan type 3 (dedicate one RF to scanning).
- */
-	u8 version;		/* 0: fields after ies are not supported.
-				 * 1: fields after ies are supported.
- */
- struct mt7615_mcu_scan_ssid ssids[4];
- __le16 probe_delay_time;
-	__le16 channel_dwell_time;	/* channel dwell interval */
- __le16 timeout_value;
- u8 channel_type; /* 0: Full channels
- * 1: Only 2.4GHz channels
- * 2: Only 5GHz channels
- * 3: P2P social channel only (channel #1, #6 and #11)
- * 4: Specified channels
- * Others: Reserved
- */
- u8 channels_num; /* valid when channel_type is 4 */
- /* valid when channels_num is set */
- struct mt7615_mcu_scan_channel channels[32];
- __le16 ies_len;
- u8 ies[MT7615_SCAN_IE_LEN];
- /* following fields are valid if version > 0 */
- u8 ext_channels_num;
- u8 ext_ssids_num;
- __le16 channel_min_dwell_time;
- struct mt7615_mcu_scan_channel ext_channels[32];
- struct mt7615_mcu_scan_ssid ext_ssids[6];
- u8 bssid[ETH_ALEN];
-	u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set. */
- u8 pad[63];
- u8 ssid_type_ext;
-} __packed;
-
-#define SCAN_DONE_EVENT_MAX_CHANNEL_NUM 64
-struct mt7615_hw_scan_done {
- u8 seq_num;
- u8 sparse_channel_num;
- struct mt7615_mcu_scan_channel sparse_channel;
- u8 complete_channel_num;
- u8 current_state;
- u8 version;
- u8 pad;
- __le32 beacon_scan_num;
- u8 pno_enabled;
- u8 pad2[3];
- u8 sparse_channel_valid_num;
- u8 pad3[3];
- u8 channel_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
- /* idle format for channel_idle_time
-	 * 0: 1st byte: idle time (ms), 2nd byte: dwell time (ms)
-	 * 1: 1st byte: idle time (8ms), 2nd byte: dwell time (8ms)
- * 2: dwell time (16us)
- */
- __le16 channel_idle_time[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
- /* beacon and probe response count */
- u8 beacon_probe_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
- u8 mdrdy_count[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
- __le32 beacon_2g_num;
- __le32 beacon_5g_num;
-} __packed;
-
-struct mt7615_sched_scan_req {
- u8 version;
- u8 seq_num;
- u8 stop_on_match;
- u8 ssids_num;
- u8 match_num;
- u8 pad;
- __le16 ie_len;
- struct mt7615_mcu_scan_ssid ssids[MT7615_MAX_SCHED_SCAN_SSID];
- struct mt7615_mcu_scan_match match[MT7615_MAX_SCAN_MATCH];
- u8 channel_type;
- u8 channels_num;
- u8 intervals_num;
-	u8 scan_func;		/* BIT(0) enable random MAC address */
- struct mt7615_mcu_scan_channel channels[64];
- __le16 intervals[MT7615_MAX_SCHED_SCAN_INTERVAL];
- u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
- u8 pad2[58];
-} __packed;
-
-struct nt7615_sched_scan_done {
- u8 seq_num;
- u8 status; /* 0: ssid found */
- __le16 pad;
-} __packed;
-
struct mt7615_mcu_reg_event {
__le32 reg;
__le32 val;
} __packed;
-struct mt7615_mcu_bss_event {
- u8 bss_idx;
- u8 is_absent;
- u8 free_quota;
- u8 pad;
-} __packed;
-
-struct mt7615_bss_basic_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 omac_idx;
- u8 hw_bss_idx;
- u8 band_idx;
- __le32 conn_type;
- u8 conn_state;
- u8 wmm_idx;
- u8 bssid[ETH_ALEN];
- __le16 bmc_tx_wlan_idx;
- __le16 bcn_interval;
- u8 dtim_period;
- u8 phymode; /* bit(0): A
- * bit(1): B
- * bit(2): G
- * bit(3): GN
- * bit(4): AN
- * bit(5): AC
- */
- __le16 sta_idx;
- u8 nonht_basic_phy;
- u8 pad[3];
-} __packed;
-
-struct mt7615_bss_qos_tlv {
- __le16 tag;
- __le16 len;
- u8 qos;
- u8 pad[3];
-} __packed;
-
-enum {
- WOW_USB = 1,
- WOW_PCIE = 2,
- WOW_GPIO = 3,
-};
-
-struct mt7615_wow_ctrl_tlv {
- __le16 tag;
- __le16 len;
- u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
- * 0x2: PM_WOWLAN_REQ_STOP
- * 0x3: PM_WOWLAN_PARAM_CLEAR
- */
- u8 trigger; /* 0: NONE
- * BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
- * BIT(1): NL80211_WOWLAN_TRIG_ANY
- * BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
- * BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
- * BIT(4): BEACON_LOST
- * BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
- */
- u8 wakeup_hif; /* 0x0: HIF_SDIO
- * 0x1: HIF_USB
- * 0x2: HIF_PCIE
- * 0x3: HIF_GPIO
- */
- u8 pad;
- u8 rsv[4];
-} __packed;
-
-struct mt7615_wow_gpio_param_tlv {
- __le16 tag;
- __le16 len;
- u8 gpio_pin;
- u8 trigger_lvl;
- u8 pad[2];
- __le32 gpio_interval;
- u8 rsv[4];
-} __packed;
-
-#define MT7615_WOW_MASK_MAX_LEN 16
-#define MT7615_WOW_PATTEN_MAX_LEN 128
-struct mt7615_wow_pattern_tlv {
- __le16 tag;
- __le16 len;
- u8 index; /* pattern index */
- u8 enable; /* 0: disable
- * 1: enable
- */
- u8 data_len; /* pattern length */
- u8 pad;
- u8 mask[MT7615_WOW_MASK_MAX_LEN];
- u8 pattern[MT7615_WOW_PATTEN_MAX_LEN];
- u8 rsv[4];
-} __packed;
-
-struct mt7615_suspend_tlv {
- __le16 tag;
- __le16 len;
- u8 enable; /* 0: suspend mode disabled
- * 1: suspend mode enabled
- */
- u8 mdtim; /* LP parameter */
-	u8 wow_suspend;		/* 0: update by original policy
- * 1: update by wow dtim
- */
- u8 pad[5];
-} __packed;
-
-struct mt7615_gtk_rekey_tlv {
- __le16 tag;
- __le16 len;
- u8 kek[NL80211_KEK_LEN];
- u8 kck[NL80211_KCK_LEN];
- u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
- u8 rekey_mode; /* 0: rekey offload enable
- * 1: rekey offload disable
- * 2: rekey update
- */
- u8 keyid;
- u8 pad[2];
- __le32 proto; /* WPA-RSN-WAPI-OPSN */
- __le32 pairwise_cipher;
- __le32 group_cipher;
- __le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
- __le32 mgmt_group_cipher;
- u8 option; /* 1: rekey data update without enabling offload */
- u8 reserverd[3];
-} __packed;
-
struct mt7615_roc_tlv {
u8 bss_idx;
u8 token;
@@ -585,65 +272,6 @@ struct mt7615_roc_tlv {
u8 rsv1[8];
} __packed;
-struct mt7615_arpns_tlv {
- __le16 tag;
- __le16 len;
- u8 mode;
- u8 ips_num;
- u8 option;
- u8 pad[1];
-} __packed;
-
-/* offload mcu commands */
-enum {
- MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
- MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
- MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
- MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
- MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
- MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
- MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1c,
- MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
- MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
- MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
- MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
- MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
-};
-
-#define MCU_CMD_ACK BIT(0)
-#define MCU_CMD_UNI BIT(1)
-#define MCU_CMD_QUERY BIT(2)
-
-#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | MCU_CMD_QUERY)
-
-enum {
- UNI_BSS_INFO_BASIC = 0,
- UNI_BSS_INFO_RLM = 2,
- UNI_BSS_INFO_BCN_CONTENT = 7,
- UNI_BSS_INFO_QBSS = 15,
- UNI_BSS_INFO_UAPSD = 19,
-};
-
-enum {
- UNI_SUSPEND_MODE_SETTING,
- UNI_SUSPEND_WOW_CTRL,
- UNI_SUSPEND_WOW_GPIO_PARAM,
- UNI_SUSPEND_WOW_WAKEUP_PORT,
- UNI_SUSPEND_WOW_PATTERN,
-};
-
-enum {
- UNI_OFFLOAD_OFFLOAD_ARP,
- UNI_OFFLOAD_OFFLOAD_ND,
- UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
- UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
-};
-
-enum {
- PATCH_SEM_RELEASE = 0x0,
- PATCH_SEM_GET = 0x1
-};
-
enum {
PATCH_NOT_DL_SEM_FAIL = 0x0,
PATCH_IS_DL = 0x1,
@@ -664,34 +292,6 @@ enum {
FW_STATE_N9_RDY = 2,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
-#define CONN_STATE_DISCONNECT 0
-#define CONN_STATE_CONNECT 1
-#define CONN_STATE_PORT_SECURE 2
-
-enum {
- DEV_INFO_ACTIVE,
- DEV_INFO_MAX_NUM
-};
-
enum {
DBDC_TYPE_WMM,
DBDC_TYPE_MGMT,
@@ -704,11 +304,6 @@ enum {
__DBDC_TYPE_MAX,
};
-struct tlv {
- __le16 tag;
- __le16 len;
-} __packed;
-
struct bss_info_omac {
__le16 tag;
__le16 len;
@@ -767,157 +362,6 @@ enum {
BSS_INFO_MAX_NUM
};
-enum {
- WTBL_RESET_AND_SET = 1,
- WTBL_SET,
- WTBL_QUERY,
- WTBL_RESET_ALL
-};
-
-struct wtbl_req_hdr {
- u8 wlan_idx;
- u8 operation;
- __le16 tlv_num;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_generic {
- __le16 tag;
- __le16 len;
- u8 peer_addr[ETH_ALEN];
- u8 muar_idx;
- u8 skip_tx;
- u8 cf_ack;
- u8 qos;
- u8 mesh;
- u8 adm;
- __le16 partial_aid;
- u8 baf_en;
- u8 aad_om;
-} __packed;
-
-struct wtbl_rx {
- __le16 tag;
- __le16 len;
- u8 rcid;
- u8 rca1;
- u8 rca2;
- u8 rv;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_ht {
- __le16 tag;
- __le16 len;
- u8 ht;
- u8 ldpc;
- u8 af;
- u8 mm;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_vht {
- __le16 tag;
- __le16 len;
- u8 ldpc;
- u8 dyn_bw;
- u8 vht;
- u8 txop_ps;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_tx_ps {
- __le16 tag;
- __le16 len;
- u8 txps;
- u8 rsv[3];
-} __packed;
-
-struct wtbl_hdr_trans {
- __le16 tag;
- __le16 len;
- u8 to_ds;
- u8 from_ds;
- u8 disable_rx_trans;
- u8 rsv;
-} __packed;
-
-enum {
- MT_BA_TYPE_INVALID,
- MT_BA_TYPE_ORIGINATOR,
- MT_BA_TYPE_RECIPIENT
-};
-
-enum {
- RST_BA_MAC_TID_MATCH,
- RST_BA_MAC_MATCH,
- RST_BA_NO_MATCH
-};
-
-struct wtbl_ba {
- __le16 tag;
- __le16 len;
- /* common */
- u8 tid;
- u8 ba_type;
- u8 rsv0[2];
- /* originator only */
- __le16 sn;
- u8 ba_en;
- u8 ba_winsize_idx;
- __le16 ba_winsize;
- /* recipient only */
- u8 peer_addr[ETH_ALEN];
- u8 rst_ba_tid;
- u8 rst_ba_sel;
- u8 rst_ba_sb;
- u8 band_idx;
- u8 rsv1[4];
-} __packed;
-
-struct wtbl_bf {
- __le16 tag;
- __le16 len;
- u8 ibf;
- u8 ebf;
- u8 ibf_vht;
- u8 ebf_vht;
- u8 gid;
- u8 pfmu_idx;
- u8 rsv[2];
-} __packed;
-
-struct wtbl_smps {
- __le16 tag;
- __le16 len;
- u8 smps;
- u8 rsv[3];
-} __packed;
-
-struct wtbl_pn {
- __le16 tag;
- __le16 len;
- u8 pn[6];
- u8 rsv[2];
-} __packed;
-
-struct wtbl_spe {
- __le16 tag;
- __le16 len;
- u8 spe_idx;
- u8 rsv[3];
-} __packed;
-
-struct wtbl_raw {
- __le16 tag;
- __le16 len;
- u8 wtbl_idx;
- u8 dw;
- u8 rsv[2];
- __le32 msk;
- __le32 val;
-} __packed;
-
#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
sizeof(struct wtbl_generic) + \
sizeof(struct wtbl_rx) + \
@@ -943,127 +387,6 @@ struct wtbl_raw {
sizeof(struct wtbl_ba))
enum {
- WTBL_GENERIC,
- WTBL_RX,
- WTBL_HT,
- WTBL_VHT,
- WTBL_PEER_PS, /* not used */
- WTBL_TX_PS,
- WTBL_HDR_TRANS,
- WTBL_SEC_KEY,
- WTBL_BA,
- WTBL_RDG, /* obsoleted */
- WTBL_PROTECT, /* not used */
- WTBL_CLEAR, /* not used */
- WTBL_BF,
- WTBL_SMPS,
- WTBL_RAW_DATA, /* debug only */
- WTBL_PN,
- WTBL_SPE,
- WTBL_MAX_NUM
-};
-
-struct sta_ntlv_hdr {
- u8 rsv[2];
- __le16 tlv_num;
-} __packed;
-
-struct sta_req_hdr {
- u8 bss_idx;
- u8 wlan_idx;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 muar_idx;
- u8 rsv[2];
-} __packed;
-
-struct sta_rec_state {
- __le16 tag;
- __le16 len;
- u8 state;
- __le32 flags;
- u8 vhtop;
- u8 pad[2];
-} __packed;
-
-struct sta_rec_basic {
- __le16 tag;
- __le16 len;
- __le32 conn_type;
- u8 conn_state;
- u8 qos;
- __le16 aid;
- u8 peer_addr[ETH_ALEN];
-#define EXTRA_INFO_VER BIT(0)
-#define EXTRA_INFO_NEW BIT(1)
- __le16 extra_info;
-} __packed;
-
-struct sta_rec_ht {
- __le16 tag;
- __le16 len;
- __le16 ht_cap;
- u16 rsv;
-} __packed;
-
-struct sta_rec_vht {
- __le16 tag;
- __le16 len;
- __le32 vht_cap;
- __le16 vht_rx_mcs_map;
- __le16 vht_tx_mcs_map;
-} __packed;
-
-struct sta_rec_ba {
- __le16 tag;
- __le16 len;
- u8 tid;
- u8 ba_type;
- u8 amsdu;
- u8 ba_en;
- __le16 ssn;
- __le16 winsize;
-} __packed;
-
-struct sta_rec_uapsd {
- __le16 tag;
- __le16 len;
- u8 dac_map;
- u8 tac_map;
- u8 max_sp;
- u8 rsv0;
- __le16 listen_interval;
- u8 rsv1[2];
-} __packed;
-
-enum {
- STA_REC_BASIC,
- STA_REC_RA,
- STA_REC_RA_CMM_INFO,
- STA_REC_RA_UPDATE,
- STA_REC_BF,
- STA_REC_AMSDU, /* for CR4 */
- STA_REC_BA,
- STA_REC_STATE,
- STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
- STA_REC_HT,
- STA_REC_VHT,
- STA_REC_APPS,
- STA_REC_WTBL = 13,
- STA_REC_MAX_NUM
-};
-
-enum {
- CMD_CBW_20MHZ,
- CMD_CBW_40MHZ,
- CMD_CBW_80MHZ,
- CMD_CBW_160MHZ,
- CMD_CBW_10MHZ,
- CMD_CBW_5MHZ,
- CMD_CBW_8080MHZ
-};
-
-enum {
CH_SWITCH_NORMAL = 0,
CH_SWITCH_SCAN = 3,
CH_SWITCH_MCC = 4,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 99b8abdbb08f..491841bc6291 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
-#include "../mt76.h"
+#include "../mt76_connac_mcu.h"
#include "regs.h"
#define MT7615_MAX_INTERFACES 16
@@ -65,11 +65,6 @@
#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
-#define MT7615_SCAN_IE_LEN 600
-#define MT7615_MAX_SCHED_SCAN_INTERVAL 10
-#define MT7615_MAX_SCHED_SCAN_SSID 10
-#define MT7615_MAX_SCAN_MATCH 16
-
struct mt7615_vif;
struct mt7615_sta;
struct mt7615_dfs_pulse;
@@ -133,12 +128,7 @@ struct mt7615_sta {
};
struct mt7615_vif {
- u8 idx;
- u8 omac_idx;
- u8 band_idx;
- u8 wmm_idx;
- u8 scan_seq_num;
-
+ struct mt76_vif mt76; /* must be first */
struct mt7615_sta sta;
};
@@ -171,8 +161,6 @@ struct mt7615_phy {
s8 ofdm_sensitivity;
s8 cck_sensitivity;
- u16 chainmask;
-
s16 coverage_class;
u8 slottime;
@@ -185,9 +173,6 @@ struct mt7615_phy {
struct mib_stats mib;
- struct delayed_work mac_work;
- u8 mac_work_count;
-
struct sk_buff_head scan_event_list;
struct delayed_work scan_work;
@@ -195,13 +180,24 @@ struct mt7615_phy {
struct timer_list roc_timer;
wait_queue_head_t roc_wait;
bool roc_grant;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 *reg_backup;
+
+ s16 last_freq_offset;
+ u8 last_rcpi[4];
+ s8 last_ib_rssi[4];
+ s8 last_wb_rssi[4];
+ } test;
+#endif
};
#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
-#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
-#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
-#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
+#define mt7615_mcu_sta_add(phy, ...) ((phy)->dev)->mcu_ops->sta_add((phy), __VA_ARGS__)
+#define mt7615_mcu_add_dev_info(phy, ...) ((phy)->dev)->mcu_ops->add_dev_info((phy), __VA_ARGS__)
+#define mt7615_mcu_add_bss_info(phy, ...) ((phy)->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
@@ -213,11 +209,10 @@ struct mt7615_mcu_ops {
int (*add_rx_ba)(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable);
- int (*sta_add)(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
+ int (*sta_add)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
- int (*add_dev_info)(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable);
+ int (*add_dev_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ bool enable);
int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
int (*add_beacon_offload)(struct mt7615_dev *dev,
@@ -281,33 +276,8 @@ struct mt7615_dev {
u32 muar_mask;
-#ifdef CONFIG_NL80211_TESTMODE
- struct {
- u32 *reg_backup;
-
- s16 last_freq_offset;
- u8 last_rcpi[4];
- s8 last_ib_rssi[4];
- s8 last_wb_rssi[4];
- } test;
-#endif
-
- struct {
- bool enable;
-
- spinlock_t txq_lock;
- struct {
- struct mt7615_sta *msta;
- struct sk_buff *skb;
- } tx_q[IEEE80211_NUM_ACS];
-
- struct work_struct wake_work;
- struct completion wake_cmpl;
-
- struct delayed_work ps_work;
- unsigned long last_activity;
- unsigned long idle_timeout;
- } pm;
+ struct mt76_connac_pm pm;
+ struct mt76_connac_coredump coredump;
};
enum tx_pkt_queue_idx {
@@ -326,20 +296,6 @@ enum tx_pkt_queue_idx {
};
enum {
- HW_BSSID_0 = 0x0,
- HW_BSSID_1,
- HW_BSSID_2,
- HW_BSSID_3,
- HW_BSSID_MAX = HW_BSSID_3,
- EXT_BSSID_START = 0x10,
- EXT_BSSID_1,
- EXT_BSSID_15 = 0x1f,
- EXT_BSSID_MAX = EXT_BSSID_15,
- REPEATER_BSSID_START = 0x20,
- REPEATER_BSSID_MAX = 0x3f,
-};
-
-enum {
MT_RX_SEL0,
MT_RX_SEL1,
};
@@ -407,7 +363,6 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
-void mt7615_check_offload_capability(struct mt7615_dev *dev);
void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
@@ -428,8 +383,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
void mt7615_pm_wake_work(struct work_struct *work);
-int mt7615_pm_wake(struct mt7615_dev *dev);
-void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
void mt7615_pm_power_save_work(struct work_struct *work);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
@@ -485,19 +438,10 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
return MT7615_WTBL_SIZE;
}
-static inline void mt7615_mutex_acquire(struct mt7615_dev *dev)
- __acquires(&dev->mt76.mutex)
-{
- mutex_lock(&dev->mt76.mutex);
- mt7615_pm_wake(dev);
-}
-
-static inline void mt7615_mutex_release(struct mt7615_dev *dev)
- __releases(&dev->mt76.mutex)
-{
- mt7615_pm_power_save_sched(dev);
- mutex_unlock(&dev->mt76.mutex);
-}
+#define mt7615_mutex_acquire(dev) \
+ mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm)
+#define mt7615_mutex_release(dev) \
+ mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
{
@@ -525,9 +469,8 @@ void mt7615_roc_work(struct work_struct *work);
void mt7615_roc_timer(struct timer_list *timer);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
-void mt7615_phy_init(struct mt7615_dev *dev);
-void mt7615_mac_init(struct mt7615_dev *dev);
int mt7615_set_channel(struct mt7615_phy *phy);
+void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_dev *mdev);
@@ -558,24 +501,11 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
-int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq);
-int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy);
-int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *scan_req);
-int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
- struct ieee80211_vif *vif);
-int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *sreq);
-int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
- struct ieee80211_vif *vif,
- bool enable);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -583,7 +513,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-
+void mt7615_tx_token_put(struct mt7615_dev *dev);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -604,7 +534,6 @@ int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
-int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif);
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
@@ -620,18 +549,13 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
bool enable);
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
-int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
-void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif);
-int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *key);
int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info);
int __mt7663_load_firmware(struct mt7615_dev *dev);
u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+void mt7615_coredump_work(struct work_struct *work);
/* usb */
int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index dbd29d897b29..71487f532f36 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include "mt7615.h"
+#include "mcu.h"
static const struct pci_device_id mt7615_pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7615) },
@@ -75,14 +76,14 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
bool hif_suspend;
int i, err;
- err = mt7615_pm_wake(dev);
+ err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (err < 0)
return err;
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev);
if (hif_suspend) {
- err = mt7615_mcu_set_hif_suspend(dev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
return err;
}
@@ -130,7 +131,7 @@ restore:
}
napi_enable(&mdev->tx_napi);
if (hif_suspend)
- mt7615_mcu_set_hif_suspend(dev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false);
return err;
}
@@ -172,7 +173,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt7615_mcu_set_hif_suspend(dev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 27fcb1374685..72395925ddee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -12,7 +12,7 @@
#include "mac.h"
#include "eeprom.h"
-static void mt7615_init_work(struct work_struct *work)
+static void mt7615_pci_init_work(struct work_struct *work)
{
struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
mcu_work);
@@ -27,12 +27,7 @@ static void mt7615_init_work(struct work_struct *work)
if (ret)
return;
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
- mt7615_check_offload_capability(dev);
-
+ mt7615_init_work(dev);
if (dev->dbdc_support)
mt7615_register_ext_phy(dev);
}
@@ -44,7 +39,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
- INIT_WORK(&dev->mcu_work, mt7615_init_work);
+ INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@@ -160,9 +155,7 @@ int mt7615_register_device(struct mt7615_dev *dev)
void mt7615_unregister_device(struct mt7615_dev *dev)
{
- struct mt76_txwi_cache *txwi;
bool mcu_running;
- int id;
mcu_running = mt7615_wait_for_mcu_init(dev);
@@ -172,15 +165,7 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
- mt7615_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb)
- dev_kfree_skb_any(txwi->skb);
- mt76_put_txwi(&dev->mt76, txwi);
- }
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
+ mt7615_tx_token_put(dev);
tasklet_disable(&dev->irq_tasklet);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 4cf7c5d34325..1b4cb145f38e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -118,7 +118,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
txp->bss_idx = mvif->idx;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 347975eaba86..305bb8597531 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -17,6 +17,7 @@
#include "mt7615.h"
#include "sdio.h"
#include "mac.h"
+#include "mcu.h"
static const struct sdio_device_id mt7663s_table[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) },
@@ -227,11 +228,7 @@ static void mt7663s_init_work(struct work_struct *work)
if (mt7663s_mcu_init(dev))
return;
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
- mt7615_check_offload_capability(dev);
+ mt7615_init_work(dev);
}
static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
@@ -417,7 +414,7 @@ static int mt7663s_suspend(struct device *dev)
mt7615_firmware_offload(mdev)) {
int err;
- err = mt7615_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true);
if (err < 0)
return err;
}
@@ -456,7 +453,7 @@ static int mt7663s_resume(struct device *dev)
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev))
- err = mt7615_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 13d77f8fca86..9fb506f2ace6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
- int len = 0, err, i, order;
+ int len = 0, err, i;
struct page *page;
u8 *buf;
@@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- order = get_order(len);
- page = __dev_alloc_pages(GFP_KERNEL, order);
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
if (!page)
return -ENOMEM;
@@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- __free_pages(page, order);
+ put_page(page);
return err;
}
@@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (q->queued + i + 1 == q->ndesc)
break;
}
- __free_pages(page, order);
+ put_page(page);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index 8fc97a52411a..59d99264f5e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -67,8 +67,8 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
};
u8 *tx_power = NULL;
- if (dev->mt76.test.state != MT76_TM_STATE_OFF)
- tx_power = dev->mt76.test.tx_power;
+ if (mphy->test.state != MT76_TM_STATE_OFF)
+ tx_power = mphy->test.tx_power;
len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
@@ -95,14 +95,15 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
}
static void
-mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
+mt7615_tm_reg_backup_restore(struct mt7615_phy *phy)
{
- u32 *b = dev->test.reg_backup;
+ struct mt7615_dev *dev = phy->dev;
+ u32 *b = phy->test.reg_backup;
int n_regs = ARRAY_SIZE(reg_backup_list);
int n_rf_regs = ARRAY_SIZE(rf_backup_list);
int i;
- if (dev->mt76.test.state == MT76_TM_STATE_OFF) {
+ if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i], b[i]);
@@ -120,7 +121,7 @@ mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
if (!b)
return;
- dev->test.reg_backup = b;
+ phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
b[i] = mt76_rr(dev, reg_backup_list[i]);
for (i = 0; i < n_rf_regs; i++)
@@ -128,30 +129,23 @@ mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
rf_backup_list[i].reg);
}
-
static void
-mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy)
+mt7615_tm_init(struct mt7615_phy *phy)
{
+ struct mt7615_dev *dev = phy->dev;
unsigned int total_flags = ~0;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
+ mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF);
+
mutex_unlock(&dev->mt76.mutex);
mt7615_set_channel(phy);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
- mt7615_tm_reg_backup_restore(dev);
-}
-
-static void
-mt7615_tm_init(struct mt7615_dev *dev)
-{
- mt7615_tm_init_phy(dev, &dev->phy);
-
- if (dev->mt76.phy2)
- mt7615_tm_init_phy(dev, dev->mt76.phy2->priv);
+ mt7615_tm_reg_backup_restore(phy);
}
static void
@@ -175,9 +169,10 @@ mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en)
}
static void
-mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
+mt7615_tm_set_tx_antenna(struct mt7615_phy *phy, bool en)
{
- struct mt76_testmode_data *td = &dev->mt76.test;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_testmode_data *td = &phy->mt76->test;
u8 mask = td->tx_antenna_mask;
int i;
@@ -185,7 +180,7 @@ mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
return;
if (!en)
- mask = dev->phy.chainmask;
+ mask = phy->mt76->chainmask;
for (i = 0; i < 4; i++) {
mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
@@ -228,26 +223,28 @@ mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
}
static void
-mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en)
+mt7615_tm_set_tx_frames(struct mt7615_phy *phy, bool en)
{
+ struct mt7615_dev *dev = phy->dev;
struct ieee80211_tx_info *info;
- struct sk_buff *skb = dev->mt76.test.tx_skb;
+ struct sk_buff *skb = phy->mt76->test.tx_skb;
- mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
- mt7615_tm_set_tx_antenna(dev, en);
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7615_tm_set_tx_antenna(phy, en);
mt7615_tm_set_rx_enable(dev, !en);
if (!en || !skb)
return;
info = IEEE80211_SKB_CB(skb);
- info->control.vif = dev->phy.monitor_vif;
+ info->control.vif = phy->monitor_vif;
}
static void
-mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed)
+mt7615_tm_update_params(struct mt7615_phy *phy, u32 changed)
{
- struct mt76_testmode_data *td = &dev->mt76.test;
- bool en = dev->mt76.test.state != MT76_TM_STATE_OFF;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ bool en = phy->mt76->test.state != MT76_TM_STATE_OFF;
if (changed & BIT(TM_CHANGED_TXPOWER_CTRL))
mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL,
@@ -256,25 +253,25 @@ mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed)
mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET,
en, en ? td->freq_offset : 0);
if (changed & BIT(TM_CHANGED_TXPOWER))
- mt7615_tm_set_tx_power(&dev->phy);
+ mt7615_tm_set_tx_power(phy);
}
static int
-mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
+mt7615_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt76_testmode_data *td = &mdev->test;
+ struct mt7615_phy *phy = mphy->priv;
+ struct mt76_testmode_data *td = &mphy->test;
enum mt76_testmode_state prev_state = td->state;
- mdev->test.state = state;
+ mphy->test.state = state;
if (prev_state == MT76_TM_STATE_TX_FRAMES)
- mt7615_tm_set_tx_frames(dev, false);
+ mt7615_tm_set_tx_frames(phy, false);
else if (state == MT76_TM_STATE_TX_FRAMES)
- mt7615_tm_set_tx_frames(dev, true);
+ mt7615_tm_set_tx_frames(phy, true);
if (state <= MT76_TM_STATE_IDLE)
- mt7615_tm_init(dev);
+ mt7615_tm_init(phy);
if ((state == MT76_TM_STATE_IDLE &&
prev_state == MT76_TM_STATE_OFF) ||
@@ -290,18 +287,18 @@ mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
changed |= BIT(i);
}
- mt7615_tm_update_params(dev, changed);
+ mt7615_tm_update_params(phy, changed);
}
return 0;
}
static int
-mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
+mt7615_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
enum mt76_testmode_state new_state)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt76_testmode_data *td = &dev->mt76.test;
+ struct mt76_testmode_data *td = &mphy->test;
+ struct mt7615_phy *phy = mphy->priv;
u32 changed = 0;
int i;
@@ -311,7 +308,7 @@ mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
td->state == MT76_TM_STATE_OFF)
return 0;
- if (td->tx_antenna_mask & ~dev->phy.chainmask)
+ if (td->tx_antenna_mask & ~mphy->chainmask)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
@@ -319,15 +316,15 @@ mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
changed |= BIT(i);
}
- mt7615_tm_update_params(dev, changed);
+ mt7615_tm_update_params(phy, changed);
return 0;
}
static int
-mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
+mt7615_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_phy *phy = mphy->priv;
void *rx, *rssi;
int i;
@@ -335,15 +332,15 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rx)
return -ENOMEM;
- if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset))
+ if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, phy->test.last_freq_offset))
return -ENOMEM;
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++)
- if (nla_put_u8(msg, i, dev->test.last_rcpi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_rcpi); i++)
+ if (nla_put_u8(msg, i, phy->test.last_rcpi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
@@ -352,8 +349,8 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_ib_rssi); i++)
- if (nla_put_s8(msg, i, dev->test.last_ib_rssi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_ib_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_ib_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
@@ -362,8 +359,8 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_wb_rssi); i++)
- if (nla_put_s8(msg, i, dev->test.last_wb_rssi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_wb_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_wb_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index a60cfa345521..0396ad532ba6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -29,7 +29,7 @@ static void mt7663u_stop(struct ieee80211_hw *hw)
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
cancel_delayed_work_sync(&phy->scan_work);
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mt76u_stop_tx(&dev->mt76);
}
@@ -47,11 +47,7 @@ static void mt7663u_init_work(struct work_struct *work)
if (mt7663u_mcu_init(dev))
return;
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
- mt7615_check_offload_capability(dev);
+ mt7615_init_work(dev);
}
static int mt7663u_probe(struct usb_interface *usb_intf,
@@ -173,7 +169,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
mt7615_firmware_offload(dev)) {
int err;
- err = mt7615_mcu_set_hif_suspend(dev, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
if (err < 0)
return err;
}
@@ -201,7 +197,7 @@ static int mt7663u_resume(struct usb_interface *intf)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt7615_mcu_set_hif_suspend(dev, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
new file mode 100644
index 000000000000..0d58606391b0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT76_CONNAC_H
+#define __MT76_CONNAC_H
+
+#include "mt76.h"
+
+#define MT76_CONNAC_SCAN_IE_LEN 600
+#define MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL 10
+#define MT76_CONNAC_MAX_SCHED_SCAN_SSID 10
+#define MT76_CONNAC_MAX_SCAN_MATCH 16
+
+#define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
+#define MT76_CONNAC_COREDUMP_SZ (128 * 1024)
+
+enum {
+ CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
+ CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
+ CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80,
+ CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ,
+
+ CMD_HE_MCS_BW80 = 0,
+ CMD_HE_MCS_BW160,
+ CMD_HE_MCS_BW8080,
+ CMD_HE_MCS_BW_NUM
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX = HW_BSSID_3,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_15 = 0x1f,
+ EXT_BSSID_MAX = EXT_BSSID_15,
+ REPEATER_BSSID_START = 0x20,
+ REPEATER_BSSID_MAX = 0x3f,
+};
+
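+/* Runtime power-save state shared by connac-based drivers: one parked
+ * tx frame per AC plus the wake/idle work machinery.
+ */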
+struct mt76_connac_pm {
+ bool enable;
+
+ spinlock_t txq_lock;
+ struct {
+ struct mt76_wcid *wcid;
+ struct sk_buff *skb;
+ } tx_q[IEEE80211_NUM_ACS];
+
+ struct work_struct wake_work;
+ struct completion wake_cmpl;
+
+ struct delayed_work ps_work;
+ unsigned long last_activity;
+ unsigned long idle_timeout;
+};
+
+struct mt76_connac_coredump {
+ struct sk_buff_head msg_list;
+ struct delayed_work work;
+ unsigned long last_activity;
+};
+
+extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
+
+static inline bool is_mt7921(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7961;
+}
+
+int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
+void mt76_connac_power_save_sched(struct mt76_phy *phy,
+ struct mt76_connac_pm *pm);
+void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
+ struct mt76_wcid *wcid);
+
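+/* Take the driver mutex and make sure the chip is awake before use */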
+static inline void
+mt76_connac_mutex_acquire(struct mt76_dev *dev, struct mt76_connac_pm *pm)
+ __acquires(&dev->mutex)
+{
+ mutex_lock(&dev->mutex);
+ mt76_connac_pm_wake(&dev->phy, pm);
+}
+
+static inline void
+mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
+ __releases(&dev->mutex)
+{
+ mt76_connac_power_save_sched(&dev->phy, pm);
+ mutex_unlock(&dev->mutex);
+}
+
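+/*
+ * Typical calling pattern (a sketch, not part of this patch): bracket MCU
+ * access with the helpers above so the device is woken up first and runtime
+ * power save is rescheduled afterwards. 'dev', 'pm', 'cmd' and 'req' stand in
+ * for the driver-private instances and command of the caller:
+ *
+ *	mt76_connac_mutex_acquire(dev, pm);
+ *	err = mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true);
+ *	mt76_connac_mutex_release(dev, pm);
+ */
+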
+void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
+ struct mt76_connac_pm *pm,
+ struct mt76_wcid *wcid,
+ struct sk_buff *skb);
+void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
+ struct mt76_connac_pm *pm);
+
+#endif /* __MT76_CONNAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
new file mode 100644
index 000000000000..c5f5037f5757
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt76_connac.h"
+
+int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ if (!pm->enable)
+ return 0;
+
+ if (!mt76_is_mmio(dev))
+ return 0;
+
+ if (!test_bit(MT76_STATE_PM, &phy->state))
+ return 0;
+
+ if (test_bit(MT76_HW_SCANNING, &phy->state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
+ return 0;
+
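+	/* re-arm the completion only when new wake work was actually queued,
+	 * then give the firmware up to three seconds to leave PM state
+	 */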
+ if (queue_work(dev->wq, &pm->wake_work))
+ reinit_completion(&pm->wake_cmpl);
+
+ if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) {
+ ieee80211_wake_queues(phy->hw);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_pm_wake);
+
+void mt76_connac_power_save_sched(struct mt76_phy *phy,
+ struct mt76_connac_pm *pm)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ if (!mt76_is_mmio(dev))
+ return;
+
+ if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state))
+ return;
+
+ pm->last_activity = jiffies;
+
+ if (test_bit(MT76_HW_SCANNING, &phy->state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
+ return;
+
+ if (!test_bit(MT76_STATE_PM, &phy->state))
+ queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);
+
+void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
+ struct mt76_wcid *wcid)
+{
+ int i;
+
+ spin_lock_bh(&pm->txq_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (wcid && pm->tx_q[i].wcid != wcid)
+ continue;
+
+ dev_kfree_skb(pm->tx_q[i].skb);
+ pm->tx_q[i].skb = NULL;
+ }
+ spin_unlock_bh(&pm->txq_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_free_pending_tx_skbs);
+
+void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
+ struct mt76_connac_pm *pm,
+ struct mt76_wcid *wcid,
+ struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+ struct mt76_phy *phy = hw->priv;
+
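+	/* park at most one frame per AC while the device sleeps; anything
+	 * beyond that is dropped until the wake worker flushes the queue
+	 */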
+ spin_lock_bh(&pm->txq_lock);
+ if (!pm->tx_q[qid].skb) {
+ ieee80211_stop_queues(hw);
+ pm->tx_q[qid].wcid = wcid;
+ pm->tx_q[qid].skb = skb;
+ queue_work(phy->dev->wq, &pm->wake_work);
+ } else {
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&pm->txq_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_pm_queue_skb);
+
+void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
+ struct mt76_connac_pm *pm)
+{
+ int i;
+
+ spin_lock_bh(&pm->txq_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ struct mt76_wcid *wcid = pm->tx_q[i].wcid;
+ struct ieee80211_sta *sta = NULL;
+
+ if (!pm->tx_q[i].skb)
+ continue;
+
+ if (wcid && wcid->sta)
+ sta = container_of((void *)wcid, struct ieee80211_sta,
+ drv_priv);
+
+ mt76_tx(phy, sta, wcid, pm->tx_q[i].skb);
+ pm->tx_q[i].skb = NULL;
+ }
+ spin_unlock_bh(&pm->txq_lock);
+
+ mt76_worker_schedule(&phy->dev->tx_worker);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
new file mode 100644
index 000000000000..6cbccfb05f8b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -0,0 +1,1842 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt76_connac_mcu.h"
+
+int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_FW_START_REQ, &req, sizeof(req),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_firmware);
+
+int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get)
+{
+ u32 op = get ? PATCH_SEM_GET : PATCH_SEM_RELEASE;
+ struct {
+ __le32 op;
+ } req = {
+ .op = cpu_to_le32(op),
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_SEM_CONTROL, &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_patch_sem_ctrl);
+
+int mt76_connac_mcu_start_patch(struct mt76_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_FINISH_REQ, &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_patch);
+
+#define MCU_PATCH_ADDRESS 0x200000
+
+int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
+ u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+ int cmd;
+
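+	/* mt7921 expects the patch download command for the ROM patch regions
+	 * matched below; everything else goes through the generic target
+	 * address/length request
+	 */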
+ if (is_mt7921(dev) &&
+ (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
+ cmd = MCU_CMD_PATCH_START_REQ;
+ else
+ cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+
+ return mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_init_download);
+
+int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_connac_mcu_channel_domain {
+ u8 alpha2[4]; /* regulatory_request.alpha2 */
+ u8 bw_2g; /* BW_20_40M 0
+ * BW_20M 1
+ * BW_20_40_80M 2
+ * BW_20_40_80_160M 3
+ * BW_20_40_80_8080M 4
+ */
+ u8 bw_5g;
+ __le16 pad;
+ u8 n_2ch;
+ u8 n_5ch;
+ __le16 pad2;
+ } __packed hdr = {
+ .bw_2g = 0,
+ .bw_5g = 3,
+ };
+ struct mt76_connac_mcu_chan {
+ __le16 hw_value;
+ __le16 pad;
+ __le32 flags;
+ } __packed channel;
+ int len, i, n_max_channels, n_2ch = 0, n_5ch = 0;
+ struct ieee80211_channel *chan;
+ struct sk_buff *skb;
+
+ n_max_channels = phy->sband_2g.sband.n_channels +
+ phy->sband_5g.sband.n_channels;
+ len = sizeof(hdr) + n_max_channels * sizeof(channel);
+
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, sizeof(hdr));
+
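+	/* append the enabled 2 GHz and 5 GHz channels first; the header with
+	 * the final per-band counts is pushed in front of them below
+	 */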
+ for (i = 0; i < phy->sband_2g.sband.n_channels; i++) {
+ chan = &phy->sband_2g.sband.channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ channel.hw_value = cpu_to_le16(chan->hw_value);
+ channel.flags = cpu_to_le32(chan->flags);
+ channel.pad = 0;
+
+ skb_put_data(skb, &channel, sizeof(channel));
+ n_2ch++;
+ }
+ for (i = 0; i < phy->sband_5g.sband.n_channels; i++) {
+ chan = &phy->sband_5g.sband.channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ channel.hw_value = cpu_to_le16(chan->hw_value);
+ channel.flags = cpu_to_le32(chan->flags);
+ channel.pad = 0;
+
+ skb_put_data(skb, &channel, sizeof(channel));
+ n_5ch++;
+ }
+
+ BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(hdr.alpha2));
+ memcpy(hdr.alpha2, dev->alpha2, sizeof(dev->alpha2));
+ hdr.n_2ch = n_2ch;
+ hdr.n_5ch = n_5ch;
+
+ memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain);
+
+int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable,
+ bool hdr_trans)
+{
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req_mac = {
+ .enable = enable,
+ .band = band,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD_MAC_INIT_CTRL, &req_mac,
+ sizeof(req_mac), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);
+
+int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 ps_state; /* 0: device awake
+ * 1: static power save
+ * 2: dynamic power saving
+ */
+ } req = {
+ .bss_idx = mvif->idx,
+ .ps_state = vif->bss_conf.ps ? 2 : 0,
+ };
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps);
+
+int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band)
+{
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = band,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD_PROTECT_CTRL, &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);
+
+void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_connac_beacon_loss_event *event = priv;
+
+ if (mvif->idx != event->bss_idx)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ ieee80211_beacon_loss(vif);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_beacon_loss_iter);
+
+struct tlv *
+mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
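+	/* append the new TLV, bump the container's TLV count and, when the
+	 * TLV is nested inside a wtbl TLV, grow the parent length as well
+	 */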
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
+
+struct sk_buff *
+mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .muar_idx = wcid ? mvif->omac_idx : 0,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
+ &hdr.wlan_idx_hi);
+ skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
+
+struct wtbl_req_hdr *
+mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int cmd, void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
+ &hdr.wlan_idx_hi);
+ if (!nskb) {
+ nskb = mt76_mcu_msg_alloc(dev, NULL,
+ MT76_CONNAC_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ sta_hdr->len = cpu_to_le16(sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
+
+void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enable)
+{
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+ int conn_type;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->qos = sta->wme;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
+
+static void
+mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct sta_rec_uapsd *uapsd;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+ uapsd = (struct sta_rec_uapsd *)tlv;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+ uapsd->dac_map |= BIT(3);
+ uapsd->tac_map |= BIT(3);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+ uapsd->dac_map |= BIT(2);
+ uapsd->tac_map |= BIT(2);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+ uapsd->dac_map |= BIT(1);
+ uapsd->tac_map |= BIT(1);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+ uapsd->dac_map |= BIT(0);
+ uapsd->tac_map |= BIT(0);
+ }
+ uapsd->max_sp = sta->max_sp;
+}
+
+void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct wtbl_spe *spe;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_GENERIC,
+ sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ else
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ if (is_mt7921(dev) &&
+ vif->type == NL80211_IFTYPE_STATION)
+ memcpy(generic->peer_addr, vif->bss_conf.bssid,
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(generic->peer_addr);
+
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+
+ if (is_mt7921(dev))
+ return;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
+ wtbl_tlv, sta_wtbl);
+ spe = (struct wtbl_spe *)tlv;
+ spe->spe_idx = 24;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_generic_tlv);
+
+static void
+mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!sta->max_amsdu_len)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+
+ wcid->amsdu = true;
+}
+
+#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+static void
+mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_he *he;
+ struct tlv *tlv;
+ u32 cap = 0;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+
+ he = (struct sta_rec_he *)tlv;
+
+ if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ cap |= STA_REC_HE_CAP_HTC;
+
+ if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ cap |= STA_REC_HE_CAP_BSR;
+
+ if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ cap |= STA_REC_HE_CAP_OM;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
+ cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ cap |= STA_REC_HE_CAP_BQR;
+
+ if (elem->phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
+ cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ cap |= STA_REC_HE_CAP_LDPC;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
+ cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
+ cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;
+
+ if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
+ cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
+ cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;
+
+ he->he_cap = cpu_to_le32(cap);
+
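+	/* fill the MCS maps from the widest bandwidth downwards: the 160 MHz
+	 * case falls through so the 80 MHz map is always populated
+	 */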
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+
+ he->max_nss_mcs[CMD_HE_MCS_BW160] =
+ he_cap->he_mcs_nss_supp.rx_mcs_160;
+ fallthrough;
+ default:
+ he->max_nss_mcs[CMD_HE_MCS_BW80] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80;
+ break;
+ }
+
+ he->t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ he->max_ampdu_exp =
+ HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
+
+ he->bw_set =
+ HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
+ he->device_class =
+ HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
+ he->punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+
+ he->dcm_tx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
+ he->dcm_tx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
+	he->dcm_max_ru =
+		HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
+
+ he->pkt_ext = 2;
+}
+
+static u8
+mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ u8 mode = 0;
+
+ if (sta) {
+ ht_cap = &sta->ht_cap;
+ vht_cap = &sta->vht_cap;
+ he_cap = &sta->he_cap;
+ } else {
+ struct ieee80211_supported_band *sband;
+
+ sband = mphy->hw->wiphy->bands[band];
+ ht_cap = &sband->ht_cap;
+ vht_cap = &sband->vht_cap;
+ he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+ }
+
+ if (band == NL80211_BAND_2GHZ) {
+ mode |= PHY_TYPE_BIT_HR_DSSS | PHY_TYPE_BIT_ERP;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_TYPE_BIT_HT;
+
+ if (he_cap->has_he)
+ mode |= PHY_TYPE_BIT_HE;
+ } else if (band == NL80211_BAND_5GHZ) {
+ mode |= PHY_TYPE_BIT_OFDM;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_TYPE_BIT_HT;
+
+ if (vht_cap->vht_supported)
+ mode |= PHY_TYPE_BIT_VHT;
+
+ if (he_cap->has_he)
+ mode |= PHY_TYPE_BIT_HE;
+ }
+
+ return mode;
+}
+
+void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_dev *dev = mphy->dev;
+ struct sta_rec_ra_info *ra_info;
+ struct sta_rec_state *state;
+ struct sta_rec_phy *phy;
+ struct tlv *tlv;
+
+ /* starec ht */
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ }
+
+ /* starec vht */
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+ int len;
+
+ len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ }
+
+ /* starec uapsd */
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
+
+ if (!is_mt7921(dev))
+ return;
+
+ if (sta->ht_cap.ht_supported)
+ mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);
+
+ /* starec he */
+ if (sta->he_cap.has_he)
+ mt76_connac_mcu_sta_he_tlv(skb, sta);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
+ phy = (struct sta_rec_phy *)tlv;
+ phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
+ phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
+ ra_info = (struct sta_rec_ra_info *)tlv;
+ ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]);
+
+ if (sta->ht_cap.ht_supported)
+ memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
+ HT_MCS_MASK_NUM);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
+ state = (struct sta_rec_state *)tlv;
+ state->state = 2;
+
+ if (sta->vht_cap.vht_supported) {
+ state->vht_opmode = sta->bandwidth;
+ state->vht_opmode |= (sta->rx_nss - 1) <<
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
+
+static void
+mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_smps *smps;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ smps->smps = true;
+}
+
+void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ht *ht = NULL;
+ struct tlv *tlv;
+ u32 flags = 0;
+
+ if (sta->ht_cap.ht_supported) {
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = true;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+ u8 af;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_VHT,
+ sizeof(*vht), wtbl_tlv,
+ sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->vht = true;
+
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->vht_cap.cap);
+ if (ht)
+ ht->af = max(ht->af, af);
+ }
+
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
+
+ if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
+ /* sgi */
+ u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+ struct wtbl_raw *raw;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
+ sizeof(*raw), wtbl_tlv,
+ sta_wtbl);
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ flags |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ flags |= MT_WTBL_W5_SHORT_GI_40;
+
+ if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ flags |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ flags |= MT_WTBL_W5_SHORT_GI_160;
+ }
+ raw = (struct wtbl_raw *)tlv;
+ raw->val = cpu_to_le32(flags);
+ raw->msk = cpu_to_le32(~msk);
+ raw->wtbl_idx = 1;
+ raw->dw = 5;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
+
+int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid,
+ bool enable, int cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_dev *dev = phy->dev;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta)
+ mt76_connac_mcu_sta_tlv(phy, skb, sta, vif);
+
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+
+	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
+						  WTBL_RESET_AND_SET,
+						  sta_wtbl, &skb);
+	if (IS_ERR(wtbl_hdr))
+		return PTR_ERR(wtbl_hdr);
+
+ if (enable) {
+ mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
+ wtbl_hdr);
+ if (sta)
+ mt76_connac_mcu_wtbl_ht_tlv(dev, skb, sta, sta_wtbl,
+ wtbl_hdr);
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
+
+void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (is_mt7921(dev))
+ return;
+
+ if (enable && tx) {
+ u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ int i;
+
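+		/* pick the largest window-size bucket that still fits the
+		 * negotiated buffer size
+		 */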
+ for (i = 7; i > 0; i--) {
+ if (params->buf_size >= ba_range[i])
+ break;
+ }
+ ba->ba_winsize_idx = i;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
+
+int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 pad;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_bss_basic_tlv basic;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = enable,
+ .bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
+ .sta_idx = cpu_to_le16(wcid->idx),
+ .conn_state = 1,
+ },
+ };
+ int err, idx, cmd, len;
+ void *data;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ basic_req.basic.hw_bss_idx = idx;
+
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+
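+	/* on enable the dev entry must exist before the bss referencing it,
+	 * so the two updates are issued in the opposite order on disable
+	 */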
+ cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
+ data = enable ? (void *)&dev_req : (void *)&basic_req;
+ len = enable ? sizeof(dev_req) : sizeof(basic_req);
+
+ err = mt76_mcu_send_msg(dev, cmd, data, len, true);
+ if (err < 0)
+ return err;
+
+ cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
+ data = enable ? (void *)&basic_req : (void *)&dev_req;
+ len = enable ? sizeof(basic_req) : sizeof(dev_req);
+
+ return mt76_mcu_send_msg(dev, cmd, data, len, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_dev);
+
+void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
+
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int ret;
+
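+	/* the BA session is programmed in two steps: first the wtbl entry,
+	 * then the matching sta_rec, each via its own STA_REC_UPDATE command
+	 */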
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
+ sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
+ wtbl_hdr);
+
+ ret = mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (ret)
+ return ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE,
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
+
+static u8
+mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_dev *dev = phy->dev;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ u8 mode = 0;
+
+ if (!is_mt7921(dev))
+ return 0x38;
+
+ if (sta) {
+ ht_cap = &sta->ht_cap;
+ vht_cap = &sta->vht_cap;
+ he_cap = &sta->he_cap;
+ } else {
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+ ht_cap = &sband->ht_cap;
+ vht_cap = &sband->vht_cap;
+ he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+ }
+
+ if (band == NL80211_BAND_2GHZ) {
+ mode |= PHY_MODE_B | PHY_MODE_G;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_GN;
+
+ if (he_cap->has_he)
+ mode |= PHY_MODE_AX_24G;
+ } else if (band == NL80211_BAND_5GHZ) {
+ mode |= PHY_MODE_A;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_AN;
+
+ if (vht_cap->vht_supported)
+ mode |= PHY_MODE_AC;
+
+ if (he_cap->has_he)
+ mode |= PHY_MODE_AX_5G;
+ }
+
+ return mode;
+}
+
+static const struct ieee80211_sta_he_cap *
+mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
+{
+ enum nl80211_band band = phy->chandef.chan->band;
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+
+ return ieee80211_get_he_iftype_cap(sband, vif->type);
+}
+
+#define DEFAULT_HE_PE_DURATION 4
+#define DEFAULT_HE_DURATION_RTS_THRES 1023
+static void
+mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct tlv *tlv)
+{
+ const struct ieee80211_sta_he_cap *cap;
+ struct bss_info_uni_he *he;
+
+ cap = mt76_connac_get_he_phy_cap(phy, vif);
+
+ he = (struct bss_info_uni_he *)tlv;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ if (!he->he_pe_duration)
+ he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ if (!he->he_rts_thres)
+ he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+ he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+ he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_dev *mdev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_bss_basic_tlv basic;
+ struct mt76_connac_bss_qos_tlv qos;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+			.active = true, /* on/off is driven via conn_state below */
+ .phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
+ },
+ .qos = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
+ .qos = vif->bss_conf.qos,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 short_st;
+ u8 ht_op_info;
+ u8 sco;
+ u8 pad[3];
+ } __packed rlm;
+ } __packed rlm_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .rlm = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
+ .len = cpu_to_le16(sizeof(struct rlm_tlv)),
+ .control_channel = chandef->chan->hw_value,
+ .center_chan = ieee80211_frequency_to_channel(freq1),
+ .center_chan2 = ieee80211_frequency_to_channel(freq2),
+ .tx_streams = hweight8(phy->antenna_mask),
+ .rx_streams = phy->chainmask,
+ .short_st = true,
+ },
+ };
+ int err, conn_type;
+ u8 idx;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ basic_req.basic.hw_bss_idx = idx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx);
+ basic_req.basic.sta_idx = cpu_to_le16(wcid->idx);
+ basic_req.basic.conn_state = !enable;
+
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req,
+ sizeof(basic_req), true);
+ if (err < 0)
+ return err;
+
+ if (vif->bss_conf.he_support) {
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bss_info_uni_he he;
+ } he_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .he = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
+ .len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
+ },
+ };
+
+ mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
+ (struct tlv *)&he_req.he);
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &he_req, sizeof(he_req), true);
+ if (err < 0)
+ return err;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rlm_req.rlm.bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ rlm_req.rlm.bw = CMD_CBW_20MHZ;
+ break;
+ }
+
+ if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 1; /* SCA */
+ else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 3; /* SCB */
+
+ return mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &rlm_req,
+ sizeof(rlm_req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
+
+#define MT76_CONNAC_SCAN_CHANNEL_TIME 60
+int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *sreq = &scan_req->req;
+ int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
+ int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt76_dev *mdev = phy->dev;
+ bool ext_phy = phy == mdev->phy2;
+ struct mt76_connac_mcu_scan_channel *chan;
+ struct mt76_connac_hw_scan_req *req;
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ set_bit(MT76_HW_SCANNING, &phy->state);
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
+
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->bss_idx = mvif->idx;
+ req->scan_type = sreq->n_ssids ? 1 : 0;
+ req->probe_req_num = sreq->n_ssids ? 2 : 0;
+ req->version = 1;
+
+ for (i = 0; i < sreq->n_ssids; i++) {
+ if (!sreq->ssids[i].ssid_len)
+ continue;
+
+ req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
+ memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
+ sreq->ssids[i].ssid_len);
+ n_ssids++;
+ }
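+	/* BIT(2) requests the specified ssids, BIT(0) a wildcard probe; the
+	 * exact bit semantics are firmware-defined and assumed here
+	 */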
+ req->ssid_type = n_ssids ? BIT(2) : BIT(0);
+ req->ssid_type_ext = n_ssids ? BIT(0) : 0;
+ req->ssids_num = n_ssids;
+
+ /* increase channel time for passive scan */
+ if (!sreq->n_ssids)
+ duration *= 2;
+ req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
+ req->channel_min_dwell_time = cpu_to_le16(duration);
+ req->channel_dwell_time = cpu_to_le16(duration);
+
+ req->channels_num = min_t(u8, sreq->n_channels, 32);
+ req->ext_channels_num = min_t(u8, ext_channels_num, 32);
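+	/* the command body holds 32 channels in-line; any further channels
+	 * (up to 32 more) spill over into the extension list
+	 */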
+ for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
+ if (i >= 32)
+ chan = &req->ext_channels[i - 32];
+ else
+ chan = &req->channels[i];
+
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+ req->channel_type = sreq->n_channels ? 4 : 0;
+
+ if (sreq->ie_len > 0) {
+ memcpy(req->ies, sreq->ie, sreq->ie_len);
+ req->ies_len = cpu_to_le16(sreq->ie_len);
+ }
+
+ memcpy(req->bssid, sreq->bssid, ETH_ALEN);
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
+ if (err < 0)
+ clear_bit(MT76_HW_SCANNING, &phy->state);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_hw_scan);
+
+int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ u8 seq_num;
+ u8 is_ext_channel;
+ u8 rsv[2];
+ } __packed req = {
+ .seq_num = mvif->scan_seq_num,
+ };
+
+ if (test_and_clear_bit(MT76_HW_SCANNING, &phy->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(phy->hw, &info);
+ }
+
+ return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan);
+
+int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt76_connac_mcu_scan_channel *chan;
+ struct mt76_connac_sched_scan_req *req;
+ struct mt76_dev *mdev = phy->dev;
+ bool ext_phy = phy == mdev->phy2;
+ struct cfg80211_match_set *match;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ int i;
+
+ skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req) + sreq->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
+ req->version = 1;
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ req->ssids_num = sreq->n_ssids;
+ for (i = 0; i < req->ssids_num; i++) {
+ ssid = &sreq->ssids[i];
+ memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
+ req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
+ }
+
+ req->match_num = sreq->n_match_sets;
+ for (i = 0; i < req->match_num; i++) {
+ match = &sreq->match_sets[i];
+ memcpy(req->match[i].ssid, match->ssid.ssid,
+ match->ssid.ssid_len);
+ req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
+ req->match[i].ssid_len = match->ssid.ssid_len;
+ }
+
+ req->channel_type = sreq->n_channels ? 4 : 0;
+ req->channels_num = min_t(u8, sreq->n_channels, 64);
+ for (i = 0; i < req->channels_num; i++) {
+ chan = &req->channels[i];
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+
+ req->intervals_num = sreq->n_scan_plans;
+ for (i = 0; i < req->intervals_num; i++)
+ req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);
+
+ if (sreq->ie_len > 0) {
+ req->ie_len = cpu_to_le16(sreq->ie_len);
+ memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
+ }
+
+ return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req);
+
+int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct {
+		u8 active; /* 0: sched scan enabled, 1: sched scan disabled */
+ u8 rsv[3];
+ } __packed req = {
+ .active = !enable,
+ };
+
+ if (enable)
+ set_bit(MT76_HW_SCHED_SCANNING, &phy->state);
+ else
+ clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
+
+ return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
+
+int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
+{
+ struct {
+ __le16 id;
+ u8 type;
+ u8 resp_type;
+ __le16 data_size;
+ __le16 resv;
+ u8 data[320];
+ } req = {
+ .resp_type = 0,
+ };
+
+ memcpy(req.data, "assert", 7);
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
+ false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
+
+void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_connac_coredump *coredump)
+{
+ spin_lock_bh(&dev->lock);
+ __skb_queue_tail(&coredump->msg_list, skb);
+ spin_unlock_bh(&dev->lock);
+
+ coredump->last_activity = jiffies;
+
+ queue_delayed_work(dev->wq, &coredump->work,
+ MT76_CONNAC_COREDUMP_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
+
+#ifdef CONFIG_PM
+
+const struct wiphy_wowlan_support mt76_connac_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = 1,
+ .pattern_min_len = 1,
+ .pattern_max_len = MT76_CONNAC_WOW_PATTEN_MAX_LEN,
+ .max_nd_match_sets = 10,
+};
+EXPORT_SYMBOL_GPL(mt76_connac_wowlan_support);
+
+static void
+mt76_connac_mcu_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct mt76_connac_gtk_rekey_tlv *gtk_tlv = data;
+ u32 cipher;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ key->cipher != WLAN_CIPHER_SUITE_CCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+ cipher = BIT(3);
+ } else {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+ cipher = BIT(4);
+ }
+
+	/* we assume a single pairwise key here */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
+ gtk_tlv->group_cipher = cpu_to_le32(cipher);
+ gtk_tlv->keyid = key->keyidx;
+ }
+}
+
+int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_connac_gtk_rekey_tlv *gtk_tlv;
+ struct mt76_phy *phy = hw->priv;
+ struct sk_buff *skb;
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(phy->dev, NULL,
+ sizeof(hdr) + sizeof(*gtk_tlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
+ sizeof(*gtk_tlv));
+ gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+ gtk_tlv->rekey_mode = 2;
+ gtk_tlv->option = 1;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(hw, vif, mt76_connac_mcu_key_iter, gtk_tlv);
+ rcu_read_unlock();
+
+ memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
+ memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
+ memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
+
+ return mt76_mcu_skb_send_msg(phy->dev, skb, MCU_UNI_CMD_OFFLOAD, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_gtk_rekey);
+
+static int
+mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ bool suspend)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arpns;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
+ .mode = suspend,
+ },
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
+ true);
+}
+
+static int
+mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ bool suspend)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_gtk_rekey_tlv gtk_tlv;
+ } __packed req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .gtk_tlv = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_gtk_rekey_tlv)),
+ .rekey_mode = !suspend,
+ },
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
+ true);
+}
+
+static int
+mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ bool enable, u8 mdtim,
+ bool wow_suspend)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_suspend_tlv suspend_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .suspend_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_suspend_tlv)),
+ .enable = enable,
+ .mdtim = mdtim,
+ .wow_suspend = wow_suspend,
+ },
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
+ true);
+}
+
+static int
+mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ u8 index, bool enable,
+ struct cfg80211_pkt_pattern *pattern)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_connac_wow_pattern_tlv *ptlv;
+ struct sk_buff *skb;
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(hdr) + sizeof(*ptlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+ ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ ptlv->data_len = pattern->pattern_len;
+ ptlv->enable = enable;
+ ptlv->index = index;
+
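+	/* the mask carries one bit per pattern byte, hence the /8 below */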
+ memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
+ memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
+}
+
+static int
+mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ bool suspend, struct cfg80211_wowlan *wowlan)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_wow_ctrl_tlv wow_ctrl_tlv;
+ struct mt76_connac_wow_gpio_param_tlv gpio_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .wow_ctrl_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_wow_ctrl_tlv)),
+ .cmd = suspend ? 1 : 2,
+ },
+ .gpio_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_wow_gpio_param_tlv)),
+			.gpio_pin = 0xff, /* let the firmware pick the GPIO pin */
+ },
+ };
+
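+	/* translate the cfg80211 triggers into the firmware bitmap: BIT(0)
+	 * magic packet, BIT(2) disconnect, BIT(5) net detect
+	 */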
+ if (wowlan->magic_pkt)
+ req.wow_ctrl_tlv.trigger |= BIT(0);
+ if (wowlan->disconnect)
+ req.wow_ctrl_tlv.trigger |= BIT(2);
+ if (wowlan->nd_config) {
+ mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
+ req.wow_ctrl_tlv.trigger |= BIT(5);
+ mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
+ }
+
+ if (mt76_is_mmio(dev))
+ req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
+ else if (mt76_is_usb(dev))
+ req.wow_ctrl_tlv.wakeup_hif = WOW_USB;
+ else if (mt76_is_sdio(dev))
+ req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
+ true);
+}
+
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
+{
+ struct {
+ struct {
+ u8 hif_type; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ */
+ u8 pad[3];
+ } __packed hdr;
+ struct hif_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 suspend;
+ } __packed hif_suspend;
+ } req = {
+ .hif_suspend = {
+ .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
+ .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
+ .suspend = suspend,
+ },
+ };
+
+ if (mt76_is_mmio(dev))
+ req.hdr.hif_type = 2;
+ else if (mt76_is_usb(dev))
+ req.hdr.hif_type = 1;
+ else if (mt76_is_sdio(dev))
+ req.hdr.hif_type = 0;
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_HIF_CTRL, &req, sizeof(req),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
+
+void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_phy *phy = priv;
+ bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->state);
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+ int i;
+
+ mt76_connac_mcu_set_gtk_rekey(phy->dev, vif, suspend);
+ mt76_connac_mcu_set_arp_filter(phy->dev, vif, suspend);
+
+ mt76_connac_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
+
+ for (i = 0; i < wowlan->n_patterns; i++)
+ mt76_connac_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
+ &wowlan->patterns[i]);
+ mt76_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_iter);
+
+#endif /* CONFIG_PM */
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
new file mode 100644
index 000000000000..c1e1df5f7cd7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -0,0 +1,979 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT76_CONNAC_MCU_H
+#define __MT76_CONNAC_MCU_H
+
+#include "mt76_connac.h"
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+/* sta_rec */
+
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
+struct sta_req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx_lo;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 wlan_idx_hi;
+ u8 rsv;
+} __packed;
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+ /* mt7921 */
+ u8 rts_bw_sig;
+ u8 rsv[3];
+} __packed;
+
+struct sta_rec_uapsd {
+ __le16 tag;
+ __le16 len;
+ u8 dac_map;
+ u8 tac_map;
+ u8 max_sp;
+ u8 rsv0;
+ __le16 listen_interval;
+ u8 rsv1[2];
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+struct sta_rec_he {
+ __le16 tag;
+ __le16 len;
+
+ __le32 he_cap;
+
+ u8 t_frame_dur;
+ u8 max_ampdu_exp;
+ u8 bw_set;
+ u8 device_class;
+ u8 dcm_tx_mode;
+ u8 dcm_tx_max_nss;
+ u8 dcm_rx_mode;
+ u8 dcm_rx_max_nss;
+ u8 dcm_max_ru;
+ u8 punc_pream_rx;
+ u8 pkt_ext;
+ u8 rsv1;
+
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+
+ u8 rsv2[2];
+} __packed;
+
+struct sta_rec_amsdu {
+ __le16 tag;
+ __le16 len;
+ u8 max_amsdu_num;
+ u8 max_mpdu_size;
+ u8 amsdu_en;
+ u8 rsv;
+} __packed;
+
+struct sta_rec_state {
+ __le16 tag;
+ __le16 len;
+ __le32 flags;
+ u8 state;
+ u8 vht_opmode;
+ u8 action;
+ u8 rsv[1];
+} __packed;
+
+#define HT_MCS_MASK_NUM 10
+struct sta_rec_ra_info {
+ __le16 tag;
+ __le16 len;
+ __le16 legacy;
+ u8 rx_mcs_bitmask[HT_MCS_MASK_NUM];
+} __packed;
+
+struct sta_rec_phy {
+ __le16 tag;
+ __le16 len;
+ __le16 basic_rate;
+ u8 phy_type;
+ u8 ampdu;
+ u8 rts_policy;
+ u8 rcpi;
+ u8 rsv[2];
+} __packed;
+
+/* wtbl_rec */
+
+struct wtbl_req_hdr {
+ u8 wlan_idx_lo;
+ u8 operation;
+ __le16 tlv_num;
+ u8 wlan_idx_hi;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_tx_ps {
+ __le16 tag;
+ __le16 len;
+ u8 txps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_hdr_trans {
+ __le16 tag;
+ __le16 len;
+ u8 to_ds;
+ u8 from_ds;
+ u8 disable_rx_trans;
+ u8 rsv;
+} __packed;
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+/* mt7615 only */
+
+struct wtbl_bf {
+ __le16 tag;
+ __le16 len;
+ u8 ibf;
+ u8 ebf;
+ u8 ibf_vht;
+ u8 ebf_vht;
+ u8 gid;
+ u8 pfmu_idx;
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_pn {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_spe {
+ __le16 tag;
+ __le16 len;
+ u8 spe_idx;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_raw {
+ __le16 tag;
+ __le16 len;
+ u8 wtbl_idx;
+ u8 dw;
+ u8 rsv[2];
+ __le32 msk;
+ __le32 val;
+} __packed;
+
+#define MT76_CONNAC_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) +\
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+#define MT76_CONNAC_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_he) + \
+ sizeof(struct sta_rec_ba) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct tlv) + \
+ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
+
+#define MT76_CONNAC_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
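+
+/* Usage sketch (an illustration from the sizes above, not a required call
+ * sequence): the *_MAX_SIZE totals bound the worst case in which every TLV
+ * is appended to a single request, so a caller can allocate once up front:
+ *
+ *   skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ *
+ * mt76_connac_mcu_alloc_sta_req(), declared further down, is assumed to do
+ * effectively this for its callers.
+ */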
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU,
+ STA_REC_BA,
+ STA_REC_STATE,
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_KEY,
+ STA_REC_WTBL,
+ STA_REC_HE,
+ STA_REC_HW_AMSDU,
+ STA_REC_WTBL_AADOM,
+ STA_REC_KEY_V2,
+ STA_REC_MURU,
+ STA_REC_MUEDCA,
+ STA_REC_BFEE,
+ STA_REC_PHY = 0x15,
+ STA_REC_MAX_NUM
+};
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsolete */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
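+
+/* Note: conn_type describes the peer, not the local interface; a station
+ * vif reports its AP peer, an AP vif reports its client.  A sketch of the
+ * mapping (assumed from mt76_connac_mcu_sta_basic_tlv(), declared below):
+ *
+ *   NL80211_IFTYPE_STATION -> CONNECTION_INFRA_AP
+ *   NL80211_IFTYPE_AP      -> CONNECTION_INFRA_STA
+ *   NL80211_IFTYPE_ADHOC   -> CONNECTION_IBSS_ADHOC
+ */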
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+/* HE MAC */
+#define STA_REC_HE_CAP_HTC BIT(0)
+#define STA_REC_HE_CAP_BQR BIT(1)
+#define STA_REC_HE_CAP_BSR BIT(2)
+#define STA_REC_HE_CAP_OM BIT(3)
+#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4)
+/* HE PHY */
+#define STA_REC_HE_CAP_DUAL_BAND BIT(5)
+#define STA_REC_HE_CAP_LDPC BIT(6)
+#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7)
+#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8)
+/* STBC */
+#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9)
+#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10)
+#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11)
+#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12)
+/* GI */
+#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13)
+#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14)
+#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15)
+#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16)
+#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17)
+/* 242 TONE */
+#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18)
+#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19)
+#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20)
+
+#define PHY_MODE_A BIT(0)
+#define PHY_MODE_B BIT(1)
+#define PHY_MODE_G BIT(2)
+#define PHY_MODE_GN BIT(3)
+#define PHY_MODE_AN BIT(4)
+#define PHY_MODE_AC BIT(5)
+#define PHY_MODE_AX_24G BIT(6)
+#define PHY_MODE_AX_5G BIT(7)
+#define PHY_MODE_AX_6G BIT(8)
+
+#define MODE_CCK BIT(0)
+#define MODE_OFDM BIT(1)
+#define MODE_HT BIT(2)
+#define MODE_VHT BIT(3)
+#define MODE_HE BIT(4)
+
+enum {
+ PHY_TYPE_HR_DSSS_INDEX = 0,
+ PHY_TYPE_ERP_INDEX,
+ PHY_TYPE_ERP_P2P_INDEX,
+ PHY_TYPE_OFDM_INDEX,
+ PHY_TYPE_HT_INDEX,
+ PHY_TYPE_VHT_INDEX,
+ PHY_TYPE_HE_INDEX,
+ PHY_TYPE_INDEX_NUM
+};
+
+#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
+#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
+#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)
+#define PHY_TYPE_BIT_HT BIT(PHY_TYPE_HT_INDEX)
+#define PHY_TYPE_BIT_VHT BIT(PHY_TYPE_VHT_INDEX)
+#define PHY_TYPE_BIT_HE BIT(PHY_TYPE_HE_INDEX)
+
+#define MT_WTBL_RATE_TX_MODE GENMASK(9, 6)
+#define MT_WTBL_RATE_MCS GENMASK(5, 0)
+#define MT_WTBL_RATE_NSS GENMASK(12, 10)
+#define MT_WTBL_RATE_HE_GI GENMASK(7, 4)
+#define MT_WTBL_RATE_GI GENMASK(3, 0)
+
+#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
+#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
+#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
+#define MT_WTBL_W5_SHORT_GI_80 BIT(10)
+#define MT_WTBL_W5_SHORT_GI_160 BIT(11)
+#define MT_WTBL_W5_BW_CAP GENMASK(13, 12)
+#define MT_WTBL_W5_MPDU_FAIL_COUNT GENMASK(25, 23)
+#define MT_WTBL_W5_MPDU_OK_COUNT GENMASK(28, 26)
+#define MT_WTBL_W5_RATE_IDX GENMASK(31, 29)
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+#define MCU_CMD_ACK BIT(0)
+#define MCU_CMD_UNI BIT(1)
+#define MCU_CMD_QUERY BIT(2)
+
+#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
+ MCU_CMD_QUERY)
+
+#define MCU_FW_PREFIX BIT(31)
+#define MCU_UNI_PREFIX BIT(30)
+#define MCU_CE_PREFIX BIT(29)
+#define MCU_QUERY_PREFIX BIT(28)
+#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
+ MCU_CE_PREFIX | MCU_QUERY_PREFIX)
+
+#define MCU_QUERY_MASK BIT(16)
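+
+/* The prefix bits let a single u32 carry routing and the firmware-visible
+ * ID together: MCU_UNI_CMD_STA_REC_UPDATE below is (MCU_UNI_PREFIX | 0x03),
+ * and (cmd & MCU_CMD_MASK) recovers the bare 0x03.  This is a reading of
+ * the definitions above; dispatch itself is per chip, in each driver's
+ * mcu_send_message hook.
+ */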
+
+enum {
+ MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
+ MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_GET_TEMP = 0x2c,
+ MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+ MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+ MCU_EXT_CMD_ATE_CTRL = 0x3d,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_DBDC_CTRL = 0x45,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
+ MCU_EXT_CMD_MUAR_UPDATE = 0x48,
+ MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_RXDCOC_CAL = 0x59,
+ MCU_EXT_CMD_TXDPD_CAL = 0x60,
+ MCU_EXT_CMD_SET_RDD_TH = 0x7c,
+ MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
+};
+
+enum {
+ MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
+ MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
+ MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
+ MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
+ MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
+ MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
+ MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_NIC_POWER_CTRL = MCU_FW_PREFIX | 0x4,
+ MCU_CMD_PATCH_START_REQ = MCU_FW_PREFIX | 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
+ MCU_CMD_EXT_CID = 0xed,
+ MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xee,
+ MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xef,
+};
+
+/* offload mcu commands */
+enum {
+ MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
+ MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
+ MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
+ MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
+ MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
+ MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
+ MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1d,
+ MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
+ MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
+ MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
+ MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+ MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
+ MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
+ MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
+ MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5,
+ MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd,
+};
+
+enum {
+ PATCH_SEM_RELEASE,
+ PATCH_SEM_GET
+};
+
+enum {
+ UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_RLM = 2,
+ UNI_BSS_INFO_HE_BASIC = 5,
+ UNI_BSS_INFO_BCN_CONTENT = 7,
+ UNI_BSS_INFO_QBSS = 15,
+ UNI_BSS_INFO_UAPSD = 19,
+ UNI_BSS_INFO_PS = 21,
+ UNI_BSS_INFO_BCNFT = 22,
+};
+
+enum {
+ UNI_OFFLOAD_OFFLOAD_ARP,
+ UNI_OFFLOAD_OFFLOAD_ND,
+ UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
+ UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
+};
+
+enum {
+ UNI_SUSPEND_MODE_SETTING,
+ UNI_SUSPEND_WOW_CTRL,
+ UNI_SUSPEND_WOW_GPIO_PARAM,
+ UNI_SUSPEND_WOW_WAKEUP_PORT,
+ UNI_SUSPEND_WOW_PATTERN,
+};
+
+enum {
+ WOW_USB = 1,
+ WOW_PCIE = 2,
+ WOW_GPIO = 3,
+};
+
+struct mt76_connac_bss_basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 omac_idx;
+ u8 hw_bss_idx;
+ u8 band_idx;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 wmm_idx;
+ u8 bssid[ETH_ALEN];
+ __le16 bmc_tx_wlan_idx;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 phymode; /* bit(0): A
+ * bit(1): B
+ * bit(2): G
+ * bit(3): GN
+ * bit(4): AN
+ * bit(5): AC
+ */
+ __le16 sta_idx;
+ u8 nonht_basic_phy;
+ u8 pad[3];
+} __packed;
+
+struct mt76_connac_bss_qos_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 qos;
+ u8 pad[3];
+} __packed;
+
+struct mt76_connac_beacon_loss_event {
+ u8 bss_idx;
+ u8 reason;
+ u8 pad[2];
+} __packed;
+
+struct mt76_connac_mcu_bss_event {
+ u8 bss_idx;
+ u8 is_absent;
+ u8 free_quota;
+ u8 pad;
+} __packed;
+
+struct mt76_connac_mcu_scan_ssid {
+ __le32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+struct mt76_connac_mcu_scan_channel {
+ u8 band; /* 1: 2.4GHz
+ * 2: 5.0GHz
+ * Others: Reserved
+ */
+ u8 channel_num;
+} __packed;
+
+struct mt76_connac_mcu_scan_match {
+ __le32 rssi_th;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 rsv[3];
+} __packed;
+
+struct mt76_connac_hw_scan_req {
+ u8 seq_num;
+ u8 bss_idx;
+ u8 scan_type; /* 0: PASSIVE SCAN
+ * 1: ACTIVE SCAN
+ */
+ u8 ssid_type; /* BIT(0) wildcard SSID
+ * BIT(1) P2P wildcard SSID
+ * BIT(2) specified SSID + wildcard SSID
+ * BIT(2) + ssid_type_ext BIT(0) specified SSID only
+ */
+ u8 ssids_num;
+ u8 probe_req_num; /* number of probe requests per SSID */
+ u8 scan_func; /* BIT(0) enable random MAC scan
+ * BIT(1) disable DBDC scan types 1~3
+ * BIT(2) use DBDC scan type 3 (dedicate one RF to scanning)
+ */
+ u8 version; /* 0: fields after ies are not supported
+ * 1: fields after ies are supported
+ */
+ struct mt76_connac_mcu_scan_ssid ssids[4];
+ __le16 probe_delay_time;
+ __le16 channel_dwell_time; /* per-channel dwell interval */
+ __le16 timeout_value;
+ u8 channel_type; /* 0: Full channels
+ * 1: Only 2.4GHz channels
+ * 2: Only 5GHz channels
+ * 3: P2P social channel only (channel #1, #6 and #11)
+ * 4: Specified channels
+ * Others: Reserved
+ */
+ u8 channels_num; /* valid when channel_type is 4 */
+ /* valid when channels_num is set */
+ struct mt76_connac_mcu_scan_channel channels[32];
+ __le16 ies_len;
+ u8 ies[MT76_CONNAC_SCAN_IE_LEN];
+ /* following fields are valid if version > 0 */
+ u8 ext_channels_num;
+ u8 ext_ssids_num;
+ __le16 channel_min_dwell_time;
+ struct mt76_connac_mcu_scan_channel ext_channels[32];
+ struct mt76_connac_mcu_scan_ssid ext_ssids[6];
+ u8 bssid[ETH_ALEN];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set. */
+ u8 pad[63];
+ u8 ssid_type_ext;
+} __packed;
+
+#define MT76_CONNAC_SCAN_DONE_EVENT_MAX_CHANNEL_NUM 64
+
+struct mt76_connac_hw_scan_done {
+ u8 seq_num;
+ u8 sparse_channel_num;
+ struct mt76_connac_mcu_scan_channel sparse_channel;
+ u8 complete_channel_num;
+ u8 current_state;
+ u8 version;
+ u8 pad;
+ __le32 beacon_scan_num;
+ u8 pno_enabled;
+ u8 pad2[3];
+ u8 sparse_channel_valid_num;
+ u8 pad3[3];
+ u8 channel_num[MT76_CONNAC_SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ /* idle format for channel_idle_time
+ * 0: first byte: idle time (ms), 2nd byte: dwell time (ms)
+ * 1: first byte: idle time (8ms), 2nd byte: dwell time (8ms)
+ * 2: dwell time (16us)
+ */
+ __le16 channel_idle_time[MT76_CONNAC_SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ /* beacon and probe response count */
+ u8 beacon_probe_num[MT76_CONNAC_SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ u8 mdrdy_count[MT76_CONNAC_SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ __le32 beacon_2g_num;
+ __le32 beacon_5g_num;
+} __packed;
+
+struct mt76_connac_sched_scan_req {
+ u8 version;
+ u8 seq_num;
+ u8 stop_on_match;
+ u8 ssids_num;
+ u8 match_num;
+ u8 pad;
+ __le16 ie_len;
+ struct mt76_connac_mcu_scan_ssid ssids[MT76_CONNAC_MAX_SCHED_SCAN_SSID];
+ struct mt76_connac_mcu_scan_match match[MT76_CONNAC_MAX_SCAN_MATCH];
+ u8 channel_type;
+ u8 channels_num;
+ u8 intervals_num;
+ u8 scan_func; /* BIT(0) enable random MAC address */
+ struct mt76_connac_mcu_scan_channel channels[64];
+ __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
+ u8 pad2[58];
+} __packed;
+
+struct mt76_connac_sched_scan_done {
+ u8 seq_num;
+ u8 status; /* 0: ssid found */
+ __le16 pad;
+} __packed;
+
+struct bss_info_uni_he {
+ __le16 tag;
+ __le16 len;
+ __le16 he_rts_thres;
+ u8 he_pe_duration;
+ u8 su_disable;
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+ u8 rsv[2];
+} __packed;
+
+struct mt76_connac_gtk_rekey_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_LEN];
+ u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+ u8 rekey_mode; /* 0: rekey offload enable
+ * 1: rekey offload disable
+ * 2: rekey update
+ */
+ u8 keyid;
+ u8 pad[2];
+ __le32 proto; /* WPA-RSN-WAPI-OPSN */
+ __le32 pairwise_cipher;
+ __le32 group_cipher;
+ __le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
+ __le32 mgmt_group_cipher;
+ u8 option; /* 1: rekey data update without enabling offload */
+ u8 reserved[3];
+} __packed;
+
+#define MT76_CONNAC_WOW_MASK_MAX_LEN 16
+#define MT76_CONNAC_WOW_PATTEN_MAX_LEN 128
+
+struct mt76_connac_wow_pattern_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 index; /* pattern index */
+ u8 enable; /* 0: disable
+ * 1: enable
+ */
+ u8 data_len; /* pattern length */
+ u8 pad;
+ u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
+ u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
+ u8 rsv[4];
+} __packed;
+
+struct mt76_connac_wow_ctrl_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
+ * 0x2: PM_WOWLAN_REQ_STOP
+ * 0x3: PM_WOWLAN_PARAM_CLEAR
+ */
+ u8 trigger; /* 0: NONE
+ * BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
+ * BIT(1): NL80211_WOWLAN_TRIG_ANY
+ * BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
+ * BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
+ * BIT(4): BEACON_LOST
+ * BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
+ */
+ u8 wakeup_hif; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ * 0x3: HIF_GPIO
+ */
+ u8 pad;
+ u8 rsv[4];
+} __packed;
+
+struct mt76_connac_wow_gpio_param_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 gpio_pin;
+ u8 trigger_lvl;
+ u8 pad[2];
+ __le32 gpio_interval;
+ u8 rsv[4];
+} __packed;
+
+struct mt76_connac_arpns_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 mode;
+ u8 ips_num;
+ u8 option;
+ u8 pad[1];
+} __packed;
+
+struct mt76_connac_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable; /* 0: suspend mode disabled
+ * 1: suspend mode enabled
+ */
+ u8 mdtim; /* LP parameter */
+ u8 wow_suspend; /* 0: update by original policy
+ * 1: update by WoW DTIM
+ */
+ u8 pad[5];
+} __packed;
+
+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)(id))
+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)(id))
+
+static inline void
+mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ u8 *wlan_idx_lo, u8 *wlan_idx_hi)
+{
+ *wlan_idx_hi = 0;
+
+ if (is_mt7921(dev)) {
+ *wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
+ *wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
+ } else {
+ *wlan_idx_lo = wcid ? wcid->idx : 0;
+ }
+}
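+
+/* Worked example, following directly from the macros above: mt7921 uses
+ * 10-bit wcid indices, so idx = 0x1ab splits into wlan_idx_lo = 0xab and
+ * wlan_idx_hi = 0x1, matching the lo/hi fields of sta_req_hdr and
+ * wtbl_req_hdr.  Older chips keep the whole index in wlan_idx_lo.
+ */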
+
+struct sk_buff *
+mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid);
+struct wtbl_req_hdr *
+mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int cmd, void *sta_wtbl, struct sk_buff **skb);
+struct tlv *mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag,
+ int len, void *sta_ntlv,
+ void *sta_wtbl);
+static inline struct tlv *
+mt76_connac_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt76_connac_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
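+
+/* Minimal composition sketch (assuming the helpers behave as declared
+ * here; error handling omitted):
+ *
+ *   skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ *   tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BASIC,
+ *                                 sizeof(struct sta_rec_basic));
+ *
+ * Each appended TLV also bumps tlv_num in the sta_req_hdr at skb->data,
+ * which is why the nested variant takes the header pointer (sta_ntlv)
+ * explicitly.
+ */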
+
+int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
+int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
+void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv);
+void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif);
+void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv);
+void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv);
+void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx);
+int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid,
+ bool enable);
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx);
+int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid,
+ bool enable);
+int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid,
+ bool enable, int cmd);
+void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
+int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable,
+ bool hdr_trans);
+int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
+ u32 mode);
+int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
+int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
+int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+
+int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req);
+int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq);
+int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
+int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key);
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
+void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
+void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_connac_coredump *coredump);
+#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index b87d8e136cb9..02d0aa0b815e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -16,7 +16,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@@ -28,7 +28,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
clear_bit(MT76_RESTART, &dev->mphy.state);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index b12cb17cb43d..a593a7796d23 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -82,7 +82,7 @@ static void mt76x0u_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
mt76u_stop_tx(&dev->mt76);
mt76x02u_exit_beacon_config(dev);
@@ -108,7 +108,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
return ret;
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index d626817a2103..4d58c2c1c0ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -82,8 +82,6 @@ struct mt76x02_dev {
struct mutex phy_mutex;
- u16 chainmask;
-
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 16b40a73fd1f..771bad60e1bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -345,7 +345,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
+ u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -685,7 +685,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
status->rate_idx = idx;
break;
case MT_PHY_TYPE_VHT: {
- u8 n_rxstream = dev->chainmask & 0xf;
+ u8 n_rxstream = dev->mphy.chainmask & 0xf;
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
@@ -777,7 +777,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0, nstreams = dev->chainmask & 0xf;
+ int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
@@ -1162,7 +1162,7 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
void mt76x02_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
- mt76.mac_work.work);
+ mphy.mac_work.work);
int i, idx;
mutex_lock(&dev->mt76.mutex);
@@ -1185,7 +1185,7 @@ void mt76x02_mac_work(struct work_struct *work)
mt76_tx_status_check(&dev->mt76, NULL, false);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index e7e87311d355..e7a46ac97f51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -416,7 +416,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
memset(msta, 0, sizeof(*msta));
}
- dev->mphy.vif_mask = 0;
+ dev->mt76.vif_mask = 0;
dev->mt76.beacon_mask = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index aaadc15ea83c..2e53b0c1afdd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -16,7 +16,7 @@ void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->chainmask & 0xf) {
+ switch (dev->mphy.chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -35,7 +35,7 @@ void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->chainmask >> 8) & 0xf;
+ txpath = (dev->mphy.chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 7ac20d3c16d7..ab671e21f882 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -149,7 +149,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt76x02_mac_work);
hw->queues = 4;
hw->max_rates = 1;
@@ -197,10 +197,10 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
IEEE80211_HT_CAP_LDPC_CODING;
dev->mphy.sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->chainmask = 0x202;
+ dev->mphy.chainmask = 0x202;
dev->mphy.antenna_mask = 3;
} else {
- dev->chainmask = 0x101;
+ dev->mphy.chainmask = 0x101;
dev->mphy.antenna_mask = 1;
}
}
@@ -304,7 +304,7 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
unsigned int idx = 0;
/* Allow to change address in HW if we create first interface. */
- if (!dev->mphy.vif_mask &&
+ if (!dev->mt76.vif_mask &&
(((vif->addr[0] ^ dev->mphy.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mphy.macaddr + 1, ETH_ALEN - 1)))
mt76x02_mac_setaddr(dev, vif->addr);
@@ -329,11 +329,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx += 8;
/* vif is already set or idx is 8 for AP/Mesh/... */
- if (dev->mphy.vif_mask & BIT(idx) ||
+ if (dev->mt76.vif_mask & BIT(idx) ||
(vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
- dev->mphy.vif_mask |= BIT(idx);
+ dev->mt76.vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
@@ -346,7 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- dev->mphy.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 3c2738903d7d..ac83ce5f3e8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -29,7 +29,7 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
.idx = channel,
.scan = scan,
.bw = bw,
- .chainmask = cpu_to_le16(dev->chainmask),
+ .chainmask = cpu_to_le16(dev->mphy.chainmask),
};
/* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 620484390418..c6fa8cf92529 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -271,7 +271,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&dev->wdt_work);
clear_bit(MT76_RESTART, &dev->mphy.state);
mt76x02_mcu_set_radio_state(dev, false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 98f4cf398320..933125b07ea3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -14,7 +14,7 @@ mt76x2_start(struct ieee80211_hw *hw)
mt76x02_mac_start(dev);
mt76x2_phy_start(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
@@ -116,7 +116,7 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
mutex_lock(&dev->mt76.mutex);
- dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mphy.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
dev->mphy.antenna_mask = tx_ant;
mt76_set_stream_caps(&dev->mphy, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index ffc2deba29ac..85dcdc22fbeb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -236,7 +236,7 @@ fail:
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
mt76x2u_mac_stop(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index bab4e6e1904e..b66836928d9d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -15,7 +15,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
if (ret)
return ret;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 7d810fbf2862..77dcd71e49a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -6,6 +6,32 @@
/** global debugfs **/
+static int
+mt7915_implicit_txbf_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EBUSY;
+
+ dev->ibf = !!val;
+
+ return mt7915_mcu_set_txbf_type(dev);
+}
+
+static int
+mt7915_implicit_txbf_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->ibf;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
+ mt7915_implicit_txbf_set, "%lld\n");
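+
+/* Usage (illustrative path): `echo 1 > <debugfs>/ieee80211/phyN/mt76/
+ * implicit_txbf` toggles dev->ibf while the interface is down; the write
+ * is rejected with -EBUSY once MT76_STATE_RUNNING is set.
+ */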
+
/* test knob of system layer 1/2 error recovery */
static int mt7915_ser_trigger_set(void *data, u64 val)
{
@@ -355,6 +381,8 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
mt7915_queues_acq);
debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_file("implicit_txbf", 0600, dir, dev,
+ &fops_implicit_txbf);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
/* test knobs */
debugfs_create_file("radar_trigger", 0200, dir, dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 8c1f9c77b14f..bf51304a770b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -73,34 +73,41 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-void mt7915_dma_prefetch(struct mt7915_dev *dev)
+static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(base, depth) ((base) << 16 | (depth))
- mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x80, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL, PREFETCH(0xc0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL, PREFETCH(0x180, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL, PREFETCH(0x1c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL, PREFETCH(0x200, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL, PREFETCH(0x240, 0x4));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL, PREFETCH(0x280, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL, PREFETCH(0x2c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL, PREFETCH(0x300, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL, PREFETCH(0x340, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL, PREFETCH(0x380, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL, PREFETCH(0x3c0, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL, PREFETCH(0x3c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL, PREFETCH(0x400, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL, PREFETCH(0x440, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
+ mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
+}
+
+void mt7915_dma_prefetch(struct mt7915_dev *dev)
+{
+ __mt7915_dma_prefetch(dev, 0);
+ if (dev->hif2)
+ __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
}
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
@@ -204,6 +211,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
/* Increase buffer size to receive large VHT/HE MPDUs */
struct mt76_bus_ops *bus_ops;
int rx_buf_size = MT_RX_BUF_SIZE * 2;
+ u32 hif1_ofs = 0;
int ret;
dev->bus_ops = dev->mt76.bus;
@@ -219,14 +227,14 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt76_dma_attach(&dev->mt76);
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+
/* configure global setting */
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- /* configure perfetch settings */
- mt7915_dma_prefetch(dev);
-
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
@@ -235,6 +243,21 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ }
+
+ /* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
MT7915_TX_RING_SIZE);
@@ -283,7 +306,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
if (dev->dbdc_support) {
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
- rx_buf_size, MT_RX_DATA_RING_BASE);
+ rx_buf_size,
+ MT_RX_DATA_RING_BASE + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
+ MT7915_RXQ_MCU_WA_EXT,
+ MT7915_RX_MCU_RING_SIZE,
+ rx_buf_size,
+ MT_RX_EVENT_RING_BASE + hif1_ofs);
if (ret)
return ret;
}
@@ -326,6 +359,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN));
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+ }
+
/* enable interrupts for TX/RX rings */
mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 7a2be3f61398..660398ac53c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -22,7 +22,10 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
if (ret < 0)
return ret;
- memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+ if (ret)
+ dev->flash_mode = true;
+ else
+ memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
return 0;
}
@@ -50,12 +53,15 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
u32 val;
val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
- val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
+ val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+ if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
+ val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+
switch (val) {
- case MT_EE_5GHZ:
+ case MT_EE_BAND_SEL_5GHZ:
phy->mt76->cap.has_5ghz = true;
break;
- case MT_EE_2GHZ:
+ case MT_EE_BAND_SEL_2GHZ:
phy->mt76->cap.has_2ghz = true;
break;
default:
@@ -67,26 +73,30 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
{
- u8 nss, tx_mask[2] = {}, *eeprom = dev->mt76.eeprom.data;
+ u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
mt7915_eeprom_parse_band_config(&dev->phy);
/* read tx mask from eeprom */
- tx_mask[0] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
- eeprom[MT_EE_WIFI_CONF]);
- if (dev->dbdc_support)
- tx_mask[1] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
- eeprom[MT_EE_WIFI_CONF + 1]);
-
- nss = tx_mask[0] + tx_mask[1];
- if (!nss || nss > 4) {
- tx_mask[0] = 4;
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
+ if (!nss || nss > 4)
nss = 4;
+
+ nss_band = nss;
+
+ if (dev->dbdc_support) {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (!nss_band || nss_band > 2)
+ nss_band = 2;
+
+ if (nss_band >= nss)
+ nss = 4;
}
dev->chainmask = BIT(nss) - 1;
- dev->mphy.antenna_mask = BIT(tx_mask[0]) - 1;
- dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.antenna_mask = BIT(nss_band) - 1;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
}
int mt7915_eeprom_init(struct mt7915_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 6712032b40df..3ee8c27bb61b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -25,17 +25,20 @@ enum mt7915_eeprom_field {
__MT_EE_MAX = 0xe00
};
-#define MT_EE_WIFI_CONF_TX_MASK GENMASK(2, 0)
-#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(7, 6)
-#define MT_EE_WIFI_CONF_TSSI0_2G BIT(0)
-#define MT_EE_WIFI_CONF_TSSI0_5G BIT(2)
-#define MT_EE_WIFI_CONF_TSSI1_5G BIT(4)
+#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
+#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
+#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
+#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
+#define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2)
+#define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4)
enum mt7915_eeprom_band {
- MT_EE_DUAL_BAND,
- MT_EE_5GHZ,
- MT_EE_2GHZ,
- MT_EE_DBDC,
+ MT_EE_BAND_SEL_DEFAULT,
+ MT_EE_BAND_SEL_5GHZ,
+ MT_EE_BAND_SEL_2GHZ,
+ MT_EE_BAND_SEL_DUAL,
};
#define SKU_DELTA_VAL GENMASK(5, 0)
@@ -116,9 +119,9 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
/* TODO: DBDC */
if (band == NL80211_BAND_5GHZ)
- return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_5G;
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
else
- return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_2G;
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
}
extern const struct sku_group mt7915_sku_groups[];
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..ad4e5b95158b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
@@ -169,18 +169,19 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
{
int ret;
- /*
- * TODO: DBDC & check whether iBF phase calibration data has
- * been stored in eeprom offset 0x651~0x7b8, then write down
- * 0x1111 into 0x651 and 0x651 to trigger iBF.
- */
+
+ if (dev->dbdc_support) {
+ ret = mt7915_mcu_set_txbf_module(dev);
+ if (ret)
+ return ret;
+ }
/* trigger sounding packets */
ret = mt7915_mcu_set_txbf_sounding(dev);
if (ret)
return ret;
- /* enable iBF & eBF */
+ /* enable eBF */
return mt7915_mcu_set_txbf_type(dev);
}
@@ -235,12 +236,12 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
- mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
mt7915_init_wiphy(mphy->hw);
INIT_LIST_HEAD(&phy->stats_list);
- INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work);
+ INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_band_config(phy);
mt7915_set_stream_vht_txbf_caps(phy);
@@ -329,7 +330,7 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
{
- int nss = hweight8(phy->chainmask);
+ int nss = hweight8(phy->mt76->chainmask);
u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -440,8 +441,7 @@ static int
mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
- int i, idx = 0;
- int nss = hweight8(phy->chainmask);
+ int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
u16 mcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -622,7 +622,7 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->mt76.phy.priv = &dev->phy;
INIT_LIST_HEAD(&dev->phy.stats_list);
INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
- INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7915_mac_work);
INIT_LIST_HEAD(&dev->sta_rc_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
@@ -648,8 +648,8 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
mt76_set_stream_caps(&dev->mphy, true);
mt7915_set_stream_vht_txbf_caps(&dev->phy);
@@ -672,28 +672,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
void mt7915_unregister_device(struct mt7915_dev *dev)
{
- struct mt76_txwi_cache *txwi;
- int id;
-
mt7915_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
mt7915_dma_cleanup(dev);
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
- mt7915_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb) {
- struct ieee80211_hw *hw;
-
- hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
- ieee80211_free_txskb(hw, txwi->skb);
- }
- mt76_put_txwi(&dev->mt76, txwi);
- dev->token_count--;
- }
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
+ mt7915_tx_token_put(dev);
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index f504eeb221f9..eb889f8d6fea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -565,13 +565,20 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
#ifdef CONFIG_NL80211_TESTMODE
void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
+ struct mt7915_phy *phy = &dev->phy;
__le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv_hdr = rxd + 2;
__le32 *rxv = rxd + 4;
u32 rcpi, ib_rssi, wb_rssi, v20, v21;
+ bool ext_phy;
s32 foe;
u8 snr;
int i;
+ ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
+ if (ext_phy)
+ phy = mt7915_ext_phy(dev);
+
rcpi = le32_to_cpu(rxv[6]);
ib_rssi = le32_to_cpu(rxv[7]);
wb_rssi = le32_to_cpu(rxv[8]) >> 5;
@@ -580,9 +587,9 @@ void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
if (i == 3)
wb_rssi = le32_to_cpu(rxv[9]);
- dev->test.last_rcpi[i] = rcpi & 0xff;
- dev->test.last_ib_rssi[i] = ib_rssi & 0xff;
- dev->test.last_wb_rssi[i] = wb_rssi & 0xff;
+ phy->test.last_rcpi[i] = rcpi & 0xff;
+ phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
+ phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
}
v20 = le32_to_cpu(rxv[20]);
@@ -593,26 +600,26 @@ void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
- dev->test.last_freq_offset = foe;
- dev->test.last_snr = snr;
+ phy->test.last_freq_offset = foe;
+ phy->test.last_snr = snr;
dev_kfree_skb(skb);
}
#endif
static void
-mt7915_mac_write_txwi_tm(struct mt7915_dev *dev, struct mt76_phy *mphy,
- __le32 *txwi, struct sk_buff *skb)
+mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
- struct mt76_testmode_data *td = &dev->mt76.test;
+ struct mt76_testmode_data *td = &phy->mt76->test;
u8 rate_idx = td->tx_rate_idx;
u8 nss = td->tx_rate_nss;
u8 bw, mode;
u16 rateval = 0;
u32 val;
- if (skb != dev->mt76.test.tx_skb)
+ if (skb != phy->mt76->test.tx_skb)
return;
switch (td->tx_rate_mode) {
@@ -644,7 +651,7 @@ mt7915_mac_write_txwi_tm(struct mt7915_dev *dev, struct mt76_phy *mphy,
break;
}
- switch (mphy->chandef.width) {
+ switch (phy->mt76->chandef.width) {
case NL80211_CHAN_WIDTH_40:
bw = 1;
break;
@@ -693,12 +700,12 @@ mt7915_mac_write_txwi_tm(struct mt7915_dev *dev, struct mt76_phy *mphy,
if (mode >= MT_PHY_TYPE_HE_SU)
val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
- if (td->tx_rate_ldpc)
+ if (td->tx_rate_ldpc || bw > 0)
val |= MT_TXD6_LDPC;
txwi[6] |= cpu_to_le32(val);
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
- dev->test.spe_idx));
+ phy->test.spe_idx));
#endif
}
@@ -902,8 +909,8 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
- if (mt76_testmode_enabled(&dev->mt76))
- mt7915_mac_write_txwi_tm(dev, mphy, txwi, skb);
+ if (mt76_testmode_enabled(mphy))
+ mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
static void
@@ -942,6 +949,9 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
int id, i, nbuf = tx_info->nbuf - 1;
u8 *txwi = (u8 *)txwi_ptr;
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
if (!wcid)
wcid = &dev->mt76.global_wcid;
@@ -1048,20 +1058,19 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
status.rate = &msta->stats.tx_rate;
}
- hw = mt76_tx_status_get_hw(mdev, skb);
-
#ifdef CONFIG_NL80211_TESTMODE
- if (skb == mdev->test.tx_skb) {
+ if (mt76_is_testmode_skb(mdev, skb, &hw)) {
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct ieee80211_vif *vif = phy->monitor_vif;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb);
-
return;
}
#endif
+ hw = mt76_tx_status_get_hw(mdev, skb);
+
if (info->flags & IEEE80211_TX_CTL_AMPDU)
info->flags |= IEEE80211_TX_STAT_AMPDU;
@@ -1353,7 +1362,7 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
u32 val, sum = 0, n = 0;
int nss, i;
- for (nss = 0; nss < hweight8(phy->chainmask); nss++) {
+ for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
@@ -1434,7 +1443,15 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct ieee80211_hw *hw = priv;
- mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ break;
+ default:
+ break;
+ }
}
static void
@@ -1457,12 +1474,21 @@ mt7915_dma_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy_ext = dev->mt76.phy2;
+ u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_clear(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN));
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ }
usleep_range(1000, 2000);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
@@ -1483,6 +1509,35 @@ mt7915_dma_reset(struct mt7915_phy *phy)
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN));
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ }
+}
+
+void mt7915_tx_token_put(struct mt7915_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7915_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
+ mt76_put_txwi(&dev->mt76, txwi);
+ dev->token_count--;
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
}
/* system error recovery */
@@ -1506,9 +1561,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->phy.mac_work);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
if (phy2)
- cancel_delayed_work_sync(&phy2->mac_work);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
@@ -1525,6 +1580,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+ mt7915_tx_token_put(dev);
+ idr_init(&dev->token);
+
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
mt7915_dma_reset(&dev->phy);
@@ -1559,10 +1617,11 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt7915_update_beacons(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT7915_WATCHDOG_TIME);
if (phy2)
- ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &phy2->mt76->mac_work,
MT7915_WATCHDOG_TIME);
}
@@ -1673,17 +1732,17 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
void mt7915_mac_work(struct work_struct *work)
{
struct mt7915_phy *phy;
- struct mt76_dev *mdev;
+ struct mt76_phy *mphy;
- phy = (struct mt7915_phy *)container_of(work, struct mt7915_phy,
- mac_work.work);
- mdev = &phy->dev->mt76;
+ mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
+ mac_work.work);
+ phy = mphy->priv;
- mutex_lock(&mdev->mutex);
+ mutex_lock(&mphy->dev->mutex);
- mt76_update_survey(mdev);
- if (++phy->mac_work_count == 5) {
- phy->mac_work_count = 0;
+ mt76_update_survey(mphy->dev);
+ if (++mphy->mac_work_count == 5) {
+ mphy->mac_work_count = 0;
mt7915_mac_update_mib_stats(phy);
}
@@ -1691,11 +1750,11 @@ void mt7915_mac_work(struct work_struct *work)
if (++phy->sta_work_count == 10) {
phy->sta_work_count = 0;
mt7915_mac_sta_stats_work(phy);
- };
+ }
- mutex_unlock(&mdev->mutex);
+ mutex_unlock(&mphy->dev->mutex);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7915_WATCHDOG_TIME);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index d420392b952d..96ff3fb0d1f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -95,6 +95,8 @@ enum rx_pkt_type {
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXV_HDR_BAND_IDX BIT(24)
+
/* P-RXV */
#define MT_PRXV_TX_RATE GENMASK(6, 0)
#define MT_PRXV_TX_DCM BIT(4)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 0c82aa2ef219..d4969b2e1ffb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -26,6 +26,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool running;
+ flush_work(&dev->init_work);
+
mutex_lock(&dev->mt76.mutex);
running = mt7915_dev_running(dev);
@@ -44,13 +46,13 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 1);
}
- mt7915_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76));
- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7915_mcu_set_sku_en(phy, true);
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- if (!mt76_testmode_enabled(&dev->mt76))
- ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ if (!mt76_testmode_enabled(phy->mt76))
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7915_WATCHDOG_TIME);
if (!running)
@@ -66,11 +68,11 @@ static void mt7915_stop(struct ieee80211_hw *hw)
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
@@ -153,13 +155,13 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mutex_lock(&dev->mt76.mutex);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
if (vif->type == NL80211_IFTYPE_MONITOR &&
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->idx = ffs(~phy->mt76->vif_mask) - 1;
+ mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
if (mvif->idx >= MT7915_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -184,7 +186,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
- phy->mt76->vif_mask |= BIT(mvif->idx);
+ dev->mt76.vif_mask |= BIT(mvif->idx);
phy->omac_mask |= BIT_ULL(mvif->omac_idx);
idx = MT7915_WTBL_RESERVED - mvif->idx;
@@ -228,7 +230,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
/* TODO: disable beacon for the bss */
mutex_lock(&dev->mt76.mutex);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
mutex_unlock(&dev->mt76.mutex);
if (vif == phy->monitor_vif)
@@ -239,7 +241,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
- phy->mt76->vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT(mvif->idx);
phy->omac_mask &= ~BIT_ULL(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
@@ -273,7 +275,7 @@ int mt7915_set_channel(struct mt7915_phy *phy)
struct mt7915_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mac_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
@@ -281,7 +283,7 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
- ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
@@ -298,8 +300,9 @@ out:
mt76_txq_schedule_all(phy->mt76);
- if (!mt76_testmode_enabled(&dev->mt76))
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ if (!mt76_testmode_enabled(phy->mt76))
+ ieee80211_queue_delayed_work(phy->mt76->hw,
+ &phy->mt76->mac_work,
MT7915_WATCHDOG_TIME);
return ret;
@@ -365,9 +368,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->mt76.test.state != MT76_TM_STATE_OFF) {
+ if (phy->mt76->test.state != MT76_TM_STATE_OFF) {
mutex_lock(&dev->mt76.mutex);
- mt76_testmode_reset(&dev->mt76, false);
+ mt76_testmode_reset(phy->mt76, false);
mutex_unlock(&dev->mt76.mutex);
}
#endif
@@ -396,7 +399,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN,
enabled);
- mt76_testmode_reset(&dev->mt76, true);
+ mt76_testmode_reset(phy->mt76, true);
mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
@@ -427,7 +430,6 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
-
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -441,6 +443,8 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
+ mutex_lock(&dev->mt76.mutex);
+
phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
MT_WF_RFCR_DROP_FRAME_REPORT |
@@ -471,6 +475,8 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
else
mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+
+ mutex_unlock(&dev->mt76.mutex);
}
static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
@@ -808,7 +814,7 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
else
tx_ant <<= 1;
}
- phy->chainmask = tx_ant;
+ phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 5fdd1a6d32ee..195929242b72 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -66,9 +66,6 @@ struct mt7915_fw_region {
#define MCU_PATCH_ADDRESS 0x200000
-#define MT_STA_BFER BIT(0)
-#define MT_STA_BFEE BIT(1)
-
#define FW_FEATURE_SET_ENCRYPT BIT(0)
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
@@ -232,18 +229,14 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
if (seq != rxd->seq)
return -EAGAIN;
- switch (cmd) {
- case -MCU_CMD_PATCH_SEM_CONTROL:
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
- break;
- case MCU_EXT_CMD_THERMAL_CTRL:
+ } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
skb_pull(skb, sizeof(*rxd) + 4);
ret = le32_to_cpu(*(__le32 *)skb->data);
- break;
- default:
+ } else {
skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
- break;
}
return ret;
@@ -255,10 +248,10 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_mcu_txd *mcu_txd;
- u8 seq, pkt_fmt, qidx;
- enum mt76_txq_id txq;
+ enum mt76_mcuq_id qid;
__le32 *txd;
u32 val;
+ u8 seq;
/* TODO: make dynamic based on msg type */
mdev->mcu.timeout = 20 * HZ;
@@ -267,28 +260,22 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (!seq)
seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (cmd == -MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ if (cmd == MCU_CMD(FW_SCATTER)) {
+ qid = MT_MCUQ_FWDL;
goto exit;
}
mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
-
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
- txq = MT_MCUQ_WA;
- qidx = MT_TX_MCU_PORT_RX_Q0;
- pkt_fmt = MT_TX_TYPE_CMD;
- } else {
- txq = MT_MCUQ_WM;
- qidx = MT_TX_MCU_PORT_RX_Q0;
- pkt_fmt = MT_TX_TYPE_CMD;
- }
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ qid = MT_MCUQ_WA;
+ else
+ qid = MT_MCUQ_WM;
txd = mcu_txd->txd;
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
- FIELD_PREP(MT_TXD0_PKT_FMT, pkt_fmt) |
- FIELD_PREP(MT_TXD0_Q_IDX, qidx);
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
+ FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
txd[0] = cpu_to_le32(val);
val = MT_TXD1_LONG_FORMAT |
@@ -296,37 +283,50 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
txd[1] = cpu_to_le32(val);
mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
- mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, qidx));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
+ MT_TX_MCU_PORT_RX_Q0));
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
- if (cmd < 0) {
- mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->cid = -cmd;
- } else {
- mcu_txd->cid = MCU_CMD_EXT_CID;
- mcu_txd->ext_cid = cmd;
+ mcu_txd->cid = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
+ if (mcu_txd->ext_cid) {
mcu_txd->ext_cid_ack = 1;
/* do not use Q_SET for efuse */
- if (cmd == MCU_EXT_CMD_EFUSE_ACCESS)
+ if (cmd & __MCU_CMD_FIELD_QUERY)
mcu_txd->set_query = MCU_Q_QUERY;
else
mcu_txd->set_query = MCU_Q_SET;
}
- if (cmd == MCU_EXT_CMD_MWDS_SUPPORT)
+ if (cmd & __MCU_CMD_FIELD_WA)
mcu_txd->s2d_index = MCU_S2D_H2C;
else
mcu_txd->s2d_index = MCU_S2D_H2N;
- WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS &&
- mcu_txd->set_query != MCU_Q_QUERY);
exit:
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
+}
+
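+/* generic helper for WA firmware commands: the three u32 arguments
+ * are packed as a little-endian triplet and sent as the payload
+ */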
+static void
+mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
+{
+ struct {
+ __le32 args[3];
+ } req = {
+ .args = {
+ cpu_to_le32(a1),
+ cpu_to_le32(a2),
+ cpu_to_le32(a3),
+ },
+ };
+
+ mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
static void
@@ -674,6 +674,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MONITOR:
break;
case NL80211_IFTYPE_STATION:
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
@@ -702,16 +703,21 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
bss = (struct bss_info_basic *)tlv;
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->network_type = cpu_to_le32(type);
- bss->dtim_period = vif->bss_conf.dtim_period;
bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL);
bss->wmm_idx = mvif->wmm_idx;
bss->active = enable;
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL);
+ } else {
+ memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
+ }
+
return 0;
}
@@ -727,6 +733,7 @@ mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
type = CONNECTION_INFRA_AP;
@@ -832,9 +839,9 @@ static void
mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct mt7915_phy *phy)
{
+ int max_nss = hweight8(phy->mt76->chainmask);
struct bss_info_ra *ra;
struct tlv *tlv;
- int max_nss = hweight8(phy->chainmask);
tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
@@ -972,7 +979,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
if (enable)
ether_addr_copy(req.addr, addr);
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MUAR_UPDATE, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), &req,
sizeof(req), true);
}
@@ -996,6 +1003,9 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ goto out;
+
if (enable) {
mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
mt7915_mcu_bss_bmc_tlv(skb, phy);
@@ -1009,16 +1019,17 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mvif->omac_idx < REPEATER_BSSID_START)
mt7915_mcu_bss_ext_tlv(skb, mvif);
}
-
+out:
return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
- MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
/** starec & wtbl **/
static int
-mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
+ struct ieee80211_key_conf *key, enum set_key_cmd cmd)
{
+ struct mt7915_sta_key_conf *bip = &msta->bip;
struct sta_rec_sec *sec;
struct tlv *tlv;
u32 len = sizeof(*sec);
@@ -1038,22 +1049,23 @@ mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_id = key->keyidx;
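+	/* a BIP key update re-sends the CCMP key cached in msta->bip as
+	 * key[0], so the firmware receives both ciphers in one sec tlv
+	 */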
if (cipher == MT_CIPHER_BIP_CMAC_128) {
sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ sec_key->key_id = bip->keyidx;
sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
+ memcpy(sec_key->key, bip->key, 16);
sec_key = &sec->key[1];
sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
- memcpy(sec_key->key, key->key + 16, 16);
+ memcpy(sec_key->key, key->key, 16);
sec->n_cipher = 2;
} else {
sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
@@ -1063,6 +1075,12 @@ mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
memcpy(sec_key->key + 24, key->key + 16, 8);
}
+ /* store key_conf for BIP batch update */
+ if (cipher == MT_CIPHER_AES_CCMP) {
+ memcpy(bip->key, key->key, key->keylen);
+ bip->keyidx = key->keyidx;
+ }
+
len -= sizeof(*sec_key);
sec->n_cipher = 1;
}
@@ -1088,12 +1106,12 @@ int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = mt7915_mcu_sta_key_tlv(skb, key, cmd);
+ ret = mt7915_mcu_sta_key_tlv(msta, skb, key, cmd);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
static void
@@ -1107,7 +1125,7 @@ mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT,
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
ba->winsize = cpu_to_le16(params->buf_size);
ba->ssn = cpu_to_le16(params->ssn);
ba->ba_en = enable << params->tid;
@@ -1173,7 +1191,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
if (ret)
return ret;
@@ -1185,7 +1203,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
@@ -1521,7 +1539,7 @@ mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_sta_muru_tlv(skb, sta);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
static void
@@ -1688,7 +1706,7 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
true);
}
@@ -1713,12 +1731,13 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
static void
mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
{
+ bf->bf_cap = MT_EBF;
bf->sounding_phy = MT_PHY_TYPE_OFDM;
bf->ndp_rate = 0; /* mcs0 */
bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
@@ -1726,13 +1745,14 @@ mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
}
static void
-mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf)
+mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
+ struct sta_rec_bf *bf)
{
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
- bf->bf_cap |= MT_IBF;
+ bf->bf_cap = MT_IBF;
if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF &&
(mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
@@ -1745,43 +1765,46 @@ mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf)
else if (mcs->rx_mask[1])
n = 1;
+ bf->nr = hweight8(phy->mt76->chainmask) - 1;
bf->nc = min_t(u8, bf->nr, n);
- bf->ibf_ncol = bf->nc;
-
- if (sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc)
- bf->ibf_timeout = 0x48;
+ bf->ibf_ncol = n;
}
static void
mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
- struct sta_rec_bf *bf)
+ struct sta_rec_bf *bf, bool explicit)
{
struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
- u8 bfee_nr, bfer_nr, n, tx_ant = hweight8(phy->chainmask) - 1;
- u16 mcs_map;
+ u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+ u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+ u8 tx_ant = hweight8(phy->mt76->chainmask) - 1;
bf->tx_mode = MT_PHY_TYPE_VHT;
- bf->bf_cap |= MT_EBF;
-
- mt7915_mcu_sta_sounding_rate(bf);
- bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
- pc->cap);
- bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
- vc->cap);
- mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+ if (explicit) {
+ u8 bfee_nr, bfer_nr;
- n = min_t(u8, bfer_nr, bfee_nr);
- bf->nr = min_t(u8, n, tx_ant);
- n = mt7915_mcu_get_sta_nss(mcs_map);
+ mt7915_mcu_sta_sounding_rate(bf);
+ bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ vc->cap);
+ bf->nr = min_t(u8, min_t(u8, bfer_nr, bfee_nr), tx_ant);
+ bf->nc = min_t(u8, nss_mcs, bf->nr);
+ bf->ibf_ncol = bf->nc;
- bf->nc = min_t(u8, n, bf->nr);
- bf->ibf_ncol = bf->nc;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->nr = 1;
+ } else {
+ bf->bf_cap = MT_IBF;
+ bf->nr = tx_ant;
+ bf->nc = min_t(u8, nss_mcs, bf->nr);
+ bf->ibf_ncol = nss_mcs;
- /* force nr from 4 to 2 */
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
- bf->nr = 1;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->ibf_nrow = 1;
+ }
}
static void
@@ -1790,19 +1813,14 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
{
struct ieee80211_sta_he_cap *pc = &sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
- const struct ieee80211_he_cap_elem *ve;
- const struct ieee80211_sta_he_cap *vc;
- u8 bfee_nr, bfer_nr, nss_mcs;
- u16 mcs_map;
-
- vc = mt7915_get_he_phy_cap(phy, vif);
- ve = &vc->he_cap_elem;
+ const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
+ u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
+ u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+ u8 bfee_nr, bfer_nr;
bf->tx_mode = MT_PHY_TYPE_HE_SU;
- bf->bf_cap |= MT_EBF;
-
mt7915_mcu_sta_sounding_rate(bf);
-
bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB,
pe->phy_cap_info[6]);
bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB,
@@ -1811,10 +1829,6 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
ve->phy_cap_info[5]);
bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
pe->phy_cap_info[4]);
-
- mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.tx_mcs_80);
- nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
-
bf->nr = min_t(u8, bfer_nr, bfee_nr);
bf->nc = min_t(u8, nss_mcs, bf->nr);
bf->ibf_ncol = bf->nc;
@@ -1853,11 +1867,11 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
static void
mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, struct mt7915_phy *phy,
- bool enable)
+ bool enable, bool explicit)
{
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- int tx_ant = hweight8(phy->chainmask) - 1;
const u8 matrix[4][4] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
@@ -1875,19 +1889,29 @@ mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
return;
}
+	/* he: eBF only, in accordance with spec
+	 * vht: supports both eBF and iBF
+	 * ht: iBF only, since mac80211 lacks eBF support
+	 */
+ if (sta->he_cap.has_he && explicit)
+ mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
+ else if (sta->vht_cap.vht_supported)
+ mt7915_mcu_sta_bfer_vht(sta, phy, bf, explicit);
+ else if (sta->ht_cap.ht_supported)
+ mt7915_mcu_sta_bfer_ht(sta, phy, bf);
+ else
+ return;
+
bf->bw = sta->bandwidth;
bf->ibf_dbw = sta->bandwidth;
bf->ibf_nrow = tx_ant;
- bf->ibf_timeout = 0x18;
- if (sta->he_cap.has_he)
- mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
- else if (sta->vht_cap.vht_supported)
- mt7915_mcu_sta_bfer_vht(sta, phy, bf);
- else if (sta->ht_cap.ht_supported)
- mt7915_mcu_sta_bfer_ht(sta, bf);
+ if (!explicit && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc)
+ bf->ibf_timeout = 0x48;
+ else
+ bf->ibf_timeout = 0x18;
- if (bf->bf_cap & MT_EBF && bf->nr != tx_ant)
+ if (explicit && bf->nr != tx_ant)
bf->mem_20m = matrix[tx_ant][bf->nc];
else
bf->mem_20m = matrix[bf->nr][bf->nc];
@@ -1910,9 +1934,9 @@ static void
mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct mt7915_phy *phy)
{
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
- int tx_ant = hweight8(phy->chainmask) - 1;
u8 nr = 0;
tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
@@ -1931,20 +1955,26 @@ mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
/* reply with identity matrix to avoid 2x2 BF negative gain */
- if (nr == 1 && tx_ant == 2)
- bfee->fb_identity_matrix = true;
+ bfee->fb_identity_matrix = !!(nr == 1 && tx_ant == 2);
}
-static u8
-mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int
+mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
{
- u8 type = 0;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_phy *phy;
+ struct sk_buff *skb;
+ int r, len;
+ bool ebfee = 0, ebf = 0;
if (vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP)
return 0;
+ phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+
if (sta->he_cap.has_he) {
struct ieee80211_he_cap_elem *pe;
const struct ieee80211_he_cap_elem *ve;
@@ -1954,15 +1984,12 @@ mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif,
vc = mt7915_get_he_phy_cap(phy, vif);
ve = &vc->he_cap_elem;
- if ((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
- HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
- HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]))
- type |= MT_STA_BFEE;
-
- if ((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
- HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
- HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]))
- type |= MT_STA_BFER;
+ ebfee = !!((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]));
+ ebf = !!((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]));
} else if (sta->vht_cap.vht_supported) {
struct ieee80211_sta_vht_cap *pc;
struct ieee80211_sta_vht_cap *vc;
@@ -1975,53 +2002,30 @@ mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif,
ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- if ((pc->cap & cr) && (vc->cap & ce))
- type |= MT_STA_BFEE;
-
- if ((vc->cap & cr) && (pc->cap & ce))
- type |= MT_STA_BFER;
- } else if (sta->ht_cap.ht_supported) {
- /* TODO: iBF */
+ ebfee = !!((pc->cap & cr) && (vc->cap & ce));
+ ebf = !!((vc->cap & cr) && (pc->cap & ce));
}
- return type;
-}
-
-static int
-mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_phy *phy;
- struct sk_buff *skb;
- int r, len;
- u8 type;
-
- phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
-
- type = mt7915_mcu_sta_txbf_type(phy, vif, sta);
-
/* must keep each tag independent */
/* starec bf */
- if (type & MT_STA_BFER) {
+ if (ebf || dev->ibf) {
len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf);
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable);
+ mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable, ebf);
r = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
if (r)
return r;
}
/* starec bfee */
- if (type & MT_STA_BFEE) {
+ if (ebfee) {
len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee);
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
@@ -2031,7 +2035,7 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_sta_bfee_tlv(skb, sta, phy);
r = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
if (r)
return r;
}
@@ -2199,33 +2203,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
-}
-
-static int
-mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
-#define MT_STA_BSS_GROUP 1
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct {
- __le32 action;
- u8 wlan_idx_lo;
- u8 status;
- u8 wlan_idx_hi;
- u8 rsv0[5];
- __le32 val;
- u8 rsv1[8];
- } __packed req = {
- .action = cpu_to_le32(MT_STA_BSS_GROUP),
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
- .val = cpu_to_le32(mvif->idx % 16),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL, &req,
- sizeof(req), true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2237,10 +2215,6 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return 0;
/* must keep the order */
- ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
- return ret;
-
ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
if (ret)
return ret;
@@ -2287,7 +2261,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
@@ -2333,7 +2307,7 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
@@ -2375,7 +2349,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
return mt7915_mcu_muar_config(phy, vif, false, enable);
memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE),
&data, sizeof(data), true);
}
@@ -2468,7 +2442,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
dev_kfree_skb(skb);
return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
- MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
@@ -2482,7 +2456,7 @@ static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
.addr = cpu_to_le32(addr),
};
- return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(FW_START_REQ), &req,
sizeof(req), true);
}
@@ -2495,7 +2469,7 @@ static int mt7915_mcu_restart(struct mt76_dev *dev)
.power_mode = 1,
};
- return mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req,
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
sizeof(req), false);
}
@@ -2507,7 +2481,7 @@ static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
.op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
};
- return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_SEM_CONTROL), &req,
sizeof(req), true);
}
@@ -2520,7 +2494,7 @@ static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
.check_crc = 0,
};
- return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_FINISH_REQ), &req,
sizeof(req), true);
}
@@ -2553,9 +2527,9 @@ static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
int attr;
if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
- attr = -MCU_CMD_PATCH_START_REQ;
+ attr = MCU_CMD(PATCH_START_REQ);
else
- attr = -MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+ attr = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
}
@@ -2616,7 +2590,7 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
goto out;
}
- ret = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
dl, len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send patch\n");
@@ -2685,7 +2659,7 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
return err;
}
- err = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
data + offset, len);
if (err) {
dev_err(dev->mt76.dev, "Failed to send firmware.\n");
@@ -2815,7 +2789,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
.ctrl_val = ctrl
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, &data,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data,
sizeof(data), true);
}
@@ -2833,7 +2807,7 @@ int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
.level = level,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL, &data,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_DBG_CTRL), &data,
sizeof(data), false);
}
@@ -2846,7 +2820,7 @@ static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled)
.enable = enabled
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MWDS_SUPPORT, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(MWDS_SUPPORT), &req,
sizeof(req), false);
}
@@ -2873,6 +2847,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
mt7915_mcu_fw_log_2_host(dev, 0);
mt7915_mcu_set_mwds(dev, 1);
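+	/* opt in to the WA firmware's RED (random early drop) handling */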
+ mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_RED, 0, 0);
return 0;
}
@@ -2919,12 +2894,12 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
};
int ret;
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS,
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
&req_trans, sizeof(req_trans), false);
if (ret)
return ret;
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL),
&req_mac, sizeof(req_mac), true);
}
@@ -2940,7 +2915,7 @@ int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
.enable = enable + 1,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SCS_CTRL), &req,
sizeof(req), false);
}
@@ -2960,34 +2935,25 @@ int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
.pkt_thresh = cpu_to_le32(0x2),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
sizeof(req), true);
}
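+/* shared EDCA updater: trim the request to the number of ACs actually
+ * filled in (req->total) before passing it to the firmware
+ */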
+int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
+{
+ struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
+ u8 num = req->total;
+ size_t len = sizeof(*req) -
+ (IEEE80211_NUM_ACS - num) * sizeof(struct edca);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), req,
+ len, true);
+}
+
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
{
-#define WMM_AIFS_SET BIT(0)
-#define WMM_CW_MIN_SET BIT(1)
-#define WMM_CW_MAX_SET BIT(2)
-#define WMM_TXOP_SET BIT(3)
-#define WMM_PARAM_SET GENMASK(3, 0)
#define TX_CMD_MODE 1
- struct edca {
- u8 queue;
- u8 set;
- u8 aifs;
- u8 cw_min;
- __le16 cw_max;
- __le16 txop;
- };
- struct mt7915_mcu_tx {
- u8 total;
- u8 action;
- u8 valid;
- u8 mode;
-
- struct edca edca[IEEE80211_NUM_ACS];
- } __packed req = {
+ struct mt7915_mcu_tx req = {
.valid = true,
.mode = TX_CMD_MODE,
.total = IEEE80211_NUM_ACS,
@@ -3014,8 +2980,8 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
else
e->cw_max = cpu_to_le16(10);
}
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
- sizeof(req), true);
+
+ return mt7915_mcu_update_edca(dev, &req);
}
int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
@@ -3045,7 +3011,7 @@ int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
.band_idx = band,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), &req,
sizeof(req), true);
}
@@ -3066,7 +3032,7 @@ int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
.val = val,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), &req,
sizeof(req), true);
}
@@ -3081,7 +3047,7 @@ int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
.min_lpn = cpu_to_le16(val),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
sizeof(req), true);
}
@@ -3112,7 +3078,7 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
#undef __req_field
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
sizeof(req), true);
}
@@ -3129,8 +3095,8 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
u8 max_crpn;
u8 min_crpr;
u8 min_pw;
- u32 min_pri;
- u32 max_pri;
+ __le32 min_pri;
+ __le32 max_pri;
u8 max_pw;
u8 min_crbn;
u8 max_crbn;
@@ -3138,7 +3104,7 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
u8 max_stgpn;
u8 min_stgpr;
u8 rsv[2];
- u32 min_stgpr_diff;
+ __le32 min_stgpr_diff;
} __packed req = {
.tag = cpu_to_le32(0x2),
.radar_type = cpu_to_le16(index),
@@ -3164,7 +3130,7 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
#undef __req_field_u32
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
sizeof(req), true);
}
@@ -3173,6 +3139,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
+ bool ext_phy = phy != &dev->phy;
struct {
u8 control_ch;
u8 center_ch;
@@ -3196,16 +3163,22 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
.bw = mt7915_mcu_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
- .band_idx = phy != &dev->phy,
+ .band_idx = ext_phy,
.channel_band = chandef->chan->band,
};
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->mt76.test.tx_antenna_mask &&
- (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES ||
- dev->mt76.test.state == MT76_TM_STATE_RX_FRAMES)) {
- req.tx_streams_num = fls(dev->mt76.test.tx_antenna_mask);
- req.rx_streams = dev->mt76.test.tx_antenna_mask;
+ if (phy->mt76->test.tx_antenna_mask &&
+ (phy->mt76->test.state == MT76_TM_STATE_TX_FRAMES ||
+ phy->mt76->test.state == MT76_TM_STATE_RX_FRAMES ||
+ phy->mt76->test.state == MT76_TM_STATE_TX_CONT)) {
+ req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
+ req.rx_streams = phy->mt76->test.tx_antenna_mask;
+
+ if (ext_phy) {
+ req.tx_streams_num = 2;
+ req.rx_streams >>= 2;
+ }
}
#endif
@@ -3217,7 +3190,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
else
req.switch_reason = CH_SWITCH_NORMAL;
- if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
+ if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
req.rx_streams = hweight8(req.rx_streams);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -3229,18 +3202,58 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
+static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
+{
+#define TOTAL_PAGE_MASK GENMASK(7, 5)
+#define PAGE_IDX_MASK GENMASK(4, 2)
+#define PER_PAGE_SIZE 0x400
+ struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
+ u8 total = MT7915_EEPROM_SIZE / PER_PAGE_SIZE;
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ int eep_len;
+ int i;
+
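+	/* push the flash EEPROM content in 1KB pages; the final iteration
+	 * carries the remainder of the last, partial page
+	 */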
+ for (i = 0; i <= total; i++, eep += eep_len) {
+ struct sk_buff *skb;
+ int ret;
+
+ if (i == total)
+ eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
+ else
+ eep_len = PER_PAGE_SIZE;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(req) + eep_len);
+ if (!skb)
+ return -ENOMEM;
+
+ req.format = FIELD_PREP(TOTAL_PAGE_MASK, total) |
+ FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE;
+ req.len = cpu_to_le16(eep_len);
+
+ skb_put_data(skb, &req, sizeof(req));
+ skb_put_data(skb, eep, eep_len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(EFUSE_BUFFER_MODE), true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
{
- struct req_hdr {
- u8 buffer_mode;
- u8 format;
- __le16 len;
- } __packed req = {
+ struct mt7915_mcu_eeprom req = {
.buffer_mode = EE_MODE_EFUSE,
.format = EE_FORMAT_WHOLE,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ if (dev->flash_mode)
+ return mt7915_mcu_set_eeprom_flash(dev);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
&req, sizeof(req), true);
}
@@ -3254,7 +3267,7 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
int ret;
u8 *buf;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req,
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), &req,
sizeof(req), true, &skb);
if (ret)
return ret;
@@ -3279,7 +3292,7 @@ int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
.action = index,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
sizeof(req), true);
}
@@ -3297,7 +3310,7 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
.dump_group = cpu_to_le16(1),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RATE_CTRL), &req,
sizeof(req), false);
}
@@ -3326,7 +3339,7 @@ int mt7915_mcu_set_sku(struct mt7915_phy *phy)
req.val[i] = hw->conf.power_level * 2 + delta[i];
return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
sizeof(req), true);
}
@@ -3348,7 +3361,7 @@ int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
.enable = en,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
sizeof(req), false);
}
@@ -3367,7 +3380,7 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
};
return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
sizeof(req), true);
}
@@ -3384,10 +3397,29 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
.band = band,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SER_TRIGGER),
&req, sizeof(req), false);
}
+int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev)
+{
+#define MT_BF_MODULE_UPDATE 25
+ struct {
+ u8 action;
+ u8 bf_num;
+ u8 bf_bitmap;
+ u8 bf_sel[8];
+ u8 rsv[8];
+ } __packed req = {
+ .action = MT_BF_MODULE_UPDATE,
+ .bf_num = 2,
+ .bf_bitmap = GENMASK(1, 0),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), true);
+}
+
int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
{
#define MT_BF_TYPE_UPDATE 20
@@ -3399,10 +3431,10 @@ int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
} __packed req = {
.action = MT_BF_TYPE_UPDATE,
.ebf = true,
- .ibf = false,
+ .ibf = dev->ibf,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
sizeof(req), true);
}
@@ -3421,7 +3453,7 @@ int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
.snd_mode = MT_BF_PROCESSING,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
sizeof(req), true);
}
@@ -3446,7 +3478,7 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
.val = cpu_to_le32(enable),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
@@ -3473,7 +3505,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
int ret;
int i;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_PHY_STAT_INFO,
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index cd1a4256c843..2d584142c27b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -49,6 +49,8 @@ enum {
enum {
MCU_ATE_SET_TRX = 0x1,
MCU_ATE_SET_FREQ_OFFSET = 0xa,
+ MCU_ATE_SET_SLOT_TIME = 0x13,
+ MCU_ATE_CLEAN_TXQUEUE = 0x1c,
};
struct mt7915_mcu_rxd {
@@ -118,6 +120,12 @@ struct mt7915_mcu_rdd_report {
} hw_pulse[32];
} __packed;
+struct mt7915_mcu_eeprom {
+ u8 buffer_mode;
+ u8 format;
+ __le16 len;
+} __packed;
+
struct mt7915_mcu_eeprom_info {
__le32 addr;
__le32 valid;
@@ -176,6 +184,30 @@ struct mt7915_mcu_phy_rx_info {
#define MT_RA_RATE_DCM_EN BIT(4)
#define MT_RA_RATE_BW GENMASK(14, 13)
+struct edca {
+ u8 queue;
+ u8 set;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+};
+
+struct mt7915_mcu_tx {
+ u8 total;
+ u8 action;
+ u8 valid;
+ u8 mode;
+
+ struct edca edca[IEEE80211_NUM_ACS];
+} __packed;
+
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET GENMASK(3, 0)
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -193,6 +225,12 @@ enum {
MCU_S2D_H2CN
};
+
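+/* unified command encoding: CID in bits 7:0, extended CID in bits 15:8,
+ * a query flag in bit 16 and a WA-firmware routing flag in bit 17
+ */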
+#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
+#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
+#define __MCU_CMD_FIELD_QUERY BIT(16)
+#define __MCU_CMD_FIELD_WA BIT(17)
+
enum {
MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
MCU_CMD_FW_START_REQ = 0x02,
@@ -201,6 +239,7 @@ enum {
MCU_CMD_PATCH_START_REQ = 0x05,
MCU_CMD_PATCH_FINISH_REQ = 0x07,
MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_WA_PARAM = 0xC4,
MCU_CMD_EXT_CID = 0xED,
MCU_CMD_FW_SCATTER = 0xEE,
MCU_CMD_RESTART_DL_REQ = 0xEF,
@@ -208,6 +247,7 @@ enum {
enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
+ MCU_EXT_CMD_RF_TEST = 0x04,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
@@ -239,6 +279,29 @@ enum {
};
enum {
+ MCU_WA_PARAM_CMD_QUERY,
+ MCU_WA_PARAM_CMD_SET,
+ MCU_WA_PARAM_CMD_CAPABILITY,
+ MCU_WA_PARAM_CMD_DEBUG,
+};
+
+enum {
+ MCU_WA_PARAM_RED = 0x0e,
+};
+
+#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, MCU_CMD_##_t)
+#define MCU_EXT_CMD(_t) (MCU_CMD(EXT_CID) | \
+ FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
+ MCU_EXT_CMD_##_t))
+#define MCU_EXT_QUERY(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_QUERY)
+
+#define MCU_WA_CMD(_t) (MCU_CMD(_t) | __MCU_CMD_FIELD_WA)
+#define MCU_WA_EXT_CMD(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_WA)
+#define MCU_WA_PARAM_CMD(_t) (MCU_WA_CMD(WA_PARAM) | \
+ FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
+ MCU_WA_PARAM_CMD_##_t))
+
+enum {
PATCH_SEM_RELEASE,
PATCH_SEM_GET
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 0339abf360d3..5c7eefdf2013 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -61,6 +61,7 @@ enum mt7915_rxq_id {
MT7915_RXQ_BAND1,
MT7915_RXQ_MCU_WM = 0,
MT7915_RXQ_MCU_WA,
+ MT7915_RXQ_MCU_WA_EXT,
};
struct mt7915_sta_stats {
@@ -72,6 +73,11 @@ struct mt7915_sta_stats {
unsigned long jiffies;
};
+struct mt7915_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
struct mt7915_sta {
struct mt76_wcid wcid; /* must be first */
@@ -85,6 +91,8 @@ struct mt7915_sta {
struct mt7915_sta_stats stats;
unsigned long ampdu_state;
+
+ struct mt7915_sta_key_conf bip;
};
struct mt7915_vif {
@@ -107,6 +115,14 @@ struct mib_stats {
u16 ba_miss_cnt;
};
+struct mt7915_hif {
+ struct list_head list;
+
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+};
+
struct mt7915_phy {
struct mt76_phy *mt76;
struct mt7915_dev *dev;
@@ -119,7 +135,6 @@ struct mt7915_phy {
u64 omac_mask;
u16 noise;
- u16 chainmask;
s16 coverage_class;
u8 slottime;
@@ -133,9 +148,21 @@ struct mt7915_phy {
struct mib_stats mib;
struct list_head stats_list;
- struct delayed_work mac_work;
- u8 mac_work_count;
u8 sta_work_count;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 *reg_backup;
+
+ s32 last_freq_offset;
+ u8 last_rcpi[4];
+ s8 last_ib_rssi[4];
+ s8 last_wb_rssi[4];
+ u8 last_snr;
+
+ u8 spe_idx;
+ } test;
+#endif
};
struct mt7915_dev {
@@ -144,10 +171,13 @@ struct mt7915_dev {
struct mt76_phy mphy;
};
+ struct mt7915_hif *hif2;
+
const struct mt76_bus_ops *bus_ops;
struct mt7915_phy phy;
u16 chainmask;
+ u32 hif_idx;
struct work_struct init_work;
struct work_struct rc_work;
@@ -168,21 +198,9 @@ struct mt7915_dev {
s8 **rate_power; /* TODO: use mt76_rate_power */
bool dbdc_support;
+ bool flash_mode;
bool fw_debug;
-
-#ifdef CONFIG_NL80211_TESTMODE
- struct {
- u32 *reg_backup;
-
- s32 last_freq_offset;
- u8 last_rcpi[4];
- s8 last_ib_rssi[4];
- s8 last_wb_rssi[4];
- u8 last_snr;
-
- u8 spe_idx;
- } test;
-#endif
+ bool ibf;
};
enum {
@@ -271,7 +289,6 @@ static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
}
extern const struct ieee80211_ops mt7915_ops;
-extern struct pci_driver mt7915_pci_driver;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
@@ -319,6 +336,7 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
int mt7915_set_channel(struct mt7915_phy *phy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
+int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
struct ieee80211_sta *sta, u32 rate);
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
@@ -334,6 +352,7 @@ int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_sku(struct mt7915_phy *phy);
int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
+int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
@@ -356,14 +375,23 @@ static inline bool is_mt7915(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7915;
}
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
+ u32 clear, u32 set);
+
static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
{
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+ if (dev->hif2)
+ mt7915_dual_hif_set_irq_mask(dev, true, 0, mask);
+ else
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}
static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
{
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+ if (dev->hif2)
+ mt7915_dual_hif_set_irq_mask(dev, true, mask, 0);
+ else
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static inline u32
@@ -463,6 +491,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt7915_tx_token_put(struct mt7915_dev *dev);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index aeb86fbea41c..13880cc9c9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -12,11 +12,72 @@
#include "mac.h"
#include "../trace.h"
+static LIST_HEAD(hif_list);
+static DEFINE_SPINLOCK(hif_lock);
+static u32 hif_idx;
+
static const struct pci_device_id mt7915_pci_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
{ },
};
+static const struct pci_device_id mt7915_hif_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { },
+};
+
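+/* with a second hif attached, mirror the shared irqmask to the
+ * interrupt mask CSRs of both PCIe hosts
+ */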
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
+
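+/* find the hif2 device probed by the companion driver, matching the
+ * index the main device wrote to MT_PCIE_RECOG_ID
+ */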
+static struct mt7915_hif *
+mt7915_pci_get_hif2(struct mt7915_dev *dev)
+{
+ struct mt7915_hif *hif;
+ u32 val;
+
+ spin_lock_bh(&hif_lock);
+
+ list_for_each_entry(hif, &hif_list, list) {
+ val = readl(hif->regs + MT_PCIE_RECOG_ID);
+ val &= MT_PCIE_RECOG_ID_MASK;
+ if (val != dev->hif_idx)
+ continue;
+
+ get_device(hif->dev);
+ goto out;
+ }
+ hif = NULL;
+
+out:
+ spin_unlock_bh(&hif_lock);
+
+ return hif;
+}
+
+static void mt7915_put_hif2(struct mt7915_hif *hif)
+{
+ if (!hif)
+ return;
+
+ put_device(hif->dev);
+}
+
static void
mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
@@ -26,6 +87,7 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
[MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
[MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
[MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
+ [MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
};
mt7915_irq_enable(dev, rx_irq_mask[q]);
@@ -35,12 +97,20 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
- u32 intr, mask;
+ u32 intr, intr1, mask;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
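+	/* ack the second hif and fold its pending bits into the primary
+	 * status word, so a single handler services both interrupts
+	 */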
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
@@ -67,6 +137,9 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_RX_DONE_WA)
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+ if (intr & MT_INT_RX_DONE_WA_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -103,6 +176,53 @@ mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev)
return 0;
}
+static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
+{
+ struct mt7915_hif *hif;
+
+ dev->hif_idx = ++hif_idx;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
+ return;
+
+ mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
+
+ hif = mt7915_pci_get_hif2(dev);
+ if (!hif)
+ return;
+
+ dev->hif2 = hif;
+
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
+ mt7915_put_hif2(hif);
+ hif = NULL;
+ }
+
+	/* master switch of PCIe interrupt enable */
+ mt7915_l1_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+}
+
+static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
+{
+ struct mt7915_hif *hif;
+
+ hif = devm_kzalloc(&pdev->dev, sizeof(*hif), GFP_KERNEL);
+ if (!hif)
+ return -ENOMEM;
+
+ hif->dev = &pdev->dev;
+ hif->regs = pcim_iomap_table(pdev)[0];
+ hif->irq = pdev->irq;
+ spin_lock_bh(&hif_lock);
+ list_add(&hif->list, &hif_list);
+ spin_unlock_bh(&hif_lock);
+ pci_set_drvdata(pdev, hif);
+
+ return 0;
+}
+
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -141,6 +261,9 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
+ if (id->device == 0x7916)
+ return mt7915_pci_hif2_probe(pdev);
+
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
&drv_ops);
if (!mdev)
@@ -166,6 +289,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
if (ret)
goto error;
+ mt7915_pci_init_hif2(dev);
+
ret = mt7915_register_device(dev);
if (ret)
goto error;
@@ -177,24 +302,64 @@ error:
return ret;
}
+static void mt7915_hif_remove(struct pci_dev *pdev)
+{
+ struct mt7915_hif *hif = pci_get_drvdata(pdev);
+
+ list_del(&hif->list);
+}
+
static void mt7915_pci_remove(struct pci_dev *pdev)
{
- struct mt76_dev *mdev = pci_get_drvdata(pdev);
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt76_dev *mdev;
+ struct mt7915_dev *dev;
+ mdev = pci_get_drvdata(pdev);
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ mt7915_put_hif2(dev->hif2);
mt7915_unregister_device(dev);
}
-struct pci_driver mt7915_pci_driver = {
+static struct pci_driver mt7915_hif_driver = {
+ .name = KBUILD_MODNAME "_hif",
+ .id_table = mt7915_hif_device_table,
+ .probe = mt7915_pci_probe,
+ .remove = mt7915_hif_remove,
+};
+
+static struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
-module_pci_driver(mt7915_pci_driver);
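+/* register the hif driver first, so the 0x7916 function is already on
+ * hif_list when the main 0x7915 probe goes looking for it
+ */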
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
+}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
+MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 848703e6eb7c..ed0c9a24bb53 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -59,6 +59,13 @@
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
+#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
+
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
+#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
+
#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
#define MT_IFS_EIFS GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
@@ -70,7 +77,6 @@
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
@@ -342,21 +348,36 @@
#define MT_INT_RX_DONE_DATA1 BIT(17)
#define MT_INT_RX_DONE_WM BIT(0)
#define MT_INT_RX_DONE_WA BIT(1)
-#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | GENMASK(17, 16))
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
#define MT_INT_TX_DONE_MCU_WA BIT(15)
#define MT_INT_TX_DONE_FWDL BIT(26)
#define MT_INT_TX_DONE_MCU_WM BIT(27)
#define MT_INT_TX_DONE_BAND0 BIT(30)
#define MT_INT_TX_DONE_BAND1 BIT(31)
+
+#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
+ MT_INT_TX_DONE_BAND1)
+
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
MT_INT_TX_DONE_MCU_WM | \
MT_INT_TX_DONE_FWDL)
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
+#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
+#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
+
+#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
+#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
+#define MT_PCIE_RECOG_ID_SEM BIT(31)
+
/* WFDMA0 PCIE1 */
#define MT_WFDMA0_PCIE1_BASE 0xd8000
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
@@ -411,6 +432,10 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_PCIE1_MAC_BASE 0x74020000
+#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
+#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
+
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 9ee82e2d262c..7fb2170a9561 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -70,30 +70,32 @@ mt7915_tm_set_tx_power(struct mt7915_phy *phy)
};
u8 *tx_power = NULL;
- if (dev->mt76.test.state != MT76_TM_STATE_OFF)
- tx_power = dev->mt76.test.tx_power;
+ if (phy->mt76->test.state != MT76_TM_STATE_OFF)
+ tx_power = phy->mt76->test.tx_power;
/* Tx power of the other antennas are the same as antenna 0 */
if (tx_power && tx_power[0])
req.tx_power = tx_power[0];
ret = mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
&req, sizeof(req), false);
return ret;
}
static int
-mt7915_tm_set_freq_offset(struct mt7915_dev *dev, bool en, u32 val)
+mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val)
{
+ struct mt7915_dev *dev = phy->dev;
struct mt7915_tm_cmd req = {
.testmode_en = en,
.param_idx = MCU_ATE_SET_FREQ_OFFSET,
+ .param.freq.band = phy != &dev->phy,
.param.freq.freq_offset = cpu_to_le32(val),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
sizeof(req), false);
}
@@ -110,14 +112,14 @@ mt7915_tm_mode_ctrl(struct mt7915_dev *dev, bool enable)
};
return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
&req, sizeof(req), false);
}
static int
-mt7915_tm_set_trx(struct mt7915_dev *dev, struct mt7915_phy *phy,
- int type, bool en)
+mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
{
+ struct mt7915_dev *dev = phy->dev;
struct mt7915_tm_cmd req = {
.testmode_en = 1,
.param_idx = MCU_ATE_SET_TRX,
@@ -126,19 +128,230 @@ mt7915_tm_set_trx(struct mt7915_dev *dev, struct mt7915_phy *phy,
.param.trx.band = phy != &dev->phy,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = 1,
+ .param_idx = MCU_ATE_CLEAN_TXQUEUE,
+ .param.clean.wcid = wcid,
+ .param.clean.band = phy != &dev->phy,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
+ .param_idx = MCU_ATE_SET_SLOT_TIME,
+ .param.slot.slot_time = slot_time,
+ .param.slot.sifs = sifs,
+ .param.slot.rifs = 2,
+ .param.slot.eifs = cpu_to_le16(60),
+ .param.slot.band = phy != &dev->phy,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
sizeof(req), false);
}
+static int
+mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
+ u16 cw_max, u16 txop)
+{
+ struct mt7915_mcu_tx req = { .total = 1 };
+ struct edca *e = &req.edca[0];
+
+ e->queue = qid;
+ e->set = WMM_PARAM_SET;
+
+ e->aifs = aifs;
+ e->cw_min = cw_min;
+ e->cw_max = cpu_to_le16(cw_max);
+ e->txop = cpu_to_le16(txop);
+
+ return mt7915_mcu_update_edca(dev, &req);
+}
+
+static int
+mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
+{
+#define TM_DEFAULT_SIFS 10
+#define TM_MAX_SIFS 127
+#define TM_MAX_AIFSN 0xf
+#define TM_MIN_AIFSN 0x1
+#define BBP_PROC_TIME 1500
+ struct mt7915_dev *dev = phy->dev;
+ u8 sig_ext = (mode == MT76_TM_TX_MODE_CCK) ? 0 : 6;
+ u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
+ u8 aifsn = TM_MIN_AIFSN;
+ u32 i2t_time, tr2t_time, txv_time;
+ bool ext_phy = phy != &dev->phy;
+ u16 cw = 0;
+
+ if (ipg < sig_ext + slot_time + sifs)
+ ipg = 0;
+
+ if (!ipg)
+ goto done;
+
+ ipg -= sig_ext;
+
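+ /* decompose the remaining gap as (2^cw - 1 + aifsn) * slot_time + sifs */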
+ if (ipg <= (TM_MAX_SIFS + slot_time)) {
+ sifs = ipg - slot_time;
+ } else {
+ u32 val = (ipg + slot_time) / slot_time;
+
+ while (val >>= 1)
+ cw++;
+
+ if (cw > 16)
+ cw = 16;
+
+ ipg -= ((1 << cw) - 1) * slot_time;
+
+ aifsn = ipg / slot_time;
+ if (aifsn > TM_MAX_AIFSN)
+ aifsn = TM_MAX_AIFSN;
+
+ ipg -= aifsn * slot_time;
+
+ if (ipg > TM_DEFAULT_SIFS) {
+ if (ipg < TM_MAX_SIFS)
+ sifs = ipg;
+ else
+ sifs = TM_MAX_SIFS;
+ }
+ }
+done:
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(ext_phy),
+ MT_TMAC_ATCR_TXV_TOUT);
+ txv_time *= 50; /* normal clock time */
+
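+ /* program the idle-to-Tx and Rx-to-Tx checks in 50ns units, net of TXV and BBP processing time */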
+ i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
+ tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
+
+ mt76_set(dev, MT_TMAC_TRCR0(ext_phy),
+ FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
+ FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
+
+ mt7915_tm_set_slot_time(phy, slot_time, sifs);
+
+ return mt7915_tm_set_wmm_qid(dev,
+ mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
+ aifsn, cw, cw, 0);
+}
+
+static int
+mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt76_testmode_data *td = &mphy->test;
+ struct sk_buff *old = td->tx_skb, *new;
+ struct ieee80211_supported_band *sband;
+ struct rate_info rate = {};
+ u16 flags = 0, tx_len;
+ u32 bitrate;
+
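+ /* derive a frame length that occupies tx_time on air at the configured rate */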
+ if (!tx_time || !old)
+ return 0;
+
+ rate.mcs = td->tx_rate_idx;
+ rate.nss = td->tx_rate_nss;
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_CCK:
+ case MT76_TM_TX_MODE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT76_TM_TX_MODE_HT:
+ rate.mcs += rate.nss * 8;
+ flags |= RATE_INFO_FLAGS_MCS;
+
+ if (td->tx_rate_sgi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (td->tx_rate_sgi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT76_TM_TX_MODE_HE_SU:
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ case MT76_TM_TX_MODE_HE_TB:
+ case MT76_TM_TX_MODE_HE_MU:
+ rate.he_gi = td->tx_rate_sgi;
+ flags |= RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ break;
+ }
+ rate.flags = flags;
+
+ switch (mphy->chandef.width) {
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ rate.bw = RATE_INFO_BW_160;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rate.bw = RATE_INFO_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ rate.bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ break;
+ }
+
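+ /* cfg80211_calculate_bitrate() returns 100 Kbps units, tx_time is in usecs */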
+ bitrate = cfg80211_calculate_bitrate(&rate);
+ tx_len = bitrate * tx_time / 10 / 8;
+
+ if (tx_len < sizeof(struct ieee80211_hdr))
+ tx_len = sizeof(struct ieee80211_hdr);
+ else if (tx_len > IEEE80211_MAX_FRAME_LEN)
+ tx_len = IEEE80211_MAX_FRAME_LEN;
+
+ new = alloc_skb(tx_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ skb_copy_header(new, old);
+ __skb_put_zero(new, tx_len);
+ memcpy(new->data, old->data, sizeof(struct ieee80211_hdr));
+
+ dev_kfree_skb(old);
+ td->tx_skb = new;
+
+ return 0;
+}
+
static void
-mt7915_tm_reg_backup_restore(struct mt7915_dev *dev, struct mt7915_phy *phy)
+mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
{
int n_regs = ARRAY_SIZE(reg_backup_list);
+ struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
- u32 *b = dev->test.reg_backup;
+ u32 *b = phy->test.reg_backup;
int i;
- if (dev->mt76.test.state == MT76_TM_STATE_OFF) {
+ if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
return;
@@ -151,7 +364,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_dev *dev, struct mt7915_phy *phy)
if (!b)
return;
- dev->test.reg_backup = b;
+ phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
@@ -182,93 +395,260 @@ mt7915_tm_reg_backup_restore(struct mt7915_dev *dev, struct mt7915_phy *phy)
}
static void
-mt7915_tm_init(struct mt7915_dev *dev)
+mt7915_tm_init(struct mt7915_phy *phy, bool en)
{
- bool en = !(dev->mt76.test.state == MT76_TM_STATE_OFF);
+ struct mt7915_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &dev->phy.mt76->state))
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
+ mt7915_mcu_set_sku_en(phy, !en);
+
mt7915_tm_mode_ctrl(dev, en);
- mt7915_tm_reg_backup_restore(dev, &dev->phy);
- mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_TXRX, !en);
+ mt7915_tm_reg_backup_restore(phy);
+ mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
+
+ mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
+}
+
+static void
+mt7915_tm_update_channel(struct mt7915_phy *phy)
+{
+ mutex_unlock(&phy->dev->mt76.mutex);
+ mt7915_set_channel(phy);
+ mutex_lock(&phy->dev->mt76.mutex);
+
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
}
static void
-mt7915_tm_set_tx_frames(struct mt7915_dev *dev, bool en)
+mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
{
static const u8 spe_idx_map[] = {0, 0, 1, 0, 3, 2, 4, 0,
9, 8, 6, 10, 16, 12, 18, 0};
- struct sk_buff *skb = dev->mt76.test.tx_skb;
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt7915_dev *dev = phy->dev;
struct ieee80211_tx_info *info;
+ u8 duty_cycle = td->tx_duty_cycle;
+ u32 tx_time = td->tx_time;
+ u32 ipg = td->tx_ipg;
- mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_RX_RXV, false);
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+ mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
if (en) {
- u8 tx_ant = dev->mt76.test.tx_antenna_mask;
+ mt7915_tm_update_channel(phy);
+
+ if (td->tx_spe_idx) {
+ phy->test.spe_idx = td->tx_spe_idx;
+ } else {
+ u8 tx_ant = td->tx_antenna_mask;
- mutex_unlock(&dev->mt76.mutex);
- mt7915_set_channel(&dev->phy);
- mutex_lock(&dev->mt76.mutex);
+ if (phy != &dev->phy)
+ tx_ant >>= 2;
+ phy->test.spe_idx = spe_idx_map[tx_ant];
+ }
+ }
- mt7915_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
- dev->test.spe_idx = spe_idx_map[tx_ant];
+ /* if all three params are set, duty_cycle will be ignored */
+ if (duty_cycle && tx_time && !ipg) {
+ ipg = tx_time * 100 / duty_cycle - tx_time;
+ } else if (duty_cycle && !tx_time && ipg) {
+ if (duty_cycle < 100)
+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
}
- mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_TX, en);
+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
+ mt7915_tm_set_tx_len(phy, tx_time);
- if (!en || !skb)
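+ /* cap queued test frames to what can drain within half of MT76_TM_TIMEOUT */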
+ if (ipg)
+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
+
+ if (!en || !td->tx_skb)
return;
- info = IEEE80211_SKB_CB(skb);
- info->control.vif = dev->phy.monitor_vif;
+ info = IEEE80211_SKB_CB(td->tx_skb);
+ info->control.vif = phy->monitor_vif;
+
+ mt7915_tm_set_trx(phy, TM_MAC_TX, en);
}
static void
-mt7915_tm_set_rx_frames(struct mt7915_dev *dev, bool en)
+mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
{
- if (en) {
- mutex_unlock(&dev->mt76.mutex);
- mt7915_set_channel(&dev->phy);
- mutex_lock(&dev->mt76.mutex);
+ if (en)
+ mt7915_tm_update_channel(phy);
- mt7915_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+}
+
+static int
+mt7915_tm_rf_switch_mode(struct mt7915_dev *dev, u32 oper)
+{
+ struct mt7915_tm_rf_test req = {
+ .op.op_mode = cpu_to_le32(oper),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
+{
+#define TX_CONT_START 0x05
+#define TX_CONT_STOP 0x06
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = ieee80211_frequency_to_channel(chandef->center_freq1);
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ u32 func_idx = en ? TX_CONT_START : TX_CONT_STOP;
+ u8 rate_idx = td->tx_rate_idx, mode;
+ u16 rateval;
+ struct mt7915_tm_rf_test req = {
+ .action = 1,
+ .icap_len = 120,
+ .op.rf.func_idx = cpu_to_le32(func_idx),
+ };
+ struct tm_tx_cont *tx_cont = &req.op.rf.param.tx_cont;
+
+ tx_cont->control_ch = chandef->chan->hw_value;
+ tx_cont->center_ch = freq1;
+ tx_cont->tx_ant = td->tx_antenna_mask;
+ tx_cont->band = phy != &dev->phy;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ tx_cont->bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ tx_cont->bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ tx_cont->bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ tx_cont->bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ tx_cont->bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ tx_cont->bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ tx_cont->bw = CMD_CBW_20MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ tx_cont->bw = CMD_CBW_20MHZ;
+ break;
+ default:
+ break;
+ }
+
+ if (!en) {
+ req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
+ goto out;
}
- mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_RX_RXV, en);
+ if (td->tx_rate_mode <= MT76_TM_TX_MODE_OFDM) {
+ struct ieee80211_supported_band *sband;
+ u8 idx = rate_idx;
+
+ if (chandef->chan->band == NL80211_BAND_5GHZ)
+ sband = &phy->mt76->sband_5g.sband;
+ else
+ sband = &phy->mt76->sband_2g.sband;
+
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_OFDM)
+ idx += 4;
+ rate_idx = sband->bitrates[idx].hw_value & 0xff;
+ }
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_CCK:
+ mode = MT_PHY_TYPE_CCK;
+ break;
+ case MT76_TM_TX_MODE_OFDM:
+ mode = MT_PHY_TYPE_OFDM;
+ break;
+ case MT76_TM_TX_MODE_HT:
+ mode = MT_PHY_TYPE_HT;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ mode = MT_PHY_TYPE_VHT;
+ break;
+ case MT76_TM_TX_MODE_HE_SU:
+ mode = MT_PHY_TYPE_HE_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ mode = MT_PHY_TYPE_HE_EXT_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_TB:
+ mode = MT_PHY_TYPE_HE_TB;
+ break;
+ case MT76_TM_TX_MODE_HE_MU:
+ mode = MT_PHY_TYPE_HE_MU;
+ break;
+ default:
+ break;
+ }
+
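+ /* rateval packs the PHY mode in the high bits and the rate index in the low 6 bits */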
+ rateval = mode << 6 | rate_idx;
+ tx_cont->rateval = cpu_to_le16(rateval);
+
+out:
+ if (!en) {
+ int ret;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ return mt7915_tm_rf_switch_mode(dev, RF_OPER_NORMAL);
+ }
+
+ mt7915_tm_rf_switch_mode(dev, RF_OPER_RF_TEST);
+ mt7915_tm_update_channel(phy);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
}
static void
-mt7915_tm_update_params(struct mt7915_dev *dev, u32 changed)
+mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
{
- struct mt76_testmode_data *td = &dev->mt76.test;
- bool en = dev->mt76.test.state != MT76_TM_STATE_OFF;
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ bool en = phy->mt76->test.state != MT76_TM_STATE_OFF;
if (changed & BIT(TM_CHANGED_FREQ_OFFSET))
- mt7915_tm_set_freq_offset(dev, en, en ? td->freq_offset : 0);
+ mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
if (changed & BIT(TM_CHANGED_TXPOWER))
- mt7915_tm_set_tx_power(&dev->phy);
+ mt7915_tm_set_tx_power(phy);
}
static int
-mt7915_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
+mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- struct mt76_testmode_data *td = &mdev->test;
+ struct mt76_testmode_data *td = &mphy->test;
+ struct mt7915_phy *phy = mphy->priv;
enum mt76_testmode_state prev_state = td->state;
- mdev->test.state = state;
-
- if (prev_state == MT76_TM_STATE_TX_FRAMES)
- mt7915_tm_set_tx_frames(dev, false);
- else if (state == MT76_TM_STATE_TX_FRAMES)
- mt7915_tm_set_tx_frames(dev, true);
- else if (prev_state == MT76_TM_STATE_RX_FRAMES)
- mt7915_tm_set_rx_frames(dev, false);
- else if (state == MT76_TM_STATE_RX_FRAMES)
- mt7915_tm_set_rx_frames(dev, true);
- else if (prev_state == MT76_TM_STATE_OFF || state == MT76_TM_STATE_OFF)
- mt7915_tm_init(dev);
+ mphy->test.state = state;
+
+ if (prev_state == MT76_TM_STATE_TX_FRAMES ||
+ state == MT76_TM_STATE_TX_FRAMES)
+ mt7915_tm_set_tx_frames(phy, state == MT76_TM_STATE_TX_FRAMES);
+ else if (prev_state == MT76_TM_STATE_RX_FRAMES ||
+ state == MT76_TM_STATE_RX_FRAMES)
+ mt7915_tm_set_rx_frames(phy, state == MT76_TM_STATE_RX_FRAMES);
+ else if (prev_state == MT76_TM_STATE_TX_CONT ||
+ state == MT76_TM_STATE_TX_CONT)
+ mt7915_tm_set_tx_cont(phy, state == MT76_TM_STATE_TX_CONT);
+ else if (prev_state == MT76_TM_STATE_OFF ||
+ state == MT76_TM_STATE_OFF)
+ mt7915_tm_init(phy, !(state == MT76_TM_STATE_OFF));
if ((state == MT76_TM_STATE_IDLE &&
prev_state == MT76_TM_STATE_OFF) ||
@@ -284,18 +664,18 @@ mt7915_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
changed |= BIT(i);
}
- mt7915_tm_update_params(dev, changed);
+ mt7915_tm_update_params(phy, changed);
}
return 0;
}
static int
-mt7915_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
+mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
enum mt76_testmode_state new_state)
{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- struct mt76_testmode_data *td = &dev->mt76.test;
+ struct mt76_testmode_data *td = &mphy->test;
+ struct mt7915_phy *phy = mphy->priv;
u32 changed = 0;
int i;
@@ -305,7 +685,7 @@ mt7915_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
td->state == MT76_TM_STATE_OFF)
return 0;
- if (td->tx_antenna_mask & ~dev->phy.chainmask)
+ if (td->tx_antenna_mask & ~mphy->chainmask)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
@@ -313,15 +693,15 @@ mt7915_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
changed |= BIT(i);
}
- mt7915_tm_update_params(dev, changed);
+ mt7915_tm_update_params(phy, changed);
return 0;
}
static int
-mt7915_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
+mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_phy *phy = mphy->priv;
void *rx, *rssi;
int i;
@@ -329,15 +709,15 @@ mt7915_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rx)
return -ENOMEM;
- if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset))
+ if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, phy->test.last_freq_offset))
return -ENOMEM;
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++)
- if (nla_put_u8(msg, i, dev->test.last_rcpi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_rcpi); i++)
+ if (nla_put_u8(msg, i, phy->test.last_rcpi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
@@ -346,8 +726,8 @@ mt7915_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_ib_rssi); i++)
- if (nla_put_s8(msg, i, dev->test.last_ib_rssi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_ib_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_ib_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
@@ -356,13 +736,13 @@ mt7915_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rssi)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->test.last_wb_rssi); i++)
- if (nla_put_s8(msg, i, dev->test.last_wb_rssi[i]))
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_wb_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_wb_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
- if (nla_put_u8(msg, MT76_TM_RX_ATTR_SNR, dev->test.last_snr))
+ if (nla_put_u8(msg, MT76_TM_RX_ATTR_SNR, phy->test.last_snr))
return -ENOMEM;
nla_nest_end(msg, rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
index 964f2d7fde3a..8f8533ef9859 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
@@ -16,6 +16,23 @@ struct mt7915_tm_freq_offset {
__le32 freq_offset;
};
+struct mt7915_tm_slot_time {
+ u8 slot_time;
+ u8 sifs;
+ u8 rifs;
+ u8 _rsv;
+ __le16 eifs;
+ u8 band;
+ u8 _rsv1[5];
+};
+
+struct mt7915_tm_clean_txq {
+ bool sta_pause;
+ u8 wcid; /* 0 - 255, i.e. up to 256 STAs */
+ u8 band;
+ u8 rsv;
+};
+
struct mt7915_tm_cmd {
u8 testmode_en;
u8 param_idx;
@@ -24,6 +41,8 @@ struct mt7915_tm_cmd {
__le32 data;
struct mt7915_tm_trx trx;
struct mt7915_tm_freq_offset freq;
+ struct mt7915_tm_slot_time slot;
+ struct mt7915_tm_clean_txq clean;
u8 test[72];
} param;
} __packed;
@@ -37,4 +56,44 @@ enum {
TM_MAC_RX_RXV,
};
+struct tm_tx_cont {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_ant;
+ __le16 rateval;
+ u8 band;
+ u8 txfd_mode;
+};
+
+struct mt7915_tm_rf_test {
+ u8 action;
+ u8 icap_len;
+ u8 _rsv[2];
+ union {
+ __le32 op_mode;
+ __le32 freq;
+
+ struct {
+ __le32 func_idx;
+ union {
+ __le32 func_data;
+ __le32 cal_dump;
+
+ struct tm_tx_cont tx_cont;
+
+ u8 _pad[80];
+ } param;
+ } rf;
+ } op;
+} __packed;
+
+enum {
+ RF_OPER_NORMAL,
+ RF_OPER_RF_TEST,
+ RF_OPER_ICAP,
+ RF_OPER_ICAP_OVERLAP,
+ RF_OPER_WIFI_SPECTRUM,
+};
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
new file mode 100644
index 000000000000..001f2b9cec26
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: ISC
+config MT7921E
+ tristate "MediaTek MT7921E (PCIe) support"
+ select MT76_CONNAC_LIB
+ select WANT_DEV_COREDUMP
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7921E 802.11ax 2x2:2SS wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
new file mode 100644
index 000000000000..09d1446ad933
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7921E) += mt7921e.o
+
+mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
new file mode 100644
index 000000000000..0dc8e25e18e4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7921.h"
+#include "eeprom.h"
+
+static int
+mt7921_fw_debug_set(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+
+ dev->fw_debug = (u8)val;
+
+ mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
+
+ return 0;
+}
+
+static int
+mt7921_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7921_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7921_fw_debug_get,
+ mt7921_fw_debug_set, "%lld\n");
+
+static void
+mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
+ struct seq_file *file)
+{
+ struct mt7921_dev *dev = file->private;
+ int bound[15], range[4], i;
+
+ if (!phy)
+ return;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < ARRAY_SIZE(range); i++)
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
+
+ seq_printf(file, "\nPhy0\n");
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i] + 1, bound[i + 1]);
+
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+}
+
+static int
+mt7921_tx_stats_read(struct seq_file *file, void *data)
+{
+ struct mt7921_dev *dev = file->private;
+ int stat[8], i, n;
+
+ mt7921_ampdu_stat_read_phy(&dev->phy, file);
+
+ /* Tx amsdu info */
+ seq_puts(file, "Tx MSDU stat:\n");
+ for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
+ stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ n += stat[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stat); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ",
+ i + 1, stat[i]);
+ if (n != 0)
+ seq_printf(file, "(%d%%)\n", stat[i] * 100 / n);
+ else
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int
+mt7921_tx_stats_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7921_tx_stats_read, inode->i_private);
+}
+
+static const struct file_operations fops_tx_stats = {
+ .open = mt7921_tx_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int
+mt7921_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7921_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int j, acs = i / 4, index = i % 4;
+ u32 ctrl, val, qlen = 0;
+
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (index << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ }
+
+ return 0;
+}
+
+static int
+mt7921_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7921_dev *dev = dev_get_drvdata(s->private);
+ struct {
+ struct mt76_queue *q;
+ char *queue;
+ } queue_map[] = {
+ { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
+ { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" },
+ { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_queue *q = queue_map[i].q;
+
+ if (!q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
+ }
+
+ return 0;
+}
+
+static int
+mt7921_pm_set(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+ struct mt76_phy *mphy = dev->phy.mt76;
+ int ret = 0;
+
+ mt7921_mutex_acquire(dev);
+
+ dev->pm.enable = val;
+
+ ieee80211_iterate_active_interfaces(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_pm_interface_iter, mphy->priv);
+ mt7921_mutex_release(dev);
+
+ return ret;
+}
+
+static int
+mt7921_pm_get(void *data, u64 *val)
+{
+ struct mt7921_dev *dev = data;
+
+ *val = dev->pm.enable;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
+
+static int
+mt7921_pm_idle_timeout_set(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+
+ dev->pm.idle_timeout = msecs_to_jiffies(val);
+
+ return 0;
+}
+
+static int
+mt7921_pm_idle_timeout_get(void *data, u64 *val)
+{
+ struct mt7921_dev *dev = data;
+
+ *val = jiffies_to_msecs(dev->pm.idle_timeout);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get,
+ mt7921_pm_idle_timeout_set, "%lld\n");
+
+static int mt7921_config(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+ int ret;
+
+ mt7921_mutex_acquire(dev);
+ ret = mt76_connac_mcu_chip_config(&dev->mt76);
+ mt7921_mutex_release(dev);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7921_config, "%lld\n");
+
+int mt7921_init_debugfs(struct mt7921_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7921_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7921_queues_acq);
+ debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
+ debugfs_create_file("idle-timeout", 0600, dir, dev,
+ &fops_pm_idle_timeout);
+ debugfs_create_file("chip_config", 0600, dir, dev, &fops_config);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
new file mode 100644
index 000000000000..cd9665610284
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7921.h"
+#include "../dma.h"
+#include "mac.h"
+
+int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
+{
+ int i, err;
+
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
+
+ return 0;
+}
+
+void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ enum rx_pkt_type type;
+ u16 flag;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7921_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7921_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_NORMAL_MCU:
+ case PKT_TYPE_NORMAL:
+ if (!mt7921_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static void
+mt7921_tx_cleanup(struct mt7921_dev *dev)
+{
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
+}
+
+static int mt7921_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7921_dev *dev;
+
+ dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);
+
+ mt7921_tx_cleanup(dev);
+
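+ /* re-enable TX done interrupts once the NAPI cleanup pass completes */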
+ if (napi_complete_done(napi, 0))
+ mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
+ return 0;
+}
+
+void mt7921_dma_prefetch(struct mt7921_dev *dev)
+{
+#define PREFETCH(base, depth) ((base) << 16 | (depth))
+
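+ /* each EXT_CTRL word packs the prefetch base (bits 31:16) and depth (bits 15:0) */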
+ mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
+ mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
+}
+
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
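+ /* low addresses are passed through to the register window unchanged */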
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs >= fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+static int mt7921_dmashdl_disable(struct mt7921_dev *dev)
+{
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
+ return 0;
+}
+
+int mt7921_dma_init(struct mt7921_dev *dev)
+{
+ /* Increase buffer size to receive large VHT/HE MPDUs */
+ struct mt76_bus_ops *bus_ops;
+ int rx_buf_size = MT_RX_BUF_SIZE * 2;
+ int ret;
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mt76_dma_attach(&dev->mt76);
+
+ /* reset */
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ ret = mt7921_dmashdl_disable(dev);
+ if (ret)
+ return ret;
+
+ /* disable WFDMA0 */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+ /* init tx queue */
+ ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
+ MT7921_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4);
+
+ /* command to WM */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM,
+ MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL,
+ MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* event from WM before firmware download */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT7921_RXQ_MCU_WM,
+ MT7921_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_RX_EVENT_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* Change mcu queue after firmware download */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ MT7921_RXQ_MCU_WM,
+ MT7921_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_WFDMA0(0x540));
+ if (ret)
+ return ret;
+
+ /* rx data */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
+ rx_buf_size, MT_RX_DATA_RING_BASE);
+ if (ret)
+ return ret;
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7921_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ /* configure prefetch settings */
+ mt7921_dma_prefetch(dev);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+
+ /* configure delay interrupt */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
+ MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
+ MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ mt76_set(dev, 0x54000120, BIT(1));
+
+ /* enable interrupts for TX/RX rings */
+ mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ return 0;
+}
+
+void mt7921_dma_cleanup(struct mt7921_dev *dev)
+{
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ /* reset */
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
new file mode 100644
index 000000000000..691d14a1a7bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7921.h"
+#include "eeprom.h"
+
+static u32 mt7921_eeprom_read(struct mt7921_dev *dev, u32 offset)
+{
+ u8 *data = dev->mt76.eeprom.data;
+
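+ /* the EEPROM cache is initialized to 0xff, fetch unset bytes from the MCU */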
+ if (data[offset] == 0xff)
+ mt7921_mcu_get_eeprom(dev, offset);
+
+ return data[offset];
+}
+
+static int mt7921_eeprom_load(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7921_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ memset(dev->mt76.eeprom.data, -1, MT7921_EEPROM_SIZE);
+
+ return 0;
+}
+
+static int mt7921_check_eeprom(struct mt7921_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u16 val;
+
+ mt7921_eeprom_read(dev, MT_EE_CHIP_ID);
+ val = get_unaligned_le16(eeprom);
+
+ switch (val) {
+ case 0x7961:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ u32 val;
+
+ val = mt7921_eeprom_read(dev, MT_EE_WIFI_CONF);
+ val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
+
+ switch (val) {
+ case MT_EE_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ break;
+ case MT_EE_2GHZ:
+ phy->mt76->cap.has_2ghz = true;
+ break;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ phy->mt76->cap.has_5ghz = true;
+ break;
+ }
+}
+
+static void mt7921_eeprom_parse_hw_cap(struct mt7921_dev *dev)
+{
+ u8 tx_mask;
+
+ mt7921_eeprom_parse_band_config(&dev->phy);
+
+ /* TODO: read NSS with MCU_CMD_NIC_CAPV2 */
+ tx_mask = 2;
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
+}
+
+int mt7921_eeprom_init(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt7921_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7921_check_eeprom(dev);
+ if (ret)
+ return ret;
+
+ mt7921_eeprom_parse_hw_cap(dev);
+ memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mphy);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
new file mode 100644
index 000000000000..54f30401343c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7921_EEPROM_H
+#define __MT7921_EEPROM_H
+
+#include "mt7921.h"
+
+enum mt7921_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_WIFI_CONF = 0x07c,
+ __MT_EE_MAX = 0x3bf
+};
+
+#define MT_EE_WIFI_CONF_TX_MASK BIT(0)
+#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(3, 2)
+
+enum mt7921_eeprom_band {
+ MT_EE_NA,
+ MT_EE_5GHZ,
+ MT_EE_2GHZ,
+ MT_EE_DUAL_BAND,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
new file mode 100644
index 000000000000..89a13b4a74a4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include "mt7921.h"
+#include "mac.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7921_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = MT7921_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = MT7921_MAX_INTERFACES,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static void
+mt7921_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
+ dev->mt76.region = request->dfs_region;
+
+ mt7921_mutex_acquire(dev);
+ mt76_connac_mcu_set_channel_domain(hw->priv);
+ mt7921_mutex_release(dev);
+}
+
+static void
+mt7921_init_wiphy(struct ieee80211_hw *hw)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct wiphy *wiphy = hw->wiphy;
+
+ hw->queues = 4;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7921_sta);
+ hw->vif_data_size = sizeof(struct mt7921_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
+ wiphy->max_scan_ssids = 4;
+ wiphy->max_sched_scan_plan_interval =
+ MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+ wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
+ wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->reg_notifier = mt7921_regd_notifier;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+
+ hw->max_tx_fragments = 4;
+}
+
+static void
+mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
+{
+ u32 mask, set;
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_set(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mask = MT_MDP_RCFR0_MCU_RX_MGMT |
+ MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_MDP_RCFR0_MCU_RX_CTL_BAR;
+ set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
+
+ mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
+ MT_MDP_RCFR1_RX_DROPPED_UCAST |
+ MT_MDP_RCFR1_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
+ /* disable rx rate report by default due to hw issues */
+ mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+}
+
+static void mt7921_mac_init(struct mt7921_dev *dev)
+{
+ int i;
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
+ /* disable hardware de-agg */
+ mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+ mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
+
+ for (i = 0; i < MT7921_WTBL_SIZE; i++)
+ mt7921_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ for (i = 0; i < 2; i++)
+ mt7921_mac_init_band(dev, i);
+
+ mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
+}
+
+static void mt7921_init_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ init_work);
+
+ mt7921_mcu_set_eeprom(dev);
+ mt7921_mac_init(dev);
+}
+
+static int mt7921_init_hardware(struct mt7921_dev *dev)
+{
+ int ret, idx;
+
+ INIT_WORK(&dev->init_work, mt7921_init_work);
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7921_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /* force firmware operation mode into normal state,
+ * which should be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7921_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7921_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+int mt7921_register_device(struct mt7921_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+
+ INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work);
+ INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work);
+ init_completion(&dev->pm.wake_cmpl);
+ spin_lock_init(&dev->pm.txq_lock);
+ set_bit(MT76_STATE_PM, &dev->mphy.state);
+ INIT_LIST_HEAD(&dev->phy.stats_list);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
+ INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
+ INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
+ skb_queue_head_init(&dev->phy.scan_event_list);
+ skb_queue_head_init(&dev->coredump.msg_list);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+
+ ret = mt7921_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt7921_init_wiphy(hw);
+ dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7921_set_stream_he_caps(&dev->phy);
+
+ ret = mt76_register_device(&dev->mt76, true, mt7921_rates,
+ ARRAY_SIZE(mt7921_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
+ return mt7921_init_debugfs(dev);
+}
+
+void mt7921_unregister_device(struct mt7921_dev *dev)
+{
+ mt76_unregister_device(&dev->mt76);
+ mt7921_mcu_exit(dev);
+ mt7921_dma_cleanup(dev);
+
+ mt7921_tx_token_put(dev);
+
+ tasklet_disable(&dev->irq_tasklet);
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
new file mode 100644
index 000000000000..3f9097481a5e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -0,0 +1,1516 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/devcoredump.h>
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7921.h"
+#include "../dma.h"
+#include "mac.h"
+#include "mcu.h"
+
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
+
+static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
+ u16 idx, bool unicast)
+{
+ struct mt7921_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7921_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid)
+{
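+ /* select the WTBL group (128 entries per group) before computing the entry offset */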
+ mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
+ FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
+
+ return MT_WTBL_LMAC_OFFS(wcid, 0);
+}
+
+static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
+{
+ static const u8 ac_to_tid[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7921_sta *msta;
+ u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
+ int i;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+ u32 addr;
+ u16 idx;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&sta_poll_list,
+ struct mt7921_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ idx = msta->wcid.idx;
+ addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
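+ /* clear the WTBL admission counters before they overflow */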
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+
+ addr += 8;
+ }
+
+ if (clear) {
+ mt7921_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 q = mt7921_lmac_mapping(dev, i);
+ u32 tx_cur = tx_time[q];
+ u32 rx_cur = rx_time[q];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+static void
+mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct ieee80211_radiotap_he *he,
+ __le32 *rxv)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
+ ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
+ struct mt76_rx_status *status,
+ __le32 *rxv, u32 phy)
+{
+ /* TODO: struct ieee80211_radiotap_he_mu */
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he = NULL;
+ u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
+
+ switch (phy) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+ HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+
+ mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
+
+ mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
+ struct mt76_rx_status *status, u8 chfreq)
+{
+ if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
+ !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
+ !test_bit(MT76_STATE_ROC, &mphy->state)) {
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ return;
+ }
+
+ status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
+}
+
+int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7921_phy *phy = &dev->phy;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv = NULL;
+ u32 mode = 0;
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ u32 rxd3 = le32_to_cpu(rxd[3]);
+ bool unicast, insert_ccmp_hdr = false;
+ u8 remove_pad;
+ int i, idx;
+ u8 chfreq;
+
+ memset(status, 0, sizeof(*status));
+
+ if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
+ return -EINVAL;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
+ unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ struct mt7921_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ mt7921_get_status_freq_info(dev, mphy, status, chfreq);
+
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
+ !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd[14]) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd[14];
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ rxd += 6;
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ /* RXD Group 3 - P-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+ u32 v0, v1, v2;
+
+ rxv = rxd;
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v1 = le32_to_cpu(rxv[1]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ if (v0 & MT_PRXV_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+ status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
+ status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
+ status->signal = status->chain_signal[0];
+
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+
+ /* RXD Group 5 - C-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ bool cck = false;
+
+ rxd += 18;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_VHT;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ fallthrough;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_HE;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+				if (mode == MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ }
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
+ mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
+
+ hdr = mt76_skb_get_hdr(skb);
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
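
mt7921_mac_fill_rx() walks a variable-length descriptor: six mandatory 32-bit words, then optional groups whose presence is flagged in RXD DW1 (group 4: 4 words; group 1: 4 words carrying the stripped IV; group 2: 2 words; group 3: 2 words of P-RXV, plus 18 more when group 5/C-RXV follows), with a bounds check after every advance. A condensed userspace sketch of that walk, checking bounds once at the end for brevity (bit positions copied from mac.h below, everything else illustrative):

        #include <stddef.h>
        #include <stdint.h>

        #define GRP1 (1u << 11)
        #define GRP2 (1u << 12)
        #define GRP3 (1u << 13)
        #define GRP4 (1u << 14)
        #define GRP5 (1u << 15)

        /* Returns a pointer just past the descriptor, or NULL on overrun. */
        static const uint32_t *walk_rxd(const uint32_t *rxd, size_t words)
        {
                const uint32_t *end = rxd + words;
                uint32_t dw1 = rxd[1];

                rxd += 6;                       /* mandatory part */
                if (dw1 & GRP4) rxd += 4;
                if (dw1 & GRP1) rxd += 4;       /* carries the stripped IV */
                if (dw1 & GRP2) rxd += 2;
                if (dw1 & GRP3) rxd += 2;       /* P-RXV */
                if (dw1 & GRP5) rxd += 18;      /* C-RXV (follows P-RXV) */
                return rxd <= end ? rxd : NULL;
        }

        int main(void)
        {
                uint32_t rxd[16] = { 0, GRP1 | GRP3 }; /* 6 + 4 + 2 = 12 words */

                return walk_rxd(rxd, 16) ? 0 : 1;
        }
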
+
+static void
+mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid)
+{
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ u8 fc_type, fc_stype;
+ bool wmm = false;
+ u32 val;
+
+ if (wcid->sta) {
+ struct ieee80211_sta *sta;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ wmm = sta->wme;
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
+ val |= MT_TXD1_ETH_802_3;
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = IEEE80211_FTYPE_DATA >> 2;
+ fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+}
+
+static void
+mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct ieee80211_key_conf *key)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ __le16 fc = hdr->frame_control;
+ u8 fc_type, fc_stype;
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+
+ if (!ieee80211_is_data(fc) || multicast)
+ val |= MT_TXD2_FIX_RATE;
+
+ txwi[2] |= cpu_to_le32(val);
+
+ if (ieee80211_is_beacon(fc)) {
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ }
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+}
+
+void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ u16 tx_count = 15;
+ u32 val;
+
+ if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ }
+
+ if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_BCN0;
+ } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = MT_LMAC_ALTX0;
+ } else {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
+ mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+
+ txwi[1] = cpu_to_le32(val);
+ txwi[2] = 0;
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (key)
+ val |= MT_TXD3_PROTECT_FRAME;
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ val |= MT_TXD3_NO_ACK;
+
+ txwi[3] = cpu_to_le32(val);
+ txwi[4] = 0;
+ txwi[5] = 0;
+ txwi[6] = 0;
+ txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
+
+ if (is_8023)
+ mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
+ else
+ mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
+
+ if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
+ u16 rate;
+
+ /* hardware won't add HTC for mgmt/ctrl frame */
+ txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ rate = MT7921_5G_RATE_DEFAULT;
+ else
+ rate = MT7921_2G_RATE_DEFAULT;
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_TX_RATE, rate);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+}
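
The whole TXD is packed with FIELD_PREP() against GENMASK()-based field definitions (see mac.h below), so no hand-written shifts appear: q_idx lands in TXD0 bits 31:25 and the byte count in bits 15:0. A freestanding illustration of the idiom, with FIELD_PREP() reimplemented locally for 32-bit masks (a GCC/Clang builtin supplies the shift count):

        #include <stdint.h>
        #include <stdio.h>

        #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
        #define FIELD_PREP(mask, val) \
                (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

        #define MT_TXD0_Q_IDX    GENMASK(31, 25)
        #define MT_TXD0_PKT_FMT  GENMASK(24, 23)
        #define MT_TXD0_TX_BYTES GENMASK(15, 0)

        int main(void)
        {
                uint32_t txd0 = FIELD_PREP(MT_TXD0_TX_BYTES, 1540) |
                                FIELD_PREP(MT_TXD0_PKT_FMT, 0) |
                                FIELD_PREP(MT_TXD0_Q_IDX, 8);

                printf("TXD0 = 0x%08x\n", txd0); /* 0x10000604 */
                return 0;
        }
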
+
+static void
+mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt7921_hw_txp *txp = txp_ptr;
+ struct mt7921_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= MT_TXD_LEN_LAST;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
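
Each mt7921_txp_ptr (layout in mac.h below) carries two scatter buffers, so the loop drops even-numbered fragments into the buf0/len0 slot and odd-numbered ones into buf1/len1, advancing to the next ptr only once a pair is complete; the final fragment is tagged MT_TXD_LEN_LAST. The pairing in isolation (a sketch, host-endian and without any DMA concerns):

        #include <stdint.h>

        struct txp_ptr {        /* mirrors struct mt7921_txp_ptr */
                uint32_t buf0; uint16_t len0;
                uint16_t len1; uint32_t buf1;
        };

        #define LEN_LAST (1u << 15)

        static void fill_ptrs(struct txp_ptr *ptr, const uint32_t *addr,
                              const uint16_t *len, int nbuf)
        {
                for (int i = 0; i < nbuf; i++) {
                        uint16_t l = len[i] | (i == nbuf - 1 ? LEN_LAST : 0);

                        if (i & 1) {            /* odd: second slot, advance */
                                ptr->buf1 = addr[i];
                                ptr->len1 = l;
                                ptr++;
                        } else {                /* even: first slot */
                                ptr->buf0 = addr[i];
                                ptr->len0 = l;
                        }
                }
        }

        int main(void)
        {
                struct txp_ptr ptrs[2] = {{ 0 }};
                uint32_t addr[3] = { 0x1000, 0x2000, 0x3000 };
                uint16_t len[3] = { 128, 256, 64 };

                fill_ptrs(ptrs, addr, len, 3); /* third fragment gets LEN_LAST */
                return 0;
        }
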
+
+static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
+{
+ struct mt76_phy *mphy = &dev->mphy;
+ struct mt76_queue *q;
+
+ q = mphy->q_tx[0];
+ if (blocked == q->blocked)
+ return;
+
+ q->blocked = blocked;
+ if (!blocked)
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
+ struct mt76_txwi_cache *t;
+ struct mt7921_txp_common *txp;
+ int id;
+ u8 *txwi = (u8 *)txwi_ptr;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ cb->wcid = wcid->idx;
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
+ if (id >= 0)
+ dev->token_count++;
+
+ if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
+ mt7921_set_tx_blocked(dev, true);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (id < 0)
+ return id;
+
+ mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ false);
+
+ txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
+ memset(txp, 0, sizeof(struct mt7921_txp_common));
+ mt7921_write_hw_txp(dev, tx_info, txp, id);
+
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
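
Token allocation doubles as tx flow control here: every in-flight MSDU holds an idr slot, and once free slots drop below a threshold the queue is marked blocked until mt7921_mac_tx_free() below returns enough of them. The scheme reduced to a counter (locking elided; the two constants are illustrative stand-ins for MT7921_TOKEN_SIZE and MT7921_TOKEN_FREE_THR):

        #include <stdbool.h>

        #define TOKEN_SIZE     8192 /* illustrative */
        #define TOKEN_FREE_THR 8    /* illustrative */

        static int token_count;
        static bool tx_blocked;

        static bool token_get(void) /* mt7921_tx_prepare_skb() path */
        {
                if (token_count >= TOKEN_SIZE)
                        return false;
                if (++token_count >= TOKEN_SIZE - TOKEN_FREE_THR)
                        tx_blocked = true;
                return true;
        }

        static void token_put(void) /* mt7921_mac_tx_free() path */
        {
                if (--token_count < TOKEN_SIZE - TOKEN_FREE_THR)
                        tx_blocked = false; /* schedule the tx worker */
        }

        int main(void)
        {
                while (token_get())
                        ;            /* pool exhausted: tx is now blocked */
                while (tx_blocked)
                        token_put(); /* completions drain below the threshold */
                return 0;
        }
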
+
+static void
+mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+{
+ struct mt7921_sta *msta;
+ u16 fc, tid;
+ u32 val;
+
+ if (!sta || !sta->ht_cap.ht_supported)
+ return;
+
+ tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ if (tid >= 6) /* skip VO queue */
+ return;
+
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
+ return;
+
+ msta = (struct mt7921_sta *)sta->drv_priv;
+ if (!test_and_set_bit(tid, &msta->ampdu_state))
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+}
+
+static void
+mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, u8 stat,
+ struct list_head *free_list)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ .info = info,
+ .skb = skb,
+ .free_list = free_list,
+ };
+ struct ieee80211_hw *hw;
+
+ if (sta) {
+ struct mt7921_sta *msta;
+
+ msta = (struct mt7921_sta *)sta->drv_priv;
+ status.rate = &msta->stats.tx_rate;
+ }
+
+ hw = mt76_tx_status_get_hw(mdev, skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+
+ if (stat)
+ ieee80211_tx_info_clear_status(info);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.tx_time = 0;
+ ieee80211_tx_status_ext(hw, &status);
+}
+
+void mt7921_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7921_txp_common *txp;
+ int i;
+
+ txp = mt7921_txwi_to_txp(dev, t);
+
+ for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
+ struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & MT_TXD_LEN_LAST;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & MT_TXD_LEN_LAST;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ LIST_HEAD(free_list);
+ struct sk_buff *tmp;
+ bool wake = false;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+
+	/* TODO: MT_TX_FREE_LATENCY is the MSDU latency from the time the TXD
+	 * is queued into the PLE until the ACK is received or the frame is
+	 * dropped by hw (air + hw queue time). Use it for the Tx airtime
+	 * instead of accessing the WTBL.
+	 */
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(free->info[i]);
+ u8 stat;
+
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7921_sta *msta;
+ struct mt7921_phy *phy;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ phy = msta->vif->phy;
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->stats_list))
+ list_add_tail(&msta->stats_list, &phy->stats_list);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, msdu);
+ if (txwi)
+ dev->token_count--;
+ if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
+ dev->mphy.q_tx[0]->blocked)
+ wake = true;
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ continue;
+
+ mt7921_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
+ void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
+
+ if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi_ptr);
+
+ if (sta && !info->tx_time_est) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int pending;
+
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
+ mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+ }
+
+ if (wake) {
+ spin_lock_bh(&dev->token_lock);
+ mt7921_set_tx_blocked(dev, false);
+ spin_unlock_bh(&dev->token_lock);
+ }
+
+ napi_consume_skb(skb, 1);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+
+ if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
+ return;
+
+ mt7921_mac_sta_poll(dev);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
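
The free-event payload is a flat array of 32-bit words: a word with MT_TX_FREE_PAIR set switches the current wcid and consumes an array slot without freeing an MSDU (hence the count++ above), while any other word releases one token. A condensed decode loop (field positions copied from mac.h below, the rest illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define FREE_PAIR       (1u << 31)
        #define FREE_WLAN_ID(x) (((x) >> 14) & 0x3ff)  /* GENMASK(23, 14) */
        #define FREE_MSDU_ID(x) (((x) >> 16) & 0x7fff) /* GENMASK(30, 16) */

        static void decode_free(const uint32_t *info, unsigned int count)
        {
                unsigned int cur_wcid = 0;

                for (unsigned int i = 0; i < count; i++) {
                        if (info[i] & FREE_PAIR) {
                                cur_wcid = FREE_WLAN_ID(info[i]);
                                count++; /* pair words free no msdu */
                                continue;
                        }
                        printf("wcid %u: free msdu %u\n",
                               cur_wcid, FREE_MSDU_ID(info[i]));
                }
        }

        int main(void)
        {
                uint32_t info[3] = { FREE_PAIR | (5u << 14),
                                     1u << 16, 2u << 16 };

                decode_free(info, 2); /* 2 msdus + 1 pair word = 3 slots */
                return 0;
        }
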
+
+void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ struct mt7921_dev *dev;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7921_txp_common *txp;
+ u16 token;
+
+ txp = mt7921_txwi_to_txp(mdev, e->txwi);
+
+ token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
+ struct mt76_wcid *wcid;
+
+ wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
+
+ mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
+ NULL);
+ }
+}
+
+void mt7921_mac_reset_counters(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
+ }
+
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR37(0));
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+void mt7921_mac_set_timing(struct mt7921_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7921_dev *dev = phy->dev;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ int sifs, offset;
+ bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (is_5ghz)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ mt76_set(dev, MT_ARB_SCR(0),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
+ mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(0),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20 || is_5ghz)
+ val = MT7921_CFEND_RATE_DEFAULT;
+ else
+ val = MT7921_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(0),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
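
Coverage class stretches the CCK/OFDM timeouts by 3 us per class; the cck + reg_offset addition works field-wise only because both operands pack the PLCP and CCA values into disjoint GENMASK ranges, so no carry crosses a field boundary as long as neither sum overflows. Worked numbers for coverage class 5, using the base values from the function above:

        #include <stdio.h>

        int main(void)
        {
                int coverage_class = 5;
                int offset = 3 * coverage_class; /* 15 */

                printf("OFDM PLCP: %d\n", 60 + offset);  /* 75 */
                printf("OFDM CCA:  %d\n", 28 + offset);  /* 43 */
                printf("CCK PLCP:  %d\n", 231 + offset); /* 246 */
                printf("CCK CCA:   %d\n", 48 + offset);  /* 63 */
                return 0;
        }
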
+
+static u8
+mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
+{
+ return 0;
+}
+
+static void
+mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
+ struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
+ struct mt76_channel_state *state;
+ u64 busy_time, tx_time, rx_time, obss_time;
+ int nf;
+
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
+ MT_MIB_OBSSTIME_MASK);
+
+ nf = mt7921_phy_get_nf(phy, idx);
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
+
+ state = mphy->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
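
phy->noise is a fixed-point accumulator holding 16x the averaged noise-floor magnitude: seeding with nf << 4 and then applying noise += nf - (noise >> 4) is an exponential moving average with alpha = 1/16 in pure integer math, and the >> 4 at the end recovers the dB value (negated for reporting). A quick demonstration with made-up samples:

        #include <stdio.h>

        int main(void)
        {
                int noise = 0;
                int samples[] = { 92, 92, 96, 96, 96 }; /* |NF| in dB */

                for (int i = 0; i < 5; i++) {
                        int nf = samples[i];

                        if (!noise)
                                noise = nf << 4;            /* seed: 16 * nf */
                        else
                                noise += nf - (noise >> 4); /* EWMA, a = 1/16 */
                        printf("reported: %d dBm\n", -(noise >> 4));
                }
                return 0;
        }
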
+
+void mt7921_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+ return;
+
+ mt7921_phy_update_channel(&mdev->phy, 0);
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+}
+
+static bool
+mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7921_RESET_TIMEOUT);
+
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7921_dma_reset(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ int i;
+
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ usleep_range(1000, 2000);
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ /* re-init prefetch settings after reset */
+ mt7921_dma_prefetch(dev);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+}
+
+void mt7921_tx_token_put(struct mt7921_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7921_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
+ mt76_put_txwi(&dev->mt76, txwi);
+ dev->token_count--;
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+}
+
+/* system error recovery */
+void mt7921_mac_reset_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+
+ dev = container_of(work, struct mt7921_dev, reset_work);
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.napi[2]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mt7921_mutex_acquire(dev);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+ mt7921_tx_token_put(dev);
+ idr_init(&dev->token);
+
+ if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7921_dma_reset(&dev->phy);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ napi_enable(&dev->mt76.napi[2]);
+ napi_schedule(&dev->mt76.napi[2]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mt7921_mutex_release(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7921_WATCHDOG_TIME);
+}
+
+static void
+mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ int i, aggr0 = 0, aggr1;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ u32 val, val2;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
+
+ val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ if (val2 > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val2;
+
+ val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ if (val2 > mib->ba_miss_cnt)
+ mib->ba_miss_cnt = val2;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
+ val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ if (val2 > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt = val2;
+ }
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
+
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
+ }
+}
+
+static void
+mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ struct mt7921_sta *msta;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&phy->stats_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7921_sta, stats_list);
+ list_del_init(&msta->stats_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+		/* query wtbl info to report the current tx rate */
+ mt7921_get_wtbl_info(dev, msta->wcid.idx);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7921_mac_work(struct work_struct *work)
+{
+ struct mt7921_phy *phy;
+ struct mt76_phy *mphy;
+
+ mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
+ mac_work.work);
+ phy = mphy->priv;
+
+ if (test_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ mt7921_mutex_acquire(phy->dev);
+
+ mt76_update_survey(mphy->dev);
+ if (++mphy->mac_work_count == 5) {
+ mphy->mac_work_count = 0;
+
+ mt7921_mac_update_mib_stats(phy);
+ }
+ if (++phy->sta_work_count == 10) {
+ phy->sta_work_count = 0;
+ mt7921_mac_sta_stats_work(phy);
+	}
+
+ mt7921_mutex_release(phy->dev);
+
+out:
+ ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
+ MT7921_WATCHDOG_TIME);
+}
+
+void mt7921_pm_wake_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+ struct mt76_phy *mphy;
+
+ dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
+ pm.wake_work);
+ mphy = dev->phy.mt76;
+
+ if (!mt7921_mcu_drv_pmctrl(dev))
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ else
+ dev_err(mphy->dev->dev, "failed to wake device\n");
+
+ ieee80211_wake_queues(mphy->hw);
+ complete_all(&dev->pm.wake_cmpl);
+}
+
+void mt7921_pm_power_save_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+ unsigned long delta;
+
+ dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
+ pm.ps_work.work);
+
+ delta = dev->pm.idle_timeout;
+ if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+ delta = dev->pm.last_activity + delta - jiffies;
+ goto out;
+ }
+
+ if (!mt7921_mcu_fw_pmctrl(dev))
+ return;
+out:
+ queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+}
+
+int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (!dev->pm.enable)
+ return -EOPNOTSUPP;
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, enable);
+ if (err)
+ return err;
+
+ if (enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
+ return 0;
+}
+
+void mt7921_coredump_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+ char *dump, *data;
+
+ dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
+ coredump.work.work);
+
+ if (time_is_after_jiffies(dev->coredump.last_activity +
+ 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
+ queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
+ MT76_CONNAC_COREDUMP_TIMEOUT);
+ return;
+ }
+
+	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
+	data = dump;
+
+	while (true) {
+		struct sk_buff *skb;
+
+		spin_lock_bh(&dev->mt76.lock);
+		skb = __skb_dequeue(&dev->coredump.msg_list);
+		spin_unlock_bh(&dev->mt76.lock);
+
+		if (!skb)
+			break;
+
+		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+		if (!dump ||
+		    data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		memcpy(data, skb->data, skb->len);
+		data += skb->len;
+
+		dev_kfree_skb(skb);
+	}
+
+	if (dump)
+		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+			      GFP_KERNEL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
new file mode 100644
index 000000000000..a0c1fa0f20e4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7921_MAC_H
+#define __MT7921_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_FLAG GENMASK(19, 16)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 27)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT,
+ PKT_TYPE_NORMAL_MCU,
+};
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
+#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
+#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_CO_ANT BIT(6)
+#define MT_RXD2_NORMAL_BF_CQI BIT(7)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
+#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_AMSDU BIT(22)
+#define MT_RXD3_NORMAL_MESH BIT(23)
+#define MT_RXD3_NORMAL_MHCP BIT(24)
+#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
+#define MT_RXD3_NORMAL_MORE BIT(28)
+#define MT_RXD3_NORMAL_UNWANT BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD4_NORMAL_CLS BIT(10)
+#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+
+/* P-RXV */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_UPLINK BIT(31)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+#define MT_CRXV_SNR GENMASK(18, 13)
+#define MT_CRXV_FOE_LO GENMASK(31, 19)
+#define MT_CRXV_FOE_HI GENMASK(6, 0)
+#define MT_CRXV_FOE_SHIFT 13
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+#define MT_CT_INFO_FROM_HOST BIT(7)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_LONG_FORMAT BIT(31)
+#define MT_TXD1_TGID BIT(30)
+#define MT_TXD1_OWN_MAC GENMASK(29, 24)
+#define MT_TXD1_AMSDU BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_HDR_PAD GENMASK(19, 18)
+#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
+#define MT_TXD1_HDR_INFO GENMASK(15, 11)
+#define MT_TXD1_ETH_802_3 BIT(15)
+#define MT_TXD1_VTA BIT(10)
+#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_FIXED_RATE BIT(30)
+#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_TIMING_MEASURE BIT(5)
+#define MT_TXD3_DAS BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_MD BIT(15)
+#define MT_TXD5_ADD_BA BIT(14)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_IBF BIT(31)
+#define MT_TXD6_TX_EBF BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 16)
+#define MT_TXD6_SGI GENMASK(15, 14)
+#define MT_TXD6_HELTF GENMASK(13, 12)
+#define MT_TXD6_LDPC BIT(11)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_UDP_TCP_SUM BIT(29)
+#define MT_TXD7_IP_SUM BIT(28)
+
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TXD7_PSE_FID GENMASK(27, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_HW_AMSDU BIT(10)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(12, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_SU_EXT_TONE BIT(5)
+#define MT_TX_RATE_DCM BIT(4)
+#define MT_TX_RATE_IDX GENMASK(3, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt7921_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ __le16 rept_wds_wcid;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+struct mt7921_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le32 info[];
+} __packed __aligned(4);
+
+#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
+#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
+#define MT_TX_FREE_LATENCY GENMASK(12, 0)
+/* 0: success, others: dropped */
+#define MT_TX_FREE_STATUS GENMASK(14, 13)
+#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
+#define MT_TX_FREE_PAIR BIT(31)
+/* this field will be supported in a future revision */
+#define MT_TX_FREE_RATE GENMASK(13, 0)
+
+static inline struct mt7921_txp_common *
+mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
+}
+
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_MASK GENMASK(11, 0)
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+#define MT_TXD_LEN_LAST BIT(15)
+
+struct mt7921_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt7921_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt7921_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt7921_txp_common {
+ union {
+ struct mt7921_hw_txp hw;
+ };
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
new file mode 100644
index 000000000000..729f6c42cdde
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7921.h"
+#include "mcu.h"
+
+static void
+mt7921_gen_ppe_thresh(u8 *he_ppet, int nss)
+{
+ u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
+ u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+
+ he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
+ FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
+ ru_bit_mask);
+
+ ppet_bits = IEEE80211_PPE_THRES_INFO_PPET_SIZE *
+ nss * hweight8(ru_bit_mask) * 2;
+ ppet_size = DIV_ROUND_UP(ppet_bits, 8);
+
+ for (i = 0; i < ppet_size - 1; i++)
+ he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3];
+
+ he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3] &
+ (0xff >> (8 - (ppet_bits - 1) % 8));
+}
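
With ru_bit_mask = 0x7 the PPE field carries entries for three RU sizes, each with a 3-bit PPET16 and a 3-bit PPET8 per stream, so ppet_bits = 3 * nss * 3 * 2; for nss = 2 that is 36 bits, rounded up to 5 octets. The arithmetic in isolation:

        #include <stdio.h>

        #define PPET_SIZE 3 /* IEEE80211_PPE_THRES_INFO_PPET_SIZE: 3-bit PPET */

        int main(void)
        {
                int nss = 2, ru_count = 3;  /* ru_bit_mask = 0x7 */
                int ppet_bits = PPET_SIZE * nss * ru_count * 2;
                int ppet_size = (ppet_bits + 7) / 8; /* DIV_ROUND_UP */

                printf("%d bits -> %d octets\n", ppet_bits, ppet_size); /* 36 -> 5 */
                return 0;
        }
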
+
+static int
+mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data)
+{
+ int i, idx = 0;
+ int nss = hweight8(phy->mt76->chainmask);
+ u16 mcs_map = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs =
+ &he_cap->he_mcs_nss_supp;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+
+ he_cap_elem->mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
+ he_cap_elem->mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ }
+
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7921_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ }
+ idx++;
+ }
+
+ return idx;
+}
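
The HE MCS map packs one 2-bit capability per spatial stream, lowest stream in the least-significant bits; with IEEE80211_HE_MCS_SUPPORT_0_11 = 2 and IEEE80211_HE_MCS_NOT_SUPPORTED = 3, a 2-stream device therefore advertises mcs_map = 0xfffa. Reproduced standalone:

        #include <stdint.h>
        #include <stdio.h>

        #define HE_MCS_0_11        2 /* IEEE80211_HE_MCS_SUPPORT_0_11 */
        #define HE_MCS_UNSUPPORTED 3 /* IEEE80211_HE_MCS_NOT_SUPPORTED */

        int main(void)
        {
                int nss = 2;
                uint16_t mcs_map = 0;

                for (int i = 0; i < 8; i++)
                        mcs_map |= (i < nss ? HE_MCS_0_11
                                            : HE_MCS_UNSUPPORTED) << (i * 2);

                printf("mcs_map = 0x%04x\n", mcs_map); /* 0xfffa */
                return 0;
        }
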
+
+void mt7921_set_stream_he_caps(struct mt7921_phy *phy)
+{
+ struct ieee80211_sband_iftype_data *data;
+ struct ieee80211_supported_band *band;
+ int n;
+
+ if (phy->mt76->cap.has_2ghz) {
+ data = phy->iftype[NL80211_BAND_2GHZ];
+ n = mt7921_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+
+ band = &phy->mt76->sband_2g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_5ghz) {
+ data = phy->iftype[NL80211_BAND_5GHZ];
+ n = mt7921_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+
+ band = &phy->mt76->sband_5g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+}
+
+static int mt7921_start(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+
+ mt7921_mutex_acquire(dev);
+
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
+ mt76_connac_mcu_set_channel_domain(phy->mt76);
+
+ mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7921_mac_reset_counters(phy);
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7921_WATCHDOG_TIME);
+
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static void mt7921_stop(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ cancel_work_sync(&dev->pm.wake_work);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt7921_mutex_acquire(dev);
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
+ mt7921_mutex_release(dev);
+}
+
+static inline int get_free_idx(u32 mask, u8 start, u8 end)
+{
+ return ffs(~mask & GENMASK(end, start));
+}
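
get_free_idx() returns the 1-based position of the first clear bit of mask within [start, end] (0 when the range is full), which is why every caller subtracts one. For example, mask = 0x6 with start = 1, end = 3 gives ~mask & GENMASK(3, 1) = 0x8, ffs() = 4, so slot 3 is chosen. Standalone:

        #include <stdio.h>
        #include <strings.h> /* ffs() */

        #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

        static int get_free_idx(unsigned int mask, int start, int end)
        {
                return ffs(~mask & GENMASK(end, start));
        }

        int main(void)
        {
                int i = get_free_idx(0x6, 1, 3);

                if (i)
                        printf("first free slot: %d\n", i - 1); /* 3 */
                return 0;
        }
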
+
+static int get_omac_idx(enum nl80211_iftype type, u64 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ /* prefer hw bssid slot 1-3 */
+ i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
+ if (i)
+ return i - 1;
+
+ if (type != NL80211_IFTYPE_STATION)
+ break;
+
+ /* next, try to find a free repeater entry for the sta */
+ i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
+ REPEATER_BSSID_MAX - REPEATER_BSSID_START);
+ if (i)
+ return i + 32 - 1;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ break;
+ case NL80211_IFTYPE_MONITOR:
+		/* monitor uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static int mt7921_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ int idx, ret = 0;
+
+ mt7921_mutex_acquire(dev);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ phy->monitor_vif = vif;
+
+ mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = 0;
+ mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
+
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
+ true);
+ if (ret)
+ goto out;
+
+ if (dev->pm.enable) {
+ ret = mt7921_mcu_set_bss_pm(dev, vif, true);
+ if (ret)
+ goto out;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
+ dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+
+ idx = MT7921_WTBL_RESERVED - mvif->mt76.idx;
+
+ INIT_LIST_HEAD(&mvif->sta.stats_list);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt7921_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ }
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
+ vif->offload_flags = 0;
+
+ vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+
+out:
+ mt7921_mutex_release(dev);
+
+ return ret;
+}
+
+static void mt7921_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_sta *msta = &mvif->sta;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ if (vif == phy->monitor_vif)
+ phy->monitor_vif = NULL;
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+
+ if (dev->pm.enable) {
+ mt7921_mcu_set_bss_pm(dev, vif, false);
+ mt76_clear(dev, MT_WF_RFCR(0),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ mt7921_mutex_acquire(dev);
+ dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
+ mt7921_mutex_release(dev);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+int mt7921_set_channel(struct mt7921_phy *phy)
+{
+ struct mt7921_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mt7921_mutex_acquire(dev);
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt76_set_channel(phy->mt76);
+
+ ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ if (ret)
+ goto out;
+
+ mt7921_mac_set_timing(phy);
+
+ mt7921_mac_reset_counters(phy);
+ phy->noise = 0;
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+ mt7921_mutex_release(dev);
+
+ mt76_txq_schedule_all(phy->mt76);
+
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
+ MT7921_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+	/* The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid,
+ cmd == SET_KEY ? key : NULL);
+
+ return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+}
+
+static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ int ret;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7921_set_channel(phy);
+ if (ret)
+ return ret;
+ ieee80211_wake_queues(hw);
+ }
+
+ mt7921_mutex_acquire(dev);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ if (!enabled)
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN,
+ enabled);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ }
+
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+ queue = mt7921_lmac_mapping(dev, queue);
+ mvif->queue_params[queue] = *params;
+
+ return 0;
+}
+
+static void mt7921_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
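+/* Track each requested FIF_* flag and program the matching RFCR bits:
+ * the DROP bits are set in hardware whenever the flag is not requested.
+ */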
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mt7921_mutex_acquire(dev);
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+
+ mt7921_mutex_release(dev);
+}
+
+static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7921_mac_set_timing(phy);
+ }
+ }
+
+	/* ensure txcmd_mode is enabled after the bss_info update */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7921_mcu_set_tx(dev, vif);
+
+ if (changed & BSS_CHANGED_PS)
+ mt7921_mcu_uni_bss_ps(dev, vif);
+
+ mt7921_mutex_release(dev);
+}
+
+int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ int ret, idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->stats_list);
+ INIT_LIST_HEAD(&msta->poll_list);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->stats.jiffies = jiffies;
+
+ ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ if (ret)
+ return ret;
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+ true);
+
+ mt7921_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid,
+ true, MCU_UNI_CMD_STA_REC_UPDATE);
+ if (ret)
+ return ret;
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+
+ return 0;
+}
+
+void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+
+ mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+
+ mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+ false);
+ }
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ if (!list_empty(&msta->stats_list))
+ list_del_init(&msta->stats_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+}
+
+static void
+mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return;
+
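+	/* The chip is in firmware-own (power save) state: schedule the wake
+	 * worker instead of touching the TX scheduler directly.
+	 */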
+ if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return;
+ }
+
+ dev->pm.last_activity = jiffies;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+static void mt7921_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ int qid;
+
+ if (control->sta) {
+ struct mt7921_sta *sta;
+
+ sta = (struct mt7921_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7921_vif *mvif;
+
+ mvif = (struct mt7921_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+ dev->pm.last_activity = jiffies;
+ mt76_tx(mphy, control->sta, wcid, skb);
+ return;
+ }
+
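+	/* Device is asleep: remap powersave-delivery queues to best effort
+	 * and hold the frame in the PM queue until the chip wakes up.
+	 */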
+ qid = skb_get_queue_mapping(skb);
+ if (qid >= MT_TXQ_PSD) {
+ qid = IEEE80211_AC_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb);
+}
+
+static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+ mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, 0);
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mt7921_mutex_acquire(dev);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ mt7921_mcu_uni_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7921_mcu_uni_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7921_mcu_uni_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ mt7921_mcu_uni_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ mt7921_mcu_uni_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mt7921_mutex_release(dev);
+
+ return ret;
+}
+
+static int
+mt7921_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7921_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7921_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ return 0;
+}
+
+static u64
+mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ u8 omac_idx = mvif->mt76.omac_idx;
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+ u16 n;
+
+ mt7921_mutex_acquire(dev);
+
+ n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
+ /* TSF software read */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+
+ mt7921_mutex_release(dev);
+
+ return tsf.t64;
+}
+
+static void
+mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ u8 omac_idx = mvif->mt76.omac_idx;
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mt7921_mutex_acquire(dev);
+
+ n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ /* TSF software overwrite */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+
+ mt7921_mutex_release(dev);
+}
+
+static void
+mt7921_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = phy->dev;
+
+ mt7921_mutex_acquire(dev);
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7921_mac_set_timing(phy);
+ mt7921_mutex_release(dev);
+}
+
+void mt7921_scan_work(struct work_struct *work)
+{
+ struct mt7921_phy *phy;
+
+ phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy,
+ scan_work.work);
+
+ while (true) {
+ struct mt7921_mcu_rxd *rxd;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&phy->dev->mt76.lock);
+ skb = __skb_dequeue(&phy->scan_event_list);
+ spin_unlock_bh(&phy->dev->mt76.lock);
+
+ if (!skb)
+ break;
+
+ rxd = (struct mt7921_mcu_rxd *)skb->data;
+ if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
+ ieee80211_sched_scan_results(phy->mt76->hw);
+ } else if (test_and_clear_bit(MT76_HW_SCANNING,
+ &phy->mt76->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ ieee80211_scan_completed(phy->mt76->hw, &info);
+ }
+ dev_kfree_skb(skb);
+ }
+}
+
+static int
+mt7921_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7921_mutex_acquire(dev);
+ err = mt76_connac_mcu_hw_scan(mphy, vif, req);
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static void
+mt7921_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+
+ mt7921_mutex_acquire(dev);
+ mt76_connac_mcu_cancel_hw_scan(mphy, vif);
+ mt7921_mutex_release(dev);
+}
+
+static int
+mt7921_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7921_mutex_acquire(dev);
+
+ err = mt76_connac_mcu_sched_scan_req(mphy, vif, req);
+ if (err < 0)
+ goto out;
+
+ err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true);
+out:
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static int
+mt7921_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7921_mutex_acquire(dev);
+ err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false);
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static int
+mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
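+	/* Only contiguous antenna masks starting at chain 0 are usable;
+	 * fold any other pattern down to such a mask.
+	 */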
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mt7921_mutex_acquire(dev);
+
+ phy->mt76->antenna_mask = tx_ant;
+ phy->mt76->chainmask = tx_ant;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7921_set_stream_he_caps(phy);
+
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static void
+mt7921_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+}
+
+static void mt7921_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ struct mt7921_sta_stats *stats = &msta->stats;
+
+ if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ return;
+
+ if (stats->tx_rate.legacy) {
+ sinfo->txrate.legacy = stats->tx_rate.legacy;
+ } else {
+ sinfo->txrate.mcs = stats->tx_rate.mcs;
+ sinfo->txrate.nss = stats->tx_rate.nss;
+ sinfo->txrate.bw = stats->tx_rate.bw;
+ sinfo->txrate.he_gi = stats->tx_rate.he_gi;
+ sinfo->txrate.he_dcm = stats->tx_rate.he_dcm;
+ sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = stats->tx_rate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+#ifdef CONFIG_PM
+static int mt7921_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int err;
+
+ cancel_delayed_work_sync(&phy->scan_work);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt7921_mutex_acquire(dev);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76_connac_mcu_set_suspend_iter,
+ &dev->mphy);
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7921_resume(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int err;
+
+ mt7921_mutex_acquire(dev);
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ if (err < 0)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76_connac_mcu_set_suspend_iter,
+ &dev->mphy);
+
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7921_WATCHDOG_TIME);
+out:
+
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static void mt7921_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
+
+ device_set_wakeup_enable(mdev->dev, enabled);
+}
+
+static void mt7921_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+ mt76_connac_mcu_update_gtk_rekey(hw, vif, data);
+ mt7921_mutex_release(dev);
+}
+#endif /* CONFIG_PM */
+
+const struct ieee80211_ops mt7921_ops = {
+ .tx = mt7921_tx,
+ .start = mt7921_start,
+ .stop = mt7921_stop,
+ .add_interface = mt7921_add_interface,
+ .remove_interface = mt7921_remove_interface,
+ .config = mt7921_config,
+ .conf_tx = mt7921_conf_tx,
+ .configure_filter = mt7921_configure_filter,
+ .bss_info_changed = mt7921_bss_info_changed,
+ .sta_add = mt7921_sta_add,
+ .sta_remove = mt7921_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .sta_rc_update = mt7921_sta_rc_update,
+ .set_key = mt7921_set_key,
+ .ampdu_action = mt7921_ampdu_action,
+ .set_rts_threshold = mt7921_set_rts_threshold,
+ .wake_tx_queue = mt7921_wake_tx_queue,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .get_stats = mt7921_get_stats,
+ .get_tsf = mt7921_get_tsf,
+ .set_tsf = mt7921_set_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7921_set_antenna,
+ .set_coverage_class = mt7921_set_coverage_class,
+ .hw_scan = mt7921_hw_scan,
+ .cancel_hw_scan = mt7921_cancel_hw_scan,
+ .sta_statistics = mt7921_sta_statistics,
+ .sched_scan_start = mt7921_start_sched_scan,
+ .sched_scan_stop = mt7921_stop_sched_scan,
+#ifdef CONFIG_PM
+ .suspend = mt7921_suspend,
+ .resume = mt7921_resume,
+ .set_wakeup = mt7921_set_wakeup,
+ .set_rekey_data = mt7921_set_rekey_data,
+#endif /* CONFIG_PM */
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
new file mode 100644
index 000000000000..db125cd22b91
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -0,0 +1,1308 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+struct mt7921_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 reserved;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 reserved[11];
+ } desc;
+} __packed;
+
+struct mt7921_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 reserved[9];
+ } info;
+ };
+} __packed;
+
+struct mt7921_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserved[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+} __packed;
+
+struct mt7921_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 reserved[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 reserved1[15];
+} __packed;
+
+#define MT_STA_BFER BIT(0)
+#define MT_STA_BFEE BIT(1)
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_ENCRY_MODE BIT(4)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+
+static enum mt7921_cipher_type
+mt7921_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static int
+mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_eeprom_info *res;
+ u8 *buf;
+
+ if (!skb)
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+
+ res = (struct mt7921_mcu_eeprom_info *)skb->data;
+ buf = dev->eeprom.data + le32_to_cpu(res->addr);
+ memcpy(buf, res->data, 16);
+
+ return 0;
+}
+
+static int
+mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt7921_mcu_rxd *rxd;
+ int ret = 0;
+
+ if (!skb) {
+ dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ rxd = (struct mt7921_mcu_rxd *)skb->data;
+ if (seq != rxd->seq)
+ return -EAGAIN;
+
+ switch (cmd) {
+ case MCU_CMD_PATCH_SEM_CONTROL:
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
+ break;
+ case MCU_EXT_CMD_GET_TEMP:
+ skb_pull(skb, sizeof(*rxd) + 4);
+ ret = le32_to_cpu(*(__le32 *)skb->data);
+ break;
+ case MCU_EXT_CMD_EFUSE_ACCESS:
+ ret = mt7921_mcu_parse_eeprom(mdev, skb);
+ break;
+ case MCU_UNI_CMD_DEV_INFO_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_HIF_CTRL:
+ case MCU_UNI_CMD_OFFLOAD:
+ case MCU_UNI_CMD_SUSPEND: {
+ struct mt7921_mcu_uni_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7921_mcu_uni_event *)skb->data;
+ ret = le32_to_cpu(event->status);
+ break;
+ }
+ case MCU_CMD_REG_READ: {
+ struct mt7921_mcu_reg_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7921_mcu_reg_event *)skb->data;
+ ret = (int)le32_to_cpu(event->val);
+ break;
+ }
+ default:
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ enum mt76_mcuq_id txq = MT_MCUQ_WM;
+ struct mt7921_uni_txd *uni_txd;
+ struct mt7921_mcu_txd *mcu_txd;
+ __le32 *txd;
+ u32 val;
+ u8 seq;
+
+ /* TODO: make dynamic based on msg type */
+ mdev->mcu.timeout = 20 * HZ;
+
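+	/* Sequence numbers are 4 bits wide; 0 is never used so that events
+	 * with seq == 0 can be treated as unsolicited on the RX side.
+	 */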
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+
+ if (cmd == MCU_CMD_FW_SCATTER) {
+ txq = MT_MCUQ_FWDL;
+ goto exit;
+ }
+
+ txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
+ FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ if (cmd & MCU_UNI_PREFIX) {
+ uni_txd = (struct mt7921_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ goto exit;
+ }
+
+ mcu_txd = (struct mt7921_mcu_txd *)txd;
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
+ MT_TX_MCU_PORT_RX_Q0));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ switch (cmd & ~MCU_CMD_MASK) {
+ case MCU_FW_PREFIX:
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->cid = mcu_cmd;
+ break;
+ case MCU_CE_PREFIX:
+ if (cmd & MCU_QUERY_MASK)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->cid = mcu_cmd;
+ break;
+ default:
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ if (cmd & MCU_QUERY_PREFIX || cmd == MCU_EXT_CMD_EFUSE_ACCESS)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->ext_cid = mcu_cmd;
+ mcu_txd->ext_cid_ack = 1;
+ break;
+ }
+
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+ WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS &&
+ mcu_txd->set_query != MCU_Q_QUERY);
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+}
+
+static void
+mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy,
+ struct mt7921_mcu_peer_cap *peer,
+ struct rate_info *rate, u16 r)
+{
+ struct ieee80211_supported_band *sband;
+ u16 flags = 0;
+ u8 txmode = FIELD_GET(MT_WTBL_RATE_TX_MODE, r);
+ u8 gi = 0;
+ u8 bw = 0;
+
+ rate->mcs = FIELD_GET(MT_WTBL_RATE_MCS, r);
+ rate->nss = FIELD_GET(MT_WTBL_RATE_NSS, r) + 1;
+
+ switch (peer->bw) {
+ case IEEE80211_STA_RX_BW_160:
+ gi = peer->g16;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ gi = peer->g8;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ gi = peer->g4;
+ break;
+ default:
+ gi = peer->g2;
+ break;
+ }
+
+ gi = txmode >= MT_PHY_TYPE_HE_SU ?
+ FIELD_GET(MT_WTBL_RATE_HE_GI, gi) :
+ FIELD_GET(MT_WTBL_RATE_GI, gi);
+
+ switch (txmode) {
+ case MT_PHY_TYPE_CCK:
+ case MT_PHY_TYPE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ flags |= RATE_INFO_FLAGS_MCS;
+
+ if (gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ rate->he_gi = gi;
+ rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
+
+ flags |= RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ break;
+ }
+ rate->flags = flags;
+
+ bw = mt7921_mcu_chan_bw(&mphy->chandef) - FIELD_GET(MT_RA_RATE_BW, r);
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_160:
+ rate->bw = RATE_INFO_BW_160;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate->bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate->bw = RATE_INFO_BW_20;
+ break;
+ }
+}
+
+static void
+mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
+ u16 wlan_idx)
+{
+ struct mt7921_mcu_wlan_info_event *wtbl_info =
+ (struct mt7921_mcu_wlan_info_event *)(skb->data);
+ struct rate_info rate = {};
+ u8 curr_idx = wtbl_info->rate_info.rate_idx;
+ u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
+ struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+ struct mt76_phy *mphy = &dev->mphy;
+ struct mt7921_sta_stats *stats;
+ struct mt7921_sta *msta;
+ struct mt76_wcid *wcid;
+
+	if (wlan_idx >= MT76_N_WCIDS)
+		return;
+
+	rcu_read_lock();
+
+	wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
+	if (!wcid)
+		goto out;
+
+	msta = container_of(wcid, struct mt7921_sta, wcid);
+	stats = &msta->stats;
+
+	/* current rate */
+	mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+	stats->tx_rate = rate;
+out:
+	rcu_read_unlock();
+}
+
+static void
+mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ __skb_queue_tail(&phy->scan_event_list, skb);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
+ MT7921_HW_SCAN_TIMEOUT);
+}
+
+static void
+mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac_beacon_loss_event *event;
+ struct mt76_phy *mphy;
+ u8 band_idx = 0; /* DBDC support */
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ event = (struct mt76_connac_beacon_loss_event *)skb->data;
+ if (band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76_connac_mcu_beacon_loss_iter, event);
+}
+
+static void
+mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_mcu_bss_event *event;
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ event = (struct mt76_connac_mcu_bss_event *)skb->data;
+ if (event->is_absent)
+ ieee80211_stop_queues(mphy->hw);
+ else
+ ieee80211_wake_queues(mphy->hw);
+}
+
+static void
+mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+ struct debug_msg {
+ __le16 id;
+ u8 type;
+ u8 flag;
+ __le32 value;
+ __le16 len;
+ u8 content[512];
+	} __packed *debug_msg;
+ u16 cur_len;
+ int i;
+
+ skb_pull(skb, sizeof(*rxd));
+ debug_msg = (struct debug_msg *)skb->data;
+
+ cur_len = min_t(u16, le16_to_cpu(debug_msg->len), 512);
+
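+	/* Type 0x3 events carry log text; replace embedded NUL padding with
+	 * spaces and bound the print, since the buffer need not be
+	 * NUL-terminated.
+	 */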
+ if (debug_msg->type == 0x3) {
+		for (i = 0; i < cur_len; i++)
+ if (!debug_msg->content[i])
+ debug_msg->content[i] = ' ';
+
+		dev_dbg(dev->mt76.dev, "%.*s", cur_len, debug_msg->content);
+ }
+}
+
+static void
+mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_EVENT_BSS_BEACON_LOSS:
+ mt7921_mcu_beacon_loss_event(dev, skb);
+ break;
+ case MCU_EVENT_SCHED_SCAN_DONE:
+ case MCU_EVENT_SCAN_DONE:
+ mt7921_mcu_scan_event(dev, skb);
+ return;
+ case MCU_EVENT_BSS_ABSENCE:
+ mt7921_mcu_bss_event(dev, skb);
+ break;
+ case MCU_EVENT_DBG_MSG:
+ mt7921_mcu_debug_msg_event(dev, skb);
+ break;
+ case MCU_EVENT_COREDUMP:
+ mt76_connac_mcu_coredump_event(&dev->mt76, skb,
+ &dev->coredump);
+ return;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+
+ if (rxd->eid == 0x6) {
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ return;
+ }
+
+ if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
+ rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
+ rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_BSS_ABSENCE ||
+ rxd->eid == MCU_EVENT_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_DBG_MSG ||
+ rxd->eid == MCU_EVENT_COREDUMP ||
+ !rxd->seq)
+ mt7921_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+/* starec & wtbl */
+static int
+mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
+ struct ieee80211_key_conf *key, enum set_key_cmd cmd)
+{
+ struct mt7921_sta_key_conf *bip = &msta->bip;
+ struct sta_rec_sec *sec;
+ struct tlv *tlv;
+ u32 len = sizeof(*sec);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt7921_mcu_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
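+		/* A BIP key is programmed together with the pairwise CCMP
+		 * key cached earlier: CCMP goes in slot 0, BIP in slot 1.
+		 */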
+ if (cipher == MT_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ sec_key->key_id = bip->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, bip->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MT_CIPHER_AES_CCMP) {
+ memcpy(bip->key, key->key, key->keylen);
+ bip->keyidx = key->keyidx;
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ struct mt7921_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+	ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
+	if (ret) {
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
+
+ if (enable && !params->amsdu)
+ msta->wcid.amsdu = false;
+
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ enable, true);
+}
+
+int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
+
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ enable, false);
+}
+
+static int mt7921_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_NIC_POWER_CTRL, &req,
+ sizeof(req), false);
+}
+
+static int mt7921_driver_own(struct mt7921_dev *dev)
+{
+ u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+
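+	/* Request "driver own": move chip control from firmware to host and
+	 * poll until the hardware acknowledges by clearing FW_OWN.
+	 */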
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
+ 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7921_load_patch(struct mt7921_dev *dev)
+{
+ const struct mt7921_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+ int i, ret, sem;
+
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, MT7921_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7921_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt7921_patch_sec *sec;
+ const u8 *dl;
+ u32 len, addr;
+
+ sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
+ i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
+
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ dl, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+		ret = -EAGAIN;
+		dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+		break;
+ }
+ release_firmware(fw);
+
+ return ret;
+}
+
+static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int
+mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
+ const struct mt7921_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0;
+ u32 override = 0, option = 0;
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt7921_fw_region *region;
+ int err;
+ u32 len, addr, mode;
+
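+		/* Per-region descriptors sit back to back right before the
+		 * trailer at the end of the firmware image.
+		 */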
+ region = (const struct mt7921_fw_region *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
+ if (err) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ return err;
+ }
+
+ err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ data + offset, len);
+ if (err) {
+ dev_err(dev->mt76.dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
+}
+
+static int mt7921_load_ram(struct mt7921_dev *dev)
+{
+ const struct mt7921_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, MT7921_FIRMWARE_WM, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7921_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7921_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7921_load_firmware(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+		dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
+ return -EIO;
+ }
+
+ ret = mt7921_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7921_load_ram(dev);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
+ MT_TOP_MISC2_FW_N9_RDY, 1500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+
+ return -EIO;
+ }
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+#ifdef CONFIG_PM
+ dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
+#endif /* CONFIG_PM */
+
+ clear_bit(MT76_STATE_PM, &dev->mphy.state);
+
+	dev_info(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FWLOG_2_HOST, &data,
+ sizeof(data), false);
+}
+
+int mt7921_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7921_mcu_ops = {
+ .headroom = sizeof(struct mt7921_mcu_txd),
+ .mcu_skb_send_msg = mt7921_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_restart = mt7921_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7921_mcu_ops;
+
+ ret = mt7921_driver_own(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7921_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7921_mcu_fw_log_2_host(dev, 1);
+
+ return 0;
+}
+
+void mt7921_mcu_exit(struct mt7921_dev *dev)
+{
+ u32 reg = mt7921_reg_map_l1(dev, MT_TOP_MISC);
+
+ __mt76_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ FW_STATE_FW_DOWNLOAD), 1000)) {
+ dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+ return;
+ }
+
+ reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET GENMASK(3, 0)
+#define TX_CMD_MODE 1
+ struct edca {
+ u8 queue;
+ u8 set;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ };
+ struct mt7921_mcu_tx {
+ u8 total;
+ u8 action;
+ u8 valid;
+ u8 mode;
+
+ struct edca edca[IEEE80211_NUM_ACS];
+ } __packed req = {
+ .valid = true,
+ .mode = TX_CMD_MODE,
+ .total = IEEE80211_NUM_ACS,
+ };
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct edca *e = &req.edca[ac];
+
+ e->set = WMM_PARAM_SET;
+ e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
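+		/* The firmware takes contention windows as exponents: a CW
+		 * mask of 2^n - 1 is encoded as n, hence fls(); defaults are
+		 * 5 (CW 31) and 10 (CW 1023).
+		 */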
+ if (q->cw_min)
+ e->cw_min = fls(q->cw_min);
+ else
+ e->cw_min = 5;
+
+ if (q->cw_max)
+ e->cw_max = cpu_to_le16(fls(q->cw_max));
+ else
+ e->cw_max = cpu_to_le16(10);
+ }
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
+ sizeof(req), true);
+}
+
+int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
+{
+ struct mt7921_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1;
+ struct {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_streams_num;
+ u8 rx_streams; /* mask or num */
+ u8 switch_reason;
+ u8 band_idx;
+ u8 center_ch2; /* for 80+80 only */
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 ap_bw;
+ u8 ap_center_ch;
+ u8 rsv1[57];
+ } __packed req = {
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .bw = mt7921_mcu_chan_bw(chandef),
+ .tx_streams_num = hweight8(phy->mt76->antenna_mask),
+ .rx_streams = phy->mt76->antenna_mask,
+ .band_idx = phy != &dev->phy,
+ .channel_band = chandef->chan->band,
+ };
+
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
+ if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
+ req.rx_streams = hweight8(req.rx_streams);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ req.center_ch2 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+}
+
+int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
+{
+ struct req_hdr {
+ u8 buffer_mode;
+ u8 format;
+ __le16 len;
+ } __packed req = {
+ .buffer_mode = EE_MODE_EFUSE,
+ .format = EE_FORMAT_WHOLE,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ &req, sizeof(req), true);
+}
+
+int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
+{
+ struct mt7921_mcu_eeprom_info req = {
+ .addr = cpu_to_le32(round_down(offset, 16)),
+ };
+ struct mt7921_mcu_eeprom_info *res;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf;
+
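+	/* eFuse data is returned in aligned 16-byte blocks, so the offset is
+	 * rounded down and the whole block copied into the eeprom cache.
+	 */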
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7921_mcu_eeprom_info *)skb->data;
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ memcpy(buf, res->data, 16);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx)
+{
+ struct mt7921_mcu_wlan_info wtbl_info = {
+ .wlan_idx = cpu_to_le32(wlan_idx),
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_WTBL,
+ &wtbl_info, sizeof(wtbl_info), true,
+ &skb);
+ if (ret)
+ return ret;
+
+ mt7921_mcu_tx_rate_report(dev, skb, wlan_idx);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct ps_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 ps_state; /* 0: device awake
+ * 1: static power save
+ * 2: dynamic power saving
+ * 3: enter TWT power saving
+ * 4: leave TWT power saving
+ */
+ u8 pad[3];
+ } __packed ps;
+ } __packed ps_req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .ps = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_PS),
+ .len = cpu_to_le16(sizeof(struct ps_tlv)),
+ .ps_state = vif->bss_conf.ps ? 2 : 0,
+ },
+ };
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &ps_req, sizeof(ps_req), true);
+}
+
+int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcnft_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 pad;
+ } __packed bcnft;
+ } __packed bcnft_req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .bcnft = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
+ .len = cpu_to_le16(sizeof(struct bcnft_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ },
+ };
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &bcnft_req, sizeof(bcnft_req), true);
+}
+
+int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 dtim_period;
+ __le16 aid;
+ __le16 bcn_interval;
+ __le16 atim_window;
+ u8 uapsd;
+ u8 bmc_delivered_ac;
+ u8 bmc_triggered_ac;
+ u8 pad;
+ } req = {
+ .bss_idx = mvif->mt76.idx,
+ .aid = cpu_to_le16(vif->bss_conf.aid),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ };
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } req_hdr = {
+ .bss_idx = mvif->mt76.idx,
+ };
+ int err;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
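+	/* Tear down any previous BSS PM state first; the connected-state
+	 * parameters are only reprogrammed when enabling.
+	 */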
+ err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
+ sizeof(req_hdr), false);
+ if (err < 0 || !enable)
+ return err;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
+ sizeof(req), false);
+}
+
+int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int i;
+
+ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
+ break;
+ }
+
+ if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ return -EIO;
+ }
+
+out:
+ dev->pm.last_activity = jiffies;
+
+ return 0;
+}
+
+int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int i;
+
+ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
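+	/* Hand ownership back to the firmware so it can enter low-power
+	 * state; poll until the handover is acknowledged.
+	 */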
+ for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 4, 50))
+ break;
+ }
+
+ if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "firmware own failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_phy *phy = priv;
+ struct mt7921_dev *dev = phy->dev;
+
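+	/* With runtime PM on, mac80211 is told the vif filters beacons and
+	 * the RFCR beacon-drop bit is set; with PM off, both are undone.
+	 */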
+ if (mt7921_mcu_set_bss_pm(dev, vif, dev->pm.enable))
+ return;
+
+ if (dev->pm.enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
new file mode 100644
index 000000000000..2fdc62367b3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7921_MCU_H
+#define __MT7921_MCU_H
+
+#include "../mt76_connac_mcu.h"
+
+struct mt7921_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+	u8 set_query; /* firmware doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+/**
+ * struct mt7921_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (command packet in long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a fw reply
+ * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY
+ * is set, mcu firmware will send response event EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt7921_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 reserved;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 reserved2[4];
+} __packed __aligned(4);
+
+/* event table */
+enum {
+ MCU_EVENT_REG_ACCESS = 0x05,
+ MCU_EVENT_SCAN_DONE = 0x0d,
+ MCU_EVENT_BSS_ABSENCE = 0x11,
+ MCU_EVENT_BSS_BEACON_LOSS = 0x13,
+ MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_SCHED_SCAN_DONE = 0x23,
+ MCU_EVENT_DBG_MSG = 0x27,
+ MCU_EVENT_COREDUMP = 0xf0,
+};
+
+/* ext event table */
+enum {
+ MCU_EXT_EVENT_RATE_REPORT = 0x87,
+};
+
+struct mt7921_mcu_rxd {
+ __le32 rxd[6];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt7921_mcu_eeprom_info {
+ __le32 addr;
+ __le32 valid;
+ u8 data[16];
+} __packed;
+
+#define MT_RA_RATE_NSS GENMASK(8, 6)
+#define MT_RA_RATE_MCS GENMASK(3, 0)
+#define MT_RA_RATE_TX_MODE GENMASK(12, 9)
+#define MT_RA_RATE_DCM_EN BIT(4)
+#define MT_RA_RATE_BW GENMASK(14, 13)
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+struct mt7921_mcu_uni_event {
+ u8 cid;
+ u8 pad[3];
+ __le32 status; /* 0: success, others: fail */
+} __packed;
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL,
+ PATCH_IS_DL,
+ PATCH_NOT_DL_SEM_SUCCESS,
+ PATCH_REL_SEM_SUCCESS
+};
+
+enum {
+ FW_STATE_INITIAL,
+ FW_STATE_FW_DOWNLOAD,
+ FW_STATE_NORMAL_OPERATION,
+ FW_STATE_NORMAL_TRX,
+ FW_STATE_WACPU_RDY = 7
+};
+
+enum {
+ EE_MODE_EFUSE,
+ EE_MODE_BUFFER,
+};
+
+enum {
+ EE_FORMAT_BIN,
+ EE_FORMAT_WHOLE,
+ EE_FORMAT_MULTIPLE,
+};
+
+enum {
+ MCU_PHY_STATE_TX_RATE,
+ MCU_PHY_STATE_RX_RATE,
+ MCU_PHY_STATE_RSSI,
+ MCU_PHY_STATE_CONTENTION_RX_RATE,
+ MCU_PHY_STATE_OFDMLQ_CNINFO,
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+struct sec_key {
+ u8 cipher_id;
+ u8 cipher_len;
+ u8 key_id;
+ u8 key_len;
+ u8 key[32];
+} __packed;
+
+struct sta_rec_sec {
+ __le16 tag;
+ __le16 len;
+ u8 add;
+ u8 n_cipher;
+ u8 rsv[2];
+
+ struct sec_key key[2];
+} __packed;
+
+enum mt7921_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CCMP_256,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_BIP_CMAC_128,
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+enum {
+ THERMAL_SENSOR_TEMP_QUERY,
+ THERMAL_SENSOR_MANUAL_CTRL,
+ THERMAL_SENSOR_INFO_QUERY,
+ THERMAL_SENSOR_TASK_CTRL,
+};
+
+enum {
+ MT_EBF = BIT(0), /* explicit beamforming */
+ MT_IBF = BIT(1) /* implicit beamforming */
+};
+
+#define MT7921_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_hdr_trans) +\
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_smps))
+
+#define MT7921_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_he) + \
+ sizeof(struct sta_rec_ba) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct tlv) + \
+ MT7921_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7921_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
+
+#define STA_CAP_WMM BIT(0)
+#define STA_CAP_SGI_20 BIT(4)
+#define STA_CAP_SGI_40 BIT(5)
+#define STA_CAP_TX_STBC BIT(6)
+#define STA_CAP_RX_STBC BIT(7)
+#define STA_CAP_VHT_SGI_80 BIT(16)
+#define STA_CAP_VHT_SGI_160 BIT(17)
+#define STA_CAP_VHT_TX_STBC BIT(18)
+#define STA_CAP_VHT_RX_STBC BIT(19)
+#define STA_CAP_VHT_LDPC BIT(23)
+#define STA_CAP_LDPC BIT(24)
+#define STA_CAP_HT BIT(26)
+#define STA_CAP_VHT BIT(27)
+#define STA_CAP_HE BIT(28)
+
+struct mt7921_mcu_reg_event {
+ __le32 reg;
+ __le32 val;
+} __packed;
+
+struct mt7921_mcu_tx_config {
+ u8 peer_addr[ETH_ALEN];
+ u8 sw;
+ u8 dis_rx_hdr_tran;
+
+ u8 aad_om;
+ u8 pfmu_idx;
+ __le16 partial_aid;
+
+ u8 ibf;
+ u8 ebf;
+ u8 is_ht;
+ u8 is_vht;
+
+ u8 mesh;
+ u8 baf_en;
+ u8 cf_ack;
+ u8 rdg_ba;
+
+ u8 rdg;
+ u8 pm;
+ u8 rts;
+ u8 smps;
+
+ u8 txop_ps;
+ u8 not_update_ipsm;
+ u8 skip_tx;
+ u8 ldpc;
+
+ u8 qos;
+ u8 from_ds;
+ u8 to_ds;
+ u8 dyn_bw;
+
+ u8 amdsu_cross_lg;
+ u8 check_per;
+ u8 gid_63;
+ u8 he;
+
+ u8 vht_ibf;
+ u8 vht_ebf;
+ u8 vht_ldpc;
+ u8 he_ldpc;
+} __packed;
+
+struct mt7921_mcu_sec_config {
+ u8 wpi_flag;
+ u8 rv;
+ u8 ikv;
+ u8 rkv;
+
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 even_pn;
+
+ u8 key_id;
+ u8 muar_idx;
+ u8 cipher_suit;
+ u8 rsv[1];
+} __packed;
+
+struct mt7921_mcu_key_config {
+ u8 key[32];
+} __packed;
+
+struct mt7921_mcu_rate_info {
+ u8 mpdu_fail;
+ u8 mpdu_tx;
+ u8 rate_idx;
+ u8 rsv[1];
+ __le16 rate[8];
+} __packed;
+
+struct mt7921_mcu_ba_config {
+ u8 ba_en;
+ u8 rsv[3];
+ __le32 ba_winsize;
+} __packed;
+
+struct mt7921_mcu_ant_id_config {
+ u8 ant_id[4];
+} __packed;
+
+struct mt7921_mcu_peer_cap {
+ struct mt7921_mcu_ant_id_config ant_id_config;
+
+ u8 power_offset;
+ u8 bw_selector;
+ u8 change_bw_rate_n;
+ u8 bw;
+ u8 spe_idx;
+
+ u8 g2;
+ u8 g4;
+ u8 g8;
+ u8 g16;
+
+ u8 mmss;
+ u8 ampdu_factor;
+ u8 rsv[1];
+} __packed;
+
+struct mt7921_mcu_rx_cnt {
+ u8 rx_rcpi[4];
+ u8 rx_cc[4];
+ u8 rx_cc_sel;
+ u8 ce_rmsd;
+ u8 rsv[2];
+} __packed;
+
+struct mt7921_mcu_tx_cnt {
+ __le16 rate1_cnt;
+ __le16 rate1_fail_cnt;
+ __le16 rate2_cnt;
+ __le16 rate3_cnt;
+ __le16 cur_bw_tx_cnt;
+ __le16 cur_bw_tx_fail_cnt;
+ __le16 other_bw_tx_cnt;
+ __le16 other_bw_tx_fail_cnt;
+} __packed;
+
+struct mt7921_mcu_wlan_info_event {
+ struct mt7921_mcu_tx_config tx_config;
+ struct mt7921_mcu_sec_config sec_config;
+ struct mt7921_mcu_key_config key_config;
+ struct mt7921_mcu_rate_info rate_info;
+ struct mt7921_mcu_ba_config ba_config;
+ struct mt7921_mcu_peer_cap peer_cap;
+ struct mt7921_mcu_rx_cnt rx_cnt;
+ struct mt7921_mcu_tx_cnt tx_cnt;
+} __packed;
+
+struct mt7921_mcu_wlan_info {
+ __le32 wlan_idx;
+ struct mt7921_mcu_wlan_info_event event;
+} __packed;
+#endif
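
For orientation, the STA_CAP_* bits above form the firmware-facing
capability word for a station. A kernel-context sketch of how such a word
might be folded together from mac80211 capability fields follows; the
helper name is hypothetical and the sketch is illustrative only, not code
from this patch:

	static u32 mt7921_sta_cap_sketch(struct ieee80211_sta *sta)
	{
		u32 cap = 0;

		if (sta->ht_cap.ht_supported) {
			cap |= STA_CAP_HT;
			if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
				cap |= STA_CAP_SGI_20;
			if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
				cap |= STA_CAP_SGI_40;
			if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
				cap |= STA_CAP_LDPC;
		}
		if (sta->vht_cap.vht_supported) {
			cap |= STA_CAP_VHT;
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
				cap |= STA_CAP_VHT_SGI_80;
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
				cap |= STA_CAP_VHT_LDPC;
		}
		if (sta->he_cap.has_he)
			cap |= STA_CAP_HE;

		return cap;
	}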
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
new file mode 100644
index 000000000000..46e6aeec35ae
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7921_H
+#define __MT7921_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76_connac_mcu.h"
+#include "regs.h"
+
+#define MT7921_MAX_INTERFACES 4
+#define MT7921_MAX_WMM_SETS 4
+#define MT7921_WTBL_SIZE 20
+#define MT7921_WTBL_RESERVED (MT7921_WTBL_SIZE - 1)
+#define MT7921_WTBL_STA (MT7921_WTBL_RESERVED - \
+ MT7921_MAX_INTERFACES)
+
+#define MT7921_PM_TIMEOUT (HZ / 12)
+#define MT7921_HW_SCAN_TIMEOUT (HZ / 10)
+#define MT7921_WATCHDOG_TIME (HZ / 10)
+#define MT7921_RESET_TIMEOUT (30 * HZ)
+
+#define MT7921_TX_RING_SIZE 2048
+#define MT7921_TX_MCU_RING_SIZE 256
+#define MT7921_TX_FWDL_RING_SIZE 128
+
+#define MT7921_RX_RING_SIZE 1536
+#define MT7921_RX_MCU_RING_SIZE 512
+
+#define MT7921_DRV_OWN_RETRY_COUNT 10
+
+#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
+#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
+
+#define MT7921_EEPROM_SIZE 3584
+#define MT7921_TOKEN_SIZE 8192
+#define MT7921_TOKEN_FREE_THR 64
+
+#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7921_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7921_2G_RATE_DEFAULT 0x0 /* CCK 1M */
+
+#define MT7921_SKU_RATE_NUM 161
+#define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM
+#define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1)
+
+struct mt7921_vif;
+struct mt7921_sta;
+
+enum mt7921_txq_id {
+ MT7921_TXQ_BAND0,
+ MT7921_TXQ_BAND1,
+ MT7921_TXQ_FWDL = 16,
+ MT7921_TXQ_MCU_WM,
+};
+
+enum mt7921_rxq_id {
+ MT7921_RXQ_BAND0 = 0,
+ MT7921_RXQ_BAND1,
+ MT7921_RXQ_MCU_WM = 0,
+};
+
+struct mt7921_sta_stats {
+ struct rate_info prob_rate;
+ struct rate_info tx_rate;
+
+ unsigned long per;
+ unsigned long changed;
+ unsigned long jiffies;
+};
+
+struct mt7921_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
+struct mt7921_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7921_vif *vif;
+
+ struct list_head stats_list;
+ struct list_head poll_list;
+ u32 airtime_ac[8];
+
+ struct mt7921_sta_stats stats;
+
+ unsigned long ampdu_state;
+
+ struct mt7921_sta_key_conf bip;
+};
+
+struct mt7921_vif {
+ struct mt76_vif mt76; /* must be first */
+
+ struct mt7921_sta sta;
+ struct mt7921_phy *phy;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+};
+
+struct mib_stats {
+ u16 ack_fail_cnt;
+ u16 fcs_err_cnt;
+ u16 rts_cnt;
+ u16 rts_retries_cnt;
+ u16 ba_miss_cnt;
+};
+
+struct mt7921_phy {
+ struct mt76_phy *mt76;
+ struct mt7921_dev *dev;
+
+ struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+
+ struct ieee80211_vif *monitor_vif;
+
+ u32 rxfilter;
+ u64 omac_mask;
+
+ u16 noise;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+ struct list_head stats_list;
+
+ u8 sta_work_count;
+
+ struct sk_buff_head scan_event_list;
+ struct delayed_work scan_work;
+};
+
+struct mt7921_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ const struct mt76_bus_ops *bus_ops;
+ struct mt7921_phy phy;
+ struct tasklet_struct irq_tasklet;
+
+ u16 chainmask;
+
+ struct work_struct init_work;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
+ spinlock_t token_lock;
+ int token_count;
+ struct idr token;
+
+ u8 fw_debug;
+
+ struct mt76_connac_pm pm;
+ struct mt76_connac_coredump coredump;
+};
+
+enum {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+};
+
+static inline struct mt7921_phy *
+mt7921_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7921_dev *
+mt7921_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7921_dev, mt76);
+}
+
+#define mt7921_mutex_acquire(dev) \
+ mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm)
+#define mt7921_mutex_release(dev) \
+ mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
+
+static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
+extern const struct ieee80211_ops mt7921_ops;
+extern struct pci_driver mt7921_pci_driver;
+
+u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
+
+int mt7921_register_device(struct mt7921_dev *dev);
+void mt7921_unregister_device(struct mt7921_dev *dev);
+int mt7921_eeprom_init(struct mt7921_dev *dev);
+void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy);
+int mt7921_eeprom_get_target_power(struct mt7921_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+void mt7921_eeprom_init_sku(struct mt7921_dev *dev);
+int mt7921_dma_init(struct mt7921_dev *dev);
+void mt7921_dma_prefetch(struct mt7921_dev *dev);
+void mt7921_dma_cleanup(struct mt7921_dev *dev);
+int mt7921_mcu_init(struct mt7921_dev *dev);
+int mt7921_mcu_add_bss_info(struct mt7921_phy *phy,
+ struct ieee80211_vif *vif, int enable);
+int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ struct mt7921_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+int mt7921_set_channel(struct mt7921_phy *phy);
+int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
+int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
+int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
+int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
+int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct rate_info *rate);
+int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
+void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb);
+void mt7921_mcu_exit(struct mt7921_dev *dev);
+
+static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->irq_tasklet);
+}
+
+static inline u32
+mt7921_reg_map_l1(struct mt7921_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L1);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static inline u32
+mt7921_l1_rr(struct mt7921_dev *dev, u32 addr)
+{
+ return mt76_rr(dev, mt7921_reg_map_l1(dev, addr));
+}
+
+static inline void
+mt7921_l1_wr(struct mt7921_dev *dev, u32 addr, u32 val)
+{
+ mt76_wr(dev, mt7921_reg_map_l1(dev, addr), val);
+}
+
+static inline u32
+mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val)
+{
+ val |= mt7921_l1_rr(dev, addr) & ~mask;
+ mt7921_l1_wr(dev, addr, val);
+
+ return val;
+}
+
+#define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val)
+#define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0)
+
+bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
+void mt7921_mac_reset_counters(struct mt7921_phy *phy);
+void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7921_mac_set_timing(struct mt7921_phy *phy);
+int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb);
+void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb);
+void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb);
+int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7921_mac_work(struct work_struct *work);
+void mt7921_mac_reset_work(struct work_struct *work);
+int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
+void mt7921_tx_token_put(struct mt7921_dev *dev);
+void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+void mt7921_stats_work(struct work_struct *work);
+void mt7921_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
+void mt7921_update_channel(struct mt76_dev *mdev);
+int mt7921_init_debugfs(struct mt7921_dev *dev);
+
+int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+void mt7921_scan_work(struct work_struct *work);
+u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx);
+int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
+int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info);
+int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
+int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
+void mt7921_pm_wake_work(struct work_struct *work);
+void mt7921_pm_power_save_work(struct work_struct *work);
+bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
+int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
+void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+void mt7921_coredump_work(struct work_struct *work);
+#endif
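
The mt7921_reg_map_l1() helper above splits a 32-bit chip address into a
remap-window base (programmed into MT_HIF_REMAP_L1) and an in-window
offset. The arithmetic can be reproduced as a self-contained sketch, with
the FIELD_GET/GENMASK helpers inlined and the sample address chosen
arbitrarily:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

	#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
	#define MT_HIF_REMAP_L1_BASE   GENMASK(31, 16)
	#define MT_HIF_REMAP_BASE_L1   0xe0000

	int main(void)
	{
		uint32_t addr = 0x70010200; /* MT_HW_CHIPID */
		uint32_t offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
		uint32_t base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);

		/* base 0x7001 goes into the remap register; the access
		 * is then issued through the fixed window at 0xe0000 */
		printf("base=0x%x window=0x%x\n", base,
		       MT_HIF_REMAP_BASE_L1 + offset);
		return 0;
	}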
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
new file mode 100644
index 000000000000..5570b4a50531
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7921.h"
+#include "mac.h"
+#include "mcu.h"
+#include "../trace.h"
+
+static const struct pci_device_id mt7921_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7961) },
+ { },
+};
+
+static void
+mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ if (q == MT_RXQ_MAIN)
+ mt7921_irq_enable(dev, MT_INT_RX_DONE_DATA);
+ else if (q == MT_RXQ_MCU_WA)
+ mt7921_irq_enable(dev, MT_INT_RX_DONE_WM2);
+ else
+ mt7921_irq_enable(dev, MT_INT_RX_DONE_WM);
+}
+
+static irqreturn_t mt7921_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7921_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void mt7921_irq_tasklet(unsigned long data)
+{
+ struct mt7921_dev *dev = (struct mt7921_dev *)data;
+ u32 intr, mask = 0;
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+
+ intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask |= intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0);
+
+ if (intr & MT_INT_TX_DONE_ALL)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX_DONE_WM)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX_DONE_WM2)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (intr & MT_INT_RX_DONE_DATA)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+}
+
+static int mt7921_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7921_txp_common),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7921_tx_prepare_skb,
+ .tx_complete_skb = mt7921_tx_complete_skb,
+ .rx_skb = mt7921_queue_rx_skb,
+ .rx_poll_complete = mt7921_rx_poll_complete,
+ .sta_ps = mt7921_sta_ps,
+ .sta_add = mt7921_mac_sta_add,
+ .sta_remove = mt7921_mac_sta_remove,
+ .update_survey = mt7921_update_channel,
+ };
+ struct mt7921_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_free_pci_vec;
+
+ mt76_pci_disable_aspm(pdev);
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops,
+ &drv_ops);
+ if (!mdev) {
+ ret = -ENOMEM;
+ goto err_free_pci_vec;
+ }
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+ mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+
+ mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = mt7921_register_device(dev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ mt76_free_device(&dev->mt76);
+err_free_pci_vec:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+static void mt7921_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt7921_unregister_device(dev);
+ devm_free_irq(&pdev->dev, pdev->irq, dev);
+ pci_free_irq_vectors(pdev);
+}
+
+#ifdef CONFIG_PM
+static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ bool hif_suspend;
+ int i, err;
+
+ err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ if (err < 0)
+ return err;
+
+ hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
+ if (hif_suspend) {
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ if (err)
+ return err;
+ }
+
+ napi_disable(&mdev->tx_napi);
+ mt76_worker_disable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i) {
+ napi_disable(&mdev->napi[i]);
+ }
+ tasklet_kill(&dev->irq_tasklet);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+
+ /* wait until dma is idle */
+ mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+ /* disable dma */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ /* disable interrupt */
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+
+ err = mt7921_mcu_fw_pmctrl(dev);
+ if (err)
+ goto restore;
+
+ pci_save_state(pdev);
+ err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (err)
+ goto restore;
+
+ return 0;
+
+restore:
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ if (hif_suspend)
+ mt76_connac_mcu_set_hif_suspend(mdev, false);
+
+ return err;
+}
+
+static int mt7921_pci_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ int i, err;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ err = mt7921_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ return err;
+
+ /* enable interrupt */
+ mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ /* enable dma */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+struct pci_driver mt7921_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7921_pci_device_table,
+ .probe = mt7921_pci_probe,
+ .remove = mt7921_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = mt7921_pci_suspend,
+ .resume = mt7921_pci_resume,
+#endif /* CONFIG_PM */
+};
+
+module_pci_driver(mt7921_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
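
The interrupt path in this file follows a mask-while-polling discipline:
the hard IRQ handler writes 0 to MT_WFDMA0_HOST_INT_ENA and defers to the
tasklet, the tasklet acknowledges pending sources and masks them out, and
each source is re-enabled from mt7921_rx_poll_complete() once its NAPI
poll finishes. A standalone model of that bookkeeping, with the register
reduced to a plain variable purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t irqmask = 0x7; /* sw copy of enabled sources */
	static uint32_t host_int_ena;  /* stands in for MT_WFDMA0_HOST_INT_ENA */

	/* models mt76_set_irq_mask(): update the sw mask, mirror to hw */
	static void set_irq_mask(uint32_t clear, uint32_t set)
	{
		irqmask = (irqmask & ~clear) | set;
		host_int_ena = irqmask;
	}

	int main(void)
	{
		uint32_t intr = 0x4 & irqmask; /* pretend RX-data fired */

		set_irq_mask(intr, 0); /* tasklet: mask while NAPI runs */
		printf("polling: ena=0x%x\n", host_int_ena);
		set_irq_mask(0, intr); /* rx_poll_complete: re-enable */
		printf("done:    ena=0x%x\n", host_int_ena);
		return 0;
	}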
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
new file mode 100644
index 000000000000..18980bb32dee
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7921_REGS_H
+#define __MT7921_REGS_H
+
+/* MCU WFDMA1 */
+#define MT_MCU_WFDMA1_BASE 0x3000
+#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
+
+#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+#define MT_PLE_BASE 0x8000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+
+#define MT_MDP_BASE 0xf000
+#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
+
+#define MT_MDP_DCR0 MT_MDP(0x000)
+#define MT_MDP_DCR0_DAMSDU_EN BIT(15)
+#define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19)
+
+#define MT_MDP_DCR1 MT_MDP(0x004)
+#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
+
+#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
+#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
+#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
+
+#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
+#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
+#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
+#define MT_MDP_TO_HIF 0
+#define MT_MDP_TO_WM 1
+
+/* TMAC: band 0(0x21000), band 1(0xa1000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
+
+#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
+#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
+
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
+
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
+
+#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
+#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+
+/* LPON: band 0(0x24200), band 1(0xa4200) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
+
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_SW_WRITE BIT(0)
+
+/* MIB: band 0(0x24800), band 1(0xa4800) */
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
+
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4))
+#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0)
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
+
+#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
+
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_WTBL_BASE 0x38000
+#define MT_WTBL_LMAC_ID GENMASK(14, 8)
+#define MT_WTBL_LMAC_DW GENMASK(7, 2)
+#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+
+/* AGG: band 0(0x20800), band 1(0xa0800) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
+#define MT_AGG_PCR0_MM_PROT BIT(0)
+#define MT_AGG_PCR0_GF_PROT BIT(1)
+#define MT_AGG_PCR0_BW20_PROT BIT(2)
+#define MT_AGG_PCR0_BW40_PROT BIT(4)
+#define MT_AGG_PCR0_BW80_PROT BIT(6)
+#define MT_AGG_PCR0_ERP_PROT GENMASK(12, 8)
+#define MT_AGG_PCR0_VHT_PROT BIT(13)
+#define MT_AGG_PCR0_PTA_WIN_DIS BIT(15)
+
+#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
+#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
+
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
+#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
+
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
+
+/* ARB: band 0(0x20c00), band 1(0xa0c00) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
+
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
+
+/* RMAC: band 0(0x21400), band 1(0xa1400) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
+
+/* WFDMA0 */
+#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
+
+#define MT_WFDMA0_RST MT_WFDMA0(0x100)
+#define MT_WFDMA0_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_MCU_CMD MT_WFDMA0(0x1f0)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+#define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200)
+#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */
+#define HOST_RX_DONE_INT_STS2 BIT(2) /* Rx data */
+#define HOST_RX_DONE_INT_STS4 BIT(22) /* Rx mcu after fw download */
+#define HOST_TX_DONE_INT_STS16 BIT(26)
+#define HOST_TX_DONE_INT_STS17 BIT(27) /* MCU tx done */
+
+#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204)
+#define HOST_RX_DONE_INT_ENA0 BIT(0)
+#define HOST_RX_DONE_INT_ENA1 BIT(1)
+#define HOST_RX_DONE_INT_ENA2 BIT(2)
+#define HOST_RX_DONE_INT_ENA3 BIT(3)
+#define HOST_TX_DONE_INT_ENA0 BIT(4)
+#define HOST_TX_DONE_INT_ENA1 BIT(5)
+#define HOST_TX_DONE_INT_ENA2 BIT(6)
+#define HOST_TX_DONE_INT_ENA3 BIT(7)
+#define HOST_TX_DONE_INT_ENA4 BIT(8)
+#define HOST_TX_DONE_INT_ENA5 BIT(9)
+#define HOST_TX_DONE_INT_ENA6 BIT(10)
+#define HOST_TX_DONE_INT_ENA7 BIT(11)
+#define HOST_TX_DONE_INT_ENA8 BIT(12)
+#define HOST_TX_DONE_INT_ENA9 BIT(13)
+#define HOST_TX_DONE_INT_ENA10 BIT(14)
+#define HOST_TX_DONE_INT_ENA11 BIT(15)
+#define HOST_TX_DONE_INT_ENA12 BIT(16)
+#define HOST_TX_DONE_INT_ENA13 BIT(17)
+#define HOST_TX_DONE_INT_ENA14 BIT(18)
+#define HOST_RX_COHERENT_EN BIT(20)
+#define HOST_TX_COHERENT_EN BIT(21)
+#define HOST_RX_DONE_INT_ENA4 BIT(22)
+#define HOST_RX_DONE_INT_ENA5 BIT(23)
+#define HOST_TX_DONE_INT_ENA16 BIT(26)
+#define HOST_TX_DONE_INT_ENA17 BIT(27)
+#define MCU2HOST_SW_INT_ENA BIT(29)
+#define HOST_TX_DONE_INT_ENA18 BIT(30)
+
+/* WFDMA interrupt */
+#define MT_INT_RX_DONE_DATA HOST_RX_DONE_INT_ENA2
+#define MT_INT_RX_DONE_WM HOST_RX_DONE_INT_ENA0
+#define MT_INT_RX_DONE_WM2 HOST_RX_DONE_INT_ENA4
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_DATA | \
+ MT_INT_RX_DONE_WM | \
+ MT_INT_RX_DONE_WM2)
+#define MT_INT_TX_DONE_MCU_WM HOST_TX_DONE_INT_ENA17
+#define MT_INT_TX_DONE_FWDL HOST_TX_DONE_INT_ENA16
+#define MT_INT_TX_DONE_BAND0 HOST_TX_DONE_INT_ENA0
+#define MT_INT_MCU_CMD MCU2HOST_SW_INT_ENA
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \
+ MT_INT_TX_DONE_FWDL)
+#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \
+ MT_INT_TX_DONE_BAND0 | \
+ GENMASK(18, 4))
+
+#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_CLK_GAT_DIS BIT(30)
+
+#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
+#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
+#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+
+#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x520)
+
+#define MT_WFDMA0_TX_RING0_EXT_CTRL MT_WFDMA0(0x600)
+#define MT_WFDMA0_TX_RING1_EXT_CTRL MT_WFDMA0(0x604)
+#define MT_WFDMA0_TX_RING2_EXT_CTRL MT_WFDMA0(0x608)
+#define MT_WFDMA0_TX_RING3_EXT_CTRL MT_WFDMA0(0x60c)
+#define MT_WFDMA0_TX_RING4_EXT_CTRL MT_WFDMA0(0x610)
+#define MT_WFDMA0_TX_RING5_EXT_CTRL MT_WFDMA0(0x614)
+#define MT_WFDMA0_TX_RING6_EXT_CTRL MT_WFDMA0(0x618)
+#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640)
+#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644)
+
+#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
+#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
+#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+#define MT_WFDMA0_RX_RING3_EXT_CTRL MT_WFDMA0(0x68c)
+#define MT_WFDMA0_RX_RING4_EXT_CTRL MT_WFDMA0(0x690)
+#define MT_WFDMA0_RX_RING5_EXT_CTRL MT_WFDMA0(0x694)
+
+#define MT_TX_RING_BASE MT_WFDMA0(0x300)
+#define MT_RX_EVENT_RING_BASE MT_WFDMA0(0x500)
+
+/* WFDMA CSR */
+#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+
+#define MT_INFRA_CFG_BASE 0xfe000
+#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+
+#define MT_HIF_REMAP_L1 MT_INFRA(0x260)
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L1 0xe0000
+
+#define MT_SWDEF_BASE 0x41f200
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
+#define MT_TOP_BASE 0x18060000
+#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+
+#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
+#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_TOP_MISC MT_TOP(0xf0)
+#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+
+#define MT_HW_BOUND 0x70010020
+#define MT_HW_CHIPID 0x70010200
+#define MT_HW_REV 0x70010204
+
+#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
+#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+
+#define MT_DMA_SHDL(ofs) (0xd6000 + (ofs))
+#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
+#define MT_DMASHDL_DMASHDL_BYPASS BIT(28)
+#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008)
+#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c)
+#define MT_DMASHDL_REFILL MT_DMA_SHDL(0x010)
+#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c)
+#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
+#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
+
+#define MT_DMASHDL_GROUP_QUOTA(_n) MT_DMA_SHDL(0x020 + ((_n) << 2))
+#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0)
+#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16)
+
+#define MT_DMASHDL_Q_MAP(_n) MT_DMA_SHDL(0x060 + ((_n) << 2))
+#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0)
+#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8))
+
+#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
+
+#define MT_CONN_ON_LPCTL 0x7c060010
+#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
+#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
+#define PCIE_LPCR_HOST_SET_OWN BIT(0)
+
+#define MT_CONN_ON_MISC 0x7c0600f0
+#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
+
+#endif
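
MT_WTBL_LMAC_OFFS() above encodes a station index and a dword index into
a hardware WTBL address with FIELD_PREP. The encoding can be checked with
a self-contained sketch (macros inlined, example indices arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

	#define MT_WTBL_BASE    0x38000
	#define MT_WTBL_LMAC_ID GENMASK(14, 8)
	#define MT_WTBL_LMAC_DW GENMASK(7, 2)

	int main(void)
	{
		unsigned int idx = 5, dw = 3; /* arbitrary wcid and dword */
		uint32_t addr = MT_WTBL_BASE |
				FIELD_PREP(MT_WTBL_LMAC_ID, idx) |
				FIELD_PREP(MT_WTBL_LMAC_DW, dw);

		/* the ID field starts at bit 8, so each station entry
		 * occupies a 256-byte window; each dword is 4 bytes */
		printf("wtbl[%u] dw%u -> 0x%x\n", idx, dw, addr); /* 0x3850c */
		return 0;
	}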
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 581eb56dc4be..cc769645afa5 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -14,18 +14,23 @@ static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
[MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
[MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
};
-void mt76_testmode_tx_pending(struct mt76_dev *dev)
+void mt76_testmode_tx_pending(struct mt76_phy *phy)
{
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
+ struct mt76_dev *dev = phy->dev;
struct mt76_wcid *wcid = &dev->global_wcid;
- struct mt76_phy *phy = &dev->phy;
struct sk_buff *skb = td->tx_skb;
struct mt76_queue *q;
+ u16 tx_queued_limit;
int qid;
if (!skb || !td->tx_pending)
@@ -34,9 +39,12 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
qid = skb_get_queue_mapping(skb);
q = phy->q_tx[qid];
+ tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
+
spin_lock_bh(&q->lock);
- while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 &&
+ while (td->tx_pending > 0 &&
+ td->tx_queued - td->tx_done < tx_queued_limit &&
q->queued < q->ndesc / 2) {
int ret;
@@ -56,10 +64,9 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
static int
-mt76_testmode_tx_init(struct mt76_dev *dev)
+mt76_testmode_tx_init(struct mt76_phy *phy)
{
- struct mt76_testmode_data *td = &dev->test;
- struct mt76_phy *phy = &dev->phy;
+ struct mt76_testmode_data *td = &phy->test;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -67,6 +74,7 @@ mt76_testmode_tx_init(struct mt76_dev *dev)
IEEE80211_FCTL_FROMDS;
struct ieee80211_tx_rate *rate;
u8 max_nss = hweight8(phy->antenna_mask);
+ bool ext_phy = phy != &phy->dev->phy;
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
@@ -88,6 +96,9 @@ mt76_testmode_tx_init(struct mt76_dev *dev)
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT)
goto out;
@@ -166,9 +177,10 @@ out:
}
static void
-mt76_testmode_tx_start(struct mt76_dev *dev)
+mt76_testmode_tx_start(struct mt76_phy *phy)
{
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
+ struct mt76_dev *dev = phy->dev;
td->tx_queued = 0;
td->tx_done = 0;
@@ -177,9 +189,10 @@ mt76_testmode_tx_start(struct mt76_dev *dev)
}
static void
-mt76_testmode_tx_stop(struct mt76_dev *dev)
+mt76_testmode_tx_stop(struct mt76_phy *phy)
{
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
+ struct mt76_dev *dev = phy->dev;
mt76_worker_disable(&dev->tx_worker);
@@ -187,7 +200,8 @@ mt76_testmode_tx_stop(struct mt76_dev *dev)
mt76_worker_enable(&dev->tx_worker);
- wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
+ wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
+ MT76_TM_TIMEOUT * HZ);
dev_kfree_skb(td->tx_skb);
td->tx_skb = NULL;
@@ -206,9 +220,9 @@ mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
}
static void
-mt76_testmode_init_defaults(struct mt76_dev *dev)
+mt76_testmode_init_defaults(struct mt76_phy *phy)
{
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
if (td->tx_msdu_len > 0)
return;
@@ -220,49 +234,50 @@ mt76_testmode_init_defaults(struct mt76_dev *dev)
}
static int
-__mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+__mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
- enum mt76_testmode_state prev_state = dev->test.state;
+ enum mt76_testmode_state prev_state = phy->test.state;
+ struct mt76_dev *dev = phy->dev;
int err;
if (prev_state == MT76_TM_STATE_TX_FRAMES)
- mt76_testmode_tx_stop(dev);
+ mt76_testmode_tx_stop(phy);
if (state == MT76_TM_STATE_TX_FRAMES) {
- err = mt76_testmode_tx_init(dev);
+ err = mt76_testmode_tx_init(phy);
if (err)
return err;
}
- err = dev->test_ops->set_state(dev, state);
+ err = dev->test_ops->set_state(phy, state);
if (err) {
if (state == MT76_TM_STATE_TX_FRAMES)
- mt76_testmode_tx_stop(dev);
+ mt76_testmode_tx_stop(phy);
return err;
}
if (state == MT76_TM_STATE_TX_FRAMES)
- mt76_testmode_tx_start(dev);
+ mt76_testmode_tx_start(phy);
else if (state == MT76_TM_STATE_RX_FRAMES) {
- memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats));
+ memset(&phy->test.rx_stats, 0, sizeof(phy->test.rx_stats));
}
- dev->test.state = state;
+ phy->test.state = state;
return 0;
}
-int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
- struct mt76_testmode_data *td = &dev->test;
- struct ieee80211_hw *hw = dev->phy.hw;
+ struct mt76_testmode_data *td = &phy->test;
+ struct ieee80211_hw *hw = phy->hw;
if (state == td->state && state == MT76_TM_STATE_OFF)
return 0;
if (state > MT76_TM_STATE_OFF &&
- (!test_bit(MT76_STATE_RUNNING, &dev->phy.state) ||
+ (!test_bit(MT76_STATE_RUNNING, &phy->state) ||
!(hw->conf.flags & IEEE80211_CONF_MONITOR)))
return -ENOTCONN;
@@ -270,12 +285,12 @@ int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state
td->state != MT76_TM_STATE_IDLE) {
int ret;
- ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE);
+ ret = __mt76_testmode_set_state(phy, MT76_TM_STATE_IDLE);
if (ret)
return ret;
}
- return __mt76_testmode_set_state(dev, state);
+ return __mt76_testmode_set_state(phy, state);
}
EXPORT_SYMBOL(mt76_testmode_set_state);
@@ -301,8 +316,9 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ bool ext_phy = phy != &dev->phy;
u32 state;
int err;
int i;
@@ -320,11 +336,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
if (tb[MT76_TM_ATTR_RESET]) {
- mt76_testmode_set_state(dev, MT76_TM_STATE_OFF);
+ mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
memset(td, 0, sizeof(*td));
}
- mt76_testmode_init_defaults(dev);
+ mt76_testmode_init_defaults(phy);
if (tb[MT76_TM_ATTR_TX_COUNT])
td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
@@ -350,12 +366,21 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
- mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1,
- phy->antenna_mask) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
+ 1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
+ &td->tx_duty_cycle, 0, 99) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
&td->tx_power_control, 0, 1))
goto out;
+ if (tb[MT76_TM_ATTR_TX_IPG])
+ td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]);
+
+ if (tb[MT76_TM_ATTR_TX_TIME])
+ td->tx_time = nla_get_u32(tb[MT76_TM_ATTR_TX_TIME]);
+
if (tb[MT76_TM_ATTR_FREQ_OFFSET])
td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);
@@ -382,7 +407,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (dev->test_ops->set_params) {
- err = dev->test_ops->set_params(dev, tb, state);
+ err = dev->test_ops->set_params(phy, tb, state);
if (err)
goto out;
}
@@ -393,7 +418,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = 0;
if (tb[MT76_TM_ATTR_STATE])
- err = mt76_testmode_set_state(dev, state);
+ err = mt76_testmode_set_state(phy, state);
out:
mutex_unlock(&dev->mutex);
@@ -403,9 +428,10 @@ out:
EXPORT_SYMBOL(mt76_testmode_cmd);
static int
-mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
+mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
{
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
+ struct mt76_dev *dev = phy->dev;
u64 rx_packets = 0;
u64 rx_fcs_error = 0;
int i;
@@ -425,7 +451,7 @@ mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
return -EMSGSIZE;
if (dev->test_ops->dump_stats)
- return dev->test_ops->dump_stats(dev, msg);
+ return dev->test_ops->dump_stats(phy, msg);
return 0;
}
@@ -435,7 +461,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- struct mt76_testmode_data *td = &dev->test;
+ struct mt76_testmode_data *td = &phy->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
int err = 0;
void *a;
@@ -461,22 +487,22 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
if (a) {
- err = mt76_testmode_dump_stats(dev, msg);
+ err = mt76_testmode_dump_stats(phy, msg);
nla_nest_end(msg, a);
}
goto out;
}
- mt76_testmode_init_defaults(dev);
+ mt76_testmode_init_defaults(phy);
err = -EMSGSIZE;
if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
goto out;
- if (td->mtd_name &&
- (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) ||
- nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset)))
+ if (dev->test_mtd.name &&
+ (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, dev->test_mtd.name) ||
+ nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, dev->test_mtd.offset)))
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
@@ -491,6 +517,14 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_SPE_IDX) &&
+ nla_put_u8(msg, MT76_TM_ATTR_TX_SPE_IDX, td->tx_spe_idx)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_DUTY_CYCLE) &&
+ nla_put_u8(msg, MT76_TM_ATTR_TX_DUTY_CYCLE, td->tx_duty_cycle)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_IPG) &&
+ nla_put_u32(msg, MT76_TM_ATTR_TX_IPG, td->tx_ipg)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_TIME) &&
+ nla_put_u32(msg, MT76_TM_ATTR_TX_TIME, td->tx_time)) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
index 7efad685a17c..e0c706ce9b42 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -5,6 +5,8 @@
#ifndef __MT76_TESTMODE_H
#define __MT76_TESTMODE_H
+#define MT76_TM_TIMEOUT 10
+
/**
* enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
*
@@ -35,6 +37,13 @@
* @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32)
*
* @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr)
+ *
+ * @MT76_TM_ATTR_TX_SPE_IDX: tx spatial extension index (u8)
+ *
+ * @MT76_TM_ATTR_TX_DUTY_CYCLE: packet tx duty cycle (u8)
+ * @MT76_TM_ATTR_TX_IPG: tx inter-packet gap, in units of us (u32)
+ * @MT76_TM_ATTR_TX_TIME: packet transmission time, in units of us (u32)
+ *
*/
enum mt76_testmode_attr {
MT76_TM_ATTR_UNSPEC,
@@ -63,6 +72,12 @@ enum mt76_testmode_attr {
MT76_TM_ATTR_STATS,
+ MT76_TM_ATTR_TX_SPE_IDX,
+
+ MT76_TM_ATTR_TX_DUTY_CYCLE,
+ MT76_TM_ATTR_TX_IPG,
+ MT76_TM_ATTR_TX_TIME,
+
/* keep last */
NUM_MT76_TM_ATTRS,
MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
@@ -128,12 +143,14 @@ enum mt76_testmode_rx_attr {
* @MT76_TM_STATE_IDLE: test mode enabled, but idle
* @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames
* @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics
+ * @MT76_TM_STATE_TX_CONT: waveform tx without time gap
*/
enum mt76_testmode_state {
MT76_TM_STATE_OFF,
MT76_TM_STATE_IDLE,
MT76_TM_STATE_TX_FRAMES,
MT76_TM_STATE_RX_FRAMES,
+ MT76_TM_STATE_TX_CONT,
/* keep last */
NUM_MT76_TM_STATES,
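
On how the three new timing attributes interact: a common convention in
mt76 test code derives the inter-packet gap from the per-packet airtime
and a target duty cycle, so that tx_time / (tx_time + ipg) equals the
duty cycle. This formula is an assumption borrowed from related mt76
drivers, not something this header mandates:

	#include <stdint.h>
	#include <stdio.h>

	/* duty cycle d (percent) with per-packet airtime tx_time (us)
	 * implies an idle gap of tx_time * (100 - d) / d between packets */
	static uint32_t ipg_from_duty_cycle(uint32_t tx_time, uint8_t d)
	{
		if (!d || d >= 100)
			return 0;
		return tx_time * (100 - d) / d;
	}

	int main(void)
	{
		printf("tx_time=1000us duty=25%% -> ipg=%uus\n",
		       ipg_from_duty_cycle(1000, 25)); /* 3000us */
		return 0;
	}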
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 25627e70bdad..b8fe8adc43a3 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -202,16 +202,22 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
struct ieee80211_hw *hw;
struct sk_buff_head list;
+ mt76_tx_check_non_aql(dev, wcid_idx, skb);
+
#ifdef CONFIG_NL80211_TESTMODE
- if (skb == dev->test.tx_skb) {
- dev->test.tx_done++;
- if (dev->test.tx_queued == dev->test.tx_done)
+ if (mt76_is_testmode_skb(dev, skb, &hw)) {
+ struct mt76_phy *phy = hw->priv;
+
+ if (skb == phy->test.tx_skb)
+ phy->test.tx_done++;
+ if (phy->test.tx_queued == phy->test.tx_done)
wake_up(&dev->tx_wait);
+
+ ieee80211_free_txskb(hw, skb);
+ return;
}
#endif
- mt76_tx_check_non_aql(dev, wcid_idx, skb);
-
if (!skb->prev) {
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_free_txskb(hw, skb);
@@ -261,7 +267,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
int qid = skb_get_queue_mapping(skb);
bool ext_phy = phy != &dev->phy;
- if (mt76_testmode_enabled(dev)) {
+ if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
return;
}
@@ -454,7 +460,6 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
struct mt76_wcid *wcid;
int ret = 0;
- spin_lock_bh(&q->lock);
while (1) {
if (test_bit(MT76_STATE_PM, &phy->state) ||
test_bit(MT76_RESET, &phy->state)) {
@@ -464,14 +469,9 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
if (dev->queue_ops->tx_cleanup &&
q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
- spin_unlock_bh(&q->lock);
dev->queue_ops->tx_cleanup(dev, q, false);
- spin_lock_bh(&q->lock);
}
- if (mt76_txq_stopped(q))
- break;
-
txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -481,6 +481,8 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
+ spin_lock_bh(&q->lock);
+
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -494,10 +496,13 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
spin_lock_bh(&q->lock);
}
- ret += mt76_txq_send_burst(phy, q, mtxq);
+ if (!mt76_txq_stopped(q))
+ ret += mt76_txq_send_burst(phy, q, mtxq);
+
+ spin_unlock_bh(&q->lock);
+
ieee80211_return_txq(phy->hw, txq, false);
}
- spin_unlock_bh(&q->lock);
return ret;
}
@@ -539,8 +544,10 @@ void mt76_tx_worker(struct mt76_worker *w)
mt76_txq_schedule_all(dev->phy2);
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->test.tx_pending)
- mt76_testmode_tx_pending(dev);
+ if (dev->phy.test.tx_pending)
+ mt76_testmode_tx_pending(&dev->phy);
+ if (dev->phy2 && dev->phy2->test.tx_pending)
+ mt76_testmode_tx_pending(dev->phy2);
#endif
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..30bc54e98c58 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -612,6 +612,7 @@ static void mt76u_complete_rx(struct urb *urb)
case -ECONNRESET:
case -ESHUTDOWN:
case -ENOENT:
+ case -EPROTO:
return;
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
@@ -811,11 +812,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +829,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +837,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 5f99054f535b..ed78d2cb35e3 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -74,7 +74,8 @@ bad_frame:
}
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
- u32 seg_len, struct page *p)
+ u32 seg_len, struct page *p,
+ struct list_head *list)
{
struct sk_buff *skb;
struct mt7601u_rxwi *rxwi;
@@ -104,9 +105,13 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
if (!skb)
return;
- spin_lock(&dev->mac_lock);
- ieee80211_rx(dev->hw, skb);
- spin_unlock(&dev->mac_lock);
+ local_bh_disable();
+ rcu_read_lock();
+
+ ieee80211_rx_list(dev->hw, NULL, skb, list);
+
+ rcu_read_unlock();
+ local_bh_enable();
}
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -130,6 +135,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
u32 seg_len, data_len = e->urb->actual_length;
u8 *data = page_address(e->p);
struct page *new_p = NULL;
+ LIST_HEAD(list);
int cnt = 0;
if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
@@ -140,7 +146,8 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
new_p = dev_alloc_pages(MT_RX_ORDER);
while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
- mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+ mt7601u_rx_process_seg(dev, data, seg_len,
+ new_p ? e->p : NULL, &list);
data_len -= seg_len;
data += seg_len;
@@ -150,10 +157,11 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
if (cnt > 1)
trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
+ netif_receive_skb_list(&list);
+
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
@@ -192,6 +200,7 @@ static void mt7601u_complete_rx(struct urb *urb)
case -ECONNRESET:
case -ESHUTDOWN:
case -ENOENT:
+ case -EPROTO:
return;
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
@@ -237,6 +246,7 @@ static void mt7601u_complete_tx(struct urb *urb)
case -ECONNRESET:
case -ESHUTDOWN:
case -ENOENT:
+ case -EPROTO:
return;
default:
dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
@@ -310,7 +320,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -328,6 +337,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
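
The mt7601u change above switches per-frame ieee80211_rx() delivery to
batched delivery. In isolation the pattern looks like the kernel-context
fragment below (a sketch of the flow, not code from this patch): each
decoded segment is appended to a local list under BH/RCU protection, and
the whole batch is handed to the network stack once per URB:

	LIST_HEAD(frames);

	/* per received segment: */
	local_bh_disable();
	rcu_read_lock();
	ieee80211_rx_list(hw, NULL, skb, &frames); /* decap, no delivery */
	rcu_read_unlock();
	local_bh_enable();

	/* once per URB, after all segments: */
	netif_receive_skb_list(&frames); /* single batched push */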
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 28db24a2b5e5..8a00f6a75ca9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -586,6 +586,9 @@ static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
{
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
mt7601u_rxdc_cal(dev);
diff --git a/drivers/net/wireless/microchip/wilc1000/Kconfig b/drivers/net/wireless/microchip/wilc1000/Kconfig
index 80c92e8bf8a5..7f15e42602dd 100644
--- a/drivers/net/wireless/microchip/wilc1000/Kconfig
+++ b/drivers/net/wireless/microchip/wilc1000/Kconfig
@@ -44,4 +44,4 @@ config WILC1000_HW_OOB_INTR
chipset. This OOB interrupt is intended to provide a faster interrupt
mechanism for SDIO host controllers that don't support SDIO interrupt.
Select this option If the SDIO host controller in your platform
- doesn't support SDIO time devision interrupt.
+ doesn't support SDIO time division interrupt.
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e3dd205cbbe5..96973ec7bd9a 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1538,7 +1538,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
wilc_wfi_deinit_mon_interface(wl, true);
vif = netdev_priv(wdev->netdev);
cfg80211_stop_iface(wiphy, wdev, GFP_KERNEL);
- unregister_netdevice(vif->ndev);
+ cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
wilc_set_operation_mode(vif, 0, 0, 0);
diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h
index a76e1dea4345..1114530d03e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/fw.h
+++ b/drivers/net/wireless/microchip/wilc1000/fw.h
@@ -44,20 +44,20 @@ struct wilc_drv_handler {
struct wilc_wep_key {
u8 index;
u8 key_len;
- u8 key[0];
+ u8 key[];
} __packed;
struct wilc_sta_wpa_ptk {
u8 mac_addr[ETH_ALEN];
u8 key_len;
- u8 key[0];
+ u8 key[];
} __packed;
struct wilc_ap_wpa_ptk {
u8 mac_addr[ETH_ALEN];
u8 index;
u8 key_len;
- u8 key[0];
+ u8 key[];
} __packed;
struct wilc_gtk_key {
@@ -65,7 +65,7 @@ struct wilc_gtk_key {
u8 rsc[8];
u8 index;
u8 key_len;
- u8 key[0];
+ u8 key[];
} __packed;
struct wilc_op_mode {
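
[The fw.h hunks above replace the zero-length-array GNU extension (u8 key[0]) with a C99 flexible array member (u8 key[]), which lets the compiler and FORTIFY checks reason about the structure's true size. A minimal sketch of allocating one of these structures with struct_size(); the helper name is hypothetical, wilc_wep_key is the struct from the hunk.]

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Allocate a wilc_wep_key carrying key_len trailing key bytes.
 * struct_size() computes sizeof(*wk) + key_len with overflow checking.
 */
static struct wilc_wep_key *wep_key_alloc(u8 index, const u8 *key, u8 key_len)
{
	struct wilc_wep_key *wk;

	wk = kzalloc(struct_size(wk, key, key_len), GFP_KERNEL);
	if (!wk)
		return NULL;

	wk->index = index;
	wk->key_len = key_len;
	memcpy(wk->key, key, key_len);
	return wk;
}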
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index b5a1b65c087c..6bd63934c2d8 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -233,7 +233,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
- if (register_netdevice(wl->monitor_dev)) {
+ if (cfg80211_register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
free_netdev(wl->monitor_dev);
return NULL;
@@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked)
return;
if (rtnl_locked)
- unregister_netdevice(wl->monitor_dev);
+ cfg80211_unregister_netdevice(wl->monitor_dev);
else
unregister_netdev(wl->monitor_dev);
wl->monitor_dev = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 2a1fbbdd6a4b..1b205e7d97a8 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -737,7 +737,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
vif->netstats.tx_packets++;
vif->netstats.tx_bytes += tx_data->size;
- queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data,
+ queue_count = wilc_wlan_txq_add_net_pkt(ndev, tx_data,
tx_data->buff, tx_data->size,
wilc_tx_complete);
@@ -950,7 +950,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
vif->priv.dev = ndev;
if (rtnl_locked)
- ret = register_netdevice(ndev);
+ ret = cfg80211_register_netdevice(ndev);
else
ret = register_netdev(ndev);
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index c12f27be9f79..31d51385ba93 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -408,7 +408,8 @@ static inline u8 ac_change(struct wilc *wilc, u8 *ac)
return 1;
}
-int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
+int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
+ struct tx_complete_data *tx_data, u8 *buffer,
u32 buffer_size,
void (*tx_complete_fn)(void *, int))
{
@@ -420,27 +421,27 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
wilc = vif->wilc;
if (wilc->quit) {
- tx_complete_fn(priv, 0);
+ tx_complete_fn(tx_data, 0);
return 0;
}
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
- tx_complete_fn(priv, 0);
+ tx_complete_fn(tx_data, 0);
return 0;
}
tqe->type = WILC_NET_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
tqe->tx_complete_func = tx_complete_fn;
- tqe->priv = priv;
+ tqe->priv = tx_data;
tqe->vif = vif;
- q_num = ac_classify(wilc, priv);
+ q_num = ac_classify(wilc, tx_data->skb);
tqe->q_num = q_num;
if (ac_change(wilc, &q_num)) {
- tx_complete_fn(priv, 0);
+ tx_complete_fn(tx_data, 0);
kfree(tqe);
return 0;
}
@@ -451,7 +452,7 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
tcp_process(dev, tqe);
wilc_wlan_txq_add_to_tail(dev, q_num, tqe);
} else {
- tx_complete_fn(priv, 0);
+ tx_complete_fn(tx_data, 0);
kfree(tqe);
}
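
[The wlan.c change above is a type tightening: the opaque void *priv becomes struct tx_complete_data *tx_data, so ac_classify() can be handed exactly the field it needs (tx_data->skb) under compiler type checking instead of an unchecked cast. A reduced sketch of the idea; the struct layout here is an approximation, the real definition lives in the driver's headers.]

#include <linux/skbuff.h>

/* Approximate layout for illustration only. */
struct tx_complete_data {
	int size;
	u8 *buff;
	struct sk_buff *skb;
};

/* With a typed context the field access is compiler-checked; the old
 * void *priv version relied on every callee casting correctly.
 */
static u16 classify_example(struct tx_complete_data *tx_data)
{
	return skb_get_queue_mapping(tx_data->skb);
}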
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 3d2104f19819..d55eb6b3a12a 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -399,7 +399,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif);
-int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
+int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
+ struct tx_complete_data *tx_data, u8 *buffer,
u32 buffer_size,
void (*tx_complete_fn)(void *, int));
int wilc_wlan_handle_txq(struct wilc *wl, u32 *txq_count);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 54cdf3ad09d7..504b4d0b98c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -180,7 +180,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cancel_work_sync(&vif->high_pri_tx_work);
if (netdev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(netdev);
+ cfg80211_unregister_netdevice(netdev);
if (qtnf_cmd_send_del_intf(vif))
pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
@@ -267,7 +267,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
- unregister_netdevice(vif->netdev);
+ cfg80211_unregister_netdevice(vif->netdev);
vif->netdev = NULL;
goto error_del_vif;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index ad726bd100ec..b4dd60b2ebc9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -492,7 +492,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
- ret = register_netdevice(dev);
+ ret = cfg80211_register_netdevice(dev);
if (ret) {
free_netdev(dev);
vif->netdev = NULL;
@@ -611,8 +611,9 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
mac->wiphy_registered = 1;
rtnl_lock();
-
+ wiphy_lock(priv_to_wiphy(mac));
ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
+ wiphy_unlock(priv_to_wiphy(mac));
rtnl_unlock();
if (ret) {
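
[The qtnfmac hunk above exists because cfg80211_register_netdevice(), substituted for plain register_netdevice() throughout this patch, requires the wiphy mutex in addition to the RTNL. A minimal sketch of the locking order this implies (rtnl first, then wiphy); the function name is hypothetical.]

#include <net/cfg80211.h>
#include <linux/rtnetlink.h>

/* Sketch of the locking contract assumed above: cfg80211_register_netdevice()
 * must be called with both the RTNL and the wiphy mutex held, in this order.
 */
static int register_vif_example(struct wiphy *wiphy, struct net_device *dev)
{
	int ret;

	rtnl_lock();
	wiphy_lock(wiphy);
	ret = cfg80211_register_netdevice(dev);
	wiphy_unlock(wiphy);
	rtnl_unlock();

	return ret;
}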
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 0f328ce47fee..5d93c874d666 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -480,18 +480,7 @@ static struct pci_driver qtnf_pcie_drv_data = {
#endif
};
-static int __init qtnf_pcie_register(void)
-{
- return pci_register_driver(&qtnf_pcie_drv_data);
-}
-
-static void __exit qtnf_pcie_exit(void)
-{
- pci_unregister_driver(&qtnf_pcie_drv_data);
-}
-
-module_init(qtnf_pcie_register);
-module_exit(qtnf_pcie_exit);
+module_pci_driver(qtnf_pcie_drv_data);
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna PCIe bus driver for 802.11 wireless LAN.");
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index d08b251ec5a2..36ac18ca8082 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -988,6 +988,7 @@ static const struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x177f, 0x0313) },
{ USB_DEVICE(0x177f, 0x0323) },
{ USB_DEVICE(0x177f, 0x0324) },
+ { USB_DEVICE(0x177f, 0x1163) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
index c861811aa6c0..ad95f9eba301 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
@@ -179,7 +179,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
* Make room for new data. There are 2 possibilities
* either the alignment is already present between
* the 802.11 header and payload. In that case we
- * we have to move the header less then the iv_len
+ * have to move the header less than the iv_len
* since we can use the already available l2pad bytes
* for the iv data.
* When the alignment must be added manually we must
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 9f1f93d04145..cfe2dfdae928 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1507,8 +1507,6 @@ static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
u32 val32;
int ret;
- ret = 0;
-
val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
if (val32 & SYS_CFG_SPS_LDO_SEL) {
rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
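
[The core.c hunk above fixes an ordering bug: complete() used to run at the top of the callback, before the firmware was validated and copied, so a waiter on firmware_loading_complete could proceed against half-initialized state. The fix signals the completion exactly once, on every exit path, through the new exit: label. A minimal sketch of that pattern with hypothetical names:]

#include <linux/completion.h>
#include <linux/firmware.h>

/* Sketch: signal the completion on every exit path, and only after the
 * work it guards has actually finished.
 */
static void fw_callback_example(const struct firmware *fw, void *context)
{
	struct completion *done = context;	/* hypothetical context layout */

	if (!fw)
		goto exit;			/* load failed */

	/* ... validate fw->size and copy fw->data here ... */

	release_firmware(fw);
exit:
	complete(done);
}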
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index f99882255d48..629c03271bde 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -798,9 +798,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
ie += 3 + noa_len;
}
- if (find_p2p_ie == true) {
+ if (find_p2p_ie) {
if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
- (find_p2p_ps_ie == false))
+ (!find_p2p_ps_ie))
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index d10c14c694da..6f61d6a10627 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -474,11 +474,11 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw)
u8 dm_dig_max, dm_dig_min;
u8 current_igi = dm_dig->cur_igvalue;
- if (rtlpriv->dm.dm_initialgain_enable == false)
+ if (!rtlpriv->dm.dm_initialgain_enable)
return;
- if (dm_dig->dig_enable_flag == false)
+ if (!dm_dig->dig_enable_flag)
return;
- if (mac->act_scanning == true)
+ if (mac->act_scanning)
return;
if (mac->link_state >= MAC80211_LINKED)
@@ -1637,7 +1637,7 @@ static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw)
}
}
- if (bpkt_filter_match == false) {
+ if (!bpkt_filter_match) {
rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N,
BIT(16), 0);
rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index bd9160b166c5..861cc663ca93 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1269,12 +1269,12 @@ void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
- if (check_bssid == true) {
+ if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&reg_rcr));
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4));
- } else if (check_bssid == false) {
+ } else if (!check_bssid) {
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0);
rtlpriv->cfg->ops->set_hw_reg(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 265a1a336304..0b6a15c2e5cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -380,7 +380,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
initialized = false;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
- } else if (initialized == false) {
+ } else if (!initialized) {
initialized = true;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
dm_digtable->cur_igvalue = 0x20;
@@ -509,7 +509,7 @@ static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- if (rtlpriv->dm.dm_initialgain_enable == false)
+ if (!rtlpriv->dm.dm_initialgain_enable)
return;
if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
return;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 47fabce5c235..91199262aaca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -458,7 +458,7 @@ static u8 _rtl92se_halset_sysclk(struct ieee80211_hw *hw, u8 data)
tmpvalue = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
bresult = ((tmpvalue & BIT(7)) == (data & BIT(7)));
- if ((data & (BIT(6) | BIT(7))) == false) {
+ if (!(data & (BIT(6) | BIT(7)))) {
waitcount = 100;
tmpvalue = 0;
@@ -1268,7 +1268,7 @@ static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
tmp = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
result = ((tmp & BIT(7)) == (data & BIT(7)));
- if ((data & (BIT(6) | BIT(7))) == false) {
+ if (!(data & (BIT(6) | BIT(7)))) {
waitcnt = 100;
tmp = 0;
@@ -1373,7 +1373,7 @@ static void _rtl92se_gen_refreshledstate(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
- if (rtlpci->up_first_time == 1)
+ if (rtlpci->up_first_time)
return;
if (rtlpriv->psc.rfoff_reason == RF_CHANGE_BY_IPS)
@@ -2302,7 +2302,7 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
bool turnonbypowerdomain = false;
/* just 8191se can check gpio before firstup, 92c/92d have fixed it */
- if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
+ if (rtlpci->up_first_time || rtlpci->being_init_adapter)
return false;
if (ppsc->swrf_processing)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 63283d9e7485..aaa004d4d6d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -1017,7 +1017,7 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool rtstatus = true;
+ bool rtstatus;
u8 pathmap, index, rf_num = 0;
u8 path1, path2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 372d6f8caf06..a29321e2fa72 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -1812,7 +1812,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
return false;
}
_rtl8821ae_phy_init_tx_power_by_rate(hw);
- if (rtlefuse->autoload_failflag == false) {
+ if (!rtlefuse->autoload_failflag) {
rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
@@ -3848,7 +3848,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
else
rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
- if (vdf_enable == 1) {
+ if (vdf_enable) {
rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n");
for (k = 0; k <= 2; k++) {
switch (k) {
@@ -3980,7 +3980,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
}
}
- if (tx0iqkok == false)
+ if (!tx0iqkok)
break; /* TXK fail, Don't do RXK */
if (vdf_enable == 1) {
@@ -4090,7 +4090,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
}
}
- if (tx0iqkok == false) { /* If RX mode TXK fail, then take TXK Result */
+ if (!tx0iqkok) { /* If RX mode TXK fail, then take TXK Result */
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
@@ -4249,7 +4249,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
}
}
- if (tx0iqkok == false) { /* If RX mode TXK fail, then take TXK Result */
+ if (!tx0iqkok) { /* If RX mode TXK fail, then take TXK Result */
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d62b87f010c9..6c5e242b1bc5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -310,8 +310,7 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
init_usb_anchor(&rtlusb->rx_cleanup_urbs);
skb_queue_head_init(&rtlusb->rx_queue);
- rtlusb->rx_work_tasklet.func = (void(*))_rtl_rx_work;
- rtlusb->rx_work_tasklet.data = (unsigned long)&rtlusb->rx_work_tasklet;
+ tasklet_setup(&rtlusb->rx_work_tasklet, _rtl_rx_work);
return 0;
}
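
[tasklet_setup() replaces the open-coded func/data assignment above, which needed the dubious (void(*)) cast. The modern callback takes the tasklet itself and recovers the enclosing object with from_tasklet(), a container_of() wrapper. A sketch of what the reworked callback side looks like; struct rtl_usb comes from the driver's headers and the body is illustrative.]

#include <linux/interrupt.h>

static void _rtl_rx_work_sketch(struct tasklet_struct *t)
{
	/* from_tasklet(var, tasklet, member) is container_of() in disguise */
	struct rtl_usb *rtlusb = from_tasklet(rtlusb, t, rx_work_tasklet);

	/* ... drain rtlusb->rx_queue ... */
}

/* registration, as in the hunk above: */
/* tasklet_setup(&rtlusb->rx_work_tasklet, _rtl_rx_work_sketch); */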
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 24530cafcba7..ea2be1e25065 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -1607,6 +1607,7 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1618,6 +1619,7 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
table_case = 26;
if (coex_stat->bt_hid_exist &&
coex_stat->bt_profile_num == 1) {
+ slot_type = TDMA_4SLOT;
tdma_case = 20;
} else {
tdma_case = 20;
@@ -1635,7 +1637,7 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
}
rtw_coex_table(rtwdev, false, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 19fc2d8bf3e9..948cb79050ea 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -800,7 +800,7 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
}
mutex_lock(&rtwdev->mutex);
- coex->manual_control = enable == 0;
+ coex->manual_control = !enable;
mutex_unlock(&rtwdev->mutex);
return count;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 1f1b639cd124..2351dfb0d2e2 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -42,7 +42,7 @@ static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
list_add_tail(&rtwtxq->list, &rtwdev->txqs);
spin_unlock_bh(&rtwdev->txq_lock);
- tasklet_schedule(&rtwdev->tx_tasklet);
+ queue_work(rtwdev->tx_wq, &rtwdev->tx_work);
}
static int rtw_ops_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index e7c1ae454524..e6989c0525cc 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -894,6 +894,7 @@ static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct ieee80211_sta *sta = si->sta;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_hal *hal = &rtwdev->hal;
@@ -938,6 +939,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
} else {
wireless_set = WIRELESS_OFDM;
}
+ dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
if (sta->vht_cap.vht_supported) {
@@ -955,6 +957,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
} else {
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
+ dm_info->rrsr_val_init = RRSR_INIT_2G;
} else {
rtw_err(rtwdev, "Unknown band type\n");
wireless_set = 0;
@@ -1276,7 +1279,6 @@ static void rtw_set_supported_band(struct ieee80211_hw *hw,
err_out:
rtw_err(rtwdev, "failed to set supported band\n");
- kfree(sband);
}
static void rtw_unset_supported_band(struct ieee80211_hw *hw,
@@ -1591,6 +1593,8 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, 0);
rtw_phy_init_tx_power(rtwdev);
+ if (rfe_def->agc_btg_tbl)
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
@@ -1654,7 +1658,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
- tasklet_setup(&rtwdev->tx_tasklet, rtw_tx_tasklet);
+ rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
@@ -1666,6 +1670,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work,
rtw_coex_bt_multi_link_remain_work);
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
+ INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
@@ -1732,7 +1737,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
if (wow_fw->firmware)
release_firmware(wow_fw->firmware);
- tasklet_kill(&rtwdev->tx_tasklet);
+ destroy_workqueue(rtwdev->tx_wq);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
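
[The main.c hunks replace the TX tasklet with a dedicated workqueue: work items run in process context, so the TX path may sleep, and WQ_UNBOUND | WQ_HIGHPRI lets the scheduler place the high-priority work on any CPU. A condensed sketch of the lifecycle wired up above, using the names from the patch; NULL checks and error handling are omitted.]

#include <linux/workqueue.h>

/* init (rtw_core_init): */
rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);	/* handler added by this patch */

/* producer (the wake_tx_queue hook in mac80211.c): */
queue_work(rtwdev->tx_wq, &rtwdev->tx_work);

/* teardown (rtw_core_deinit): flushes pending work, then frees the queue */
destroy_workqueue(rtwdev->tx_wq);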
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 9a318dfd04f9..35afea91fd29 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -13,9 +13,11 @@
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include "util.h"
+#define RTW_NAPI_WEIGHT_NUM 64
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
@@ -1042,6 +1044,7 @@ enum rtw_rfe_fem {
struct rtw_rfe_def {
const struct rtw_table *phy_pg_tbl;
const struct rtw_table *txpwr_lmt_tbl;
+ const struct rtw_table *agc_btg_tbl;
};
#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \
@@ -1049,6 +1052,12 @@ struct rtw_rfe_def {
.txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
}
+#define RTW_DEF_RFE_EXT(chip, bb_pg, pwrlmt, btg) { \
+ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \
+ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
+ .agc_btg_tbl = &rtw ## chip ## _agc_btg_type ## btg ## _tbl, \
+ }
+
#define RTW_PWR_TRK_5G_1 0
#define RTW_PWR_TRK_5G_2 1
#define RTW_PWR_TRK_5G_3 2
@@ -1488,6 +1497,9 @@ struct rtw_iqk_info {
} result;
};
+#define RRSR_INIT_2G 0x15f
+#define RRSR_INIT_5G 0x150
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
@@ -1518,6 +1530,8 @@ struct rtw_dm_info {
u8 cck_gi_l_bnd;
u8 tx_rate;
+ u32 rrsr_val_init;
+ u32 rrsr_mask_min;
u8 thermal_avg[RTW_RF_PATH_MAX];
u8 thermal_meter_k;
s8 delta_power_index[RTW_RF_PATH_MAX];
@@ -1759,7 +1773,8 @@ struct rtw_dev {
/* used to protect txqs list */
spinlock_t txq_lock;
struct list_head txqs;
- struct tasklet_struct tx_tasklet;
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
struct work_struct ba_work;
struct rtw_tx_report tx_report;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 676d861aaf99..786a48649946 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -488,13 +488,14 @@ static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
}
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
- struct rtw_pci *rtwpci)
+ struct rtw_pci *rtwpci, bool exclude_rx)
{
unsigned long flags;
+ u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
- rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
+ rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
if (rtw_chip_wcpu_11ac(rtwdev))
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
@@ -555,14 +556,37 @@ static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
}
}
+static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
+ return;
+
+ napi_enable(&rtwpci->napi);
+}
+
+static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
+ return;
+
+ napi_synchronize(&rtwpci->napi);
+ napi_disable(&rtwpci->napi);
+}
+
static int rtw_pci_start(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_enable_interrupt(rtwdev, rtwpci);
+ rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
spin_unlock_bh(&rtwpci->irq_lock);
+ rtw_pci_napi_start(rtwdev);
+
return 0;
}
@@ -570,6 +594,8 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ rtw_pci_napi_stop(rtwdev);
+
spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
rtw_pci_dma_release(rtwdev, rtwpci);
@@ -935,16 +961,43 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
ring->r.rp = cur_rp;
}
-static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
- u8 hw_queue)
+static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct napi_struct *napi = &rtwpci->napi;
+
+ napi_schedule(napi);
+}
+
+static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci)
{
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_ring *ring;
+ int count = 0;
+ u32 tmp, cur_wp;
+
+ ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+ tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
+ cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
+ if (cur_wp >= ring->r.wp)
+ count = cur_wp - ring->r.wp;
+ else
+ count = ring->r.len - (ring->r.wp - cur_wp);
+
+ return count;
+}
+
+static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u8 hw_queue, u32 limit)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct napi_struct *napi = &rtwpci->napi;
+ struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
struct rtw_rx_pkt_stat pkt_stat;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb, *new;
- u32 cur_wp, cur_rp, tmp;
- u32 count;
+ u32 cur_rp = ring->r.rp;
+ u32 count, rx_done = 0;
u32 pkt_offset;
u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
u32 buf_desc_sz = chip->rx_buf_desc_sz;
@@ -952,17 +1005,9 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 *rx_desc;
dma_addr_t dma;
- ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+ count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
+ count = min(count, limit);
- tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
- cur_wp = tmp >> 16;
- cur_wp &= TRX_BD_IDX_MASK;
- if (cur_wp >= ring->r.wp)
- count = cur_wp - ring->r.wp;
- else
- count = ring->r.len - (ring->r.wp - cur_wp);
-
- cur_rp = ring->r.rp;
while (count--) {
rtw_pci_dma_check(rtwdev, ring, cur_rp);
skb = ring->buf[cur_rp];
@@ -995,7 +1040,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
rtw_rx_stats(rtwdev, pkt_stat.vif, new);
memcpy(new->cb, &rx_status, sizeof(rx_status));
- ieee80211_rx_irqsafe(rtwdev->hw, new);
+ ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
+ rx_done++;
}
next_rp:
@@ -1009,8 +1055,13 @@ next_rp:
}
ring->r.rp = cur_rp;
- ring->r.wp = cur_wp;
+	/* 'rp', the last position we have read, is seen as the previous
+	 * position of 'wp', which is used to calculate 'count' next time.
+	 */
+ ring->r.wp = cur_rp;
rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
+
+ return rx_done;
}
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
@@ -1060,6 +1111,7 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
u32 irq_status[4];
+ bool rx = false;
spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
@@ -1078,13 +1130,15 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
if (irq_status[3] & IMR_H2CDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
- if (irq_status[0] & IMR_ROK)
- rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+ if (irq_status[0] & IMR_ROK) {
+ rtw_pci_rx_isr(rtwdev);
+ rx = true;
+ }
if (unlikely(irq_status[0] & IMR_C2HCMD))
rtw_fw_c2h_cmd_isr(rtwdev);
/* all of the jobs for this interrupt have been done */
- rtw_pci_enable_interrupt(rtwdev, rtwpci);
+ rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
spin_unlock_bh(&rtwpci->irq_lock);
return IRQ_HANDLED;
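
[Taken together with the poll routine added further down, this is the standard NAPI handshake: the ISR leaves the RX interrupt masked (rx == true makes rtw_pci_enable_interrupt() keep IMR_ROK cleared) and schedules NAPI; the poll routine drains up to budget frames and re-enables RX only after napi_complete_done(). A minimal sketch of that contract for a hypothetical device; the hw_* helpers are assumptions.]

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct hw_dev {
	struct napi_struct napi;
	/* ... registers, rings ... */
};

/* Assumed helpers: hw_mask_rx_irq(), hw_unmask_rx_irq(), hw_rx_drain(). */

static irqreturn_t hw_irq_sketch(int irq, void *dev)
{
	struct hw_dev *hw = dev;

	hw_mask_rx_irq(hw);		/* no RX IRQs while NAPI polls */
	napi_schedule(&hw->napi);
	return IRQ_HANDLED;
}

static int hw_poll_sketch(struct napi_struct *napi, int budget)
{
	struct hw_dev *hw = container_of(napi, struct hw_dev, napi);
	int done = hw_rx_drain(hw, budget);

	if (done < budget && napi_complete_done(napi, done))
		hw_unmask_rx_irq(hw);	/* next frame raises an IRQ again */

	return done;
}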
@@ -1485,6 +1539,56 @@ static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
+static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
+ struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
+ priv);
+ int work_done = 0;
+
+ while (work_done < budget) {
+ u32 work_done_once;
+
+ work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
+ budget - work_done);
+ if (work_done_once == 0)
+ break;
+ work_done += work_done_once;
+ }
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock_bh(&rtwpci->irq_lock);
+ rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
+ spin_unlock_bh(&rtwpci->irq_lock);
+		/* If an interrupt fires during polling, after the final read
+		 * but before napi_complete, its data lands on the DMA ring
+		 * without being processed immediately. Check whether the
+		 * ring is non-empty and reschedule NAPI accordingly.
+		 */
+ if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
+ napi_schedule(napi);
+ }
+
+ return work_done;
+}
+
+static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ init_dummy_netdev(&rtwpci->netdev);
+ netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
+ RTW_NAPI_WEIGHT_NUM);
+}
+
+static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_pci_napi_stop(rtwdev);
+ netif_napi_del(&rtwpci->napi);
+}
+
int rtw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1527,6 +1631,8 @@ int rtw_pci_probe(struct pci_dev *pdev,
goto err_pci_declaim;
}
+ rtw_pci_napi_init(rtwdev);
+
ret = rtw_chip_info_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup chip information\n");
@@ -1550,6 +1656,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
return 0;
err_destroy_pci:
+ rtw_pci_napi_deinit(rtwdev);
rtw_pci_destroy(rtwdev, pdev);
err_pci_declaim:
@@ -1579,6 +1686,7 @@ void rtw_pci_remove(struct pci_dev *pdev)
rtw_unregister_hw(rtwdev, hw);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ rtw_pci_napi_deinit(rtwdev);
rtw_pci_destroy(rtwdev, pdev);
rtw_pci_declaim(rtwdev, pdev);
rtw_pci_free_irq(rtwdev, pdev);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 7cdefe229824..e76fc549a788 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -51,6 +51,7 @@
#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
#define TRX_BD_IDX_MASK GENMASK(11, 0)
+#define TRX_BD_HW_IDX_MASK GENMASK(27, 16)
/* BCNQ is specialized for rsvd page, does not need to specify a number */
#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
@@ -142,6 +143,12 @@
/* IMR 3 */
#define IMR_H2CDOK BIT(16)
+enum rtw_pci_flags {
+ RTW_PCI_FLAG_NAPI_RUNNING,
+
+ NUM_OF_RTW_PCI_FLAGS,
+};
+
/* one element is reserved to know if the ring is closed */
static inline int avail_desc(u32 wp, u32 rp, u32 len)
{
@@ -200,16 +207,21 @@ struct rtw_pci {
/* Used for PCI interrupt. */
spinlock_t hwirq_lock;
- /* Used for PCI TX queueing. */
+	/* Used for PCI TX ring/queueing and for enabling interrupts. */
spinlock_t irq_lock;
u32 irq_mask[4];
bool irq_enabled;
+ /* napi structure */
+ struct net_device netdev;
+ struct napi_struct napi;
+
u16 rx_tag;
DECLARE_BITMAP(tx_queued, RTK_MAX_TX_QUEUE_NUM);
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
u16 link_ctrl;
+ DECLARE_BITMAP(flags, NUM_OF_RTW_PCI_FLAGS);
void __iomem *mmap;
};
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index d44960cd940c..e114ddecac09 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -465,6 +465,60 @@ static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}
+static u32 rtw_phy_get_rrsr_mask(struct rtw_dev *rtwdev, u8 rate_idx)
+{
+ u8 rate_order;
+
+ rate_order = rate_idx;
+
+ if (rate_idx >= DESC_RATEVHT4SS_MCS0)
+ rate_order -= DESC_RATEVHT4SS_MCS0;
+ else if (rate_idx >= DESC_RATEVHT3SS_MCS0)
+ rate_order -= DESC_RATEVHT3SS_MCS0;
+ else if (rate_idx >= DESC_RATEVHT2SS_MCS0)
+ rate_order -= DESC_RATEVHT2SS_MCS0;
+ else if (rate_idx >= DESC_RATEVHT1SS_MCS0)
+ rate_order -= DESC_RATEVHT1SS_MCS0;
+ else if (rate_idx >= DESC_RATEMCS24)
+ rate_order -= DESC_RATEMCS24;
+ else if (rate_idx >= DESC_RATEMCS16)
+ rate_order -= DESC_RATEMCS16;
+ else if (rate_idx >= DESC_RATEMCS8)
+ rate_order -= DESC_RATEMCS8;
+ else if (rate_idx >= DESC_RATEMCS0)
+ rate_order -= DESC_RATEMCS0;
+ else if (rate_idx >= DESC_RATE6M)
+ rate_order -= DESC_RATE6M;
+ else
+ rate_order -= DESC_RATE1M;
+
+ if (rate_idx >= DESC_RATEMCS0 || rate_order == 0)
+ rate_order++;
+
+ return GENMASK(rate_order + RRSR_RATE_ORDER_CCK_LEN - 1, 0);
+}
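
[rtw_phy_get_rrsr_mask() maps a rate index to a bitmap of every rate at or below it, with the low RRSR_RATE_ORDER_CCK_LEN (4) bits covering the CCK rates. A few worked examples, assuming the conventional DESC_RATE* ordering (1M/2M/5.5M/11M CCK, then 6M.. OFDM, then MCS0..); the exact enum values are not shown in this patch.]

/* Worked examples (illustrative; enum values assumed):
 * DESC_RATE2M  : rate_order = 1                 -> GENMASK(4, 0) = 0x01f
 * DESC_RATE1M  : rate_order = 0, bumped to 1    -> GENMASK(4, 0) = 0x01f
 * DESC_RATE24M : rate_order = 4 (6/9/12/18/24M) -> GENMASK(7, 0) = 0x0ff
 * DESC_RATEMCS3: rate_order = 3, bumped to 4    -> GENMASK(7, 0) = 0x0ff
 * The result is ANDed with rrsr_val_init (RRSR_INIT_2G/5G) by
 * rtw_phy_rrsr_update() below to clamp the response-rate set.
 */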
+
+static void rtw_phy_rrsr_mask_min_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 mask = 0;
+
+ mask = rtw_phy_get_rrsr_mask(rtwdev, si->ra_report.desc_rate);
+ if (mask < dm_info->rrsr_mask_min)
+ dm_info->rrsr_mask_min = mask;
+}
+
+static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->rrsr_mask_min = RRSR_RATE_ORDER_MAX;
+ rtw_iterate_stas_atomic(rtwdev, rtw_phy_rrsr_mask_min_iter, rtwdev);
+ rtw_write32(rtwdev, REG_RRSR, dm_info->rrsr_val_init & dm_info->rrsr_mask_min);
+}
+
static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -561,13 +615,19 @@ static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
rtwdev->chip->ops->pwr_track(rtwdev);
}
+static void rtw_phy_ra_track(struct rtw_dev *rtwdev)
+{
+ rtw_phy_ra_info_update(rtwdev);
+ rtw_phy_rrsr_update(rtwdev);
+}
+
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
/* for further calculation */
rtw_phy_statistics(rtwdev);
rtw_phy_dig(rtwdev);
rtw_phy_cck_pd(rtwdev);
- rtw_phy_ra_info_update(rtwdev);
+ rtw_phy_ra_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index b924ed07630a..a4fcfb878550 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -185,4 +185,7 @@ enum rtw_phy_cck_pd_lv {
#define LSSI_READ_EDGE_MASK 0x80000000
#define LSSI_READ_DATA_MASK 0xfffff
+#define RRSR_RATE_ORDER_MAX 0xfffff
+#define RRSR_RATE_ORDER_CCK_LEN 4
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index cf9a3b674d30..ea518aa78552 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -306,6 +306,8 @@
#define REG_DARFRC 0x0430
#define REG_DARFRCH 0x0434
#define REG_RARFRCH 0x043C
+#define REG_RRSR 0x0440
+#define BITS_RRSR_RSC GENMASK(22, 21)
#define REG_ARFR0 0x0444
#define REG_ARFRH0 0x0448
#define REG_ARFR1_V1 0x044C
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 9268ea8b6dda..3fdbaf7302c5 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -60,8 +60,8 @@ static const struct rtw_hw_reg rtw8723d_txagc[] = {
#define WLAN_MAX_AGG_NR 0x0A
#define WLAN_AMPDU_MAX_TIME 0x1C
#define WLAN_ANT_SEL 0x82
-#define WLAN_LTR_IDLE_LAT 0x883C883C
-#define WLAN_LTR_ACT_LAT 0x880B880B
+#define WLAN_LTR_IDLE_LAT 0x90039003
+#define WLAN_LTR_ACT_LAT 0x883c883c
#define WLAN_LTR_CTRL1 0xCB004010
#define WLAN_LTR_CTRL2 0x01233425
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index fbfd85439d1f..33c6cf1206c8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -15,12 +15,23 @@
#include "debug.h"
#include "bf.h"
+static const s8 lna_gain_table_0[8] = {22, 8, -6, -22, -31, -40, -46, -52};
+static const s8 lna_gain_table_1[16] = {10, 6, 2, -2, -6, -10, -14, -17,
+ -20, -24, -28, -31, -34, -37, -40, -44};
+
static void rtw8821ce_efuse_parsing(struct rtw_efuse *efuse,
struct rtw8821c_efuse *map)
{
ether_addr_copy(efuse->addr, map->e.mac_addr);
}
+enum rtw8821ce_rf_set {
+ SWITCH_TO_BTG,
+ SWITCH_TO_WLG,
+ SWITCH_TO_WLA,
+ SWITCH_TO_BT,
+};
+
static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
@@ -224,6 +235,40 @@ static void rtw8821c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
}
+static void rtw8821c_switch_rf_set(struct rtw_dev *rtwdev, u8 rf_set)
+{
+ u32 reg;
+
+ rtw_write32_set(rtwdev, REG_DMEM_CTRL, BIT_WL_RST);
+ rtw_write32_set(rtwdev, REG_SYS_CTRL, BIT_FEN_EN);
+
+ reg = rtw_read32(rtwdev, REG_RFECTL);
+ switch (rf_set) {
+ case SWITCH_TO_BTG:
+ reg |= B_BTG_SWITCH;
+ reg &= ~(B_CTRL_SWITCH | B_WL_SWITCH | B_WLG_SWITCH |
+ B_WLA_SWITCH);
+ rtw_write32_mask(rtwdev, REG_ENRXCCA, MASKBYTE2, BTG_CCA);
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, MASKLWORD, BTG_LNA);
+ break;
+ case SWITCH_TO_WLG:
+ reg |= B_WL_SWITCH | B_WLG_SWITCH;
+ reg &= ~(B_BTG_SWITCH | B_CTRL_SWITCH | B_WLA_SWITCH);
+ rtw_write32_mask(rtwdev, REG_ENRXCCA, MASKBYTE2, WLG_CCA);
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, MASKLWORD, WLG_LNA);
+ break;
+ case SWITCH_TO_WLA:
+ reg |= B_WL_SWITCH | B_WLA_SWITCH;
+ reg &= ~(B_BTG_SWITCH | B_CTRL_SWITCH | B_WLG_SWITCH);
+ break;
+ case SWITCH_TO_BT:
+ default:
+ break;
+ }
+
+ rtw_write32(rtwdev, REG_RFECTL, reg);
+}
+
static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
u32 rf_reg18;
@@ -257,9 +302,14 @@ static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
}
if (channel <= 14) {
+ if (rtwdev->efuse.rfe_option == 0)
+ rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLG);
+ else if (rtwdev->efuse.rfe_option == 2)
+ rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_BTG);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1);
rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf);
} else {
+ rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLA);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x0);
}
@@ -426,17 +476,49 @@ static void rtw8821c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw8821c_set_channel_rxdfir(rtwdev, bw);
}
+static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const s8 *lna_gain_table;
+ int lna_gain_table_size;
+ s8 rx_pwr_all = 0;
+ s8 lna_gain = 0;
+
+ if (efuse->rfe_option == 0) {
+ lna_gain_table = lna_gain_table_0;
+ lna_gain_table_size = ARRAY_SIZE(lna_gain_table_0);
+ } else {
+ lna_gain_table = lna_gain_table_1;
+ lna_gain_table_size = ARRAY_SIZE(lna_gain_table_1);
+ }
+
+ if (lna_idx >= lna_gain_table_size) {
+ rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ return -120;
+ }
+
+ lna_gain = lna_gain_table[lna_idx];
+ rx_pwr_all = lna_gain - 2 * vga_idx;
+
+ return rx_pwr_all;
+}
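
[get_cck_rx_pwr() reconstructs CCK RX power from the reported AGC state: the LNA index selects a gain from the RFE-option-specific table and each VGA step subtracts 2 dB. A worked example with illustrative numbers:]

/* Illustrative only:
 * rfe_option == 0 -> lna_gain_table_0; lna_idx = 2 -> lna_gain = -6
 * vga_idx == 10   -> rx_pwr_all = -6 - 2 * 10 = -26 (dBm)
 * An out-of-range lna_idx is logged and falls back to the -120 dBm floor.
 */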
+
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
- s8 min_rx_power = -120;
- u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+ s8 rx_power;
+ u8 lna_idx = 0;
+ u8 vga_idx = 0;
- pkt_stat->rx_power[RF_PATH_A] = pwdb - 100;
+ vga_idx = GET_PHY_STAT_P0_VGA(phy_status);
+ lna_idx = FIELD_PREP(BIT_LNA_H_MASK, GET_PHY_STAT_P0_LNA_H(phy_status)) |
+ FIELD_PREP(BIT_LNA_L_MASK, GET_PHY_STAT_P0_LNA_L(phy_status));
+ rx_power = get_cck_rx_pwr(rtwdev, lna_idx, vga_idx);
+
+ pkt_stat->rx_power[RF_PATH_A] = rx_power;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
- pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
- min_rx_power);
+ pkt_stat->signal_power = rx_power;
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -719,8 +801,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
regval = (!polarity_inverse ? 0x1 : 0x2);
}
- rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
- regval);
+ rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+ regval);
break;
case COEX_SWITCH_CTRL_BY_PTA:
rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
@@ -730,8 +812,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
PTA_CTRL_PIN);
regval = (!polarity_inverse ? 0x2 : 0x1);
- rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
- regval);
+ rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+ regval);
break;
case COEX_SWITCH_CTRL_BY_ANTDIV:
rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
@@ -757,11 +839,11 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
}
if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) {
- rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
- rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+ rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
} else {
- rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
- rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+ rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
}
}
@@ -1022,12 +1104,6 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
u8 pd[CCK_PD_LV_MAX] = {3, 7, 13, 13, 13};
u8 cck_n_rx;
- if (dm_info->min_rssi > 60) {
- new_lvl = 4;
- pd[4] = 0x1d;
- goto set_cck_pd;
- }
-
rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n",
dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl);
@@ -1044,7 +1120,6 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
-set_cck_pd:
dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl;
rtw_write32_mask(rtwdev, REG_PWRTH, 0x3f0000, pd[new_lvl]);
rtw_write32_mask(rtwdev, REG_PWRTH2, 0x1f0000,
@@ -1421,6 +1496,7 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
+ [2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index e11e3fc41c95..112faa60f653 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -148,6 +148,14 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
/* phy status page0 */
#define GET_PHY_STAT_P0_PWDB(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P0_VGA(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(12, 8))
+#define GET_PHY_STAT_P0_LNA_L(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(15, 13))
+#define GET_PHY_STAT_P0_LNA_H(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), BIT(23))
+#define BIT_LNA_H_MASK BIT(3)
+#define BIT_LNA_L_MASK GENMASK(2, 0)
/* phy status page1 */
#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
@@ -173,6 +181,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define REG_SYS_CTRL 0x000
+#define BIT_FEN_EN BIT(26)
#define REG_INIRTS_RATE_SEL 0x0480
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
@@ -204,6 +214,11 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_FA_CCK 0xa5c
#define REG_RXDESC 0xa2c
#define REG_ENTXCCK 0xa80
+#define BTG_LNA 0xfc84
+#define WLG_LNA 0x7532
+#define REG_ENRXCCA 0xa84
+#define BTG_CCA 0x0e
+#define WLG_CCA 0x12
#define REG_PWRTH2 0xaa8
#define REG_CSRATIO 0xaaa
#define REG_TXFILTER 0xaac
@@ -217,6 +232,11 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_RFESEL0 0xcb0
#define REG_RFESEL8 0xcb4
#define REG_RFECTL 0xcb8
+#define B_BTG_SWITCH BIT(16)
+#define B_CTRL_SWITCH BIT(18)
+#define B_WL_SWITCH (BIT(20) | BIT(22))
+#define B_WLG_SWITCH BIT(21)
+#define B_WLA_SWITCH BIT(23)
#define REG_RFEINV 0xcbc
#define REG_AGCTR_B 0xe08
#define REG_RXIGI_B 0xe50
@@ -227,6 +247,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_CCA_OFDM 0xf08
#define REG_FA_OFDM 0xf48
#define REG_CCA_CCK 0xfcc
+#define REG_DMEM_CTRL 0x1080
+#define BIT_WL_RST BIT(16)
#define REG_ANTWT 0x1904
#define REG_IQKFAILMSK 0x1bf0
#define BIT_MASK_R_RFE_SEL_15 GENMASK(31, 28)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
index 970f903f7dc7..8e8915c5c498 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
@@ -1342,6 +1342,399 @@ static const u32 rtw8821c_agc[] = {
RTW_DECL_TABLE_PHY_COND(rtw8821c_agc, rtw_phy_cfg_agc);
+static const u32 rtw8821c_agc_btg_type2[] = {
+ 0x80001004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000013,
+ 0x81C, 0xFE020013,
+ 0x81C, 0xFD040013,
+ 0x81C, 0xFC060013,
+ 0x81C, 0xFB080013,
+ 0x81C, 0xFA0A0013,
+ 0x81C, 0xF90C0013,
+ 0x81C, 0xF80E0013,
+ 0x81C, 0xF7100013,
+ 0x81C, 0xF6120013,
+ 0x81C, 0xF5140013,
+ 0x81C, 0xF4160013,
+ 0x81C, 0xF3180013,
+ 0x81C, 0xF21A0013,
+ 0x81C, 0xF11C0013,
+ 0x81C, 0xF01E0013,
+ 0x81C, 0xEF200013,
+ 0x81C, 0xEE220013,
+ 0x81C, 0xED240013,
+ 0x81C, 0xEC260013,
+ 0x81C, 0xEB280013,
+ 0x81C, 0xEA2A0013,
+ 0x81C, 0xE92C0013,
+ 0x81C, 0xE82E0013,
+ 0x81C, 0xE7300013,
+ 0x81C, 0x8B320013,
+ 0x81C, 0x8A340013,
+ 0x81C, 0x89360013,
+ 0x81C, 0x88380013,
+ 0x81C, 0x873A0013,
+ 0x81C, 0x863C0013,
+ 0x81C, 0x853E0013,
+ 0x81C, 0x84400013,
+ 0x81C, 0x83420013,
+ 0x81C, 0x82440013,
+ 0x81C, 0x81460013,
+ 0x81C, 0x08480013,
+ 0x81C, 0x074A0013,
+ 0x81C, 0x064C0013,
+ 0x81C, 0x054E0013,
+ 0x81C, 0x04500013,
+ 0x81C, 0x03520013,
+ 0x81C, 0x88540003,
+ 0x81C, 0x87560003,
+ 0x81C, 0x86580003,
+ 0x81C, 0x855A0003,
+ 0x81C, 0x845C0003,
+ 0x81C, 0x835E0003,
+ 0x81C, 0x82600003,
+ 0x81C, 0x81620003,
+ 0x81C, 0x07640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x01720003,
+ 0x81C, 0x01740003,
+ 0x81C, 0x01760003,
+ 0x81C, 0x01780003,
+ 0x81C, 0x017A0003,
+ 0x81C, 0x017C0003,
+ 0x81C, 0x017E0003,
+ 0x81C, 0xFF000813,
+ 0x81C, 0xFE020813,
+ 0x81C, 0xFD040813,
+ 0x81C, 0xFC060813,
+ 0x81C, 0xFB080813,
+ 0x81C, 0xFA0A0813,
+ 0x81C, 0xF90C0813,
+ 0x81C, 0xF80E0813,
+ 0x81C, 0xF7100813,
+ 0x81C, 0xF6120813,
+ 0x81C, 0xF5140813,
+ 0x81C, 0xF4160813,
+ 0x81C, 0xF3180813,
+ 0x81C, 0xF21A0813,
+ 0x81C, 0xF11C0813,
+ 0x81C, 0x941E0813,
+ 0x81C, 0x93200813,
+ 0x81C, 0x92220813,
+ 0x81C, 0x91240813,
+ 0x81C, 0x90260813,
+ 0x81C, 0x8F280813,
+ 0x81C, 0x8E2A0813,
+ 0x81C, 0x8D2C0813,
+ 0x81C, 0x8C2E0813,
+ 0x81C, 0x8B300813,
+ 0x81C, 0x8A320813,
+ 0x81C, 0x89340813,
+ 0x81C, 0x88360813,
+ 0x81C, 0x87380813,
+ 0x81C, 0x863A0813,
+ 0x81C, 0x853C0813,
+ 0x81C, 0x843E0813,
+ 0x81C, 0x83400813,
+ 0x81C, 0x82420813,
+ 0x81C, 0x81440813,
+ 0x81C, 0x07460813,
+ 0x81C, 0x06480813,
+ 0x81C, 0x054A0813,
+ 0x81C, 0x044C0813,
+ 0x81C, 0x034E0813,
+ 0x81C, 0x02500813,
+ 0x81C, 0x01520813,
+ 0x81C, 0x88540803,
+ 0x81C, 0x87560803,
+ 0x81C, 0x86580803,
+ 0x81C, 0x855A0803,
+ 0x81C, 0x845C0803,
+ 0x81C, 0x835E0803,
+ 0x81C, 0x82600803,
+ 0x81C, 0x81620803,
+ 0x81C, 0x07640803,
+ 0x81C, 0x06660803,
+ 0x81C, 0x05680803,
+ 0x81C, 0x046A0803,
+ 0x81C, 0x036C0803,
+ 0x81C, 0x026E0803,
+ 0x81C, 0x01700803,
+ 0x81C, 0x01720803,
+ 0x81C, 0x01740803,
+ 0x81C, 0x01760803,
+ 0x81C, 0x01780803,
+ 0x81C, 0x017A0803,
+ 0x81C, 0x017C0803,
+ 0x81C, 0x017E0803,
+ 0x90001005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000013,
+ 0x81C, 0xFE020013,
+ 0x81C, 0xFD040013,
+ 0x81C, 0xFC060013,
+ 0x81C, 0xFB080013,
+ 0x81C, 0xFA0A0013,
+ 0x81C, 0xF90C0013,
+ 0x81C, 0xF80E0013,
+ 0x81C, 0xF7100013,
+ 0x81C, 0xF6120013,
+ 0x81C, 0xF5140013,
+ 0x81C, 0xF4160013,
+ 0x81C, 0xF3180013,
+ 0x81C, 0xF21A0013,
+ 0x81C, 0xF11C0013,
+ 0x81C, 0xF01E0013,
+ 0x81C, 0xEF200013,
+ 0x81C, 0xEE220013,
+ 0x81C, 0xED240013,
+ 0x81C, 0xEC260013,
+ 0x81C, 0xEB280013,
+ 0x81C, 0xEA2A0013,
+ 0x81C, 0xE92C0013,
+ 0x81C, 0xE82E0013,
+ 0x81C, 0xE7300013,
+ 0x81C, 0x8B320013,
+ 0x81C, 0x8A340013,
+ 0x81C, 0x89360013,
+ 0x81C, 0x88380013,
+ 0x81C, 0x873A0013,
+ 0x81C, 0x863C0013,
+ 0x81C, 0x853E0013,
+ 0x81C, 0x84400013,
+ 0x81C, 0x83420013,
+ 0x81C, 0x82440013,
+ 0x81C, 0x81460013,
+ 0x81C, 0x08480013,
+ 0x81C, 0x074A0013,
+ 0x81C, 0x064C0013,
+ 0x81C, 0x054E0013,
+ 0x81C, 0x04500013,
+ 0x81C, 0x03520013,
+ 0x81C, 0x88540003,
+ 0x81C, 0x87560003,
+ 0x81C, 0x86580003,
+ 0x81C, 0x855A0003,
+ 0x81C, 0x845C0003,
+ 0x81C, 0x835E0003,
+ 0x81C, 0x82600003,
+ 0x81C, 0x81620003,
+ 0x81C, 0x07640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x01720003,
+ 0x81C, 0x01740003,
+ 0x81C, 0x01760003,
+ 0x81C, 0x01780003,
+ 0x81C, 0x017A0003,
+ 0x81C, 0x017C0003,
+ 0x81C, 0x017E0003,
+ 0x81C, 0xFF000813,
+ 0x81C, 0xFE020813,
+ 0x81C, 0xFD040813,
+ 0x81C, 0xFC060813,
+ 0x81C, 0xFB080813,
+ 0x81C, 0xFA0A0813,
+ 0x81C, 0xF90C0813,
+ 0x81C, 0xF80E0813,
+ 0x81C, 0xF7100813,
+ 0x81C, 0xF6120813,
+ 0x81C, 0xF5140813,
+ 0x81C, 0xF4160813,
+ 0x81C, 0xF3180813,
+ 0x81C, 0xF21A0813,
+ 0x81C, 0xF11C0813,
+ 0x81C, 0x941E0813,
+ 0x81C, 0x93200813,
+ 0x81C, 0x92220813,
+ 0x81C, 0x91240813,
+ 0x81C, 0x90260813,
+ 0x81C, 0x8F280813,
+ 0x81C, 0x8E2A0813,
+ 0x81C, 0x8D2C0813,
+ 0x81C, 0x8C2E0813,
+ 0x81C, 0x8B300813,
+ 0x81C, 0x8A320813,
+ 0x81C, 0x89340813,
+ 0x81C, 0x88360813,
+ 0x81C, 0x87380813,
+ 0x81C, 0x863A0813,
+ 0x81C, 0x853C0813,
+ 0x81C, 0x843E0813,
+ 0x81C, 0x83400813,
+ 0x81C, 0x82420813,
+ 0x81C, 0x81440813,
+ 0x81C, 0x07460813,
+ 0x81C, 0x06480813,
+ 0x81C, 0x054A0813,
+ 0x81C, 0x044C0813,
+ 0x81C, 0x034E0813,
+ 0x81C, 0x02500813,
+ 0x81C, 0x01520813,
+ 0x81C, 0x88540803,
+ 0x81C, 0x87560803,
+ 0x81C, 0x86580803,
+ 0x81C, 0x855A0803,
+ 0x81C, 0x845C0803,
+ 0x81C, 0x835E0803,
+ 0x81C, 0x82600803,
+ 0x81C, 0x81620803,
+ 0x81C, 0x07640803,
+ 0x81C, 0x06660803,
+ 0x81C, 0x05680803,
+ 0x81C, 0x046A0803,
+ 0x81C, 0x036C0803,
+ 0x81C, 0x026E0803,
+ 0x81C, 0x01700803,
+ 0x81C, 0x01720803,
+ 0x81C, 0x01740803,
+ 0x81C, 0x01760803,
+ 0x81C, 0x01780803,
+ 0x81C, 0x017A0803,
+ 0x81C, 0x017C0803,
+ 0x81C, 0x017E0803,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000013,
+ 0x81C, 0xFE020013,
+ 0x81C, 0xFD040013,
+ 0x81C, 0xFC060013,
+ 0x81C, 0xFB080013,
+ 0x81C, 0xFA0A0013,
+ 0x81C, 0xF90C0013,
+ 0x81C, 0xF80E0013,
+ 0x81C, 0xF7100013,
+ 0x81C, 0xF6120013,
+ 0x81C, 0xF5140013,
+ 0x81C, 0xF4160013,
+ 0x81C, 0xF3180013,
+ 0x81C, 0xF21A0013,
+ 0x81C, 0xF11C0013,
+ 0x81C, 0xF01E0013,
+ 0x81C, 0xEF200013,
+ 0x81C, 0xEE220013,
+ 0x81C, 0xED240013,
+ 0x81C, 0xEC260013,
+ 0x81C, 0xEB280013,
+ 0x81C, 0xEA2A0013,
+ 0x81C, 0xE92C0013,
+ 0x81C, 0xE82E0013,
+ 0x81C, 0xE7300013,
+ 0x81C, 0x8A320013,
+ 0x81C, 0x89340013,
+ 0x81C, 0x88360013,
+ 0x81C, 0x87380013,
+ 0x81C, 0x863A0013,
+ 0x81C, 0x853C0013,
+ 0x81C, 0x843E0013,
+ 0x81C, 0x83400013,
+ 0x81C, 0x82420013,
+ 0x81C, 0x81440013,
+ 0x81C, 0x07460013,
+ 0x81C, 0x06480013,
+ 0x81C, 0x054A0013,
+ 0x81C, 0x044C0013,
+ 0x81C, 0x034E0013,
+ 0x81C, 0x02500013,
+ 0x81C, 0x01520013,
+ 0x81C, 0x88540003,
+ 0x81C, 0x87560003,
+ 0x81C, 0x86580003,
+ 0x81C, 0x855A0003,
+ 0x81C, 0x845C0003,
+ 0x81C, 0x835E0003,
+ 0x81C, 0x82600003,
+ 0x81C, 0x81620003,
+ 0x81C, 0x07640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x01720003,
+ 0x81C, 0x01740003,
+ 0x81C, 0x01760003,
+ 0x81C, 0x01780003,
+ 0x81C, 0x017A0003,
+ 0x81C, 0x017C0003,
+ 0x81C, 0x017E0003,
+ 0x81C, 0xFF000813,
+ 0x81C, 0xFE020813,
+ 0x81C, 0xFD040813,
+ 0x81C, 0xFC060813,
+ 0x81C, 0xFB080813,
+ 0x81C, 0xFA0A0813,
+ 0x81C, 0xF90C0813,
+ 0x81C, 0xF80E0813,
+ 0x81C, 0xF7100813,
+ 0x81C, 0xF6120813,
+ 0x81C, 0xF5140813,
+ 0x81C, 0xF4160813,
+ 0x81C, 0xF3180813,
+ 0x81C, 0xF21A0813,
+ 0x81C, 0xF11C0813,
+ 0x81C, 0x961E0813,
+ 0x81C, 0x95200813,
+ 0x81C, 0x94220813,
+ 0x81C, 0x93240813,
+ 0x81C, 0x92260813,
+ 0x81C, 0x91280813,
+ 0x81C, 0x8F2A0813,
+ 0x81C, 0x8E2C0813,
+ 0x81C, 0x8D2E0813,
+ 0x81C, 0x8C300813,
+ 0x81C, 0x8B320813,
+ 0x81C, 0x8A340813,
+ 0x81C, 0x89360813,
+ 0x81C, 0x88380813,
+ 0x81C, 0x873A0813,
+ 0x81C, 0x863C0813,
+ 0x81C, 0x853E0813,
+ 0x81C, 0x84400813,
+ 0x81C, 0x83420813,
+ 0x81C, 0x82440813,
+ 0x81C, 0x08460813,
+ 0x81C, 0x07480813,
+ 0x81C, 0x064A0813,
+ 0x81C, 0x054C0813,
+ 0x81C, 0x044E0813,
+ 0x81C, 0x03500813,
+ 0x81C, 0x02520813,
+ 0x81C, 0x89540803,
+ 0x81C, 0x88560803,
+ 0x81C, 0x87580803,
+ 0x81C, 0x865A0803,
+ 0x81C, 0x855C0803,
+ 0x81C, 0x845E0803,
+ 0x81C, 0x83600803,
+ 0x81C, 0x82620803,
+ 0x81C, 0x07640803,
+ 0x81C, 0x06660803,
+ 0x81C, 0x05680803,
+ 0x81C, 0x046A0803,
+ 0x81C, 0x036C0803,
+ 0x81C, 0x026E0803,
+ 0x81C, 0x01700803,
+ 0x81C, 0x01720803,
+ 0x81C, 0x01740803,
+ 0x81C, 0x01760803,
+ 0x81C, 0x01780803,
+ 0x81C, 0x017A0803,
+ 0x81C, 0x017C0803,
+ 0x81C, 0x017E0803,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8821c_agc_btg_type2, rtw_phy_cfg_agc);
+
static const u32 rtw8821c_bb[] = {
0x800, 0x9020D010,
0x804, 0x80018180,
@@ -1394,7 +1787,11 @@ static const u32 rtw8821c_bb[] = {
0x8C0, 0xFFE04020,
0x8C4, 0x47C00000,
0x8C8, 0x00025165,
+ 0x82000400, 0x00000000, 0x40000000, 0x00000000,
+ 0x8CC, 0x08190492,
+ 0xA0000000, 0x00000000,
0x8CC, 0x08188492,
+ 0xB0000000, 0x00000000,
0x8D0, 0x0000B800,
0x8D4, 0x860308A0,
0x8D8, 0x290B5612,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h
index 5ea8b4fc7fba..cda98f5c4a01 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h
@@ -7,6 +7,7 @@
extern const struct rtw_table rtw8821c_mac_tbl;
extern const struct rtw_table rtw8821c_agc_tbl;
+extern const struct rtw_table rtw8821c_agc_btg_type2_tbl;
extern const struct rtw_table rtw8821c_bb_tbl;
extern const struct rtw_table rtw8821c_bb_pg_type0_tbl;
extern const struct rtw_table rtw8821c_rf_a_tbl;
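
The extern just added is what lets the 8821c chip code swap AGC tables per frontend. The actual selection logic lives outside this diff; the fragment below is only a plausible sketch, where rtw8821c_is_btg_type2() is an invented predicate standing in for whatever efuse/RFE check the driver really performs (rtw_load_table() being the driver's existing table loader).

/* Hedged sketch: pick the BTG type-2 AGC table when the efuse reports
 * that frontend, otherwise fall back to the stock AGC table. */
static void rtw8821c_load_agc_table(struct rtw_dev *rtwdev)
{
	const struct rtw_table *tbl = &rtw8821c_agc_tbl;

	if (rtw8821c_is_btg_type2(rtwdev))	/* invented predicate */
		tbl = &rtw8821c_agc_btg_type2_tbl;

	rtw_load_table(rtwdev, tbl);
}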
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 32b4771e04d0..bb2495b8609e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -164,8 +164,6 @@ const struct rtw_table name ## _tbl = { \
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
-#define REG_RRSR 0x0440
-#define BITS_RRSR_RSC (BIT(21) | BIT(22))
#define REG_TXDFIR0 0x808
#define REG_DFIRBW 0x810
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 3a204a7533df..ad5715c65de3 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -13,7 +13,72 @@ static const u32 rtw8822c_mac[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac);
static const u32 rtw8822c_agc[] = {
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FE,
+ 0x1D90, 0x300201FD,
+ 0x1D90, 0x300301FC,
+ 0x1D90, 0x300401FB,
+ 0x1D90, 0x300501FA,
+ 0x1D90, 0x300601F9,
+ 0x1D90, 0x300701F8,
+ 0x1D90, 0x300801F7,
+ 0x1D90, 0x300901F6,
+ 0x1D90, 0x300A01F5,
+ 0x1D90, 0x300B01F4,
+ 0x1D90, 0x300C01F3,
+ 0x1D90, 0x300D01F2,
+ 0x1D90, 0x300E01F1,
+ 0x1D90, 0x300F01F0,
+ 0x1D90, 0x301001EF,
+ 0x1D90, 0x301101EE,
+ 0x1D90, 0x301201ED,
+ 0x1D90, 0x301301EC,
+ 0x1D90, 0x301401EB,
+ 0x1D90, 0x301501EA,
+ 0x1D90, 0x301601E9,
+ 0x1D90, 0x301701E8,
+ 0x1D90, 0x301801E7,
+ 0x1D90, 0x301901E5,
+ 0x1D90, 0x301A01E4,
+ 0x1D90, 0x301B01C5,
+ 0x1D90, 0x301C01C4,
+ 0x1D90, 0x301D01C3,
+ 0x1D90, 0x301E01C2,
+ 0x1D90, 0x301F0188,
+ 0x1D90, 0x30200187,
+ 0x1D90, 0x30210186,
+ 0x1D90, 0x30220184,
+ 0x1D90, 0x30230183,
+ 0x1D90, 0x30240182,
+ 0x1D90, 0x30250181,
+ 0x1D90, 0x30260148,
+ 0x1D90, 0x30270147,
+ 0x1D90, 0x30280146,
+ 0x1D90, 0x30290144,
+ 0x1D90, 0x302A0143,
+ 0x1D90, 0x302B0142,
+ 0x1D90, 0x302C0141,
+ 0x1D90, 0x302D00C8,
+ 0x1D90, 0x302E00C7,
+ 0x1D90, 0x302F00C6,
+ 0x1D90, 0x303000C5,
+ 0x1D90, 0x303100C4,
+ 0x1D90, 0x303200C3,
+ 0x1D90, 0x30330048,
+ 0x1D90, 0x30340047,
+ 0x1D90, 0x30350046,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370025,
+ 0x1D90, 0x30380024,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0003,
+ 0x1D90, 0x303E0002,
+ 0x1D90, 0x303F0001,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x300001FF,
0x1D90, 0x300101FE,
0x1D90, 0x300201FD,
@@ -209,7 +274,72 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x303E0002,
0x1D90, 0x303F0001,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x3040011F,
+ 0x1D90, 0x3041011F,
+ 0x1D90, 0x3042011F,
+ 0x1D90, 0x3043011F,
+ 0x1D90, 0x3044011F,
+ 0x1D90, 0x3045011F,
+ 0x1D90, 0x3046011F,
+ 0x1D90, 0x3047011F,
+ 0x1D90, 0x3048011F,
+ 0x1D90, 0x3049011F,
+ 0x1D90, 0x304A011F,
+ 0x1D90, 0x304B011F,
+ 0x1D90, 0x304C011F,
+ 0x1D90, 0x304D011F,
+ 0x1D90, 0x304E011F,
+ 0x1D90, 0x304F00F4,
+ 0x1D90, 0x305000F3,
+ 0x1D90, 0x305100F2,
+ 0x1D90, 0x305200F1,
+ 0x1D90, 0x305300F0,
+ 0x1D90, 0x305400EF,
+ 0x1D90, 0x305500EE,
+ 0x1D90, 0x305600ED,
+ 0x1D90, 0x305700EC,
+ 0x1D90, 0x305800EB,
+ 0x1D90, 0x305900EA,
+ 0x1D90, 0x305A00E9,
+ 0x1D90, 0x305B00E8,
+ 0x1D90, 0x305C00E7,
+ 0x1D90, 0x305D00E6,
+ 0x1D90, 0x305E00E4,
+ 0x1D90, 0x305F00E3,
+ 0x1D90, 0x306000E2,
+ 0x1D90, 0x306100C4,
+ 0x1D90, 0x306200C3,
+ 0x1D90, 0x306300C2,
+ 0x1D90, 0x306400A4,
+ 0x1D90, 0x306500A3,
+ 0x1D90, 0x306600A2,
+ 0x1D90, 0x306700A1,
+ 0x1D90, 0x30680084,
+ 0x1D90, 0x30690083,
+ 0x1D90, 0x306A0082,
+ 0x1D90, 0x306B0081,
+ 0x1D90, 0x306C0080,
+ 0x1D90, 0x306D0067,
+ 0x1D90, 0x306E0066,
+ 0x1D90, 0x306F0065,
+ 0x1D90, 0x30700064,
+ 0x1D90, 0x30710063,
+ 0x1D90, 0x30720044,
+ 0x1D90, 0x30730043,
+ 0x1D90, 0x30740042,
+ 0x1D90, 0x30750041,
+ 0x1D90, 0x30760024,
+ 0x1D90, 0x30770023,
+ 0x1D90, 0x30780022,
+ 0x1D90, 0x30790021,
+ 0x1D90, 0x307A0020,
+ 0x1D90, 0x307B0004,
+ 0x1D90, 0x307C0003,
+ 0x1D90, 0x307D0002,
+ 0x1D90, 0x307E0001,
+ 0x1D90, 0x307F0000,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x304001FD,
0x1D90, 0x304101FC,
0x1D90, 0x304201FB,
@@ -405,7 +535,72 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x307E0001,
0x1D90, 0x307F0000,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x308000FF,
+ 0x1D90, 0x308100FF,
+ 0x1D90, 0x308200FF,
+ 0x1D90, 0x308300FF,
+ 0x1D90, 0x308400FF,
+ 0x1D90, 0x308500FF,
+ 0x1D90, 0x308600FE,
+ 0x1D90, 0x308700FD,
+ 0x1D90, 0x308800FC,
+ 0x1D90, 0x308900FB,
+ 0x1D90, 0x308A00FA,
+ 0x1D90, 0x308B00F9,
+ 0x1D90, 0x308C00F8,
+ 0x1D90, 0x308D00F7,
+ 0x1D90, 0x308E00F6,
+ 0x1D90, 0x308F00F5,
+ 0x1D90, 0x309000F4,
+ 0x1D90, 0x309100F3,
+ 0x1D90, 0x309200F2,
+ 0x1D90, 0x309300F1,
+ 0x1D90, 0x309400F0,
+ 0x1D90, 0x309500EF,
+ 0x1D90, 0x309600EE,
+ 0x1D90, 0x309700ED,
+ 0x1D90, 0x309800EC,
+ 0x1D90, 0x309900EB,
+ 0x1D90, 0x309A00EA,
+ 0x1D90, 0x309B00E8,
+ 0x1D90, 0x309C00E7,
+ 0x1D90, 0x309D00E6,
+ 0x1D90, 0x309E00E5,
+ 0x1D90, 0x309F00E4,
+ 0x1D90, 0x30A000C4,
+ 0x1D90, 0x30A100C3,
+ 0x1D90, 0x30A200C2,
+ 0x1D90, 0x30A300C1,
+ 0x1D90, 0x30A400A3,
+ 0x1D90, 0x30A500A2,
+ 0x1D90, 0x30A600A1,
+ 0x1D90, 0x30A70085,
+ 0x1D90, 0x30A80084,
+ 0x1D90, 0x30A90083,
+ 0x1D90, 0x30AA0082,
+ 0x1D90, 0x30AB0081,
+ 0x1D90, 0x30AC0067,
+ 0x1D90, 0x30AD0066,
+ 0x1D90, 0x30AE0065,
+ 0x1D90, 0x30AF0064,
+ 0x1D90, 0x30B00063,
+ 0x1D90, 0x30B10044,
+ 0x1D90, 0x30B20043,
+ 0x1D90, 0x30B30042,
+ 0x1D90, 0x30B40026,
+ 0x1D90, 0x30B50025,
+ 0x1D90, 0x30B60024,
+ 0x1D90, 0x30B70023,
+ 0x1D90, 0x30B80022,
+ 0x1D90, 0x30B90021,
+ 0x1D90, 0x30BA0005,
+ 0x1D90, 0x30BB0004,
+ 0x1D90, 0x30BC0003,
+ 0x1D90, 0x30BD0002,
+ 0x1D90, 0x30BE0001,
+ 0x1D90, 0x30BF0000,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x308000FA,
0x1D90, 0x308100F9,
0x1D90, 0x308200F8,
@@ -601,7 +796,72 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30BE0001,
0x1D90, 0x30BF0000,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x30C000FF,
+ 0x1D90, 0x30C100FF,
+ 0x1D90, 0x30C200FF,
+ 0x1D90, 0x30C300FF,
+ 0x1D90, 0x30C400FF,
+ 0x1D90, 0x30C500FF,
+ 0x1D90, 0x30C600FE,
+ 0x1D90, 0x30C700FD,
+ 0x1D90, 0x30C800FC,
+ 0x1D90, 0x30C900FB,
+ 0x1D90, 0x30CA00FA,
+ 0x1D90, 0x30CB00F9,
+ 0x1D90, 0x30CC00F8,
+ 0x1D90, 0x30CD00F7,
+ 0x1D90, 0x30CE00F6,
+ 0x1D90, 0x30CF00F5,
+ 0x1D90, 0x30D000F4,
+ 0x1D90, 0x30D100F3,
+ 0x1D90, 0x30D200F2,
+ 0x1D90, 0x30D300F1,
+ 0x1D90, 0x30D400F0,
+ 0x1D90, 0x30D500EF,
+ 0x1D90, 0x30D600EE,
+ 0x1D90, 0x30D700ED,
+ 0x1D90, 0x30D800EC,
+ 0x1D90, 0x30D900EB,
+ 0x1D90, 0x30DA00EA,
+ 0x1D90, 0x30DB00E8,
+ 0x1D90, 0x30DC00E7,
+ 0x1D90, 0x30DD00E6,
+ 0x1D90, 0x30DE00E5,
+ 0x1D90, 0x30DF00E4,
+ 0x1D90, 0x30E000E3,
+ 0x1D90, 0x30E100E2,
+ 0x1D90, 0x30E200A6,
+ 0x1D90, 0x30E300A5,
+ 0x1D90, 0x30E400A4,
+ 0x1D90, 0x30E500A3,
+ 0x1D90, 0x30E600A2,
+ 0x1D90, 0x30E70086,
+ 0x1D90, 0x30E80085,
+ 0x1D90, 0x30E90084,
+ 0x1D90, 0x30EA0083,
+ 0x1D90, 0x30EB0082,
+ 0x1D90, 0x30EC0067,
+ 0x1D90, 0x30ED0066,
+ 0x1D90, 0x30EE0065,
+ 0x1D90, 0x30EF0064,
+ 0x1D90, 0x30F00063,
+ 0x1D90, 0x30F10045,
+ 0x1D90, 0x30F20044,
+ 0x1D90, 0x30F30043,
+ 0x1D90, 0x30F40042,
+ 0x1D90, 0x30F50025,
+ 0x1D90, 0x30F60024,
+ 0x1D90, 0x30F70023,
+ 0x1D90, 0x30F80022,
+ 0x1D90, 0x30F90021,
+ 0x1D90, 0x30FA0005,
+ 0x1D90, 0x30FB0004,
+ 0x1D90, 0x30FC0003,
+ 0x1D90, 0x30FD0002,
+ 0x1D90, 0x30FE0001,
+ 0x1D90, 0x30FF0000,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x30C000F8,
0x1D90, 0x30C100F7,
0x1D90, 0x30C200F6,
@@ -797,267 +1057,397 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30FE0001,
0x1D90, 0x30FF0000,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FE,
+ 0x1D90, 0x310501FD,
+ 0x1D90, 0x310601FC,
+ 0x1D90, 0x310701FB,
+ 0x1D90, 0x310801FA,
+ 0x1D90, 0x310901F9,
+ 0x1D90, 0x310A01F8,
+ 0x1D90, 0x310B01F7,
+ 0x1D90, 0x310C01F6,
+ 0x1D90, 0x310D01F5,
+ 0x1D90, 0x310E01F4,
+ 0x1D90, 0x310F01F3,
+ 0x1D90, 0x311001F2,
+ 0x1D90, 0x311101F1,
+ 0x1D90, 0x311201F0,
+ 0x1D90, 0x311301EF,
+ 0x1D90, 0x311401EE,
+ 0x1D90, 0x311501ED,
+ 0x1D90, 0x311601EC,
+ 0x1D90, 0x311701EB,
+ 0x1D90, 0x311801EA,
+ 0x1D90, 0x311901E9,
+ 0x1D90, 0x311A01E8,
+ 0x1D90, 0x311B01E7,
+ 0x1D90, 0x311C01E5,
+ 0x1D90, 0x311D01E4,
+ 0x1D90, 0x311E01C5,
+ 0x1D90, 0x311F01C4,
+ 0x1D90, 0x312001C3,
+ 0x1D90, 0x312101C2,
+ 0x1D90, 0x31220188,
+ 0x1D90, 0x31230187,
+ 0x1D90, 0x31240186,
+ 0x1D90, 0x31250184,
+ 0x1D90, 0x31260183,
+ 0x1D90, 0x31270182,
+ 0x1D90, 0x31280181,
+ 0x1D90, 0x31290148,
+ 0x1D90, 0x312A0147,
+ 0x1D90, 0x312B0146,
+ 0x1D90, 0x312C0144,
+ 0x1D90, 0x312D0143,
+ 0x1D90, 0x312E0142,
+ 0x1D90, 0x312F0141,
+ 0x1D90, 0x313000C8,
+ 0x1D90, 0x313100C7,
+ 0x1D90, 0x313200C6,
+ 0x1D90, 0x313300C5,
+ 0x1D90, 0x313400C4,
+ 0x1D90, 0x313500C3,
+ 0x1D90, 0x31360048,
+ 0x1D90, 0x31370047,
+ 0x1D90, 0x31380046,
+ 0x1D90, 0x31390045,
+ 0x1D90, 0x313A0025,
+ 0x1D90, 0x313B0024,
+ 0x1D90, 0x313C0023,
+ 0x1D90, 0x313D0022,
+ 0x1D90, 0x313E0021,
+ 0x1D90, 0x313F0020,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x310001FF,
0x1D90, 0x310101FF,
0x1D90, 0x310201FF,
0x1D90, 0x310301FF,
- 0x1D90, 0x310401FF,
- 0x1D90, 0x310501FF,
- 0x1D90, 0x310601FF,
- 0x1D90, 0x310701FF,
- 0x1D90, 0x310801FF,
- 0x1D90, 0x310901FE,
- 0x1D90, 0x310A01FD,
- 0x1D90, 0x310B01FC,
- 0x1D90, 0x310C01FB,
- 0x1D90, 0x310D01FA,
- 0x1D90, 0x310E01F9,
- 0x1D90, 0x310F01F8,
- 0x1D90, 0x311001F7,
- 0x1D90, 0x311101F6,
- 0x1D90, 0x311201F5,
- 0x1D90, 0x311301F4,
- 0x1D90, 0x311401F3,
- 0x1D90, 0x311501F2,
- 0x1D90, 0x311601F1,
- 0x1D90, 0x311701F0,
- 0x1D90, 0x311801EF,
- 0x1D90, 0x311901EE,
- 0x1D90, 0x311A01ED,
- 0x1D90, 0x311B01EC,
- 0x1D90, 0x311C01EB,
- 0x1D90, 0x311D0192,
- 0x1D90, 0x311E0191,
- 0x1D90, 0x311F0190,
- 0x1D90, 0x3120018F,
- 0x1D90, 0x3121018E,
- 0x1D90, 0x3122018D,
- 0x1D90, 0x3123018C,
- 0x1D90, 0x3124018B,
- 0x1D90, 0x3125018A,
- 0x1D90, 0x31260189,
- 0x1D90, 0x31270188,
- 0x1D90, 0x31280187,
- 0x1D90, 0x31290186,
- 0x1D90, 0x312A0185,
- 0x1D90, 0x312B0149,
- 0x1D90, 0x312C0148,
- 0x1D90, 0x312D0147,
- 0x1D90, 0x312E0146,
- 0x1D90, 0x312F0145,
- 0x1D90, 0x31300144,
- 0x1D90, 0x31310143,
- 0x1D90, 0x31320142,
- 0x1D90, 0x31330141,
- 0x1D90, 0x31340140,
- 0x1D90, 0x313500C7,
- 0x1D90, 0x313600C6,
- 0x1D90, 0x313700C5,
- 0x1D90, 0x313800C4,
- 0x1D90, 0x313900C3,
- 0x1D90, 0x313A0088,
- 0x1D90, 0x313B0087,
- 0x1D90, 0x313C0086,
- 0x1D90, 0x313D0045,
- 0x1D90, 0x313E0044,
- 0x1D90, 0x313F0043,
+ 0x1D90, 0x310401FE,
+ 0x1D90, 0x310501FD,
+ 0x1D90, 0x310601FC,
+ 0x1D90, 0x310701FB,
+ 0x1D90, 0x310801FA,
+ 0x1D90, 0x310901F9,
+ 0x1D90, 0x310A01F8,
+ 0x1D90, 0x310B01F7,
+ 0x1D90, 0x310C01F6,
+ 0x1D90, 0x310D01F5,
+ 0x1D90, 0x310E01F4,
+ 0x1D90, 0x310F01F3,
+ 0x1D90, 0x311001F2,
+ 0x1D90, 0x311101F1,
+ 0x1D90, 0x311201F0,
+ 0x1D90, 0x311301EF,
+ 0x1D90, 0x311401EE,
+ 0x1D90, 0x311501ED,
+ 0x1D90, 0x311601EC,
+ 0x1D90, 0x311701EB,
+ 0x1D90, 0x311801EA,
+ 0x1D90, 0x311901E9,
+ 0x1D90, 0x311A01E8,
+ 0x1D90, 0x311B01E7,
+ 0x1D90, 0x311C01E5,
+ 0x1D90, 0x311D01E4,
+ 0x1D90, 0x311E01C5,
+ 0x1D90, 0x311F01C4,
+ 0x1D90, 0x312001C3,
+ 0x1D90, 0x312101C2,
+ 0x1D90, 0x31220188,
+ 0x1D90, 0x31230187,
+ 0x1D90, 0x31240186,
+ 0x1D90, 0x31250184,
+ 0x1D90, 0x31260183,
+ 0x1D90, 0x31270182,
+ 0x1D90, 0x31280181,
+ 0x1D90, 0x31290148,
+ 0x1D90, 0x312A0147,
+ 0x1D90, 0x312B0146,
+ 0x1D90, 0x312C0144,
+ 0x1D90, 0x312D0143,
+ 0x1D90, 0x312E0142,
+ 0x1D90, 0x312F0141,
+ 0x1D90, 0x313000C8,
+ 0x1D90, 0x313100C7,
+ 0x1D90, 0x313200C6,
+ 0x1D90, 0x313300C5,
+ 0x1D90, 0x313400C4,
+ 0x1D90, 0x313500C3,
+ 0x1D90, 0x31360048,
+ 0x1D90, 0x31370047,
+ 0x1D90, 0x31380046,
+ 0x1D90, 0x31390045,
+ 0x1D90, 0x313A0025,
+ 0x1D90, 0x313B0024,
+ 0x1D90, 0x313C0023,
+ 0x1D90, 0x313D0022,
+ 0x1D90, 0x313E0021,
+ 0x1D90, 0x313F0020,
0x90000016, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x310001FF,
0x1D90, 0x310101FF,
0x1D90, 0x310201FF,
0x1D90, 0x310301FF,
- 0x1D90, 0x310401FF,
- 0x1D90, 0x310501FF,
- 0x1D90, 0x310601FF,
- 0x1D90, 0x310701FF,
- 0x1D90, 0x310801FF,
- 0x1D90, 0x310901FE,
- 0x1D90, 0x310A01FD,
- 0x1D90, 0x310B01FC,
- 0x1D90, 0x310C01FB,
- 0x1D90, 0x310D01FA,
- 0x1D90, 0x310E01F9,
- 0x1D90, 0x310F01F8,
- 0x1D90, 0x311001F7,
- 0x1D90, 0x311101F6,
- 0x1D90, 0x311201F5,
- 0x1D90, 0x311301F4,
- 0x1D90, 0x311401F3,
- 0x1D90, 0x311501F2,
- 0x1D90, 0x311601F1,
- 0x1D90, 0x311701F0,
- 0x1D90, 0x311801EF,
- 0x1D90, 0x311901EE,
- 0x1D90, 0x311A01ED,
- 0x1D90, 0x311B01EC,
- 0x1D90, 0x311C01EB,
- 0x1D90, 0x311D0192,
- 0x1D90, 0x311E0191,
- 0x1D90, 0x311F0190,
- 0x1D90, 0x3120018F,
- 0x1D90, 0x3121018E,
- 0x1D90, 0x3122018D,
- 0x1D90, 0x3123018C,
- 0x1D90, 0x3124018B,
- 0x1D90, 0x3125018A,
- 0x1D90, 0x31260189,
- 0x1D90, 0x31270188,
- 0x1D90, 0x31280187,
- 0x1D90, 0x31290186,
- 0x1D90, 0x312A0185,
- 0x1D90, 0x312B0149,
- 0x1D90, 0x312C0148,
- 0x1D90, 0x312D0147,
- 0x1D90, 0x312E0146,
- 0x1D90, 0x312F0145,
- 0x1D90, 0x31300144,
- 0x1D90, 0x31310143,
- 0x1D90, 0x31320142,
- 0x1D90, 0x31330141,
- 0x1D90, 0x31340140,
- 0x1D90, 0x313500C7,
- 0x1D90, 0x313600C6,
- 0x1D90, 0x313700C5,
- 0x1D90, 0x313800C4,
- 0x1D90, 0x313900C3,
- 0x1D90, 0x313A0088,
- 0x1D90, 0x313B0087,
- 0x1D90, 0x313C0086,
- 0x1D90, 0x313D0045,
- 0x1D90, 0x313E0044,
- 0x1D90, 0x313F0043,
+ 0x1D90, 0x310401FE,
+ 0x1D90, 0x310501FD,
+ 0x1D90, 0x310601FC,
+ 0x1D90, 0x310701FB,
+ 0x1D90, 0x310801FA,
+ 0x1D90, 0x310901F9,
+ 0x1D90, 0x310A01F8,
+ 0x1D90, 0x310B01F7,
+ 0x1D90, 0x310C01F6,
+ 0x1D90, 0x310D01F5,
+ 0x1D90, 0x310E01F4,
+ 0x1D90, 0x310F01F3,
+ 0x1D90, 0x311001F2,
+ 0x1D90, 0x311101F1,
+ 0x1D90, 0x311201F0,
+ 0x1D90, 0x311301EF,
+ 0x1D90, 0x311401EE,
+ 0x1D90, 0x311501ED,
+ 0x1D90, 0x311601EC,
+ 0x1D90, 0x311701EB,
+ 0x1D90, 0x311801EA,
+ 0x1D90, 0x311901E9,
+ 0x1D90, 0x311A01E8,
+ 0x1D90, 0x311B01E7,
+ 0x1D90, 0x311C01E5,
+ 0x1D90, 0x311D01E4,
+ 0x1D90, 0x311E01C5,
+ 0x1D90, 0x311F01C4,
+ 0x1D90, 0x312001C3,
+ 0x1D90, 0x312101C2,
+ 0x1D90, 0x31220188,
+ 0x1D90, 0x31230187,
+ 0x1D90, 0x31240186,
+ 0x1D90, 0x31250184,
+ 0x1D90, 0x31260183,
+ 0x1D90, 0x31270182,
+ 0x1D90, 0x31280181,
+ 0x1D90, 0x31290148,
+ 0x1D90, 0x312A0147,
+ 0x1D90, 0x312B0146,
+ 0x1D90, 0x312C0144,
+ 0x1D90, 0x312D0143,
+ 0x1D90, 0x312E0142,
+ 0x1D90, 0x312F0141,
+ 0x1D90, 0x313000C8,
+ 0x1D90, 0x313100C7,
+ 0x1D90, 0x313200C6,
+ 0x1D90, 0x313300C5,
+ 0x1D90, 0x313400C4,
+ 0x1D90, 0x313500C3,
+ 0x1D90, 0x31360048,
+ 0x1D90, 0x31370047,
+ 0x1D90, 0x31380046,
+ 0x1D90, 0x31390045,
+ 0x1D90, 0x313A0025,
+ 0x1D90, 0x313B0024,
+ 0x1D90, 0x313C0023,
+ 0x1D90, 0x313D0022,
+ 0x1D90, 0x313E0021,
+ 0x1D90, 0x313F0020,
0xA0000000, 0x00000000,
0x1D90, 0x310001FF,
0x1D90, 0x310101FF,
0x1D90, 0x310201FF,
0x1D90, 0x310301FF,
- 0x1D90, 0x310401FF,
- 0x1D90, 0x310501FF,
- 0x1D90, 0x310601FF,
- 0x1D90, 0x310701FF,
- 0x1D90, 0x310801FF,
- 0x1D90, 0x310901FE,
- 0x1D90, 0x310A01FD,
- 0x1D90, 0x310B01FC,
- 0x1D90, 0x310C01FB,
- 0x1D90, 0x310D01FA,
- 0x1D90, 0x310E01F9,
- 0x1D90, 0x310F01F8,
- 0x1D90, 0x311001F7,
- 0x1D90, 0x311101F6,
- 0x1D90, 0x311201F5,
- 0x1D90, 0x311301F4,
- 0x1D90, 0x311401F3,
- 0x1D90, 0x311501F2,
- 0x1D90, 0x311601F1,
- 0x1D90, 0x311701F0,
- 0x1D90, 0x311801EF,
- 0x1D90, 0x311901EE,
- 0x1D90, 0x311A01ED,
- 0x1D90, 0x311B01EC,
- 0x1D90, 0x311C01EB,
- 0x1D90, 0x311D0192,
- 0x1D90, 0x311E0191,
- 0x1D90, 0x311F0190,
- 0x1D90, 0x3120018F,
- 0x1D90, 0x3121018E,
- 0x1D90, 0x3122018D,
- 0x1D90, 0x3123018C,
- 0x1D90, 0x3124018B,
- 0x1D90, 0x3125018A,
- 0x1D90, 0x31260189,
- 0x1D90, 0x31270188,
- 0x1D90, 0x31280187,
- 0x1D90, 0x31290186,
- 0x1D90, 0x312A0185,
- 0x1D90, 0x312B0149,
- 0x1D90, 0x312C0148,
- 0x1D90, 0x312D0147,
- 0x1D90, 0x312E0146,
- 0x1D90, 0x312F0145,
- 0x1D90, 0x31300144,
- 0x1D90, 0x31310143,
- 0x1D90, 0x31320142,
- 0x1D90, 0x31330141,
- 0x1D90, 0x31340140,
- 0x1D90, 0x313500C7,
- 0x1D90, 0x313600C6,
- 0x1D90, 0x313700C5,
- 0x1D90, 0x313800C4,
- 0x1D90, 0x313900C3,
- 0x1D90, 0x313A0088,
- 0x1D90, 0x313B0087,
- 0x1D90, 0x313C0086,
- 0x1D90, 0x313D0045,
- 0x1D90, 0x313E0044,
- 0x1D90, 0x313F0043,
+ 0x1D90, 0x310401FE,
+ 0x1D90, 0x310501FD,
+ 0x1D90, 0x310601FC,
+ 0x1D90, 0x310701FB,
+ 0x1D90, 0x310801FA,
+ 0x1D90, 0x310901F9,
+ 0x1D90, 0x310A01F8,
+ 0x1D90, 0x310B01F7,
+ 0x1D90, 0x310C01F6,
+ 0x1D90, 0x310D01F5,
+ 0x1D90, 0x310E01F4,
+ 0x1D90, 0x310F01F3,
+ 0x1D90, 0x311001F2,
+ 0x1D90, 0x311101F1,
+ 0x1D90, 0x311201F0,
+ 0x1D90, 0x311301EF,
+ 0x1D90, 0x311401EE,
+ 0x1D90, 0x311501ED,
+ 0x1D90, 0x311601EC,
+ 0x1D90, 0x311701EB,
+ 0x1D90, 0x311801EA,
+ 0x1D90, 0x311901E9,
+ 0x1D90, 0x311A01E8,
+ 0x1D90, 0x311B01E7,
+ 0x1D90, 0x311C01E5,
+ 0x1D90, 0x311D01E4,
+ 0x1D90, 0x311E01C5,
+ 0x1D90, 0x311F01C4,
+ 0x1D90, 0x312001C3,
+ 0x1D90, 0x312101C2,
+ 0x1D90, 0x31220188,
+ 0x1D90, 0x31230187,
+ 0x1D90, 0x31240186,
+ 0x1D90, 0x31250184,
+ 0x1D90, 0x31260183,
+ 0x1D90, 0x31270182,
+ 0x1D90, 0x31280181,
+ 0x1D90, 0x31290148,
+ 0x1D90, 0x312A0147,
+ 0x1D90, 0x312B0146,
+ 0x1D90, 0x312C0144,
+ 0x1D90, 0x312D0143,
+ 0x1D90, 0x312E0142,
+ 0x1D90, 0x312F0141,
+ 0x1D90, 0x313000C8,
+ 0x1D90, 0x313100C7,
+ 0x1D90, 0x313200C6,
+ 0x1D90, 0x313300C5,
+ 0x1D90, 0x313400C4,
+ 0x1D90, 0x313500C3,
+ 0x1D90, 0x31360048,
+ 0x1D90, 0x31370047,
+ 0x1D90, 0x31380046,
+ 0x1D90, 0x31390045,
+ 0x1D90, 0x313A0025,
+ 0x1D90, 0x313B0024,
+ 0x1D90, 0x313C0023,
+ 0x1D90, 0x313D0022,
+ 0x1D90, 0x313E0021,
+ 0x1D90, 0x313F0020,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x314001FF,
0x1D90, 0x314101FF,
0x1D90, 0x314201FF,
0x1D90, 0x314301FF,
0x1D90, 0x314401FF,
0x1D90, 0x314501FF,
- 0x1D90, 0x314601FF,
- 0x1D90, 0x314701FE,
- 0x1D90, 0x314801FD,
- 0x1D90, 0x314901FC,
- 0x1D90, 0x314A01FB,
- 0x1D90, 0x314B01FA,
- 0x1D90, 0x314C01F9,
- 0x1D90, 0x314D01F8,
- 0x1D90, 0x314E01F7,
- 0x1D90, 0x314F01F6,
- 0x1D90, 0x315001F5,
- 0x1D90, 0x315101F4,
- 0x1D90, 0x315201F3,
- 0x1D90, 0x315301F2,
- 0x1D90, 0x315401F1,
- 0x1D90, 0x315501F0,
- 0x1D90, 0x315601EF,
- 0x1D90, 0x315701EE,
- 0x1D90, 0x315801ED,
- 0x1D90, 0x315901EC,
- 0x1D90, 0x315A01EB,
- 0x1D90, 0x315B01EA,
- 0x1D90, 0x315C01E9,
- 0x1D90, 0x315D018F,
- 0x1D90, 0x315E018E,
- 0x1D90, 0x315F018D,
- 0x1D90, 0x3160018C,
- 0x1D90, 0x3161018B,
- 0x1D90, 0x3162018A,
- 0x1D90, 0x31630189,
- 0x1D90, 0x31640188,
- 0x1D90, 0x31650187,
- 0x1D90, 0x31660186,
- 0x1D90, 0x31670185,
- 0x1D90, 0x31680184,
- 0x1D90, 0x31690183,
- 0x1D90, 0x316A0182,
- 0x1D90, 0x316B0149,
- 0x1D90, 0x316C0148,
- 0x1D90, 0x316D0147,
- 0x1D90, 0x316E0146,
- 0x1D90, 0x316F0145,
- 0x1D90, 0x31700144,
- 0x1D90, 0x31710143,
- 0x1D90, 0x31720142,
- 0x1D90, 0x31730141,
- 0x1D90, 0x31740140,
- 0x1D90, 0x317500C7,
- 0x1D90, 0x317600C6,
- 0x1D90, 0x317700C5,
- 0x1D90, 0x317800C4,
- 0x1D90, 0x317900C3,
- 0x1D90, 0x317A0088,
- 0x1D90, 0x317B0087,
- 0x1D90, 0x317C0086,
- 0x1D90, 0x317D0045,
- 0x1D90, 0x317E0044,
- 0x1D90, 0x317F0043,
+ 0x1D90, 0x314601FE,
+ 0x1D90, 0x314701FD,
+ 0x1D90, 0x314801FC,
+ 0x1D90, 0x314901FB,
+ 0x1D90, 0x314A01FA,
+ 0x1D90, 0x314B01F9,
+ 0x1D90, 0x314C01F8,
+ 0x1D90, 0x314D01F7,
+ 0x1D90, 0x314E01F6,
+ 0x1D90, 0x314F01F5,
+ 0x1D90, 0x315001F4,
+ 0x1D90, 0x315101F3,
+ 0x1D90, 0x315201F2,
+ 0x1D90, 0x315301F1,
+ 0x1D90, 0x315401F0,
+ 0x1D90, 0x315501EF,
+ 0x1D90, 0x315601EE,
+ 0x1D90, 0x315701ED,
+ 0x1D90, 0x315801EC,
+ 0x1D90, 0x315901EB,
+ 0x1D90, 0x315A01EA,
+ 0x1D90, 0x315B01E9,
+ 0x1D90, 0x315C01E7,
+ 0x1D90, 0x315D01E6,
+ 0x1D90, 0x315E01E5,
+ 0x1D90, 0x315F01E4,
+ 0x1D90, 0x316001A8,
+ 0x1D90, 0x316101A7,
+ 0x1D90, 0x316201A6,
+ 0x1D90, 0x316301A5,
+ 0x1D90, 0x31640185,
+ 0x1D90, 0x31650184,
+ 0x1D90, 0x31660183,
+ 0x1D90, 0x31670182,
+ 0x1D90, 0x31680149,
+ 0x1D90, 0x31690148,
+ 0x1D90, 0x316A0147,
+ 0x1D90, 0x316B0145,
+ 0x1D90, 0x316C0144,
+ 0x1D90, 0x316D0143,
+ 0x1D90, 0x316E0142,
+ 0x1D90, 0x316F00E6,
+ 0x1D90, 0x317000E5,
+ 0x1D90, 0x317100C9,
+ 0x1D90, 0x317200C8,
+ 0x1D90, 0x317300C7,
+ 0x1D90, 0x317400C6,
+ 0x1D90, 0x317500C5,
+ 0x1D90, 0x317600C4,
+ 0x1D90, 0x317700C3,
+ 0x1D90, 0x31780088,
+ 0x1D90, 0x31790087,
+ 0x1D90, 0x317A0086,
+ 0x1D90, 0x317B0085,
+ 0x1D90, 0x317C0026,
+ 0x1D90, 0x317D0025,
+ 0x1D90, 0x317E0024,
+ 0x1D90, 0x317F0023,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FE,
+ 0x1D90, 0x314701FD,
+ 0x1D90, 0x314801FC,
+ 0x1D90, 0x314901FB,
+ 0x1D90, 0x314A01FA,
+ 0x1D90, 0x314B01F9,
+ 0x1D90, 0x314C01F8,
+ 0x1D90, 0x314D01F7,
+ 0x1D90, 0x314E01F6,
+ 0x1D90, 0x314F01F5,
+ 0x1D90, 0x315001F4,
+ 0x1D90, 0x315101F3,
+ 0x1D90, 0x315201F2,
+ 0x1D90, 0x315301F1,
+ 0x1D90, 0x315401F0,
+ 0x1D90, 0x315501EF,
+ 0x1D90, 0x315601EE,
+ 0x1D90, 0x315701ED,
+ 0x1D90, 0x315801EC,
+ 0x1D90, 0x315901EB,
+ 0x1D90, 0x315A01EA,
+ 0x1D90, 0x315B01E9,
+ 0x1D90, 0x315C01E7,
+ 0x1D90, 0x315D01E6,
+ 0x1D90, 0x315E01E5,
+ 0x1D90, 0x315F01E4,
+ 0x1D90, 0x316001A8,
+ 0x1D90, 0x316101A7,
+ 0x1D90, 0x316201A6,
+ 0x1D90, 0x316301A5,
+ 0x1D90, 0x31640185,
+ 0x1D90, 0x31650184,
+ 0x1D90, 0x31660183,
+ 0x1D90, 0x31670182,
+ 0x1D90, 0x31680149,
+ 0x1D90, 0x31690148,
+ 0x1D90, 0x316A0147,
+ 0x1D90, 0x316B0145,
+ 0x1D90, 0x316C0144,
+ 0x1D90, 0x316D0143,
+ 0x1D90, 0x316E0142,
+ 0x1D90, 0x316F00E6,
+ 0x1D90, 0x317000E5,
+ 0x1D90, 0x317100C9,
+ 0x1D90, 0x317200C8,
+ 0x1D90, 0x317300C7,
+ 0x1D90, 0x317400C6,
+ 0x1D90, 0x317500C5,
+ 0x1D90, 0x317600C4,
+ 0x1D90, 0x317700C3,
+ 0x1D90, 0x31780088,
+ 0x1D90, 0x31790087,
+ 0x1D90, 0x317A0086,
+ 0x1D90, 0x317B0085,
+ 0x1D90, 0x317C0026,
+ 0x1D90, 0x317D0025,
+ 0x1D90, 0x317E0024,
+ 0x1D90, 0x317F0023,
0x90000016, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x314001FF,
0x1D90, 0x314101FF,
@@ -1065,64 +1455,64 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x314301FF,
0x1D90, 0x314401FF,
0x1D90, 0x314501FF,
- 0x1D90, 0x314601FF,
- 0x1D90, 0x314701FE,
- 0x1D90, 0x314801FD,
- 0x1D90, 0x314901FC,
- 0x1D90, 0x314A01FB,
- 0x1D90, 0x314B01FA,
- 0x1D90, 0x314C01F9,
- 0x1D90, 0x314D01F8,
- 0x1D90, 0x314E01F7,
- 0x1D90, 0x314F01F6,
- 0x1D90, 0x315001F5,
- 0x1D90, 0x315101F4,
- 0x1D90, 0x315201F3,
- 0x1D90, 0x315301F2,
- 0x1D90, 0x315401F1,
- 0x1D90, 0x315501F0,
- 0x1D90, 0x315601EF,
- 0x1D90, 0x315701EE,
- 0x1D90, 0x315801ED,
- 0x1D90, 0x315901EC,
- 0x1D90, 0x315A01EB,
- 0x1D90, 0x315B01EA,
- 0x1D90, 0x315C01E9,
- 0x1D90, 0x315D018F,
- 0x1D90, 0x315E018E,
- 0x1D90, 0x315F018D,
- 0x1D90, 0x3160018C,
- 0x1D90, 0x3161018B,
- 0x1D90, 0x3162018A,
- 0x1D90, 0x31630189,
- 0x1D90, 0x31640188,
- 0x1D90, 0x31650187,
- 0x1D90, 0x31660186,
- 0x1D90, 0x31670185,
- 0x1D90, 0x31680184,
- 0x1D90, 0x31690183,
- 0x1D90, 0x316A0182,
- 0x1D90, 0x316B0149,
- 0x1D90, 0x316C0148,
- 0x1D90, 0x316D0147,
- 0x1D90, 0x316E0146,
- 0x1D90, 0x316F0145,
- 0x1D90, 0x31700144,
- 0x1D90, 0x31710143,
- 0x1D90, 0x31720142,
- 0x1D90, 0x31730141,
- 0x1D90, 0x31740140,
- 0x1D90, 0x317500C7,
- 0x1D90, 0x317600C6,
- 0x1D90, 0x317700C5,
- 0x1D90, 0x317800C4,
- 0x1D90, 0x317900C3,
- 0x1D90, 0x317A0088,
- 0x1D90, 0x317B0087,
- 0x1D90, 0x317C0086,
- 0x1D90, 0x317D0045,
- 0x1D90, 0x317E0044,
- 0x1D90, 0x317F0043,
+ 0x1D90, 0x314601FE,
+ 0x1D90, 0x314701FD,
+ 0x1D90, 0x314801FC,
+ 0x1D90, 0x314901FB,
+ 0x1D90, 0x314A01FA,
+ 0x1D90, 0x314B01F9,
+ 0x1D90, 0x314C01F8,
+ 0x1D90, 0x314D01F7,
+ 0x1D90, 0x314E01F6,
+ 0x1D90, 0x314F01F5,
+ 0x1D90, 0x315001F4,
+ 0x1D90, 0x315101F3,
+ 0x1D90, 0x315201F2,
+ 0x1D90, 0x315301F1,
+ 0x1D90, 0x315401F0,
+ 0x1D90, 0x315501EF,
+ 0x1D90, 0x315601EE,
+ 0x1D90, 0x315701ED,
+ 0x1D90, 0x315801EC,
+ 0x1D90, 0x315901EB,
+ 0x1D90, 0x315A01EA,
+ 0x1D90, 0x315B01E9,
+ 0x1D90, 0x315C01E7,
+ 0x1D90, 0x315D01E6,
+ 0x1D90, 0x315E01E5,
+ 0x1D90, 0x315F01E4,
+ 0x1D90, 0x316001A8,
+ 0x1D90, 0x316101A7,
+ 0x1D90, 0x316201A6,
+ 0x1D90, 0x316301A5,
+ 0x1D90, 0x31640185,
+ 0x1D90, 0x31650184,
+ 0x1D90, 0x31660183,
+ 0x1D90, 0x31670182,
+ 0x1D90, 0x31680149,
+ 0x1D90, 0x31690148,
+ 0x1D90, 0x316A0147,
+ 0x1D90, 0x316B0145,
+ 0x1D90, 0x316C0144,
+ 0x1D90, 0x316D0143,
+ 0x1D90, 0x316E0142,
+ 0x1D90, 0x316F00E6,
+ 0x1D90, 0x317000E5,
+ 0x1D90, 0x317100C9,
+ 0x1D90, 0x317200C8,
+ 0x1D90, 0x317300C7,
+ 0x1D90, 0x317400C6,
+ 0x1D90, 0x317500C5,
+ 0x1D90, 0x317600C4,
+ 0x1D90, 0x317700C3,
+ 0x1D90, 0x31780088,
+ 0x1D90, 0x31790087,
+ 0x1D90, 0x317A0086,
+ 0x1D90, 0x317B0085,
+ 0x1D90, 0x317C0026,
+ 0x1D90, 0x317D0025,
+ 0x1D90, 0x317E0024,
+ 0x1D90, 0x317F0023,
0xA0000000, 0x00000000,
0x1D90, 0x314001FF,
0x1D90, 0x314101FF,
@@ -1130,66 +1520,131 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x314301FF,
0x1D90, 0x314401FF,
0x1D90, 0x314501FF,
- 0x1D90, 0x314601FF,
- 0x1D90, 0x314701FE,
- 0x1D90, 0x314801FD,
- 0x1D90, 0x314901FC,
- 0x1D90, 0x314A01FB,
- 0x1D90, 0x314B01FA,
- 0x1D90, 0x314C01F9,
- 0x1D90, 0x314D01F8,
- 0x1D90, 0x314E01F7,
- 0x1D90, 0x314F01F6,
- 0x1D90, 0x315001F5,
- 0x1D90, 0x315101F4,
- 0x1D90, 0x315201F3,
- 0x1D90, 0x315301F2,
- 0x1D90, 0x315401F1,
- 0x1D90, 0x315501F0,
- 0x1D90, 0x315601EF,
- 0x1D90, 0x315701EE,
- 0x1D90, 0x315801ED,
- 0x1D90, 0x315901EC,
- 0x1D90, 0x315A01EB,
- 0x1D90, 0x315B01EA,
- 0x1D90, 0x315C01E9,
- 0x1D90, 0x315D018F,
- 0x1D90, 0x315E018E,
- 0x1D90, 0x315F018D,
- 0x1D90, 0x3160018C,
- 0x1D90, 0x3161018B,
- 0x1D90, 0x3162018A,
- 0x1D90, 0x31630189,
- 0x1D90, 0x31640188,
- 0x1D90, 0x31650187,
- 0x1D90, 0x31660186,
- 0x1D90, 0x31670185,
- 0x1D90, 0x31680184,
- 0x1D90, 0x31690183,
- 0x1D90, 0x316A0182,
- 0x1D90, 0x316B0149,
- 0x1D90, 0x316C0148,
- 0x1D90, 0x316D0147,
- 0x1D90, 0x316E0146,
- 0x1D90, 0x316F0145,
- 0x1D90, 0x31700144,
- 0x1D90, 0x31710143,
- 0x1D90, 0x31720142,
- 0x1D90, 0x31730141,
- 0x1D90, 0x31740140,
- 0x1D90, 0x317500C7,
- 0x1D90, 0x317600C6,
- 0x1D90, 0x317700C5,
- 0x1D90, 0x317800C4,
- 0x1D90, 0x317900C3,
- 0x1D90, 0x317A0088,
- 0x1D90, 0x317B0087,
- 0x1D90, 0x317C0086,
- 0x1D90, 0x317D0045,
- 0x1D90, 0x317E0044,
- 0x1D90, 0x317F0043,
+ 0x1D90, 0x314601FE,
+ 0x1D90, 0x314701FD,
+ 0x1D90, 0x314801FC,
+ 0x1D90, 0x314901FB,
+ 0x1D90, 0x314A01FA,
+ 0x1D90, 0x314B01F9,
+ 0x1D90, 0x314C01F8,
+ 0x1D90, 0x314D01F7,
+ 0x1D90, 0x314E01F6,
+ 0x1D90, 0x314F01F5,
+ 0x1D90, 0x315001F4,
+ 0x1D90, 0x315101F3,
+ 0x1D90, 0x315201F2,
+ 0x1D90, 0x315301F1,
+ 0x1D90, 0x315401F0,
+ 0x1D90, 0x315501EF,
+ 0x1D90, 0x315601EE,
+ 0x1D90, 0x315701ED,
+ 0x1D90, 0x315801EC,
+ 0x1D90, 0x315901EB,
+ 0x1D90, 0x315A01EA,
+ 0x1D90, 0x315B01E9,
+ 0x1D90, 0x315C01E7,
+ 0x1D90, 0x315D01E6,
+ 0x1D90, 0x315E01E5,
+ 0x1D90, 0x315F01E4,
+ 0x1D90, 0x316001A8,
+ 0x1D90, 0x316101A7,
+ 0x1D90, 0x316201A6,
+ 0x1D90, 0x316301A5,
+ 0x1D90, 0x31640185,
+ 0x1D90, 0x31650184,
+ 0x1D90, 0x31660183,
+ 0x1D90, 0x31670182,
+ 0x1D90, 0x31680149,
+ 0x1D90, 0x31690148,
+ 0x1D90, 0x316A0147,
+ 0x1D90, 0x316B0145,
+ 0x1D90, 0x316C0144,
+ 0x1D90, 0x316D0143,
+ 0x1D90, 0x316E0142,
+ 0x1D90, 0x316F00E6,
+ 0x1D90, 0x317000E5,
+ 0x1D90, 0x317100C9,
+ 0x1D90, 0x317200C8,
+ 0x1D90, 0x317300C7,
+ 0x1D90, 0x317400C6,
+ 0x1D90, 0x317500C5,
+ 0x1D90, 0x317600C4,
+ 0x1D90, 0x317700C3,
+ 0x1D90, 0x31780088,
+ 0x1D90, 0x31790087,
+ 0x1D90, 0x317A0086,
+ 0x1D90, 0x317B0085,
+ 0x1D90, 0x317C0026,
+ 0x1D90, 0x317D0025,
+ 0x1D90, 0x317E0024,
+ 0x1D90, 0x317F0023,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x319601E7,
+ 0x1D90, 0x319701E6,
+ 0x1D90, 0x319801E5,
+ 0x1D90, 0x319901E4,
+ 0x1D90, 0x319A01A8,
+ 0x1D90, 0x319B01A7,
+ 0x1D90, 0x319C01A6,
+ 0x1D90, 0x319D01A5,
+ 0x1D90, 0x319E0185,
+ 0x1D90, 0x319F0184,
+ 0x1D90, 0x31A00183,
+ 0x1D90, 0x31A10182,
+ 0x1D90, 0x31A20149,
+ 0x1D90, 0x31A30148,
+ 0x1D90, 0x31A40147,
+ 0x1D90, 0x31A50145,
+ 0x1D90, 0x31A60144,
+ 0x1D90, 0x31A70143,
+ 0x1D90, 0x31A80142,
+ 0x1D90, 0x31A900E6,
+ 0x1D90, 0x31AA00E5,
+ 0x1D90, 0x31AB00C9,
+ 0x1D90, 0x31AC00C8,
+ 0x1D90, 0x31AD00C7,
+ 0x1D90, 0x31AE00C6,
+ 0x1D90, 0x31AF00C5,
+ 0x1D90, 0x31B000C4,
+ 0x1D90, 0x31B100C3,
+ 0x1D90, 0x31B20088,
+ 0x1D90, 0x31B30087,
+ 0x1D90, 0x31B40086,
+ 0x1D90, 0x31B50085,
+ 0x1D90, 0x31B60026,
+ 0x1D90, 0x31B70025,
+ 0x1D90, 0x31B80024,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0003,
+ 0x1D90, 0x31BE0002,
+ 0x1D90, 0x31BF0001,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x318001FE,
0x1D90, 0x318101FD,
0x1D90, 0x318201FC,
@@ -1385,7 +1840,10 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x31BE0002,
0x1D90, 0x31BF0001,
0xB0000000, 0x00000000,
- 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+ 0x90000015, 0x00000000, 0x40000000, 0x00000000,
0x1D70, 0x22222222,
0x1D70, 0x20202020,
0x90000016, 0x00000000, 0x40000000, 0x00000000,
@@ -1793,7 +2251,9 @@ static const u32 rtw8822c_bb[] = {
0x1828, 0x000004FD,
0x182C, 0x00000000,
0x1834, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x1838, 0x20100000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x1838, 0x20100000,
@@ -1801,11 +2261,17 @@ static const u32 rtw8822c_bb[] = {
0x1838, 0x20100000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x1838, 0x20100000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
0xA0000000, 0x00000000,
0x1838, 0x20000000,
0xB0000000, 0x00000000,
0x183C, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x1840, 0x00002300,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x1840, 0x00002300,
@@ -1813,6 +2279,10 @@ static const u32 rtw8822c_bb[] = {
0x1840, 0x00002300,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x1840, 0x00002300,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
0xA0000000, 0x00000000,
0x1840, 0x00000000,
0xB0000000, 0x00000000,
@@ -1826,7 +2296,9 @@ static const u32 rtw8822c_bb[] = {
0x1860, 0xF0040FF8,
0x1864, 0x7F000000,
0x1868, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF00,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x186C, 0x0000FF02,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x186C, 0x0000FF02,
@@ -1834,6 +2306,10 @@ static const u32 rtw8822c_bb[] = {
0x186C, 0x0000FF02,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x186C, 0x0000FF02,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
0xA0000000, 0x00000000,
0x186C, 0x0000FF00,
0xB0000000, 0x00000000,
@@ -1842,7 +2318,9 @@ static const u32 rtw8822c_bb[] = {
0x1878, 0x00000000,
0x187C, 0x00000000,
0x1880, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x02B00000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x1884, 0x03B00000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x1884, 0x03B00000,
@@ -1850,6 +2328,10 @@ static const u32 rtw8822c_bb[] = {
0x1884, 0x03B00000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x1884, 0x03B00000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
0xA0000000, 0x00000000,
0x1884, 0x02B00000,
0xB0000000, 0x00000000,
@@ -1982,7 +2464,10 @@ static const u32 rtw8822c_bb[] = {
0x1C84, 0x245120D4,
0x1C88, 0xC8400483,
0x1C8C, 0x40005A20,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000000,
+ 0x1C98, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x1C94, 0x00000B0E,
0x1C98, 0x00450000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -1994,6 +2479,12 @@ static const u32 rtw8822c_bb[] = {
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x1C94, 0x00000B0E,
0x1C98, 0x00450000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
0xA0000000, 0x00000000,
0x1C94, 0x00000000,
0x1C98, 0x00000000,
@@ -2330,7 +2821,9 @@ static const u32 rtw8822c_bb[] = {
0x4128, 0x000004FD,
0x412C, 0x00000000,
0x4134, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x4138, 0x20100000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x4138, 0x20100000,
@@ -2338,12 +2831,18 @@ static const u32 rtw8822c_bb[] = {
0x4138, 0x20100000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x4138, 0x20100000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
0xA0000000, 0x00000000,
0x4138, 0x20000000,
0xB0000000, 0x00000000,
0x413C, 0x00000000,
0x4140, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x4144, 0x00002030,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x4144, 0x00002030,
@@ -2351,6 +2850,10 @@ static const u32 rtw8822c_bb[] = {
0x4144, 0x00002030,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x4144, 0x00002030,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
0xA0000000, 0x00000000,
0x4144, 0x00000000,
0xB0000000, 0x00000000,
@@ -2363,7 +2866,9 @@ static const u32 rtw8822c_bb[] = {
0x4160, 0xF0040FF8,
0x4164, 0x7F000000,
0x4168, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x416C, 0x00008002,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x416C, 0x00008002,
@@ -2371,6 +2876,10 @@ static const u32 rtw8822c_bb[] = {
0x416C, 0x00008002,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x416C, 0x00008002,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
0xA0000000, 0x00000000,
0x416C, 0x00008000,
0xB0000000, 0x00000000,
@@ -2379,7 +2888,9 @@ static const u32 rtw8822c_bb[] = {
0x4178, 0x00000000,
0x417C, 0x00000000,
0x4180, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x02B00000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x4184, 0x03B00000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x4184, 0x03B00000,
@@ -2387,6 +2898,10 @@ static const u32 rtw8822c_bb[] = {
0x4184, 0x03B00000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x4184, 0x03B00000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
0xA0000000, 0x00000000,
0x4184, 0x02B00000,
0xB0000000, 0x00000000,
@@ -2843,7 +3358,11 @@ static const u32 rtw8822c_rf_a[] = {
0x018, 0x00013124,
0x093, 0x0008483F,
0x0DE, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000B9140,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000B9140,
@@ -2861,6 +3380,8 @@ static const u32 rtw8822c_rf_a[] = {
0x08E, 0x000A5540,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -2875,17 +3396,39 @@ static const u32 rtw8822c_rf_a[] = {
0x08E, 0x000A5540,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
0x081, 0x0000FC01,
0x081, 0x0002FC01,
0x081, 0x0003FC01,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
@@ -2903,6 +3446,8 @@ static const u32 rtw8822c_rf_a[] = {
0x085, 0x0006A06C,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -2917,14 +3462,50 @@ static const u32 rtw8822c_rf_a[] = {
0x085, 0x0006A06C,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
0xA0000000, 0x00000000,
0x085, 0x0006A06C,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
0x03F, 0x0000002A,
@@ -3005,6 +3586,15 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -3068,6 +3658,15 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -3086,6 +3685,78 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -3096,7 +3767,59 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x0000003F,
0x0EE, 0x00000000,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
0x03F, 0x000773C0,
@@ -3330,6 +4053,32 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -3512,6 +4261,32 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -3564,6 +4339,214 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -3593,7 +4576,57 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000003,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
0x033, 0x0000001E,
@@ -3818,6 +4851,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
@@ -3993,6 +5051,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
@@ -4043,6 +5126,206 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -4071,7 +5354,57 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000013,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
0x033, 0x0000002E,
@@ -4296,6 +5629,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
@@ -4471,6 +5829,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
@@ -4521,6 +5904,206 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -4549,7 +6132,57 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000023,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
0x033, 0x0000003E,
@@ -4774,6 +6407,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
@@ -4949,6 +6607,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
@@ -4999,6 +6682,206 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -5027,7 +6910,57 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000033,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
0x033, 0x0000004E,
@@ -5252,6 +7185,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
@@ -5427,6 +7385,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
@@ -5477,6 +7460,206 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -5505,7 +7688,57 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000043,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
0x033, 0x0000005E,
@@ -5730,6 +7963,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
@@ -5905,6 +8163,31 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
@@ -5955,6 +8238,206 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -5983,7 +8466,11 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x033, 0x00000053,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
@@ -6001,6 +8488,8 @@ static const u32 rtw8822c_rf_a[] = {
0x0EF, 0x00000000,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -6015,10 +8504,28 @@ static const u32 rtw8822c_rf_a[] = {
0x0EF, 0x00000000,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -6036,7 +8543,301 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x00000000,
0x0EF, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -7359,6 +10160,153 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -8388,6 +11336,153 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -8682,6 +11777,1182 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -8834,7 +13105,11 @@ static const u32 rtw8822c_rf_a[] = {
0x01B, 0x00003A40,
0x061, 0x0000D233,
0x062, 0x0004D232,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
@@ -8852,6 +13127,8 @@ static const u32 rtw8822c_rf_a[] = {
0x063, 0x00000002,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -8866,15 +13143,87 @@ static const u32 rtw8822c_rf_a[] = {
0x063, 0x00000002,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
0x0EF, 0x00000200,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000237,
0x030, 0x00001237,
0x030, 0x00002237,
@@ -9117,6 +13466,33 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00017238,
0x030, 0x00018228,
0x030, 0x00019238,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000239,
0x030, 0x00001239,
@@ -9306,6 +13682,33 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00017238,
0x030, 0x00018228,
0x030, 0x00019238,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000239,
0x030, 0x00001239,
@@ -9360,6 +13763,222 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -9390,7 +14009,33 @@ static const u32 rtw8822c_rf_a[] = {
0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000080,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
0x030, 0x00002334,
@@ -9507,6 +14152,19 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
@@ -9598,6 +14256,19 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
@@ -9624,6 +14295,110 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -9651,7 +14426,283 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x00000005,
0x033, 0x00000201,
@@ -9697,6 +14748,144 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000074,
0x033, 0x0000020A,
0x03F, 0x00000077,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x00000005,
@@ -9743,6 +14932,190 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000074,
0x033, 0x0000020A,
0x03F, 0x00000077,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
@@ -9767,7 +15140,283 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000020A,
0x03F, 0x00000CF7,
0xB0000000, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x00000005,
0x033, 0x00000281,
@@ -9813,6 +15462,144 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000074,
0x033, 0x0000028A,
0x03F, 0x00000077,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x00000005,
@@ -9859,6 +15646,190 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000074,
0x033, 0x0000028A,
0x03F, 0x00000077,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
@@ -9883,7 +15854,295 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000028A,
0x03F, 0x00000CF7,
0xB0000000, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x00000005,
0x033, 0x00000301,
@@ -9931,6 +16190,150 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000030A,
0x03F, 0x000000D1,
0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x00000005,
@@ -9979,6 +16382,198 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000030A,
0x03F, 0x000000D1,
0x0EE, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
@@ -10005,7 +16600,11 @@ static const u32 rtw8822c_rf_a[] = {
0x0EE, 0x00000000,
0xB0000000, 0x00000000,
0x051, 0x0003C800,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000942CA,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000942CA,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -10023,6 +16622,8 @@ static const u32 rtw8822c_rf_a[] = {
0x052, 0x000902CA,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10037,10 +16638,28 @@ static const u32 rtw8822c_rf_a[] = {
0x052, 0x000902CA,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942CA,
0xB0000000, 0x00000000,
@@ -10050,7 +16669,11 @@ static const u32 rtw8822c_rf_a[] = {
0x0EF, 0x00000020,
0x033, 0x00000000,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10068,6 +16691,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10082,16 +16707,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000001,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10109,6 +16756,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10123,16 +16772,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000002,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10150,6 +16821,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10164,16 +16837,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000003,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10191,6 +16886,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10205,16 +16902,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000004,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10232,6 +16951,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10246,16 +16967,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000005,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10273,6 +17016,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10287,16 +17032,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000006,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10314,6 +17081,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10328,16 +17097,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000007,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10355,6 +17146,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10369,16 +17162,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000008,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10396,6 +17211,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10410,16 +17227,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000009,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10437,6 +17276,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10451,16 +17292,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000A,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10478,6 +17341,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10492,16 +17357,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000B,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10519,6 +17406,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10533,16 +17422,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000C,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10560,6 +17471,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10574,16 +17487,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000D,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10601,6 +17536,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10615,16 +17552,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000E,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
@@ -10642,6 +17601,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10656,16 +17617,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000000F,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10683,6 +17666,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10697,16 +17682,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000010,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10724,6 +17731,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10738,16 +17747,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000011,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10765,6 +17796,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10779,16 +17812,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000012,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10806,6 +17861,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10820,16 +17877,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000013,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10847,6 +17926,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10861,16 +17942,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000014,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10888,6 +17991,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10902,16 +18007,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000015,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10929,6 +18056,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10943,16 +18072,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000016,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -10970,6 +18121,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -10984,16 +18137,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000017,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11011,6 +18186,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11025,16 +18202,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000018,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11052,6 +18251,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11066,16 +18267,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000019,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11093,6 +18316,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11107,16 +18332,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001A,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11134,6 +18381,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11148,16 +18397,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001B,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11175,6 +18446,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11189,16 +18462,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001C,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11216,6 +18511,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11230,16 +18527,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001D,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11257,6 +18576,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11271,16 +18592,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001E,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11298,6 +18641,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11312,16 +18657,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000001F,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11339,6 +18706,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11353,16 +18722,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000020,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11380,6 +18771,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11394,16 +18787,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000021,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11421,6 +18836,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11435,16 +18852,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000022,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11462,6 +18901,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11476,16 +18917,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000023,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11503,6 +18966,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11517,16 +18982,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000024,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11544,6 +19031,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11558,16 +19047,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000025,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11585,6 +19096,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11599,16 +19112,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000026,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11626,6 +19161,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11640,16 +19177,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000027,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11667,6 +19226,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11681,16 +19242,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000028,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11708,6 +19291,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11722,16 +19307,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x00000029,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11749,6 +19356,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11763,16 +19372,38 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x033, 0x0000002A,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
@@ -11790,6 +19421,8 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00021E46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -11804,16 +19437,80 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00021E46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000468,
0x033, 0x00000061,
@@ -12020,6 +19717,29 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -12181,6 +19901,29 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -12227,6 +19970,190 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -12251,7 +20178,53 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000006A,
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000468,
0x033, 0x00000021,
@@ -12458,6 +20431,29 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000467,
@@ -12619,6 +20615,29 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000467,
@@ -12665,6 +20684,190 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -12698,7 +20901,11 @@ static const u32 rtw8822c_rf_a[] = {
0x0B0, 0x0001F0FC,
0x0B1, 0x0007DBE4,
0x0B2, 0x00022400,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
@@ -12716,6 +20923,8 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -12730,31 +20939,93 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
0x0B4, 0x00099D40,
0x0B5, 0x0004103F,
- 0x83000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000187F8,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
0xA0000000, 0x00000000,
0x0B6, 0x000187F8,
0xB0000000, 0x00000000,
@@ -12769,7 +21040,11 @@ static const u32 rtw8822c_rf_a[] = {
0x0CA, 0x00080001,
0x0FE, 0x00000000,
0x0B0, 0x0001F0F8,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C700,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C700,
@@ -12787,6 +21062,8 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -12801,10 +21078,28 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C700,
0xB0000000, 0x00000000,
@@ -12812,7 +21107,11 @@ static const u32 rtw8822c_rf_a[] = {
0xFFE, 0x00000000,
0xFFE, 0x00000000,
0xFFE, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
@@ -12830,6 +21129,8 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -12844,10 +21145,28 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
@@ -12871,7 +21190,19 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000001,
0x03F, 0x0000000F,
0x0ED, 0x00000000,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000500,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
@@ -12881,6 +21212,8 @@ static const u32 rtw8822c_rf_a[] = {
0x0DD, 0x00000540,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -12895,10 +21228,28 @@ static const u32 rtw8822c_rf_a[] = {
0x0DD, 0x00000540,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
0xA0000000, 0x00000000,
0x0DD, 0x00000500,
0xB0000000, 0x00000000,
@@ -13008,7 +21359,19 @@ static const u32 rtw8822c_rf_b[] = {
0x093, 0x0008483F,
0x0EF, 0x00080000,
0x033, 0x00000001,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00091230,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
@@ -13018,6 +21381,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0009123E,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -13032,16 +21397,38 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0009123E,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0009123E,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
0xA0000000, 0x00000000,
0x03F, 0x00091230,
0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0DE, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000B9140,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000B9140,
@@ -13059,6 +21446,8 @@ static const u32 rtw8822c_rf_b[] = {
0x08E, 0x000A5540,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -13073,10 +21462,28 @@ static const u32 rtw8822c_rf_b[] = {
0x08E, 0x000A5540,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
@@ -13084,7 +21491,25 @@ static const u32 rtw8822c_rf_b[] = {
0x081, 0x0002FC01,
0x081, 0x0003FC01,
0x085, 0x0006A06C,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
0x03F, 0x0000002A,
@@ -13165,6 +21590,15 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -13228,6 +21662,15 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -13246,6 +21689,78 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -13256,7 +21771,59 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0000003F,
0x0EE, 0x00000000,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
0x03F, 0x000773C0,
@@ -13490,6 +22057,32 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -13672,6 +22265,32 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -13724,6 +22343,214 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -13753,7 +22580,57 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000003,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
0x033, 0x0000001E,
@@ -13978,6 +22855,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
@@ -14153,6 +23055,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773C0,
@@ -14203,6 +23130,206 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -14231,7 +23358,57 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000013,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
0x033, 0x0000002E,
@@ -14456,6 +23633,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
@@ -14631,6 +23833,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773C0,
@@ -14681,6 +23908,206 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -14709,7 +24136,57 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000023,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
0x033, 0x0000003E,
@@ -14934,6 +24411,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
@@ -15109,6 +24611,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773C0,
@@ -15159,6 +24686,206 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -15187,7 +24914,57 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000033,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
0x033, 0x0000004E,
@@ -15412,6 +25189,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
@@ -15587,6 +25389,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773C0,
@@ -15637,6 +25464,206 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -15665,7 +25692,57 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000043,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
0x033, 0x0000005E,
@@ -15890,6 +25967,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
@@ -16065,6 +26167,31 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773C0,
@@ -16115,6 +26242,206 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -16143,7 +26470,11 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x033, 0x00000053,
0x03F, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
@@ -16161,6 +26492,8 @@ static const u32 rtw8822c_rf_b[] = {
0x0EF, 0x00000000,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -16175,10 +26508,28 @@ static const u32 rtw8822c_rf_b[] = {
0x0EF, 0x00000000,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -16196,7 +26547,301 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x00000000,
0x0EF, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -17519,6 +28164,153 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -18548,6 +29340,153 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -18842,6 +29781,1182 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
@@ -18994,7 +31109,11 @@ static const u32 rtw8822c_rf_b[] = {
0x01B, 0x00003A40,
0x061, 0x0000D233,
0x062, 0x0004D232,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
@@ -19012,6 +31131,8 @@ static const u32 rtw8822c_rf_b[] = {
0x063, 0x00000002,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -19026,15 +31147,87 @@ static const u32 rtw8822c_rf_b[] = {
0x063, 0x00000002,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
0x0EF, 0x00000200,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000237,
0x030, 0x00001237,
0x030, 0x00002237,
@@ -19277,6 +31470,33 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00017238,
0x030, 0x00018228,
0x030, 0x00019238,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000239,
0x030, 0x00001239,
@@ -19466,6 +31686,33 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00017238,
0x030, 0x00018228,
0x030, 0x00019238,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000239,
0x030, 0x00001239,
@@ -19520,6 +31767,222 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -19550,7 +32013,33 @@ static const u32 rtw8822c_rf_b[] = {
0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000080,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
0x030, 0x00002334,
@@ -19667,6 +32156,19 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
@@ -19758,6 +32260,19 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x030, 0x00000334,
0x030, 0x00001334,
@@ -19784,6 +32299,110 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -19811,7 +32430,283 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x00000005,
0x033, 0x00000201,
@@ -19857,6 +32752,144 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000074,
0x033, 0x0000020A,
0x03F, 0x00000077,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x00000005,
@@ -19903,6 +32936,190 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000074,
0x033, 0x0000020A,
0x03F, 0x00000077,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
@@ -19927,7 +33144,283 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000020A,
0x03F, 0x00000CF7,
0xB0000000, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x00000005,
0x033, 0x00000281,
@@ -19973,6 +33466,144 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000074,
0x033, 0x0000028A,
0x03F, 0x00000077,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x00000005,
@@ -20019,6 +33650,190 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000074,
0x033, 0x0000028A,
0x03F, 0x00000077,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
@@ -20043,7 +33858,295 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000028A,
0x03F, 0x00000CF7,
0xB0000000, 0x00000000,
- 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x00000005,
0x033, 0x00000301,
@@ -20091,6 +34194,150 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000030A,
0x03F, 0x000000D1,
0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x00000005,
@@ -20139,6 +34386,198 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000030A,
0x03F, 0x000000D1,
0x0EE, 0x00000000,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
@@ -20165,7 +34604,11 @@ static const u32 rtw8822c_rf_b[] = {
0x0EE, 0x00000000,
0xB0000000, 0x00000000,
0x051, 0x0003C800,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000942C0,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000942C0,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -20183,6 +34626,8 @@ static const u32 rtw8822c_rf_b[] = {
0x052, 0x000902CA,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -20197,10 +34642,28 @@ static const u32 rtw8822c_rf_b[] = {
0x052, 0x000902CA,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942C0,
0xB0000000, 0x00000000,
@@ -20209,150 +34672,202 @@ static const u32 rtw8822c_rf_b[] = {
0x057, 0x0004C80A,
0x0EF, 0x00000020,
0x033, 0x00000000,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x0000C246,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000001,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000002,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -20370,6 +34885,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -20384,158 +34901,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000003,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x0000C246,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000004,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000004,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000005,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -20553,6 +35140,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -20567,158 +35156,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000006,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x0000C246,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000007,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000007,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000008,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -20736,6 +35395,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -20750,158 +35411,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
0x033, 0x00000009,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x0000000A,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000000A,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000000B,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -20919,6 +35650,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -20933,158 +35666,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000000C,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x0000000D,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000000D,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000241C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000241C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000000E,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -21102,6 +35905,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -21116,158 +35921,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000000F,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000010,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000010,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000011,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
@@ -21285,6 +36160,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -21299,158 +36176,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000012,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000013,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000014,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -21468,6 +36415,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -21482,158 +36431,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000015,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000016,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000017,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -21651,6 +36670,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -21665,158 +36686,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000018,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000019,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000019,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000001A,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -21834,6 +36925,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -21848,158 +36941,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000001B,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x0000001C,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001C,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000001D,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -22017,6 +37180,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -22031,158 +37196,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000001E,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x0000001F,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000020,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -22200,6 +37435,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -22214,371 +37451,513 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000021,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000022,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000022,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000023,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000020,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000024,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000025,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000025,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000026,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -22596,6 +37975,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -22610,158 +37991,228 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000027,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0xA0000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
- 0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000209C6,
- 0xA0000000, 0x00000000,
- 0x03F, 0x00008E46,
- 0xB0000000, 0x00000000,
- 0x033, 0x00000028,
- 0x83000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00000030,
- 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
- 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000028,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x0001CA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x03F, 0x000209C6,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0x03F, 0x000209C6,
0xA0000000, 0x00000000,
+ 0x03E, 0x00000020,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x00000029,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -22779,6 +38230,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -22793,16 +38246,38 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x033, 0x0000002A,
0x03E, 0x00000020,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
@@ -22820,6 +38295,8 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0001CA46,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
@@ -22834,16 +38311,80 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0001CA46,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000468,
0x033, 0x00000061,
@@ -23050,6 +38591,29 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -23211,6 +38775,29 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -23257,6 +38844,190 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -23281,7 +39052,53 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000006A,
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
- 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x8f000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x9f000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000468,
0x033, 0x00000021,
@@ -23488,6 +39305,29 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000467,
@@ -23649,6 +39489,29 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x94000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000467,
@@ -23695,6 +39558,190 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x95000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x95000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -23770,10 +39817,6 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000007,
0x03F, 0x00000002,
0x0EF, 0x00000000,
- 0x0EF, 0x00080000,
- 0x033, 0x00000001,
- 0x03F, 0x000916BF,
- 0x0EF, 0x00000000,
};
RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index ca8072177ae3..0193708fc013 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -58,6 +58,10 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
+ if (pkt_info->rts) {
+ SET_TX_DESC_RTSRATE(txdesc, DESC_RATE24M);
+ SET_TX_DESC_DATA_RTS_SHORT(txdesc, 1);
+ }
SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
@@ -158,7 +162,7 @@ void rtw_tx_report_purge_timer(struct timer_list *t)
if (skb_queue_len(&tx_report->queue) == 0)
return;
- WARN(1, "purge skb(s) not reported by firmware\n");
+ rtw_dbg(rtwdev, RTW_DBG_TX, "purge skb(s) not reported by firmware\n");
spin_lock_irqsave(&tx_report->q_lock, flags);
skb_queue_purge(&tx_report->queue);
@@ -290,6 +294,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_sta_info *si;
u16 seq;
u8 ampdu_factor = 0;
@@ -313,7 +318,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
ampdu_density = get_tx_ampdu_density(sta);
}
- if (info->control.use_rts)
+ if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold)
pkt_info->rts = true;
if (sta->vht_cap.vht_supported)
@@ -587,9 +592,9 @@ static void rtw_txq_push(struct rtw_dev *rtwdev,
rcu_read_unlock();
}
-void rtw_tx_tasklet(struct tasklet_struct *t)
+void rtw_tx_work(struct work_struct *w)
{
- struct rtw_dev *rtwdev = from_tasklet(rtwdev, t, tx_tasklet);
+ struct rtw_dev *rtwdev = container_of(w, struct rtw_dev, tx_work);
struct rtw_txq *rtwtxq, *tmp;
spin_lock_bh(&rtwdev->txq_lock);
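
For context on the tasklet-to-workqueue conversion in this hunk: a minimal sketch, assuming a dedicated workqueue member named tx_wq is allocated at driver init (the init and producer sites are not part of this hunk, so those names are illustrative):

	/* Init site: the old tasklet_setup(&rtwdev->tx_tasklet, rtw_tx_tasklet)
	 * becomes a work item, here on a hypothetical high-priority workqueue.
	 */
	rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
	INIT_WORK(&rtwdev->tx_work, rtw_tx_work);

	/* Producer side: each former tasklet_schedule() becomes */
	queue_work(rtwdev->tx_wq, &rtwdev->tx_work);

Unlike a tasklet, the work item runs in process context and may sleep; the spin_lock_bh() inside rtw_tx_work still keeps the txq list consistent against softirq-context users.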
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 6673dbcaa21c..56371eff9f7f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -37,6 +37,10 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
#define SET_TX_DESC_USE_RTS(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(12))
+#define SET_TX_DESC_RTSRATE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x04, value, GENMASK(28, 24))
+#define SET_TX_DESC_DATA_RTS_SHORT(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(12))
#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
#define SET_TX_DESC_DATA_STBC(txdesc, value) \
@@ -94,7 +98,7 @@ void rtw_tx(struct rtw_dev *rtwdev,
struct sk_buff *skb);
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
-void rtw_tx_tasklet(struct tasklet_struct *t);
+void rtw_tx_work(struct work_struct *w);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_sta *sta,
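
The two new descriptor setters follow the pattern of the surrounding macros: the TX descriptor is treated as an array of little-endian 32-bit words, and le32p_replace_bits() (from <linux/bitfield.h>) read-modify-writes a single field in place. A minimal sketch of what SET_TX_DESC_RTSRATE expands to, with an illustrative local buffer:

	#include <linux/bitfield.h>

	__le32 txdesc[8] = { };

	/* Place the 5-bit RTS rate into bits 28:24 of descriptor dword 4,
	 * preserving every other bit of that word.
	 */
	le32p_replace_bits((__le32 *)txdesc + 0x04, DESC_RATE24M,
			   GENMASK(28, 24));

Pairing this with SET_TX_DESC_DATA_RTS_SHORT (BIT(12) of dword 5) matches the tx.c hunk above, which requests a 24 Mbps RTS with the "short" flag set whenever RTS is in use.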
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 2d49c5b5eefb..a48e616e0fb9 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -193,8 +193,7 @@ get_queue_num:
if (recontend_queue)
goto get_queue_num;
- q_num = INVALID_QUEUE;
- return q_num;
+ return INVALID_QUEUE;
}
common->selected_qnum = q_num;
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index e1095b8de2bd..498c8db2eb48 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -175,10 +175,8 @@ int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
wl1251_debug(DEBUG_CMD, "cmd vbm");
vbm = kzalloc(sizeof(*vbm), GFP_KERNEL);
- if (!vbm) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!vbm)
+ return -ENOMEM;
/* Count and period will be filled by the target */
vbm->tim.bitmap_ctrl = bitmap_control;
@@ -213,10 +211,8 @@ int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
wl1251_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cmd)
+ return -ENOMEM;
cmd->channel = channel;
@@ -279,10 +275,8 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u8 *bssid;
join = kzalloc(sizeof(*join), GFP_KERNEL);
- if (!join) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!join)
+ return -ENOMEM;
wl1251_debug(DEBUG_CMD, "cmd join%s ch %d %d/%d",
bss_type == BSS_TYPE_IBSS ? " ibss" : "",
@@ -324,10 +318,8 @@ int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode)
wl1251_debug(DEBUG_CMD, "cmd set ps mode");
ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL);
- if (!ps_params) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!ps_params)
+ return -ENOMEM;
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = 1;
@@ -356,10 +348,8 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
wl1251_debug(DEBUG_CMD, "cmd read memory");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cmd)
+ return -ENOMEM;
WARN_ON(len > MAX_READ_SIZE);
len = min_t(size_t, len, MAX_READ_SIZE);
@@ -401,10 +391,8 @@ int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
cmd_len = ALIGN(sizeof(*cmd) + buf_len, 4);
cmd = kzalloc(cmd_len, GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cmd)
+ return -ENOMEM;
cmd->size = cpu_to_le16(buf_len);
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 3c9c623bb428..9d7dbfe7fe0c 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -635,7 +635,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -659,7 +658,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
@@ -688,7 +686,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 122c7a4b374f..8509b989940c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2872,21 +2872,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (is_ibss)
ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
- else {
- if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
- /*
- * TODO: this is an ugly workaround for wl12xx fw
- * bug - we are not able to tx/rx after the first
- * start_sta, so make dummy start+stop calls,
- * and then call start_sta again.
- * this should be fixed in the fw.
- */
- wl12xx_cmd_role_start_sta(wl, wlvif);
- wl12xx_cmd_role_stop_sta(wl, wlvif);
- }
-
+ else
ret = wl12xx_cmd_role_start_sta(wl, wlvif);
- }
return ret;
}
@@ -5394,7 +5381,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
ret = -EBUSY;
- wl1271_error("exceeded max RX BA sessions");
+ wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
break;
}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index b7821311ac75..81c94d390623 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -547,9 +547,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
-/* the first start_role(sta) sometimes doesn't work on wl12xx */
-#define WLCORE_QUIRK_START_STA_FAILS BIT(1)
-
/* wl127x and SPI don't support SDIO block size alignment */
#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index b446cb369557..e98e04ee9a2c 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -550,7 +550,7 @@ struct wl3501_80211_tx_plcp_hdr {
struct wl3501_80211_tx_hdr {
struct wl3501_80211_tx_plcp_hdr pclp_hdr;
struct ieee80211_hdr mac_hdr;
-} __packed;
+} __packed __aligned(2);
/*
Reserve the beginning Tx space for descriptor use.
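
On the __packed __aligned(2) change: __packed alone lowers the structure's alignment to 1, which both defeats efficient 16-bit access to the embedded struct ieee80211_hdr fields and can trigger compiler alignment warnings; adding __aligned(2) restores the natural alignment those fields actually need. A standalone C11 sketch of the effect (hypothetical struct names):

	#include <stdalign.h>
	#include <stdio.h>

	struct hdr_packed {
		unsigned char plcp[6];
		unsigned short frame_control;	/* 16-bit field */
	} __attribute__((packed));

	struct hdr_packed_aligned {
		unsigned char plcp[6];
		unsigned short frame_control;
	} __attribute__((packed, aligned(2)));

	int main(void)
	{
		/* prints "1 2": packed alone drops alignment to 1 byte */
		printf("%zu %zu\n", alignof(struct hdr_packed),
		       alignof(struct hdr_packed_aligned));
		return 0;
	}

Both layouts stay byte-for-byte identical; only the allowed placement of the struct as a whole changes.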
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8ee24e351bdc..4a16d6e33c09 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -399,7 +399,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index acb786d8b1d8..193b723fe3bd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,7 +47,7 @@
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
-/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
* increasing the inflight counter. We need to increase the inflight
* counter because core driver calls into xenvif_zerocopy_callback
* which calls xenvif_skb_zerocopy_complete.
@@ -55,7 +55,7 @@
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb)
{
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
atomic_inc(&queue->inflight_packets);
}
@@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
int old;
+ bool has_rx, has_tx;
old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
WARN(old, "Interrupt while EOI pending\n");
- /* Use bitwise or as we need to call both functions. */
- if ((!xenvif_handle_tx_interrupt(queue) |
- !xenvif_handle_rx_interrupt(queue))) {
+ has_tx = xenvif_handle_tx_interrupt(queue);
+ has_rx = xenvif_handle_rx_interrupt(queue);
+
+ if (!has_rx && !has_tx) {
atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
}
@@ -628,13 +630,13 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
unsigned int evtchn)
{
struct net_device *dev = vif->dev;
+ struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
void *addr;
struct xen_netif_ctrl_sring *shared;
RING_IDX rsp_prod, req_prod;
int err;
- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
- &ring_ref, 1, &addr);
+ err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
if (err)
goto err;
@@ -648,7 +650,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
goto err_unmap;
- err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
if (err < 0)
goto err_unmap;
@@ -671,8 +673,7 @@ err_deinit:
vif->ctrl_irq = 0;
err_unmap:
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->ctrl.sring);
+ xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
vif->ctrl.sring = NULL;
err:
@@ -717,6 +718,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
+ struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
struct task_struct *task;
int err;
@@ -753,7 +755,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
- queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+ dev, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
goto err;
@@ -764,7 +766,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
- queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+ dev, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
goto err;
@@ -774,7 +776,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
- queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+ dev, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
goto err;
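
The interrupt-handler rewrite above trades a terse idiom for explicit locals: the old code needed bitwise | precisely because logical || short-circuits and would skip the second handler whenever the first operand already decided the result. A standalone illustration with stand-in handlers:

	#include <stdbool.h>
	#include <stdio.h>

	static bool handle_tx(void) { puts("tx polled"); return true; }
	static bool handle_rx(void) { puts("rx polled"); return true; }

	int main(void)
	{
		/* Logical OR short-circuits: handle_rx() never runs here. */
		if (handle_tx() || handle_rx())
			puts("rx handler was skipped");

		/* The new style evaluates both unconditionally and keeps
		 * the results readable for the spurious-interrupt check.
		 */
		bool has_tx = handle_tx();
		bool has_rx = handle_rx();
		if (!has_rx && !has_tx)
			puts("no work: signal a spurious EOI");
		return 0;
	}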
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index bc3421d14576..e5c73f819662 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1091,7 +1091,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(uarg, true);
+ uarg->callback(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1228,7 +1228,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
@@ -1342,13 +1343,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
return 0;
gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
- if (nr_mops != 0) {
+ if (nr_mops != 0)
ret = gnttab_map_refs(queue->tx_map_ops,
NULL,
queue->pages_to_map,
nr_mops);
- BUG_ON(ret);
- }
work_done = xenvif_tx_submit(queue);
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b8febe1d1bfd..accc991d153f 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
skb = skb_peek(&queue->rx_queue);
- if (!skb)
+ if (!skb) {
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return false;
+ }
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if (skb->sw_hash)
needed++;
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6f10e0998f1c..a5439c130130 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -411,7 +411,7 @@ static void read_xenbus_frontend_xdp(struct backend_info *be,
vif->xdp_headroom = headroom;
}
-/**
+/*
* Callback received when the frontend's state changes.
*/
static void frontend_changed(struct xenbus_device *dev,
@@ -996,7 +996,7 @@ static int netback_remove(struct xenbus_device *dev)
return 0;
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and switch to InitWait.
*/
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b01848ef4649..cc19cd9203da 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -864,12 +864,10 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
u32 act;
int err;
- xdp->data_hard_start = page_address(pdata);
- xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &queue->xdp_rxq;
- xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+ xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+ &queue->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
+ len, false);
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
@@ -1582,7 +1580,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return ERR_PTR(err);
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffers for communication with the backend, and
* inform the backend of the appropriate details for those.
@@ -1659,7 +1657,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
}
}
-/**
+/*
* We are reconnecting to the backend, due to a suspend/resume, or a backend
* driver restart. We tear down our netif structure and recreate it, but
* leave the device-layer structures intact so that this is transparent to the
@@ -1813,7 +1811,7 @@ static int setup_netfront(struct xenbus_device *dev,
* a) feature-split-event-channels == 0
* b) feature-split-event-channels == 1 but failed to setup
*/
- if (!feature_split_evtchn || (feature_split_evtchn && err))
+ if (!feature_split_evtchn || err)
err = setup_netfront_single(queue);
if (err)
@@ -2305,7 +2303,7 @@ static int xennet_connect(struct net_device *dev)
return 0;
}
-/**
+/*
* Callback received when the backend's state changes.
*/
static void netback_changed(struct xenbus_device *dev,
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 75c65d339018..288c6f1c6979 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -49,6 +49,17 @@ config NFC_PORT100
If unsure, say N.
+config NFC_VIRTUAL_NCI
+ tristate "NCI device simulator driver"
+ depends on NFC_NCI
+ help
+	  The virtual NCI device simulates an NCI device to the user.
+	  It can be used to validate the NCI module and applications.
+	  This driver supports communication between the virtual NCI
+	  device and the NCI module.
+
+ If unsure, say N.
+
source "drivers/nfc/fdp/Kconfig"
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/pn533/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 5393ba59b17d..7b1bfde1d971 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NFC_ST_NCI) += st-nci/
obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/
obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5/
obj-$(CONFIG_NFC_ST95HF) += st95hf/
+obj-$(CONFIG_NFC_VIRTUAL_NCI) += virtual_ncidev.o
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index ad0abb1f0bae..adaa1a7147f9 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -155,7 +155,7 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
/*
 * LRC check failed. This may be due to transmission error or
- * desynchronization between driver and FDP. Drop the paquet
+ * desynchronization between driver and FDP. Drop the packet
* and force resynchronization
*/
if (lrc) {
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 5dad8847a9b3..8fa7771085eb 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -44,15 +44,13 @@ static int microread_mei_probe(struct mei_cl_device *cldev,
return 0;
}
-static int microread_mei_remove(struct mei_cl_device *cldev)
+static void microread_mei_remove(struct mei_cl_device *cldev)
{
struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
microread_remove(phy->hdev);
nfc_mei_phy_free(phy);
-
- return 0;
}
static struct mei_cl_device_id microread_mei_tbl[] = {
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index f7464bd6d57c..f1469ac8ff42 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -513,7 +513,7 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
/*
* pn533_send_cmd_direct_async
*
- * The function sends a piority cmd directly to the chip omitting the cmd
+ * The function sends a priority cmd directly to the chip omitting the cmd
* queue. It's intended to be used by chaining mechanism of received responses
* where the host has to request every single chunk of data before scheduling
* next cmd from the queue.
@@ -615,7 +615,7 @@ static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
* as it's been already freed at the beginning of RX path by
* async_complete_cb.
*
- * 3. valid pointer in case of succesfult RX path
+ * 3. valid pointer in case of successful RX path
*
* A caller has to check a return value with IS_ERR macro. If the test pass,
* the returned pointer is valid.
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 579bc599f545..5c10aac085a4 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -42,7 +42,7 @@ static int pn544_mei_probe(struct mei_cl_device *cldev,
return 0;
}
-static int pn544_mei_remove(struct mei_cl_device *cldev)
+static void pn544_mei_remove(struct mei_cl_device *cldev)
{
struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
@@ -51,8 +51,6 @@ static int pn544_mei_remove(struct mei_cl_device *cldev)
pn544_hci_remove(phy->hdev);
nfc_mei_phy_free(phy);
-
- return 0;
}
static struct mei_cl_device_id pn544_mei_tbl[] = {
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 807eae04c1e3..1cba8f69d3ae 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -276,7 +276,6 @@ static int st_nci_hci_apdu_reader_event_received(struct nci_dev *ndev,
u8 event,
struct sk_buff *skb)
{
- int r = 0;
struct st_nci_info *info = nci_get_drvdata(ndev);
pr_debug("apdu reader gate event: %x\n", event);
@@ -298,7 +297,7 @@ static int st_nci_hci_apdu_reader_event_received(struct nci_dev *ndev,
}
kfree_skb(skb);
- return r;
+ return 0;
}
/*
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index c70f62fe321e..33978022ae47 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -169,7 +169,7 @@
/* Bits determining whether its a direct command or register R/W,
* whether to use a continuous SPI transaction or not, and the actual
- * direct cmd opcode or regster address.
+ * direct cmd opcode or register address.
*/
#define TRF7970A_CMD_BIT_CTRL BIT(7)
#define TRF7970A_CMD_BIT_RW BIT(6)
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
new file mode 100644
index 000000000000..f73ee0bf3593
--- /dev/null
+++ b/drivers/nfc/virtual_ncidev.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual NCI device simulation driver
+ *
+ * Copyright (C) 2020 Samsung Electronics
+ * Bongsu Jeon <bongsu.jeon@samsung.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <net/nfc/nci_core.h>
+
+enum virtual_ncidev_mode {
+ virtual_ncidev_enabled,
+ virtual_ncidev_disabled,
+ virtual_ncidev_disabling,
+};
+
+#define IOCTL_GET_NCIDEV_IDX 0
+#define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK | \
+ NFC_PROTO_ISO15693_MASK)
+
+static enum virtual_ncidev_mode state;
+static struct miscdevice miscdev;
+static struct sk_buff *send_buff;
+static struct nci_dev *ndev;
+static DEFINE_MUTEX(nci_mutex);
+
+static int virtual_nci_open(struct nci_dev *ndev)
+{
+ return 0;
+}
+
+static int virtual_nci_close(struct nci_dev *ndev)
+{
+ mutex_lock(&nci_mutex);
+ kfree_skb(send_buff);
+ send_buff = NULL;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ mutex_lock(&nci_mutex);
+ if (state != virtual_ncidev_enabled) {
+ mutex_unlock(&nci_mutex);
+ return 0;
+ }
+
+ if (send_buff) {
+ mutex_unlock(&nci_mutex);
+ return -1;
+ }
+ send_buff = skb_copy(skb, GFP_KERNEL);
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static struct nci_ops virtual_nci_ops = {
+ .open = virtual_nci_open,
+ .close = virtual_nci_close,
+ .send = virtual_nci_send
+};
+
+static ssize_t virtual_ncidev_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t actual_len;
+
+ mutex_lock(&nci_mutex);
+ if (!send_buff) {
+ mutex_unlock(&nci_mutex);
+ return 0;
+ }
+
+ actual_len = min_t(size_t, count, send_buff->len);
+
+ if (copy_to_user(buf, send_buff->data, actual_len)) {
+ mutex_unlock(&nci_mutex);
+ return -EFAULT;
+ }
+
+ skb_pull(send_buff, actual_len);
+ if (send_buff->len == 0) {
+ consume_skb(send_buff);
+ send_buff = NULL;
+ }
+ mutex_unlock(&nci_mutex);
+
+ return actual_len;
+}
+
+static ssize_t virtual_ncidev_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(count, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ nci_recv_frame(ndev, skb);
+ return count;
+}
+
+static int virtual_ncidev_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ mutex_lock(&nci_mutex);
+ if (state != virtual_ncidev_disabled) {
+ mutex_unlock(&nci_mutex);
+ return -EBUSY;
+ }
+
+ ndev = nci_allocate_device(&virtual_nci_ops, VIRTUAL_NFC_PROTOCOLS,
+ 0, 0);
+ if (!ndev) {
+ mutex_unlock(&nci_mutex);
+ return -ENOMEM;
+ }
+
+ ret = nci_register_device(ndev);
+ if (ret < 0) {
+ nci_free_device(ndev);
+ mutex_unlock(&nci_mutex);
+ return ret;
+ }
+ state = virtual_ncidev_enabled;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static int virtual_ncidev_close(struct inode *inode, struct file *file)
+{
+ mutex_lock(&nci_mutex);
+
+ if (state == virtual_ncidev_enabled) {
+ state = virtual_ncidev_disabling;
+ mutex_unlock(&nci_mutex);
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ mutex_lock(&nci_mutex);
+ }
+
+ state = virtual_ncidev_disabled;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static long virtual_ncidev_ioctl(struct file *flip, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nfc_dev *nfc_dev = ndev->nfc_dev;
+ void __user *p = (void __user *)arg;
+
+ if (cmd != IOCTL_GET_NCIDEV_IDX)
+ return -ENOTTY;
+
+ if (copy_to_user(p, &nfc_dev->idx, sizeof(nfc_dev->idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static const struct file_operations virtual_ncidev_fops = {
+ .owner = THIS_MODULE,
+ .read = virtual_ncidev_read,
+ .write = virtual_ncidev_write,
+ .open = virtual_ncidev_open,
+ .release = virtual_ncidev_close,
+ .unlocked_ioctl = virtual_ncidev_ioctl
+};
+
+static int __init virtual_ncidev_init(void)
+{
+ state = virtual_ncidev_disabled;
+ miscdev.minor = MISC_DYNAMIC_MINOR;
+ miscdev.name = "virtual_nci";
+ miscdev.fops = &virtual_ncidev_fops;
+ miscdev.mode = S_IALLUGO;
+
+ return misc_register(&miscdev);
+}
+
+static void __exit virtual_ncidev_exit(void)
+{
+ misc_deregister(&miscdev);
+}
+
+module_init(virtual_ncidev_init);
+module_exit(virtual_ncidev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtual NCI device simulation driver");
+MODULE_AUTHOR("Bongsu Jeon <bongsu.jeon@samsung.com>");
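
A minimal userspace sketch of driving the simulator from a test program: open the misc device node, fetch the index of the NCI device this instance backs via the ioctl, then exchange raw NCI frames with read() and write(). The /dev/virtual_nci path follows the miscdevice name above; error handling is trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define IOCTL_GET_NCIDEV_IDX	0	/* matches the driver above */

	int main(void)
	{
		unsigned char buf[256];
		ssize_t n;
		int idx;

		int fd = open("/dev/virtual_nci", O_RDWR);
		if (fd < 0)
			return 1;

		/* Index usable with the kernel NFC netlink interface */
		if (ioctl(fd, IOCTL_GET_NCIDEV_IDX, &idx) == 0)
			printf("virtual NCI device idx: %d\n", idx);

		/* Frames the NCI core sends down surface on read(); bytes
		 * written here are injected upward via nci_recv_frame().
		 */
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			printf("got %zd bytes from the NCI core\n", n);

		close(fd);
		return 0;
	}

Note that read() returns 0 rather than blocking when no frame is pending, so a real test harness would poll or retry.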
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index e77c587060ff..c325be526b80 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -2,4 +2,5 @@
source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/epf/Kconfig"
source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 4714d6238845..223ca592b5f9 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_EPF) += epf/
obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/epf/Kconfig b/drivers/ntb/hw/epf/Kconfig
new file mode 100644
index 000000000000..6197d1aab344
--- /dev/null
+++ b/drivers/ntb/hw/epf/Kconfig
@@ -0,0 +1,6 @@
+config NTB_EPF
+ tristate "Generic EPF Non-Transparent Bridge support"
+ depends on m
+ help
+	  This driver supports EPF NTB on a configurable endpoint.
+ If unsure, say N.
diff --git a/drivers/ntb/hw/epf/Makefile b/drivers/ntb/hw/epf/Makefile
new file mode 100644
index 000000000000..2f560a422bc6
--- /dev/null
+++ b/drivers/ntb/hw/epf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_EPF) += ntb_hw_epf.o
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
new file mode 100644
index 000000000000..b019755e4e21
--- /dev/null
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Host side endpoint driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#define NTB_EPF_COMMAND 0x0
+#define CMD_CONFIGURE_DOORBELL 1
+#define CMD_TEARDOWN_DOORBELL 2
+#define CMD_CONFIGURE_MW 3
+#define CMD_TEARDOWN_MW 4
+#define CMD_LINK_UP 5
+#define CMD_LINK_DOWN 6
+
+#define NTB_EPF_ARGUMENT 0x4
+#define MSIX_ENABLE BIT(16)
+
+#define NTB_EPF_CMD_STATUS 0x8
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define NTB_EPF_LINK_STATUS 0x0A
+#define LINK_STATUS_UP BIT(0)
+
+#define NTB_EPF_TOPOLOGY 0x0C
+#define NTB_EPF_LOWER_ADDR 0x10
+#define NTB_EPF_UPPER_ADDR 0x14
+#define NTB_EPF_LOWER_SIZE 0x18
+#define NTB_EPF_UPPER_SIZE 0x1C
+#define NTB_EPF_MW_COUNT 0x20
+#define NTB_EPF_MW1_OFFSET 0x24
+#define NTB_EPF_SPAD_OFFSET 0x28
+#define NTB_EPF_SPAD_COUNT 0x2C
+#define NTB_EPF_DB_ENTRY_SIZE 0x30
+#define NTB_EPF_DB_DATA(n) (0x34 + (n) * 4)
+#define NTB_EPF_DB_OFFSET(n) (0xB4 + (n) * 4)
+
+#define NTB_EPF_MIN_DB_COUNT 3
+#define NTB_EPF_MAX_DB_COUNT 31
+#define NTB_EPF_MW_OFFSET 2
+
+#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+struct ntb_epf_dev {
+ struct ntb_dev ntb;
+ struct device *dev;
+	/* Mutex to protect the sending of commands to the NTB EPF */
+ struct mutex cmd_lock;
+
+ enum pci_barno ctrl_reg_bar;
+ enum pci_barno peer_spad_reg_bar;
+ enum pci_barno db_reg_bar;
+
+ unsigned int mw_count;
+ unsigned int spad_count;
+ unsigned int db_count;
+
+ void __iomem *ctrl_reg;
+ void __iomem *db_reg;
+ void __iomem *peer_spad_reg;
+
+ unsigned int self_spad;
+ unsigned int peer_spad;
+
+ int db_val;
+ u64 db_valid_mask;
+};
+
+#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
+
+struct ntb_epf_data {
+ /* BAR that contains both control region and self spad region */
+ enum pci_barno ctrl_reg_bar;
+ /* BAR that contains peer spad region */
+ enum pci_barno peer_spad_reg_bar;
+ /* BAR that contains Doorbell region and Memory window '1' */
+ enum pci_barno db_reg_bar;
+};
+
+static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
+ u32 argument)
+{
+ ktime_t timeout;
+ bool timedout;
+ int ret = 0;
+ u32 status;
+
+ mutex_lock(&ndev->cmd_lock);
+ writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT);
+ writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND);
+
+ timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT);
+ while (1) {
+ timedout = ktime_after(ktime_get(), timeout);
+ status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
+
+ if (status == COMMAND_STATUS_ERROR) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (status == COMMAND_STATUS_OK)
+ break;
+
+ if (WARN_ON(timedout)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ usleep_range(5, 10);
+ }
+
+ writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
+ mutex_unlock(&ndev->cmd_lock);
+
+ return ret;
+}
+
+static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
+{
+ struct device *dev = ndev->dev;
+
+ if (idx < 0 || idx > ndev->mw_count) {
+ dev_err(dev, "Unsupported Memory Window index %d\n", idx);
+ return -EINVAL;
+ }
+
+ return idx + 2;
+}
+
+static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ return ndev->mw_count;
+}
+
+static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = pci_resource_len(ndev->ntb.pdev, bar);
+
+ return 0;
+}
+
+static u64 ntb_epf_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 status;
+
+ status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS);
+
+ return status & LINK_STATUS_UP;
+}
+
+static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx);
+ return 0;
+ }
+
+ offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ offset += (idx << 2);
+
+ return readl(ndev->ctrl_reg + offset);
+}
+
+static int ntb_epf_spad_write(struct ntb_dev *ntb,
+ int idx, u32 val)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ offset += (idx << 2);
+ writel(val, ndev->ctrl_reg + offset);
+
+ return 0;
+}
+
+static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+		dev_err(dev, "READ: Invalid Peer ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = (idx << 2);
+ return readl(ndev->peer_spad_reg + offset);
+}
+
+static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int idx, u32 val)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = (idx << 2);
+ writel(val, ndev->peer_spad_reg + offset);
+
+ return 0;
+}
+
+static int ntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int ret;
+
+ ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0);
+ if (ret) {
+		dev_err(dev, "Failed to enable link\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int ret;
+
+ ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0);
+ if (ret) {
+		dev_err(dev, "Failed to disable link\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ntb_epf_vec_isr(int irq, void *dev)
+{
+ struct ntb_epf_dev *ndev = dev;
+ int irq_no;
+
+ irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0);
+ ndev->db_val = irq_no + 1;
+
+ if (irq_no == 0)
+ ntb_link_event(&ndev->ntb);
+ else
+ ntb_db_event(&ndev->ntb, irq_no);
+
+ return IRQ_HANDLED;
+}
+
+static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ struct device *dev = ndev->dev;
+ u32 argument = MSIX_ENABLE;
+ int irq;
+ int ret;
+ int i;
+
+ irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX);
+ if (irq < 0) {
+ dev_dbg(dev, "Failed to get MSIX interrupts\n");
+ irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max,
+ PCI_IRQ_MSI);
+ if (irq < 0) {
+ dev_err(dev, "Failed to get MSI interrupts\n");
+ return irq;
+ }
+ argument &= ~MSIX_ENABLE;
+ }
+
+ for (i = 0; i < irq; i++) {
+ ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr,
+ 0, "ntb_epf", ndev);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ goto err_request_irq;
+ }
+ }
+
+ ndev->db_count = irq - 1;
+
+ ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL,
+ argument | irq);
+ if (ret) {
+ dev_err(dev, "Failed to configure doorbell\n");
+ goto err_configure_db;
+ }
+
+ return 0;
+
+err_configure_db:
+ for (i = 0; i < ndev->db_count + 1; i++)
+ free_irq(pci_irq_vector(pdev, i), ndev);
+
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+static int ntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->mw_count;
+}
+
+static int ntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->db_valid_mask;
+}
+
+static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ resource_size_t mw_size;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ bar = idx + NTB_EPF_MW_OFFSET;
+
+ mw_size = pci_resource_len(ntb->pdev, bar);
+
+ if (size > mw_size) {
+ dev_err(dev, "Size:%pa is greater than the MW size %pa\n",
+ &size, &mw_size);
+ return -EINVAL;
+ }
+
+ writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR);
+ writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR);
+ writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE);
+ writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE);
+ ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx);
+
+ return 0;
+}
+
+static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+	int ret;
+
+	ret = ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx);
+ if (ret)
+ dev_err(dev, "Failed to teardown memory window\n");
+
+ return ret;
+}
+
+static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 offset = 0;
+ int bar;
+
+ if (idx == 0)
+ offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
+
+ bar = idx + NTB_EPF_MW_OFFSET;
+
+ if (base)
+ *base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
+
+ if (size)
+ *size = pci_resource_len(ndev->ntb.pdev, bar) - offset;
+
+ return 0;
+}
+
+static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct device *dev = ndev->dev;
+ u32 db_entry_size;
+ u32 db_offset;
+ u32 db_data;
+
+ if (interrupt_num > ndev->db_count) {
+ dev_err(dev, "DB interrupt %d greater than Max Supported %d\n",
+ interrupt_num, ndev->db_count);
+ return -EINVAL;
+ }
+
+ db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE);
+
+ db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num));
+ db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num));
+ writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) +
+ db_offset);
+
+ return 0;
+}
+
+static u64 ntb_epf_db_read(struct ntb_dev *ntb)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+ return ndev->db_val;
+}
+
+static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+ ndev->db_val = 0;
+
+ return 0;
+}
+
+static const struct ntb_dev_ops ntb_epf_ops = {
+ .mw_count = ntb_epf_mw_count,
+ .spad_count = ntb_epf_spad_count,
+ .peer_mw_count = ntb_epf_peer_mw_count,
+ .db_valid_mask = ntb_epf_db_valid_mask,
+ .db_set_mask = ntb_epf_db_set_mask,
+ .mw_set_trans = ntb_epf_mw_set_trans,
+ .mw_clear_trans = ntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = ntb_epf_peer_mw_get_addr,
+ .link_enable = ntb_epf_link_enable,
+ .spad_read = ntb_epf_spad_read,
+ .spad_write = ntb_epf_spad_write,
+ .peer_spad_read = ntb_epf_peer_spad_read,
+ .peer_spad_write = ntb_epf_peer_spad_write,
+ .peer_db_set = ntb_epf_peer_db_set,
+ .db_read = ntb_epf_db_read,
+ .mw_get_align = ntb_epf_mw_get_align,
+ .link_is_up = ntb_epf_link_is_up,
+ .db_clear_mask = ntb_epf_db_clear_mask,
+ .db_clear = ntb_epf_db_clear,
+ .link_disable = ntb_epf_link_disable,
+};
+
+static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev,
+ struct pci_dev *pdev)
+{
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &ntb_epf_ops;
+}
+
+static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
+{
+ struct device *dev = ndev->dev;
+ int ret;
+
+ /* One Link interrupt and rest doorbell interrupt */
+ ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1,
+ NTB_EPF_MAX_DB_COUNT + 1);
+ if (ret) {
+ dev_err(dev, "Failed to init ISR\n");
+ return ret;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+ ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
+ ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+
+ return 0;
+}
+
+static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
+ struct pci_dev *pdev)
+{
+ struct device *dev = ndev->dev;
+ int ret;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, "ntb");
+ if (ret) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_pci_regions;
+ }
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ goto err_dma_mask;
+ }
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+ }
+
+ ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
+ if (!ndev->ctrl_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
+ if (!ndev->db_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ return 0;
+
+err_dma_mask:
+ pci_clear_master(pdev);
+
+err_pci_regions:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+
+ pci_iounmap(pdev, ndev->ctrl_reg);
+ pci_iounmap(pdev, ndev->peer_spad_reg);
+ pci_iounmap(pdev, ndev->db_reg);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ int i;
+
+ ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1);
+
+ for (i = 0; i < ndev->db_count + 1; i++)
+ free_irq(pci_irq_vector(pdev, i), ndev);
+ pci_free_irq_vectors(pdev);
+}
+
+static int ntb_epf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum pci_barno peer_spad_reg_bar = BAR_1;
+ enum pci_barno ctrl_reg_bar = BAR_0;
+ enum pci_barno db_reg_bar = BAR_2;
+ struct device *dev = &pdev->dev;
+ struct ntb_epf_data *data;
+ struct ntb_epf_dev *ndev;
+ int ret;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ data = (struct ntb_epf_data *)id->driver_data;
+ if (data) {
+ if (data->peer_spad_reg_bar)
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ if (data->ctrl_reg_bar)
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ if (data->db_reg_bar)
+ db_reg_bar = data->db_reg_bar;
+ }
+
+ ndev->peer_spad_reg_bar = peer_spad_reg_bar;
+ ndev->ctrl_reg_bar = ctrl_reg_bar;
+ ndev->db_reg_bar = db_reg_bar;
+ ndev->dev = dev;
+
+ ntb_epf_init_struct(ndev, pdev);
+ mutex_init(&ndev->cmd_lock);
+
+ ret = ntb_epf_init_pci(ndev, pdev);
+ if (ret) {
+ dev_err(dev, "Failed to init PCI\n");
+ return ret;
+ }
+
+ ret = ntb_epf_init_dev(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to init device\n");
+ goto err_init_dev;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ return 0;
+
+err_register_dev:
+ ntb_epf_cleanup_isr(ndev);
+
+err_init_dev:
+ ntb_epf_deinit_pci(ndev);
+
+ return ret;
+}
+
+static void ntb_epf_pci_remove(struct pci_dev *pdev)
+{
+ struct ntb_epf_dev *ndev = pci_get_drvdata(pdev);
+
+ ntb_unregister_device(&ndev->ntb);
+ ntb_epf_cleanup_isr(ndev);
+ ntb_epf_deinit_pci(ndev);
+}
+
+static const struct ntb_epf_data j721e_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_1,
+ .db_reg_bar = BAR_2,
+};
+
+static const struct pci_device_id ntb_epf_pci_tbl[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { },
+};
+
+static struct pci_driver ntb_epf_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ntb_epf_pci_tbl,
+ .probe = ntb_epf_pci_probe,
+ .remove = ntb_epf_pci_remove,
+};
+module_pci_driver(ntb_epf_pci_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
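
The ops table above is exercised through the generic NTB client API rather than called directly. A minimal sketch of a hypothetical client that rings peer doorbell 0 on probe — the demo_* names are illustrative and not part of this driver; only the include/linux/ntb.h calls are real:

#include <linux/module.h>
#include <linux/ntb.h>

static int demo_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	/*
	 * Dispatches to ntb_epf_peer_db_set() above: the MSI data and
	 * offset programmed by the endpoint are read back from the
	 * control region, and the data word is written into the
	 * doorbell BAR at db_entry_size * interrupt_num + db_offset.
	 */
	return ntb_peer_db_set(ntb, BIT_ULL(0));
}

static void demo_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
}

static struct ntb_client demo_client = {
	.ops = {
		.probe = demo_probe,
		.remove = demo_remove,
	},
};
module_ntb_client(demo_client);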
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 22e5617b2cea..7b9556291eb1 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -165,7 +165,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
- struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
+ struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -177,7 +177,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
@@ -310,11 +310,10 @@ static int nd_blk_probe(struct device *dev)
return nsblk_attach_disk(nsblk);
}
-static int nd_blk_remove(struct device *dev)
+static void nd_blk_remove(struct device *dev)
{
if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev));
- return 0;
}
static struct nd_device_driver nd_blk_driver = {
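
The nvdimm hunks in this series track two tree-wide block-layer changes: bio->bi_disk has been replaced by bio->bi_bdev (the disk is now reached through bd_disk), and bus ->remove() callbacks have become void since their return value was effectively ignored by the driver core. A minimal sketch of the accessor change, with the hypothetical demo_dev standing in for any driver's private data:

static blk_qc_t demo_submit_bio(struct bio *bio)
{
	/* before this series: bio->bi_disk->private_data */
	struct demo_dev *d = bio->bi_bdev->bd_disk->private_data;
	/* before: blk_queue_io_stat(bio->bi_disk->queue) */
	bool do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	...
}

Retargeting a bio likewise goes through bio_set_dev(bio, disk->part0) instead of assigning bi_disk, as the nvme hunks further down show.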
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 12ff6f8784ac..41aa1f01fc07 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1442,7 +1442,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
static blk_qc_t btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct btt *btt = bio->bi_disk->private_data;
+ struct btt *btt = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -1452,7 +1452,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2304c6183822..48f0985ca8a0 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -113,18 +113,17 @@ static int nvdimm_bus_remove(struct device *dev)
struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
struct module *provider = to_bus_provider(dev);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- int rc = 0;
if (nd_drv->remove) {
debug_nvdimm_lock(dev);
- rc = nd_drv->remove(dev);
+ nd_drv->remove(dev);
debug_nvdimm_unlock(dev);
}
- dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
- dev_name(dev), rc);
+ dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
+ dev_name(dev));
module_put(provider);
- return rc;
+ return 0;
}
static void nvdimm_bus_shutdown(struct device *dev)
@@ -427,7 +426,7 @@ static void free_badrange_list(struct list_head *badrange_list)
list_del_init(badrange_list);
}
-static int nd_bus_remove(struct device *dev)
+static void nd_bus_remove(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
@@ -446,8 +445,6 @@ static int nd_bus_remove(struct device *dev)
spin_unlock(&nvdimm_bus->badrange.lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
-
- return 0;
}
static int nd_bus_probe(struct device *dev)
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 7d4ddc4d9322..91d9163ee303 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -113,19 +113,14 @@ static int nvdimm_probe(struct device *dev)
return rc;
}
-static int nvdimm_remove(struct device *dev)
+static void nvdimm_remove(struct device *dev)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
- if (!ndd)
- return 0;
-
nvdimm_bus_lock(dev);
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
put_ndd(ndd);
-
- return 0;
}
static struct nd_device_driver nvdimm_driver = {
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index b59032e0859b..9d208570d059 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(state);
-static ssize_t available_slots_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+ struct device *dev;
ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
+ dev = ndd->dev;
nvdimm_bus_lock(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev,
nvdimm_bus_unlock(dev);
return rc;
}
+
+static ssize_t available_slots_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rc;
+
+ nd_device_lock(dev);
+ rc = __available_slots_show(dev_get_drvdata(dev), buf);
+ nd_device_unlock(dev);
+
+ return rc;
+}
static DEVICE_ATTR_RO(available_slots);
__weak ssize_t security_show(struct device *dev,
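
The available_slots_show() split above is a lifetime fix rather than a cosmetic one: dev_get_drvdata() can race with nvdimm_remove() clearing the drvdata, so the lookup and dereference now happen under nd_device_lock(). The resulting pattern, sketched with a hypothetical attribute:

static ssize_t demo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	nd_device_lock(dev);	/* excludes ->remove() clearing drvdata */
	rc = __demo_show(dev_get_drvdata(dev), buf);
	nd_device_unlock(dev);

	return rc;
}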
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6da67f4d641a..2403b71b601e 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj,
return a->mode;
}
- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
- || a == &dev_attr_holder.attr
- || a == &dev_attr_holder_class.attr
- || a == &dev_attr_force_raw.attr
- || a == &dev_attr_mode.attr)
+ /* base is_namespace_io() attributes */
+ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
+ a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
+ a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
+ a == &dev_attr_resource.attr)
return a->mode;
return 0;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 875076b0ea6c..b8a85bfb2e95 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -23,7 +23,6 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
-#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
@@ -197,13 +196,13 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
- struct pmem_device *pmem = bio->bi_disk->private_data;
+ struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
struct nd_region *nd_region = to_region(pmem);
if (bio->bi_opf & REQ_PREFLUSH)
ret = nvdimm_flush(nd_region, bio);
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
@@ -564,7 +563,7 @@ static int nd_pmem_probe(struct device *dev)
return pmem_attach_disk(dev, ndns);
}
-static int nd_pmem_remove(struct device *dev)
+static void nd_pmem_remove(struct device *dev)
{
struct pmem_device *pmem = dev_get_drvdata(dev);
@@ -579,8 +578,6 @@ static int nd_pmem_remove(struct device *dev)
pmem->bb_state = NULL;
}
nvdimm_flush(to_nd_region(dev->parent), NULL);
-
- return 0;
}
static void nd_pmem_shutdown(struct device *dev)
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index bfce87ed72ab..e0c34120df37 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -87,7 +87,7 @@ static int child_unregister(struct device *dev, void *data)
return 0;
}
-static int nd_region_remove(struct device *dev)
+static void nd_region_remove(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -108,8 +108,6 @@ static int nd_region_remove(struct device *dev)
*/
sysfs_put(nd_region->bb_state);
nd_region->bb_state = NULL;
-
- return 0;
}
static int child_notify(struct device *dev, void *data)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..e68a8c4ac5a6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -280,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
static void nvme_retry_req(struct request *req)
{
- struct nvme_ns *ns = req->q->queuedata;
unsigned long delay = 0;
u16 crd;
/* The mask and shift result must be <= 3 */
crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
- if (ns && crd)
- delay = ns->ctrl->crdt[crd - 1] * 100;
+ if (crd)
+ delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
nvme_req(req)->retries++;
blk_mq_requeue_request(req, false);
@@ -331,7 +329,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -357,6 +355,21 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport-specific resource allocations and
+ * must propagate the return value.
+ */
+blk_status_t nvme_host_path_error(struct request *req)
+{
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+ return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
@@ -372,6 +385,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->tagset) {
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->admin_tagset) {
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -578,7 +611,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +622,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
@@ -844,11 +876,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- struct nvme_ns *ns = req->rq_disk->private_data;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
struct page *page = req->special_vec.bv_page;
- if (page == ns->ctrl->discard_page)
- clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ if (page == ctrl->discard_page)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(page_address(page) + req->special_vec.bv_offset);
}
@@ -927,7 +959,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
rq->cmd_flags |= REQ_HIPRI;
rq->end_io_data = &wait;
- blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
+ blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
while (!completion_done(&wait)) {
blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
@@ -966,7 +998,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
if (poll)
nvme_execute_rq_polled(req->q, NULL, req, at_head);
else
- blk_execute_rq(req->q, NULL, req, at_head);
+ blk_execute_rq(NULL, req, at_head);
if (result)
*result = nvme_req(req)->result;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -1103,7 +1135,7 @@ void nvme_execute_passthru_rq(struct request *rq)
u32 effects;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- blk_execute_rq(rq->q, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
nvme_passthru_end(ctrl, effects);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1115,7 +1147,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
@@ -1135,8 +1167,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (ret)
goto out;
bio = req->bio;
- bio->bi_disk = disk;
- if (disk && meta_buffer && meta_len) {
+ if (bdev)
+ bio_set_dev(bio, bdev);
+ if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {
@@ -1204,7 +1237,7 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
- blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
return 0;
}
@@ -1545,8 +1578,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
@@ -2114,9 +2160,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
- if ((id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags))
- set_disk_ro(disk, true);
+ set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -2165,17 +2210,18 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
+ ret = nvme_configure_metadata(ns, id);
+ if (ret)
+ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
if (ret)
goto out_unfreeze;
}
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->disk, ns, id);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
@@ -2819,7 +2865,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
@@ -2849,7 +2895,7 @@ static struct attribute *nvme_subsys_attrs[] = {
NULL,
};
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
.attrs = nvme_subsys_attrs,
};
@@ -2858,6 +2904,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2877,7 +2928,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -3146,7 +3197,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -3186,7 +3237,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
- if (!ctrl->identified) {
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;
@@ -3507,7 +3558,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
@@ -3541,7 +3592,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
@@ -3551,7 +3602,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
@@ -3561,7 +3612,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
@@ -3679,7 +3730,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
.attrs = nvme_dev_attrs,
.is_visible = nvme_dev_attrs_are_visible,
};
@@ -3813,7 +3864,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
@@ -4422,6 +4473,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
@@ -4434,7 +4486,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
struct nvme_effects_log *cel;
unsigned long i;
- xa_for_each (&ctrl->cels, i, cel) {
+ xa_for_each(&ctrl->cels, i, cel) {
xa_erase(&ctrl->cels, i);
kfree(cel);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 72ac00173500..5dfd806fc2d2 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
-
- nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
- blk_mq_start_request(rq);
- nvme_complete_rq(rq);
- return BLK_STS_OK;
+ return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
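
nvmf_fail_nonready_command() above becomes the first caller of the new nvme_host_path_error() helper, and the rdma ->queue_rq() error path further down uses it the same way. A hedged sketch of the intended call pattern in a transport, with the demo_* functions standing in for transport-specific submission and unwind code:

static blk_status_t demo_queue_rq_error_path(struct request *rq, int err)
{
	if (err == -EIO) {
		/*
		 * Unwind transport resources first, then complete the
		 * request with NVME_SC_HOST_PATH_ERROR so the multipath
		 * code can retry it on another path.
		 */
		demo_transport_unmap(rq);
		return nvme_host_path_error(rq);
	}
	return (err == -ENOMEM || err == -EAGAIN) ?
		BLK_STS_RESOURCE : BLK_STS_IOERR;
}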
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 38373a0e86ef..20dadd86e981 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
@@ -3776,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = {
NULL
};
-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
.attrs = nvme_fc_attrs,
};
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 552dbc04567b..8f9e96986780 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
- struct device *dev = ctrl->dev;
+ struct device *dev = ctrl->device;
struct nvme_hwmon_data *data;
struct device *hwmon;
int err;
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return 0;
@@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
err = nvme_hwmon_get_smart_log(data);
if (err) {
- dev_warn(ctrl->device,
- "Failed to read smart log (error %d)\n", err);
- devm_kfree(dev, data);
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ kfree(data);
return err;
}
- hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
- &nvme_hwmon_chip_info,
- NULL);
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+ data, &nvme_hwmon_chip_info,
+ NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
- devm_kfree(dev, data);
+ kfree(data);
}
-
+ ctrl->hwmon_device = hwmon;
return 0;
}
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->hwmon_device) {
+ struct nvme_hwmon_data *data =
+ dev_get_drvdata(ctrl->hwmon_device);
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
+ kfree(data);
+ }
+}
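
Dropping the devm_* variants here is the point of the patch: a devm-managed hwmon device is only released when the controller device itself is, which is too late. Registration is now paired with an explicit nvme_hwmon_exit(), called from nvme_uninit_ctrl() in the core.c hunk above so the hwmon device is gone before the character device:

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_hwmon_exit(ctrl);	/* hwmon_device_unregister + kfree(data) */
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	...
}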
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 470cef3abec3..b705988629f2 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -695,7 +695,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
rq->end_io_data = rqd;
- blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
+ blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io);
return 0;
@@ -757,7 +757,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write((struct nvme_command *)vcmd);
struct nvm_dev *dev = ns->ndev;
- struct gendisk *disk = ns->disk;
struct request *rq;
struct bio *bio = NULL;
__le64 *ppa_list = NULL;
@@ -817,10 +816,10 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
}
- bio->bi_disk = disk;
+ bio_set_dev(bio, ns->disk->part0);
}
- blk_execute_rq(q, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 9ac762b28811..a1d476e1ac02 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
}
for (ns = nvme_next_ns(head, old);
- ns != old;
+ ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
@@ -296,7 +296,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
- struct nvme_ns_head *head = bio->bi_disk->private_data;
+ struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -312,7 +312,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (likely(ns)) {
- bio->bi_disk = ns->disk;
+ bio_set_dev(bio, ns->disk->part0);
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
@@ -352,7 +352,7 @@ static void nvme_requeue_work(struct work_struct *work)
* Reset disk to the mpath node and resubmit to select a new
* path.
*/
- bio->bi_disk = head->disk;
+ bio_set_dev(bio, head->disk->part0);
submit_bio_noacct(bio);
}
}
@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+ ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+#endif
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
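
The extra NULL check in nvme_round_robin_path() above is needed because the siblings list is iterated under RCU (note the switch to list_add_tail_rcu() in the core.c hunk earlier) and can go empty while a walk is in flight; nvme_next_ns() then returns NULL, and comparing only against the old cursor would dereference it. The guarded walk:

for (ns = nvme_next_ns(head, old);
     ns && ns != old;	/* ns is NULL once the siblings list empties */
     ns = nvme_next_ns(head, ns)) {
	if (nvme_path_is_disabled(ns))
		continue;
	/* ... pick this path if it is usable ... */
}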
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e49f61f81df..07b34175c6ce 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,12 @@ enum nvme_quirks {
* NVMe 1.3 compliance.
*/
NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+
+ /*
+ * The controller does not properly handle DMA addresses over
+ * 48 bits.
+ */
+ NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
};
/*
@@ -246,6 +252,9 @@ struct nvme_ctrl {
struct rw_semaphore namespaces_rwsem;
struct device ctrl_device;
struct device *device; /* char device */
+#ifdef CONFIG_NVME_HWMON
+ struct device *hwmon_device;
+#endif
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
@@ -575,7 +584,10 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}
void nvme_complete_rq(struct request *req);
+blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
@@ -610,8 +622,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -630,7 +640,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +684,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +739,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
@@ -814,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
return 0;
}
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b4385cb0ff60..38b0d694dfc9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -23,6 +23,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
}
- WARN_ON_ONCE(!iod->nents);
+}
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+ for (i = 0; i < iod->npages; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req));
+ else
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
}
+ WARN_ON_ONCE(!iod->nents);
+
+ nvme_unmap_sg(dev, req);
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
mempool_free(iod->sg, dev->iod_mempool);
}
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
+ goto free_prps;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
"Invalid SGL for payload:%d nents:%d\n",
blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
- goto out;
+ goto out_free_sg;
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
- goto out;
+ goto out_free_sg;
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ nvme_unmap_sg(dev, req);
+out_free_sg:
+ mempool_free(iod->sg, dev->iod_mempool);
return ret;
}
@@ -967,6 +997,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +1006,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
@@ -1326,7 +1357,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
abort_req->end_io_data = NULL;
- blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
+ blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
/*
* The aborted req will be completed on receiving the abort req.
@@ -1794,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1808,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Tell the controller about the host-side address that maps the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
@@ -2237,7 +2281,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- blk_execute_rq_nowait(q, NULL, req, false,
+ blk_execute_rq_nowait(NULL, req, false,
opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
@@ -2318,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_address_bits = 48;
+ if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
goto disable;
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -2585,6 +2632,7 @@ static void nvme_reset_work(struct work_struct *work)
* Don't limit the IOMMU merged segment size.
*/
dma_set_max_seg_size(dev->dev, 0xffffffff);
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
mutex_unlock(&dev->shutdown_lock);
@@ -3196,7 +3244,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
@@ -3212,6 +3263,22 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
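
All six Amazon entries set the NVME_QUIRK_DMA_ADDRESS_BITS_48 quirk introduced in nvme.h above, and nvme_pci_enable() consumes it when programming the DMA mask. Worked through, DMA_BIT_MASK(48) is 0x0000ffffffffffff, so both streaming and coherent allocations are kept below 2^48:

int dma_address_bits = 64;

if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
	dma_address_bits = 48;	/* DMA_BIT_MASK(48) == 0x0000ffffffffffff */
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
	goto disable;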
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cf6c49d09c82..53ac4d7442ba 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
struct completion cm_done;
bool pi_support;
int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
if (idx && ctrl->ctrl.max_integrity_segments)
queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
@@ -912,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
error = nvme_init_identify(&ctrl->ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -994,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
out_wait_freeze_timed_out:
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
out_cleanup_connect_q:
+ nvme_cancel_tagset(&ctrl->ctrl);
if (new)
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
@@ -1012,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- if (ctrl->ctrl.admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
- }
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
@@ -1030,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
- if (ctrl->ctrl.tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
- }
+ nvme_cancel_tagset(&ctrl->ctrl);
if (remove)
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1137,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->ctrl.queue_count > 1)
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, new);
+ }
destroy_admin:
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, new);
return ret;
}
@@ -1461,7 +1474,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
req->mr->sig_attrs, ns->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
@@ -2085,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err_unmap:
nvme_rdma_unmap_data(queue, rq);
err:
- if (err == -ENOMEM || err == -EAGAIN)
+ if (err == -EIO)
+ ret = nvme_host_path_error(rq);
+ else if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
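
The per-queue mutex added here closes a stop/free race: nvme_rdma_stop_queue() can be reached from several teardown paths concurrently, and the LIVE-bit test plus the actual teardown (rdma_disconnect() and queue drain in __nvme_rdma_stop_queue()) now execute atomically with respect to one another. The tcp.c hunks below apply the identical pattern:

mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
	__nvme_rdma_stop_queue(queue);	/* disconnect + drain, at most once */
mutex_unlock(&queue->queue_lock);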
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..69f59d2c5799 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
+ struct mutex queue_lock;
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
@@ -201,15 +202,10 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
- return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
-static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
-{
- return req->iter.iov_offset;
-}
-
static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
@@ -228,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
struct request *rq = blk_mq_rq_from_pdu(req);
struct bio_vec *vec;
unsigned int size;
- int nsegs;
+ int nr_bvec;
size_t offset;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
vec = &rq->special_vec;
- nsegs = 1;
+ nr_bvec = 1;
size = blk_rq_payload_bytes(rq);
offset = 0;
} else {
struct bio *bio = req->curr_bio;
+ struct bvec_iter bi;
+ struct bio_vec bv;
vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- nsegs = bio_segments(bio);
+ nr_bvec = 0;
+ bio_for_each_bvec(bv, bio, bi) {
+ nr_bvec++;
+ }
size = bio->bi_iter.bi_size;
offset = bio->bi_iter.bi_bvec_done;
}
- iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+ iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
req->iter.iov_offset = offset;
}
@@ -262,6 +263,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -276,10 +287,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* directly, otherwise queue io_work. Also, only do that if we
* are on the same cpu, so we don't introduce contention.
*/
- if (queue->io_cpu == smp_processor_id() &&
+ if (queue->io_cpu == __smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
@@ -972,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- nvme_tcp_init_iter(req, WRITE);
} else {
nvme_tcp_done_send_req(queue);
}
@@ -1005,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- if (!req->data_sent)
- nvme_tcp_init_iter(req, WRITE);
return 1;
}
req->offset += ret;
@@ -1209,6 +1217,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1370,6 +1379,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
@@ -1388,7 +1398,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
@@ -1497,6 +1507,8 @@ err_crypto:
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1524,9 +1536,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
@@ -1799,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
out_wait_freeze_timed_out:
nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
+ nvme_cancel_tagset(ctrl);
if (new)
blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
@@ -1862,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
error = nvme_init_identify(ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->admin_q);
@@ -1888,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
blk_mq_quiesce_queue(ctrl->admin_q);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
- if (ctrl->admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->admin_tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
- }
+ nvme_cancel_admin_tagset(ctrl);
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
@@ -1908,11 +1923,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
- if (ctrl->tagset) {
- blk_mq_tagset_busy_iter(ctrl->tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->tagset);
- }
+ nvme_cancel_tagset(ctrl);
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -1987,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->queue_count > 1)
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ nvme_cancel_tagset(ctrl);
nvme_tcp_destroy_io_queues(ctrl, new);
+ }
destroy_admin:
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, new);
return ret;
}
@@ -2252,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
+ if (req->curr_bio && req->data_len)
+ nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
req->pdu_len = req->data_len;
- else if (req->curr_bio)
- nvme_tcp_init_iter(req, READ);
pdu->hdr.type = nvme_tcp_cmd;
pdu->hdr.flags = 0;
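
Two related changes meet in this file: the request iterator is now set up once in nvme_tcp_setup_cmd_pdu() for both directions (the lazy WRITE-side init in the two send paths is deleted above), and the bvec count fed to iov_iter_bvec() switches from bio_segments() to an explicit bio_for_each_bvec() walk. The distinction matters because the iterator consumes multi-page bvecs, whereas bio_segments() counts single-page segments; nvme_tcp_req_cur_length() is adjusted to match via iov_iter_single_seg_count(). Sketch of the counting change:

struct bvec_iter bi;
struct bio_vec bv;
int nr_bvec = 0;

bio_for_each_bvec(bv, bio, bi)	/* multi-page bvecs, unlike bio_segments() */
	nr_bvec++;
iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);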
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 5c3cb6928f3c..6543015b6121 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
return ret;
}
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 lbaf = cdw10[0] & 0xF;
+ u8 mset = (cdw10[0] >> 4) & 0x1;
+ u8 pi = (cdw10[0] >> 5) & 0x7;
+ u8 pil = cdw10[1] & 0x1;
+ u8 ses = (cdw10[1] >> 1) & 0x7;
+
+ trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+ lbaf, mset, pi, pil, ses);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(cdw10 + 8);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ u8 pr = cdw10[14];
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+ slba, numd, zra, zrasf, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
return nvme_trace_admin_get_features(p, cdw10);
case nvme_admin_get_lba_status:
return nvme_trace_get_lba_status(p, cdw10);
+ case nvme_admin_format_nvm:
+ return nvme_trace_admin_format_nvm(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
@@ -171,9 +219,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvme_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvme_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvme_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvme_trace_zone_mgmt_recv(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
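[Editor's note] As a worked example of the bit-field extraction in nvme_trace_admin_format_nvm() above, with hypothetical cdw10 byte values (not taken from any real trace):

    /* cdw10 bytes { 0x52, 0x03 } decode as:
     *   lbaf = 0x52 & 0xF        = 2
     *   mset = (0x52 >> 4) & 0x1 = 1
     *   pi   = (0x52 >> 5) & 0x7 = 2
     *   pil  = 0x03 & 0x1        = 1
     *   ses  = (0x03 >> 1) & 0x7 = 1
     * giving the trace string "lbaf=2, mset=1, pi=2, pil=1, ses=1".
     */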
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 1dfe9a3500e3..c7e3ec561ba0 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -9,13 +9,7 @@
int nvme_revalidate_zones(struct nvme_ns *ns)
{
- struct request_queue *q = ns->queue;
- int ret;
-
- ret = blk_revalidate_disk_zones(ns->disk, NULL);
- if (!ret)
- blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
- return ret;
+ return blk_revalidate_disk_zones(ns->disk, NULL);
}
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
@@ -109,10 +103,11 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
free_data:
kfree(id);
return status;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 8d90235e4fcc..bc6a774f2124 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -74,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
- if (!ns) {
- pr_err("Could not find namespace id : %d\n",
- le32_to_cpu(req->cmd->get_log_page.nsid));
- req->error_loc = offsetof(struct nvme_rw_command, nsid);
- return NVME_SC_INVALID_NS;
- }
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
/* we don't have the right data for file-backed ns */
- if (!ns->bdev)
- goto out;
+ if (!req->ns->bdev)
+ return NVME_SC_SUCCESS;
- host_reads = part_stat_read(ns->bdev, ios[READ]);
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
data_units_read =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
data_units_written =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
-out:
- nvmet_put_namespace(ns);
return NVME_SC_SUCCESS;
}
@@ -468,10 +462,8 @@ out:
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = req->sq->ctrl;
- struct nvmet_ns *ns;
struct nvme_id_ns *id;
- u16 status = 0;
+ u16 status;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -486,18 +478,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns)
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = 0;
goto done;
+ }
- nvmet_ns_revalidate(ns);
+ nvmet_ns_revalidate(req->ns);
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
- switch (req->port->ana_state[ns->anagrpid]) {
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
case NVME_ANA_INACCESSIBLE:
case NVME_ANA_PERSISTENT_LOSS:
break;
@@ -506,8 +501,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
break;
}
- if (ns->bdev)
- nvmet_bdev_set_limits(ns->bdev, id);
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
@@ -521,27 +516,28 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
- id->anagrpid = cpu_to_le32(ns->anagrpid);
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
- id->lbaf[0].ds = ns->blksize_shift;
+ id->lbaf[0].ds = req->ns->blksize_shift;
- if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
NVME_NS_DPC_PI_TYPE3;
id->mc = NVME_MC_EXTENDED_LBA;
- id->dps = ns->pi_type;
+ id->dps = req->ns->pi_type;
id->flbas = NVME_NS_FLBAS_META_EXT;
- id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
}
- if (ns->readonly)
+ if (req->ns->readonly)
id->nsattr |= (1 << 0);
- nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
@@ -603,37 +599,32 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
- u16 status = 0;
off_t off = 0;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns) {
- req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = nvmet_req_find_ns(req);
+ if (status)
goto out;
- }
- if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+ if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
NVME_NIDT_UUID_LEN,
- &ns->uuid, &off);
+ &req->ns->uuid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+ if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
NVME_NIDT_NGUID_LEN,
- &ns->nguid, &off);
+ &req->ns->nguid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
-out_put_ns:
- nvmet_put_namespace(ns);
+
out:
nvmet_req_complete(req, status);
}
@@ -692,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u16 status;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
+ status = nvmet_req_find_ns(req);
+ if (status)
return status;
- }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -753,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
void nvmet_execute_set_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
@@ -797,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 result;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ result = nvmet_req_find_ns(req);
+ if (result)
+ return result;
+
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -828,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
void nvmet_execute_get_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
@@ -935,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req, cmd);
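[Editor's note] All of these admin-cmd.c conversions follow the same caller pattern around the new nvmet_req_find_ns() helper (implemented in the core.c hunk further below). A minimal sketch with a hypothetical handler name, assuming the namespace reference is dropped on request completion just as the removed nvmet_put_namespace() calls suggest:

    static void nvmet_execute_some_cmd(struct nvmet_req *req)
    {
    	u16 status;

    	/* populates req->ns (with a percpu ref held), or sets
    	 * req->error_loc and returns NVME_SC_INVALID_NS | NVME_SC_DNR
    	 */
    	status = nvmet_req_find_ns(req);
    	if (status)
    		goto out;

    	/* ... operate on req->ns ... */
    out:
    	nvmet_req_complete(req, status);	/* puts req->ns on completion */
    }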
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index c61ffd767062..635a7cb45d0b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
if (p->enabled)
pr_err("Disable port '%u' before changing attribute in %s\n",
- le16_to_cpu(p->disc_addr.portid), caller);
+ le16_to_cpu(p->disc_addr.portid), caller);
return p->enabled;
}
@@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
if (strtobool(page, &val))
return -EINVAL;
- if (port->enabled) {
- pr_err("Disable port before setting pi_enable value.\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
port->pi_enable = val;
return count;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8ce4d59cc9e7..67bbf0e3b507 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return status;
}
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+ pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+ req->sq->qid);
+
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
@@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
cancel_delayed_work_sync(&ctrl->ka_work);
}
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
- ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
- if (ns)
- percpu_ref_get(&ns->ref);
+ req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
- return ns;
+ percpu_ref_get(&req->ns->ref);
+ return NVME_SC_SUCCESS;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
@@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (nvmet_req_passthru_ctrl(req))
return nvmet_parse_passthru_io_cmd(req);
- req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ ret = nvmet_req_find_ns(req);
+ if (unlikely(ret))
+ return ret;
+
ret = nvmet_check_ana_state(req->port, req->ns);
if (unlikely(ret)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
- else
- return nvmet_bdev_parse_io_cmd(req);
+
+ return nvmet_bdev_parse_io_cmd(req);
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..d375745fc4ed 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct rcu_head rcu;
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
@@ -164,9 +165,10 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
+ struct rcu_head rcu;
};
@@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
u16 qid, u16 sqsize)
{
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int ret;
if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- spin_lock_irqsave(&assoc->tgtport->lock, flags);
- assoc->queues[qid] = queue;
- spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+ rcu_assign_pointer(assoc->queues[qid], queue);
return queue;
@@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
{
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- unsigned long flags;
- spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
- queue->assoc->queues[queue->qid] = NULL;
- spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+ rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
@@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
destroy_workqueue(queue->work_q);
- kfree(queue);
+ kfree_rcu(queue, rcu);
}
static void
@@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
- unsigned long flags;
if (qid > NVMET_NR_QUEUES)
return NULL;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = assoc->queues[qid];
+ queue = rcu_dereference(assoc->queues[qid]);
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return queue;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return NULL;
}
@@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
if (!needrandom) {
assoc->association_id = ran;
- list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del(&assoc->a_list);
+ list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree(assoc);
+ kfree_rcu(assoc, rcu);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
- spin_lock_irqsave(&tgtport->lock, flags);
+
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- queue = assoc->queues[i];
- if (queue) {
- if (!nvmet_fc_tgt_q_get(queue))
- continue;
- spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
- spin_lock_irqsave(&tgtport->lock, flags);
+ rcu_read_lock();
+ queue = rcu_dereference(assoc->queues[i]);
+ if (!queue) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ if (!nvmet_fc_tgt_q_get(queue)) {
+ rcu_read_unlock();
+ continue;
}
+ rcu_read_unlock();
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_assoc *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return ret;
}
@@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
- struct nvmet_fc_tgt_assoc *assoc, *next;
- unsigned long flags;
+ struct nvmet_fc_tgt_assoc *assoc;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry_safe(assoc, next,
- &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
}
/**
@@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
continue;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
- queue = assoc->queues[0];
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ queue = rcu_dereference(assoc->queues[0]);
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
nvmet_fc_tgtport_put(tgtport);
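[Editor's note] The fc.c changes above are one instance of a standard RCU conversion: readers walk the association list under rcu_read_lock() and pin the object with a reference before leaving the critical section, while the writer still serializes list updates under tgtport->lock and frees only after a grace period. Schematically, with generic names rather than the driver's own:

    /* Reader: lockless lookup, pin with a reference before use. */
    rcu_read_lock();
    list_for_each_entry_rcu(obj, &head, node) {
    	if (match(obj) && kref_get_unless_zero(&obj->ref)) {
    		found = obj;
    		break;
    	}
    }
    rcu_read_unlock();

    /* Writer: unlink under the update-side lock, free after a grace period. */
    spin_lock_irqsave(&lock, flags);
    list_del_rcu(&obj->node);
    spin_unlock_irqrestore(&lock, flags);
    kfree_rcu(obj, rcu);	/* the struct must embed a struct rcu_head named rcu */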
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 733d9363900e..54606f1872b4 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1501,7 +1501,8 @@ static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int opcode, starting, amount;
+ unsigned int opcode;
+ int starting, amount;
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
return -EBADRQC;
@@ -1544,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = {
NULL
};
-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
.attrs = fcloop_dev_attrs,
};
@@ -1588,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
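[Editor's note] The fcloop_set_cmd_drop() change fixes a format/type mismatch: the %x conversion in sscanf() requires a pointer to unsigned int, so parsing into a plain int is undefined behaviour and trips -Wformat. In miniature:

    unsigned int opcode;	/* %x must target unsigned int */
    int starting, amount;	/* %d targets int */

    /* "int opcode" with &opcode here would mismatch the %x conversion */
    if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
    	return -EBADRQC;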
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 125dde3f410e..9a8b3726a37c 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -185,7 +185,7 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
}
bip = bio_integrity_alloc(bio, GFP_NOIO,
- min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+ bio_max_segs(req->metadata_sg_cnt));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
@@ -225,7 +225,7 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
- int sg_cnt = req->sg_cnt;
+ unsigned int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
struct blk_plug plug;
@@ -256,14 +256,13 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (is_pci_p2pdma_page(sg_page(req->sg)))
op |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
}
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
@@ -290,7 +289,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
}
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = op;
@@ -333,7 +332,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
+ if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
@@ -451,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 0abbefd9925e..715d4376c997 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_file_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd for file ns %d on qid %d\n",
- cmd->common.opcode, req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 592763732065..cdfa537b1c0a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
@@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
sizeof(struct nvme_dsm_range);
}
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+ return req->sq->ctrl->subsys;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+ return nvmet_passthru_ctrl(nvmet_req_subsys(req));
}
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
@@ -603,4 +609,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
#endif /* _NVMET_H */
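[Editor's note] A quick worked example for the two conversion helpers just added, with illustrative values: for a 4096-byte namespace block size, ns->blksize_shift is 12 and SECTOR_SHIFT is 9, so the shift amount is 3.

    /* blksize_shift = 12 (4 KiB blocks), SECTOR_SHIFT = 9:
     *   nvmet_lba_to_sect(ns, 10) -> 10 << 3 = sector 80
     *       (10 * 4096 bytes == 80 * 512 bytes)
     *   nvmet_sect_to_lba(ns, 80) -> 80 >> 3 = LBA 10
     */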
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b9776fc8f08f..26c587ccd152 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
u16 status = NVME_SC_SUCCESS;
struct nvme_id_ctrl *id;
- int max_hw_sectors;
+ unsigned int max_hw_sectors;
int page_shift;
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -198,7 +198,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
bio = &req->p.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
- bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
bio->bi_end_io = bio_put;
}
bio->bi_opf = req_op(rq);
@@ -239,9 +239,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
q = ns->queue;
- timeout = req->sq->ctrl->subsys->io_timeout;
+ timeout = nvmet_req_subsys(req)->io_timeout;
} else {
- timeout = req->sq->ctrl->subsys->admin_timeout;
+ timeout = nvmet_req_subsys(req)->admin_timeout;
}
rq = nvme_alloc_request(q, req->cmd, 0);
@@ -275,7 +275,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
schedule_work(&req->p.work);
} else {
rq->end_io_data = req;
- blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
+ blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
nvmet_passthru_req_done);
}
@@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
return nvmet_setup_passthru_command(req);
default:
/* Reject commands not in the allowlist above */
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5c1e7cb7fe0d..06b6b742bb21 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- if (port->nport->pi_enable &&
- !(cm_id->device->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER)) {
- pr_err("T10-PI is not supported for %pISpcs\n", addr);
- ret = -EINVAL;
- goto out_destroy_id;
- }
-
port->cm_id = cm_id;
return 0;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc1f0f647189..8b0485ada315 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
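[Editor's note] The one-line "sg_offset = 0;" fix matters because only the first scatterlist segment of a partially received PDU starts mid-page; every later segment must be mapped from offset 0. Paraphrasing the surrounding loop (simplified from nvmet_tcp_map_pdu_iovec(), not a verbatim quote):

    while (length) {
    	u32 iov_len = min_t(u32, length, sg->length - sg_offset);

    	iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
    	iov->iov_len = iov_len;

    	length -= iov_len;
    	sg = sg_next(sg);
    	iov++;
    	sg_offset = 0;	/* a stale non-zero offset here corrupted later segments */
    }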
@@ -378,7 +379,7 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -386,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
crypto_ahash_digest(hash);
}
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist sg;
+ struct kvec *iov;
+ int i;
+
+ crypto_ahash_init(hash);
+ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+ sg_init_one(&sg, iov->iov_base, iov->iov_len);
+ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+ crypto_ahash_update(hash);
+ }
+ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+ crypto_ahash_final(hash);
+}
+
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -410,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -1059,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1080,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
cmd->req.execute(&cmd->req);
}
@@ -1467,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (inet->rcv_tos > 0)
ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1525,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index c14e3249a14d..6109b3806b12 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- if (req->ns)
- strncpy(name, req->ns->device_path, DISK_NAME_LEN);
- else
+ if (!req->ns) {
memset(name, 0, DISK_NAME_LEN);
+ return;
+ }
+
+ strncpy(name, req->ns->device_path,
+ min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
}
#endif
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 954d3b4a52ab..75d2594c16e1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -270,4 +270,12 @@ config SPRD_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sprd-efuse.
+config NVMEM_RMEM
+ tristate "Reserved Memory Based Driver Support"
+ help
+ This driver maps reserved memory into an nvmem device. It might be
+ useful to expose information left by firmware in memory.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-rmem.
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index a7c377218341..5376b8e0dae5 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -55,3 +55,5 @@ obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
nvmem_sprd_efuse-y := sprd-efuse.o
+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
+nvmem-rmem-y := rmem.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 177f5bf27c6d..a5ab1e0c74cf 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -682,7 +682,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
for_each_child_of_node(parent, child) {
addr = of_get_property(child, "reg", &len);
- if (!addr || (len < 2 * sizeof(u32))) {
+ if (!addr)
+ continue;
+ if (len < 2 * sizeof(u32)) {
dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
return -EINVAL;
}
@@ -713,6 +715,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
cell->name, nvmem->stride);
/* Cells already added will be freed later. */
kfree_const(cell->name);
+ of_node_put(cell->np);
kfree(cell);
return -EINVAL;
}
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 701704b87dc9..c86339a7f583 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -96,7 +96,6 @@ MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
static int imx_iim_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct iim_priv *iim;
struct nvmem_device *nvmem;
@@ -111,11 +110,7 @@ static int imx_iim_probe(struct platform_device *pdev)
if (IS_ERR(iim->base))
return PTR_ERR(iim->base);
- of_id = of_match_device(imx_iim_dt_ids, dev);
- if (!of_id)
- return -ENODEV;
-
- drvdata = of_id->data;
+ drvdata = of_device_get_match_data(&pdev->dev);
iim->clk = devm_clk_get(dev, NULL);
if (IS_ERR(iim->clk))
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index a72704cd0468..f6e9f96933ca 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
@@ -18,7 +18,6 @@
#define SDAM_PBS_TRIG_CLR 0xE6
struct sdam_chip {
- struct platform_device *pdev;
struct regmap *regmap;
struct nvmem_config sdam_config;
unsigned int base;
@@ -65,7 +64,7 @@ static int sdam_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct sdam_chip *sdam = priv;
- struct device *dev = &sdam->pdev->dev;
+ struct device *dev = sdam->sdam_config.dev;
int rc;
if (!sdam_is_valid(sdam, offset, bytes)) {
@@ -86,7 +85,7 @@ static int sdam_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct sdam_chip *sdam = priv;
- struct device *dev = &sdam->pdev->dev;
+ struct device *dev = sdam->sdam_config.dev;
int rc;
if (!sdam_is_valid(sdam, offset, bytes)) {
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
new file mode 100644
index 000000000000..b11c3c974b3d
--- /dev/null
+++ b/drivers/nvmem/rmem.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+
+struct rmem {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ struct reserved_mem *mem;
+
+ phys_addr_t size;
+};
+
+static int rmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rmem *priv = context;
+ size_t available = priv->mem->size;
+ loff_t off = offset;
+ void *addr;
+ int count;
+
+ /*
+ * Only map the reserved memory at this point to avoid potential rogue
+ * kernel threads inadvertently modifying it. Based on the current
+ * use-cases for this driver, the performance hit isn't a concern.
+ * Nor is it likely to be, given the nature of the subsystem. Most nvmem
+ * devices operate over slow buses to begin with.
+ *
+ * An alternative would be setting the memory as RO, set_memory_ro(),
+ * but as of Dec 2020 this isn't possible on arm64.
+ */
+ addr = memremap(priv->mem->base, available, MEMREMAP_WB);
+ if (IS_ERR(addr)) {
+ dev_err(priv->dev, "Failed to remap memory region\n");
+ return PTR_ERR(addr);
+ }
+
+ count = memory_read_from_buffer(val, bytes, &off, addr, available);
+
+ memunmap(addr);
+
+ return count;
+}
+
+static int rmem_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = { };
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *mem;
+ struct rmem *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ mem = of_reserved_mem_lookup(dev->of_node);
+ if (!mem) {
+ dev_err(dev, "Failed to lookup reserved memory\n");
+ return -EINVAL;
+ }
+ priv->mem = mem;
+
+ config.dev = dev;
+ config.priv = priv;
+ config.name = "rmem";
+ config.size = mem->size;
+ config.reg_read = rmem_read;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id rmem_match[] = {
+ { .compatible = "nvmem-rmem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rmem_match);
+
+static struct platform_driver rmem_driver = {
+ .probe = rmem_probe,
+ .driver = {
+ .name = "rmem",
+ .of_match_table = rmem_match,
+ },
+};
+module_platform_driver(rmem_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Reserved Memory Based nvmem Driver");
+MODULE_LICENSE("GPL");
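[Editor's note] Once a region carrying the nvmem-rmem compatible is described under the device tree's reserved-memory node, other kernel code can read it through the usual nvmem consumer API. A minimal sketch; the function name and lookup string are hypothetical, chosen to match config.name set in rmem_probe() above:

    #include <linux/nvmem-consumer.h>

    static int blconfig_read(struct device *dev)
    {
    	struct nvmem_device *nvmem;
    	u8 buf[64];
    	int ret;

    	nvmem = nvmem_device_get(dev, "rmem");
    	if (IS_ERR(nvmem))
    		return PTR_ERR(nvmem);

    	/* read 64 bytes from the start of the reserved region */
    	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
    	nvmem_device_put(nvmem);

    	return ret < 0 ? ret : 0;
    }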
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 161a23631472..8a348f0d3c5e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1297,8 +1297,8 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it)
if (it->cells_name) {
if (!it->node) {
- pr_err("%pOF: could not find phandle\n",
- it->parent);
+ pr_err("%pOF: could not find phandle %d\n",
+ it->parent, it->phandle);
goto err;
}
diff --git a/drivers/of/device.c b/drivers/of/device.c
index aedfaaafd3e7..6cb86de404f1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -33,27 +33,6 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches,
}
EXPORT_SYMBOL(of_match_device);
-struct platform_device *of_dev_get(struct platform_device *dev)
-{
- struct device *tmp;
-
- if (!dev)
- return NULL;
- tmp = get_device(&dev->dev);
- if (tmp)
- return to_platform_device(tmp);
- else
- return NULL;
-}
-EXPORT_SYMBOL(of_dev_get);
-
-void of_dev_put(struct platform_device *dev)
-{
- if (dev)
- put_device(&dev->dev);
-}
-EXPORT_SYMBOL(of_dev_put);
-
int of_device_add(struct platform_device *ofdev)
{
BUG_ON(ofdev->dev.of_node == NULL);
@@ -162,9 +141,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus limit if we found valid dma-ranges earlier */
- if (!ret)
+ /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
+ if (!ret) {
dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
@@ -172,6 +153,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ /* Don't touch range map if it wasn't set from a valid dma-ranges */
+ if (!ret)
+ dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
@@ -181,7 +165,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
- dev->dma_range_map = map;
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index feb0f2d67fc5..dcc1dd96911a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1146,8 +1146,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
- if (nomap)
- return memblock_remove(base, size);
+ if (nomap) {
+ /*
+ * If the memory is already reserved (by another region), we
+ * should not allow it to be marked nomap.
+ */
+ if (memblock_is_region_reserved(base, size))
+ return -EBUSY;
+
+ return memblock_mark_nomap(base, size);
+ }
return memblock_reserve(base, size);
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 79bd5f5a1bf1..0da86209ddaa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -511,6 +511,7 @@ static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "qcom,rmtfs-mem" },
{ .compatible = "qcom,cmd-db" },
{ .compatible = "ramoops" },
+ { .compatible = "nvmem-rmem" },
{}
};
@@ -687,7 +688,7 @@ static int of_platform_notify(struct notifier_block *nb,
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
pdev_parent ? &pdev_parent->dev : NULL);
- of_dev_put(pdev_parent);
+ platform_device_put(pdev_parent);
if (pdev == NULL) {
pr_err("%s: failed to create for '%pOF'\n",
@@ -712,7 +713,7 @@ static int of_platform_notify(struct notifier_block *nb,
of_platform_device_destroy(&pdev->dev, &children_left);
/* and put the reference of the find */
- of_dev_put(pdev);
+ platform_device_put(pdev);
break;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 5f9eed79a8aa..5036a362f52e 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_irq.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
@@ -1102,7 +1103,9 @@ static int of_link_to_phandle(struct device_node *con_np,
* created for them.
*/
sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
- if (!sup_dev && of_node_check_flag(sup_np, OF_POPULATED)) {
+ if (!sup_dev &&
+ (of_node_check_flag(sup_np, OF_POPULATED) ||
+ sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) {
pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
con_np, sup_np);
of_node_put(sup_np);
@@ -1232,6 +1235,7 @@ static struct device_node *parse_##fname(struct device_node *np, \
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
+ bool optional;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
@@ -1244,8 +1248,6 @@ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
-DEFINE_SIMPLE_PROP(interrupts_extended, "interrupts-extended",
- "#interrupt-cells")
DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", NULL)
DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells")
DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL)
@@ -1271,19 +1273,55 @@ static struct device_node *parse_iommu_maps(struct device_node *np,
return of_parse_phandle(np, prop_name, (index * 4) + 1);
}
+static struct device_node *parse_gpio_compat(struct device_node *np,
+ const char *prop_name, int index)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, "gpio") && strcmp(prop_name, "gpios"))
+ return NULL;
+
+ /*
+ * Ignore node with gpio-hog property since its gpios are all provided
+ * by its parent.
+ */
+ if (of_find_property(np, "gpio-hog", NULL))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+static struct device_node *parse_interrupts(struct device_node *np,
+ const char *prop_name, int index)
+{
+ struct of_phandle_args sup_args;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || IS_ENABLED(CONFIG_PPC))
+ return NULL;
+
+ if (strcmp(prop_name, "interrupts") &&
+ strcmp(prop_name, "interrupts-extended"))
+ return NULL;
+
+ return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
+}
+
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
- { .parse_prop = parse_iommus, },
- { .parse_prop = parse_iommu_maps, },
+ { .parse_prop = parse_iommus, .optional = true, },
+ { .parse_prop = parse_iommu_maps, .optional = true, },
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_interrupt_parent, },
- { .parse_prop = parse_dmas, },
+ { .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
{ .parse_prop = parse_extcon, },
- { .parse_prop = parse_interrupts_extended, },
{ .parse_prop = parse_nvmem_cells, },
{ .parse_prop = parse_phys, },
{ .parse_prop = parse_wakeup_parent, },
@@ -1296,6 +1334,8 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
+ { .parse_prop = parse_gpio_compat, },
+ { .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
@@ -1332,6 +1372,11 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
/* Do not stop at first failed link, link all available suppliers. */
while (!matched && s->parse_prop) {
+ if (s->optional && !fw_devlink_is_strict()) {
+ s++;
+ continue;
+ }
+
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
matched = true;
i++;
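[Editor's note] Both new parsers lean on the standard phandle-walking helpers; for parse_gpio_compat() the core call is of_parse_phandle_with_args(), which resolves one entry of a cells-style property. A fragment with a hypothetical property value:

    struct of_phandle_args args;

    /* for "gpios = <&gpio1 7 0>;" where &gpio1 has #gpio-cells = <2> */
    if (!of_parse_phandle_with_args(np, "gpios", "#gpio-cells", 0, &args)) {
    	/* args.np is the supplier node (&gpio1), args.args = { 7, 0 } */
    	of_node_put(args.np);	/* the caller owns the node reference */
    }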
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index eb51bc147440..eb100627c186 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1286,7 +1286,7 @@ static void __init of_unittest_platform_populate(void)
unittest(pdev,
"Could not create device for node '%pOFn'\n",
grandchild);
- of_dev_put(pdev);
+ platform_device_put(pdev);
}
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 8c905aabacc0..c2689386a906 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -27,6 +27,10 @@
* various states of availability.
*/
LIST_HEAD(opp_tables);
+
+/* OPP tables with uninitialized required OPPs */
+LIST_HEAD(lazy_opp_tables);
+
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
@@ -146,6 +150,32 @@ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
/**
+ * dev_pm_opp_get_required_pstate() - Gets the required performance state
+ * corresponding to an available opp
+ * @opp: opp for which performance state has to be returned for
+ * @index: index of the required opp
+ *
+ * Return: performance state read from device tree corresponding to the
+ * required opp, else return 0.
+ */
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available ||
+ index >= opp->opp_table->required_opp_count) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp->opp_table))
+ return 0;
+
+ return opp->required_opps[index]->pstate;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
@@ -449,6 +479,55 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+/**
+ * dev_pm_opp_find_level_ceil() - search for a rounded-up level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for a rounded-up match in the opp table and returns a pointer
+ * to the matching opp if found, else returns ERR_PTR in case of error and
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->level >= *level) {
+ opp = temp_opp;
+ *level = opp->level;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
+
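[Editor's note] Callers use the new helper like the existing frequency-based ceil lookups: pass the minimum acceptable level, read back the level actually chosen, and drop the reference when done. A minimal sketch:

    unsigned int level = 3;	/* minimum acceptable level */
    struct dev_pm_opp *opp;

    opp = dev_pm_opp_find_level_ceil(dev, &level);
    if (IS_ERR(opp))
    	return PTR_ERR(opp);

    /* 'level' now holds the rounded-up value taken from the table */
    dev_pm_opp_put(opp);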
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -655,6 +734,10 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
{
int ret;
+ /* We may reach here for devices which don't change frequency */
+ if (IS_ERR(clk))
+ return 0;
+
ret = clk_set_rate(clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
@@ -666,12 +749,12 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
static int _generic_set_opp_regulator(struct opp_table *opp_table,
struct device *dev,
- unsigned long old_freq,
+ struct dev_pm_opp *opp,
unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ int scaling_down)
{
struct regulator *reg = opp_table->regulators[0];
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int ret;
/* This function only supports single regulator per device */
@@ -681,8 +764,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
}
/* Scaling up? Scale voltage before frequency */
- if (freq >= old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (!scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_voltage;
}
@@ -693,8 +776,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
goto restore_voltage;
/* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_freq;
}
@@ -712,19 +795,18 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
+ __func__, old_opp->rate);
restore_voltage:
/* This shouldn't harm even if the voltages weren't updated earlier */
- if (old_supply)
- _set_opp_voltage(dev, reg, old_supply);
+ _set_opp_voltage(dev, reg, old_opp->supplies);
return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
- struct dev_pm_opp *opp, struct device *dev, bool remove)
+ struct dev_pm_opp *opp, struct device *dev)
{
u32 avg, peak;
int i, ret;
@@ -733,7 +815,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
for (i = 0; i < opp_table->path_count; i++) {
- if (remove) {
+ if (!opp) {
avg = 0;
peak = 0;
} else {
@@ -743,7 +825,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
ret = icc_set_bw(opp_table->paths[i], avg, peak);
if (ret) {
dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
- remove ? "remove" : "set", i, ret);
+ opp ? "set" : "remove", i, ret);
return ret;
}
}
@@ -752,29 +834,31 @@ static int _set_opp_bw(const struct opp_table *opp_table,
}
static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, unsigned long old_freq,
- unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ struct device *dev, struct dev_pm_opp *opp,
+ unsigned long freq)
{
- struct dev_pm_set_opp_data *data;
+ struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int size;
- data = opp_table->set_opp_data;
+ /*
+ * We support this only if dev_pm_opp_set_regulators() was called
+ * earlier.
+ */
+ if (opp_table->sod_supplies) {
+ size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
+ memcpy(data->new_opp.supplies, opp->supplies, size);
+ data->regulator_count = opp_table->regulator_count;
+ } else {
+ data->regulator_count = 0;
+ }
+
data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
data->clk = opp_table->clk;
data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*old_supply) * opp_table->regulator_count;
- if (!old_supply)
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_supply, size);
-
+ data->old_opp.rate = old_opp->rate;
data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, new_supply, size);
return opp_table->set_opp(data);
}
@@ -809,6 +893,10 @@ static int _set_required_opps(struct device *dev,
if (!required_opp_tables)
return 0;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return -EBUSY;
+
/* Single genpd case */
if (!genpd_virt_devs)
return _set_required_opp(dev, dev, opp, 0);
@@ -841,38 +929,32 @@ static int _set_required_opps(struct device *dev,
return ret;
}
-/**
- * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
- * @dev: device for which we do this operation
- * @opp: opp based on which the bandwidth levels are to be configured
- *
- * This configures the bandwidth to the levels specified by the OPP. However
- * if the OPP specified is NULL the bandwidth levels are cleared out.
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
- struct opp_table *opp_table;
- int ret;
+ struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
+ unsigned long freq;
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
- return PTR_ERR(opp_table);
+ if (!IS_ERR(opp_table->clk)) {
+ freq = clk_get_rate(opp_table->clk);
+ opp = _find_freq_ceil(opp_table, &freq);
}
- if (opp)
- ret = _set_opp_bw(opp_table, opp, dev, false);
- else
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ /*
+	 * Unable to find the current OPP? Pick the first from the list, since
+	 * the list is in ascending order; otherwise the rest of the code would
+	 * need special checks to validate current_opp.
+ */
+ if (IS_ERR(opp)) {
+ mutex_lock(&opp_table->lock);
+ opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ }
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ opp_table->current_opp = opp;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
-static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
+static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
int ret;
@@ -887,7 +969,7 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
if (!_get_opp_count(opp_table))
return 0;
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ ret = _set_opp_bw(opp_table, NULL, dev);
if (ret)
return ret;
@@ -900,6 +982,91 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
return ret;
}
+static int _set_opp(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, unsigned long freq)
+{
+ struct dev_pm_opp *old_opp;
+ int scaling_down, ret;
+
+ if (unlikely(!opp))
+ return _disable_opp_table(dev, opp_table);
+
+ /* Find the currently set OPP if we don't know already */
+ if (unlikely(!opp_table->current_opp))
+ _find_current_opp(dev, opp_table);
+
+ old_opp = opp_table->current_opp;
+
+ /* Return early if nothing to do */
+ if (old_opp == opp && opp_table->current_rate == freq &&
+ opp_table->enabled) {
+ dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
+ __func__, opp_table->current_rate, freq, old_opp->level,
+ opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
+ opp->bandwidth ? opp->bandwidth[0].peak : 0);
+
+ scaling_down = _opp_compare_key(old_opp, opp);
+ if (scaling_down == -1)
+ scaling_down = 0;
+
+ /* Scaling up? Configure required OPPs before frequency */
+ if (!scaling_down) {
+ ret = _set_required_opps(dev, opp_table, opp, true);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (opp_table->set_opp) {
+ ret = _set_opp_custom(opp_table, dev, opp, freq);
+ } else if (opp_table->regulators) {
+ ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
+ scaling_down);
+ } else {
+ /* Only frequency scaling */
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Scaling down? Configure required OPPs after frequency */
+ if (scaling_down) {
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_required_opps(dev, opp_table, opp, false);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+ }
+
+ opp_table->enabled = true;
+ dev_pm_opp_put(old_opp);
+
+ /* Make sure current_opp doesn't get freed */
+ dev_pm_opp_get(opp);
+ opp_table->current_opp = opp;
+ opp_table->current_rate = freq;
+
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
@@ -914,118 +1081,85 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
- unsigned long freq, old_freq, temp_freq;
- struct dev_pm_opp *old_opp, *opp;
- struct clk *clk;
+ unsigned long freq = 0, temp_freq;
+ struct dev_pm_opp *opp = NULL;
int ret;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
return PTR_ERR(opp_table);
}
- if (unlikely(!target_freq)) {
- ret = _opp_set_rate_zero(dev, opp_table);
- goto put_opp_table;
- }
-
- clk = opp_table->clk;
- if (IS_ERR(clk)) {
- dev_err(dev, "%s: No clock available for the device\n",
- __func__);
- ret = PTR_ERR(clk);
- goto put_opp_table;
- }
-
- freq = clk_round_rate(clk, target_freq);
- if ((long)freq <= 0)
- freq = target_freq;
-
- old_freq = clk_get_rate(clk);
-
- /* Return early if nothing to do */
- if (opp_table->enabled && old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
- }
-
- /*
- * For IO devices which require an OPP on some platforms/SoCs
- * while just needing to scale the clock on some others
- * we look for empty OPP tables with just a clock handle and
- * scale only the clk. This makes dev_pm_opp_set_rate()
- * equivalent to a clk_set_rate()
- */
- if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- goto put_opp_table;
- }
+ if (target_freq) {
+ /*
+ * For IO devices which require an OPP on some platforms/SoCs
+ * while just needing to scale the clock on some others
+ * we look for empty OPP tables with just a clock handle and
+ * scale only the clk. This makes dev_pm_opp_set_rate()
+ * equivalent to a clk_set_rate()
+ */
+ if (!_get_opp_count(opp_table)) {
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ goto put_opp_table;
+ }
- temp_freq = old_freq;
- old_opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(old_opp)) {
- dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
- __func__, old_freq, PTR_ERR(old_opp));
- }
+ freq = clk_round_rate(opp_table->clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
- temp_freq = freq;
- opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
- __func__, freq, ret);
- goto put_old_opp;
+ /*
+ * The clock driver may support finer resolution of the
+ * frequencies than the OPP table, don't update the frequency we
+ * pass to clk_set_rate() here.
+ */
+ temp_freq = freq;
+ opp = _find_freq_ceil(opp_table, &temp_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ goto put_opp_table;
+ }
}
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
- old_freq, freq);
+ ret = _set_opp(dev, opp_table, opp, freq);
- /* Scaling up? Configure required OPPs before frequency */
- if (freq >= old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, true);
- if (ret)
- goto put_opp;
- }
-
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- }
+ if (target_freq)
+ dev_pm_opp_put(opp);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
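For reference, a minimal consumer-side sketch of this API, assuming a driver whose device already has a populated OPP table; the function name and the 800 MHz target are illustrative, not part of the patch:

#include <linux/pm_opp.h>

/* Sketch: scale a device to (at least) 800 MHz through its OPP table. */
static int example_scale(struct device *dev)
{
	int ret;

	/* Rounds via the clk framework, picks the ceil OPP and programs it */
	ret = dev_pm_opp_set_rate(dev, 800 * 1000 * 1000);
	if (ret)
		return ret;

	/* Passing 0 as the target would instead disable the OPP table */
	return 0;
}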
- /* Scaling down? Configure required OPPs after frequency */
- if (!ret && freq < old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, false);
- if (ret)
- dev_err(dev, "Failed to set required opps: %d\n", ret);
- }
+/**
+ * dev_pm_opp_set_opp() - Configure device for OPP
+ * @dev: device for which we do this operation
+ * @opp: OPP to set to
+ *
+ * This configures the device based on the properties of the OPP passed to this
+ * routine.
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
+{
+ struct opp_table *opp_table;
+ int ret;
- if (!ret) {
- ret = _set_opp_bw(opp_table, opp, dev, false);
- if (!ret)
- opp_table->enabled = true;
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ return PTR_ERR(opp_table);
}
-put_opp:
- dev_pm_opp_put(opp);
-put_old_opp:
- if (!IS_ERR(old_opp))
- dev_pm_opp_put(old_opp);
-put_opp_table:
+ ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
dev_pm_opp_put_opp_table(opp_table);
+
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
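A usage sketch for the new API: the caller looks up an OPP first and then configures the device for it. The wrapper name is illustrative; the lookup/put pattern follows the reference rules stated in the kernel-doc above:

#include <linux/pm_opp.h>

static int example_set_opp(struct device *dev, unsigned long hz)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_ceil(dev, &hz);	/* takes a reference */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);		/* program the device */
	dev_pm_opp_put(opp);				/* drop our reference */

	return ret;
}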
/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
@@ -1075,6 +1209,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
mutex_init(&opp_table->lock);
mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
+ INIT_LIST_HEAD(&opp_table->lazy);
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1087,21 +1222,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
_of_init_opp_table(opp_table, dev, index);
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, NULL);
- if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret == -EPROBE_DEFER)
- goto remove_opp_dev;
-
- dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
- }
-
/* Find interconnect path(s) for the device */
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto put_clk;
+ goto remove_opp_dev;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,9 +1238,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
-put_clk:
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
remove_opp_dev:
_remove_opp_dev(opp_dev, opp_table);
err:
@@ -1128,6 +1250,37 @@ void _get_opp_table_kref(struct opp_table *opp_table)
kref_get(&opp_table->kref);
}
+static struct opp_table *_update_opp_table_clk(struct device *dev,
+ struct opp_table *opp_table,
+ bool getclk)
+{
+ int ret;
+
+ /*
+ * Return early if we don't need to get clk or we have already tried it
+ * earlier.
+ */
+ if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ return opp_table;
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+
+ ret = PTR_ERR_OR_ZERO(opp_table->clk);
+ if (!ret)
+ return opp_table;
+
+ if (ret == -ENOENT) {
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
+ return opp_table;
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ dev_err_probe(dev, ret, "Couldn't find clock\n");
+
+ return ERR_PTR(ret);
+}
+
/*
* We need to make sure that the OPP table for a device doesn't get added twice,
* if this routine gets called in parallel with the same device pointer.
@@ -1143,7 +1296,8 @@ void _get_opp_table_kref(struct opp_table *opp_table)
* uses the opp_tables_busy flag to indicate if another creator is in the middle
* of adding an OPP table and others should wait for it to finish.
*/
-struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
+ bool getclk)
{
struct opp_table *opp_table;
@@ -1190,12 +1344,12 @@ again:
unlock:
mutex_unlock(&opp_table_lock);
- return opp_table;
+ return _update_opp_table_clk(dev, opp_table, getclk);
}
-struct opp_table *_add_opp_table(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
- return _add_opp_table_indexed(dev, 0);
+ return _add_opp_table_indexed(dev, 0, getclk);
}
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
@@ -1214,6 +1368,9 @@ static void _opp_table_kref_release(struct kref *kref)
list_del(&opp_table->node);
mutex_unlock(&opp_table_lock);
+ if (opp_table->current_opp)
+ dev_pm_opp_put(opp_table->current_opp);
+
_of_clear_opp_table(opp_table);
/* Release clk */
@@ -1508,6 +1665,21 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
return 0;
}
+void _required_opps_available(struct dev_pm_opp *opp, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (opp->required_opps[i]->available)
+ continue;
+
+ opp->available = false;
+ pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
+ __func__, opp->required_opps[i]->np, opp->rate);
+ return;
+ }
+}
+
/*
* Returns:
* 0: On success. And appropriate error message for duplicate OPPs.
@@ -1527,12 +1699,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
mutex_lock(&opp_table->lock);
head = &opp_table->opp_list;
- if (likely(!rate_not_available)) {
- ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
- if (ret) {
- mutex_unlock(&opp_table->lock);
- return ret;
- }
+ ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
+ if (ret) {
+ mutex_unlock(&opp_table->lock);
+ return ret;
}
list_add(&new_opp->node, head);
@@ -1549,6 +1719,12 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
__func__, new_opp->rate);
}
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return 0;
+
+ _required_opps_available(new_opp, opp_table->required_opp_count);
+
return 0;
}
@@ -1631,7 +1807,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1693,7 +1869,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1737,38 +1913,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
-static int _allocate_set_opp_data(struct opp_table *opp_table)
-{
- struct dev_pm_set_opp_data *data;
- int len, count = opp_table->regulator_count;
-
- if (WARN_ON(!opp_table->regulators))
- return -EINVAL;
-
- /* space for set_opp_data */
- len = sizeof(*data);
-
- /* space for old_opp.supplies and new_opp.supplies */
- len += 2 * sizeof(struct dev_pm_opp_supply) * count;
-
- data = kzalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->old_opp.supplies = (void *)(data + 1);
- data->new_opp.supplies = data->old_opp.supplies + count;
-
- opp_table->set_opp_data = data;
-
- return 0;
-}
-
-static void _free_set_opp_data(struct opp_table *opp_table)
-{
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
-}
-
/**
* dev_pm_opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
@@ -1785,11 +1929,12 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
const char * const names[],
unsigned int count)
{
+ struct dev_pm_opp_supply *supplies;
struct opp_table *opp_table;
struct regulator *reg;
int ret, i;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1826,10 +1971,19 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- /* Allocate block only once to pass to set_opp() routines */
- ret = _allocate_set_opp_data(opp_table);
- if (ret)
+ supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
+ if (!supplies) {
+ ret = -ENOMEM;
goto free_regulators;
+ }
+
+ mutex_lock(&opp_table->lock);
+ opp_table->sod_supplies = supplies;
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = supplies;
+ opp_table->set_opp_data->new_opp.supplies = supplies + count;
+ }
+ mutex_unlock(&opp_table->lock);
return opp_table;
@@ -1872,7 +2026,15 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- _free_set_opp_data(opp_table);
+ mutex_lock(&opp_table->lock);
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = NULL;
+ opp_table->set_opp_data->new_opp.supplies = NULL;
+ }
+
+ kfree(opp_table->sod_supplies);
+ opp_table->sod_supplies = NULL;
+ mutex_unlock(&opp_table->lock);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
@@ -1900,7 +2062,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1910,9 +2072,11 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
goto err;
}
- /* Already have default clk set, free it */
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
+ /* clk shouldn't be initialized at this point */
+ if (WARN_ON(opp_table->clk)) {
+ ret = -EBUSY;
+ goto err;
+ }
/* Find clk for the device */
opp_table->clk = clk_get(dev, name);
@@ -1966,12 +2130,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
+ struct dev_pm_set_opp_data *data;
struct opp_table *opp_table;
if (!set_opp)
return ERR_PTR(-EINVAL);
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1982,8 +2147,23 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
}
/* Another CPU that shares the OPP table has set the helper? */
- if (!opp_table->set_opp)
- opp_table->set_opp = set_opp;
+ if (opp_table->set_opp)
+ return opp_table;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&opp_table->lock);
+ opp_table->set_opp_data = data;
+ if (opp_table->sod_supplies) {
+ data->old_opp.supplies = opp_table->sod_supplies;
+ data->new_opp.supplies = opp_table->sod_supplies +
+ opp_table->regulator_count;
+ }
+ mutex_unlock(&opp_table->lock);
+
+ opp_table->set_opp = set_opp;
return opp_table;
}
@@ -2005,10 +2185,50 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
WARN_ON(!list_empty(&opp_table->opp_list));
opp_table->set_opp = NULL;
+
+ mutex_lock(&opp_table->lock);
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+ mutex_unlock(&opp_table->lock);
+
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+static void devm_pm_opp_unregister_set_opp_helper(void *data)
+{
+ dev_pm_opp_unregister_set_opp_helper(data);
+}
+
+/**
+ * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ *
+ * Return: pointer to 'struct opp_table' on success and errno otherwise.
+ */
+struct opp_table *
+devm_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
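A sketch of what a platform's set_opp() callback might look like, using only the dev_pm_set_opp_data fields the core fills in above (old_opp/new_opp rates and supplies, regulators, clk). This assumes a single regulator and that dev_pm_opp_set_regulators() was called, so the supplies arrays are valid; error unwinding is trimmed for brevity:

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

static int example_set_opp_helper(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new = data->new_opp.supplies;
	int ret;

	/* Scaling up: raise the supply before the clock */
	if (data->regulator_count && data->new_opp.rate >= data->old_opp.rate) {
		ret = regulator_set_voltage_triplet(data->regulators[0],
						    new[0].u_volt_min,
						    new[0].u_volt,
						    new[0].u_volt_max);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(data->clk, data->new_opp.rate);
	if (ret)
		return ret;

	/* Scaling down: lower the supply after the clock */
	if (data->regulator_count && data->new_opp.rate < data->old_opp.rate)
		ret = regulator_set_voltage_triplet(data->regulators[0],
						    new[0].u_volt_min,
						    new[0].u_volt,
						    new[0].u_volt_max);

	return ret;
}

A driver would then call devm_pm_opp_register_set_opp_helper(dev, example_set_opp_helper) at probe time, before using the table; the supplies pointers are wired up by the sod_supplies handshake shown in this hunk.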
+
static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2058,7 +2278,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
int index = 0, ret = -EINVAL;
const char **name = names;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -2144,6 +2364,97 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
+static void devm_pm_opp_detach_genpd(void *data)
+{
+ dev_pm_opp_detach_genpd(data);
+}
+
+/**
+ * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
+ * device pointer
+ * @dev: Consumer device for which the genpd is getting attached.
+ * @names: Null terminated array of pointers containing names of genpd to attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
+ *
+ * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ *
+ * Return: pointer to 'struct opp_table' on success and errno otherwise.
+ */
+struct opp_table *
+devm_pm_opp_attach_genpd(struct device *dev, const char **names,
+ struct device ***virt_devs)
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
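A probe-time sketch of the managed variant; the domain names "mx" and "cx" are illustrative and would have to match the device's power-domain-names in DT:

#include <linux/pm_opp.h>

static int example_attach_domains(struct device *dev)
{
	const char *names[] = { "mx", "cx", NULL };	/* NULL terminated */
	struct device **virt_devs;
	struct opp_table *opp_table;

	opp_table = devm_pm_opp_attach_genpd(dev, names, &virt_devs);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* virt_devs[0]/virt_devs[1] can now carry per-domain OPP votes */
	return 0;
}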
+
+/**
+ * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
+ * @src_table: OPP table which has @dst_table as one of its required OPP table.
+ * @dst_table: Required OPP table of the @src_table.
+ * @src_opp: OPP from the @src_table.
+ *
+ * This function returns the OPP (present in @dst_table) pointed out by the
+ * "required-opps" property of the @src_opp (present in @src_table).
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ *
+ * Return: pointer to 'struct dev_pm_opp' on success and errno otherwise.
+ */
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table,
+ struct dev_pm_opp *src_opp)
+{
+ struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
+ int i;
+
+ if (!src_table || !dst_table || !src_opp ||
+ !src_table->required_opp_tables)
+ return ERR_PTR(-EINVAL);
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return ERR_PTR(-EBUSY);
+
+ for (i = 0; i < src_table->required_opp_count; i++) {
+ if (src_table->required_opp_tables[i] == dst_table) {
+ mutex_lock(&src_table->lock);
+
+ list_for_each_entry(opp, &src_table->opp_list, node) {
+ if (opp == src_opp) {
+ dest_opp = opp->required_opps[i];
+ dev_pm_opp_get(dest_opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&src_table->lock);
+ break;
+ }
+ }
+
+ if (IS_ERR(dest_opp)) {
+ pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
+ src_table, dst_table);
+ }
+
+ return dest_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
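A sketch of translating a device OPP into the genpd OPP it requires; both opp_table pointers are assumed to have been obtained (and referenced) already, and the function name is illustrative:

#include <linux/pm_opp.h>

static int example_required_level(struct opp_table *src, struct opp_table *dst,
				  struct dev_pm_opp *opp, unsigned int *level)
{
	struct dev_pm_opp *required;

	required = dev_pm_opp_xlate_required_opp(src, dst, opp);
	if (IS_ERR(required))
		return PTR_ERR(required);

	*level = dev_pm_opp_get_level(required);  /* genpd performance state */
	dev_pm_opp_put(required);		  /* drop the xlate reference */

	return 0;
}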
+
/**
* dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
* @src_table: OPP table which has dst_table as one of its required OPP table.
@@ -2172,9 +2483,13 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
* and so none of them have the "required-opps" property set. Return the
* pstate of the src_table as it is in such cases.
*/
- if (!src_table->required_opp_count)
+ if (!src_table || !src_table->required_opp_count)
return pstate;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return -EBUSY;
+
for (i = 0; i < src_table->required_opp_count; i++) {
if (src_table->required_opp_tables[i]->np == dst_table->np)
break;
@@ -2226,7 +2541,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -2504,3 +2819,44 @@ void dev_pm_opp_remove_table(struct device *dev)
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
+
+/**
+ * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
+ * @dev: device for which we do this operation
+ *
+ * Sync voltage state of the OPP table regulators.
+ *
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int i, ret = 0;
+
+ /* Device may not have OPP table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ /* Regulator may not be required for the device */
+ if (unlikely(!opp_table->regulators))
+ goto put_table;
+
+ /* Nothing to sync if voltage wasn't changed */
+ if (!opp_table->enabled)
+ goto put_table;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+ ret = regulator_sync_voltage(reg);
+ if (ret)
+ break;
+ }
+put_table:
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
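A typical call site would be a driver's resume path, where the PMIC state may no longer match what the OPP core last programmed. A minimal sketch (the function name is illustrative):

#include <linux/pm_opp.h>

static int example_resume(struct device *dev)
{
	/* Safe unconditionally: returns 0 if there is no table/regulator */
	return dev_pm_opp_sync_regulators(dev);
}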
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 03cb387236c4..f480c10e6314 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -144,7 +144,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
for (i = 0; i < opp_table->required_opp_count; i++) {
if (IS_ERR_OR_NULL(required_opp_tables[i]))
- break;
+ continue;
dev_pm_opp_put_opp_table(required_opp_tables[i]);
}
@@ -153,6 +153,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
opp_table->required_opp_count = 0;
opp_table->required_opp_tables = NULL;
+ list_del(&opp_table->lazy);
}
/*
@@ -165,6 +166,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
{
struct opp_table **required_opp_tables;
struct device_node *required_np, *np;
+ bool lazy = false;
int count, i;
/* Traversing the first OPP node is all we need */
@@ -195,8 +197,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
required_opp_tables[i] = _find_table_of_opp_np(required_np);
of_node_put(required_np);
- if (IS_ERR(required_opp_tables[i]))
- goto free_required_tables;
+ if (IS_ERR(required_opp_tables[i])) {
+ lazy = true;
+ continue;
+ }
/*
* We only support genpd's OPPs in the "required-opps" for now,
@@ -210,6 +214,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
}
}
+ /* Let's do the linking later on */
+ if (lazy)
+ list_add(&opp_table->lazy, &lazy_opp_tables);
+
goto put_np;
free_required_tables:
@@ -278,14 +286,14 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
for (i = 0; i < opp_table->required_opp_count; i++) {
if (!required_opps[i])
- break;
+ continue;
/* Put the reference back */
dev_pm_opp_put(required_opps[i]);
}
- kfree(required_opps);
opp->required_opps = NULL;
+ kfree(required_opps);
}
/* Populate all required OPPs which are part of "required-opps" list */
@@ -309,6 +317,10 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
for (i = 0; i < count; i++) {
required_table = opp_table->required_opp_tables[i];
+ /* Required table not added yet, we will link later */
+ if (IS_ERR_OR_NULL(required_table))
+ continue;
+
np = of_parse_required_opp(opp->np, i);
if (unlikely(!np)) {
ret = -ENODEV;
@@ -334,6 +346,104 @@ free_required_opps:
return ret;
}
+/* Link required OPPs for an individual OPP */
+static int lazy_link_required_opps(struct opp_table *opp_table,
+ struct opp_table *new_table, int index)
+{
+ struct device_node *required_np;
+ struct dev_pm_opp *opp;
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ required_np = of_parse_required_opp(opp->np, index);
+ if (unlikely(!required_np))
+ return -ENODEV;
+
+ opp->required_opps[index] = _find_opp_of_np(new_table, required_np);
+ of_node_put(required_np);
+
+ if (!opp->required_opps[index]) {
+ pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+ __func__, opp->np, index);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/* Link required OPPs for all OPPs of the newly added OPP table */
+static void lazy_link_required_opp_table(struct opp_table *new_table)
+{
+ struct opp_table *opp_table, *temp, **required_opp_tables;
+ struct device_node *required_np, *opp_np, *required_table_np;
+ struct dev_pm_opp *opp;
+ int i, ret;
+
+ /*
+ * We only support genpd's OPPs in the "required-opps" for now,
+ * as we don't know much about other cases.
+ */
+ if (!new_table->is_genpd)
+ return;
+
+ mutex_lock(&opp_table_lock);
+
+ list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
+ bool lazy = false;
+
+ /* opp_np can't be invalid here */
+ opp_np = of_get_next_available_child(opp_table->np, NULL);
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ required_opp_tables = opp_table->required_opp_tables;
+
+ /* Required opp-table is already parsed */
+ if (!IS_ERR(required_opp_tables[i]))
+ continue;
+
+ /* required_np can't be invalid here */
+ required_np = of_parse_required_opp(opp_np, i);
+ required_table_np = of_get_parent(required_np);
+
+ of_node_put(required_table_np);
+ of_node_put(required_np);
+
+ /*
+ * Newly added table isn't the required opp-table for
+ * opp_table.
+ */
+ if (required_table_np != new_table->np) {
+ lazy = true;
+ continue;
+ }
+
+ required_opp_tables[i] = new_table;
+ _get_opp_table_kref(new_table);
+
+ /* Link OPPs now */
+ ret = lazy_link_required_opps(opp_table, new_table, i);
+ if (ret) {
+ /* The OPPs will be marked unusable */
+ lazy = false;
+ break;
+ }
+ }
+
+ of_node_put(opp_np);
+
+ /* All required opp-tables found, remove from lazy list */
+ if (!lazy) {
+ list_del(&opp_table->lazy);
+ INIT_LIST_HEAD(&opp_table->lazy);
+
+ list_for_each_entry(opp, &opp_table->opp_list, node)
+ _required_opps_available(opp, opp_table->required_opp_count);
+ }
+ }
+
+ mutex_unlock(&opp_table_lock);
+}
+
static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
struct device_node *np, *opp_np;
@@ -755,7 +865,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct device *dev, struct device_node *np)
{
struct dev_pm_opp *new_opp;
- u64 rate = 0;
u32 val;
int ret;
bool rate_not_available = false;
@@ -772,7 +881,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ dev_dbg(dev, "OPP not supported by hardware: %lu\n",
+ new_opp->rate);
goto free_opp;
}
@@ -822,10 +932,11 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
__func__, new_opp->turbo, new_opp->rate,
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
- new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
+ new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
+ new_opp->level);
/*
* Notify the changes in the availability of the operable
@@ -888,6 +999,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
}
}
+ lazy_link_required_opp_table(opp_table);
+
return 0;
remove_static_opp:
@@ -956,29 +1069,23 @@ remove_static_opp:
return ret;
}
-/**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup OPP table.
- *
- * Register the initial OPP table with the OPP library for given device.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
- * -EINVAL when invalid entries are found in opp-v2 table
- */
-int dev_pm_opp_of_add_table(struct device *dev)
+static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
{
struct opp_table *opp_table;
- int ret;
+ int ret, count;
- opp_table = _add_opp_table_indexed(dev, 0);
+ if (index) {
+ /*
+ * If only one phandle is present, then the same OPP table
+ * applies for all index requests.
+ */
+ count = of_count_phandle_with_args(dev->of_node,
+ "operating-points-v2", NULL);
+ if (count == 1)
+ index = 0;
+ }
+
+ opp_table = _add_opp_table_indexed(dev, index, getclk);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -996,15 +1103,12 @@ int dev_pm_opp_of_add_table(struct device *dev)
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
/**
- * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup OPP table.
- * @index: Index number.
*
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property.
+ * Register the initial OPP table with the OPP library for given device.
*
* Return:
* 0 On success OR
@@ -1017,34 +1121,46 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
* -ENODATA when empty 'operating-points' property is found
* -EINVAL when invalid entries are found in opp-v2 table
*/
-int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- struct opp_table *opp_table;
- int ret, count;
-
- if (index) {
- /*
- * If only one phandle is present, then the same OPP table
- * applies for all index requests.
- */
- count = of_count_phandle_with_args(dev->of_node,
- "operating-points-v2", NULL);
- if (count == 1)
- index = 0;
- }
-
- opp_table = _add_opp_table_indexed(dev, index);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- ret = _of_add_opp_table_v2(dev, opp_table);
- if (ret)
- dev_pm_opp_put_opp_table(opp_table);
+ return _of_add_table_indexed(dev, 0, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
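A minimal probe/remove sketch for this entry point (function names illustrative); the add must be balanced by dev_pm_opp_of_remove_table() at teardown:

#include <linux/pm_opp.h>

static int example_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_of_add_table(dev);	/* parse operating-points-v2 */
	if (ret)
		return ret;

	/* ... dev_pm_opp_set_rate() and friends may now be used ... */
	return 0;
}

static void example_remove(struct device *dev)
{
	dev_pm_opp_of_remove_table(dev);	/* balance the probe-time add */
}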
- return ret;
+/**
+ * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
+ * @dev: device pointer used to lookup OPP table.
+ * @index: Index number.
+ *
+ * Register the initial OPP table with the OPP library for given device only
+ * using the "operating-points-v2" property.
+ *
+ * Return: Refer to dev_pm_opp_of_add_table() for return values.
+ */
+int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return _of_add_table_indexed(dev, index, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
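A one-call sketch for the indexed variant, assuming a device node that lists several operating-points-v2 phandles (the index falls back to 0 if only one is present, per _of_add_table_indexed() above):

#include <linux/pm_opp.h>

static int example_probe_indexed(struct device *dev)
{
	/* Pick the second operating-points-v2 phandle of the node */
	return dev_pm_opp_of_add_table_indexed(dev, 1);
}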
+/**
+ * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
+ * tree without getting clk for device.
+ * @dev: device pointer used to lookup OPP table.
+ * @index: Index number.
+ *
+ * Register the initial OPP table with the OPP library for given device only
+ * using the "operating-points-v2" property. Do not try to get the clk for the
+ * device.
+ *
+ * Return: Refer to dev_pm_opp_of_add_table() for return values.
+ */
+int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
+{
+ return _of_add_table_indexed(dev, index, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
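A sketch of the noclk variant for a driver that manages its clocks itself, so the OPP core must not clk_get() the device; my_clk and the function name are illustrative:

#include <linux/clk.h>
#include <linux/pm_opp.h>

static int example_probe_noclk(struct device *dev, struct clk *my_clk)
{
	int ret;

	ret = dev_pm_opp_of_add_table_noclk(dev, 0);
	if (ret)
		return ret;

	/* Frequency changes are then driven by the driver through my_clk */
	return clk_prepare_enable(my_clk);
}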
+
/* CPU device specific helpers */
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 4ced7ffa8158..50fb9dced3c5 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -26,7 +26,7 @@ struct regulator;
/* Lock to allow exclusive modification to the device and opp lists */
extern struct mutex opp_table_lock;
-extern struct list_head opp_tables;
+extern struct list_head opp_tables, lazy_opp_tables;
/*
* Internal data structure organization with the OPP layer library is as
@@ -135,6 +135,8 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
+ * @current_rate: Currently configured frequency.
+ * @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
* @genpd_virt_devs: List of virtual devices for multiple genpd support.
@@ -155,6 +157,7 @@ enum opp_table_access {
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
+ * @sod_supplies: Set opp data supplies
* @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
@@ -166,7 +169,7 @@ enum opp_table_access {
* meant for book keeping and private to OPP library.
*/
struct opp_table {
- struct list_head node;
+ struct list_head node, lazy;
struct blocking_notifier_head head;
struct list_head dev_list;
@@ -182,6 +185,8 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
+ unsigned long current_rate;
+ struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
struct mutex genpd_virt_dev_lock;
@@ -202,6 +207,7 @@ struct opp_table {
bool is_genpd;
int (*set_opp)(struct dev_pm_set_opp_data *data);
+ struct dev_pm_opp_supply *sod_supplies;
struct dev_pm_set_opp_data *set_opp_data;
#ifdef CONFIG_DEBUG_FS
@@ -223,9 +229,14 @@ int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
-struct opp_table *_add_opp_table(struct device *dev);
-struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
void _put_opp_list_kref(struct opp_table *opp_table);
+void _required_opps_available(struct dev_pm_opp *opp, int count);
+
+static inline bool lazy_linking_pending(struct opp_table *opp_table)
+{
+ return unlikely(!list_empty(&opp_table->lazy));
+}
#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
deleted file mode 100644
index cc917865f13a..000000000000
--- a/drivers/oprofile/buffer_sync.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/**
- * @file buffer_sync.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Barry Kasindorf
- * @author Robert Richter <robert.richter@amd.com>
- *
- * This is the core of the buffer management. Each
- * CPU buffer is processed and entered into the
- * global event buffer. Such processing is necessary
- * in several circumstances, mentioned below.
- *
- * The processing does the job of converting the
- * transitory EIP value into a persistent dentry/offset
- * value that the profiler can record at its leisure.
- *
- * See fs/dcookies.c for a description of the dentry/offset
- * objects.
- */
-
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/dcookies.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
-#include <linux/gfp.h>
-
-#include "oprofile_stats.h"
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-
-static LIST_HEAD(dying_tasks);
-static LIST_HEAD(dead_tasks);
-static cpumask_var_t marked_cpus;
-static DEFINE_SPINLOCK(task_mortuary);
-static void process_task_mortuary(void);
-
-/* Take ownership of the task struct and place it on the
- * list for processing. Only after two full buffer syncs
- * does the task eventually get freed, because by then
- * we are sure we will not reference it again.
- * Can be invoked from softirq via RCU callback due to
- * call_rcu() of the task struct, hence the _irqsave.
- */
-static int
-task_free_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- unsigned long flags;
- struct task_struct *task = data;
- spin_lock_irqsave(&task_mortuary, flags);
- list_add(&task->tasks, &dying_tasks);
- spin_unlock_irqrestore(&task_mortuary, flags);
- return NOTIFY_OK;
-}
-
-
-/* The task is on its way out. A sync of the buffer means we can catch
- * any remaining samples for this task.
- */
-static int
-task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
-}
-
-
-/* The task is about to try a do_munmap(). We peek at what it's going to
- * do, and if it's an executable region, process the samples first, so
- * we don't lose any. This does not have to be exact, it's a QoI issue
- * only.
- */
-static int
-munmap_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- unsigned long addr = (unsigned long)data;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *mpnt;
-
- mmap_read_lock(mm);
-
- mpnt = find_vma(mm, addr);
- if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- mmap_read_unlock(mm);
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
- }
-
- mmap_read_unlock(mm);
- return 0;
-}
-
-
-/* We need to be told about new modules so we don't attribute to a previously
- * loaded module, or drop the samples on the floor.
- */
-static int
-module_load_notify(struct notifier_block *self, unsigned long val, void *data)
-{
-#ifdef CONFIG_MODULES
- if (val != MODULE_STATE_COMING)
- return NOTIFY_DONE;
-
- /* FIXME: should we process all CPU buffers ? */
- mutex_lock(&buffer_mutex);
- add_event_entry(ESCAPE_CODE);
- add_event_entry(MODULE_LOADED_CODE);
- mutex_unlock(&buffer_mutex);
-#endif
- return NOTIFY_OK;
-}
-
-
-static struct notifier_block task_free_nb = {
- .notifier_call = task_free_notify,
-};
-
-static struct notifier_block task_exit_nb = {
- .notifier_call = task_exit_notify,
-};
-
-static struct notifier_block munmap_nb = {
- .notifier_call = munmap_notify,
-};
-
-static struct notifier_block module_load_nb = {
- .notifier_call = module_load_notify,
-};
-
-static void free_all_tasks(void)
-{
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-int sync_start(void)
-{
- int err;
-
- if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
- return -ENOMEM;
-
- err = task_handoff_register(&task_free_nb);
- if (err)
- goto out1;
- err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
- if (err)
- goto out2;
- err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
- if (err)
- goto out3;
- err = register_module_notifier(&module_load_nb);
- if (err)
- goto out4;
-
- start_cpu_work();
-
-out:
- return err;
-out4:
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
-out3:
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
-out2:
- task_handoff_unregister(&task_free_nb);
- free_all_tasks();
-out1:
- free_cpumask_var(marked_cpus);
- goto out;
-}
-
-
-void sync_stop(void)
-{
- end_cpu_work();
- unregister_module_notifier(&module_load_nb);
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
- task_handoff_unregister(&task_free_nb);
- barrier(); /* do all of the above first */
-
- flush_cpu_work();
-
- free_all_tasks();
- free_cpumask_var(marked_cpus);
-}
-
-
-/* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer). */
-static inline unsigned long fast_get_dcookie(const struct path *path)
-{
- unsigned long cookie;
-
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
-}
-
-
-/* Look up the dcookie for the task's mm->exe_file,
- * which corresponds loosely to "application name". This is
- * not strictly necessary but allows oprofile to associate
- * shared-library samples with particular applications
- */
-static unsigned long get_exec_dcookie(struct mm_struct *mm)
-{
- unsigned long cookie = NO_COOKIE;
- struct file *exe_file;
-
- if (!mm)
- goto done;
-
- exe_file = get_mm_exe_file(mm);
- if (!exe_file)
- goto done;
-
- cookie = fast_get_dcookie(&exe_file->f_path);
- fput(exe_file);
-done:
- return cookie;
-}
-
-
-/* Convert the EIP value of a sample into a persistent dentry/offset
- * pair that can then be added to the global event buffer. We make
- * sure to do this lookup before a mm->mmap modification happens so
- * we don't lose track.
- *
- * The caller must ensure the mm is not nil (ie: not a kernel thread).
- */
-static unsigned long
-lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
-{
- unsigned long cookie = NO_COOKIE;
- struct vm_area_struct *vma;
-
- mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-
- if (addr < vma->vm_start || addr >= vma->vm_end)
- continue;
-
- if (vma->vm_file) {
- cookie = fast_get_dcookie(&vma->vm_file->f_path);
- *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
- vma->vm_start;
- } else {
- /* must be an anonymous map */
- *offset = addr;
- }
-
- break;
- }
-
- if (!vma)
- cookie = INVALID_COOKIE;
- mmap_read_unlock(mm);
-
- return cookie;
-}
-
-static unsigned long last_cookie = INVALID_COOKIE;
-
-static void add_cpu_switch(int i)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CPU_SWITCH_CODE);
- add_event_entry(i);
- last_cookie = INVALID_COOKIE;
-}
-
-static void add_kernel_ctx_switch(unsigned int in_kernel)
-{
- add_event_entry(ESCAPE_CODE);
- if (in_kernel)
- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
- else
- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
-}
-
-static void
-add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_SWITCH_CODE);
- add_event_entry(task->pid);
- add_event_entry(cookie);
- /* Another code for daemon back-compat */
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_TGID_CODE);
- add_event_entry(task->tgid);
-}
-
-
-static void add_cookie_switch(unsigned long cookie)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(COOKIE_SWITCH_CODE);
- add_event_entry(cookie);
-}
-
-
-static void add_trace_begin(void)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(TRACE_BEGIN_CODE);
-}
-
-static void add_data(struct op_entry *entry, struct mm_struct *mm)
-{
- unsigned long code, pc, val;
- unsigned long cookie;
- off_t offset;
-
- if (!op_cpu_buffer_get_data(entry, &code))
- return;
- if (!op_cpu_buffer_get_data(entry, &pc))
- return;
- if (!op_cpu_buffer_get_size(entry))
- return;
-
- if (mm) {
- cookie = lookup_dcookie(mm, pc, &offset);
-
- if (cookie == NO_COOKIE)
- offset = pc;
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- offset = pc;
- }
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
- } else
- offset = pc;
-
- add_event_entry(ESCAPE_CODE);
- add_event_entry(code);
- add_event_entry(offset); /* Offset from Dcookie */
-
- while (op_cpu_buffer_get_data(entry, &val))
- add_event_entry(val);
-}
-
-static inline void add_sample_entry(unsigned long offset, unsigned long event)
-{
- add_event_entry(offset);
- add_event_entry(event);
-}
-
-
-/*
- * Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace. Return 0 on failure.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
- unsigned long cookie;
- off_t offset;
-
- if (in_kernel) {
- add_sample_entry(s->eip, s->event);
- return 1;
- }
-
- /* add userspace sample */
-
- if (!mm) {
- atomic_inc(&oprofile_stats.sample_lost_no_mm);
- return 0;
- }
-
- cookie = lookup_dcookie(mm, s->eip, &offset);
-
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return 0;
- }
-
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
-
- add_sample_entry(offset, s->event);
-
- return 1;
-}
-
-
-static void release_mm(struct mm_struct *mm)
-{
- if (!mm)
- return;
- mmput(mm);
-}
-
-static inline int is_code(unsigned long val)
-{
- return val == ESCAPE_CODE;
-}
-
-
-/* Move tasks along towards death. Any tasks on dead_tasks
- * will definitely have no remaining references in any
- * CPU buffers at this point, because we use two lists,
- * and to have reached the list, it must have gone through
- * one full sync already.
- */
-static void process_task_mortuary(void)
-{
- unsigned long flags;
- LIST_HEAD(local_dead_tasks);
- struct task_struct *task;
- struct task_struct *ttask;
-
- spin_lock_irqsave(&task_mortuary, flags);
-
- list_splice_init(&dead_tasks, &local_dead_tasks);
- list_splice_init(&dying_tasks, &dead_tasks);
-
- spin_unlock_irqrestore(&task_mortuary, flags);
-
- list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
- list_del(&task->tasks);
- free_task(task);
- }
-}
-
-
-static void mark_done(int cpu)
-{
- int i;
-
- cpumask_set_cpu(cpu, marked_cpus);
-
- for_each_online_cpu(i) {
- if (!cpumask_test_cpu(i, marked_cpus))
- return;
- }
-
- /* All CPUs have been processed at least once,
- * we can process the mortuary once
- */
- process_task_mortuary();
-
- cpumask_clear(marked_cpus);
-}
-
-
-/* FIXME: this is not sufficient if we implement syscall barrier backtrace
- * traversal, the code switch to sb_sample_start at first kernel enter/exit
- * switch so we need a fifth state and some special handling in sync_buffer()
- */
-typedef enum {
- sb_bt_ignore = -2,
- sb_buffer_start,
- sb_bt_start,
- sb_sample_start,
-} sync_buffer_state;
-
-/* Sync one of the CPU's buffers into the global event buffer.
- * Here we need to go through each batch of samples punctuated
- * by context switch notes, taking the task's mmap_lock and doing
- * lookup in task->mm->mmap to convert EIP into dcookie/offset
- * value.
- */
-void sync_buffer(int cpu)
-{
- struct mm_struct *mm = NULL;
- struct mm_struct *oldmm;
- unsigned long val;
- struct task_struct *new;
- unsigned long cookie = 0;
- int in_kernel = 1;
- sync_buffer_state state = sb_buffer_start;
- unsigned int i;
- unsigned long available;
- unsigned long flags;
- struct op_entry entry;
- struct op_sample *sample;
-
- mutex_lock(&buffer_mutex);
-
- add_cpu_switch(cpu);
-
- op_cpu_buffer_reset(cpu);
- available = op_cpu_buffer_entries(cpu);
-
- for (i = 0; i < available; ++i) {
- sample = op_cpu_buffer_read_entry(&entry, cpu);
- if (!sample)
- break;
-
- if (is_code(sample->eip)) {
- flags = sample->event;
- if (flags & TRACE_BEGIN) {
- state = sb_bt_start;
- add_trace_begin();
- }
- if (flags & KERNEL_CTX_SWITCH) {
- /* kernel/userspace switch */
- in_kernel = flags & IS_KERNEL;
- if (state == sb_buffer_start)
- state = sb_sample_start;
- add_kernel_ctx_switch(flags & IS_KERNEL);
- }
- if (flags & USER_CTX_SWITCH
- && op_cpu_buffer_get_data(&entry, &val)) {
- /* userspace context switch */
- new = (struct task_struct *)val;
- oldmm = mm;
- release_mm(oldmm);
- mm = get_task_mm(new);
- if (mm != oldmm)
- cookie = get_exec_dcookie(mm);
- add_user_ctx_switch(new, cookie);
- }
- if (op_cpu_buffer_get_size(&entry))
- add_data(&entry, mm);
- continue;
- }
-
- if (state < sb_bt_start)
- /* ignore sample */
- continue;
-
- if (add_sample(mm, sample, in_kernel))
- continue;
-
- /* ignore backtraces if failed to add a sample */
- if (state == sb_bt_start) {
- state = sb_bt_ignore;
- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
- }
- }
- release_mm(mm);
-
- mark_done(cpu);
-
- mutex_unlock(&buffer_mutex);
-}
-
-/* The function can be used to add a buffer worth of data directly to
- * the kernel buffer. The buffer is assumed to be a circular buffer.
- * Take the entries from index start and end at index end, wrapping
- * at max_entries.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max)
-{
- int i;
-
- i = start;
-
- mutex_lock(&buffer_mutex);
- while (i != stop) {
- add_event_entry(buf[i++]);
-
- if (i >= max)
- i = 0;
- }
-
- mutex_unlock(&buffer_mutex);
-}
-
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
deleted file mode 100644
index 3110732c1835..000000000000
--- a/drivers/oprofile/buffer_sync.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * @file buffer_sync.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_BUFFER_SYNC_H
-#define OPROFILE_BUFFER_SYNC_H
-
-/* add the necessary profiling hooks */
-int sync_start(void);
-
-/* remove the hooks */
-void sync_stop(void);
-
-/* sync the given CPU's buffer */
-void sync_buffer(int cpu);
-
-#endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
deleted file mode 100644
index 9210a95cb4e6..000000000000
--- a/drivers/oprofile/cpu_buffer.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/**
- * @file cpu_buffer.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Robert Richter <robert.richter@amd.com>
- *
- * Each CPU has a local buffer that stores PC value/event
- * pairs. We also log context switches when we notice them.
- * Eventually each CPU's buffer is processed into the global
- * event buffer by sync_buffer().
- *
- * We use a local buffer for two reasons: an NMI or similar
- * interrupt cannot synchronise, and high sampling rates
- * would lead to catastrophic global synchronisation if
- * a global buffer was used.
- */
-
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-
-#include <asm/ptrace.h>
-
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-#include "oprof.h"
-
-#define OP_BUFFER_FLAGS 0
-
-static struct trace_buffer *op_ring_buffer;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
-
-static void wq_sync_buffer(struct work_struct *work);
-
-#define DEFAULT_TIMER_EXPIRE (HZ / 10)
-static int work_enabled;
-
-unsigned long oprofile_get_cpu_buffer_size(void)
-{
- return oprofile_cpu_buffer_size;
-}
-
-void oprofile_cpu_buffer_inc_smpl_lost(void)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- cpu_buf->sample_lost_overflow++;
-}
-
-void free_cpu_buffers(void)
-{
- if (op_ring_buffer)
- ring_buffer_free(op_ring_buffer);
- op_ring_buffer = NULL;
-}
-
-#define RB_EVENT_HDR_SIZE 4
-
-int alloc_cpu_buffers(void)
-{
- int i;
-
- unsigned long buffer_size = oprofile_cpu_buffer_size;
- unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
- RB_EVENT_HDR_SIZE);
-
- op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer)
- goto fail;
-
- for_each_possible_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- b->last_task = NULL;
- b->last_is_kernel = -1;
- b->tracing = 0;
- b->buffer_size = buffer_size;
- b->sample_received = 0;
- b->sample_lost_overflow = 0;
- b->backtrace_aborted = 0;
- b->sample_invalid_eip = 0;
- b->cpu = i;
- INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
- }
- return 0;
-
-fail:
- free_cpu_buffers();
- return -ENOMEM;
-}
-
-void start_cpu_work(void)
-{
- int i;
-
- work_enabled = 1;
-
- for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- /*
- * Spread the work by 1 jiffy per cpu so they don't all
- * fire at once.
- */
- schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
- }
-}
-
-void end_cpu_work(void)
-{
- work_enabled = 0;
-}
-
-void flush_cpu_work(void)
-{
- int i;
-
- for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- /* these works are per-cpu, no need for flush_sync */
- flush_delayed_work(&b->work);
- }
-}
-
-/*
- * This function prepares the cpu buffer to write a sample.
- *
- * Struct op_entry is used during operations on the ring buffer while
- * struct op_sample contains the data that is stored in the ring
- * buffer. The struct op_entry passed in may be uninitialized. The
- * function reserves a data array of the given size. Call
- * op_cpu_buffer_write_commit() after preparing the sample. On error
- * a NULL pointer is returned, otherwise a pointer to the reserved
- * sample.
- */
-struct op_sample
-*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
-{
- entry->event = ring_buffer_lock_reserve
- (op_ring_buffer, sizeof(struct op_sample) +
- size * sizeof(entry->sample->data[0]));
- if (!entry->event)
- return NULL;
- entry->sample = ring_buffer_event_data(entry->event);
- entry->size = size;
- entry->data = entry->sample->data;
-
- return entry->sample;
-}
-
-int op_cpu_buffer_write_commit(struct op_entry *entry)
-{
- return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
-}
-
-struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
-{
- struct ring_buffer_event *e;
- e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
- if (!e)
- return NULL;
-
- entry->event = e;
- entry->sample = ring_buffer_event_data(e);
- entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
- / sizeof(entry->sample->data[0]);
- entry->data = entry->sample->data;
- return entry->sample;
-}
-
-unsigned long op_cpu_buffer_entries(int cpu)
-{
- return ring_buffer_entries_cpu(op_ring_buffer, cpu);
-}
-
-static int
-op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
- int is_kernel, struct task_struct *task)
-{
- struct op_entry entry;
- struct op_sample *sample;
- unsigned long flags;
- int size;
-
- flags = 0;
-
- if (backtrace)
- flags |= TRACE_BEGIN;
-
- /* notice a switch from user->kernel or vice versa */
- is_kernel = !!is_kernel;
- if (cpu_buf->last_is_kernel != is_kernel) {
- cpu_buf->last_is_kernel = is_kernel;
- flags |= KERNEL_CTX_SWITCH;
- if (is_kernel)
- flags |= IS_KERNEL;
- }
-
- /* notice a task switch */
- if (cpu_buf->last_task != task) {
- cpu_buf->last_task = task;
- flags |= USER_CTX_SWITCH;
- }
-
- if (!flags)
- /* nothing to do */
- return 0;
-
- if (flags & USER_CTX_SWITCH)
- size = 1;
- else
- size = 0;
-
- sample = op_cpu_buffer_write_reserve(&entry, size);
- if (!sample)
- return -ENOMEM;
-
- sample->eip = ESCAPE_CODE;
- sample->event = flags;
-
- if (size)
- op_cpu_buffer_add_data(&entry, (unsigned long)task);
-
- op_cpu_buffer_write_commit(&entry);
-
- return 0;
-}
-
-static inline int
-op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
- unsigned long pc, unsigned long event)
-{
- struct op_entry entry;
- struct op_sample *sample;
-
- sample = op_cpu_buffer_write_reserve(&entry, 0);
- if (!sample)
- return -ENOMEM;
-
- sample->eip = pc;
- sample->event = event;
-
- return op_cpu_buffer_write_commit(&entry);
-}
-
-/*
- * This must be safe from any context.
- *
- * is_kernel is needed because on some architectures you cannot
- * tell if you are in kernel or user space simply by looking at
- * pc. We tag this in the buffer by generating kernel enter/exit
- * events whenever is_kernel changes
- */
-static int
-log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
- unsigned long backtrace, int is_kernel, unsigned long event,
- struct task_struct *task)
-{
- struct task_struct *tsk = task ? task : current;
- cpu_buf->sample_received++;
-
- if (pc == ESCAPE_CODE) {
- cpu_buf->sample_invalid_eip++;
- return 0;
- }
-
- if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
- goto fail;
-
- if (op_add_sample(cpu_buf, pc, event))
- goto fail;
-
- return 1;
-
-fail:
- cpu_buf->sample_lost_overflow++;
- return 0;
-}
-
-static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
-{
- cpu_buf->tracing = 1;
-}
-
-static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
-{
- cpu_buf->tracing = 0;
-}
-
-static inline void
-__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
- unsigned long backtrace = oprofile_backtrace_depth;
-
- /*
- * if log_sample() fails we can't backtrace, since we lost the
- * source of this event
- */
- if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
- /* failed */
- return;
-
- if (!backtrace)
- return;
-
- oprofile_begin_trace(cpu_buf);
- oprofile_ops.backtrace(regs, backtrace);
- oprofile_end_trace(cpu_buf);
-}
-
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task)
-{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
-}
-
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel)
-{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
-}
-
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
-{
- int is_kernel;
- unsigned long pc;
-
- if (likely(regs)) {
- is_kernel = !user_mode(regs);
- pc = profile_pc(regs);
- } else {
- is_kernel = 0; /* This value will not be used */
- pc = ESCAPE_CODE; /* as this causes an early return. */
- }
-
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
-}
-
-/*
- * Add samples with data to the ring buffer.
- *
- * Use oprofile_add_data(&entry, val) to add data and
- * oprofile_write_commit(&entry) to commit the sample.
- */
-void
-oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
- unsigned long pc, int code, int size)
-{
- struct op_sample *sample;
- int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- cpu_buf->sample_received++;
-
- /* no backtraces for samples with data */
- if (op_add_code(cpu_buf, 0, is_kernel, current))
- goto fail;
-
- sample = op_cpu_buffer_write_reserve(entry, size + 2);
- if (!sample)
- goto fail;
- sample->eip = ESCAPE_CODE;
- sample->event = 0; /* no flags */
-
- op_cpu_buffer_add_data(entry, code);
- op_cpu_buffer_add_data(entry, pc);
-
- return;
-
-fail:
- entry->event = NULL;
- cpu_buf->sample_lost_overflow++;
-}
-
-int oprofile_add_data(struct op_entry *entry, unsigned long val)
-{
- if (!entry->event)
- return 0;
- return op_cpu_buffer_add_data(entry, val);
-}
-
-int oprofile_add_data64(struct op_entry *entry, u64 val)
-{
- if (!entry->event)
- return 0;
- if (op_cpu_buffer_get_size(entry) < 2)
- /*
- * the function returns 0 to indicate that the buffer
- * is too small, even if some space is left
- */
- return 0;
- if (!op_cpu_buffer_add_data(entry, (u32)val))
- return 0;
- return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
-}
-
-int oprofile_write_commit(struct op_entry *entry)
-{
- if (!entry->event)
- return -EINVAL;
- return op_cpu_buffer_write_commit(entry);
-}
-
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
- log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
-}
-
-void oprofile_add_trace(unsigned long pc)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- if (!cpu_buf->tracing)
- return;
-
- /*
- * a broken frame can give an eip with the same value as the
- * escape code; abort the trace if we see it
- */
- if (pc == ESCAPE_CODE)
- goto fail;
-
- if (op_add_sample(cpu_buf, pc, 0))
- goto fail;
-
- return;
-fail:
- cpu_buf->tracing = 0;
- cpu_buf->backtrace_aborted++;
- return;
-}
-
-/*
- * This serves to avoid cpu buffer overflow, and makes sure
- * the task mortuary progresses
- *
- * By using schedule_delayed_work_on and then schedule_delayed_work
- * we guarantee this will stay on the correct cpu
- */
-static void wq_sync_buffer(struct work_struct *work)
-{
- struct oprofile_cpu_buffer *b =
- container_of(work, struct oprofile_cpu_buffer, work.work);
- if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
- cancel_delayed_work(&b->work);
- return;
- }
- sync_buffer(b->cpu);
-
- /* don't re-add the work if we're shutting down */
- if (work_enabled)
- schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
-}
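The reserve/commit contract documented above boils down to: reserve, fill eip/event plus any extra data words, then commit. A short sketch of a hypothetical writer (not part of this patch):

#include <linux/errno.h>
#include "cpu_buffer.h"

/* Reserve room for two extra data words, fill the sample, commit. */
static int example_write_sample(unsigned long pc, unsigned long event,
				unsigned long d0, unsigned long d1)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;
	op_cpu_buffer_add_data(&entry, d0);
	op_cpu_buffer_add_data(&entry, d1);

	return op_cpu_buffer_write_commit(&entry);
}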
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
deleted file mode 100644
index 31478c0cff87..000000000000
--- a/drivers/oprofile/cpu_buffer.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * @file cpu_buffer.h
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#ifndef OPROFILE_CPU_BUFFER_H
-#define OPROFILE_CPU_BUFFER_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/cache.h>
-#include <linux/sched.h>
-#include <linux/ring_buffer.h>
-
-struct task_struct;
-
-int alloc_cpu_buffers(void);
-void free_cpu_buffers(void);
-
-void start_cpu_work(void);
-void end_cpu_work(void);
-void flush_cpu_work(void);
-
-/* The CPU buffer is composed of such entries (which are
- * also used for context-switch notes)
- */
-struct op_sample {
- unsigned long eip;
- unsigned long event;
- unsigned long data[];
-};
-
-struct op_entry;
-
-struct oprofile_cpu_buffer {
- unsigned long buffer_size;
- struct task_struct *last_task;
- int last_is_kernel;
- int tracing;
- unsigned long sample_received;
- unsigned long sample_lost_overflow;
- unsigned long backtrace_aborted;
- unsigned long sample_invalid_eip;
- int cpu;
- struct delayed_work work;
-};
-
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
-
-/*
- * Resets the cpu buffer to a sane state.
- *
- * Reset these fields to invalid values; the next sample collected
- * will repopulate the buffer with proper values.
- */
-static inline void op_cpu_buffer_reset(int cpu)
-{
- struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
-
- cpu_buf->last_is_kernel = -1;
- cpu_buf->last_task = NULL;
-}
-
-/*
- * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
- * called only if op_cpu_buffer_write_reserve() did not return NULL or
- * entry->event != NULL, otherwise entry->size or entry->event will be
- * used uninitialized.
- */
-
-struct op_sample
-*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
-int op_cpu_buffer_write_commit(struct op_entry *entry);
-struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
-unsigned long op_cpu_buffer_entries(int cpu);
-
-/* returns the remaining free size of data in the entry */
-static inline
-int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
-{
- if (!entry->size)
- return 0;
- *entry->data = val;
- entry->size--;
- entry->data++;
- return entry->size;
-}
-
-/* returns the size of data in the entry */
-static inline
-int op_cpu_buffer_get_size(struct op_entry *entry)
-{
- return entry->size;
-}
-
-/* returns 0 if empty or the size of data including the current value */
-static inline
-int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
-{
- int size = entry->size;
- if (!size)
- return 0;
- *val = *entry->data;
- entry->size--;
- entry->data++;
- return size;
-}
-
-/* extra data flags */
-#define KERNEL_CTX_SWITCH (1UL << 0)
-#define IS_KERNEL (1UL << 1)
-#define TRACE_BEGIN (1UL << 2)
-#define USER_CTX_SWITCH (1UL << 3)
-
-#endif /* OPROFILE_CPU_BUFFER_H */
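On the consumer side, buffer_sync.c drained these entries with op_cpu_buffer_read_entry() and op_cpu_buffer_get_data(). A simplified sketch of that read loop (assumes buffer_mutex is held around add_event_entry(); not the literal removed code):

#include "cpu_buffer.h"
#include "event_buffer.h"

/* Drain one CPU's buffer, forwarding each data word. */
static void example_drain_cpu(int cpu)
{
	struct op_entry entry;
	unsigned long val;

	while (op_cpu_buffer_read_entry(&entry, cpu)) {
		/* get_data() returns 0 once no data words remain */
		while (op_cpu_buffer_get_data(&entry, &val))
			add_event_entry(val);
	}
}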
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
deleted file mode 100644
index 6c9edc8bbc95..000000000000
--- a/drivers/oprofile/event_buffer.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file event_buffer.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- *
- * This is the global event buffer that the user-space
- * daemon reads from. The event buffer is an untyped array
- * of unsigned longs. Entries are prefixed by the
- * escape value ESCAPE_CODE followed by an identifying code.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/oprofile.h>
-#include <linux/sched/signal.h>
-#include <linux/capability.h>
-#include <linux/dcookies.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "oprof.h"
-#include "event_buffer.h"
-#include "oprofile_stats.h"
-
-DEFINE_MUTEX(buffer_mutex);
-
-static unsigned long buffer_opened;
-static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static unsigned long *event_buffer;
-static unsigned long buffer_size;
-static unsigned long buffer_watershed;
-static size_t buffer_pos;
-/* atomic_t because wait_event checks it outside of buffer_mutex */
-static atomic_t buffer_ready = ATOMIC_INIT(0);
-
-/*
- * Add an entry to the event buffer. When we get near to the end we
- * wake up the process sleeping on the read() of the file. To protect
- * the event_buffer, this function may only be called while
- * buffer_mutex is held.
- */
-void add_event_entry(unsigned long value)
-{
- /*
- * This shouldn't happen since all workqueues or handlers are
- * canceled or flushed before the event buffer is freed.
- */
- if (!event_buffer) {
- WARN_ON_ONCE(1);
- return;
- }
-
- if (buffer_pos == buffer_size) {
- atomic_inc(&oprofile_stats.event_lost_overflow);
- return;
- }
-
- event_buffer[buffer_pos] = value;
- if (++buffer_pos == buffer_size - buffer_watershed) {
- atomic_set(&buffer_ready, 1);
- wake_up(&buffer_wait);
- }
-}
-
-
-/* Wake up the waiting process, if any. This happens
- * on "echo 0 >/dev/oprofile/enable" so the daemon
- * processes the data remaining in the event buffer.
- */
-void wake_up_buffer_waiter(void)
-{
- mutex_lock(&buffer_mutex);
- atomic_set(&buffer_ready, 1);
- wake_up(&buffer_wait);
- mutex_unlock(&buffer_mutex);
-}
-
-
-int alloc_event_buffer(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- buffer_size = oprofile_buffer_size;
- buffer_watershed = oprofile_buffer_watershed;
- raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-
- if (buffer_watershed >= buffer_size)
- return -EINVAL;
-
- buffer_pos = 0;
- event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
- if (!event_buffer)
- return -ENOMEM;
-
- return 0;
-}
-
-
-void free_event_buffer(void)
-{
- mutex_lock(&buffer_mutex);
- vfree(event_buffer);
- buffer_pos = 0;
- event_buffer = NULL;
- mutex_unlock(&buffer_mutex);
-}
-
-
-static int event_buffer_open(struct inode *inode, struct file *file)
-{
- int err = -EPERM;
-
- if (!perfmon_capable())
- return -EPERM;
-
- if (test_and_set_bit_lock(0, &buffer_opened))
- return -EBUSY;
-
- /* Register as a user of dcookies
- * to ensure they persist for the lifetime of
- * the open event file
- */
- err = -EINVAL;
- file->private_data = dcookie_register();
- if (!file->private_data)
- goto out;
-
- if ((err = oprofile_setup()))
- goto fail;
-
- /* NB: the actual start happens from userspace
- * echo 1 >/dev/oprofile/enable
- */
-
- return nonseekable_open(inode, file);
-
-fail:
- dcookie_unregister(file->private_data);
-out:
- __clear_bit_unlock(0, &buffer_opened);
- return err;
-}
-
-
-static int event_buffer_release(struct inode *inode, struct file *file)
-{
- oprofile_stop();
- oprofile_shutdown();
- dcookie_unregister(file->private_data);
- buffer_pos = 0;
- atomic_set(&buffer_ready, 0);
- __clear_bit_unlock(0, &buffer_opened);
- return 0;
-}
-
-
-static ssize_t event_buffer_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- int retval = -EINVAL;
- size_t const max = buffer_size * sizeof(unsigned long);
-
- /* handling partial reads is more trouble than it's worth */
- if (count != max || *offset)
- return -EINVAL;
-
- wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
-
- if (signal_pending(current))
- return -EINTR;
-
- /* can't currently happen */
- if (!atomic_read(&buffer_ready))
- return -EAGAIN;
-
- mutex_lock(&buffer_mutex);
-
- /* May happen if the buffer is freed during pending reads. */
- if (!event_buffer) {
- retval = -EINTR;
- goto out;
- }
-
- atomic_set(&buffer_ready, 0);
-
- retval = -EFAULT;
-
- count = buffer_pos * sizeof(unsigned long);
-
- if (copy_to_user(buf, event_buffer, count))
- goto out;
-
- retval = count;
- buffer_pos = 0;
-
-out:
- mutex_unlock(&buffer_mutex);
- return retval;
-}
-
-const struct file_operations event_buffer_fops = {
- .open = event_buffer_open,
- .release = event_buffer_release,
- .read = event_buffer_read,
- .llseek = no_llseek,
-};
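The escape-code framing described in the file header means each record in the untyped stream starts with ESCAPE_CODE and an identifying code, followed by its payload. An illustrative producer (CPU_SWITCH_CODE comes from <linux/oprofile.h>; buffer_mutex must be held):

#include <linux/oprofile.h>
#include "event_buffer.h"

static void example_emit_cpu_switch(unsigned long cpu)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(cpu);
}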
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
deleted file mode 100644
index a8d5bb3cba89..000000000000
--- a/drivers/oprofile/event_buffer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * @file event_buffer.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef EVENT_BUFFER_H
-#define EVENT_BUFFER_H
-
-#include <linux/types.h>
-#include <linux/mutex.h>
-
-int alloc_event_buffer(void);
-
-void free_event_buffer(void);
-
-/**
- * Add data to the event buffer.
- * The data passed is free-form, but typically consists of
- * file offsets, dcookies, context information, and ESCAPE codes.
- */
-void add_event_entry(unsigned long data);
-
-/* wake up the process sleeping on the event file */
-void wake_up_buffer_waiter(void);
-
-#define INVALID_COOKIE ~0UL
-#define NO_COOKIE 0UL
-
-extern const struct file_operations event_buffer_fops;
-
-/* mutex between sync_cpu_buffers() and the
- * file reading code.
- */
-extern struct mutex buffer_mutex;
-
-#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
deleted file mode 100644
index f343bd96609a..000000000000
--- a/drivers/oprofile/nmi_timer_int.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/**
- * @file nmi_timer_int.c
- *
- * @remark Copyright 2011 Advanced Micro Devices, Inc.
- *
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-
-#ifdef CONFIG_OPROFILE_NMI_TIMER
-
-static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
-static int ctr_running;
-
-static struct perf_event_attr nmi_timer_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- .size = sizeof(struct perf_event_attr),
- .pinned = 1,
- .disabled = 1,
-};
-
-static void nmi_timer_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- event->hw.interrupts = 0; /* don't throttle interrupts */
- oprofile_add_sample(regs, 0);
-}
-
-static int nmi_timer_start_cpu(int cpu)
-{
- struct perf_event *event = per_cpu(nmi_timer_events, cpu);
-
- if (!event) {
- event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
- nmi_timer_callback, NULL);
- if (IS_ERR(event))
- return PTR_ERR(event);
- per_cpu(nmi_timer_events, cpu) = event;
- }
-
- if (event && ctr_running)
- perf_event_enable(event);
-
- return 0;
-}
-
-static void nmi_timer_stop_cpu(int cpu)
-{
- struct perf_event *event = per_cpu(nmi_timer_events, cpu);
-
- if (event && ctr_running)
- perf_event_disable(event);
-}
-
-static int nmi_timer_cpu_online(unsigned int cpu)
-{
- nmi_timer_start_cpu(cpu);
- return 0;
-}
-static int nmi_timer_cpu_predown(unsigned int cpu)
-{
- nmi_timer_stop_cpu(cpu);
- return 0;
-}
-
-static int nmi_timer_start(void)
-{
- int cpu;
-
- get_online_cpus();
- ctr_running = 1;
- for_each_online_cpu(cpu)
- nmi_timer_start_cpu(cpu);
- put_online_cpus();
-
- return 0;
-}
-
-static void nmi_timer_stop(void)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- nmi_timer_stop_cpu(cpu);
- ctr_running = 0;
- put_online_cpus();
-}
-
-static enum cpuhp_state hp_online;
-
-static void nmi_timer_shutdown(void)
-{
- struct perf_event *event;
- int cpu;
-
- cpuhp_remove_state(hp_online);
- for_each_possible_cpu(cpu) {
- event = per_cpu(nmi_timer_events, cpu);
- if (!event)
- continue;
- perf_event_disable(event);
- per_cpu(nmi_timer_events, cpu) = NULL;
- perf_event_release_kernel(event);
- }
-}
-
-static int nmi_timer_setup(void)
-{
- int err;
- u64 period;
-
- /* clock cycles per tick: */
- period = (u64)cpu_khz * 1000;
- do_div(period, HZ);
- nmi_timer_attr.sample_period = period;
-
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
- nmi_timer_cpu_online, nmi_timer_cpu_predown);
- if (err < 0) {
- nmi_timer_shutdown();
- return err;
- }
- hp_online = err;
- return 0;
-}
-
-int __init op_nmi_timer_init(struct oprofile_operations *ops)
-{
- int err = 0;
-
- err = nmi_timer_setup();
- if (err)
- return err;
- nmi_timer_shutdown(); /* only check, don't alloc */
-
- ops->create_files = NULL;
- ops->setup = nmi_timer_setup;
- ops->shutdown = nmi_timer_shutdown;
- ops->start = nmi_timer_start;
- ops->stop = nmi_timer_stop;
- ops->cpu_type = "timer";
-
- printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
-
- return 0;
-}
-
-#endif
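As a worked example of the sample_period computation in nmi_timer_setup() above: on a 3 GHz CPU (cpu_khz == 3000000) with HZ == 250,

	period = 3000000 * 1000 / 250 = 12000000

cycles per tick, i.e. one NMI-driven sample per scheduler tick.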
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
deleted file mode 100644
index ed2c3ec07024..000000000000
--- a/drivers/oprofile/oprof.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/**
- * @file oprof.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/moduleparam.h>
-#include <linux/workqueue.h>
-#include <linux/time.h>
-#include <linux/mutex.h>
-
-#include "oprof.h"
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-#include "oprofile_stats.h"
-
-struct oprofile_operations oprofile_ops;
-
-unsigned long oprofile_started;
-unsigned long oprofile_backtrace_depth;
-static unsigned long is_setup;
-static DEFINE_MUTEX(start_mutex);
-
-/* timer
- 0 - use performance monitoring hardware if available
- 1 - use the timer int mechanism regardless
- */
-static int timer = 0;
-
-int oprofile_setup(void)
-{
- int err;
-
- mutex_lock(&start_mutex);
-
- if ((err = alloc_cpu_buffers()))
- goto out;
-
- if ((err = alloc_event_buffer()))
- goto out1;
-
- if (oprofile_ops.setup && (err = oprofile_ops.setup()))
- goto out2;
-
- /* Note that even though this adds to the
- * profiling overhead, it's necessary to prevent
- * us from missing task deaths and eventually oopsing
- * when trying to process the event buffer.
- */
- if (oprofile_ops.sync_start) {
- int sync_ret = oprofile_ops.sync_start();
- switch (sync_ret) {
- case 0:
- goto post_sync;
- case 1:
- goto do_generic;
- case -1:
- goto out3;
- default:
- goto out3;
- }
- }
-do_generic:
- if ((err = sync_start()))
- goto out3;
-
-post_sync:
- is_setup = 1;
- mutex_unlock(&start_mutex);
- return 0;
-
-out3:
- if (oprofile_ops.shutdown)
- oprofile_ops.shutdown();
-out2:
- free_event_buffer();
-out1:
- free_cpu_buffers();
-out:
- mutex_unlock(&start_mutex);
- return err;
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void switch_worker(struct work_struct *work);
-static DECLARE_DELAYED_WORK(switch_work, switch_worker);
-
-static void start_switch_worker(void)
-{
- if (oprofile_ops.switch_events)
- schedule_delayed_work(&switch_work, oprofile_time_slice);
-}
-
-static void stop_switch_worker(void)
-{
- cancel_delayed_work_sync(&switch_work);
-}
-
-static void switch_worker(struct work_struct *work)
-{
- if (oprofile_ops.switch_events())
- return;
-
- atomic_inc(&oprofile_stats.multiplex_counter);
- start_switch_worker();
-}
-
-/* User input is in ms; convert it to jiffies */
-int oprofile_set_timeout(unsigned long val_msec)
-{
- int err = 0;
- unsigned long time_slice;
-
- mutex_lock(&start_mutex);
-
- if (oprofile_started) {
- err = -EBUSY;
- goto out;
- }
-
- if (!oprofile_ops.switch_events) {
- err = -EINVAL;
- goto out;
- }
-
- time_slice = msecs_to_jiffies(val_msec);
- if (time_slice == MAX_JIFFY_OFFSET) {
- err = -EINVAL;
- goto out;
- }
-
- oprofile_time_slice = time_slice;
-
-out:
- mutex_unlock(&start_mutex);
- return err;
-
-}
-
-#else
-
-static inline void start_switch_worker(void) { }
-static inline void stop_switch_worker(void) { }
-
-#endif
-
-/* Actually start profiling (echo 1 >/dev/oprofile/enable) */
-int oprofile_start(void)
-{
- int err = -EINVAL;
-
- mutex_lock(&start_mutex);
-
- if (!is_setup)
- goto out;
-
- err = 0;
-
- if (oprofile_started)
- goto out;
-
- oprofile_reset_stats();
-
- if ((err = oprofile_ops.start()))
- goto out;
-
- start_switch_worker();
-
- oprofile_started = 1;
-out:
- mutex_unlock(&start_mutex);
- return err;
-}
-
-
-/* echo 0 >/dev/oprofile/enable */
-void oprofile_stop(void)
-{
- mutex_lock(&start_mutex);
- if (!oprofile_started)
- goto out;
- oprofile_ops.stop();
- oprofile_started = 0;
-
- stop_switch_worker();
-
- /* wake up the daemon to read what remains */
- wake_up_buffer_waiter();
-out:
- mutex_unlock(&start_mutex);
-}
-
-
-void oprofile_shutdown(void)
-{
- mutex_lock(&start_mutex);
- if (oprofile_ops.sync_stop) {
- int sync_ret = oprofile_ops.sync_stop();
- switch (sync_ret) {
- case 0:
- goto post_sync;
- case 1:
- goto do_generic;
- default:
- goto post_sync;
- }
- }
-do_generic:
- sync_stop();
-post_sync:
- if (oprofile_ops.shutdown)
- oprofile_ops.shutdown();
- is_setup = 0;
- free_event_buffer();
- free_cpu_buffers();
- mutex_unlock(&start_mutex);
-}
-
-int oprofile_set_ulong(unsigned long *addr, unsigned long val)
-{
- int err = -EBUSY;
-
- mutex_lock(&start_mutex);
- if (!oprofile_started) {
- *addr = val;
- err = 0;
- }
- mutex_unlock(&start_mutex);
-
- return err;
-}
-
-static int timer_mode;
-
-static int __init oprofile_init(void)
-{
- int err;
-
- /* always init architecture to setup backtrace support */
- timer_mode = 0;
- err = oprofile_arch_init(&oprofile_ops);
- if (!err) {
- if (!timer && !oprofilefs_register())
- return 0;
- oprofile_arch_exit();
- }
-
- /* setup timer mode: */
- timer_mode = 1;
- /* no nmi timer mode if oprofile.timer is set */
- if (timer || op_nmi_timer_init(&oprofile_ops)) {
- err = oprofile_timer_init(&oprofile_ops);
- if (err)
- return err;
- }
-
- return oprofilefs_register();
-}
-
-
-static void __exit oprofile_exit(void)
-{
- oprofilefs_unregister();
- if (!timer_mode)
- oprofile_arch_exit();
-}
-
-
-module_init(oprofile_init);
-module_exit(oprofile_exit);
-
-module_param_named(timer, timer, int, 0644);
-MODULE_PARM_DESC(timer, "force use of timer interrupt");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Levon <levon@movementarian.org>");
-MODULE_DESCRIPTION("OProfile system profiler");
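The sync_start()/sync_stop() switches in oprofile_setup() and oprofile_shutdown() above encode a three-way convention: 0 means the arch hook did everything, 1 means fall through to the generic sync code, and a negative value aborts setup. A hypothetical architecture hook following it (arch_has_private_sync() and arch_install_sync_hooks() are assumed helpers, not real kernel APIs):

#include <linux/types.h>

static bool arch_has_private_sync(void);	/* assumed helper */
static int arch_install_sync_hooks(void);	/* assumed helper */

static int example_arch_sync_start(void)
{
	if (!arch_has_private_sync())
		return 1;	/* fall back to generic sync_start() */

	if (arch_install_sync_hooks())
		return -1;	/* error: abort setup */

	return 0;		/* done, skip the generic code */
}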
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
deleted file mode 100644
index d5412060ab0f..000000000000
--- a/drivers/oprofile/oprof.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * @file oprof.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROF_H
-#define OPROF_H
-
-int oprofile_setup(void);
-void oprofile_shutdown(void);
-
-int oprofilefs_register(void);
-void oprofilefs_unregister(void);
-
-int oprofile_start(void);
-void oprofile_stop(void);
-
-struct oprofile_operations;
-
-extern unsigned long oprofile_buffer_size;
-extern unsigned long oprofile_cpu_buffer_size;
-extern unsigned long oprofile_buffer_watershed;
-extern unsigned long oprofile_time_slice;
-
-extern struct oprofile_operations oprofile_ops;
-extern unsigned long oprofile_started;
-extern unsigned long oprofile_backtrace_depth;
-
-struct dentry;
-
-void oprofile_create_files(struct dentry *root);
-int oprofile_timer_init(struct oprofile_operations *ops);
-#ifdef CONFIG_OPROFILE_NMI_TIMER
-int op_nmi_timer_init(struct oprofile_operations *ops);
-#else
-static inline int op_nmi_timer_init(struct oprofile_operations *ops)
-{
- return -ENODEV;
-}
-#endif
-
-
-int oprofile_set_ulong(unsigned long *addr, unsigned long val);
-int oprofile_set_timeout(unsigned long time);
-
-#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
deleted file mode 100644
index ee2cfce358b9..000000000000
--- a/drivers/oprofile/oprofile_files.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * @file oprofile_files.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/fs.h>
-#include <linux/oprofile.h>
-#include <linux/jiffies.h>
-
-#include "event_buffer.h"
-#include "oprofile_stats.h"
-#include "oprof.h"
-
-#define BUFFER_SIZE_DEFAULT 131072
-#define CPU_BUFFER_SIZE_DEFAULT 8192
-#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
-#define TIME_SLICE_DEFAULT 1
-
-unsigned long oprofile_buffer_size;
-unsigned long oprofile_cpu_buffer_size;
-unsigned long oprofile_buffer_watershed;
-unsigned long oprofile_time_slice;
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static ssize_t timeout_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
- buf, count, offset);
-}
-
-
-static ssize_t timeout_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_timeout(val);
-
- if (retval)
- return retval;
- return count;
-}
-
-
-static const struct file_operations timeout_fops = {
- .read = timeout_read,
- .write = timeout_write,
- .llseek = default_llseek,
-};
-
-#endif
-
-
-static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
- offset);
-}
-
-
-static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- if (!oprofile_ops.backtrace)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
- if (retval)
- return retval;
-
- return count;
-}
-
-
-static const struct file_operations depth_fops = {
- .read = depth_read,
- .write = depth_write,
- .llseek = default_llseek,
-};
-
-
-static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
-}
-
-
-static const struct file_operations pointer_size_fops = {
- .read = pointer_size_read,
- .llseek = default_llseek,
-};
-
-
-static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
-}
-
-
-static const struct file_operations cpu_type_fops = {
- .read = cpu_type_read,
- .llseek = default_llseek,
-};
-
-
-static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
-}
-
-
-static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = 0;
- if (val)
- retval = oprofile_start();
- else
- oprofile_stop();
-
- if (retval)
- return retval;
- return count;
-}
-
-
-static const struct file_operations enable_fops = {
- .read = enable_read,
- .write = enable_write,
- .llseek = default_llseek,
-};
-
-
-static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- wake_up_buffer_waiter();
- return count;
-}
-
-
-static const struct file_operations dump_fops = {
- .write = dump_write,
- .llseek = noop_llseek,
-};
-
-void oprofile_create_files(struct dentry *root)
-{
- /* reinitialize default values */
- oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
- oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
- oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
- oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
-
- oprofilefs_create_file(root, "enable", &enable_fops);
- oprofilefs_create_file_perm(root, "dump", &dump_fops, 0666);
- oprofilefs_create_file(root, "buffer", &event_buffer_fops);
- oprofilefs_create_ulong(root, "buffer_size", &oprofile_buffer_size);
- oprofilefs_create_ulong(root, "buffer_watershed", &oprofile_buffer_watershed);
- oprofilefs_create_ulong(root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
- oprofilefs_create_file(root, "cpu_type", &cpu_type_fops);
- oprofilefs_create_file(root, "backtrace_depth", &depth_fops);
- oprofilefs_create_file(root, "pointer_size", &pointer_size_fops);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_file(root, "time_slice", &timeout_fops);
-#endif
- oprofile_create_stats_files(root);
- if (oprofile_ops.create_files)
- oprofile_ops.create_files(root);
-}
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
deleted file mode 100644
index 98a63a5f8763..000000000000
--- a/drivers/oprofile/oprofile_perf.c
+++ /dev/null
@@ -1,328 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2010 ARM Ltd.
- * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
- *
- * Perf-events backend for OProfile.
- */
-#include <linux/perf_event.h>
-#include <linux/platform_device.h>
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long unit_mask;
- unsigned long kernel;
- unsigned long user;
- struct perf_event_attr attr;
-};
-
-static int oprofile_perf_enabled;
-static DEFINE_MUTEX(oprofile_perf_mutex);
-
-static struct op_counter_config *counter_config;
-static DEFINE_PER_CPU(struct perf_event **, perf_events);
-static int num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event,
- struct perf_sample_data *data, struct pt_regs *regs)
-{
- int id;
- u32 cpu = smp_processor_id();
-
- for (id = 0; id < num_counters; ++id)
- if (per_cpu(perf_events, cpu)[id] == event)
- break;
-
- if (id != num_counters)
- oprofile_add_sample(regs, id);
- else
- pr_warn("oprofile: ignoring spurious overflow on cpu %u\n",
- cpu);
-}
-
-/*
- * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
- int i;
- u32 size = sizeof(struct perf_event_attr);
- struct perf_event_attr *attr;
-
- for (i = 0; i < num_counters; ++i) {
- attr = &counter_config[i].attr;
- memset(attr, 0, size);
- attr->type = PERF_TYPE_RAW;
- attr->size = size;
- attr->config = counter_config[i].event;
- attr->sample_period = counter_config[i].count;
- attr->pinned = 1;
- }
-}
-
-static int op_create_counter(int cpu, int event)
-{
- struct perf_event *pevent;
-
- if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
- return 0;
-
- pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
- cpu, NULL,
- op_overflow_handler, NULL);
-
- if (IS_ERR(pevent))
- return PTR_ERR(pevent);
-
- if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
- perf_event_release_kernel(pevent);
- pr_warn("oprofile: failed to enable event %d on CPU %d\n",
- event, cpu);
- return -EBUSY;
- }
-
- per_cpu(perf_events, cpu)[event] = pevent;
-
- return 0;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
- struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
-
- if (pevent) {
- perf_event_release_kernel(pevent);
- per_cpu(perf_events, cpu)[event] = NULL;
- }
-}
-
-/*
- * Called by oprofile_perf_start to create active perf events based on the
- * previously configured attributes.
- */
-static int op_perf_start(void)
-{
- int cpu, event, ret = 0;
-
- for_each_online_cpu(cpu) {
- for (event = 0; event < num_counters; ++event) {
- ret = op_create_counter(cpu, event);
- if (ret)
- return ret;
- }
- }
-
- return ret;
-}
-
-/*
- * Called by oprofile_perf_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
- int cpu, event;
-
- for_each_online_cpu(cpu)
- for (event = 0; event < num_counters; ++event)
- op_destroy_counter(cpu, event);
-}
-
-static int oprofile_perf_create_files(struct dentry *root)
-{
- unsigned int i;
-
- for (i = 0; i < num_counters; i++) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
- }
-
- return 0;
-}
-
-static int oprofile_perf_setup(void)
-{
- raw_spin_lock(&oprofilefs_lock);
- op_perf_setup();
- raw_spin_unlock(&oprofilefs_lock);
- return 0;
-}
-
-static int oprofile_perf_start(void)
-{
- int ret = -EBUSY;
-
- mutex_lock(&oprofile_perf_mutex);
- if (!oprofile_perf_enabled) {
- ret = 0;
- op_perf_start();
- oprofile_perf_enabled = 1;
- }
- mutex_unlock(&oprofile_perf_mutex);
- return ret;
-}
-
-static void oprofile_perf_stop(void)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled)
- op_perf_stop();
- oprofile_perf_enabled = 0;
- mutex_unlock(&oprofile_perf_mutex);
-}
-
-#ifdef CONFIG_PM
-
-static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled)
- op_perf_stop();
- mutex_unlock(&oprofile_perf_mutex);
- return 0;
-}
-
-static int oprofile_perf_resume(struct platform_device *dev)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled && op_perf_start())
- oprofile_perf_enabled = 0;
- mutex_unlock(&oprofile_perf_mutex);
- return 0;
-}
-
-static struct platform_driver oprofile_driver = {
- .driver = {
- .name = "oprofile-perf",
- },
- .resume = oprofile_perf_resume,
- .suspend = oprofile_perf_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
- int ret;
-
- ret = platform_driver_register(&oprofile_driver);
- if (ret)
- return ret;
-
- oprofile_pdev = platform_device_register_simple(
- oprofile_driver.driver.name, 0, NULL, 0);
- if (IS_ERR(oprofile_pdev)) {
- ret = PTR_ERR(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
- }
-
- return ret;
-}
-
-static void exit_driverfs(void)
-{
- platform_device_unregister(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
-}
-
-#else
-
-static inline int init_driverfs(void) { return 0; }
-static inline void exit_driverfs(void) { }
-
-#endif /* CONFIG_PM */
-
-void oprofile_perf_exit(void)
-{
- int cpu, id;
- struct perf_event *event;
-
- for_each_possible_cpu(cpu) {
- for (id = 0; id < num_counters; ++id) {
- event = per_cpu(perf_events, cpu)[id];
- if (event)
- perf_event_release_kernel(event);
- }
-
- kfree(per_cpu(perf_events, cpu));
- }
-
- kfree(counter_config);
- exit_driverfs();
-}
-
-int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- int cpu, ret = 0;
-
- ret = init_driverfs();
- if (ret)
- return ret;
-
- num_counters = perf_num_counters();
- if (num_counters <= 0) {
- pr_info("oprofile: no performance counters\n");
- ret = -ENODEV;
- goto out;
- }
-
- counter_config = kcalloc(num_counters,
- sizeof(struct op_counter_config), GFP_KERNEL);
-
- if (!counter_config) {
- pr_info("oprofile: failed to allocate %d "
- "counters\n", num_counters);
- ret = -ENOMEM;
- num_counters = 0;
- goto out;
- }
-
- for_each_possible_cpu(cpu) {
- per_cpu(perf_events, cpu) = kcalloc(num_counters,
- sizeof(struct perf_event *), GFP_KERNEL);
- if (!per_cpu(perf_events, cpu)) {
- pr_info("oprofile: failed to allocate %d perf events "
- "for cpu %d\n", num_counters, cpu);
- ret = -ENOMEM;
- goto out;
- }
- }
-
- ops->create_files = oprofile_perf_create_files;
- ops->setup = oprofile_perf_setup;
- ops->start = oprofile_perf_start;
- ops->stop = oprofile_perf_stop;
- ops->shutdown = oprofile_perf_stop;
- ops->cpu_type = op_name_from_perf_id();
-
- if (!ops->cpu_type)
- ret = -ENODEV;
- else
- pr_info("oprofile: using %s\n", ops->cpu_type);
-
-out:
- if (ret)
- oprofile_perf_exit();
-
- return ret;
-}
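For reference, the pinned raw attribute that op_perf_setup() derives from counter_config looks like this when written out by hand (the raw event code 0x3c, unhalted core cycles on many x86 PMUs, is only an assumption for illustration):

#include <linux/perf_event.h>

static struct perf_event_attr example_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.config		= 0x3c,		/* example event code */
	.sample_period	= 100000,	/* overflow every 100k events */
	.pinned		= 1,		/* permanently on the PMU */
};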
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
deleted file mode 100644
index 59659cea4582..000000000000
--- a/drivers/oprofile/oprofile_stats.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * @file oprofile_stats.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/threads.h>
-
-#include "oprofile_stats.h"
-#include "cpu_buffer.h"
-
-struct oprofile_stat_struct oprofile_stats;
-
-void oprofile_reset_stats(void)
-{
- struct oprofile_cpu_buffer *cpu_buf;
- int i;
-
- for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(op_cpu_buffer, i);
- cpu_buf->sample_received = 0;
- cpu_buf->sample_lost_overflow = 0;
- cpu_buf->backtrace_aborted = 0;
- cpu_buf->sample_invalid_eip = 0;
- }
-
- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
- atomic_set(&oprofile_stats.event_lost_overflow, 0);
- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
- atomic_set(&oprofile_stats.multiplex_counter, 0);
-}
-
-
-void oprofile_create_stats_files(struct dentry *root)
-{
- struct oprofile_cpu_buffer *cpu_buf;
- struct dentry *cpudir;
- struct dentry *dir;
- char buf[10];
- int i;
-
- dir = oprofilefs_mkdir(root, "stats");
- if (!dir)
- return;
-
- for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(op_cpu_buffer, i);
- snprintf(buf, 10, "cpu%d", i);
- cpudir = oprofilefs_mkdir(dir, buf);
-
- /* Strictly speaking access to these ulongs is racy,
- * but we can't simply lock them, and they are
- * informational only.
- */
- oprofilefs_create_ro_ulong(cpudir, "sample_received",
- &cpu_buf->sample_received);
- oprofilefs_create_ro_ulong(cpudir, "sample_lost_overflow",
- &cpu_buf->sample_lost_overflow);
- oprofilefs_create_ro_ulong(cpudir, "backtrace_aborted",
- &cpu_buf->backtrace_aborted);
- oprofilefs_create_ro_ulong(cpudir, "sample_invalid_eip",
- &cpu_buf->sample_invalid_eip);
- }
-
- oprofilefs_create_ro_atomic(dir, "sample_lost_no_mm",
- &oprofile_stats.sample_lost_no_mm);
- oprofilefs_create_ro_atomic(dir, "sample_lost_no_mapping",
- &oprofile_stats.sample_lost_no_mapping);
- oprofilefs_create_ro_atomic(dir, "event_lost_overflow",
- &oprofile_stats.event_lost_overflow);
- oprofilefs_create_ro_atomic(dir, "bt_lost_no_mapping",
- &oprofile_stats.bt_lost_no_mapping);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_ro_atomic(dir, "multiplex_counter",
- &oprofile_stats.multiplex_counter);
-#endif
-}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
deleted file mode 100644
index 1fc622bd1834..000000000000
--- a/drivers/oprofile/oprofile_stats.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * @file oprofile_stats.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#ifndef OPROFILE_STATS_H
-#define OPROFILE_STATS_H
-
-#include <linux/atomic.h>
-
-struct oprofile_stat_struct {
- atomic_t sample_lost_no_mm;
- atomic_t sample_lost_no_mapping;
- atomic_t bt_lost_no_mapping;
- atomic_t event_lost_overflow;
- atomic_t multiplex_counter;
-};
-
-extern struct oprofile_stat_struct oprofile_stats;
-
-/* reset all stats to zero */
-void oprofile_reset_stats(void);
-
-struct dentry;
-
-/* create the stats/ dir */
-void oprofile_create_stats_files(struct dentry *root);
-
-#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
deleted file mode 100644
index 0875f2f122b3..000000000000
--- a/drivers/oprofile/oprofilefs.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * @file oprofilefs.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- *
- * A simple filesystem for configuration and
- * access of oprofile.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/oprofile.h>
-#include <linux/fs.h>
-#include <linux/fs_context.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-
-#include "oprof.h"
-
-#define OPROFILEFS_MAGIC 0x6f70726f
-
-DEFINE_RAW_SPINLOCK(oprofilefs_lock);
-
-static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
-{
- struct inode *inode = new_inode(sb);
-
- if (inode) {
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- }
- return inode;
-}
-
-
-static const struct super_operations s_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
-};
-
-
-ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
-{
- return simple_read_from_buffer(buf, count, offset, str, strlen(str));
-}
-
-
-#define TMPBUFSIZE 50
-
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
-{
- char tmpbuf[TMPBUFSIZE];
- size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
- if (maxlen > TMPBUFSIZE)
- maxlen = TMPBUFSIZE;
- return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
-}
-
-
-/*
- * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
- * unchanged and might be uninitialized. This follows write syscall
- * implementation when count is zero: "If count is zero ... [and if]
- * no errors are detected, 0 will be returned without causing any
- * other effect." (man 2 write)
- */
-int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
-{
- char tmpbuf[TMPBUFSIZE];
- unsigned long flags;
-
- if (!count)
- return 0;
-
- if (count > TMPBUFSIZE - 1)
- return -EINVAL;
-
- memset(tmpbuf, 0x0, TMPBUFSIZE);
-
- if (copy_from_user(tmpbuf, buf, count))
- return -EFAULT;
-
- raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- *val = simple_strtoul(tmpbuf, NULL, 0);
- raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return count;
-}
-
-
-static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- unsigned long *val = file->private_data;
- return oprofilefs_ulong_to_user(*val, buf, count, offset);
-}
-
-
-static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long value;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_ulong(file->private_data, value);
- if (retval)
- return retval;
-
- return count;
-}
-
-
-static const struct file_operations ulong_fops = {
- .read = ulong_read_file,
- .write = ulong_write_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-static const struct file_operations ulong_ro_fops = {
- .read = ulong_read_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-static int __oprofilefs_create_file(struct dentry *root, char const *name,
- const struct file_operations *fops, int perm, void *priv)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- if (!root)
- return -ENOMEM;
-
- inode_lock(d_inode(root));
- dentry = d_alloc_name(root, name);
- if (!dentry) {
- inode_unlock(d_inode(root));
- return -ENOMEM;
- }
- inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
- if (!inode) {
- dput(dentry);
- inode_unlock(d_inode(root));
- return -ENOMEM;
- }
- inode->i_fop = fops;
- inode->i_private = priv;
- d_add(dentry, inode);
- inode_unlock(d_inode(root));
- return 0;
-}
-
-
-int oprofilefs_create_ulong(struct dentry *root,
- char const *name, unsigned long *val)
-{
- return __oprofilefs_create_file(root, name,
- &ulong_fops, 0644, val);
-}
-
-
-int oprofilefs_create_ro_ulong(struct dentry *root,
- char const *name, unsigned long *val)
-{
- return __oprofilefs_create_file(root, name,
- &ulong_ro_fops, 0444, val);
-}
-
-
-static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- atomic_t *val = file->private_data;
- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
-}
-
-
-static const struct file_operations atomic_ro_fops = {
- .read = atomic_read_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-int oprofilefs_create_ro_atomic(struct dentry *root,
- char const *name, atomic_t *val)
-{
- return __oprofilefs_create_file(root, name,
- &atomic_ro_fops, 0444, val);
-}
-
-
-int oprofilefs_create_file(struct dentry *root,
- char const *name, const struct file_operations *fops)
-{
- return __oprofilefs_create_file(root, name, fops, 0644, NULL);
-}
-
-
-int oprofilefs_create_file_perm(struct dentry *root,
- char const *name, const struct file_operations *fops, int perm)
-{
- return __oprofilefs_create_file(root, name, fops, perm, NULL);
-}
-
-
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- inode_lock(d_inode(parent));
- dentry = d_alloc_name(parent, name);
- if (!dentry) {
- inode_unlock(d_inode(parent));
- return NULL;
- }
- inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
- if (!inode) {
- dput(dentry);
- inode_unlock(d_inode(parent));
- return NULL;
- }
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- d_add(dentry, inode);
- inode_unlock(d_inode(parent));
- return dentry;
-}
-
-
-static int oprofilefs_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct inode *root_inode;
-
- sb->s_blocksize = PAGE_SIZE;
- sb->s_blocksize_bits = PAGE_SHIFT;
- sb->s_magic = OPROFILEFS_MAGIC;
- sb->s_op = &s_ops;
- sb->s_time_gran = 1;
-
- root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
- if (!root_inode)
- return -ENOMEM;
- root_inode->i_op = &simple_dir_inode_operations;
- root_inode->i_fop = &simple_dir_operations;
- sb->s_root = d_make_root(root_inode);
- if (!sb->s_root)
- return -ENOMEM;
-
- oprofile_create_files(sb->s_root);
-
- // FIXME: verify kill_litter_super removes our dentries
- return 0;
-}
-
-static int oprofilefs_get_tree(struct fs_context *fc)
-{
- return get_tree_single(fc, oprofilefs_fill_super);
-}
-
-static const struct fs_context_operations oprofilefs_context_ops = {
- .get_tree = oprofilefs_get_tree,
-};
-
-static int oprofilefs_init_fs_context(struct fs_context *fc)
-{
- fc->ops = &oprofilefs_context_ops;
- return 0;
-}
-
-static struct file_system_type oprofilefs_type = {
- .owner = THIS_MODULE,
- .name = "oprofilefs",
- .init_fs_context = oprofilefs_init_fs_context,
- .kill_sb = kill_litter_super,
-};
-MODULE_ALIAS_FS("oprofilefs");
-
-
-int __init oprofilefs_register(void)
-{
- return register_filesystem(&oprofilefs_type);
-}
-
-
-void __exit oprofilefs_unregister(void)
-{
- unregister_filesystem(&oprofilefs_type);
-}
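A profiling backend's ->create_files() callback would use the helpers above roughly as follows (hypothetical example):

#include <linux/errno.h>
#include <linux/oprofile.h>

static unsigned long example_knob;

static int example_create_files(struct dentry *root)
{
	struct dentry *dir;

	dir = oprofilefs_mkdir(root, "example");
	if (!dir)
		return -ENOMEM;

	oprofilefs_create_ulong(dir, "knob", &example_knob);
	return 0;
}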
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
deleted file mode 100644
index 2498a6cd7c24..000000000000
--- a/drivers/oprofile/timer_int.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * @file timer_int.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/notifier.h>
-#include <linux/smp.h>
-#include <linux/oprofile.h>
-#include <linux/profile.h>
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/hrtimer.h>
-#include <asm/irq_regs.h>
-#include <asm/ptrace.h>
-
-#include "oprof.h"
-
-static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
-static int ctr_running;
-
-static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
-{
- oprofile_add_sample(get_irq_regs(), 0);
- hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
- return HRTIMER_RESTART;
-}
-
-static void __oprofile_hrtimer_start(void *unused)
-{
- struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer);
-
- if (!ctr_running)
- return;
-
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = oprofile_hrtimer_notify;
-
- hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
- HRTIMER_MODE_REL_PINNED);
-}
-
-static int oprofile_hrtimer_start(void)
-{
- get_online_cpus();
- ctr_running = 1;
- on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
- put_online_cpus();
- return 0;
-}
-
-static void __oprofile_hrtimer_stop(int cpu)
-{
- struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
-
- if (!ctr_running)
- return;
-
- hrtimer_cancel(hrtimer);
-}
-
-static void oprofile_hrtimer_stop(void)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- __oprofile_hrtimer_stop(cpu);
- ctr_running = 0;
- put_online_cpus();
-}
-
-static int oprofile_timer_online(unsigned int cpu)
-{
- local_irq_disable();
- __oprofile_hrtimer_start(NULL);
- local_irq_enable();
- return 0;
-}
-
-static int oprofile_timer_prep_down(unsigned int cpu)
-{
- __oprofile_hrtimer_stop(cpu);
- return 0;
-}
-
-static enum cpuhp_state hp_online;
-
-static int oprofile_hrtimer_setup(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "oprofile/timer:online",
- oprofile_timer_online,
- oprofile_timer_prep_down);
- if (ret < 0)
- return ret;
- hp_online = ret;
- return 0;
-}
-
-static void oprofile_hrtimer_shutdown(void)
-{
- cpuhp_remove_state_nocalls(hp_online);
-}
-
-int oprofile_timer_init(struct oprofile_operations *ops)
-{
- ops->create_files = NULL;
- ops->setup = oprofile_hrtimer_setup;
- ops->shutdown = oprofile_hrtimer_shutdown;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
- return 0;
-}
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7fec4fefe151..62f8407923d4 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -243,7 +243,7 @@ static int port_detect(struct device *dev, void *dev_drv)
}
/**
- * parport_register_driver - register a parallel port device driver
+ * __parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
* @owner: owner module of drv
* @mod_name: module name string
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 11cc79411e2d..d62c4ac4ae1b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -36,4 +36,4 @@ obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
obj-y += controller/
obj-y += switch/
-ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+subdir-ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 64e2f5e379aa..5aa8977d7b0f 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -55,15 +55,6 @@ config PCI_RCAR_GEN2
There are 3 internal PCI controllers available with a single
built-in EHCI/OHCI host controller present on each one.
-config PCIE_RCAR
- bool "Renesas R-Car PCIe controller"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_RCAR_HOST
- help
- Say Y here if you want PCIe controller support on R-Car SoCs.
- This option will be removed after arm64 defconfig is updated.
-
config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe host controller"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -242,20 +233,6 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
-config PCIE_TANGO_SMP8759
- bool "Tango SMP8759 PCIe controller (DANGEROUS)"
- depends on ARCH_TANGO && PCI_MSI && OF
- depends on BROKEN
- select PCI_HOST_COMMON
- help
- Say Y here to enable PCIe controller support for Sigma Designs
- Tango SMP8759-based systems.
-
- Note: The SMP8759 controller multiplexes PCI config and MMIO
- accesses, and Linux doesn't provide a way to serialize them.
- This can lead to data corruption if drivers perform concurrent
- config and MMIO accesses.
-
config VMD
depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
@@ -273,7 +250,7 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
default ARCH_BRCMSTB
@@ -298,6 +275,16 @@ config PCI_LOONGSON
Say Y here if you want to enable PCI controller support on
Loongson systems.
+config PCIE_MICROCHIP_HOST
+ bool "Microchip AXI PCIe host bridge support"
+ depends on PCI_MSI && OF
+ select PCI_MSI_IRQ_DOMAIN
+ select GENERIC_MSI_IRQ_DOMAIN
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want the kernel to support the Microchip AXI
+ PCIe Host Bridge driver.
+
config PCIE_HISI_ERR
depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
bool "HiSilicon HIP PCIe controller error handling driver"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 04c6edc285c5..e4559f2182f2 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
-obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index dac1ac8a7615..849f1e416ea5 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -64,6 +64,7 @@ enum j721e_pcie_mode {
struct j721e_pcie_data {
enum j721e_pcie_mode mode;
+ bool quirk_retrain_flag;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -280,6 +281,7 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
@@ -388,6 +390,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
bridge->ops = &cdns_ti_pcie_host_ops;
rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
cdns_pcie = &rc->pcie;
cdns_pcie->dev = dev;
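
The quirk flag above is plumbed through the usual per-SoC match-data
pattern; a minimal sketch (hypothetical names, not part of this patch):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_pcie_data {
	bool quirk_retrain_flag;	/* per-compatible quirk */
};

static const struct my_pcie_data my_rc_data = {
	.quirk_retrain_flag = true,
};

static const struct of_device_id my_pcie_of_match[] = {
	{ .compatible = "vendor,my-pcie-rc", .data = &my_rc_data },
	{ },
};

static int my_pcie_probe(struct platform_device *pdev)
{
	const struct my_pcie_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* copy data->quirk_retrain_flag into the device state here */
	return 0;
}
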
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 9e2b024d32f2..897cdde02bd8 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -382,6 +382,57 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ struct cdns_pcie *pcie = &ep->pcie;
+ u64 pci_addr, pci_addr_mask = 0xff;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ int ret;
+ int i;
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = data & ~data_mask;
+
+ /* Get the PCI address where to write the data into. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ for (i = 0; i < interrupt_num; i++) {
+ ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ pci_addr & ~pci_addr_mask,
+ entry_size);
+ if (ret)
+ return ret;
+ addr = addr + entry_size;
+ }
+
+ *msi_data = data;
+ *msi_addr_offset = pci_addr & pci_addr_mask;
+
+ return 0;
+}
+
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
u16 interrupt_num)
{
@@ -455,18 +506,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- struct pci_epf *epf;
- u32 cfg;
int ret;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
- cfg = BIT(0);
- list_for_each_entry(epf, &epc->pci_epf, list)
- cfg |= BIT(epf->func_no);
- cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
ret = cdns_pcie_start_link(pcie);
if (ret) {
@@ -481,6 +527,7 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
+ .align = 256,
};
static const struct pci_epc_features*
@@ -500,6 +547,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.set_msix = cdns_pcie_ep_set_msix,
.get_msix = cdns_pcie_ep_get_msix,
.raise_irq = cdns_pcie_ep_raise_irq,
+ .map_msi_irq = cdns_pcie_ep_map_msi_irq,
.start = cdns_pcie_ep_start,
.get_features = cdns_pcie_ep_get_features,
};
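
A worked example of the masking arithmetic in cdns_pcie_ep_map_msi_irq()
above, with assumed register contents:

/*
 * Suppose the host enabled MME = 2, i.e. msi_count = 1 << 2 = 4
 * vectors, and wrote 0x0040 into the MSI data register.  Then
 * data_mask = 4 - 1 = 0x3 and the base value returned in *msi_data
 * is 0x0040 & ~0x3 = 0x0040; vector n (1..4) is signalled by writing
 * 0x0040 | (n - 1).  The 64-bit MSI address is mapped through a
 * 256-byte-aligned window (pci_addr_mask = 0xff), with the low bits
 * handed back via *msi_addr_offset.
 */
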
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 811c1cb2e8de..73dcf8cf98fb 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -77,6 +77,68 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
+static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (cdns_pcie_link_up(pcie)) {
+ dev_info(dev, "Link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int cdns_pcie_retrain(struct cdns_pcie *pcie)
+{
+ u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ u16 lnk_stat, lnk_ctl;
+ int ret = 0;
+
+ /*
+ * Set the retrain bit if the current link speed is 2.5 GB/s
+ * but the PCIe root port supports more than 2.5 GB/s.
+ */
+
+ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
+ PCI_EXP_LNKCAP));
+ if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return ret;
+
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ lnk_ctl = cdns_pcie_rp_readw(pcie,
+ pcie_cap_off + PCI_EXP_LNKCTL);
+ lnk_ctl |= PCI_EXP_LNKCTL_RL;
+ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ lnk_ctl);
+
+ ret = cdns_pcie_host_wait_for_link(pcie);
+ }
+ return ret;
+}
+
+static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ int ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie);
+
+ /*
+ * Retrain the link to work around a Gen2 training defect,
+ * if the quirk flag is set.
+ */
+ if (!ret && rc->quirk_retrain_flag)
+ ret = cdns_pcie_retrain(pcie);
+
+ return ret;
+}
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
@@ -321,9 +383,10 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
err = cdns_pcie_host_bar_config(rc, entry);
- if (err)
+ if (err) {
dev_err(dev, "Fail to configure IB using dma-ranges\n");
- return err;
+ return err;
+ }
}
return 0;
@@ -398,23 +461,6 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
-static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- int retries;
-
- /* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (cdns_pcie_link_up(pcie)) {
- dev_info(dev, "Link up\n");
- return 0;
- }
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
- }
-
- return -ETIMEDOUT;
-}
-
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
@@ -457,7 +503,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return ret;
}
- ret = cdns_pcie_host_wait_for_link(pcie);
+ ret = cdns_pcie_host_start_link(rc);
if (ret)
dev_dbg(dev, "PCIe link never came up\n");
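
The same retrain rule, sketched with the generic PCIe capability
accessors (assumes a struct pci_dev for the root port; the driver above
uses its own cdns_pcie_rp_* accessors because the root port registers
sit behind the bridge window):

#include <linux/pci.h>

static int retrain_if_stuck_at_gen1(struct pci_dev *dev)
{
	u32 lnkcap;
	u16 lnksta, lnkctl;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return 0;	/* port only supports 2.5 GB/s, nothing to do */

	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
	if ((lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB)
		return 0;	/* already trained above 2.5 GB/s */

	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
	lnkctl |= PCI_EXP_LNKCTL_RL;	/* request link retraining */
	return pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnkctl);
}
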
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 30eba6cafe2c..254d2570f8c9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -119,7 +119,7 @@
* Root Port Registers (PCI configuration space for the root port function)
*/
#define CDNS_PCIE_RP_BASE 0x00200000
-
+#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
/*
* Address Translation Registers
@@ -291,6 +291,7 @@ struct cdns_pcie {
* @device_id: PCI device ID
 * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
+ * @quirk_retrain_flag: Retrain the link as a quirk for PCIe Gen2
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -299,6 +300,7 @@ struct cdns_pcie_rc {
u32 vendor_id;
u32 device_id;
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+ bool quirk_retrain_flag;
};
/**
@@ -414,6 +416,13 @@ static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
cdns_pcie_write_sz(addr, 0x2, value);
}
+static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
u32 reg, u8 value)
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 4d12efdacd2f..39fe2ed5a6a2 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -115,10 +115,17 @@ static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
+static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
+ .func_offset = 0x8000,
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
+
static const struct of_device_id ls_pcie_ep_of_match[] = {
{ .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
{ .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
{ .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
{ },
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 44ad34cdc3bc..5b9c625df7b8 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -232,7 +232,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_pcie_probe(struct platform_device *pdev)
+static int ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
@@ -271,10 +271,11 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
}
static struct platform_driver ls_pcie_driver = {
+ .probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
+builtin_platform_driver(ls_pcie_driver);
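
Why __init is dropped together with builtin_platform_driver_probe():

/*
 * builtin_platform_driver_probe() registers through
 * platform_driver_probe(), which binds only the devices present at
 * boot and then discards the probe routine, so probe may live in
 * __init memory (and deferred probing is not supported).  Plain
 * builtin_platform_driver() leaves the driver registered, so the core
 * may bind or defer-probe a device long after init memory is freed;
 * the probe routine must therefore stay resident.
 */
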
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index abf37aa68e51..e8afa50129a8 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -314,9 +314,6 @@ static const struct dw_pcie_host_ops al_pcie_host_ops = {
.host_init = al_pcie_host_init,
};
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
-
static int al_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -334,7 +331,6 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
pci->pp.ops = &al_pcie_host_ops;
al_pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index bcd1cd9ba8c8..1c25d8337151 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -434,10 +434,8 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->stop_link)
- return;
-
- pci->ops->stop_link(pci);
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -445,7 +443,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->start_link)
+ if (!pci->ops || !pci->ops->start_link)
return -EINVAL;
return pci->ops->start_link(pci);
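
With this change the dw_pcie ops pointer itself becomes optional; the
guarded call pattern used here and in pcie-designware.c below is:

/*
 * Sketch of the optional-ops pattern (start_link is a real dw_pcie op;
 * the NULL check on pci->ops is what this patch adds):
 *
 *	if (pci->ops && pci->ops->start_link)
 *		ret = pci->ops->start_link(pci);
 *
 * This lets glue drivers such as pcie-al.c drop empty dw_pcie_ops
 * structures instead of registering stubs.
 */
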
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 8a84c005f32b..7e55b2b66182 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -258,10 +258,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
static void dw_pcie_free_msi(struct pcie_port *pp)
{
- if (pp->msi_irq) {
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
- }
+ if (pp->msi_irq)
+ irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
@@ -305,8 +303,13 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (cfg_res) {
pp->cfg0_size = resource_size(cfg_res);
pp->cfg0_base = cfg_res->start;
- } else if (!pp->va_cfg0_base) {
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+ } else {
dev_err(dev, "Missing *config* reg space\n");
+ return -ENODEV;
}
if (!pci->dbi_base) {
@@ -322,38 +325,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->bridge = bridge;
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &bridge->windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- pp->io_size = resource_size(win->res);
- pp->io_bus_addr = win->res->start - win->offset;
- pp->io_base = pci_pio_to_address(win->res->start);
- break;
- case 0:
- dev_err(dev, "Missing *config* reg space\n");
- pp->cfg0_size = resource_size(win->res);
- pp->cfg0_base = win->res->start;
- if (!pci->dbi_base) {
- pci->dbi_base = devm_pci_remap_cfgspace(dev,
- pp->cfg0_base,
- pp->cfg0_size);
- if (!pci->dbi_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
- }
- break;
- }
- }
-
- if (!pp->va_cfg0_base) {
- pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
- pp->cfg0_base, pp->cfg0_size);
- if (!pp->va_cfg0_base) {
- dev_err(dev, "Error with ioremap in function\n");
- return -ENOMEM;
- }
+ /* Get the I/O range from DT */
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
}
if (pci->link_gen < 1)
@@ -425,7 +402,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
dw_pcie_msi_init(pp);
- if (!dw_pcie_link_up(pci) && pci->ops->start_link) {
+ if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
ret = pci->ops->start_link(pci);
if (ret)
goto err_free_msi;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 645fa1892375..004cb860e266 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -141,7 +141,7 @@ u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
int ret;
u32 val;
- if (pci->ops->read_dbi)
+ if (pci->ops && pci->ops->read_dbi)
return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
@@ -156,7 +156,7 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
- if (pci->ops->write_dbi) {
+ if (pci->ops && pci->ops->write_dbi) {
pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
@@ -171,7 +171,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
- if (pci->ops->write_dbi2) {
+ if (pci->ops && pci->ops->write_dbi2) {
pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
return;
}
@@ -186,7 +186,7 @@ static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
int ret;
u32 val;
- if (pci->ops->read_dbi)
+ if (pci->ops && pci->ops->read_dbi)
return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
@@ -200,7 +200,7 @@ static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
int ret;
- if (pci->ops->write_dbi) {
+ if (pci->ops && pci->ops->write_dbi) {
pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
return;
}
@@ -225,6 +225,47 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
+static inline u32 dw_pcie_enable_ecrc(u32 val)
+{
+ /*
+ * DesignWare core version 4.90A has a design issue where the 'TD'
+ * bit in the Control register-1 of the ATU outbound region acts
+ * like an override for the ECRC setting, i.e., the presence of TLP
+ * Digest (ECRC) in the outgoing TLPs is solely determined by this
+ * bit. This is contrary to the PCIe spec which says that the
+ * enablement of the ECRC is solely determined by the AER
+ * registers.
+ *
+ * Because of this, even when the ECRC is enabled through AER
+ * registers, the transactions going through ATU won't have TLP
+ * Digest as there is no way the PCI core AER code could program
+ * the TD bit which is specific to the DesignWare core.
+ *
+ * The best way to handle this scenario is to always program the
+ * TD bit. It affects only the traffic from the root port to
+ * downstream devices.
+ *
+ * At this point,
+ * When ECRC is enabled in AER registers, everything works normally
+ * When ECRC is NOT enabled in AER registers, then,
+ * on Root Port:- TLP Digest (DWord size) gets appended to each packet
+ * even though it is not required. Since downstream
+ * TLPs are mostly configuration accesses and BAR
+ * accesses, they are not in the critical path and
+ * won't have much negative effect on performance.
+ * on End Point:- TLP Digest is received for some/all packets coming
+ * from the root port. The TLP Digest is ignored
+ * because, per the PCIe Spec r5.0 v1.0 section 2.2.3
+ * "TLP Digest Rules", an endpoint whose ECRC check
+ * functionality is disabled in the AER registers
+ * must ignore any received TLP Digest.
+ * Since no issue or error is reported on either side, the best way
+ * to handle the scenario is to program the TD bit by default.
+ */
+
+ return val | PCIE_ATU_TD;
+}
+
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
int index, int type,
u64 cpu_addr, u64 pci_addr,
@@ -248,6 +289,8 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
val = type | PCIE_ATU_FUNC_NUM(func_no);
val = upper_32_bits(size - 1) ?
val | PCIE_ATU_INCREASE_REGION_SIZE : val;
+ if (pci->version == 0x490A)
+ val = dw_pcie_enable_ecrc(val);
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
@@ -273,7 +316,7 @@ static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
{
u32 retries, val;
- if (pci->ops->cpu_addr_fixup)
+ if (pci->ops && pci->ops->cpu_addr_fixup)
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
if (pci->iatu_unroll_enabled) {
@@ -290,12 +333,19 @@ static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
upper_32_bits(cpu_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
lower_32_bits(cpu_addr + size - 1));
+ if (pci->version >= 0x460A)
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(cpu_addr + size - 1));
dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
- PCIE_ATU_FUNC_NUM(func_no));
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
+ val | PCIE_ATU_INCREASE_REGION_SIZE : val;
+ if (pci->version == 0x490A)
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
/*
@@ -321,7 +371,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u64 pci_addr,
- u32 size)
+ u64 size)
{
__dw_pcie_prog_outbound_atu(pci, func_no, index, type,
cpu_addr, pci_addr, size);
@@ -481,7 +531,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
- if (pci->ops->link_up)
+ if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
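
A worked example of the INCREASE_REGION_SIZE rule added above:

/*
 * For an 8 GiB outbound window, size - 1 = 0x1_FFFF_FFFF, so
 * upper_32_bits(size - 1) = 0x1 is non-zero: on iATU >= 4.60a
 * (pci->version >= 0x460A) PCIE_ATU_INCREASE_REGION_SIZE must be set
 * and the upper 32 bits of the limit written to PCIE_ATU_UPPER_LIMIT.
 * A 2 GiB window has upper_32_bits(size - 1) = 0 and keeps the legacy
 * 32-bit limit register.  The TD (TLP Digest) override applies only
 * to the 4.90a core (pci->version == 0x490A).
 */
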
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 0207840756c4..7247c8b01f04 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -86,6 +86,7 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE BIT(31)
@@ -99,6 +100,7 @@
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
#define PCIE_ATU_UPPER_TARGET 0x91C
+#define PCIE_ATU_UPPER_LIMIT 0x924
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
@@ -297,7 +299,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
u64 size);
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u64 pci_addr,
- u32 size);
+ u64 size);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int bar, u64 cpu_addr,
enum dw_pcie_as_type as_type);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index affa2713bf80..8a7a300163e5 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -159,8 +159,10 @@ struct qcom_pcie_resources_2_3_3 {
struct reset_control *rst[7];
};
+/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[6];
+ struct clk_bulk_data clks[7];
+ int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
struct clk *pipe_clk;
@@ -398,7 +400,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
/* enable external reference clock */
val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
- val &= ~PHY_REFCLK_USE_PAD;
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
val |= PHY_REFCLK_SSP_EN;
writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
@@ -1152,8 +1156,14 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
res->clks[3].id = "bus_slave";
res->clks[4].id = "slave_q2a";
res->clks[5].id = "tbu";
+ if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) {
+ res->clks[6].id = "ddrss_sf_tbu";
+ res->num_clks = 7;
+ } else {
+ res->num_clks = 6;
+ }
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
if (ret < 0)
return ret;
@@ -1175,7 +1185,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret < 0)
goto err_disable_regulators;
@@ -1227,7 +1237,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
return 0;
err_disable_clocks:
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -1238,7 +1248,7 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
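
The variable clock count above follows a common clk_bulk pattern: size
the array for the largest variant, set num_clks per compatible, and pass
num_clks rather than ARRAY_SIZE() to every bulk helper.  A minimal
sketch (hypothetical clock names, not part of this patch):

#include <linux/clk.h>
#include <linux/device.h>

struct my_res {
	struct clk_bulk_data clks[3];	/* sized for the largest variant */
	int num_clks;
};

static int my_init_clks(struct device *dev, struct my_res *res,
			bool has_tbu)
{
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "bus";
	if (has_tbu) {
		res->clks[2].id = "tbu";
		res->num_clks = 3;
	} else {
		res->num_clks = 2;
	}

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret)
		return ret;

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
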
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 6ce34a1deecb..6ab694f8d283 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -64,6 +64,8 @@ int pci_host_common_probe(struct platform_device *pdev)
if (!bridge)
return -ENOMEM;
+ platform_set_drvdata(pdev, bridge);
+
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
@@ -78,8 +80,6 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
- platform_set_drvdata(pdev, bridge);
-
return pci_host_probe(bridge);
}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6db8d96a78eb..27a17a1e4a7c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1216,7 +1216,7 @@ static void hv_irq_unmask(struct irq_data *data)
params = &hbus->retarget_msi_interrupt_params;
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
- params->int_entry.source = 1; /* MSI(-X) */
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
@@ -1714,7 +1714,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
* resumed and suspended again: see hibernation_snapshot() and
* hibernation_platform_enter().
*
- * If the memory enable bit is already set, Hyper-V sliently ignores
+ * If the memory enable bit is already set, Hyper-V silently ignores
* the below BAR updates, and the related PCI device driver can not
* work, because reading from the device register(s) always returns
* 0xFFFFFFFF.
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 2470782cb01a..1c34c897a7e2 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq,
- xgene_msi_isr);
- err = irq_set_handler_data(msi_group->gic_irq, msi_group);
- if (err) {
- pr_err("failed to register GIC IRQ handler\n");
- return -EINVAL;
- }
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ xgene_msi_isr, msi_group);
+
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
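
The consolidation above (and the identical ones in pcie-altera-msi.c,
pcie-brcmstb.c and pcie-designware-host.c) relies on a property of the
combined helper:

/*
 * irq_set_chained_handler_and_data(irq, handler, data) installs the
 * flow handler and its handler data in one operation under the irq
 * descriptor lock, whereas back-to-back irq_set_chained_handler() and
 * irq_set_handler_data() calls leave a window in which the chained
 * handler can run with stale data.  Passing NULL, NULL tears the
 * handler down again, as in the remove paths.
 */
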
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 85e7c98265e8..2afdc865253e 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -173,12 +173,13 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
/*
* The v1 controller has a bug in its Configuration Request
- * Retry Status (CRS) logic: when CRS is enabled and we read the
- * Vendor and Device ID of a non-existent device, the controller
- * fabricates return data of 0xFFFF0001 ("device exists but is not
- * ready") instead of 0xFFFFFFFF ("device does not exist"). This
- * causes the PCI core to retry the read until it times out.
- * Avoid this by not claiming to support CRS.
+ * Retry Status (CRS) logic: when CRS Software Visibility is
+ * enabled and we read the Vendor and Device ID of a non-existent
+ * device, the controller fabricates return data of 0xFFFF0001
+ * ("device exists but is not ready") instead of 0xFFFFFFFF
+ * ("device does not exist"). This causes the PCI core to retry
+ * the read until it times out. Avoid this by not claiming to
+ * support CRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index e1636f7714ca..42691dd8ebef 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -204,8 +204,7 @@ static int altera_msi_remove(struct platform_device *pdev)
struct altera_msi *msi = platform_get_drvdata(pdev);
msi_writel(msi, 0, MSI_INTMASK);
- irq_set_chained_handler(msi->irq, NULL);
- irq_set_handler_data(msi->irq, NULL);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
altera_free_domains(msi);
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index d41257f43a8f..e330e6811f0b 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -97,6 +97,7 @@
#define PCIE_MISC_REVISION 0x406c
#define BRCM_PCIE_HW_REV_33 0x0303
+#define BRCM_PCIE_HW_REV_3_20 0x0320
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
@@ -187,6 +188,7 @@
struct brcm_pcie;
static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
@@ -203,6 +205,7 @@ enum {
enum pcie_type {
GENERIC,
+ BCM4908,
BCM7278,
BCM2711,
};
@@ -227,6 +230,13 @@ static const struct pcie_cfg_data generic_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const int pcie_offset_bcm7278[] = {
[RGR1_SW_INIT_1] = 0xc010,
[EXT_CFG_INDEX] = 0x9000,
@@ -279,6 +289,7 @@ struct brcm_pcie {
const int *reg_offsets;
enum pcie_type type;
struct reset_control *rescal;
+ struct reset_control *perst_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
@@ -603,8 +614,7 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)
if (!msi)
return;
- irq_set_chained_handler(msi->irq, NULL);
- irq_set_handler_data(msi->irq, NULL);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
brcm_free_domains(msi);
}
@@ -735,6 +745,17 @@ static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
+static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+{
+ if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
+ return;
+
+ if (val)
+ reset_control_assert(pcie->perst_reset);
+ else
+ reset_control_deassert(pcie->perst_reset);
+}
+
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -1194,6 +1215,7 @@ static int brcm_pcie_remove(struct platform_device *pdev)
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
@@ -1250,6 +1272,11 @@ static int brcm_pcie_probe(struct platform_device *pdev)
clk_disable_unprepare(pcie->clk);
return PTR_ERR(pcie->rescal);
}
+ pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
+ if (IS_ERR(pcie->perst_reset)) {
+ clk_disable_unprepare(pcie->clk);
+ return PTR_ERR(pcie->perst_reset);
+ }
ret = reset_control_deassert(pcie->rescal);
if (ret)
@@ -1267,6 +1294,10 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+ if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
+ goto fail;
+ }
msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (pci_msi_enabled() && msi_np == pcie->np) {
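
A note on the optional reset line added above:

/*
 * devm_reset_control_get_optional_exclusive() returns NULL rather
 * than an error when the "perst" reset is absent from the DT node,
 * and reset_control_assert()/reset_control_deassert() are NULL-safe
 * no-ops, so only the BCM4908 perst_set callback needs the
 * WARN_ONCE() guard against a missing controller.
 */
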
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index cf4c18f0c25a..23548b517e4b 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1035,14 +1035,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
err = of_pci_get_devfn(child);
if (err < 0) {
dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
+ goto error_put_node;
}
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- return err;
+ goto error_put_node;
}
err = mtk_pcie_subsys_powerup(pcie);
@@ -1058,6 +1058,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
+error_put_node:
+ of_node_put(child);
+ return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
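
The leak fixed above comes from the child-node iterator's reference
counting; a minimal sketch of the correct error path (hypothetical
helper, not part of this patch):

#include <linux/of.h>

static int my_parse_one(struct device_node *child)
{
	return 0;	/* hypothetical per-port parse */
}

static int my_parse_children(struct device_node *node)
{
	struct device_node *child;
	int err;

	for_each_available_child_of_node(node, child) {
		err = my_parse_one(child);
		if (err) {
			/*
			 * The iterator holds a reference on "child" for
			 * the current iteration and only drops it when
			 * advancing; every early exit from the loop body
			 * must drop it by hand.
			 */
			of_node_put(child);
			return err;
		}
	}
	return 0;
}
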
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
new file mode 100644
index 000000000000..04c19ff81aff
--- /dev/null
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+/* Number of MSI IRQs */
+#define MC_NUM_MSI_IRQS 32
+#define MC_NUM_MSI_IRQS_CODED 5
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
+#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_CNT 0x20
+#define DED_ERROR_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define LTSSM_STATE 0x5c
+#define LTSSM_L0_STATE 0x10
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Bridge Phy Regs */
+#define PCIE_PCI_IDS_DW1 0x9c
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+#define MC_MSI_MAX_Q_AVAIL (MC_NUM_MSI_IRQS_CODED << 1)
+#define MC_MSI_Q_SIZE (MC_NUM_MSI_IRQS_CODED << 4)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define MSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_SHIFT 1
+#define ATR_IMPL_ENABLE 1
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+
+#define ATR_ENTRY_SIZE 32
+
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 5
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 9
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
+#define EVENT_LOCAL_PM_MSI_INT_INTX 23
+#define EVENT_LOCAL_PM_MSI_INT_MSI 24
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
+#define NUM_EVENTS 28
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
+
+#define PCIE_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .base = MC_PCIE_BRIDGE_ADDR, \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
+};
+
+struct mc_port {
+ void __iomem *axi_base_addr;
+ struct device *dev;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct mc_msi msi;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
+
+struct {
+ u32 base;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base)
+{
+ struct mc_msi *msi = &port->msi;
+ u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
+ u16 msg_ctrl = readw_relaxed(base + cap_offset + PCI_MSI_FLAGS);
+
+ msg_ctrl |= PCI_MSI_FLAGS_ENABLE;
+ msg_ctrl &= ~PCI_MSI_FLAGS_QMASK;
+ msg_ctrl |= MC_MSI_MAX_Q_AVAIL;
+ msg_ctrl &= ~PCI_MSI_FLAGS_QSIZE;
+ msg_ctrl |= MC_MSI_Q_SIZE;
+ msg_ctrl |= PCI_MSI_FLAGS_64BIT;
+
+ writew_relaxed(msg_ctrl, base + cap_offset + PCI_MSI_FLAGS);
+
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ base + cap_offset + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ base + cap_offset + PCI_MSI_ADDRESS_HI);
+}
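
A worked example of the capability encoding used just above:

/*
 * The MSI capability's Multiple Message Capable field (bits 3:1 of
 * the flags word) and Multiple Message Enable field (bits 6:4) hold
 * log2 of the vector count, so the coded value 5
 * (MC_NUM_MSI_IRQS_CODED) advertises and enables 2^5 = 32 vectors
 * (MC_NUM_MSI_IRQS).  The shifts by 1 and 4 in MC_MSI_MAX_Q_AVAIL and
 * MC_MSI_Q_SIZE merely position that value within the respective
 * field.
 */
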
+
+static void mc_handle_msi(struct irq_desc *desc)
+{
+ struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct device *dev = port->dev;
+ struct mc_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+}
+
+static void mc_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ u32 bitpos = data->hwirq;
+ unsigned long status;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ if (!status)
+ writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT),
+ bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mc_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mc_msi_bottom_irq_chip = {
+ .name = "Microchip MSI",
+ .irq_ack = mc_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = mc_compose_msi_msg,
+ .irq_set_affinity = mc_msi_set_affinity,
+};
+
+static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mc_port *port = domain->host_data;
+ struct mc_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long bit;
+ u32 val;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ /* Enable MSI interrupts */
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= PM_MSI_INT_MSI_MASK;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mc_port *port = irq_data_get_irq_chip_data(d);
+ struct mc_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mc_irq_msi_domain_alloc,
+ .free = mc_irq_msi_domain_free,
+};
+
+static struct irq_chip mc_msi_irq_chip = {
+ .name = "Microchip PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mc_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mc_msi_irq_chip,
+};
+
+static int mc_allocate_msi_domains(struct mc_port *port)
+{
+ struct device *dev = port->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mc_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
+ &msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mc_handle_intx(struct irq_desc *desc)
+{
+ struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(port->intx_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+}
+
+static void mc_ack_intx_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void mc_mask_intx_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void mc_unmask_intx_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip mc_intx_irq_chip = {
+ .name = "Microchip PCIe INTx",
+ .irq_ack = mc_ack_intx_irq,
+ .irq_mask = mc_mask_intx_irq,
+ .irq_unmask = mc_unmask_intx_irq,
+};
+
+static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mc_pcie_intx_map,
+};
+
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
+
+static u32 pcie_events(void __iomem *addr)
+{
+ u32 reg = readl_relaxed(addr);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(void __iomem *addr)
+{
+ u32 reg = readl_relaxed(addr);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(void __iomem *addr)
+{
+ u32 reg = readl_relaxed(addr);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(void __iomem *addr)
+{
+ u32 reg = readl_relaxed(addr);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
+static u32 get_events(struct mc_port *port)
+{
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ u32 events = 0;
+
+ events |= pcie_events(ctrl_base_addr + PCIE_EVENT_INT);
+ events |= sec_errors(ctrl_base_addr + SEC_ERROR_INT);
+ events |= ded_errors(ctrl_base_addr + DED_ERROR_INT);
+ events |= local_events(bridge_base_addr + ISTATUS_LOCAL);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct mc_port *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_handle_event(struct irq_desc *desc)
+{
+ struct mc_port *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = get_events(port);
+
+ for_each_set_bit(bit, &events, NUM_EVENTS)
+ generic_handle_irq(irq_find_mapping(port->event_domain, bit));
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct mc_port *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
+
+static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = mc_pcie_event_map,
+};
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare,
+ clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+ * PCIe may be clocked via Fabric Interface using between 1 and 4
+ * clocks. Scan DT for clocks and enable them if present.
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_pcie_init_irq_domains(struct mc_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
+ &event_domain_ops, port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return mc_allocate_msi_domains(port);
+}
+
+static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
+ ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
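+
+/*
+ * Worked example (illustrative note, not part of this patch): for a 256 MB
+ * window, size = 0x10000000 and atr_sz = ilog2(0x10000000) - 1 = 27, which
+ * is the value mc_pcie_setup_window() above shifts into the ATR size field
+ * by ATR_SIZE_SHIFT.
+ */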
+
+static int mc_pcie_setup_windows(struct platform_device *pdev,
+ struct mc_port *port)
+{
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ mc_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mc_port *port;
+ void __iomem *bridge_base_addr;
+ void __iomem *ctrl_base_addr;
+ int ret;
+ int irq;
+ int i, intx_irq, msi_irq, event_irq;
+ u32 val;
+ int err;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+ port->dev = dev;
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(port->axi_base_addr))
+ return PTR_ERR(port->axi_base_addr);
+
+ bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+
+ port->msi.vector_phy = MSI_ADDR;
+ port->msi.num_vectors = MC_NUM_MSI_IRQS;
+ ret = mc_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "unable to request IRQ%d\n", irq);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < NUM_EVENTS; i++) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, event_irq, mc_event_handler,
+ 0, event_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return err;
+ }
+ }
+
+ intx_irq = irq_create_mapping(port->event_domain,
+ EVENT_LOCAL_PM_MSI_INT_INTX);
+ if (!intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
+
+ msi_irq = irq_create_mapping(port->event_domain,
+ EVENT_LOCAL_PM_MSI_INT_MSI);
+ if (!msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(irq, mc_handle_event, port);
+
+ /* Hardware doesn't setup MSI by default */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= PM_MSI_INT_INTX_MASK;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+
+ writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
+
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT;
+ writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
+
+ val = SEC_ERROR_INT_TX_RAM_SEC_ERR_INT |
+ SEC_ERROR_INT_RX_RAM_SEC_ERR_INT |
+ SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT |
+ SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT;
+ writel_relaxed(val, ctrl_base_addr + SEC_ERROR_INT);
+ writel_relaxed(0, ctrl_base_addr + SEC_ERROR_INT_MASK);
+ writel_relaxed(0, ctrl_base_addr + SEC_ERROR_CNT);
+
+ val = DED_ERROR_INT_TX_RAM_DED_ERR_INT |
+ DED_ERROR_INT_RX_RAM_DED_ERR_INT |
+ DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT |
+ DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT;
+ writel_relaxed(val, ctrl_base_addr + DED_ERROR_INT);
+ writel_relaxed(0, ctrl_base_addr + DED_ERROR_INT_MASK);
+ writel_relaxed(0, ctrl_base_addr + DED_ERROR_CNT);
+
+ writel_relaxed(0, bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
+
+ /* Configure Address Translation Table 0 for PCIe config space */
+ mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start & 0xffffffff,
+ cfg->res.start, resource_size(&cfg->res));
+
+ return mc_pcie_setup_windows(pdev, port);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = pci_host_common_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 4d1c4b24e537..a728e8f9ad3c 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -735,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
}
/* setup MSI data target */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
rcar_pcie_hw_enable_msi(host);
return 0;
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 904dec0d3a88..990a00e08bc5 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -82,7 +82,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
}
rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
+ "mgmt-sticky");
if (IS_ERR(rockchip->mgmt_sticky_rst)) {
if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
dev_err(dev, "missing mgmt-sticky reset property in node\n");
@@ -118,11 +118,11 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
}
if (rockchip->is_rc) {
- rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio)) {
- dev_err(dev, "missing ep-gpios property in node\n");
- return PTR_ERR(rockchip->ep_gpio);
- }
+ rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
+ "failed to get ep GPIO\n");
}
rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c
deleted file mode 100644
index 62a061f1d62e..000000000000
--- a/drivers/pci/controller/pcie-tango.c
+++ /dev/null
@@ -1,341 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/pci-ecam.h>
-#include <linux/delay.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-
-#define MSI_MAX 256
-
-#define SMP8759_MUX 0x48
-#define SMP8759_TEST_OUT 0x74
-#define SMP8759_DOORBELL 0x7c
-#define SMP8759_STATUS 0x80
-#define SMP8759_ENABLE 0xa0
-
-struct tango_pcie {
- DECLARE_BITMAP(used_msi, MSI_MAX);
- u64 msi_doorbell;
- spinlock_t used_msi_lock;
- void __iomem *base;
- struct irq_domain *dom;
-};
-
-static void tango_msi_isr(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
- unsigned long status, base, virq, idx, pos = 0;
-
- chained_irq_enter(chip, desc);
- spin_lock(&pcie->used_msi_lock);
-
- while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
- base = round_down(pos, 32);
- status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
- for_each_set_bit(idx, &status, 32) {
- virq = irq_find_mapping(pcie->dom, base + idx);
- generic_handle_irq(virq);
- }
- pos = base + 32;
- }
-
- spin_unlock(&pcie->used_msi_lock);
- chained_irq_exit(chip, desc);
-}
-
-static void tango_ack(struct irq_data *d)
-{
- struct tango_pcie *pcie = d->chip_data;
- u32 offset = (d->hwirq / 32) * 4;
- u32 bit = BIT(d->hwirq % 32);
-
- writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
-}
-
-static void update_msi_enable(struct irq_data *d, bool unmask)
-{
- unsigned long flags;
- struct tango_pcie *pcie = d->chip_data;
- u32 offset = (d->hwirq / 32) * 4;
- u32 bit = BIT(d->hwirq % 32);
- u32 val;
-
- spin_lock_irqsave(&pcie->used_msi_lock, flags);
- val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
- val = unmask ? val | bit : val & ~bit;
- writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
- spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
-}
-
-static void tango_mask(struct irq_data *d)
-{
- update_msi_enable(d, false);
-}
-
-static void tango_unmask(struct irq_data *d)
-{
- update_msi_enable(d, true);
-}
-
-static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
- bool force)
-{
- return -EINVAL;
-}
-
-static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
-{
- struct tango_pcie *pcie = d->chip_data;
- msg->address_lo = lower_32_bits(pcie->msi_doorbell);
- msg->address_hi = upper_32_bits(pcie->msi_doorbell);
- msg->data = d->hwirq;
-}
-
-static struct irq_chip tango_chip = {
- .irq_ack = tango_ack,
- .irq_mask = tango_mask,
- .irq_unmask = tango_unmask,
- .irq_set_affinity = tango_set_affinity,
- .irq_compose_msi_msg = tango_compose_msi_msg,
-};
-
-static void msi_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void msi_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void msi_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip msi_chip = {
- .name = "MSI",
- .irq_ack = msi_ack,
- .irq_mask = msi_mask,
- .irq_unmask = msi_unmask,
-};
-
-static struct msi_domain_info msi_dom_info = {
- .flags = MSI_FLAG_PCI_MSIX
- | MSI_FLAG_USE_DEF_DOM_OPS
- | MSI_FLAG_USE_DEF_CHIP_OPS,
- .chip = &msi_chip,
-};
-
-static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct tango_pcie *pcie = dom->host_data;
- unsigned long flags;
- int pos;
-
- spin_lock_irqsave(&pcie->used_msi_lock, flags);
- pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
- if (pos >= MSI_MAX) {
- spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
- return -ENOSPC;
- }
- __set_bit(pos, pcie->used_msi);
- spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
- irq_domain_set_info(dom, virq, pos, &tango_chip,
- pcie, handle_edge_irq, NULL, NULL);
-
- return 0;
-}
-
-static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
- unsigned int nr_irqs)
-{
- unsigned long flags;
- struct irq_data *d = irq_domain_get_irq_data(dom, virq);
- struct tango_pcie *pcie = d->chip_data;
-
- spin_lock_irqsave(&pcie->used_msi_lock, flags);
- __clear_bit(d->hwirq, pcie->used_msi);
- spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
-}
-
-static const struct irq_domain_ops dom_ops = {
- .alloc = tango_irq_domain_alloc,
- .free = tango_irq_domain_free,
-};
-
-static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
- int ret;
-
- /* Reads in configuration space outside devfn 0 return garbage */
- if (devfn != 0)
- return PCIBIOS_FUNC_NOT_SUPPORTED;
-
- /*
- * PCI config and MMIO accesses are muxed. Linux doesn't have a
- * mutual exclusion mechanism for config vs. MMIO accesses, so
- * concurrent accesses may cause corruption.
- */
- writel_relaxed(1, pcie->base + SMP8759_MUX);
- ret = pci_generic_config_read(bus, devfn, where, size, val);
- writel_relaxed(0, pcie->base + SMP8759_MUX);
-
- return ret;
-}
-
-static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
- int ret;
-
- writel_relaxed(1, pcie->base + SMP8759_MUX);
- ret = pci_generic_config_write(bus, devfn, where, size, val);
- writel_relaxed(0, pcie->base + SMP8759_MUX);
-
- return ret;
-}
-
-static const struct pci_ecam_ops smp8759_ecam_ops = {
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = smp8759_config_read,
- .write = smp8759_config_write,
- }
-};
-
-static int tango_pcie_link_up(struct tango_pcie *pcie)
-{
- void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
- int i;
-
- writel_relaxed(16, test_out);
- for (i = 0; i < 10; ++i) {
- u32 ltssm_state = readl_relaxed(test_out) >> 8;
- if ((ltssm_state & 0x1f) == 0xf) /* L0 */
- return 1;
- usleep_range(3000, 4000);
- }
-
- return 0;
-}
-
-static int tango_pcie_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct tango_pcie *pcie;
- struct resource *res;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct irq_domain *msi_dom, *irq_dom;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
- int virq, offset;
-
- dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
- add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
-
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pcie->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- platform_set_drvdata(pdev, pcie);
-
- if (!tango_pcie_link_up(pcie))
- return -ENODEV;
-
- if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
- return -ENOENT;
-
- if (of_pci_range_parser_one(&parser, &range) == NULL)
- return -ENOENT;
-
- range.pci_addr += range.size;
- pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;
-
- for (offset = 0; offset < MSI_MAX / 8; offset += 4)
- writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
-
- virq = platform_get_irq(pdev, 1);
- if (virq < 0)
- return virq;
-
- irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
- if (!irq_dom) {
- dev_err(dev, "Failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
- if (!msi_dom) {
- dev_err(dev, "Failed to create MSI domain\n");
- irq_domain_remove(irq_dom);
- return -ENOMEM;
- }
-
- pcie->dom = irq_dom;
- spin_lock_init(&pcie->used_msi_lock);
- irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
-
- return pci_host_common_probe(pdev);
-}
-
-static const struct of_device_id tango_pcie_ids[] = {
- {
- .compatible = "sigma,smp8759-pcie",
- .data = &smp8759_ecam_ops,
- },
- { },
-};
-
-static struct platform_driver tango_pcie_driver = {
- .probe = tango_pcie_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = tango_pcie_ids,
- .suppress_bind_attrs = true,
- },
-};
-builtin_platform_driver(tango_pcie_driver);
-
-/*
- * The root complex advertises the wrong device class.
- * Header Type 1 is for PCI-to-PCI bridges.
- */
-static void tango_fixup_class(struct pci_dev *dev)
-{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
-
-/*
- * The root complex exposes a "fake" BAR, which is used to filter
- * bus-to-system accesses. Only accesses within the range defined by this
- * BAR are forwarded to the host, others are ignored.
- *
- * By default, the DMA framework expects an identity mapping, and DRAM0 is
- * mapped at 0x80000000.
- */
-static void tango_fixup_bar(struct pci_dev *dev)
-{
- dev->non_compliant_bars = true;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index f92e0152e65e..67937facd90c 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -404,6 +404,7 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
return 0;
out:
xilinx_cpm_free_irq_domains(port);
+ of_node_put(pcie_intc_node);
dev_err(dev, "Failed to allocate IRQ domains\n");
return -ENOMEM;
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 8820d0f7ec77..5f1242ca2f4e 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -12,3 +12,16 @@ config PCI_EPF_TEST
for PCI Endpoint.
If in doubt, say "N" to disable Endpoint test driver.
+
+config PCI_EPF_NTB
+ tristate "PCI Endpoint NTB driver"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCI Endpoint. The NTB driver implements NTB
+ controller functionality using multiple PCIe endpoint instances. It
+ can support NTB endpoint function devices created using the device
+ tree.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index d6fafff080e2..96ab932a537a 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
+obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
new file mode 100644
index 000000000000..338148cf56f5
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -0,0 +1,2128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * The PCI NTB function driver configures the SoC with multiple PCIe Endpoint
+ * (EP) controller instances (see diagram below) in such a way that
+ * transactions from one EP controller are routed to the other EP controller.
+ * Once PCI NTB function driver configures the SoC with multiple EP instances,
+ * HOST1 and HOST2 can communicate with each other using the SoC as a bridge.
+ *
+ * +-------------+ +-------------+
+ * | | | |
+ * | HOST1 | | HOST2 |
+ * | | | |
+ * +------^------+ +------^------+
+ * | |
+ * | |
+ * +---------|-------------------------------------------------|---------+
+ * | +------v------+ +------v------+ |
+ * | | | | | |
+ * | | EP | | EP | |
+ * | | CONTROLLER1 | | CONTROLLER2 | |
+ * | | <-----------------------------------> | |
+ * | | | | | |
+ * | | | | | |
+ * | | | SoC With Multiple EP Instances | | |
+ * | | | (Configured using NTB Function) | | |
+ * | +-------------+ +-------------+ |
+ * +---------------------------------------------------------------------+
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+};
+
+struct epf_ntb {
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ struct pci_epf *epf;
+ u64 mws_size[MAX_MW];
+ struct config_group group;
+ struct epf_ntb_epc *epc[2];
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+
+struct epf_ntb_epc {
+ u8 func_no;
+ bool linkup;
+ bool is_msix;
+ int msix_bar;
+ u32 spad_size;
+ struct pci_epc *epc;
+ struct epf_ntb *epf_ntb;
+ void __iomem *mw_addr[6];
+ size_t msix_table_offset;
+ struct epf_ntb_ctrl *reg;
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno epf_ntb_bar[6];
+ struct delayed_work cmd_handler;
+ enum pci_epc_interface_type type;
+ const struct pci_epc_features *epc_features;
+};
+
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 mw1_offset;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
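+
+/*
+ * Layout note (illustrative, derived from the __packed definition above with
+ * MAX_DB_COUNT = 32): command sits at offset 0, addr at 16, size at 24,
+ * db_entry_size at 48, db_data[] at 52 and db_offset[] at 180, for a total
+ * of 308 bytes. Since both hosts interpret this region through a BAR,
+ * compile-time guards along these lines could protect the shared layout:
+ *
+ *   static_assert(offsetof(struct epf_ntb_ctrl, db_data) == 52);
+ *   static_assert(sizeof(struct epf_ntb_ctrl) == 308);
+ */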
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to both the hosts
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @link_up: true or false, indicating whether the link is up or down
+ *
+ * Once the NTB function in HOST1 and the NTB function in HOST2 invoke
+ * ntb_link_enable(), this NTB function driver will trigger a link event to
+ * the NTB client in both the hosts.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ enum pci_epc_interface_type type;
+ enum pci_epc_irq_type irq_type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ struct pci_epc *epc;
+ bool is_msix;
+ u8 func_no;
+ int ret;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ is_msix = ntb_epc->is_msix;
+ ctrl = ntb_epc->reg;
+ if (link_up)
+ ctrl->link_status |= LINK_STATUS_UP;
+ else
+ ctrl->link_status &= ~LINK_STATUS_UP;
+ irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to raise Link Up IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
+ * to access the memory window of the other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see MW1), i.e., it
+ * maps the OB address space of the memory window to the PCI address space.
+ *
+ * This operation requires 3 parameters:
+ * 1) Address in the outbound address space
+ * 2) Address in the PCI Address space
+ * 3) Size of the address region to be mapped
+ *
+ * The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
+ * stored in the epf_bar corresponding to BAR_DB_MW1 for MW1 and BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for the rest of the memory windows of the epf_ntb_epc
+ * that is connected to HOST1. This
+ * is populated in epf_ntb_alloc_peer_mem() in this driver.
+ *
+ * The address and size of the PCI address region that has to be mapped would
+ * be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
+ * connected to HOST2.
+ *
+ * Please note Memory window 1 (MW1) and the Doorbell registers together will
+ * be mapped to a single BAR (BAR2) above for 32-bit BARs. The exact BAR
+ * that's used for a Memory window (MW) can be obtained from
+ * epf_ntb_bar[BAR_DB_MW1], epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3] and
+ * epf_ntb_bar[BAR_MW4].
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u64 addr, size;
+ int ret = 0;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ addr = ctrl->addr;
+ size = ctrl->size;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+
+ if (size > ntb->mws_size[mw]) {
+ dev_err(&epc->dev,
+ "%s intf: MW: %d Req Sz:%llxx > Supported Sz:%llx\n",
+ pci_epc_interface_string(type), mw, size,
+ ntb->mws_size[mw]);
+ ret = -EINVAL;
+ goto err_invalid_size;
+ }
+
+ func_no = ntb_epc->func_no;
+
+ ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&epc->dev,
+ "%s intf: Failed to map memory window %d address\n",
+ pci_epc_interface_string(type), mw);
+
+err_invalid_size:
+
+ return ret;
+}
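+
+/*
+ * Worked example (illustrative): for mw = 0, mw + NTB_MW_OFFSET equals
+ * BAR_DB_MW1 (2), i.e. MW1 shares its BAR with the doorbell region, so the
+ * mapping above skips the doorbells by adding ctrl->mw1_offset to the
+ * physical address; for mw = 1, 2 or 3 the BAR holds only the memory window
+ * and no offset is applied.
+ */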
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+ func_no = ntb_epc->func_no;
+
+ pci_epc_unmap_addr(epc, func_no, phys_addr);
+}
+
+/**
+ * epf_ntb_configure_msi() - Map OB address space to MSI address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +---+-------> MSI ADDRESS |
+ *+-----------------+ | +----------------+ | +-----------------+
+ *| BAR1 | | | Doorbell 2 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR3 | | | Doorbell 4 +---+ | |
+ *+-----------------+ | |----------------+ | |
+ *| BAR4 | | | | | |
+ *+-----------------+ | | MW1 | | |
+ *| BAR5 | | | | | |
+ *+-----------------+ +----->-----------------+ | |
+ * EP CONTROLLER 1 | | | |
+ * | | | |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., it maps the OB address space
+ * corresponding to a doorbell to the MSI address in PCI address space.
+ *
+ * This operation requires 3 parameters:
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI address in the PCIe Address space
+ * 3) Number of MSI interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * pci_epc_map_msi_irq() takes the MSI address from MSI capability register
+ * and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI
+ * address.
+ *
+ * epf_ntb_configure_msi() also stores the MSI data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msi(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u16 db_count)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ u32 db_entry_size, db_data, db_offset;
+ struct pci_epf_bar *peer_epf_bar;
+ struct epf_ntb_ctrl *peer_ctrl;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ peer_ctrl = peer_ntb_epc->reg;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+
+ ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ db_entry_size, &db_data, &db_offset);
+ if (ret) {
+ dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ for (i = 0; i < db_count; i++) {
+ peer_ctrl->db_data[i] = db_data | i;
+ peer_ctrl->db_offset[i] = db_offset;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_msix() - Map OB address space to MSI-X address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
+ *+-----------------+ | +----------------+ +-----------------+
+ *| BAR1 | | | Doorbell 2 +---------+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ *+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
+ *| BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ *+-----------------+ | |----------------+ | | | |
+ *| BAR4 | | | | | | +-----------------+
+ *+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3||
+ *| BAR5 | | | | | +-----------------+
+ *+-----------------+ +----->-----------------+ | | |
+ * EP CONTROLLER 1 | | | +-----------------+
+ * | | +---->+ MSI-X ADDRESS 4 |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., it maps the OB address space
+ * corresponding to a doorbell to the MSI-X address in PCI address space.
+ *
+ * This operation requires 3 parameters:
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI-X address in the PCIe Address space
+ * 3) Number of MSI-X interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and the doorbell
+ * count is in ctrl->argument of the epf_ntb_epc that is connected to HOST2.
+ * The MSI-X table is memory mapped via ntb_epc->msix_bar at the offset given
+ * by ntb_epc->msix_table_offset; from it, epf_ntb_configure_msix() gets the
+ * MSI-X address and data for each doorbell.
+ *
+ * epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msix(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct epf_ntb_ctrl *peer_ctrl;
+ u32 db_entry_size, msg_data;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ size_t align;
+ u64 msg_addr;
+ u8 func_no;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar];
+ msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ peer_ctrl = peer_ntb_epc->reg;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ func_no = ntb_epc->func_no;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ for (i = 0; i < db_count; i++) {
+ msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
+ msg_data = msix_tbl[i].msg_data;
+ ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ db_entry_size);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to configure MSI-X IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ phys_addr = phys_addr + db_entry_size;
+ peer_ctrl->db_data[i] = msg_data;
+ peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1);
+ }
+ ntb_epc->is_msix = true;
+
+ return 0;
+}
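+
+/*
+ * Worked example (illustrative, not part of this patch): with align = 0x1000
+ * and an MSI-X table entry whose msg_addr is 0xfee00438, the loop above maps
+ * the aligned base ALIGN_DOWN(0xfee00438, 0x1000) = 0xfee00000 and stores
+ * db_offset[i] = 0xfee00438 & 0xfff = 0x438, so a peer write of db_data[i]
+ * at db_offset[i] within the i-th doorbell entry lands exactly on the MSI-X
+ * address.
+ */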
+
+/**
+ * epf_ntb_configure_db() - Configure the Outbound Address Space for one host
+ * to ring the doorbell of the other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbells to be configured
+ * @msix: Indicates whether MSI-X or MSI should be used
+ *
+ * Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi(), as required,
+ * for one HOST to ring the doorbell of the other HOST.
+ */
+static int epf_ntb_configure_db(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count, bool msix)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ int ret;
+
+ if (db_count > MAX_DB_COUNT)
+ return -EINVAL;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ if (msix)
+ ret = epf_ntb_configure_msix(ntb, type, db_count);
+ else
+ ret = epf_ntb_configure_msi(ntb, type, db_count);
+
+ if (ret)
+ dev_err(&epc->dev, "%s intf: Failed to configure DB\n",
+ pci_epc_interface_string(type));
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_db() - Unmap the OB address space mapped to the MSI/MSI-X
+ * address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Invoke pci_epc_unmap_addr() to unmap OB address to MSI/MSI-X address.
+ */
+static void
+epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+
+ pci_epc_unmap_addr(epc, func_no, phys_addr);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY)
+ *
+ * Workqueue function that gets invoked for the two epf_ntb_epc
+ * periodically (once every 5ms) to see if it has received any commands
+ * from the NTB host. The host can send commands to configure doorbells,
+ * to configure memory windows, or to update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ u16 db_count;
+ bool is_msix;
+ int ret;
+
+ ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
+ ctrl = ntb_epc->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ type = ntb_epc->type;
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ db_count = argument & DB_COUNT_MASK;
+ is_msix = argument & MSIX_ENABLE;
+ ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ epf_ntb_teardown_db(ntb, type);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, type, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, type, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb_epc->linkup = true;
+ if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
+ ntb->epc[SECONDARY_INTERFACE]->linkup) {
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ }
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_DOWN:
+ ntb_epc->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "%s intf UNKNOWN command: %d\n",
+ pci_epc_interface_string(type), command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
+ msecs_to_jiffies(5));
+}
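+
+/*
+ * Host-side view (illustrative sketch, not part of this patch): a host NTB
+ * driver drives this handler through the control region in BAR_CONFIG,
+ * roughly as follows, where ctrl is the ioremapped struct epf_ntb_ctrl and
+ * mw_index, addr and size are values chosen by the host:
+ *
+ *   writeq(addr, &ctrl->addr);
+ *   writeq(size, &ctrl->size);
+ *   writel(mw_index, &ctrl->argument);
+ *   writel(COMMAND_CONFIGURE_MW, &ctrl->command);
+ *   while (readw(&ctrl->command_status) != COMMAND_STATUS_OK)
+ *           usleep_range(100, 200);
+ *
+ * The 5 ms polling interval used by the handler bounds the latency of each
+ * such handshake.
+ */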
+
+/**
+ * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
+ * @ntb_epc: EPC context of the interface whose peer scratchpad BAR is cleared
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR1 of EP CONTROLLER 2, which contains HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+}
+
+/**
+ * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Set BAR1 of EP CONTROLLER 2, which contains HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ enum pci_barno peer_barno, barno;
+ u32 peer_spad_offset;
+ struct pci_epc *epc;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ func_no = ntb_epc->func_no;
+ epc = ntb_epc->epc;
+
+ peer_spad_offset = peer_ntb_epc->reg->spad_offset;
+ epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
+ epf_bar->size = peer_ntb_epc->spad_size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb_epc: EPC context of the interface whose config + SPAD BAR is cleared
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note that HOST2's peer
+ * scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb_epc: EPC context of the interface whose config + SPAD BAR is set
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region. While BAR0 is the default self scratchpad BAR, an
+ * NTB could have other BARs for self scratchpad (because of reserved BARs).
+ * This function can get the exact BAR used for self scratchpad from
+ * epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note that HOST2's peer
+ * scratchpad is HOST1's self scratchpad.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct epf_ntb *ntb;
+ struct pci_epc *epc;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
+ pci_epc_interface_string(ntb_epc->type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Free the Local Memory mentioned in the above diagram. After invoking this
+ * function, neither the config + self scratchpad region of HOST1 nor the peer
+ * scratchpad region of HOST2 should be accessed.
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ struct pci_epf *epf;
+
+ epf = ntb->epf;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ if (ntb_epc->reg)
+ pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+ }
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ *
+ * The sizes of both the config region and the scratchpad region have to be
+ * aligned, since the scratchpad region will also be mapped as the PEER
+ * SCRATCHPAD of the other host using a separate BAR.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *peer_epc_features, *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ size_t msix_table_size, pba_size, align;
+ enum pci_barno peer_barno, barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size, peer_size;
+ struct pci_epf *epf;
+ struct device *dev;
+ bool msix_capable;
+ u32 spad_count;
+ void *base;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ ntb_epc = ntb->epc[type];
+
+ epc_features = ntb_epc->epc_features;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_epc_features = peer_ntb_epc->epc_features;
+ peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+
+ /* Check if epc_features is populated incorrectly */
+ if ((!IS_ALIGNED(size, align)))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = spad_count * 4;
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count;
+ ctrl_size = ALIGN(ctrl_size, 8);
+ ntb_epc->msix_table_offset = ctrl_size;
+ ntb_epc->msix_bar = barno;
+		/* PBA: one pending bit per doorbell, aligned to QWORD (8 bytes) */
+ pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8);
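+		/*
+		 * e.g. (illustrative) with db_count = 32: msix_table_size =
+		 * 32 * PCI_MSIX_ENTRY_SIZE (16) = 512 bytes and pba_size =
+		 * ALIGN(DIV_ROUND_UP(32, 8), 8) = 8 bytes.
+		 */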
+ ctrl_size = ctrl_size + msix_table_size + pba_size;
+ }
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (peer_size) {
+ if (peer_size < spad_size)
+ spad_count = peer_size / 4;
+ spad_size = peer_size;
+ }
+
+ /*
+ * In order to make sure SPAD offset is aligned to its size,
+ * expand control region size to the size of SPAD if SPAD size
+ * is greater than control region size.
+ */
+ if (spad_size > ctrl_size)
+ ctrl_size = spad_size;
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
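+	/*
+	 * Resulting BAR layout (illustrative, actual sizes depend on the EPC):
+	 *   [0 .. ctrl_size)                   config region (incl. MSI-X
+	 *                                      table/PBA when applicable)
+	 *   [ctrl_size .. ctrl_size+spad_size) self scratchpad region
+	 */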
+ base = pci_epf_alloc_space(epf, size, barno, align, type);
+ if (!base) {
+		dev_err(dev, "%s intf: Config/Status/SPAD region alloc failed\n",
+ pci_epc_interface_string(type));
+ return -ENOMEM;
+ }
+
+ ntb_epc->reg = base;
+
+ ctrl = ntb_epc->reg;
+ ctrl->spad_offset = ctrl_size;
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ctrl->db_entry_size = align ? align : 4;
+ ntb_epc->spad_size = spad_size;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config +
+ * scratchpad region for each of the PRIMARY and SECONDARY interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_config_spad_bar_alloc() that allocates memory for the
+ * config + scratchpad region of each interface in turn.
+ */
+static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_config_spad_bar_alloc(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_free_peer_mem() - Free memory allocated in the peer's outbound
+ * address space
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address regions
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram.
+ * This frees Doorbell 1 to Doorbell 4 and MW1 (as well as MW2, MW3 and MW4
+ * when configured).
+ */
+static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ phys_addr_t phys_addr;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ size_t size;
+
+ epc = ntb_epc->epc;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ mw_addr = ntb_epc->mw_addr[barno];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ phys_addr = epf_bar->phys_addr;
+ size = epf_bar->size;
+ if (mw_addr) {
+ pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
+ ntb_epc->mw_addr[barno] = NULL;
+ }
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_clear() - Clear doorbell and memory BAR
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Clear doorbell and memory BARs (remove inbound ATU configuration). In the
+ * above diagram it clears BAR2 to BAR5 of EP CONTROLLER 1 (Doorbell BAR, MW1
+ * BAR, MW2 BAR, MW3 BAR and MW4 BAR).
+ */
+static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+
+ func_no = ntb_epc->func_no;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory BAR and free memory
+ * allocated in the peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_db_mw_bar_clear() to clear HOST1's BAR and
+ * epf_ntb_free_peer_mem() which frees up HOST2 outbound memory.
+ */
+static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ epf_ntb_db_mw_bar_clear(ntb_epc);
+ epf_ntb_free_peer_mem(peer_ntb_epc);
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Configure MSI/MSI-X capability for each interface with the number of
+ * interrupts equal to the "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ bool msix_capable, msi_capable;
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct device *dev;
+ u32 db_count;
+ u8 func_no;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ dev = &ntb->epf->dev;
+
+ epc_features = ntb_epc->epc_features;
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+
+ if (!(msix_capable || msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ func_no = ntb_epc->func_no;
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+ epc = ntb_epc->epc;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, func_no, db_count);
+ if (ret) {
+ dev_err(dev, "%s intf: MSI configuration failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, func_no, db_count,
+ ntb_epc->msix_bar,
+ ntb_epc->msix_table_offset);
+ if (ret) {
+			dev_err(dev, "%s intf: MSI-X configuration failed\n",
+				pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_alloc_peer_mem() - Allocate memory in the peer's outbound address
+ * space
+ * @dev: The PCI EPF device (used for error logging)
+ * @ntb_epc: EPC associated with one of the HOSTs whose BAR holds the peer's
+ * outbound address
+ * @bar: BAR of @ntb_epc for which memory has to be allocated (could be
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4)
+ * @peer_ntb_epc: EPC associated with the HOST whose outbound address space is
+ * used by @ntb_epc
+ * @size: Size of the address region that has to be allocated in the peer's
+ * OB SPACE
+ *
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Allocate memory in the OB space of EP CONTROLLER 2 in the above diagram:
+ * for Doorbell 1 to Doorbell 4 and MW1 (and MW2, MW3, MW4 when configured).
+ */
+static int epf_ntb_alloc_peer_mem(struct device *dev,
+ struct epf_ntb_epc *ntb_epc,
+ enum epf_ntb_bar bar,
+ struct epf_ntb_epc *peer_ntb_epc,
+ size_t size)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *peer_epc;
+ phys_addr_t phys_addr;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t align;
+
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
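+	/*
+	 * BAR sizes must be powers of two; e.g. a 100-byte request is first
+	 * bumped to 128 bytes and, for a hypothetical controller with
+	 * align = SZ_4K, then rounded up to 4 KiB.
+	 */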
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ peer_epc = peer_ntb_epc->epc;
+ mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
+ if (!mw_addr) {
+ dev_err(dev, "%s intf: Failed to allocate OB address\n",
+ pci_epc_interface_string(peer_ntb_epc->type));
+ return -ENOMEM;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ ntb_epc->mw_addr[barno] = mw_addr;
+
+ epf_bar->phys_addr = phys_addr;
+ epf_bar->size = size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates
+ * memory in the OB address space of HOST2 and configures the BARs of HOST1.
+ */
+static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_ctrl *ctrl;
+ u32 num_mws, db_count;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ struct device *dev;
+ size_t align;
+ int ret, i;
+ u8 func_no;
+ u64 size;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+ func_no = ntb_epc->func_no;
+ epc = ntb_epc->epc;
+ num_mws = ntb->num_mws;
+ db_count = ntb->db_count;
+
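+	/*
+	 * BAR_DB_MW1 packs the doorbell region and the first memory window
+	 * into a single BAR:
+	 *   [0 .. mw1_offset)                doorbell region
+	 *   [mw1_offset .. mw1_offset + MW1) memory window 1
+	 * where mw1_offset is the doorbell region size aligned up to the MW1
+	 * size (presumably to satisfy address-translation alignment).
+	 */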
+ for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
+ if (bar == BAR_DB_MW1) {
+ align = align ? align : 4;
+ size = db_count * align;
+ size = ALIGN(size, ntb->mws_size[i]);
+ ctrl = ntb_epc->reg;
+ ctrl->mw1_offset = size;
+ size += ntb->mws_size[i];
+ } else {
+ size = ntb->mws_size[i];
+ }
+
+ ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
+ peer_ntb_epc, size);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell mem alloc failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell BAR set failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+ }
+
+ return 0;
+
+err_alloc_peer_mem:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Unbind the NTB function device from the EPC and relinquish the reference
+ * to pci_epc for the given interface.
+ */
+static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ if (type < 0)
+ return;
+
+ epf = ntb->epf;
+ ntb_epc = ntb->epc[type];
+ if (!ntb_epc)
+ return;
+ epc = ntb_epc->epc;
+ pci_epc_remove_epf(epc, epf, type);
+ pci_epc_put(epc);
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_epc_destroy_interface() to clean up all the NTB interfaces
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_destroy_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @epc: struct pci_epc with which a particular NTB interface should be associated
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Allocate memory for NTB EPC interface and initialize it.
+ */
+static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
+ struct pci_epc *epc,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ u8 func_no;
+
+ dev = &ntb->epf->dev;
+
+ ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
+ if (!ntb_epc)
+ return -ENOMEM;
+
+ epf = ntb->epf;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ epf_bar = epf->bar;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ ntb_epc->linkup = false;
+ ntb_epc->epc = epc;
+ ntb_epc->func_no = func_no;
+ ntb_epc->type = type;
+ ntb_epc->epf_bar = epf_bar;
+ ntb_epc->epf_ntb = ntb;
+
+ epc_features = pci_epc_get_features(epc, func_no);
+ if (!epc_features)
+ return -EINVAL;
+ ntb_epc->epc_features = epc_features;
+
+ ntb->epc[type] = ntb_epc;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_create() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Get a reference to the EPC device and bind the NTB function device to that
+ * EPC for each of the interfaces. It also wraps
+ * epf_ntb_epc_create_interface(), which allocates memory for the NTB EPC
+ * interface and initializes it.
+ */
+static int epf_ntb_epc_create(struct epf_ntb *ntb)
+{
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE);
+ if (ret) {
+		dev_err(dev, "PRIMARY intf: Failed to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
+ SECONDARY_INTERFACE);
+ if (ret) {
+		dev_err(dev, "SECONDARY intf: Failed to create NTB EPC\n");
+ goto err_epc_create;
+ }
+
+ return 0;
+
+err_epc_create:
+ epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of
+ * the NTB constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD,
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4.
+ */
+static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ ntb_epc = ntb->epc[type];
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+			dev_err(dev, "%s intf: Failed to get NTB function BAR\n",
+ pci_epc_interface_string(type));
+ return barno;
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
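+
+	/*
+	 * Illustrative outcome on an EPC with no reserved BARs:
+	 * BAR_CONFIG -> BAR0, BAR_PEER_SPAD -> BAR1, BAR_DB_MW1 -> BAR2,
+	 * matching the diagrams above; reserved or 64-bit BARs shift these
+	 * assignments.
+	 */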
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_init_epc_bar_interface() to identify the free BARs
+ * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for all the interfaces.
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_init_epc_bar_interface(ntb, type);
+ if (ret) {
+			dev_err(dev, "Failed to init EPC BAR for %s interface\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init_interface() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for initializing a particular EPC interface and starting the
+ * workqueue that checks for commands from the host. This function writes to
+ * the EP controller HW in order to configure it.
+ */
+static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ ret = epf_ntb_peer_spad_bar_set(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Interrupt configuration failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_db_mw_bar_init(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: DB/MW BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_db_mw_bar_init;
+ }
+
+ ret = pci_epc_write_header(epc, func_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
+
+ INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+err_db_mw_bar_init:
+ epf_ntb_peer_spad_bar_clear(ntb->epc[type]);
+
+err_peer_spad_bar_init:
+ epf_ntb_config_sspad_bar_clear(ntb->epc[type]);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup_interface() - Cleanup NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for cleaning up a particular NTB interface.
+ */
+static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+
+ if (type < 0)
+ return;
+
+ ntb_epc = ntb->epc[type];
+ cancel_delayed_work(&ntb_epc->cmd_handler);
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+ epf_ntb_peer_spad_bar_clear(ntb_epc);
+ epf_ntb_config_sspad_bar_clear(ntb_epc);
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for cleaning up all the NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_cleanup_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for initializing all the NTB interfaces and starting the workqueue
+ * that checks for commands from the host.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_epc_init_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Failed to initialize\n",
+ pci_epc_interface_string(type));
+ goto err_init_type;
+ }
+ }
+
+ return 0;
+
+err_init_type:
+ epf_ntb_epc_cleanup_interface(ntb, type - 1);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize both the endpoint controllers associated with the NTB function
+ * device.
+ * Invoked when a primary interface or secondary interface is bound to EPC
+ * device. This function will succeed only when EPC is bound to both the
+ * interfaces.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ if (!epf->sec_epc) {
+ dev_dbg(dev, "SECONDARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_epc_create(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+		dev_err(dev, "Failed to initialize NTB EPC BAR\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int win_no; \
+ \
+ sscanf(#_name, "mw%d", &win_no); \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (ntb->num_mws < win_no) { \
+		dev_err(dev, "Invalid num_mws: %d\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
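+
+/*
+ * For instance, EPF_NTB_MW_W(mw1) expands to epf_ntb_mw1_store(), which
+ * parses the window number from the attribute name ("mw1" -> win_no = 1)
+ * and records the requested size in ntb->mws_size[0].
+ */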
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: The parent config group under which the NTB directory is added
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count, num_mws, etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
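+
+/*
+ * The group registered above exposes the attributes in epf_ntb_attrs
+ * (spad_count, db_count, num_mws and mw1..mw4) as files in a per-function
+ * configfs directory; the exact path depends on where configfs is mounted
+ * and on the name given to the function device.
+ */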
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when the endpoint function bus detects an NTB
+ * endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+}
+
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_ntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_ntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+					    WQ_HIGHPRI, 0);
+	if (!kpcintb_workqueue) {
+		pr_err("Failed to allocate the kpcintb work queue\n");
+		return -ENOMEM;
+	}
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index e4e51d884553..c0ac4e9cbe72 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -619,7 +619,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
if (epf_test->reg[bar]) {
pci_epc_clear_bar(epc, epf->func_no, epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
}
}
}
@@ -651,7 +652,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -771,7 +773,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align);
+ epc_features->align, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -789,7 +791,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align);
+ epc_features->align,
+ PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -834,6 +837,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
linkup_notifier = epc_features->linkup_notifier;
core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ if (test_reg_bar < 0)
+ return -EINVAL;
pci_epf_configure_bar(epf, epc_features);
}
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 3710adf51912..f3a8b833b479 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -21,6 +21,9 @@ static struct config_group *controllers_group;
struct pci_epf_group {
struct config_group group;
+ struct config_group primary_epc_group;
+ struct config_group secondary_epc_group;
+ struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
};
@@ -41,6 +44,127 @@ static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
return container_of(to_config_group(item), struct pci_epc_group, group);
}
+static int pci_secondary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, SECONDARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_secondary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_secondary_epc_item_ops = {
+ .allow_link = pci_secondary_epc_epf_link,
+ .drop_link = pci_secondary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_secondary_epc_type = {
+ .ct_item_ops = &pci_secondary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_secondary_group(struct pci_epf_group *epf_group)
+{
+	struct config_group *secondary_epc_group;
+	int ret;
+
+	secondary_epc_group = &epf_group->secondary_epc_group;
+	config_group_init_type_name(secondary_epc_group, "secondary",
+				    &pci_secondary_epc_type);
+	ret = configfs_register_group(&epf_group->group, secondary_epc_group);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return secondary_epc_group;
+}
+
+static int pci_primary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_primary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_primary_epc_item_ops = {
+ .allow_link = pci_primary_epc_epf_link,
+ .drop_link = pci_primary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_primary_epc_type = {
+ .ct_item_ops = &pci_primary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_primary_group(struct pci_epf_group *epf_group)
+{
+	struct config_group *primary_epc_group = &epf_group->primary_epc_group;
+	int ret;
+
+	config_group_init_type_name(primary_epc_group, "primary",
+				    &pci_primary_epc_type);
+	ret = configfs_register_group(&epf_group->group, primary_epc_group);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return primary_epc_group;
+}
+
static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
size_t len)
{
@@ -94,13 +218,13 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- ret = pci_epc_add_epf(epc, epf);
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
if (ret)
return ret;
ret = pci_epf_bind(epf);
if (ret) {
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
return ret;
}
@@ -120,7 +244,7 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
pci_epf_unbind(epf);
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
}
static struct configfs_item_operations pci_epc_item_ops = {
@@ -366,12 +490,53 @@ static struct configfs_item_operations pci_epf_ops = {
.release = pci_epf_release,
};
+static struct config_group *pci_epf_type_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(&group->cg_item);
+ struct config_group *epf_type_group;
+
+ epf_type_group = pci_epf_type_add_cfs(epf_group->epf, group);
+ return epf_type_group;
+}
+
+static void pci_epf_type_drop(struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_type_group_ops = {
+ .make_group = &pci_epf_type_make,
+ .drop_item = &pci_epf_type_drop,
+};
+
static const struct config_item_type pci_epf_type = {
+ .ct_group_ops = &pci_epf_type_group_ops,
.ct_item_ops = &pci_epf_ops,
.ct_attrs = pci_epf_attrs,
.ct_owner = THIS_MODULE,
};
+static void pci_epf_cfs_work(struct work_struct *work)
+{
+ struct pci_epf_group *epf_group;
+ struct config_group *group;
+
+ epf_group = container_of(work, struct pci_epf_group, cfs_work.work);
+ group = pci_ep_cfs_add_primary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'primary' EPC interface\n");
+ return;
+ }
+
+ group = pci_ep_cfs_add_secondary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'secondary' EPC interface\n");
+ return;
+ }
+}
+
static struct config_group *pci_epf_make(struct config_group *group,
const char *name)
{
@@ -410,10 +575,15 @@ static struct config_group *pci_epf_make(struct config_group *group,
goto free_name;
}
+ epf->group = &epf_group->group;
epf_group->epf = epf;
kfree(epf_name);
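+	/*
+	 * Defer creation of the "primary"/"secondary" sub-directories to a
+	 * workqueue; they are not registered here directly, presumably
+	 * because configfs is still instantiating the parent group at this
+	 * point.
+	 */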
+ INIT_DELAYED_WORK(&epf_group->cfs_work, pci_epf_cfs_work);
+ queue_delayed_work(system_wq, &epf_group->cfs_work,
+ msecs_to_jiffies(1));
+
return &epf_group->group;
free_name:
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index cadd3db0cbb0..cc8f9eb2b177 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -87,24 +87,50 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* pci_epc_get_first_free_bar() - helper to get first unreserved BAR
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
- * Invoke to get the first unreserved BAR that can be used for endpoint
+ * Invoke to get the first unreserved BAR that can be used by the endpoint
* function. For any incorrect value in reserved_bar return '0'.
*/
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
- *epc_features)
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
- int free_bar;
+ return pci_epc_get_next_free_bar(epc_features, BAR_0);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+
+/**
+ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ * @bar: the starting BAR number from where unreserved BAR should be searched
+ *
+ * Invoke to get the next unreserved BAR, starting from @bar, that can be used
+ * by the endpoint function. Returns NO_BAR when no unreserved BAR is found.
+ */
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar)
+{
+ unsigned long free_bar;
if (!epc_features)
- return 0;
+ return BAR_0;
+
+ /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
+ if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ bar++;
+
+ /* Find if the reserved BAR is also a 64-bit BAR */
+ free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
- free_bar = ffz(epc_features->reserved_bar);
+ /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
+ free_bar <<= 1;
+ free_bar |= epc_features->reserved_bar;
+
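+	/*
+	 * e.g. (illustrative) with BAR0 reserved and 64-bit (reserved_bar =
+	 * 0x1, bar_fixed_64bit = 0x1) the mask becomes 0x3, so the search
+	 * below returns BAR_2 as the first free BAR.
+	 */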
+ free_bar = find_next_zero_bit(&free_bar, 6, bar);
if (free_bar > 5)
- return 0;
+ return NO_BAR;
return free_bar;
}
-EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
/**
* pci_epc_get_features() - get the features supported by EPC
@@ -205,6 +231,47 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
/**
+ * pci_epc_map_msi_irq() - Map physical address to MSI address and return
+ * MSI data
+ * @epc: the EPC device which has the MSI capability
+ * @func_no: the physical endpoint function number in the EPC device
+ * @phys_addr: the physical address of the outbound region
+ * @interrupt_num: the MSI interrupt number
+ * @entry_size: Size of Outbound address region for each interrupt
+ * @msi_data: the data that should be written in order to raise MSI interrupt
+ * with interrupt number as 'interrupt num'
+ * @msi_addr_offset: Offset of MSI address from the aligned outbound address
+ * to which the MSI address is mapped
+ *
+ * Invoke to map physical address to MSI address and return MSI data. The
+ * physical address should be an address in the outbound region. This is
+ * required to implement doorbell functionality of NTB wherein EPC on either
+ * side of the interface (primary and secondary) can directly write to the
+ * physical address (in outbound region) of the other interface to ring
+ * doorbell.
+ */
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
+ u8 interrupt_num, u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (!epc->ops->map_msi_irq)
+ return -EINVAL;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
+ entry_size, msi_data, msi_addr_offset);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
+
+/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts was requested
* @func_no: the endpoint function number in the EPC device
@@ -467,21 +534,28 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
* pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
* @epc: the EPC device to which the endpoint function should be added
* @epf: the endpoint function to be added
+ * @type: Identifies if the EPC is connected to the primary or secondary
+ * interface of EPF
*
* A PCI endpoint device can have one or more functions. In the case of PCIe,
* the specification allows up to 8 PCIe endpoint functions. Invoke
* pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
*/
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
u32 func_no;
int ret = 0;
- if (epf->epc)
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (type == PRIMARY_INTERFACE && epf->epc)
return -EBUSY;
- if (IS_ERR(epc))
- return -EINVAL;
+ if (type == SECONDARY_INTERFACE && epf->sec_epc)
+ return -EBUSY;
mutex_lock(&epc->lock);
func_no = find_first_zero_bit(&epc->function_num_map,
@@ -498,11 +572,17 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
}
set_bit(func_no, &epc->function_num_map);
- epf->func_no = func_no;
- epf->epc = epc;
-
- list_add_tail(&epf->list, &epc->pci_epf);
+ if (type == PRIMARY_INTERFACE) {
+ epf->func_no = func_no;
+ epf->epc = epc;
+ list = &epf->list;
+ } else {
+ epf->sec_epc_func_no = func_no;
+ epf->sec_epc = epc;
+ list = &epf->sec_epc_list;
+ }
+ list_add_tail(list, &epc->pci_epf);
ret:
mutex_unlock(&epc->lock);
@@ -517,14 +597,26 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*
* Invoke to remove PCI endpoint function from the endpoint controller.
*/
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
+ u32 func_no = 0;
+
if (!epc || IS_ERR(epc) || !epf)
return;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ list = &epf->list;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ list = &epf->sec_epc_list;
+ }
+
mutex_lock(&epc->lock);
- clear_bit(epf->func_no, &epc->function_num_map);
- list_del(&epf->list);
+ clear_bit(func_no, &epc->function_num_map);
+ list_del(list);
epf->epc = NULL;
mutex_unlock(&epc->lock);
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index c977cf9dce56..7646c8660d42 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,6 +21,38 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
+ * pci_epf_type_add_cfs() - Help function drivers to expose function specific
+ * attributes in configfs
+ * @epf: the EPF device that has to be configured using configfs
+ * @group: the parent configfs group (corresponding to entries in
+ * pci_epf_device_id)
+ *
+ * Invoke to expose function specific attributes in configfs. If the function
+ * driver does not have anything to expose (attributes configured by user),
+ * return NULL.
+ */
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct config_group *epf_type_group;
+
+ if (!epf->driver) {
+ dev_err(&epf->dev, "epf device not bound to driver\n");
+ return NULL;
+ }
+
+ if (!epf->driver->ops->add_cfs)
+ return NULL;
+
+ mutex_lock(&epf->lock);
+ epf_type_group = epf->driver->ops->add_cfs(epf, group);
+ mutex_unlock(&epf->lock);
+
+ return epf_type_group;
+}
+EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
+
+/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -74,24 +106,37 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
* @bar: the BAR number corresponding to the register space
+ * @type: Identifies if the allocated space is for primary EPC or secondary EPC
*
* Invoke to free the allocated PCI EPF register space.
*/
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type)
{
struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc;
if (!addr)
return;
- dma_free_coherent(dev, epf->bar[bar].size, addr,
- epf->bar[bar].phys_addr);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
- epf->bar[bar].phys_addr = 0;
- epf->bar[bar].addr = NULL;
- epf->bar[bar].size = 0;
- epf->bar[bar].barno = 0;
- epf->bar[bar].flags = 0;
+ dev = epc->dev.parent;
+ dma_free_coherent(dev, epf_bar[bar].size, addr,
+ epf_bar[bar].phys_addr);
+
+ epf_bar[bar].phys_addr = 0;
+ epf_bar[bar].addr = NULL;
+ epf_bar[bar].size = 0;
+ epf_bar[bar].barno = 0;
+ epf_bar[bar].flags = 0;
}
EXPORT_SYMBOL_GPL(pci_epf_free_space);
@@ -101,15 +146,18 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
* @align: alignment size for the allocation region
+ * @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align)
+ size_t align, enum pci_epc_interface_type type)
{
- void *space;
- struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
+ struct pci_epc *epc;
+ struct device *dev;
+ void *space;
if (size < 128)
size = 128;
@@ -119,17 +167,26 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
else
size = roundup_pow_of_two(size);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
}
- epf->bar[bar].phys_addr = phys_addr;
- epf->bar[bar].addr = space;
- epf->bar[bar].size = size;
- epf->bar[bar].barno = bar;
- epf->bar[bar].flags |= upper_32_bits(size) ?
+ epf_bar[bar].phys_addr = phys_addr;
+ epf_bar[bar].addr = space;
+ epf_bar[bar].size = size;
+ epf_bar[bar].barno = bar;
+ epf_bar[bar].flags |= upper_32_bits(size) ?
PCI_BASE_ADDRESS_MEM_TYPE_64 :
PCI_BASE_ADDRESS_MEM_TYPE_32;
@@ -282,22 +339,6 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf)
-{
- if (!id || !epf)
- return NULL;
-
- while (*id->name) {
- if (strcmp(epf->name, id->name) == 0)
- return id;
- id++;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_epf_match_device);
-
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index a2094c07af6a..a74b274a8c45 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -176,9 +176,6 @@ int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
-/* acpiphp_glue.c */
-typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
-
int acpiphp_enable_slot(struct acpiphp_slot *slot);
int acpiphp_disable_slot(struct acpiphp_slot *slot);
u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 139869d50eb2..fdaf86a888b7 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -21,8 +21,9 @@
#include "pci-bridge-emul.h"
#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
+#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
/**
* struct pci_bridge_reg_behavior - register bits behaviors
@@ -46,7 +47,8 @@ struct pci_bridge_reg_behavior {
u32 w1c;
};
-static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+static const
+struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
[PCI_VENDOR_ID / 4] = { .ro = ~0 },
[PCI_COMMAND / 4] = {
.rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -164,7 +166,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
},
};
-static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+static const
+struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
[PCI_CAP_LIST_ID / 4] = {
/*
* Capability ID, Next Capability Pointer and
@@ -260,6 +263,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
unsigned int flags)
{
+ BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
+
bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fb072f4b3176..f8afd54ca3e1 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -927,6 +927,9 @@ void pci_create_legacy_files(struct pci_bus *b)
{
int error;
+ if (!sysfs_initialized)
+ return;
+
b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
GFP_ATOMIC);
if (!b->legacy_io)
@@ -939,6 +942,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io;
b->legacy_io->mmap = pci_mmap_legacy_io;
+ b->legacy_io->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_io);
error = device_create_bin_file(&b->dev, b->legacy_io);
if (error)
@@ -951,6 +955,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
+	b->legacy_mem->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error)
@@ -1166,6 +1171,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
res_attr->mmap = pci_mmap_resource_uc;
}
}
+ if (res_attr->mmap)
+ res_attr->mapping = iomem_get_mapping();
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = 0600;
res_attr->size = pci_resource_len(pdev, num);
@@ -1448,6 +1455,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pci_sysfs_init(void)
{
struct pci_dev *pdev = NULL;
+ struct pci_bus *pbus = NULL;
int retval;
sysfs_initialized = 1;
@@ -1459,6 +1467,9 @@ static int __init pci_sysfs_init(void)
}
}
+ while ((pbus = pci_find_next_bus(pbus)))
+ pci_create_legacy_files(pbus);
+
return 0;
}
late_initcall(pci_sysfs_init);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b9fecc25d213..16a17215f633 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1558,7 +1558,6 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1644,7 +1643,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
res = pdev->resource + bar_idx;
- size = ilog2(resource_size(res)) - 20;
+ size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
@@ -1665,7 +1664,6 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -3353,11 +3351,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
@@ -3603,8 +3596,16 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
return 0;
pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
- return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+ cap &= PCI_REBAR_CAP_SIZES;
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+ bar == 0 && cap == 0x7000)
+ cap = 0x3f000;
+
+ return cap >> 4;
}
+EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
/**
* pci_rebar_get_current_size - get the current size of a BAR
@@ -4029,6 +4030,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
ret = logic_pio_register_range(range);
if (ret)
kfree(range);
+
+ /* Ignore duplicates due to deferred probing */
+ if (ret == -EEXIST)
+ ret = 0;
#endif
return ret;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5c59365092fa..ef7c4661314f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -582,15 +582,11 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -630,7 +626,6 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#endif
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 3946555a6042..45a2ef702b45 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -133,14 +133,6 @@ config PCIE_PTM
This is only useful if you have devices that support PTM, but it
is safe to enable even if you don't.
-config PCIE_BW
- bool "PCI Express Bandwidth Change Notification"
- depends on PCIEPORTBUS
- help
- This enables PCI Express Bandwidth Change Notification. If
- you know link width or rate changes occur only to correct
- unreliable links, you may answer Y.
-
config PCIE_EDR
bool "PCI Express Error Disconnect Recover support"
depends on PCIE_DPC && ACPI
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index d9697892fa3e..b2980db88cc0 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -12,5 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
obj-$(CONFIG_PCIE_PTM) += ptm.o
-obj-$(CONFIG_PCIE_BW) += bw_notification.o
obj-$(CONFIG_PCIE_EDR) += edr.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 77b0f2c45bc0..ba22388342d1 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1388,7 +1388,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
if (type == PCI_EXP_TYPE_RC_END)
root = dev->rcec;
else
- root = dev;
+ root = pcie_find_root_port(dev);
/*
* If the platform retained control of AER, an RCiEP may not have
@@ -1414,7 +1414,8 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
}
} else {
rc = pci_bus_error_reset(dev);
- pci_info(dev, "Root Port link has been reset (%d)\n", rc);
+ pci_info(dev, "%s Port link has been reset (%d)\n",
+ pci_is_root_bus(dev->bus) ? "Root" : "Downstream", rc);
}
if ((host->native_aer || pcie_ports_native) && aer) {
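For reference, aer_root_reset() can now be entered for a device that is not itself the Root Port; pcie_find_root_port() walks up the bridge chain to find it. A rough sketch of what that helper does (the real one is an inline in include/linux/pci.h):

static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
	while (dev) {
		if (pci_is_pcie(dev) &&
		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
			return dev;
		dev = pci_upstream_bridge(dev);	/* NULL at a root bus */
	}
	return NULL;
}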
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a08e7d6dc248..ac0557a305af 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -734,50 +734,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, cap++);
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, *cap++);
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, *cap++);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
deleted file mode 100644
index 565d23cccb8b..000000000000
--- a/drivers/pci/pcie/bw_notification.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * PCI Express Link Bandwidth Notification services driver
- * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
- *
- * Copyright (C) 2019, Dell Inc
- *
- * The PCIe Link Bandwidth Notification provides a way to notify the
- * operating system when the link width or data rate changes. This
- * capability is required for all root ports and downstream ports
- * supporting links wider than x1 and/or multiple link speeds.
- *
- * This service port driver hooks into the bandwidth notification interrupt
- * and warns when links become degraded in operation.
- */
-
-#define dev_fmt(fmt) "bw_notification: " fmt
-
-#include "../pci.h"
-#include "portdrv.h"
-
-static bool pcie_link_bandwidth_notification_supported(struct pci_dev *dev)
-{
- int ret;
- u32 lnk_cap;
-
- ret = pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnk_cap);
- return (ret == PCIBIOS_SUCCESSFUL) && (lnk_cap & PCI_EXP_LNKCAP_LBNC);
-}
-
-static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
-{
- u16 lnk_ctl;
-
- pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
-
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
- lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
-}
-
-static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
-{
- u16 lnk_ctl;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
- lnk_ctl &= ~PCI_EXP_LNKCTL_LBMIE;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
-}
-
-static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
-{
- struct pcie_device *srv = context;
- struct pci_dev *port = srv->port;
- u16 link_status, events;
- int ret;
-
- ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
- events = link_status & PCI_EXP_LNKSTA_LBMS;
-
- if (ret != PCIBIOS_SUCCESSFUL || !events)
- return IRQ_NONE;
-
- pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
- pcie_update_link_speed(port->subordinate, link_status);
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
-{
- struct pcie_device *srv = context;
- struct pci_dev *port = srv->port;
- struct pci_dev *dev;
-
- /*
- * Print status from downstream devices, not this root port or
- * downstream switch port.
- */
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &port->subordinate->devices, bus_list)
- pcie_report_downtraining(dev);
- up_read(&pci_bus_sem);
-
- return IRQ_HANDLED;
-}
-
-static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
-{
- int ret;
-
- /* Single-width or single-speed ports do not have to support this. */
- if (!pcie_link_bandwidth_notification_supported(srv->port))
- return -ENODEV;
-
- ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
- pcie_bw_notification_handler,
- IRQF_SHARED, "PCIe BW notif", srv);
- if (ret)
- return ret;
-
- pcie_enable_link_bandwidth_notification(srv->port);
- pci_info(srv->port, "enabled with IRQ %d\n", srv->irq);
-
- return 0;
-}
-
-static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
-{
- pcie_disable_link_bandwidth_notification(srv->port);
- free_irq(srv->irq, srv);
-}
-
-static int pcie_bandwidth_notification_suspend(struct pcie_device *srv)
-{
- pcie_disable_link_bandwidth_notification(srv->port);
- return 0;
-}
-
-static int pcie_bandwidth_notification_resume(struct pcie_device *srv)
-{
- pcie_enable_link_bandwidth_notification(srv->port);
- return 0;
-}
-
-static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
- .name = "pcie_bw_notification",
- .port_type = PCIE_ANY_PORT,
- .service = PCIE_PORT_SERVICE_BWNOTIF,
- .probe = pcie_bandwidth_notification_probe,
- .suspend = pcie_bandwidth_notification_suspend,
- .resume = pcie_bandwidth_notification_resume,
- .remove = pcie_bandwidth_notification_remove,
-};
-
-int __init pcie_bandwidth_notification_init(void)
-{
- return pcie_port_service_register(&pcie_bandwidth_notification_driver);
-}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 510f31f0ef6d..b576aa890c76 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -198,8 +198,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
pci_walk_bridge(bridge, report_frozen_detected, &status);
- status = reset_subordinates(bridge);
- if (status != PCI_ERS_RESULT_RECOVERED) {
+ if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
pci_warn(bridge, "subordinate device reset failed\n");
goto failed;
}
@@ -231,15 +230,14 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, report_resume, &status);
/*
- * If we have native control of AER, clear error status in the Root
- * Port or Downstream Port that signaled the error. If the
- * platform retained control of AER, it is responsible for clearing
- * this status. In that case, the signaling device may not even be
- * visible to the OS.
+ * If we have native control of AER, clear error status in the device
+ * that detected the error. If the platform retained control of AER,
+ * it is responsible for clearing this status. In that case, the
+ * signaling device may not even be visible to the OS.
*/
if (host->native_aer || pcie_ports_native) {
- pcie_clear_device_status(bridge);
- pci_aer_clear_nonfatal_status(bridge);
+ pcie_clear_device_status(dev);
+ pci_aer_clear_nonfatal_status(dev);
}
pci_info(bridge, "device recovery successful\n");
return status;
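Clearing error status on dev (the device that detected the error) rather than on bridge matters because, per the rewritten comment, the detecting device need not be the bridge at all. Clearing Device Status itself amounts to reading the register and writing the RW1C bits back; a hedged sketch (hypothetical name) consistent with what pcie_clear_device_status() has to do:

static void clear_device_status_sketch(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	/* Device Status bits are RW1C: writing them back clears them */
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}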
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index af7cf237432a..2ff5724b8f13 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -53,12 +53,6 @@ int pcie_dpc_init(void);
static inline int pcie_dpc_init(void) { return 0; }
#endif
-#ifdef CONFIG_PCIE_BW
-int pcie_bandwidth_notification_init(void);
-#else
-static inline int pcie_bandwidth_notification_init(void) { return 0; }
-#endif
-
/* Port Type */
#define PCIE_ANY_PORT (~0)
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0b250bc5f405..c7ff1eea225a 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -153,7 +153,8 @@ static void pcie_portdrv_remove(struct pci_dev *dev)
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
pci_channel_state_t error)
{
- /* Root Port has no impact. Always recovers. */
+ if (error == pci_channel_io_frozen)
+ return PCI_ERS_RESULT_NEED_RESET;
return PCI_ERS_RESULT_CAN_RECOVER;
}
@@ -255,7 +256,6 @@ static void __init pcie_init_services(void)
pcie_pme_init();
pcie_dpc_init();
pcie_hp_init();
- pcie_bandwidth_notification_init();
}
static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index d35186b01d98..9bab07302bbf 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -274,6 +274,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
else
return -EINVAL;
}
+
+ if (dev->resource[i].flags & IORESOURCE_MEM &&
+ iomem_is_exclusive(dev->resource[i].start))
+ return -EINVAL;
+
ret = pci_mmap_page_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
@@ -293,6 +298,7 @@ static int proc_bus_pci_open(struct inode *inode, struct file *file)
fpriv->write_combine = 0;
file->private_data = fpriv;
+ file->f_mapping = iomem_get_mapping();
return 0;
}
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 2061672954ee..b4c138a6ec02 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -168,7 +168,6 @@ struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
struct list_head *n;
struct pci_bus *b = NULL;
- WARN_ON(in_interrupt());
down_read(&pci_bus_sem);
n = from ? from->node.next : pci_root_buses.next;
if (n != &pci_root_buses)
@@ -196,7 +195,6 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn)
{
struct pci_dev *dev;
- WARN_ON(in_interrupt());
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -274,7 +272,6 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
struct device *dev_start = NULL;
struct pci_dev *pdev = NULL;
- WARN_ON(in_interrupt());
if (from)
dev_start = &from->dev;
dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
@@ -381,7 +378,6 @@ int pci_dev_present(const struct pci_device_id *ids)
{
struct pci_dev *found = NULL;
- WARN_ON(in_interrupt());
while (ids->vendor || ids->subvendor || ids->class_mask) {
found = pci_get_dev_by_id(ids, NULL);
if (found) {
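The dropped WARN_ON(in_interrupt()) checks were redundant: each of these lookup helpers immediately takes pci_bus_sem with down_read(), a sleeping lock, so calling them from interrupt context was already forbidden, and in_interrupt() is being phased out of driver code. If a context assertion were still wanted, a sketch of the idiomatic form (hypothetical wrapper name, body elided):

struct pci_dev *pci_get_slot_sketch(struct pci_bus *bus, unsigned int devfn)
{
	might_sleep();	/* expresses the real constraint directly */
	down_read(&pci_bus_sem);
	/* ... bus->devices walk as in pci_get_slot() ... */
	up_read(&pci_bus_sem);
	return NULL;
}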
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 43eda101fcf4..7f1acb3918d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -410,10 +410,16 @@ EXPORT_SYMBOL(pci_release_resource);
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
struct resource *res = dev->resource + resno;
+ struct pci_host_bridge *host;
int old, ret;
u32 sizes;
u16 cmd;
+ /* Check if we must preserve the firmware's resource assignment */
+ host = pci_find_host_bridge(dev->bus);
+ if (host->preserve_config)
+ return -ENOTSUPP;
+
/* Make sure the resource isn't assigned before resizing it. */
if (!(res->flags & IORESOURCE_UNSET))
return -EBUSY;
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 31e39558d49d..8b003c890b87 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u16 word;
u32 dword;
long err;
- long cfg_ret;
+ int cfg_ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
}
err = -EIO;
- if (cfg_ret != PCIBIOS_SUCCESSFUL)
+ if (cfg_ret)
goto error;
switch (len) {
@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_byte(dev, off, byte);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_word(dev, off, word);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_dword(dev, off, dword);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
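The type and comparison changes in this file encode the same fact: pci_user_read/write_config_*() return 0 on success or a negative errno, not PCIBIOS_* codes, so an int holds the result and a plain non-zero test is the faithful check. Comparing against PCIBIOS_SUCCESSFUL (0) happened to work, but implied the wrong contract. A condensed sketch of the corrected pattern (dev, off, byte assumed from the surrounding syscall body):

int cfg_ret;

cfg_ret = pci_user_read_config_byte(dev, off, &byte);
if (cfg_ret)		/* 0 on success, -errno on failure */
	return -EIO;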
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index cf109d9a1112..e6939103991b 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1554,7 +1554,7 @@ static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
if (off + count > size)
count = size - off;
- s = to_socket(container_of(kobj, struct device, kobj));
+ s = to_socket(kobj_to_dev(kobj));
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
@@ -1581,7 +1581,7 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
if (error)
return error;
- s = to_socket(container_of(kobj, struct device, kobj));
+ s = to_socket(kobj_to_dev(kobj));
if (off)
return -EINVAL;
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 11783410223b..29fdd174bc23 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -238,7 +238,7 @@ static int pcmcia_probe(struct sa1111_dev *dev)
return ret;
}
-static int pcmcia_remove(struct sa1111_dev *dev)
+static void pcmcia_remove(struct sa1111_dev *dev)
{
struct sa1111_pcmcia_socket *next, *s = dev_get_drvdata(&dev->dev);
@@ -252,7 +252,6 @@ static int pcmcia_remove(struct sa1111_dev *dev)
release_mem_region(dev->res.start, 512);
sa1111_disable_device(dev);
- return 0;
}
static struct sa1111_driver pcmcia_driver = {
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 3075cf171f78..77522e5efe11 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -62,7 +62,7 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on ARM64 && ACPI && ARM_SMMU_V3
+ depends on ARM64 && ACPI
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 87c4be9dd412..f81e2ec90005 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1026,12 +1026,11 @@ static void pmu_event_set_period(struct perf_event *event)
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
- unsigned long flags;
struct cci_pmu *cci_pmu = dev;
struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
int idx, handled = IRQ_NONE;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock(&events->pmu_lock);
/* Disable the PMU while we walk through the counters */
__cci_pmu_disable(cci_pmu);
@@ -1061,7 +1060,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
/* Enable the PMU and sync possibly overflowed counters */
__cci_pmu_enable_sync(cci_pmu);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock(&events->pmu_lock);
return IRQ_RETVAL(handled);
}
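The irqsave/irqrestore pair was unnecessary here: pmu_handle_irq() is a hardirq handler, and hardirq handlers run with local interrupts already disabled, so the saved flags word could never differ from "IRQs off". The same simplification appears in xgene_pmu_isr() further down. A minimal sketch of the pattern, with hypothetical names:

static irqreturn_t example_pmu_irq(int irq, void *data)
{
	struct example_pmu *pmu = data;	/* hypothetical driver type */

	raw_spin_lock(&pmu->lock);	/* local IRQs are already off */
	/* ... read counters, fold overflows ... */
	raw_spin_unlock(&pmu->lock);

	return IRQ_HANDLED;
}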
@@ -1376,7 +1375,7 @@ static struct attribute *pmu_attrs[] = {
NULL,
};
-static struct attribute_group pmu_attr_group = {
+static const struct attribute_group pmu_attr_group = {
.attrs = pmu_attrs,
};
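This const-ification repeats across the perf drivers below: the sysfs and perf registration paths take const struct attribute_group *, and none of these groups is modified after init, so they can live in .rodata. The pattern in miniature (names hypothetical; the cpumask attribute is assumed declared elsewhere via DEVICE_ATTR):

static struct attribute *example_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};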
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index a76ff594f3ca..1328159fe564 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -616,7 +616,7 @@ static struct attribute *arm_cmn_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group arm_cmn_cpumask_attr_group = {
+static const struct attribute_group arm_cmn_cpumask_attr_group = {
.attrs = arm_cmn_cpumask_attrs,
};
@@ -1150,7 +1150,7 @@ static int arm_cmn_commit_txn(struct pmu *pmu)
static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_cmn *cmn;
- unsigned int target;
+ unsigned int i, target;
cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node);
if (cpu != cmn->cpu)
@@ -1161,6 +1161,8 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
perf_pmu_migrate_context(&cmn->pmu, cpu, target);
+ for (i = 0; i < cmn->num_dtcs; i++)
+ irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target));
cmn->cpu = target;
return 0;
}
@@ -1502,7 +1504,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
struct arm_cmn *cmn;
const char *name;
static atomic_t id;
- int err, rootnode, this_id;
+ int err, rootnode;
cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
if (!cmn)
@@ -1549,14 +1551,9 @@ static int arm_cmn_probe(struct platform_device *pdev)
.cancel_txn = arm_cmn_end_txn,
};
- this_id = atomic_fetch_inc(&id);
- if (this_id == 0) {
- name = "arm_cmn";
- } else {
- name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
- if (!name)
- return -ENOMEM;
- }
+ name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id));
+ if (!name)
+ return -ENOMEM;
err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
if (err)
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 004930eb4bbb..66ad5b3ece19 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -159,7 +159,7 @@ static struct attribute *dmc620_pmu_events_attrs[] = {
NULL,
};
-static struct attribute_group dmc620_pmu_events_attr_group = {
+static const struct attribute_group dmc620_pmu_events_attr_group = {
.name = "events",
.attrs = dmc620_pmu_events_attrs,
};
@@ -222,7 +222,7 @@ static struct attribute *dmc620_pmu_formats_attrs[] = {
NULL,
};
-static struct attribute_group dmc620_pmu_format_attr_group = {
+static const struct attribute_group dmc620_pmu_format_attr_group = {
.name = "format",
.attrs = dmc620_pmu_formats_attrs,
};
@@ -717,6 +717,7 @@ static struct platform_driver dmc620_pmu_driver = {
.driver = {
.name = DMC620_DRVNAME,
.acpi_match_table = dmc620_acpi_match,
+ .suppress_bind_attrs = true,
},
.probe = dmc620_pmu_device_probe,
.remove = dmc620_pmu_device_remove,
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 794a37d50853..2d10d84fb79c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -577,7 +577,7 @@ static struct attribute *armpmu_common_attrs[] = {
NULL,
};
-static struct attribute_group armpmu_common_attr_group = {
+static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
return per_cpu(hw_events->irq, cpu);
}
-bool arm_pmu_irq_is_nmi(void)
-{
- return has_nmi;
-}
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 74474bb322c3..8ff7a67f691c 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -493,7 +493,7 @@ static struct attribute *smmu_pmu_cpumask_attrs[] = {
NULL
};
-static struct attribute_group smmu_pmu_cpumask_group = {
+static const struct attribute_group smmu_pmu_cpumask_group = {
.attrs = smmu_pmu_cpumask_attrs,
};
@@ -548,7 +548,7 @@ static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
return 0;
}
-static struct attribute_group smmu_pmu_events_group = {
+static const struct attribute_group smmu_pmu_events_group = {
.name = "events",
.attrs = smmu_pmu_events,
.is_visible = smmu_pmu_event_is_visible,
@@ -583,7 +583,7 @@ static struct attribute *smmu_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group smmu_pmu_identifier_group = {
+static const struct attribute_group smmu_pmu_identifier_group = {
.attrs = smmu_pmu_identifier_attrs,
.is_visible = smmu_pmu_identifier_attr_visible,
};
@@ -602,7 +602,7 @@ static struct attribute *smmu_pmu_formats[] = {
NULL
};
-static struct attribute_group smmu_pmu_format_group = {
+static const struct attribute_group smmu_pmu_format_group = {
.name = "format",
.attrs = smmu_pmu_formats,
};
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index cc00915ad6d1..d3929ccebfd2 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -54,7 +54,7 @@ struct arm_spe_pmu {
struct hlist_node hotplug_node;
int irq; /* PPI */
-
+ u16 pmsver;
u16 min_period;
u16 counter_sz;
@@ -146,7 +146,7 @@ static struct attribute *arm_spe_pmu_cap_attr[] = {
NULL,
};
-static struct attribute_group arm_spe_pmu_cap_group = {
+static const struct attribute_group arm_spe_pmu_cap_group = {
.name = "caps",
.attrs = arm_spe_pmu_cap_attr,
};
@@ -227,7 +227,7 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
NULL,
};
-static struct attribute_group arm_spe_pmu_format_group = {
+static const struct attribute_group arm_spe_pmu_format_group = {
.name = "format",
.attrs = arm_spe_pmu_formats_attr,
};
@@ -247,7 +247,7 @@ static struct attribute *arm_spe_pmu_attrs[] = {
NULL,
};
-static struct attribute_group arm_spe_pmu_group = {
+static const struct attribute_group arm_spe_pmu_group = {
.attrs = arm_spe_pmu_attrs,
};
@@ -655,6 +655,18 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
return IRQ_HANDLED;
}
+static u64 arm_spe_pmsevfr_res0(u16 pmsver)
+{
+ switch (pmsver) {
+ case ID_AA64DFR0_PMSVER_8_2:
+ return SYS_PMSEVFR_EL1_RES0_8_2;
+ case ID_AA64DFR0_PMSVER_8_3:
+ /* Return the highest version we support by default */
+ default:
+ return SYS_PMSEVFR_EL1_RES0_8_3;
+ }
+}

+
/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
@@ -670,7 +682,7 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
return -ENOENT;
- if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
+ if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
return -EOPNOTSUPP;
if (attr->exclude_idle)
@@ -937,6 +949,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
fld, smp_processor_id());
return;
}
+ spe_pmu->pmsver = (u16)fld;
/* Read PMBIDR first to determine whether or not we have access */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index a11bfd8a0823..be1f26b62ddb 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -133,7 +133,7 @@ static struct attribute *ddr_perf_identifier_attrs[] = {
NULL,
};
-static struct attribute_group ddr_perf_identifier_attr_group = {
+static const struct attribute_group ddr_perf_identifier_attr_group = {
.attrs = ddr_perf_identifier_attrs,
.is_visible = ddr_perf_identifier_attr_visible,
};
@@ -188,7 +188,7 @@ static struct attribute *ddr_perf_filter_cap_attr[] = {
NULL,
};
-static struct attribute_group ddr_perf_filter_cap_attr_group = {
+static const struct attribute_group ddr_perf_filter_cap_attr_group = {
.name = "caps",
.attrs = ddr_perf_filter_cap_attr,
};
@@ -209,7 +209,7 @@ static struct attribute *ddr_perf_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group ddr_perf_cpumask_attr_group = {
+static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = ddr_perf_cpumask_attrs,
};
@@ -265,7 +265,7 @@ static struct attribute *ddr_perf_events_attrs[] = {
NULL,
};
-static struct attribute_group ddr_perf_events_attr_group = {
+static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
};
@@ -281,7 +281,7 @@ static struct attribute *ddr_perf_format_attrs[] = {
NULL,
};
-static struct attribute_group ddr_perf_format_attr_group = {
+static const struct attribute_group ddr_perf_format_attr_group = {
.name = "format",
.attrs = ddr_perf_format_attrs,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 5ac6c9113767..ac1a8c120a00 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -319,7 +319,7 @@ static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group hisi_ddrc_pmu_identifier_group = {
+static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
.attrs = hisi_ddrc_pmu_identifier_attrs,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 41b2dceb5f26..3402f1a395a8 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -331,7 +331,7 @@ static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group hisi_hha_pmu_identifier_group = {
+static const struct attribute_group hisi_hha_pmu_identifier_group = {
.attrs = hisi_hha_pmu_identifier_attrs,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 705501d18d03..7d792435c2aa 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -321,7 +321,7 @@ static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group hisi_l3c_pmu_identifier_group = {
+static const struct attribute_group hisi_l3c_pmu_identifier_group = {
.attrs = hisi_l3c_pmu_identifier_attrs,
};
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 23a0e008dafa..8883af955a2a 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -649,7 +649,7 @@ static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group l2_cache_pmu_cpumask_group = {
+static const struct attribute_group l2_cache_pmu_cpumask_group = {
.attrs = l2_cache_pmu_cpumask_attrs,
};
@@ -665,7 +665,7 @@ static struct attribute *l2_cache_pmu_formats[] = {
NULL,
};
-static struct attribute_group l2_cache_pmu_format_group = {
+static const struct attribute_group l2_cache_pmu_format_group = {
.name = "format",
.attrs = l2_cache_pmu_formats,
};
@@ -700,7 +700,7 @@ static struct attribute *l2_cache_pmu_events[] = {
NULL
};
-static struct attribute_group l2_cache_pmu_events_group = {
+static const struct attribute_group l2_cache_pmu_events_group = {
.name = "events",
.attrs = l2_cache_pmu_events,
};
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 9ddb577c542b..fb34b87b9471 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -630,7 +630,7 @@ static struct attribute *qcom_l3_cache_pmu_formats[] = {
NULL,
};
-static struct attribute_group qcom_l3_cache_pmu_format_group = {
+static const struct attribute_group qcom_l3_cache_pmu_format_group = {
.name = "format",
.attrs = qcom_l3_cache_pmu_formats,
};
@@ -663,7 +663,7 @@ static struct attribute *qcom_l3_cache_pmu_events[] = {
NULL
};
-static struct attribute_group qcom_l3_cache_pmu_events_group = {
+static const struct attribute_group qcom_l3_cache_pmu_events_group = {
.name = "events",
.attrs = qcom_l3_cache_pmu_events,
};
@@ -685,7 +685,7 @@ static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
+static const struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
.attrs = qcom_l3_cache_pmu_cpumask_attrs,
};
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 633cf07ba672..44faa51ba799 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1234,10 +1234,9 @@ static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
struct xgene_pmu_dev_ctx *ctx;
struct xgene_pmu *xgene_pmu = dev_id;
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&xgene_pmu->lock, flags);
+ raw_spin_lock(&xgene_pmu->lock);
/* Get Interrupt PMU source */
val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
@@ -1273,7 +1272,7 @@ static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
}
}
- raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);
+ raw_spin_unlock(&xgene_pmu->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 00dabe5fab8a..68d9c2f6a5ca 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -52,6 +52,7 @@ config PHY_XGENE
config USB_LGM_PHY
tristate "INTEL Lightning Mountain USB PHY Driver"
depends on USB_SUPPORT
+ depends on X86 || COMPILE_TEST
select USB_PHY
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index a1f1a9c90d0d..09256339bd04 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -91,10 +91,11 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM4908 || ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB
+ default ARCH_BCM4908
default ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 3ecf41359591..769c707d9b71 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -651,7 +651,7 @@ static int brcm_dsl_sata_init(struct brcm_sata_port *port)
break;
msleep(20);
try--;
- };
+ }
if (!try) {
/* PLL did not lock; give up */
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 99fbc7e4138b..116fb23aebd9 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -34,7 +35,7 @@ struct value_to_name_map {
};
struct match_chip_info {
- void *init_func;
+ void (*init_func)(struct brcm_usb_init_params *params);
u8 required_regs[BRCM_REGS_MAX + 1];
u8 optional_reg;
};
@@ -286,6 +287,10 @@ static const struct match_chip_info chip_info_7445 = {
static const struct of_device_id brcm_usb_dt_ids[] = {
{
+ .compatible = "brcm,bcm4908-usb-phy",
+ .data = &chip_info_7445,
+ },
+ {
.compatible = "brcm,bcm7216-usb-phy",
.data = &chip_info_7216,
},
@@ -427,8 +432,6 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
struct device_node *dn = pdev->dev.of_node;
int err;
const char *mode;
- const struct of_device_id *match;
- void (*dvr_init)(struct brcm_usb_init_params *params);
const struct match_chip_info *info;
struct regmap *rmap;
int x;
@@ -441,10 +444,11 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
priv->ini.family_id = brcmstb_get_family_id();
priv->ini.product_id = brcmstb_get_product_id();
- match = of_match_node(brcm_usb_dt_ids, dev->of_node);
- info = match->data;
- dvr_init = info->init_func;
- (*dvr_init)(&priv->ini);
+ info = of_device_get_match_data(&pdev->dev);
+ if (!info)
+ return -ENOENT;
+
+ info->init_func(&priv->ini);
dev_dbg(dev, "Best mapping table is for %s\n",
priv->ini.family_name);
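Two related fixes land in this file: init_func becomes a typed function pointer (the compiler can now check every assignment and call, where the old void * needed a cast), and the open-coded of_match_node() lookup is replaced by of_device_get_match_data(), which returns the ->data of the matched of_device_id entry, or NULL. Condensed sketch of the combined pattern (priv assumed from the surrounding probe):

info = of_device_get_match_data(&pdev->dev);
if (!info)
	return -ENOENT;		/* bound without usable match data */

info->init_func(&priv->ini);	/* typed call, no cast required */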
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index f310e15d94cb..591a15834b48 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -2298,6 +2298,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
if (total_num_lanes > MAX_NUM_LANES) {
dev_err(dev, "Invalid lane configuration\n");
+ ret = -EINVAL;
goto put_lnk_rst;
}
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile
index 65d5ea00fc9d..1cb158d7233f 100644
--- a/drivers/phy/ingenic/Makefile
+++ b/drivers/phy/ingenic/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index 4d1587d82286..ea127b177f46 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -82,18 +82,7 @@
#define USBPCR1_PORT_RST BIT(21)
#define USBPCR1_WORD_IF_16BIT BIT(19)
-enum ingenic_usb_phy_version {
- ID_JZ4770,
- ID_JZ4775,
- ID_JZ4780,
- ID_X1000,
- ID_X1830,
- ID_X2000,
-};
-
struct ingenic_soc_info {
- enum ingenic_usb_phy_version version;
-
void (*usb_phy_init)(struct phy *phy);
};
@@ -300,38 +289,26 @@ static void x2000_usb_phy_init(struct phy *phy)
}
static const struct ingenic_soc_info jz4770_soc_info = {
- .version = ID_JZ4770,
-
.usb_phy_init = jz4770_usb_phy_init,
};
static const struct ingenic_soc_info jz4775_soc_info = {
- .version = ID_JZ4775,
-
.usb_phy_init = jz4775_usb_phy_init,
};
static const struct ingenic_soc_info jz4780_soc_info = {
- .version = ID_JZ4780,
-
.usb_phy_init = jz4780_usb_phy_init,
};
static const struct ingenic_soc_info x1000_soc_info = {
- .version = ID_X1000,
-
.usb_phy_init = x1000_usb_phy_init,
};
static const struct ingenic_soc_info x1830_soc_info = {
- .version = ID_X1830,
-
.usb_phy_init = x1830_usb_phy_init,
};
static const struct ingenic_soc_info x2000_soc_info = {
- .version = ID_X2000,
-
.usb_phy_init = x2000_usb_phy_init,
};
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index a7d126192cf1..29d246ea24b4 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -124,8 +124,16 @@ static int ltq_rcu_usb2_phy_power_on(struct phy *phy)
reset_control_deassert(priv->phy_reset);
ret = clk_prepare_enable(priv->phy_gate_clk);
- if (ret)
+ if (ret) {
dev_err(dev, "failed to enable PHY gate\n");
+ return ret;
+ }
+
+ /*
+ * At least the xrx200 USB2 PHY requires some extra time to become
+ * operational after its clock has been enabled.
+ */
+ usleep_range(100, 200);
return ret;
}
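This hunk fixes two things at once: a clk_prepare_enable() failure now returns instead of falling through with the PHY reset already deasserted, and a short settle delay is added. usleep_range() is the right primitive for that delay, since .power_on runs in sleepable context and the wait is far below the granularity where msleep() makes sense; udelay() would busy-wait. Condensed shape of the fixed path:

ret = clk_prepare_enable(priv->phy_gate_clk);
if (ret) {
	dev_err(dev, "failed to enable PHY gate\n");
	return ret;
}

usleep_range(100, 200);	/* sleepable, sub-millisecond settle time */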
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index d38def43b1bf..55f8e6c048ab 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
select GENERIC_PHY
help
Support MIPI DSI for Mediatek SoCs.
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 45be8aa724f3..8313bd517e4c 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -201,6 +201,7 @@ static const struct of_device_id mtk_hdmi_phy_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, mtk_hdmi_phy_match);
static struct platform_driver mtk_hdmi_phy_driver = {
.probe = mtk_hdmi_phy_probe,
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 18c481251f04..c51114d8e437 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -233,8 +233,9 @@ static const struct of_device_id mtk_mipi_tx_match[] = {
.data = &mt8183_mipitx_data },
{ },
};
+MODULE_DEVICE_TABLE(of, mtk_mipi_tx_match);
-struct platform_driver mtk_mipi_tx_driver = {
+static struct platform_driver mtk_mipi_tx_driver = {
.probe = mtk_mipi_tx_probe,
.remove = mtk_mipi_tx_remove,
.driver = {
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 442522ba487f..6ee478bc5211 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -143,7 +143,7 @@ static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
error = iio_read_channel_processed(ddata->vbus, &value);
if (error >= 0)
- return value > 3900 ? true : false;
+ return value > 3900;
dev_err(ddata->dev, "error reading VBUS: %i\n", error);
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
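The probe rework converges on the kernel's standard unwind pattern: once the vusb regulator has been enabled (earlier in probe, not shown in this hunk), every later failure must funnel through a single label that disables it again, instead of returning directly and leaking the enable count. The skeleton, condensed:

	error = cpcap_usb_init_interrupts(pdev, ddata);
	if (error)
		goto out_reg_disable;

	/* ... remaining setup ... */

	return 0;

out_reg_disable:
	regulator_disable(ddata->vusb);

	return error;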
static int cpcap_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 0939a9e9d448..9cdebe7f26cb 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -212,6 +212,15 @@ static const unsigned int qmp_v4_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x614,
};
+static const unsigned int sm8350_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x1008,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x1014,
+};
+
static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x160,
@@ -1974,6 +1983,291 @@ static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
};
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x048),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x65),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0x3c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_PLL_CNTL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
@@ -2183,6 +2477,11 @@ static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
+/* usb3 phy on sdx55 doesn't have com_aux clock */
+static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
+ "aux", "cfg_ahb", "ref"
+};
+
/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
"phy", "common", "cfg",
@@ -2824,6 +3123,117 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx55_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx55_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_sdx55_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8350_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
+ .rx_tbl = sm8350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
+ .pcs_tbl = sm8350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8350_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8350_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8350_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
static void qcom_qmp_phy_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -3135,7 +3545,7 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
+ static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
u8 val;
qphy->dp_aux_cfg++;
@@ -4129,6 +4539,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
/* It's a combo phy */
}, {
+ .compatible = "qcom,sc8180x-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
.compatible = "qcom,sdm845-qhp-pcie-phy",
.data = &sdm845_qhp_pciephy_cfg,
}, {
@@ -4171,8 +4587,20 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy",
.data = &sm8250_qmp_gen3x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8350-qmp-ufs-phy",
+ .data = &sm8350_ufsphy_cfg,
+ }, {
.compatible = "qcom,sm8250-qmp-modem-pcie-phy",
.data = &sm8250_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdx55-qmp-usb3-uni-phy",
+ .data = &sdx55_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-uni-phy",
+ .data = &sm8350_usb3_uniphy_cfg,
},
{ },
};
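Worth noting: several of the new compatibles simply point .data at an existing config (sc8180x reuses the sm8150 UFS and USB3 configs unchanged). At probe time the matched config is conventionally recovered with of_device_get_match_data(); a hedged sketch of that retrieval:

/* Sketch: fetching the per-SoC cfg matched in the table above
 * (example_probe is an illustrative name). */
static int example_probe(struct platform_device *pdev)
{
	const struct qmp_phy_cfg *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -EINVAL;

	/* ... program cfg->serdes_tbl, request cfg->clk_list, etc. ... */
	return 0;
}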
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index db92a461dd2e..71ce3aa174ae 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -824,4 +824,151 @@
#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc
#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0
+/* Only for QMP V5 PHY - QSERDES COM registers */
+#define QSERDES_V5_COM_PLL_IVCO 0x058
+#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V5_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V5_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V5_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V5_COM_HSCLK_SEL 0x158
+#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+
+/* Only for QMP V5 PHY - TX registers */
+#define QSERDES_V5_TX_RES_CODE_LANE_TX 0x34
+#define QSERDES_V5_TX_RES_CODE_LANE_RX 0x38
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX 0x3c
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V5_TX_LANE_MODE_1 0x84
+#define QSERDES_V5_TX_LANE_MODE_2 0x88
+#define QSERDES_V5_TX_LANE_MODE_3 0x8c
+#define QSERDES_V5_TX_LANE_MODE_4 0x90
+#define QSERDES_V5_TX_LANE_MODE_5 0x94
+#define QSERDES_V5_TX_RCV_DETECT_LVL_2 0xa4
+#define QSERDES_V5_TX_TRAN_DRVR_EMP_EN 0xc0
+#define QSERDES_V5_TX_PI_QEC_CTRL 0xe4
+#define QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x178
+#define QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x17c
+#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
+#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
+
+/* Only for QMP V5 PHY - RX registers */
+#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V5_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V5_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V5_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V5_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V5_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V5_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V5_RX_RCLK_AUXDATA_SEL 0x064
+#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V5_RX_RX_TERM_BW 0x080
+#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V5_RX_GM_CAL 0x0dc
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V5_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V5_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V5_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V5_RX_SIGDET_LVL 0x120
+#define QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V5_RX_RX_BAND 0x128
+#define QSERDES_V5_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V5_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V5_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V5_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V5_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V5_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V5_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V5_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V5_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V5_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V5_RX_RX_MODE_10_LOW 0x184
+#define QSERDES_V5_RX_RX_MODE_10_HIGH 0x188
+#define QSERDES_V5_RX_RX_MODE_10_HIGH2 0x18c
+#define QSERDES_V5_RX_RX_MODE_10_HIGH3 0x190
+#define QSERDES_V5_RX_RX_MODE_10_HIGH4 0x194
+#define QSERDES_V5_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V5_RX_VTH_CODE 0x1b0
+
+/* Only for QMP V5 PHY - UFS PCS registers */
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V5_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1 0x154
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
+
+/* Only for QMP V5 PHY - USB3 has different offsets than V4 */
+#define QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1 0x300
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x304
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x308
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x30c
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x310
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x314
+#define QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x318
+#define QPHY_V5_PCS_USB3_LFPS_TX_ECSTART 0x31c
+#define QPHY_V5_PCS_USB3_LFPS_PER_TIMER_VAL 0x320
+#define QPHY_V5_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x324
+#define QPHY_V5_PCS_USB3_LFPS_CONFIG1 0x328
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x32c
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x330
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x334
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x338
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x33c
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x340
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x344
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x348
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_CM_DLY 0x34c
+#define QPHY_V5_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x350
+#define QPHY_V5_PCS_USB3_ALFPS_DEGLITCH_VAL 0x354
+#define QPHY_V5_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x358
+#define QPHY_V5_PCS_USB3_TEST_CONTROL 0x35c
+#define QPHY_V5_PCS_USB3_RXTERMINATION_DLY_SEL 0x360
+
+/* Only for QMP V5 PHY - the UNI variant's PCS_USB3 regs sit at a 0x1000 offset */
+#define QPHY_V5_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x1018
+#define QPHY_V5_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x103c
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 109792203baf..8f1bf7e2186b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -22,6 +22,7 @@
#include <dt-bindings/phy/phy-qcom-qusb2.h>
+#define QUSB2PHY_PLL 0x0
#define QUSB2PHY_PLL_TEST 0x04
#define CLK_REF_SEL BIT(7)
@@ -135,6 +136,35 @@ enum qusb2phy_reg_layout {
QUSB2PHY_INTR_CTRL,
};
+static const struct qusb2_phy_init_tbl ipq6018_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL, 0x14),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xF8),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0xB3),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE3, 0x83),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xC0),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE5, 0x00),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TEST, 0x80),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9F),
+};
+
+static const unsigned int ipq6018_regs_layout[] = {
+ [QUSB2PHY_PLL_STATUS] = 0x38,
+ [QUSB2PHY_PORT_TUNE1] = 0x80,
+ [QUSB2PHY_PORT_TUNE2] = 0x84,
+ [QUSB2PHY_PORT_TUNE3] = 0x88,
+ [QUSB2PHY_PORT_TUNE4] = 0x8C,
+ [QUSB2PHY_PORT_TUNE5] = 0x90,
+ [QUSB2PHY_PORT_TEST1] = 0x98,
+ [QUSB2PHY_PORT_TEST2] = 0x9C,
+ [QUSB2PHY_PORT_POWERDOWN] = 0xB4,
+ [QUSB2PHY_INTR_CTRL] = 0xBC,
+};
+
static const unsigned int msm8996_regs_layout[] = {
[QUSB2PHY_PLL_STATUS] = 0x38,
[QUSB2PHY_PORT_TUNE1] = 0x80,
@@ -245,6 +275,9 @@ struct qusb2_phy_cfg {
/* true if PHY has PLL_CORE_INPUT_OVERRIDE register to reset PLL */
bool has_pll_override;
+
+ /* true if PHY default clk scheme is single-ended */
+ bool se_clk_scheme_default;
};
static const struct qusb2_phy_cfg msm8996_phy_cfg = {
@@ -253,6 +286,7 @@ static const struct qusb2_phy_cfg msm8996_phy_cfg = {
.regs = msm8996_regs_layout,
.has_pll_test = true,
+ .se_clk_scheme_default = true,
.disable_ctrl = (CLAMP_N_EN | FREEZIO_N | POWER_DOWN),
.mask_core_ready = PLL_LOCKED,
.autoresume_en = BIT(3),
@@ -266,10 +300,22 @@ static const struct qusb2_phy_cfg msm8998_phy_cfg = {
.disable_ctrl = POWER_DOWN,
.mask_core_ready = CORE_READY_STATUS,
.has_pll_override = true,
+ .se_clk_scheme_default = true,
.autoresume_en = BIT(0),
.update_tune1_with_efuse = true,
};
+static const struct qusb2_phy_cfg ipq6018_phy_cfg = {
+ .tbl = ipq6018_init_tbl,
+ .tbl_num = ARRAY_SIZE(ipq6018_init_tbl),
+ .regs = ipq6018_regs_layout,
+
+ .disable_ctrl = POWER_DOWN,
+ .mask_core_ready = PLL_LOCKED,
+ /* autoresume not used */
+ .autoresume_en = BIT(0),
+};
+
static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = {
.tbl = qusb2_v2_init_tbl,
.tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl),
@@ -279,10 +325,23 @@ static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = {
POWER_DOWN),
.mask_core_ready = CORE_READY_STATUS,
.has_pll_override = true,
+ .se_clk_scheme_default = true,
.autoresume_en = BIT(0),
.update_tune1_with_efuse = true,
};
+static const struct qusb2_phy_cfg sdm660_phy_cfg = {
+ .tbl = msm8996_init_tbl,
+ .tbl_num = ARRAY_SIZE(msm8996_init_tbl),
+ .regs = msm8996_regs_layout,
+
+ .has_pll_test = true,
+ .se_clk_scheme_default = false,
+ .disable_ctrl = (CLAMP_N_EN | FREEZIO_N | POWER_DOWN),
+ .mask_core_ready = PLL_LOCKED,
+ .autoresume_en = BIT(3),
+};
+
static const char * const qusb2_phy_vreg_names[] = {
"vdda-pll", "vdda-phy-dpdm",
};
@@ -701,8 +760,13 @@ static int qusb2_phy_init(struct phy *phy)
/* Required to get phy pll lock successfully */
usleep_range(150, 160);
- /* Default is single-ended clock on msm8996 */
- qphy->has_se_clk_scheme = true;
+ /*
+ * Not all SoCs expose a readable TCSR_PHY_CLK_SCHEME register
+ * in the TCSR; if this one doesn't, fall back to the default
+ * value hardcoded in the per-SoC configuration.
+ */
+ qphy->has_se_clk_scheme = cfg->se_clk_scheme_default;
+
/*
* read TCSR_PHY_CLK_SCHEME register to check if single-ended
* clock scheme is selected. If yes, then disable differential
@@ -810,6 +874,9 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq6018-qusb2-phy",
+ .data = &ipq6018_phy_cfg,
+ }, {
.compatible = "qcom,ipq8074-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
@@ -819,6 +886,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,msm8998-qusb2-phy",
.data = &msm8998_phy_cfg,
}, {
+ .compatible = "qcom,sdm660-qusb2-phy",
+ .data = &sdm660_phy_cfg,
+ }, {
/*
* Deprecated. Only here to support legacy device
* trees that didn't include "qcom,qusb2-v2-phy"
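The se_clk_scheme_default flag added in this patch gives each SoC its own fallback when the TCSR register can't be consulted. The resulting init-time logic is roughly the following — the TCSR field and macro names here are assumptions for illustration, not the driver's exact identifiers:

/* Sketch: clk-scheme selection with a per-SoC fallback (simplified;
 * 'tcsr', 'tcsr_offset' and 'PHY_CLK_SCHEME_SEL' are illustrative names). */
qphy->has_se_clk_scheme = cfg->se_clk_scheme_default;

if (qphy->tcsr) {
	u32 scheme;

	if (!regmap_read(qphy->tcsr, qphy->tcsr_offset, &scheme))
		qphy->has_se_clk_scheme = !!(scheme & PHY_CLK_SCHEME_SEL);
}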
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
index a52a9bf13b75..8807e59a1162 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
@@ -401,13 +401,26 @@ static const struct hsphy_init_seq init_seq_femtophy[] = {
HSPHY_INIT_CFG(0x90, 0x60, 0),
};
+static const struct hsphy_init_seq init_seq_mdm9607[] = {
+ HSPHY_INIT_CFG(0x80, 0x44, 0),
+ HSPHY_INIT_CFG(0x81, 0x38, 0),
+ HSPHY_INIT_CFG(0x82, 0x24, 0),
+ HSPHY_INIT_CFG(0x83, 0x13, 0),
+};
+
static const struct hsphy_data hsphy_data_femtophy = {
.init_seq = init_seq_femtophy,
.init_seq_num = ARRAY_SIZE(init_seq_femtophy),
};
+static const struct hsphy_data hsphy_data_mdm9607 = {
+ .init_seq = init_seq_mdm9607,
+ .init_seq_num = ARRAY_SIZE(init_seq_mdm9607),
+};
+
static const struct of_device_id qcom_snps_hsphy_match[] = {
{ .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, },
+ { .compatible = "qcom,usb-hs-28nm-mdm9607", .data = &hsphy_data_mdm9607, },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index 1e424f263e7a..20023f6eb994 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -248,15 +248,17 @@ static int rockchip_emmc_phy_init(struct phy *phy)
* - SDHCI driver to get the PHY
* - SDHCI driver to init the PHY
*
- * The clock is optional, so upon any error we just set to NULL.
+ * The clock is optional, so use clk_get_optional() to get it and
+ * treat only an error pointer (not a missing clock) as a failure.
*
* NOTE: we don't do anything special for EPROBE_DEFER here. Given the
* above expected use case, EPROBE_DEFER isn't sensible to expect, so
* it's just like any other error.
*/
- rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
+ rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
if (IS_ERR(rk_phy->emmcclk)) {
- dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret);
+ ret = PTR_ERR(rk_phy->emmcclk);
+ dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret);
rk_phy->emmcclk = NULL;
}
@@ -380,10 +382,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
if (!of_property_read_u32(dev->of_node, "drive-impedance-ohm", &val))
rk_phy->drive_impedance = convert_drive_impedance_ohm(pdev, val);
- if (of_property_read_bool(dev->of_node, "enable-strobe-pulldown"))
+ if (of_property_read_bool(dev->of_node, "rockchip,enable-strobe-pulldown"))
rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_ENABLE;
- if (!of_property_read_u32(dev->of_node, "output-tapdelay-select", &val)) {
+ if (!of_property_read_u32(dev->of_node, "rockchip,output-tapdelay-select", &val)) {
if (val <= PHYCTRL_OTAPDLYSEL_MAXVALUE)
rk_phy->output_tapdelay_select = val;
else
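One consequence of the clk_get_optional() switch above: a genuinely absent clock now comes back as NULL instead of an error pointer, so the error path only fires for real failures, and a NULL clk is a valid no-op argument to the rest of the clk API. The general pattern, sketched:

/* Sketch: optional-clock handling (generic pattern, not driver-exact). */
clk = clk_get_optional(dev, "emmcclk");
if (IS_ERR(clk)) {
	/* A real failure (e.g. deferral), not merely an absent clock. */
	dev_err(dev, "Error getting emmcclk: %ld\n", PTR_ERR(clk));
	clk = NULL;	/* carry on without it; NULL is a no-op clk */
}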
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index a54317e96c41..d08fbb180e43 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -17,6 +17,7 @@
#define STM32_USBPHYC_PLL 0x0
#define STM32_USBPHYC_MISC 0x8
+#define STM32_USBPHYC_MONITOR(X) (0x108 + ((X) * 0x100))
#define STM32_USBPHYC_VERSION 0x3F4
/* STM32_USBPHYC_PLL bit fields */
@@ -32,19 +33,16 @@
/* STM32_USBPHYC_MISC bit fields */
#define SWITHOST BIT(0)
+/* STM32_USBPHYC_MONITOR bit fields */
+#define STM32_USBPHYC_MON_OUT GENMASK(3, 0)
+#define STM32_USBPHYC_MON_SEL GENMASK(8, 4)
+#define STM32_USBPHYC_MON_SEL_LOCKP 0x1F
+#define STM32_USBPHYC_MON_OUT_LOCKP BIT(3)
+
/* STM32_USBPHYC_VERSION bit fields */
#define MINREV GENMASK(3, 0)
#define MAJREV GENMASK(7, 4)
-static const char * const supplies_names[] = {
- "vdda1v1", /* 1V1 */
- "vdda1v8", /* 1V8 */
-};
-
-#define NUM_SUPPLIES ARRAY_SIZE(supplies_names)
-
-#define PLL_LOCK_TIME_US 100
-#define PLL_PWR_DOWN_TIME_US 5
#define PLL_FVCO_MHZ 2880
#define PLL_INFF_MIN_RATE_HZ 19200000
#define PLL_INFF_MAX_RATE_HZ 38400000
@@ -58,7 +56,6 @@ struct pll_params {
struct stm32_usbphyc_phy {
struct phy *phy;
struct stm32_usbphyc *usbphyc;
- struct regulator_bulk_data supplies[NUM_SUPPLIES];
u32 index;
bool active;
};
@@ -70,6 +67,9 @@ struct stm32_usbphyc {
struct reset_control *rst;
struct stm32_usbphyc_phy **phys;
int nphys;
+ struct regulator *vdda1v1;
+ struct regulator *vdda1v8;
+ atomic_t n_pll_cons;
int switch_setup;
};
@@ -83,6 +83,41 @@ static inline void stm32_usbphyc_clr_bits(void __iomem *reg, u32 bits)
writel_relaxed(readl_relaxed(reg) & ~bits, reg);
}
+static int stm32_usbphyc_regulators_enable(struct stm32_usbphyc *usbphyc)
+{
+ int ret;
+
+ ret = regulator_enable(usbphyc->vdda1v1);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(usbphyc->vdda1v8);
+ if (ret)
+ goto vdda1v1_disable;
+
+ return 0;
+
+vdda1v1_disable:
+ regulator_disable(usbphyc->vdda1v1);
+
+ return ret;
+}
+
+static int stm32_usbphyc_regulators_disable(struct stm32_usbphyc *usbphyc)
+{
+ int ret;
+
+ ret = regulator_disable(usbphyc->vdda1v8);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(usbphyc->vdda1v1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void stm32_usbphyc_get_pll_params(u32 clk_rate,
struct pll_params *pll_params)
{
@@ -142,83 +177,106 @@ static int stm32_usbphyc_pll_init(struct stm32_usbphyc *usbphyc)
return 0;
}
-static bool stm32_usbphyc_has_one_phy_active(struct stm32_usbphyc *usbphyc)
+static int __stm32_usbphyc_pll_disable(struct stm32_usbphyc *usbphyc)
{
- int i;
+ void __iomem *pll_reg = usbphyc->base + STM32_USBPHYC_PLL;
+ u32 pllen;
+
+ stm32_usbphyc_clr_bits(pll_reg, PLLEN);
- for (i = 0; i < usbphyc->nphys; i++)
- if (usbphyc->phys[i]->active)
- return true;
+ /* Wait for minimum width of powerdown pulse (ENABLE = Low) */
+ if (readl_relaxed_poll_timeout(pll_reg, pllen, !(pllen & PLLEN), 5, 50))
+ dev_err(usbphyc->dev, "PLL not reset\n");
- return false;
+ return stm32_usbphyc_regulators_disable(usbphyc);
+}
+
+static int stm32_usbphyc_pll_disable(struct stm32_usbphyc *usbphyc)
+{
+ /* Check whether a phy port is still active or clk48 is still in use */
+ if (atomic_dec_return(&usbphyc->n_pll_cons) > 0)
+ return 0;
+
+ return __stm32_usbphyc_pll_disable(usbphyc);
}
static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
{
void __iomem *pll_reg = usbphyc->base + STM32_USBPHYC_PLL;
- bool pllen = (readl_relaxed(pll_reg) & PLLEN);
+ bool pllen = readl_relaxed(pll_reg) & PLLEN;
int ret;
- /* Check if one phy port has already configured the pll */
- if (pllen && stm32_usbphyc_has_one_phy_active(usbphyc))
+ /*
+ * Check whether another consumer (a phy port or a clk48 prepare)
+ * has already configured the PLL, and that it is actually enabled
+ */
+ if (atomic_inc_return(&usbphyc->n_pll_cons) > 1 && pllen)
return 0;
if (pllen) {
- stm32_usbphyc_clr_bits(pll_reg, PLLEN);
- /* Wait for minimum width of powerdown pulse (ENABLE = Low) */
- udelay(PLL_PWR_DOWN_TIME_US);
+ /*
+ * The PLL shouldn't be enabled without a known consumer:
+ * disable it and let n_pll_cons restart from a clean state
+ */
+ dev_warn(usbphyc->dev, "PLL enabled without known consumers\n");
+
+ ret = __stm32_usbphyc_pll_disable(usbphyc);
+ if (ret)
+ return ret;
}
+ ret = stm32_usbphyc_regulators_enable(usbphyc);
+ if (ret)
+ goto dec_n_pll_cons;
+
ret = stm32_usbphyc_pll_init(usbphyc);
if (ret)
- return ret;
+ goto reg_disable;
stm32_usbphyc_set_bits(pll_reg, PLLEN);
- /* Wait for maximum lock time */
- udelay(PLL_LOCK_TIME_US);
-
- if (!(readl_relaxed(pll_reg) & PLLEN)) {
- dev_err(usbphyc->dev, "PLLEN not set\n");
- return -EIO;
- }
-
return 0;
-}
-
-static int stm32_usbphyc_pll_disable(struct stm32_usbphyc *usbphyc)
-{
- void __iomem *pll_reg = usbphyc->base + STM32_USBPHYC_PLL;
- /* Check if other phy port active */
- if (stm32_usbphyc_has_one_phy_active(usbphyc))
- return 0;
-
- stm32_usbphyc_clr_bits(pll_reg, PLLEN);
- /* Wait for minimum width of powerdown pulse (ENABLE = Low) */
- udelay(PLL_PWR_DOWN_TIME_US);
+reg_disable:
+ stm32_usbphyc_regulators_disable(usbphyc);
- if (readl_relaxed(pll_reg) & PLLEN) {
- dev_err(usbphyc->dev, "PLL not reset\n");
- return -EIO;
- }
+dec_n_pll_cons:
+ atomic_dec(&usbphyc->n_pll_cons);
- return 0;
+ return ret;
}
static int stm32_usbphyc_phy_init(struct phy *phy)
{
struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
struct stm32_usbphyc *usbphyc = usbphyc_phy->usbphyc;
+ u32 reg_mon = STM32_USBPHYC_MONITOR(usbphyc_phy->index);
+ u32 monsel = FIELD_PREP(STM32_USBPHYC_MON_SEL,
+ STM32_USBPHYC_MON_SEL_LOCKP);
+ u32 monout;
int ret;
ret = stm32_usbphyc_pll_enable(usbphyc);
if (ret)
return ret;
+ /* Check that PLL Lock input to PHY is High */
+ writel_relaxed(monsel, usbphyc->base + reg_mon);
+ ret = readl_relaxed_poll_timeout(usbphyc->base + reg_mon, monout,
+ (monout & STM32_USBPHYC_MON_OUT_LOCKP),
+ 100, 1000);
+ if (ret) {
+ dev_err(usbphyc->dev, "PLL Lock input to PHY is Low (val=%x)\n",
+ (u32)(monout & STM32_USBPHYC_MON_OUT));
+ goto pll_disable;
+ }
+
usbphyc_phy->active = true;
return 0;
+
+pll_disable:
+ return stm32_usbphyc_pll_disable(usbphyc);
}
static int stm32_usbphyc_phy_exit(struct phy *phy)
@@ -231,25 +289,9 @@ static int stm32_usbphyc_phy_exit(struct phy *phy)
return stm32_usbphyc_pll_disable(usbphyc);
}
-static int stm32_usbphyc_phy_power_on(struct phy *phy)
-{
- struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
-
- return regulator_bulk_enable(NUM_SUPPLIES, usbphyc_phy->supplies);
-}
-
-static int stm32_usbphyc_phy_power_off(struct phy *phy)
-{
- struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
-
- return regulator_bulk_disable(NUM_SUPPLIES, usbphyc_phy->supplies);
-}
-
static const struct phy_ops stm32_usbphyc_phy_ops = {
.init = stm32_usbphyc_phy_init,
.exit = stm32_usbphyc_phy_exit,
- .power_on = stm32_usbphyc_phy_power_on,
- .power_off = stm32_usbphyc_phy_power_off,
.owner = THIS_MODULE,
};
@@ -312,7 +354,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
struct phy_provider *phy_provider;
- u32 version;
+ u32 pllen, version;
int ret, port = 0;
usbphyc = devm_kzalloc(dev, sizeof(*usbphyc), GFP_KERNEL);
@@ -344,6 +386,19 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
ret = PTR_ERR(usbphyc->rst);
if (ret == -EPROBE_DEFER)
goto clk_disable;
+
+ stm32_usbphyc_clr_bits(usbphyc->base + STM32_USBPHYC_PLL, PLLEN);
+ }
+
+ /*
+ * Wait for minimum width of powerdown pulse (ENABLE = Low):
+ * we have to ensure the PLL is disabled before the PHYs are initialized.
+ */
+ if (readl_relaxed_poll_timeout(usbphyc->base + STM32_USBPHYC_PLL,
+ pllen, !(pllen & PLLEN), 5, 50)) {
+ dev_warn(usbphyc->dev, "PLL not reset\n");
+ ret = -EPROBE_DEFER;
+ goto clk_disable;
}
usbphyc->switch_setup = -EINVAL;
@@ -355,11 +410,26 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ usbphyc->vdda1v1 = devm_regulator_get(dev, "vdda1v1");
+ if (IS_ERR(usbphyc->vdda1v1)) {
+ ret = PTR_ERR(usbphyc->vdda1v1);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get vdda1v1 supply: %d\n", ret);
+ goto clk_disable;
+ }
+
+ usbphyc->vdda1v8 = devm_regulator_get(dev, "vdda1v8");
+ if (IS_ERR(usbphyc->vdda1v8)) {
+ ret = PTR_ERR(usbphyc->vdda1v8);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get vdda1v8 supply: %d\n", ret);
+ goto clk_disable;
+ }
+
for_each_child_of_node(np, child) {
struct stm32_usbphyc_phy *usbphyc_phy;
struct phy *phy;
u32 index;
- int i;
phy = devm_phy_create(dev, child, &stm32_usbphyc_phy_ops);
if (IS_ERR(phy)) {
@@ -377,18 +447,6 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
goto put_child;
}
- for (i = 0; i < NUM_SUPPLIES; i++)
- usbphyc_phy->supplies[i].supply = supplies_names[i];
-
- ret = devm_regulator_bulk_get(&phy->dev, NUM_SUPPLIES,
- usbphyc_phy->supplies);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&phy->dev,
- "failed to get regulators: %d\n", ret);
- goto put_child;
- }
-
ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
@@ -432,6 +490,12 @@ clk_disable:
static int stm32_usbphyc_remove(struct platform_device *pdev)
{
struct stm32_usbphyc *usbphyc = dev_get_drvdata(&pdev->dev);
+ int port;
+
+ /* Ensure PHYs are not active, to allow PLL disabling */
+ for (port = 0; port < usbphyc->nphys; port++)
+ if (usbphyc->phys[port]->active)
+ stm32_usbphyc_phy_exit(usbphyc->phys[port]->phy);
clk_disable_unprepare(usbphyc->clk);
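The restructuring above replaces the "scan all ports for an active one" check with an atomic consumer count (n_pll_cons), so PHY ports and the clk48 path can share the PLL without racing on the enable/disable decision. Stripped of the hardware specifics, it is the classic refcount pattern:

/* Sketch: refcounted shared-resource on/off (pattern only;
 * do_hw_enable()/do_hw_disable() are hypothetical helpers). */
static int pll_get(struct stm32_usbphyc *u)
{
	if (atomic_inc_return(&u->n_pll_cons) > 1)
		return 0;		/* already on for another consumer */
	return do_hw_enable(u);		/* first consumer powers it up */
}

static int pll_put(struct stm32_usbphyc *u)
{
	if (atomic_dec_return(&u->n_pll_cons) > 0)
		return 0;		/* still in use elsewhere */
	return do_hw_disable(u);	/* last consumer powers it down */
}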
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 2b0f921b6ee3..2b65f84a5f89 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -874,13 +874,10 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
snprintf(name, sizeof(name), "ref%u", refclk);
clk = devm_clk_get_optional(gtr_dev->dev, name);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(gtr_dev->dev,
- "Failed to get reference clock %u: %ld\n",
- refclk, PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+ "Failed to get reference clock %u\n",
+ refclk);
if (!clk)
continue;
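dev_err_probe() collapses the usual three-branch dance — check the error, skip the message for -EPROBE_DEFER, return the code — into one call; it also records the message as the deferral reason, which is why the hand-rolled version above could simply be deleted. The general shape, with an illustrative clock name:

/* Sketch: the generic dev_err_probe() pattern ("core" is illustrative). */
static int get_core_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "core");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "no core clock\n");
	*out = clk;
	return 0;
}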
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d4b2f2e2ed75..b7675cce0027 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -226,14 +226,6 @@ config PINCTRL_SINGLE
help
This selects the device tree based generic pinctrl driver.
-config PINCTRL_SIRF
- bool "CSR SiRFprimaII pin controller driver"
- depends on ARCH_SIRF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
-
config PINCTRL_SX150X
bool "Semtech SX150x I2C GPIO expander pinctrl driver"
depends on I2C=y
@@ -279,22 +271,6 @@ config PINCTRL_STMFX
and configuring push-pull, open-drain, and can also be used as
interrupt-controller.
-config PINCTRL_U300
- bool "U300 pin controller driver"
- depends on ARCH_U300
- select PINMUX
- select GENERIC_PINCONF
-
-config PINCTRL_COH901
- bool "ST-Ericsson U300 COH 901 335/571 GPIO"
- depends on GPIOLIB && ARCH_U300 && PINCTRL_U300
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support GPIO interface on ST-Ericsson U300.
- The names of the two IP block variants supported are
- COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
- ports of 8 GPIO pins each.
-
config PINCTRL_MAX77620
tristate "MAX77620/MAX20024 Pincontrol support"
depends on MFD_MAX77620 && OF
@@ -394,6 +370,19 @@ config PINCTRL_MICROCHIP_SGPIO
connect control signals from SFP modules and to act as an
LED controller.
+config PINCTRL_K210
+ bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
+ depends on RISCV && SOC_CANAAN && OF
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+ default SOC_CANAAN
+ help
+ Add support for the Canaan Kendryte K210 RISC-V SoC Field
+ Programmable IO Array (FPIOA) controller.
+
source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
@@ -417,7 +406,6 @@ source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
-source "drivers/pinctrl/zte/Kconfig"
source "drivers/pinctrl/meson/Kconfig"
source "drivers/pinctrl/cirrus/Kconfig"
source "drivers/pinctrl/visconti/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 5bb9bb6cc3ce..8bf459c32a76 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -31,11 +31,8 @@ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
-obj-$(CONFIG_PINCTRL_SIRF) += sirf/
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
-obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
@@ -48,6 +45,7 @@ obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
+obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
@@ -71,6 +69,5 @@ obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-y += mediatek/
-obj-$(CONFIG_PINCTRL_ZX) += zte/
obj-y += cirrus/
obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
index a1d16e8280e5..119f0e471efd 100644
--- a/drivers/pinctrl/actions/Kconfig
+++ b/drivers/pinctrl/actions/Kconfig
@@ -12,18 +12,21 @@ config PINCTRL_OWL
config PINCTRL_S500
bool "Actions Semi S500 pinctrl driver"
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_OWL
help
Say Y here to enable Actions Semi S500 pinctrl driver
config PINCTRL_S700
bool "Actions Semi S700 pinctrl driver"
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_OWL
help
Say Y here to enable Actions Semi S700 pinctrl driver
config PINCTRL_S900
bool "Actions Semi S900 pinctrl driver"
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_OWL
help
Say Y here to enable Actions Semi S900 pinctrl driver
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 903a4baf3846..c8b3e396ea27 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -444,7 +444,6 @@ static int owl_group_config_get(struct pinctrl_dev *pctrldev,
*config = pinconf_to_config_packed(param, arg);
return ret;
-
}
static int owl_group_config_set(struct pinctrl_dev *pctrldev,
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 34803a6c7664..5c1a109842a7 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
GROUP_DECL(PWM8G0, D22);
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 57044ab376d3..0fe4a1fcdf00 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -853,7 +853,7 @@ static int ns2_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
- if ((pull_up == false) && (pull_down == false))
+ if (!pull_up && !pull_down)
return 0;
else
return -EINVAL;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 9fc4433fece4..7d3370289938 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2117,7 +2117,6 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
return ERR_PTR(error);
return pctldev;
-
}
EXPORT_SYMBOL_GPL(pinctrl_register);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 08d110078c43..70186448d2f4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -290,7 +290,6 @@ static const struct pinctrl_ops imx1_pctrl_ops = {
.pin_dbg_show = imx1_pin_dbg_show,
.dt_node_to_map = imx1_dt_node_to_map,
.dt_free_map = imx1_dt_free_map,
-
};
static int imx1_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index b6ef1911c1dd..8085782cd8f9 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -29,6 +29,16 @@
#define REVID_SHIFT 16
#define REVID_MASK GENMASK(31, 16)
+#define CAPLIST 0x004
+#define CAPLIST_ID_SHIFT 16
+#define CAPLIST_ID_MASK GENMASK(23, 16)
+#define CAPLIST_ID_GPIO_HW_INFO 1
+#define CAPLIST_ID_PWM 2
+#define CAPLIST_ID_BLINK 3
+#define CAPLIST_ID_EXP 4
+#define CAPLIST_NEXT_SHIFT 0
+#define CAPLIST_NEXT_MASK GENMASK(15, 0)
+
#define PADBAR 0x00c
#define PADOWN_BITS 4
@@ -1321,34 +1331,19 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return 0;
}
-static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
- struct intel_community *community)
+static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
+ struct intel_community *community)
{
struct intel_padgroup *gpps;
- unsigned int npins = community->npins;
unsigned int padown_num = 0;
- size_t ngpps, i;
-
- if (community->gpps)
- ngpps = community->ngpps;
- else
- ngpps = DIV_ROUND_UP(community->npins, community->gpp_size);
+ size_t i, ngpps = community->ngpps;
gpps = devm_kcalloc(pctrl->dev, ngpps, sizeof(*gpps), GFP_KERNEL);
if (!gpps)
return -ENOMEM;
for (i = 0; i < ngpps; i++) {
- if (community->gpps) {
- gpps[i] = community->gpps[i];
- } else {
- unsigned int gpp_size = community->gpp_size;
-
- gpps[i].reg_num = i;
- gpps[i].base = community->pin_base + i * gpp_size;
- gpps[i].size = min(gpp_size, npins);
- npins -= gpps[i].size;
- }
+ gpps[i] = community->gpps[i];
if (gpps[i].size > 32)
return -EINVAL;
@@ -1367,6 +1362,38 @@ static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
}
gpps[i].padown_num = padown_num;
+ padown_num += DIV_ROUND_UP(gpps[i].size * 4, 32);
+ }
+
+ community->gpps = gpps;
+
+ return 0;
+}
+
+static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
+ struct intel_community *community)
+{
+ struct intel_padgroup *gpps;
+ unsigned int npins = community->npins;
+ unsigned int padown_num = 0;
+ size_t i, ngpps = DIV_ROUND_UP(npins, community->gpp_size);
+
+ if (community->gpp_size > 32)
+ return -EINVAL;
+
+ gpps = devm_kcalloc(pctrl->dev, ngpps, sizeof(*gpps), GFP_KERNEL);
+ if (!gpps)
+ return -ENOMEM;
+
+ for (i = 0; i < ngpps; i++) {
+ unsigned int gpp_size = community->gpp_size;
+
+ gpps[i].reg_num = i;
+ gpps[i].base = community->pin_base + i * gpp_size;
+ gpps[i].size = min(gpp_size, npins);
+ npins -= gpps[i].size;
+
+ gpps[i].padown_num = padown_num;
/*
* In older hardware the number of padown registers per
@@ -1455,7 +1482,8 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
for (i = 0; i < pctrl->ncommunities; i++) {
struct intel_community *community = &pctrl->communities[i];
void __iomem *regs;
- u32 padbar;
+ u32 offset;
+ u32 value;
*community = pctrl->soc->communities[i];
@@ -1463,27 +1491,48 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
if (IS_ERR(regs))
return PTR_ERR(regs);
- /*
- * Determine community features based on the revision if
- * not specified already.
- */
- if (!community->features) {
- u32 rev;
+ /* Determine community features based on the revision */
+ value = readl(regs + REVID);
+ if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
+ community->features |= PINCTRL_FEATURE_DEBOUNCE;
+ community->features |= PINCTRL_FEATURE_1K_PD;
+ }
- rev = (readl(regs + REVID) & REVID_MASK) >> REVID_SHIFT;
- if (rev >= 0x94) {
- community->features |= PINCTRL_FEATURE_DEBOUNCE;
- community->features |= PINCTRL_FEATURE_1K_PD;
+ /* Determine community features based on the capabilities */
+ offset = CAPLIST;
+ do {
+ value = readl(regs + offset);
+ switch ((value & CAPLIST_ID_MASK) >> CAPLIST_ID_SHIFT) {
+ case CAPLIST_ID_GPIO_HW_INFO:
+ community->features |= PINCTRL_FEATURE_GPIO_HW_INFO;
+ break;
+ case CAPLIST_ID_PWM:
+ community->features |= PINCTRL_FEATURE_PWM;
+ break;
+ case CAPLIST_ID_BLINK:
+ community->features |= PINCTRL_FEATURE_BLINK;
+ break;
+ case CAPLIST_ID_EXP:
+ community->features |= PINCTRL_FEATURE_EXP;
+ break;
+ default:
+ break;
}
- }
+ offset = (value & CAPLIST_NEXT_MASK) >> CAPLIST_NEXT_SHIFT;
+ } while (offset);
+
+ dev_dbg(&pdev->dev, "Community%d features: %#08x\n", i, community->features);
/* Read offset of the pad configuration registers */
- padbar = readl(regs + PADBAR);
+ offset = readl(regs + PADBAR);
community->regs = regs;
- community->pad_regs = regs + padbar;
+ community->pad_regs = regs + offset;
- ret = intel_pinctrl_add_padgroups(pctrl, community);
+ if (community->gpps)
+ ret = intel_pinctrl_add_padgroups_by_gpps(pctrl, community);
+ else
+ ret = intel_pinctrl_add_padgroups_by_size(pctrl, community);
if (ret)
return ret;
}
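The capability scan introduced above walks a linked list laid out in register space: each CAPLIST word carries a capability ID in bits 23:16 and the offset of the next entry in bits 15:0, with a zero next-offset terminating the chain. Its skeleton, with the per-ID handling factored into a hypothetical helper:

/* Sketch: walking a hardware capability list (skeleton of the hunk above;
 * handle_capability() is a hypothetical stand-in for the switch). */
u32 offset = CAPLIST;

do {
	u32 value = readl(regs + offset);

	handle_capability((value & CAPLIST_ID_MASK) >> CAPLIST_ID_SHIFT);
	offset = (value & CAPLIST_NEXT_MASK) >> CAPLIST_NEXT_SHIFT;
} while (offset);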
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index ad34b7a3f6ed..c4fef03b663f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -143,6 +143,10 @@ struct intel_community {
/* Additional features supported by the hardware */
#define PINCTRL_FEATURE_DEBOUNCE BIT(0)
#define PINCTRL_FEATURE_1K_PD BIT(1)
+#define PINCTRL_FEATURE_GPIO_HW_INFO BIT(2)
+#define PINCTRL_FEATURE_PWM BIT(3)
+#define PINCTRL_FEATURE_BLINK BIT(4)
+#define PINCTRL_FEATURE_EXP BIT(5)
/**
* PIN_GROUP - Declare a pin group
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 3e354e02f408..75b6d66955bf 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -748,6 +748,7 @@ static const struct intel_pinctrl_soc_data tglh_soc_data = {
static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ "INT34C6", (kernel_ulong_t)&tglh_soc_data },
+ { "INTC1055", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 22736f60c16c..3b9b5dbd7968 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -157,6 +157,7 @@ static void mtk_eint_ack(struct irq_data *d)
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ bool masked;
u32 mask = BIT(d->hwirq & 0x1f);
void __iomem *reg;
@@ -173,6 +174,13 @@ static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
else
eint->dual_edge[d->hwirq] = 0;
+ if (!mtk_eint_get_mask(eint, d->hwirq)) {
+ mtk_eint_mask(d);
+ masked = false;
+ } else {
+ masked = true;
+ }
+
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
writel(mask, reg);
@@ -189,8 +197,9 @@ static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
writel(mask, reg);
}
- if (eint->dual_edge[d->hwirq])
- mtk_eint_flip_edge(eint, d->hwirq);
+ mtk_eint_ack(d);
+ if (!masked)
+ mtk_eint_unmask(d);
return 0;
}
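The reshuffle above brackets the trigger-type change: the line is masked first (unless it already was), the polarity registers are reprogrammed, any interrupt spuriously latched by the polarity flip is acked away, and the original mask state is restored. As a pattern, with hypothetical helper names:

/* Sketch: safe trigger-type change for a latch-prone irq line
 * (line_is_masked()/mask_line()/etc. are hypothetical helpers). */
bool was_masked = line_is_masked(d);

if (!was_masked)
	mask_line(d);
program_polarity(d, type);	/* may latch a spurious event */
ack_line(d);			/* discard it */
if (!was_masked)
	unmask_line(d);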
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 5e00f93ac998..0fa7de43bc4c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -514,8 +514,8 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
chip->set = mtk_gpio_set;
- chip->to_irq = mtk_gpio_to_irq,
- chip->set_config = mtk_gpio_set_config,
+ chip->to_irq = mtk_gpio_to_irq;
+ chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
chip->of_node = np;
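The one-character fix here (and the matching ones in pinctrl-paris.c and pinctrl-at91.c below) is about intent rather than behavior: a trailing comma turns consecutive assignments into a single comma-expression statement, which compiles and still performs every store, but misleads readers and static checkers. A minimal illustration:

/* Sketch: the comma operator chains these into ONE statement -
 * both stores still happen, but the code no longer says what it means. */
chip->to_irq = mtk_gpio_to_irq,		/* comma, not semicolon */
chip->set_config = mtk_gpio_set_config;	/* same statement continues here */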
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 7aeb552d16ce..72f17f26acd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
+ } else if (hw->soc->bias_set_combo) {
+ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+ if (err)
+ return err;
} else {
return -ENOTSUPP;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 039ce9be19c5..da1f19288aa6 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -891,8 +891,8 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
chip->set = mtk_gpio_set;
- chip->to_irq = mtk_gpio_to_irq,
- chip->set_config = mtk_gpio_set_config,
+ chip->to_irq = mtk_gpio_to_irq;
+ chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
chip->of_node = np;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 68894e9e05d2..5a68e242f6b3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -188,7 +188,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
-
};
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index d4ea10803fd9..abfe11c7b49f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
} else {
int irq = chip->to_irq(chip, offset);
const int pullidx = pull ? 1 : 0;
- bool wake;
int val;
static const char * const pulls[] = {
"none ",
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 6de31b5ee358..2535ca720668 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -923,7 +923,7 @@ struct npcm7xx_pincfg {
};
static const struct npcm7xx_pincfg pincfg[] = {
- /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */
+ /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */
NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 36c6078b93b3..e71ebccc479c 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -36,6 +36,7 @@
#define ATMEL_PIO_DIR_MASK BIT(8)
#define ATMEL_PIO_PUEN_MASK BIT(9)
#define ATMEL_PIO_PDEN_MASK BIT(10)
+#define ATMEL_PIO_SR_MASK BIT(11)
#define ATMEL_PIO_IFEN_MASK BIT(12)
#define ATMEL_PIO_IFSCEN_MASK BIT(13)
#define ATMEL_PIO_OPD_MASK BIT(14)
@@ -76,10 +77,12 @@
* @nbanks: number of PIO banks
* @last_bank_count: number of lines in the last bank (can be less than
* the rest of the banks).
+ * @slew_rate_support: whether slew-rate configuration is supported
*/
struct atmel_pioctrl_data {
- unsigned nbanks;
- unsigned last_bank_count;
+ unsigned int nbanks;
+ unsigned int last_bank_count;
+ unsigned int slew_rate_support;
};
struct atmel_group {
@@ -88,11 +91,11 @@ struct atmel_group {
};
struct atmel_pin {
- unsigned pin_id;
- unsigned mux;
- unsigned ioset;
- unsigned bank;
- unsigned line;
+ unsigned int pin_id;
+ unsigned int mux;
+ unsigned int ioset;
+ unsigned int bank;
+ unsigned int line;
const char *device;
};
@@ -117,20 +120,21 @@ struct atmel_pin {
* @pm_suspend_backup: backup/restore register values on suspend/resume
* @dev: device entry for the Atmel PIO controller.
* @node: node of the Atmel PIO controller.
+ * @slew_rate_support: whether slew-rate configuration is supported
*/
struct atmel_pioctrl {
void __iomem *reg_base;
struct clk *clk;
- unsigned nbanks;
+ unsigned int nbanks;
struct pinctrl_dev *pinctrl_dev;
struct atmel_group *groups;
const char * const *group_names;
struct atmel_pin **pins;
- unsigned npins;
+ unsigned int npins;
struct gpio_chip *gpio_chip;
struct irq_domain *irq_domain;
int *irqs;
- unsigned *pm_wakeup_sources;
+ unsigned int *pm_wakeup_sources;
struct {
u32 imr;
u32 odsr;
@@ -138,6 +142,7 @@ struct atmel_pioctrl {
} *pm_suspend_backup;
struct device *dev;
struct device_node *node;
+ unsigned int slew_rate_support;
};
static const char * const atmel_functions[] = {
@@ -172,11 +177,11 @@ static void atmel_gpio_irq_ack(struct irq_data *d)
*/
}
-static int atmel_gpio_irq_set_type(struct irq_data *d, unsigned type)
+static int atmel_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
- unsigned reg;
+ unsigned int reg;
atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
BIT(pin->line));
@@ -263,7 +268,7 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_set_wake = atmel_gpio_irq_set_wake,
};
-static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
@@ -311,11 +316,12 @@ static void atmel_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int atmel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int atmel_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
- unsigned reg;
+ unsigned int reg;
atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
BIT(pin->line));
@@ -326,11 +332,11 @@ static int atmel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int atmel_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
- unsigned reg;
+ unsigned int reg;
reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_PDSR);
@@ -364,12 +370,13 @@ static int atmel_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
-static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int atmel_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset,
int value)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
- unsigned reg;
+ unsigned int reg;
atmel_gpio_write(atmel_pioctrl, pin->bank,
value ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
@@ -384,7 +391,7 @@ static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
@@ -440,11 +447,11 @@ static struct gpio_chip atmel_gpio_chip = {
/* --- PINCTRL --- */
static unsigned int atmel_pin_config_read(struct pinctrl_dev *pctldev,
- unsigned pin_id)
+ unsigned int pin_id)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
- unsigned line = atmel_pioctrl->pins[pin_id]->line;
+ unsigned int bank = atmel_pioctrl->pins[pin_id]->bank;
+ unsigned int line = atmel_pioctrl->pins[pin_id]->line;
void __iomem *addr = atmel_pioctrl->reg_base
+ bank * ATMEL_PIO_BANK_OFFSET;
@@ -456,11 +463,11 @@ static unsigned int atmel_pin_config_read(struct pinctrl_dev *pctldev,
}
static void atmel_pin_config_write(struct pinctrl_dev *pctldev,
- unsigned pin_id, u32 conf)
+ unsigned int pin_id, u32 conf)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
- unsigned line = atmel_pioctrl->pins[pin_id]->line;
+ unsigned int bank = atmel_pioctrl->pins[pin_id]->bank;
+ unsigned int line = atmel_pioctrl->pins[pin_id]->line;
void __iomem *addr = atmel_pioctrl->reg_base
+ bank * ATMEL_PIO_BANK_OFFSET;
@@ -478,7 +485,7 @@ static int atmel_pctl_get_groups_count(struct pinctrl_dev *pctldev)
}
static const char *atmel_pctl_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
+ unsigned int selector)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -486,19 +493,20 @@ static const char *atmel_pctl_get_group_name(struct pinctrl_dev *pctldev,
}
static int atmel_pctl_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned selector, const unsigned **pins,
- unsigned *num_pins)
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = (unsigned *)&atmel_pioctrl->groups[selector].pin;
+ *pins = (unsigned int *)&atmel_pioctrl->groups[selector].pin;
*num_pins = 1;
return 0;
}
static struct atmel_group *
-atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev, unsigned pin)
+atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
int i;
@@ -519,7 +527,7 @@ static int atmel_pctl_xlate_pinfunc(struct pinctrl_dev *pctldev,
const char **func_name)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned pin_id, func_id;
+ unsigned int pin_id, func_id;
struct atmel_group *grp;
pin_id = ATMEL_GET_PIN_NO(pinfunc);
@@ -549,10 +557,10 @@ static int atmel_pctl_xlate_pinfunc(struct pinctrl_dev *pctldev,
static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps)
+ unsigned int *reserved_maps,
+ unsigned int *num_maps)
{
- unsigned num_pins, num_configs, reserve;
+ unsigned int num_pins, num_configs, reserve;
unsigned long *configs;
struct property *pins;
u32 pinfunc;
@@ -623,10 +631,10 @@ exit:
static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config,
struct pinctrl_map **map,
- unsigned *num_maps)
+ unsigned int *num_maps)
{
struct device_node *np;
- unsigned reserved_maps;
+ unsigned int reserved_maps;
int ret;
*map = NULL;
@@ -674,13 +682,13 @@ static int atmel_pmx_get_functions_count(struct pinctrl_dev *pctldev)
}
static const char *atmel_pmx_get_function_name(struct pinctrl_dev *pctldev,
- unsigned selector)
+ unsigned int selector)
{
return atmel_functions[selector];
}
static int atmel_pmx_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
const char * const **groups,
unsigned * const num_groups)
{
@@ -693,11 +701,11 @@ static int atmel_pmx_get_function_groups(struct pinctrl_dev *pctldev,
}
static int atmel_pmx_set_mux(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+ unsigned int function,
+ unsigned int group)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned pin;
+ unsigned int pin;
u32 conf;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
@@ -721,13 +729,13 @@ static const struct pinmux_ops atmel_pmxops = {
};
static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
- unsigned group,
+ unsigned int group,
unsigned long *config)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned param = pinconf_to_config_param(*config), arg = 0;
+ unsigned int param = pinconf_to_config_param(*config), arg = 0;
struct atmel_group *grp = atmel_pioctrl->groups + group;
- unsigned pin_id = grp->pin;
+ unsigned int pin_id = grp->pin;
u32 res;
res = atmel_pin_config_read(pctldev, pin_id);
@@ -760,6 +768,13 @@ static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (!atmel_pioctrl->slew_rate_support)
+ return -EOPNOTSUPP;
+ if (!(res & ATMEL_PIO_SR_MASK))
+ return -EINVAL;
+ arg = 1;
+ break;
case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
if (!(res & ATMEL_PIO_DRVSTR_MASK))
return -EINVAL;
@@ -774,25 +789,29 @@ static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
}
static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
- unsigned group,
+ unsigned int group,
unsigned long *configs,
- unsigned num_configs)
+ unsigned int num_configs)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
struct atmel_group *grp = atmel_pioctrl->groups + group;
- unsigned bank, pin, pin_id = grp->pin;
+ unsigned int bank, pin, pin_id = grp->pin;
u32 mask, conf = 0;
int i;
conf = atmel_pin_config_read(pctldev, pin_id);
for (i = 0; i < num_configs; i++) {
- unsigned param = pinconf_to_config_param(configs[i]);
- unsigned arg = pinconf_to_config_argument(configs[i]);
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
__func__, pin_id, configs[i]);
+ /* Keep slew rate enabled by default. */
+ if (atmel_pioctrl->slew_rate_support)
+ conf |= ATMEL_PIO_SR_MASK;
+
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
conf &= (~ATMEL_PIO_PUEN_MASK);
@@ -850,6 +869,13 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
ATMEL_PIO_SODR);
}
break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (!atmel_pioctrl->slew_rate_support)
+ break;
+ /* And remove it if explicitly requested. */
+ if (arg == 0)
+ conf &= ~ATMEL_PIO_SR_MASK;
+ break;
case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
switch (arg) {
case ATMEL_PIO_DRVSTR_LO:
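For reference: each entry in configs[] packs a pinconf parameter and its argument into a single unsigned long, which the handler above unpacks with pinconf_to_config_param()/pinconf_to_config_argument(). A minimal standalone sketch of that layout, assuming the generic 8-bit-param/remaining-bits-argument packing (the value 11 below merely stands in for PIN_CONFIG_SLEW_RATE):

#include <stdio.h>

/* Generic pinconf packing: low 8 bits = param, upper bits = argument */
#define PACK(param, arg)	(((unsigned long)(arg) << 8) | (param))
#define PARAM(cfg)		((cfg) & 0xffUL)
#define ARG(cfg)		((cfg) >> 8)

int main(void)
{
	/* 11 is a stand-in for PIN_CONFIG_SLEW_RATE; arg 0 disables it */
	unsigned long cfg = PACK(11, 0);

	printf("param=%lu arg=%lu\n", PARAM(cfg), ARG(cfg));
	return 0;
}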
@@ -877,7 +903,8 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
}
static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned pin_id)
+ struct seq_file *s,
+ unsigned int pin_id)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
u32 conf;
@@ -901,6 +928,8 @@ static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "%s ", "open-drain");
if (conf & ATMEL_PIO_SCHMITT_MASK)
seq_printf(s, "%s ", "schmitt");
+ if (atmel_pioctrl->slew_rate_support && (conf & ATMEL_PIO_SR_MASK))
+ seq_printf(s, "%s ", "slew-rate");
if (conf & ATMEL_PIO_DRVSTR_MASK) {
switch ((conf & ATMEL_PIO_DRVSTR_MASK) >> ATMEL_PIO_DRVSTR_OFFSET) {
case ATMEL_PIO_DRVSTR_ME:
@@ -994,6 +1023,7 @@ static const struct atmel_pioctrl_data atmel_sama5d2_pioctrl_data = {
static const struct atmel_pioctrl_data microchip_sama7g5_pioctrl_data = {
.nbanks = 5,
.last_bank_count = 8, /* sama7g5 has only PE0 to PE7 */
+ .slew_rate_support = 1,
};
static const struct of_device_id atmel_pctrl_of_match[] = {
@@ -1039,6 +1069,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->npins -= ATMEL_PIO_NPINS_PER_BANK;
atmel_pioctrl->npins += atmel_pioctrl_data->last_bank_count;
}
+ atmel_pioctrl->slew_rate_support = atmel_pioctrl_data->slew_rate_support;
atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
@@ -1081,8 +1112,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0 ; i < atmel_pioctrl->npins; i++) {
struct atmel_group *group = atmel_pioctrl->groups + i;
- unsigned bank = ATMEL_PIO_BANK(i);
- unsigned line = ATMEL_PIO_LINE(i);
+ unsigned int bank = ATMEL_PIO_BANK(i);
+ unsigned int line = ATMEL_PIO_LINE(i);
atmel_pioctrl->pins[i] = devm_kzalloc(dev,
sizeof(**atmel_pioctrl->pins), GFP_KERNEL);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 72edc675431c..8003d1bd1695 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -733,7 +733,6 @@ static const struct at91_pinctrl_mux_ops sam9x60_ops = {
.get_slewrate = at91_mux_sam9x60_get_slewrate,
.set_slewrate = at91_mux_sam9x60_set_slewrate,
.irq_type = alt_gpio_irq_type,
-
};
static struct at91_pinctrl_mux_ops sama5d3_ops = {
@@ -1742,7 +1741,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
gpio_irqchip->irq_disable = gpio_irq_mask;
gpio_irqchip->irq_mask = gpio_irq_mask;
gpio_irqchip->irq_unmask = gpio_irq_unmask;
- gpio_irqchip->irq_set_wake = gpio_irq_set_wake,
+ gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
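Aside on the one-character fix above: the original statement ended in a comma rather than a semicolon, and the C comma operator makes that perfectly legal, so the slip compiled without so much as a warning. A standalone illustration with a hypothetical ops struct:

#include <stdio.h>

struct ops {
	int set_wake;
	int set_type;
};

int main(void)
{
	struct ops o;

	/* Comma operator: both assignments still execute as one statement */
	o.set_wake = 1,
	o.set_type = 2;

	printf("%d %d\n", o.set_wake, o.set_type);
	return 0;
}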
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
deleted file mode 100644
index 2905348ff430..000000000000
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ /dev/null
@@ -1,774 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * U300 GPIO module.
- *
- * Copyright (C) 2007-2012 ST-Ericsson AB
- * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
- * Author: Linus Walleij <linus.walleij@linaro.org>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include "pinctrl-coh901.h"
-
-#define U300_GPIO_PORT_STRIDE (0x30)
-/*
- * Control Register 32bit (R/W)
- * bit 15-9 (mask 0x0000FE00) contains the number of cores. 8*cores
- * gives the number of GPIO pins.
- * bit 8-2 (mask 0x000001FC) contains the core version ID.
- */
-#define U300_GPIO_CR (0x00)
-#define U300_GPIO_CR_SYNC_SEL_ENABLE (0x00000002UL)
-#define U300_GPIO_CR_BLOCK_CLKRQ_ENABLE (0x00000001UL)
-#define U300_GPIO_PXPDIR (0x04)
-#define U300_GPIO_PXPDOR (0x08)
-#define U300_GPIO_PXPCR (0x0C)
-#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK (0x0000FFFFUL)
-#define U300_GPIO_PXPCR_PIN_MODE_MASK (0x00000003UL)
-#define U300_GPIO_PXPCR_PIN_MODE_SHIFT (0x00000002UL)
-#define U300_GPIO_PXPCR_PIN_MODE_INPUT (0x00000000UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL (0x00000001UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN (0x00000002UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE (0x00000003UL)
-#define U300_GPIO_PXPER (0x10)
-#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK (0x000000FFUL)
-#define U300_GPIO_PXPER_PULL_UP_DISABLE (0x00000001UL)
-#define U300_GPIO_PXIEV (0x14)
-#define U300_GPIO_PXIEN (0x18)
-#define U300_GPIO_PXIFR (0x1C)
-#define U300_GPIO_PXICR (0x20)
-#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK (0x000000FFUL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_MASK (0x00000001UL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE (0x00000000UL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE (0x00000001UL)
-
-/* 8 bits per port, no version has more than 7 ports */
-#define U300_GPIO_NUM_PORTS 7
-#define U300_GPIO_PINS_PER_PORT 8
-#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * U300_GPIO_NUM_PORTS)
-
-struct u300_gpio_port {
- struct u300_gpio *gpio;
- char name[8];
- int irq;
- int number;
- u8 toggle_edge_mode;
-};
-
-struct u300_gpio {
- struct gpio_chip chip;
- struct u300_gpio_port ports[U300_GPIO_NUM_PORTS];
- struct clk *clk;
- void __iomem *base;
- struct device *dev;
- u32 stride;
- /* Register offsets */
- u32 pcr;
- u32 dor;
- u32 dir;
- u32 per;
- u32 icr;
- u32 ien;
- u32 iev;
-};
-
-/*
- * Macro to expand to read a specific register found in the "gpio"
- * struct. It requires the struct u300_gpio *gpio variable to exist in
- * its context. It calculates the port offset from the given pin
- * offset, multiplies by the port stride and adds the register offset
- * so it provides a pointer to the desired register.
- */
-#define U300_PIN_REG(pin, reg) \
- (gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
-
-/*
- * Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
- * register.
- */
-#define U300_PIN_BIT(pin) \
- (1 << (pin & 0x07))
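The two macros above are plain address arithmetic; a standalone sketch of the same computation for pin 19 (port 2, line 3), reusing the 0x30 port stride and the PXPCR offset from the defines above:

#include <stdio.h>

#define STRIDE	0x30	/* U300_GPIO_PORT_STRIDE */
#define PCR	0x0C	/* U300_GPIO_PXPCR register offset */

int main(void)
{
	unsigned int pin = 19;
	unsigned long reg = (pin >> 3) * STRIDE + PCR; /* 0x6C */
	unsigned int mask = 1 << (pin & 0x07);         /* 0x08 */

	printf("pin %u -> register offset 0x%lx, bit mask 0x%02x\n",
	       pin, reg, mask);
	return 0;
}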
-
-struct u300_gpio_confdata {
- u16 bias_mode;
- bool output;
- int outval;
-};
-
-#define U300_FLOATING_INPUT { \
- .bias_mode = PIN_CONFIG_BIAS_HIGH_IMPEDANCE, \
- .output = false, \
-}
-
-#define U300_PULL_UP_INPUT { \
- .bias_mode = PIN_CONFIG_BIAS_PULL_UP, \
- .output = false, \
-}
-
-#define U300_OUTPUT_LOW { \
- .output = true, \
- .outval = 0, \
-}
-
-#define U300_OUTPUT_HIGH { \
- .output = true, \
- .outval = 1, \
-}
-
-/* Initial configuration */
-static const struct u300_gpio_confdata __initconst
-bs335_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
- /* Port 0, pins 0-7 */
- {
- U300_FLOATING_INPUT,
- U300_OUTPUT_HIGH,
- U300_FLOATING_INPUT,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- },
- /* Port 1, pins 0-7 */
- {
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- U300_PULL_UP_INPUT,
- U300_FLOATING_INPUT,
- U300_OUTPUT_HIGH,
- U300_OUTPUT_LOW,
- U300_OUTPUT_LOW,
- },
- /* Port 2, pins 0-7 */
- {
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_OUTPUT_LOW,
- U300_PULL_UP_INPUT,
- U300_OUTPUT_LOW,
- U300_PULL_UP_INPUT,
- },
- /* Port 3, pins 0-7 */
- {
- U300_PULL_UP_INPUT,
- U300_OUTPUT_LOW,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- },
- /* Port 4, pins 0-7 */
- {
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- },
- /* Port 5, pins 0-7 */
- {
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- },
- /* Port 6, pins 0-7 */
- {
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- U300_FLOATING_INPUT,
- }
-};
-
-static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
-
- return !!(readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset));
-}
-
-static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
-
- val = readl(U300_PIN_REG(offset, dor));
- if (value)
- writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
- else
- writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
-
- local_irq_restore(flags);
-}
-
-static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = readl(U300_PIN_REG(offset, pcr));
- /* Mask out this pin, note 2 bits per setting */
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
- writel(val, U300_PIN_REG(offset, pcr));
- local_irq_restore(flags);
- return 0;
-}
-
-static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- u32 oldmode;
- u32 val;
-
- local_irq_save(flags);
- val = readl(U300_PIN_REG(offset, pcr));
- /*
- * Drive mode must be set by the special mode set function, set
- * push/pull mode by default if no mode has been selected.
- */
- oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
- ((offset & 0x07) << 1));
- /* mode = 0 means input, else some mode is already set */
- if (oldmode == 0) {
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
- ((offset & 0x07) << 1));
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
- << ((offset & 0x07) << 1));
- writel(val, U300_PIN_REG(offset, pcr));
- }
- u300_gpio_set(chip, offset, value);
- local_irq_restore(flags);
- return 0;
-}
-
-/* Returning -EINVAL means "supported but not available" */
-int u300_gpio_config_get(struct gpio_chip *chip,
- unsigned offset,
- unsigned long *config)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- enum pin_config_param param = (enum pin_config_param) *config;
- bool biasmode;
- u32 drmode;
-
- /* One bit per pin, clamp to bool range */
- biasmode = !!(readl(U300_PIN_REG(offset, per)) & U300_PIN_BIT(offset));
-
- /* Mask out the two bits for this pin and shift to bits 0,1 */
- drmode = readl(U300_PIN_REG(offset, pcr));
- drmode &= (U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
- drmode >>= ((offset & 0x07) << 1);
-
- switch (param) {
- case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- *config = 0;
- if (biasmode)
- return 0;
- else
- return -EINVAL;
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- *config = 0;
- if (!biasmode)
- return 0;
- else
- return -EINVAL;
- break;
- case PIN_CONFIG_DRIVE_PUSH_PULL:
- *config = 0;
- if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL)
- return 0;
- else
- return -EINVAL;
- break;
- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- *config = 0;
- if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN)
- return 0;
- else
- return -EINVAL;
- break;
- case PIN_CONFIG_DRIVE_OPEN_SOURCE:
- *config = 0;
- if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE)
- return 0;
- else
- return -EINVAL;
- break;
- default:
- break;
- }
- return -ENOTSUPP;
-}
-
-int u300_gpio_config_set(struct gpio_chip *chip, unsigned offset,
- enum pin_config_param param)
-{
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- val = readl(U300_PIN_REG(offset, per));
- writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- val = readl(U300_PIN_REG(offset, per));
- writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
- break;
- case PIN_CONFIG_DRIVE_PUSH_PULL:
- val = readl(U300_PIN_REG(offset, pcr));
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
- << ((offset & 0x07) << 1));
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
- << ((offset & 0x07) << 1));
- writel(val, U300_PIN_REG(offset, pcr));
- break;
- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- val = readl(U300_PIN_REG(offset, pcr));
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
- << ((offset & 0x07) << 1));
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
- << ((offset & 0x07) << 1));
- writel(val, U300_PIN_REG(offset, pcr));
- break;
- case PIN_CONFIG_DRIVE_OPEN_SOURCE:
- val = readl(U300_PIN_REG(offset, pcr));
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
- << ((offset & 0x07) << 1));
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
- << ((offset & 0x07) << 1));
- writel(val, U300_PIN_REG(offset, pcr));
- break;
- default:
- local_irq_restore(flags);
- dev_err(gpio->dev, "illegal configuration requested\n");
- return -EINVAL;
- }
- local_irq_restore(flags);
- return 0;
-}
-
-static const struct gpio_chip u300_gpio_chip = {
- .label = "u300-gpio-chip",
- .owner = THIS_MODULE,
- .request = gpiochip_generic_request,
- .free = gpiochip_generic_free,
- .get = u300_gpio_get,
- .set = u300_gpio_set,
- .direction_input = u300_gpio_direction_input,
- .direction_output = u300_gpio_direction_output,
-};
-
-static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
-{
- u32 val;
-
- val = readl(U300_PIN_REG(offset, icr));
- /* Set mode depending on state */
- if (u300_gpio_get(&gpio->chip, offset)) {
- /* High now, let's trigger on falling edge next then */
- writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
- dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
- offset);
- } else {
- /* Low now, let's trigger on rising edge next then */
- writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
- dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
- offset);
- }
-}
-
-static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- struct u300_gpio_port *port = &gpio->ports[d->hwirq >> 3];
- int offset = d->hwirq;
- u32 val;
-
- if ((trigger & IRQF_TRIGGER_RISING) &&
- (trigger & IRQF_TRIGGER_FALLING)) {
- /*
- * The GPIO block can only trigger on falling OR rising edges,
- * not both. So we need to toggle the mode whenever the pin
- * goes from one state to the other with a special state flag
- */
- dev_dbg(gpio->dev,
- "trigger on both rising and falling edge on pin %d\n",
- offset);
- port->toggle_edge_mode |= U300_PIN_BIT(offset);
- u300_toggle_trigger(gpio, offset);
- } else if (trigger & IRQF_TRIGGER_RISING) {
- dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
- offset);
- val = readl(U300_PIN_REG(offset, icr));
- writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
- port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
- } else if (trigger & IRQF_TRIGGER_FALLING) {
- dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
- offset);
- val = readl(U300_PIN_REG(offset, icr));
- writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
- port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
- }
-
- return 0;
-}
-
-static void u300_gpio_irq_enable(struct irq_data *d)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- struct u300_gpio_port *port = &gpio->ports[d->hwirq >> 3];
- int offset = d->hwirq;
- u32 val;
- unsigned long flags;
-
- dev_dbg(gpio->dev, "enable IRQ for hwirq %lu on port %s, offset %d\n",
- d->hwirq, port->name, offset);
- local_irq_save(flags);
- val = readl(U300_PIN_REG(offset, ien));
- writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
- local_irq_restore(flags);
-}
-
-static void u300_gpio_irq_disable(struct irq_data *d)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- int offset = d->hwirq;
- u32 val;
- unsigned long flags;
-
- local_irq_save(flags);
- val = readl(U300_PIN_REG(offset, ien));
- writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
- local_irq_restore(flags);
-}
-
-static struct irq_chip u300_gpio_irqchip = {
- .name = "u300-gpio-irqchip",
- .irq_enable = u300_gpio_irq_enable,
- .irq_disable = u300_gpio_irq_disable,
- .irq_set_type = u300_gpio_irq_type,
-};
-
-static void u300_gpio_irq_handler(struct irq_desc *desc)
-{
- unsigned int irq = irq_desc_get_irq(desc);
- struct irq_chip *parent_chip = irq_desc_get_chip(desc);
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct u300_gpio *gpio = gpiochip_get_data(chip);
- struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
- int pinoffset = port->number << 3; /* get the right stride */
- unsigned long val;
-
- chained_irq_enter(parent_chip, desc);
-
- /* Read event register */
- val = readl(U300_PIN_REG(pinoffset, iev));
- /* Mask relevant bits */
- val &= 0xFFU; /* 8 bits per port */
- /* ACK IRQ (clear event) */
- writel(val, U300_PIN_REG(pinoffset, iev));
-
- /* Call IRQ handler */
- if (val != 0) {
- int irqoffset;
-
- for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
- int offset = pinoffset + irqoffset;
- int pin_irq = irq_find_mapping(chip->irq.domain, offset);
-
- dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
- pin_irq, offset);
- generic_handle_irq(pin_irq);
- /*
- * Triggering IRQ on both rising and falling edges
- * needs software emulation of the trigger mode
- */
- if (port->toggle_edge_mode & U300_PIN_BIT(offset))
- u300_toggle_trigger(gpio, offset);
- }
- }
-
- chained_irq_exit(parent_chip, desc);
-}
-
-static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
- int offset,
- const struct u300_gpio_confdata *conf)
-{
- /* Set mode: input or output */
- if (conf->output) {
- u300_gpio_direction_output(&gpio->chip, offset, conf->outval);
-
- /* Deactivate bias mode for output */
- u300_gpio_config_set(&gpio->chip, offset,
- PIN_CONFIG_BIAS_HIGH_IMPEDANCE);
-
- /* Set drive mode for output */
- u300_gpio_config_set(&gpio->chip, offset,
- PIN_CONFIG_DRIVE_PUSH_PULL);
-
- dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
- offset, conf->outval);
- } else {
- u300_gpio_direction_input(&gpio->chip, offset);
-
- /* Always set output low on input pins */
- u300_gpio_set(&gpio->chip, offset, 0);
-
- /* Set bias mode for input */
- u300_gpio_config_set(&gpio->chip, offset, conf->bias_mode);
-
- dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
- offset, conf->bias_mode);
- }
-}
-
-static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio)
-{
- int i, j;
-
- /* Write default config and values to all pins */
- for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
- for (j = 0; j < 8; j++) {
- const struct u300_gpio_confdata *conf;
- int offset = (i*8) + j;
-
- conf = &bs335_gpio_config[i][j];
- u300_gpio_init_pin(gpio, offset, conf);
- }
- }
-}
-
-/*
- * Here we map a GPIO in the local gpio_chip pin space to a pin in
- * the local pinctrl pin space. The pin controller used is
- * pinctrl-u300.
- */
-struct coh901_pinpair {
- unsigned int offset;
- unsigned int pin_base;
-};
-
-#define COH901_PINRANGE(a, b) { .offset = a, .pin_base = b }
-
-static struct coh901_pinpair coh901_pintable[] = {
- COH901_PINRANGE(10, 426),
- COH901_PINRANGE(11, 180),
- COH901_PINRANGE(12, 165), /* MS/MMC card insertion */
- COH901_PINRANGE(13, 179),
- COH901_PINRANGE(14, 178),
- COH901_PINRANGE(16, 194),
- COH901_PINRANGE(17, 193),
- COH901_PINRANGE(18, 192),
- COH901_PINRANGE(19, 191),
- COH901_PINRANGE(20, 186),
- COH901_PINRANGE(21, 185),
- COH901_PINRANGE(22, 184),
- COH901_PINRANGE(23, 183),
- COH901_PINRANGE(24, 182),
- COH901_PINRANGE(25, 181),
-};
-
-static int __init u300_gpio_probe(struct platform_device *pdev)
-{
- struct u300_gpio *gpio;
- struct gpio_irq_chip *girq;
- int err = 0;
- int portno;
- u32 val;
- u32 ifr;
- int i;
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(struct u300_gpio), GFP_KERNEL);
- if (gpio == NULL)
- return -ENOMEM;
-
- gpio->chip = u300_gpio_chip;
- gpio->chip.ngpio = U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT;
- gpio->chip.parent = &pdev->dev;
- gpio->chip.base = 0;
- gpio->dev = &pdev->dev;
-
- gpio->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(gpio->base))
- return PTR_ERR(gpio->base);
-
- gpio->clk = devm_clk_get(gpio->dev, NULL);
- if (IS_ERR(gpio->clk)) {
- err = PTR_ERR(gpio->clk);
- dev_err(gpio->dev, "could not get GPIO clock\n");
- return err;
- }
-
- err = clk_prepare_enable(gpio->clk);
- if (err) {
- dev_err(gpio->dev, "could not enable GPIO clock\n");
- return err;
- }
-
- dev_info(gpio->dev,
- "initializing GPIO Controller COH 901 571/3\n");
- gpio->stride = U300_GPIO_PORT_STRIDE;
- gpio->pcr = U300_GPIO_PXPCR;
- gpio->dor = U300_GPIO_PXPDOR;
- gpio->dir = U300_GPIO_PXPDIR;
- gpio->per = U300_GPIO_PXPER;
- gpio->icr = U300_GPIO_PXICR;
- gpio->ien = U300_GPIO_PXIEN;
- gpio->iev = U300_GPIO_PXIEV;
- ifr = U300_GPIO_PXIFR;
-
- val = readl(gpio->base + U300_GPIO_CR);
- dev_info(gpio->dev, "COH901571/3 block version: %d, " \
- "number of cores: %d totalling %d pins\n",
- ((val & 0x000001FC) >> 2),
- ((val & 0x0000FE00) >> 9),
- ((val & 0x0000FE00) >> 9) * 8);
- writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE,
- gpio->base + U300_GPIO_CR);
- u300_gpio_init_coh901571(gpio);
-
- girq = &gpio->chip.irq;
- girq->chip = &u300_gpio_irqchip;
- girq->parent_handler = u300_gpio_irq_handler;
- girq->num_parents = U300_GPIO_NUM_PORTS;
- girq->parents = devm_kcalloc(gpio->dev, U300_GPIO_NUM_PORTS,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents) {
- err = -ENOMEM;
- goto err_dis_clk;
- }
- for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
- struct u300_gpio_port *port = &gpio->ports[portno];
-
- snprintf(port->name, 8, "gpio%d", portno);
- port->number = portno;
- port->gpio = gpio;
-
- port->irq = platform_get_irq(pdev, portno);
- girq->parents[portno] = port->irq;
-
- /* Turns off irq force (test register) for this port */
- writel(0x0, gpio->base + portno * gpio->stride + ifr);
- }
- girq->default_type = IRQ_TYPE_EDGE_FALLING;
- girq->handler = handle_simple_irq;
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add_data(&gpio->chip, gpio);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_dis_clk;
- }
-
- /*
- * Add pinctrl pin ranges, the pin controller must be registered
- * at this point
- */
- for (i = 0; i < ARRAY_SIZE(coh901_pintable); i++) {
- struct coh901_pinpair *p = &coh901_pintable[i];
-
- err = gpiochip_add_pin_range(&gpio->chip, "pinctrl-u300",
- p->offset, p->pin_base, 1);
- if (err)
- goto err_no_range;
- }
-
- platform_set_drvdata(pdev, gpio);
-
- return 0;
-
-err_no_range:
- gpiochip_remove(&gpio->chip);
-err_dis_clk:
- clk_disable_unprepare(gpio->clk);
- dev_err(&pdev->dev, "module ERROR:%d\n", err);
- return err;
-}
-
-static int __exit u300_gpio_remove(struct platform_device *pdev)
-{
- struct u300_gpio *gpio = platform_get_drvdata(pdev);
-
- /* Turn off the GPIO block */
- writel(0x00000000U, gpio->base + U300_GPIO_CR);
-
- gpiochip_remove(&gpio->chip);
- clk_disable_unprepare(gpio->clk);
- return 0;
-}
-
-static const struct of_device_id u300_gpio_match[] = {
- { .compatible = "stericsson,gpio-coh901" },
- {},
-};
-
-static struct platform_driver u300_gpio_driver = {
- .driver = {
- .name = "u300-gpio",
- .of_match_table = u300_gpio_match,
- },
- .remove = __exit_p(u300_gpio_remove),
-};
-
-static int __init u300_gpio_init(void)
-{
- return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
-}
-
-static void __exit u300_gpio_exit(void)
-{
- platform_driver_unregister(&u300_gpio_driver);
-}
-
-arch_initcall(u300_gpio_init);
-module_exit(u300_gpio_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-coh901.h b/drivers/pinctrl/pinctrl-coh901.h
deleted file mode 100644
index ba2678665168..000000000000
--- a/drivers/pinctrl/pinctrl-coh901.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-int u300_gpio_config_get(struct gpio_chip *chip,
- unsigned offset,
- unsigned long *config);
-int u300_gpio_config_set(struct gpio_chip *chip, unsigned offset,
- enum pin_config_param param);
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 53a6a24bd052..f2746125b077 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -37,11 +37,11 @@
#define JZ4740_GPIO_TRIG 0x70
#define JZ4740_GPIO_FLAG 0x80
-#define JZ4760_GPIO_INT 0x10
-#define JZ4760_GPIO_PAT1 0x30
-#define JZ4760_GPIO_PAT0 0x40
-#define JZ4760_GPIO_FLAG 0x50
-#define JZ4760_GPIO_PEN 0x70
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
@@ -376,12 +376,21 @@ static int jz4760_cim_pins[] = {
0x26, 0x27, 0x28, 0x29,
0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
};
+static int jz4760_lcd_8bit_pins[] = {
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x4c,
+ 0x4d, 0x52, 0x53,
+};
+static int jz4760_lcd_16bit_pins[] = {
+ 0x4e, 0x4f, 0x50, 0x51, 0x56, 0x57, 0x58, 0x59,
+};
+static int jz4760_lcd_18bit_pins[] = {
+ 0x5a, 0x5b,
+};
static int jz4760_lcd_24bit_pins[] = {
- 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
- 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
- 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
- 0x58, 0x59, 0x5a, 0x5b,
+ 0x40, 0x41, 0x4a, 0x4b, 0x54, 0x55,
};
+static int jz4760_lcd_special_pins[] = { 0x40, 0x41, 0x4a, 0x54 };
+static int jz4760_lcd_generic_pins[] = { 0x49, };
static int jz4760_pwm_pwm0_pins[] = { 0x80, };
static int jz4760_pwm_pwm1_pins[] = { 0x81, };
static int jz4760_pwm_pwm2_pins[] = { 0x82, };
@@ -390,6 +399,7 @@ static int jz4760_pwm_pwm4_pins[] = { 0x84, };
static int jz4760_pwm_pwm5_pins[] = { 0x85, };
static int jz4760_pwm_pwm6_pins[] = { 0x6a, };
static int jz4760_pwm_pwm7_pins[] = { 0x6b, };
+static int jz4760_otg_pins[] = { 0x8a, };
static u8 jz4760_uart3_data_funcs[] = { 0, 1, };
static u8 jz4760_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
@@ -436,8 +446,12 @@ static const struct group_desc jz4760_groups[] = {
INGENIC_PIN_GROUP("i2c0-data", jz4760_i2c0, 0),
INGENIC_PIN_GROUP("i2c1-data", jz4760_i2c1, 0),
INGENIC_PIN_GROUP("cim-data", jz4760_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4760_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4760_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4760_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4760_lcd_24bit, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-generic", jz4760_lcd_generic, 0),
+ INGENIC_PIN_GROUP("lcd-special", jz4760_lcd_special, 1),
INGENIC_PIN_GROUP("pwm0", jz4760_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4760_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4760_pwm_pwm2, 0),
@@ -446,6 +460,7 @@ static const struct group_desc jz4760_groups[] = {
INGENIC_PIN_GROUP("pwm5", jz4760_pwm_pwm5, 0),
INGENIC_PIN_GROUP("pwm6", jz4760_pwm_pwm6, 0),
INGENIC_PIN_GROUP("pwm7", jz4760_pwm_pwm7, 0),
+ INGENIC_PIN_GROUP("otg-vbus", jz4760_otg, 0),
};
static const char *jz4760_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -477,7 +492,10 @@ static const char *jz4760_cs6_groups[] = { "nemc-cs6", };
static const char *jz4760_i2c0_groups[] = { "i2c0-data", };
static const char *jz4760_i2c1_groups[] = { "i2c1-data", };
static const char *jz4760_cim_groups[] = { "cim-data", };
-static const char *jz4760_lcd_groups[] = { "lcd-24bit", "lcd-no-pins", };
+static const char *jz4760_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
static const char *jz4760_pwm0_groups[] = { "pwm0", };
static const char *jz4760_pwm1_groups[] = { "pwm1", };
static const char *jz4760_pwm2_groups[] = { "pwm2", };
@@ -486,6 +504,7 @@ static const char *jz4760_pwm4_groups[] = { "pwm4", };
static const char *jz4760_pwm5_groups[] = { "pwm5", };
static const char *jz4760_pwm6_groups[] = { "pwm6", };
static const char *jz4760_pwm7_groups[] = { "pwm7", };
+static const char *jz4760_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4760_functions[] = {
{ "uart0", jz4760_uart0_groups, ARRAY_SIZE(jz4760_uart0_groups), },
@@ -514,6 +533,7 @@ static const struct function_desc jz4760_functions[] = {
{ "pwm5", jz4760_pwm5_groups, ARRAY_SIZE(jz4760_pwm5_groups), },
{ "pwm6", jz4760_pwm6_groups, ARRAY_SIZE(jz4760_pwm6_groups), },
{ "pwm7", jz4760_pwm7_groups, ARRAY_SIZE(jz4760_pwm7_groups), },
+ { "otg", jz4760_otg_groups, ARRAY_SIZE(jz4760_otg_groups), },
};
static const struct ingenic_chip_info jz4760_chip_info = {
@@ -648,7 +668,6 @@ static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
-static int jz4770_otg_pins[] = { 0x8a, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
@@ -747,7 +766,7 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7, 0),
INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii, 0),
INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii, 0),
- INGENIC_PIN_GROUP("otg-vbus", jz4770_otg, 0),
+ INGENIC_PIN_GROUP("otg-vbus", jz4760_otg, 0),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -808,7 +827,6 @@ static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
-static const char *jz4770_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -841,7 +859,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
{ "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
- { "otg", jz4770_otg_groups, ARRAY_SIZE(jz4770_otg_groups), },
+ { "otg", jz4760_otg_groups, ARRAY_SIZE(jz4760_otg_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1688,8 +1706,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
@@ -1718,9 +1736,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
+ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
} else {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1776,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1774,8 +1792,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
@@ -1799,8 +1817,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
@@ -1856,8 +1874,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1938,9 +1956,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760) {
- if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
- ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -1991,20 +2009,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
'A' + offt, idx, func);
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
}
return 0;
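The hunk above also fixes a subtle JZ4740 bug: the 2-bit function number is split across two single-bit registers, bit 1 into TRIG (PAT1 on JZ4770 and later) and bit 0 into SELECT (PAT0), so the old `func > 0` wrote the wrong SELECT bit for function 2. A standalone sketch of the split:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int func;

	/* Functions 0-3: bit 1 -> TRIG/PAT1, bit 0 -> SELECT/PAT0 */
	for (func = 0; func < 4; func++) {
		bool hi = func & 0x2;
		bool lo = func & 0x1;	/* old code used func > 0 here */

		printf("func %u -> TRIG=%d SELECT=%d\n", func, hi, lo);
	}
	return 0;
}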
@@ -2057,14 +2075,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
'A' + offt, idx, input ? "in" : "out");
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2109,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->info->version >= ID_JZ4760)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -2141,8 +2159,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
}
@@ -2151,8 +2169,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
}
@@ -2384,6 +2402,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
unsigned int i;
int err;
+ chip_info = of_device_get_match_data(dev);
+ if (!chip_info) {
+ dev_err(dev, "Unsupported SoC\n");
+ return -EINVAL;
+ }
+
jzpc = devm_kzalloc(dev, sizeof(*jzpc), GFP_KERNEL);
if (!jzpc)
return -ENOMEM;
@@ -2400,7 +2424,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
jzpc->dev = dev;
- jzpc->info = chip_info = of_device_get_match_data(dev);
+ jzpc->info = chip_info;
pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
@@ -2470,17 +2494,47 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id ingenic_pinctrl_of_match[] = {
- { .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info },
- { .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info },
- { .compatible = "ingenic,jz4760-pinctrl", .data = &jz4760_chip_info },
- { .compatible = "ingenic,jz4760b-pinctrl", .data = &jz4760_chip_info },
- { .compatible = "ingenic,jz4770-pinctrl", .data = &jz4770_chip_info },
- { .compatible = "ingenic,jz4780-pinctrl", .data = &jz4780_chip_info },
- { .compatible = "ingenic,x1000-pinctrl", .data = &x1000_chip_info },
- { .compatible = "ingenic,x1000e-pinctrl", .data = &x1000_chip_info },
- { .compatible = "ingenic,x1500-pinctrl", .data = &x1500_chip_info },
- { .compatible = "ingenic,x1830-pinctrl", .data = &x1830_chip_info },
- {},
+ {
+ .compatible = "ingenic,jz4740-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4740, &jz4740_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4725b-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4725B, &jz4725b_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4760-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4760, &jz4760_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4760b-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4760, &jz4760_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4770-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4770, &jz4770_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4780-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4780, &jz4780_chip_info)
+ },
+ {
+ .compatible = "ingenic,x1000-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X1000, &x1000_chip_info)
+ },
+ {
+ .compatible = "ingenic,x1000e-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X1000, &x1000_chip_info)
+ },
+ {
+ .compatible = "ingenic,x1500-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X1500, &x1500_chip_info)
+ },
+ {
+ .compatible = "ingenic,x1830-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X1830, &x1830_chip_info)
+ },
+ { /* sentinel */ },
};
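The match table above leans on IF_ENABLED() from <linux/kconfig.h>, which evaluates to NULL when the named Kconfig option is disabled; the new NULL check in probe turns that into a clean -EINVAL instead of a later crash. A minimal userspace sketch of the pattern (the IS_ENABLED/IF_ENABLED stand-ins below are simplifications of the real macros):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the <linux/kconfig.h> helpers */
#define IS_ENABLED(opt)		(opt)
#define IF_ENABLED(opt, ptr)	(IS_ENABLED(opt) ? (ptr) : NULL)

static const int jz4770_chip_info = 4770;

int main(void)
{
	/* 0 models CONFIG_MACH_JZ4770=n: the match data collapses to NULL */
	const int *chip_info = IF_ENABLED(0, &jz4770_chip_info);

	if (!chip_info)
		printf("Unsupported SoC\n");	/* probe returns -EINVAL */
	return 0;
}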
static struct platform_driver ingenic_pinctrl_driver = {
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
new file mode 100644
index 000000000000..8a733cf77ba0
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include <dt-bindings/pinctrl/k210-fpioa.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+/*
+ * The K210 only implements 8 drive levels, even though
+ * there is register space for 16
+ */
+#define K210_PC_DRIVE_MASK GENMASK(11, 8)
+#define K210_PC_DRIVE_SHIFT 8
+#define K210_PC_DRIVE_0 (0 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_1 (1 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_2 (2 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_3 (3 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_4 (4 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_5 (5 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_6 (6 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_7 (7 << K210_PC_DRIVE_SHIFT)
+#define K210_PC_DRIVE_MAX 7
+#define K210_PC_MODE_MASK GENMASK(23, 12)
+
+/*
+ * output enabled == PC_OE & (PC_OE_INV ^ FUNCTION_OE)
+ * where FUNCTION_OE is a physical signal from the function.
+ */
+#define K210_PC_OE BIT(12) /* Output Enable */
+#define K210_PC_OE_INV BIT(13) /* INVert Output Enable */
+#define K210_PC_DO_OE BIT(14) /* set Data Out to Output Enable sig */
+#define K210_PC_DO_INV BIT(15) /* INVert final Data Output */
+#define K210_PC_PU BIT(16) /* Pull Up */
+#define K210_PC_PD BIT(17) /* Pull Down */
+/* Strong pull up not implemented on K210 */
+#define K210_PC_SL BIT(19) /* reduce SLew rate */
+/* Same semantics as OE above */
+#define K210_PC_IE BIT(20) /* Input Enable */
+#define K210_PC_IE_INV BIT(21) /* INVert Input Enable */
+#define K210_PC_DI_INV BIT(22) /* INVert Data Input */
+#define K210_PC_ST BIT(23) /* Schmitt Trigger */
+#define K210_PC_DI BIT(31) /* raw Data Input */
+
+#define K210_PC_BIAS_MASK (K210_PC_PU | K210_PC_PD)
+
+#define K210_PC_MODE_IN (K210_PC_IE | K210_PC_ST)
+#define K210_PC_MODE_OUT (K210_PC_DRIVE_7 | K210_PC_OE)
+#define K210_PC_MODE_I2C (K210_PC_MODE_IN | K210_PC_SL | \
+ K210_PC_OE | K210_PC_PU)
+#define K210_PC_MODE_SCCB (K210_PC_MODE_I2C | \
+ K210_PC_OE_INV | K210_PC_IE_INV)
+#define K210_PC_MODE_SPI (K210_PC_MODE_IN | K210_PC_IE_INV | \
+ K210_PC_MODE_OUT | K210_PC_OE_INV)
+#define K210_PC_MODE_GPIO (K210_PC_MODE_IN | K210_PC_MODE_OUT)
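The output-enable expression in the comment above is easiest to verify as a truth table; a standalone sketch evaluating PC_OE & (PC_OE_INV ^ FUNCTION_OE) over all inputs:

#include <stdio.h>

int main(void)
{
	int oe, inv, func_oe;

	for (oe = 0; oe <= 1; oe++)
		for (inv = 0; inv <= 1; inv++)
			for (func_oe = 0; func_oe <= 1; func_oe++)
				printf("OE=%d OE_INV=%d FUNC_OE=%d -> out=%d\n",
				       oe, inv, func_oe,
				       oe & (inv ^ func_oe));
	return 0;
}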
+
+#define K210_PG_FUNC GENMASK(7, 0)
+#define K210_PG_DO BIT(8)
+#define K210_PG_PIN GENMASK(22, 16)
+
+/*
+ * struct k210_fpioa: Kendryte K210 FPIOA memory mapped registers
+ * @pins: 48 32-bit I/O pin registers
+ * @tie_en: 256 (one per function) input tie enable bits
+ * @tie_val: 256 (one per function) input tie value bits
+ */
+struct k210_fpioa {
+ u32 pins[48];
+ u32 tie_en[8];
+ u32 tie_val[8];
+};
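tie_en and tie_val pack one bit per function into eight 32-bit words; a standalone sketch of the word/bit indexing for an arbitrary function number (171 below is purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int func = 171;	/* hypothetical function number */
	unsigned int word = func / 32;	/* index into tie_en[]/tie_val[] */
	unsigned int bit  = func % 32;	/* bit within that word */

	printf("func %u -> tie_en[%u] bit %u\n", func, word, bit);
	return 0;
}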
+
+struct k210_fpioa_data {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+
+ struct k210_fpioa __iomem *fpioa;
+ struct regmap *sysctl_map;
+ u32 power_offset;
+ struct clk *clk;
+ struct clk *pclk;
+};
+
+#define K210_PIN_NAME(i) ("IO_" #i)
+#define K210_PIN(i) [(i)] = PINCTRL_PIN((i), K210_PIN_NAME(i))
+
+static const struct pinctrl_pin_desc k210_pins[] = {
+ K210_PIN(0), K210_PIN(1), K210_PIN(2),
+ K210_PIN(3), K210_PIN(4), K210_PIN(5),
+ K210_PIN(6), K210_PIN(7), K210_PIN(8),
+ K210_PIN(9), K210_PIN(10), K210_PIN(11),
+ K210_PIN(12), K210_PIN(13), K210_PIN(14),
+ K210_PIN(15), K210_PIN(16), K210_PIN(17),
+ K210_PIN(18), K210_PIN(19), K210_PIN(20),
+ K210_PIN(21), K210_PIN(22), K210_PIN(23),
+ K210_PIN(24), K210_PIN(25), K210_PIN(26),
+ K210_PIN(27), K210_PIN(28), K210_PIN(29),
+ K210_PIN(30), K210_PIN(31), K210_PIN(32),
+ K210_PIN(33), K210_PIN(34), K210_PIN(35),
+ K210_PIN(36), K210_PIN(37), K210_PIN(38),
+ K210_PIN(39), K210_PIN(40), K210_PIN(41),
+ K210_PIN(42), K210_PIN(43), K210_PIN(44),
+ K210_PIN(45), K210_PIN(46), K210_PIN(47)
+};
+
+#define K210_NPINS ARRAY_SIZE(k210_pins)
+
+/*
+ * Pin groups: each of the 48 programmable pins is a group.
+ * To this are added 8 power domain groups, which for the purposes of
+ * the pin subsystem, contain no pins. The power domain groups only exist
+ * to set the power domain voltage. Their group ids must never be used
+ * as pin numbers (since there are no pins 48-55).
+ */
+static const char *const k210_group_names[] = {
+ /* The first 48 groups are for pins, one each */
+ K210_PIN_NAME(0), K210_PIN_NAME(1), K210_PIN_NAME(2),
+ K210_PIN_NAME(3), K210_PIN_NAME(4), K210_PIN_NAME(5),
+ K210_PIN_NAME(6), K210_PIN_NAME(7), K210_PIN_NAME(8),
+ K210_PIN_NAME(9), K210_PIN_NAME(10), K210_PIN_NAME(11),
+ K210_PIN_NAME(12), K210_PIN_NAME(13), K210_PIN_NAME(14),
+ K210_PIN_NAME(15), K210_PIN_NAME(16), K210_PIN_NAME(17),
+ K210_PIN_NAME(18), K210_PIN_NAME(19), K210_PIN_NAME(20),
+ K210_PIN_NAME(21), K210_PIN_NAME(22), K210_PIN_NAME(23),
+ K210_PIN_NAME(24), K210_PIN_NAME(25), K210_PIN_NAME(26),
+ K210_PIN_NAME(27), K210_PIN_NAME(28), K210_PIN_NAME(29),
+ K210_PIN_NAME(30), K210_PIN_NAME(31), K210_PIN_NAME(32),
+ K210_PIN_NAME(33), K210_PIN_NAME(34), K210_PIN_NAME(35),
+ K210_PIN_NAME(36), K210_PIN_NAME(37), K210_PIN_NAME(38),
+ K210_PIN_NAME(39), K210_PIN_NAME(40), K210_PIN_NAME(41),
+ K210_PIN_NAME(42), K210_PIN_NAME(43), K210_PIN_NAME(44),
+ K210_PIN_NAME(45), K210_PIN_NAME(46), K210_PIN_NAME(47),
+ [48] = "A0", [49] = "A1", [50] = "A2",
+ [51] = "B3", [52] = "B4", [53] = "B5",
+ [54] = "C6", [55] = "C7"
+};
+
+#define K210_NGROUPS ARRAY_SIZE(k210_group_names)
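Group selectors 48-55 address power domains rather than pins; the conf handlers below map such a selector to a bit in the sysctl power register as BIT(selector - K210_NPINS). A standalone sketch of that mapping, assuming K210_NPINS is 48:

#include <stdio.h>

#define K210_NPINS 48

int main(void)
{
	static const char *const banks[] = {
		"A0", "A1", "A2", "B3", "B4", "B5", "C6", "C7"
	};
	unsigned int selector;

	for (selector = 48; selector < 56; selector++)
		printf("group %s -> power register bit %u\n",
		       banks[selector - K210_NPINS],
		       selector - K210_NPINS);
	return 0;
}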
+
+enum k210_pinctrl_mode_id {
+ K210_PC_DEFAULT_DISABLED,
+ K210_PC_DEFAULT_IN,
+ K210_PC_DEFAULT_IN_TIE,
+ K210_PC_DEFAULT_OUT,
+ K210_PC_DEFAULT_I2C,
+ K210_PC_DEFAULT_SCCB,
+ K210_PC_DEFAULT_SPI,
+ K210_PC_DEFAULT_GPIO,
+ K210_PC_DEFAULT_INT13,
+};
+
+#define K210_PC_DEFAULT(mode) \
+ [K210_PC_DEFAULT_##mode] = K210_PC_MODE_##mode
+
+static const u32 k210_pinconf_mode_id_to_mode[] = {
+ [K210_PC_DEFAULT_DISABLED] = 0,
+ K210_PC_DEFAULT(IN),
+ [K210_PC_DEFAULT_IN_TIE] = K210_PC_MODE_IN,
+ K210_PC_DEFAULT(OUT),
+ K210_PC_DEFAULT(I2C),
+ K210_PC_DEFAULT(SCCB),
+ K210_PC_DEFAULT(SPI),
+ K210_PC_DEFAULT(GPIO),
+ [K210_PC_DEFAULT_INT13] = K210_PC_MODE_IN | K210_PC_PU,
+};
+
+#undef K210_PC_DEFAULT
+
+/*
+ * Pin functions configuration information.
+ */
+struct k210_pcf_info {
+ char name[15];
+ u8 mode_id;
+};
+
+#define K210_FUNC(id, mode) \
+ [K210_PCF_##id] = { \
+ .name = #id, \
+ .mode_id = K210_PC_DEFAULT_##mode \
+ }
+
+static const struct k210_pcf_info k210_pcf_infos[] = {
+ K210_FUNC(JTAG_TCLK, IN),
+ K210_FUNC(JTAG_TDI, IN),
+ K210_FUNC(JTAG_TMS, IN),
+ K210_FUNC(JTAG_TDO, OUT),
+ K210_FUNC(SPI0_D0, SPI),
+ K210_FUNC(SPI0_D1, SPI),
+ K210_FUNC(SPI0_D2, SPI),
+ K210_FUNC(SPI0_D3, SPI),
+ K210_FUNC(SPI0_D4, SPI),
+ K210_FUNC(SPI0_D5, SPI),
+ K210_FUNC(SPI0_D6, SPI),
+ K210_FUNC(SPI0_D7, SPI),
+ K210_FUNC(SPI0_SS0, OUT),
+ K210_FUNC(SPI0_SS1, OUT),
+ K210_FUNC(SPI0_SS2, OUT),
+ K210_FUNC(SPI0_SS3, OUT),
+ K210_FUNC(SPI0_ARB, IN_TIE),
+ K210_FUNC(SPI0_SCLK, OUT),
+ K210_FUNC(UARTHS_RX, IN),
+ K210_FUNC(UARTHS_TX, OUT),
+ K210_FUNC(RESV6, IN),
+ K210_FUNC(RESV7, IN),
+ K210_FUNC(CLK_SPI1, OUT),
+ K210_FUNC(CLK_I2C1, OUT),
+ K210_FUNC(GPIOHS0, GPIO),
+ K210_FUNC(GPIOHS1, GPIO),
+ K210_FUNC(GPIOHS2, GPIO),
+ K210_FUNC(GPIOHS3, GPIO),
+ K210_FUNC(GPIOHS4, GPIO),
+ K210_FUNC(GPIOHS5, GPIO),
+ K210_FUNC(GPIOHS6, GPIO),
+ K210_FUNC(GPIOHS7, GPIO),
+ K210_FUNC(GPIOHS8, GPIO),
+ K210_FUNC(GPIOHS9, GPIO),
+ K210_FUNC(GPIOHS10, GPIO),
+ K210_FUNC(GPIOHS11, GPIO),
+ K210_FUNC(GPIOHS12, GPIO),
+ K210_FUNC(GPIOHS13, GPIO),
+ K210_FUNC(GPIOHS14, GPIO),
+ K210_FUNC(GPIOHS15, GPIO),
+ K210_FUNC(GPIOHS16, GPIO),
+ K210_FUNC(GPIOHS17, GPIO),
+ K210_FUNC(GPIOHS18, GPIO),
+ K210_FUNC(GPIOHS19, GPIO),
+ K210_FUNC(GPIOHS20, GPIO),
+ K210_FUNC(GPIOHS21, GPIO),
+ K210_FUNC(GPIOHS22, GPIO),
+ K210_FUNC(GPIOHS23, GPIO),
+ K210_FUNC(GPIOHS24, GPIO),
+ K210_FUNC(GPIOHS25, GPIO),
+ K210_FUNC(GPIOHS26, GPIO),
+ K210_FUNC(GPIOHS27, GPIO),
+ K210_FUNC(GPIOHS28, GPIO),
+ K210_FUNC(GPIOHS29, GPIO),
+ K210_FUNC(GPIOHS30, GPIO),
+ K210_FUNC(GPIOHS31, GPIO),
+ K210_FUNC(GPIO0, GPIO),
+ K210_FUNC(GPIO1, GPIO),
+ K210_FUNC(GPIO2, GPIO),
+ K210_FUNC(GPIO3, GPIO),
+ K210_FUNC(GPIO4, GPIO),
+ K210_FUNC(GPIO5, GPIO),
+ K210_FUNC(GPIO6, GPIO),
+ K210_FUNC(GPIO7, GPIO),
+ K210_FUNC(UART1_RX, IN),
+ K210_FUNC(UART1_TX, OUT),
+ K210_FUNC(UART2_RX, IN),
+ K210_FUNC(UART2_TX, OUT),
+ K210_FUNC(UART3_RX, IN),
+ K210_FUNC(UART3_TX, OUT),
+ K210_FUNC(SPI1_D0, SPI),
+ K210_FUNC(SPI1_D1, SPI),
+ K210_FUNC(SPI1_D2, SPI),
+ K210_FUNC(SPI1_D3, SPI),
+ K210_FUNC(SPI1_D4, SPI),
+ K210_FUNC(SPI1_D5, SPI),
+ K210_FUNC(SPI1_D6, SPI),
+ K210_FUNC(SPI1_D7, SPI),
+ K210_FUNC(SPI1_SS0, OUT),
+ K210_FUNC(SPI1_SS1, OUT),
+ K210_FUNC(SPI1_SS2, OUT),
+ K210_FUNC(SPI1_SS3, OUT),
+ K210_FUNC(SPI1_ARB, IN_TIE),
+ K210_FUNC(SPI1_SCLK, OUT),
+ K210_FUNC(SPI2_D0, SPI),
+ K210_FUNC(SPI2_SS, IN),
+ K210_FUNC(SPI2_SCLK, IN),
+ K210_FUNC(I2S0_MCLK, OUT),
+ K210_FUNC(I2S0_SCLK, OUT),
+ K210_FUNC(I2S0_WS, OUT),
+ K210_FUNC(I2S0_IN_D0, IN),
+ K210_FUNC(I2S0_IN_D1, IN),
+ K210_FUNC(I2S0_IN_D2, IN),
+ K210_FUNC(I2S0_IN_D3, IN),
+ K210_FUNC(I2S0_OUT_D0, OUT),
+ K210_FUNC(I2S0_OUT_D1, OUT),
+ K210_FUNC(I2S0_OUT_D2, OUT),
+ K210_FUNC(I2S0_OUT_D3, OUT),
+ K210_FUNC(I2S1_MCLK, OUT),
+ K210_FUNC(I2S1_SCLK, OUT),
+ K210_FUNC(I2S1_WS, OUT),
+ K210_FUNC(I2S1_IN_D0, IN),
+ K210_FUNC(I2S1_IN_D1, IN),
+ K210_FUNC(I2S1_IN_D2, IN),
+ K210_FUNC(I2S1_IN_D3, IN),
+ K210_FUNC(I2S1_OUT_D0, OUT),
+ K210_FUNC(I2S1_OUT_D1, OUT),
+ K210_FUNC(I2S1_OUT_D2, OUT),
+ K210_FUNC(I2S1_OUT_D3, OUT),
+ K210_FUNC(I2S2_MCLK, OUT),
+ K210_FUNC(I2S2_SCLK, OUT),
+ K210_FUNC(I2S2_WS, OUT),
+ K210_FUNC(I2S2_IN_D0, IN),
+ K210_FUNC(I2S2_IN_D1, IN),
+ K210_FUNC(I2S2_IN_D2, IN),
+ K210_FUNC(I2S2_IN_D3, IN),
+ K210_FUNC(I2S2_OUT_D0, OUT),
+ K210_FUNC(I2S2_OUT_D1, OUT),
+ K210_FUNC(I2S2_OUT_D2, OUT),
+ K210_FUNC(I2S2_OUT_D3, OUT),
+ K210_FUNC(RESV0, DISABLED),
+ K210_FUNC(RESV1, DISABLED),
+ K210_FUNC(RESV2, DISABLED),
+ K210_FUNC(RESV3, DISABLED),
+ K210_FUNC(RESV4, DISABLED),
+ K210_FUNC(RESV5, DISABLED),
+ K210_FUNC(I2C0_SCLK, I2C),
+ K210_FUNC(I2C0_SDA, I2C),
+ K210_FUNC(I2C1_SCLK, I2C),
+ K210_FUNC(I2C1_SDA, I2C),
+ K210_FUNC(I2C2_SCLK, I2C),
+ K210_FUNC(I2C2_SDA, I2C),
+ K210_FUNC(DVP_XCLK, OUT),
+ K210_FUNC(DVP_RST, OUT),
+ K210_FUNC(DVP_PWDN, OUT),
+ K210_FUNC(DVP_VSYNC, IN),
+ K210_FUNC(DVP_HSYNC, IN),
+ K210_FUNC(DVP_PCLK, IN),
+ K210_FUNC(DVP_D0, IN),
+ K210_FUNC(DVP_D1, IN),
+ K210_FUNC(DVP_D2, IN),
+ K210_FUNC(DVP_D3, IN),
+ K210_FUNC(DVP_D4, IN),
+ K210_FUNC(DVP_D5, IN),
+ K210_FUNC(DVP_D6, IN),
+ K210_FUNC(DVP_D7, IN),
+ K210_FUNC(SCCB_SCLK, SCCB),
+ K210_FUNC(SCCB_SDA, SCCB),
+ K210_FUNC(UART1_CTS, IN),
+ K210_FUNC(UART1_DSR, IN),
+ K210_FUNC(UART1_DCD, IN),
+ K210_FUNC(UART1_RI, IN),
+ K210_FUNC(UART1_SIR_IN, IN),
+ K210_FUNC(UART1_DTR, OUT),
+ K210_FUNC(UART1_RTS, OUT),
+ K210_FUNC(UART1_OUT2, OUT),
+ K210_FUNC(UART1_OUT1, OUT),
+ K210_FUNC(UART1_SIR_OUT, OUT),
+ K210_FUNC(UART1_BAUD, OUT),
+ K210_FUNC(UART1_RE, OUT),
+ K210_FUNC(UART1_DE, OUT),
+ K210_FUNC(UART1_RS485_EN, OUT),
+ K210_FUNC(UART2_CTS, IN),
+ K210_FUNC(UART2_DSR, IN),
+ K210_FUNC(UART2_DCD, IN),
+ K210_FUNC(UART2_RI, IN),
+ K210_FUNC(UART2_SIR_IN, IN),
+ K210_FUNC(UART2_DTR, OUT),
+ K210_FUNC(UART2_RTS, OUT),
+ K210_FUNC(UART2_OUT2, OUT),
+ K210_FUNC(UART2_OUT1, OUT),
+ K210_FUNC(UART2_SIR_OUT, OUT),
+ K210_FUNC(UART2_BAUD, OUT),
+ K210_FUNC(UART2_RE, OUT),
+ K210_FUNC(UART2_DE, OUT),
+ K210_FUNC(UART2_RS485_EN, OUT),
+ K210_FUNC(UART3_CTS, IN),
+ K210_FUNC(UART3_DSR, IN),
+ K210_FUNC(UART3_DCD, IN),
+ K210_FUNC(UART3_RI, IN),
+ K210_FUNC(UART3_SIR_IN, IN),
+ K210_FUNC(UART3_DTR, OUT),
+ K210_FUNC(UART3_RTS, OUT),
+ K210_FUNC(UART3_OUT2, OUT),
+ K210_FUNC(UART3_OUT1, OUT),
+ K210_FUNC(UART3_SIR_OUT, OUT),
+ K210_FUNC(UART3_BAUD, OUT),
+ K210_FUNC(UART3_RE, OUT),
+ K210_FUNC(UART3_DE, OUT),
+ K210_FUNC(UART3_RS485_EN, OUT),
+ K210_FUNC(TIMER0_TOGGLE1, OUT),
+ K210_FUNC(TIMER0_TOGGLE2, OUT),
+ K210_FUNC(TIMER0_TOGGLE3, OUT),
+ K210_FUNC(TIMER0_TOGGLE4, OUT),
+ K210_FUNC(TIMER1_TOGGLE1, OUT),
+ K210_FUNC(TIMER1_TOGGLE2, OUT),
+ K210_FUNC(TIMER1_TOGGLE3, OUT),
+ K210_FUNC(TIMER1_TOGGLE4, OUT),
+ K210_FUNC(TIMER2_TOGGLE1, OUT),
+ K210_FUNC(TIMER2_TOGGLE2, OUT),
+ K210_FUNC(TIMER2_TOGGLE3, OUT),
+ K210_FUNC(TIMER2_TOGGLE4, OUT),
+ K210_FUNC(CLK_SPI2, OUT),
+ K210_FUNC(CLK_I2C2, OUT),
+ K210_FUNC(INTERNAL0, OUT),
+ K210_FUNC(INTERNAL1, OUT),
+ K210_FUNC(INTERNAL2, OUT),
+ K210_FUNC(INTERNAL3, OUT),
+ K210_FUNC(INTERNAL4, OUT),
+ K210_FUNC(INTERNAL5, OUT),
+ K210_FUNC(INTERNAL6, OUT),
+ K210_FUNC(INTERNAL7, OUT),
+ K210_FUNC(INTERNAL8, OUT),
+ K210_FUNC(INTERNAL9, IN),
+ K210_FUNC(INTERNAL10, IN),
+ K210_FUNC(INTERNAL11, IN),
+ K210_FUNC(INTERNAL12, IN),
+ K210_FUNC(INTERNAL13, INT13),
+ K210_FUNC(INTERNAL14, I2C),
+ K210_FUNC(INTERNAL15, IN),
+ K210_FUNC(INTERNAL16, IN),
+ K210_FUNC(INTERNAL17, IN),
+ K210_FUNC(CONSTANT, DISABLED),
+ K210_FUNC(INTERNAL18, IN),
+ K210_FUNC(DEBUG0, OUT),
+ K210_FUNC(DEBUG1, OUT),
+ K210_FUNC(DEBUG2, OUT),
+ K210_FUNC(DEBUG3, OUT),
+ K210_FUNC(DEBUG4, OUT),
+ K210_FUNC(DEBUG5, OUT),
+ K210_FUNC(DEBUG6, OUT),
+ K210_FUNC(DEBUG7, OUT),
+ K210_FUNC(DEBUG8, OUT),
+ K210_FUNC(DEBUG9, OUT),
+ K210_FUNC(DEBUG10, OUT),
+ K210_FUNC(DEBUG11, OUT),
+ K210_FUNC(DEBUG12, OUT),
+ K210_FUNC(DEBUG13, OUT),
+ K210_FUNC(DEBUG14, OUT),
+ K210_FUNC(DEBUG15, OUT),
+ K210_FUNC(DEBUG16, OUT),
+ K210_FUNC(DEBUG17, OUT),
+ K210_FUNC(DEBUG18, OUT),
+ K210_FUNC(DEBUG19, OUT),
+ K210_FUNC(DEBUG20, OUT),
+ K210_FUNC(DEBUG21, OUT),
+ K210_FUNC(DEBUG22, OUT),
+ K210_FUNC(DEBUG23, OUT),
+ K210_FUNC(DEBUG24, OUT),
+ K210_FUNC(DEBUG25, OUT),
+ K210_FUNC(DEBUG26, OUT),
+ K210_FUNC(DEBUG27, OUT),
+ K210_FUNC(DEBUG28, OUT),
+ K210_FUNC(DEBUG29, OUT),
+ K210_FUNC(DEBUG30, OUT),
+ K210_FUNC(DEBUG31, OUT),
+};
+
+#define PIN_CONFIG_OUTPUT_INVERT (PIN_CONFIG_END + 1)
+#define PIN_CONFIG_INPUT_INVERT (PIN_CONFIG_END + 2)
+
+static const struct pinconf_generic_params k210_pinconf_custom_params[] = {
+ { "output-polarity-invert", PIN_CONFIG_OUTPUT_INVERT, 1 },
+ { "input-polarity-invert", PIN_CONFIG_INPUT_INVERT, 1 },
+};
+
+/*
+ * Max drive strength in uA.
+ */
+static const int k210_pinconf_drive_strength[] = {
+ [0] = 11200,
+ [1] = 16800,
+ [2] = 22300,
+ [3] = 27800,
+ [4] = 33300,
+ [5] = 38700,
+ [6] = 44100,
+ [7] = 49500,
+};
+
+static int k210_pinconf_get_drive(unsigned int max_strength_ua)
+{
+ int i;
+
+ for (i = K210_PC_DRIVE_MAX; i; i--) {
+ if (k210_pinconf_drive_strength[i] <= max_strength_ua)
+ return i;
+ }
+
+ return -EINVAL;
+}
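k210_pinconf_get_drive() walks the table from the strongest level downward and returns the largest level whose strength does not exceed the request (note the loop never reaches level 0). A standalone check mirroring that loop; a 30000 uA request, for instance, selects level 3 (27800 uA):

#include <stdio.h>

static const int strength_ua[] = {
	11200, 16800, 22300, 27800, 33300, 38700, 44100, 49500,
};

static int get_drive(unsigned int max_ua)
{
	int i;

	for (i = 7; i; i--)	/* K210_PC_DRIVE_MAX == 7 */
		if (strength_ua[i] <= max_ua)
			return i;
	return -1;		/* stands in for -EINVAL */
}

int main(void)
{
	printf("30000 uA -> level %d\n", get_drive(30000));	/* 3 */
	printf("15000 uA -> level %d\n", get_drive(15000));	/* -1 */
	return 0;
}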
+
+static void k210_pinmux_set_pin_function(struct pinctrl_dev *pctldev,
+ u32 pin, u32 func)
+{
+ struct k210_fpioa_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ const struct k210_pcf_info *info = &k210_pcf_infos[func];
+ u32 mode = k210_pinconf_mode_id_to_mode[info->mode_id];
+ u32 val = func | mode;
+
+ dev_dbg(pdata->dev, "set pin %u function %s (%u) -> 0x%08x\n",
+ pin, info->name, func, val);
+
+ writel(val, &pdata->fpioa->pins[pin]);
+}
+
+static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned int param, unsigned int arg)
+{
+ struct k210_fpioa_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ u32 val = readl(&pdata->fpioa->pins[pin]);
+ int drive;
+
+ dev_dbg(pdata->dev, "set pin %u param %u, arg 0x%x\n",
+ pin, param, arg);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ val &= ~K210_PC_BIAS_MASK;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!arg)
+ return -EINVAL;
+ val |= K210_PC_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!arg)
+ return -EINVAL;
+ val |= K210_PC_PU;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg *= 1000;
+ fallthrough;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ drive = k210_pinconf_get_drive(arg);
+ if (drive < 0)
+ return drive;
+ val &= ~K210_PC_DRIVE_MASK;
+ val |= FIELD_PREP(K210_PC_DRIVE_MASK, drive);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (arg)
+ val |= K210_PC_IE;
+ else
+ val &= ~K210_PC_IE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (arg)
+ val |= K210_PC_ST;
+ else
+ val &= ~K210_PC_ST;
+ break;
+ case PIN_CONFIG_OUTPUT:
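+ /*
+ * Mux the constant function to the pin and use the output
+ * invert bit to select the level: the constant drives a 1,
+ * so inverting it yields a fixed 0.
+ */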
+ k210_pinmux_set_pin_function(pctldev, pin, K210_PCF_CONSTANT);
+ val = readl(&pdata->fpioa->pins[pin]);
+ val |= K210_PC_MODE_OUT;
+ if (!arg)
+ val |= K210_PC_DO_INV;
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ if (arg)
+ val |= K210_PC_OE;
+ else
+ val &= ~K210_PC_OE;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (arg)
+ val |= K210_PC_SL;
+ else
+ val &= ~K210_PC_SL;
+ break;
+ case PIN_CONFIG_OUTPUT_INVERT:
+ if (arg)
+ val |= K210_PC_DO_INV;
+ else
+ val &= ~K210_PC_DO_INV;
+ break;
+ case PIN_CONFIG_INPUT_INVERT:
+ if (arg)
+ val |= K210_PC_DI_INV;
+ else
+ val &= ~K210_PC_DI_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(val, &pdata->fpioa->pins[pin]);
+
+ return 0;
+}
+
+static int k210_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ unsigned int param, arg;
+ int i, ret;
+
+ if (WARN_ON(pin >= K210_NPINS))
+ return -EINVAL;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+ ret = k210_pinconf_set_param(pctldev, pin, param, arg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void k210_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ struct k210_fpioa_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ seq_printf(s, "%#x", readl(&pdata->fpioa->pins[pin]));
+}
+
+static int k210_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct k210_fpioa_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param, arg;
+ u32 bit;
+ int i;
+
+ /* Pins should be configured with pinmux, not groups */
+ if (selector < K210_NPINS)
+ return -EINVAL;
+
+ /* Otherwise it's a power domain */
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ if (param != PIN_CONFIG_POWER_SOURCE)
+ return -EINVAL;
+
+ arg = pinconf_to_config_argument(configs[i]);
+ bit = BIT(selector - K210_NPINS);
+ regmap_update_bits(pdata->sysctl_map,
+ pdata->power_offset,
+ bit, arg ? bit : 0);
+ }
+
+ return 0;
+}
+
+static void k210_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int selector)
+{
+ struct k210_fpioa_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int ret;
+ u32 val;
+
+ if (selector < K210_NPINS)
+ return k210_pinconf_dbg_show(pctldev, s, selector);
+
+ ret = regmap_read(pdata->sysctl_map, pdata->power_offset, &val);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to read power reg\n");
+ return;
+ }
+
+ seq_printf(s, "%s: %s V", k210_group_names[selector],
+ val & BIT(selector - K210_NPINS) ? "1.8" : "3.3");
+}
+
+static const struct pinconf_ops k210_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_set = k210_pinconf_set,
+ .pin_config_group_set = k210_pinconf_group_set,
+ .pin_config_dbg_show = k210_pinconf_dbg_show,
+ .pin_config_group_dbg_show = k210_pinconf_group_dbg_show,
+};
+
+static int k210_pinmux_get_function_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(k210_pcf_infos);
+}
+
+static const char *k210_pinmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return k210_pcf_infos[selector].name;
+}
+
+static int k210_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ /* Any function can be mapped to any pin */
+ *groups = k210_group_names;
+ *num_groups = K210_NPINS;
+
+ return 0;
+}
+
+static int k210_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ /* Can't mux power domains */
+ if (group >= K210_NPINS)
+ return -EINVAL;
+
+ k210_pinmux_set_pin_function(pctldev, group, function);
+
+ return 0;
+}
+
+static const struct pinmux_ops k210_pinmux_ops = {
+ .get_functions_count = k210_pinmux_get_function_count,
+ .get_function_name = k210_pinmux_get_function_name,
+ .get_function_groups = k210_pinmux_get_function_groups,
+ .set_mux = k210_pinmux_set_mux,
+ .strict = true,
+};
+
+static int k210_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return K210_NGROUPS;
+}
+
+static const char *k210_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return k210_group_names[group];
+}
+
+static int k210_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ if (group >= K210_NPINS) {
+ *pins = NULL;
+ *npins = 0;
+ return 0;
+ }
+
+ *pins = &k210_pins[group].number;
+ *npins = 1;
+
+ return 0;
+}
+
+static void k210_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ seq_printf(s, "%s", dev_name(pctldev->dev));
+}
+
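+/*
+ * A config subnode either names groups (generic group configs, used for
+ * the power domain banks) or carries a "pinmux" array of packed
+ * pin/function values, each expanding to one mux map entry plus any
+ * pin config entries parsed from the node.
+ */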
+static int k210_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *reserved_maps,
+ unsigned int *num_maps)
+{
+ struct property *prop;
+ const __be32 *p;
+ int ret, pinmux_groups;
+ u32 pinmux_group;
+ unsigned long *configs = NULL;
+ unsigned int num_configs = 0;
+ unsigned int reserve = 0;
+
+ ret = of_property_count_strings(np, "groups");
+ if (!ret)
+ return pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ reserved_maps, num_maps,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+
+ pinmux_groups = of_property_count_u32_elems(np, "pinmux");
+ if (pinmux_groups <= 0) {
+ /* Ignore this node */
+ return 0;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+ &num_configs);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "%pOF: could not parse node property\n",
+ np);
+ return ret;
+ }
+
+ reserve = pinmux_groups * (1 + num_configs);
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
+ reserve);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_u32(np, "pinmux", prop, p, pinmux_group) {
+ const char *group_name, *func_name;
+ u32 pin = FIELD_GET(K210_PG_PIN, pinmux_group);
+ u32 func = FIELD_GET(K210_PG_FUNC, pinmux_group);
+
+ if (pin >= K210_NPINS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ group_name = k210_group_names[pin];
+ func_name = k210_pcf_infos[func].name;
+
+ dev_dbg(pctldev->dev, "Pinmux %s: pin %u func %s\n",
+ np->name, pin, func_name);
+
+ ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps,
+ num_maps, group_name,
+ func_name);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "%pOF add mux map failed %d\n",
+ np, ret);
+ goto exit;
+ }
+
+ if (num_configs) {
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps, num_maps, group_name,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_PIN);
+ if (ret < 0) {
+ dev_err(pctldev->dev,
+ "%pOF add configs map failed %d\n",
+ np, ret);
+ goto exit;
+ }
+ }
+ }
+
+ ret = 0;
+
+exit:
+ kfree(configs);
+ return ret;
+}
+
+static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ unsigned int reserved_maps;
+ struct device_node *np;
+ int ret;
+
+ reserved_maps = 0;
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = k210_pinctrl_dt_subnode_to_map(pctldev, np_config, map,
+ &reserved_maps, num_maps);
+ if (ret < 0)
+ goto err;
+
+ for_each_available_child_of_node(np_config, np) {
+ ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps);
+ if (ret < 0)
+ goto err;
+ }
+ return 0;
+
+err:
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
+ return ret;
+}
+
+static const struct pinctrl_ops k210_pinctrl_ops = {
+ .get_groups_count = k210_pinctrl_get_groups_count,
+ .get_group_name = k210_pinctrl_get_group_name,
+ .get_group_pins = k210_pinctrl_get_group_pins,
+ .pin_dbg_show = k210_pinctrl_pin_dbg_show,
+ .dt_node_to_map = k210_pinctrl_dt_node_to_map,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static struct pinctrl_desc k210_pinctrl_desc = {
+ .name = "k210-pinctrl",
+ .pins = k210_pins,
+ .npins = K210_NPINS,
+ .pctlops = &k210_pinctrl_ops,
+ .pmxops = &k210_pinmux_ops,
+ .confops = &k210_pinconf_ops,
+ .custom_params = k210_pinconf_custom_params,
+ .num_custom_params = ARRAY_SIZE(k210_pinconf_custom_params),
+};
+
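+/*
+ * For every pin function whose default mode uses an input tie, program
+ * the tie value before enabling the tie so the synthesized input level
+ * does not glitch.
+ */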
+static void k210_fpioa_init_ties(struct k210_fpioa_data *pdata)
+{
+ struct k210_fpioa __iomem *fpioa = pdata->fpioa;
+ u32 val;
+ int i, j;
+
+ dev_dbg(pdata->dev, "Init pin ties\n");
+
+ /* Init pin functions input ties */
+ for (i = 0; i < ARRAY_SIZE(fpioa->tie_en); i++) {
+ val = 0;
+ for (j = 0; j < 32; j++) {
+ if (k210_pcf_infos[i * 32 + j].mode_id ==
+ K210_PC_DEFAULT_IN_TIE) {
+ dev_dbg(pdata->dev,
+ "tie_en function %d (%s)\n",
+ i * 32 + j,
+ k210_pcf_infos[i * 32 + j].name);
+ val |= BIT(j);
+ }
+ }
+
+ /* Set value before enable */
+ writel(val, &fpioa->tie_val[i]);
+ writel(val, &fpioa->tie_en[i]);
+ }
+}
+
+static int k210_fpioa_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct k210_fpioa_data *pdata;
+ int ret;
+
+ dev_info(dev, "K210 FPIOA pin controller\n");
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, pdata);
+
+ pdata->fpioa = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pdata->fpioa))
+ return PTR_ERR(pdata->fpioa);
+
+ pdata->clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pdata->clk))
+ return PTR_ERR(pdata->clk);
+
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
+
+ pdata->pclk = devm_clk_get_optional(dev, "pclk");
+ if (!IS_ERR(pdata->pclk))
+ clk_prepare_enable(pdata->pclk);
+
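+ /*
+ * The I/O power domain controls live in the system controller; the
+ * phandle argument gives the offset of the power bank register.
+ */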
+ pdata->sysctl_map =
+ syscon_regmap_lookup_by_phandle_args(np,
+ "canaan,k210-sysctl-power",
+ 1, &pdata->power_offset);
+ if (IS_ERR(pdata->sysctl_map))
+ return PTR_ERR(pdata->sysctl_map);
+
+ k210_fpioa_init_ties(pdata);
+
+ pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
+ if (IS_ERR(pdata->pctl))
+ return PTR_ERR(pdata->pctl);
+
+ return 0;
+}
+
+static const struct of_device_id k210_fpioa_dt_ids[] = {
+ { .compatible = "canaan,k210-fpioa" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k210_fpioa_driver = {
+ .probe = k210_fpioa_probe,
+ .driver = {
+ .name = "k210-fpioa",
+ .of_match_table = k210_fpioa_dt_ids,
+ },
+};
+builtin_platform_driver(k210_fpioa_driver);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index f3cd7e296712..7771316dfffa 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -777,6 +777,7 @@ static int pcs_add_function(struct pcs_device *pcs,
function->vals = vals;
function->nvals = nvals;
+ function->name = name;
selector = pinmux_generic_add_function(pcs->pctl, name,
pgnames, npgnames,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7b8c7a0b13de..43d9e6c7fd81 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -541,7 +541,6 @@ static void st_pinconf_set_retime_packed(struct st_pinctrl *info,
st_regmap_field_bit_set_clear_pin(rt_p->delay_0, delay & 0x1, pin);
/* 2 bit delay, msb */
st_regmap_field_bit_set_clear_pin(rt_p->delay_1, delay & 0x2, pin);
-
}
static void st_pinconf_set_retime_dedicated(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index c110f780407b..484a3b9e875c 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -443,7 +443,6 @@ static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
sx150x_gpio_oscio_set(pctl, value);
else
__sx150x_gpio_set(pctl, offset, value);
-
}
static void sx150x_gpio_set_multiple(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
deleted file mode 100644
index cc306448259e..000000000000
--- a/drivers/pinctrl/pinctrl-u300.c
+++ /dev/null
@@ -1,1111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for the U300 pin controller
- *
- * Based on the original U300 padmux functions
- * Copyright (C) 2009-2011 ST-Ericsson AB
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * The DB3350 design and control registers are oriented around pads rather than
- * pins, so we enumerate the pads we can mux rather than actual pins. The pads
- * are connected to different pins in different packaging types, so it would
- * be confusing.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include "pinctrl-coh901.h"
-
-/*
- * Register definitions for the U300 Padmux control registers in the
- * system controller
- */
-
-/* PAD MUX Control register 1 (LOW) 16bit (R/W) */
-#define U300_SYSCON_PMC1LR 0x007C
-#define U300_SYSCON_PMC1LR_MASK 0xFFFF
-#define U300_SYSCON_PMC1LR_CDI_MASK 0xC000
-#define U300_SYSCON_PMC1LR_CDI_CDI 0x0000
-#define U300_SYSCON_PMC1LR_CDI_EMIF 0x4000
-/* For BS335 */
-#define U300_SYSCON_PMC1LR_CDI_CDI2 0x8000
-#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO 0xC000
-/* For BS365 */
-#define U300_SYSCON_PMC1LR_CDI_GPIO 0x8000
-#define U300_SYSCON_PMC1LR_CDI_WCDMA 0xC000
-/* Common defs */
-#define U300_SYSCON_PMC1LR_PDI_MASK 0x3000
-#define U300_SYSCON_PMC1LR_PDI_PDI 0x0000
-#define U300_SYSCON_PMC1LR_PDI_EGG 0x1000
-#define U300_SYSCON_PMC1LR_PDI_WCDMA 0x3000
-#define U300_SYSCON_PMC1LR_MMCSD_MASK 0x0C00
-#define U300_SYSCON_PMC1LR_MMCSD_MMCSD 0x0000
-#define U300_SYSCON_PMC1LR_MMCSD_MSPRO 0x0400
-#define U300_SYSCON_PMC1LR_MMCSD_DSP 0x0800
-#define U300_SYSCON_PMC1LR_MMCSD_WCDMA 0x0C00
-#define U300_SYSCON_PMC1LR_ETM_MASK 0x0300
-#define U300_SYSCON_PMC1LR_ETM_ACC 0x0000
-#define U300_SYSCON_PMC1LR_ETM_APP 0x0100
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK 0x00C0
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC 0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF 0x0040
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM 0x0080
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB 0x00C0
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK 0x0030
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC 0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF 0x0010
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM 0x0020
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI 0x0030
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK 0x000C
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC 0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF 0x0004
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM 0x0008
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI 0x000C
-#define U300_SYSCON_PMC1LR_EMIF_1_MASK 0x0003
-#define U300_SYSCON_PMC1LR_EMIF_1_STATIC 0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0 0x0001
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1 0x0002
-#define U300_SYSCON_PMC1LR_EMIF_1 0x0003
-/* PAD MUX Control register 2 (HIGH) 16bit (R/W) */
-#define U300_SYSCON_PMC1HR 0x007E
-#define U300_SYSCON_PMC1HR_MASK 0xFFFF
-#define U300_SYSCON_PMC1HR_MISC_2_MASK 0xC000
-#define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_MISC_2_MSPRO 0x4000
-#define U300_SYSCON_PMC1HR_MISC_2_DSP 0x8000
-#define U300_SYSCON_PMC1HR_MISC_2_AAIF 0xC000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK 0x3000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF 0x1000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP 0x2000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF 0x3000
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK 0x0C00
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC 0x0400
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP 0x0800
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF 0x0C00
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK 0x0300
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI 0x0100
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF 0x0300
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK 0x00C0
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI 0x0040
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF 0x00C0
-#define U300_SYSCON_PMC1HR_APP_SPI_2_MASK 0x0030
-#define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_2_SPI 0x0010
-#define U300_SYSCON_PMC1HR_APP_SPI_2_DSP 0x0020
-#define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF 0x0030
-#define U300_SYSCON_PMC1HR_APP_UART0_2_MASK 0x000C
-#define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_UART0_2_UART0 0x0004
-#define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS 0x0008
-#define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF 0x000C
-#define U300_SYSCON_PMC1HR_APP_UART0_1_MASK 0x0003
-#define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO 0x0000
-#define U300_SYSCON_PMC1HR_APP_UART0_1_UART0 0x0001
-#define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF 0x0003
-/* Padmux 2 control */
-#define U300_SYSCON_PMC2R 0x100
-#define U300_SYSCON_PMC2R_APP_MISC_0_MASK 0x00C0
-#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO 0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM 0x0040
-#define U300_SYSCON_PMC2R_APP_MISC_0_MMC 0x0080
-#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 0x00C0
-#define U300_SYSCON_PMC2R_APP_MISC_1_MASK 0x0300
-#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO 0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM 0x0100
-#define U300_SYSCON_PMC2R_APP_MISC_1_MMC 0x0200
-#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 0x0300
-#define U300_SYSCON_PMC2R_APP_MISC_2_MASK 0x0C00
-#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO 0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM 0x0400
-#define U300_SYSCON_PMC2R_APP_MISC_2_MMC 0x0800
-#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 0x0C00
-#define U300_SYSCON_PMC2R_APP_MISC_3_MASK 0x3000
-#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO 0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM 0x1000
-#define U300_SYSCON_PMC2R_APP_MISC_3_MMC 0x2000
-#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 0x3000
-#define U300_SYSCON_PMC2R_APP_MISC_4_MASK 0xC000
-#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO 0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM 0x4000
-#define U300_SYSCON_PMC2R_APP_MISC_4_MMC 0x8000
-#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO 0xC000
-/* TODO: More SYSCON registers missing */
-#define U300_SYSCON_PMC3R 0x10C
-#define U300_SYSCON_PMC3R_APP_MISC_11_MASK 0xC000
-#define U300_SYSCON_PMC3R_APP_MISC_11_SPI 0x4000
-#define U300_SYSCON_PMC3R_APP_MISC_10_MASK 0x3000
-#define U300_SYSCON_PMC3R_APP_MISC_10_SPI 0x1000
-/* TODO: Missing other configs */
-#define U300_SYSCON_PMC4R 0x168
-#define U300_SYSCON_PMC4R_APP_MISC_12_MASK 0x0003
-#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO 0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_13_MASK 0x000C
-#define U300_SYSCON_PMC4R_APP_MISC_13_CDI 0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA 0x0004
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 0x0008
-#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO 0x000C
-#define U300_SYSCON_PMC4R_APP_MISC_14_MASK 0x0030
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI 0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA 0x0010
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 0x0020
-#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO 0x0030
-#define U300_SYSCON_PMC4R_APP_MISC_16_MASK 0x0300
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS 0x0100
-#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N 0x0200
-
-#define DRIVER_NAME "pinctrl-u300"
-
-/*
- * The DB3350 has 467 pads, I have enumerated the pads clockwise around the
- * edges of the silicon, finger by finger. LTCORNER upper left is pad 0.
- * Data taken from the PadRing chart, arranged like this:
- *
- * 0 ..... 104
- * 466 105
- * . .
- * . .
- * 358 224
- * 357 .... 225
- */
-#define U300_NUM_PADS 467
-
-/* Pad names for the pinmux subsystem */
-static const struct pinctrl_pin_desc u300_pads[] = {
- /* Pads along the top edge of the chip */
- PINCTRL_PIN(0, "P PAD VDD 28"),
- PINCTRL_PIN(1, "P PAD GND 28"),
- PINCTRL_PIN(2, "PO SIM RST N"),
- PINCTRL_PIN(3, "VSSIO 25"),
- PINCTRL_PIN(4, "VSSA ADDA ESDSUB"),
- PINCTRL_PIN(5, "PWR VSSCOMMON"),
- PINCTRL_PIN(6, "PI ADC I1 POS"),
- PINCTRL_PIN(7, "PI ADC I1 NEG"),
- PINCTRL_PIN(8, "PWR VSSAD0"),
- PINCTRL_PIN(9, "PWR VCCAD0"),
- PINCTRL_PIN(10, "PI ADC Q1 NEG"),
- PINCTRL_PIN(11, "PI ADC Q1 POS"),
- PINCTRL_PIN(12, "PWR VDDAD"),
- PINCTRL_PIN(13, "PWR GNDAD"),
- PINCTRL_PIN(14, "PI ADC I2 POS"),
- PINCTRL_PIN(15, "PI ADC I2 NEG"),
- PINCTRL_PIN(16, "PWR VSSAD1"),
- PINCTRL_PIN(17, "PWR VCCAD1"),
- PINCTRL_PIN(18, "PI ADC Q2 NEG"),
- PINCTRL_PIN(19, "PI ADC Q2 POS"),
- PINCTRL_PIN(20, "VSSA ADDA ESDSUB"),
- PINCTRL_PIN(21, "PWR VCCGPAD"),
- PINCTRL_PIN(22, "PI TX POW"),
- PINCTRL_PIN(23, "PWR VSSGPAD"),
- PINCTRL_PIN(24, "PO DAC I POS"),
- PINCTRL_PIN(25, "PO DAC I NEG"),
- PINCTRL_PIN(26, "PO DAC Q POS"),
- PINCTRL_PIN(27, "PO DAC Q NEG"),
- PINCTRL_PIN(28, "PWR VSSDA"),
- PINCTRL_PIN(29, "PWR VCCDA"),
- PINCTRL_PIN(30, "VSSA ADDA ESDSUB"),
- PINCTRL_PIN(31, "P PAD VDDIO 11"),
- PINCTRL_PIN(32, "PI PLL 26 FILTVDD"),
- PINCTRL_PIN(33, "PI PLL 26 VCONT"),
- PINCTRL_PIN(34, "PWR AGNDPLL2V5 32 13"),
- PINCTRL_PIN(35, "PWR AVDDPLL2V5 32 13"),
- PINCTRL_PIN(36, "VDDA PLL ESD"),
- PINCTRL_PIN(37, "VSSA PLL ESD"),
- PINCTRL_PIN(38, "VSS PLL"),
- PINCTRL_PIN(39, "VDDC PLL"),
- PINCTRL_PIN(40, "PWR AGNDPLL2V5 26 60"),
- PINCTRL_PIN(41, "PWR AVDDPLL2V5 26 60"),
- PINCTRL_PIN(42, "PWR AVDDPLL2V5 26 208"),
- PINCTRL_PIN(43, "PWR AGNDPLL2V5 26 208"),
- PINCTRL_PIN(44, "PWR AVDDPLL2V5 13 208"),
- PINCTRL_PIN(45, "PWR AGNDPLL2V5 13 208"),
- PINCTRL_PIN(46, "P PAD VSSIO 11"),
- PINCTRL_PIN(47, "P PAD VSSIO 12"),
- PINCTRL_PIN(48, "PI POW RST N"),
- PINCTRL_PIN(49, "VDDC IO"),
- PINCTRL_PIN(50, "P PAD VDDIO 16"),
- PINCTRL_PIN(51, "PO RF WCDMA EN 4"),
- PINCTRL_PIN(52, "PO RF WCDMA EN 3"),
- PINCTRL_PIN(53, "PO RF WCDMA EN 2"),
- PINCTRL_PIN(54, "PO RF WCDMA EN 1"),
- PINCTRL_PIN(55, "PO RF WCDMA EN 0"),
- PINCTRL_PIN(56, "PO GSM PA ENABLE"),
- PINCTRL_PIN(57, "PO RF DATA STRB"),
- PINCTRL_PIN(58, "PO RF DATA2"),
- PINCTRL_PIN(59, "PIO RF DATA1"),
- PINCTRL_PIN(60, "PIO RF DATA0"),
- PINCTRL_PIN(61, "P PAD VDD 11"),
- PINCTRL_PIN(62, "P PAD GND 11"),
- PINCTRL_PIN(63, "P PAD VSSIO 16"),
- PINCTRL_PIN(64, "P PAD VDDIO 18"),
- PINCTRL_PIN(65, "PO RF CTRL STRB2"),
- PINCTRL_PIN(66, "PO RF CTRL STRB1"),
- PINCTRL_PIN(67, "PO RF CTRL STRB0"),
- PINCTRL_PIN(68, "PIO RF CTRL DATA"),
- PINCTRL_PIN(69, "PO RF CTRL CLK"),
- PINCTRL_PIN(70, "PO TX ADC STRB"),
- PINCTRL_PIN(71, "PO ANT SW 2"),
- PINCTRL_PIN(72, "PO ANT SW 3"),
- PINCTRL_PIN(73, "PO ANT SW 0"),
- PINCTRL_PIN(74, "PO ANT SW 1"),
- PINCTRL_PIN(75, "PO M CLKRQ"),
- PINCTRL_PIN(76, "PI M CLK"),
- PINCTRL_PIN(77, "PI RTC CLK"),
- PINCTRL_PIN(78, "P PAD VDD 8"),
- PINCTRL_PIN(79, "P PAD GND 8"),
- PINCTRL_PIN(80, "P PAD VSSIO 13"),
- PINCTRL_PIN(81, "P PAD VDDIO 13"),
- PINCTRL_PIN(82, "PO SYS 1 CLK"),
- PINCTRL_PIN(83, "PO SYS 2 CLK"),
- PINCTRL_PIN(84, "PO SYS 0 CLK"),
- PINCTRL_PIN(85, "PI SYS 0 CLKRQ"),
- PINCTRL_PIN(86, "PO PWR MNGT CTRL 1"),
- PINCTRL_PIN(87, "PO PWR MNGT CTRL 0"),
- PINCTRL_PIN(88, "PO RESOUT2 RST N"),
- PINCTRL_PIN(89, "PO RESOUT1 RST N"),
- PINCTRL_PIN(90, "PO RESOUT0 RST N"),
- PINCTRL_PIN(91, "PI SERVICE N"),
- PINCTRL_PIN(92, "P PAD VDD 29"),
- PINCTRL_PIN(93, "P PAD GND 29"),
- PINCTRL_PIN(94, "P PAD VSSIO 8"),
- PINCTRL_PIN(95, "P PAD VDDIO 8"),
- PINCTRL_PIN(96, "PI EXT IRQ1 N"),
- PINCTRL_PIN(97, "PI EXT IRQ0 N"),
- PINCTRL_PIN(98, "PIO DC ON"),
- PINCTRL_PIN(99, "PIO ACC APP I2C DATA"),
- PINCTRL_PIN(100, "PIO ACC APP I2C CLK"),
- PINCTRL_PIN(101, "P PAD VDD 12"),
- PINCTRL_PIN(102, "P PAD GND 12"),
- PINCTRL_PIN(103, "P PAD VSSIO 14"),
- PINCTRL_PIN(104, "P PAD VDDIO 14"),
- /* Pads along the right edge of the chip */
- PINCTRL_PIN(105, "PIO APP I2C1 DATA"),
- PINCTRL_PIN(106, "PIO APP I2C1 CLK"),
- PINCTRL_PIN(107, "PO KEY OUT0"),
- PINCTRL_PIN(108, "PO KEY OUT1"),
- PINCTRL_PIN(109, "PO KEY OUT2"),
- PINCTRL_PIN(110, "PO KEY OUT3"),
- PINCTRL_PIN(111, "PO KEY OUT4"),
- PINCTRL_PIN(112, "PI KEY IN0"),
- PINCTRL_PIN(113, "PI KEY IN1"),
- PINCTRL_PIN(114, "PI KEY IN2"),
- PINCTRL_PIN(115, "P PAD VDDIO 15"),
- PINCTRL_PIN(116, "P PAD VSSIO 15"),
- PINCTRL_PIN(117, "P PAD GND 13"),
- PINCTRL_PIN(118, "P PAD VDD 13"),
- PINCTRL_PIN(119, "PI KEY IN3"),
- PINCTRL_PIN(120, "PI KEY IN4"),
- PINCTRL_PIN(121, "PI KEY IN5"),
- PINCTRL_PIN(122, "PIO APP PCM I2S1 DATA B"),
- PINCTRL_PIN(123, "PIO APP PCM I2S1 DATA A"),
- PINCTRL_PIN(124, "PIO APP PCM I2S1 WS"),
- PINCTRL_PIN(125, "PIO APP PCM I2S1 CLK"),
- PINCTRL_PIN(126, "PIO APP PCM I2S0 DATA B"),
- PINCTRL_PIN(127, "PIO APP PCM I2S0 DATA A"),
- PINCTRL_PIN(128, "PIO APP PCM I2S0 WS"),
- PINCTRL_PIN(129, "PIO APP PCM I2S0 CLK"),
- PINCTRL_PIN(130, "P PAD VDD 17"),
- PINCTRL_PIN(131, "P PAD GND 17"),
- PINCTRL_PIN(132, "P PAD VSSIO 19"),
- PINCTRL_PIN(133, "P PAD VDDIO 19"),
- PINCTRL_PIN(134, "UART0 RTS"),
- PINCTRL_PIN(135, "UART0 CTS"),
- PINCTRL_PIN(136, "UART0 TX"),
- PINCTRL_PIN(137, "UART0 RX"),
- PINCTRL_PIN(138, "PIO ACC SPI DO"),
- PINCTRL_PIN(139, "PIO ACC SPI DI"),
- PINCTRL_PIN(140, "PIO ACC SPI CS0 N"),
- PINCTRL_PIN(141, "PIO ACC SPI CS1 N"),
- PINCTRL_PIN(142, "PIO ACC SPI CS2 N"),
- PINCTRL_PIN(143, "PIO ACC SPI CLK"),
- PINCTRL_PIN(144, "PO PDI EXT RST N"),
- PINCTRL_PIN(145, "P PAD VDDIO 22"),
- PINCTRL_PIN(146, "P PAD VSSIO 22"),
- PINCTRL_PIN(147, "P PAD GND 18"),
- PINCTRL_PIN(148, "P PAD VDD 18"),
- PINCTRL_PIN(149, "PIO PDI C0"),
- PINCTRL_PIN(150, "PIO PDI C1"),
- PINCTRL_PIN(151, "PIO PDI C2"),
- PINCTRL_PIN(152, "PIO PDI C3"),
- PINCTRL_PIN(153, "PIO PDI C4"),
- PINCTRL_PIN(154, "PIO PDI C5"),
- PINCTRL_PIN(155, "PIO PDI D0"),
- PINCTRL_PIN(156, "PIO PDI D1"),
- PINCTRL_PIN(157, "PIO PDI D2"),
- PINCTRL_PIN(158, "PIO PDI D3"),
- PINCTRL_PIN(159, "P PAD VDDIO 21"),
- PINCTRL_PIN(160, "P PAD VSSIO 21"),
- PINCTRL_PIN(161, "PIO PDI D4"),
- PINCTRL_PIN(162, "PIO PDI D5"),
- PINCTRL_PIN(163, "PIO PDI D6"),
- PINCTRL_PIN(164, "PIO PDI D7"),
- PINCTRL_PIN(165, "PIO MS INS"),
- PINCTRL_PIN(166, "MMC DATA DIR LS"),
- PINCTRL_PIN(167, "MMC DATA 3"),
- PINCTRL_PIN(168, "MMC DATA 2"),
- PINCTRL_PIN(169, "MMC DATA 1"),
- PINCTRL_PIN(170, "MMC DATA 0"),
- PINCTRL_PIN(171, "MMC CMD DIR LS"),
- PINCTRL_PIN(172, "P PAD VDD 27"),
- PINCTRL_PIN(173, "P PAD GND 27"),
- PINCTRL_PIN(174, "P PAD VSSIO 20"),
- PINCTRL_PIN(175, "P PAD VDDIO 20"),
- PINCTRL_PIN(176, "MMC CMD"),
- PINCTRL_PIN(177, "MMC CLK"),
- PINCTRL_PIN(178, "PIO APP GPIO 14"),
- PINCTRL_PIN(179, "PIO APP GPIO 13"),
- PINCTRL_PIN(180, "PIO APP GPIO 11"),
- PINCTRL_PIN(181, "PIO APP GPIO 25"),
- PINCTRL_PIN(182, "PIO APP GPIO 24"),
- PINCTRL_PIN(183, "PIO APP GPIO 23"),
- PINCTRL_PIN(184, "PIO APP GPIO 22"),
- PINCTRL_PIN(185, "PIO APP GPIO 21"),
- PINCTRL_PIN(186, "PIO APP GPIO 20"),
- PINCTRL_PIN(187, "P PAD VDD 19"),
- PINCTRL_PIN(188, "P PAD GND 19"),
- PINCTRL_PIN(189, "P PAD VSSIO 23"),
- PINCTRL_PIN(190, "P PAD VDDIO 23"),
- PINCTRL_PIN(191, "PIO APP GPIO 19"),
- PINCTRL_PIN(192, "PIO APP GPIO 18"),
- PINCTRL_PIN(193, "PIO APP GPIO 17"),
- PINCTRL_PIN(194, "PIO APP GPIO 16"),
- PINCTRL_PIN(195, "PI CI D1"),
- PINCTRL_PIN(196, "PI CI D0"),
- PINCTRL_PIN(197, "PI CI HSYNC"),
- PINCTRL_PIN(198, "PI CI VSYNC"),
- PINCTRL_PIN(199, "PI CI EXT CLK"),
- PINCTRL_PIN(200, "PO CI EXT RST N"),
- PINCTRL_PIN(201, "P PAD VSSIO 43"),
- PINCTRL_PIN(202, "P PAD VDDIO 43"),
- PINCTRL_PIN(203, "PI CI D6"),
- PINCTRL_PIN(204, "PI CI D7"),
- PINCTRL_PIN(205, "PI CI D2"),
- PINCTRL_PIN(206, "PI CI D3"),
- PINCTRL_PIN(207, "PI CI D4"),
- PINCTRL_PIN(208, "PI CI D5"),
- PINCTRL_PIN(209, "PI CI D8"),
- PINCTRL_PIN(210, "PI CI D9"),
- PINCTRL_PIN(211, "P PAD VDD 20"),
- PINCTRL_PIN(212, "P PAD GND 20"),
- PINCTRL_PIN(213, "P PAD VSSIO 24"),
- PINCTRL_PIN(214, "P PAD VDDIO 24"),
- PINCTRL_PIN(215, "P PAD VDDIO 26"),
- PINCTRL_PIN(216, "PO EMIF 1 A26"),
- PINCTRL_PIN(217, "PO EMIF 1 A25"),
- PINCTRL_PIN(218, "P PAD VSSIO 26"),
- PINCTRL_PIN(219, "PO EMIF 1 A24"),
- PINCTRL_PIN(220, "PO EMIF 1 A23"),
- /* Pads along the bottom edge of the chip */
- PINCTRL_PIN(221, "PO EMIF 1 A22"),
- PINCTRL_PIN(222, "PO EMIF 1 A21"),
- PINCTRL_PIN(223, "P PAD VDD 21"),
- PINCTRL_PIN(224, "P PAD GND 21"),
- PINCTRL_PIN(225, "P PAD VSSIO 27"),
- PINCTRL_PIN(226, "P PAD VDDIO 27"),
- PINCTRL_PIN(227, "PO EMIF 1 A20"),
- PINCTRL_PIN(228, "PO EMIF 1 A19"),
- PINCTRL_PIN(229, "PO EMIF 1 A18"),
- PINCTRL_PIN(230, "PO EMIF 1 A17"),
- PINCTRL_PIN(231, "P PAD VDDIO 28"),
- PINCTRL_PIN(232, "P PAD VSSIO 28"),
- PINCTRL_PIN(233, "PO EMIF 1 A16"),
- PINCTRL_PIN(234, "PIO EMIF 1 D15"),
- PINCTRL_PIN(235, "PO EMIF 1 A15"),
- PINCTRL_PIN(236, "PIO EMIF 1 D14"),
- PINCTRL_PIN(237, "P PAD VDD 22"),
- PINCTRL_PIN(238, "P PAD GND 22"),
- PINCTRL_PIN(239, "P PAD VSSIO 29"),
- PINCTRL_PIN(240, "P PAD VDDIO 29"),
- PINCTRL_PIN(241, "PO EMIF 1 A14"),
- PINCTRL_PIN(242, "PIO EMIF 1 D13"),
- PINCTRL_PIN(243, "PO EMIF 1 A13"),
- PINCTRL_PIN(244, "PIO EMIF 1 D12"),
- PINCTRL_PIN(245, "P PAD VSSIO 30"),
- PINCTRL_PIN(246, "P PAD VDDIO 30"),
- PINCTRL_PIN(247, "PO EMIF 1 A12"),
- PINCTRL_PIN(248, "PIO EMIF 1 D11"),
- PINCTRL_PIN(249, "PO EMIF 1 A11"),
- PINCTRL_PIN(250, "PIO EMIF 1 D10"),
- PINCTRL_PIN(251, "P PAD VSSIO 31"),
- PINCTRL_PIN(252, "P PAD VDDIO 31"),
- PINCTRL_PIN(253, "PO EMIF 1 A10"),
- PINCTRL_PIN(254, "PIO EMIF 1 D09"),
- PINCTRL_PIN(255, "PO EMIF 1 A09"),
- PINCTRL_PIN(256, "P PAD VDDIO 32"),
- PINCTRL_PIN(257, "P PAD VSSIO 32"),
- PINCTRL_PIN(258, "P PAD GND 24"),
- PINCTRL_PIN(259, "P PAD VDD 24"),
- PINCTRL_PIN(260, "PIO EMIF 1 D08"),
- PINCTRL_PIN(261, "PO EMIF 1 A08"),
- PINCTRL_PIN(262, "PIO EMIF 1 D07"),
- PINCTRL_PIN(263, "PO EMIF 1 A07"),
- PINCTRL_PIN(264, "P PAD VDDIO 33"),
- PINCTRL_PIN(265, "P PAD VSSIO 33"),
- PINCTRL_PIN(266, "PIO EMIF 1 D06"),
- PINCTRL_PIN(267, "PO EMIF 1 A06"),
- PINCTRL_PIN(268, "PIO EMIF 1 D05"),
- PINCTRL_PIN(269, "PO EMIF 1 A05"),
- PINCTRL_PIN(270, "P PAD VDDIO 34"),
- PINCTRL_PIN(271, "P PAD VSSIO 34"),
- PINCTRL_PIN(272, "PIO EMIF 1 D04"),
- PINCTRL_PIN(273, "PO EMIF 1 A04"),
- PINCTRL_PIN(274, "PIO EMIF 1 D03"),
- PINCTRL_PIN(275, "PO EMIF 1 A03"),
- PINCTRL_PIN(276, "P PAD VDDIO 35"),
- PINCTRL_PIN(277, "P PAD VSSIO 35"),
- PINCTRL_PIN(278, "P PAD GND 23"),
- PINCTRL_PIN(279, "P PAD VDD 23"),
- PINCTRL_PIN(280, "PIO EMIF 1 D02"),
- PINCTRL_PIN(281, "PO EMIF 1 A02"),
- PINCTRL_PIN(282, "PIO EMIF 1 D01"),
- PINCTRL_PIN(283, "PO EMIF 1 A01"),
- PINCTRL_PIN(284, "P PAD VDDIO 36"),
- PINCTRL_PIN(285, "P PAD VSSIO 36"),
- PINCTRL_PIN(286, "PIO EMIF 1 D00"),
- PINCTRL_PIN(287, "PO EMIF 1 BE1 N"),
- PINCTRL_PIN(288, "PO EMIF 1 BE0 N"),
- PINCTRL_PIN(289, "PO EMIF 1 ADV N"),
- PINCTRL_PIN(290, "P PAD VDDIO 37"),
- PINCTRL_PIN(291, "P PAD VSSIO 37"),
- PINCTRL_PIN(292, "PO EMIF 1 SD CKE0"),
- PINCTRL_PIN(293, "PO EMIF 1 OE N"),
- PINCTRL_PIN(294, "PO EMIF 1 WE N"),
- PINCTRL_PIN(295, "P PAD VDDIO 38"),
- PINCTRL_PIN(296, "P PAD VSSIO 38"),
- PINCTRL_PIN(297, "PO EMIF 1 CLK"),
- PINCTRL_PIN(298, "PIO EMIF 1 SD CLK"),
- PINCTRL_PIN(299, "P PAD VSSIO 45 (not bonded)"),
- PINCTRL_PIN(300, "P PAD VDDIO 42"),
- PINCTRL_PIN(301, "P PAD VSSIO 42"),
- PINCTRL_PIN(302, "P PAD GND 31"),
- PINCTRL_PIN(303, "P PAD VDD 31"),
- PINCTRL_PIN(304, "PI EMIF 1 RET CLK"),
- PINCTRL_PIN(305, "PI EMIF 1 WAIT N"),
- PINCTRL_PIN(306, "PI EMIF 1 NFIF READY"),
- PINCTRL_PIN(307, "PO EMIF 1 SD CKE1"),
- PINCTRL_PIN(308, "PO EMIF 1 CS3 N"),
- PINCTRL_PIN(309, "P PAD VDD 25"),
- PINCTRL_PIN(310, "P PAD GND 25"),
- PINCTRL_PIN(311, "P PAD VSSIO 39"),
- PINCTRL_PIN(312, "P PAD VDDIO 39"),
- PINCTRL_PIN(313, "PO EMIF 1 CS2 N"),
- PINCTRL_PIN(314, "PO EMIF 1 CS1 N"),
- PINCTRL_PIN(315, "PO EMIF 1 CS0 N"),
- PINCTRL_PIN(316, "PO ETM TRACE PKT0"),
- PINCTRL_PIN(317, "PO ETM TRACE PKT1"),
- PINCTRL_PIN(318, "PO ETM TRACE PKT2"),
- PINCTRL_PIN(319, "P PAD VDD 30"),
- PINCTRL_PIN(320, "P PAD GND 30"),
- PINCTRL_PIN(321, "P PAD VSSIO 44"),
- PINCTRL_PIN(322, "P PAD VDDIO 44"),
- PINCTRL_PIN(323, "PO ETM TRACE PKT3"),
- PINCTRL_PIN(324, "PO ETM TRACE PKT4"),
- PINCTRL_PIN(325, "PO ETM TRACE PKT5"),
- PINCTRL_PIN(326, "PO ETM TRACE PKT6"),
- PINCTRL_PIN(327, "PO ETM TRACE PKT7"),
- PINCTRL_PIN(328, "PO ETM PIPE STAT0"),
- PINCTRL_PIN(329, "P PAD VDD 26"),
- PINCTRL_PIN(330, "P PAD GND 26"),
- PINCTRL_PIN(331, "P PAD VSSIO 40"),
- PINCTRL_PIN(332, "P PAD VDDIO 40"),
- PINCTRL_PIN(333, "PO ETM PIPE STAT1"),
- PINCTRL_PIN(334, "PO ETM PIPE STAT2"),
- PINCTRL_PIN(335, "PO ETM TRACE CLK"),
- PINCTRL_PIN(336, "PO ETM TRACE SYNC"),
- PINCTRL_PIN(337, "PIO ACC GPIO 33"),
- PINCTRL_PIN(338, "PIO ACC GPIO 32"),
- PINCTRL_PIN(339, "PIO ACC GPIO 30"),
- PINCTRL_PIN(340, "PIO ACC GPIO 29"),
- PINCTRL_PIN(341, "P PAD VDDIO 17"),
- PINCTRL_PIN(342, "P PAD VSSIO 17"),
- PINCTRL_PIN(343, "P PAD GND 15"),
- PINCTRL_PIN(344, "P PAD VDD 15"),
- PINCTRL_PIN(345, "PIO ACC GPIO 28"),
- PINCTRL_PIN(346, "PIO ACC GPIO 27"),
- PINCTRL_PIN(347, "PIO ACC GPIO 16"),
- PINCTRL_PIN(348, "PI TAP TMS"),
- PINCTRL_PIN(349, "PI TAP TDI"),
- PINCTRL_PIN(350, "PO TAP TDO"),
- PINCTRL_PIN(351, "PI TAP RST N"),
- /* Pads along the left edge of the chip */
- PINCTRL_PIN(352, "PI EMU MODE 0"),
- PINCTRL_PIN(353, "PO TAP RET CLK"),
- PINCTRL_PIN(354, "PI TAP CLK"),
- PINCTRL_PIN(355, "PO EMIF 0 SD CS N"),
- PINCTRL_PIN(356, "PO EMIF 0 SD CAS N"),
- PINCTRL_PIN(357, "PO EMIF 0 SD WE N"),
- PINCTRL_PIN(358, "P PAD VDDIO 1"),
- PINCTRL_PIN(359, "P PAD VSSIO 1"),
- PINCTRL_PIN(360, "P PAD GND 1"),
- PINCTRL_PIN(361, "P PAD VDD 1"),
- PINCTRL_PIN(362, "PO EMIF 0 SD CKE"),
- PINCTRL_PIN(363, "PO EMIF 0 SD DQML"),
- PINCTRL_PIN(364, "PO EMIF 0 SD DQMU"),
- PINCTRL_PIN(365, "PO EMIF 0 SD RAS N"),
- PINCTRL_PIN(366, "PIO EMIF 0 D15"),
- PINCTRL_PIN(367, "PO EMIF 0 A15"),
- PINCTRL_PIN(368, "PIO EMIF 0 D14"),
- PINCTRL_PIN(369, "PO EMIF 0 A14"),
- PINCTRL_PIN(370, "PIO EMIF 0 D13"),
- PINCTRL_PIN(371, "PO EMIF 0 A13"),
- PINCTRL_PIN(372, "P PAD VDDIO 2"),
- PINCTRL_PIN(373, "P PAD VSSIO 2"),
- PINCTRL_PIN(374, "P PAD GND 2"),
- PINCTRL_PIN(375, "P PAD VDD 2"),
- PINCTRL_PIN(376, "PIO EMIF 0 D12"),
- PINCTRL_PIN(377, "PO EMIF 0 A12"),
- PINCTRL_PIN(378, "PIO EMIF 0 D11"),
- PINCTRL_PIN(379, "PO EMIF 0 A11"),
- PINCTRL_PIN(380, "PIO EMIF 0 D10"),
- PINCTRL_PIN(381, "PO EMIF 0 A10"),
- PINCTRL_PIN(382, "PIO EMIF 0 D09"),
- PINCTRL_PIN(383, "PO EMIF 0 A09"),
- PINCTRL_PIN(384, "PIO EMIF 0 D08"),
- PINCTRL_PIN(385, "PO EMIF 0 A08"),
- PINCTRL_PIN(386, "PIO EMIF 0 D07"),
- PINCTRL_PIN(387, "PO EMIF 0 A07"),
- PINCTRL_PIN(388, "P PAD VDDIO 3"),
- PINCTRL_PIN(389, "P PAD VSSIO 3"),
- PINCTRL_PIN(390, "P PAD GND 3"),
- PINCTRL_PIN(391, "P PAD VDD 3"),
- PINCTRL_PIN(392, "PO EFUSE RDOUT1"),
- PINCTRL_PIN(393, "PIO EMIF 0 D06"),
- PINCTRL_PIN(394, "PO EMIF 0 A06"),
- PINCTRL_PIN(395, "PIO EMIF 0 D05"),
- PINCTRL_PIN(396, "PO EMIF 0 A05"),
- PINCTRL_PIN(397, "PIO EMIF 0 D04"),
- PINCTRL_PIN(398, "PO EMIF 0 A04"),
- PINCTRL_PIN(399, "A PADS/A VDDCO1v82v5 GND 80U SF LIN VDDCO AF"),
- PINCTRL_PIN(400, "PWR VDDCO AF"),
- PINCTRL_PIN(401, "PWR EFUSE HV1"),
- PINCTRL_PIN(402, "P PAD VSSIO 4"),
- PINCTRL_PIN(403, "P PAD VDDIO 4"),
- PINCTRL_PIN(404, "P PAD GND 4"),
- PINCTRL_PIN(405, "P PAD VDD 4"),
- PINCTRL_PIN(406, "PIO EMIF 0 D03"),
- PINCTRL_PIN(407, "PO EMIF 0 A03"),
- PINCTRL_PIN(408, "PWR EFUSE HV2"),
- PINCTRL_PIN(409, "PWR EFUSE HV3"),
- PINCTRL_PIN(410, "PIO EMIF 0 D02"),
- PINCTRL_PIN(411, "PO EMIF 0 A02"),
- PINCTRL_PIN(412, "PIO EMIF 0 D01"),
- PINCTRL_PIN(413, "P PAD VDDIO 5"),
- PINCTRL_PIN(414, "P PAD VSSIO 5"),
- PINCTRL_PIN(415, "P PAD GND 5"),
- PINCTRL_PIN(416, "P PAD VDD 5"),
- PINCTRL_PIN(417, "PO EMIF 0 A01"),
- PINCTRL_PIN(418, "PIO EMIF 0 D00"),
- PINCTRL_PIN(419, "IF 0 SD CLK"),
- PINCTRL_PIN(420, "APP SPI CLK"),
- PINCTRL_PIN(421, "APP SPI DO"),
- PINCTRL_PIN(422, "APP SPI DI"),
- PINCTRL_PIN(423, "APP SPI CS0"),
- PINCTRL_PIN(424, "APP SPI CS1"),
- PINCTRL_PIN(425, "APP SPI CS2"),
- PINCTRL_PIN(426, "PIO APP GPIO 10"),
- PINCTRL_PIN(427, "P PAD VDDIO 41"),
- PINCTRL_PIN(428, "P PAD VSSIO 41"),
- PINCTRL_PIN(429, "P PAD GND 6"),
- PINCTRL_PIN(430, "P PAD VDD 6"),
- PINCTRL_PIN(431, "PIO ACC SDIO0 CMD"),
- PINCTRL_PIN(432, "PIO ACC SDIO0 CK"),
- PINCTRL_PIN(433, "PIO ACC SDIO0 D3"),
- PINCTRL_PIN(434, "PIO ACC SDIO0 D2"),
- PINCTRL_PIN(435, "PIO ACC SDIO0 D1"),
- PINCTRL_PIN(436, "PIO ACC SDIO0 D0"),
- PINCTRL_PIN(437, "PIO USB PU"),
- PINCTRL_PIN(438, "PIO USB SP"),
- PINCTRL_PIN(439, "PIO USB DAT VP"),
- PINCTRL_PIN(440, "PIO USB SE0 VM"),
- PINCTRL_PIN(441, "PIO USB OE"),
- PINCTRL_PIN(442, "PIO USB SUSP"),
- PINCTRL_PIN(443, "P PAD VSSIO 6"),
- PINCTRL_PIN(444, "P PAD VDDIO 6"),
- PINCTRL_PIN(445, "PIO USB PUEN"),
- PINCTRL_PIN(446, "PIO ACC UART0 RX"),
- PINCTRL_PIN(447, "PIO ACC UART0 TX"),
- PINCTRL_PIN(448, "PIO ACC UART0 CTS"),
- PINCTRL_PIN(449, "PIO ACC UART0 RTS"),
- PINCTRL_PIN(450, "PIO ACC UART3 RX"),
- PINCTRL_PIN(451, "PIO ACC UART3 TX"),
- PINCTRL_PIN(452, "PIO ACC UART3 CTS"),
- PINCTRL_PIN(453, "PIO ACC UART3 RTS"),
- PINCTRL_PIN(454, "PIO ACC IRDA TX"),
- PINCTRL_PIN(455, "P PAD VDDIO 7"),
- PINCTRL_PIN(456, "P PAD VSSIO 7"),
- PINCTRL_PIN(457, "P PAD GND 7"),
- PINCTRL_PIN(458, "P PAD VDD 7"),
- PINCTRL_PIN(459, "PIO ACC IRDA RX"),
- PINCTRL_PIN(460, "PIO ACC PCM I2S CLK"),
- PINCTRL_PIN(461, "PIO ACC PCM I2S WS"),
- PINCTRL_PIN(462, "PIO ACC PCM I2S DATA A"),
- PINCTRL_PIN(463, "PIO ACC PCM I2S DATA B"),
- PINCTRL_PIN(464, "PO SIM CLK"),
- PINCTRL_PIN(465, "PIO ACC IRDA SD"),
- PINCTRL_PIN(466, "PIO SIM DATA"),
-};
-
-/**
- * @dev: a pointer back to containing device
- * @virtbase: the offset to the controller in virtual memory
- */
-struct u300_pmx {
- struct device *dev;
- struct pinctrl_dev *pctl;
- void __iomem *virtbase;
-};
-
-/**
- * u300_pmx_registers - the array of registers read/written for each pinmux
- * shunt setting
- */
-static const u32 u300_pmx_registers[] = {
- U300_SYSCON_PMC1LR,
- U300_SYSCON_PMC1HR,
- U300_SYSCON_PMC2R,
- U300_SYSCON_PMC3R,
- U300_SYSCON_PMC4R,
-};
-
-/**
- * struct u300_pin_group - describes a U300 pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
- */
-struct u300_pin_group {
- const char *name;
- const unsigned int *pins;
- const unsigned num_pins;
-};
-
-/**
- * struct pmx_onmask - mask bits to enable/disable padmux
- * @mask: mask bits to disable
- * @val: mask bits to enable
- *
- * onmask lazy dog:
- * onmask = {
- * {"PMC1LR" mask, "PMC1LR" value},
- * {"PMC1HR" mask, "PMC1HR" value},
- * {"PMC2R" mask, "PMC2R" value},
- * {"PMC3R" mask, "PMC3R" value},
- * {"PMC4R" mask, "PMC4R" value}
- * }
- */
-struct u300_pmx_mask {
- u16 mask;
- u16 bits;
-};
-
-/* The chip power pins are VDD, GND, VDDIO and VSSIO */
-static const unsigned power_pins[] = { 0, 1, 3, 31, 46, 47, 49, 50, 61, 62, 63,
- 64, 78, 79, 80, 81, 92, 93, 94, 95, 101, 102, 103, 104, 115, 116, 117,
- 118, 130, 131, 132, 133, 145, 146, 147, 148, 159, 160, 172, 173, 174,
- 175, 187, 188, 189, 190, 201, 202, 211, 212, 213, 214, 215, 218, 223,
- 224, 225, 226, 231, 232, 237, 238, 239, 240, 245, 246, 251, 252, 256,
- 257, 258, 259, 264, 265, 270, 271, 276, 277, 278, 279, 284, 285, 290,
- 291, 295, 296, 299, 300, 301, 302, 303, 309, 310, 311, 312, 319, 320,
- 321, 322, 329, 330, 331, 332, 341, 342, 343, 344, 358, 359, 360, 361,
- 372, 373, 374, 375, 388, 389, 390, 391, 402, 403, 404, 405, 413, 414,
- 415, 416, 427, 428, 429, 430, 443, 444, 455, 456, 457, 458 };
-static const unsigned emif0_pins[] = { 355, 356, 357, 362, 363, 364, 365, 366,
- 367, 368, 369, 370, 371, 376, 377, 378, 379, 380, 381, 382, 383, 384,
- 385, 386, 387, 393, 394, 395, 396, 397, 398, 406, 407, 410, 411, 412,
- 417, 418 };
-static const unsigned emif1_pins[] = { 216, 217, 219, 220, 221, 222, 227, 228,
- 229, 230, 233, 234, 235, 236, 241, 242, 243, 244, 247, 248, 249, 250,
- 253, 254, 255, 260, 261, 262, 263, 266, 267, 268, 269, 272, 273, 274,
- 275, 280, 281, 282, 283, 286, 287, 288, 289, 292, 293, 294, 297, 298,
- 304, 305, 306, 307, 308, 313, 314, 315 };
-static const unsigned uart0_pins[] = { 134, 135, 136, 137 };
-static const unsigned mmc0_pins[] = { 166, 167, 168, 169, 170, 171, 176, 177 };
-static const unsigned spi0_pins[] = { 420, 421, 422, 423, 424, 425 };
-
-static const struct u300_pmx_mask emif0_mask[] = {
- {0, 0},
- {0, 0},
- {0, 0},
- {0, 0},
- {0, 0},
-};
-
-static const struct u300_pmx_mask emif1_mask[] = {
- /*
- * This connects the SDRAM to CS2 and a NAND flash to
- * CS0 on the EMIF.
- */
- {
- U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK |
- U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK |
- U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK |
- U300_SYSCON_PMC1LR_EMIF_1_MASK,
- U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM |
- U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC |
- U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF |
- U300_SYSCON_PMC1LR_EMIF_1_SDRAM0
- },
- {0, 0},
- {0, 0},
- {0, 0},
- {0, 0},
-};
-
-static const struct u300_pmx_mask uart0_mask[] = {
- {0, 0},
- {
- U300_SYSCON_PMC1HR_APP_UART0_1_MASK |
- U300_SYSCON_PMC1HR_APP_UART0_2_MASK,
- U300_SYSCON_PMC1HR_APP_UART0_1_UART0 |
- U300_SYSCON_PMC1HR_APP_UART0_2_UART0
- },
- {0, 0},
- {0, 0},
- {0, 0},
-};
-
-static const struct u300_pmx_mask mmc0_mask[] = {
- { U300_SYSCON_PMC1LR_MMCSD_MASK, U300_SYSCON_PMC1LR_MMCSD_MMCSD},
- {0, 0},
- {0, 0},
- {0, 0},
- { U300_SYSCON_PMC4R_APP_MISC_12_MASK,
- U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO }
-};
-
-static const struct u300_pmx_mask spi0_mask[] = {
- {0, 0},
- {
- U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
- U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
- U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
- U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
- U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
- U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI
- },
- {0, 0},
- {0, 0},
- {0, 0}
-};
-
-static const struct u300_pin_group u300_pin_groups[] = {
- {
- .name = "powergrp",
- .pins = power_pins,
- .num_pins = ARRAY_SIZE(power_pins),
- },
- {
- .name = "emif0grp",
- .pins = emif0_pins,
- .num_pins = ARRAY_SIZE(emif0_pins),
- },
- {
- .name = "emif1grp",
- .pins = emif1_pins,
- .num_pins = ARRAY_SIZE(emif1_pins),
- },
- {
- .name = "uart0grp",
- .pins = uart0_pins,
- .num_pins = ARRAY_SIZE(uart0_pins),
- },
- {
- .name = "mmc0grp",
- .pins = mmc0_pins,
- .num_pins = ARRAY_SIZE(mmc0_pins),
- },
- {
- .name = "spi0grp",
- .pins = spi0_pins,
- .num_pins = ARRAY_SIZE(spi0_pins),
- },
-};
-
-static int u300_get_groups_count(struct pinctrl_dev *pctldev)
-{
- return ARRAY_SIZE(u300_pin_groups);
-}
-
-static const char *u300_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return u300_pin_groups[selector].name;
-}
-
-static int u300_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *num_pins)
-{
- *pins = u300_pin_groups[selector].pins;
- *num_pins = u300_pin_groups[selector].num_pins;
- return 0;
-}
-
-static void u300_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset)
-{
- seq_printf(s, " " DRIVER_NAME);
-}
-
-static const struct pinctrl_ops u300_pctrl_ops = {
- .get_groups_count = u300_get_groups_count,
- .get_group_name = u300_get_group_name,
- .get_group_pins = u300_get_group_pins,
- .pin_dbg_show = u300_pin_dbg_show,
-};
-
-/*
- * Here we define the available functions and their corresponding pin groups
- */
-
-/**
- * struct u300_pmx_func - describes U300 pinmux functions
- * @name: the name of this specific function
- * @groups: corresponding pin groups
- * @onmask: bits to set to enable this when doing pin muxing
- */
-struct u300_pmx_func {
- const char *name;
- const char * const *groups;
- const unsigned num_groups;
- const struct u300_pmx_mask *mask;
-};
-
-static const char * const powergrps[] = { "powergrp" };
-static const char * const emif0grps[] = { "emif0grp" };
-static const char * const emif1grps[] = { "emif1grp" };
-static const char * const uart0grps[] = { "uart0grp" };
-static const char * const mmc0grps[] = { "mmc0grp" };
-static const char * const spi0grps[] = { "spi0grp" };
-
-static const struct u300_pmx_func u300_pmx_functions[] = {
- {
- .name = "power",
- .groups = powergrps,
- .num_groups = ARRAY_SIZE(powergrps),
- /* Mask is N/A */
- },
- {
- .name = "emif0",
- .groups = emif0grps,
- .num_groups = ARRAY_SIZE(emif0grps),
- .mask = emif0_mask,
- },
- {
- .name = "emif1",
- .groups = emif1grps,
- .num_groups = ARRAY_SIZE(emif1grps),
- .mask = emif1_mask,
- },
- {
- .name = "uart0",
- .groups = uart0grps,
- .num_groups = ARRAY_SIZE(uart0grps),
- .mask = uart0_mask,
- },
- {
- .name = "mmc0",
- .groups = mmc0grps,
- .num_groups = ARRAY_SIZE(mmc0grps),
- .mask = mmc0_mask,
- },
- {
- .name = "spi0",
- .groups = spi0grps,
- .num_groups = ARRAY_SIZE(spi0grps),
- .mask = spi0_mask,
- },
-};
-
-static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
- bool enable)
-{
- u16 regval, val, mask;
- int i;
- const struct u300_pmx_mask *upmx_mask;
-
- upmx_mask = u300_pmx_functions[selector].mask;
- for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
- if (enable)
- val = upmx_mask->bits;
- else
- val = 0;
-
- mask = upmx_mask->mask;
- if (mask != 0) {
- regval = readw(upmx->virtbase + u300_pmx_registers[i]);
- regval &= ~mask;
- regval |= val;
- writew(regval, upmx->virtbase + u300_pmx_registers[i]);
- }
- upmx_mask++;
- }
-}
-
-static int u300_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- struct u300_pmx *upmx;
-
- /* There is nothing to do with the power pins */
- if (selector == 0)
- return 0;
-
- upmx = pinctrl_dev_get_drvdata(pctldev);
- u300_pmx_endisable(upmx, selector, true);
-
- return 0;
-}
-
-static int u300_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
-{
- return ARRAY_SIZE(u300_pmx_functions);
-}
-
-static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return u300_pmx_functions[selector].name;
-}
-
-static int u300_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- *groups = u300_pmx_functions[selector].groups;
- *num_groups = u300_pmx_functions[selector].num_groups;
- return 0;
-}
-
-static const struct pinmux_ops u300_pmx_ops = {
- .get_functions_count = u300_pmx_get_funcs_count,
- .get_function_name = u300_pmx_get_func_name,
- .get_function_groups = u300_pmx_get_groups,
- .set_mux = u300_pmx_set_mux,
-};
-
-static int u300_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *config)
-{
- struct pinctrl_gpio_range *range =
- pinctrl_find_gpio_range_from_pin(pctldev, pin);
-
- /* We get config for those pins we CAN get it for and that's it */
- if (!range)
- return -ENOTSUPP;
-
- return u300_gpio_config_get(range->gc,
- (pin - range->pin_base + range->base),
- config);
-}
-
-static int u300_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *configs, unsigned num_configs)
-{
- struct pinctrl_gpio_range *range =
- pinctrl_find_gpio_range_from_pin(pctldev, pin);
- int ret, i;
-
- if (!range)
- return -EINVAL;
-
- for (i = 0; i < num_configs; i++) {
- /* Note: none of these configurations take any argument */
- ret = u300_gpio_config_set(range->gc,
- (pin - range->pin_base + range->base),
- pinconf_to_config_param(configs[i]));
- if (ret)
- return ret;
- } /* for each config */
-
- return 0;
-}
-
-static const struct pinconf_ops u300_pconf_ops = {
- .is_generic = true,
- .pin_config_get = u300_pin_config_get,
- .pin_config_set = u300_pin_config_set,
-};
-
-static struct pinctrl_desc u300_pmx_desc = {
- .name = DRIVER_NAME,
- .pins = u300_pads,
- .npins = ARRAY_SIZE(u300_pads),
- .pctlops = &u300_pctrl_ops,
- .pmxops = &u300_pmx_ops,
- .confops = &u300_pconf_ops,
- .owner = THIS_MODULE,
-};
-
-static int u300_pmx_probe(struct platform_device *pdev)
-{
- struct u300_pmx *upmx;
-
- /* Create state holders etc for this driver */
- upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
- if (!upmx)
- return -ENOMEM;
-
- upmx->dev = &pdev->dev;
-
- upmx->virtbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(upmx->virtbase))
- return PTR_ERR(upmx->virtbase);
-
- upmx->pctl = devm_pinctrl_register(&pdev->dev, &u300_pmx_desc, upmx);
- if (IS_ERR(upmx->pctl)) {
- dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
- return PTR_ERR(upmx->pctl);
- }
-
- platform_set_drvdata(pdev, upmx);
-
- dev_info(&pdev->dev, "initialized U300 pin control driver\n");
-
- return 0;
-}
-
-static const struct of_device_id u300_pinctrl_match[] = {
- { .compatible = "stericsson,pinctrl-u300" },
- {},
-};
-
-
-static struct platform_driver u300_pmx_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = u300_pinctrl_match,
- },
- .probe = u300_pmx_probe,
-};
-
-static int __init u300_pmx_init(void)
-{
- return platform_driver_register(&u300_pmx_driver);
-}
-arch_initcall(u300_pmx_init);
-
-static void __exit u300_pmx_exit(void)
-{
- platform_driver_unregister(&u300_pmx_driver);
-}
-module_exit(u300_pmx_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("U300 pin control driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index bab888fe3f8e..36a11c9e893a 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -564,7 +564,7 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
continue;
}
- seq_printf(s, "function: %s, groups = [ ", func);
+ seq_printf(s, "function %d: %s, groups = [ ", func_selector, func);
for (i = 0; i < num_groups; i++)
seq_printf(s, "%s ", groups[i]);
seq_puts(s, "]\n");
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index a003776506d0..6853a896c476 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -220,6 +220,15 @@ config PINCTRL_SC7280
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SC7280 platform.
+config PINCTRL_SC8180X
+ tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SC8180x platform.
+
config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
depends on GPIOLIB && OF
@@ -265,6 +274,15 @@ config PINCTRL_SM8250
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8250 platform.
+config PINCTRL_SM8350
+ tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8350 platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 91875a3f5ac4..d4301fbb7274 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -26,9 +26,11 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o
+obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
+obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e051aecf95c4..d70caecd21d2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -51,6 +51,7 @@
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
* @phys_base: Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
MSM_ACCESSOR(intr_status)
MSM_ACCESSOR(intr_target)
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g)
+{
+ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+ msm_writel_intr_status(val, pctrl, g);
+}
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *gc = &pctrl->chip;
+ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+ struct irq_data *d = irq_get_irq_data(irq);
+ unsigned int gpio_func = pctrl->soc->gpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ /*
+ * If a GPIO interrupt is set up on this pin then we need special
+ * handling. Specifically, the interrupt detection logic will still
+ * see the pin twiddle even when we're muxed away.
+ *
+ * When we see a pin with an interrupt set up on it, we disable
+ * (mask) interrupts on it when we mux away, until we mux back. Note
+ * that disable_irq() refcounts, so interrupts stay disabled as long
+ * as at least one disable_irq() call is outstanding.
+ */
+ if (d && i != gpio_func &&
+ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+ disable_irq(irq);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (d && i == gpio_func &&
+ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+ /*
+ * Clear any interrupts detected while the pin was not muxed as
+ * GPIO, since we only masked them.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+ else
+ msm_ack_intr_status(pctrl, g);
+
+ enable_irq(irq);
+ }
+
return 0;
}
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
if (!g->nfuncs)
return 0;
- /* For now assume function 0 is GPIO because it always is */
- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
}
static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- if (status_clear) {
- /*
- * clear the interrupt status bit before unmask to avoid
- * any erroneous interrupts that would have got latched
- * when the interrupt is not in use.
- */
- val = msm_readl_intr_status(pctrl, g);
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
- }
-
val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
irq_chip_enable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
- msm_gpio_irq_clear_unmask(d, true);
+ msm_gpio_irq_unmask(d);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
msm_gpio_irq_mask(d);
}
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- msm_gpio_irq_clear_unmask(d, false);
-}
-
/**
* msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
* @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = msm_readl_intr_status(pctrl, g);
- if (g->intr_ack_high)
- val |= BIT(g->intr_status_bit);
- else
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
+ msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
+ bool was_enabled;
u32 val;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
+ was_enabled = val & BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
msm_writel_intr_cfg(val, pctrl, g);
+ /*
+ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ * Clear the interrupt. This is safe because we have
+ * IRQCHIP_SET_TYPE_MASKED.
+ */
+ if (!was_enabled)
+ msm_ack_intr_status(pctrl, g);
+
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
}
/*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
+ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+ * only works if the disable is not lazy, since we only clear bogus
+ * interrupts at the hardware level. Explicitly mark the interrupt as
+ * UNLAZY.
*/
- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
return 0;
out:
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 333f99243c43..e31a5167c91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
* @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
* to be aware that their parent can't handle dual
* edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
+ unsigned int gpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
new file mode 100644
index 000000000000..b765bf667574
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -0,0 +1,1624 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const char * const sc8180x_tiles[] = {
+ "south",
+ "east",
+ "west"
+};
+
+enum {
+ SOUTH,
+ EAST,
+ WEST
+};
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
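+/*
+ * Each pin group owns a REG_SIZE (0x1000) byte register window; the ctl,
+ * io, intr_cfg and intr_status registers sit at fixed offsets inside it.
+ * The extra offset parameter covers pins 177-189, whose windows sit
+ * 0x1e000 further into the SOUTH tile.
+ */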
+#define PINGROUP_OFFSET(id, _tile, offset, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id + offset, \
+ .io_reg = REG_SIZE * id + 0x4 + offset, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8 + offset, \
+ .intr_status_reg = REG_SIZE * id + 0xc + offset,\
+ .intr_target_reg = REG_SIZE * id + 0x8 + offset,\
+ .tile = _tile, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ PINGROUP_OFFSET(id, _tile, 0x0, f1, f2, f3, f4, f5, f6, f7, f8, f9)
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = EAST, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = 0xb6000, \
+ .io_reg = 0xb6004, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = SOUTH, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
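+
+/*
+ * The SDC groups carry pad control only (pull and drive strength), and
+ * UFS_RESET additionally exposes an output bit so the reset line can be
+ * driven; with mux_bit and all intr_* bits set to -1, none of these
+ * groups can be remuxed or used as interrupt sources.
+ */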
+static const struct pinctrl_pin_desc sc8180x_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "UFS_RESET"),
+ PINCTRL_PIN(191, "SDC2_CLK"),
+ PINCTRL_PIN(192, "SDC2_CMD"),
+ PINCTRL_PIN(193, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
+
+enum sc8180x_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_aoss_cti,
+ msm_mux_atest_char,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb0,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb3,
+ msm_mux_atest_usb4,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cci_timer5,
+ msm_mux_cci_timer6,
+ msm_mux_cci_timer7,
+ msm_mux_cci_timer8,
+ msm_mux_cci_timer9,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi,
+ msm_mux_debug_hot,
+ msm_mux_dp_hot,
+ msm_mux_edp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_emac_phy,
+ msm_mux_emac_pps,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gcc_gp4,
+ msm_mux_gcc_gp5,
+ msm_mux_gpio,
+ msm_mux_gps,
+ msm_mux_grfc,
+ msm_mux_hs1_mi2s,
+ msm_mux_hs2_mi2s,
+ msm_mux_hs3_mi2s,
+ msm_mux_jitter_bist,
+ msm_mux_lpass_slimbus,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mdp_vsync4,
+ msm_mux_mdp_vsync5,
+ msm_mux_mss_lte,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_pci_e0,
+ msm_mux_pci_e1,
+ msm_mux_pci_e2,
+ msm_mux_pci_e3,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_reset,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink,
+ msm_mux_qspi0,
+ msm_mux_qspi0_clk,
+ msm_mux_qspi0_cs,
+ msm_mux_qspi1,
+ msm_mux_qspi1_clk,
+ msm_mux_qspi1_cs,
+ msm_mux_qua_mi2s,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup2,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_rgmii,
+ msm_mux_sd_write,
+ msm_mux_sdc4,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_sp_cmu,
+ msm_mux_spkr_i2s,
+ msm_mux_ter_mi2s,
+ msm_mux_tgu,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsif1,
+ msm_mux_tsif2,
+ msm_mux_uim1,
+ msm_mux_uim2,
+ msm_mux_uim_batt,
+ msm_mux_usb0_phy,
+ msm_mux_usb1_phy,
+ msm_mux_usb2phy_ac,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc,
+ msm_mux_wlan2_adc,
+ msm_mux_wmss_reset,
+ msm_mux__,
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio115",
+};
+
+static const char * const agera_pll_groups[] = {
+ "gpio37",
+};
+
+static const char * const aoss_cti_groups[] = {
+ "gpio113",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio133", "gpio134", "gpio135", "gpio140", "gpio142",
+};
+
+static const char * const atest_tsens2_groups[] = {
+ "gpio62",
+};
+
+static const char * const atest_tsens_groups[] = {
+ "gpio93",
+};
+
+static const char * const atest_usb0_groups[] = {
+ "gpio90", "gpio91", "gpio92", "gpio93", "gpio94",
+};
+
+static const char * const atest_usb1_groups[] = {
+ "gpio60", "gpio62", "gpio63", "gpio64", "gpio65",
+};
+
+static const char * const atest_usb2_groups[] = {
+ "gpio34", "gpio95", "gpio102", "gpio121", "gpio122",
+};
+
+static const char * const atest_usb3_groups[] = {
+ "gpio68", "gpio71", "gpio72", "gpio73", "gpio74",
+};
+
+static const char * const atest_usb4_groups[] = {
+ "gpio75", "gpio76", "gpio77", "gpio78", "gpio88",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio148",
+};
+
+static const char * const btfm_slimbus_groups[] = {
+ "gpio153", "gpio154",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio25", "gpio179", "gpio180",
+ "gpio181",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio176", "gpio185", "gpio186",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio31", "gpio32", "gpio33", "gpio34", "gpio39", "gpio40",
+ "gpio41", "gpio42",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio21",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio22",
+};
+
+static const char * const cci_timer2_groups[] = {
+ "gpio23",
+};
+
+static const char * const cci_timer3_groups[] = {
+ "gpio24",
+};
+
+static const char * const cci_timer4_groups[] = {
+ "gpio178",
+};
+
+static const char * const cci_timer5_groups[] = {
+ "gpio182",
+};
+
+static const char * const cci_timer6_groups[] = {
+ "gpio183",
+};
+
+static const char * const cci_timer7_groups[] = {
+ "gpio184",
+};
+
+static const char * const cci_timer8_groups[] = {
+ "gpio185",
+};
+
+static const char * const cci_timer9_groups[] = {
+ "gpio186",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio159",
+ "gpio160",
+ "gpio161",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio34",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio98", "gpio99", "gpio145", "gpio146",
+};
+
+static const char * const ddr_pxi_groups[] = {
+ "gpio60", "gpio62", "gpio63", "gpio64", "gpio65", "gpio68", "gpio71",
+ "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio88", "gpio90",
+};
+
+static const char * const debug_hot_groups[] = {
+ "gpio7",
+};
+
+static const char * const dp_hot_groups[] = {
+ "gpio189",
+};
+
+static const char * const edp_hot_groups[] = {
+ "gpio10",
+};
+
+static const char * const edp_lcd_groups[] = {
+ "gpio11",
+};
+
+static const char * const emac_phy_groups[] = {
+ "gpio124",
+};
+
+static const char * const emac_pps_groups[] = {
+ "gpio81",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio131", "gpio136",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio21", "gpio137",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio22", "gpio138",
+};
+
+static const char * const gcc_gp4_groups[] = {
+ "gpio139", "gpio182",
+};
+
+static const char * const gcc_gp5_groups[] = {
+ "gpio140", "gpio183",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189",
+};
+
+static const char * const gps_groups[] = {
+ "gpio60", "gpio76", "gpio77", "gpio81", "gpio82",
+};
+
+static const char * const grfc_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio71", "gpio72",
+ "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79",
+ "gpio80", "gpio81", "gpio82",
+};
+
+static const char * const hs1_mi2s_groups[] = {
+ "gpio155", "gpio156", "gpio157", "gpio158", "gpio159",
+};
+
+static const char * const hs2_mi2s_groups[] = {
+ "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+};
+
+static const char * const hs3_mi2s_groups[] = {
+ "gpio125", "gpio165", "gpio166", "gpio167", "gpio168",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio129",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio149", "gpio150", "gpio151", "gpio152",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio10",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync4_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync5_groups[] = {
+ "gpio89",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio60", "gpio82",
+};
+
+static const char * const mss_lte_groups[] = {
+ "gpio69", "gpio70",
+};
+
+static const char * const nav_pps_groups[] = {
+ "gpio60", "gpio60", "gpio76", "gpio76", "gpio77", "gpio77", "gpio81",
+ "gpio81", "gpio82", "gpio82",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio68",
+};
+
+static const char * const pci_e0_groups[] = {
+ "gpio35", "gpio36",
+};
+
+static const char * const pci_e1_groups[] = {
+ "gpio102", "gpio103",
+};
+
+static const char * const pci_e2_groups[] = {
+ "gpio175", "gpio176",
+};
+
+static const char * const pci_e3_groups[] = {
+ "gpio178", "gpio179",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio33", "gpio53", "gpio54",
+ "gpio102", "gpio120", "gpio121", "gpio122", "gpio123", "gpio125",
+ "gpio148", "gpio149", "gpio150", "gpio151", "gpio152", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio130",
+};
+
+static const char * const pll_bypassnl_groups[] = {
+ "gpio100",
+};
+
+static const char * const pll_reset_groups[] = {
+ "gpio101",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio143", "gpio144", "gpio146", "gpio147",
+};
+
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio145",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio163",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio49", "gpio50", "gpio81", "gpio82", "gpio89", "gpio90", "gpio141",
+ "gpio142",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25", "gpio26",
+ "gpio27", "gpio28", "gpio29", "gpio30", "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio92", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio121", "gpio130", "gpio132",
+ "gpio133", "gpio134", "gpio135",
+};
+
+static const char * const qlink_groups[] = {
+ "gpio61", "gpio62",
+};
+
+static const char * const qspi0_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio93",
+};
+
+static const char * const qspi0_clk_groups[] = {
+ "gpio92",
+};
+
+static const char * const qspi0_cs_groups[] = {
+ "gpio88", "gpio94",
+};
+
+static const char * const qspi1_groups[] = {
+ "gpio56", "gpio57", "gpio161", "gpio162",
+};
+
+static const char * const qspi1_clk_groups[] = {
+ "gpio163",
+};
+
+static const char * const qspi1_cs_groups[] = {
+ "gpio55", "gpio164",
+};
+
+static const char * const qua_mi2s_groups[] = {
+ "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", "gpio141",
+ "gpio142",
+};
+
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup10_groups[] = {
+ "gpio9", "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const qup11_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95",
+};
+
+static const char * const qup12_groups[] = {
+ "gpio83", "gpio84", "gpio85", "gpio86",
+};
+
+static const char * const qup13_groups[] = {
+ "gpio43", "gpio44", "gpio45", "gpio46",
+};
+
+static const char * const qup14_groups[] = {
+ "gpio47", "gpio48", "gpio49", "gpio50",
+};
+
+static const char * const qup15_groups[] = {
+ "gpio27", "gpio28", "gpio29", "gpio30",
+};
+
+static const char * const qup16_groups[] = {
+ "gpio83", "gpio84", "gpio85", "gpio86",
+};
+
+static const char * const qup17_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58",
+};
+
+static const char * const qup18_groups[] = {
+ "gpio23", "gpio24", "gpio25", "gpio26",
+};
+
+static const char * const qup19_groups[] = {
+ "gpio181", "gpio182", "gpio183", "gpio184",
+};
+
+static const char * const qup1_groups[] = {
+ "gpio114", "gpio115", "gpio116", "gpio117",
+};
+
+static const char * const qup2_groups[] = {
+ "gpio126", "gpio127", "gpio128", "gpio129",
+};
+
+static const char * const qup3_groups[] = {
+ "gpio144", "gpio145", "gpio146", "gpio147",
+};
+
+static const char * const qup4_groups[] = {
+ "gpio51", "gpio52", "gpio53", "gpio54",
+};
+
+static const char * const qup5_groups[] = {
+ "gpio119", "gpio120", "gpio121", "gpio122",
+};
+
+static const char * const qup6_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup7_groups[] = {
+ "gpio98", "gpio99", "gpio100", "gpio101",
+};
+
+static const char * const qup8_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+
+static const char * const qup9_groups[] = {
+ "gpio39", "gpio40", "gpio41", "gpio42",
+};
+
+static const char * const qup_l4_groups[] = {
+ "gpio35", "gpio59", "gpio60", "gpio95",
+};
+
+static const char * const qup_l5_groups[] = {
+ "gpio7", "gpio33", "gpio36", "gpio96",
+};
+
+static const char * const qup_l6_groups[] = {
+ "gpio6", "gpio34", "gpio37", "gpio97",
+};
+
+static const char * const rgmii_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio59", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+ "gpio122",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio97",
+};
+
+static const char * const sdc4_groups[] = {
+ "gpio91", "gpio93", "gpio94", "gpio95",
+};
+
+static const char * const sdc4_clk_groups[] = {
+ "gpio92",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+ "gpio90",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130",
+};
+
+static const char * const sp_cmu_groups[] = {
+ "gpio162",
+};
+
+static const char * const spkr_i2s_groups[] = {
+ "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+};
+
+static const char * const ter_mi2s_groups[] = {
+ "gpio131", "gpio132", "gpio133", "gpio134", "gpio135",
+};
+
+static const char * const tgu_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio88", "gpio74", "gpio77", "gpio76",
+ "gpio75",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio150",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio150",
+};
+
+static const char * const tsif1_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91", "gpio97",
+};
+
+static const char * const tsif2_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+};
+
+static const char * const uim1_groups[] = {
+ "gpio109", "gpio110", "gpio111", "gpio112",
+};
+
+static const char * const uim2_groups[] = {
+ "gpio105", "gpio106", "gpio107", "gpio108",
+};
+
+static const char * const uim_batt_groups[] = {
+ "gpio113",
+};
+
+static const char * const usb0_phy_groups[] = {
+ "gpio38",
+};
+
+static const char * const usb1_phy_groups[] = {
+ "gpio58",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47", "gpio48", "gpio113", "gpio123",
+};
+
+static const char * const vfr_1_groups[] = {
+ "gpio91",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio62",
+};
+
+static const char * const wlan1_adc_groups[] = {
+ "gpio64", "gpio63",
+};
+
+static const char * const wlan2_adc_groups[] = {
+ "gpio68", "gpio65",
+};
+
+static const char * const wmss_reset_groups[] = {
+ "gpio63",
+};
+
+static const struct msm_function sc8180x_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(aoss_cti),
+ FUNCTION(atest_char),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb0),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb3),
+ FUNCTION(atest_usb4),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cci_timer5),
+ FUNCTION(cci_timer6),
+ FUNCTION(cci_timer7),
+ FUNCTION(cci_timer8),
+ FUNCTION(cci_timer9),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi),
+ FUNCTION(debug_hot),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(emac_phy),
+ FUNCTION(emac_pps),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_gp4),
+ FUNCTION(gcc_gp5),
+ FUNCTION(gpio),
+ FUNCTION(gps),
+ FUNCTION(grfc),
+ FUNCTION(hs1_mi2s),
+ FUNCTION(hs2_mi2s),
+ FUNCTION(hs3_mi2s),
+ FUNCTION(jitter_bist),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mdp_vsync4),
+ FUNCTION(mdp_vsync5),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(pci_e0),
+ FUNCTION(pci_e1),
+ FUNCTION(pci_e2),
+ FUNCTION(pci_e3),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_reset),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink),
+ FUNCTION(qspi0),
+ FUNCTION(qspi0_clk),
+ FUNCTION(qspi0_cs),
+ FUNCTION(qspi1),
+ FUNCTION(qspi1_clk),
+ FUNCTION(qspi1_cs),
+ FUNCTION(qua_mi2s),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup2),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(rgmii),
+ FUNCTION(sd_write),
+ FUNCTION(sdc4),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sp_cmu),
+ FUNCTION(spkr_i2s),
+ FUNCTION(ter_mi2s),
+ FUNCTION(tgu),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(tsif1),
+ FUNCTION(tsif2),
+ FUNCTION(uim1),
+ FUNCTION(uim2),
+ FUNCTION(uim_batt),
+ FUNCTION(usb0_phy),
+ FUNCTION(usb1_phy),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc),
+ FUNCTION(wlan2_adc),
+ FUNCTION(wmss_reset),
+};
+
+/*
+ * Every pin is maintained as a single group. Missing or non-existent pins
+ * are kept as dummy groups so that the pin group index stays in sync with
+ * the pin descriptors registered with the pinctrl core. Clients cannot
+ * request these dummy pin groups.
+ */
+static const struct msm_pingroup sc8180x_groups[] = {
+ [0] = PINGROUP(0, WEST, qup0, cci_i2c, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, WEST, qup0, cci_i2c, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, WEST, qup0, cci_i2c, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, WEST, qup0, cci_i2c, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, WEST, qup6, rgmii, _, phase_flag, _, _, _, _, _),
+ [5] = PINGROUP(5, WEST, qup6, rgmii, _, phase_flag, _, _, _, _, _),
+ [6] = PINGROUP(6, WEST, qup6, rgmii, qup_l6, _, phase_flag, _, _, _, _),
+ [7] = PINGROUP(7, WEST, qup6, debug_hot, rgmii, qup_l5, _, phase_flag, _, _, _),
+ [8] = PINGROUP(8, EAST, mdp_vsync, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, EAST, mdp_vsync, qup10, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, EAST, edp_hot, m_voc, mdp_vsync, qup10, _, _, _, _, _),
+ [11] = PINGROUP(11, EAST, edp_lcd, qup10, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, EAST, qup10, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, EAST, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, EAST, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, EAST, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, EAST, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, EAST, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, EAST, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, EAST, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, EAST, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, EAST, cci_timer0, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
+ [22] = PINGROUP(22, EAST, cci_timer1, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
+ [23] = PINGROUP(23, EAST, cci_timer2, qup18, qdss_gpio, _, _, _, _, _, _),
+ [24] = PINGROUP(24, EAST, cci_timer3, cci_async, qup18, qdss_gpio, _, _, _, _, _),
+ [25] = PINGROUP(25, EAST, cam_mclk, cci_async, qup18, qdss_gpio, _, _, _, _, _),
+ [26] = PINGROUP(26, EAST, cci_async, qup18, qdss_gpio, _, _, _, _, _, _),
+ [27] = PINGROUP(27, EAST, qup15, _, qdss_gpio, _, _, _, _, _, _),
+ [28] = PINGROUP(28, EAST, qup15, qdss_gpio, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, EAST, qup15, qdss_gpio, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, EAST, qup15, qdss_gpio, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, EAST, cci_i2c, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, EAST, cci_i2c, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, EAST, cci_i2c, qup_l5, _, phase_flag, _, _, _, _, _),
+ [34] = PINGROUP(34, EAST, cci_i2c, qup_l6, dbg_out, atest_usb2, _, _, _, _, _),
+ [35] = PINGROUP(35, SOUTH, pci_e0, qup_l4, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, SOUTH, pci_e0, qup_l5, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, SOUTH, qup_l6, agera_pll, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, SOUTH, usb0_phy, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, EAST, qup9, cci_i2c, qdss_gpio, _, _, _, _, _, _),
+ [40] = PINGROUP(40, EAST, qup9, cci_i2c, qdss_gpio, _, _, _, _, _, _),
+ [41] = PINGROUP(41, EAST, qup9, cci_i2c, qdss_gpio, _, _, _, _, _, _),
+ [42] = PINGROUP(42, EAST, qup9, cci_i2c, qdss_gpio, _, _, _, _, _, _),
+ [43] = PINGROUP(43, EAST, qup13, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, EAST, qup13, _, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, EAST, qup13, _, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, EAST, qup13, _, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, EAST, qup14, usb2phy_ac, _, _, _, _, _, _, _),
+ [48] = PINGROUP(48, EAST, qup14, usb2phy_ac, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, EAST, qup14, qdss_cti, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, EAST, qup14, qdss_cti, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, WEST, qup4, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, WEST, qup4, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, WEST, qup4, _, phase_flag, _, _, _, _, _, _),
+ [54] = PINGROUP(54, WEST, qup4, _, _, phase_flag, _, _, _, _, _),
+ [55] = PINGROUP(55, WEST, qup17, qspi1_cs, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, WEST, qup17, qspi1, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, WEST, qup17, qspi1, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, WEST, usb1_phy, qup17, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, WEST, rgmii, qup_l4, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, EAST, gps, nav_pps, nav_pps, qup_l4, mdp_vsync, atest_usb1, ddr_pxi, _, _),
+ [61] = PINGROUP(61, EAST, qlink, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, EAST, qlink, atest_tsens2, atest_usb1, ddr_pxi, vsense_trigger, _, _, _, _),
+ [63] = PINGROUP(63, EAST, wmss_reset, _, atest_usb1, ddr_pxi, wlan1_adc, _, _, _, _),
+ [64] = PINGROUP(64, EAST, grfc, _, atest_usb1, ddr_pxi, wlan1_adc, _, _, _, _),
+ [65] = PINGROUP(65, EAST, grfc, atest_usb1, ddr_pxi, wlan2_adc, _, _, _, _, _),
+ [66] = PINGROUP(66, EAST, grfc, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, EAST, grfc, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, EAST, grfc, pa_indicator, atest_usb3, ddr_pxi, wlan2_adc, _, _, _, _),
+ [69] = PINGROUP(69, EAST, mss_lte, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, EAST, mss_lte, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, EAST, _, grfc, atest_usb3, ddr_pxi, _, _, _, _, _),
+ [72] = PINGROUP(72, EAST, _, grfc, atest_usb3, ddr_pxi, _, _, _, _, _),
+ [73] = PINGROUP(73, EAST, _, grfc, atest_usb3, ddr_pxi, _, _, _, _, _),
+ [74] = PINGROUP(74, EAST, _, grfc, tgu, atest_usb3, ddr_pxi, _, _, _, _),
+ [75] = PINGROUP(75, EAST, _, grfc, tgu, atest_usb4, ddr_pxi, _, _, _, _),
+ [76] = PINGROUP(76, EAST, _, grfc, gps, nav_pps, nav_pps, tgu, atest_usb4, ddr_pxi, _),
+ [77] = PINGROUP(77, EAST, _, grfc, gps, nav_pps, nav_pps, tgu, atest_usb4, ddr_pxi, _),
+ [78] = PINGROUP(78, EAST, _, grfc, _, atest_usb4, ddr_pxi, _, _, _, _),
+ [79] = PINGROUP(79, EAST, _, grfc, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, EAST, _, grfc, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, EAST, _, grfc, gps, nav_pps, nav_pps, qdss_cti, _, emac_pps, _),
+ [82] = PINGROUP(82, EAST, _, grfc, gps, nav_pps, nav_pps, mdp_vsync, qdss_cti, _, _),
+ [83] = PINGROUP(83, EAST, qup12, qup16, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, EAST, qup12, qup16, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, EAST, qup12, qup16, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, EAST, qup12, qup16, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, SOUTH, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, EAST, tsif1, qup8, qspi0_cs, tgu, atest_usb4, ddr_pxi, _, _, _),
+ [89] = PINGROUP(89, EAST, tsif1, qup8, qspi0, mdp_vsync0, mdp_vsync1, mdp_vsync2, mdp_vsync3, mdp_vsync4, mdp_vsync5),
+ [90] = PINGROUP(90, EAST, tsif1, qup8, qspi0, sdc4_cmd, tgu, qdss_cti, atest_usb0, ddr_pxi, _),
+ [91] = PINGROUP(91, EAST, tsif1, qup8, qspi0, sdc4, vfr_1, tgu, atest_usb0, _, _),
+ [92] = PINGROUP(92, EAST, tsif2, qup11, qspi0_clk, sdc4_clk, qdss_gpio, atest_usb0, _, _, _),
+ [93] = PINGROUP(93, EAST, tsif2, qup11, qspi0, sdc4, atest_tsens, atest_usb0, _, _, _),
+ [94] = PINGROUP(94, EAST, tsif2, qup11, qspi0_cs, sdc4, _, atest_usb0, _, _, _),
+ [95] = PINGROUP(95, EAST, tsif2, qup11, sdc4, qup_l4, atest_usb2, _, _, _, _),
+ [96] = PINGROUP(96, WEST, tsif2, qup_l5, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, WEST, sd_write, tsif1, qup_l6, _, _, _, _, _, _),
+ [98] = PINGROUP(98, WEST, qup7, ddr_bist, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, WEST, qup7, ddr_bist, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, WEST, qup7, pll_bypassnl, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, WEST, qup7, pll_reset, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, SOUTH, pci_e1, _, phase_flag, atest_usb2, _, _, _, _, _),
+ [103] = PINGROUP(103, SOUTH, pci_e1, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, SOUTH, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, WEST, uim2, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, WEST, uim2, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, WEST, uim2, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, WEST, uim2, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, WEST, uim1, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, WEST, uim1, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, WEST, uim1, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, WEST, uim1, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, WEST, uim_batt, usb2phy_ac, aoss_cti, _, _, _, _, _, _),
+ [114] = PINGROUP(114, WEST, qup1, rgmii, _, qdss_gpio, _, _, _, _, _),
+ [115] = PINGROUP(115, WEST, qup1, rgmii, adsp_ext, _, qdss_gpio, _, _, _, _),
+ [116] = PINGROUP(116, WEST, qup1, rgmii, _, qdss_gpio, _, _, _, _, _),
+ [117] = PINGROUP(117, WEST, qup1, rgmii, _, qdss_gpio, _, _, _, _, _),
+ [118] = PINGROUP(118, WEST, rgmii, _, qdss_gpio, _, _, _, _, _, _),
+ [119] = PINGROUP(119, WEST, qup5, rgmii, _, qdss_gpio, _, _, _, _, _),
+ [120] = PINGROUP(120, WEST, qup5, rgmii, _, phase_flag, qdss_gpio, _, _, _, _),
+ [121] = PINGROUP(121, WEST, qup5, rgmii, _, phase_flag, qdss_gpio, atest_usb2, _, _, _),
+ [122] = PINGROUP(122, WEST, qup5, rgmii, _, phase_flag, atest_usb2, _, _, _, _),
+ [123] = PINGROUP(123, SOUTH, usb2phy_ac, _, phase_flag, _, _, _, _, _, _),
+ [124] = PINGROUP(124, SOUTH, emac_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, WEST, hs3_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [126] = PINGROUP(126, WEST, sec_mi2s, qup2, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, WEST, sec_mi2s, qup2, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, WEST, sec_mi2s, qup2, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, WEST, sec_mi2s, qup2, jitter_bist, _, _, _, _, _, _),
+ [130] = PINGROUP(130, WEST, sec_mi2s, pll_bist, _, qdss_gpio, _, _, _, _, _),
+ [131] = PINGROUP(131, WEST, ter_mi2s, gcc_gp1, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, WEST, ter_mi2s, _, qdss_gpio, _, _, _, _, _, _),
+ [133] = PINGROUP(133, WEST, ter_mi2s, _, qdss_gpio, atest_char, _, _, _, _, _),
+ [134] = PINGROUP(134, WEST, ter_mi2s, _, qdss_gpio, atest_char, _, _, _, _, _),
+ [135] = PINGROUP(135, WEST, ter_mi2s, _, qdss_gpio, atest_char, _, _, _, _, _),
+ [136] = PINGROUP(136, WEST, qua_mi2s, gcc_gp1, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, WEST, qua_mi2s, gcc_gp2, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, WEST, qua_mi2s, gcc_gp3, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, WEST, qua_mi2s, gcc_gp4, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, WEST, qua_mi2s, gcc_gp5, _, atest_char, _, _, _, _, _),
+ [141] = PINGROUP(141, WEST, qua_mi2s, qdss_cti, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, WEST, qua_mi2s, _, _, qdss_cti, atest_char, _, _, _, _),
+ [143] = PINGROUP(143, WEST, pri_mi2s, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, WEST, pri_mi2s, qup3, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, WEST, pri_mi2s_ws, qup3, ddr_bist, _, _, _, _, _, _),
+ [146] = PINGROUP(146, WEST, pri_mi2s, qup3, ddr_bist, _, _, _, _, _, _),
+ [147] = PINGROUP(147, WEST, pri_mi2s, qup3, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, WEST, spkr_i2s, audio_ref, _, phase_flag, _, _, _, _, _),
+ [149] = PINGROUP(149, WEST, lpass_slimbus, spkr_i2s, _, phase_flag, _, _, _, _, _),
+ [150] = PINGROUP(150, WEST, lpass_slimbus, spkr_i2s, _, phase_flag, tsense_pwm1, tsense_pwm2, _, _, _),
+ [151] = PINGROUP(151, WEST, lpass_slimbus, spkr_i2s, _, phase_flag, _, _, _, _, _),
+ [152] = PINGROUP(152, WEST, lpass_slimbus, spkr_i2s, _, phase_flag, _, _, _, _, _),
+ [153] = PINGROUP(153, WEST, btfm_slimbus, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, WEST, btfm_slimbus, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, WEST, hs1_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [156] = PINGROUP(156, WEST, hs1_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [157] = PINGROUP(157, WEST, hs1_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [158] = PINGROUP(158, WEST, hs1_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [159] = PINGROUP(159, WEST, hs1_mi2s, cri_trng, _, phase_flag, _, _, _, _, _),
+ [160] = PINGROUP(160, WEST, hs2_mi2s, cri_trng, _, phase_flag, _, _, _, _, _),
+ [161] = PINGROUP(161, WEST, hs2_mi2s, qspi1, cri_trng, _, phase_flag, _, _, _, _),
+ [162] = PINGROUP(162, WEST, hs2_mi2s, qspi1, sp_cmu, _, phase_flag, _, _, _, _),
+ [163] = PINGROUP(163, WEST, hs2_mi2s, qspi1_clk, prng_rosc, _, phase_flag, _, _, _, _),
+ [164] = PINGROUP(164, WEST, hs2_mi2s, qspi1_cs, _, phase_flag, _, _, _, _, _),
+ [165] = PINGROUP(165, WEST, hs3_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [166] = PINGROUP(166, WEST, hs3_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [167] = PINGROUP(167, WEST, hs3_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [168] = PINGROUP(168, WEST, hs3_mi2s, _, phase_flag, _, _, _, _, _, _),
+ [169] = PINGROUP(169, SOUTH, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, SOUTH, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, SOUTH, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, SOUTH, _, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, SOUTH, _, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, SOUTH, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, SOUTH, pci_e2, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, SOUTH, pci_e2, cci_async, _, _, _, _, _, _, _),
+ [177] = PINGROUP_OFFSET(177, SOUTH, 0x1e000, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP_OFFSET(178, SOUTH, 0x1e000, pci_e3, cci_timer4, _, _, _, _, _, _, _),
+ [179] = PINGROUP_OFFSET(179, SOUTH, 0x1e000, pci_e3, cam_mclk, _, _, _, _, _, _, _),
+ [180] = PINGROUP_OFFSET(180, SOUTH, 0x1e000, cam_mclk, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP_OFFSET(181, SOUTH, 0x1e000, qup19, cam_mclk, _, _, _, _, _, _, _),
+ [182] = PINGROUP_OFFSET(182, SOUTH, 0x1e000, qup19, cci_timer5, gcc_gp4, _, _, _, _, _, _),
+ [183] = PINGROUP_OFFSET(183, SOUTH, 0x1e000, qup19, cci_timer6, gcc_gp5, _, _, _, _, _, _),
+ [184] = PINGROUP_OFFSET(184, SOUTH, 0x1e000, qup19, cci_timer7, _, _, _, _, _, _, _),
+ [185] = PINGROUP_OFFSET(185, SOUTH, 0x1e000, cci_timer8, cci_async, _, _, _, _, _, _, _),
+ [186] = PINGROUP_OFFSET(186, SOUTH, 0x1e000, cci_timer9, cci_async, _, _, _, _, _, _, _),
+ [187] = PINGROUP_OFFSET(187, SOUTH, 0x1e000, _, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP_OFFSET(188, SOUTH, 0x1e000, _, _, _, _, _, _, _, _, _),
+ [189] = PINGROUP_OFFSET(189, SOUTH, 0x1e000, dp_hot, _, _, _, _, _, _, _, _),
+ [190] = UFS_RESET(ufs_reset),
+ [191] = SDC_QDSD_PINGROUP(sdc2_clk, 0x4b2000, 14, 6),
+ [192] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x4b2000, 11, 3),
+ [193] = SDC_QDSD_PINGROUP(sdc2_data, 0x4b2000, 9, 0),
+};
+
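+/*
+ * Each { gpio, wakeirq } pair routes a TLMM pin to its interrupt line on
+ * the always-on PDC so the pin can wake the system from suspend; GPIOs
+ * listed twice (e.g. 39, 51, 88, 144) are wired to two PDC inputs.
+ */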
+static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
+ { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
+ { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
+ { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
+ { 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
+ { 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
+ { 77, 36 }, { 81, 64 }, { 83, 65 }, { 86, 67 }, { 87, 84 }, { 88, 117 },
+ { 88, 124 }, { 90, 69 }, { 91, 70 }, { 93, 75 }, { 95, 72 }, { 97, 74 },
+ { 101, 76 }, { 103, 77 }, { 104, 78 }, { 114, 82 }, { 117, 85 },
+ { 118, 101 }, { 119, 87 }, { 120, 88 }, { 121, 89 }, { 122, 90 },
+ { 123, 91 }, { 124, 92 }, { 125, 93 }, { 129, 94 }, { 132, 105 },
+ { 133, 35 }, { 134, 36 }, { 136, 97 }, { 142, 103 }, { 144, 115 },
+ { 144, 122 }, { 147, 106 }, { 150, 107 }, { 152, 108 }, { 153, 109 },
+ { 177, 111 }, { 180, 112 }, { 184, 113 }, { 189, 114 }
+};
+
+static struct msm_pinctrl_soc_data sc8180x_pinctrl = {
+ .tiles = sc8180x_tiles,
+ .ntiles = ARRAY_SIZE(sc8180x_tiles),
+ .pins = sc8180x_pins,
+ .npins = ARRAY_SIZE(sc8180x_pins),
+ .functions = sc8180x_functions,
+ .nfunctions = ARRAY_SIZE(sc8180x_functions),
+ .groups = sc8180x_groups,
+ .ngroups = ARRAY_SIZE(sc8180x_groups),
+ .ngpios = 191,
+ .wakeirq_map = sc8180x_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc8180x_pdc_map),
+};
+
+static int sc8180x_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sc8180x_pinctrl);
+}
+
+static const struct of_device_id sc8180x_pinctrl_of_match[] = {
+ { .compatible = "qcom,sc8180x-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sc8180x_pinctrl_of_match);
+
+static struct platform_driver sc8180x_pinctrl_driver = {
+ .driver = {
+ .name = "sc8180x-pinctrl",
+ .of_match_table = sc8180x_pinctrl_of_match,
+ },
+ .probe = sc8180x_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sc8180x_pinctrl_init(void)
+{
+ return platform_driver_register(&sc8180x_pinctrl_driver);
+}
+arch_initcall(sc8180x_pinctrl_init);
+
+static void __exit sc8180x_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sc8180x_pinctrl_driver);
+}
+module_exit(sc8180x_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI SC8180x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 2834d2c1338c..c51793f6546f 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1310,7 +1310,6 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.ngpios = 151,
.wakeirq_map = sdm845_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
-
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
new file mode 100644
index 000000000000..a406ed0ec7d3
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -0,0 +1,1649 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+
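+/*
+ * Unlike sc8180x, sm8350 exposes a single TLMM region, so groups carry no
+ * tile index and every register offset is derived directly from
+ * REG_SIZE * id.
+ */
+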
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
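+/*
+ * The SDC pad groups expose only pull and drive-strength controls; the -1
+ * sentinels mark the mux, I/O and interrupt fields as not applicable.
+ */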
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
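+/*
+ * The UFS_RESET pad is output-only: it has an io_reg for driving the
+ * reset line, but no mux, input or interrupt support.
+ */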
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8350_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "UFS_RESET"),
+ PINCTRL_PIN(204, "SDC2_CLK"),
+ PINCTRL_PIN(205, "SDC2_CMD"),
+ PINCTRL_PIN(206, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+
+static const unsigned int ufs_reset_pins[] = { 203 };
+static const unsigned int sdc2_clk_pins[] = { 204 };
+static const unsigned int sdc2_cmd_pins[] = { 205 };
+static const unsigned int sdc2_data_pins[] = { 206 };
+
+enum sm8350_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_dp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_lpass_slimbus,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s1_data0,
+ msm_mux_mi2s1_data1,
+ msm_mux_mi2s1_sck,
+ msm_mux_mi2s1_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_mss_grfc0,
+ msm_mux_mss_grfc1,
+ msm_mux_mss_grfc10,
+ msm_mux_mss_grfc11,
+ msm_mux_mss_grfc12,
+ msm_mux_mss_grfc2,
+ msm_mux_mss_grfc3,
+ msm_mux_mss_grfc4,
+ msm_mux_mss_grfc5,
+ msm_mux_mss_grfc6,
+ msm_mux_mss_grfc7,
+ msm_mux_mss_grfc8,
+ msm_mux_mss_grfc9,
+ msm_mux_nav_gpio,
+ msm_mux_pa_indicator,
+ msm_mux_pcie0_clkreqn,
+ msm_mux_pcie1_clkreqn,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_enable,
+ msm_mux_qlink2_request,
+ msm_mux_qlink2_wmss,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup2,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+ "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+ "gpio201", "gpio202",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio115", "gpio117",
+};
+
+static const char * const atest_usb_groups[] = {
+ "gpio55", "gpio80", "gpio81", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio158", "gpio159", "gpio161",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio124",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio106", "gpio118", "gpio119",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio107", "gpio108", "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113", "gpio114",
+};
+
+static const char * const cci_timer_groups[] = {
+ "gpio115", "gpio116", "gpio117", "gpio118", "gpio119",
+};
+
+static const char * const cmu_rng_groups[] = {
+ "gpio174", "gpio175", "gpio176", "gpio177",
+};
+
+static const char * const coex_uart1_groups[] = {
+ "gpio151", "gpio152",
+};
+
+static const char * const coex_uart2_groups[] = {
+ "gpio153", "gpio154",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio186",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio183",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio184",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio14",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio36", "gpio37", "gpio40", "gpio41",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio51", "gpio52",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+ "gpio48", "gpio49",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+ "gpio45", "gpio47",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+ "gpio43", "gpio44",
+};
+
+static const char * const dp_hot_groups[] = {
+ "gpio87",
+};
+
+static const char * const dp_lcd_groups[] = {
+ "gpio83",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio115", "gpio129",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio116", "gpio130",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio117", "gpio131",
+};
+
+static const char * const ibi_i3c_groups[] = {
+ "gpio36", "gpio37", "gpio56", "gpio57", "gpio60", "gpio61",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio80",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio129", "gpio130",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio15", "gpio26", "gpio82", "gpio83", "gpio84",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+ "gpio87",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+ "gpio87",
+};
+
+static const char * const mi2s0_data0_groups[] = {
+ "gpio126",
+};
+
+static const char * const mi2s0_data1_groups[] = {
+ "gpio127",
+};
+
+static const char * const mi2s0_sck_groups[] = {
+ "gpio125",
+};
+
+static const char * const mi2s0_ws_groups[] = {
+ "gpio128",
+};
+
+static const char * const mi2s1_data0_groups[] = {
+ "gpio130",
+};
+
+static const char * const mi2s1_data1_groups[] = {
+ "gpio131",
+};
+
+static const char * const mi2s1_sck_groups[] = {
+ "gpio129",
+};
+
+static const char * const mi2s1_ws_groups[] = {
+ "gpio132",
+};
+
+static const char * const mi2s2_data0_groups[] = {
+ "gpio121",
+};
+
+static const char * const mi2s2_data1_groups[] = {
+ "gpio124",
+};
+
+static const char * const mi2s2_sck_groups[] = {
+ "gpio120",
+};
+
+static const char * const mi2s2_ws_groups[] = {
+ "gpio122",
+};
+
+static const char * const mss_grfc0_groups[] = {
+ "gpio141", "gpio158",
+};
+
+static const char * const mss_grfc1_groups[] = {
+ "gpio142",
+};
+
+static const char * const mss_grfc10_groups[] = {
+ "gpio153",
+};
+
+static const char * const mss_grfc11_groups[] = {
+ "gpio154",
+};
+
+static const char * const mss_grfc12_groups[] = {
+ "gpio157",
+};
+
+static const char * const mss_grfc2_groups[] = {
+ "gpio143",
+};
+
+static const char * const mss_grfc3_groups[] = {
+ "gpio144",
+};
+
+static const char * const mss_grfc4_groups[] = {
+ "gpio145",
+};
+
+static const char * const mss_grfc5_groups[] = {
+ "gpio146",
+};
+
+static const char * const mss_grfc6_groups[] = {
+ "gpio147",
+};
+
+static const char * const mss_grfc7_groups[] = {
+ "gpio148",
+};
+
+static const char * const mss_grfc8_groups[] = {
+ "gpio149",
+};
+
+static const char * const mss_grfc9_groups[] = {
+ "gpio150",
+};
+
+static const char * const nav_gpio_groups[] = {
+ "gpio155", "gpio156", "gpio157",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio157",
+};
+
+static const char * const pcie0_clkreqn_groups[] = {
+ "gpio95",
+};
+
+static const char * const pcie1_clkreqn_groups[] = {
+ "gpio98",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio12", "gpio13", "gpio16", "gpio17", "gpio28", "gpio29", "gpio30",
+ "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio72", "gpio73",
+ "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79", "gpio103",
+ "gpio104", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio81",
+};
+
+static const char * const pll_clk_groups[] = {
+ "gpio81",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio123",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio185",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio14", "gpio27", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194", "gpio195", "gpio196",
+ "gpio197", "gpio198", "gpio199", "gpio200",
+};
+
+static const char * const qlink0_enable_groups[] = {
+ "gpio160",
+};
+
+static const char * const qlink0_request_groups[] = {
+ "gpio159",
+};
+
+static const char * const qlink0_wmss_groups[] = {
+ "gpio161",
+};
+
+static const char * const qlink1_enable_groups[] = {
+ "gpio163",
+};
+
+static const char * const qlink1_request_groups[] = {
+ "gpio162",
+};
+
+static const char * const qlink1_wmss_groups[] = {
+ "gpio164",
+};
+
+static const char * const qlink2_enable_groups[] = {
+ "gpio166",
+};
+
+static const char * const qlink2_request_groups[] = {
+ "gpio165",
+};
+
+static const char * const qlink2_wmss_groups[] = {
+ "gpio167",
+};
+
+static const char * const qspi0_groups[] = {
+ "gpio44",
+};
+
+static const char * const qspi1_groups[] = {
+ "gpio45",
+};
+
+static const char * const qspi2_groups[] = {
+ "gpio48",
+};
+
+static const char * const qspi3_groups[] = {
+ "gpio49",
+};
+
+static const char * const qspi_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const qspi_cs_groups[] = {
+ "gpio47", "gpio51",
+};
+
+static const char * const qup0_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup1_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const qup10_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup11_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup12_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup13_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup14_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const qup15_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char * const qup16_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const qup17_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+};
+
+static const char * const qup18_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const qup19_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const qup2_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const qup3_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup4_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup5_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup6_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char * const qup7_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup8_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup9_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char * const qup_l4_groups[] = {
+ "gpio2", "gpio6", "gpio58", "gpio63",
+};
+
+static const char * const qup_l5_groups[] = {
+ "gpio3", "gpio7", "gpio59", "gpio66",
+};
+
+static const char * const qup_l6_groups[] = {
+ "gpio10", "gpio42", "gpio62", "gpio67",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio93",
+};
+
+static const char * const sdc40_groups[] = {
+ "gpio44",
+};
+
+static const char * const sdc41_groups[] = {
+ "gpio45",
+};
+
+static const char * const sdc42_groups[] = {
+ "gpio48",
+};
+
+static const char * const sdc43_groups[] = {
+ "gpio49",
+};
+
+static const char * const sdc4_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+ "gpio51",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio124",
+};
+
+static const char * const tb_trig_groups[] = {
+ "gpio64", "gpio136",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio99",
+};
+
+static const char * const tgu_ch1_groups[] = {
+ "gpio100",
+};
+
+static const char * const tgu_ch2_groups[] = {
+ "gpio101",
+};
+
+static const char * const tgu_ch3_groups[] = {
+ "gpio102",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+
+static const char * const uim0_clk_groups[] = {
+ "gpio138",
+};
+
+static const char * const uim0_data_groups[] = {
+ "gpio137",
+};
+
+static const char * const uim0_present_groups[] = {
+ "gpio140",
+};
+
+static const char * const uim0_reset_groups[] = {
+ "gpio139",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio134",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio133",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio136",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio135",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio39", "gpio80",
+};
+
+static const char * const usb_phy_groups[] = {
+ "gpio81",
+};
+
+static const char * const vfr_0_groups[] = {
+ "gpio84",
+};
+
+static const char * const vfr_1_groups[] = {
+ "gpio90",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio78",
+};
+
+static const struct msm_function sm8350_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer),
+ FUNCTION(cmu_rng),
+ FUNCTION(coex_uart1),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(dp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s1_data0),
+ FUNCTION(mi2s1_data1),
+ FUNCTION(mi2s1_sck),
+ FUNCTION(mi2s1_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(mss_grfc0),
+ FUNCTION(mss_grfc1),
+ FUNCTION(mss_grfc10),
+ FUNCTION(mss_grfc11),
+ FUNCTION(mss_grfc12),
+ FUNCTION(mss_grfc2),
+ FUNCTION(mss_grfc3),
+ FUNCTION(mss_grfc4),
+ FUNCTION(mss_grfc5),
+ FUNCTION(mss_grfc6),
+ FUNCTION(mss_grfc7),
+ FUNCTION(mss_grfc8),
+ FUNCTION(mss_grfc9),
+ FUNCTION(nav_gpio),
+ FUNCTION(pa_indicator),
+ FUNCTION(pcie0_clkreqn),
+ FUNCTION(pcie1_clkreqn),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_clk),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_enable),
+ FUNCTION(qlink2_request),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup2),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(sd_write),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim0_clk),
+ FUNCTION(uim0_data),
+ FUNCTION(uim0_present),
+ FUNCTION(uim0_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group; missing or non-existent pins
+ * are represented by dummy groups so that the pin group index stays in
+ * sync with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8350_groups[] = {
+ [0] = PINGROUP(0, qup13, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup13, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup13, qup_l4, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup13, qup_l5, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0, qup_l4, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0, qup_l5, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup1, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup1, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup1, qup_l6, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup1, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup2, phase_flag, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup2, phase_flag, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup2, qdss_cti, dbg_out, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup2, mdp_vsync, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup3, phase_flag, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup3, phase_flag, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup3, _, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup3, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup4, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup4, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup4, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup4, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup5, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup5, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup5, mdp_vsync, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup5, qdss_cti, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup6, phase_flag, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup6, phase_flag, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup6, phase_flag, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup6, phase_flag, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup7, phase_flag, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup7, phase_flag, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup7, phase_flag, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup7, phase_flag, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup8, ibi_i3c, ddr_bist, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup8, ibi_i3c, ddr_bist, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup8, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup8, usb2phy_ac, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup9, ddr_bist, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup9, ddr_bist, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, qup9, qup_l6, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup9, ddr_pxi3, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup10, qspi0, sdc40, ddr_pxi3, _, _, _, _, _),
+ [45] = PINGROUP(45, qup10, qspi1, sdc41, ddr_pxi2, _, _, _, _, _),
+ [46] = PINGROUP(46, qup10, _, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup10, qspi_cs, ddr_pxi2, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup11, qspi2, sdc42, ddr_pxi1, _, _, _, _, _),
+ [49] = PINGROUP(49, qup11, qspi3, sdc43, ddr_pxi1, _, _, _, _, _),
+ [50] = PINGROUP(50, qup11, qspi_clk, sdc4_clk, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup11, qspi_cs, sdc4_cmd, ddr_pxi0, _, _, _, _, _),
+ [52] = PINGROUP(52, qup12, ddr_pxi0, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, qup12, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup12, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup12, atest_usb, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup14, ibi_i3c, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup14, ibi_i3c, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup14, qup_l4, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup14, qup_l5, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qup15, qup_l6, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup15, qup_l4, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup16, tb_trig, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup16, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, qup16, qup_l5, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup16, qup_l6, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qup18, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup18, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup18, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup18, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup17, phase_flag, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup17, phase_flag, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup17, phase_flag, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup17, phase_flag, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup19, phase_flag, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup19, phase_flag, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup19, phase_flag, _, vsense_trigger, _, _, _, _, _),
+ [79] = PINGROUP(79, qup19, phase_flag, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, usb2phy_ac, jitter_bist, atest_usb, _, _, _, _, _, _),
+ [81] = PINGROUP(81, usb_phy, pll_bist, pll_clk, atest_usb, _, _, _, _, _),
+ [82] = PINGROUP(82, mdp_vsync, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, mdp_vsync, dp_lcd, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, mdp_vsync, vfr_0, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, atest_char, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, mdp_vsync0, mdp_vsync1, atest_char, _, _, _, _, _, _),
+ [87] = PINGROUP(87, dp_hot, mdp_vsync2, mdp_vsync3, qdss_cti, atest_char, _, _, _, _),
+ [88] = PINGROUP(88, qdss_cti, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qdss_cti, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, vfr_1, qdss_cti, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qdss_cti, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, qdss_cti, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, sd_write, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, pcie0_clkreqn, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pcie1_clkreqn, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, tgu_ch0, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_mclk, tgu_ch1, qdss_gpio, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cam_mclk, tgu_ch2, qdss_gpio, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cam_mclk, tgu_ch3, qdss_gpio, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cam_mclk, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [104] = PINGROUP(104, cam_mclk, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [105] = PINGROUP(105, cam_mclk, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [106] = PINGROUP(106, cci_async, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [107] = PINGROUP(107, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [108] = PINGROUP(108, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [114] = PINGROUP(114, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [115] = PINGROUP(115, cci_timer, gcc_gp1, qdss_gpio, atest_char, _, _, _, _, _),
+ [116] = PINGROUP(116, cci_timer, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
+ [117] = PINGROUP(117, cci_timer, gcc_gp3, qdss_gpio, atest_char, _, _, _, _, _),
+ [118] = PINGROUP(118, cci_timer, cci_async, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, cci_timer, cci_async, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, mi2s2_sck, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, mi2s2_data0, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, mi2s2_ws, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, pri_mi2s, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, sec_mi2s, audio_ref, mi2s2_data1, _, _, _, _, _, _),
+ [125] = PINGROUP(125, mi2s0_sck, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, mi2s0_ws, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, lpass_slimbus, mi2s1_sck, gcc_gp1, _, _, _, _, _, _),
+ [130] = PINGROUP(130, lpass_slimbus, mi2s1_data0, gcc_gp2, _, _, _, _, _, _),
+ [131] = PINGROUP(131, mi2s1_data1, gcc_gp3, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, mi2s1_ws, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, uim1_data, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, uim1_clk, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, uim1_reset, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, uim1_present, tb_trig, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, uim0_data, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, uim0_clk, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, uim0_reset, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, uim0_present, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, mss_grfc0, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, mss_grfc1, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, mss_grfc2, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, mss_grfc3, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, mss_grfc4, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, mss_grfc5, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, mss_grfc6, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, mss_grfc7, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, mss_grfc8, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, mss_grfc9, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, coex_uart2, mss_grfc10, atest_usb, _, _, _, _, _, _),
+ [154] = PINGROUP(154, coex_uart2, mss_grfc11, atest_usb, _, _, _, _, _, _),
+ [155] = PINGROUP(155, nav_gpio, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, nav_gpio, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, mss_grfc12, pa_indicator, nav_gpio, _, _, _, _, _, _),
+ [158] = PINGROUP(158, mss_grfc0, atest_usb, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, qlink0_request, atest_usb, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, qlink0_enable, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qlink0_wmss, atest_usb, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, qlink1_request, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, qlink1_enable, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, qlink2_request, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, qlink2_enable, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, qlink2_wmss, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, cmu_rng, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, cmu_rng, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, cmu_rng, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, cmu_rng, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, _, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, _, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, cri_trng0, qdss_gpio, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, cri_trng1, qdss_gpio, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, prng_rosc, qdss_gpio, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, cri_trng, qdss_gpio, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, qdss_gpio, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, qdss_gpio, _, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, qdss_gpio, _, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, qdss_gpio, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, qdss_gpio, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, qdss_gpio, _, _, _, _, _, _, _, _),
+ [193] = PINGROUP(193, qdss_gpio, _, _, _, _, _, _, _, _),
+ [194] = PINGROUP(194, qdss_gpio, _, _, _, _, _, _, _, _),
+ [195] = PINGROUP(195, qdss_gpio, _, _, _, _, _, _, _, _),
+ [196] = PINGROUP(196, qdss_gpio, _, _, _, _, _, _, _, _),
+ [197] = PINGROUP(197, qdss_gpio, _, _, _, _, _, _, _, _),
+ [198] = PINGROUP(198, qdss_gpio, _, _, _, _, _, _, _, _),
+ [199] = PINGROUP(199, qdss_gpio, _, _, _, _, _, _, _, _),
+ [200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
+ [203] = UFS_RESET(ufs_reset, 0x1d8000),
+ [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6),
+ [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3),
+ [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sm8350_tlmm = {
+ .pins = sm8350_pins,
+ .npins = ARRAY_SIZE(sm8350_pins),
+ .functions = sm8350_functions,
+ .nfunctions = ARRAY_SIZE(sm8350_functions),
+ .groups = sm8350_groups,
+ .ngroups = ARRAY_SIZE(sm8350_groups),
+ .ngpios = 204,
+};
+
+static int sm8350_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8350_tlmm);
+}
+
+static const struct of_device_id sm8350_tlmm_of_match[] = {
+ { .compatible = "qcom,sm8350-tlmm", },
+ { },
+};
+
+static struct platform_driver sm8350_tlmm_driver = {
+ .driver = {
+ .name = "sm8350-tlmm",
+ .of_match_table = sm8350_tlmm_of_match,
+ },
+ .probe = sm8350_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8350_tlmm_init(void)
+{
+ return platform_driver_register(&sm8350_tlmm_driver);
+}
+arch_initcall(sm8350_tlmm_init);
+
+static void __exit sm8350_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm8350_tlmm_driver);
+}
+module_exit(sm8350_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM8350 TLMM driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm8350_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 48602dba4967..3c213f799feb 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -912,6 +912,7 @@ static int pmic_mpp_remove(struct platform_device *pdev)
}
static const struct of_device_id pmic_mpp_of_match[] = {
+ { .compatible = "qcom,pm8019-mpp" }, /* 6 MPP's */
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 681d8dcf37e3..92e7f2602847 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -617,7 +617,6 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
}
break;
}
-
}
static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 42b1c6cecb57..1f4bca854add 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -193,7 +193,6 @@ static struct rt2880_pmx_func gpio_func = {
static int rt2880_pinmux_index(struct rt2880_priv *p)
{
- struct rt2880_pmx_func **f;
struct rt2880_pmx_group *mux = p->groups;
int i, j, c = 0;
@@ -207,7 +206,7 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
p->group_names = devm_kcalloc(p->dev, p->group_count,
sizeof(char *), GFP_KERNEL);
if (!p->group_names)
- return -1;
+ return -ENOMEM;
for (i = 0; i < p->group_count; i++) {
p->group_names[i] = p->groups[i].name;
@@ -218,31 +217,31 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
p->func_count++;
/* allocate our function and group mapping index buffers */
- f = p->func = devm_kcalloc(p->dev,
- p->func_count,
- sizeof(*p->func),
- GFP_KERNEL);
+ p->func = devm_kcalloc(p->dev, p->func_count,
+ sizeof(*p->func), GFP_KERNEL);
gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
GFP_KERNEL);
- if (!f || !gpio_func.groups)
- return -1;
+ if (!p->func || !gpio_func.groups)
+ return -ENOMEM;
/* add a backpointer to the function so it knows its group */
gpio_func.group_count = p->group_count;
for (i = 0; i < gpio_func.group_count; i++)
gpio_func.groups[i] = i;
- f[c] = &gpio_func;
+ p->func[c] = &gpio_func;
c++;
/* add remaining functions */
for (i = 0; i < p->group_count; i++) {
for (j = 0; j < p->groups[i].func_count; j++) {
- f[c] = &p->groups[i].func[j];
- f[c]->groups = devm_kzalloc(p->dev, sizeof(int),
+ p->func[c] = &p->groups[i].func[j];
+ p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
GFP_KERNEL);
- f[c]->groups[0] = i;
- f[c]->group_count = 1;
+ if (!p->func[c]->groups)
+ return -ENOMEM;
+ p->func[c]->groups[0] = i;
+ p->func[c]->group_count = 1;
c++;
}
}
@@ -280,10 +279,8 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p)
/* the pads needed to tell pinctrl about our pins */
p->pads = devm_kcalloc(p->dev, p->max_pins,
sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
- if (!p->pads || !p->gpio) {
- dev_err(p->dev, "Failed to allocate gpio data\n");
+ if (!p->pads || !p->gpio)
return -ENOMEM;
- }
memset(p->gpio, 1, sizeof(u8) * p->max_pins);
for (i = 0; i < p->func_count; i++) {
@@ -318,6 +315,7 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
{
struct rt2880_priv *p;
struct pinctrl_dev *dev;
+ int err;
if (!rt2880_pinmux_data)
return -ENOTSUPP;
@@ -333,19 +331,20 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
/* init the device */
- if (rt2880_pinmux_index(p)) {
+ err = rt2880_pinmux_index(p);
+ if (err) {
dev_err(&pdev->dev, "failed to load index\n");
- return -EINVAL;
+ return err;
}
- if (rt2880_pinmux_pins(p)) {
+
+ err = rt2880_pinmux_pins(p);
+ if (err) {
dev_err(&pdev->dev, "failed to load pins\n");
- return -EINVAL;
+ return err;
}
dev = pinctrl_register(p->desc, &pdev->dev, p);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- return 0;
+ return PTR_ERR_OR_ZERO(dev);
}
static const struct of_device_id rt2880_pinmux_match[] = {
@@ -362,7 +361,7 @@ static struct platform_driver rt2880_pinmux_driver = {
},
};
-int __init rt2880_pinmux_init(void)
+static int __init rt2880_pinmux_init(void)
{
return platform_driver_register(&rt2880_pinmux_driver);
}
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index e941b8440dbc..4b84a744ae87 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -36,6 +36,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
select PINCTRL_PFC_R8A77990 if ARCH_R8A77990
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
+ select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -142,6 +143,10 @@ config PINCTRL_PFC_R8A77970
bool "pin control support for R-Car V3M" if COMPILE_TEST
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A779A0
+ bool "pin control support for R-Car V3U" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 1f6d7dd019d8..353563228dc2 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 2cc457279345..2bfd3006f6fd 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -175,13 +175,25 @@ u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg)
return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32);
}
-void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data)
+static void sh_pfc_unlock_reg(struct sh_pfc *pfc, u32 reg, u32 data)
{
- if (pfc->info->unlock_reg)
- sh_pfc_write_raw_reg(
- sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
- ~data);
+ u32 unlock;
+
+ if (!pfc->info->unlock_reg)
+ return;
+
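+	/*
+	 * Older SoCs store the absolute address of the single write-enable
+	 * register in unlock_reg; newer ones store an address mask instead,
+	 * selecting the unlock register paired with the register written.
+	 */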
+ if (pfc->info->unlock_reg >= 0x80000000UL)
+ unlock = pfc->info->unlock_reg;
+ else
+ /* unlock_reg is a mask */
+ unlock = reg & ~pfc->info->unlock_reg;
+ sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, unlock), 32, ~data);
+}
+
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data)
+{
+ sh_pfc_unlock_reg(pfc, reg, data);
sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32, data);
}
@@ -227,11 +239,7 @@ static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
data &= mask;
data |= value;
- if (pfc->info->unlock_reg)
- sh_pfc_write_raw_reg(
- sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
- ~data);
-
+ sh_pfc_unlock_reg(pfc, crp->reg, data);
sh_pfc_write_raw_reg(mapped_reg, crp->reg_width, data);
}
@@ -638,6 +646,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a77995_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779A0
+ {
+ .compatible = "renesas,pfc-r8a779a0",
+ .data = &r8a779a0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
@@ -1052,6 +1066,10 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
{
unsigned int i;
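+	/*
+	 * Skip the register table self-checks when this driver cannot bind
+	 * on the running system (not SuperH, and no matching DT node).
+	 */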
+ if (!IS_ENABLED(CONFIG_SUPERH) &&
+ !of_find_matching_node(NULL, pdrv->driver.of_match_table))
+ return;
+
sh_pfc_regs = kcalloc(SH_PFC_MAX_REGS, sizeof(*sh_pfc_regs),
GFP_KERNEL);
if (!sh_pfc_regs)
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 32b66b9999b8..32fe8caca70a 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -1668,7 +1668,6 @@ static const unsigned int avb_mii_pins[] = {
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
PIN_AVB_TXCREFCLK,
-
};
static const unsigned int avb_mii_mux[] = {
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index cf14420794c7..bdd605e41303 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
@@ -1727,7 +1727,6 @@ static const unsigned int avb_mii_pins[] = {
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
PIN_AVB_TXCREFCLK,
-
};
static const unsigned int avb_mii_mux[] = {
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index 38d963561b5f..96b5b1509bb7 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -1731,7 +1731,6 @@ static const unsigned int avb_mii_pins[] = {
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
PIN_AVB_TXCREFCLK,
-
};
static const unsigned int avb_mii_mux[] = {
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index 92f231baff7d..f15e29383d9b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -1736,7 +1736,6 @@ static const unsigned int avb_mii_pins[] = {
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
PIN_AVB_TXCREFCLK,
-
};
static const unsigned int avb_mii_mux[] = {
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
new file mode 100644
index 000000000000..2250ccd0470a
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -0,0 +1,4460 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779A0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7795.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
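+/*
+ * CPU_ALL_GP() expands fn/sfx once per GPIO in banks GP0-GP9.  Every pin
+ * supports drive-strength and pull-up/down control (CFG_FLAGS); pins that
+ * also carry an SH_PFC_PIN_CFG_IO_VOLTAGE_* flag support I/O voltage
+ * switching.
+ */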
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_15(0, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(0, 15, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 16, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 17, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 18, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 19, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 20, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 21, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 22, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 23, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 24, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 25, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 26, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(0, 27, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_31(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_2(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 5, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 6, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 7, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 9, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 10, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 11, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 12, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 13, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 14, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 15, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(2, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(4, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(5, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(5, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(5, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(5, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(6, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(6, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(7, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(7, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(7, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(7, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(8, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(8, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(8, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(9, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_25_33),\
+ PORT_GP_CFG_1(9, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(9, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(9, 20, fn, sfx, CFG_FLAGS)
+
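+/* Pins without a GPIO port; only pull-up/down control is configurable. */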
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(EXTALR, "EXTALR", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(DCUTRST_N_LPDRST_N, "DCUTRST#_LPDRST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(DCUTCK_LPDCLK, "DCUTCK_LPDCLK", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(DCUTMS, "DCUTMS", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(DCUTDI_LPDI, "DCUTDI_LPDI", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_27 FM(MMC_D7)
+#define GPSR0_26 FM(MMC_D6)
+#define GPSR0_25 FM(MMC_D5)
+#define GPSR0_24 FM(MMC_D4)
+#define GPSR0_23 FM(MMC_SD_CLK)
+#define GPSR0_22 FM(MMC_SD_D3)
+#define GPSR0_21 FM(MMC_SD_D2)
+#define GPSR0_20 FM(MMC_SD_D1)
+#define GPSR0_19 FM(MMC_SD_D0)
+#define GPSR0_18 FM(MMC_SD_CMD)
+#define GPSR0_17 FM(MMC_DS)
+#define GPSR0_16 FM(SD_CD)
+#define GPSR0_15 FM(SD_WP)
+#define GPSR0_14 FM(RPC_INT_N)
+#define GPSR0_13 FM(RPC_WP_N)
+#define GPSR0_12 FM(RPC_RESET_N)
+#define GPSR0_11 FM(QSPI1_SSL)
+#define GPSR0_10 FM(QSPI1_IO3)
+#define GPSR0_9 FM(QSPI1_IO2)
+#define GPSR0_8 FM(QSPI1_MISO_IO1)
+#define GPSR0_7 FM(QSPI1_MOSI_IO0)
+#define GPSR0_6 FM(QSPI1_SPCLK)
+#define GPSR0_5 FM(QSPI0_SSL)
+#define GPSR0_4 FM(QSPI0_IO3)
+#define GPSR0_3 FM(QSPI0_IO2)
+#define GPSR0_2 FM(QSPI0_MISO_IO1)
+#define GPSR0_1 FM(QSPI0_MOSI_IO0)
+#define GPSR0_0 FM(QSPI0_SPCLK)
+
+/* GPSR1 */
+#define GPSR1_30 F_(GP1_30, IP3SR1_27_24)
+#define GPSR1_29 F_(GP1_29, IP3SR1_23_20)
+#define GPSR1_28 F_(GP1_28, IP3SR1_19_16)
+#define GPSR1_27 F_(IRQ3, IP3SR1_15_12)
+#define GPSR1_26 F_(IRQ2, IP3SR1_11_8)
+#define GPSR1_25 F_(IRQ1, IP3SR1_7_4)
+#define GPSR1_24 F_(IRQ0, IP3SR1_3_0)
+#define GPSR1_23 F_(MSIOF2_SS2, IP2SR1_31_28)
+#define GPSR1_22 F_(MSIOF2_SS1, IP2SR1_27_24)
+#define GPSR1_21 F_(MSIOF2_SYNC, IP2SR1_23_20)
+#define GPSR1_20 F_(MSIOF2_SCK, IP2SR1_19_16)
+#define GPSR1_19 F_(MSIOF2_TXD, IP2SR1_15_12)
+#define GPSR1_18 F_(MSIOF2_RXD, IP2SR1_11_8)
+#define GPSR1_17 F_(MSIOF1_SS2, IP2SR1_7_4)
+#define GPSR1_16 F_(MSIOF1_SS1, IP2SR1_3_0)
+#define GPSR1_15 F_(MSIOF1_SYNC, IP1SR1_31_28)
+#define GPSR1_14 F_(MSIOF1_SCK, IP1SR1_27_24)
+#define GPSR1_13 F_(MSIOF1_TXD, IP1SR1_23_20)
+#define GPSR1_12 F_(MSIOF1_RXD, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_SS2, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SS1, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_SYNC, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SCK, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_TXD, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_RXD, IP0SR1_27_24)
+#define GPSR1_5 F_(HTX0, IP0SR1_23_20)
+#define GPSR1_4 F_(HCTS0_N, IP0SR1_19_16)
+#define GPSR1_3 F_(HRTS0_N, IP0SR1_15_12)
+#define GPSR1_2 F_(HSCK0, IP0SR1_11_8)
+#define GPSR1_1 F_(HRX0, IP0SR1_7_4)
+#define GPSR1_0 F_(SCIF_CLK, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_24 FM(TCLK2_A)
+#define GPSR2_23 F_(TCLK1_A, IP2SR2_31_28)
+#define GPSR2_22 F_(TPU0TO1, IP2SR2_27_24)
+#define GPSR2_21 F_(TPU0TO0, IP2SR2_23_20)
+#define GPSR2_20 F_(CLK_EXTFXR, IP2SR2_19_16)
+#define GPSR2_19 F_(RXDB_EXTFXR, IP2SR2_15_12)
+#define GPSR2_18 F_(FXR_TXDB, IP2SR2_11_8)
+#define GPSR2_17 F_(RXDA_EXTFXR_A, IP2SR2_7_4)
+#define GPSR2_16 F_(FXR_TXDA_A, IP2SR2_3_0)
+#define GPSR2_15 F_(GP2_15, IP1SR2_31_28)
+#define GPSR2_14 F_(GP2_14, IP1SR2_27_24)
+#define GPSR2_13 F_(GP2_13, IP1SR2_23_20)
+#define GPSR2_12 F_(GP2_12, IP1SR2_19_16)
+#define GPSR2_11 F_(GP2_11, IP1SR2_15_12)
+#define GPSR2_10 F_(GP2_10, IP1SR2_11_8)
+#define GPSR2_9 F_(GP2_09, IP1SR2_7_4)
+#define GPSR2_8 F_(GP2_08, IP1SR2_3_0)
+#define GPSR2_7 F_(GP2_07, IP0SR2_31_28)
+#define GPSR2_6 F_(GP2_06, IP0SR2_27_24)
+#define GPSR2_5 F_(GP2_05, IP0SR2_23_20)
+#define GPSR2_4 F_(GP2_04, IP0SR2_19_16)
+#define GPSR2_3 F_(GP2_03, IP0SR2_15_12)
+#define GPSR2_2 F_(GP2_02, IP0SR2_11_8)
+#define GPSR2_1 F_(IPC_CLKOUT, IP0SR2_7_4)
+#define GPSR2_0 F_(IPC_CLKIN, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_16 FM(CANFD7_RX)
+#define GPSR3_15 FM(CANFD7_TX)
+#define GPSR3_14 FM(CANFD6_RX)
+#define GPSR3_13 F_(CANFD6_TX, IP1SR3_23_20)
+#define GPSR3_12 F_(CANFD5_RX, IP1SR3_19_16)
+#define GPSR3_11 F_(CANFD5_TX, IP1SR3_15_12)
+#define GPSR3_10 F_(CANFD4_RX, IP1SR3_11_8)
+#define GPSR3_9 F_(CANFD4_TX, IP1SR3_7_4)
+#define GPSR3_8 F_(CANFD3_RX, IP1SR3_3_0)
+#define GPSR3_7 F_(CANFD3_TX, IP0SR3_31_28)
+#define GPSR3_6 F_(CANFD2_RX, IP0SR3_27_24)
+#define GPSR3_5 F_(CANFD2_TX, IP0SR3_23_20)
+#define GPSR3_4 FM(CANFD1_RX)
+#define GPSR3_3 FM(CANFD1_TX)
+#define GPSR3_2 F_(CANFD0_RX, IP0SR3_11_8)
+#define GPSR3_1 F_(CANFD0_TX, IP0SR3_7_4)
+#define GPSR3_0 FM(CAN_CLK)
+
+/* GPSR4 */
+#define GPSR4_26 FM(AVS1)
+#define GPSR4_25 FM(AVS0)
+#define GPSR4_24 FM(PCIE3_CLKREQ_N)
+#define GPSR4_23 FM(PCIE2_CLKREQ_N)
+#define GPSR4_22 FM(PCIE1_CLKREQ_N)
+#define GPSR4_21 FM(PCIE0_CLKREQ_N)
+#define GPSR4_20 F_(AVB0_AVTP_PPS, IP2SR4_19_16)
+#define GPSR4_19 F_(AVB0_AVTP_CAPTURE, IP2SR4_15_12)
+#define GPSR4_18 F_(AVB0_AVTP_MATCH, IP2SR4_11_8)
+#define GPSR4_17 F_(AVB0_LINK, IP2SR4_7_4)
+#define GPSR4_16 FM(AVB0_PHY_INT)
+#define GPSR4_15 F_(AVB0_MAGIC, IP1SR4_31_28)
+#define GPSR4_14 F_(AVB0_MDC, IP1SR4_27_24)
+#define GPSR4_13 F_(AVB0_MDIO, IP1SR4_23_20)
+#define GPSR4_12 F_(AVB0_TXCREFCLK, IP1SR4_19_16)
+#define GPSR4_11 F_(AVB0_TD3, IP1SR4_15_12)
+#define GPSR4_10 F_(AVB0_TD2, IP1SR4_11_8)
+#define GPSR4_9 F_(AVB0_TD1, IP1SR4_7_4)
+#define GPSR4_8 F_(AVB0_TD0, IP1SR4_3_0)
+#define GPSR4_7 F_(AVB0_TXC, IP0SR4_31_28)
+#define GPSR4_6 F_(AVB0_TX_CTL, IP0SR4_27_24)
+#define GPSR4_5 F_(AVB0_RD3, IP0SR4_23_20)
+#define GPSR4_4 F_(AVB0_RD2, IP0SR4_19_16)
+#define GPSR4_3 F_(AVB0_RD1, IP0SR4_15_12)
+#define GPSR4_2 F_(AVB0_RD0, IP0SR4_11_8)
+#define GPSR4_1 F_(AVB0_RXC, IP0SR4_7_4)
+#define GPSR4_0 F_(AVB0_RX_CTL, IP0SR4_3_0)
+
+/* GPSR5 */
+#define GPSR5_20 F_(AVB1_AVTP_PPS, IP2SR5_19_16)
+#define GPSR5_19 F_(AVB1_AVTP_CAPTURE, IP2SR5_15_12)
+#define GPSR5_18 F_(AVB1_AVTP_MATCH, IP2SR5_11_8)
+#define GPSR5_17 F_(AVB1_LINK, IP2SR5_7_4)
+#define GPSR5_16 FM(AVB1_PHY_INT)
+#define GPSR5_15 F_(AVB1_MAGIC, IP1SR5_31_28)
+#define GPSR5_14 F_(AVB1_MDC, IP1SR5_27_24)
+#define GPSR5_13 F_(AVB1_MDIO, IP1SR5_23_20)
+#define GPSR5_12 F_(AVB1_TXCREFCLK, IP1SR5_19_16)
+#define GPSR5_11 F_(AVB1_TD3, IP1SR5_15_12)
+#define GPSR5_10 F_(AVB1_TD2, IP1SR5_11_8)
+#define GPSR5_9 F_(AVB1_TD1, IP1SR5_7_4)
+#define GPSR5_8 F_(AVB1_TD0, IP1SR5_3_0)
+#define GPSR5_7 F_(AVB1_TXC, IP0SR5_31_28)
+#define GPSR5_6 F_(AVB1_TX_CTL, IP0SR5_27_24)
+#define GPSR5_5 F_(AVB1_RD3, IP0SR5_23_20)
+#define GPSR5_4 F_(AVB1_RD2, IP0SR5_19_16)
+#define GPSR5_3 F_(AVB1_RD1, IP0SR5_15_12)
+#define GPSR5_2 F_(AVB1_RD0, IP0SR5_11_8)
+#define GPSR5_1 F_(AVB1_RXC, IP0SR5_7_4)
+#define GPSR5_0 F_(AVB1_RX_CTL, IP0SR5_3_0)
+
+/* GPSR6 */
+#define GPSR6_20 FM(AVB2_AVTP_PPS)
+#define GPSR6_19 FM(AVB2_AVTP_CAPTURE)
+#define GPSR6_18 FM(AVB2_AVTP_MATCH)
+#define GPSR6_17 FM(AVB2_LINK)
+#define GPSR6_16 FM(AVB2_PHY_INT)
+#define GPSR6_15 FM(AVB2_MAGIC)
+#define GPSR6_14 FM(AVB2_MDC)
+#define GPSR6_13 FM(AVB2_MDIO)
+#define GPSR6_12 FM(AVB2_TXCREFCLK)
+#define GPSR6_11 FM(AVB2_TD3)
+#define GPSR6_10 FM(AVB2_TD2)
+#define GPSR6_9 FM(AVB2_TD1)
+#define GPSR6_8 FM(AVB2_TD0)
+#define GPSR6_7 FM(AVB2_TXC)
+#define GPSR6_6 FM(AVB2_TX_CTL)
+#define GPSR6_5 FM(AVB2_RD3)
+#define GPSR6_4 FM(AVB2_RD2)
+#define GPSR6_3 FM(AVB2_RD1)
+#define GPSR6_2 FM(AVB2_RD0)
+#define GPSR6_1 FM(AVB2_RXC)
+#define GPSR6_0 FM(AVB2_RX_CTL)
+
+/* GPSR7 */
+#define GPSR7_20 FM(AVB3_AVTP_PPS)
+#define GPSR7_19 FM(AVB3_AVTP_CAPTURE)
+#define GPSR7_18 FM(AVB3_AVTP_MATCH)
+#define GPSR7_17 FM(AVB3_LINK)
+#define GPSR7_16 FM(AVB3_PHY_INT)
+#define GPSR7_15 FM(AVB3_MAGIC)
+#define GPSR7_14 FM(AVB3_MDC)
+#define GPSR7_13 FM(AVB3_MDIO)
+#define GPSR7_12 FM(AVB3_TXCREFCLK)
+#define GPSR7_11 FM(AVB3_TD3)
+#define GPSR7_10 FM(AVB3_TD2)
+#define GPSR7_9 FM(AVB3_TD1)
+#define GPSR7_8 FM(AVB3_TD0)
+#define GPSR7_7 FM(AVB3_TXC)
+#define GPSR7_6 FM(AVB3_TX_CTL)
+#define GPSR7_5 FM(AVB3_RD3)
+#define GPSR7_4 FM(AVB3_RD2)
+#define GPSR7_3 FM(AVB3_RD1)
+#define GPSR7_2 FM(AVB3_RD0)
+#define GPSR7_1 FM(AVB3_RXC)
+#define GPSR7_0 FM(AVB3_RX_CTL)
+
+/* GPSR8 */
+#define GPSR8_20 FM(AVB4_AVTP_PPS)
+#define GPSR8_19 FM(AVB4_AVTP_CAPTURE)
+#define GPSR8_18 FM(AVB4_AVTP_MATCH)
+#define GPSR8_17 FM(AVB4_LINK)
+#define GPSR8_16 FM(AVB4_PHY_INT)
+#define GPSR8_15 FM(AVB4_MAGIC)
+#define GPSR8_14 FM(AVB4_MDC)
+#define GPSR8_13 FM(AVB4_MDIO)
+#define GPSR8_12 FM(AVB4_TXCREFCLK)
+#define GPSR8_11 FM(AVB4_TD3)
+#define GPSR8_10 FM(AVB4_TD2)
+#define GPSR8_9 FM(AVB4_TD1)
+#define GPSR8_8 FM(AVB4_TD0)
+#define GPSR8_7 FM(AVB4_TXC)
+#define GPSR8_6 FM(AVB4_TX_CTL)
+#define GPSR8_5 FM(AVB4_RD3)
+#define GPSR8_4 FM(AVB4_RD2)
+#define GPSR8_3 FM(AVB4_RD1)
+#define GPSR8_2 FM(AVB4_RD0)
+#define GPSR8_1 FM(AVB4_RXC)
+#define GPSR8_0 FM(AVB4_RX_CTL)
+
+/* GPSR9 */
+#define GPSR9_20 FM(AVB5_AVTP_PPS)
+#define GPSR9_19 FM(AVB5_AVTP_CAPTURE)
+#define GPSR9_18 FM(AVB5_AVTP_MATCH)
+#define GPSR9_17 FM(AVB5_LINK)
+#define GPSR9_16 FM(AVB5_PHY_INT)
+#define GPSR9_15 FM(AVB5_MAGIC)
+#define GPSR9_14 FM(AVB5_MDC)
+#define GPSR9_13 FM(AVB5_MDIO)
+#define GPSR9_12 FM(AVB5_TXCREFCLK)
+#define GPSR9_11 FM(AVB5_TD3)
+#define GPSR9_10 FM(AVB5_TD2)
+#define GPSR9_9 FM(AVB5_TD1)
+#define GPSR9_8 FM(AVB5_TD0)
+#define GPSR9_7 FM(AVB5_TXC)
+#define GPSR9_6 FM(AVB5_TX_CTL)
+#define GPSR9_5 FM(AVB5_RD3)
+#define GPSR9_4 FM(AVB5_RD2)
+#define GPSR9_3 FM(AVB5_RD1)
+#define GPSR9_2 FM(AVB5_RD0)
+#define GPSR9_1 FM(AVB5_RXC)
+#define GPSR9_0 FM(AVB5_RX_CTL)
+
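+/*
+ * Each IPnSRm register packs eight 4-bit fields, one per pin, selecting
+ * one of up to 16 functions (columns 0..F).  Only values 0..5 are used
+ * on this SoC; the remaining encodings are reserved (F_(0, 0)).
+ */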
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP0SR1_3_0 FM(SCIF_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(A0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) FM(A1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(HSCK0) FM(SCK0) F_(0, 0) F_(0, 0) F_(0, 0) FM(A2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(HRTS0_N) FM(RTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) FM(A3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(HCTS0_N) FM(CTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) FM(A4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) FM(A5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR2) FM(A6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_TXD) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR3) FM(A7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP1SR1_3_0 FM(MSIOF0_SCK) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR4) FM(A8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR5) FM(A9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SS1) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR6) FM(A10) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_SS2) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR7) FM(A11) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DG2) FM(A12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(MSIOF1_TXD) FM(HRX3) FM(SCK3) F_(0, 0) FM(DU_DG3) FM(A13) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(MSIOF1_SCK) FM(HSCK3) FM(CTS3_N) F_(0, 0) FM(DU_DG4) FM(A14) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(MSIOF1_SYNC) FM(HRTS3_N) FM(RTS3_N) F_(0, 0) FM(DU_DG5) FM(A15) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP2SR1_3_0 FM(MSIOF1_SS1) FM(HCTS3_N) FM(RX3) F_(0, 0) FM(DU_DG6) FM(A16) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(MSIOF1_SS2) FM(HTX3) FM(TX3) F_(0, 0) FM(DU_DG7) FM(A17) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) FM(DU_DB2) FM(A18) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) FM(DU_DB3) FM(A19) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) FM(DU_DB4) FM(A20) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1_A) F_(0, 0) FM(DU_DB5) FM(A21) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1_A) F_(0, 0) FM(DU_DB6) FM(A22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 FM(MSIOF2_SS2) FM(TCLK1_B) F_(0, 0) F_(0, 0) FM(DU_DB7) FM(A23) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP3SR1_3_0 FM(IRQ0) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DOTCLKOUT) FM(A24) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(IRQ1) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_HSYNC) FM(A25) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(IRQ2) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_VSYNC) FM(CS1_N_A26) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(IRQ3) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_ODDF_DISP_CDE) FM(CS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(GP1_28) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_23_20 FM(GP1_29) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_27_24 FM(GP1_30) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP0SR2_3_0 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) F_(0, 0) F_(0, 0) FM(DU_DOTCLKIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(GP2_02) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(GP2_03) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(GP2_04) F_(0, 0) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) FM(D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(GP2_05) FM(HSCK2) FM(MSIOF4_TXD) FM(SCK4) F_(0, 0) FM(D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(GP2_06) FM(HCTS2_N) FM(MSIOF4_SCK) FM(CTS4_N) F_(0, 0) FM(D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(GP2_07) FM(HRTS2_N) FM(MSIOF4_SYNC) FM(RTS4_N) F_(0, 0) FM(D8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP1SR2_3_0 FM(GP2_08) FM(HRX2) FM(MSIOF4_SS1) FM(RX4) F_(0, 0) FM(D9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(GP2_09) FM(HTX2) FM(MSIOF4_SS2) FM(TX4) F_(0, 0) FM(D10) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(GP2_10) FM(TCLK2_B) FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) FM(D11) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(GP2_11) FM(TCLK3) FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) FM(D12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(GP2_12) FM(TCLK4) FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) FM(D13) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(GP2_13) F_(0, 0) FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) FM(D14) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(GP2_14) FM(IRQ4) FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) FM(D15) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(GP2_15) FM(IRQ5) FM(MSIOF5_SS2) FM(CPG_CPCKOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP2SR2_3_0 FM(FXR_TXDA_A) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(RXDA_EXTFXR_A) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) FM(BS_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(FXR_TXDB) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) FM(RD_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(RXDB_EXTFXR) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) FM(WE0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_19_16 FM(CLK_EXTFXR) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) FM(WE1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_23_20 FM(TPU0TO0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) FM(RD_WR_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_27_24 FM(TPU0TO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CLKOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_31_28 FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(EX_WAIT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP0SR3_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(CANFD0_TX) FM(FXR_TXDA_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(CANFD0_RX) FM(RXDA_EXTFXR_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(CANFD2_TX) FM(TPU0TO2) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP1SR3_3_0 FM(CANFD3_RX) F_(0, 0) FM(PWM3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(CANFD4_TX) F_(0, 0) FM(PWM4) FM(FXR_CLKOUT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(CANFD4_RX) F_(0, 0) F_(0, 0) FM(FXR_CLKOUT2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(CANFD5_TX) F_(0, 0) F_(0, 0) FM(FXR_TXENA_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(CANFD5_RX) F_(0, 0) F_(0, 0) FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(CANFD6_TX) F_(0, 0) F_(0, 0) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP0SR4_3_0 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_7_4 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_15_12 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_23_20 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_27_24 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP1SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP1SR4_3_0 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_7_4 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_11_8 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_19_16 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_23_20 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_27_24 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_31_28 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP2SR4_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_7_4 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_15_12 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_19_16 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP0SR5_3_0 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_7_4 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_11_8 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_15_12 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_19_16 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_23_20 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_27_24 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_31_28 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP1SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP1SR5_3_0 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_7_4 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_11_8 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_15_12 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_23_20 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_27_24 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_31_28 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
+#define IP2SR5_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_7_4 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_11_8 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_19_16 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR \
+ \
+ GPSR1_30 \
+ GPSR1_29 \
+ GPSR1_28 \
+GPSR0_27 GPSR1_27 \
+GPSR0_26 GPSR1_26 GPSR4_26 \
+GPSR0_25 GPSR1_25 GPSR4_25 \
+GPSR0_24 GPSR1_24 GPSR2_24 GPSR4_24 \
+GPSR0_23 GPSR1_23 GPSR2_23 GPSR4_23 \
+GPSR0_22 GPSR1_22 GPSR2_22 GPSR4_22 \
+GPSR0_21 GPSR1_21 GPSR2_21 GPSR4_21 \
+GPSR0_20 GPSR1_20 GPSR2_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 GPSR8_20 GPSR9_20 \
+GPSR0_19 GPSR1_19 GPSR2_19 GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 GPSR8_19 GPSR9_19 \
+GPSR0_18 GPSR1_18 GPSR2_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 GPSR8_18 GPSR9_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 GPSR8_17 GPSR9_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 GPSR8_16 GPSR9_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 GPSR8_15 GPSR9_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 GPSR8_14 GPSR9_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 GPSR9_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 GPSR9_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 GPSR9_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 GPSR9_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 GPSR9_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 GPSR9_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 GPSR9_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 GPSR9_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 GPSR9_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 GPSR9_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 GPSR9_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 GPSR9_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 GPSR9_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0 GPSR9_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 FM(IP3SR1_23_20) IP3SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 FM(IP3SR1_27_24) IP3SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 FM(IP3SR1_31_28) IP3SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 FM(IP2SR2_19_16) IP2SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 FM(IP2SR2_23_20) IP2SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 FM(IP2SR2_27_24) IP2SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 FM(IP2SR2_31_28) IP2SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 \
+\
+FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP2SR4_3_0) IP2SR4_3_0 \
+FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 FM(IP2SR4_7_4) IP2SR4_7_4 \
+FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 FM(IP2SR4_11_8) IP2SR4_11_8 \
+FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 FM(IP2SR4_15_12) IP2SR4_15_12 \
+FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 FM(IP2SR4_19_16) IP2SR4_19_16 \
+FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \
+FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 FM(IP2SR4_27_24) IP2SR4_27_24 \
+FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \
+\
+FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \
+FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \
+FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \
+FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \
+FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \
+FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 FM(IP2SR5_23_20) IP2SR5_23_20 \
+FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 FM(IP2SR5_27_24) IP2SR5_27_24 \
+FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 FM(IP2SR5_31_28) IP2SR5_31_28
+
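+/*
+ * MOD_SEL2 fields choose, per I2C channel, between the regular
+ * GPIO/peripheral functions (SEL_I2Cn_0) and the dedicated SCL/SDA
+ * physical pins (SEL_I2Cn_3) listed in PINMUX_PHYS below; see the
+ * PINMUX_IPSR_MSEL()/PINMUX_IPSR_PHYS() entries in pinmux_data[].
+ */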
+/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
+#define MOD_SEL2_14_15 FM(SEL_I2C6_0) F_(0, 0) F_(0, 0) FM(SEL_I2C6_3)
+#define MOD_SEL2_12_13 FM(SEL_I2C5_0) F_(0, 0) F_(0, 0) FM(SEL_I2C5_3)
+#define MOD_SEL2_10_11 FM(SEL_I2C4_0) F_(0, 0) F_(0, 0) FM(SEL_I2C4_3)
+#define MOD_SEL2_8_9 FM(SEL_I2C3_0) F_(0, 0) F_(0, 0) FM(SEL_I2C3_3)
+#define MOD_SEL2_6_7 FM(SEL_I2C2_0) F_(0, 0) F_(0, 0) FM(SEL_I2C2_3)
+#define MOD_SEL2_4_5 FM(SEL_I2C1_0) F_(0, 0) F_(0, 0) FM(SEL_I2C1_3)
+#define MOD_SEL2_2_3 FM(SEL_I2C0_0) F_(0, 0) F_(0, 0) FM(SEL_I2C0_3)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL2_14_15 \
+MOD_SEL2_12_13 \
+MOD_SEL2_10_11 \
+MOD_SEL2_8_9 \
+MOD_SEL2_6_7 \
+MOD_SEL2_4_5 \
+MOD_SEL2_2_3
+
+#define PINMUX_PHYS \
+ FM(SCL0) FM(SDA0) FM(SCL1) FM(SDA1) FM(SCL2) FM(SDA2) FM(SCL3) FM(SDA3) \
+ FM(SCL4) FM(SDA4) FM(SCL5) FM(SDA5) FM(SCL6) FM(SDA6)
+
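+/*
+ * The function lists above are expanded twice by redefining F_() and
+ * FM(): once to generate the FN_* enum values, and once to generate the
+ * matching *_MARK values referenced from pinmux_data[].
+ */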
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_PHYS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
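+	/* GPSR-only functions: pins with a single, fixed function. */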
+ PINMUX_SINGLE(MMC_D7),
+ PINMUX_SINGLE(MMC_D6),
+ PINMUX_SINGLE(MMC_D5),
+ PINMUX_SINGLE(MMC_D4),
+ PINMUX_SINGLE(MMC_SD_CLK),
+ PINMUX_SINGLE(MMC_SD_D3),
+ PINMUX_SINGLE(MMC_SD_D2),
+ PINMUX_SINGLE(MMC_SD_D1),
+ PINMUX_SINGLE(MMC_SD_D0),
+ PINMUX_SINGLE(MMC_SD_CMD),
+ PINMUX_SINGLE(MMC_DS),
+
+ PINMUX_SINGLE(SD_CD),
+ PINMUX_SINGLE(SD_WP),
+
+ PINMUX_SINGLE(RPC_INT_N),
+ PINMUX_SINGLE(RPC_WP_N),
+ PINMUX_SINGLE(RPC_RESET_N),
+
+ PINMUX_SINGLE(QSPI1_SSL),
+ PINMUX_SINGLE(QSPI1_IO3),
+ PINMUX_SINGLE(QSPI1_IO2),
+ PINMUX_SINGLE(QSPI1_MISO_IO1),
+ PINMUX_SINGLE(QSPI1_MOSI_IO0),
+ PINMUX_SINGLE(QSPI1_SPCLK),
+ PINMUX_SINGLE(QSPI0_SSL),
+ PINMUX_SINGLE(QSPI0_IO3),
+ PINMUX_SINGLE(QSPI0_IO2),
+ PINMUX_SINGLE(QSPI0_MISO_IO1),
+ PINMUX_SINGLE(QSPI0_MOSI_IO0),
+ PINMUX_SINGLE(QSPI0_SPCLK),
+
+ PINMUX_SINGLE(TCLK2_A),
+
+ PINMUX_SINGLE(CANFD7_RX),
+ PINMUX_SINGLE(CANFD7_TX),
+ PINMUX_SINGLE(CANFD6_RX),
+ PINMUX_SINGLE(CANFD1_RX),
+ PINMUX_SINGLE(CANFD1_TX),
+ PINMUX_SINGLE(CAN_CLK),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS0),
+
+ PINMUX_SINGLE(PCIE3_CLKREQ_N),
+ PINMUX_SINGLE(PCIE2_CLKREQ_N),
+ PINMUX_SINGLE(PCIE1_CLKREQ_N),
+ PINMUX_SINGLE(PCIE0_CLKREQ_N),
+
+ PINMUX_SINGLE(AVB0_PHY_INT),
+ PINMUX_SINGLE(AVB0_MAGIC),
+ PINMUX_SINGLE(AVB0_MDC),
+ PINMUX_SINGLE(AVB0_MDIO),
+ PINMUX_SINGLE(AVB0_TXCREFCLK),
+
+ PINMUX_SINGLE(AVB1_PHY_INT),
+ PINMUX_SINGLE(AVB1_MAGIC),
+ PINMUX_SINGLE(AVB1_MDC),
+ PINMUX_SINGLE(AVB1_MDIO),
+ PINMUX_SINGLE(AVB1_TXCREFCLK),
+
+ PINMUX_SINGLE(AVB2_AVTP_PPS),
+ PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+ PINMUX_SINGLE(AVB2_AVTP_MATCH),
+ PINMUX_SINGLE(AVB2_LINK),
+ PINMUX_SINGLE(AVB2_PHY_INT),
+ PINMUX_SINGLE(AVB2_MAGIC),
+ PINMUX_SINGLE(AVB2_MDC),
+ PINMUX_SINGLE(AVB2_MDIO),
+ PINMUX_SINGLE(AVB2_TXCREFCLK),
+ PINMUX_SINGLE(AVB2_TD3),
+ PINMUX_SINGLE(AVB2_TD2),
+ PINMUX_SINGLE(AVB2_TD1),
+ PINMUX_SINGLE(AVB2_TD0),
+ PINMUX_SINGLE(AVB2_TXC),
+ PINMUX_SINGLE(AVB2_TX_CTL),
+ PINMUX_SINGLE(AVB2_RD3),
+ PINMUX_SINGLE(AVB2_RD2),
+ PINMUX_SINGLE(AVB2_RD1),
+ PINMUX_SINGLE(AVB2_RD0),
+ PINMUX_SINGLE(AVB2_RXC),
+ PINMUX_SINGLE(AVB2_RX_CTL),
+
+ PINMUX_SINGLE(AVB3_AVTP_PPS),
+ PINMUX_SINGLE(AVB3_AVTP_CAPTURE),
+ PINMUX_SINGLE(AVB3_AVTP_MATCH),
+ PINMUX_SINGLE(AVB3_LINK),
+ PINMUX_SINGLE(AVB3_PHY_INT),
+ PINMUX_SINGLE(AVB3_MAGIC),
+ PINMUX_SINGLE(AVB3_MDC),
+ PINMUX_SINGLE(AVB3_MDIO),
+ PINMUX_SINGLE(AVB3_TXCREFCLK),
+ PINMUX_SINGLE(AVB3_TD3),
+ PINMUX_SINGLE(AVB3_TD2),
+ PINMUX_SINGLE(AVB3_TD1),
+ PINMUX_SINGLE(AVB3_TD0),
+ PINMUX_SINGLE(AVB3_TXC),
+ PINMUX_SINGLE(AVB3_TX_CTL),
+ PINMUX_SINGLE(AVB3_RD3),
+ PINMUX_SINGLE(AVB3_RD2),
+ PINMUX_SINGLE(AVB3_RD1),
+ PINMUX_SINGLE(AVB3_RD0),
+ PINMUX_SINGLE(AVB3_RXC),
+ PINMUX_SINGLE(AVB3_RX_CTL),
+
+ PINMUX_SINGLE(AVB4_AVTP_PPS),
+ PINMUX_SINGLE(AVB4_AVTP_CAPTURE),
+ PINMUX_SINGLE(AVB4_AVTP_MATCH),
+ PINMUX_SINGLE(AVB4_LINK),
+ PINMUX_SINGLE(AVB4_PHY_INT),
+ PINMUX_SINGLE(AVB4_MAGIC),
+ PINMUX_SINGLE(AVB4_MDC),
+ PINMUX_SINGLE(AVB4_MDIO),
+ PINMUX_SINGLE(AVB4_TXCREFCLK),
+ PINMUX_SINGLE(AVB4_TD3),
+ PINMUX_SINGLE(AVB4_TD2),
+ PINMUX_SINGLE(AVB4_TD1),
+ PINMUX_SINGLE(AVB4_TD0),
+ PINMUX_SINGLE(AVB4_TXC),
+ PINMUX_SINGLE(AVB4_TX_CTL),
+ PINMUX_SINGLE(AVB4_RD3),
+ PINMUX_SINGLE(AVB4_RD2),
+ PINMUX_SINGLE(AVB4_RD1),
+ PINMUX_SINGLE(AVB4_RD0),
+ PINMUX_SINGLE(AVB4_RXC),
+ PINMUX_SINGLE(AVB4_RX_CTL),
+
+ PINMUX_SINGLE(AVB5_AVTP_PPS),
+ PINMUX_SINGLE(AVB5_AVTP_CAPTURE),
+ PINMUX_SINGLE(AVB5_AVTP_MATCH),
+ PINMUX_SINGLE(AVB5_LINK),
+ PINMUX_SINGLE(AVB5_PHY_INT),
+ PINMUX_SINGLE(AVB5_MAGIC),
+ PINMUX_SINGLE(AVB5_MDC),
+ PINMUX_SINGLE(AVB5_MDIO),
+ PINMUX_SINGLE(AVB5_TXCREFCLK),
+ PINMUX_SINGLE(AVB5_TD3),
+ PINMUX_SINGLE(AVB5_TD2),
+ PINMUX_SINGLE(AVB5_TD1),
+ PINMUX_SINGLE(AVB5_TD0),
+ PINMUX_SINGLE(AVB5_TXC),
+ PINMUX_SINGLE(AVB5_TX_CTL),
+ PINMUX_SINGLE(AVB5_RD3),
+ PINMUX_SINGLE(AVB5_RD2),
+ PINMUX_SINGLE(AVB5_RD1),
+ PINMUX_SINGLE(AVB5_RD0),
+ PINMUX_SINGLE(AVB5_RXC),
+ PINMUX_SINGLE(AVB5_RX_CTL),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, A0),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HRX0),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX0),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, A1),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HSCK0),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, SCK0),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, A2),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, RTS0_N),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, A3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, CTS0_N),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, A4),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, HTX0),
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, TX0),
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, A5),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_RXD),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, DU_DR2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, A6),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, DU_DR3),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, A7),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, DU_DR4),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, A8),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, DU_DR5),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, A9),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, DU_DR6),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, A10),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, DU_DR7),
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, A11),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, MSIOF1_RXD),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, DU_DG2),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, A12),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HRX3),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, SCK3),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, DU_DG3),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, A13),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HSCK3),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, CTS3_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, DU_DG4),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, A14),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, RTS3_N),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, DU_DG5),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, A15),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX3),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, DU_DG6),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, A16),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, HTX3),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, TX3),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, DU_DG7),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, A17),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, HSCK1),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SCK1),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, DU_DB2),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, A18),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, HCTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, CTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, DU_DB3),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, A19),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, RTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, DU_DB4),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, A20),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, HRX1),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, RX1_A),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, DU_DB5),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, A21),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, HTX1),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, TX1_A),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, DU_DB6),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, A22),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK1_B),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, DU_DB7),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, A23),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, IRQ0),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, DU_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, A24),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, IRQ1),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, DU_HSYNC),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, A25),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, IRQ2),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, DU_VSYNC),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, CS1_N_A26),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, DU_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, CS0_N),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, GP1_28),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, D0),
+
+ PINMUX_IPSR_GPSR(IP3SR1_23_20, GP1_29),
+ PINMUX_IPSR_GPSR(IP3SR1_23_20, D1),
+
+ PINMUX_IPSR_GPSR(IP3SR1_27_24, GP1_30),
+ PINMUX_IPSR_GPSR(IP3SR1_27_24, D2),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, DU_DOTCLKIN),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, IPC_CLKEN_OUT),
+
+ /* GP2_02 = SCL0 */
+ PINMUX_IPSR_MSEL(IP0SR2_11_8, GP2_02, SEL_I2C0_0),
+ PINMUX_IPSR_MSEL(IP0SR2_11_8, D3, SEL_I2C0_0),
+ PINMUX_IPSR_PHYS(IP0SR2_11_8, SCL0, SEL_I2C0_3),
+
+ /* GP2_03 = SDA0 */
+ PINMUX_IPSR_MSEL(IP0SR2_15_12, GP2_03, SEL_I2C0_0),
+ PINMUX_IPSR_MSEL(IP0SR2_15_12, D4, SEL_I2C0_0),
+ PINMUX_IPSR_PHYS(IP0SR2_15_12, SDA0, SEL_I2C0_3),
+
+ /* GP2_04 = SCL1 */
+ PINMUX_IPSR_MSEL(IP0SR2_19_16, GP2_04, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_19_16, MSIOF4_RXD, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_19_16, D5, SEL_I2C1_0),
+ PINMUX_IPSR_PHYS(IP0SR2_19_16, SCL1, SEL_I2C1_3),
+
+ /* GP2_05 = SDA1 */
+ PINMUX_IPSR_MSEL(IP0SR2_23_20, GP2_05, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_23_20, HSCK2, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_23_20, MSIOF4_TXD, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_23_20, SCK4, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP0SR2_23_20, D6, SEL_I2C1_0),
+ PINMUX_IPSR_PHYS(IP0SR2_23_20, SDA1, SEL_I2C1_3),
+
+ /* GP2_06 = SCL2 */
+ PINMUX_IPSR_MSEL(IP0SR2_27_24, GP2_06, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_27_24, HCTS2_N, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_27_24, MSIOF4_SCK, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_27_24, CTS4_N, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_27_24, D7, SEL_I2C2_0),
+ PINMUX_IPSR_PHYS(IP0SR2_27_24, SCL2, SEL_I2C2_3),
+
+ /* GP2_07 = SDA2 */
+ PINMUX_IPSR_MSEL(IP0SR2_31_28, GP2_07, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_31_28, HRTS2_N, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_31_28, MSIOF4_SYNC, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_31_28, RTS4_N, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP0SR2_31_28, D8, SEL_I2C2_0),
+ PINMUX_IPSR_PHYS(IP0SR2_31_28, SDA2, SEL_I2C2_3),
+
+ /* GP2_08 = SCL3 */
+ PINMUX_IPSR_MSEL(IP1SR2_3_0, GP2_08, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_3_0, HRX2, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_3_0, MSIOF4_SS1, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_3_0, RX4, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_3_0, D9, SEL_I2C3_0),
+ PINMUX_IPSR_PHYS(IP1SR2_3_0, SCL3, SEL_I2C3_3),
+
+ /* GP2_09 = SDA3 */
+ PINMUX_IPSR_MSEL(IP1SR2_7_4, GP2_09, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_7_4, HTX2, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_7_4, MSIOF4_SS2, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_7_4, TX4, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP1SR2_7_4, D10, SEL_I2C3_0),
+ PINMUX_IPSR_PHYS(IP1SR2_7_4, SDA3, SEL_I2C3_3),
+
+ /* GP2_10 = SCL4 */
+ PINMUX_IPSR_MSEL(IP1SR2_11_8, GP2_10, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_11_8, TCLK2_B, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_11_8, MSIOF5_RXD, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_11_8, D11, SEL_I2C4_0),
+ PINMUX_IPSR_PHYS(IP1SR2_11_8, SCL4, SEL_I2C4_3),
+
+ /* GP2_11 = SDA4 */
+ PINMUX_IPSR_MSEL(IP1SR2_15_12, GP2_11, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_15_12, TCLK3, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_15_12, MSIOF5_TXD, SEL_I2C4_0),
+ PINMUX_IPSR_MSEL(IP1SR2_15_12, D12, SEL_I2C4_0),
+ PINMUX_IPSR_PHYS(IP1SR2_15_12, SDA4, SEL_I2C4_3),
+
+ /* GP2_12 = SCL5 */
+ PINMUX_IPSR_MSEL(IP1SR2_19_16, GP2_12, SEL_I2C5_0),
+ PINMUX_IPSR_MSEL(IP1SR2_19_16, TCLK4, SEL_I2C5_0),
+ PINMUX_IPSR_MSEL(IP1SR2_19_16, MSIOF5_SCK, SEL_I2C5_0),
+ PINMUX_IPSR_MSEL(IP1SR2_19_16, D13, SEL_I2C5_0),
+ PINMUX_IPSR_PHYS(IP1SR2_19_16, SCL5, SEL_I2C5_3),
+
+ /* GP2_13 = SDA5 */
+ PINMUX_IPSR_MSEL(IP1SR2_23_20, GP2_13, SEL_I2C5_0),
+ PINMUX_IPSR_MSEL(IP1SR2_23_20, MSIOF5_SYNC, SEL_I2C5_0),
+ PINMUX_IPSR_MSEL(IP1SR2_23_20, D14, SEL_I2C5_0),
+ PINMUX_IPSR_PHYS(IP1SR2_23_20, SDA5, SEL_I2C5_3),
+
+ /* GP2_14 = SCL6 */
+ PINMUX_IPSR_MSEL(IP1SR2_27_24, GP2_14, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_27_24, IRQ4, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_27_24, MSIOF5_SS1, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_27_24, D15, SEL_I2C6_0),
+ PINMUX_IPSR_PHYS(IP1SR2_27_24, SCL6, SEL_I2C6_3),
+
+ /* GP2_15 = SDA6 */
+ PINMUX_IPSR_MSEL(IP1SR2_31_28, GP2_15, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_31_28, IRQ5, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_31_28, MSIOF5_SS2, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP1SR2_31_28, CPG_CPCKOUT, SEL_I2C6_0),
+ PINMUX_IPSR_PHYS(IP1SR2_31_28, SDA6, SEL_I2C6_3),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, FXR_TXDA_A),
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, RXDA_EXTFXR_A),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, MSIOF3_SS2),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, BS_N),
+
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, FXR_TXDB),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, MSIOF3_RXD),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, RD_N),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, RXDB_EXTFXR),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, MSIOF3_TXD),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, WE0_N),
+
+ PINMUX_IPSR_GPSR(IP2SR2_19_16, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP2SR2_19_16, MSIOF3_SCK),
+ PINMUX_IPSR_GPSR(IP2SR2_19_16, WE1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR2_23_20, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP2SR2_23_20, MSIOF3_SYNC),
+ PINMUX_IPSR_GPSR(IP2SR2_23_20, RD_WR_N),
+
+ PINMUX_IPSR_GPSR(IP2SR2_27_24, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP2SR2_27_24, CLKOUT),
+
+ PINMUX_IPSR_GPSR(IP2SR2_31_28, TCLK1_A),
+ PINMUX_IPSR_GPSR(IP2SR2_31_28, EX_WAIT0),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, FXR_TXDA_B),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, TX1_B),
+
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, RXDA_EXTFXR_B),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, RX1_B),
+
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, PWM0),
+
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, PWM1),
+
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, PWM2),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, PWM3),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, CANFD4_TX),
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, PWM4),
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, FXR_CLKOUT1),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, CANFD4_RX),
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, FXR_CLKOUT2),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, FXR_TXENA_N),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, FXR_TXENB_N),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, CANFD6_TX),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, STPWT_EXTFXR),
+
+ /* IP0SR4 */
+ PINMUX_IPSR_GPSR(IP0SR4_3_0, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR4_3_0, AVB0_MII_RX_DV),
+
+ PINMUX_IPSR_GPSR(IP0SR4_7_4, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP0SR4_7_4, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP0SR4_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP0SR4_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP0SR4_15_12, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP0SR4_15_12, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP0SR4_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP0SR4_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP0SR4_23_20, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP0SR4_23_20, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP0SR4_27_24, AVB0_TX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR4_27_24, AVB0_MII_TX_EN),
+
+ PINMUX_IPSR_GPSR(IP0SR4_31_28, AVB0_TXC),
+ PINMUX_IPSR_GPSR(IP0SR4_31_28, AVB0_MII_TXC),
+
+ /* IP1SR4 */
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, AVB0_TD0),
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, AVB0_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, AVB0_TD1),
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, AVB0_MII_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, AVB0_TD2),
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, AVB0_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP1SR4_15_12, AVB0_TD3),
+ PINMUX_IPSR_GPSR(IP1SR4_15_12, AVB0_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, AVB0_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR4_27_24, AVB0_MDC),
+
+ PINMUX_IPSR_GPSR(IP1SR4_31_28, AVB0_MAGIC),
+
+ /* IP2SR4 */
+ PINMUX_IPSR_GPSR(IP2SR4_7_4, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP2SR4_7_4, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP2SR4_11_8, AVB0_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP2SR4_11_8, AVB0_MII_RX_ER),
+ PINMUX_IPSR_GPSR(IP2SR4_11_8, CC5_OSCOUT),
+
+ PINMUX_IPSR_GPSR(IP2SR4_15_12, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP2SR4_15_12, AVB0_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP2SR4_19_16, AVB0_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP2SR4_19_16, AVB0_MII_COL),
+
+ /* IP0SR5 */
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, AVB1_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP0SR5_15_12, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP0SR5_15_12, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP0SR5_19_16, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP0SR5_19_16, AVB1_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP0SR5_27_24, AVB1_TX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR5_27_24, AVB1_MII_TX_EN),
+
+ PINMUX_IPSR_GPSR(IP0SR5_31_28, AVB1_TXC),
+ PINMUX_IPSR_GPSR(IP0SR5_31_28, AVB1_MII_TXC),
+
+ /* IP1SR5 */
+ PINMUX_IPSR_GPSR(IP1SR5_3_0, AVB1_TD0),
+ PINMUX_IPSR_GPSR(IP1SR5_3_0, AVB1_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR5_7_4, AVB1_TD1),
+ PINMUX_IPSR_GPSR(IP1SR5_7_4, AVB1_MII_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR5_11_8, AVB1_TD2),
+ PINMUX_IPSR_GPSR(IP1SR5_11_8, AVB1_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP1SR5_15_12, AVB1_TD3),
+ PINMUX_IPSR_GPSR(IP1SR5_15_12, AVB1_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP1SR5_19_16, AVB1_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP1SR5_23_20, AVB1_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR5_27_24, AVB1_MDC),
+
+ PINMUX_IPSR_GPSR(IP1SR5_31_28, AVB1_MAGIC),
+
+ /* IP2SR5 */
+ PINMUX_IPSR_GPSR(IP2SR5_7_4, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP2SR5_7_4, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP2SR5_11_8, AVB1_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP2SR5_11_8, AVB1_MII_RX_ER),
+
+ PINMUX_IPSR_GPSR(IP2SR5_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP2SR5_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP2SR5_19_16, AVB1_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP2SR5_19_16, AVB1_MII_COL),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
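+/*
+ * PINMUX_GPIO_GP_ALL() below expands to one pin descriptor per GPIO
+ * port pin enumerated earlier in this file, so every GP pin is
+ * registered with the pinctrl core without being listed by hand.
+ */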
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - AVB0 ------------------------------------------------------------------- */
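+/*
+ * For each group below, the <group>_pins[] array lists the physical
+ * pins and the matching <group>_mux[] array lists the corresponding
+ * function marks in the same order; SH_PFC_PIN_GROUP() further down
+ * ties the two arrays together by group name.
+ */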
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 7),
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK, AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK, AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(4, 12),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(4, 19),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------------------------- */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 13),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 1),
+ RCAR_GP_PIN(5, 2), RCAR_GP_PIN(5, 3),
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK, AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK, AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------------------------- */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(6, 17),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(6, 15),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 13),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 7),
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+ RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 11),
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK, AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK, AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(6, 12),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - AVB3 ------------------------------------------------------------------- */
+static const unsigned int avb3_link_pins[] = {
+ /* AVB3_LINK */
+ RCAR_GP_PIN(7, 17),
+};
+static const unsigned int avb3_link_mux[] = {
+ AVB3_LINK_MARK,
+};
+static const unsigned int avb3_magic_pins[] = {
+ /* AVB3_MAGIC */
+ RCAR_GP_PIN(7, 15),
+};
+static const unsigned int avb3_magic_mux[] = {
+ AVB3_MAGIC_MARK,
+};
+static const unsigned int avb3_phy_int_pins[] = {
+ /* AVB3_PHY_INT */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int avb3_phy_int_mux[] = {
+ AVB3_PHY_INT_MARK,
+};
+static const unsigned int avb3_mdio_pins[] = {
+ /* AVB3_MDC, AVB3_MDIO */
+ RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 13),
+};
+static const unsigned int avb3_mdio_mux[] = {
+ AVB3_MDC_MARK, AVB3_MDIO_MARK,
+};
+static const unsigned int avb3_rgmii_pins[] = {
+ /*
+ * AVB3_TX_CTL, AVB3_TXC, AVB3_TD0, AVB3_TD1, AVB3_TD2, AVB3_TD3,
+ * AVB3_RX_CTL, AVB3_RXC, AVB3_RD0, AVB3_RD1, AVB3_RD2, AVB3_RD3,
+ */
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9),
+ RCAR_GP_PIN(7, 10), RCAR_GP_PIN(7, 11),
+ RCAR_GP_PIN(7, 0), RCAR_GP_PIN(7, 1),
+ RCAR_GP_PIN(7, 2), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb3_rgmii_mux[] = {
+ AVB3_TX_CTL_MARK, AVB3_TXC_MARK,
+ AVB3_TD0_MARK, AVB3_TD1_MARK, AVB3_TD2_MARK, AVB3_TD3_MARK,
+ AVB3_RX_CTL_MARK, AVB3_RXC_MARK,
+ AVB3_RD0_MARK, AVB3_RD1_MARK, AVB3_RD2_MARK, AVB3_RD3_MARK,
+};
+static const unsigned int avb3_txcrefclk_pins[] = {
+ /* AVB3_TXCREFCLK */
+ RCAR_GP_PIN(7, 12),
+};
+static const unsigned int avb3_txcrefclk_mux[] = {
+ AVB3_TXCREFCLK_MARK,
+};
+static const unsigned int avb3_avtp_pps_pins[] = {
+ /* AVB3_AVTP_PPS */
+ RCAR_GP_PIN(7, 20),
+};
+static const unsigned int avb3_avtp_pps_mux[] = {
+ AVB3_AVTP_PPS_MARK,
+};
+static const unsigned int avb3_avtp_capture_pins[] = {
+ /* AVB3_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 19),
+};
+static const unsigned int avb3_avtp_capture_mux[] = {
+ AVB3_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb3_avtp_match_pins[] = {
+ /* AVB3_AVTP_MATCH */
+ RCAR_GP_PIN(7, 18),
+};
+static const unsigned int avb3_avtp_match_mux[] = {
+ AVB3_AVTP_MATCH_MARK,
+};
+
+/* - AVB4 ------------------------------------------------------------------- */
+static const unsigned int avb4_link_pins[] = {
+ /* AVB4_LINK */
+ RCAR_GP_PIN(8, 17),
+};
+static const unsigned int avb4_link_mux[] = {
+ AVB4_LINK_MARK,
+};
+static const unsigned int avb4_magic_pins[] = {
+ /* AVB4_MAGIC */
+ RCAR_GP_PIN(8, 15),
+};
+static const unsigned int avb4_magic_mux[] = {
+ AVB4_MAGIC_MARK,
+};
+static const unsigned int avb4_phy_int_pins[] = {
+ /* AVB4_PHY_INT */
+ RCAR_GP_PIN(8, 16),
+};
+static const unsigned int avb4_phy_int_mux[] = {
+ AVB4_PHY_INT_MARK,
+};
+static const unsigned int avb4_mdio_pins[] = {
+ /* AVB4_MDC, AVB4_MDIO */
+ RCAR_GP_PIN(8, 14), RCAR_GP_PIN(8, 13),
+};
+static const unsigned int avb4_mdio_mux[] = {
+ AVB4_MDC_MARK, AVB4_MDIO_MARK,
+};
+static const unsigned int avb4_rgmii_pins[] = {
+ /*
+ * AVB4_TX_CTL, AVB4_TXC, AVB4_TD0, AVB4_TD1, AVB4_TD2, AVB4_TD3,
+ * AVB4_RX_CTL, AVB4_RXC, AVB4_RD0, AVB4_RD1, AVB4_RD2, AVB4_RD3,
+ */
+ RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7),
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11),
+ RCAR_GP_PIN(8, 0), RCAR_GP_PIN(8, 1),
+ RCAR_GP_PIN(8, 2), RCAR_GP_PIN(8, 3),
+ RCAR_GP_PIN(8, 4), RCAR_GP_PIN(8, 5),
+};
+static const unsigned int avb4_rgmii_mux[] = {
+ AVB4_TX_CTL_MARK, AVB4_TXC_MARK,
+ AVB4_TD0_MARK, AVB4_TD1_MARK, AVB4_TD2_MARK, AVB4_TD3_MARK,
+ AVB4_RX_CTL_MARK, AVB4_RXC_MARK,
+ AVB4_RD0_MARK, AVB4_RD1_MARK, AVB4_RD2_MARK, AVB4_RD3_MARK,
+};
+static const unsigned int avb4_txcrefclk_pins[] = {
+ /* AVB4_TXCREFCLK */
+ RCAR_GP_PIN(8, 12),
+};
+static const unsigned int avb4_txcrefclk_mux[] = {
+ AVB4_TXCREFCLK_MARK,
+};
+static const unsigned int avb4_avtp_pps_pins[] = {
+ /* AVB4_AVTP_PPS */
+ RCAR_GP_PIN(8, 20),
+};
+static const unsigned int avb4_avtp_pps_mux[] = {
+ AVB4_AVTP_PPS_MARK,
+};
+static const unsigned int avb4_avtp_capture_pins[] = {
+ /* AVB4_AVTP_CAPTURE */
+ RCAR_GP_PIN(8, 19),
+};
+static const unsigned int avb4_avtp_capture_mux[] = {
+ AVB4_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb4_avtp_match_pins[] = {
+ /* AVB4_AVTP_MATCH */
+ RCAR_GP_PIN(8, 18),
+};
+static const unsigned int avb4_avtp_match_mux[] = {
+ AVB4_AVTP_MATCH_MARK,
+};
+
+/* - AVB5 ------------------------------------------------------------------- */
+static const unsigned int avb5_link_pins[] = {
+ /* AVB5_LINK */
+ RCAR_GP_PIN(9, 17),
+};
+static const unsigned int avb5_link_mux[] = {
+ AVB5_LINK_MARK,
+};
+static const unsigned int avb5_magic_pins[] = {
+ /* AVB5_MAGIC */
+ RCAR_GP_PIN(9, 15),
+};
+static const unsigned int avb5_magic_mux[] = {
+ AVB5_MAGIC_MARK,
+};
+static const unsigned int avb5_phy_int_pins[] = {
+ /* AVB5_PHY_INT */
+ RCAR_GP_PIN(9, 16),
+};
+static const unsigned int avb5_phy_int_mux[] = {
+ AVB5_PHY_INT_MARK,
+};
+static const unsigned int avb5_mdio_pins[] = {
+ /* AVB5_MDC, AVB5_MDIO */
+ RCAR_GP_PIN(9, 14), RCAR_GP_PIN(9, 13),
+};
+static const unsigned int avb5_mdio_mux[] = {
+ AVB5_MDC_MARK, AVB5_MDIO_MARK,
+};
+static const unsigned int avb5_rgmii_pins[] = {
+ /*
+ * AVB5_TX_CTL, AVB5_TXC, AVB5_TD0, AVB5_TD1, AVB5_TD2, AVB5_TD3,
+ * AVB5_RX_CTL, AVB5_RXC, AVB5_RD0, AVB5_RD1, AVB5_RD2, AVB5_RD3,
+ */
+ RCAR_GP_PIN(9, 6), RCAR_GP_PIN(9, 7),
+ RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9),
+ RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11),
+ RCAR_GP_PIN(9, 0), RCAR_GP_PIN(9, 1),
+ RCAR_GP_PIN(9, 2), RCAR_GP_PIN(9, 3),
+ RCAR_GP_PIN(9, 4), RCAR_GP_PIN(9, 5),
+};
+static const unsigned int avb5_rgmii_mux[] = {
+ AVB5_TX_CTL_MARK, AVB5_TXC_MARK,
+ AVB5_TD0_MARK, AVB5_TD1_MARK, AVB5_TD2_MARK, AVB5_TD3_MARK,
+ AVB5_RX_CTL_MARK, AVB5_RXC_MARK,
+ AVB5_RD0_MARK, AVB5_RD1_MARK, AVB5_RD2_MARK, AVB5_RD3_MARK,
+};
+static const unsigned int avb5_txcrefclk_pins[] = {
+ /* AVB5_TXCREFCLK */
+ RCAR_GP_PIN(9, 12),
+};
+static const unsigned int avb5_txcrefclk_mux[] = {
+ AVB5_TXCREFCLK_MARK,
+};
+static const unsigned int avb5_avtp_pps_pins[] = {
+ /* AVB5_AVTP_PPS */
+ RCAR_GP_PIN(9, 20),
+};
+static const unsigned int avb5_avtp_pps_mux[] = {
+ AVB5_AVTP_PPS_MARK,
+};
+static const unsigned int avb5_avtp_capture_pins[] = {
+ /* AVB5_AVTP_CAPTURE */
+ RCAR_GP_PIN(9, 19),
+};
+static const unsigned int avb5_avtp_capture_mux[] = {
+ AVB5_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb5_avtp_match_pins[] = {
+ /* AVB5_AVTP_MATCH */
+ RCAR_GP_PIN(9, 18),
+};
+static const unsigned int avb5_avtp_match_mux[] = {
+ AVB5_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+ /* CANFD4_TX, CANFD4_RX */
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int canfd4_data_mux[] = {
+ CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+ /* CANFD5_TX, CANFD5_RX */
+ RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int canfd5_data_mux[] = {
+ CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+ /* CANFD6_TX, CANFD6_RX */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 14),
+};
+static const unsigned int canfd6_data_mux[] = {
+ CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+ /* CANFD7_TX, CANFD7_RX */
+ RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int canfd7_data_mux[] = {
+ CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb888_pins[] = {
+ /* DU_DR[7:2], DU_DG[7:2], DU_DB[7:2] */
+ RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 12),
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 21),
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK,
+ DU_DR4_MARK, DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK,
+ DU_DG4_MARK, DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK,
+ DU_DB4_MARK, DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_clk_out_pins[] = {
+ /* DU_DOTCLKOUT */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int du_clk_out_mux[] = {
+ DU_DOTCLKOUT_MARK,
+};
+static const unsigned int du_sync_pins[] = {
+ /* DU_HSYNC, DU_VSYNC */
+ RCAR_GP_PIN(1, 25), RCAR_GP_PIN(1, 26),
+};
+static const unsigned int du_sync_mux[] = {
+ DU_HSYNC_MARK, DU_VSYNC_MARK,
+};
+static const unsigned int du_oddf_pins[] = {
+ /* DU_EXODDF/DU_ODDF/DISP/CDE */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_oddf_mux[] = {
+ DU_ODDF_DISP_CDE_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 5),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0#, HCTS0# */
+ RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 4),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* HRX1, HTX1 */
+ RCAR_GP_PIN(1, 21), RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* HSCK1 */
+ RCAR_GP_PIN(1, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* HRTS1#, HCTS1# */
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 19),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2#, HCTS2# */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+ /* HRX3, HTX3 */
+ RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 17),
+};
+static const unsigned int hscif3_data_mux[] = {
+ HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* HSCK3 */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* HRTS3#, HCTS3# */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 16),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 2),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SDA4, SCL4 */
+ RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+};
+static const unsigned int i2c4_mux[] = {
+ SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+ /* SDA5, SCL5 */
+ RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int i2c5_mux[] = {
+ SDA5_MARK, SCL5_MARK,
+};
+
+/* - I2C6 ------------------------------------------------------------------- */
+static const unsigned int i2c6_pins[] = {
+ /* SDA6, SCL6 */
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14),
+};
+static const unsigned int i2c6_mux[] = {
+ SDA6_MARK, SCL6_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data1_pins[] = {
+ /* MMC_SD_D0 */
+ RCAR_GP_PIN(0, 19),
+};
+static const unsigned int mmc_data1_mux[] = {
+ MMC_SD_D0_MARK,
+};
+static const unsigned int mmc_data4_pins[] = {
+ /* MMC_SD_D[0:3] */
+ RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 20),
+ RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 22),
+};
+static const unsigned int mmc_data4_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+};
+static const unsigned int mmc_data8_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 20),
+ RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 22),
+ RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+ RCAR_GP_PIN(0, 26), RCAR_GP_PIN(0, 27),
+};
+static const unsigned int mmc_data8_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(0, 23), RCAR_GP_PIN(0, 18),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
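+
+/*
+ * mmc_data1, mmc_data4 and mmc_data8 overlap on the MMC_SD_D0..D3
+ * pins; a board selects exactly one of them according to its bus
+ * width.
+ */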
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 16),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(1, 19),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(1, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(2, 20),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(2, 21),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PWM0 ------------------------------------------------------------------- */
+static const unsigned int pwm0_pins[] = {
+ /* PWM0 */
+ RCAR_GP_PIN(3, 5),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+
+/* - PWM1 ------------------------------------------------------------------- */
+static const unsigned int pwm1_pins[] = {
+ /* PWM1 */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int pwm1_mux[] = {
+ PWM1_MARK,
+};
+
+/* - PWM2 ------------------------------------------------------------------- */
+static const unsigned int pwm2_pins[] = {
+ /* PWM2 */
+ RCAR_GP_PIN(3, 7),
+};
+static const unsigned int pwm2_mux[] = {
+ PWM2_MARK,
+};
+
+/* - PWM3 ------------------------------------------------------------------- */
+static const unsigned int pwm3_pins[] = {
+ /* PWM3 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int pwm3_mux[] = {
+ PWM3_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 5),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 2),
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 2),
+ RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 4),
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 8),
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 5),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0#, CTS0# */
+ RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX1_A, TX1_A */
+ RCAR_GP_PIN(1, 21), RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX1_B, TX1_B */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK1 */
+ RCAR_GP_PIN(1, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS1#, CTS1# */
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 19),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX3, TX3 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK3 */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS3#, CTS3# */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4#, CTS4# */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+/* - TMU -------------------------------------------------------------------- */
+static const unsigned int tmu_tclk1_a_pins[] = {
+ /* TCLK1 */
+ RCAR_GP_PIN(2, 23),
+};
+static const unsigned int tmu_tclk1_a_mux[] = {
+ TCLK1_A_MARK,
+};
+static const unsigned int tmu_tclk1_b_pins[] = {
+ /* TCLK1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int tmu_tclk1_b_mux[] = {
+ TCLK1_B_MARK,
+};
+
+static const unsigned int tmu_tclk2_a_pins[] = {
+ /* TCLK2 */
+ RCAR_GP_PIN(2, 24),
+};
+static const unsigned int tmu_tclk2_a_mux[] = {
+ TCLK2_A_MARK,
+};
+static const unsigned int tmu_tclk2_b_pins[] = {
+ /* TCLK2 */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int tmu_tclk2_b_mux[] = {
+ TCLK2_B_MARK,
+};
+
+static const unsigned int tmu_tclk3_pins[] = {
+ /* TCLK3 */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int tmu_tclk3_mux[] = {
+ TCLK3_MARK,
+};
+
+static const unsigned int tmu_tclk4_pins[] = {
+ /* TCLK4 */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tmu_tclk4_mux[] = {
+ TCLK4_MARK,
+};
+
+/* - TPU -------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+ /* TPU0TO0 */
+ RCAR_GP_PIN(2, 21),
+};
+static const unsigned int tpu_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+ /* TPU0TO1 */
+ RCAR_GP_PIN(2, 22),
+};
+static const unsigned int tpu_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+ /* TPU0TO2 */
+ RCAR_GP_PIN(3, 5),
+};
+static const unsigned int tpu_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+ /* TPU0TO3 */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int tpu_to3_mux[] = {
+ TPU0TO3_MARK,
+};
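+
+/*
+ * TPU0TO2/TPU0TO3 share GP3_05/GP3_06 with CANFD2 and PWM0/PWM1 (see
+ * the groups above), so those groups are mutually exclusive on a given
+ * board.
+ */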
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb3_link),
+ SH_PFC_PIN_GROUP(avb3_magic),
+ SH_PFC_PIN_GROUP(avb3_phy_int),
+ SH_PFC_PIN_GROUP(avb3_mdio),
+ SH_PFC_PIN_GROUP(avb3_rgmii),
+ SH_PFC_PIN_GROUP(avb3_txcrefclk),
+ SH_PFC_PIN_GROUP(avb3_avtp_pps),
+ SH_PFC_PIN_GROUP(avb3_avtp_capture),
+ SH_PFC_PIN_GROUP(avb3_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb4_link),
+ SH_PFC_PIN_GROUP(avb4_magic),
+ SH_PFC_PIN_GROUP(avb4_phy_int),
+ SH_PFC_PIN_GROUP(avb4_mdio),
+ SH_PFC_PIN_GROUP(avb4_rgmii),
+ SH_PFC_PIN_GROUP(avb4_txcrefclk),
+ SH_PFC_PIN_GROUP(avb4_avtp_pps),
+ SH_PFC_PIN_GROUP(avb4_avtp_capture),
+ SH_PFC_PIN_GROUP(avb4_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb5_link),
+ SH_PFC_PIN_GROUP(avb5_magic),
+ SH_PFC_PIN_GROUP(avb5_phy_int),
+ SH_PFC_PIN_GROUP(avb5_mdio),
+ SH_PFC_PIN_GROUP(avb5_rgmii),
+ SH_PFC_PIN_GROUP(avb5_txcrefclk),
+ SH_PFC_PIN_GROUP(avb5_avtp_pps),
+ SH_PFC_PIN_GROUP(avb5_avtp_capture),
+ SH_PFC_PIN_GROUP(avb5_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+ SH_PFC_PIN_GROUP(canfd5_data),
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6),
+
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+
+ SH_PFC_PIN_GROUP(mmc_data1),
+ SH_PFC_PIN_GROUP(mmc_data4),
+ SH_PFC_PIN_GROUP(mmc_data8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1),
+ SH_PFC_PIN_GROUP(pwm2),
+ SH_PFC_PIN_GROUP(pwm3),
+ SH_PFC_PIN_GROUP(pwm4),
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(tmu_tclk3),
+ SH_PFC_PIN_GROUP(tmu_tclk4),
+
+ SH_PFC_PIN_GROUP(tpu_to0),
+ SH_PFC_PIN_GROUP(tpu_to1),
+ SH_PFC_PIN_GROUP(tpu_to2),
+ SH_PFC_PIN_GROUP(tpu_to3),
+};
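+
+/*
+ * SH_PFC_PIN_GROUP(n) is the common sh-pfc helper that builds a struct
+ * sh_pfc_pin_group initializer from n_pins[] and n_mux[] (and their
+ * sizes) by token pasting, which keeps the table above compact.
+ */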
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const avb3_groups[] = {
+ "avb3_link",
+ "avb3_magic",
+ "avb3_phy_int",
+ "avb3_mdio",
+ "avb3_rgmii",
+ "avb3_txcrefclk",
+ "avb3_avtp_pps",
+ "avb3_avtp_capture",
+ "avb3_avtp_match",
+};
+
+static const char * const avb4_groups[] = {
+ "avb4_link",
+ "avb4_magic",
+ "avb4_phy_int",
+ "avb4_mdio",
+ "avb4_rgmii",
+ "avb4_txcrefclk",
+ "avb4_avtp_pps",
+ "avb4_avtp_capture",
+ "avb4_avtp_match",
+};
+
+static const char * const avb5_groups[] = {
+ "avb5_link",
+ "avb5_magic",
+ "avb5_phy_int",
+ "avb5_mdio",
+ "avb5_rgmii",
+ "avb5_txcrefclk",
+ "avb5_avtp_pps",
+ "avb5_avtp_capture",
+ "avb5_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+ "canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+ "canfd5_data",
+};
+
+static const char * const canfd6_groups[] = {
+ "canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+ "canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const du_groups[] = {
+ "du_rgb888",
+ "du_clk_out",
+ "du_sync",
+ "du_oddf",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data",
+ "hscif3_clk",
+ "hscif3_ctrl",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6",
+};
+
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data_a",
+ "scif1_data_b",
+ "scif1_clk",
+ "scif1_ctrl",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data",
+ "scif3_clk",
+ "scif3_ctrl",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const tmu_groups[] = {
+ "tmu_tclk1_a",
+ "tmu_tclk1_b",
+ "tmu_tclk2_a",
+ "tmu_tclk2_b",
+ "tmu_tclk3",
+ "tmu_tclk4",
+};
+
+static const char * const tpu_groups[] = {
+ "tpu_to0",
+ "tpu_to1",
+ "tpu_to2",
+ "tpu_to3",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+ SH_PFC_FUNCTION(avb3),
+ SH_PFC_FUNCTION(avb4),
+ SH_PFC_FUNCTION(avb5),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(canfd4),
+ SH_PFC_FUNCTION(canfd5),
+ SH_PFC_FUNCTION(canfd6),
+ SH_PFC_FUNCTION(canfd7),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(du),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+
+ SH_PFC_FUNCTION(intc_ex),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+
+ SH_PFC_FUNCTION(tmu),
+
+ SH_PFC_FUNCTION(tpu),
+};
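+
+/*
+ * The function and group names above are what board device trees
+ * select through the generic PFC bindings, e.g. (illustrative only):
+ *
+ *	i2c0_pins: i2c0 {
+ *		groups = "i2c0";
+ *		function = "i2c0";
+ *	};
+ */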
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG("GPSR0", 0xe6058040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_27_FN, GPSR0_27,
+ GP_0_26_FN, GPSR0_26,
+ GP_0_25_FN, GPSR0_25,
+ GP_0_24_FN, GPSR0_24,
+ GP_0_23_FN, GPSR0_23,
+ GP_0_22_FN, GPSR0_22,
+ GP_0_21_FN, GPSR0_21,
+ GP_0_20_FN, GPSR0_20,
+ GP_0_19_FN, GPSR0_19,
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
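+
+ /*
+ * Each GPSR is a 32-bit register with one bit per pin (1 selects the
+ * peripheral function, 0 leaves the pin as a GPIO); PINMUX_CFG_REG()
+ * lists the fields from the most significant bit down, with "0, 0"
+ * placeholders for reserved bits, as in the remaining GPSR entries
+ * below.
+ */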
+ { PINMUX_CFG_REG("GPSR1", 0xe6050040, 32, 1, GROUP(
+ 0, 0,
+ GP_1_30_FN, GPSR1_30,
+ GP_1_29_FN, GPSR1_29,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xe6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_2_24_FN, GPSR2_24,
+ GP_2_23_FN, GPSR2_23,
+ GP_2_22_FN, GPSR2_22,
+ GP_2_21_FN, GPSR2_21,
+ GP_2_20_FN, GPSR2_20,
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xe6058840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xe6060040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_26_FN, GPSR4_26,
+ GP_4_25_FN, GPSR4_25,
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ GP_4_22_FN, GPSR4_22,
+ GP_4_21_FN, GPSR4_21,
+ GP_4_20_FN, GPSR4_20,
+ GP_4_19_FN, GPSR4_19,
+ GP_4_18_FN, GPSR4_18,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xe6060840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xe6068040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR7", 0xe6068840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR8", 0xe6069040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_8_20_FN, GPSR8_20,
+ GP_8_19_FN, GPSR8_19,
+ GP_8_18_FN, GPSR8_18,
+ GP_8_17_FN, GPSR8_17,
+ GP_8_16_FN, GPSR8_16,
+ GP_8_15_FN, GPSR8_15,
+ GP_8_14_FN, GPSR8_14,
+ GP_8_13_FN, GPSR8_13,
+ GP_8_12_FN, GPSR8_12,
+ GP_8_11_FN, GPSR8_11,
+ GP_8_10_FN, GPSR8_10,
+ GP_8_9_FN, GPSR8_9,
+ GP_8_8_FN, GPSR8_8,
+ GP_8_7_FN, GPSR8_7,
+ GP_8_6_FN, GPSR8_6,
+ GP_8_5_FN, GPSR8_5,
+ GP_8_4_FN, GPSR8_4,
+ GP_8_3_FN, GPSR8_3,
+ GP_8_2_FN, GPSR8_2,
+ GP_8_1_FN, GPSR8_1,
+ GP_8_0_FN, GPSR8_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR9", 0xe6069840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_9_20_FN, GPSR9_20,
+ GP_9_19_FN, GPSR9_19,
+ GP_9_18_FN, GPSR9_18,
+ GP_9_17_FN, GPSR9_17,
+ GP_9_16_FN, GPSR9_16,
+ GP_9_15_FN, GPSR9_15,
+ GP_9_14_FN, GPSR9_14,
+ GP_9_13_FN, GPSR9_13,
+ GP_9_12_FN, GPSR9_12,
+ GP_9_11_FN, GPSR9_11,
+ GP_9_10_FN, GPSR9_10,
+ GP_9_9_FN, GPSR9_9,
+ GP_9_8_FN, GPSR9_8,
+ GP_9_7_FN, GPSR9_7,
+ GP_9_6_FN, GPSR9_6,
+ GP_9_5_FN, GPSR9_5,
+ GP_9_4_FN, GPSR9_4,
+ GP_9_3_FN, GPSR9_3,
+ GP_9_2_FN, GPSR9_2,
+ GP_9_1_FN, GPSR9_1,
+ GP_9_0_FN, GPSR9_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR1", 0xe6050060, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xe6050064, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xe6050068, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP3SR1", 0xe605006c, 32, 4, GROUP(
+ IP3SR1_31_28
+ IP3SR1_27_24
+ IP3SR1_23_20
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xe6050860, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xe6050864, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR2", 0xe6050868, 32, 4, GROUP(
+ IP2SR2_31_28
+ IP2SR2_27_24
+ IP2SR2_23_20
+ IP2SR2_19_16
+ IP2SR2_15_12
+ IP2SR2_11_8
+ IP2SR2_7_4
+ IP2SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xe6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xe6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR4", 0xe6060060, 32, 4, GROUP(
+ IP0SR4_31_28
+ IP0SR4_27_24
+ IP0SR4_23_20
+ IP0SR4_19_16
+ IP0SR4_15_12
+ IP0SR4_11_8
+ IP0SR4_7_4
+ IP0SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR4", 0xe6060064, 32, 4, GROUP(
+ IP1SR4_31_28
+ IP1SR4_27_24
+ IP1SR4_23_20
+ IP1SR4_19_16
+ IP1SR4_15_12
+ IP1SR4_11_8
+ IP1SR4_7_4
+ IP1SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR4", 0xe6060068, 32, 4, GROUP(
+ IP2SR4_31_28
+ IP2SR4_27_24
+ IP2SR4_23_20
+ IP2SR4_19_16
+ IP2SR4_15_12
+ IP2SR4_11_8
+ IP2SR4_7_4
+ IP2SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR5", 0xe6060860, 32, 4, GROUP(
+ IP0SR5_31_28
+ IP0SR5_27_24
+ IP0SR5_23_20
+ IP0SR5_19_16
+ IP0SR5_15_12
+ IP0SR5_11_8
+ IP0SR5_7_4
+ IP0SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR5", 0xe6060864, 32, 4, GROUP(
+ IP1SR5_31_28
+ IP1SR5_27_24
+ IP1SR5_23_20
+ IP1SR5_19_16
+ IP1SR5_15_12
+ IP1SR5_11_8
+ IP1SR5_7_4
+ IP1SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR5", 0xe6060868, 32, 4, GROUP(
+ IP2SR5_31_28
+ IP2SR5_27_24
+ IP2SR5_23_20
+ IP2SR5_19_16
+ IP2SR5_15_12
+ IP2SR5_11_8
+ IP2SR5_7_4
+ IP2SR5_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6050900, 32,
+ GROUP(4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 1, 1),
+ GROUP(
+ /* RESERVED 31, 30, 29, 28 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 27, 26, 25, 24 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 23, 22, 21, 20 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 19, 18, 17, 16 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ MOD_SEL2_14_15
+ MOD_SEL2_12_13
+ MOD_SEL2_10_11
+ MOD_SEL2_8_9
+ MOD_SEL2_6_7
+ MOD_SEL2_4_5
+ MOD_SEL2_2_3
+ 0, 0,
+ 0, 0, ))
+ },
+ { },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xe6058080) {
+ { RCAR_GP_PIN(0, 7), 28, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(0, 6), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(0, 5), 20, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(0, 4), 16, 2 }, /* QSPI0_IO3 */
+ { RCAR_GP_PIN(0, 3), 12, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(0, 2), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(0, 1), 4, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(0, 0), 0, 2 }, /* QSPI0_SPCLK */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xe6058084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(0, 14), 24, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(0, 13), 20, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(0, 12), 16, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(0, 11), 12, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(0, 10), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(0, 9), 4, 2 }, /* QSPI1_IO2 */
+ { RCAR_GP_PIN(0, 8), 0, 2 }, /* QSPI1_MISO_IO1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xe6058088) {
+ { RCAR_GP_PIN(0, 23), 28, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(0, 22), 24, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(0, 21), 20, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(0, 20), 16, 3 }, /* MMC_SD_D1 */
+ { RCAR_GP_PIN(0, 19), 12, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* SD_CD */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL0", 0xe605808c) {
+ { RCAR_GP_PIN(0, 27), 12, 3 }, /* MMC_D7 */
+ { RCAR_GP_PIN(0, 26), 8, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(0, 25), 4, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(0, 24), 0, 3 }, /* MMC_D4 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xe6050080) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* HRX0 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* SCIF_CLK */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xe6050084) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SCK */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xe6050088) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* MSIOF2_TXD */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* MSIOF1_SS2 */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* MSIOF1_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xe605008c) {
+ { RCAR_GP_PIN(1, 30), 24, 3 }, /* GP1_30 */
+ { RCAR_GP_PIN(1, 29), 20, 3 }, /* GP1_29 */
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* GP1_28 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* IRQ0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xe6050880) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* GP2_07 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* GP2_06 */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* GP2_05 */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* GP2_04 */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* GP2_03 */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* GP2_02 */
+ { RCAR_GP_PIN(2, 1), 4, 2 }, /* IPC_CLKOUT */
+ { RCAR_GP_PIN(2, 0), 0, 2 }, /* IPC_CLKIN */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xe6050884) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* GP2_15 */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* GP2_14 */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* GP2_13 */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* GP2_12 */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* GP2_11 */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* GP2_10 */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* GP2_09 */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* GP2_08 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xe6050888) {
+ { RCAR_GP_PIN(2, 23), 28, 3 }, /* TCLK1_A */
+ { RCAR_GP_PIN(2, 22), 24, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 21), 20, 3 }, /* TPU0TO0 */
+ { RCAR_GP_PIN(2, 20), 16, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 18), 8, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* RXDA_EXTFXR_A */
+ { RCAR_GP_PIN(2, 16), 0, 3 }, /* FXR_TXDA_A */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL2", 0xe605088c) {
+ { RCAR_GP_PIN(2, 24), 0, 3 }, /* TCLK2_A */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xe6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* CANFD1_RX */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* CANFD1_TX */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(3, 1), 4, 2 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(3, 0), 0, 2 }, /* CAN_CLK */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xe6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 3 }, /* CANFD7_TX */
+ { RCAR_GP_PIN(3, 14), 24, 3 }, /* CANFD6_RX */
+ { RCAR_GP_PIN(3, 13), 20, 3 }, /* CANFD6_TX */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* CANFD5_RX */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* CANFD5_TX */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* CANFD4_RX */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* CANFD4_TX */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* CANFD3_RX */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xe6058888) {
+ { RCAR_GP_PIN(3, 16), 0, 3 }, /* CANFD7_RX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xe6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* AVB0_TX_CTL */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* AVB0_RD3 */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* AVB0_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xe6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* AVB0_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xe6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* PCIE2_CLKREQ_N */
+ { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ { RCAR_GP_PIN(4, 20), 16, 3 }, /* AVB0_AVTP_PPS */
+ { RCAR_GP_PIN(4, 19), 12, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(4, 18), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(4, 17), 4, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(4, 16), 0, 3 }, /* AVB0_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xe606008c) {
+ { RCAR_GP_PIN(4, 26), 8, 3 }, /* AVS1 */
+ { RCAR_GP_PIN(4, 25), 4, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* PCIE3_CLKREQ_N */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xe6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB1_RXC */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB1_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xe6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB1_MDIO */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB1_TD2 */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB1_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xe6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB1_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xe6068080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB2_TXC */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB2_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xe6068084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB2_TD3 */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB2_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xe6068088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB2_AVTP_PPS */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB2_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xe6068880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB3_TXC */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB3_TX_CTL */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB3_RD3 */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB3_RD2 */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB3_RD1 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB3_RD0 */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB3_RXC */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB3_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xe6068884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB3_MAGIC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB3_MDC */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB3_MDIO */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB3_TXCREFCLK */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB3_TD3 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB3_TD2 */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB3_TD1 */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB3_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xe6068888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB3_AVTP_PPS */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB3_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB3_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB3_LINK */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB3_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL8", 0xe6069080) {
+ { RCAR_GP_PIN(8, 7), 28, 3 }, /* AVB4_TXC */
+ { RCAR_GP_PIN(8, 6), 24, 3 }, /* AVB4_TX_CTL */
+ { RCAR_GP_PIN(8, 5), 20, 3 }, /* AVB4_RD3 */
+ { RCAR_GP_PIN(8, 4), 16, 3 }, /* AVB4_RD2 */
+ { RCAR_GP_PIN(8, 3), 12, 3 }, /* AVB4_RD1 */
+ { RCAR_GP_PIN(8, 2), 8, 3 }, /* AVB4_RD0 */
+ { RCAR_GP_PIN(8, 1), 4, 3 }, /* AVB4_RXC */
+ { RCAR_GP_PIN(8, 0), 0, 3 }, /* AVB4_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL8", 0xe6069084) {
+ { RCAR_GP_PIN(8, 15), 28, 3 }, /* AVB4_MAGIC */
+ { RCAR_GP_PIN(8, 14), 24, 3 }, /* AVB4_MDC */
+ { RCAR_GP_PIN(8, 13), 20, 3 }, /* AVB4_MDIO */
+ { RCAR_GP_PIN(8, 12), 16, 3 }, /* AVB4_TXCREFCLK */
+ { RCAR_GP_PIN(8, 11), 12, 3 }, /* AVB4_TD3 */
+ { RCAR_GP_PIN(8, 10), 8, 3 }, /* AVB4_TD2 */
+ { RCAR_GP_PIN(8, 9), 4, 3 }, /* AVB4_TD1 */
+ { RCAR_GP_PIN(8, 8), 0, 3 }, /* AVB4_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL8", 0xe6069088) {
+ { RCAR_GP_PIN(8, 20), 16, 3 }, /* AVB4_AVTP_PPS */
+ { RCAR_GP_PIN(8, 19), 12, 3 }, /* AVB4_AVTP_CAPTURE */
+ { RCAR_GP_PIN(8, 18), 8, 3 }, /* AVB4_AVTP_MATCH */
+ { RCAR_GP_PIN(8, 17), 4, 3 }, /* AVB4_LINK */
+ { RCAR_GP_PIN(8, 16), 0, 3 }, /* AVB4_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL9", 0xe6069880) {
+ { RCAR_GP_PIN(9, 7), 28, 3 }, /* AVB5_TXC */
+ { RCAR_GP_PIN(9, 6), 24, 3 }, /* AVB5_TX_CTL */
+ { RCAR_GP_PIN(9, 5), 20, 3 }, /* AVB5_RD3 */
+ { RCAR_GP_PIN(9, 4), 16, 3 }, /* AVB5_RD2 */
+ { RCAR_GP_PIN(9, 3), 12, 3 }, /* AVB5_RD1 */
+ { RCAR_GP_PIN(9, 2), 8, 3 }, /* AVB5_RD0 */
+ { RCAR_GP_PIN(9, 1), 4, 3 }, /* AVB5_RXC */
+ { RCAR_GP_PIN(9, 0), 0, 3 }, /* AVB5_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL9", 0xe6069884) {
+ { RCAR_GP_PIN(9, 15), 28, 3 }, /* AVB5_MAGIC */
+ { RCAR_GP_PIN(9, 14), 24, 3 }, /* AVB5_MDC */
+ { RCAR_GP_PIN(9, 13), 20, 3 }, /* AVB5_MDIO */
+ { RCAR_GP_PIN(9, 12), 16, 3 }, /* AVB5_TXCREFCLK */
+ { RCAR_GP_PIN(9, 11), 12, 3 }, /* AVB5_TD3 */
+ { RCAR_GP_PIN(9, 10), 8, 3 }, /* AVB5_TD2 */
+ { RCAR_GP_PIN(9, 9), 4, 3 }, /* AVB5_TD1 */
+ { RCAR_GP_PIN(9, 8), 0, 3 }, /* AVB5_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL9", 0xe6069888) {
+ { RCAR_GP_PIN(9, 20), 16, 3 }, /* AVB5_AVTP_PPS */
+ { RCAR_GP_PIN(9, 19), 12, 3 }, /* AVB5_AVTP_CAPTURE */
+ { RCAR_GP_PIN(9, 18), 8, 3 }, /* AVB5_AVTP_MATCH */
+ { RCAR_GP_PIN(9, 17), 4, 3 }, /* AVB5_LINK */
+ { RCAR_GP_PIN(9, 16), 0, 3 }, /* AVB5_PHY_INT */
+ } },
+ { },
+};
+
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC2,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+ POC8,
+ POC9,
+ TD1SEL0,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xe60580a0, },
+ [POC1] = { 0xe60500a0, },
+ [POC2] = { 0xe60508a0, },
+ [POC4] = { 0xe60600a0, },
+ [POC5] = { 0xe60608a0, },
+ [POC6] = { 0xe60680a0, },
+ [POC7] = { 0xe60688a0, },
+ [POC8] = { 0xe60690a0, },
+ [POC9] = { 0xe60698a0, },
+ [TD1SEL0] = { 0xe6058124, },
+ { /* sentinel */ },
+};
+
+static int r8a779a0_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
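+ /* One POCCTRL bit per pin: the bit index is the pin's offset within its bank */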
+
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ if (pin >= RCAR_GP_PIN(0, 15) && pin <= RCAR_GP_PIN(0, 27))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 30))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC2].reg;
+ if (pin >= RCAR_GP_PIN(2, 2) && pin <= RCAR_GP_PIN(2, 15))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC4].reg;
+ if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC5].reg;
+ if (pin >= RCAR_GP_PIN(5, 0) && pin <= RCAR_GP_PIN(5, 17))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC6].reg;
+ if (pin >= RCAR_GP_PIN(6, 0) && pin <= RCAR_GP_PIN(6, 17))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC7].reg;
+ if (pin >= RCAR_GP_PIN(7, 0) && pin <= RCAR_GP_PIN(7, 17))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC8].reg;
+ if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 17))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC9].reg;
+ if (pin >= RCAR_GP_PIN(9, 0) && pin <= RCAR_GP_PIN(9, 17))
+ return bit;
+
+ return -EINVAL;
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe60580c0, "PUD0", 0xe60580e0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* QSPI0_SPCLK */
+ [ 1] = RCAR_GP_PIN(0, 1), /* QSPI0_MOSI_IO0 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* QSPI0_MISO_IO1 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* QSPI0_IO2 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* QSPI0_IO3 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* QSPI0_SSL */
+ [ 6] = RCAR_GP_PIN(0, 6), /* QSPI1_SPCLK */
+ [ 7] = RCAR_GP_PIN(0, 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* QSPI1_MISO_IO1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* QSPI1_IO2 */
+ [10] = RCAR_GP_PIN(0, 10), /* QSPI1_IO3 */
+ [11] = RCAR_GP_PIN(0, 11), /* QSPI1_SSL */
+ [12] = RCAR_GP_PIN(0, 12), /* RPC_RESET_N */
+ [13] = RCAR_GP_PIN(0, 13), /* RPC_WP_N */
+ [14] = RCAR_GP_PIN(0, 14), /* RPC_INT_N */
+ [15] = RCAR_GP_PIN(0, 15), /* SD_WP */
+ [16] = RCAR_GP_PIN(0, 16), /* SD_CD */
+ [17] = RCAR_GP_PIN(0, 17), /* MMC_DS */
+ [18] = RCAR_GP_PIN(0, 18), /* MMC_SD_CMD */
+ [19] = RCAR_GP_PIN(0, 19), /* MMC_SD_D0 */
+ [20] = RCAR_GP_PIN(0, 20), /* MMC_SD_D1 */
+ [21] = RCAR_GP_PIN(0, 21), /* MMC_SD_D2 */
+ [22] = RCAR_GP_PIN(0, 22), /* MMC_SD_D3 */
+ [23] = RCAR_GP_PIN(0, 23), /* MMC_SD_CLK */
+ [24] = RCAR_GP_PIN(0, 24), /* MMC_D4 */
+ [25] = RCAR_GP_PIN(0, 25), /* MMC_D5 */
+ [26] = RCAR_GP_PIN(0, 26), /* MMC_D6 */
+ [27] = RCAR_GP_PIN(0, 27), /* MMC_D7 */
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe60500c0, "PUD1", 0xe60500e0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* SCIF_CLK */
+ [ 1] = RCAR_GP_PIN(1, 1), /* HRX0 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* HSCK0 */
+ [ 3] = RCAR_GP_PIN(1, 3), /* HRTS0_N */
+ [ 4] = RCAR_GP_PIN(1, 4), /* HCTS0_N */
+ [ 5] = RCAR_GP_PIN(1, 5), /* HTX0 */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_RXD */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_TXD */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SCK */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_SYNC */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SS1 */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_SS2 */
+ [12] = RCAR_GP_PIN(1, 12), /* MSIOF1_RXD */
+ [13] = RCAR_GP_PIN(1, 13), /* MSIOF1_TXD */
+ [14] = RCAR_GP_PIN(1, 14), /* MSIOF1_SCK */
+ [15] = RCAR_GP_PIN(1, 15), /* MSIOF1_SYNC */
+ [16] = RCAR_GP_PIN(1, 16), /* MSIOF1_SS1 */
+ [17] = RCAR_GP_PIN(1, 17), /* MSIOF1_SS2 */
+ [18] = RCAR_GP_PIN(1, 18), /* MSIOF2_RXD */
+ [19] = RCAR_GP_PIN(1, 19), /* MSIOF2_TXD */
+ [20] = RCAR_GP_PIN(1, 20), /* MSIOF2_SCK */
+ [21] = RCAR_GP_PIN(1, 21), /* MSIOF2_SYNC */
+ [22] = RCAR_GP_PIN(1, 22), /* MSIOF2_SS1 */
+ [23] = RCAR_GP_PIN(1, 23), /* MSIOF2_SS2 */
+ [24] = RCAR_GP_PIN(1, 24), /* IRQ0 */
+ [25] = RCAR_GP_PIN(1, 25), /* IRQ1 */
+ [26] = RCAR_GP_PIN(1, 26), /* IRQ2 */
+ [27] = RCAR_GP_PIN(1, 27), /* IRQ3 */
+ [28] = RCAR_GP_PIN(1, 28), /* GP1_28 */
+ [29] = RCAR_GP_PIN(1, 29), /* GP1_29 */
+ [30] = RCAR_GP_PIN(1, 30), /* GP1_30 */
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe60508c0, "PUD2", 0xe60508e0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* IPC_CLKIN */
+ [ 1] = RCAR_GP_PIN(2, 1), /* IPC_CLKOUT */
+ [ 2] = RCAR_GP_PIN(2, 2), /* GP2_02 */
+ [ 3] = RCAR_GP_PIN(2, 3), /* GP2_03 */
+ [ 4] = RCAR_GP_PIN(2, 4), /* GP2_04 */
+ [ 5] = RCAR_GP_PIN(2, 5), /* GP2_05 */
+ [ 6] = RCAR_GP_PIN(2, 6), /* GP2_06 */
+ [ 7] = RCAR_GP_PIN(2, 7), /* GP2_07 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* GP2_08 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* GP2_09 */
+ [10] = RCAR_GP_PIN(2, 10), /* GP2_10 */
+ [11] = RCAR_GP_PIN(2, 11), /* GP2_11 */
+ [12] = RCAR_GP_PIN(2, 12), /* GP2_12 */
+ [13] = RCAR_GP_PIN(2, 13), /* GP2_13 */
+ [14] = RCAR_GP_PIN(2, 14), /* GP2_14 */
+ [15] = RCAR_GP_PIN(2, 15), /* GP2_15 */
+ [16] = RCAR_GP_PIN(2, 16), /* FXR_TXDA_A */
+ [17] = RCAR_GP_PIN(2, 17), /* RXDA_EXTFXR_A */
+ [18] = RCAR_GP_PIN(2, 18), /* FXR_TXDB */
+ [19] = RCAR_GP_PIN(2, 19), /* RXDB_EXTFXR */
+ [20] = RCAR_GP_PIN(2, 20), /* CLK_EXTFXR */
+ [21] = RCAR_GP_PIN(2, 21), /* TPU0TO0 */
+ [22] = RCAR_GP_PIN(2, 22), /* TPU0TO1 */
+ [23] = RCAR_GP_PIN(2, 23), /* TCLK1_A */
+ [24] = RCAR_GP_PIN(2, 24), /* TCLK2_A */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe60588c0, "PUD3", 0xe60588e0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* CAN_CLK */
+ [ 1] = RCAR_GP_PIN(3, 1), /* CANFD0_TX */
+ [ 2] = RCAR_GP_PIN(3, 2), /* CANFD0_RX */
+ [ 3] = RCAR_GP_PIN(3, 3), /* CANFD1_TX */
+ [ 4] = RCAR_GP_PIN(3, 4), /* CANFD1_RX */
+ [ 5] = RCAR_GP_PIN(3, 5), /* CANFD2_TX */
+ [ 6] = RCAR_GP_PIN(3, 6), /* CANFD2_RX */
+ [ 7] = RCAR_GP_PIN(3, 7), /* CANFD3_TX */
+ [ 8] = RCAR_GP_PIN(3, 8), /* CANFD3_RX */
+ [ 9] = RCAR_GP_PIN(3, 9), /* CANFD4_TX */
+ [10] = RCAR_GP_PIN(3, 10), /* CANFD4_RX */
+ [11] = RCAR_GP_PIN(3, 11), /* CANFD5_TX */
+ [12] = RCAR_GP_PIN(3, 12), /* CANFD5_RX */
+ [13] = RCAR_GP_PIN(3, 13), /* CANFD6_TX */
+ [14] = RCAR_GP_PIN(3, 14), /* CANFD6_RX */
+ [15] = RCAR_GP_PIN(3, 15), /* CANFD7_TX */
+ [16] = RCAR_GP_PIN(3, 16), /* CANFD7_RX */
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe60600c0, "PUD4", 0xe60600e0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* AVB0_RX_CTL */
+ [ 1] = RCAR_GP_PIN(4, 1), /* AVB0_RXC */
+ [ 2] = RCAR_GP_PIN(4, 2), /* AVB0_RD0 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* AVB0_RD1 */
+ [ 4] = RCAR_GP_PIN(4, 4), /* AVB0_RD2 */
+ [ 5] = RCAR_GP_PIN(4, 5), /* AVB0_RD3 */
+ [ 6] = RCAR_GP_PIN(4, 6), /* AVB0_TX_CTL */
+ [ 7] = RCAR_GP_PIN(4, 7), /* AVB0_TXC */
+ [ 8] = RCAR_GP_PIN(4, 8), /* AVB0_TD0 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* AVB0_TD1 */
+ [10] = RCAR_GP_PIN(4, 10), /* AVB0_TD2 */
+ [11] = RCAR_GP_PIN(4, 11), /* AVB0_TD3 */
+ [12] = RCAR_GP_PIN(4, 12), /* AVB0_TXCREFCLK */
+ [13] = RCAR_GP_PIN(4, 13), /* AVB0_MDIO */
+ [14] = RCAR_GP_PIN(4, 14), /* AVB0_MDC */
+ [15] = RCAR_GP_PIN(4, 15), /* AVB0_MAGIC */
+ [16] = RCAR_GP_PIN(4, 16), /* AVB0_PHY_INT */
+ [17] = RCAR_GP_PIN(4, 17), /* AVB0_LINK */
+ [18] = RCAR_GP_PIN(4, 18), /* AVB0_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(4, 19), /* AVB0_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(4, 20), /* AVB0_AVTP_PPS */
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */
+ [23] = RCAR_GP_PIN(4, 23), /* PCIE2_CLKREQ_N */
+ [24] = RCAR_GP_PIN(4, 24), /* PCIE3_CLKREQ_N */
+ [25] = RCAR_GP_PIN(4, 25), /* AVS0 */
+ [26] = RCAR_GP_PIN(4, 26), /* AVS1 */
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe60608c0, "PUD5", 0xe60608e0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB1_RX_CTL */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB1_RXC */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB1_RD0 */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB1_RD1 */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB1_RD2 */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB1_RD3 */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB1_TX_CTL */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB1_TXC */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB1_TD0 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB1_TD1 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB1_TD2 */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB1_TD3 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB1_TXCREFCLK */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB1_MDIO */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB1_MDC */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB1_MAGIC */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB1_PHY_INT */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB1_LINK */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB1_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB1_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB1_AVTP_PPS */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe60680c0, "PUD6", 0xe60680e0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB2_RX_CTL */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB2_RXC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB2_RD0 */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB2_RD1 */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB2_RD2 */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB2_RD3 */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB2_TX_CTL */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB2_TXC */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB2_TD0 */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB2_TD1 */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB2_TD2 */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB2_TD3 */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB2_TXCREFCLK */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB2_MDIO */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB2_MDC */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB2_MAGIC */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB2_PHY_INT */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB2_LINK */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB2_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB2_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB2_AVTP_PPS */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xe60688c0, "PUD7", 0xe60688e0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB3_RX_CTL */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB3_RXC */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB3_RD0 */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB3_RD1 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB3_RD2 */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB3_RD3 */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB3_TX_CTL */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB3_TXC */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB3_TD0 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB3_TD1 */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB3_TD2 */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB3_TD3 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB3_TXCREFCLK */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB3_MDIO */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB3_MDC */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB3_MAGIC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB3_PHY_INT */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB3_LINK */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB3_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB3_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB3_AVTP_PPS */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN8", 0xe60690c0, "PUD8", 0xe60690e0) {
+ [ 0] = RCAR_GP_PIN(8, 0), /* AVB4_RX_CTL */
+ [ 1] = RCAR_GP_PIN(8, 1), /* AVB4_RXC */
+ [ 2] = RCAR_GP_PIN(8, 2), /* AVB4_RD0 */
+ [ 3] = RCAR_GP_PIN(8, 3), /* AVB4_RD1 */
+ [ 4] = RCAR_GP_PIN(8, 4), /* AVB4_RD2 */
+ [ 5] = RCAR_GP_PIN(8, 5), /* AVB4_RD3 */
+ [ 6] = RCAR_GP_PIN(8, 6), /* AVB4_TX_CTL */
+ [ 7] = RCAR_GP_PIN(8, 7), /* AVB4_TXC */
+ [ 8] = RCAR_GP_PIN(8, 8), /* AVB4_TD0 */
+ [ 9] = RCAR_GP_PIN(8, 9), /* AVB4_TD1 */
+ [10] = RCAR_GP_PIN(8, 10), /* AVB4_TD2 */
+ [11] = RCAR_GP_PIN(8, 11), /* AVB4_TD3 */
+ [12] = RCAR_GP_PIN(8, 12), /* AVB4_TXCREFCLK */
+ [13] = RCAR_GP_PIN(8, 13), /* AVB4_MDIO */
+ [14] = RCAR_GP_PIN(8, 14), /* AVB4_MDC */
+ [15] = RCAR_GP_PIN(8, 15), /* AVB4_MAGIC */
+ [16] = RCAR_GP_PIN(8, 16), /* AVB4_PHY_INT */
+ [17] = RCAR_GP_PIN(8, 17), /* AVB4_LINK */
+ [18] = RCAR_GP_PIN(8, 18), /* AVB4_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(8, 19), /* AVB4_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(8, 20), /* AVB4_AVTP_PPS */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN9", 0xe60698c0, "PUD9", 0xe60698e0) {
+ [ 0] = RCAR_GP_PIN(9, 0), /* AVB5_RX_CTL */
+ [ 1] = RCAR_GP_PIN(9, 1), /* AVB5_RXC */
+ [ 2] = RCAR_GP_PIN(9, 2), /* AVB5_RD0 */
+ [ 3] = RCAR_GP_PIN(9, 3), /* AVB5_RD1 */
+ [ 4] = RCAR_GP_PIN(9, 4), /* AVB5_RD2 */
+ [ 5] = RCAR_GP_PIN(9, 5), /* AVB5_RD3 */
+ [ 6] = RCAR_GP_PIN(9, 6), /* AVB5_TX_CTL */
+ [ 7] = RCAR_GP_PIN(9, 7), /* AVB5_TXC */
+ [ 8] = RCAR_GP_PIN(9, 8), /* AVB5_TD0 */
+ [ 9] = RCAR_GP_PIN(9, 9), /* AVB5_TD1 */
+ [10] = RCAR_GP_PIN(9, 10), /* AVB5_TD2 */
+ [11] = RCAR_GP_PIN(9, 11), /* AVB5_TD3 */
+ [12] = RCAR_GP_PIN(9, 12), /* AVB5_TXCREFCLK */
+ [13] = RCAR_GP_PIN(9, 13), /* AVB5_MDIO */
+ [14] = RCAR_GP_PIN(9, 14), /* AVB5_MDC */
+ [15] = RCAR_GP_PIN(9, 15), /* AVB5_MAGIC */
+ [16] = RCAR_GP_PIN(9, 16), /* AVB5_PHY_INT */
+ [17] = RCAR_GP_PIN(9, 17), /* AVB5_LINK */
+ [18] = RCAR_GP_PIN(9, 18), /* AVB5_AVTP_MATCH */
+ [19] = RCAR_GP_PIN(9, 19), /* AVB5_AVTP_CAPTURE */
+ [20] = RCAR_GP_PIN(9, 20), /* AVB5_AVTP_PPS */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations pinmux_ops = {
+ .pin_to_pocctrl = r8a779a0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779a0_pinmux_info = {
+ .name = "r8a779a0_pfc",
+ .ops = &pinmux_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index ac542d278a38..a49f74730272 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -634,6 +634,9 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
}
case PIN_CONFIG_POWER_SOURCE: {
+ int idx = sh_pfc_get_pin_index(pfc, _pin);
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
+ unsigned int lower_voltage;
u32 pocctrl, val;
int bit;
@@ -648,7 +651,10 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
val = sh_pfc_read(pfc, pocctrl);
spin_unlock_irqrestore(&pfc->lock, flags);
- arg = (val & BIT(bit)) ? 3300 : 1800;
+ lower_voltage = (pin->configs & SH_PFC_PIN_VOLTAGE_25_33) ?
+ 2500 : 1800;
+
+ arg = (val & BIT(bit)) ? 3300 : lower_voltage;
break;
}
@@ -702,6 +708,9 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
case PIN_CONFIG_POWER_SOURCE: {
unsigned int mV = pinconf_to_config_argument(configs[i]);
+ int idx = sh_pfc_get_pin_index(pfc, _pin);
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
+ unsigned int lower_voltage;
u32 pocctrl, val;
int bit;
@@ -712,7 +721,10 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
if (WARN(bit < 0, "invalid pin %#x", _pin))
return bit;
- if (mV != 1800 && mV != 3300)
+ lower_voltage = (pin->configs & SH_PFC_PIN_VOLTAGE_25_33) ?
+ 2500 : 1800;
+
+ if (mV != lower_voltage && mV != 3300)
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
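
The POWER_SOURCE hunks above all follow the same pattern: ask the SoC's .pin_to_pocctrl() hook for the POCCTRL register and bit, then read a set bit as 3.3 V and a clear bit as the pin's lower rail, now 2.5 V for pins flagged SH_PFC_PIN_VOLTAGE_25_33 and 1.8 V for everything else. A condensed sketch of the read path (hypothetical helper; locking and the pinconf packing are omitted, though sh_pfc_read() and the ops hook are the real core API):

    static int demo_power_source_mv(struct sh_pfc *pfc, unsigned int pin,
                                    const struct sh_pfc_pin *info)
    {
            u32 pocctrl, val;
            int bit;

            bit = pfc->info->ops->pin_to_pocctrl(pfc, pin, &pocctrl);
            if (bit < 0)
                    return bit;     /* pin has no I/O voltage control */

            val = sh_pfc_read(pfc, pocctrl);
            if (val & BIT(bit))
                    return 3300;    /* a set bit selects the 3.3 V rail */
            return (info->configs & SH_PFC_PIN_VOLTAGE_25_33) ? 2500 : 1800;
    }
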
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index dc484c13f59c..5934faeb23d7 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -31,6 +31,15 @@ enum {
SH_PFC_PIN_CFG_PULL_DOWN)
#define SH_PFC_PIN_CFG_IO_VOLTAGE (1 << 4)
#define SH_PFC_PIN_CFG_DRIVE_STRENGTH (1 << 5)
+
+#define SH_PFC_PIN_VOLTAGE_18_33 (0 << 6)
+#define SH_PFC_PIN_VOLTAGE_25_33 (1 << 6)
+
+#define SH_PFC_PIN_CFG_IO_VOLTAGE_18_33 (SH_PFC_PIN_CFG_IO_VOLTAGE | \
+ SH_PFC_PIN_VOLTAGE_18_33)
+#define SH_PFC_PIN_CFG_IO_VOLTAGE_25_33 (SH_PFC_PIN_CFG_IO_VOLTAGE | \
+ SH_PFC_PIN_VOLTAGE_25_33)
+
#define SH_PFC_PIN_CFG_NO_GPIO (1 << 31)
struct sh_pfc_pin {
@@ -300,7 +309,7 @@ struct sh_pfc_soc_info {
const u16 *pinmux_data;
unsigned int pinmux_data_size;
- u32 unlock_reg;
+ u32 unlock_reg; /* can be literal address or mask */
};
extern const struct sh_pfc_soc_info emev2_pinmux_info;
@@ -331,6 +340,7 @@ extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -451,9 +461,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
#define PORT_GP_1(bank, pin, fn, sfx) PORT_GP_CFG_1(bank, pin, fn, sfx, 0)
-#define PORT_GP_CFG_4(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_2(bank, fn, sfx, cfg) \
PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 1, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 1, fn, sfx, cfg)
+#define PORT_GP_2(bank, fn, sfx) PORT_GP_CFG_2(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_4(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_2(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 3, fn, sfx, cfg)
#define PORT_GP_4(bank, fn, sfx) PORT_GP_CFG_4(bank, fn, sfx, 0)
@@ -572,9 +586,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 29, fn, sfx, cfg)
#define PORT_GP_30(bank, fn, sfx) PORT_GP_CFG_30(bank, fn, sfx, 0)
-#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_31(bank, fn, sfx, cfg) \
PORT_GP_CFG_30(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 30, fn, sfx, cfg)
+#define PORT_GP_31(bank, fn, sfx) PORT_GP_CFG_31(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_31(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 31, fn, sfx, cfg)
#define PORT_GP_32(bank, fn, sfx) PORT_GP_CFG_32(bank, fn, sfx, 0)
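
The PORT_GP_CFG_2 and PORT_GP_CFG_31 additions above slot the new sizes into the existing macro ladder, where each PORT_GP_CFG_n is built from a smaller one and ultimately applies fn once per (bank, pin) pair. A standalone sketch of that expansion pattern, using simplified stand-ins rather than the kernel macros:

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's PORT_GP_CFG_* ladder. */
    #define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) \
            fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
    #define PORT_GP_CFG_2(bank, fn, sfx, cfg) \
            PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), \
            PORT_GP_CFG_1(bank, 1, fn, sfx, cfg)
    #define PIN_NAME(bank, pin, name, sfx, cfg) #name

    int main(void)
    {
            const char *pins[] = { PORT_GP_CFG_2(3, PIN_NAME, unused, 0) };

            printf("%s %s\n", pins[0], pins[1]);    /* prints: GP_3_0 GP_3_1 */
            return 0;
    }
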
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b9ea09fabf84..0cd7f33cdf25 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -58,13 +58,13 @@ static void exynos_irq_mask(struct irq_data *irqd)
unsigned long mask;
unsigned long flags;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
mask |= 1 << irqd->hwirq;
writel(mask, bank->eint_base + reg_mask);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
static void exynos_irq_ack(struct irq_data *irqd)
@@ -97,13 +97,13 @@ static void exynos_irq_unmask(struct irq_data *irqd)
if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
exynos_irq_ack(irqd);
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
mask &= ~(1 << irqd->hwirq);
writel(mask, bank->eint_base + reg_mask);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -169,14 +169,14 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
con |= EXYNOS_PIN_FUNC_EINT << shift;
writel(con, bank->pctl_base + reg_con);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
return 0;
}
@@ -192,14 +192,14 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
con |= EXYNOS_PIN_FUNC_INPUT << shift;
writel(con, bank->pctl_base + reg_con);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
gpiochip_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
}
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 2223ead5bd72..00d77d6946b5 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -145,14 +145,14 @@ static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
val = readl(reg);
val &= ~(mask << shift);
val |= bank->eint_func << shift;
writel(val, reg);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index b8166e3fe4ce..53e2a6412add 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -286,14 +286,14 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
shift = shift * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
val = readl(reg);
val &= ~(mask << shift);
val |= bank->eint_func << shift;
writel(val, reg);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
/*
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 608eb5a07248..376876bd6605 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -400,14 +400,14 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
reg += 4;
}
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
data &= ~(mask << shift);
data |= func->val << shift;
writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
/* enable a specified pinmux by writing to registers */
@@ -451,7 +451,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
width = type->fld_width[cfg_type];
cfg_reg = type->reg_offset[cfg_type];
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
mask = (1 << width) - 1;
shift = pin_offset * width;
@@ -468,7 +468,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
*config = PINCFG_PACK(cfg_type, data);
}
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
return 0;
}
@@ -561,9 +561,9 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
unsigned long flags;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
samsung_gpio_set_value(gc, offset, value);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
}
/* gpiolib gpio_get callback function */
@@ -626,9 +626,9 @@ static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
unsigned long flags;
int ret;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
ret = samsung_gpio_set_direction(gc, offset, true);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
return ret;
}
@@ -640,10 +640,10 @@ static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
unsigned long flags;
int ret;
- spin_lock_irqsave(&bank->slock, flags);
+ raw_spin_lock_irqsave(&bank->slock, flags);
samsung_gpio_set_value(gc, offset, value);
ret = samsung_gpio_set_direction(gc, offset, false);
- spin_unlock_irqrestore(&bank->slock, flags);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
return ret;
}
@@ -1057,7 +1057,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
bank->eint_offset = bdata->eint_offset;
bank->name = bdata->name;
- spin_lock_init(&bank->slock);
+ raw_spin_lock_init(&bank->slock);
bank->drvdata = d;
bank->pin_base = d->nr_pins;
d->nr_pins += bank->nr_pins;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 379f34a9a482..de44f8ec330b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -171,7 +171,7 @@ struct samsung_pin_bank {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range grange;
struct exynos_irq_chip *irq_chip;
- spinlock_t slock;
+ raw_spinlock_t slock;
u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/
};
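
The spinlock_t to raw_spinlock_t conversion in the Samsung driver above matters on PREEMPT_RT, where a plain spinlock_t turns into a sleeping lock: the bank lock is taken from irq_chip callbacks (mask, unmask, set_type) that run in contexts which must not sleep. A minimal sketch of the resulting pattern, with demo names rather than driver code:

    #include <linux/bits.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* A raw spinlock stays a busy-wait lock even on PREEMPT_RT. */
    static DEFINE_RAW_SPINLOCK(demo_lock);
    static u32 demo_shadow;

    static void demo_set_bit(unsigned int bit)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&demo_lock, flags);
            demo_shadow |= BIT(bit);        /* read-modify-write under the lock */
            raw_spin_unlock_irqrestore(&demo_lock, flags);
    }
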
diff --git a/drivers/pinctrl/sirf/Makefile b/drivers/pinctrl/sirf/Makefile
deleted file mode 100644
index 1ab0742075f6..000000000000
--- a/drivers/pinctrl/sirf/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# CSR SiRFsoc pinmux support
-
-obj-y += pinctrl-sirf.o
-obj-y += pinctrl-prima2.o
-obj-y += pinctrl-atlas6.o
-obj-y += pinctrl-atlas7.o
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
deleted file mode 100644
index ab35d59bfa04..000000000000
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ /dev/null
@@ -1,1137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pinctrl pads, groups, functions for CSR SiRFatlasVI
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/bitops.h>
-
-#include "pinctrl-sirf.h"
-
-/*
- * pad list for the pinmux subsystem
- * refer to atlasVI_io_table_v0.93.xls
- */
-static const struct pinctrl_pin_desc sirfsoc_pads[] = {
- PINCTRL_PIN(0, "gpio0-0"),
- PINCTRL_PIN(1, "gpio0-1"),
- PINCTRL_PIN(2, "gpio0-2"),
- PINCTRL_PIN(3, "gpio0-3"),
- PINCTRL_PIN(4, "pwm0"),
- PINCTRL_PIN(5, "pwm1"),
- PINCTRL_PIN(6, "pwm2"),
- PINCTRL_PIN(7, "pwm3"),
- PINCTRL_PIN(8, "warm_rst_b"),
- PINCTRL_PIN(9, "odo_0"),
- PINCTRL_PIN(10, "odo_1"),
- PINCTRL_PIN(11, "dr_dir"),
- PINCTRL_PIN(12, "rts_0"),
- PINCTRL_PIN(13, "scl_1"),
- PINCTRL_PIN(14, "ntrst"),
- PINCTRL_PIN(15, "sda_1"),
- PINCTRL_PIN(16, "x_ldd[16]"),
- PINCTRL_PIN(17, "x_ldd[17]"),
- PINCTRL_PIN(18, "x_ldd[18]"),
- PINCTRL_PIN(19, "x_ldd[19]"),
- PINCTRL_PIN(20, "x_ldd[20]"),
- PINCTRL_PIN(21, "x_ldd[21]"),
- PINCTRL_PIN(22, "x_ldd[22]"),
- PINCTRL_PIN(23, "x_ldd[23]"),
- PINCTRL_PIN(24, "gps_sgn"),
- PINCTRL_PIN(25, "gps_mag"),
- PINCTRL_PIN(26, "gps_clk"),
- PINCTRL_PIN(27, "sd_cd_b_2"),
- PINCTRL_PIN(28, "sd_vcc_on_2"),
- PINCTRL_PIN(29, "sd_wp_b_2"),
- PINCTRL_PIN(30, "sd_clk_3"),
- PINCTRL_PIN(31, "sd_cmd_3"),
-
- PINCTRL_PIN(32, "x_sd_dat_3[0]"),
- PINCTRL_PIN(33, "x_sd_dat_3[1]"),
- PINCTRL_PIN(34, "x_sd_dat_3[2]"),
- PINCTRL_PIN(35, "x_sd_dat_3[3]"),
- PINCTRL_PIN(36, "usb_clk"),
- PINCTRL_PIN(37, "usb_dir"),
- PINCTRL_PIN(38, "usb_nxt"),
- PINCTRL_PIN(39, "usb_stp"),
- PINCTRL_PIN(40, "usb_dat[7]"),
- PINCTRL_PIN(41, "usb_dat[6]"),
- PINCTRL_PIN(42, "x_cko_1"),
- PINCTRL_PIN(43, "spi_clk_1"),
- PINCTRL_PIN(44, "spi_dout_1"),
- PINCTRL_PIN(45, "spi_din_1"),
- PINCTRL_PIN(46, "spi_en_1"),
- PINCTRL_PIN(47, "x_txd_1"),
- PINCTRL_PIN(48, "x_txd_2"),
- PINCTRL_PIN(49, "x_rxd_1"),
- PINCTRL_PIN(50, "x_rxd_2"),
- PINCTRL_PIN(51, "x_usclk_0"),
- PINCTRL_PIN(52, "x_utxd_0"),
- PINCTRL_PIN(53, "x_urxd_0"),
- PINCTRL_PIN(54, "x_utfs_0"),
- PINCTRL_PIN(55, "x_urfs_0"),
- PINCTRL_PIN(56, "usb_dat5"),
- PINCTRL_PIN(57, "usb_dat4"),
- PINCTRL_PIN(58, "usb_dat3"),
- PINCTRL_PIN(59, "usb_dat2"),
- PINCTRL_PIN(60, "usb_dat1"),
- PINCTRL_PIN(61, "usb_dat0"),
- PINCTRL_PIN(62, "x_ldd[14]"),
- PINCTRL_PIN(63, "x_ldd[15]"),
-
- PINCTRL_PIN(64, "x_gps_gpio"),
- PINCTRL_PIN(65, "x_ldd[13]"),
- PINCTRL_PIN(66, "x_df_we_b"),
- PINCTRL_PIN(67, "x_df_re_b"),
- PINCTRL_PIN(68, "x_txd_0"),
- PINCTRL_PIN(69, "x_rxd_0"),
- PINCTRL_PIN(70, "x_l_lck"),
- PINCTRL_PIN(71, "x_l_fck"),
- PINCTRL_PIN(72, "x_l_de"),
- PINCTRL_PIN(73, "x_ldd[0]"),
- PINCTRL_PIN(74, "x_ldd[1]"),
- PINCTRL_PIN(75, "x_ldd[2]"),
- PINCTRL_PIN(76, "x_ldd[3]"),
- PINCTRL_PIN(77, "x_ldd[4]"),
- PINCTRL_PIN(78, "x_cko_0"),
- PINCTRL_PIN(79, "x_ldd[5]"),
- PINCTRL_PIN(80, "x_ldd[6]"),
- PINCTRL_PIN(81, "x_ldd[7]"),
- PINCTRL_PIN(82, "x_ldd[8]"),
- PINCTRL_PIN(83, "x_ldd[9]"),
- PINCTRL_PIN(84, "x_ldd[10]"),
- PINCTRL_PIN(85, "x_ldd[11]"),
- PINCTRL_PIN(86, "x_ldd[12]"),
- PINCTRL_PIN(87, "x_vip_vsync"),
- PINCTRL_PIN(88, "x_vip_hsync"),
- PINCTRL_PIN(89, "x_vip_pxclk"),
- PINCTRL_PIN(90, "x_sda_0"),
- PINCTRL_PIN(91, "x_scl_0"),
- PINCTRL_PIN(92, "x_df_ry_by"),
- PINCTRL_PIN(93, "x_df_cs_b[1]"),
- PINCTRL_PIN(94, "x_df_cs_b[0]"),
- PINCTRL_PIN(95, "x_l_pclk"),
-
- PINCTRL_PIN(96, "x_df_dqs"),
- PINCTRL_PIN(97, "x_df_wp_b"),
- PINCTRL_PIN(98, "ac97_sync"),
- PINCTRL_PIN(99, "ac97_bit_clk "),
- PINCTRL_PIN(100, "ac97_dout"),
- PINCTRL_PIN(101, "ac97_din"),
- PINCTRL_PIN(102, "x_rtc_io"),
-
- PINCTRL_PIN(103, "x_usb1_dp"),
- PINCTRL_PIN(104, "x_usb1_dn"),
-};
-
-static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
- BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
- BIT(16) | BIT(17) | BIT(18) | BIT(19) |
- BIT(20) | BIT(21) | BIT(22) | BIT(31),
- },
-};
-
-static const struct sirfsoc_padmux lcd_16bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
- .muxmask = lcd_16bits_sirfsoc_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = 0,
-};
-
-static const unsigned lcd_16bits_pins[] = { 62, 63, 65, 70, 71, 72, 73, 74, 75,
- 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 };
-
-static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
- BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
- BIT(16) | BIT(17) | BIT(18) | BIT(19) |
- BIT(20) | BIT(21) | BIT(22) | BIT(31),
- }, {
- .group = 1,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 0,
- .mask = BIT(16) | BIT(17),
- },
-};
-
-static const struct sirfsoc_padmux lcd_18bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
- .muxmask = lcd_18bits_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4) | BIT(15),
- .funcval = 0,
-};
-
-static const unsigned lcd_18bits_pins[] = { 16, 17, 62, 63, 65, 70, 71, 72, 73,
- 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 };
-
-static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
- BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
- BIT(16) | BIT(17) | BIT(18) | BIT(19) |
- BIT(20) | BIT(21) | BIT(22) | BIT(31),
- }, {
- .group = 1,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 0,
- .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) |
- BIT(21) | BIT(22) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux lcd_24bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
- .muxmask = lcd_24bits_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4) | BIT(15),
- .funcval = 0,
-};
-
-static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 62,
- 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84,
- 85, 86, 95};
-
-static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) |
- BIT(11) | BIT(12) | BIT(13) | BIT(15) | BIT(16) |
- BIT(17) | BIT(18) | BIT(19) |
- BIT(20) | BIT(21) | BIT(22) | BIT(31),
- }, {
- .group = 1,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 0,
- .mask = BIT(8),
- },
-};
-
-static const struct sirfsoc_padmux lcdrom_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
- .muxmask = lcdrom_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = BIT(4),
-};
-
-static const unsigned lcdrom_pins[] = { 8, 62, 63, 65, 70, 71, 72, 73, 74, 75,
- 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 };
-
-static const struct sirfsoc_muxmask uart0_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(12),
- }, {
- .group = 1,
- .mask = BIT(23),
- }, {
- .group = 2,
- .mask = BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux uart0_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart0_muxmask),
- .muxmask = uart0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(9),
- .funcval = BIT(9),
-};
-
-static const unsigned uart0_pins[] = { 12, 55, 68, 69 };
-
-static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask),
- .muxmask = uart0_nostreamctrl_muxmask,
-};
-
-static const unsigned uart0_nostreamctrl_pins[] = { 68, 69 };
-
-static const struct sirfsoc_muxmask uart1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(15) | BIT(17),
- },
-};
-
-static const struct sirfsoc_padmux uart1_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart1_muxmask),
- .muxmask = uart1_muxmask,
-};
-
-static const unsigned uart1_pins[] = { 47, 49 };
-
-static const struct sirfsoc_muxmask uart2_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(10) | BIT(14),
- }, {
- .group = 1,
- .mask = BIT(16) | BIT(18),
- },
-};
-
-static const struct sirfsoc_padmux uart2_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart2_muxmask),
- .muxmask = uart2_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(10),
- .funcval = BIT(10),
-};
-
-static const unsigned uart2_pins[] = { 10, 14, 48, 50 };
-
-static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(16) | BIT(18),
- },
-};
-
-static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask),
- .muxmask = uart2_nostreamctrl_muxmask,
-};
-
-static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 };
-
-static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 1,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc3_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
- .muxmask = sdmmc3_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(7),
- .funcval = 0,
-};
-
-static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 };
-
-static const struct sirfsoc_muxmask spi0_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(30),
- }, {
- .group = 1,
- .mask = BIT(0) | BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux spi0_padmux = {
- .muxmask_counts = ARRAY_SIZE(spi0_muxmask),
- .muxmask = spi0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(7),
- .funcval = BIT(7),
-};
-
-static const unsigned spi0_pins[] = { 30, 32, 34, 35 };
-
-static const struct sirfsoc_muxmask cko1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(10),
- },
-};
-
-static const struct sirfsoc_padmux cko1_padmux = {
- .muxmask_counts = ARRAY_SIZE(cko1_muxmask),
- .muxmask = cko1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = 0,
-};
-
-static const unsigned cko1_pins[] = { 42 };
-
-static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(10),
- },
-};
-
-static const struct sirfsoc_padmux i2s_mclk_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask),
- .muxmask = i2s_mclk_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = BIT(3),
-};
-
-static const unsigned i2s_mclk_pins[] = { 42 };
-
-static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19),
- },
-};
-
-static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask),
- .muxmask = i2s_ext_clk_input_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(2),
- .funcval = BIT(2),
-};
-
-static const unsigned i2s_ext_clk_input_pins[] = { 51 };
-
-static const struct sirfsoc_muxmask i2s_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(2) | BIT(3) | BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux i2s_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_muxmask),
- .muxmask = i2s_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
-};
-
-static const unsigned i2s_pins[] = { 98, 99, 100, 101 };
-
-static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(2) | BIT(3) | BIT(4),
- },
-};
-
-static const struct sirfsoc_padmux i2s_no_din_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask),
- .muxmask = i2s_no_din_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
-};
-
-static const unsigned i2s_no_din_pins[] = { 98, 99, 100 };
-
-static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(2) | BIT(3) | BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux i2s_6chn_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask),
- .muxmask = i2s_6chn_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(9),
- .funcval = BIT(1) | BIT(9),
-};
-
-static const unsigned i2s_6chn_pins[] = { 52, 55, 98, 99, 100, 101 };
-
-static const struct sirfsoc_muxmask ac97_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(2) | BIT(3) | BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux ac97_padmux = {
- .muxmask_counts = ARRAY_SIZE(ac97_muxmask),
- .muxmask = ac97_muxmask,
-};
-
-static const unsigned ac97_pins[] = { 98, 99, 100, 101 };
-
-static const struct sirfsoc_muxmask spi1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux spi1_padmux = {
- .muxmask_counts = ARRAY_SIZE(spi1_muxmask),
- .muxmask = spi1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(16),
- .funcval = 0,
-};
-
-static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
-
-static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc1_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
- .muxmask = sdmmc1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5),
- .funcval = BIT(5),
-};
-
-static const unsigned sdmmc1_pins[] = { 66, 67 };
-
-static const struct sirfsoc_muxmask gps_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(24) | BIT(25) | BIT(26),
- },
-};
-
-static const struct sirfsoc_padmux gps_padmux = {
- .muxmask_counts = ARRAY_SIZE(gps_muxmask),
- .muxmask = gps_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(13),
- .funcval = 0,
-};
-
-static const unsigned gps_pins[] = { 24, 25, 26 };
-
-static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(24) | BIT(25) | BIT(26),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc5_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
- .muxmask = sdmmc5_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(13),
- .funcval = BIT(13),
-};
-
-static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
-
-static const struct sirfsoc_muxmask usp0_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux usp0_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_muxmask),
- .muxmask = usp0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(9),
- .funcval = 0,
-};
-
-static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
-
-static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
- },
-};
-
-static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
- .muxmask = usp0_only_utfs_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(6),
- .funcval = 0,
-};
-
-static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
-
-static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
- .muxmask = usp0_only_urfs_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(9),
- .funcval = 0,
-};
-
-static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
-
-static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(20) | BIT(21),
- },
-};
-
-static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
- .muxmask = usp0_uart_nostreamctrl_muxmask,
-};
-
-static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
-static const struct sirfsoc_muxmask usp1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(15),
- }, {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux usp1_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp1_muxmask),
- .muxmask = usp1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(16),
- .funcval = BIT(16),
-};
-
-static const unsigned usp1_pins[] = { 15, 43, 44, 45, 46 };
-
-static const struct sirfsoc_muxmask usp1_uart_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(12) | BIT(13),
- },
-};
-
-static const struct sirfsoc_padmux usp1_uart_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp1_uart_nostreamctrl_muxmask),
- .muxmask = usp1_uart_nostreamctrl_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(16),
- .funcval = BIT(16),
-};
-
-static const unsigned usp1_uart_nostreamctrl_pins[] = { 44, 45 };
-
-static const struct sirfsoc_muxmask nand_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30),
- }, {
- .group = 3,
- .mask = BIT(0) | BIT(1),
- },
-};
-
-static const struct sirfsoc_padmux nand_padmux = {
- .muxmask_counts = ARRAY_SIZE(nand_muxmask),
- .muxmask = nand_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5) | BIT(19),
- .funcval = 0,
-};
-
-static const unsigned nand_pins[] = { 66, 67, 92, 93, 94, 96, 97 };
-
-static const struct sirfsoc_muxmask sdmmc0_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(1),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc0_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc0_muxmask),
- .muxmask = sdmmc0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5) | BIT(19),
- .funcval = BIT(19),
-};
-
-static const unsigned sdmmc0_pins[] = { 97 };
-
-static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(27) | BIT(28) | BIT(29),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc2_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
- .muxmask = sdmmc2_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(11),
- .funcval = 0,
-};
-
-static const unsigned sdmmc2_pins[] = { 27, 28, 29 };
-
-static const struct sirfsoc_muxmask sdmmc2_nowp_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(27) | BIT(28),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc2_nowp_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc2_nowp_muxmask),
- .muxmask = sdmmc2_nowp_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(11),
- .funcval = 0,
-};
-
-static const unsigned sdmmc2_nowp_pins[] = { 27, 28 };
-
-static const struct sirfsoc_muxmask cko0_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux cko0_padmux = {
- .muxmask_counts = ARRAY_SIZE(cko0_muxmask),
- .muxmask = cko0_muxmask,
-};
-
-static const unsigned cko0_pins[] = { 78 };
-
-static const struct sirfsoc_muxmask vip_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(4) | BIT(5) | BIT(6) | BIT(8) | BIT(9)
- | BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28) |
- BIT(29),
- },
-};
-
-static const struct sirfsoc_padmux vip_padmux = {
- .muxmask_counts = ARRAY_SIZE(vip_muxmask),
- .muxmask = vip_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(18),
- .funcval = BIT(18),
-};
-
-static const unsigned vip_pins[] = { 36, 37, 38, 40, 41, 56, 57, 58, 59,
- 60, 61 };
-
-static const struct sirfsoc_muxmask vip_noupli_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20)
- | BIT(21) | BIT(22) | BIT(23),
- }, {
- .group = 2,
- .mask = BIT(23) | BIT(24) | BIT(25),
- },
-};
-
-static const struct sirfsoc_padmux vip_noupli_padmux = {
- .muxmask_counts = ARRAY_SIZE(vip_noupli_muxmask),
- .muxmask = vip_noupli_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(15),
- .funcval = BIT(15),
-};
-
-static const unsigned vip_noupli_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23,
- 87, 88, 89 };
-
-static const struct sirfsoc_muxmask i2c0_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(26) | BIT(27),
- },
-};
-
-static const struct sirfsoc_padmux i2c0_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2c0_muxmask),
- .muxmask = i2c0_muxmask,
-};
-
-static const unsigned i2c0_pins[] = { 90, 91 };
-
-static const struct sirfsoc_muxmask i2c1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(13) | BIT(15),
- },
-};
-
-static const struct sirfsoc_padmux i2c1_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
- .muxmask = i2c1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(16),
- .funcval = 0,
-};
-
-static const unsigned i2c1_pins[] = { 13, 15 };
-
-static const struct sirfsoc_muxmask pwm0_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(4),
- },
-};
-
-static const struct sirfsoc_padmux pwm0_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
- .muxmask = pwm0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(12),
- .funcval = 0,
-};
-
-static const unsigned pwm0_pins[] = { 4 };
-
-static const struct sirfsoc_muxmask pwm1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux pwm1_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm1_muxmask),
- .muxmask = pwm1_muxmask,
-};
-
-static const unsigned pwm1_pins[] = { 5 };
-
-static const struct sirfsoc_muxmask pwm2_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(6),
- },
-};
-
-static const struct sirfsoc_padmux pwm2_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm2_muxmask),
- .muxmask = pwm2_muxmask,
-};
-
-static const unsigned pwm2_pins[] = { 6 };
-
-static const struct sirfsoc_muxmask pwm3_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(7),
- },
-};
-
-static const struct sirfsoc_padmux pwm3_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm3_muxmask),
- .muxmask = pwm3_muxmask,
-};
-
-static const unsigned pwm3_pins[] = { 7 };
-
-static const struct sirfsoc_muxmask pwm4_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux pwm4_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm4_muxmask),
- .muxmask = pwm4_muxmask,
-};
-
-static const unsigned pwm4_pins[] = { 78 };
-
-static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(8),
- },
-};
-
-static const struct sirfsoc_padmux warm_rst_padmux = {
- .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
- .muxmask = warm_rst_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = 0,
-};
-
-static const unsigned warm_rst_pins[] = { 8 };
-
-static const struct sirfsoc_muxmask usb0_upli_drvbus_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8)
- | BIT(9) | BIT(24) | BIT(25) | BIT(26) |
- BIT(27) | BIT(28) | BIT(29),
- },
-};
-static const struct sirfsoc_padmux usb0_upli_drvbus_padmux = {
- .muxmask_counts = ARRAY_SIZE(usb0_upli_drvbus_muxmask),
- .muxmask = usb0_upli_drvbus_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(18),
- .funcval = 0,
-};
-
-static const unsigned usb0_upli_drvbus_pins[] = { 36, 37, 38, 39, 40,
- 41, 56, 57, 58, 59, 60, 61 };
-
-static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(28),
- },
-};
-
-static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
- .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
- .muxmask = usb1_utmi_drvbus_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(11),
- .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
-};
-
-static const unsigned usb1_utmi_drvbus_pins[] = { 28 };
-
-static const struct sirfsoc_padmux usb1_dp_dn_padmux = {
- .muxmask_counts = 0,
- .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
- .funcmask = BIT(2),
- .funcval = BIT(2),
-};
-
-static const unsigned usb1_dp_dn_pins[] = { 103, 104 };
-
-static const struct sirfsoc_padmux uart1_route_io_usb1_padmux = {
- .muxmask_counts = 0,
- .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
- .funcmask = BIT(2),
- .funcval = 0,
-};
-
-static const unsigned uart1_route_io_usb1_pins[] = { 103, 104 };
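/* Note: usb1_dp_dn and uart1_route_io_usb1 above program the same
 * share-register bit over the same two pads (103, 104): with BIT(2)
 * set the pads carry USB1 D+/D-, cleared they route UART1 instead.
 */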
-
-static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(9) | BIT(10) | BIT(11),
- },
-};
-
-static const struct sirfsoc_padmux pulse_count_padmux = {
- .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask),
- .muxmask = pulse_count_muxmask,
-};
-
-static const unsigned pulse_count_pins[] = { 9, 10, 11 };
-
-static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
- SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins),
- SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins),
- SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
- SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
- SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
- SIRFSOC_PIN_GROUP("uart0_nostreamctrlgrp", uart0_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
- SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
- SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
- SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
- usp0_uart_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
- SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
- SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
- SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
- usp1_uart_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
- SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
- SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
- SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins),
- SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins),
- SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins),
- SIRFSOC_PIN_GROUP("pwm4grp", pwm4_pins),
- SIRFSOC_PIN_GROUP("vipgrp", vip_pins),
- SIRFSOC_PIN_GROUP("vip_noupligrp", vip_noupli_pins),
- SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins),
- SIRFSOC_PIN_GROUP("cko0grp", cko0_pins),
- SIRFSOC_PIN_GROUP("cko1grp", cko1_pins),
- SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins),
- SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins),
- SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins),
- SIRFSOC_PIN_GROUP("sdmmc2_nowpgrp", sdmmc2_nowp_pins),
- SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins),
- SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
- SIRFSOC_PIN_GROUP("usb0_upli_drvbusgrp", usb0_upli_drvbus_pins),
- SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
- SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
- SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
- SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
- SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins),
- SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins),
- SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
- SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins),
- SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins),
- SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
- SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
- SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
- SIRFSOC_PIN_GROUP("spi1grp", spi1_pins),
- SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
-};
-
-static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" };
-static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
-static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
-static const char * const lcdromgrp[] = { "lcdrom_grp" };
-static const char * const uart0grp[] = { "uart0grp" };
-static const char * const uart0_nostreamctrlgrp[] = { "uart0_nostreamctrlgrp" };
-static const char * const uart1grp[] = { "uart1grp" };
-static const char * const uart2grp[] = { "uart2grp" };
-static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
-static const char * const usp0_uart_nostreamctrl_grp[] = {
- "usp0_uart_nostreamctrl_grp" };
-static const char * const usp0grp[] = { "usp0grp" };
-static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
-static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
-
-static const char * const usp1grp[] = { "usp1grp" };
-static const char * const usp1_uart_nostreamctrl_grp[] = {
- "usp1_uart_nostreamctrl_grp" };
-static const char * const i2c0grp[] = { "i2c0grp" };
-static const char * const i2c1grp[] = { "i2c1grp" };
-static const char * const pwm0grp[] = { "pwm0grp" };
-static const char * const pwm1grp[] = { "pwm1grp" };
-static const char * const pwm2grp[] = { "pwm2grp" };
-static const char * const pwm3grp[] = { "pwm3grp" };
-static const char * const pwm4grp[] = { "pwm4grp" };
-static const char * const vipgrp[] = { "vipgrp" };
-static const char * const vip_noupligrp[] = { "vip_noupligrp" };
-static const char * const warm_rstgrp[] = { "warm_rstgrp" };
-static const char * const cko0grp[] = { "cko0grp" };
-static const char * const cko1grp[] = { "cko1grp" };
-static const char * const sdmmc0grp[] = { "sdmmc0grp" };
-static const char * const sdmmc1grp[] = { "sdmmc1grp" };
-static const char * const sdmmc2grp[] = { "sdmmc2grp" };
-static const char * const sdmmc3grp[] = { "sdmmc3grp" };
-static const char * const sdmmc5grp[] = { "sdmmc5grp" };
-static const char * const sdmmc2_nowpgrp[] = { "sdmmc2_nowpgrp" };
-static const char * const usb0_upli_drvbusgrp[] = { "usb0_upli_drvbusgrp" };
-static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
-static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
-static const char * const
- uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
-static const char * const pulse_countgrp[] = { "pulse_countgrp" };
-static const char * const i2smclkgrp[] = { "i2smclkgrp" };
-static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" };
-static const char * const i2sgrp[] = { "i2sgrp" };
-static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" };
-static const char * const i2s_6chngrp[] = { "i2s_6chngrp" };
-static const char * const ac97grp[] = { "ac97grp" };
-static const char * const nandgrp[] = { "nandgrp" };
-static const char * const spi0grp[] = { "spi0grp" };
-static const char * const spi1grp[] = { "spi1grp" };
-static const char * const gpsgrp[] = { "gpsgrp" };
-
-static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
- SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
- SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
- SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", uart0_nostreamctrlgrp,
- uart0_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
- SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
- SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl",
- uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
- usp0_uart_nostreamctrl_grp,
- usp0_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp,
- usp0_only_utfs_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp,
- usp0_only_urfs_padmux),
- SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
- SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
- usp1_uart_nostreamctrl_grp,
- usp1_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
- SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
- SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
- SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux),
- SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux),
- SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux),
- SIRFSOC_PMX_FUNCTION("pwm4", pwm4grp, pwm4_padmux),
- SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux),
- SIRFSOC_PMX_FUNCTION("vip_noupli", vip_noupligrp, vip_noupli_padmux),
- SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux),
- SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux),
- SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc2_nowp",
- sdmmc2_nowpgrp, sdmmc2_nowp_padmux),
- SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus",
- usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus",
- usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
- SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1",
- uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
- SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp,
- i2s_ext_clk_input_padmux),
- SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux),
- SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
- SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
- SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
- SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux),
- SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux),
-};
-
-struct sirfsoc_pinctrl_data atlas6_pinctrl_data = {
- (struct pinctrl_pin_desc *)sirfsoc_pads,
- ARRAY_SIZE(sirfsoc_pads),
- (struct sirfsoc_pin_group *)sirfsoc_pin_groups,
- ARRAY_SIZE(sirfsoc_pin_groups),
- (struct sirfsoc_pmx_func *)sirfsoc_pmx_functions,
- ARRAY_SIZE(sirfsoc_pmx_functions),
-};
-
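The aggregate above hands the atlas6 pad descriptors, groups and functions to
the shared SiRF pinmux core. A minimal registration sketch, assuming the
struct's fields are named pads/pads_cnt as in the atlas7 variant further
below (devm_pinctrl_register() is the generic pinctrl core API; the
pctlops/pmxops wiring is omitted):

static int atlas6_pinmux_register_sketch(struct device *dev)
{
	static struct pinctrl_desc desc = {
		.name	= "pinctrl-atlas6",
		.owner	= THIS_MODULE,
	};
	struct pinctrl_dev *pctl;

	/* Field names assumed from the atlas7 analogue of this struct. */
	desc.pins = atlas6_pinctrl_data.pads;
	desc.npins = atlas6_pinctrl_data.pads_cnt;

	pctl = devm_pinctrl_register(dev, &desc, NULL);
	return PTR_ERR_OR_ZERO(pctl);
}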
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
deleted file mode 100644
index e54a6e3cafd2..000000000000
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ /dev/null
@@ -1,6157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pinctrl pads, groups and functions for CSR SiRFatlasVII
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/gpio/driver.h>
-
-/* Definition of Pad&Mux Properties */
-#define N 0
-
-/* The bank that contains the input-disable registers */
-#define BANK_DS 0
-
-/* Clear Register offset */
-#define CLR_REG(r) ((r) + 0x04)
-
-/* Definition of multiple function select register */
-#define FUNC_CLEAR_MASK 0x7
-#define FUNC_GPIO 0
-#define FUNC_ANALOGUE 0x8
-#define ANA_CLEAR_MASK 0x1
-
-/* The Atlas7 Pad Type List */
-enum atlas7_pad_type {
- PAD_T_4WE_PD = 0, /* ZIO_PAD3V_4WE_PD */
- PAD_T_4WE_PU, /* ZIO_PAD3V_4WE_PU */
- PAD_T_16ST, /* ZIO_PAD3V_SDCLK_PD */
- PAD_T_M31_0204_PD, /* PRDW0204SDGZ_M311311_PD */
- PAD_T_M31_0204_PU, /* PRDW0204SDGZ_M311311_PU */
- PAD_T_M31_0610_PD, /* PRUW0610SDGZ_M311311_PD */
- PAD_T_M31_0610_PU, /* PRUW0610SDGZ_M311311_PU */
- PAD_T_AD, /* PRDWUWHW08SCDG_HZ */
-};
-
-/* Raw values of Drive-Strength Bits */
-#define DS3 BIT(3)
-#define DS2 BIT(2)
-#define DS1 BIT(1)
-#define DS0 BIT(0)
-#define DSZ 0
-
-/* Drive-Strength Intermediate Values */
-#define DS_NULL -1
-#define DS_1BIT_IM_VAL DS0
-#define DS_1BIT_MASK 0x1
-#define DS_2BIT_IM_VAL (DS1 | DS0)
-#define DS_2BIT_MASK 0x3
-#define DS_4BIT_IM_VAL (DS3 | DS2 | DS1 | DS0)
-#define DS_4BIT_MASK 0xf
-
-/* The Drive-Strength of 4WE Pad DS1 DS0 CO */
-#define DS_4WE_3 (DS1 | DS0) /* 1 1 3 */
-#define DS_4WE_2 (DS1) /* 1 0 2 */
-#define DS_4WE_1 (DS0) /* 0 1 1 */
-#define DS_4WE_0 (DSZ) /* 0 0 0 */
-
-/* The Drive-Strength of 16ST Pad DS3 DS2 DS1 DS0 CO */
-#define DS_16ST_15 (DS3 | DS2 | DS1 | DS0) /* 1 1 1 1 15 */
-#define DS_16ST_14 (DS3 | DS2 | DS0) /* 1 1 0 1 13 */
-#define DS_16ST_13 (DS3 | DS2 | DS1) /* 1 1 1 0 14 */
-#define DS_16ST_12 (DS2 | DS1 | DS0) /* 0 1 1 1 7 */
-#define DS_16ST_11 (DS2 | DS0) /* 0 1 0 1 5 */
-#define DS_16ST_10 (DS3 | DS1 | DS0) /* 1 0 1 1 11 */
-#define DS_16ST_9 (DS3 | DS0) /* 1 0 0 1 9 */
-#define DS_16ST_8 (DS1 | DS0) /* 0 0 1 1 3 */
-#define DS_16ST_7 (DS2 | DS1) /* 0 1 1 0 6 */
-#define DS_16ST_6 (DS3 | DS2) /* 1 1 0 0 12 */
-#define DS_16ST_5 (DS2) /* 0 1 0 0 4 */
-#define DS_16ST_4 (DS3 | DS1) /* 1 0 1 0 10 */
-#define DS_16ST_3 (DS1) /* 0 0 1 0 2 */
-#define DS_16ST_2 (DS0) /* 0 0 0 1 1 */
-#define DS_16ST_1 (DSZ) /* 0 0 0 0 0 */
-#define DS_16ST_0 (DS3) /* 1 0 0 0 8 */
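The DS_16ST_<n> macros appear to enumerate the sixteen selectable strength
levels, while the raw register codes (the last column of each comment) are
not in numeric order, so level-to-code conversion is just a table walk. A
sketch, with a made-up array name:

/* Logical drive-strength level (0..15) -> raw register code. */
static const u8 ds_16st_raw_sketch[16] = {
	DS_16ST_0,  DS_16ST_1,  DS_16ST_2,  DS_16ST_3,
	DS_16ST_4,  DS_16ST_5,  DS_16ST_6,  DS_16ST_7,
	DS_16ST_8,  DS_16ST_9,  DS_16ST_10, DS_16ST_11,
	DS_16ST_12, DS_16ST_13, DS_16ST_14, DS_16ST_15,
};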
-
-/* The Drive-Strength of M31 Pad DS0 CO */
-#define DS_M31_0 (DSZ) /* 0 0 */
-#define DS_M31_1 (DS0) /* 1 1 */
-
-/* Raw values of Pull Option Bits */
-#define PUN BIT(1)
-#define PD BIT(0)
-#define PE BIT(0)
-#define PZ 0
-
-/* Definition of Pull Types */
-#define PULL_UP 0
-#define HIGH_HYSTERESIS 1
-#define HIGH_Z 2
-#define PULL_DOWN 3
-#define PULL_DISABLE 4
-#define PULL_ENABLE 5
-#define PULL_UNKNOWN -1
-
-/* Pull Options for 4WE Pad PUN PD CO */
-#define P4WE_PULL_MASK 0x3
-#define P4WE_PULL_DOWN (PUN | PD) /* 1 1 3 */
-#define P4WE_HIGH_Z (PUN) /* 1 0 2 */
-#define P4WE_HIGH_HYSTERESIS (PD) /* 0 1 1 */
-#define P4WE_PULL_UP (PZ) /* 0 0 0 */
-
-/* Pull Options for 16ST Pad PUN PD CO */
-#define P16ST_PULL_MASK 0x3
-#define P16ST_PULL_DOWN (PUN | PD) /* 1 1 3 */
-#define P16ST_HIGH_Z (PUN) /* 1 0 2 */
-#define P16ST_PULL_UP (PZ) /* 0 0 0 */
-
-/* Pull Options for M31 Pad PE */
-#define PM31_PULL_MASK 0x1
-#define PM31_PULL_ENABLED (PE) /* 1 */
-#define PM31_PULL_DISABLED (PZ) /* 0 */
-
-/* Pull Options for A/D Pad PUN PD CO */
-#define PANGD_PULL_MASK 0x3
-#define PANGD_PULL_DOWN (PUN | PD) /* 1 1 3 */
-#define PANGD_HIGH_Z (PUN) /* 1 0 2 */
-#define PANGD_PULL_UP (PZ) /* 0 0 0 */
-
-/* Definition of Input Disable */
-#define DI_MASK 0x1
-#define DI_DISABLE 0x1
-#define DI_ENABLE 0x0
-
-/* Definition of Input Disable Value */
-#define DIV_MASK 0x1
-#define DIV_DISABLE 0x1
-#define DIV_ENABLE 0x0
-
-/* Number of Function input disable registers */
-#define NUM_OF_IN_DISABLE_REG 0x2
-
-/* Offset of Function input disable registers */
-#define IN_DISABLE_0_REG_SET 0x0A00
-#define IN_DISABLE_0_REG_CLR 0x0A04
-#define IN_DISABLE_1_REG_SET 0x0A08
-#define IN_DISABLE_1_REG_CLR 0x0A0C
-#define IN_DISABLE_VAL_0_REG_SET 0x0A80
-#define IN_DISABLE_VAL_0_REG_CLR 0x0A84
-#define IN_DISABLE_VAL_1_REG_SET 0x0A88
-#define IN_DISABLE_VAL_1_REG_CLR 0x0A8C
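The *_SET/*_CLR pairing (with CLR always at +0x04, matching CLR_REG() above)
suggests write-one-to-set / write-one-to-clear semantics, which avoids
read-modify-write cycles on shared registers. A sketch under that
assumption, with a made-up helper name:

static void atlas7_input_disable_sketch(void __iomem *regs,
					u32 bit, bool disable)
{
	/* Write-1-to-set or write-1-to-clear; other bits are untouched. */
	writel(BIT(bit), regs + (disable ? IN_DISABLE_0_REG_SET
					 : IN_DISABLE_0_REG_CLR));
}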
-
-/* Offset of the SDIO9SEL */
-#define SYS2PCI_SDIO9SEL 0x14
-
-struct dt_params {
- const char *property;
- int value;
-};
-
-/**
- * struct atlas7_pad_config - Atlas7 Pad Configuration
- * @id: The ID of this Pad.
- * @type: The type of this Pad.
- * @mux_reg: The mux register offset.
- * This register selects the function routed to the pad.
- * @pupd_reg: The pull-up/down register offset.
- * @drvstr_reg: The drive-strength register offset.
- * @ad_ctrl_reg: The Analogue/Digital Control register offset.
- * @mux_bit: The start bit of the mux register.
- * @pupd_bit: The start bit of the pull-up/down register.
- * @drvstr_bit: The start bit of the drive-strength register.
- * @ad_ctrl_bit: The start bit of the analogue/digital register.
- */
-struct atlas7_pad_config {
- const u32 id;
- u32 type;
- u32 mux_reg;
- u32 pupd_reg;
- u32 drvstr_reg;
- u32 ad_ctrl_reg;
- /* bits in register */
- u8 mux_bit;
- u8 pupd_bit;
- u8 drvstr_bit;
- u8 ad_ctrl_bit;
-};
-
-#define PADCONF(pad, t, mr, pr, dsr, adr, mb, pb, dsb, adb) \
- { \
- .id = pad, \
- .type = t, \
- .mux_reg = mr, \
- .pupd_reg = pr, \
- .drvstr_reg = dsr, \
- .ad_ctrl_reg = adr, \
- .mux_bit = mb, \
- .pupd_bit = pb, \
- .drvstr_bit = dsb, \
- .ad_ctrl_bit = adb, \
- }
-
-/*
- * struct atlas7_pad_status - Atlas7 Pad status
- */
-struct atlas7_pad_status {
- u8 func;
- u8 pull;
- u8 dstr;
- u8 reserved;
-};
-
-/**
- * struct atlas7_pad_mux - Atlas7 mux
- * @bank: The bank this pad's registers belong to.
- * @pin: The ID of this Pad.
- * @func: The mux func on this Pad.
- * @dinput_reg: The Input-Disable register offset.
- * @dinput_bit: The start bit of Input-Disable register.
- * @dinput_val_reg: The Input-Disable-value register offset.
- * This register sets the value presented by the pad when its
- * input is disabled.
- * @dinput_val_bit: The start bit of Input-Disable Value register.
- */
-struct atlas7_pad_mux {
- u32 bank;
- u32 pin;
- u32 func;
- u32 dinput_reg;
- u32 dinput_bit;
- u32 dinput_val_reg;
- u32 dinput_val_bit;
-};
-
-#define MUX(b, pad, f, dr, db, dvr, dvb) \
- { \
- .bank = b, \
- .pin = pad, \
- .func = f, \
- .dinput_reg = dr, \
- .dinput_bit = db, \
- .dinput_val_reg = dvr, \
- .dinput_val_bit = dvb, \
- }
-
-struct atlas7_grp_mux {
- unsigned int group;
- unsigned int pad_mux_count;
- const struct atlas7_pad_mux *pad_mux_list;
-};
-
-/**
- * struct atlas7_pin_group - describes an Atlas7 pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
- */
-struct atlas7_pin_group {
- const char *name;
- const unsigned int *pins;
- const unsigned num_pins;
-};
-
-#define GROUP(n, p) \
- { \
- .name = n, \
- .pins = p, \
- .num_pins = ARRAY_SIZE(p), \
- }
-
-struct atlas7_pmx_func {
- const char *name;
- const char * const *groups;
- const unsigned num_groups;
- const struct atlas7_grp_mux *grpmux;
-};
-
-#define FUNCTION(n, g, m) \
- { \
- .name = n, \
- .groups = g, \
- .num_groups = ARRAY_SIZE(g), \
- .grpmux = m, \
- }
-
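GROUP(), MUX() and FUNCTION() compose the tables that follow. An
illustrative (made-up, not taken from this driver) fragment showing how the
pieces nest; N marks "no input-disable register" as in the real entries:

static const unsigned int demo_pins[] = { 10, 11 };
static const struct atlas7_pin_group demo_group = GROUP("demogrp", demo_pins);
static const struct atlas7_pad_mux demo_pad_mux[] = {
	MUX(1, 10, 3, N, N, N, N),	/* bank 1, pad 10, func 3 */
	MUX(1, 11, 3, N, N, N, N),
};
static const struct atlas7_grp_mux demo_grp_mux = {
	.group = 0,			/* group index (illustrative) */
	.pad_mux_count = ARRAY_SIZE(demo_pad_mux),
	.pad_mux_list = demo_pad_mux,
};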
-struct atlas7_pinctrl_data {
- struct pinctrl_pin_desc *pads;
- int pads_cnt;
- struct atlas7_pin_group *grps;
- int grps_cnt;
- struct atlas7_pmx_func *funcs;
- int funcs_cnt;
- struct atlas7_pad_config *confs;
- int confs_cnt;
-};
-
-/* Platform info of atlas7 pinctrl */
-#define ATLAS7_PINCTRL_REG_BANKS 2
-#define ATLAS7_PINCTRL_BANK_0_PINS 18
-#define ATLAS7_PINCTRL_BANK_1_PINS 141
-#define ATLAS7_PINCTRL_TOTAL_PINS \
- (ATLAS7_PINCTRL_BANK_0_PINS + ATLAS7_PINCTRL_BANK_1_PINS)
-
-/* Atlas7 GPIO Chip */
-
-#define NGPIO_OF_BANK 32
-#define GPIO_TO_BANK(gpio) ((gpio) / NGPIO_OF_BANK)
-
-/* Registers of GPIO Controllers */
-#define ATLAS7_GPIO_BASE(g, b) ((g)->reg + 0x100 * (b))
-#define ATLAS7_GPIO_CTRL(b, i) ((b)->base + 4 * (i))
-#define ATLAS7_GPIO_INT_STATUS(b) ((b)->base + 0x8C)
-
-/* Definition bits of GPIO Control Registers */
-#define ATLAS7_GPIO_CTL_INTR_LOW_MASK BIT(0)
-#define ATLAS7_GPIO_CTL_INTR_HIGH_MASK BIT(1)
-#define ATLAS7_GPIO_CTL_INTR_TYPE_MASK BIT(2)
-#define ATLAS7_GPIO_CTL_INTR_EN_MASK BIT(3)
-#define ATLAS7_GPIO_CTL_INTR_STATUS_MASK BIT(4)
-#define ATLAS7_GPIO_CTL_OUT_EN_MASK BIT(5)
-#define ATLAS7_GPIO_CTL_DATAOUT_MASK BIT(6)
-#define ATLAS7_GPIO_CTL_DATAIN_MASK BIT(7)
-
-struct atlas7_gpio_bank {
- int id;
- int irq;
- void __iomem *base;
- unsigned int gpio_offset;
- unsigned int ngpio;
- const unsigned int *gpio_pins;
- u32 sleep_data[NGPIO_OF_BANK];
-};
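Each GPIO pin gets its own 32-bit control word (ATLAS7_GPIO_CTRL() above,
4 bytes per pin), with the sampled input level in the DATAIN bit. A read
sketch under those definitions, with a made-up helper name:

static int atlas7_gpio_get_sketch(struct atlas7_gpio_bank *bank,
				  unsigned int pin)
{
	u32 val = readl(ATLAS7_GPIO_CTRL(bank, pin));

	return !!(val & ATLAS7_GPIO_CTL_DATAIN_MASK);
}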
-
-struct atlas7_gpio_chip {
- const char *name;
- void __iomem *reg;
- struct clk *clk;
- int nbank;
- raw_spinlock_t lock;
- struct gpio_chip chip;
- struct atlas7_gpio_bank banks[];
-};
-
-struct atlas7_pmx {
- struct device *dev;
- struct pinctrl_dev *pctl;
- struct pinctrl_desc pctl_desc;
- struct atlas7_pinctrl_data *pctl_data;
- void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS];
- void __iomem *sys2pci_base;
- u32 status_ds[NUM_OF_IN_DISABLE_REG];
- u32 status_dsv[NUM_OF_IN_DISABLE_REG];
- struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS];
-};
-
-/*
- * Pad list for the pinmux subsystem
- * refer to A7DA IO Summary - CS-314158-DD-4E.xls
- */
-
-/* Pads in IOC RTC & TOP */
-static const struct pinctrl_pin_desc atlas7_ioc_pads[] = {
- /* RTC PADs */
- PINCTRL_PIN(0, "rtc_gpio_0"),
- PINCTRL_PIN(1, "rtc_gpio_1"),
- PINCTRL_PIN(2, "rtc_gpio_2"),
- PINCTRL_PIN(3, "rtc_gpio_3"),
- PINCTRL_PIN(4, "low_bat_ind_b"),
- PINCTRL_PIN(5, "on_key_b"),
- PINCTRL_PIN(6, "ext_on"),
- PINCTRL_PIN(7, "mem_on"),
- PINCTRL_PIN(8, "core_on"),
- PINCTRL_PIN(9, "io_on"),
- PINCTRL_PIN(10, "can0_tx"),
- PINCTRL_PIN(11, "can0_rx"),
- PINCTRL_PIN(12, "spi0_clk"),
- PINCTRL_PIN(13, "spi0_cs_b"),
- PINCTRL_PIN(14, "spi0_io_0"),
- PINCTRL_PIN(15, "spi0_io_1"),
- PINCTRL_PIN(16, "spi0_io_2"),
- PINCTRL_PIN(17, "spi0_io_3"),
-
- /* TOP PADs */
- PINCTRL_PIN(18, "spi1_en"),
- PINCTRL_PIN(19, "spi1_clk"),
- PINCTRL_PIN(20, "spi1_din"),
- PINCTRL_PIN(21, "spi1_dout"),
- PINCTRL_PIN(22, "trg_spi_clk"),
- PINCTRL_PIN(23, "trg_spi_di"),
- PINCTRL_PIN(24, "trg_spi_do"),
- PINCTRL_PIN(25, "trg_spi_cs_b"),
- PINCTRL_PIN(26, "trg_acq_d1"),
- PINCTRL_PIN(27, "trg_irq_b"),
- PINCTRL_PIN(28, "trg_acq_d0"),
- PINCTRL_PIN(29, "trg_acq_clk"),
- PINCTRL_PIN(30, "trg_shutdown_b_out"),
- PINCTRL_PIN(31, "sdio2_clk"),
- PINCTRL_PIN(32, "sdio2_cmd"),
- PINCTRL_PIN(33, "sdio2_dat_0"),
- PINCTRL_PIN(34, "sdio2_dat_1"),
- PINCTRL_PIN(35, "sdio2_dat_2"),
- PINCTRL_PIN(36, "sdio2_dat_3"),
- PINCTRL_PIN(37, "df_ad_7"),
- PINCTRL_PIN(38, "df_ad_6"),
- PINCTRL_PIN(39, "df_ad_5"),
- PINCTRL_PIN(40, "df_ad_4"),
- PINCTRL_PIN(41, "df_ad_3"),
- PINCTRL_PIN(42, "df_ad_2"),
- PINCTRL_PIN(43, "df_ad_1"),
- PINCTRL_PIN(44, "df_ad_0"),
- PINCTRL_PIN(45, "df_dqs"),
- PINCTRL_PIN(46, "df_cle"),
- PINCTRL_PIN(47, "df_ale"),
- PINCTRL_PIN(48, "df_we_b"),
- PINCTRL_PIN(49, "df_re_b"),
- PINCTRL_PIN(50, "df_ry_by"),
- PINCTRL_PIN(51, "df_cs_b_1"),
- PINCTRL_PIN(52, "df_cs_b_0"),
- PINCTRL_PIN(53, "l_pclk"),
- PINCTRL_PIN(54, "l_lck"),
- PINCTRL_PIN(55, "l_fck"),
- PINCTRL_PIN(56, "l_de"),
- PINCTRL_PIN(57, "ldd_0"),
- PINCTRL_PIN(58, "ldd_1"),
- PINCTRL_PIN(59, "ldd_2"),
- PINCTRL_PIN(60, "ldd_3"),
- PINCTRL_PIN(61, "ldd_4"),
- PINCTRL_PIN(62, "ldd_5"),
- PINCTRL_PIN(63, "ldd_6"),
- PINCTRL_PIN(64, "ldd_7"),
- PINCTRL_PIN(65, "ldd_8"),
- PINCTRL_PIN(66, "ldd_9"),
- PINCTRL_PIN(67, "ldd_10"),
- PINCTRL_PIN(68, "ldd_11"),
- PINCTRL_PIN(69, "ldd_12"),
- PINCTRL_PIN(70, "ldd_13"),
- PINCTRL_PIN(71, "ldd_14"),
- PINCTRL_PIN(72, "ldd_15"),
- PINCTRL_PIN(73, "lcd_gpio_20"),
- PINCTRL_PIN(74, "vip_0"),
- PINCTRL_PIN(75, "vip_1"),
- PINCTRL_PIN(76, "vip_2"),
- PINCTRL_PIN(77, "vip_3"),
- PINCTRL_PIN(78, "vip_4"),
- PINCTRL_PIN(79, "vip_5"),
- PINCTRL_PIN(80, "vip_6"),
- PINCTRL_PIN(81, "vip_7"),
- PINCTRL_PIN(82, "vip_pxclk"),
- PINCTRL_PIN(83, "vip_hsync"),
- PINCTRL_PIN(84, "vip_vsync"),
- PINCTRL_PIN(85, "sdio3_clk"),
- PINCTRL_PIN(86, "sdio3_cmd"),
- PINCTRL_PIN(87, "sdio3_dat_0"),
- PINCTRL_PIN(88, "sdio3_dat_1"),
- PINCTRL_PIN(89, "sdio3_dat_2"),
- PINCTRL_PIN(90, "sdio3_dat_3"),
- PINCTRL_PIN(91, "sdio5_clk"),
- PINCTRL_PIN(92, "sdio5_cmd"),
- PINCTRL_PIN(93, "sdio5_dat_0"),
- PINCTRL_PIN(94, "sdio5_dat_1"),
- PINCTRL_PIN(95, "sdio5_dat_2"),
- PINCTRL_PIN(96, "sdio5_dat_3"),
- PINCTRL_PIN(97, "rgmii_txd_0"),
- PINCTRL_PIN(98, "rgmii_txd_1"),
- PINCTRL_PIN(99, "rgmii_txd_2"),
- PINCTRL_PIN(100, "rgmii_txd_3"),
- PINCTRL_PIN(101, "rgmii_txclk"),
- PINCTRL_PIN(102, "rgmii_tx_ctl"),
- PINCTRL_PIN(103, "rgmii_rxd_0"),
- PINCTRL_PIN(104, "rgmii_rxd_1"),
- PINCTRL_PIN(105, "rgmii_rxd_2"),
- PINCTRL_PIN(106, "rgmii_rxd_3"),
- PINCTRL_PIN(107, "rgmii_rx_clk"),
- PINCTRL_PIN(108, "rgmii_rxc_ctl"),
- PINCTRL_PIN(109, "rgmii_mdio"),
- PINCTRL_PIN(110, "rgmii_mdc"),
- PINCTRL_PIN(111, "rgmii_intr_n"),
- PINCTRL_PIN(112, "i2s_mclk"),
- PINCTRL_PIN(113, "i2s_bclk"),
- PINCTRL_PIN(114, "i2s_ws"),
- PINCTRL_PIN(115, "i2s_dout0"),
- PINCTRL_PIN(116, "i2s_dout1"),
- PINCTRL_PIN(117, "i2s_dout2"),
- PINCTRL_PIN(118, "i2s_din"),
- PINCTRL_PIN(119, "gpio_0"),
- PINCTRL_PIN(120, "gpio_1"),
- PINCTRL_PIN(121, "gpio_2"),
- PINCTRL_PIN(122, "gpio_3"),
- PINCTRL_PIN(123, "gpio_4"),
- PINCTRL_PIN(124, "gpio_5"),
- PINCTRL_PIN(125, "gpio_6"),
- PINCTRL_PIN(126, "gpio_7"),
- PINCTRL_PIN(127, "sda_0"),
- PINCTRL_PIN(128, "scl_0"),
- PINCTRL_PIN(129, "coex_pio_0"),
- PINCTRL_PIN(130, "coex_pio_1"),
- PINCTRL_PIN(131, "coex_pio_2"),
- PINCTRL_PIN(132, "coex_pio_3"),
- PINCTRL_PIN(133, "uart0_tx"),
- PINCTRL_PIN(134, "uart0_rx"),
- PINCTRL_PIN(135, "uart1_tx"),
- PINCTRL_PIN(136, "uart1_rx"),
- PINCTRL_PIN(137, "uart3_tx"),
- PINCTRL_PIN(138, "uart3_rx"),
- PINCTRL_PIN(139, "uart4_tx"),
- PINCTRL_PIN(140, "uart4_rx"),
- PINCTRL_PIN(141, "usp0_clk"),
- PINCTRL_PIN(142, "usp0_tx"),
- PINCTRL_PIN(143, "usp0_rx"),
- PINCTRL_PIN(144, "usp0_fs"),
- PINCTRL_PIN(145, "usp1_clk"),
- PINCTRL_PIN(146, "usp1_tx"),
- PINCTRL_PIN(147, "usp1_rx"),
- PINCTRL_PIN(148, "usp1_fs"),
- PINCTRL_PIN(149, "lvds_tx0d4p"),
- PINCTRL_PIN(150, "lvds_tx0d4n"),
- PINCTRL_PIN(151, "lvds_tx0d3p"),
- PINCTRL_PIN(152, "lvds_tx0d3n"),
- PINCTRL_PIN(153, "lvds_tx0d2p"),
- PINCTRL_PIN(154, "lvds_tx0d2n"),
- PINCTRL_PIN(155, "lvds_tx0d1p"),
- PINCTRL_PIN(156, "lvds_tx0d1n"),
- PINCTRL_PIN(157, "lvds_tx0d0p"),
- PINCTRL_PIN(158, "lvds_tx0d0n"),
- PINCTRL_PIN(159, "jtag_tdo"),
- PINCTRL_PIN(160, "jtag_tms"),
- PINCTRL_PIN(161, "jtag_tck"),
- PINCTRL_PIN(162, "jtag_tdi"),
- PINCTRL_PIN(163, "jtag_trstn"),
-};
-
-static struct atlas7_pad_config atlas7_ioc_pad_confs[] = {
- /* The Configuration of IOC_RTC Pads */
- PADCONF(0, 3, 0x0, 0x100, 0x200, -1, 0, 0, 0, 0),
- PADCONF(1, 3, 0x0, 0x100, 0x200, -1, 4, 2, 2, 0),
- PADCONF(2, 3, 0x0, 0x100, 0x200, -1, 8, 4, 4, 0),
- PADCONF(3, 5, 0x0, 0x100, 0x200, -1, 12, 6, 6, 0),
- PADCONF(4, 4, 0x0, 0x100, 0x200, -1, 16, 8, 8, 0),
- PADCONF(5, 4, 0x0, 0x100, 0x200, -1, 20, 10, 10, 0),
- PADCONF(6, 3, 0x0, 0x100, 0x200, -1, 24, 12, 12, 0),
- PADCONF(7, 3, 0x0, 0x100, 0x200, -1, 28, 14, 14, 0),
- PADCONF(8, 3, 0x8, 0x100, 0x200, -1, 0, 16, 16, 0),
- PADCONF(9, 3, 0x8, 0x100, 0x200, -1, 4, 18, 18, 0),
- PADCONF(10, 4, 0x8, 0x100, 0x200, -1, 8, 20, 20, 0),
- PADCONF(11, 4, 0x8, 0x100, 0x200, -1, 12, 22, 22, 0),
- PADCONF(12, 5, 0x8, 0x100, 0x200, -1, 16, 24, 24, 0),
- PADCONF(13, 6, 0x8, 0x100, 0x200, -1, 20, 26, 26, 0),
- PADCONF(14, 5, 0x8, 0x100, 0x200, -1, 24, 28, 28, 0),
- PADCONF(15, 5, 0x8, 0x100, 0x200, -1, 28, 30, 30, 0),
- PADCONF(16, 5, 0x10, 0x108, 0x208, -1, 0, 0, 0, 0),
- PADCONF(17, 5, 0x10, 0x108, 0x208, -1, 4, 2, 2, 0),
- /* The Configuration of IOC_TOP Pads */
- PADCONF(18, 5, 0x80, 0x180, 0x300, -1, 0, 0, 0, 0),
- PADCONF(19, 5, 0x80, 0x180, 0x300, -1, 4, 2, 2, 0),
- PADCONF(20, 5, 0x80, 0x180, 0x300, -1, 8, 4, 4, 0),
- PADCONF(21, 5, 0x80, 0x180, 0x300, -1, 12, 6, 6, 0),
- PADCONF(22, 5, 0x88, 0x188, 0x308, -1, 0, 0, 0, 0),
- PADCONF(23, 5, 0x88, 0x188, 0x308, -1, 4, 2, 2, 0),
- PADCONF(24, 5, 0x88, 0x188, 0x308, -1, 8, 4, 4, 0),
- PADCONF(25, 6, 0x88, 0x188, 0x308, -1, 12, 6, 6, 0),
- PADCONF(26, 5, 0x88, 0x188, 0x308, -1, 16, 8, 8, 0),
- PADCONF(27, 6, 0x88, 0x188, 0x308, -1, 20, 10, 10, 0),
- PADCONF(28, 5, 0x88, 0x188, 0x308, -1, 24, 12, 12, 0),
- PADCONF(29, 5, 0x88, 0x188, 0x308, -1, 28, 14, 14, 0),
- PADCONF(30, 5, 0x90, 0x188, 0x308, -1, 0, 16, 16, 0),
- PADCONF(31, 2, 0x98, 0x190, 0x310, -1, 0, 0, 0, 0),
- PADCONF(32, 1, 0x98, 0x190, 0x310, -1, 4, 2, 4, 0),
- PADCONF(33, 1, 0x98, 0x190, 0x310, -1, 8, 4, 6, 0),
- PADCONF(34, 1, 0x98, 0x190, 0x310, -1, 12, 6, 8, 0),
- PADCONF(35, 1, 0x98, 0x190, 0x310, -1, 16, 8, 10, 0),
- PADCONF(36, 1, 0x98, 0x190, 0x310, -1, 20, 10, 12, 0),
- PADCONF(37, 1, 0xa0, 0x198, 0x318, -1, 0, 0, 0, 0),
- PADCONF(38, 1, 0xa0, 0x198, 0x318, -1, 4, 2, 2, 0),
- PADCONF(39, 1, 0xa0, 0x198, 0x318, -1, 8, 4, 4, 0),
- PADCONF(40, 1, 0xa0, 0x198, 0x318, -1, 12, 6, 6, 0),
- PADCONF(41, 1, 0xa0, 0x198, 0x318, -1, 16, 8, 8, 0),
- PADCONF(42, 1, 0xa0, 0x198, 0x318, -1, 20, 10, 10, 0),
- PADCONF(43, 1, 0xa0, 0x198, 0x318, -1, 24, 12, 12, 0),
- PADCONF(44, 1, 0xa0, 0x198, 0x318, -1, 28, 14, 14, 0),
- PADCONF(45, 0, 0xa8, 0x198, 0x318, -1, 0, 16, 16, 0),
- PADCONF(46, 0, 0xa8, 0x198, 0x318, -1, 4, 18, 18, 0),
- PADCONF(47, 1, 0xa8, 0x198, 0x318, -1, 8, 20, 20, 0),
- PADCONF(48, 1, 0xa8, 0x198, 0x318, -1, 12, 22, 22, 0),
- PADCONF(49, 1, 0xa8, 0x198, 0x318, -1, 16, 24, 24, 0),
- PADCONF(50, 1, 0xa8, 0x198, 0x318, -1, 20, 26, 26, 0),
- PADCONF(51, 1, 0xa8, 0x198, 0x318, -1, 24, 28, 28, 0),
- PADCONF(52, 1, 0xa8, 0x198, 0x318, -1, 28, 30, 30, 0),
- PADCONF(53, 0, 0xb0, 0x1a0, 0x320, -1, 0, 0, 0, 0),
- PADCONF(54, 0, 0xb0, 0x1a0, 0x320, -1, 4, 2, 2, 0),
- PADCONF(55, 0, 0xb0, 0x1a0, 0x320, -1, 8, 4, 4, 0),
- PADCONF(56, 0, 0xb0, 0x1a0, 0x320, -1, 12, 6, 6, 0),
- PADCONF(57, 0, 0xb0, 0x1a0, 0x320, -1, 16, 8, 8, 0),
- PADCONF(58, 0, 0xb0, 0x1a0, 0x320, -1, 20, 10, 10, 0),
- PADCONF(59, 0, 0xb0, 0x1a0, 0x320, -1, 24, 12, 12, 0),
- PADCONF(60, 0, 0xb0, 0x1a0, 0x320, -1, 28, 14, 14, 0),
- PADCONF(61, 0, 0xb8, 0x1a0, 0x320, -1, 0, 16, 16, 0),
- PADCONF(62, 0, 0xb8, 0x1a0, 0x320, -1, 4, 18, 18, 0),
- PADCONF(63, 0, 0xb8, 0x1a0, 0x320, -1, 8, 20, 20, 0),
- PADCONF(64, 0, 0xb8, 0x1a0, 0x320, -1, 12, 22, 22, 0),
- PADCONF(65, 0, 0xb8, 0x1a0, 0x320, -1, 16, 24, 24, 0),
- PADCONF(66, 0, 0xb8, 0x1a0, 0x320, -1, 20, 26, 26, 0),
- PADCONF(67, 0, 0xb8, 0x1a0, 0x320, -1, 24, 28, 28, 0),
- PADCONF(68, 0, 0xb8, 0x1a0, 0x320, -1, 28, 30, 30, 0),
- PADCONF(69, 0, 0xc0, 0x1a8, 0x328, -1, 0, 0, 0, 0),
- PADCONF(70, 0, 0xc0, 0x1a8, 0x328, -1, 4, 2, 2, 0),
- PADCONF(71, 0, 0xc0, 0x1a8, 0x328, -1, 8, 4, 4, 0),
- PADCONF(72, 0, 0xc0, 0x1a8, 0x328, -1, 12, 6, 6, 0),
- PADCONF(73, 0, 0xc0, 0x1a8, 0x328, -1, 16, 8, 8, 0),
- PADCONF(74, 0, 0xc8, 0x1b0, 0x330, -1, 0, 0, 0, 0),
- PADCONF(75, 0, 0xc8, 0x1b0, 0x330, -1, 4, 2, 2, 0),
- PADCONF(76, 0, 0xc8, 0x1b0, 0x330, -1, 8, 4, 4, 0),
- PADCONF(77, 0, 0xc8, 0x1b0, 0x330, -1, 12, 6, 6, 0),
- PADCONF(78, 0, 0xc8, 0x1b0, 0x330, -1, 16, 8, 8, 0),
- PADCONF(79, 0, 0xc8, 0x1b0, 0x330, -1, 20, 10, 10, 0),
- PADCONF(80, 0, 0xc8, 0x1b0, 0x330, -1, 24, 12, 12, 0),
- PADCONF(81, 0, 0xc8, 0x1b0, 0x330, -1, 28, 14, 14, 0),
- PADCONF(82, 0, 0xd0, 0x1b0, 0x330, -1, 0, 16, 16, 0),
- PADCONF(83, 0, 0xd0, 0x1b0, 0x330, -1, 4, 18, 18, 0),
- PADCONF(84, 0, 0xd0, 0x1b0, 0x330, -1, 8, 20, 20, 0),
- PADCONF(85, 2, 0xd8, 0x1b8, 0x338, -1, 0, 0, 0, 0),
- PADCONF(86, 1, 0xd8, 0x1b8, 0x338, -1, 4, 4, 4, 0),
- PADCONF(87, 1, 0xd8, 0x1b8, 0x338, -1, 8, 6, 6, 0),
- PADCONF(88, 1, 0xd8, 0x1b8, 0x338, -1, 12, 8, 8, 0),
- PADCONF(89, 1, 0xd8, 0x1b8, 0x338, -1, 16, 10, 10, 0),
- PADCONF(90, 1, 0xd8, 0x1b8, 0x338, -1, 20, 12, 12, 0),
- PADCONF(91, 2, 0xe0, 0x1c0, 0x340, -1, 0, 0, 0, 0),
- PADCONF(92, 1, 0xe0, 0x1c0, 0x340, -1, 4, 4, 4, 0),
- PADCONF(93, 1, 0xe0, 0x1c0, 0x340, -1, 8, 6, 6, 0),
- PADCONF(94, 1, 0xe0, 0x1c0, 0x340, -1, 12, 8, 8, 0),
- PADCONF(95, 1, 0xe0, 0x1c0, 0x340, -1, 16, 10, 10, 0),
- PADCONF(96, 1, 0xe0, 0x1c0, 0x340, -1, 20, 12, 12, 0),
- PADCONF(97, 0, 0xe8, 0x1c8, 0x348, -1, 0, 0, 0, 0),
- PADCONF(98, 0, 0xe8, 0x1c8, 0x348, -1, 4, 2, 2, 0),
- PADCONF(99, 0, 0xe8, 0x1c8, 0x348, -1, 8, 4, 4, 0),
- PADCONF(100, 0, 0xe8, 0x1c8, 0x348, -1, 12, 6, 6, 0),
- PADCONF(101, 2, 0xe8, 0x1c8, 0x348, -1, 16, 8, 8, 0),
- PADCONF(102, 0, 0xe8, 0x1c8, 0x348, -1, 20, 12, 12, 0),
- PADCONF(103, 0, 0xe8, 0x1c8, 0x348, -1, 24, 14, 14, 0),
- PADCONF(104, 0, 0xe8, 0x1c8, 0x348, -1, 28, 16, 16, 0),
- PADCONF(105, 0, 0xf0, 0x1c8, 0x348, -1, 0, 18, 18, 0),
- PADCONF(106, 0, 0xf0, 0x1c8, 0x348, -1, 4, 20, 20, 0),
- PADCONF(107, 0, 0xf0, 0x1c8, 0x348, -1, 8, 22, 22, 0),
- PADCONF(108, 0, 0xf0, 0x1c8, 0x348, -1, 12, 24, 24, 0),
- PADCONF(109, 1, 0xf0, 0x1c8, 0x348, -1, 16, 26, 26, 0),
- PADCONF(110, 0, 0xf0, 0x1c8, 0x348, -1, 20, 28, 28, 0),
- PADCONF(111, 1, 0xf0, 0x1c8, 0x348, -1, 24, 30, 30, 0),
- PADCONF(112, 5, 0xf8, 0x200, 0x350, -1, 0, 0, 0, 0),
- PADCONF(113, 5, 0xf8, 0x200, 0x350, -1, 4, 2, 2, 0),
- PADCONF(114, 5, 0xf8, 0x200, 0x350, -1, 8, 4, 4, 0),
- PADCONF(115, 5, 0xf8, 0x200, 0x350, -1, 12, 6, 6, 0),
- PADCONF(116, 5, 0xf8, 0x200, 0x350, -1, 16, 8, 8, 0),
- PADCONF(117, 5, 0xf8, 0x200, 0x350, -1, 20, 10, 10, 0),
- PADCONF(118, 5, 0xf8, 0x200, 0x350, -1, 24, 12, 12, 0),
- PADCONF(119, 5, 0x100, 0x250, 0x358, -1, 0, 0, 0, 0),
- PADCONF(120, 5, 0x100, 0x250, 0x358, -1, 4, 2, 2, 0),
- PADCONF(121, 5, 0x100, 0x250, 0x358, -1, 8, 4, 4, 0),
- PADCONF(122, 5, 0x100, 0x250, 0x358, -1, 12, 6, 6, 0),
- PADCONF(123, 6, 0x100, 0x250, 0x358, -1, 16, 8, 8, 0),
- PADCONF(124, 6, 0x100, 0x250, 0x358, -1, 20, 10, 10, 0),
- PADCONF(125, 6, 0x100, 0x250, 0x358, -1, 24, 12, 12, 0),
- PADCONF(126, 6, 0x100, 0x250, 0x358, -1, 28, 14, 14, 0),
- PADCONF(127, 6, 0x108, 0x250, 0x358, -1, 16, 24, 24, 0),
- PADCONF(128, 6, 0x108, 0x250, 0x358, -1, 20, 26, 26, 0),
- PADCONF(129, 0, 0x110, 0x258, 0x360, -1, 0, 0, 0, 0),
- PADCONF(130, 0, 0x110, 0x258, 0x360, -1, 4, 2, 2, 0),
- PADCONF(131, 0, 0x110, 0x258, 0x360, -1, 8, 4, 4, 0),
- PADCONF(132, 0, 0x110, 0x258, 0x360, -1, 12, 6, 6, 0),
- PADCONF(133, 6, 0x118, 0x260, 0x368, -1, 0, 0, 0, 0),
- PADCONF(134, 6, 0x118, 0x260, 0x368, -1, 4, 2, 2, 0),
- PADCONF(135, 6, 0x118, 0x260, 0x368, -1, 16, 8, 8, 0),
- PADCONF(136, 6, 0x118, 0x260, 0x368, -1, 20, 10, 10, 0),
- PADCONF(137, 6, 0x118, 0x260, 0x368, -1, 24, 12, 12, 0),
- PADCONF(138, 6, 0x118, 0x260, 0x368, -1, 28, 14, 14, 0),
- PADCONF(139, 6, 0x120, 0x260, 0x368, -1, 0, 16, 16, 0),
- PADCONF(140, 6, 0x120, 0x260, 0x368, -1, 4, 18, 18, 0),
- PADCONF(141, 5, 0x128, 0x268, 0x378, -1, 0, 0, 0, 0),
- PADCONF(142, 5, 0x128, 0x268, 0x378, -1, 4, 2, 2, 0),
- PADCONF(143, 5, 0x128, 0x268, 0x378, -1, 8, 4, 4, 0),
- PADCONF(144, 5, 0x128, 0x268, 0x378, -1, 12, 6, 6, 0),
- PADCONF(145, 5, 0x128, 0x268, 0x378, -1, 16, 8, 8, 0),
- PADCONF(146, 5, 0x128, 0x268, 0x378, -1, 20, 10, 10, 0),
- PADCONF(147, 5, 0x128, 0x268, 0x378, -1, 24, 12, 12, 0),
- PADCONF(148, 5, 0x128, 0x268, 0x378, -1, 28, 14, 14, 0),
- PADCONF(149, 7, 0x130, 0x270, -1, 0x480, 0, 0, 0, 0),
- PADCONF(150, 7, 0x130, 0x270, -1, 0x480, 4, 2, 0, 1),
- PADCONF(151, 7, 0x130, 0x270, -1, 0x480, 8, 4, 0, 2),
- PADCONF(152, 7, 0x130, 0x270, -1, 0x480, 12, 6, 0, 3),
- PADCONF(153, 7, 0x130, 0x270, -1, 0x480, 16, 8, 0, 4),
- PADCONF(154, 7, 0x130, 0x270, -1, 0x480, 20, 10, 0, 5),
- PADCONF(155, 7, 0x130, 0x270, -1, 0x480, 24, 12, 0, 6),
- PADCONF(156, 7, 0x130, 0x270, -1, 0x480, 28, 14, 0, 7),
- PADCONF(157, 7, 0x138, 0x278, -1, 0x480, 0, 0, 0, 8),
- PADCONF(158, 7, 0x138, 0x278, -1, 0x480, 4, 2, 0, 9),
- PADCONF(159, 5, 0x140, 0x280, 0x380, -1, 0, 0, 0, 0),
- PADCONF(160, 6, 0x140, 0x280, 0x380, -1, 4, 2, 2, 0),
- PADCONF(161, 5, 0x140, 0x280, 0x380, -1, 8, 4, 4, 0),
- PADCONF(162, 6, 0x140, 0x280, 0x380, -1, 12, 6, 6, 0),
- PADCONF(163, 6, 0x140, 0x280, 0x380, -1, 16, 8, 8, 0),
-};
-
-/* pin list of each pin group */
-static const unsigned int gnss_gpio_pins[] = { 119, 120, 121, 122, 123, 124,
- 125, 126, 127, 128, 22, 23, 24, 25, 26, 27, 28, 29, 30, };
-static const unsigned int lcd_vip_gpio_pins[] = { 74, 75, 76, 77, 78, 79, 80,
- 81, 82, 83, 84, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, };
-static const unsigned int sdio_i2s_gpio_pins[] = { 31, 32, 33, 34, 35, 36,
- 85, 86, 87, 88, 89, 90, 129, 130, 131, 132, 91, 92, 93, 94,
- 95, 96, 112, 113, 114, 115, 116, 117, 118, };
-static const unsigned int sp_rgmii_gpio_pins[] = { 97, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 18, 19, 20, 21,
- 141, 142, 143, 144, 145, 146, 147, 148, };
-static const unsigned int lvds_gpio_pins[] = { 157, 158, 155, 156, 153, 154,
- 151, 152, 149, 150, };
-static const unsigned int jtag_uart_nand_gpio_pins[] = { 44, 43, 42, 41, 40,
- 39, 38, 37, 46, 47, 48, 49, 50, 52, 51, 45, 133, 134, 135,
- 136, 137, 138, 139, 140, 159, 160, 161, 162, 163, };
-static const unsigned int rtc_gpio_pins[] = { 0, 1, 2, 3, 4, 10, 11, 12, 13,
- 14, 15, 16, 17, 9, };
-static const unsigned int audio_ac97_pins[] = { 113, 118, 115, 114, };
-static const unsigned int audio_digmic_pins0[] = { 51, };
-static const unsigned int audio_digmic_pins1[] = { 122, };
-static const unsigned int audio_digmic_pins2[] = { 161, };
-static const unsigned int audio_func_dbg_pins[] = { 141, 144, 44, 43, 42, 41,
- 40, 39, 38, 37, 74, 75, 76, 77, 78, 79, 81, 113, 114, 118,
- 115, 49, 50, 142, 143, 80, };
-static const unsigned int audio_i2s_pins[] = { 118, 115, 116, 117, 112, 113,
- 114, };
-static const unsigned int audio_i2s_2ch_pins[] = { 118, 115, 112, 113, 114, };
-static const unsigned int audio_i2s_extclk_pins[] = { 112, };
-static const unsigned int audio_spdif_out_pins0[] = { 112, };
-static const unsigned int audio_spdif_out_pins1[] = { 116, };
-static const unsigned int audio_spdif_out_pins2[] = { 142, };
-static const unsigned int audio_uart0_basic_pins[] = { 143, 142, 141, 144, };
-static const unsigned int audio_uart0_urfs_pins0[] = { 117, };
-static const unsigned int audio_uart0_urfs_pins1[] = { 139, };
-static const unsigned int audio_uart0_urfs_pins2[] = { 163, };
-static const unsigned int audio_uart0_urfs_pins3[] = { 162, };
-static const unsigned int audio_uart1_basic_pins[] = { 147, 146, 145, 148, };
-static const unsigned int audio_uart1_urfs_pins0[] = { 117, };
-static const unsigned int audio_uart1_urfs_pins1[] = { 140, };
-static const unsigned int audio_uart1_urfs_pins2[] = { 163, };
-static const unsigned int audio_uart2_urfs_pins0[] = { 139, };
-static const unsigned int audio_uart2_urfs_pins1[] = { 163, };
-static const unsigned int audio_uart2_urfs_pins2[] = { 96, };
-static const unsigned int audio_uart2_urxd_pins0[] = { 20, };
-static const unsigned int audio_uart2_urxd_pins1[] = { 109, };
-static const unsigned int audio_uart2_urxd_pins2[] = { 93, };
-static const unsigned int audio_uart2_usclk_pins0[] = { 19, };
-static const unsigned int audio_uart2_usclk_pins1[] = { 101, };
-static const unsigned int audio_uart2_usclk_pins2[] = { 91, };
-static const unsigned int audio_uart2_utfs_pins0[] = { 18, };
-static const unsigned int audio_uart2_utfs_pins1[] = { 111, };
-static const unsigned int audio_uart2_utfs_pins2[] = { 94, };
-static const unsigned int audio_uart2_utxd_pins0[] = { 21, };
-static const unsigned int audio_uart2_utxd_pins1[] = { 110, };
-static const unsigned int audio_uart2_utxd_pins2[] = { 92, };
-static const unsigned int c_can_trnsvr_en_pins0[] = { 2, };
-static const unsigned int c_can_trnsvr_en_pins1[] = { 0, };
-static const unsigned int c_can_trnsvr_intr_pins[] = { 1, };
-static const unsigned int c_can_trnsvr_stb_n_pins[] = { 3, };
-static const unsigned int c0_can_rxd_trnsv0_pins[] = { 11, };
-static const unsigned int c0_can_rxd_trnsv1_pins[] = { 2, };
-static const unsigned int c0_can_txd_trnsv0_pins[] = { 10, };
-static const unsigned int c0_can_txd_trnsv1_pins[] = { 3, };
-static const unsigned int c1_can_rxd_pins0[] = { 138, };
-static const unsigned int c1_can_rxd_pins1[] = { 147, };
-static const unsigned int c1_can_rxd_pins2[] = { 2, };
-static const unsigned int c1_can_rxd_pins3[] = { 162, };
-static const unsigned int c1_can_txd_pins0[] = { 137, };
-static const unsigned int c1_can_txd_pins1[] = { 146, };
-static const unsigned int c1_can_txd_pins2[] = { 3, };
-static const unsigned int c1_can_txd_pins3[] = { 161, };
-static const unsigned int ca_audio_lpc_pins[] = { 62, 63, 64, 65, 66, 67, 68,
- 69, 70, 71, };
-static const unsigned int ca_bt_lpc_pins[] = { 85, 86, 87, 88, 89, 90, };
-static const unsigned int ca_coex_pins[] = { 129, 130, 131, 132, };
-static const unsigned int ca_curator_lpc_pins[] = { 57, 58, 59, 60, };
-static const unsigned int ca_pcm_debug_pins[] = { 91, 93, 94, 92, };
-static const unsigned int ca_pio_pins[] = { 121, 122, 125, 126, 38, 37, 47,
- 49, 50, 54, 55, 56, };
-static const unsigned int ca_sdio_debug_pins[] = { 40, 39, 44, 43, 42, 41, };
-static const unsigned int ca_spi_pins[] = { 82, 79, 80, 81, };
-static const unsigned int ca_trb_pins[] = { 91, 93, 94, 95, 96, 78, 74, 75,
- 76, 77, };
-static const unsigned int ca_uart_debug_pins[] = { 136, 135, 134, 133, };
-static const unsigned int clkc_pins0[] = { 30, 47, };
-static const unsigned int clkc_pins1[] = { 78, 54, };
-static const unsigned int gn_gnss_i2c_pins[] = { 128, 127, };
-static const unsigned int gn_gnss_uart_nopause_pins[] = { 134, 133, };
-static const unsigned int gn_gnss_uart_pins[] = { 134, 133, 136, 135, };
-static const unsigned int gn_trg_spi_pins0[] = { 22, 25, 23, 24, };
-static const unsigned int gn_trg_spi_pins1[] = { 82, 79, 80, 81, };
-static const unsigned int cvbs_dbg_pins[] = { 54, 53, 82, 74, 75, 76, 77, 78,
- 79, 80, 81, 83, 84, 73, 55, 56, };
-static const unsigned int cvbs_dbg_test_pins0[] = { 57, };
-static const unsigned int cvbs_dbg_test_pins1[] = { 58, };
-static const unsigned int cvbs_dbg_test_pins2[] = { 59, };
-static const unsigned int cvbs_dbg_test_pins3[] = { 60, };
-static const unsigned int cvbs_dbg_test_pins4[] = { 61, };
-static const unsigned int cvbs_dbg_test_pins5[] = { 62, };
-static const unsigned int cvbs_dbg_test_pins6[] = { 63, };
-static const unsigned int cvbs_dbg_test_pins7[] = { 64, };
-static const unsigned int cvbs_dbg_test_pins8[] = { 65, };
-static const unsigned int cvbs_dbg_test_pins9[] = { 66, };
-static const unsigned int cvbs_dbg_test_pins10[] = { 67, };
-static const unsigned int cvbs_dbg_test_pins11[] = { 68, };
-static const unsigned int cvbs_dbg_test_pins12[] = { 69, };
-static const unsigned int cvbs_dbg_test_pins13[] = { 70, };
-static const unsigned int cvbs_dbg_test_pins14[] = { 71, };
-static const unsigned int cvbs_dbg_test_pins15[] = { 72, };
-static const unsigned int gn_gnss_power_pins[] = { 123, 124, 121, 122, 125,
- 120, };
-static const unsigned int gn_gnss_sw_status_pins[] = { 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 53, 55, 56, 54, };
-static const unsigned int gn_gnss_eclk_pins[] = { 113, };
-static const unsigned int gn_gnss_irq1_pins0[] = { 112, };
-static const unsigned int gn_gnss_irq2_pins0[] = { 118, };
-static const unsigned int gn_gnss_tm_pins[] = { 115, };
-static const unsigned int gn_gnss_tsync_pins[] = { 114, };
-static const unsigned int gn_io_gnsssys_sw_cfg_pins[] = { 44, 43, 42, 41, 40,
- 39, 38, 37, 49, 50, 91, 92, 93, 94, 95, 96, };
-static const unsigned int gn_trg_pins0[] = { 29, 28, 26, 27, };
-static const unsigned int gn_trg_pins1[] = { 77, 76, 74, 75, };
-static const unsigned int gn_trg_shutdown_pins0[] = { 30, };
-static const unsigned int gn_trg_shutdown_pins1[] = { 83, };
-static const unsigned int gn_trg_shutdown_pins2[] = { 117, };
-static const unsigned int gn_trg_shutdown_pins3[] = { 123, };
-static const unsigned int i2c0_pins[] = { 128, 127, };
-static const unsigned int i2c1_pins[] = { 126, 125, };
-static const unsigned int i2s0_pins[] = { 91, 93, 94, 92, };
-static const unsigned int i2s1_basic_pins[] = { 95, 96, };
-static const unsigned int i2s1_rxd0_pins0[] = { 61, };
-static const unsigned int i2s1_rxd0_pins1[] = { 131, };
-static const unsigned int i2s1_rxd0_pins2[] = { 129, };
-static const unsigned int i2s1_rxd0_pins3[] = { 117, };
-static const unsigned int i2s1_rxd0_pins4[] = { 83, };
-static const unsigned int i2s1_rxd1_pins0[] = { 72, };
-static const unsigned int i2s1_rxd1_pins1[] = { 132, };
-static const unsigned int i2s1_rxd1_pins2[] = { 130, };
-static const unsigned int i2s1_rxd1_pins3[] = { 118, };
-static const unsigned int i2s1_rxd1_pins4[] = { 84, };
-static const unsigned int jtag_jt_dbg_nsrst_pins[] = { 125, };
-static const unsigned int jtag_ntrst_pins0[] = { 4, };
-static const unsigned int jtag_ntrst_pins1[] = { 163, };
-static const unsigned int jtag_swdiotms_pins0[] = { 2, };
-static const unsigned int jtag_swdiotms_pins1[] = { 160, };
-static const unsigned int jtag_tck_pins0[] = { 0, };
-static const unsigned int jtag_tck_pins1[] = { 161, };
-static const unsigned int jtag_tdi_pins0[] = { 1, };
-static const unsigned int jtag_tdi_pins1[] = { 162, };
-static const unsigned int jtag_tdo_pins0[] = { 3, };
-static const unsigned int jtag_tdo_pins1[] = { 159, };
-static const unsigned int ks_kas_spi_pins0[] = { 141, 144, 143, 142, };
-static const unsigned int ld_ldd_pins[] = { 57, 58, 59, 60, 61, 62, 63, 64,
- 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 80,
- 81, 56, 53, };
-static const unsigned int ld_ldd_16bit_pins[] = { 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 56, 53, };
-static const unsigned int ld_ldd_fck_pins[] = { 55, };
-static const unsigned int ld_ldd_lck_pins[] = { 54, };
-static const unsigned int lr_lcdrom_pins[] = { 73, 54, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 56, 53, 55, };
-static const unsigned int lvds_analog_pins[] = { 149, 150, 151, 152, 153, 154,
- 155, 156, 157, 158, };
-static const unsigned int nd_df_basic_pins[] = { 44, 43, 42, 41, 40, 39, 38,
- 37, 47, 46, 52, 45, 49, 50, 48, };
-static const unsigned int nd_df_wp_pins[] = { 124, };
-static const unsigned int nd_df_cs_pins[] = { 51, };
-static const unsigned int ps_pins[] = { 120, 119, 121, };
-static const unsigned int ps_no_dir_pins[] = { 119, };
-static const unsigned int pwc_core_on_pins[] = { 8, };
-static const unsigned int pwc_ext_on_pins[] = { 6, };
-static const unsigned int pwc_gpio3_clk_pins[] = { 3, };
-static const unsigned int pwc_io_on_pins[] = { 9, };
-static const unsigned int pwc_lowbatt_b_pins0[] = { 4, };
-static const unsigned int pwc_mem_on_pins[] = { 7, };
-static const unsigned int pwc_on_key_b_pins0[] = { 5, };
-static const unsigned int pwc_wakeup_src0_pins[] = { 0, };
-static const unsigned int pwc_wakeup_src1_pins[] = { 1, };
-static const unsigned int pwc_wakeup_src2_pins[] = { 2, };
-static const unsigned int pwc_wakeup_src3_pins[] = { 3, };
-static const unsigned int pw_cko0_pins0[] = { 123, };
-static const unsigned int pw_cko0_pins1[] = { 101, };
-static const unsigned int pw_cko0_pins2[] = { 82, };
-static const unsigned int pw_cko0_pins3[] = { 162, };
-static const unsigned int pw_cko1_pins0[] = { 124, };
-static const unsigned int pw_cko1_pins1[] = { 110, };
-static const unsigned int pw_cko1_pins2[] = { 163, };
-static const unsigned int pw_i2s01_clk_pins0[] = { 125, };
-static const unsigned int pw_i2s01_clk_pins1[] = { 117, };
-static const unsigned int pw_i2s01_clk_pins2[] = { 132, };
-static const unsigned int pw_pwm0_pins0[] = { 119, };
-static const unsigned int pw_pwm0_pins1[] = { 159, };
-static const unsigned int pw_pwm1_pins0[] = { 120, };
-static const unsigned int pw_pwm1_pins1[] = { 160, };
-static const unsigned int pw_pwm1_pins2[] = { 131, };
-static const unsigned int pw_pwm2_pins0[] = { 121, };
-static const unsigned int pw_pwm2_pins1[] = { 98, };
-static const unsigned int pw_pwm2_pins2[] = { 161, };
-static const unsigned int pw_pwm3_pins0[] = { 122, };
-static const unsigned int pw_pwm3_pins1[] = { 73, };
-static const unsigned int pw_pwm_cpu_vol_pins0[] = { 121, };
-static const unsigned int pw_pwm_cpu_vol_pins1[] = { 98, };
-static const unsigned int pw_pwm_cpu_vol_pins2[] = { 161, };
-static const unsigned int pw_backlight_pins0[] = { 122, };
-static const unsigned int pw_backlight_pins1[] = { 73, };
-static const unsigned int rg_eth_mac_pins[] = { 108, 103, 104, 105, 106, 107,
- 102, 97, 98, 99, 100, 101, };
-static const unsigned int rg_gmac_phy_intr_n_pins[] = { 111, };
-static const unsigned int rg_rgmii_mac_pins[] = { 109, 110, };
-static const unsigned int rg_rgmii_phy_ref_clk_pins0[] = { 111, };
-static const unsigned int rg_rgmii_phy_ref_clk_pins1[] = { 53, };
-static const unsigned int sd0_pins[] = { 46, 47, 44, 43, 42, 41, 40, 39, 38,
- 37, };
-static const unsigned int sd0_4bit_pins[] = { 46, 47, 44, 43, 42, 41, };
-static const unsigned int sd1_pins[] = { 48, 49, 44, 43, 42, 41, 40, 39, 38,
- 37, };
-static const unsigned int sd1_4bit_pins0[] = { 48, 49, 44, 43, 42, 41, };
-static const unsigned int sd1_4bit_pins1[] = { 48, 49, 40, 39, 38, 37, };
-static const unsigned int sd2_basic_pins[] = { 31, 32, 33, 34, 35, 36, };
-static const unsigned int sd2_cdb_pins0[] = { 124, };
-static const unsigned int sd2_cdb_pins1[] = { 161, };
-static const unsigned int sd2_wpb_pins0[] = { 123, };
-static const unsigned int sd2_wpb_pins1[] = { 163, };
-static const unsigned int sd3_9_pins[] = { 85, 86, 87, 88, 89, 90, };
-static const unsigned int sd5_pins[] = { 91, 92, 93, 94, 95, 96, };
-static const unsigned int sd6_pins0[] = { 79, 78, 74, 75, 76, 77, };
-static const unsigned int sd6_pins1[] = { 101, 99, 100, 110, 109, 111, };
-static const unsigned int sp0_ext_ldo_on_pins[] = { 4, };
-static const unsigned int sp0_qspi_pins[] = { 12, 13, 14, 15, 16, 17, };
-static const unsigned int sp1_spi_pins[] = { 19, 20, 21, 18, };
-static const unsigned int tpiu_trace_pins[] = { 53, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, };
-static const unsigned int uart0_pins[] = { 121, 120, 134, 133, };
-static const unsigned int uart0_nopause_pins[] = { 134, 133, };
-static const unsigned int uart1_pins[] = { 136, 135, };
-static const unsigned int uart2_cts_pins0[] = { 132, };
-static const unsigned int uart2_cts_pins1[] = { 162, };
-static const unsigned int uart2_rts_pins0[] = { 131, };
-static const unsigned int uart2_rts_pins1[] = { 161, };
-static const unsigned int uart2_rxd_pins0[] = { 11, };
-static const unsigned int uart2_rxd_pins1[] = { 160, };
-static const unsigned int uart2_rxd_pins2[] = { 130, };
-static const unsigned int uart2_txd_pins0[] = { 10, };
-static const unsigned int uart2_txd_pins1[] = { 159, };
-static const unsigned int uart2_txd_pins2[] = { 129, };
-static const unsigned int uart3_cts_pins0[] = { 125, };
-static const unsigned int uart3_cts_pins1[] = { 111, };
-static const unsigned int uart3_cts_pins2[] = { 140, };
-static const unsigned int uart3_rts_pins0[] = { 126, };
-static const unsigned int uart3_rts_pins1[] = { 109, };
-static const unsigned int uart3_rts_pins2[] = { 139, };
-static const unsigned int uart3_rxd_pins0[] = { 138, };
-static const unsigned int uart3_rxd_pins1[] = { 84, };
-static const unsigned int uart3_rxd_pins2[] = { 162, };
-static const unsigned int uart3_txd_pins0[] = { 137, };
-static const unsigned int uart3_txd_pins1[] = { 83, };
-static const unsigned int uart3_txd_pins2[] = { 161, };
-static const unsigned int uart4_basic_pins[] = { 140, 139, };
-static const unsigned int uart4_cts_pins0[] = { 122, };
-static const unsigned int uart4_cts_pins1[] = { 100, };
-static const unsigned int uart4_cts_pins2[] = { 117, };
-static const unsigned int uart4_rts_pins0[] = { 123, };
-static const unsigned int uart4_rts_pins1[] = { 99, };
-static const unsigned int uart4_rts_pins2[] = { 116, };
-static const unsigned int usb0_drvvbus_pins0[] = { 51, };
-static const unsigned int usb0_drvvbus_pins1[] = { 162, };
-static const unsigned int usb1_drvvbus_pins0[] = { 134, };
-static const unsigned int usb1_drvvbus_pins1[] = { 163, };
-static const unsigned int visbus_dout_pins[] = { 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 53, 54, 55, 56, 85, 86,
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, };
-static const unsigned int vi_vip1_pins[] = { 74, 75, 76, 77, 78, 79, 80, 81,
- 82, 83, 84, 103, 104, 105, 106, 107, 102, 97, 98, };
-static const unsigned int vi_vip1_ext_pins[] = { 74, 75, 76, 77, 78, 79, 80,
- 81, 82, 83, 84, 108, 103, 104, 105, 106, 107, 102, 97, 98,
- 99, 100, };
-static const unsigned int vi_vip1_low8bit_pins[] = { 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, };
-static const unsigned int vi_vip1_high8bit_pins[] = { 82, 83, 84, 103, 104,
- 105, 106, 107, 102, 97, 98, };
-
-/* Definition of the pin group table */
-static struct atlas7_pin_group altas7_pin_groups[] = {
- GROUP("gnss_gpio_grp", gnss_gpio_pins),
- GROUP("lcd_vip_gpio_grp", lcd_vip_gpio_pins),
- GROUP("sdio_i2s_gpio_grp", sdio_i2s_gpio_pins),
- GROUP("sp_rgmii_gpio_grp", sp_rgmii_gpio_pins),
- GROUP("lvds_gpio_grp", lvds_gpio_pins),
- GROUP("jtag_uart_nand_gpio_grp", jtag_uart_nand_gpio_pins),
- GROUP("rtc_gpio_grp", rtc_gpio_pins),
- GROUP("audio_ac97_grp", audio_ac97_pins),
- GROUP("audio_digmic_grp0", audio_digmic_pins0),
- GROUP("audio_digmic_grp1", audio_digmic_pins1),
- GROUP("audio_digmic_grp2", audio_digmic_pins2),
- GROUP("audio_func_dbg_grp", audio_func_dbg_pins),
- GROUP("audio_i2s_grp", audio_i2s_pins),
- GROUP("audio_i2s_2ch_grp", audio_i2s_2ch_pins),
- GROUP("audio_i2s_extclk_grp", audio_i2s_extclk_pins),
- GROUP("audio_spdif_out_grp0", audio_spdif_out_pins0),
- GROUP("audio_spdif_out_grp1", audio_spdif_out_pins1),
- GROUP("audio_spdif_out_grp2", audio_spdif_out_pins2),
- GROUP("audio_uart0_basic_grp", audio_uart0_basic_pins),
- GROUP("audio_uart0_urfs_grp0", audio_uart0_urfs_pins0),
- GROUP("audio_uart0_urfs_grp1", audio_uart0_urfs_pins1),
- GROUP("audio_uart0_urfs_grp2", audio_uart0_urfs_pins2),
- GROUP("audio_uart0_urfs_grp3", audio_uart0_urfs_pins3),
- GROUP("audio_uart1_basic_grp", audio_uart1_basic_pins),
- GROUP("audio_uart1_urfs_grp0", audio_uart1_urfs_pins0),
- GROUP("audio_uart1_urfs_grp1", audio_uart1_urfs_pins1),
- GROUP("audio_uart1_urfs_grp2", audio_uart1_urfs_pins2),
- GROUP("audio_uart2_urfs_grp0", audio_uart2_urfs_pins0),
- GROUP("audio_uart2_urfs_grp1", audio_uart2_urfs_pins1),
- GROUP("audio_uart2_urfs_grp2", audio_uart2_urfs_pins2),
- GROUP("audio_uart2_urxd_grp0", audio_uart2_urxd_pins0),
- GROUP("audio_uart2_urxd_grp1", audio_uart2_urxd_pins1),
- GROUP("audio_uart2_urxd_grp2", audio_uart2_urxd_pins2),
- GROUP("audio_uart2_usclk_grp0", audio_uart2_usclk_pins0),
- GROUP("audio_uart2_usclk_grp1", audio_uart2_usclk_pins1),
- GROUP("audio_uart2_usclk_grp2", audio_uart2_usclk_pins2),
- GROUP("audio_uart2_utfs_grp0", audio_uart2_utfs_pins0),
- GROUP("audio_uart2_utfs_grp1", audio_uart2_utfs_pins1),
- GROUP("audio_uart2_utfs_grp2", audio_uart2_utfs_pins2),
- GROUP("audio_uart2_utxd_grp0", audio_uart2_utxd_pins0),
- GROUP("audio_uart2_utxd_grp1", audio_uart2_utxd_pins1),
- GROUP("audio_uart2_utxd_grp2", audio_uart2_utxd_pins2),
- GROUP("c_can_trnsvr_en_grp0", c_can_trnsvr_en_pins0),
- GROUP("c_can_trnsvr_en_grp1", c_can_trnsvr_en_pins1),
- GROUP("c_can_trnsvr_intr_grp", c_can_trnsvr_intr_pins),
- GROUP("c_can_trnsvr_stb_n_grp", c_can_trnsvr_stb_n_pins),
- GROUP("c0_can_rxd_trnsv0_grp", c0_can_rxd_trnsv0_pins),
- GROUP("c0_can_rxd_trnsv1_grp", c0_can_rxd_trnsv1_pins),
- GROUP("c0_can_txd_trnsv0_grp", c0_can_txd_trnsv0_pins),
- GROUP("c0_can_txd_trnsv1_grp", c0_can_txd_trnsv1_pins),
- GROUP("c1_can_rxd_grp0", c1_can_rxd_pins0),
- GROUP("c1_can_rxd_grp1", c1_can_rxd_pins1),
- GROUP("c1_can_rxd_grp2", c1_can_rxd_pins2),
- GROUP("c1_can_rxd_grp3", c1_can_rxd_pins3),
- GROUP("c1_can_txd_grp0", c1_can_txd_pins0),
- GROUP("c1_can_txd_grp1", c1_can_txd_pins1),
- GROUP("c1_can_txd_grp2", c1_can_txd_pins2),
- GROUP("c1_can_txd_grp3", c1_can_txd_pins3),
- GROUP("ca_audio_lpc_grp", ca_audio_lpc_pins),
- GROUP("ca_bt_lpc_grp", ca_bt_lpc_pins),
- GROUP("ca_coex_grp", ca_coex_pins),
- GROUP("ca_curator_lpc_grp", ca_curator_lpc_pins),
- GROUP("ca_pcm_debug_grp", ca_pcm_debug_pins),
- GROUP("ca_pio_grp", ca_pio_pins),
- GROUP("ca_sdio_debug_grp", ca_sdio_debug_pins),
- GROUP("ca_spi_grp", ca_spi_pins),
- GROUP("ca_trb_grp", ca_trb_pins),
- GROUP("ca_uart_debug_grp", ca_uart_debug_pins),
- GROUP("clkc_grp0", clkc_pins0),
- GROUP("clkc_grp1", clkc_pins1),
- GROUP("gn_gnss_i2c_grp", gn_gnss_i2c_pins),
- GROUP("gn_gnss_uart_nopause_grp", gn_gnss_uart_nopause_pins),
- GROUP("gn_gnss_uart_grp", gn_gnss_uart_pins),
- GROUP("gn_trg_spi_grp0", gn_trg_spi_pins0),
- GROUP("gn_trg_spi_grp1", gn_trg_spi_pins1),
- GROUP("cvbs_dbg_grp", cvbs_dbg_pins),
- GROUP("cvbs_dbg_test_grp0", cvbs_dbg_test_pins0),
- GROUP("cvbs_dbg_test_grp1", cvbs_dbg_test_pins1),
- GROUP("cvbs_dbg_test_grp2", cvbs_dbg_test_pins2),
- GROUP("cvbs_dbg_test_grp3", cvbs_dbg_test_pins3),
- GROUP("cvbs_dbg_test_grp4", cvbs_dbg_test_pins4),
- GROUP("cvbs_dbg_test_grp5", cvbs_dbg_test_pins5),
- GROUP("cvbs_dbg_test_grp6", cvbs_dbg_test_pins6),
- GROUP("cvbs_dbg_test_grp7", cvbs_dbg_test_pins7),
- GROUP("cvbs_dbg_test_grp8", cvbs_dbg_test_pins8),
- GROUP("cvbs_dbg_test_grp9", cvbs_dbg_test_pins9),
- GROUP("cvbs_dbg_test_grp10", cvbs_dbg_test_pins10),
- GROUP("cvbs_dbg_test_grp11", cvbs_dbg_test_pins11),
- GROUP("cvbs_dbg_test_grp12", cvbs_dbg_test_pins12),
- GROUP("cvbs_dbg_test_grp13", cvbs_dbg_test_pins13),
- GROUP("cvbs_dbg_test_grp14", cvbs_dbg_test_pins14),
- GROUP("cvbs_dbg_test_grp15", cvbs_dbg_test_pins15),
- GROUP("gn_gnss_power_grp", gn_gnss_power_pins),
- GROUP("gn_gnss_sw_status_grp", gn_gnss_sw_status_pins),
- GROUP("gn_gnss_eclk_grp", gn_gnss_eclk_pins),
- GROUP("gn_gnss_irq1_grp0", gn_gnss_irq1_pins0),
- GROUP("gn_gnss_irq2_grp0", gn_gnss_irq2_pins0),
- GROUP("gn_gnss_tm_grp", gn_gnss_tm_pins),
- GROUP("gn_gnss_tsync_grp", gn_gnss_tsync_pins),
- GROUP("gn_io_gnsssys_sw_cfg_grp", gn_io_gnsssys_sw_cfg_pins),
- GROUP("gn_trg_grp0", gn_trg_pins0),
- GROUP("gn_trg_grp1", gn_trg_pins1),
- GROUP("gn_trg_shutdown_grp0", gn_trg_shutdown_pins0),
- GROUP("gn_trg_shutdown_grp1", gn_trg_shutdown_pins1),
- GROUP("gn_trg_shutdown_grp2", gn_trg_shutdown_pins2),
- GROUP("gn_trg_shutdown_grp3", gn_trg_shutdown_pins3),
- GROUP("i2c0_grp", i2c0_pins),
- GROUP("i2c1_grp", i2c1_pins),
- GROUP("i2s0_grp", i2s0_pins),
- GROUP("i2s1_basic_grp", i2s1_basic_pins),
- GROUP("i2s1_rxd0_grp0", i2s1_rxd0_pins0),
- GROUP("i2s1_rxd0_grp1", i2s1_rxd0_pins1),
- GROUP("i2s1_rxd0_grp2", i2s1_rxd0_pins2),
- GROUP("i2s1_rxd0_grp3", i2s1_rxd0_pins3),
- GROUP("i2s1_rxd0_grp4", i2s1_rxd0_pins4),
- GROUP("i2s1_rxd1_grp0", i2s1_rxd1_pins0),
- GROUP("i2s1_rxd1_grp1", i2s1_rxd1_pins1),
- GROUP("i2s1_rxd1_grp2", i2s1_rxd1_pins2),
- GROUP("i2s1_rxd1_grp3", i2s1_rxd1_pins3),
- GROUP("i2s1_rxd1_grp4", i2s1_rxd1_pins4),
- GROUP("jtag_jt_dbg_nsrst_grp", jtag_jt_dbg_nsrst_pins),
- GROUP("jtag_ntrst_grp0", jtag_ntrst_pins0),
- GROUP("jtag_ntrst_grp1", jtag_ntrst_pins1),
- GROUP("jtag_swdiotms_grp0", jtag_swdiotms_pins0),
- GROUP("jtag_swdiotms_grp1", jtag_swdiotms_pins1),
- GROUP("jtag_tck_grp0", jtag_tck_pins0),
- GROUP("jtag_tck_grp1", jtag_tck_pins1),
- GROUP("jtag_tdi_grp0", jtag_tdi_pins0),
- GROUP("jtag_tdi_grp1", jtag_tdi_pins1),
- GROUP("jtag_tdo_grp0", jtag_tdo_pins0),
- GROUP("jtag_tdo_grp1", jtag_tdo_pins1),
- GROUP("ks_kas_spi_grp0", ks_kas_spi_pins0),
- GROUP("ld_ldd_grp", ld_ldd_pins),
- GROUP("ld_ldd_16bit_grp", ld_ldd_16bit_pins),
- GROUP("ld_ldd_fck_grp", ld_ldd_fck_pins),
- GROUP("ld_ldd_lck_grp", ld_ldd_lck_pins),
- GROUP("lr_lcdrom_grp", lr_lcdrom_pins),
- GROUP("lvds_analog_grp", lvds_analog_pins),
- GROUP("nd_df_basic_grp", nd_df_basic_pins),
- GROUP("nd_df_wp_grp", nd_df_wp_pins),
- GROUP("nd_df_cs_grp", nd_df_cs_pins),
- GROUP("ps_grp", ps_pins),
- GROUP("ps_no_dir_grp", ps_no_dir_pins),
- GROUP("pwc_core_on_grp", pwc_core_on_pins),
- GROUP("pwc_ext_on_grp", pwc_ext_on_pins),
- GROUP("pwc_gpio3_clk_grp", pwc_gpio3_clk_pins),
- GROUP("pwc_io_on_grp", pwc_io_on_pins),
- GROUP("pwc_lowbatt_b_grp0", pwc_lowbatt_b_pins0),
- GROUP("pwc_mem_on_grp", pwc_mem_on_pins),
- GROUP("pwc_on_key_b_grp0", pwc_on_key_b_pins0),
- GROUP("pwc_wakeup_src0_grp", pwc_wakeup_src0_pins),
- GROUP("pwc_wakeup_src1_grp", pwc_wakeup_src1_pins),
- GROUP("pwc_wakeup_src2_grp", pwc_wakeup_src2_pins),
- GROUP("pwc_wakeup_src3_grp", pwc_wakeup_src3_pins),
- GROUP("pw_cko0_grp0", pw_cko0_pins0),
- GROUP("pw_cko0_grp1", pw_cko0_pins1),
- GROUP("pw_cko0_grp2", pw_cko0_pins2),
- GROUP("pw_cko0_grp3", pw_cko0_pins3),
- GROUP("pw_cko1_grp0", pw_cko1_pins0),
- GROUP("pw_cko1_grp1", pw_cko1_pins1),
- GROUP("pw_cko1_grp2", pw_cko1_pins2),
- GROUP("pw_i2s01_clk_grp0", pw_i2s01_clk_pins0),
- GROUP("pw_i2s01_clk_grp1", pw_i2s01_clk_pins1),
- GROUP("pw_i2s01_clk_grp2", pw_i2s01_clk_pins2),
- GROUP("pw_pwm0_grp0", pw_pwm0_pins0),
- GROUP("pw_pwm0_grp1", pw_pwm0_pins1),
- GROUP("pw_pwm1_grp0", pw_pwm1_pins0),
- GROUP("pw_pwm1_grp1", pw_pwm1_pins1),
- GROUP("pw_pwm1_grp2", pw_pwm1_pins2),
- GROUP("pw_pwm2_grp0", pw_pwm2_pins0),
- GROUP("pw_pwm2_grp1", pw_pwm2_pins1),
- GROUP("pw_pwm2_grp2", pw_pwm2_pins2),
- GROUP("pw_pwm3_grp0", pw_pwm3_pins0),
- GROUP("pw_pwm3_grp1", pw_pwm3_pins1),
- GROUP("pw_pwm_cpu_vol_grp0", pw_pwm_cpu_vol_pins0),
- GROUP("pw_pwm_cpu_vol_grp1", pw_pwm_cpu_vol_pins1),
- GROUP("pw_pwm_cpu_vol_grp2", pw_pwm_cpu_vol_pins2),
- GROUP("pw_backlight_grp0", pw_backlight_pins0),
- GROUP("pw_backlight_grp1", pw_backlight_pins1),
- GROUP("rg_eth_mac_grp", rg_eth_mac_pins),
- GROUP("rg_gmac_phy_intr_n_grp", rg_gmac_phy_intr_n_pins),
- GROUP("rg_rgmii_mac_grp", rg_rgmii_mac_pins),
- GROUP("rg_rgmii_phy_ref_clk_grp0", rg_rgmii_phy_ref_clk_pins0),
- GROUP("rg_rgmii_phy_ref_clk_grp1", rg_rgmii_phy_ref_clk_pins1),
- GROUP("sd0_grp", sd0_pins),
- GROUP("sd0_4bit_grp", sd0_4bit_pins),
- GROUP("sd1_grp", sd1_pins),
- GROUP("sd1_4bit_grp0", sd1_4bit_pins0),
- GROUP("sd1_4bit_grp1", sd1_4bit_pins1),
- GROUP("sd2_basic_grp", sd2_basic_pins),
- GROUP("sd2_cdb_grp0", sd2_cdb_pins0),
- GROUP("sd2_cdb_grp1", sd2_cdb_pins1),
- GROUP("sd2_wpb_grp0", sd2_wpb_pins0),
- GROUP("sd2_wpb_grp1", sd2_wpb_pins1),
- GROUP("sd3_9_grp", sd3_9_pins),
- GROUP("sd5_grp", sd5_pins),
- GROUP("sd6_grp0", sd6_pins0),
- GROUP("sd6_grp1", sd6_pins1),
- GROUP("sp0_ext_ldo_on_grp", sp0_ext_ldo_on_pins),
- GROUP("sp0_qspi_grp", sp0_qspi_pins),
- GROUP("sp1_spi_grp", sp1_spi_pins),
- GROUP("tpiu_trace_grp", tpiu_trace_pins),
- GROUP("uart0_grp", uart0_pins),
- GROUP("uart0_nopause_grp", uart0_nopause_pins),
- GROUP("uart1_grp", uart1_pins),
- GROUP("uart2_cts_grp0", uart2_cts_pins0),
- GROUP("uart2_cts_grp1", uart2_cts_pins1),
- GROUP("uart2_rts_grp0", uart2_rts_pins0),
- GROUP("uart2_rts_grp1", uart2_rts_pins1),
- GROUP("uart2_rxd_grp0", uart2_rxd_pins0),
- GROUP("uart2_rxd_grp1", uart2_rxd_pins1),
- GROUP("uart2_rxd_grp2", uart2_rxd_pins2),
- GROUP("uart2_txd_grp0", uart2_txd_pins0),
- GROUP("uart2_txd_grp1", uart2_txd_pins1),
- GROUP("uart2_txd_grp2", uart2_txd_pins2),
- GROUP("uart3_cts_grp0", uart3_cts_pins0),
- GROUP("uart3_cts_grp1", uart3_cts_pins1),
- GROUP("uart3_cts_grp2", uart3_cts_pins2),
- GROUP("uart3_rts_grp0", uart3_rts_pins0),
- GROUP("uart3_rts_grp1", uart3_rts_pins1),
- GROUP("uart3_rts_grp2", uart3_rts_pins2),
- GROUP("uart3_rxd_grp0", uart3_rxd_pins0),
- GROUP("uart3_rxd_grp1", uart3_rxd_pins1),
- GROUP("uart3_rxd_grp2", uart3_rxd_pins2),
- GROUP("uart3_txd_grp0", uart3_txd_pins0),
- GROUP("uart3_txd_grp1", uart3_txd_pins1),
- GROUP("uart3_txd_grp2", uart3_txd_pins2),
- GROUP("uart4_basic_grp", uart4_basic_pins),
- GROUP("uart4_cts_grp0", uart4_cts_pins0),
- GROUP("uart4_cts_grp1", uart4_cts_pins1),
- GROUP("uart4_cts_grp2", uart4_cts_pins2),
- GROUP("uart4_rts_grp0", uart4_rts_pins0),
- GROUP("uart4_rts_grp1", uart4_rts_pins1),
- GROUP("uart4_rts_grp2", uart4_rts_pins2),
- GROUP("usb0_drvvbus_grp0", usb0_drvvbus_pins0),
- GROUP("usb0_drvvbus_grp1", usb0_drvvbus_pins1),
- GROUP("usb1_drvvbus_grp0", usb1_drvvbus_pins0),
- GROUP("usb1_drvvbus_grp1", usb1_drvvbus_pins1),
- GROUP("visbus_dout_grp", visbus_dout_pins),
- GROUP("vi_vip1_grp", vi_vip1_pins),
- GROUP("vi_vip1_ext_grp", vi_vip1_ext_pins),
- GROUP("vi_vip1_low8bit_grp", vi_vip1_low8bit_pins),
- GROUP("vi_vip1_high8bit_grp", vi_vip1_high8bit_pins),
-};
-
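The GROUP() entries above bind each group name to one of the pin arrays that precede them. A minimal sketch of the descriptor and macro this table presumably relies on -- the field names are assumptions for illustration, not confirmed against the rest of the source:

	struct atlas7_pin_group {
		const char *name;		/* group name exposed to pinctrl */
		const unsigned int *pins;	/* pads belonging to the group */
		unsigned int num_pins;		/* number of entries in @pins */
	};

	#define GROUP(n, p)				\
		{					\
			.name = n,			\
			.pins = p,			\
			.num_pins = ARRAY_SIZE(p),	\
		}
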
-/* Pin groups that each function can use */
-static const char * const gnss_gpio_grp[] = { "gnss_gpio_grp", };
-static const char * const lcd_vip_gpio_grp[] = { "lcd_vip_gpio_grp", };
-static const char * const sdio_i2s_gpio_grp[] = { "sdio_i2s_gpio_grp", };
-static const char * const sp_rgmii_gpio_grp[] = { "sp_rgmii_gpio_grp", };
-static const char * const lvds_gpio_grp[] = { "lvds_gpio_grp", };
-static const char * const jtag_uart_nand_gpio_grp[] = {
- "jtag_uart_nand_gpio_grp", };
-static const char * const rtc_gpio_grp[] = { "rtc_gpio_grp", };
-static const char * const audio_ac97_grp[] = { "audio_ac97_grp", };
-static const char * const audio_digmic_grp0[] = { "audio_digmic_grp0", };
-static const char * const audio_digmic_grp1[] = { "audio_digmic_grp1", };
-static const char * const audio_digmic_grp2[] = { "audio_digmic_grp2", };
-static const char * const audio_func_dbg_grp[] = { "audio_func_dbg_grp", };
-static const char * const audio_i2s_grp[] = { "audio_i2s_grp", };
-static const char * const audio_i2s_2ch_grp[] = { "audio_i2s_2ch_grp", };
-static const char * const audio_i2s_extclk_grp[] = { "audio_i2s_extclk_grp", };
-static const char * const audio_spdif_out_grp0[] = { "audio_spdif_out_grp0", };
-static const char * const audio_spdif_out_grp1[] = { "audio_spdif_out_grp1", };
-static const char * const audio_spdif_out_grp2[] = { "audio_spdif_out_grp2", };
-static const char * const audio_uart0_basic_grp[] = {
- "audio_uart0_basic_grp", };
-static const char * const audio_uart0_urfs_grp0[] = {
- "audio_uart0_urfs_grp0", };
-static const char * const audio_uart0_urfs_grp1[] = {
- "audio_uart0_urfs_grp1", };
-static const char * const audio_uart0_urfs_grp2[] = {
- "audio_uart0_urfs_grp2", };
-static const char * const audio_uart0_urfs_grp3[] = {
- "audio_uart0_urfs_grp3", };
-static const char * const audio_uart1_basic_grp[] = {
- "audio_uart1_basic_grp", };
-static const char * const audio_uart1_urfs_grp0[] = {
- "audio_uart1_urfs_grp0", };
-static const char * const audio_uart1_urfs_grp1[] = {
- "audio_uart1_urfs_grp1", };
-static const char * const audio_uart1_urfs_grp2[] = {
- "audio_uart1_urfs_grp2", };
-static const char * const audio_uart2_urfs_grp0[] = {
- "audio_uart2_urfs_grp0", };
-static const char * const audio_uart2_urfs_grp1[] = {
- "audio_uart2_urfs_grp1", };
-static const char * const audio_uart2_urfs_grp2[] = {
- "audio_uart2_urfs_grp2", };
-static const char * const audio_uart2_urxd_grp0[] = {
- "audio_uart2_urxd_grp0", };
-static const char * const audio_uart2_urxd_grp1[] = {
- "audio_uart2_urxd_grp1", };
-static const char * const audio_uart2_urxd_grp2[] = {
- "audio_uart2_urxd_grp2", };
-static const char * const audio_uart2_usclk_grp0[] = {
- "audio_uart2_usclk_grp0", };
-static const char * const audio_uart2_usclk_grp1[] = {
- "audio_uart2_usclk_grp1", };
-static const char * const audio_uart2_usclk_grp2[] = {
- "audio_uart2_usclk_grp2", };
-static const char * const audio_uart2_utfs_grp0[] = {
- "audio_uart2_utfs_grp0", };
-static const char * const audio_uart2_utfs_grp1[] = {
- "audio_uart2_utfs_grp1", };
-static const char * const audio_uart2_utfs_grp2[] = {
- "audio_uart2_utfs_grp2", };
-static const char * const audio_uart2_utxd_grp0[] = {
- "audio_uart2_utxd_grp0", };
-static const char * const audio_uart2_utxd_grp1[] = {
- "audio_uart2_utxd_grp1", };
-static const char * const audio_uart2_utxd_grp2[] = {
- "audio_uart2_utxd_grp2", };
-static const char * const c_can_trnsvr_en_grp0[] = { "c_can_trnsvr_en_grp0", };
-static const char * const c_can_trnsvr_en_grp1[] = { "c_can_trnsvr_en_grp1", };
-static const char * const c_can_trnsvr_intr_grp[] = {
- "c_can_trnsvr_intr_grp", };
-static const char * const c_can_trnsvr_stb_n_grp[] = {
- "c_can_trnsvr_stb_n_grp", };
-static const char * const c0_can_rxd_trnsv0_grp[] = {
- "c0_can_rxd_trnsv0_grp", };
-static const char * const c0_can_rxd_trnsv1_grp[] = {
- "c0_can_rxd_trnsv1_grp", };
-static const char * const c0_can_txd_trnsv0_grp[] = {
- "c0_can_txd_trnsv0_grp", };
-static const char * const c0_can_txd_trnsv1_grp[] = {
- "c0_can_txd_trnsv1_grp", };
-static const char * const c1_can_rxd_grp0[] = { "c1_can_rxd_grp0", };
-static const char * const c1_can_rxd_grp1[] = { "c1_can_rxd_grp1", };
-static const char * const c1_can_rxd_grp2[] = { "c1_can_rxd_grp2", };
-static const char * const c1_can_rxd_grp3[] = { "c1_can_rxd_grp3", };
-static const char * const c1_can_txd_grp0[] = { "c1_can_txd_grp0", };
-static const char * const c1_can_txd_grp1[] = { "c1_can_txd_grp1", };
-static const char * const c1_can_txd_grp2[] = { "c1_can_txd_grp2", };
-static const char * const c1_can_txd_grp3[] = { "c1_can_txd_grp3", };
-static const char * const ca_audio_lpc_grp[] = { "ca_audio_lpc_grp", };
-static const char * const ca_bt_lpc_grp[] = { "ca_bt_lpc_grp", };
-static const char * const ca_coex_grp[] = { "ca_coex_grp", };
-static const char * const ca_curator_lpc_grp[] = { "ca_curator_lpc_grp", };
-static const char * const ca_pcm_debug_grp[] = { "ca_pcm_debug_grp", };
-static const char * const ca_pio_grp[] = { "ca_pio_grp", };
-static const char * const ca_sdio_debug_grp[] = { "ca_sdio_debug_grp", };
-static const char * const ca_spi_grp[] = { "ca_spi_grp", };
-static const char * const ca_trb_grp[] = { "ca_trb_grp", };
-static const char * const ca_uart_debug_grp[] = { "ca_uart_debug_grp", };
-static const char * const clkc_grp0[] = { "clkc_grp0", };
-static const char * const clkc_grp1[] = { "clkc_grp1", };
-static const char * const gn_gnss_i2c_grp[] = { "gn_gnss_i2c_grp", };
-static const char * const gn_gnss_uart_nopause_grp[] = {
- "gn_gnss_uart_nopause_grp", };
-static const char * const gn_gnss_uart_grp[] = { "gn_gnss_uart_grp", };
-static const char * const gn_trg_spi_grp0[] = { "gn_trg_spi_grp0", };
-static const char * const gn_trg_spi_grp1[] = { "gn_trg_spi_grp1", };
-static const char * const cvbs_dbg_grp[] = { "cvbs_dbg_grp", };
-static const char * const cvbs_dbg_test_grp0[] = { "cvbs_dbg_test_grp0", };
-static const char * const cvbs_dbg_test_grp1[] = { "cvbs_dbg_test_grp1", };
-static const char * const cvbs_dbg_test_grp2[] = { "cvbs_dbg_test_grp2", };
-static const char * const cvbs_dbg_test_grp3[] = { "cvbs_dbg_test_grp3", };
-static const char * const cvbs_dbg_test_grp4[] = { "cvbs_dbg_test_grp4", };
-static const char * const cvbs_dbg_test_grp5[] = { "cvbs_dbg_test_grp5", };
-static const char * const cvbs_dbg_test_grp6[] = { "cvbs_dbg_test_grp6", };
-static const char * const cvbs_dbg_test_grp7[] = { "cvbs_dbg_test_grp7", };
-static const char * const cvbs_dbg_test_grp8[] = { "cvbs_dbg_test_grp8", };
-static const char * const cvbs_dbg_test_grp9[] = { "cvbs_dbg_test_grp9", };
-static const char * const cvbs_dbg_test_grp10[] = { "cvbs_dbg_test_grp10", };
-static const char * const cvbs_dbg_test_grp11[] = { "cvbs_dbg_test_grp11", };
-static const char * const cvbs_dbg_test_grp12[] = { "cvbs_dbg_test_grp12", };
-static const char * const cvbs_dbg_test_grp13[] = { "cvbs_dbg_test_grp13", };
-static const char * const cvbs_dbg_test_grp14[] = { "cvbs_dbg_test_grp14", };
-static const char * const cvbs_dbg_test_grp15[] = { "cvbs_dbg_test_grp15", };
-static const char * const gn_gnss_power_grp[] = { "gn_gnss_power_grp", };
-static const char * const gn_gnss_sw_status_grp[] = {
- "gn_gnss_sw_status_grp", };
-static const char * const gn_gnss_eclk_grp[] = { "gn_gnss_eclk_grp", };
-static const char * const gn_gnss_irq1_grp0[] = { "gn_gnss_irq1_grp0", };
-static const char * const gn_gnss_irq2_grp0[] = { "gn_gnss_irq2_grp0", };
-static const char * const gn_gnss_tm_grp[] = { "gn_gnss_tm_grp", };
-static const char * const gn_gnss_tsync_grp[] = { "gn_gnss_tsync_grp", };
-static const char * const gn_io_gnsssys_sw_cfg_grp[] = {
- "gn_io_gnsssys_sw_cfg_grp", };
-static const char * const gn_trg_grp0[] = { "gn_trg_grp0", };
-static const char * const gn_trg_grp1[] = { "gn_trg_grp1", };
-static const char * const gn_trg_shutdown_grp0[] = { "gn_trg_shutdown_grp0", };
-static const char * const gn_trg_shutdown_grp1[] = { "gn_trg_shutdown_grp1", };
-static const char * const gn_trg_shutdown_grp2[] = { "gn_trg_shutdown_grp2", };
-static const char * const gn_trg_shutdown_grp3[] = { "gn_trg_shutdown_grp3", };
-static const char * const i2c0_grp[] = { "i2c0_grp", };
-static const char * const i2c1_grp[] = { "i2c1_grp", };
-static const char * const i2s0_grp[] = { "i2s0_grp", };
-static const char * const i2s1_basic_grp[] = { "i2s1_basic_grp", };
-static const char * const i2s1_rxd0_grp0[] = { "i2s1_rxd0_grp0", };
-static const char * const i2s1_rxd0_grp1[] = { "i2s1_rxd0_grp1", };
-static const char * const i2s1_rxd0_grp2[] = { "i2s1_rxd0_grp2", };
-static const char * const i2s1_rxd0_grp3[] = { "i2s1_rxd0_grp3", };
-static const char * const i2s1_rxd0_grp4[] = { "i2s1_rxd0_grp4", };
-static const char * const i2s1_rxd1_grp0[] = { "i2s1_rxd1_grp0", };
-static const char * const i2s1_rxd1_grp1[] = { "i2s1_rxd1_grp1", };
-static const char * const i2s1_rxd1_grp2[] = { "i2s1_rxd1_grp2", };
-static const char * const i2s1_rxd1_grp3[] = { "i2s1_rxd1_grp3", };
-static const char * const i2s1_rxd1_grp4[] = { "i2s1_rxd1_grp4", };
-static const char * const jtag_jt_dbg_nsrst_grp[] = {
- "jtag_jt_dbg_nsrst_grp", };
-static const char * const jtag_ntrst_grp0[] = { "jtag_ntrst_grp0", };
-static const char * const jtag_ntrst_grp1[] = { "jtag_ntrst_grp1", };
-static const char * const jtag_swdiotms_grp0[] = { "jtag_swdiotms_grp0", };
-static const char * const jtag_swdiotms_grp1[] = { "jtag_swdiotms_grp1", };
-static const char * const jtag_tck_grp0[] = { "jtag_tck_grp0", };
-static const char * const jtag_tck_grp1[] = { "jtag_tck_grp1", };
-static const char * const jtag_tdi_grp0[] = { "jtag_tdi_grp0", };
-static const char * const jtag_tdi_grp1[] = { "jtag_tdi_grp1", };
-static const char * const jtag_tdo_grp0[] = { "jtag_tdo_grp0", };
-static const char * const jtag_tdo_grp1[] = { "jtag_tdo_grp1", };
-static const char * const ks_kas_spi_grp0[] = { "ks_kas_spi_grp0", };
-static const char * const ld_ldd_grp[] = { "ld_ldd_grp", };
-static const char * const ld_ldd_16bit_grp[] = { "ld_ldd_16bit_grp", };
-static const char * const ld_ldd_fck_grp[] = { "ld_ldd_fck_grp", };
-static const char * const ld_ldd_lck_grp[] = { "ld_ldd_lck_grp", };
-static const char * const lr_lcdrom_grp[] = { "lr_lcdrom_grp", };
-static const char * const lvds_analog_grp[] = { "lvds_analog_grp", };
-static const char * const nd_df_basic_grp[] = { "nd_df_basic_grp", };
-static const char * const nd_df_wp_grp[] = { "nd_df_wp_grp", };
-static const char * const nd_df_cs_grp[] = { "nd_df_cs_grp", };
-static const char * const ps_grp[] = { "ps_grp", };
-static const char * const ps_no_dir_grp[] = { "ps_no_dir_grp", };
-static const char * const pwc_core_on_grp[] = { "pwc_core_on_grp", };
-static const char * const pwc_ext_on_grp[] = { "pwc_ext_on_grp", };
-static const char * const pwc_gpio3_clk_grp[] = { "pwc_gpio3_clk_grp", };
-static const char * const pwc_io_on_grp[] = { "pwc_io_on_grp", };
-static const char * const pwc_lowbatt_b_grp0[] = { "pwc_lowbatt_b_grp0", };
-static const char * const pwc_mem_on_grp[] = { "pwc_mem_on_grp", };
-static const char * const pwc_on_key_b_grp0[] = { "pwc_on_key_b_grp0", };
-static const char * const pwc_wakeup_src0_grp[] = { "pwc_wakeup_src0_grp", };
-static const char * const pwc_wakeup_src1_grp[] = { "pwc_wakeup_src1_grp", };
-static const char * const pwc_wakeup_src2_grp[] = { "pwc_wakeup_src2_grp", };
-static const char * const pwc_wakeup_src3_grp[] = { "pwc_wakeup_src3_grp", };
-static const char * const pw_cko0_grp0[] = { "pw_cko0_grp0", };
-static const char * const pw_cko0_grp1[] = { "pw_cko0_grp1", };
-static const char * const pw_cko0_grp2[] = { "pw_cko0_grp2", };
-static const char * const pw_cko0_grp3[] = { "pw_cko0_grp3", };
-static const char * const pw_cko1_grp0[] = { "pw_cko1_grp0", };
-static const char * const pw_cko1_grp1[] = { "pw_cko1_grp1", };
-static const char * const pw_cko1_grp2[] = { "pw_cko1_grp2", };
-static const char * const pw_i2s01_clk_grp0[] = { "pw_i2s01_clk_grp0", };
-static const char * const pw_i2s01_clk_grp1[] = { "pw_i2s01_clk_grp1", };
-static const char * const pw_i2s01_clk_grp2[] = { "pw_i2s01_clk_grp2", };
-static const char * const pw_pwm0_grp0[] = { "pw_pwm0_grp0", };
-static const char * const pw_pwm0_grp1[] = { "pw_pwm0_grp1", };
-static const char * const pw_pwm1_grp0[] = { "pw_pwm1_grp0", };
-static const char * const pw_pwm1_grp1[] = { "pw_pwm1_grp1", };
-static const char * const pw_pwm1_grp2[] = { "pw_pwm1_grp2", };
-static const char * const pw_pwm2_grp0[] = { "pw_pwm2_grp0", };
-static const char * const pw_pwm2_grp1[] = { "pw_pwm2_grp1", };
-static const char * const pw_pwm2_grp2[] = { "pw_pwm2_grp2", };
-static const char * const pw_pwm3_grp0[] = { "pw_pwm3_grp0", };
-static const char * const pw_pwm3_grp1[] = { "pw_pwm3_grp1", };
-static const char * const pw_pwm_cpu_vol_grp0[] = { "pw_pwm_cpu_vol_grp0", };
-static const char * const pw_pwm_cpu_vol_grp1[] = { "pw_pwm_cpu_vol_grp1", };
-static const char * const pw_pwm_cpu_vol_grp2[] = { "pw_pwm_cpu_vol_grp2", };
-static const char * const pw_backlight_grp0[] = { "pw_backlight_grp0", };
-static const char * const pw_backlight_grp1[] = { "pw_backlight_grp1", };
-static const char * const rg_eth_mac_grp[] = { "rg_eth_mac_grp", };
-static const char * const rg_gmac_phy_intr_n_grp[] = {
- "rg_gmac_phy_intr_n_grp", };
-static const char * const rg_rgmii_mac_grp[] = { "rg_rgmii_mac_grp", };
-static const char * const rg_rgmii_phy_ref_clk_grp0[] = {
- "rg_rgmii_phy_ref_clk_grp0", };
-static const char * const rg_rgmii_phy_ref_clk_grp1[] = {
- "rg_rgmii_phy_ref_clk_grp1", };
-static const char * const sd0_grp[] = { "sd0_grp", };
-static const char * const sd0_4bit_grp[] = { "sd0_4bit_grp", };
-static const char * const sd1_grp[] = { "sd1_grp", };
-static const char * const sd1_4bit_grp0[] = { "sd1_4bit_grp0", };
-static const char * const sd1_4bit_grp1[] = { "sd1_4bit_grp1", };
-static const char * const sd2_basic_grp[] = { "sd2_basic_grp", };
-static const char * const sd2_cdb_grp0[] = { "sd2_cdb_grp0", };
-static const char * const sd2_cdb_grp1[] = { "sd2_cdb_grp1", };
-static const char * const sd2_wpb_grp0[] = { "sd2_wpb_grp0", };
-static const char * const sd2_wpb_grp1[] = { "sd2_wpb_grp1", };
-static const char * const sd3_9_grp[] = { "sd3_9_grp", };
-static const char * const sd5_grp[] = { "sd5_grp", };
-static const char * const sd6_grp0[] = { "sd6_grp0", };
-static const char * const sd6_grp1[] = { "sd6_grp1", };
-static const char * const sp0_ext_ldo_on_grp[] = { "sp0_ext_ldo_on_grp", };
-static const char * const sp0_qspi_grp[] = { "sp0_qspi_grp", };
-static const char * const sp1_spi_grp[] = { "sp1_spi_grp", };
-static const char * const tpiu_trace_grp[] = { "tpiu_trace_grp", };
-static const char * const uart0_grp[] = { "uart0_grp", };
-static const char * const uart0_nopause_grp[] = { "uart0_nopause_grp", };
-static const char * const uart1_grp[] = { "uart1_grp", };
-static const char * const uart2_cts_grp0[] = { "uart2_cts_grp0", };
-static const char * const uart2_cts_grp1[] = { "uart2_cts_grp1", };
-static const char * const uart2_rts_grp0[] = { "uart2_rts_grp0", };
-static const char * const uart2_rts_grp1[] = { "uart2_rts_grp1", };
-static const char * const uart2_rxd_grp0[] = { "uart2_rxd_grp0", };
-static const char * const uart2_rxd_grp1[] = { "uart2_rxd_grp1", };
-static const char * const uart2_rxd_grp2[] = { "uart2_rxd_grp2", };
-static const char * const uart2_txd_grp0[] = { "uart2_txd_grp0", };
-static const char * const uart2_txd_grp1[] = { "uart2_txd_grp1", };
-static const char * const uart2_txd_grp2[] = { "uart2_txd_grp2", };
-static const char * const uart3_cts_grp0[] = { "uart3_cts_grp0", };
-static const char * const uart3_cts_grp1[] = { "uart3_cts_grp1", };
-static const char * const uart3_cts_grp2[] = { "uart3_cts_grp2", };
-static const char * const uart3_rts_grp0[] = { "uart3_rts_grp0", };
-static const char * const uart3_rts_grp1[] = { "uart3_rts_grp1", };
-static const char * const uart3_rts_grp2[] = { "uart3_rts_grp2", };
-static const char * const uart3_rxd_grp0[] = { "uart3_rxd_grp0", };
-static const char * const uart3_rxd_grp1[] = { "uart3_rxd_grp1", };
-static const char * const uart3_rxd_grp2[] = { "uart3_rxd_grp2", };
-static const char * const uart3_txd_grp0[] = { "uart3_txd_grp0", };
-static const char * const uart3_txd_grp1[] = { "uart3_txd_grp1", };
-static const char * const uart3_txd_grp2[] = { "uart3_txd_grp2", };
-static const char * const uart4_basic_grp[] = { "uart4_basic_grp", };
-static const char * const uart4_cts_grp0[] = { "uart4_cts_grp0", };
-static const char * const uart4_cts_grp1[] = { "uart4_cts_grp1", };
-static const char * const uart4_cts_grp2[] = { "uart4_cts_grp2", };
-static const char * const uart4_rts_grp0[] = { "uart4_rts_grp0", };
-static const char * const uart4_rts_grp1[] = { "uart4_rts_grp1", };
-static const char * const uart4_rts_grp2[] = { "uart4_rts_grp2", };
-static const char * const usb0_drvvbus_grp0[] = { "usb0_drvvbus_grp0", };
-static const char * const usb0_drvvbus_grp1[] = { "usb0_drvvbus_grp1", };
-static const char * const usb1_drvvbus_grp0[] = { "usb1_drvvbus_grp0", };
-static const char * const usb1_drvvbus_grp1[] = { "usb1_drvvbus_grp1", };
-static const char * const visbus_dout_grp[] = { "visbus_dout_grp", };
-static const char * const vi_vip1_grp[] = { "vi_vip1_grp", };
-static const char * const vi_vip1_ext_grp[] = { "vi_vip1_ext_grp", };
-static const char * const vi_vip1_low8bit_grp[] = { "vi_vip1_low8bit_grp", };
-static const char * const vi_vip1_high8bit_grp[] = { "vi_vip1_high8bit_grp", };
-
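Each string array above names the pin groups a single function can be routed through; the per-group pad_mux tables that follow then describe the register programming for each routing. A hedged sketch of how such name arrays are typically tied into a function descriptor -- the struct, macro, and field names here are illustrative assumptions, not taken from the full source:

	struct atlas7_pmx_func {
		const char *name;		/* function name */
		const char * const *groups;	/* names of selectable groups */
		unsigned int num_groups;	/* number of entries in @groups */
	};

	#define FUNCTION(n, g)				\
		{					\
			.name = n,			\
			.groups = g,			\
			.num_groups = ARRAY_SIZE(g),	\
		}
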
-static struct atlas7_pad_mux gnss_gpio_grp_pad_mux[] = {
- MUX(1, 119, 0, N, N, N, N),
- MUX(1, 120, 0, N, N, N, N),
- MUX(1, 121, 0, N, N, N, N),
- MUX(1, 122, 0, N, N, N, N),
- MUX(1, 123, 0, N, N, N, N),
- MUX(1, 124, 0, N, N, N, N),
- MUX(1, 125, 0, N, N, N, N),
- MUX(1, 126, 0, N, N, N, N),
- MUX(1, 127, 0, N, N, N, N),
- MUX(1, 128, 0, N, N, N, N),
- MUX(1, 22, 0, N, N, N, N),
- MUX(1, 23, 0, N, N, N, N),
- MUX(1, 24, 0, N, N, N, N),
- MUX(1, 25, 0, N, N, N, N),
- MUX(1, 26, 0, N, N, N, N),
- MUX(1, 27, 0, N, N, N, N),
- MUX(1, 28, 0, N, N, N, N),
- MUX(1, 29, 0, N, N, N, N),
- MUX(1, 30, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gnss_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gnss_gpio_grp_pad_mux),
- .pad_mux_list = gnss_gpio_grp_pad_mux,
-};
-
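Judging from the values used in these tables, the seven MUX() arguments appear to encode a pad bank, a pad number, a function-select value, and two optional register/bit pairs for input selection, with N standing in where no such register applies (the 0xa00/0xa80-style offsets further down are examples of the register form). A sketch under those assumptions -- the field names are not confirmed against the rest of the source:

	struct atlas7_pad_mux {
		u32 bank;		/* pad bank */
		u32 pin;		/* pad number within the bank */
		u32 func;		/* function-select value */
		u32 dinput_reg;		/* input-select register, or N */
		u32 dinput_bit;		/* bit within @dinput_reg */
		u32 dinput_val_reg;	/* input-select value register, or N */
		u32 dinput_val_bit;	/* bit within @dinput_val_reg */
	};

	#define MUX(b, pad, f, dr, db, dvr, dvb)	\
		{ b, pad, f, dr, db, dvr, dvb }
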
-static struct atlas7_pad_mux lcd_vip_gpio_grp_pad_mux[] = {
- MUX(1, 74, 0, N, N, N, N),
- MUX(1, 75, 0, N, N, N, N),
- MUX(1, 76, 0, N, N, N, N),
- MUX(1, 77, 0, N, N, N, N),
- MUX(1, 78, 0, N, N, N, N),
- MUX(1, 79, 0, N, N, N, N),
- MUX(1, 80, 0, N, N, N, N),
- MUX(1, 81, 0, N, N, N, N),
- MUX(1, 82, 0, N, N, N, N),
- MUX(1, 83, 0, N, N, N, N),
- MUX(1, 84, 0, N, N, N, N),
- MUX(1, 53, 0, N, N, N, N),
- MUX(1, 54, 0, N, N, N, N),
- MUX(1, 55, 0, N, N, N, N),
- MUX(1, 56, 0, N, N, N, N),
- MUX(1, 57, 0, N, N, N, N),
- MUX(1, 58, 0, N, N, N, N),
- MUX(1, 59, 0, N, N, N, N),
- MUX(1, 60, 0, N, N, N, N),
- MUX(1, 61, 0, N, N, N, N),
- MUX(1, 62, 0, N, N, N, N),
- MUX(1, 63, 0, N, N, N, N),
- MUX(1, 64, 0, N, N, N, N),
- MUX(1, 65, 0, N, N, N, N),
- MUX(1, 66, 0, N, N, N, N),
- MUX(1, 67, 0, N, N, N, N),
- MUX(1, 68, 0, N, N, N, N),
- MUX(1, 69, 0, N, N, N, N),
- MUX(1, 70, 0, N, N, N, N),
- MUX(1, 71, 0, N, N, N, N),
- MUX(1, 72, 0, N, N, N, N),
- MUX(1, 73, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux lcd_vip_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(lcd_vip_gpio_grp_pad_mux),
- .pad_mux_list = lcd_vip_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sdio_i2s_gpio_grp_pad_mux[] = {
- MUX(1, 31, 0, N, N, N, N),
- MUX(1, 32, 0, N, N, N, N),
- MUX(1, 33, 0, N, N, N, N),
- MUX(1, 34, 0, N, N, N, N),
- MUX(1, 35, 0, N, N, N, N),
- MUX(1, 36, 0, N, N, N, N),
- MUX(1, 85, 0, N, N, N, N),
- MUX(1, 86, 0, N, N, N, N),
- MUX(1, 87, 0, N, N, N, N),
- MUX(1, 88, 0, N, N, N, N),
- MUX(1, 89, 0, N, N, N, N),
- MUX(1, 90, 0, N, N, N, N),
- MUX(1, 129, 0, N, N, N, N),
- MUX(1, 130, 0, N, N, N, N),
- MUX(1, 131, 0, N, N, N, N),
- MUX(1, 132, 0, N, N, N, N),
- MUX(1, 91, 0, N, N, N, N),
- MUX(1, 92, 0, N, N, N, N),
- MUX(1, 93, 0, N, N, N, N),
- MUX(1, 94, 0, N, N, N, N),
- MUX(1, 95, 0, N, N, N, N),
- MUX(1, 96, 0, N, N, N, N),
- MUX(1, 112, 0, N, N, N, N),
- MUX(1, 113, 0, N, N, N, N),
- MUX(1, 114, 0, N, N, N, N),
- MUX(1, 115, 0, N, N, N, N),
- MUX(1, 116, 0, N, N, N, N),
- MUX(1, 117, 0, N, N, N, N),
- MUX(1, 118, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sdio_i2s_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sdio_i2s_gpio_grp_pad_mux),
- .pad_mux_list = sdio_i2s_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sp_rgmii_gpio_grp_pad_mux[] = {
- MUX(1, 97, 0, N, N, N, N),
- MUX(1, 98, 0, N, N, N, N),
- MUX(1, 99, 0, N, N, N, N),
- MUX(1, 100, 0, N, N, N, N),
- MUX(1, 101, 0, N, N, N, N),
- MUX(1, 102, 0, N, N, N, N),
- MUX(1, 103, 0, N, N, N, N),
- MUX(1, 104, 0, N, N, N, N),
- MUX(1, 105, 0, N, N, N, N),
- MUX(1, 106, 0, N, N, N, N),
- MUX(1, 107, 0, N, N, N, N),
- MUX(1, 108, 0, N, N, N, N),
- MUX(1, 109, 0, N, N, N, N),
- MUX(1, 110, 0, N, N, N, N),
- MUX(1, 111, 0, N, N, N, N),
- MUX(1, 18, 0, N, N, N, N),
- MUX(1, 19, 0, N, N, N, N),
- MUX(1, 20, 0, N, N, N, N),
- MUX(1, 21, 0, N, N, N, N),
- MUX(1, 141, 0, N, N, N, N),
- MUX(1, 142, 0, N, N, N, N),
- MUX(1, 143, 0, N, N, N, N),
- MUX(1, 144, 0, N, N, N, N),
- MUX(1, 145, 0, N, N, N, N),
- MUX(1, 146, 0, N, N, N, N),
- MUX(1, 147, 0, N, N, N, N),
- MUX(1, 148, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sp_rgmii_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sp_rgmii_gpio_grp_pad_mux),
- .pad_mux_list = sp_rgmii_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux lvds_gpio_grp_pad_mux[] = {
- MUX(1, 157, 0, N, N, N, N),
- MUX(1, 158, 0, N, N, N, N),
- MUX(1, 155, 0, N, N, N, N),
- MUX(1, 156, 0, N, N, N, N),
- MUX(1, 153, 0, N, N, N, N),
- MUX(1, 154, 0, N, N, N, N),
- MUX(1, 151, 0, N, N, N, N),
- MUX(1, 152, 0, N, N, N, N),
- MUX(1, 149, 0, N, N, N, N),
- MUX(1, 150, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux lvds_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(lvds_gpio_grp_pad_mux),
- .pad_mux_list = lvds_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_uart_nand_gpio_grp_pad_mux[] = {
- MUX(1, 44, 0, N, N, N, N),
- MUX(1, 43, 0, N, N, N, N),
- MUX(1, 42, 0, N, N, N, N),
- MUX(1, 41, 0, N, N, N, N),
- MUX(1, 40, 0, N, N, N, N),
- MUX(1, 39, 0, N, N, N, N),
- MUX(1, 38, 0, N, N, N, N),
- MUX(1, 37, 0, N, N, N, N),
- MUX(1, 46, 0, N, N, N, N),
- MUX(1, 47, 0, N, N, N, N),
- MUX(1, 48, 0, N, N, N, N),
- MUX(1, 49, 0, N, N, N, N),
- MUX(1, 50, 0, N, N, N, N),
- MUX(1, 52, 0, N, N, N, N),
- MUX(1, 51, 0, N, N, N, N),
- MUX(1, 45, 0, N, N, N, N),
- MUX(1, 133, 0, N, N, N, N),
- MUX(1, 134, 0, N, N, N, N),
- MUX(1, 135, 0, N, N, N, N),
- MUX(1, 136, 0, N, N, N, N),
- MUX(1, 137, 0, N, N, N, N),
- MUX(1, 138, 0, N, N, N, N),
- MUX(1, 139, 0, N, N, N, N),
- MUX(1, 140, 0, N, N, N, N),
- MUX(1, 159, 0, N, N, N, N),
- MUX(1, 160, 0, N, N, N, N),
- MUX(1, 161, 0, N, N, N, N),
- MUX(1, 162, 0, N, N, N, N),
- MUX(1, 163, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux jtag_uart_nand_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_uart_nand_gpio_grp_pad_mux),
- .pad_mux_list = jtag_uart_nand_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = {
- MUX(0, 0, 0, N, N, N, N),
- MUX(0, 1, 0, N, N, N, N),
- MUX(0, 2, 0, N, N, N, N),
- MUX(0, 3, 0, N, N, N, N),
- MUX(0, 4, 0, N, N, N, N),
- MUX(0, 10, 0, N, N, N, N),
- MUX(0, 11, 0, N, N, N, N),
- MUX(0, 12, 0, N, N, N, N),
- MUX(0, 13, 0, N, N, N, N),
- MUX(0, 14, 0, N, N, N, N),
- MUX(0, 15, 0, N, N, N, N),
- MUX(0, 16, 0, N, N, N, N),
- MUX(0, 17, 0, N, N, N, N),
- MUX(0, 9, 0, N, N, N, N),
-};
-
-static struct atlas7_grp_mux rtc_gpio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(rtc_gpio_grp_pad_mux),
- .pad_mux_list = rtc_gpio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_ac97_grp_pad_mux[] = {
- MUX(1, 113, 2, N, N, N, N),
- MUX(1, 118, 2, N, N, N, N),
- MUX(1, 115, 2, N, N, N, N),
- MUX(1, 114, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_ac97_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_ac97_grp_pad_mux),
- .pad_mux_list = audio_ac97_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_digmic_grp0_pad_mux[] = {
- MUX(1, 51, 3, 0xa10, 20, 0xa90, 20),
-};
-
-static struct atlas7_grp_mux audio_digmic_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_digmic_grp0_pad_mux),
- .pad_mux_list = audio_digmic_grp0_pad_mux,
-};
-
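audio_digmic_grp0 above is the first group whose MUX() entry carries real register offsets (0xa10 bit 20 and 0xa90 bit 20) rather than N placeholders. A hedged sketch of how a driver might walk a group's pad_mux_list when that group is selected; the register layout and the helper itself are hypothetical, for illustration only:

	static void atlas7_apply_grp_mux(void __iomem *base,
					 const struct atlas7_grp_mux *gm)
	{
		unsigned int i;

		for (i = 0; i < gm->pad_mux_count; i++) {
			const struct atlas7_pad_mux *m = &gm->pad_mux_list[i];

			/* hypothetical per-pad function-select write */
			writel(m->func, base + 4 * m->pin);

			/* set the input-select bit when a register is given */
			if (m->dinput_reg)
				writel(BIT(m->dinput_bit),
				       base + m->dinput_reg);
		}
	}
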
-static struct atlas7_pad_mux audio_digmic_grp1_pad_mux[] = {
- MUX(1, 122, 5, 0xa10, 20, 0xa90, 20),
-};
-
-static struct atlas7_grp_mux audio_digmic_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_digmic_grp1_pad_mux),
- .pad_mux_list = audio_digmic_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_digmic_grp2_pad_mux[] = {
- MUX(1, 161, 7, 0xa10, 20, 0xa90, 20),
-};
-
-static struct atlas7_grp_mux audio_digmic_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_digmic_grp2_pad_mux),
- .pad_mux_list = audio_digmic_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_func_dbg_grp_pad_mux[] = {
- MUX(1, 141, 4, N, N, N, N),
- MUX(1, 144, 4, N, N, N, N),
- MUX(1, 44, 6, N, N, N, N),
- MUX(1, 43, 6, N, N, N, N),
- MUX(1, 42, 6, N, N, N, N),
- MUX(1, 41, 6, N, N, N, N),
- MUX(1, 40, 6, N, N, N, N),
- MUX(1, 39, 6, N, N, N, N),
- MUX(1, 38, 6, N, N, N, N),
- MUX(1, 37, 6, N, N, N, N),
- MUX(1, 74, 6, N, N, N, N),
- MUX(1, 75, 6, N, N, N, N),
- MUX(1, 76, 6, N, N, N, N),
- MUX(1, 77, 6, N, N, N, N),
- MUX(1, 78, 6, N, N, N, N),
- MUX(1, 79, 6, N, N, N, N),
- MUX(1, 81, 6, N, N, N, N),
- MUX(1, 113, 6, N, N, N, N),
- MUX(1, 114, 6, N, N, N, N),
- MUX(1, 118, 6, N, N, N, N),
- MUX(1, 115, 6, N, N, N, N),
- MUX(1, 49, 6, N, N, N, N),
- MUX(1, 50, 6, N, N, N, N),
- MUX(1, 142, 4, N, N, N, N),
- MUX(1, 143, 4, N, N, N, N),
- MUX(1, 80, 6, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_func_dbg_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_func_dbg_grp_pad_mux),
- .pad_mux_list = audio_func_dbg_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_i2s_grp_pad_mux[] = {
- MUX(1, 118, 1, N, N, N, N),
- MUX(1, 115, 1, N, N, N, N),
- MUX(1, 116, 1, N, N, N, N),
- MUX(1, 117, 1, N, N, N, N),
- MUX(1, 112, 1, N, N, N, N),
- MUX(1, 113, 1, N, N, N, N),
- MUX(1, 114, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_i2s_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_i2s_grp_pad_mux),
- .pad_mux_list = audio_i2s_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_i2s_2ch_grp_pad_mux[] = {
- MUX(1, 118, 1, N, N, N, N),
- MUX(1, 115, 1, N, N, N, N),
- MUX(1, 112, 1, N, N, N, N),
- MUX(1, 113, 1, N, N, N, N),
- MUX(1, 114, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_i2s_2ch_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_i2s_2ch_grp_pad_mux),
- .pad_mux_list = audio_i2s_2ch_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_i2s_extclk_grp_pad_mux[] = {
- MUX(1, 112, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_i2s_extclk_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_i2s_extclk_grp_pad_mux),
- .pad_mux_list = audio_i2s_extclk_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_spdif_out_grp0_pad_mux[] = {
- MUX(1, 112, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_spdif_out_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp0_pad_mux),
- .pad_mux_list = audio_spdif_out_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_spdif_out_grp1_pad_mux[] = {
- MUX(1, 116, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_spdif_out_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp1_pad_mux),
- .pad_mux_list = audio_spdif_out_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_spdif_out_grp2_pad_mux[] = {
- MUX(1, 142, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_spdif_out_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp2_pad_mux),
- .pad_mux_list = audio_spdif_out_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart0_basic_grp_pad_mux[] = {
- MUX(1, 143, 1, N, N, N, N),
- MUX(1, 142, 1, N, N, N, N),
- MUX(1, 141, 1, N, N, N, N),
- MUX(1, 144, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux audio_uart0_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart0_basic_grp_pad_mux),
- .pad_mux_list = audio_uart0_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart0_urfs_grp0_pad_mux[] = {
- MUX(1, 117, 5, 0xa10, 28, 0xa90, 28),
-};
-
-static struct atlas7_grp_mux audio_uart0_urfs_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp0_pad_mux),
- .pad_mux_list = audio_uart0_urfs_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart0_urfs_grp1_pad_mux[] = {
- MUX(1, 139, 3, 0xa10, 28, 0xa90, 28),
-};
-
-static struct atlas7_grp_mux audio_uart0_urfs_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp1_pad_mux),
- .pad_mux_list = audio_uart0_urfs_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart0_urfs_grp2_pad_mux[] = {
- MUX(1, 163, 3, 0xa10, 28, 0xa90, 28),
-};
-
-static struct atlas7_grp_mux audio_uart0_urfs_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp2_pad_mux),
- .pad_mux_list = audio_uart0_urfs_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart0_urfs_grp3_pad_mux[] = {
- MUX(1, 162, 6, 0xa10, 28, 0xa90, 28),
-};
-
-static struct atlas7_grp_mux audio_uart0_urfs_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp3_pad_mux),
- .pad_mux_list = audio_uart0_urfs_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart1_basic_grp_pad_mux[] = {
- MUX(1, 147, 1, 0xa10, 24, 0xa90, 24),
- MUX(1, 146, 1, 0xa10, 25, 0xa90, 25),
- MUX(1, 145, 1, 0xa10, 23, 0xa90, 23),
- MUX(1, 148, 1, 0xa10, 22, 0xa90, 22),
-};
-
-static struct atlas7_grp_mux audio_uart1_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart1_basic_grp_pad_mux),
- .pad_mux_list = audio_uart1_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart1_urfs_grp0_pad_mux[] = {
- MUX(1, 117, 6, 0xa10, 29, 0xa90, 29),
-};
-
-static struct atlas7_grp_mux audio_uart1_urfs_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp0_pad_mux),
- .pad_mux_list = audio_uart1_urfs_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart1_urfs_grp1_pad_mux[] = {
- MUX(1, 140, 3, 0xa10, 29, 0xa90, 29),
-};
-
-static struct atlas7_grp_mux audio_uart1_urfs_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp1_pad_mux),
- .pad_mux_list = audio_uart1_urfs_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart1_urfs_grp2_pad_mux[] = {
- MUX(1, 163, 4, 0xa10, 29, 0xa90, 29),
-};
-
-static struct atlas7_grp_mux audio_uart1_urfs_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp2_pad_mux),
- .pad_mux_list = audio_uart1_urfs_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urfs_grp0_pad_mux[] = {
- MUX(1, 139, 4, 0xa10, 30, 0xa90, 30),
-};
-
-static struct atlas7_grp_mux audio_uart2_urfs_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp0_pad_mux),
- .pad_mux_list = audio_uart2_urfs_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urfs_grp1_pad_mux[] = {
- MUX(1, 163, 6, 0xa10, 30, 0xa90, 30),
-};
-
-static struct atlas7_grp_mux audio_uart2_urfs_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp1_pad_mux),
- .pad_mux_list = audio_uart2_urfs_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urfs_grp2_pad_mux[] = {
- MUX(1, 96, 3, 0xa10, 30, 0xa90, 30),
-};
-
-static struct atlas7_grp_mux audio_uart2_urfs_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp2_pad_mux),
- .pad_mux_list = audio_uart2_urfs_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urxd_grp0_pad_mux[] = {
- MUX(1, 20, 2, 0xa00, 24, 0xa80, 24),
-};
-
-static struct atlas7_grp_mux audio_uart2_urxd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp0_pad_mux),
- .pad_mux_list = audio_uart2_urxd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urxd_grp1_pad_mux[] = {
- MUX(1, 109, 2, 0xa00, 24, 0xa80, 24),
-};
-
-static struct atlas7_grp_mux audio_uart2_urxd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp1_pad_mux),
- .pad_mux_list = audio_uart2_urxd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_urxd_grp2_pad_mux[] = {
- MUX(1, 93, 3, 0xa00, 24, 0xa80, 24),
-};
-
-static struct atlas7_grp_mux audio_uart2_urxd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp2_pad_mux),
- .pad_mux_list = audio_uart2_urxd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_usclk_grp0_pad_mux[] = {
- MUX(1, 19, 2, 0xa00, 23, 0xa80, 23),
-};
-
-static struct atlas7_grp_mux audio_uart2_usclk_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp0_pad_mux),
- .pad_mux_list = audio_uart2_usclk_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_usclk_grp1_pad_mux[] = {
- MUX(1, 101, 2, 0xa00, 23, 0xa80, 23),
-};
-
-static struct atlas7_grp_mux audio_uart2_usclk_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp1_pad_mux),
- .pad_mux_list = audio_uart2_usclk_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_usclk_grp2_pad_mux[] = {
- MUX(1, 91, 3, 0xa00, 23, 0xa80, 23),
-};
-
-static struct atlas7_grp_mux audio_uart2_usclk_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp2_pad_mux),
- .pad_mux_list = audio_uart2_usclk_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utfs_grp0_pad_mux[] = {
- MUX(1, 18, 2, 0xa00, 22, 0xa80, 22),
-};
-
-static struct atlas7_grp_mux audio_uart2_utfs_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp0_pad_mux),
- .pad_mux_list = audio_uart2_utfs_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utfs_grp1_pad_mux[] = {
- MUX(1, 111, 2, 0xa00, 22, 0xa80, 22),
-};
-
-static struct atlas7_grp_mux audio_uart2_utfs_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp1_pad_mux),
- .pad_mux_list = audio_uart2_utfs_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utfs_grp2_pad_mux[] = {
- MUX(1, 94, 3, 0xa00, 22, 0xa80, 22),
-};
-
-static struct atlas7_grp_mux audio_uart2_utfs_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp2_pad_mux),
- .pad_mux_list = audio_uart2_utfs_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utxd_grp0_pad_mux[] = {
- MUX(1, 21, 2, 0xa00, 25, 0xa80, 25),
-};
-
-static struct atlas7_grp_mux audio_uart2_utxd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp0_pad_mux),
- .pad_mux_list = audio_uart2_utxd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utxd_grp1_pad_mux[] = {
- MUX(1, 110, 2, 0xa00, 25, 0xa80, 25),
-};
-
-static struct atlas7_grp_mux audio_uart2_utxd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp1_pad_mux),
- .pad_mux_list = audio_uart2_utxd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux audio_uart2_utxd_grp2_pad_mux[] = {
- MUX(1, 92, 3, 0xa00, 25, 0xa80, 25),
-};
-
-static struct atlas7_grp_mux audio_uart2_utxd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp2_pad_mux),
- .pad_mux_list = audio_uart2_utxd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux c_can_trnsvr_en_grp0_pad_mux[] = {
- MUX(0, 2, 6, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c_can_trnsvr_en_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp0_pad_mux),
- .pad_mux_list = c_can_trnsvr_en_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux c_can_trnsvr_en_grp1_pad_mux[] = {
- MUX(0, 0, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c_can_trnsvr_en_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp1_pad_mux),
- .pad_mux_list = c_can_trnsvr_en_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux c_can_trnsvr_intr_grp_pad_mux[] = {
- MUX(0, 1, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c_can_trnsvr_intr_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_intr_grp_pad_mux),
- .pad_mux_list = c_can_trnsvr_intr_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c_can_trnsvr_stb_n_grp_pad_mux[] = {
- MUX(0, 3, 6, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c_can_trnsvr_stb_n_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_stb_n_grp_pad_mux),
- .pad_mux_list = c_can_trnsvr_stb_n_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c0_can_rxd_trnsv0_grp_pad_mux[] = {
- MUX(0, 11, 1, 0xa08, 9, 0xa88, 9),
-};
-
-static struct atlas7_grp_mux c0_can_rxd_trnsv0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv0_grp_pad_mux),
- .pad_mux_list = c0_can_rxd_trnsv0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c0_can_rxd_trnsv1_grp_pad_mux[] = {
- MUX(0, 2, 5, 0xa10, 9, 0xa90, 9),
-};
-
-static struct atlas7_grp_mux c0_can_rxd_trnsv1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv1_grp_pad_mux),
- .pad_mux_list = c0_can_rxd_trnsv1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c0_can_txd_trnsv0_grp_pad_mux[] = {
- MUX(0, 10, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c0_can_txd_trnsv0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv0_grp_pad_mux),
- .pad_mux_list = c0_can_txd_trnsv0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c0_can_txd_trnsv1_grp_pad_mux[] = {
- MUX(0, 3, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c0_can_txd_trnsv1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv1_grp_pad_mux),
- .pad_mux_list = c0_can_txd_trnsv1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_rxd_grp0_pad_mux[] = {
- MUX(1, 138, 2, 0xa00, 4, 0xa80, 4),
-};
-
-static struct atlas7_grp_mux c1_can_rxd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp0_pad_mux),
- .pad_mux_list = c1_can_rxd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_rxd_grp1_pad_mux[] = {
- MUX(1, 147, 2, 0xa00, 4, 0xa80, 4),
-};
-
-static struct atlas7_grp_mux c1_can_rxd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp1_pad_mux),
- .pad_mux_list = c1_can_rxd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_rxd_grp2_pad_mux[] = {
- MUX(0, 2, 2, 0xa00, 4, 0xa80, 4),
-};
-
-static struct atlas7_grp_mux c1_can_rxd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp2_pad_mux),
- .pad_mux_list = c1_can_rxd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_rxd_grp3_pad_mux[] = {
- MUX(1, 162, 4, 0xa00, 4, 0xa80, 4),
-};
-
-static struct atlas7_grp_mux c1_can_rxd_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp3_pad_mux),
- .pad_mux_list = c1_can_rxd_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_txd_grp0_pad_mux[] = {
- MUX(1, 137, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c1_can_txd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp0_pad_mux),
- .pad_mux_list = c1_can_txd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_txd_grp1_pad_mux[] = {
- MUX(1, 146, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c1_can_txd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp1_pad_mux),
- .pad_mux_list = c1_can_txd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_txd_grp2_pad_mux[] = {
- MUX(0, 3, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c1_can_txd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp2_pad_mux),
- .pad_mux_list = c1_can_txd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux c1_can_txd_grp3_pad_mux[] = {
- MUX(1, 161, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux c1_can_txd_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp3_pad_mux),
- .pad_mux_list = c1_can_txd_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_audio_lpc_grp_pad_mux[] = {
- MUX(1, 62, 4, N, N, N, N),
- MUX(1, 63, 4, N, N, N, N),
- MUX(1, 64, 4, N, N, N, N),
- MUX(1, 65, 4, N, N, N, N),
- MUX(1, 66, 4, N, N, N, N),
- MUX(1, 67, 4, N, N, N, N),
- MUX(1, 68, 4, N, N, N, N),
- MUX(1, 69, 4, N, N, N, N),
- MUX(1, 70, 4, N, N, N, N),
- MUX(1, 71, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_audio_lpc_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_audio_lpc_grp_pad_mux),
- .pad_mux_list = ca_audio_lpc_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_bt_lpc_grp_pad_mux[] = {
- MUX(1, 85, 5, N, N, N, N),
- MUX(1, 86, 5, N, N, N, N),
- MUX(1, 87, 5, N, N, N, N),
- MUX(1, 88, 5, N, N, N, N),
- MUX(1, 89, 5, N, N, N, N),
- MUX(1, 90, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_bt_lpc_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_bt_lpc_grp_pad_mux),
- .pad_mux_list = ca_bt_lpc_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_coex_grp_pad_mux[] = {
- MUX(1, 129, 1, N, N, N, N),
- MUX(1, 130, 1, N, N, N, N),
- MUX(1, 131, 1, N, N, N, N),
- MUX(1, 132, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_coex_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_coex_grp_pad_mux),
- .pad_mux_list = ca_coex_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_curator_lpc_grp_pad_mux[] = {
- MUX(1, 57, 4, N, N, N, N),
- MUX(1, 58, 4, N, N, N, N),
- MUX(1, 59, 4, N, N, N, N),
- MUX(1, 60, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_curator_lpc_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_curator_lpc_grp_pad_mux),
- .pad_mux_list = ca_curator_lpc_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_pcm_debug_grp_pad_mux[] = {
- MUX(1, 91, 5, N, N, N, N),
- MUX(1, 93, 5, N, N, N, N),
- MUX(1, 94, 5, N, N, N, N),
- MUX(1, 92, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_pcm_debug_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_pcm_debug_grp_pad_mux),
- .pad_mux_list = ca_pcm_debug_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_pio_grp_pad_mux[] = {
- MUX(1, 121, 2, N, N, N, N),
- MUX(1, 122, 2, N, N, N, N),
- MUX(1, 125, 6, N, N, N, N),
- MUX(1, 126, 6, N, N, N, N),
- MUX(1, 38, 5, N, N, N, N),
- MUX(1, 37, 5, N, N, N, N),
- MUX(1, 47, 5, N, N, N, N),
- MUX(1, 49, 5, N, N, N, N),
- MUX(1, 50, 5, N, N, N, N),
- MUX(1, 54, 4, N, N, N, N),
- MUX(1, 55, 4, N, N, N, N),
- MUX(1, 56, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_pio_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_pio_grp_pad_mux),
- .pad_mux_list = ca_pio_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_sdio_debug_grp_pad_mux[] = {
- MUX(1, 40, 5, N, N, N, N),
- MUX(1, 39, 5, N, N, N, N),
- MUX(1, 44, 5, N, N, N, N),
- MUX(1, 43, 5, N, N, N, N),
- MUX(1, 42, 5, N, N, N, N),
- MUX(1, 41, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_sdio_debug_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_sdio_debug_grp_pad_mux),
- .pad_mux_list = ca_sdio_debug_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_spi_grp_pad_mux[] = {
- MUX(1, 82, 5, N, N, N, N),
- MUX(1, 79, 5, 0xa08, 6, 0xa88, 6),
- MUX(1, 80, 5, N, N, N, N),
- MUX(1, 81, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_spi_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_spi_grp_pad_mux),
- .pad_mux_list = ca_spi_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_trb_grp_pad_mux[] = {
- MUX(1, 91, 4, N, N, N, N),
- MUX(1, 93, 4, N, N, N, N),
- MUX(1, 94, 4, N, N, N, N),
- MUX(1, 95, 4, N, N, N, N),
- MUX(1, 96, 4, N, N, N, N),
- MUX(1, 78, 5, N, N, N, N),
- MUX(1, 74, 5, N, N, N, N),
- MUX(1, 75, 5, N, N, N, N),
- MUX(1, 76, 5, N, N, N, N),
- MUX(1, 77, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_trb_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_trb_grp_pad_mux),
- .pad_mux_list = ca_trb_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ca_uart_debug_grp_pad_mux[] = {
- MUX(1, 136, 3, N, N, N, N),
- MUX(1, 135, 3, N, N, N, N),
- MUX(1, 134, 3, N, N, N, N),
- MUX(1, 133, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ca_uart_debug_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ca_uart_debug_grp_pad_mux),
- .pad_mux_list = ca_uart_debug_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux clkc_grp0_pad_mux[] = {
- MUX(1, 30, 2, 0xa08, 14, 0xa88, 14),
- MUX(1, 47, 6, N, N, N, N),
-};
-
-static struct atlas7_grp_mux clkc_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(clkc_grp0_pad_mux),
- .pad_mux_list = clkc_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux clkc_grp1_pad_mux[] = {
- MUX(1, 78, 3, 0xa08, 14, 0xa88, 14),
- MUX(1, 54, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux clkc_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(clkc_grp1_pad_mux),
- .pad_mux_list = clkc_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_i2c_grp_pad_mux[] = {
- MUX(1, 128, 2, N, N, N, N),
- MUX(1, 127, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_i2c_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_i2c_grp_pad_mux),
- .pad_mux_list = gn_gnss_i2c_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_uart_nopause_grp_pad_mux[] = {
- MUX(1, 134, 4, N, N, N, N),
- MUX(1, 133, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_uart_nopause_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_uart_nopause_grp_pad_mux),
- .pad_mux_list = gn_gnss_uart_nopause_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_uart_grp_pad_mux[] = {
- MUX(1, 134, 4, N, N, N, N),
- MUX(1, 133, 4, N, N, N, N),
- MUX(1, 136, 4, N, N, N, N),
- MUX(1, 135, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_uart_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_uart_grp_pad_mux),
- .pad_mux_list = gn_gnss_uart_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_spi_grp0_pad_mux[] = {
- MUX(1, 22, 1, N, N, N, N),
- MUX(1, 25, 1, N, N, N, N),
- MUX(1, 23, 1, 0xa00, 10, 0xa80, 10),
- MUX(1, 24, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_spi_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_spi_grp0_pad_mux),
- .pad_mux_list = gn_trg_spi_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_spi_grp1_pad_mux[] = {
- MUX(1, 82, 3, N, N, N, N),
- MUX(1, 79, 3, N, N, N, N),
- MUX(1, 80, 3, 0xa00, 10, 0xa80, 10),
- MUX(1, 81, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_spi_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_spi_grp1_pad_mux),
- .pad_mux_list = gn_trg_spi_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_grp_pad_mux[] = {
- MUX(1, 54, 3, N, N, N, N),
- MUX(1, 53, 3, N, N, N, N),
- MUX(1, 82, 7, N, N, N, N),
- MUX(1, 74, 7, N, N, N, N),
- MUX(1, 75, 7, N, N, N, N),
- MUX(1, 76, 7, N, N, N, N),
- MUX(1, 77, 7, N, N, N, N),
- MUX(1, 78, 7, N, N, N, N),
- MUX(1, 79, 7, N, N, N, N),
- MUX(1, 80, 7, N, N, N, N),
- MUX(1, 81, 7, N, N, N, N),
- MUX(1, 83, 7, N, N, N, N),
- MUX(1, 84, 7, N, N, N, N),
- MUX(1, 73, 3, N, N, N, N),
- MUX(1, 55, 3, N, N, N, N),
- MUX(1, 56, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_grp_pad_mux),
- .pad_mux_list = cvbs_dbg_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp0_pad_mux[] = {
- MUX(1, 57, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp0_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp1_pad_mux[] = {
- MUX(1, 58, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp1_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp2_pad_mux[] = {
- MUX(1, 59, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp2_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp3_pad_mux[] = {
- MUX(1, 60, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp3_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp4_pad_mux[] = {
- MUX(1, 61, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp4_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp4_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp4_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp5_pad_mux[] = {
- MUX(1, 62, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp5_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp5_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp5_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp6_pad_mux[] = {
- MUX(1, 63, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp6_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp6_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp6_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp7_pad_mux[] = {
- MUX(1, 64, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp7_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp7_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp7_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp8_pad_mux[] = {
- MUX(1, 65, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp8_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp8_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp8_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp9_pad_mux[] = {
- MUX(1, 66, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp9_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp9_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp9_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp10_pad_mux[] = {
- MUX(1, 67, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp10_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp10_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp10_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp11_pad_mux[] = {
- MUX(1, 68, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp11_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp11_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp11_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp12_pad_mux[] = {
- MUX(1, 69, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp12_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp12_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp12_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp13_pad_mux[] = {
- MUX(1, 70, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp13_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp13_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp13_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp14_pad_mux[] = {
- MUX(1, 71, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp14_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp14_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp14_pad_mux,
-};
-
-static struct atlas7_pad_mux cvbs_dbg_test_grp15_pad_mux[] = {
- MUX(1, 72, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux cvbs_dbg_test_grp15_mux = {
- .pad_mux_count = ARRAY_SIZE(cvbs_dbg_test_grp15_pad_mux),
- .pad_mux_list = cvbs_dbg_test_grp15_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_power_grp_pad_mux[] = {
- MUX(1, 123, 7, N, N, N, N),
- MUX(1, 124, 7, N, N, N, N),
- MUX(1, 121, 7, N, N, N, N),
- MUX(1, 122, 7, N, N, N, N),
- MUX(1, 125, 7, N, N, N, N),
- MUX(1, 120, 7, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_power_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_power_grp_pad_mux),
- .pad_mux_list = gn_gnss_power_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_sw_status_grp_pad_mux[] = {
- MUX(1, 57, 7, N, N, N, N),
- MUX(1, 58, 7, N, N, N, N),
- MUX(1, 59, 7, N, N, N, N),
- MUX(1, 60, 7, N, N, N, N),
- MUX(1, 61, 7, N, N, N, N),
- MUX(1, 62, 7, N, N, N, N),
- MUX(1, 63, 7, N, N, N, N),
- MUX(1, 64, 7, N, N, N, N),
- MUX(1, 65, 7, N, N, N, N),
- MUX(1, 66, 7, N, N, N, N),
- MUX(1, 67, 7, N, N, N, N),
- MUX(1, 68, 7, N, N, N, N),
- MUX(1, 69, 7, N, N, N, N),
- MUX(1, 70, 7, N, N, N, N),
- MUX(1, 71, 7, N, N, N, N),
- MUX(1, 72, 7, N, N, N, N),
- MUX(1, 53, 7, N, N, N, N),
- MUX(1, 55, 7, N, N, N, N),
- MUX(1, 56, 7, 0xa08, 12, 0xa88, 12),
- MUX(1, 54, 7, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_sw_status_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_sw_status_grp_pad_mux),
- .pad_mux_list = gn_gnss_sw_status_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_eclk_grp_pad_mux[] = {
- MUX(1, 113, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_eclk_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_eclk_grp_pad_mux),
- .pad_mux_list = gn_gnss_eclk_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_irq1_grp0_pad_mux[] = {
- MUX(1, 112, 4, 0xa08, 10, 0xa88, 10),
-};
-
-static struct atlas7_grp_mux gn_gnss_irq1_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_irq1_grp0_pad_mux),
- .pad_mux_list = gn_gnss_irq1_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_irq2_grp0_pad_mux[] = {
- MUX(1, 118, 4, 0xa08, 11, 0xa88, 11),
-};
-
-static struct atlas7_grp_mux gn_gnss_irq2_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_irq2_grp0_pad_mux),
- .pad_mux_list = gn_gnss_irq2_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_tm_grp_pad_mux[] = {
- MUX(1, 115, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_tm_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_tm_grp_pad_mux),
- .pad_mux_list = gn_gnss_tm_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_gnss_tsync_grp_pad_mux[] = {
- MUX(1, 114, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_gnss_tsync_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_gnss_tsync_grp_pad_mux),
- .pad_mux_list = gn_gnss_tsync_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_io_gnsssys_sw_cfg_grp_pad_mux[] = {
- MUX(1, 44, 7, N, N, N, N),
- MUX(1, 43, 7, N, N, N, N),
- MUX(1, 42, 7, N, N, N, N),
- MUX(1, 41, 7, N, N, N, N),
- MUX(1, 40, 7, N, N, N, N),
- MUX(1, 39, 7, N, N, N, N),
- MUX(1, 38, 7, N, N, N, N),
- MUX(1, 37, 7, N, N, N, N),
- MUX(1, 49, 7, N, N, N, N),
- MUX(1, 50, 7, N, N, N, N),
- MUX(1, 91, 7, N, N, N, N),
- MUX(1, 92, 7, N, N, N, N),
- MUX(1, 93, 7, N, N, N, N),
- MUX(1, 94, 7, N, N, N, N),
- MUX(1, 95, 7, N, N, N, N),
- MUX(1, 96, 7, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_io_gnsssys_sw_cfg_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_io_gnsssys_sw_cfg_grp_pad_mux),
- .pad_mux_list = gn_io_gnsssys_sw_cfg_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_grp0_pad_mux[] = {
- MUX(1, 29, 1, 0xa00, 6, 0xa80, 6),
- MUX(1, 28, 1, 0xa00, 7, 0xa80, 7),
- MUX(1, 26, 1, 0xa00, 8, 0xa80, 8),
- MUX(1, 27, 1, 0xa00, 9, 0xa80, 9),
-};
-
-static struct atlas7_grp_mux gn_trg_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_grp0_pad_mux),
- .pad_mux_list = gn_trg_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_grp1_pad_mux[] = {
- MUX(1, 77, 3, 0xa00, 6, 0xa80, 6),
- MUX(1, 76, 3, 0xa00, 7, 0xa80, 7),
- MUX(1, 74, 3, 0xa00, 8, 0xa80, 8),
- MUX(1, 75, 3, 0xa00, 9, 0xa80, 9),
-};
-
-static struct atlas7_grp_mux gn_trg_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_grp1_pad_mux),
- .pad_mux_list = gn_trg_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_shutdown_grp0_pad_mux[] = {
- MUX(1, 30, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_shutdown_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_shutdown_grp0_pad_mux),
- .pad_mux_list = gn_trg_shutdown_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_shutdown_grp1_pad_mux[] = {
- MUX(1, 83, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_shutdown_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_shutdown_grp1_pad_mux),
- .pad_mux_list = gn_trg_shutdown_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_shutdown_grp2_pad_mux[] = {
- MUX(1, 117, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_shutdown_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_shutdown_grp2_pad_mux),
- .pad_mux_list = gn_trg_shutdown_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux gn_trg_shutdown_grp3_pad_mux[] = {
- MUX(1, 123, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux gn_trg_shutdown_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(gn_trg_shutdown_grp3_pad_mux),
- .pad_mux_list = gn_trg_shutdown_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux i2c0_grp_pad_mux[] = {
- MUX(1, 128, 1, N, N, N, N),
- MUX(1, 127, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux i2c0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(i2c0_grp_pad_mux),
- .pad_mux_list = i2c0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux i2c1_grp_pad_mux[] = {
- MUX(1, 126, 4, N, N, N, N),
- MUX(1, 125, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux i2c1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(i2c1_grp_pad_mux),
- .pad_mux_list = i2c1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s0_grp_pad_mux[] = {
- MUX(1, 91, 2, 0xa10, 12, 0xa90, 12),
- MUX(1, 93, 2, 0xa10, 13, 0xa90, 13),
- MUX(1, 94, 2, 0xa10, 14, 0xa90, 14),
- MUX(1, 92, 2, 0xa10, 15, 0xa90, 15),
-};
-
-static struct atlas7_grp_mux i2s0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s0_grp_pad_mux),
- .pad_mux_list = i2s0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_basic_grp_pad_mux[] = {
- MUX(1, 95, 2, 0xa10, 16, 0xa90, 16),
- MUX(1, 96, 2, 0xa10, 19, 0xa90, 19),
-};
-
-static struct atlas7_grp_mux i2s1_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_basic_grp_pad_mux),
- .pad_mux_list = i2s1_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd0_grp0_pad_mux[] = {
- MUX(1, 61, 4, 0xa10, 17, 0xa90, 17),
-};
-
-static struct atlas7_grp_mux i2s1_rxd0_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp0_pad_mux),
- .pad_mux_list = i2s1_rxd0_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd0_grp1_pad_mux[] = {
- MUX(1, 131, 4, 0xa10, 17, 0xa90, 17),
-};
-
-static struct atlas7_grp_mux i2s1_rxd0_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp1_pad_mux),
- .pad_mux_list = i2s1_rxd0_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd0_grp2_pad_mux[] = {
- MUX(1, 129, 2, 0xa10, 17, 0xa90, 17),
-};
-
-static struct atlas7_grp_mux i2s1_rxd0_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp2_pad_mux),
- .pad_mux_list = i2s1_rxd0_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd0_grp3_pad_mux[] = {
- MUX(1, 117, 7, 0xa10, 17, 0xa90, 17),
-};
-
-static struct atlas7_grp_mux i2s1_rxd0_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp3_pad_mux),
- .pad_mux_list = i2s1_rxd0_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd0_grp4_pad_mux[] = {
- MUX(1, 83, 4, 0xa10, 17, 0xa90, 17),
-};
-
-static struct atlas7_grp_mux i2s1_rxd0_grp4_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp4_pad_mux),
- .pad_mux_list = i2s1_rxd0_grp4_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd1_grp0_pad_mux[] = {
- MUX(1, 72, 4, 0xa10, 18, 0xa90, 18),
-};
-
-static struct atlas7_grp_mux i2s1_rxd1_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp0_pad_mux),
- .pad_mux_list = i2s1_rxd1_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd1_grp1_pad_mux[] = {
- MUX(1, 132, 4, 0xa10, 18, 0xa90, 18),
-};
-
-static struct atlas7_grp_mux i2s1_rxd1_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp1_pad_mux),
- .pad_mux_list = i2s1_rxd1_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd1_grp2_pad_mux[] = {
- MUX(1, 130, 2, 0xa10, 18, 0xa90, 18),
-};
-
-static struct atlas7_grp_mux i2s1_rxd1_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp2_pad_mux),
- .pad_mux_list = i2s1_rxd1_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd1_grp3_pad_mux[] = {
- MUX(1, 118, 7, 0xa10, 18, 0xa90, 18),
-};
-
-static struct atlas7_grp_mux i2s1_rxd1_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp3_pad_mux),
- .pad_mux_list = i2s1_rxd1_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux i2s1_rxd1_grp4_pad_mux[] = {
- MUX(1, 84, 4, 0xa10, 18, 0xa90, 18),
-};
-
-static struct atlas7_grp_mux i2s1_rxd1_grp4_mux = {
- .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp4_pad_mux),
- .pad_mux_list = i2s1_rxd1_grp4_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_jt_dbg_nsrst_grp_pad_mux[] = {
- MUX(1, 125, 5, 0xa08, 2, 0xa88, 2),
-};
-
-static struct atlas7_grp_mux jtag_jt_dbg_nsrst_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_jt_dbg_nsrst_grp_pad_mux),
- .pad_mux_list = jtag_jt_dbg_nsrst_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_ntrst_grp0_pad_mux[] = {
- MUX(0, 4, 3, 0xa08, 3, 0xa88, 3),
-};
-
-static struct atlas7_grp_mux jtag_ntrst_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp0_pad_mux),
- .pad_mux_list = jtag_ntrst_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_ntrst_grp1_pad_mux[] = {
- MUX(1, 163, 1, 0xa08, 3, 0xa88, 3),
-};
-
-static struct atlas7_grp_mux jtag_ntrst_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp1_pad_mux),
- .pad_mux_list = jtag_ntrst_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_swdiotms_grp0_pad_mux[] = {
- MUX(0, 2, 3, 0xa10, 10, 0xa90, 10),
-};
-
-static struct atlas7_grp_mux jtag_swdiotms_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp0_pad_mux),
- .pad_mux_list = jtag_swdiotms_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_swdiotms_grp1_pad_mux[] = {
- MUX(1, 160, 1, 0xa10, 10, 0xa90, 10),
-};
-
-static struct atlas7_grp_mux jtag_swdiotms_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp1_pad_mux),
- .pad_mux_list = jtag_swdiotms_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tck_grp0_pad_mux[] = {
- MUX(0, 0, 3, 0xa10, 11, 0xa90, 11),
-};
-
-static struct atlas7_grp_mux jtag_tck_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tck_grp0_pad_mux),
- .pad_mux_list = jtag_tck_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tck_grp1_pad_mux[] = {
- MUX(1, 161, 1, 0xa10, 11, 0xa90, 11),
-};
-
-static struct atlas7_grp_mux jtag_tck_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tck_grp1_pad_mux),
- .pad_mux_list = jtag_tck_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tdi_grp0_pad_mux[] = {
- MUX(0, 1, 3, 0xa10, 31, 0xa90, 31),
-};
-
-static struct atlas7_grp_mux jtag_tdi_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp0_pad_mux),
- .pad_mux_list = jtag_tdi_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tdi_grp1_pad_mux[] = {
- MUX(1, 162, 1, 0xa10, 31, 0xa90, 31),
-};
-
-static struct atlas7_grp_mux jtag_tdi_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp1_pad_mux),
- .pad_mux_list = jtag_tdi_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tdo_grp0_pad_mux[] = {
- MUX(0, 3, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux jtag_tdo_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp0_pad_mux),
- .pad_mux_list = jtag_tdo_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux jtag_tdo_grp1_pad_mux[] = {
- MUX(1, 159, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux jtag_tdo_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp1_pad_mux),
- .pad_mux_list = jtag_tdo_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux ks_kas_spi_grp0_pad_mux[] = {
- MUX(1, 141, 2, N, N, N, N),
- MUX(1, 144, 2, 0xa08, 8, 0xa88, 8),
- MUX(1, 143, 2, N, N, N, N),
- MUX(1, 142, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ks_kas_spi_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(ks_kas_spi_grp0_pad_mux),
- .pad_mux_list = ks_kas_spi_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux ld_ldd_grp_pad_mux[] = {
- MUX(1, 57, 1, N, N, N, N),
- MUX(1, 58, 1, N, N, N, N),
- MUX(1, 59, 1, N, N, N, N),
- MUX(1, 60, 1, N, N, N, N),
- MUX(1, 61, 1, N, N, N, N),
- MUX(1, 62, 1, N, N, N, N),
- MUX(1, 63, 1, N, N, N, N),
- MUX(1, 64, 1, N, N, N, N),
- MUX(1, 65, 1, N, N, N, N),
- MUX(1, 66, 1, N, N, N, N),
- MUX(1, 67, 1, N, N, N, N),
- MUX(1, 68, 1, N, N, N, N),
- MUX(1, 69, 1, N, N, N, N),
- MUX(1, 70, 1, N, N, N, N),
- MUX(1, 71, 1, N, N, N, N),
- MUX(1, 72, 1, N, N, N, N),
- MUX(1, 74, 2, N, N, N, N),
- MUX(1, 75, 2, N, N, N, N),
- MUX(1, 76, 2, N, N, N, N),
- MUX(1, 77, 2, N, N, N, N),
- MUX(1, 78, 2, N, N, N, N),
- MUX(1, 79, 2, N, N, N, N),
- MUX(1, 80, 2, N, N, N, N),
- MUX(1, 81, 2, N, N, N, N),
- MUX(1, 56, 1, N, N, N, N),
- MUX(1, 53, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ld_ldd_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ld_ldd_grp_pad_mux),
- .pad_mux_list = ld_ldd_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ld_ldd_16bit_grp_pad_mux[] = {
- MUX(1, 57, 1, N, N, N, N),
- MUX(1, 58, 1, N, N, N, N),
- MUX(1, 59, 1, N, N, N, N),
- MUX(1, 60, 1, N, N, N, N),
- MUX(1, 61, 1, N, N, N, N),
- MUX(1, 62, 1, N, N, N, N),
- MUX(1, 63, 1, N, N, N, N),
- MUX(1, 64, 1, N, N, N, N),
- MUX(1, 65, 1, N, N, N, N),
- MUX(1, 66, 1, N, N, N, N),
- MUX(1, 67, 1, N, N, N, N),
- MUX(1, 68, 1, N, N, N, N),
- MUX(1, 69, 1, N, N, N, N),
- MUX(1, 70, 1, N, N, N, N),
- MUX(1, 71, 1, N, N, N, N),
- MUX(1, 72, 1, N, N, N, N),
- MUX(1, 56, 1, N, N, N, N),
- MUX(1, 53, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ld_ldd_16bit_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ld_ldd_16bit_grp_pad_mux),
- .pad_mux_list = ld_ldd_16bit_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ld_ldd_fck_grp_pad_mux[] = {
- MUX(1, 55, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ld_ldd_fck_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ld_ldd_fck_grp_pad_mux),
- .pad_mux_list = ld_ldd_fck_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ld_ldd_lck_grp_pad_mux[] = {
- MUX(1, 54, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ld_ldd_lck_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ld_ldd_lck_grp_pad_mux),
- .pad_mux_list = ld_ldd_lck_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux lr_lcdrom_grp_pad_mux[] = {
- MUX(1, 73, 2, N, N, N, N),
- MUX(1, 54, 2, N, N, N, N),
- MUX(1, 57, 2, N, N, N, N),
- MUX(1, 58, 2, N, N, N, N),
- MUX(1, 59, 2, N, N, N, N),
- MUX(1, 60, 2, N, N, N, N),
- MUX(1, 61, 2, N, N, N, N),
- MUX(1, 62, 2, N, N, N, N),
- MUX(1, 63, 2, N, N, N, N),
- MUX(1, 64, 2, N, N, N, N),
- MUX(1, 65, 2, N, N, N, N),
- MUX(1, 66, 2, N, N, N, N),
- MUX(1, 67, 2, N, N, N, N),
- MUX(1, 68, 2, N, N, N, N),
- MUX(1, 69, 2, N, N, N, N),
- MUX(1, 70, 2, N, N, N, N),
- MUX(1, 71, 2, N, N, N, N),
- MUX(1, 72, 2, N, N, N, N),
- MUX(1, 56, 2, N, N, N, N),
- MUX(1, 53, 2, N, N, N, N),
- MUX(1, 55, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux lr_lcdrom_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(lr_lcdrom_grp_pad_mux),
- .pad_mux_list = lr_lcdrom_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux lvds_analog_grp_pad_mux[] = {
- MUX(1, 149, 8, N, N, N, N),
- MUX(1, 150, 8, N, N, N, N),
- MUX(1, 151, 8, N, N, N, N),
- MUX(1, 152, 8, N, N, N, N),
- MUX(1, 153, 8, N, N, N, N),
- MUX(1, 154, 8, N, N, N, N),
- MUX(1, 155, 8, N, N, N, N),
- MUX(1, 156, 8, N, N, N, N),
- MUX(1, 157, 8, N, N, N, N),
- MUX(1, 158, 8, N, N, N, N),
-};
-
-static struct atlas7_grp_mux lvds_analog_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(lvds_analog_grp_pad_mux),
- .pad_mux_list = lvds_analog_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux nd_df_basic_grp_pad_mux[] = {
- MUX(1, 44, 1, N, N, N, N),
- MUX(1, 43, 1, N, N, N, N),
- MUX(1, 42, 1, N, N, N, N),
- MUX(1, 41, 1, N, N, N, N),
- MUX(1, 40, 1, N, N, N, N),
- MUX(1, 39, 1, N, N, N, N),
- MUX(1, 38, 1, N, N, N, N),
- MUX(1, 37, 1, N, N, N, N),
- MUX(1, 47, 1, N, N, N, N),
- MUX(1, 46, 1, N, N, N, N),
- MUX(1, 52, 1, N, N, N, N),
- MUX(1, 45, 1, N, N, N, N),
- MUX(1, 49, 1, N, N, N, N),
- MUX(1, 50, 1, N, N, N, N),
- MUX(1, 48, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux nd_df_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(nd_df_basic_grp_pad_mux),
- .pad_mux_list = nd_df_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux nd_df_wp_grp_pad_mux[] = {
- MUX(1, 124, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux nd_df_wp_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(nd_df_wp_grp_pad_mux),
- .pad_mux_list = nd_df_wp_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux nd_df_cs_grp_pad_mux[] = {
- MUX(1, 51, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux nd_df_cs_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(nd_df_cs_grp_pad_mux),
- .pad_mux_list = nd_df_cs_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ps_grp_pad_mux[] = {
- MUX(1, 120, 2, N, N, N, N),
- MUX(1, 119, 2, N, N, N, N),
- MUX(1, 121, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ps_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ps_grp_pad_mux),
- .pad_mux_list = ps_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux ps_no_dir_grp_pad_mux[] = {
- MUX(1, 119, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux ps_no_dir_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(ps_no_dir_grp_pad_mux),
- .pad_mux_list = ps_no_dir_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_core_on_grp_pad_mux[] = {
- MUX(0, 8, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_core_on_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_core_on_grp_pad_mux),
- .pad_mux_list = pwc_core_on_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_ext_on_grp_pad_mux[] = {
- MUX(0, 6, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_ext_on_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_ext_on_grp_pad_mux),
- .pad_mux_list = pwc_ext_on_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_gpio3_clk_grp_pad_mux[] = {
- MUX(0, 3, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_gpio3_clk_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_gpio3_clk_grp_pad_mux),
- .pad_mux_list = pwc_gpio3_clk_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_io_on_grp_pad_mux[] = {
- MUX(0, 9, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_io_on_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_io_on_grp_pad_mux),
- .pad_mux_list = pwc_io_on_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_lowbatt_b_grp0_pad_mux[] = {
- MUX(0, 4, 1, 0xa08, 4, 0xa88, 4),
-};
-
-static struct atlas7_grp_mux pwc_lowbatt_b_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_lowbatt_b_grp0_pad_mux),
- .pad_mux_list = pwc_lowbatt_b_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_mem_on_grp_pad_mux[] = {
- MUX(0, 7, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_mem_on_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_mem_on_grp_pad_mux),
- .pad_mux_list = pwc_mem_on_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_on_key_b_grp0_pad_mux[] = {
- MUX(0, 5, 1, 0xa08, 5, 0xa88, 5),
-};
-
-static struct atlas7_grp_mux pwc_on_key_b_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_on_key_b_grp0_pad_mux),
- .pad_mux_list = pwc_on_key_b_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_wakeup_src0_grp_pad_mux[] = {
- MUX(0, 0, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_wakeup_src0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_wakeup_src0_grp_pad_mux),
- .pad_mux_list = pwc_wakeup_src0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_wakeup_src1_grp_pad_mux[] = {
- MUX(0, 1, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_wakeup_src1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_wakeup_src1_grp_pad_mux),
- .pad_mux_list = pwc_wakeup_src1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_wakeup_src2_grp_pad_mux[] = {
- MUX(0, 2, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_wakeup_src2_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_wakeup_src2_grp_pad_mux),
- .pad_mux_list = pwc_wakeup_src2_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pwc_wakeup_src3_grp_pad_mux[] = {
- MUX(0, 3, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pwc_wakeup_src3_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(pwc_wakeup_src3_grp_pad_mux),
- .pad_mux_list = pwc_wakeup_src3_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko0_grp0_pad_mux[] = {
- MUX(1, 123, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko0_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko0_grp0_pad_mux),
- .pad_mux_list = pw_cko0_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko0_grp1_pad_mux[] = {
- MUX(1, 101, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko0_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko0_grp1_pad_mux),
- .pad_mux_list = pw_cko0_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko0_grp2_pad_mux[] = {
- MUX(1, 82, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko0_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko0_grp2_pad_mux),
- .pad_mux_list = pw_cko0_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko0_grp3_pad_mux[] = {
- MUX(1, 162, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko0_grp3_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko0_grp3_pad_mux),
- .pad_mux_list = pw_cko0_grp3_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko1_grp0_pad_mux[] = {
- MUX(1, 124, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko1_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko1_grp0_pad_mux),
- .pad_mux_list = pw_cko1_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko1_grp1_pad_mux[] = {
- MUX(1, 110, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko1_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko1_grp1_pad_mux),
- .pad_mux_list = pw_cko1_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_cko1_grp2_pad_mux[] = {
- MUX(1, 163, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_cko1_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_cko1_grp2_pad_mux),
- .pad_mux_list = pw_cko1_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_i2s01_clk_grp0_pad_mux[] = {
- MUX(1, 125, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_i2s01_clk_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_i2s01_clk_grp0_pad_mux),
- .pad_mux_list = pw_i2s01_clk_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_i2s01_clk_grp1_pad_mux[] = {
- MUX(1, 117, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_i2s01_clk_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_i2s01_clk_grp1_pad_mux),
- .pad_mux_list = pw_i2s01_clk_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_i2s01_clk_grp2_pad_mux[] = {
- MUX(1, 132, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_i2s01_clk_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_i2s01_clk_grp2_pad_mux),
- .pad_mux_list = pw_i2s01_clk_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm0_grp0_pad_mux[] = {
- MUX(1, 119, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm0_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp0_pad_mux),
- .pad_mux_list = pw_pwm0_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm0_grp1_pad_mux[] = {
- MUX(1, 159, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm0_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp1_pad_mux),
- .pad_mux_list = pw_pwm0_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm1_grp0_pad_mux[] = {
- MUX(1, 120, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm1_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp0_pad_mux),
- .pad_mux_list = pw_pwm1_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm1_grp1_pad_mux[] = {
- MUX(1, 160, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm1_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp1_pad_mux),
- .pad_mux_list = pw_pwm1_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm1_grp2_pad_mux[] = {
- MUX(1, 131, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm1_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp2_pad_mux),
- .pad_mux_list = pw_pwm1_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm2_grp0_pad_mux[] = {
- MUX(1, 121, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm2_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm2_grp0_pad_mux),
- .pad_mux_list = pw_pwm2_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm2_grp1_pad_mux[] = {
- MUX(1, 98, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm2_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm2_grp1_pad_mux),
- .pad_mux_list = pw_pwm2_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm2_grp2_pad_mux[] = {
- MUX(1, 161, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm2_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm2_grp2_pad_mux),
- .pad_mux_list = pw_pwm2_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm3_grp0_pad_mux[] = {
- MUX(1, 122, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm3_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm3_grp0_pad_mux),
- .pad_mux_list = pw_pwm3_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm3_grp1_pad_mux[] = {
- MUX(1, 73, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm3_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm3_grp1_pad_mux),
- .pad_mux_list = pw_pwm3_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm_cpu_vol_grp0_pad_mux[] = {
- MUX(1, 121, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm_cpu_vol_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm_cpu_vol_grp0_pad_mux),
- .pad_mux_list = pw_pwm_cpu_vol_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm_cpu_vol_grp1_pad_mux[] = {
- MUX(1, 98, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm_cpu_vol_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm_cpu_vol_grp1_pad_mux),
- .pad_mux_list = pw_pwm_cpu_vol_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_pwm_cpu_vol_grp2_pad_mux[] = {
- MUX(1, 161, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_pwm_cpu_vol_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_pwm_cpu_vol_grp2_pad_mux),
- .pad_mux_list = pw_pwm_cpu_vol_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_backlight_grp0_pad_mux[] = {
- MUX(1, 122, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_backlight_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_backlight_grp0_pad_mux),
- .pad_mux_list = pw_backlight_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux pw_backlight_grp1_pad_mux[] = {
- MUX(1, 73, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux pw_backlight_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(pw_backlight_grp1_pad_mux),
- .pad_mux_list = pw_backlight_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux rg_eth_mac_grp_pad_mux[] = {
- MUX(1, 108, 1, N, N, N, N),
- MUX(1, 103, 1, N, N, N, N),
- MUX(1, 104, 1, N, N, N, N),
- MUX(1, 105, 1, N, N, N, N),
- MUX(1, 106, 1, N, N, N, N),
- MUX(1, 107, 1, N, N, N, N),
- MUX(1, 102, 1, N, N, N, N),
- MUX(1, 97, 1, N, N, N, N),
- MUX(1, 98, 1, N, N, N, N),
- MUX(1, 99, 1, N, N, N, N),
- MUX(1, 100, 1, N, N, N, N),
- MUX(1, 101, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux rg_eth_mac_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(rg_eth_mac_grp_pad_mux),
- .pad_mux_list = rg_eth_mac_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux rg_gmac_phy_intr_n_grp_pad_mux[] = {
- MUX(1, 111, 1, 0xa08, 13, 0xa88, 13),
-};
-
-static struct atlas7_grp_mux rg_gmac_phy_intr_n_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(rg_gmac_phy_intr_n_grp_pad_mux),
- .pad_mux_list = rg_gmac_phy_intr_n_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux rg_rgmii_mac_grp_pad_mux[] = {
- MUX(1, 109, 1, N, N, N, N),
- MUX(1, 110, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux rg_rgmii_mac_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(rg_rgmii_mac_grp_pad_mux),
- .pad_mux_list = rg_rgmii_mac_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux rg_rgmii_phy_ref_clk_grp0_pad_mux[] = {
- MUX(1, 111, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux rg_rgmii_phy_ref_clk_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(rg_rgmii_phy_ref_clk_grp0_pad_mux),
- .pad_mux_list = rg_rgmii_phy_ref_clk_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux rg_rgmii_phy_ref_clk_grp1_pad_mux[] = {
- MUX(1, 53, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux rg_rgmii_phy_ref_clk_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(rg_rgmii_phy_ref_clk_grp1_pad_mux),
- .pad_mux_list = rg_rgmii_phy_ref_clk_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux sd0_grp_pad_mux[] = {
- MUX(1, 46, 2, N, N, N, N),
- MUX(1, 47, 2, N, N, N, N),
- MUX(1, 44, 2, N, N, N, N),
- MUX(1, 43, 2, N, N, N, N),
- MUX(1, 42, 2, N, N, N, N),
- MUX(1, 41, 2, N, N, N, N),
- MUX(1, 40, 2, N, N, N, N),
- MUX(1, 39, 2, N, N, N, N),
- MUX(1, 38, 2, N, N, N, N),
- MUX(1, 37, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd0_grp_pad_mux),
- .pad_mux_list = sd0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd0_4bit_grp_pad_mux[] = {
- MUX(1, 46, 2, N, N, N, N),
- MUX(1, 47, 2, N, N, N, N),
- MUX(1, 44, 2, N, N, N, N),
- MUX(1, 43, 2, N, N, N, N),
- MUX(1, 42, 2, N, N, N, N),
- MUX(1, 41, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd0_4bit_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd0_4bit_grp_pad_mux),
- .pad_mux_list = sd0_4bit_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd1_grp_pad_mux[] = {
- MUX(1, 48, 3, N, N, N, N),
- MUX(1, 49, 3, N, N, N, N),
- MUX(1, 44, 3, 0xa00, 0, 0xa80, 0),
- MUX(1, 43, 3, 0xa00, 1, 0xa80, 1),
- MUX(1, 42, 3, 0xa00, 2, 0xa80, 2),
- MUX(1, 41, 3, 0xa00, 3, 0xa80, 3),
- MUX(1, 40, 3, N, N, N, N),
- MUX(1, 39, 3, N, N, N, N),
- MUX(1, 38, 3, N, N, N, N),
- MUX(1, 37, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd1_grp_pad_mux),
- .pad_mux_list = sd1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd1_4bit_grp0_pad_mux[] = {
- MUX(1, 48, 3, N, N, N, N),
- MUX(1, 49, 3, N, N, N, N),
- MUX(1, 44, 3, 0xa00, 0, 0xa80, 0),
- MUX(1, 43, 3, 0xa00, 1, 0xa80, 1),
- MUX(1, 42, 3, 0xa00, 2, 0xa80, 2),
- MUX(1, 41, 3, 0xa00, 3, 0xa80, 3),
-};
-
-static struct atlas7_grp_mux sd1_4bit_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(sd1_4bit_grp0_pad_mux),
- .pad_mux_list = sd1_4bit_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux sd1_4bit_grp1_pad_mux[] = {
- MUX(1, 48, 3, N, N, N, N),
- MUX(1, 49, 3, N, N, N, N),
- MUX(1, 40, 4, 0xa00, 0, 0xa80, 0),
- MUX(1, 39, 4, 0xa00, 1, 0xa80, 1),
- MUX(1, 38, 4, 0xa00, 2, 0xa80, 2),
- MUX(1, 37, 4, 0xa00, 3, 0xa80, 3),
-};
-
-static struct atlas7_grp_mux sd1_4bit_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(sd1_4bit_grp1_pad_mux),
- .pad_mux_list = sd1_4bit_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux sd2_basic_grp_pad_mux[] = {
- MUX(1, 31, 1, N, N, N, N),
- MUX(1, 32, 1, N, N, N, N),
- MUX(1, 33, 1, N, N, N, N),
- MUX(1, 34, 1, N, N, N, N),
- MUX(1, 35, 1, N, N, N, N),
- MUX(1, 36, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd2_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd2_basic_grp_pad_mux),
- .pad_mux_list = sd2_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd2_cdb_grp0_pad_mux[] = {
- MUX(1, 124, 2, 0xa08, 7, 0xa88, 7),
-};
-
-static struct atlas7_grp_mux sd2_cdb_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp0_pad_mux),
- .pad_mux_list = sd2_cdb_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux sd2_cdb_grp1_pad_mux[] = {
- MUX(1, 161, 6, 0xa08, 7, 0xa88, 7),
-};
-
-static struct atlas7_grp_mux sd2_cdb_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp1_pad_mux),
- .pad_mux_list = sd2_cdb_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux sd2_wpb_grp0_pad_mux[] = {
- MUX(1, 123, 2, 0xa10, 6, 0xa90, 6),
-};
-
-static struct atlas7_grp_mux sd2_wpb_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp0_pad_mux),
- .pad_mux_list = sd2_wpb_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux sd2_wpb_grp1_pad_mux[] = {
- MUX(1, 163, 7, 0xa10, 6, 0xa90, 6),
-};
-
-static struct atlas7_grp_mux sd2_wpb_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp1_pad_mux),
- .pad_mux_list = sd2_wpb_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux sd3_9_grp_pad_mux[] = {
- MUX(1, 85, 1, N, N, N, N),
- MUX(1, 86, 1, N, N, N, N),
- MUX(1, 87, 1, N, N, N, N),
- MUX(1, 88, 1, N, N, N, N),
- MUX(1, 89, 1, N, N, N, N),
- MUX(1, 90, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd3_9_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd3_9_grp_pad_mux),
- .pad_mux_list = sd3_9_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd5_grp_pad_mux[] = {
- MUX(1, 91, 1, N, N, N, N),
- MUX(1, 92, 1, N, N, N, N),
- MUX(1, 93, 1, N, N, N, N),
- MUX(1, 94, 1, N, N, N, N),
- MUX(1, 95, 1, N, N, N, N),
- MUX(1, 96, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sd5_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sd5_grp_pad_mux),
- .pad_mux_list = sd5_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sd6_grp0_pad_mux[] = {
- MUX(1, 79, 4, 0xa00, 27, 0xa80, 27),
- MUX(1, 78, 4, 0xa00, 26, 0xa80, 26),
- MUX(1, 74, 4, 0xa00, 28, 0xa80, 28),
- MUX(1, 75, 4, 0xa00, 29, 0xa80, 29),
- MUX(1, 76, 4, 0xa00, 30, 0xa80, 30),
- MUX(1, 77, 4, 0xa00, 31, 0xa80, 31),
-};
-
-static struct atlas7_grp_mux sd6_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(sd6_grp0_pad_mux),
- .pad_mux_list = sd6_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux sd6_grp1_pad_mux[] = {
- MUX(1, 101, 3, 0xa00, 27, 0xa80, 27),
- MUX(1, 99, 3, 0xa00, 26, 0xa80, 26),
- MUX(1, 100, 3, 0xa00, 28, 0xa80, 28),
- MUX(1, 110, 3, 0xa00, 29, 0xa80, 29),
- MUX(1, 109, 3, 0xa00, 30, 0xa80, 30),
- MUX(1, 111, 3, 0xa00, 31, 0xa80, 31),
-};
-
-static struct atlas7_grp_mux sd6_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(sd6_grp1_pad_mux),
- .pad_mux_list = sd6_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux sp0_ext_ldo_on_grp_pad_mux[] = {
- MUX(0, 4, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sp0_ext_ldo_on_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sp0_ext_ldo_on_grp_pad_mux),
- .pad_mux_list = sp0_ext_ldo_on_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sp0_qspi_grp_pad_mux[] = {
- MUX(0, 12, 1, N, N, N, N),
- MUX(0, 13, 1, N, N, N, N),
- MUX(0, 14, 1, N, N, N, N),
- MUX(0, 15, 1, N, N, N, N),
- MUX(0, 16, 1, N, N, N, N),
- MUX(0, 17, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sp0_qspi_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sp0_qspi_grp_pad_mux),
- .pad_mux_list = sp0_qspi_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux sp1_spi_grp_pad_mux[] = {
- MUX(1, 19, 1, N, N, N, N),
- MUX(1, 20, 1, N, N, N, N),
- MUX(1, 21, 1, N, N, N, N),
- MUX(1, 18, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux sp1_spi_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(sp1_spi_grp_pad_mux),
- .pad_mux_list = sp1_spi_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux tpiu_trace_grp_pad_mux[] = {
- MUX(1, 53, 5, N, N, N, N),
- MUX(1, 56, 5, N, N, N, N),
- MUX(1, 57, 5, N, N, N, N),
- MUX(1, 58, 5, N, N, N, N),
- MUX(1, 59, 5, N, N, N, N),
- MUX(1, 60, 5, N, N, N, N),
- MUX(1, 61, 5, N, N, N, N),
- MUX(1, 62, 5, N, N, N, N),
- MUX(1, 63, 5, N, N, N, N),
- MUX(1, 64, 5, N, N, N, N),
- MUX(1, 65, 5, N, N, N, N),
- MUX(1, 66, 5, N, N, N, N),
- MUX(1, 67, 5, N, N, N, N),
- MUX(1, 68, 5, N, N, N, N),
- MUX(1, 69, 5, N, N, N, N),
- MUX(1, 70, 5, N, N, N, N),
- MUX(1, 71, 5, N, N, N, N),
- MUX(1, 72, 5, N, N, N, N),
-};
-
-static struct atlas7_grp_mux tpiu_trace_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(tpiu_trace_grp_pad_mux),
- .pad_mux_list = tpiu_trace_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux uart0_grp_pad_mux[] = {
- MUX(1, 121, 4, N, N, N, N),
- MUX(1, 120, 4, N, N, N, N),
- MUX(1, 134, 1, N, N, N, N),
- MUX(1, 133, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart0_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(uart0_grp_pad_mux),
- .pad_mux_list = uart0_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux uart0_nopause_grp_pad_mux[] = {
- MUX(1, 134, 1, N, N, N, N),
- MUX(1, 133, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart0_nopause_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(uart0_nopause_grp_pad_mux),
- .pad_mux_list = uart0_nopause_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux uart1_grp_pad_mux[] = {
- MUX(1, 136, 1, N, N, N, N),
- MUX(1, 135, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(uart1_grp_pad_mux),
- .pad_mux_list = uart1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_cts_grp0_pad_mux[] = {
- MUX(1, 132, 3, 0xa10, 2, 0xa90, 2),
-};
-
-static struct atlas7_grp_mux uart2_cts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_cts_grp0_pad_mux),
- .pad_mux_list = uart2_cts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_cts_grp1_pad_mux[] = {
- MUX(1, 162, 2, 0xa10, 2, 0xa90, 2),
-};
-
-static struct atlas7_grp_mux uart2_cts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_cts_grp1_pad_mux),
- .pad_mux_list = uart2_cts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_rts_grp0_pad_mux[] = {
- MUX(1, 131, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart2_rts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_rts_grp0_pad_mux),
- .pad_mux_list = uart2_rts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_rts_grp1_pad_mux[] = {
- MUX(1, 161, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart2_rts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_rts_grp1_pad_mux),
- .pad_mux_list = uart2_rts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_rxd_grp0_pad_mux[] = {
- MUX(0, 11, 2, 0xa10, 5, 0xa90, 5),
-};
-
-static struct atlas7_grp_mux uart2_rxd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp0_pad_mux),
- .pad_mux_list = uart2_rxd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_rxd_grp1_pad_mux[] = {
- MUX(1, 160, 2, 0xa10, 5, 0xa90, 5),
-};
-
-static struct atlas7_grp_mux uart2_rxd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp1_pad_mux),
- .pad_mux_list = uart2_rxd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_rxd_grp2_pad_mux[] = {
- MUX(1, 130, 3, 0xa10, 5, 0xa90, 5),
-};
-
-static struct atlas7_grp_mux uart2_rxd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp2_pad_mux),
- .pad_mux_list = uart2_rxd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_txd_grp0_pad_mux[] = {
- MUX(0, 10, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart2_txd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_txd_grp0_pad_mux),
- .pad_mux_list = uart2_txd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_txd_grp1_pad_mux[] = {
- MUX(1, 159, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart2_txd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_txd_grp1_pad_mux),
- .pad_mux_list = uart2_txd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart2_txd_grp2_pad_mux[] = {
- MUX(1, 129, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart2_txd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart2_txd_grp2_pad_mux),
- .pad_mux_list = uart2_txd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_cts_grp0_pad_mux[] = {
- MUX(1, 125, 2, 0xa08, 0, 0xa88, 0),
-};
-
-static struct atlas7_grp_mux uart3_cts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_cts_grp0_pad_mux),
- .pad_mux_list = uart3_cts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_cts_grp1_pad_mux[] = {
- MUX(1, 111, 4, 0xa08, 0, 0xa88, 0),
-};
-
-static struct atlas7_grp_mux uart3_cts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_cts_grp1_pad_mux),
- .pad_mux_list = uart3_cts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_cts_grp2_pad_mux[] = {
- MUX(1, 140, 2, 0xa08, 0, 0xa88, 0),
-};
-
-static struct atlas7_grp_mux uart3_cts_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_cts_grp2_pad_mux),
- .pad_mux_list = uart3_cts_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rts_grp0_pad_mux[] = {
- MUX(1, 126, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_rts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rts_grp0_pad_mux),
- .pad_mux_list = uart3_rts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rts_grp1_pad_mux[] = {
- MUX(1, 109, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_rts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rts_grp1_pad_mux),
- .pad_mux_list = uart3_rts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rts_grp2_pad_mux[] = {
- MUX(1, 139, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_rts_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rts_grp2_pad_mux),
- .pad_mux_list = uart3_rts_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rxd_grp0_pad_mux[] = {
- MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
-};
-
-static struct atlas7_grp_mux uart3_rxd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp0_pad_mux),
- .pad_mux_list = uart3_rxd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rxd_grp1_pad_mux[] = {
- MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
-};
-
-static struct atlas7_grp_mux uart3_rxd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp1_pad_mux),
- .pad_mux_list = uart3_rxd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_rxd_grp2_pad_mux[] = {
- MUX(1, 162, 3, 0xa00, 5, 0xa80, 5),
-};
-
-static struct atlas7_grp_mux uart3_rxd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp2_pad_mux),
- .pad_mux_list = uart3_rxd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_txd_grp0_pad_mux[] = {
- MUX(1, 137, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_txd_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_txd_grp0_pad_mux),
- .pad_mux_list = uart3_txd_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_txd_grp1_pad_mux[] = {
- MUX(1, 83, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_txd_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_txd_grp1_pad_mux),
- .pad_mux_list = uart3_txd_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart3_txd_grp2_pad_mux[] = {
- MUX(1, 161, 3, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart3_txd_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart3_txd_grp2_pad_mux),
- .pad_mux_list = uart3_txd_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_basic_grp_pad_mux[] = {
- MUX(1, 140, 1, N, N, N, N),
- MUX(1, 139, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart4_basic_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_basic_grp_pad_mux),
- .pad_mux_list = uart4_basic_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_cts_grp0_pad_mux[] = {
- MUX(1, 122, 4, 0xa08, 1, 0xa88, 1),
-};
-
-static struct atlas7_grp_mux uart4_cts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_cts_grp0_pad_mux),
- .pad_mux_list = uart4_cts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_cts_grp1_pad_mux[] = {
- MUX(1, 100, 4, 0xa08, 1, 0xa88, 1),
-};
-
-static struct atlas7_grp_mux uart4_cts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_cts_grp1_pad_mux),
- .pad_mux_list = uart4_cts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_cts_grp2_pad_mux[] = {
- MUX(1, 117, 2, 0xa08, 1, 0xa88, 1),
-};
-
-static struct atlas7_grp_mux uart4_cts_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_cts_grp2_pad_mux),
- .pad_mux_list = uart4_cts_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_rts_grp0_pad_mux[] = {
- MUX(1, 123, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart4_rts_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_rts_grp0_pad_mux),
- .pad_mux_list = uart4_rts_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_rts_grp1_pad_mux[] = {
- MUX(1, 99, 4, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart4_rts_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_rts_grp1_pad_mux),
- .pad_mux_list = uart4_rts_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux uart4_rts_grp2_pad_mux[] = {
- MUX(1, 116, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux uart4_rts_grp2_mux = {
- .pad_mux_count = ARRAY_SIZE(uart4_rts_grp2_pad_mux),
- .pad_mux_list = uart4_rts_grp2_pad_mux,
-};
-
-static struct atlas7_pad_mux usb0_drvvbus_grp0_pad_mux[] = {
- MUX(1, 51, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux usb0_drvvbus_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp0_pad_mux),
- .pad_mux_list = usb0_drvvbus_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux usb0_drvvbus_grp1_pad_mux[] = {
- MUX(1, 162, 7, N, N, N, N),
-};
-
-static struct atlas7_grp_mux usb0_drvvbus_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp1_pad_mux),
- .pad_mux_list = usb0_drvvbus_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux usb1_drvvbus_grp0_pad_mux[] = {
- MUX(1, 134, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux usb1_drvvbus_grp0_mux = {
- .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp0_pad_mux),
- .pad_mux_list = usb1_drvvbus_grp0_pad_mux,
-};
-
-static struct atlas7_pad_mux usb1_drvvbus_grp1_pad_mux[] = {
- MUX(1, 163, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux usb1_drvvbus_grp1_mux = {
- .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp1_pad_mux),
- .pad_mux_list = usb1_drvvbus_grp1_pad_mux,
-};
-
-static struct atlas7_pad_mux visbus_dout_grp_pad_mux[] = {
- MUX(1, 57, 6, N, N, N, N),
- MUX(1, 58, 6, N, N, N, N),
- MUX(1, 59, 6, N, N, N, N),
- MUX(1, 60, 6, N, N, N, N),
- MUX(1, 61, 6, N, N, N, N),
- MUX(1, 62, 6, N, N, N, N),
- MUX(1, 63, 6, N, N, N, N),
- MUX(1, 64, 6, N, N, N, N),
- MUX(1, 65, 6, N, N, N, N),
- MUX(1, 66, 6, N, N, N, N),
- MUX(1, 67, 6, N, N, N, N),
- MUX(1, 68, 6, N, N, N, N),
- MUX(1, 69, 6, N, N, N, N),
- MUX(1, 70, 6, N, N, N, N),
- MUX(1, 71, 6, N, N, N, N),
- MUX(1, 72, 6, N, N, N, N),
- MUX(1, 53, 6, N, N, N, N),
- MUX(1, 54, 6, N, N, N, N),
- MUX(1, 55, 6, N, N, N, N),
- MUX(1, 56, 6, N, N, N, N),
- MUX(1, 85, 6, N, N, N, N),
- MUX(1, 86, 6, N, N, N, N),
- MUX(1, 87, 6, N, N, N, N),
- MUX(1, 88, 6, N, N, N, N),
- MUX(1, 89, 6, N, N, N, N),
- MUX(1, 90, 6, N, N, N, N),
- MUX(1, 91, 6, N, N, N, N),
- MUX(1, 92, 6, N, N, N, N),
- MUX(1, 93, 6, N, N, N, N),
- MUX(1, 94, 6, N, N, N, N),
- MUX(1, 95, 6, N, N, N, N),
- MUX(1, 96, 6, N, N, N, N),
-};
-
-static struct atlas7_grp_mux visbus_dout_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(visbus_dout_grp_pad_mux),
- .pad_mux_list = visbus_dout_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux vi_vip1_grp_pad_mux[] = {
- MUX(1, 74, 1, N, N, N, N),
- MUX(1, 75, 1, N, N, N, N),
- MUX(1, 76, 1, N, N, N, N),
- MUX(1, 77, 1, N, N, N, N),
- MUX(1, 78, 1, N, N, N, N),
- MUX(1, 79, 1, N, N, N, N),
- MUX(1, 80, 1, N, N, N, N),
- MUX(1, 81, 1, N, N, N, N),
- MUX(1, 82, 1, N, N, N, N),
- MUX(1, 83, 1, N, N, N, N),
- MUX(1, 84, 1, N, N, N, N),
- MUX(1, 103, 2, N, N, N, N),
- MUX(1, 104, 2, N, N, N, N),
- MUX(1, 105, 2, N, N, N, N),
- MUX(1, 106, 2, N, N, N, N),
- MUX(1, 107, 2, N, N, N, N),
- MUX(1, 102, 2, N, N, N, N),
- MUX(1, 97, 2, N, N, N, N),
- MUX(1, 98, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux vi_vip1_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(vi_vip1_grp_pad_mux),
- .pad_mux_list = vi_vip1_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux vi_vip1_ext_grp_pad_mux[] = {
- MUX(1, 74, 1, N, N, N, N),
- MUX(1, 75, 1, N, N, N, N),
- MUX(1, 76, 1, N, N, N, N),
- MUX(1, 77, 1, N, N, N, N),
- MUX(1, 78, 1, N, N, N, N),
- MUX(1, 79, 1, N, N, N, N),
- MUX(1, 80, 1, N, N, N, N),
- MUX(1, 81, 1, N, N, N, N),
- MUX(1, 82, 1, N, N, N, N),
- MUX(1, 83, 1, N, N, N, N),
- MUX(1, 84, 1, N, N, N, N),
- MUX(1, 108, 2, N, N, N, N),
- MUX(1, 103, 2, N, N, N, N),
- MUX(1, 104, 2, N, N, N, N),
- MUX(1, 105, 2, N, N, N, N),
- MUX(1, 106, 2, N, N, N, N),
- MUX(1, 107, 2, N, N, N, N),
- MUX(1, 102, 2, N, N, N, N),
- MUX(1, 97, 2, N, N, N, N),
- MUX(1, 98, 2, N, N, N, N),
- MUX(1, 99, 2, N, N, N, N),
- MUX(1, 100, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux vi_vip1_ext_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(vi_vip1_ext_grp_pad_mux),
- .pad_mux_list = vi_vip1_ext_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux vi_vip1_low8bit_grp_pad_mux[] = {
- MUX(1, 74, 1, N, N, N, N),
- MUX(1, 75, 1, N, N, N, N),
- MUX(1, 76, 1, N, N, N, N),
- MUX(1, 77, 1, N, N, N, N),
- MUX(1, 78, 1, N, N, N, N),
- MUX(1, 79, 1, N, N, N, N),
- MUX(1, 80, 1, N, N, N, N),
- MUX(1, 81, 1, N, N, N, N),
- MUX(1, 82, 1, N, N, N, N),
- MUX(1, 83, 1, N, N, N, N),
- MUX(1, 84, 1, N, N, N, N),
-};
-
-static struct atlas7_grp_mux vi_vip1_low8bit_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(vi_vip1_low8bit_grp_pad_mux),
- .pad_mux_list = vi_vip1_low8bit_grp_pad_mux,
-};
-
-static struct atlas7_pad_mux vi_vip1_high8bit_grp_pad_mux[] = {
- MUX(1, 82, 1, N, N, N, N),
- MUX(1, 83, 1, N, N, N, N),
- MUX(1, 84, 1, N, N, N, N),
- MUX(1, 103, 2, N, N, N, N),
- MUX(1, 104, 2, N, N, N, N),
- MUX(1, 105, 2, N, N, N, N),
- MUX(1, 106, 2, N, N, N, N),
- MUX(1, 107, 2, N, N, N, N),
- MUX(1, 102, 2, N, N, N, N),
- MUX(1, 97, 2, N, N, N, N),
- MUX(1, 98, 2, N, N, N, N),
-};
-
-static struct atlas7_grp_mux vi_vip1_high8bit_grp_mux = {
- .pad_mux_count = ARRAY_SIZE(vi_vip1_high8bit_grp_pad_mux),
- .pad_mux_list = vi_vip1_high8bit_grp_pad_mux,
-};
-
-static struct atlas7_pmx_func atlas7_pmx_functions[] = {
- FUNCTION("gnss_gpio", gnss_gpio_grp, &gnss_gpio_grp_mux),
- FUNCTION("lcd_vip_gpio", lcd_vip_gpio_grp, &lcd_vip_gpio_grp_mux),
- FUNCTION("sdio_i2s_gpio", sdio_i2s_gpio_grp, &sdio_i2s_gpio_grp_mux),
- FUNCTION("sp_rgmii_gpio", sp_rgmii_gpio_grp, &sp_rgmii_gpio_grp_mux),
- FUNCTION("lvds_gpio", lvds_gpio_grp, &lvds_gpio_grp_mux),
- FUNCTION("jtag_uart_nand_gpio",
- jtag_uart_nand_gpio_grp,
- &jtag_uart_nand_gpio_grp_mux),
- FUNCTION("rtc_gpio", rtc_gpio_grp, &rtc_gpio_grp_mux),
- FUNCTION("audio_ac97", audio_ac97_grp, &audio_ac97_grp_mux),
- FUNCTION("audio_digmic_m0",
- audio_digmic_grp0,
- &audio_digmic_grp0_mux),
- FUNCTION("audio_digmic_m1",
- audio_digmic_grp1,
- &audio_digmic_grp1_mux),
- FUNCTION("audio_digmic_m2",
- audio_digmic_grp2,
- &audio_digmic_grp2_mux),
- FUNCTION("audio_func_dbg",
- audio_func_dbg_grp,
- &audio_func_dbg_grp_mux),
- FUNCTION("audio_i2s", audio_i2s_grp, &audio_i2s_grp_mux),
- FUNCTION("audio_i2s_2ch", audio_i2s_2ch_grp, &audio_i2s_2ch_grp_mux),
- FUNCTION("audio_i2s_extclk",
- audio_i2s_extclk_grp,
- &audio_i2s_extclk_grp_mux),
- FUNCTION("audio_spdif_out_m0",
- audio_spdif_out_grp0,
- &audio_spdif_out_grp0_mux),
- FUNCTION("audio_spdif_out_m1",
- audio_spdif_out_grp1,
- &audio_spdif_out_grp1_mux),
- FUNCTION("audio_spdif_out_m2",
- audio_spdif_out_grp2,
- &audio_spdif_out_grp2_mux),
- FUNCTION("audio_uart0_basic",
- audio_uart0_basic_grp,
- &audio_uart0_basic_grp_mux),
- FUNCTION("audio_uart0_urfs_m0",
- audio_uart0_urfs_grp0,
- &audio_uart0_urfs_grp0_mux),
- FUNCTION("audio_uart0_urfs_m1",
- audio_uart0_urfs_grp1,
- &audio_uart0_urfs_grp1_mux),
- FUNCTION("audio_uart0_urfs_m2",
- audio_uart0_urfs_grp2,
- &audio_uart0_urfs_grp2_mux),
- FUNCTION("audio_uart0_urfs_m3",
- audio_uart0_urfs_grp3,
- &audio_uart0_urfs_grp3_mux),
- FUNCTION("audio_uart1_basic",
- audio_uart1_basic_grp,
- &audio_uart1_basic_grp_mux),
- FUNCTION("audio_uart1_urfs_m0",
- audio_uart1_urfs_grp0,
- &audio_uart1_urfs_grp0_mux),
- FUNCTION("audio_uart1_urfs_m1",
- audio_uart1_urfs_grp1,
- &audio_uart1_urfs_grp1_mux),
- FUNCTION("audio_uart1_urfs_m2",
- audio_uart1_urfs_grp2,
- &audio_uart1_urfs_grp2_mux),
- FUNCTION("audio_uart2_urfs_m0",
- audio_uart2_urfs_grp0,
- &audio_uart2_urfs_grp0_mux),
- FUNCTION("audio_uart2_urfs_m1",
- audio_uart2_urfs_grp1,
- &audio_uart2_urfs_grp1_mux),
- FUNCTION("audio_uart2_urfs_m2",
- audio_uart2_urfs_grp2,
- &audio_uart2_urfs_grp2_mux),
- FUNCTION("audio_uart2_urxd_m0",
- audio_uart2_urxd_grp0,
- &audio_uart2_urxd_grp0_mux),
- FUNCTION("audio_uart2_urxd_m1",
- audio_uart2_urxd_grp1,
- &audio_uart2_urxd_grp1_mux),
- FUNCTION("audio_uart2_urxd_m2",
- audio_uart2_urxd_grp2,
- &audio_uart2_urxd_grp2_mux),
- FUNCTION("audio_uart2_usclk_m0",
- audio_uart2_usclk_grp0,
- &audio_uart2_usclk_grp0_mux),
- FUNCTION("audio_uart2_usclk_m1",
- audio_uart2_usclk_grp1,
- &audio_uart2_usclk_grp1_mux),
- FUNCTION("audio_uart2_usclk_m2",
- audio_uart2_usclk_grp2,
- &audio_uart2_usclk_grp2_mux),
- FUNCTION("audio_uart2_utfs_m0",
- audio_uart2_utfs_grp0,
- &audio_uart2_utfs_grp0_mux),
- FUNCTION("audio_uart2_utfs_m1",
- audio_uart2_utfs_grp1,
- &audio_uart2_utfs_grp1_mux),
- FUNCTION("audio_uart2_utfs_m2",
- audio_uart2_utfs_grp2,
- &audio_uart2_utfs_grp2_mux),
- FUNCTION("audio_uart2_utxd_m0",
- audio_uart2_utxd_grp0,
- &audio_uart2_utxd_grp0_mux),
- FUNCTION("audio_uart2_utxd_m1",
- audio_uart2_utxd_grp1,
- &audio_uart2_utxd_grp1_mux),
- FUNCTION("audio_uart2_utxd_m2",
- audio_uart2_utxd_grp2,
- &audio_uart2_utxd_grp2_mux),
- FUNCTION("c_can_trnsvr_en_m0",
- c_can_trnsvr_en_grp0,
- &c_can_trnsvr_en_grp0_mux),
- FUNCTION("c_can_trnsvr_en_m1",
- c_can_trnsvr_en_grp1,
- &c_can_trnsvr_en_grp1_mux),
- FUNCTION("c_can_trnsvr_intr",
- c_can_trnsvr_intr_grp,
- &c_can_trnsvr_intr_grp_mux),
- FUNCTION("c_can_trnsvr_stb_n",
- c_can_trnsvr_stb_n_grp,
- &c_can_trnsvr_stb_n_grp_mux),
- FUNCTION("c0_can_rxd_trnsv0",
- c0_can_rxd_trnsv0_grp,
- &c0_can_rxd_trnsv0_grp_mux),
- FUNCTION("c0_can_rxd_trnsv1",
- c0_can_rxd_trnsv1_grp,
- &c0_can_rxd_trnsv1_grp_mux),
- FUNCTION("c0_can_txd_trnsv0",
- c0_can_txd_trnsv0_grp,
- &c0_can_txd_trnsv0_grp_mux),
- FUNCTION("c0_can_txd_trnsv1",
- c0_can_txd_trnsv1_grp,
- &c0_can_txd_trnsv1_grp_mux),
- FUNCTION("c1_can_rxd_m0", c1_can_rxd_grp0, &c1_can_rxd_grp0_mux),
- FUNCTION("c1_can_rxd_m1", c1_can_rxd_grp1, &c1_can_rxd_grp1_mux),
- FUNCTION("c1_can_rxd_m2", c1_can_rxd_grp2, &c1_can_rxd_grp2_mux),
- FUNCTION("c1_can_rxd_m3", c1_can_rxd_grp3, &c1_can_rxd_grp3_mux),
- FUNCTION("c1_can_txd_m0", c1_can_txd_grp0, &c1_can_txd_grp0_mux),
- FUNCTION("c1_can_txd_m1", c1_can_txd_grp1, &c1_can_txd_grp1_mux),
- FUNCTION("c1_can_txd_m2", c1_can_txd_grp2, &c1_can_txd_grp2_mux),
- FUNCTION("c1_can_txd_m3", c1_can_txd_grp3, &c1_can_txd_grp3_mux),
- FUNCTION("ca_audio_lpc", ca_audio_lpc_grp, &ca_audio_lpc_grp_mux),
- FUNCTION("ca_bt_lpc", ca_bt_lpc_grp, &ca_bt_lpc_grp_mux),
- FUNCTION("ca_coex", ca_coex_grp, &ca_coex_grp_mux),
- FUNCTION("ca_curator_lpc",
- ca_curator_lpc_grp,
- &ca_curator_lpc_grp_mux),
- FUNCTION("ca_pcm_debug", ca_pcm_debug_grp, &ca_pcm_debug_grp_mux),
- FUNCTION("ca_pio", ca_pio_grp, &ca_pio_grp_mux),
- FUNCTION("ca_sdio_debug", ca_sdio_debug_grp, &ca_sdio_debug_grp_mux),
- FUNCTION("ca_spi", ca_spi_grp, &ca_spi_grp_mux),
- FUNCTION("ca_trb", ca_trb_grp, &ca_trb_grp_mux),
- FUNCTION("ca_uart_debug", ca_uart_debug_grp, &ca_uart_debug_grp_mux),
- FUNCTION("clkc_m0", clkc_grp0, &clkc_grp0_mux),
- FUNCTION("clkc_m1", clkc_grp1, &clkc_grp1_mux),
- FUNCTION("gn_gnss_i2c", gn_gnss_i2c_grp, &gn_gnss_i2c_grp_mux),
- FUNCTION("gn_gnss_uart_nopause",
- gn_gnss_uart_nopause_grp,
- &gn_gnss_uart_nopause_grp_mux),
- FUNCTION("gn_gnss_uart", gn_gnss_uart_grp, &gn_gnss_uart_grp_mux),
- FUNCTION("gn_trg_spi_m0", gn_trg_spi_grp0, &gn_trg_spi_grp0_mux),
- FUNCTION("gn_trg_spi_m1", gn_trg_spi_grp1, &gn_trg_spi_grp1_mux),
- FUNCTION("cvbs_dbg", cvbs_dbg_grp, &cvbs_dbg_grp_mux),
- FUNCTION("cvbs_dbg_test_m0",
- cvbs_dbg_test_grp0,
- &cvbs_dbg_test_grp0_mux),
- FUNCTION("cvbs_dbg_test_m1",
- cvbs_dbg_test_grp1,
- &cvbs_dbg_test_grp1_mux),
- FUNCTION("cvbs_dbg_test_m2",
- cvbs_dbg_test_grp2,
- &cvbs_dbg_test_grp2_mux),
- FUNCTION("cvbs_dbg_test_m3",
- cvbs_dbg_test_grp3,
- &cvbs_dbg_test_grp3_mux),
- FUNCTION("cvbs_dbg_test_m4",
- cvbs_dbg_test_grp4,
- &cvbs_dbg_test_grp4_mux),
- FUNCTION("cvbs_dbg_test_m5",
- cvbs_dbg_test_grp5,
- &cvbs_dbg_test_grp5_mux),
- FUNCTION("cvbs_dbg_test_m6",
- cvbs_dbg_test_grp6,
- &cvbs_dbg_test_grp6_mux),
- FUNCTION("cvbs_dbg_test_m7",
- cvbs_dbg_test_grp7,
- &cvbs_dbg_test_grp7_mux),
- FUNCTION("cvbs_dbg_test_m8",
- cvbs_dbg_test_grp8,
- &cvbs_dbg_test_grp8_mux),
- FUNCTION("cvbs_dbg_test_m9",
- cvbs_dbg_test_grp9,
- &cvbs_dbg_test_grp9_mux),
- FUNCTION("cvbs_dbg_test_m10",
- cvbs_dbg_test_grp10,
- &cvbs_dbg_test_grp10_mux),
- FUNCTION("cvbs_dbg_test_m11",
- cvbs_dbg_test_grp11,
- &cvbs_dbg_test_grp11_mux),
- FUNCTION("cvbs_dbg_test_m12",
- cvbs_dbg_test_grp12,
- &cvbs_dbg_test_grp12_mux),
- FUNCTION("cvbs_dbg_test_m13",
- cvbs_dbg_test_grp13,
- &cvbs_dbg_test_grp13_mux),
- FUNCTION("cvbs_dbg_test_m14",
- cvbs_dbg_test_grp14,
- &cvbs_dbg_test_grp14_mux),
- FUNCTION("cvbs_dbg_test_m15",
- cvbs_dbg_test_grp15,
- &cvbs_dbg_test_grp15_mux),
- FUNCTION("gn_gnss_power", gn_gnss_power_grp, &gn_gnss_power_grp_mux),
- FUNCTION("gn_gnss_sw_status",
- gn_gnss_sw_status_grp,
- &gn_gnss_sw_status_grp_mux),
- FUNCTION("gn_gnss_eclk", gn_gnss_eclk_grp, &gn_gnss_eclk_grp_mux),
- FUNCTION("gn_gnss_irq1_m0",
- gn_gnss_irq1_grp0,
- &gn_gnss_irq1_grp0_mux),
- FUNCTION("gn_gnss_irq2_m0",
- gn_gnss_irq2_grp0,
- &gn_gnss_irq2_grp0_mux),
- FUNCTION("gn_gnss_tm", gn_gnss_tm_grp, &gn_gnss_tm_grp_mux),
- FUNCTION("gn_gnss_tsync", gn_gnss_tsync_grp, &gn_gnss_tsync_grp_mux),
- FUNCTION("gn_io_gnsssys_sw_cfg",
- gn_io_gnsssys_sw_cfg_grp,
- &gn_io_gnsssys_sw_cfg_grp_mux),
- FUNCTION("gn_trg_m0", gn_trg_grp0, &gn_trg_grp0_mux),
- FUNCTION("gn_trg_m1", gn_trg_grp1, &gn_trg_grp1_mux),
- FUNCTION("gn_trg_shutdown_m0",
- gn_trg_shutdown_grp0,
- &gn_trg_shutdown_grp0_mux),
- FUNCTION("gn_trg_shutdown_m1",
- gn_trg_shutdown_grp1,
- &gn_trg_shutdown_grp1_mux),
- FUNCTION("gn_trg_shutdown_m2",
- gn_trg_shutdown_grp2,
- &gn_trg_shutdown_grp2_mux),
- FUNCTION("gn_trg_shutdown_m3",
- gn_trg_shutdown_grp3,
- &gn_trg_shutdown_grp3_mux),
- FUNCTION("i2c0", i2c0_grp, &i2c0_grp_mux),
- FUNCTION("i2c1", i2c1_grp, &i2c1_grp_mux),
- FUNCTION("i2s0", i2s0_grp, &i2s0_grp_mux),
- FUNCTION("i2s1_basic", i2s1_basic_grp, &i2s1_basic_grp_mux),
- FUNCTION("i2s1_rxd0_m0", i2s1_rxd0_grp0, &i2s1_rxd0_grp0_mux),
- FUNCTION("i2s1_rxd0_m1", i2s1_rxd0_grp1, &i2s1_rxd0_grp1_mux),
- FUNCTION("i2s1_rxd0_m2", i2s1_rxd0_grp2, &i2s1_rxd0_grp2_mux),
- FUNCTION("i2s1_rxd0_m3", i2s1_rxd0_grp3, &i2s1_rxd0_grp3_mux),
- FUNCTION("i2s1_rxd0_m4", i2s1_rxd0_grp4, &i2s1_rxd0_grp4_mux),
- FUNCTION("i2s1_rxd1_m0", i2s1_rxd1_grp0, &i2s1_rxd1_grp0_mux),
- FUNCTION("i2s1_rxd1_m1", i2s1_rxd1_grp1, &i2s1_rxd1_grp1_mux),
- FUNCTION("i2s1_rxd1_m2", i2s1_rxd1_grp2, &i2s1_rxd1_grp2_mux),
- FUNCTION("i2s1_rxd1_m3", i2s1_rxd1_grp3, &i2s1_rxd1_grp3_mux),
- FUNCTION("i2s1_rxd1_m4", i2s1_rxd1_grp4, &i2s1_rxd1_grp4_mux),
- FUNCTION("jtag_jt_dbg_nsrst",
- jtag_jt_dbg_nsrst_grp,
- &jtag_jt_dbg_nsrst_grp_mux),
- FUNCTION("jtag_ntrst_m0", jtag_ntrst_grp0, &jtag_ntrst_grp0_mux),
- FUNCTION("jtag_ntrst_m1", jtag_ntrst_grp1, &jtag_ntrst_grp1_mux),
- FUNCTION("jtag_swdiotms_m0",
- jtag_swdiotms_grp0,
- &jtag_swdiotms_grp0_mux),
- FUNCTION("jtag_swdiotms_m1",
- jtag_swdiotms_grp1,
- &jtag_swdiotms_grp1_mux),
- FUNCTION("jtag_tck_m0", jtag_tck_grp0, &jtag_tck_grp0_mux),
- FUNCTION("jtag_tck_m1", jtag_tck_grp1, &jtag_tck_grp1_mux),
- FUNCTION("jtag_tdi_m0", jtag_tdi_grp0, &jtag_tdi_grp0_mux),
- FUNCTION("jtag_tdi_m1", jtag_tdi_grp1, &jtag_tdi_grp1_mux),
- FUNCTION("jtag_tdo_m0", jtag_tdo_grp0, &jtag_tdo_grp0_mux),
- FUNCTION("jtag_tdo_m1", jtag_tdo_grp1, &jtag_tdo_grp1_mux),
- FUNCTION("ks_kas_spi_m0", ks_kas_spi_grp0, &ks_kas_spi_grp0_mux),
- FUNCTION("ld_ldd", ld_ldd_grp, &ld_ldd_grp_mux),
- FUNCTION("ld_ldd_16bit", ld_ldd_16bit_grp, &ld_ldd_16bit_grp_mux),
- FUNCTION("ld_ldd_fck", ld_ldd_fck_grp, &ld_ldd_fck_grp_mux),
- FUNCTION("ld_ldd_lck", ld_ldd_lck_grp, &ld_ldd_lck_grp_mux),
- FUNCTION("lr_lcdrom", lr_lcdrom_grp, &lr_lcdrom_grp_mux),
- FUNCTION("lvds_analog", lvds_analog_grp, &lvds_analog_grp_mux),
- FUNCTION("nd_df_basic", nd_df_basic_grp, &nd_df_basic_grp_mux),
- FUNCTION("nd_df_wp", nd_df_wp_grp, &nd_df_wp_grp_mux),
- FUNCTION("nd_df_cs", nd_df_cs_grp, &nd_df_cs_grp_mux),
- FUNCTION("ps", ps_grp, &ps_grp_mux),
- FUNCTION("ps_no_dir", ps_no_dir_grp, &ps_no_dir_grp_mux),
- FUNCTION("pwc_core_on", pwc_core_on_grp, &pwc_core_on_grp_mux),
- FUNCTION("pwc_ext_on", pwc_ext_on_grp, &pwc_ext_on_grp_mux),
- FUNCTION("pwc_gpio3_clk", pwc_gpio3_clk_grp, &pwc_gpio3_clk_grp_mux),
- FUNCTION("pwc_io_on", pwc_io_on_grp, &pwc_io_on_grp_mux),
- FUNCTION("pwc_lowbatt_b_m0",
- pwc_lowbatt_b_grp0,
- &pwc_lowbatt_b_grp0_mux),
- FUNCTION("pwc_mem_on", pwc_mem_on_grp, &pwc_mem_on_grp_mux),
- FUNCTION("pwc_on_key_b_m0",
- pwc_on_key_b_grp0,
- &pwc_on_key_b_grp0_mux),
- FUNCTION("pwc_wakeup_src0",
- pwc_wakeup_src0_grp,
- &pwc_wakeup_src0_grp_mux),
- FUNCTION("pwc_wakeup_src1",
- pwc_wakeup_src1_grp,
- &pwc_wakeup_src1_grp_mux),
- FUNCTION("pwc_wakeup_src2",
- pwc_wakeup_src2_grp,
- &pwc_wakeup_src2_grp_mux),
- FUNCTION("pwc_wakeup_src3",
- pwc_wakeup_src3_grp,
- &pwc_wakeup_src3_grp_mux),
- FUNCTION("pw_cko0_m0", pw_cko0_grp0, &pw_cko0_grp0_mux),
- FUNCTION("pw_cko0_m1", pw_cko0_grp1, &pw_cko0_grp1_mux),
- FUNCTION("pw_cko0_m2", pw_cko0_grp2, &pw_cko0_grp2_mux),
- FUNCTION("pw_cko0_m3", pw_cko0_grp3, &pw_cko0_grp3_mux),
- FUNCTION("pw_cko1_m0", pw_cko1_grp0, &pw_cko1_grp0_mux),
- FUNCTION("pw_cko1_m1", pw_cko1_grp1, &pw_cko1_grp1_mux),
- FUNCTION("pw_cko1_m2", pw_cko1_grp2, &pw_cko1_grp2_mux),
- FUNCTION("pw_i2s01_clk_m0",
- pw_i2s01_clk_grp0,
- &pw_i2s01_clk_grp0_mux),
- FUNCTION("pw_i2s01_clk_m1",
- pw_i2s01_clk_grp1,
- &pw_i2s01_clk_grp1_mux),
- FUNCTION("pw_i2s01_clk_m2",
- pw_i2s01_clk_grp2,
- &pw_i2s01_clk_grp2_mux),
- FUNCTION("pw_pwm0_m0", pw_pwm0_grp0, &pw_pwm0_grp0_mux),
- FUNCTION("pw_pwm0_m1", pw_pwm0_grp1, &pw_pwm0_grp1_mux),
- FUNCTION("pw_pwm1_m0", pw_pwm1_grp0, &pw_pwm1_grp0_mux),
- FUNCTION("pw_pwm1_m1", pw_pwm1_grp1, &pw_pwm1_grp1_mux),
- FUNCTION("pw_pwm1_m2", pw_pwm1_grp2, &pw_pwm1_grp2_mux),
- FUNCTION("pw_pwm2_m0", pw_pwm2_grp0, &pw_pwm2_grp0_mux),
- FUNCTION("pw_pwm2_m1", pw_pwm2_grp1, &pw_pwm2_grp1_mux),
- FUNCTION("pw_pwm2_m2", pw_pwm2_grp2, &pw_pwm2_grp2_mux),
- FUNCTION("pw_pwm3_m0", pw_pwm3_grp0, &pw_pwm3_grp0_mux),
- FUNCTION("pw_pwm3_m1", pw_pwm3_grp1, &pw_pwm3_grp1_mux),
- FUNCTION("pw_pwm_cpu_vol_m0",
- pw_pwm_cpu_vol_grp0,
- &pw_pwm_cpu_vol_grp0_mux),
- FUNCTION("pw_pwm_cpu_vol_m1",
- pw_pwm_cpu_vol_grp1,
- &pw_pwm_cpu_vol_grp1_mux),
- FUNCTION("pw_pwm_cpu_vol_m2",
- pw_pwm_cpu_vol_grp2,
- &pw_pwm_cpu_vol_grp2_mux),
- FUNCTION("pw_backlight_m0",
- pw_backlight_grp0,
- &pw_backlight_grp0_mux),
- FUNCTION("pw_backlight_m1",
- pw_backlight_grp1,
- &pw_backlight_grp1_mux),
- FUNCTION("rg_eth_mac", rg_eth_mac_grp, &rg_eth_mac_grp_mux),
- FUNCTION("rg_gmac_phy_intr_n",
- rg_gmac_phy_intr_n_grp,
- &rg_gmac_phy_intr_n_grp_mux),
- FUNCTION("rg_rgmii_mac", rg_rgmii_mac_grp, &rg_rgmii_mac_grp_mux),
- FUNCTION("rg_rgmii_phy_ref_clk_m0",
- rg_rgmii_phy_ref_clk_grp0,
- &rg_rgmii_phy_ref_clk_grp0_mux),
- FUNCTION("rg_rgmii_phy_ref_clk_m1",
- rg_rgmii_phy_ref_clk_grp1,
- &rg_rgmii_phy_ref_clk_grp1_mux),
- FUNCTION("sd0", sd0_grp, &sd0_grp_mux),
- FUNCTION("sd0_4bit", sd0_4bit_grp, &sd0_4bit_grp_mux),
- FUNCTION("sd1", sd1_grp, &sd1_grp_mux),
- FUNCTION("sd1_4bit_m0", sd1_4bit_grp0, &sd1_4bit_grp0_mux),
- FUNCTION("sd1_4bit_m1", sd1_4bit_grp1, &sd1_4bit_grp1_mux),
- FUNCTION("sd2_basic", sd2_basic_grp, &sd2_basic_grp_mux),
- FUNCTION("sd2_cdb_m0", sd2_cdb_grp0, &sd2_cdb_grp0_mux),
- FUNCTION("sd2_cdb_m1", sd2_cdb_grp1, &sd2_cdb_grp1_mux),
- FUNCTION("sd2_wpb_m0", sd2_wpb_grp0, &sd2_wpb_grp0_mux),
- FUNCTION("sd2_wpb_m1", sd2_wpb_grp1, &sd2_wpb_grp1_mux),
- FUNCTION("sd3", sd3_9_grp, &sd3_9_grp_mux),
- FUNCTION("sd5", sd5_grp, &sd5_grp_mux),
- FUNCTION("sd6_m0", sd6_grp0, &sd6_grp0_mux),
- FUNCTION("sd6_m1", sd6_grp1, &sd6_grp1_mux),
- FUNCTION("sd9", sd3_9_grp, &sd3_9_grp_mux),
- FUNCTION("sp0_ext_ldo_on",
- sp0_ext_ldo_on_grp,
- &sp0_ext_ldo_on_grp_mux),
- FUNCTION("sp0_qspi", sp0_qspi_grp, &sp0_qspi_grp_mux),
- FUNCTION("sp1_spi", sp1_spi_grp, &sp1_spi_grp_mux),
- FUNCTION("tpiu_trace", tpiu_trace_grp, &tpiu_trace_grp_mux),
- FUNCTION("uart0", uart0_grp, &uart0_grp_mux),
- FUNCTION("uart0_nopause", uart0_nopause_grp, &uart0_nopause_grp_mux),
- FUNCTION("uart1", uart1_grp, &uart1_grp_mux),
- FUNCTION("uart2_cts_m0", uart2_cts_grp0, &uart2_cts_grp0_mux),
- FUNCTION("uart2_cts_m1", uart2_cts_grp1, &uart2_cts_grp1_mux),
- FUNCTION("uart2_rts_m0", uart2_rts_grp0, &uart2_rts_grp0_mux),
- FUNCTION("uart2_rts_m1", uart2_rts_grp1, &uart2_rts_grp1_mux),
- FUNCTION("uart2_rxd_m0", uart2_rxd_grp0, &uart2_rxd_grp0_mux),
- FUNCTION("uart2_rxd_m1", uart2_rxd_grp1, &uart2_rxd_grp1_mux),
- FUNCTION("uart2_rxd_m2", uart2_rxd_grp2, &uart2_rxd_grp2_mux),
- FUNCTION("uart2_txd_m0", uart2_txd_grp0, &uart2_txd_grp0_mux),
- FUNCTION("uart2_txd_m1", uart2_txd_grp1, &uart2_txd_grp1_mux),
- FUNCTION("uart2_txd_m2", uart2_txd_grp2, &uart2_txd_grp2_mux),
- FUNCTION("uart3_cts_m0", uart3_cts_grp0, &uart3_cts_grp0_mux),
- FUNCTION("uart3_cts_m1", uart3_cts_grp1, &uart3_cts_grp1_mux),
- FUNCTION("uart3_cts_m2", uart3_cts_grp2, &uart3_cts_grp2_mux),
- FUNCTION("uart3_rts_m0", uart3_rts_grp0, &uart3_rts_grp0_mux),
- FUNCTION("uart3_rts_m1", uart3_rts_grp1, &uart3_rts_grp1_mux),
- FUNCTION("uart3_rts_m2", uart3_rts_grp2, &uart3_rts_grp2_mux),
- FUNCTION("uart3_rxd_m0", uart3_rxd_grp0, &uart3_rxd_grp0_mux),
- FUNCTION("uart3_rxd_m1", uart3_rxd_grp1, &uart3_rxd_grp1_mux),
- FUNCTION("uart3_rxd_m2", uart3_rxd_grp2, &uart3_rxd_grp2_mux),
- FUNCTION("uart3_txd_m0", uart3_txd_grp0, &uart3_txd_grp0_mux),
- FUNCTION("uart3_txd_m1", uart3_txd_grp1, &uart3_txd_grp1_mux),
- FUNCTION("uart3_txd_m2", uart3_txd_grp2, &uart3_txd_grp2_mux),
- FUNCTION("uart4_basic", uart4_basic_grp, &uart4_basic_grp_mux),
- FUNCTION("uart4_cts_m0", uart4_cts_grp0, &uart4_cts_grp0_mux),
- FUNCTION("uart4_cts_m1", uart4_cts_grp1, &uart4_cts_grp1_mux),
- FUNCTION("uart4_cts_m2", uart4_cts_grp2, &uart4_cts_grp2_mux),
- FUNCTION("uart4_rts_m0", uart4_rts_grp0, &uart4_rts_grp0_mux),
- FUNCTION("uart4_rts_m1", uart4_rts_grp1, &uart4_rts_grp1_mux),
- FUNCTION("uart4_rts_m2", uart4_rts_grp2, &uart4_rts_grp2_mux),
- FUNCTION("usb0_drvvbus_m0",
- usb0_drvvbus_grp0,
- &usb0_drvvbus_grp0_mux),
- FUNCTION("usb0_drvvbus_m1",
- usb0_drvvbus_grp1,
- &usb0_drvvbus_grp1_mux),
- FUNCTION("usb1_drvvbus_m0",
- usb1_drvvbus_grp0,
- &usb1_drvvbus_grp0_mux),
- FUNCTION("usb1_drvvbus_m1",
- usb1_drvvbus_grp1,
- &usb1_drvvbus_grp1_mux),
- FUNCTION("visbus_dout", visbus_dout_grp, &visbus_dout_grp_mux),
- FUNCTION("vi_vip1", vi_vip1_grp, &vi_vip1_grp_mux),
- FUNCTION("vi_vip1_ext", vi_vip1_ext_grp, &vi_vip1_ext_grp_mux),
- FUNCTION("vi_vip1_low8bit",
- vi_vip1_low8bit_grp,
- &vi_vip1_low8bit_grp_mux),
- FUNCTION("vi_vip1_high8bit",
- vi_vip1_high8bit_grp,
- &vi_vip1_high8bit_grp_mux),
-};
-
-static struct atlas7_pinctrl_data atlas7_ioc_data = {
- .pads = (struct pinctrl_pin_desc *)atlas7_ioc_pads,
- .pads_cnt = ARRAY_SIZE(atlas7_ioc_pads),
- .grps = (struct atlas7_pin_group *)altas7_pin_groups,
- .grps_cnt = ARRAY_SIZE(altas7_pin_groups),
- .funcs = (struct atlas7_pmx_func *)atlas7_pmx_functions,
- .funcs_cnt = ARRAY_SIZE(atlas7_pmx_functions),
- .confs = (struct atlas7_pad_config *)atlas7_ioc_pad_confs,
- .confs_cnt = ARRAY_SIZE(atlas7_ioc_pad_confs),
-};
-
-/* Simple map data structure */
-struct map_data {
- u8 idx;
- u8 data;
-};
-
-/**
- * struct atlas7_pull_info - Atlas7 Pad pull info
- * @pad_type: The type of this Pad.
- * @mask: The mask value of this pin's pull bits.
- * @v2s: The map of pull register value to pull status.
- * @s2v: The map of pull status to pull register value.
- */
-struct atlas7_pull_info {
- u8 pad_type;
- u8 mask;
- const struct map_data *v2s;
- const struct map_data *s2v;
-};
-
-/* Map of pull register values to pull status */
-static const struct map_data p4we_pull_v2s[] = {
- { P4WE_PULL_UP, PULL_UP },
- { P4WE_HIGH_HYSTERESIS, HIGH_HYSTERESIS },
- { P4WE_HIGH_Z, HIGH_Z },
- { P4WE_PULL_DOWN, PULL_DOWN },
-};
-
-static const struct map_data p16st_pull_v2s[] = {
- { P16ST_PULL_UP, PULL_UP },
- { PD, PULL_UNKNOWN },
- { P16ST_HIGH_Z, HIGH_Z },
- { P16ST_PULL_DOWN, PULL_DOWN },
-};
-
-static const struct map_data pm31_pull_v2s[] = {
- { PM31_PULL_DISABLED, PULL_DOWN },
- { PM31_PULL_ENABLED, PULL_UP },
-};
-
-static const struct map_data pangd_pull_v2s[] = {
- { PANGD_PULL_UP, PULL_UP },
- { PD, PULL_UNKNOWN },
- { PANGD_HIGH_Z, HIGH_Z },
- { PANGD_PULL_DOWN, PULL_DOWN },
-};
-
-/* Map of pull status to pull register values */
-static const struct map_data p4we_pull_s2v[] = {
- { PULL_UP, P4WE_PULL_UP },
- { HIGH_HYSTERESIS, P4WE_HIGH_HYSTERESIS },
- { HIGH_Z, P4WE_HIGH_Z },
- { PULL_DOWN, P4WE_PULL_DOWN },
- { PULL_DISABLE, -1 },
- { PULL_ENABLE, -1 },
-};
-
-static const struct map_data p16st_pull_s2v[] = {
- { PULL_UP, P16ST_PULL_UP },
- { HIGH_HYSTERESIS, -1 },
- { HIGH_Z, P16ST_HIGH_Z },
- { PULL_DOWN, P16ST_PULL_DOWN },
- { PULL_DISABLE, -1 },
- { PULL_ENABLE, -1 },
-};
-
-static const struct map_data pm31_pull_s2v[] = {
- { PULL_UP, PM31_PULL_ENABLED },
- { HIGH_HYSTERESIS, -1 },
- { HIGH_Z, -1 },
- { PULL_DOWN, PM31_PULL_DISABLED },
- { PULL_DISABLE, -1 },
- { PULL_ENABLE, -1 },
-};
-
-static const struct map_data pangd_pull_s2v[] = {
- { PULL_UP, PANGD_PULL_UP },
- { HIGH_HYSTERESIS, -1 },
- { HIGH_Z, PANGD_HIGH_Z },
- { PULL_DOWN, PANGD_PULL_DOWN },
- { PULL_DISABLE, -1 },
- { PULL_ENABLE, -1 },
-};
-
-static const struct atlas7_pull_info atlas7_pull_map[] = {
- { PAD_T_4WE_PD, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
- { PAD_T_4WE_PU, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
- { PAD_T_16ST, P16ST_PULL_MASK, p16st_pull_v2s, p16st_pull_s2v },
- { PAD_T_M31_0204_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
- { PAD_T_M31_0204_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
- { PAD_T_M31_0610_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
- { PAD_T_M31_0610_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
- { PAD_T_AD, PANGD_PULL_MASK, pangd_pull_v2s, pangd_pull_s2v },
-};
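For reference, the s2v tables above are indexed directly by the pull status (PULL_UP, HIGH_HYSTERESIS, HIGH_Z, ...), with .data holding the raw register value and -1 flagging a combination that pad type cannot express. A minimal sketch of that lookup, assuming the structures above; pull_status_to_regval() is a hypothetical helper, not one the driver defines:

	static int pull_status_to_regval(const struct atlas7_pull_info *info,
					 u32 sel)
	{
		/* The s2v table is ordered so the pull status is the index */
		u8 raw = info->s2v[sel].data;

		/* -1 entries in the s2v tables mark unsupported states */
		if (raw == (u8)-1)
			return -ENOTSUPP;

		return raw & info->mask;
	}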
-
-/**
- * struct atlas7_ds_ma_info - Atlas7 Pad DriveStrength & currents info
- * @ma: The drive strength as a current value, in mA.
- * @ds_16st: The corresponding raw value for a 16st pad.
- * @ds_4we: The corresponding raw value for a 4we pad.
- * @ds_0204m31: The corresponding raw value for a 0204m31 pad.
- * @ds_0610m31: The corresponding raw value for a 0610m31 pad.
- */
-struct atlas7_ds_ma_info {
- u32 ma;
- u32 ds_16st;
- u32 ds_4we;
- u32 ds_0204m31;
- u32 ds_0610m31;
-};
-
-static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
- { 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL },
- { 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL },
- { 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0 },
- { 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL },
- { 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1 },
- { 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL },
- { 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL },
- { 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL },
- { 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL },
- { 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL },
- { 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL },
- { 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL },
- { 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL },
- { 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL },
- { 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL },
- { 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL },
-};
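As a worked example of reading this table: a request for 8 mA on a 4WE pad resolves to DS_4WE_1 and 16 mA to DS_4WE_2, while 8 mA on an M31 0204 or 0610 pad hits DS_NULL and is rejected by convert_current_to_drive_strength() further below. Only 16ST pads have an encoding for every even step from 2 mA to 32 mA.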
-
-/**
- * struct atlas7_ds_info - Atlas7 Pad DriveStrength info
- * @type: The type of this Pad.
- * @mask: The mask value of this pin's drive strength bits.
- * @imval: The immediate value of the drive strength register.
- * @reserved: Reserved space.
- */
-struct atlas7_ds_info {
- u8 type;
- u8 mask;
- u8 imval;
- u8 reserved;
-};
-
-static const struct atlas7_ds_info atlas7_ds_map[] = {
- { PAD_T_4WE_PD, DS_2BIT_MASK, DS_2BIT_IM_VAL },
- { PAD_T_4WE_PU, DS_2BIT_MASK, DS_2BIT_IM_VAL },
- { PAD_T_16ST, DS_4BIT_MASK, DS_4BIT_IM_VAL },
- { PAD_T_M31_0204_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
- { PAD_T_M31_0204_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
- { PAD_T_M31_0610_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
- { PAD_T_M31_0610_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
- { PAD_T_AD, DS_NULL, DS_NULL },
-};
-
-static inline u32 atlas7_pin_to_bank(u32 pin)
-{
- return (pin >= ATLAS7_PINCTRL_BANK_0_PINS) ? 1 : 0;
-}
-
-static int atlas7_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- return pmx->pctl_data->funcs_cnt;
-}
-
-static const char *atlas7_pmx_get_func_name(struct pinctrl_dev *pctldev,
- u32 selector)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- return pmx->pctl_data->funcs[selector].name;
-}
-
-static int atlas7_pmx_get_func_groups(struct pinctrl_dev *pctldev,
- u32 selector, const char * const **groups,
- u32 * const num_groups)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- *groups = pmx->pctl_data->funcs[selector].groups;
- *num_groups = pmx->pctl_data->funcs[selector].num_groups;
-
- return 0;
-}
-
-static void __atlas7_pmx_pin_input_disable_set(struct atlas7_pmx *pmx,
- const struct atlas7_pad_mux *mux)
-{
- /*
- * Set Input Disable to avoid input glitches.
- *
- * All Input-Disable Control registers are located on IOCRTC,
- * so the regs bank is always 0.
- */
- if (mux->dinput_reg && mux->dinput_val_reg) {
- writel(DI_MASK << mux->dinput_bit,
- pmx->regs[BANK_DS] + CLR_REG(mux->dinput_reg));
- writel(DI_DISABLE << mux->dinput_bit,
- pmx->regs[BANK_DS] + mux->dinput_reg);
-
- writel(DIV_MASK << mux->dinput_val_bit,
- pmx->regs[BANK_DS] + CLR_REG(mux->dinput_val_reg));
- writel(DIV_DISABLE << mux->dinput_val_bit,
- pmx->regs[BANK_DS] + mux->dinput_val_reg);
- }
-}
-
-static void __atlas7_pmx_pin_input_disable_clr(struct atlas7_pmx *pmx,
- const struct atlas7_pad_mux *mux)
-{
- /* Clear Input Disable to avoid input glitches */
- if (mux->dinput_reg && mux->dinput_val_reg) {
- writel(DI_MASK << mux->dinput_bit,
- pmx->regs[BANK_DS] + CLR_REG(mux->dinput_reg));
- writel(DI_ENABLE << mux->dinput_bit,
- pmx->regs[BANK_DS] + mux->dinput_reg);
-
- writel(DIV_MASK << mux->dinput_val_bit,
- pmx->regs[BANK_DS] + CLR_REG(mux->dinput_val_reg));
- writel(DIV_ENABLE << mux->dinput_val_bit,
- pmx->regs[BANK_DS] + mux->dinput_val_reg);
- }
-}
-
-static int __atlas7_pmx_pin_ad_sel(struct atlas7_pmx *pmx,
- struct atlas7_pad_config *conf,
- u32 bank, u32 ad_sel)
-{
- unsigned long regv;
-
- /* Write to clear register to clear A/D selector */
- writel(ANA_CLEAR_MASK << conf->ad_ctrl_bit,
- pmx->regs[bank] + CLR_REG(conf->ad_ctrl_reg));
-
- /* Set target pad A/D selector */
- regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
- regv &= ~(ANA_CLEAR_MASK << conf->ad_ctrl_bit);
- writel(regv | (ad_sel << conf->ad_ctrl_bit),
- pmx->regs[bank] + conf->ad_ctrl_reg);
-
- regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
- pr_debug("bank:%d reg:0x%04x val:0x%08lx\n",
- bank, conf->ad_ctrl_reg, regv);
- return 0;
-}
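The pattern above (write the field mask to CLR_REG(reg), then read-modify-write the live register) recurs for every mux, pull and drive-strength update in this file. A hypothetical condensation under the same CLR_REG() layout; atlas7_field_write() is illustrative only and not a helper the driver provides:

	static void atlas7_field_write(void __iomem *base, u32 reg, u32 bit,
				       u32 mask, u32 val)
	{
		unsigned long regv;

		/* The companion "clear" register zeroes just this field */
		writel(mask << bit, base + CLR_REG(reg));

		/* Then OR the new value into the live register */
		regv = readl(base + reg) & ~(mask << bit);
		writel(regv | (val << bit), base + reg);
	}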
-
-static int __atlas7_pmx_pin_analog_enable(struct atlas7_pmx *pmx,
- struct atlas7_pad_config *conf, u32 bank)
-{
- /* Only PAD_T_AD pads can switch between analogue and digital */
- if (conf->type != PAD_T_AD)
- return -EINVAL;
-
- return __atlas7_pmx_pin_ad_sel(pmx, conf, bank, 0);
-}
-
-static int __atlas7_pmx_pin_digital_enable(struct atlas7_pmx *pmx,
- struct atlas7_pad_config *conf, u32 bank)
-{
- /* Other type pads are always digital */
- if (conf->type != PAD_T_AD)
- return 0;
-
- return __atlas7_pmx_pin_ad_sel(pmx, conf, bank, 1);
-}
-
-static int __atlas7_pmx_pin_enable(struct atlas7_pmx *pmx,
- u32 pin, u32 func)
-{
- struct atlas7_pad_config *conf;
- u32 bank;
- int ret;
- unsigned long regv;
-
- pr_debug("PMX DUMP ### pin#%d func:%d #### START >>>\n",
- pin, func);
-
- /* Get this Pad's descriptor from PINCTRL */
- conf = &pmx->pctl_data->confs[pin];
- bank = atlas7_pin_to_bank(pin);
-
- /* Just enable the analog function of this pad */
- if (FUNC_ANALOGUE == func) {
- ret = __atlas7_pmx_pin_analog_enable(pmx, conf, bank);
- if (ret)
- dev_err(pmx->dev,
- "Convert pad#%d to analog failed, ret=%d\n",
- pin, ret);
- return ret;
- }
-
- /* Set Pads from analog to digital */
- ret = __atlas7_pmx_pin_digital_enable(pmx, conf, bank);
- if (ret) {
- dev_err(pmx->dev,
- "Convert pad#%d to digital failed, ret=%d\n",
- pin, ret);
- return ret;
- }
-
- /* Write to clear register to clear current function */
- writel(FUNC_CLEAR_MASK << conf->mux_bit,
- pmx->regs[bank] + CLR_REG(conf->mux_reg));
-
- /* Set target pad mux function */
- regv = readl(pmx->regs[bank] + conf->mux_reg);
- regv &= ~(FUNC_CLEAR_MASK << conf->mux_bit);
- writel(regv | (func << conf->mux_bit),
- pmx->regs[bank] + conf->mux_reg);
-
- regv = readl(pmx->regs[bank] + conf->mux_reg);
- pr_debug("bank:%d reg:0x%04x val:0x%08lx\n",
- bank, conf->mux_reg, regv);
-
- return 0;
-}
-
-static int atlas7_pmx_set_mux(struct pinctrl_dev *pctldev,
- u32 func_selector, u32 group_selector)
-{
- int idx, ret;
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct atlas7_pmx_func *pmx_func;
- struct atlas7_pin_group *pin_grp;
- const struct atlas7_grp_mux *grp_mux;
- const struct atlas7_pad_mux *mux;
-
- pmx_func = &pmx->pctl_data->funcs[func_selector];
- pin_grp = &pmx->pctl_data->grps[group_selector];
-
- pr_debug("PMX DUMP ### Function:[%s] Group:[%s] #### START >>>\n",
- pmx_func->name, pin_grp->name);
-
- /* sd3 and sd9 pin selection is controlled by the SYS2PCI_SDIO9SEL register */
- if (pin_grp->pins == (unsigned int *)&sd3_9_pins) {
- if (!strcmp(pmx_func->name, "sd9"))
- writel(1, pmx->sys2pci_base + SYS2PCI_SDIO9SEL);
- else
- writel(0, pmx->sys2pci_base + SYS2PCI_SDIO9SEL);
- }
-
- grp_mux = pmx_func->grpmux;
-
- for (idx = 0; idx < grp_mux->pad_mux_count; idx++) {
- mux = &grp_mux->pad_mux_list[idx];
- __atlas7_pmx_pin_input_disable_set(pmx, mux);
- ret = __atlas7_pmx_pin_enable(pmx, mux->pin, mux->func);
- if (ret) {
- dev_err(pmx->dev,
- "FUNC:%s GRP:%s PIN#%d.%d failed, ret=%d\n",
- pmx_func->name, pin_grp->name,
- mux->pin, mux->func, ret);
- BUG_ON(1);
- }
- __atlas7_pmx_pin_input_disable_clr(pmx, mux);
- }
- pr_debug("PMX DUMP ### Function:[%s] Group:[%s] #### END <<<\n",
- pmx_func->name, pin_grp->name);
-
- return 0;
-}
-
-static u32 convert_current_to_drive_strength(u32 type, u32 ma)
-{
- int idx;
-
- for (idx = 0; idx < ARRAY_SIZE(atlas7_ma2ds_map); idx++) {
- if (atlas7_ma2ds_map[idx].ma != ma)
- continue;
-
- if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU)
- return atlas7_ma2ds_map[idx].ds_4we;
- else if (type == PAD_T_16ST)
- return atlas7_ma2ds_map[idx].ds_16st;
- else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU)
- return atlas7_ma2ds_map[idx].ds_0204m31;
- else if (type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU)
- return atlas7_ma2ds_map[idx].ds_0610m31;
- }
-
- return DS_NULL;
-}
-
-static int altas7_pinctrl_set_pull_sel(struct pinctrl_dev *pctldev,
- u32 pin, u32 sel)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
- const struct atlas7_pull_info *pull_info;
- u32 bank;
- unsigned long regv;
- void __iomem *pull_sel_reg;
-
- bank = atlas7_pin_to_bank(pin);
- pull_info = &atlas7_pull_map[conf->type];
- pull_sel_reg = pmx->regs[bank] + conf->pupd_reg;
-
- /* Retrieve the corresponding register value from the table by sel */
- regv = pull_info->s2v[sel].data & pull_info->mask;
-
- /* Clear & Set new value to pull register */
- writel(pull_info->mask << conf->pupd_bit, CLR_REG(pull_sel_reg));
- writel(regv << conf->pupd_bit, pull_sel_reg);
-
- pr_debug("PIN_CFG ### SET PIN#%d PULL SELECTOR:%d == OK ####\n",
- pin, sel);
- return 0;
-}
-
-static int __altas7_pinctrl_set_drive_strength_sel(struct pinctrl_dev *pctldev,
- u32 pin, u32 sel)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
- const struct atlas7_ds_info *ds_info;
- u32 bank;
- void __iomem *ds_sel_reg;
-
- ds_info = &atlas7_ds_map[conf->type];
- if (sel & (~(ds_info->mask)))
- goto unsupport;
-
- bank = atlas7_pin_to_bank(pin);
- ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
-
- writel(ds_info->imval << conf->drvstr_bit, CLR_REG(ds_sel_reg));
- writel(sel << conf->drvstr_bit, ds_sel_reg);
-
- return 0;
-
-unsupport:
- pr_err("Pad#%d type[%d] doesn't support ds code[%d]!\n",
- pin, conf->type, sel);
- return -ENOTSUPP;
-}
-
-static int altas7_pinctrl_set_drive_strength_sel(struct pinctrl_dev *pctldev,
- u32 pin, u32 ma)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
- u32 type = conf->type;
- u32 sel;
- int ret;
-
- sel = convert_current_to_drive_strength(conf->type, ma);
- if (DS_NULL == sel) {
- pr_err("Pad#%d type[%d] doesn't support ds current[%d]!\n",
- pin, type, ma);
- return -ENOTSUPP;
- }
-
- ret = __altas7_pinctrl_set_drive_strength_sel(pctldev,
- pin, sel);
- pr_debug("PIN_CFG ### SET PIN#%d DS:%d MA:%d == %s ####\n",
- pin, sel, ma, ret?"FAILED":"OK");
- return ret;
-}
-
-static int atlas7_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range, u32 pin)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- u32 idx;
-
- dev_dbg(pmx->dev,
- "atlas7_pmx_gpio_request_enable: pin=%d\n", pin);
- for (idx = 0; idx < range->npins; idx++) {
- if (pin == range->pins[idx])
- break;
- }
-
- if (idx >= range->npins) {
- dev_err(pmx->dev,
- "The pin#%d could not be requested as GPIO!!\n",
- pin);
- return -EPERM;
- }
-
- __atlas7_pmx_pin_enable(pmx, pin, FUNC_GPIO);
-
- return 0;
-}
-
-static const struct pinmux_ops atlas7_pinmux_ops = {
- .get_functions_count = atlas7_pmx_get_funcs_count,
- .get_function_name = atlas7_pmx_get_func_name,
- .get_function_groups = atlas7_pmx_get_func_groups,
- .set_mux = atlas7_pmx_set_mux,
- .gpio_request_enable = atlas7_pmx_gpio_request_enable,
-};
-
-static int atlas7_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- return pmx->pctl_data->grps_cnt;
-}
-
-static const char *atlas7_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
- u32 group)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- return pmx->pctl_data->grps[group].name;
-}
-
-static int atlas7_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- u32 group, const u32 **pins, u32 *num_pins)
-{
- struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- *num_pins = pmx->pctl_data->grps[group].num_pins;
- *pins = pmx->pctl_data->grps[group].pins;
-
- return 0;
-}
-
-static int atlas7_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- u32 *num_maps)
-{
- return pinconf_generic_dt_node_to_map(pctldev, np_config, map,
- num_maps, PIN_MAP_TYPE_INVALID);
-}
-
-static void atlas7_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, u32 num_maps)
-{
- kfree(map);
-}
-
-static const struct pinctrl_ops atlas7_pinctrl_ops = {
- .get_groups_count = atlas7_pinctrl_get_groups_count,
- .get_group_name = atlas7_pinctrl_get_group_name,
- .get_group_pins = atlas7_pinctrl_get_group_pins,
- .dt_node_to_map = atlas7_pinctrl_dt_node_to_map,
- .dt_free_map = atlas7_pinctrl_dt_free_map,
-};
-
-static int atlas7_pin_config_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long *configs,
- unsigned num_configs)
-{
- u16 param;
- u32 arg;
- int idx, err;
-
- for (idx = 0; idx < num_configs; idx++) {
- param = pinconf_to_config_param(configs[idx]);
- arg = pinconf_to_config_argument(configs[idx]);
-
- pr_debug("PMX CFG###### ATLAS7 PIN#%d [%s] CONFIG PARAM:%d ARG:%d >>>>>\n",
- pin, atlas7_ioc_pads[pin].name, param, arg);
- switch (param) {
- case PIN_CONFIG_BIAS_PULL_UP:
- err = altas7_pinctrl_set_pull_sel(pctldev,
- pin, PULL_UP);
- if (err)
- return err;
- break;
-
- case PIN_CONFIG_BIAS_PULL_DOWN:
- err = altas7_pinctrl_set_pull_sel(pctldev,
- pin, PULL_DOWN);
- if (err)
- return err;
- break;
-
- case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- err = altas7_pinctrl_set_pull_sel(pctldev,
- pin, HIGH_HYSTERESIS);
- if (err)
- return err;
- break;
- case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- err = altas7_pinctrl_set_pull_sel(pctldev,
- pin, HIGH_Z);
- if (err)
- return err;
- break;
-
- case PIN_CONFIG_DRIVE_STRENGTH:
- err = altas7_pinctrl_set_drive_strength_sel(pctldev,
- pin, arg);
- if (err)
- return err;
- break;
- default:
- return -ENOTSUPP;
- }
- pr_debug("PMX CFG###### ATLAS7 PIN#%d [%s] CONFIG PARAM:%d ARG:%d <<<<\n",
- pin, atlas7_ioc_pads[pin].name, param, arg);
- }
-
- return 0;
-}
-
-static int atlas7_pin_config_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long *configs,
- unsigned num_configs)
-{
- const unsigned *pins;
- unsigned npins;
- int i, ret;
-
- ret = atlas7_pinctrl_get_group_pins(pctldev, group, &pins, &npins);
- if (ret)
- return ret;
- for (i = 0; i < npins; i++) {
- if (atlas7_pin_config_set(pctldev, pins[i],
- configs, num_configs))
- return -ENOTSUPP;
- }
- return 0;
-}
-
-static const struct pinconf_ops atlas7_pinconf_ops = {
- .pin_config_set = atlas7_pin_config_set,
- .pin_config_group_set = atlas7_pin_config_group_set,
- .is_generic = true,
-};
-
-static int atlas7_pinmux_probe(struct platform_device *pdev)
-{
- int ret, idx;
- struct atlas7_pmx *pmx;
- struct device_node *np = pdev->dev.of_node;
- u32 banks = ATLAS7_PINCTRL_REG_BANKS;
- struct device_node *sys2pci_np;
- struct resource res;
-
- /* Create state holders etc for this driver */
- pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
- if (!pmx)
- return -ENOMEM;
-
- /*
- * sd3 and sd9 share all pins; the active function is selected by
- * the SYS2PCI_SDIO9SEL register.
- */
- sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
- if (!sys2pci_np)
- return -EINVAL;
-
- ret = of_address_to_resource(sys2pci_np, 0, &res);
- of_node_put(sys2pci_np);
- if (ret)
- return ret;
-
- pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pmx->sys2pci_base))
- return PTR_ERR(pmx->sys2pci_base);
-
- pmx->dev = &pdev->dev;
-
- pmx->pctl_data = &atlas7_ioc_data;
- pmx->pctl_desc.name = "pinctrl-atlas7";
- pmx->pctl_desc.pins = pmx->pctl_data->pads;
- pmx->pctl_desc.npins = pmx->pctl_data->pads_cnt;
- pmx->pctl_desc.pctlops = &atlas7_pinctrl_ops;
- pmx->pctl_desc.pmxops = &atlas7_pinmux_ops;
- pmx->pctl_desc.confops = &atlas7_pinconf_ops;
-
- for (idx = 0; idx < banks; idx++) {
- pmx->regs[idx] = of_iomap(np, idx);
- if (!pmx->regs[idx]) {
- dev_err(&pdev->dev,
- "can't map ioc bank#%d registers\n", idx);
- ret = -ENOMEM;
- goto unmap_io;
- }
- }
-
- /* Now register the pin controller and all pins it handles */
- pmx->pctl = pinctrl_register(&pmx->pctl_desc, &pdev->dev, pmx);
- if (IS_ERR(pmx->pctl)) {
- dev_err(&pdev->dev, "could not register atlas7 pinmux driver\n");
- ret = PTR_ERR(pmx->pctl);
- goto unmap_io;
- }
-
- platform_set_drvdata(pdev, pmx);
-
- dev_info(&pdev->dev, "initialized atlas7 pinmux driver\n");
-
- return 0;
-
-unmap_io:
- for (idx = 0; idx < banks; idx++) {
- if (!pmx->regs[idx])
- break;
- iounmap(pmx->regs[idx]);
- }
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int atlas7_pinmux_suspend_noirq(struct device *dev)
-{
- struct atlas7_pmx *pmx = dev_get_drvdata(dev);
- struct atlas7_pad_status *status;
- struct atlas7_pad_config *conf;
- const struct atlas7_ds_info *ds_info;
- const struct atlas7_pull_info *pull_info;
- int idx;
- u32 bank;
- unsigned long regv;
-
- for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
- /* Get this Pad's descriptor from PINCTRL */
- conf = &pmx->pctl_data->confs[idx];
- bank = atlas7_pin_to_bank(idx);
- status = &pmx->sleep_data[idx];
-
- /* Save Function selector */
- regv = readl(pmx->regs[bank] + conf->mux_reg);
- status->func = (regv >> conf->mux_bit) & FUNC_CLEAR_MASK;
-
- /* Check if Pad is in Analogue selector */
- if (conf->ad_ctrl_reg == -1)
- goto save_ds_sel;
-
- regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
- if (!(regv & (ANA_CLEAR_MASK << conf->ad_ctrl_bit)))
- status->func = FUNC_ANALOGUE;
-
-save_ds_sel:
- if (conf->drvstr_reg == -1)
- goto save_pull_sel;
-
- /* Save Drive Strength selector */
- ds_info = &atlas7_ds_map[conf->type];
- regv = readl(pmx->regs[bank] + conf->drvstr_reg);
- status->dstr = (regv >> conf->drvstr_bit) & ds_info->mask;
-
-save_pull_sel:
- /* Save Pull selector */
- pull_info = &atlas7_pull_map[conf->type];
- regv = readl(pmx->regs[bank] + conf->pupd_reg);
- regv = (regv >> conf->pupd_bit) & pull_info->mask;
- status->pull = pull_info->v2s[regv].data;
- }
-
- /*
- * Save the input-disable selectors; these selectors belong to
- * the mux functions, not to individual pins.
- */
- for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
- pmx->status_ds[idx] = readl(pmx->regs[BANK_DS] +
- IN_DISABLE_0_REG_SET + 0x8 * idx);
- pmx->status_dsv[idx] = readl(pmx->regs[BANK_DS] +
- IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
- }
-
- return 0;
-}
-
-static int atlas7_pinmux_resume_noirq(struct device *dev)
-{
- struct atlas7_pmx *pmx = dev_get_drvdata(dev);
- struct atlas7_pad_status *status;
- int idx;
-
- for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
- /* Get this Pad's descriptor from PINCTRL */
- status = &pmx->sleep_data[idx];
-
- /* Restore Function selector */
- __atlas7_pmx_pin_enable(pmx, idx, (u32)status->func & 0xff);
-
- if (FUNC_ANALOGUE == status->func)
- goto restore_pull_sel;
-
- /* Restore Drive Strength selector */
- __altas7_pinctrl_set_drive_strength_sel(pmx->pctl, idx,
- (u32)status->dstr & 0xff);
-
-restore_pull_sel:
- /* Restore Pull selector */
- altas7_pinctrl_set_pull_sel(pmx->pctl, idx,
- (u32)status->pull & 0xff);
- }
-
- /*
- * Restore the input-disable selectors; these selectors belong to
- * the mux functions, not to individual pins.
- */
- for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
- writel(~0, pmx->regs[BANK_DS] +
- IN_DISABLE_0_REG_CLR + 0x8 * idx);
- writel(pmx->status_ds[idx], pmx->regs[BANK_DS] +
- IN_DISABLE_0_REG_SET + 0x8 * idx);
- writel(~0, pmx->regs[BANK_DS] +
- IN_DISABLE_VAL_0_REG_CLR + 0x8 * idx);
- writel(pmx->status_dsv[idx], pmx->regs[BANK_DS] +
- IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops atlas7_pinmux_pm_ops = {
- .suspend_noirq = atlas7_pinmux_suspend_noirq,
- .resume_noirq = atlas7_pinmux_resume_noirq,
- .freeze_noirq = atlas7_pinmux_suspend_noirq,
- .restore_noirq = atlas7_pinmux_resume_noirq,
-};
-#endif
-
-static const struct of_device_id atlas7_pinmux_ids[] = {
- { .compatible = "sirf,atlas7-ioc",},
- {},
-};
-
-static struct platform_driver atlas7_pinmux_driver = {
- .driver = {
- .name = "atlas7-ioc",
- .of_match_table = atlas7_pinmux_ids,
-#ifdef CONFIG_PM_SLEEP
- .pm = &atlas7_pinmux_pm_ops,
-#endif
- },
- .probe = atlas7_pinmux_probe,
-};
-
-static int __init atlas7_pinmux_init(void)
-{
- return platform_driver_register(&atlas7_pinmux_driver);
-}
-arch_initcall(atlas7_pinmux_init);
-
-/*
- * The following is the GPIO code.
- */
-static inline struct
-atlas7_gpio_bank *atlas7_gpio_to_bank(struct atlas7_gpio_chip *a7gc, u32 gpio)
-{
- return &a7gc->banks[GPIO_TO_BANK(gpio)];
-}
-
-static int __atlas7_gpio_to_pin(struct atlas7_gpio_chip *a7gc, u32 gpio)
-{
- struct atlas7_gpio_bank *bank;
- u32 ofs;
-
- bank = atlas7_gpio_to_bank(a7gc, gpio);
- ofs = gpio - bank->gpio_offset;
- if (ofs >= bank->ngpio)
- return -ENODEV;
-
- return bank->gpio_pins[ofs];
-}
-
-static void atlas7_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(gc);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 val, pin_in_bank;
- unsigned long flags;
-
- bank = atlas7_gpio_to_bank(a7gc, d->hwirq);
- pin_in_bank = d->hwirq - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- val = readl(ctrl_reg);
- /* clear interrupt status */
- writel(val, ctrl_reg);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-}
-
-static void __atlas7_gpio_irq_mask(struct atlas7_gpio_chip *a7gc, int idx)
-{
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 val, pin_in_bank;
-
- bank = atlas7_gpio_to_bank(a7gc, idx);
- pin_in_bank = idx - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- val = readl(ctrl_reg);
- val &= ~(ATLAS7_GPIO_CTL_INTR_EN_MASK |
- ATLAS7_GPIO_CTL_INTR_STATUS_MASK);
- writel(val, ctrl_reg);
-}
-
-static void atlas7_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- __atlas7_gpio_irq_mask(a7gc, d->hwirq);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-}
-
-static void atlas7_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(gc);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 val, pin_in_bank;
- unsigned long flags;
-
- bank = atlas7_gpio_to_bank(a7gc, d->hwirq);
- pin_in_bank = d->hwirq - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- val = readl(ctrl_reg);
- val &= ~ATLAS7_GPIO_CTL_INTR_STATUS_MASK;
- val |= ATLAS7_GPIO_CTL_INTR_EN_MASK;
- writel(val, ctrl_reg);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-}
-
-static int atlas7_gpio_irq_type(struct irq_data *d,
- unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(gc);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 val, pin_in_bank;
- unsigned long flags;
-
- bank = atlas7_gpio_to_bank(a7gc, d->hwirq);
- pin_in_bank = d->hwirq - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- val = readl(ctrl_reg);
- val &= ~(ATLAS7_GPIO_CTL_INTR_STATUS_MASK |
- ATLAS7_GPIO_CTL_INTR_EN_MASK);
-
- switch (type) {
- case IRQ_TYPE_NONE:
- break;
-
- case IRQ_TYPE_EDGE_RISING:
- val |= ATLAS7_GPIO_CTL_INTR_HIGH_MASK |
- ATLAS7_GPIO_CTL_INTR_TYPE_MASK;
- val &= ~ATLAS7_GPIO_CTL_INTR_LOW_MASK;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- val &= ~ATLAS7_GPIO_CTL_INTR_HIGH_MASK;
- val |= ATLAS7_GPIO_CTL_INTR_LOW_MASK |
- ATLAS7_GPIO_CTL_INTR_TYPE_MASK;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- val |= ATLAS7_GPIO_CTL_INTR_HIGH_MASK |
- ATLAS7_GPIO_CTL_INTR_LOW_MASK |
- ATLAS7_GPIO_CTL_INTR_TYPE_MASK;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- val &= ~(ATLAS7_GPIO_CTL_INTR_HIGH_MASK |
- ATLAS7_GPIO_CTL_INTR_TYPE_MASK);
- val |= ATLAS7_GPIO_CTL_INTR_LOW_MASK;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- val |= ATLAS7_GPIO_CTL_INTR_HIGH_MASK;
- val &= ~(ATLAS7_GPIO_CTL_INTR_LOW_MASK |
- ATLAS7_GPIO_CTL_INTR_TYPE_MASK);
- break;
- }
-
- writel(val, ctrl_reg);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- return 0;
-}
-
-static struct irq_chip atlas7_gpio_irq_chip = {
- .name = "atlas7-gpio-irq",
- .irq_ack = atlas7_gpio_irq_ack,
- .irq_mask = atlas7_gpio_irq_mask,
- .irq_unmask = atlas7_gpio_irq_unmask,
- .irq_set_type = atlas7_gpio_irq_type,
-};
-
-static void atlas7_gpio_handle_irq(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(gc);
- struct atlas7_gpio_bank *bank = NULL;
- u32 status, ctrl;
- int pin_in_bank = 0, idx;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int irq = irq_desc_get_irq(desc);
-
- for (idx = 0; idx < a7gc->nbank; idx++) {
- bank = &a7gc->banks[idx];
- if (bank->irq == irq)
- break;
- }
- BUG_ON(idx == a7gc->nbank);
-
- chained_irq_enter(chip, desc);
-
- status = readl(ATLAS7_GPIO_INT_STATUS(bank));
- if (!status) {
- pr_warn("%s: gpio [%s] status %#x no interrupt is flagged\n",
- __func__, gc->label, status);
- handle_bad_irq(desc);
- return;
- }
-
- while (status) {
- ctrl = readl(ATLAS7_GPIO_CTRL(bank, pin_in_bank));
-
- /*
- * Here we must check whether the corresponding GPIO's
- * interrupt has been enabled, otherwise just skip it
- */
- if ((status & 0x1) && (ctrl & ATLAS7_GPIO_CTL_INTR_EN_MASK)) {
- pr_debug("%s: chip[%s] gpio:%d happens\n",
- __func__, gc->label,
- bank->gpio_offset + pin_in_bank);
- generic_handle_irq(
- irq_find_mapping(gc->irq.domain,
- bank->gpio_offset + pin_in_bank));
- }
-
- if (++pin_in_bank >= bank->ngpio)
- break;
-
- status = status >> 1;
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void __atlas7_gpio_set_input(struct atlas7_gpio_chip *a7gc,
- unsigned int gpio)
-{
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 val, pin_in_bank;
-
- bank = atlas7_gpio_to_bank(a7gc, gpio);
- pin_in_bank = gpio - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- val = readl(ctrl_reg);
- val &= ~ATLAS7_GPIO_CTL_OUT_EN_MASK;
- writel(val, ctrl_reg);
-}
-
-static int atlas7_gpio_request(struct gpio_chip *chip,
- unsigned int gpio)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- int ret;
- unsigned long flags;
-
- ret = __atlas7_gpio_to_pin(a7gc, gpio);
- if (ret < 0)
- return ret;
-
- if (pinctrl_gpio_request(chip->base + gpio))
- return -ENODEV;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- /*
- * Default status:
- * set the direction to input and mask the irq.
- */
- __atlas7_gpio_set_input(a7gc, gpio);
- __atlas7_gpio_irq_mask(a7gc, gpio);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- return 0;
-}
-
-static void atlas7_gpio_free(struct gpio_chip *chip,
- unsigned int gpio)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- __atlas7_gpio_irq_mask(a7gc, gpio);
- __atlas7_gpio_set_input(a7gc, gpio);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- pinctrl_gpio_free(chip->base + gpio);
-}
-
-static int atlas7_gpio_direction_input(struct gpio_chip *chip,
- unsigned int gpio)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- __atlas7_gpio_set_input(a7gc, gpio);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- return 0;
-}
-
-static void __atlas7_gpio_set_output(struct atlas7_gpio_chip *a7gc,
- unsigned int gpio, int value)
-{
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 out_ctrl, pin_in_bank;
-
- bank = atlas7_gpio_to_bank(a7gc, gpio);
- pin_in_bank = gpio - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- out_ctrl = readl(ctrl_reg);
- if (value)
- out_ctrl |= ATLAS7_GPIO_CTL_DATAOUT_MASK;
- else
- out_ctrl &= ~ATLAS7_GPIO_CTL_DATAOUT_MASK;
-
- out_ctrl &= ~ATLAS7_GPIO_CTL_INTR_EN_MASK;
- out_ctrl |= ATLAS7_GPIO_CTL_OUT_EN_MASK;
- writel(out_ctrl, ctrl_reg);
-}
-
-static int atlas7_gpio_direction_output(struct gpio_chip *chip,
- unsigned int gpio, int value)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- __atlas7_gpio_set_output(a7gc, gpio, value);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- return 0;
-}
-
-static int atlas7_gpio_get_value(struct gpio_chip *chip,
- unsigned int gpio)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- struct atlas7_gpio_bank *bank;
- u32 val, pin_in_bank;
- unsigned long flags;
-
- bank = atlas7_gpio_to_bank(a7gc, gpio);
- pin_in_bank = gpio - bank->gpio_offset;
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- val = readl(ATLAS7_GPIO_CTRL(bank, pin_in_bank));
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-
- return !!(val & ATLAS7_GPIO_CTL_DATAIN_MASK);
-}
-
-static void atlas7_gpio_set_value(struct gpio_chip *chip,
- unsigned int gpio, int value)
-{
- struct atlas7_gpio_chip *a7gc = gpiochip_get_data(chip);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 ctrl, pin_in_bank;
- unsigned long flags;
-
- bank = atlas7_gpio_to_bank(a7gc, gpio);
- pin_in_bank = gpio - bank->gpio_offset;
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin_in_bank);
-
- raw_spin_lock_irqsave(&a7gc->lock, flags);
-
- ctrl = readl(ctrl_reg);
- if (value)
- ctrl |= ATLAS7_GPIO_CTL_DATAOUT_MASK;
- else
- ctrl &= ~ATLAS7_GPIO_CTL_DATAOUT_MASK;
- writel(ctrl, ctrl_reg);
-
- raw_spin_unlock_irqrestore(&a7gc->lock, flags);
-}
-
-static const struct of_device_id atlas7_gpio_ids[] = {
- { .compatible = "sirf,atlas7-gpio", },
- {},
-};
-
-static int atlas7_gpio_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct atlas7_gpio_chip *a7gc;
- struct gpio_chip *chip;
- u32 nbank;
- int ret, idx;
- struct gpio_irq_chip *girq;
-
- ret = of_property_read_u32(np, "gpio-banks", &nbank);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not find GPIO bank info,ret=%d!\n",
- ret);
- return ret;
- }
-
- /* retrieve gpio descriptor data */
- a7gc = devm_kzalloc(&pdev->dev, struct_size(a7gc, banks, nbank),
- GFP_KERNEL);
- if (!a7gc)
- return -ENOMEM;
-
- /* Get GPIO clock */
- a7gc->clk = of_clk_get(np, 0);
- if (!IS_ERR(a7gc->clk)) {
- ret = clk_prepare_enable(a7gc->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not enable clock!\n");
- return ret;
- }
- }
-
- /* Get GPIO registers */
- a7gc->reg = of_iomap(np, 0);
- if (!a7gc->reg) {
- dev_err(&pdev->dev, "Could not map GPIO Registers!\n");
- return -ENOMEM;
- }
-
- a7gc->nbank = nbank;
- raw_spin_lock_init(&a7gc->lock);
-
- /* Setup GPIO Chip */
- chip = &a7gc->chip;
- chip->request = atlas7_gpio_request;
- chip->free = atlas7_gpio_free;
- chip->direction_input = atlas7_gpio_direction_input;
- chip->get = atlas7_gpio_get_value;
- chip->direction_output = atlas7_gpio_direction_output;
- chip->set = atlas7_gpio_set_value;
- chip->base = -1;
- /* Each bank supports 32 pins */
- chip->ngpio = NGPIO_OF_BANK * nbank;
- chip->label = kstrdup(np->name, GFP_KERNEL);
- chip->of_node = np;
- chip->of_gpio_n_cells = 2;
- chip->parent = &pdev->dev;
-
- girq = &chip->irq;
- girq->chip = &atlas7_gpio_irq_chip;
- girq->parent_handler = atlas7_gpio_handle_irq;
- girq->num_parents = nbank;
- girq->parents = devm_kcalloc(&pdev->dev, nbank,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- for (idx = 0; idx < nbank; idx++) {
- struct atlas7_gpio_bank *bank;
-
- bank = &a7gc->banks[idx];
- /* Set ctrl registers' base of this bank */
- bank->base = ATLAS7_GPIO_BASE(a7gc, idx);
- bank->gpio_offset = idx * NGPIO_OF_BANK;
-
- /* Get interrupt number from DTS */
- ret = of_irq_get(np, idx);
- if (ret <= 0) {
- dev_err(&pdev->dev,
- "Unable to find IRQ number. ret=%d\n", ret);
- if (!ret)
- ret = -ENXIO;
- goto failed;
- }
- bank->irq = ret;
- girq->parents[idx] = ret;
- }
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
-
- /* Add gpio chip to system */
- ret = gpiochip_add_data(chip, a7gc);
- if (ret) {
- dev_err(&pdev->dev,
- "%pOF: error in probe function with status %d\n",
- np, ret);
- goto failed;
- }
-
- platform_set_drvdata(pdev, a7gc);
- dev_info(&pdev->dev, "add to system.\n");
- return 0;
-failed:
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int atlas7_gpio_suspend_noirq(struct device *dev)
-{
- struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 idx, pin;
-
- for (idx = 0; idx < a7gc->nbank; idx++) {
- bank = &a7gc->banks[idx];
- for (pin = 0; pin < bank->ngpio; pin++) {
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
- bank->sleep_data[pin] = readl(ctrl_reg);
- }
- }
-
- return 0;
-}
-
-static int atlas7_gpio_resume_noirq(struct device *dev)
-{
- struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
- struct atlas7_gpio_bank *bank;
- void __iomem *ctrl_reg;
- u32 idx, pin;
-
- for (idx = 0; idx < a7gc->nbank; idx++) {
- bank = &a7gc->banks[idx];
- for (pin = 0; pin < bank->ngpio; pin++) {
- ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
- writel(bank->sleep_data[pin], ctrl_reg);
- }
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops atlas7_gpio_pm_ops = {
- .suspend_noirq = atlas7_gpio_suspend_noirq,
- .resume_noirq = atlas7_gpio_resume_noirq,
- .freeze_noirq = atlas7_gpio_suspend_noirq,
- .restore_noirq = atlas7_gpio_resume_noirq,
-};
-#endif
-
-static struct platform_driver atlas7_gpio_driver = {
- .driver = {
- .name = "atlas7-gpio",
- .of_match_table = atlas7_gpio_ids,
-#ifdef CONFIG_PM_SLEEP
- .pm = &atlas7_gpio_pm_ops,
-#endif
- },
- .probe = atlas7_gpio_probe,
-};
-
-static int __init atlas7_gpio_init(void)
-{
- return platform_driver_register(&atlas7_gpio_driver);
-}
-subsys_initcall(atlas7_gpio_init);
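For reference, the removed atlas7 driver located each pin's 32-bit control word by bank base plus pin-in-bank offset and performed every state change as a read-modify-write of that single register (see __atlas7_gpio_set_output() above). The following is a minimal standalone C sketch of that pattern, not kernel code: the bit positions, the two-bank array, and the plain-memory "registers" are illustrative assumptions standing in for the real ATLAS7_GPIO_CTL_* masks and ioremapped I/O.

#include <stdint.h>
#include <stdio.h>

#define NGPIO_OF_BANK		32u		/* pins per bank, as in the driver */
#define CTL_INTR_EN_MASK	(1u << 0)	/* assumed bit positions, for */
#define CTL_DATAOUT_MASK	(1u << 1)	/* illustration only */
#define CTL_OUT_EN_MASK		(1u << 2)

static uint32_t ctrl[2][NGPIO_OF_BANK];		/* stand-in for mapped registers */

static uint32_t *ctrl_reg(unsigned int gpio)
{
	/*
	 * bank->gpio_offset is idx * NGPIO_OF_BANK, so for contiguous banks
	 * the bank lookup plus "gpio - bank->gpio_offset" reduces to this.
	 */
	return &ctrl[gpio / NGPIO_OF_BANK][gpio % NGPIO_OF_BANK];
}

/*
 * Mirrors __atlas7_gpio_set_output(): set the level, disable the pin's
 * interrupt and enable the output driver, all in one control word.
 */
static void set_output(unsigned int gpio, int value)
{
	uint32_t *reg = ctrl_reg(gpio);
	uint32_t val = *reg;			/* readl() in the driver */

	if (value)
		val |= CTL_DATAOUT_MASK;
	else
		val &= ~CTL_DATAOUT_MASK;
	val &= ~CTL_INTR_EN_MASK;
	val |= CTL_OUT_EN_MASK;
	*reg = val;				/* writel() in the driver */
}

int main(void)
{
	set_output(33, 1);			/* bank 1, pin 1 */
	printf("ctrl[1][1] = %#x\n", (unsigned int)ctrl[1][1]);
	return 0;
}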
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
deleted file mode 100644
index 49da2a7eba1f..000000000000
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ /dev/null
@@ -1,1131 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pinctrl pads, groups, functions for CSR SiRFprimaII
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/bitops.h>
-
-#include "pinctrl-sirf.h"
-
-/*
- * pad list for the pinmux subsystem
- * refer to CS-131858-DC-6A.xls
- */
-static const struct pinctrl_pin_desc sirfsoc_pads[] = {
- PINCTRL_PIN(0, "gpio0-0"),
- PINCTRL_PIN(1, "gpio0-1"),
- PINCTRL_PIN(2, "gpio0-2"),
- PINCTRL_PIN(3, "gpio0-3"),
- PINCTRL_PIN(4, "pwm0"),
- PINCTRL_PIN(5, "pwm1"),
- PINCTRL_PIN(6, "pwm2"),
- PINCTRL_PIN(7, "pwm3"),
- PINCTRL_PIN(8, "warm_rst_b"),
- PINCTRL_PIN(9, "odo_0"),
- PINCTRL_PIN(10, "odo_1"),
- PINCTRL_PIN(11, "dr_dir"),
- PINCTRL_PIN(12, "viprom_fa"),
- PINCTRL_PIN(13, "scl_1"),
- PINCTRL_PIN(14, "ntrst"),
- PINCTRL_PIN(15, "sda_1"),
- PINCTRL_PIN(16, "x_ldd[16]"),
- PINCTRL_PIN(17, "x_ldd[17]"),
- PINCTRL_PIN(18, "x_ldd[18]"),
- PINCTRL_PIN(19, "x_ldd[19]"),
- PINCTRL_PIN(20, "x_ldd[20]"),
- PINCTRL_PIN(21, "x_ldd[21]"),
- PINCTRL_PIN(22, "x_ldd[22]"),
- PINCTRL_PIN(23, "x_ldd[23], lcdrom_frdy"),
- PINCTRL_PIN(24, "gps_sgn"),
- PINCTRL_PIN(25, "gps_mag"),
- PINCTRL_PIN(26, "gps_clk"),
- PINCTRL_PIN(27, "sd_cd_b_1"),
- PINCTRL_PIN(28, "sd_vcc_on_1"),
- PINCTRL_PIN(29, "sd_wp_b_1"),
- PINCTRL_PIN(30, "sd_clk_3"),
- PINCTRL_PIN(31, "sd_cmd_3"),
-
- PINCTRL_PIN(32, "x_sd_dat_3[0]"),
- PINCTRL_PIN(33, "x_sd_dat_3[1]"),
- PINCTRL_PIN(34, "x_sd_dat_3[2]"),
- PINCTRL_PIN(35, "x_sd_dat_3[3]"),
- PINCTRL_PIN(36, "x_sd_clk_4"),
- PINCTRL_PIN(37, "x_sd_cmd_4"),
- PINCTRL_PIN(38, "x_sd_dat_4[0]"),
- PINCTRL_PIN(39, "x_sd_dat_4[1]"),
- PINCTRL_PIN(40, "x_sd_dat_4[2]"),
- PINCTRL_PIN(41, "x_sd_dat_4[3]"),
- PINCTRL_PIN(42, "x_cko_1"),
- PINCTRL_PIN(43, "x_ac97_bit_clk"),
- PINCTRL_PIN(44, "x_ac97_dout"),
- PINCTRL_PIN(45, "x_ac97_din"),
- PINCTRL_PIN(46, "x_ac97_sync"),
- PINCTRL_PIN(47, "x_txd_1"),
- PINCTRL_PIN(48, "x_txd_2"),
- PINCTRL_PIN(49, "x_rxd_1"),
- PINCTRL_PIN(50, "x_rxd_2"),
- PINCTRL_PIN(51, "x_usclk_0"),
- PINCTRL_PIN(52, "x_utxd_0"),
- PINCTRL_PIN(53, "x_urxd_0"),
- PINCTRL_PIN(54, "x_utfs_0"),
- PINCTRL_PIN(55, "x_urfs_0"),
- PINCTRL_PIN(56, "x_usclk_1"),
- PINCTRL_PIN(57, "x_utxd_1"),
- PINCTRL_PIN(58, "x_urxd_1"),
- PINCTRL_PIN(59, "x_utfs_1"),
- PINCTRL_PIN(60, "x_urfs_1"),
- PINCTRL_PIN(61, "x_usclk_2"),
- PINCTRL_PIN(62, "x_utxd_2"),
- PINCTRL_PIN(63, "x_urxd_2"),
-
- PINCTRL_PIN(64, "x_utfs_2"),
- PINCTRL_PIN(65, "x_urfs_2"),
- PINCTRL_PIN(66, "x_df_we_b"),
- PINCTRL_PIN(67, "x_df_re_b"),
- PINCTRL_PIN(68, "x_txd_0"),
- PINCTRL_PIN(69, "x_rxd_0"),
- PINCTRL_PIN(78, "x_cko_0"),
- PINCTRL_PIN(79, "x_vip_pxd[7]"),
- PINCTRL_PIN(80, "x_vip_pxd[6]"),
- PINCTRL_PIN(81, "x_vip_pxd[5]"),
- PINCTRL_PIN(82, "x_vip_pxd[4]"),
- PINCTRL_PIN(83, "x_vip_pxd[3]"),
- PINCTRL_PIN(84, "x_vip_pxd[2]"),
- PINCTRL_PIN(85, "x_vip_pxd[1]"),
- PINCTRL_PIN(86, "x_vip_pxd[0]"),
- PINCTRL_PIN(87, "x_vip_vsync"),
- PINCTRL_PIN(88, "x_vip_hsync"),
- PINCTRL_PIN(89, "x_vip_pxclk"),
- PINCTRL_PIN(90, "x_sda_0"),
- PINCTRL_PIN(91, "x_scl_0"),
- PINCTRL_PIN(92, "x_df_ry_by"),
- PINCTRL_PIN(93, "x_df_cs_b[1]"),
- PINCTRL_PIN(94, "x_df_cs_b[0]"),
- PINCTRL_PIN(95, "x_l_pclk"),
-
- PINCTRL_PIN(96, "x_l_lck"),
- PINCTRL_PIN(97, "x_l_fck"),
- PINCTRL_PIN(98, "x_l_de"),
- PINCTRL_PIN(99, "x_ldd[0]"),
- PINCTRL_PIN(100, "x_ldd[1]"),
- PINCTRL_PIN(101, "x_ldd[2]"),
- PINCTRL_PIN(102, "x_ldd[3]"),
- PINCTRL_PIN(103, "x_ldd[4]"),
- PINCTRL_PIN(104, "x_ldd[5]"),
- PINCTRL_PIN(105, "x_ldd[6]"),
- PINCTRL_PIN(106, "x_ldd[7]"),
- PINCTRL_PIN(107, "x_ldd[8]"),
- PINCTRL_PIN(108, "x_ldd[9]"),
- PINCTRL_PIN(109, "x_ldd[10]"),
- PINCTRL_PIN(110, "x_ldd[11]"),
- PINCTRL_PIN(111, "x_ldd[12]"),
- PINCTRL_PIN(112, "x_ldd[13]"),
- PINCTRL_PIN(113, "x_ldd[14]"),
- PINCTRL_PIN(114, "x_ldd[15]"),
-
- PINCTRL_PIN(115, "x_usb1_dp"),
- PINCTRL_PIN(116, "x_usb1_dn"),
-};
-
-static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
- BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
- BIT(17) | BIT(18),
- }, {
- .group = 2,
- .mask = BIT(31),
- },
-};
-
-static const struct sirfsoc_padmux lcd_16bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
- .muxmask = lcd_16bits_sirfsoc_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = 0,
-};
-
-static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
- BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
- BIT(17) | BIT(18),
- }, {
- .group = 2,
- .mask = BIT(31),
- }, {
- .group = 0,
- .mask = BIT(16) | BIT(17),
- },
-};
-
-static const struct sirfsoc_padmux lcd_18bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
- .muxmask = lcd_18bits_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = 0,
-};
-
-static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100,
- 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
-
-static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
- BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
- BIT(17) | BIT(18),
- }, {
- .group = 2,
- .mask = BIT(31),
- }, {
- .group = 0,
- .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) |
- BIT(21) | BIT(22) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux lcd_24bits_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
- .muxmask = lcd_24bits_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = 0,
-};
-
-static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23,
- 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
- 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
- {
- .group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
- BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
- BIT(17) | BIT(18),
- }, {
- .group = 2,
- .mask = BIT(31),
- }, {
- .group = 0,
- .mask = BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux lcdrom_padmux = {
- .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
- .muxmask = lcdrom_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(4),
- .funcval = BIT(4),
-};
-
-static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask uart0_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(4) | BIT(5),
- }, {
- .group = 1,
- .mask = BIT(23) | BIT(28),
- },
-};
-
-static const struct sirfsoc_padmux uart0_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart0_muxmask),
- .muxmask = uart0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(9),
- .funcval = BIT(9),
-};
-
-static const unsigned uart0_pins[] = { 55, 60, 68, 69 };
-
-static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(4) | BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask),
- .muxmask = uart0_nostreamctrl_muxmask,
-};
-
-static const unsigned uart0_nostreamctrl_pins[] = { 68, 69 };
-
-static const struct sirfsoc_muxmask uart1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(15) | BIT(17),
- },
-};
-
-static const struct sirfsoc_padmux uart1_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart1_muxmask),
- .muxmask = uart1_muxmask,
-};
-
-static const unsigned uart1_pins[] = { 47, 49 };
-
-static const struct sirfsoc_muxmask uart2_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(16) | BIT(18) | BIT(24) | BIT(27),
- },
-};
-
-static const struct sirfsoc_padmux uart2_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart2_muxmask),
- .muxmask = uart2_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(10),
- .funcval = BIT(10),
-};
-
-static const unsigned uart2_pins[] = { 48, 50, 56, 59 };
-
-static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(16) | BIT(18),
- },
-};
-
-static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask),
- .muxmask = uart2_nostreamctrl_muxmask,
-};
-
-static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 };
-
-static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(30) | BIT(31),
- }, {
- .group = 1,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc3_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
- .muxmask = sdmmc3_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(7),
- .funcval = 0,
-};
-
-static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 };
-
-static const struct sirfsoc_muxmask spi0_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux spi0_padmux = {
- .muxmask_counts = ARRAY_SIZE(spi0_muxmask),
- .muxmask = spi0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(7),
- .funcval = BIT(7),
-};
-
-static const unsigned spi0_pins[] = { 32, 33, 34, 35 };
-
-static const struct sirfsoc_muxmask sdmmc4_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc4_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc4_muxmask),
- .muxmask = sdmmc4_muxmask,
-};
-
-static const unsigned sdmmc4_pins[] = { 36, 37, 38, 39, 40, 41 };
-
-static const struct sirfsoc_muxmask cko1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(10),
- },
-};
-
-static const struct sirfsoc_padmux cko1_padmux = {
- .muxmask_counts = ARRAY_SIZE(cko1_muxmask),
- .muxmask = cko1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = 0,
-};
-
-static const unsigned cko1_pins[] = { 42 };
-
-static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(10),
- },
-};
-
-static const struct sirfsoc_padmux i2s_mclk_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask),
- .muxmask = i2s_mclk_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = BIT(3),
-};
-
-static const unsigned i2s_mclk_pins[] = { 42 };
-
-static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19),
- },
-};
-
-static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask),
- .muxmask = i2s_ext_clk_input_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(2),
- .funcval = BIT(2),
-};
-
-static const unsigned i2s_ext_clk_input_pins[] = { 51 };
-
-static const struct sirfsoc_muxmask i2s_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux i2s_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_muxmask),
- .muxmask = i2s_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
-};
-
-static const unsigned i2s_pins[] = { 43, 44, 45, 46 };
-
-static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux i2s_no_din_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask),
- .muxmask = i2s_no_din_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
-};
-
-static const unsigned i2s_no_din_pins[] = { 43, 44, 46 };
-
-static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14)
- | BIT(23) | BIT(28),
- },
-};
-
-static const struct sirfsoc_padmux i2s_6chn_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask),
- .muxmask = i2s_6chn_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(9),
- .funcval = BIT(1) | BIT(9),
-};
-
-static const unsigned i2s_6chn_pins[] = { 43, 44, 45, 46, 55, 60 };
-
-static const struct sirfsoc_muxmask ac97_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux ac97_padmux = {
- .muxmask_counts = ARRAY_SIZE(ac97_muxmask),
- .muxmask = ac97_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(8),
- .funcval = 0,
-};
-
-static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
-
-static const struct sirfsoc_muxmask spi1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux spi1_padmux = {
- .muxmask_counts = ARRAY_SIZE(spi1_muxmask),
- .muxmask = spi1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(8),
- .funcval = BIT(8),
-};
-
-static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
-
-static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(27) | BIT(28) | BIT(29),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc1_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
- .muxmask = sdmmc1_muxmask,
-};
-
-static const unsigned sdmmc1_pins[] = { 27, 28, 29 };
-
-static const struct sirfsoc_muxmask gps_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(24) | BIT(25) | BIT(26),
- },
-};
-
-static const struct sirfsoc_padmux gps_padmux = {
- .muxmask_counts = ARRAY_SIZE(gps_muxmask),
- .muxmask = gps_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(12) | BIT(13) | BIT(14),
- .funcval = BIT(12),
-};
-
-static const unsigned gps_pins[] = { 24, 25, 26 };
-
-static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(24) | BIT(25) | BIT(26),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc5_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
- .muxmask = sdmmc5_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(13) | BIT(14),
- .funcval = BIT(13) | BIT(14),
-};
-
-static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
-
-static const struct sirfsoc_muxmask usp0_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux usp0_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_muxmask),
- .muxmask = usp0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9),
- .funcval = 0,
-};
-
-static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
-
-static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
- },
-};
-
-static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
- .muxmask = usp0_only_utfs_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(6),
- .funcval = 0,
-};
-
-static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
-
-static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
- },
-};
-
-static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
- .muxmask = usp0_only_urfs_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(2) | BIT(9),
- .funcval = 0,
-};
-
-static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
-
-static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(20) | BIT(21),
- },
-};
-
-static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
- .muxmask = usp0_uart_nostreamctrl_muxmask,
-};
-
-static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
-
-static const struct sirfsoc_muxmask usp1_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28),
- },
-};
-
-static const struct sirfsoc_padmux usp1_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp1_muxmask),
- .muxmask = usp1_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11),
- .funcval = 0,
-};
-
-static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 };
-
-static const struct sirfsoc_muxmask usp1_uart_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(25) | BIT(26),
- },
-};
-
-static const struct sirfsoc_padmux usp1_uart_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp1_uart_nostreamctrl_muxmask),
- .muxmask = usp1_uart_nostreamctrl_muxmask,
-};
-
-static const unsigned usp1_uart_nostreamctrl_pins[] = { 57, 58 };
-
-static const struct sirfsoc_muxmask usp2_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(29) | BIT(30) | BIT(31),
- }, {
- .group = 2,
- .mask = BIT(0) | BIT(1),
- },
-};
-
-static const struct sirfsoc_padmux usp2_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp2_muxmask),
- .muxmask = usp2_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(13) | BIT(14),
- .funcval = 0,
-};
-
-static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 };
-
-static const struct sirfsoc_muxmask usp2_uart_nostreamctrl_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(30) | BIT(31),
- },
-};
-
-static const struct sirfsoc_padmux usp2_uart_nostreamctrl_padmux = {
- .muxmask_counts = ARRAY_SIZE(usp2_uart_nostreamctrl_muxmask),
- .muxmask = usp2_uart_nostreamctrl_muxmask,
-};
-
-static const unsigned usp2_uart_nostreamctrl_pins[] = { 62, 63 };
-
-static const struct sirfsoc_muxmask nand_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30),
- },
-};
-
-static const struct sirfsoc_padmux nand_padmux = {
- .muxmask_counts = ARRAY_SIZE(nand_muxmask),
- .muxmask = nand_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5),
- .funcval = 0,
-};
-
-static const unsigned nand_pins[] = { 64, 65, 92, 93, 94 };
-
-static const struct sirfsoc_padmux sdmmc0_padmux = {
- .muxmask_counts = 0,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5),
- .funcval = 0,
-};
-
-static const unsigned sdmmc0_pins[] = { };
-
-static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(2) | BIT(3),
- },
-};
-
-static const struct sirfsoc_padmux sdmmc2_padmux = {
- .muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
- .muxmask = sdmmc2_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(5),
- .funcval = BIT(5),
-};
-
-static const unsigned sdmmc2_pins[] = { 66, 67 };
-
-static const struct sirfsoc_muxmask cko0_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(14),
- },
-};
-
-static const struct sirfsoc_padmux cko0_padmux = {
- .muxmask_counts = ARRAY_SIZE(cko0_muxmask),
- .muxmask = cko0_muxmask,
-};
-
-static const unsigned cko0_pins[] = { 78 };
-
-static const struct sirfsoc_muxmask vip_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
- | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
- BIT(25),
- },
-};
-
-static const struct sirfsoc_padmux vip_padmux = {
- .muxmask_counts = ARRAY_SIZE(vip_muxmask),
- .muxmask = vip_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(0),
- .funcval = 0,
-};
-
-static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89 };
-
-static const struct sirfsoc_muxmask i2c0_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(26) | BIT(27),
- },
-};
-
-static const struct sirfsoc_padmux i2c0_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2c0_muxmask),
- .muxmask = i2c0_muxmask,
-};
-
-static const unsigned i2c0_pins[] = { 90, 91 };
-
-static const struct sirfsoc_muxmask i2c1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(13) | BIT(15),
- },
-};
-
-static const struct sirfsoc_padmux i2c1_padmux = {
- .muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
- .muxmask = i2c1_muxmask,
-};
-
-static const unsigned i2c1_pins[] = { 13, 15 };
-
-static const struct sirfsoc_muxmask viprom_muxmask[] = {
- {
- .group = 2,
- .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
- | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
- BIT(25),
- }, {
- .group = 0,
- .mask = BIT(12),
- },
-};
-
-static const struct sirfsoc_padmux viprom_padmux = {
- .muxmask_counts = ARRAY_SIZE(viprom_muxmask),
- .muxmask = viprom_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(0),
- .funcval = BIT(0),
-};
-
-static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86,
- 87, 88, 89 };
-
-static const struct sirfsoc_muxmask pwm0_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(4),
- },
-};
-
-static const struct sirfsoc_padmux pwm0_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
- .muxmask = pwm0_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(12),
- .funcval = 0,
-};
-
-static const unsigned pwm0_pins[] = { 4 };
-
-static const struct sirfsoc_muxmask pwm1_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(5),
- },
-};
-
-static const struct sirfsoc_padmux pwm1_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm1_muxmask),
- .muxmask = pwm1_muxmask,
-};
-
-static const unsigned pwm1_pins[] = { 5 };
-
-static const struct sirfsoc_muxmask pwm2_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(6),
- },
-};
-
-static const struct sirfsoc_padmux pwm2_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm2_muxmask),
- .muxmask = pwm2_muxmask,
-};
-
-static const unsigned pwm2_pins[] = { 6 };
-
-static const struct sirfsoc_muxmask pwm3_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(7),
- },
-};
-
-static const struct sirfsoc_padmux pwm3_padmux = {
- .muxmask_counts = ARRAY_SIZE(pwm3_muxmask),
- .muxmask = pwm3_muxmask,
-};
-
-static const unsigned pwm3_pins[] = { 7 };
-
-static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(8),
- },
-};
-
-static const struct sirfsoc_padmux warm_rst_padmux = {
- .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
- .muxmask = warm_rst_muxmask,
-};
-
-static const unsigned warm_rst_pins[] = { 8 };
-
-static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(22),
- },
-};
-static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = {
- .muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask),
- .muxmask = usb0_utmi_drvbus_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(6),
- .funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */
-};
-
-static const unsigned usb0_utmi_drvbus_pins[] = { 54 };
-
-static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
- {
- .group = 1,
- .mask = BIT(27),
- },
-};
-
-static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
- .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
- .muxmask = usb1_utmi_drvbus_muxmask,
- .ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(11),
- .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
-};
-
-static const unsigned usb1_utmi_drvbus_pins[] = { 59 };
-
-static const struct sirfsoc_padmux usb1_dp_dn_padmux = {
- .muxmask_counts = 0,
- .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
- .funcmask = BIT(2),
- .funcval = BIT(2),
-};
-
-static const unsigned usb1_dp_dn_pins[] = { 115, 116 };
-
-static const struct sirfsoc_padmux uart1_route_io_usb1_padmux = {
- .muxmask_counts = 0,
- .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
- .funcmask = BIT(2),
- .funcval = 0,
-};
-
-static const unsigned uart1_route_io_usb1_pins[] = { 115, 116 };
-
-static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
- {
- .group = 0,
- .mask = BIT(9) | BIT(10) | BIT(11),
- },
-};
-
-static const struct sirfsoc_padmux pulse_count_padmux = {
- .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask),
- .muxmask = pulse_count_muxmask,
-};
-
-static const unsigned pulse_count_pins[] = { 9, 10, 11 };
-
-static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
- SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins),
- SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins),
- SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
- SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
- SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
- SIRFSOC_PIN_GROUP("uart0_nostreamctrlgrp", uart0_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
- SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
- SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
- SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
- usp0_uart_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
- SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
- SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
- SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
- usp1_uart_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("usp2grp", usp2_pins),
- SIRFSOC_PIN_GROUP("usp2_uart_nostreamctrl_grp",
- usp2_uart_nostreamctrl_pins),
- SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
- SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
- SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
- SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins),
- SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins),
- SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins),
- SIRFSOC_PIN_GROUP("vipgrp", vip_pins),
- SIRFSOC_PIN_GROUP("vipromgrp", viprom_pins),
- SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins),
- SIRFSOC_PIN_GROUP("cko0grp", cko0_pins),
- SIRFSOC_PIN_GROUP("cko1grp", cko1_pins),
- SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins),
- SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins),
- SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins),
- SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins),
- SIRFSOC_PIN_GROUP("sdmmc4grp", sdmmc4_pins),
- SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
- SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins),
- SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
- SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
- SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
- SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
- SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins),
- SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins),
- SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
- SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins),
- SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins),
- SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
- SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
- SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
- SIRFSOC_PIN_GROUP("spi1grp", spi1_pins),
- SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
-};
-
-static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" };
-static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
-static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
-static const char * const lcdromgrp[] = { "lcdrom_grp" };
-static const char * const uart0grp[] = { "uart0grp" };
-static const char * const uart0_nostreamctrlgrp[] = { "uart0_nostreamctrlgrp" };
-static const char * const uart1grp[] = { "uart1grp" };
-static const char * const uart2grp[] = { "uart2grp" };
-static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
-static const char * const usp0grp[] = { "usp0grp" };
-static const char * const usp0_uart_nostreamctrl_grp[] = {
- "usp0_uart_nostreamctrl_grp"
-};
-static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
-static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
-static const char * const usp1grp[] = { "usp1grp" };
-static const char * const usp1_uart_nostreamctrl_grp[] = {
- "usp1_uart_nostreamctrl_grp"
-};
-static const char * const usp2grp[] = { "usp2grp" };
-static const char * const usp2_uart_nostreamctrl_grp[] = {
- "usp2_uart_nostreamctrl_grp"
-};
-static const char * const i2c0grp[] = { "i2c0grp" };
-static const char * const i2c1grp[] = { "i2c1grp" };
-static const char * const pwm0grp[] = { "pwm0grp" };
-static const char * const pwm1grp[] = { "pwm1grp" };
-static const char * const pwm2grp[] = { "pwm2grp" };
-static const char * const pwm3grp[] = { "pwm3grp" };
-static const char * const vipgrp[] = { "vipgrp" };
-static const char * const vipromgrp[] = { "vipromgrp" };
-static const char * const warm_rstgrp[] = { "warm_rstgrp" };
-static const char * const cko0grp[] = { "cko0grp" };
-static const char * const cko1grp[] = { "cko1grp" };
-static const char * const sdmmc0grp[] = { "sdmmc0grp" };
-static const char * const sdmmc1grp[] = { "sdmmc1grp" };
-static const char * const sdmmc2grp[] = { "sdmmc2grp" };
-static const char * const sdmmc3grp[] = { "sdmmc3grp" };
-static const char * const sdmmc4grp[] = { "sdmmc4grp" };
-static const char * const sdmmc5grp[] = { "sdmmc5grp" };
-static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
-static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
-static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
-static const char * const
- uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
-static const char * const pulse_countgrp[] = { "pulse_countgrp" };
-static const char * const i2smclkgrp[] = { "i2smclkgrp" };
-static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" };
-static const char * const i2sgrp[] = { "i2sgrp" };
-static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" };
-static const char * const i2s_6chngrp[] = { "i2s_6chngrp" };
-static const char * const ac97grp[] = { "ac97grp" };
-static const char * const nandgrp[] = { "nandgrp" };
-static const char * const spi0grp[] = { "spi0grp" };
-static const char * const spi1grp[] = { "spi1grp" };
-static const char * const gpsgrp[] = { "gpsgrp" };
-
-static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
- SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
- SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
- SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
- SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl",
- uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
- SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
- SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl",
- uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
- usp0_uart_nostreamctrl_grp, usp0_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_utfs",
- usp0_only_utfs_grp, usp0_only_utfs_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_urfs",
- usp0_only_urfs_grp, usp0_only_urfs_padmux),
- SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
- SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
- usp1_uart_nostreamctrl_grp, usp1_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux),
- SIRFSOC_PMX_FUNCTION("usp2_uart_nostreamctrl",
- usp2_uart_nostreamctrl_grp, usp2_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
- SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
- SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
- SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux),
- SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux),
- SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux),
- SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux),
- SIRFSOC_PMX_FUNCTION("viprom", vipromgrp, viprom_padmux),
- SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux),
- SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux),
- SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
- SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus",
- usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus",
- usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
- SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1",
- uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
- SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp,
- i2s_ext_clk_input_padmux),
- SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux),
- SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux),
- SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
- SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
- SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
- SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux),
- SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux),
-};
-
-struct sirfsoc_pinctrl_data prima2_pinctrl_data = {
- (struct pinctrl_pin_desc *)sirfsoc_pads,
- ARRAY_SIZE(sirfsoc_pads),
- (struct sirfsoc_pin_group *)sirfsoc_pin_groups,
- ARRAY_SIZE(sirfsoc_pin_groups),
- (struct sirfsoc_pmx_func *)sirfsoc_pmx_functions,
- ARRAY_SIZE(sirfsoc_pmx_functions),
-};
-
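Before the next file: the pad and function tables above are pure data; they were consumed by sirfsoc_pinmux_endisable() in pinctrl-sirf.c below, which clears each muxmask bit in the per-group GPIO PAD_EN register (a cleared bit hands the pad to the peripheral; gpio_request_enable() sets it back for GPIO use), then applies funcmask/funcval to the shared rsc control register. A small user-space model of that flow follows, with plain variables as assumed stand-ins for the ioremapped registers and an arbitrary all-ones initial PAD_EN state.

#include <stdint.h>
#include <stdio.h>

struct muxmask {
	unsigned int	group;
	uint32_t	mask;
};

struct padmux {
	unsigned int		nmask;
	const struct muxmask	*muxmask;
	uint32_t		funcmask;
	uint32_t		funcval;
};

static uint32_t pad_en[4] = { ~0u, ~0u, ~0u, ~0u };	/* assume all pads GPIO */
static uint32_t rsc_pin_mux;			/* SIRFSOC_RSC_PIN_MUX stand-in */

static void apply_padmux(const struct padmux *mux)
{
	unsigned int i;

	/* Clearing a PAD_EN bit routes the pad to the peripheral function. */
	for (i = 0; i < mux->nmask; i++)
		pad_en[mux->muxmask[i].group] &= ~mux->muxmask[i].mask;

	/* funcmask/funcval then select which peripheral owns shared pads. */
	if (mux->funcmask)
		rsc_pin_mux = (rsc_pin_mux & ~mux->funcmask) | mux->funcval;
}

int main(void)
{
	/* Values copied from the uart0_muxmask/uart0_padmux tables above. */
	static const struct muxmask uart0_mask[] = {
		{ 2, (1u << 4) | (1u << 5) },
		{ 1, (1u << 23) | (1u << 28) },
	};
	static const struct padmux uart0 = {
		.nmask		= 2,
		.muxmask	= uart0_mask,
		.funcmask	= 1u << 9,
		.funcval	= 1u << 9,
	};

	apply_padmux(&uart0);
	printf("pad_en[1]=%#x pad_en[2]=%#x rsc=%#x\n",
	       (unsigned int)pad_en[1], (unsigned int)pad_en[2],
	       (unsigned int)rsc_pin_mux);
	return 0;
}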
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
deleted file mode 100644
index 63a287d5795f..000000000000
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ /dev/null
@@ -1,894 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pinmux driver for CSR SiRFprimaII
- *
- * Authors:
- * Rongjun Ying <rongjun.ying@csr.com>
- * Yuping Luo <yuping.luo@csr.com>
- * Barry Song <baohua.song@csr.com>
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/of_gpio.h>
-
-#include "pinctrl-sirf.h"
-
-#define DRIVER_NAME "pinmux-sirf"
-
-struct sirfsoc_gpio_bank {
- int id;
- int parent_irq;
- spinlock_t lock;
-};
-
-struct sirfsoc_gpio_chip {
- struct of_mm_gpio_chip chip;
- struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS];
- spinlock_t lock;
-};
-
-static struct sirfsoc_pin_group *sirfsoc_pin_groups;
-static int sirfsoc_pingrp_cnt;
-
-static int sirfsoc_get_groups_count(struct pinctrl_dev *pctldev)
-{
- return sirfsoc_pingrp_cnt;
-}
-
-static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return sirfsoc_pin_groups[selector].name;
-}
-
-static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned selector,
- const unsigned **pins,
- unsigned *num_pins)
-{
- *pins = sirfsoc_pin_groups[selector].pins;
- *num_pins = sirfsoc_pin_groups[selector].num_pins;
- return 0;
-}
-
-static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned offset)
-{
- seq_puts(s, " " DRIVER_NAME);
-}
-
-static int sirfsoc_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
-{
- struct sirfsoc_pmx *spmx = pinctrl_dev_get_drvdata(pctldev);
- struct device_node *np;
- struct property *prop;
- const char *function, *group;
- int ret, index = 0, count = 0;
-
- /* calculate number of maps required */
- for_each_child_of_node(np_config, np) {
- ret = of_property_read_string(np, "sirf,function", &function);
- if (ret < 0) {
- of_node_put(np);
- return ret;
- }
-
- ret = of_property_count_strings(np, "sirf,pins");
- if (ret < 0) {
- of_node_put(np);
- return ret;
- }
-
- count += ret;
- }
-
- if (!count) {
- dev_err(spmx->dev, "No child nodes passed via DT\n");
- return -ENODEV;
- }
-
- *map = kcalloc(count, sizeof(**map), GFP_KERNEL);
- if (!*map)
- return -ENOMEM;
-
- for_each_child_of_node(np_config, np) {
- of_property_read_string(np, "sirf,function", &function);
- of_property_for_each_string(np, "sirf,pins", prop, group) {
- (*map)[index].type = PIN_MAP_TYPE_MUX_GROUP;
- (*map)[index].data.mux.group = group;
- (*map)[index].data.mux.function = function;
- index++;
- }
- }
-
- *num_maps = count;
-
- return 0;
-}
-
-static void sirfsoc_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- kfree(map);
-}
-
-static const struct pinctrl_ops sirfsoc_pctrl_ops = {
- .get_groups_count = sirfsoc_get_groups_count,
- .get_group_name = sirfsoc_get_group_name,
- .get_group_pins = sirfsoc_get_group_pins,
- .pin_dbg_show = sirfsoc_pin_dbg_show,
- .dt_node_to_map = sirfsoc_dt_node_to_map,
- .dt_free_map = sirfsoc_dt_free_map,
-};
-
-static struct sirfsoc_pmx_func *sirfsoc_pmx_functions;
-static int sirfsoc_pmxfunc_cnt;
-
-static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx,
- unsigned selector, bool enable)
-{
- int i;
- const struct sirfsoc_padmux *mux =
- sirfsoc_pmx_functions[selector].padmux;
- const struct sirfsoc_muxmask *mask = mux->muxmask;
-
- for (i = 0; i < mux->muxmask_counts; i++) {
- u32 muxval;
- muxval = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(mask[i].group));
- if (enable)
- muxval = muxval & ~mask[i].mask;
- else
- muxval = muxval | mask[i].mask;
- writel(muxval, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(mask[i].group));
- }
-
- if (mux->funcmask && enable) {
- u32 func_en_val;
-
- func_en_val =
- readl(spmx->rsc_virtbase + mux->ctrlreg);
- func_en_val =
- (func_en_val & ~mux->funcmask) | (mux->funcval);
- writel(func_en_val, spmx->rsc_virtbase + mux->ctrlreg);
- }
-}
-
-static int sirfsoc_pinmux_set_mux(struct pinctrl_dev *pmxdev,
- unsigned selector,
- unsigned group)
-{
- struct sirfsoc_pmx *spmx;
-
- spmx = pinctrl_dev_get_drvdata(pmxdev);
- sirfsoc_pinmux_endisable(spmx, selector, true);
-
- return 0;
-}
-
-static int sirfsoc_pinmux_get_funcs_count(struct pinctrl_dev *pmxdev)
-{
- return sirfsoc_pmxfunc_cnt;
-}
-
-static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return sirfsoc_pmx_functions[selector].name;
-}
-
-static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev,
- unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- *groups = sirfsoc_pmx_functions[selector].groups;
- *num_groups = sirfsoc_pmx_functions[selector].num_groups;
- return 0;
-}
-
-static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
- struct pinctrl_gpio_range *range, unsigned offset)
-{
- struct sirfsoc_pmx *spmx;
-
- int group = range->id;
-
- u32 muxval;
-
- spmx = pinctrl_dev_get_drvdata(pmxdev);
-
- muxval = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(group));
- muxval = muxval | (1 << (offset - range->pin_base));
- writel(muxval, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(group));
-
- return 0;
-}
-
-static const struct pinmux_ops sirfsoc_pinmux_ops = {
- .set_mux = sirfsoc_pinmux_set_mux,
- .get_functions_count = sirfsoc_pinmux_get_funcs_count,
- .get_function_name = sirfsoc_pinmux_get_func_name,
- .get_function_groups = sirfsoc_pinmux_get_groups,
- .gpio_request_enable = sirfsoc_pinmux_request_gpio,
-};
-
-static struct pinctrl_desc sirfsoc_pinmux_desc = {
- .name = DRIVER_NAME,
- .pctlops = &sirfsoc_pctrl_ops,
- .pmxops = &sirfsoc_pinmux_ops,
- .owner = THIS_MODULE,
-};
-
-static void __iomem *sirfsoc_rsc_of_iomap(void)
-{
- const struct of_device_id rsc_ids[] = {
- { .compatible = "sirf,prima2-rsc" },
- {}
- };
- struct device_node *np;
-
- np = of_find_matching_node(NULL, rsc_ids);
- if (!np)
- panic("unable to find compatible rsc node in dtb\n");
-
- return of_iomap(np, 0);
-}
-
-static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags)
-{
- if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
- return -EINVAL;
-
- if (flags)
- *flags = gpiospec->args[1];
-
- return gpiospec->args[0];
-}
-
-static const struct of_device_id pinmux_ids[] = {
- { .compatible = "sirf,prima2-pinctrl", .data = &prima2_pinctrl_data, },
- { .compatible = "sirf,atlas6-pinctrl", .data = &atlas6_pinctrl_data, },
- {}
-};
-
-static int sirfsoc_pinmux_probe(struct platform_device *pdev)
-{
- int ret;
- struct sirfsoc_pmx *spmx;
- struct device_node *np = pdev->dev.of_node;
- const struct sirfsoc_pinctrl_data *pdata;
-
- /* Create state holders etc for this driver */
- spmx = devm_kzalloc(&pdev->dev, sizeof(*spmx), GFP_KERNEL);
- if (!spmx)
- return -ENOMEM;
-
- spmx->dev = &pdev->dev;
-
- platform_set_drvdata(pdev, spmx);
-
- spmx->gpio_virtbase = of_iomap(np, 0);
- if (!spmx->gpio_virtbase) {
- dev_err(&pdev->dev, "can't map gpio registers\n");
- return -ENOMEM;
- }
-
- spmx->rsc_virtbase = sirfsoc_rsc_of_iomap();
- if (!spmx->rsc_virtbase) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "can't map rsc registers\n");
- goto out_no_rsc_remap;
- }
-
- pdata = of_match_node(pinmux_ids, np)->data;
- sirfsoc_pin_groups = pdata->grps;
- sirfsoc_pingrp_cnt = pdata->grps_cnt;
- sirfsoc_pmx_functions = pdata->funcs;
- sirfsoc_pmxfunc_cnt = pdata->funcs_cnt;
- sirfsoc_pinmux_desc.pins = pdata->pads;
- sirfsoc_pinmux_desc.npins = pdata->pads_cnt;
-
- /* Now register the pin controller and all pins it handles */
- spmx->pmx = pinctrl_register(&sirfsoc_pinmux_desc, &pdev->dev, spmx);
- if (IS_ERR(spmx->pmx)) {
- dev_err(&pdev->dev, "could not register SIRFSOC pinmux driver\n");
- ret = PTR_ERR(spmx->pmx);
- goto out_no_pmx;
- }
-
- dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n");
-
- return 0;
-
-out_no_pmx:
- iounmap(spmx->rsc_virtbase);
-out_no_rsc_remap:
- iounmap(spmx->gpio_virtbase);
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_pinmux_suspend_noirq(struct device *dev)
-{
- int i, j;
- struct sirfsoc_pmx *spmx = dev_get_drvdata(dev);
-
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- for (j = 0; j < SIRFSOC_GPIO_BANK_SIZE; j++) {
- spmx->gpio_regs[i][j] = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_CTRL(i, j));
- }
- spmx->ints_regs[i] = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_INT_STATUS(i));
- spmx->paden_regs[i] = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(i));
- }
- spmx->dspen_regs = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_DSP_EN0);
-
- for (i = 0; i < 3; i++)
- spmx->rsc_regs[i] = readl(spmx->rsc_virtbase + 4 * i);
-
- return 0;
-}
-
-static int sirfsoc_pinmux_resume_noirq(struct device *dev)
-{
- int i, j;
- struct sirfsoc_pmx *spmx = dev_get_drvdata(dev);
-
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- for (j = 0; j < SIRFSOC_GPIO_BANK_SIZE; j++) {
- writel(spmx->gpio_regs[i][j], spmx->gpio_virtbase +
- SIRFSOC_GPIO_CTRL(i, j));
- }
- writel(spmx->ints_regs[i], spmx->gpio_virtbase +
- SIRFSOC_GPIO_INT_STATUS(i));
- writel(spmx->paden_regs[i], spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(i));
- }
- writel(spmx->dspen_regs, spmx->gpio_virtbase + SIRFSOC_GPIO_DSP_EN0);
-
- for (i = 0; i < 3; i++)
- writel(spmx->rsc_regs[i], spmx->rsc_virtbase + 4 * i);
-
- return 0;
-}
-
-static const struct dev_pm_ops sirfsoc_pinmux_pm_ops = {
- .suspend_noirq = sirfsoc_pinmux_suspend_noirq,
- .resume_noirq = sirfsoc_pinmux_resume_noirq,
- .freeze_noirq = sirfsoc_pinmux_suspend_noirq,
- .restore_noirq = sirfsoc_pinmux_resume_noirq,
-};
-#endif
-
-static struct platform_driver sirfsoc_pinmux_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = pinmux_ids,
-#ifdef CONFIG_PM_SLEEP
- .pm = &sirfsoc_pinmux_pm_ops,
-#endif
- },
- .probe = sirfsoc_pinmux_probe,
-};
-
-static int __init sirfsoc_pinmux_init(void)
-{
- return platform_driver_register(&sirfsoc_pinmux_driver);
-}
-arch_initcall(sirfsoc_pinmux_init);
-
-static inline struct sirfsoc_gpio_bank *
-sirfsoc_gpio_to_bank(struct sirfsoc_gpio_chip *sgpio, unsigned int offset)
-{
- return &sgpio->sgpio_bank[offset / SIRFSOC_GPIO_BANK_SIZE];
-}
-
-static inline int sirfsoc_gpio_to_bankoff(unsigned int offset)
-{
- return offset % SIRFSOC_GPIO_BANK_SIZE;
-}
-
-static void sirfsoc_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(gc);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, d->hwirq);
- int idx = sirfsoc_gpio_to_bankoff(d->hwirq);
- u32 val, offset;
- unsigned long flags;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&sgpio->lock, flags);
-
- val = readl(sgpio->chip.regs + offset);
-
- writel(val, sgpio->chip.regs + offset);
-
- spin_unlock_irqrestore(&sgpio->lock, flags);
-}
-
-static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_chip *sgpio,
- struct sirfsoc_gpio_bank *bank,
- int idx)
-{
- u32 val, offset;
- unsigned long flags;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&sgpio->lock, flags);
-
- val = readl(sgpio->chip.regs + offset);
- val &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
- val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
- writel(val, sgpio->chip.regs + offset);
-
- spin_unlock_irqrestore(&sgpio->lock, flags);
-}
-
-static void sirfsoc_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(gc);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, d->hwirq);
-
- __sirfsoc_gpio_irq_mask(sgpio, bank, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
-}
-
-static void sirfsoc_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(gc);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, d->hwirq);
- int idx = sirfsoc_gpio_to_bankoff(d->hwirq);
- u32 val, offset;
- unsigned long flags;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&sgpio->lock, flags);
-
- val = readl(sgpio->chip.regs + offset);
- val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
- val |= SIRFSOC_GPIO_CTL_INTR_EN_MASK;
- writel(val, sgpio->chip.regs + offset);
-
- spin_unlock_irqrestore(&sgpio->lock, flags);
-}
-
-static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(gc);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, d->hwirq);
- int idx = sirfsoc_gpio_to_bankoff(d->hwirq);
- u32 val, offset;
- unsigned long flags;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&sgpio->lock, flags);
-
- val = readl(sgpio->chip.regs + offset);
- val &= ~(SIRFSOC_GPIO_CTL_INTR_STS_MASK | SIRFSOC_GPIO_CTL_OUT_EN_MASK);
-
- switch (type) {
- case IRQ_TYPE_NONE:
- break;
- case IRQ_TYPE_EDGE_RISING:
- val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
- val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
- val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
- SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
- val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
- val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
- break;
- }
-
- writel(val, sgpio->chip.regs + offset);
-
- spin_unlock_irqrestore(&sgpio->lock, flags);
-
- return 0;
-}
-
-static struct irq_chip sirfsoc_irq_chip = {
- .name = "sirf-gpio-irq",
- .irq_ack = sirfsoc_gpio_irq_ack,
- .irq_mask = sirfsoc_gpio_irq_mask,
- .irq_unmask = sirfsoc_gpio_irq_unmask,
- .irq_set_type = sirfsoc_gpio_irq_type,
-};
-
-static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
-{
- unsigned int irq = irq_desc_get_irq(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(gc);
- struct sirfsoc_gpio_bank *bank;
- u32 status, ctrl;
- int idx = 0;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int i;
-
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- bank = &sgpio->sgpio_bank[i];
- if (bank->parent_irq == irq)
- break;
- }
- BUG_ON(i == SIRFSOC_GPIO_NO_OF_BANKS);
-
- chained_irq_enter(chip, desc);
-
- status = readl(sgpio->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id));
- if (!status) {
- pr_warn("%s: gpio id %d status %#x, no interrupt is flagged\n",
- __func__, bank->id, status);
- handle_bad_irq(desc);
- chained_irq_exit(chip, desc);
- return;
- }
-
- while (status) {
- ctrl = readl(sgpio->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, idx));
-
- /*
- * Here we must check whether the corresponding GPIO's interrupt
- * has been enabled, otherwise just skip it
- */
- if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
- pr_debug("%s: gpio id %d idx %d happens\n",
- __func__, bank->id, idx);
- generic_handle_irq(irq_find_mapping(gc->irq.domain, idx +
- bank->id * SIRFSOC_GPIO_BANK_SIZE));
- }
-
- idx++;
- status = status >> 1;
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static inline void sirfsoc_gpio_set_input(struct sirfsoc_gpio_chip *sgpio,
- unsigned ctrl_offset)
-{
- u32 val;
-
- val = readl(sgpio->chip.regs + ctrl_offset);
- val &= ~SIRFSOC_GPIO_CTL_OUT_EN_MASK;
- writel(val, sgpio->chip.regs + ctrl_offset);
-}
-
-static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
- unsigned long flags;
-
- if (pinctrl_gpio_request(chip->base + offset))
- return -ENODEV;
-
- spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * default status:
- * set direction as input and mask irq
- */
- sirfsoc_gpio_set_input(sgpio, SIRFSOC_GPIO_CTRL(bank->id, offset));
- __sirfsoc_gpio_irq_mask(sgpio, bank, offset);
-
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
-static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
- unsigned long flags;
-
- spin_lock_irqsave(&bank->lock, flags);
-
- __sirfsoc_gpio_irq_mask(sgpio, bank, offset);
- sirfsoc_gpio_set_input(sgpio, SIRFSOC_GPIO_CTRL(bank->id, offset));
-
- spin_unlock_irqrestore(&bank->lock, flags);
-
- pinctrl_gpio_free(chip->base + offset);
-}
-
-static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, gpio);
- int idx = sirfsoc_gpio_to_bankoff(gpio);
- unsigned long flags;
- unsigned offset;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&bank->lock, flags);
-
- sirfsoc_gpio_set_input(sgpio, offset);
-
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
-static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_chip *sgpio,
- struct sirfsoc_gpio_bank *bank,
- unsigned offset,
- int value)
-{
- u32 out_ctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&bank->lock, flags);
-
- out_ctrl = readl(sgpio->chip.regs + offset);
- if (value)
- out_ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
- else
- out_ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
-
- out_ctrl &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
- out_ctrl |= SIRFSOC_GPIO_CTL_OUT_EN_MASK;
- writel(out_ctrl, sgpio->chip.regs + offset);
-
- spin_unlock_irqrestore(&bank->lock, flags);
-}
-
-static int sirfsoc_gpio_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, gpio);
- int idx = sirfsoc_gpio_to_bankoff(gpio);
- u32 offset;
- unsigned long flags;
-
- offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
-
- spin_lock_irqsave(&sgpio->lock, flags);
-
- sirfsoc_gpio_set_output(sgpio, bank, offset, value);
-
- spin_unlock_irqrestore(&sgpio->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
- u32 val;
- unsigned long flags;
-
- spin_lock_irqsave(&bank->lock, flags);
-
- val = readl(sgpio->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
-
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return !!(val & SIRFSOC_GPIO_CTL_DATAIN_MASK);
-}
-
-static void sirfsoc_gpio_set_value(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct sirfsoc_gpio_chip *sgpio = gpiochip_get_data(chip);
- struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
- u32 ctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&bank->lock, flags);
-
- ctrl = readl(sgpio->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
- if (value)
- ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
- else
- ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
- writel(ctrl, sgpio->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
-
- spin_unlock_irqrestore(&bank->lock, flags);
-}
-
-static void sirfsoc_gpio_set_pullup(struct sirfsoc_gpio_chip *sgpio,
- const u32 *pullups)
-{
- int i, n;
- const unsigned long *p = (const unsigned long *)pullups;
-
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- for_each_set_bit(n, p + i, BITS_PER_LONG) {
- u32 offset = SIRFSOC_GPIO_CTRL(i, n);
- u32 val = readl(sgpio->chip.regs + offset);
- val |= SIRFSOC_GPIO_CTL_PULL_MASK;
- val |= SIRFSOC_GPIO_CTL_PULL_HIGH;
- writel(val, sgpio->chip.regs + offset);
- }
- }
-}
-
-static void sirfsoc_gpio_set_pulldown(struct sirfsoc_gpio_chip *sgpio,
- const u32 *pulldowns)
-{
- int i, n;
- const unsigned long *p = (const unsigned long *)pulldowns;
-
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- for_each_set_bit(n, p + i, BITS_PER_LONG) {
- u32 offset = SIRFSOC_GPIO_CTRL(i, n);
- u32 val = readl(sgpio->chip.regs + offset);
- val |= SIRFSOC_GPIO_CTL_PULL_MASK;
- val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH;
- writel(val, sgpio->chip.regs + offset);
- }
- }
-}
-
-static int sirfsoc_gpio_probe(struct device_node *np)
-{
- int i, err = 0;
- struct sirfsoc_gpio_chip *sgpio;
- struct sirfsoc_gpio_bank *bank;
- void __iomem *regs;
- struct platform_device *pdev;
- struct gpio_irq_chip *girq;
-
- u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
-
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return -ENODEV;
-
- sgpio = devm_kzalloc(&pdev->dev, sizeof(*sgpio), GFP_KERNEL);
- if (!sgpio) {
- err = -ENOMEM;
- goto out_put_device;
- }
- spin_lock_init(&sgpio->lock);
-
- regs = of_iomap(np, 0);
- if (!regs) {
- err = -ENOMEM;
- goto out_put_device;
- }
-
- sgpio->chip.gc.request = sirfsoc_gpio_request;
- sgpio->chip.gc.free = sirfsoc_gpio_free;
- sgpio->chip.gc.direction_input = sirfsoc_gpio_direction_input;
- sgpio->chip.gc.get = sirfsoc_gpio_get_value;
- sgpio->chip.gc.direction_output = sirfsoc_gpio_direction_output;
- sgpio->chip.gc.set = sirfsoc_gpio_set_value;
- sgpio->chip.gc.base = 0;
- sgpio->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE * SIRFSOC_GPIO_NO_OF_BANKS;
- sgpio->chip.gc.label = kasprintf(GFP_KERNEL, "%pOF", np);
- sgpio->chip.gc.of_node = np;
- sgpio->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
- sgpio->chip.gc.of_gpio_n_cells = 2;
- sgpio->chip.gc.parent = &pdev->dev;
- sgpio->chip.regs = regs;
-
- girq = &sgpio->chip.gc.irq;
- girq->chip = &sirfsoc_irq_chip;
- girq->parent_handler = sirfsoc_gpio_handle_irq;
- girq->num_parents = SIRFSOC_GPIO_NO_OF_BANKS;
- girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents) {
- err = -ENOMEM;
- goto out_put_device;
- }
- for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
- bank = &sgpio->sgpio_bank[i];
- spin_lock_init(&bank->lock);
- bank->parent_irq = platform_get_irq(pdev, i);
- if (bank->parent_irq < 0) {
- err = bank->parent_irq;
- goto out;
- }
- girq->parents[i] = bank->parent_irq;
- }
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
-
- err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
- if (err) {
- dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
- np, err);
- goto out;
- }
-
- err = gpiochip_add_pin_range(&sgpio->chip.gc, dev_name(&pdev->dev),
- 0, 0, SIRFSOC_GPIO_BANK_SIZE * SIRFSOC_GPIO_NO_OF_BANKS);
- if (err) {
- dev_err(&pdev->dev,
- "could not add gpiochip pin range\n");
- goto out_no_range;
- }
-
- if (!of_property_read_u32_array(np, "sirf,pullups", pullups,
- SIRFSOC_GPIO_NO_OF_BANKS))
- sirfsoc_gpio_set_pullup(sgpio, pullups);
-
- if (!of_property_read_u32_array(np, "sirf,pulldowns", pulldowns,
- SIRFSOC_GPIO_NO_OF_BANKS))
- sirfsoc_gpio_set_pulldown(sgpio, pulldowns);
-
- return 0;
-
-out_no_range:
- gpiochip_remove(&sgpio->chip.gc);
-out:
- iounmap(regs);
-out_put_device:
- put_device(&pdev->dev);
- return err;
-}
-
-static int __init sirfsoc_gpio_init(void)
-{
-
- struct device_node *np;
-
- np = of_find_matching_node(NULL, pinmux_ids);
-
- if (!np)
- return -ENODEV;
-
- return sirfsoc_gpio_probe(np);
-}
-subsys_initcall(sirfsoc_gpio_init);
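
The comment inside sirfsoc_gpio_handle_irq() above states the demux contract: read the bank's status word, then forward only the bits whose per-pin interrupt enable is set. The same pattern condensed with for_each_set_bit() instead of the manual shift loop; this is a sketch with hypothetical demo_* names and a register layout merely echoing the deleted header, not the driver's API:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

#define DEMO_INT_STATUS		0x8c		/* hypothetical layout */
#define DEMO_CTRL(i)		((i) * 4)
#define DEMO_INTR_EN		BIT(3)

struct demo_bank {				/* hypothetical per-bank state */
	void __iomem *regs;
	struct irq_domain *domain;
};

static void demo_gpio_demux(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct demo_bank *bank = irq_desc_get_handler_data(desc);
	unsigned long status;
	int bit;

	chained_irq_enter(chip, desc);		/* mask/ack the parent line */

	status = readl(bank->regs + DEMO_INT_STATUS);
	for_each_set_bit(bit, &status, 32)
		/* skip pins whose interrupt was never enabled, as above */
		if (readl(bank->regs + DEMO_CTRL(bit)) & DEMO_INTR_EN)
			generic_handle_irq(irq_find_mapping(bank->domain, bit));

	chained_irq_exit(chip, desc);		/* unmask/eoi the parent line */
}
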
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.h b/drivers/pinctrl/sirf/pinctrl-sirf.h
deleted file mode 100644
index d7125b8773cc..000000000000
--- a/drivers/pinctrl/sirf/pinctrl-sirf.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * pinmux driver shared headfile for CSR SiRFsoc
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#ifndef __PINMUX_SIRF_H__
-#define __PINMUX_SIRF_H__
-
-#define SIRFSOC_NUM_PADS 622
-#define SIRFSOC_RSC_USB_UART_SHARE 0
-#define SIRFSOC_RSC_PIN_MUX 0x4
-
-#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
-#define SIRFSOC_GPIO_PAD_EN_CLR(g) ((g)*0x100 + 0x90)
-#define SIRFSOC_GPIO_CTRL(g, i) ((g)*0x100 + (i)*4)
-#define SIRFSOC_GPIO_DSP_EN0 (0x80)
-#define SIRFSOC_GPIO_INT_STATUS(g) ((g)*0x100 + 0x8C)
-
-#define SIRFSOC_GPIO_CTL_INTR_LOW_MASK 0x1
-#define SIRFSOC_GPIO_CTL_INTR_HIGH_MASK 0x2
-#define SIRFSOC_GPIO_CTL_INTR_TYPE_MASK 0x4
-#define SIRFSOC_GPIO_CTL_INTR_EN_MASK 0x8
-#define SIRFSOC_GPIO_CTL_INTR_STS_MASK 0x10
-#define SIRFSOC_GPIO_CTL_OUT_EN_MASK 0x20
-#define SIRFSOC_GPIO_CTL_DATAOUT_MASK 0x40
-#define SIRFSOC_GPIO_CTL_DATAIN_MASK 0x80
-#define SIRFSOC_GPIO_CTL_PULL_MASK 0x100
-#define SIRFSOC_GPIO_CTL_PULL_HIGH 0x200
-#define SIRFSOC_GPIO_CTL_DSP_INT 0x400
-
-#define SIRFSOC_GPIO_NO_OF_BANKS 5
-#define SIRFSOC_GPIO_BANK_SIZE 32
-#define SIRFSOC_GPIO_NUM(bank, index) (((bank)*(32)) + (index))
-
-/**
- * @dev: a pointer back to containing device
- * @virtbase: the offset to the controller in virtual memory
- */
-struct sirfsoc_pmx {
- struct device *dev;
- struct pinctrl_dev *pmx;
- void __iomem *gpio_virtbase;
- void __iomem *rsc_virtbase;
- u32 gpio_regs[SIRFSOC_GPIO_NO_OF_BANKS][SIRFSOC_GPIO_BANK_SIZE];
- u32 ints_regs[SIRFSOC_GPIO_NO_OF_BANKS];
- u32 paden_regs[SIRFSOC_GPIO_NO_OF_BANKS];
- u32 dspen_regs;
- u32 rsc_regs[3];
-};
-
-/* SIRFSOC_GPIO_PAD_EN set */
-struct sirfsoc_muxmask {
- unsigned long group;
- unsigned long mask;
-};
-
-struct sirfsoc_padmux {
- unsigned long muxmask_counts;
- const struct sirfsoc_muxmask *muxmask;
- /* RSC_PIN_MUX set */
- unsigned long ctrlreg;
- unsigned long funcmask;
- unsigned long funcval;
-};
-
- /**
- * struct sirfsoc_pin_group - describes a SiRFprimaII pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
- */
-struct sirfsoc_pin_group {
- const char *name;
- const unsigned int *pins;
- const unsigned num_pins;
-};
-
-#define SIRFSOC_PIN_GROUP(n, p) \
- { \
- .name = n, \
- .pins = p, \
- .num_pins = ARRAY_SIZE(p), \
- }
-
-struct sirfsoc_pmx_func {
- const char *name;
- const char * const *groups;
- const unsigned num_groups;
- const struct sirfsoc_padmux *padmux;
-};
-
-#define SIRFSOC_PMX_FUNCTION(n, g, m) \
- { \
- .name = n, \
- .groups = g, \
- .num_groups = ARRAY_SIZE(g), \
- .padmux = &m, \
- }
-
-struct sirfsoc_pinctrl_data {
- struct pinctrl_pin_desc *pads;
- int pads_cnt;
- struct sirfsoc_pin_group *grps;
- int grps_cnt;
- struct sirfsoc_pmx_func *funcs;
- int funcs_cnt;
-};
-
-extern struct sirfsoc_pinctrl_data prima2_pinctrl_data;
-extern struct sirfsoc_pinctrl_data atlas6_pinctrl_data;
-
-#endif
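
The register map deleted above is pure arithmetic: each of the five banks owns a 0x100-byte window and every pin a 32-bit control word. Restating SIRFSOC_GPIO_CTRL() with a compile-time check of one worked example:

/* as in the header above: bank stride 0x100, one u32 control word per pin */
#define SIRFSOC_GPIO_CTRL(g, i)	((g)*0x100 + (i)*4)

/* bank 2, pin index 5: 2*0x100 + 5*4 = 0x214 */
_Static_assert(SIRFSOC_GPIO_CTRL(2, 5) == 0x214, "per-pin control offset");
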
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 08dc1931b358..dca7a505d413 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -687,7 +687,7 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = INPUT_SCHMITT_SHIFT;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (is_sleep_config == true) {
+ if (is_sleep_config) {
val |= SLEEP_PULL_UP;
mask = SLEEP_PULL_UP_MASK;
shift = SLEEP_PULL_UP_SHIFT;
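
Dropping the `== true` is more than style: `true` is the integer constant 1, so the comparison silently fails for any other truthy value whenever the flag is not a genuine bool (here the flag appears to be a bool, so the old form was merely redundant). A minimal standalone illustration with a hypothetical int-typed flag:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	int flag = 2;			/* truthy, but not equal to 1 */

	assert(!(flag == true));	/* compares against the constant 1 */
	assert(flag);			/* any nonzero value tests true */
	return 0;
}
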
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 593293584ecc..33751a6a0757 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -119,4 +119,14 @@ config PINCTRL_SUN50I_H6_R
default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI

+config PINCTRL_SUN50I_H616
+ bool "Support for the Allwinner H616 PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN50I_H616_R
+ bool "Support for the Allwinner H616 R-PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 8b7ff0dc3bdf..d3440c42b9d6 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -23,5 +23,7 @@ obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
+obj-$(CONFIG_PINCTRL_SUN50I_H616) += pinctrl-sun50i-h616.o
+obj-$(CONFIG_PINCTRL_SUN50I_H616_R) += pinctrl-sun50i-h616-r.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index 4557e18d5989..c7d90c44e87a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -24,11 +24,13 @@ static const struct sunxi_desc_pin sun50i_h6_r_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SCK */
SUNXI_FUNCTION(0x3, "s_i2c"), /* SCK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SDA */
SUNXI_FUNCTION(0x3, "s_i2c"), /* SDA */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
new file mode 100644
index 000000000000..8e4f10ab96ce
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner H616 R_PIO pin controller driver
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * Based on former work, which is:
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/reset.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun50i_h616_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SCK */
+ SUNXI_FUNCTION(0x3, "s_i2c")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SDA */
+ SUNXI_FUNCTION(0x3, "s_i2c")), /* SDA */
+};
+
+static const struct sunxi_pinctrl_desc sun50i_h616_r_pinctrl_data = {
+ .pins = sun50i_h616_r_pins,
+ .npins = ARRAY_SIZE(sun50i_h616_r_pins),
+ .pin_base = PL_BASE,
+};
+
+static int sun50i_h616_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun50i_h616_r_pinctrl_data);
+}
+
+static const struct of_device_id sun50i_h616_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-h616-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun50i_h616_r_pinctrl_driver = {
+ .probe = sun50i_h616_r_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-h616-r-pinctrl",
+ .of_match_table = sun50i_h616_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun50i_h616_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
new file mode 100644
index 000000000000..ce1917e230f4
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner H616 SoC pinctrl driver.
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * based on the H6 pinctrl driver
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin h616_pins[] = {
+ /* Internal connection to the AC200 part */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ERXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ERXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ECRS_DV */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ERXERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ETXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ETXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ETXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_FUNCTION(0x2, "emac1")), /* ETXEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_FUNCTION(0x2, "emac1")), /* EMDC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_FUNCTION(0x2, "emac1")), /* EMDIO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_FUNCTION(0x2, "i2c3")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_FUNCTION(0x2, "i2c3")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_FUNCTION(0x2, "pwm5")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* DS */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PC_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* RST */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PC_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x4, "spi0"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PC_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PC_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE0 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PC_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PC_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PC_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CS1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PC_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)), /* PC_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)), /* PC_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)), /* PC_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)), /* PC_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)), /* PC_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)), /* PC_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PC_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* WP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PC_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* HOLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PC_EINT16 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 0)), /* PF_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 1)), /* PF_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 2)), /* PF_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 3)), /* PF_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 4)), /* PF_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 5)), /* PF_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 6)), /* PF_EINT6 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 5)), /* PG_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x4, "jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 6)), /* PG_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x4, "jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 7)), /* PG_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x3, "clock"), /* PLL_LOCK_DEBUG */
+ SUNXI_FUNCTION(0x4, "jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "clock"), /* X32KFOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 10)), /* PG_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 11)), /* PG_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s2"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 12)), /* PG_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s2"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 13)), /* PG_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s2"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 14)), /* PG_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2c4"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 15)), /* PG_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2c4"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 16)), /* PG_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 17)), /* PG_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 18)), /* PG_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 19)), /* PG_EINT19 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x4, "pwm3"),
+ SUNXI_FUNCTION(0x5, "i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 0)), /* PH_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x4, "pwm4"),
+ SUNXI_FUNCTION(0x5, "i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 1)), /* PH_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x3, "spdif"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "pwm2"),
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 2)), /* PH_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "pwm1"),
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 3)), /* PH_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 4)), /* PH_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2s3"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 5)), /* PH_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2s3"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "i2c4"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 6)), /* PH_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x3, "i2s3"), /* SYNC */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "i2c4"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 7)), /* PH_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "ir_rx"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 10)), /* PH_EINT10 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXD3 */
+ SUNXI_FUNCTION(0x3, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x4, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x5, "hdmi"), /* HSCL */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 0)), /* PI_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXD2 */
+ SUNXI_FUNCTION(0x3, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x4, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x5, "hdmi"), /* HSDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 1)), /* PI_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXD1 */
+ SUNXI_FUNCTION(0x3, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x4, "i2s0"), /* SYNC */
+ SUNXI_FUNCTION(0x5, "hdmi"), /* HCEC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 2)), /* PI_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXD0 */
+ SUNXI_FUNCTION(0x3, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x4, "i2s0_dout0"), /* DO0 */
+ SUNXI_FUNCTION(0x5, "i2s0_din1"), /* DI1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 3)), /* PI_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXCK */
+ SUNXI_FUNCTION(0x3, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x4, "i2s0_din0"), /* DI0 */
+ SUNXI_FUNCTION(0x5, "i2s0_dout1"), /* DO1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 4)), /* PI_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ERXCTL */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* CLK */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 5)), /* PI_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ENULL */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* ERR */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 6)), /* PI_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXD3 */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* SYNC */
+ SUNXI_FUNCTION(0x5, "i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 7)), /* PI_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXD2 */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* DVLD */
+ SUNXI_FUNCTION(0x5, "i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 8)), /* PI_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXD1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D0 */
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 9)), /* PI_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXD0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D1 */
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 10)), /* PI_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXCK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 11)), /* PI_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ETXCTL */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D3 */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 12)), /* PI_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* ECLKIN */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D4 */
+ SUNXI_FUNCTION(0x5, "pwm3"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 13)), /* PI_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* MDC */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D5 */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 14)), /* PI_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* MDIO */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D6 */
+ SUNXI_FUNCTION(0x5, "clock"), /* CLK_FANOUT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 15)), /* PI_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac0"), /* EPHY_CLK */
+ SUNXI_FUNCTION(0x3, "uart4"), /* CTS */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D7 */
+ SUNXI_FUNCTION(0x5, "clock"), /* CLK_FANOUT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 7, 16)), /* PI_EINT16 */
+};
+static const unsigned int h616_irq_bank_map[] = { 0, 2, 3, 4, 5, 6, 7, 8 };
+
+static const struct sunxi_pinctrl_desc h616_pinctrl_data = {
+ .pins = h616_pins,
+ .npins = ARRAY_SIZE(h616_pins),
+ .irq_banks = ARRAY_SIZE(h616_irq_bank_map),
+ .irq_bank_map = h616_irq_bank_map,
+ .irq_read_needs_mux = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+};
+
+static int h616_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev, &h616_pinctrl_data);
+}
+
+static const struct of_device_id h616_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-h616-pinctrl", },
+ {}
+};
+
+static struct platform_driver h616_pinctrl_driver = {
+ .probe = h616_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-h616-pinctrl",
+ .of_match_table = h616_pinctrl_match,
+ },
+};
+builtin_platform_driver(h616_pinctrl_driver);
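
h616_irq_bank_map above decouples the eight EINT banks from the GPIO port numbering: entry n names the port bank whose registers back IRQ bank n. Cross-checking against the pin table, PC pins are declared with SUNXI_FUNCTION_IRQ_BANK(0x6, 1, ...) and map[1] == 2, i.e. port C when ports count from PA = 0. A hedged sketch of the translation presumably performed by the common sunxi core:

static unsigned int demo_irq_to_port_bank(const unsigned int *bank_map,
					  unsigned int irq_bank)
{
	/* identity mapping when a SoC declares no irq_bank_map */
	return bank_map ? bank_map[irq_bank] : irq_bank;
}
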
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index cfb924228d87..60a67139ff0a 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -704,10 +704,9 @@ static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
u32 reg = 0;

cfg = &group->cfg[i];
- regmap_read(iod->regmap, cfg->offset, &reg),
- seq_printf(s, "\n\t0x%08x = 0x%08x (%3d, %3d)",
- cfg->offset, reg, cfg->a_delay,
- cfg->g_delay);
+ regmap_read(iod->regmap, cfg->offset, &reg);
+ seq_printf(s, "\n\t0x%08x = 0x%08x (%3d, %3d)",
+ cfg->offset, reg, cfg->a_delay, cfg->g_delay);
}
}
#endif
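
The fix above replaces one comma with a semicolon: the original regmap_read() line ended in `,`, which fused it with the following seq_printf() into a single expression via C's comma operator. That compiled and even behaved correctly here, but the construct breaks silently as soon as such a fused statement is placed under a condition. A standalone demonstration:

#include <assert.h>

int main(void)
{
	int a = 0, b = 0;

	if (0)
		a = 1,	/* comma operator: one statement, ...  */
		b = 1;	/* ...so both assignments are skipped  */

	if (0)
		a = 2;	/* with a semicolon, only this is guarded */
		b = 2;	/* ...and this runs unconditionally       */

	assert(a == 0 && b == 2);
	return 0;
}
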
diff --git a/drivers/pinctrl/visconti/pinctrl-common.c b/drivers/pinctrl/visconti/pinctrl-common.c
index 0cb10b7b4430..21c7e0d18fea 100644
--- a/drivers/pinctrl/visconti/pinctrl-common.c
+++ b/drivers/pinctrl/visconti/pinctrl-common.c
@@ -245,11 +245,34 @@ static int visconti_set_mux(struct pinctrl_dev *pctldev,
return 0;
}

+static int visconti_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct visconti_mux *gpio_mux = &priv->devdata->gpio_mux[pin];
+ unsigned long flags;
+ unsigned int val;
+
+ dev_dbg(priv->dev, "%s: pin = %d\n", __func__, pin);
+
+ /* update mux */
+ spin_lock_irqsave(&priv->lock, flags);
+ val = readl(priv->base + gpio_mux->offset);
+ val &= ~gpio_mux->mask;
+ val |= gpio_mux->val;
+ writel(val, priv->base + gpio_mux->offset);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
static const struct pinmux_ops visconti_pinmux_ops = {
.get_functions_count = visconti_get_functions_count,
.get_function_name = visconti_get_function_name,
.get_function_groups = visconti_get_function_groups,
.set_mux = visconti_set_mux,
+ .gpio_request_enable = visconti_gpio_request_enable,
.strict = true,
};
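
The new visconti_gpio_request_enable() is a read-modify-write under a spinlock with interrupts disabled, so a concurrent set_mux() touching the same register cannot interleave between the readl() and the writel(). The same idiom factored into a helper, as a sketch with illustrative names rather than the driver's API:

#include <linux/io.h>
#include <linux/spinlock.h>

/*
 * Replace a masked field in an MMIO register, atomically with respect
 * to other holders of @lock.
 */
static void demo_update_field(void __iomem *reg, spinlock_t *lock,
			      u32 mask, u32 val)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(lock, flags);
	tmp = readl(reg);
	tmp &= ~mask;		/* clear the field */
	tmp |= val & mask;	/* install the new value */
	writel(tmp, reg);
	spin_unlock_irqrestore(lock, flags);
}
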
diff --git a/drivers/pinctrl/zte/Kconfig b/drivers/pinctrl/zte/Kconfig
deleted file mode 100644
index 4fdc70511034..000000000000
--- a/drivers/pinctrl/zte/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config PINCTRL_ZX
- bool
- select PINMUX
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
-
-config PINCTRL_ZX296718
- bool "ZTE ZX296718 pinctrl driver"
- depends on OF && ARCH_ZX
- select PINCTRL_ZX
- help
- Say Y here to enable the ZX296718 pinctrl driver
diff --git a/drivers/pinctrl/zte/Makefile b/drivers/pinctrl/zte/Makefile
deleted file mode 100644
index 2084c7810f96..000000000000
--- a/drivers/pinctrl/zte/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PINCTRL_ZX) += pinctrl-zx.o
-obj-$(CONFIG_PINCTRL_ZX296718) += pinctrl-zx296718.o
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
deleted file mode 100644
index 80d00ab8c110..000000000000
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../core.h"
-#include "../pinctrl-utils.h"
-#include "../pinmux.h"
-#include "pinctrl-zx.h"
-
-#define ZX_PULL_DOWN BIT(0)
-#define ZX_PULL_UP BIT(1)
-#define ZX_INPUT_ENABLE BIT(3)
-#define ZX_DS_SHIFT 4
-#define ZX_DS_MASK (0x7 << ZX_DS_SHIFT)
-#define ZX_DS_VALUE(x) (((x) << ZX_DS_SHIFT) & ZX_DS_MASK)
-#define ZX_SLEW BIT(8)
-
-struct zx_pinctrl {
- struct pinctrl_dev *pctldev;
- struct device *dev;
- void __iomem *base;
- void __iomem *aux_base;
- spinlock_t lock;
- struct zx_pinctrl_soc_info *info;
-};
-
-static int zx_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, u32 *num_maps)
-{
- return pinconf_generic_dt_node_to_map(pctldev, np_config, map,
- num_maps, PIN_MAP_TYPE_INVALID);
-}
-
-static const struct pinctrl_ops zx_pinctrl_ops = {
- .dt_node_to_map = zx_dt_node_to_map,
- .dt_free_map = pinctrl_utils_free_map,
- .get_groups_count = pinctrl_generic_get_group_count,
- .get_group_name = pinctrl_generic_get_group_name,
- .get_group_pins = pinctrl_generic_get_group_pins,
-};
-
-#define NONAON_MVAL 2
-
-static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
- unsigned int group_selector)
-{
- struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
- struct zx_pinctrl_soc_info *info = zpctl->info;
- const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
- struct zx_pin_data *data = pindesc->drv_data;
- struct zx_mux_desc *mux;
- u32 mask, offset, bitpos;
- struct function_desc *func;
- unsigned long flags;
- u32 val, mval;
-
- /* Skip reserved pin */
- if (!data)
- return -EINVAL;
-
- mux = data->muxes;
- mask = (1 << data->width) - 1;
- offset = data->offset;
- bitpos = data->bitpos;
-
- func = pinmux_generic_get_function(pctldev, func_selector);
- if (!func)
- return -EINVAL;
-
- while (mux->name) {
- if (strcmp(mux->name, func->name) == 0)
- break;
- mux++;
- }
-
- /* Found mux value to be written */
- mval = mux->muxval;
-
- spin_lock_irqsave(&zpctl->lock, flags);
-
- if (data->aon_pin) {
- /*
- * It's an AON pin, whose mux register offset and bit position
- * can be calculated from pin number. Each register covers 16
- * pins, and each pin occupies 2 bits.
- */
- u16 aoffset = pindesc->number / 16 * 4;
- u16 abitpos = (pindesc->number % 16) * 2;
-
- if (mval & AON_MUX_FLAG) {
- /*
- * This is a mux value that needs to be written into
- * AON pinmux register. Write it and then we're done.
- */
- val = readl(zpctl->aux_base + aoffset);
- val &= ~(0x3 << abitpos);
- val |= (mval & 0x3) << abitpos;
- writel(val, zpctl->aux_base + aoffset);
- } else {
- /*
- * It's a mux value that needs to be written into TOP
- * pinmux register.
- */
- val = readl(zpctl->base + offset);
- val &= ~(mask << bitpos);
- val |= (mval & mask) << bitpos;
- writel(val, zpctl->base + offset);
-
- /*
- * In this case, the AON pinmux register needs to be
- * set up to select non-AON function.
- */
- val = readl(zpctl->aux_base + aoffset);
- val &= ~(0x3 << abitpos);
- val |= NONAON_MVAL << abitpos;
- writel(val, zpctl->aux_base + aoffset);
- }
-
- } else {
- /*
- * This is a TOP pin, and we only need to set up TOP pinmux
- * register and then we're done with it.
- */
- val = readl(zpctl->base + offset);
- val &= ~(mask << bitpos);
- val |= (mval & mask) << bitpos;
- writel(val, zpctl->base + offset);
- }
-
- spin_unlock_irqrestore(&zpctl->lock, flags);
-
- return 0;
-}
-
-static const struct pinmux_ops zx_pinmux_ops = {
- .get_functions_count = pinmux_generic_get_function_count,
- .get_function_name = pinmux_generic_get_function_name,
- .get_function_groups = pinmux_generic_get_function_groups,
- .set_mux = zx_set_mux,
-};
-
-static int zx_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *config)
-{
- struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
- struct zx_pinctrl_soc_info *info = zpctl->info;
- const struct pinctrl_pin_desc *pindesc = info->pins + pin;
- struct zx_pin_data *data = pindesc->drv_data;
- enum pin_config_param param = pinconf_to_config_param(*config);
- u32 val;
-
- /* Skip reserved pin */
- if (!data)
- return -EINVAL;
-
- val = readl(zpctl->aux_base + data->coffset);
- val = val >> data->cbitpos;
-
- switch (param) {
- case PIN_CONFIG_BIAS_PULL_DOWN:
- val &= ZX_PULL_DOWN;
- val = !!val;
- if (val == 0)
- return -EINVAL;
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- val &= ZX_PULL_UP;
- val = !!val;
- if (val == 0)
- return -EINVAL;
- break;
- case PIN_CONFIG_INPUT_ENABLE:
- val &= ZX_INPUT_ENABLE;
- val = !!val;
- if (val == 0)
- return -EINVAL;
- break;
- case PIN_CONFIG_DRIVE_STRENGTH:
- val &= ZX_DS_MASK;
- val = val >> ZX_DS_SHIFT;
- break;
- case PIN_CONFIG_SLEW_RATE:
- val &= ZX_SLEW;
- val = !!val;
- break;
- default:
- return -ENOTSUPP;
- }
-
- *config = pinconf_to_config_packed(param, val);
-
- return 0;
-}
-
-static int zx_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs, unsigned int num_configs)
-{
- struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
- struct zx_pinctrl_soc_info *info = zpctl->info;
- const struct pinctrl_pin_desc *pindesc = info->pins + pin;
- struct zx_pin_data *data = pindesc->drv_data;
- enum pin_config_param param;
- u32 val, arg;
- int i;
-
- /* Skip reserved pin */
- if (!data)
- return -EINVAL;
-
- val = readl(zpctl->aux_base + data->coffset);
-
- for (i = 0; i < num_configs; i++) {
- param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
-
- switch (param) {
- case PIN_CONFIG_BIAS_PULL_DOWN:
- val |= ZX_PULL_DOWN << data->cbitpos;
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- val |= ZX_PULL_UP << data->cbitpos;
- break;
- case PIN_CONFIG_INPUT_ENABLE:
- val |= ZX_INPUT_ENABLE << data->cbitpos;
- break;
- case PIN_CONFIG_DRIVE_STRENGTH:
- val &= ~(ZX_DS_MASK << data->cbitpos);
- val |= ZX_DS_VALUE(arg) << data->cbitpos;
- break;
- case PIN_CONFIG_SLEW_RATE:
- if (arg)
- val |= ZX_SLEW << data->cbitpos;
- else
- val &= ~(ZX_SLEW << data->cbitpos);
- break;
- default:
- return -ENOTSUPP;
- }
- }
-
- writel(val, zpctl->aux_base + data->coffset);
- return 0;
-}
-
-static const struct pinconf_ops zx_pinconf_ops = {
- .pin_config_set = zx_pin_config_set,
- .pin_config_get = zx_pin_config_get,
- .is_generic = true,
-};
-
-static int zx_pinctrl_build_state(struct platform_device *pdev)
-{
- struct zx_pinctrl *zpctl = platform_get_drvdata(pdev);
- struct zx_pinctrl_soc_info *info = zpctl->info;
- struct pinctrl_dev *pctldev = zpctl->pctldev;
- struct function_desc *functions;
- int nfunctions;
- struct group_desc *groups;
- int ngroups;
- int i;
-
- /* Every single pin composes a group */
- ngroups = info->npins;
- groups = devm_kcalloc(&pdev->dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
- if (!groups)
- return -ENOMEM;
-
- for (i = 0; i < ngroups; i++) {
- const struct pinctrl_pin_desc *pindesc = info->pins + i;
- struct group_desc *group = groups + i;
-
- group->name = pindesc->name;
- group->pins = (int *) &pindesc->number;
- group->num_pins = 1;
- radix_tree_insert(&pctldev->pin_group_tree, i, group);
- }
-
- pctldev->num_groups = ngroups;
-
- /* Build function list from pin mux functions */
- functions = kcalloc(info->npins, sizeof(*functions), GFP_KERNEL);
- if (!functions)
- return -ENOMEM;
-
- nfunctions = 0;
- for (i = 0; i < info->npins; i++) {
- const struct pinctrl_pin_desc *pindesc = info->pins + i;
- struct zx_pin_data *data = pindesc->drv_data;
- struct zx_mux_desc *mux;
-
- /* Reserved pins do not have a drv_data at all */
- if (!data)
- continue;
-
- /* Loop over all muxes for the pin */
- mux = data->muxes;
- while (mux->name) {
- struct function_desc *func = functions;
-
- /* Search function list for given mux */
- while (func->name) {
- if (strcmp(mux->name, func->name) == 0) {
- /* Function exists */
- func->num_group_names++;
- break;
- }
- func++;
- }
-
- if (!func->name) {
- /* New function */
- func->name = mux->name;
- func->num_group_names = 1;
- radix_tree_insert(&pctldev->pin_function_tree,
- nfunctions++, func);
- }
-
- mux++;
- }
- }
-
- pctldev->num_functions = nfunctions;
- functions = krealloc(functions, nfunctions * sizeof(*functions),
- GFP_KERNEL);
-
- /* Find pin groups for every single function */
- for (i = 0; i < info->npins; i++) {
- const struct pinctrl_pin_desc *pindesc = info->pins + i;
- struct zx_pin_data *data = pindesc->drv_data;
- struct zx_mux_desc *mux;
-
- if (!data)
- continue;
-
- mux = data->muxes;
- while (mux->name) {
- struct function_desc *func;
- const char **group;
- int j;
-
- /* Find function for given mux */
- for (j = 0; j < nfunctions; j++)
- if (strcmp(functions[j].name, mux->name) == 0)
- break;
-
- func = functions + j;
- if (!func->group_names) {
- func->group_names = devm_kcalloc(&pdev->dev,
- func->num_group_names,
- sizeof(*func->group_names),
- GFP_KERNEL);
- if (!func->group_names) {
- kfree(functions);
- return -ENOMEM;
- }
- }
-
- group = func->group_names;
- while (*group)
- group++;
- *group = pindesc->name;
-
- mux++;
- }
- }
-
- return 0;
-}
-
-int zx_pinctrl_init(struct platform_device *pdev,
- struct zx_pinctrl_soc_info *info)
-{
- struct pinctrl_desc *pctldesc;
- struct zx_pinctrl *zpctl;
- struct device_node *np;
- int ret;
-
- zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
- if (!zpctl)
- return -ENOMEM;
-
- spin_lock_init(&zpctl->lock);
-
- zpctl->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(zpctl->base))
- return PTR_ERR(zpctl->base);
-
- np = of_parse_phandle(pdev->dev.of_node, "zte,auxiliary-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "failed to find auxiliary controller\n");
- return -ENODEV;
- }
-
- zpctl->aux_base = of_iomap(np, 0);
- of_node_put(np);
- if (!zpctl->aux_base)
- return -ENOMEM;
-
- zpctl->dev = &pdev->dev;
- zpctl->info = info;
-
- pctldesc = devm_kzalloc(&pdev->dev, sizeof(*pctldesc), GFP_KERNEL);
- if (!pctldesc)
- return -ENOMEM;
-
- pctldesc->name = dev_name(&pdev->dev);
- pctldesc->owner = THIS_MODULE;
- pctldesc->pins = info->pins;
- pctldesc->npins = info->npins;
- pctldesc->pctlops = &zx_pinctrl_ops;
- pctldesc->pmxops = &zx_pinmux_ops;
- pctldesc->confops = &zx_pinconf_ops;
-
- zpctl->pctldev = devm_pinctrl_register(&pdev->dev, pctldesc, zpctl);
- if (IS_ERR(zpctl->pctldev)) {
- ret = PTR_ERR(zpctl->pctldev);
- dev_err(&pdev->dev, "failed to register pinctrl: %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, zpctl);
-
- ret = zx_pinctrl_build_state(pdev);
- if (ret) {
- dev_err(&pdev->dev, "failed to build state: %d\n", ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "initialized pinctrl driver\n");
- return 0;
-}
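
The AON addressing spelled out in zx_set_mux()'s comment is two divisions: each 32-bit AON mux register serves 16 pins at 2 bits per pin. Restated with hypothetical macro names and checked against HDMI_SDA, which is pin 19 in the zx296718 enum further down:

#define DEMO_AON_MUX_OFFSET(pin)	((pin) / 16 * 4)	/* 16 pins per register */
#define DEMO_AON_MUX_BITPOS(pin)	((pin) % 16 * 2)	/* 2 bits per pin */

/* pin 19: second register (offset 4), bits [7:6] */
_Static_assert(DEMO_AON_MUX_OFFSET(19) == 4, "aoffset");
_Static_assert(DEMO_AON_MUX_BITPOS(19) == 6, "abitpos");
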
diff --git a/drivers/pinctrl/zte/pinctrl-zx.h b/drivers/pinctrl/zte/pinctrl-zx.h
deleted file mode 100644
index a0692e2e9012..000000000000
--- a/drivers/pinctrl/zte/pinctrl-zx.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __PINCTRL_ZX_H
-#define __PINCTRL_ZX_H
-
-/**
- * struct zx_mux_desc - hardware mux descriptor
- * @name: mux function name
- * @muxval: mux register bit value
- */
-struct zx_mux_desc {
- const char *name;
- u8 muxval;
-};
-
-/**
- * struct zx_pin_data - hardware per-pin data
- * @aon_pin: whether it's an AON pin
- * @offset: register offset within TOP pinmux controller
- * @bitpos: bit position within TOP pinmux register
- * @width: bit width within TOP pinmux register
- * @coffset: pinconf register offset within AON controller
- * @cbitpos: pinconf bit position within AON register
- * @muxes: available mux function names and corresponding register values
- *
- * Unlike TOP pinmux and AON pinconf registers which are arranged pretty
- * arbitrarily, AON pinmux register bits are well organized per pin id, and
- * each pin occupies two bits, so that we can calculate the AON register offset
- * and bit position from pin id. Thus, we only need to define TOP pinmux and
- * AON pinconf register data for the pin.
- */
-struct zx_pin_data {
- bool aon_pin;
- u16 offset;
- u16 bitpos;
- u16 width;
- u16 coffset;
- u16 cbitpos;
- struct zx_mux_desc *muxes;
-};
-
-struct zx_pinctrl_soc_info {
- const struct pinctrl_pin_desc *pins;
- unsigned int npins;
-};
-
-#define TOP_PIN(pin, off, bp, wd, coff, cbp, ...) { \
- .number = pin, \
- .name = #pin, \
- .drv_data = &(struct zx_pin_data) { \
- .aon_pin = false, \
- .offset = off, \
- .bitpos = bp, \
- .width = wd, \
- .coffset = coff, \
- .cbitpos = cbp, \
- .muxes = (struct zx_mux_desc[]) { \
- __VA_ARGS__, { } }, \
- }, \
-}
-
-#define AON_PIN(pin, off, bp, wd, coff, cbp, ...) { \
- .number = pin, \
- .name = #pin, \
- .drv_data = &(struct zx_pin_data) { \
- .aon_pin = true, \
- .offset = off, \
- .bitpos = bp, \
- .width = wd, \
- .coffset = coff, \
- .cbitpos = cbp, \
- .muxes = (struct zx_mux_desc[]) { \
- __VA_ARGS__, { } }, \
- }, \
-}
-
-#define ZX_RESERVED(pin) PINCTRL_PIN(pin, #pin)
-
-#define TOP_MUX(_val, _name) { \
- .name = _name, \
- .muxval = _val, \
-}
-
-/*
- * When the flag is set, it's a mux configuration for an AON pin that sits in
- * AON register. Otherwise, it's one for AON pin but sitting in TOP register.
- */
-#define AON_MUX_FLAG BIT(7)
-
-#define AON_MUX(_val, _name) { \
- .name = _name, \
- .muxval = _val | AON_MUX_FLAG, \
-}
-
-int zx_pinctrl_init(struct platform_device *pdev,
- struct zx_pinctrl_soc_info *info);
-
-#endif /* __PINCTRL_ZX_H */
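
The TOP_PIN()/AON_PIN() macros above combine two C features: a compound literal gives every pin its own anonymous zx_pin_data, and the `{ }` appended after __VA_ARGS__ adds a zeroed sentinel so zx_set_mux() can walk the mux list with `while (mux->name)`. The idiom in isolation, with illustrative names:

struct demo_mux {
	const char *name;
};

/* compound literal with a zeroed sentinel entry, as in the macros above */
static struct demo_mux *demo_muxes =
	(struct demo_mux[]) { { "uart0" }, { "spi0" }, { } };

static int demo_count_muxes(struct demo_mux *mux)
{
	int n = 0;

	for (; mux->name; mux++)	/* stop at the sentinel */
		n++;
	return n;			/* 2 for the list above */
}
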
diff --git a/drivers/pinctrl/zte/pinctrl-zx296718.c b/drivers/pinctrl/zte/pinctrl-zx296718.c
deleted file mode 100644
index c980aecb6f2f..000000000000
--- a/drivers/pinctrl/zte/pinctrl-zx296718.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/platform_device.h>
-
-#include "pinctrl-zx.h"
-
-#define TOP_REG0 0x00
-#define TOP_REG1 0x04
-#define TOP_REG2 0x08
-#define TOP_REG3 0x0c
-#define TOP_REG4 0x10
-#define TOP_REG5 0x14
-#define TOP_REG6 0x18
-#define TOP_REG7 0x1c
-#define TOP_REG8 0x20
-
-/*
- * The pin numbering starts from AON pins with reserved ones included,
- * so that register data like offset and bit position for AON pins can
- * be calculated from pin number.
- */
-enum zx296718_pin {
- /* aon_pmm_reg_0 */
- I2C3_SCL = 0,
- I2C3_SDA = 1,
- AON_RESERVED0 = 2,
- AON_RESERVED1 = 3,
- SEC_EN = 4,
- UART0_RXD = 5,
- UART0_TXD = 6,
- IR_IN = 7,
- SPI0_CLK = 8,
- SPI0_CS = 9,
- SPI0_TXD = 10,
- SPI0_RXD = 11,
- KEY_COL0 = 12,
- KEY_COL1 = 13,
- KEY_COL2 = 14,
- KEY_ROW0 = 15,
-
- /* aon_pmm_reg_1 */
- KEY_ROW1 = 16,
- KEY_ROW2 = 17,
- HDMI_SCL = 18,
- HDMI_SDA = 19,
- JTAG_TCK = 20,
- JTAG_TRSTN = 21,
- JTAG_TMS = 22,
- JTAG_TDI = 23,
- JTAG_TDO = 24,
- I2C0_SCL = 25,
- I2C0_SDA = 26,
- I2C1_SCL = 27,
- I2C1_SDA = 28,
- AON_RESERVED2 = 29,
- AON_RESERVED3 = 30,
- AON_RESERVED4 = 31,
-
- /* aon_pmm_reg_2 */
- SPI1_CLK = 32,
- SPI1_CS = 33,
- SPI1_TXD = 34,
- SPI1_RXD = 35,
- AON_RESERVED5 = 36,
- AON_RESERVED6 = 37,
- AUDIO_DET = 38,
- SPDIF_OUT = 39,
- HDMI_CEC = 40,
- HDMI_HPD = 41,
- GMAC_25M_OUT = 42,
- BOOT_SEL0 = 43,
- BOOT_SEL1 = 44,
- BOOT_SEL2 = 45,
- DEEP_SLEEP_OUT_N = 46,
- AON_RESERVED7 = 47,
-
- /* top_pmm_reg_0 */
- GMII_GTX_CLK = 48,
- GMII_TX_CLK = 49,
- GMII_TXD0 = 50,
- GMII_TXD1 = 51,
- GMII_TXD2 = 52,
- GMII_TXD3 = 53,
- GMII_TXD4 = 54,
- GMII_TXD5 = 55,
- GMII_TXD6 = 56,
- GMII_TXD7 = 57,
- GMII_TX_ER = 58,
- GMII_TX_EN = 59,
- GMII_RX_CLK = 60,
- GMII_RXD0 = 61,
- GMII_RXD1 = 62,
- GMII_RXD2 = 63,
-
- /* top_pmm_reg_1 */
- GMII_RXD3 = 64,
- GMII_RXD4 = 65,
- GMII_RXD5 = 66,
- GMII_RXD6 = 67,
- GMII_RXD7 = 68,
- GMII_RX_ER = 69,
- GMII_RX_DV = 70,
- GMII_COL = 71,
- GMII_CRS = 72,
- GMII_MDC = 73,
- GMII_MDIO = 74,
- SDIO1_CLK = 75,
- SDIO1_CMD = 76,
- SDIO1_DATA0 = 77,
- SDIO1_DATA1 = 78,
- SDIO1_DATA2 = 79,
-
- /* top_pmm_reg_2 */
- SDIO1_DATA3 = 80,
- SDIO1_CD = 81,
- SDIO1_WP = 82,
- USIM1_CD = 83,
- USIM1_CLK = 84,
- USIM1_RST = 85,
-
- /* top_pmm_reg_3 */
- USIM1_DATA = 86,
- SDIO0_CLK = 87,
- SDIO0_CMD = 88,
- SDIO0_DATA0 = 89,
- SDIO0_DATA1 = 90,
- SDIO0_DATA2 = 91,
- SDIO0_DATA3 = 92,
- SDIO0_CD = 93,
- SDIO0_WP = 94,
-
- /* top_pmm_reg_4 */
- TSI0_DATA0 = 95,
- SPINOR_CLK = 96,
- TSI2_DATA = 97,
- TSI2_CLK = 98,
- TSI2_SYNC = 99,
- TSI2_VALID = 100,
- SPINOR_CS = 101,
- SPINOR_DQ0 = 102,
- SPINOR_DQ1 = 103,
- SPINOR_DQ2 = 104,
- SPINOR_DQ3 = 105,
- VGA_HS = 106,
- VGA_VS = 107,
- TSI3_DATA = 108,
-
- /* top_pmm_reg_5 */
- TSI3_CLK = 109,
- TSI3_SYNC = 110,
- TSI3_VALID = 111,
- I2S1_WS = 112,
- I2S1_BCLK = 113,
- I2S1_MCLK = 114,
- I2S1_DIN0 = 115,
- I2S1_DOUT0 = 116,
- SPI3_CLK = 117,
- SPI3_CS = 118,
- SPI3_TXD = 119,
- NAND_LDO_MS18_SEL = 120,
-
- /* top_pmm_reg_6 */
- SPI3_RXD = 121,
- I2S0_MCLK = 122,
- I2S0_BCLK = 123,
- I2S0_WS = 124,
- I2S0_DIN0 = 125,
- I2S0_DOUT0 = 126,
- I2C5_SCL = 127,
- I2C5_SDA = 128,
- SPI2_CLK = 129,
- SPI2_CS = 130,
- SPI2_TXD = 131,
-
- /* top_pmm_reg_7 */
- SPI2_RXD = 132,
- NAND_WP_N = 133,
- NAND_PAGE_SIZE0 = 134,
- NAND_PAGE_SIZE1 = 135,
- NAND_ADDR_CYCLE = 136,
- NAND_RB0 = 137,
- NAND_RB1 = 138,
- NAND_RB2 = 139,
- NAND_RB3 = 140,
-
- /* top_pmm_reg_8 */
- GMAC_125M_IN = 141,
- GMAC_50M_OUT = 142,
- SPINOR_SSCLK_LOOPBACK = 143,
- SPINOR_SDIO1CLK_LOOPBACK = 144,
-};
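As the comment above the enum notes, AON pins are grouped sixteen to an aon_pmm_reg, so the register group and a pin's position within it follow from the pin number alone. A sketch of that calculation (helper names are hypothetical, and the actual pinctrl-zx math may differ):

/*
 * Illustration of the numbering scheme: 16 AON pins per aon_pmm_reg,
 * reserved pins included so the arithmetic stays uniform.
 */
static inline unsigned int aon_pmm_reg_index(unsigned int pin)
{
	return pin / 16;	/* aon_pmm_reg_0, _1, _2 */
}

static inline unsigned int aon_pmm_reg_pos(unsigned int pin)
{
	return pin % 16;	/* position within the group */
}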
-
-static const struct pinctrl_pin_desc zx296718_pins[] = {
- /* aon_pmm_reg_0 */
- AON_PIN(I2C3_SCL, TOP_REG2, 18, 2, 0x48, 0,
- AON_MUX(0x0, "ANMI"), /* anmi */
- AON_MUX(0x1, "AGPIO"), /* agpio29 */
- AON_MUX(0x2, "nonAON"), /* pin0 */
- AON_MUX(0x3, "EXT_INT"), /* int4 */
- TOP_MUX(0x0, "I2C3"), /* scl */
- TOP_MUX(0x1, "SPI2"), /* txd */
- TOP_MUX(0x2, "I2S1")), /* din0 */
- AON_PIN(I2C3_SDA, TOP_REG2, 20, 2, 0x48, 9,
- AON_MUX(0x0, "WD"), /* rst_b */
- AON_MUX(0x1, "AGPIO"), /* agpio30 */
- AON_MUX(0x2, "nonAON"), /* pin1 */
- AON_MUX(0x3, "EXT_INT"), /* int5 */
- TOP_MUX(0x0, "I2C3"), /* sda */
- TOP_MUX(0x1, "SPI2"), /* rxd */
- TOP_MUX(0x2, "I2S0")), /* mclk */
- ZX_RESERVED(AON_RESERVED0),
- ZX_RESERVED(AON_RESERVED1),
- AON_PIN(SEC_EN, TOP_REG3, 5, 1, 0x50, 0,
- AON_MUX(0x0, "SEC"), /* en */
- AON_MUX(0x1, "AGPIO"), /* agpio28 */
- AON_MUX(0x2, "nonAON"), /* pin3 */
- AON_MUX(0x3, "EXT_INT"), /* int7 */
- TOP_MUX(0x0, "I2C2"), /* sda */
- TOP_MUX(0x1, "SPI2")), /* cs */
- AON_PIN(UART0_RXD, 0, 0, 0, 0x50, 9,
- AON_MUX(0x0, "UART0"), /* rxd */
- AON_MUX(0x1, "AGPIO"), /* agpio20 */
- AON_MUX(0x2, "nonAON")), /* pin34 */
- AON_PIN(UART0_TXD, 0, 0, 0, 0x50, 18,
- AON_MUX(0x0, "UART0"), /* txd */
- AON_MUX(0x1, "AGPIO"), /* agpio21 */
- AON_MUX(0x2, "nonAON")), /* pin32 */
- AON_PIN(IR_IN, 0, 0, 0, 0x64, 0,
- AON_MUX(0x0, "IR"), /* in */
- AON_MUX(0x1, "AGPIO"), /* agpio0 */
- AON_MUX(0x2, "nonAON")), /* pin27 */
- AON_PIN(SPI0_CLK, TOP_REG3, 16, 1, 0x64, 9,
- AON_MUX(0x0, "EXT_INT"), /* int0 */
- AON_MUX(0x1, "AGPIO"), /* agpio23 */
- AON_MUX(0x2, "nonAON"), /* pin5 */
- AON_MUX(0x3, "PCU"), /* test6 */
- TOP_MUX(0x0, "SPI0"), /* clk */
- TOP_MUX(0x1, "ISP")), /* flash_trig */
- AON_PIN(SPI0_CS, TOP_REG3, 17, 1, 0x64, 18,
- AON_MUX(0x0, "EXT_INT"), /* int1 */
- AON_MUX(0x1, "AGPIO"), /* agpio24 */
- AON_MUX(0x2, "nonAON"), /* pin6 */
- AON_MUX(0x3, "PCU"), /* test0 */
- TOP_MUX(0x0, "SPI0"), /* cs */
- TOP_MUX(0x1, "ISP")), /* prelight_trig */
- AON_PIN(SPI0_TXD, TOP_REG3, 18, 1, 0x68, 0,
- AON_MUX(0x0, "EXT_INT"), /* int2 */
- AON_MUX(0x1, "AGPIO"), /* agpio25 */
- AON_MUX(0x2, "nonAON"), /* pin7 */
- AON_MUX(0x3, "PCU"), /* test1 */
- TOP_MUX(0x0, "SPI0"), /* txd */
- TOP_MUX(0x1, "ISP")), /* shutter_trig */
- AON_PIN(SPI0_RXD, TOP_REG3, 19, 1, 0x68, 9,
- AON_MUX(0x0, "EXT_INT"), /* int3 */
- AON_MUX(0x1, "AGPIO"), /* agpio26 */
- AON_MUX(0x2, "nonAON"), /* pin8 */
- AON_MUX(0x3, "PCU"), /* test2 */
- TOP_MUX(0x0, "SPI0"), /* rxd */
- TOP_MUX(0x1, "ISP")), /* shutter_open */
- AON_PIN(KEY_COL0, TOP_REG3, 20, 1, 0x68, 18,
- AON_MUX(0x0, "KEY"), /* col0 */
- AON_MUX(0x1, "AGPIO"), /* agpio5 */
- AON_MUX(0x2, "nonAON"), /* pin9 */
- AON_MUX(0x3, "PCU"), /* test3 */
- TOP_MUX(0x0, "UART3"), /* rxd */
- TOP_MUX(0x1, "I2S0")), /* din1 */
- AON_PIN(KEY_COL1, TOP_REG3, 21, 2, 0x6c, 0,
- AON_MUX(0x0, "KEY"), /* col1 */
- AON_MUX(0x1, "AGPIO"), /* agpio6 */
- AON_MUX(0x2, "nonAON"), /* pin10 */
- TOP_MUX(0x0, "UART3"), /* txd */
- TOP_MUX(0x1, "I2S0"), /* din2 */
- TOP_MUX(0x2, "VGA")), /* scl */
- AON_PIN(KEY_COL2, TOP_REG3, 23, 2, 0x6c, 9,
- AON_MUX(0x0, "KEY"), /* col2 */
- AON_MUX(0x1, "AGPIO"), /* agpio7 */
- AON_MUX(0x2, "nonAON"), /* pin11 */
- TOP_MUX(0x0, "PWM"), /* out1 */
- TOP_MUX(0x1, "I2S0"), /* din3 */
- TOP_MUX(0x2, "VGA")), /* sda */
- AON_PIN(KEY_ROW0, 0, 0, 0, 0x6c, 18,
- AON_MUX(0x0, "KEY"), /* row0 */
- AON_MUX(0x1, "AGPIO"), /* agpio8 */
- AON_MUX(0x2, "nonAON"), /* pin33 */
- AON_MUX(0x3, "WD")), /* rst_b */
-
- /* aon_pmm_reg_1 */
- AON_PIN(KEY_ROW1, TOP_REG3, 25, 2, 0x70, 0,
- AON_MUX(0x0, "KEY"), /* row1 */
- AON_MUX(0x1, "AGPIO"), /* agpio9 */
- AON_MUX(0x2, "nonAON"), /* pin12 */
- TOP_MUX(0x0, "LCD"), /* port0 lcd_te */
- TOP_MUX(0x1, "I2S0"), /* dout2 */
- TOP_MUX(0x2, "PWM"), /* out2 */
- TOP_MUX(0x3, "VGA")), /* hs1 */
- AON_PIN(KEY_ROW2, TOP_REG3, 27, 2, 0x70, 9,
- AON_MUX(0x0, "KEY"), /* row2 */
- AON_MUX(0x1, "AGPIO"), /* agpio10 */
- AON_MUX(0x2, "nonAON"), /* pin13 */
- TOP_MUX(0x0, "LCD"), /* port1 lcd_te */
- TOP_MUX(0x1, "I2S0"), /* dout3 */
- TOP_MUX(0x2, "PWM"), /* out3 */
- TOP_MUX(0x3, "VGA")), /* vs1 */
- AON_PIN(HDMI_SCL, TOP_REG3, 29, 1, 0x70, 18,
- AON_MUX(0x0, "PCU"), /* test7 */
- AON_MUX(0x1, "AGPIO"), /* agpio3 */
- AON_MUX(0x2, "nonAON"), /* pin14 */
- TOP_MUX(0x0, "HDMI"), /* scl */
- TOP_MUX(0x1, "UART3")), /* rxd */
- AON_PIN(HDMI_SDA, TOP_REG3, 30, 1, 0x74, 0,
- AON_MUX(0x0, "PCU"), /* test8 */
- AON_MUX(0x1, "AGPIO"), /* agpio4 */
- AON_MUX(0x2, "nonAON"), /* pin15 */
- TOP_MUX(0x0, "HDMI"), /* sda */
- TOP_MUX(0x1, "UART3")), /* txd */
- AON_PIN(JTAG_TCK, TOP_REG7, 3, 1, 0x78, 18,
- AON_MUX(0x0, "JTAG"), /* tck */
- AON_MUX(0x1, "AGPIO"), /* agpio11 */
- AON_MUX(0x2, "nonAON"), /* pin22 */
- AON_MUX(0x3, "EXT_INT"), /* int4 */
- TOP_MUX(0x0, "SPI4"), /* clk */
- TOP_MUX(0x1, "UART1")), /* rxd */
- AON_PIN(JTAG_TRSTN, TOP_REG7, 4, 1, 0xac, 0,
- AON_MUX(0x0, "JTAG"), /* trstn */
- AON_MUX(0x1, "AGPIO"), /* agpio12 */
- AON_MUX(0x2, "nonAON"), /* pin23 */
- AON_MUX(0x3, "EXT_INT"), /* int5 */
- TOP_MUX(0x0, "SPI4"), /* cs */
- TOP_MUX(0x1, "UART1")), /* txd */
- AON_PIN(JTAG_TMS, TOP_REG7, 5, 1, 0xac, 9,
- AON_MUX(0x0, "JTAG"), /* tms */
- AON_MUX(0x1, "AGPIO"), /* agpio13 */
- AON_MUX(0x2, "nonAON"), /* pin24 */
- AON_MUX(0x3, "EXT_INT"), /* int6 */
- TOP_MUX(0x0, "SPI4"), /* txd */
- TOP_MUX(0x1, "UART2")), /* rxd */
- AON_PIN(JTAG_TDI, TOP_REG7, 6, 1, 0xac, 18,
- AON_MUX(0x0, "JTAG"), /* tdi */
- AON_MUX(0x1, "AGPIO"), /* agpio14 */
- AON_MUX(0x2, "nonAON"), /* pin25 */
- AON_MUX(0x3, "EXT_INT"), /* int7 */
- TOP_MUX(0x0, "SPI4"), /* rxd */
- TOP_MUX(0x1, "UART2")), /* txd */
- AON_PIN(JTAG_TDO, 0, 0, 0, 0xb0, 0,
- AON_MUX(0x0, "JTAG"), /* tdo */
- AON_MUX(0x1, "AGPIO"), /* agpio15 */
- AON_MUX(0x2, "nonAON")), /* pin26 */
- AON_PIN(I2C0_SCL, 0, 0, 0, 0xb0, 9,
- AON_MUX(0x0, "I2C0"), /* scl */
- AON_MUX(0x1, "AGPIO"), /* agpio16 */
- AON_MUX(0x2, "nonAON")), /* pin28 */
- AON_PIN(I2C0_SDA, 0, 0, 0, 0xb0, 18,
- AON_MUX(0x0, "I2C0"), /* sda */
- AON_MUX(0x1, "AGPIO"), /* agpio17 */
- AON_MUX(0x2, "nonAON")), /* pin29 */
- AON_PIN(I2C1_SCL, TOP_REG8, 4, 1, 0xb4, 0,
- AON_MUX(0x0, "I2C1"), /* scl */
- AON_MUX(0x1, "AGPIO"), /* agpio18 */
- AON_MUX(0x2, "nonAON"), /* pin30 */
- TOP_MUX(0x0, "LCD")), /* port0 lcd_te */
- AON_PIN(I2C1_SDA, TOP_REG8, 5, 1, 0xb4, 9,
- AON_MUX(0x0, "I2C1"), /* sda */
- AON_MUX(0x1, "AGPIO"), /* agpio19 */
- AON_MUX(0x2, "nonAON"), /* pin31 */
- TOP_MUX(0x0, "LCD")), /* port1 lcd_te */
- ZX_RESERVED(AON_RESERVED2),
- ZX_RESERVED(AON_RESERVED3),
- ZX_RESERVED(AON_RESERVED4),
-
- /* aon_pmm_reg_2 */
- AON_PIN(SPI1_CLK, TOP_REG2, 6, 3, 0x40, 9,
- AON_MUX(0x0, "EXT_INT"), /* int0 */
- AON_MUX(0x1, "PCU"), /* test12 */
- AON_MUX(0x2, "nonAON"), /* pin39 */
- TOP_MUX(0x0, "SPI1"), /* clk */
- TOP_MUX(0x1, "PCM"), /* clk */
- TOP_MUX(0x2, "BGPIO"), /* gpio35 */
- TOP_MUX(0x3, "I2C4"), /* scl */
- TOP_MUX(0x4, "I2S1"), /* mclk */
- TOP_MUX(0x5, "ISP")), /* flash_trig */
- AON_PIN(SPI1_CS, TOP_REG2, 9, 3, 0x40, 18,
- AON_MUX(0x0, "EXT_INT"), /* int1 */
- AON_MUX(0x1, "PCU"), /* test13 */
- AON_MUX(0x2, "nonAON"), /* pin40 */
- TOP_MUX(0x0, "SPI1"), /* cs */
- TOP_MUX(0x1, "PCM"), /* fs */
- TOP_MUX(0x2, "BGPIO"), /* gpio36 */
- TOP_MUX(0x3, "I2C4"), /* sda */
- TOP_MUX(0x4, "I2S1"), /* bclk */
- TOP_MUX(0x5, "ISP")), /* prelight_trig */
- AON_PIN(SPI1_TXD, TOP_REG2, 12, 3, 0x44, 0,
- AON_MUX(0x0, "EXT_INT"), /* int2 */
- AON_MUX(0x1, "PCU"), /* test14 */
- AON_MUX(0x2, "nonAON"), /* pin41 */
- TOP_MUX(0x0, "SPI1"), /* txd */
- TOP_MUX(0x1, "PCM"), /* txd */
- TOP_MUX(0x2, "BGPIO"), /* gpio37 */
- TOP_MUX(0x3, "UART5"), /* rxd */
- TOP_MUX(0x4, "I2S1"), /* ws */
- TOP_MUX(0x5, "ISP")), /* shutter_trig */
- AON_PIN(SPI1_RXD, TOP_REG2, 15, 3, 0x44, 9,
- AON_MUX(0x0, "EXT_INT"), /* int3 */
- AON_MUX(0x1, "PCU"), /* test15 */
- AON_MUX(0x2, "nonAON"), /* pin42 */
- TOP_MUX(0x0, "SPI1"), /* rxd */
- TOP_MUX(0x1, "PCM"), /* rxd */
- TOP_MUX(0x2, "BGPIO"), /* gpio38 */
- TOP_MUX(0x3, "UART5"), /* txd */
- TOP_MUX(0x4, "I2S1"), /* dout0 */
- TOP_MUX(0x5, "ISP")), /* shutter_open */
- ZX_RESERVED(AON_RESERVED5),
- ZX_RESERVED(AON_RESERVED6),
- AON_PIN(AUDIO_DET, TOP_REG3, 3, 2, 0x48, 18,
- AON_MUX(0x0, "PCU"), /* test4 */
- AON_MUX(0x1, "AGPIO"), /* agpio27 */
- AON_MUX(0x2, "nonAON"), /* pin2 */
- AON_MUX(0x3, "EXT_INT"), /* int16 */
- TOP_MUX(0x0, "AUDIO"), /* detect */
- TOP_MUX(0x1, "I2C2"), /* scl */
- TOP_MUX(0x2, "SPI2")), /* clk */
- AON_PIN(SPDIF_OUT, TOP_REG3, 14, 2, 0x78, 9,
- AON_MUX(0x0, "PCU"), /* test5 */
- AON_MUX(0x1, "AGPIO"), /* agpio22 */
- AON_MUX(0x2, "nonAON"), /* pin4 */
- TOP_MUX(0x0, "SPDIF"), /* out */
- TOP_MUX(0x1, "PWM"), /* out0 */
- TOP_MUX(0x2, "ISP")), /* fl_trig */
- AON_PIN(HDMI_CEC, 0, 0, 0, 0x74, 9,
- AON_MUX(0x0, "PCU"), /* test9 */
- AON_MUX(0x1, "AGPIO"), /* agpio1 */
- AON_MUX(0x2, "nonAON")), /* pin16 */
- AON_PIN(HDMI_HPD, 0, 0, 0, 0x74, 18,
- AON_MUX(0x0, "PCU"), /* test10 */
- AON_MUX(0x1, "AGPIO"), /* agpio2 */
- AON_MUX(0x2, "nonAON")), /* pin17 */
- AON_PIN(GMAC_25M_OUT, 0, 0, 0, 0x78, 0,
- AON_MUX(0x0, "PCU"), /* test11 */
- AON_MUX(0x1, "AGPIO"), /* agpio31 */
- AON_MUX(0x2, "nonAON")), /* pin43 */
- AON_PIN(BOOT_SEL0, 0, 0, 0, 0xc0, 9,
- AON_MUX(0x0, "BOOT"), /* sel0 */
- AON_MUX(0x1, "AGPIO"), /* agpio18 */
- AON_MUX(0x2, "nonAON")), /* pin18 */
- AON_PIN(BOOT_SEL1, 0, 0, 0, 0xc0, 18,
- AON_MUX(0x0, "BOOT"), /* sel1 */
- AON_MUX(0x1, "AGPIO"), /* agpio19 */
- AON_MUX(0x2, "nonAON")), /* pin19 */
- AON_PIN(BOOT_SEL2, 0, 0, 0, 0xc4, 0,
- AON_MUX(0x0, "BOOT"), /* sel2 */
- AON_MUX(0x1, "AGPIO"), /* agpio20 */
- AON_MUX(0x2, "nonAON")), /* pin20 */
- AON_PIN(DEEP_SLEEP_OUT_N, 0, 0, 0, 0xc4, 9,
- AON_MUX(0x0, "DEEPSLP"), /* deep sleep out_n */
- AON_MUX(0x1, "AGPIO"), /* agpio21 */
- AON_MUX(0x2, "nonAON")), /* pin21 */
- ZX_RESERVED(AON_RESERVED7),
-
- /* top_pmm_reg_0 */
- TOP_PIN(GMII_GTX_CLK, TOP_REG0, 0, 2, 0x10, 0,
- TOP_MUX(0x0, "GMII"), /* gtx_clk */
- TOP_MUX(0x1, "DVI0"), /* clk */
- TOP_MUX(0x2, "BGPIO")), /* gpio0 */
- TOP_PIN(GMII_TX_CLK, TOP_REG0, 2, 2, 0x10, 9,
- TOP_MUX(0x0, "GMII"), /* tx_clk */
- TOP_MUX(0x1, "DVI0"), /* vs */
- TOP_MUX(0x2, "BGPIO")), /* gpio1 */
- TOP_PIN(GMII_TXD0, TOP_REG0, 4, 2, 0x10, 18,
- TOP_MUX(0x0, "GMII"), /* txd0 */
- TOP_MUX(0x1, "DVI0"), /* hs */
- TOP_MUX(0x2, "BGPIO")), /* gpio2 */
- TOP_PIN(GMII_TXD1, TOP_REG0, 6, 2, 0x14, 0,
- TOP_MUX(0x0, "GMII"), /* txd1 */
- TOP_MUX(0x1, "DVI0"), /* d0 */
- TOP_MUX(0x2, "BGPIO")), /* gpio3 */
- TOP_PIN(GMII_TXD2, TOP_REG0, 8, 2, 0x14, 9,
- TOP_MUX(0x0, "GMII"), /* txd2 */
- TOP_MUX(0x1, "DVI0"), /* d1 */
- TOP_MUX(0x2, "BGPIO")), /* gpio4 */
- TOP_PIN(GMII_TXD3, TOP_REG0, 10, 2, 0x14, 18,
- TOP_MUX(0x0, "GMII"), /* txd3 */
- TOP_MUX(0x1, "DVI0"), /* d2 */
- TOP_MUX(0x2, "BGPIO")), /* gpio5 */
- TOP_PIN(GMII_TXD4, TOP_REG0, 12, 2, 0x18, 0,
- TOP_MUX(0x0, "GMII"), /* txd4 */
- TOP_MUX(0x1, "DVI0"), /* d3 */
- TOP_MUX(0x2, "BGPIO")), /* gpio6 */
- TOP_PIN(GMII_TXD5, TOP_REG0, 14, 2, 0x18, 9,
- TOP_MUX(0x0, "GMII"), /* txd5 */
- TOP_MUX(0x1, "DVI0"), /* d4 */
- TOP_MUX(0x2, "BGPIO")), /* gpio7 */
- TOP_PIN(GMII_TXD6, TOP_REG0, 16, 2, 0x18, 18,
- TOP_MUX(0x0, "GMII"), /* txd6 */
- TOP_MUX(0x1, "DVI0"), /* d5 */
- TOP_MUX(0x2, "BGPIO")), /* gpio8 */
- TOP_PIN(GMII_TXD7, TOP_REG0, 18, 2, 0x1c, 0,
- TOP_MUX(0x0, "GMII"), /* txd7 */
- TOP_MUX(0x1, "DVI0"), /* d6 */
- TOP_MUX(0x2, "BGPIO")), /* gpio9 */
- TOP_PIN(GMII_TX_ER, TOP_REG0, 20, 2, 0x1c, 9,
- TOP_MUX(0x0, "GMII"), /* tx_er */
- TOP_MUX(0x1, "DVI0"), /* d7 */
- TOP_MUX(0x2, "BGPIO")), /* gpio10 */
- TOP_PIN(GMII_TX_EN, TOP_REG0, 22, 2, 0x1c, 18,
- TOP_MUX(0x0, "GMII"), /* tx_en */
- TOP_MUX(0x1, "DVI0"), /* d8 */
- TOP_MUX(0x3, "BGPIO")), /* gpio11 */
- TOP_PIN(GMII_RX_CLK, TOP_REG0, 24, 2, 0x20, 0,
- TOP_MUX(0x0, "GMII"), /* rx_clk */
- TOP_MUX(0x1, "DVI0"), /* d9 */
- TOP_MUX(0x3, "BGPIO")), /* gpio12 */
- TOP_PIN(GMII_RXD0, TOP_REG0, 26, 2, 0x20, 9,
- TOP_MUX(0x0, "GMII"), /* rxd0 */
- TOP_MUX(0x1, "DVI0"), /* d10 */
- TOP_MUX(0x3, "BGPIO")), /* gpio13 */
- TOP_PIN(GMII_RXD1, TOP_REG0, 28, 2, 0x20, 18,
- TOP_MUX(0x0, "GMII"), /* rxd1 */
- TOP_MUX(0x1, "DVI0"), /* d11 */
- TOP_MUX(0x2, "BGPIO")), /* gpio14 */
- TOP_PIN(GMII_RXD2, TOP_REG0, 30, 2, 0x24, 0,
- TOP_MUX(0x0, "GMII"), /* rxd2 */
- TOP_MUX(0x1, "DVI1"), /* clk */
- TOP_MUX(0x2, "BGPIO")), /* gpio15 */
-
- /* top_pmm_reg_1 */
- TOP_PIN(GMII_RXD3, TOP_REG1, 0, 2, 0x24, 9,
- TOP_MUX(0x0, "GMII"), /* rxd3 */
- TOP_MUX(0x1, "DVI1"), /* hs */
- TOP_MUX(0x2, "BGPIO")), /* gpio16 */
- TOP_PIN(GMII_RXD4, TOP_REG1, 2, 2, 0x24, 18,
- TOP_MUX(0x0, "GMII"), /* rxd4 */
- TOP_MUX(0x1, "DVI1"), /* vs */
- TOP_MUX(0x2, "BGPIO")), /* gpio17 */
- TOP_PIN(GMII_RXD5, TOP_REG1, 4, 2, 0x28, 0,
- TOP_MUX(0x0, "GMII"), /* rxd5 */
- TOP_MUX(0x1, "DVI1"), /* d0 */
- TOP_MUX(0x2, "BGPIO"), /* gpio18 */
- TOP_MUX(0x3, "TSI0")), /* dat0 */
- TOP_PIN(GMII_RXD6, TOP_REG1, 6, 2, 0x28, 9,
- TOP_MUX(0x0, "GMII"), /* rxd6 */
- TOP_MUX(0x1, "DVI1"), /* d1 */
- TOP_MUX(0x2, "BGPIO"), /* gpio19 */
- TOP_MUX(0x3, "TSI0")), /* clk */
- TOP_PIN(GMII_RXD7, TOP_REG1, 8, 2, 0x28, 18,
- TOP_MUX(0x0, "GMII"), /* rxd7 */
- TOP_MUX(0x1, "DVI1"), /* d2 */
- TOP_MUX(0x2, "BGPIO"), /* gpio20 */
- TOP_MUX(0x3, "TSI0")), /* sync */
- TOP_PIN(GMII_RX_ER, TOP_REG1, 10, 2, 0x2c, 0,
- TOP_MUX(0x0, "GMII"), /* rx_er */
- TOP_MUX(0x1, "DVI1"), /* d3 */
- TOP_MUX(0x2, "BGPIO"), /* gpio21 */
- TOP_MUX(0x3, "TSI0")), /* valid */
- TOP_PIN(GMII_RX_DV, TOP_REG1, 12, 2, 0x2c, 9,
- TOP_MUX(0x0, "GMII"), /* rx_dv */
- TOP_MUX(0x1, "DVI1"), /* d4 */
- TOP_MUX(0x2, "BGPIO"), /* gpio22 */
- TOP_MUX(0x3, "TSI1")), /* dat0 */
- TOP_PIN(GMII_COL, TOP_REG1, 14, 2, 0x2c, 18,
- TOP_MUX(0x0, "GMII"), /* col */
- TOP_MUX(0x1, "DVI1"), /* d5 */
- TOP_MUX(0x2, "BGPIO"), /* gpio23 */
- TOP_MUX(0x3, "TSI1")), /* clk */
- TOP_PIN(GMII_CRS, TOP_REG1, 16, 2, 0x30, 0,
- TOP_MUX(0x0, "GMII"), /* crs */
- TOP_MUX(0x1, "DVI1"), /* d6 */
- TOP_MUX(0x2, "BGPIO"), /* gpio24 */
- TOP_MUX(0x3, "TSI1")), /* sync */
- TOP_PIN(GMII_MDC, TOP_REG1, 18, 2, 0x30, 9,
- TOP_MUX(0x0, "GMII"), /* mdc */
- TOP_MUX(0x1, "DVI1"), /* d7 */
- TOP_MUX(0x2, "BGPIO"), /* gpio25 */
- TOP_MUX(0x3, "TSI1")), /* valid */
- TOP_PIN(GMII_MDIO, TOP_REG1, 20, 1, 0x30, 18,
- TOP_MUX(0x0, "GMII"), /* mdio */
- TOP_MUX(0x2, "BGPIO")), /* gpio26 */
- TOP_PIN(SDIO1_CLK, TOP_REG1, 21, 2, 0x34, 18,
- TOP_MUX(0x0, "SDIO1"), /* clk */
- TOP_MUX(0x1, "USIM0"), /* clk */
- TOP_MUX(0x2, "BGPIO"), /* gpio27 */
- TOP_MUX(0x3, "SPINOR")), /* clk */
- TOP_PIN(SDIO1_CMD, TOP_REG1, 23, 2, 0x38, 0,
- TOP_MUX(0x0, "SDIO1"), /* cmd */
- TOP_MUX(0x1, "USIM0"), /* cd */
- TOP_MUX(0x2, "BGPIO"), /* gpio28 */
- TOP_MUX(0x3, "SPINOR")), /* cs */
- TOP_PIN(SDIO1_DATA0, TOP_REG1, 25, 2, 0x38, 9,
- TOP_MUX(0x0, "SDIO1"), /* dat0 */
- TOP_MUX(0x1, "USIM0"), /* rst */
- TOP_MUX(0x2, "BGPIO"), /* gpio29 */
- TOP_MUX(0x3, "SPINOR")), /* dq0 */
- TOP_PIN(SDIO1_DATA1, TOP_REG1, 27, 2, 0x38, 18,
- TOP_MUX(0x0, "SDIO1"), /* dat1 */
- TOP_MUX(0x1, "USIM0"), /* data */
- TOP_MUX(0x2, "BGPIO"), /* gpio30 */
- TOP_MUX(0x3, "SPINOR")), /* dq1 */
- TOP_PIN(SDIO1_DATA2, TOP_REG1, 29, 2, 0x3c, 0,
- TOP_MUX(0x0, "SDIO1"), /* dat2 */
- TOP_MUX(0x1, "BGPIO"), /* gpio31 */
- TOP_MUX(0x2, "SPINOR")), /* dq2 */
-
- /* top_pmm_reg_2 */
- TOP_PIN(SDIO1_DATA3, TOP_REG2, 0, 2, 0x3c, 9,
- TOP_MUX(0x0, "SDIO1"), /* dat3 */
- TOP_MUX(0x1, "BGPIO"), /* gpio32 */
- TOP_MUX(0x2, "SPINOR")), /* dq3 */
- TOP_PIN(SDIO1_CD, TOP_REG2, 2, 2, 0x3c, 18,
- TOP_MUX(0x0, "SDIO1"), /* cd */
- TOP_MUX(0x1, "BGPIO"), /* gpio33 */
- TOP_MUX(0x2, "ISP")), /* fl_trig */
- TOP_PIN(SDIO1_WP, TOP_REG2, 4, 2, 0x40, 0,
- TOP_MUX(0x0, "SDIO1"), /* wp */
- TOP_MUX(0x1, "BGPIO"), /* gpio34 */
- TOP_MUX(0x2, "ISP")), /* ref_clk */
- TOP_PIN(USIM1_CD, TOP_REG2, 22, 3, 0x44, 18,
- TOP_MUX(0x0, "USIM1"), /* cd */
- TOP_MUX(0x1, "UART4"), /* rxd */
- TOP_MUX(0x2, "BGPIO"), /* gpio39 */
- TOP_MUX(0x3, "SPI3"), /* clk */
- TOP_MUX(0x4, "I2S0"), /* bclk */
- TOP_MUX(0x5, "B_DVI0")), /* d8 */
- TOP_PIN(USIM1_CLK, TOP_REG2, 25, 3, 0x4c, 18,
- TOP_MUX(0x0, "USIM1"), /* clk */
- TOP_MUX(0x1, "UART4"), /* txd */
- TOP_MUX(0x2, "BGPIO"), /* gpio40 */
- TOP_MUX(0x3, "SPI3"), /* cs */
- TOP_MUX(0x4, "I2S0"), /* ws */
- TOP_MUX(0x5, "B_DVI0")), /* d9 */
- TOP_PIN(USIM1_RST, TOP_REG2, 28, 3, 0x4c, 0,
- TOP_MUX(0x0, "USIM1"), /* rst */
- TOP_MUX(0x1, "UART4"), /* cts */
- TOP_MUX(0x2, "BGPIO"), /* gpio41 */
- TOP_MUX(0x3, "SPI3"), /* txd */
- TOP_MUX(0x4, "I2S0"), /* dout0 */
- TOP_MUX(0x5, "B_DVI0")), /* d10 */
-
- /* top_pmm_reg_3 */
- TOP_PIN(USIM1_DATA, TOP_REG3, 0, 3, 0x4c, 9,
- TOP_MUX(0x0, "USIM1"), /* dat */
- TOP_MUX(0x1, "UART4"), /* rst */
- TOP_MUX(0x2, "BGPIO"), /* gpio42 */
- TOP_MUX(0x3, "SPI3"), /* rxd */
- TOP_MUX(0x4, "I2S0"), /* din0 */
- TOP_MUX(0x5, "B_DVI0")), /* d11 */
- TOP_PIN(SDIO0_CLK, TOP_REG3, 6, 1, 0x58, 0,
- TOP_MUX(0x0, "SDIO0"), /* clk */
- TOP_MUX(0x1, "GPIO")), /* gpio43 */
- TOP_PIN(SDIO0_CMD, TOP_REG3, 7, 1, 0x58, 9,
- TOP_MUX(0x0, "SDIO0"), /* cmd */
- TOP_MUX(0x1, "GPIO")), /* gpio44 */
- TOP_PIN(SDIO0_DATA0, TOP_REG3, 8, 1, 0x58, 18,
- TOP_MUX(0x0, "SDIO0"), /* dat0 */
- TOP_MUX(0x1, "GPIO")), /* gpio45 */
- TOP_PIN(SDIO0_DATA1, TOP_REG3, 9, 1, 0x5c, 0,
- TOP_MUX(0x0, "SDIO0"), /* dat1 */
- TOP_MUX(0x1, "GPIO")), /* gpio46 */
- TOP_PIN(SDIO0_DATA2, TOP_REG3, 10, 1, 0x5c, 9,
- TOP_MUX(0x0, "SDIO0"), /* dat2 */
- TOP_MUX(0x1, "GPIO")), /* gpio47 */
- TOP_PIN(SDIO0_DATA3, TOP_REG3, 11, 1, 0x5c, 18,
- TOP_MUX(0x0, "SDIO0"), /* dat3 */
- TOP_MUX(0x1, "GPIO")), /* gpio48 */
- TOP_PIN(SDIO0_CD, TOP_REG3, 12, 1, 0x60, 0,
- TOP_MUX(0x0, "SDIO0"), /* cd */
- TOP_MUX(0x1, "GPIO")), /* gpio49 */
- TOP_PIN(SDIO0_WP, TOP_REG3, 13, 1, 0x60, 9,
- TOP_MUX(0x0, "SDIO0"), /* wp */
- TOP_MUX(0x1, "GPIO")), /* gpio50 */
-
- /* top_pmm_reg_4 */
- TOP_PIN(TSI0_DATA0, TOP_REG4, 0, 2, 0x60, 18,
- TOP_MUX(0x0, "TSI0"), /* dat0 */
- TOP_MUX(0x1, "LCD"), /* clk */
- TOP_MUX(0x2, "BGPIO")), /* gpio51 */
- TOP_PIN(SPINOR_CLK, TOP_REG4, 2, 2, 0xa8, 18,
- TOP_MUX(0x0, "SPINOR"), /* clk */
- TOP_MUX(0x1, "TSI0"), /* dat1 */
- TOP_MUX(0x2, "LCD"), /* dat0 */
- TOP_MUX(0x3, "BGPIO")), /* gpio52 */
- TOP_PIN(TSI2_DATA, TOP_REG4, 4, 2, 0x7c, 0,
- TOP_MUX(0x0, "TSI2"), /* dat */
- TOP_MUX(0x1, "TSI0"), /* dat2 */
- TOP_MUX(0x2, "LCD"), /* dat1 */
- TOP_MUX(0x3, "BGPIO")), /* gpio53 */
- TOP_PIN(TSI2_CLK, TOP_REG4, 6, 2, 0x7c, 9,
- TOP_MUX(0x0, "TSI2"), /* clk */
- TOP_MUX(0x1, "TSI0"), /* dat3 */
- TOP_MUX(0x2, "LCD"), /* dat2 */
- TOP_MUX(0x3, "BGPIO")), /* gpio54 */
- TOP_PIN(TSI2_SYNC, TOP_REG4, 8, 2, 0x7c, 18,
- TOP_MUX(0x0, "TSI2"), /* sync */
- TOP_MUX(0x1, "TSI0"), /* dat4 */
- TOP_MUX(0x2, "LCD"), /* dat3 */
- TOP_MUX(0x3, "BGPIO")), /* gpio55 */
- TOP_PIN(TSI2_VALID, TOP_REG4, 10, 2, 0x80, 0,
- TOP_MUX(0x0, "TSI2"), /* valid */
- TOP_MUX(0x1, "TSI0"), /* dat5 */
- TOP_MUX(0x2, "LCD"), /* dat4 */
- TOP_MUX(0x3, "BGPIO")), /* gpio56 */
- TOP_PIN(SPINOR_CS, TOP_REG4, 12, 2, 0x80, 9,
- TOP_MUX(0x0, "SPINOR"), /* cs */
- TOP_MUX(0x1, "TSI0"), /* dat6 */
- TOP_MUX(0x2, "LCD"), /* dat5 */
- TOP_MUX(0x3, "BGPIO")), /* gpio57 */
- TOP_PIN(SPINOR_DQ0, TOP_REG4, 14, 2, 0x80, 18,
- TOP_MUX(0x0, "SPINOR"), /* dq0 */
- TOP_MUX(0x1, "TSI0"), /* dat7 */
- TOP_MUX(0x2, "LCD"), /* dat6 */
- TOP_MUX(0x3, "BGPIO")), /* gpio58 */
- TOP_PIN(SPINOR_DQ1, TOP_REG4, 16, 2, 0x84, 0,
- TOP_MUX(0x0, "SPINOR"), /* dq1 */
- TOP_MUX(0x1, "TSI0"), /* clk */
- TOP_MUX(0x2, "LCD"), /* dat7 */
- TOP_MUX(0x3, "BGPIO")), /* gpio59 */
- TOP_PIN(SPINOR_DQ2, TOP_REG4, 18, 2, 0x84, 9,
- TOP_MUX(0x0, "SPINOR"), /* dq2 */
- TOP_MUX(0x1, "TSI0"), /* sync */
- TOP_MUX(0x2, "LCD"), /* dat8 */
- TOP_MUX(0x3, "BGPIO")), /* gpio60 */
- TOP_PIN(SPINOR_DQ3, TOP_REG4, 20, 2, 0x84, 18,
- TOP_MUX(0x0, "SPINOR"), /* dq3 */
- TOP_MUX(0x1, "TSI0"), /* valid */
- TOP_MUX(0x2, "LCD"), /* dat9 */
- TOP_MUX(0x3, "BGPIO")), /* gpio61 */
- TOP_PIN(VGA_HS, TOP_REG4, 22, 3, 0x88, 0,
- TOP_MUX(0x0, "VGA"), /* hs */
- TOP_MUX(0x1, "TSI1"), /* dat0 */
- TOP_MUX(0x2, "LCD"), /* dat10 */
- TOP_MUX(0x3, "BGPIO"), /* gpio62 */
- TOP_MUX(0x4, "I2S1"), /* din1 */
- TOP_MUX(0x5, "B_DVI0")), /* clk */
- TOP_PIN(VGA_VS, TOP_REG4, 25, 3, 0x88, 9,
- TOP_MUX(0x0, "VGA"), /* vs0 */
- TOP_MUX(0x1, "TSI1"), /* dat1 */
- TOP_MUX(0x2, "LCD"), /* dat11 */
- TOP_MUX(0x3, "BGPIO"), /* gpio63 */
- TOP_MUX(0x4, "I2S1"), /* din2 */
- TOP_MUX(0x5, "B_DVI0")), /* vs */
- TOP_PIN(TSI3_DATA, TOP_REG4, 28, 3, 0x88, 18,
- TOP_MUX(0x0, "TSI3"), /* dat */
- TOP_MUX(0x1, "TSI1"), /* dat2 */
- TOP_MUX(0x2, "LCD"), /* dat12 */
- TOP_MUX(0x3, "BGPIO"), /* gpio64 */
- TOP_MUX(0x4, "I2S1"), /* din3 */
- TOP_MUX(0x5, "B_DVI0")), /* hs */
-
- /* top_pmm_reg_5 */
- TOP_PIN(TSI3_CLK, TOP_REG5, 0, 3, 0x8c, 0,
- TOP_MUX(0x0, "TSI3"), /* clk */
- TOP_MUX(0x1, "TSI1"), /* dat3 */
- TOP_MUX(0x2, "LCD"), /* dat13 */
- TOP_MUX(0x3, "BGPIO"), /* gpio65 */
- TOP_MUX(0x4, "I2S1"), /* dout1 */
- TOP_MUX(0x5, "B_DVI0")), /* d0 */
- TOP_PIN(TSI3_SYNC, TOP_REG5, 3, 3, 0x8c, 9,
- TOP_MUX(0x0, "TSI3"), /* sync */
- TOP_MUX(0x1, "TSI1"), /* dat4 */
- TOP_MUX(0x2, "LCD"), /* dat14 */
- TOP_MUX(0x3, "BGPIO"), /* gpio66 */
- TOP_MUX(0x4, "I2S1"), /* dout2 */
- TOP_MUX(0x5, "B_DVI0")), /* d1 */
- TOP_PIN(TSI3_VALID, TOP_REG5, 6, 3, 0x8c, 18,
- TOP_MUX(0x0, "TSI3"), /* valid */
- TOP_MUX(0x1, "TSI1"), /* dat5 */
- TOP_MUX(0x2, "LCD"), /* dat15 */
- TOP_MUX(0x3, "BGPIO"), /* gpio67 */
- TOP_MUX(0x4, "I2S1"), /* dout3 */
- TOP_MUX(0x5, "B_DVI0")), /* d2 */
- TOP_PIN(I2S1_WS, TOP_REG5, 9, 3, 0x90, 0,
- TOP_MUX(0x0, "I2S1"), /* ws */
- TOP_MUX(0x1, "TSI1"), /* dat6 */
- TOP_MUX(0x2, "LCD"), /* dat16 */
- TOP_MUX(0x3, "BGPIO"), /* gpio68 */
- TOP_MUX(0x4, "VGA"), /* scl */
- TOP_MUX(0x5, "B_DVI0")), /* d3 */
- TOP_PIN(I2S1_BCLK, TOP_REG5, 12, 3, 0x90, 9,
- TOP_MUX(0x0, "I2S1"), /* bclk */
- TOP_MUX(0x1, "TSI1"), /* dat7 */
- TOP_MUX(0x2, "LCD"), /* dat17 */
- TOP_MUX(0x3, "BGPIO"), /* gpio69 */
- TOP_MUX(0x4, "VGA"), /* sda */
- TOP_MUX(0x5, "B_DVI0")), /* d4 */
- TOP_PIN(I2S1_MCLK, TOP_REG5, 15, 2, 0x90, 18,
- TOP_MUX(0x0, "I2S1"), /* mclk */
- TOP_MUX(0x1, "TSI1"), /* clk */
- TOP_MUX(0x2, "LCD"), /* dat18 */
- TOP_MUX(0x3, "BGPIO")), /* gpio70 */
- TOP_PIN(I2S1_DIN0, TOP_REG5, 17, 2, 0x94, 0,
- TOP_MUX(0x0, "I2S1"), /* din0 */
- TOP_MUX(0x1, "TSI1"), /* sync */
- TOP_MUX(0x2, "LCD"), /* dat19 */
- TOP_MUX(0x3, "BGPIO")), /* gpio71 */
- TOP_PIN(I2S1_DOUT0, TOP_REG5, 19, 2, 0x94, 9,
- TOP_MUX(0x0, "I2S1"), /* dout0 */
- TOP_MUX(0x1, "TSI1"), /* valid */
- TOP_MUX(0x2, "LCD"), /* dat20 */
- TOP_MUX(0x3, "BGPIO")), /* gpio72 */
- TOP_PIN(SPI3_CLK, TOP_REG5, 21, 3, 0x94, 18,
- TOP_MUX(0x0, "SPI3"), /* clk */
- TOP_MUX(0x1, "TSO1"), /* clk */
- TOP_MUX(0x2, "LCD"), /* dat21 */
- TOP_MUX(0x3, "BGPIO"), /* gpio73 */
- TOP_MUX(0x4, "UART5"), /* rxd */
- TOP_MUX(0x5, "PCM"), /* fs */
- TOP_MUX(0x6, "I2S0"), /* din1 */
- TOP_MUX(0x7, "B_DVI0")), /* d5 */
- TOP_PIN(SPI3_CS, TOP_REG5, 24, 3, 0x98, 0,
- TOP_MUX(0x0, "SPI3"), /* cs */
- TOP_MUX(0x1, "TSO1"), /* dat0 */
- TOP_MUX(0x2, "LCD"), /* dat22 */
- TOP_MUX(0x3, "BGPIO"), /* gpio74 */
- TOP_MUX(0x4, "UART5"), /* txd */
- TOP_MUX(0x5, "PCM"), /* clk */
- TOP_MUX(0x6, "I2S0"), /* din2 */
- TOP_MUX(0x7, "B_DVI0")), /* d6 */
- TOP_PIN(SPI3_TXD, TOP_REG5, 27, 3, 0x98, 9,
- TOP_MUX(0x0, "SPI3"), /* txd */
- TOP_MUX(0x1, "TSO1"), /* dat1 */
- TOP_MUX(0x2, "LCD"), /* dat23 */
- TOP_MUX(0x3, "BGPIO"), /* gpio75 */
- TOP_MUX(0x4, "UART5"), /* cts */
- TOP_MUX(0x5, "PCM"), /* txd */
- TOP_MUX(0x6, "I2S0"), /* din3 */
- TOP_MUX(0x7, "B_DVI0")), /* d7 */
- TOP_PIN(NAND_LDO_MS18_SEL, TOP_REG5, 30, 1, 0xe4, 0,
- TOP_MUX(0x0, "NAND"), /* ldo_ms18_sel */
- TOP_MUX(0x1, "BGPIO")), /* gpio99 */
-
- /* top_pmm_reg_6 */
- TOP_PIN(SPI3_RXD, TOP_REG6, 0, 3, 0x98, 18,
- TOP_MUX(0x0, "SPI3"), /* rxd */
- TOP_MUX(0x1, "TSO1"), /* dat2 */
- TOP_MUX(0x2, "LCD"), /* stvu_vsync */
- TOP_MUX(0x3, "BGPIO"), /* gpio76 */
- TOP_MUX(0x4, "UART5"), /* rts */
- TOP_MUX(0x5, "PCM"), /* rxd */
- TOP_MUX(0x6, "I2S0"), /* dout1 */
- TOP_MUX(0x7, "B_DVI1")), /* clk */
- TOP_PIN(I2S0_MCLK, TOP_REG6, 3, 3, 0x9c, 0,
- TOP_MUX(0x0, "I2S0"), /* mclk */
- TOP_MUX(0x1, "TSO1"), /* dat3 */
- TOP_MUX(0x2, "LCD"), /* stvd */
- TOP_MUX(0x3, "BGPIO"), /* gpio77 */
- TOP_MUX(0x4, "USIM0"), /* cd */
- TOP_MUX(0x5, "B_DVI1")), /* vs */
- TOP_PIN(I2S0_BCLK, TOP_REG6, 6, 3, 0x9c, 9,
- TOP_MUX(0x0, "I2S0"), /* bclk */
- TOP_MUX(0x1, "TSO1"), /* dat4 */
- TOP_MUX(0x2, "LCD"), /* sthl_hsync */
- TOP_MUX(0x3, "BGPIO"), /* gpio78 */
- TOP_MUX(0x4, "USIM0"), /* clk */
- TOP_MUX(0x5, "B_DVI1")), /* hs */
- TOP_PIN(I2S0_WS, TOP_REG6, 9, 3, 0x9c, 18,
- TOP_MUX(0x0, "I2S0"), /* ws */
- TOP_MUX(0x1, "TSO1"), /* dat5 */
- TOP_MUX(0x2, "LCD"), /* sthr */
- TOP_MUX(0x3, "BGPIO"), /* gpio79 */
- TOP_MUX(0x4, "USIM0"), /* rst */
- TOP_MUX(0x5, "B_DVI1")), /* d0 */
- TOP_PIN(I2S0_DIN0, TOP_REG6, 12, 3, 0xa0, 0,
- TOP_MUX(0x0, "I2S0"), /* din0 */
- TOP_MUX(0x1, "TSO1"), /* dat6 */
- TOP_MUX(0x2, "LCD"), /* oev_dataen */
- TOP_MUX(0x3, "BGPIO"), /* gpio80 */
- TOP_MUX(0x4, "USIM0"), /* dat */
- TOP_MUX(0x5, "B_DVI1")), /* d1 */
- TOP_PIN(I2S0_DOUT0, TOP_REG6, 15, 2, 0xa0, 9,
- TOP_MUX(0x0, "I2S0"), /* dout0 */
- TOP_MUX(0x1, "TSO1"), /* dat7 */
- TOP_MUX(0x2, "LCD"), /* ckv */
- TOP_MUX(0x3, "BGPIO")), /* gpio81 */
- TOP_PIN(I2C5_SCL, TOP_REG6, 17, 3, 0xa0, 18,
- TOP_MUX(0x0, "I2C5"), /* scl */
- TOP_MUX(0x1, "TSO1"), /* sync */
- TOP_MUX(0x2, "LCD"), /* ld */
- TOP_MUX(0x3, "BGPIO"), /* gpio82 */
- TOP_MUX(0x4, "PWM"), /* out2 */
- TOP_MUX(0x5, "I2S0"), /* dout2 */
- TOP_MUX(0x6, "B_DVI1")), /* d2 */
- TOP_PIN(I2C5_SDA, TOP_REG6, 20, 3, 0xa4, 0,
- TOP_MUX(0x0, "I2C5"), /* sda */
- TOP_MUX(0x1, "TSO1"), /* vld */
- TOP_MUX(0x2, "LCD"), /* pol */
- TOP_MUX(0x3, "BGPIO"), /* gpio83 */
- TOP_MUX(0x4, "PWM"), /* out3 */
- TOP_MUX(0x5, "I2S0"), /* dout3 */
- TOP_MUX(0x6, "B_DVI1")), /* d3 */
- TOP_PIN(SPI2_CLK, TOP_REG6, 23, 3, 0xa4, 9,
- TOP_MUX(0x0, "SPI2"), /* clk */
- TOP_MUX(0x1, "TSO0"), /* clk */
- TOP_MUX(0x2, "LCD"), /* degsl */
- TOP_MUX(0x3, "BGPIO"), /* gpio84 */
- TOP_MUX(0x4, "I2C4"), /* scl */
- TOP_MUX(0x5, "B_DVI1")), /* d4 */
- TOP_PIN(SPI2_CS, TOP_REG6, 26, 3, 0xa4, 18,
- TOP_MUX(0x0, "SPI2"), /* cs */
- TOP_MUX(0x1, "TSO0"), /* data */
- TOP_MUX(0x2, "LCD"), /* rev */
- TOP_MUX(0x3, "BGPIO"), /* gpio85 */
- TOP_MUX(0x4, "I2C4"), /* sda */
- TOP_MUX(0x5, "B_DVI1")), /* d5 */
- TOP_PIN(SPI2_TXD, TOP_REG6, 29, 3, 0xa8, 0,
- TOP_MUX(0x0, "SPI2"), /* txd */
- TOP_MUX(0x1, "TSO0"), /* sync */
- TOP_MUX(0x2, "LCD"), /* u_d */
- TOP_MUX(0x3, "BGPIO"), /* gpio86 */
- TOP_MUX(0x4, "I2C4"), /* scl */
- TOP_MUX(0x5, "B_DVI1")), /* d6 */
-
- /* top_pmm_reg_7 */
- TOP_PIN(SPI2_RXD, TOP_REG7, 0, 3, 0xa8, 9,
- TOP_MUX(0x0, "SPI2"), /* rxd */
- TOP_MUX(0x1, "TSO0"), /* vld */
- TOP_MUX(0x2, "LCD"), /* r_l */
- TOP_MUX(0x3, "BGPIO"), /* gpio87 */
- TOP_MUX(0x4, "I2C3"), /* sda */
- TOP_MUX(0x5, "B_DVI1")), /* d7 */
- TOP_PIN(NAND_WP_N, TOP_REG7, 7, 3, 0x54, 9,
- TOP_MUX(0x0, "NAND"), /* wp */
- TOP_MUX(0x1, "PWM"), /* out2 */
- TOP_MUX(0x2, "SPI2"), /* clk */
- TOP_MUX(0x3, "BGPIO"), /* gpio88 */
- TOP_MUX(0x4, "TSI0"), /* dat0 */
- TOP_MUX(0x5, "I2S1")), /* din1 */
- TOP_PIN(NAND_PAGE_SIZE0, TOP_REG7, 10, 3, 0xb8, 0,
- TOP_MUX(0x0, "NAND"), /* boot_pagesize0 */
- TOP_MUX(0x1, "PWM"), /* out3 */
- TOP_MUX(0x2, "SPI2"), /* cs */
- TOP_MUX(0x3, "BGPIO"), /* gpio89 */
- TOP_MUX(0x4, "TSI0"), /* clk */
- TOP_MUX(0x5, "I2S1")), /* din2 */
- TOP_PIN(NAND_PAGE_SIZE1, TOP_REG7, 13, 3, 0xb8, 9,
- TOP_MUX(0x0, "NAND"), /* boot_pagesize1 */
- TOP_MUX(0x1, "I2C4"), /* scl */
- TOP_MUX(0x2, "SPI2"), /* txd */
- TOP_MUX(0x3, "BGPIO"), /* gpio90 */
- TOP_MUX(0x4, "TSI0"), /* sync */
- TOP_MUX(0x5, "I2S1")), /* din3 */
- TOP_PIN(NAND_ADDR_CYCLE, TOP_REG7, 16, 3, 0xb8, 18,
- TOP_MUX(0x0, "NAND"), /* boot_addr_cycles */
- TOP_MUX(0x1, "I2C4"), /* sda */
- TOP_MUX(0x2, "SPI2"), /* rxd */
- TOP_MUX(0x3, "BGPIO"), /* gpio91 */
- TOP_MUX(0x4, "TSI0"), /* valid */
- TOP_MUX(0x5, "I2S1")), /* dout1 */
- TOP_PIN(NAND_RB0, TOP_REG7, 19, 3, 0xbc, 0,
- TOP_MUX(0x0, "NAND"), /* rdy_busy0 */
- TOP_MUX(0x1, "I2C2"), /* scl */
- TOP_MUX(0x2, "USIM0"), /* cd */
- TOP_MUX(0x3, "BGPIO"), /* gpio92 */
- TOP_MUX(0x4, "TSI1")), /* data0 */
- TOP_PIN(NAND_RB1, TOP_REG7, 22, 3, 0xbc, 9,
- TOP_MUX(0x0, "NAND"), /* rdy_busy1 */
- TOP_MUX(0x1, "I2C2"), /* sda */
- TOP_MUX(0x2, "USIM0"), /* clk */
- TOP_MUX(0x3, "BGPIO"), /* gpio93 */
- TOP_MUX(0x4, "TSI1")), /* clk */
- TOP_PIN(NAND_RB2, TOP_REG7, 25, 3, 0xbc, 18,
- TOP_MUX(0x0, "NAND"), /* rdy_busy2 */
- TOP_MUX(0x1, "UART5"), /* rxd */
- TOP_MUX(0x2, "USIM0"), /* rst */
- TOP_MUX(0x3, "BGPIO"), /* gpio94 */
- TOP_MUX(0x4, "TSI1"), /* sync */
- TOP_MUX(0x4, "I2S1")), /* dout2 */
- TOP_PIN(NAND_RB3, TOP_REG7, 28, 3, 0x54, 18,
- TOP_MUX(0x0, "NAND"), /* rdy_busy3 */
- TOP_MUX(0x1, "UART5"), /* txd */
- TOP_MUX(0x2, "USIM0"), /* dat */
- TOP_MUX(0x3, "BGPIO"), /* gpio95 */
- TOP_MUX(0x4, "TSI1"), /* valid */
- TOP_MUX(0x4, "I2S1")), /* dout3 */
-
- /* top_pmm_reg_8 */
- TOP_PIN(GMAC_125M_IN, TOP_REG8, 0, 2, 0x34, 0,
- TOP_MUX(0x0, "GMII"), /* 125m_in */
- TOP_MUX(0x1, "USB2"), /* 0_drvvbus */
- TOP_MUX(0x2, "ISP"), /* ref_clk */
- TOP_MUX(0x3, "BGPIO")), /* gpio96 */
- TOP_PIN(GMAC_50M_OUT, TOP_REG8, 2, 2, 0x34, 9,
- TOP_MUX(0x0, "GMII"), /* 50m_out */
- TOP_MUX(0x1, "USB2"), /* 1_drvvbus */
- TOP_MUX(0x2, "BGPIO"), /* gpio97 */
- TOP_MUX(0x3, "USB2")), /* 0_drvvbus */
- TOP_PIN(SPINOR_SSCLK_LOOPBACK, TOP_REG8, 6, 1, 0xc8, 9,
- TOP_MUX(0x0, "SPINOR")), /* sdio1_clk_i */
- TOP_PIN(SPINOR_SDIO1CLK_LOOPBACK, TOP_REG8, 7, 1, 0xc8, 18,
- TOP_MUX(0x0, "SPINOR")), /* ssclk_i */
-};
-
-static struct zx_pinctrl_soc_info zx296718_pinctrl_info = {
- .pins = zx296718_pins,
- .npins = ARRAY_SIZE(zx296718_pins),
-};
-
-static int zx296718_pinctrl_probe(struct platform_device *pdev)
-{
- return zx_pinctrl_init(pdev, &zx296718_pinctrl_info);
-}
-
-static const struct of_device_id zx296718_pinctrl_match[] = {
- { .compatible = "zte,zx296718-pmm", },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx296718_pinctrl_match);
-
-static struct platform_driver zx296718_pinctrl_driver = {
- .probe = zx296718_pinctrl_probe,
- .driver = {
- .name = "zx296718-pinctrl",
- .of_match_table = zx296718_pinctrl_match,
- },
-};
-builtin_platform_driver(zx296718_pinctrl_driver);
-
-MODULE_DESCRIPTION("ZTE ZX296718 pinctrl driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 3104680b7485..fc5aa1525d13 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -32,7 +32,14 @@ static struct cros_ec_platform pd_p = {
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
-static irqreturn_t ec_irq_handler(int irq, void *data)
+/**
+ * cros_ec_irq_handler() - top half part of the interrupt handler
+ * @irq: IRQ id
+ * @data: (ec_dev) Device with events to process.
+ *
+ * Return: Wakes up the bottom half.
+ */
+static irqreturn_t cros_ec_irq_handler(int irq, void *data)
{
struct cros_ec_device *ec_dev = data;
@@ -51,7 +58,7 @@ static irqreturn_t ec_irq_handler(int irq, void *data)
* Return: true if more events are still pending and this function should be
* called again.
*/
-bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
+static bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
{
bool wake_event;
bool ec_has_more_events;
@@ -73,9 +80,15 @@ bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
return ec_has_more_events;
}
-EXPORT_SYMBOL(cros_ec_handle_event);
-static irqreturn_t ec_irq_thread(int irq, void *data)
+/**
+ * cros_ec_irq_thread() - bottom half part of the interrupt handler
+ * @irq: IRQ id
+ * @data: (ec_dev) Device with events to process.
+ *
+ * Return: Interrupt handled.
+ */
+irqreturn_t cros_ec_irq_thread(int irq, void *data)
{
struct cros_ec_device *ec_dev = data;
bool ec_has_more_events;
@@ -86,6 +99,7 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL(cros_ec_irq_thread);
static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
{
@@ -194,8 +208,8 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (ec_dev->irq > 0) {
err = devm_request_threaded_irq(dev, ec_dev->irq,
- ec_irq_handler,
- ec_irq_thread,
+ cros_ec_irq_handler,
+ cros_ec_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"chromeos-ec", ec_dev);
if (err) {
@@ -269,6 +283,13 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
dev_info(dev, "Chrome EC device registered\n");
+ /*
+	 * Unlock the EC, which may be waiting for the AP to process MKBP events.
+	 * If the AP takes too long to answer, the EC stops sending events.
+ */
+ if (ec_dev->mkbp_event_supported)
+ cros_ec_irq_thread(0, ec_dev);
+
return 0;
}
EXPORT_SYMBOL(cros_ec_register);
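Exporting the threaded handler lets the transport drivers (ishtp and rpmsg, below) and the registration path above invoke the same event-drain loop directly. A sketch of what the bottom half does, reconstructed from the fragments visible in this diff (not a verbatim copy of cros_ec.c):

irqreturn_t cros_ec_irq_thread(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;
	bool ec_has_more_events;

	/* Drain MKBP events until the EC reports none pending. */
	do {
		ec_has_more_events = cros_ec_handle_event(ec_dev);
	} while (ec_has_more_events);

	return IRQ_HANDLED;
}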
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
index e69fc1ff68b4..78363dcfdf23 100644
--- a/drivers/platform/chrome/cros_ec.h
+++ b/drivers/platform/chrome/cros_ec.h
@@ -8,12 +8,14 @@
#ifndef __CROS_EC_H
#define __CROS_EC_H
+#include <linux/interrupt.h>
+
int cros_ec_register(struct cros_ec_device *ec_dev);
int cros_ec_unregister(struct cros_ec_device *ec_dev);
int cros_ec_suspend(struct cros_ec_device *ec_dev);
int cros_ec_resume(struct cros_ec_device *ec_dev);
-bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
+irqreturn_t cros_ec_irq_thread(int irq, void *data);
#endif /* __CROS_EC_H */
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 81364029af36..f00107017318 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -140,12 +140,8 @@ static void ish_evt_handler(struct work_struct *work)
{
struct ishtp_cl_data *client_data =
container_of(work, struct ishtp_cl_data, work_ec_evt);
- struct cros_ec_device *ec_dev = client_data->ec_dev;
- bool ec_has_more_events;
- do {
- ec_has_more_events = cros_ec_handle_event(ec_dev);
- } while (ec_has_more_events);
+ cros_ec_irq_thread(0, client_data->ec_dev);
}
/**
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index de8dfb12e486..469dfc7a4a03 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -523,7 +523,7 @@ static struct attribute *__lb_cmds_attrs[] = {
NULL,
};
-static struct attribute_group cros_ec_lightbar_attr_group = {
+static const struct attribute_group cros_ec_lightbar_attr_group = {
.name = "lightbar",
.attrs = __lb_cmds_attrs,
};
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 7c92a6e22d75..aa7f7aa77297 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -526,11 +526,13 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
* power), not wake up.
*/
ec_dev->host_event_wake_mask = U32_MAX &
- ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
- BIT(EC_HOST_EVENT_BATTERY_LOW) |
- BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
- BIT(EC_HOST_EVENT_PD_MCU) |
- BIT(EC_HOST_EVENT_BATTERY_STATUS));
+ ~(EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS));
/*
* Old ECs may not support this command. Complain about all
* other errors.
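The wake-mask change is not just cosmetic: host event codes are 1-based, so the old BIT(event) form set each bit one position too high, and the LID_CLOSED and BATTERY events were missing from the mask entirely. The macro, paraphrased from cros_ec_commands.h:

/* Host event codes start at 1, so the mask sits one bit below the code. */
#define EC_HOST_EVENT_MASK(event_code) BIT_ULL((event_code) - 1)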
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 30d0ba3b8889..d96d15b8ca94 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -149,12 +149,8 @@ cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
struct cros_ec_rpmsg *ec_rpmsg = container_of(host_event_work,
struct cros_ec_rpmsg,
host_event_work);
- struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
- bool ec_has_more_events;
- do {
- ec_has_more_events = cros_ec_handle_event(ec_dev);
- } while (ec_has_more_events);
+ cros_ec_irq_thread(0, dev_get_drvdata(&ec_rpmsg->rpdev->dev));
}
static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f521a5c65091..f07eabcf9494 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -28,7 +28,7 @@ static ssize_t reboot_show(struct device *dev,
int count = 0;
count += scnprintf(buf + count, PAGE_SIZE - count,
- "ro|rw|cancel|cold|disable-jump|hibernate");
+ "ro|rw|cancel|cold|disable-jump|hibernate|cold-ap-off");
count += scnprintf(buf + count, PAGE_SIZE - count,
" [at-shutdown]\n");
return count;
@@ -46,6 +46,7 @@ static ssize_t reboot_store(struct device *dev,
{"cancel", EC_REBOOT_CANCEL, 0},
{"ro", EC_REBOOT_JUMP_RO, 0},
{"rw", EC_REBOOT_JUMP_RW, 0},
+ {"cold-ap-off", EC_REBOOT_COLD_AP_OFF, 0},
{"cold", EC_REBOOT_COLD, 0},
{"disable-jump", EC_REBOOT_DISABLE_JUMP, 0},
{"hibernate", EC_REBOOT_HIBERNATE, 0},
@@ -329,7 +330,7 @@ static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
return a->mode;
}
-static struct attribute_group cros_ec_attr_group = {
+static const struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
.is_visible = cros_ec_ctrl_visible,
};
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index c43868615790..0811562deecc 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -44,8 +44,13 @@ struct cros_typec_port {
/* Initial capabilities for the port. */
struct typec_capability caps;
struct typec_partner *partner;
+ struct typec_cable *cable;
+ /* SOP' plug. */
+ struct typec_plug *plug;
/* Port partner PD identity info. */
struct usb_pd_identity p_identity;
+ /* Port cable PD identity info. */
+ struct usb_pd_identity c_identity;
struct typec_switch *ori_sw;
struct typec_mux *mux;
struct usb_role_switch *role_sw;
@@ -57,10 +62,12 @@ struct cros_typec_port {
/* Port alt modes. */
struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
- /* Flag indicating that PD discovery data parsing is completed. */
- bool disc_done;
- struct ec_response_typec_discovery *sop_disc;
+ /* Flag indicating that PD partner discovery data parsing is completed. */
+ bool sop_disc_done;
+ bool sop_prime_disc_done;
+ struct ec_response_typec_discovery *disc_data;
struct list_head partner_mode_list;
+ struct list_head plug_mode_list;
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -74,6 +81,7 @@ struct cros_typec_data {
struct notifier_block nb;
struct work_struct port_work;
bool typec_cmd_supported;
+ bool needs_mux_ack;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -180,37 +188,61 @@ static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
return ret;
}
-static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num)
+static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num,
+ bool is_partner)
{
struct cros_typec_port *port = typec->ports[port_num];
struct cros_typec_altmode_node *node, *tmp;
+ struct list_head *head;
- list_for_each_entry_safe(node, tmp, &port->partner_mode_list, list) {
+ head = is_partner ? &port->partner_mode_list : &port->plug_mode_list;
+ list_for_each_entry_safe(node, tmp, head, list) {
list_del(&node->list);
typec_unregister_altmode(node->amode);
devm_kfree(typec->dev, node);
}
}
-static void cros_typec_remove_partner(struct cros_typec_data *typec,
- int port_num)
+static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
{
- struct cros_typec_port *port = typec->ports[port_num];
-
- cros_typec_unregister_altmodes(typec, port_num);
-
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
- typec_mux_set(port->mux, &port->state);
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+static void cros_typec_remove_partner(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ cros_typec_unregister_altmodes(typec, port_num, true);
+
+ cros_typec_usb_disconnect_state(port);
typec_unregister_partner(port->partner);
port->partner = NULL;
memset(&port->p_identity, 0, sizeof(port->p_identity));
- port->disc_done = false;
+ port->sop_disc_done = false;
+}
+
+static void cros_typec_remove_cable(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ cros_typec_unregister_altmodes(typec, port_num, false);
+
+ typec_unregister_plug(port->plug);
+ port->plug = NULL;
+ typec_unregister_cable(port->cable);
+ port->cable = NULL;
+ memset(&port->c_identity, 0, sizeof(port->c_identity));
+ port->sop_prime_disc_done = false;
}
static void cros_unregister_ports(struct cros_typec_data *typec)
@@ -224,6 +256,9 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
if (typec->ports[i]->partner)
cros_typec_remove_partner(typec, i);
+ if (typec->ports[i]->cable)
+ cros_typec_remove_cable(typec, i);
+
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
@@ -323,13 +358,14 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
cros_typec_register_port_altmodes(typec, port_num);
- cros_port->sop_disc = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
- if (!cros_port->sop_disc) {
+ cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
+ if (!cros_port->disc_data) {
ret = -ENOMEM;
goto unregister_ports;
}
INIT_LIST_HEAD(&cros_port->partner_mode_list);
+ INIT_LIST_HEAD(&cros_port->plug_mode_list);
}
return 0;
@@ -502,11 +538,14 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_params_usb_pd_mux_ack mux_ack;
enum typec_orientation orientation;
int ret;
- if (!port->partner)
- return 0;
+ if (mux_flags == USB_PD_MUX_NONE) {
+ ret = cros_typec_usb_disconnect_state(port);
+ goto mux_ack;
+ }
if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
orientation = TYPEC_ORIENTATION_REVERSE;
@@ -541,6 +580,19 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
mux_flags);
}
+mux_ack:
+ if (!typec->needs_mux_ack)
+ return ret;
+
+	/* Send an acknowledgment to the EC. */
+ mux_ack.port = port_num;
+
+ if (cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
+ sizeof(mux_ack), NULL, 0) < 0)
+ dev_warn(typec->dev,
+ "Failed to send Mux ACK to EC for port: %d\n",
+ port_num);
+
return ret;
}
@@ -595,9 +647,11 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
"Failed to register partner on port: %d\n",
port_num);
} else {
- if (!typec->ports[port_num]->partner)
- return;
- cros_typec_remove_partner(typec, port_num);
+ if (typec->ports[port_num]->partner)
+ cros_typec_remove_partner(typec, port_num);
+
+ if (typec->ports[port_num]->cable)
+ cros_typec_remove_cable(typec, port_num);
}
}
@@ -612,13 +666,18 @@ static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
sizeof(req), resp, sizeof(*resp));
}
-static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num)
+/*
+ * Helper function to register partner/plug altmodes.
+ */
+static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num,
+ bool is_partner)
{
struct cros_typec_port *port = typec->ports[port_num];
- struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct ec_response_typec_discovery *sop_disc = port->disc_data;
struct cros_typec_altmode_node *node;
struct typec_altmode_desc desc;
struct typec_altmode *amode;
+ int num_altmodes = 0;
int ret = 0;
int i, j;
@@ -629,7 +688,11 @@ static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_
desc.mode = j;
desc.vdo = sop_disc->svids[i].mode_vdo[j];
- amode = typec_partner_register_altmode(port->partner, &desc);
+ if (is_partner)
+ amode = typec_partner_register_altmode(port->partner, &desc);
+ else
+ amode = typec_plug_register_altmode(port->plug, &desc);
+
if (IS_ERR(amode)) {
ret = PTR_ERR(amode);
goto err_cleanup;
@@ -644,27 +707,140 @@ static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_
}
node->amode = amode;
- list_add_tail(&node->list, &port->partner_mode_list);
+
+ if (is_partner)
+ list_add_tail(&node->list, &port->partner_mode_list);
+ else
+ list_add_tail(&node->list, &port->plug_mode_list);
+ num_altmodes++;
}
}
+ if (is_partner)
+ ret = typec_partner_set_num_altmodes(port->partner, num_altmodes);
+ else
+ ret = typec_plug_set_num_altmodes(port->plug, num_altmodes);
+
+ if (ret < 0) {
+ dev_err(typec->dev, "Unable to set %s num_altmodes for port: %d\n",
+ is_partner ? "partner" : "plug", port_num);
+ goto err_cleanup;
+ }
+
return 0;
err_cleanup:
- cros_typec_unregister_altmodes(typec, port_num);
+ cros_typec_unregister_altmodes(typec, port_num, is_partner);
+ return ret;
+}
+
+/*
+ * Parse the PD identity data from the EC PD discovery responses and copy that to the supplied
+ * PD identity struct.
+ */
+static void cros_typec_parse_pd_identity(struct usb_pd_identity *id,
+ struct ec_response_typec_discovery *disc)
+{
+ int i;
+
+ /* First, update the PD identity VDOs for the partner. */
+ if (disc->identity_count > 0)
+ id->id_header = disc->discovery_vdo[0];
+ if (disc->identity_count > 1)
+ id->cert_stat = disc->discovery_vdo[1];
+ if (disc->identity_count > 2)
+ id->product = disc->discovery_vdo[2];
+
+ /* Copy the remaining identity VDOs till a maximum of 6. */
+ for (i = 3; i < disc->identity_count && i < VDO_MAX_OBJECTS; i++)
+ id->vdo[i - 3] = disc->discovery_vdo[i];
+}
+
+static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *disc = port->disc_data;
+ struct typec_cable_desc c_desc = {};
+ struct typec_plug_desc p_desc;
+ struct ec_params_typec_discovery req = {
+ .port = port_num,
+ .partner_type = TYPEC_PARTNER_SOP_PRIME,
+ };
+ u32 cable_plug_type;
+ int ret = 0;
+
+ memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
+ goto sop_prime_disc_exit;
+ }
+
+ /* Parse the PD identity data, even if only 0s were returned. */
+ cros_typec_parse_pd_identity(&port->c_identity, disc);
+
+ if (disc->identity_count != 0) {
+ cable_plug_type = VDO_TYPEC_CABLE_TYPE(port->c_identity.vdo[0]);
+ switch (cable_plug_type) {
+ case CABLE_ATYPE:
+ c_desc.type = USB_PLUG_TYPE_A;
+ break;
+ case CABLE_BTYPE:
+ c_desc.type = USB_PLUG_TYPE_B;
+ break;
+ case CABLE_CTYPE:
+ c_desc.type = USB_PLUG_TYPE_C;
+ break;
+ case CABLE_CAPTIVE:
+ c_desc.type = USB_PLUG_CAPTIVE;
+ break;
+ default:
+ c_desc.type = USB_PLUG_NONE;
+ }
+ c_desc.active = PD_IDH_PTYPE(port->c_identity.id_header) == IDH_PTYPE_ACABLE;
+ }
+
+ c_desc.identity = &port->c_identity;
+ c_desc.pd_revision = pd_revision;
+
+ port->cable = typec_register_cable(port->port, &c_desc);
+ if (IS_ERR(port->cable)) {
+ ret = PTR_ERR(port->cable);
+ port->cable = NULL;
+ goto sop_prime_disc_exit;
+ }
+
+ p_desc.index = TYPEC_PLUG_SOP_P;
+ port->plug = typec_register_plug(port->cable, &p_desc);
+ if (IS_ERR(port->plug)) {
+ ret = PTR_ERR(port->plug);
+ port->plug = NULL;
+ goto sop_prime_disc_exit;
+ }
+
+ ret = cros_typec_register_altmodes(typec, port_num, false);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to register plug altmodes, port: %d\n", port_num);
+ goto sop_prime_disc_exit;
+ }
+
+ return 0;
+
+sop_prime_disc_exit:
+ cros_typec_remove_cable(typec, port_num);
return ret;
}
-static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num)
+static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
{
struct cros_typec_port *port = typec->ports[port_num];
- struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct ec_response_typec_discovery *sop_disc = port->disc_data;
struct ec_params_typec_discovery req = {
.port = port_num,
.partner_type = TYPEC_PARTNER_SOP,
};
int ret = 0;
- int i;
if (!port->partner) {
dev_err(typec->dev,
@@ -674,6 +850,8 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
goto disc_exit;
}
+ typec_partner_set_pd_revision(port->partner, pd_revision);
+
memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
@@ -682,17 +860,7 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
goto disc_exit;
}
- /* First, update the PD identity VDOs for the partner. */
- if (sop_disc->identity_count > 0)
- port->p_identity.id_header = sop_disc->discovery_vdo[0];
- if (sop_disc->identity_count > 1)
- port->p_identity.cert_stat = sop_disc->discovery_vdo[1];
- if (sop_disc->identity_count > 2)
- port->p_identity.product = sop_disc->discovery_vdo[2];
-
- /* Copy the remaining identity VDOs till a maximum of 6. */
- for (i = 3; i < sop_disc->identity_count && i < VDO_MAX_OBJECTS; i++)
- port->p_identity.vdo[i - 3] = sop_disc->discovery_vdo[i];
+ cros_typec_parse_pd_identity(&port->p_identity, sop_disc);
ret = typec_partner_set_identity(port->partner);
if (ret < 0) {
@@ -700,7 +868,7 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
goto disc_exit;
}
- ret = cros_typec_register_altmodes(typec, port_num);
+ ret = cros_typec_register_altmodes(typec, port_num, true);
if (ret < 0) {
dev_err(typec->dev, "Failed to register partner altmodes, port: %d\n", port_num);
goto disc_exit;
@@ -710,6 +878,18 @@ disc_exit:
return ret;
}
+static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_num, u32 events_mask)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ .clear_events_mask = events_mask,
+ };
+
+ return cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
+}
+
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
{
struct ec_response_typec_status resp;
@@ -725,18 +905,44 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
return;
}
- if (typec->ports[port_num]->disc_done)
- return;
-
/* Handle any events appropriately. */
- if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE) {
- ret = cros_typec_handle_sop_disc(typec, port_num);
- if (ret < 0) {
+ if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE && !typec->ports[port_num]->sop_disc_done) {
+ u16 sop_revision;
+
+ /* Convert BCD to the format preferred by the TypeC framework */
+ sop_revision = (le16_to_cpu(resp.sop_revision) & 0xff00) >> 4;
+ ret = cros_typec_handle_sop_disc(typec, port_num, sop_revision);
+ if (ret < 0)
dev_err(typec->dev, "Couldn't parse SOP Disc data, port: %d\n", port_num);
- return;
+ else {
+ typec->ports[port_num]->sop_disc_done = true;
+ ret = cros_typec_send_clear_event(typec, port_num,
+ PD_STATUS_EVENT_SOP_DISC_DONE);
+ if (ret < 0)
+ dev_warn(typec->dev,
+ "Failed SOP Disc event clear, port: %d\n", port_num);
}
+ if (resp.sop_connected)
+ typec_set_pwr_opmode(typec->ports[port_num]->port, TYPEC_PWR_MODE_PD);
+ }
- typec->ports[port_num]->disc_done = true;
+ if (resp.events & PD_STATUS_EVENT_SOP_PRIME_DISC_DONE &&
+ !typec->ports[port_num]->sop_prime_disc_done) {
+ u16 sop_prime_revision;
+
+ /* Convert BCD to the format preferred by the TypeC framework */
+ sop_prime_revision = (le16_to_cpu(resp.sop_prime_revision) & 0xff00) >> 4;
+ ret = cros_typec_handle_sop_prime_disc(typec, port_num, sop_prime_revision);
+ if (ret < 0)
+ dev_err(typec->dev, "Couldn't parse SOP' Disc data, port: %d\n", port_num);
+ else {
+ typec->ports[port_num]->sop_prime_disc_done = true;
+ ret = cros_typec_send_clear_event(typec, port_num,
+ PD_STATUS_EVENT_SOP_PRIME_DISC_DONE);
+ if (ret < 0)
+ dev_warn(typec->dev,
+					"Failed SOP' Disc event clear, port: %d\n", port_num);
+ }
}
}
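Both discovery branches convert the EC-reported revision the same way. A worked example of the shift, assuming the EC encodes the revision BCD-style in the high byte (e.g. 0x3000 for PD 3.0):

/* Assumed EC encoding: 0x3000 == PD 3.0 */
u16 rev = (0x3000 & 0xff00) >> 4;	/* == 0x0300 */
/* 0x0300 matches the Type-C class convention, where 0x0300 reads as "3.0". */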
@@ -827,8 +1033,8 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
-/* Check the EC feature flags to see if TYPEC_* commands are supported. */
-static int cros_typec_cmds_supported(struct cros_typec_data *typec)
+/* Check the EC feature flags to see if TYPEC_* features are supported. */
+static int cros_typec_feature_supported(struct cros_typec_data *typec, enum ec_feature_code feature)
{
struct ec_response_get_features resp = {};
int ret;
@@ -837,11 +1043,12 @@ static int cros_typec_cmds_supported(struct cros_typec_data *typec)
&resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev,
- "Failed to get features, assuming typec commands unsupported.\n");
+ "Failed to get features, assuming typec feature=%d unsupported.\n",
+ feature);
return 0;
}
- return resp.flags[EC_FEATURE_TYPEC_CMD / 32] & EC_FEATURE_MASK_1(EC_FEATURE_TYPEC_CMD);
+ return resp.flags[feature / 32] & EC_FEATURE_MASK_1(feature);
}
static void cros_typec_port_work(struct work_struct *work)
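The generalized check indexes a small bitmap of feature words. An equivalent open-coded form, assuming the 2x32-bit flags layout from cros_ec_commands.h (an illustrative helper, not part of the driver):

static bool ec_feature_present(const struct ec_response_get_features *resp,
			       enum ec_feature_code feature)
{
	/* flags[] is an array of 32-bit words, one bit per feature code. */
	return resp->flags[feature / 32] & BIT(feature % 32);
}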
@@ -861,6 +1068,7 @@ static int cros_ec_typec_event(struct notifier_block *nb,
{
struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);
+ flush_work(&typec->port_work);
schedule_work(&typec->port_work);
return NOTIFY_OK;
@@ -903,7 +1111,10 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
- typec->typec_cmd_supported = !!cros_typec_cmds_supported(typec);
+ typec->typec_cmd_supported = !!cros_typec_feature_supported(typec,
+ EC_FEATURE_TYPEC_CMD);
+ typec->needs_mux_ack = !!cros_typec_feature_supported(typec,
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index f3a70a312b43..c859c862d7ac 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -101,7 +101,7 @@ static struct bin_attribute *cros_ec_vbc_bin_attrs[] = {
NULL
};
-static struct attribute_group cros_ec_vbc_attr_group = {
+static const struct attribute_group cros_ec_vbc_attr_group = {
.name = "vbc",
.bin_attrs = cros_ec_vbc_bin_attrs,
};
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 3c587b4054a5..79a5e8fa680f 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -236,7 +236,7 @@ static struct attribute *wilco_dev_attrs[] = {
NULL,
};
-static struct attribute_group wilco_dev_attr_group = {
+static const struct attribute_group wilco_dev_attr_group = {
.attrs = wilco_dev_attrs,
};
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 1ab207ec9c94..b67539f9848c 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -212,9 +212,6 @@ struct goldfish_pipe_dev {
int version;
unsigned char __iomem *base;
- /* an irq tasklet to run goldfish_interrupt_task */
- struct tasklet_struct irq_tasklet;
-
struct miscdevice miscdev;
};
@@ -577,10 +574,10 @@ static struct goldfish_pipe *signalled_pipes_pop_front(
return pipe;
}
-static void goldfish_interrupt_task(unsigned long dev_addr)
+static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
{
/* Iterate over the signalled pipes and wake them one by one */
- struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
+ struct goldfish_pipe_dev *dev = dev_addr;
struct goldfish_pipe *pipe;
int wakes;
@@ -599,13 +596,14 @@ static void goldfish_interrupt_task(unsigned long dev_addr)
*/
wake_up_interruptible(&pipe->wake_queue);
}
+ return IRQ_HANDLED;
}
static void goldfish_pipe_device_deinit(struct platform_device *pdev,
struct goldfish_pipe_dev *dev);
/*
- * The general idea of the interrupt handling:
+ * The general idea of the (threaded) interrupt handling:
*
* 1. device raises an interrupt if there's at least one signalled pipe
* 2. IRQ handler reads the signalled pipes and their count from the device
@@ -614,8 +612,8 @@ static void goldfish_pipe_device_deinit(struct platform_device *pdev,
* otherwise it leaves it raised, so IRQ handler will be called
* again for the next chunk
* 4. IRQ handler adds all returned pipes to the device's signalled pipes list
- * 5. IRQ handler launches a tasklet to process the signalled pipes from the
- * list in a separate context
+ * 5. IRQ handler defers processing of the signalled pipes from the list to a
+ *    separate (threaded) context
*/
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
@@ -645,8 +643,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->irq_tasklet);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
@@ -811,12 +808,10 @@ static int goldfish_pipe_device_init(struct platform_device *pdev,
{
int err;
- tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
- (unsigned long)dev);
-
- err = devm_request_irq(&pdev->dev, dev->irq,
- goldfish_pipe_interrupt,
- IRQF_SHARED, "goldfish_pipe", dev);
+ err = devm_request_threaded_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt,
+ goldfish_interrupt_task,
+ IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
return err;
@@ -874,7 +869,6 @@ static void goldfish_pipe_device_deinit(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
misc_deregister(&dev->miscdev);
- tasklet_kill(&dev->irq_tasklet);
kfree(dev->pipes);
free_page((unsigned long)dev->buffers);
}
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index f64b82824db2..4ff5c3a12991 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -37,7 +37,6 @@ struct olpc_ec_priv {
struct mutex cmd_lock;
/* DCON regulator */
- struct regulator_dev *dcon_rdev;
bool dcon_enabled;
/* Pending EC commands */
@@ -387,24 +386,26 @@ static int dcon_regulator_is_enabled(struct regulator_dev *rdev)
return ec->dcon_enabled ? 1 : 0;
}
-static struct regulator_ops dcon_regulator_ops = {
+static const struct regulator_ops dcon_regulator_ops = {
.enable = dcon_regulator_enable,
.disable = dcon_regulator_disable,
.is_enabled = dcon_regulator_is_enabled,
};
static const struct regulator_desc dcon_desc = {
- .name = "dcon",
- .id = 0,
- .ops = &dcon_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
+ .name = "dcon",
+ .id = 0,
+ .ops = &dcon_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_time = 25000,
};
static int olpc_ec_probe(struct platform_device *pdev)
{
struct olpc_ec_priv *ec;
struct regulator_config config = { };
+ struct regulator_dev *regulator;
int err;
if (!ec_driver)
@@ -426,26 +427,26 @@ static int olpc_ec_probe(struct platform_device *pdev)
/* get the EC revision */
err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1);
- if (err) {
- ec_priv = NULL;
- kfree(ec);
- return err;
- }
+ if (err)
+ goto error;
config.dev = pdev->dev.parent;
config.driver_data = ec;
ec->dcon_enabled = true;
- ec->dcon_rdev = devm_regulator_register(&pdev->dev, &dcon_desc,
- &config);
- if (IS_ERR(ec->dcon_rdev)) {
+ regulator = devm_regulator_register(&pdev->dev, &dcon_desc, &config);
+ if (IS_ERR(regulator)) {
dev_err(&pdev->dev, "failed to register DCON regulator\n");
- err = PTR_ERR(ec->dcon_rdev);
- kfree(ec);
- return err;
+ err = PTR_ERR(regulator);
+ goto error;
}
ec->dbgfs_dir = olpc_ec_setup_debugfs();
+ return 0;
+
+error:
+ ec_priv = NULL;
+ kfree(ec);
return err;
}
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 33040b0b3b79..0847b2dc97bf 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,55 @@ config SURFACE3_WMI
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
+ depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
- depends on ACPI && I2C
+ depends on I2C
help
This driver provides support for ACPI operation
region of the Surface 3 battery platform driver.
+config SURFACE_ACPI_NOTIFY
+ tristate "Surface ACPI Notify Driver"
+ depends on SURFACE_AGGREGATOR
+ help
+ Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
+
+ This driver provides support for the ACPI interface (called SAN) of
+ the Surface System Aggregator Module (SSAM) EC. This interface is used
+ on 5th- and 6th-generation Microsoft Surface devices (including
+ Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
+ reduced functionality on the Surface Laptop 3) to execute SSAM
+ requests directly from ACPI code, as well as receive SSAM events and
+ turn them into ACPI notifications. It essentially acts as a
+ translation layer between the SSAM controller and ACPI.
+
+ Specifically, this driver may be needed for battery status reporting,
+ thermal sensor access, and real-time clock information, depending on
+ the Surface device in question.
+
+config SURFACE_AGGREGATOR_CDEV
+ tristate "Surface System Aggregator Module User-Space Interface"
+ depends on SURFACE_AGGREGATOR
+ help
+ Provides a misc-device interface to the Surface System Aggregator
+ Module (SSAM) controller.
+
+ This option provides a module (called surface_aggregator_cdev) that,
+ when loaded, will add a client device (and its respective driver) to
+ the SSAM controller. Said client device manages a misc-device
+ interface (/dev/surface/aggregator), which can be used by user-space
+ tools to directly communicate with the SSAM EC by sending requests and
+ receiving the corresponding responses.
+
+ The provided interface is intended for debugging and development only,
+ and should not be used otherwise.
+
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
- depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
@@ -50,10 +86,31 @@ config SURFACE_GPE
accordingly. It is required on those devices to allow wake-ups from
suspend by opening the lid.
+config SURFACE_HOTPLUG
+ tristate "Surface Hot-Plug Driver"
+ depends on GPIOLIB
+ help
+ Driver for out-of-band hot-plug event signaling on Microsoft Surface
+ devices with hot-pluggable PCIe cards.
+
+ This driver is used on Surface Book (2 and 3) devices with a
+ hot-pluggable discrete GPU (dGPU). When not in use, the dGPU on those
+ devices can enter D3cold, which prevents in-band (standard) PCIe
+ hot-plug signaling. Thus, without this driver, detaching the base
+ containing the dGPU will not correctly update the state of the
+ corresponding PCIe device if it is in D3cold. This driver adds support
+ for out-of-band hot-plug notifications, ensuring that the device state
+ is properly updated even when the device in question is in D3cold.
+
+ Select M or Y here if you want to (fully) support hot-plugging of
+ dGPU devices on the Surface Book 2 and/or 3 during D3cold.
+
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
+ depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+source "drivers/platform/surface/aggregator/Kconfig"
+
endif # SURFACE_PLATFORMS
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index cedfb027ded1..990424c5f0c9 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -7,5 +7,9 @@
obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
+obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
+obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
+obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
+obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
new file mode 100644
index 000000000000..3aaeea9f0433
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+
+menuconfig SURFACE_AGGREGATOR
+ tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ The Surface System Aggregator Module (Surface SAM or SSAM) is an
+ embedded controller (EC) found on 5th- and later-generation Microsoft
+ Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
+ and newer, with the exception of Surface Go series devices).
+
+ Depending on the device in question, this EC provides varying
+ functionality, including:
+ - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
+ - battery status information (all devices)
+ - thermal sensor access (all devices)
+ - performance mode / cooling mode control (all devices)
+ - clipboard detachment system control (Surface Book 2 and 3)
+ - HID / keyboard input (Surface Laptops, Surface Book 3)
+
+ This option controls whether the Surface SAM subsystem core will be
+ built. This includes a driver for the Surface Serial Hub (SSH), which
+ is the device responsible for the communication with the EC, and a
+ basic kernel interface exposing the EC functionality to other client
+ drivers, i.e. allowing them to make requests to the EC and receive
+ events from it. Selecting this option alone will not provide any
+ client drivers and therefore no functionality beyond the in-kernel
+ interface. Said functionality is the responsibility of the respective
+ client drivers.
+
+ Note: While 4th-generation Surface devices also make use of a SAM EC,
+ due to a difference in the communication interface of the controller,
+ only 5th and later generations are currently supported. Specifically,
+ devices using SAM-over-SSH are supported, whereas devices using
+ SAM-over-HID, which is used on the 4th generation, are currently not
+ supported.
+
+ Choose m if you want to build the SAM subsystem core and SSH driver as
+ a module, y if you want to build it into the kernel, and n if you don't
+ want it at all.
+
+config SURFACE_AGGREGATOR_BUS
+ bool "Surface System Aggregator Module Bus"
+ depends on SURFACE_AGGREGATOR
+ default y
+ help
+ Expands the Surface System Aggregator Module (SSAM) core driver by
+ providing a dedicated bus and client-device type.
+
+ This bus and device type are intended to provide and simplify support
+ for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
+ not auto-detectable via the conventional means (e.g. ACPI).
+
+config SURFACE_AGGREGATOR_ERROR_INJECTION
+ bool "Surface System Aggregator Module Error Injection Capabilities"
+ depends on SURFACE_AGGREGATOR
+ depends on FUNCTION_ERROR_INJECTION
+ help
+ Provides error-injection capabilities for the Surface System
+ Aggregator Module subsystem and Surface Serial Hub driver.
+
+ Specifically, exports error injection hooks to be used with the
+ kernel's function error injection capabilities to simulate underlying
+ transport and communication problems, such as invalid data sent to or
+ received from the EC, dropped data, and communication timeouts.
+ Intended for development and debugging.
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
new file mode 100644
index 000000000000..c112e2c7112b
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+
+# For include/trace/define_trace.h to include trace.h
+CFLAGS_core.o = -I$(src)
+
+obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
+
+surface_aggregator-objs := core.o
+surface_aggregator-objs += ssh_parser.o
+surface_aggregator-objs += ssh_packet_layer.o
+surface_aggregator-objs += ssh_request_layer.o
+surface_aggregator-objs += controller.o
+
+ifeq ($(CONFIG_SURFACE_AGGREGATOR_BUS),y)
+surface_aggregator-objs += bus.o
+endif
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
new file mode 100644
index 000000000000..a9b660af0917
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "bus.h"
+#include "controller.h"
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return sysfs_emit(buf, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+}
+static DEVICE_ATTR_RO(modalias);
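For illustration (editorial, not part of the patch): with a hypothetical UID of
domain 0x01, category 0x15, target 0x02, instance 0x00, and function 0x00, the
attribute above would emit the modalias string

    ssam:d01c15t02i00f00

which is also the format produced for module matching in the uevent callback below.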
+
+static struct attribute *ssam_device_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ssam_device);
+
+static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
+ sdev->uid.domain, sdev->uid.category,
+ sdev->uid.target, sdev->uid.instance,
+ sdev->uid.function);
+}
+
+static void ssam_device_release(struct device *dev)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ ssam_controller_put(sdev->ctrl);
+ kfree(sdev);
+}
+
+const struct device_type ssam_device_type = {
+ .name = "surface_aggregator_device",
+ .groups = ssam_device_groups,
+ .uevent = ssam_device_uevent,
+ .release = ssam_device_release,
+};
+EXPORT_SYMBOL_GPL(ssam_device_type);
+
+/**
+ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
+ * @ctrl: The controller under which the device should be added.
+ * @uid: The UID of the device to be added.
+ *
+ * Allocates and initializes a new client device. The parent of the device
+ * will be set to the controller device and the name will be set based on the
+ * UID. Note that the device still has to be added via ssam_device_add().
+ * Refer to that function for more details.
+ *
+ * Return: Returns the newly allocated and initialized SSAM client device, or
+ * %NULL if it could not be allocated.
+ */
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid)
+{
+ struct ssam_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ device_initialize(&sdev->dev);
+ sdev->dev.bus = &ssam_bus_type;
+ sdev->dev.type = &ssam_device_type;
+ sdev->dev.parent = ssam_controller_device(ctrl);
+ sdev->ctrl = ssam_controller_get(ctrl);
+ sdev->uid = uid;
+
+ dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(ssam_device_alloc);
+
+/**
+ * ssam_device_add() - Add a SSAM client device.
+ * @sdev: The SSAM client device to be added.
+ *
+ * Added client devices must be guaranteed to always have a valid and active
+ * controller. Thus, this function will fail with %-ENODEV if the controller
+ * of the device has not been initialized yet, has been suspended, or has been
+ * shut down.
+ *
+ * The caller of this function should ensure that the corresponding call to
+ * ssam_device_remove() is issued before the controller is shut down. If the
+ * added device is a direct child of the controller device (default), it will
+ * be automatically removed when the controller is shut down.
+ *
+ * By default, the controller device will become the parent of the newly
+ * created client device. The parent may be changed before ssam_device_add() is
+ * called, but care must be taken that a) the correct suspend/resume ordering
+ * is guaranteed and b) the client device does not outlive the controller,
+ * i.e. that the device is removed before the controller is being shut down.
+ * In case these guarantees have to be manually enforced, please refer to the
+ * ssam_client_link() and ssam_client_bind() functions, which are intended to
+ * set up device-links for this purpose.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssam_device_add(struct ssam_device *sdev)
+{
+ int status;
+
+ /*
+ * Ensure that we can only add new devices to a controller if it has
+ * been started and is not going away soon. This works in combination
+ * with ssam_controller_remove_clients to ensure driver presence for the
+ * controller device, i.e. it ensures that the controller (sdev->ctrl)
+ * is always valid and can be used for requests as long as the client
+ * device we add here is registered as child under it. This essentially
+ * guarantees that the client driver can always expect the preconditions
+ * for functions like ssam_request_sync (controller has to be started
+ * and is not suspended) to hold and thus does not have to check for
+ * them.
+ *
+ * Note that for this to work, the controller has to be a parent device.
+ * If it is not a direct parent, care has to be taken that the device is
+ * removed via ssam_device_remove(), as device_unregister does not
+ * remove child devices recursively.
+ */
+ ssam_controller_statelock(sdev->ctrl);
+
+ if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(sdev->ctrl);
+ return -ENODEV;
+ }
+
+ status = device_add(&sdev->dev);
+
+ ssam_controller_stateunlock(sdev->ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_device_add);
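As a usage sketch (editorial, not part of this patch; the UID values are
placeholders), a client could combine ssam_device_alloc() and ssam_device_add()
as follows. On failure, the reference obtained during allocation is dropped via
put_device(), which releases the controller reference through
ssam_device_release():

static int example_add_client(struct ssam_controller *ctrl)
{
	struct ssam_device_uid uid = {
		.domain   = 0x01,	/* placeholder */
		.category = 0x15,	/* placeholder */
		.target   = 0x02,
		.instance = 0x00,
		.function = 0x00,
	};
	struct ssam_device *sdev;
	int status;

	sdev = ssam_device_alloc(ctrl, uid);
	if (!sdev)
		return -ENOMEM;

	status = ssam_device_add(sdev);
	if (status)
		put_device(&sdev->dev);	/* also drops the controller ref */

	return status;
}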
+
+/**
+ * ssam_device_remove() - Remove a SSAM client device.
+ * @sdev: The device to remove.
+ *
+ * Removes and unregisters the provided SSAM client device.
+ */
+void ssam_device_remove(struct ssam_device *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(ssam_device_remove);
+
+/**
+ * ssam_device_id_compatible() - Check if a device ID matches a UID.
+ * @id: The device ID as potential match.
+ * @uid: The device UID matching against.
+ *
+ * Check if the given ID is a match for the given UID, i.e. if a device with
+ * the provided UID is compatible with the given ID following the match rules
+ * described in its &ssam_device_id.match_flags member.
+ *
+ * Return: Returns %true if the given UID is compatible with the match rule
+ * described by the given ID, %false otherwise.
+ */
+static bool ssam_device_id_compatible(const struct ssam_device_id *id,
+ struct ssam_device_uid uid)
+{
+ if (id->domain != uid.domain || id->category != uid.category)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
+ return false;
+
+ return true;
+}
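To illustrate the match rules (an editorial sketch; the numeric values are
placeholders), an ID that pins target and instance but leaves the function
unconstrained could look as follows. It matches any function on that
target/instance, provided domain and category match:

static const struct ssam_device_id example_match_table[] = {
	{
		.match_flags = SSAM_MATCH_TARGET | SSAM_MATCH_INSTANCE,
		.domain      = 0x01,	/* placeholder */
		.category    = 0x15,	/* placeholder */
		.target      = 0x02,
		.instance    = 0x00,
		/* .function left unset: ignored without SSAM_MATCH_FUNCTION */
	},
	{ /* terminating null entry */ },
};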
+
+/**
+ * ssam_device_id_is_null() - Check if a device ID is null.
+ * @id: The device ID to check.
+ *
+ * Check if a given device ID is null, i.e. all zeros. Used to check for the
+ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
+ *
+ * Return: Returns %true if the given ID represents a null ID, %false
+ * otherwise.
+ */
+static bool ssam_device_id_is_null(const struct ssam_device_id *id)
+{
+ return id->match_flags == 0 &&
+ id->domain == 0 &&
+ id->category == 0 &&
+ id->target == 0 &&
+ id->instance == 0 &&
+ id->function == 0 &&
+ id->driver_data == 0;
+}
+
+/**
+ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
+ * @table: The table to search in.
+ * @uid: The UID to be matched against the individual table entries.
+ *
+ * Find the first match for the provided device UID in the provided ID table
+ * and return it.
+ *
+ * Return: Returns the first matching ID table entry, or %NULL if no match
+ * could be found.
+ */
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+ const struct ssam_device_uid uid)
+{
+ const struct ssam_device_id *id;
+
+ for (id = table; !ssam_device_id_is_null(id); ++id)
+ if (ssam_device_id_compatible(id, uid))
+ return id;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ssam_device_id_match);
+
+/**
+ * ssam_device_get_match() - Find and return the ID matching the device in the
+ * ID table of the bound driver.
+ * @dev: The device for which to get the matching ID table entry.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * currently bound driver and return it. Returns %NULL if the device does not
+ * have a driver bound to it, the driver does not have a match_table (i.e. it
+ * is %NULL), or there is no match in the driver's match_table.
+ *
+ * This function essentially calls ssam_device_id_match() with the ID table of
+ * the bound device driver and the UID of the device.
+ *
+ * Return: Returns the first match for the UID of the device in the device
+ * driver's match table, or %NULL if no such match could be found.
+ */
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
+{
+ const struct ssam_device_driver *sdrv;
+
+ sdrv = to_ssam_device_driver(dev->dev.driver);
+ if (!sdrv)
+ return NULL;
+
+ if (!sdrv->match_table)
+ return NULL;
+
+ return ssam_device_id_match(sdrv->match_table, dev->uid);
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match);
+
+/**
+ * ssam_device_get_match_data() - Find the ID matching the device in the
+ * ID table of the bound driver and return its ``driver_data`` member.
+ * @dev: The device for which to get the match data.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * corresponding driver and return its driver_data. Returns %NULL if the
+ * device does not have a driver bound to it, the driver does not have a
+ * match_table (i.e. it is %NULL), there is no match in the driver's
+ * match_table, or the match does not have any driver_data.
+ *
+ * This function essentially calls ssam_device_get_match() and, if any match
+ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
+ *
+ * Return: Returns the driver data associated with the first match for the UID
+ * of the device in the device driver's match table, or %NULL if no such match
+ * could be found.
+ */
+const void *ssam_device_get_match_data(const struct ssam_device *dev)
+{
+ const struct ssam_device_id *id;
+
+ id = ssam_device_get_match(dev);
+ if (!id)
+ return NULL;
+
+ return (const void *)id->driver_data;
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
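A hedged sketch (not part of the patch; struct example_hw_info and the probe
callback are hypothetical) of how a client driver might consume the match data
set via the ``driver_data`` member of its ID table:

struct example_hw_info {
	u32 quirks;	/* hypothetical per-model data */
};

static int example_probe(struct ssam_device *sdev)
{
	const struct example_hw_info *info;

	info = ssam_device_get_match_data(sdev);
	if (!info)
		return -ENODEV;

	dev_info(&sdev->dev, "model quirks: %#x\n", info->quirks);
	return 0;
}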
+
+static int ssam_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (!is_ssam_device(dev))
+ return 0;
+
+ return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
+}
+
+static int ssam_bus_probe(struct device *dev)
+{
+ return to_ssam_device_driver(dev->driver)
+ ->probe(to_ssam_device(dev));
+}
+
+static int ssam_bus_remove(struct device *dev)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
+
+ if (sdrv->remove)
+ sdrv->remove(to_ssam_device(dev));
+
+ return 0;
+}
+
+struct bus_type ssam_bus_type = {
+ .name = "surface_aggregator",
+ .match = ssam_bus_match,
+ .probe = ssam_bus_probe,
+ .remove = ssam_bus_remove,
+};
+EXPORT_SYMBOL_GPL(ssam_bus_type);
+
+/**
+ * __ssam_device_driver_register() - Register a SSAM client device driver.
+ * @sdrv: The driver to register.
+ * @owner: The module owning the provided driver.
+ *
+ * Please refer to the ssam_device_driver_register() macro for the normal way
+ * to register a driver from inside its owning module.
+ */
+int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
+ struct module *owner)
+{
+ sdrv->driver.owner = owner;
+ sdrv->driver.bus = &ssam_bus_type;
+
+ /* force drivers to async probe so I/O is possible in probe */
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
+
+/**
+ * ssam_device_driver_unregister - Unregister a SSAM device driver.
+ * @sdrv: The driver to unregister.
+ */
+void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
+{
+ driver_unregister(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
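Tying the previous sketches together (editorial; all names are hypothetical), a
module would typically use the ssam_device_driver_register() convenience macro
mentioned in the kernel-doc above, which is assumed to wrap
__ssam_device_driver_register() with THIS_MODULE:

static struct ssam_device_driver example_driver = {
	.probe       = example_probe,		/* from the sketch above */
	.match_table = example_match_table,	/* from the sketch above */
	.driver = {
		.name = "example_ssam_client",
	},
};

static int __init example_driver_init(void)
{
	return __ssam_device_driver_register(&example_driver, THIS_MODULE);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	ssam_device_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);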
+
+static int ssam_remove_device(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_remove(sdev);
+
+ return 0;
+}
+
+/**
+ * ssam_controller_remove_clients() - Remove SSAM client devices registered as
+ * direct children under the given controller.
+ * @ctrl: The controller to remove all direct clients for.
+ *
+ * Remove all SSAM client devices registered as direct children under the
+ * given controller. Note that this only accounts for direct children of the
+ * controller device. This does not take care of any client devices where the
+ * parent device has been manually set before calling ssam_device_add(). Refer
+ * to ssam_device_add()/ssam_device_remove() for more details on those cases.
+ *
+ * To avoid new devices being added in parallel to this call, the main
+ * controller lock (not statelock) must be held during this (and if
+ * necessary, any subsequent deinitialization) call.
+ */
+void ssam_controller_remove_clients(struct ssam_controller *ctrl)
+{
+ struct device *dev;
+
+ dev = ssam_controller_device(ctrl);
+ device_for_each_child_reverse(dev, NULL, ssam_remove_device);
+}
+
+/**
+ * ssam_bus_register() - Register and set up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
new file mode 100644
index 000000000000..7712baaed6a5
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_BUS_H
+#define _SURFACE_AGGREGATOR_BUS_H
+
+#include <linux/surface_aggregator/controller.h>
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+void ssam_controller_remove_clients(struct ssam_controller *ctrl);
+
+int ssam_bus_register(void);
+void ssam_bus_unregister(void);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline void ssam_controller_remove_clients(struct ssam_controller *ctrl) {}
+static inline int ssam_bus_register(void) { return 0; }
+static inline void ssam_bus_unregister(void) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+#endif /* _SURFACE_AGGREGATOR_BUS_H */
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
new file mode 100644
index 000000000000..5bcb59ed579d
--- /dev/null
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -0,0 +1,2579 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Main SSAM/SSH controller structure and functionality.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "controller.h"
+#include "ssh_msgb.h"
+#include "ssh_request_layer.h"
+
+#include "trace.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * ssh_seq_reset() - Reset/initialize sequence ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_seq_reset(struct ssh_seq_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_seq_next() - Get next sequence ID.
+ * @c: The counter providing the sequence IDs.
+ *
+ * Return: Returns the next sequence ID of the counter.
+ */
+static u8 ssh_seq_next(struct ssh_seq_counter *c)
+{
+ u8 old = READ_ONCE(c->value);
+ u8 new = old + 1;
+ u8 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = old + 1;
+ }
+
+ return old;
+}
+
+/**
+ * ssh_rqid_reset() - Reset/initialize request ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_rqid_reset(struct ssh_rqid_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_rqid_next() - Get next request ID.
+ * @c: The counter providing the request IDs.
+ *
+ * Return: Returns the next request ID of the counter, skipping any reserved
+ * request IDs.
+ */
+static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
+{
+ u16 old = READ_ONCE(c->value);
+ u16 new = ssh_rqid_next_valid(old);
+ u16 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = ssh_rqid_next_valid(old);
+ }
+
+ return old;
+}
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+/*
+ * The notifier system is based on linux/notifier.h, specifically the SRCU
+ * implementation. The difference is that some bits of the notifier call
+ * return value can be tracked across multiple calls. This is done so that
+ * the handling of events can be tracked and a warning can be issued in case
+ * an event goes unhandled. The idea behind that warning is that it should
+ * help discover and identify new/currently unimplemented features.
+ */
+
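As a hedged illustration of these semantics (editorial, not part of the patch;
the command ID is a placeholder and registration of the notifier is assumed to
happen elsewhere), a callback with the signature used by the call chain below
could look like this:

static u32 example_notif_fn(struct ssam_event_notifier *nf,
			    const struct ssam_event *event)
{
	if (event->command_id != 0x2a)	/* placeholder command ID */
		return 0;		/* neither handled nor stopped */

	/* ... process event->data (event->length bytes) here ... */

	return SSAM_NOTIF_HANDLED;	/* prevents the unhandled warning */
}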
+/**
+ * ssam_event_matches_notifier() - Test if an event matches a notifier.
+ * @n: The event notifier to test against.
+ * @event: The event to test.
+ *
+ * Return: Returns %true if the given event matches the given notifier
+ * according to the rules set in the notifier's event mask, %false otherwise.
+ */
+static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
+ const struct ssam_event *event)
+{
+ bool match = n->event.id.target_category == event->target_category;
+
+ if (n->event.mask & SSAM_EVENT_MASK_TARGET)
+ match &= n->event.reg.target_id == event->target_id;
+
+ if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
+ match &= n->event.id.instance == event->instance_id;
+
+ return match;
+}
+
+/**
+ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
+ * @nh: The notifier head for which the notifier callbacks should be called.
+ * @event: The event data provided to the callbacks.
+ *
+ * Call all registered notifier callbacks in order of their priority until
+ * either no notifier is left or a notifier returns a value with the
+ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
+ * ssam_notifier_from_errno() on any non-zero error value.
+ *
+ * Return: Returns the notifier status value, which contains the notifier
+ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
+ * potential error value returned from the last executed notifier callback.
+ * Use ssam_notifier_to_errno() to convert this value to the original error
+ * value.
+ */
+static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
+{
+ struct ssam_event_notifier *nf;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+
+ list_for_each_entry_rcu(nf, &nh->head, base.node,
+ srcu_read_lock_held(&nh->srcu)) {
+ if (ssam_event_matches_notifier(nf, event)) {
+ ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
+ if (ret & SSAM_NOTIF_STOP)
+ break;
+ }
+ }
+
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+
+/**
+ * ssam_nfblk_insert() - Insert a new notifier block into the given notifier
+ * list.
+ * @nh: The notifier head into which the block should be inserted.
+ * @nb: The notifier block to add.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns zero on success, %-EEXIST if the notifier block has already
+ * been registered.
+ */
+static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block *p;
+ struct list_head *h;
+
+ /* Runs under lock, no need for RCU variant. */
+ list_for_each(h, &nh->head) {
+ p = list_entry(h, struct ssam_notifier_block, node);
+
+ if (unlikely(p == nb)) {
+ WARN(1, "double register detected");
+ return -EEXIST;
+ }
+
+ if (nb->priority > p->priority)
+ break;
+ }
+
+ list_add_tail_rcu(&nb->node, h);
+ return 0;
+}
+
+/**
+ * ssam_nfblk_find() - Check if a notifier block is registered on the given
+ * notifier head.
+ * @nh: The notifier head on which to search.
+ * @nb: The notifier block to search for.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns %true if the given notifier block is registered on the
+ * given notifier head, %false otherwise.
+ */
+static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block *p;
+
+ /* Runs under lock, no need for RCU variant. */
+ list_for_each_entry(p, &nh->head, node) {
+ if (p == nb)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ssam_nfblk_remove() - Remove a notifier block from its notifier list.
+ * @nb: The notifier block to be removed.
+ *
+ * Note: This function must be synchronized by the caller with respect to
+ * other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ * Furthermore, the caller _must_ ensure SRCU synchronization by calling
+ * synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
+ * ensure that the removed notifier block is not in use any more.
+ */
+static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
+{
+ list_del_rcu(&nb->node);
+}
+
+/**
+ * ssam_nf_head_init() - Initialize the given notifier head.
+ * @nh: The notifier head to initialize.
+ */
+static int ssam_nf_head_init(struct ssam_nf_head *nh)
+{
+ int status;
+
+ status = init_srcu_struct(&nh->srcu);
+ if (status)
+ return status;
+
+ INIT_LIST_HEAD(&nh->head);
+ return 0;
+}
+
+/**
+ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
+ * @nh: The notifier head to deinitialize.
+ */
+static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
+{
+ cleanup_srcu_struct(&nh->srcu);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_nf_refcount_key - Key used for event activation reference
+ * counting.
+ * @reg: The registry via which the event is enabled/disabled.
+ * @id: The ID uniquely describing the event.
+ */
+struct ssam_nf_refcount_key {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+};
+
+/**
+ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
+ * activations.
+ * @node: The node of this entry in the rb-tree.
+ * @key: The key of the event.
+ * @refcount: The reference-count of the event.
+ * @flags: The flags used when enabling the event.
+ */
+struct ssam_nf_refcount_entry {
+ struct rb_node node;
+ struct ssam_nf_refcount_key key;
+ int refcount;
+ u8 flags;
+};
+
+/**
+ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Increments the reference-/activation-count associated with the specified
+ * event type/ID, allocating a new entry for this event ID if necessary. A
+ * newly allocated entry will have a refcount of one.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success. Returns an error pointer
+ * with %-ENOSPC if there have already been %INT_MAX events of the specified
+ * ID and type registered, or %-ENOMEM if the entry could not be allocated.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_key key;
+ struct rb_node **link = &nf->refcount.rb_node;
+ struct rb_node *parent = NULL;
+ int cmp;
+
+ lockdep_assert_held(&nf->lock);
+
+ key.reg = reg;
+ key.id = id;
+
+ while (*link) {
+ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
+ parent = *link;
+
+ cmp = memcmp(&key, &entry->key, sizeof(key));
+ if (cmp < 0) {
+ link = &(*link)->rb_left;
+ } else if (cmp > 0) {
+ link = &(*link)->rb_right;
+ } else if (entry->refcount < INT_MAX) {
+ entry->refcount++;
+ return entry;
+ } else {
+ WARN_ON(1);
+ return ERR_PTR(-ENOSPC);
+ }
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->key = key;
+ entry->refcount = 1;
+
+ rb_link_node(&entry->node, parent, link);
+ rb_insert_color(&entry->node, &nf->refcount);
+
+ return entry;
+}
+
+/**
+ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event,
+ * returning its entry. If the returned entry has a refcount of zero, the
+ * caller is responsible for freeing it using kfree().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success or %NULL if the entry has not
+ * been found.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_key key;
+ struct rb_node *node = nf->refcount.rb_node;
+ int cmp;
+
+ lockdep_assert_held(&nf->lock);
+
+ key.reg = reg;
+ key.id = id;
+
+ while (node) {
+ entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
+
+ cmp = memcmp(&key, &entry->key, sizeof(key));
+ if (cmp < 0) {
+ node = node->rb_left;
+ } else if (cmp > 0) {
+ node = node->rb_right;
+ } else {
+ entry->refcount--;
+ if (entry->refcount == 0)
+ rb_erase(&entry->node, &nf->refcount);
+
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ssam_nf_refcount_empty() - Test if the notification system has any
+ * enabled/active events.
+ * @nf: The notification system.
+ */
+static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
+{
+ return RB_EMPTY_ROOT(&nf->refcount);
+}
+
+/**
+ * ssam_nf_call() - Call notification callbacks for the provided event.
+ * @nf: The notifier system
+ * @dev: The associated device, only used for logging.
+ * @rqid: The request ID of the event.
+ * @event: The event provided to the callbacks.
+ *
+ * Execute registered callbacks in order of their priority until either no
+ * callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
+ * bit set. Note that this bit is set automatically when converting non-zero
+ * error values via ssam_notifier_from_errno() to notifier values.
+ *
+ * Also note that any callback that could handle an event should return a value
+ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
+ * unhandled/ignored. In case no registered callback could handle an event,
+ * this function will emit a warning.
+ *
+ * In case a callback failed, this function will emit an error message.
+ */
+static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
+ struct ssam_event *event)
+{
+ struct ssam_nf_head *nf_head;
+ int status, nf_ret;
+
+ if (!ssh_rqid_is_event(rqid)) {
+ dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
+ return;
+ }
+
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+ nf_ret = ssam_nfblk_call_chain(nf_head, event);
+ status = ssam_notifier_to_errno(nf_ret);
+
+ if (status < 0) {
+ dev_err(dev,
+ "event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ status, event->target_category, event->target_id,
+ event->command_id, event->instance_id);
+ } else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
+ dev_warn(dev,
+ "event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ rqid, event->target_category, event->target_id,
+ event->command_id, event->instance_id);
+ }
+}
+
+/**
+ * ssam_nf_init() - Initialize the notifier system.
+ * @nf: The notifier system to initialize.
+ */
+static int ssam_nf_init(struct ssam_nf *nf)
+{
+ int i, status;
+
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
+ status = ssam_nf_head_init(&nf->head[i]);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ while (i--)
+ ssam_nf_head_destroy(&nf->head[i]);
+
+ return status;
+ }
+
+ mutex_init(&nf->lock);
+ return 0;
+}
+
+/**
+ * ssam_nf_destroy() - Deinitialize the notifier system.
+ * @nf: The notifier system to deinitialize.
+ */
+static void ssam_nf_destroy(struct ssam_nf *nf)
+{
+ int i;
+
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
+ ssam_nf_head_destroy(&nf->head[i]);
+
+ mutex_destroy(&nf->lock);
+}
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
+
+/*
+ * SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
+ * work execution. Used to prevent livelocking of the workqueue. Value chosen
+ * via educated guess, may be adjusted.
+ */
+#define SSAM_CPLT_WQ_BATCH 10
+
+/*
+ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
+ * &struct ssam_event_item.
+ *
+ * This length has been chosen to accommodate standard touchpad and
+ * keyboard input events. Events with larger payloads will be allocated
+ * separately.
+ */
+#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
+
+static struct kmem_cache *ssam_event_item_cache;
+
+/**
+ * ssam_event_item_cache_init() - Initialize the event item cache.
+ */
+int ssam_event_item_cache_init(void)
+{
+ const unsigned int size = sizeof(struct ssam_event_item)
+ + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
+ const unsigned int align = __alignof__(struct ssam_event_item);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
+ if (!cache)
+ return -ENOMEM;
+
+ ssam_event_item_cache = cache;
+ return 0;
+}
+
+/**
+ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
+ */
+void ssam_event_item_cache_destroy(void)
+{
+ kmem_cache_destroy(ssam_event_item_cache);
+ ssam_event_item_cache = NULL;
+}
+
+static void __ssam_event_item_free_cached(struct ssam_event_item *item)
+{
+ kmem_cache_free(ssam_event_item_cache, item);
+}
+
+static void __ssam_event_item_free_generic(struct ssam_event_item *item)
+{
+ kfree(item);
+}
+
+/**
+ * ssam_event_item_free() - Free the provided event item.
+ * @item: The event item to free.
+ */
+static void ssam_event_item_free(struct ssam_event_item *item)
+{
+ trace_ssam_event_item_free(item);
+ item->ops.free(item);
+}
+
+/**
+ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
+ * @len: The event payload length.
+ * @flags: The flags used for allocation.
+ *
+ * Allocate an event item with the given payload size, preferring allocation
+ * from the event item cache if the payload is small enough (i.e. smaller than
+ * %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
+ * length values. The item free callback (``ops.free``) should not be
+ * overwritten after this call.
+ *
+ * Return: Returns the newly allocated event item.
+ */
+static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+{
+ struct ssam_event_item *item;
+
+ if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
+ item = kmem_cache_alloc(ssam_event_item_cache, flags);
+ if (!item)
+ return NULL;
+
+ item->ops.free = __ssam_event_item_free_cached;
+ } else {
+ item = kzalloc(struct_size(item, event.data, len), flags);
+ if (!item)
+ return NULL;
+
+ item->ops.free = __ssam_event_item_free_generic;
+ }
+
+ item->event.length = len;
+
+ trace_ssam_event_item_alloc(item, len);
+ return item;
+}
+
+/**
+ * ssam_event_queue_push() - Push an event item to the event queue.
+ * @q: The event queue.
+ * @item: The item to add.
+ */
+static void ssam_event_queue_push(struct ssam_event_queue *q,
+ struct ssam_event_item *item)
+{
+ spin_lock(&q->lock);
+ list_add_tail(&item->node, &q->head);
+ spin_unlock(&q->lock);
+}
+
+/**
+ * ssam_event_queue_pop() - Pop the next event item from the event queue.
+ * @q: The event queue.
+ *
+ * Returns and removes the next event item from the queue. Returns %NULL if
+ * there is no event item left.
+ */
+static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
+{
+ struct ssam_event_item *item;
+
+ spin_lock(&q->lock);
+ item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
+ if (item)
+ list_del(&item->node);
+ spin_unlock(&q->lock);
+
+ return item;
+}
+
+/**
+ * ssam_event_queue_is_empty() - Check if the event queue is empty.
+ * @q: The event queue.
+ */
+static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
+{
+ bool empty;
+
+ spin_lock(&q->lock);
+ empty = list_empty(&q->head);
+ spin_unlock(&q->lock);
+
+ return empty;
+}
+
+/**
+ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
+ * @cplt: The completion system on which to look for the queue.
+ * @tid: The target ID of the queue.
+ * @rqid: The request ID representing the event ID for which to get the queue.
+ *
+ * Return: Returns the event queue corresponding to the event type described
+ * by the given parameters. If the request ID does not represent an event,
+ * this function returns %NULL. If the target ID is not supported, this
+ * function will fall back to the default target ID (``tid = 1``).
+ */
+static
+struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
+ u8 tid, u16 rqid)
+{
+ u16 event = ssh_rqid_to_event(rqid);
+ u16 tidx = ssh_tid_to_index(tid);
+
+ if (!ssh_rqid_is_event(rqid)) {
+ dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
+ return NULL;
+ }
+
+ if (!ssh_tid_is_valid(tid)) {
+ dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
+ tidx = 0;
+ }
+
+ return &cplt->event.target[tidx].queue[event];
+}
+
+/**
+ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
+ * @cplt: The completion system.
+ * @work: The work item to submit.
+ */
+static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
+{
+ return queue_work(cplt->wq, work);
+}
+
+/**
+ * ssam_cplt_submit_event() - Submit an event to the completion system.
+ * @cplt: The completion system.
+ * @item: The event item to submit.
+ *
+ * Submits the event to the completion system by queuing it on the event item
+ * queue and queuing the respective event queue work item on the completion
+ * workqueue, which will eventually complete the event.
+ *
+ * Return: Returns zero on success, %-EINVAL if there is no event queue that
+ * can handle the given event item.
+ */
+static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
+ struct ssam_event_item *item)
+{
+ struct ssam_event_queue *evq;
+
+ evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
+ if (!evq)
+ return -EINVAL;
+
+ ssam_event_queue_push(evq, item);
+ ssam_cplt_submit(cplt, &evq->work);
+ return 0;
+}
+
+/**
+ * ssam_cplt_flush() - Flush the completion system.
+ * @cplt: The completion system.
+ *
+ * Flush the completion system by waiting until all currently submitted work
+ * items have been completed.
+ *
+ * Note: This function does not guarantee that all events will have been
+ * handled once this call terminates. In case of a larger number of
+ * to-be-completed events, the event queue work function may re-schedule its
+ * work item, which this flush operation will ignore.
+ *
+ * This operation is only intended to, during normal operation prior to
+ * shutdown, try to complete most events and requests to get them out of the
+ * system while the system is still fully operational. It does not aim to
+ * provide any guarantee that all of them have been handled.
+ */
+static void ssam_cplt_flush(struct ssam_cplt *cplt)
+{
+ flush_workqueue(cplt->wq);
+}
+
+static void ssam_event_queue_work_fn(struct work_struct *work)
+{
+ struct ssam_event_queue *queue;
+ struct ssam_event_item *item;
+ struct ssam_nf *nf;
+ struct device *dev;
+ unsigned int iterations = SSAM_CPLT_WQ_BATCH;
+
+ queue = container_of(work, struct ssam_event_queue, work);
+ nf = &queue->cplt->event.notif;
+ dev = queue->cplt->dev;
+
+ /* Limit number of processed events to avoid livelocking. */
+ do {
+ item = ssam_event_queue_pop(queue);
+ if (!item)
+ return;
+
+ ssam_nf_call(nf, dev, item->rqid, &item->event);
+ ssam_event_item_free(item);
+ } while (--iterations);
+
+ if (!ssam_event_queue_is_empty(queue))
+ ssam_cplt_submit(queue->cplt, &queue->work);
+}
+
+/**
+ * ssam_event_queue_init() - Initialize an event queue.
+ * @cplt: The completion system on which the queue resides.
+ * @evq: The event queue to initialize.
+ */
+static void ssam_event_queue_init(struct ssam_cplt *cplt,
+ struct ssam_event_queue *evq)
+{
+ evq->cplt = cplt;
+ spin_lock_init(&evq->lock);
+ INIT_LIST_HEAD(&evq->head);
+ INIT_WORK(&evq->work, ssam_event_queue_work_fn);
+}
+
+/**
+ * ssam_cplt_init() - Initialize completion system.
+ * @cplt: The completion system to initialize.
+ * @dev: The device used for logging.
+ */
+static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+{
+ struct ssam_event_target *target;
+ int status, c, i;
+
+ cplt->dev = dev;
+
+ cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
+ if (!cplt->wq)
+ return -ENOMEM;
+
+ for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
+ target = &cplt->event.target[c];
+
+ for (i = 0; i < ARRAY_SIZE(target->queue); i++)
+ ssam_event_queue_init(cplt, &target->queue[i]);
+ }
+
+ status = ssam_nf_init(&cplt->event.notif);
+ if (status)
+ destroy_workqueue(cplt->wq);
+
+ return status;
+}
+
+/**
+ * ssam_cplt_destroy() - Deinitialize the completion system.
+ * @cplt: The completion system to deinitialize.
+ *
+ * Deinitialize the given completion system and ensure that all pending, i.e.
+ * yet-to-be-completed, event items and requests have been handled.
+ */
+static void ssam_cplt_destroy(struct ssam_cplt *cplt)
+{
+ /*
+ * Note: destroy_workqueue ensures that all currently queued work will
+ * be fully completed and the workqueue drained. This means that this
+ * call will inherently also free any queued ssam_event_items, thus we
+ * don't have to take care of that here explicitly.
+ */
+ destroy_workqueue(cplt->wq);
+ ssam_nf_destroy(&cplt->event.notif);
+}
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * ssam_controller_device() - Get the &struct device associated with this
+ * controller.
+ * @c: The controller for which to get the device.
+ *
+ * Return: Returns the &struct device associated with this controller,
+ * providing its lower-level transport.
+ */
+struct device *ssam_controller_device(struct ssam_controller *c)
+{
+ return ssh_rtl_get_device(&c->rtl);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_device);
+
+static void __ssam_controller_release(struct kref *kref)
+{
+ struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
+
+ /*
+ * The lock-call here is to satisfy lockdep. At this point we really
+ * expect this to be the last remaining reference to the controller.
+ * Anything else is a bug.
+ */
+ ssam_controller_lock(ctrl);
+ ssam_controller_destroy(ctrl);
+ ssam_controller_unlock(ctrl);
+
+ kfree(ctrl);
+}
+
+/**
+ * ssam_controller_get() - Increment reference count of controller.
+ * @c: The controller.
+ *
+ * Return: Returns the controller provided as input.
+ */
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
+{
+ if (c)
+ kref_get(&c->kref);
+ return c;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_get);
+
+/**
+ * ssam_controller_put() - Decrement reference count of controller.
+ * @c: The controller.
+ */
+void ssam_controller_put(struct ssam_controller *c)
+{
+ if (c)
+ kref_put(&c->kref, __ssam_controller_release);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_put);
+
+/**
+ * ssam_controller_statelock() - Lock the controller against state transitions.
+ * @c: The controller to lock.
+ *
+ * Lock the controller against state transitions. Holding this lock guarantees
+ * that the controller will not transition between states, i.e. if the
+ * controller is in the "started" state when this lock has been acquired, it
+ * will remain in this state at least until the lock has been released.
+ *
+ * Multiple clients may concurrently hold this lock. In other words: The
+ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
+ * Actions causing state transitions of the controller must be executed while
+ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
+ * and ssam_controller_unlock() for that).
+ *
+ * See ssam_controller_stateunlock() for the corresponding unlock function.
+ */
+void ssam_controller_statelock(struct ssam_controller *c)
+{
+ down_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_statelock);
+
+/**
+ * ssam_controller_stateunlock() - Unlock controller state transitions.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_statelock() for the corresponding lock function.
+ */
+void ssam_controller_stateunlock(struct ssam_controller *c)
+{
+ up_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
+
+/**
+ * ssam_controller_lock() - Acquire the main controller lock.
+ * @c: The controller to lock.
+ *
+ * This lock must be held for any state transitions, including transition to
+ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
+ * for more details on controller locking.
+ *
+ * See ssam_controller_unlock() for the corresponding unlock function.
+ */
+void ssam_controller_lock(struct ssam_controller *c)
+{
+ down_write(&c->lock);
+}
+
+/**
+ * ssam_controller_unlock() - Release the main controller lock.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_lock() for the corresponding lock function.
+ */
+void ssam_controller_unlock(struct ssam_controller *c)
+{
+ up_write(&c->lock);
+}
+
+static void ssam_handle_event(struct ssh_rtl *rtl,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
+ struct ssam_event_item *item;
+
+ item = ssam_event_item_alloc(data->len, GFP_KERNEL);
+ if (!item)
+ return;
+
+ item->rqid = get_unaligned_le16(&cmd->rqid);
+ item->event.target_category = cmd->tc;
+ item->event.target_id = cmd->tid_in;
+ item->event.command_id = cmd->cid;
+ item->event.instance_id = cmd->iid;
+ memcpy(&item->event.data[0], data->ptr, data->len);
+
+ if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
+ ssam_event_item_free(item);
+}
+
+static const struct ssh_rtl_ops ssam_rtl_ops = {
+ .handle_event = ssam_handle_event,
+};
+
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl);
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
+
+#define SSAM_SSH_DSM_REVISION 0
+
+/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */
+static const guid_t SSAM_SSH_DSM_GUID =
+ GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
+ 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
+
+enum ssh_dsm_fn {
+ SSH_DSM_FN_SSH_POWER_PROFILE = 0x05,
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT = 0x06,
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
+ SSH_DSM_FN_D3_CLOSES_HANDLE = 0x08,
+ SSH_DSM_FN_SSH_BUFFER_SIZE = 0x09,
+};
+
+static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
+{
+ union acpi_object *obj;
+ u64 mask = 0;
+ int i;
+
+ *funcs = 0;
+
+ /*
+ * The _DSM function is only present on newer models. It is not
+ * present on 5th and 6th generation devices (i.e. up to and including
+ * Surface Pro 6, Surface Laptop 2, Surface Book 2).
+ *
+ * If the _DSM is not present, indicate that no function is supported.
+ * This will result in default values being set.
+ */
+ if (!acpi_has_method(handle, "_DSM"))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
+ SSAM_SSH_DSM_REVISION, 0, NULL,
+ ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -EIO;
+
+ for (i = 0; i < obj->buffer.length && i < 8; i++)
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
+
+ if (mask & BIT(0))
+ *funcs = mask;
+
+ ACPI_FREE(obj);
+ return 0;
+}
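+
+/*
+ * Worked example (illustrative only): a _DSM returning the buffer
+ * { 0x61, 0x00 } yields mask = 0x61. Bit 0 is set, so the mask is valid and
+ * stored in *funcs; bits 5 and 6 then indicate support for
+ * SSH_DSM_FN_SSH_POWER_PROFILE (0x05) and
+ * SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT (0x06). If bit 0 were clear,
+ * *funcs would remain zero and all capabilities would keep their defaults.
+ */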
+
+static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
+{
+ union acpi_object *obj;
+ u64 val;
+
+ if (!(funcs & BIT(func)))
+ return 0; /* Not supported, leave *ret at its default value */
+
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
+ SSAM_SSH_DSM_REVISION, func, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -EIO;
+
+ val = obj->integer.value;
+ ACPI_FREE(obj);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ *ret = val;
+ return 0;
+}
+
+/**
+ * ssam_controller_caps_load_from_acpi() - Load controller capabilities from
+ * ACPI _DSM.
+ * @handle: The handle of the ACPI controller/SSH device.
+ * @caps: Where to store the capabilities in.
+ *
+ * Initializes the given controller capabilities with default values, then
+ * checks and, if the respective _DSM functions are available, loads the
+ * actual capabilities from the _DSM.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+static
+int ssam_controller_caps_load_from_acpi(acpi_handle handle,
+ struct ssam_controller_caps *caps)
+{
+ u32 d3_closes_handle = false;
+ u64 funcs;
+ int status;
+
+ /* Set defaults. */
+ caps->ssh_power_profile = U32_MAX;
+ caps->screen_on_sleep_idle_timeout = U32_MAX;
+ caps->screen_off_sleep_idle_timeout = U32_MAX;
+ caps->d3_closes_handle = false;
+ caps->ssh_buffer_size = U32_MAX;
+
+ /* Pre-load supported DSM functions. */
+ status = ssam_dsm_get_functions(handle, &funcs);
+ if (status)
+ return status;
+
+ /* Load actual values from ACPI, if present. */
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
+ &caps->ssh_power_profile);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs,
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
+ &caps->screen_on_sleep_idle_timeout);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs,
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
+ &caps->screen_off_sleep_idle_timeout);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
+ &d3_closes_handle);
+ if (status)
+ return status;
+
+ caps->d3_closes_handle = !!d3_closes_handle;
+
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
+ &caps->ssh_buffer_size);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ssam_controller_init() - Initialize SSAM controller.
+ * @ctrl: The controller to initialize.
+ * @serdev: The serial device representing the underlying data transport.
+ *
+ * Initializes the given controller. Does not start the receiver or
+ * transmitter threads. After this call, the controller has to be hooked up
+ * to the serdev core separately via &struct serdev_device_ops, relaying
+ * calls to ssam_controller_receive_buf() and ssam_controller_write_wakeup().
+ * Once the controller has been hooked up, transmitter and receiver threads
+ * may be started via ssam_controller_start(). These setup steps need to be
+ * completed before the controller can be used for requests.
+ *
+ * Return: Returns zero on success, or a negative error code on failure.
+ */
+int ssam_controller_init(struct ssam_controller *ctrl,
+ struct serdev_device *serdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&serdev->dev);
+ int status;
+
+ init_rwsem(&ctrl->lock);
+ kref_init(&ctrl->kref);
+
+ status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
+ if (status)
+ return status;
+
+ dev_dbg(&serdev->dev,
+ "device capabilities:\n"
+ " ssh_power_profile: %u\n"
+ " ssh_buffer_size: %u\n"
+ " screen_on_sleep_idle_timeout: %u\n"
+ " screen_off_sleep_idle_timeout: %u\n"
+ " d3_closes_handle: %u\n",
+ ctrl->caps.ssh_power_profile,
+ ctrl->caps.ssh_buffer_size,
+ ctrl->caps.screen_on_sleep_idle_timeout,
+ ctrl->caps.screen_off_sleep_idle_timeout,
+ ctrl->caps.d3_closes_handle);
+
+ ssh_seq_reset(&ctrl->counter.seq);
+ ssh_rqid_reset(&ctrl->counter.rqid);
+
+ /* Initialize event/request completion system. */
+ status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
+ if (status)
+ return status;
+
+ /* Initialize request and packet transport layers. */
+ status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
+ if (status) {
+ ssam_cplt_destroy(&ctrl->cplt);
+ return status;
+ }
+
+ /*
+ * Set state via write_once even though we expect to be in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
+ return 0;
+}
+
+/**
+ * ssam_controller_start() - Start the receiver and transmitter threads of the
+ * controller.
+ * @ctrl: The controller.
+ *
+ * Note: When this function is called, the controller should be properly
+ * hooked up to the serdev core via &struct serdev_device_ops. Please refer
+ * to ssam_controller_init() for more details on controller initialization.
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ *
+ * Return: Returns zero on success, %-EINVAL if the controller is not in the
+ * "initialized" state, or the status of ssh_rtl_start() if starting the
+ * request transport layer failed.
+ */
+int ssam_controller_start(struct ssam_controller *ctrl)
+{
+ int status;
+
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
+ return -EINVAL;
+
+ status = ssh_rtl_start(&ctrl->rtl);
+ if (status)
+ return status;
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+ return 0;
+}
+
+/*
+ * SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
+ * shutdown.
+ *
+ * Chosen to be larger than one full request timeout, including packets timing
+ * out. This value should give ample time to complete any outstanding requests
+ * during normal operation and account for the odd packet timeout.
+ */
+#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT msecs_to_jiffies(5000)
+
+/**
+ * ssam_controller_shutdown() - Shut down the controller.
+ * @ctrl: The controller.
+ *
+ * Shuts down the controller by flushing all pending requests and stopping the
+ * transmitter and receiver threads. All requests submitted after this call
+ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
+ * is safe to use in parallel with ongoing request submission.
+ *
+ * In the course of this shutdown procedure, all currently registered
+ * notifiers will be unregistered. It is, however, strongly recommended to not
+ * rely on this behavior, and instead the party registering the notifier
+ * should unregister it before the controller gets shut down, e.g. via the
+ * SSAM bus, which guarantees that client devices are removed before a
+ * shutdown.
+ *
+ * Note that events may still be pending after this call, but, due to the
+ * notifiers being unregistered, these events will be dropped when the
+ * controller is subsequently destroyed via ssam_controller_destroy().
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+void ssam_controller_shutdown(struct ssam_controller *ctrl)
+{
+ enum ssam_controller_state s = ctrl->state;
+ int status;
+
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
+ return;
+
+ /*
+ * Try to flush pending events and requests while everything still
+ * works. Note: There may still be packets and/or requests in the
+ * system after this call (e.g. via control packets submitted by the
+ * packet transport layer or flush timeout / failure, ...). Those will
+ * be handled with the ssh_rtl_shutdown() call below.
+ */
+ status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
+ if (status) {
+ ssam_err(ctrl, "failed to flush request transport layer: %d\n",
+ status);
+ }
+
+ /* Try to flush all currently completing requests and events. */
+ ssam_cplt_flush(&ctrl->cplt);
+
+ /*
+ * At this point, we expect all notifiers to have been removed by the
+ * respective client drivers that set them up. If this warning
+ * triggers, some client driver has failed to do so.
+ */
+ WARN_ON(!ssam_notifier_is_empty(ctrl));
+
+ /*
+ * Nevertheless, we should still take care of drivers that don't behave
+ * well. Thus disable all enabled events, unregister all notifiers.
+ */
+ ssam_notifier_unregister_all(ctrl);
+
+ /*
+ * Cancel remaining requests. Ensure no new ones can be queued and stop
+ * threads.
+ */
+ ssh_rtl_shutdown(&ctrl->rtl);
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
+ ctrl->rtl.ptl.serdev = NULL;
+}
+
+/**
+ * ssam_controller_destroy() - Destroy the controller and free its resources.
+ * @ctrl: The controller.
+ *
+ * Ensures that all resources associated with the controller get freed. This
+ * function should only be called after the controller has been stopped via
+ * ssam_controller_shutdown(). In general, this function should not be called
+ * directly. The only valid place to call this function directly is during
+ * initialization, before the controller has been fully initialized and
+ * passed on to other users. This function is called automatically when the
+ * reference count of the controller reaches zero.
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+void ssam_controller_destroy(struct ssam_controller *ctrl)
+{
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
+ return;
+
+ WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
+
+ /*
+ * Note: New events could still have been received after the previous
+ * flush in ssam_controller_shutdown, before the request transport layer
+ * has been shut down. At this point, after the shutdown, we can be sure
+ * that no new events will be queued. The call to ssam_cplt_destroy will
+ * ensure that those remaining are being completed and freed.
+ */
+
+ /* Actually free resources. */
+ ssam_cplt_destroy(&ctrl->cplt);
+ ssh_rtl_destroy(&ctrl->rtl);
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
+}
+
+/**
+ * ssam_controller_suspend() - Suspend the controller.
+ * @ctrl: The controller to suspend.
+ *
+ * Marks the controller as suspended. Note that display-off and D0-exit
+ * notifications have to be sent manually before transitioning the controller
+ * into the suspended state via this function.
+ *
+ * See ssam_controller_resume() for the corresponding resume function.
+ *
+ * Return: Returns zero on success, or %-EINVAL if the controller is
+ * currently not in the "started" state.
+ */
+int ssam_controller_suspend(struct ssam_controller *ctrl)
+{
+ ssam_controller_lock(ctrl);
+
+ if (ctrl->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_unlock(ctrl);
+ return -EINVAL;
+ }
+
+ ssam_dbg(ctrl, "pm: suspending controller\n");
+
+ /*
+ * Set state via write_once even though we're locked, due to
+ * smoke-testing in ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
+
+ ssam_controller_unlock(ctrl);
+ return 0;
+}
+
+/**
+ * ssam_controller_resume() - Resume the controller from suspend.
+ * @ctrl: The controller to resume.
+ *
+ * Resume the controller from the suspended state it was put into via
+ * ssam_controller_suspend(). This function does not issue display-on and
+ * D0-entry notifications. If required, those have to be sent manually after
+ * this call.
+ *
+ * Return: Returns zero on success, or %-EINVAL if the controller is
+ * currently not suspended.
+ */
+int ssam_controller_resume(struct ssam_controller *ctrl)
+{
+ ssam_controller_lock(ctrl);
+
+ if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
+ ssam_controller_unlock(ctrl);
+ return -EINVAL;
+ }
+
+ ssam_dbg(ctrl, "pm: resuming controller\n");
+
+ /*
+ * Set state via write_once even though we're locked, due to
+ * smoke-testing in ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+
+ ssam_controller_unlock(ctrl);
+ return 0;
+}
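+
+/*
+ * Ordering sketch (illustrative only; example_suspend() is hypothetical):
+ * as noted above, the display-off and D0-exit notifications must be sent
+ * while the controller is still in the "started" state, before marking it
+ * as suspended.
+ *
+ *         static int example_suspend(struct ssam_controller *ctrl)
+ *         {
+ *                 int status;
+ *
+ *                 status = ssam_ctrl_notif_display_off(ctrl);
+ *                 if (status)
+ *                         return status;
+ *
+ *                 status = ssam_ctrl_notif_d0_exit(ctrl);
+ *                 if (status)
+ *                         goto err_d0;
+ *
+ *                 status = ssam_controller_suspend(ctrl);
+ *                 if (status)
+ *                         goto err_suspend;
+ *
+ *                 return 0;
+ *
+ *         err_suspend:
+ *                 ssam_ctrl_notif_d0_entry(ctrl);
+ *         err_d0:
+ *                 ssam_ctrl_notif_display_on(ctrl);
+ *                 return status;
+ *         }
+ */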
+
+
+/* -- Top-level request interface ------------------------------------------- */
+
+/**
+ * ssam_request_write_data() - Construct and write SAM request message to
+ * buffer.
+ * @buf: The buffer to write the data to.
+ * @ctrl: The controller via which the request will be sent.
+ * @spec: The request data and specification.
+ *
+ * Constructs a SAM/SSH request message and writes it to the provided buffer.
+ * The request and transport counters, specifically RQID and SEQ, will be set
+ * in this call. These counters are obtained from the controller. It is thus
+ * only valid to send the resulting message via the controller specified here.
+ *
+ * For calculation of the required buffer size, refer to the
+ * SSH_COMMAND_MESSAGE_LENGTH() macro.
+ *
+ * Return: Returns the number of bytes used in the buffer on success. Returns
+ * %-EINVAL if the payload length provided in the request specification is too
+ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
+ * is too small.
+ */
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ const struct ssam_request *spec)
+{
+ struct msgbuf msgb;
+ u16 rqid;
+ u8 seq;
+
+ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
+ return -EINVAL;
+
+ if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
+ return -EINVAL;
+
+ msgb_init(&msgb, buf->ptr, buf->len);
+ seq = ssh_seq_next(&ctrl->counter.seq);
+ rqid = ssh_rqid_next(&ctrl->counter.rqid);
+ msgb_push_cmd(&msgb, seq, rqid, spec);
+
+ return msgb_bytes_used(&msgb);
+}
+EXPORT_SYMBOL_GPL(ssam_request_write_data);
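+
+/*
+ * Sketch (illustrative only; the request values are made up): write a
+ * message for a two-byte payload into a caller-provided buffer sized via
+ * SSH_COMMAND_MESSAGE_LENGTH().
+ *
+ *         u8 payload[2] = { 0x01, 0x02 };
+ *         u8 raw[SSH_COMMAND_MESSAGE_LENGTH(sizeof(payload))];
+ *         struct ssam_span buf = { .ptr = raw, .len = sizeof(raw) };
+ *         struct ssam_request spec = {
+ *                 .target_category = SSAM_SSH_TC_SAM,
+ *                 .target_id = 0x01,
+ *                 .command_id = 0x02,
+ *                 .instance_id = 0x00,
+ *                 .flags = 0,
+ *                 .length = sizeof(payload),
+ *                 .payload = payload,
+ *         };
+ *         ssize_t len = ssam_request_write_data(&buf, ctrl, &spec);
+ *
+ * On success, raw holds the full message with SEQ and RQID already assigned
+ * from the controller's counters.
+ */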
+
+static void ssam_request_sync_complete(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ struct ssam_request_sync *r;
+
+ r = container_of(rqst, struct ssam_request_sync, base);
+ r->status = status;
+
+ if (r->resp)
+ r->resp->length = 0;
+
+ if (status) {
+ rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
+ return;
+ }
+
+ if (!data) /* Handle requests without a response. */
+ return;
+
+ if (!r->resp || !r->resp->pointer) {
+ if (data->len)
+ rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
+ return;
+ }
+
+ if (data->len > r->resp->capacity) {
+ rtl_err(rtl,
+ "rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n",
+ r->resp->capacity, data->len);
+ r->status = -ENOSPC;
+ return;
+ }
+
+ r->resp->length = data->len;
+ memcpy(r->resp->pointer, data->ptr, data->len);
+}
+
+static void ssam_request_sync_release(struct ssh_request *rqst)
+{
+ complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
+}
+
+static const struct ssh_request_ops ssam_request_sync_ops = {
+ .release = ssam_request_sync_release,
+ .complete = ssam_request_sync_complete,
+};
+
+/**
+ * ssam_request_sync_alloc() - Allocate a synchronous request.
+ * @payload_len: The length of the request payload.
+ * @flags: Flags used for allocation.
+ * @rqst: Where to store the pointer to the allocated request.
+ * @buffer: Where to store the buffer descriptor for the message buffer of
+ * the request.
+ *
+ * Allocates a synchronous request with a corresponding message buffer. The
+ * request still needs to be initialized via ssam_request_sync_init() before
+ * it can be submitted, and the message buffer data must still be set to the
+ * returned buffer via ssam_request_sync_set_data() after it has been filled,
+ * if need be with adjusted message length.
+ *
+ * After use, the request and its corresponding message buffer should be freed
+ * via ssam_request_sync_free(). The buffer must not be freed separately.
+ *
+ * Return: Returns zero on success, %-ENOMEM if the request could not be
+ * allocated.
+ */
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer)
+{
+ size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
+
+ *rqst = kzalloc(sizeof(**rqst) + msglen, flags);
+ if (!*rqst)
+ return -ENOMEM;
+
+ buffer->ptr = (u8 *)(*rqst + 1);
+ buffer->len = msglen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
+
+/**
+ * ssam_request_sync_free() - Free a synchronous request.
+ * @rqst: The request to be freed.
+ *
+ * Free a synchronous request and its corresponding buffer allocated with
+ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
+ * or via any other function.
+ *
+ * Warning: The caller must ensure that the request is not in use any more.
+ * I.e. the caller must ensure that it has the only reference to the request
+ * and the request is not currently pending. This means that the caller has
+ * either never submitted the request, request submission has failed, or the
+ * caller has waited until the submitted request has been completed via
+ * ssam_request_sync_wait().
+ */
+void ssam_request_sync_free(struct ssam_request_sync *rqst)
+{
+ kfree(rqst);
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_free);
+
+/**
+ * ssam_request_sync_init() - Initialize a synchronous request struct.
+ * @rqst: The request to initialize.
+ * @flags: The request flags.
+ *
+ * Initializes the given request struct. Does not initialize the request
+ * message data. This has to be done explicitly after this call via
+ * ssam_request_sync_set_data() and the actual message data has to be written
+ * via ssam_request_write_data().
+ *
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
+ */
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags)
+{
+ int status;
+
+ status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
+ if (status)
+ return status;
+
+ init_completion(&rqst->comp);
+ rqst->resp = NULL;
+ rqst->status = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_init);
+
+/**
+ * ssam_request_sync_submit() - Submit a synchronous request.
+ * @ctrl: The controller with which to submit the request.
+ * @rqst: The request to submit.
+ *
+ * Submit a synchronous request. The request has to be initialized and
+ * properly set up, including response buffer (may be %NULL if no response is
+ * expected) and command message data. This function does not wait for the
+ * request to be completed.
+ *
+ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
+ * that the request has been completed before the response data can be
+ * accessed and/or the request can be freed. On failure, the request may
+ * immediately be freed.
+ *
+ * This function may only be used if the controller is active, i.e. has been
+ * initialized and not suspended.
+ */
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst)
+{
+ int status;
+
+ /*
+ * This is only a superficial check. In general, the caller needs to
+ * ensure that the controller is initialized and is not (and does not
+ * get) suspended during use, i.e. until the request has been completed
+ * (if _absolutely_ necessary, by use of ssam_controller_statelock/
+ * ssam_controller_stateunlock, but something like ssam_client_link
+ * should be preferred as this needs to last until the request has been
+ * completed).
+ *
+ * Note that it is actually safe to use this function while the
+ * controller is in the process of being shut down (as ssh_rtl_submit
+ * is safe with regards to this), but it is generally discouraged to do
+ * so.
+ */
+ if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
+ ssh_request_put(&rqst->base);
+ return -ENODEV;
+ }
+
+ status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
+ ssh_request_put(&rqst->base);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
+
+/**
+ * ssam_request_sync() - Execute a synchronous request.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ *
+ * Allocates a synchronous request with its message data buffer on the heap
+ * via ssam_request_sync_alloc(), fully initializes it via the provided
+ * request specification, submits it, and finally waits for its completion
+ * before freeing it and returning its status.
+ *
+ * Return: Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp)
+{
+ struct ssam_request_sync *rqst;
+ struct ssam_span buf;
+ ssize_t len;
+ int status;
+
+ status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
+ if (status)
+ return status;
+
+ status = ssam_request_sync_init(rqst, spec->flags);
+ if (status) {
+ ssam_request_sync_free(rqst);
+ return status;
+ }
+
+ ssam_request_sync_set_resp(rqst, rsp);
+
+ len = ssam_request_write_data(&buf, ctrl, spec);
+ if (len < 0) {
+ ssam_request_sync_free(rqst);
+ return len;
+ }
+
+ ssam_request_sync_set_data(rqst, buf.ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, rqst);
+ if (!status)
+ status = ssam_request_sync_wait(rqst);
+
+ ssam_request_sync_free(rqst);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync);
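+
+/*
+ * Usage sketch (illustrative only; the command values mirror the
+ * firmware-version request defined further below): execute a request
+ * synchronously and read back a four-byte response.
+ *
+ *         struct ssam_request rqst = {
+ *                 .target_category = SSAM_SSH_TC_SAM,
+ *                 .target_id = 0x01,
+ *                 .command_id = 0x13,
+ *                 .instance_id = 0x00,
+ *                 .flags = SSAM_REQUEST_HAS_RESPONSE,
+ *                 .length = 0,
+ *                 .payload = NULL,
+ *         };
+ *         __le32 data;
+ *         struct ssam_response rsp = {
+ *                 .capacity = sizeof(data),
+ *                 .length = 0,
+ *                 .pointer = (u8 *)&data,
+ *         };
+ *         int status = ssam_request_sync(ctrl, &rqst, &rsp);
+ */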
+
+/**
+ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
+ * provided buffer as back-end for the message buffer.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ * @buf: The buffer for the request message data.
+ *
+ * Allocates a synchronous request struct on the stack, fully initializes it
+ * using the provided buffer as message data buffer, submits it, and then
+ * waits for its completion before returning its status. The
+ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
+ * message buffer size.
+ *
+ * This function does essentially the same as ssam_request_sync(), but
+ * instead of dynamically allocating the request and message data buffer, it
+ * uses the provided message data buffer and stores the (small) request
+ * struct on the stack.
+ *
+ * Return: Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf)
+{
+ struct ssam_request_sync rqst;
+ ssize_t len;
+ int status;
+
+ status = ssam_request_sync_init(&rqst, spec->flags);
+ if (status)
+ return status;
+
+ ssam_request_sync_set_resp(&rqst, rsp);
+
+ len = ssam_request_write_data(buf, ctrl, spec);
+ if (len < 0)
+ return len;
+
+ ssam_request_sync_set_data(&rqst, buf->ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, &rqst);
+ if (!status)
+ status = ssam_request_sync_wait(&rqst);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
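+
+/*
+ * Sketch (illustrative only): the same request as above, but with the
+ * message buffer on the stack. The ssam_retry(ssam_request_sync_onstack,
+ * ...) pattern used later in this file wraps this approach.
+ *
+ *         u8 raw[SSH_COMMAND_MESSAGE_LENGTH(0)];
+ *         struct ssam_span buf = { .ptr = raw, .len = sizeof(raw) };
+ *         int status = ssam_request_sync_with_buffer(ctrl, &rqst, &rsp, &buf);
+ */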
+
+
+/* -- Internal SAM requests. ------------------------------------------------ */
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x13,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x15,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x16,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x33,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x34,
+ .instance_id = 0x00,
+});
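+
+/*
+ * Each SSAM_DEFINE_SYNC_REQUEST_R() invocation above generates a function of
+ * the form
+ *
+ *         static int name(struct ssam_controller *ctrl, rtype *ret);
+ *
+ * which executes the specified request and stores its response in *ret; see
+ * the wrappers further below for usage.
+ */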
+
+/**
+ * struct ssh_notification_params - Command payload to enable/disable SSH
+ * notifications.
+ * @target_category: The target category for which notifications should be
+ * enabled/disabled.
+ * @flags: Flags determining how notifications are being sent.
+ * @request_id: The request ID that is used to send these notifications.
+ * @instance_id: The specific instance in the given target category for
+ * which notifications should be enabled.
+ */
+struct ssh_notification_params {
+ u8 target_category;
+ u8 flags;
+ __le16 request_id;
+ u8 instance_id;
+} __packed;
+
+static_assert(sizeof(struct ssh_notification_params) == 5);
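+
+/*
+ * Wire-layout example (illustrative): enabling events for target category
+ * 0x02, instance 0x00, with flags 0x01 and request ID 0x0002 produces the
+ * five-byte payload { 0x02, 0x01, 0x02, 0x00, 0x00 }, i.e. target category,
+ * flags, little-endian request ID, and instance ID, in that order.
+ */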
+
+static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg, u8 cid,
+ struct ssam_event_id id, u8 flags)
+{
+ struct ssh_notification_params params;
+ struct ssam_request rqst;
+ struct ssam_response result;
+ int status;
+
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ u8 buf = 0;
+
+ /* Only allow RQIDs that lie within the event spectrum. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ params.target_category = id.target_category;
+ params.instance_id = id.instance;
+ params.flags = flags;
+ put_unaligned_le16(rqid, &params.request_id);
+
+ rqst.target_category = reg.target_category;
+ rqst.target_id = reg.target_id;
+ rqst.command_id = cid;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(params);
+ rqst.payload = (u8 *)&params;
+
+ result.capacity = sizeof(buf);
+ result.length = 0;
+ result.pointer = &buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, ctrl, &rqst, &result,
+ sizeof(params));
+
+ return status < 0 ? status : buf;
+}
+
+/**
+ * ssam_ssh_event_enable() - Enable SSH event.
+ * @ctrl: The controller for which to enable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event.
+ * @id: The event identifier.
+ * @flags: The event flags.
+ *
+ * Enables the specified event on the EC. This function does not manage
+ * reference counting of enabled events and is basically only a wrapper for
+ * the raw EC request. If the specified event is already enabled, the EC will
+ * ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request (zero on success and
+ * negative on direct failure) or %-EPROTO if the request response indicates a
+ * failure.
+ */
+static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ int status;
+
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+
+ if (status < 0 && status != -EINVAL) {
+ ssam_err(ctrl,
+ "failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ id.target_category, id.instance, reg.target_category);
+ }
+
+ if (status > 0) {
+ ssam_err(ctrl,
+ "unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ status, id.target_category, id.instance, reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+}
+
+/**
+ * ssam_ssh_event_disable() - Disable SSH event.
+ * @ctrl: The controller for which to disable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event (must be same as used when enabling the event).
+ * @id: The event identifier.
+ * @flags: The event flags (likely ignored for disabling of events).
+ *
+ * Disables the specified event on the EC. This function does not manage
+ * reference counting of enabled events and is basically only a wrapper for
+ * the raw EC request. If the specified event is already disabled, the EC will
+ * ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request (zero on success and
+ * negative on direct failure) or %-EPROTO if the request response indicates a
+ * failure.
+ */
+static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ int status;
+
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
+
+ if (status < 0 && status != -EINVAL) {
+ ssam_err(ctrl,
+ "failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ id.target_category, id.instance, reg.target_category);
+ }
+
+ if (status > 0) {
+ ssam_err(ctrl,
+ "unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ status, id.target_category, id.instance, reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+}
+
+
+/* -- Wrappers for internal SAM requests. ----------------------------------- */
+
+/**
+ * ssam_get_firmware_version() - Get the SAM/EC firmware version.
+ * @ctrl: The controller.
+ * @version: Where to store the version number.
+ *
+ * Return: Returns zero on success or the status of the executed SAM request
+ * if that request failed.
+ */
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
+{
+ __le32 __version;
+ int status;
+
+ status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version);
+ if (status)
+ return status;
+
+ *version = le32_to_cpu(__version);
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
+ * off.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned off and the driver may enter
+ * a lower-power state. This will prevent events from being sent directly.
+ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
+ * as there are pending events. The events then need to be manually released,
+ * one by one, via the GPIO callback request. All pending events accumulated
+ * during this state can also be released by issuing the display-on
+ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
+ * the GPIO.
+ *
+ * On some devices, specifically ones with an integrated keyboard, the keyboard
+ * backlight will be turned off by this call.
+ *
+ * This function will only send the display-off notification command if
+ * display notifications are supported by the EC. Currently all known devices
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ ssam_dbg(ctrl, "pm: notifying display off\n");
+
+ status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned back on and the driver has
+ * exited its lower-power state. This notification is the counterpart to the
+ * display-off notification sent via ssam_ctrl_notif_display_off() and will
+ * reverse its effects, including resetting events to their default behavior.
+ *
+ * This function will only send the display-on notification command if display
+ * notifications are supported by the EC. Currently all known devices support
+ * these notifications.
+ *
+ * See ssam_ctrl_notif_display_off() for more details.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ ssam_dbg(ctrl, "pm: notifying display on\n");
+
+ status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver prepares to exit the D0 power state in
+ * favor of a lower-power state. Exact effects of this function related to the
+ * EC are currently unknown.
+ *
+ * This function will only send the D0-exit notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.d3_closes_handle)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 exit\n");
+
+ status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver has exited a lower-power state and entered
+ * the D0 power state. Exact effects of this function related to the EC are
+ * currently unknown.
+ *
+ * This function will only send the D0-entry notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * See ssam_ctrl_notif_d0_exit() for more details.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.d3_closes_handle)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 entry\n");
+
+ status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+
+/* -- Top-level event registry interface. ----------------------------------- */
+
+/**
+ * ssam_notifier_register() - Register an event notifier.
+ * @ctrl: The controller to register the notifier on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier and increment the usage counter of the
+ * associated SAM event. If the event was previously not enabled, it will be
+ * enabled during this call.
+ *
+ * Return: Returns zero on success, %-ENOSPC if there have already been
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
+ * registered, %-ENOMEM if the corresponding event entry could not be
+ * allocated. If this is the first time that a notifier block is registered
+ * for the specific associated event, returns the status of the event-enable
+ * EC-command.
+ */
+int ssam_notifier_register(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
+
+ ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ n->event.reg.target_category, n->event.id.target_category,
+ n->event.id.instance, entry->refcount);
+
+ status = ssam_nfblk_insert(nf_head, &n->base);
+ if (status) {
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (entry->refcount == 0)
+ kfree(entry);
+
+ mutex_unlock(&nf->lock);
+ return status;
+ }
+
+ if (entry->refcount == 1) {
+ status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
+ n->event.flags);
+ if (status) {
+ ssam_nfblk_remove(&n->base);
+ kfree(ssam_nf_refcount_dec(nf, n->event.reg, n->event.id));
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+ return status;
+ }
+
+ entry->flags = n->event.flags;
+
+ } else if (entry->flags != n->event.flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ n->event.flags, entry->flags, n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance);
+ }
+
+ mutex_unlock(&nf->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_notifier_register);
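+
+/*
+ * Registration sketch (illustrative only; the callback and the registry/ID
+ * values are hypothetical): set up an event notifier and register it,
+ * incrementing the usage count of the corresponding event.
+ *
+ *         static u32 example_notify(struct ssam_event_notifier *nf,
+ *                                   const struct ssam_event *event)
+ *         {
+ *                 ...handle event->command_id / event->data here...
+ *                 return 0;
+ *         }
+ *
+ *         struct ssam_event_notifier notif = {
+ *                 .base.fn = example_notify,
+ *                 .event.reg = SSAM_EVENT_REGISTRY_SAM,
+ *                 .event.id.target_category = SSAM_SSH_TC_SAM,
+ *                 .event.id.instance = 0x00,
+ *                 .event.flags = SSAM_EVENT_SEQUENCED,
+ *         };
+ *         status = ssam_notifier_register(ctrl, &notif);
+ */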
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier and decrement the usage counter of the
+ * associated SAM event. If the usage counter reaches zero, the event will be
+ * disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status = 0;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ if (!ssam_nfblk_find(nf_head, &n->base)) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (WARN_ON(!entry)) {
+ /*
+ * If this does not return an entry, there's a logic error
+ * somewhere: The notifier block is registered, but the event
+ * refcount entry is not there. Remove the notifier block
+ * anyway.
+ */
+ status = -ENOENT;
+ goto remove;
+ }
+
+ ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ n->event.reg.target_category, n->event.id.target_category,
+ n->event.id.instance, entry->refcount);
+
+ if (entry->flags != n->event.flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ n->event.flags, entry->flags, n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance);
+ }
+
+ if (entry->refcount == 0) {
+ status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
+ n->event.flags);
+ kfree(entry);
+ }
+
+remove:
+ ssam_nfblk_remove(&n->base);
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
+
+/**
+ * ssam_notifier_disable_registered() - Disable events for all registered
+ * notifiers.
+ * @ctrl: The controller for which to disable the notifiers/events.
+ *
+ * Disables events for all currently registered notifiers. In case of an error
+ * (EC command failing), all previously disabled events will be restored and
+ * the error code returned.
+ *
+ * This function is intended to disable all events prior to hibernation entry.
+ * See ssam_notifier_restore_registered() to restore/re-enable all events
+ * disabled with this function.
+ *
+ * Note that this function will not disable events for notifiers registered
+ * after calling this function. Callers must therefore ensure that no new
+ * notifiers are added after this call and before the corresponding call to
+ * ssam_notifier_restore_registered().
+ *
+ * Return: Returns zero on success. In case of failure returns the error code
+ * returned by the failed EC command to disable an event.
+ */
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+ int status;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ status = ssam_ssh_event_disable(ctrl, e->key.reg,
+ e->key.id, e->flags);
+ if (status)
+ goto err;
+ }
+ mutex_unlock(&nf->lock);
+
+ return 0;
+
+err:
+ for (n = rb_prev(n); n; n = rb_prev(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+
+ return status;
+}
+
+/**
+ * ssam_notifier_restore_registered() - Restore/re-enable events for all
+ * registered notifiers.
+ * @ctrl: The controller for which to restore the notifiers/events.
+ *
+ * Restores/re-enables all events for which notifiers have been registered on
+ * the given controller. In case of a failure, the error is logged and the
+ * function continues to try and enable the remaining events.
+ *
+ * This function is intended to restore/re-enable all registered events after
+ * hibernation. See ssam_notifier_disable_registered() for the counterpart
+ * disabling the events and for more details.
+ */
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+
+ /* Ignore errors, will get logged in call. */
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+}
+
+/**
+ * ssam_notifier_is_empty() - Check if there are any registered notifiers.
+ * @ctrl: The controller to check on.
+ *
+ * Return: Returns %true if there are currently no notifiers registered on the
+ * controller, %false otherwise.
+ */
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ bool result;
+
+ mutex_lock(&nf->lock);
+ result = ssam_nf_refcount_empty(nf);
+ mutex_unlock(&nf->lock);
+
+ return result;
+}
+
+/**
+ * ssam_notifier_unregister_all() - Unregister all currently registered
+ * notifiers.
+ * @ctrl: The controller to unregister the notifiers on.
+ *
+ * Unregisters all currently registered notifiers. This function is used to
+ * ensure that all notifiers will be unregistered and associated
+ * entries/resources freed when the controller is being shut down.
+ */
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *e, *n;
+
+ mutex_lock(&nf->lock);
+ rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
+ /* Ignore errors, will get logged in call. */
+ ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
+ kfree(e);
+ }
+ nf->refcount = RB_ROOT;
+ mutex_unlock(&nf->lock);
+}
+
+
+/* -- Wakeup IRQ. ----------------------------------------------------------- */
+
+static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
+{
+ struct ssam_controller *ctrl = dev_id;
+
+ ssam_dbg(ctrl, "pm: wake irq triggered\n");
+
+ /*
+ * Note: Proper wakeup detection is currently unimplemented.
+ * When the EC is in display-off or any other non-D0 state, it
+ * does not send events/notifications to the host. Instead it
+ * signals that there are events available via the wakeup IRQ.
+ * This driver is responsible for calling back to the EC to
+ * release these events one-by-one.
+ *
+ * This IRQ should not cause a full system resume on its own.
+ * Instead, events should be handled by their respective subsystem
+ * drivers, which in turn should signal whether a full system
+ * resume should be performed.
+ *
+ * TODO: Send GPIO callback command repeatedly to EC until callback
+ * returns 0x00. Return flag of callback is "has more events".
+ * Each time the command is sent, one event is "released". Once
+ * all events have been released (return = 0x00), the GPIO is
+ * re-armed. Detect wakeup events during this process, go back to
+ * sleep if no wakeup event has been received.
+ */
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be set up.
+ *
+ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
+ * to wake the device from a low power state.
+ *
+ * Note that this IRQ can only be triggered while the EC is in the display-off
+ * state. In this state, events are not sent to the host in the usual way.
+ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
+ * events and these events need to be released one-by-one via the GPIO
+ * callback request, either until there are no events left and the GPIO is
+ * reset, or all at once by transitioning the EC out of the display-off state,
+ * which will also clear the GPIO.
+ *
+ * Not all events, however, should trigger a full system wakeup. Instead the
+ * driver should, if necessary, inspect and forward each event to the
+ * corresponding subsystem, which in turn should decide if the system needs to
+ * be woken up. This logic has not been implemented yet, thus wakeup by this
+ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
+ * example, by the remaining battery percentage changing. Refer to comments in
+ * this function and comments in the corresponding IRQ handler for more
+ * details on how this should be implemented.
+ *
+ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
+ * for functions to transition the EC into and out of the display-off state as
+ * well as more details on it.
+ *
+ * The IRQ is disabled by default and has to be enabled before it can wake up
+ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
+ * should be freed via ssam_irq_free().
+ */
+int ssam_irq_setup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ struct gpio_desc *gpiod;
+ int irq;
+ int status;
+
+ /*
+ * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
+ * However, the GPIO line only gets reset by sending the GPIO callback
+ * command to SAM (or alternatively the display-on notification). As
+ * proper handling for this interrupt is not implemented yet, leaving
+ * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
+ * never gets sent and thus the line never gets reset). To avoid this,
+ * mark the IRQ as TRIGGER_RISING for now, only creating a single
+ * interrupt, and let the SAM resume callback during the controller
+ * resume process clear it.
+ */
+ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+
+ gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ irq = gpiod_to_irq(gpiod);
+ gpiod_put(gpiod);
+
+ if (irq < 0)
+ return irq;
+
+ status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
+ "ssam_wakeup", ctrl);
+ if (status)
+ return status;
+
+ ctrl->irq.num = irq;
+ disable_irq(ctrl->irq.num);
+ return 0;
+}
+
+/**
+ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be freed.
+ *
+ * Free the wakeup-GPIO IRQ previously set up via ssam_irq_setup().
+ */
+void ssam_irq_free(struct ssam_controller *ctrl)
+{
+ free_irq(ctrl->irq.num, ctrl);
+ ctrl->irq.num = -1;
+}
+
+/**
+ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
+ * @ctrl: The controller for which the IRQ should be armed.
+ *
+ * Sets up the IRQ so that it can be used to wake the device. Specifically,
+ * this function enables the irq and then, if the device is allowed to wake up
+ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
+ * corresponding function to disable the IRQ.
+ *
+ * This function is intended to arm the IRQ before entering S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ int status;
+
+ enable_irq(ctrl->irq.num);
+ if (device_may_wakeup(dev)) {
+ status = enable_irq_wake(ctrl->irq.num);
+ if (status) {
+ ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
+ disable_irq(ctrl->irq.num);
+ return status;
+ }
+
+ ctrl->irq.wakeup_enabled = true;
+ } else {
+ ctrl->irq.wakeup_enabled = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
+ * @ctrl: The controller for which the IRQ should be disarmed.
+ *
+ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
+ *
+ * This function is intended to disarm the IRQ after exiting S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
+{
+ int status;
+
+ if (ctrl->irq.wakeup_enabled) {
+ status = disable_irq_wake(ctrl->irq.num);
+ if (status)
+ ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
+
+ ctrl->irq.wakeup_enabled = false;
+ }
+ disable_irq(ctrl->irq.num);
+}
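+
+/*
+ * Sketch (illustrative only; the callbacks are hypothetical): balanced use
+ * of arm/disarm around S2idle suspend from the driver's PM callbacks.
+ *
+ *         static int example_suspend(struct device *dev)
+ *         {
+ *                 ...
+ *                 return ssam_irq_arm_for_wakeup(ctrl);
+ *         }
+ *
+ *         static int example_resume(struct device *dev)
+ *         {
+ *                 ssam_irq_disarm_wakeup(ctrl);
+ *                 ...
+ *         }
+ */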
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
new file mode 100644
index 000000000000..8297d34e7489
--- /dev/null
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Main SSAM/SSH controller structure and functionality.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
+#define _SURFACE_AGGREGATOR_CONTROLLER_H
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "ssh_request_layer.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
+ * @value: The current counter value.
+ */
+struct ssh_seq_counter {
+ u8 value;
+};
+
+/**
+ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
+ * @value: The current counter value.
+ */
+struct ssh_rqid_counter {
+ u16 value;
+};
+
+
+/* -- Event/notification system. -------------------------------------------- */
+
+/**
+ * struct ssam_nf_head - Notifier head for SSAM events.
+ * @srcu: The SRCU struct for synchronization.
+ * @head: List-head for notifier blocks registered under this head.
+ */
+struct ssam_nf_head {
+ struct srcu_struct srcu;
+ struct list_head head;
+};
+
+/**
+ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
+ * @lock: Lock guarding (de-)registration of notifier blocks. Note: This
+ * lock does not need to be held for notifier calls, only
+ * registration and deregistration.
+ * @refcount: The root of the RB-tree used for reference-counting enabled
+ * events/notifications.
+ * @head: The list of notifier heads for event/notification callbacks.
+ */
+struct ssam_nf {
+ struct mutex lock;
+ struct rb_root refcount;
+ struct ssam_nf_head head[SSH_NUM_EVENTS];
+};
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+struct ssam_cplt;
+
+/**
+ * struct ssam_event_item - Struct for event queuing and completion.
+ * @node: The node in the queue.
+ * @rqid: The request ID of the event.
+ * @ops: Instance-specific functions.
+ * @ops.free: Callback for freeing this event item.
+ * @event: Actual event data.
+ */
+struct ssam_event_item {
+ struct list_head node;
+ u16 rqid;
+
+ struct {
+ void (*free)(struct ssam_event_item *event);
+ } ops;
+
+ struct ssam_event event; /* must be last */
+};
+
+/**
+ * struct ssam_event_queue - Queue for completing received events.
+ * @cplt: Reference to the completion system on which this queue is active.
+ * @lock: The lock for any operation on the queue.
+ * @head: The list-head of the queue.
+ * @work: The &struct work_struct performing completion work for this queue.
+ */
+struct ssam_event_queue {
+ struct ssam_cplt *cplt;
+
+ spinlock_t lock;
+ struct list_head head;
+ struct work_struct work;
+};
+
+/**
+ * struct ssam_event_target - Set of queues for a single SSH target ID.
+ * @queue: The array of queues, one queue per event ID.
+ */
+struct ssam_event_target {
+ struct ssam_event_queue queue[SSH_NUM_EVENTS];
+};
+
+/**
+ * struct ssam_cplt - SSAM event/async request completion system.
+ * @dev: The device with which this system is associated. Only used
+ * for logging.
+ * @wq: The &struct workqueue_struct on which all completion work
+ * items are queued.
+ * @event: Event completion management.
+ * @event.target: Array of &struct ssam_event_target, one for each target.
+ * @event.notif: Notifier callbacks and event activation reference counting.
+ */
+struct ssam_cplt {
+ struct device *dev;
+ struct workqueue_struct *wq;
+
+ struct {
+ struct ssam_event_target target[SSH_NUM_TARGETS];
+ struct ssam_nf notif;
+ } event;
+};
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * enum ssam_controller_state - State values for &struct ssam_controller.
+ * @SSAM_CONTROLLER_UNINITIALIZED:
+ * The controller has not been initialized yet or has been deinitialized.
+ * @SSAM_CONTROLLER_INITIALIZED:
+ * The controller is initialized, but has not been started yet.
+ * @SSAM_CONTROLLER_STARTED:
+ * The controller has been started and is ready to use.
+ * @SSAM_CONTROLLER_STOPPED:
+ * The controller has been stopped.
+ * @SSAM_CONTROLLER_SUSPENDED:
+ * The controller has been suspended.
+ */
+enum ssam_controller_state {
+ SSAM_CONTROLLER_UNINITIALIZED,
+ SSAM_CONTROLLER_INITIALIZED,
+ SSAM_CONTROLLER_STARTED,
+ SSAM_CONTROLLER_STOPPED,
+ SSAM_CONTROLLER_SUSPENDED,
+};
+
+/**
+ * struct ssam_controller_caps - Controller device capabilities.
+ * @ssh_power_profile: SSH power profile.
+ * @ssh_buffer_size: SSH driver UART buffer size.
+ * @screen_on_sleep_idle_timeout: SAM UART screen-on sleep idle timeout.
+ * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout.
+ * @d3_closes_handle: SAM closes UART handle in D3.
+ *
+ * Controller and SSH device capabilities found in ACPI.
+ */
+struct ssam_controller_caps {
+ u32 ssh_power_profile;
+ u32 ssh_buffer_size;
+ u32 screen_on_sleep_idle_timeout;
+ u32 screen_off_sleep_idle_timeout;
+ u32 d3_closes_handle:1;
+};
+
+/**
+ * struct ssam_controller - SSAM controller device.
+ * @kref: Reference count of the controller.
+ * @lock: Main lock for the controller, used to guard state changes.
+ * @state: Controller state.
+ * @rtl: Request transport layer for SSH I/O.
+ * @cplt: Completion system for SSH/SSAM events and asynchronous requests.
+ * @counter: Safe SSH message ID counters.
+ * @counter.seq: Sequence ID counter.
+ * @counter.rqid: Request ID counter.
+ * @irq: Wakeup IRQ resources.
+ * @irq.num: The wakeup IRQ number.
+ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
+ * @caps: The controller device capabilities.
+ */
+struct ssam_controller {
+ struct kref kref;
+
+ struct rw_semaphore lock;
+ enum ssam_controller_state state;
+
+ struct ssh_rtl rtl;
+ struct ssam_cplt cplt;
+
+ struct {
+ struct ssh_seq_counter seq;
+ struct ssh_rqid_counter rqid;
+ } counter;
+
+ struct {
+ int num;
+ bool wakeup_enabled;
+ } irq;
+
+ struct ssam_controller_caps caps;
+};
+
+#define to_ssam_controller(ptr, member) \
+ container_of(ptr, struct ssam_controller, member)
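+
+/*
+ * Illustrative use of the macro above (a sketch; release() is hypothetical
+ * and only demonstrates resolving the controller from its embedded kref):
+ *
+ *   static void release(struct kref *kref)
+ *   {
+ *       struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
+ *       ...
+ *   }
+ */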
+
+#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+
+/**
+ * ssam_controller_receive_buf() - Provide input-data to the controller.
+ * @ctrl: The controller.
+ * @buf: The input buffer.
+ * @n: The number of bytes in the input buffer.
+ *
+ * Provide input data, as received via the lower-level transport, to be
+ * evaluated by the controller.
+ *
+ * Return: Returns the number of bytes consumed, or, if the packet transport
+ * layer of the controller has been shut down, %-ESHUTDOWN.
+ */
+static inline
+int ssam_controller_receive_buf(struct ssam_controller *ctrl,
+ const unsigned char *buf, size_t n)
+{
+ return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
+}
+
+/**
+ * ssam_controller_write_wakeup() - Notify the controller that the underlying
+ * device has space available for data to be written.
+ * @ctrl: The controller.
+ */
+static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
+{
+ ssh_ptl_tx_wakeup_transfer(&ctrl->rtl.ptl);
+}
+
+int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
+int ssam_controller_start(struct ssam_controller *ctrl);
+void ssam_controller_shutdown(struct ssam_controller *ctrl);
+void ssam_controller_destroy(struct ssam_controller *ctrl);
+
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
+
+int ssam_irq_setup(struct ssam_controller *ctrl);
+void ssam_irq_free(struct ssam_controller *ctrl);
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
+
+void ssam_controller_lock(struct ssam_controller *c);
+void ssam_controller_unlock(struct ssam_controller *c);
+
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version);
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
+
+int ssam_controller_suspend(struct ssam_controller *ctrl);
+int ssam_controller_resume(struct ssam_controller *ctrl);
+
+int ssam_event_item_cache_init(void);
+void ssam_event_item_cache_destroy(void);
+
+#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
new file mode 100644
index 000000000000..8dc2c267bcd6
--- /dev/null
+++ b/drivers/platform/surface/aggregator/core.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Serial Hub (SSH) driver for communication with the Surface/System
+ * Aggregator Module (SSAM/SAM).
+ *
+ * Provides access to a SAM-over-SSH connected EC via a controller device.
+ * Handles communication via requests as well as enabling, disabling, and
+ * relaying of events.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/serdev.h>
+#include <linux/sysfs.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+#include "bus.h"
+#include "controller.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+
+/* -- Static controller reference. ------------------------------------------ */
+
+/*
+ * Main controller reference. The corresponding lock must be held while
+ * accessing (reading/writing) the reference.
+ */
+static struct ssam_controller *__ssam_controller;
+static DEFINE_SPINLOCK(__ssam_controller_lock);
+
+/**
+ * ssam_get_controller() - Get reference to SSAM controller.
+ *
+ * Returns a reference to the SSAM controller of the system or %NULL if there
+ * is none, it hasn't been set up yet, or it has already been unregistered.
+ * This function automatically increments the reference count of the
+ * controller, thus the calling party must ensure that ssam_controller_put()
+ * is called when it doesn't need the controller any more.
+ */
+struct ssam_controller *ssam_get_controller(void)
+{
+ struct ssam_controller *ctrl;
+
+ spin_lock(&__ssam_controller_lock);
+
+ ctrl = __ssam_controller;
+ if (!ctrl)
+ goto out;
+
+ if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
+ ctrl = NULL;
+
+out:
+ spin_unlock(&__ssam_controller_lock);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(ssam_get_controller);
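+
+/*
+ * Typical usage (an illustrative sketch; submit_requests() stands in for
+ * arbitrary caller logic). The reference obtained here must always be
+ * balanced with ssam_controller_put():
+ *
+ *   struct ssam_controller *ctrl = ssam_get_controller();
+ *
+ *   if (ctrl) {
+ *       submit_requests(ctrl);
+ *       ssam_controller_put(ctrl);
+ *   }
+ */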
+
+/**
+ * ssam_try_set_controller() - Try to set the main controller reference.
+ * @ctrl: The controller to which the reference should point.
+ *
+ * Set the main controller reference to the given pointer if the reference
+ * hasn't been set already.
+ *
+ * Return: Returns zero on success or %-EEXIST if the reference has already
+ * been set.
+ */
+static int ssam_try_set_controller(struct ssam_controller *ctrl)
+{
+ int status = 0;
+
+ spin_lock(&__ssam_controller_lock);
+ if (!__ssam_controller)
+ __ssam_controller = ctrl;
+ else
+ status = -EEXIST;
+ spin_unlock(&__ssam_controller_lock);
+
+ return status;
+}
+
+/**
+ * ssam_clear_controller() - Remove/clear the main controller reference.
+ *
+ * Clears the main controller reference, i.e. sets it to %NULL. This function
+ * should be called before the controller is shut down.
+ */
+static void ssam_clear_controller(void)
+{
+ spin_lock(&__ssam_controller_lock);
+ __ssam_controller = NULL;
+ spin_unlock(&__ssam_controller_lock);
+}
+
+/**
+ * ssam_client_link() - Link an arbitrary client device to the controller.
+ * @c: The controller to link to.
+ * @client: The client device.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the controller device as provider. This function
+ * can be used for non-SSAM devices (or SSAM devices not registered as child
+ * under the controller) to guarantee that the controller is valid for as long
+ * as the driver of the client device is bound, and that proper suspend and
+ * resume ordering is guaranteed.
+ *
+ * The device link does not have to be destroyed manually. It is removed
+ * automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns zero on success, %-ENODEV if the controller is not ready or
+ * going to be removed soon, or %-ENOMEM if the device link could not be
+ * created for other reasons.
+ */
+int ssam_client_link(struct ssam_controller *c, struct device *client)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+ struct device_link *link;
+ struct device *ctrldev;
+
+ ssam_controller_statelock(c);
+
+ if (c->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ ctrldev = ssam_controller_device(c);
+ if (!ctrldev) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ link = device_link_add(client, ctrldev, flags);
+ if (!link) {
+ ssam_controller_stateunlock(c);
+ return -ENOMEM;
+ }
+
+ /*
+ * Return -ENODEV if supplier driver is on its way to be removed. In
+ * this case, the controller won't be around for much longer and the
+ * device link is not going to save us any more, as unbinding is
+ * already in progress.
+ */
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ ssam_controller_stateunlock(c);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_client_link);
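+
+/*
+ * Intended use from a client driver's probe() callback (an illustrative
+ * sketch; pdev is a hypothetical client platform device):
+ *
+ *   status = ssam_client_link(ctrl, &pdev->dev);
+ *   if (status)
+ *       return status;
+ */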
+
+/**
+ * ssam_client_bind() - Bind an arbitrary client device to the controller.
+ * @client: The client device.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the main controller device as provider. This
+ * function can be used for non-SSAM devices to guarantee that the controller
+ * returned by this function is valid for as long as the driver of the client
+ * device is bound, and that proper suspend and resume ordering is guaranteed.
+ *
+ * This function does essentially the same as ssam_client_link(), except that
+ * it first fetches the main controller reference, then creates the link, and
+ * finally returns this reference. Note that this function does not increment
+ * the reference counter of the controller, as, due to the link, the
+ * controller lifetime is assured as long as the driver of the client device
+ * is bound.
+ *
+ * It is not valid to use the controller reference obtained by this method
+ * outside of the driver bound to the client device at the time of calling
+ * this function, without first incrementing the reference count of the
+ * controller via ssam_controller_get(). Even after doing this, care must be
+ * taken that requests are only submitted and notifiers are only
+ * (un-)registered when the controller is active and not suspended. In other
+ * words: The device link only lives as long as the client driver is bound and
+ * any guarantees enforced by this link (e.g. active controller state) can
+ * only be relied upon as long as this link exists and may need to be enforced
+ * in other ways afterwards.
+ *
+ * The created device link does not have to be destroyed manually. It is
+ * removed automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns the controller on success, an error pointer with %-ENODEV
+ * if the controller is not present, not ready or going to be removed soon, or
+ * %-ENOMEM if the device link could not be created for other reasons.
+ */
+struct ssam_controller *ssam_client_bind(struct device *client)
+{
+ struct ssam_controller *c;
+ int status;
+
+ c = ssam_get_controller();
+ if (!c)
+ return ERR_PTR(-ENODEV);
+
+ status = ssam_client_link(c, client);
+
+ /*
+ * Note that we can drop our controller reference in both success and
+ * failure cases: On success, we have bound the controller lifetime
+	 * inherently to the client driver lifetime, i.e. the controller is
+ * now guaranteed to outlive the client driver. On failure, we're not
+ * going to use the controller any more.
+ */
+ ssam_controller_put(c);
+
+ return status >= 0 ? c : ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(ssam_client_bind);
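+
+/*
+ * Intended use from a client driver's probe() callback (an illustrative
+ * sketch; pdev is a hypothetical client platform device). As the controller
+ * may simply not have been registered yet, translating %-ENODEV into a
+ * deferred probe is a sensible reaction:
+ *
+ *   ctrl = ssam_client_bind(&pdev->dev);
+ *   if (IS_ERR(ctrl))
+ *       return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+ */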
+
+
+/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
+
+static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ size_t n)
+{
+ struct ssam_controller *ctrl;
+
+ ctrl = serdev_device_get_drvdata(dev);
+ return ssam_controller_receive_buf(ctrl, buf, n);
+}
+
+static void ssam_write_wakeup(struct serdev_device *dev)
+{
+ ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
+}
+
+static const struct serdev_device_ops ssam_serdev_ops = {
+ .receive_buf = ssam_receive_buf,
+ .write_wakeup = ssam_write_wakeup,
+};
+
+
+/* -- SysFS and misc. ------------------------------------------------------- */
+
+static int ssam_log_firmware_version(struct ssam_controller *ctrl)
+{
+ u32 version, a, b, c;
+ int status;
+
+ status = ssam_get_firmware_version(ctrl, &version);
+ if (status)
+ return status;
+
+ a = (version >> 24) & 0xff;
+	b = (version >> 8) & 0xffff;
+ c = version & 0xff;
+
+ ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
+ return 0;
+}
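+
+/*
+ * Worked example for the version field decoding above (the value is chosen
+ * purely for illustration): version == 0x0100050a yields a == 1, b == 5,
+ * c == 10, i.e. firmware version "1.5.10".
+ */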
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ssam_controller *ctrl = dev_get_drvdata(dev);
+ u32 version, a, b, c;
+ int status;
+
+ status = ssam_get_firmware_version(ctrl, &version);
+ if (status < 0)
+ return status;
+
+ a = (version >> 24) & 0xff;
+	b = (version >> 8) & 0xffff;
+ c = version & 0xff;
+
+ return sysfs_emit(buf, "%u.%u.%u\n", a, b, c);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static struct attribute *ssam_sam_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ NULL
+};
+
+static const struct attribute_group ssam_sam_group = {
+ .name = "sam",
+ .attrs = ssam_sam_attrs,
+};
+
+
+/* -- ACPI based device setup. ---------------------------------------------- */
+
+static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
+ void *ctx)
+{
+ struct serdev_device *serdev = ctx;
+ struct acpi_resource_common_serialbus *serial;
+ struct acpi_resource_uart_serialbus *uart;
+ bool flow_control;
+ int status = 0;
+
+ if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return AE_OK;
+
+ serial = &rsc->data.common_serial_bus;
+ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ return AE_OK;
+
+ uart = &rsc->data.uart_serial_bus;
+
+ /* Set up serdev device. */
+ serdev_device_set_baudrate(serdev, uart->default_baud_rate);
+
+ /* serdev currently only supports RTSCTS flow control. */
+ if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) {
+ dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n",
+ uart->flow_control);
+ }
+
+ /* Set RTSCTS flow control. */
+ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
+ serdev_device_set_flow_control(serdev, flow_control);
+
+	/* serdev currently only supports none, even, and odd parity. */
+ switch (uart->parity) {
+ case ACPI_UART_PARITY_NONE:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ break;
+ case ACPI_UART_PARITY_EVEN:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
+ break;
+ case ACPI_UART_PARITY_ODD:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
+ break;
+ default:
+ dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n",
+ uart->parity);
+ break;
+ }
+
+ if (status) {
+ dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n",
+ uart->parity, status);
+ return AE_ERROR;
+ }
+
+ /* We've found the resource and are done. */
+ return AE_CTRL_TERMINATE;
+}
+
+static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
+ struct serdev_device *serdev)
+{
+ return acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ssam_serdev_setup_via_acpi_crs, serdev);
+}
+
+
+/* -- Power management. ----------------------------------------------------- */
+
+static void ssam_serial_hub_shutdown(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to disable notifiers, signal display-off and D0-exit, ignore any
+ * errors.
+ *
+ * Note: It has not been established yet if this is actually
+ * necessary/useful for shutdown.
+ */
+
+ status = ssam_notifier_disable_registered(c);
+ if (status) {
+ ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
+ status);
+ }
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status)
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status)
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ssam_serial_hub_pm_prepare(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+	 * Try to signal display-off. This will quiesce events.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status)
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+ return status;
+}
+
+static void ssam_serial_hub_pm_complete(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal display-on. This will restore events.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ status = ssam_ctrl_notif_display_on(c);
+ if (status)
+ ssam_err(c, "pm: display-on notification failed: %d\n", status);
+}
+
+static int ssam_serial_hub_pm_suspend(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
+ * error.
+ */
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ goto err_notif;
+ }
+
+ status = ssam_irq_arm_for_wakeup(c);
+ if (status)
+ goto err_irq;
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+
+err_irq:
+ ssam_ctrl_notif_d0_entry(c);
+err_notif:
+ ssam_ctrl_notif_display_on(c);
+ return status;
+}
+
+static int ssam_serial_hub_pm_resume(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ WARN_ON(ssam_controller_resume(c));
+
+ /*
+ * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
+ * case of errors, log them and try to restore normal operation state
+ * as far as possible.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ ssam_irq_disarm_wakeup(c);
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ return 0;
+}
+
+static int ssam_serial_hub_pm_freeze(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * During hibernation image creation, we only have to ensure that the
+ * EC doesn't send us any events. This is done via the display-off
+ * and D0-exit notifications. Note that this sets up the wakeup IRQ
+	 * on the EC side; however, we have disabled it by default on our side
+ * and won't enable it here.
+ *
+	 * See ssam_serial_hub_pm_poweroff() for more details on the hibernation
+ * process.
+ */
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ ssam_ctrl_notif_display_on(c);
+ return status;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+}
+
+static int ssam_serial_hub_pm_thaw(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ WARN_ON(ssam_controller_resume(c));
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ return status;
+}
+
+static int ssam_serial_hub_pm_poweroff(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * When entering hibernation and powering off the system, the EC, at
+ * least on some models, may disable events. Without us taking care of
+ * that, this leads to events not being enabled/restored when the
+	 * system resumes from hibernation, resulting in SAM-HID subsystem devices
+ * (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
+ * gone, etc.
+ *
+ * To avoid these issues, we disable all registered events here (this is
+	 * likely not actually required) and restore them during the driver's PM
+ * restore callback.
+ *
+ * Wakeup from the EC interrupt is not supported during hibernation,
+ * so don't arm the IRQ here.
+ */
+
+ status = ssam_notifier_disable_registered(c);
+ if (status) {
+ ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
+ status);
+ return status;
+ }
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ ssam_notifier_restore_registered(c);
+ return status;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+}
+
+static int ssam_serial_hub_pm_restore(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Ignore but log errors, try to restore state as much as possible in
+	 * case of failures. See ssam_serial_hub_pm_poweroff() for more details on
+ * the hibernation process.
+ */
+
+ WARN_ON(ssam_controller_resume(c));
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ ssam_notifier_restore_registered(c);
+ return 0;
+}
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
+ .prepare = ssam_serial_hub_pm_prepare,
+ .complete = ssam_serial_hub_pm_complete,
+ .suspend = ssam_serial_hub_pm_suspend,
+ .resume = ssam_serial_hub_pm_resume,
+ .freeze = ssam_serial_hub_pm_freeze,
+ .thaw = ssam_serial_hub_pm_thaw,
+ .poweroff = ssam_serial_hub_pm_poweroff,
+ .restore = ssam_serial_hub_pm_restore,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Device/driver setup. -------------------------------------------------- */
+
+static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
+static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
+
+static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
+ { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
+ { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
+ { },
+};
+
+static int ssam_serial_hub_probe(struct serdev_device *serdev)
+{
+ struct ssam_controller *ctrl;
+	acpi_handle ssh = ACPI_HANDLE(&serdev->dev);
+ acpi_status astatus;
+ int status;
+
+ if (gpiod_count(&serdev->dev, NULL) < 0)
+ return -ENODEV;
+
+ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
+ if (status)
+ return status;
+
+ /* Allocate controller. */
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ /* Initialize controller. */
+ status = ssam_controller_init(ctrl, serdev);
+ if (status)
+ goto err_ctrl_init;
+
+ ssam_controller_lock(ctrl);
+
+ /* Set up serdev device. */
+ serdev_device_set_drvdata(serdev, ctrl);
+ serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
+ status = serdev_device_open(serdev);
+ if (status)
+ goto err_devopen;
+
+ astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
+ if (ACPI_FAILURE(astatus)) {
+ status = -ENXIO;
+ goto err_devinit;
+ }
+
+ /* Start controller. */
+ status = ssam_controller_start(ctrl);
+ if (status)
+ goto err_devinit;
+
+ ssam_controller_unlock(ctrl);
+
+ /*
+ * Initial SAM requests: Log version and notify default/init power
+ * states.
+ */
+ status = ssam_log_firmware_version(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_d0_entry(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_display_on(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
+ if (status)
+ goto err_initrq;
+
+ /* Set up IRQ. */
+ status = ssam_irq_setup(ctrl);
+ if (status)
+ goto err_irq;
+
+ /* Finally, set main controller reference. */
+ status = ssam_try_set_controller(ctrl);
+ if (WARN_ON(status)) /* Currently, we're the only provider. */
+ goto err_mainref;
+
+ /*
+ * TODO: The EC can wake up the system via the associated GPIO interrupt
+	 *       in multiple situations, one of which is the remaining battery
+ * capacity falling below a certain threshold. Normally, we should
+	 *       use the device_init_wakeup function; however, the EC also seems
+ * to have other reasons for waking up the system and it seems
+ * that Windows has additional checks whether the system should be
+ * resumed. In short, this causes some spurious unwanted wake-ups.
+ * For now let's thus default power/wakeup to false.
+ */
+ device_set_wakeup_capable(&serdev->dev, true);
+ acpi_walk_dep_device_list(ssh);
+
+ return 0;
+
+err_mainref:
+ ssam_irq_free(ctrl);
+err_irq:
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+err_initrq:
+ ssam_controller_lock(ctrl);
+ ssam_controller_shutdown(ctrl);
+err_devinit:
+ serdev_device_close(serdev);
+err_devopen:
+ ssam_controller_destroy(ctrl);
+ ssam_controller_unlock(ctrl);
+err_ctrl_init:
+ kfree(ctrl);
+ return status;
+}
+
+static void ssam_serial_hub_remove(struct serdev_device *serdev)
+{
+ struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
+ int status;
+
+ /* Clear static reference so that no one else can get a new one. */
+ ssam_clear_controller();
+
+ /* Disable and free IRQ. */
+ ssam_irq_free(ctrl);
+
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+ ssam_controller_lock(ctrl);
+
+ /* Remove all client devices. */
+ ssam_controller_remove_clients(ctrl);
+
+ /* Act as if suspending to silence events. */
+ status = ssam_ctrl_notif_display_off(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "display-off notification failed: %d\n",
+ status);
+ }
+
+ status = ssam_ctrl_notif_d0_exit(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
+ status);
+ }
+
+ /* Shut down controller and remove serdev device reference from it. */
+ ssam_controller_shutdown(ctrl);
+
+ /* Shut down actual transport. */
+ serdev_device_wait_until_sent(serdev, 0);
+ serdev_device_close(serdev);
+
+ /* Drop our controller reference. */
+ ssam_controller_unlock(ctrl);
+ ssam_controller_put(ctrl);
+
+ device_set_wakeup_capable(&serdev->dev, false);
+}
+
+static const struct acpi_device_id ssam_serial_hub_match[] = {
+ { "MSHW0084", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
+
+static struct serdev_device_driver ssam_serial_hub = {
+ .probe = ssam_serial_hub_probe,
+ .remove = ssam_serial_hub_remove,
+ .driver = {
+ .name = "surface_serial_hub",
+ .acpi_match_table = ssam_serial_hub_match,
+ .pm = &ssam_serial_hub_pm_ops,
+ .shutdown = ssam_serial_hub_shutdown,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init ssam_core_init(void)
+{
+ int status;
+
+ status = ssam_bus_register();
+ if (status)
+ goto err_bus;
+
+ status = ssh_ctrl_packet_cache_init();
+ if (status)
+ goto err_cpkg;
+
+ status = ssam_event_item_cache_init();
+ if (status)
+ goto err_evitem;
+
+ status = serdev_device_driver_register(&ssam_serial_hub);
+ if (status)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ ssam_event_item_cache_destroy();
+err_evitem:
+ ssh_ctrl_packet_cache_destroy();
+err_cpkg:
+ ssam_bus_unregister();
+err_bus:
+ return status;
+}
+module_init(ssam_core_init);
+
+static void __exit ssam_core_exit(void)
+{
+ serdev_device_driver_unregister(&ssam_serial_hub);
+ ssam_event_item_cache_destroy();
+ ssh_ctrl_packet_cache_destroy();
+ ssam_bus_unregister();
+}
+module_exit(ssam_core_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
new file mode 100644
index 000000000000..1221f642dda1
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH message builder functions.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
+#define _SURFACE_AGGREGATOR_SSH_MSGB_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+/**
+ * struct msgbuf - Buffer struct to construct SSH messages.
+ * @begin: Pointer to the beginning of the allocated buffer space.
+ * @end: Pointer to the end (one past last element) of the allocated buffer
+ * space.
+ * @ptr: Pointer to the first free element in the buffer.
+ */
+struct msgbuf {
+ u8 *begin;
+ u8 *end;
+ u8 *ptr;
+};
+
+/**
+ * msgb_init() - Initialize the given message buffer struct.
+ * @msgb: The buffer struct to initialize.
+ * @ptr: Pointer to the underlying memory by which the buffer will be backed.
+ * @cap: Size of the underlying memory.
+ *
+ * Initialize the given message buffer struct using the provided memory as
+ * backing.
+ */
+static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
+{
+ msgb->begin = ptr;
+ msgb->end = ptr + cap;
+ msgb->ptr = ptr;
+}
+
+/**
+ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
+ * @msgb: The message buffer.
+ */
+static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
+{
+ return msgb->ptr - msgb->begin;
+}
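+
+/*
+ * Illustrative usage of the message builder functions below (a sketch; seq
+ * and the transmit() step are placeholders supplied by the caller): build an
+ * ACK message in a stack buffer and determine its length for transmission.
+ *
+ *   u8 buf[SSH_MSG_LEN_CTRL];
+ *   struct msgbuf msgb;
+ *
+ *   msgb_init(&msgb, buf, ARRAY_SIZE(buf));
+ *   msgb_push_ack(&msgb, seq);
+ *   transmit(buf, msgb_bytes_used(&msgb));
+ */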
+
+static inline void __msgb_push_u8(struct msgbuf *msgb, u8 value)
+{
+ *msgb->ptr = value;
+ msgb->ptr += sizeof(u8);
+}
+
+static inline void __msgb_push_u16(struct msgbuf *msgb, u16 value)
+{
+ put_unaligned_le16(value, msgb->ptr);
+ msgb->ptr += sizeof(u16);
+}
+
+/**
+ * msgb_push_u16() - Push a u16 value to the buffer.
+ * @msgb: The message buffer.
+ * @value: The value to push to the buffer.
+ */
+static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
+{
+ if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
+ return;
+
+ __msgb_push_u16(msgb, value);
+}
+
+/**
+ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
+ * @msgb: The message buffer.
+ */
+static inline void msgb_push_syn(struct msgbuf *msgb)
+{
+ msgb_push_u16(msgb, SSH_MSG_SYN);
+}
+
+/**
+ * msgb_push_buf() - Push raw data to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data to push to the buffer.
+ * @len: The length of the data to push to the buffer.
+ */
+static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+ msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
+}
+
+/**
+ * msgb_push_crc() - Compute CRC and push it to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ */
+static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+ msgb_push_u16(msgb, ssh_crc(buf, len));
+}
+
+/**
+ * msgb_push_frame() - Push an SSH message frame header to the buffer.
+ * @msgb: The message buffer.
+ * @ty: The type of the frame.
+ * @len: The length of the payload of the frame.
+ * @seq: The sequence ID of the frame/packet.
+ */
+static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
+{
+ u8 *const begin = msgb->ptr;
+
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_frame) > msgb->end))
+ return;
+
+ __msgb_push_u8(msgb, ty); /* Frame type. */
+ __msgb_push_u16(msgb, len); /* Frame payload length. */
+ __msgb_push_u8(msgb, seq); /* Frame sequence ID. */
+
+ msgb_push_crc(msgb, begin, msgb->ptr - begin);
+}
+
+/**
+ * msgb_push_ack() - Push an SSH ACK frame to the buffer.
+ * @msgb: The message buffer.
+ * @seq: The sequence ID of the frame/packet to be ACKed.
+ */
+static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
+{
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* ACK-type frame + CRC. */
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
+
+ /* Payload CRC (ACK-type frames do not have a payload). */
+ msgb_push_crc(msgb, msgb->ptr, 0);
+}
+
+/**
+ * msgb_push_nak() - Push an SSH NAK frame to the buffer.
+ * @msgb: The message buffer.
+ */
+static inline void msgb_push_nak(struct msgbuf *msgb)
+{
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* NAK-type frame + CRC. */
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
+
+	/* Payload CRC (NAK-type frames do not have a payload). */
+ msgb_push_crc(msgb, msgb->ptr, 0);
+}
+
+/**
+ * msgb_push_cmd() - Push an SSH command frame with payload to the buffer.
+ * @msgb: The message buffer.
+ * @seq: The sequence ID (SEQ) of the frame/packet.
+ * @rqid: The request ID (RQID) of the request contained in the frame.
+ * @rqst: The request to wrap in the frame.
+ */
+static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
+ const struct ssam_request *rqst)
+{
+ const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
+ u8 *cmd;
+
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* Command frame + CRC. */
+ msgb_push_frame(msgb, type, sizeof(struct ssh_command) + rqst->length, seq);
+
+ /* Frame payload: Command struct + payload. */
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_command) > msgb->end))
+ return;
+
+ cmd = msgb->ptr;
+
+ __msgb_push_u8(msgb, SSH_PLD_TYPE_CMD); /* Payload type. */
+ __msgb_push_u8(msgb, rqst->target_category); /* Target category. */
+ __msgb_push_u8(msgb, rqst->target_id); /* Target ID (out). */
+ __msgb_push_u8(msgb, 0x00); /* Target ID (in). */
+ __msgb_push_u8(msgb, rqst->instance_id); /* Instance ID. */
+ __msgb_push_u16(msgb, rqid); /* Request ID. */
+ __msgb_push_u8(msgb, rqst->command_id); /* Command ID. */
+
+ /* Command payload. */
+ msgb_push_buf(msgb, rqst->payload, rqst->length);
+
+ /* CRC for command struct + payload. */
+ msgb_push_crc(msgb, cmd, msgb->ptr - cmd);
+}
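+
+/*
+ * The resulting on-wire layout produced by the builder above (field sizes
+ * follow from the pushes performed; SYN and CRC fields are two bytes each):
+ *
+ *   SYN | frame (type, len, seq) | frame CRC | command struct + payload | payload CRC
+ */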
+
+#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
new file mode 100644
index 000000000000..15d96eac6811
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -0,0 +1,2074 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH packet transport layer.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/error-injection.h>
+#include <linux/jiffies.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "ssh_msgb.h"
+#include "ssh_packet_layer.h"
+#include "ssh_parser.h"
+
+#include "trace.h"
+
+/*
+ * To simplify reasoning about the code below, we define a few concepts. The
+ * system below is similar to a state-machine for packets, however, there are
+ * too many states to explicitly write them down. To (somewhat) manage the
+ * states and packets we rely on flags, reference counting, and some simple
+ * concepts. State transitions are triggered by actions.
+ *
+ * >> Actions <<
+ *
+ * - submit
+ * - transmission start (process next item in queue)
+ * - transmission finished (guaranteed to never be parallel to transmission
+ * start)
+ * - ACK received
+ * - NAK received (this is equivalent to issuing re-submit for all pending
+ * packets)
+ * - timeout (this is equivalent to re-issuing a submit or canceling)
+ * - cancel (non-pending and pending)
+ *
+ * >> Data Structures, Packet Ownership, General Overview <<
+ *
+ * The code below employs two main data structures: The packet queue,
+ * containing all packets scheduled for transmission, and the set of pending
+ * packets, containing all packets awaiting an ACK.
+ *
+ * Shared ownership of a packet is controlled via reference counting. Inside
+ * the transport system are a total of five packet owners:
+ *
+ * - the packet queue,
+ * - the pending set,
+ * - the transmitter thread,
+ * - the receiver thread (via ACKing), and
+ * - the timeout work item.
+ *
+ * Normal operation is as follows: The initial reference of the packet is
+ * obtained by submitting the packet and queuing it. The transmitter thread takes
+ * packets from the queue. By doing this, it does not increment the refcount
+ * but takes over the reference (removing it from the queue). If the packet is
+ * sequenced (i.e. needs to be ACKed by the client), the transmitter thread
+ * sets up the timeout and adds the packet to the pending set before starting
+ * to transmit it. As the timeout is handled by a reaper task, no additional
+ * reference for it is needed. After the transmit is done, the reference held
+ * by the transmitter thread is dropped. If the packet is unsequenced (i.e.
+ * does not need an ACK), the packet is completed by the transmitter thread
+ * before dropping that reference.
+ *
+ * On receival of an ACK, the receiver thread removes and obtains the
+ * reference to the packet from the pending set. The receiver thread will then
+ * complete the packet and drop its reference.
+ *
+ * On receival of a NAK, the receiver thread re-submits all currently pending
+ * packets.
+ *
+ * Packet timeouts are detected by the timeout reaper. This is a task,
+ * scheduled depending on the earliest packet timeout expiration date,
+ * checking all currently pending packets if their timeout has expired. If the
+ * timeout of a packet has expired, it is re-submitted and the number of tries
+ * of this packet is incremented. If this number reaches its limit, the packet
+ * will be completed with a failure.
+ *
+ * On transmission failure (such as repeated packet timeouts), the completion
+ * callback is immediately run on the thread on which the error was detected.
+ *
+ * To ensure that a packet eventually leaves the system it is marked as
+ * "locked" directly before it is going to be completed or when it is
+ * canceled. Marking a packet as "locked" has the effect that passing and
+ * creating new references of the packet is disallowed. This means that the
+ * packet cannot be added to the queue or the pending set, cannot have its
+ * timeout armed, and cannot be picked up by the transmitter or receiver
+ * thread. To remove a
+ * packet from the system it has to be marked as locked and subsequently all
+ * references from the data structures (queue, pending) have to be removed.
+ * References held by threads will eventually be dropped automatically as
+ * their execution progresses.
+ *
+ * Note that the packet completion callback is, in case of success and for a
+ * sequenced packet, guaranteed to run on the receiver thread, thus providing
+ * a way to reliably identify responses to the packet. The packet completion
+ * callback is only run once and it does not indicate that the packet has
+ * fully left the system (for this, one should rely on the release method,
+ * triggered when the reference count of the packet reaches zero). In case of
+ * re-submission (and with somewhat unlikely timing), it may be possible that
+ * the packet is being re-transmitted while the completion callback runs.
+ * Completion will occur both on success and internal error, as well as when
+ * the packet is canceled.
+ *
+ * >> Flags <<
+ *
+ * Flags are used to indicate the state and progression of a packet. Some flags
+ * have stricter guarantees than others:
+ *
+ * - locked
+ * Indicates if the packet is locked. If the packet is locked, passing and/or
+ * creating additional references to the packet is forbidden. The packet thus
+ * may not be added to or removed from the queue or the pending set. Note
+ * that the packet state flags may still change (e.g. it may be marked as
+ * ACKed, transmitted, ...).
+ *
+ * - completed
+ * Indicates if the packet completion callback has been executed or is about
+ * to be executed. This flag is used to ensure that the packet completion
+ * callback is only run once.
+ *
+ * - queued
+ * Indicates if a packet is present in the submission queue or not. This flag
+ * must only be modified with the queue lock held, and must be coherent with the
+ * presence of the packet in the queue.
+ *
+ * - pending
+ * Indicates if a packet is present in the set of pending packets or not.
+ * This flag must only be modified with the pending lock held, and must be
+ * coherent with the presence of the packet in the pending set.
+ *
+ * - transmitting
+ * Indicates if the packet is currently transmitting. In case of
+ * re-transmissions, it is only safe to wait on the "transmitted" completion
+ * after this flag has been set. The completion will be set both in success
+ * and error case.
+ *
+ * - transmitted
+ * Indicates if the packet has been transmitted. This flag is not cleared by
+ * the system, thus it indicates the first transmission only.
+ *
+ * - acked
+ * Indicates if the packet has been acknowledged by the client. There are no
+ * other guarantees given. For example, the packet may still be canceled
+ * and/or the completion may be triggered with an error even though this bit is
+ * set. Rely on the status provided to the completion callback instead.
+ *
+ * - canceled
+ * Indicates if the packet has been canceled from the outside. There are no
+ * other guarantees given. Specifically, the packet may be completed by
+ * another part of the system before the cancellation attempts to complete it.
+ *
+ * >> General Notes <<
+ *
+ * - To avoid deadlocks, if both queue and pending locks are required, the
+ * pending lock must be acquired before the queue lock.
+ *
+ * - The packet priority must be accessed only while holding the queue lock.
+ *
+ * - The packet timestamp must be accessed only while holding the pending
+ * lock.
+ */
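+
+/*
+ * For example, a code path that needs both the pending and the queue lock
+ * must acquire them in the order stated above (an illustrative sketch):
+ *
+ *   spin_lock(&ptl->pending.lock);
+ *   spin_lock(&ptl->queue.lock);
+ *   ...
+ *   spin_unlock(&ptl->queue.lock);
+ *   spin_unlock(&ptl->pending.lock);
+ */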
+
+/*
+ * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
+ *
+ * Maximum number of transmission attempts per sequenced packet in case of
+ * time-outs. Must be smaller than 16. If the packet times out after this
+ * amount of tries, the packet will be completed with %-ETIMEDOUT as status
+ * code.
+ */
+#define SSH_PTL_MAX_PACKET_TRIES 3
+
+/*
+ * SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
+ *
+ * Timeout in jiffies for packet transmission via the underlying serial
+ * device. If transmitting the packet takes longer than this timeout, the
+ * packet will be completed with %-ETIMEDOUT. It will not be re-submitted.
+ */
+#define SSH_PTL_TX_TIMEOUT HZ
+
+/*
+ * SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
+ *
+ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
+ * time-frame after starting transmission, the packet will be re-submitted.
+ */
+#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
+
+/*
+ * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
+ *
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
+ * direct re-scheduling of reaper work_struct.
+ */
+#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
+
+/*
+ * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
+ *
+ * Maximum number of sequenced packets concurrently waiting for an ACK.
+ * Packets marked as blocking will not be transmitted while this limit is
+ * reached.
+ */
+#define SSH_PTL_MAX_PENDING 1
+
+/*
+ * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
+ */
+#define SSH_PTL_RX_BUF_LEN 4096
+
+/*
+ * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
+ */
+#define SSH_PTL_RX_FIFO_LEN 4096
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
+
+/**
+ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
+ *
+ * Useful to test detection and handling of automated re-transmits by the EC.
+ * Specifically of packets that the EC considers not-ACKed but the driver
+ * already considers ACKed (due to dropped ACK). In this case, the EC
+ * re-transmits the packet-to-be-ACKed and the driver should detect it as
+ * duplicate/already handled. Note that the driver should still send an ACK
+ * for the re-transmitted packet.
+ */
+static noinline bool ssh_ptl_should_drop_ack_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
+ *
+ * Useful to test/force automated (timeout-based) re-transmit by the EC.
+ * Specifically, packets that have not reached the driver completely/with valid
+ * checksums. Only useful in combination with receival of (injected) bad data.
+ */
+static noinline bool ssh_ptl_should_drop_nak_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
+ * data packet.
+ *
+ * Useful to test re-transmit timeout of the driver. If the data packet has not
+ * been ACKed after a certain time, the driver should re-transmit the packet up
+ * to a limited number of times, as defined in SSH_PTL_MAX_PACKET_TRIES.
+ */
+static noinline bool ssh_ptl_should_drop_dsq_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
+
+/**
+ * ssh_ptl_should_fail_write() - Error injection hook to make
+ * serdev_device_write() fail.
+ *
+ * Hook to simulate errors in serdev_device_write when transmitting packets.
+ */
+static noinline int ssh_ptl_should_fail_write(void)
+{
+ return 0;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
+
+/**
+ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
+ * data being sent to the EC.
+ *
+ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
+ * Causes the packet data to be actively corrupted by overwriting it with
+ * pre-defined values, such that it becomes invalid, causing the EC to respond
+ * with a NAK packet. Useful to test handling of NAK packets received by the
+ * driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_tx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
+ * test handling thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid data/checksum of the message frame and test handling
+ * thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
+
+static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_ack_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_ack_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_nak_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_nak_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_dsq_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_dsq_packet(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: dropping sequenced data packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ /* Ignore packets that don't carry any data (i.e. flush). */
+ if (!packet->data.ptr || !packet->data.len)
+ return false;
+
+ switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
+ case SSH_FRAME_TYPE_ACK:
+ return __ssh_ptl_should_drop_ack_packet(packet);
+
+ case SSH_FRAME_TYPE_NAK:
+ return __ssh_ptl_should_drop_nak_packet(packet);
+
+ case SSH_FRAME_TYPE_DATA_SEQ:
+ return __ssh_ptl_should_drop_dsq_packet(packet);
+
+ default:
+ return false;
+ }
+}
+
+static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
+ const unsigned char *buf, size_t count)
+{
+ int status;
+
+ status = ssh_ptl_should_fail_write();
+ if (unlikely(status)) {
+ trace_ssam_ei_tx_fail_write(packet, status);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating transmit error %d, packet %p\n",
+ status, packet);
+
+ return status;
+ }
+
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+ /* Ignore packets that don't carry any data (i.e. flush). */
+ if (!packet->data.ptr || !packet->data.len)
+ return;
+
+ /* Only allow sequenced data packets to be modified. */
+ if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_tx_data()))
+ return;
+
+ trace_ssam_ei_tx_corrupt_data(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating invalid transmit data on packet %p\n",
+ packet);
+
+ /*
+ * NB: The value 0xb3 has been chosen more or less randomly so that it
+ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
+ * non-trivial (i.e. non-zero, non-0xff).
+ */
+ memset(packet->data.ptr, 0xb3, packet->data.len);
+}
+
+static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+ struct ssam_span frame;
+
+ /* Check if there actually is something to corrupt. */
+ if (!sshp_find_syn(data, &frame))
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_syn()))
+ return;
+
+ trace_ssam_ei_rx_corrupt_syn(data->len);
+
+ data->ptr[1] = 0xb3; /* Set second byte of SYN to "random" value. */
+}
+
+static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+ size_t payload_len, message_len;
+ struct ssh_frame *sshf;
+
+	/* Ignore incomplete messages; they will get handled once complete. */
+ if (frame->len < SSH_MESSAGE_LENGTH(0))
+ return;
+
+ /* Ignore incomplete messages, part 2. */
+ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
+ message_len = SSH_MESSAGE_LENGTH(payload_len);
+ if (frame->len < message_len)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_data()))
+ return;
+
+ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
+ trace_ssam_ei_rx_corrupt_data(sshf);
+
+ /*
+ * Flip bits in first byte of payload checksum. This is basically
+ * equivalent to a payload/frame data error without us having to worry
+ * about (the, arguably pretty small, probability of) accidental
+ * checksum collisions.
+ */
+ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
+
+static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ return false;
+}
+
+static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
+ struct ssh_packet *packet,
+ const unsigned char *buf,
+ size_t count)
+{
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
+
+static void __ssh_ptl_packet_release(struct kref *kref)
+{
+ struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
+
+ trace_ssam_packet_release(p);
+
+ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
+ p->ops->release(p);
+}
+
+/**
+ * ssh_packet_get() - Increment reference count of packet.
+ * @packet: The packet to increment the reference count of.
+ *
+ * Increments the reference count of the given packet. See ssh_packet_put()
+ * for the counter-part of this function.
+ *
+ * Return: Returns the packet provided as input.
+ */
+struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
+{
+ if (packet)
+ kref_get(&packet->refcnt);
+ return packet;
+}
+EXPORT_SYMBOL_GPL(ssh_packet_get);
+
+/**
+ * ssh_packet_put() - Decrement reference count of packet.
+ * @packet: The packet to decrement the reference count of.
+ *
+ * If the reference count reaches zero, the ``release`` callback specified in
+ * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
+ * called.
+ *
+ * See ssh_packet_get() for the counter-part of this function.
+ */
+void ssh_packet_put(struct ssh_packet *packet)
+{
+ if (packet)
+ kref_put(&packet->refcnt, __ssh_ptl_packet_release);
+}
+EXPORT_SYMBOL_GPL(ssh_packet_put);
+
+static u8 ssh_packet_get_seq(struct ssh_packet *packet)
+{
+ return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+/**
+ * ssh_packet_init() - Initialize SSH packet.
+ * @packet: The packet to initialize.
+ * @type: Type-flags of the packet.
+ * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
+ * @ops: Packet operations.
+ *
+ * Initializes the given SSH packet. Sets the transmission buffer pointer to
+ * %NULL and the transmission buffer length to zero. For data-type packets,
+ * this buffer has to be set separately via ssh_packet_set_data() before
+ * submission, and must contain a valid SSH message, i.e. frame with optional
+ * payload of any type.
+ */
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ u8 priority, const struct ssh_packet_ops *ops)
+{
+ kref_init(&packet->refcnt);
+
+ packet->ptl = NULL;
+ INIT_LIST_HEAD(&packet->queue_node);
+ INIT_LIST_HEAD(&packet->pending_node);
+
+ packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
+ packet->priority = priority;
+ packet->timestamp = KTIME_MAX;
+
+ packet->data.ptr = NULL;
+ packet->data.len = 0;
+
+ packet->ops = ops;
+}
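+
+/*
+ * Illustrative initialization sequence (a sketch; type_flags, my_packet_ops,
+ * msg_buf, and msg_len are placeholders supplied by the caller, and
+ * ssh_packet_set_data() is the setter mentioned in the kernel-doc above):
+ *
+ *   ssh_packet_init(&pkt, type_flags, SSH_PACKET_PRIORITY(DATA, 0),
+ *                   &my_packet_ops);
+ *   ssh_packet_set_data(&pkt, msg_buf, msg_len);
+ */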
+
+static struct kmem_cache *ssh_ctrl_packet_cache;
+
+/**
+ * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
+ */
+int ssh_ctrl_packet_cache_init(void)
+{
+ const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
+ const unsigned int align = __alignof__(struct ssh_packet);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
+ if (!cache)
+ return -ENOMEM;
+
+ ssh_ctrl_packet_cache = cache;
+ return 0;
+}
+
+/**
+ * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
+ */
+void ssh_ctrl_packet_cache_destroy(void)
+{
+ kmem_cache_destroy(ssh_ctrl_packet_cache);
+ ssh_ctrl_packet_cache = NULL;
+}
+
+/**
+ * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
+ * @packet: Where the pointer to the newly allocated packet should be stored.
+ * @buffer: The buffer corresponding to this packet.
+ * @flags: Flags used for allocation.
+ *
+ * Allocates a packet and corresponding transport buffer from the control
+ * packet cache. Sets the packet's buffer reference to the allocated buffer.
+ * The packet must be freed via ssh_ctrl_packet_free(), which will also free
+ * the corresponding buffer. The corresponding buffer must not be freed
+ * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
+ * operations.
+ *
+ * Return: Returns zero on success, %-ENOMEM if the allocation failed.
+ */
+static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ struct ssam_span *buffer, gfp_t flags)
+{
+ *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
+ if (!*packet)
+ return -ENOMEM;
+
+ buffer->ptr = (u8 *)(*packet + 1);
+ buffer->len = SSH_MSG_LEN_CTRL;
+
+ trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
+ return 0;
+}
+
+/**
+ * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
+ * @p: The packet to free.
+ */
+static void ssh_ctrl_packet_free(struct ssh_packet *p)
+{
+ trace_ssam_ctrl_packet_free(p);
+ kmem_cache_free(ssh_ctrl_packet_cache, p);
+}
+
+static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
+ .complete = NULL,
+ .release = ssh_ctrl_packet_free,
+};
+
+static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
+ ktime_t expires)
+{
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+ ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
+
+ spin_lock(&ptl->rtx_timeout.lock);
+
+ /* Re-adjust / schedule reaper only if it is above resolution delta. */
+ if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
+ ptl->rtx_timeout.expires = expires;
+ mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+ }
+
+ spin_unlock(&ptl->rtx_timeout.lock);
+}
+
+/* Must be called with queue lock held. */
+static void ssh_packet_next_try(struct ssh_packet *p)
+{
+ u8 base = ssh_packet_priority_get_base(p->priority);
+ u8 try = ssh_packet_priority_get_try(p->priority);
+
+ lockdep_assert_held(&p->ptl->queue.lock);
+
+ /*
+ * Ensure that we write the priority in one go via WRITE_ONCE() so we
+ * can access it via READ_ONCE() for tracing. Note that other access
+ * is guarded by the queue lock, so no need to use READ_ONCE() there.
+ */
+ WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
+}
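+
+/*
+ * For example (assuming, as the accessors above suggest, that the try count
+ * is encoded in the low bits of the priority): a data packet that timed out
+ * once moves from SSH_PACKET_PRIORITY(DATA, 0) to SSH_PACKET_PRIORITY(DATA, 1),
+ * which compares greater and is therefore re-queued ahead of fresh data
+ * packets by __ssh_ptl_queue_find_entrypoint() below.
+ */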
+
+/* Must be called with queue lock held. */
+static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
+{
+ struct list_head *head;
+ struct ssh_packet *q;
+
+ lockdep_assert_held(&p->ptl->queue.lock);
+
+ /*
+	 * We generally assume that there are fewer control (ACK/NAK) packets
+	 * and re-submitted data packets than there are normal data packets (at
+	 * least in situations in which many packets are queued; if there
+	 * aren't many packets queued, the decision on how to iterate should be
+	 * basically irrelevant; the number of control/data packets is more or
+	 * less limited via the maximum number of pending packets). Thus, when
+	 * inserting a control or re-submitted data packet (determined by its
+	 * priority), we search from front to back. Normal data packets are
+	 * usually queued directly at the tail of the queue, so for those we
+	 * search from back to front.
+ */
+
+ if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
+ list_for_each(head, &p->ptl->queue.head) {
+ q = list_entry(head, struct ssh_packet, queue_node);
+
+ if (q->priority < p->priority)
+ break;
+ }
+ } else {
+ list_for_each_prev(head, &p->ptl->queue.head) {
+ q = list_entry(head, struct ssh_packet, queue_node);
+
+ if (q->priority >= p->priority) {
+ head = head->next;
+ break;
+ }
+ }
+ }
+
+ return head;
+}
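+
+/*
+ * Resulting queue order (illustrative): control (ACK/NAK) packets and
+ * re-submitted data packets (try > 0) sort before fresh data packets, while
+ * packets of equal priority keep FIFO order among themselves.
+ */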
+
+/* Must be called with queue lock held. */
+static int __ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ struct list_head *head;
+
+ lockdep_assert_held(&ptl->queue.lock);
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return -ESHUTDOWN;
+
+ /* Avoid further transitions when canceling/completing. */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
+ return -EINVAL;
+
+ /* If this packet has already been queued, do not add it. */
+ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
+ return -EALREADY;
+
+ head = __ssh_ptl_queue_find_entrypoint(packet);
+
+ list_add_tail(&ssh_packet_get(packet)->queue_node, head);
+ return 0;
+}
+
+static int ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+ int status;
+
+ spin_lock(&packet->ptl->queue.lock);
+ status = __ssh_ptl_queue_push(packet);
+ spin_unlock(&packet->ptl->queue.lock);
+
+ return status;
+}
+
+static void ssh_ptl_queue_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ spin_lock(&ptl->queue.lock);
+
+ if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
+ spin_unlock(&ptl->queue.lock);
+ return;
+ }
+
+ list_del(&packet->queue_node);
+
+ spin_unlock(&ptl->queue.lock);
+ ssh_packet_put(packet);
+}
+
+static void ssh_ptl_pending_push(struct ssh_packet *p)
+{
+ struct ssh_ptl *ptl = p->ptl;
+ const ktime_t timestamp = ktime_get_coarse_boottime();
+ const ktime_t timeout = ptl->rtx_timeout.timeout;
+
+ /*
+ * Note: We can get the time for the timestamp before acquiring the
+ * lock as this is the only place we're setting it and this function
+ * is called only from the transmitter thread. Thus it is not possible
+ * to overwrite the timestamp with an outdated value below.
+ */
+
+ spin_lock(&ptl->pending.lock);
+
+ /* If we are canceling/completing this packet, do not add it. */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ /*
+	 * On re-submission, the packet has already been added to the pending
+	 * set. We still need to update the timestamp as the packet timeout is
+ * reset for each (re-)submission.
+ */
+ p->timestamp = timestamp;
+
+ /* In case it is already pending (e.g. re-submission), do not add it. */
+ if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
+ atomic_inc(&ptl->pending.count);
+ list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ /* Arm/update timeout reaper. */
+ ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
+}
+
+static void ssh_ptl_pending_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ spin_lock(&ptl->pending.lock);
+
+ if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ list_del(&packet->pending_node);
+ atomic_dec(&ptl->pending.count);
+
+ spin_unlock(&ptl->pending.lock);
+
+ ssh_packet_put(packet);
+}
+
+/* Warning: Does not check/set "completed" bit. */
+static void __ssh_ptl_complete(struct ssh_packet *p, int status)
+{
+ struct ssh_ptl *ptl = READ_ONCE(p->ptl);
+
+ trace_ssam_packet_complete(p, status);
+ ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
+
+ if (p->ops->complete)
+ p->ops->complete(p, status);
+}
+
+static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
+{
+ /*
+ * A call to this function should in general be preceded by
+	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
+	 * packet to the structures it's going to be removed from.
+	 *
+	 * The set_bit call does not need explicit memory barriers as the
+	 * implicit barrier of the test_and_set_bit() call below ensures that the
+ * flag is visible before we actually attempt to remove the packet.
+ */
+
+ if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ return;
+
+ ssh_ptl_queue_remove(p);
+ ssh_ptl_pending_remove(p);
+
+ __ssh_ptl_complete(p, status);
+}
+
+static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
+ return !atomic_read(&ptl->pending.count);
+
+ /* We can always process non-blocking packets. */
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
+ return true;
+
+ /* If we are already waiting for this packet, send it again. */
+ if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
+ return true;
+
+ /* Otherwise: Check if we have the capacity to send. */
+ return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
+}
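+
+/*
+ * Note on the capacity check above: control (ACK/NAK) packets are
+ * initialized with type zero, i.e. without SSH_PACKET_TY_BLOCKING_BIT, and
+ * therefore bypass the SSH_PTL_MAX_PENDING limit. This ensures that
+ * acknowledgments can be sent even when the pending set is full.
+ */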
+
+static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->queue.lock);
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+ /*
+ * If we are canceling or completing this packet, ignore it.
+ * It's going to be removed from this queue shortly.
+ */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * Packets should be ordered non-blocking/to-be-resent first.
+ * If we cannot process this packet, assume that we can't
+ * process any following packet either and abort.
+ */
+ if (!ssh_ptl_tx_can_process(p)) {
+ packet = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ /*
+ * We are allowed to change the state now. Remove it from the
+ * queue and mark it as being transmitted.
+ */
+
+ list_del(&p->queue_node);
+
+ set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+ /*
+ * Update number of tries. This directly influences the
+ * priority in case the packet is re-submitted (e.g. via
+ * timeout/NAK). Note that all reads and writes to the
+ * priority after the first submission are guarded by the
+ * queue lock.
+ */
+ ssh_packet_next_try(p);
+
+ packet = p;
+ break;
+ }
+ spin_unlock(&ptl->queue.lock);
+
+ return packet;
+}
+
+static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+
+ p = ssh_ptl_tx_pop(ptl);
+ if (IS_ERR(p))
+ return p;
+
+ if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
+ ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
+ ssh_ptl_pending_push(p);
+ } else {
+ ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
+ }
+
+ return p;
+}
+
+static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
+
+ /* Transition state to "transmitted". */
+ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ /* If the packet is unsequenced, we're done: Lock and complete. */
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ ssh_ptl_remove_and_complete(packet, 0);
+ }
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&ptl->tx.packet_wq);
+}
+
+static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
+{
+ /* Transmission failure: Lock the packet and try to complete it. */
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
+ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
+
+ ssh_ptl_remove_and_complete(packet, status);
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&packet->ptl->tx.packet_wq);
+}
+
+static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
+{
+ int status;
+
+ status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
+ reinit_completion(&ptl->tx.thread_cplt_pkt);
+
+ /*
+ * Ensure completion is cleared before continuing to avoid lost update
+ * problems.
+ */
+ smp_mb__after_atomic();
+
+ return status;
+}
+
+static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
+{
+ long status;
+
+ status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
+ timeout);
+ reinit_completion(&ptl->tx.thread_cplt_tx);
+
+ /*
+ * Ensure completion is cleared before continuing to avoid lost update
+ * problems.
+ */
+ smp_mb__after_atomic();
+
+ return status;
+}
+
+static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
+{
+ long timeout = SSH_PTL_TX_TIMEOUT;
+ size_t offset = 0;
+
+ /* Note: Flush-packets don't have any data. */
+ if (unlikely(!packet->data.ptr))
+ return 0;
+
+ /* Error injection: drop packet to simulate transmission problem. */
+ if (ssh_ptl_should_drop_packet(packet))
+ return 0;
+
+ /* Error injection: simulate invalid packet data. */
+ ssh_ptl_tx_inject_invalid_data(packet);
+
+ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
+ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ packet->data.ptr, packet->data.len, false);
+
+ do {
+ ssize_t status, len;
+ u8 *buf;
+
+ buf = packet->data.ptr + offset;
+ len = packet->data.len - offset;
+
+ status = ssh_ptl_write_buf(ptl, packet, buf, len);
+ if (status < 0)
+ return status;
+
+ if (status == len)
+ return 0;
+
+ offset += status;
+
+ timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
+ if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
+ return -ESHUTDOWN;
+
+ if (timeout < 0)
+ return -EINTR;
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+ } while (true);
+}
+
+static int ssh_ptl_tx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
+ struct ssh_packet *packet;
+ int status;
+
+ /* Try to get the next packet. */
+ packet = ssh_ptl_tx_next(ptl);
+
+ /* If no packet can be processed, we are done. */
+ if (IS_ERR(packet)) {
+ ssh_ptl_tx_wait_packet(ptl);
+ continue;
+ }
+
+ /* Transfer and complete packet. */
+ status = ssh_ptl_tx_packet(ptl, packet);
+ if (status)
+ ssh_ptl_tx_compl_error(packet, status);
+ else
+ ssh_ptl_tx_compl_success(packet);
+
+ ssh_packet_put(packet);
+ }
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
+ * packet.
+ * @ptl: The packet transport layer.
+ *
+ * Wakes up the packet transmitter thread, notifying it that a new packet has
+ * arrived and is ready for transfer. If the packet transport layer has been
+ * shut down, calls to this function will be ignored.
+ */
+static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
+{
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return;
+
+ complete(&ptl->tx.thread_cplt_pkt);
+}
+
+/**
+ * ssh_ptl_tx_start() - Start packet transmitter thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_tx_start(struct ssh_ptl *ptl)
+{
+ atomic_set_release(&ptl->tx.running, 1);
+
+ ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
+ if (IS_ERR(ptl->tx.thread))
+ return PTR_ERR(ptl->tx.thread);
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_tx_stop() - Stop packet transmitter thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
+ /* Tell thread to stop. */
+ atomic_set_release(&ptl->tx.running, 0);
+
+ /*
+ * Wake up thread in case it is paused. Do not use wakeup
+ * helpers as this may be called when the shutdown bit has
+ * already been set.
+ */
+ complete(&ptl->tx.thread_cplt_pkt);
+ complete(&ptl->tx.thread_cplt_tx);
+
+ /* Finally, wait for thread to stop. */
+ status = kthread_stop(ptl->tx.thread);
+ ptl->tx.thread = NULL;
+ }
+
+ return status;
+}
+
+static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->pending.lock);
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ /*
+		 * We generally expect packets to be in order: the first
+		 * packet added to the pending set is the first to be sent and
+		 * the first to be ACKed.
+ */
+ if (unlikely(ssh_packet_get_seq(p) != seq_id))
+ continue;
+
+ /*
+		 * We may receive an ACK while handling a transmission-error
+		 * completion. In that case, the packet will be removed shortly.
+ */
+ if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ packet = ERR_PTR(-EPERM);
+ break;
+ }
+
+ /*
+ * Mark the packet as ACKed and remove it from pending by
+ * removing its node and decrementing the pending counter.
+ */
+ set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_del(&p->pending_node);
+ packet = p;
+
+ break;
+ }
+ spin_unlock(&ptl->pending.lock);
+
+ return packet;
+}
+
+static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
+{
+ wait_event(packet->ptl->tx.packet_wq,
+ test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
+ test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
+}
+
+static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet *p;
+
+ p = ssh_ptl_ack_pop(ptl, seq);
+ if (IS_ERR(p)) {
+ if (PTR_ERR(p) == -ENOENT) {
+ /*
+ * The packet has not been found in the set of pending
+ * packets.
+ */
+ ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
+ } else {
+ /*
+ * The packet is pending, but we are not allowed to take
+ * it because it has been locked.
+ */
+ WARN_ON(PTR_ERR(p) != -EPERM);
+ }
+ return;
+ }
+
+ ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
+
+ /*
+ * It is possible that the packet has been transmitted, but the state
+ * has not been updated from "transmitting" to "transmitted" yet.
+	 * In that case, we need to wait for this transition to occur in order
+	 * to distinguish between success and failure.
+ *
+ * On transmission failure, the packet will be locked after this call.
+ * On success, the transmitted bit will be set.
+ */
+ ssh_ptl_wait_until_transmitted(p);
+
+ /*
+ * The packet will already be locked in case of a transmission error or
+ * cancellation. Let the transmitter or cancellation issuer complete the
+ * packet.
+ */
+ if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
+ ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
+
+ ssh_packet_put(p);
+ return;
+ }
+
+ ssh_ptl_remove_and_complete(p, 0);
+ ssh_packet_put(p);
+
+ if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+/**
+ * ssh_ptl_submit() - Submit a packet to the transport layer.
+ * @ptl: The packet transport layer to submit the packet to.
+ * @p: The packet to submit.
+ *
+ * Submits a new packet to the transport layer, queuing it to be sent. This
+ * function should not be used for re-submission.
+ *
+ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
+ * the packet has been canceled prior to submission, %-EALREADY if the packet
+ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
+ * has been shut down.
+ */
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
+{
+ struct ssh_ptl *ptl_old;
+ int status;
+
+ trace_ssam_packet_submit(p);
+
+ /* Validate packet fields. */
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
+ if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
+ return -EINVAL;
+ } else if (!p->data.ptr) {
+ return -EINVAL;
+ }
+
+ /*
+ * The ptl reference only gets set on or before the first submission.
+ * After the first submission, it has to be read-only.
+ *
+ * Note that ptl may already be set from upper-layer request
+ * submission, thus we cannot expect it to be NULL.
+ */
+ ptl_old = READ_ONCE(p->ptl);
+ if (!ptl_old)
+ WRITE_ONCE(p->ptl, ptl);
+ else if (WARN_ON(ptl_old != ptl))
+ return -EALREADY; /* Submitted on different PTL. */
+
+ status = ssh_ptl_queue_push(p);
+ if (status)
+ return status;
+
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
+ (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
+ ssh_ptl_tx_wakeup_packet(ptl);
+
+ return 0;
+}
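+
+/*
+ * Illustrative submission sequence for a hypothetical caller (type_flags,
+ * my_packet_ops, buf, and len are placeholders; error handling omitted):
+ *
+ *	ssh_packet_init(p, type_flags, SSH_PACKET_PRIORITY(DATA, 0), &my_packet_ops);
+ *	ssh_packet_set_data(p, buf, len);
+ *	ssh_ptl_submit(ptl, p);
+ *	ssh_packet_put(p);	(the queue holds its own reference)
+ */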
+
+/*
+ * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
+ * @packet: The packet to re-submit.
+ *
+ * Re-submits the given packet: Checks if it can be re-submitted and queues it
+ * if it can, resetting the packet timestamp in the process. Must be called
+ * with the pending lock held.
+ *
+ * Return: Returns zero on success, %-ECANCELED if the packet has exceeded
+ * its number of tries, %-EINVAL if the packet has been locked, %-EALREADY
+ * if the packet is already on the queue, and %-ESHUTDOWN if the
+ * transmission layer has been shut down.
+ */
+static int __ssh_ptl_resubmit(struct ssh_packet *packet)
+{
+ int status;
+ u8 try;
+
+ lockdep_assert_held(&packet->ptl->pending.lock);
+
+ trace_ssam_packet_resubmit(packet);
+
+ spin_lock(&packet->ptl->queue.lock);
+
+ /* Check if the packet is out of tries. */
+ try = ssh_packet_priority_get_try(packet->priority);
+ if (try >= SSH_PTL_MAX_PACKET_TRIES) {
+ spin_unlock(&packet->ptl->queue.lock);
+ return -ECANCELED;
+ }
+
+ status = __ssh_ptl_queue_push(packet);
+ if (status) {
+ /*
+ * An error here indicates that the packet has either already
+ * been queued, been locked, or the transport layer is being
+ * shut down. In all cases: Ignore the error.
+ */
+ spin_unlock(&packet->ptl->queue.lock);
+ return status;
+ }
+
+ packet->timestamp = KTIME_MAX;
+
+ spin_unlock(&packet->ptl->queue.lock);
+ return 0;
+}
+
+static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+ bool resub = false;
+
+ /*
+ * Note: We deliberately do not remove/attempt to cancel and complete
+	 * packets that are out of tries in this function. The packet will
+	 * eventually be canceled and completed by the timeout. Removing the
+	 * packet here could lead to overly eager cancellation if the packet
+	 * has not been re-transmitted yet but the tries-counter has already
+	 * been updated (i.e.
+ * ssh_ptl_tx_next() removed the packet from the queue and updated the
+ * counter, but re-transmission for the last try has not actually
+ * started yet).
+ */
+
+ spin_lock(&ptl->pending.lock);
+
+ /* Re-queue all pending packets. */
+ list_for_each_entry(p, &ptl->pending.head, pending_node) {
+ /*
+ * Re-submission fails if the packet is out of tries, has been
+ * locked, is already queued, or the layer is being shut down.
+ * No need to re-schedule tx-thread in those cases.
+ */
+ if (!__ssh_ptl_resubmit(p))
+ resub = true;
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ if (resub)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+/**
+ * ssh_ptl_cancel() - Cancel a packet.
+ * @p: The packet to cancel.
+ *
+ * Cancels a packet. There are no guarantees on when completion and release
+ * callbacks will be called. This may occur during execution of this function
+ * or may occur at any point later.
+ *
+ * Note that it is not guaranteed that the packet will actually be canceled if
+ * the packet is concurrently completed by another process. The only guarantee
+ * of this function is that the packet will be completed (with success,
+ * failure, or cancellation) and released from the transport layer in a
+ * reasonable time-frame.
+ *
+ * May be called before the packet has been submitted, in which case any later
+ * packet submission fails.
+ */
+void ssh_ptl_cancel(struct ssh_packet *p)
+{
+ if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
+ return;
+
+ trace_ssam_packet_cancel(p);
+
+ /*
+ * Lock packet and commit with memory barrier. If this packet has
+ * already been locked, it's going to be removed and completed by
+ * another party, which should have precedence.
+ */
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ return;
+
+ /*
+ * By marking the packet as locked and employing the implicit memory
+ * barrier of test_and_set_bit, we have guaranteed that, at this point,
+ * the packet cannot be added to the queue any more.
+ *
+ * In case the packet has never been submitted, packet->ptl is NULL. If
+ * the packet is currently being submitted, packet->ptl may be NULL or
+	 * non-NULL. Due to marking the packet as locked above and committing
+	 * with the memory barrier, we have guaranteed that, if packet->ptl is
+	 * NULL, the packet will never be added to the queue. If packet->ptl is
+ * non-NULL, we don't have any guarantees.
+ */
+
+ if (READ_ONCE(p->ptl)) {
+ ssh_ptl_remove_and_complete(p, -ECANCELED);
+
+ if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
+ ssh_ptl_tx_wakeup_packet(p->ptl);
+
+ } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ __ssh_ptl_complete(p, -ECANCELED);
+ }
+}
+
+/* Must be called with pending lock held */
+static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
+{
+ lockdep_assert_held(&p->ptl->pending.lock);
+
+ if (p->timestamp != KTIME_MAX)
+ return ktime_add(p->timestamp, timeout);
+ else
+ return KTIME_MAX;
+}
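+
+/*
+ * Note: a timestamp of KTIME_MAX marks a packet that has been re-queued for
+ * transmission (see __ssh_ptl_resubmit()). Such packets never appear expired
+ * to the reaper until they have actually been sent again and received a
+ * fresh timestamp in ssh_ptl_pending_push().
+ */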
+
+static void ssh_ptl_timeout_reap(struct work_struct *work)
+{
+ struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
+ struct ssh_packet *p, *n;
+ LIST_HEAD(claimed);
+ ktime_t now = ktime_get_coarse_boottime();
+ ktime_t timeout = ptl->rtx_timeout.timeout;
+ ktime_t next = KTIME_MAX;
+ bool resub = false;
+ int status;
+
+ trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
+
+ /*
+ * Mark reaper as "not pending". This is done before checking any
+ * packets to avoid lost-update type problems.
+ */
+ spin_lock(&ptl->rtx_timeout.lock);
+ ptl->rtx_timeout.expires = KTIME_MAX;
+ spin_unlock(&ptl->rtx_timeout.lock);
+
+ spin_lock(&ptl->pending.lock);
+
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ ktime_t expires = ssh_packet_get_expiration(p, timeout);
+
+ /*
+ * Check if the timeout hasn't expired yet. Find out next
+ * expiration date to be handled after this run.
+ */
+ if (ktime_after(expires, now)) {
+ next = ktime_before(expires, next) ? expires : next;
+ continue;
+ }
+
+ trace_ssam_packet_timeout(p);
+
+ status = __ssh_ptl_resubmit(p);
+
+ /*
+ * Re-submission fails if the packet is out of tries, has been
+ * locked, is already queued, or the layer is being shut down.
+ * No need to re-schedule tx-thread in those cases.
+ */
+ if (!status)
+ resub = true;
+
+ /* Go to next packet if this packet is not out of tries. */
+ if (status != -ECANCELED)
+ continue;
+
+ /* No more tries left: Cancel the packet. */
+
+ /*
+ * If someone else has locked the packet already, don't use it
+ * and let the other party complete it.
+ */
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * We have now marked the packet as locked. Thus it cannot be
+ * added to the pending list again after we've removed it here.
+ * We can therefore re-use the pending_node of this packet
+ * temporarily.
+ */
+
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_del(&p->pending_node);
+
+ list_add_tail(&p->pending_node, &claimed);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ /* Cancel and complete the packet. */
+ list_for_each_entry_safe(p, n, &claimed, pending_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ ssh_ptl_queue_remove(p);
+ __ssh_ptl_complete(p, -ETIMEDOUT);
+ }
+
+ /*
+ * Drop the reference we've obtained by removing it from
+ * the pending set.
+ */
+ list_del(&p->pending_node);
+ ssh_packet_put(p);
+ }
+
+ /* Ensure that reaper doesn't run again immediately. */
+ next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
+ if (next != KTIME_MAX)
+ ssh_ptl_timeout_reaper_mod(ptl, now, next);
+
+ if (resub)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
+{
+ int i;
+
+ /*
+ * Check if SEQ has been seen recently (i.e. packet was
+ * re-transmitted and we should ignore it).
+ */
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
+ if (likely(ptl->rx.blocked.seqs[i] != seq))
+ continue;
+
+ ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
+ return true;
+ }
+
+ /* Update list of blocked sequence IDs. */
+ ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
+ ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
+ % ARRAY_SIZE(ptl->rx.blocked.seqs);
+
+ return false;
+}
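+
+/*
+ * Example for the check above: with the eight-entry ring, the eight most
+ * recently received SEQs are remembered. If the EC re-transmits a data frame
+ * because its ACK got lost, the duplicate SEQ is found in the ring and the
+ * payload is dropped, while a fresh ACK is still sent (see
+ * ssh_ptl_rx_eval()).
+ */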
+
+static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
+ const struct ssh_frame *frame,
+ const struct ssam_span *payload)
+{
+ if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
+ return;
+
+ ptl->ops.data_received(ptl, payload);
+}
+
+static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
+ return;
+ }
+
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
+ &ssh_ptl_ctrl_packet_ops);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_ack(&msgb, seq);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
+ return;
+ }
+
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
+ &ssh_ptl_ctrl_packet_ops);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_nak(&msgb);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+{
+ struct ssh_frame *frame;
+ struct ssam_span payload;
+ struct ssam_span aligned;
+ bool syn_found;
+ int status;
+
+ /* Error injection: Modify data to simulate corrupt SYN bytes. */
+ ssh_ptl_rx_inject_invalid_syn(ptl, source);
+
+ /* Find SYN. */
+ syn_found = sshp_find_syn(source, &aligned);
+
+ if (unlikely(aligned.ptr != source->ptr)) {
+ /*
+ * We expect aligned.ptr == source->ptr. If this is not the
+ * case, then aligned.ptr > source->ptr and we've encountered
+ * some unexpected data where we'd expect the start of a new
+ * message (i.e. the SYN sequence).
+ *
+ * This can happen when a CRC check for the previous message
+ * failed and we start actively searching for the next one
+ * (via the call to sshp_find_syn() above), or the first bytes
+ * of a message got dropped or corrupted.
+ *
+ * In any case, we issue a warning, send a NAK to the EC to
+ * request re-transmission of any data we haven't acknowledged
+ * yet, and finally, skip everything up to the next SYN
+ * sequence.
+ */
+
+ ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
+
+ /*
+ * Notes:
+ * - This might send multiple NAKs in case the communication
+ * starts with an invalid SYN and is broken down into multiple
+ * pieces. This should generally be handled fine, we just
+ * might receive duplicate data in this case, which is
+ * detected when handling data frames.
+ * - This path will also be executed on invalid CRCs: When an
+ * invalid CRC is encountered, the code below will skip data
+ * until directly after the SYN. This causes the search for
+ * the next SYN, which is generally not placed directly after
+ * the last one.
+ *
+ * Open question: Should we send this in case of invalid
+ * payload CRCs if the frame-type is non-sequential (current
+ * implementation) or should we drop that frame without
+ * telling the EC?
+ */
+ ssh_ptl_send_nak(ptl);
+ }
+
+ if (unlikely(!syn_found))
+ return aligned.ptr - source->ptr;
+
+ /* Error injection: Modify data to simulate corruption. */
+ ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
+
+ /* Parse and validate frame. */
+ status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
+ SSH_PTL_RX_BUF_LEN);
+ if (status) /* Invalid frame: skip to next SYN. */
+ return aligned.ptr - source->ptr + sizeof(u16);
+ if (!frame) /* Not enough data. */
+ return aligned.ptr - source->ptr;
+
+ trace_ssam_rx_frame_received(frame);
+
+ switch (frame->type) {
+ case SSH_FRAME_TYPE_ACK:
+ ssh_ptl_acknowledge(ptl, frame->seq);
+ break;
+
+ case SSH_FRAME_TYPE_NAK:
+ ssh_ptl_resubmit_pending(ptl);
+ break;
+
+ case SSH_FRAME_TYPE_DATA_SEQ:
+ ssh_ptl_send_ack(ptl, frame->seq);
+ fallthrough;
+
+ case SSH_FRAME_TYPE_DATA_NSQ:
+ ssh_ptl_rx_dataframe(ptl, frame, &payload);
+ break;
+
+ default:
+ ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
+ frame->type);
+ break;
+ }
+
+ return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
+}
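+
+/*
+ * Return-value semantics of ssh_ptl_rx_eval(), summarized: the returned
+ * value is the number of bytes consumed from @source. Zero means "need more
+ * data" (partial message), while a positive value covers skipped invalid
+ * bytes or a fully processed message. The caller drops exactly that many
+ * bytes and re-runs the evaluation on the remainder.
+ */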
+
+static int ssh_ptl_rx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (true) {
+ struct ssam_span span;
+ size_t offs = 0;
+ size_t n;
+
+ wait_event_interruptible(ptl->rx.wq,
+ !kfifo_is_empty(&ptl->rx.fifo) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Copy from fifo to evaluation buffer. */
+ n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
+
+ ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
+ print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ ptl->rx.buf.ptr + ptl->rx.buf.len - n,
+ n, false);
+
+ /* Parse until we need more bytes or buffer is empty. */
+ while (offs < ptl->rx.buf.len) {
+ sshp_buf_span_from(&ptl->rx.buf, offs, &span);
+ n = ssh_ptl_rx_eval(ptl, &span);
+ if (n == 0)
+ break; /* Need more bytes. */
+
+ offs += n;
+ }
+
+ /* Throw away the evaluated parts. */
+ sshp_buf_drop(&ptl->rx.buf, offs);
+ }
+
+ return 0;
+}
+
+static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
+{
+ wake_up(&ptl->rx.wq);
+}
+
+/**
+ * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_rx_start(struct ssh_ptl *ptl)
+{
+ if (ptl->rx.thread)
+ return 0;
+
+ ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
+ "ssam_serial_hub-rx");
+ if (IS_ERR(ptl->rx.thread))
+ return PTR_ERR(ptl->rx.thread);
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (ptl->rx.thread) {
+ status = kthread_stop(ptl->rx.thread);
+ ptl->rx.thread = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
+ * layer.
+ * @ptl: The packet transport layer.
+ * @buf: Pointer to the data to push to the layer.
+ * @n: Size of the data to push to the layer, in bytes.
+ *
+ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
+ * packet layer and notifies the receiver thread. Calls to this function are
+ * ignored once the packet layer has been shut down.
+ *
+ * Return: Returns the number of bytes transferred (positive or zero) on
+ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
+ */
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+{
+ int used;
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return -ESHUTDOWN;
+
+ used = kfifo_in(&ptl->rx.fifo, buf, n);
+ if (used)
+ ssh_ptl_rx_wakeup(ptl);
+
+ return used;
+}
+
+/**
+ * ssh_ptl_shutdown() - Shut down the packet transport layer.
+ * @ptl: The packet transport layer.
+ *
+ * Shuts down the packet transport layer, removing and canceling all queued
+ * and pending packets. Packets canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of packets after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_ptl_shutdown(struct ssh_ptl *ptl)
+{
+ LIST_HEAD(complete_q);
+ LIST_HEAD(complete_p);
+ struct ssh_packet *p, *n;
+ int status;
+
+ /* Ensure that no new packets (including ACK/NAK) can be submitted. */
+ set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
+ /*
+ * Ensure that the layer gets marked as shut-down before actually
+ * stopping it. In combination with the check in ssh_ptl_queue_push(),
+ * this guarantees that no new packets can be added and all already
+ * queued packets are properly canceled. In combination with the check
+ * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
+ * properly cut off.
+ */
+ smp_mb__after_atomic();
+
+ status = ssh_ptl_rx_stop(ptl);
+ if (status)
+ ptl_err(ptl, "ptl: failed to stop receiver thread\n");
+
+ status = ssh_ptl_tx_stop(ptl);
+ if (status)
+ ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
+
+ cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
+
+ /*
+ * At this point, all threads have been stopped. This means that the
+ * only references to packets from inside the system are in the queue
+ * and pending set.
+ *
+ * Note: We still need locks here because someone could still be
+ * canceling packets.
+ *
+ * Note 2: We can re-use queue_node (or pending_node) if we mark the
+	 * packet as locked and then remove it from the queue (or pending set,
+	 * respectively). Marking the packet as locked avoids re-queuing
+	 * (which should already be prevented by having stopped the threads...)
+ * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
+ * new list via other threads (e.g. cancellation).
+ *
+ * Note 3: There may be overlap between complete_p and complete_q.
+ * This is handled via test_and_set_bit() on the "completed" flag
+ * (also handles cancellation).
+ */
+
+ /* Mark queued packets as locked and move them to complete_q. */
+ spin_lock(&ptl->queue.lock);
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+ /* Ensure that state does not get zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+ list_del(&p->queue_node);
+ list_add_tail(&p->queue_node, &complete_q);
+ }
+ spin_unlock(&ptl->queue.lock);
+
+ /* Mark pending packets as locked and move them to complete_p. */
+ spin_lock(&ptl->pending.lock);
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+ /* Ensure that state does not get zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ list_del(&p->pending_node);
+		list_add_tail(&p->pending_node, &complete_p);
+ }
+ atomic_set(&ptl->pending.count, 0);
+ spin_unlock(&ptl->pending.lock);
+
+ /* Complete and drop packets on complete_q. */
+ list_for_each_entry(p, &complete_q, queue_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ __ssh_ptl_complete(p, -ESHUTDOWN);
+
+ ssh_packet_put(p);
+ }
+
+ /* Complete and drop packets on complete_p. */
+ list_for_each_entry(p, &complete_p, pending_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ __ssh_ptl_complete(p, -ESHUTDOWN);
+
+ ssh_packet_put(p);
+ }
+
+ /*
+ * At this point we have guaranteed that the system doesn't reference
+ * any packets any more.
+ */
+}
+
+/**
+ * ssh_ptl_init() - Initialize packet transport layer.
+ * @ptl: The packet transport layer to initialize.
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
+ * @ops: Packet layer operations.
+ *
+ * Initializes the given packet transport layer. Transmitter and receiver
+ * threads must be started separately via ssh_ptl_tx_start() and
+ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
+ * lower-level transport layer has been set up.
+ *
+ * Return: Returns zero on success and a negative error code on failure.
+ */
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops)
+{
+ int i, status;
+
+ ptl->serdev = serdev;
+ ptl->state = 0;
+
+ spin_lock_init(&ptl->queue.lock);
+ INIT_LIST_HEAD(&ptl->queue.head);
+
+ spin_lock_init(&ptl->pending.lock);
+ INIT_LIST_HEAD(&ptl->pending.head);
+ atomic_set_release(&ptl->pending.count, 0);
+
+ ptl->tx.thread = NULL;
+ atomic_set(&ptl->tx.running, 0);
+ init_completion(&ptl->tx.thread_cplt_pkt);
+ init_completion(&ptl->tx.thread_cplt_tx);
+ init_waitqueue_head(&ptl->tx.packet_wq);
+
+ ptl->rx.thread = NULL;
+ init_waitqueue_head(&ptl->rx.wq);
+
+ spin_lock_init(&ptl->rtx_timeout.lock);
+ ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
+ ptl->rtx_timeout.expires = KTIME_MAX;
+ INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
+
+ ptl->ops = *ops;
+
+ /* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
+ ptl->rx.blocked.seqs[i] = U16_MAX;
+ ptl->rx.blocked.offset = 0;
+
+ status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
+ if (status)
+ return status;
+
+ status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
+ if (status)
+ kfifo_free(&ptl->rx.fifo);
+
+ return status;
+}
+
+/**
+ * ssh_ptl_destroy() - Deinitialize packet transport layer.
+ * @ptl: The packet transport layer to deinitialize.
+ *
+ * Deinitializes the given packet transport layer and frees resources
+ * associated with it. If receiver and/or transmitter threads have been
+ * started, the layer must first be shut down via ssh_ptl_shutdown() before
+ * this function can be called.
+ */
+void ssh_ptl_destroy(struct ssh_ptl *ptl)
+{
+ kfifo_free(&ptl->rx.fifo);
+ sshp_buf_free(&ptl->rx.buf);
+}
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
new file mode 100644
index 000000000000..e8757d03f279
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH packet transport layer.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
+#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include "ssh_parser.h"
+
+/**
+ * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl.
+ *
+ * @SSH_PTL_SF_SHUTDOWN_BIT:
+ * Indicates that the packet transport layer has been shut down or is
+ * being shut down and should not accept any new packets/data.
+ */
+enum ssh_ptl_state_flags {
+ SSH_PTL_SF_SHUTDOWN_BIT,
+};
+
+/**
+ * struct ssh_ptl_ops - Callback operations for packet transport layer.
+ * @data_received: Function called when a data-packet has been received. Both
+ * the packet layer on which the packet has been received and
+ * the packet's payload data are provided to this function.
+ */
+struct ssh_ptl_ops {
+ void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
+};
+
+/**
+ * struct ssh_ptl - SSH packet transport layer.
+ * @serdev: Serial device providing the underlying data transport.
+ * @state: State(-flags) of the transport layer.
+ * @queue: Packet submission queue.
+ * @queue.lock: Lock for modifying the packet submission queue.
+ * @queue.head: List-head of the packet submission queue.
+ * @pending: Set/list of pending packets.
+ * @pending.lock: Lock for modifying the pending set.
+ * @pending.head: List-head of the pending set/list.
+ * @pending.count: Number of currently pending packets.
+ * @tx: Transmitter subsystem.
+ * @tx.running: Flag indicating (desired) transmitter thread state.
+ * @tx.thread: Transmitter thread.
+ * @tx.thread_cplt_tx: Completion for transmitter thread waiting on transfer.
+ * @tx.thread_cplt_pkt: Completion for transmitter thread waiting on packets.
+ * @tx.packet_wq: Waitqueue-head for packet transmit completion.
+ * @rx: Receiver subsystem.
+ * @rx.thread: Receiver thread.
+ * @rx.wq: Waitqueue-head for receiver thread.
+ * @rx.fifo: Buffer for receiving data/pushing data to receiver thread.
+ * @rx.buf: Buffer for evaluating data on receiver thread.
+ * @rx.blocked: List of recent/blocked sequence IDs to detect retransmission.
+ * @rx.blocked.seqs: Array of blocked sequence IDs.
+ * @rx.blocked.offset: Offset indicating where a new ID should be inserted.
+ * @rtx_timeout: Retransmission timeout subsystem.
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
+ * @ops: Packet layer operations.
+ */
+struct ssh_ptl {
+ struct serdev_device *serdev;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ atomic_t running;
+ struct task_struct *thread;
+ struct completion thread_cplt_tx;
+ struct completion thread_cplt_pkt;
+ struct wait_queue_head packet_wq;
+ } tx;
+
+ struct {
+ struct task_struct *thread;
+ struct wait_queue_head wq;
+ struct kfifo fifo;
+ struct sshp_buf buf;
+
+ struct {
+ u16 seqs[8];
+ u16 offset;
+ } blocked;
+ } rx;
+
+ struct {
+ spinlock_t lock;
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_ptl_ops ops;
+};
+
+#define __ssam_prcond(func, p, fmt, ...) \
+ do { \
+ typeof(p) __p = (p); \
+ \
+ if (__p) \
+ func(__p, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
+
+#define to_ssh_ptl(ptr, member) \
+ container_of(ptr, struct ssh_ptl, member)
+
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops);
+
+void ssh_ptl_destroy(struct ssh_ptl *ptl);
+
+/**
+ * ssh_ptl_get_device() - Get device associated with packet transport layer.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns the device that the given packet transport layer builds
+ * upon.
+ */
+static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
+{
+ return ptl->serdev ? &ptl->serdev->dev : NULL;
+}
+
+int ssh_ptl_tx_start(struct ssh_ptl *ptl);
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl);
+int ssh_ptl_rx_start(struct ssh_ptl *ptl);
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl);
+void ssh_ptl_shutdown(struct ssh_ptl *ptl);
+
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
+void ssh_ptl_cancel(struct ssh_packet *p);
+
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
+
+/**
+ * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
+ * transfer.
+ * @ptl: The packet transport layer.
+ *
+ * Wakes up the packet transmitter thread, notifying it that the underlying
+ * transport has more space for data to be transmitted. If the packet
+ * transport layer has been shut down, calls to this function will be ignored.
+ */
+static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl)
+{
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return;
+
+ complete(&ptl->tx.thread_cplt_tx);
+}
+
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ u8 priority, const struct ssh_packet_ops *ops);
+
+int ssh_ctrl_packet_cache_init(void);
+void ssh_ctrl_packet_cache_destroy(void);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
new file mode 100644
index 000000000000..e2dead8de94a
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH message parser.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include "ssh_parser.h"
+
+/**
+ * sshp_validate_crc() - Validate a CRC in raw message data.
+ * @src: The span of data over which the CRC should be computed.
+ * @crc: The pointer to the expected u16 CRC value.
+ *
+ * Computes the CRC of the provided data span (@src), compares it to the CRC
+ * stored at the given address (@crc), and returns the result of this
+ * comparison, i.e. %true if equal. This function is intended to run on raw
+ * input/message data.
+ *
+ * Return: Returns %true if the computed CRC matches the stored CRC, %false
+ * otherwise.
+ */
+static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
+{
+ u16 actual = ssh_crc(src->ptr, src->len);
+ u16 expected = get_unaligned_le16(crc);
+
+ return actual == expected;
+}
+
+/**
+ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
+ * @src: The data span to check the start of.
+ */
+static bool sshp_starts_with_syn(const struct ssam_span *src)
+{
+ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
+}
+
+/**
+ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
+ * @src: The data span to search in.
+ * @rem: The span (output) indicating the remaining data, starting with SSH
+ * SYN bytes, if found.
+ *
+ * Search for SSH SYN bytes in the given source span. If found, set the @rem
+ * span to the remaining data, starting with the first SYN bytes and capped by
+ * the source span length, and return %true. This function does not copy any
+ * data, but rather only sets pointers to the respective start addresses and
+ * length values.
+ *
+ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
+ * span at the end of the source span and return %false.
+ *
+ * If partial SSH SYN bytes could be found at the end of the source span, set
+ * the @rem span to cover these partial SYN bytes, capped by the end of the
+ * source span, and return %false. This function should then be re-run once
+ * more data is available.
+ *
+ * Return: Returns %true if a complete SSH SYN sequence could be found,
+ * %false otherwise.
+ */
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
+{
+ size_t i;
+
+	/* Note: i + 1 < len avoids size_t underflow for empty source spans. */
+	for (i = 0; i + 1 < src->len; i++) {
+ if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
+ rem->ptr = src->ptr + i;
+ rem->len = src->len - i;
+ return true;
+ }
+ }
+
+	if (unlikely(src->len != 0 && src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
+ rem->ptr = src->ptr + src->len - 1;
+ rem->len = 1;
+ return false;
+ }
+
+ rem->ptr = src->ptr + src->len;
+ rem->len = 0;
+ return false;
+}
+
+/**
+ * sshp_parse_frame() - Parse SSH frame.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @frame: The parsed frame (output).
+ * @payload: The parsed payload (output).
+ * @maxlen: The maximum supported message length.
+ *
+ * Parses and validates an SSH frame, including its payload, from the given
+ * source. Sets the provided @frame pointer to the start of the frame and
+ * writes the limits of the frame payload to the provided @payload span
+ * pointer.
+ *
+ * This function does not copy any data, but rather only validates the message
+ * data and sets pointers (and length values) to indicate the respective parts.
+ *
+ * If no complete SSH frame could be found, the frame pointer will be set to
+ * the %NULL pointer and the payload span will be set to the null span (start
+ * pointer %NULL, size zero).
+ *
+ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
+ * the start of the message is invalid, %-EBADMSG if any (frame-header or
+ * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
+ * the maximum message length specified in the @maxlen parameter.
+ */
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen)
+{
+ struct ssam_span sf;
+ struct ssam_span sp;
+
+ /* Initialize output. */
+ *frame = NULL;
+ payload->ptr = NULL;
+ payload->len = 0;
+
+ if (!sshp_starts_with_syn(source)) {
+ dev_warn(dev, "rx: parser: invalid start of frame\n");
+ return -ENOMSG;
+ }
+
+ /* Check for minimum packet length. */
+ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
+ dev_dbg(dev, "rx: parser: not enough data for frame\n");
+ return 0;
+ }
+
+ /* Pin down frame. */
+ sf.ptr = source->ptr + sizeof(u16);
+ sf.len = sizeof(struct ssh_frame);
+
+ /* Validate frame CRC. */
+ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
+ dev_warn(dev, "rx: parser: invalid frame CRC\n");
+ return -EBADMSG;
+ }
+
+ /* Ensure packet does not exceed maximum length. */
+ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
+ if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) {
+ dev_warn(dev, "rx: parser: frame too large: %llu bytes\n",
+ SSH_MESSAGE_LENGTH(sp.len));
+ return -EMSGSIZE;
+ }
+
+ /* Pin down payload. */
+ sp.ptr = sf.ptr + sf.len + sizeof(u16);
+
+ /* Check for frame + payload length. */
+ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
+ dev_dbg(dev, "rx: parser: not enough data for payload\n");
+ return 0;
+ }
+
+ /* Validate payload CRC. */
+ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
+ dev_warn(dev, "rx: parser: invalid payload CRC\n");
+ return -EBADMSG;
+ }
+
+ *frame = (struct ssh_frame *)sf.ptr;
+ *payload = sp;
+
+ dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n",
+ (*frame)->type, (*frame)->len);
+
+ return 0;
+}
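+
+/*
+ * Message layout assumed by the parser above, as reconstructed from the
+ * offsets used in sshp_parse_frame() (multi-byte fields little-endian):
+ *
+ *	+-----+------------------+-----------+---------+-------------+
+ *	| SYN | struct ssh_frame | frame CRC | payload | payload CRC |
+ *	| 2 B |  sizeof(frame) B |    2 B    |  len B  |     2 B     |
+ *	+-----+------------------+-----------+---------+-------------+
+ */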
+
+/**
+ * sshp_parse_command() - Parse SSH command frame payload.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @command: The parsed command (output).
+ * @command_data: The parsed command data/payload (output).
+ *
+ * Parses and validates an SSH command frame payload. Sets the @command pointer
+ * to the command header and the @command_data span to the command data (i.e.
+ * payload of the command). This will result in a zero-length span if the
+ * command does not have any associated data/payload. This function does not
+ * check the frame-payload-type field, which should be checked by the caller
+ * before calling this function.
+ *
+ * The @source parameter should be the complete frame payload, e.g. returned
+ * by the sshp_parse_frame() command.
+ *
+ * This function does not copy any data, but rather only validates the frame
+ * payload data and sets pointers (and length values) to indicate the
+ * respective parts.
+ *
+ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
+ * valid command-type frame payload, i.e. is too short.
+ */
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data)
+{
+ /* Check for minimum length. */
+ if (unlikely(source->len < sizeof(struct ssh_command))) {
+ *command = NULL;
+ command_data->ptr = NULL;
+ command_data->len = 0;
+
+ dev_err(dev, "rx: parser: command payload is too short\n");
+ return -ENOMSG;
+ }
+
+ *command = (struct ssh_command *)source->ptr;
+ command_data->ptr = source->ptr + sizeof(struct ssh_command);
+ command_data->len = source->len - sizeof(struct ssh_command);
+
+ dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
+ (*command)->tc, (*command)->cid);
+
+ return 0;
+}
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
new file mode 100644
index 000000000000..63c38d350988
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH message parser.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
+#define _SURFACE_AGGREGATOR_SSH_PARSER_H
+
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+/**
+ * struct sshp_buf - Parser buffer for SSH messages.
+ * @ptr: Pointer to the beginning of the buffer.
+ * @len: Number of bytes used in the buffer.
+ * @cap: Maximum capacity of the buffer.
+ */
+struct sshp_buf {
+ u8 *ptr;
+ size_t len;
+ size_t cap;
+};
+
+/**
+ * sshp_buf_init() - Initialize an SSH parser buffer.
+ * @buf: The buffer to initialize.
+ * @ptr: The memory backing the buffer.
+ * @cap: The length of the memory backing the buffer, i.e. its capacity.
+ *
+ * Initializes the buffer with the given memory as backing and sets its used
+ * length to zero.
+ */
+static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
+{
+ buf->ptr = ptr;
+ buf->len = 0;
+ buf->cap = cap;
+}
+
+/**
+ * sshp_buf_alloc() - Allocate and initialize an SSH parser buffer.
+ * @buf: The buffer to initialize/allocate to.
+ * @cap: The desired capacity of the buffer.
+ * @flags: The flags used for allocating the memory.
+ *
+ * Allocates @cap bytes and initializes the provided buffer struct with the
+ * allocated memory.
+ *
+ * Return: Returns zero on success and %-ENOMEM if allocation failed.
+ */
+static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
+{
+ u8 *ptr;
+
+ ptr = kzalloc(cap, flags);
+ if (!ptr)
+ return -ENOMEM;
+
+ sshp_buf_init(buf, ptr, cap);
+ return 0;
+}
+
+/**
+ * sshp_buf_free() - Free an SSH parser buffer.
+ * @buf: The buffer to free.
+ *
+ * Frees an SSH parser buffer by freeing the memory backing it and then
+ * resetting its pointer to %NULL and length and capacity to zero. Intended to
+ * free a buffer previously allocated with sshp_buf_alloc().
+ */
+static inline void sshp_buf_free(struct sshp_buf *buf)
+{
+ kfree(buf->ptr);
+ buf->ptr = NULL;
+ buf->len = 0;
+ buf->cap = 0;
+}
+
+/**
+ * sshp_buf_drop() - Drop data from the beginning of the buffer.
+ * @buf: The buffer to drop data from.
+ * @n: The number of bytes to drop.
+ *
+ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
+ * the beginning of the buffer.
+ */
+static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
+{
+ memmove(buf->ptr, buf->ptr + n, buf->len - n);
+ buf->len -= n;
+}
+
+/**
+ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
+ * @buf: The buffer to write the data into.
+ * @fifo: The fifo to read the data from.
+ *
+ * Transfers the data contained in the fifo to the buffer, removing it from
+ * the fifo. This function will try to transfer as much data as possible,
+ * limited either by the remaining space in the buffer or by the number of
+ * bytes available in the fifo.
+ *
+ * Return: Returns the number of bytes transferred.
+ */
+static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
+ struct kfifo *fifo)
+{
+ size_t n;
+
+ n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
+ buf->len += n;
+
+ return n;
+}
+
+/**
+ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
+ * @buf: The buffer to create the span from.
+ * @offset: The offset in the buffer at which the span should start.
+ * @span: The span to initialize (output).
+ *
+ * Initializes the provided span to point to the memory at the given offset in
+ * the buffer, with the length of the span being capped by the number of bytes
+ * used in the buffer after the offset (i.e. bytes remaining after the
+ * offset).
+ *
+ * Warning: This function does not validate that @offset is less than or equal
+ * to the number of bytes used in the buffer or the buffer capacity. This must
+ * be guaranteed by the caller.
+ */
+static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
+ struct ssam_span *span)
+{
+ span->ptr = buf->ptr + offset;
+ span->len = buf->len - offset;
+}
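+
+/*
+ * Editorial sketch, not part of the original patch: minimal lifecycle of the
+ * buffer helpers above, assuming a caller-provided fifo. A real consumer
+ * (e.g. the packet layer) would parse the buffered data via the sshp_*
+ * functions declared below before dropping it; the capacity of 4096 bytes
+ * and the function name are illustrative only.
+ */
+static inline int sshp_buf_example(struct kfifo *fifo)
+{
+ struct sshp_buf buf;
+ size_t n;
+ int status;
+
+ status = sshp_buf_alloc(&buf, 4096, GFP_KERNEL);
+ if (status)
+ return status;
+
+ /* Pull as much data as currently fits from the fifo into the buffer. */
+ n = sshp_buf_read_from_fifo(&buf, fifo);
+
+ /* ... parse buf.ptr[0..buf.len) here ... */
+
+ /* Discard the consumed bytes, then release the backing memory. */
+ sshp_buf_drop(&buf, n);
+ sshp_buf_free(&buf);
+
+ return 0;
+}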
+
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
+
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen);
+
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
new file mode 100644
index 000000000000..52a83a8fcf82
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -0,0 +1,1263 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH request transport layer.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/error-injection.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include <linux/surface_aggregator/controller.h>
+
+#include "ssh_packet_layer.h"
+#include "ssh_request_layer.h"
+
+#include "trace.h"
+
+/*
+ * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
+ *
+ * Timeout as ktime_t delta for request responses. If we have not received a
+ * response in this time-frame after finishing the underlying packet
+ * transmission, the request will be completed with %-ETIMEDOUT as status
+ * code.
+ */
+#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
+
+/*
+ * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
+ *
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
+ * direct re-scheduling of the reaper work_struct.
+ */
+#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
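+/* For example, with HZ=250 this yields max(2000/250, 50) = max(8, 50) = 50 ms. */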
+
+/*
+ * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
+ *
+ * Maximum number of requests concurrently waiting to be completed (i.e.
+ * waiting for the corresponding packet transmission to finish if they don't
+ * have a response or waiting for a response if they have one).
+ */
+#define SSH_RTL_MAX_PENDING 3
+
+/*
+ * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
+ * Used to prevent livelocking of the workqueue. Value chosen via educated
+ * guess, may be adjusted.
+ */
+#define SSH_RTL_TX_BATCH 10
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
+
+/**
+ * ssh_rtl_should_drop_response() - Error injection hook to drop request
+ * responses.
+ *
+ * Useful to cause request transmission timeouts in the driver by dropping the
+ * response to a request.
+ *
+ * Return: Returns %false by default. Intended to be overridden via error
+ * injection to return %true and thus drop the response.
+ */
+static noinline bool ssh_rtl_should_drop_response(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
+
+#else
+
+static inline bool ssh_rtl_should_drop_response(void)
+{
+ return false;
+}
+
+#endif
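+
+/*
+ * Editorial note: with CONFIG_FUNCTION_ERROR_INJECTION enabled, the hook
+ * above can be triggered at runtime through the fail_function debugfs
+ * interface, e.g. by writing "ssh_rtl_should_drop_response" to
+ * /sys/kernel/debug/fail_function/inject and configuring a nonzero
+ * probability; see Documentation/fault-injection/fault-injection.rst.
+ */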
+
+static u16 ssh_request_get_rqid(struct ssh_request *rqst)
+{
+ return get_unaligned_le16(rqst->packet.data.ptr
+ + SSH_MSGOFFSET_COMMAND(rqid));
+}
+
+static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
+{
+ if (!rqst->packet.data.ptr)
+ return U32_MAX;
+
+ return ssh_request_get_rqid(rqst);
+}
+
+static void ssh_rtl_queue_remove(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->queue.lock);
+
+ if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return;
+ }
+
+ list_del(&rqst->node);
+
+ spin_unlock(&rtl->queue.lock);
+ ssh_request_put(rqst);
+}
+
+static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
+{
+ bool empty;
+
+ spin_lock(&rtl->queue.lock);
+ empty = list_empty(&rtl->queue.head);
+ spin_unlock(&rtl->queue.lock);
+
+ return empty;
+}
+
+static void ssh_rtl_pending_remove(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->pending.lock);
+
+ if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return;
+ }
+
+ atomic_dec(&rtl->pending.count);
+ list_del(&rqst->node);
+
+ spin_unlock(&rtl->pending.lock);
+
+ ssh_request_put(rqst);
+}
+
+static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->pending.lock);
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return -EALREADY;
+ }
+
+ atomic_inc(&rtl->pending.count);
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
+
+ spin_unlock(&rtl->pending.lock);
+ return 0;
+}
+
+static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ trace_ssam_request_complete(rqst, status);
+
+ /* rtl/ptl may not be set if we're canceling before submitting. */
+ rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
+ ssh_request_get_rqid_safe(rqst), status);
+
+ rqst->ops->complete(rqst, NULL, NULL, status);
+}
+
+static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ trace_ssam_request_complete(rqst, 0);
+
+ rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
+ ssh_request_get_rqid(rqst));
+
+ rqst->ops->complete(rqst, cmd, data, 0);
+}
+
+static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
+ return !atomic_read(&rtl->pending.count);
+
+ return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
+}
+
+static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
+{
+ struct ssh_request *rqst = ERR_PTR(-ENOENT);
+ struct ssh_request *p, *n;
+
+ spin_lock(&rtl->queue.lock);
+
+ /* Find first non-locked request and remove it. */
+ list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
+ if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
+ continue;
+
+ if (!ssh_rtl_tx_can_process(p)) {
+ rqst = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ /* Remove from queue and mark as transmitting. */
+ set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
+
+ list_del(&p->node);
+
+ rqst = p;
+ break;
+ }
+
+ spin_unlock(&rtl->queue.lock);
+ return rqst;
+}
+
+static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
+{
+ struct ssh_request *rqst;
+ int status;
+
+ /* Get and prepare next request for transmit. */
+ rqst = ssh_rtl_tx_next(rtl);
+ if (IS_ERR(rqst))
+ return PTR_ERR(rqst);
+
+ /* Add it to/mark it as pending. */
+ status = ssh_rtl_tx_pending_push(rqst);
+ if (status) {
+ ssh_request_put(rqst);
+ return -EAGAIN;
+ }
+
+ /* Submit packet. */
+ status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
+ if (status == -ESHUTDOWN) {
+ /*
+ * Packet has been refused due to the packet layer shutting
+ * down. Complete it here.
+ */
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
+ /*
+ * Note: A barrier is not required here, as there are only two
+ * references in the system at this point: The one that we have,
+ * and the other one that belongs to the pending set. Due to the
+ * request being marked as "transmitting", our process is the
+ * only one allowed to remove the pending node and change the
+ * state. Normally, the task would fall to the packet callback,
+ * but as this is a path where submission failed, this callback
+ * will never be executed.
+ */
+
+ ssh_rtl_pending_remove(rqst);
+ ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
+
+ ssh_request_put(rqst);
+ return -ESHUTDOWN;
+
+ } else if (status) {
+ /*
+ * If submitting the packet failed and the packet layer isn't
+ * shutting down, the packet has either been submitted/queued
+ * before (-EALREADY, which cannot happen as we have
+ * guaranteed that requests cannot be re-submitted), or the
+ * packet was marked as locked (-EINVAL). To mark the packet
+ * locked at this stage, the request, and thus the packet
+ * itself, must have been canceled. Simply drop the
+ * reference. Cancellation itself will remove it from the set
+ * of pending requests.
+ */
+
+ WARN_ON(status != -EINVAL);
+
+ ssh_request_put(rqst);
+ return -EAGAIN;
+ }
+
+ ssh_request_put(rqst);
+ return 0;
+}
+
+static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
+{
+ if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
+ return false;
+
+ if (ssh_rtl_queue_empty(rtl))
+ return false;
+
+ return schedule_work(&rtl->tx.work);
+}
+
+static void ssh_rtl_tx_work_fn(struct work_struct *work)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
+ unsigned int iterations = SSH_RTL_TX_BATCH;
+ int status;
+
+ /*
+ * Try to be nice and not block/live-lock the workqueue: Run a maximum
+ * of SSH_RTL_TX_BATCH tries, then re-submit if necessary. This should
+ * not be necessary for normal execution, but guarantee it anyway.
+ */
+ do {
+ status = ssh_rtl_tx_try_process_one(rtl);
+ if (status == -ENOENT || status == -EBUSY)
+ return; /* No more requests that can be processed right now. */
+
+ if (status == -ESHUTDOWN) {
+ /*
+ * Packet system shutting down. No new packets can be
+ * transmitted. Return silently, the party initiating
+ * the shutdown should handle the rest.
+ */
+ return;
+ }
+
+ WARN_ON(status != 0 && status != -EAGAIN);
+ } while (--iterations);
+
+ /* Out of tries, reschedule. */
+ ssh_rtl_tx_schedule(rtl);
+}
+
+/**
+ * ssh_rtl_submit() - Submit a request to the transport layer.
+ * @rtl: The request transport layer.
+ * @rqst: The request to submit.
+ *
+ * Submits a request to the transport layer. A single request may not be
+ * submitted multiple times without reinitializing it.
+ *
+ * Return: Returns zero on success, %-EINVAL if the request type is invalid or
+ * the request has been canceled prior to submission, %-EALREADY if the
+ * request has already been submitted, or %-ESHUTDOWN in case the request
+ * transport layer has been shut down.
+ */
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
+{
+ trace_ssam_request_submit(rqst);
+
+ /*
+ * Ensure that requests expecting a response are sequenced. If this
+ * invariant ever changes, see the comment in ssh_rtl_complete() on what
+ * is required to be changed in the code.
+ */
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
+ return -EINVAL;
+
+ spin_lock(&rtl->queue.lock);
+
+ /*
+ * Try to set ptl and check if this request has already been submitted.
+ *
+ * Must be inside lock as we might run into a lost update problem
+ * otherwise: If this were outside of the lock, cancellation in
+ * ssh_rtl_cancel_nonpending() may run after we've set the ptl
+ * reference but before we enter the lock. In that case, we'd detect
+ * that the request is being added to the queue and would try to remove
+ * it from that, but removal might fail because it hasn't actually been
+ * added yet. By putting this cmpxchg in the critical section, we
+ * ensure that the queuing detection only triggers when we are already
+ * in the critical section and the remove process will wait until the
+ * push operation has been completed (via lock) due to that. Only then,
+ * we can safely try to remove it.
+ */
+ if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
+ spin_unlock(&rtl->queue.lock);
+ return -EALREADY;
+ }
+
+ /*
+ * Ensure that we set ptl reference before we continue modifying state.
+ * This is required for non-pending cancellation. This barrier is paired
+ * with the one in ssh_rtl_cancel_nonpending().
+ *
+ * By setting the ptl reference before we test for "locked", we can
+ * check if the "locked" test may have already run. See comments in
+ * ssh_rtl_cancel_nonpending() for more detail.
+ */
+ smp_mb__after_atomic();
+
+ if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return -ESHUTDOWN;
+ }
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return -EINVAL;
+ }
+
+ set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
+
+ spin_unlock(&rtl->queue.lock);
+
+ ssh_rtl_tx_schedule(rtl);
+ return 0;
+}
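+
+/*
+ * Editorial sketch, not part of the original patch: minimal submission flow,
+ * assuming @ops provides complete() and release() callbacks and @buf holds a
+ * valid, fully assembled SSH request message of @len bytes. Reference
+ * handling mirrors the flush-request code at the end of this file; the
+ * function name is illustrative only.
+ */
+static int ssh_rtl_example_submit(struct ssh_rtl *rtl, struct ssh_request *rqst,
+ const struct ssh_request_ops *ops, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssh_request_init(rqst, SSAM_REQUEST_HAS_RESPONSE, ops);
+ if (status)
+ return status;
+
+ /* Attach the pre-built message buffer (see ssh_request_init() below). */
+ ssh_request_set_data(rqst, buf, len);
+
+ status = ssh_rtl_submit(rtl, rqst);
+ if (status)
+ return status;
+
+ /* Drop our initial reference; the layer holds its own while queued. */
+ ssh_request_put(rqst);
+ return 0;
+}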
+
+static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
+ ktime_t expires)
+{
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+ ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
+
+ spin_lock(&rtl->rtx_timeout.lock);
+
+ /*
+ * Re-adjust/schedule the reaper only if the new expiration time
+ * undercuts the currently scheduled one by more than the resolution
+ * delta.
+ */
+ if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
+ rtl->rtx_timeout.expires = expires;
+ mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
+ }
+
+ spin_unlock(&rtl->rtx_timeout.lock);
+}
+
+static void ssh_rtl_timeout_start(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ ktime_t timestamp = ktime_get_coarse_boottime();
+ ktime_t timeout = rtl->rtx_timeout.timeout;
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
+ return;
+
+ /*
+ * Note: The timestamp gets set only once. This happens on the packet
+ * callback. All other access to it is read-only.
+ */
+ WRITE_ONCE(rqst->timestamp, timestamp);
+ /*
+ * Ensure timestamp is set before starting the reaper. Paired with
+ * implicit barrier following check on ssh_request_get_expiration() in
+ * ssh_rtl_timeout_reap().
+ */
+ smp_mb__after_atomic();
+
+ ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
+}
+
+static void ssh_rtl_complete(struct ssh_rtl *rtl,
+ const struct ssh_command *command,
+ const struct ssam_span *command_data)
+{
+ struct ssh_request *r = NULL;
+ struct ssh_request *p, *n;
+ u16 rqid = get_unaligned_le16(&command->rqid);
+
+ trace_ssam_rx_response_received(command, command_data->len);
+
+ /*
+ * Get request from pending based on request ID and mark it as response
+ * received and locked.
+ */
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
+ /* We generally expect requests to be processed in order. */
+ if (unlikely(ssh_request_get_rqid(p) != rqid))
+ continue;
+
+ /* Simulate response timeout. */
+ if (ssh_rtl_should_drop_response()) {
+ spin_unlock(&rtl->pending.lock);
+
+ trace_ssam_ei_rx_drop_response(p);
+ rtl_info(rtl, "request error injection: dropping response for request %p\n",
+ &p->packet);
+ return;
+ }
+
+ /*
+ * Mark as "response received" and "locked" as we're going to
+ * complete it.
+ */
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
+ set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&rtl->pending.count);
+ list_del(&p->node);
+
+ r = p;
+ break;
+ }
+ spin_unlock(&rtl->pending.lock);
+
+ if (!r) {
+ rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
+ rqid);
+ return;
+ }
+
+ /* If the request hasn't been completed yet, we will do this now. */
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
+ ssh_request_put(r);
+ ssh_rtl_tx_schedule(rtl);
+ return;
+ }
+
+ /*
+ * Make sure the request has been transmitted. In case of a sequenced
+ * request, we are guaranteed that the completion callback will run on
+ * the receiver thread directly when the ACK for the packet has been
+ * received. Similarly, this function is guaranteed to run on the
+ * receiver thread. Thus we are guaranteed that if the packet has been
+ * successfully transmitted and received an ACK, the transmitted flag
+ * has been set and is visible here.
+ *
+ * We are currently not handling unsequenced packets here, as those
+ * should never expect a response, as ensured in ssh_rtl_submit(). If this
+ * ever changes, one would have to test for
+ *
+ * (r->state & (transmitting | transmitted))
+ *
+ * on unsequenced packets to determine if they could have been
+ * transmitted. There are no synchronization guarantees as in the
+ * sequenced case, since, in this case, the callback function will not
+ * run on the same thread. Thus an exact determination is impossible.
+ */
+ if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
+ rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
+ rqid);
+
+ /*
+ * NB: Timeout has already been canceled, request already been
+ * removed from pending and marked as locked and completed. As
+ * we receive a "false" response, the packet might still be
+ * queued though.
+ */
+ ssh_rtl_queue_remove(r);
+
+ ssh_rtl_complete_with_status(r, -EREMOTEIO);
+ ssh_request_put(r);
+
+ ssh_rtl_tx_schedule(rtl);
+ return;
+ }
+
+ /*
+ * NB: Timeout has already been canceled, request already been
+ * removed from pending and marked as locked and completed. The request
+ * can also not be queued any more, as it has been marked as
+ * transmitting and later transmitted. Thus no need to remove it from
+ * anywhere.
+ */
+
+ ssh_rtl_complete_with_rsp(r, command, command_data);
+ ssh_request_put(r);
+
+ ssh_rtl_tx_schedule(rtl);
+}
+
+static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
+{
+ struct ssh_rtl *rtl;
+ unsigned long flags, fixed;
+ bool remove;
+
+ /*
+ * Handle unsubmitted request: Try to mark the request as locked,
+ * expecting the state to be zero (i.e. unsubmitted). Note that, if
+ * setting the state worked, we might still be adding the request to
+ * the queue in a currently executing submit call. In that case,
+ * however, the ptl reference must have been set previously, as locked
+ * is checked after setting ptl. Furthermore, when the ptl reference
+ * is set, the submission process is guaranteed to have entered the
+ * critical section. Thus, only if we successfully locked this request
+ * and ptl is NULL have we successfully removed the request, i.e. we
+ * are guaranteed that, due to the "locked" check in ssh_rtl_submit(),
+ * the request will never be added. Otherwise, we need to try and grab
+ * it from the queue, where we are now guaranteed that the request is,
+ * or has been, queued due to the critical section.
+ *
+ * Note that if the cmpxchg() fails, we are guaranteed that ptl has
+ * been set and is non-NULL, as states can only be nonzero after this
+ * has been set. Also note that we need to fetch the static (type)
+ * flags to ensure that they don't cause the cmpxchg() to fail.
+ */
+ fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
+ flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
+
+ /*
+ * Force correct ordering with regard to state and ptl reference access
+ * to safeguard cancellation against a lost-update problem with
+ * concurrent submission. First try to exchange state, then also check
+ * ptl if that worked. This barrier is paired with the
+ * one in ssh_rtl_submit().
+ */
+ smp_mb__after_atomic();
+
+ if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+ }
+
+ rtl = ssh_request_rtl(r);
+ spin_lock(&rtl->queue.lock);
+
+ /*
+ * Note: 1) Requests cannot be re-submitted. 2) If a request is
+ * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
+ * successfully remove the request here, we have removed all its
+ * occurrences in the system.
+ */
+
+ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+ if (!remove) {
+ spin_unlock(&rtl->queue.lock);
+ return false;
+ }
+
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ list_del(&r->node);
+
+ spin_unlock(&rtl->queue.lock);
+
+ ssh_request_put(r); /* Drop reference obtained from queue. */
+
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+}
+
+static bool ssh_rtl_cancel_pending(struct ssh_request *r)
+{
+ /* If the request is already locked, it's going to be removed shortly. */
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+ return true;
+
+ /*
+ * Now that we have locked the request, we have guaranteed that it can't
+ * be added to the system any more. If ptl is NULL, the locked
+ * check in ssh_rtl_submit() has not been run and any submission,
+ * currently in progress or called later, won't add the request. Thus we
+ * can directly complete it.
+ *
+ * The implicit memory barrier of test_and_set_bit() should be enough
+ * to ensure that the correct order (first lock, then check ptl) is
+ * ensured. This is paired with the barrier in ssh_rtl_submit().
+ */
+ if (!READ_ONCE(r->packet.ptl)) {
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+ }
+
+ /*
+ * Try to cancel the packet. If the packet has not been completed yet,
+ * this will subsequently (and synchronously) call the completion
+ * callback of the packet, which will complete the request.
+ */
+ ssh_ptl_cancel(&r->packet);
+
+ /*
+ * If the packet has been completed with success, i.e. has not been
+ * canceled by the above call, the request may not have been completed
+ * yet (may be waiting for a response). Check if we need to do this
+ * here.
+ */
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_queue_remove(r);
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+
+ return true;
+}
+
+/**
+ * ssh_rtl_cancel() - Cancel request.
+ * @rqst: The request to cancel.
+ * @pending: Whether to also cancel pending requests.
+ *
+ * Cancels the given request. If @pending is %false, this will not cancel
+ * pending requests, i.e. requests that have already been submitted to the
+ * packet layer but not been completed yet. If @pending is %true, this will
+ * cancel the given request regardless of the state it is in.
+ *
+ * If the request has been canceled by calling this function, both completion
+ * and release callbacks of the request will be executed in a reasonable
+ * time-frame. This may happen during execution of this function, however,
+ * there is no guarantee for this. For example, a request currently
+ * transmitting will be canceled/completed only after transmission has
+ * completed, and the respective callbacks will be executed on the transmitter
+ * thread, which may happen during, but also some time after execution of the
+ * cancel function.
+ *
+ * Return: Returns %true if the given request has been canceled or completed,
+ * either by this function or prior to calling this function, %false
+ * otherwise. If @pending is %true, this function will always return %true.
+ */
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
+{
+ struct ssh_rtl *rtl;
+ bool canceled;
+
+ if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
+ return true;
+
+ trace_ssam_request_cancel(rqst);
+
+ if (pending)
+ canceled = ssh_rtl_cancel_pending(rqst);
+ else
+ canceled = ssh_rtl_cancel_nonpending(rqst);
+
+ /* Note: rtl may be NULL if request has not been submitted yet. */
+ rtl = ssh_request_rtl(rqst);
+ if (canceled && rtl)
+ ssh_rtl_tx_schedule(rtl);
+
+ return canceled;
+}
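+
+/*
+ * Editorial sketch, not part of the original patch: unconditional abort of a
+ * request, e.g. during controller teardown. Completion (with %-ECANCELED)
+ * may still happen asynchronously, for example on the transmitter thread
+ * once an in-flight transmission has finished; the function name is
+ * illustrative only.
+ */
+static void ssh_rtl_example_abort(struct ssh_request *rqst)
+{
+ ssh_rtl_cancel(rqst, true); /* Always returns true for pending = true. */
+}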
+
+static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
+{
+ struct ssh_request *r = to_ssh_request(p);
+
+ if (unlikely(status)) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return;
+
+ /*
+ * The packet may get canceled even though it has not been
+ * submitted yet. The request may still be queued. Check the
+ * queue and remove it if necessary. As the timeout would have
+ * been started in this function on success, there's no need
+ * to cancel it here.
+ */
+ ssh_rtl_queue_remove(r);
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, status);
+
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
+ return;
+ }
+
+ /* Update state: Mark as transmitted and clear transmitting. */
+ set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
+
+ /* If we expect a response, we just need to start the timeout. */
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
+ /*
+ * Note: This is the only place where the timestamp gets set,
+ * all other access to it is read-only.
+ */
+ ssh_rtl_timeout_start(r);
+ return;
+ }
+
+ /*
+ * If we don't expect a response, lock, remove, and complete the
+ * request. Note that, at this point, the request is guaranteed to have
+ * left the queue and no timeout has been started. Thus we only need to
+ * remove it from pending. If the request has already been completed (it
+ * may have been canceled) return.
+ */
+
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return;
+
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, 0);
+
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
+}
+
+static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
+{
+ ktime_t timestamp = READ_ONCE(r->timestamp);
+
+ if (timestamp != KTIME_MAX)
+ return ktime_add(timestamp, timeout);
+ else
+ return KTIME_MAX;
+}
+
+static void ssh_rtl_timeout_reap(struct work_struct *work)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
+ struct ssh_request *r, *n;
+ LIST_HEAD(claimed);
+ ktime_t now = ktime_get_coarse_boottime();
+ ktime_t timeout = rtl->rtx_timeout.timeout;
+ ktime_t next = KTIME_MAX;
+
+ trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
+
+ /*
+ * Mark reaper as "not pending". This is done before checking any
+ * requests to avoid lost-update type problems.
+ */
+ spin_lock(&rtl->rtx_timeout.lock);
+ rtl->rtx_timeout.expires = KTIME_MAX;
+ spin_unlock(&rtl->rtx_timeout.lock);
+
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+ ktime_t expires = ssh_request_get_expiration(r, timeout);
+
+ /*
+ * Check if the timeout hasn't expired yet. Find out next
+ * expiration date to be handled after this run.
+ */
+ if (ktime_after(expires, now)) {
+ next = ktime_before(expires, next) ? expires : next;
+ continue;
+ }
+
+ /* Avoid further transitions if locked. */
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+ continue;
+
+ /*
+ * We have now marked the request as locked. Thus it cannot be
+ * added to the pending or queued lists again after we've
+ * removed it here. We can therefore re-use the node of this
+ * request temporarily.
+ */
+
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+ atomic_dec(&rtl->pending.count);
+ list_del(&r->node);
+
+ list_add_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->pending.lock);
+
+ /* Cancel and complete the request. */
+ list_for_each_entry_safe(r, n, &claimed, node) {
+ trace_ssam_request_timeout(r);
+
+ /*
+ * At this point we've removed the request from pending. This
+ * means that we've obtained the last (only) reference of the
+ * system to it. Thus we can just complete it.
+ */
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ ssh_rtl_complete_with_status(r, -ETIMEDOUT);
+
+ /*
+ * Drop the reference we've obtained by removing it from the
+ * pending set.
+ */
+ list_del(&r->node);
+ ssh_request_put(r);
+ }
+
+ /* Ensure that the reaper doesn't run again immediately. */
+ next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
+ if (next != KTIME_MAX)
+ ssh_rtl_timeout_reaper_mod(rtl, now, next);
+
+ ssh_rtl_tx_schedule(rtl);
+}
+
+static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ trace_ssam_rx_event_received(cmd, data->len);
+
+ rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
+ get_unaligned_le16(&cmd->rqid));
+
+ rtl->ops.handle_event(rtl, cmd, data);
+}
+
+static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
+ struct device *dev = &p->serdev->dev;
+ struct ssh_command *command;
+ struct ssam_span command_data;
+
+ if (sshp_parse_command(dev, data, &command, &command_data))
+ return;
+
+ if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
+ ssh_rtl_rx_event(rtl, command, &command_data);
+ else
+ ssh_rtl_complete(rtl, command, &command_data);
+}
+
+static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
+{
+ if (!data->len) {
+ ptl_err(p, "rtl: rx: no data frame payload\n");
+ return;
+ }
+
+ switch (data->ptr[0]) {
+ case SSH_PLD_TYPE_CMD:
+ ssh_rtl_rx_command(p, data);
+ break;
+
+ default:
+ ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
+ data->ptr[0]);
+ break;
+ }
+}
+
+static void ssh_rtl_packet_release(struct ssh_packet *p)
+{
+ struct ssh_request *rqst;
+
+ rqst = to_ssh_request(p);
+ rqst->ops->release(rqst);
+}
+
+static const struct ssh_packet_ops ssh_rtl_packet_ops = {
+ .complete = ssh_rtl_packet_callback,
+ .release = ssh_rtl_packet_release,
+};
+
+/**
+ * ssh_request_init() - Initialize SSH request.
+ * @rqst: The request to initialize.
+ * @flags: Request flags, determining the type of the request.
+ * @ops: Request operations.
+ *
+ * Initializes the given SSH request and underlying packet. Sets the message
+ * buffer pointer to %NULL and the message buffer length to zero. This buffer
+ * has to be set separately via ssh_request_set_data() before submission and
+ * must contain a valid SSH request message.
+ *
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
+ */
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops)
+{
+ unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
+
+ /* Unsequenced requests cannot have a response. */
+ if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
+ return -EINVAL;
+
+ if (!(flags & SSAM_REQUEST_UNSEQUENCED))
+ type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
+
+ ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
+ &ssh_rtl_packet_ops);
+
+ INIT_LIST_HEAD(&rqst->node);
+
+ rqst->state = 0;
+ if (flags & SSAM_REQUEST_HAS_RESPONSE)
+ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+ rqst->timestamp = KTIME_MAX;
+ rqst->ops = ops;
+
+ return 0;
+}
+
+/**
+ * ssh_rtl_init() - Initialize request transport layer.
+ * @rtl: The request transport layer to initialize.
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
+ * @ops: Request transport layer operations.
+ *
+ * Initializes the given request transport layer and associated packet
+ * transport layer. Transmitter and receiver threads must be started
+ * separately via ssh_rtl_start(), after the request-layer has been
+ * initialized and the lower-level serial device layer has been set up.
+ *
+ * Return: Returns zero on success and a nonzero error code on failure.
+ */
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops)
+{
+ struct ssh_ptl_ops ptl_ops;
+ int status;
+
+ ptl_ops.data_received = ssh_rtl_rx_data;
+
+ status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
+ if (status)
+ return status;
+
+ spin_lock_init(&rtl->queue.lock);
+ INIT_LIST_HEAD(&rtl->queue.head);
+
+ spin_lock_init(&rtl->pending.lock);
+ INIT_LIST_HEAD(&rtl->pending.head);
+ atomic_set_release(&rtl->pending.count, 0);
+
+ INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
+
+ spin_lock_init(&rtl->rtx_timeout.lock);
+ rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
+ rtl->rtx_timeout.expires = KTIME_MAX;
+ INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
+
+ rtl->ops = *ops;
+
+ return 0;
+}
+
+/**
+ * ssh_rtl_destroy() - Deinitialize request transport layer.
+ * @rtl: The request transport layer to deinitialize.
+ *
+ * Deinitializes the given request transport layer and frees resources
+ * associated with it. If receiver and/or transmitter threads have been
+ * started, the layer must first be shut down via ssh_rtl_shutdown() before
+ * this function can be called.
+ */
+void ssh_rtl_destroy(struct ssh_rtl *rtl)
+{
+ ssh_ptl_destroy(&rtl->ptl);
+}
+
+/**
+ * ssh_rtl_start() - Start request transmitter and receiver.
+ * @rtl: The request transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_rtl_start(struct ssh_rtl *rtl)
+{
+ int status;
+
+ status = ssh_ptl_tx_start(&rtl->ptl);
+ if (status)
+ return status;
+
+ ssh_rtl_tx_schedule(rtl);
+
+ status = ssh_ptl_rx_start(&rtl->ptl);
+ if (status) {
+ ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
+ ssh_ptl_tx_stop(&rtl->ptl);
+ return status;
+ }
+
+ return 0;
+}
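+
+/*
+ * Editorial sketch, not part of the original patch: typical bring-up
+ * sequence, assuming @ops provides the handle_event() callback. Teardown
+ * mirrors this in reverse: ssh_rtl_shutdown(rtl), then ssh_rtl_destroy(rtl).
+ * The function name is illustrative only.
+ */
+static int ssh_rtl_example_setup(struct ssh_rtl *rtl,
+ struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops)
+{
+ int status;
+
+ status = ssh_rtl_init(rtl, serdev, ops);
+ if (status)
+ return status;
+
+ status = ssh_rtl_start(rtl);
+ if (status)
+ ssh_rtl_destroy(rtl);
+
+ return status;
+}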
+
+struct ssh_flush_request {
+ struct ssh_request base;
+ struct completion completion;
+ int status;
+};
+
+static void ssh_rtl_flush_request_complete(struct ssh_request *r,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data,
+ int status)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ rqst->status = status;
+}
+
+static void ssh_rtl_flush_request_release(struct ssh_request *r)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ complete_all(&rqst->completion);
+}
+
+static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
+ .complete = ssh_rtl_flush_request_complete,
+ .release = ssh_rtl_flush_request_release,
+};
+
+/**
+ * ssh_rtl_flush() - Flush the request transport layer.
+ * @rtl: request transport layer
+ * @timeout: timeout for the flush operation in jiffies
+ *
+ * Queue a special flush request and wait for its completion. This request
+ * will be completed after all other currently queued and pending requests
+ * have been completed. Instead of a normal data packet, this request uses a
+ * special flush packet, meaning that upon its completion, the underlying
+ * packet transport layer has been flushed as well.
+ *
+ * Flushing the request layer guarantees that all previously submitted
+ * requests have been fully completed before this call returns. Additionally,
+ * flushing blocks execution of all later submitted requests until the flush
+ * has been completed.
+ *
+ * If the caller ensures that no new requests are submitted after a call to
+ * this function, the request transport layer is guaranteed to have no
+ * remaining requests when this call returns. The same guarantee does not hold
+ * for the packet layer, on which control packets may still be queued after
+ * this call.
+ *
+ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
+ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
+ * and/or request transport layer has been shut down before this call. May
+ * also return %-EINTR if the underlying packet transmission has been
+ * interrupted.
+ */
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
+{
+ const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
+ struct ssh_flush_request rqst;
+ int status;
+
+ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
+ rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
+ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
+ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
+
+ init_completion(&rqst.completion);
+
+ status = ssh_rtl_submit(rtl, &rqst.base);
+ if (status)
+ return status;
+
+ ssh_request_put(&rqst.base);
+
+ if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
+ ssh_rtl_cancel(&rqst.base, true);
+ wait_for_completion(&rqst.completion);
+ }
+
+ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
+ rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
+
+ return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
+}
+
+/**
+ * ssh_rtl_shutdown() - Shut down request transport layer.
+ * @rtl: The request transport layer.
+ *
+ * Shuts down the request transport layer, removing and canceling all queued
+ * and pending requests. Requests canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped, and the lower-level packet layer will be shut down.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of requests after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_rtl_shutdown(struct ssh_rtl *rtl)
+{
+ struct ssh_request *r, *n;
+ LIST_HEAD(claimed);
+ int pending;
+
+ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
+ /*
+ * Ensure that the layer gets marked as shut-down before actually
+ * stopping it. In combination with the check in ssh_rtl_submit(),
+ * this guarantees that no new requests can be added and all already
+ * queued requests are properly canceled.
+ */
+ smp_mb__after_atomic();
+
+ /* Remove requests from queue. */
+ spin_lock(&rtl->queue.lock);
+ list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+
+ list_del(&r->node);
+ list_add_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->queue.lock);
+
+ /*
+ * We have now guaranteed that the queue is empty and no more new
+ * requests can be submitted (i.e. it will stay empty). This means that
+ * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
+ * we can simply call cancel_work_sync() on tx.work here and when that
+ * returns, we've locked it down. This also means that after this call,
+ * we don't submit any more packets to the underlying packet layer, so
+ * we can also shut that down.
+ */
+
+ cancel_work_sync(&rtl->tx.work);
+ ssh_ptl_shutdown(&rtl->ptl);
+ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
+
+ /*
+ * Shutting down the packet layer should also have canceled all
+ * requests. Thus the pending set should be empty. Attempt to handle
+ * this gracefully anyway, even though this should be dead code.
+ */
+
+ pending = atomic_read(&rtl->pending.count);
+ if (WARN_ON(pending)) {
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+ list_del(&r->node);
+ list_add_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->pending.lock);
+ }
+
+ /* Finally, cancel and complete the requests we claimed before. */
+ list_for_each_entry_safe(r, n, &claimed, node) {
+ /*
+ * We need test_and_set() because we still might compete with
+ * cancellation.
+ */
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ ssh_rtl_complete_with_status(r, -ESHUTDOWN);
+
+ /*
+ * Drop the reference we've obtained by removing it from the
+ * lists.
+ */
+ list_del(&r->node);
+ ssh_request_put(r);
+ }
+}
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
new file mode 100644
index 000000000000..cb35815858d1
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH request transport layer.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
+#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include <linux/surface_aggregator/controller.h>
+
+#include "ssh_packet_layer.h"
+
+/**
+ * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl.
+ *
+ * @SSH_RTL_SF_SHUTDOWN_BIT:
+ * Indicates that the request transport layer has been shut down or is
+ * being shut down and should not accept any new requests.
+ */
+enum ssh_rtl_state_flags {
+ SSH_RTL_SF_SHUTDOWN_BIT,
+};
+
+/**
+ * struct ssh_rtl_ops - Callback operations for request transport layer.
+ * @handle_event: Function called when a SSH event has been received. The
+ * specified function takes the request layer, received command
+ * struct, and corresponding payload as arguments. If the event
+ * has no payload, the payload span is empty (not %NULL).
+ */
+struct ssh_rtl_ops {
+ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ const struct ssam_span *data);
+};
+
+/**
+ * struct ssh_rtl - SSH request transport layer.
+ * @ptl: Underlying packet transport layer.
+ * @state: State(-flags) of the transport layer.
+ * @queue: Request submission queue.
+ * @queue.lock: Lock for modifying the request submission queue.
+ * @queue.head: List-head of the request submission queue.
+ * @pending: Set/list of pending requests.
+ * @pending.lock: Lock for modifying the request set.
+ * @pending.head: List-head of the pending set/list.
+ * @pending.count: Number of currently pending requests.
+ * @tx: Transmitter subsystem.
+ * @tx.work: Transmitter work item.
+ * @rtx_timeout: Retransmission timeout subsystem.
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
+ * @ops: Request layer operations.
+ */
+struct ssh_rtl {
+ struct ssh_ptl ptl;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ struct work_struct work;
+ } tx;
+
+ struct {
+ spinlock_t lock;
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_rtl_ops ops;
+};
+
+#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_info(r, fmt, ...) ptl_info(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
+
+#define to_ssh_rtl(ptr, member) \
+ container_of(ptr, struct ssh_rtl, member)
+
+/**
+ * ssh_rtl_get_device() - Get device associated with request transport layer.
+ * @rtl: The request transport layer.
+ *
+ * Return: Returns the device the given request transport layer builds
+ * upon.
+ */
+static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
+{
+ return ssh_ptl_get_device(&rtl->ptl);
+}
+
+/**
+ * ssh_request_rtl() - Get request transport layer associated with request.
+ * @rqst: The request to get the request transport layer reference for.
+ *
+ * Return: Returns the &struct ssh_rtl associated with the given SSH request.
+ */
+static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
+{
+ struct ssh_ptl *ptl;
+
+ ptl = READ_ONCE(rqst->packet.ptl);
+ return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
+}
+
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
+
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops);
+
+int ssh_rtl_start(struct ssh_rtl *rtl);
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
+void ssh_rtl_shutdown(struct ssh_rtl *rtl);
+void ssh_rtl_destroy(struct ssh_rtl *rtl);
+
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
new file mode 100644
index 000000000000..eb332bb53ae4
--- /dev/null
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Trace points for SSAM/SSH.
+ *
+ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM surface_aggregator
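+
+/*
+ * Editorial note: the events below are grouped under the
+ * "surface_aggregator" trace system; they can typically be enabled at
+ * runtime via tracefs, e.g. by writing 1 to
+ * /sys/kernel/tracing/events/surface_aggregator/enable.
+ */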
+
+#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SURFACE_AGGREGATOR_TRACE_H
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include <asm/unaligned.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
+
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+
+#define SSAM_PTR_UID_LEN 9
+#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
+#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
+#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
+#define SSAM_SSH_TC_NOT_APPLICABLE 0
+
+#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
+#define _SURFACE_AGGREGATOR_TRACE_HELPERS
+
+/**
+ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
+ * @ptr: The pointer to convert.
+ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
+ *
+ * Converts the given pointer into a UID string that is safe to be shared
+ * with userspace and logs, i.e. doesn't give away the real memory location.
+ */
+static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
+{
+ char buf[2 * sizeof(void *) + 1];
+
+ BUILD_BUG_ON(ARRAY_SIZE(buf) < SSAM_PTR_UID_LEN);
+
+ snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
+ memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
+ SSAM_PTR_UID_LEN);
+}
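+
+/*
+ * For example, a pointer that snprintf() renders (hashed) as
+ * "000000006acb3a16" yields the UID string "6acb3a16", i.e. the last eight
+ * characters plus the terminating NUL; the hash value here is illustrative.
+ */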
+
+/**
+ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's sequence ID (SEQ) field if present, or
+ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
+ */
+static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
+ return SSAM_SEQ_NOT_APPLICABLE;
+
+ return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+/**
+ * ssam_trace_get_request_id() - Read the packet's request ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request ID (RQID) field if the packet
+ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
+ * (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_RQID_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
+}
+
+/**
+ * ssam_trace_get_request_tc() - Read the packet's request target category.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request target category (TC) field if the
+ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_SSH_TC_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
+}
+
+#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
+
+#define ssam_trace_get_command_field_u8(packet, field) \
+ ((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
+ ? SSAM_U8_FIELD_NOT_APPLICABLE \
+ : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
+
+#define ssam_show_generic_u8_field(value) \
+ __print_symbolic(value, \
+ { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_frame_type(ty) \
+ __print_symbolic(ty, \
+ { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
+ { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
+ { SSH_FRAME_TYPE_ACK, "ACK" }, \
+ { SSH_FRAME_TYPE_NAK, "NAK" } \
+ )
+
+#define ssam_show_packet_type(type) \
+ __print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "", \
+ { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
+ { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
+ { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
+ )
+
+#define ssam_show_packet_state(state) \
+ __print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "", \
+ { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
+ { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
+ { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
+ { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
+ { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
+ { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
+ { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
+ { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
+ )
+
+#define ssam_show_packet_seq(seq) \
+ __print_symbolic(seq, \
+ { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_request_type(flags) \
+ __print_flags((flags) & SSH_REQUEST_FLAGS_TY_MASK, "", \
+ { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
+ { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
+ )
+
+#define ssam_show_request_state(flags) \
+ __print_flags((flags) & SSH_REQUEST_FLAGS_SF_MASK, "", \
+ { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
+ { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
+ { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
+ { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
+ { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
+ { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
+ { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
+ { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
+ )
+
+#define ssam_show_request_id(rqid) \
+ __print_symbolic(rqid, \
+ { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_ssh_tc(tc) \
+ __print_symbolic(tc, \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC, "ACC" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" } \
+ )
+
+DECLARE_EVENT_CLASS(ssam_frame_class,
+ TP_PROTO(const struct ssh_frame *frame),
+
+ TP_ARGS(frame),
+
+ TP_STRUCT__entry(
+ __field(u8, type)
+ __field(u8, seq)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->type = frame->type;
+ __entry->seq = frame->seq;
+ __entry->len = get_unaligned_le16(&frame->len);
+ ),
+
+ TP_printk("ty=%s, seq=%#04x, len=%u",
+ ssam_show_frame_type(__entry->type),
+ __entry->seq,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_FRAME_EVENT(name) \
+ DEFINE_EVENT(ssam_frame_class, ssam_##name, \
+ TP_PROTO(const struct ssh_frame *frame), \
+ TP_ARGS(frame) \
+ )
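+
+/*
+ * Editorial note: for example, DEFINE_SSAM_FRAME_EVENT(rx_frame_received)
+ * would define an event in this class that is emitted via
+ * trace_ssam_rx_frame_received(frame); the event name shown here is
+ * illustrative.
+ */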
+
+DECLARE_EVENT_CLASS(ssam_command_class,
+ TP_PROTO(const struct ssh_command *cmd, u16 len),
+
+ TP_ARGS(cmd, len),
+
+ TP_STRUCT__entry(
+ __field(u16, rqid)
+ __field(u16, len)
+ __field(u8, tc)
+ __field(u8, cid)
+ __field(u8, iid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqid = get_unaligned_le16(&cmd->rqid);
+ __entry->tc = cmd->tc;
+ __entry->cid = cmd->cid;
+ __entry->iid = cmd->iid;
+ __entry->len = len;
+ ),
+
+ TP_printk("rqid=%#06x, tc=%s, cid=%#04x, iid=%#04x, len=%u",
+ __entry->rqid,
+ ssam_show_ssh_tc(__entry->tc),
+ __entry->cid,
+ __entry->iid,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_COMMAND_EVENT(name) \
+ DEFINE_EVENT(ssam_command_class, ssam_##name, \
+ TP_PROTO(const struct ssh_command *cmd, u16 len), \
+ TP_ARGS(cmd, len) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_packet_class,
+ TP_PROTO(const struct ssh_packet *packet),
+
+ TP_ARGS(packet),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, priority)
+ __field(u16, length)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->state = READ_ONCE(packet->state);
+ ssam_trace_ptr_uid(packet, __entry->uid);
+ __entry->priority = READ_ONCE(packet->priority);
+ __entry->length = packet->data.len;
+ __entry->seq = ssam_trace_get_packet_seq(packet);
+ ),
+
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s",
+ __entry->uid,
+ ssam_show_packet_seq(__entry->seq),
+ ssam_show_packet_type(__entry->state),
+ __entry->priority,
+ __entry->length,
+ ssam_show_packet_state(__entry->state)
+ )
+);
+
+#define DEFINE_SSAM_PACKET_EVENT(name) \
+ DEFINE_EVENT(ssam_packet_class, ssam_##name, \
+ TP_PROTO(const struct ssh_packet *packet), \
+ TP_ARGS(packet) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_packet_status_class,
+ TP_PROTO(const struct ssh_packet *packet, int status),
+
+ TP_ARGS(packet, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(int, status)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, priority)
+ __field(u16, length)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->state = READ_ONCE(packet->state);
+ __entry->status = status;
+ ssam_trace_ptr_uid(packet, __entry->uid);
+ __entry->priority = READ_ONCE(packet->priority);
+ __entry->length = packet->data.len;
+ __entry->seq = ssam_trace_get_packet_seq(packet);
+ ),
+
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s, status=%d",
+ __entry->uid,
+ ssam_show_packet_seq(__entry->seq),
+ ssam_show_packet_type(__entry->state),
+ __entry->priority,
+ __entry->length,
+ ssam_show_packet_state(__entry->state),
+ __entry->status
+ )
+);
+
+#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
+ DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
+ TP_PROTO(const struct ssh_packet *packet, int status), \
+ TP_ARGS(packet, status) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_request_class,
+ TP_PROTO(const struct ssh_request *request),
+
+ TP_ARGS(request),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, rqid)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, tc)
+ __field(u16, cid)
+ __field(u16, iid)
+ ),
+
+ TP_fast_assign(
+ const struct ssh_packet *p = &request->packet;
+
+ /* Use packet for UID so we can match requests to packets. */
+ __entry->state = READ_ONCE(request->state);
+ __entry->rqid = ssam_trace_get_request_id(p);
+ ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tc = ssam_trace_get_request_tc(p);
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
+ ),
+
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
+ __entry->uid,
+ ssam_show_request_id(__entry->rqid),
+ ssam_show_request_type(__entry->state),
+ ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tc(__entry->tc),
+ ssam_show_generic_u8_field(__entry->cid),
+ ssam_show_generic_u8_field(__entry->iid)
+ )
+);
+
+#define DEFINE_SSAM_REQUEST_EVENT(name) \
+ DEFINE_EVENT(ssam_request_class, ssam_##name, \
+ TP_PROTO(const struct ssh_request *request), \
+ TP_ARGS(request) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_request_status_class,
+ TP_PROTO(const struct ssh_request *request, int status),
+
+ TP_ARGS(request, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, rqid)
+ __field(int, status)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, tc)
+ __field(u16, cid)
+ __field(u16, iid)
+ ),
+
+ TP_fast_assign(
+ const struct ssh_packet *p = &request->packet;
+
+ /* Use packet for UID so we can match requests to packets. */
+ __entry->state = READ_ONCE(request->state);
+ __entry->rqid = ssam_trace_get_request_id(p);
+ __entry->status = status;
+ ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tc = ssam_trace_get_request_tc(p);
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
+ ),
+
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
+ __entry->uid,
+ ssam_show_request_id(__entry->rqid),
+ ssam_show_request_type(__entry->state),
+ ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tc(__entry->tc),
+ ssam_show_generic_u8_field(__entry->cid),
+ ssam_show_generic_u8_field(__entry->iid),
+ __entry->status
+ )
+);
+
+#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
+ DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
+ TP_PROTO(const struct ssh_request *request, int status),\
+ TP_ARGS(request, status) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_alloc_class,
+ TP_PROTO(void *ptr, size_t len),
+
+ TP_ARGS(ptr, len),
+
+ TP_STRUCT__entry(
+ __field(size_t, len)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->len = len;
+ ssam_trace_ptr_uid(ptr, __entry->uid);
+ ),
+
+ TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
+);
+
+#define DEFINE_SSAM_ALLOC_EVENT(name) \
+ DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
+ TP_PROTO(void *ptr, size_t len), \
+ TP_ARGS(ptr, len) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_free_class,
+ TP_PROTO(void *ptr),
+
+ TP_ARGS(ptr),
+
+ TP_STRUCT__entry(
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ ),
+
+ TP_fast_assign(
+ ssam_trace_ptr_uid(ptr, __entry->uid);
+ ),
+
+ TP_printk("uid=%s", __entry->uid)
+);
+
+#define DEFINE_SSAM_FREE_EVENT(name) \
+ DEFINE_EVENT(ssam_free_class, ssam_##name, \
+ TP_PROTO(void *ptr), \
+ TP_ARGS(ptr) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_pending_class,
+ TP_PROTO(unsigned int pending),
+
+ TP_ARGS(pending),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, pending)
+ ),
+
+ TP_fast_assign(
+ __entry->pending = pending;
+ ),
+
+ TP_printk("pending=%u", __entry->pending)
+);
+
+#define DEFINE_SSAM_PENDING_EVENT(name) \
+ DEFINE_EVENT(ssam_pending_class, ssam_##name, \
+ TP_PROTO(unsigned int pending), \
+ TP_ARGS(pending) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_data_class,
+ TP_PROTO(size_t length),
+
+ TP_ARGS(length),
+
+ TP_STRUCT__entry(
+ __field(size_t, length)
+ ),
+
+ TP_fast_assign(
+ __entry->length = length;
+ ),
+
+ TP_printk("length=%zu", __entry->length)
+);
+
+#define DEFINE_SSAM_DATA_EVENT(name) \
+ DEFINE_EVENT(ssam_data_class, ssam_##name, \
+ TP_PROTO(size_t length), \
+ TP_ARGS(length) \
+ )
+
+DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
+
+DEFINE_SSAM_PACKET_EVENT(packet_release);
+DEFINE_SSAM_PACKET_EVENT(packet_submit);
+DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
+DEFINE_SSAM_PACKET_EVENT(packet_timeout);
+DEFINE_SSAM_PACKET_EVENT(packet_cancel);
+DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
+DEFINE_SSAM_PENDING_EVENT(ptl_timeout_reap);
+
+DEFINE_SSAM_REQUEST_EVENT(request_submit);
+DEFINE_SSAM_REQUEST_EVENT(request_timeout);
+DEFINE_SSAM_REQUEST_EVENT(request_cancel);
+DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
+DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap);
+
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
+DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
+DEFINE_SSAM_DATA_EVENT(ei_rx_corrupt_syn);
+DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
+DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
+
+DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
+DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
+
+DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
+DEFINE_SSAM_FREE_EVENT(event_item_free);
+
+#endif /* _SURFACE_AGGREGATOR_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c
index 130b6f52a600..fcd1d4fb94d5 100644
--- a/drivers/platform/surface/surface3-wmi.c
+++ b/drivers/platform/surface/surface3-wmi.c
@@ -57,12 +57,16 @@ static DEFINE_MUTEX(s3_wmi_lock);
static int s3_wmi_query_block(const char *guid, int instance, int *ret)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj = NULL;
acpi_status status;
- union acpi_object *obj;
int error = 0;
mutex_lock(&s3_wmi_lock);
status = wmi_query_block(guid, instance, &output);
+ if (ACPI_FAILURE(status)) {
+ error = -EIO;
+ goto out_free_unlock;
+ }
obj = output.pointer;
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
new file mode 100644
index 000000000000..ef9c1f8e8336
--- /dev/null
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the Surface ACPI Notify (SAN) interface/shim.
+ *
+ * Translates communication from ACPI to Surface System Aggregator Module
+ * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
+ * events back to ACPI notifications. Allows handling of discrete GPU
+ * notifications sent from ACPI via the SAN interface by providing them to any
+ * registered external driver.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/rwsem.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_acpi_notify.h>
+
+struct san_data {
+ struct device *dev;
+ struct ssam_controller *ctrl;
+
+ struct acpi_connection_info info;
+
+ struct ssam_event_notifier nf_bat;
+ struct ssam_event_notifier nf_tmp;
+};
+
+#define to_san_data(ptr, member) \
+ container_of(ptr, struct san_data, member)
+
+
+/* -- dGPU notifier interface. ---------------------------------------------- */
+
+struct san_rqsg_if {
+ struct rw_semaphore lock;
+ struct device *dev;
+ struct blocking_notifier_head nh;
+};
+
+static struct san_rqsg_if san_rqsg_if = {
+ .lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
+ .dev = NULL,
+ .nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
+};
+
+static int san_set_rqsg_interface_device(struct device *dev)
+{
+ int status = 0;
+
+ down_write(&san_rqsg_if.lock);
+ if (!san_rqsg_if.dev && dev)
+ san_rqsg_if.dev = dev;
+ else
+ status = -EBUSY;
+ up_write(&san_rqsg_if.lock);
+
+ return status;
+}
+
+/**
+ * san_client_link() - Link client as consumer to SAN device.
+ * @client: The client to link.
+ *
+ * Sets up a device link between the provided client device as consumer and
+ * the SAN device as provider. This function can be used to ensure that the
+ * SAN interface has been set up and will be set up for as long as the driver
+ * of the client device is bound. This guarantees that, during that time, all
+ * dGPU events will be received by any registered notifier.
+ *
+ * The link will be automatically removed once the client device's driver is
+ * unbound.
+ *
+ * Return: Returns zero on success, %-ENXIO if the SAN interface has not been
+ * set up yet, and %-ENOMEM if device link creation failed.
+ */
+int san_client_link(struct device *client)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+ struct device_link *link;
+
+ down_read(&san_rqsg_if.lock);
+
+ if (!san_rqsg_if.dev) {
+ up_read(&san_rqsg_if.lock);
+ return -ENXIO;
+ }
+
+ link = device_link_add(client, san_rqsg_if.dev, flags);
+ if (!link) {
+ up_read(&san_rqsg_if.lock);
+ return -ENOMEM;
+ }
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
+ up_read(&san_rqsg_if.lock);
+ return -ENXIO;
+ }
+
+ up_read(&san_rqsg_if.lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(san_client_link);
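+
+/*
+ * Usage sketch (illustrative only, not part of this patch; the driver and
+ * notifier names are hypothetical): a consumer such as a dGPU hot-plug
+ * handler would typically link to the SAN device before registering its
+ * notifier, deferring probe until the interface is available:
+ *
+ *	static int my_dgpu_probe(struct platform_device *pdev)
+ *	{
+ *		int status;
+ *
+ *		status = san_client_link(&pdev->dev);
+ *		if (status)
+ *			return status == -ENXIO ? -EPROBE_DEFER : status;
+ *
+ *		return san_dgpu_notifier_register(&my_dgpu_nb);
+ *	}
+ */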
+
+/**
+ * san_dgpu_notifier_register() - Register a SAN dGPU notifier.
+ * @nb: The notifier-block to register.
+ *
+ * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
+ * ACPI. The registered notifier will be called with &struct san_dgpu_event
+ * as notifier data and the command ID of that event as notifier action.
+ */
+int san_dgpu_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
+}
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
+
+/**
+ * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
+ * @nb: The notifier-block to unregister.
+ */
+int san_dgpu_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
+}
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
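+
+/*
+ * Notifier sketch (illustrative only; the name and command-ID check are
+ * hypothetical): the chain passes the event's command ID as @action and a
+ * pointer to the &struct san_dgpu_event as @data.
+ *
+ *	static int my_dgpu_notify(struct notifier_block *nb,
+ *				  unsigned long action, void *data)
+ *	{
+ *		struct san_dgpu_event *evt = data;
+ *
+ *		if (action != MY_DGPU_CID)
+ *			return NOTIFY_DONE;
+ *
+ *		... handle evt->payload (evt->length bytes) ...
+ *		return NOTIFY_OK;
+ *	}
+ */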
+
+static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
+ return notifier_to_errno(ret);
+}
+
+
+/* -- ACPI _DSM event relay. ------------------------------------------------ */
+
+#define SAN_DSM_REVISION 0
+
+/* 93b666c5-70c6-469f-a215-3d487c91ab3c */
+static const guid_t SAN_DSM_UUID =
+ GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
+ 0x48, 0x7c, 0x91, 0xab, 0x3c);
+
+enum san_dsm_event_fn {
+ SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
+ SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
+ SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
+ SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
+ SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
+ SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
+ SAN_DSM_EVENT_FN_THERMAL = 0x09,
+ SAN_DSM_EVENT_FN_DPTF = 0x0a,
+};
+
+enum sam_event_cid_bat {
+ SAM_EVENT_CID_BAT_BIX = 0x15,
+ SAM_EVENT_CID_BAT_BST = 0x16,
+ SAM_EVENT_CID_BAT_ADP = 0x17,
+ SAM_EVENT_CID_BAT_PROT = 0x18,
+ SAM_EVENT_CID_BAT_DPTF = 0x4f,
+};
+
+enum sam_event_cid_tmp {
+ SAM_EVENT_CID_TMP_TRIP = 0x0b,
+};
+
+struct san_event_work {
+ struct delayed_work work;
+ struct device *dev;
+ struct ssam_event event; /* must be last */
+};
+
+static int san_acpi_notify_event(struct device *dev, u64 func,
+ union acpi_object *param)
+{
+ acpi_handle san = ACPI_HANDLE(dev);
+ union acpi_object *obj;
+ int status = 0;
+
+ if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func)))
+ return 0;
+
+ dev_dbg(dev, "notify event %#04llx\n", func);
+
+ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
+ func, param, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -EFAULT;
+
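+ /* A result of a one-byte buffer containing zero indicates success. */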
+ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
+ dev_err(dev, "got unexpected result from _DSM\n");
+ status = -EPROTO;
+ }
+
+ ACPI_FREE(obj);
+ return status;
+}
+
+static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
+{
+ int status;
+
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
+ if (status)
+ return status;
+
+ /*
+ * Ensure that the battery states get updated correctly. When the
+ * battery is fully charged and an adapter is plugged in, the battery
+ * state sometimes is not updated correctly and keeps showing the
+ * battery as charging.
+ * Explicitly trigger battery updates to fix this.
+ */
+
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
+ if (status)
+ return status;
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
+}
+
+static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
+{
+ enum san_dsm_event_fn fn;
+
+ if (event->instance_id == 0x02)
+ fn = SAN_DSM_EVENT_FN_BAT2_INFO;
+ else
+ fn = SAN_DSM_EVENT_FN_BAT1_INFO;
+
+ return san_acpi_notify_event(dev, fn, NULL);
+}
+
+static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
+{
+ enum san_dsm_event_fn fn;
+
+ if (event->instance_id == 0x02)
+ fn = SAN_DSM_EVENT_FN_BAT2_STAT;
+ else
+ fn = SAN_DSM_EVENT_FN_BAT1_STAT;
+
+ return san_acpi_notify_event(dev, fn, NULL);
+}
+
+static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
+{
+ union acpi_object payload;
+
+ /*
+ * The Surface ACPI expects a buffer and not a package. It specifically
+ * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
+ * acpica/nsarguments.c, but that warning can be safely ignored.
+ */
+ payload.type = ACPI_TYPE_BUFFER;
+ payload.buffer.length = event->length;
+ payload.buffer.pointer = (u8 *)&event->data[0];
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
+}
+
+static unsigned long san_evt_bat_delay(u8 cid)
+{
+ switch (cid) {
+ case SAM_EVENT_CID_BAT_ADP:
+ /*
+ * Wait for battery state to update before signaling adapter
+ * change.
+ */
+ return msecs_to_jiffies(5000);
+
+ case SAM_EVENT_CID_BAT_BST:
+ /* Ensure we do not miss anything important due to caching. */
+ return msecs_to_jiffies(2000);
+
+ default:
+ return 0;
+ }
+}
+
+static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_BAT_BIX:
+ status = san_evt_bat_bix(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_BST:
+ status = san_evt_bat_bst(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_ADP:
+ status = san_evt_bat_adp(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_PROT:
+ /*
+ * TODO: Implement support for battery protection status change
+ * event.
+ */
+ return true;
+
+ case SAM_EVENT_CID_BAT_DPTF:
+ status = san_evt_bat_dptf(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling power event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static void san_evt_bat_workfn(struct work_struct *work)
+{
+ struct san_event_work *ev;
+
+ ev = container_of(work, struct san_event_work, work.work);
+ san_evt_bat(&ev->event, ev->dev);
+ kfree(ev);
+}
+
+static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_bat);
+ struct san_event_work *work;
+ unsigned long delay = san_evt_bat_delay(event->command_id);
+
+ if (delay == 0)
+ return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+
+ work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
+ if (!work)
+ return ssam_notifier_from_errno(-ENOMEM);
+
+ INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
+ work->dev = d->dev;
+
+ memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
+
+ schedule_delayed_work(&work->work, delay);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
+{
+ union acpi_object param;
+
+ /*
+ * The Surface ACPI expects an integer and not a package. This will
+ * cause a warning in acpica/nsarguments.c, but that warning can be
+ * safely ignored.
+ */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = event->instance_id;
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, &param);
+}
+
+static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_TMP_TRIP:
+ status = san_evt_tmp_trip(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling thermal event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_tmp);
+
+ return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+}
+
+
+/* -- ACPI GSB OperationRegion handler -------------------------------------- */
+
+struct gsb_data_in {
+ u8 cv;
+} __packed;
+
+struct gsb_data_rqsx {
+ u8 cv; /* Command value (san_gsb_request_cv). */
+ u8 tc; /* Target category. */
+ u8 tid; /* Target ID. */
+ u8 iid; /* Instance ID. */
+ u8 snc; /* Expect-response-flag. */
+ u8 cid; /* Command ID. */
+ u16 cdl; /* Payload length. */
+ u8 pld[]; /* Payload. */
+} __packed;
+
+struct gsb_data_etwl {
+ u8 cv; /* Command value (should be 0x02). */
+ u8 etw3; /* Unknown. */
+ u8 etw4; /* Unknown. */
+ u8 msg[]; /* Error message (ASCIIZ). */
+} __packed;
+
+struct gsb_data_out {
+ u8 status; /* _SSH communication status. */
+ u8 len; /* _SSH payload length. */
+ u8 pld[]; /* _SSH payload. */
+} __packed;
+
+union gsb_buffer_data {
+ struct gsb_data_in in; /* Common input. */
+ struct gsb_data_rqsx rqsx; /* RQSX input. */
+ struct gsb_data_etwl etwl; /* ETWL input. */
+ struct gsb_data_out out; /* Output. */
+};
+
+struct gsb_buffer {
+ u8 status; /* GSB AttribRawProcess status. */
+ u8 len; /* GSB AttribRawProcess length. */
+ union gsb_buffer_data data;
+} __packed;
+
+#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
+#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
+
+#define SAN_GSB_COMMAND 0
+
+enum san_gsb_request_cv {
+ SAN_GSB_REQUEST_CV_RQST = 0x01,
+ SAN_GSB_REQUEST_CV_ETWL = 0x02,
+ SAN_GSB_REQUEST_CV_RQSG = 0x03,
+};
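+
+/*
+ * Illustrative layout of an incoming RQST package in the GSB buffer, as
+ * implied by the struct definitions above (the command value shown is
+ * SAN_GSB_REQUEST_CV_RQST):
+ *
+ *	buffer.status			// AttribRawProcess status
+ *	buffer.len			// bytes following: header plus payload
+ *	buffer.data.rqsx.cv = 0x01	// command value
+ *	buffer.data.rqsx.tc/tid/iid/cid	// request routing fields
+ *	buffer.data.rqsx.snc		// non-zero if a response is expected
+ *	buffer.data.rqsx.cdl		// payload length; must equal
+ *					// len - sizeof(struct gsb_data_rqsx)
+ *	buffer.data.rqsx.pld[]		// cdl bytes of request payload
+ */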
+
+#define SAN_REQUEST_NUM_TRIES 5
+
+static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
+{
+ struct gsb_data_etwl *etwl = &b->data.etwl;
+
+ if (b->len < sizeof(struct gsb_data_etwl)) {
+ dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
+ return AE_OK;
+ }
+
+ dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4,
+ (unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
+ (char *)etwl->msg);
+
+ /* Indicate success. */
+ b->status = 0x00;
+ b->len = 0x00;
+
+ return AE_OK;
+}
+
+static
+struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type,
+ struct gsb_buffer *b)
+{
+ struct gsb_data_rqsx *rqsx = &b->data.rqsx;
+
+ if (b->len < sizeof(struct gsb_data_rqsx)) {
+ dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
+ return NULL;
+ }
+
+ if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
+ dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
+ type, b->len, get_unaligned(&rqsx->cdl));
+ return NULL;
+ }
+
+ if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
+ dev_err(dev, "payload for %s package too large (cdl = %d)\n",
+ type, get_unaligned(&rqsx->cdl));
+ return NULL;
+ }
+
+ return rqsx;
+}
+
+static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
+{
+ gsb->status = 0x00;
+ gsb->len = 0x02;
+ gsb->data.out.status = (u8)(-status);
+ gsb->data.out.len = 0x00;
+}
+
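+/*
+ * Note: the GSB length field set below counts the out-header (status and
+ * length byte, hence the "+ 2") in addition to the payload itself.
+ */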
+static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
+{
+ gsb->status = 0x00;
+ gsb->len = len + 2;
+ gsb->data.out.status = 0x00;
+ gsb->data.out.len = len;
+
+ if (len)
+ memcpy(&gsb->data.out.pld[0], ptr, len);
+}
+
+static acpi_status san_rqst_fixup_suspended(struct san_data *d,
+ struct ssam_request *rqst,
+ struct gsb_buffer *gsb)
+{
+ if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
+ u8 base_state = 1;
+
+ /*
+ * Base state quirk:
+ * The base state may be queried from ACPI when the EC is still
+ * suspended. In this case it will return '-EPERM'. This query
+ * will only be triggered from the ACPI lid GPE interrupt, thus
+ * we are either in laptop or studio mode (base status 0x01 or
+ * 0x02). Furthermore, we will only get here if the device (and
+ * EC) have been suspended.
+ *
+ * We now assume that the device is in laptop mode (0x01). This
+ * has the drawback that it will wake the device when unfolding
+ * it in studio mode, but it also allows us to avoid actively
+ * waiting for the EC to wake up, which may incur a notable
+ * delay.
+ */
+
+ dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
+
+ gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
+ return AE_OK;
+ }
+
+ gsb_rqsx_response_error(gsb, -ENXIO);
+ return AE_OK;
+}
+
+static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
+{
+ u8 rspbuf[SAN_GSB_MAX_RESPONSE];
+ struct gsb_data_rqsx *gsb_rqst;
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status = 0;
+
+ gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
+ if (!gsb_rqst)
+ return AE_OK;
+
+ rqst.target_category = gsb_rqst->tc;
+ rqst.target_id = gsb_rqst->tid;
+ rqst.command_id = gsb_rqst->cid;
+ rqst.instance_id = gsb_rqst->iid;
+ rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
+ rqst.length = get_unaligned(&gsb_rqst->cdl);
+ rqst.payload = &gsb_rqst->pld[0];
+
+ rsp.capacity = ARRAY_SIZE(rspbuf);
+ rsp.length = 0;
+ rsp.pointer = &rspbuf[0];
+
+ /* Handle suspended device. */
+ if (d->dev->power.is_suspended) {
+ dev_warn(d->dev, "rqst: device is suspended, not executing\n");
+ return san_rqst_fixup_suspended(d, &rqst, buffer);
+ }
+
+ status = __ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
+ d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
+
+ if (!status) {
+ gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
+ } else {
+ dev_err(d->dev, "rqst: failed with error %d\n", status);
+ gsb_rqsx_response_error(buffer, status);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
+{
+ struct gsb_data_rqsx *gsb_rqsg;
+ struct san_dgpu_event evt;
+ int status;
+
+ gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
+ if (!gsb_rqsg)
+ return AE_OK;
+
+ evt.category = gsb_rqsg->tc;
+ evt.target = gsb_rqsg->tid;
+ evt.command = gsb_rqsg->cid;
+ evt.instance = gsb_rqsg->iid;
+ evt.length = get_unaligned(&gsb_rqsg->cdl);
+ evt.payload = &gsb_rqsg->pld[0];
+
+ status = san_dgpu_notifier_call(&evt);
+ if (!status) {
+ gsb_rqsx_response_success(buffer, NULL, 0);
+ } else {
+ dev_err(d->dev, "rqsg: failed with error %d\n", status);
+ gsb_rqsx_response_error(buffer, status);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status san_opreg_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64, void *opreg_context,
+ void *region_context)
+{
+ struct san_data *d = to_san_data(opreg_context, info);
+ struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
+ int accessor_type = (function & 0xFFFF0000) >> 16;
+
+ if (command != SAN_GSB_COMMAND) {
+ dev_warn(d->dev, "unsupported command: %#04llx\n", command);
+ return AE_OK;
+ }
+
+ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
+ dev_err(d->dev, "invalid access type: %#04x\n", accessor_type);
+ return AE_OK;
+ }
+
+ /* Buffer must at least contain the command-value. */
+ if (buffer->len == 0) {
+ dev_err(d->dev, "request-package too small\n");
+ return AE_OK;
+ }
+
+ switch (buffer->data.in.cv) {
+ case SAN_GSB_REQUEST_CV_RQST:
+ return san_rqst(d, buffer);
+
+ case SAN_GSB_REQUEST_CV_ETWL:
+ return san_etwl(d, buffer);
+
+ case SAN_GSB_REQUEST_CV_RQSG:
+ return san_rqsg(d, buffer);
+
+ default:
+ dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n",
+ buffer->data.in.cv);
+ return AE_OK;
+ }
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int san_events_register(struct platform_device *pdev)
+{
+ struct san_data *d = platform_get_drvdata(pdev);
+ int status;
+
+ d->nf_bat.base.priority = 1;
+ d->nf_bat.base.fn = san_evt_bat_nf;
+ d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
+ d->nf_bat.event.id.instance = 0;
+ d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
+ d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
+
+ d->nf_tmp.base.priority = 1;
+ d->nf_tmp.base.fn = san_evt_tmp_nf;
+ d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
+ d->nf_tmp.event.id.instance = 0;
+ d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
+ d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_notifier_register(d->ctrl, &d->nf_bat);
+ if (status)
+ return status;
+
+ status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
+ if (status)
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+
+ return status;
+}
+
+static void san_events_unregister(struct platform_device *pdev)
+{
+ struct san_data *d = platform_get_drvdata(pdev);
+
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+ ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
+}
+
+#define san_consumer_printk(level, dev, handle, fmt, ...) \
+do { \
+ char *path = "<error getting consumer path>"; \
+ struct acpi_buffer buffer = { \
+ .length = ACPI_ALLOCATE_BUFFER, \
+ .pointer = NULL, \
+ }; \
+ \
+ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) \
+ path = buffer.pointer; \
+ \
+ dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__); \
+ kfree(buffer.pointer); \
+} while (0)
+
+#define san_consumer_dbg(dev, handle, fmt, ...) \
+ san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
+
+#define san_consumer_warn(dev, handle, fmt, ...) \
+ san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
+
+static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == supplier)
+ return true;
+ }
+
+ return false;
+}
+
+static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
+ struct platform_device *pdev = context;
+ struct acpi_device *adev;
+ struct device_link *link;
+
+ if (!is_san_consumer(pdev, handle))
+ return AE_OK;
+
+ /* Ignore ACPI devices that are not present. */
+ if (acpi_bus_get_device(handle, &adev) != 0)
+ return AE_OK;
+
+ san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
+
+ /* Try to set up device links, ignore but log errors. */
+ link = device_link_add(&adev->dev, &pdev->dev, flags);
+ if (!link) {
+ san_consumer_warn(&pdev->dev, handle, "failed to create device link\n");
+ return AE_OK;
+ }
+
+ return AE_OK;
+}
+
+static int san_consumer_links_setup(struct platform_device *pdev)
+{
+ acpi_status status;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, san_consumer_setup, NULL,
+ pdev, NULL);
+
+ return status ? -EFAULT : 0;
+}
+
+static int san_probe(struct platform_device *pdev)
+{
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
+ struct ssam_controller *ctrl;
+ struct san_data *data;
+ acpi_status astatus;
+ int status;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = san_consumer_links_setup(pdev);
+ if (status)
+ return status;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->ctrl = ctrl;
+
+ platform_set_drvdata(pdev, data);
+
+ astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler, NULL,
+ &data->info);
+ if (ACPI_FAILURE(astatus))
+ return -ENXIO;
+
+ status = san_events_register(pdev);
+ if (status)
+ goto err_enable_events;
+
+ status = san_set_rqsg_interface_device(&pdev->dev);
+ if (status)
+ goto err_install_dev;
+
+ acpi_walk_dep_device_list(san);
+ return 0;
+
+err_install_dev:
+ san_events_unregister(pdev);
+err_enable_events:
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ return status;
+}
+
+static int san_remove(struct platform_device *pdev)
+{
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
+
+ san_set_rqsg_interface_device(NULL);
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ san_events_unregister(pdev);
+
+ /*
+ * We have unregistered our event sources. Now we need to ensure that
+ * all delayed works they may have spawned are run to completion.
+ */
+ flush_scheduled_work();
+
+ return 0;
+}
+
+static const struct acpi_device_id san_match[] = {
+ { "MSHW0091" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, san_match);
+
+static struct platform_driver surface_acpi_notify = {
+ .probe = san_probe,
+ .remove = san_remove,
+ .driver = {
+ .name = "surface_acpi_notify",
+ .acpi_match_table = san_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_acpi_notify);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
new file mode 100644
index 000000000000..79e28fab7e40
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
+ * misc device. Intended for debugging and development.
+ *
+ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/surface_aggregator/cdev.h>
+#include <linux/surface_aggregator/controller.h>
+
+#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
+
+struct ssam_cdev {
+ struct kref kref;
+ struct rw_semaphore lock;
+ struct ssam_controller *ctrl;
+ struct miscdevice mdev;
+};
+
+static void __ssam_cdev_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct ssam_cdev, kref));
+}
+
+static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
+{
+ if (cdev)
+ kref_get(&cdev->kref);
+
+ return cdev;
+}
+
+static void ssam_cdev_put(struct ssam_cdev *cdev)
+{
+ if (cdev)
+ kref_put(&cdev->kref, __ssam_cdev_release);
+}
+
+static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *mdev = filp->private_data;
+ struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
+
+ filp->private_data = ssam_cdev_get(cdev);
+ return stream_open(inode, filp);
+}
+
+static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
+{
+ ssam_cdev_put(filp->private_data);
+ return 0;
+}
+
+static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
+{
+ struct ssam_cdev_request __user *r;
+ struct ssam_cdev_request rqst;
+ struct ssam_request spec = {};
+ struct ssam_response rsp = {};
+ const void __user *plddata;
+ void __user *rspdata;
+ int status = 0, ret = 0, tmp;
+
+ r = (struct ssam_cdev_request __user *)arg;
+ ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
+ if (ret)
+ goto out;
+
+ plddata = u64_to_user_ptr(rqst.payload.data);
+ rspdata = u64_to_user_ptr(rqst.response.data);
+
+ /* Set up basic request fields. */
+ spec.target_category = rqst.target_category;
+ spec.target_id = rqst.target_id;
+ spec.command_id = rqst.command_id;
+ spec.instance_id = rqst.instance_id;
+ spec.flags = 0;
+ spec.length = rqst.payload.length;
+ spec.payload = NULL;
+
+ if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
+ spec.flags |= SSAM_REQUEST_HAS_RESPONSE;
+
+ if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
+ spec.flags |= SSAM_REQUEST_UNSEQUENCED;
+
+ rsp.capacity = rqst.response.length;
+ rsp.length = 0;
+ rsp.pointer = NULL;
+
+ /* Get request payload from user-space. */
+ if (spec.length) {
+ if (!plddata) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Note: spec.length is limited to U16_MAX bytes via struct
+ * ssam_cdev_request. This is slightly larger than the
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
+ * underlying protocol (note that nothing remotely this size
+ * should ever be allocated in any normal case). This size is
+ * validated later in ssam_request_sync(); for the allocation, the
+ * bound imposed by u16 should be enough.
+ */
+ spec.payload = kzalloc(spec.length, GFP_KERNEL);
+ if (!spec.payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ /* Allocate response buffer. */
+ if (rsp.capacity) {
+ if (!rspdata) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Note: rsp.capacity is limited to U16_MAX bytes via struct
+ * ssam_cdev_request. This is slightly larger than the
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
+ * underlying protocol (note that nothing remotely this size
+ * should ever be allocated in any normal case). In later use,
+ * this capacity does not have to be strictly bounded, as it
+ * is only used as an output buffer to be written to. For
+ * allocation, the bound imposed by u16 should be enough.
+ */
+ rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
+ if (!rsp.pointer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Perform request. */
+ status = ssam_request_sync(cdev->ctrl, &spec, &rsp);
+ if (status)
+ goto out;
+
+ /* Copy response to user-space. */
+ if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
+ ret = -EFAULT;
+
+out:
+ /* Always try to set response-length and status. */
+ tmp = put_user(rsp.length, &r->response.length);
+ if (tmp)
+ ret = tmp;
+
+ tmp = put_user(status, &r->status);
+ if (tmp)
+ ret = tmp;
+
+ /* Cleanup. */
+ kfree(spec.payload);
+ kfree(rsp.pointer);
+
+ return ret;
+}
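+
+/*
+ * User-space usage sketch (illustrative only; the target category, command
+ * ID and buffer size are made up):
+ *
+ *	struct ssam_cdev_request rqst = {};
+ *	__u8 rsp[32];
+ *	int fd, ret;
+ *
+ *	rqst.target_category = 0x01;
+ *	rqst.target_id = 0x01;
+ *	rqst.command_id = 0x02;
+ *	rqst.instance_id = 0x00;
+ *	rqst.flags = SSAM_CDEV_REQUEST_HAS_RESPONSE;
+ *	rqst.response.data = (__u64)(uintptr_t)rsp;
+ *	rqst.response.length = sizeof(rsp);
+ *
+ *	fd = open("/dev/surface/aggregator", O_RDWR);
+ *	ret = ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
+ *
+ * On success, rqst.status holds the SSAM request status and
+ * rqst.response.length the number of response bytes written to rsp.
+ */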
+
+static long __ssam_cdev_device_ioctl(struct ssam_cdev *cdev, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case SSAM_CDEV_REQUEST:
+ return ssam_cdev_request(cdev, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ssam_cdev *cdev = file->private_data;
+ long status;
+
+ /* Ensure that controller is valid for as long as we need it. */
+ if (down_read_killable(&cdev->lock))
+ return -ERESTARTSYS;
+
+ if (!cdev->ctrl) {
+ up_read(&cdev->lock);
+ return -ENODEV;
+ }
+
+ status = __ssam_cdev_device_ioctl(cdev, cmd, arg);
+
+ up_read(&cdev->lock);
+ return status;
+}
+
+static const struct file_operations ssam_controller_fops = {
+ .owner = THIS_MODULE,
+ .open = ssam_cdev_device_open,
+ .release = ssam_cdev_device_release,
+ .unlocked_ioctl = ssam_cdev_device_ioctl,
+ .compat_ioctl = ssam_cdev_device_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int ssam_dbg_device_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct ssam_cdev *cdev;
+ int status;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ kref_init(&cdev->kref);
+ init_rwsem(&cdev->lock);
+ cdev->ctrl = ctrl;
+
+ cdev->mdev.parent = &pdev->dev;
+ cdev->mdev.minor = MISC_DYNAMIC_MINOR;
+ cdev->mdev.name = "surface_aggregator";
+ cdev->mdev.nodename = "surface/aggregator";
+ cdev->mdev.fops = &ssam_controller_fops;
+
+ status = misc_register(&cdev->mdev);
+ if (status) {
+ kfree(cdev);
+ return status;
+ }
+
+ platform_set_drvdata(pdev, cdev);
+ return 0;
+}
+
+static int ssam_dbg_device_remove(struct platform_device *pdev)
+{
+ struct ssam_cdev *cdev = platform_get_drvdata(pdev);
+
+ misc_deregister(&cdev->mdev);
+
+ /*
+ * The controller is only guaranteed to be valid for as long as the
+ * driver is bound. Remove controller so that any lingering open files
+ * cannot access it any more after we're gone.
+ */
+ down_write(&cdev->lock);
+ cdev->ctrl = NULL;
+ up_write(&cdev->lock);
+
+ ssam_cdev_put(cdev);
+ return 0;
+}
+
+static struct platform_device *ssam_cdev_device;
+
+static struct platform_driver ssam_cdev_driver = {
+ .probe = ssam_dbg_device_probe,
+ .remove = ssam_dbg_device_remove,
+ .driver = {
+ .name = SSAM_CDEV_DEVICE_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init ssam_debug_init(void)
+{
+ int status;
+
+ ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
+ PLATFORM_DEVID_NONE);
+ if (!ssam_cdev_device)
+ return -ENOMEM;
+
+ status = platform_device_add(ssam_cdev_device);
+ if (status)
+ goto err_device;
+
+ status = platform_driver_register(&ssam_cdev_driver);
+ if (status)
+ goto err_driver;
+
+ return 0;
+
+err_driver:
+ platform_device_del(ssam_cdev_device);
+err_device:
+ platform_device_put(ssam_cdev_device);
+ return status;
+}
+module_init(ssam_debug_init);
+
+static void __exit ssam_debug_exit(void)
+{
+ platform_driver_unregister(&ssam_cdev_driver);
+ platform_device_unregister(ssam_cdev_device);
+}
+module_exit(ssam_debug_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index e49e5d6d5d4e..86f6991b1215 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
return 0;
}
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
new file mode 100644
index 000000000000..cfcc15cfbacb
--- /dev/null
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (2 and later) hot-plug driver.
+ *
+ * Surface Book devices (can) have a hot-pluggable discrete GPU (dGPU). This
+ * driver is responsible for out-of-band hot-plug event signaling on these
+ * devices. It is specifically required when the hot-plug device is in D3cold
+ * and can thus not generate PCIe hot-plug events itself.
+ *
+ * Event signaling is handled via ACPI, which will generate the appropriate
+ * device-check notifications to be picked up by the PCIe hot-plug driver.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+static const struct acpi_gpio_params shps_base_presence_int = { 0, 0, false };
+static const struct acpi_gpio_params shps_base_presence = { 1, 0, false };
+static const struct acpi_gpio_params shps_device_power_int = { 2, 0, false };
+static const struct acpi_gpio_params shps_device_power = { 3, 0, false };
+static const struct acpi_gpio_params shps_device_presence_int = { 4, 0, false };
+static const struct acpi_gpio_params shps_device_presence = { 5, 0, false };
+
+static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
+ { "base_presence-int-gpio", &shps_base_presence_int, 1 },
+ { "base_presence-gpio", &shps_base_presence, 1 },
+ { "device_power-int-gpio", &shps_device_power_int, 1 },
+ { "device_power-gpio", &shps_device_power, 1 },
+ { "device_presence-int-gpio", &shps_device_presence_int, 1 },
+ { "device_presence-gpio", &shps_device_presence, 1 },
+ { },
+};
+
+/* 5515a847-ed55-4b27-8352-cd320e10360a */
+static const guid_t shps_dsm_guid =
+ GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, 0x32, 0x0e, 0x10, 0x36, 0x0a);
+
+#define SHPS_DSM_REVISION 1
+
+enum shps_dsm_fn {
+ SHPS_DSM_FN_PCI_NUM_ENTRIES = 0x01,
+ SHPS_DSM_FN_PCI_GET_ENTRIES = 0x02,
+ SHPS_DSM_FN_IRQ_BASE_PRESENCE = 0x03,
+ SHPS_DSM_FN_IRQ_DEVICE_POWER = 0x04,
+ SHPS_DSM_FN_IRQ_DEVICE_PRESENCE = 0x05,
+};
+
+enum shps_irq_type {
+ /* NOTE: Must be in order of enum shps_dsm_fn above. */
+ SHPS_IRQ_TYPE_BASE_PRESENCE = 0,
+ SHPS_IRQ_TYPE_DEVICE_POWER = 1,
+ SHPS_IRQ_TYPE_DEVICE_PRESENCE = 2,
+ SHPS_NUM_IRQS,
+};
+
+static const char *const shps_gpio_names[] = {
+ [SHPS_IRQ_TYPE_BASE_PRESENCE] = "base_presence",
+ [SHPS_IRQ_TYPE_DEVICE_POWER] = "device_power",
+ [SHPS_IRQ_TYPE_DEVICE_PRESENCE] = "device_presence",
+};
+
+struct shps_device {
+ struct mutex lock[SHPS_NUM_IRQS]; /* Protects update in shps_dsm_notify_irq() */
+ struct gpio_desc *gpio[SHPS_NUM_IRQS];
+ unsigned int irq[SHPS_NUM_IRQS];
+};
+
+#define SHPS_IRQ_NOT_PRESENT ((unsigned int)-1)
+
+static enum shps_dsm_fn shps_dsm_fn_for_irq(enum shps_irq_type type)
+{
+ return SHPS_DSM_FN_IRQ_BASE_PRESENCE + type;
+}
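+
+/*
+ * This relies on both enums being in the same order: for example,
+ * SHPS_IRQ_TYPE_DEVICE_POWER (1) maps to 0x03 + 1 ==
+ * SHPS_DSM_FN_IRQ_DEVICE_POWER (0x04).
+ */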
+
+static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type type)
+{
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *result;
+ union acpi_object param;
+ int value;
+
+ mutex_lock(&sdev->lock[type]);
+
+ value = gpiod_get_value_cansleep(sdev->gpio[type]);
+ if (value < 0) {
+ mutex_unlock(&sdev->lock[type]);
+ dev_err(&pdev->dev, "failed to get gpio: %d (irq=%d)\n", type, value);
+ return;
+ }
+
+ dev_dbg(&pdev->dev, "IRQ notification via DSM (irq=%d, value=%d)\n", type, value);
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = value;
+
+ result = acpi_evaluate_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
+ shps_dsm_fn_for_irq(type), &param);
+
+ if (!result) {
+ dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n",
+ type, value);
+
+ } else if (result->type != ACPI_TYPE_BUFFER) {
+ dev_err(&pdev->dev,
+ "IRQ notification via DSM failed: unexpected result type (irq=%d, gpio=%d)\n",
+ type, value);
+
+ } else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
+ dev_err(&pdev->dev,
+ "IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n",
+ type, value);
+ }
+
+ mutex_unlock(&sdev->lock[type]);
+
+ if (result)
+ ACPI_FREE(result);
+}
+
+static irqreturn_t shps_handle_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ int type;
+
+ /* Figure out which IRQ we're handling. */
+ for (type = 0; type < SHPS_NUM_IRQS; type++)
+ if (irq == sdev->irq[type])
+ break;
+
+ /* We should have found our interrupt, if not: this is a bug. */
+ if (WARN(type >= SHPS_NUM_IRQS, "invalid IRQ number: %d\n", irq))
+ return IRQ_HANDLED;
+
+ /* Forward interrupt to ACPI via DSM. */
+ shps_dsm_notify_irq(pdev, type);
+ return IRQ_HANDLED;
+}
+
+static int shps_setup_irq(struct platform_device *pdev, enum shps_irq_type type)
+{
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ struct gpio_desc *gpiod;
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ const char *irq_name;
+ const int dsm = shps_dsm_fn_for_irq(type);
+ int status, irq;
+
+ /*
+ * Only set up interrupts that we actually need: The Surface Book 3
+ * does not have a DSM for base presence, so don't set up an interrupt
+ * for that.
+ */
+ if (!acpi_check_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION, BIT(dsm))) {
+ dev_dbg(&pdev->dev, "IRQ notification via DSM not present (irq=%d)\n", type);
+ return 0;
+ }
+
+ gpiod = devm_gpiod_get(&pdev->dev, shps_gpio_names[type], GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ irq = gpiod_to_irq(gpiod);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "shps-irq-%d", type);
+ if (!irq_name)
+ return -ENOMEM;
+
+ status = devm_request_threaded_irq(&pdev->dev, irq, NULL, shps_handle_irq,
+ flags, irq_name, pdev);
+ if (status)
+ return status;
+
+ dev_dbg(&pdev->dev, "set up irq %d as type %d\n", irq, type);
+
+ sdev->gpio[type] = gpiod;
+ sdev->irq[type] = irq;
+
+ return 0;
+}
+
+static int surface_hotplug_remove(struct platform_device *pdev)
+{
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ int i;
+
+ /* Ensure that IRQs have been fully handled and won't trigger any more. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++) {
+ if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
+ disable_irq(sdev->irq[i]);
+
+ mutex_destroy(&sdev->lock[i]);
+ }
+
+ return 0;
+}
+
+static int surface_hotplug_probe(struct platform_device *pdev)
+{
+ struct shps_device *sdev;
+ int status, i;
+
+ /*
+ * The MSHW0153 device is also present on the Surface Laptop 3;
+ * however, that model doesn't have a hot-pluggable PCIe device. It also
+ * doesn't have any GPIO interrupts/pins under the MSHW0153, so filter
+ * it out here.
+ */
+ if (gpiod_count(&pdev->dev, NULL) < 0)
+ return -ENODEV;
+
+ status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios);
+ if (status)
+ return status;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sdev);
+
+ /*
+ * Initialize IRQs so that we can safely call surface_hotplug_remove()
+ * on errors.
+ */
+ for (i = 0; i < SHPS_NUM_IRQS; i++)
+ sdev->irq[i] = SHPS_IRQ_NOT_PRESENT;
+
+ /* Set up IRQs. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++) {
+ mutex_init(&sdev->lock[i]);
+
+ status = shps_setup_irq(pdev, i);
+ if (status) {
+ dev_err(&pdev->dev, "failed to set up IRQ %d: %d\n", i, status);
+ goto err;
+ }
+ }
+
+ /* Ensure everything is up-to-date. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++)
+ if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
+ shps_dsm_notify_irq(pdev, i);
+
+ return 0;
+
+err:
+ surface_hotplug_remove(pdev);
+ return status;
+}
+
+static const struct acpi_device_id surface_hotplug_acpi_match[] = {
+ { "MSHW0153", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_hotplug_acpi_match);
+
+static struct platform_driver surface_hotplug_driver = {
+ .probe = surface_hotplug_probe,
+ .remove = surface_hotplug_remove,
+ .driver = {
+ .name = "surface_hotplug",
+ .acpi_match_table = surface_hotplug_acpi_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_hotplug_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Hot-Plug Signaling Driver for Surface Book Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 91e6176cdfbd..ad4e630e73e2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -49,18 +49,6 @@ config WMI_BMOF
To compile this driver as a module, choose M here: the module will
be called wmi-bmof.
-config ALIENWARE_WMI
- tristate "Alienware Special feature control"
- depends on ACPI
- depends on LEDS_CLASS
- depends on NEW_LEDS
- depends on ACPI_WMI
- help
- This is a driver for controlling Alienware BIOS driven
- features. It exposes an interface for controlling the AlienFX
- zones on Alienware machines that don't contain a dedicated AlienFX
- USB MCU such as the X51 and X51-R2.
-
config HUAWEI_WMI
tristate "Huawei WMI laptop extras driver"
depends on ACPI_BATTERY
@@ -327,169 +315,7 @@ config EEEPC_WMI
If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
here.
-config DCDBAS
- tristate "Dell Systems Management Base Driver"
- depends on X86
- help
- The Dell Systems Management Base Driver provides a sysfs interface
- for systems management software to perform System Management
- Interrupts (SMIs) and Host Control Actions (system power cycle or
- power off after OS shutdown) on certain Dell systems.
-
- See <file:Documentation/driver-api/dcdbas.rst> for more details on the driver
- and the Dell systems on which Dell systems management software makes
- use of this driver.
-
- Say Y or M here to enable the driver for use by Dell systems
- management software such as Dell OpenManage.
-
-#
-# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
-# backends are selected. The "depends" line prevents a configuration
-# where DELL_SMBIOS=y while either of those dependencies =m.
-#
-config DELL_SMBIOS
- tristate "Dell SMBIOS driver"
- depends on DCDBAS || DCDBAS=n
- depends on ACPI_WMI || ACPI_WMI=n
- help
- This provides support for the Dell SMBIOS calling interface.
- If you have a Dell computer you should enable this option.
-
- Be sure to select at least one backend for it to work properly.
-
-config DELL_SMBIOS_WMI
- bool "Dell SMBIOS driver WMI backend"
- default y
- depends on ACPI_WMI
- select DELL_WMI_DESCRIPTOR
- depends on DELL_SMBIOS
- help
- This provides an implementation for the Dell SMBIOS calling interface
- communicated over ACPI-WMI.
-
- If you have a Dell computer from >2007 you should say Y here.
- If you aren't sure and this module doesn't work for your computer
- it just won't load.
-
-config DELL_SMBIOS_SMM
- bool "Dell SMBIOS driver SMM backend"
- default y
- depends on DCDBAS
- depends on DELL_SMBIOS
- help
- This provides an implementation for the Dell SMBIOS calling interface
- communicated over SMI/SMM.
-
- If you have a Dell computer from <=2017 you should say Y here.
- If you aren't sure and this module doesn't work for your computer
- it just won't load.
-
-config DELL_LAPTOP
- tristate "Dell Laptop Extras"
- depends on DMI
- depends on BACKLIGHT_CLASS_DEVICE
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on RFKILL || RFKILL = n
- depends on SERIO_I8042
- depends on DELL_SMBIOS
- select POWER_SUPPLY
- select LEDS_CLASS
- select NEW_LEDS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
- help
- This driver adds support for rfkill and backlight control to Dell
- laptops (except for some models covered by the Compal driver).
-
-config DELL_RBTN
- tristate "Dell Airplane Mode Switch driver"
- depends on ACPI
- depends on INPUT
- depends on RFKILL
- help
- Say Y here if you want to support Dell Airplane Mode Switch ACPI
- device on Dell laptops. Sometimes it has names: DELLABCE or DELRBTN.
- This driver register rfkill device or input hotkey device depending
- on hardware type (hw switch slider or keyboard toggle button). For
- rfkill devices it receive HW switch events and set correct hard
- rfkill state.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-rbtn.
-
-config DELL_RBU
- tristate "BIOS update support for DELL systems via sysfs"
- depends on X86
- select FW_LOADER
- select FW_LOADER_USER_HELPER
- help
- Say m if you want to have the option of updating the BIOS for your
- DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
- supporting application to communicate with the BIOS regarding the new
- image for the image update to take effect.
- See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
-
-config DELL_SMO8800
- tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
- depends on ACPI
- help
- Say Y here if you want to support SMO88XX freefall devices
- on Dell Latitude laptops.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-smo8800.
-
-config DELL_WMI
- tristate "Dell WMI notifications"
- depends on ACPI_WMI
- depends on DMI
- depends on INPUT
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on DELL_SMBIOS
- select DELL_WMI_DESCRIPTOR
- select INPUT_SPARSEKMAP
- help
- Say Y here if you want to support WMI-based hotkeys on Dell laptops.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-wmi.
-
-config DELL_WMI_SYSMAN
- tristate "Dell WMI-based Systems management driver"
- depends on ACPI_WMI
- depends on DMI
- select NLS
- help
- This driver allows changing BIOS settings on many Dell machines from
- 2018 and newer without the use of any additional software.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-wmi-sysman.
-
-config DELL_WMI_DESCRIPTOR
- tristate
- depends on ACPI_WMI
-
-config DELL_WMI_AIO
- tristate "WMI Hotkeys for Dell All-In-One series"
- depends on ACPI_WMI
- depends on INPUT
- select INPUT_SPARSEKMAP
- help
- Say Y here if you want to support WMI-based hotkeys on Dell
- All-In-One machines.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-wmi-aio.
-
-config DELL_WMI_LED
- tristate "External LED on Dell Business Netbooks"
- depends on LEDS_CLASS
- depends on ACPI_WMI
- help
- This adds support for the Latitude 2100 and similar
- notebooks that have an external LED.
+source "drivers/platform/x86/dell/Kconfig"
config AMILO_RFKILL
tristate "Fujitsu-Siemens Amilo rfkill support"
@@ -624,7 +450,10 @@ config IDEAPAD_LAPTOP
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on ACPI_WMI || ACPI_WMI = n
+ select ACPI_PLATFORM_PROFILE
select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
help
This is a driver for Lenovo IdeaPad netbooks; it contains drivers for
the rfkill switch, hotkeys, fan control and backlight control.
@@ -655,6 +484,7 @@ config THINKPAD_ACPI
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
+ select ACPI_PLATFORM_PROFILE
select HWMON
select NVRAM
select NEW_LEDS
@@ -1327,21 +1157,6 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
-config INTEL_MFLD_THERMAL
- tristate "Thermal driver for Intel Medfield platform"
- depends on MFD_INTEL_MSIC && THERMAL
- help
- Say Y here to enable thermal driver support for the Intel Medfield
- platform.
-
-config INTEL_MID_POWER_BUTTON
- tristate "power button driver for Intel MID platforms"
- depends on INTEL_SCU && INPUT
- help
- This driver handles the power button on the Intel MID platforms.
-
- If unsure, say N.
-
config INTEL_MRFLD_PWRBTN
tristate "Intel Merrifield Basin Cove power button driver"
depends on INTEL_SOC_PMIC_MRFLD
@@ -1369,7 +1184,7 @@ config INTEL_PMC_CORE
- MPHY/PLL gating status (Sunrisepoint PCH only)
config INTEL_PMT_CLASS
- tristate "Intel Platform Monitoring Technology (PMT) Class driver"
+ tristate
help
The Intel Platform Monitoring Technology (PMT) class driver provides
the basic sysfs interface and file hierarchy used by PMT devices.
@@ -1382,6 +1197,7 @@ config INTEL_PMT_CLASS
config INTEL_PMT_TELEMETRY
tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
+ depends on MFD_INTEL_PMT
select INTEL_PMT_CLASS
help
The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
@@ -1393,6 +1209,7 @@ config INTEL_PMT_TELEMETRY
config INTEL_PMT_CRASHLOG
tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
+ depends on MFD_INTEL_PMT
select INTEL_PMT_CLASS
help
The Intel Platform Monitoring Technology (PMT) crashlog driver provides
@@ -1439,6 +1256,14 @@ config INTEL_SCU_PLATFORM
and SCU (sometimes called PMC as well). The driver currently
supports Intel Elkhart Lake and compatible platforms.
+config INTEL_SCU_WDT
+ bool
+ default INTEL_SCU_PCI
+ depends on INTEL_MID_WATCHDOG
+ help
+ This is platform-specific code to instantiate the watchdog device
+ on ACPI-based Intel MID platforms.
+
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
depends on INTEL_SCU
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 581475f59819..60d554073749 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
-obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
@@ -37,20 +36,7 @@ obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
# Dell
-obj-$(CONFIG_DCDBAS) += dcdbas.o
-obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
-dell-smbios-objs := dell-smbios-base.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
-obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
-obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
-obj-$(CONFIG_DELL_RBU) += dell_rbu.o
-obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
-obj-$(CONFIG_DELL_WMI) += dell-wmi.o
-obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
-obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
-obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
-obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
+obj-$(CONFIG_X86_PLATFORM_DRIVERS_DELL) += dell/
# Fujitsu
obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
@@ -137,8 +123,6 @@ obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
# Intel PMIC / PMC / P-Unit devices
obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
-obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o
obj-$(CONFIG_INTEL_PMT_CLASS) += intel_pmt_class.o
@@ -148,6 +132,7 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
+obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a5357da885..85db9403cc14 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -30,7 +30,6 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
-ACPI_MODULE_NAME(KBUILD_MODNAME);
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
MODULE_LICENSE("GPL");
@@ -1605,7 +1604,8 @@ static void acer_kbd_dock_get_initial_state(void)
status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status"));
+ pr_err("Error getting keyboard-dock initial status: %s\n",
+ acpi_format_exception(status));
return;
}
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index b6aa6e5514f4..6b8b3ab8db48 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -336,7 +336,8 @@ static void acerhdf_check_param(struct thermal_zone_device *thermal)
pr_notice("interval changed to: %d\n", interval);
if (thermal)
- thermal->polling_delay = interval*1000;
+ thermal->polling_delay_jiffies =
+ round_jiffies(msecs_to_jiffies(interval * 1000));
prev_interval = interval;
}
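The change above tracks the thermal core's switch from a millisecond
polling_delay to polling_delay_jiffies. A minimal sketch of the conversion as
a standalone helper (illustration only, not driver code; interval is in
seconds, as in acerhdf):

#include <linux/jiffies.h>
#include <linux/timer.h>

static unsigned long interval_to_polling_jiffies(int interval)
{
        /* round_jiffies() aligns the expiry to a tick boundary to batch wakeups */
        return round_jiffies(msecs_to_jiffies(interval * 1000));
}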
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 0102bf1c7916..b9da58ee9b1e 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
@@ -210,31 +210,39 @@ static int amd_pmc_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (!rdev || !pci_match_id(pmc_pci_ids, rdev))
+ if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
+ pci_dev_put(rdev);
return -ENODEV;
+ }
dev->cpu_id = rdev->device;
err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
if (err) {
dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
return pcibios_err_to_errno(err);
}
err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
- if (err)
+ if (err) {
+ pci_dev_put(rdev);
return pcibios_err_to_errno(err);
+ }
base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
if (err) {
dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
return pcibios_err_to_errno(err);
}
err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
- if (err)
+ if (err) {
+ pci_dev_put(rdev);
return pcibios_err_to_errno(err);
+ }
base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
pci_dev_put(rdev);
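The added pci_dev_put() calls plug a reference leak on each early return. An
alternative shape, sketched here with a hypothetical helper (not what the
patch adds), centralizes the put behind a single exit label; the AMD_PMC_*
constants are the driver's own:

#include <linux/pci.h>

/* Consumes the rdev reference: the caller must not use rdev afterwards. */
static int amd_pmc_read_smu_base_lo(struct pci_dev *rdev, u32 *base_addr_lo)
{
        u32 val;
        int err;

        err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS,
                                     AMD_PMC_BASE_ADDR_LO);
        if (err) {
                err = pcibios_err_to_errno(err);
                goto out_put;
        }

        err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
        if (err) {
                err = pcibios_err_to_errno(err);
                goto out_put;
        }

        *base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
        err = 0;
out_put:
        pci_dev_put(rdev);
        return err;
}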
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 0edafe687fa9..bfea656e910c 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -861,7 +861,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
* The significance of others is yet to be found.
*/
rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
- if (!ACPI_FAILURE(rv))
+ if (ACPI_SUCCESS(rv))
len += sprintf(page + len, "SFUN value : %#x\n",
(uint) temp);
/*
@@ -873,7 +873,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
* takes several seconds to run on some systems.
*/
rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
- if (!ACPI_FAILURE(rv))
+ if (ACPI_SUCCESS(rv))
len += sprintf(page + len, "HWRS value : %#x\n",
(uint) temp);
/*
@@ -884,7 +884,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
* silently ignored.
*/
rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
- if (!ACPI_FAILURE(rv))
+ if (ACPI_SUCCESS(rv))
len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
if (asus->dsdt_info) {
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
new file mode 100644
index 000000000000..e0a55337f51a
--- /dev/null
+++ b/drivers/platform/x86/dell/Kconfig
@@ -0,0 +1,207 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Dell X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DRIVERS_DELL
+ bool "Dell X86 Platform Specific Device Drivers"
+ default n
+ depends on X86_PLATFORM_DEVICES
+ help
+ Say Y here to get to see options for device drivers for various
+ Dell x86 platforms, including vendor-specific laptop extension drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DRIVERS_DELL
+
+config ALIENWARE_WMI
+ tristate "Alienware Special feature control"
+ default m
+ depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on ACPI_WMI
+ help
+ This is a driver for controlling Alienware BIOS driven
+ features. It exposes an interface for controlling the AlienFX
+ zones on Alienware machines that don't contain a dedicated AlienFX
+ USB MCU such as the X51 and X51-R2.
+
+config DCDBAS
+ tristate "Dell Systems Management Base Driver"
+ default m
+ depends on X86
+ help
+ The Dell Systems Management Base Driver provides a sysfs interface
+ for systems management software to perform System Management
+ Interrupts (SMIs) and Host Control Actions (system power cycle or
+ power off after OS shutdown) on certain Dell systems.
+
+ See <file:Documentation/driver-api/dcdbas.rst> for more details on the driver
+ and the Dell systems on which Dell systems management software makes
+ use of this driver.
+
+ Say Y or M here to enable the driver for use by Dell systems
+ management software such as Dell OpenManage.
+
+config DELL_LAPTOP
+ tristate "Dell Laptop Extras"
+ default m
+ depends on DMI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL || RFKILL = n
+ depends on SERIO_I8042
+ depends on DELL_SMBIOS
+ select POWER_SUPPLY
+ select LEDS_CLASS
+ select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ help
+ This driver adds support for rfkill and backlight control to Dell
+ laptops (except for some models covered by the Compal driver).
+
+config DELL_RBU
+ tristate "BIOS update support for DELL systems via sysfs"
+ default m
+ depends on X86
+ select FW_LOADER
+ select FW_LOADER_USER_HELPER
+ help
+ Say M if you want to have the option of updating the BIOS on your
+ DELL system. Note that you need a supporting application, such as Dell
+ OpenManage or a Dell Update Package (DUP), to communicate the new
+ image to the BIOS for the update to take effect.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
+
+config DELL_RBTN
+ tristate "Dell Airplane Mode Switch driver"
+ default m
+ depends on ACPI
+ depends on INPUT
+ depends on RFKILL
+ help
+ Say Y here if you want to support the Dell Airplane Mode Switch ACPI
+ device on Dell laptops. It is sometimes named DELLABCE or DELRBTN.
+ This driver registers an rfkill device or an input hotkey device,
+ depending on the hardware type (HW switch slider or keyboard toggle
+ button). For rfkill devices it receives HW switch events and sets the
+ correct hard rfkill state.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-rbtn.
+
+#
+# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
+# backends are selected. The "depends" line prevents a configuration
+# where DELL_SMBIOS=y while either of those dependencies =m.
+#
+config DELL_SMBIOS
+ tristate "Dell SMBIOS driver"
+ default m
+ depends on DCDBAS || DCDBAS=n
+ depends on ACPI_WMI || ACPI_WMI=n
+ help
+ This provides support for the Dell SMBIOS calling interface.
+ If you have a Dell computer you should enable this option.
+
+ Be sure to select at least one backend for it to work properly.
+
+config DELL_SMBIOS_WMI
+ bool "Dell SMBIOS driver WMI backend"
+ default y
+ depends on ACPI_WMI
+ select DELL_WMI_DESCRIPTOR
+ depends on DELL_SMBIOS
+ help
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over ACPI-WMI.
+
+ If you have a Dell computer from >2007 you should say Y here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMBIOS_SMM
+ bool "Dell SMBIOS driver SMM backend"
+ default y
+ depends on DCDBAS
+ depends on DELL_SMBIOS
+ help
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over SMI/SMM.
+
+ If you have a Dell computer from <=2017 you should say Y here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMO8800
+ tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
+ default m
+ depends on ACPI
+ help
+ Say Y here if you want to support SMO88XX freefall devices
+ on Dell Latitude laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-smo8800.
+
+config DELL_WMI
+ tristate "Dell WMI notifications"
+ default m
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on DELL_SMBIOS
+ select DELL_WMI_DESCRIPTOR
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on Dell laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi.
+
+config DELL_WMI_AIO
+ tristate "WMI Hotkeys for Dell All-In-One series"
+ default m
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on Dell
+ All-In-One machines.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi-aio.
+
+config DELL_WMI_DESCRIPTOR
+ tristate
+ default m
+ depends on ACPI_WMI
+
+config DELL_WMI_LED
+ tristate "External LED on Dell Business Netbooks"
+ default m
+ depends on LEDS_CLASS
+ depends on ACPI_WMI
+ help
+ This adds support for the Latitude 2100 and similar
+ notebooks that have an external LED.
+
+config DELL_WMI_SYSMAN
+ tristate "Dell WMI-based Systems management driver"
+ default m
+ depends on ACPI_WMI
+ depends on DMI
+ select NLS
+ help
+ This driver allows changing BIOS settings on many Dell machines from
+ 2018 and newer without the use of any additional software.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi-sysman.
+
+endif # X86_PLATFORM_DRIVERS_DELL
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
new file mode 100644
index 000000000000..d720a3e42ae3
--- /dev/null
+++ b/drivers/platform/x86/dell/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/dell
+# Dell x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
+obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+dell-smbios-objs := dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
+obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
+obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 5bb2859c8285..5bb2859c8285 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
diff --git a/drivers/platform/x86/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index d513a59a5d47..d513a59a5d47 100644
--- a/drivers/platform/x86/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
diff --git a/drivers/platform/x86/dcdbas.h b/drivers/platform/x86/dell/dcdbas.h
index c3cca5433525..c3cca5433525 100644
--- a/drivers/platform/x86/dcdbas.h
+++ b/drivers/platform/x86/dell/dcdbas.h
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 70edc5bb3a14..70edc5bb3a14 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
index a89fad47ff13..a89fad47ff13 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell/dell-rbtn.c
diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell/dell-rbtn.h
index 5e030f926c58..5e030f926c58 100644
--- a/drivers/platform/x86/dell-rbtn.h
+++ b/drivers/platform/x86/dell/dell-rbtn.h
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 3a1dbf199441..3a1dbf199441 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c
index 97c52a839a3e..97c52a839a3e 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell/dell-smbios-smm.c
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index 27a298b7c541..27a298b7c541 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
index 75fa8ea0476d..75fa8ea0476d 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell/dell-smbios.h
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 5d9304a7de1b..5d9304a7de1b 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell/dell-wmi-aio.c
index c7b7f1e403fb..c7b7f1e403fb 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell/dell-wmi-aio.c
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell/dell-wmi-descriptor.c
index a068900ae8a1..a068900ae8a1 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.c
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell/dell-wmi-descriptor.h
index 1f469fef1535..1f469fef1535 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.h
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.h
diff --git a/drivers/platform/x86/dell-wmi-led.c b/drivers/platform/x86/dell/dell-wmi-led.c
index 5bedaf7f0633..5bedaf7f0633 100644
--- a/drivers/platform/x86/dell-wmi-led.c
+++ b/drivers/platform/x86/dell/dell-wmi-led.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/Makefile b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
index 825fb2fbeea8..825fb2fbeea8 100644
--- a/drivers/platform/x86/dell-wmi-sysman/Makefile
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
diff --git a/drivers/platform/x86/dell-wmi-sysman/biosattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
index f95d8ddace5a..f95d8ddace5a 100644
--- a/drivers/platform/x86/dell-wmi-sysman/biosattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
index b80f2a62ea3f..b80f2a62ea3f 100644
--- a/drivers/platform/x86/dell-wmi-sysman/dell-wmi-sysman.h
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
diff --git a/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
index 80f4b7785c6c..80f4b7785c6c 100644
--- a/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
index 75aedbb733be..75aedbb733be 100644
--- a/drivers/platform/x86/dell-wmi-sysman/int-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
index 3abcd95477c0..3abcd95477c0 100644
--- a/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/passwordattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
index 5780b4d94759..5780b4d94759 100644
--- a/drivers/platform/x86/dell-wmi-sysman/passwordattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
index ac75dce88a4c..ac75dce88a4c 100644
--- a/drivers/platform/x86/dell-wmi-sysman/string-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
diff --git a/drivers/platform/x86/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index dc6dd531c996..cb81010ba1a2 100644
--- a/drivers/platform/x86/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -419,13 +419,17 @@ static int init_bios_attributes(int attr_type, const char *guid)
return retval;
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj)
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
return -ENODEV;
elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
while (elements) {
/* sanity checking */
+ if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto nextobj;
+ }
if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
pr_debug("empty attribute found\n");
goto nextobj;
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell/dell-wmi.c
index bbdb3e860892..bbdb3e860892 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell/dell-wmi.c
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 03c3ff34bcf5..03c3ff34bcf5 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index ecd477964d11..e94e59283ecb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -247,7 +251,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
ret = bios_return->return_code;
if (ret) {
- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, ret);
goto out_free;
}
@@ -653,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
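A note on the semantics: with the module parameter left at its default of -1
("auto"), the enable_tablet_mode_sw > 0 test above keeps SW_TABLET_MODE
reporting disabled; only an explicit 1 enables it. Expressed as a predicate
(hypothetical helper, not in the patch):

static bool hp_wmi_tablet_sw_requested(void)
{
        return enable_tablet_mode_sw > 0;       /* -1 (auto) and 0 disable */
}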
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index b457b0babde3..2cce82579d09 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[] = {
{}
};
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_inst_acpi_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[] = {
+ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ * { }
+ * };
+ */
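Should the interrupt problems ever be resolved, restoring the device would
amount to the following, reassembled from the lines this patch removes (a
sketch, not new functionality):

static const struct i2c_inst_data int3515_data[] = {
        { "tps6598x", IRQ_RESOURCE_APIC, 0 },
        { "tps6598x", IRQ_RESOURCE_APIC, 1 },
        { "tps6598x", IRQ_RESOURCE_APIC, 2 },
        { "tps6598x", IRQ_RESOURCE_APIC, 3 },
        {}
};

static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
        { "BSG1160", (unsigned long)bsg1160_data },
        { "BSG2150", (unsigned long)bsg2150_data },
        { "INT3515", (unsigned long)int3515_data },     /* restored entry */
        {}
};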
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[] = {
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 7598cd46cf60..6cb5ad4be231 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -8,33 +8,34 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
#include <linux/acpi.h>
-#include <linux/rfkill.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/input/sparse-keymap.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/i8042.h>
-#include <linux/dmi.h>
#include <linux/device.h>
-#include <acpi/video.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/rfkill.h>
+#include <linux/seq_file.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
-#define IDEAPAD_RFKILL_DEV_NUM (3)
+#include <acpi/video.h>
-#define BM_CONSERVATION_BIT (5)
-#define HA_FNLOCK_BIT (10)
+#include <dt-bindings/leds/common.h>
-#define CFG_BT_BIT (16)
-#define CFG_3G_BIT (17)
-#define CFG_WIFI_BIT (18)
-#define CFG_CAMERA_BIT (19)
+#define IDEAPAD_RFKILL_DEV_NUM 3
#if IS_ENABLED(CONFIG_ACPI_WMI)
static const char *const ideapad_wmi_fnesc_events[] = {
@@ -44,10 +45,39 @@ static const char *const ideapad_wmi_fnesc_events[] = {
#endif
enum {
- BMCMD_CONSERVATION_ON = 3,
- BMCMD_CONSERVATION_OFF = 5,
- HACMD_FNLOCK_ON = 0xe,
- HACMD_FNLOCK_OFF = 0xf,
+ CFG_CAP_BT_BIT = 16,
+ CFG_CAP_3G_BIT = 17,
+ CFG_CAP_WIFI_BIT = 18,
+ CFG_CAP_CAM_BIT = 19,
+ CFG_CAP_TOUCHPAD_BIT = 30,
+};
+
+enum {
+ GBMD_CONSERVATION_STATE_BIT = 5,
+};
+
+enum {
+ SBMC_CONSERVATION_ON = 3,
+ SBMC_CONSERVATION_OFF = 5,
+};
+
+enum {
+ HALS_KBD_BL_SUPPORT_BIT = 4,
+ HALS_KBD_BL_STATE_BIT = 5,
+ HALS_USB_CHARGING_SUPPORT_BIT = 6,
+ HALS_USB_CHARGING_STATE_BIT = 7,
+ HALS_FNLOCK_SUPPORT_BIT = 9,
+ HALS_FNLOCK_STATE_BIT = 10,
+ HALS_HOTKEYS_PRIMARY_BIT = 11,
+};
+
+enum {
+ SALS_KBD_BL_ON = 0x8,
+ SALS_KBD_BL_OFF = 0x9,
+ SALS_USB_CHARGING_ON = 0xa,
+ SALS_USB_CHARGING_OFF = 0xb,
+ SALS_FNLOCK_ON = 0xe,
+ SALS_FNLOCK_OFF = 0xf,
};
enum {
@@ -77,6 +107,13 @@ enum {
VPCCMD_W_BL_POWER = 0x33,
};
+struct ideapad_dytc_priv {
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct mutex mutex; /* protects the DYTC interface */
+ struct ideapad_private *priv;
+};
+
struct ideapad_rfk_priv {
int dev;
struct ideapad_private *priv;
@@ -89,10 +126,25 @@ struct ideapad_private {
struct platform_device *platform_device;
struct input_dev *inputdev;
struct backlight_device *blightdev;
+ struct ideapad_dytc_priv *dytc;
struct dentry *debug;
unsigned long cfg;
- bool has_hw_rfkill_switch;
const char *fnesc_guid;
+ struct {
+ bool conservation_mode : 1;
+ bool dytc : 1;
+ bool fan_mode : 1;
+ bool fn_lock : 1;
+ bool hw_rfkill_switch : 1;
+ bool kbd_bl : 1;
+ bool touchpad_ctrl_via_ec : 1;
+ bool usb_charging : 1;
+ } features;
+ struct {
+ bool initialized;
+ struct led_classdev led;
+ unsigned int last_brightness;
+ } kbd_bl;
};
static bool no_bt_rfkill;
@@ -102,64 +154,82 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
/*
* ACPI Helpers
*/
-#define IDEAPAD_EC_TIMEOUT (200) /* in ms */
+#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
-static int read_method_int(acpi_handle handle, const char *method, int *val)
+static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
{
- acpi_status status;
unsigned long long result;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, (char *)name, NULL, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *res = result;
- status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
- if (ACPI_FAILURE(status)) {
- *val = -1;
- return -1;
- }
- *val = result;
return 0;
+}
+
+static int exec_simple_method(acpi_handle handle, const char *name, unsigned long arg)
+{
+ acpi_status status = acpi_execute_simple_method(handle, (char *)name, arg);
+ return ACPI_FAILURE(status) ? -EIO : 0;
}
-static int method_gbmd(acpi_handle handle, unsigned long *ret)
+static int eval_gbmd(acpi_handle handle, unsigned long *res)
{
- int result, val;
+ return eval_int(handle, "GBMD", res);
+}
- result = read_method_int(handle, "GBMD", &val);
- *ret = val;
- return result;
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
+{
+ return exec_simple_method(handle, "SBMC", arg);
}
-static int method_int1(acpi_handle handle, char *method, int cmd)
+static int eval_hals(acpi_handle handle, unsigned long *res)
{
- acpi_status status;
+ return eval_int(handle, "HALS", res);
+}
- status = acpi_execute_simple_method(handle, method, cmd);
- return ACPI_FAILURE(status) ? -1 : 0;
+static int exec_sals(acpi_handle handle, unsigned long arg)
+{
+ return exec_simple_method(handle, "SALS", arg);
}
-static int method_vpcr(acpi_handle handle, int cmd, int *ret)
+static int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, unsigned long *res)
{
- acpi_status status;
- unsigned long long result;
struct acpi_object_list params;
+ unsigned long long result;
union acpi_object in_obj;
+ acpi_status status;
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = cmd;
+ in_obj.integer.value = arg;
+
+ status = acpi_evaluate_integer(handle, (char *)name, &params, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
- status = acpi_evaluate_integer(handle, "VPCR", &params, &result);
+ if (res)
+ *res = result;
- if (ACPI_FAILURE(status)) {
- *ret = -1;
- return -1;
- }
- *ret = result;
return 0;
+}
+static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "DYTC", cmd, res);
}
-static int method_vpcw(acpi_handle handle, int cmd, int data)
+static int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "VPCR", cmd, res);
+}
+
+static int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data)
{
struct acpi_object_list params;
union acpi_object in_obj[2];
@@ -173,55 +243,68 @@ static int method_vpcw(acpi_handle handle, int cmd, int data)
in_obj[1].integer.value = data;
status = acpi_evaluate_object(handle, "VPCW", &params, NULL);
- if (status != AE_OK)
- return -1;
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
return 0;
}
-static int read_ec_data(acpi_handle handle, int cmd, unsigned long *data)
+static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data)
{
- int val;
- unsigned long int end_jiffies;
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
- if (method_vpcw(handle, 1, cmd))
- return -1;
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
- for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1;
- time_before(jiffies, end_jiffies);) {
+ while (time_before(jiffies, end_jiffies)) {
schedule();
- if (method_vpcr(handle, 1, &val))
- return -1;
- if (val == 0) {
- if (method_vpcr(handle, 0, &val))
- return -1;
- *data = val;
- return 0;
- }
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
+ if (val == 0)
+ return eval_vpcr(handle, 0, data);
}
- pr_err("timeout in %s\n", __func__);
- return -1;
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
}
-static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
+static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data)
{
- int val;
- unsigned long int end_jiffies;
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 0, data);
+ if (err)
+ return err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
- if (method_vpcw(handle, 0, data))
- return -1;
- if (method_vpcw(handle, 1, cmd))
- return -1;
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
- for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1;
- time_before(jiffies, end_jiffies);) {
+ while (time_before(jiffies, end_jiffies)) {
schedule();
- if (method_vpcr(handle, 1, &val))
- return -1;
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
if (val == 0)
return 0;
}
- pr_err("timeout in %s\n", __func__);
- return -1;
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
}
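As a usage sketch of the reworked helpers (mirroring the camera_power
attribute later in this patch; the function name is illustrative only),
reading an EC value now looks like this:

static int ideapad_camera_state_example(struct ideapad_private *priv)
{
        unsigned long value;
        int err;

        /* VPCCMD_R_CAMERA is one of the driver's VPC opcodes */
        err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value);
        if (err)
                return err;

        return !!value;         /* 1 = camera powered, 0 = off */
}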
/*
@@ -232,44 +315,37 @@ static int debugfs_status_show(struct seq_file *s, void *data)
struct ideapad_private *priv = s->private;
unsigned long value;
- if (!priv)
- return -EINVAL;
-
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
- seq_printf(s, "Backlight max:\t%lu\n", value);
+ seq_printf(s, "Backlight max: %lu\n", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
- seq_printf(s, "Backlight now:\t%lu\n", value);
+ seq_printf(s, "Backlight now: %lu\n", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
- seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
- seq_printf(s, "=====================\n");
+ seq_printf(s, "BL power value: %s (%lu)\n", value ? "on" : "off", value);
+
+ seq_puts(s, "=====================\n");
if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
- seq_printf(s, "Radio status:\t%s(%lu)\n",
- value ? "On" : "Off", value);
+ seq_printf(s, "Radio status: %s (%lu)\n", value ? "on" : "off", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
- seq_printf(s, "Wifi status:\t%s(%lu)\n",
- value ? "On" : "Off", value);
+ seq_printf(s, "Wifi status: %s (%lu)\n", value ? "on" : "off", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
- seq_printf(s, "BT status:\t%s(%lu)\n",
- value ? "On" : "Off", value);
+ seq_printf(s, "BT status: %s (%lu)\n", value ? "on" : "off", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
- seq_printf(s, "3G status:\t%s(%lu)\n",
- value ? "On" : "Off", value);
- seq_printf(s, "=====================\n");
+ seq_printf(s, "3G status: %s (%lu)\n", value ? "on" : "off", value);
+
+ seq_puts(s, "=====================\n");
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
- seq_printf(s, "Touchpad status:%s(%lu)\n",
- value ? "On" : "Off", value);
+ seq_printf(s, "Touchpad status: %s (%lu)\n", value ? "on" : "off", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
- seq_printf(s, "Camera status:\t%s(%lu)\n",
- value ? "On" : "Off", value);
+ seq_printf(s, "Camera status: %s (%lu)\n", value ? "on" : "off", value);
+
seq_puts(s, "=====================\n");
- if (!method_gbmd(priv->adev->handle, &value)) {
- seq_printf(s, "Conservation mode:\t%s(%lu)\n",
- test_bit(BM_CONSERVATION_BIT, &value) ? "On" : "Off",
- value);
- }
+ if (!eval_gbmd(priv->adev->handle, &value))
+ seq_printf(s, "GBMD: %#010lx\n", value);
+ if (!eval_hals(priv->adev->handle, &value))
+ seq_printf(s, "HALS: %#010lx\n", value);
return 0;
}
@@ -279,39 +355,41 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
{
struct ideapad_private *priv = s->private;
- if (!priv) {
- seq_printf(s, "cfg: N/A\n");
- } else {
- seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
- priv->cfg);
- if (test_bit(CFG_BT_BIT, &priv->cfg))
- seq_printf(s, "Bluetooth ");
- if (test_bit(CFG_3G_BIT, &priv->cfg))
- seq_printf(s, "3G ");
- if (test_bit(CFG_WIFI_BIT, &priv->cfg))
- seq_printf(s, "Wireless ");
- if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
- seq_printf(s, "Camera ");
- seq_printf(s, "\nGraphic: ");
- switch ((priv->cfg)&0x700) {
- case 0x100:
- seq_printf(s, "Intel");
- break;
- case 0x200:
- seq_printf(s, "ATI");
- break;
- case 0x300:
- seq_printf(s, "Nvidia");
- break;
- case 0x400:
- seq_printf(s, "Intel and ATI");
- break;
- case 0x500:
- seq_printf(s, "Intel and Nvidia");
- break;
- }
- seq_printf(s, "\n");
+ seq_printf(s, "_CFG: %#010lx\n\n", priv->cfg);
+
+ seq_puts(s, "Capabilities:");
+ if (test_bit(CFG_CAP_BT_BIT, &priv->cfg))
+ seq_puts(s, " bluetooth");
+ if (test_bit(CFG_CAP_3G_BIT, &priv->cfg))
+ seq_puts(s, " 3G");
+ if (test_bit(CFG_CAP_WIFI_BIT, &priv->cfg))
+ seq_puts(s, " wifi");
+ if (test_bit(CFG_CAP_CAM_BIT, &priv->cfg))
+ seq_puts(s, " camera");
+ if (test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg))
+ seq_puts(s, " touchpad");
+ seq_puts(s, "\n");
+
+ seq_puts(s, "Graphics: ");
+ switch (priv->cfg & 0x700) {
+ case 0x100:
+ seq_puts(s, "Intel");
+ break;
+ case 0x200:
+ seq_puts(s, "ATI");
+ break;
+ case 0x300:
+ seq_puts(s, "Nvidia");
+ break;
+ case 0x400:
+ seq_puts(s, "Intel and ATI");
+ break;
+ case 0x500:
+ seq_puts(s, "Intel and Nvidia");
+ break;
}
+ seq_puts(s, "\n");
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_cfg);
@@ -323,8 +401,8 @@ static void ideapad_debugfs_init(struct ideapad_private *priv)
dir = debugfs_create_dir("ideapad", NULL);
priv->debug = dir;
- debugfs_create_file("cfg", S_IRUGO, dir, priv, &debugfs_cfg_fops);
- debugfs_create_file("status", S_IRUGO, dir, priv, &debugfs_status_fops);
+ debugfs_create_file("cfg", 0444, dir, priv, &debugfs_cfg_fops);
+ debugfs_create_file("status", 0444, dir, priv, &debugfs_status_fops);
}
static void ideapad_debugfs_exit(struct ideapad_private *priv)
@@ -336,207 +414,256 @@ static void ideapad_debugfs_exit(struct ideapad_private *priv)
/*
* sysfs
*/
-static ssize_t show_ideapad_cam(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t camera_power_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- unsigned long result;
struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
- return sprintf(buf, "-1\n");
- return sprintf(buf, "%lu\n", result);
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!result);
}
-static ssize_t store_ideapad_cam(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t camera_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int ret, state;
struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
+ if (err)
+ return err;
- if (!count)
- return 0;
- if (sscanf(buf, "%i", &state) != 1)
- return -EINVAL;
- ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
- if (ret < 0)
- return -EIO;
return count;
}
-static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
+static DEVICE_ATTR_RW(camera_power);
-static ssize_t show_ideapad_fan(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t conservation_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- unsigned long result;
struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
+
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
- return sprintf(buf, "-1\n");
- return sprintf(buf, "%lu\n", result);
+ return sysfs_emit(buf, "%d\n", !!test_bit(GBMD_CONSERVATION_STATE_BIT, &result));
}
-static ssize_t store_ideapad_fan(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t conservation_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int ret, state;
struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
+ if (err)
+ return err;
- if (!count)
- return 0;
- if (sscanf(buf, "%i", &state) != 1)
- return -EINVAL;
- if (state < 0 || state > 4 || state == 3)
- return -EINVAL;
- ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
- if (ret < 0)
- return -EIO;
return count;
}
-static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan);
+static DEVICE_ATTR_RW(conservation_mode);
-static ssize_t touchpad_show(struct device *dev,
+static ssize_t fan_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
unsigned long result;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result);
+ if (err)
+ return err;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result))
- return sprintf(buf, "-1\n");
- return sprintf(buf, "%lu\n", result);
+ return sysfs_emit(buf, "%lu\n", result);
}
-/* Switch to RO for now: It might be revisited in the future */
-static ssize_t __maybe_unused touchpad_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
- bool state;
- int ret;
+ unsigned int state;
+ int err;
- ret = kstrtobool(buf, &state);
- if (ret)
- return ret;
+ err = kstrtouint(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state > 4 || state == 3)
+ return -EINVAL;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
+ if (err)
+ return err;
- ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
- if (ret < 0)
- return -EIO;
return count;
}
-static DEVICE_ATTR_RO(touchpad);
+static DEVICE_ATTR_RW(fan_mode);
-static ssize_t conservation_mode_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
- unsigned long result;
+ unsigned long hals;
+ int err;
- if (method_gbmd(priv->adev->handle, &result))
- return sprintf(buf, "-1\n");
- return sprintf(buf, "%u\n", test_bit(BM_CONSERVATION_BIT, &result));
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!test_bit(HALS_FNLOCK_STATE_BIT, &hals));
}
-static ssize_t conservation_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
bool state;
- int ret;
+ int err;
- ret = kstrtobool(buf, &state);
- if (ret)
- return ret;
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ if (err)
+ return err;
- ret = method_int1(priv->adev->handle, "SBMC", state ?
- BMCMD_CONSERVATION_ON :
- BMCMD_CONSERVATION_OFF);
- if (ret < 0)
- return -EIO;
return count;
}
-static DEVICE_ATTR_RW(conservation_mode);
+static DEVICE_ATTR_RW(fn_lock);
-static ssize_t fn_lock_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t touchpad_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
unsigned long result;
- int hals;
- int fail = read_method_int(priv->adev->handle, "HALS", &hals);
+ int err;
- if (fail)
- return sprintf(buf, "-1\n");
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result);
+ if (err)
+ return err;
- result = hals;
- return sprintf(buf, "%u\n", test_bit(HA_FNLOCK_BIT, &result));
+ return sysfs_emit(buf, "%d\n", !!result);
}
-static ssize_t fn_lock_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
bool state;
- int ret;
+ int err;
- ret = kstrtobool(buf, &state);
- if (ret)
- return ret;
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
+ if (err)
+ return err;
- ret = method_int1(priv->adev->handle, "SALS", state ?
- HACMD_FNLOCK_ON :
- HACMD_FNLOCK_OFF);
- if (ret < 0)
- return -EIO;
return count;
}
-static DEVICE_ATTR_RW(fn_lock);
+static DEVICE_ATTR_RW(touchpad);
+
+static ssize_t usb_charging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long hals;
+ int err;
+
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!test_bit(HALS_USB_CHARGING_STATE_BIT, &hals));
+}
+static ssize_t usb_charging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sals(priv->adev->handle, state ? SALS_USB_CHARGING_ON : SALS_USB_CHARGING_OFF);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charging);
static struct attribute *ideapad_attributes[] = {
&dev_attr_camera_power.attr,
- &dev_attr_fan_mode.attr,
- &dev_attr_touchpad.attr,
&dev_attr_conservation_mode.attr,
+ &dev_attr_fan_mode.attr,
&dev_attr_fn_lock.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_usb_charging.attr,
NULL
};
static umode_t ideapad_is_visible(struct kobject *kobj,
- struct attribute *attr,
- int idx)
+ struct attribute *attr,
+ int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct ideapad_private *priv = dev_get_drvdata(dev);
- bool supported;
+ bool supported = true;
if (attr == &dev_attr_camera_power.attr)
- supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
- else if (attr == &dev_attr_fan_mode.attr) {
- unsigned long value;
- supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
- &value);
- } else if (attr == &dev_attr_conservation_mode.attr) {
- supported = acpi_has_method(priv->adev->handle, "GBMD") &&
- acpi_has_method(priv->adev->handle, "SBMC");
- } else if (attr == &dev_attr_fn_lock.attr) {
- supported = acpi_has_method(priv->adev->handle, "HALS") &&
- acpi_has_method(priv->adev->handle, "SALS");
- } else
- supported = true;
+ supported = test_bit(CFG_CAP_CAM_BIT, &priv->cfg);
+ else if (attr == &dev_attr_conservation_mode.attr)
+ supported = priv->features.conservation_mode;
+ else if (attr == &dev_attr_fan_mode.attr)
+ supported = priv->features.fan_mode;
+ else if (attr == &dev_attr_fn_lock.attr)
+ supported = priv->features.fn_lock;
+ else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->features.touchpad_ctrl_via_ec &&
+ test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg);
+ else if (attr == &dev_attr_usb_charging.attr)
+ supported = priv->features.usb_charging;
return supported ? attr->mode : 0;
}
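The feature flags consulted above are filled in at probe time, which this
excerpt does not show. A minimal sketch of such detection, assuming the ACPI
method names used elsewhere in this patch (the helper name is illustrative):

static void ideapad_detect_features_example(struct ideapad_private *priv)
{
        acpi_handle handle = priv->adev->handle;
        unsigned long val;

        priv->features.conservation_mode = acpi_has_method(handle, "GBMD") &&
                                           acpi_has_method(handle, "SBMC");
        priv->features.fan_mode = !read_ec_data(handle, VPCCMD_R_FAN, &val);
        priv->features.fn_lock = acpi_has_method(handle, "HALS") &&
                                 acpi_has_method(handle, "SALS");
        priv->features.dytc = acpi_has_method(handle, "DYTC");
}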
@@ -547,6 +674,265 @@ static const struct attribute_group ideapad_attribute_group = {
};
/*
+ * DYTC Platform profile
+ */
+#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */
+#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_GET 2 /* To get current IC function and mode */
+#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+
+#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */
+
+#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
+#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
+
+#define DYTC_SET_FUNCTION_BIT 12 /* Bits 12-15 - function setting */
+#define DYTC_SET_MODE_BIT 16 /* Bits 16-19 - mode setting */
+#define DYTC_SET_VALID_BIT 20 /* Bit 20 - 1 = on, 0 = off */
+
+#define DYTC_FUNCTION_STD 0 /* Function = 0, standard mode */
+#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
+#define DYTC_FUNCTION_MMC 11 /* Function = 11, desk mode */
+
+#define DYTC_MODE_PERFORM 2 /* High power mode aka performance */
+#define DYTC_MODE_LOW_POWER 3 /* Low power mode aka quiet */
+#define DYTC_MODE_BALANCE 0xF /* Default mode aka balanced */
+
+#define DYTC_SET_COMMAND(function, mode, on) \
+ (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
+ (mode) << DYTC_SET_MODE_BIT | \
+ (on) << DYTC_SET_VALID_BIT)
+
+#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 0)
+
+#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 1)
+
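To make the encoding concrete, the following self-contained C11 check shows
what the SET macro produces for one request (a worked example with the
definitions repeated locally, not part of the patch):

#include <assert.h>

#define DYTC_CMD_SET            1
#define DYTC_SET_FUNCTION_BIT   12
#define DYTC_SET_MODE_BIT       16
#define DYTC_SET_VALID_BIT      20
#define DYTC_FUNCTION_MMC       11      /* desk mode */
#define DYTC_MODE_PERFORM       2       /* performance */

#define DYTC_SET_COMMAND(function, mode, on) \
        (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
         (mode) << DYTC_SET_MODE_BIT | \
         (on) << DYTC_SET_VALID_BIT)

/* 0x1 | (11 << 12) | (2 << 16) | (1 << 20) == 0x12b001 */
static_assert(DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, DYTC_MODE_PERFORM, 1) == 0x12b001,
              "DYTC SET command encoding mismatch");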
+static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+{
+ switch (dytcmode) {
+ case DYTC_MODE_LOW_POWER:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case DYTC_MODE_BALANCE:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DYTC_MODE_PERFORM:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ default: /* Unknown mode */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int convert_profile_to_dytc(enum platform_profile_option profile, int *perfmode)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ *perfmode = DYTC_MODE_LOW_POWER;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ *perfmode = DYTC_MODE_BALANCE;
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ *perfmode = DYTC_MODE_PERFORM;
+ break;
+ default: /* Unknown profile */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * dytc_profile_get: Function to register with platform_profile
+ * handler. Returns current platform profile.
+ */
+static int dytc_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+
+ *profile = dytc->current_profile;
+ return 0;
+}
+
+/*
+ * Helper function - check if we are in CQL mode and, if so:
+ * - disable CQL,
+ * - run the command,
+ * - re-enable CQL.
+ * If not in CQL mode, just run the command.
+ */
+static int dytc_cql_command(struct ideapad_private *priv, unsigned long cmd,
+ unsigned long *output)
+{
+ int err, cmd_err, cur_funcmode;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_GET, output);
+ if (err)
+ return err;
+
+ cur_funcmode = (*output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ /* Check if we're OK to return immediately */
+ if (cmd == DYTC_CMD_GET && cur_funcmode != DYTC_FUNCTION_CQL)
+ return 0;
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = eval_dytc(priv->adev->handle, DYTC_DISABLE_CQL, NULL);
+ if (err)
+ return err;
+ }
+
+ cmd_err = eval_dytc(priv->adev->handle, cmd, output);
+ /* Check return condition after we've restored CQL state */
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = eval_dytc(priv->adev->handle, DYTC_ENABLE_CQL, NULL);
+ if (err)
+ return err;
+ }
+
+ return cmd_err;
+}
+
+/*
+ * dytc_profile_set: Function to register with platform_profile
+ * handler. Sets current platform profile.
+ */
+static int dytc_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_private *priv = dytc->priv;
+ int err;
+
+ err = mutex_lock_interruptible(&dytc->mutex);
+ if (err)
+ return err;
+
+ if (profile == PLATFORM_PROFILE_BALANCED) {
+ /* To get back to balanced mode we just issue a reset command */
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_RESET, NULL);
+ if (err)
+ goto unlock;
+ } else {
+ int perfmode;
+
+ err = convert_profile_to_dytc(profile, &perfmode);
+ if (err)
+ goto unlock;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
+ NULL);
+ if (err)
+ goto unlock;
+ }
+
+ /* Success - update current profile */
+ dytc->current_profile = profile;
+
+unlock:
+ mutex_unlock(&dytc->mutex);
+
+ return err;
+}
+
+static void dytc_profile_refresh(struct ideapad_private *priv)
+{
+ enum platform_profile_option profile;
+ unsigned long output;
+ int err, perfmode;
+
+ mutex_lock(&priv->dytc->mutex);
+ err = dytc_cql_command(priv, DYTC_CMD_GET, &output);
+ mutex_unlock(&priv->dytc->mutex);
+ if (err)
+ return;
+
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+
+ if (convert_dytc_to_profile(perfmode, &profile))
+ return;
+
+ if (profile != priv->dytc->current_profile) {
+ priv->dytc->current_profile = profile;
+ platform_profile_notify();
+ }
+}
+
+static int ideapad_dytc_profile_init(struct ideapad_private *priv)
+{
+ int err, dytc_version;
+ unsigned long output;
+
+ if (!priv->features.dytc)
+ return -ENODEV;
+
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_QUERY, &output);
+ /* A failed query means DYTC, and thus profile support, is unavailable */
+ if (err)
+ return err;
+
+ /* Check DYTC is enabled and supports mode setting */
+ if (!test_bit(DYTC_QUERY_ENABLE_BIT, &output))
+ return -ENODEV;
+
+ dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ if (dytc_version < 5)
+ return -ENODEV;
+
+ priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
+ if (!priv->dytc)
+ return -ENOMEM;
+
+ mutex_init(&priv->dytc->mutex);
+
+ priv->dytc->priv = priv;
+ priv->dytc->pprof.profile_get = dytc_profile_get;
+ priv->dytc->pprof.profile_set = dytc_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, priv->dytc->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, priv->dytc->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->dytc->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&priv->dytc->pprof);
+ if (err)
+ goto pp_reg_failed;
+
+ /* Ensure initial values are correct */
+ dytc_profile_refresh(priv);
+
+ return 0;
+
+pp_reg_failed:
+ mutex_destroy(&priv->dytc->mutex);
+ kfree(priv->dytc);
+ priv->dytc = NULL;
+
+ return err;
+}
+
+static void ideapad_dytc_profile_exit(struct ideapad_private *priv)
+{
+ if (!priv->dytc)
+ return;
+
+ platform_profile_remove();
+ mutex_destroy(&priv->dytc->mutex);
+ kfree(priv->dytc);
+
+ priv->dytc = NULL;
+}
+
+/*
* Rfkill
*/
struct ideapad_rfk_data {
@@ -557,9 +943,9 @@ struct ideapad_rfk_data {
};
static const struct ideapad_rfk_data ideapad_rfk_data[] = {
- { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
+ { "ideapad_wlan", CFG_CAP_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_CAP_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_CAP_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
};
static int ideapad_rfk_set(void *data, bool blocked)
@@ -579,7 +965,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
unsigned long hw_blocked = 0;
int i;
- if (priv->has_hw_rfkill_switch) {
+ if (priv->features.hw_rfkill_switch) {
if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -592,16 +978,15 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
{
- int ret;
- unsigned long sw_blocked;
+ unsigned long rf_enabled;
+ int err;
- if (no_bt_rfkill &&
- (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
+ if (no_bt_rfkill && ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH) {
/* Force to enable bluetooth when no_bt_rfkill=1 */
- write_ec_cmd(priv->adev->handle,
- ideapad_rfk_data[dev].opcode, 1);
+ write_ec_cmd(priv->adev->handle, ideapad_rfk_data[dev].opcode, 1);
return 0;
}
+
priv->rfk_priv[dev].dev = dev;
priv->rfk_priv[dev].priv = priv;
@@ -613,20 +998,17 @@ static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
if (!priv->rfk[dev])
return -ENOMEM;
- if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
- &sw_blocked)) {
- rfkill_init_sw_state(priv->rfk[dev], 0);
- } else {
- sw_blocked = !sw_blocked;
- rfkill_init_sw_state(priv->rfk[dev], sw_blocked);
- }
+ err = read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode - 1, &rf_enabled);
+ if (err)
+ rf_enabled = 1;
- ret = rfkill_register(priv->rfk[dev]);
- if (ret) {
+ rfkill_init_sw_state(priv->rfk[dev], !rf_enabled);
+
+ err = rfkill_register(priv->rfk[dev]);
+ if (err)
rfkill_destroy(priv->rfk[dev]);
- return ret;
- }
- return 0;
+
+ return err;
}
static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
@@ -643,40 +1025,39 @@ static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
*/
static int ideapad_sysfs_init(struct ideapad_private *priv)
{
- return sysfs_create_group(&priv->platform_device->dev.kobj,
- &ideapad_attribute_group);
+ return device_add_group(&priv->platform_device->dev,
+ &ideapad_attribute_group);
}
static void ideapad_sysfs_exit(struct ideapad_private *priv)
{
- sysfs_remove_group(&priv->platform_device->dev.kobj,
- &ideapad_attribute_group);
+ device_remove_group(&priv->platform_device->dev,
+ &ideapad_attribute_group);
}
/*
* input device
*/
static const struct key_entry ideapad_keymap[] = {
- { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 7, { KEY_CAMERA } },
- { KE_KEY, 8, { KEY_MICMUTE } },
- { KE_KEY, 11, { KEY_F16 } },
- { KE_KEY, 13, { KEY_WLAN } },
- { KE_KEY, 16, { KEY_PROG1 } },
- { KE_KEY, 17, { KEY_PROG2 } },
- { KE_KEY, 64, { KEY_PROG3 } },
- { KE_KEY, 65, { KEY_PROG4 } },
- { KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
- { KE_KEY, 67, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 8, { KEY_MICMUTE } },
+ { KE_KEY, 11, { KEY_F16 } },
+ { KE_KEY, 13, { KEY_WLAN } },
+ { KE_KEY, 16, { KEY_PROG1 } },
+ { KE_KEY, 17, { KEY_PROG2 } },
+ { KE_KEY, 64, { KEY_PROG3 } },
+ { KE_KEY, 65, { KEY_PROG4 } },
+ { KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 67, { KEY_TOUCHPAD_ON } },
{ KE_KEY, 128, { KEY_ESC } },
-
- { KE_END, 0 },
+ { KE_END },
};
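
The switch from { KE_END, 0 } to { KE_END } is purely cosmetic: C zero-fills any members a brace initializer omits, so both spellings produce the same terminator entry. A sketch, using the key_entry type from <linux/input/sparse-keymap.h>:

	struct key_entry old_style = { KE_END, 0 }; /* explicit zero code */
	struct key_entry new_style = { KE_END };    /* code zero-filled implicitly */
	/* Both describe the same KE_END terminator entry. */
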
static int ideapad_input_init(struct ideapad_private *priv)
{
struct input_dev *inputdev;
- int error;
+ int err;
inputdev = input_allocate_device();
if (!inputdev)
@@ -687,24 +1068,28 @@ static int ideapad_input_init(struct ideapad_private *priv)
inputdev->id.bustype = BUS_HOST;
inputdev->dev.parent = &priv->platform_device->dev;
- error = sparse_keymap_setup(inputdev, ideapad_keymap, NULL);
- if (error) {
- pr_err("Unable to setup input device keymap\n");
+ err = sparse_keymap_setup(inputdev, ideapad_keymap, NULL);
+ if (err) {
+ dev_err(&priv->platform_device->dev,
+ "Could not set up input device keymap: %d\n", err);
goto err_free_dev;
}
- error = input_register_device(inputdev);
- if (error) {
- pr_err("Unable to register input device\n");
+ err = input_register_device(inputdev);
+ if (err) {
+ dev_err(&priv->platform_device->dev,
+ "Could not register input device: %d\n", err);
goto err_free_dev;
}
priv->inputdev = inputdev;
+
return 0;
err_free_dev:
input_free_device(inputdev);
- return error;
+
+ return err;
}
static void ideapad_input_exit(struct ideapad_private *priv)
@@ -725,6 +1110,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
return;
+
if (long_pressed)
ideapad_input_report(priv, 17);
else
@@ -735,24 +1121,24 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
{
unsigned long bit, value;
- read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
-
- for (bit = 0; bit < 16; bit++) {
- if (test_bit(bit, &value)) {
- switch (bit) {
- case 0: /* Z580 */
- case 6: /* Z570 */
- /* Thermal Management button */
- ideapad_input_report(priv, 65);
- break;
- case 1:
- /* OneKey Theater button */
- ideapad_input_report(priv, 64);
- break;
- default:
- pr_info("Unknown special button: %lu\n", bit);
- break;
- }
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value))
+ return;
+
+ for_each_set_bit (bit, &value, 16) {
+ switch (bit) {
+ case 6: /* Z570 */
+ case 0: /* Z580 */
+ /* Thermal Management button */
+ ideapad_input_report(priv, 65);
+ break;
+ case 1:
+ /* OneKey Theater button */
+ ideapad_input_report(priv, 64);
+ break;
+ default:
+ dev_info(&priv->platform_device->dev,
+ "Unknown special button: %lu\n", bit);
+ break;
}
}
}
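
The for_each_set_bit() iterator that replaces the open-coded test_bit() loop comes from <linux/bitops.h>; it skips straight between set bits instead of testing every position. A simplified sketch of the upstream macro:

	#define for_each_set_bit(bit, addr, size)                    \
		for ((bit) = find_first_bit((addr), (size));         \
		     (bit) < (size);                                 \
		     (bit) = find_next_bit((addr), (size), (bit) + 1))
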
@@ -764,28 +1150,29 @@ static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
{
struct ideapad_private *priv = bl_get_data(blightdev);
unsigned long now;
+ int err;
- if (!priv)
- return -EINVAL;
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ if (err)
+ return err;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
- return -EIO;
return now;
}
static int ideapad_backlight_update_status(struct backlight_device *blightdev)
{
struct ideapad_private *priv = bl_get_data(blightdev);
+ int err;
- if (!priv)
- return -EINVAL;
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
+ blightdev->props.brightness);
+ if (err)
+ return err;
- if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
- blightdev->props.brightness))
- return -EIO;
- if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
- blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
- return -EIO;
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
+ blightdev->props.power != FB_BLANK_POWERDOWN);
+ if (err)
+ return err;
return 0;
}
@@ -800,30 +1187,41 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
struct backlight_device *blightdev;
struct backlight_properties props;
unsigned long max, now, power;
+ int err;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
- return -EIO;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
- return -EIO;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
- return -EIO;
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max);
+ if (err)
+ return err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ if (err)
+ return err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power);
+ if (err)
+ return err;
+
+ memset(&props, 0, sizeof(props));
- memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = max;
props.type = BACKLIGHT_PLATFORM;
+
blightdev = backlight_device_register("ideapad",
&priv->platform_device->dev,
priv,
&ideapad_backlight_ops,
&props);
if (IS_ERR(blightdev)) {
- pr_err("Could not register backlight device\n");
- return PTR_ERR(blightdev);
+ err = PTR_ERR(blightdev);
+ dev_err(&priv->platform_device->dev,
+ "Could not register backlight device: %d\n", err);
+ return err;
}
priv->blightdev = blightdev;
blightdev->props.brightness = now;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+
backlight_update_status(blightdev);
return 0;
@@ -837,13 +1235,15 @@ static void ideapad_backlight_exit(struct ideapad_private *priv)
static void ideapad_backlight_notify_power(struct ideapad_private *priv)
{
- unsigned long power;
struct backlight_device *blightdev = priv->blightdev;
+ unsigned long power;
if (!blightdev)
return;
+
if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
+
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
@@ -852,12 +1252,112 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
unsigned long now;
/* if we control brightness via acpi video driver */
- if (priv->blightdev == NULL) {
+ if (!priv->blightdev)
read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ else
+ backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+}
+
+/*
+ * keyboard backlight
+ */
+static int ideapad_kbd_bl_brightness_get(struct ideapad_private *priv)
+{
+ unsigned long hals;
+ int err;
+
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return !!test_bit(HALS_KBD_BL_STATE_BIT, &hals);
+}
+
+static enum led_brightness ideapad_kbd_bl_led_cdev_brightness_get(struct led_classdev *led_cdev)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, kbd_bl.led);
+
+ return ideapad_kbd_bl_brightness_get(priv);
+}
+
+static int ideapad_kbd_bl_brightness_set(struct ideapad_private *priv, unsigned int brightness)
+{
+ int err = exec_sals(priv->adev->handle, brightness ? SALS_KBD_BL_ON : SALS_KBD_BL_OFF);
+
+ if (err)
+ return err;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ return 0;
+}
+
+static int ideapad_kbd_bl_led_cdev_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, kbd_bl.led);
+
+ return ideapad_kbd_bl_brightness_set(priv, brightness);
+}
+
+static void ideapad_kbd_bl_notify(struct ideapad_private *priv)
+{
+ int brightness;
+
+ if (!priv->kbd_bl.initialized)
+ return;
+
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
return;
- }
- backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+ if (brightness == priv->kbd_bl.last_brightness)
+ return;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ led_classdev_notify_brightness_hw_changed(&priv->kbd_bl.led, brightness);
+}
+
+static int ideapad_kbd_bl_init(struct ideapad_private *priv)
+{
+ int brightness, err;
+
+ if (!priv->features.kbd_bl)
+ return -ENODEV;
+
+ if (WARN_ON(priv->kbd_bl.initialized))
+ return -EEXIST;
+
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
+ priv->kbd_bl.led.max_brightness = 1;
+ priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
+ priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
+ priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED;
+
+ err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led);
+ if (err)
+ return err;
+
+ priv->kbd_bl.initialized = true;
+
+ return 0;
+}
+
+static void ideapad_kbd_bl_exit(struct ideapad_private *priv)
+{
+ if (!priv->kbd_bl.initialized)
+ return;
+
+ priv->kbd_bl.initialized = false;
+
+ led_classdev_unregister(&priv->kbd_bl.led);
}
/*
@@ -867,68 +1367,77 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
unsigned long value;
+ if (!priv->features.touchpad_ctrl_via_ec)
+ return;
+
/* Without a read from the EC, the touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
- /* Some IdeaPads don't really turn off touchpad - they only
+ unsigned char param;
+ /*
+ * Some IdeaPads don't really turn off touchpad - they only
* switch the LED state. We (de)activate KBC AUX port to turn
* touchpad off and on. We send KEY_TOUCHPAD_OFF and
- * KEY_TOUCHPAD_ON to not to get out of sync with LED */
- unsigned char param;
- i8042_command(&param, value ? I8042_CMD_AUX_ENABLE :
- I8042_CMD_AUX_DISABLE);
+		 * KEY_TOUCHPAD_ON so as not to get out of sync with the LED
+ */
+ i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
ideapad_input_report(priv, value ? 67 : 66);
+ sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
}
}
static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ideapad_private *priv = data;
- unsigned long vpc1, vpc2, vpc_bit;
+ unsigned long vpc1, vpc2, bit;
if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
return;
+
if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
return;
vpc1 = (vpc2 << 8) | vpc1;
- for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
- if (test_bit(vpc_bit, &vpc1)) {
- switch (vpc_bit) {
- case 9:
- ideapad_sync_rfk_state(priv);
- break;
- case 13:
- case 11:
- case 8:
- case 7:
- case 6:
- ideapad_input_report(priv, vpc_bit);
- break;
- case 5:
- ideapad_sync_touchpad_state(priv);
- break;
- case 4:
- ideapad_backlight_notify_brightness(priv);
- break;
- case 3:
- ideapad_input_novokey(priv);
- break;
- case 2:
- ideapad_backlight_notify_power(priv);
- break;
- case 0:
- ideapad_check_special_buttons(priv);
- break;
- case 1:
- /* Some IdeaPads report event 1 every ~20
- * seconds while on battery power; some
- * report this when changing to/from tablet
- * mode. Squelch this event.
- */
- break;
- default:
- pr_info("Unknown event: %lu\n", vpc_bit);
- }
+
+ for_each_set_bit (bit, &vpc1, 16) {
+ switch (bit) {
+ case 13:
+ case 11:
+ case 8:
+ case 7:
+ case 6:
+ ideapad_input_report(priv, bit);
+ break;
+ case 9:
+ ideapad_sync_rfk_state(priv);
+ break;
+ case 5:
+ ideapad_sync_touchpad_state(priv);
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ case 1:
+ /*
+ * Some IdeaPads report event 1 every ~20
+ * seconds while on battery power; some
+ * report this when changing to/from tablet
+ * mode; some report this when the keyboard
+ * backlight has changed.
+ */
+ ideapad_kbd_bl_notify(priv);
+ break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
+ default:
+ dev_info(&priv->platform_device->dev,
+ "Unknown event: %lu\n", bit);
}
}
}
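
A worked example of the event-word assembly at the top of the handler, with hypothetical EC register values:

	/* Hypothetical reads: VPC1 = 0x20, VPC2 = 0x02 */
	unsigned long vpc1 = 0x20, vpc2 = 0x02;

	vpc1 = (vpc2 << 8) | vpc1; /* 0x0220: bits 5 and 9 set */
	/*
	 * for_each_set_bit() then visits bit 5 (touchpad state sync)
	 * followed by bit 9 (rfkill state sync).
	 */
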
@@ -936,12 +1445,15 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
#if IS_ENABLED(CONFIG_ACPI_WMI)
static void ideapad_wmi_notify(u32 value, void *context)
{
+ struct ideapad_private *priv = context;
+
switch (value) {
case 128:
- ideapad_input_report(context, value);
+ ideapad_input_report(priv, value);
break;
default:
- pr_info("Unknown WMI event %u\n", value);
+ dev_info(&priv->platform_device->dev,
+ "Unknown WMI event: %u\n", value);
}
}
#endif
@@ -965,18 +1477,52 @@ static const struct dmi_system_id hw_rfkill_list[] = {
{}
};
+static void ideapad_check_features(struct ideapad_private *priv)
+{
+ acpi_handle handle = priv->adev->handle;
+ unsigned long val;
+
+ priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+
+ /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ priv->features.touchpad_ctrl_via_ec = !acpi_dev_present("ELAN0634", NULL, -1);
+
+ if (!read_ec_data(handle, VPCCMD_R_FAN, &val))
+ priv->features.fan_mode = true;
+
+ if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC"))
+ priv->features.conservation_mode = true;
+
+ if (acpi_has_method(handle, "DYTC"))
+ priv->features.dytc = true;
+
+ if (acpi_has_method(handle, "HALS") && acpi_has_method(handle, "SALS")) {
+ if (!eval_hals(handle, &val)) {
+ if (test_bit(HALS_FNLOCK_SUPPORT_BIT, &val))
+ priv->features.fn_lock = true;
+
+ if (test_bit(HALS_KBD_BL_SUPPORT_BIT, &val))
+ priv->features.kbd_bl = true;
+
+ if (test_bit(HALS_USB_CHARGING_SUPPORT_BIT, &val))
+ priv->features.usb_charging = true;
+ }
+ }
+}
+
static int ideapad_acpi_add(struct platform_device *pdev)
{
- int ret, i;
- int cfg;
struct ideapad_private *priv;
struct acpi_device *adev;
+ acpi_status status;
+ unsigned long cfg;
+ int err, i;
- ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
- if (ret)
+ err = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (err)
return -ENODEV;
- if (read_method_int(adev->handle, "_CFG", &cfg))
+ if (eval_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -984,28 +1530,42 @@ static int ideapad_acpi_add(struct platform_device *pdev)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, priv);
+
priv->cfg = cfg;
priv->adev = adev;
priv->platform_device = pdev;
- priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
- ret = ideapad_sysfs_init(priv);
- if (ret)
- return ret;
+ ideapad_check_features(priv);
+
+ err = ideapad_sysfs_init(priv);
+ if (err)
+ return err;
ideapad_debugfs_init(priv);
- ret = ideapad_input_init(priv);
- if (ret)
+ err = ideapad_input_init(priv);
+ if (err)
goto input_failed;
+ err = ideapad_kbd_bl_init(priv);
+ if (err) {
+ if (err != -ENODEV)
+ dev_warn(&pdev->dev, "Could not set up keyboard backlight LED: %d\n", err);
+ else
+ dev_info(&pdev->dev, "Keyboard backlight control not available\n");
+ }
+
/*
* On some models without a hw-switch (the yoga 2 13 at least)
* VPCCMD_W_RF must be explicitly set to 1 for the wifi to work.
*/
- if (!priv->has_hw_rfkill_switch)
+ if (!priv->features.hw_rfkill_switch)
write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+ /* The same for Touchpad */
+ if (!priv->features.touchpad_ctrl_via_ec)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(priv, i);
@@ -1013,45 +1573,70 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ideapad_sync_rfk_state(priv);
ideapad_sync_touchpad_state(priv);
+ err = ideapad_dytc_profile_init(priv);
+ if (err) {
+ if (err != -ENODEV)
+ dev_warn(&pdev->dev, "Could not set up DYTC interface: %d\n", err);
+ else
+ dev_info(&pdev->dev, "DYTC interface is not available\n");
+ }
+
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- ret = ideapad_backlight_init(priv);
- if (ret && ret != -ENODEV)
+ err = ideapad_backlight_init(priv);
+ if (err && err != -ENODEV)
goto backlight_failed;
}
- ret = acpi_install_notify_handler(adev->handle,
- ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
- if (ret)
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify, priv);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
goto notification_failed;
+ }
#if IS_ENABLED(CONFIG_ACPI_WMI)
for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
- ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
- ideapad_wmi_notify, priv);
- if (ret == AE_OK) {
+ status = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+ ideapad_wmi_notify, priv);
+ if (ACPI_SUCCESS(status)) {
priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
break;
}
}
- if (ret != AE_OK && ret != AE_NOT_EXIST)
+
+ if (ACPI_FAILURE(status) && status != AE_NOT_EXIST) {
+ err = -EIO;
goto notification_failed_wmi;
+ }
#endif
return 0;
+
#if IS_ENABLED(CONFIG_ACPI_WMI)
notification_failed_wmi:
acpi_remove_notify_handler(priv->adev->handle,
- ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify);
#endif
+
notification_failed:
ideapad_backlight_exit(priv);
+
backlight_failed:
+ ideapad_dytc_profile_exit(priv);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(priv, i);
+
+ ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
+
input_failed:
ideapad_debugfs_exit(priv);
ideapad_sysfs_exit(priv);
- return ret;
+
+ return err;
}
static int ideapad_acpi_remove(struct platform_device *pdev)
@@ -1063,38 +1648,44 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
if (priv->fnesc_guid)
wmi_remove_notify_handler(priv->fnesc_guid);
#endif
+
acpi_remove_notify_handler(priv->adev->handle,
- ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify);
+
ideapad_backlight_exit(priv);
+ ideapad_dytc_profile_exit(priv);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(priv, i);
+
+ ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
ideapad_sysfs_exit(priv);
- dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
#ifdef CONFIG_PM_SLEEP
-static int ideapad_acpi_resume(struct device *device)
+static int ideapad_acpi_resume(struct device *dev)
{
- struct ideapad_private *priv;
-
- if (!device)
- return -EINVAL;
- priv = dev_get_drvdata(device);
+ struct ideapad_private *priv = dev_get_drvdata(dev);
ideapad_sync_rfk_state(priv);
ideapad_sync_touchpad_state(priv);
+
+ if (priv->dytc)
+ dytc_profile_refresh(priv);
+
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
static const struct acpi_device_id ideapad_device_ids[] = {
- { "VPC2004", 0},
- { "", 0},
+ {"VPC2004", 0},
+ {"", 0},
};
MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
index 12d5ab7e1f5d..3ee4c5c8a64f 100644
--- a/drivers/platform/x86/intel-uncore-frequency.c
+++ b/drivers/platform/x86/intel-uncore-frequency.c
@@ -377,6 +377,7 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
{}
};
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3b49a1f4061b..8a8017f9ca91 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -44,6 +44,7 @@ static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
{ KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
+ { KE_END }
};
static const struct key_entry intel_vbtn_switchmap[] = {
@@ -51,14 +52,15 @@ static const struct key_entry intel_vbtn_switchmap[] = {
{ KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
{ KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
{ KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
+ { KE_END }
};
#define KEYMAP_LEN \
(ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
struct intel_vbtn_priv {
- struct key_entry keymap[KEYMAP_LEN];
- struct input_dev *input_dev;
+ struct input_dev *buttons_dev;
+ struct input_dev *switches_dev;
bool has_buttons;
bool has_switches;
bool wakeup_mode;
@@ -77,48 +79,62 @@ static void detect_tablet_mode(struct platform_device *device)
return;
m = !(vgbs & VGBS_TABLET_MODE_FLAGS);
- input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+ input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
- input_report_switch(priv->input_dev, SW_DOCK, m);
+ input_report_switch(priv->switches_dev, SW_DOCK, m);
}
+/*
+ * Note this unconditionally creates the 2 input_dev-s and sets up
+ * the sparse-keymaps. Only the registration is conditional on
+ * has_buttons / has_switches. This is done so that the notify
+ * handler can always call sparse_keymap_entry_from_scancode()
+ * on the input_dev-s to determine the event type.
+ */
static int intel_vbtn_input_setup(struct platform_device *device)
{
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- int ret, keymap_len = 0;
+ int ret;
- if (priv->has_buttons) {
- memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap,
- ARRAY_SIZE(intel_vbtn_keymap) *
- sizeof(struct key_entry));
- keymap_len += ARRAY_SIZE(intel_vbtn_keymap);
- }
+ priv->buttons_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->buttons_dev)
+ return -ENOMEM;
- if (priv->has_switches) {
- memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap,
- ARRAY_SIZE(intel_vbtn_switchmap) *
- sizeof(struct key_entry));
- keymap_len += ARRAY_SIZE(intel_vbtn_switchmap);
- }
+ ret = sparse_keymap_setup(priv->buttons_dev, intel_vbtn_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->buttons_dev->dev.parent = &device->dev;
+ priv->buttons_dev->name = "Intel Virtual Buttons";
+ priv->buttons_dev->id.bustype = BUS_HOST;
- priv->keymap[keymap_len].type = KE_END;
+ if (priv->has_buttons) {
+ ret = input_register_device(priv->buttons_dev);
+ if (ret)
+ return ret;
+ }
- priv->input_dev = devm_input_allocate_device(&device->dev);
- if (!priv->input_dev)
+ priv->switches_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->switches_dev)
return -ENOMEM;
- ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL);
+ ret = sparse_keymap_setup(priv->switches_dev, intel_vbtn_switchmap, NULL);
if (ret)
return ret;
- priv->input_dev->dev.parent = &device->dev;
- priv->input_dev->name = "Intel Virtual Button driver";
- priv->input_dev->id.bustype = BUS_HOST;
+ priv->switches_dev->dev.parent = &device->dev;
+ priv->switches_dev->name = "Intel Virtual Switches";
+ priv->switches_dev->id.bustype = BUS_HOST;
- if (priv->has_switches)
+ if (priv->has_switches) {
detect_tablet_mode(device);
- return input_register_device(priv->input_dev);
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void notify_handler(acpi_handle handle, u32 event, void *context)
@@ -127,48 +143,50 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
unsigned int val = !(event & 1); /* Even=press, Odd=release */
const struct key_entry *ke, *ke_rel;
+ struct input_dev *input_dev;
bool autorelease;
+ int ret;
- if (priv->wakeup_mode) {
- ke = sparse_keymap_entry_from_scancode(priv->input_dev, event);
- if (ke) {
- pm_wakeup_hard_event(&device->dev);
-
- /*
- * Switch events like tablet mode will wake the device
- * and report the new switch position to the input
- * subsystem.
- */
- if (ke->type == KE_SW)
- sparse_keymap_report_event(priv->input_dev,
- event,
- val,
- 0);
+ if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) {
+ if (!priv->has_buttons) {
+ dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n");
return;
}
- goto out_unknown;
+ input_dev = priv->buttons_dev;
+ } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
+ if (!priv->has_switches) {
+ dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return;
+
+ priv->has_switches = true;
+ }
+ input_dev = priv->switches_dev;
+ } else {
+ dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
+ return;
+ }
+
+ if (priv->wakeup_mode) {
+ pm_wakeup_hard_event(&device->dev);
+
+ /*
+ * Skip reporting an evdev event for button wake events,
+ * mirroring how the drivers/acpi/button.c code skips this too.
+ */
+ if (ke->type == KE_KEY)
+ return;
}
/*
* Even press events are autorelease if there is no corresponding odd
* release event, or if the odd event is KE_IGNORE.
*/
- ke_rel = sparse_keymap_entry_from_scancode(priv->input_dev, event | 1);
+ ke_rel = sparse_keymap_entry_from_scancode(input_dev, event | 1);
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
- if (sparse_keymap_report_event(priv->input_dev, event, val, autorelease))
- return;
-
-out_unknown:
- dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
-}
-
-static bool intel_vbtn_has_buttons(acpi_handle handle)
-{
- acpi_status status;
-
- status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
- return ACPI_SUCCESS(status);
+ sparse_keymap_report_event(input_dev, event, val, autorelease);
}
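
To make the even/odd convention concrete, here is the autorelease decision traced for the volume-down pair from the keymap above (a worked sketch, not new driver logic):

	/*
	 * Hypothetical event 0xC6: even, so val = !(0xC6 & 1) = 1 (press).
	 * Its release partner is 0xC6 | 1 = 0xC7, which the keymap marks
	 * KE_IGNORE, so the press is reported with autorelease and no key
	 * is left stuck down.
	 */
	unsigned int val = !(0xC6 & 1);					/* 1 */
	const struct key_entry *ke_rel =
		sparse_keymap_entry_from_scancode(input_dev, 0xC6 | 1);
	bool autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
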
/*
@@ -207,19 +225,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
},
},
{} /* Array terminator */
@@ -245,7 +263,7 @@ static int intel_vbtn_probe(struct platform_device *device)
acpi_status status;
int err;
- has_buttons = intel_vbtn_has_buttons(handle);
+ has_buttons = acpi_has_method(handle, "VBDL");
has_switches = intel_vbtn_has_switches(handle);
if (!has_buttons && !has_switches) {
@@ -274,6 +292,12 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
return -EBUSY;
+ if (has_buttons) {
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ if (ACPI_FAILURE(status))
+			dev_err(&device->dev, "VBDL evaluation failed with ACPI status %d\n", status);
+ }
+
device_init_wakeup(&device->dev, true);
/*
* In order for system wakeup to work, the EC GPE has to be marked as
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
deleted file mode 100644
index df434abbb66f..000000000000
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Power button driver for Intel MID platforms.
- *
- * Copyright (C) 2010,2017 Intel Corp
- *
- * Author: Hong Liu <hong.liu@intel.com>
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
-#include <linux/slab.h>
-
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-#include <asm/intel_scu_ipc.h>
-
-#define DRIVER_NAME "msic_power_btn"
-
-#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
-
-/*
- * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask
- * power button interrupt
- */
-#define MSIC_PWRBTNM (1 << 0)
-
-/* Intel Tangier */
-#define BCOVE_PB_LEVEL (1 << 4) /* 1 - release, 0 - press */
-
-/* Basin Cove PMIC */
-#define BCOVE_PBIRQ 0x02
-#define BCOVE_IRQLVL1MSK 0x0c
-#define BCOVE_PBIRQMASK 0x0d
-#define BCOVE_PBSTATUS 0x27
-
-struct mid_pb_ddata {
- struct device *dev;
- int irq;
- struct input_dev *input;
- unsigned short mirqlvl1_addr;
- unsigned short pbstat_addr;
- u8 pbstat_mask;
- struct intel_scu_ipc_dev *scu;
- int (*setup)(struct mid_pb_ddata *ddata);
-};
-
-static int mid_pbstat(struct mid_pb_ddata *ddata, int *value)
-{
- struct input_dev *input = ddata->input;
- int ret;
- u8 pbstat;
-
- ret = intel_scu_ipc_dev_ioread8(ddata->scu, ddata->pbstat_addr,
- &pbstat);
- if (ret)
- return ret;
-
- dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat);
-
- *value = !(pbstat & ddata->pbstat_mask);
- return 0;
-}
-
-static int mid_irq_ack(struct mid_pb_ddata *ddata)
-{
- return intel_scu_ipc_dev_update(ddata->scu, ddata->mirqlvl1_addr, 0,
- MSIC_PWRBTNM);
-}
-
-static int mrfld_setup(struct mid_pb_ddata *ddata)
-{
- /* Unmask the PBIRQ and MPBIRQ on Tangier */
- intel_scu_ipc_dev_update(ddata->scu, BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
- intel_scu_ipc_dev_update(ddata->scu, BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
-
- return 0;
-}
-
-static irqreturn_t mid_pb_isr(int irq, void *dev_id)
-{
- struct mid_pb_ddata *ddata = dev_id;
- struct input_dev *input = ddata->input;
- int value = 0;
- int ret;
-
- ret = mid_pbstat(ddata, &value);
- if (ret < 0) {
- dev_err(input->dev.parent,
- "Read error %d while reading MSIC_PB_STATUS\n", ret);
- } else {
- input_event(input, EV_KEY, KEY_POWER, value);
- input_sync(input);
- }
-
- mid_irq_ack(ddata);
- return IRQ_HANDLED;
-}
-
-static const struct mid_pb_ddata mfld_ddata = {
- .mirqlvl1_addr = INTEL_MSIC_IRQLVL1MSK,
- .pbstat_addr = INTEL_MSIC_PBSTATUS,
- .pbstat_mask = MSIC_PB_LEVEL,
-};
-
-static const struct mid_pb_ddata mrfld_ddata = {
- .mirqlvl1_addr = BCOVE_IRQLVL1MSK,
- .pbstat_addr = BCOVE_PBSTATUS,
- .pbstat_mask = BCOVE_PB_LEVEL,
- .setup = mrfld_setup,
-};
-
-static const struct x86_cpu_id mid_pb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &mfld_ddata),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &mrfld_ddata),
- {}
-};
-
-static int mid_pb_probe(struct platform_device *pdev)
-{
- const struct x86_cpu_id *id;
- struct mid_pb_ddata *ddata;
- struct input_dev *input;
- int irq = platform_get_irq(pdev, 0);
- int error;
-
- id = x86_match_cpu(mid_pb_cpu_ids);
- if (!id)
- return -ENODEV;
-
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
- return irq;
- }
-
- input = devm_input_allocate_device(&pdev->dev);
- if (!input)
- return -ENOMEM;
-
- input->name = pdev->name;
- input->phys = "power-button/input0";
- input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
-
- input_set_capability(input, EV_KEY, KEY_POWER);
-
- ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
- sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- ddata->dev = &pdev->dev;
- ddata->irq = irq;
- ddata->input = input;
-
- if (ddata->setup) {
- error = ddata->setup(ddata);
- if (error)
- return error;
- }
-
- ddata->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
- if (!ddata->scu)
- return -EPROBE_DEFER;
-
- error = devm_request_threaded_irq(&pdev->dev, irq, NULL, mid_pb_isr,
- IRQF_ONESHOT, DRIVER_NAME, ddata);
- if (error) {
- dev_err(&pdev->dev,
- "Unable to request irq %d for MID power button\n", irq);
- return error;
- }
-
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev,
- "Unable to register input dev, error %d\n", error);
- return error;
- }
-
- platform_set_drvdata(pdev, ddata);
-
- /*
- * SCU firmware might send power button interrupts to IA core before
- * kernel boots and doesn't get EOI from IA core. The first bit of
- * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new
- * power interrupt to Android kernel. Unmask the bit when probing
- * power button in kernel.
- * There is a very narrow race between irq handler and power button
- * initialization. The race happens rarely. So we needn't worry
- * about it.
- */
- error = mid_irq_ack(ddata);
- if (error) {
- dev_err(&pdev->dev,
- "Unable to clear power button interrupt, error: %d\n",
- error);
- return error;
- }
-
- device_init_wakeup(&pdev->dev, true);
- dev_pm_set_wake_irq(&pdev->dev, irq);
-
- return 0;
-}
-
-static int mid_pb_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
-
- return 0;
-}
-
-static struct platform_driver mid_pb_driver = {
- .driver = {
- .name = DRIVER_NAME,
- },
- .probe = mid_pb_probe,
- .remove = mid_pb_remove,
-};
-
-module_platform_driver(mid_pb_driver);
-
-MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
-MODULE_DESCRIPTION("Intel MID Power Button Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
deleted file mode 100644
index f12f4e7bd971..000000000000
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ /dev/null
@@ -1,560 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel MID platform thermal driver
- *
- * Copyright (C) 2011 Intel Corporation
- *
- * Author: Durgadoss R <durgadoss.r@intel.com>
- */
-
-#define pr_fmt(fmt) "intel_mid_thermal: " fmt
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/module.h>
-#include <linux/param.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-
-/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS 4
-
-/* ADC1 - thermal registers */
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
-
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-#define MSIC_CHANL_MASK_VAL 0x0F
-
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-/* Number of ADC channels */
-#define ADC_CHANLS_MAX 15
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
-
-/* ADC channel code values */
-#define SKIN_SENSOR0_CODE 0x08
-#define SKIN_SENSOR1_CODE 0x09
-#define SYS_SENSOR_CODE 0x0A
-#define MSIC_DIE_SENSOR_CODE 0x03
-
-#define SKIN_THERM_SENSOR0 0
-#define SKIN_THERM_SENSOR1 1
-#define SYS_THERM_SENSOR2 2
-#define MSIC_DIE_THERM_SENSOR3 3
-
-/* ADC code range */
-#define ADC_MAX 977
-#define ADC_MIN 162
-#define ADC_VAL0C 887
-#define ADC_VAL20C 720
-#define ADC_VAL40C 508
-#define ADC_VAL60C 315
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR INTEL_MSIC_ADC1ADDR0 /* increments by 1 */
-#define ADC_DATA_START_ADDR INTEL_MSIC_ADC1SNS0H /* increments by 2 */
-
-/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN 488
-#define MSIC_DIE_ADC_MAX 1004
-
-/* This holds the address of the first free ADC channel,
- * among the 15 channels
- */
-static int channel_index;
-
-struct platform_info {
- struct platform_device *pdev;
- struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
-};
-
-struct thermal_device_info {
- unsigned int chnl_addr;
- int direct;
- /* This holds the current temperature in millidegree celsius */
- long curr_temp;
-};
-
-/**
- * to_msic_die_temp - converts adc_val to msic_die temperature
- * @adc_val: ADC value to be converted
- *
- * Can sleep
- */
-static int to_msic_die_temp(uint16_t adc_val)
-{
- return (368 * (adc_val) / 1000) - 220;
-}
-
-/**
- * is_valid_adc - checks whether the adc code is within the defined range
- * @min: minimum value for the sensor
- * @max: maximum value for the sensor
- *
- * Can sleep
- */
-static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
-{
- return (adc_val >= min) && (adc_val <= max);
-}
-
-/**
- * adc_to_temp - converts the ADC code to temperature in C
- * @direct: true if ths channel is direct index
- * @adc_val: the adc_val that needs to be converted
- * @tp: temperature return value
- *
- * Linear approximation is used to covert the skin adc value into temperature.
- * This technique is used to avoid very long look-up table to get
- * the appropriate temp value from ADC value.
- * The adc code vs sensor temp curve is split into five parts
- * to achieve very close approximate temp value with less than
- * 0.5C error
- */
-static int adc_to_temp(int direct, uint16_t adc_val, int *tp)
-{
- int temp;
-
- /* Direct conversion for die temperature */
- if (direct) {
- if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
- *tp = to_msic_die_temp(adc_val) * 1000;
- return 0;
- }
- return -ERANGE;
- }
-
- if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
- return -ERANGE;
-
- /* Linear approximation for skin temperature */
- if (adc_val > ADC_VAL0C)
- temp = 177 - (adc_val/5);
- else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
- temp = 111 - (adc_val/8);
- else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
- temp = 92 - (adc_val/10);
- else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
- temp = 91 - (adc_val/10);
- else
- temp = 112 - (adc_val/6);
-
- /* Convert temperature in celsius to milli degree celsius */
- *tp = temp * 1000;
- return 0;
-}
-
-/**
- * mid_read_temp - read sensors for temperature
- * @temp: holds the current temperature for the sensor after reading
- *
- * reads the adc_code from the channel and converts it to real
- * temperature. The converted value is stored in temp.
- *
- * Can sleep
- */
-static int mid_read_temp(struct thermal_zone_device *tzd, int *temp)
-{
- struct thermal_device_info *td_info = tzd->devdata;
- uint16_t adc_val, addr;
- uint8_t data = 0;
- int ret;
- int curr_temp;
-
- addr = td_info->chnl_addr;
-
- /* Enable the msic for conversion before reading */
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
- if (ret)
- return ret;
-
- /* Re-toggle the RRDATARD bit (temporary workaround) */
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
- if (ret)
- return ret;
-
- /* Read the higher bits of data */
- ret = intel_msic_reg_read(addr, &data);
- if (ret)
- return ret;
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (data << 2);
- addr++;
-
- ret = intel_msic_reg_read(addr, &data);/* Read lower bits */
- if (ret)
- return ret;
-
- /* Adding lower two bits to the higher bits */
- data &= 03;
- adc_val += data;
-
- /* Convert ADC value to temperature */
- ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
- if (ret == 0)
- *temp = td_info->curr_temp = curr_temp;
- return ret;
-}
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero: disables the ADC non-zero:enables the ADC
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static int configure_adc(int val)
-{
- int ret;
- uint8_t data;
-
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if (val) {
- /* Enable and start the ADC */
- data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- } else {
- /* Just stop the ADC */
- data &= (~MSIC_ADC_START);
- }
- return intel_msic_reg_write(INTEL_MSIC_ADC1CNTL1, data);
-}
-
-/**
- * set_up_therm_channel - enable thermal channel for conversion
- * @base_addr: index of free msic ADC channel
- *
- * Enable all the three channels for conversion
- *
- * Can sleep
- */
-static int set_up_therm_channel(u16 base_addr)
-{
- int ret;
-
- /* Enable all the sensor channels */
- ret = intel_msic_reg_write(base_addr, SKIN_SENSOR0_CODE);
- if (ret)
- return ret;
-
- ret = intel_msic_reg_write(base_addr + 1, SKIN_SENSOR1_CODE);
- if (ret)
- return ret;
-
- ret = intel_msic_reg_write(base_addr + 2, SYS_SENSOR_CODE);
- if (ret)
- return ret;
-
- /* Since this is the last channel, set the stop bit
- * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- ret = intel_msic_reg_write(base_addr + 3,
- (MSIC_DIE_SENSOR_CODE | 0x10));
- if (ret)
- return ret;
-
- /* Enable ADC and start it */
- return configure_adc(1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static int reset_stopbit(uint16_t addr)
-{
- int ret;
- uint8_t data;
- ret = intel_msic_reg_read(addr, &data);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- return intel_msic_reg_write(addr, (data & 0xEF));
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc
- * code.
- */
-static int find_free_channel(void)
-{
- int ret;
- int i;
- uint8_t data;
-
- /* check whether ADC is enabled */
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if ((data & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
- ret = intel_msic_reg_read(ADC_CHNL_START_ADDR + i, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initializing the ADC
- * @dev: our device structure
- *
- * Initialize the ADC for reading thermistor values. Can sleep.
- */
-static int mid_initialize_adc(struct device *dev)
-{
- u8 data;
- u16 base_addr;
- int ret;
-
- /*
- * Ensure that adctherm is disabled before we
- * initialize the ADC
- */
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL3, &data);
- if (ret)
- return ret;
-
- data &= ~MSIC_ADCTHERM_MASK;
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, data);
- if (ret)
- return ret;
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- dev_err(dev, "No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- ret = set_up_therm_channel(base_addr);
- if (ret) {
- dev_err(dev, "unable to enable ADC");
- return ret;
- }
- dev_dbg(dev, "ADC initialization successful");
- return ret;
-}
-
-/**
- * initialize_sensor - sets default temp and timer ranges
- * @index: index of the sensor
- *
- * Context: can sleep
- */
-static struct thermal_device_info *initialize_sensor(int index)
-{
- struct thermal_device_info *td_info =
- kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
- if (!td_info)
- return NULL;
-
- /* Set the base addr of the channel for this sensor */
- td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
- /* Sensor 3 is direct conversion */
- if (index == 3)
- td_info->direct = 1;
- return td_info;
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * mid_thermal_resume - resume routine
- * @dev: device structure
- *
- * mid thermal resume: re-initializes the adc. Can sleep.
- */
-static int mid_thermal_resume(struct device *dev)
-{
- return mid_initialize_adc(dev);
-}
-
-/**
- * mid_thermal_suspend - suspend routine
- * @dev: device structure
- *
- * mid thermal suspend implements the suspend functionality
- * by stopping the ADC. Can sleep.
- */
-static int mid_thermal_suspend(struct device *dev)
-{
- /*
- * This just stops the ADC and does not disable it.
- * temporary workaround until we have a generic ADC driver.
- * If 0 is passed, it disables the ADC.
- */
- return configure_adc(0);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
- mid_thermal_suspend, mid_thermal_resume);
-
-/**
- * read_curr_temp - reads the current temperature and stores in temp
- * @temp: holds the current temperature value after reading
- *
- * Can sleep
- */
-static int read_curr_temp(struct thermal_zone_device *tzd, int *temp)
-{
- WARN_ON(tzd == NULL);
- return mid_read_temp(tzd, temp);
-}
-
-/* Can't be const */
-static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = read_curr_temp,
-};
-
-/**
- * mid_thermal_probe - mfld thermal initialize
- * @pdev: platform device structure
- *
- * mid thermal probe initializes the hardware and registers
- * all the sensors with the generic thermal framework. Can sleep.
- */
-static int mid_thermal_probe(struct platform_device *pdev)
-{
- static char *name[MSIC_THERMAL_SENSORS] = {
- "skin0", "skin1", "sys", "msicdie"
- };
-
- int ret;
- int i;
- struct platform_info *pinfo;
-
- pinfo = devm_kzalloc(&pdev->dev, sizeof(struct platform_info),
- GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
-
- /* Initializing the hardware */
- ret = mid_initialize_adc(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "ADC init failed");
- return ret;
- }
-
- /* Register each sensor with the generic thermal framework*/
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- struct thermal_device_info *td_info = initialize_sensor(i);
-
- if (!td_info) {
- ret = -ENOMEM;
- goto err;
- }
- pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, 0, td_info, &tzd_ops, NULL, 0, 0);
- if (IS_ERR(pinfo->tzd[i])) {
- kfree(td_info);
- ret = PTR_ERR(pinfo->tzd[i]);
- goto err;
- }
- ret = thermal_zone_device_enable(pinfo->tzd[i]);
- if (ret) {
- kfree(td_info);
- thermal_zone_device_unregister(pinfo->tzd[i]);
- goto err;
- }
- }
-
- pinfo->pdev = pdev;
- platform_set_drvdata(pdev, pinfo);
- return 0;
-
-err:
- while (--i >= 0) {
- kfree(pinfo->tzd[i]->devdata);
- thermal_zone_device_unregister(pinfo->tzd[i]);
- }
- configure_adc(0);
- return ret;
-}
-
-/**
- * mid_thermal_remove - mfld thermal finalize
- * @dev: platform device structure
- *
- * MLFD thermal remove unregisters all the sensors from the generic
- * thermal framework. Can sleep.
- */
-static int mid_thermal_remove(struct platform_device *pdev)
-{
- int i;
- struct platform_info *pinfo = platform_get_drvdata(pdev);
-
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- kfree(pinfo->tzd[i]->devdata);
- thermal_zone_device_unregister(pinfo->tzd[i]);
- }
-
- /* Stop the ADC */
- return configure_adc(0);
-}
-
-#define DRIVER_NAME "msic_thermal"
-
-static const struct platform_device_id therm_id_table[] = {
- { DRIVER_NAME, 1 },
- { }
-};
-MODULE_DEVICE_TABLE(platform, therm_id_table);
-
-static struct platform_driver mid_thermal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .pm = &mid_thermal_pm,
- },
- .probe = mid_thermal_probe,
- .remove = mid_thermal_remove,
- .id_table = therm_id_table,
-};
-
-module_platform_driver(mid_thermal_driver);
-
-MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
-MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index d9cf7f7602b0..9171a46a9e3f 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
-#define IPC_TIMEOUT (3 * HZ)
+#define IPC_TIMEOUT (5 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index 8c5fd8240da9..80abc708e4f2 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -17,7 +17,6 @@
static int intel_scu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- void (*setup_fn)(void) = (void (*)(void))id->driver_data;
struct intel_scu_ipc_data scu_data = {};
struct intel_scu_ipc_dev *scu;
int ret;
@@ -30,27 +29,14 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
scu_data.irq = pdev->irq;
scu = intel_scu_ipc_register(&pdev->dev, &scu_data);
- if (IS_ERR(scu))
- return PTR_ERR(scu);
-
- if (setup_fn)
- setup_fn();
- return 0;
-}
-
-static void intel_mid_scu_setup(void)
-{
- intel_scu_devices_create();
+ return PTR_ERR_OR_ZERO(scu);
}
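
PTR_ERR_OR_ZERO() folds the old IS_ERR()/PTR_ERR() tail into a single return; its definition in <linux/err.h> is effectively:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}
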
static const struct pci_device_id pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x080e),
- .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
- { PCI_VDEVICE(INTEL, 0x08ea),
- .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
- { PCI_VDEVICE(INTEL, 0x11a0),
- .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
{ PCI_VDEVICE(INTEL, 0x1a94) },
{ PCI_VDEVICE(INTEL, 0x5a94) },
{}
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
new file mode 100644
index 000000000000..c2479777a1d6
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Merrifield watchdog platform device library file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
+
+#define TANGIER_EXT_TIMER0_MSI 12
+
+static struct platform_device wdt_dev = {
+ .name = "intel_mid_wdt",
+ .id = -1,
+};
+
+static int tangier_probe(struct platform_device *pdev)
+{
+ struct irq_alloc_info info;
+ struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+ int gsi = TANGIER_EXT_TIMER0_MSI;
+ int irq;
+
+ if (!pdata)
+ return -EINVAL;
+
+ /* IOAPIC builds identity mapping between GSI and IRQ on MID */
+ ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+ irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);
+ return irq;
+ }
+
+ pdata->irq = irq;
+ return 0;
+}
+
+static struct intel_mid_wdt_pdata tangier_pdata = {
+ .probe = tangier_probe,
+};
+
+static const struct x86_cpu_id intel_mid_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tangier_pdata),
+ {}
+};
+
+static int __init register_mid_wdt(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_mid_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ wdt_dev.dev.platform_data = (struct intel_mid_wdt_pdata *)id->driver_data;
+ return platform_device_register(&wdt_dev);
+}
+arch_initcall(register_mid_wdt);
+
+static void __exit unregister_mid_wdt(void)
+{
+ platform_device_unregister(&wdt_dev);
+}
+__exitcall(unregister_mid_wdt);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 64ee7819c9d3..fd318cdfe313 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -96,6 +96,8 @@ static int msi_wmi_query_block(int instance, int *ret)
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = output.pointer;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e03df2881dc6..b881044b31b0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -66,6 +66,7 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
+#include <linux/platform_profile.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
@@ -8783,6 +8784,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
@@ -9854,16 +9856,27 @@ static bool has_lapsensor;
static bool palm_state;
static bool lap_state;
-static int lapsensor_get(bool *present, bool *state)
+static int dytc_command(int command, int *output)
{
acpi_handle dytc_handle;
- int output;
- *present = false;
- if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DYTC", &dytc_handle)))
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DYTC", &dytc_handle))) {
+ /* Platform doesn't support DYTC */
return -ENODEV;
- if (!acpi_evalf(dytc_handle, &output, NULL, "dd", DYTC_CMD_GET))
+ }
+ if (!acpi_evalf(dytc_handle, output, NULL, "dd", command))
return -EIO;
+ return 0;
+}
+
+static int lapsensor_get(bool *present, bool *state)
+{
+ int output, err;
+
+ *present = false;
+ err = dytc_command(DYTC_CMD_GET, &output);
+ if (err)
+ return err;
*present = true; /* If we get this far, we have lapmode support */
*state = output & BIT(DYTC_GET_LAPMODE_BIT) ? true : false;
@@ -9951,9 +9964,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
return 0;
/* Otherwise, if there was an error return it */
- if (palm_err && (palm_err != ENODEV))
+ if (palm_err && (palm_err != -ENODEV))
return palm_err;
- if (lap_err && (lap_err != ENODEV))
+ if (lap_err && (lap_err != -ENODEV))
return lap_err;
if (has_palmsensor) {
@@ -9982,6 +9995,434 @@ static struct ibm_struct proxsensor_driver_data = {
.exit = proxsensor_exit,
};
+/*************************************************************************
+ * DYTC Platform Profile interface
+ */
+
+#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */
+#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+
+#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */
+
+#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
+#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
+
+#define DYTC_SET_FUNCTION_BIT 12 /* Bits 12-15 - function setting */
+#define DYTC_SET_MODE_BIT 16 /* Bits 16-19 - mode setting */
+#define DYTC_SET_VALID_BIT 20 /* Bit 20 - 1 = on, 0 = off */
+
+#define DYTC_FUNCTION_STD 0 /* Function = 0, standard mode */
+#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
+#define DYTC_FUNCTION_MMC 11 /* Function = 11, desk mode */
+
+#define DYTC_MODE_PERFORM 2 /* High power mode aka performance */
+#define DYTC_MODE_LOWPOWER 3 /* Low power mode */
+#define DYTC_MODE_BALANCE 0xF /* Default mode aka balanced */
+
+#define DYTC_SET_COMMAND(function, mode, on) \
+ (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
+ (mode) << DYTC_SET_MODE_BIT | \
+ (on) << DYTC_SET_VALID_BIT)
+
+#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 0)
+
+#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 1)
+
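+
A worked expansion of the command helpers above:

	/*
	 * DYTC_DISABLE_CQL
	 *   = DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 0)
	 *   = 0x1 | (0x1 << 12) | (0xF << 16) | (0x0 << 20)
	 *   = 0x000f1001
	 *
	 * DYTC_ENABLE_CQL differs only in the valid bit:
	 *   = 0x1 | (0x1 << 12) | (0xF << 16) | (0x1 << 20)
	 *   = 0x001f1001
	 */
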
+static bool dytc_profile_available;
+static enum platform_profile_option dytc_current_profile;
+static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
+static DEFINE_MUTEX(dytc_mutex);
+
+static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+{
+ switch (dytcmode) {
+ case DYTC_MODE_LOWPOWER:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case DYTC_MODE_BALANCE:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DYTC_MODE_PERFORM:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ default: /* Unknown mode */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int convert_profile_to_dytc(enum platform_profile_option profile, int *perfmode)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ *perfmode = DYTC_MODE_LOWPOWER;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ *perfmode = DYTC_MODE_BALANCE;
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ *perfmode = DYTC_MODE_PERFORM;
+ break;
+ default: /* Unknown profile */
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * dytc_profile_get: Function to register with platform_profile
+ * handler. Returns current platform profile.
+ */
+static int dytc_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ *profile = dytc_current_profile;
+ return 0;
+}
+
+/*
+ * Helper function - check if we are in CQL mode and if we are
+ * - disable CQL,
+ * - run the command
+ * - enable CQL
+ * If not in CQL mode, just run the command
+ */
+static int dytc_cql_command(int command, int *output)
+{
+ int err, cmd_err, dummy;
+ int cur_funcmode;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_command(DYTC_CMD_GET, output);
+ if (err)
+ return err;
+
+ cur_funcmode = (*output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ /* Check if we're OK to return immediately */
+ if ((command == DYTC_CMD_GET) && (cur_funcmode != DYTC_FUNCTION_CQL))
+ return 0;
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ atomic_inc(&dytc_ignore_event);
+ err = dytc_command(DYTC_DISABLE_CQL, &dummy);
+ if (err)
+ return err;
+ }
+
+ cmd_err = dytc_command(command, output);
+ /* Check return condition after we've restored CQL state */
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = dytc_command(DYTC_ENABLE_CQL, &dummy);
+ if (err)
+ return err;
+ }
+
+ return cmd_err;
+}
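Note the design choice here: the atomic_inc() of dytc_ignore_event pairs
with the atomic_add_unless() in tpacpi_driver_event() further down, so the
firmware event generated by our own CQL toggle does not trigger a spurious
profile refresh.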
+
+/*
+ * dytc_profile_set: Function to register with platform_profile
+ * handler. Sets current platform profile.
+ */
+static int dytc_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int output;
+ int err;
+
+ if (!dytc_profile_available)
+ return -ENODEV;
+
+ err = mutex_lock_interruptible(&dytc_mutex);
+ if (err)
+ return err;
+
+ if (profile == PLATFORM_PROFILE_BALANCED) {
+ /* To get back to balanced mode we just issue a reset command */
+ err = dytc_command(DYTC_CMD_RESET, &output);
+ if (err)
+ goto unlock;
+ } else {
+ int perfmode;
+
+ err = convert_profile_to_dytc(profile, &perfmode);
+ if (err)
+ goto unlock;
+
+		/* dytc_cql_command() transparently handles any CQL disable/re-enable */
+ err = dytc_cql_command(DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1), &output);
+ if (err)
+ goto unlock;
+ }
+ /* Success - update current profile */
+ dytc_current_profile = profile;
+unlock:
+ mutex_unlock(&dytc_mutex);
+ return err;
+}
+
+static void dytc_profile_refresh(void)
+{
+ enum platform_profile_option profile;
+ int output, err;
+ int perfmode;
+
+ mutex_lock(&dytc_mutex);
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
+ mutex_unlock(&dytc_mutex);
+ if (err)
+ return;
+
+	perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+	err = convert_dytc_to_profile(perfmode, &profile);
+	if (err) /* unknown mode - leave the cached profile untouched */
+		return;
+ if (profile != dytc_current_profile) {
+ dytc_current_profile = profile;
+ platform_profile_notify();
+ }
+}
+
+static struct platform_profile_handler dytc_profile = {
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
+static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+{
+ int err, output;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dytc_profile.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
+
+ dytc_profile_available = false;
+ err = dytc_command(DYTC_CMD_QUERY, &output);
+	/*
+	 * If support isn't available (ENODEV), don't return an error
+	 * and don't register the profile handler.
+	 */
+ if (err == -ENODEV)
+ return 0;
+ /* For all other errors we can flag the failure */
+ if (err)
+ return err;
+
+ /* Check DYTC is enabled and supports mode setting */
+ if (output & BIT(DYTC_QUERY_ENABLE_BIT)) {
+ /* Only DYTC v5.0 and later has this feature. */
+ int dytc_version;
+
+ dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ if (dytc_version >= 5) {
+ dbg_printk(TPACPI_DBG_INIT,
+ "DYTC version %d: thermal mode available\n", dytc_version);
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dytc_profile);
+			/*
+			 * If for some reason platform profiles aren't enabled,
+			 * don't treat it as a fatal error.
+			 */
+ if (err)
+ return 0;
+
+ dytc_profile_available = true;
+ /* Ensure initial values are correct */
+ dytc_profile_refresh();
+ }
+ }
+ return 0;
+}
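As an illustration of the query decode above (the value is made up), a
platform with DYTC enabled at revision 5 might return:

	int output = 0x50000100;	/* hypothetical DYTC_CMD_QUERY result */
	/* output & BIT(DYTC_QUERY_ENABLE_BIT)  -> nonzero: DYTC enabled      */
	/* (output >> DYTC_QUERY_REV_BIT) & 0xF -> 5: profile handler created */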
+
+static void dytc_profile_exit(void)
+{
+ if (dytc_profile_available) {
+ dytc_profile_available = false;
+ platform_profile_remove();
+ }
+}
+
+static struct ibm_struct dytc_profile_driver_data = {
+ .name = "dytc-profile",
+ .exit = dytc_profile_exit,
+};
+
+/*************************************************************************
+ * Keyboard language interface
+ */
+
+struct keyboard_lang_data {
+ const char *lang_str;
+ int lang_code;
+};
+
+static const struct keyboard_lang_data keyboard_lang_data[] = {
+ {"be", 0x080c},
+ {"cz", 0x0405},
+ {"da", 0x0406},
+ {"de", 0x0c07},
+ {"en", 0x0000},
+ {"es", 0x2c0a},
+ {"et", 0x0425},
+ {"fr", 0x040c},
+ {"fr-ch", 0x100c},
+ {"hu", 0x040e},
+ {"it", 0x0410},
+ {"jp", 0x0411},
+ {"nl", 0x0413},
+ {"nn", 0x0414},
+ {"pl", 0x0415},
+ {"pt", 0x0816},
+ {"sl", 0x041b},
+ {"sv", 0x081d},
+ {"tr", 0x041f},
+};
+
+static int set_keyboard_lang_command(int command)
+{
+ acpi_handle sskl_handle;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "SSKL", &sskl_handle))) {
+ /* Platform doesn't support SSKL */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(sskl_handle, &output, NULL, "dd", command))
+ return -EIO;
+
+ return 0;
+}
+
+static int get_keyboard_lang(int *output)
+{
+ acpi_handle gskl_handle;
+ int kbd_lang;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GSKL", &gskl_handle))) {
+ /* Platform doesn't support GSKL */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(gskl_handle, &kbd_lang, NULL, "dd", 0x02000000))
+ return -EIO;
+
+ /*
+	 * METHOD_ERR gets returned on devices that have no special keys
+	 * (e.g. '=', '(' and ')') relying on layout-dependent key-press emulation.
+ */
+ if (kbd_lang & METHOD_ERR)
+ return -ENODEV;
+
+ *output = kbd_lang;
+
+ return 0;
+}
+
+/* sysfs keyboard language entry */
+static ssize_t keyboard_lang_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int output, err, i, len = 0;
+
+ err = get_keyboard_lang(&output);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(keyboard_lang_data); i++) {
+ if (i)
+ len += sysfs_emit_at(buf, len, "%s", " ");
+
+ if (output == keyboard_lang_data[i].lang_code) {
+ len += sysfs_emit_at(buf, len, "[%s]", keyboard_lang_data[i].lang_str);
+ } else {
+ len += sysfs_emit_at(buf, len, "%s", keyboard_lang_data[i].lang_str);
+ }
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
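For illustration, with the table above and English active, reading the
resulting sysfs attribute would emit something like:

	be cz da de [en] es et fr fr-ch hu it jp nl nn pl pt sl sv tr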
+
+static ssize_t keyboard_lang_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err, i;
+ bool lang_found = false;
+ int lang_code = 0;
+
+ for (i = 0; i < ARRAY_SIZE(keyboard_lang_data); i++) {
+ if (sysfs_streq(buf, keyboard_lang_data[i].lang_str)) {
+ lang_code = keyboard_lang_data[i].lang_code;
+ lang_found = true;
+ break;
+ }
+ }
+
+ if (lang_found) {
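+		/*
+		 * Bit 24 appears to select the "set" operation, mirroring the
+		 * 0x02000000 argument GSKL takes above; the exact semantics
+		 * are not documented here.
+		 */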
+ lang_code = lang_code | 1 << 24;
+
+ /* Set language code */
+ err = set_keyboard_lang_command(lang_code);
+ if (err)
+ return err;
+ } else {
+		dev_err(&tpacpi_pdev->dev, "Unknown keyboard language, ignoring\n");
+ return -EINVAL;
+ }
+
+ tpacpi_disclose_usertask(attr->attr.name,
+ "keyboard language is set to %s\n", buf);
+
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "keyboard_lang");
+
+ return count;
+}
+static DEVICE_ATTR_RW(keyboard_lang);
+
+static struct attribute *kbdlang_attributes[] = {
+ &dev_attr_keyboard_lang.attr,
+ NULL
+};
+
+static const struct attribute_group kbdlang_attr_group = {
+ .attrs = kbdlang_attributes,
+};
+
+static int tpacpi_kbdlang_init(struct ibm_init_struct *iibm)
+{
+ int err, output;
+
+ err = get_keyboard_lang(&output);
+ /*
+	 * If support isn't available (ENODEV), don't return an error;
+	 * just don't create the sysfs group.
+ */
+ if (err == -ENODEV)
+ return 0;
+
+ if (err)
+ return err;
+
+ /* Platform supports this feature - create the sysfs file */
+ return sysfs_create_group(&tpacpi_pdev->dev.kobj, &kbdlang_attr_group);
+}
+
+static void kbdlang_exit(void)
+{
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &kbdlang_attr_group);
+}
+
+static struct ibm_struct kbdlang_driver_data = {
+ .name = "kbdlang",
+ .exit = kbdlang_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -10030,8 +10471,12 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
mutex_unlock(&kbdlight_mutex);
}
- if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED)
+ if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED) {
lapsensor_refresh();
+		/* If we are already accessing DYTC, skip the profile refresh */
+ if (!atomic_add_unless(&dytc_ignore_event, -1, 0))
+ dytc_profile_refresh();
+ }
}
static void hotkey_driver_event(const unsigned int scancode)
@@ -10474,6 +10919,14 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = tpacpi_proxsensor_init,
.data = &proxsensor_driver_data,
},
+ {
+ .init = tpacpi_dytc_profile_init,
+ .data = &dytc_profile_driver_data,
+ },
+ {
+ .init = tpacpi_kbdlang_init,
+ .data = &kbdlang_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 5783139d0a11..c44a6e8dceb8 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -372,6 +382,23 @@ static const struct ts_dmi_data jumper_ezpad_6_m4_data = {
.properties = jumper_ezpad_6_m4_props,
};
+static const struct property_entry jumper_ezpad_7_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2044),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_7_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_7_props,
+};
+
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -943,6 +970,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
@@ -1017,6 +1052,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 7 */
+ .driver_data = (void *)&jumper_ezpad_7_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ /* Jumper12x.WJ2012.bsBKRCP05 with the version dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Jumper12x.WJ2012.bsBKRCP"),
+ },
+ },
+ {
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 187e4a1175b0..602c46893e83 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -33,6 +33,7 @@ struct pnp_info_buffer {
typedef struct pnp_info_buffer pnp_info_buffer_t;
+__printf(2, 3)
static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...)
{
va_list args;
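(The __printf(2, 3) attribute makes the compiler type-check pnp_printf()'s
format string, parameter 2, against its variadic arguments starting at
parameter 3, the same checking printk-style functions get.)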
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index ba5cfc3dbe11..ddc6f2163c8e 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -72,7 +72,7 @@ __visible u32 pnp_bios_fault_esp;
__visible u32 pnp_bios_fault_eip;
__visible u32 pnp_bios_is_utter_crap = 0;
-static spinlock_t pnp_bios_lock;
+static DEFINE_SPINLOCK(pnp_bios_lock);
/*
* Support Functions
@@ -473,7 +473,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
int i;
- spin_lock_init(&pnp_bios_lock);
pnp_bios_callpoint.offset = header->fields.pm16offset;
pnp_bios_callpoint.segment = PNP_CS16;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index b22c4fdb2561..4d1192062508 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -39,6 +39,13 @@ config POWER_RESET_AT91_SAMA5D2_SHDWC
This driver supports the alternate shutdown controller for some Atmel
SAMA5 SoCs. It is present for example on SAMA5D2 SoC.
+config POWER_RESET_ATC260X
+ tristate "Actions Semi ATC260x PMIC power-off driver"
+ depends on MFD_ATC260X
+ help
+ This driver provides power-off and restart support for a system
+ through Actions Semi ATC260x series PMICs.
+
config POWER_RESET_AXXIA
bool "LSI Axxia reset driver"
depends on ARCH_AXXIA
@@ -251,13 +258,6 @@ config POWER_RESET_RMOBILE
help
Reboot support for Renesas R-Mobile and SH-Mobile SoCs.
-config POWER_RESET_ZX
- tristate "ZTE SoCs reset driver"
- depends on ARCH_ZX || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Reboot support for ZTE SoCs.
-
config REBOOT_MODE
tristate
@@ -292,4 +292,3 @@ config NVMEM_REBOOT_MODE
action according to the mode.
endif
-
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 9dc49d3a57ff..4f959b697606 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
obj-$(CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC) += at91-sama5d2_shdwc.o
+obj-$(CONFIG_POWER_RESET_ATC260X) += atc260x-poweroff.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
@@ -29,7 +30,6 @@ obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
-obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 2fe3a627cb53..125e592af445 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -37,7 +37,7 @@
#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
#define AT91_SHDW_WKUPDBC_SHIFT 24
-#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
+#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24)
#define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
& AT91_SHDW_WKUPDBC_MASK)
@@ -78,9 +78,15 @@ struct pmc_reg_config {
u8 mckr;
};
+struct ddrc_reg_config {
+ u32 type_offset;
+ u32 type_mask;
+};
+
struct reg_config {
struct shdwc_reg_config shdwc;
struct pmc_reg_config pmc;
+ struct ddrc_reg_config ddrc;
};
struct shdwc {
@@ -262,6 +268,10 @@ static const struct reg_config sama5d2_reg_config = {
.pmc = {
.mckr = 0x30,
},
+ .ddrc = {
+ .type_offset = AT91_DDRSDRC_MDR,
+ .type_mask = AT91_DDRSDRC_MD
+ },
};
static const struct reg_config sam9x60_reg_config = {
@@ -275,6 +285,23 @@ static const struct reg_config sam9x60_reg_config = {
.pmc = {
.mckr = 0x28,
},
+ .ddrc = {
+ .type_offset = AT91_DDRSDRC_MDR,
+ .type_mask = AT91_DDRSDRC_MD
+ },
+};
+
+static const struct reg_config sama7g5_reg_config = {
+ .shdwc = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
+ },
+ .pmc = {
+ .mckr = 0x28,
+ },
};
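Note that sama7g5_reg_config deliberately leaves .ddrc zeroed: the probe
change below keys off a zero type_mask to skip the LPDDR detection entirely
on that SoC.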
static const struct of_device_id at91_shdwc_of_match[] = {
@@ -285,6 +312,10 @@ static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "microchip,sam9x60-shdwc",
.data = &sam9x60_reg_config,
+ },
+ {
+ .compatible = "microchip,sama7g5-shdwc",
+ .data = &sama7g5_reg_config,
}, {
/*sentinel*/
}
@@ -294,6 +325,7 @@ MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
static const struct of_device_id at91_pmc_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sam9x60-pmc" },
+ { .compatible = "microchip,sama7g5-pmc" },
{ /* Sentinel. */ }
};
@@ -355,30 +387,34 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
goto clk_disable;
}
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
- if (!np) {
- ret = -ENODEV;
- goto unmap;
- }
+ if (at91_shdwc->rcfg->ddrc.type_mask) {
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,sama5d3-ddramc");
+ if (!np) {
+ ret = -ENODEV;
+ goto unmap;
+ }
- at91_shdwc->mpddrc_base = of_iomap(np, 0);
- of_node_put(np);
+ at91_shdwc->mpddrc_base = of_iomap(np, 0);
+ of_node_put(np);
- if (!at91_shdwc->mpddrc_base) {
- ret = -ENOMEM;
- goto unmap;
+ if (!at91_shdwc->mpddrc_base) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ddr_type = readl(at91_shdwc->mpddrc_base +
+ at91_shdwc->rcfg->ddrc.type_offset) &
+ at91_shdwc->rcfg->ddrc.type_mask;
+ if (ddr_type != AT91_DDRSDRC_MD_LPDDR2 &&
+ ddr_type != AT91_DDRSDRC_MD_LPDDR3) {
+ iounmap(at91_shdwc->mpddrc_base);
+ at91_shdwc->mpddrc_base = NULL;
+ }
}
pm_power_off = at91_poweroff;
- ddr_type = readl(at91_shdwc->mpddrc_base + AT91_DDRSDRC_MDR) &
- AT91_DDRSDRC_MD;
- if (ddr_type != AT91_DDRSDRC_MD_LPDDR2 &&
- ddr_type != AT91_DDRSDRC_MD_LPDDR3) {
- iounmap(at91_shdwc->mpddrc_base);
- at91_shdwc->mpddrc_base = NULL;
- }
-
return 0;
unmap:
diff --git a/drivers/power/reset/atc260x-poweroff.c b/drivers/power/reset/atc260x-poweroff.c
new file mode 100644
index 000000000000..98f20251a6d1
--- /dev/null
+++ b/drivers/power/reset/atc260x-poweroff.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Poweroff & reset driver for Actions Semi ATC260x PMICs
+ *
+ * Copyright (c) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/atc260x/core.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+struct atc260x_pwrc {
+ struct device *dev;
+ struct regmap *regmap;
+ struct notifier_block restart_nb;
+ int (*do_poweroff)(const struct atc260x_pwrc *pwrc, bool restart);
+};
+
+/* Global variable needed only for pm_power_off */
+static struct atc260x_pwrc *atc260x_pwrc_data;
+
+static int atc2603c_do_poweroff(const struct atc260x_pwrc *pwrc, bool restart)
+{
+ int ret, deep_sleep = 0;
+ uint reg_mask, reg_val;
+
+ /* S4-Deep Sleep Mode is NOT available for WALL/USB power */
+ if (!restart && !power_supply_is_system_supplied()) {
+ deep_sleep = 1;
+		dev_info(pwrc->dev, "Enabling S4-Deep Sleep Mode\n");
+ }
+
+ /* Update wakeup sources */
+ reg_val = ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN |
+ (restart ? ATC2603C_PMU_SYS_CTL0_RESET_WK_EN
+ : ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN);
+
+ ret = regmap_update_bits(pwrc->regmap, ATC2603C_PMU_SYS_CTL0,
+ ATC2603C_PMU_SYS_CTL0_WK_ALL, reg_val);
+ if (ret)
+ dev_warn(pwrc->dev, "failed to write SYS_CTL0: %d\n", ret);
+
+ /* Update power mode */
+ reg_mask = ATC2603C_PMU_SYS_CTL3_EN_S2 | ATC2603C_PMU_SYS_CTL3_EN_S3;
+
+ ret = regmap_update_bits(pwrc->regmap, ATC2603C_PMU_SYS_CTL3, reg_mask,
+ deep_sleep ? 0 : ATC2603C_PMU_SYS_CTL3_EN_S3);
+ if (ret) {
+ dev_err(pwrc->dev, "failed to write SYS_CTL3: %d\n", ret);
+ return ret;
+ }
+
+ /* Trigger poweroff / restart sequence */
+ reg_mask = restart ? ATC2603C_PMU_SYS_CTL0_RESTART_EN
+ : ATC2603C_PMU_SYS_CTL1_EN_S1;
+ reg_val = restart ? ATC2603C_PMU_SYS_CTL0_RESTART_EN : 0;
+
+ ret = regmap_update_bits(pwrc->regmap,
+ restart ? ATC2603C_PMU_SYS_CTL0 : ATC2603C_PMU_SYS_CTL1,
+ reg_mask, reg_val);
+ if (ret) {
+ dev_err(pwrc->dev, "failed to write SYS_CTL%d: %d\n",
+ restart ? 0 : 1, ret);
+ return ret;
+ }
+
+ /* Wait for trigger completion */
+ mdelay(200);
+
+ return 0;
+}
+
+static int atc2609a_do_poweroff(const struct atc260x_pwrc *pwrc, bool restart)
+{
+ int ret, deep_sleep = 0;
+ uint reg_mask, reg_val;
+
+ /* S4-Deep Sleep Mode is NOT available for WALL/USB power */
+ if (!restart && !power_supply_is_system_supplied()) {
+ deep_sleep = 1;
+		dev_info(pwrc->dev, "Enabling S4-Deep Sleep Mode\n");
+ }
+
+ /* Update wakeup sources */
+ reg_val = ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN |
+ (restart ? ATC2609A_PMU_SYS_CTL0_RESET_WK_EN
+ : ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN);
+
+ ret = regmap_update_bits(pwrc->regmap, ATC2609A_PMU_SYS_CTL0,
+ ATC2609A_PMU_SYS_CTL0_WK_ALL, reg_val);
+ if (ret)
+ dev_warn(pwrc->dev, "failed to write SYS_CTL0: %d\n", ret);
+
+ /* Update power mode */
+ reg_mask = ATC2609A_PMU_SYS_CTL3_EN_S2 | ATC2609A_PMU_SYS_CTL3_EN_S3;
+
+ ret = regmap_update_bits(pwrc->regmap, ATC2609A_PMU_SYS_CTL3, reg_mask,
+ deep_sleep ? 0 : ATC2609A_PMU_SYS_CTL3_EN_S3);
+ if (ret) {
+ dev_err(pwrc->dev, "failed to write SYS_CTL3: %d\n", ret);
+ return ret;
+ }
+
+ /* Trigger poweroff / restart sequence */
+ reg_mask = restart ? ATC2609A_PMU_SYS_CTL0_RESTART_EN
+ : ATC2609A_PMU_SYS_CTL1_EN_S1;
+ reg_val = restart ? ATC2609A_PMU_SYS_CTL0_RESTART_EN : 0;
+
+ ret = regmap_update_bits(pwrc->regmap,
+ restart ? ATC2609A_PMU_SYS_CTL0 : ATC2609A_PMU_SYS_CTL1,
+ reg_mask, reg_val);
+ if (ret) {
+ dev_err(pwrc->dev, "failed to write SYS_CTL%d: %d\n",
+ restart ? 0 : 1, ret);
+ return ret;
+ }
+
+ /* Wait for trigger completion */
+ mdelay(200);
+
+ return 0;
+}
+
+static int atc2603c_init(const struct atc260x_pwrc *pwrc)
+{
+ int ret;
+
+ /*
+	 * Delay the transition from S2/S3 to S1 in order to avoid
+	 * a DDR init failure in the bootloader.
+	 */
+ ret = regmap_update_bits(pwrc->regmap, ATC2603C_PMU_SYS_CTL3,
+ ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN,
+ ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN);
+ if (ret)
+ dev_warn(pwrc->dev, "failed to write SYS_CTL3: %d\n", ret);
+
+ /* Set wakeup sources */
+ ret = regmap_update_bits(pwrc->regmap, ATC2603C_PMU_SYS_CTL0,
+ ATC2603C_PMU_SYS_CTL0_WK_ALL,
+ ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN |
+ ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN);
+ if (ret)
+ dev_warn(pwrc->dev, "failed to write SYS_CTL0: %d\n", ret);
+
+ return ret;
+}
+
+static int atc2609a_init(const struct atc260x_pwrc *pwrc)
+{
+ int ret;
+
+ /* Set wakeup sources */
+ ret = regmap_update_bits(pwrc->regmap, ATC2609A_PMU_SYS_CTL0,
+ ATC2609A_PMU_SYS_CTL0_WK_ALL,
+ ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN |
+ ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN);
+ if (ret)
+ dev_warn(pwrc->dev, "failed to write SYS_CTL0: %d\n", ret);
+
+ return ret;
+}
+
+static void atc260x_pwrc_pm_handler(void)
+{
+ atc260x_pwrc_data->do_poweroff(atc260x_pwrc_data, false);
+
+ WARN_ONCE(1, "Unable to power off system\n");
+}
+
+static int atc260x_pwrc_restart_handler(struct notifier_block *nb,
+ unsigned long mode, void *cmd)
+{
+ struct atc260x_pwrc *pwrc = container_of(nb, struct atc260x_pwrc,
+ restart_nb);
+ pwrc->do_poweroff(pwrc, true);
+
+ return NOTIFY_DONE;
+}
+
+static int atc260x_pwrc_probe(struct platform_device *pdev)
+{
+ struct atc260x *atc260x = dev_get_drvdata(pdev->dev.parent);
+ struct atc260x_pwrc *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->regmap = atc260x->regmap;
+ priv->restart_nb.notifier_call = atc260x_pwrc_restart_handler;
+ priv->restart_nb.priority = 192;
+
+ switch (atc260x->ic_type) {
+ case ATC2603C:
+ priv->do_poweroff = atc2603c_do_poweroff;
+ ret = atc2603c_init(priv);
+ break;
+ case ATC2609A:
+ priv->do_poweroff = atc2609a_do_poweroff;
+ ret = atc2609a_init(priv);
+ break;
+ default:
+ dev_err(priv->dev,
+ "Poweroff not supported for ATC260x PMIC type: %u\n",
+ atc260x->ic_type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, priv);
+
+ if (!pm_power_off) {
+ atc260x_pwrc_data = priv;
+ pm_power_off = atc260x_pwrc_pm_handler;
+ } else {
+ dev_warn(priv->dev, "Poweroff callback already assigned\n");
+ }
+
+ ret = register_restart_handler(&priv->restart_nb);
+ if (ret)
+ dev_err(priv->dev, "failed to register restart handler: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int atc260x_pwrc_remove(struct platform_device *pdev)
+{
+ struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
+
+ if (atc260x_pwrc_data == priv) {
+ pm_power_off = NULL;
+ atc260x_pwrc_data = NULL;
+ }
+
+ unregister_restart_handler(&priv->restart_nb);
+
+ return 0;
+}
+
+static struct platform_driver atc260x_pwrc_driver = {
+ .probe = atc260x_pwrc_probe,
+ .remove = atc260x_pwrc_remove,
+ .driver = {
+ .name = "atc260x-pwrc",
+ },
+};
+
+module_platform_driver(atc260x_pwrc_driver);
+
+MODULE_DESCRIPTION("Poweroff & reset driver for ATC260x PMICs");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/linkstation-poweroff.c b/drivers/power/reset/linkstation-poweroff.c
index 39e89baedb5f..f1e843df0e16 100644
--- a/drivers/power/reset/linkstation-poweroff.c
+++ b/drivers/power/reset/linkstation-poweroff.c
@@ -113,6 +113,7 @@ static int __init linkstation_poweroff_init(void)
return -EPROBE_DEFER;
phydev = phy_find_first(bus);
+ put_device(&bus->dev);
if (!phydev)
return -EPROBE_DEFER;
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
deleted file mode 100644
index 457950833dba..000000000000
--- a/drivers/power/reset/zx-reboot.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ZTE zx296702 SoC reset code
- *
- * Copyright (c) 2015 Linaro Ltd.
- *
- * Author: Jun Nie <jun.nie@linaro.org>
- */
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-
-static void __iomem *base;
-static void __iomem *pcu_base;
-
-static int zx_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- writel_relaxed(1, base + 0xb0);
- writel_relaxed(1, pcu_base + 0x34);
-
- mdelay(50);
- pr_emerg("Unable to restart system\n");
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block zx_restart_nb = {
- .notifier_call = zx_restart_handler,
- .priority = 128,
-};
-
-static int zx_reboot_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- int err;
-
- base = of_iomap(np, 0);
- if (!base) {
- WARN(1, "failed to map base address");
- return -ENODEV;
- }
-
- np = of_find_compatible_node(NULL, NULL, "zte,zx296702-pcu");
- pcu_base = of_iomap(np, 0);
- of_node_put(np);
- if (!pcu_base) {
- iounmap(base);
- WARN(1, "failed to map pcu_base address");
- return -ENODEV;
- }
-
- err = register_restart_handler(&zx_restart_nb);
- if (err) {
- iounmap(base);
- iounmap(pcu_base);
- dev_err(&pdev->dev, "Register restart handler failed(err=%d)\n",
- err);
- }
-
- return err;
-}
-
-static const struct of_device_id zx_reboot_of_match[] = {
- { .compatible = "zte,sysctrl" },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
-
-static struct platform_driver zx_reboot_driver = {
- .probe = zx_reboot_probe,
- .driver = {
- .name = "zx-reboot",
- .of_match_table = zx_reboot_of_match,
- },
-};
-module_platform_driver(zx_reboot_driver);
-
-MODULE_DESCRIPTION("ZTE SoCs reset driver");
-MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index eec646c568b7..006b95eca673 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -229,6 +229,7 @@ config BATTERY_SBS
config CHARGER_SBS
tristate "SBS Compliant charger"
depends on I2C
+ select REGMAP_I2C
help
Say Y to include support for SBS compliant battery chargers.
@@ -513,6 +514,14 @@ config CHARGER_LT3651
Say Y to include support for the Analog Devices (Linear Technology)
LT3651 battery charger which reports its status via GPIO lines.
+config CHARGER_LTC4162L
+ tristate "LTC4162-L charger"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to include support for the Analog Devices (Linear Technology)
+ LTC4162-L battery charger connected to I2C.
+
config CHARGER_MAX14577
tristate "Maxim MAX14577/77836 battery charger driver"
depends on MFD_MAX14577
@@ -546,6 +555,7 @@ config CHARGER_MAX77693
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
+ depends on EXTCON || !EXTCON
help
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8997/LP3974 PMICs.
@@ -645,6 +655,17 @@ config CHARGER_BQ25980
Say Y to enable support for the TI BQ25980, BQ25975 and BQ25960
series of fast battery chargers.
+config CHARGER_BQ256XX
+ tristate "TI BQ256XX battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+	  Say Y to enable support for the TI BQ256XX battery chargers. The
+	  BQ256XX family consists of highly integrated, switch-mode battery
+	  charge management and system power-path management devices for
+	  single-cell Li-ion and Li-polymer batteries.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB3XX Battery Charger"
depends on I2C
@@ -774,4 +795,10 @@ config RN5T618_POWER
This driver can also be built as a module. If so, the module will be
called rn5t618_power.
+config BATTERY_ACER_A500
+ tristate "Acer Iconia Tab A500 battery driver"
+ depends on MFD_ACER_A500_EC
+ help
+	  Say Y to include support for the Acer Iconia Tab A500 battery fuel gauge.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index dd4b86318cd9..5e5fdbbef531 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_LT3651) += lt3651-charger.o
+obj-$(CONFIG_CHARGER_LTC4162L) += ltc4162-l-charger.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
@@ -85,6 +86,7 @@ obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o
obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_CHARGER_BQ25980) += bq25980_charger.o
+obj-$(CONFIG_CHARGER_BQ256XX) += bq256xx_charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
@@ -98,3 +100,4 @@ obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o
obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
obj-$(CONFIG_RN5T618_POWER) += rn5t618_power.o
+obj-$(CONFIG_BATTERY_ACER_A500) += acer_a500_battery.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 3873e4857e3d..06ff42c71f24 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -857,7 +857,7 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
const struct abx500_v_to_cap *tbl;
int cap = 0;
- tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
+ tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl;
tbl_size = di->bm->bat_type[di->bm->batt_id].n_v_cap_tbl_elements;
for (i = 0; i < tbl_size; ++i) {
diff --git a/drivers/power/supply/acer_a500_battery.c b/drivers/power/supply/acer_a500_battery.c
new file mode 100644
index 000000000000..32a0bfcac08f
--- /dev/null
+++ b/drivers/power/supply/acer_a500_battery.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Battery driver for Acer Iconia Tab A500.
+ *
+ * Copyright 2020 GRATE-driver project.
+ *
+ * Based on downstream driver from Acer Inc.
+ * Based on NVIDIA Gas Gauge driver for SBS Compliant Batteries.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+enum {
+ REG_CAPACITY,
+ REG_VOLTAGE,
+ REG_CURRENT,
+ REG_DESIGN_CAPACITY,
+ REG_TEMPERATURE,
+};
+
+#define EC_DATA(_reg, _psp) { \
+ .psp = POWER_SUPPLY_PROP_ ## _psp, \
+ .reg = _reg, \
+}
+
+static const struct battery_register {
+ enum power_supply_property psp;
+ unsigned int reg;
+} ec_data[] = {
+ [REG_CAPACITY] = EC_DATA(0x00, CAPACITY),
+ [REG_VOLTAGE] = EC_DATA(0x01, VOLTAGE_NOW),
+ [REG_CURRENT] = EC_DATA(0x03, CURRENT_NOW),
+ [REG_DESIGN_CAPACITY] = EC_DATA(0x08, CHARGE_FULL_DESIGN),
+ [REG_TEMPERATURE] = EC_DATA(0x0a, TEMP),
+};
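For clarity, the EC_DATA() macro above expands to a designated initializer;
e.g.:

	/* EC_DATA(0x01, VOLTAGE_NOW) expands to
	 *   { .psp = POWER_SUPPLY_PROP_VOLTAGE_NOW, .reg = 0x01 }
	 */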
+
+static const enum power_supply_property a500_battery_properties[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+struct a500_battery {
+ struct delayed_work poll_work;
+ struct power_supply *psy;
+ struct regmap *regmap;
+ unsigned int capacity;
+};
+
+static bool a500_battery_update_capacity(struct a500_battery *bat)
+{
+ unsigned int capacity;
+ int err;
+
+ err = regmap_read(bat->regmap, ec_data[REG_CAPACITY].reg, &capacity);
+ if (err)
+ return false;
+
+	/* the EC can report >100%, so clamp to the class maximum of 100% */
+ capacity = min(capacity, 100u);
+
+ if (bat->capacity != capacity) {
+ bat->capacity = capacity;
+ return true;
+ }
+
+ return false;
+}
+
+static int a500_battery_get_status(struct a500_battery *bat)
+{
+ if (bat->capacity < 100) {
+ if (power_supply_am_i_supplied(bat->psy))
+ return POWER_SUPPLY_STATUS_CHARGING;
+ else
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ return POWER_SUPPLY_STATUS_FULL;
+}
+
+static void a500_battery_unit_adjustment(struct device *dev,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ const unsigned int base_unit_conversion = 1000;
+ const unsigned int temp_kelvin_to_celsius = 2731;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval *= base_unit_conversion;
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval -= temp_kelvin_to_celsius;
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!val->intval;
+ break;
+
+ default:
+ dev_dbg(dev,
+ "%s: no need for unit conversion %d\n", __func__, psp);
+ }
+}
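A worked example of the temperature adjustment above, assuming the EC
reports tenths of a kelvin:

	/* raw EC value 3031 -> 3031 - 2731 = 300, i.e. 30.0 degC in the
	 * power-supply class's tenths-of-a-degree-Celsius unit
	 */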
+
+static int a500_battery_get_ec_data_index(struct device *dev,
+ enum power_supply_property psp)
+{
+ unsigned int i;
+
+ /*
+	 * The DESIGN_CAPACITY register always returns a non-zero value if
+	 * the battery is connected and zero if it is disconnected, hence
+	 * we use it to judge battery presence.
+ */
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ psp = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
+
+ for (i = 0; i < ARRAY_SIZE(ec_data); i++)
+ if (psp == ec_data[i].psp)
+ return i;
+
+ dev_dbg(dev, "%s: invalid property %u\n", __func__, psp);
+
+ return -EINVAL;
+}
+
+static int a500_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct a500_battery *bat = power_supply_get_drvdata(psy);
+ struct device *dev = psy->dev.parent;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = a500_battery_get_status(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ a500_battery_update_capacity(bat);
+ val->intval = bat->capacity;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = a500_battery_get_ec_data_index(dev, psp);
+ if (ret < 0)
+ break;
+
+ ret = regmap_read(bat->regmap, ec_data[ret].reg, &val->intval);
+ break;
+
+ default:
+ dev_err(dev, "%s: invalid property %u\n", __func__, psp);
+ return -EINVAL;
+ }
+
+ if (!ret) {
+ /* convert units to match requirements of power supply class */
+ a500_battery_unit_adjustment(dev, psp, val);
+ }
+
+ dev_dbg(dev, "%s: property = %d, value = %x\n",
+ __func__, psp, val->intval);
+
+	/* return -ENODATA for these properties if the battery is not present */
+ if (ret)
+ return -ENODATA;
+
+ return 0;
+}
+
+static void a500_battery_poll_work(struct work_struct *work)
+{
+ struct a500_battery *bat;
+ bool capacity_changed;
+
+ bat = container_of(work, struct a500_battery, poll_work.work);
+ capacity_changed = a500_battery_update_capacity(bat);
+
+ if (capacity_changed)
+ power_supply_changed(bat->psy);
+
+	/* keep polling the EC; a change uevent fires whenever capacity changes */
+ schedule_delayed_work(&bat->poll_work, 30 * HZ);
+}
+
+static const struct power_supply_desc a500_battery_desc = {
+ .name = "ec-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = a500_battery_properties,
+ .get_property = a500_battery_get_property,
+ .num_properties = ARRAY_SIZE(a500_battery_properties),
+ .external_power_changed = power_supply_changed,
+};
+
+static int a500_battery_probe(struct platform_device *pdev)
+{
+ struct power_supply_config psy_cfg = {};
+ struct a500_battery *bat;
+
+ bat = devm_kzalloc(&pdev->dev, sizeof(*bat), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bat);
+
+ psy_cfg.of_node = pdev->dev.parent->of_node;
+ psy_cfg.drv_data = bat;
+
+ bat->regmap = dev_get_regmap(pdev->dev.parent, "KB930");
+ if (!bat->regmap)
+ return -EINVAL;
+
+ bat->psy = devm_power_supply_register_no_ws(&pdev->dev,
+ &a500_battery_desc,
+ &psy_cfg);
+ if (IS_ERR(bat->psy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bat->psy),
+ "failed to register battery\n");
+
+ INIT_DELAYED_WORK(&bat->poll_work, a500_battery_poll_work);
+ schedule_delayed_work(&bat->poll_work, HZ);
+
+ return 0;
+}
+
+static int a500_battery_remove(struct platform_device *pdev)
+{
+ struct a500_battery *bat = dev_get_drvdata(&pdev->dev);
+
+ cancel_delayed_work_sync(&bat->poll_work);
+
+ return 0;
+}
+
+static int __maybe_unused a500_battery_suspend(struct device *dev)
+{
+ struct a500_battery *bat = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&bat->poll_work);
+
+ return 0;
+}
+
+static int __maybe_unused a500_battery_resume(struct device *dev)
+{
+ struct a500_battery *bat = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&bat->poll_work, HZ);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(a500_battery_pm_ops,
+ a500_battery_suspend, a500_battery_resume);
+
+static struct platform_driver a500_battery_driver = {
+ .driver = {
+ .name = "acer-a500-iconia-battery",
+ .pm = &a500_battery_pm_ops,
+ },
+ .probe = a500_battery_probe,
+ .remove = a500_battery_remove,
+};
+module_platform_driver(a500_battery_driver);
+
+MODULE_DESCRIPTION("Battery gauge driver for Acer Iconia Tab A500");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_ALIAS("platform:acer-a500-iconia-battery");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 70b28b699a80..8933ae26c3d6 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -593,6 +593,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
power->axp20x_id = axp_data->axp20x_id;
power->regmap = axp20x->regmap;
power->num_irqs = axp_data->num_irq_names;
+ INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
if (power->axp20x_id == AXP202_ID) {
/* Enable vbus valid checking */
@@ -645,7 +646,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
}
}
- INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
if (axp20x_usb_vbus_needs_polling(power))
queue_delayed_work(system_power_efficient_wq, &power->vbus_detect, 0);
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 148eb8105803..39e16ecb7638 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -732,6 +732,12 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
},
},
+ { /* Mele PCG03 Mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Mini PC"),
+ },
+ },
{
/* Minix Neo Z83-4 mini PC */
.matches = {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 4841e14a5bfb..852e86bfe2fb 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1766,7 +1766,7 @@ static int bq24190_probe(struct i2c_client *client,
charger_cfg.drv_data = bdi;
charger_cfg.of_node = dev->of_node;
charger_cfg.supplied_to = bq24190_charger_supplied_to;
- charger_cfg.num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to),
+ charger_cfg.num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
bdi->charger = power_supply_register(dev, &bq24190_charger_desc,
&charger_cfg);
if (IS_ERR(bdi->charger)) {
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
new file mode 100644
index 000000000000..2ab5ba4af92b
--- /dev/null
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -0,0 +1,1749 @@
+// SPDX-License-Identifier: GPL-2.0
+// BQ256XX Battery Charger Driver
+// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/usb/phy.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+
+#define BQ256XX_MANUFACTURER "Texas Instruments"
+
+#define BQ256XX_INPUT_CURRENT_LIMIT 0x00
+#define BQ256XX_CHARGER_CONTROL_0 0x01
+#define BQ256XX_CHARGE_CURRENT_LIMIT 0x02
+#define BQ256XX_PRECHG_AND_TERM_CURR_LIM 0x03
+#define BQ256XX_BATTERY_VOLTAGE_LIMIT 0x04
+#define BQ256XX_CHARGER_CONTROL_1 0x05
+#define BQ256XX_CHARGER_CONTROL_2 0x06
+#define BQ256XX_CHARGER_CONTROL_3 0x07
+#define BQ256XX_CHARGER_STATUS_0 0x08
+#define BQ256XX_CHARGER_STATUS_1 0x09
+#define BQ256XX_CHARGER_STATUS_2 0x0a
+#define BQ256XX_PART_INFORMATION 0x0b
+#define BQ256XX_CHARGER_CONTROL_4 0x0c
+
+#define BQ256XX_IINDPM_MASK GENMASK(4, 0)
+#define BQ256XX_IINDPM_STEP_uA 100000
+#define BQ256XX_IINDPM_OFFSET_uA 100000
+#define BQ256XX_IINDPM_MIN_uA 100000
+#define BQ256XX_IINDPM_MAX_uA 3200000
+#define BQ256XX_IINDPM_DEF_uA 2400000
+
+#define BQ256XX_VINDPM_MASK GENMASK(3, 0)
+#define BQ256XX_VINDPM_STEP_uV 100000
+#define BQ256XX_VINDPM_OFFSET_uV 3900000
+#define BQ256XX_VINDPM_MIN_uV 3900000
+#define BQ256XX_VINDPM_MAX_uV 5400000
+#define BQ256XX_VINDPM_DEF_uV 4500000
+
+#define BQ256XX_VBATREG_MASK GENMASK(7, 3)
+#define BQ2560X_VBATREG_STEP_uV 32000
+#define BQ2560X_VBATREG_OFFSET_uV 3856000
+#define BQ2560X_VBATREG_MIN_uV 3856000
+#define BQ2560X_VBATREG_MAX_uV 4624000
+#define BQ2560X_VBATREG_DEF_uV 4208000
+#define BQ25601D_VBATREG_OFFSET_uV 3847000
+#define BQ25601D_VBATREG_MIN_uV 3847000
+#define BQ25601D_VBATREG_MAX_uV 4615000
+#define BQ25601D_VBATREG_DEF_uV 4199000
+#define BQ2561X_VBATREG_STEP_uV 10000
+#define BQ25611D_VBATREG_MIN_uV 3494000
+#define BQ25611D_VBATREG_MAX_uV 4510000
+#define BQ25611D_VBATREG_DEF_uV 4190000
+#define BQ25618_VBATREG_MIN_uV 3504000
+#define BQ25618_VBATREG_MAX_uV 4500000
+#define BQ25618_VBATREG_DEF_uV 4200000
+#define BQ256XX_VBATREG_BIT_SHIFT 3
+#define BQ2561X_VBATREG_THRESH 0x8
+#define BQ25611D_VBATREG_THRESH_uV 4290000
+#define BQ25618_VBATREG_THRESH_uV 4300000
+
+#define BQ256XX_ITERM_MASK GENMASK(3, 0)
+#define BQ256XX_ITERM_STEP_uA 60000
+#define BQ256XX_ITERM_OFFSET_uA 60000
+#define BQ256XX_ITERM_MIN_uA 60000
+#define BQ256XX_ITERM_MAX_uA 780000
+#define BQ256XX_ITERM_DEF_uA 180000
+#define BQ25618_ITERM_STEP_uA 20000
+#define BQ25618_ITERM_OFFSET_uA 20000
+#define BQ25618_ITERM_MIN_uA 20000
+#define BQ25618_ITERM_MAX_uA 260000
+#define BQ25618_ITERM_DEF_uA 60000
+
+#define BQ256XX_IPRECHG_MASK GENMASK(7, 4)
+#define BQ256XX_IPRECHG_STEP_uA 60000
+#define BQ256XX_IPRECHG_OFFSET_uA 60000
+#define BQ256XX_IPRECHG_MIN_uA 60000
+#define BQ256XX_IPRECHG_MAX_uA 780000
+#define BQ256XX_IPRECHG_DEF_uA 180000
+#define BQ25618_IPRECHG_STEP_uA 20000
+#define BQ25618_IPRECHG_OFFSET_uA 20000
+#define BQ25618_IPRECHG_MIN_uA 20000
+#define BQ25618_IPRECHG_MAX_uA 260000
+#define BQ25618_IPRECHG_DEF_uA 40000
+#define BQ256XX_IPRECHG_BIT_SHIFT 4
+
+#define BQ256XX_ICHG_MASK GENMASK(5, 0)
+#define BQ256XX_ICHG_STEP_uA 60000
+#define BQ256XX_ICHG_MIN_uA 0
+#define BQ256XX_ICHG_MAX_uA 3000000
+#define BQ2560X_ICHG_DEF_uA 2040000
+#define BQ25611D_ICHG_DEF_uA 1020000
+#define BQ25618_ICHG_STEP_uA 20000
+#define BQ25618_ICHG_MIN_uA 0
+#define BQ25618_ICHG_MAX_uA 1500000
+#define BQ25618_ICHG_DEF_uA 340000
+#define BQ25618_ICHG_THRESH 0x3c
+#define BQ25618_ICHG_THRESH_uA 1180000
+
+#define BQ256XX_VBUS_STAT_MASK GENMASK(7, 5)
+#define BQ256XX_VBUS_STAT_NO_INPUT 0
+#define BQ256XX_VBUS_STAT_USB_SDP BIT(5)
+#define BQ256XX_VBUS_STAT_USB_CDP BIT(6)
+#define BQ256XX_VBUS_STAT_USB_DCP (BIT(6) | BIT(5))
+#define BQ256XX_VBUS_STAT_USB_OTG (BIT(7) | BIT(6) | BIT(5))
+
+#define BQ256XX_CHRG_STAT_MASK GENMASK(4, 3)
+#define BQ256XX_CHRG_STAT_NOT_CHRGING 0
+#define BQ256XX_CHRG_STAT_PRECHRGING BIT(3)
+#define BQ256XX_CHRG_STAT_FAST_CHRGING BIT(4)
+#define BQ256XX_CHRG_STAT_CHRG_TERM (BIT(4) | BIT(3))
+
+#define BQ256XX_PG_STAT_MASK BIT(2)
+#define BQ256XX_WDT_FAULT_MASK BIT(7)
+#define BQ256XX_CHRG_FAULT_MASK GENMASK(5, 4)
+#define BQ256XX_CHRG_FAULT_NORMAL 0
+#define BQ256XX_CHRG_FAULT_INPUT BIT(4)
+#define BQ256XX_CHRG_FAULT_THERM BIT(5)
+#define BQ256XX_CHRG_FAULT_CST_EXPIRE (BIT(5) | BIT(4))
+#define BQ256XX_BAT_FAULT_MASK BIT(3)
+#define BQ256XX_NTC_FAULT_MASK GENMASK(2, 0)
+#define BQ256XX_NTC_FAULT_WARM BIT(1)
+#define BQ256XX_NTC_FAULT_COOL (BIT(1) | BIT(0))
+#define BQ256XX_NTC_FAULT_COLD (BIT(2) | BIT(0))
+#define BQ256XX_NTC_FAULT_HOT (BIT(2) | BIT(1))
+
+#define BQ256XX_NUM_WD_VAL 4
+#define BQ256XX_WATCHDOG_MASK GENMASK(5, 4)
+#define BQ256XX_WATCHDOG_MAX 1600000
+#define BQ256XX_WATCHDOG_DIS 0
+#define BQ256XX_WDT_BIT_SHIFT 4
+
+#define BQ256XX_REG_RST BIT(7)
+
+/**
+ * struct bq256xx_init_data -
+ * @ichg: fast charge current
+ * @iindpm: input current limit
+ * @vbatreg: charge voltage
+ * @iterm: termination current
+ * @iprechg: precharge current
+ * @vindpm: input voltage limit
+ * @ichg_max: maximum fast charge current
+ * @vbatreg_max: maximum charge voltage
+ */
+struct bq256xx_init_data {
+ u32 ichg;
+ u32 iindpm;
+ u32 vbatreg;
+ u32 iterm;
+ u32 iprechg;
+ u32 vindpm;
+ u32 ichg_max;
+ u32 vbatreg_max;
+};
+
+/**
+ * struct bq256xx_state -
+ * @vbus_stat: VBUS status according to BQ256XX_CHARGER_STATUS_0
+ * @chrg_stat: charging status according to BQ256XX_CHARGER_STATUS_0
+ * @online: PG status according to BQ256XX_CHARGER_STATUS_0
+ *
+ * @wdt_fault: watchdog fault according to BQ256XX_CHARGER_STATUS_1
+ * @bat_fault: battery fault according to BQ256XX_CHARGER_STATUS_1
+ * @chrg_fault: charging fault according to BQ256XX_CHARGER_STATUS_1
+ * @ntc_fault: TS fault according to BQ256XX_CHARGER_STATUS_1
+ */
+struct bq256xx_state {
+ u8 vbus_stat;
+ u8 chrg_stat;
+ bool online;
+
+ u8 wdt_fault;
+ u8 bat_fault;
+ u8 chrg_fault;
+ u8 ntc_fault;
+};
+
+enum bq256xx_id {
+ BQ25600,
+ BQ25600D,
+ BQ25601,
+ BQ25601D,
+ BQ25618,
+ BQ25619,
+ BQ25611D,
+};
+
+/**
+ * struct bq256xx_device -
+ * @client: i2c client structure
+ * @regmap: register map structure
+ * @dev: device structure
+ * @lock: mutex lock structure
+ *
+ * @usb2_phy: usb_phy identifier
+ * @usb3_phy: usb_phy identifier
+ * @usb_nb: notifier block
+ * @usb_work: usb work queue
+ * @usb_event: usb_event code
+ *
+ * @model_name: i2c name string
+ *
+ * @init_data: initialization data
+ * @chip_info: device variant information
+ * @state: device status and faults
+ * @watchdog_timer: watchdog timer value in milliseconds
+ */
+struct bq256xx_device {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply *charger;
+ struct power_supply *battery;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ struct usb_phy *usb2_phy;
+ struct usb_phy *usb3_phy;
+ struct notifier_block usb_nb;
+ struct work_struct usb_work;
+ unsigned long usb_event;
+
+ char model_name[I2C_NAME_SIZE];
+
+ struct bq256xx_init_data init_data;
+ const struct bq256xx_chip_info *chip_info;
+ struct bq256xx_state state;
+ int watchdog_timer;
+};
+
+/**
+ * struct bq256xx_chip_info -
+ * @model_id: device instance
+ *
+ * @bq256xx_regmap_config: regmap configuration struct
+ * @bq256xx_get_ichg: pointer to instance specific get_ichg function
+ * @bq256xx_get_iindpm: pointer to instance specific get_iindpm function
+ * @bq256xx_get_vbatreg: pointer to instance specific get_vbatreg function
+ * @bq256xx_get_iterm: pointer to instance specific get_iterm function
+ * @bq256xx_get_iprechg: pointer to instance specific get_iprechg function
+ * @bq256xx_get_vindpm: pointer to instance specific get_vindpm function
+ *
+ * @bq256xx_set_ichg: pointer to instance specific set_ichg function
+ * @bq256xx_set_iindpm: pointer to instance specific set_iindpm function
+ * @bq256xx_set_vbatreg: pointer to instance specific set_vbatreg function
+ * @bq256xx_set_iterm: pointer to instance specific set_iterm function
+ * @bq256xx_set_iprechg: pointer to instance specific set_iprechg function
+ * @bq256xx_set_vindpm: pointer to instance specific set_vindpm function
+ *
+ * @bq256xx_def_ichg: default ichg value in microamps
+ * @bq256xx_def_iindpm: default iindpm value in microamps
+ * @bq256xx_def_vbatreg: default vbatreg value in microvolts
+ * @bq256xx_def_iterm: default iterm value in microamps
+ * @bq256xx_def_iprechg: default iprechg value in microamps
+ * @bq256xx_def_vindpm: default vindpm value in microvolts
+ *
+ * @bq256xx_max_ichg: maximum charge current in microamps
+ * @bq256xx_max_vbatreg: maximum battery regulation voltage in microvolts
+ *
+ * @has_usb_detect: indicates whether device has BC1.2 detection
+ */
+struct bq256xx_chip_info {
+ int model_id;
+
+ const struct regmap_config *bq256xx_regmap_config;
+
+ int (*bq256xx_get_ichg)(struct bq256xx_device *bq);
+ int (*bq256xx_get_iindpm)(struct bq256xx_device *bq);
+ int (*bq256xx_get_vbatreg)(struct bq256xx_device *bq);
+ int (*bq256xx_get_iterm)(struct bq256xx_device *bq);
+ int (*bq256xx_get_iprechg)(struct bq256xx_device *bq);
+ int (*bq256xx_get_vindpm)(struct bq256xx_device *bq);
+
+ int (*bq256xx_set_ichg)(struct bq256xx_device *bq, int ichg);
+ int (*bq256xx_set_iindpm)(struct bq256xx_device *bq, int iindpm);
+ int (*bq256xx_set_vbatreg)(struct bq256xx_device *bq, int vbatreg);
+ int (*bq256xx_set_iterm)(struct bq256xx_device *bq, int iterm);
+ int (*bq256xx_set_iprechg)(struct bq256xx_device *bq, int iprechg);
+ int (*bq256xx_set_vindpm)(struct bq256xx_device *bq, int vindpm);
+
+ int bq256xx_def_ichg;
+ int bq256xx_def_iindpm;
+ int bq256xx_def_vbatreg;
+ int bq256xx_def_iterm;
+ int bq256xx_def_iprechg;
+ int bq256xx_def_vindpm;
+
+ int bq256xx_max_ichg;
+ int bq256xx_max_vbatreg;
+
+ bool has_usb_detect;
+};
+
+static int bq256xx_watchdog_time[BQ256XX_NUM_WD_VAL] = {
+ 0, 40000, 80000, 1600000
+};
+
+static const int bq25611d_vbatreg_values[] = {
+ 3494000, 3590000, 3686000, 3790000, 3894000, 3990000, 4090000, 4140000,
+ 4190000
+};
+
+static const int bq25618_619_vbatreg_values[] = {
+ 3504000, 3600000, 3696000, 3800000, 3904000, 4000000, 4100000, 4150000,
+ 4200000
+};
+
+static const int bq25618_619_ichg_values[] = {
+ 1290000, 1360000, 1430000, 1500000
+};
+
+static enum power_supply_usb_type bq256xx_usb_type[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_ACA,
+};
+
+static int bq256xx_array_parse(int array_size, int val, const int array[])
+{
+ int i = 0;
+
+ if (val < array[i])
+ return i - 1;
+
+ if (val >= array[array_size - 1])
+ return array_size - 1;
+
+ for (i = 1; i < array_size; i++) {
+ if (val == array[i])
+ return i;
+
+		/* val falls between two table entries: round down */
+		if (val > array[i - 1] && val < array[i])
+			return i - 1;
+ }
+ return -EINVAL;
+}
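A quick worked example of the rounding behaviour above, using the
bq25618_619_ichg_values table defined earlier:

	/* bq256xx_array_parse(4, 1400000, bq25618_619_ichg_values):
	 *   1400000 falls between 1360000 (i = 1) and 1430000 (i = 2),
	 *   so the helper rounds down and returns 1.
	 */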
+
+static int bq256xx_usb_notifier(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ struct bq256xx_device *bq =
+ container_of(nb, struct bq256xx_device, usb_nb);
+
+ bq->usb_event = val;
+ queue_work(system_power_efficient_wq, &bq->usb_work);
+
+ return NOTIFY_OK;
+}
+
+static void bq256xx_usb_work(struct work_struct *data)
+{
+ struct bq256xx_device *bq =
+ container_of(data, struct bq256xx_device, usb_work);
+
+ switch (bq->usb_event) {
+ case USB_EVENT_ID:
+ break;
+ case USB_EVENT_NONE:
+ power_supply_changed(bq->charger);
+ break;
+ default:
+ dev_err(bq->dev, "Error switching to charger mode.\n");
+ break;
+ }
+}
+
+static struct reg_default bq2560x_reg_defs[] = {
+ {BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
+ {BQ256XX_CHARGER_CONTROL_0, 0x1a},
+ {BQ256XX_CHARGE_CURRENT_LIMIT, 0xa2},
+ {BQ256XX_PRECHG_AND_TERM_CURR_LIM, 0x22},
+ {BQ256XX_BATTERY_VOLTAGE_LIMIT, 0x58},
+ {BQ256XX_CHARGER_CONTROL_1, 0x9f},
+ {BQ256XX_CHARGER_CONTROL_2, 0x66},
+ {BQ256XX_CHARGER_CONTROL_3, 0x4c},
+};
+
+static struct reg_default bq25611d_reg_defs[] = {
+ {BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
+ {BQ256XX_CHARGER_CONTROL_0, 0x1a},
+ {BQ256XX_CHARGE_CURRENT_LIMIT, 0x91},
+ {BQ256XX_PRECHG_AND_TERM_CURR_LIM, 0x12},
+ {BQ256XX_BATTERY_VOLTAGE_LIMIT, 0x40},
+ {BQ256XX_CHARGER_CONTROL_1, 0x9e},
+ {BQ256XX_CHARGER_CONTROL_2, 0xe6},
+ {BQ256XX_CHARGER_CONTROL_3, 0x4c},
+ {BQ256XX_PART_INFORMATION, 0x54},
+ {BQ256XX_CHARGER_CONTROL_4, 0x75},
+};
+
+static struct reg_default bq25618_619_reg_defs[] = {
+ {BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
+ {BQ256XX_CHARGER_CONTROL_0, 0x1a},
+ {BQ256XX_CHARGE_CURRENT_LIMIT, 0x91},
+ {BQ256XX_PRECHG_AND_TERM_CURR_LIM, 0x12},
+ {BQ256XX_BATTERY_VOLTAGE_LIMIT, 0x40},
+ {BQ256XX_CHARGER_CONTROL_1, 0x9e},
+ {BQ256XX_CHARGER_CONTROL_2, 0xe6},
+ {BQ256XX_CHARGER_CONTROL_3, 0x4c},
+ {BQ256XX_PART_INFORMATION, 0x2c},
+ {BQ256XX_CHARGER_CONTROL_4, 0x75},
+};
+
+static int bq256xx_get_state(struct bq256xx_device *bq,
+ struct bq256xx_state *state)
+{
+ unsigned int charger_status_0;
+ unsigned int charger_status_1;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_CHARGER_STATUS_0,
+ &charger_status_0);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_CHARGER_STATUS_1,
+ &charger_status_1);
+ if (ret)
+ return ret;
+
+ state->vbus_stat = charger_status_0 & BQ256XX_VBUS_STAT_MASK;
+ state->chrg_stat = charger_status_0 & BQ256XX_CHRG_STAT_MASK;
+ state->online = charger_status_0 & BQ256XX_PG_STAT_MASK;
+
+ state->wdt_fault = charger_status_1 & BQ256XX_WDT_FAULT_MASK;
+ state->bat_fault = charger_status_1 & BQ256XX_BAT_FAULT_MASK;
+ state->chrg_fault = charger_status_1 & BQ256XX_CHRG_FAULT_MASK;
+ state->ntc_fault = charger_status_1 & BQ256XX_NTC_FAULT_MASK;
+
+ return 0;
+}
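For illustration, a made-up status byte decoded with the masks above:

	/* charger_status_0 == 0x5c:
	 *   & BQ256XX_VBUS_STAT_MASK -> BIT(6)         = USB CDP input
	 *   & BQ256XX_CHRG_STAT_MASK -> BIT(4)|BIT(3)  = charge termination
	 *   & BQ256XX_PG_STAT_MASK   -> BIT(2) set     = power good (online)
	 */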
+
+static int bq256xx_get_ichg_curr(struct bq256xx_device *bq)
+{
+ unsigned int charge_current_limit;
+ unsigned int ichg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_CHARGE_CURRENT_LIMIT,
+ &charge_current_limit);
+ if (ret)
+ return ret;
+
+ ichg_reg_code = charge_current_limit & BQ256XX_ICHG_MASK;
+
+ return ichg_reg_code * BQ256XX_ICHG_STEP_uA;
+}
+
+static int bq25618_619_get_ichg_curr(struct bq256xx_device *bq)
+{
+ unsigned int charge_current_limit;
+ unsigned int ichg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_CHARGE_CURRENT_LIMIT,
+ &charge_current_limit);
+ if (ret)
+ return ret;
+
+ ichg_reg_code = charge_current_limit & BQ256XX_ICHG_MASK;
+
+ if (ichg_reg_code < BQ25618_ICHG_THRESH)
+ return ichg_reg_code * BQ25618_ICHG_STEP_uA;
+
+ return bq25618_619_ichg_values[ichg_reg_code - BQ25618_ICHG_THRESH];
+}
+
+static int bq256xx_set_ichg_curr(struct bq256xx_device *bq, int ichg)
+{
+ unsigned int ichg_reg_code;
+ int ichg_max = bq->init_data.ichg_max;
+
+ ichg = clamp(ichg, BQ256XX_ICHG_MIN_uA, ichg_max);
+ ichg_reg_code = ichg / BQ256XX_ICHG_STEP_uA;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_CHARGE_CURRENT_LIMIT,
+ BQ256XX_ICHG_MASK, ichg_reg_code);
+}
+
+static int bq25618_619_set_ichg_curr(struct bq256xx_device *bq, int ichg)
+{
+ int array_size = ARRAY_SIZE(bq25618_619_ichg_values);
+ unsigned int ichg_reg_code;
+ int ichg_max = bq->init_data.ichg_max;
+
+ ichg = clamp(ichg, BQ25618_ICHG_MIN_uA, ichg_max);
+
+ if (ichg <= BQ25618_ICHG_THRESH_uA) {
+ ichg_reg_code = ichg / BQ25618_ICHG_STEP_uA;
+ } else {
+ ichg_reg_code = bq256xx_array_parse(array_size, ichg,
+ bq25618_619_ichg_values) + BQ25618_ICHG_THRESH;
+ }
+
+ return regmap_update_bits(bq->regmap, BQ256XX_CHARGE_CURRENT_LIMIT,
+ BQ256XX_ICHG_MASK, ichg_reg_code);
+}
+
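+/*
+ * VBATREG codes above BQ2561X_VBATREG_THRESH are linear steps from the
+ * variant's threshold voltage; codes at or below it come from a per-variant
+ * lookup table.
+ */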
+static int bq25618_619_get_chrg_volt(struct bq256xx_device *bq)
+{
+ unsigned int battery_volt_lim;
+ unsigned int vbatreg_reg_code;
+ int ret;
+
+	ret = regmap_read(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+			&battery_volt_lim);
+	if (ret)
+		return ret;
+
+ vbatreg_reg_code = (battery_volt_lim & BQ256XX_VBATREG_MASK) >>
+ BQ256XX_VBATREG_BIT_SHIFT;
+
+ if (vbatreg_reg_code > BQ2561X_VBATREG_THRESH)
+ return ((vbatreg_reg_code - BQ2561X_VBATREG_THRESH) *
+ BQ2561X_VBATREG_STEP_uV) +
+ BQ25618_VBATREG_THRESH_uV;
+
+ return bq25618_619_vbatreg_values[vbatreg_reg_code];
+}
+
+static int bq25611d_get_chrg_volt(struct bq256xx_device *bq)
+{
+ unsigned int battery_volt_lim;
+ unsigned int vbatreg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ &battery_volt_lim);
+ if (ret)
+ return ret;
+
+ vbatreg_reg_code = (battery_volt_lim & BQ256XX_VBATREG_MASK) >>
+ BQ256XX_VBATREG_BIT_SHIFT;
+
+ if (vbatreg_reg_code > BQ2561X_VBATREG_THRESH)
+ return ((vbatreg_reg_code - BQ2561X_VBATREG_THRESH) *
+ BQ2561X_VBATREG_STEP_uV) +
+ BQ25611D_VBATREG_THRESH_uV;
+
+ return bq25611d_vbatreg_values[vbatreg_reg_code];
+}
+
+static int bq2560x_get_chrg_volt(struct bq256xx_device *bq)
+{
+ unsigned int battery_volt_lim;
+ unsigned int vbatreg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ &battery_volt_lim);
+ if (ret)
+ return ret;
+
+ vbatreg_reg_code = (battery_volt_lim & BQ256XX_VBATREG_MASK) >>
+ BQ256XX_VBATREG_BIT_SHIFT;
+
+ return (vbatreg_reg_code * BQ2560X_VBATREG_STEP_uV)
+ + BQ2560X_VBATREG_OFFSET_uV;
+}
+
+static int bq25601d_get_chrg_volt(struct bq256xx_device *bq)
+{
+ unsigned int battery_volt_lim;
+ unsigned int vbatreg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ &battery_volt_lim);
+ if (ret)
+ return ret;
+
+ vbatreg_reg_code = (battery_volt_lim & BQ256XX_VBATREG_MASK) >>
+ BQ256XX_VBATREG_BIT_SHIFT;
+
+ return (vbatreg_reg_code * BQ2560X_VBATREG_STEP_uV)
+ + BQ25601D_VBATREG_OFFSET_uV;
+}
+
+static int bq25618_619_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
+{
+ int array_size = ARRAY_SIZE(bq25618_619_vbatreg_values);
+ unsigned int vbatreg_reg_code;
+ int vbatreg_max = bq->init_data.vbatreg_max;
+
+ vbatreg = clamp(vbatreg, BQ25618_VBATREG_MIN_uV, vbatreg_max);
+
+	if (vbatreg > BQ25618_VBATREG_THRESH_uV) {
+		vbatreg_reg_code = ((vbatreg -
+			BQ25618_VBATREG_THRESH_uV) /
+			(BQ2561X_VBATREG_STEP_uV)) + BQ2561X_VBATREG_THRESH;
+	} else {
+		vbatreg_reg_code = bq256xx_array_parse(array_size, vbatreg,
+					bq25618_619_vbatreg_values);
+	}
+
+ return regmap_update_bits(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ BQ256XX_VBATREG_MASK, vbatreg_reg_code <<
+ BQ256XX_VBATREG_BIT_SHIFT);
+}
+
+static int bq25611d_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
+{
+ int array_size = ARRAY_SIZE(bq25611d_vbatreg_values);
+ unsigned int vbatreg_reg_code;
+ int vbatreg_max = bq->init_data.vbatreg_max;
+
+ vbatreg = clamp(vbatreg, BQ25611D_VBATREG_MIN_uV, vbatreg_max);
+
+	if (vbatreg > BQ25611D_VBATREG_THRESH_uV) {
+		vbatreg_reg_code = ((vbatreg -
+			BQ25611D_VBATREG_THRESH_uV) /
+			(BQ2561X_VBATREG_STEP_uV)) + BQ2561X_VBATREG_THRESH;
+	} else {
+		vbatreg_reg_code = bq256xx_array_parse(array_size, vbatreg,
+					bq25611d_vbatreg_values);
+	}
+
+ return regmap_update_bits(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ BQ256XX_VBATREG_MASK, vbatreg_reg_code <<
+ BQ256XX_VBATREG_BIT_SHIFT);
+}
+
+static int bq2560x_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
+{
+ unsigned int vbatreg_reg_code;
+ int vbatreg_max = bq->init_data.vbatreg_max;
+
+ vbatreg = clamp(vbatreg, BQ2560X_VBATREG_MIN_uV, vbatreg_max);
+
+ vbatreg_reg_code = (vbatreg - BQ2560X_VBATREG_OFFSET_uV) /
+ BQ2560X_VBATREG_STEP_uV;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ BQ256XX_VBATREG_MASK, vbatreg_reg_code <<
+ BQ256XX_VBATREG_BIT_SHIFT);
+}
+
+static int bq25601d_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
+{
+ unsigned int vbatreg_reg_code;
+ int vbatreg_max = bq->init_data.vbatreg_max;
+
+ vbatreg = clamp(vbatreg, BQ25601D_VBATREG_MIN_uV, vbatreg_max);
+
+ vbatreg_reg_code = (vbatreg - BQ25601D_VBATREG_OFFSET_uV) /
+ BQ2560X_VBATREG_STEP_uV;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_BATTERY_VOLTAGE_LIMIT,
+ BQ256XX_VBATREG_MASK, vbatreg_reg_code <<
+ BQ256XX_VBATREG_BIT_SHIFT);
+}
+
+static int bq256xx_get_prechrg_curr(struct bq256xx_device *bq)
+{
+ unsigned int prechg_and_term_curr_lim;
+ unsigned int iprechg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ &prechg_and_term_curr_lim);
+ if (ret)
+ return ret;
+
+ iprechg_reg_code = (prechg_and_term_curr_lim & BQ256XX_IPRECHG_MASK)
+ >> BQ256XX_IPRECHG_BIT_SHIFT;
+
+ return (iprechg_reg_code * BQ256XX_IPRECHG_STEP_uA) +
+ BQ256XX_IPRECHG_OFFSET_uA;
+}
+
+static int bq256xx_set_prechrg_curr(struct bq256xx_device *bq, int iprechg)
+{
+ unsigned int iprechg_reg_code;
+
+ iprechg = clamp(iprechg, BQ256XX_IPRECHG_MIN_uA,
+ BQ256XX_IPRECHG_MAX_uA);
+
+ iprechg_reg_code = ((iprechg - BQ256XX_IPRECHG_OFFSET_uA) /
+ BQ256XX_IPRECHG_STEP_uA) << BQ256XX_IPRECHG_BIT_SHIFT;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ BQ256XX_IPRECHG_MASK, iprechg_reg_code);
+}
+
+static int bq25618_619_get_prechrg_curr(struct bq256xx_device *bq)
+{
+ unsigned int prechg_and_term_curr_lim;
+ unsigned int iprechg_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ &prechg_and_term_curr_lim);
+ if (ret)
+ return ret;
+
+ iprechg_reg_code = (prechg_and_term_curr_lim & BQ256XX_IPRECHG_MASK)
+ >> BQ256XX_IPRECHG_BIT_SHIFT;
+
+ return (iprechg_reg_code * BQ25618_IPRECHG_STEP_uA) +
+ BQ25618_IPRECHG_OFFSET_uA;
+}
+
+static int bq25618_619_set_prechrg_curr(struct bq256xx_device *bq, int iprechg)
+{
+ unsigned int iprechg_reg_code;
+
+ iprechg = clamp(iprechg, BQ25618_IPRECHG_MIN_uA,
+ BQ25618_IPRECHG_MAX_uA);
+
+ iprechg_reg_code = ((iprechg - BQ25618_IPRECHG_OFFSET_uA) /
+ BQ25618_IPRECHG_STEP_uA) << BQ256XX_IPRECHG_BIT_SHIFT;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ BQ256XX_IPRECHG_MASK, iprechg_reg_code);
+}
+
+static int bq256xx_get_term_curr(struct bq256xx_device *bq)
+{
+ unsigned int prechg_and_term_curr_lim;
+ unsigned int iterm_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ &prechg_and_term_curr_lim);
+ if (ret)
+ return ret;
+
+ iterm_reg_code = prechg_and_term_curr_lim & BQ256XX_ITERM_MASK;
+
+ return (iterm_reg_code * BQ256XX_ITERM_STEP_uA) +
+ BQ256XX_ITERM_OFFSET_uA;
+}
+
+static int bq256xx_set_term_curr(struct bq256xx_device *bq, int iterm)
+{
+ unsigned int iterm_reg_code;
+
+ iterm = clamp(iterm, BQ256XX_ITERM_MIN_uA, BQ256XX_ITERM_MAX_uA);
+
+ iterm_reg_code = (iterm - BQ256XX_ITERM_OFFSET_uA) /
+ BQ256XX_ITERM_STEP_uA;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ BQ256XX_ITERM_MASK, iterm_reg_code);
+}
+
+static int bq25618_619_get_term_curr(struct bq256xx_device *bq)
+{
+ unsigned int prechg_and_term_curr_lim;
+ unsigned int iterm_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ &prechg_and_term_curr_lim);
+ if (ret)
+ return ret;
+
+ iterm_reg_code = prechg_and_term_curr_lim & BQ256XX_ITERM_MASK;
+
+ return (iterm_reg_code * BQ25618_ITERM_STEP_uA) +
+ BQ25618_ITERM_OFFSET_uA;
+}
+
+static int bq25618_619_set_term_curr(struct bq256xx_device *bq, int iterm)
+{
+ unsigned int iterm_reg_code;
+
+ iterm = clamp(iterm, BQ25618_ITERM_MIN_uA, BQ25618_ITERM_MAX_uA);
+
+ iterm_reg_code = (iterm - BQ25618_ITERM_OFFSET_uA) /
+ BQ25618_ITERM_STEP_uA;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_PRECHG_AND_TERM_CURR_LIM,
+ BQ256XX_ITERM_MASK, iterm_reg_code);
+}
+
+static int bq256xx_get_input_volt_lim(struct bq256xx_device *bq)
+{
+ unsigned int charger_control_2;
+ unsigned int vindpm_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_CHARGER_CONTROL_2,
+ &charger_control_2);
+ if (ret)
+ return ret;
+
+ vindpm_reg_code = charger_control_2 & BQ256XX_VINDPM_MASK;
+
+ return (vindpm_reg_code * BQ256XX_VINDPM_STEP_uV) +
+ BQ256XX_VINDPM_OFFSET_uV;
+}
+
+static int bq256xx_set_input_volt_lim(struct bq256xx_device *bq, int vindpm)
+{
+ unsigned int vindpm_reg_code;
+
+ vindpm = clamp(vindpm, BQ256XX_VINDPM_MIN_uV, BQ256XX_VINDPM_MAX_uV);
+
+ vindpm_reg_code = (vindpm - BQ256XX_VINDPM_OFFSET_uV) /
+ BQ256XX_VINDPM_STEP_uV;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_2,
+ BQ256XX_VINDPM_MASK, vindpm_reg_code);
+}
+
+static int bq256xx_get_input_curr_lim(struct bq256xx_device *bq)
+{
+ unsigned int input_current_limit;
+ unsigned int iindpm_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ256XX_INPUT_CURRENT_LIMIT,
+ &input_current_limit);
+ if (ret)
+ return ret;
+
+ iindpm_reg_code = input_current_limit & BQ256XX_IINDPM_MASK;
+
+ return (iindpm_reg_code * BQ256XX_IINDPM_STEP_uA) +
+ BQ256XX_IINDPM_OFFSET_uA;
+}
+
+static int bq256xx_set_input_curr_lim(struct bq256xx_device *bq, int iindpm)
+{
+ unsigned int iindpm_reg_code;
+
+ iindpm = clamp(iindpm, BQ256XX_IINDPM_MIN_uA, BQ256XX_IINDPM_MAX_uA);
+
+ iindpm_reg_code = (iindpm - BQ256XX_IINDPM_OFFSET_uA) /
+ BQ256XX_IINDPM_STEP_uA;
+
+ return regmap_update_bits(bq->regmap, BQ256XX_INPUT_CURRENT_LIMIT,
+ BQ256XX_IINDPM_MASK, iindpm_reg_code);
+}
+
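+/* devm action: restore the register defaults and drop the USB PHY notifiers */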
+static void bq256xx_charger_reset(void *data)
+{
+ struct bq256xx_device *bq = data;
+
+ regmap_update_bits(bq->regmap, BQ256XX_PART_INFORMATION,
+ BQ256XX_REG_RST, BQ256XX_REG_RST);
+
+ if (!IS_ERR_OR_NULL(bq->usb2_phy))
+ usb_unregister_notifier(bq->usb2_phy, &bq->usb_nb);
+
+ if (!IS_ERR_OR_NULL(bq->usb3_phy))
+ usb_unregister_notifier(bq->usb3_phy, &bq->usb_nb);
+}
+
+static int bq256xx_set_charger_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ struct bq256xx_device *bq = power_supply_get_drvdata(psy);
+ int ret = -EINVAL;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = bq->chip_info->bq256xx_set_iindpm(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq->chip_info->bq256xx_set_vbatreg(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq->chip_info->bq256xx_set_ichg(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ ret = bq->chip_info->bq256xx_set_iprechg(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = bq->chip_info->bq256xx_set_iterm(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = bq->chip_info->bq256xx_set_vindpm(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int bq256xx_get_battery_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq256xx_device *bq = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = bq->init_data.ichg_max;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = bq->init_data.vbatreg_max;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
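+/*
+ * Sample the charger state once under the lock and translate it into
+ * power-supply framework values.
+ */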
+static int bq256xx_get_charger_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq256xx_device *bq = power_supply_get_drvdata(psy);
+ struct bq256xx_state state;
+ int ret = 0;
+
+ mutex_lock(&bq->lock);
+ ret = bq256xx_get_state(bq, &state);
+ mutex_unlock(&bq->lock);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (state.vbus_stat == BQ256XX_VBUS_STAT_NO_INPUT ||
+ state.vbus_stat == BQ256XX_VBUS_STAT_USB_OTG)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (state.chrg_stat == BQ256XX_CHRG_STAT_NOT_CHRGING)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (state.chrg_stat == BQ256XX_CHRG_STAT_CHRG_TERM)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ if (state.wdt_fault) {
+ val->intval =
+ POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE;
+ } else if (state.bat_fault) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+			switch (state.chrg_fault) {
+ case BQ256XX_CHRG_FAULT_INPUT:
+ val->intval =
+ POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ case BQ256XX_CHRG_FAULT_THERM:
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case BQ256XX_CHRG_FAULT_CST_EXPIRE:
+ val->intval =
+ POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ default:
+ break;
+ }
+
+ switch (state.ntc_fault) {
+ case BQ256XX_NTC_FAULT_WARM:
+ val->intval = POWER_SUPPLY_HEALTH_WARM;
+ break;
+ case BQ256XX_NTC_FAULT_COOL:
+ val->intval = POWER_SUPPLY_HEALTH_COOL;
+ break;
+ case BQ256XX_NTC_FAULT_COLD:
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case BQ256XX_NTC_FAULT_HOT:
+ val->intval = POWER_SUPPLY_HEALTH_HOT;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ }
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ if (bq->chip_info->has_usb_detect) {
+ switch (state.vbus_stat) {
+ case BQ256XX_VBUS_STAT_USB_SDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case BQ256XX_VBUS_STAT_USB_CDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case BQ256XX_VBUS_STAT_USB_DCP:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case BQ256XX_VBUS_STAT_USB_OTG:
+ val->intval = POWER_SUPPLY_USB_TYPE_ACA;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (state.vbus_stat) {
+ case BQ256XX_VBUS_STAT_USB_SDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case BQ256XX_VBUS_STAT_USB_OTG:
+ val->intval = POWER_SUPPLY_USB_TYPE_ACA;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ switch (state.chrg_stat) {
+ case BQ256XX_CHRG_STAT_NOT_CHRGING:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case BQ256XX_CHRG_STAT_PRECHRGING:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case BQ256XX_CHRG_STAT_FAST_CHRGING:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case BQ256XX_CHRG_STAT_CHRG_TERM:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ256XX_MANUFACTURER;
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bq->model_name;
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = state.online;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = bq->chip_info->bq256xx_get_vindpm(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = bq->chip_info->bq256xx_get_iindpm(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq->chip_info->bq256xx_get_vbatreg(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq->chip_info->bq256xx_get_ichg(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ ret = bq->chip_info->bq256xx_get_iprechg(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = bq->chip_info->bq256xx_get_iterm(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
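+/* Compare against the last cached state to avoid redundant notifications */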
+static bool bq256xx_state_changed(struct bq256xx_device *bq,
+ struct bq256xx_state *new_state)
+{
+ struct bq256xx_state old_state;
+
+ mutex_lock(&bq->lock);
+ old_state = bq->state;
+ mutex_unlock(&bq->lock);
+
+ return memcmp(&old_state, new_state, sizeof(struct bq256xx_state)) != 0;
+}
+
+static irqreturn_t bq256xx_irq_handler_thread(int irq, void *private)
+{
+ struct bq256xx_device *bq = private;
+ struct bq256xx_state state;
+ int ret;
+
+ ret = bq256xx_get_state(bq, &state);
+ if (ret < 0)
+ goto irq_out;
+
+ if (!bq256xx_state_changed(bq, &state))
+ goto irq_out;
+
+ mutex_lock(&bq->lock);
+ bq->state = state;
+ mutex_unlock(&bq->lock);
+
+ power_supply_changed(bq->charger);
+
+irq_out:
+ return IRQ_HANDLED;
+}
+
+static enum power_supply_property bq256xx_power_supply_props[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+};
+
+static enum power_supply_property bq256xx_battery_props[] = {
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+};
+
+static int bq256xx_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct power_supply_desc bq256xx_power_supply_desc = {
+ .name = "bq256xx-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = bq256xx_usb_type,
+ .num_usb_types = ARRAY_SIZE(bq256xx_usb_type),
+ .properties = bq256xx_power_supply_props,
+ .num_properties = ARRAY_SIZE(bq256xx_power_supply_props),
+ .get_property = bq256xx_get_charger_property,
+ .set_property = bq256xx_set_charger_property,
+ .property_is_writeable = bq256xx_property_is_writeable,
+};
+
+static const struct power_supply_desc bq256xx_battery_desc = {
+ .name = "bq256xx-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = bq256xx_get_battery_property,
+ .properties = bq256xx_battery_props,
+ .num_properties = ARRAY_SIZE(bq256xx_battery_props),
+ .property_is_writeable = bq256xx_property_is_writeable,
+};
+
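+/*
+ * The status registers and the input current limit can be updated by the
+ * device itself, so reads of them must bypass the register cache.
+ */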
+static bool bq256xx_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BQ256XX_INPUT_CURRENT_LIMIT:
+ case BQ256XX_CHARGER_STATUS_0...BQ256XX_CHARGER_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bq25600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ256XX_PART_INFORMATION,
+ .reg_defaults = bq2560x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq2560x_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+ .volatile_reg = bq256xx_is_volatile_reg,
+};
+
+static const struct regmap_config bq25611d_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ256XX_CHARGER_CONTROL_4,
+ .reg_defaults = bq25611d_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq25611d_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+ .volatile_reg = bq256xx_is_volatile_reg,
+};
+
+static const struct regmap_config bq25618_619_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ256XX_CHARGER_CONTROL_4,
+ .reg_defaults = bq25618_619_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq25618_619_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+ .volatile_reg = bq256xx_is_volatile_reg,
+};
+
+static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
+ [BQ25600] = {
+ .model_id = BQ25600,
+ .bq256xx_regmap_config = &bq25600_regmap_config,
+ .bq256xx_get_ichg = bq256xx_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq2560x_get_chrg_volt,
+ .bq256xx_get_iterm = bq256xx_get_term_curr,
+ .bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq256xx_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq2560x_set_chrg_volt,
+ .bq256xx_set_iterm = bq256xx_set_term_curr,
+ .bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ2560X_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ256XX_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ256XX_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ256XX_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ2560X_VBATREG_MAX_uV,
+
+ .has_usb_detect = false,
+ },
+
+ [BQ25600D] = {
+ .model_id = BQ25600D,
+ .bq256xx_regmap_config = &bq25600_regmap_config,
+ .bq256xx_get_ichg = bq256xx_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq2560x_get_chrg_volt,
+ .bq256xx_get_iterm = bq256xx_get_term_curr,
+ .bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq256xx_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq2560x_set_chrg_volt,
+ .bq256xx_set_iterm = bq256xx_set_term_curr,
+ .bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ2560X_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ256XX_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ256XX_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ256XX_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ2560X_VBATREG_MAX_uV,
+
+ .has_usb_detect = true,
+ },
+
+ [BQ25601] = {
+ .model_id = BQ25601,
+ .bq256xx_regmap_config = &bq25600_regmap_config,
+ .bq256xx_get_ichg = bq256xx_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq2560x_get_chrg_volt,
+ .bq256xx_get_iterm = bq256xx_get_term_curr,
+ .bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq256xx_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq2560x_set_chrg_volt,
+ .bq256xx_set_iterm = bq256xx_set_term_curr,
+ .bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ2560X_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ256XX_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ256XX_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ256XX_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ2560X_VBATREG_MAX_uV,
+
+ .has_usb_detect = false,
+ },
+
+ [BQ25601D] = {
+ .model_id = BQ25601D,
+ .bq256xx_regmap_config = &bq25600_regmap_config,
+ .bq256xx_get_ichg = bq256xx_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq25601d_get_chrg_volt,
+ .bq256xx_get_iterm = bq256xx_get_term_curr,
+ .bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq256xx_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq25601d_set_chrg_volt,
+ .bq256xx_set_iterm = bq256xx_set_term_curr,
+ .bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ2560X_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ256XX_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ256XX_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ256XX_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ2560X_VBATREG_MAX_uV,
+
+ .has_usb_detect = true,
+ },
+
+ [BQ25611D] = {
+ .model_id = BQ25611D,
+ .bq256xx_regmap_config = &bq25611d_regmap_config,
+ .bq256xx_get_ichg = bq256xx_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq25611d_get_chrg_volt,
+ .bq256xx_get_iterm = bq256xx_get_term_curr,
+ .bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq256xx_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq25611d_set_chrg_volt,
+ .bq256xx_set_iterm = bq256xx_set_term_curr,
+ .bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ25611D_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ25611D_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ256XX_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ256XX_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ256XX_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ25611D_VBATREG_MAX_uV,
+
+ .has_usb_detect = true,
+ },
+
+ [BQ25618] = {
+ .model_id = BQ25618,
+ .bq256xx_regmap_config = &bq25618_619_regmap_config,
+ .bq256xx_get_ichg = bq25618_619_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq25618_619_get_chrg_volt,
+ .bq256xx_get_iterm = bq25618_619_get_term_curr,
+ .bq256xx_get_iprechg = bq25618_619_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq25618_619_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq25618_619_set_chrg_volt,
+ .bq256xx_set_iterm = bq25618_619_set_term_curr,
+ .bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ25618_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ25618_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ25618_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ25618_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ25618_VBATREG_MAX_uV,
+
+ .has_usb_detect = false,
+ },
+
+ [BQ25619] = {
+ .model_id = BQ25619,
+ .bq256xx_regmap_config = &bq25618_619_regmap_config,
+ .bq256xx_get_ichg = bq25618_619_get_ichg_curr,
+ .bq256xx_get_iindpm = bq256xx_get_input_curr_lim,
+ .bq256xx_get_vbatreg = bq25618_619_get_chrg_volt,
+ .bq256xx_get_iterm = bq25618_619_get_term_curr,
+ .bq256xx_get_iprechg = bq25618_619_get_prechrg_curr,
+ .bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+
+ .bq256xx_set_ichg = bq25618_619_set_ichg_curr,
+ .bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
+ .bq256xx_set_vbatreg = bq25618_619_set_chrg_volt,
+ .bq256xx_set_iterm = bq25618_619_set_term_curr,
+ .bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
+ .bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
+
+ .bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
+ .bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
+ .bq256xx_def_vbatreg = BQ25618_VBATREG_DEF_uV,
+ .bq256xx_def_iterm = BQ25618_ITERM_DEF_uA,
+ .bq256xx_def_iprechg = BQ25618_IPRECHG_DEF_uA,
+ .bq256xx_def_vindpm = BQ256XX_VINDPM_DEF_uV,
+
+ .bq256xx_max_ichg = BQ25618_ICHG_MAX_uA,
+ .bq256xx_max_vbatreg = BQ25618_VBATREG_MAX_uV,
+
+ .has_usb_detect = false,
+ },
+};
+
+static int bq256xx_power_supply_init(struct bq256xx_device *bq,
+ struct power_supply_config *psy_cfg, struct device *dev)
+{
+ bq->charger = devm_power_supply_register(bq->dev,
+ &bq256xx_power_supply_desc,
+ psy_cfg);
+ if (IS_ERR(bq->charger)) {
+		dev_err(dev, "failed to register charger power supply\n");
+ return PTR_ERR(bq->charger);
+ }
+
+ bq->battery = devm_power_supply_register(bq->dev,
+ &bq256xx_battery_desc,
+ psy_cfg);
+ if (IS_ERR(bq->battery)) {
+		dev_err(dev, "failed to register battery power supply\n");
+ return PTR_ERR(bq->battery);
+ }
+ return 0;
+}
+
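+/*
+ * Program the watchdog and the charge parameters, preferring battery info
+ * from the firmware node and falling back to per-variant defaults.
+ */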
+static int bq256xx_hw_init(struct bq256xx_device *bq)
+{
+ struct power_supply_battery_info bat_info = { };
+ int wd_reg_val = BQ256XX_WATCHDOG_DIS;
+ int ret = 0;
+ int i;
+
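+	/*
+	 * Map the requested timeout onto the nearest supported setting,
+	 * rounding down; out-of-range values were already rejected in
+	 * bq256xx_parse_dt().
+	 */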
+ for (i = 0; i < BQ256XX_NUM_WD_VAL; i++) {
+ if (bq->watchdog_timer == bq256xx_watchdog_time[i]) {
+ wd_reg_val = i;
+ break;
+ }
+ if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
+ wd_reg_val = i;
+ }
+	ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
+				BQ256XX_WATCHDOG_MASK, wd_reg_val <<
+						BQ256XX_WDT_BIT_SHIFT);
+	if (ret)
+		return ret;
+
+	ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret) {
+ dev_warn(bq->dev, "battery info missing, default values will be applied\n");
+
+ bat_info.constant_charge_current_max_ua =
+ bq->chip_info->bq256xx_def_ichg;
+
+ bat_info.constant_charge_voltage_max_uv =
+ bq->chip_info->bq256xx_def_vbatreg;
+
+ bat_info.precharge_current_ua =
+ bq->chip_info->bq256xx_def_iprechg;
+
+ bat_info.charge_term_current_ua =
+ bq->chip_info->bq256xx_def_iterm;
+
+ bq->init_data.ichg_max =
+ bq->chip_info->bq256xx_max_ichg;
+
+ bq->init_data.vbatreg_max =
+ bq->chip_info->bq256xx_max_vbatreg;
+ } else {
+ bq->init_data.ichg_max =
+ bat_info.constant_charge_current_max_ua;
+
+ bq->init_data.vbatreg_max =
+ bat_info.constant_charge_voltage_max_uv;
+ }
+
+ ret = bq->chip_info->bq256xx_set_vindpm(bq, bq->init_data.vindpm);
+ if (ret)
+ return ret;
+
+ ret = bq->chip_info->bq256xx_set_iindpm(bq, bq->init_data.iindpm);
+ if (ret)
+ return ret;
+
+ ret = bq->chip_info->bq256xx_set_ichg(bq,
+ bat_info.constant_charge_current_max_ua);
+ if (ret)
+ return ret;
+
+ ret = bq->chip_info->bq256xx_set_iprechg(bq,
+ bat_info.precharge_current_ua);
+ if (ret)
+ return ret;
+
+ ret = bq->chip_info->bq256xx_set_vbatreg(bq,
+ bat_info.constant_charge_voltage_max_uv);
+ if (ret)
+ return ret;
+
+ ret = bq->chip_info->bq256xx_set_iterm(bq,
+ bat_info.charge_term_current_ua);
+ if (ret)
+ return ret;
+
+ power_supply_put_battery_info(bq->charger, &bat_info);
+
+ return 0;
+}
+
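+/* Read optional firmware properties, falling back to per-variant defaults */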
+static int bq256xx_parse_dt(struct bq256xx_device *bq,
+ struct power_supply_config *psy_cfg, struct device *dev)
+{
+ int ret = 0;
+
+ psy_cfg->drv_data = bq;
+ psy_cfg->of_node = dev->of_node;
+
+ ret = device_property_read_u32(bq->dev, "ti,watchdog-timeout-ms",
+ &bq->watchdog_timer);
+ if (ret)
+ bq->watchdog_timer = BQ256XX_WATCHDOG_DIS;
+
+ if (bq->watchdog_timer > BQ256XX_WATCHDOG_MAX ||
+ bq->watchdog_timer < BQ256XX_WATCHDOG_DIS)
+ return -EINVAL;
+
+ ret = device_property_read_u32(bq->dev,
+ "input-voltage-limit-microvolt",
+ &bq->init_data.vindpm);
+ if (ret)
+ bq->init_data.vindpm = bq->chip_info->bq256xx_def_vindpm;
+
+ ret = device_property_read_u32(bq->dev,
+ "input-current-limit-microamp",
+ &bq->init_data.iindpm);
+ if (ret)
+ bq->init_data.iindpm = bq->chip_info->bq256xx_def_iindpm;
+
+ return 0;
+}
+
+static int bq256xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct bq256xx_device *bq;
+ struct power_supply_config psy_cfg = { };
+
+ int ret;
+
+ bq = devm_kzalloc(dev, sizeof(*bq), GFP_KERNEL);
+ if (!bq)
+ return -ENOMEM;
+
+ bq->client = client;
+ bq->dev = dev;
+ bq->chip_info = &bq256xx_chip_info_tbl[id->driver_data];
+
+ mutex_init(&bq->lock);
+
+ strncpy(bq->model_name, id->name, I2C_NAME_SIZE);
+
+ bq->regmap = devm_regmap_init_i2c(client,
+ bq->chip_info->bq256xx_regmap_config);
+
+ if (IS_ERR(bq->regmap)) {
+ dev_err(dev, "Failed to allocate register map\n");
+ return PTR_ERR(bq->regmap);
+ }
+
+ i2c_set_clientdata(client, bq);
+
+ ret = bq256xx_parse_dt(bq, &psy_cfg, dev);
+ if (ret) {
+		dev_err(dev, "Failed to read device tree properties: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, bq256xx_charger_reset, bq);
+ if (ret)
+ return ret;
+
+ /* OTG reporting */
+ bq->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (!IS_ERR_OR_NULL(bq->usb2_phy)) {
+ INIT_WORK(&bq->usb_work, bq256xx_usb_work);
+ bq->usb_nb.notifier_call = bq256xx_usb_notifier;
+ usb_register_notifier(bq->usb2_phy, &bq->usb_nb);
+ }
+
+ bq->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+ if (!IS_ERR_OR_NULL(bq->usb3_phy)) {
+ INIT_WORK(&bq->usb_work, bq256xx_usb_work);
+ bq->usb_nb.notifier_call = bq256xx_usb_notifier;
+ usb_register_notifier(bq->usb3_phy, &bq->usb_nb);
+ }
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ bq256xx_irq_handler_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(&client->dev), bq);
+ if (ret < 0) {
+ dev_err(dev, "get irq fail: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = bq256xx_power_supply_init(bq, &psy_cfg, dev);
+ if (ret) {
+ dev_err(dev, "Failed to register power supply\n");
+ return ret;
+ }
+
+ ret = bq256xx_hw_init(bq);
+ if (ret) {
+ dev_err(dev, "Cannot initialize the chip.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct i2c_device_id bq256xx_i2c_ids[] = {
+ { "bq25600", BQ25600 },
+ { "bq25600d", BQ25600D },
+ { "bq25601", BQ25601 },
+ { "bq25601d", BQ25601D },
+ { "bq25611d", BQ25611D },
+ { "bq25618", BQ25618 },
+ { "bq25619", BQ25619 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bq256xx_i2c_ids);
+
+static const struct of_device_id bq256xx_of_match[] = {
+ { .compatible = "ti,bq25600", .data = (void *)BQ25600 },
+ { .compatible = "ti,bq25600d", .data = (void *)BQ25600D },
+ { .compatible = "ti,bq25601", .data = (void *)BQ25601 },
+ { .compatible = "ti,bq25601d", .data = (void *)BQ25601D },
+ { .compatible = "ti,bq25611d", .data = (void *)BQ25611D },
+ { .compatible = "ti,bq25618", .data = (void *)BQ25618 },
+ { .compatible = "ti,bq25619", .data = (void *)BQ25619 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bq256xx_of_match);
+
+static const struct acpi_device_id bq256xx_acpi_match[] = {
+ { "bq25600", BQ25600 },
+ { "bq25600d", BQ25600D },
+ { "bq25601", BQ25601 },
+ { "bq25601d", BQ25601D },
+ { "bq25611d", BQ25611D },
+ { "bq25618", BQ25618 },
+ { "bq25619", BQ25619 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bq256xx_acpi_match);
+
+static struct i2c_driver bq256xx_driver = {
+ .driver = {
+ .name = "bq256xx-charger",
+ .of_match_table = bq256xx_of_match,
+ .acpi_match_table = bq256xx_acpi_match,
+ },
+ .probe = bq256xx_probe,
+ .id_table = bq256xx_i2c_ids,
+};
+module_i2c_driver(bq256xx_driver);
+
+MODULE_AUTHOR("Ricardo Rivera-Matos <r-rivera-matos@ti.com>");
+MODULE_DESCRIPTION("bq256xx charger driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
index c936f311eb4f..530ff4025b31 100644
--- a/drivers/power/supply/bq25980_charger.c
+++ b/drivers/power/supply/bq25980_charger.c
@@ -1285,7 +1285,7 @@ static int bq25980_probe(struct i2c_client *client,
static const struct i2c_device_id bq25980_i2c_ids[] = {
{ "bq25980", BQ25980 },
{ "bq25975", BQ25975 },
- { "bq25975", BQ25975 },
+ { "bq25960", BQ25960 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq25980_i2c_ids);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 315e0909e6a4..4c4a7b1c64c5 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -110,6 +110,7 @@ enum bq27xxx_reg_index {
BQ27XXX_REG_TTES, /* Time-to-Empty Standby */
BQ27XXX_REG_TTECP, /* Time-to-Empty at Constant Power */
BQ27XXX_REG_NAC, /* Nominal Available Capacity */
+ BQ27XXX_REG_RC, /* Remaining Capacity */
BQ27XXX_REG_FCC, /* Full Charge Capacity */
BQ27XXX_REG_CYCT, /* Cycle Count */
BQ27XXX_REG_AE, /* Available Energy */
@@ -145,6 +146,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = INVALID_REG_ADDR,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -169,6 +171,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = INVALID_REG_ADDR,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -193,6 +196,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1a,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -215,6 +219,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -237,6 +242,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1a,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x1e,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -257,6 +263,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
[BQ27XXX_REG_AE] = 0x22,
@@ -277,6 +284,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -297,6 +305,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = 0x26,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -317,6 +326,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1c,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x1e,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -337,6 +347,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_RC] = INVALID_REG_ADDR,
[BQ27XXX_REG_FCC] = INVALID_REG_ADDR,
[BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -361,6 +372,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -382,6 +394,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -405,6 +418,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -425,6 +439,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = 0x08,
+ [BQ27XXX_REG_RC] = 0x0c,
[BQ27XXX_REG_FCC] = 0x0e,
[BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
@@ -450,6 +465,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -470,6 +486,7 @@ static u8
[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_RC] = 0x10,
[BQ27XXX_REG_FCC] = 0x12,
[BQ27XXX_REG_CYCT] = 0x2a,
[BQ27XXX_REG_AE] = 0x22,
@@ -490,6 +507,7 @@ static u8
[BQ27XXX_REG_TTES] = 0x1e,
[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
[BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_RC] = 0x04,
[BQ27XXX_REG_FCC] = 0x06,
[BQ27XXX_REG_CYCT] = 0x2c,
[BQ27XXX_REG_AE] = 0x24,
@@ -745,6 +763,7 @@ static enum power_supply_property bq27z561_props[] = {
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_POWER_AVG,
@@ -764,6 +783,7 @@ static enum power_supply_property bq28z610_props[] = {
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_POWER_AVG,
@@ -784,6 +804,7 @@ static enum power_supply_property bq34z100_props[] = {
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
@@ -1519,6 +1540,15 @@ static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di)
}
/*
+ * Return the battery Remaining Capacity in µAh
+ * Or < 0 if something fails.
+ */
+static inline int bq27xxx_battery_read_rc(struct bq27xxx_device_info *di)
+{
+ return bq27xxx_battery_read_charge(di, BQ27XXX_REG_RC);
+}
+
+/*
* Return the battery Full Charge Capacity in µAh
* Or < 0 if something fails.
*/
@@ -1789,7 +1819,7 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
if (di->opts & BQ27XXX_O_ZERO) {
flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
- if (flags & BQ27000_FLAG_CHGS) {
+ if (!(flags & BQ27000_FLAG_CHGS)) {
dev_dbg(di->dev, "negative current!\n");
curr = -curr;
}
@@ -1797,7 +1827,7 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
} else {
/* Other gauges return signed value */
- val->intval = (int)((s16)curr) * 1000;
+ val->intval = -(int)((s16)curr) * 1000;
}
return 0;
@@ -1965,7 +1995,10 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
- ret = bq27xxx_simple_value(bq27xxx_battery_read_nac(di), val);
+ if (di->regs[BQ27XXX_REG_NAC] != INVALID_REG_ADDR)
+ ret = bq27xxx_simple_value(bq27xxx_battery_read_nac(di), val);
+ else
+ ret = bq27xxx_simple_value(bq27xxx_battery_read_rc(di), val);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = bq27xxx_simple_value(di->cache.charge_full, val);
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 6fcebe441552..4dea8ecd70bc 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -570,7 +570,7 @@ static int cm_get_target_status(struct charger_manager *cm)
return POWER_SUPPLY_STATUS_DISCHARGING;
if (cm_check_thermal_status(cm)) {
- /* Check if discharging duration exeeds limit. */
+ /* Check if discharging duration exceeds limit. */
if (check_charging_duration(cm))
goto charging_ok;
return POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -578,7 +578,7 @@ static int cm_get_target_status(struct charger_manager *cm)
switch (cm->battery_status) {
case POWER_SUPPLY_STATUS_CHARGING:
- /* Check if charging duration exeeds limit. */
+ /* Check if charging duration exceeds limit. */
if (check_charging_duration(cm))
return POWER_SUPPLY_STATUS_FULL;
fallthrough;
@@ -723,9 +723,9 @@ static int charger_get_property(struct power_supply *psy,
val->intval = cm->battery_status;
break;
case POWER_SUPPLY_PROP_HEALTH:
- if (cm->emergency_stop > 0)
+ if (cm->emergency_stop == CM_BATT_OVERHEAT)
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
- else if (cm->emergency_stop < 0)
+ else if (cm->emergency_stop == CM_BATT_COLD)
val->intval = POWER_SUPPLY_HEALTH_COLD;
else
val->intval = POWER_SUPPLY_HEALTH_GOOD;
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 295611b3b15e..6d5bcdb9f45d 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
*
- * Some parts of the code based on earlie Motorola mapphone Linux kernel
+ * Some parts of the code based on earlier Motorola mapphone Linux kernel
* drivers:
*
* Copyright (C) 2009-2010 Motorola, Inc.
@@ -28,6 +28,7 @@
#include <linux/power_supply.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
+#include <linux/moduleparam.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
@@ -110,6 +111,8 @@ struct cpcap_coulomb_counter_data {
enum cpcap_battery_state {
CPCAP_BATTERY_STATE_PREVIOUS,
CPCAP_BATTERY_STATE_LATEST,
+ CPCAP_BATTERY_STATE_EMPTY,
+ CPCAP_BATTERY_STATE_FULL,
CPCAP_BATTERY_STATE_NR,
};
@@ -132,12 +135,17 @@ struct cpcap_battery_ddata {
struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
u32 cc_lsb; /* μAms per LSB */
atomic_t active;
+ int charge_full;
int status;
u16 vendor;
+ unsigned int is_full:1;
};
#define CPCAP_NO_BATTERY -400
+static bool ignore_temperature_probe;
+module_param(ignore_temperature_probe, bool, 0660);
+
static struct cpcap_battery_state_data *
cpcap_battery_get_state(struct cpcap_battery_ddata *ddata,
enum cpcap_battery_state state)
@@ -160,6 +168,18 @@ cpcap_battery_previous(struct cpcap_battery_ddata *ddata)
return cpcap_battery_get_state(ddata, CPCAP_BATTERY_STATE_PREVIOUS);
}
+static struct cpcap_battery_state_data *
+cpcap_battery_get_empty(struct cpcap_battery_ddata *ddata)
+{
+ return cpcap_battery_get_state(ddata, CPCAP_BATTERY_STATE_EMPTY);
+}
+
+static struct cpcap_battery_state_data *
+cpcap_battery_get_full(struct cpcap_battery_ddata *ddata)
+{
+ return cpcap_battery_get_state(ddata, CPCAP_BATTERY_STATE_FULL);
+}
+
static int cpcap_charger_battery_temperature(struct cpcap_battery_ddata *ddata,
int *value)
{
@@ -169,7 +189,8 @@ static int cpcap_charger_battery_temperature(struct cpcap_battery_ddata *ddata,
channel = ddata->channels[CPCAP_BATTERY_IIO_BATTDET];
error = iio_read_channel_processed(channel, value);
if (error < 0) {
- dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
+ if (!ignore_temperature_probe)
+ dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
*value = CPCAP_NO_BATTERY;
return error;
@@ -366,20 +387,81 @@ static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
}
+static int cpcap_battery_get_charger_status(struct cpcap_battery_ddata *ddata,
+ int *val)
+{
+ union power_supply_propval prop;
+ struct power_supply *charger;
+ int error;
+
+ charger = power_supply_get_by_name("usb");
+ if (!charger)
+ return -ENODEV;
+
+ error = power_supply_get_property(charger, POWER_SUPPLY_PROP_STATUS,
+ &prop);
+ if (error)
+ *val = POWER_SUPPLY_STATUS_UNKNOWN;
+ else
+ *val = prop.intval;
+
+ power_supply_put(charger);
+
+ return error;
+}
+
static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
{
struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
+ unsigned int vfull;
+ int error, val;
+
+ error = cpcap_battery_get_charger_status(ddata, &val);
+ if (!error) {
+ switch (val) {
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ dev_dbg(ddata->dev, "charger disconnected\n");
+ ddata->is_full = 0;
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ dev_dbg(ddata->dev, "charger full status\n");
+ ddata->is_full = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The full battery voltage here can be inaccurate, it's used just to
+ * filter out any trickle charging events. We clear the is_full status
+ * on charger disconnect above anyways.
+ */
+ vfull = ddata->config.bat.constant_charge_voltage_max_uv - 120000;
+
+ if (ddata->is_full && state->voltage < vfull)
+ ddata->is_full = 0;
+
+ return ddata->is_full;
+}
+
+static bool cpcap_battery_low(struct cpcap_battery_ddata *ddata)
+{
+ struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
+ static bool is_low;
- if (state->voltage >=
- (ddata->config.bat.constant_charge_voltage_max_uv - 18000))
- return true;
+ if (state->current_ua > 0 && (state->voltage <= 3350000 || is_low))
+ is_low = true;
+ else
+ is_low = false;
- return false;
+ return is_low;
}
static int cpcap_battery_update_status(struct cpcap_battery_ddata *ddata)
{
- struct cpcap_battery_state_data state, *latest, *previous;
+ struct cpcap_battery_state_data state, *latest, *previous,
+ *empty, *full;
ktime_t now;
int error;
@@ -408,9 +490,47 @@ static int cpcap_battery_update_status(struct cpcap_battery_ddata *ddata)
memcpy(previous, latest, sizeof(*previous));
memcpy(latest, &state, sizeof(*latest));
+ if (cpcap_battery_full(ddata)) {
+ full = cpcap_battery_get_full(ddata);
+ memcpy(full, latest, sizeof(*full));
+
+ empty = cpcap_battery_get_empty(ddata);
+ if (empty->voltage && empty->voltage != -1) {
+ empty->voltage = -1;
+ ddata->charge_full =
+ empty->counter_uah - full->counter_uah;
+ } else if (ddata->charge_full) {
+ empty->voltage = -1;
+ empty->counter_uah =
+ full->counter_uah + ddata->charge_full;
+ }
+ } else if (cpcap_battery_low(ddata)) {
+ empty = cpcap_battery_get_empty(ddata);
+ memcpy(empty, latest, sizeof(*empty));
+
+ full = cpcap_battery_get_full(ddata);
+ if (full->voltage) {
+ full->voltage = 0;
+ ddata->charge_full =
+ empty->counter_uah - full->counter_uah;
+ }
+ }
+
return 0;
}
+/*
+ * Update battery status when cpcap-charger calls power_supply_changed().
+ * This allows us to detect battery full condition before the charger
+ * disconnects.
+ */
+static void cpcap_battery_external_power_changed(struct power_supply *psy)
+{
+ union power_supply_propval prop;
+
+ power_supply_get_property(psy, POWER_SUPPLY_PROP_STATUS, &prop);
+}
+
static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -421,10 +541,13 @@ static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_TEMP,
@@ -435,7 +558,7 @@ static int cpcap_battery_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
- struct cpcap_battery_state_data *latest, *previous;
+ struct cpcap_battery_state_data *latest, *previous, *empty;
u32 sample;
s32 accumulator;
int cached;
@@ -450,7 +573,7 @@ static int cpcap_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- if (latest->temperature > CPCAP_NO_BATTERY)
+ if (latest->temperature > CPCAP_NO_BATTERY || ignore_temperature_probe)
val->intval = 1;
else
val->intval = 0;
@@ -515,6 +638,16 @@ static int cpcap_battery_get_property(struct power_supply *psy,
tmp *= ((latest->voltage + previous->voltage) / 20000);
val->intval = div64_s64(tmp, 100);
break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ empty = cpcap_battery_get_empty(ddata);
+ if (!empty->voltage || !ddata->charge_full)
+ return -ENODATA;
+ /* (ddata->charge_full / 200) is needed for rounding */
+ val->intval = empty->counter_uah - latest->counter_uah +
+ ddata->charge_full / 200;
+ val->intval = clamp(val->intval, 0, ddata->charge_full);
+ val->intval = val->intval * 100 / ddata->charge_full;
+ break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (cpcap_battery_full(ddata))
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
@@ -529,6 +662,21 @@ static int cpcap_battery_get_property(struct power_supply *psy,
else
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ empty = cpcap_battery_get_empty(ddata);
+ if (!empty->voltage)
+ return -ENODATA;
+ val->intval = empty->counter_uah - latest->counter_uah;
+ if (val->intval < 0)
+ val->intval = 0;
+ else if (ddata->charge_full && ddata->charge_full < val->intval)
+ val->intval = ddata->charge_full;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ if (!ddata->charge_full)
+ return -ENODATA;
+ val->intval = ddata->charge_full;
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval = ddata->config.info.charge_full_design;
break;
@@ -536,6 +684,8 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
break;
case POWER_SUPPLY_PROP_TEMP:
+ if (ignore_temperature_probe)
+ return -ENODATA;
val->intval = latest->temperature;
break;
default:
@@ -561,17 +711,21 @@ static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
&prop);
if (error)
- return error;
+ goto out_put;
/* Allow charger const voltage lower than battery const voltage */
if (const_charge_voltage > prop.intval)
- return 0;
+ goto out_put;
val.intval = const_charge_voltage;
- return power_supply_set_property(charger,
+ error = power_supply_set_property(charger,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
&val);
+out_put:
+ power_supply_put(charger);
+
+ return error;
}
static int cpcap_battery_set_property(struct power_supply *psy,
@@ -590,6 +744,15 @@ static int cpcap_battery_set_property(struct power_supply *psy,
ddata->config.bat.constant_charge_voltage_max_uv = val->intval;
return cpcap_battery_update_charger(ddata, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ if (val->intval < 0)
+ return -EINVAL;
+ if (val->intval > ddata->config.info.charge_full_design)
+ return -EINVAL;
+
+ ddata->charge_full = val->intval;
+
+ return 0;
default:
return -EINVAL;
}
@@ -602,6 +765,7 @@ static int cpcap_battery_property_is_writeable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
return 1;
default:
return 0;
@@ -666,7 +830,7 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_battery_irq_thread,
- IRQF_SHARED,
+ IRQF_SHARED | IRQF_ONESHOT,
name, ddata);
if (error) {
dev_err(ddata->dev, "could not get irq %s: %i\n",
@@ -835,9 +999,19 @@ static const struct of_device_id cpcap_battery_id_table[] = {
MODULE_DEVICE_TABLE(of, cpcap_battery_id_table);
#endif
+static const struct power_supply_desc cpcap_charger_battery_desc = {
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = cpcap_battery_props,
+ .num_properties = ARRAY_SIZE(cpcap_battery_props),
+ .get_property = cpcap_battery_get_property,
+ .set_property = cpcap_battery_set_property,
+ .property_is_writeable = cpcap_battery_property_is_writeable,
+ .external_power_changed = cpcap_battery_external_power_changed,
+};
+
static int cpcap_battery_probe(struct platform_device *pdev)
{
- struct power_supply_desc *psy_desc;
struct cpcap_battery_ddata *ddata;
const struct of_device_id *match;
struct power_supply_config psy_cfg = {};
@@ -892,22 +1066,11 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- psy_desc = devm_kzalloc(ddata->dev, sizeof(*psy_desc), GFP_KERNEL);
- if (!psy_desc)
- return -ENOMEM;
-
- psy_desc->name = "battery";
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
- psy_desc->properties = cpcap_battery_props;
- psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
- psy_desc->get_property = cpcap_battery_get_property;
- psy_desc->set_property = cpcap_battery_set_property;
- psy_desc->property_is_writeable = cpcap_battery_property_is_writeable;
-
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
- ddata->psy = devm_power_supply_register(ddata->dev, psy_desc,
+ ddata->psy = devm_power_supply_register(ddata->dev,
+ &cpcap_charger_battery_desc,
&psy_cfg);
error = PTR_ERR_OR_ZERO(ddata->psy);
if (error) {
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index c0d452e3dc8b..641dcad1133f 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -89,6 +89,8 @@
* CPCAP_REG_CRM charge currents. These seem to match MC13783UG.pdf
* values in "Table 8-3. Charge Path Regulator Current Limit
* Characteristics" for the nominal values.
+ *
+ * Except for 70mA, 1.596A and unlimited, the steps are simply 88.7mA each.
*/
#define CPCAP_REG_CRM_ICHRG(val) (((val) & 0xf) << 0)
#define CPCAP_REG_CRM_ICHRG_0A000 CPCAP_REG_CRM_ICHRG(0x0)
@@ -120,13 +122,6 @@ enum {
CPCAP_CHARGER_IIO_NR,
};
-enum {
- CPCAP_CHARGER_DISCONNECTED,
- CPCAP_CHARGER_DETECTING,
- CPCAP_CHARGER_CHARGING,
- CPCAP_CHARGER_DONE,
-};
-
struct cpcap_charger_ddata {
struct device *dev;
struct regmap *reg;
@@ -145,8 +140,8 @@ struct cpcap_charger_ddata {
atomic_t active;
int status;
- int state;
int voltage;
+ int limit_current;
};
struct cpcap_interrupt_desc {
@@ -173,6 +168,7 @@ static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
@@ -236,6 +232,9 @@ static int cpcap_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
val->intval = ddata->status;
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ val->intval = ddata->limit_current;
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
val->intval = ddata->voltage;
break;
@@ -301,11 +300,33 @@ cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
&prop);
if (!error)
voltage = prop.intval;
+
+ power_supply_put(battery);
}
return voltage;
}
+static int cpcap_charger_current_to_regval(int microamp)
+{
+ int milliamp = microamp / 1000;
+ int res;
+
+ if (milliamp < 0)
+ return -EINVAL;
+ if (milliamp < 70)
+ return CPCAP_REG_CRM_ICHRG(0x0);
+ if (milliamp < 177)
+ return CPCAP_REG_CRM_ICHRG(0x1);
+ if (milliamp > 1596)
+ return CPCAP_REG_CRM_ICHRG(0xe);
+
+ res = microamp / 88666;
+ if (res > 0xd)
+ res = 0xd;
+ return CPCAP_REG_CRM_ICHRG(res);
+}
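
A few illustrative mappings for the helper above (worked values, not part of the patch; they follow the 88.7mA/step comment in the CPCAP_REG_CRM block):

	cpcap_charger_current_to_regval(50000);   /* 50mA, below 70mA -> ICHRG(0x0) */
	cpcap_charger_current_to_regval(100000);  /* 100mA, below 177mA -> ICHRG(0x1) */
	cpcap_charger_current_to_regval(532000);  /* 532000 / 88666 = 6 -> ICHRG(0x6) */
	cpcap_charger_current_to_regval(1700000); /* above 1596mA -> ICHRG(0xe) */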
+
static int cpcap_charger_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
@@ -314,6 +335,12 @@ static int cpcap_charger_set_property(struct power_supply *psy,
int voltage, batvolt;
switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (cpcap_charger_current_to_regval(val->intval) < 0)
+ return -EINVAL;
+ ddata->limit_current = val->intval;
+ schedule_delayed_work(&ddata->detect_work, 0);
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
voltage = cpcap_charger_match_voltage(val->intval);
batvolt = cpcap_charger_get_bat_const_charge_voltage(ddata);
@@ -333,6 +360,7 @@ static int cpcap_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
return 1;
default:
@@ -358,31 +386,64 @@ static void cpcap_charger_set_inductive_path(struct cpcap_charger_ddata *ddata,
gpiod_set_value(ddata->gpio[1], enabled);
}
-static int cpcap_charger_set_state(struct cpcap_charger_ddata *ddata,
- int max_voltage, int charge_current,
- int trickle_current)
+static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
+ int state)
{
- bool enable;
- int error;
+ const char *status;
- enable = (charge_current || trickle_current);
- dev_dbg(ddata->dev, "%s enable: %i\n", __func__, enable);
+ if (state > POWER_SUPPLY_STATUS_FULL) {
+ dev_warn(ddata->dev, "unknown state: %i\n", state);
- if (!enable) {
- error = regmap_update_bits(ddata->reg, CPCAP_REG_CRM,
- 0x3fff,
- CPCAP_REG_CRM_FET_OVRD |
- CPCAP_REG_CRM_FET_CTRL);
- if (error) {
- ddata->status = POWER_SUPPLY_STATUS_UNKNOWN;
- goto out_err;
- }
+ return;
+ }
- ddata->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ddata->status = state;
- return 0;
+ switch (state) {
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ status = "DISCONNECTED";
+ break;
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ status = "DETECTING";
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ status = "CHARGING";
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ status = "DONE";
+ break;
+ default:
+ return;
}
+ dev_dbg(ddata->dev, "state: %s\n", status);
+}
+
+static int cpcap_charger_disable(struct cpcap_charger_ddata *ddata)
+{
+ int error;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CRM, 0x3fff,
+ CPCAP_REG_CRM_FET_OVRD |
+ CPCAP_REG_CRM_FET_CTRL);
+ if (error)
+ dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
+
+ return error;
+}
+
+static int cpcap_charger_enable(struct cpcap_charger_ddata *ddata,
+ int max_voltage, int charge_current,
+ int trickle_current)
+{
+ int error;
+
+ if (!max_voltage || !charge_current)
+ return -EINVAL;
+
+ dev_dbg(ddata->dev, "enable: %i %i %i\n",
+ max_voltage, charge_current, trickle_current);
+
error = regmap_update_bits(ddata->reg, CPCAP_REG_CRM, 0x3fff,
CPCAP_REG_CRM_CHRG_LED_EN |
trickle_current |
@@ -390,17 +451,8 @@ static int cpcap_charger_set_state(struct cpcap_charger_ddata *ddata,
CPCAP_REG_CRM_FET_CTRL |
max_voltage |
charge_current);
- if (error) {
- ddata->status = POWER_SUPPLY_STATUS_UNKNOWN;
- goto out_err;
- }
-
- ddata->status = POWER_SUPPLY_STATUS_CHARGING;
-
- return 0;
-
-out_err:
- dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
+ if (error)
+ dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
return error;
}
@@ -433,7 +485,7 @@ static void cpcap_charger_vbus_work(struct work_struct *work)
if (ddata->vbus_enabled) {
vbus = cpcap_charger_vbus_valid(ddata);
if (vbus) {
- dev_info(ddata->dev, "VBUS already provided\n");
+ dev_dbg(ddata->dev, "VBUS already provided\n");
return;
}
@@ -442,10 +494,13 @@ static void cpcap_charger_vbus_work(struct work_struct *work)
cpcap_charger_set_cable_path(ddata, false);
cpcap_charger_set_inductive_path(ddata, false);
- error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ error = cpcap_charger_disable(ddata);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata,
+ POWER_SUPPLY_STATUS_DISCHARGING);
+
error = regmap_update_bits(ddata->reg, CPCAP_REG_VUSBC,
CPCAP_BIT_VBUS_SWITCH,
CPCAP_BIT_VBUS_SWITCH);
@@ -476,6 +531,7 @@ static void cpcap_charger_vbus_work(struct work_struct *work)
return;
out_err:
+ cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
dev_err(ddata->dev, "%s could not %s vbus: %i\n", __func__,
ddata->vbus_enabled ? "enable" : "disable", error);
}
@@ -527,39 +583,6 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
return 0;
}
-static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
- int state)
-{
- const char *status;
-
- if (state > CPCAP_CHARGER_DONE) {
- dev_warn(ddata->dev, "unknown state: %i\n", state);
-
- return;
- }
-
- ddata->state = state;
-
- switch (state) {
- case CPCAP_CHARGER_DISCONNECTED:
- status = "DISCONNECTED";
- break;
- case CPCAP_CHARGER_DETECTING:
- status = "DETECTING";
- break;
- case CPCAP_CHARGER_CHARGING:
- status = "CHARGING";
- break;
- case CPCAP_CHARGER_DONE:
- status = "DONE";
- break;
- default:
- return;
- }
-
- dev_dbg(ddata->dev, "state: %s\n", status);
-}
-
static int cpcap_charger_voltage_to_regval(int voltage)
{
int offset;
@@ -591,9 +614,21 @@ static void cpcap_charger_disconnect(struct cpcap_charger_ddata *ddata,
{
int error;
- error = cpcap_charger_set_state(ddata, 0, 0, 0);
- if (error)
+ /* Update battery state before disconnecting the charger */
+ switch (state) {
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_FULL:
+ power_supply_changed(ddata->usb);
+ break;
+ default:
+ break;
+ }
+
+ error = cpcap_charger_disable(ddata);
+ if (error) {
+ cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
return;
+ }
cpcap_charger_update_state(ddata, state);
power_supply_changed(ddata->usb);
@@ -604,7 +639,7 @@ static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_charger_ddata *ddata;
struct cpcap_charger_ints_state s;
- int error;
+ int error, new_state;
ddata = container_of(work, struct cpcap_charger_ddata,
detect_work.work);
@@ -615,7 +650,8 @@ static void cpcap_usb_detect(struct work_struct *work)
/* Just init the state if a charger is connected with no chrg_det set */
if (!s.chrg_det && s.chrgcurr1 && s.vbusvld) {
- cpcap_charger_update_state(ddata, CPCAP_CHARGER_DETECTING);
+ cpcap_charger_update_state(ddata,
+ POWER_SUPPLY_STATUS_NOT_CHARGING);
return;
}
@@ -625,28 +661,35 @@ static void cpcap_usb_detect(struct work_struct *work)
* charged to 4.35V by Android. Try again in 10 minutes.
*/
if (cpcap_charger_get_charge_voltage(ddata) > ddata->voltage) {
- cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ cpcap_charger_disconnect(ddata,
+ POWER_SUPPLY_STATUS_NOT_CHARGING,
HZ * 60 * 10);
return;
}
/* Throttle chrgcurr2 interrupt for charger done and retry */
- switch (ddata->state) {
- case CPCAP_CHARGER_CHARGING:
+ switch (ddata->status) {
+ case POWER_SUPPLY_STATUS_CHARGING:
if (s.chrgcurr2)
break;
+ new_state = POWER_SUPPLY_STATUS_FULL;
+
if (s.chrgcurr1 && s.vbusvld) {
- cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DONE,
- HZ * 5);
+ cpcap_charger_disconnect(ddata, new_state, HZ * 5);
return;
}
break;
- case CPCAP_CHARGER_DONE:
+ case POWER_SUPPLY_STATUS_FULL:
if (!s.chrgcurr2)
break;
- cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
- HZ * 5);
+ if (s.vbusvld)
+ new_state = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ new_state = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ cpcap_charger_disconnect(ddata, new_state, HZ * 5);
+
return;
default:
break;
@@ -654,32 +697,37 @@ static void cpcap_usb_detect(struct work_struct *work)
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
- int max_current;
- int vchrg;
+ int max_current = 532000;
+ int vchrg, ichrg;
if (cpcap_charger_battery_found(ddata))
- max_current = CPCAP_REG_CRM_ICHRG_1A596;
- else
- max_current = CPCAP_REG_CRM_ICHRG_0A532;
+ max_current = 1596000;
+
+ if (max_current > ddata->limit_current)
+ max_current = ddata->limit_current;
+ ichrg = cpcap_charger_current_to_regval(max_current);
vchrg = cpcap_charger_voltage_to_regval(ddata->voltage);
- error = cpcap_charger_set_state(ddata,
- CPCAP_REG_CRM_VCHRG(vchrg),
- max_current, 0);
+ error = cpcap_charger_enable(ddata,
+ CPCAP_REG_CRM_VCHRG(vchrg),
+ ichrg, 0);
if (error)
goto out_err;
- cpcap_charger_update_state(ddata, CPCAP_CHARGER_CHARGING);
+ cpcap_charger_update_state(ddata,
+ POWER_SUPPLY_STATUS_CHARGING);
} else {
- error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ error = cpcap_charger_disable(ddata);
if (error)
goto out_err;
- cpcap_charger_update_state(ddata, CPCAP_CHARGER_DISCONNECTED);
+ cpcap_charger_update_state(ddata,
+ POWER_SUPPLY_STATUS_DISCHARGING);
}
power_supply_changed(ddata->usb);
return;
out_err:
+ cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
}
@@ -708,7 +756,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_charger_irq_thread,
- IRQF_SHARED,
+ IRQF_SHARED | IRQF_ONESHOT,
name, ddata);
if (error) {
dev_err(ddata->dev, "could not get irq %s: %i\n",
@@ -799,6 +847,10 @@ out_err:
return error;
}
+static char *cpcap_charger_supplied_to[] = {
+ "battery",
+};
+
static const struct power_supply_desc cpcap_charger_usb_desc = {
.name = "usb",
.type = POWER_SUPPLY_TYPE_USB,
@@ -837,6 +889,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
ddata->voltage = 4200000;
+ ddata->limit_current = 532000;
ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
if (!ddata->reg)
@@ -855,6 +908,8 @@ static int cpcap_charger_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
+ psy_cfg.supplied_to = cpcap_charger_supplied_to;
+ psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to);
ddata->usb = devm_power_supply_register(ddata->dev,
&cpcap_charger_usb_desc,
@@ -885,7 +940,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
return 0;
}
-static int cpcap_charger_remove(struct platform_device *pdev)
+static void cpcap_charger_shutdown(struct platform_device *pdev)
{
struct cpcap_charger_ddata *ddata = platform_get_drvdata(pdev);
int error;
@@ -896,12 +951,20 @@ static int cpcap_charger_remove(struct platform_device *pdev)
dev_warn(ddata->dev, "could not clear USB comparator: %i\n",
error);
- error = cpcap_charger_set_state(ddata, 0, 0, 0);
- if (error)
+ error = cpcap_charger_disable(ddata);
+ if (error) {
+ cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
dev_warn(ddata->dev, "could not clear charger: %i\n",
error);
+ }
+ cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_DISCHARGING);
cancel_delayed_work_sync(&ddata->vbus_work);
cancel_delayed_work_sync(&ddata->detect_work);
+}
+
+static int cpcap_charger_remove(struct platform_device *pdev)
+{
+ cpcap_charger_shutdown(pdev);
return 0;
}
@@ -912,6 +975,7 @@ static struct platform_driver cpcap_charger_driver = {
.name = "cpcap-charger",
.of_match_table = of_match_ptr(cpcap_charger_id_table),
},
+ .shutdown = cpcap_charger_shutdown,
.remove = cpcap_charger_remove,
};
module_platform_driver(cpcap_charger_driver);
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 695bb6747400..5f50da524f41 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -198,7 +198,7 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
return w1_ds2760_read(dev, buf, off, count);
}
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index dd57a472e878..2b8c90d84325 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -624,7 +624,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
@@ -637,7 +637,7 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
int ret;
@@ -669,7 +669,7 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
@@ -682,7 +682,7 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
int ret;
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 32dc77fd9a95..8b18219ebe90 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -52,7 +52,7 @@ static int ingenic_battery_get_property(struct power_supply *psy,
return 0;
default:
return -EINVAL;
- };
+ }
}
/* Set the most appropriate IIO channel voltage reference scale
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
new file mode 100644
index 000000000000..1a5cb4405ee3
--- /dev/null
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Analog Devices (Linear Technology) LTC4162-L charger IC.
+ * Copyright (C) 2020, Topic Embedded Products
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+/* Registers (names based on what the datasheet uses) */
+#define LTC4162L_EN_LIMIT_ALERTS_REG 0x0D
+#define LTC4162L_EN_CHARGER_STATE_ALERTS_REG 0x0E
+#define LTC4162L_EN_CHARGE_STATUS_ALERTS_REG 0x0F
+#define LTC4162L_CONFIG_BITS_REG 0x14
+#define LTC4162L_IIN_LIMIT_TARGET 0x15
+#define LTC4162L_ARM_SHIP_MODE 0x19
+#define LTC4162L_CHARGE_CURRENT_SETTING 0x1A
+#define LTC4162L_VCHARGE_SETTING 0x1B
+#define LTC4162L_C_OVER_X_THRESHOLD 0x1C
+#define LTC4162L_MAX_CV_TIME 0x1D
+#define LTC4162L_MAX_CHARGE_TIME 0x1E
+#define LTC4162L_CHARGER_CONFIG_BITS 0x29
+#define LTC4162L_CHARGER_STATE 0x34
+#define LTC4162L_CHARGE_STATUS 0x35
+#define LTC4162L_LIMIT_ALERTS_REG 0x36
+#define LTC4162L_CHARGER_STATE_ALERTS_REG 0x37
+#define LTC4162L_CHARGE_STATUS_ALERTS_REG 0x38
+#define LTC4162L_SYSTEM_STATUS_REG 0x39
+#define LTC4162L_VBAT 0x3A
+#define LTC4162L_VIN 0x3B
+#define LTC4162L_VOUT 0x3C
+#define LTC4162L_IBAT 0x3D
+#define LTC4162L_IIN 0x3E
+#define LTC4162L_DIE_TEMPERATURE 0x3F
+#define LTC4162L_THERMISTOR_VOLTAGE 0x40
+#define LTC4162L_BSR 0x41
+#define LTC4162L_JEITA_REGION 0x42
+#define LTC4162L_CHEM_CELLS_REG 0x43
+#define LTC4162L_ICHARGE_DAC 0x44
+#define LTC4162L_VCHARGE_DAC 0x45
+#define LTC4162L_IIN_LIMIT_DAC 0x46
+#define LTC4162L_VBAT_FILT 0x47
+#define LTC4162L_INPUT_UNDERVOLTAGE_DAC 0x4B
+
+/* Enumeration as in datasheet. Individual bits are mutually exclusive. */
+enum ltc4162l_state {
+ battery_detection = 2048,
+ charger_suspended = 256,
+ precharge = 128, /* trickle on low bat voltage */
+ cc_cv_charge = 64, /* normal charge */
+ ntc_pause = 32,
+ timer_term = 16,
+ c_over_x_term = 8, /* battery is full */
+ max_charge_time_fault = 4,
+ bat_missing_fault = 2,
+ bat_short_fault = 1
+};
+
+/* Individual bits are mutually exclusive. Only active in charging states. */
+enum ltc4162l_charge_status {
+ ilim_reg_active = 32,
+ thermal_reg_active = 16,
+ vin_uvcl_active = 8,
+ iin_limit_active = 4,
+ constant_current = 2,
+ constant_voltage = 1,
+ charger_off = 0
+};
+
+/* Magic number to write to ARM_SHIP_MODE register */
+#define LTC4162L_ARM_SHIP_MODE_MAGIC 21325
+
+struct ltc4162l_info {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct power_supply *charger;
+ u32 rsnsb; /* Series resistor that sets charge current, microOhm */
+ u32 rsnsi; /* Series resistor to measure input current, microOhm */
+ u8 cell_count; /* Number of connected cells, 0 while unknown */
+};
+
+static u8 ltc4162l_get_cell_count(struct ltc4162l_info *info)
+{
+ int ret;
+ unsigned int val;
+
+ /* Return the cached value once it has been read successfully */
+ if (info->cell_count)
+ return info->cell_count;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHEM_CELLS_REG, &val);
+ if (ret)
+ return 0;
+
+ /* The lower 4 bits are the cell count, or 0 if the chip doesn't know yet */
+ val &= 0x0f;
+ if (!val)
+ return 0;
+
+ /* Once determined, keep the value */
+ info->cell_count = val;
+
+ return val;
+}
+
+/* Convert enum value to POWER_SUPPLY_STATUS value */
+static int ltc4162l_state_decode(enum ltc4162l_state value)
+{
+ switch (value) {
+ case precharge:
+ case cc_cv_charge:
+ return POWER_SUPPLY_STATUS_CHARGING;
+ case c_over_x_term:
+ return POWER_SUPPLY_STATUS_FULL;
+ case bat_missing_fault:
+ case bat_short_fault:
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ default:
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ }
+}
+
+static int ltc4162l_get_status(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHARGER_STATE, &regval);
+ if (ret) {
+ dev_err(&info->client->dev, "Failed to read CHARGER_STATE\n");
+ return ret;
+ }
+
+ val->intval = ltc4162l_state_decode(regval);
+
+ return 0;
+}
+
+static int ltc4162l_charge_status_decode(enum ltc4162l_charge_status value)
+{
+ if (!value)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+
+ /* constant voltage/current and input_current limit are "fast" modes */
+ if (value <= iin_limit_active)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+
+ /* Anything that's not fast we'll return as trickle */
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+}
+
+static int ltc4162l_get_charge_type(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHARGE_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ val->intval = ltc4162l_charge_status_decode(regval);
+
+ return 0;
+}
+
+static int ltc4162l_state_to_health(enum ltc4162l_state value)
+{
+ switch (value) {
+ case ntc_pause:
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ case timer_term:
+ return POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ case max_charge_time_fault:
+ return POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE;
+ case bat_missing_fault:
+ return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ case bat_short_fault:
+ return POWER_SUPPLY_HEALTH_DEAD;
+ default:
+ return POWER_SUPPLY_HEALTH_GOOD;
+ }
+}
+
+static int ltc4162l_get_health(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHARGER_STATE, &regval);
+ if (ret)
+ return ret;
+
+ val->intval = ltc4162l_state_to_health(regval);
+
+ return 0;
+}
+
+static int ltc4162l_get_online(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_SYSTEM_STATUS_REG, &regval);
+ if (ret)
+ return ret;
+
+ /* BIT(2) indicates if input voltage is sufficient to charge */
+ val->intval = !!(regval & BIT(2));
+
+ return 0;
+}
+
+static int ltc4162l_get_vbat(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /* cell_count × 192.4μV/LSB */
+ regval *= 1924;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 10;
+ val->intval = regval;
+
+ return 0;
+}
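
A quick sanity check on the 192.4μV/LSB scaling (worked arithmetic, not part of the patch): a one-cell pack reading regval = 19750 gives 19750 * 1924 * 1 / 10 = 3799900μV, i.e. about 3.8V, a plausible Li-Ion cell voltage.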
+
+static int ltc4162l_get_ibat(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_IBAT, &regval);
+ if (ret)
+ return ret;
+
+ /* Signed 16-bit number, 1.466μV / RSNSB amperes/LSB. */
+ ret = (s16)(regval & 0xFFFF);
+ val->intval = 100 * mult_frac(ret, 14660, (int)info->rsnsb);
+
+ return 0;
+}
+
+static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_VIN, &regval);
+ if (ret)
+ return ret;
+
+ /* 1.649mV/LSB */
+ val->intval = regval * 1649;
+
+ return 0;
+}
+
+static int ltc4162l_get_input_current(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_IIN, &regval);
+ if (ret)
+ return ret;
+
+ /* Signed 16-bit number, 1.466μV / RSNSI amperes/LSB. */
+ ret = (s16)(regval & 0xFFFF);
+ ret *= 14660;
+ ret /= info->rsnsi;
+ ret *= 100;
+
+ val->intval = ret;
+
+ return 0;
+}
+
+static int ltc4162l_get_icharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ regval &= BIT(6) - 1; /* Only the lower 6 bits */
+
+ /* The charge current servo level: (icharge_dac + 1) × 1mV/RSNSB */
+ ++regval;
+ val->intval = 10000u * mult_frac(regval, 100000u, info->rsnsb);
+
+ return 0;
+}
+
+static int ltc4162l_set_icharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ value = mult_frac(value, info->rsnsb, 100000u);
+ value /= 10000u;
+
+ /* Round to lowest possible */
+ if (value)
+ --value;
+
+ if (value > 31)
+ return -EINVAL;
+
+ return regmap_write(info->regmap, reg, value);
+}
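
Round-tripping the two icharge helpers above under an assumed 10mΩ sense resistor (rsnsb = 10000μΩ; worked example, not part of the patch): a 1A request in ltc4162l_set_icharge() becomes mult_frac(1000000, 10000, 100000) / 10000 - 1 = 9, and ltc4162l_get_icharge() maps register value 9 back to 10000 * mult_frac(9 + 1, 100000, 10000) = 1000000μA, matching (icharge_dac + 1) * 1mV / 10mΩ = 1A.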
+
+static int ltc4162l_get_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+ u32 voltage;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ regval &= BIT(6) - 1; /* Only the lower 6 bits */
+
+ /*
+ * charge voltage setting can be computed from
+ * cell_count × (vcharge_setting × 12.5mV + 3.8125V)
+ * where vcharge_setting ranges from 0 to 31 (4.2V max).
+ */
+ voltage = 3812500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+}
+
+static int ltc4162l_set_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ u8 cell_count = ltc4162l_get_cell_count(info);
+
+ if (!cell_count)
+ return -EBUSY; /* Not available yet, try again later */
+
+ value /= cell_count;
+
+ if (value < 3812500)
+ return -EINVAL;
+
+ value -= 3812500;
+ value /= 12500;
+
+ if (value > 31)
+ return -EINVAL;
+
+ return regmap_write(info->regmap, reg, value);
+}
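
Worked example for the formula above (not part of the patch): for a one-cell pack and a requested 4.2V, ltc4162l_set_vcharge() computes (4200000 - 3812500) / 12500 = 31, the maximum setting, and ltc4162l_get_vcharge() converts 31 back to 3812500 + 31 * 12500 = 4200000μV per cell.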
+
+static int ltc4162l_get_iin_limit_dac(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_IIN_LIMIT_DAC, &regval);
+ if (ret)
+ return ret;
+
+ regval &= BIT(6) - 1; /* Only 6 bits */
+
+ /* (iin_limit_dac + 1) × 500μV / RSNSI */
+ ++regval;
+ regval *= 5000000u;
+ regval /= info->rsnsi;
+ val->intval = 100u * regval;
+
+ return 0;
+}
+
+static int ltc4162l_set_iin_limit(struct ltc4162l_info *info,
+ unsigned int value)
+{
+ unsigned int regval;
+
+ regval = mult_frac(value, info->rsnsi, 50000u);
+ regval /= 10000u;
+ if (regval)
+ --regval;
+ if (regval > 63)
+ regval = 63;
+
+ return regmap_write(info->regmap, LTC4162L_IIN_LIMIT_TARGET, regval);
+}
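
The input limit is 500μV / RSNSI per step; with the same assumed rsnsi = 10000μΩ (worked example, not part of the patch): a 1A request becomes mult_frac(1000000, 10000, 50000) / 10000 - 1 = 19, and ltc4162l_get_iin_limit_dac() converts 19 back to (19 + 1) * 5000000 / 10000 * 100 = 1000000μA.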
+
+static int ltc4162l_get_die_temp(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_DIE_TEMPERATURE, &regval);
+ if (ret)
+ return ret;
+
+ /* die_temp × 0.0215°C/LSB - 264.4°C */
+ ret = (s16)(regval & 0xFFFF);
+ ret *= 215;
+ ret /= 100; /* Centidegrees scale */
+ ret -= 26440;
+ val->intval = ret;
+
+ return 0;
+}
+
+static int ltc4162l_get_term_current(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHARGER_CONFIG_BITS, &regval);
+ if (ret)
+ return ret;
+
+ /* Check if C_OVER_X_THRESHOLD is enabled */
+ if (!(regval & BIT(2))) {
+ val->intval = 0;
+ return 0;
+ }
+
+ ret = regmap_read(info->regmap, LTC4162L_C_OVER_X_THRESHOLD, &regval);
+ if (ret)
+ return ret;
+
+ /* 1.466μV / RSNSB amperes/LSB */
+ regval *= 14660u;
+ regval /= info->rsnsb;
+ val->intval = 100 * regval;
+
+ return 0;
+}
+
+static int ltc4162l_set_term_current(struct ltc4162l_info *info,
+ unsigned int value)
+{
+ int ret;
+ unsigned int regval;
+
+ if (!value) {
+ /* Disable en_c_over_x_term when set to zero */
+ return regmap_update_bits(info->regmap,
+ LTC4162L_CHARGER_CONFIG_BITS,
+ BIT(2), 0);
+ }
+
+ regval = mult_frac(value, info->rsnsb, 14660u);
+ regval /= 100u;
+
+ ret = regmap_write(info->regmap, LTC4162L_C_OVER_X_THRESHOLD, regval);
+ if (ret)
+ return ret;
+
+ /* Set en_c_over_x_term after changing the threshold value */
+ return regmap_update_bits(info->regmap, LTC4162L_CHARGER_CONFIG_BITS,
+ BIT(2), BIT(2));
+}
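
Worked example for the termination threshold, again assuming rsnsb = 10000μΩ (not part of the patch): a 100mA threshold maps to mult_frac(100000, 10000, 14660) / 100 = 682, which reads back as 682 * 14660 / 10000 * 100 = 99900μA; the 0.1% shortfall is integer truncation.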
+
+/* Custom properties */
+static const char * const ltc4162l_charge_status_name[] = {
+ "ilim_reg_active", /* 32 */
+ "thermal_reg_active",
+ "vin_uvcl_active",
+ "iin_limit_active",
+ "constant_current",
+ "constant_voltage",
+ "charger_off" /* 0 */
+};
+
+static ssize_t charge_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const char *result = ltc4162l_charge_status_name[
+ ARRAY_SIZE(ltc4162l_charge_status_name) - 1];
+ unsigned int regval;
+ unsigned int mask;
+ unsigned int index;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHARGE_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ /* Only one bit is set according to datasheet, let's be safe here */
+ for (mask = 32, index = 0; mask != 0; mask >>= 1, ++index) {
+ if (regval & mask) {
+ result = ltc4162l_charge_status_name[index];
+ break;
+ }
+ }
+
+ return sprintf(buf, "%s\n", result);
+}
+static DEVICE_ATTR_RO(charge_status);
+
+static ssize_t vbat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ union power_supply_propval val;
+ int ret;
+
+ ret = ltc4162l_get_vbat(info, LTC4162L_VBAT, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val.intval);
+}
+static DEVICE_ATTR_RO(vbat);
+
+static ssize_t vbat_avg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ union power_supply_propval val;
+ int ret;
+
+ ret = ltc4162l_get_vbat(info, LTC4162L_VBAT_FILT, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val.intval);
+}
+static DEVICE_ATTR_RO(vbat_avg);
+
+static ssize_t ibat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ union power_supply_propval val;
+ int ret;
+
+ ret = ltc4162l_get_ibat(info, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val.intval);
+}
+static DEVICE_ATTR_RO(ibat);
+
+static ssize_t force_telemetry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_CONFIG_BITS_REG, &regval);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", regval & BIT(2) ? 1 : 0);
+}
+
+static ssize_t force_telemetry_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 0, &value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(info->regmap, LTC4162L_CONFIG_BITS_REG,
+ BIT(2), value ? BIT(2) : 0);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(force_telemetry);
+
+static ssize_t arm_ship_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_ARM_SHIP_MODE, &regval);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n",
+ regval == LTC4162L_ARM_SHIP_MODE_MAGIC ? 1 : 0);
+}
+
+static ssize_t arm_ship_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 0, &value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, LTC4162L_ARM_SHIP_MODE,
+ value ? LTC4162L_ARM_SHIP_MODE_MAGIC : 0);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(arm_ship_mode);
+
+static struct attribute *ltc4162l_sysfs_entries[] = {
+ &dev_attr_charge_status.attr,
+ &dev_attr_ibat.attr,
+ &dev_attr_vbat.attr,
+ &dev_attr_vbat_avg.attr,
+ &dev_attr_force_telemetry.attr,
+ &dev_attr_arm_ship_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group ltc4162l_attr_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = ltc4162l_sysfs_entries,
+};
+
+static const struct attribute_group *ltc4162l_attr_groups[] = {
+ &ltc4162l_attr_group,
+ NULL,
+};
+
+static int ltc4162l_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return ltc4162l_get_status(info, val);
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return ltc4162l_get_charge_type(info, val);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return ltc4162l_get_health(info, val);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ltc4162l_get_online(info, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return ltc4162l_get_input_voltage(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ltc4162l_get_input_current(info, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return ltc4162l_get_icharge(info,
+ LTC4162L_ICHARGE_DAC, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return ltc4162l_get_icharge(info,
+ LTC4162L_CHARGE_CURRENT_SETTING, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return ltc4162l_get_vcharge(info,
+ LTC4162L_VCHARGE_DAC, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return ltc4162l_get_vcharge(info,
+ LTC4162L_VCHARGE_SETTING, val);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return ltc4162l_get_iin_limit_dac(info, val);
+ case POWER_SUPPLY_PROP_TEMP:
+ return ltc4162l_get_die_temp(info, val);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return ltc4162l_get_term_current(info, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4162l_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return ltc4162l_set_icharge(info,
+ LTC4162L_CHARGE_CURRENT_SETTING, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return ltc4162l_set_vcharge(info,
+ LTC4162L_VCHARGE_SETTING, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return ltc4162l_set_iin_limit(info, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return ltc4162l_set_term_current(info, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4162l_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Charger power supply property routines */
+static enum power_supply_property ltc4162l_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+};
+
+static const struct power_supply_desc ltc4162l_desc = {
+ .name = "ltc4162-l",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = ltc4162l_properties,
+ .num_properties = ARRAY_SIZE(ltc4162l_properties),
+ .get_property = ltc4162l_get_property,
+ .set_property = ltc4162l_set_property,
+ .property_is_writeable = ltc4162l_property_is_writeable,
+};
+
+static bool ltc4162l_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ /* all registers up to this one are writeable */
+ if (reg <= LTC4162L_CHARGER_CONFIG_BITS)
+ return true;
+
+ /* The ALERTS registers can be written to clear alerts */
+ if (reg >= LTC4162L_LIMIT_ALERTS_REG &&
+ reg <= LTC4162L_CHARGE_STATUS_ALERTS_REG)
+ return true;
+
+ return false;
+}
+
+static bool ltc4162l_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* all registers after this one are read-only status registers */
+ return reg > LTC4162L_CHARGER_CONFIG_BITS;
+}
+
+static const struct regmap_config ltc4162l_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .writeable_reg = ltc4162l_is_writeable_reg,
+ .volatile_reg = ltc4162l_is_volatile_reg,
+ .max_register = LTC4162L_INPUT_UNDERVOLTAGE_DAC,
+ .cache_type = REGCACHE_RBTREE,
+};
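
The regmap configuration above mirrors the chip's SMBus interface: 8-bit register addresses carrying 16-bit little-endian words, with everything past CHARGER_CONFIG_BITS treated as volatile status. For a single register, a regmap_read() amounts to roughly the following raw access (a sketch assuming a plain SMBus word transfer and the probe's client pointer; not code from the patch):

	s32 raw = i2c_smbus_read_word_data(client, LTC4162L_CHARGER_STATE);

	if (raw < 0)
		return raw;  /* negative errno */
	regval = raw;        /* little-endian word, per val_format_endian */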
+
+static void ltc4162l_clear_interrupts(struct ltc4162l_info *info)
+{
+ /* Acknowledge interrupt to chip by clearing all events */
+ regmap_write(info->regmap, LTC4162L_LIMIT_ALERTS_REG, 0);
+ regmap_write(info->regmap, LTC4162L_CHARGER_STATE_ALERTS_REG, 0);
+ regmap_write(info->regmap, LTC4162L_CHARGE_STATUS_ALERTS_REG, 0);
+}
+
+static int ltc4162l_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct ltc4162l_info *info;
+ struct power_supply_config ltc4162l_config = {};
+ u32 value;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(dev, "No support for SMBUS_WORD_DATA\n");
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->client = client;
+ i2c_set_clientdata(client, info);
+
+ info->regmap = devm_regmap_init_i2c(client, &ltc4162l_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ dev_err(dev, "Failed to initialize register map\n");
+ return PTR_ERR(info->regmap);
+ }
+
+ ret = device_property_read_u32(dev, "lltc,rsnsb-micro-ohms",
+ &info->rsnsb);
+ if (ret) {
+ dev_err(dev, "Missing lltc,rsnsb-micro-ohms property\n");
+ return ret;
+ }
+ if (!info->rsnsb)
+ return -EINVAL;
+
+ ret = device_property_read_u32(dev, "lltc,rsnsi-micro-ohms",
+ &info->rsnsi);
+ if (ret) {
+ dev_err(dev, "Missing lltc,rsnsi-micro-ohms property\n");
+ return ret;
+ }
+ if (!info->rsnsi)
+ return -EINVAL;
+
+ if (!device_property_read_u32(dev, "lltc,cell-count", &value))
+ info->cell_count = value;
+
+ ltc4162l_config.of_node = dev->of_node;
+ ltc4162l_config.drv_data = info;
+ ltc4162l_config.attr_grp = ltc4162l_attr_groups;
+
+ info->charger = devm_power_supply_register(dev, &ltc4162l_desc,
+ &ltc4162l_config);
+ if (IS_ERR(info->charger)) {
+ dev_err(dev, "Failed to register charger\n");
+ return PTR_ERR(info->charger);
+ }
+
+ /* Disable the threshold alerts, we're not using them */
+ regmap_write(info->regmap, LTC4162L_EN_LIMIT_ALERTS_REG, 0);
+
+ /* Enable interrupts on all status changes */
+ regmap_write(info->regmap, LTC4162L_EN_CHARGER_STATE_ALERTS_REG,
+ 0x1fff);
+ regmap_write(info->regmap, LTC4162L_EN_CHARGE_STATUS_ALERTS_REG, 0x1f);
+
+ ltc4162l_clear_interrupts(info);
+
+ return 0;
+}
+
+static void ltc4162l_alert(struct i2c_client *client,
+ enum i2c_alert_protocol type, unsigned int flag)
+{
+ struct ltc4162l_info *info = i2c_get_clientdata(client);
+
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
+ ltc4162l_clear_interrupts(info);
+ power_supply_changed(info->charger);
+}
+
+static const struct i2c_device_id ltc4162l_i2c_id_table[] = {
+ { "ltc4162-l", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ltc4162l_i2c_id_table);
+
+static const struct of_device_id ltc4162l_of_match[] = {
+ { .compatible = "lltc,ltc4162-l", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ltc4162l_of_match);
+
+static struct i2c_driver ltc4162l_driver = {
+ .probe = ltc4162l_probe,
+ .alert = ltc4162l_alert,
+ .id_table = ltc4162l_i2c_id_table,
+ .driver = {
+ .name = "ltc4162-l-charger",
+ .of_match_table = of_match_ptr(ltc4162l_of_match),
+ },
+};
+module_i2c_driver(ltc4162l_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("LTC4162-L charger driver");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index 137f9fafce8c..3f49b29f3c88 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -15,8 +15,6 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/max8903_charger.c b/drivers/power/supply/max8903_charger.c
index 0bd39b0cc257..54d50b55fbae 100644
--- a/drivers/power/supply/max8903_charger.c
+++ b/drivers/power/supply/max8903_charger.c
@@ -6,22 +6,32 @@
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/platform_device.h>
-#include <linux/power/max8903_charger.h>
struct max8903_data {
- struct max8903_pdata *pdata;
struct device *dev;
struct power_supply *psy;
struct power_supply_desc psy_desc;
+ /*
+ * GPIOs
+ * chg, flt, dcm and usus are optional.
+ * dok or uok must be present.
+ * If dok is present, cen must be present.
+ */
+ struct gpio_desc *cen; /* Charger Enable input */
+ struct gpio_desc *dok; /* DC (Adapter) Power OK output */
+ struct gpio_desc *uok; /* USB Power OK output */
+ struct gpio_desc *chg; /* Charger status output */
+ struct gpio_desc *flt; /* Fault output */
+ struct gpio_desc *dcm; /* Current-Limit Mode input (1: DC, 0: USB) */
+ struct gpio_desc *usus; /* USB Suspend Input (1: suspended) */
bool fault;
bool usb_in;
bool ta_in;
@@ -42,8 +52,9 @@ static int max8903_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- if (gpio_is_valid(data->pdata->chg)) {
- if (gpio_get_value(data->pdata->chg) == 0)
+ if (data->chg) {
+ if (gpiod_get_value(data->chg))
+ /* CHG asserted */
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (data->usb_in || data->ta_in)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -71,11 +82,17 @@ static int max8903_get_property(struct power_supply *psy,
static irqreturn_t max8903_dcin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
bool ta_in;
enum power_supply_type old_type;
- ta_in = gpio_get_value(pdata->dok) ? false : true;
+ /*
+ * This means the line is asserted.
+ *
+ * The signal is active low, but the inversion is handled in the GPIO
+ * library as the line should be flagged GPIO_ACTIVE_LOW in the device
+ * tree.
+ */
+ ta_in = gpiod_get_value(data->dok);
if (ta_in == data->ta_in)
return IRQ_HANDLED;
@@ -83,13 +100,25 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
data->ta_in = ta_in;
/* Set Current-Limit-Mode 1:DC 0:USB */
- if (gpio_is_valid(pdata->dcm))
- gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
-
- /* Charger Enable / Disable (cen is negated) */
- if (gpio_is_valid(pdata->cen))
- gpio_set_value(pdata->cen, ta_in ? 0 :
- (data->usb_in ? 0 : 1));
+ if (data->dcm)
+ gpiod_set_value(data->dcm, ta_in);
+
+ /* Charger Enable / Disable */
+ if (data->cen) {
+ int val;
+
+ if (ta_in)
+ /* Certainly enable if DOK is asserted */
+ val = 1;
+ else if (data->usb_in)
+ /* Enable if the USB charger is enabled */
+ val = 1;
+ else
+ /* Else default-disable */
+ val = 0;
+
+ gpiod_set_value(data->cen, val);
+ }
dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
"Connected" : "Disconnected");
@@ -112,11 +141,17 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
static irqreturn_t max8903_usbin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
bool usb_in;
enum power_supply_type old_type;
- usb_in = gpio_get_value(pdata->uok) ? false : true;
+ /*
+ * This means the line is asserted.
+ *
+ * The signal is active low, but the inversion is handled in the GPIO
+ * library as the line should be flagged GPIO_ACTIVE_LOW in the device
+ * tree.
+ */
+ usb_in = gpiod_get_value(data->uok);
if (usb_in == data->usb_in)
return IRQ_HANDLED;
@@ -125,10 +160,22 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
/* Do not touch Current-Limit-Mode */
- /* Charger Enable / Disable (cen is negated) */
- if (gpio_is_valid(pdata->cen))
- gpio_set_value(pdata->cen, usb_in ? 0 :
- (data->ta_in ? 0 : 1));
+ /* Charger Enable / Disable */
+ if (data->cen) {
+ int val;
+
+ if (usb_in)
+ /* Certainly enable if UOK is asserted */
+ val = 1;
+ else if (data->ta_in)
+ /* Enable if the DC charger is enabled */
+ val = 1;
+ else
+ /* Else default-disable */
+ val = 0;
+
+ gpiod_set_value(data->cen, val);
+ }
dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
"Connected" : "Disconnected");
@@ -151,10 +198,16 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
static irqreturn_t max8903_fault(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
bool fault;
- fault = gpio_get_value(pdata->flt) ? false : true;
+ /*
+ * This means the line is asserted.
+ *
+ * The signal is active low, but the inversion is handled in the GPIO
+ * library as the line should be flagged GPIO_ACTIVE_LOW in the device
+ * tree.
+ */
+ fault = gpiod_get_value(data->flt);
if (fault == data->fault)
return IRQ_HANDLED;
@@ -169,159 +222,100 @@ static irqreturn_t max8903_fault(int irq, void *_data)
return IRQ_HANDLED;
}
-static struct max8903_pdata *max8903_parse_dt_data(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct max8903_pdata *pdata = NULL;
-
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- pdata->dc_valid = false;
- pdata->usb_valid = false;
-
- pdata->cen = of_get_named_gpio(np, "cen-gpios", 0);
- if (!gpio_is_valid(pdata->cen))
- pdata->cen = -EINVAL;
-
- pdata->chg = of_get_named_gpio(np, "chg-gpios", 0);
- if (!gpio_is_valid(pdata->chg))
- pdata->chg = -EINVAL;
-
- pdata->flt = of_get_named_gpio(np, "flt-gpios", 0);
- if (!gpio_is_valid(pdata->flt))
- pdata->flt = -EINVAL;
-
- pdata->usus = of_get_named_gpio(np, "usus-gpios", 0);
- if (!gpio_is_valid(pdata->usus))
- pdata->usus = -EINVAL;
-
- pdata->dcm = of_get_named_gpio(np, "dcm-gpios", 0);
- if (!gpio_is_valid(pdata->dcm))
- pdata->dcm = -EINVAL;
-
- pdata->dok = of_get_named_gpio(np, "dok-gpios", 0);
- if (!gpio_is_valid(pdata->dok))
- pdata->dok = -EINVAL;
- else
- pdata->dc_valid = true;
-
- pdata->uok = of_get_named_gpio(np, "uok-gpios", 0);
- if (!gpio_is_valid(pdata->uok))
- pdata->uok = -EINVAL;
- else
- pdata->usb_valid = true;
-
- return pdata;
-}
-
static int max8903_setup_gpios(struct platform_device *pdev)
{
struct max8903_data *data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct max8903_pdata *pdata = pdev->dev.platform_data;
- int ret = 0;
- int gpio;
- int ta_in = 0;
- int usb_in = 0;
-
- if (pdata->dc_valid) {
- if (gpio_is_valid(pdata->dok)) {
- ret = devm_gpio_request(dev, pdata->dok,
- data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for dok: %d err %d\n",
- pdata->dok, ret);
- return ret;
- }
-
- gpio = pdata->dok; /* PULL_UPed Interrupt */
- ta_in = gpio_get_value(gpio) ? 0 : 1;
- } else {
- dev_err(dev, "When DC is wired, DOK should be wired as well.\n");
- return -EINVAL;
- }
- }
-
- if (gpio_is_valid(pdata->dcm)) {
- ret = devm_gpio_request(dev, pdata->dcm, data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for dcm: %d err %d\n",
- pdata->dcm, ret);
- return ret;
- }
-
- gpio = pdata->dcm; /* Output */
- gpio_set_value(gpio, ta_in);
+ bool ta_in = false;
+ bool usb_in = false;
+ enum gpiod_flags flags;
+
+ data->dok = devm_gpiod_get_optional(dev, "dok", GPIOD_IN);
+ if (IS_ERR(data->dok))
+ return dev_err_probe(dev, PTR_ERR(data->dok),
+ "failed to get DOK GPIO");
+ if (data->dok) {
+ gpiod_set_consumer_name(data->dok, data->psy_desc.name);
+ /*
+ * The DC OK is pulled up to 1 and goes low when a charger
+ * is plugged in (active low) but in the device tree the
+ * line is marked as GPIO_ACTIVE_LOW so we get a 1 (asserted)
+ * here if the DC charger is plugged in.
+ */
+ ta_in = gpiod_get_value(data->dok);
}
- if (pdata->usb_valid) {
- if (gpio_is_valid(pdata->uok)) {
- ret = devm_gpio_request(dev, pdata->uok,
- data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for uok: %d err %d\n",
- pdata->uok, ret);
- return ret;
- }
-
- gpio = pdata->uok;
- usb_in = gpio_get_value(gpio) ? 0 : 1;
- } else {
- dev_err(dev, "When USB is wired, UOK should be wired."
- "as well.\n");
- return -EINVAL;
- }
- }
-
- if (gpio_is_valid(pdata->cen)) {
- ret = devm_gpio_request(dev, pdata->cen, data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for cen: %d err %d\n",
- pdata->cen, ret);
- return ret;
- }
-
- gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
+ data->uok = devm_gpiod_get_optional(dev, "uok", GPIOD_IN);
+ if (IS_ERR(data->uok))
+ return dev_err_probe(dev, PTR_ERR(data->uok),
+ "failed to get UOK GPIO");
+ if (data->uok) {
+ gpiod_set_consumer_name(data->uok, data->psy_desc.name);
+ /*
+ * The USB OK is pulled up to 1 and goes low when a USB charger
+ * is plugged in (active low) but in the device tree the
+ * line is marked as GPIO_ACTIVE_LOW so we get a 1 (asserted)
+ * here if the USB charger is plugged in.
+ */
+ usb_in = gpiod_get_value(data->uok);
}
- if (gpio_is_valid(pdata->chg)) {
- ret = devm_gpio_request(dev, pdata->chg, data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for chg: %d err %d\n",
- pdata->chg, ret);
- return ret;
- }
- }
-
- if (gpio_is_valid(pdata->flt)) {
- ret = devm_gpio_request(dev, pdata->flt, data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for flt: %d err %d\n",
- pdata->flt, ret);
- return ret;
- }
+ /* Either DC OK or USB OK must be provided */
+ if (!data->dok && !data->uok) {
+ dev_err(dev, "no valid power source\n");
+ return -EINVAL;
}
- if (gpio_is_valid(pdata->usus)) {
- ret = devm_gpio_request(dev, pdata->usus, data->psy_desc.name);
- if (ret) {
- dev_err(dev,
- "Failed GPIO request for usus: %d err %d\n",
- pdata->usus, ret);
- return ret;
- }
- }
+ /*
+ * If either charger is already connected at this point,
+ * assert the CEN line and enable charging from the start.
+ *
+ * The line is active low but also marked with GPIO_ACTIVE_LOW
+ * in the device tree, so when we assert the line with
+ * GPIOD_OUT_HIGH the line will be driven low.
+ */
+ flags = (ta_in || usb_in) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ /*
+ * If DC OK is provided, Charger Enable CEN is compulsory
+ * so this is not optional here.
+ */
+ data->cen = devm_gpiod_get(dev, "cen", flags);
+ if (IS_ERR(data->cen))
+ return dev_err_probe(dev, PTR_ERR(data->cen),
+ "failed to get CEN GPIO");
+ gpiod_set_consumer_name(data->cen, data->psy_desc.name);
+
+ /*
+ * If the DC charger is connected, then select it.
+ *
+ * The DCM line should be marked GPIO_ACTIVE_HIGH in the
+ * device tree. Driving it high will enable the DC charger
+ * input over the USB charger input.
+ */
+ flags = ta_in ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ data->dcm = devm_gpiod_get_optional(dev, "dcm", flags);
+ if (IS_ERR(data->dcm))
+ return dev_err_probe(dev, PTR_ERR(data->dcm),
+ "failed to get DCM GPIO");
+ gpiod_set_consumer_name(data->dcm, data->psy_desc.name);
+
+ data->chg = devm_gpiod_get_optional(dev, "chg", GPIOD_IN);
+ if (IS_ERR(data->chg))
+ return dev_err_probe(dev, PTR_ERR(data->chg),
+ "failed to get CHG GPIO");
+ gpiod_set_consumer_name(data->chg, data->psy_desc.name);
+
+ data->flt = devm_gpiod_get_optional(dev, "flt", GPIOD_IN);
+ if (IS_ERR(data->flt))
+ return dev_err_probe(dev, PTR_ERR(data->flt),
+ "failed to get FLT GPIO");
+ gpiod_set_consumer_name(data->flt, data->psy_desc.name);
+
+ data->usus = devm_gpiod_get_optional(dev, "usus", GPIOD_IN);
+ if (IS_ERR(data->usus))
+ return dev_err_probe(dev, PTR_ERR(data->usus),
+ "failed to get USUS GPIO");
+ gpiod_set_consumer_name(data->usus, data->psy_desc.name);
data->fault = false;
data->ta_in = ta_in;
@@ -334,7 +328,6 @@ static int max8903_probe(struct platform_device *pdev)
{
struct max8903_data *data;
struct device *dev = &pdev->dev;
- struct max8903_pdata *pdata = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
int ret = 0;
@@ -342,24 +335,9 @@ static int max8903_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- if (IS_ENABLED(CONFIG_OF) && !pdata && dev->of_node)
- pdata = max8903_parse_dt_data(dev);
-
- if (!pdata) {
- dev_err(dev, "No platform data.\n");
- return -EINVAL;
- }
-
- pdev->dev.platform_data = pdata;
- data->pdata = pdata;
data->dev = dev;
platform_set_drvdata(pdev, data);
- if (pdata->dc_valid == false && pdata->usb_valid == false) {
- dev_err(dev, "No valid power sources.\n");
- return -EINVAL;
- }
-
ret = max8903_setup_gpios(pdev);
if (ret)
return ret;
@@ -381,41 +359,41 @@ static int max8903_probe(struct platform_device *pdev)
return PTR_ERR(data->psy);
}
- if (pdata->dc_valid) {
- ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->dok),
+ if (data->dok) {
+ ret = devm_request_threaded_irq(dev, gpiod_to_irq(data->dok),
NULL, max8903_dcin,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"MAX8903 DC IN", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for DC (%d)\n",
- gpio_to_irq(pdata->dok), ret);
+ gpiod_to_irq(data->dok), ret);
return ret;
}
}
- if (pdata->usb_valid) {
- ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->uok),
+ if (data->uok) {
+ ret = devm_request_threaded_irq(dev, gpiod_to_irq(data->uok),
NULL, max8903_usbin,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"MAX8903 USB IN", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for USB (%d)\n",
- gpio_to_irq(pdata->uok), ret);
+ gpiod_to_irq(data->uok), ret);
return ret;
}
}
- if (gpio_is_valid(pdata->flt)) {
- ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->flt),
+ if (data->flt) {
+ ret = devm_request_threaded_irq(dev, gpiod_to_irq(data->flt),
NULL, max8903_fault,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"MAX8903 Fault", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
- gpio_to_irq(pdata->flt), ret);
+ gpiod_to_irq(data->flt), ret);
return ret;
}
}
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 1947af25879a..321bd6b8ee41 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -6,12 +6,14 @@
// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
+#include <linux/extcon.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
+#include <linux/regulator/consumer.h>
/* MAX8997_REG_STATUS4 */
#define DCINOK_SHIFT 1
@@ -31,6 +33,10 @@ struct charger_data {
struct device *dev;
struct max8997_dev *iodev;
struct power_supply *battery;
+ struct regulator *reg;
+ struct extcon_dev *edev;
+ struct notifier_block extcon_nb;
+ struct work_struct extcon_work;
};
static enum power_supply_property max8997_battery_props[] = {
@@ -88,6 +94,67 @@ static int max8997_battery_get_property(struct power_supply *psy,
return 0;
}
+static void max8997_battery_extcon_evt_stop_work(void *data)
+{
+ struct charger_data *charger = data;
+
+ cancel_work_sync(&charger->extcon_work);
+}
+
+static void max8997_battery_extcon_evt_worker(struct work_struct *work)
+{
+ struct charger_data *charger =
+ container_of(work, struct charger_data, extcon_work);
+ struct extcon_dev *edev = charger->edev;
+ int current_limit;
+
+ if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
+ dev_dbg(charger->dev, "USB SDP charger is connected\n");
+ current_limit = 450000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
+ dev_dbg(charger->dev, "USB DCP charger is connected\n");
+ current_limit = 650000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_FAST) > 0) {
+ dev_dbg(charger->dev, "USB FAST charger is connected\n");
+ current_limit = 650000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_SLOW) > 0) {
+ dev_dbg(charger->dev, "USB SLOW charger is connected\n");
+ current_limit = 650000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
+ dev_dbg(charger->dev, "USB CDP charger is connected\n");
+ current_limit = 650000;
+ } else {
+ dev_dbg(charger->dev, "USB charger is disconnected\n");
+ current_limit = -1;
+ }
+
+ if (current_limit > 0) {
+ int ret = regulator_set_current_limit(charger->reg, current_limit, current_limit);
+
+ if (ret) {
+ dev_err(charger->dev, "failed to set current limit: %d\n", ret);
+ return;
+ }
+ ret = regulator_enable(charger->reg);
+ if (ret)
+ dev_err(charger->dev, "failed to enable regulator: %d\n", ret);
+ } else {
+ int ret = regulator_disable(charger->reg);
+
+ if (ret)
+ dev_err(charger->dev, "failed to disable regulator: %d\n", ret);
+ }
+}
+
+static int max8997_battery_extcon_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct charger_data *charger =
+ container_of(nb, struct charger_data, extcon_nb);
+ schedule_work(&charger->extcon_work);
+ return NOTIFY_OK;
+}
+
static const struct power_supply_desc max8997_battery_desc = {
.name = "max8997_pmic",
.type = POWER_SUPPLY_TYPE_BATTERY,
@@ -170,6 +237,35 @@ static int max8997_battery_probe(struct platform_device *pdev)
return PTR_ERR(charger->battery);
}
+ charger->reg = devm_regulator_get_optional(&pdev->dev, "charger");
+ if (IS_ERR(charger->reg)) {
+ if (PTR_ERR(charger->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "couldn't get charger regulator\n");
+ }
+ charger->edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(charger->edev)) {
+ if (PTR_ERR(charger->edev) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(charger->dev, "couldn't get extcon device\n");
+ }
+
+ if (!IS_ERR(charger->reg) && !IS_ERR(charger->edev)) {
+ INIT_WORK(&charger->extcon_work, max8997_battery_extcon_evt_worker);
+ ret = devm_add_action(&pdev->dev, max8997_battery_extcon_evt_stop_work, charger);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add extcon evt stop action: %d\n", ret);
+ return ret;
+ }
+ charger->extcon_nb.notifier_call = max8997_battery_extcon_evt;
+ ret = devm_extcon_register_notifier_all(&pdev->dev, charger->edev,
+ &charger->extcon_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register extcon notifier\n");
+ return ret;
+ }
+ }
+
return 0;
}
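A note on the pattern above: extcon notifier callbacks may run in contexts where sleeping is not allowed, while regulator_set_current_limit() and regulator_enable() can sleep, so the notifier only schedules a work item and the regulator programming happens in the worker. A minimal sketch of that split, with hypothetical names (struct chg, chg_work and chg_evt are illustrative, not from this driver):

#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct chg {				/* hypothetical driver state */
	struct extcon_dev *edev;
	struct notifier_block nb;
	struct work_struct work;
};

static void chg_work(struct work_struct *work)
{
	struct chg *chg = container_of(work, struct chg, work);

	/* Sleeping context: safe to query extcon and program the charger. */
	if (extcon_get_state(chg->edev, EXTCON_CHG_USB_SDP) > 0) {
		/* set the current limit, enable the regulator, ... */
	}
}

static int chg_evt(struct notifier_block *nb, unsigned long event, void *p)
{
	struct chg *chg = container_of(nb, struct chg, nb);

	schedule_work(&chg->work);	/* defer the sleeping work */
	return NOTIFY_OK;
}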
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index 7fe4b6b6ddc8..bffe6d84c429 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -299,13 +299,11 @@ static const struct hwmon_channel_info *power_supply_hwmon_info[] = {
HWMON_T_INPUT |
HWMON_T_MAX |
HWMON_T_MIN |
- HWMON_T_MIN_ALARM |
HWMON_T_MIN_ALARM,
HWMON_T_LABEL |
HWMON_T_INPUT |
HWMON_T_MIN_ALARM |
- HWMON_T_LABEL |
HWMON_T_MAX_ALARM),
HWMON_CHANNEL_INFO(curr,
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 92dd63171193..c3d7cbcd4fad 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -374,7 +374,7 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
return 0;
}
-static struct attribute_group power_supply_attr_group = {
+static const struct attribute_group power_supply_attr_group = {
.attrs = __power_supply_attrs,
.is_visible = power_supply_attr_is_visible,
};
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index d3bf35ed12ce..8cfbd8d6b478 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -137,6 +137,7 @@
* @mains_online: is AC/DC input connected
* @usb_online: is USB input connected
* @charging_enabled: is charging enabled
+ * @irq_unsupported: is interrupt unsupported by SMB hardware
* @max_charge_current: maximum current (in uA) the battery can be charged
* @max_charge_voltage: maximum voltage (in uV) the battery can be charged
* @pre_charge_current: current (in uA) to use in pre-charging phase
@@ -193,6 +194,7 @@ struct smb347_charger {
bool mains_online;
bool usb_online;
bool charging_enabled;
+ bool irq_unsupported;
unsigned int max_charge_current;
unsigned int max_charge_voltage;
@@ -862,6 +864,9 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
{
int ret;
+ if (smb->irq_unsupported)
+ return 0;
+
ret = smb347_set_writable(smb, true);
if (ret < 0)
return ret;
@@ -923,8 +928,6 @@ static int smb347_irq_init(struct smb347_charger *smb,
ret = regmap_update_bits(smb->regmap, CFG_STAT,
CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
CFG_STAT_DISABLED);
- if (ret < 0)
- client->irq = 0;
smb347_set_writable(smb, false);
@@ -1345,6 +1348,7 @@ static int smb347_probe(struct i2c_client *client,
if (ret < 0) {
dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
dev_warn(dev, "disabling IRQ support\n");
+ smb->irq_unsupported = true;
} else {
smb347_irq_enable(smb);
}
@@ -1357,8 +1361,8 @@ static int smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
- if (client->irq)
- smb347_irq_disable(smb);
+ smb347_irq_disable(smb);
+
return 0;
}
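The effect of the new flag is to centralize the capability check: once irq_unsupported is set at probe time, every IRQ enable/disable path degenerates to a no-op, so callers such as remove() stay unconditional. A reduced sketch of the pattern, with hypothetical names (struct chip, chip_irq_set, chip_remove):

#include <linux/types.h>

struct chip {
	bool irq_unsupported;		/* set once at probe on IRQ-less parts */
};

/* Central guard: every caller can stay unconditional. */
static int chip_irq_set(struct chip *chip, bool enable)
{
	if (chip->irq_unsupported)
		return 0;		/* silently succeed, nothing to do */

	/* program the hardware IRQ enable bits here */
	return 0;
}

static void chip_remove(struct chip *chip)
{
	chip_irq_set(chip, false);	/* safe even without an IRQ */
}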
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 58f01659daa5..a0e1eaa25d93 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -15,11 +15,12 @@
#include <linux/wm97xx.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/slab.h>
static struct work_struct bat_work;
+static struct gpio_desc *charge_gpiod;
static DEFINE_MUTEX(work_lock);
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
static enum power_supply_property *prop;
@@ -96,12 +97,11 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
- struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
mutex_lock(&work_lock);
- bat_status = (pdata->charge_gpio >= 0) ?
- (gpio_get_value(pdata->charge_gpio) ?
+ bat_status = (charge_gpiod) ?
+ (gpiod_get_value(charge_gpiod) ?
POWER_SUPPLY_STATUS_DISCHARGING :
POWER_SUPPLY_STATUS_CHARGING) :
POWER_SUPPLY_STATUS_UNKNOWN;
@@ -171,18 +171,19 @@ static int wm97xx_bat_probe(struct platform_device *dev)
if (dev->id != -1)
return -EINVAL;
- if (gpio_is_valid(pdata->charge_gpio)) {
- ret = gpio_request(pdata->charge_gpio, "BATT CHRG");
- if (ret)
- goto err;
- ret = gpio_direction_input(pdata->charge_gpio);
- if (ret)
- goto err2;
- ret = request_irq(gpio_to_irq(pdata->charge_gpio),
+ charge_gpiod = devm_gpiod_get_optional(&dev->dev, NULL, GPIOD_IN);
+ if (IS_ERR(charge_gpiod))
+ return dev_err_probe(&dev->dev,
+ PTR_ERR(charge_gpiod),
+ "failed to get charge GPIO\n");
+ if (charge_gpiod) {
+ gpiod_set_consumer_name(charge_gpiod, "BATT CHRG");
+ ret = request_irq(gpiod_to_irq(charge_gpiod),
wm97xx_chrg_irq, 0,
"AC Detect", dev);
if (ret)
- goto err2;
+ return dev_err_probe(&dev->dev, ret,
+ "failed to request GPIO irq\n");
props++; /* POWER_SUPPLY_PROP_STATUS */
}
@@ -204,7 +205,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
}
prop[i++] = POWER_SUPPLY_PROP_PRESENT;
- if (pdata->charge_gpio >= 0)
+ if (charge_gpiod)
prop[i++] = POWER_SUPPLY_PROP_STATUS;
if (pdata->batt_tech >= 0)
prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
@@ -242,23 +243,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
err4:
kfree(prop);
err3:
- if (gpio_is_valid(pdata->charge_gpio))
- free_irq(gpio_to_irq(pdata->charge_gpio), dev);
-err2:
- if (gpio_is_valid(pdata->charge_gpio))
- gpio_free(pdata->charge_gpio);
-err:
+ if (charge_gpiod)
+ free_irq(gpiod_to_irq(charge_gpiod), dev);
return ret;
}
static int wm97xx_bat_remove(struct platform_device *dev)
{
- struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
-
- if (pdata && gpio_is_valid(pdata->charge_gpio)) {
- free_irq(gpio_to_irq(pdata->charge_gpio), dev);
- gpio_free(pdata->charge_gpio);
- }
+ if (charge_gpiod)
+ free_irq(gpiod_to_irq(charge_gpiod), dev);
cancel_work_sync(&bat_work);
power_supply_unregister(bat_psy);
kfree(prop);
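The conversion relies on the three-way contract of devm_gpiod_get_optional(): an ERR_PTR on a real failure (including -EPROBE_DEFER), NULL when the line simply is not described, and a valid descriptor otherwise, which is why the gpio_is_valid() tests can go away. A condensed sketch of handling all three cases (probe_charge_gpio is a hypothetical helper):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int probe_charge_gpio(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
	if (IS_ERR(gpiod))		/* real failure, possibly -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "failed to get charge GPIO\n");
	if (!gpiod)			/* line not described: feature absent */
		return 0;

	return gpiod_get_value(gpiod);	/* valid descriptor, use it */
}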
diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
index ebd2e42a4457..b1508fe70e5e 100644
--- a/drivers/power/supply/z2_battery.c
+++ b/drivers/power/supply/z2_battery.c
@@ -6,7 +6,7 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -18,6 +18,7 @@
struct z2_charger {
struct z2_battery_info *info;
+ struct gpio_desc *charge_gpiod;
int bat_status;
struct i2c_client *client;
struct power_supply *batt_ps;
@@ -95,8 +96,8 @@ static void z2_batt_update(struct z2_charger *charger)
mutex_lock(&charger->work_lock);
- charger->bat_status = (info->charge_gpio >= 0) ?
- (gpio_get_value(info->charge_gpio) ?
+ charger->bat_status = charger->charge_gpiod ?
+ (gpiod_get_value(charger->charge_gpiod) ?
POWER_SUPPLY_STATUS_CHARGING :
POWER_SUPPLY_STATUS_DISCHARGING) :
POWER_SUPPLY_STATUS_UNKNOWN;
@@ -131,7 +132,7 @@ static int z2_batt_ps_init(struct z2_charger *charger, int props)
enum power_supply_property *prop;
struct z2_battery_info *info = charger->info;
- if (info->charge_gpio >= 0)
+ if (charger->charge_gpiod)
props++; /* POWER_SUPPLY_PROP_STATUS */
if (info->batt_tech >= 0)
props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
@@ -147,7 +148,7 @@ static int z2_batt_ps_init(struct z2_charger *charger, int props)
return -ENOMEM;
prop[i++] = POWER_SUPPLY_PROP_PRESENT;
- if (info->charge_gpio >= 0)
+ if (charger->charge_gpiod)
prop[i++] = POWER_SUPPLY_PROP_STATUS;
if (info->batt_tech >= 0)
prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
@@ -206,22 +207,23 @@ static int z2_batt_probe(struct i2c_client *client,
mutex_init(&charger->work_lock);
- if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
- ret = gpio_request(info->charge_gpio, "BATT CHRG");
- if (ret)
- goto err;
+ charger->charge_gpiod = devm_gpiod_get_optional(&client->dev,
+ NULL, GPIOD_IN);
+ if (IS_ERR(charger->charge_gpiod)) {
+ ret = dev_err_probe(&client->dev,
+ PTR_ERR(charger->charge_gpiod),
+ "failed to get charge GPIO\n");
+ goto err;
+ }
- ret = gpio_direction_input(info->charge_gpio);
- if (ret)
- goto err2;
+ if (charger->charge_gpiod) {
+ gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG");
- irq_set_irq_type(gpio_to_irq(info->charge_gpio),
+ irq_set_irq_type(gpiod_to_irq(charger->charge_gpiod),
IRQ_TYPE_EDGE_BOTH);
- ret = request_irq(gpio_to_irq(info->charge_gpio),
+ ret = request_irq(gpiod_to_irq(charger->charge_gpiod),
z2_charge_switch_irq, 0,
"AC Detect", charger);
if (ret)
- goto err3;
+ goto err;
}
ret = z2_batt_ps_init(charger, props);
@@ -245,11 +247,8 @@ static int z2_batt_probe(struct i2c_client *client,
err4:
kfree(charger->batt_ps_desc.properties);
err3:
- if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
- free_irq(gpio_to_irq(info->charge_gpio), charger);
-err2:
- if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
- gpio_free(info->charge_gpio);
+ if (charger->charge_gpiod)
+ free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
err:
kfree(charger);
return ret;
@@ -258,16 +257,13 @@ err:
static int z2_batt_remove(struct i2c_client *client)
{
struct z2_charger *charger = i2c_get_clientdata(client);
- struct z2_battery_info *info = charger->info;
cancel_work_sync(&charger->bat_work);
power_supply_unregister(charger->batt_ps);
kfree(charger->batt_ps_desc.properties);
- if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
- free_irq(gpio_to_irq(info->charge_gpio), charger);
- gpio_free(info->charge_gpio);
- }
+ if (charger->charge_gpiod)
+ free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
kfree(charger);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index bc228725346b..20b4325c6161 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -43,4 +43,17 @@ config IDLE_INJECT
CPUs for power capping. Idle period can be injected
synchronously on a set of specified CPUs or alternatively
on a per CPU basis.
+
+config DTPM
+ bool "Power capping for Dynamic Thermal Power Management"
+ help
+ This enables power capping support for the dynamic thermal
+ power management userspace engine.
+
+config DTPM_CPU
+ bool "Add CPU power capping based on the energy model"
+ depends on DTPM && ENERGY_MODEL
+ help
+ This enables support for CPU power limitation based on the
+ energy model.
endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index 7255c94ec61c..fabcf388a8d3 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DTPM) += dtpm.o
+obj-$(CONFIG_DTPM_CPU) += dtpm_cpu.o
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
new file mode 100644
index 000000000000..5a51cd34a7e8
--- /dev/null
+++ b/drivers/powercap/dtpm.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The powercap based Dynamic Thermal Power Management framework
+ * provides userspace with a consistent API to set the power limit
+ * on some devices.
+ *
+ * DTPM defines the functions to create a tree of constraints. Each
+ * parent node is a virtual description of the aggregation of the
+ * children. It propagates the constraints set at its level to its
+ * children and collects the children's power information. The leaves of
+ * the tree are the real devices which have the ability to get their
+ * current power consumption and set their power limit.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dtpm.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/powercap.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#define DTPM_POWER_LIMIT_FLAG 0
+
+static const char *constraint_name[] = {
+ "Instantaneous",
+};
+
+static DEFINE_MUTEX(dtpm_lock);
+static struct powercap_control_type *pct;
+static struct dtpm *root;
+
+static int get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window)
+{
+ return -ENOSYS;
+}
+
+static int set_time_window_us(struct powercap_zone *pcz, int cid, u64 window)
+{
+ return -ENOSYS;
+}
+
+static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+
+ mutex_lock(&dtpm_lock);
+ *max_power_uw = dtpm->power_max - dtpm->power_min;
+ mutex_unlock(&dtpm_lock);
+
+ return 0;
+}
+
+static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw)
+{
+ struct dtpm *child;
+ u64 power;
+ int ret = 0;
+
+ if (dtpm->ops) {
+ *power_uw = dtpm->ops->get_power_uw(dtpm);
+ return 0;
+ }
+
+ *power_uw = 0;
+
+ list_for_each_entry(child, &dtpm->children, sibling) {
+ ret = __get_power_uw(child, &power);
+ if (ret)
+ break;
+ *power_uw += power;
+ }
+
+ return ret;
+}
+
+static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+ int ret;
+
+ mutex_lock(&dtpm_lock);
+ ret = __get_power_uw(dtpm, power_uw);
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+
+static void __dtpm_rebalance_weight(struct dtpm *dtpm)
+{
+ struct dtpm *child;
+
+ list_for_each_entry(child, &dtpm->children, sibling) {
+
+ child->weight = DIV64_U64_ROUND_CLOSEST(
+ child->power_max * 1024, dtpm->power_max);
+
+ pr_debug("Setting weight '%d' for '%s'\n",
+ child->weight, child->zone.name);
+
+ __dtpm_rebalance_weight(child);
+ }
+}
+
+static void __dtpm_sub_power(struct dtpm *dtpm)
+{
+ struct dtpm *parent = dtpm->parent;
+
+ while (parent) {
+ parent->power_min -= dtpm->power_min;
+ parent->power_max -= dtpm->power_max;
+ parent->power_limit -= dtpm->power_limit;
+ parent = parent->parent;
+ }
+
+ __dtpm_rebalance_weight(root);
+}
+
+static void __dtpm_add_power(struct dtpm *dtpm)
+{
+ struct dtpm *parent = dtpm->parent;
+
+ while (parent) {
+ parent->power_min += dtpm->power_min;
+ parent->power_max += dtpm->power_max;
+ parent->power_limit += dtpm->power_limit;
+ parent = parent->parent;
+ }
+
+ __dtpm_rebalance_weight(root);
+}
+
+/**
+ * dtpm_update_power - Update the power on the dtpm
+ * @dtpm: a pointer to a dtpm structure to update
+ * @power_min: a u64 representing the new power_min value
+ * @power_max: a u64 representing the new power_max value
+ *
+ * Function to update the power values of the dtpm node passed as a
+ * parameter. These new values will be propagated to the tree.
+ *
+ * Return: zero on success, -EINVAL if the values are inconsistent
+ */
+int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max)
+{
+ int ret = 0;
+
+ mutex_lock(&dtpm_lock);
+
+ if (power_min == dtpm->power_min && power_max == dtpm->power_max)
+ goto unlock;
+
+ if (power_max < power_min) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ __dtpm_sub_power(dtpm);
+
+ dtpm->power_min = power_min;
+ dtpm->power_max = power_max;
+ if (!test_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags))
+ dtpm->power_limit = power_max;
+
+ __dtpm_add_power(dtpm);
+
+unlock:
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+
+/**
+ * dtpm_release_zone - Cleanup when the node is released
+ * @pcz: a pointer to a powercap_zone structure
+ *
+ * Do some housecleaning and update the weights in the tree. The
+ * release will be denied if the node has children. This function must
+ * be called by the specific release callback of the different
+ * backends.
+ *
+ * Return: 0 on success, -EBUSY if there are children
+ */
+int dtpm_release_zone(struct powercap_zone *pcz)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+ struct dtpm *parent = dtpm->parent;
+
+ mutex_lock(&dtpm_lock);
+
+ if (!list_empty(&dtpm->children)) {
+ mutex_unlock(&dtpm_lock);
+ return -EBUSY;
+ }
+
+ if (parent)
+ list_del(&dtpm->sibling);
+
+ __dtpm_sub_power(dtpm);
+
+ mutex_unlock(&dtpm_lock);
+
+ if (dtpm->ops)
+ dtpm->ops->release(dtpm);
+
+ kfree(dtpm);
+
+ return 0;
+}
+
+static int __get_power_limit_uw(struct dtpm *dtpm, int cid, u64 *power_limit)
+{
+ *power_limit = dtpm->power_limit;
+ return 0;
+}
+
+static int get_power_limit_uw(struct powercap_zone *pcz,
+ int cid, u64 *power_limit)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+ int ret;
+
+ mutex_lock(&dtpm_lock);
+ ret = __get_power_limit_uw(dtpm, cid, power_limit);
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+
+/*
+ * Set the power limit on the nodes; the power limit is distributed
+ * according to the weight of the children.
+ *
+ * The dtpm node lock must be held when calling this function.
+ */
+static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit)
+{
+ struct dtpm *child;
+ int ret = 0;
+ u64 power;
+
+ /*
+ * A max power limitation means we remove the power limit;
+ * otherwise we set a constraint and flag the dtpm node.
+ */
+ if (power_limit == dtpm->power_max) {
+ clear_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags);
+ } else {
+ set_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags);
+ }
+
+ pr_debug("Setting power limit for '%s': %llu uW\n",
+ dtpm->zone.name, power_limit);
+
+ /*
+ * Only leaves of the dtpm tree have ops to get/set the power
+ */
+ if (dtpm->ops) {
+ dtpm->power_limit = dtpm->ops->set_power_uw(dtpm, power_limit);
+ } else {
+ dtpm->power_limit = 0;
+
+ list_for_each_entry(child, &dtpm->children, sibling) {
+
+ /*
+ * Integer division rounding will inevitably
+ * lead to a different min or max value when
+ * set several times. In order to restore the
+ * initial value, we force the child's min or
+ * max power every time if the constraint is
+ * at the boundaries.
+ */
+ if (power_limit == dtpm->power_max) {
+ power = child->power_max;
+ } else if (power_limit == dtpm->power_min) {
+ power = child->power_min;
+ } else {
+ power = DIV_ROUND_CLOSEST_ULL(
+ power_limit * child->weight, 1024);
+ }
+
+ pr_debug("Setting power limit for '%s': %llu uW\n",
+ child->zone.name, power);
+
+ ret = __set_power_limit_uw(child, cid, power);
+ if (!ret)
+ ret = __get_power_limit_uw(child, cid, &power);
+
+ if (ret)
+ break;
+
+ dtpm->power_limit += power;
+ }
+ }
+
+ return ret;
+}
+
+static int set_power_limit_uw(struct powercap_zone *pcz,
+ int cid, u64 power_limit)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+ int ret;
+
+ mutex_lock(&dtpm_lock);
+
+ /*
+ * Don't allow values outside of the power range previously
+ * set when initializing the power numbers.
+ */
+ power_limit = clamp_val(power_limit, dtpm->power_min, dtpm->power_max);
+
+ ret = __set_power_limit_uw(dtpm, cid, power_limit);
+
+ pr_debug("%s: power limit: %llu uW, power max: %llu uW\n",
+ dtpm->zone.name, dtpm->power_limit, dtpm->power_max);
+
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
+{
+ return constraint_name[cid];
+}
+
+static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
+{
+ struct dtpm *dtpm = to_dtpm(pcz);
+
+ mutex_lock(&dtpm_lock);
+ *max_power = dtpm->power_max;
+ mutex_unlock(&dtpm_lock);
+
+ return 0;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+ .set_power_limit_uw = set_power_limit_uw,
+ .get_power_limit_uw = get_power_limit_uw,
+ .set_time_window_us = set_time_window_us,
+ .get_time_window_us = get_time_window_us,
+ .get_max_power_uw = get_max_power_uw,
+ .get_name = get_constraint_name,
+};
+
+static struct powercap_zone_ops zone_ops = {
+ .get_max_power_range_uw = get_max_power_range_uw,
+ .get_power_uw = get_power_uw,
+ .release = dtpm_release_zone,
+};
+
+/**
+ * dtpm_alloc - Allocate and initialize a dtpm struct
+ * @ops: a pointer to the dtpm_ops callbacks for the node (NULL for a virtual node)
+ *
+ * Return: a struct dtpm pointer, NULL in case of error
+ */
+struct dtpm *dtpm_alloc(struct dtpm_ops *ops)
+{
+ struct dtpm *dtpm;
+
+ dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);
+ if (dtpm) {
+ INIT_LIST_HEAD(&dtpm->children);
+ INIT_LIST_HEAD(&dtpm->sibling);
+ dtpm->weight = 1024;
+ dtpm->ops = ops;
+ }
+
+ return dtpm;
+}
+
+/**
+ * dtpm_unregister - Unregister a dtpm node from the hierarchy tree
+ * @dtpm: a pointer to a dtpm structure corresponding to the node to be removed
+ *
+ * Call the underlying powercap unregister function. That will call
+ * the release callback of the powercap zone.
+ */
+void dtpm_unregister(struct dtpm *dtpm)
+{
+ powercap_unregister_zone(pct, &dtpm->zone);
+
+ pr_info("Unregistered dtpm node '%s'\n", dtpm->zone.name);
+}
+
+/**
+ * dtpm_register - Register a dtpm node in the hierarchy tree
+ * @name: a string specifying the name of the node
+ * @dtpm: a pointer to a dtpm structure corresponding to the new node
+ * @parent: a pointer to a dtpm structure corresponding to the parent node
+ *
+ * Create a dtpm node in the tree. If no parent is specified, the node
+ * is the root node of the hierarchy. If the root node already exists,
+ * then the registration will fail. The powercap controller must be
+ * initialized before calling this function.
+ *
+ * The dtpm structure must be initialized with the power numbers
+ * before calling this function.
+ *
+ * Return: zero on success, a negative value in case of error:
+ * -EAGAIN: the function is called before the framework is initialized.
+ * -EBUSY: the root node is already inserted
+ * -EINVAL: * there is no root node yet and @parent is specified
+ * * not all ops are defined
+ * * @parent has ops which are reserved for leaves
+ * Other negative values are reported back from the powercap framework
+ */
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
+{
+ struct powercap_zone *pcz;
+
+ if (!pct)
+ return -EAGAIN;
+
+ if (root && !parent)
+ return -EBUSY;
+
+ if (!root && parent)
+ return -EINVAL;
+
+ if (parent && parent->ops)
+ return -EINVAL;
+
+ if (!dtpm)
+ return -EINVAL;
+
+ if (dtpm->ops && !(dtpm->ops->set_power_uw &&
+ dtpm->ops->get_power_uw &&
+ dtpm->ops->release))
+ return -EINVAL;
+
+ pcz = powercap_register_zone(&dtpm->zone, pct, name,
+ parent ? &parent->zone : NULL,
+ &zone_ops, MAX_DTPM_CONSTRAINTS,
+ &constraint_ops);
+ if (IS_ERR(pcz))
+ return PTR_ERR(pcz);
+
+ mutex_lock(&dtpm_lock);
+
+ if (parent) {
+ list_add_tail(&dtpm->sibling, &parent->children);
+ dtpm->parent = parent;
+ } else {
+ root = dtpm;
+ }
+
+ __dtpm_add_power(dtpm);
+
+ pr_info("Registered dtpm node '%s' / %llu-%llu uW\n",
+ dtpm->zone.name, dtpm->power_min, dtpm->power_max);
+
+ mutex_unlock(&dtpm_lock);
+
+ return 0;
+}
+
+static int __init dtpm_init(void)
+{
+ struct dtpm_descr **dtpm_descr;
+
+ pct = powercap_register_control_type(NULL, "dtpm", NULL);
+ if (IS_ERR(pct)) {
+ pr_err("Failed to register control type\n");
+ return PTR_ERR(pct);
+ }
+
+ for_each_dtpm_table(dtpm_descr)
+ (*dtpm_descr)->init(*dtpm_descr);
+
+ return 0;
+}
+late_initcall(dtpm_init);
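Putting the API together, a backend allocates a node with dtpm_alloc(), fills in its power range, and attaches it with dtpm_register(). A hedged sketch of a leaf backend follows; the ops, names, and power numbers are placeholders, not a real driver:

#include <linux/dtpm.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical leaf: constant numbers stand in for real measurements. */
static u64 demo_get_power_uw(struct dtpm *dtpm)
{
	return 500000;				/* 500 mW, placeholder */
}

static u64 demo_set_power_uw(struct dtpm *dtpm, u64 limit)
{
	return limit;				/* pretend the cap took effect */
}

static void demo_release(struct dtpm *dtpm)
{
}

static struct dtpm_ops demo_ops = {
	.set_power_uw = demo_set_power_uw,
	.get_power_uw = demo_get_power_uw,
	.release = demo_release,
};

static int __init demo_dtpm_init(void)
{
	struct dtpm *leaf = dtpm_alloc(&demo_ops);
	int ret;

	if (!leaf)
		return -ENOMEM;

	/* Power numbers must be set before registering (see kernel-doc). */
	leaf->power_min = 100000;		/* 100 mW, placeholder */
	leaf->power_max = 1000000;		/* 1 W, placeholder */

	ret = dtpm_register("demo", leaf, NULL);	/* becomes the root */
	if (ret)
		kfree(leaf);

	return ret;
}
late_initcall_sync(demo_dtpm_init);		/* after dtpm_init() */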
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
new file mode 100644
index 000000000000..51c366938acd
--- /dev/null
+++ b/drivers/powercap/dtpm_cpu.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The DTPM CPU is based on the energy model. It hooks the CPU into the
+ * DTPM tree, which in turn updates the power numbers by propagating
+ * them from the CPU energy model information to the parents.
+ *
+ * The association between the power and the performance states allows
+ * the power of the CPU to be set at the OPP granularity.
+ *
+ * CPU hotplug is supported and the power numbers will be updated
+ * if a CPU is hot plugged / unplugged.
+ */
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuhotplug.h>
+#include <linux/dtpm.h>
+#include <linux/energy_model.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+static struct dtpm *__parent;
+
+static DEFINE_PER_CPU(struct dtpm *, dtpm_per_cpu);
+
+struct dtpm_cpu {
+ struct freq_qos_request qos_req;
+ int cpu;
+};
+
+/*
+ * When a new CPU is inserted at hotplug or boot time, add the power
+ * contribution and update the dtpm tree.
+ */
+static int power_add(struct dtpm *dtpm, struct em_perf_domain *em)
+{
+ u64 power_min, power_max;
+
+ power_min = em->table[0].power;
+ power_min *= MICROWATT_PER_MILLIWATT;
+ power_min += dtpm->power_min;
+
+ power_max = em->table[em->nr_perf_states - 1].power;
+ power_max *= MICROWATT_PER_MILLIWATT;
+ power_max += dtpm->power_max;
+
+ return dtpm_update_power(dtpm, power_min, power_max);
+}
+
+/*
+ * When a CPU is unplugged, remove its power contribution from the
+ * dtpm tree.
+ */
+static int power_sub(struct dtpm *dtpm, struct em_perf_domain *em)
+{
+ u64 power_min, power_max;
+
+ power_min = em->table[0].power;
+ power_min *= MICROWATT_PER_MILLIWATT;
+ power_min = dtpm->power_min - power_min;
+
+ power_max = em->table[em->nr_perf_states - 1].power;
+ power_max *= MICROWATT_PER_MILLIWATT;
+ power_max = dtpm->power_max - power_max;
+
+ return dtpm_update_power(dtpm, power_min, power_max);
+}
+
+static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+{
+ struct dtpm_cpu *dtpm_cpu = dtpm->private;
+ struct em_perf_domain *pd;
+ struct cpumask cpus;
+ unsigned long freq;
+ u64 power;
+ int i, nr_cpus;
+
+ pd = em_cpu_get(dtpm_cpu->cpu);
+
+ cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
+
+ nr_cpus = cpumask_weight(&cpus);
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus;
+
+ if (power > power_limit)
+ break;
+ }
+
+ freq = pd->table[i - 1].frequency;
+
+ freq_qos_update_request(&dtpm_cpu->qos_req, freq);
+
+ power_limit = pd->table[i - 1].power *
+ MICROWATT_PER_MILLIWATT * nr_cpus;
+
+ return power_limit;
+}
+
+static u64 get_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_cpu *dtpm_cpu = dtpm->private;
+ struct em_perf_domain *pd;
+ struct cpumask cpus;
+ unsigned long freq;
+ int i, nr_cpus;
+
+ pd = em_cpu_get(dtpm_cpu->cpu);
+ freq = cpufreq_quick_get(dtpm_cpu->cpu);
+ cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
+ nr_cpus = cpumask_weight(&cpus);
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ if (pd->table[i].frequency < freq)
+ continue;
+
+ return pd->table[i].power *
+ MICROWATT_PER_MILLIWATT * nr_cpus;
+ }
+
+ return 0;
+}
+
+static void pd_release(struct dtpm *dtpm)
+{
+ struct dtpm_cpu *dtpm_cpu = dtpm->private;
+
+ if (freq_qos_request_active(&dtpm_cpu->qos_req))
+ freq_qos_remove_request(&dtpm_cpu->qos_req);
+
+ kfree(dtpm_cpu);
+}
+
+static struct dtpm_ops dtpm_ops = {
+ .set_power_uw = set_pd_power_limit,
+ .get_power_uw = get_pd_power_uw,
+ .release = pd_release,
+};
+
+static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct em_perf_domain *pd;
+ struct dtpm *dtpm;
+
+ policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return 0;
+
+ pd = em_cpu_get(cpu);
+ if (!pd)
+ return -EINVAL;
+
+ dtpm = per_cpu(dtpm_per_cpu, cpu);
+
+ power_sub(dtpm, pd);
+
+ if (cpumask_weight(policy->cpus) != 1)
+ return 0;
+
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, cpu) = NULL;
+
+ dtpm_unregister(dtpm);
+
+ return 0;
+}
+
+static int cpuhp_dtpm_cpu_online(unsigned int cpu)
+{
+ struct dtpm *dtpm;
+ struct dtpm_cpu *dtpm_cpu;
+ struct cpufreq_policy *policy;
+ struct em_perf_domain *pd;
+ char name[CPUFREQ_NAME_LEN];
+ int ret = -ENOMEM;
+
+ policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return 0;
+
+ pd = em_cpu_get(cpu);
+ if (!pd)
+ return -EINVAL;
+
+ dtpm = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm)
+ return power_add(dtpm, pd);
+
+ dtpm = dtpm_alloc(&dtpm_ops);
+ if (!dtpm)
+ return -EINVAL;
+
+ dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
+ if (!dtpm_cpu)
+ goto out_kfree_dtpm;
+
+ dtpm->private = dtpm_cpu;
+ dtpm_cpu->cpu = cpu;
+
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, cpu) = dtpm;
+
+ sprintf(name, "cpu%d", dtpm_cpu->cpu);
+
+ ret = dtpm_register(name, dtpm, __parent);
+ if (ret)
+ goto out_kfree_dtpm_cpu;
+
+ ret = power_add(dtpm, pd);
+ if (ret)
+ goto out_dtpm_unregister;
+
+ ret = freq_qos_add_request(&policy->constraints,
+ &dtpm_cpu->qos_req, FREQ_QOS_MAX,
+ pd->table[pd->nr_perf_states - 1].frequency);
+ if (ret)
+ goto out_power_sub;
+
+ return 0;
+
+out_power_sub:
+ power_sub(dtpm, pd);
+
+out_dtpm_unregister:
+ dtpm_unregister(dtpm);
+ dtpm_cpu = NULL;
+ dtpm = NULL;
+
+out_kfree_dtpm_cpu:
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, cpu) = NULL;
+ kfree(dtpm_cpu);
+
+out_kfree_dtpm:
+ kfree(dtpm);
+ return ret;
+}
+
+int dtpm_register_cpu(struct dtpm *parent)
+{
+ __parent = parent;
+
+ return cpuhp_setup_state(CPUHP_AP_DTPM_CPU_ONLINE,
+ "dtpm_cpu:online",
+ cpuhp_dtpm_cpu_online,
+ cpuhp_dtpm_cpu_offline);
+}
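dtpm_register_cpu() is the single call a platform integration makes to hook CPU perf domains into the tree; it must run after dtpm_init(), which is a late_initcall. A hypothetical caller, assuming a system where letting the first perf domain become the root is acceptable:

#include <linux/dtpm.h>
#include <linux/init.h>

static int __init demo_dtpm_cpu_setup(void)
{
	/*
	 * NULL parent: the first registered perf domain becomes the
	 * DTPM root; multi-domain systems should pass an already
	 * registered parent node instead.
	 */
	return dtpm_register_cpu(NULL);
}
late_initcall_sync(demo_dtpm_cpu_setup);	/* after dtpm_init() */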
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index c9e57237d778..fdda2a737186 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -547,7 +547,7 @@ static void rapl_init_domains(struct rapl_package *rp)
if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) {
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d",
- cpu_data(rp->lead_cpu).phys_proc_id);
+ topology_physical_package_id(rp->lead_cpu));
} else
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s",
rapl_domain_names[i]);
@@ -1049,6 +1049,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
@@ -1309,7 +1310,6 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
{
int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
if (!rapl_defaults)
@@ -1326,10 +1326,11 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
if (topology_max_die_per_package() > 1)
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
- "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id);
+ "package-%d-die-%d",
+ topology_physical_package_id(cpu), topology_die_id(cpu));
else
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
- c->phys_proc_id);
+ topology_physical_package_id(cpu));
/* check if the package contains valid domains */
if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) {
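The replacement helpers read the same identifiers through the generic topology API instead of dereferencing x86's struct cpuinfo_x86. A sketch of the naming logic in isolation (rapl_name is a hypothetical helper; buffer length handling is up to the caller):

#include <linux/kernel.h>
#include <linux/topology.h>

static void rapl_name(char *buf, size_t len, int cpu)
{
	if (topology_max_die_per_package() > 1)
		snprintf(buf, len, "package-%d-die-%d",
			 topology_physical_package_id(cpu),
			 topology_die_id(cpu));
	else
		snprintf(buf, len, "package-%d",
			 topology_physical_package_id(cpu));
}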
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index a664dfe5fd2f..ac524cf0f31f 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -122,6 +122,8 @@
#define OTP_SCSR_CONFIG_SELECT 0x0022
#define STATUS 0xc03c
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
#define USER_GPIO0_TO_7_STATUS 0x008a
#define USER_GPIO8_TO_15_STATUS 0x008b
@@ -707,4 +709,12 @@
/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
#define COMBO_MASTER_HOLD BIT(0)
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
#endif
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 051511f5e1f2..75463c2e2b86 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -160,7 +160,6 @@ static int idtcm_xfer_read(struct idtcm *idtcm,
struct i2c_client *client = idtcm->client;
struct i2c_msg msg[2];
int cnt;
- char *fmt = "i2c_transfer failed at %d in %s, at addr: %04X!\n";
msg[0].addr = client->addr;
msg[0].flags = 0;
@@ -176,14 +175,12 @@ static int idtcm_xfer_read(struct idtcm *idtcm,
if (cnt < 0) {
dev_err(&client->dev,
- fmt,
- __LINE__,
- __func__,
- regaddr);
+ "i2c_transfer failed at %d in %s, at addr: %04x!",
+ __LINE__, __func__, regaddr);
return cnt;
} else if (cnt != 2) {
dev_err(&client->dev,
- "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ "i2c_transfer sent only %d of %d messages", cnt, 2);
return -EIO;
}
@@ -199,7 +196,6 @@ static int idtcm_xfer_write(struct idtcm *idtcm,
/* we add 1 byte for device register */
u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
int cnt;
- char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n";
if (count > IDTCM_MAX_WRITE_COUNT)
return -EINVAL;
@@ -211,10 +207,8 @@ static int idtcm_xfer_write(struct idtcm *idtcm,
if (cnt < 0) {
dev_err(&client->dev,
- fmt,
- __LINE__,
- __func__,
- regaddr);
+ "i2c_master_send failed at %d in %s, at addr: %04x!",
+ __LINE__, __func__, regaddr);
return cnt;
}
@@ -235,10 +229,9 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
buf[3] = 0x20;
err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
-
if (err) {
idtcm->page_offset = 0xff;
- dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ dev_err(&idtcm->client->dev, "failed to set page offset");
} else {
idtcm->page_offset = val;
}
@@ -260,7 +253,6 @@ static int _idtcm_rdwr(struct idtcm *idtcm,
lo = regaddr & 0xff;
err = idtcm_page_offset(idtcm, hi);
-
if (err)
return err;
@@ -290,12 +282,9 @@ static int idtcm_write(struct idtcm *idtcm,
static int clear_boot_status(struct idtcm *idtcm)
{
- int err;
u8 buf[4] = {0};
- err = idtcm_write(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
-
- return err;
+ return idtcm_write(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
}
static int read_boot_status(struct idtcm *idtcm, u32 *status)
@@ -318,7 +307,6 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
do {
err = read_boot_status(idtcm, &status);
-
if (err)
return err;
@@ -330,11 +318,72 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
} while (i);
- dev_warn(&idtcm->client->dev, "%s timed out\n", __func__);
+ dev_warn(&idtcm->client->dev, "%s timed out", __func__);
return -EBUSY;
}
+static int read_sys_apll_status(struct idtcm *idtcm, u8 *status)
+{
+ return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status,
+ sizeof(u8));
+}
+
+static int read_sys_dpll_status(struct idtcm *idtcm, u8 *status)
+{
+ return idtcm_read(idtcm, STATUS, DPLL_SYS_STATUS, status, sizeof(u8));
+}
+
+static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(LOCK_TIMEOUT_MS);
+ u8 apll = 0;
+ u8 dpll = 0;
+ int err;
+
+ do {
+ err = read_sys_apll_status(idtcm, &apll);
+ if (err)
+ return err;
+
+ err = read_sys_dpll_status(idtcm, &dpll);
+ if (err)
+ return err;
+
+ apll &= SYS_APLL_LOSS_LOCK_LIVE_MASK;
+ dpll &= DPLL_SYS_STATE_MASK;
+
+ if (apll == SYS_APLL_LOSS_LOCK_LIVE_LOCKED &&
+ dpll == DPLL_STATE_LOCKED) {
+ return 0;
+ } else if (dpll == DPLL_STATE_FREERUN ||
+ dpll == DPLL_STATE_HOLDOVER ||
+ dpll == DPLL_STATE_OPEN_LOOP) {
+ dev_warn(&idtcm->client->dev,
+ "No wait state: DPLL_SYS_STATE %d", dpll);
+ return -EPERM;
+ }
+
+ msleep(LOCK_POLL_INTERVAL_MS);
+ } while (time_is_after_jiffies(timeout));
+
+ dev_warn(&idtcm->client->dev,
+ "%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d",
+ LOCK_TIMEOUT_MS, apll, dpll);
+
+ return -ETIME;
+}
+
+static void wait_for_chip_ready(struct idtcm *idtcm)
+{
+ if (wait_for_boot_status_ready(idtcm))
+ dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0");
+
+ if (wait_for_sys_apll_dpll_lock(idtcm))
+ dev_warn(&idtcm->client->dev,
+ "Continuing while SYS APLL/DPLL is not locked");
+}
+
static int _idtcm_gettime(struct idtcm_channel *channel,
struct timespec64 *ts)
{
@@ -360,14 +409,12 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
/* wait trigger to be 0 */
while (trigger & TOD_READ_TRIGGER_MASK) {
-
if (idtcm->calculate_overhead_flag)
idtcm->start_time = ktime_get_raw();
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY_CMD, &trigger,
sizeof(trigger));
-
if (err)
return err;
@@ -377,7 +424,6 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY, buf, sizeof(buf));
-
if (err)
return err;
@@ -398,7 +444,7 @@ static int _sync_pll_output(struct idtcm *idtcm,
u16 sync_ctrl1;
u8 temp;
- if ((qn == 0) && (qn_plus_1 == 0))
+ if (qn == 0 && qn_plus_1 == 0)
return 0;
switch (pll) {
@@ -463,7 +509,7 @@ static int _sync_pll_output(struct idtcm *idtcm,
return err;
/* PLL5 can have OUT8 as second additional output. */
- if ((pll == 5) && (qn_plus_1 != 0)) {
+ if (pll == 5 && qn_plus_1 != 0) {
err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
@@ -485,7 +531,7 @@ static int _sync_pll_output(struct idtcm *idtcm,
}
/* PLL6 can have OUT11 as second additional output. */
- if ((pll == 6) && (qn_plus_1 != 0)) {
+ if (pll == 6 && qn_plus_1 != 0) {
err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
@@ -540,7 +586,6 @@ static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
static int idtcm_sync_pps_output(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
-
u8 pll;
u8 sync_src;
u8 qn;
@@ -549,7 +594,6 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
u8 out8_mux = 0;
u8 out11_mux = 0;
u8 temp;
-
u16 output_mask = channel->output_mask;
err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
@@ -610,7 +654,7 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
}
}
- if ((qn != 0) || (qn_plus_1 != 0))
+ if (qn != 0 || qn_plus_1 != 0)
err = _sync_pll_output(idtcm, pll, sync_src, qn,
qn_plus_1);
@@ -626,7 +670,6 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
enum hw_tod_write_trig_sel wr_trig)
{
struct idtcm *idtcm = channel->idtcm;
-
u8 buf[TOD_BYTE_COUNT];
u8 cmd;
int err;
@@ -636,7 +679,6 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
/* Configure HW TOD write trigger. */
err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
&cmd, sizeof(cmd));
-
if (err)
return err;
@@ -645,20 +687,16 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
&cmd, sizeof(cmd));
-
if (err)
return err;
if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
-
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
-
if (err)
return err;
err = idtcm_write(idtcm, channel->hw_dpll_n,
HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
-
if (err)
return err;
}
@@ -670,7 +708,6 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
&cmd, sizeof(cmd));
if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
-
if (idtcm->calculate_overhead_flag) {
/* Assumption: I2C @ 400KHz */
ktime_t diff = ktime_sub(ktime_get_raw(),
@@ -685,7 +722,6 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
}
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
-
if (err)
return err;
@@ -709,7 +745,6 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
timespec64_add_ns(&local_ts, SETTIME_CORRECTION);
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
-
if (err)
return err;
@@ -750,7 +785,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
if (++count > 20) {
dev_err(&idtcm->client->dev,
- "Timed out waiting for the write counter\n");
+ "Timed out waiting for the write counter");
return -EIO;
}
}
@@ -813,10 +848,9 @@ static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
int err;
err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
-
if (err) {
dev_err(&idtcm->client->dev,
- "%s: Set HW ToD failed\n", __func__);
+ "%s: Set HW ToD failed", __func__);
return err;
}
@@ -838,7 +872,6 @@ static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
int err;
int i;
struct idtcm *idtcm = channel->idtcm;
-
u8 buf[4];
for (i = 0; i < 4; i++) {
@@ -858,7 +891,6 @@ static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
int err;
u8 i;
struct idtcm *idtcm = channel->idtcm;
-
u8 buf[3];
if (max_ffo_ppb & 0xff000000)
@@ -879,12 +911,10 @@ static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
{
int err;
struct idtcm *idtcm = channel->idtcm;
-
u8 buf;
err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
&buf, sizeof(buf));
-
if (err)
return err;
@@ -906,12 +936,10 @@ static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
int err;
err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
-
if (err)
return err;
err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
-
if (err)
return err;
@@ -927,7 +955,6 @@ static int set_tod_write_overhead(struct idtcm_channel *channel)
s64 lowest_ns = 0;
int err;
u8 i;
-
ktime_t start;
ktime_t stop;
ktime_t diff;
@@ -939,12 +966,10 @@ static int set_tod_write_overhead(struct idtcm_channel *channel)
buf, sizeof(buf));
for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
-
start = ktime_get_raw();
err = idtcm_write(idtcm, channel->hw_dpll_n,
HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
-
if (err)
return err;
@@ -980,12 +1005,10 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
idtcm->calculate_overhead_flag = 1;
err = set_tod_write_overhead(channel);
-
if (err)
return err;
err = _idtcm_gettime(channel, &ts);
-
if (err)
return err;
@@ -1018,14 +1041,14 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
if (status == 0xA0) {
dev_dbg(&idtcm->client->dev,
- "SM_RESET completed in %d ms\n",
- i * 100);
+ "SM_RESET completed in %d ms", i * 100);
break;
}
}
if (!status)
- dev_err(&idtcm->client->dev, "Timed out waiting for CM_RESET to complete\n");
+ dev_err(&idtcm->client->dev,
+ "Timed out waiting for CM_RESET to complete");
}
return err;
@@ -1121,12 +1144,12 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
{
if (index >= MAX_TOD) {
- dev_err(&idtcm->client->dev, "ToD%d not supported\n", index);
+ dev_err(&idtcm->client->dev, "ToD%d not supported", index);
return -EINVAL;
}
if (pll >= MAX_PLL) {
- dev_err(&idtcm->client->dev, "Pll%d not supported\n", pll);
+ dev_err(&idtcm->client->dev, "Pll%d not supported", pll);
return -EINVAL;
}
@@ -1144,8 +1167,7 @@ static int check_and_set_masks(struct idtcm *idtcm,
switch (regaddr) {
case TOD_MASK_ADDR:
if ((val & 0xf0) || !(val & 0x0f)) {
- dev_err(&idtcm->client->dev,
- "Invalid TOD mask 0x%hhx\n", val);
+ dev_err(&idtcm->client->dev, "Invalid TOD mask 0x%02x", val);
err = -EINVAL;
} else {
idtcm->tod_mask = val;
@@ -1176,14 +1198,14 @@ static void display_pll_and_masks(struct idtcm *idtcm)
u8 i;
u8 mask;
- dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x\n", idtcm->tod_mask);
+ dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if (mask & idtcm->tod_mask)
dev_dbg(&idtcm->client->dev,
- "TOD%d pll = %d output_mask = 0x%04x\n",
+ "TOD%d pll = %d output_mask = 0x%04x",
i, idtcm->channel[i].pll,
idtcm->channel[i].output_mask);
}
@@ -1204,19 +1226,16 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
- dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", fname);
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'", fname);
err = request_firmware(&fw, fname, dev);
-
if (err) {
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
}
- dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes", fw->size);
rec = (struct idtcm_fwrc *) fw->data;
@@ -1224,10 +1243,9 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
idtcm_state_machine_reset(idtcm);
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
-
if (rec->reserved) {
dev_err(&idtcm->client->dev,
- "bad firmware, reserved field non-zero\n");
+ "bad firmware, reserved field non-zero");
err = -EINVAL;
} else {
regaddr = rec->hiaddr << 8;
@@ -1245,13 +1263,11 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
err = 0;
/* Top (status registers) and bottom are read-only */
- if ((regaddr < GPIO_USER_CONTROL)
- || (regaddr >= SCRATCH))
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
continue;
/* Page size 128, last 4 bytes of page skipped */
- if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || loaddr > 0xfb)
+ if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
continue;
err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
@@ -1285,7 +1301,6 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
}
err = idtcm_read(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
-
if (err)
return err;
@@ -1308,11 +1323,8 @@ static int idtcm_output_mask_enable(struct idtcm_channel *channel,
outn = 0;
while (mask) {
-
if (mask & 0x1) {
-
err = idtcm_output_enable(channel, enable, outn);
-
if (err)
return err;
}
@@ -1328,13 +1340,23 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
bool enable,
struct ptp_perout_request *perout)
{
+ struct idtcm *idtcm = channel->idtcm;
unsigned int flags = perout->flags;
+ struct timespec64 ts = {0, 0};
+ int err;
if (flags == PEROUT_ENABLE_OUTPUT_MASK)
- return idtcm_output_mask_enable(channel, enable);
+ err = idtcm_output_mask_enable(channel, enable);
+ else
+ err = idtcm_output_enable(channel, enable, perout->index);
- /* Enable/disable individual output instead */
- return idtcm_output_enable(channel, enable, perout->index);
+ if (err) {
+ dev_err(&idtcm->client->dev, "Unable to set output enable");
+ return err;
+ }
+
+ /* Align output to internal 1 PPS */
+ return _idtcm_settime(channel, &ts, SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS);
}
static int idtcm_get_pll_mode(struct idtcm_channel *channel,
@@ -1392,7 +1414,6 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel,
static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
{
struct idtcm *idtcm = channel->idtcm;
-
int err;
u8 i;
u8 buf[4] = {0};
@@ -1400,9 +1421,7 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
s64 offset_ps;
if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
-
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
-
if (err)
return err;
}
@@ -1478,20 +1497,16 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_gettime(channel, ts);
-
if (err)
- dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ dev_err(&idtcm->client->dev, "Failed at line %d in %s!",
+ __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1501,20 +1516,16 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_settime_deprecated(channel, ts);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1524,20 +1535,16 @@ static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
static int idtcm_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1546,20 +1553,16 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_adjtime_deprecated(channel, delta);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1568,8 +1571,7 @@ static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts;
enum scsr_tod_write_type_sel type;
@@ -1579,9 +1581,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
err = idtcm_do_phase_pull_in(channel, delta, 0);
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
}
@@ -1596,12 +1596,9 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
mutex_lock(&idtcm->reg_lock);
err = _idtcm_settime(channel, &ts, type);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1610,22 +1607,16 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
-
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
-
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_adjphase(channel, delta);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1634,22 +1625,16 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
-
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
-
int err;
mutex_lock(&idtcm->reg_lock);
err = _idtcm_adjfine(channel, scaled_ppm);
-
if (err)
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
mutex_unlock(&idtcm->reg_lock);
@@ -1660,9 +1645,7 @@ static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
int err;
-
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
@@ -1670,9 +1653,8 @@ static int idtcm_enable(struct ptp_clock_info *ptp,
err = idtcm_perout_enable(channel, false, &rq->perout);
if (err)
dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!",
+ __LINE__, __func__);
return err;
}
@@ -1684,9 +1666,7 @@ static int idtcm_enable(struct ptp_clock_info *ptp,
err = idtcm_perout_enable(channel, true, &rq->perout);
if (err)
dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
default:
break;
@@ -1706,7 +1686,7 @@ static int _enable_pll_tod_sync(struct idtcm *idtcm,
u16 dpll;
u16 out0 = 0, out1 = 0;
- if ((qn == 0) && (qn_plus_1 == 0))
+ if (qn == 0 && qn_plus_1 == 0)
return 0;
switch (pll) {
@@ -1771,28 +1751,24 @@ static int _enable_pll_tod_sync(struct idtcm *idtcm,
*/
if (out0) {
err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
-
if (err)
return err;
val &= ~OUT_SYNC_DISABLE;
err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
-
if (err)
return err;
}
if (out1) {
err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
-
if (err)
return err;
val &= ~OUT_SYNC_DISABLE;
err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
-
if (err)
return err;
}
@@ -1812,7 +1788,6 @@ static int _enable_pll_tod_sync(struct idtcm *idtcm,
static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
-
u8 pll;
u8 sync_src;
u8 qn;
@@ -1854,8 +1829,7 @@ static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
return -EINVAL;
}
- err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
- &temp, sizeof(temp));
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp));
if (err)
return err;
@@ -1863,8 +1837,7 @@ static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
out8_mux = 1;
- err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
- &temp, sizeof(temp));
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, &temp, sizeof(temp));
if (err)
return err;
@@ -1908,10 +1881,9 @@ static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
}
}
- if ((qn != 0) || (qn_plus_1 != 0))
+ if (qn != 0 || qn_plus_1 != 0)
err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
qn_plus_1);
-
if (err)
return err;
}
@@ -1954,7 +1926,6 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
u16 product_id;
u8 hw_rev_id;
u8 config_select;
- char *fmt = "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d\n";
idtcm_read_major_release(idtcm, &major);
idtcm_read_minor_release(idtcm, &minor);
@@ -1973,7 +1944,9 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
else
idtcm->deprecated = 1;
- dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
+ dev_info(&idtcm->client->dev,
+ "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d",
+ major, minor, hotfix,
product_id, hw_rev_id, config_select);
}
@@ -2132,9 +2105,7 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
err = idtcm_enable_tod_sync(channel);
if (err) {
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
}
}
@@ -2143,16 +2114,14 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
err = idtcm_get_pll_mode(channel, &channel->pll_mode);
if (err) {
dev_err(&idtcm->client->dev,
- "Error: %s - Unable to read pll mode\n", __func__);
+ "Error: %s - Unable to read pll mode", __func__);
return err;
}
err = idtcm_enable_tod(channel);
if (err) {
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
}
@@ -2167,7 +2136,7 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
- dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d",
index, channel->ptp_clock->index);
return 0;
@@ -2179,7 +2148,6 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
struct idtcm_channel *channel;
for (i = 0; i < MAX_TOD; i++) {
-
channel = &idtcm->channel[i];
if (channel->ptp_clock)
@@ -2208,7 +2176,6 @@ static int idtcm_probe(struct i2c_client *client,
struct idtcm *idtcm;
int err;
u8 i;
- char *fmt = "Failed at %d in line %s with channel output %d!\n";
/* Unused for now */
(void)id;
@@ -2230,13 +2197,10 @@ static int idtcm_probe(struct i2c_client *client,
idtcm_set_version_info(idtcm);
err = idtcm_load_firmware(idtcm, &client->dev);
-
if (err)
- dev_warn(&idtcm->client->dev,
- "loading firmware failed with %d\n", err);
+ dev_warn(&idtcm->client->dev, "loading firmware failed with %d", err);
- if (wait_for_boot_status_ready(idtcm))
- dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0\n");
+ wait_for_chip_ready(idtcm);
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
@@ -2244,17 +2208,14 @@ static int idtcm_probe(struct i2c_client *client,
err = idtcm_enable_channel(idtcm, i);
if (err) {
dev_err(&idtcm->client->dev,
- fmt,
- __LINE__,
- __func__,
- i);
+ "idtcm_enable_channel %d failed!", i);
break;
}
}
}
} else {
dev_err(&idtcm->client->dev,
- "no PLLs flagged as PHCs, nothing to do\n");
+ "no PLLs flagged as PHCs, nothing to do");
err = -ENODEV;
}
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 645de2c66b64..fb323271063e 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -15,7 +15,6 @@
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
#define MAX_PLL (8)
-#define MAX_OUTPUT (12)
#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
@@ -51,6 +50,9 @@
#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
#define TOD_BYTE_COUNT (11)
+#define LOCK_TIMEOUT_MS (2000)
+#define LOCK_POLL_INTERVAL_MS (10)
+
#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
#define IDTCM_MAX_WRITE_COUNT (512)
@@ -105,6 +107,18 @@ enum scsr_tod_write_type_sel {
SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
};
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
struct idtcm;
struct idtcm_channel {
@@ -123,7 +137,6 @@ struct idtcm_channel {
enum pll_mode pll_mode;
u8 pll;
u16 output_mask;
- u8 output_phase_adj[MAX_OUTPUT][4];
};
struct idtcm {
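The new LOCK_TIMEOUT_MS / LOCK_POLL_INTERVAL_MS constants and the dpll_state
values above back the wait_for_chip_ready() call introduced in the probe
path earlier in this patch; its body is not part of these hunks. A minimal
sketch of what such a poll loop could look like, assuming the driver's
idtcm_read() accessor; the helper name and the module/offset parameters are
placeholders, since the actual STATUS.DPLL_SYS_STATUS register location is
only referenced in the comment above:

/* msleep() is from <linux/delay.h>, -ETIMEDOUT from <linux/errno.h> */
static int idtcm_wait_for_lock(struct idtcm *idtcm, u16 module, u16 offset)
{
	int tries = LOCK_TIMEOUT_MS / LOCK_POLL_INTERVAL_MS;
	u8 state;
	int err;

	while (tries--) {
		err = idtcm_read(idtcm, module, offset,
				 &state, sizeof(state));
		if (err)
			return err;

		/* LOCKED and HOLDOVER both count as a settled DPLL */
		if (state == DPLL_STATE_LOCKED ||
		    state == DPLL_STATE_HOLDOVER)
			return 0;

		msleep(LOCK_POLL_INTERVAL_MS);
	}

	return -ETIMEDOUT;
}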
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 0937e1c047ac..9a4f66ae8070 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -611,14 +611,4 @@ config PWM_VT8500
To compile this driver as a module, choose M here: the module
will be called pwm-vt8500.
-config PWM_ZX
- tristate "ZTE ZX PWM support"
- depends on ARCH_ZX || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Generic PWM framework driver for ZTE ZX family SoCs.
-
- To compile this driver as a module, choose M here: the module
- will be called pwm-zx.
-
endif
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 18b89d7fd092..6374d3b1d6f3 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -57,4 +57,3 @@ obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
-obj-$(CONFIG_PWM_ZX) += pwm-zx.o
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 5ede8255926e..957b972c458b 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -37,16 +37,34 @@ struct iqs620_pwm_private {
struct pwm_chip chip;
struct notifier_block notifier;
struct mutex lock;
- bool out_en;
- u8 duty_val;
+ unsigned int duty_scale;
};
+static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm,
+ unsigned int duty_scale)
+{
+ struct iqs62x_core *iqs62x = iqs620_pwm->iqs62x;
+ int ret;
+
+ if (!duty_scale)
+ return regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
+ IQS620_PWR_SETTINGS_PWM_OUT, 0);
+
+ ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
+ duty_scale - 1);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
+ IQS620_PWR_SETTINGS_PWM_OUT, 0xff);
+}
+
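As used below, the helper's contract follows directly from the code above: a
duty_scale of 0 clears IQS620_PWR_SETTINGS_PWM_OUT and leaves the duty
register alone, while a nonzero duty_scale N programs N - 1 into
IQS620_PWM_DUTY_CYCLE before setting the output-enable bits, so N runs from
1 (minimum pulse width) to 256 (full-scale duty).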
static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct iqs620_pwm_private *iqs620_pwm;
- struct iqs62x_core *iqs62x;
- u64 duty_scale;
+ unsigned int duty_cycle;
+ unsigned int duty_scale;
int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
@@ -56,7 +74,6 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
- iqs62x = iqs620_pwm->iqs62x;
/*
* The duty cycle generated by the device is calculated as follows:
@@ -70,38 +87,18 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* For lower duty cycles (e.g. 0), the PWM output is simply disabled to
* allow an external pull-down resistor to hold the GPIO3/LTX pin low.
*/
- duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS);
-
- mutex_lock(&iqs620_pwm->lock);
-
- if (!state->enabled || !duty_scale) {
- ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
- IQS620_PWR_SETTINGS_PWM_OUT, 0);
- if (ret)
- goto err_mutex;
- }
+ duty_cycle = min_t(u64, state->duty_cycle, IQS620_PWM_PERIOD_NS);
+ duty_scale = duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
- if (duty_scale) {
- u8 duty_val = min_t(u64, duty_scale - 1, 0xff);
+ if (!state->enabled)
+ duty_scale = 0;
- ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
- duty_val);
- if (ret)
- goto err_mutex;
-
- iqs620_pwm->duty_val = duty_val;
- }
-
- if (state->enabled && duty_scale) {
- ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
- IQS620_PWR_SETTINGS_PWM_OUT, 0xff);
- if (ret)
- goto err_mutex;
- }
+ mutex_lock(&iqs620_pwm->lock);
- iqs620_pwm->out_en = state->enabled;
+ ret = iqs620_pwm_init(iqs620_pwm, duty_scale);
+ if (!ret)
+ iqs620_pwm->duty_scale = duty_scale;
-err_mutex:
mutex_unlock(&iqs620_pwm->lock);
return ret;
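For concreteness, a small standalone sketch of the clamping and scaling
performed above; the nominal IQS620_PWM_PERIOD_NS value here is an
assumption, and the arithmetic mirrors the two lines added in this hunk:

/* min_t() comes from <linux/kernel.h>; period value assumed for the sketch */
#define IQS620_PWM_PERIOD_NS	1000000

static unsigned int iqs620_duty_scale(u64 duty_ns, bool enabled)
{
	/* Clamp to one period, then map [0, period] onto [0, 256] */
	unsigned int duty = min_t(u64, duty_ns, IQS620_PWM_PERIOD_NS);
	unsigned int scale = duty * 256 / IQS620_PWM_PERIOD_NS;

	/* A disabled PWM is treated exactly like a 0% duty request */
	return enabled ? scale : 0;
}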
@@ -119,12 +116,11 @@ static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* Since the device cannot generate a 0% duty cycle, requests to do so
* cause subsequent calls to iqs620_pwm_get_state to report the output
- * as disabled with duty cycle equal to that which was in use prior to
- * the request. This is not ideal, but is the best compromise based on
+ * as disabled. This is not ideal, but is the best compromise based on
* the capabilities of the device.
*/
- state->enabled = iqs620_pwm->out_en;
- state->duty_cycle = DIV_ROUND_UP((iqs620_pwm->duty_val + 1) *
+ state->enabled = iqs620_pwm->duty_scale > 0;
+ state->duty_cycle = DIV_ROUND_UP(iqs620_pwm->duty_scale *
IQS620_PWM_PERIOD_NS, 256);
mutex_unlock(&iqs620_pwm->lock);
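A quick worked example of the reporting above, assuming the same nominal
1 ms period as in the earlier sketch: a duty_scale of 128 is reported as
duty_cycle = DIV_ROUND_UP(128 * 1000000, 256) = 500000 ns with the output
enabled, while a duty_scale of 0 reports the output as disabled. Because
DIV_ROUND_UP rounds upward and the apply path truncates, re-applying a
reported duty cycle quantizes back to the same duty_scale for any period of
at least 256 ns, so the state round-trips cleanly.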
@@ -136,7 +132,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
unsigned long event_flags, void *context)
{
struct iqs620_pwm_private *iqs620_pwm;
- struct iqs62x_core *iqs62x;
int ret;
if (!(event_flags & BIT(IQS62X_EVENT_SYS_RESET)))
@@ -144,7 +139,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
iqs620_pwm = container_of(notifier, struct iqs620_pwm_private,
notifier);
- iqs62x = iqs620_pwm->iqs62x;
mutex_lock(&iqs620_pwm->lock);
@@ -153,16 +147,8 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
* of a device reset, so nothing else is printed here unless there is
* an additional failure.
*/
- ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
- iqs620_pwm->duty_val);
- if (ret)
- goto err_mutex;
+ ret = iqs620_pwm_init(iqs620_pwm, iqs620_pwm->duty_scale);
- ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
- IQS620_PWR_SETTINGS_PWM_OUT,
- iqs620_pwm->out_en ? 0xff : 0);
-
-err_mutex:
mutex_unlock(&iqs620_pwm->lock);
if (ret) {
@@ -209,12 +195,14 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
if (ret)
return ret;
- iqs620_pwm->out_en = val & IQS620_PWR_SETTINGS_PWM_OUT;
- ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val);
- if (ret)
- return ret;
- iqs620_pwm->duty_val = val;
+ if (val & IQS620_PWR_SETTINGS_PWM_OUT) {
+ ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val);
+ if (ret)
+ return ret;
+
+ iqs620_pwm->duty_scale = val + 1;
+ }
iqs620_pwm->chip.dev = &pdev->dev;
iqs620_pwm->chip.ops = &iqs620_pwm_ops;
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index dc5133bec3e7..7ef40243eb6c 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -289,7 +289,7 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
dev_err(lpc18xx_pwm->dev,
"maximum number of simultaneous channels reached\n");
return -EBUSY;
- };
+ }
set_bit(event, &lpc18xx_pwm->event_map);
lpc18xx_data->duty_event = event;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 389a5e140412..6ad7d0a50aed 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -72,6 +72,10 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
if (ret)
return;
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return;
+
clk_rate = clk_get_rate(pc->clk);
tmp = readl_relaxed(pc->base + pc->data->regs.period);
@@ -90,6 +94,7 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
else
state->polarity = PWM_POLARITY_NORMAL;
+ clk_disable(pc->clk);
clk_disable(pc->pclk);
}
@@ -189,6 +194,10 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
return ret;
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
pwm_get_state(pwm, &curstate);
enabled = curstate.enabled;
@@ -208,6 +217,7 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
out:
+ clk_disable(pc->clk);
clk_disable(pc->pclk);
return ret;
@@ -288,6 +298,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
u32 enable_conf, ctrl;
+ bool enabled;
int ret, count;
id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
@@ -307,7 +318,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
- "Can't get bus clk\n");
+ "Can't get PWM clk\n");
}
count = of_count_phandle_with_args(pdev->dev.of_node,
@@ -326,13 +337,13 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
ret = clk_prepare_enable(pc->clk);
if (ret) {
- dev_err(&pdev->dev, "Can't prepare enable bus clk: %d\n", ret);
+ dev_err(&pdev->dev, "Can't prepare enable PWM clk: %d\n", ret);
return ret;
}
- ret = clk_prepare(pc->pclk);
+ ret = clk_prepare_enable(pc->pclk);
if (ret) {
- dev_err(&pdev->dev, "Can't prepare APB clk: %d\n", ret);
+ dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret);
goto err_clk;
}
@@ -349,23 +360,26 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->chip.of_pwm_n_cells = 3;
}
+ enable_conf = pc->data->enable_conf;
+ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ enabled = (ctrl & enable_conf) == enable_conf;
+
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
- clk_unprepare(pc->clk);
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
goto err_pclk;
}
/* Keep the PWM clk enabled if the PWM appears to be up and running. */
- enable_conf = pc->data->enable_conf;
- ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
- if ((ctrl & enable_conf) != enable_conf)
+ if (!enabled)
clk_disable(pc->clk);
+ clk_disable(pc->pclk);
+
return 0;
err_pclk:
- clk_unprepare(pc->pclk);
+ clk_disable_unprepare(pc->pclk);
err_clk:
clk_disable_unprepare(pc->clk);
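The pattern introduced across this file: the APB clock (pclk) gates register
access while the function clock (clk) feeds the PWM counter, and both are
now enabled around every register window, with probe keeping clk held if the
PWM is already running. A minimal sketch of that bracketing, using the field
names from this driver; the helper name is hypothetical, and whether the
function clock is strictly required for register access may vary by SoC:

static int rockchip_pwm_read_ctrl(struct rockchip_pwm_chip *pc, u32 *ctrl)
{
	int ret;

	/* The APB clock must run for the register bus to respond */
	ret = clk_enable(pc->pclk);
	if (ret)
		return ret;

	/* The function clock keeps the PWM counter itself ticking */
	ret = clk_enable(pc->clk);
	if (ret) {
		clk_disable(pc->pclk);
		return ret;
	}

	*ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);

	/* Drop both references taken for this register window */
	clk_disable(pc->clk);
	clk_disable(pc->pclk);

	return 0;
}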
diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
deleted file mode 100644
index 34e91195ce98..000000000000
--- a/drivers/pwm/pwm-zx.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/slab.h>
-
-#define ZX_PWM_MODE 0x0
-#define ZX_PWM_CLKDIV_SHIFT 2
-#define ZX_PWM_CLKDIV_MASK GENMASK(11, 2)
-#define ZX_PWM_CLKDIV(x) (((x) << ZX_PWM_CLKDIV_SHIFT) & \
- ZX_PWM_CLKDIV_MASK)
-#define ZX_PWM_POLAR BIT(1)
-#define ZX_PWM_EN BIT(0)
-#define ZX_PWM_PERIOD 0x4
-#define ZX_PWM_DUTY 0x8
-
-#define ZX_PWM_CLKDIV_MAX 1023
-#define ZX_PWM_PERIOD_MAX 65535
-
-struct zx_pwm_chip {
- struct pwm_chip chip;
- struct clk *pclk;
- struct clk *wclk;
- void __iomem *base;
-};
-
-static inline struct zx_pwm_chip *to_zx_pwm_chip(struct pwm_chip *chip)
-{
- return container_of(chip, struct zx_pwm_chip, chip);
-}
-
-static inline u32 zx_pwm_readl(struct zx_pwm_chip *zpc, unsigned int hwpwm,
- unsigned int offset)
-{
- return readl(zpc->base + (hwpwm + 1) * 0x10 + offset);
-}
-
-static inline void zx_pwm_writel(struct zx_pwm_chip *zpc, unsigned int hwpwm,
- unsigned int offset, u32 value)
-{
- writel(value, zpc->base + (hwpwm + 1) * 0x10 + offset);
-}
-
-static void zx_pwm_set_mask(struct zx_pwm_chip *zpc, unsigned int hwpwm,
- unsigned int offset, u32 mask, u32 value)
-{
- u32 data;
-
- data = zx_pwm_readl(zpc, hwpwm, offset);
- data &= ~mask;
- data |= value & mask;
- zx_pwm_writel(zpc, hwpwm, offset, data);
-}
-
-static void zx_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
-{
- struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip);
- unsigned long rate;
- unsigned int div;
- u32 value;
- u64 tmp;
-
- value = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_MODE);
-
- if (value & ZX_PWM_POLAR)
- state->polarity = PWM_POLARITY_NORMAL;
- else
- state->polarity = PWM_POLARITY_INVERSED;
-
- if (value & ZX_PWM_EN)
- state->enabled = true;
- else
- state->enabled = false;
-
- div = (value & ZX_PWM_CLKDIV_MASK) >> ZX_PWM_CLKDIV_SHIFT;
- rate = clk_get_rate(zpc->wclk);
-
- tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_PERIOD);
- tmp *= div * NSEC_PER_SEC;
- state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
-
- tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_DUTY);
- tmp *= div * NSEC_PER_SEC;
- state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
-}
-
-static int zx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- unsigned int duty_ns, unsigned int period_ns)
-{
- struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip);
- unsigned int period_cycles, duty_cycles;
- unsigned long long c;
- unsigned int div = 1;
- unsigned long rate;
-
- /* Find out the best divider */
- rate = clk_get_rate(zpc->wclk);
-
- while (1) {
- c = rate / div;
- c = c * period_ns;
- do_div(c, NSEC_PER_SEC);
-
- if (c < ZX_PWM_PERIOD_MAX)
- break;
-
- div++;
-
- if (div > ZX_PWM_CLKDIV_MAX)
- return -ERANGE;
- }
-
- /* Calculate duty cycles */
- period_cycles = c;
- c *= duty_ns;
- do_div(c, period_ns);
- duty_cycles = c;
-
- /*
- * If the PWM is being enabled, we have to temporarily disable it
- * before configuring the registers.
- */
- if (pwm_is_enabled(pwm))
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_EN, 0);
-
- /* Set up registers */
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_CLKDIV_MASK,
- ZX_PWM_CLKDIV(div));
- zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_PERIOD, period_cycles);
- zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_DUTY, duty_cycles);
-
- /* Re-enable the PWM if needed */
- if (pwm_is_enabled(pwm))
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE,
- ZX_PWM_EN, ZX_PWM_EN);
-
- return 0;
-}
-
-static int zx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip);
- struct pwm_state cstate;
- int ret;
-
- pwm_get_state(pwm, &cstate);
-
- if (state->polarity != cstate.polarity)
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_POLAR,
- (state->polarity == PWM_POLARITY_INVERSED) ?
- 0 : ZX_PWM_POLAR);
-
- if (state->period != cstate.period ||
- state->duty_cycle != cstate.duty_cycle) {
- ret = zx_pwm_config(chip, pwm, state->duty_cycle,
- state->period);
- if (ret)
- return ret;
- }
-
- if (state->enabled != cstate.enabled) {
- if (state->enabled) {
- ret = clk_prepare_enable(zpc->wclk);
- if (ret)
- return ret;
-
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE,
- ZX_PWM_EN, ZX_PWM_EN);
- } else {
- zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE,
- ZX_PWM_EN, 0);
- clk_disable_unprepare(zpc->wclk);
- }
- }
-
- return 0;
-}
-
-static const struct pwm_ops zx_pwm_ops = {
- .apply = zx_pwm_apply,
- .get_state = zx_pwm_get_state,
- .owner = THIS_MODULE,
-};
-
-static int zx_pwm_probe(struct platform_device *pdev)
-{
- struct zx_pwm_chip *zpc;
- unsigned int i;
- int ret;
-
- zpc = devm_kzalloc(&pdev->dev, sizeof(*zpc), GFP_KERNEL);
- if (!zpc)
- return -ENOMEM;
-
- zpc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(zpc->base))
- return PTR_ERR(zpc->base);
-
- zpc->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(zpc->pclk))
- return PTR_ERR(zpc->pclk);
-
- zpc->wclk = devm_clk_get(&pdev->dev, "wclk");
- if (IS_ERR(zpc->wclk))
- return PTR_ERR(zpc->wclk);
-
- ret = clk_prepare_enable(zpc->pclk);
- if (ret)
- return ret;
-
- zpc->chip.dev = &pdev->dev;
- zpc->chip.ops = &zx_pwm_ops;
- zpc->chip.base = -1;
- zpc->chip.npwm = 4;
- zpc->chip.of_xlate = of_pwm_xlate_with_flags;
- zpc->chip.of_pwm_n_cells = 3;
-
- /*
- * PWM devices may be enabled by firmware, and let's disable all of
- * them initially to save power.
- */
- for (i = 0; i < zpc->chip.npwm; i++)
- zx_pwm_set_mask(zpc, i, ZX_PWM_MODE, ZX_PWM_EN, 0);
-
- ret = pwmchip_add(&zpc->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- clk_disable_unprepare(zpc->pclk);
- return ret;
- }
-
- platform_set_drvdata(pdev, zpc);
-
- return 0;
-}
-
-static int zx_pwm_remove(struct platform_device *pdev)
-{
- struct zx_pwm_chip *zpc = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&zpc->chip);
- clk_disable_unprepare(zpc->pclk);
-
- return ret;
-}
-
-static const struct of_device_id zx_pwm_dt_ids[] = {
- { .compatible = "zte,zx296718-pwm", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, zx_pwm_dt_ids);
-
-static struct platform_driver zx_pwm_driver = {
- .driver = {
- .name = "zx-pwm",
- .of_match_table = zx_pwm_dt_ids,
- },
- .probe = zx_pwm_probe,
- .remove = zx_pwm_remove,
-};
-module_platform_driver(zx_pwm_driver);
-
-MODULE_ALIAS("platform:zx-pwm");
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX PWM Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index c2b79736a92b..e74cf09eeff0 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -749,7 +749,7 @@ int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
EXPORT_SYMBOL_GPL(rio_map_outb_region);
/**
- * rio_unmap_inb_region -- Unmap the inbound memory region
+ * rio_unmap_outb_region -- Unmap the outbound memory region
* @mport: Master port
* @destid: destination id mapping points to
* @rstart: RIO base address window translates to
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..77c43134bc9e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -122,15 +122,6 @@ config REGULATOR_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
regulator driver.
-config REGULATOR_AB3100
- tristate "ST-Ericsson AB3100 Regulator functions"
- depends on AB3100_CORE
- default y if AB3100_CORE
- help
- These regulators correspond to functionality in the
- AB3100 analog baseband dealing with power regulators
- for the system.
-
config REGULATOR_AB8500
bool "ST-Ericsson AB8500 Power Regulators"
depends on AB8500_CORE
@@ -179,6 +170,14 @@ config REGULATOR_AS3722
AS3722 PMIC. This will enable support for all the software
controllable DCDC/LDO regulators.
+config REGULATOR_ATC260X
+ tristate "Actions Semi ATC260x PMIC Regulators"
+ depends on MFD_ATC260X
+ help
+ This driver provides support for the voltage regulators on the
+ ATC260x PMICs. This will enable support for all the software
+ controllable DCDC/LDO regulators.
+
config REGULATOR_AXP20X
tristate "X-POWERS AXP20X PMIC Regulators"
depends on MFD_AXP20X
@@ -732,6 +731,16 @@ config REGULATOR_MT6311
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6315
+ tristate "MediaTek MT6315 PMIC"
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+	  Say y here to enable support for the power regulators of the
+	  MediaTek MT6315 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
config REGULATOR_MT6323
tristate "MediaTek MT6323 PMIC"
depends on MFD_MT6397
@@ -777,6 +786,16 @@ config REGULATOR_MT6397
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MTK_DVFSRC
+ tristate "MediaTek DVFSRC regulator driver"
+ depends on MTK_DVFSRC
+ help
+	  Say y here to control regulators via the DVFSRC (dynamic voltage
+	  and frequency scaling resource collector).
+	  This driver supports controlling regulators through the MediaTek
+	  DVFSRC and allows multiple users to vote on the regulator state.
+
config REGULATOR_PALMAS
tristate "TI Palmas PMIC Regulators"
depends on MFD_PALMAS
@@ -828,6 +847,10 @@ config REGULATOR_PF8X00
Say y here to support the regulators found on the NXP
PF8100/PF8121A/PF8200 PMIC.
+	  Say M here if you want support for the regulators found
+ on the NXP PF8100/PF8121A/PF8200 PMIC. The module will be named
+ "pf8x00-regulator".
+
config REGULATOR_PFUZE100
tristate "Freescale PFUZE100/200/3000/3001 regulator driver"
depends on I2C && OF
@@ -881,6 +904,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
@@ -968,6 +992,16 @@ config REGULATOR_RT4801
This adds support for voltage regulators in Richtek RT4801 Display Bias IC.
The device supports two regulators (DSVP/DSVN).
+config REGULATOR_RT4831
+ tristate "Richtek RT4831 DSV Regulators"
+ depends on MFD_RT4831
+ help
+ This adds support for voltage regulators in Richtek RT4831.
+ There are three regulators (VLCM/DSVP/DSVN).
+	  VLCM is a virtual voltage input for DSVP/DSVN inside the IC,
+	  while DSVP/DSVN are the actual outputs, with a Vout range of
+	  4V to 6.5V. They are commonly used to power the display panel.
+
config REGULATOR_RT5033
tristate "Richtek RT5033 Regulators"
depends on MFD_RT5033
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 680e539f6579..44d2f8bf4b74 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_CROS_EC) += cros-ec-regulator.o
obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
-obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
obj-$(CONFIG_REGULATOR_ACT8945A) += act8945a-regulator.o
@@ -27,6 +26,7 @@ obj-$(CONFIG_REGULATOR_ARIZONA_MICSUPP) += arizona-micsupp.o
obj-$(CONFIG_REGULATOR_ARM_SCMI) += scmi-regulator.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
+obj-$(CONFIG_REGULATOR_ATC260X) += atc260x-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
@@ -89,11 +89,13 @@ obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
obj-$(CONFIG_REGULATOR_MP886X) += mp886x.o
obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
+obj-$(CONFIG_REGULATOR_MT6315) += mt6315-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
+obj-$(CONFIG_REGULATOR_MTK_DVFSRC) += mtk-dvfsrc-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_LABIBB) += qcom-labibb-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
@@ -118,6 +120,7 @@ obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o
obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
+obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
deleted file mode 100644
index a544f45efe53..000000000000
--- a/drivers/regulator/ab3100.c
+++ /dev/null
@@ -1,724 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/regulator/ab3100.c
- *
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * Low-level control of the AB3100 IC Low Dropout (LDO)
- * regulators, external regulator and buck converter
- * Author: Mattias Wallin <mattias.wallin@stericsson.com>
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/mfd/ab3100.h>
-#include <linux/mfd/abx500.h>
-#include <linux/of.h>
-#include <linux/regulator/of_regulator.h>
-
-/* LDO registers and some handy masking definitions for AB3100 */
-#define AB3100_LDO_A 0x40
-#define AB3100_LDO_C 0x41
-#define AB3100_LDO_D 0x42
-#define AB3100_LDO_E 0x43
-#define AB3100_LDO_E_SLEEP 0x44
-#define AB3100_LDO_F 0x45
-#define AB3100_LDO_G 0x46
-#define AB3100_LDO_H 0x47
-#define AB3100_LDO_H_SLEEP_MODE 0
-#define AB3100_LDO_H_SLEEP_EN 2
-#define AB3100_LDO_ON 4
-#define AB3100_LDO_H_VSEL_AC 5
-#define AB3100_LDO_K 0x48
-#define AB3100_LDO_EXT 0x49
-#define AB3100_BUCK 0x4A
-#define AB3100_BUCK_SLEEP 0x4B
-#define AB3100_REG_ON_MASK 0x10
-
-/**
- * struct ab3100_regulator
- * A struct passed around the individual regulator functions
- * @platform_device: platform device holding this regulator
- * @dev: handle to the device
- * @plfdata: AB3100 platform data passed in at probe time
- * @regreg: regulator register number in the AB3100
- */
-struct ab3100_regulator {
- struct device *dev;
- struct ab3100_platform_data *plfdata;
- u8 regreg;
-};
-
-/* The order in which registers are initialized */
-static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
- AB3100_LDO_A,
- AB3100_LDO_C,
- AB3100_LDO_E,
- AB3100_LDO_E_SLEEP,
- AB3100_LDO_F,
- AB3100_LDO_G,
- AB3100_LDO_H,
- AB3100_LDO_K,
- AB3100_LDO_EXT,
- AB3100_BUCK,
- AB3100_BUCK_SLEEP,
- AB3100_LDO_D,
-};
-
-/* Preset (hardware defined) voltages for these regulators */
-#define LDO_A_VOLTAGE 2750000
-#define LDO_C_VOLTAGE 2650000
-#define LDO_D_VOLTAGE 2650000
-
-static const unsigned int ldo_e_buck_typ_voltages[] = {
- 1800000,
- 1400000,
- 1300000,
- 1200000,
- 1100000,
- 1050000,
- 900000,
-};
-
-static const unsigned int ldo_f_typ_voltages[] = {
- 1800000,
- 1400000,
- 1300000,
- 1200000,
- 1100000,
- 1050000,
- 2500000,
- 2650000,
-};
-
-static const unsigned int ldo_g_typ_voltages[] = {
- 2850000,
- 2750000,
- 1800000,
- 1500000,
-};
-
-static const unsigned int ldo_h_typ_voltages[] = {
- 2750000,
- 1800000,
- 1500000,
- 1200000,
-};
-
-static const unsigned int ldo_k_typ_voltages[] = {
- 2750000,
- 1800000,
-};
-
-
-/* The regulator devices */
-static struct ab3100_regulator
-ab3100_regulators[AB3100_NUM_REGULATORS] = {
- {
- .regreg = AB3100_LDO_A,
- },
- {
- .regreg = AB3100_LDO_C,
- },
- {
- .regreg = AB3100_LDO_D,
- },
- {
- .regreg = AB3100_LDO_E,
- },
- {
- .regreg = AB3100_LDO_F,
- },
- {
- .regreg = AB3100_LDO_G,
- },
- {
- .regreg = AB3100_LDO_H,
- },
- {
- .regreg = AB3100_LDO_K,
- },
- {
- .regreg = AB3100_LDO_EXT,
- /* No voltages for the external regulator */
- },
- {
- .regreg = AB3100_BUCK,
- },
-};
-
-/*
- * General functions for enable, disable and is_enabled used for
- * LDO: A,C,E,F,G,H,K,EXT and BUCK
- */
-static int ab3100_enable_regulator(struct regulator_dev *reg)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- int err;
- u8 regval;
-
- err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
- &regval);
- if (err) {
- dev_warn(&reg->dev, "failed to get regid %d value\n",
- abreg->regreg);
- return err;
- }
-
- /* The regulator is already on, no reason to go further */
- if (regval & AB3100_REG_ON_MASK)
- return 0;
-
- regval |= AB3100_REG_ON_MASK;
-
- err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
- regval);
- if (err) {
- dev_warn(&reg->dev, "failed to set regid %d value\n",
- abreg->regreg);
- return err;
- }
-
- return 0;
-}
-
-static int ab3100_disable_regulator(struct regulator_dev *reg)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- int err;
- u8 regval;
-
- /*
- * LDO D is a special regulator. When it is disabled, the entire
- * system is shut down. So this is handled specially.
- */
- pr_info("Called ab3100_disable_regulator\n");
- if (abreg->regreg == AB3100_LDO_D) {
- dev_info(&reg->dev, "disabling LDO D - shut down system\n");
- /* Setting LDO D to 0x00 cuts the power to the SoC */
- return abx500_set_register_interruptible(abreg->dev, 0,
- AB3100_LDO_D, 0x00U);
- }
-
- /*
- * All other regulators are handled here
- */
- err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
- &regval);
- if (err) {
- dev_err(&reg->dev, "unable to get register 0x%x\n",
- abreg->regreg);
- return err;
- }
- regval &= ~AB3100_REG_ON_MASK;
- return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
- regval);
-}
-
-static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- u8 regval;
- int err;
-
- err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
- &regval);
- if (err) {
- dev_err(&reg->dev, "unable to get register 0x%x\n",
- abreg->regreg);
- return err;
- }
-
- return regval & AB3100_REG_ON_MASK;
-}
-
-static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- u8 regval;
- int err;
-
- /*
- * For variable types, read out setting and index into
- * supplied voltage list.
- */
- err = abx500_get_register_interruptible(abreg->dev, 0,
- abreg->regreg, &regval);
- if (err) {
- dev_warn(&reg->dev,
- "failed to get regulator value in register %02x\n",
- abreg->regreg);
- return err;
- }
-
- /* The 3 highest bits index voltages */
- regval &= 0xE0;
- regval >>= 5;
-
- if (regval >= reg->desc->n_voltages) {
- dev_err(&reg->dev,
- "regulator register %02x contains an illegal voltage setting\n",
- abreg->regreg);
- return -EINVAL;
- }
-
- return reg->desc->volt_table[regval];
-}
-
-static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
- unsigned selector)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- u8 regval;
- int err;
-
- err = abx500_get_register_interruptible(abreg->dev, 0,
- abreg->regreg, &regval);
- if (err) {
- dev_warn(&reg->dev,
- "failed to get regulator register %02x\n",
- abreg->regreg);
- return err;
- }
-
- /* The highest three bits control the variable regulators */
- regval &= ~0xE0;
- regval |= (selector << 5);
-
- err = abx500_set_register_interruptible(abreg->dev, 0,
- abreg->regreg, regval);
- if (err)
- dev_warn(&reg->dev, "failed to set regulator register %02x\n",
- abreg->regreg);
-
- return err;
-}
-
-static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
- int uV)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- u8 regval;
- int err;
- int bestindex;
- u8 targetreg;
-
- if (abreg->regreg == AB3100_LDO_E)
- targetreg = AB3100_LDO_E_SLEEP;
- else if (abreg->regreg == AB3100_BUCK)
- targetreg = AB3100_BUCK_SLEEP;
- else
- return -EINVAL;
-
- /* LDO E and BUCK have special suspend voltages you can set */
- bestindex = regulator_map_voltage_iterate(reg, uV, uV);
-
- err = abx500_get_register_interruptible(abreg->dev, 0,
- targetreg, &regval);
- if (err) {
- dev_warn(&reg->dev,
- "failed to get regulator register %02x\n",
- targetreg);
- return err;
- }
-
- /* The highest three bits control the variable regulators */
- regval &= ~0xE0;
- regval |= (bestindex << 5);
-
- err = abx500_set_register_interruptible(abreg->dev, 0,
- targetreg, regval);
- if (err)
- dev_warn(&reg->dev, "failed to set regulator register %02x\n",
- abreg->regreg);
-
- return err;
-}
-
-/*
- * The external regulator can just define a fixed voltage.
- */
-static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
-{
- struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
-
- if (abreg->plfdata)
- return abreg->plfdata->external_voltage;
- else
- /* TODO: encode external voltage into device tree */
- return 0;
-}
-
-static const struct regulator_ops regulator_ops_fixed = {
- .enable = ab3100_enable_regulator,
- .disable = ab3100_disable_regulator,
- .is_enabled = ab3100_is_enabled_regulator,
-};
-
-static const struct regulator_ops regulator_ops_variable = {
- .enable = ab3100_enable_regulator,
- .disable = ab3100_disable_regulator,
- .is_enabled = ab3100_is_enabled_regulator,
- .get_voltage = ab3100_get_voltage_regulator,
- .set_voltage_sel = ab3100_set_voltage_regulator_sel,
- .list_voltage = regulator_list_voltage_table,
-};
-
-static const struct regulator_ops regulator_ops_variable_sleepable = {
- .enable = ab3100_enable_regulator,
- .disable = ab3100_disable_regulator,
- .is_enabled = ab3100_is_enabled_regulator,
- .get_voltage = ab3100_get_voltage_regulator,
- .set_voltage_sel = ab3100_set_voltage_regulator_sel,
- .set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
- .list_voltage = regulator_list_voltage_table,
-};
-
-/*
- * LDO EXT is an external regulator so it is really
- * not possible to set any voltage locally here, AB3100
- * is an on/off switch plain an simple. The external
- * voltage is defined in the board set-up if any.
- */
-static const struct regulator_ops regulator_ops_external = {
- .enable = ab3100_enable_regulator,
- .disable = ab3100_disable_regulator,
- .is_enabled = ab3100_is_enabled_regulator,
- .get_voltage = ab3100_get_voltage_regulator_external,
-};
-
-static const struct regulator_desc
-ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
- {
- .name = "LDO_A",
- .id = AB3100_LDO_A,
- .ops = &regulator_ops_fixed,
- .n_voltages = 1,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .fixed_uV = LDO_A_VOLTAGE,
- .enable_time = 200,
- },
- {
- .name = "LDO_C",
- .id = AB3100_LDO_C,
- .ops = &regulator_ops_fixed,
- .n_voltages = 1,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .fixed_uV = LDO_C_VOLTAGE,
- .enable_time = 200,
- },
- {
- .name = "LDO_D",
- .id = AB3100_LDO_D,
- .ops = &regulator_ops_fixed,
- .n_voltages = 1,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .fixed_uV = LDO_D_VOLTAGE,
- .enable_time = 200,
- },
- {
- .name = "LDO_E",
- .id = AB3100_LDO_E,
- .ops = &regulator_ops_variable_sleepable,
- .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
- .volt_table = ldo_e_buck_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 200,
- },
- {
- .name = "LDO_F",
- .id = AB3100_LDO_F,
- .ops = &regulator_ops_variable,
- .n_voltages = ARRAY_SIZE(ldo_f_typ_voltages),
- .volt_table = ldo_f_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 600,
- },
- {
- .name = "LDO_G",
- .id = AB3100_LDO_G,
- .ops = &regulator_ops_variable,
- .n_voltages = ARRAY_SIZE(ldo_g_typ_voltages),
- .volt_table = ldo_g_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 400,
- },
- {
- .name = "LDO_H",
- .id = AB3100_LDO_H,
- .ops = &regulator_ops_variable,
- .n_voltages = ARRAY_SIZE(ldo_h_typ_voltages),
- .volt_table = ldo_h_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 200,
- },
- {
- .name = "LDO_K",
- .id = AB3100_LDO_K,
- .ops = &regulator_ops_variable,
- .n_voltages = ARRAY_SIZE(ldo_k_typ_voltages),
- .volt_table = ldo_k_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 200,
- },
- {
- .name = "LDO_EXT",
- .id = AB3100_LDO_EXT,
- .ops = &regulator_ops_external,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- },
- {
- .name = "BUCK",
- .id = AB3100_BUCK,
- .ops = &regulator_ops_variable_sleepable,
- .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
- .volt_table = ldo_e_buck_typ_voltages,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .enable_time = 1000,
- },
-};
-
-static int ab3100_regulator_register(struct platform_device *pdev,
- struct ab3100_platform_data *plfdata,
- struct regulator_init_data *init_data,
- struct device_node *np,
- unsigned long id)
-{
- const struct regulator_desc *desc;
- struct ab3100_regulator *reg;
- struct regulator_dev *rdev;
- struct regulator_config config = { };
- int err, i;
-
- for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- desc = &ab3100_regulator_desc[i];
- if (desc->id == id)
- break;
- }
- if (desc->id != id)
- return -ENODEV;
-
- /* Same index used for this array */
- reg = &ab3100_regulators[i];
-
- /*
- * Initialize per-regulator struct.
- * Inherit platform data, this comes down from the
- * i2c boarddata, from the machine. So if you want to
- * see what it looks like for a certain machine, go
- * into the machine I2C setup.
- */
- reg->dev = &pdev->dev;
- if (plfdata) {
- reg->plfdata = plfdata;
- config.init_data = &plfdata->reg_constraints[i];
- } else if (np) {
- config.of_node = np;
- config.init_data = init_data;
- }
- config.dev = &pdev->dev;
- config.driver_data = reg;
-
- rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev)) {
- err = PTR_ERR(rdev);
- dev_err(&pdev->dev,
- "%s: failed to register regulator %s err %d\n",
- __func__, desc->name,
- err);
- return err;
- }
-
- return 0;
-}
-
-static struct of_regulator_match ab3100_regulator_matches[] = {
- { .name = "ab3100_ldo_a", .driver_data = (void *) AB3100_LDO_A, },
- { .name = "ab3100_ldo_c", .driver_data = (void *) AB3100_LDO_C, },
- { .name = "ab3100_ldo_d", .driver_data = (void *) AB3100_LDO_D, },
- { .name = "ab3100_ldo_e", .driver_data = (void *) AB3100_LDO_E, },
- { .name = "ab3100_ldo_f", .driver_data = (void *) AB3100_LDO_F },
- { .name = "ab3100_ldo_g", .driver_data = (void *) AB3100_LDO_G },
- { .name = "ab3100_ldo_h", .driver_data = (void *) AB3100_LDO_H },
- { .name = "ab3100_ldo_k", .driver_data = (void *) AB3100_LDO_K },
- { .name = "ab3100_ext", .driver_data = (void *) AB3100_LDO_EXT },
- { .name = "ab3100_buck", .driver_data = (void *) AB3100_BUCK },
-};
-
-/*
- * Initial settings of ab3100 registers.
- * Common for below LDO regulator settings are that
- * bit 7-5 controls voltage. Bit 4 turns regulator ON(1) or OFF(0).
- * Bit 3-2 controls sleep enable and bit 1-0 controls sleep mode.
- */
-/* LDO_A 0x16: 2.75V, ON, SLEEP_A, SLEEP OFF GND */
-#define LDO_A_SETTING 0x16
-/* LDO_C 0x10: 2.65V, ON, SLEEP_A or B, SLEEP full power */
-#define LDO_C_SETTING 0x10
-/* LDO_D 0x10: 2.65V, ON, sleep mode not used */
-#define LDO_D_SETTING 0x10
-/* LDO_E 0x10: 1.8V, ON, SLEEP_A or B, SLEEP full power */
-#define LDO_E_SETTING 0x10
-/* LDO_E SLEEP 0x00: 1.8V, not used, SLEEP_A or B, not used */
-#define LDO_E_SLEEP_SETTING 0x00
-/* LDO_F 0xD0: 2.5V, ON, SLEEP_A or B, SLEEP full power */
-#define LDO_F_SETTING 0xD0
-/* LDO_G 0x00: 2.85V, OFF, SLEEP_A or B, SLEEP full power */
-#define LDO_G_SETTING 0x00
-/* LDO_H 0x18: 2.75V, ON, SLEEP_B, SLEEP full power */
-#define LDO_H_SETTING 0x18
-/* LDO_K 0x00: 2.75V, OFF, SLEEP_A or B, SLEEP full power */
-#define LDO_K_SETTING 0x00
-/* LDO_EXT 0x00: Voltage not set, OFF, not used, not used */
-#define LDO_EXT_SETTING 0x00
-/* BUCK 0x7D: 1.2V, ON, SLEEP_A and B, SLEEP low power */
-#define BUCK_SETTING 0x7D
-/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */
-#define BUCK_SLEEP_SETTING 0xAC
-
-static const u8 ab3100_reg_initvals[] = {
- LDO_A_SETTING,
- LDO_C_SETTING,
- LDO_E_SETTING,
- LDO_E_SLEEP_SETTING,
- LDO_F_SETTING,
- LDO_G_SETTING,
- LDO_H_SETTING,
- LDO_K_SETTING,
- LDO_EXT_SETTING,
- BUCK_SETTING,
- BUCK_SLEEP_SETTING,
- LDO_D_SETTING,
-};
-
-static int
-ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
-{
- int err, i;
-
- /*
- * Set up the regulator registers, as was previously done with
- * platform data.
- */
- /* Set up regulators */
- for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- err = abx500_set_register_interruptible(&pdev->dev, 0,
- ab3100_reg_init_order[i],
- ab3100_reg_initvals[i]);
- if (err) {
- dev_err(&pdev->dev, "regulator initialization failed with error %d\n",
- err);
- return err;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ab3100_regulator_matches); i++) {
- err = ab3100_regulator_register(
- pdev, NULL, ab3100_regulator_matches[i].init_data,
- ab3100_regulator_matches[i].of_node,
- (unsigned long)ab3100_regulator_matches[i].driver_data);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-
-static int ab3100_regulators_probe(struct platform_device *pdev)
-{
- struct ab3100_platform_data *plfdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int err = 0;
- u8 data;
- int i;
-
- /* Check chip state */
- err = abx500_get_register_interruptible(&pdev->dev, 0,
- AB3100_LDO_D, &data);
- if (err) {
- dev_err(&pdev->dev, "could not read initial status of LDO_D\n");
- return err;
- }
- if (data & 0x10)
- dev_notice(&pdev->dev,
- "chip is already in active mode (Warm start)\n");
- else
- dev_notice(&pdev->dev,
- "chip is in inactive mode (Cold start)\n");
-
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- ab3100_regulator_matches,
- ARRAY_SIZE(ab3100_regulator_matches));
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
- return ab3100_regulator_of_probe(pdev, np);
- }
-
- /* Set up regulators */
- for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- err = abx500_set_register_interruptible(&pdev->dev, 0,
- ab3100_reg_init_order[i],
- plfdata->reg_initvals[i]);
- if (err) {
- dev_err(&pdev->dev, "regulator initialization failed with error %d\n",
- err);
- return err;
- }
- }
-
- /* Register the regulators */
- for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- const struct regulator_desc *desc = &ab3100_regulator_desc[i];
-
- err = ab3100_regulator_register(pdev, plfdata, NULL, NULL,
- desc->id);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct platform_driver ab3100_regulators_driver = {
- .driver = {
- .name = "ab3100-regulators",
- },
- .probe = ab3100_regulators_probe,
-};
-
-static __init int ab3100_regulators_init(void)
-{
- return platform_driver_register(&ab3100_regulators_driver);
-}
-
-static __exit void ab3100_regulators_exit(void)
-{
- platform_driver_unregister(&ab3100_regulators_driver);
-}
-
-subsys_initcall(ab3100_regulators_init);
-module_exit(ab3100_regulators_exit);
-
-MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
-MODULE_DESCRIPTION("AB3100 Regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ab3100-regulators");
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 8bb43a671ded..4f26952caa56 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -22,403 +22,17 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/regulator/ab8500.h>
-
-static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* Main display, u8500 R3 uib */
- REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
- /* Main display, u8500 uib and ST uib */
- REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
- /* Secondary display, ST uib */
- REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
- /* SFH7741 proximity sensor */
- REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
- /* BH1780GLS ambient light sensor */
- REGULATOR_SUPPLY("vcc", "2-0029"),
- /* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "2-0018"),
- /* lsm303dlhc accelerometer */
- REGULATOR_SUPPLY("vdd", "2-0019"),
- /* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "2-001e"),
- /* Rohm BU21013 Touchscreen devices */
- REGULATOR_SUPPLY("avdd", "3-005c"),
- REGULATOR_SUPPLY("avdd", "3-005d"),
- /* Synaptics RMI4 Touchscreen device */
- REGULATOR_SUPPLY("vdd", "3-004b"),
- /* L3G4200D Gyroscope device */
- REGULATOR_SUPPLY("vdd", "2-0068"),
- /* Ambient light sensor device */
- REGULATOR_SUPPLY("vdd", "3-0029"),
- /* Pressure sensor device */
- REGULATOR_SUPPLY("vdd", "2-005c"),
- /* Cypress TrueTouch Touchscreen device */
- REGULATOR_SUPPLY("vcpin", "spi8.0"),
- /* Camera device */
- REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
-};
-
-static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
- /* On-board eMMC power */
- REGULATOR_SUPPLY("vmmc", "sdi4"),
- /* AB8500 audio codec */
- REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
- /* AB8500 accessory detect 1 */
- REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
- /* AB8500 Tv-out device */
- REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
- /* AV8100 HDMI device */
- REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
-};
-
-static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
- REGULATOR_SUPPLY("v-SD-STM", "stm"),
- /* External MMC slot power */
- REGULATOR_SUPPLY("vmmc", "sdi0"),
-};
-
-static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
- /* TV-out DENC supply */
- REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
- /* Internal general-purpose ADC */
- REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
- /* ADC for charger */
- REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
- /* AB8500 Tv-out device */
- REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
-};
-
-static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
- /* AB8500 audio-codec main supply */
- REGULATOR_SUPPLY("vaud", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
- /* AB8500 audio-codec Mic1 supply */
- REGULATOR_SUPPLY("vamic1", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
- /* AB8500 audio-codec Mic2 supply */
- REGULATOR_SUPPLY("vamic2", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
- /* AB8500 audio-codec DMic supply */
- REGULATOR_SUPPLY("vdmic", "ab8500-codec.0"),
-};
-static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
- /* SoC core supply, no device */
- REGULATOR_SUPPLY("v-intcore", NULL),
- /* USB Transceiver */
- REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
- /* Handled by abx500 clk driver */
- REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* DB8500 DSI */
- REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
- REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
- REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
- /* DB8500 CSI */
- REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
-};
-
-/* ab8500 regulator register initialization */
-static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
- /*
- * VanaRequestCtrl = HP/LP depending on VxRequest
- * VextSupply1RequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
- /*
- * VextSupply2RequestCtrl = HP/LP depending on VxRequest
- * VextSupply3RequestCtrl = HP/LP depending on VxRequest
- * Vaux1RequestCtrl = HP/LP depending on VxRequest
- * Vaux2RequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
- /*
- * Vaux3RequestCtrl = HP/LP depending on VxRequest
- * SwHPReq = Control through SWValid disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
- /*
- * VanaSysClkReq1HPValid = disabled
- * Vaux1SysClkReq1HPValid = disabled
- * Vaux2SysClkReq1HPValid = disabled
- * Vaux3SysClkReq1HPValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
- /*
- * VextSupply1SysClkReq1HPValid = disabled
- * VextSupply2SysClkReq1HPValid = disabled
- * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
- /*
- * VanaHwHPReq1Valid = disabled
- * Vaux1HwHPreq1Valid = disabled
- * Vaux2HwHPReq1Valid = disabled
- * Vaux3HwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
- /*
- * VextSupply1HwHPReq1Valid = disabled
- * VextSupply2HwHPReq1Valid = disabled
- * VextSupply3HwHPReq1Valid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
- /*
- * VanaHwHPReq2Valid = disabled
- * Vaux1HwHPReq2Valid = disabled
- * Vaux2HwHPReq2Valid = disabled
- * Vaux3HwHPReq2Valid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
- /*
- * VextSupply1HwHPReq2Valid = disabled
- * VextSupply2HwHPReq2Valid = disabled
- * VextSupply3HwHPReq2Valid = HWReq2 controlled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
- /*
- * VanaSwHPReqValid = disabled
- * Vaux1SwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
- /*
- * Vaux2SwHPReqValid = disabled
- * Vaux3SwHPReqValid = disabled
- * VextSupply1SwHPReqValid = disabled
- * VextSupply2SwHPReqValid = disabled
- * VextSupply3SwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
- /*
- * SysClkReq2Valid1 = SysClkReq2 controlled
- * SysClkReq3Valid1 = disabled
- * SysClkReq4Valid1 = SysClkReq4 controlled
- * SysClkReq5Valid1 = disabled
- * SysClkReq6Valid1 = SysClkReq6 controlled
- * SysClkReq7Valid1 = disabled
- * SysClkReq8Valid1 = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
- /*
- * SysClkReq2Valid2 = disabled
- * SysClkReq3Valid2 = disabled
- * SysClkReq4Valid2 = disabled
- * SysClkReq5Valid2 = disabled
- * SysClkReq6Valid2 = SysClkReq6 controlled
- * SysClkReq7Valid2 = disabled
- * SysClkReq8Valid2 = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
- /*
- * VTVoutEna = disabled
- * Vintcore12Ena = disabled
- * Vintcore12Sel = 1.25 V
- * Vintcore12LP = inactive (HP)
- * VTVoutLP = inactive (HP)
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
- /*
- * VaudioEna = disabled
- * VdmicEna = disabled
- * Vamic1Ena = disabled
- * Vamic2Ena = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
- /*
- * Vamic1_dzout = high-Z when Vamic1 is disabled
- * Vamic2_dzout = high-Z when Vamic2 is disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
- /*
- * VPll = Hw controlled (NOTE! PRCMU bits)
- * VanaRegu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
- /*
- * VrefDDREna = disabled
- * VrefDDRSleepMode = inactive (no pulldown)
- */
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
- /*
- * VextSupply1Regu = force LP
- * VextSupply2Regu = force OFF
- * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
- * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
- * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
- */
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
- /*
- * Vaux1Regu = force HP
- * Vaux2Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
- /*
- * Vaux3Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
- /*
- * Vaux1Sel = 2.8 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
- /*
- * Vaux2Sel = 2.9 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
- /*
- * Vaux3Sel = 2.91 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
- /*
- * VextSupply12LP = disabled (no LP)
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
- /*
- * Vaux1Disch = short discharge time
- * Vaux2Disch = short discharge time
- * Vaux3Disch = short discharge time
- * Vintcore12Disch = short discharge time
- * VTVoutDisch = short discharge time
- * VaudioDisch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
- /*
- * VanaDisch = short discharge time
- * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
- * VdmicDisch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
+/* AB8500 external regulators */
+enum ab8500_ext_regulator_id {
+ AB8500_EXT_SUPPLY1,
+ AB8500_EXT_SUPPLY2,
+ AB8500_EXT_SUPPLY3,
+ AB8500_NUM_EXT_REGULATORS,
};
-/* AB8500 regulators */
-static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
- /* supplies to the display/camera */
- [AB8500_LDO_AUX1] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-DISPLAY",
- .min_uV = 2800000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
- .boot_on = 1, /* display is on at boot */
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
- .consumer_supplies = ab8500_vaux1_consumers,
- },
- /* supplies to the on-board eMMC */
- [AB8500_LDO_AUX2] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-eMMC1",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
- .consumer_supplies = ab8500_vaux2_consumers,
- },
- /* supply for VAUX3, supplies to SDcard slots */
- [AB8500_LDO_AUX3] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-MMC-SD",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
- .consumer_supplies = ab8500_vaux3_consumers,
- },
- /* supply for tvout, gpadc, TVOUT LDO */
- [AB8500_LDO_TVOUT] = {
- .constraints = {
- .name = "V-TVOUT",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers),
- .consumer_supplies = ab8500_vtvout_consumers,
- },
- /* supply for ab8500-vaudio, VAUDIO LDO */
- [AB8500_LDO_AUDIO] = {
- .constraints = {
- .name = "V-AUD",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
- .consumer_supplies = ab8500_vaud_consumers,
- },
- /* supply for v-anamic1 VAMic1-LDO */
- [AB8500_LDO_ANAMIC1] = {
- .constraints = {
- .name = "V-AMIC1",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
- .consumer_supplies = ab8500_vamic1_consumers,
- },
- /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
- [AB8500_LDO_ANAMIC2] = {
- .constraints = {
- .name = "V-AMIC2",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
- .consumer_supplies = ab8500_vamic2_consumers,
- },
- /* supply for v-dmic, VDMIC LDO */
- [AB8500_LDO_DMIC] = {
- .constraints = {
- .name = "V-DMIC",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
- .consumer_supplies = ab8500_vdmic_consumers,
- },
- /* supply for v-intcore12, VINTCORE12 LDO */
- [AB8500_LDO_INTCORE] = {
- .constraints = {
- .name = "V-INTCORE",
- .min_uV = 1250000,
- .max_uV = 1350000,
- .input_uV = 1800000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE |
- REGULATOR_CHANGE_DRMS,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
- .consumer_supplies = ab8500_vintcore_consumers,
- },
- /* supply for U8500 CSI-DSI, VANA LDO */
- [AB8500_LDO_ANA] = {
- .constraints = {
- .name = "V-CSI-DSI",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
- .consumer_supplies = ab8500_vana_consumers,
- },
+struct ab8500_ext_regulator_cfg {
+ bool hwreq; /* requires hw mode or high power mode */
};
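
The small cfg struct above matters because it rides along in regulator_init_data's driver_data pointer and is cast back in the probe hunk further down. As a sketch of that round trip (the example_* names are hypothetical, not from the patch):

/*
 * Hypothetical board wiring: attach a cfg to one external supply and
 * recover it the way the probe path below does.
 */
static struct ab8500_ext_regulator_cfg example_ext1_cfg = {
	.hwreq = true,			/* rail needs HW/high-power mode */
};

static struct regulator_init_data example_ext_init[AB8500_NUM_EXT_REGULATORS] = {
	[AB8500_EXT_SUPPLY1] = {
		.constraints = { .name = "ab8500-ext-supply1" },
		.driver_data = &example_ext1_cfg,
	},
};

static inline struct ab8500_ext_regulator_cfg *
example_get_cfg(const struct regulator_init_data *init)
{
	/* same cast as in ab8500_ext_regulator_probe() below */
	return (struct ab8500_ext_regulator_cfg *)init->driver_data;
}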
/* supply for VextSupply3 */
@@ -465,15 +79,6 @@ static struct regulator_init_data ab8500_ext_regulators[] = {
},
};
-static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
- .reg_init = ab8500_reg_init,
- .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
- .ext_regulator = ab8500_ext_regulators,
- .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
-};
-
/**
* struct ab8500_ext_regulator_info - ab8500 regulator information
* @dev: device pointer
@@ -788,7 +393,6 @@ static struct ab8500_ext_regulator_info
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data;
struct regulator_config config = { };
struct regulator_dev *rdev;
int i;
@@ -798,12 +402,6 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* make sure the platform data has the correct size */
- if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
- dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
- return -EINVAL;
- }
-
/* check for AB8500 2.x */
if (is_ab8500_2p0_or_earlier(ab8500)) {
struct ab8500_ext_regulator_info *info;
@@ -823,11 +421,11 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
info = &ab8500_ext_regulator_info[i];
info->dev = &pdev->dev;
info->cfg = (struct ab8500_ext_regulator_cfg *)
- pdata->ext_regulator[i].driver_data;
+ ab8500_ext_regulators[i].driver_data;
config.dev = &pdev->dev;
config.driver_data = info;
- config.init_data = &pdata->ext_regulator[i];
+ config.init_data = &ab8500_ext_regulators[i];
/* register regulator with framework */
rdev = devm_regulator_register(&pdev->dev, &info->desc,
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 47b8b6f7b571..23a401734a98 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -25,9 +25,123 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/regulator/ab8500.h>
#include <linux/slab.h>
+/* AB8500 regulators */
+enum ab8500_regulator_id {
+ AB8500_LDO_AUX1,
+ AB8500_LDO_AUX2,
+ AB8500_LDO_AUX3,
+ AB8500_LDO_INTCORE,
+ AB8500_LDO_TVOUT,
+ AB8500_LDO_AUDIO,
+ AB8500_LDO_ANAMIC1,
+ AB8500_LDO_ANAMIC2,
+ AB8500_LDO_DMIC,
+ AB8500_LDO_ANA,
+ AB8500_NUM_REGULATORS,
+};
+
+/* AB8505 regulators */
+enum ab8505_regulator_id {
+ AB8505_LDO_AUX1,
+ AB8505_LDO_AUX2,
+ AB8505_LDO_AUX3,
+ AB8505_LDO_AUX4,
+ AB8505_LDO_AUX5,
+ AB8505_LDO_AUX6,
+ AB8505_LDO_INTCORE,
+ AB8505_LDO_ADC,
+ AB8505_LDO_AUDIO,
+ AB8505_LDO_ANAMIC1,
+ AB8505_LDO_ANAMIC2,
+ AB8505_LDO_AUX8,
+ AB8505_LDO_ANA,
+ AB8505_NUM_REGULATORS,
+};
+
+/* AB8500 registers */
+enum ab8500_regulator_reg {
+ AB8500_REGUREQUESTCTRL2,
+ AB8500_REGUREQUESTCTRL3,
+ AB8500_REGUREQUESTCTRL4,
+ AB8500_REGUSYSCLKREQ1HPVALID1,
+ AB8500_REGUSYSCLKREQ1HPVALID2,
+ AB8500_REGUHWHPREQ1VALID1,
+ AB8500_REGUHWHPREQ1VALID2,
+ AB8500_REGUHWHPREQ2VALID1,
+ AB8500_REGUHWHPREQ2VALID2,
+ AB8500_REGUSWHPREQVALID1,
+ AB8500_REGUSWHPREQVALID2,
+ AB8500_REGUSYSCLKREQVALID1,
+ AB8500_REGUSYSCLKREQVALID2,
+ AB8500_REGUMISC1,
+ AB8500_VAUDIOSUPPLY,
+ AB8500_REGUCTRL1VAMIC,
+ AB8500_VPLLVANAREGU,
+ AB8500_VREFDDR,
+ AB8500_EXTSUPPLYREGU,
+ AB8500_VAUX12REGU,
+ AB8500_VRF1VAUX3REGU,
+ AB8500_VAUX1SEL,
+ AB8500_VAUX2SEL,
+ AB8500_VRF1VAUX3SEL,
+ AB8500_REGUCTRL2SPARE,
+ AB8500_REGUCTRLDISCH,
+ AB8500_REGUCTRLDISCH2,
+ AB8500_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB8505 registers */
+enum ab8505_regulator_reg {
+ AB8505_REGUREQUESTCTRL1,
+ AB8505_REGUREQUESTCTRL2,
+ AB8505_REGUREQUESTCTRL3,
+ AB8505_REGUREQUESTCTRL4,
+ AB8505_REGUSYSCLKREQ1HPVALID1,
+ AB8505_REGUSYSCLKREQ1HPVALID2,
+ AB8505_REGUHWHPREQ1VALID1,
+ AB8505_REGUHWHPREQ1VALID2,
+ AB8505_REGUHWHPREQ2VALID1,
+ AB8505_REGUHWHPREQ2VALID2,
+ AB8505_REGUSWHPREQVALID1,
+ AB8505_REGUSWHPREQVALID2,
+ AB8505_REGUSYSCLKREQVALID1,
+ AB8505_REGUSYSCLKREQVALID2,
+ AB8505_REGUVAUX4REQVALID,
+ AB8505_REGUMISC1,
+ AB8505_VAUDIOSUPPLY,
+ AB8505_REGUCTRL1VAMIC,
+ AB8505_VSMPSAREGU,
+ AB8505_VSMPSBREGU,
+ AB8505_VSAFEREGU, /* NOTE! PRCMU register */
+ AB8505_VPLLVANAREGU,
+ AB8505_EXTSUPPLYREGU,
+ AB8505_VAUX12REGU,
+ AB8505_VRF1VAUX3REGU,
+ AB8505_VSMPSASEL1,
+ AB8505_VSMPSASEL2,
+ AB8505_VSMPSASEL3,
+ AB8505_VSMPSBSEL1,
+ AB8505_VSMPSBSEL2,
+ AB8505_VSMPSBSEL3,
+ AB8505_VSAFESEL1, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL2, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL3, /* NOTE! PRCMU register */
+ AB8505_VAUX1SEL,
+ AB8505_VAUX2SEL,
+ AB8505_VRF1VAUX3SEL,
+ AB8505_VAUX4REQCTRL,
+ AB8505_VAUX4REGU,
+ AB8505_VAUX4SEL,
+ AB8505_REGUCTRLDISCH,
+ AB8505_REGUCTRLDISCH2,
+ AB8505_REGUCTRLDISCH3,
+ AB8505_CTRLVAUX5,
+ AB8505_CTRLVAUX6,
+ AB8505_NUM_REGULATOR_REGISTERS,
+};
+
/**
* struct ab8500_shared_mode - is used when mode is shared between
* two regulators.
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
new file mode 100644
index 000000000000..d8b429955d33
--- /dev/null
+++ b/drivers/regulator/atc260x-regulator.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator driver for ATC260x PMICs
+//
+// Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+// Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+
+#include <linux/mfd/atc260x/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+struct atc260x_regulator_data {
+ int voltage_time_dcdc;
+ int voltage_time_ldo;
+};
+
+static const struct linear_range atc2603c_dcdc_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1300000, 0, 13, 50000),
+ REGULATOR_LINEAR_RANGE(1950000, 14, 15, 100000),
+};
+
+static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 127, 6250),
+ REGULATOR_LINEAR_RANGE(1400000, 128, 232, 25000),
+};
+
+static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+};
+
+static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
+ REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+};
+
+static const unsigned int atc260x_ldo_voltage_range_sel[] = {
+ 0x0, 0x1,
+};
+
+static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct atc260x_regulator_data *data = rdev_get_drvdata(rdev);
+
+ if (new_selector > old_selector)
+ return data->voltage_time_dcdc;
+
+ return 0;
+}
+
+static int atc260x_ldo_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct atc260x_regulator_data *data = rdev_get_drvdata(rdev);
+
+ if (new_selector > old_selector)
+ return data->voltage_time_ldo;
+
+ return 0;
+}
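
Both helpers implement the set_voltage_time_sel() contract: return the number of microseconds the core must wait after an upward voltage step, and 0 when the rail steps down (falling edges are assumed to settle on their own here). A condensed sketch of how the core consumes that value, assuming the simplified flow below mirrors the framework:

/*
 * Sketch only: after writing the new selector, the core queries the
 * settle time and sleeps before declaring the new voltage stable.
 */
static void example_wait_for_ramp(struct regulator_dev *rdev,
				  unsigned int old_sel, unsigned int new_sel)
{
	int delay = rdev->desc->ops->set_voltage_time_sel(rdev,
							  old_sel, new_sel);

	if (delay > 0)
		usleep_range(delay, delay + 100);
}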
+
+static const struct regulator_ops atc260x_dcdc_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_dcdc_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_ldo_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_ldo_bypass_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_ldo_set_voltage_time_sel,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
+};
+
+static const struct regulator_ops atc260x_ldo_bypass_discharge_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_ldo_set_voltage_time_sel,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops atc260x_dcdc_range_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_dcdc_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_ldo_range_pick_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+ .set_voltage_time_sel = atc260x_ldo_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_dcdc_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_dcdc_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_ldo_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = atc260x_ldo_set_voltage_time_sel,
+};
+
+static const struct regulator_ops atc260x_no_ops = {
+};
+
+/*
+ * Note: LDO8 is not documented in the datasheet (v2.4), but it is
+ * supported by the vendor's driver implementation (xapp-le-kernel).
+ */
+enum atc2603c_reg_ids {
+ ATC2603C_ID_DCDC1,
+ ATC2603C_ID_DCDC2,
+ ATC2603C_ID_DCDC3,
+ ATC2603C_ID_LDO1,
+ ATC2603C_ID_LDO2,
+ ATC2603C_ID_LDO3,
+ ATC2603C_ID_LDO5,
+ ATC2603C_ID_LDO6,
+ ATC2603C_ID_LDO7,
+ ATC2603C_ID_LDO8,
+ ATC2603C_ID_LDO11,
+ ATC2603C_ID_LDO12,
+ ATC2603C_ID_SWITCHLDO1,
+ ATC2603C_ID_MAX,
+};
+
+#define atc2603c_reg_desc_dcdc(num, min, step, n_volt, vsel_h, vsel_l) { \
+ .name = "DCDC"#num, \
+ .supply_name = "dcdc"#num, \
+ .of_match = of_match_ptr("dcdc"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_DCDC##num, \
+ .ops = &atc260x_dcdc_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = n_volt, \
+ .vsel_reg = ATC2603C_PMU_DC##num##_CTL0, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_reg = ATC2603C_PMU_DC##num##_CTL0, \
+ .enable_mask = BIT(15), \
+ .enable_time = 800, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_dcdc_range(num, vsel_h, vsel_l) { \
+ .name = "DCDC"#num, \
+ .supply_name = "dcdc"#num, \
+ .of_match = of_match_ptr("dcdc"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_DCDC##num, \
+ .ops = &atc260x_dcdc_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .n_voltages = 16, \
+ .linear_ranges = atc2603c_dcdc_voltage_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(atc2603c_dcdc_voltage_ranges), \
+ .vsel_reg = ATC2603C_PMU_DC##num##_CTL0, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_reg = ATC2603C_PMU_DC##num##_CTL0, \
+ .enable_mask = BIT(15), \
+ .enable_time = 800, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_dcdc_fixed(num, min, step, n_volt, vsel_h, vsel_l) { \
+ .name = "DCDC"#num, \
+ .supply_name = "dcdc"#num, \
+ .of_match = of_match_ptr("dcdc"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_DCDC##num, \
+ .ops = &atc260x_dcdc_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = n_volt, \
+ .vsel_reg = ATC2603C_PMU_DC##num##_CTL0, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_time = 800, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_ldo(num, min, step, n_volt, vsel_h, vsel_l) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_LDO##num, \
+ .ops = &atc260x_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = n_volt, \
+ .vsel_reg = ATC2603C_PMU_LDO##num##_CTL, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_reg = ATC2603C_PMU_LDO##num##_CTL, \
+ .enable_mask = BIT(0), \
+ .enable_time = 2000, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_ldo_fixed(num, min, step, n_volt, vsel_h, vsel_l) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_LDO##num, \
+ .ops = &atc260x_ldo_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = n_volt, \
+ .vsel_reg = ATC2603C_PMU_LDO##num##_CTL, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_time = 2000, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_ldo_noops(num, vfixed) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_LDO##num, \
+ .ops = &atc260x_no_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .fixed_uV = vfixed, \
+ .n_voltages = 1, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2603c_reg_desc_ldo_switch(num, min, step, n_volt, vsel_h, vsel_l) { \
+ .name = "SWITCHLDO"#num, \
+ .supply_name = "switchldo"#num, \
+ .of_match = of_match_ptr("switchldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2603C_ID_SWITCHLDO##num, \
+ .ops = &atc260x_ldo_bypass_discharge_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = n_volt, \
+ .vsel_reg = ATC2603C_PMU_SWITCH_CTL, \
+ .vsel_mask = GENMASK(vsel_h, vsel_l), \
+ .enable_reg = ATC2603C_PMU_SWITCH_CTL, \
+ .enable_mask = BIT(15), \
+ .enable_is_inverted = true, \
+ .enable_time = 2000, \
+ .bypass_reg = ATC2603C_PMU_SWITCH_CTL, \
+ .bypass_mask = BIT(5), \
+ .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
+ .active_discharge_mask = BIT(1), \
+ .owner = THIS_MODULE, \
+}
+
+static const struct regulator_desc atc2603c_reg[] = {
+ atc2603c_reg_desc_dcdc_fixed(1, 700000, 25000, 29, 11, 7),
+ atc2603c_reg_desc_dcdc_range(2, 12, 8),
+ atc2603c_reg_desc_dcdc_fixed(3, 2600000, 100000, 8, 11, 9),
+ atc2603c_reg_desc_ldo_fixed(1, 2600000, 100000, 8, 15, 13),
+ atc2603c_reg_desc_ldo_fixed(2, 2600000, 100000, 8, 15, 13),
+ atc2603c_reg_desc_ldo_fixed(3, 1500000, 100000, 6, 15, 13),
+ atc2603c_reg_desc_ldo(5, 2600000, 100000, 8, 15, 13),
+ atc2603c_reg_desc_ldo_fixed(6, 700000, 25000, 29, 15, 11),
+ atc2603c_reg_desc_ldo(7, 1500000, 100000, 6, 15, 13),
+ atc2603c_reg_desc_ldo(8, 2300000, 100000, 11, 15, 12),
+ atc2603c_reg_desc_ldo_fixed(11, 2600000, 100000, 8, 15, 13),
+ atc2603c_reg_desc_ldo_noops(12, 1800000),
+ atc2603c_reg_desc_ldo_switch(1, 3000000, 100000, 4, 4, 3),
+};
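
The table is generated entirely by the macros above; for readability, here is a manual expansion of one entry, atc2603c_reg_desc_ldo(5, 2600000, 100000, 8, 15, 13) (illustration only; the register names come from the patch):

static const struct regulator_desc example_ldo5_expanded = {
	.name		= "LDO5",
	.supply_name	= "ldo5",
	.of_match	= of_match_ptr("ldo5"),
	.regulators_node = of_match_ptr("regulators"),
	.id		= ATC2603C_ID_LDO5,
	.ops		= &atc260x_ldo_ops,
	.type		= REGULATOR_VOLTAGE,
	.min_uV		= 2600000,		/* 2.6 V base */
	.uV_step	= 100000,		/* 100 mV per selector step */
	.n_voltages	= 8,			/* 2.6 V .. 3.3 V */
	.vsel_reg	= ATC2603C_PMU_LDO5_CTL,
	.vsel_mask	= GENMASK(15, 13),
	.enable_reg	= ATC2603C_PMU_LDO5_CTL,
	.enable_mask	= BIT(0),
	.enable_time	= 2000,
	.owner		= THIS_MODULE,
};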
+
+static const struct regulator_desc atc2603c_reg_dcdc2_ver_b =
+ atc2603c_reg_desc_dcdc(2, 1000000, 50000, 18, 12, 8);
+
+enum atc2609a_reg_ids {
+ ATC2609A_ID_DCDC0,
+ ATC2609A_ID_DCDC1,
+ ATC2609A_ID_DCDC2,
+ ATC2609A_ID_DCDC3,
+ ATC2609A_ID_DCDC4,
+ ATC2609A_ID_LDO0,
+ ATC2609A_ID_LDO1,
+ ATC2609A_ID_LDO2,
+ ATC2609A_ID_LDO3,
+ ATC2609A_ID_LDO4,
+ ATC2609A_ID_LDO5,
+ ATC2609A_ID_LDO6,
+ ATC2609A_ID_LDO7,
+ ATC2609A_ID_LDO8,
+ ATC2609A_ID_LDO9,
+ ATC2609A_ID_MAX,
+};
+
+#define atc2609a_reg_desc_dcdc(num, en_bit) { \
+ .name = "DCDC"#num, \
+ .supply_name = "dcdc"#num, \
+ .of_match = of_match_ptr("dcdc"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_DCDC##num, \
+ .ops = &atc260x_dcdc_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = 600000, \
+ .uV_step = 6250, \
+ .n_voltages = 256, \
+ .vsel_reg = ATC2609A_PMU_DC##num##_CTL0, \
+ .vsel_mask = GENMASK(15, 8), \
+ .enable_reg = ATC2609A_PMU_DC_OSC, \
+ .enable_mask = BIT(en_bit), \
+ .enable_time = 800, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2609a_reg_desc_dcdc_range(num, en_bit) { \
+ .name = "DCDC"#num, \
+ .supply_name = "dcdc"#num, \
+ .of_match = of_match_ptr("dcdc"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_DCDC##num, \
+ .ops = &atc260x_dcdc_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .n_voltages = 233, \
+ .linear_ranges = atc2609a_dcdc_voltage_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(atc2609a_dcdc_voltage_ranges), \
+ .vsel_reg = ATC2609A_PMU_DC##num##_CTL0, \
+ .vsel_mask = GENMASK(15, 8), \
+ .enable_reg = ATC2609A_PMU_DC_OSC, \
+ .enable_mask = BIT(en_bit), \
+ .enable_time = 800, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2609a_reg_desc_ldo(num) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_LDO##num, \
+ .ops = &atc260x_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = 700000, \
+ .uV_step = 100000, \
+ .n_voltages = 16, \
+ .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .vsel_mask = GENMASK(4, 1), \
+ .enable_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .enable_mask = BIT(0), \
+ .enable_time = 2000, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2609a_reg_desc_ldo_bypass(num) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_LDO##num, \
+ .ops = &atc260x_ldo_bypass_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = 2300000, \
+ .uV_step = 100000, \
+ .n_voltages = 12, \
+ .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .vsel_mask = GENMASK(5, 2), \
+ .enable_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .enable_mask = BIT(0), \
+ .enable_time = 2000, \
+ .bypass_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .bypass_mask = BIT(1), \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_LDO##num, \
+ .ops = &atc260x_ldo_range_pick_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
+ .n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+ .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .vsel_mask = GENMASK(4, 1), \
+ .vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .vsel_range_mask = BIT(5), \
+ .linear_range_selectors = atc260x_ldo_voltage_range_sel, \
+ .enable_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ .enable_mask = BIT(0), \
+ .enable_time = 2000, \
+ .owner = THIS_MODULE, \
+}
+
+#define atc2609a_reg_desc_ldo_fixed(num) { \
+ .name = "LDO"#num, \
+ .supply_name = "ldo"#num, \
+ .of_match = of_match_ptr("ldo"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = ATC2609A_ID_LDO##num, \
+ .ops = &atc260x_ldo_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = 2600000, \
+ .uV_step = 100000, \
+ .n_voltages = 8, \
+ .vsel_reg = ATC2609A_PMU_LDO##num##_CTL, \
+ .vsel_mask = GENMASK(15, 13), \
+ .enable_time = 2000, \
+ .owner = THIS_MODULE, \
+}
+
+static const struct regulator_desc atc2609a_reg[] = {
+ atc2609a_reg_desc_dcdc(0, 4),
+ atc2609a_reg_desc_dcdc(1, 5),
+ atc2609a_reg_desc_dcdc(2, 6),
+ atc2609a_reg_desc_dcdc_range(3, 7),
+ atc2609a_reg_desc_dcdc(4, 8),
+ atc2609a_reg_desc_ldo_bypass(0),
+ atc2609a_reg_desc_ldo_bypass(1),
+ atc2609a_reg_desc_ldo_bypass(2),
+ atc2609a_reg_desc_ldo_range_pick(3, 0),
+ atc2609a_reg_desc_ldo_range_pick(4, 0),
+ atc2609a_reg_desc_ldo(5),
+ atc2609a_reg_desc_ldo_range_pick(6, 1),
+ atc2609a_reg_desc_ldo_range_pick(7, 0),
+ atc2609a_reg_desc_ldo_range_pick(8, 0),
+ atc2609a_reg_desc_ldo_fixed(9),
+};
+
+static int atc260x_regulator_probe(struct platform_device *pdev)
+{
+ struct atc260x *atc260x = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = atc260x->dev;
+ struct atc260x_regulator_data *atc260x_data;
+ struct regulator_config config = {};
+ struct regulator_dev *atc260x_rdev;
+ const struct regulator_desc *regulators;
+ bool atc2603c_ver_b = false;
+ int i, nregulators;
+
+ atc260x_data = devm_kzalloc(&pdev->dev, sizeof(*atc260x_data), GFP_KERNEL);
+ if (!atc260x_data)
+ return -ENOMEM;
+
+ atc260x_data->voltage_time_dcdc = 350;
+ atc260x_data->voltage_time_ldo = 800;
+
+ switch (atc260x->ic_type) {
+ case ATC2603C:
+ regulators = atc2603c_reg;
+ nregulators = ATC2603C_ID_MAX;
+ atc2603c_ver_b = atc260x->ic_ver == ATC260X_B;
+ break;
+ case ATC2609A:
+ atc260x_data->voltage_time_dcdc = 250;
+ regulators = atc2609a_reg;
+ nregulators = ATC2609A_ID_MAX;
+ break;
+ default:
+ dev_err(dev, "unsupported ATC260X ID %d\n", atc260x->ic_type);
+ return -EINVAL;
+ }
+
+ config.dev = dev;
+ config.regmap = atc260x->regmap;
+ config.driver_data = atc260x_data;
+
+ /* Instantiate the regulators */
+ for (i = 0; i < nregulators; i++) {
+ if (atc2603c_ver_b && regulators[i].id == ATC2603C_ID_DCDC2)
+ atc260x_rdev = devm_regulator_register(&pdev->dev,
+ &atc2603c_reg_dcdc2_ver_b,
+ &config);
+ else
+ atc260x_rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i],
+ &config);
+ if (IS_ERR(atc260x_rdev)) {
+ dev_err(dev, "failed to register regulator: %d\n", i);
+ return PTR_ERR(atc260x_rdev);
+ }
+ }
+
+ return 0;
+}
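
Note the version handling: only DCDC2 differs on ATC2603C ver B, so the loop substitutes the single alternate descriptor instead of duplicating the whole table. Consumer-side nothing changes; a board driver still requests the rail by supply name, e.g. (sketch, assuming a consumer device tree node wired to this PMIC):

static int example_consumer_probe(struct device *dev)
{
	/* resolves via the "dcdc2" of_match in the descriptor tables */
	struct regulator *vdd = devm_regulator_get(dev, "dcdc2");

	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* regulator_enable() honours the 800 us enable_time above */
	return regulator_enable(vdd);
}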
+
+static struct platform_driver atc260x_regulator_driver = {
+ .probe = atc260x_regulator_probe,
+ .driver = {
+ .name = "atc260x-regulator",
+ },
+};
+
+module_platform_driver(atc260x_regulator_driver);
+
+MODULE_DESCRIPTION("Regulator driver for ATC260x PMICs");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 90cb8445f721..d260c442b788 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -1070,7 +1070,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
static int axp20x_regulator_parse_dt(struct platform_device *pdev)
{
struct device_node *np, *regulators;
- int ret;
+ int ret = 0;
u32 dcdcfreq = 0;
np = of_node_get(pdev->dev.parent->of_node);
@@ -1085,13 +1085,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
if (ret < 0) {
dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
- return ret;
}
-
of_node_put(regulators);
}
- return 0;
+ of_node_put(np);
+ return ret;
}
static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
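
The fix above pairs the of_node_get() at the top of the function with exactly one of_node_put() on every exit, instead of leaking the reference on the early error return. The pattern, reduced to a sketch:

/*
 * Sketch of the reference discipline the fix enforces: one get, one
 * put, regardless of which path exits.
 */
static int example_parse(struct device_node *parent)
{
	struct device_node *np = of_node_get(parent);
	int ret = 0;

	if (!np)
		return 0;

	/* ... parse children, setting ret on failure ... */

	of_node_put(np);	/* single drop point for the reference */
	return ret;
}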
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index d44adf7e875a..1f5f9482b209 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -244,19 +244,14 @@ static const struct regulator_desc bd70528_desc[] = {
static int bd70528_probe(struct platform_device *pdev)
{
- struct rohm_regmap_dev *bd70528;
int i;
struct regulator_config config = {
.dev = pdev->dev.parent,
};
- bd70528 = dev_get_drvdata(pdev->dev.parent);
- if (!bd70528) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- config.regmap = bd70528->regmap;
+ config.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!config.regmap)
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) {
struct regulator_dev *rdev;
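
This and the bd71828/bd718x7 hunks below share one idea: a child cell fetches the parent MFD's regmap with dev_get_regmap() instead of dereferencing the parent's private drvdata struct, which removes the header dependency. dev_get_regmap() returns NULL (not an ERR_PTR) when the parent registered no regmap, hence the -ENODEV check. As a standalone sketch:

static int example_child_probe(struct platform_device *pdev)
{
	/* NULL name: take whatever regmap the parent registered */
	struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);

	if (!regmap)
		return -ENODEV;

	/* ... hand regmap to regulator_config and register ... */
	return 0;
}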
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
index 85c0b9000963..6b12e963ed8f 100644
--- a/drivers/regulator/bd71828-regulator.c
+++ b/drivers/regulator/bd71828-regulator.c
@@ -749,19 +749,14 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
static int bd71828_probe(struct platform_device *pdev)
{
- struct rohm_regmap_dev *bd71828;
int i, j, ret;
struct regulator_config config = {
.dev = pdev->dev.parent,
};
- bd71828 = dev_get_drvdata(pdev->dev.parent);
- if (!bd71828) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- config.regmap = bd71828->regmap;
+ config.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!config.regmap)
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
struct regulator_dev *rdev;
@@ -777,7 +772,7 @@ static int bd71828_probe(struct platform_device *pdev)
return PTR_ERR(rdev);
}
for (j = 0; j < rd->reg_init_amnt; j++) {
- ret = regmap_update_bits(bd71828->regmap,
+ ret = regmap_update_bits(config.regmap,
rd->reg_inits[j].reg,
rd->reg_inits[j].mask,
rd->reg_inits[j].val);
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..8ff47ea522d6 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times, in microseconds, as per the data sheet */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
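
These constants feed the .enable_time fields added throughout the descriptor tables below. The core treats enable_time as the worst-case ramp-up delay in microseconds: regulator_enable() does not report success until that time has elapsed after the enable bit is set. A condensed sketch of that behaviour (assumption, simplified from the framework):

static int example_do_enable(struct regulator_dev *rdev)
{
	int ret = rdev->desc->ops->enable(rdev);

	if (!ret && rdev->desc->enable_time)
		usleep_range(rdev->desc->enable_time,
			     rdev->desc->enable_time + 100);
	return ret;
}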
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1497,7 +1554,7 @@ err_out:
static int bd718xx_probe(struct platform_device *pdev)
{
- struct bd718xx *mfd;
+ struct regmap *regmap;
struct regulator_config config = { 0 };
int i, j, err, omit_enable;
bool use_snvs;
@@ -1506,11 +1563,10 @@ static int bd718xx_probe(struct platform_device *pdev)
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
const struct regulator_ops **swops, **hwops;
- mfd = dev_get_drvdata(pdev->dev.parent);
- if (!mfd) {
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
dev_err(&pdev->dev, "No MFD driver data\n");
- err = -EINVAL;
- goto err;
+ return -EINVAL;
}
switch (chip) {
@@ -1533,7 +1589,7 @@ static int bd718xx_probe(struct platform_device *pdev)
}
/* Register LOCK release */
- err = regmap_update_bits(mfd->chip.regmap, BD718XX_REG_REGLOCK,
+ err = regmap_update_bits(regmap, BD718XX_REG_REGLOCK,
(REGLOCK_PWRSEQ | REGLOCK_VREG), 0);
if (err) {
dev_err(&pdev->dev, "Failed to unlock PMIC (%d)\n", err);
@@ -1552,8 +1608,7 @@ static int bd718xx_probe(struct platform_device *pdev)
* bit allowing HW defaults for power rails to be used
*/
if (!use_snvs) {
- err = regmap_update_bits(mfd->chip.regmap,
- BD718XX_REG_TRANS_COND1,
+ err = regmap_update_bits(regmap, BD718XX_REG_TRANS_COND1,
BD718XX_ON_REQ_POWEROFF_MASK |
BD718XX_SWRESET_POWEROFF_MASK |
BD718XX_WDOG_POWEROFF_MASK |
@@ -1569,7 +1624,7 @@ static int bd718xx_probe(struct platform_device *pdev)
}
config.dev = pdev->dev.parent;
- config.regmap = mfd->chip.regmap;
+ config.regmap = regmap;
/*
* There are cases when we want to leave the enable-control for
* the HW state machine and use this driver only for voltage control.
@@ -1628,7 +1683,7 @@ static int bd718xx_probe(struct platform_device *pdev)
if (!no_enable_control && (!use_snvs ||
!rdev->constraints->always_on ||
!rdev->constraints->boot_on)) {
- err = regmap_update_bits(mfd->chip.regmap, r->init.reg,
+ err = regmap_update_bits(regmap, r->init.reg,
r->init.mask, r->init.val);
if (err) {
dev_err(&pdev->dev,
@@ -1638,7 +1693,7 @@ static int bd718xx_probe(struct platform_device *pdev)
}
}
for (j = 0; j < r->additional_init_amnt; j++) {
- err = regmap_update_bits(mfd->chip.regmap,
+ err = regmap_update_bits(regmap,
r->additional_inits[j].reg,
r->additional_inits[j].mask,
r->additional_inits[j].val);
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index e690c2ce5b3c..7b0cd08db446 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * ROHM BD9571MWV-M regulator driver
+ * ROHM BD9571MWV-M and BD9574MWF-M regulator driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
*
@@ -9,6 +9,7 @@
* NOTE: VD09 is missing
*/
+#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -17,7 +18,7 @@
#include <linux/mfd/bd9571mwv.h>
struct bd9571mwv_reg {
- struct bd9571mwv *bd;
+ struct regmap *regmap;
/* DDR Backup Power */
u8 bkup_mode_cnt_keepon; /* from "rohm,ddr-backup-power" */
@@ -137,26 +138,30 @@ static const struct regulator_desc regulators[] = {
};
#ifdef CONFIG_PM_SLEEP
-static int bd9571mwv_bkup_mode_read(struct bd9571mwv *bd, unsigned int *mode)
+static int bd9571mwv_bkup_mode_read(struct bd9571mwv_reg *bdreg,
+ unsigned int *mode)
{
int ret;
- ret = regmap_read(bd->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
+ ret = regmap_read(bdreg->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
if (ret) {
- dev_err(bd->dev, "failed to read backup mode (%d)\n", ret);
+ dev_err(regmap_get_device(bdreg->regmap),
+ "failed to read backup mode (%d)\n", ret);
return ret;
}
return 0;
}
-static int bd9571mwv_bkup_mode_write(struct bd9571mwv *bd, unsigned int mode)
+static int bd9571mwv_bkup_mode_write(struct bd9571mwv_reg *bdreg,
+ unsigned int mode)
{
int ret;
- ret = regmap_write(bd->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
+ ret = regmap_write(bdreg->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
if (ret) {
- dev_err(bd->dev, "failed to configure backup mode 0x%x (%d)\n",
+ dev_err(regmap_get_device(bdreg->regmap),
+ "failed to configure backup mode 0x%x (%d)\n",
mode, ret);
return ret;
}
@@ -194,7 +199,7 @@ static ssize_t backup_mode_store(struct device *dev,
* Configure DDR Backup Mode, to change the role of the accessory power
* switch from a power switch to a wake-up switch, or vice versa
*/
- ret = bd9571mwv_bkup_mode_read(bdreg->bd, &mode);
+ ret = bd9571mwv_bkup_mode_read(bdreg, &mode);
if (ret)
return ret;
@@ -202,7 +207,7 @@ static ssize_t backup_mode_store(struct device *dev,
if (bdreg->bkup_mode_enabled)
mode |= bdreg->bkup_mode_cnt_keepon;
- ret = bd9571mwv_bkup_mode_write(bdreg->bd, mode);
+ ret = bd9571mwv_bkup_mode_write(bdreg, mode);
if (ret)
return ret;
@@ -221,7 +226,7 @@ static int bd9571mwv_suspend(struct device *dev)
return 0;
/* Save DDR Backup Mode */
- ret = bd9571mwv_bkup_mode_read(bdreg->bd, &mode);
+ ret = bd9571mwv_bkup_mode_read(bdreg, &mode);
if (ret)
return ret;
@@ -235,7 +240,7 @@ static int bd9571mwv_suspend(struct device *dev)
mode |= bdreg->bkup_mode_cnt_keepon;
if (mode != bdreg->bkup_mode_cnt_saved)
- return bd9571mwv_bkup_mode_write(bdreg->bd, mode);
+ return bd9571mwv_bkup_mode_write(bdreg, mode);
return 0;
}
@@ -248,7 +253,7 @@ static int bd9571mwv_resume(struct device *dev)
return 0;
/* Restore DDR Backup Mode */
- return bd9571mwv_bkup_mode_write(bdreg->bd, bdreg->bkup_mode_cnt_saved);
+ return bd9571mwv_bkup_mode_write(bdreg, bdreg->bkup_mode_cnt_saved);
}
static const struct dev_pm_ops bd9571mwv_pm = {
@@ -268,51 +273,54 @@ static int bd9571mwv_regulator_remove(struct platform_device *pdev)
static int bd9571mwv_regulator_probe(struct platform_device *pdev)
{
- struct bd9571mwv *bd = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct bd9571mwv_reg *bdreg;
struct regulator_dev *rdev;
unsigned int val;
int i;
+ enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
bdreg = devm_kzalloc(&pdev->dev, sizeof(*bdreg), GFP_KERNEL);
if (!bdreg)
return -ENOMEM;
- bdreg->bd = bd;
+ bdreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
platform_set_drvdata(pdev, bdreg);
config.dev = &pdev->dev;
- config.dev->of_node = bd->dev->of_node;
- config.driver_data = bd;
- config.regmap = bd->regmap;
+ config.dev->of_node = pdev->dev.parent->of_node;
+ config.driver_data = bdreg;
+ config.regmap = bdreg->regmap;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ /* BD9574MWF supports DVFS only */
+ if (chip == ROHM_CHIP_TYPE_BD9574 && regulators[i].id != DVFS)
+ continue;
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
&config);
if (IS_ERR(rdev)) {
- dev_err(bd->dev, "failed to register %s regulator\n",
+ dev_err(&pdev->dev, "failed to register %s regulator\n",
pdev->name);
return PTR_ERR(rdev);
}
}
val = 0;
- of_property_read_u32(bd->dev->of_node, "rohm,ddr-backup-power", &val);
+ of_property_read_u32(config.dev->of_node, "rohm,ddr-backup-power", &val);
if (val & ~BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK) {
- dev_err(bd->dev, "invalid %s mode %u\n",
+ dev_err(&pdev->dev, "invalid %s mode %u\n",
"rohm,ddr-backup-power", val);
return -EINVAL;
}
bdreg->bkup_mode_cnt_keepon = val;
- bdreg->rstbmode_level = of_property_read_bool(bd->dev->of_node,
+ bdreg->rstbmode_level = of_property_read_bool(config.dev->of_node,
"rohm,rstbmode-level");
- bdreg->rstbmode_pulse = of_property_read_bool(bd->dev->of_node,
+ bdreg->rstbmode_pulse = of_property_read_bool(config.dev->of_node,
"rohm,rstbmode-pulse");
if (bdreg->rstbmode_level && bdreg->rstbmode_pulse) {
- dev_err(bd->dev, "only one rohm,rstbmode-* may be specified");
+ dev_err(&pdev->dev, "only one rohm,rstbmode-* may be specified");
return -EINVAL;
}
@@ -336,7 +344,8 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
}
static const struct platform_device_id bd9571mwv_regulator_id_table[] = {
- { "bd9571mwv-regulator", },
+ { "bd9571mwv-regulator", ROHM_CHIP_TYPE_BD9571 },
+ { "bd9574mwf-regulator", ROHM_CHIP_TYPE_BD9574 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, bd9571mwv_regulator_id_table);
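
With the driver now serving two chips, probe distinguishes them through the id table's driver_data, the standard pattern for multi-variant platform drivers (sketch, helper hypothetical):

static int example_get_chip(struct platform_device *pdev,
			    enum rohm_chip_type *chip)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	if (!id)
		return -ENODEV;

	*chip = (enum rohm_chip_type)id->driver_data;
	return 0;
}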
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca03d8e70bd1..16114aea099a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1617,7 +1617,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name)
{
struct regulator *regulator;
- int err;
+ int err = 0;
if (dev) {
char buf[REG_STR_SIZE];
@@ -1663,8 +1663,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
}
}
- regulator->debugfs = debugfs_create_dir(supply_name,
- rdev->debugfs);
+ if (err != -EEXIST)
+ regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
if (!regulator->debugfs) {
rdev_dbg(rdev, "Failed to create debugfs directory\n");
} else {
@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
if (r == rdev) {
dev_err(dev, "Supply for %s (%s) resolved to itself\n",
rdev->desc->name, rdev->supply_name);
- if (!have_full_constraints())
- return -EINVAL;
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
r = dummy_regulator_rdev;
get_device(&r->dev);
}
@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
+ regulator_unlock(rdev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
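
The reworked function is a textbook double-checked locking fix: the unlocked rdev->supply test stays as a fast path, but the decisive test is repeated under rdev's lock before set_supply() commits, so two tasks resolving the same supply concurrently cannot both attach it. Stripped of the regulator specifics, the shape is (sketch with hypothetical example_* types):

struct example_rdev {
	struct mutex mutex;
	struct example_supply *supply;
};

static int example_resolve(struct example_rdev *rdev,
			   struct example_supply *r)
{
	if (rdev->supply)		/* unlocked fast path */
		return 0;

	mutex_lock(&rdev->mutex);
	if (rdev->supply) {		/* resolved by a concurrent task */
		mutex_unlock(&rdev->mutex);
		return 0;
	}
	rdev->supply = r;		/* commit under the lock */
	mutex_unlock(&rdev->mutex);
	return 0;
}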
/* Internal regulator request function */
@@ -2020,7 +2042,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
* Returns a struct regulator corresponding to the regulator producer,
* or IS_ERR() condition containing errno.
*
- * Use of supply names configured via regulator_set_device_supply() is
+ * Use of supply names configured via set_consumer_device_supply() is
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
@@ -2047,7 +2069,7 @@ EXPORT_SYMBOL_GPL(regulator_get);
* regulator off for correct operation of the hardware they are
* controlling.
*
- * Use of supply names configured via regulator_set_device_supply() is
+ * Use of supply names configured via set_consumer_device_supply() is
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
@@ -2073,7 +2095,7 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
* disrupting the operation of drivers that can handle absent
* supplies.
*
- * Use of supply names configured via regulator_set_device_supply() is
+ * Use of supply names configured via set_consumer_device_supply() is
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
@@ -4131,7 +4153,11 @@ int regulator_sync_voltage(struct regulator *regulator)
if (ret < 0)
goto out;
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ /* balance only, if regulator is coupled */
+ if (rdev->coupling_desc.n_coupled > 1)
+ ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+ else
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
out:
regulator_unlock(rdev);
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 74ad92dc664a..88c6bd5b6c78 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -550,7 +550,7 @@ static int mcp16502_probe(struct i2c_client *client,
config.regmap = rmap;
config.driver_data = mcp;
- mcp->lpm = devm_gpiod_get(dev, "lpm", GPIOD_OUT_LOW);
+ mcp->lpm = devm_gpiod_get_optional(dev, "lpm", GPIOD_OUT_LOW);
if (IS_ERR(mcp->lpm)) {
dev_err(dev, "failed to get lpm pin: %ld\n", PTR_ERR(mcp->lpm));
return PTR_ERR(mcp->lpm);
diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
new file mode 100644
index 000000000000..d49a1534d8e9
--- /dev/null
+++ b/drivers/regulator/mt6315-regulator.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2021 MediaTek Inc.
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6315-regulator.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/spmi.h>
+
+#define MT6315_BUCK_MODE_AUTO 0
+#define MT6315_BUCK_MODE_FORCE_PWM 1
+#define MT6315_BUCK_MODE_LP 2
+
+struct mt6315_regulator_info {
+ struct regulator_desc desc;
+ u32 status_reg;
+ u32 lp_mode_mask;
+ u32 lp_mode_shift;
+};
+
+struct mt_regulator_init_data {
+ u32 modeset_mask[MT6315_VBUCK_MAX];
+};
+
+struct mt6315_chip {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#define MT_BUCK(_name, _bid, _vsel) \
+[_bid] = { \
+ .desc = { \
+ .name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .regulators_node = "regulators", \
+ .ops = &mt6315_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _bid, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 0xbf, \
+ .linear_ranges = mt_volt_range1, \
+ .n_linear_ranges = ARRAY_SIZE(mt_volt_range1), \
+ .vsel_reg = _vsel, \
+ .vsel_mask = 0xff, \
+ .enable_reg = MT6315_BUCK_TOP_CON0, \
+ .enable_mask = BIT(_bid), \
+ .of_map_mode = mt6315_map_mode, \
+ }, \
+ .status_reg = _bid##_DBG4, \
+ .lp_mode_mask = BIT(_bid), \
+ .lp_mode_shift = _bid, \
+}
+
+static const struct linear_range mt_volt_range1[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
+};
+
+static unsigned int mt6315_map_mode(u32 mode)
+{
+ switch (mode) {
+ case MT6315_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case MT6315_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ case MT6315_BUCK_MODE_LP:
+ return REGULATOR_MODE_IDLE;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
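
of_map_mode is the hook the core uses while parsing mode properties from the device tree (e.g. regulator-allowed-modes), translating the chip's raw mode numbers into REGULATOR_MODE_* flags; returning REGULATOR_MODE_INVALID tells the parser to reject the value. A sketch of the call site's shape (assumption, simplified):

static unsigned int example_parse_dt_mode(u32 hw_mode)
{
	unsigned int mode = mt6315_map_mode(hw_mode);

	if (mode == REGULATOR_MODE_INVALID)
		pr_warn("unsupported hardware mode %u\n", hw_mode);

	return mode;
}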
+
+static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct mt_regulator_init_data *init = rdev_get_drvdata(rdev);
+ const struct mt6315_regulator_info *info;
+ int ret, regval;
+ u32 modeset_mask;
+
+ info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
+ modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
+ ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_4PHASE_ANA_CON42, &regval);
+ if (ret != 0) {
+ dev_notice(&rdev->dev, "Failed to get mode: %d\n", ret);
+ return ret;
+ }
+
+ if ((regval & modeset_mask) == modeset_mask)
+ return REGULATOR_MODE_FAST;
+
+ ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_CON1, &regval);
+ if (ret != 0) {
+ dev_notice(&rdev->dev, "Failed to get lp mode: %d\n", ret);
+ return ret;
+ }
+
+ if (regval & info->lp_mode_mask)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mt6315_regulator_set_mode(struct regulator_dev *rdev,
+ u32 mode)
+{
+ struct mt_regulator_init_data *init = rdev_get_drvdata(rdev);
+ const struct mt6315_regulator_info *info;
+ int ret, val, curr_mode;
+ u32 modeset_mask;
+
+ info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
+ modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
+ curr_mode = mt6315_regulator_get_mode(rdev);
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ ret = regmap_update_bits(rdev->regmap,
+ MT6315_BUCK_TOP_4PHASE_ANA_CON42,
+ modeset_mask,
+ modeset_mask);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ if (curr_mode == REGULATOR_MODE_FAST) {
+ ret = regmap_update_bits(rdev->regmap,
+ MT6315_BUCK_TOP_4PHASE_ANA_CON42,
+ modeset_mask,
+ 0);
+ } else if (curr_mode == REGULATOR_MODE_IDLE) {
+ ret = regmap_update_bits(rdev->regmap,
+ MT6315_BUCK_TOP_CON1,
+ info->lp_mode_mask,
+ 0);
+ usleep_range(100, 110);
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = MT6315_BUCK_MODE_LP >> 1;
+ val <<= info->lp_mode_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ MT6315_BUCK_TOP_CON1,
+ info->lp_mode_mask,
+ val);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_notice(&rdev->dev, "Unsupported mode: %d\n", mode);
+ break;
+ }
+
+ if (ret != 0) {
+ dev_notice(&rdev->dev, "Failed to set mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt6315_get_status(struct regulator_dev *rdev)
+{
+ const struct mt6315_regulator_info *info;
+ int ret;
+ u32 regval;
+
+ info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
+ ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+ if (ret < 0) {
+ dev_notice(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & BIT(0)) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static const struct regulator_ops mt6315_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6315_get_status,
+ .set_mode = mt6315_regulator_set_mode,
+ .get_mode = mt6315_regulator_get_mode,
+};
+
+static const struct mt6315_regulator_info mt6315_regulators[MT6315_VBUCK_MAX] = {
+ MT_BUCK("vbuck1", MT6315_VBUCK1, MT6315_BUCK_TOP_ELR0),
+ MT_BUCK("vbuck2", MT6315_VBUCK2, MT6315_BUCK_TOP_ELR2),
+ MT_BUCK("vbuck3", MT6315_VBUCK3, MT6315_BUCK_TOP_ELR4),
+ MT_BUCK("vbuck4", MT6315_VBUCK4, MT6315_BUCK_TOP_ELR6),
+};
+
+static const struct regmap_config mt6315_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x16d0,
+ .fast_io = true,
+};
+
+static const struct of_device_id mt6315_of_match[] = {
+ {
+ .compatible = "mediatek,mt6315-regulator",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mt6315_of_match);
+
+static int mt6315_regulator_probe(struct spmi_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct mt6315_chip *chip;
+ struct mt_regulator_init_data *init_data;
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+
+ regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
+ /* devm_regmap_init_spmi_ext() returns ERR_PTR() on failure, never NULL */
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ init_data = devm_kzalloc(dev, sizeof(struct mt_regulator_init_data), GFP_KERNEL);
+ if (!init_data)
+ return -ENOMEM;
+
+ switch (pdev->usid) {
+ case MT6315_PP:
+ init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1) | BIT(MT6315_VBUCK2) |
+ BIT(MT6315_VBUCK4);
+ break;
+ case MT6315_SP:
+ case MT6315_RP:
+ init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1) | BIT(MT6315_VBUCK2);
+ break;
+ default:
+ init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1);
+ break;
+ }
+ for (i = MT6315_VBUCK2; i < MT6315_VBUCK_MAX; i++)
+ init_data->modeset_mask[i] = BIT(i);
+
+ chip->dev = dev;
+ chip->regmap = regmap;
+ dev_set_drvdata(dev, chip);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ for (i = MT6315_VBUCK1; i < MT6315_VBUCK_MAX; i++) {
+ config.driver_data = init_data;
+ rdev = devm_regulator_register(dev, &mt6315_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_notice(dev, "Failed to register %s\n", mt6315_regulators[i].desc.name);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static void mt6315_regulator_shutdown(struct spmi_device *pdev)
+{
+ struct mt6315_chip *chip = dev_get_drvdata(&pdev->dev);
+ int ret = 0;
+
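+	/* Unlock TMA protection, arm the power-off sequence, then relock. */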
+ ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, PROTECTION_KEY_H);
+ ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, PROTECTION_KEY);
+ ret |= regmap_update_bits(chip->regmap, MT6315_TOP2_ELR7, 1, 1);
+ ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, 0);
+ ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, 0);
+ if (ret < 0)
+ dev_notice(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
+ pdev->usid, ret);
+}
+
+static struct spmi_driver mt6315_regulator_driver = {
+ .driver = {
+ .name = "mt6315-regulator",
+ .of_match_table = mt6315_of_match,
+ },
+ .probe = mt6315_regulator_probe,
+ .shutdown = mt6315_regulator_shutdown,
+};
+
+module_spmi_driver(mt6315_regulator_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6315 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c
new file mode 100644
index 000000000000..d3d876198d6e
--- /dev/null
+++ b/drivers/regulator/mtk-dvfsrc-regulator.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 MediaTek Inc.
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/soc/mediatek/mtk_dvfsrc.h>
+
+#define DVFSRC_ID_VCORE 0
+#define DVFSRC_ID_VSCP 1
+
+#define MT_DVFSRC_REGULAR(match, _name, _volt_table) \
+[DVFSRC_ID_##_name] = { \
+ .desc = { \
+ .name = match, \
+ .of_match = of_match_ptr(match), \
+ .ops = &dvfsrc_vcore_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = DVFSRC_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+ }, \
+}
+
+/*
+ * DVFSRC regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ */
+
+struct dvfsrc_regulator {
+ struct regulator_desc desc;
+};
+
+/*
+ * MTK DVFSRC regulators' init data
+ *
+ * @size: num of regulators
+ * @regulator_info: regulator info.
+ */
+struct dvfsrc_regulator_init_data {
+ u32 size;
+ struct dvfsrc_regulator *regulator_info;
+};
+
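+/*
+ * The DVFSRC regulators are children of the DVFSRC controller; voltage
+ * requests and queries are forwarded to the parent device, which owns
+ * the hardware.
+ */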
+static inline struct device *to_dvfsrc_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent;
+}
+
+static int dvfsrc_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct device *dvfsrc_dev = to_dvfsrc_dev(rdev);
+ int id = rdev_get_id(rdev);
+
+ if (id == DVFSRC_ID_VCORE)
+ mtk_dvfsrc_send_request(dvfsrc_dev,
+ MTK_DVFSRC_CMD_VCORE_REQUEST,
+ selector);
+ else if (id == DVFSRC_ID_VSCP)
+ mtk_dvfsrc_send_request(dvfsrc_dev,
+ MTK_DVFSRC_CMD_VSCP_REQUEST,
+ selector);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dvfsrc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct device *dvfsrc_dev = to_dvfsrc_dev(rdev);
+ int id = rdev_get_id(rdev);
+ int val, ret;
+
+ if (id == DVFSRC_ID_VCORE)
+ ret = mtk_dvfsrc_query_info(dvfsrc_dev,
+ MTK_DVFSRC_CMD_VCORE_LEVEL_QUERY,
+ &val);
+ else if (id == DVFSRC_ID_VSCP)
+ ret = mtk_dvfsrc_query_info(dvfsrc_dev,
+ MTK_DVFSRC_CMD_VSCP_LEVEL_QUERY,
+ &val);
+ else
+ return -EINVAL;
+
+ if (ret != 0)
+ return ret;
+
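+	/* The reported level index doubles as the regulator selector. */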
+ return val;
+}
+
+static const struct regulator_ops dvfsrc_vcore_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = dvfsrc_get_voltage_sel,
+ .set_voltage_sel = dvfsrc_set_voltage_sel,
+};
+
+static const unsigned int mt8183_voltages[] = {
+ 725000,
+ 800000,
+};
+
+static struct dvfsrc_regulator mt8183_regulators[] = {
+ MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE,
+ mt8183_voltages),
+};
+
+static const struct dvfsrc_regulator_init_data regulator_mt8183_data = {
+ .size = ARRAY_SIZE(mt8183_regulators),
+ .regulator_info = &mt8183_regulators[0],
+};
+
+static const unsigned int mt6873_voltages[] = {
+ 575000,
+ 600000,
+ 650000,
+ 725000,
+};
+
+static struct dvfsrc_regulator mt6873_regulators[] = {
+ MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE,
+ mt6873_voltages),
+ MT_DVFSRC_REGULAR("dvfsrc-vscp", VSCP,
+ mt6873_voltages),
+};
+
+static const struct dvfsrc_regulator_init_data regulator_mt6873_data = {
+ .size = ARRAY_SIZE(mt6873_regulators),
+ .regulator_info = &mt6873_regulators[0],
+};
+
+static const struct of_device_id mtk_dvfsrc_regulator_match[] = {
+ {
+ .compatible = "mediatek,mt8183-dvfsrc",
+ .data = &regulator_mt8183_data,
+ }, {
+ .compatible = "mediatek,mt8192-dvfsrc",
+ .data = &regulator_mt6873_data,
+ }, {
+ .compatible = "mediatek,mt6873-dvfsrc",
+ .data = &regulator_mt6873_data,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match);
+
+static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ const struct dvfsrc_regulator_init_data *regulator_init_data;
+ struct dvfsrc_regulator *mt_regulators;
+ int i;
+
+ match = of_match_node(mtk_dvfsrc_regulator_match, dev->parent->of_node);
+
+ if (!match) {
+ dev_err(dev, "invalid compatible string\n");
+ return -ENODEV;
+ }
+
+ regulator_init_data = match->data;
+
+ mt_regulators = regulator_init_data->regulator_info;
+ for (i = 0; i < regulator_init_data->size; i++) {
+ config.dev = dev->parent;
+ config.driver_data = (mt_regulators + i);
+ rdev = devm_regulator_register(dev->parent,
+ &(mt_regulators + i)->desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register %s\n",
+ (mt_regulators + i)->desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver mtk_dvfsrc_regulator_driver = {
+ .driver = {
+ .name = "mtk-dvfsrc-regulator",
+ },
+ .probe = dvfsrc_vcore_regulator_probe,
+};
+
+static int __init mtk_dvfsrc_regulator_init(void)
+{
+ return platform_driver_register(&mtk_dvfsrc_regulator_driver);
+}
+subsys_initcall(mtk_dvfsrc_regulator_init);
+
+static void __exit mtk_dvfsrc_regulator_exit(void)
+{
+ platform_driver_unregister(&mtk_dvfsrc_regulator_driver);
+}
+module_exit(mtk_dvfsrc_regulator_exit);
+
+MODULE_AUTHOR("Arvin wang <arvin.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index cb29421d745a..833d398c6aa2 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -5,6 +5,7 @@
*/
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -32,6 +33,7 @@ struct pca9450_regulator_desc {
struct pca9450 {
struct device *dev;
struct regmap *regmap;
+ struct gpio_desc *sd_vsel_gpio;
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
@@ -795,6 +797,26 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ /* Set reset behavior on assertion of WDOG_B signal */
+ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
+ WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
+ return ret;
+ }
+
+ /*
+ * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
+ * This is only valid if the SD_VSEL input of the PMIC is high. Let's
+ * check if the pin is available as GPIO and set it to high.
+ */
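+	/*
+	 * A hypothetical binding snippet for reference:
+	 *   sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+	 */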
+ pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(pca9450->sd_vsel_gpio)) {
+ dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
+		return PTR_ERR(pca9450->sd_vsel_gpio);
+ }
+
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc");
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..9b28bd63208d 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -114,7 +114,6 @@ enum swxilim_bits {
#define PF8X00_SWXILIM_SHIFT 3
#define PF8X00_SWXILIM_MASK GENMASK(4, 3)
#define PF8X00_SWXPHASE_MASK GENMASK(2, 0)
-#define PF8X00_SWXPHASE_DEFAULT 0
#define PF8X00_SWXPHASE_SHIFT 7
enum pf8x00_devid {
@@ -126,10 +125,12 @@ enum pf8x00_devid {
#define PF8X00_DEVICE_FAM_MASK GENMASK(7, 4)
#define PF8X00_DEVICE_ID_MASK GENMASK(3, 0)
-struct pf8x00_regulator {
+struct pf8x00_regulator_data {
struct regulator_desc desc;
- u8 ilim;
- u8 phase_shift;
+ unsigned int suspend_enable_reg;
+ unsigned int suspend_enable_mask;
+ unsigned int suspend_voltage_reg;
+ unsigned int suspend_voltage_cache;
};
struct pf8x00_chip {
@@ -150,35 +151,16 @@ static const int pf8x00_ldo_voltages[] = {
3100000, 3150000, 3200000, 3300000, 3350000, 1650000, 1700000, 5000000,
};
-#define SWV(i) (6250 * i + 400000)
-#define SWV_LINE(i) SWV(i*8+0), SWV(i*8+1), SWV(i*8+2), SWV(i*8+3), \
- SWV(i*8+4), SWV(i*8+5), SWV(i*8+6), SWV(i*8+7)
+/* Output: 2.1A to 4.5A */
+static const unsigned int pf8x00_sw_current_table[] = {
+ 2100000, 2600000, 3000000, 4500000,
+};
/* Output: 0.4V to 1.8V */
-static const int pf8x00_sw1_to_6_voltages[] = {
- SWV_LINE(0),
- SWV_LINE(1),
- SWV_LINE(2),
- SWV_LINE(3),
- SWV_LINE(4),
- SWV_LINE(5),
- SWV_LINE(6),
- SWV_LINE(7),
- SWV_LINE(8),
- SWV_LINE(9),
- SWV_LINE(10),
- SWV_LINE(11),
- SWV_LINE(12),
- SWV_LINE(13),
- SWV_LINE(14),
- SWV_LINE(15),
- SWV_LINE(16),
- SWV_LINE(17),
- SWV_LINE(18),
- SWV_LINE(19),
- SWV_LINE(20),
- SWV_LINE(21),
- 1500000, 1800000,
+#define PF8X00_SW1_6_VOLTAGE_NUM	0xB2
+static const struct linear_range pf8x00_sw1_to_6_voltages[] = {
+ REGULATOR_LINEAR_RANGE(400000, 0x00, 0xB0, 6250),
+ REGULATOR_LINEAR_RANGE(1800000, 0xB1, 0xB1, 0),
};
/* Output: 1.0V to 4.1V */
@@ -194,15 +176,10 @@ static const int pf8x00_vsnvs_voltages[] = {
0, 1800000, 3000000, 3300000,
};
-static struct pf8x00_regulator *desc_to_regulator(const struct regulator_desc *desc)
-{
- return container_of(desc, struct pf8x00_regulator, desc);
-}
-
-static void swxilim_select(const struct regulator_desc *desc, int ilim)
+static void swxilim_select(struct pf8x00_chip *chip, int id, int ilim)
{
- struct pf8x00_regulator *data = desc_to_regulator(desc);
u8 ilim_sel;
+ u8 reg = PF8X00_SW_BASE(id) + SW_CONFIG2;
switch (ilim) {
case 2100:
@@ -222,43 +199,130 @@ static void swxilim_select(const struct regulator_desc *desc, int ilim)
break;
}
- data->ilim = ilim_sel;
+ regmap_update_bits(chip->regmap, reg,
+ PF8X00_SWXILIM_MASK,
+ ilim_sel << PF8X00_SWXILIM_SHIFT);
}
-static int pf8x00_of_parse_cb(struct device_node *np,
+static void handle_ilim_property(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
{
- struct pf8x00_regulator *data = desc_to_regulator(desc);
struct pf8x00_chip *chip = config->driver_data;
+ int ret;
+ int val;
+
+ if ((desc->id >= PF8X00_BUCK1) && (desc->id <= PF8X00_BUCK7)) {
+ ret = of_property_read_u32(np, "nxp,ilim-ma", &val);
+ if (ret) {
+ dev_dbg(chip->dev, "unspecified ilim for BUCK%d, use value stored in OTP\n",
+ desc->id - PF8X00_LDO4);
+ return;
+ }
+
+ dev_warn(chip->dev, "nxp,ilim-ma is deprecated, please use regulator-max-microamp\n");
+ swxilim_select(chip, desc->id, val);
+
+	} else {
+		dev_warn(chip->dev, "nxp,ilim-ma used with incorrect regulator (%d)\n", desc->id);
+	}
+}
+
+static void handle_shift_property(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ unsigned char id = desc->id - PF8X00_LDO4;
+ unsigned char reg = PF8X00_SW_BASE(id) + SW_CONFIG2;
+ struct pf8x00_chip *chip = config->driver_data;
+
int phase;
int val;
int ret;
+ if ((desc->id >= PF8X00_BUCK1) && (desc->id <= PF8X00_BUCK7)) {
+ ret = of_property_read_u32(np, "nxp,phase-shift", &val);
+ if (ret) {
+ dev_dbg(chip->dev,
+ "unspecified phase-shift for BUCK%d, using OTP configuration\n",
+ id);
+ return;
+ }
- ret = of_property_read_u32(np, "nxp,ilim-ma", &val);
- if (ret)
- dev_dbg(chip->dev, "unspecified ilim for BUCK%d, use 2100 mA\n",
- desc->id - PF8X00_LDO4);
+ if (val < 0 || val > 315 || val % 45 != 0) {
+ dev_warn(config->dev,
+ "invalid phase_shift %d for BUCK%d, using OTP configuration\n",
+ val, id);
+ return;
+ }
- swxilim_select(desc, val);
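+		/*
+		 * The register encodes the shift in 45-degree steps starting
+		 * at 45 (0 = 45, ..., 6 = 315), with 7 meaning no shift, so
+		 * remap a requested 0 degrees accordingly.
+		 */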
+ phase = val / 45;
- ret = of_property_read_u32(np, "nxp,phase-shift", &val);
- if (ret) {
- dev_dbg(chip->dev,
- "unspecified phase-shift for BUCK%d, use 0 degrees\n",
- desc->id - PF8X00_LDO4);
- val = PF8X00_SWXPHASE_DEFAULT;
+ if (phase >= 1)
+ phase -= 1;
+ else
+ phase = PF8X00_SWXPHASE_SHIFT;
+
+ regmap_update_bits(chip->regmap, reg,
+ PF8X00_SWXPHASE_MASK,
+ phase);
+	} else {
+		dev_warn(chip->dev, "nxp,phase-shift used with incorrect regulator (%d)\n", id);
+	}
+}
+
+static int pf8x00_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ handle_ilim_property(np, desc, config);
+ handle_shift_property(np, desc, config);
+
+ return 0;
+}
+
+static int pf8x00_suspend_enable(struct regulator_dev *rdev)
+{
+ struct pf8x00_regulator_data *regl = rdev_get_drvdata(rdev);
+ struct regmap *rmap = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(rmap, regl->suspend_enable_reg,
+ regl->suspend_enable_mask,
+ regl->suspend_enable_mask);
+}
+
+static int pf8x00_suspend_disable(struct regulator_dev *rdev)
+{
+ struct pf8x00_regulator_data *regl = rdev_get_drvdata(rdev);
+ struct regmap *rmap = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(rmap, regl->suspend_enable_reg,
+ regl->suspend_enable_mask, 0);
+}
+
+static int pf8x00_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct pf8x00_regulator_data *regl = rdev_get_drvdata(rdev);
+ int ret;
+
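+	/* Skip the bus write if this suspend voltage is already programmed. */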
+ if (regl->suspend_voltage_cache == uV)
+ return 0;
+
+ ret = regulator_map_voltage_iterate(rdev, uV, uV);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev), "failed to map %i uV\n", uV);
+ return ret;
}
- phase = val / 45;
- if ((phase * 45) != val) {
- dev_warn(config->dev,
- "invalid phase_shift %d for BUCK%d, use 0 degrees\n",
- (phase * 45), desc->id - PF8X00_LDO4);
- phase = PF8X00_SWXPHASE_SHIFT;
+ dev_dbg(rdev_get_dev(rdev), "uV: %i, reg: 0x%x, msk: 0x%x, val: 0x%x\n",
+ uV, regl->suspend_voltage_reg, regl->desc.vsel_mask, ret);
+ ret = regmap_update_bits(rdev->regmap, regl->suspend_voltage_reg,
+ regl->desc.vsel_mask, ret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev), "failed to set %i uV\n", uV);
+ return ret;
}
- data->phase_shift = (phase >= 1) ? phase - 1 : PF8X00_SWXPHASE_SHIFT;
+ regl->suspend_voltage_cache = uV;
return 0;
}
@@ -270,15 +334,37 @@ static const struct regulator_ops pf8x00_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_suspend_enable = pf8x00_suspend_enable,
+ .set_suspend_disable = pf8x00_suspend_disable,
+ .set_suspend_voltage = pf8x00_set_suspend_voltage,
};
-static const struct regulator_ops pf8x00_buck_ops = {
+
+static const struct regulator_ops pf8x00_buck1_6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_suspend_enable = pf8x00_suspend_enable,
+ .set_suspend_disable = pf8x00_suspend_disable,
+ .set_suspend_voltage = pf8x00_set_suspend_voltage,
+};
+
+static const struct regulator_ops pf8x00_buck7_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_suspend_enable = pf8x00_suspend_enable,
+ .set_suspend_disable = pf8x00_suspend_disable,
};
static const struct regulator_ops pf8x00_vsnvs_ops = {
@@ -310,6 +396,9 @@ static const struct regulator_ops pf8x00_vsnvs_ops = {
.disable_val = 0x0, \
.enable_mask = 2, \
}, \
+ .suspend_enable_reg = (base) + LDO_CONFIG2, \
+ .suspend_enable_mask = 1, \
+ .suspend_voltage_reg = (base) + LDO_STBY_VOLT, \
}
#define PF8X00BUCK(_id, _name, base, voltages) \
@@ -319,14 +408,54 @@ static const struct regulator_ops pf8x00_vsnvs_ops = {
.of_match = _name, \
.regulators_node = "regulators", \
.of_parse_cb = pf8x00_of_parse_cb, \
- .n_voltages = ARRAY_SIZE(voltages), \
- .ops = &pf8x00_buck_ops, \
+		.n_voltages = PF8X00_SW1_6_VOLTAGE_NUM, \
+ .ops = &pf8x00_buck1_6_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PF8X00_BUCK ## _id, \
.owner = THIS_MODULE, \
+ .ramp_delay = 19000, \
+ .linear_ranges = pf8x00_sw1_to_6_voltages, \
+ .n_linear_ranges = \
+ ARRAY_SIZE(pf8x00_sw1_to_6_voltages), \
+ .vsel_reg = (base) + SW_RUN_VOLT, \
+ .vsel_mask = 0xff, \
+ .curr_table = pf8x00_sw_current_table, \
+ .n_current_limits = \
+ ARRAY_SIZE(pf8x00_sw_current_table), \
+ .csel_reg = (base) + SW_CONFIG2, \
+ .csel_mask = PF8X00_SWXILIM_MASK, \
+ .enable_reg = (base) + SW_MODE1, \
+ .enable_val = 0x3, \
+ .disable_val = 0x0, \
+ .enable_mask = 0x3, \
+ .enable_time = 500, \
+ }, \
+ .suspend_enable_reg = (base) + SW_MODE1, \
+ .suspend_enable_mask = 0xc, \
+ .suspend_voltage_reg = (base) + SW_STBY_VOLT, \
+ }
+
+#define PF8X00BUCK7(_name, base, voltages) \
+ [PF8X00_BUCK7] = { \
+ .desc = { \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .of_parse_cb = pf8x00_of_parse_cb, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pf8x00_buck7_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PF8X00_BUCK7, \
+ .owner = THIS_MODULE, \
+ .ramp_delay = 19000, \
.volt_table = voltages, \
.vsel_reg = (base) + SW_RUN_VOLT, \
.vsel_mask = 0xff, \
+ .curr_table = pf8x00_sw_current_table, \
+ .n_current_limits = \
+ ARRAY_SIZE(pf8x00_sw_current_table), \
+ .csel_reg = (base) + SW_CONFIG2, \
+ .csel_mask = PF8X00_SWXILIM_MASK, \
.enable_reg = (base) + SW_MODE1, \
.enable_val = 0x3, \
.disable_val = 0x0, \
@@ -335,6 +464,7 @@ static const struct regulator_ops pf8x00_vsnvs_ops = {
}, \
}
+
#define PF8X00VSNVS(_name, base, voltages) \
[PF8X00_VSNVS] = { \
.desc = { \
@@ -352,7 +482,7 @@ static const struct regulator_ops pf8x00_vsnvs_ops = {
}, \
}
-static struct pf8x00_regulator pf8x00_regulators_data[PF8X00_MAX_REGULATORS] = {
+static struct pf8x00_regulator_data pf8x00_regs_data[PF8X00_MAX_REGULATORS] = {
PF8X00LDO(1, "ldo1", PF8X00_LDO_BASE(PF8X00_LDO1), pf8x00_ldo_voltages),
PF8X00LDO(2, "ldo2", PF8X00_LDO_BASE(PF8X00_LDO2), pf8x00_ldo_voltages),
PF8X00LDO(3, "ldo3", PF8X00_LDO_BASE(PF8X00_LDO3), pf8x00_ldo_voltages),
@@ -363,7 +493,7 @@ static struct pf8x00_regulator pf8x00_regulators_data[PF8X00_MAX_REGULATORS] = {
PF8X00BUCK(4, "buck4", PF8X00_SW_BASE(PF8X00_BUCK4), pf8x00_sw1_to_6_voltages),
PF8X00BUCK(5, "buck5", PF8X00_SW_BASE(PF8X00_BUCK5), pf8x00_sw1_to_6_voltages),
PF8X00BUCK(6, "buck6", PF8X00_SW_BASE(PF8X00_BUCK6), pf8x00_sw1_to_6_voltages),
- PF8X00BUCK(7, "buck7", PF8X00_SW_BASE(PF8X00_BUCK7), pf8x00_sw7_voltages),
+ PF8X00BUCK7("buck7", PF8X00_SW_BASE(PF8X00_BUCK7), pf8x00_sw7_voltages),
PF8X00VSNVS("vsnvs", PF8X00_VSNVS_CONFIG1, pf8x00_vsnvs_voltages),
};
@@ -399,7 +529,7 @@ static int pf8x00_identify(struct pf8x00_chip *chip)
name = "PF8121A";
break;
case PF8200:
- name = "PF8100";
+ name = "PF8200";
break;
default:
dev_err(chip->dev, "Unknown pf8x00 device id 0x%x\n", dev_id);
@@ -437,12 +567,12 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
if (ret)
return ret;
- for (id = 0; id < ARRAY_SIZE(pf8x00_regulators_data); id++) {
- struct pf8x00_regulator *data = &pf8x00_regulators_data[id];
+ for (id = 0; id < ARRAY_SIZE(pf8x00_regs_data); id++) {
+ struct pf8x00_regulator_data *data = &pf8x00_regs_data[id];
struct regulator_dev *rdev;
config.dev = chip->dev;
- config.driver_data = chip;
+ config.driver_data = data;
config.regmap = chip->regmap;
rdev = devm_regulator_register(&client->dev, &data->desc, &config);
@@ -451,31 +581,23 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
"failed to register %s regulator\n", data->desc.name);
return PTR_ERR(rdev);
}
-
- if ((id >= PF8X00_BUCK1) && (id <= PF8X00_BUCK7)) {
- u8 reg = PF8X00_SW_BASE(id) + SW_CONFIG2;
-
- regmap_update_bits(chip->regmap, reg,
- PF8X00_SWXPHASE_MASK,
- data->phase_shift);
-
- regmap_update_bits(chip->regmap, reg,
- PF8X00_SWXILIM_MASK,
- data->ilim << PF8X00_SWXILIM_SHIFT);
- }
}
return 0;
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
index 8ccf572394a2..de25e3279b4b 100644
--- a/drivers/regulator/qcom-labibb-regulator.c
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -17,11 +17,48 @@
#define PMI8998_LAB_REG_BASE 0xde00
#define PMI8998_IBB_REG_BASE 0xdc00
+#define PMI8998_IBB_LAB_REG_OFFSET 0x200
#define REG_LABIBB_STATUS1 0x08
+ #define LABIBB_STATUS1_SC_BIT BIT(6)
+ #define LABIBB_STATUS1_VREG_OK_BIT BIT(7)
+
+#define REG_LABIBB_INT_SET_TYPE 0x11
+#define REG_LABIBB_INT_POLARITY_HIGH 0x12
+#define REG_LABIBB_INT_POLARITY_LOW 0x13
+#define REG_LABIBB_INT_LATCHED_CLR 0x14
+#define REG_LABIBB_INT_EN_SET 0x15
+#define REG_LABIBB_INT_EN_CLR 0x16
+ #define LABIBB_INT_VREG_OK BIT(0)
+ #define LABIBB_INT_VREG_TYPE_LEVEL 0
+
+#define REG_LABIBB_VOLTAGE 0x41
+ #define LABIBB_VOLTAGE_OVERRIDE_EN BIT(7)
+ #define LAB_VOLTAGE_SET_MASK GENMASK(3, 0)
+ #define IBB_VOLTAGE_SET_MASK GENMASK(5, 0)
+
#define REG_LABIBB_ENABLE_CTL 0x46
-#define LABIBB_STATUS1_VREG_OK_BIT BIT(7)
-#define LABIBB_CONTROL_ENABLE BIT(7)
+ #define LABIBB_CONTROL_ENABLE BIT(7)
+
+#define REG_LABIBB_PD_CTL 0x47
+ #define LAB_PD_CTL_MASK GENMASK(1, 0)
+ #define IBB_PD_CTL_MASK (BIT(0) | BIT(7))
+ #define LAB_PD_CTL_STRONG_PULL BIT(0)
+ #define IBB_PD_CTL_HALF_STRENGTH BIT(0)
+ #define IBB_PD_CTL_EN BIT(7)
+
+#define REG_LABIBB_CURRENT_LIMIT 0x4b
+ #define LAB_CURRENT_LIMIT_MASK GENMASK(2, 0)
+ #define IBB_CURRENT_LIMIT_MASK GENMASK(4, 0)
+ #define LAB_CURRENT_LIMIT_OVERRIDE_EN BIT(3)
+ #define LABIBB_CURRENT_LIMIT_EN BIT(7)
+
+#define REG_IBB_PWRUP_PWRDN_CTL_1 0x58
+ #define IBB_CTL_1_DISCHARGE_EN BIT(2)
+
+#define REG_LABIBB_SOFT_START_CTL 0x5f
+#define REG_LABIBB_SEC_ACCESS 0xd0
+ #define LABIBB_SEC_UNLOCK_CODE 0xa5
#define LAB_ENABLE_CTL_MASK BIT(7)
#define IBB_ENABLE_CTL_MASK (BIT(7) | BIT(6))
@@ -30,14 +67,35 @@
#define LAB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 2)
#define IBB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 10)
#define LABIBB_POLL_ENABLED_TIME 1000
+#define OCP_RECOVERY_INTERVAL_MS 500
+#define SC_RECOVERY_INTERVAL_MS 250
+#define LABIBB_MAX_OCP_COUNT 4
+#define LABIBB_MAX_SC_COUNT 3
+#define LABIBB_MAX_FATAL_COUNT 2
+
+struct labibb_current_limits {
+ u32 uA_min;
+ u32 uA_step;
+ u8 ovr_val;
+};
struct labibb_regulator {
struct regulator_desc desc;
struct device *dev;
struct regmap *regmap;
struct regulator_dev *rdev;
+ struct labibb_current_limits uA_limits;
+ struct delayed_work ocp_recovery_work;
+ struct delayed_work sc_recovery_work;
u16 base;
u8 type;
+ u8 dischg_sel;
+ u8 soft_start_sel;
+ int sc_irq;
+ int sc_count;
+ int ocp_irq;
+ int ocp_irq_count;
+ int fatal_count;
};
struct labibb_regulator_data {
@@ -47,10 +105,579 @@ struct labibb_regulator_data {
const struct regulator_desc *desc;
};
+static int qcom_labibb_ocp_hw_enable(struct regulator_dev *rdev)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* Clear irq latch status to avoid spurious event */
+ ret = regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_LATCHED_CLR,
+ LABIBB_INT_VREG_OK, 1);
+ if (ret)
+ return ret;
+
+ /* Enable OCP HW interrupt */
+ return regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_EN_SET,
+ LABIBB_INT_VREG_OK, 1);
+}
+
+static int qcom_labibb_ocp_hw_disable(struct regulator_dev *rdev)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+
+ return regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_EN_CLR,
+ LABIBB_INT_VREG_OK, 1);
+}
+
+/**
+ * qcom_labibb_check_ocp_status - Check the Over-Current Protection status
+ * @vreg: Main driver structure
+ *
+ * This function checks the STATUS1 register for the VREG_OK bit: if it is
+ * set, then there is no Over-Current event.
+ *
+ * Returns: Zero if there is no over-current, 1 if in over-current or
+ * negative number for error
+ */
+static int qcom_labibb_check_ocp_status(struct labibb_regulator *vreg)
+{
+ u32 cur_status;
+ int ret;
+
+ ret = regmap_read(vreg->rdev->regmap, vreg->base + REG_LABIBB_STATUS1,
+ &cur_status);
+ if (ret)
+ return ret;
+
+ return !(cur_status & LABIBB_STATUS1_VREG_OK_BIT);
+}
+
+/**
+ * qcom_labibb_ocp_recovery_worker - Handle OCP event
+ * @work: OCP work structure
+ *
+ * This is the worker function to handle the Over Current Protection
+ * hardware event; This will check if the hardware is still
+ * signaling an over-current condition and will eventually stop
+ * the regulator if such condition is still signaled after
+ * LABIBB_MAX_OCP_COUNT times.
+ *
+ * If the driver that is consuming the regulator did not take action
+ * for the OCP condition, or the hardware did not stabilize, a cut
+ * of the LAB and IBB regulators will be forced (regulators will be
+ * disabled).
+ *
+ * As last, if the writes to shut down the LAB/IBB regulators fail
+ * for more than LABIBB_MAX_FATAL_COUNT, then a kernel panic will be
+ * triggered, as a last resort to protect the hardware from burning;
+ * this, however, is expected to never happen, but this is kept to
+ * try to further ensure that we protect the hardware at all costs.
+ */
+static void qcom_labibb_ocp_recovery_worker(struct work_struct *work)
+{
+ struct labibb_regulator *vreg;
+ const struct regulator_ops *ops;
+ int ret;
+
+ vreg = container_of(work, struct labibb_regulator,
+ ocp_recovery_work.work);
+ ops = vreg->rdev->desc->ops;
+
+ if (vreg->ocp_irq_count >= LABIBB_MAX_OCP_COUNT) {
+ /*
+ * If we tried to disable the regulator multiple times but
+ * we kept failing, there's only one last hope to save our
+ * hardware from the death: raise a kernel bug, reboot and
+ * hope that the bootloader kindly saves us. This, though
+ * is done only as paranoid checking, because failing the
+ * regmap write to disable the vreg is almost impossible,
+ * since we got here after multiple regmap R/W.
+ */
+ BUG_ON(vreg->fatal_count > LABIBB_MAX_FATAL_COUNT);
+ dev_err(&vreg->rdev->dev, "LABIBB: CRITICAL: Disabling regulator\n");
+
+ /* Disable the regulator immediately to avoid damage */
+ ret = ops->disable(vreg->rdev);
+ if (ret) {
+ vreg->fatal_count++;
+ goto reschedule;
+ }
+ enable_irq(vreg->ocp_irq);
+ vreg->fatal_count = 0;
+ return;
+ }
+
+ ret = qcom_labibb_check_ocp_status(vreg);
+ if (ret != 0) {
+ vreg->ocp_irq_count++;
+ goto reschedule;
+ }
+
+ ret = qcom_labibb_ocp_hw_enable(vreg->rdev);
+ if (ret) {
+ /* We cannot trust it without OCP enabled. */
+ dev_err(vreg->dev, "Cannot enable OCP IRQ\n");
+ vreg->ocp_irq_count++;
+ goto reschedule;
+ }
+
+ enable_irq(vreg->ocp_irq);
+ /* Everything went fine: reset the OCP count! */
+ vreg->ocp_irq_count = 0;
+ return;
+
+reschedule:
+ mod_delayed_work(system_wq, &vreg->ocp_recovery_work,
+ msecs_to_jiffies(OCP_RECOVERY_INTERVAL_MS));
+}
+
+/**
+ * qcom_labibb_ocp_isr - Interrupt routine for OverCurrent Protection
+ * @irq: Interrupt number
+ * @chip: Main driver structure
+ *
+ * Over Current Protection (OCP) will signal to the client driver
+ * that an over-current event has happened and then will schedule
+ * a recovery worker.
+ *
+ * Disabling and eventually re-enabling the regulator is expected
+ * to be done by the driver, as some hardware may be triggering an
+ * over-current condition only at first initialization or it may
+ * be expected only for a very brief amount of time, after which
+ * the attached hardware may be expected to stabilize its current
+ * draw.
+ *
+ * Returns: IRQ_HANDLED for success or IRQ_NONE for failure.
+ */
+static irqreturn_t qcom_labibb_ocp_isr(int irq, void *chip)
+{
+ struct labibb_regulator *vreg = chip;
+ const struct regulator_ops *ops = vreg->rdev->desc->ops;
+ int ret;
+
+ /* If the regulator is not enabled, this is a fake event */
+ if (!ops->is_enabled(vreg->rdev))
+		return IRQ_NONE;
+
+ /* If we tried to recover for too many times it's not getting better */
+ if (vreg->ocp_irq_count > LABIBB_MAX_OCP_COUNT)
+ return IRQ_NONE;
+
+ /*
+ * If we (unlikely) can't read this register, to prevent hardware
+ * damage at all costs, we assume that the overcurrent event was
+ * real; Moreover, if the status register is not signaling OCP,
+ * it was a spurious event, so it's all ok.
+ */
+ ret = qcom_labibb_check_ocp_status(vreg);
+ if (ret == 0) {
+ vreg->ocp_irq_count = 0;
+ goto end;
+ }
+ vreg->ocp_irq_count++;
+
+ /*
+ * Disable the interrupt temporarily, or it will fire continuously;
+ * we will re-enable it in the recovery worker function.
+ */
+ disable_irq_nosync(irq);
+
+ /* Warn the user for overcurrent */
+ dev_warn(vreg->dev, "Over-Current interrupt fired!\n");
+
+ /* Disable the interrupt to avoid hogging */
+ ret = qcom_labibb_ocp_hw_disable(vreg->rdev);
+ if (ret)
+ goto end;
+
+ /* Signal overcurrent event to drivers */
+ regulator_notifier_call_chain(vreg->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
+
+end:
+ /* Schedule the recovery work */
+ schedule_delayed_work(&vreg->ocp_recovery_work,
+ msecs_to_jiffies(OCP_RECOVERY_INTERVAL_MS));
+ if (ret)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_labibb_set_ocp(struct regulator_dev *rdev)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+ char *ocp_irq_name;
+ u32 irq_flags = IRQF_ONESHOT;
+ int irq_trig_low, ret;
+
+ /* If there is no OCP interrupt, there's nothing to set */
+ if (vreg->ocp_irq <= 0)
+ return -EINVAL;
+
+ ocp_irq_name = devm_kasprintf(vreg->dev, GFP_KERNEL, "%s-over-current",
+ vreg->desc.name);
+ if (!ocp_irq_name)
+ return -ENOMEM;
+
+ /* IRQ polarities - LAB: trigger-low, IBB: trigger-high */
+ switch (vreg->type) {
+ case QCOM_LAB_TYPE:
+ irq_flags |= IRQF_TRIGGER_LOW;
+ irq_trig_low = 1;
+ break;
+ case QCOM_IBB_TYPE:
+ irq_flags |= IRQF_TRIGGER_HIGH;
+ irq_trig_low = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Activate OCP HW level interrupt */
+ ret = regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_SET_TYPE,
+ LABIBB_INT_VREG_OK,
+ LABIBB_INT_VREG_TYPE_LEVEL);
+ if (ret)
+ return ret;
+
+ /* Set OCP interrupt polarity */
+ ret = regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_POLARITY_HIGH,
+ LABIBB_INT_VREG_OK, !irq_trig_low);
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(rdev->regmap,
+ vreg->base + REG_LABIBB_INT_POLARITY_LOW,
+ LABIBB_INT_VREG_OK, irq_trig_low);
+ if (ret)
+ return ret;
+
+ ret = qcom_labibb_ocp_hw_enable(rdev);
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(vreg->dev, vreg->ocp_irq, NULL,
+ qcom_labibb_ocp_isr, irq_flags,
+ ocp_irq_name, vreg);
+}
+
+/**
+ * qcom_labibb_check_sc_status - Check the Short Circuit Protection status
+ * @vreg: Main driver structure
+ *
+ * This function checks the STATUS1 register on both LAB and IBB regulators
+ * for the ShortCircuit bit: if it is set on *any* of them, then we have
+ * experienced a short-circuit event.
+ *
+ * Returns: Zero if there is no short-circuit, 1 if in short-circuit or
+ * negative number for error
+ */
+static int qcom_labibb_check_sc_status(struct labibb_regulator *vreg)
+{
+ u32 ibb_status, ibb_reg, lab_status, lab_reg;
+ int ret;
+
+ /* We have to work on both regulators due to PBS... */
+ lab_reg = ibb_reg = vreg->base + REG_LABIBB_STATUS1;
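+	/* The LAB and IBB register banks sit PMI8998_IBB_LAB_REG_OFFSET apart. */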
+ if (vreg->type == QCOM_LAB_TYPE)
+ ibb_reg -= PMI8998_IBB_LAB_REG_OFFSET;
+ else
+ lab_reg += PMI8998_IBB_LAB_REG_OFFSET;
+
+ ret = regmap_read(vreg->rdev->regmap, lab_reg, &lab_status);
+ if (ret)
+ return ret;
+ ret = regmap_read(vreg->rdev->regmap, ibb_reg, &ibb_status);
+ if (ret)
+ return ret;
+
+ return !!(lab_status & LABIBB_STATUS1_SC_BIT) ||
+ !!(ibb_status & LABIBB_STATUS1_SC_BIT);
+}
+
+/**
+ * qcom_labibb_sc_recovery_worker - Handle Short Circuit event
+ * @work: SC work structure
+ *
+ * This is the worker function to handle the Short Circuit Protection
+ * hardware event; This will check if the hardware is still
+ * signaling a short-circuit condition and will eventually never
+ * re-enable the regulator if such condition is still signaled after
+ * LABIBB_MAX_SC_COUNT times.
+ *
+ * If the driver that is consuming the regulator did not take action
+ * for the SC condition, or the hardware did not stabilize, this
+ * worker will stop rescheduling, leaving the regulators disabled
+ * as already done by the Portable Batch System (PBS).
+ */
+static void qcom_labibb_sc_recovery_worker(struct work_struct *work)
+{
+ struct labibb_regulator *vreg;
+ const struct regulator_ops *ops;
+ u32 lab_reg, ibb_reg, lab_val, ibb_val, val;
+ bool pbs_cut = false;
+ int i, sc, ret;
+
+ vreg = container_of(work, struct labibb_regulator,
+ sc_recovery_work.work);
+ ops = vreg->rdev->desc->ops;
+
+ /*
+ * If we tried to check the regulator status multiple times but we
+ * kept failing, then just bail out, as the Portable Batch System
+ * (PBS) will disable the vregs for us, preventing hardware damage.
+ */
+ if (vreg->fatal_count > LABIBB_MAX_FATAL_COUNT)
+ return;
+
+ /* Too many short-circuit events. Throw in the towel. */
+ if (vreg->sc_count > LABIBB_MAX_SC_COUNT)
+ return;
+
+ /*
+ * The Portable Batch System (PBS) automatically disables LAB
+ * and IBB when a short-circuit event is detected, so we have to
+ * check and work on both of them at the same time.
+ */
+ lab_reg = ibb_reg = vreg->base + REG_LABIBB_ENABLE_CTL;
+ if (vreg->type == QCOM_LAB_TYPE)
+ ibb_reg -= PMI8998_IBB_LAB_REG_OFFSET;
+ else
+ lab_reg += PMI8998_IBB_LAB_REG_OFFSET;
+
+ sc = qcom_labibb_check_sc_status(vreg);
+ if (sc)
+ goto reschedule;
+
+ for (i = 0; i < LABIBB_MAX_SC_COUNT; i++) {
+ ret = regmap_read(vreg->regmap, lab_reg, &lab_val);
+ if (ret) {
+ vreg->fatal_count++;
+ goto reschedule;
+ }
+
+ ret = regmap_read(vreg->regmap, ibb_reg, &ibb_val);
+ if (ret) {
+ vreg->fatal_count++;
+ goto reschedule;
+ }
+ val = lab_val & ibb_val;
+
+ if (!(val & LABIBB_CONTROL_ENABLE)) {
+ pbs_cut = true;
+ break;
+ }
+ usleep_range(5000, 6000);
+ }
+ if (pbs_cut)
+ goto reschedule;
+
+ /*
+ * If we have reached this point, we either have successfully
+ * recovered from the SC condition or we had a spurious SC IRQ,
+ * which means that we can re-enable the regulators, if they
+ * have ever been disabled by the PBS.
+ */
+ ret = ops->enable(vreg->rdev);
+ if (ret)
+ goto reschedule;
+
+ /* Everything went fine: reset the OCP count! */
+ vreg->sc_count = 0;
+ enable_irq(vreg->sc_irq);
+ return;
+
+reschedule:
+ /*
+ * Now that we have done basic handling of the short-circuit,
+ * reschedule this worker in the regular system workqueue, as
+ * taking action is not truly urgent anymore.
+ */
+ vreg->sc_count++;
+ mod_delayed_work(system_wq, &vreg->sc_recovery_work,
+ msecs_to_jiffies(SC_RECOVERY_INTERVAL_MS));
+}
+
+/**
+ * qcom_labibb_sc_isr - Interrupt routine for Short Circuit Protection
+ * @irq: Interrupt number
+ * @chip: Main driver structure
+ *
+ * Short Circuit Protection (SCP) will signal to the client driver
+ * that a regulation-out event has happened and then will schedule
+ * a recovery worker.
+ *
+ * The LAB and IBB regulators will be automatically disabled by the
+ * Portable Batch System (PBS) and they will be enabled again by
+ * the worker function if the hardware stops signaling the short
+ * circuit event.
+ *
+ * Returns: IRQ_HANDLED for success or IRQ_NONE for failure.
+ */
+static irqreturn_t qcom_labibb_sc_isr(int irq, void *chip)
+{
+ struct labibb_regulator *vreg = chip;
+
+ if (vreg->sc_count > LABIBB_MAX_SC_COUNT)
+ return IRQ_NONE;
+
+ /* Warn the user for short circuit */
+ dev_warn(vreg->dev, "Short-Circuit interrupt fired!\n");
+
+ /*
+ * Disable the interrupt temporarily, or it will fire continuously;
+ * we will re-enable it in the recovery worker function.
+ */
+ disable_irq_nosync(irq);
+
+ /* Signal out of regulation event to drivers */
+ regulator_notifier_call_chain(vreg->rdev,
+ REGULATOR_EVENT_REGULATION_OUT, NULL);
+
+ /* Schedule the short-circuit handling as high-priority work */
+ mod_delayed_work(system_highpri_wq, &vreg->sc_recovery_work,
+ msecs_to_jiffies(SC_RECOVERY_INTERVAL_MS));
+ return IRQ_HANDLED;
+}
+
+static int qcom_labibb_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+ struct regulator_desc *desc = &vreg->desc;
+ struct labibb_current_limits *lim = &vreg->uA_limits;
+ u32 mask, val;
+ int i, ret, sel = -1;
+
+ if (min_uA < lim->uA_min || max_uA < lim->uA_min)
+ return -EINVAL;
+
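+	/* Pick the highest supported step that still fits the requested window. */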
+ for (i = 0; i < desc->n_current_limits; i++) {
+ int uA_limit = (lim->uA_step * i) + lim->uA_min;
+
+ if (max_uA >= uA_limit && min_uA <= uA_limit)
+ sel = i;
+ }
+ if (sel < 0)
+ return -EINVAL;
+
+ /* Current limit setting needs secure access */
+ ret = regmap_write(vreg->regmap, vreg->base + REG_LABIBB_SEC_ACCESS,
+ LABIBB_SEC_UNLOCK_CODE);
+ if (ret)
+ return ret;
+
+ mask = desc->csel_mask | lim->ovr_val;
+ mask |= LABIBB_CURRENT_LIMIT_EN;
+ val = (u32)sel | lim->ovr_val;
+ val |= LABIBB_CURRENT_LIMIT_EN;
+
+ return regmap_update_bits(vreg->regmap, desc->csel_reg, mask, val);
+}
+
+static int qcom_labibb_get_current_limit(struct regulator_dev *rdev)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+ struct regulator_desc *desc = &vreg->desc;
+ struct labibb_current_limits *lim = &vreg->uA_limits;
+ unsigned int cur_step;
+ int ret;
+
+ ret = regmap_read(vreg->regmap, desc->csel_reg, &cur_step);
+ if (ret)
+ return ret;
+ cur_step &= desc->csel_mask;
+
+ return (cur_step * lim->uA_step) + lim->uA_min;
+}
+
+static int qcom_labibb_set_soft_start(struct regulator_dev *rdev)
+{
+ struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
+ u32 val = 0;
+
+ if (vreg->type == QCOM_IBB_TYPE)
+ val = vreg->dischg_sel;
+ else
+ val = vreg->soft_start_sel;
+
+ return regmap_write(rdev->regmap, rdev->desc->soft_start_reg, val);
+}
+
+static int qcom_labibb_get_table_sel(const int *table, int sz, u32 value)
+{
+ int i;
+
+ for (i = 0; i < sz; i++)
+ if (table[i] == value)
+ return i;
+ return -EINVAL;
+}
+
+/* IBB discharge resistor values in KOhms */
+static const int dischg_resistor_values[] = { 300, 64, 32, 16 };
+
+/* Soft start time in microseconds */
+static const int soft_start_values[] = { 200, 400, 600, 800 };
+
+static int qcom_labibb_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct labibb_regulator *vreg = config->driver_data;
+ u32 dischg_kohms, soft_start_time;
+ int ret;
+
+ ret = of_property_read_u32(np, "qcom,discharge-resistor-kohms",
+ &dischg_kohms);
+ if (ret)
+ dischg_kohms = 300;
+
+ ret = qcom_labibb_get_table_sel(dischg_resistor_values,
+ ARRAY_SIZE(dischg_resistor_values),
+ dischg_kohms);
+ if (ret < 0)
+ return ret;
+ vreg->dischg_sel = (u8)ret;
+
+ ret = of_property_read_u32(np, "qcom,soft-start-us",
+ &soft_start_time);
+ if (ret)
+ soft_start_time = 200;
+
+ ret = qcom_labibb_get_table_sel(soft_start_values,
+ ARRAY_SIZE(soft_start_values),
+ soft_start_time);
+ if (ret < 0)
+ return ret;
+ vreg->soft_start_sel = (u8)ret;
+
+ return 0;
+}
+
static const struct regulator_ops qcom_labibb_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+ .set_current_limit = qcom_labibb_set_current_limit,
+ .get_current_limit = qcom_labibb_get_current_limit,
+ .set_soft_start = qcom_labibb_set_soft_start,
+ .set_over_current_protection = qcom_labibb_set_ocp,
};
static const struct regulator_desc pmi8998_lab_desc = {
@@ -59,10 +686,25 @@ static const struct regulator_desc pmi8998_lab_desc = {
.enable_val = LABIBB_CONTROL_ENABLE,
.enable_time = LAB_ENABLE_TIME,
.poll_enabled_time = LABIBB_POLL_ENABLED_TIME,
+ .soft_start_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_SOFT_START_CTL),
+ .pull_down_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_PD_CTL),
+ .pull_down_mask = LAB_PD_CTL_MASK,
+ .pull_down_val_on = LAB_PD_CTL_STRONG_PULL,
+ .vsel_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_VOLTAGE),
+ .vsel_mask = LAB_VOLTAGE_SET_MASK,
+ .apply_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_VOLTAGE),
+ .apply_bit = LABIBB_VOLTAGE_OVERRIDE_EN,
+ .csel_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_CURRENT_LIMIT),
+ .csel_mask = LAB_CURRENT_LIMIT_MASK,
+ .n_current_limits = 8,
.off_on_delay = LABIBB_OFF_ON_DELAY,
.owner = THIS_MODULE,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 4600000,
+ .uV_step = 100000,
+ .n_voltages = 16,
.ops = &qcom_labibb_ops,
+ .of_parse_cb = qcom_labibb_of_parse_cb,
};
static const struct regulator_desc pmi8998_ibb_desc = {
@@ -71,10 +713,29 @@ static const struct regulator_desc pmi8998_ibb_desc = {
.enable_val = LABIBB_CONTROL_ENABLE,
.enable_time = IBB_ENABLE_TIME,
.poll_enabled_time = LABIBB_POLL_ENABLED_TIME,
+ .soft_start_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_SOFT_START_CTL),
+ .active_discharge_off = 0,
+ .active_discharge_on = IBB_CTL_1_DISCHARGE_EN,
+ .active_discharge_mask = IBB_CTL_1_DISCHARGE_EN,
+ .active_discharge_reg = (PMI8998_IBB_REG_BASE + REG_IBB_PWRUP_PWRDN_CTL_1),
+ .pull_down_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_PD_CTL),
+ .pull_down_mask = IBB_PD_CTL_MASK,
+ .pull_down_val_on = IBB_PD_CTL_HALF_STRENGTH | IBB_PD_CTL_EN,
+ .vsel_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_VOLTAGE),
+ .vsel_mask = IBB_VOLTAGE_SET_MASK,
+ .apply_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_VOLTAGE),
+ .apply_bit = LABIBB_VOLTAGE_OVERRIDE_EN,
+ .csel_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_CURRENT_LIMIT),
+ .csel_mask = IBB_CURRENT_LIMIT_MASK,
+ .n_current_limits = 32,
.off_on_delay = LABIBB_OFF_ON_DELAY,
.owner = THIS_MODULE,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 1400000,
+ .uV_step = 100000,
+ .n_voltages = 64,
.ops = &qcom_labibb_ops,
+ .of_parse_cb = qcom_labibb_of_parse_cb,
};
static const struct labibb_regulator_data pmi8998_labibb_data[] = {
@@ -94,7 +755,7 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
struct labibb_regulator *vreg;
struct device *dev = &pdev->dev;
struct regulator_config cfg = {};
-
+ struct device_node *reg_node;
const struct of_device_id *match;
const struct labibb_regulator_data *reg_data;
struct regmap *reg_regmap;
@@ -112,6 +773,8 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
return -ENODEV;
for (reg_data = match->data; reg_data->name; reg_data++) {
+ char *sc_irq_name;
+ int irq = 0;
/* Validate if the type of regulator is indeed
* what's mentioned in DT.
@@ -134,10 +797,61 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
if (!vreg)
return -ENOMEM;
+ sc_irq_name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-short-circuit",
+ reg_data->name);
+ if (!sc_irq_name)
+ return -ENOMEM;
+
+ reg_node = of_get_child_by_name(pdev->dev.of_node,
+ reg_data->name);
+ if (!reg_node)
+ return -EINVAL;
+
+ /* The Short Circuit interrupt is critical */
+ irq = of_irq_get_byname(reg_node, "sc-err");
+ if (irq <= 0) {
+ if (irq == 0)
+ irq = -EINVAL;
+
+			return dev_err_probe(dev, irq,
+ "Short-circuit irq not found.\n");
+ }
+ vreg->sc_irq = irq;
+
+ /* OverCurrent Protection IRQ is optional */
+ irq = of_irq_get_byname(reg_node, "ocp");
+ vreg->ocp_irq = irq;
+ vreg->ocp_irq_count = 0;
+ of_node_put(reg_node);
+
vreg->regmap = reg_regmap;
vreg->dev = dev;
vreg->base = reg_data->base;
vreg->type = reg_data->type;
+ INIT_DELAYED_WORK(&vreg->sc_recovery_work,
+ qcom_labibb_sc_recovery_worker);
+
+ if (vreg->ocp_irq > 0)
+ INIT_DELAYED_WORK(&vreg->ocp_recovery_work,
+ qcom_labibb_ocp_recovery_worker);
+
+ switch (vreg->type) {
+ case QCOM_LAB_TYPE:
+ /* LAB Limits: 200-1600mA */
+ vreg->uA_limits.uA_min = 200000;
+ vreg->uA_limits.uA_step = 200000;
+ vreg->uA_limits.ovr_val = LAB_CURRENT_LIMIT_OVERRIDE_EN;
+ break;
+ case QCOM_IBB_TYPE:
+ /* IBB Limits: 0-1550mA */
+ vreg->uA_limits.uA_min = 0;
+ vreg->uA_limits.uA_step = 50000;
+ vreg->uA_limits.ovr_val = 0; /* No override bit */
+ break;
+ default:
+ return -EINVAL;
+ }
memcpy(&vreg->desc, reg_data->desc, sizeof(vreg->desc));
vreg->desc.of_match = reg_data->name;
@@ -155,6 +869,14 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
reg_data->name, ret);
return PTR_ERR(vreg->rdev);
}
+
+ ret = devm_request_threaded_irq(vreg->dev, vreg->sc_irq, NULL,
+ qcom_labibb_sc_isr,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING,
+ sc_irq_name, vreg);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..79a554f1029d 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,16 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
+ .n_voltages = 5,
+ .pmic_mode_map = pmic_mode_map_pmic5_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -928,6 +937,19 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps515_1, "vdd-s2"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
{},
};
@@ -1058,6 +1080,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8009_vreg_data,
},
{
+ .compatible = "qcom,pm8009-1-rpmh-regulators",
+ .data = pm8009_1_vreg_data,
+ },
+ {
.compatible = "qcom,pm8150-rpmh-regulators",
.data = pm8150_vreg_data,
},
@@ -1090,6 +1116,14 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm6150l_vreg_data,
},
{
+ .compatible = "qcom,pmc8180-rpmh-regulators",
+ .data = pm8150_vreg_data,
+ },
+ {
+ .compatible = "qcom,pmc8180c-rpmh-regulators",
+ .data = pm8150l_vreg_data,
+ },
+ {
.compatible = "qcom,pmx55-rpmh-regulators",
.data = pmx55_vreg_data,
},
diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
index 399002383b28..5c558b153d55 100644
--- a/drivers/regulator/rohm-regulator.c
+++ b/drivers/regulator/rohm-regulator.c
@@ -52,9 +52,12 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
char *prop;
unsigned int reg, mask, omask, oreg = desc->enable_reg;
- for (i = 0; i < ROHM_DVS_LEVEL_MAX && !ret; i++) {
- if (dvs->level_map & (1 << i)) {
- switch (i + 1) {
+ for (i = 0; i < ROHM_DVS_LEVEL_VALID_AMOUNT && !ret; i++) {
+		int bit = BIT(i);
+
+ if (dvs->level_map & bit) {
+ switch (bit) {
case ROHM_DVS_LEVEL_RUN:
prop = "rohm,dvs-run-voltage";
reg = dvs->run_reg;
diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c
new file mode 100644
index 000000000000..3d4695ded629
--- /dev/null
+++ b/drivers/regulator/rt4831-regulator.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+
+enum {
+ DSV_OUT_VLCM = 0,
+ DSV_OUT_VPOS,
+ DSV_OUT_VNEG,
+ DSV_OUT_MAX
+};
+
+#define RT4831_REG_DSVEN 0x09
+#define RT4831_REG_VLCM 0x0c
+#define RT4831_REG_VPOS 0x0d
+#define RT4831_REG_VNEG 0x0e
+#define RT4831_REG_FLAGS 0x0f
+
+#define RT4831_VOLT_MASK GENMASK(5, 0)
+#define RT4831_DSVMODE_SHIFT 5
+#define RT4831_DSVMODE_MASK GENMASK(7, 5)
+#define RT4831_POSADEN_MASK BIT(4)
+#define RT4831_NEGADEN_MASK BIT(3)
+#define RT4831_POSEN_MASK BIT(2)
+#define RT4831_NEGEN_MASK BIT(1)
+
+#define RT4831_OTP_MASK BIT(6)
+#define RT4831_LCMOVP_MASK BIT(5)
+#define RT4831_VPOSSCP_MASK BIT(3)
+#define RT4831_VNEGSCP_MASK BIT(2)
+
+#define DSV_MODE_NORMAL (0x4 << RT4831_DSVMODE_SHIFT)
+#define DSV_MODE_BYPASS (0x6 << RT4831_DSVMODE_SHIFT)
+#define STEP_UV 50000
+#define VLCM_MIN_UV 4000000
+#define VLCM_MAX_UV 7150000
+#define VLCM_N_VOLTAGES ((VLCM_MAX_UV - VLCM_MIN_UV) / STEP_UV + 1)
+#define VPN_MIN_UV 4000000
+#define VPN_MAX_UV 6500000
+#define VPN_N_VOLTAGES ((VPN_MAX_UV - VPN_MIN_UV) / STEP_UV + 1)
+
+static int rt4831_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int val, events = 0;
+ int ret;
+
+ ret = regmap_read(regmap, RT4831_REG_FLAGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & RT4831_OTP_MASK)
+ events |= REGULATOR_ERROR_OVER_TEMP;
+
+ if (rid == DSV_OUT_VLCM && (val & RT4831_LCMOVP_MASK))
+ events |= REGULATOR_ERROR_OVER_CURRENT;
+
+ if (rid == DSV_OUT_VPOS && (val & RT4831_VPOSSCP_MASK))
+ events |= REGULATOR_ERROR_OVER_CURRENT;
+
+ if (rid == DSV_OUT_VNEG && (val & RT4831_VNEGSCP_MASK))
+ events |= REGULATOR_ERROR_OVER_CURRENT;
+
+ *flags = events;
+ return 0;
+}
+
+static const struct regulator_ops rt4831_dsvlcm_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
+ .get_error_flags = rt4831_get_error_flags,
+};
+
+static const struct regulator_ops rt4831_dsvpn_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt4831_get_error_flags,
+};
+
+static const struct regulator_desc rt4831_regulator_descs[] = {
+ {
+ .name = "DSVLCM",
+ .ops = &rt4831_dsvlcm_ops,
+ .of_match = of_match_ptr("DSVLCM"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .id = DSV_OUT_VLCM,
+ .n_voltages = VLCM_N_VOLTAGES,
+ .min_uV = VLCM_MIN_UV,
+ .uV_step = STEP_UV,
+ .vsel_reg = RT4831_REG_VLCM,
+ .vsel_mask = RT4831_VOLT_MASK,
+ .bypass_reg = RT4831_REG_DSVEN,
+ .bypass_val_on = DSV_MODE_BYPASS,
+ .bypass_val_off = DSV_MODE_NORMAL,
+ },
+ {
+ .name = "DSVP",
+ .ops = &rt4831_dsvpn_ops,
+ .of_match = of_match_ptr("DSVP"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .id = DSV_OUT_VPOS,
+ .n_voltages = VPN_N_VOLTAGES,
+ .min_uV = VPN_MIN_UV,
+ .uV_step = STEP_UV,
+ .vsel_reg = RT4831_REG_VPOS,
+ .vsel_mask = RT4831_VOLT_MASK,
+ .enable_reg = RT4831_REG_DSVEN,
+ .enable_mask = RT4831_POSEN_MASK,
+ .active_discharge_reg = RT4831_REG_DSVEN,
+ .active_discharge_mask = RT4831_POSADEN_MASK,
+ },
+ {
+ .name = "DSVN",
+ .ops = &rt4831_dsvpn_ops,
+ .of_match = of_match_ptr("DSVN"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .id = DSV_OUT_VNEG,
+ .n_voltages = VPN_N_VOLTAGES,
+ .min_uV = VPN_MIN_UV,
+ .uV_step = STEP_UV,
+ .vsel_reg = RT4831_REG_VNEG,
+ .vsel_mask = RT4831_VOLT_MASK,
+ .enable_reg = RT4831_REG_DSVEN,
+ .enable_mask = RT4831_NEGEN_MASK,
+ .active_discharge_reg = RT4831_REG_DSVEN,
+ .active_discharge_mask = RT4831_NEGADEN_MASK,
+ }
+};
+
+static int rt4831_regulator_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct regulator_dev *rdev;
+ struct regulator_config config = {};
+ int i, ret;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to init regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Configure DSV mode to normal by default */
+ ret = regmap_update_bits(regmap, RT4831_REG_DSVEN, RT4831_DSVMODE_MASK, DSV_MODE_NORMAL);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to configure dsv mode to normal\n");
+ return ret;
+ }
+
+ config.dev = pdev->dev.parent;
+ config.regmap = regmap;
+
+ for (i = 0; i < DSV_OUT_MAX; i++) {
+ rdev = devm_regulator_register(&pdev->dev, rt4831_regulator_descs + i, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register %d regulator\n", i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id rt4831_regulator_match[] = {
+ { "rt4831-regulator", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, rt4831_regulator_match);
+
+static struct platform_driver rt4831_regulator_driver = {
+ .driver = {
+ .name = "rt4831-regulator",
+ },
+ .id_table = rt4831_regulator_match,
+ .probe = rt4831_regulator_probe,
+};
+module_platform_driver(rt4831_regulator_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 3fa472127e9a..7c111bbdc2af 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rdata),
GFP_KERNEL);
- if (!rdata)
+ if (!rdata) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
rmode = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rmode),
GFP_KERNEL);
- if (!rmode)
+ if (!rmode) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
pdata->regulators = rdata;
pdata->opmode = rmode;
@@ -573,10 +577,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
"s5m8767,pmic-ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
- if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
+ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
rdata->ext_control_gpiod = NULL;
- else if (IS_ERR(rdata->ext_control_gpiod))
+ } else if (IS_ERR(rdata->ext_control_gpiod)) {
+ of_node_put(reg_np);
+ of_node_put(regulators_np);
return PTR_ERR(rdata->ext_control_gpiod);
+ }
rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 9e7efe542f69..15d1574d129b 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -155,6 +155,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -162,7 +163,9 @@ config QCOM_Q6V5_ADSP
select QCOM_RPROC_COMMON
help
Say y here to support the Peripheral Image Loader
- for the Qualcomm Technology Inc. ADSP remote processors.
+ for the non-TrustZone part of Qualcomm Technology Inc. ADSP and CDSP
+ remote processors. The TrustZone part is handled by the
+ QCOM_Q6V5_PAS driver.
config QCOM_Q6V5_MSS
tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
@@ -171,6 +174,7 @@ config QCOM_Q6V5_MSS
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -179,7 +183,8 @@ config QCOM_Q6V5_MSS
select QCOM_SCM
help
Say y here to support the Qualcomm self-authenticating modem
- subsystem based on Hexagon V5.
+ subsystem based on Hexagon V5. The TrustZone based system is
+ handled by the QCOM_Q6V5_PAS driver.
config QCOM_Q6V5_PAS
tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
@@ -188,6 +193,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -197,7 +203,9 @@ config QCOM_Q6V5_PAS
help
Say y here to support the TrustZone based Peripheral Image Loader
for the Qualcomm Hexagon v5 based remote processors. This is commonly
- used to control subsystems such as ADSP, Compute and Sensor.
+ used to control subsystems such as ADSP (Audio DSP),
+ CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and
+ SLPI (Sensor Low Power Island).
config QCOM_Q6V5_WCSS
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
@@ -206,6 +214,7 @@ config QCOM_Q6V5_WCSS
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -214,7 +223,8 @@ config QCOM_Q6V5_WCSS
select QCOM_SCM
help
Say y here to support the Qualcomm Peripheral Image Loader for the
- Hexagon V5 based WCSS remote processors.
+ Hexagon V5 based WCSS remote processors on e.g. IPQ8074. This is
+ a non-TrustZone wireless subsystem.
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
@@ -238,13 +248,16 @@ config QCOM_WCNSS_PIL
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SMEM
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the Peripheral Image Loader for the Qualcomm
- Wireless Connectivity Subsystem.
+ Say y here to support the Peripheral Image Loader for loading WCNSS
+ firmware and booting the core on e.g. MSM8974 and MSM8916. The
+ firmware is verified and booted with the help of the Peripheral
+ Authentication System (PAS) in TrustZone.
config ST_REMOTEPROC
tristate "ST remoteproc support"
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 26e19e6143b7..e2618c36eaab 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -27,6 +27,11 @@
#define AUX_CTRL_NMI BIT(1)
#define AUX_CTRL_SW_RESET BIT(0)
+static bool auto_boot;
+module_param(auto_boot, bool, 0400);
+MODULE_PARM_DESC(auto_boot,
+ "Auto-boot the remote processor [default=false]");
+
struct vpu_mem_map {
const char *name;
unsigned int da;
@@ -172,6 +177,8 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
if (!rproc)
return -ENOMEM;
+ rproc->auto_boot = auto_boot;
+
vpu = rproc->priv;
vpu->dev = &pdev->dev;
platform_set_drvdata(pdev, vpu);
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 988edb4977c3..61901f5efa05 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -47,6 +47,8 @@
#define MT8192_CORE0_SW_RSTN_CLR 0x10000
#define MT8192_CORE0_SW_RSTN_SET 0x10004
+#define MT8192_CORE0_MEM_ATT_PREDEF 0x10008
+#define MT8192_CORE0_WDT_IRQ 0x10030
#define MT8192_CORE0_WDT_CFG 0x10034
#define SCP_FW_VER_LEN 32
@@ -75,6 +77,7 @@ struct mtk_scp_of_data {
void (*scp_reset_assert)(struct mtk_scp *scp);
void (*scp_reset_deassert)(struct mtk_scp *scp);
void (*scp_stop)(struct mtk_scp *scp);
+ void *(*scp_da_to_va)(struct mtk_scp *scp, u64 da, size_t len);
u32 host_to_scp_reg;
u32 host_to_scp_int_bit;
@@ -89,6 +92,10 @@ struct mtk_scp {
void __iomem *reg_base;
void __iomem *sram_base;
size_t sram_size;
+ phys_addr_t sram_phys;
+ void __iomem *l1tcm_base;
+ size_t l1tcm_size;
+ phys_addr_t l1tcm_phys;
const struct mtk_scp_of_data *data;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index e0c235690361..ce727598c41c 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -197,17 +197,19 @@ static void mt8192_scp_irq_handler(struct mtk_scp *scp)
scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
- if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
scp_ipi_handler(scp);
- else
- scp_wdt_handler(scp, scp_to_host);
- /*
- * SCP won't send another interrupt until we clear
- * MT8192_SCP2APMCU_IPC.
- */
- writel(MT8192_SCP_IPC_INT_BIT,
- scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+ /*
+ * SCP won't send another interrupt until we clear
+ * MT8192_SCP2APMCU_IPC.
+ */
+ writel(MT8192_SCP_IPC_INT_BIT,
+ scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+ } else {
+ scp_wdt_handler(scp, scp_to_host);
+ writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
+ }
}
static irqreturn_t scp_irq_handler(int irq, void *priv)
@@ -369,6 +371,9 @@ static int mt8192_scp_before_load(struct mtk_scp *scp)
mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
return 0;
}
@@ -458,9 +463,8 @@ stop:
return ret;
}
-static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
- struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
int offset;
if (da < scp->sram_size) {
@@ -476,6 +480,42 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
return NULL;
}
+static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
+{
+ int offset;
+
+ if (da >= scp->sram_phys &&
+ (da + len) <= scp->sram_phys + scp->sram_size) {
+ offset = da - scp->sram_phys;
+ return (void __force *)scp->sram_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->l1tcm_size &&
+ da >= scp->l1tcm_phys &&
+ (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
+ offset = da - scp->l1tcm_phys;
+ return (void __force *)scp->l1tcm_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->dram_size &&
+ da >= scp->dma_addr &&
+ (da + len) <= scp->dma_addr + scp->dram_size) {
+ offset = da - scp->dma_addr;
+ return scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+
+ return scp->data->scp_da_to_va(scp, da, len);
+}
+
static void mt8183_scp_stop(struct mtk_scp *scp)
{
/* Disable SCP watchdog */
@@ -714,13 +754,27 @@ static int scp_probe(struct platform_device *pdev)
goto free_rproc;
}
scp->sram_size = resource_size(res);
+ scp->sram_phys = res->start;
+
+ /* l1tcm is an optional memory region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
+ scp->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)scp->l1tcm_base)) {
+ ret = PTR_ERR((__force void *)scp->l1tcm_base);
+ if (ret != -EINVAL) {
+ dev_err(dev, "Failed to map l1tcm memory\n");
+ goto free_rproc;
+ }
+ } else {
+ scp->l1tcm_size = resource_size(res);
+ scp->l1tcm_phys = res->start;
+ }
mutex_init(&scp->send_lock);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_init(&scp->ipi_desc[i].lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- scp->reg_base = devm_ioremap_resource(dev, res);
+ scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
if (IS_ERR((__force void *)scp->reg_base)) {
dev_err(dev, "Failed to parse and map cfg memory\n");
ret = PTR_ERR((__force void *)scp->reg_base);
@@ -803,6 +857,7 @@ static const struct mtk_scp_of_data mt8183_of_data = {
.scp_reset_assert = mt8183_scp_reset_assert,
.scp_reset_deassert = mt8183_scp_reset_deassert,
.scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
.ipi_buf_offset = 0x7bdb0,
@@ -814,6 +869,7 @@ static const struct mtk_scp_of_data mt8192_of_data = {
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
.scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
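All of the da_to_va callbacks above apply the same window test: the device address range [da, da + len) must sit entirely inside one known region before it may be translated to a kernel pointer. A generic sketch of that check (helper name illustrative; a production version might additionally guard da + len against arithmetic wrap-around):

/* Sketch: translate a device address that fits inside one memory window. */
static void *example_window_da_to_va(u64 da, size_t len,
				     phys_addr_t base, size_t size,
				     void __iomem *vaddr)
{
	if (!size)	/* optional region not present on this SoC */
		return NULL;

	if (da < base || da + len > base + size)
		return NULL;

	return (void __force *)vaddr + (da - base);
}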
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index ee586226e438..e635454d6170 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -565,6 +565,26 @@ static const struct adsp_data sm8250_adsp_resource = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sm8350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data msm8998_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -629,6 +649,25 @@ static const struct adsp_data sm8250_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sm8350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -701,6 +740,26 @@ static const struct adsp_data sm8250_slpi_resource = {
.ssctl_id = 0x16,
};
+static const struct adsp_data sm8350_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
static const struct adsp_data msm8998_slpi_resource = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
@@ -745,6 +804,10 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
{ .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
+ { .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
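At probe time the per-SoC adsp_data attached to each compatible above is recovered from the match table; a minimal sketch of that lookup using the generic helper (probe body trimmed to the lookup itself):

/* Sketch: retrieving the per-compatible adsp_data during probe. */
static int example_adsp_probe(struct platform_device *pdev)
{
	const struct adsp_data *desc;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -ENODEV;

	/* desc->firmware_name, desc->pas_id, ... select the SoC specifics */
	return 0;
}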
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index f95854255c70..2a6a23cb14ca 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -570,7 +570,7 @@ static int wcnss_probe(struct platform_device *pdev)
if (IS_ERR(mmio)) {
ret = PTR_ERR(mmio);
goto free_rproc;
- };
+ }
ret = wcnss_alloc_memory_region(wcnss);
if (ret)
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index 0e0ae1e764ea..169acd305ae3 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -160,6 +160,7 @@ static int qcom_iris_remove(struct platform_device *pdev)
static const struct of_device_id iris_of_match[] = {
{ .compatible = "qcom,wcn3620", .data = &wcn3620_data },
{ .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
{ .compatible = "qcom,wcn3680", .data = &wcn3680_data },
{}
};
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 2394eef383e3..ab150765d124 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1988,7 +1988,7 @@ int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
goto out;
}
- kfree(rproc->firmware);
+ kfree_const(rproc->firmware);
rproc->firmware = p;
out:
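kfree_const() is required here because rproc->firmware can point either at a kstrdup_const() copy or at a string literal in .rodata; kfree_const() frees only the former and is a no-op for the latter. A small sketch of the pairing (struct and field names illustrative):

/* Sketch: kstrdup_const()/kfree_const() pairing for maybe-constant strings. */
struct example { const char *name; };

static int example_set_name(struct example *e, const char *name)
{
	const char *p;

	p = kstrdup_const(name, GFP_KERNEL);	/* no copy if name is in .rodata */
	if (!p)
		return -ENOMEM;

	kfree_const(e->name);	/* safe for both heap and .rodata pointers */
	e->name = p;
	return 0;
}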
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index a180aeae9675..ccb3c14a0023 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -370,8 +370,13 @@ static int stm32_rproc_request_mbox(struct rproc *rproc)
ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
if (IS_ERR(ddata->mb[i].chan)) {
- if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER)
+ if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER) {
+ dev_err_probe(dev->parent,
+ PTR_ERR(ddata->mb[i].chan),
+ "failed to request mailbox %s\n",
+ name);
goto err_probe;
+ }
dev_warn(dev, "cannot get %s mbox\n", name);
ddata->mb[i].chan = NULL;
}
@@ -592,15 +597,14 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, irq, "failed to get interrupt\n");
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
dev_name(dev), pdev);
- if (err) {
- dev_err(dev, "failed to request wdg irq\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request wdg irq\n");
ddata->wdg_irq = irq;
@@ -613,10 +617,9 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
}
ddata->rst = devm_reset_control_get_by_index(dev, 0);
- if (IS_ERR(ddata->rst)) {
- dev_err(dev, "failed to get mcu reset\n");
- return PTR_ERR(ddata->rst);
- }
+ if (IS_ERR(ddata->rst))
+ return dev_err_probe(dev, PTR_ERR(ddata->rst),
+ "failed to get mcu_reset\n");
/*
* if platform is secured the hold boot bit must be written by
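dev_err_probe(), used throughout the hunks above, folds the two error cases into one call: for -EPROBE_DEFER it stays silent and records the deferral reason for later inspection, for any other error it logs the message, and in both cases it returns the error code so the call can sit directly in a return statement. Roughly equivalent open-coded logic:

/* Sketch: roughly what dev_err_probe(dev, err, "msg\n") does. */
static int example_handle_probe_err(struct device *dev, int err)
{
	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to request wdg irq: %d\n", err);
	/* on -EPROBE_DEFER the real helper records the reason instead */
	return err;
}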
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 71ab75a46491..4171c6f76385 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -89,6 +89,16 @@ config RESET_INTEL_GW
Say Y to control the reset signals provided by reset controller.
Otherwise, say N.
+config RESET_K210
+ bool "Reset controller driver for Canaan Kendryte K210 SoC"
+ depends on (SOC_CANAAN || COMPILE_TEST) && OF
+ select MFD_SYSCON
+ default SOC_CANAAN
+ help
+ Support for the Canaan Kendryte K210 RISC-V SoC reset controller.
+ Say Y if you want to control reset signals provided by this
+ controller.
+
config RESET_LANTIQ
bool "Lantiq XWAY Reset Driver" if COMPILE_TEST
default SOC_TYPE_XWAY
@@ -173,7 +183,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_AGILEX || ARCH_ASPEED || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARC
+ default ARCH_AGILEX || ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARC
help
This enables a simple reset controller driver for reset lines
that can be asserted and deasserted by toggling bits in a contiguous,
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 1054123fd187..65a118a91b27 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
+obj-$(CONFIG_RESET_K210) += reset-k210.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 34e89aa0fb5e..dbf881b586d9 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -875,8 +875,8 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
/**
- * device_reset - find reset controller associated with the device
- * and perform reset
+ * __device_reset - find reset controller associated with the device
+ * and perform reset
* @dev: device to be reset by the controller
* @optional: whether it is optional to reset the device
*
diff --git a/drivers/reset/hisilicon/reset-hi3660.c b/drivers/reset/hisilicon/reset-hi3660.c
index a7d4445924e5..965f5ceba7d8 100644
--- a/drivers/reset/hisilicon/reset-hi3660.c
+++ b/drivers/reset/hisilicon/reset-hi3660.c
@@ -83,9 +83,14 @@ static int hi3660_reset_probe(struct platform_device *pdev)
if (!rc)
return -ENOMEM;
- rc->map = syscon_regmap_lookup_by_phandle(np, "hisi,rst-syscon");
+ rc->map = syscon_regmap_lookup_by_phandle(np, "hisilicon,rst-syscon");
+ if (rc->map == ERR_PTR(-ENODEV)) {
+ /* fall back to the deprecated compatible */
+ rc->map = syscon_regmap_lookup_by_phandle(np,
+ "hisi,rst-syscon");
+ }
if (IS_ERR(rc->map)) {
- dev_err(dev, "failed to get hi3660,rst-syscon\n");
+ dev_err(dev, "failed to get hisilicon,rst-syscon\n");
return PTR_ERR(rc->map);
}
diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c
new file mode 100644
index 000000000000..1b6e03522b40
--- /dev/null
+++ b/drivers/reset/reset-k210.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <soc/canaan/k210-sysctl.h>
+
+#include <dt-bindings/reset/k210-rst.h>
+
+#define K210_RST_MASK 0x27FFFFFF
+
+struct k210_rst {
+ struct regmap *map;
+ struct reset_controller_dev rcdev;
+};
+
+static inline struct k210_rst *
+to_k210_rst(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct k210_rst, rcdev);
+}
+
+static inline int k210_rst_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct k210_rst *ksr = to_k210_rst(rcdev);
+
+ return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), BIT(id));
+}
+
+static inline int k210_rst_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct k210_rst *ksr = to_k210_rst(rcdev);
+
+ return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), 0);
+}
+
+static int k210_rst_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = k210_rst_assert(rcdev, id);
+ if (ret == 0) {
+ udelay(10);
+ ret = k210_rst_deassert(rcdev, id);
+ }
+
+ return ret;
+}
+
+static int k210_rst_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct k210_rst *ksr = to_k210_rst(rcdev);
+ u32 reg, bit = BIT(id);
+ int ret;
+
+ ret = regmap_read(ksr->map, K210_SYSCTL_PERI_RESET, &reg);
+ if (ret)
+ return ret;
+
+ return reg & bit;
+}
+
+static int k210_rst_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned long id = reset_spec->args[0];
+
+ if (!(BIT(id) & K210_RST_MASK))
+ return -EINVAL;
+
+ return id;
+}
+
+static const struct reset_control_ops k210_rst_ops = {
+ .assert = k210_rst_assert,
+ .deassert = k210_rst_deassert,
+ .reset = k210_rst_reset,
+ .status = k210_rst_status,
+};
+
+static int k210_rst_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent_np = of_get_parent(dev->of_node);
+ struct k210_rst *ksr;
+
+ dev_info(dev, "K210 reset controller\n");
+
+ ksr = devm_kzalloc(dev, sizeof(*ksr), GFP_KERNEL);
+ if (!ksr)
+ return -ENOMEM;
+
+ ksr->map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+ if (IS_ERR(ksr->map))
+ return PTR_ERR(ksr->map);
+
+ ksr->rcdev.owner = THIS_MODULE;
+ ksr->rcdev.dev = dev;
+ ksr->rcdev.of_node = dev->of_node;
+ ksr->rcdev.ops = &k210_rst_ops;
+ ksr->rcdev.nr_resets = fls(K210_RST_MASK);
+ ksr->rcdev.of_reset_n_cells = 1;
+ ksr->rcdev.of_xlate = k210_rst_xlate;
+
+ return devm_reset_controller_register(dev, &ksr->rcdev);
+}
+
+static const struct of_device_id k210_rst_dt_ids[] = {
+ { .compatible = "canaan,k210-rst" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k210_rst_driver = {
+ .probe = k210_rst_probe,
+ .driver = {
+ .name = "k210-rst",
+ .of_match_table = k210_rst_dt_ids,
+ },
+};
+builtin_platform_driver(k210_rst_driver);
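Both reset operations lean on regmap_update_bits(map, reg, mask, val), which performs a read-modify-write of (old & ~mask) | (val & mask); this is why the assert path must pass BIT(id), not 1, as the value. A worked illustration of that semantics:

/* Sketch: the value computation inside regmap_update_bits(). */
static unsigned int example_update_bits(unsigned int old,
					unsigned int mask, unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

/*
 * Assert bit 5:   example_update_bits(old, BIT(5), BIT(5)) sets it.
 * Passing val = 1 would leave bit 5 unchanged, since (1 & BIT(5)) == 0.
 * Deassert bit 5: example_update_bits(old, BIT(5), 0) clears it.
 */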
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index e066614818a3..4dda0daf2c6f 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -146,6 +146,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "aspeed,ast2500-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
+ { .compatible = "brcm,bcm4908-misc-pcie-reset",
+ .data = &reset_simple_active_low },
{ .compatible = "snps,dw-high-reset" },
{ .compatible = "snps,dw-low-reset",
.data = &reset_simple_active_low },
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
index dcd1ce616974..dea929c6045d 100644
--- a/drivers/rpmsg/qcom_glink_ssr.c
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -8,15 +8,16 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rpmsg.h>
+#include <linux/rpmsg/qcom_glink.h>
#include <linux/remoteproc/qcom_rproc.h>
/**
* struct do_cleanup_msg - The data structure for an SSR do_cleanup message
- * version: The G-Link SSR protocol version
- * command: The G-Link SSR command - do_cleanup
- * seq_num: Sequence number
- * name_len: Length of the name of the subsystem being restarted
- * name: G-Link edge name of the subsystem being restarted
+ * @version: The G-Link SSR protocol version
+ * @command: The G-Link SSR command - do_cleanup
+ * @seq_num: Sequence number
+ * @name_len: Length of the name of the subsystem being restarted
+ * @name: G-Link edge name of the subsystem being restarted
*/
struct do_cleanup_msg {
__le32 version;
@@ -28,9 +29,9 @@ struct do_cleanup_msg {
/**
* struct cleanup_done_msg - The data structure for an SSR cleanup_done message
- * version: The G-Link SSR protocol version
- * response: The G-Link SSR response to a do_cleanup command, cleanup_done
- * seq_num: Sequence number
+ * @version: The G-Link SSR protocol version
+ * @response: The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num: Sequence number
*/
struct cleanup_done_msg {
__le32 version;
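Both structures cross to the remote side in little-endian layout regardless of host endianness, hence the __le32 fields; a sender converts with cpu_to_le32() when filling them. A hedged sketch (the command value is illustrative, and name is assumed to be a fixed-size array as in the driver):

/* Sketch: populating a do_cleanup message in wire (little-endian) order. */
static void example_fill_do_cleanup(struct do_cleanup_msg *msg,
				    u32 seq, const char *edge_name)
{
	msg->version = cpu_to_le32(0);
	msg->command = cpu_to_le32(1);	/* illustrative do_cleanup opcode */
	msg->seq_num = cpu_to_le32(seq);
	msg->name_len = cpu_to_le32(strlen(edge_name));
	strscpy(msg->name, edge_name, sizeof(msg->name));
}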
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6123f9f4fbc9..ce723dc54aa4 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -692,6 +692,7 @@ config RTC_DRV_S5M
tristate "Samsung S2M/S5M series"
depends on MFD_SEC_CORE || COMPILE_TEST
select REGMAP_IRQ
+ select REGMAP_I2C
help
If you say yes here you will get support for the
RTC of Samsung S2MPS14 and S5M PMIC series.
@@ -973,18 +974,6 @@ config RTC_DRV_ALPHA
Direct support for the real-time clock found on every Alpha
system, specifically MC146818 compatibles. If in doubt, say Y.
-config RTC_DRV_VRTC
- tristate "Virtual RTC for Intel MID platforms"
- depends on X86_INTEL_MID
- default y if X86_INTEL_MID
-
- help
- Say "yes" here to get direct support for the real time clock
- found on Moorestown platforms. The VRTC is a emulated RTC that
- derives its clock source from a real RTC in the PMIC. The MC146818
- style programming interface is mostly conserved, but any
- updates are done via IPC calls to the system controller FW.
-
config RTC_DRV_DS1216
tristate "Dallas DS1216"
depends on SNI_RM
@@ -1270,14 +1259,6 @@ config RTC_DRV_PCF50633
If you say yes here you get support for the RTC subsystem of the
NXP PCF50633 used in embedded systems.
-config RTC_DRV_AB3100
- tristate "ST-Ericsson AB3100 RTC"
- depends on AB3100_CORE
- default y if AB3100_CORE
- help
- Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
- support. This chip contains a battery- and capacitor-backed RTC.
-
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
@@ -1300,7 +1281,7 @@ config RTC_DRV_OPAL
config RTC_DRV_ZYNQMP
tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you say yes here you get support for the RTC controller found on
Xilinx Zynq Ultrascale+ MPSoC.
@@ -1587,13 +1568,6 @@ config RTC_DRV_STARFIRE
If you say Y here you will get support for the RTC found on
Starfire systems.
-config RTC_DRV_TX4939
- tristate "TX4939 SoC"
- depends on SOC_TX4939 || COMPILE_TEST
- help
- Driver for the internal RTC (Realtime Clock) module found on
- Toshiba TX4939 SoC.
-
config RTC_DRV_MV
tristate "Marvell SoC RTC"
depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
@@ -1608,6 +1582,7 @@ config RTC_DRV_MV
config RTC_DRV_ARMADA38X
tristate "Armada 38x Marvell SoC RTC"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
help
If you say yes here you will get support for the in-chip RTC
that can be found in the Armada 38x Marvell's SoC device
@@ -1645,18 +1620,6 @@ config RTC_DRV_PS3
This driver can also be built as a module. If so, the module
will be called rtc-ps3.
-config RTC_DRV_COH901331
- tristate "ST-Ericsson COH 901 331 RTC"
- depends on ARCH_U300 || COMPILE_TEST
- help
- If you say Y here you will get access to ST-Ericsson
- COH 901 331 RTC clock found in some ST-Ericsson Mobile
- Platforms.
-
- This driver can also be built as a module. If so, the module
- will be called "rtc-coh901331".
-
-
config RTC_DRV_STMP
tristate "Freescale STMP3xxx/i.MX23/i.MX28 RTC"
depends on ARCH_MXS || COMPILE_TEST
@@ -1799,13 +1762,6 @@ config RTC_DRV_IMX_SC
If you say yes here you get support for the NXP i.MX System
Controller RTC module.
-config RTC_DRV_SIRFSOC
- tristate "SiRFSOC RTC"
- depends on ARCH_SIRF
- help
- Say "yes" here to support the real time clock on SiRF SOC chips.
- This driver can also be built as a module called rtc-sirfsoc.
-
config RTC_DRV_ST_LPC
tristate "STMicroelectronics LPC RTC"
depends on ARCH_STI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index bb8f319b09fb..4e930183c170 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,7 +19,6 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += sysfs.o
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
-obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
obj-$(CONFIG_RTC_DRV_ABEOZ9) += rtc-ab-eoz9.o
@@ -38,7 +37,6 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_BRCMSTB) += rtc-brcmstb-waketimer.o
obj-$(CONFIG_RTC_DRV_CADENCE) += rtc-cadence.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
-obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
obj-$(CONFIG_RTC_DRV_CPCAP) += rtc-cpcap.o
obj-$(CONFIG_RTC_DRV_CROS_EC) += rtc-cros-ec.o
obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
@@ -154,7 +152,6 @@ obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
-obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o
obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
@@ -171,10 +168,8 @@ obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
-obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
-obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 7e470fbd5e4d..f77bc089eb6b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -231,6 +231,8 @@ static struct rtc_device *rtc_allocate_device(void)
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
+ set_bit(RTC_FEATURE_ALARM, rtc->features);
+
return rtc;
}
@@ -322,11 +324,6 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
rtc->offset_secs = 0;
}
-/**
- * rtc_device_unregister - removes the previously registered RTC class device
- *
- * @rtc: the RTC class device to destroy
- */
static void devm_rtc_unregister_device(void *data)
{
struct rtc_device *rtc = data;
@@ -386,6 +383,9 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
return -EINVAL;
}
+ if (!rtc->ops->set_alarm)
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+
rtc->owner = owner;
rtc_device_get_offset(rtc);
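The new RTC_FEATURE_ALARM bit gives the core a single switch for every alarm path: rtc_allocate_device() sets it by default, registration clears it when the driver provides no set_alarm op, and drivers (armada38x and cmos below) clear it themselves when no usable alarm IRQ is wired up. A minimal sketch of the gate the interface code now applies:

/* Sketch: gating alarm operations on the features bitmap. */
static int example_can_use_alarm(struct rtc_device *rtc)
{
	if (!rtc->ops)
		return -ENODEV;
	if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
		return -EINVAL;	/* no set_alarm op or no usable IRQ */
	return 0;
}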
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 794a4f036b99..dcb34c73319e 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -186,7 +186,7 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc,
if (!rtc->ops) {
err = -ENODEV;
- } else if (!rtc->ops->read_alarm) {
+ } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
err = -EINVAL;
} else {
alarm->enabled = 0;
@@ -392,7 +392,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
if (!rtc->ops) {
err = -ENODEV;
- } else if (!rtc->ops->read_alarm) {
+ } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
err = -EINVAL;
} else {
memset(alarm, 0, sizeof(struct rtc_wkalrm));
@@ -436,7 +436,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (!rtc->ops)
err = -ENODEV;
- else if (!rtc->ops->set_alarm)
+ else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
err = -EINVAL;
else
err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
@@ -451,7 +451,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (!rtc->ops)
return -ENODEV;
- else if (!rtc->ops->set_alarm)
+ else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
return -EINVAL;
err = rtc_valid_tm(&alarm->time);
@@ -531,7 +531,7 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
/* nothing */;
else if (!rtc->ops)
err = -ENODEV;
- else if (!rtc->ops->alarm_irq_enable)
+ else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
err = -EINVAL;
else
err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);
@@ -843,7 +843,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
static void rtc_alarm_disable(struct rtc_device *rtc)
{
- if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+ if (!rtc->ops || !test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
return;
rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
deleted file mode 100644
index e4fd961e8bf6..000000000000
--- a/drivers/rtc/rtc-ab3100.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * RTC clock driver for the AB3100 Analog Baseband Chip
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/mfd/abx500.h>
-
-/* Clock rate in Hz */
-#define AB3100_RTC_CLOCK_RATE 32768
-
-/*
- * The AB3100 RTC registers. These are the same for
- * AB3000 and AB3100.
- * Control register:
- * Bit 0: RTC Monitor cleared=0, active=1, if you set it
- * to 1 it remains active until RTC power is lost.
- * Bit 1: 32 kHz Oscillator, 0 = on, 1 = bypass
- * Bit 2: Alarm on, 0 = off, 1 = on
- * Bit 3: 32 kHz buffer disabling, 0 = enabled, 1 = disabled
- */
-#define AB3100_RTC 0x53
-/* default setting, buffer disabled, alarm on */
-#define RTC_SETTING 0x30
-/* Alarm when AL0-AL3 == TI0-TI3 */
-#define AB3100_AL0 0x56
-#define AB3100_AL1 0x57
-#define AB3100_AL2 0x58
-#define AB3100_AL3 0x59
-/* This 48-bit register that counts up at 32768 Hz */
-#define AB3100_TI0 0x5a
-#define AB3100_TI1 0x5b
-#define AB3100_TI2 0x5c
-#define AB3100_TI3 0x5d
-#define AB3100_TI4 0x5e
-#define AB3100_TI5 0x5f
-
-/*
- * RTC clock functions and device struct declaration
- */
-static int ab3100_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
- AB3100_TI3, AB3100_TI4, AB3100_TI5};
- unsigned char buf[6];
- u64 hw_counter = rtc_tm_to_time64(tm) * AB3100_RTC_CLOCK_RATE * 2;
- int err = 0;
- int i;
-
- buf[0] = (hw_counter) & 0xFF;
- buf[1] = (hw_counter >> 8) & 0xFF;
- buf[2] = (hw_counter >> 16) & 0xFF;
- buf[3] = (hw_counter >> 24) & 0xFF;
- buf[4] = (hw_counter >> 32) & 0xFF;
- buf[5] = (hw_counter >> 40) & 0xFF;
-
- for (i = 0; i < 6; i++) {
- err = abx500_set_register_interruptible(dev, 0,
- regs[i], buf[i]);
- if (err)
- return err;
- }
-
- /* Set the flag to mark that the clock is now set */
- return abx500_mask_and_set_register_interruptible(dev, 0,
- AB3100_RTC,
- 0x01, 0x01);
-
-}
-
-static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- time64_t time;
- u8 rtcval;
- int err;
-
- err = abx500_get_register_interruptible(dev, 0,
- AB3100_RTC, &rtcval);
- if (err)
- return err;
-
- if (!(rtcval & 0x01)) {
- dev_info(dev, "clock not set (lost power)");
- return -EINVAL;
- } else {
- u64 hw_counter;
- u8 buf[6];
-
- /* Read out time registers */
- err = abx500_get_register_page_interruptible(dev, 0,
- AB3100_TI0,
- buf, 6);
- if (err != 0)
- return err;
-
- hw_counter = ((u64) buf[5] << 40) | ((u64) buf[4] << 32) |
- ((u64) buf[3] << 24) | ((u64) buf[2] << 16) |
- ((u64) buf[1] << 8) | (u64) buf[0];
- time = hw_counter / (u64) (AB3100_RTC_CLOCK_RATE * 2);
- }
-
- rtc_time64_to_tm(time, tm);
-
- return 0;
-}
-
-static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- time64_t time;
- u64 hw_counter;
- u8 buf[6];
- u8 rtcval;
- int err;
-
- /* Figure out if alarm is enabled or not */
- err = abx500_get_register_interruptible(dev, 0,
- AB3100_RTC, &rtcval);
- if (err)
- return err;
- if (rtcval & 0x04)
- alarm->enabled = 1;
- else
- alarm->enabled = 0;
- /* No idea how this could be represented */
- alarm->pending = 0;
- /* Read out alarm registers, only 4 bytes */
- err = abx500_get_register_page_interruptible(dev, 0,
- AB3100_AL0, buf, 4);
- if (err)
- return err;
- hw_counter = ((u64) buf[3] << 40) | ((u64) buf[2] << 32) |
- ((u64) buf[1] << 24) | ((u64) buf[0] << 16);
- time = hw_counter / (u64) (AB3100_RTC_CLOCK_RATE * 2);
-
- rtc_time64_to_tm(time, &alarm->time);
-
- return rtc_valid_tm(&alarm->time);
-}
-
-static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3};
- unsigned char buf[4];
- time64_t secs;
- u64 hw_counter;
- int err;
- int i;
-
- secs = rtc_tm_to_time64(&alarm->time);
- hw_counter = secs * AB3100_RTC_CLOCK_RATE * 2;
- buf[0] = (hw_counter >> 16) & 0xFF;
- buf[1] = (hw_counter >> 24) & 0xFF;
- buf[2] = (hw_counter >> 32) & 0xFF;
- buf[3] = (hw_counter >> 40) & 0xFF;
-
- /* Set the alarm */
- for (i = 0; i < 4; i++) {
- err = abx500_set_register_interruptible(dev, 0,
- regs[i], buf[i]);
- if (err)
- return err;
- }
- /* Then enable the alarm */
- return abx500_mask_and_set_register_interruptible(dev, 0,
- AB3100_RTC, (1 << 2),
- alarm->enabled << 2);
-}
-
-static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
-{
- /*
- * It's not possible to enable/disable the alarm IRQ for this RTC.
- * It does not actually trigger any IRQ: instead its only function is
- * to power up the system, if it wasn't on. This will manifest as
- * a "power up cause" in the AB3100 power driver (battery charging etc)
- * and need to be handled there instead.
- */
- if (enabled)
- return abx500_mask_and_set_register_interruptible(dev, 0,
- AB3100_RTC, (1 << 2),
- 1 << 2);
- else
- return abx500_mask_and_set_register_interruptible(dev, 0,
- AB3100_RTC, (1 << 2),
- 0);
-}
-
-static const struct rtc_class_ops ab3100_rtc_ops = {
- .read_time = ab3100_rtc_read_time,
- .set_time = ab3100_rtc_set_time,
- .read_alarm = ab3100_rtc_read_alarm,
- .set_alarm = ab3100_rtc_set_alarm,
- .alarm_irq_enable = ab3100_rtc_irq_enable,
-};
-
-static int __init ab3100_rtc_probe(struct platform_device *pdev)
-{
- int err;
- u8 regval;
- struct rtc_device *rtc;
-
- /* The first RTC register needs special treatment */
- err = abx500_get_register_interruptible(&pdev->dev, 0,
- AB3100_RTC, &regval);
- if (err) {
- dev_err(&pdev->dev, "unable to read RTC register\n");
- return -ENODEV;
- }
-
- if ((regval & 0xFE) != RTC_SETTING) {
- dev_warn(&pdev->dev, "not default value in RTC reg 0x%x\n",
- regval);
- }
-
- if ((regval & 1) == 0) {
- /*
- * Set bit to detect power loss.
- * This bit remains until RTC power is lost.
- */
- regval = 1 | RTC_SETTING;
- err = abx500_set_register_interruptible(&pdev->dev, 0,
- AB3100_RTC, regval);
- /* Ignore any error on this write */
- }
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- rtc->ops = &ab3100_rtc_ops;
- /* 48bit counter at (AB3100_RTC_CLOCK_RATE * 2) */
- rtc->range_max = U32_MAX;
-
- platform_set_drvdata(pdev, rtc);
-
- return devm_rtc_register_device(rtc);
-}
-
-static struct platform_driver ab3100_rtc_driver = {
- .driver = {
- .name = "ab3100-rtc",
- },
-};
-
-module_platform_driver_probe(ab3100_rtc_driver, ab3100_rtc_probe);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("AB3100 RTC Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 6733bb0df674..9b0138d07232 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -117,6 +117,16 @@ struct abx80x_priv {
struct watchdog_device wdog;
};
+static int abx80x_write_config_key(struct i2c_client *client, u8 key)
+{
+ if (i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, key) < 0) {
+ dev_err(&client->dev, "Unable to write configuration key\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int abx80x_is_rc_mode(struct i2c_client *client)
{
int flags = 0;
@@ -140,12 +150,8 @@ static int abx80x_enable_trickle_charger(struct i2c_client *client,
* Write the configuration key register to enable access to the Trickle
* register
*/
- err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
- ABX8XX_CFG_KEY_MISC);
- if (err < 0) {
- dev_err(&client->dev, "Unable to write configuration key\n");
+ if (abx80x_write_config_key(client, ABX8XX_CFG_KEY_MISC) < 0)
return -EIO;
- }
err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE,
ABX8XX_TRICKLE_CHARGE_ENABLE |
@@ -358,12 +364,8 @@ static int abx80x_rtc_set_autocalibration(struct device *dev,
}
/* Unlock write access to Oscillator Control Register */
- retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
- ABX8XX_CFG_KEY_OSC);
- if (retval < 0) {
- dev_err(dev, "Failed to write CONFIG_KEY register\n");
- return retval;
- }
+ if (abx80x_write_config_key(client, ABX8XX_CFG_KEY_OSC) < 0)
+ return -EIO;
retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags);
@@ -450,12 +452,8 @@ static ssize_t oscillator_store(struct device *dev,
flags |= (ABX8XX_OSC_OSEL);
/* Unlock write access on Oscillator Control register */
- retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
- ABX8XX_CFG_KEY_OSC);
- if (retval < 0) {
- dev_err(dev, "Failed to write CONFIG_KEY register\n");
- return retval;
- }
+ if (abx80x_write_config_key(client, ABX8XX_CFG_KEY_OSC) < 0)
+ return -EIO;
retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags);
if (retval < 0) {
@@ -762,13 +760,8 @@ static int abx80x_probe(struct i2c_client *client,
* Write the configuration key register to enable access to
* the config2 register
*/
- err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
- ABX8XX_CFG_KEY_MISC);
- if (err < 0) {
- dev_err(&client->dev,
- "Unable to write configuration key\n");
+ if (abx80x_write_config_key(client, ABX8XX_CFG_KEY_MISC) < 0)
return -EIO;
- }
err = i2c_smbus_write_byte_data(client, ABX8XX_REG_OUT_CTRL,
data | ABX8XX_OUT_CTRL_EXDS);
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 1ddbef99e38f..66783cb5e711 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -528,7 +528,7 @@ static irqreturn_t ac100_rtc_irq(int irq, void *data)
unsigned int val = 0;
int ret;
- mutex_lock(&chip->rtc->ops_lock);
+ rtc_lock(chip->rtc);
/* read status */
ret = regmap_read(regmap, AC100_ALM_INT_STA, &val);
@@ -551,7 +551,7 @@ static irqreturn_t ac100_rtc_irq(int irq, void *data)
}
out:
- mutex_unlock(&chip->rtc->ops_lock);
+ rtc_unlock(chip->rtc);
return IRQ_HANDLED;
}
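rtc_lock()/rtc_unlock() wrap the device's ops_lock mutex, so threaded interrupt handlers no longer reach into struct rtc_device internals; the conversions in this and the following RTC drivers all share the same shape, sketched below:

/* Sketch: the common threaded-IRQ pattern after the rtc_lock() conversion. */
static irqreturn_t example_rtc_irq(int irq, void *dev_id)
{
	struct rtc_device *rtc = dev_id;

	rtc_lock(rtc);		/* serialize against ops callbacks */
	/* ... read and clear the hardware alarm status here ... */
	rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
	rtc_unlock(rtc);

	return IRQ_HANDLED;
}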
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 807a79c07f08..cc542e6b1d5b 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -458,14 +458,6 @@ static const struct rtc_class_ops armada38x_rtc_ops = {
.set_offset = armada38x_rtc_set_offset,
};
-static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
- .read_time = armada38x_rtc_read_time,
- .set_time = armada38x_rtc_set_time,
- .read_alarm = armada38x_rtc_read_alarm,
- .read_offset = armada38x_rtc_read_offset,
- .set_offset = armada38x_rtc_set_offset,
-};
-
static const struct armada38x_rtc_data armada38x_data = {
.update_mbus_timing = rtc_update_38x_mbus_timing_params,
.read_rtc_reg = read_rtc_register_38x_wa,
@@ -540,20 +532,15 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
- if (rtc->irq != -1) {
+ if (rtc->irq != -1)
device_init_wakeup(&pdev->dev, 1);
- rtc->rtc_dev->ops = &armada38x_rtc_ops;
- } else {
- /*
- * If there is no interrupt available then we can't
- * use the alarm
- */
- rtc->rtc_dev->ops = &armada38x_rtc_ops_noirq;
- }
+ else
+ clear_bit(RTC_FEATURE_ALARM, rtc->rtc_dev->features);
/* Update RTC-MBUS bridge timing parameters */
rtc->data->update_mbus_timing(rtc);
+ rtc->rtc_dev->ops = &armada38x_rtc_ops;
rtc->rtc_dev->range_max = U32_MAX;
return devm_rtc_register_device(rtc->rtc_dev);
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 3ab81cdec00b..de795e489f71 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -116,15 +116,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
u32 isr;
unsigned long events = 0;
- mutex_lock(&priv->rtc->ops_lock);
+ rtc_lock(priv->rtc);
isr = ioread32(priv->iobase + HW_CIIR);
if (!isr) {
- mutex_unlock(&priv->rtc->ops_lock);
+ rtc_unlock(priv->rtc);
return IRQ_NONE;
}
iowrite32(0, priv->iobase + HW_CIIR);
- mutex_unlock(&priv->rtc->ops_lock);
+ rtc_unlock(priv->rtc);
events |= RTC_AF | RTC_IRQF;
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 933e4237237d..2235c968842d 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -311,7 +311,7 @@ static const struct i2c_device_id bq32k_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bq32k_id);
-static const struct of_device_id bq32k_of_match[] = {
+static const __maybe_unused struct of_device_id bq32k_of_match[] = {
{ .compatible = "ti,bq32000" },
{ }
};
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 0366e2ff04ae..c74130e8f496 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -306,7 +306,7 @@ static int brcmstb_waketmr_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
-static const struct of_device_id brcmstb_waketmr_of_match[] = {
+static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
{ .compatible = "brcm,brcmstb-waketimer" },
{ /* sentinel */ },
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 51e80bc70d42..670fd8a2970e 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -574,12 +574,6 @@ static const struct rtc_class_ops cmos_rtc_ops = {
.alarm_irq_enable = cmos_alarm_irq_enable,
};
-static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
- .read_time = cmos_read_time,
- .set_time = cmos_set_time,
- .proc = cmos_procfs,
-};
-
/*----------------------------------------------------------------*/
/*
@@ -649,11 +643,10 @@ static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
- unsigned long flags;
u8 irqstat;
u8 rtc_control;
- spin_lock_irqsave(&rtc_lock, flags);
+ spin_lock(&rtc_lock);
/* When the HPET interrupt handler calls us, the interrupt
* status is passed as arg1 instead of the irq number. But
@@ -687,7 +680,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
- spin_unlock_irqrestore(&rtc_lock, flags);
+ spin_unlock(&rtc_lock);
if (is_intr(irqstat)) {
rtc_update_irq(p, 1, irqstat);
@@ -805,6 +798,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_lock_irq(&rtc_lock);
+ /* Ensure that the RTC is accessible. Bit 6 must be 0! */
+ if ((CMOS_READ(RTC_VALID) & 0x40) != 0) {
+ spin_unlock_irq(&rtc_lock);
+ dev_warn(dev, "not accessible\n");
+ retval = -ENXIO;
+ goto cleanup1;
+ }
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
@@ -857,12 +858,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
goto cleanup1;
}
-
- cmos_rtc.rtc->ops = &cmos_rtc_ops;
} else {
- cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
+ clear_bit(RTC_FEATURE_ALARM, cmos_rtc.rtc->features);
}
+ cmos_rtc.rtc->ops = &cmos_rtc_ops;
+
retval = devm_rtc_register_device(cmos_rtc.rtc);
if (retval)
goto cleanup2;
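Two independent cmos fixes appear above: probe now bails out when bit 6 of the RTC_VALID register flags the RTC as inaccessible, and the interrupt handler drops the _irqsave variant because it only runs with interrupts already disabled (hardirq context, or the HPET callback), where a plain spin_lock() suffices. A sketch of that locking choice:

/* Sketch: plain spin_lock() is enough when IRQs are already disabled. */
static DEFINE_SPINLOCK(example_lock);

static irqreturn_t example_irq(int irq, void *p)
{
	spin_lock(&example_lock);	/* never entered with IRQs enabled */
	/* ... read and acknowledge the hardware status ... */
	spin_unlock(&example_lock);

	return IRQ_HANDLED;
}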
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
deleted file mode 100644
index 168ced87d93a..000000000000
--- a/drivers/rtc/rtc-coh901331.c
+++ /dev/null
@@ -1,290 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * Real Time Clock interface for ST-Ericsson AB COH 901 331 RTC.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Based on rtc-pl031.c by Deepak Saxena <dsaxena@plexity.net>
- * Copyright 2006 (c) MontaVista Software, Inc.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/rtc.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-/*
- * Registers in the COH 901 331
- */
-/* Alarm value 32bit (R/W) */
-#define COH901331_ALARM 0x00U
-/* Used to set current time 32bit (R/W) */
-#define COH901331_SET_TIME 0x04U
-/* Indication if current time is valid 32bit (R/-) */
-#define COH901331_VALID 0x08U
-/* Read the current time 32bit (R/-) */
-#define COH901331_CUR_TIME 0x0cU
-/* Event register for the "alarm" interrupt */
-#define COH901331_IRQ_EVENT 0x10U
-/* Mask register for the "alarm" interrupt */
-#define COH901331_IRQ_MASK 0x14U
-/* Force register for the "alarm" interrupt */
-#define COH901331_IRQ_FORCE 0x18U
-
-/*
- * Reference to RTC block clock
- * Notice that the frequent clk_enable()/clk_disable() on this
- * clock is mainly to be able to turn on/off other clocks in the
- * hierarchy as needed, the RTC clock is always on anyway.
- */
-struct coh901331_port {
- struct rtc_device *rtc;
- struct clk *clk;
- void __iomem *virtbase;
- int irq;
-#ifdef CONFIG_PM_SLEEP
- u32 irqmaskstore;
-#endif
-};
-
-static irqreturn_t coh901331_interrupt(int irq, void *data)
-{
- struct coh901331_port *rtap = data;
-
- clk_enable(rtap->clk);
- /* Ack IRQ */
- writel(1, rtap->virtbase + COH901331_IRQ_EVENT);
- /*
- * Disable the interrupt. This is necessary because
- * the RTC lives on a lower-clocked line and will
- * not release the IRQ line until after a few (slower)
- * clock cycles. The interrupt will be re-enabled when
- * a new alarm is set anyway.
- */
- writel(0, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable(rtap->clk);
-
- /* Set alarm flag */
- rtc_update_irq(rtap->rtc, 1, RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static int coh901331_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- clk_enable(rtap->clk);
- /* Check if the time is valid */
- if (!readl(rtap->virtbase + COH901331_VALID)) {
- clk_disable(rtap->clk);
- return -EINVAL;
- }
-
- rtc_time64_to_tm(readl(rtap->virtbase + COH901331_CUR_TIME), tm);
- clk_disable(rtap->clk);
- return 0;
-}
-
-static int coh901331_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- clk_enable(rtap->clk);
- writel(rtc_tm_to_time64(tm), rtap->virtbase + COH901331_SET_TIME);
- clk_disable(rtap->clk);
-
- return 0;
-}
-
-static int coh901331_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- clk_enable(rtap->clk);
- rtc_time64_to_tm(readl(rtap->virtbase + COH901331_ALARM), &alarm->time);
- alarm->pending = readl(rtap->virtbase + COH901331_IRQ_EVENT) & 1U;
- alarm->enabled = readl(rtap->virtbase + COH901331_IRQ_MASK) & 1U;
- clk_disable(rtap->clk);
-
- return 0;
-}
-
-static int coh901331_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
- unsigned long time = rtc_tm_to_time64(&alarm->time);
-
- clk_enable(rtap->clk);
- writel(time, rtap->virtbase + COH901331_ALARM);
- writel(alarm->enabled, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable(rtap->clk);
-
- return 0;
-}
-
-static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- clk_enable(rtap->clk);
- if (enabled)
- writel(1, rtap->virtbase + COH901331_IRQ_MASK);
- else
- writel(0, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable(rtap->clk);
-
- return 0;
-}
-
-static const struct rtc_class_ops coh901331_ops = {
- .read_time = coh901331_read_time,
- .set_time = coh901331_set_time,
- .read_alarm = coh901331_read_alarm,
- .set_alarm = coh901331_set_alarm,
- .alarm_irq_enable = coh901331_alarm_irq_enable,
-};
-
-static int __exit coh901331_remove(struct platform_device *pdev)
-{
- struct coh901331_port *rtap = platform_get_drvdata(pdev);
-
- if (rtap)
- clk_unprepare(rtap->clk);
-
- return 0;
-}
-
-
-static int __init coh901331_probe(struct platform_device *pdev)
-{
- int ret;
- struct coh901331_port *rtap;
-
- rtap = devm_kzalloc(&pdev->dev,
- sizeof(struct coh901331_port), GFP_KERNEL);
- if (!rtap)
- return -ENOMEM;
-
- rtap->virtbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(rtap->virtbase))
- return PTR_ERR(rtap->virtbase);
-
- rtap->irq = platform_get_irq(pdev, 0);
- if (devm_request_irq(&pdev->dev, rtap->irq, coh901331_interrupt, 0,
- "RTC COH 901 331 Alarm", rtap))
- return -EIO;
-
- rtap->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(rtap->clk)) {
- ret = PTR_ERR(rtap->clk);
- dev_err(&pdev->dev, "could not get clock\n");
- return ret;
- }
-
- rtap->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtap->rtc))
- return PTR_ERR(rtap->rtc);
-
- rtap->rtc->ops = &coh901331_ops;
- rtap->rtc->range_max = U32_MAX;
-
- /* We enable/disable the clock only to assure it works */
- ret = clk_prepare_enable(rtap->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
- clk_disable(rtap->clk);
-
- platform_set_drvdata(pdev, rtap);
-
- ret = devm_rtc_register_device(rtap->rtc);
- if (ret)
- goto out_no_rtc;
-
- return 0;
-
- out_no_rtc:
- clk_unprepare(rtap->clk);
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int coh901331_suspend(struct device *dev)
-{
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- /*
- * If this RTC alarm will be used for waking the system up,
- * don't disable it of course. Else we just disable the alarm
- * and await suspension.
- */
- if (device_may_wakeup(dev)) {
- enable_irq_wake(rtap->irq);
- } else {
- clk_enable(rtap->clk);
- rtap->irqmaskstore = readl(rtap->virtbase + COH901331_IRQ_MASK);
- writel(0, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable(rtap->clk);
- }
- clk_unprepare(rtap->clk);
- return 0;
-}
-
-static int coh901331_resume(struct device *dev)
-{
- int ret;
- struct coh901331_port *rtap = dev_get_drvdata(dev);
-
- ret = clk_prepare(rtap->clk);
- if (ret)
- return ret;
-
- if (device_may_wakeup(dev)) {
- disable_irq_wake(rtap->irq);
- } else {
- clk_enable(rtap->clk);
- writel(rtap->irqmaskstore, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable(rtap->clk);
- }
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(coh901331_pm_ops, coh901331_suspend, coh901331_resume);
-
-static void coh901331_shutdown(struct platform_device *pdev)
-{
- struct coh901331_port *rtap = platform_get_drvdata(pdev);
-
- clk_enable(rtap->clk);
- writel(0, rtap->virtbase + COH901331_IRQ_MASK);
- clk_disable_unprepare(rtap->clk);
-}
-
-static const struct of_device_id coh901331_dt_match[] = {
- { .compatible = "stericsson,coh901331" },
- {},
-};
-MODULE_DEVICE_TABLE(of, coh901331_dt_match);
-
-static struct platform_driver coh901331_driver = {
- .driver = {
- .name = "rtc-coh901331",
- .pm = &coh901331_pm_ops,
- .of_match_table = coh901331_dt_match,
- },
- .remove = __exit_p(coh901331_remove),
- .shutdown = coh901331_shutdown,
-};
-
-module_platform_driver_probe(coh901331_driver, coh901331_probe);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("ST-Ericsson AB COH 901 331 RTC Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 4fdfa5b6feb2..218a6de19247 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -205,7 +205,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc->rtc_dev);
}
-static const struct of_device_id dc_dt_ids[] = {
+static const __maybe_unused struct of_device_id dc_dt_ids[] = {
{ .compatible = "cnxt,cx92755-rtc" },
{ /* sentinel */ }
};
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 8c2ab29c3d91..9ef107b99b65 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -435,13 +435,12 @@ static const struct rtc_class_ops ds1305_ops = {
static void ds1305_work(struct work_struct *work)
{
struct ds1305 *ds1305 = container_of(work, struct ds1305, work);
- struct mutex *lock = &ds1305->rtc->ops_lock;
struct spi_device *spi = ds1305->spi;
u8 buf[3];
int status;
/* lock to protect ds1305->ctrl */
- mutex_lock(lock);
+ rtc_lock(ds1305->rtc);
/* Disable the IRQ, and clear its status ... for now, we "know"
* that if more than one alarm is active, they're in sync.
@@ -459,7 +458,7 @@ static void ds1305_work(struct work_struct *work)
if (status < 0)
dev_dbg(&spi->dev, "clear irq --> %d\n", status);
- mutex_unlock(lock);
+ rtc_unlock(ds1305->rtc);
if (!test_bit(FLAG_EXITING, &ds1305->flags))
enable_irq(spi->irq);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 183cf7c01364..cd8e438bc9c4 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -558,11 +558,10 @@ static u8 do_trickle_setup_rx8130(struct ds1307 *ds1307, u32 ohms, bool diode)
static irqreturn_t rx8130_irq(int irq, void *dev_id)
{
struct ds1307 *ds1307 = dev_id;
- struct mutex *lock = &ds1307->rtc->ops_lock;
u8 ctl[3];
int ret;
- mutex_lock(lock);
+ rtc_lock(ds1307->rtc);
/* Read control registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
@@ -582,7 +581,7 @@ static irqreturn_t rx8130_irq(int irq, void *dev_id)
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
out:
- mutex_unlock(lock);
+ rtc_unlock(ds1307->rtc);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 630493759d15..4cd8efbef6cf 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -139,7 +139,7 @@ static const struct i2c_device_id ds1672_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1672_id);
-static const struct of_device_id ds1672_of_match[] = {
+static const __maybe_unused struct of_device_id ds1672_of_match[] = {
{ .compatible = "dallas,ds1672" },
{ }
};
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index d69c807af29b..75db7ab654a5 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -658,7 +658,6 @@ ds1685_rtc_irq_handler(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
- struct mutex *rtc_mutex;
u8 ctrlb, ctrlc;
unsigned long events = 0;
u8 num_irqs = 0;
@@ -667,8 +666,7 @@ ds1685_rtc_irq_handler(int irq, void *dev_id)
if (unlikely(!rtc))
return IRQ_HANDLED;
- rtc_mutex = &rtc->dev->ops_lock;
- mutex_lock(rtc_mutex);
+ rtc_lock(rtc->dev);
/* Ctrlb holds the interrupt-enable bits and ctrlc the flag bits. */
ctrlb = rtc->read(rtc, RTC_CTRL_B);
@@ -713,7 +711,7 @@ ds1685_rtc_irq_handler(int irq, void *dev_id)
}
}
rtc_update_irq(rtc->dev, num_irqs, events);
- mutex_unlock(rtc_mutex);
+ rtc_unlock(rtc->dev);
return events ? IRQ_HANDLED : IRQ_NONE;
}
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 16b89035d135..168bc27f1f5a 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -406,11 +406,10 @@ static irqreturn_t ds3232_irq(int irq, void *dev_id)
{
struct device *dev = dev_id;
struct ds3232 *ds3232 = dev_get_drvdata(dev);
- struct mutex *lock = &ds3232->rtc->ops_lock;
int ret;
int stat, control;
- mutex_lock(lock);
+ rtc_lock(ds3232->rtc);
ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
if (ret)
@@ -448,7 +447,7 @@ static irqreturn_t ds3232_irq(int irq, void *dev_id)
}
unlock:
- mutex_unlock(lock);
+ rtc_unlock(ds3232->rtc);
return IRQ_HANDLED;
}
@@ -593,7 +592,7 @@ static const struct i2c_device_id ds3232_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds3232_id);
-static const struct of_device_id ds3232_of_match[] = {
+static const __maybe_unused struct of_device_id ds3232_of_match[] = {
{ .compatible = "dallas,ds3232" },
{ }
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 24e0095be058..0751cae27285 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -428,10 +428,9 @@ static irqreturn_t hym8563_irq(int irq, void *dev_id)
{
struct hym8563 *hym8563 = (struct hym8563 *)dev_id;
struct i2c_client *client = hym8563->client;
- struct mutex *lock = &hym8563->rtc->ops_lock;
int data, ret;
- mutex_lock(lock);
+ rtc_lock(hym8563->rtc);
/* Clear the alarm flag */
@@ -451,7 +450,7 @@ static irqreturn_t hym8563_irq(int irq, void *dev_id)
}
out:
- mutex_unlock(lock);
+ rtc_unlock(hym8563->rtc);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 563a6d9c9fcf..182dfa605515 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -99,7 +99,7 @@ static const struct i2c_device_id isl1208_id[] = {
};
MODULE_DEVICE_TABLE(i2c, isl1208_id);
-static const struct of_device_id isl1208_of_match[] = {
+static const __maybe_unused struct of_device_id isl1208_of_match[] = {
{ .compatible = "isil,isl1208", .data = &isl1208_configs[TYPE_ISL1208] },
{ .compatible = "isil,isl1209", .data = &isl1208_configs[TYPE_ISL1209] },
{ .compatible = "isil,isl1218", .data = &isl1208_configs[TYPE_ISL1218] },
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 160dcf68e64e..89128fc29ccc 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -85,7 +85,7 @@ static const struct i2c_device_id m41t80_id[] = {
};
MODULE_DEVICE_TABLE(i2c, m41t80_id);
-static const struct of_device_id m41t80_of_match[] = {
+static const __maybe_unused struct of_device_id m41t80_of_match[] = {
{
.compatible = "st,m41t62",
.data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT)
@@ -158,21 +158,20 @@ static irqreturn_t m41t80_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct m41t80_data *m41t80 = i2c_get_clientdata(client);
- struct mutex *lock = &m41t80->rtc->ops_lock;
unsigned long events = 0;
int flags, flags_afe;
- mutex_lock(lock);
+ rtc_lock(m41t80->rtc);
flags_afe = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (flags_afe < 0) {
- mutex_unlock(lock);
+ rtc_unlock(m41t80->rtc);
return IRQ_NONE;
}
flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (flags <= 0) {
- mutex_unlock(lock);
+ rtc_unlock(m41t80->rtc);
return IRQ_NONE;
}
@@ -189,7 +188,7 @@ static irqreturn_t m41t80_handle_irq(int irq, void *dev_id)
flags_afe);
}
- mutex_unlock(lock);
+ rtc_unlock(m41t80->rtc);
return IRQ_HANDLED;
}
@@ -397,10 +396,13 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static struct rtc_class_ops m41t80_rtc_ops = {
+static const struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
.proc = m41t80_rtc_proc,
+ .read_alarm = m41t80_read_alarm,
+ .set_alarm = m41t80_set_alarm,
+ .alarm_irq_enable = m41t80_alarm_irq_enable,
};
#ifdef CONFIG_PM_SLEEP
@@ -783,7 +785,7 @@ static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
*/
static int wdt_open(struct inode *inode, struct file *file)
{
- if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
+ if (iminor(inode) == WATCHDOG_MINOR) {
mutex_lock(&m41t80_rtc_mutex);
if (test_and_set_bit(0, &wdt_is_open)) {
mutex_unlock(&m41t80_rtc_mutex);
@@ -807,7 +809,7 @@ static int wdt_open(struct inode *inode, struct file *file)
*/
static int wdt_release(struct inode *inode, struct file *file)
{
- if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
+ if (iminor(inode) == WATCHDOG_MINOR)
clear_bit(0, &wdt_is_open);
return 0;
}
@@ -913,13 +915,10 @@ static int m41t80_probe(struct i2c_client *client,
wakeup_source = false;
}
}
- if (client->irq > 0 || wakeup_source) {
- m41t80_rtc_ops.read_alarm = m41t80_read_alarm;
- m41t80_rtc_ops.set_alarm = m41t80_set_alarm;
- m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable;
- /* Enable the wakealarm */
+ if (client->irq > 0 || wakeup_source)
device_init_wakeup(&client->dev, true);
- }
+ else
+ clear_bit(RTC_FEATURE_ALARM, m41t80_data->rtc->features);
m41t80_data->rtc->ops = &m41t80_rtc_ops;
m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
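
The m41t80 change shows the series' central idiom: instead of a writable rtc_class_ops that gains alarm callbacks at probe time, the driver publishes a single const table and signals missing alarm support by clearing RTC_FEATURE_ALARM in the device's features bitmap; the RTC core then rejects alarm requests itself. A hedged probe-time sketch, with every baz_* name hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

/* Hypothetical no-op callbacks standing in for real register accessors. */
static int baz_read_time(struct device *dev, struct rtc_time *tm) { return 0; }
static int baz_set_time(struct device *dev, struct rtc_time *tm) { return 0; }
static int baz_read_alarm(struct device *dev, struct rtc_wkalrm *a) { return 0; }
static int baz_set_alarm(struct device *dev, struct rtc_wkalrm *a) { return 0; }
static int baz_alarm_irq_enable(struct device *dev, unsigned int en) { return 0; }

/* One const table: the alarm callbacks stay, the feature bit gates them. */
static const struct rtc_class_ops baz_rtc_ops = {
        .read_time = baz_read_time,
        .set_time = baz_set_time,
        .read_alarm = baz_read_alarm,
        .set_alarm = baz_set_alarm,
        .alarm_irq_enable = baz_alarm_irq_enable,
};

static int baz_probe(struct platform_device *pdev)
{
        struct rtc_device *rtc;
        int irq = platform_get_irq_optional(pdev, 0);

        rtc = devm_rtc_allocate_device(&pdev->dev);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);

        /* No usable interrupt: advertise that instead of swapping ops. */
        if (irq <= 0)
                clear_bit(RTC_FEATURE_ALARM, rtc->features);

        rtc->ops = &baz_rtc_ops;
        return devm_rtc_register_device(rtc);
}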
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 5f5898d3b055..1d2e99a70fce 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -313,11 +313,6 @@ static const struct rtc_class_ops m48t59_rtc_ops = {
.alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
};
-static const struct rtc_class_ops m48t02_rtc_ops = {
- .read_time = m48t59_rtc_read_time,
- .set_time = m48t59_rtc_set_time,
-};
-
static int m48t59_nvram_read(void *priv, unsigned int offset, void *val,
size_t size)
{
@@ -366,7 +361,6 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
struct m48t59_private *m48t59 = NULL;
struct resource *res;
int ret = -ENOMEM;
- const struct rtc_class_ops *ops;
struct nvmem_config nvmem_cfg = {
.name = "m48t59-",
.word_size = 1,
@@ -438,17 +432,21 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
}
+
+ m48t59->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(m48t59->rtc))
+ return PTR_ERR(m48t59->rtc);
+
switch (pdata->type) {
case M48T59RTC_TYPE_M48T59:
- ops = &m48t59_rtc_ops;
pdata->offset = 0x1ff0;
break;
case M48T59RTC_TYPE_M48T02:
- ops = &m48t02_rtc_ops;
+ clear_bit(RTC_FEATURE_ALARM, m48t59->rtc->features);
pdata->offset = 0x7f0;
break;
case M48T59RTC_TYPE_M48T08:
- ops = &m48t02_rtc_ops;
+ clear_bit(RTC_FEATURE_ALARM, m48t59->rtc->features);
pdata->offset = 0x1ff0;
break;
default:
@@ -459,11 +457,7 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
spin_lock_init(&m48t59->lock);
platform_set_drvdata(pdev, m48t59);
- m48t59->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(m48t59->rtc))
- return PTR_ERR(m48t59->rtc);
-
- m48t59->rtc->ops = ops;
+ m48t59->rtc->ops = &m48t59_rtc_ops;
nvmem_cfg.size = pdata->offset;
ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 972a5b9a629d..dcfaf09946ee 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,6 +21,13 @@ unsigned int mc146818_get_time(struct rtc_time *time)
again:
spin_lock_irqsave(&rtc_lock, flags);
+ /* Ensure that the RTC is accessible. Bit 6 must be 0! */
+ if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ memset(time, 0xff, sizeof(*time));
+ return 0;
+ }
+
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
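
The mc146818 guard covers systems with nothing behind the legacy RTC ports: a floating bus reads back all-ones, so bit 6 of register D (RTC_VALID), which reads zero on working hardware, doubles as a liveness check; the code warns once, poisons the output with 0xff, and returns rather than polling an update-in-progress bit that will never clear. The same defensive idiom against a hypothetical memory-mapped device (offset and mask are made up):

#include <linux/io.h>
#include <linux/kernel.h>

/*
 * Sketch: 0x0d and 0x40 are illustrative. A reserved bit the datasheet
 * guarantees as zero is a cheap probe for an absent or hung device,
 * which typically reads back 0xff.
 */
static bool qux_present(void __iomem *base)
{
        u8 status = readb(base + 0x0d);

        if (WARN_ON_ONCE(status & 0x40))
                return false;   /* warn once, then fail gracefully */

        return true;
}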
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 21cbf7f892e8..bad7792b6ca5 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -350,10 +350,9 @@ static irqreturn_t mcp795_irq(int irq, void *data)
{
struct spi_device *spi = data;
struct rtc_device *rtc = spi_get_drvdata(spi);
- struct mutex *lock = &rtc->ops_lock;
int ret;
- mutex_lock(lock);
+ rtc_lock(rtc);
/* Disable alarm.
* There is no need to clear ALM0IF (Alarm 0 Interrupt Flag) bit,
@@ -365,7 +364,7 @@ static irqreturn_t mcp795_irq(int irq, void *data)
"Failed to disable alarm in IRQ (ret=%d)\n", ret);
rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
- mutex_unlock(lock);
+ rtc_unlock(rtc);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index 8642c06565ea..44bdc8b4a90d 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -380,7 +380,7 @@ out_disable_vdd:
return ret;
}
-static const struct of_device_id meson_rtc_dt_match[] = {
+static const __maybe_unused struct of_device_id meson_rtc_dt_match[] = {
{ .compatible = "amlogic,meson6-rtc", },
{ .compatible = "amlogic,meson8-rtc", },
{ .compatible = "amlogic,meson8b-rtc", },
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
deleted file mode 100644
index 421b3b6071b6..000000000000
--- a/drivers/rtc/rtc-mrst.c
+++ /dev/null
@@ -1,521 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rtc-mrst.c: Driver for Moorestown virtual RTC
- *
- * (C) Copyright 2009 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- * Feng Tang (feng.tang@intel.com)
- *
- * Note:
- * VRTC is emulated by system controller firmware, the real HW
- * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
- * in a memory mapped IO space that is visible to the host IA
- * processor.
- *
- * This driver is based upon drivers/rtc/rtc-cmos.c
- */
-
-/*
- * Note:
- * * vRTC only supports binary mode and 24H mode
- * * vRTC only support PIE and AIE, no UIE, and its PIE only happens
- * at 23:59:59pm everyday, no support for adjustable frequency
- * * Alarm function is also limited to hr/min/sec.
- */
-
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/mc146818rtc.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-
-#include <asm/intel_scu_ipc.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-
-struct mrst_rtc {
- struct rtc_device *rtc;
- struct device *dev;
- int irq;
-
- u8 enabled_wake;
- u8 suspend_ctrl;
-};
-
-static const char driver_name[] = "rtc_mrst";
-
-#define RTC_IRQMASK (RTC_PF | RTC_AF)
-
-static inline int is_intr(u8 rtc_intr)
-{
- if (!(rtc_intr & RTC_IRQF))
- return 0;
- return rtc_intr & RTC_IRQMASK;
-}
-
-static inline unsigned char vrtc_is_updating(void)
-{
- unsigned char uip;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- uip = (vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return uip;
-}
-
-/*
- * rtc_time's year contains the increment over 1900, but vRTC's YEAR
- * register can't be programmed to value larger than 0x64, so vRTC
- * driver chose to use 1972 (1970 is UNIX time start point) as the base,
- * and does the translation at read/write time.
- *
- * Why not just use 1970 as the offset? it's because using 1972 will
- * make it consistent in leap year setting for both vrtc and low-level
- * physical rtc devices. Then why not use 1960 as the offset? If we use
- * 1960, for a device's first use, its YEAR register is 0 and the system
- * year will be parsed as 1960 which is not a valid UNIX time and will
- * cause many applications to fail mysteriously.
- */
-static int mrst_read_time(struct device *dev, struct rtc_time *time)
-{
- unsigned long flags;
-
- if (vrtc_is_updating())
- msleep(20);
-
- spin_lock_irqsave(&rtc_lock, flags);
- time->tm_sec = vrtc_cmos_read(RTC_SECONDS);
- time->tm_min = vrtc_cmos_read(RTC_MINUTES);
- time->tm_hour = vrtc_cmos_read(RTC_HOURS);
- time->tm_mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
- time->tm_mon = vrtc_cmos_read(RTC_MONTH);
- time->tm_year = vrtc_cmos_read(RTC_YEAR);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- /* Adjust for the 1972/1900 */
- time->tm_year += 72;
- time->tm_mon--;
- return 0;
-}
-
-static int mrst_set_time(struct device *dev, struct rtc_time *time)
-{
- int ret;
- unsigned long flags;
- unsigned char mon, day, hrs, min, sec;
- unsigned int yrs;
-
- yrs = time->tm_year;
- mon = time->tm_mon + 1; /* tm_mon starts at zero */
- day = time->tm_mday;
- hrs = time->tm_hour;
- min = time->tm_min;
- sec = time->tm_sec;
-
- if (yrs < 72 || yrs > 172)
- return -EINVAL;
- yrs -= 72;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- vrtc_cmos_write(yrs, RTC_YEAR);
- vrtc_cmos_write(mon, RTC_MONTH);
- vrtc_cmos_write(day, RTC_DAY_OF_MONTH);
- vrtc_cmos_write(hrs, RTC_HOURS);
- vrtc_cmos_write(min, RTC_MINUTES);
- vrtc_cmos_write(sec, RTC_SECONDS);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
- return ret;
-}
-
-static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned char rtc_control;
-
- if (mrst->irq <= 0)
- return -EIO;
-
- /* vRTC only supports binary mode */
- spin_lock_irq(&rtc_lock);
- t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
- t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
- t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
-
- rtc_control = vrtc_cmos_read(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- t->enabled = !!(rtc_control & RTC_AIE);
- t->pending = 0;
-
- return 0;
-}
-
-static void mrst_checkintr(struct mrst_rtc *mrst, unsigned char rtc_control)
-{
- unsigned char rtc_intr;
-
- /*
- * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
- * allegedly some older rtcs need that to handle irqs properly
- */
- rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
- rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
- if (is_intr(rtc_intr))
- rtc_update_irq(mrst->rtc, 1, rtc_intr);
-}
-
-static void mrst_irq_enable(struct mrst_rtc *mrst, unsigned char mask)
-{
- unsigned char rtc_control;
-
- /*
- * Flush any pending IRQ status, notably for update irqs,
- * before we enable new IRQs
- */
- rtc_control = vrtc_cmos_read(RTC_CONTROL);
- mrst_checkintr(mrst, rtc_control);
-
- rtc_control |= mask;
- vrtc_cmos_write(rtc_control, RTC_CONTROL);
-
- mrst_checkintr(mrst, rtc_control);
-}
-
-static void mrst_irq_disable(struct mrst_rtc *mrst, unsigned char mask)
-{
- unsigned char rtc_control;
-
- rtc_control = vrtc_cmos_read(RTC_CONTROL);
- rtc_control &= ~mask;
- vrtc_cmos_write(rtc_control, RTC_CONTROL);
- mrst_checkintr(mrst, rtc_control);
-}
-
-static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned char hrs, min, sec;
- int ret = 0;
-
- if (!mrst->irq)
- return -EIO;
-
- hrs = t->time.tm_hour;
- min = t->time.tm_min;
- sec = t->time.tm_sec;
-
- spin_lock_irq(&rtc_lock);
- /* Next rtc irq must not be from previous alarm setting */
- mrst_irq_disable(mrst, RTC_AIE);
-
- /* Update alarm */
- vrtc_cmos_write(hrs, RTC_HOURS_ALARM);
- vrtc_cmos_write(min, RTC_MINUTES_ALARM);
- vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
-
- spin_unlock_irq(&rtc_lock);
-
- ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETALARM);
- if (ret)
- return ret;
-
- spin_lock_irq(&rtc_lock);
- if (t->enabled)
- mrst_irq_enable(mrst, RTC_AIE);
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-/* Currently, the vRTC doesn't support UIE ON/OFF */
-static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- if (enabled)
- mrst_irq_enable(mrst, RTC_AIE);
- else
- mrst_irq_disable(mrst, RTC_AIE);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
-}
-
-
-#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
-
-static int mrst_procfs(struct device *dev, struct seq_file *seq)
-{
- unsigned char rtc_control;
-
- spin_lock_irq(&rtc_lock);
- rtc_control = vrtc_cmos_read(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- seq_printf(seq,
- "periodic_IRQ\t: %s\n"
- "alarm\t\t: %s\n"
- "BCD\t\t: no\n"
- "periodic_freq\t: daily (not adjustable)\n",
- (rtc_control & RTC_PIE) ? "on" : "off",
- (rtc_control & RTC_AIE) ? "on" : "off");
-
- return 0;
-}
-
-#else
-#define mrst_procfs NULL
-#endif
-
-static const struct rtc_class_ops mrst_rtc_ops = {
- .read_time = mrst_read_time,
- .set_time = mrst_set_time,
- .read_alarm = mrst_read_alarm,
- .set_alarm = mrst_set_alarm,
- .proc = mrst_procfs,
- .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
-};
-
-static struct mrst_rtc mrst_rtc;
-
-/*
- * When vRTC IRQ is captured by SCU FW, FW will clear the AIE bit in
- * Reg B, so no need for this driver to clear it
- */
-static irqreturn_t mrst_rtc_irq(int irq, void *p)
-{
- u8 irqstat;
-
- spin_lock(&rtc_lock);
- /* This read will clear all IRQ flags inside Reg C */
- irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
- spin_unlock(&rtc_lock);
-
- irqstat &= RTC_IRQMASK | RTC_IRQF;
- if (is_intr(irqstat)) {
- rtc_update_irq(p, 1, irqstat);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
-static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
- int rtc_irq)
-{
- int retval = 0;
- unsigned char rtc_control;
-
- /* There can be only one ... */
- if (mrst_rtc.dev)
- return -EBUSY;
-
- if (!iomem)
- return -ENODEV;
-
- iomem = devm_request_mem_region(dev, iomem->start, resource_size(iomem),
- driver_name);
- if (!iomem) {
- dev_dbg(dev, "i/o mem already in use.\n");
- return -EBUSY;
- }
-
- mrst_rtc.irq = rtc_irq;
- mrst_rtc.dev = dev;
- dev_set_drvdata(dev, &mrst_rtc);
-
- mrst_rtc.rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(mrst_rtc.rtc))
- return PTR_ERR(mrst_rtc.rtc);
-
- mrst_rtc.rtc->ops = &mrst_rtc_ops;
-
- rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
-
- spin_lock_irq(&rtc_lock);
- mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
- rtc_control = vrtc_cmos_read(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
- dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");
-
- if (rtc_irq) {
- retval = devm_request_irq(dev, rtc_irq, mrst_rtc_irq,
- 0, dev_name(&mrst_rtc.rtc->dev),
- mrst_rtc.rtc);
- if (retval < 0) {
- dev_dbg(dev, "IRQ %d is already in use, err %d\n",
- rtc_irq, retval);
- goto cleanup0;
- }
- }
-
- retval = devm_rtc_register_device(mrst_rtc.rtc);
- if (retval)
- goto cleanup0;
-
- dev_dbg(dev, "initialised\n");
- return 0;
-
-cleanup0:
- mrst_rtc.dev = NULL;
- dev_err(dev, "rtc-mrst: unable to initialise\n");
- return retval;
-}
-
-static void rtc_mrst_do_shutdown(void)
-{
- spin_lock_irq(&rtc_lock);
- mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
- spin_unlock_irq(&rtc_lock);
-}
-
-static void rtc_mrst_do_remove(struct device *dev)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
-
- rtc_mrst_do_shutdown();
-
- mrst->rtc = NULL;
- mrst->dev = NULL;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mrst_suspend(struct device *dev)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned char tmp;
-
- /* Only the alarm might be a wakeup event source */
- spin_lock_irq(&rtc_lock);
- mrst->suspend_ctrl = tmp = vrtc_cmos_read(RTC_CONTROL);
- if (tmp & (RTC_PIE | RTC_AIE)) {
- unsigned char mask;
-
- if (device_may_wakeup(dev))
- mask = RTC_IRQMASK & ~RTC_AIE;
- else
- mask = RTC_IRQMASK;
- tmp &= ~mask;
- vrtc_cmos_write(tmp, RTC_CONTROL);
-
- mrst_checkintr(mrst, tmp);
- }
- spin_unlock_irq(&rtc_lock);
-
- if (tmp & RTC_AIE) {
- mrst->enabled_wake = 1;
- enable_irq_wake(mrst->irq);
- }
-
- dev_dbg(&mrst_rtc.rtc->dev, "suspend%s, ctrl %02x\n",
- (tmp & RTC_AIE) ? ", alarm may wake" : "",
- tmp);
-
- return 0;
-}
-
-/*
- * We want RTC alarms to wake us from the deep power saving state
- */
-static inline int mrst_poweroff(struct device *dev)
-{
- return mrst_suspend(dev);
-}
-
-static int mrst_resume(struct device *dev)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned char tmp = mrst->suspend_ctrl;
-
- /* Re-enable any irqs previously active */
- if (tmp & RTC_IRQMASK) {
- unsigned char mask;
-
- if (mrst->enabled_wake) {
- disable_irq_wake(mrst->irq);
- mrst->enabled_wake = 0;
- }
-
- spin_lock_irq(&rtc_lock);
- do {
- vrtc_cmos_write(tmp, RTC_CONTROL);
-
- mask = vrtc_cmos_read(RTC_INTR_FLAGS);
- mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
- if (!is_intr(mask))
- break;
-
- rtc_update_irq(mrst->rtc, 1, mask);
- tmp &= ~RTC_AIE;
- } while (mask & RTC_AIE);
- spin_unlock_irq(&rtc_lock);
- }
-
- dev_dbg(&mrst_rtc.rtc->dev, "resume, ctrl %02x\n", tmp);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
-#define MRST_PM_OPS (&mrst_pm_ops)
-
-#else
-#define MRST_PM_OPS NULL
-
-static inline int mrst_poweroff(struct device *dev)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static int vrtc_mrst_platform_probe(struct platform_device *pdev)
-{
- return vrtc_mrst_do_probe(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0),
- platform_get_irq(pdev, 0));
-}
-
-static int vrtc_mrst_platform_remove(struct platform_device *pdev)
-{
- rtc_mrst_do_remove(&pdev->dev);
- return 0;
-}
-
-static void vrtc_mrst_platform_shutdown(struct platform_device *pdev)
-{
- if (system_state == SYSTEM_POWER_OFF && !mrst_poweroff(&pdev->dev))
- return;
-
- rtc_mrst_do_shutdown();
-}
-
-MODULE_ALIAS("platform:vrtc_mrst");
-
-static struct platform_driver vrtc_mrst_platform_driver = {
- .probe = vrtc_mrst_platform_probe,
- .remove = vrtc_mrst_platform_remove,
- .shutdown = vrtc_mrst_platform_shutdown,
- .driver = {
- .name = driver_name,
- .pm = MRST_PM_OPS,
- }
-};
-
-module_platform_driver(vrtc_mrst_platform_driver);
-
-MODULE_AUTHOR("Jacob Pan; Feng Tang");
-MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index f8e2ecea1d8d..6c526e2ec56d 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -200,11 +200,6 @@ static irqreturn_t mv_rtc_interrupt(int irq, void *data)
static const struct rtc_class_ops mv_rtc_ops = {
.read_time = mv_rtc_read_time,
.set_time = mv_rtc_set_time,
-};
-
-static const struct rtc_class_ops mv_rtc_alarm_ops = {
- .read_time = mv_rtc_read_time,
- .set_time = mv_rtc_set_time,
.read_alarm = mv_rtc_read_alarm,
.set_alarm = mv_rtc_set_alarm,
.alarm_irq_enable = mv_rtc_alarm_irq_enable,
@@ -268,13 +263,12 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
}
- if (pdata->irq >= 0) {
+ if (pdata->irq >= 0)
device_init_wakeup(&pdev->dev, 1);
- pdata->rtc->ops = &mv_rtc_alarm_ops;
- } else {
- pdata->rtc->ops = &mv_rtc_ops;
- }
+ else
+ clear_bit(RTC_FEATURE_ALARM, pdata->rtc->features);
+ pdata->rtc->ops = &mv_rtc_ops;
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 65b29b0fa548..db57dda7ab97 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -189,11 +189,10 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long flags;
u32 status;
u32 events = 0;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock(&pdata->rtc->irq_lock);
status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
@@ -209,7 +208,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
events |= (RTC_PF | RTC_IRQF);
rtc_update_irq(pdata->rtc, 1, events);
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock(&pdata->rtc->irq_lock);
return IRQ_HANDLED;
}
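
This handler, like the mxc_v2, r7301, and pm8xxx ones below, runs in hard-IRQ context where local interrupts are already disabled, so saving and restoring the flags is pure overhead and plain spin_lock() suffices. The trade-off: every process- or timer-context user of the same lock must still take it with interrupts disabled, or the handler can deadlock against them. A sketch with hypothetical quux_* names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct quux {
        spinlock_t lock;
};

/* Hypothetical: acknowledge and clear the interrupt source. */
static void quux_ack(struct quux *q)
{
}

static irqreturn_t quux_irq(int irq, void *dev_id)
{
        struct quux *q = dev_id;

        /*
         * Hard-IRQ context: interrupts are already off on this CPU.
         * Other contexts taking q->lock must use spin_lock_irqsave().
         */
        spin_lock(&q->lock);
        quux_ack(q);
        spin_unlock(&q->lock);

        return IRQ_HANDLED;
}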
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 0d73f6f0cf9e..a577a74aaf75 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -74,13 +74,12 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
struct device *dev = dev_id;
struct mxc_rtc_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long flags;
u32 lp_status;
u32 lp_cr;
- spin_lock_irqsave(&pdata->lock, flags);
+ spin_lock(&pdata->lock);
if (clk_enable(pdata->clk)) {
- spin_unlock_irqrestore(&pdata->lock, flags);
+ spin_unlock(&pdata->lock);
return IRQ_NONE;
}
@@ -104,7 +103,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
mxc_rtc_sync_lp_locked(dev, ioaddr);
clk_disable(pdata->clk);
- spin_unlock_irqrestore(&pdata->lock, flags);
+ spin_unlock(&pdata->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 7b9f8bcf86fe..f8f49a969c23 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -224,32 +224,35 @@ static int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled)
return enabled ? 0 : opal_set_tpo_time(dev, &alarm);
}
-static struct rtc_class_ops opal_rtc_ops = {
+static const struct rtc_class_ops opal_rtc_ops = {
.read_time = opal_get_rtc_time,
.set_time = opal_set_rtc_time,
+ .read_alarm = opal_get_tpo_time,
+ .set_alarm = opal_set_tpo_time,
+ .alarm_irq_enable = opal_tpo_alarm_irq_enable,
};
static int opal_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
if (pdev->dev.of_node &&
(of_property_read_bool(pdev->dev.of_node, "wakeup-source") ||
- of_property_read_bool(pdev->dev.of_node, "has-tpo")/* legacy */)) {
+ of_property_read_bool(pdev->dev.of_node, "has-tpo")/* legacy */))
device_set_wakeup_capable(&pdev->dev, true);
- opal_rtc_ops.read_alarm = opal_get_tpo_time;
- opal_rtc_ops.set_alarm = opal_set_tpo_time;
- opal_rtc_ops.alarm_irq_enable = opal_tpo_alarm_irq_enable;
- }
-
- rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ else
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ rtc->ops = &opal_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->range_max = RTC_TIMESTAMP_END_9999;
rtc->uie_unsupported = 1;
- return 0;
+ return devm_rtc_register_device(rtc);
}
static const struct of_device_id opal_rtc_match[] = {
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 534ffc91eec1..0f58cac81d8c 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -307,11 +307,10 @@ static int pcf2123_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
static irqreturn_t pcf2123_rtc_irq(int irq, void *dev)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
- struct mutex *lock = &pcf2123->rtc->ops_lock;
unsigned int val = 0;
int ret = IRQ_NONE;
- mutex_lock(lock);
+ rtc_lock(pcf2123->rtc);
regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val);
/* Alarm? */
@@ -324,7 +323,7 @@ static irqreturn_t pcf2123_rtc_irq(int irq, void *dev)
rtc_update_irq(pcf2123->rtc, 1, RTC_IRQF | RTC_AF);
}
- mutex_unlock(lock);
+ rtc_unlock(pcf2123->rtc);
return ret;
}
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 39a7b5116aa4..d13c20a2adf7 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -26,6 +26,7 @@
/* Control register 1 */
#define PCF2127_REG_CTRL1 0x00
+#define PCF2127_BIT_CTRL1_POR_OVRD BIT(3)
#define PCF2127_BIT_CTRL1_TSF1 BIT(4)
/* Control register 2 */
#define PCF2127_REG_CTRL2 0x01
@@ -57,6 +58,9 @@
#define PCF2127_REG_ALARM_DM 0x0D
#define PCF2127_REG_ALARM_DW 0x0E
#define PCF2127_BIT_ALARM_AE BIT(7)
+/* CLKOUT control register */
+#define PCF2127_REG_CLKOUT 0x0f
+#define PCF2127_BIT_CLKOUT_OTPR BIT(5)
/* Watchdog registers */
#define PCF2127_REG_WD_CTL 0x10
#define PCF2127_BIT_WD_CTL_TF0 BIT(0)
@@ -225,12 +229,6 @@ static int pcf2127_rtc_ioctl(struct device *dev,
}
}
-static const struct rtc_class_ops pcf2127_rtc_ops = {
- .ioctl = pcf2127_rtc_ioctl,
- .read_time = pcf2127_rtc_read_time,
- .set_time = pcf2127_rtc_set_time,
-};
-
static int pcf2127_nvmem_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
@@ -459,7 +457,7 @@ static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static const struct rtc_class_ops pcf2127_rtc_alrm_ops = {
+static const struct rtc_class_ops pcf2127_rtc_ops = {
.ioctl = pcf2127_rtc_ioctl,
.read_time = pcf2127_rtc_read_time,
.set_time = pcf2127_rtc_set_time,
@@ -560,10 +558,11 @@ static const struct attribute_group pcf2127_attr_group = {
};
static int pcf2127_probe(struct device *dev, struct regmap *regmap,
- int alarm_irq, const char *name, bool has_nvmem)
+ int alarm_irq, const char *name, bool is_pcf2127)
{
struct pcf2127 *pcf2127;
int ret = 0;
+ unsigned int val;
dev_dbg(dev, "%s\n", __func__);
@@ -584,6 +583,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
pcf2127->rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
if (alarm_irq > 0) {
ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
@@ -598,10 +598,10 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
if (alarm_irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
device_init_wakeup(dev, true);
- pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
+ set_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
}
- if (has_nvmem) {
+ if (is_pcf2127) {
struct nvmem_config nvmem_cfg = {
.priv = pcf2127,
.reg_read = pcf2127_nvmem_read,
@@ -613,9 +613,33 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
}
/*
+ * The "Power-On Reset Override" facility prevents the RTC to do a reset
+ * after power on. For normal operation the PORO must be disabled.
+ */
+ regmap_clear_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_POR_OVRD);
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CLKOUT, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!(val & PCF2127_BIT_CLKOUT_OTPR)) {
+ ret = regmap_set_bits(pcf2127->regmap, PCF2127_REG_CLKOUT,
+ PCF2127_BIT_CLKOUT_OTPR);
+ if (ret < 0)
+ return ret;
+
+ msleep(100);
+ }
+
+ /*
* Watchdog timer enabled and reset pin /RST activated when timed out.
* Select 1Hz clock source for watchdog timer.
* Note: Countdown timer disabled and not available.
+ * On the pca2129 and pcf2129, only bit[7] of the watchdg_tim_ctl
+ * register implements the WD_CD function; bit[6] is labeled T, and
+ * bits labeled T must always be written as logic 0.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
PCF2127_BIT_WD_CTL_CD1 |
@@ -623,7 +647,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
PCF2127_BIT_WD_CTL_TF1 |
PCF2127_BIT_WD_CTL_TF0,
PCF2127_BIT_WD_CTL_CD1 |
- PCF2127_BIT_WD_CTL_CD0 |
+ (is_pcf2127 ? PCF2127_BIT_WD_CTL_CD0 : 0) |
PCF2127_BIT_WD_CTL_TF1);
if (ret) {
dev_err(dev, "%s: watchdog config (wd_ctl) failed\n", __func__);
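
Two regmap helpers carry the pcf2127 additions: regmap_clear_bits() drops the power-on-reset override, and regmap_set_bits() triggers the one-time-programmable calibration refresh when OTPR reads back clear, followed by a 100 ms settle. Both are shorthand for regmap_update_bits() with the mask as value, or zero. A sketch of the read-check-trigger-wait shape; REG_CFG, CFG_OVERRIDE, and CFG_TRIGGER are invented names:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#define REG_CFG         0x0f            /* hypothetical register */
#define CFG_OVERRIDE    BIT(3)          /* hypothetical bit */
#define CFG_TRIGGER     BIT(5)          /* hypothetical bit */

static int grault_refresh(struct regmap *map)
{
        unsigned int val;
        int ret;

        /* Equivalent to regmap_update_bits(map, REG_CFG, CFG_OVERRIDE, 0). */
        ret = regmap_clear_bits(map, REG_CFG, CFG_OVERRIDE);
        if (ret)
                return ret;

        ret = regmap_read(map, REG_CFG, &val);
        if (ret)
                return ret;

        if (!(val & CFG_TRIGGER)) {
                ret = regmap_set_bits(map, REG_CFG, CFG_TRIGGER);
                if (ret)
                        return ret;
                msleep(100);    /* generous settle time, mirroring the hunk */
        }

        return 0;
}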
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index e19cf2adbc35..aef6c1ee8bb0 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -311,14 +311,6 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
.set_time = pcf85063_rtc_set_time,
.read_offset = pcf85063_read_offset,
.set_offset = pcf85063_set_offset,
- .ioctl = pcf85063_ioctl,
-};
-
-static const struct rtc_class_ops pcf85063_rtc_ops_alarm = {
- .read_time = pcf85063_rtc_read_time,
- .set_time = pcf85063_rtc_set_time,
- .read_offset = pcf85063_read_offset,
- .set_offset = pcf85063_set_offset,
.read_alarm = pcf85063_rtc_read_alarm,
.set_alarm = pcf85063_rtc_set_alarm,
.alarm_irq_enable = pcf85063_rtc_alarm_irq_enable,
@@ -509,15 +501,6 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-static const struct pcf85063_config pcf85063a_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
-};
-
static const struct pcf85063_config pcf85063tp_config = {
.regmap = {
.reg_bits = 8,
@@ -526,16 +509,6 @@ static const struct pcf85063_config pcf85063tp_config = {
},
};
-static const struct pcf85063_config rv8263_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
-};
-
static int pcf85063_probe(struct i2c_client *client)
{
struct pcf85063 *pcf85063;
@@ -587,6 +560,7 @@ static int pcf85063_probe(struct i2c_client *client)
pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf85063->rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
if (config->has_alarms && client->irq > 0) {
err = devm_request_threaded_irq(&client->dev, client->irq,
@@ -597,7 +571,7 @@ static int pcf85063_probe(struct i2c_client *client)
dev_warn(&pcf85063->rtc->dev,
"unable to request IRQ, alarms disabled\n");
} else {
- pcf85063->rtc->ops = &pcf85063_rtc_ops_alarm;
+ set_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
device_init_wakeup(&client->dev, true);
err = dev_pm_set_wake_irq(&client->dev, client->irq);
if (err)
@@ -618,6 +592,25 @@ static int pcf85063_probe(struct i2c_client *client)
}
#ifdef CONFIG_OF
+static const struct pcf85063_config pcf85063a_config = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+};
+
+static const struct pcf85063_config rv8263_config = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
+
static const struct of_device_id pcf85063_of_match[] = {
{ .compatible = "nxp,pcf85063", .data = &pcf85063tp_config },
{ .compatible = "nxp,pcf85063tp", .data = &pcf85063tp_config },
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index a574c8d15a5c..bb3e9ba75f6c 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -285,11 +285,6 @@ static irqreturn_t pcf85363_rtc_handle_irq(int irq, void *dev_id)
static const struct rtc_class_ops rtc_ops = {
.read_time = pcf85363_rtc_read_time,
.set_time = pcf85363_rtc_set_time,
-};
-
-static const struct rtc_class_ops rtc_ops_alarm = {
- .read_time = pcf85363_rtc_read_time,
- .set_time = pcf85363_rtc_set_time,
.read_alarm = pcf85363_rtc_read_alarm,
.set_alarm = pcf85363_rtc_set_alarm,
.alarm_irq_enable = pcf85363_rtc_alarm_irq_enable,
@@ -403,6 +398,7 @@ static int pcf85363_probe(struct i2c_client *client,
pcf85363->rtc->ops = &rtc_ops;
pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
if (client->irq > 0) {
regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
@@ -415,7 +411,7 @@ static int pcf85363_probe(struct i2c_client *client,
if (ret)
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
else
- pcf85363->rtc->ops = &rtc_ops_alarm;
+ set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
}
ret = devm_rtc_register_device(pcf85363->rtc);
@@ -428,7 +424,7 @@ static int pcf85363_probe(struct i2c_client *client,
return ret;
}
-static const struct of_device_id dev_ids[] = {
+static const __maybe_unused struct of_device_id dev_ids[] = {
{ .compatible = "nxp,pcf85263", .data = &pcf_85263_config },
{ .compatible = "nxp,pcf85363", .data = &pcf_85363_config },
{ /* sentinel */ }
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index de3e6c355f2e..18f12f36eb2b 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -597,6 +597,7 @@ static int pcf8563_probe(struct i2c_client *client,
static const struct i2c_device_id pcf8563_id[] = {
{ "pcf8563", 0 },
{ "rtc8564", 0 },
+ { "pca8565", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8563_id);
@@ -606,6 +607,7 @@ static const struct of_device_id pcf8563_of_match[] = {
{ .compatible = "nxp,pcf8563" },
{ .compatible = "epson,rtc8564" },
{ .compatible = "microcrystal,rv8564" },
+ { .compatible = "nxp,pca8565" },
{}
};
MODULE_DEVICE_TABLE(of, pcf8563_of_match);
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 5a880516f3e8..39038c0754ee 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -137,7 +137,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
return ret;
}
-static int pl030_remove(struct amba_device *dev)
+static void pl030_remove(struct amba_device *dev)
{
struct pl030_rtc *rtc = amba_get_drvdata(dev);
@@ -146,8 +146,6 @@ static int pl030_remove(struct amba_device *dev)
free_irq(dev->irq[0], rtc);
iounmap(rtc->base);
amba_release_regions(dev);
-
- return 0;
}
static struct amba_id pl030_ids[] = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 224bbf096262..e38ee8848385 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -280,7 +280,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return 0;
}
-static int pl031_remove(struct amba_device *adev)
+static void pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
@@ -289,8 +289,6 @@ static int pl031_remove(struct amba_device *adev)
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
amba_release_regions(adev);
-
- return 0;
}
static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
@@ -352,12 +350,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- if (!adev->irq[0]) {
- /* When there's no interrupt, no point in exposing the alarm */
- ops->read_alarm = NULL;
- ops->set_alarm = NULL;
- ops->alarm_irq_enable = NULL;
- }
+ if (!adev->irq[0])
+ clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
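
pl030 and pl031 adopt the AMBA bus change that made .remove return void: removal cannot fail in any way the driver core acts on, so the int return only invited dead error paths. (One ordering caveat visible in the pl031 context above: clearing a bit in ldata->rtc->features is only safe once devm_rtc_allocate_device() has succeeded, so the feature-bit update belongs after the allocation in the final code.) A driver-shape sketch with hypothetical garply_* names:

#include <linux/amba/bus.h>

static int garply_probe(struct amba_device *adev, const struct amba_id *id)
{
        return amba_request_regions(adev, "garply");
}

static void garply_remove(struct amba_device *adev)
{
        /*
         * Release what probe claimed. Nothing is returned because the
         * driver core cannot act on a removal failure anyway.
         */
        amba_release_regions(adev);
}

static struct amba_driver garply_driver = {
        .drv = { .name = "garply" },
        .probe = garply_probe,
        .remove = garply_remove,
};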
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 0d9dd6faabba..eb206597a8fa 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -20,6 +20,7 @@
/* RTC_CTRL register bit fields */
#define PM8xxx_RTC_ENABLE BIT(7)
#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
+#define PM8xxx_RTC_ALARM_ENABLE BIT(7)
#define NUM_8_BIT_RTC_REGS 0x4
@@ -265,6 +266,7 @@ rtc_rw_fail:
static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int rc;
+ unsigned int ctrl_reg;
u8 value[NUM_8_BIT_RTC_REGS];
unsigned long secs;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
@@ -282,6 +284,13 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_time64_to_tm(secs, &alarm->time);
+ rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+ if (rc) {
+ dev_err(dev, "Read from RTC alarm control register failed\n");
+ return rc;
+ }
+ alarm->enabled = !!(ctrl_reg & PM8xxx_RTC_ALARM_ENABLE);
+
dev_dbg(dev, "Alarm set for - h:m:s=%ptRt, y-m-d=%ptRdr\n",
&alarm->time, &alarm->time);
@@ -343,16 +352,15 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
unsigned int ctrl_reg;
int rc;
- unsigned long irq_flags;
rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
- spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ spin_lock(&rtc_dd->ctrl_reg_lock);
/* Clear the alarm enable bit */
rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
if (rc) {
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ spin_unlock(&rtc_dd->ctrl_reg_lock);
goto rtc_alarm_handled;
}
@@ -360,13 +368,13 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
if (rc) {
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ spin_unlock(&rtc_dd->ctrl_reg_lock);
dev_err(rtc_dd->rtc_dev,
"Write to alarm control register failed\n");
goto rtc_alarm_handled;
}
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ spin_unlock(&rtc_dd->ctrl_reg_lock);
/* Clear RTC alarm register */
rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index aaf1b95e3990..5dbaeb7af648 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -320,11 +320,10 @@ static irqreturn_t rtc7301_irq_handler(int irq, void *dev_id)
{
struct rtc_device *rtc = dev_id;
struct rtc7301_priv *priv = dev_get_drvdata(rtc->dev.parent);
- unsigned long flags;
irqreturn_t ret = IRQ_NONE;
u8 alrm_ctrl;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
rtc7301_select_bank(priv, 1);
@@ -335,7 +334,7 @@ static irqreturn_t rtc7301_irq_handler(int irq, void *dev_id)
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return ret;
}
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 3bd6eaa0dcf6..80980414890c 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -83,7 +83,7 @@ static const struct i2c_device_id rs5c372_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rs5c372_id);
-static const struct of_device_id rs5c372_of_match[] = {
+static const __maybe_unused struct of_device_id rs5c372_of_match[] = {
{
.compatible = "ricoh,r2025sd",
.data = (void *)rtc_r2025sd
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 979407a51c7a..0c48d980d06a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -265,8 +265,7 @@ static irqreturn_t rv3028_handle_irq(int irq, void *dev_id)
return IRQ_NONE;
}
- if (status & RV3028_STATUS_PORF)
- dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n");
+ status &= ~RV3028_STATUS_PORF;
if (status & RV3028_STATUS_TF) {
status |= RV3028_STATUS_TF;
@@ -311,10 +310,8 @@ static int rv3028_get_time(struct device *dev, struct rtc_time *tm)
if (ret < 0)
return ret;
- if (status & RV3028_STATUS_PORF) {
- dev_warn(dev, "Voltage low, data is invalid.\n");
+ if (status & RV3028_STATUS_PORF)
return -EINVAL;
- }
ret = regmap_bulk_read(rv3028->regmap, RV3028_SEC, date, sizeof(date));
if (ret)
@@ -770,9 +767,12 @@ static int rv3028_clkout_register_clk(struct rv3028_data *rv3028,
}
#endif
-static struct rtc_class_ops rv3028_rtc_ops = {
+static const struct rtc_class_ops rv3028_rtc_ops = {
.read_time = rv3028_get_time,
.set_time = rv3028_set_time,
+ .read_alarm = rv3028_get_alarm,
+ .set_alarm = rv3028_set_alarm,
+ .alarm_irq_enable = rv3028_alarm_irq_enable,
.read_offset = rv3028_read_offset,
.set_offset = rv3028_set_offset,
.ioctl = rv3028_ioctl,
@@ -823,9 +823,6 @@ static int rv3028_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- if (status & RV3028_STATUS_PORF)
- dev_warn(&client->dev, "Voltage low, data loss detected.\n");
-
if (status & RV3028_STATUS_AF)
dev_warn(&client->dev, "An alarm may have been missed.\n");
@@ -841,12 +838,10 @@ static int rv3028_probe(struct i2c_client *client)
if (ret) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
- } else {
- rv3028_rtc_ops.read_alarm = rv3028_get_alarm;
- rv3028_rtc_ops.set_alarm = rv3028_set_alarm;
- rv3028_rtc_ops.alarm_irq_enable = rv3028_alarm_irq_enable;
}
}
+ if (!client->irq)
+ clear_bit(RTC_FEATURE_ALARM, rv3028->rtc->features);
ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL1,
RV3028_CTRL1_WADA, RV3028_CTRL1_WADA);
@@ -903,7 +898,7 @@ static int rv3028_probe(struct i2c_client *client)
return 0;
}
-static const struct of_device_id rv3028_of_match[] = {
+static const __maybe_unused struct of_device_id rv3028_of_match[] = {
{ .compatible = "microcrystal,rv3028", },
{ }
};
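
rv3028 stops logging the power-on-reset condition and instead makes ->read_time fail with -EINVAL while PORF is set: the RTC core and tools such as hwclock already interpret that as "clock not set, please program it", which is more actionable than a warning that scrolls away. A sketch with hypothetical waldo_* names and register layout:

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/rtc.h>

#define WALDO_STATUS            0x0e    /* hypothetical */
#define WALDO_STATUS_PORF       BIT(0)  /* hypothetical */

struct waldo {
        struct regmap *regmap;
};

static int waldo_read_time(struct device *dev, struct rtc_time *tm)
{
        struct waldo *w = dev_get_drvdata(dev);
        unsigned int status;
        int ret;

        ret = regmap_read(w->regmap, WALDO_STATUS, &status);
        if (ret)
                return ret;

        /*
         * After a power-on reset the time registers hold garbage;
         * -EINVAL reports "time invalid" instead of serving it anyway.
         * The flag clears once userspace writes a valid time.
         */
        if (status & WALDO_STATUS_PORF)
                return -EINVAL;

        /* ... read and convert the time registers here ... */
        return 0;
}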
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index dc1bda62095e..8cb84c9595fc 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -265,24 +265,23 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
{
struct device *dev = dev_id;
struct rv3029_data *rv3029 = dev_get_drvdata(dev);
- struct mutex *lock = &rv3029->rtc->ops_lock;
unsigned int flags, controls;
unsigned long events = 0;
int ret;
- mutex_lock(lock);
+ rtc_lock(rv3029->rtc);
ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
if (ret) {
dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
- mutex_unlock(lock);
+ rtc_unlock(rv3029->rtc);
return IRQ_NONE;
}
ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
if (ret) {
dev_warn(dev, "Read IRQ Flags Register error %d\n", ret);
- mutex_unlock(lock);
+ rtc_unlock(rv3029->rtc);
return IRQ_NONE;
}
@@ -297,7 +296,7 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
regmap_write(rv3029->regmap, RV3029_IRQ_FLAGS, flags);
regmap_write(rv3029->regmap, RV3029_IRQ_CTRL, controls);
}
- mutex_unlock(lock);
+ rtc_unlock(rv3029->rtc);
return IRQ_HANDLED;
}
@@ -694,10 +693,13 @@ static void rv3029_hwmon_register(struct device *dev, const char *name)
#endif /* CONFIG_RTC_DRV_RV3029_HWMON */
-static struct rtc_class_ops rv3029_rtc_ops = {
+static const struct rtc_class_ops rv3029_rtc_ops = {
.read_time = rv3029_read_time,
.set_time = rv3029_set_time,
.ioctl = rv3029_ioctl,
+ .read_alarm = rv3029_read_alarm,
+ .set_alarm = rv3029_set_alarm,
+ .alarm_irq_enable = rv3029_alarm_irq_enable,
};
static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
@@ -739,12 +741,10 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
if (rc) {
dev_warn(dev, "unable to request IRQ, alarms disabled\n");
rv3029->irq = 0;
- } else {
- rv3029_rtc_ops.read_alarm = rv3029_read_alarm;
- rv3029_rtc_ops.set_alarm = rv3029_set_alarm;
- rv3029_rtc_ops.alarm_irq_enable = rv3029_alarm_irq_enable;
}
}
+ if (!rv3029->irq)
+ clear_bit(RTC_FEATURE_ALARM, rv3029->rtc->features);
rv3029->rtc->ops = &rv3029_rtc_ops;
rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
@@ -808,7 +808,7 @@ static const struct i2c_device_id rv3029_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rv3029_id);
-static const struct of_device_id rv3029_of_match[] = {
+static const __maybe_unused struct of_device_id rv3029_of_match[] = {
{ .compatible = "microcrystal,rv3029" },
{ }
};
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index c9bcea727757..d63102d5cb1e 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -804,12 +804,15 @@ static void rv3032_hwmon_register(struct device *dev)
devm_hwmon_device_register_with_info(dev, "rv3032", rv3032, &rv3032_hwmon_chip_info, NULL);
}
-static struct rtc_class_ops rv3032_rtc_ops = {
+static const struct rtc_class_ops rv3032_rtc_ops = {
.read_time = rv3032_get_time,
.set_time = rv3032_set_time,
.read_offset = rv3032_read_offset,
.set_offset = rv3032_set_offset,
.ioctl = rv3032_ioctl,
+ .read_alarm = rv3032_get_alarm,
+ .set_alarm = rv3032_set_alarm,
+ .alarm_irq_enable = rv3032_alarm_irq_enable,
};
static const struct regmap_config regmap_config = {
@@ -868,12 +871,10 @@ static int rv3032_probe(struct i2c_client *client)
if (ret) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
- } else {
- rv3032_rtc_ops.read_alarm = rv3032_get_alarm;
- rv3032_rtc_ops.set_alarm = rv3032_set_alarm;
- rv3032_rtc_ops.alarm_irq_enable = rv3032_alarm_irq_enable;
}
}
+ if (!client->irq)
+ clear_bit(RTC_FEATURE_ALARM, rv3032->rtc->features);
ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1,
RV3032_CTRL1_WADA, RV3032_CTRL1_WADA);
@@ -905,7 +906,7 @@ static int rv3032_probe(struct i2c_client *client)
return 0;
}
-static const struct of_device_id rv3032_of_match[] = {
+static const __maybe_unused struct of_device_id rv3032_of_match[] = {
{ .compatible = "microcrystal,rv3032", },
{ }
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index d4ea6db51b26..72adef5a5ebe 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -471,10 +471,13 @@ static int rv8803_nvram_read(void *priv, unsigned int offset,
return 0;
}
-static struct rtc_class_ops rv8803_rtc_ops = {
+static const struct rtc_class_ops rv8803_rtc_ops = {
.read_time = rv8803_get_time,
.set_time = rv8803_set_time,
.ioctl = rv8803_ioctl,
+ .read_alarm = rv8803_get_alarm,
+ .set_alarm = rv8803_set_alarm,
+ .alarm_irq_enable = rv8803_alarm_irq_enable,
};
static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
@@ -567,12 +570,10 @@ static int rv8803_probe(struct i2c_client *client,
if (err) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
- } else {
- rv8803_rtc_ops.read_alarm = rv8803_get_alarm;
- rv8803_rtc_ops.set_alarm = rv8803_set_alarm;
- rv8803_rtc_ops.alarm_irq_enable = rv8803_alarm_irq_enable;
}
}
+ if (!client->irq)
+ clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
if (err)
@@ -606,7 +607,7 @@ static const struct i2c_device_id rv8803_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rv8803_id);
-static const struct of_device_id rv8803_of_match[] = {
+static const __maybe_unused struct of_device_id rv8803_of_match[] = {
{
.compatible = "microcrystal,rv8803",
.data = (void *)rv_8803
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index a7b671a21022..79161d4c6ce4 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -331,7 +331,7 @@ static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev)
return 0;
}
-#ifdef CONFIG_SPI_MASTER
+#if IS_ENABLED(CONFIG_SPI_MASTER)
static struct regmap_config regmap_spi_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -411,7 +411,7 @@ static void rx6110_spi_unregister(void)
}
#endif /* CONFIG_SPI_MASTER */
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static struct regmap_config regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
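
rx6110 builds its SPI and I2C front ends into one module, and a bare #ifdef CONFIG_I2C misses the common case where the dependency is modular: CONFIG_I2C=m defines CONFIG_I2C_MODULE, not CONFIG_I2C. IS_ENABLED() covers both =y and =m. In miniature:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_I2C)      /* true for CONFIG_I2C=y and =m */
static int fred_register_i2c(void)
{
        /* hypothetical: a real i2c_add_driver() call would go here */
        return 0;
}
#else
static inline int fred_register_i2c(void)
{
        return 0;               /* bus support compiled out */
}
#endif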
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 8340ab47a059..d09056570739 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -55,7 +55,7 @@ static const struct i2c_device_id rx8010_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rx8010_id);
-static const struct of_device_id rx8010_of_match[] = {
+static const __maybe_unused struct of_device_id rx8010_of_match[] = {
{ .compatible = "epson,rx8010" },
{ }
};
@@ -73,11 +73,11 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id)
struct rx8010_data *rx8010 = i2c_get_clientdata(client);
int flagreg, err;
- mutex_lock(&rx8010->rtc->ops_lock);
+ rtc_lock(rx8010->rtc);
err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
if (err) {
- mutex_unlock(&rx8010->rtc->ops_lock);
+ rtc_unlock(rx8010->rtc);
return IRQ_NONE;
}
@@ -100,7 +100,7 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id)
}
err = regmap_write(rx8010->regs, RX8010_FLAG, flagreg);
- mutex_unlock(&rx8010->rtc->ops_lock);
+ rtc_unlock(rx8010->rtc);
return err ? IRQ_NONE : IRQ_HANDLED;
}
@@ -354,13 +354,7 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
}
}
-static const struct rtc_class_ops rx8010_rtc_ops_default = {
- .read_time = rx8010_get_time,
- .set_time = rx8010_set_time,
- .ioctl = rx8010_ioctl,
-};
-
-static const struct rtc_class_ops rx8010_rtc_ops_alarm = {
+static const struct rtc_class_ops rx8010_rtc_ops = {
.read_time = rx8010_get_time,
.set_time = rx8010_set_time,
.ioctl = rx8010_ioctl,
@@ -409,12 +403,11 @@ static int rx8010_probe(struct i2c_client *client)
dev_err(dev, "unable to request IRQ\n");
return err;
}
-
- rx8010->rtc->ops = &rx8010_rtc_ops_alarm;
} else {
- rx8010->rtc->ops = &rx8010_rtc_ops_default;
+ clear_bit(RTC_FEATURE_ALARM, rx8010->rtc->features);
}
+ rx8010->rtc->ops = &rx8010_rtc_ops;
rx8010->rtc->max_user_freq = 1;
rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index a24f85893f90..c914091819ba 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -142,10 +142,9 @@ static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- struct mutex *lock = &rx8025->rtc->ops_lock;
int status;
- mutex_lock(lock);
+ rtc_lock(rx8025->rtc);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
@@ -170,7 +169,7 @@ static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
}
out:
- mutex_unlock(lock);
+ rtc_unlock(rx8025->rtc);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index de109139529b..aed4898a0ff4 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -314,7 +314,7 @@ static const struct i2c_device_id rx8581_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rx8581_id);
-static const struct of_device_id rx8581_of_match[] = {
+static const __maybe_unused struct of_device_id rx8581_of_match[] = {
{ .compatible = "epson,rx8571", .data = &rx8571_config },
{ .compatible = "epson,rx8581", .data = &rx8581_config },
{ /* sentinel */ }
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index ea15d0392bb9..b5bdeda7d767 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -55,7 +55,7 @@ static const struct i2c_device_id s35390a_id[] = {
};
MODULE_DEVICE_TABLE(i2c, s35390a_id);
-static const struct of_device_id s35390a_of_match[] = {
+static const __maybe_unused struct of_device_id s35390a_of_match[] = {
{ .compatible = "s35390a" },
{ .compatible = "sii,s35390a" },
{ }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index fab326ba9cec..e57d3ca70a78 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -382,7 +382,6 @@ static int s3c_rtc_remove(struct platform_device *pdev)
static int s3c_rtc_probe(struct platform_device *pdev)
{
struct s3c_rtc *info = NULL;
- struct rtc_time rtc_tm;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -448,20 +447,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- /* Check RTC Time */
- if (s3c_rtc_gettime(&pdev->dev, &rtc_tm)) {
- rtc_tm.tm_year = 100;
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_mday = 1;
- rtc_tm.tm_hour = 0;
- rtc_tm.tm_min = 0;
- rtc_tm.tm_sec = 0;
-
- s3c_rtc_settime(&pdev->dev, &rtc_tm);
-
- dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
- }
-
/* register RTC and exit */
info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
THIS_MODULE);
@@ -573,7 +558,7 @@ static struct s3c_rtc_data const s3c6410_rtc_data = {
.disable = s3c6410_rtc_disable,
};
-static const struct of_device_id s3c_rtc_dt_match[] = {
+static const __maybe_unused struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
.data = &s3c2410_rtc_data,
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index eb9dde4095a9..80b66f16db89 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -760,7 +760,8 @@ static int s5m_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->i2c = i2c_new_dummy_device(s5m87xx->i2c->adapter, RTC_I2C_ADDR);
+ info->i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
+ RTC_I2C_ADDR);
if (IS_ERR(info->i2c)) {
dev_err(&pdev->dev, "Failed to allocate I2C for RTC\n");
return PTR_ERR(info->i2c);
@@ -771,7 +772,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
ret = PTR_ERR(info->regmap);
dev_err(&pdev->dev, "Failed to allocate RTC register map: %d\n",
ret);
- goto err;
+ return ret;
}
info->dev = &pdev->dev;
@@ -781,26 +782,25 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
if (info->irq <= 0) {
- ret = -EINVAL;
dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
alarm_irq);
- goto err;
+ return -EINVAL;
}
}
platform_set_drvdata(pdev, info);
ret = s5m8767_rtc_init_reg(info);
+ if (ret)
+ return ret;
device_init_wakeup(&pdev->dev, 1);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
&s5m_rtc_ops, THIS_MODULE);
- if (IS_ERR(info->rtc_dev)) {
- ret = PTR_ERR(info->rtc_dev);
- goto err;
- }
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
if (!info->irq) {
dev_info(&pdev->dev, "Alarm IRQ not available\n");
@@ -813,24 +813,10 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->irq, ret);
- goto err;
+ return ret;
}
return 0;
-
-err:
- i2c_unregister_device(info->i2c);
-
- return ret;
-}
-
-static int s5m_rtc_remove(struct platform_device *pdev)
-{
- struct s5m_rtc_info *info = platform_get_drvdata(pdev);
-
- i2c_unregister_device(info->i2c);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -874,7 +860,6 @@ static struct platform_driver s5m_rtc_driver = {
.pm = &s5m_rtc_pm_ops,
},
.probe = s5m_rtc_probe,
- .remove = s5m_rtc_remove,
.id_table = s5m_rtc_id,
};
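
Background for the conversion above: devm_i2c_new_dummy_device() ties the dummy client's lifetime to the platform device, so the error paths collapse into plain returns and the remove() callback disappears. A hedged sketch (names and the 0x0c address are invented):

static int foo_attach_rtc_client(struct platform_device *pdev,
                                 struct i2c_adapter *adap,
                                 struct i2c_client **client)
{
        *client = devm_i2c_new_dummy_device(&pdev->dev, adap, 0x0c);
        if (IS_ERR(*client))
                return PTR_ERR(*client);        /* nothing to unwind */

        return 0;
}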
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index f6bee69ba017..24e8528e23ec 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -207,7 +207,7 @@ static const struct i2c_device_id sd3078_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sd3078_id);
-static const struct of_device_id rtc_dt_match[] = {
+static const __maybe_unused struct of_device_id rtc_dt_match[] = {
{ .compatible = "whwave,sd3078" },
{},
};
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
deleted file mode 100644
index 03a6cca23201..000000000000
--- a/drivers/rtc/rtc-sirfsoc.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SiRFSoC Real Time Clock interface for Linux
- *
- * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/rtc.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
-#include <linux/rtc/sirfsoc_rtciobrg.h>
-
-
-#define RTC_CN 0x00
-#define RTC_ALARM0 0x04
-#define RTC_ALARM1 0x18
-#define RTC_STATUS 0x08
-#define RTC_SW_VALUE 0x40
-#define SIRFSOC_RTC_AL1E (1<<6)
-#define SIRFSOC_RTC_AL1 (1<<4)
-#define SIRFSOC_RTC_HZE (1<<3)
-#define SIRFSOC_RTC_AL0E (1<<2)
-#define SIRFSOC_RTC_HZ (1<<1)
-#define SIRFSOC_RTC_AL0 (1<<0)
-#define RTC_DIV 0x0c
-#define RTC_DEEP_CTRL 0x14
-#define RTC_CLOCK_SWITCH 0x1c
-#define SIRFSOC_RTC_CLK 0x03 /* others are reserved */
-
-/* Refer to RTC DIV switch */
-#define RTC_HZ 16
-
-/* This macro is also defined in arch/arm/plat-sirfsoc/cpu.c */
-#define RTC_SHIFT 4
-
-#define INTR_SYSRTC_CN 0x48
-
-struct sirfsoc_rtc_drv {
- struct rtc_device *rtc;
- u32 rtc_base;
- u32 irq;
- unsigned irq_wake;
- /* Software overflow count; the counter wraps roughly every 8 years */
- u32 overflow_rtc;
- spinlock_t lock;
- struct regmap *regmap;
-#ifdef CONFIG_PM
- u32 saved_counter;
- u32 saved_overflow_rtc;
-#endif
-};
-
-static u32 sirfsoc_rtc_readl(struct sirfsoc_rtc_drv *rtcdrv, u32 offset)
-{
- u32 val;
-
- regmap_read(rtcdrv->regmap, rtcdrv->rtc_base + offset, &val);
- return val;
-}
-
-static void sirfsoc_rtc_writel(struct sirfsoc_rtc_drv *rtcdrv,
- u32 offset, u32 val)
-{
- regmap_write(rtcdrv->regmap, rtcdrv->rtc_base + offset, val);
-}
-
-static int sirfsoc_rtc_read_alarm(struct device *dev,
- struct rtc_wkalrm *alrm)
-{
- unsigned long rtc_alarm, rtc_count;
- struct sirfsoc_rtc_drv *rtcdrv;
-
- rtcdrv = dev_get_drvdata(dev);
-
- spin_lock_irq(&rtcdrv->lock);
-
- rtc_count = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
-
- rtc_alarm = sirfsoc_rtc_readl(rtcdrv, RTC_ALARM0);
- memset(alrm, 0, sizeof(struct rtc_wkalrm));
-
- /*
- * assume the alarm interval stays within one counter wrap
- * (0 -> 0xffffffff)
- */
- /* the alarm falls in the next overflow cycle */
- if (rtc_count > rtc_alarm)
- rtc_time64_to_tm((rtcdrv->overflow_rtc + 1)
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &alrm->time);
- else
- rtc_time64_to_tm(rtcdrv->overflow_rtc
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &alrm->time);
- if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E)
- alrm->enabled = 1;
-
- spin_unlock_irq(&rtcdrv->lock);
-
- return 0;
-}
-
-static int sirfsoc_rtc_set_alarm(struct device *dev,
- struct rtc_wkalrm *alrm)
-{
- unsigned long rtc_status_reg, rtc_alarm;
- struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = dev_get_drvdata(dev);
-
- if (alrm->enabled) {
- rtc_alarm = rtc_tm_to_time64(&alrm->time);
-
- spin_lock_irq(&rtcdrv->lock);
-
- rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
- if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
- /*
- * An ongoing alarm in progress - ingore it and not
- * to return EBUSY
- */
- dev_info(dev, "An old alarm was set, will be replaced by a new one\n");
- }
-
- sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, rtc_alarm << RTC_SHIFT);
- rtc_status_reg &= ~0x07; /* mask out the lower status bits */
- /*
- * This bit RTC_AL sets it as a wake-up source for Sleep Mode
- * Writing 1 into this bit will clear it
- */
- rtc_status_reg |= SIRFSOC_RTC_AL0;
- /* enable the RTC alarm interrupt */
- rtc_status_reg |= SIRFSOC_RTC_AL0E;
- sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
-
- spin_unlock_irq(&rtcdrv->lock);
- } else {
- /*
- * if this function was called with enabled=0
- * then it could mean that the application is
- * trying to cancel an ongoing alarm
- */
- spin_lock_irq(&rtcdrv->lock);
-
- rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
- if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
- /* clear the RTC status register's alarm bit */
- rtc_status_reg &= ~0x07;
- /* write 1 into SIRFSOC_RTC_AL0 to force a clear */
- rtc_status_reg |= (SIRFSOC_RTC_AL0);
- /* Clear the Alarm enable bit */
- rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
-
- sirfsoc_rtc_writel(rtcdrv, RTC_STATUS,
- rtc_status_reg);
- }
-
- spin_unlock_irq(&rtcdrv->lock);
- }
-
- return 0;
-}
-
-static int sirfsoc_rtc_read_time(struct device *dev,
- struct rtc_time *tm)
-{
- unsigned long tmp_rtc = 0;
- struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = dev_get_drvdata(dev);
- /*
- * This workaround is taken from WinCE and still needs validation.
- * To work around a failure in the sirfsoc RTC counter's double-sync
- * logic, read the counter repeatedly until a stable value is seen.
- */
- do {
- tmp_rtc = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
- cpu_relax();
- } while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN));
-
- rtc_time64_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT)
- | tmp_rtc >> RTC_SHIFT, tm);
- return 0;
-}
-
-static int sirfsoc_rtc_set_time(struct device *dev,
- struct rtc_time *tm)
-{
- unsigned long rtc_time;
- struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = dev_get_drvdata(dev);
-
- rtc_time = rtc_tm_to_time64(tm);
-
- rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT);
-
- sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
- sirfsoc_rtc_writel(rtcdrv, RTC_CN, rtc_time << RTC_SHIFT);
-
- return 0;
-}
-
-static int sirfsoc_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- unsigned long rtc_status_reg = 0x0;
- struct sirfsoc_rtc_drv *rtcdrv;
-
- rtcdrv = dev_get_drvdata(dev);
-
- spin_lock_irq(&rtcdrv->lock);
-
- rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
- if (enabled)
- rtc_status_reg |= SIRFSOC_RTC_AL0E;
- else
- rtc_status_reg &= ~SIRFSOC_RTC_AL0E;
-
- sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
-
- spin_unlock_irq(&rtcdrv->lock);
-
- return 0;
-
-}
-
-static const struct rtc_class_ops sirfsoc_rtc_ops = {
- .read_time = sirfsoc_rtc_read_time,
- .set_time = sirfsoc_rtc_set_time,
- .read_alarm = sirfsoc_rtc_read_alarm,
- .set_alarm = sirfsoc_rtc_set_alarm,
- .alarm_irq_enable = sirfsoc_rtc_alarm_irq_enable
-};
-
-static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
-{
- struct sirfsoc_rtc_drv *rtcdrv = pdata;
- unsigned long rtc_status_reg = 0x0;
- unsigned long events = 0x0;
-
- spin_lock(&rtcdrv->lock);
-
- rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
- /* this bit will be set ONLY if an alarm was active
- * and it expired NOW
- * So this is being used as an ASSERT
- */
- if (rtc_status_reg & SIRFSOC_RTC_AL0) {
- /*
- * clear the RTC status register's alarm bit
- * mask out the lower status bits
- */
- rtc_status_reg &= ~0x07;
- /* write 1 into SIRFSOC_RTC_AL0 to ACK the alarm interrupt */
- rtc_status_reg |= (SIRFSOC_RTC_AL0);
- /* Clear the Alarm enable bit */
- rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
- }
-
- sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
-
- spin_unlock(&rtcdrv->lock);
-
- /* this should wake up any apps polling/waiting on the read
- * after setting the alarm
- */
- events |= RTC_IRQF | RTC_AF;
- rtc_update_irq(rtcdrv->rtc, 1, events);
-
- return IRQ_HANDLED;
-}
-
-static const struct of_device_id sirfsoc_rtc_of_match[] = {
- { .compatible = "sirf,prima2-sysrtc"},
- {},
-};
-
-static const struct regmap_config sysrtc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .fast_io = true,
-};
-
-MODULE_DEVICE_TABLE(of, sirfsoc_rtc_of_match);
-
-static int sirfsoc_rtc_probe(struct platform_device *pdev)
-{
- int err;
- unsigned long rtc_div;
- struct sirfsoc_rtc_drv *rtcdrv;
- struct device_node *np = pdev->dev.of_node;
-
- rtcdrv = devm_kzalloc(&pdev->dev,
- sizeof(struct sirfsoc_rtc_drv), GFP_KERNEL);
- if (rtcdrv == NULL)
- return -ENOMEM;
-
- spin_lock_init(&rtcdrv->lock);
-
- err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base);
- if (err) {
- dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n");
- return err;
- }
-
- platform_set_drvdata(pdev, rtcdrv);
-
- /* Register rtc alarm as a wakeup source */
- device_init_wakeup(&pdev->dev, 1);
-
- rtcdrv->regmap = devm_regmap_init_iobg(&pdev->dev,
- &sysrtc_regmap_config);
- if (IS_ERR(rtcdrv->regmap)) {
- err = PTR_ERR(rtcdrv->regmap);
- dev_err(&pdev->dev, "Failed to allocate register map: %d\n",
- err);
- return err;
- }
-
- /*
- * Set the SYS_RTC counter to tick in RTC_HZ units.
- * With the 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1,
- * so for 16 Hz, RTC_DIV = 1023.
- */
- rtc_div = ((32768 / RTC_HZ) / 2) - 1;
- sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
-
- /* 0x3 -> RTC_CLK */
- sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
-
- /* reset SYS RTC ALARM0 */
- sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
-
- /* reset SYS RTC ALARM1 */
- sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
-
- /* Restore RTC Overflow From Register After Command Reboot */
- rtcdrv->overflow_rtc =
- sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
-
- rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtcdrv->rtc))
- return PTR_ERR(rtcdrv->rtc);
-
- rtcdrv->rtc->ops = &sirfsoc_rtc_ops;
- rtcdrv->rtc->range_max = (1ULL << 60) - 1;
-
- rtcdrv->irq = platform_get_irq(pdev, 0);
- err = devm_request_irq(&pdev->dev, rtcdrv->irq, sirfsoc_rtc_irq_handler,
- IRQF_SHARED, pdev->name, rtcdrv);
- if (err) {
- dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n");
- return err;
- }
-
- return devm_rtc_register_device(rtcdrv->rtc);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_rtc_suspend(struct device *dev)
-{
- struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
- rtcdrv->overflow_rtc =
- sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
-
- rtcdrv->saved_counter =
- sirfsoc_rtc_readl(rtcdrv, RTC_CN);
- rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
- if (device_may_wakeup(dev) && !enable_irq_wake(rtcdrv->irq))
- rtcdrv->irq_wake = 1;
-
- return 0;
-}
-
-static int sirfsoc_rtc_resume(struct device *dev)
-{
- u32 tmp;
- struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
-
- /*
- * if resuming from a snapshot and the rtc power was lost,
- * restore the rtc settings
- */
- if (SIRFSOC_RTC_CLK != sirfsoc_rtc_readl(rtcdrv, RTC_CLOCK_SWITCH)) {
- u32 rtc_div;
- /* 0x3 -> RTC_CLK */
- sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
- /*
- * Set the SYS_RTC counter to tick in RTC_HZ units.
- * With the 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1,
- * so for 16 Hz, RTC_DIV = 1023.
- */
- rtc_div = ((32768 / RTC_HZ) / 2) - 1;
-
- sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
-
- /* reset SYS RTC ALARM0 */
- sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
-
- /* reset SYS RTC ALARM1 */
- sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
- }
- rtcdrv->overflow_rtc = rtcdrv->saved_overflow_rtc;
-
- /*
- * if the current counter is smaller than the previous one,
- * the counter overflowed during sleep
- */
- tmp = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
- if (tmp <= rtcdrv->saved_counter)
- rtcdrv->overflow_rtc++;
- /*
- * the PWRC value may change across suspend; restore the
- * in-memory overflow count to the register
- */
- sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
-
- if (device_may_wakeup(dev) && rtcdrv->irq_wake) {
- disable_irq_wake(rtcdrv->irq);
- rtcdrv->irq_wake = 0;
- }
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(sirfsoc_rtc_pm_ops,
- sirfsoc_rtc_suspend, sirfsoc_rtc_resume);
-
-static struct platform_driver sirfsoc_rtc_driver = {
- .driver = {
- .name = "sirfsoc-rtc",
- .pm = &sirfsoc_rtc_pm_ops,
- .of_match_table = sirfsoc_rtc_of_match,
- },
- .probe = sirfsoc_rtc_probe,
-};
-module_platform_driver(sirfsoc_rtc_driver);
-
-MODULE_DESCRIPTION("SiRF SoC rtc driver");
-MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sirfsoc-rtc");
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index d774aa18f57a..75a8924ba12b 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -209,7 +209,7 @@ static irqreturn_t stm32_rtc_alarm_irq(int irq, void *dev_id)
const struct stm32_rtc_events *evts = &rtc->data->events;
unsigned int status, cr;
- mutex_lock(&rtc->rtc_dev->ops_lock);
+ rtc_lock(rtc->rtc_dev);
status = readl_relaxed(rtc->base + regs->sr);
cr = readl_relaxed(rtc->base + regs->cr);
@@ -226,7 +226,7 @@ static irqreturn_t stm32_rtc_alarm_irq(int irq, void *dev_id)
stm32_rtc_clear_event_flags(rtc, evts->alra);
}
- mutex_unlock(&rtc->rtc_dev->ops_lock);
+ rtc_unlock(rtc->rtc_dev);
return IRQ_HANDLED;
}
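
rtc_lock()/rtc_unlock() are the rtc-core wrappers around the device's ops_lock; drivers are expected to use them rather than taking the mutex directly, as the hunk above does. A minimal sketch with invented names:

static irqreturn_t foo_rtc_alarm_irq(int irq, void *dev_id)
{
        struct rtc_device *rtc = dev_id;

        rtc_lock(rtc);
        /* read and clear the alarm flags while ops are serialized */
        rtc_unlock(rtc);

        return IRQ_HANDLED;
}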
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 8925015cc698..85f7ad5d5390 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -232,7 +232,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct tegra_rtc_info *info = dev_get_drvdata(dev);
- unsigned long events = 0, flags;
+ unsigned long events = 0;
u32 status;
status = readl(info->base + TEGRA_RTC_REG_INTR_STATUS);
@@ -240,10 +240,10 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
/* clear the interrupt masks and status on any IRQ */
tegra_rtc_wait_while_busy(dev);
- spin_lock_irqsave(&info->lock, flags);
+ spin_lock(&info->lock);
writel(0, info->base + TEGRA_RTC_REG_INTR_MASK);
writel(status, info->base + TEGRA_RTC_REG_INTR_STATUS);
- spin_unlock_irqrestore(&info->lock, flags);
+ spin_unlock(&info->lock);
}
/* check if alarm */
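
The simplification above relies on the handler running in hard-irq context, where interrupts are already disabled; plain spin_lock() suffices, and the irqsave/irqrestore variant is only needed where the lock is also taken with interrupts enabled. Illustrative sketch (struct and names invented):

struct foo_info {
        spinlock_t lock;
        void __iomem *base;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
        struct foo_info *info = data;

        spin_lock(&info->lock); /* interrupts already off in hardirq */
        /* ... mask and acknowledge the hardware status ... */
        spin_unlock(&info->lock);

        return IRQ_HANDLED;
}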
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2d87b62826a8..288abb1abdb8 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -361,13 +361,6 @@ static const struct rtc_class_ops tps65910_rtc_ops = {
.set_offset = tps65910_set_offset,
};
-static const struct rtc_class_ops tps65910_rtc_ops_noirq = {
- .read_time = tps65910_rtc_read_time,
- .set_time = tps65910_rtc_set_time,
- .read_offset = tps65910_read_offset,
- .set_offset = tps65910_set_offset,
-};
-
static int tps65910_rtc_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = NULL;
@@ -426,11 +419,15 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->irq = irq;
if (irq != -1) {
- device_set_wakeup_capable(&pdev->dev, 1);
- tps_rtc->rtc->ops = &tps65910_rtc_ops;
- } else
- tps_rtc->rtc->ops = &tps65910_rtc_ops_noirq;
+ if (device_property_present(tps65910->dev, "wakeup-source"))
+ device_init_wakeup(&pdev->dev, 1);
+ else
+ device_set_wakeup_capable(&pdev->dev, 1);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, tps_rtc->rtc->features);
+ }
+ tps_rtc->rtc->ops = &tps65910_rtc_ops;
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
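
Clearing RTC_FEATURE_ALARM tells the rtc core not to expose alarm operations at all, which is what lets the duplicate "noirq" ops table above go away. A hedged fragment of the idea (invented name):

static void foo_rtc_config_alarm(struct rtc_device *rtc, int irq)
{
        /* single ops table either way; the core hides alarm ops */
        if (irq < 0)
                clear_bit(RTC_FEATURE_ALARM, rtc->features);
}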
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
deleted file mode 100644
index c3309db5448d..000000000000
--- a/drivers/rtc/rtc-tx4939.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * TX4939 internal RTC driver
- * Based on RBTX49xx patch from CELF patch archive.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- */
-#include <linux/rtc.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
-
-#define TX4939_RTCCTL_ALME 0x00000080
-#define TX4939_RTCCTL_ALMD 0x00000040
-#define TX4939_RTCCTL_BUSY 0x00000020
-
-#define TX4939_RTCCTL_COMMAND 0x00000007
-#define TX4939_RTCCTL_COMMAND_NOP 0x00000000
-#define TX4939_RTCCTL_COMMAND_GETTIME 0x00000001
-#define TX4939_RTCCTL_COMMAND_SETTIME 0x00000002
-#define TX4939_RTCCTL_COMMAND_GETALARM 0x00000003
-#define TX4939_RTCCTL_COMMAND_SETALARM 0x00000004
-
-#define TX4939_RTCTBC_PM 0x00000080
-#define TX4939_RTCTBC_COMP 0x0000007f
-
-#define TX4939_RTC_REG_RAMSIZE 0x00000100
-#define TX4939_RTC_REG_RWBSIZE 0x00000006
-
-struct tx4939_rtc_reg {
- __u32 ctl;
- __u32 adr;
- __u32 dat;
- __u32 tbc;
-};
-
-struct tx4939rtc_plat_data {
- struct rtc_device *rtc;
- struct tx4939_rtc_reg __iomem *rtcreg;
- spinlock_t lock;
-};
-
-static int tx4939_rtc_cmd(struct tx4939_rtc_reg __iomem *rtcreg, int cmd)
-{
- int i = 0;
-
- __raw_writel(cmd, &rtcreg->ctl);
- /* This might take 30us (next 32.768KHz clock) */
- while (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_BUSY) {
- /* timeout on approx. 100us (@ GBUS200MHz) */
- if (i++ > 200 * 100)
- return -EBUSY;
- cpu_relax();
- }
- return 0;
-}
-
-static int tx4939_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- unsigned long secs = rtc_tm_to_time64(tm);
- int i, ret;
- unsigned char buf[6];
-
- buf[0] = 0;
- buf[1] = 0;
- buf[2] = secs;
- buf[3] = secs >> 8;
- buf[4] = secs >> 16;
- buf[5] = secs >> 24;
- spin_lock_irq(&pdata->lock);
- __raw_writel(0, &rtcreg->adr);
- for (i = 0; i < 6; i++)
- __raw_writel(buf[i], &rtcreg->dat);
- ret = tx4939_rtc_cmd(rtcreg,
- TX4939_RTCCTL_COMMAND_SETTIME |
- (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- spin_unlock_irq(&pdata->lock);
- return ret;
-}
-
-static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- int i, ret;
- unsigned long sec;
- unsigned char buf[6];
-
- spin_lock_irq(&pdata->lock);
- ret = tx4939_rtc_cmd(rtcreg,
- TX4939_RTCCTL_COMMAND_GETTIME |
- (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- if (ret) {
- spin_unlock_irq(&pdata->lock);
- return ret;
- }
- __raw_writel(2, &rtcreg->adr);
- for (i = 2; i < 6; i++)
- buf[i] = __raw_readl(&rtcreg->dat);
- spin_unlock_irq(&pdata->lock);
- sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
- (buf[3] << 8) | buf[2];
- rtc_time64_to_tm(sec, tm);
- return 0;
-}
-
-static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- int i, ret;
- unsigned long sec;
- unsigned char buf[6];
-
- sec = rtc_tm_to_time64(&alrm->time);
- buf[0] = 0;
- buf[1] = 0;
- buf[2] = sec;
- buf[3] = sec >> 8;
- buf[4] = sec >> 16;
- buf[5] = sec >> 24;
- spin_lock_irq(&pdata->lock);
- __raw_writel(0, &rtcreg->adr);
- for (i = 0; i < 6; i++)
- __raw_writel(buf[i], &rtcreg->dat);
- ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM |
- (alrm->enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->lock);
- return ret;
-}
-
-static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- int i, ret;
- unsigned long sec;
- unsigned char buf[6];
- u32 ctl;
-
- spin_lock_irq(&pdata->lock);
- ret = tx4939_rtc_cmd(rtcreg,
- TX4939_RTCCTL_COMMAND_GETALARM |
- (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- if (ret) {
- spin_unlock_irq(&pdata->lock);
- return ret;
- }
- __raw_writel(2, &rtcreg->adr);
- for (i = 2; i < 6; i++)
- buf[i] = __raw_readl(&rtcreg->dat);
- ctl = __raw_readl(&rtcreg->ctl);
- alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
- alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
- spin_unlock_irq(&pdata->lock);
- sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
- (buf[3] << 8) | buf[2];
- rtc_time64_to_tm(sec, &alrm->time);
- return rtc_valid_tm(&alrm->time);
-}
-
-static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- spin_lock_irq(&pdata->lock);
- tx4939_rtc_cmd(pdata->rtcreg,
- TX4939_RTCCTL_COMMAND_NOP |
- (enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->lock);
- return 0;
-}
-
-static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
-{
- struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev_id);
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- unsigned long events = RTC_IRQF;
-
- spin_lock(&pdata->lock);
- if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) {
- events |= RTC_AF;
- tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- }
- spin_unlock(&pdata->lock);
- rtc_update_irq(pdata->rtc, 1, events);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops tx4939_rtc_ops = {
- .read_time = tx4939_rtc_read_time,
- .read_alarm = tx4939_rtc_read_alarm,
- .set_alarm = tx4939_rtc_set_alarm,
- .set_time = tx4939_rtc_set_time,
- .alarm_irq_enable = tx4939_rtc_alarm_irq_enable,
-};
-
-static int tx4939_nvram_read(void *priv, unsigned int pos, void *val,
- size_t bytes)
-{
- struct tx4939rtc_plat_data *pdata = priv;
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- u8 *buf = val;
-
- spin_lock_irq(&pdata->lock);
- for (; bytes; bytes--) {
- __raw_writel(pos++, &rtcreg->adr);
- *buf++ = __raw_readl(&rtcreg->dat);
- }
- spin_unlock_irq(&pdata->lock);
- return 0;
-}
-
-static int tx4939_nvram_write(void *priv, unsigned int pos, void *val,
- size_t bytes)
-{
- struct tx4939rtc_plat_data *pdata = priv;
- struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
- u8 *buf = val;
-
- spin_lock_irq(&pdata->lock);
- for (; bytes; bytes--) {
- __raw_writel(pos++, &rtcreg->adr);
- __raw_writel(*buf++, &rtcreg->dat);
- }
- spin_unlock_irq(&pdata->lock);
- return 0;
-}
-
-static int __init tx4939_rtc_probe(struct platform_device *pdev)
-{
- struct rtc_device *rtc;
- struct tx4939rtc_plat_data *pdata;
- int irq, ret;
- struct nvmem_config nvmem_cfg = {
- .name = "tx4939_nvram",
- .size = TX4939_RTC_REG_RAMSIZE,
- .reg_read = tx4939_nvram_read,
- .reg_write = tx4939_nvram_write,
- };
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
- platform_set_drvdata(pdev, pdata);
-
- pdata->rtcreg = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pdata->rtcreg))
- return PTR_ERR(pdata->rtcreg);
-
- spin_lock_init(&pdata->lock);
- tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
- 0, pdev->name, &pdev->dev) < 0)
- return -EBUSY;
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- rtc->ops = &tx4939_rtc_ops;
- rtc->range_max = U32_MAX;
-
- pdata->rtc = rtc;
-
- nvmem_cfg.priv = pdata;
- ret = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
- if (ret)
- return ret;
-
- return devm_rtc_register_device(rtc);
-}
-
-static int __exit tx4939_rtc_remove(struct platform_device *pdev)
-{
- struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- spin_lock_irq(&pdata->lock);
- tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- spin_unlock_irq(&pdata->lock);
- return 0;
-}
-
-static struct platform_driver tx4939_rtc_driver = {
- .remove = __exit_p(tx4939_rtc_remove),
- .driver = {
- .name = "tx4939rtc",
- },
-};
-
-module_platform_driver_probe(tx4939_rtc_driver, tx4939_rtc_probe);
-
-MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
-MODULE_DESCRIPTION("TX4939 internal RTC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tx4939rtc");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c7eb9a10c680..28c04a4efa66 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -428,23 +428,15 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
- struct gendisk *disk;
- struct disk_part_iter piter;
- struct block_device *part;
-
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
if ((device->features & DASD_FEATURE_USERAW)) {
- disk = device->block->gdp;
- kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
return 0;
}
- disk = device->block->bdev->bd_disk;
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
- while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
- disk_part_iter_exit(&piter);
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
}
return 0;
}
@@ -455,9 +447,6 @@ dasd_state_ready_to_online(struct dasd_device * device)
static int dasd_state_online_to_ready(struct dasd_device *device)
{
int rc;
- struct gendisk *disk;
- struct disk_part_iter piter;
- struct block_device *part;
if (device->discipline->online_to_ready) {
rc = device->discipline->online_to_ready(device);
@@ -466,13 +455,8 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
}
device->state = DASD_STATE_READY;
- if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
- disk = device->block->bdev->bd_disk;
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
- while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
- disk_part_iter_exit(&piter);
- }
+ if (device->block && !(device->features & DASD_FEATURE_USERAW))
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
return 0;
}
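
disk_uevent() walks the whole gendisk, part0 included, and emits the requested uevent for each partition, replacing the open-coded disk_part_iter loops removed above. A one-call sketch (wrapper name invented):

static void foo_announce_disk(struct gendisk *disk)
{
        /* KOBJ_CHANGE for the disk and every partition */
        disk_uevent(disk, KOBJ_CHANGE);
}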
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 16bb135c20aa..03d27ee9cac6 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1874,18 +1874,26 @@ void dasd_path_create_kobjects(struct dasd_device *device)
}
EXPORT_SYMBOL(dasd_path_create_kobjects);
-/*
- * As we keep kobjects for the lifetime of a device, this function must not be
- * called anywhere but in the context of offlining a device.
- */
-void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
{
if (device->path[chp].in_sysfs) {
kobject_put(&device->path[chp].kobj);
device->path[chp].in_sysfs = false;
}
}
-EXPORT_SYMBOL(dasd_path_remove_kobj);
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
int dasd_add_sysfs_files(struct ccw_device *cdev)
{
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 3caa1ee5f4b0..65eb87cbbb9b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1036,7 +1036,6 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
device->path[i].ssid = 0;
device->path[i].chpid = 0;
dasd_path_notoper(device, i);
- dasd_path_remove_kobj(device, i);
}
}
@@ -2173,6 +2172,7 @@ out_err2:
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
@@ -2191,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->vdsneq = NULL;
private->gneq = NULL;
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3bc008f9136c..b8a04c42d1d2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -858,7 +858,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
void dasd_remove_sysfs_files(struct ccw_device *);
void dasd_path_create_kobj(struct dasd_device *, int);
void dasd_path_create_kobjects(struct dasd_device *);
-void dasd_path_remove_kobj(struct dasd_device *, int);
+void dasd_path_remove_kobjects(struct dasd_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 299e77ec2c41..da33cb4cba28 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -879,17 +879,13 @@ dcssblk_submit_bio(struct bio *bio)
blk_queue_split(&bio);
bytes_done = 0;
- dev_info = bio->bi_disk->private_data;
+ dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
- /* Request beyond end of DCSS segment. */
- goto fail;
- }
/* verify data transfer direction */
if (dev_info->is_shared) {
switch (dev_info->segment_type) {
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c2536f7767b3..d1ed39162943 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,7 +184,7 @@ static unsigned long xpram_highest_page_index(void)
*/
static blk_qc_t xpram_submit_bio(struct bio *bio)
{
- xpram_device_t *xdev = bio->bi_disk->private_data;
+ xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 1354c42d95aa..671efee612af 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -914,7 +914,6 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(&raw->port, tty);
- raw->port.low_latency = 0; /* don't use bottom half for pushing chars */
/*
* Start up 3215 device
*/
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index ec9f8ad5341c..b7329af076a0 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -66,13 +66,13 @@ int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
unsigned long flags;
int rc;
- raw_local_irq_save(flags);
+ flags = arch_local_irq_save();
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
sclp_early_wait_irq();
out:
- raw_local_irq_restore(flags);
+ arch_local_irq_restore(flags);
return rc;
}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 5aff8b684eb2..013bcc331305 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -65,7 +65,6 @@ sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&sclp_port, tty);
tty->driver_data = NULL;
- sclp_port.low_latency = 0;
return 0;
}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 3f9a6ef650fa..047f812d1a1c 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -560,7 +560,6 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
tty_port_tty_set(&sclp_vt220_port, tty);
- sclp_vt220_port.low_latency = 0;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = 24;
tty->winsize.ws_col = 80;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index ecf8c5006a0e..0d484fe43d7e 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -761,7 +761,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
* This function is called, when error recovery was successful
*/
static inline int
-tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
+tape_3590_erp_succeeded(struct tape_device *device, struct tape_request *request)
{
DBF_EVENT(3, "Error Recovery successful for %s\n",
tape_op_verbose[request->op]);
@@ -831,7 +831,7 @@ tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
case SENSE_BRA_PER:
return tape_3590_erp_failed(device, request, irb, rc);
case SENSE_BRA_CONT:
- return tape_3590_erp_succeded(device, request);
+ return tape_3590_erp_succeeded(device, request);
case SENSE_BRA_RE:
return tape_3590_erp_retry(device, request, irb);
case SENSE_BRA_DRE:
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index aec996de44d9..15692449a1c3 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -967,7 +967,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
- tp->port.low_latency = 0;
tp->inattr = TF_INPUT;
goto port_install;
}
@@ -996,7 +995,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
- tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 1bbf27b98cf6..68f49e2e964c 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -681,7 +681,7 @@ static int ur_open(struct inode *inode, struct file *file)
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
*/
- devno = MINOR(file_inode(file)->i_rdev);
+ devno = iminor(file_inode(file));
urd = urdev_get_from_devno(devno);
if (!urd) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 94c6470de635..253ab4e7a415 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -225,18 +225,23 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
+ sch->dev.dma_mask = &sch->dma_mask;
device_initialize(&sch->dev);
/*
- * The physical addresses of some the dma structures that can
+ * The physical addresses for some of the dma structures that can
* belong to a subchannel need to fit 31 bit width (e.g. ccw).
*/
- sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+ if (ret)
+ goto err;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
- sch->dma_mask = DMA_BIT_MASK(64);
- sch->dev.dma_mask = &sch->dma_mask;
+ ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+ if (ret)
+ goto err;
+
return sch;
err:
@@ -970,8 +975,11 @@ static int __init setup_css(int nr)
* css->device as the device argument with the DMA API)
* and are fine with 64 bit addresses.
*/
- css->device.coherent_dma_mask = DMA_BIT_MASK(64);
- css->device.dma_mask = &css->device.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
+ if (ret) {
+ kfree(css);
+ goto out_err;
+ }
mutex_init(&css->mutex);
ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
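
Unlike direct assignment of the mask fields, dma_set_mask() and dma_set_coherent_mask() validate the mask against what the device and platform support, and can fail; hence the new error paths above. A sketch of the checked pattern:

static int foo_setup_dma_masks(struct device *dev)
{
        int ret;

        /* coherent (e.g. ccw) structures must fit 31-bit addresses */
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;

        /* streaming mappings may use the full 64-bit range */
        return dma_set_mask(dev, DMA_BIT_MASK(64));
}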
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 4b0a7cbb2096..3f026021e95e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -621,14 +621,6 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
NULL,
};
-static int ccw_device_add(struct ccw_device *cdev)
-{
- struct device *dev = &cdev->dev;
-
- dev->bus = &ccw_bus_type;
- return device_add(dev);
-}
-
static int match_dev_id(struct device *dev, const void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
@@ -687,33 +679,47 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
struct gen_pool *dma_pool;
+ int ret;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+ if (!cdev) {
+ ret = -ENOMEM;
goto err_cdev;
+ }
cdev->private = kzalloc(sizeof(struct ccw_device_private),
GFP_KERNEL | GFP_DMA);
- if (!cdev->private)
+ if (!cdev->private) {
+ ret = -ENOMEM;
goto err_priv;
- cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ }
+
cdev->dev.dma_mask = sch->dev.dma_mask;
+ ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
+ if (ret)
+ goto err_coherent_mask;
+
dma_pool = cio_gp_dma_create(&cdev->dev, 1);
- if (!dma_pool)
+ if (!dma_pool) {
+ ret = -ENOMEM;
goto err_dma_pool;
+ }
cdev->private->dma_pool = dma_pool;
cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
sizeof(*cdev->private->dma_area));
- if (!cdev->private->dma_area)
+ if (!cdev->private->dma_area) {
+ ret = -ENOMEM;
goto err_dma_area;
+ }
return cdev;
err_dma_area:
cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
+err_coherent_mask:
kfree(cdev->private);
err_priv:
kfree(cdev);
err_cdev:
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
static void ccw_device_todo(struct work_struct *work);
@@ -739,6 +745,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
cdev->ccwlock = sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
+ cdev->dev.bus = &ccw_bus_type;
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
@@ -840,7 +847,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
/* make it known to the system */
- ret = ccw_device_add(cdev);
+ ret = device_add(&cdev->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
@@ -1052,7 +1059,7 @@ static int io_subchannel_probe(struct subchannel *sch)
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
cdev = sch_get_cdev(sch);
- rc = ccw_device_add(cdev);
+ rc = device_add(&cdev->dev);
if (rc) {
/* Release online reference. */
put_device(&cdev->dev);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index cd2df4ff8e0e..34bf2f197c71 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -139,9 +139,6 @@ struct qdio_dev_perf_stat {
unsigned int qdio_int;
unsigned int pci_request_int;
- unsigned int tasklet_inbound;
- unsigned int tasklet_inbound_resched;
- unsigned int tasklet_inbound_resched2;
unsigned int tasklet_outbound;
unsigned int siga_read;
@@ -149,7 +146,6 @@ struct qdio_dev_perf_stat {
unsigned int siga_sync;
unsigned int inbound_call;
- unsigned int inbound_handler;
unsigned int stop_polling;
unsigned int inbound_queue_full;
unsigned int outbound_call;
@@ -193,6 +189,8 @@ struct qdio_output_q {
struct qdio_outbuf_state *sbal_state;
/* timer to check for more outbound work */
struct timer_list timer;
+ /* tasklet to check for completions */
+ struct tasklet_struct tasklet;
};
/*
@@ -216,13 +214,9 @@ struct qdio_q {
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
- /* error condition during a data transfer */
- unsigned int qdio_error;
-
/* last scan of the queue */
u64 timestamp;
- struct tasklet_struct tasklet;
struct qdio_queue_perf_stat q_stats;
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
@@ -254,6 +248,7 @@ struct qdio_irq {
struct ccw_device *cdev;
struct list_head entry; /* list of thinint devices */
struct dentry *debugfs_dev;
+ u64 last_data_irq_time;
unsigned long int_parm;
struct subchannel_id schid;
@@ -324,6 +319,14 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
+static inline void qdio_deliver_irq(struct qdio_irq *irq)
+{
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+ irq->irq_poll(irq->cdev, irq->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq, int_discarded);
+}
+
#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
@@ -357,16 +360,12 @@ extern u64 last_ai_time;
/* prototypes for thin interrupt */
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
-void tiqdio_add_device(struct qdio_irq *irq_ptr);
-void tiqdio_remove_device(struct qdio_irq *irq_ptr);
-void tiqdio_inbound_processing(unsigned long q);
int qdio_thinint_init(void);
void qdio_thinint_exit(void);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
-void qdio_inbound_processing(unsigned long data);
-void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_tasklet(struct tasklet_struct *t);
void qdio_outbound_timer(struct timer_list *t);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 863d17c802ca..00384f58f218 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -105,8 +105,9 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
- q->timestamp, last_ai_time);
+ seq_printf(m, "Timestamp: %llx\n", q->timestamp);
+ seq_printf(m, "Last Data IRQ: %llx Last AI: %llx\n",
+ q->irq_ptr->last_data_irq_time, last_ai_time);
seq_printf(m, "nr_used: %d ftc: %d\n",
atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
@@ -197,15 +198,11 @@ static char *qperf_names[] = {
"Assumed adapter interrupts",
"QDIO interrupts",
"Requested PCIs",
- "Inbound tasklet runs",
- "Inbound tasklet resched",
- "Inbound tasklet resched2",
"Outbound tasklet runs",
"SIGA read",
"SIGA write",
"SIGA sync",
"Inbound calls",
- "Inbound handler",
"Inbound stop_polling",
"Inbound queue full",
"Outbound calls",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f9a31c7819ae..03a011619908 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -202,7 +202,7 @@ again:
*/
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
- int auto_ack, int merge_pending)
+ int auto_ack)
{
unsigned char __state = 0;
int i = 1;
@@ -217,18 +217,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (__state & SLSB_OWNER_CU)
goto out;
- if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
- __state = SLSB_P_OUTPUT_EMPTY;
-
for (; i < count; i++) {
bufnr = next_buf(bufnr);
- /* merge PENDING into EMPTY: */
- if (merge_pending &&
- q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
- __state == SLSB_P_OUTPUT_EMPTY)
- continue;
-
/* stop if next state differs from initial state: */
if (q->slsb.val[bufnr] != __state)
break;
@@ -242,7 +233,7 @@ out:
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack)
{
- return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+ return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -420,8 +411,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
static void process_buffer_error(struct qdio_q *q, unsigned int start,
int count)
{
- q->qdio_error = QDIO_ERROR_SLSB_STATE;
-
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
q->sbal[start]->element[15].sflags == 0x10) {
@@ -450,7 +439,8 @@ static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
q->u.in.batch_count += count;
}
-static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
+ unsigned int *error)
{
unsigned char state = 0;
int count;
@@ -465,7 +455,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
* No siga-sync here: a PCI or thin interrupt has
* already sync'ed the queues.
*/
- count = get_buf_states(q, start, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1);
if (!count)
return 0;
@@ -484,6 +474,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
count);
+ *error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
inbound_handle_work(q, start, count, false);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
@@ -508,11 +499,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
}
}
-static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
-{
- return get_inbound_buffer_frontier(q, start);
-}
-
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -546,96 +532,23 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
WARN_ON_ONCE(phys_aob & 0xFF);
}
- q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
- unsigned int count)
-{
- if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
- return;
-
- if (q->is_input_q) {
- qperf_inc(q, inbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
- } else {
- qperf_inc(q, outbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
- start, count);
- }
-
- q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
- q->irq_ptr->int_parm);
-
- /* for the next time */
- q->qdio_error = 0;
-}
-
static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
- tasklet_schedule(&q->tasklet);
+ tasklet_schedule(&q->u.out.tasklet);
return 0;
}
return -EPERM;
}
-static void __qdio_inbound_processing(struct qdio_q *q)
-{
- unsigned int start = q->first_to_check;
- int count;
-
- qperf_inc(q, tasklet_inbound);
-
- count = qdio_inbound_q_moved(q, start);
- if (count == 0)
- return;
-
- qdio_kick_handler(q, start, count);
- start = add_buf(start, count);
- q->first_to_check = start;
-
- if (!qdio_inbound_q_done(q, start)) {
- /* means poll time is not yet over */
- qperf_inc(q, tasklet_inbound_resched);
- if (!qdio_tasklet_schedule(q))
- return;
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!qdio_inbound_q_done(q, start)) {
- qperf_inc(q, tasklet_inbound_resched2);
- qdio_tasklet_schedule(q);
- }
-}
-
-void qdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
- __qdio_inbound_processing(q);
-}
-
-static void qdio_check_pending(struct qdio_q *q, unsigned int index)
-{
- unsigned char state;
-
- if (get_buf_state(q, index, &state, 0) > 0 &&
- state == SLSB_P_OUTPUT_PENDING &&
- q->u.out.aobs[index]) {
- q->u.out.sbal_state[index].flags |=
- QDIO_OUTBUF_STATE_FLAG_PENDING;
- q->u.out.aobs[index] = NULL;
- }
-}
-
-static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
+ unsigned int *error)
{
unsigned char state = 0;
+ unsigned int i;
int count;
q->timestamp = get_tod_clock_fast();
@@ -651,13 +564,19 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
if (!count)
return 0;
- count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0);
if (!count)
return 0;
switch (state) {
- case SLSB_P_OUTPUT_EMPTY:
case SLSB_P_OUTPUT_PENDING:
+ /* detach the utilized QAOBs: */
+ for (i = 0; i < count; i++)
+ q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
+
+ *error = QDIO_ERROR_SLSB_PENDING;
+ fallthrough;
+ case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
@@ -667,6 +586,10 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
account_sbals(q, count);
return count;
case SLSB_P_OUTPUT_ERROR:
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
+ q->nr, count);
+
+ *error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
@@ -697,26 +620,6 @@ static inline int qdio_outbound_q_done(struct qdio_q *q)
return atomic_read(&q->nr_buf_used) == 0;
}
-static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
-{
- int count;
-
- count = get_outbound_buffer_frontier(q, start);
-
- if (count) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
-
- if (q->u.out.use_cq) {
- unsigned int i;
-
- for (i = 0; i < count; i++)
- qdio_check_pending(q, QDIO_BUFNR(start + i));
- }
- }
-
- return count;
-}
-
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
unsigned long aob)
{
@@ -760,18 +663,29 @@ retry:
return cc;
}
-static void __qdio_outbound_processing(struct qdio_q *q)
+void qdio_outbound_tasklet(struct tasklet_struct *t)
{
+ struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
+ struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
unsigned int start = q->first_to_check;
+ unsigned int error = 0;
int count;
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
- count = qdio_outbound_q_moved(q, start);
+ count = get_outbound_buffer_frontier(q, start, &error);
if (count) {
q->first_to_check = add_buf(start, count);
- qdio_kick_handler(q, start, count);
+
+ if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
+
+ q->handler(q->irq_ptr->cdev, error, q->nr, start,
+ count, q->irq_ptr->int_parm);
+ }
}
if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
@@ -798,13 +712,6 @@ sched:
qdio_tasklet_schedule(q);
}
-/* outbound tasklet */
-void qdio_outbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
- __qdio_outbound_processing(q);
-}
-
void qdio_outbound_timer(struct timer_list *t)
{
struct qdio_q *q = from_timer(q, t, u.out.timer);
@@ -825,19 +732,6 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
qdio_tasklet_schedule(out);
}
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
-
- if (need_siga_sync(q) && need_siga_sync_after_ai(q))
- qdio_sync_queues(q);
-
- /* The interrupt could be caused by a PCI request: */
- qdio_check_outbound_pci_queues(q->irq_ptr);
-
- __qdio_inbound_processing(q);
-}
-
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -865,15 +759,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- if (irq_ptr->irq_poll) {
- if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
- irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
- else
- QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
- } else {
- for_each_input_queue(irq_ptr, q, i)
- tasklet_schedule(&q->tasklet);
- }
+ qdio_deliver_irq(irq_ptr);
+ irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
return;
@@ -1016,12 +903,9 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
int i;
- for_each_input_queue(irq_ptr, q, i)
- tasklet_kill(&q->tasklet);
-
for_each_output_queue(irq_ptr, q, i) {
del_timer_sync(&q->u.out.timer);
- tasklet_kill(&q->tasklet);
+ tasklet_kill(&q->u.out.tasklet);
}
}
@@ -1059,7 +943,6 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- tiqdio_remove_device(irq_ptr);
qdio_shutdown_queues(irq_ptr);
qdio_shutdown_debug_entries(irq_ptr);
@@ -1177,7 +1060,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
if (rc)
goto err_queues;
- INIT_LIST_HEAD(&irq_ptr->entry);
cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
@@ -1263,6 +1145,9 @@ int qdio_establish(struct ccw_device *cdev,
!init_data->output_sbal_addr_array)
return -EINVAL;
+ if (!init_data->irq_poll)
+ return -EINVAL;
+
mutex_lock(&irq_ptr->setup_mutex);
qdio_trace_init_data(irq_ptr, init_data);
qdio_setup_irq(irq_ptr, init_data);
@@ -1357,9 +1242,6 @@ int qdio_activate(struct ccw_device *cdev)
goto out;
}
- if (is_thinint_irq(irq_ptr))
- tiqdio_add_device(irq_ptr);
-
/* wait for subchannel to become active */
msleep(5);
@@ -1557,17 +1439,16 @@ static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
unsigned int start = q->first_to_check;
int count;
- count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
- qdio_outbound_q_moved(q, start);
+ *error = 0;
+ count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
+ get_outbound_buffer_frontier(q, start, error);
if (count == 0)
return 0;
*bufnr = start;
- *error = q->qdio_error;
/* for the next time */
q->first_to_check = add_buf(start, count);
- q->qdio_error = 0;
return count;
}
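
The tasklet conversion above uses the new-style API: the callback receives the tasklet pointer and recovers its container via from_tasklet(), a container_of() wrapper, instead of casting an unsigned long. Sketch with invented names:

struct foo_out_q {
        struct tasklet_struct tasklet;
        /* ... queue state ... */
};

static void foo_outbound_tasklet(struct tasklet_struct *t)
{
        struct foo_out_q *q = from_tasklet(q, t, tasklet);

        /* ... scan q for completed buffers ... */
}

static void foo_init_queue(struct foo_out_q *q)
{
        tasklet_setup(&q->tasklet, foo_outbound_tasklet);
}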
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a5b2e16b7aa8..c8b9620bc688 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -259,14 +259,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_storage_lists(q, irq_ptr,
qdio_init->input_sbal_addr_array[i], i);
-
- if (is_thinint_irq(irq_ptr)) {
- tasklet_init(&q->tasklet, tiqdio_inbound_processing,
- (unsigned long) q);
- } else {
- tasklet_init(&q->tasklet, qdio_inbound_processing,
- (unsigned long) q);
- }
}
for_each_output_queue(irq_ptr, q, i) {
@@ -280,8 +272,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_storage_lists(q, irq_ptr,
qdio_init->output_sbal_addr_array[i], i);
- tasklet_init(&q->tasklet, qdio_outbound_processing,
- (unsigned long) q);
+ tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
}
}
@@ -483,12 +474,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
ccw_device_get_schid(cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
- if (init_data->irq_poll) {
- irq_ptr->irq_poll = init_data->irq_poll;
- set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
- } else {
- irq_ptr->irq_poll = NULL;
- }
+ irq_ptr->irq_poll = init_data->irq_poll;
+ set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
setup_qib(irq_ptr, init_data);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 7a440e4328cd..8e09bf3a2fcd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -66,22 +66,6 @@ static void put_indicator(u32 *addr)
atomic_dec(&ind->count);
}
-void tiqdio_add_device(struct qdio_irq *irq_ptr)
-{
- mutex_lock(&tiq_list_lock);
- list_add_rcu(&irq_ptr->entry, &tiq_list);
- mutex_unlock(&tiq_list_lock);
-}
-
-void tiqdio_remove_device(struct qdio_irq *irq_ptr)
-{
- mutex_lock(&tiq_list_lock);
- list_del_rcu(&irq_ptr->entry);
- mutex_unlock(&tiq_list_lock);
- synchronize_rcu();
- INIT_LIST_HEAD(&irq_ptr->entry);
-}
-
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
@@ -106,32 +90,6 @@ static inline u32 clear_shared_ind(void)
return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
-static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
-{
- struct qdio_q *q;
- int i;
-
- if (!references_shared_dsci(irq))
- xchg(irq->dsci, 0);
-
- if (irq->irq_poll) {
- if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
- irq->irq_poll(irq->cdev, irq->int_parm);
- else
- QDIO_PERF_STAT_INC(irq, int_discarded);
-
- return;
- }
-
- for_each_input_queue(irq, q, i) {
- /*
- * Call inbound processing but not directly
- * since that could starve other thinint queues.
- */
- tasklet_schedule(&q->tasklet);
- }
-}
-
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
@@ -139,10 +97,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
*/
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
+ u64 irq_time = S390_lowcore.int_clock;
u32 si_used = clear_shared_ind();
struct qdio_irq *irq;
- last_ai_time = S390_lowcore.int_clock;
+ last_ai_time = irq_time;
inc_irq_stat(IRQIO_QAI);
/* protect tiq_list entries, only changed in activate or shutdown */
@@ -153,10 +112,15 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
if (unlikely(references_shared_dsci(irq))) {
if (!si_used)
continue;
- } else if (!*irq->dsci)
- continue;
+ } else {
+ if (!*irq->dsci)
+ continue;
- tiqdio_call_inq_handlers(irq);
+ xchg(irq->dsci, 0);
+ }
+
+ qdio_deliver_irq(irq);
+ irq->last_data_irq_time = irq_time;
QDIO_PERF_STAT_INC(irq, adapter_int);
}
@@ -208,10 +172,15 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
rc = set_subchannel_ind(irq_ptr, 0);
- if (rc)
+ if (rc) {
put_indicator(irq_ptr->dsci);
+ return rc;
+ }
- return rc;
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+ return 0;
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
@@ -219,6 +188,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
if (!is_thinint_irq(irq_ptr))
return;
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&irq_ptr->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
+
/* reset adapter interrupt indicators */
set_subchannel_ind(irq_ptr, 1);
put_indicator(irq_ptr->dsci);
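The two hunks above fold tiqdio_add_device()/tiqdio_remove_device() into qdio_establish_thinint()/qdio_shutdown_thinint(): an irq is published on tiq_list with list_add_rcu() and retracted with list_del_rcu() plus synchronize_rcu(), so tiqdio_thinint_handler() can walk the list locklessly under rcu_read_lock(). A minimal kernel-style sketch of that publish/retract pattern (struct item, process() and the list names are hypothetical, not the driver's actual code):

struct item {
	struct list_head entry;
};

static LIST_HEAD(active_list);
static DEFINE_MUTEX(active_lock);

static void publish(struct item *it)
{
	mutex_lock(&active_lock);
	list_add_rcu(&it->entry, &active_list);	/* now visible to readers */
	mutex_unlock(&active_lock);
}

static void retract(struct item *it)
{
	mutex_lock(&active_lock);
	list_del_rcu(&it->entry);
	mutex_unlock(&active_lock);
	synchronize_rcu();	/* wait out readers still looking at it */
}

static void handler(void)	/* runs lockless, e.g. in interrupt context */
{
	struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &active_list, entry)
		process(it);	/* entry cannot be freed while we run */
	rcu_read_unlock();
}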
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index be2520cc010b..7dc72cb718b0 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
struct vfio_ap_queue *q;
- int apid, apqi;
mutex_lock(&matrix_dev->lock);
q = dev_get_drvdata(&apdev->device);
+ vfio_ap_mdev_reset_queue(q, 1);
dev_set_drvdata(&apdev->device, NULL);
- apid = AP_QID_CARD(q->apqn);
- apqi = AP_QID_QUEUE(q->apqn);
- vfio_ap_mdev_reset_queue(apid, apqi, 1);
- vfio_ap_irq_disable(q);
kfree(q);
mutex_unlock(&matrix_dev->lock);
}
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index e0bde8518745..41fc2e4135fe 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -25,6 +25,7 @@
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static int match_apqn(struct device *dev, const void *data)
{
@@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
int apqn)
{
struct vfio_ap_queue *q;
- struct device *dev;
if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
return NULL;
if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
return NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (!dev)
- return NULL;
- q = dev_get_drvdata(dev);
- q->matrix_mdev = matrix_mdev;
- put_device(dev);
+ q = vfio_ap_find_queue(apqn);
+ if (q)
+ q->matrix_mdev = matrix_mdev;
return q;
}
@@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
- if (q->saved_pfn && q->matrix_mdev)
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
&q->saved_pfn, 1);
- q->saved_pfn = 0;
- q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->saved_pfn = 0;
+ }
}
/**
@@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 * Returns the ap_aqic() status if it failed with an invalid,
 * deconfigured or checkstopped AP.
*/
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status;
@@ -1037,19 +1038,14 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
{
struct ap_matrix_mdev *m;
- mutex_lock(&matrix_dev->lock);
-
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if ((m != matrix_mdev) && (m->kvm == kvm)) {
- mutex_unlock(&matrix_dev->lock);
+ if ((m != matrix_mdev) && (m->kvm == kvm))
return -EPERM;
- }
}
matrix_mdev->kvm = kvm;
kvm_get_kvm(kvm);
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- mutex_unlock(&matrix_dev->lock);
return 0;
}
@@ -1083,79 +1079,118 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret;
+ int ret, notify_rc = NOTIFY_OK;
struct ap_matrix_mdev *matrix_mdev;
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ mutex_lock(&matrix_dev->lock);
if (!data) {
- matrix_mdev->kvm = NULL;
- return NOTIFY_OK;
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+ goto notify_done;
}
ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret)
- return NOTIFY_DONE;
+ if (ret) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
/* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd)
- return NOTIFY_DONE;
+ if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
- return NOTIFY_OK;
+notify_done:
+ mutex_unlock(&matrix_dev->lock);
+ return notify_rc;
}
-static void vfio_ap_irq_disable_apqn(int apqn)
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct device *dev;
- struct vfio_ap_queue *q;
+ struct vfio_ap_queue *q = NULL;
dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
&apqn, match_apqn);
if (dev) {
q = dev_get_drvdata(dev);
- vfio_ap_irq_disable(q);
put_device(dev);
}
+
+ return q;
}
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
unsigned int retry)
{
struct ap_queue_status status;
+ int ret;
int retry2 = 2;
- int apqn = AP_MKQID(apid, apqi);
- do {
- status = ap_zapq(apqn);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- while (!status.queue_empty && retry2--) {
- msleep(20);
- status = ap_tapq(apqn, NULL);
- }
- WARN_ON_ONCE(retry2 <= 0);
- return 0;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
+ if (!q)
+ return 0;
+
+retry_zapq:
+ status = ap_zapq(q->apqn);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ ret = 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (retry--) {
msleep(20);
- break;
- default:
- /* things are really broken, give up */
- return -EIO;
+ goto retry_zapq;
}
- } while (retry--);
+ ret = -EBUSY;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ WARN_ON_ONCE(status.irq_enabled);
+ ret = -EBUSY;
+ goto free_resources;
+ default:
+ /* things are really broken, give up */
+ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ status.response_code);
+ return -EIO;
+ }
+
+ /* wait for the reset to take effect */
+ while (retry2--) {
+ if (status.queue_empty && !status.irq_enabled)
+ break;
+ msleep(20);
+ status = ap_tapq(q->apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
- return -EBUSY;
+free_resources:
+ vfio_ap_free_aqic_resources(q);
+
+ return ret;
}
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
@@ -1163,13 +1198,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
int ret;
int rc = 0;
unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
matrix_mdev->matrix.apm_max + 1) {
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
matrix_mdev->matrix.aqm_max + 1) {
- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+ ret = vfio_ap_mdev_reset_queue(q, 1);
/*
 * Regardless of whether a queue turns out to be busy or
 * not operational, we need to continue resetting
@@ -1177,7 +1214,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -1222,13 +1258,8 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
- if (matrix_mdev->kvm) {
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- vfio_ap_mdev_reset_queues(mdev);
- kvm_put_kvm(matrix_mdev->kvm);
- matrix_mdev->kvm = NULL;
- }
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
mutex_unlock(&matrix_dev->lock);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
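The reworked vfio_ap_mdev_reset_queue() above retries ZAPQ while a reset is in progress, then polls with TAPQ until the queue is empty with interrupts disabled, and only then frees the AQIC resources. A hedged sketch of that reset-then-poll shape (struct queue, issue_reset() and queue_drained() are hypothetical stand-ins for ap_zapq()/ap_tapq()):

static int reset_and_wait(struct queue *q, unsigned int retries)
{
	int poll = 2;	/* mirrors the retry2 bound above */
	int rc;

	do {
		rc = issue_reset(q);	/* ~ ap_zapq() */
		if (rc != -EAGAIN)	/* -EAGAIN ~ RESET_IN_PROGRESS */
			break;
		msleep(20);
	} while (retries-- > 0);
	if (rc)
		return rc;

	/* reset accepted: wait for the queue to drain */
	while (poll-- > 0 && !queue_drained(q))	/* ~ ap_tapq() poll */
		msleep(20);

	return queue_drained(q) ? 0 : -EBUSY;
}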
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index f46dde56b464..28e9d9989768 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -88,11 +88,6 @@ struct ap_matrix_mdev {
struct mdev_device *mdev;
};
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry);
-
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
unsigned long saved_pfn;
@@ -100,5 +95,10 @@ struct vfio_ap_queue {
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
};
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry);
+
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 10206e4498d0..52eaf51c9bb6 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1438,6 +1438,8 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
@@ -1481,6 +1483,8 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
return rc;
@@ -1524,6 +1528,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
rc, xcRB.status);
@@ -1568,6 +1574,8 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
@@ -1744,6 +1752,8 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc)
return rc;
return put_user(mex64.outputdatalength,
@@ -1795,6 +1805,8 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
if (rc)
return rc;
return put_user(crt64.outputdatalength,
@@ -1865,6 +1877,8 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
xcRB32.reply_data_length = xcRB64.reply_data_length;
xcRB32.status = xcRB64.status;
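Each ioctl hunk above adds the same guard: once the -EAGAIN retry budget (TRACK_AGAIN_MAX) is exhausted, the transient error is turned into a hard -EIO instead of leaking -EAGAIN to user space. The pattern in isolation, as a sketch (struct xfer and submit_once() are hypothetical):

static int submit_with_retry(struct xfer *xfer)
{
	unsigned int tries = 0;
	int rc;

	do {
		rc = submit_once(xfer);
		if (rc == -EAGAIN)
			tries++;
	} while (rc == -EAGAIN && tries < TRACK_AGAIN_MAX);

	/* retry budget spent: report a hard error, not -EAGAIN */
	if (rc == -EAGAIN && tries >= TRACK_AGAIN_MAX)
		rc = -EIO;

	return rc;
}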
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index b1046811450f..d68c0ed5e0dd 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -662,7 +662,10 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
__func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
- rc = -EIO;
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EAGAIN;
+ else
+ rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
@@ -1275,7 +1278,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
__func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
- rc = -EIO;
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EAGAIN;
+ else
+ rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
@@ -1441,7 +1447,10 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
__func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
- rc = -EIO;
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EAGAIN;
+ else
+ rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
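The three zcrypt_ccamisc.c hunks translate the CCA busy condition (return code 8, reason code 2290) into -EAGAIN rather than a blanket -EIO, which is what lets the bounded retry loops added in zcrypt_api.c above actually retry it. As a sketch (helper name hypothetical):

static int cca_rc_to_errno(int rtcode, int rscode)
{
	/* rtcode 8 / rscode 2290: coprocessor busy, worth retrying */
	if (rtcode == 8 && rscode == 2290)
		return -EAGAIN;

	return -EIO;	/* anything else is a hard failure */
}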
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..a1da83b0b0ef 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -956,24 +956,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
return PFN_UP(end) - PFN_DOWN(start);
}
-static inline int qeth_get_ip_version(struct sk_buff *skb)
-{
- struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
- __be16 prot = veth->h_vlan_proto;
-
- if (prot == htons(ETH_P_8021Q))
- prot = veth->h_vlan_encapsulated_proto;
-
- switch (prot) {
- case htons(ETH_P_IPV6):
- return 6;
- case htons(ETH_P_IP):
- return 4;
- default:
- return 0;
- }
-}
-
static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
{
u8 *addr = eth_hdr(skb)->h_dest;
@@ -984,14 +966,20 @@ static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
return RTN_UNICAST;
}
-static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
+static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
+ __be16 proto)
{
struct dst_entry *dst = skb_dst(skb);
struct rt6_info *rt;
rt = (struct rt6_info *) dst;
- if (dst)
- dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ if (dst) {
+ if (proto == htons(ETH_P_IPV6))
+ dst = dst_check(dst, rt6_get_cookie(rt));
+ else
+ dst = dst_check(dst, 0);
+ }
+
return dst;
}
@@ -1014,11 +1002,11 @@ static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
return &ipv6_hdr(skb)->daddr;
}
-static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
+static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, __be16 proto)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
- if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
- (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
+ if ((proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (proto == htons(ETH_P_IPV6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
*flags |= QETH_HDR_EXT_UDP;
}
@@ -1067,8 +1055,8 @@ extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
-int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
-void qeth_core_free_discipline(struct qeth_card *);
+int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
+void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
extern struct kmem_cache *qeth_core_header_cache;
@@ -1079,7 +1067,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
@@ -1144,10 +1133,10 @@ int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv,
+ struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len));
+ __be16 proto, unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
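Dropping qeth_get_ip_version() in favour of the __be16 EtherType from vlan_get_protocol() removes the intermediate 4/6 encoding: htons() of a constant folds at compile time, so callers can switch directly on the wire-format value, and non-IP traffic such as ETH_P_AF_IUCV gets its own case. A minimal sketch of the dispatch style used throughout these hunks (helper name hypothetical, logic as in qeth_tx_csum() above):

static bool proto_is_udp(struct sk_buff *skb, __be16 proto)
{
	switch (proto) {
	case htons(ETH_P_IP):	/* constant-folded, valid as a case label */
		return ip_hdr(skb)->protocol == IPPROTO_UDP;
	case htons(ETH_P_IPV6):
		return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
	default:
		return false;	/* non-IP traffic */
	}
}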
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..b71b8902d1c4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -825,7 +825,8 @@ static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
return false;
rcu_read_lock();
- next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ next_hop = qeth_next_hop_v4_rcu(skb,
+ qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
key = ipv4_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
@@ -851,7 +852,8 @@ static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
return false;
rcu_read_lock();
- next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ next_hop = qeth_next_hop_v6_rcu(skb,
+ qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
key = ipv6_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
@@ -1407,10 +1409,12 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct sk_buff *skb;
skb_queue_walk(&buf->skb_list, skb) {
+ struct sock *sk = skb->sk;
+
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
- if (skb->sk && skb->sk->sk_family == PF_IUCV)
- iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+ if (sk && sk->sk_family == PF_IUCV)
+ iucv_sk(sk)->sk_txnotify(sk, notification);
}
}
@@ -3690,24 +3694,27 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
- /* Fake the TX completion interrupt: */
- if (IS_IQD(card)) {
- unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
- unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+ switch (rc) {
+ case 0:
+ case -ENOBUFS:
+ /* ignore temporary SIGA errors without busy condition */
- if (frames && queue->coalesced_frames >= frames) {
- napi_schedule(&queue->napi);
- queue->coalesced_frames = 0;
- QETH_TXQ_STAT_INC(queue, coal_frames);
- } else if (usecs) {
- qeth_tx_arm_timer(queue, usecs);
+ /* Fake the TX completion interrupt: */
+ if (IS_IQD(card)) {
+ unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+ unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
+ }
}
- }
- if (rc) {
- /* ignore temporary SIGA errors without busy condition */
- if (rc == -ENOBUFS)
- return;
+ break;
+ default:
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
@@ -3717,7 +3724,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* This must not happen under normal circumstances. If it
* does, something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
- return;
}
}
@@ -3896,11 +3902,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
- switch (qeth_get_ip_version(skb)) {
- case 4:
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
- case 6:
+ case htons(ETH_P_IPV6):
tos = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
@@ -4365,10 +4371,10 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv,
+ struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len))
+ __be16 proto, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
@@ -4401,7 +4407,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(queue, hdr, skb, ipv, frame_len);
+ fill_header(queue, hdr, skb, proto, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -5507,12 +5513,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5535,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5543,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5557,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5585,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5606,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+ /* Lock-free, other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5626,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6067,14 +6074,15 @@ int qeth_poll(struct napi_struct *napi, int budget)
EXPORT_SYMBOL_GPL(qeth_poll);
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
- unsigned int bidx, bool error, int budget)
+ unsigned int bidx, unsigned int qdio_error,
+ int budget)
{
struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
u8 sflags = buffer->buffer->element[15].sflags;
struct qeth_card *card = queue->card;
+ bool error = !!qdio_error;
- if (queue->bufstates && (queue->bufstates[bidx].flags &
- QDIO_OUTBUF_STATE_FLAG_PENDING)) {
+ if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
@@ -6348,9 +6356,11 @@ static int qeth_register_dbf_views(void)
static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
-int qeth_core_load_discipline(struct qeth_card *card,
- enum qeth_discipline_id discipline)
+int qeth_setup_discipline(struct qeth_card *card,
+ enum qeth_discipline_id discipline)
{
+ int rc;
+
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
@@ -6372,12 +6382,25 @@ int qeth_core_load_discipline(struct qeth_card *card,
return -EINVAL;
}
+ rc = card->discipline->setup(card->gdev);
+ if (rc) {
+ if (discipline == QETH_DISCIPLINE_LAYER2)
+ symbol_put(qeth_l2_discipline);
+ else
+ symbol_put(qeth_l3_discipline);
+ card->discipline = NULL;
+
+ return rc;
+ }
+
card->options.layer = discipline;
return 0;
}
-void qeth_core_free_discipline(struct qeth_card *card)
+void qeth_remove_discipline(struct qeth_card *card)
{
+ card->discipline->remove(card->gdev);
+
if (IS_LAYER2(card))
symbol_put(qeth_l2_discipline);
else
@@ -6584,23 +6607,19 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
- rc = qeth_core_load_discipline(card, enforced_disc);
+ /* It's so early that we don't need the discipline_mutex yet. */
+ rc = qeth_setup_discipline(card, enforced_disc);
if (rc)
- goto err_load;
+ goto err_setup_disc;
gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
card->discipline->devtype;
- rc = card->discipline->setup(card->gdev);
- if (rc)
- goto err_disc;
break;
}
return 0;
-err_disc:
- qeth_core_free_discipline(card);
-err_load:
+err_setup_disc:
err_chp_desc:
free_netdev(card->dev);
err_card:
@@ -6616,10 +6635,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
- if (card->discipline) {
- card->discipline->remove(gdev);
- qeth_core_free_discipline(card);
- }
+ mutex_lock(&card->discipline_mutex);
+ if (card->discipline)
+ qeth_remove_discipline(card);
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,29 +6653,32 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
- rc = qeth_core_load_discipline(card, def_discipline);
+ rc = qeth_setup_discipline(card, def_discipline);
if (rc)
goto err;
- rc = card->discipline->setup(card->gdev);
- if (rc) {
- qeth_core_free_discipline(card);
- goto err;
- }
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
- return qeth_set_offline(card, false);
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
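The locking rework above hoists discipline_mutex out of qeth_set_online()/qeth_set_offline() and into the ccwgroup callbacks, passing the discipline down explicitly; qeth_do_reset() may snapshot card->discipline lock-free because any concurrent discipline change blocks until recovery is done. Assembled from the hunks above, the offline path now has this shape:

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);	/* caller owns the lock */
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);	/* helper no longer self-locks */
	return rc;
}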
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index a0f777f76f66..5815114da468 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -384,19 +384,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
- card->discipline->remove(card->gdev);
- qeth_core_free_discipline(card);
+ qeth_remove_discipline(card);
free_netdev(card->dev);
card->dev = ndev;
}
- rc = qeth_core_load_discipline(card, newdis);
- if (rc)
- goto out;
+ rc = qeth_setup_discipline(card, newdis);
- rc = card->discipline->setup(card->gdev);
- if (rc)
- qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..ca44421a6d6e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -157,7 +157,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len)
+ __be16 proto, unsigned int data_len)
{
int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -169,7 +169,7 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
}
/* set byte 3 to casting flags */
@@ -551,7 +551,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+ rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
qeth_l2_fill_header);
if (!rc)
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..dd441eaec66e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1576,7 +1576,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
- int ipv)
+ __be16 proto)
{
struct neighbour *n = NULL;
@@ -1595,30 +1595,31 @@ static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
}
/* no neighbour (e.g. AF_PACKET), fall back to target's IP address ... */
- switch (ipv) {
- case 4:
+ switch (proto) {
+ case htons(ETH_P_IP):
if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
- case 6:
+ case htons(ETH_P_IPV6):
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
+ case htons(ETH_P_AF_IUCV):
+ return RTN_UNICAST;
default:
- /* ... and MAC address */
+ /* OSA only: ... and MAC address */
return qeth_get_ether_cast_type(skb);
}
}
-static int qeth_l3_get_cast_type(struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
{
- int ipv = qeth_get_ip_version(skb);
struct dst_entry *dst;
int cast_type;
rcu_read_lock();
- dst = qeth_dst_check_rcu(skb, ipv);
- cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ dst = qeth_dst_check_rcu(skb, proto);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
rcu_read_unlock();
return cast_type;
@@ -1637,7 +1638,7 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len)
+ __be16 proto, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -1652,23 +1653,15 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- if (skb->protocol == htons(ETH_P_AF_IUCV)) {
- l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
- memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
- iucv_trans_hdr(skb)->destUserID, 8);
- return;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+ qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
/* some HW requires combined L3+L4 csum offload: */
- if (ipv == 4)
+ if (proto == htons(ETH_P_IP))
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
}
}
- if (ipv == 4 || IS_IQD(card)) {
+ if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
/* NETIF_F_HW_VLAN_CTAG_TX */
if (skb_vlan_tag_present(skb)) {
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
@@ -1680,24 +1673,33 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
- dst = qeth_dst_check_rcu(skb, ipv);
+ dst = qeth_dst_check_rcu(skb, proto);
if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
cast_type = RTN_UNICAST;
else
- cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
- if (ipv == 4) {
+ switch (proto) {
+ case htons(ETH_P_IP):
l3_hdr->next_hop.addr.s6_addr32[3] =
qeth_next_hop_v4_rcu(skb, dst);
- } else if (ipv == 6) {
+ break;
+ case htons(ETH_P_IPV6):
l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
- } else {
+ break;
+ case htons(ETH_P_AF_IUCV):
+ l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
+ iucv_trans_hdr(skb)->destUserID, 8);
+ l3_hdr->flags |= QETH_HDR_IPV6;
+ break;
+ default:
/* OSA only: */
l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
@@ -1719,7 +1721,7 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv)
+ struct qeth_qdio_out_q *queue, __be16 proto)
{
unsigned int hw_hdr_len;
int rc;
@@ -1733,15 +1735,15 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
+ return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ __be16 proto = vlan_get_protocol(skb);
u16 txq = skb_get_queue_mapping(skb);
- int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int rc;
@@ -1752,22 +1754,32 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->options.sniffer)
goto tx_drop;
- if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
- (card->options.cq == QETH_CQ_ENABLED &&
- skb->protocol != htons(ETH_P_AF_IUCV)))
+
+ switch (proto) {
+ case htons(ETH_P_AF_IUCV):
+ if (card->options.cq != QETH_CQ_ENABLED)
+ goto tx_drop;
+ break;
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ if (card->options.cq == QETH_CQ_ENABLED)
+ goto tx_drop;
+ break;
+ default:
goto tx_drop;
+ }
} else {
queue = card->qdio.out_qs[txq];
}
if (!(dev->flags & IFF_BROADCAST) &&
- qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
+ qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
goto tx_drop;
- if (ipv == 4 || IS_IQD(card))
- rc = qeth_l3_xmit(card, skb, queue, ipv);
+ if (proto == htons(ETH_P_IP) || IS_IQD(card))
+ rc = qeth_l3_xmit(card, skb, queue, proto);
else
- rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
+ rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -1813,7 +1825,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1821,8 +1833,10 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
- sb_dev);
+ __be16 proto = vlan_get_protocol(skb);
+
+ return qeth_iqd_select_queue(dev, skb,
+ qeth_l3_get_cast_type(skb, proto), sb_dev);
}
static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -1971,7 +1985,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 5730572b52cd..54e686dca6de 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -117,7 +117,7 @@ struct virtio_rev_info {
};
/* the highest virtio-ccw revision we support */
-#define VIRTIO_CCW_REV_MAX 1
+#define VIRTIO_CCW_REV_MAX 2
struct virtio_ccw_vq_info {
struct virtqueue *vq;
@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
- if (vcdev->revision < 1)
+ if (vcdev->revision < 2)
return vcdev->dma_area->status;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index fad936eb845f..00e72b97d0b6 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -186,7 +186,7 @@ static int d7s_probe(struct platform_device *op)
p->regs = of_ioremap(&op->resource[0], 0, sizeof(u8), "d7s");
if (!p->regs) {
printk(KERN_ERR PFX "Cannot map chip registers\n");
- goto out_free;
+ goto out;
}
err = misc_register(&d7s_miscdev);
@@ -228,8 +228,6 @@ out:
out_iounmap:
of_iounmap(&op->resource[0], p->regs, sizeof(u8));
-
-out_free:
goto out;
}
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9529074c8886..ec5627890809 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11423,12 +11423,11 @@ static int advansys_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int advansys_isa_remove(struct device *dev, unsigned int id)
+static void advansys_isa_remove(struct device *dev, unsigned int id)
{
int ioport = _asc_def_iop_base[id];
advansys_release(dev_get_drvdata(dev));
release_region(ioport, ASC_IOADR_GAP);
- return 0;
}
static struct isa_driver advansys_isa_driver = {
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 916664294d70..21aab9f5b117 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -1054,12 +1054,11 @@ static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int aha1542_isa_remove(struct device *pdev,
+static void aha1542_isa_remove(struct device *pdev,
unsigned int ndev)
{
aha1542_release(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
- return 0;
}
static struct isa_driver aha1542_isa_driver = {
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index e0cdcd2003d0..2b4280a43a53 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -175,7 +175,7 @@ static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
return 1;
}
-static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
+static void fdomain_isa_remove(struct device *dev, unsigned int ndev)
{
struct Scsi_Host *sh = dev_get_drvdata(dev);
int base = sh->io_port;
@@ -183,7 +183,6 @@ static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
fdomain_destroy(sh);
release_region(base, FDOMAIN_REGION_SIZE);
dev_set_drvdata(dev, NULL);
- return 0;
}
static struct isa_driver fdomain_isa_driver = {
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 2df2f38a9b12..7ba3c9312731 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -720,12 +720,11 @@ static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int generic_NCR5380_isa_remove(struct device *pdev,
- unsigned int ndev)
+static void generic_NCR5380_isa_remove(struct device *pdev,
+ unsigned int ndev)
{
generic_NCR5380_release_resources(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
- return 0;
}
static struct isa_driver generic_NCR5380_isa_driver = {
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 5e990f4c1ca6..4d819e52496a 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -559,6 +559,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return -ENODEV;
}
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
/*
* there are two DMA bufs in the request; actually there is one, and
* the second one is just the start address + cmd size.
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d1b0cbe1b5f1..3cdeaeb92933 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6881,6 +6881,7 @@ static void __exit scsi_debug_exit(void)
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
+ kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1cdfa5a8ca09..08c06c56331c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2025,7 +2025,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->timeout = 10 * HZ;
rq->retries = 5;
- blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d0ae586565f8..7d52a11e1b61 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -269,7 +269,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(req->q, NULL, req, 1);
+ blk_execute_rq(NULL, req, 1);
/*
* Some devices (USB mass-storage in particular) may transfer
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 87a7274e4632..ee558675eab4 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -665,12 +665,28 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
return 0;
}
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
+static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
{
+ /* Serialize against revalidate zones */
+ mutex_lock(&sdkp->rev_mutex);
+
kvfree(sdkp->zones_wp_offset);
sdkp->zones_wp_offset = NULL;
kfree(sdkp->zone_wp_update_buf);
sdkp->zone_wp_update_buf = NULL;
+
+ sdkp->nr_zones = 0;
+ sdkp->rev_nr_zones = 0;
+ sdkp->zone_blocks = 0;
+ sdkp->rev_zone_blocks = 0;
+
+ mutex_unlock(&sdkp->rev_mutex);
+}
+
+void sd_zbc_release_disk(struct scsi_disk *sdkp)
+{
+ if (sd_is_zoned(sdkp))
+ sd_zbc_clear_zone_info(sdkp);
}
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
@@ -773,6 +789,21 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
*/
return 0;
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ if (!blk_queue_is_zoned(q)) {
+ /*
+ * This can happen for a host aware disk with partitions.
+ * The block device zone information was already cleared
+ * by blk_queue_set_zoned(). Only clear the scsi disk zone
+ * information and exit early.
+ */
+ sd_zbc_clear_zone_info(sdkp);
+ return 0;
+ }
+
/* Check zoned block device characteristics (unconstrained reads) */
ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
@@ -793,9 +824,13 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
blk_queue_max_active_zones(q, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
+ /*
+ * Per ZBC and ZAC specifications, writes in sequential write required
+ * zones of host-managed devices must be aligned to the device physical
+ * block size.
+ */
+ if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
sdkp->rev_nr_zones = nr_zones;
sdkp->rev_zone_blocks = zone_blocks;
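The sd_zbc.c changes above hoist the READ16/WRITE16 setup, clear stale zone state early when the block layer no longer treats the disk as zoned, and advertise the physical block size as the write granularity for host-managed devices. The alignment rule that granularity expresses, as a sketch (hypothetical helper, sizes in bytes):

static bool zoned_write_aligned(unsigned long long pos,
				unsigned long long len, unsigned int gran)
{
	/* sequential-write-required zones: both the offset and the
	 * length of a write must be multiples of the granularity */
	return (pos % gran == 0) && (len % gran == 0);
}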
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index bfa8d77322d7..4383d93110f8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -829,8 +829,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, at_head, sg_rq_end_io);
+ blk_execute_rq_nowait(sdp->disk, srp->rq, at_head, sg_rq_end_io);
return 0;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 43f7624508a9..841ad2fc369a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -585,7 +585,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
rq->retries = retries;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
return 0;
}
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index ec80ec83cf85..d70cdcd35e43 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -179,8 +179,8 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
}
/* The actual number of configurations supported is (CFGC+1) */
- err = blk_ksm_init(&hba->ksm,
- hba->crypto_capabilities.config_count + 1);
+ err = devm_blk_ksm_init(hba->dev, &hba->ksm,
+ hba->crypto_capabilities.config_count + 1);
if (err)
goto out;
@@ -236,8 +236,3 @@ void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
if (hba->caps & UFSHCD_CAP_CRYPTO)
blk_ksm_register(&hba->ksm, q);
}
-
-void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba)
-{
- blk_ksm_destroy(&hba->ksm);
-}
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
index d53851be5541..78a58e788dff 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/scsi/ufs/ufshcd-crypto.h
@@ -43,8 +43,6 @@ void ufshcd_init_crypto(struct ufs_hba *hba);
void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q);
-void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba);
-
#else /* CONFIG_SCSI_UFS_CRYPTO */
static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
@@ -69,9 +67,6 @@ static inline void ufshcd_init_crypto(struct ufs_hba *hba) { }
static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
struct request_queue *q) { }
-static inline void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba)
-{ }
-
#endif /* CONFIG_SCSI_UFS_CRYPTO */
#endif /* _UFSHCD_CRYPTO_H */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 80620c866192..77161750c9fb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -9190,7 +9190,6 @@ EXPORT_SYMBOL_GPL(ufshcd_remove);
*/
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
- ufshcd_crypto_destroy_keyslot_manager(hba);
scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
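Switching to devm_blk_ksm_init() ties the keyslot manager's lifetime to the UFS device, which is why ufshcd_crypto_destroy_keyslot_manager() and its call in ufshcd_dealloc_host() can be deleted above. The general devm pattern, sketched with a hypothetical driver state object:

struct my_state {
	int dummy;	/* placeholder */
};

static int my_probe(struct device *dev)
{
	struct my_state *st;

	/* released automatically on unbind; no kfree() in remove() */
	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	dev_set_drvdata(dev, st);
	return 0;
}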
diff --git a/drivers/sfi/Kconfig b/drivers/sfi/Kconfig
deleted file mode 100644
index 3d0b64db214e..000000000000
--- a/drivers/sfi/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# SFI Configuration
-#
-
-menuconfig SFI
- bool "SFI (Simple Firmware Interface) Support"
- help
- The Simple Firmware Interface (SFI) provides a lightweight method
- for platform firmware to pass information to the operating system
- via static tables in memory. Kernel SFI support is required to
- boot on SFI-only platforms. Currently, all SFI-only platforms are
- based on the 2nd generation Intel Atom processor platform,
- code-named Moorestown.
-
- For more information, see http://simplefirmware.org
-
- Say 'Y' here to enable the kernel to boot on SFI-only platforms.
diff --git a/drivers/sfi/Makefile b/drivers/sfi/Makefile
deleted file mode 100644
index ca9436b12386..000000000000
--- a/drivers/sfi/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += sfi_acpi.o
-obj-y += sfi_core.o
-
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
deleted file mode 100644
index d277b36eb389..000000000000
--- a/drivers/sfi/sfi_acpi.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/* sfi_acpi.c Simple Firmware Interface - ACPI extensions */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#define KMSG_COMPONENT "SFI"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/sfi_acpi.h>
-#include "sfi_core.h"
-
-/*
- * SFI can access ACPI-defined tables via an optional ACPI XSDT.
- *
- * This allows re-use, and avoids re-definition, of standard tables.
- * For example, the "MCFG" table is defined by PCI, reserved by ACPI,
- * and is expected to be present on many SFI-only systems.
- */
-
-static struct acpi_table_xsdt *xsdt_va __read_mostly;
-
-#define XSDT_GET_NUM_ENTRIES(ptable, entry_type) \
- ((ptable->header.length - sizeof(struct acpi_table_header)) / \
- (sizeof(entry_type)))
-
-static inline struct sfi_table_header *acpi_to_sfi_th(
- struct acpi_table_header *th)
-{
- return (struct sfi_table_header *)th;
-}
-
-static inline struct acpi_table_header *sfi_to_acpi_th(
- struct sfi_table_header *th)
-{
- return (struct acpi_table_header *)th;
-}
-
-/*
- * sfi_acpi_parse_xsdt()
- *
- * Parse the ACPI XSDT for later access by sfi_acpi_table_parse().
- */
-static int __init sfi_acpi_parse_xsdt(struct sfi_table_header *th)
-{
- struct sfi_table_key key = SFI_ANY_KEY;
- int tbl_cnt, i;
- void *ret;
-
- xsdt_va = (struct acpi_table_xsdt *)th;
- tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
- for (i = 0; i < tbl_cnt; i++) {
- ret = sfi_check_table(xsdt_va->table_offset_entry[i], &key);
- if (IS_ERR(ret)) {
- disable_sfi();
- return -1;
- }
- }
-
- return 0;
-}
-
-int __init sfi_acpi_init(void)
-{
- struct sfi_table_key xsdt_key = { .sig = SFI_SIG_XSDT };
-
- sfi_table_parse(SFI_SIG_XSDT, NULL, NULL, sfi_acpi_parse_xsdt);
-
- /* Only call the get_table to keep the table mapped */
- xsdt_va = (struct acpi_table_xsdt *)sfi_get_table(&xsdt_key);
- return 0;
-}
-
-static struct acpi_table_header *sfi_acpi_get_table(struct sfi_table_key *key)
-{
- u32 tbl_cnt, i;
- void *ret;
-
- tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
- for (i = 0; i < tbl_cnt; i++) {
- ret = sfi_check_table(xsdt_va->table_offset_entry[i], key);
- if (!IS_ERR(ret) && ret)
- return sfi_to_acpi_th(ret);
- }
-
- return NULL;
-}
-
-static void sfi_acpi_put_table(struct acpi_table_header *table)
-{
- sfi_put_table(acpi_to_sfi_th(table));
-}
-
-/*
- * sfi_acpi_table_parse()
- *
- * Find specified table in XSDT, run handler on it and return its return value
- */
-int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- int(*handler)(struct acpi_table_header *))
-{
- struct acpi_table_header *table = NULL;
- struct sfi_table_key key;
- int ret = 0;
-
- if (sfi_disabled)
- return -1;
-
- key.sig = signature;
- key.oem_id = oem_id;
- key.oem_table_id = oem_table_id;
-
- table = sfi_acpi_get_table(&key);
- if (!table)
- return -EINVAL;
-
- ret = handler(table);
- sfi_acpi_put_table(table);
- return ret;
-}
-
-static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t offset, size_t count)
-{
- struct sfi_table_attr *tbl_attr =
- container_of(bin_attr, struct sfi_table_attr, attr);
- struct acpi_table_header *th = NULL;
- struct sfi_table_key key;
- ssize_t cnt;
-
- key.sig = tbl_attr->name;
- key.oem_id = NULL;
- key.oem_table_id = NULL;
-
- th = sfi_acpi_get_table(&key);
- if (!th)
- return 0;
-
- cnt = memory_read_from_buffer(buf, count, &offset,
- th, th->length);
- sfi_acpi_put_table(th);
-
- return cnt;
-}
-
-
-void __init sfi_acpi_sysfs_init(void)
-{
- u32 tbl_cnt, i;
- struct sfi_table_attr *tbl_attr;
-
- tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
- for (i = 0; i < tbl_cnt; i++) {
- tbl_attr =
- sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]);
- tbl_attr->attr.read = sfi_acpi_table_show;
- }
-
- return;
-}
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
deleted file mode 100644
index a5136901dd8a..000000000000
--- a/drivers/sfi/sfi_core.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/* sfi_core.c Simple Firmware Interface - core internals */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#define KMSG_COMPONENT "SFI"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/memblock.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include "sfi_core.h"
-
-#define ON_SAME_PAGE(addr1, addr2) \
- (((unsigned long)(addr1) & PAGE_MASK) == \
- ((unsigned long)(addr2) & PAGE_MASK))
-#define TABLE_ON_PAGE(page, table, size) (ON_SAME_PAGE(page, table) && \
- ON_SAME_PAGE(page, table + size))
-
-int sfi_disabled __read_mostly;
-EXPORT_SYMBOL(sfi_disabled);
-
-static u64 syst_pa __read_mostly;
-static struct sfi_table_simple *syst_va __read_mostly;
-
-/*
- * FW creates and saves the SFI tables in memory. When these tables get
- * used, they may need to be mapped to virtual address space, and the mapping
- * can happen before or after the memremap() is ready, so a flag is needed
- * to indicate this
- */
-static u32 sfi_use_memremap __read_mostly;
-
-/*
- * sfi_map_memory()/sfi_unmap_memory() call early_memremap()/early_memunmap(),
- * which are __init functions and introduce a section mismatch. So use __ref
- * to silence modpost.
- */
-static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
-{
- if (!phys || !size)
- return NULL;
-
- if (sfi_use_memremap)
- return memremap(phys, size, MEMREMAP_WB);
- else
- return early_memremap(phys, size);
-}
-
-static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
-{
- if (!virt || !size)
- return;
-
- if (sfi_use_memremap)
- memunmap(virt);
- else
- early_memunmap(virt, size);
-}
-
-static void sfi_print_table_header(unsigned long long pa,
- struct sfi_table_header *header)
-{
- pr_info("%4.4s %llX, %04X (v%d %6.6s %8.8s)\n",
- header->sig, pa,
- header->len, header->rev, header->oem_id,
- header->oem_table_id);
-}
-
-/*
- * sfi_verify_table()
- * Sanity check table length, calculate checksum
- */
-static int sfi_verify_table(struct sfi_table_header *table)
-{
-
- u8 checksum = 0;
- u8 *puchar = (u8 *)table;
- u32 length = table->len;
-
- /* Sanity check table length against arbitrary 1MB limit */
- if (length > 0x100000) {
- pr_err("Invalid table length 0x%x\n", length);
- return -1;
- }
-
- while (length--)
- checksum += *puchar++;
-
- if (checksum) {
- pr_err("Checksum %2.2X should be %2.2X\n",
- table->csum, table->csum - checksum);
- return -1;
- }
- return 0;
-}
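
sfi_verify_table() relies on the usual firmware-table convention: the csum field is chosen so that every byte of the table, csum included, sums to zero modulo 256. That is also why the error path above can print table->csum - checksum as the value that would have made the table valid. A hedged user-space sketch of generating and checking such a checksum:

#include <stdint.h>
#include <stddef.h>

static uint8_t sum_bytes(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *p++;	/* wraps naturally mod 256 */
	return sum;
}

/* Firmware side: patch the csum byte so the whole table sums to 0. */
static void fixup_csum(uint8_t *table, size_t len, size_t csum_off)
{
	table[csum_off] = 0;
	table[csum_off] = (uint8_t)(0x100 - sum_bytes(table, len));
}

/* OS side: a table is valid iff its byte sum is 0. */
static int verify(const uint8_t *table, size_t len)
{
	return sum_bytes(table, len) == 0 ? 0 : -1;
}
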
-
-/*
- * sfi_map_table()
- *
- * Return address of mapped table
- * Check for common case that we can re-use mapping to SYST,
- * which requires syst_pa, syst_va to be initialized.
- */
-static struct sfi_table_header *sfi_map_table(u64 pa)
-{
- struct sfi_table_header *th;
- u32 length;
-
- if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
- th = sfi_map_memory(pa, sizeof(struct sfi_table_header));
- else
- th = (void *)syst_va + (pa - syst_pa);
-
- /* If table fits on same page as its header, we are done */
- if (TABLE_ON_PAGE(th, th, th->len))
- return th;
-
- /* Entire table does not fit on same page as SYST */
- length = th->len;
- if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
- sfi_unmap_memory(th, sizeof(struct sfi_table_header));
-
- return sfi_map_memory(pa, length);
-}
-
-/*
- * sfi_unmap_table()
- *
- * Undoes effect of sfi_map_table() by unmapping table
- * if it did not completely fit on same page as SYST.
- */
-static void sfi_unmap_table(struct sfi_table_header *th)
-{
- if (!TABLE_ON_PAGE(syst_va, th, th->len))
- sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
- sizeof(*th) : th->len);
-}
-
-static int sfi_table_check_key(struct sfi_table_header *th,
- struct sfi_table_key *key)
-{
-
- if (strncmp(th->sig, key->sig, SFI_SIGNATURE_SIZE)
- || (key->oem_id && strncmp(th->oem_id,
- key->oem_id, SFI_OEM_ID_SIZE))
- || (key->oem_table_id && strncmp(th->oem_table_id,
- key->oem_table_id, SFI_OEM_TABLE_ID_SIZE)))
- return -1;
-
- return 0;
-}
-
-/*
- * This function is used in two cases:
- * 1. to enumerate and verify the tables addressed by SYST/XSDT,
- *    in which case no signature is given (kernel boot phase)
- * 2. to parse one specific table, in which case the signature must
- *    exist; the mapped virt address is returned, and the virt space
- *    is released by a later call to sfi_put_table()
- *
- * These two cases come from two different functions in two different
- * sections and cause a section mismatch warning. So use __ref to tell
- * modpost not to make any noise.
- *
- * Return value:
- * NULL: when can't find a table matching the key
- * ERR_PTR(error): error value
- * virt table address: when a matched table is found
- */
-struct sfi_table_header *
- __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
-{
- struct sfi_table_header *th;
- void *ret = NULL;
-
- th = sfi_map_table(pa);
- if (!th)
- return ERR_PTR(-ENOMEM);
-
- if (!key->sig) {
- sfi_print_table_header(pa, th);
- if (sfi_verify_table(th))
- ret = ERR_PTR(-EINVAL);
- } else {
- if (!sfi_table_check_key(th, key))
- return th; /* Success */
- }
-
- sfi_unmap_table(th);
- return ret;
-}
-
-/*
- * sfi_get_table()
- *
- * Search SYST for the specified table with the signature in
- * the key, and return the mapped table
- */
-struct sfi_table_header *sfi_get_table(struct sfi_table_key *key)
-{
- struct sfi_table_header *th;
- u32 tbl_cnt, i;
-
- tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
- for (i = 0; i < tbl_cnt; i++) {
- th = sfi_check_table(syst_va->pentry[i], key);
- if (!IS_ERR(th) && th)
- return th;
- }
-
- return NULL;
-}
-
-void sfi_put_table(struct sfi_table_header *th)
-{
- sfi_unmap_table(th);
-}
-
-/* Find table with signature, run handler on it */
-int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- sfi_table_handler handler)
-{
- struct sfi_table_header *table = NULL;
- struct sfi_table_key key;
- int ret = -EINVAL;
-
- if (sfi_disabled || !handler || !signature)
- goto exit;
-
- key.sig = signature;
- key.oem_id = oem_id;
- key.oem_table_id = oem_table_id;
-
- table = sfi_get_table(&key);
- if (!table)
- goto exit;
-
- ret = handler(table);
- sfi_put_table(table);
-exit:
- return ret;
-}
-EXPORT_SYMBOL_GPL(sfi_table_parse);
-
-/*
- * sfi_parse_syst()
- * Checksum all the tables in SYST and print their headers
- *
- * success: set syst_va, return 0
- */
-static int __init sfi_parse_syst(void)
-{
- struct sfi_table_key key = SFI_ANY_KEY;
- int tbl_cnt, i;
- void *ret;
-
- syst_va = sfi_map_memory(syst_pa, sizeof(struct sfi_table_simple));
- if (!syst_va)
- return -ENOMEM;
-
- tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
- for (i = 0; i < tbl_cnt; i++) {
- ret = sfi_check_table(syst_va->pentry[i], &key);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- }
-
- return 0;
-}
-
-/*
- * The OS finds the System Table by searching 16-byte boundaries between
- * physical address 0x000E0000 and 0x000FFFFF. The OS shall search this region
- * starting at the low address and shall stop searching when the 1st valid SFI
- * System Table is found.
- *
- * success: set syst_pa, return 0
- * fail: return -1
- */
-static __init int sfi_find_syst(void)
-{
- unsigned long offset, len;
- void *start;
-
- len = SFI_SYST_SEARCH_END - SFI_SYST_SEARCH_BEGIN;
- start = sfi_map_memory(SFI_SYST_SEARCH_BEGIN, len);
- if (!start)
- return -1;
-
- for (offset = 0; offset < len; offset += 16) {
- struct sfi_table_header *syst_hdr;
-
- syst_hdr = start + offset;
- if (strncmp(syst_hdr->sig, SFI_SIG_SYST,
- SFI_SIGNATURE_SIZE))
- continue;
-
- if (syst_hdr->len > PAGE_SIZE)
- continue;
-
- sfi_print_table_header(SFI_SYST_SEARCH_BEGIN + offset,
- syst_hdr);
-
- if (sfi_verify_table(syst_hdr))
- continue;
-
- /*
- * Enforce SFI spec mandate that SYST reside within a page.
- */
- if (!ON_SAME_PAGE(syst_pa, syst_pa + syst_hdr->len)) {
- pr_info("SYST 0x%llx + 0x%x crosses page\n",
- syst_pa, syst_hdr->len);
- continue;
- }
-
- /* Success */
- syst_pa = SFI_SYST_SEARCH_BEGIN + offset;
- sfi_unmap_memory(start, len);
- return 0;
- }
-
- sfi_unmap_memory(start, len);
- return -1;
-}
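
The comment above describes the discovery algorithm: scan 16-byte boundaries from 0x000E0000 upward and stop at the first table whose signature, length, and checksum all hold up. A simplified sketch of the same scan over an already-mapped image of that window (the verification steps are left to the caller):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define SIG_SIZE 4

/*
 * image/len describe a mapping of the 0xE0000-0xFFFFF search window;
 * returns the offset of the first signature hit, or -1. The caller
 * still has to validate length and checksum before trusting it.
 */
static long scan_for_table(const uint8_t *image, size_t len,
			   const char sig[SIG_SIZE])
{
	size_t off;

	for (off = 0; off + SIG_SIZE <= len; off += 16)
		if (!memcmp(image + off, sig, SIG_SIZE))
			return (long)off;

	return -1;
}
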
-
-static struct kobject *sfi_kobj;
-static struct kobject *tables_kobj;
-
-static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t offset, size_t count)
-{
- struct sfi_table_attr *tbl_attr =
- container_of(bin_attr, struct sfi_table_attr, attr);
- struct sfi_table_header *th = NULL;
- struct sfi_table_key key;
- ssize_t cnt;
-
- key.sig = tbl_attr->name;
- key.oem_id = NULL;
- key.oem_table_id = NULL;
-
- if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) {
- th = sfi_get_table(&key);
- if (!th)
- return 0;
-
- cnt = memory_read_from_buffer(buf, count, &offset,
- th, th->len);
- sfi_put_table(th);
- } else
- cnt = memory_read_from_buffer(buf, count, &offset,
- syst_va, syst_va->header.len);
-
- return cnt;
-}
-
-struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
-{
- struct sfi_table_attr *tbl_attr;
- struct sfi_table_header *th;
- int ret;
-
- tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL);
- if (!tbl_attr)
- return NULL;
-
- th = sfi_map_table(pa);
- if (!th || !th->sig[0]) {
- kfree(tbl_attr);
- return NULL;
- }
-
- sysfs_attr_init(&tbl_attr->attr.attr);
- memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE);
-
- tbl_attr->attr.size = 0;
- tbl_attr->attr.read = sfi_table_show;
- tbl_attr->attr.attr.name = tbl_attr->name;
- tbl_attr->attr.attr.mode = 0400;
-
- ret = sysfs_create_bin_file(tables_kobj,
- &tbl_attr->attr);
- if (ret) {
- kfree(tbl_attr);
- tbl_attr = NULL;
- }
-
- sfi_unmap_table(th);
- return tbl_attr;
-}
-
-static int __init sfi_sysfs_init(void)
-{
- int tbl_cnt, i;
-
- if (sfi_disabled)
- return 0;
-
- sfi_kobj = kobject_create_and_add("sfi", firmware_kobj);
- if (!sfi_kobj)
- return 0;
-
- tables_kobj = kobject_create_and_add("tables", sfi_kobj);
- if (!tables_kobj) {
- kobject_put(sfi_kobj);
- return 0;
- }
-
- sfi_sysfs_install_table(syst_pa);
-
- tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
-
- for (i = 0; i < tbl_cnt; i++)
- sfi_sysfs_install_table(syst_va->pentry[i]);
-
- sfi_acpi_sysfs_init();
- kobject_uevent(sfi_kobj, KOBJ_ADD);
- kobject_uevent(tables_kobj, KOBJ_ADD);
- pr_info("SFI sysfs interfaces init success\n");
- return 0;
-}
-
-void __init sfi_init(void)
-{
- if (!acpi_disabled)
- disable_sfi();
-
- if (sfi_disabled)
- return;
-
- pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n");
-
- if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
- disable_sfi();
-
- return;
-}
-
-void __init sfi_init_late(void)
-{
- int length;
-
- if (sfi_disabled)
- return;
-
- length = syst_va->header.len;
- sfi_unmap_memory(syst_va, sizeof(struct sfi_table_simple));
-
- /* Use memremap now after it is ready */
- sfi_use_memremap = 1;
- syst_va = sfi_map_memory(syst_pa, length);
-
- sfi_acpi_init();
-}
-
-/*
- * This is done here because we need to wait until /sys/firmware is
- * set up; only then can our interface be registered in /sys/firmware/sfi
- */
-core_initcall(sfi_sysfs_init);
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
deleted file mode 100644
index 1d5cfe854cf7..000000000000
--- a/drivers/sfi/sfi_core.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* sfi_core.h Simple Firmware Interface, internal header */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#include <linux/sysfs.h>
-
-struct sfi_table_key{
- char *sig;
- char *oem_id;
- char *oem_table_id;
-};
-
-/* sysfs interface */
-struct sfi_table_attr {
- struct bin_attribute attr;
- char name[8];
-};
-
-#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL }
-
-extern int __init sfi_acpi_init(void);
-extern struct sfi_table_header *sfi_check_table(u64 paddr,
- struct sfi_table_key *key);
-struct sfi_table_header *sfi_get_table(struct sfi_table_key *key);
-extern void sfi_put_table(struct sfi_table_header *table);
-extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa);
-extern void __init sfi_acpi_sysfs_init(void);
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index f8e070d67fa3..a14684ffe4c1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
- resource_size(res));
+ resource_size(res));
if (!d->window[k].virt)
goto err2;
}
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
index 9e62ba9311f0..939915a07d99 100644
--- a/drivers/sh/intc/virq-debugfs.c
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -16,7 +16,7 @@
#include <linux/debugfs.h>
#include "internals.h"
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
return 0;
}
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
- return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
- .open = intc_irq_xlate_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
static int __init intc_irq_xlate_init(void)
{
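
The hunk above drops the hand-rolled single_open() wrapper and file_operations in favor of DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate), which generates equivalent boilerplate from the _show function; note the rename to intc_irq_xlate_show(), which the macro's name pasting requires. Roughly what the macro expands to (simplified; see include/linux/seq_file.h for the authoritative definition):

static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_irq_xlate_show, inode->i_private);
}

static const struct file_operations intc_irq_xlate_fops = {
	.owner		= THIS_MODULE,	/* the one field the old version lacked */
	.open		= intc_irq_xlate_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
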
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index d097d070f579..e8a30c4c5aec 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -6,6 +6,7 @@ source "drivers/soc/amlogic/Kconfig"
source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/canaan/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
@@ -22,7 +23,5 @@ source "drivers/soc/ti/Kconfig"
source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
source "drivers/soc/xilinx/Kconfig"
-source "drivers/soc/zte/Kconfig"
-source "drivers/soc/kendryte/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 699b758d28e4..f678e4d9e585 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_ACTIONS) += actions/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
+obj-$(CONFIG_SOC_CANAAN) += canaan/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
@@ -28,5 +29,3 @@ obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
-obj-$(CONFIG_ARCH_ZX) += zte/
-obj-$(CONFIG_SOC_KENDRYTE) += kendryte/
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 682ba0eb4eba..20acac6342ef 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -11,6 +11,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
@@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel {
struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
+ struct clk *clk;
struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
};
@@ -282,22 +284,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
return -ENODEV;
}
+ lpc_snoop->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpc_snoop->clk)) {
+ rc = PTR_ERR(lpc_snoop->clk);
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "couldn't get clock\n");
+ return rc;
+ }
+ rc = clk_prepare_enable(lpc_snoop->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
if (rc)
- return rc;
+ goto err;
rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
if (rc)
- return rc;
+ goto err;
/* Configuration of 2nd snoop channel port is optional */
if (of_property_read_u32_index(dev->of_node, "snoop-ports",
1, &port) == 0) {
rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
- if (rc)
+ if (rc) {
aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ goto err;
+ }
}
+ return 0;
+
+err:
+ clk_disable_unprepare(lpc_snoop->clk);
+
return rc;
}
@@ -309,6 +331,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
aspeed_lpc_disable_snoop(lpc_snoop, 0);
aspeed_lpc_disable_snoop(lpc_snoop, 1);
+ clk_disable_unprepare(lpc_snoop->clk);
+
return 0;
}
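
The probe changes above follow a common pattern: acquire the clock with devm_clk_get(), stay quiet on -EPROBE_DEFER (the device will simply be probed again later), enable the clock, and disable it on every subsequent failure path through a single error label. A condensed sketch of the shape, with a hypothetical setup helper standing in for the IRQ and snoop-channel configuration:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_setup_hw(struct platform_device *pdev);	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int rc;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		rc = PTR_ERR(clk);
		if (rc != -EPROBE_DEFER)	/* deferral is not a real error */
			dev_err(dev, "couldn't get clock\n");
		return rc;
	}

	rc = clk_prepare_enable(clk);		/* from here on, must unwind */
	if (rc)
		return rc;

	rc = example_setup_hw(pdev);
	if (rc)
		goto err;

	return 0;

err:
	clk_disable_unprepare(clk);		/* undo in reverse order */
	return rc;
}
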
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
index 773930e0cb10..e3215f826d17 100644
--- a/drivers/soc/aspeed/aspeed-socinfo.c
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -25,6 +25,7 @@ static struct {
/* AST2600 */
{ "AST2600", 0x05000303 },
{ "AST2620", 0x05010203 },
+ { "AST2605", 0x05030103 },
};
static const char *siliconid_to_name(u32 siliconid)
@@ -43,14 +44,30 @@ static const char *siliconid_to_name(u32 siliconid)
static const char *siliconid_to_rev(u32 siliconid)
{
unsigned int rev = (siliconid >> 16) & 0xff;
-
- switch (rev) {
- case 0:
- return "A0";
- case 1:
- return "A1";
- case 3:
- return "A2";
+ unsigned int gen = (siliconid >> 24) & 0xff;
+
+ if (gen < 0x5) {
+ /* AST2500 and below */
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 3:
+ return "A2";
+ }
+ } else {
+ /* AST2600 */
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 2:
+ return "A2";
+ case 3:
+ return "A3";
+ }
}
return "??";
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index c4472b68b7c2..a490ad7e090f 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Atmel
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com
* Boris Brezillon <boris.brezillon@free-electrons.com
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
*/
#define pr_fmt(fmt) "AT91: " fmt
@@ -25,137 +21,218 @@
#define AT91_DBGU_EXID 0x44
#define AT91_CHIPID_CIDR 0x00
#define AT91_CHIPID_EXID 0x04
-#define AT91_CIDR_VERSION(x) ((x) & 0x1f)
+#define AT91_CIDR_VERSION(x, m) ((x) & (m))
+#define AT91_CIDR_VERSION_MASK GENMASK(4, 0)
+#define AT91_CIDR_VERSION_MASK_SAMA7G5 GENMASK(3, 0)
#define AT91_CIDR_EXT BIT(31)
-#define AT91_CIDR_MATCH_MASK 0x7fffffe0
+#define AT91_CIDR_MATCH_MASK GENMASK(30, 5)
+#define AT91_CIDR_MASK_SAMA7G5 GENMASK(27, 5)
-static const struct at91_soc __initconst socs[] = {
+static const struct at91_soc socs[] __initconst = {
#ifdef CONFIG_SOC_AT91RM9200
- AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"),
+ AT91_SOC(AT91RM9200_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91rm9200 BGA", "at91rm9200"),
#endif
#ifdef CONFIG_SOC_AT91SAM9
- AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL),
- AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL),
- AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL),
- AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL),
- AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH,
+ AT91_SOC(AT91SAM9260_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9260", NULL),
+ AT91_SOC(AT91SAM9261_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9261", NULL),
+ AT91_SOC(AT91SAM9263_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9263", NULL),
+ AT91_SOC(AT91SAM9G20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9g20", NULL),
+ AT91_SOC(AT91SAM9RL64_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9rl64", NULL),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9M11_EXID_MATCH,
"at91sam9m11", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH,
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9M10_EXID_MATCH,
"at91sam9m10", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH,
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G46_EXID_MATCH,
"at91sam9g46", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH,
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G45_EXID_MATCH,
"at91sam9g45", "at91sam9g45"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G15_EXID_MATCH,
"at91sam9g15", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH,
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G35_EXID_MATCH,
"at91sam9g35", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH,
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9X35_EXID_MATCH,
"at91sam9x35", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G25_EXID_MATCH,
"at91sam9g25", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH,
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9X25_EXID_MATCH,
"at91sam9x25", "at91sam9x5"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH,
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9CN12_EXID_MATCH,
"at91sam9cn12", "at91sam9n12"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH,
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9N12_EXID_MATCH,
"at91sam9n12", "at91sam9n12"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH,
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9CN11_EXID_MATCH,
"at91sam9cn11", "at91sam9n12"),
- AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
- AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
- AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
+ AT91_SOC(AT91SAM9XE128_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe128", "at91sam9xe128"),
+ AT91_SOC(AT91SAM9XE256_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe256", "at91sam9xe256"),
+ AT91_SOC(AT91SAM9XE512_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe512", "at91sam9xe512"),
#endif
#ifdef CONFIG_SOC_SAM9X60
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+ "sam9x60", "sam9x60"),
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
"sam9x60 64MiB DDR2 SiP", "sam9x60"),
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
"sam9x60 128MiB DDR2 SiP", "sam9x60"),
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
"sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D21CU_EXID_MATCH,
"sama5d21", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D22CU_EXID_MATCH,
"sama5d22", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D225C_D1M_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D225C_D1M_EXID_MATCH,
"sama5d225c 16MiB SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D23CU_EXID_MATCH,
"sama5d23", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D24CX_EXID_MATCH,
"sama5d24", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D24CU_EXID_MATCH,
"sama5d24", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D26CU_EXID_MATCH,
"sama5d26", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27CU_EXID_MATCH,
"sama5d27", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27CN_EXID_MATCH,
"sama5d27", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D1G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_D1G_EXID_MATCH,
"sama5d27c 128MiB SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D5M_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_D5M_EXID_MATCH,
"sama5d27c 64MiB SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_LD1G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_LD1G_EXID_MATCH,
"sama5d27c 128MiB LPDDR2 SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_LD2G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_LD2G_EXID_MATCH,
"sama5d27c 256MiB LPDDR2 SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28CU_EXID_MATCH,
"sama5d28", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28CN_EXID_MATCH,
"sama5d28", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_D1G_EXID_MATCH,
"sama5d28c 128MiB SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_LD1G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_LD1G_EXID_MATCH,
"sama5d28c 128MiB LPDDR2 SiP", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_LD2G_EXID_MATCH,
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_LD2G_EXID_MATCH,
"sama5d28c 256MiB LPDDR2 SiP", "sama5d2"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D31_EXID_MATCH,
"sama5d31", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D33_EXID_MATCH,
"sama5d33", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH,
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D34_EXID_MATCH,
"sama5d34", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH,
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D35_EXID_MATCH,
"sama5d35", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH,
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D36_EXID_MATCH,
"sama5d36", "sama5d3"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D41_EXID_MATCH,
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D41_EXID_MATCH,
"sama5d41", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH,
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D42_EXID_MATCH,
"sama5d42", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH,
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D43_EXID_MATCH,
"sama5d43", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D44_EXID_MATCH,
"sama5d44", "sama5d4"),
#endif
#ifdef CONFIG_SOC_SAMV7
- AT91_SOC(SAME70Q21_CIDR_MATCH, SAME70Q21_EXID_MATCH,
+ AT91_SOC(SAME70Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q21_EXID_MATCH,
"same70q21", "same7"),
- AT91_SOC(SAME70Q20_CIDR_MATCH, SAME70Q20_EXID_MATCH,
+ AT91_SOC(SAME70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q20_EXID_MATCH,
"same70q20", "same7"),
- AT91_SOC(SAME70Q19_CIDR_MATCH, SAME70Q19_EXID_MATCH,
+ AT91_SOC(SAME70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q19_EXID_MATCH,
"same70q19", "same7"),
- AT91_SOC(SAMS70Q21_CIDR_MATCH, SAMS70Q21_EXID_MATCH,
+ AT91_SOC(SAMS70Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q21_EXID_MATCH,
"sams70q21", "sams7"),
- AT91_SOC(SAMS70Q20_CIDR_MATCH, SAMS70Q20_EXID_MATCH,
+ AT91_SOC(SAMS70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q20_EXID_MATCH,
"sams70q20", "sams7"),
- AT91_SOC(SAMS70Q19_CIDR_MATCH, SAMS70Q19_EXID_MATCH,
+ AT91_SOC(SAMS70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q19_EXID_MATCH,
"sams70q19", "sams7"),
- AT91_SOC(SAMV71Q21_CIDR_MATCH, SAMV71Q21_EXID_MATCH,
+ AT91_SOC(SAMV71Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q21_EXID_MATCH,
"samv71q21", "samv7"),
- AT91_SOC(SAMV71Q20_CIDR_MATCH, SAMV71Q20_EXID_MATCH,
+ AT91_SOC(SAMV71Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q20_EXID_MATCH,
"samv71q20", "samv7"),
- AT91_SOC(SAMV71Q19_CIDR_MATCH, SAMV71Q19_EXID_MATCH,
+ AT91_SOC(SAMV71Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q19_EXID_MATCH,
"samv71q19", "samv7"),
- AT91_SOC(SAMV70Q20_CIDR_MATCH, SAMV70Q20_EXID_MATCH,
+ AT91_SOC(SAMV70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV70Q20_EXID_MATCH,
"samv70q20", "samv7"),
- AT91_SOC(SAMV70Q19_CIDR_MATCH, SAMV70Q19_EXID_MATCH,
+ AT91_SOC(SAMV70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV70Q19_EXID_MATCH,
"samv70q19", "samv7"),
#endif
+#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
+ "sama7g51", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G52_EXID_MATCH,
+ "sama7g52", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G53_EXID_MATCH,
+ "sama7g53", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_EXID_MATCH,
+ "sama7g54", "sama7g5"),
+#endif
{ /* sentinel */ },
};
@@ -191,8 +268,13 @@ static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
{
struct device_node *np;
void __iomem *regs;
+ static const struct of_device_id chipids[] = {
+ { .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7g5-chipid" },
+ { },
+ };
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid");
+ np = of_find_matching_node(NULL, chipids);
if (!np)
return -ENODEV;
@@ -235,7 +317,7 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
}
for (soc = socs; soc->name; soc++) {
- if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
+ if (soc->cidr_match != (cidr & soc->cidr_mask))
continue;
if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
@@ -254,7 +336,7 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
soc_dev_attr->family = soc->family;
soc_dev_attr->soc_id = soc->name;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
- AT91_CIDR_VERSION(cidr));
+ AT91_CIDR_VERSION(cidr, soc->version_mask));
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->revision);
@@ -266,13 +348,27 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
if (soc->family)
pr_info("Detected SoC family: %s\n", soc->family);
pr_info("Detected SoC: %s, revision %X\n", soc->name,
- AT91_CIDR_VERSION(cidr));
+ AT91_CIDR_VERSION(cidr, soc->version_mask));
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7g5", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
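
The rework above stops assuming one global CIDR layout: every table entry now carries its own cidr_mask and version_mask, which is what lets the SAMA7G5 entries (a narrower match field and a 4-bit version) live in the same table as the older parts. In sketch form, the match and revision logic now reduce to:

/* Per-entry masking, as used by at91_soc_init() above. */
static int soc_matches(const struct at91_soc *soc, u32 cidr, u32 exid)
{
	if (soc->cidr_match != (cidr & soc->cidr_mask))
		return 0;

	/* EXID only participates when the CIDR extension bit is set. */
	return !(cidr & AT91_CIDR_EXT) || soc->exid_match == exid;
}

static u32 soc_version(const struct at91_soc *soc, u32 cidr)
{
	return AT91_CIDR_VERSION(cidr, soc->version_mask);	/* cidr & mask */
}
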
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 5849846a69d6..c3eb3c8f0834 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Atmel
*
* Boris Brezillon <boris.brezillon@free-electrons.com
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
*/
#ifndef __AT91_SOC_H
@@ -16,14 +12,19 @@
struct at91_soc {
u32 cidr_match;
+ u32 cidr_mask;
+ u32 version_mask;
u32 exid_match;
const char *name;
const char *family;
};
-#define AT91_SOC(__cidr, __exid, __name, __family) \
+#define AT91_SOC(__cidr, __cidr_mask, __version_mask, __exid, \
+ __name, __family) \
{ \
.cidr_match = (__cidr), \
+ .cidr_mask = (__cidr_mask), \
+ .version_mask = (__version_mask), \
.exid_match = (__exid), \
.name = (__name), \
.family = (__family), \
@@ -43,6 +44,7 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
+#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
#define AT91SAM9M10_EXID_MATCH 0x00000002
@@ -64,6 +66,11 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X60_D1G_EXID_MATCH 0x00000010
#define SAM9X60_D6K_EXID_MATCH 0x00000011
+#define SAMA7G51_EXID_MATCH 0x3
+#define SAMA7G52_EXID_MATCH 0x2
+#define SAMA7G53_EXID_MATCH 0x1
+#define SAMA7G54_EXID_MATCH 0x0
+
#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
#define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index 7bc90e0bd773..0f0efa28d92b 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
-obj-$(CONFIG_SOC_BCM63XX) += bcm63xx/
+obj-y += bcm63xx/
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm63xx/Kconfig b/drivers/soc/bcm/bcm63xx/Kconfig
index 16f648a6c70a..9e501c8ac5ce 100644
--- a/drivers/soc/bcm/bcm63xx/Kconfig
+++ b/drivers/soc/bcm/bcm63xx/Kconfig
@@ -10,3 +10,12 @@ config BCM63XX_POWER
BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
endif # SOC_BCM63XX
+
+config BCM_PMB
+ bool "Broadcom PMB (Power Management Bus) driver"
+ depends on ARCH_BCM4908 || (COMPILE_TEST && OF)
+ default ARCH_BCM4908
+ select PM_GENERIC_DOMAINS if PM
+ help
+	  This enables support for the Broadcom PMB (Power Management Bus),
+	  which is used for enabling and disabling SoC devices.
diff --git a/drivers/soc/bcm/bcm63xx/Makefile b/drivers/soc/bcm/bcm63xx/Makefile
index 0710d5e018cc..557eed3d67bd 100644
--- a/drivers/soc/bcm/bcm63xx/Makefile
+++ b/drivers/soc/bcm/bcm63xx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM63XX_POWER) += bcm63xx-power.o
+obj-$(CONFIG_BCM_PMB) += bcm-pmb.o
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
new file mode 100644
index 000000000000..c223023dc64f
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2013 Broadcom
+ * Copyright (C) 2020 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <dt-bindings/soc/bcm-pmb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset/bcm63xx_pmb.h>
+
+#define BPCM_ID_REG 0x00
+#define BPCM_CAPABILITIES 0x04
+#define BPCM_CAP_NUM_ZONES 0x000000ff
+#define BPCM_CAP_SR_REG_BITS 0x0000ff00
+#define BPCM_CAP_PLLTYPE 0x00030000
+#define BPCM_CAP_UBUS 0x00080000
+#define BPCM_CONTROL 0x08
+#define BPCM_STATUS 0x0c
+#define BPCM_ROSC_CONTROL 0x10
+#define BPCM_ROSC_THRESH_H 0x14
+#define BPCM_ROSC_THRESHOLD_BCM6838 0x14
+#define BPCM_ROSC_THRESH_S 0x18
+#define BPCM_ROSC_COUNT_BCM6838 0x18
+#define BPCM_ROSC_COUNT 0x1c
+#define BPCM_PWD_CONTROL_BCM6838 0x1c
+#define BPCM_PWD_CONTROL 0x20
+#define BPCM_SR_CONTROL_BCM6838 0x20
+#define BPCM_PWD_ACCUM_CONTROL 0x24
+#define BPCM_SR_CONTROL 0x28
+#define BPCM_GLOBAL_CONTROL 0x2c
+#define BPCM_MISC_CONTROL 0x30
+#define BPCM_MISC_CONTROL2 0x34
+#define BPCM_SGPHY_CNTL 0x38
+#define BPCM_SGPHY_STATUS 0x3c
+#define BPCM_ZONE0 0x40
+#define BPCM_ZONE_CONTROL 0x00
+#define BPCM_ZONE_CONTROL_MANUAL_CLK_EN 0x00000001
+#define BPCM_ZONE_CONTROL_MANUAL_RESET_CTL 0x00000002
+#define BPCM_ZONE_CONTROL_FREQ_SCALE_USED 0x00000004 /* R/O */
+#define BPCM_ZONE_CONTROL_DPG_CAPABLE 0x00000008 /* R/O */
+#define BPCM_ZONE_CONTROL_MANUAL_MEM_PWR 0x00000030
+#define BPCM_ZONE_CONTROL_MANUAL_ISO_CTL 0x00000040
+#define BPCM_ZONE_CONTROL_MANUAL_CTL 0x00000080
+#define BPCM_ZONE_CONTROL_DPG_CTL_EN 0x00000100
+#define BPCM_ZONE_CONTROL_PWR_DN_REQ 0x00000200
+#define BPCM_ZONE_CONTROL_PWR_UP_REQ 0x00000400
+#define BPCM_ZONE_CONTROL_MEM_PWR_CTL_EN 0x00000800
+#define BPCM_ZONE_CONTROL_BLK_RESET_ASSERT 0x00001000
+#define BPCM_ZONE_CONTROL_MEM_STBY 0x00002000
+#define BPCM_ZONE_CONTROL_RESERVED 0x0007c000
+#define BPCM_ZONE_CONTROL_PWR_CNTL_STATE 0x00f80000
+#define BPCM_ZONE_CONTROL_FREQ_SCALAR_DYN_SEL 0x01000000 /* R/O */
+#define BPCM_ZONE_CONTROL_PWR_OFF_STATE 0x02000000 /* R/O */
+#define BPCM_ZONE_CONTROL_PWR_ON_STATE 0x04000000 /* R/O */
+#define BPCM_ZONE_CONTROL_PWR_GOOD 0x08000000 /* R/O */
+#define BPCM_ZONE_CONTROL_DPG_PWR_STATE 0x10000000 /* R/O */
+#define BPCM_ZONE_CONTROL_MEM_PWR_STATE 0x20000000 /* R/O */
+#define BPCM_ZONE_CONTROL_ISO_STATE 0x40000000 /* R/O */
+#define BPCM_ZONE_CONTROL_RESET_STATE 0x80000000 /* R/O */
+#define BPCM_ZONE_CONFIG1 0x04
+#define BPCM_ZONE_CONFIG2 0x08
+#define BPCM_ZONE_FREQ_SCALAR_CONTROL 0x0c
+#define BPCM_ZONE_SIZE 0x10
+
+struct bcm_pmb {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+ bool little_endian;
+ struct genpd_onecell_data genpd_onecell_data;
+};
+
+struct bcm_pmb_pd_data {
+ const char * const name;
+ int id;
+ u8 bus;
+ u8 device;
+};
+
+struct bcm_pmb_pm_domain {
+ struct bcm_pmb *pmb;
+ const struct bcm_pmb_pd_data *data;
+ struct generic_pm_domain genpd;
+};
+
+static int bcm_pmb_bpcm_read(struct bcm_pmb *pmb, int bus, u8 device,
+ int offset, u32 *val)
+{
+ void __iomem *base = pmb->base + bus * 0x20;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&pmb->lock, flags);
+ err = bpcm_rd(base, device, offset, val);
+ spin_unlock_irqrestore(&pmb->lock, flags);
+
+ if (!err)
+ *val = pmb->little_endian ? le32_to_cpu(*val) : be32_to_cpu(*val);
+
+ return err;
+}
+
+static int bcm_pmb_bpcm_write(struct bcm_pmb *pmb, int bus, u8 device,
+ int offset, u32 val)
+{
+ void __iomem *base = pmb->base + bus * 0x20;
+ unsigned long flags;
+ int err;
+
+ val = pmb->little_endian ? cpu_to_le32(val) : cpu_to_be32(val);
+
+ spin_lock_irqsave(&pmb->lock, flags);
+ err = bpcm_wr(base, device, offset, val);
+ spin_unlock_irqrestore(&pmb->lock, flags);
+
+ return err;
+}
+
+static int bcm_pmb_power_off_zone(struct bcm_pmb *pmb, int bus, u8 device,
+ int zone)
+{
+ int offset;
+ u32 val;
+ int err;
+
+ offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
+
+ err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
+ if (err)
+ return err;
+
+ val |= BPCM_ZONE_CONTROL_PWR_DN_REQ;
+ val &= ~BPCM_ZONE_CONTROL_PWR_UP_REQ;
+
+ err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
+
+ return err;
+}
+
+static int bcm_pmb_power_on_zone(struct bcm_pmb *pmb, int bus, u8 device,
+ int zone)
+{
+ int offset;
+ u32 val;
+ int err;
+
+ offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
+
+ err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
+ if (err)
+ return err;
+
+ if (!(val & BPCM_ZONE_CONTROL_PWR_ON_STATE)) {
+ val &= ~BPCM_ZONE_CONTROL_PWR_DN_REQ;
+ val |= BPCM_ZONE_CONTROL_DPG_CTL_EN;
+ val |= BPCM_ZONE_CONTROL_PWR_UP_REQ;
+ val |= BPCM_ZONE_CONTROL_MEM_PWR_CTL_EN;
+ val |= BPCM_ZONE_CONTROL_BLK_RESET_ASSERT;
+
+ err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
+ }
+
+ return err;
+}
+
+static int bcm_pmb_power_off_device(struct bcm_pmb *pmb, int bus, u8 device)
+{
+ int offset;
+ u32 val;
+ int err;
+
+ /* Entire device can be powered off by powering off the 0th zone */
+ offset = BPCM_ZONE0 + BPCM_ZONE_CONTROL;
+
+ err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
+ if (err)
+ return err;
+
+ if (!(val & BPCM_ZONE_CONTROL_PWR_OFF_STATE)) {
+ val = BPCM_ZONE_CONTROL_PWR_DN_REQ;
+
+ err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
+ }
+
+ return err;
+}
+
+static int bcm_pmb_power_on_device(struct bcm_pmb *pmb, int bus, u8 device)
+{
+ u32 val;
+ int err;
+ int i;
+
+ err = bcm_pmb_bpcm_read(pmb, bus, device, BPCM_CAPABILITIES, &val);
+ if (err)
+ return err;
+
+ for (i = 0; i < (val & BPCM_CAP_NUM_ZONES); i++) {
+ err = bcm_pmb_power_on_zone(pmb, bus, device, i);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int bcm_pmb_power_on(struct generic_pm_domain *genpd)
+{
+ struct bcm_pmb_pm_domain *pd = container_of(genpd, struct bcm_pmb_pm_domain, genpd);
+ const struct bcm_pmb_pd_data *data = pd->data;
+ struct bcm_pmb *pmb = pd->pmb;
+
+ switch (data->id) {
+ case BCM_PMB_PCIE0:
+ case BCM_PMB_PCIE1:
+ case BCM_PMB_PCIE2:
+ return bcm_pmb_power_on_zone(pmb, data->bus, data->device, 0);
+ case BCM_PMB_HOST_USB:
+ return bcm_pmb_power_on_device(pmb, data->bus, data->device);
+ default:
+ dev_err(pmb->dev, "unsupported device id: %d\n", data->id);
+ return -EINVAL;
+ }
+}
+
+static int bcm_pmb_power_off(struct generic_pm_domain *genpd)
+{
+ struct bcm_pmb_pm_domain *pd = container_of(genpd, struct bcm_pmb_pm_domain, genpd);
+ const struct bcm_pmb_pd_data *data = pd->data;
+ struct bcm_pmb *pmb = pd->pmb;
+
+ switch (data->id) {
+ case BCM_PMB_PCIE0:
+ case BCM_PMB_PCIE1:
+ case BCM_PMB_PCIE2:
+ return bcm_pmb_power_off_zone(pmb, data->bus, data->device, 0);
+ case BCM_PMB_HOST_USB:
+ return bcm_pmb_power_off_device(pmb, data->bus, data->device);
+ default:
+ dev_err(pmb->dev, "unsupported device id: %d\n", data->id);
+ return -EINVAL;
+ }
+}
+
+static int bcm_pmb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct bcm_pmb_pd_data *table;
+ const struct bcm_pmb_pd_data *e;
+ struct resource *res;
+ struct bcm_pmb *pmb;
+ int max_id;
+ int err;
+
+ pmb = devm_kzalloc(dev, sizeof(*pmb), GFP_KERNEL);
+ if (!pmb)
+ return -ENOMEM;
+
+ pmb->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmb->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmb->base))
+ return PTR_ERR(pmb->base);
+
+ spin_lock_init(&pmb->lock);
+
+ pmb->little_endian = !of_device_is_big_endian(dev->of_node);
+
+ table = of_device_get_match_data(dev);
+ if (!table)
+ return -EINVAL;
+
+ max_id = 0;
+ for (e = table; e->name; e++)
+ max_id = max(max_id, e->id);
+
+ pmb->genpd_onecell_data.num_domains = max_id + 1;
+ pmb->genpd_onecell_data.domains =
+ devm_kcalloc(dev, pmb->genpd_onecell_data.num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!pmb->genpd_onecell_data.domains)
+ return -ENOMEM;
+
+ for (e = table; e->name; e++) {
+ struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+
+ pd->pmb = pmb;
+ pd->data = e;
+ pd->genpd.name = e->name;
+ pd->genpd.power_on = bcm_pmb_power_on;
+ pd->genpd.power_off = bcm_pmb_power_off;
+
+ pm_genpd_init(&pd->genpd, NULL, true);
+ pmb->genpd_onecell_data.domains[e->id] = &pd->genpd;
+ }
+
+ err = of_genpd_add_provider_onecell(dev->of_node, &pmb->genpd_onecell_data);
+ if (err) {
+ dev_err(dev, "failed to add genpd provider: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct bcm_pmb_pd_data bcm_pmb_bcm4908_data[] = {
+ { .name = "pcie2", .id = BCM_PMB_PCIE2, .bus = 0, .device = 2, },
+ { .name = "pcie0", .id = BCM_PMB_PCIE0, .bus = 1, .device = 14, },
+ { .name = "pcie1", .id = BCM_PMB_PCIE1, .bus = 1, .device = 15, },
+ { .name = "usb", .id = BCM_PMB_HOST_USB, .bus = 1, .device = 17, },
+ { },
+};
+
+static const struct of_device_id bcm_pmb_of_match[] = {
+ { .compatible = "brcm,bcm4908-pmb", .data = &bcm_pmb_bcm4908_data, },
+ { },
+};
+
+static struct platform_driver bcm_pmb_driver = {
+ .driver = {
+ .name = "bcm-pmb",
+ .of_match_table = bcm_pmb_of_match,
+ },
+ .probe = bcm_pmb_probe,
+};
+
+builtin_platform_driver(bcm_pmb_driver);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index d33a383701dd..e87dfc6660f3 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -11,8 +11,6 @@
#include <linux/soc/brcmstb/brcmstb.h>
#include <linux/sys_soc.h>
-#include <soc/brcmstb/common.h>
-
static u32 family_id;
static u32 product_id;
@@ -21,21 +19,6 @@ static const struct of_device_id brcmstb_machine_match[] = {
{ }
};
-bool soc_is_brcmstb(void)
-{
- const struct of_device_id *match;
- struct device_node *root;
-
- root = of_find_node_by_path("/");
- if (!root)
- return false;
-
- match = of_match_node(brcmstb_machine_match, root);
- of_node_put(root);
-
- return match != NULL;
-}
-
u32 brcmstb_get_family_id(void)
{
return family_id;
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
new file mode 100644
index 000000000000..8179b69518b4
--- /dev/null
+++ b/drivers/soc/canaan/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config SOC_K210_SYSCTL
+ bool "Canaan Kendryte K210 SoC system controller"
+ depends on RISCV && SOC_CANAAN && OF
+ default SOC_CANAAN
+ select PM
+ select SIMPLE_PM_BUS
+ select SYSCON
+ select MFD_SYSCON
+ help
+ Canaan Kendryte K210 SoC system controller driver.
diff --git a/drivers/soc/canaan/Makefile b/drivers/soc/canaan/Makefile
new file mode 100644
index 000000000000..570280ad7967
--- /dev/null
+++ b/drivers/soc/canaan/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SOC_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/canaan/k210-sysctl.c b/drivers/soc/canaan/k210-sysctl.c
new file mode 100644
index 000000000000..27a346c406bc
--- /dev/null
+++ b/drivers/soc/canaan/k210-sysctl.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <asm/soc.h>
+
+#include <soc/canaan/k210-sysctl.h>
+
+static int k210_sysctl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *pclk;
+ int ret;
+
+ dev_info(dev, "K210 system controller\n");
+
+ /* Get power bus clock */
+ pclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk),
+ "Get bus clock failed\n");
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ dev_err(dev, "Enable bus clock failed\n");
+ return ret;
+ }
+
+ /* Populate children */
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "Populate platform failed %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id k210_sysctl_of_match[] = {
+ { .compatible = "canaan,k210-sysctl", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k210_sysctl_driver = {
+ .driver = {
+ .name = "k210-sysctl",
+ .of_match_table = k210_sysctl_of_match,
+ },
+ .probe = k210_sysctl_probe,
+};
+builtin_platform_driver(k210_sysctl_driver);
+
+/*
+ * System controller registers base address and size.
+ */
+#define K210_SYSCTL_BASE_ADDR 0x50440000ULL
+#define K210_SYSCTL_BASE_SIZE 0x1000
+
+/*
+ * This needs to be called very early during initialization, given that
+ * PLL1 needs to be enabled to be able to use all SRAM.
+ */
+static void __init k210_soc_early_init(const void *fdt)
+{
+ void __iomem *sysctl_base;
+
+ sysctl_base = ioremap(K210_SYSCTL_BASE_ADDR, K210_SYSCTL_BASE_SIZE);
+ if (!sysctl_base)
+ panic("k210-sysctl: ioremap failed");
+
+ k210_clk_early_init(sysctl_base);
+
+ iounmap(sysctl_base);
+}
+SOC_EARLY_INIT_DECLARE(k210_soc, "canaan,kendryte-k210", k210_soc_early_init);
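
k210_sysctl_probe() uses dev_err_probe() for the clock lookup, which folds the "stay quiet on -EPROBE_DEFER" dance seen in the aspeed-lpc-snoop hunk earlier into one call: it logs only real errors, records deferrals for the devices_deferred debugfs file, and returns the error code either way. An open-coded sketch of what that one line replaces:

#include <linux/clk.h>
#include <linux/device.h>

static struct clk *get_pclk_verbose(struct device *dev)
{
	struct clk *pclk = devm_clk_get(dev, NULL);

	if (IS_ERR(pclk) && PTR_ERR(pclk) != -EPROBE_DEFER)
		dev_err(dev, "Get bus clock failed: %ld\n", PTR_ERR(pclk));

	return pclk;	/* the probe still returns PTR_ERR(pclk) */
}
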
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 497a7e0fd027..654e9246ce6b 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -27,7 +27,7 @@
static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
-static u8 __iomem *muram_vbase;
+static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;
struct muram_block {
@@ -223,9 +223,9 @@ void __iomem *cpm_muram_addr(unsigned long offset)
}
EXPORT_SYMBOL(cpm_muram_addr);
-unsigned long cpm_muram_offset(void __iomem *addr)
+unsigned long cpm_muram_offset(const void __iomem *addr)
{
- return addr - (void __iomem *)muram_vbase;
+ return addr - muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);
@@ -235,6 +235,18 @@ EXPORT_SYMBOL(cpm_muram_offset);
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
- return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+ return muram_pbase + (addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
+
+/*
+ * As cpm_muram_free, but takes the virtual address rather than the
+ * muram offset.
+ */
+void cpm_muram_free_addr(const void __iomem *addr)
+{
+ if (!addr)
+ return;
+ cpm_muram_free(cpm_muram_offset(addr));
+}
+EXPORT_SYMBOL(cpm_muram_free_addr);
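
Making muram_vbase a void __iomem * lets cpm_muram_offset() and cpm_muram_dma() subtract pointers directly: the kernel is built as GNU C, where arithmetic on void pointers counts single bytes, so the old u8 casts added nothing. A one-function sketch of the idiom:

/* GNU C extension: void-pointer subtraction yields a byte count. */
static unsigned long byte_offset(const void __iomem *addr,
				 const void __iomem *base)
{
	return addr - base;
}
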
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index a9370f4aacca..05812f8ae734 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -13,7 +13,7 @@ config SOC_IMX8M
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
- select ARM_GIC_V3 if ARCH_MXC
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
 	  If you say yes here, you get support for the NXP i.MX8M family;
 	  it will provide SoC info such as the SoC family,
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index cc57a384d74d..071e14496e4b 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -5,6 +5,8 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -29,7 +31,7 @@
struct imx8_soc_data {
char *name;
- u32 (*soc_revision)(void);
+ u32 (*soc_revision)(struct device *dev);
};
static u64 soc_uid;
@@ -50,7 +52,7 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static u32 __init imx8mq_soc_revision(void)
+static u32 __init imx8mq_soc_revision(struct device *dev)
{
struct device_node *np;
void __iomem *ocotp_base;
@@ -75,9 +77,20 @@ static u32 __init imx8mq_soc_revision(void)
rev = REV_B1;
}
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
- soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ if (dev) {
+ int ret;
+
+ ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
+ if (ret) {
+ iounmap(ocotp_base);
+ of_node_put(np);
+ return ret;
+ }
+ } else {
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ }
iounmap(ocotp_base);
of_node_put(np);
@@ -107,7 +120,7 @@ static void __init imx8mm_soc_uid(void)
of_node_put(np);
}
-static u32 __init imx8mm_soc_revision(void)
+static u32 __init imx8mm_soc_revision(struct device *dev)
{
struct device_node *np;
void __iomem *anatop_base;
@@ -125,7 +138,15 @@ static u32 __init imx8mm_soc_revision(void)
iounmap(anatop_base);
of_node_put(np);
- imx8mm_soc_uid();
+ if (dev) {
+ int ret;
+
+ ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
+ if (ret)
+ return ret;
+ } else {
+ imx8mm_soc_uid();
+ }
return rev;
}
@@ -150,7 +171,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
.soc_revision = imx8mm_soc_revision,
};
-static __maybe_unused const struct of_device_id imx8_soc_match[] = {
+static __maybe_unused const struct of_device_id imx8_machine_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
@@ -158,12 +179,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ }
};
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
+ { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, },
+ { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, },
+ { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, },
+ { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, },
+ { }
+};
+
#define imx8_revision(soc_rev) \
soc_rev ? \
kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
"unknown"
-static int __init imx8_soc_init(void)
+static int imx8_soc_info(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
@@ -182,7 +211,10 @@ static int __init imx8_soc_init(void)
if (ret)
goto free_soc;
- id = of_match_node(imx8_soc_match, of_root);
+ if (pdev)
+ id = of_match_node(imx8_soc_match, pdev->dev.of_node);
+ else
+ id = of_match_node(imx8_machine_match, of_root);
if (!id) {
ret = -ENODEV;
goto free_soc;
@@ -191,8 +223,16 @@ static int __init imx8_soc_init(void)
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
- if (data->soc_revision)
- soc_rev = data->soc_revision();
+ if (data->soc_revision) {
+ if (pdev) {
+ soc_rev = data->soc_revision(&pdev->dev);
+ ret = soc_rev;
+ if (ret < 0)
+ goto free_soc;
+ } else {
+ soc_rev = data->soc_revision(NULL);
+ }
+ }
}
soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -230,4 +270,24 @@ free_soc:
kfree(soc_dev_attr);
return ret;
}
+
+/* Retain the device_initcall for backward compatibility with existing DTS. */
+static int __init imx8_soc_init(void)
+{
+ if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL))
+ return 0;
+
+ return imx8_soc_info(NULL);
+}
device_initcall(imx8_soc_init);
+
+static struct platform_driver imx8_soc_info_driver = {
+ .probe = imx8_soc_info,
+ .driver = {
+ .name = "imx8_soc_info",
+ .of_match_table = imx8_soc_match,
+ },
+};
+
+module_platform_driver(imx8_soc_info_driver);
+MODULE_LICENSE("GPL v2");
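For context, a minimal sketch of the nvmem read used above when the driver is probed as a platform device; the cell name matches the "soc_unique_id" cell the patch expects in DT, everything else here is illustrative:

#include <linux/device.h>
#include <linux/nvmem-consumer.h>
#include <linux/types.h>

/* Sketch: fetch the 64-bit SoC unique ID the same way
 * imx8mq_soc_revision()/imx8mm_soc_revision() do when dev != NULL. */
static int example_read_soc_uid(struct device *dev, u64 *uid)
{
	/* Looks up the "soc_unique_id" nvmem cell on dev's DT node. */
	return nvmem_cell_read_u64(dev, "soc_unique_id", uid);
}

When dev is NULL (the legacy device_initcall path), the driver instead falls back to reading OCOTP_UID_HIGH/OCOTP_UID_LOW directly, as the hunk above shows.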
diff --git a/drivers/soc/kendryte/Kconfig b/drivers/soc/kendryte/Kconfig
deleted file mode 100644
index 49785b1b0217..000000000000
--- a/drivers/soc/kendryte/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-if SOC_KENDRYTE
-
-config K210_SYSCTL
- bool "Kendryte K210 system controller"
- default y
- depends on RISCV
- help
- Enables controlling the K210 various clocks and to enable
- general purpose use of the extra 2MB of SRAM normally
- reserved for the AI engine.
-
-endif
diff --git a/drivers/soc/kendryte/Makefile b/drivers/soc/kendryte/Makefile
deleted file mode 100644
index 002d9ce95c0d..000000000000
--- a/drivers/soc/kendryte/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
deleted file mode 100644
index 707019223dd8..000000000000
--- a/drivers/soc/kendryte/k210-sysctl.c
+++ /dev/null
@@ -1,260 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- * Copyright (c) 2019 Western Digital Corporation or its affiliates.
- */
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/bitfield.h>
-#include <asm/soc.h>
-
-#define K210_SYSCTL_CLK0_FREQ 26000000UL
-
-/* Registers base address */
-#define K210_SYSCTL_SYSCTL_BASE_ADDR 0x50440000ULL
-
-/* Registers */
-#define K210_SYSCTL_PLL0 0x08
-#define K210_SYSCTL_PLL1 0x0c
-/* clkr: 4bits, clkf1: 6bits, clkod: 4bits, bwadj: 4bits */
-#define PLL_RESET (1 << 20)
-#define PLL_PWR (1 << 21)
-#define PLL_INTFB (1 << 22)
-#define PLL_BYPASS (1 << 23)
-#define PLL_TEST (1 << 24)
-#define PLL_OUT_EN (1 << 25)
-#define PLL_TEST_EN (1 << 26)
-#define K210_SYSCTL_PLL_LOCK 0x18
-#define PLL0_LOCK1 (1 << 0)
-#define PLL0_LOCK2 (1 << 1)
-#define PLL0_SLIP_CLEAR (1 << 2)
-#define PLL0_TEST_CLK_OUT (1 << 3)
-#define PLL1_LOCK1 (1 << 8)
-#define PLL1_LOCK2 (1 << 9)
-#define PLL1_SLIP_CLEAR (1 << 10)
-#define PLL1_TEST_CLK_OUT (1 << 11)
-#define PLL2_LOCK1 (1 << 16)
-#define PLL2_LOCK2 (1 << 16)
-#define PLL2_SLIP_CLEAR (1 << 18)
-#define PLL2_TEST_CLK_OUT (1 << 19)
-#define K210_SYSCTL_CLKSEL0 0x20
-#define CLKSEL_ACLK (1 << 0)
-#define K210_SYSCTL_CLKEN_CENT 0x28
-#define CLKEN_CPU (1 << 0)
-#define CLKEN_SRAM0 (1 << 1)
-#define CLKEN_SRAM1 (1 << 2)
-#define CLKEN_APB0 (1 << 3)
-#define CLKEN_APB1 (1 << 4)
-#define CLKEN_APB2 (1 << 5)
-#define K210_SYSCTL_CLKEN_PERI 0x2c
-#define CLKEN_ROM (1 << 0)
-#define CLKEN_DMA (1 << 1)
-#define CLKEN_AI (1 << 2)
-#define CLKEN_DVP (1 << 3)
-#define CLKEN_FFT (1 << 4)
-#define CLKEN_GPIO (1 << 5)
-#define CLKEN_SPI0 (1 << 6)
-#define CLKEN_SPI1 (1 << 7)
-#define CLKEN_SPI2 (1 << 8)
-#define CLKEN_SPI3 (1 << 9)
-#define CLKEN_I2S0 (1 << 10)
-#define CLKEN_I2S1 (1 << 11)
-#define CLKEN_I2S2 (1 << 12)
-#define CLKEN_I2C0 (1 << 13)
-#define CLKEN_I2C1 (1 << 14)
-#define CLKEN_I2C2 (1 << 15)
-#define CLKEN_UART1 (1 << 16)
-#define CLKEN_UART2 (1 << 17)
-#define CLKEN_UART3 (1 << 18)
-#define CLKEN_AES (1 << 19)
-#define CLKEN_FPIO (1 << 20)
-#define CLKEN_TIMER0 (1 << 21)
-#define CLKEN_TIMER1 (1 << 22)
-#define CLKEN_TIMER2 (1 << 23)
-#define CLKEN_WDT0 (1 << 24)
-#define CLKEN_WDT1 (1 << 25)
-#define CLKEN_SHA (1 << 26)
-#define CLKEN_OTP (1 << 27)
-#define CLKEN_RTC (1 << 29)
-
-struct k210_sysctl {
- void __iomem *regs;
- struct clk_hw hw;
-};
-
-static void k210_set_bits(u32 val, void __iomem *reg)
-{
- writel(readl(reg) | val, reg);
-}
-
-static void k210_clear_bits(u32 val, void __iomem *reg)
-{
- writel(readl(reg) & ~val, reg);
-}
-
-static void k210_pll1_enable(void __iomem *regs)
-{
- u32 val;
-
- val = readl(regs + K210_SYSCTL_PLL1);
- val &= ~GENMASK(19, 0); /* clkr1 = 0 */
- val |= FIELD_PREP(GENMASK(9, 4), 0x3B); /* clkf1 = 59 */
- val |= FIELD_PREP(GENMASK(13, 10), 0x3); /* clkod1 = 3 */
- val |= FIELD_PREP(GENMASK(19, 14), 0x3B); /* bwadj1 = 59 */
- writel(val, regs + K210_SYSCTL_PLL1);
-
- k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1);
- k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1);
-
- /*
- * Reset the pll. The magic NOPs come from the Kendryte reference SDK.
- */
- k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
- k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
- nop();
- nop();
- k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
-
- for (;;) {
- val = readl(regs + K210_SYSCTL_PLL_LOCK);
- if (val & PLL1_LOCK2)
- break;
- writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK);
- }
-
- k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1);
-}
-
-static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw);
- u32 clksel0, pll0;
- u64 pll0_freq, clkr0, clkf0, clkod0;
-
- /*
- * If the clock selector is not set, use the base frequency.
- * Otherwise, use PLL0 frequency with a frequency divisor.
- */
- clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0);
- if (!(clksel0 & CLKSEL_ACLK))
- return K210_SYSCTL_CLK0_FREQ;
-
- /*
- * Get PLL0 frequency:
- * freq = base frequency * clkf0 / (clkr0 * clkod0)
- */
- pll0 = readl(s->regs + K210_SYSCTL_PLL0);
- clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0);
- clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0);
- clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0);
- pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0);
-
- /* Get the frequency divisor from the clock selector */
- return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0));
-}
-
-static const struct clk_ops k210_sysctl_clk_ops = {
- .recalc_rate = k210_sysctl_clk_recalc_rate,
-};
-
-static const struct clk_init_data k210_clk_init_data = {
- .name = "k210-sysctl-pll1",
- .ops = &k210_sysctl_clk_ops,
-};
-
-static int k210_sysctl_probe(struct platform_device *pdev)
-{
- struct k210_sysctl *s;
- int error;
-
- pr_info("Kendryte K210 SoC sysctl\n");
-
- s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- s->regs = devm_ioremap_resource(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
- if (IS_ERR(s->regs))
- return PTR_ERR(s->regs);
-
- s->hw.init = &k210_clk_init_data;
- error = devm_clk_hw_register(&pdev->dev, &s->hw);
- if (error) {
- dev_err(&pdev->dev, "failed to register clk");
- return error;
- }
-
- error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
- &s->hw);
- if (error) {
- dev_err(&pdev->dev, "adding clk provider failed\n");
- return error;
- }
-
- return 0;
-}
-
-static const struct of_device_id k210_sysctl_of_match[] = {
- { .compatible = "kendryte,k210-sysctl", },
- {}
-};
-
-static struct platform_driver k210_sysctl_driver = {
- .driver = {
- .name = "k210-sysctl",
- .of_match_table = k210_sysctl_of_match,
- },
- .probe = k210_sysctl_probe,
-};
-
-static int __init k210_sysctl_init(void)
-{
- return platform_driver_register(&k210_sysctl_driver);
-}
-core_initcall(k210_sysctl_init);
-
-/*
- * This needs to be called very early during initialization, given that
- * PLL1 needs to be enabled to be able to use all SRAM.
- */
-static void __init k210_soc_early_init(const void *fdt)
-{
- void __iomem *regs;
-
- regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000);
- if (!regs)
- panic("K210 sysctl ioremap");
-
- /* Enable PLL1 to make the KPU SRAM useable */
- k210_pll1_enable(regs);
-
- k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0);
-
- k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1,
- regs + K210_SYSCTL_CLKEN_CENT);
- k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC,
- regs + K210_SYSCTL_CLKEN_PERI);
-
- k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0);
-
- iounmap(regs);
-}
-SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
-
-#ifdef CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN
-/*
- * Generic entry for the default k210.dtb embedded DTB for boards with:
- * - Vendor ID: 0x4B5
- * - Arch ID: 0xE59889E6A5A04149 (= "Canaan AI" in UTF-8 encoded Chinese)
- * - Impl ID: 0x4D41495832303030 (= "MAIX2000")
- * These values are reported by the SiPEED MAXDUINO, SiPEED MAIX GO and
- * SiPEED Dan dock boards.
- */
-SOC_BUILTIN_DTB_DECLARE(k210, 0x4B5, 0xE59889E6A5A04149, 0x4D41495832303030);
-#endif
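As a worked example of the recalc_rate() math in the file removed above (register values are the typical K210 reset defaults, quoted for illustration only):

/*
 * pll0_freq = clkf0 * 26 MHz / (clkr0 * clkod0), where each factor is
 * the raw register field plus one. With clkr0 = 0, clkf0 = 59 and
 * clkod0 = 1:
 *
 *     pll0_freq = (59 + 1) * 26000000 / ((0 + 1) * (1 + 1)) = 780 MHz
 *
 * ACLK then divides PLL0 by (2 << clksel0[2:1]); with the divisor
 * select at 0 this gives 780 MHz / 2 = 390 MHz.
 */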
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
index 7c6b009b6f6c..e7011d665b15 100644
--- a/drivers/soc/litex/Kconfig
+++ b/drivers/soc/litex/Kconfig
@@ -8,12 +8,25 @@ config LITEX
config LITEX_SOC_CONTROLLER
tristate "Enable LiteX SoC Controller driver"
depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
select LITEX
help
This option enables the SoC Controller Driver which verifies
- LiteX CSR access and provides common litex_get_reg/litex_set_reg
+ LiteX CSR access and provides common litex_[read|write]*
accessors.
All drivers that use functions from litex.h must depend on
LITEX.
+config LITEX_SUBREG_SIZE
+ int "Size of a LiteX CSR subregister, in bytes"
+ depends on LITEX
+ range 1 4
+ default 4
+ help
+ LiteX MMIO registers (referred to as Configuration and Status
+ registers, or CSRs) are spread across adjacent 8- or 32-bit
+ subregisters, located at 32-bit aligned MMIO addresses. Use
+ this to select the appropriate size (1 or 4 bytes) matching
+ your particular LiteX build.
+
endmenu
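A minimal sketch of the subregister scheme the help text describes, assuming the 8-bit CSR bus layout (one meaningful byte per 32-bit-aligned location, most significant subregister first); the helper name is illustrative:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: join four 1-byte subregisters into one 32-bit CSR
 * value, mirroring the litex_get_reg() logic removed further below. */
static u32 example_litex_read32(void __iomem *reg)
{
	u32 val = 0;
	int i;

	for (i = 0; i < 4; i++)		/* subregisters are MSB-first */
		val = (val << 8) | (readl(reg + i * 4) & 0xff);

	return val;
}

With CONFIG_LITEX_SUBREG_SIZE=4 the whole CSR fits in a single subregister and the access degenerates to a single readl().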
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index 1217cafdfd4d..6268bfa7f0d6 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -15,79 +15,11 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/reboot.h>
-/*
- * LiteX SoC Generator, depending on the configuration, can split a single
- * logical CSR (Control&Status Register) into a series of consecutive physical
- * registers.
- *
- * For example, in the configuration with 8-bit CSR Bus, 32-bit aligned (the
- * default one for 32-bit CPUs) a 32-bit logical CSR will be generated as four
- * 32-bit physical registers, each one containing one byte of meaningful data.
- *
- * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
- *
- * The purpose of `litex_set_reg`/`litex_get_reg` is to implement the logic
- * of writing to/reading from the LiteX CSR in a single place that can be
- * then reused by all LiteX drivers.
- */
-
-/**
- * litex_set_reg() - Writes the value to the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- * @val: Value to be written to the CSR
- *
- * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
- * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
- * each one containing one byte of meaningful data.
- *
- * This function splits a single possibly multi-byte write into a series of
- * single-byte writes with a proper offset.
- */
-void litex_set_reg(void __iomem *reg, unsigned long reg_size,
- unsigned long val)
-{
- unsigned long shifted_data, shift, i;
-
- for (i = 0; i < reg_size; ++i) {
- shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
- shifted_data = val >> shift;
-
- WRITE_LITEX_SUBREGISTER(shifted_data, reg, i);
- }
-}
-EXPORT_SYMBOL_GPL(litex_set_reg);
-
-/**
- * litex_get_reg() - Reads the value of the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- *
- * Return: Value read from the CSR
- *
- * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
- * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
- * each one containing one byte of meaningful data.
- *
- * This function generates a series of single-byte reads with a proper offset
- * and joins their results into a single multi-byte value.
- */
-unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_size)
-{
- unsigned long shifted_data, shift, i;
- unsigned long result = 0;
-
- for (i = 0; i < reg_size; ++i) {
- shifted_data = READ_LITEX_SUBREGISTER(reg, i);
-
- shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
- result |= (shifted_data << shift);
- }
-
- return result;
-}
-EXPORT_SYMBOL_GPL(litex_get_reg);
+/* reset register located at the base address */
+#define RESET_REG_OFF 0x00
+#define RESET_REG_VALUE 0x00000001
#define SCRATCH_REG_OFF 0x04
#define SCRATCH_REG_VALUE 0x12345678
@@ -131,25 +63,39 @@ static int litex_check_csr_access(void __iomem *reg_addr)
/* restore original value of the SCRATCH register */
litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_REG_VALUE);
- pr_info("LiteX SoC Controller driver initialized");
+ pr_info("LiteX SoC Controller driver initialized: subreg:%d, align:%d",
+ LITEX_SUBREG_SIZE, LITEX_SUBREG_ALIGN);
return 0;
}
struct litex_soc_ctrl_device {
void __iomem *base;
+ struct notifier_block reset_nb;
};
+static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev =
+ container_of(this, struct litex_soc_ctrl_device, reset_nb);
+
+ litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
+ return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
-
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
struct litex_soc_ctrl_device *soc_ctrl_dev;
+ int error;
soc_ctrl_dev = devm_kzalloc(&pdev->dev, sizeof(*soc_ctrl_dev), GFP_KERNEL);
if (!soc_ctrl_dev)
@@ -159,7 +105,29 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
if (IS_ERR(soc_ctrl_dev->base))
return PTR_ERR(soc_ctrl_dev->base);
- return litex_check_csr_access(soc_ctrl_dev->base);
+ error = litex_check_csr_access(soc_ctrl_dev->base);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, soc_ctrl_dev);
+
+ soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
+ soc_ctrl_dev->reset_nb.priority = 128;
+ error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ if (error) {
+ dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
+ error);
+ }
+
+ return 0;
+}
+
+static int litex_soc_ctrl_remove(struct platform_device *pdev)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&soc_ctrl_dev->reset_nb);
+ return 0;
}
static struct platform_driver litex_soc_ctrl_driver = {
@@ -168,6 +136,7 @@ static struct platform_driver litex_soc_ctrl_driver = {
.of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
},
.probe = litex_soc_ctrl_probe,
+ .remove = litex_soc_ctrl_remove,
};
module_platform_driver(litex_soc_ctrl_driver);
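The restart hook added above follows the standard notifier pattern; condensed for reference (names illustrative, priority 128 being the conventional default for a handler that is sufficient for the platform):

#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_restart(struct notifier_block *nb, unsigned long mode,
			   void *cmd)
{
	/* write the SoC reset register here */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_restart,
	.priority	= 128,
};

/* register_restart_handler(&example_nb) in probe(),
 * unregister_restart_handler(&example_nb) in remove(). */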
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index b6908db534c2..90270f8114ed 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
diff --git a/drivers/soc/mediatek/mt8167-pm-domains.h b/drivers/soc/mediatek/mt8167-pm-domains.h
new file mode 100644
index 000000000000..ad0b8dfa0527
--- /dev/null
+++ b/drivers/soc/mediatek/mt8167-pm-domains.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8167_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8167_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8167-power.h>
+
+#define MT8167_PWR_STATUS_MFG_2D BIT(24)
+#define MT8167_PWR_STATUS_MFG_ASYNC BIT(25)
+
+/*
+ * MT8167 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
+ [MT8167_POWER_DOMAIN_MM] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8167_TOP_AXI_PROT_EN_MM_EMI |
+ MT8167_TOP_AXI_PROT_EN_MCU_MM),
+ },
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8167_POWER_DOMAIN_VDEC] = {
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8167_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8167_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = MT8167_PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8167_TOP_AXI_PROT_EN_MCU_MFG |
+ MT8167_TOP_AXI_PROT_EN_MFG_EMI),
+ },
+ },
+ [MT8167_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = MT8167_PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8167_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8167_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = 0,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8167_TOP_AXI_PROT_EN_CONN_EMI |
+ MT8167_TOP_AXI_PROT_EN_CONN_MCU |
+ MT8167_TOP_AXI_PROT_EN_MCU_CONN),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt8167_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8167,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8167),
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+};
+
+#endif /* __SOC_MEDIATEK_MT8167_PM_DOMAINS_H */
+
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
index 8d996c5d2682..aa5230e6c12f 100644
--- a/drivers/soc/mediatek/mt8183-pm-domains.h
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -38,6 +38,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.ctl_offs = 0x0338,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8183_POWER_DOMAIN_MFG_CORE0] = {
.sta_mask = BIT(7),
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 280d3bd9f675..3c8e4212d941 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -463,36 +463,4 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
-struct cmdq_flush_completion {
- struct completion cmplt;
- bool err;
-};
-
-static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
-{
- struct cmdq_flush_completion *cmplt;
-
- cmplt = (struct cmdq_flush_completion *)data.data;
- if (data.sta != CMDQ_CB_NORMAL)
- cmplt->err = true;
- else
- cmplt->err = false;
- complete(&cmplt->cmplt);
-}
-
-int cmdq_pkt_flush(struct cmdq_pkt *pkt)
-{
- struct cmdq_flush_completion cmplt;
- int err;
-
- init_completion(&cmplt.cmplt);
- err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
- if (err < 0)
- return err;
- wait_for_completion(&cmplt.cmplt);
-
- return cmplt.err ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(cmdq_pkt_flush);
-
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/soc/mediatek/mtk-mutex.c
index 1f99db6b1a42..f531b119da7a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -9,12 +9,11 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp_comp.h"
-
-#define MT2701_DISP_MUTEX0_MOD0 0x2c
-#define MT2701_DISP_MUTEX0_SOF0 0x30
+#define MT2701_MUTEX0_MOD0 0x2c
+#define MT2701_MUTEX0_SOF0 0x30
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
@@ -79,33 +78,32 @@
#define MT2701_MUTEX_MOD_DISP_RDMA0 10
#define MT2701_MUTEX_MOD_DISP_RDMA1 12
-#define MUTEX_SOF_SINGLE_MODE 0
-#define MUTEX_SOF_DSI0 1
-#define MUTEX_SOF_DSI1 2
-#define MUTEX_SOF_DPI0 3
-#define MUTEX_SOF_DPI1 4
-#define MUTEX_SOF_DSI2 5
-#define MUTEX_SOF_DSI3 6
-#define MT8167_MUTEX_SOF_DPI0 2
-#define MT8167_MUTEX_SOF_DPI1 3
-
-
-struct mtk_disp_mutex {
+#define MT2712_MUTEX_SOF_SINGLE_MODE 0
+#define MT2712_MUTEX_SOF_DSI0 1
+#define MT2712_MUTEX_SOF_DSI1 2
+#define MT2712_MUTEX_SOF_DPI0 3
+#define MT2712_MUTEX_SOF_DPI1 4
+#define MT2712_MUTEX_SOF_DSI2 5
+#define MT2712_MUTEX_SOF_DSI3 6
+#define MT8167_MUTEX_SOF_DPI0 2
+#define MT8167_MUTEX_SOF_DPI1 3
+
+struct mtk_mutex {
int id;
bool claimed;
};
-enum mtk_ddp_mutex_sof_id {
- DDP_MUTEX_SOF_SINGLE_MODE,
- DDP_MUTEX_SOF_DSI0,
- DDP_MUTEX_SOF_DSI1,
- DDP_MUTEX_SOF_DPI0,
- DDP_MUTEX_SOF_DPI1,
- DDP_MUTEX_SOF_DSI2,
- DDP_MUTEX_SOF_DSI3,
+enum mtk_mutex_sof_id {
+ MUTEX_SOF_SINGLE_MODE,
+ MUTEX_SOF_DSI0,
+ MUTEX_SOF_DSI1,
+ MUTEX_SOF_DPI0,
+ MUTEX_SOF_DPI1,
+ MUTEX_SOF_DSI2,
+ MUTEX_SOF_DSI3,
};
-struct mtk_ddp_data {
+struct mtk_mutex_data {
const unsigned int *mutex_mod;
const unsigned int *mutex_sof;
const unsigned int mutex_mod_reg;
@@ -113,12 +111,12 @@ struct mtk_ddp_data {
const bool no_clk;
};
-struct mtk_ddp {
+struct mtk_mutex_ctx {
struct device *dev;
struct clk *clk;
void __iomem *regs;
- struct mtk_disp_mutex mutex[10];
- const struct mtk_ddp_data *data;
+ struct mtk_mutex mutex[10];
+ const struct mtk_mutex_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -183,150 +181,155 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
-static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
- [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
- [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
- [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
- [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
- [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
- [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
- [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
-static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
- [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
- [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
- [DDP_MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
- [DDP_MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
+static const unsigned int mt8167_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
+ [MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
};
-static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+static const struct mtk_mutex_data mt2701_mutex_driver_data = {
.mutex_mod = mt2701_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
- .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
- .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
-static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+static const struct mtk_mutex_data mt2712_mutex_driver_data = {
.mutex_mod = mt2712_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
- .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
- .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
-static const struct mtk_ddp_data mt8167_ddp_driver_data = {
+static const struct mtk_mutex_data mt8167_mutex_driver_data = {
.mutex_mod = mt8167_mutex_mod,
.mutex_sof = mt8167_mutex_sof,
- .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
- .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
.no_clk = true,
};
-static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+static const struct mtk_mutex_data mt8173_mutex_driver_data = {
.mutex_mod = mt8173_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
- .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
- .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
-struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
+struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
- struct mtk_ddp *ddp = dev_get_drvdata(dev);
-
- if (id >= 10)
- return ERR_PTR(-EINVAL);
- if (ddp->mutex[id].claimed)
- return ERR_PTR(-EBUSY);
+ struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
+ int i;
- ddp->mutex[id].claimed = true;
+ for (i = 0; i < 10; i++)
+ if (!mtx->mutex[i].claimed) {
+ mtx->mutex[i].claimed = true;
+ return &mtx->mutex[i];
+ }
- return &ddp->mutex[id];
+ return ERR_PTR(-EBUSY);
}
+EXPORT_SYMBOL_GPL(mtk_mutex_get);
-void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex)
+void mtk_mutex_put(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
- WARN_ON(&ddp->mutex[mutex->id] != mutex);
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
mutex->claimed = false;
}
+EXPORT_SYMBOL_GPL(mtk_mutex_put);
-int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex)
+int mtk_mutex_prepare(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
- return clk_prepare_enable(ddp->clk);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ return clk_prepare_enable(mtx->clk);
}
+EXPORT_SYMBOL_GPL(mtk_mutex_prepare);
-void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex)
+void mtk_mutex_unprepare(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
- clk_disable_unprepare(ddp->clk);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ clk_disable_unprepare(mtx->clk);
}
+EXPORT_SYMBOL_GPL(mtk_mutex_unprepare);
-void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id)
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
unsigned int reg;
unsigned int sof_id;
unsigned int offset;
- WARN_ON(&ddp->mutex[mutex->id] != mutex);
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- sof_id = DDP_MUTEX_SOF_DSI0;
+ sof_id = MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- sof_id = DDP_MUTEX_SOF_DSI0;
+ sof_id = MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- sof_id = DDP_MUTEX_SOF_DSI2;
+ sof_id = MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- sof_id = DDP_MUTEX_SOF_DSI3;
+ sof_id = MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- sof_id = DDP_MUTEX_SOF_DPI0;
+ sof_id = MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- sof_id = DDP_MUTEX_SOF_DPI1;
+ sof_id = MUTEX_SOF_DPI1;
break;
default:
- if (ddp->data->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ if (mtx->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
mutex->id);
- reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->data->mutex_mod[id];
- writel_relaxed(reg, ddp->regs + offset);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg |= 1 << mtx->data->mutex_mod[id];
+ writel_relaxed(reg, mtx->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
- reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->data->mutex_mod[id] - 32);
- writel_relaxed(reg, ddp->regs + offset);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg |= 1 << (mtx->data->mutex_mod[id] - 32);
+ writel_relaxed(reg, mtx->regs + offset);
}
return;
}
- writel_relaxed(ddp->data->mutex_sof[sof_id],
- ddp->regs +
- DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
+ writel_relaxed(mtx->data->mutex_sof[sof_id],
+ mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
}
+EXPORT_SYMBOL_GPL(mtk_mutex_add_comp);
-void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id)
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
unsigned int reg;
unsigned int offset;
- WARN_ON(&ddp->mutex[mutex->id] != mutex);
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
@@ -336,129 +339,136 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs +
- DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg,
mutex->id));
break;
default:
- if (ddp->data->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ if (mtx->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
mutex->id);
- reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->data->mutex_mod[id]);
- writel_relaxed(reg, ddp->regs + offset);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg &= ~(1 << mtx->data->mutex_mod[id]);
+ writel_relaxed(reg, mtx->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
- reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
- writel_relaxed(reg, ddp->regs + offset);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg &= ~(1 << (mtx->data->mutex_mod[id] - 32));
+ writel_relaxed(reg, mtx->regs + offset);
}
break;
}
}
+EXPORT_SYMBOL_GPL(mtk_mutex_remove_comp);
-void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex)
+void mtk_mutex_enable(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
- WARN_ON(&ddp->mutex[mutex->id] != mutex);
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
- writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
}
+EXPORT_SYMBOL_GPL(mtk_mutex_enable);
-void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
+void mtk_mutex_disable(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
- WARN_ON(&ddp->mutex[mutex->id] != mutex);
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
- writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(0, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
}
+EXPORT_SYMBOL_GPL(mtk_mutex_disable);
-void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
+void mtk_mutex_acquire(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
u32 tmp;
- writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
- writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
- if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
+ writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, mtx->regs + DISP_REG_MUTEX(mutex->id));
+ if (readl_poll_timeout_atomic(mtx->regs + DISP_REG_MUTEX(mutex->id),
tmp, tmp & INT_MUTEX, 1, 10000))
pr_err("could not acquire mutex %d\n", mutex->id);
}
+EXPORT_SYMBOL_GPL(mtk_mutex_acquire);
-void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
+void mtk_mutex_release(struct mtk_mutex *mutex)
{
- struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
- mutex[mutex->id]);
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
- writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
+ writel(0, mtx->regs + DISP_REG_MUTEX(mutex->id));
}
+EXPORT_SYMBOL_GPL(mtk_mutex_release);
-static int mtk_ddp_probe(struct platform_device *pdev)
+static int mtk_mutex_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct mtk_ddp *ddp;
+ struct mtk_mutex_ctx *mtx;
struct resource *regs;
int i;
- ddp = devm_kzalloc(dev, sizeof(*ddp), GFP_KERNEL);
- if (!ddp)
+ mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL);
+ if (!mtx)
return -ENOMEM;
for (i = 0; i < 10; i++)
- ddp->mutex[i].id = i;
+ mtx->mutex[i].id = i;
- ddp->data = of_device_get_match_data(dev);
+ mtx->data = of_device_get_match_data(dev);
- if (!ddp->data->no_clk) {
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+ if (!mtx->data->no_clk) {
+ mtx->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mtx->clk)) {
+ if (PTR_ERR(mtx->clk) != -EPROBE_DEFER)
dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ return PTR_ERR(mtx->clk);
}
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddp->regs = devm_ioremap_resource(dev, regs);
- if (IS_ERR(ddp->regs)) {
+ mtx->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(mtx->regs)) {
dev_err(dev, "Failed to map mutex registers\n");
- return PTR_ERR(ddp->regs);
+ return PTR_ERR(mtx->regs);
}
- platform_set_drvdata(pdev, ddp);
+ platform_set_drvdata(pdev, mtx);
return 0;
}
-static int mtk_ddp_remove(struct platform_device *pdev)
+static int mtk_mutex_remove(struct platform_device *pdev)
{
return 0;
}
-static const struct of_device_id ddp_driver_dt_match[] = {
+static const struct of_device_id mutex_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-mutex",
- .data = &mt2701_ddp_driver_data},
+ .data = &mt2701_mutex_driver_data},
{ .compatible = "mediatek,mt2712-disp-mutex",
- .data = &mt2712_ddp_driver_data},
+ .data = &mt2712_mutex_driver_data},
{ .compatible = "mediatek,mt8167-disp-mutex",
- .data = &mt8167_ddp_driver_data},
+ .data = &mt8167_mutex_driver_data},
{ .compatible = "mediatek,mt8173-disp-mutex",
- .data = &mt8173_ddp_driver_data},
+ .data = &mt8173_mutex_driver_data},
{},
};
-MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
+MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
-struct platform_driver mtk_ddp_driver = {
- .probe = mtk_ddp_probe,
- .remove = mtk_ddp_remove,
+struct platform_driver mtk_mutex_driver = {
+ .probe = mtk_mutex_probe,
+ .remove = mtk_mutex_remove,
.driver = {
- .name = "mediatek-ddp",
+ .name = "mediatek-mutex",
.owner = THIS_MODULE,
- .of_match_table = ddp_driver_dt_match,
+ .of_match_table = mutex_driver_dt_match,
},
};
+
+builtin_platform_driver(mtk_mutex_driver);
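A condensed sketch of how a display driver might consume the renamed API, based on the functions exported above (error handling elided; the component IDs are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

/* mtk_mutex_get() now hands out the first unclaimed instance instead of
 * taking an explicit id. */
static int example_use_mutex(struct device *mutex_dev)
{
	struct mtk_mutex *m = mtk_mutex_get(mutex_dev);

	if (IS_ERR(m))
		return PTR_ERR(m);

	mtk_mutex_prepare(m);				/* enable mutex clock */
	mtk_mutex_add_comp(m, DDP_COMPONENT_OVL0);
	mtk_mutex_add_comp(m, DDP_COMPONENT_DSI0);	/* selects the SOF source */
	mtk_mutex_enable(m);

	/* ... scan out frames ... */

	mtk_mutex_disable(m);
	mtk_mutex_remove_comp(m, DDP_COMPONENT_DSI0);
	mtk_mutex_remove_comp(m, DDP_COMPONENT_OVL0);
	mtk_mutex_unprepare(m);
	mtk_mutex_put(m);
	return 0;
}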
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index fb70cb3b07b3..b7f697666bdd 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -13,8 +13,10 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
+#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
#include "mt8192-pm-domains.h"
@@ -40,6 +42,7 @@ struct scpsys_domain {
struct clk_bulk_data *subsys_clks;
struct regmap *infracfg;
struct regmap *smi;
+ struct regulator *supply;
};
struct scpsys {
@@ -187,6 +190,16 @@ static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
return _scpsys_bus_protect_disable(pd->data->bp_infracfg, pd->infracfg);
}
+static int scpsys_regulator_enable(struct regulator *supply)
+{
+ return supply ? regulator_enable(supply) : 0;
+}
+
+static int scpsys_regulator_disable(struct regulator *supply)
+{
+ return supply ? regulator_disable(supply) : 0;
+}
+
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
@@ -194,10 +207,14 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
bool tmp;
int ret;
- ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ ret = scpsys_regulator_enable(pd->supply);
if (ret)
return ret;
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret)
+ goto err_reg;
+
/* subsys power on */
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
@@ -232,6 +249,8 @@ err_disable_subsys_clks:
clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
err_pwr_ack:
clk_bulk_disable(pd->num_clks, pd->clks);
+err_reg:
+ scpsys_regulator_disable(pd->supply);
return ret;
}
@@ -267,6 +286,8 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
clk_bulk_disable(pd->num_clks, pd->clks);
+ scpsys_regulator_disable(pd->supply);
+
return 0;
}
@@ -275,6 +296,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
{
const struct scpsys_domain_data *domain_data;
struct scpsys_domain *pd;
+ struct device_node *root_node = scpsys->dev->of_node;
struct property *prop;
const char *clk_name;
int i, ret, num_clks;
@@ -307,6 +329,25 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
pd->data = domain_data;
pd->scpsys = scpsys;
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_DOMAIN_SUPPLY)) {
+ /*
+ * Find the regulator in the current power domain node.
+ * devm_regulator_get() looks for a regulator in a node and its
+ * child nodes, so temporarily set of_node to the current power
+ * domain node, then restore the original node once the regulator
+ * for this domain has been found.
+ */
+ scpsys->dev->of_node = node;
+ pd->supply = devm_regulator_get(scpsys->dev, "domain");
+ scpsys->dev->of_node = root_node;
+ if (IS_ERR(pd->supply)) {
+ dev_err_probe(scpsys->dev, PTR_ERR(pd->supply),
+ "%pOF: failed to get power supply.\n",
+ node);
+ return ERR_CAST(pd->supply);
+ }
+ }
+
pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
if (IS_ERR(pd->infracfg))
return ERR_CAST(pd->infracfg);
@@ -446,8 +487,8 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
child_pd = scpsys_add_one_domain(scpsys, child);
if (IS_ERR(child_pd)) {
- ret = PTR_ERR(child_pd);
- dev_err(scpsys->dev, "%pOF: failed to get child domain id\n", child);
+ dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
+ "%pOF: failed to get child domain id\n", child);
goto err_put_node;
}
@@ -515,6 +556,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys)
static const struct of_device_id scpsys_of_match[] = {
{
+ .compatible = "mediatek,mt8167-power-controller",
+ .data = &mt8167_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8173-power-controller",
.data = &mt8173_scpsys_data,
},
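The resulting ordering for a domain flagged MTK_SCPD_DOMAIN_SUPPLY, summarized:

/*
 * power on:  regulator_enable(supply) -> clk_bulk_enable(clks)
 *            -> PWR_ON/PWR_ON_2ND handshake -> bus protection disabled
 * power off: bus protection enabled -> PWR_ON bits cleared
 *            -> clk_bulk_disable(clks) -> regulator_disable(supply)
 *
 * scpsys_regulator_{enable,disable}() treat a NULL supply as a no-op,
 * so domains without the cap need no special-casing.
 */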
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index a2f4d8f97e05..141dc76054e6 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -7,6 +7,7 @@
#define MTK_SCPD_FWAIT_SRAM BIT(1)
#define MTK_SCPD_SRAM_ISO BIT(2)
#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
+#define MTK_SCPD_DOMAIN_SUPPLY BIT(4)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
@@ -14,6 +15,7 @@
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
#define SPM_AUDIO_PWR_CON 0x029c
#define SPM_MFG_2D_PWR_CON 0x02c0
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 16b421608e9c..8403a77b59fe 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -4,6 +4,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/device.h>
@@ -35,6 +36,9 @@
#define CACHE_LINE_SIZE_SHIFT 6
+#define LLCC_COMMON_HW_INFO 0x00030000
+#define LLCC_MAJOR_VERSION_MASK GENMASK(31, 24)
+
#define LLCC_COMMON_STATUS0 0x0003000c
#define LLCC_LB_CNT_MASK GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT 28
@@ -47,6 +51,7 @@
#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
+#define LLCC_TRP_WRSC_EN 0x21f20
#define BANK_OFFSET_STRIDE 0x80000
@@ -73,6 +78,7 @@
* then the ways assigned to this client are not flushed on power
* collapse.
* @activate_on_init: Activate the slice immediately after it is programmed
+ * @write_scid_en: Enables write-cache support for a given scid.
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -87,6 +93,7 @@ struct llcc_slice_config {
bool dis_cap_alloc;
bool retain_on_pc;
bool activate_on_init;
+ bool write_scid_en;
};
struct qcom_llcc_config {
@@ -147,6 +154,25 @@ static const struct llcc_slice_config sm8150_data[] = {
{ LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
};
+static const struct llcc_slice_config sm8250_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
@@ -164,6 +190,11 @@ static const struct qcom_llcc_config sm8150_cfg = {
.size = ARRAY_SIZE(sm8150_data),
};
+static const struct qcom_llcc_config sm8250_cfg = {
+ .sct_data = sm8250_data,
+ .size = ARRAY_SIZE(sm8250_data),
+};
+
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
/**
@@ -413,6 +444,16 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
+ if (drv_data->major_version == 2) {
+ u32 wren;
+
+ wren = config->write_scid_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_EN,
+ BIT(config->slice_id), wren);
+ if (ret)
+ return ret;
+ }
+
if (config->activate_on_init) {
desc.slice_id = config->slice_id;
ret = llcc_slice_activate(&desc);
@@ -476,6 +517,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
const struct qcom_llcc_config *cfg;
const struct llcc_slice_config *llcc_cfg;
u32 sz;
+ u32 version;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
@@ -496,6 +538,13 @@ static int qcom_llcc_probe(struct platform_device *pdev)
goto err;
}
+ /* Extract major version of the IP */
+ ret = regmap_read(drv_data->bcast_regmap, LLCC_COMMON_HW_INFO, &version);
+ if (ret)
+ goto err;
+
+ drv_data->major_version = FIELD_GET(LLCC_MAJOR_VERSION_MASK, version);
+
ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
&num_banks);
if (ret)
@@ -559,6 +608,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
+ { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ }
};
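To make the LLCC_TRP_WRSC_EN update concrete (values taken from the sm8250 table above):

/*
 * The sm8250 LLCC_GPU entry has slice_id = 12 and write_scid_en = 1,
 * so on a major-version-2 LLCC the update above becomes
 *
 *     regmap_update_bits(bcast_regmap, LLCC_TRP_WRSC_EN,
 *                        BIT(12), 1 << 12);
 *
 * i.e. only bit 12 of the write-sub-cache enable register changes,
 * leaving every other slice's bit untouched.
 */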
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 7f9e9944d1ea..f1875dc31ae2 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -189,6 +189,7 @@ struct ocmem *of_get_ocmem(struct device *dev)
{
struct platform_device *pdev;
struct device_node *devnode;
+ struct ocmem *ocmem;
devnode = of_parse_phandle(dev->of_node, "sram", 0);
if (!devnode || !devnode->parent) {
@@ -202,7 +203,12 @@ struct ocmem *of_get_ocmem(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- return platform_get_drvdata(pdev);
+ ocmem = platform_get_drvdata(pdev);
+ if (!ocmem) {
+ dev_err(dev, "Cannot get ocmem\n");
+ return ERR_PTR(-ENODEV);
+ }
+ return ocmem;
}
EXPORT_SYMBOL(of_get_ocmem);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index b5840d624bc6..53acb9423bd6 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -600,6 +600,7 @@ static const struct of_device_id qmp_dt_match[] = {
{ .compatible = "qcom,sdm845-aoss-qmp", },
{ .compatible = "qcom,sm8150-aoss-qmp", },
{ .compatible = "qcom,sm8250-aoss-qmp", },
+ { .compatible = "qcom,sm8350-aoss-qmp", },
{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 37969dcbaf14..a84ab0d6a9d4 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -231,10 +231,9 @@ static void tcs_invalidate(struct rsc_drv *drv, int type)
if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
return;
- for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
- write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
- }
+
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}
@@ -364,7 +363,7 @@ static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
enable = TCS_AMC_MODE_ENABLE;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
enable |= TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ write_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, enable);
}
}
@@ -443,7 +442,6 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
- write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
@@ -476,23 +474,23 @@ skip:
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
const struct tcs_request *msg)
{
- u32 msgid, cmd_msgid;
+ u32 msgid;
+ u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
u32 cmd_enable = 0;
- u32 cmd_complete;
struct tcs_cmd *cmd;
int i, j;
- cmd_msgid = CMD_MSGID_LEN;
+ /* Convert all commands to RR when the request has wait_for_compl set */
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
- cmd_msgid |= CMD_MSGID_WRITE;
-
- cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
cmd_enable |= BIT(j);
- cmd_complete |= cmd->wait << j;
msgid = cmd_msgid;
+ /*
+ * Additionally, if cmd->wait is set, make the command's
+ * response required even if the overall request was
+ * fire-and-forget.
+ */
msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
@@ -501,7 +499,6 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
}
- write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
@@ -652,7 +649,6 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
* cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
*/
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
- write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
spin_unlock_irqrestore(&drv->lock, flags);
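Spelling out the MSGID composition after this change (the CMD_MSGID_* macros are internal to rpmh-rsc.c):

/*
 * For each command j of a TCS request:
 *
 *     msgid  = CMD_MSGID_LEN | CMD_MSGID_WRITE;
 *     msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
 *     msgid |= cmd->wait           ? CMD_MSGID_RESP_REQ : 0;
 *
 * A completion is now requested per command whenever either the whole
 * request or that individual command asks to wait, which is why the
 * separate RSC_DRV_CMD_WAIT_FOR_CMPL register no longer needs to be
 * programmed or cleaned up.
 */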
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 85d1207b72d7..27733b0e7fca 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -21,6 +21,8 @@
* RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */
#define RPMPD_SMPA 0x61706d73
#define RPMPD_LDOA 0x616f646c
+#define RPMPD_SMPB 0x62706d73
+#define RPMPD_LDOB 0x626f646c
#define RPMPD_RWCX 0x78637772
#define RPMPD_RWMX 0x786d7772
#define RPMPD_RWLC 0x636c7772
@@ -184,6 +186,31 @@ static const struct rpmpd_desc msm8976_desc = {
.max_state = RPM_SMD_LEVEL_TURBO_HIGH,
};
+/* msm8994 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8994, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8994, vddmx, vddmx_ao, SMPA, CORNER, 2);
+/* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */
+DEFINE_RPMPD_CORNER(msm8994, vddgfx, SMPB, 2);
+
+DEFINE_RPMPD_VFC(msm8994, vddcx_vfc, SMPA, 1);
+DEFINE_RPMPD_VFC(msm8994, vddgfx_vfc, SMPB, 2);
+
+static struct rpmpd *msm8994_rpmpds[] = {
+ [MSM8994_VDDCX] = &msm8994_vddcx,
+ [MSM8994_VDDCX_AO] = &msm8994_vddcx_ao,
+ [MSM8994_VDDCX_VFC] = &msm8994_vddcx_vfc,
+ [MSM8994_VDDMX] = &msm8994_vddmx,
+ [MSM8994_VDDMX_AO] = &msm8994_vddmx_ao,
+ [MSM8994_VDDGFX] = &msm8994_vddgfx,
+ [MSM8994_VDDGFX_VFC] = &msm8994_vddgfx_vfc,
+};
+
+static const struct rpmpd_desc msm8994_desc = {
+ .rpmpds = msm8994_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8994_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
/* msm8996 RPM Power domains */
DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
@@ -302,6 +329,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
+ { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 7251827bac88..cc4e0655a47b 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -732,9 +732,7 @@ qcom_smem_partition_header(struct qcom_smem *smem,
header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
- dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
- header->magic[0], header->magic[1],
- header->magic[2], header->magic[3]);
+ dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
return NULL;
}
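The %4ph specifier used above is the kernel's built-in hex-dump format extension; a minimal illustration (bytes chosen arbitrarily):

u8 magic[4] = { 0x24, 0x50, 0x52, 0x54 };

/* prints: bad partition magic 24 50 52 54 */
dev_err(smem->dev, "bad partition magic %4ph\n", magic);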
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index d21530d24253..f6cfb79338f0 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -15,6 +15,8 @@
#include <linux/sys_soc.h>
#include <linux/types.h>
+#include <asm/unaligned.h>
+
/*
* SoC version type with major number in the upper 16 bits and minor
* number in the lower 16 bits.
@@ -83,6 +85,11 @@ static const char *const pmic_models[] = {
[23] = "PM8038",
[24] = "PM8922",
[25] = "PM8917",
+ [30] = "PM8150",
+ [31] = "PM8150L",
+ [32] = "PM8150B",
+ [33] = "PMK8002",
+ [36] = "PM8009",
};
#endif /* CONFIG_DEBUG_FS */
@@ -217,23 +224,39 @@ static const struct soc_id soc_id[] = {
{ 250, "MSM8616" },
{ 251, "MSM8992" },
{ 253, "APQ8094" },
+ { 290, "MDM9607" },
{ 291, "APQ8096" },
+ { 292, "MSM8998" },
{ 293, "MSM8953" },
+ { 296, "MDM8207" },
+ { 297, "MDM9207" },
+ { 298, "MDM9307" },
+ { 299, "MDM9628" },
{ 304, "APQ8053" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 317, "SDM660" },
{ 318, "SDM630" },
+ { 319, "APQ8098" },
{ 321, "SDM845" },
+ { 322, "MDM9206" },
+ { 324, "SDA660" },
+ { 325, "SDM658" },
+ { 326, "SDA658" },
+ { 327, "SDA630" },
{ 338, "SDM450" },
{ 341, "SDA845" },
+ { 345, "SDM636" },
+ { 346, "SDA636" },
{ 349, "SDM632" },
{ 350, "SDA632" },
{ 351, "SDA450" },
{ 356, "SM8250" },
{ 402, "IPQ6018" },
{ 425, "SC7180" },
+ { 455, "QRB5165" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -264,7 +287,7 @@ static const struct file_operations qcom_ ##name## _ops = { \
}
#define DEBUGFS_ADD(info, name) \
- debugfs_create_file(__stringify(name), 0400, \
+ debugfs_create_file(__stringify(name), 0444, \
qcom_socinfo->dbg_root, \
info, &qcom_ ##name## _ops)
@@ -286,7 +309,7 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
if (model < 0)
return -EINVAL;
- if (model <= ARRAY_SIZE(pmic_models) && pmic_models[model])
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
seq_printf(seq, "%s\n", pmic_models[model]);
else
seq_printf(seq, "unknown (%d)\n", model);
@@ -294,6 +317,32 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
return 0;
}
+static int qcom_show_pmic_model_array(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ unsigned int num_pmics = le32_to_cpu(socinfo->num_pmics);
+ unsigned int pmic_array_offset = le32_to_cpu(socinfo->pmic_array_offset);
+ int i;
+ void *ptr = socinfo;
+
+ ptr += pmic_array_offset;
+
+ /* No need for bounds checking; it was already done in socinfo_debugfs_init() */
+ for (i = 0; i < num_pmics; i++) {
+ unsigned int model = SOCINFO_MINOR(get_unaligned_le32(ptr + 2 * i * sizeof(u32)));
+ unsigned int die_rev = get_unaligned_le32(ptr + (2 * i + 1) * sizeof(u32));
+
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s %u.%u\n", pmic_models[model],
+ SOCINFO_MAJOR(die_rev),
+ SOCINFO_MINOR(die_rev));
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+ }
+
+ return 0;
+}
+
static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
{
struct socinfo *socinfo = seq->private;
@@ -316,6 +365,7 @@ static int qcom_show_chip_id(struct seq_file *seq, void *p)
QCOM_OPEN(build_id, qcom_show_build_id);
QCOM_OPEN(pmic_model, qcom_show_pmic_model);
+QCOM_OPEN(pmic_model_array, qcom_show_pmic_model_array);
QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
QCOM_OPEN(chip_id, qcom_show_chip_id);
@@ -344,25 +394,27 @@ DEFINE_IMAGE_OPS(variant);
DEFINE_IMAGE_OPS(oem);
static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
- struct socinfo *info)
+ struct socinfo *info, size_t info_size)
{
struct smem_image_version *versions;
struct dentry *dentry;
size_t size;
int i;
+ unsigned int num_pmics;
+ unsigned int pmic_array_offset;
qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL);
qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
- debugfs_create_x32("info_fmt", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_x32("info_fmt", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.fmt);
switch (qcom_socinfo->info.fmt) {
case SOCINFO_VERSION(0, 15):
qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
- debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("nmodem_supported", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nmodem_supported);
fallthrough;
case SOCINFO_VERSION(0, 14):
@@ -371,19 +423,19 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
- debugfs_create_u32("num_clusters", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("num_clusters", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.num_clusters);
- debugfs_create_u32("ncluster_array_offset", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("ncluster_array_offset", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.ncluster_array_offset);
- debugfs_create_u32("num_defective_parts", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("num_defective_parts", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.num_defective_parts);
- debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("ndefective_parts_array_offset", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.ndefective_parts_array_offset);
fallthrough;
case SOCINFO_VERSION(0, 13):
qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
- debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("nproduct_id", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nproduct_id);
DEBUGFS_ADD(info, chip_id);
fallthrough;
@@ -395,21 +447,26 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.raw_device_num =
__le32_to_cpu(info->raw_device_num);
- debugfs_create_x32("chip_family", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_x32("chip_family", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.chip_family);
- debugfs_create_x32("raw_device_family", 0400,
+ debugfs_create_x32("raw_device_family", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_device_family);
- debugfs_create_x32("raw_device_number", 0400,
+ debugfs_create_x32("raw_device_number", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_device_num);
fallthrough;
case SOCINFO_VERSION(0, 11):
+ num_pmics = le32_to_cpu(info->num_pmics);
+ pmic_array_offset = le32_to_cpu(info->pmic_array_offset);
+ if (pmic_array_offset + 2 * num_pmics * sizeof(u32) <= info_size)
+ DEBUGFS_ADD(info, pmic_model_array);
+ fallthrough;
case SOCINFO_VERSION(0, 10):
case SOCINFO_VERSION(0, 9):
qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id);
- debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("foundry_id", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.foundry_id);
fallthrough;
case SOCINFO_VERSION(0, 8):
@@ -421,7 +478,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.hw_plat_subtype =
__le32_to_cpu(info->hw_plat_subtype);
- debugfs_create_u32("hardware_platform_subtype", 0400,
+ debugfs_create_u32("hardware_platform_subtype", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat_subtype);
fallthrough;
@@ -429,28 +486,28 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.accessory_chip =
__le32_to_cpu(info->accessory_chip);
- debugfs_create_u32("accessory_chip", 0400,
+ debugfs_create_u32("accessory_chip", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.accessory_chip);
fallthrough;
case SOCINFO_VERSION(0, 4):
qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
- debugfs_create_u32("platform_version", 0400,
+ debugfs_create_u32("platform_version", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.plat_ver);
fallthrough;
case SOCINFO_VERSION(0, 3):
qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
- debugfs_create_u32("hardware_platform", 0400,
+ debugfs_create_u32("hardware_platform", 0444,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat);
fallthrough;
case SOCINFO_VERSION(0, 2):
qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
- debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root,
+ debugfs_create_u32("raw_version", 0444, qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_ver);
fallthrough;
case SOCINFO_VERSION(0, 1):
@@ -467,11 +524,11 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
dentry = debugfs_create_dir(socinfo_image_names[i],
qcom_socinfo->dbg_root);
- debugfs_create_file("name", 0400, dentry, &versions[i],
+ debugfs_create_file("name", 0444, dentry, &versions[i],
&qcom_image_name_ops);
- debugfs_create_file("variant", 0400, dentry, &versions[i],
+ debugfs_create_file("variant", 0444, dentry, &versions[i],
&qcom_image_variant_ops);
- debugfs_create_file("oem", 0400, dentry, &versions[i],
+ debugfs_create_file("oem", 0444, dentry, &versions[i],
&qcom_image_oem_ops);
}
}
@@ -482,7 +539,7 @@ static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo)
}
#else
static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
- struct socinfo *info)
+ struct socinfo *info, size_t info_size)
{
}
static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { }
@@ -522,7 +579,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
if (IS_ERR(qs->soc_dev))
return PTR_ERR(qs->soc_dev);
- socinfo_debugfs_init(qs, info);
+ socinfo_debugfs_init(qs, info, item_size);
/* Feed the soc specific unique data into entropy pool */
add_device_randomness(info, item_size);
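
The new guard in the version 0.11 case keeps that walk inside the SMEM item: with two u32 words per PMIC, the array ends at pmic_array_offset + 2 * num_pmics * sizeof(u32), and the debugfs file is only created when that end fits within item_size. A worked instance with hypothetical values; the widening to 64 bits is an extra precaution against a corrupt num_pmics overflowing the sum on 32-bit, not something the patch itself does:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values for illustration only. */
	uint32_t info_size = 0x1f0;	/* item_size from qcom_smem_get() */
	uint32_t num_pmics = 8;
	uint32_t pmic_array_offset = 0x140;

	/* 0x140 + 8 * 2 * 4 = 0x180 <= 0x1f0: the file is exposed.
	 * A bogus offset of 0x1d0 fails: 0x1d0 + 0x40 = 0x210 > 0x1f0. */
	bool ok = pmic_array_offset + 2ull * num_pmics * sizeof(uint32_t)
		  <= info_size;

	printf("pmic_model_array %s\n", ok ? "created" : "skipped");
	return 0;
}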
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 9b235fc90027..53387a72ca00 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/soc/renesas/rcar-sysc.h>
#include "rcar-sysc.h"
@@ -44,13 +45,13 @@
#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */
-#define SYSCSR_RETRIES 100
+#define SYSCSR_TIMEOUT 100
#define SYSCSR_DELAY_US 1
#define PWRER_RETRIES 100
#define PWRER_DELAY_US 1
-#define SYSCISR_RETRIES 1000
+#define SYSCISR_TIMEOUT 1000
#define SYSCISR_DELAY_US 1
#define RCAR_PD_ALWAYS_ON 32 /* Always-on power area */
@@ -68,7 +69,8 @@ static u32 rcar_sysc_extmask_offs, rcar_sysc_extmask_val;
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
unsigned int sr_bit, reg_offs;
- int k;
+ u32 val;
+ int ret;
if (on) {
sr_bit = SYSCSR_PONENB;
@@ -79,13 +81,10 @@ static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
}
/* Wait until SYSC is ready to accept a power request */
- for (k = 0; k < SYSCSR_RETRIES; k++) {
- if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
- break;
- udelay(SYSCSR_DELAY_US);
- }
-
- if (k == SYSCSR_RETRIES)
+ ret = readl_poll_timeout_atomic(rcar_sysc_base + SYSCSR, val,
+ val & BIT(sr_bit), SYSCSR_DELAY_US,
+ SYSCSR_TIMEOUT);
+ if (ret)
return -EAGAIN;
/* Submit power shutoff or power resume request */
@@ -99,10 +98,9 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
{
unsigned int isr_mask = BIT(sysc_ch->isr_bit);
unsigned int chan_mask = BIT(sysc_ch->chan_bit);
- unsigned int status;
+ unsigned int status, k;
unsigned long flags;
- int ret = 0;
- int k;
+ int ret;
spin_lock_irqsave(&rcar_sysc_lock, flags);
@@ -145,13 +143,10 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
}
/* Wait until the power shutoff or resume request has completed */
- for (k = 0; k < SYSCISR_RETRIES; k++) {
- if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
- break;
- udelay(SYSCISR_DELAY_US);
- }
-
- if (k == SYSCISR_RETRIES)
+ ret = readl_poll_timeout_atomic(rcar_sysc_base + SYSCISR, status,
+ status & isr_mask, SYSCISR_DELAY_US,
+ SYSCISR_TIMEOUT);
+ if (ret)
ret = -EIO;
iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
@@ -439,6 +434,8 @@ static int __init rcar_sysc_pd_init(void)
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+ if (!error)
+ of_node_set_flag(np, OF_POPULATED);
out_put:
of_node_put(np);
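
Both conversions above swap an open-coded poll loop for readl_poll_timeout_atomic(), so the former *_RETRIES counts become microsecond timeouts. A simplified sketch of what the helper does; the real macro in <linux/iopoll.h> also re-reads the register once after the deadline so a last-instant success is not misreported as a timeout:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Sketch only; see <linux/iopoll.h> for the real implementation. */
static int poll_bit_atomic(void __iomem *addr, u32 mask,
			   unsigned long delay_us, unsigned long timeout_us)
{
	unsigned long waited = 0;
	u32 val;

	for (;;) {
		val = readl(addr);
		if (val & mask)
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;	/* caller maps to -EAGAIN/-EIO */
		udelay(delay_us);
		waited += delay_us;
	}
}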
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index fc7f48a92288..5745d7e5908e 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -7,21 +7,19 @@ menuconfig SOC_SAMSUNG
if SOC_SAMSUNG
-config EXYNOS_ASV
- bool "Exynos Adaptive Supply Voltage support" if COMPILE_TEST
- depends on (ARCH_EXYNOS && EXYNOS_CHIPID) || COMPILE_TEST
- select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
-
# There is no need to enable these drivers for ARMv8
config EXYNOS_ASV_ARM
bool "Exynos ASV ARMv7-specific driver extensions" if COMPILE_TEST
- depends on EXYNOS_ASV
+ depends on EXYNOS_CHIPID
config EXYNOS_CHIPID
- bool "Exynos Chipid controller driver" if COMPILE_TEST
+ bool "Exynos ChipID controller and ASV driver" if COMPILE_TEST
depends on ARCH_EXYNOS || COMPILE_TEST
+ select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
select MFD_SYSCON
select SOC_BUS
+ help
+ Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 59e8e9453f27..0c523a8de4eb 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_EXYNOS_ASV) += exynos-asv.o
obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
-obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o
+obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 8abf4dfaa5c5..d60af8acc391 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -2,7 +2,9 @@
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
*
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
@@ -10,12 +12,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-chipid.h>
@@ -111,7 +108,7 @@ static int exynos_asv_update_opps(struct exynos_asv *asv)
return 0;
}
-static int exynos_asv_probe(struct platform_device *pdev)
+int exynos_asv_init(struct device *dev, struct regmap *regmap)
{
int (*probe_func)(struct exynos_asv *asv);
struct exynos_asv *asv;
@@ -119,39 +116,39 @@ static int exynos_asv_probe(struct platform_device *pdev)
u32 product_id = 0;
int ret, i;
- cpu_dev = get_cpu_device(0);
- ret = dev_pm_opp_get_opp_count(cpu_dev);
- if (ret < 0)
- return -EPROBE_DEFER;
-
- asv = devm_kzalloc(&pdev->dev, sizeof(*asv), GFP_KERNEL);
+ asv = devm_kzalloc(dev, sizeof(*asv), GFP_KERNEL);
if (!asv)
return -ENOMEM;
- asv->chipid_regmap = device_node_to_regmap(pdev->dev.of_node);
- if (IS_ERR(asv->chipid_regmap)) {
- dev_err(&pdev->dev, "Could not find syscon regmap\n");
- return PTR_ERR(asv->chipid_regmap);
+ asv->chipid_regmap = regmap;
+ asv->dev = dev;
+ ret = regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID,
+ &product_id);
+ if (ret < 0) {
+ dev_err(dev, "Cannot read revision from ChipID: %d\n", ret);
+ return -ENODEV;
}
- regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
-
switch (product_id & EXYNOS_MASK) {
case 0xE5422000:
probe_func = exynos5422_asv_init;
break;
default:
- return -ENODEV;
+ dev_dbg(dev, "No ASV support for this SoC\n");
+ devm_kfree(dev, asv);
+ return 0;
}
- ret = of_property_read_u32(pdev->dev.of_node, "samsung,asv-bin",
+ cpu_dev = get_cpu_device(0);
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+ ret = of_property_read_u32(dev->of_node, "samsung,asv-bin",
&asv->of_bin);
if (ret < 0)
asv->of_bin = -EINVAL;
- asv->dev = &pdev->dev;
- dev_set_drvdata(&pdev->dev, asv);
-
for (i = 0; i < ARRAY_SIZE(asv->subsys); i++)
asv->subsys[i].asv = asv;
@@ -161,17 +158,3 @@ static int exynos_asv_probe(struct platform_device *pdev)
return exynos_asv_update_opps(asv);
}
-
-static const struct of_device_id exynos_asv_of_device_ids[] = {
- { .compatible = "samsung,exynos4210-chipid" },
- {}
-};
-
-static struct platform_driver exynos_asv_driver = {
- .driver = {
- .name = "exynos-asv",
- .of_match_table = exynos_asv_of_device_ids,
- },
- .probe = exynos_asv_probe,
-};
-module_platform_driver(exynos_asv_driver);
diff --git a/drivers/soc/samsung/exynos-asv.h b/drivers/soc/samsung/exynos-asv.h
index 3fd1f2acd999..dcbe154db31e 100644
--- a/drivers/soc/samsung/exynos-asv.h
+++ b/drivers/soc/samsung/exynos-asv.h
@@ -68,4 +68,6 @@ static inline u32 exynos_asv_opp_get_frequency(const struct exynos_asv_subsys *s
return __asv_get_table_entry(&subsys->table, level, 0);
}
+int exynos_asv_init(struct device *dev, struct regmap *regmap);
+
#endif /* __LINUX_SOC_EXYNOS_ASV_H */
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 1a76eade2ed6..5c1d0f97f766 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -2,20 +2,28 @@
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
*
* Exynos - CHIP ID support
* Author: Pankaj Dubey <pankaj.dubey@samsung.com>
* Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
-#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/soc/samsung/exynos-chipid.h>
#include <linux/sys_soc.h>
+#include "exynos-asv.h"
+
static const struct exynos_soc_id {
const char *name;
unsigned int id;
@@ -36,7 +44,7 @@ static const struct exynos_soc_id {
{ "EXYNOS7420", 0xE7420000 },
};
-static const char * __init product_id_to_soc_id(unsigned int product_id)
+static const char *product_id_to_soc_id(unsigned int product_id)
{
int i;
@@ -46,25 +54,17 @@ static const char * __init product_id_to_soc_id(unsigned int product_id)
return NULL;
}
-static int __init exynos_chipid_early_init(void)
+static int exynos_chipid_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *root;
- struct device_node *syscon;
struct regmap *regmap;
u32 product_id;
u32 revision;
int ret;
- syscon = of_find_compatible_node(NULL, NULL,
- "samsung,exynos4210-chipid");
- if (!syscon)
- return -ENODEV;
-
- regmap = device_node_to_regmap(syscon);
- of_node_put(syscon);
-
+ regmap = device_node_to_regmap(pdev->dev.of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -74,7 +74,8 @@ static int __init exynos_chipid_early_init(void)
revision = product_id & EXYNOS_REV_MASK;
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -84,20 +85,24 @@ static int __init exynos_chipid_early_init(void)
of_property_read_string(root, "model", &soc_dev_attr->machine);
of_node_put(root);
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x", revision);
+ soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%x", revision);
soc_dev_attr->soc_id = product_id_to_soc_id(product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/* please note that the actual registration will be deferred */
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ ret = exynos_asv_init(&pdev->dev, regmap);
+ if (ret)
goto err;
- }
+
+ platform_set_drvdata(pdev, soc_dev);
dev_info(soc_device_to_device(soc_dev),
"Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
@@ -106,9 +111,31 @@ static int __init exynos_chipid_early_init(void)
return 0;
err:
- kfree(soc_dev_attr->revision);
- kfree(soc_dev_attr);
+ soc_device_unregister(soc_dev);
+
return ret;
}
-arch_initcall(exynos_chipid_early_init);
+static int exynos_chipid_remove(struct platform_device *pdev)
+{
+ struct soc_device *soc_dev = platform_get_drvdata(pdev);
+
+ soc_device_unregister(soc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_chipid_of_device_ids[] = {
+ { .compatible = "samsung,exynos4210-chipid" },
+ {}
+};
+
+static struct platform_driver exynos_chipid_driver = {
+ .driver = {
+ .name = "exynos-chipid",
+ .of_match_table = exynos_chipid_of_device_ids,
+ },
+ .probe = exynos_chipid_probe,
+ .remove = exynos_chipid_remove,
+};
+builtin_platform_driver(exynos_chipid_driver);
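
With ASV folded in, exynos_asv_init() follows an "optional sub-feature" contract that the probe above relies on: 0 when ASV is initialized or the SoC simply has no ASV table, -EPROBE_DEFER while the CPU OPPs are not yet registered (the core then retries the whole probe, including re-registering the soc device), and other negatives for real failures. A condensed sketch of the caller-side handling, lifted from exynos_chipid_probe() above rather than any new API:

	ret = exynos_asv_init(&pdev->dev, regmap);
	if (ret) {
		/* covers -EPROBE_DEFER too: undo and let the core retry */
		soc_device_unregister(soc_dev);
		return ret;
	}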
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index ab8582971bfc..5ec0c13f0aaf 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/sched.h>
+#include <linux/pm_runtime.h>
struct exynos_pm_domain_config {
/* Value for LOCAL_PWR_CFG and STATUS fields for each domain */
@@ -73,15 +73,15 @@ static int exynos_pd_power_off(struct generic_pm_domain *domain)
return exynos_pd_power(domain, false);
}
-static const struct exynos_pm_domain_config exynos4210_cfg __initconst = {
+static const struct exynos_pm_domain_config exynos4210_cfg = {
.local_pwr_cfg = 0x7,
};
-static const struct exynos_pm_domain_config exynos5433_cfg __initconst = {
+static const struct exynos_pm_domain_config exynos5433_cfg = {
.local_pwr_cfg = 0xf,
};
-static const struct of_device_id exynos_pm_domain_of_match[] __initconst = {
+static const struct of_device_id exynos_pm_domain_of_match[] = {
{
.compatible = "samsung,exynos4210-pd",
.data = &exynos4210_cfg,
@@ -92,7 +92,7 @@ static const struct of_device_id exynos_pm_domain_of_match[] __initconst = {
{ },
};
-static __init const char *exynos_get_domain_name(struct device_node *node)
+static const char *exynos_get_domain_name(struct device_node *node)
{
const char *name;
@@ -101,60 +101,44 @@ static __init const char *exynos_get_domain_name(struct device_node *node)
return kstrdup_const(name, GFP_KERNEL);
}
-static __init int exynos4_pm_init_power_domain(void)
+static int exynos_pd_probe(struct platform_device *pdev)
{
- struct device_node *np;
- const struct of_device_id *match;
-
- for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) {
- const struct exynos_pm_domain_config *pm_domain_cfg;
- struct exynos_pm_domain *pd;
- int on;
+ const struct exynos_pm_domain_config *pm_domain_cfg;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args child, parent;
+ struct exynos_pm_domain *pd;
+ int on, ret;
- pm_domain_cfg = match->data;
+ pm_domain_cfg = of_device_get_match_data(dev);
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- of_node_put(np);
- return -ENOMEM;
- }
- pd->pd.name = exynos_get_domain_name(np);
- if (!pd->pd.name) {
- kfree(pd);
- of_node_put(np);
- return -ENOMEM;
- }
+ pd->pd.name = exynos_get_domain_name(np);
+ if (!pd->pd.name)
+ return -ENOMEM;
- pd->base = of_iomap(np, 0);
- if (!pd->base) {
- pr_warn("%s: failed to map memory\n", __func__);
- kfree_const(pd->pd.name);
- kfree(pd);
- continue;
- }
-
- pd->pd.power_off = exynos_pd_power_off;
- pd->pd.power_on = exynos_pd_power_on;
- pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+ pd->base = of_iomap(np, 0);
+ if (!pd->base) {
+ kfree_const(pd->pd.name);
+ return -ENODEV;
+ }
- on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
+ pd->pd.power_off = exynos_pd_power_off;
+ pd->pd.power_on = exynos_pd_power_on;
+ pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
- pm_genpd_init(&pd->pd, NULL, !on);
- of_genpd_add_provider_simple(np, &pd->pd);
- }
+ on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
- /* Assign the child power domains to their parents */
- for_each_matching_node(np, exynos_pm_domain_of_match) {
- struct of_phandle_args child, parent;
+ pm_genpd_init(&pd->pd, NULL, !on);
+ ret = of_genpd_add_provider_simple(np, &pd->pd);
+ if (ret == 0 && of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &parent) == 0) {
child.np = np;
child.args_count = 0;
- if (of_parse_phandle_with_args(np, "power-domains",
- "#power-domain-cells", 0,
- &parent) != 0)
- continue;
-
if (of_genpd_add_subdomain(&parent, &child))
pr_warn("%pOF failed to add subdomain: %pOF\n",
parent.np, child.np);
@@ -163,6 +147,21 @@ static __init int exynos4_pm_init_power_domain(void)
parent.np, child.np);
}
- return 0;
+ pm_runtime_enable(dev);
+ return ret;
+}
+
+static struct platform_driver exynos_pd_driver = {
+ .probe = exynos_pd_probe,
+ .driver = {
+ .name = "exynos-pd",
+ .of_match_table = exynos_pm_domain_of_match,
+ .suppress_bind_attrs = true,
+ }
+};
+
+static __init int exynos4_pm_init_power_domain(void)
+{
+ return platform_driver_register(&exynos_pd_driver);
}
core_initcall(exynos4_pm_init_power_domain);
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
index 44d7e1951da3..59640a1d0b28 100644
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -17,6 +17,10 @@
#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+#define SIFIVE_L2_DIRECCFAIL_LOW 0x120
+#define SIFIVE_L2_DIRECCFAIL_HIGH 0x124
+#define SIFIVE_L2_DIRECCFAIL_COUNT 0x128
+
#define SIFIVE_L2_DATECCFIX_LOW 0x140
#define SIFIVE_L2_DATECCFIX_HIGH 0x144
#define SIFIVE_L2_DATECCFIX_COUNT 0x148
@@ -29,7 +33,7 @@
#define SIFIVE_L2_WAYENABLE 0x08
#define SIFIVE_L2_ECCINJECTERR 0x40
-#define SIFIVE_L2_MAX_ECCINTR 3
+#define SIFIVE_L2_MAX_ECCINTR 4
static void __iomem *l2_base;
static int g_irq[SIFIVE_L2_MAX_ECCINTR];
@@ -39,6 +43,7 @@ enum {
DIR_CORR = 0,
DATA_CORR,
DATA_UNCORR,
+ DIR_UNCORR,
};
#ifdef CONFIG_DEBUG_FS
@@ -93,6 +98,7 @@ static void l2_config_read(void)
static const struct of_device_id sifive_l2_ids[] = {
{ .compatible = "sifive,fu540-c000-ccache" },
+ { .compatible = "sifive,fu740-c000-ccache" },
{ /* end of table */ },
};
@@ -155,6 +161,15 @@ static irqreturn_t l2_int_handler(int irq, void *device)
atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
"DirECCFix");
}
+ if (irq == g_irq[DIR_UNCORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DIRECCFAIL_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DIRECCFAIL_LOW);
+ /* Reading this register clears the DirFail interrupt signal */
+ readl(l2_base + SIFIVE_L2_DIRECCFAIL_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+ "DirECCFail");
+ panic("L2CACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
+ }
if (irq == g_irq[DATA_CORR]) {
add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
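
Uncorrectable directory errors now panic after notifying l2_err_chain. Consumers subscribe through the register_sifive_l2_error_notifier() helper this driver exports; the sketch below assumes that interface and its <soc/sifive/sifive_l2_cache.h> header. The event argument is SIFIVE_L2_ERR_TYPE_CE or SIFIVE_L2_ERR_TYPE_UE and the payload is the message string:

#include <linux/notifier.h>
#include <soc/sifive/sifive_l2_cache.h>

/* Sketch: log corrected ECC events delivered through the chain. */
static int l2_ecc_event(struct notifier_block *nb, unsigned long event,
			void *msg)
{
	if (event == SIFIVE_L2_ERR_TYPE_CE)
		pr_info("L2 corrected ECC error: %s\n", (char *)msg);
	return NOTIFY_OK;
}

static struct notifier_block l2_ecc_nb = {
	.notifier_call = l2_ecc_event,
};

/* in some driver's init path: register_sifive_l2_error_notifier(&l2_ecc_nb); */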
@@ -181,7 +196,7 @@ static int __init sifive_l2_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc;
+ int i, rc, intr_num;
np = of_find_matching_node(NULL, sifive_l2_ids);
if (!np)
@@ -194,7 +209,13 @@ static int __init sifive_l2_init(void)
if (!l2_base)
return -ENOMEM;
- for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+ intr_num = of_property_count_u32_elems(np, "interrupts");
+ if (intr_num <= 0) {
+ pr_err("L2CACHE: no interrupts property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < intr_num; i++) {
g_irq[i] = irq_of_parse_and_map(np, i);
rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
if (rc) {
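
One caveat on the loop above: g_irq[] remains a fixed array of SIFIVE_L2_MAX_ECCINTR entries, so a device tree describing more interrupts than the driver knows about would overrun it. A defensive count (a sketch, not part of the patch) could use of_irq_count() and clamp to the array size:

#include <linux/kernel.h>
#include <linux/of_irq.h>

/* Sketch of a defensive variant; not what the patch above does. */
static int count_l2_irqs(struct device_node *np)
{
	int n = of_irq_count(np);

	if (n <= 0)
		return -ENODEV;
	return min(n, SIFIVE_L2_MAX_ECCINTR);
}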
diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c
index e9925c8487d7..d90e4a264b6f 100644
--- a/drivers/soc/sunxi/sunxi_mbus.c
+++ b/drivers/soc/sunxi/sunxi_mbus.c
@@ -23,12 +23,7 @@ static const char * const sunxi_mbus_devices[] = {
"allwinner,sun7i-a20-display-engine",
"allwinner,sun8i-a23-display-engine",
"allwinner,sun8i-a33-display-engine",
- "allwinner,sun8i-a83t-display-engine",
- "allwinner,sun8i-h3-display-engine",
- "allwinner,sun8i-r40-display-engine",
- "allwinner,sun8i-v3s-display-engine",
"allwinner,sun9i-a80-display-engine",
- "allwinner,sun50i-a64-display-engine",
/*
* And now we have the regular devices connected to the MBUS
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index d4c7bd59429e..42833e33a96c 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -283,7 +283,7 @@ int sunxi_sram_release(struct device *dev)
EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
- bool has_emac_clock;
+ int num_emac_clocks;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -291,20 +291,31 @@ static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
};
static const struct sunxi_sramc_variant sun8i_h3_sramc_variant = {
- .has_emac_clock = true,
+ .num_emac_clocks = 1,
};
static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
- .has_emac_clock = true,
+ .num_emac_clocks = 1,
+};
+
+static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
+ .num_emac_clocks = 2,
};
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
unsigned int reg)
{
- if (reg == SUNXI_SRAM_EMAC_CLOCK_REG)
- return true;
- return false;
+ const struct sunxi_sramc_variant *variant;
+
+ variant = of_device_get_match_data(dev);
+
+ if (reg < SUNXI_SRAM_EMAC_CLOCK_REG)
+ return false;
+ if (reg > SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
+ return false;
+
+ return true;
}
static struct regmap_config sunxi_sram_emac_clock_regmap = {
@@ -312,7 +323,7 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
.val_bits = 32,
.reg_stride = 4,
/* last defined register */
- .max_register = SUNXI_SRAM_EMAC_CLOCK_REG,
+ .max_register = SUNXI_SRAM_EMAC_CLOCK_REG + 4,
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
@@ -343,7 +354,7 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!d)
return -ENOMEM;
- if (variant->has_emac_clock) {
+ if (variant->num_emac_clocks > 0) {
emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
&sunxi_sram_emac_clock_regmap);
@@ -387,6 +398,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.compatible = "allwinner,sun50i-h5-system-control",
.data = &sun50i_a64_sramc_variant,
},
+ {
+ .compatible = "allwinner,sun50i-h616-system-control",
+ .data = &sun50i_h616_sramc_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
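
The variant's num_emac_clocks now sizes the regmap window: registers run from 0x30 in steps of 4, so the H616 exposes 0x30 and 0x34. As written, the upper bound (reg > base + n * 4) is permissive by one stride, admitting one register past the last clock, and max_register stays pinned at 0x34; both are worth a look when adding further variants. A standalone sketch of the same predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EMAC_CLOCK_REG	0x30u	/* mirrors SUNXI_SRAM_EMAC_CLOCK_REG */

/* Same predicate as sunxi_sram_regmap_accessible_reg() above. */
static bool reg_accessible(uint32_t reg, uint32_t num_emac_clocks)
{
	if (reg < EMAC_CLOCK_REG)
		return false;
	if (reg > EMAC_CLOCK_REG + num_emac_clocks * 4)
		return false;
	return true;
}

int main(void)
{
	uint32_t reg;

	/* For 2 clocks this prints 0x30, 0x34 and, per the bound, 0x38. */
	for (reg = 0x28; reg <= 0x40; reg += 4)
		if (reg_accessible(reg, 2))
			printf("0x%02x accessible\n", reg);
	return 0;
}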
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index b495b0d5d0fa..312ba0f98ad7 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
@@ -1517,15 +1518,13 @@ EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
static int k3_ringacc_probe(struct platform_device *pdev)
{
const struct ringacc_match_data *match_data;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct k3_ringacc *ringacc;
int ret;
- match = of_match_node(k3_ringacc_of_match, dev->of_node);
- if (!match)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
- match_data = match->data;
ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
if (!ringacc)
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 7b5cb5d48f7d..591d14ebcb11 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -758,6 +758,7 @@ static int knav_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
ret = dma_init(node, child);
if (ret) {
+ of_node_put(child);
dev_err(&pdev->dev, "init failed with %d\n", ret);
break;
}
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 2e521f1eda96..2ac3856b8d42 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1087,6 +1087,7 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
for_each_child_of_node(regions, child) {
region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating region\n");
return -ENOMEM;
}
@@ -1399,6 +1400,7 @@ static int knav_queue_init_qmgrs(struct knav_device *kdev,
for_each_child_of_node(qmgrs, child) {
qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
if (!qmgr) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating qmgr\n");
return -ENOMEM;
}
@@ -1498,6 +1500,7 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
for_each_child_of_node(pdsps, child) {
pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
if (!pdsp) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating pdsp\n");
return -ENOMEM;
}
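
All three knav fixes apply the same rule: for_each_child_of_node() holds a reference on child for the current iteration and only drops it when advancing, so any break or return inside the body must drop the reference itself or the node leaks. The general shape, with a hypothetical do_setup() helper standing in for the per-child work:

#include <linux/of.h>

static int setup_children(struct device_node *parent)
{
	struct device_node *child;
	int ret = 0;

	for_each_child_of_node(parent, child) {
		ret = do_setup(child);	/* hypothetical per-child init */
		if (ret) {
			/* the iterator only puts the ref when advancing;
			 * an early exit must put it by hand */
			of_node_put(child);
			break;
		}
	}
	return ret;
}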
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 77f0051358f1..bf1468e5bccb 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -860,6 +860,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
+ u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
@@ -907,6 +908,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
map++;
}
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 64f3e3105540..7bab4bbaf02d 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -535,7 +535,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
ret = am33xx_push_sram_idle();
if (ret)
- goto err_free_sram;
+ goto err_unsetup_rtc;
am33xx_pm_set_ipc_ops();
@@ -575,6 +575,9 @@ err_pm_runtime_put:
err_pm_runtime_disable:
pm_runtime_disable(dev);
wkup_m3_ipc_put(m3_ipc);
+err_unsetup_rtc:
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
err_free_sram:
am33xx_pm_free_sram();
pm33xx_dev = NULL;
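
The new err_unsetup_rtc label restores the invariant that the unwind chain mirrors the setup order: each label undoes one completed step and falls through to the labels for everything acquired before it, so a failure never skips an earlier step's cleanup. In miniature, with hypothetical step names standing in for the probe's real ones:

#include <stdio.h>

/* Hypothetical steps standing in for the probe's real ones. */
static int step_a(void) { return 0; }	/* e.g. map the RTC registers */
static int step_b(void) { return 0; }	/* e.g. reserve SRAM */
static int step_c(void) { return -1; }	/* e.g. push the idle code to SRAM */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe_sketch(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;	/* not err_undo_a: b succeeded */

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}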
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 5d6e7132a5c4..f22ac1edbdd0 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -161,6 +161,53 @@ static struct regmap_config regmap_conf = {
.reg_stride = 4,
};
+static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct resource res;
+ int ret;
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", np);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret)
+ dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+
+node_put:
+ of_node_put(child);
+ return ret;
+}
+
static int pruss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -239,56 +286,18 @@ static int pruss_probe(struct platform_device *pdev)
goto rpm_disable;
}
- child = of_get_child_by_name(np, "cfg");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
- ret = -ENODEV;
+ ret = pruss_cfg_of_init(dev, pruss);
+ if (ret < 0)
goto rpm_put;
- }
-
- if (of_address_to_resource(child, 0, &res)) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!pruss->cfg_base) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
- (u64)res.start);
- regmap_conf.max_register = resource_size(&res) - 4;
-
- pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
- &regmap_conf);
- kfree(regmap_conf.name);
- if (IS_ERR(pruss->cfg_regmap)) {
- dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
- PTR_ERR(pruss->cfg_regmap));
- ret = PTR_ERR(pruss->cfg_regmap);
- goto node_put;
- }
-
- ret = pruss_clk_init(pruss, child);
- if (ret) {
- dev_err(dev, "failed to setup coreclk-mux\n");
- goto node_put;
- }
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "failed to register child devices\n");
- goto node_put;
+ goto rpm_put;
}
- of_node_put(child);
-
return 0;
-node_put:
- of_node_put(child);
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 0b1708dae361..53af9115dc31 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -1,23 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
menu "Xilinx SoC drivers"
-config XILINX_VCU
- tristate "Xilinx VCU logicoreIP Init"
- depends on HAS_IOMEM
- select REGMAP_MMIO
- help
- Provides the driver to enable and disable the isolation between the
- processing system and programmable logic part by using the logicoreIP
- register set. This driver also configures the frequency based on the
- clock information from the logicoreIP register set.
-
- If you say yes here you get support for the logicoreIP.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called xlnx_vcu.
-
config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
depends on PM && ZYNQMP_FIRMWARE
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index f66bfea5de17..9854e6f6086b 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
deleted file mode 100644
index 14daad4efc58..000000000000
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ /dev/null
@@ -1,628 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx VCU Init
- *
- * Copyright (C) 2016 - 2017 Xilinx, Inc.
- *
- * Contacts Dhaval Shah <dshah@xilinx.com>
- */
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/xlnx-vcu.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-/* vcu slcr registers, bitmask and shift */
-#define VCU_PLL_CTRL 0x24
-#define VCU_PLL_CTRL_RESET_MASK 0x01
-#define VCU_PLL_CTRL_RESET_SHIFT 0
-#define VCU_PLL_CTRL_BYPASS_MASK 0x01
-#define VCU_PLL_CTRL_BYPASS_SHIFT 3
-#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
-#define VCU_PLL_CTRL_FBDIV_SHIFT 8
-#define VCU_PLL_CTRL_POR_IN_MASK 0x01
-#define VCU_PLL_CTRL_POR_IN_SHIFT 1
-#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
-#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
-#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
-#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
-#define VCU_PLL_CTRL_DEFAULT 0
-#define VCU_PLL_DIV2 2
-
-#define VCU_PLL_CFG 0x28
-#define VCU_PLL_CFG_RES_MASK 0x0f
-#define VCU_PLL_CFG_RES_SHIFT 0
-#define VCU_PLL_CFG_CP_MASK 0x0f
-#define VCU_PLL_CFG_CP_SHIFT 5
-#define VCU_PLL_CFG_LFHF_MASK 0x03
-#define VCU_PLL_CFG_LFHF_SHIFT 10
-#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
-#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
-#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
-#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
-#define VCU_ENC_CORE_CTRL 0x30
-#define VCU_ENC_MCU_CTRL 0x34
-#define VCU_DEC_CORE_CTRL 0x38
-#define VCU_DEC_MCU_CTRL 0x3c
-#define VCU_PLL_DIVISOR_MASK 0x3f
-#define VCU_PLL_DIVISOR_SHIFT 4
-#define VCU_SRCSEL_MASK 0x01
-#define VCU_SRCSEL_SHIFT 0
-#define VCU_SRCSEL_PLL 1
-
-#define VCU_PLL_STATUS 0x60
-#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
-
-#define MHZ 1000000
-#define FVCO_MIN (1500U * MHZ)
-#define FVCO_MAX (3000U * MHZ)
-#define DIVISOR_MIN 0
-#define DIVISOR_MAX 63
-#define FRAC 100
-#define LIMIT (10 * MHZ)
-
-/**
- * struct xvcu_device - Xilinx VCU init device structure
- * @dev: Platform device
- * @pll_ref: pll ref clock source
- * @aclk: axi clock source
- * @logicore_reg_ba: logicore reg base address
- * @vcu_slcr_ba: vcu_slcr Register base address
- * @coreclk: core clock frequency
- */
-struct xvcu_device {
- struct device *dev;
- struct clk *pll_ref;
- struct clk *aclk;
- struct regmap *logicore_reg_ba;
- void __iomem *vcu_slcr_ba;
- u32 coreclk;
-};
-
-static struct regmap_config vcu_settings_regmap_config = {
- .name = "regmap",
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0xfff,
- .cache_type = REGCACHE_NONE,
-};
-
-/**
- * struct xvcu_pll_cfg - Helper data
- * @fbdiv: The integer portion of the feedback divider to the PLL
- * @cp: PLL charge pump control
- * @res: PLL loop filter resistor control
- * @lfhf: PLL loop filter high frequency capacitor control
- * @lock_dly: Lock circuit configuration settings for lock windowsize
- * @lock_cnt: Lock circuit counter setting
- */
-struct xvcu_pll_cfg {
- u32 fbdiv;
- u32 cp;
- u32 res;
- u32 lfhf;
- u32 lock_dly;
- u32 lock_cnt;
-};
-
-static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
- { 25, 3, 10, 3, 63, 1000 },
- { 26, 3, 10, 3, 63, 1000 },
- { 27, 4, 6, 3, 63, 1000 },
- { 28, 4, 6, 3, 63, 1000 },
- { 29, 4, 6, 3, 63, 1000 },
- { 30, 4, 6, 3, 63, 1000 },
- { 31, 6, 1, 3, 63, 1000 },
- { 32, 6, 1, 3, 63, 1000 },
- { 33, 4, 10, 3, 63, 1000 },
- { 34, 5, 6, 3, 63, 1000 },
- { 35, 5, 6, 3, 63, 1000 },
- { 36, 5, 6, 3, 63, 1000 },
- { 37, 5, 6, 3, 63, 1000 },
- { 38, 5, 6, 3, 63, 975 },
- { 39, 3, 12, 3, 63, 950 },
- { 40, 3, 12, 3, 63, 925 },
- { 41, 3, 12, 3, 63, 900 },
- { 42, 3, 12, 3, 63, 875 },
- { 43, 3, 12, 3, 63, 850 },
- { 44, 3, 12, 3, 63, 850 },
- { 45, 3, 12, 3, 63, 825 },
- { 46, 3, 12, 3, 63, 800 },
- { 47, 3, 12, 3, 63, 775 },
- { 48, 3, 12, 3, 63, 775 },
- { 49, 3, 12, 3, 63, 750 },
- { 50, 3, 12, 3, 63, 750 },
- { 51, 3, 2, 3, 63, 725 },
- { 52, 3, 2, 3, 63, 700 },
- { 53, 3, 2, 3, 63, 700 },
- { 54, 3, 2, 3, 63, 675 },
- { 55, 3, 2, 3, 63, 675 },
- { 56, 3, 2, 3, 63, 650 },
- { 57, 3, 2, 3, 63, 650 },
- { 58, 3, 2, 3, 63, 625 },
- { 59, 3, 2, 3, 63, 625 },
- { 60, 3, 2, 3, 63, 625 },
- { 61, 3, 2, 3, 63, 600 },
- { 62, 3, 2, 3, 63, 600 },
- { 63, 3, 2, 3, 63, 600 },
- { 64, 3, 2, 3, 63, 600 },
- { 65, 3, 2, 3, 63, 600 },
- { 66, 3, 2, 3, 63, 600 },
- { 67, 3, 2, 3, 63, 600 },
- { 68, 3, 2, 3, 63, 600 },
- { 69, 3, 2, 3, 63, 600 },
- { 70, 3, 2, 3, 63, 600 },
- { 71, 3, 2, 3, 63, 600 },
- { 72, 3, 2, 3, 63, 600 },
- { 73, 3, 2, 3, 63, 600 },
- { 74, 3, 2, 3, 63, 600 },
- { 75, 3, 2, 3, 63, 600 },
- { 76, 3, 2, 3, 63, 600 },
- { 77, 3, 2, 3, 63, 600 },
- { 78, 3, 2, 3, 63, 600 },
- { 79, 3, 2, 3, 63, 600 },
- { 80, 3, 2, 3, 63, 600 },
- { 81, 3, 2, 3, 63, 600 },
- { 82, 3, 2, 3, 63, 600 },
- { 83, 4, 2, 3, 63, 600 },
- { 84, 4, 2, 3, 63, 600 },
- { 85, 4, 2, 3, 63, 600 },
- { 86, 4, 2, 3, 63, 600 },
- { 87, 4, 2, 3, 63, 600 },
- { 88, 4, 2, 3, 63, 600 },
- { 89, 4, 2, 3, 63, 600 },
- { 90, 4, 2, 3, 63, 600 },
- { 91, 4, 2, 3, 63, 600 },
- { 92, 4, 2, 3, 63, 600 },
- { 93, 4, 2, 3, 63, 600 },
- { 94, 4, 2, 3, 63, 600 },
- { 95, 4, 2, 3, 63, 600 },
- { 96, 4, 2, 3, 63, 600 },
- { 97, 4, 2, 3, 63, 600 },
- { 98, 4, 2, 3, 63, 600 },
- { 99, 4, 2, 3, 63, 600 },
- { 100, 4, 2, 3, 63, 600 },
- { 101, 4, 2, 3, 63, 600 },
- { 102, 4, 2, 3, 63, 600 },
- { 103, 5, 2, 3, 63, 600 },
- { 104, 5, 2, 3, 63, 600 },
- { 105, 5, 2, 3, 63, 600 },
- { 106, 5, 2, 3, 63, 600 },
- { 107, 3, 4, 3, 63, 600 },
- { 108, 3, 4, 3, 63, 600 },
- { 109, 3, 4, 3, 63, 600 },
- { 110, 3, 4, 3, 63, 600 },
- { 111, 3, 4, 3, 63, 600 },
- { 112, 3, 4, 3, 63, 600 },
- { 113, 3, 4, 3, 63, 600 },
- { 114, 3, 4, 3, 63, 600 },
- { 115, 3, 4, 3, 63, 600 },
- { 116, 3, 4, 3, 63, 600 },
- { 117, 3, 4, 3, 63, 600 },
- { 118, 3, 4, 3, 63, 600 },
- { 119, 3, 4, 3, 63, 600 },
- { 120, 3, 4, 3, 63, 600 },
- { 121, 3, 4, 3, 63, 600 },
- { 122, 3, 4, 3, 63, 600 },
- { 123, 3, 4, 3, 63, 600 },
- { 124, 3, 4, 3, 63, 600 },
- { 125, 3, 4, 3, 63, 600 },
-};
-
-/**
- * xvcu_read - Read from the VCU register space
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- *
- * Return: Returns 32bit value from VCU register specified
- *
- */
-static inline u32 xvcu_read(void __iomem *iomem, u32 offset)
-{
- return ioread32(iomem + offset);
-}
-
-/**
- * xvcu_write - Write to the VCU register space
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- * @value: Value to write
- */
-static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
-{
- iowrite32(value, iomem + offset);
-}
-
-/**
- * xvcu_write_field_reg - Write to the vcu reg field
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- * @field: vcu reg field to write to
- * @mask: vcu reg mask
- * @shift: vcu reg number of bits to shift the bitfield
- */
-static void xvcu_write_field_reg(void __iomem *iomem, int offset,
- u32 field, u32 mask, int shift)
-{
- u32 val = xvcu_read(iomem, offset);
-
- val &= ~(mask << shift);
- val |= (field & mask) << shift;
-
- xvcu_write(iomem, offset, val);
-}
-
-/**
- * xvcu_set_vcu_pll_info - Set the VCU PLL info
- * @xvcu: Pointer to the xvcu_device structure
- *
- * Programming the VCU PLL based on the user configuration
- * (ref clock freq, core clock freq, mcu clock freq).
- * Core clock frequency has higher priority than mcu clock frequency
- * Errors in following cases
- * - When mcu or clock clock get from logicoreIP is 0
- * - When VCU PLL DIV related bits value other than 1
- * - When proper data not found for given data
- * - When sis570_1 clocksource related operation failed
- *
- * Return: Returns status, either success or error+reason
- */
-static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
-{
- u32 refclk, coreclk, mcuclk, inte, deci;
- u32 divisor_mcu, divisor_core, fvco;
- u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
- u32 cfg_val, mod, ctrl;
- int ret, i;
- const struct xvcu_pll_cfg *found = NULL;
-
- regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK, &inte);
- regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC, &deci);
- regmap_read(xvcu->logicore_reg_ba, VCU_CORE_CLK, &coreclk);
- coreclk *= MHZ;
- regmap_read(xvcu->logicore_reg_ba, VCU_MCU_CLK, &mcuclk);
- mcuclk *= MHZ;
- if (!mcuclk || !coreclk) {
- dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
- return -EINVAL;
- }
-
- refclk = (inte * MHZ) + (deci * (MHZ / FRAC));
- dev_dbg(xvcu->dev, "Ref clock from logicoreIP is %uHz\n", refclk);
- dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
- dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
-
- clk_disable_unprepare(xvcu->pll_ref);
- ret = clk_set_rate(xvcu->pll_ref, refclk);
- if (ret)
- dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
-
- ret = clk_prepare_enable(xvcu->pll_ref);
- if (ret) {
- dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
- return ret;
- }
-
- refclk = clk_get_rate(xvcu->pll_ref);
-
- /*
- * The divide-by-2 should be always enabled (==1)
- * to meet the timing in the design.
- * Otherwise, it's an error
- */
- vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
- clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
- clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
- if (clkoutdiv != 1) {
- dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
- return -EINVAL;
- }
-
- for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
- const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
-
- fvco = cfg->fbdiv * refclk;
- if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
- pll_clk = fvco / VCU_PLL_DIV2;
- if (fvco % VCU_PLL_DIV2 != 0)
- pll_clk++;
- mod = pll_clk % coreclk;
- if (mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- } else if (coreclk - mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- divisor_core++;
- } else {
- continue;
- }
- if (divisor_core >= DIVISOR_MIN &&
- divisor_core <= DIVISOR_MAX) {
- found = cfg;
- divisor_mcu = pll_clk / mcuclk;
- mod = pll_clk % mcuclk;
- if (mcuclk - mod < LIMIT)
- divisor_mcu++;
- break;
- }
- }
- }
-
- if (!found) {
- dev_err(xvcu->dev, "Invalid clock combination.\n");
- return -EINVAL;
- }
-
- xvcu->coreclk = pll_clk / divisor_core;
- mcuclk = pll_clk / divisor_mcu;
- dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
- dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
- dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
-
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
- vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
- VCU_PLL_CTRL_FBDIV_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
- VCU_PLL_CTRL_POR_IN_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
- VCU_PLL_CTRL_POR_IN_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
- VCU_PLL_CTRL_PWR_POR_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
- VCU_PLL_CTRL_PWR_POR_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
-
- /* Set divisor for the core and mcu clock */
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
-
- /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
- cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
- (found->cp << VCU_PLL_CFG_CP_SHIFT) |
- (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
- (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
- (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
-
- return 0;
-}
-
-/**
- * xvcu_set_pll - PLL init sequence
- * @xvcu: Pointer to the xvcu_device structure
- *
- * Call the api to set the PLL info and once that is done then
- * init the PLL sequence to make the PLL stable.
- *
- * Return: Returns status, either success or error+reason
- */
-static int xvcu_set_pll(struct xvcu_device *xvcu)
-{
- u32 lock_status;
- unsigned long timeout;
- int ret;
-
- ret = xvcu_set_vcu_pll_info(xvcu);
- if (ret) {
- dev_err(xvcu->dev, "failed to set pll info\n");
- return ret;
- }
-
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- /*
- * Defined the timeout for the max time to wait the
- * PLL_STATUS to be locked.
- */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
- if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- return 0;
- }
- } while (!time_after(jiffies, timeout));
-
- /* PLL is not locked even after the timeout of the 2sec */
- dev_err(xvcu->dev, "PLL is not locked\n");
- return -ETIMEDOUT;
-}
-
-/**
- * xvcu_probe - Probe existence of the logicoreIP
- * and initialize PLL
- *
- * @pdev: Pointer to the platform_device structure
- *
- * Return: Returns 0 on success
- * Negative error code otherwise
- */
-static int xvcu_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct xvcu_device *xvcu;
- void __iomem *regs;
- int ret;
-
- xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
- if (!xvcu)
- return -ENOMEM;
-
- xvcu->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
- if (!res) {
- dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
- return -ENODEV;
- }
-
- xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->vcu_slcr_ba) {
- dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
- return -ENOMEM;
- }
-
- xvcu->logicore_reg_ba =
- syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
- if (IS_ERR(xvcu->logicore_reg_ba)) {
- dev_info(&pdev->dev,
- "could not find xlnx,vcu-settings: trying direct register access\n");
-
- res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
- }
-
- regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!regs) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
- }
-
- xvcu->logicore_reg_ba =
- devm_regmap_init_mmio(&pdev->dev, regs,
- &vcu_settings_regmap_config);
- if (IS_ERR(xvcu->logicore_reg_ba)) {
- dev_err(&pdev->dev, "failed to init regmap\n");
- return PTR_ERR(xvcu->logicore_reg_ba);
- }
- }
-
- xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(xvcu->aclk)) {
- dev_err(&pdev->dev, "Could not get aclk clock\n");
- return PTR_ERR(xvcu->aclk);
- }
-
- xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- if (IS_ERR(xvcu->pll_ref)) {
- dev_err(&pdev->dev, "Could not get pll_ref clock\n");
- return PTR_ERR(xvcu->pll_ref);
- }
-
- ret = clk_prepare_enable(xvcu->aclk);
- if (ret) {
- dev_err(&pdev->dev, "aclk clock enable failed\n");
- return ret;
- }
-
- ret = clk_prepare_enable(xvcu->pll_ref);
- if (ret) {
- dev_err(&pdev->dev, "pll_ref clock enable failed\n");
- goto error_aclk;
- }
-
- /*
- * Do the Gasket isolation and put the VCU out of reset
- * Bit 0 : Gasket isolation
- * Bit 1 : put VCU out of reset
- */
- regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
-
- /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
- ret = xvcu_set_pll(xvcu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the pll\n");
- goto error_pll_ref;
- }
-
- dev_set_drvdata(&pdev->dev, xvcu);
-
- return 0;
-
-error_pll_ref:
- clk_disable_unprepare(xvcu->pll_ref);
-error_aclk:
- clk_disable_unprepare(xvcu->aclk);
- return ret;
-}
-
-/**
- * xvcu_remove - Insert gasket isolation
- * and disable the clock
- * @pdev: Pointer to the platform_device structure
- *
- * Return: Returns 0 on success
- * Negative error code otherwise
- */
-static int xvcu_remove(struct platform_device *pdev)
-{
- struct xvcu_device *xvcu;
-
- xvcu = platform_get_drvdata(pdev);
- if (!xvcu)
- return -ENODEV;
-
- /* Add the the Gasket isolation and put the VCU in reset. */
- regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
-
- clk_disable_unprepare(xvcu->pll_ref);
- clk_disable_unprepare(xvcu->aclk);
-
- return 0;
-}
-
-static const struct of_device_id xvcu_of_id_table[] = {
- { .compatible = "xlnx,vcu" },
- { .compatible = "xlnx,vcu-logicoreip-1.0" },
- { }
-};
-MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
-
-static struct platform_driver xvcu_driver = {
- .driver = {
- .name = "xilinx-vcu",
- .of_match_table = xvcu_of_id_table,
- },
- .probe = xvcu_probe,
- .remove = xvcu_remove,
-};
-
-module_platform_driver(xvcu_driver);
-
-MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
-MODULE_DESCRIPTION("Xilinx VCU init Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig
deleted file mode 100644
index 1cf1938da541..000000000000
--- a/drivers/soc/zte/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ZTE SoC drivers
-#
-menuconfig SOC_ZTE
- depends on ARCH_ZX || COMPILE_TEST
- bool "ZTE SoC driver support"
-
-if SOC_ZTE
-
-config ZX2967_PM_DOMAINS
- bool "ZX2967 PM domains"
- depends on PM_GENERIC_DOMAINS
-
-endif
diff --git a/drivers/soc/zte/Makefile b/drivers/soc/zte/Makefile
deleted file mode 100644
index 728c677addcd..000000000000
--- a/drivers/soc/zte/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ZTE SOC drivers
-#
-obj-$(CONFIG_ZX2967_PM_DOMAINS) += zx2967_pm_domains.o
-obj-$(CONFIG_ZX2967_PM_DOMAINS) += zx296718_pm_domains.o
diff --git a/drivers/soc/zte/zx296718_pm_domains.c b/drivers/soc/zte/zx296718_pm_domains.c
deleted file mode 100644
index 4daab06bbc26..000000000000
--- a/drivers/soc/zte/zx296718_pm_domains.c
+++ /dev/null
@@ -1,181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <dt-bindings/soc/zte,pm_domains.h>
-#include "zx2967_pm_domains.h"
-
-static u16 zx296718_offsets[REG_ARRAY_SIZE] = {
- [REG_CLKEN] = 0x18,
- [REG_ISOEN] = 0x1c,
- [REG_RSTEN] = 0x20,
- [REG_PWREN] = 0x24,
- [REG_ACK_SYNC] = 0x28,
-};
-
-enum {
- PCU_DM_VOU = 0,
- PCU_DM_SAPPU,
- PCU_DM_VDE,
- PCU_DM_VCE,
- PCU_DM_HDE,
- PCU_DM_VIU,
- PCU_DM_USB20,
- PCU_DM_USB21,
- PCU_DM_USB30,
- PCU_DM_HSIC,
- PCU_DM_GMAC,
- PCU_DM_TS,
-};
-
-static struct zx2967_pm_domain vou_domain = {
- .dm = {
- .name = "vou_domain",
- },
- .bit = PCU_DM_VOU,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain sappu_domain = {
- .dm = {
- .name = "sappu_domain",
- },
- .bit = PCU_DM_SAPPU,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain vde_domain = {
- .dm = {
- .name = "vde_domain",
- },
- .bit = PCU_DM_VDE,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain vce_domain = {
- .dm = {
- .name = "vce_domain",
- },
- .bit = PCU_DM_VCE,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain hde_domain = {
- .dm = {
- .name = "hde_domain",
- },
- .bit = PCU_DM_HDE,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain viu_domain = {
- .dm = {
- .name = "viu_domain",
- },
- .bit = PCU_DM_VIU,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain usb20_domain = {
- .dm = {
- .name = "usb20_domain",
- },
- .bit = PCU_DM_USB20,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain usb21_domain = {
- .dm = {
- .name = "usb21_domain",
- },
- .bit = PCU_DM_USB21,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain usb30_domain = {
- .dm = {
- .name = "usb30_domain",
- },
- .bit = PCU_DM_USB30,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain hsic_domain = {
- .dm = {
- .name = "hsic_domain",
- },
- .bit = PCU_DM_HSIC,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain gmac_domain = {
- .dm = {
- .name = "gmac_domain",
- },
- .bit = PCU_DM_GMAC,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct zx2967_pm_domain ts_domain = {
- .dm = {
- .name = "ts_domain",
- },
- .bit = PCU_DM_TS,
- .polarity = PWREN,
- .reg_offset = zx296718_offsets,
-};
-
-static struct generic_pm_domain *zx296718_pm_domains[] = {
- [DM_ZX296718_VOU] = &vou_domain.dm,
- [DM_ZX296718_SAPPU] = &sappu_domain.dm,
- [DM_ZX296718_VDE] = &vde_domain.dm,
- [DM_ZX296718_VCE] = &vce_domain.dm,
- [DM_ZX296718_HDE] = &hde_domain.dm,
- [DM_ZX296718_VIU] = &viu_domain.dm,
- [DM_ZX296718_USB20] = &usb20_domain.dm,
- [DM_ZX296718_USB21] = &usb21_domain.dm,
- [DM_ZX296718_USB30] = &usb30_domain.dm,
- [DM_ZX296718_HSIC] = &hsic_domain.dm,
- [DM_ZX296718_GMAC] = &gmac_domain.dm,
- [DM_ZX296718_TS] = &ts_domain.dm,
-};
-
-static int zx296718_pd_probe(struct platform_device *pdev)
-{
- return zx2967_pd_probe(pdev,
- zx296718_pm_domains,
- ARRAY_SIZE(zx296718_pm_domains));
-}
-
-static const struct of_device_id zx296718_pm_domain_matches[] = {
- { .compatible = "zte,zx296718-pcu", },
- { },
-};
-
-static struct platform_driver zx296718_pd_driver = {
- .driver = {
- .name = "zx296718-powerdomain",
- .of_match_table = zx296718_pm_domain_matches,
- },
- .probe = zx296718_pd_probe,
-};
-
-static int __init zx296718_pd_init(void)
-{
- return platform_driver_register(&zx296718_pd_driver);
-}
-subsys_initcall(zx296718_pd_init);
diff --git a/drivers/soc/zte/zx2967_pm_domains.c b/drivers/soc/zte/zx2967_pm_domains.c
deleted file mode 100644
index a4503e31b616..000000000000
--- a/drivers/soc/zte/zx2967_pm_domains.c
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-
-#include "zx2967_pm_domains.h"
-
-#define PCU_DM_CLKEN(zpd) ((zpd)->reg_offset[REG_CLKEN])
-#define PCU_DM_ISOEN(zpd) ((zpd)->reg_offset[REG_ISOEN])
-#define PCU_DM_RSTEN(zpd) ((zpd)->reg_offset[REG_RSTEN])
-#define PCU_DM_PWREN(zpd) ((zpd)->reg_offset[REG_PWREN])
-#define PCU_DM_ACK_SYNC(zpd) ((zpd)->reg_offset[REG_ACK_SYNC])
-
-static void __iomem *pcubase;
-
-static int zx2967_power_on(struct generic_pm_domain *domain)
-{
- struct zx2967_pm_domain *zpd = (struct zx2967_pm_domain *)domain;
- unsigned long loop = 1000;
- u32 val;
-
- val = readl_relaxed(pcubase + PCU_DM_PWREN(zpd));
- if (zpd->polarity == PWREN)
- val |= BIT(zpd->bit);
- else
- val &= ~BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_PWREN(zpd));
-
- do {
- udelay(1);
- val = readl_relaxed(pcubase + PCU_DM_ACK_SYNC(zpd))
- & BIT(zpd->bit);
- } while (--loop && !val);
-
- if (!loop) {
- pr_err("Error: %s %s fail\n", __func__, domain->name);
- return -EIO;
- }
-
- val = readl_relaxed(pcubase + PCU_DM_RSTEN(zpd));
- val |= BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_RSTEN(zpd));
- udelay(5);
-
- val = readl_relaxed(pcubase + PCU_DM_ISOEN(zpd));
- val &= ~BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_ISOEN(zpd));
- udelay(5);
-
- val = readl_relaxed(pcubase + PCU_DM_CLKEN(zpd));
- val |= BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_CLKEN(zpd));
- udelay(5);
-
- pr_debug("poweron %s\n", domain->name);
-
- return 0;
-}
-
-static int zx2967_power_off(struct generic_pm_domain *domain)
-{
- struct zx2967_pm_domain *zpd = (struct zx2967_pm_domain *)domain;
- unsigned long loop = 1000;
- u32 val;
-
- val = readl_relaxed(pcubase + PCU_DM_CLKEN(zpd));
- val &= ~BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_CLKEN(zpd));
- udelay(5);
-
- val = readl_relaxed(pcubase + PCU_DM_ISOEN(zpd));
- val |= BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_ISOEN(zpd));
- udelay(5);
-
- val = readl_relaxed(pcubase + PCU_DM_RSTEN(zpd));
- val &= ~BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_RSTEN(zpd));
- udelay(5);
-
- val = readl_relaxed(pcubase + PCU_DM_PWREN(zpd));
- if (zpd->polarity == PWREN)
- val &= ~BIT(zpd->bit);
- else
- val |= BIT(zpd->bit);
- writel_relaxed(val, pcubase + PCU_DM_PWREN(zpd));
-
- do {
- udelay(1);
- val = readl_relaxed(pcubase + PCU_DM_ACK_SYNC(zpd))
- & BIT(zpd->bit);
- } while (--loop && val);
-
- if (!loop) {
- pr_err("Error: %s %s fail\n", __func__, domain->name);
- return -EIO;
- }
-
- pr_debug("poweroff %s\n", domain->name);
-
- return 0;
-}
-
-int zx2967_pd_probe(struct platform_device *pdev,
- struct generic_pm_domain **zx_pm_domains,
- int domain_num)
-{
- struct genpd_onecell_data *genpd_data;
- struct resource *res;
- int i;
-
- genpd_data = devm_kzalloc(&pdev->dev, sizeof(*genpd_data), GFP_KERNEL);
- if (!genpd_data)
- return -ENOMEM;
-
- genpd_data->domains = zx_pm_domains;
- genpd_data->num_domains = domain_num;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcubase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pcubase))
- return PTR_ERR(pcubase);
-
- for (i = 0; i < domain_num; ++i) {
- zx_pm_domains[i]->power_on = zx2967_power_on;
- zx_pm_domains[i]->power_off = zx2967_power_off;
-
- pm_genpd_init(zx_pm_domains[i], NULL, false);
- }
-
- of_genpd_add_provider_onecell(pdev->dev.of_node, genpd_data);
- dev_info(&pdev->dev, "powerdomain init ok\n");
- return 0;
-}
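[Note: both zx2967 power paths above rely on the same poll-with-timeout handshake on the ACK_SYNC register: flip the enable bit, then poll the ack for up to 1000 iterations before failing with -EIO. A minimal user-space sketch of that idiom, with a fake register standing in for the hardware (names here are illustrative, not the driver's API):

#include <stdio.h>

/* fake "ack" register that reports ready after a few polls */
static unsigned int polls;

static unsigned int read_ack(void)
{
	polls++;
	return polls >= 3;
}

int main(void)
{
	unsigned long loop = 1000;
	unsigned int val;

	do {
		/* the driver does udelay(1) here; omitted in this sketch */
		val = read_ack();
	} while (--loop && !val);

	if (!loop) {
		fprintf(stderr, "ack poll timed out\n");
		return 1;
	}
	printf("acked after %lu polls\n", 1000 - loop);
	return 0;
}
]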
diff --git a/drivers/soc/zte/zx2967_pm_domains.h b/drivers/soc/zte/zx2967_pm_domains.h
deleted file mode 100644
index f586c02410ff..000000000000
--- a/drivers/soc/zte/zx2967_pm_domains.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header for ZTE's Power Domain Driver support
- *
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#ifndef __ZTE_ZX2967_PM_DOMAIN_H
-#define __ZTE_ZX2967_PM_DOMAIN_H
-
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-
-enum {
- REG_CLKEN,
- REG_ISOEN,
- REG_RSTEN,
- REG_PWREN,
- REG_PWRDN,
- REG_ACK_SYNC,
-
- /* The size of the array - must be last */
- REG_ARRAY_SIZE,
-};
-
-enum zx2967_power_polarity {
- PWREN,
- PWRDN,
-};
-
-struct zx2967_pm_domain {
- struct generic_pm_domain dm;
- const u16 bit;
- const enum zx2967_power_polarity polarity;
- const u16 *reg_offset;
-};
-
-int zx2967_pd_probe(struct platform_device *pdev,
- struct generic_pm_domain **zx_pm_domains,
- int domain_num);
-
-#endif /* __ZTE_ZX2967_PM_DOMAIN_H */
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index d1e8c3a54976..46885429928a 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -267,8 +267,10 @@ static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
ret = do_transfer(bus, msg);
if (ret != 0 && ret != -ENODATA)
- dev_err(bus->dev, "trf on Slave %d failed:%d\n",
- msg->dev_num, ret);
+ dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
+ msg->dev_num, ret,
+ (msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
+ msg->addr, msg->len);
if (msg->page)
sdw_reset_page(bus, msg->dev_num);
@@ -405,10 +407,11 @@ sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
return sdw_transfer(slave->bus, &msg);
}
-static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
+EXPORT_SYMBOL(sdw_write_no_pm);
static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
@@ -476,8 +479,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
-static int
-sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
u8 buf;
int ret;
@@ -488,6 +490,19 @@ sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
else
return buf;
}
+EXPORT_SYMBOL(sdw_read_no_pm);
+
+static int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ int tmp;
+
+ tmp = sdw_read_no_pm(slave, addr);
+ if (tmp < 0)
+ return tmp;
+
+ tmp = (tmp & ~mask) | val;
+ return sdw_write_no_pm(slave, addr, tmp);
+}
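[Note: the new sdw_update_no_pm() helper above is a plain read-modify-write. A standalone sketch of its bit manipulation (illustrative only): bits of val outside mask are not cleared, so callers are expected to pass val already masked and shifted into position.

#include <stdio.h>

static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	/* clear the field selected by mask, then OR in the new value */
	return (reg & ~mask) | val;
}

int main(void)
{
	/* set two interrupt-mask bits without disturbing the rest */
	printf("0x%02x\n", update_bits(0xf0, 0x03, 0x03)); /* prints 0xf3 */
	return 0;
}
]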
/**
* sdw_nread() - Read "n" contiguous SDW Slave registers
@@ -500,16 +515,16 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(slave->bus->dev);
+ ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(slave->bus->dev);
+ pm_runtime_put_noidle(&slave->dev);
return ret;
}
ret = sdw_nread_no_pm(slave, addr, count, val);
- pm_runtime_mark_last_busy(slave->bus->dev);
- pm_runtime_put(slave->bus->dev);
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put(&slave->dev);
return ret;
}
@@ -526,16 +541,16 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(slave->bus->dev);
+ ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(slave->bus->dev);
+ pm_runtime_put_noidle(&slave->dev);
return ret;
}
ret = sdw_nwrite_no_pm(slave, addr, count, val);
- pm_runtime_mark_last_busy(slave->bus->dev);
- pm_runtime_put(slave->bus->dev);
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put(&slave->dev);
return ret;
}
@@ -623,6 +638,7 @@ err:
static int sdw_assign_device_num(struct sdw_slave *slave)
{
+ struct sdw_bus *bus = slave->bus;
int ret, dev_num;
bool new_device = false;
@@ -633,7 +649,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
dev_num = sdw_get_device_num(slave);
mutex_unlock(&slave->bus->bus_lock);
if (dev_num < 0) {
- dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
+ dev_err(bus->dev, "Get dev_num failed: %d\n",
dev_num);
return dev_num;
}
@@ -646,7 +662,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
if (!new_device)
- dev_dbg(slave->bus->dev,
+ dev_dbg(bus->dev,
"Slave already registered, reusing dev_num:%d\n",
slave->dev_num);
@@ -656,7 +672,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
- dev_err(&slave->dev, "Program device_num %d failed: %d\n",
+ dev_err(bus->dev, "Program device_num %d failed: %d\n",
dev_num, ret);
return ret;
}
@@ -679,9 +695,8 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
id->class_id = SDW_CLASS_ID(addr);
dev_dbg(bus->dev,
- "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n",
- id->class_id, id->part_id, id->mfg_id,
- id->unique_id, id->sdw_version);
+ "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
+ id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
static int sdw_program_device_num(struct sdw_bus *bus)
@@ -735,7 +750,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
*/
ret = sdw_assign_device_num(slave);
if (ret) {
- dev_err(slave->bus->dev,
+ dev_err(bus->dev,
"Assign dev_num failed:%d\n",
ret);
return ret;
@@ -775,15 +790,17 @@ static int sdw_program_device_num(struct sdw_bus *bus)
static void sdw_modify_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
- mutex_lock(&slave->bus->bus_lock);
+ struct sdw_bus *bus = slave->bus;
- dev_vdbg(&slave->dev,
+ mutex_lock(&bus->bus_lock);
+
+ dev_vdbg(bus->dev,
"%s: changing status slave %d status %d new status %d\n",
__func__, slave->dev_num, slave->status, status);
if (status == SDW_SLAVE_UNATTACHED) {
dev_dbg(&slave->dev,
- "%s: initializing completion for Slave %d\n",
+ "%s: initializing enumeration and init completion for Slave %d\n",
__func__, slave->dev_num);
init_completion(&slave->enumeration_complete);
@@ -792,13 +809,13 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
} else if ((status == SDW_SLAVE_ATTACHED) &&
(slave->status == SDW_SLAVE_UNATTACHED)) {
dev_dbg(&slave->dev,
- "%s: signaling completion for Slave %d\n",
+ "%s: signaling enumeration completion for Slave %d\n",
__func__, slave->dev_num);
complete(&slave->enumeration_complete);
}
slave->status = status;
- mutex_unlock(&slave->bus->bus_lock);
+ mutex_unlock(&bus->bus_lock);
}
static enum sdw_clk_stop_mode sdw_get_clk_stop_mode(struct sdw_slave *slave)
@@ -950,17 +967,17 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
simple_clk_stop = false;
}
- if (is_slave && !simple_clk_stop) {
+ /* Skip remaining clock stop preparation if no Slave is attached */
+ if (!is_slave)
+ return ret;
+
+ if (!simple_clk_stop) {
ret = sdw_bus_wait_for_clk_prep_deprep(bus,
SDW_BROADCAST_DEV_NUM);
if (ret < 0)
return ret;
}
- /* Don't need to inform slaves if there is no slave attached */
- if (!is_slave)
- return ret;
-
/* Inform slaves that prep is done */
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
@@ -1074,16 +1091,13 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
"clk stop deprep failed:%d", ret);
}
- if (is_slave && !simple_clk_stop)
- sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
-
- /*
- * Don't need to call slave callback function if there is no slave
- * attached
- */
+ /* Skip remaining clock stop de-preparation if no Slave is attached */
if (!is_slave)
return 0;
+ if (!simple_clk_stop)
+ sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
continue;
@@ -1127,7 +1141,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
if (ret < 0)
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DPN_INTMASK write failed:%d\n", val);
return ret;
@@ -1210,7 +1224,7 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
}
scale_index++;
- ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base);
+ ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
if (ret < 0) {
dev_err(&slave->dev,
"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
@@ -1218,13 +1232,13 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
}
/* initialize scale for both banks */
- ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
+ ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
if (ret < 0) {
dev_err(&slave->dev,
"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
return ret;
}
- ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
+ ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
if (ret < 0)
dev_err(&slave->dev,
"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
@@ -1256,9 +1270,9 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
val = slave->prop.scp_int1_mask;
/* Enable SCP interrupts */
- ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
+ ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INTMASK1 write failed:%d\n", ret);
return ret;
}
@@ -1271,9 +1285,9 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
val = prop->dp0_prop->imp_def_interrupts;
val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
- ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
+ ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
if (ret < 0)
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INTMASK read failed:%d\n", ret);
return ret;
}
@@ -1283,9 +1297,9 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
u8 clear, impl_int_mask;
int status, status2, ret, count = 0;
- status = sdw_read(slave, SDW_DP0_INT);
+ status = sdw_read_no_pm(slave, SDW_DP0_INT);
if (status < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INT read failed:%d\n", status);
return status;
}
@@ -1322,17 +1336,17 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
}
/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
- ret = sdw_write(slave, SDW_DP0_INT, clear);
+ ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INT write failed:%d\n", ret);
return ret;
}
/* Read DP0 interrupt again */
- status2 = sdw_read(slave, SDW_DP0_INT);
+ status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
if (status2 < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INT read failed:%d\n", status2);
return status2;
}
@@ -1345,7 +1359,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
if (count == SDW_READ_INTR_CLEAR_RETRY)
- dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read\n");
+ dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
return ret;
}
@@ -1361,9 +1375,9 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
return sdw_handle_dp0_interrupt(slave, slave_status);
addr = SDW_DPN_INT(port);
- status = sdw_read(slave, addr);
+ status = sdw_read_no_pm(slave, addr);
if (status < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DPN_INT read failed:%d\n", status);
return status;
@@ -1395,17 +1409,17 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
}
/* clear the interrupt but don't touch reserved fields */
- ret = sdw_write(slave, addr, clear);
+ ret = sdw_write_no_pm(slave, addr, clear);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DPN_INT write failed:%d\n", ret);
return ret;
}
/* Read DPN interrupt again */
- status2 = sdw_read(slave, addr);
+ status2 = sdw_read_no_pm(slave, addr);
if (status2 < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DPN_INT read failed:%d\n", status2);
return status2;
}
@@ -1418,7 +1432,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
if (count == SDW_READ_INTR_CLEAR_RETRY)
- dev_warn(slave->bus->dev, "Reached MAX_RETRY on port read");
+ dev_warn(&slave->dev, "Reached MAX_RETRY on port read");
return ret;
}
@@ -1440,30 +1454,30 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
- pm_runtime_put_noidle(slave->bus->dev);
+ pm_runtime_put_noidle(&slave->dev);
return ret;
}
/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
- ret = sdw_read(slave, SDW_SCP_INT1);
+ ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
goto io_err;
}
buf = ret;
- ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2);
+ ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
goto io_err;
}
if (slave->prop.is_sdca) {
- ret = sdw_read(slave, SDW_DP0_INT);
+ ret = sdw_read_no_pm(slave, SDW_DP0_INT);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INT read failed:%d\n", ret);
goto io_err;
}
@@ -1558,9 +1572,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
}
/* Ack interrupt */
- ret = sdw_write(slave, SDW_SCP_INT1, clear);
+ ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INT1 write failed:%d\n", ret);
goto io_err;
}
@@ -1572,25 +1586,25 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
* Read status again to ensure no new interrupts arrived
* while servicing interrupts.
*/
- ret = sdw_read(slave, SDW_SCP_INT1);
+ ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
goto io_err;
}
_buf = ret;
- ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2);
+ ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
goto io_err;
}
if (slave->prop.is_sdca) {
- ret = sdw_read(slave, SDW_DP0_INT);
+ ret = sdw_read_no_pm(slave, SDW_DP0_INT);
if (ret < 0) {
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"SDW_DP0_INT read failed:%d\n", ret);
goto io_err;
}
@@ -1616,7 +1630,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
if (count == SDW_READ_INTR_CLEAR_RETRY)
- dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read\n");
+ dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
io_err:
pm_runtime_mark_last_busy(&slave->dev);
@@ -1722,7 +1736,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
case SDW_SLAVE_ALERT:
ret = sdw_handle_slave_alerts(slave);
if (ret)
- dev_err(bus->dev,
+ dev_err(&slave->dev,
"Slave %d alert handling failed: %d\n",
i, ret);
break;
@@ -1741,24 +1755,29 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
ret = sdw_initialize_slave(slave);
if (ret)
- dev_err(bus->dev,
+ dev_err(&slave->dev,
"Slave %d initialization failed: %d\n",
i, ret);
break;
default:
- dev_err(bus->dev, "Invalid slave %d status:%d\n",
+ dev_err(&slave->dev, "Invalid slave %d status:%d\n",
i, status[i]);
break;
}
ret = sdw_update_slave_status(slave, status[i]);
if (ret)
- dev_err(slave->bus->dev,
+ dev_err(&slave->dev,
"Update Slave status failed:%d\n", ret);
- if (attached_initializing)
+ if (attached_initializing) {
+ dev_dbg(&slave->dev,
+ "%s: signaling initialization completion for Slave %d\n",
+ __func__, slave->dev_num);
+
complete(&slave->initialization_complete);
+ }
}
return ret;
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 9fa55164354a..d05442e646a3 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -188,7 +188,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_PDI_CONFIG_PORT GENMASK(4, 0)
/* Driver defaults */
-#define CDNS_TX_TIMEOUT 2000
+#define CDNS_TX_TIMEOUT 500
#define CDNS_SCP_RX_FIFOLEVEL 0x2
@@ -483,11 +483,11 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
for (i = 0; i < count; i++) {
if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
no_ack = 1;
- dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
- if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
- nack = 1;
- dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
- }
+ dev_vdbg(cdns->dev, "Msg Ack not received, cmd %d\n", i);
+ }
+ if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+ nack = 1;
+ dev_err_ratelimited(cdns->dev, "Msg NACK received, cmd %d\n", i);
}
}
@@ -734,21 +734,18 @@ static void cdns_read_response(struct sdw_cdns *cdns)
}
static int cdns_update_slave_status(struct sdw_cdns *cdns,
- u32 slave0, u32 slave1)
+ u64 slave_intstat)
{
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
bool is_slave = false;
- u64 slave;
u32 mask;
int i, set_status;
- /* combine the two status */
- slave = ((u64)slave1 << 32) | slave0;
memset(status, 0, sizeof(status));
for (i = 0; i <= SDW_MAX_DEVICES; i++) {
- mask = (slave >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
- CDNS_MCP_SLAVE_STATUS_BITS;
+ mask = (slave_intstat >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
+ CDNS_MCP_SLAVE_STATUS_BITS;
if (!mask)
continue;
@@ -918,13 +915,17 @@ static void cdns_update_slave_status_work(struct work_struct *work)
struct sdw_cdns *cdns =
container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
-
- dev_dbg_ratelimited(cdns->dev, "Slave status change\n");
+ u64 slave_intstat;
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
- cdns_update_slave_status(cdns, slave0, slave1);
+ /* combine the two status */
+ slave_intstat = ((u64)slave1 << 32) | slave0;
+
+ dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
+
+ cdns_update_slave_status(cdns, slave_intstat);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
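[Note: a standalone sketch of the unpacking that cdns_update_slave_status() performs on the combined value. The 4-bit one-hot field per device matches my reading of CDNS_MCP_SLAVE_STATUS_NUM/CDNS_MCP_SLAVE_STATUS_BITS in this driver; treat the field width as an assumption. Twelve devices at 4 bits each is why a single u64 is needed.

#include <stdint.h>
#include <stdio.h>

#define STATUS_NUM	4	/* assumed bits per device, one-hot */
#define STATUS_BITS	0xf

int main(void)
{
	uint32_t slave0 = 0x00000021;	/* devices 0 and 1 report status */
	uint32_t slave1 = 0x00000001;	/* device 8 reports status */
	uint64_t slave_intstat = ((uint64_t)slave1 << 32) | slave0;
	int i;

	/* 12 device numbers, i.e. SDW_MAX_DEVICES + 1 in the driver */
	for (i = 0; i < 12; i++) {
		unsigned int mask = (slave_intstat >> (i * STATUS_NUM))
				    & STATUS_BITS;

		if (mask)
			printf("device %d: status field 0x%x\n", i, mask);
	}
	return 0;
}
]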
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 66adb258a425..a2d5cdaa9998 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -967,7 +967,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
/* Port configuration */
- pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
+ pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
goto error;
@@ -1673,10 +1673,12 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
!clock_stop_quirks) {
+ bool wake_enable = true;
+
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable clock stop on suspend\n");
- return ret;
+ wake_enable = false;
}
ret = sdw_cdns_enable_interrupt(cdns, false);
@@ -1691,7 +1693,7 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
return ret;
}
- intel_shim_wake(sdw, true);
+ intel_shim_wake(sdw, wake_enable);
} else {
dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
__func__, clock_stop_quirks);
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index cabdadb09a1b..bc8520eb385e 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -405,11 +405,12 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
{
acpi_status status;
+ info->handle = NULL;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
parent_handle, 1,
sdw_intel_acpi_cb,
NULL, info, NULL);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status) || info->handle == NULL)
return -ENODEV;
return sdw_intel_scan_controller(info);
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index a08f4081c1c4..180f38bd003b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -163,15 +163,13 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
if (id.unique_id != id2.unique_id) {
dev_dbg(bus->dev,
- "Valid unique IDs %x %x for Slave mfg %x part %d\n",
- id.unique_id, id2.unique_id,
- id.mfg_id, id.part_id);
+ "Valid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
+ id.unique_id, id2.unique_id, id.mfg_id, id.part_id);
ignore_unique_id = false;
} else {
dev_err(bus->dev,
- "Invalid unique IDs %x %x for Slave mfg %x part %d\n",
- id.unique_id, id2.unique_id,
- id.mfg_id, id.part_id);
+ "Invalid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
+ id.unique_id, id2.unique_id, id.mfg_id, id.part_id);
return -ENODEV;
}
}
diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c
index b48b6617a396..3210359cd944 100644
--- a/drivers/soundwire/sysfs_slave.c
+++ b/drivers/soundwire/sysfs_slave.c
@@ -130,7 +130,7 @@ static struct attribute *slave_dev_attrs[] = {
* we don't use ATTRIBUTES_GROUP here since we want to add a subdirectory
* for device-level properties
*/
-static struct attribute_group sdw_slave_dev_attr_group = {
+static const struct attribute_group sdw_slave_dev_attr_group = {
.attrs = slave_dev_attrs,
.name = "dev-properties",
};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index aadaea052f51..09a263cf4ae2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -203,7 +203,7 @@ config SPI_CADENCE
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ depends on OF && (ARM || ARM64 || X86 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -292,13 +292,6 @@ config SPI_DLN2
This driver can also be built as a module. If so, the module
will be called spi-dln2.
-config SPI_EFM32
- tristate "EFM32 SPI controller"
- depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
- select SPI_BITBANG
- help
- Driver for the spi controller found on Energy Micro's EFM32 SoCs.
-
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
depends on ARCH_EP93XX || COMPILE_TEST
@@ -601,7 +594,6 @@ config SPI_PIC32_SQI
config SPI_PL022
tristate "ARM AMBA PL022 SSP controller"
depends on ARM_AMBA
- default y if MACH_U300
default y if ARCH_REALVIEW
default y if INTEGRATOR_IMPD1
default y if ARCH_VERSATILE
@@ -650,7 +642,7 @@ config SPI_RPCIF
tristate "Renesas RPC-IF SPI driver"
depends on RENESAS_RPCIF
help
- SPI driver for Renesas R-Car Gen3 RPC-IF.
+ SPI driver for Renesas R-Car Gen3 or RZ/G2 RPC-IF.
config SPI_RSPI
tristate "Renesas RSPI/QSPI controller"
@@ -751,13 +743,6 @@ config SPI_SIFIVE
help
This exposes the SPI controller IP from SiFive.
-config SPI_SIRF
- tristate "CSR SiRFprimaII SPI controller"
- depends on SIRF_DMA
- select SPI_BITBANG
- help
- SPI driver for CSR SiRFprimaII SoCs
-
config SPI_SLAVE_MT27XX
tristate "MediaTek SPI slave device"
depends on ARCH_MEDIATEK || COMPILE_TEST
@@ -843,6 +828,15 @@ config SPI_MXS
help
SPI driver for Freescale MXS devices.
+config SPI_TEGRA210_QUAD
+ tristate "NVIDIA Tegra QSPI Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ QSPI driver for NVIDIA Tegra QSPI Controller interface. This
+ controller is different from the SPI controller and is available
+ on Tegra SoCs starting from Tegra210.
+
config SPI_TEGRA114
tristate "NVIDIA Tegra114 SPI Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
@@ -885,12 +879,6 @@ config SPI_TOPCLIFF_PCH
This driver also supports the ML7213/ML7223/ML7831, a companion chip
for the Atom E6xx series and compatible with the Intel EG20T PCH.
-config SPI_TXX9
- tristate "Toshiba TXx9 SPI controller"
- depends on GPIOLIB && (CPU_TX49XX || COMPILE_TEST)
- help
- SPI driver for Toshiba TXx9 MIPS SoCs
-
config SPI_UNIPHIER
tristate "Socionext UniPhier SPI Controller"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6fea5821662e..0f06fc0813c6 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -42,7 +42,6 @@ spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o
obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
-obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
obj-$(CONFIG_SPI_FSI) += spi-fsi.o
@@ -94,6 +93,7 @@ obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
+obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
@@ -105,7 +105,6 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
-obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
@@ -115,6 +114,7 @@ obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
+obj-$(CONFIG_SPI_TEGRA210_QUAD) += spi-tegra210-quad.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
@@ -122,7 +122,6 @@ obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
-obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_UNIPHIER) += spi-uniphier.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 10bc5390ab91..95d4fa32c299 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -657,6 +657,7 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..62ea0c9e321b 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
@@ -252,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Invalid number of chipselect: %hu\n",
pdata->num_chipselect);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
master->num_chipselect = pdata->num_chipselect;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 948396b382d7..f429436082af 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1590,7 +1590,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (ret == 0) {
as->use_dma = true;
} else if (ret == -EPROBE_DEFER) {
- return ret;
+ goto out_unmap_regs;
}
} else if (as->caps.has_pdc_support) {
as->use_pdc = true;
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index dfb7196f4caf..4b59a1b1bf7e 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -26,7 +26,7 @@
#include <asm/mach-au1x00/au1550_spi.h>
-static unsigned usedma = 1;
+static unsigned int usedma = 1;
module_param(usedma, uint, 0644);
/*
@@ -43,9 +43,9 @@ struct au1550_spi {
volatile psc_spi_t __iomem *regs;
int irq;
- unsigned len;
- unsigned tx_count;
- unsigned rx_count;
+ unsigned int len;
+ unsigned int tx_count;
+ unsigned int rx_count;
const u8 *tx;
u8 *rx;
@@ -56,14 +56,14 @@ struct au1550_spi {
struct completion master_done;
- unsigned usedma;
+ unsigned int usedma;
u32 dma_tx_id;
u32 dma_rx_id;
u32 dma_tx_ch;
u32 dma_rx_ch;
u8 *dma_rx_tmpbuf;
- unsigned dma_rx_tmpbuf_size;
+ unsigned int dma_rx_tmpbuf_size;
u32 dma_rx_tmpbuf_addr;
struct spi_master *master;
@@ -74,8 +74,7 @@ struct au1550_spi {
/* we use an 8-bit memory device for dma transfers to/from spi fifo */
-static dbdev_tab_t au1550_spi_mem_dbdev =
-{
+static dbdev_tab_t au1550_spi_mem_dbdev = {
.dev_id = DBDMA_MEM_CHAN,
.dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
.dev_tsize = 0,
@@ -99,7 +98,7 @@ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
* BRG valid range is 4..63
* DIV valid range is 0..3
*/
-static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz)
+static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned int speed_hz)
{
u32 mainclk_hz = hw->pdata->mainclk_hz;
u32 div, brg;
@@ -161,7 +160,7 @@ static void au1550_spi_reset_fifos(struct au1550_spi *hw)
static void au1550_spi_chipsel(struct spi_device *spi, int value)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
- unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
u32 cfg, stat;
switch (value) {
@@ -221,7 +220,7 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value)
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
- unsigned bpw, hz;
+ unsigned int bpw, hz;
u32 cfg, stat;
if (t) {
@@ -276,7 +275,7 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* spi master done event irq is not generated unless rx fifo is empty (emptied)
* so we need rx tmp buffer to use for rx dma if user does not provide one
*/
-static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
+static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned int size)
{
hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
if (!hw->dma_rx_tmpbuf)
@@ -399,10 +398,10 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
DMA_FROM_DEVICE);
}
/* unmap buffers if mapped above */
- if (t->rx_buf && t->rx_dma == 0 )
+ if (t->rx_buf && t->rx_dma == 0)
dma_unmap_single(hw->dev, dma_rx_addr, t->len,
DMA_FROM_DEVICE);
- if (t->tx_buf && t->tx_dma == 0 )
+ if (t->tx_buf && t->tx_dma == 0)
dma_unmap_single(hw->dev, dma_tx_addr, t->len,
DMA_TO_DEVICE);
@@ -447,8 +446,8 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
"dma transfer: receive FIFO overflow!\n");
else
dev_err(hw->dev,
- "dma transfer: unexpected SPI error "
- "(event=0x%x stat=0x%x)!\n", evnt, stat);
+ "dma transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
+ evnt, stat);
complete(&hw->master_done);
return IRQ_HANDLED;
@@ -493,12 +492,12 @@ static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \
wmb(); /* drain writebuffer */ \
}
-AU1550_SPI_RX_WORD(8,0xff)
-AU1550_SPI_RX_WORD(16,0xffff)
-AU1550_SPI_RX_WORD(32,0xffffff)
-AU1550_SPI_TX_WORD(8,0xff)
-AU1550_SPI_TX_WORD(16,0xffff)
-AU1550_SPI_TX_WORD(32,0xffffff)
+AU1550_SPI_RX_WORD(8, 0xff)
+AU1550_SPI_RX_WORD(16, 0xffff)
+AU1550_SPI_RX_WORD(32, 0xffffff)
+AU1550_SPI_TX_WORD(8, 0xff)
+AU1550_SPI_TX_WORD(16, 0xffff)
+AU1550_SPI_TX_WORD(32, 0xffffff)
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
@@ -567,8 +566,8 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
au1550_spi_mask_ack_all(hw);
au1550_spi_reset_fifos(hw);
dev_err(hw->dev,
- "pio transfer: unexpected SPI error "
- "(event=0x%x stat=0x%x)!\n", evnt, stat);
+ "pio transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
+ evnt, stat);
complete(&hw->master_done);
return IRQ_HANDLED;
}
@@ -636,12 +635,14 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+
return hw->txrx_bufs(spi, t);
}
static irqreturn_t au1550_spi_irq(int irq, void *dev)
{
struct au1550_spi *hw = dev;
+
return hw->irq_callback(hw);
}
@@ -872,6 +873,7 @@ static int au1550_spi_probe(struct platform_device *pdev)
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
+
master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
master->min_speed_hz =
hw->pdata->mainclk_hz / (max_div + 1) + 1;
@@ -972,8 +974,7 @@ static int __init au1550_spi_init(void)
if (usedma) {
ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
if (!ddma_memid)
- printk(KERN_ERR "au1550-spi: cannot add memory"
- "dbdma device\n");
+ printk(KERN_ERR "au1550-spi: cannot add memory dbdma device\n");
}
return platform_driver_register(&au1550_spi_drv);
}
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index c028446c7460..707fe3a5d8ef 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -881,7 +881,7 @@ static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
* when using flex mode we need to send
* the upper address byte to bspi
*/
- if (bcm_qspi_bspi_ver_three(qspi) == false) {
+ if (!bcm_qspi_bspi_ver_three(qspi)) {
addr = from & 0xff000000;
bcm_qspi_write(qspi, BSPI,
BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 197485f2c2b2..8965fe61c8b4 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,7 +386,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(bs);
/* wake up the framework */
- complete(&bs->ctlr->xfer_completion);
+ spi_finalize_current_transfer(bs->ctlr);
}
return IRQ_HANDLED;
@@ -608,7 +608,7 @@ static void bcm2835_spi_dma_rx_done(void *data)
bcm2835_spi_reset_hw(bs);
/* and mark as completed */;
- complete(&ctlr->xfer_completion);
+ spi_finalize_current_transfer(ctlr);
}
/**
@@ -640,7 +640,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
bcm2835_spi_undo_prologue(bs);
bcm2835_spi_reset_hw(bs);
- complete(&ctlr->xfer_completion);
+ spi_finalize_current_transfer(ctlr);
}
/**
@@ -1307,6 +1307,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
"could not get clk\n");
+ ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
+
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0)
return bs->irq ? bs->irq : -ENODEV;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 1a26865c42f8..75589ac6e95f 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -254,7 +254,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
/* and if rx_len is 0 then disable interrupts and wake up completion */
if (!bs->rx_len) {
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
- complete(&master->xfer_completion);
+ spi_finalize_current_transfer(master);
}
return IRQ_HANDLED;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index ba7d40c2922f..442cc7c53a47 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -52,6 +52,7 @@ struct cqspi_flash_pdata {
u8 inst_width;
u8 addr_width;
u8 data_width;
+ bool dtr;
u8 cs;
};
@@ -75,6 +76,7 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
+ u32 num_chipselect;
bool rclk_en;
u32 trigger_address;
u32 wr_delay;
@@ -111,6 +113,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
+#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
@@ -173,6 +177,9 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
+#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
+
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
@@ -188,6 +195,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
@@ -198,6 +206,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
@@ -214,6 +223,14 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+#define CQSPI_REG_POLLING_STATUS 0xB0
+#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16
+
+#define CQSPI_REG_OP_EXT_LOWER 0xE0
+#define CQSPI_REG_OP_EXT_READ_LSB 24
+#define CQSPI_REG_OP_EXT_WRITE_LSB 16
+#define CQSPI_REG_OP_EXT_STIG_LSB 0
+
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
@@ -288,6 +305,80 @@ static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata)
return rdreg;
}
+static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
+{
+ unsigned int dummy_clk;
+
+ dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
+ if (dtr)
+ dummy_clk /= 2;
+
+ return dummy_clk;
+}
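[Note: a quick worked check of the dummy-cycle math introduced above: cycles = nbytes * 8 / buswidth, halved in DTR because a bit is transferred on each clock edge. Standalone sketch, illustrative names:

#include <stdio.h>

static unsigned int calc_dummy(unsigned int nbytes, unsigned int buswidth,
			       int dtr)
{
	unsigned int dummy_clk = nbytes * (8 / buswidth);

	if (dtr)
		dummy_clk /= 2;
	return dummy_clk;
}

int main(void)
{
	printf("%u\n", calc_dummy(8, 8, 0));	/* 8 cycles: SDR, octal bus */
	printf("%u\n", calc_dummy(8, 8, 1));	/* 4 cycles: DTR, octal bus */
	return 0;
}
]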
+
+static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+
+ switch (op->data.buswidth) {
+ case 0:
+ break;
+ case 1:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case 2:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case 4:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ case 8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (f_pdata->dtr) {
+ switch (op->cmd.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (op->addr.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (op->data.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
const unsigned int poll_idle_retry = 3;
@@ -345,19 +436,85 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
+static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op,
+ unsigned int shift)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ u8 ext;
+
+ if (op->cmd.nbytes != 2)
+ return -EINVAL;
+
+ /* Opcode extension is the LSB. */
+ ext = op->cmd.opcode & 0xff;
+
+ reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
+ reg &= ~(0xff << shift);
+ reg |= ext << shift;
+ writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);
+
+ return 0;
+}
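[Note: the dual-byte opcode split used above, as a standalone sketch. The controller is programmed with the primary opcode (upper byte) and the extension (lower byte) separately; the 0xEE/0x11 value is hypothetical, though many flashes send the complement of the command as the extension byte.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t opcode = 0xEE11;	/* hypothetical 2-byte DTR opcode */
	uint8_t cmd = opcode >> 8;	/* programmed as the instruction */
	uint8_t ext = opcode & 0xff;	/* programmed via OP_EXT_LOWER */

	printf("cmd 0x%02x ext 0x%02x\n", cmd, ext);
	return 0;
}
]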
+
+static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op, unsigned int shift,
+ bool enable)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ int ret;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ /*
+ * We enable the dual-byte opcode here. Callers have to set up the
+ * extension opcode based on the type of operation being performed.
+ */
+ if (enable) {
+ reg |= CQSPI_REG_CONFIG_DTR_PROTO;
+ reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
+
+ /* Set up command opcode extension. */
+ ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
+ if (ret)
+ return ret;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
+ reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ return cqspi_wait_idle(cqspi);
+}
+
static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
u8 *rxbuf = op->data.buf.in;
- u8 opcode = op->cmd.opcode;
+ u8 opcode;
size_t n_rx = op->data.nbytes;
unsigned int rdreg;
unsigned int reg;
+ unsigned int dummy_clk;
size_t read_len;
int status;
+ status = cqspi_set_protocol(f_pdata, op);
+ if (status)
+ return status;
+
+ status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ f_pdata->dtr);
+ if (status)
+ return status;
+
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
dev_err(&cqspi->pdev->dev,
"Invalid input argument, len %zu rxbuf 0x%p\n",
@@ -365,11 +522,24 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
rdreg = cqspi_calc_rdreg(f_pdata);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+ dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ return -EOPNOTSUPP;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
+ << CQSPI_REG_CMDCTRL_DUMMY_LSB;
+
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
/* 0 means 1 byte. */
@@ -401,12 +571,22 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
- const u8 opcode = op->cmd.opcode;
+ u8 opcode;
const u8 *txbuf = op->data.buf.out;
size_t n_tx = op->data.nbytes;
unsigned int reg;
unsigned int data;
size_t write_len;
+ int ret;
+
+ ret = cqspi_set_protocol(f_pdata, op);
+ if (ret)
+ return ret;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(&cqspi->pdev->dev,
@@ -415,6 +595,14 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
+ reg = cqspi_calc_rdreg(f_pdata);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (op->addr.nbytes) {
@@ -454,14 +642,27 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
void __iomem *reg_base = cqspi->iobase;
unsigned int dummy_clk = 0;
unsigned int reg;
+ int ret;
+ u8 opcode;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
- reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(f_pdata);
/* Setup dummy clock cycles */
- dummy_clk = op->dummy.nbytes * 8;
+ dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
- dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+ return -EOPNOTSUPP;
if (dummy_clk)
reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
@@ -573,15 +774,43 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
unsigned int reg;
+ int ret;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
+ u8 opcode;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
+
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
/* Set opcode. */
- reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
reg = cqspi_calc_rdreg(f_pdata);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+ if (f_pdata->dtr) {
+ /*
+ * Some flashes like the Cypress Semper flash expect a 4-byte
+ * dummy address with the Read SR command in DTR mode, but this
+ * controller does not support sending an address with the Read SR
+ * command. So, disable write completion polling on the
+ * controller's side. spi-nor will take care of polling the
+ * status register.
+ */
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ }
+
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
@@ -835,35 +1064,6 @@ static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
cqspi_controller_enable(cqspi, 1);
}
-static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
- const struct spi_mem_op *op)
-{
- f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
- if (op->data.dir == SPI_MEM_DATA_IN) {
- switch (op->data.buswidth) {
- case 1:
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- break;
- case 2:
- f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
- break;
- case 4:
- f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
- break;
- case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
@@ -881,7 +1081,16 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
if (ret)
return ret;
- if (cqspi->use_direct_mode && ((to + len) <= cqspi->ahb_size)) {
+ /*
+ * Some flashes like the Cypress Semper flash expect a dummy 4-byte
+ * address (all 0s) with the read status register command in DTR mode.
+ * But this controller does not support sending dummy address bytes to
+ * the flash when it is polling the write completion register in DTR
+ * mode. So, we cannot use direct mode when in DTR mode for writing
+ * data.
+ */
+ if (!f_pdata->dtr && cqspi->use_direct_mode &&
+ ((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
@@ -942,7 +1151,7 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
dma_async_issue_pending(cqspi->rx_chan);
if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
- msecs_to_jiffies(len))) {
+ msecs_to_jiffies(max_t(size_t, len, 500)))) {
dmaengine_terminate_sync(cqspi->rx_chan);
dev_err(dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
@@ -1010,6 +1219,26 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
return ret;
}
+static bool cqspi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ bool all_true, all_false;
+
+ all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
+ op->data.dtr;
+ all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+ !op->data.dtr;
+
+ /* Mixed DTR modes not supported. */
+ if (!(all_true || all_false))
+ return false;
+
+ if (all_true)
+ return spi_mem_dtr_supports_op(mem, op);
+ else
+ return spi_mem_default_supports_op(mem, op);
+}
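[Note: the all-or-nothing DTR rule enforced above, checked standalone (names illustrative): an op is accepted only if every phase is DTR or no phase is.

#include <stdbool.h>
#include <stdio.h>

static bool dtr_phases_ok(bool cmd, bool addr, bool dummy, bool data)
{
	bool all_true = cmd && addr && dummy && data;
	bool all_false = !cmd && !addr && !dummy && !data;

	/* mixed DTR modes are not supported */
	return all_true || all_false;
}

int main(void)
{
	printf("%d\n", dtr_phases_ok(true, true, true, true));     /* 1 */
	printf("%d\n", dtr_phases_ok(true, true, false, true));    /* 0 */
	printf("%d\n", dtr_phases_ok(false, false, false, false)); /* 1 */
	return 0;
}
]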
+
static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
struct cqspi_flash_pdata *f_pdata,
struct device_node *np)
@@ -1070,6 +1299,9 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
return -ENXIO;
}
+ if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
+ cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;
+
cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
return 0;
@@ -1101,10 +1333,12 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
- /* Enable Direct Access Controller */
- reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
- reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
- writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ /* Disable direct access controller */
+ if (!cqspi->use_direct_mode) {
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ }
cqspi_controller_enable(cqspi, 1);
}
@@ -1138,6 +1372,7 @@ static const char *cqspi_get_name(struct spi_mem *mem)
static const struct spi_controller_mem_ops cqspi_mem_ops = {
.exec_op = cqspi_exec_mem_op,
.get_name = cqspi_get_name,
+ .supports_op = cqspi_supports_mem_op,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
@@ -1279,13 +1514,14 @@ static int cqspi_probe(struct platform_device *pdev)
reset_control_deassert(rstc_ocp);
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ master->max_speed_hz = cqspi->master_ref_clk_hz;
ddata = of_device_get_match_data(dev);
if (ddata) {
if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
- cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
- master->mode_bits |= SPI_RX_OCTAL;
+ master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
cqspi->use_direct_mode = true;
}
@@ -1302,6 +1538,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ master->num_chipselect = cqspi->num_chipselect;
+
ret = cqspi_setup_flash(cqspi);
if (ret) {
dev_err(dev, "failed to setup flash parameters %d\n", ret);
@@ -1390,6 +1628,10 @@ static const struct cqspi_driver_platdata am654_ospi = {
.quirks = CQSPI_NEEDS_WR_DELAY,
};
+static const struct cqspi_driver_platdata intel_lgm_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
@@ -1403,6 +1645,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "ti,am654-ospi",
.data = &am654_ospi,
},
+ {
+ .compatible = "intel,lgm-qspi",
+ .data = &intel_lgm_qspi,
+ },
{ /* end of table */ }
};
@@ -1427,3 +1673,4 @@ MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 70467b9d61ba..a3afd1b9ac56 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -115,6 +115,7 @@ struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 5e900f228919..0bef5ce08094 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -104,7 +104,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
master->use_gpio_descriptors = true;
master->bus_num = -1;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->dev.of_node = pdev->dev.of_node;
master->prepare_message = spi_clps711x_prepare_message;
master->transfer_one = spi_clps711x_transfer_one;
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index 4aa8596fb1f2..5be6b7b80c21 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -84,7 +84,7 @@ static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t
if (shift) {
chunk = min_t(size_t, 4 - shift, len);
data = readl_relaxed(from - shift);
- memcpy(to, &data + shift, chunk);
+ memcpy(to, (char *)&data + shift, chunk);
from += chunk;
to += chunk;
len -= chunk;
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
deleted file mode 100644
index ea6e4a7b3feb..000000000000
--- a/drivers/spi/spi-efm32.c
+++ /dev/null
@@ -1,462 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012-2013 Uwe Kleine-Koenig for Pengutronix
- */
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_data/efm32-spi.h>
-#include <linux/of.h>
-
-#define DRIVER_NAME "efm32-spi"
-
-#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
-
-#define REG_CTRL 0x00
-#define REG_CTRL_SYNC 0x0001
-#define REG_CTRL_CLKPOL 0x0100
-#define REG_CTRL_CLKPHA 0x0200
-#define REG_CTRL_MSBF 0x0400
-#define REG_CTRL_TXBIL 0x1000
-
-#define REG_FRAME 0x04
-#define REG_FRAME_DATABITS__MASK 0x000f
-#define REG_FRAME_DATABITS(n) ((n) - 3)
-
-#define REG_CMD 0x0c
-#define REG_CMD_RXEN 0x0001
-#define REG_CMD_RXDIS 0x0002
-#define REG_CMD_TXEN 0x0004
-#define REG_CMD_TXDIS 0x0008
-#define REG_CMD_MASTEREN 0x0010
-
-#define REG_STATUS 0x10
-#define REG_STATUS_TXENS 0x0002
-#define REG_STATUS_TXC 0x0020
-#define REG_STATUS_TXBL 0x0040
-#define REG_STATUS_RXDATAV 0x0080
-
-#define REG_CLKDIV 0x14
-
-#define REG_RXDATAX 0x18
-#define REG_RXDATAX_RXDATA__MASK 0x01ff
-#define REG_RXDATAX_PERR 0x4000
-#define REG_RXDATAX_FERR 0x8000
-
-#define REG_TXDATA 0x34
-
-#define REG_IF 0x40
-#define REG_IF_TXBL 0x0002
-#define REG_IF_RXDATAV 0x0004
-
-#define REG_IFS 0x44
-#define REG_IFC 0x48
-#define REG_IEN 0x4c
-
-#define REG_ROUTE 0x54
-#define REG_ROUTE_RXPEN 0x0001
-#define REG_ROUTE_TXPEN 0x0002
-#define REG_ROUTE_CLKPEN 0x0008
-#define REG_ROUTE_LOCATION__MASK 0x0700
-#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
-
-struct efm32_spi_ddata {
- struct spi_bitbang bitbang;
-
- spinlock_t lock;
-
- struct clk *clk;
- void __iomem *base;
- unsigned int rxirq, txirq;
- struct efm32_spi_pdata pdata;
-
- /* irq data */
- struct completion done;
- const u8 *tx_buf;
- u8 *rx_buf;
- unsigned tx_len, rx_len;
-};
-
-#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
-#define efm32_spi_vdbg(ddata, format, arg...) \
- dev_vdbg(ddata_to_dev(ddata), format, ##arg)
-
-static void efm32_spi_write32(struct efm32_spi_ddata *ddata,
- u32 value, unsigned offset)
-{
- writel_relaxed(value, ddata->base + offset);
-}
-
-static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
-{
- return readl_relaxed(ddata->base + offset);
-}
-
-static int efm32_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
-
- unsigned bpw = t->bits_per_word ?: spi->bits_per_word;
- unsigned speed = t->speed_hz ?: spi->max_speed_hz;
- unsigned long clkfreq = clk_get_rate(ddata->clk);
- u32 clkdiv;
-
- efm32_spi_write32(ddata, REG_CTRL_SYNC | REG_CTRL_MSBF |
- (spi->mode & SPI_CPHA ? REG_CTRL_CLKPHA : 0) |
- (spi->mode & SPI_CPOL ? REG_CTRL_CLKPOL : 0), REG_CTRL);
-
- efm32_spi_write32(ddata,
- REG_FRAME_DATABITS(bpw), REG_FRAME);
-
- if (2 * speed >= clkfreq)
- clkdiv = 0;
- else
- clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4);
-
- if (clkdiv > (1U << 21))
- return -EINVAL;
-
- efm32_spi_write32(ddata, clkdiv, REG_CLKDIV);
- efm32_spi_write32(ddata, REG_CMD_MASTEREN, REG_CMD);
- efm32_spi_write32(ddata, REG_CMD_RXEN | REG_CMD_TXEN, REG_CMD);
-
- return 0;
-}
-
-static void efm32_spi_tx_u8(struct efm32_spi_ddata *ddata)
-{
- u8 val = 0;
-
- if (ddata->tx_buf) {
- val = *ddata->tx_buf;
- ddata->tx_buf++;
- }
-
- ddata->tx_len--;
- efm32_spi_write32(ddata, val, REG_TXDATA);
- efm32_spi_vdbg(ddata, "%s: tx 0x%x\n", __func__, val);
-}
-
-static void efm32_spi_rx_u8(struct efm32_spi_ddata *ddata)
-{
- u32 rxdata = efm32_spi_read32(ddata, REG_RXDATAX);
- efm32_spi_vdbg(ddata, "%s: rx 0x%x\n", __func__, rxdata);
-
- if (ddata->rx_buf) {
- *ddata->rx_buf = rxdata;
- ddata->rx_buf++;
- }
-
- ddata->rx_len--;
-}
-
-static void efm32_spi_filltx(struct efm32_spi_ddata *ddata)
-{
- while (ddata->tx_len &&
- ddata->tx_len + 2 > ddata->rx_len &&
- efm32_spi_read32(ddata, REG_STATUS) & REG_STATUS_TXBL) {
- efm32_spi_tx_u8(ddata);
- }
-}
-
-static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
-{
- struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
- int ret = -EBUSY;
-
- spin_lock_irq(&ddata->lock);
-
- if (ddata->tx_buf || ddata->rx_buf)
- goto out_unlock;
-
- ddata->tx_buf = t->tx_buf;
- ddata->rx_buf = t->rx_buf;
- ddata->tx_len = ddata->rx_len =
- t->len * DIV_ROUND_UP(t->bits_per_word, 8);
-
- efm32_spi_filltx(ddata);
-
- reinit_completion(&ddata->done);
-
- efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
-
- spin_unlock_irq(&ddata->lock);
-
- wait_for_completion(&ddata->done);
-
- spin_lock_irq(&ddata->lock);
-
- ret = t->len - max(ddata->tx_len, ddata->rx_len);
-
- efm32_spi_write32(ddata, 0, REG_IEN);
- ddata->tx_buf = ddata->rx_buf = NULL;
-
-out_unlock:
- spin_unlock_irq(&ddata->lock);
-
- return ret;
-}
-
-static irqreturn_t efm32_spi_rxirq(int irq, void *data)
-{
- struct efm32_spi_ddata *ddata = data;
- irqreturn_t ret = IRQ_NONE;
-
- spin_lock(&ddata->lock);
-
- while (ddata->rx_len > 0 &&
- efm32_spi_read32(ddata, REG_STATUS) &
- REG_STATUS_RXDATAV) {
- efm32_spi_rx_u8(ddata);
-
- ret = IRQ_HANDLED;
- }
-
- if (!ddata->rx_len) {
- u32 ien = efm32_spi_read32(ddata, REG_IEN);
-
- ien &= ~REG_IF_RXDATAV;
-
- efm32_spi_write32(ddata, ien, REG_IEN);
-
- complete(&ddata->done);
- }
-
- spin_unlock(&ddata->lock);
-
- return ret;
-}
-
-static irqreturn_t efm32_spi_txirq(int irq, void *data)
-{
- struct efm32_spi_ddata *ddata = data;
-
- efm32_spi_vdbg(ddata,
- "%s: txlen = %u, rxlen = %u, if=0x%08x, stat=0x%08x\n",
- __func__, ddata->tx_len, ddata->rx_len,
- efm32_spi_read32(ddata, REG_IF),
- efm32_spi_read32(ddata, REG_STATUS));
-
- spin_lock(&ddata->lock);
-
- efm32_spi_filltx(ddata);
-
- efm32_spi_vdbg(ddata, "%s: txlen = %u, rxlen = %u\n",
- __func__, ddata->tx_len, ddata->rx_len);
-
- if (!ddata->tx_len) {
- u32 ien = efm32_spi_read32(ddata, REG_IEN);
-
- ien &= ~REG_IF_TXBL;
-
- efm32_spi_write32(ddata, ien, REG_IEN);
- efm32_spi_vdbg(ddata, "disable TXBL\n");
- }
-
- spin_unlock(&ddata->lock);
-
- return IRQ_HANDLED;
-}
-
-static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
-{
- u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
-
- return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
-}
-
-static void efm32_spi_probe_dt(struct platform_device *pdev,
- struct spi_master *master, struct efm32_spi_ddata *ddata)
-{
- struct device_node *np = pdev->dev.of_node;
- u32 location;
- int ret;
-
- ret = of_property_read_u32(np, "energymicro,location", &location);
-
- if (ret)
- /* fall back to wrongly namespaced property */
- ret = of_property_read_u32(np, "efm32,location", &location);
-
- if (ret)
- /* fall back to old and (wrongly) generic property "location" */
- ret = of_property_read_u32(np, "location", &location);
-
- if (!ret) {
- dev_dbg(&pdev->dev, "using location %u\n", location);
- } else {
- /* default to location configured in hardware */
- location = efm32_spi_get_configured_location(ddata);
-
- dev_info(&pdev->dev, "fall back to location %u\n", location);
- }
-
- ddata->pdata.location = location;
-}
-
-static int efm32_spi_probe(struct platform_device *pdev)
-{
- struct efm32_spi_ddata *ddata;
- struct resource *res;
- int ret;
- struct spi_master *master;
- struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return -EINVAL;
-
- master = spi_alloc_master(&pdev->dev, sizeof(*ddata));
- if (!master) {
- dev_dbg(&pdev->dev,
- "failed to allocate spi master controller\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, master);
-
- master->dev.of_node = pdev->dev.of_node;
-
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
- master->use_gpio_descriptors = true;
-
- ddata = spi_master_get_devdata(master);
-
- ddata->bitbang.master = master;
- ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
- ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
-
- spin_lock_init(&ddata->lock);
- init_completion(&ddata->done);
-
- ddata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ddata->clk)) {
- ret = PTR_ERR(ddata->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
- goto err;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "failed to determine base address\n");
- goto err;
- }
-
- if (resource_size(res) < 0x60) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "memory resource too small\n");
- goto err;
- }
-
- ddata->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ddata->base)) {
- ret = PTR_ERR(ddata->base);
- goto err;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0)
- goto err;
-
- ddata->rxirq = ret;
-
- ret = platform_get_irq(pdev, 1);
- if (ret <= 0)
- ret = ddata->rxirq + 1;
-
- ddata->txirq = ret;
-
- ret = clk_prepare_enable(ddata->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
- goto err;
- }
-
- efm32_spi_probe_dt(pdev, master, ddata);
-
- efm32_spi_write32(ddata, 0, REG_IEN);
- efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
- REG_ROUTE_CLKPEN |
- REG_ROUTE_LOCATION(ddata->pdata.location), REG_ROUTE);
-
- ret = request_irq(ddata->rxirq, efm32_spi_rxirq,
- 0, DRIVER_NAME " rx", ddata);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rxirq (%d)\n", ret);
- goto err_disable_clk;
- }
-
- ret = request_irq(ddata->txirq, efm32_spi_txirq,
- 0, DRIVER_NAME " tx", ddata);
- if (ret) {
- dev_err(&pdev->dev, "failed to register txirq (%d)\n", ret);
- goto err_free_rx_irq;
- }
-
- ret = spi_bitbang_start(&ddata->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "spi_bitbang_start failed (%d)\n", ret);
-
- free_irq(ddata->txirq, ddata);
-err_free_rx_irq:
- free_irq(ddata->rxirq, ddata);
-err_disable_clk:
- clk_disable_unprepare(ddata->clk);
-err:
- spi_master_put(master);
- }
-
- return ret;
-}
-
-static int efm32_spi_remove(struct platform_device *pdev)
-{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
-
- spi_bitbang_stop(&ddata->bitbang);
-
- efm32_spi_write32(ddata, 0, REG_IEN);
-
- free_irq(ddata->txirq, ddata);
- free_irq(ddata->rxirq, ddata);
- clk_disable_unprepare(ddata->clk);
- spi_master_put(master);
-
- return 0;
-}
-
-static const struct of_device_id efm32_spi_dt_ids[] = {
- {
- .compatible = "energymicro,efm32-spi",
- }, {
- /* doesn't follow the "vendor,device" scheme, don't use */
- .compatible = "efm32,spi",
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, efm32_spi_dt_ids);
-
-static struct platform_driver efm32_spi_driver = {
- .probe = efm32_spi_probe,
- .remove = efm32_spi_remove,
-
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = efm32_spi_dt_ids,
- },
-};
-module_platform_driver(efm32_spi_driver);
-
-MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
-MODULE_DESCRIPTION("EFM32 SPI driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 9494257e1c33..e4a8d203f940 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata;
- bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
pdata = spi->dev.parent->parent->platform_data;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
- pdata->cs_control(spi, !pol);
+ pdata->cs_control(spi, false);
}
if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
fsl_spi_change_mode(spi);
if (pdata->cs_control)
- pdata->cs_control(spi, pol);
+ pdata->cs_control(spi, true);
}
}
@@ -696,7 +695,7 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
return;
- iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
+ iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
}
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock here: the SPI core already serializes these
+ * calls, and this flag is never accessed from interrupt context.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out, then a cancel
+ * times out, and then an abort also times out is when something is
+ * blocking our interrupt handler from running. Avoid starting any
+ * new transfers until that situation resolves itself.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we got here, the problem resolved itself, so there is no need
+ * to keep checking on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
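
Taken together, the geni changes implement a simple gate: once an abort times out, mas->abort_failed stays set and every entry point (set_cs, prepare_message, transfer_one) first checks whether any enabled interrupt is still pending before accepting work. The core of the test, as a sketch (register names as in the patch context; callers turn true into -EBUSY):

u32 m_irq    = readl(se->base + SE_GENI_M_IRQ_STATUS);
u32 m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);

if (m_irq & m_irq_en)		/* enabled interrupts still pending */
	return true;		/* handler hasn't run: keep refusing work */

mas->abort_failed = false;	/* stuck state drained itself */
return false;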
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 4650b483a33d..385eb7bba05a 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -19,6 +19,8 @@
#define HISI_SFC_V3XX_VERSION (0x1f8)
+#define HISI_SFC_V3XX_GLB_CFG (0x100)
+#define HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE BIT(2)
#define HISI_SFC_V3XX_RAW_INT_STAT (0x120)
#define HISI_SFC_V3XX_INT_STAT (0x124)
#define HISI_SFC_V3XX_INT_MASK (0x128)
@@ -75,6 +77,7 @@ struct hisi_sfc_v3xx_host {
void __iomem *regbase;
int max_cmd_dword;
struct completion *completion;
+ u8 address_mode;
int irq;
};
@@ -168,10 +171,18 @@ static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+
+ host = spi_controller_get_devdata(spi->master);
+
if (op->data.buswidth > 4 || op->dummy.buswidth > 4 ||
op->addr.buswidth > 4 || op->cmd.buswidth > 4)
return false;
+ if (op->addr.nbytes != host->address_mode && op->addr.nbytes)
+ return false;
+
return spi_mem_default_supports_op(mem, op);
}
@@ -416,7 +427,7 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct hisi_sfc_v3xx_host *host;
struct spi_controller *ctlr;
- u32 version;
+ u32 version, glb_config;
int ret;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
@@ -463,16 +474,24 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
ctlr->num_chipselect = 1;
ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+ /*
+ * The controller's address mode is either 3 or 4 bytes, as
+ * indicated by the address-mode bit in the global config
+ * register. That register is read-only from the OS driver's
+ * point of view.
+ */
+ glb_config = readl(host->regbase + HISI_SFC_V3XX_GLB_CFG);
+ if (glb_config & HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE)
+ host->address_mode = 4;
+ else
+ host->address_mode = 3;
+
version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
- switch (version) {
- case 0x351:
+ if (version >= 0x351)
host->max_cmd_dword = 64;
- break;
- default:
+ else
host->max_cmd_dword = 16;
- break;
- }
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
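
The net effect of the new supports_op filter: an op's address phase must either match the strapped address mode or be absent. Rewriting the patch's negative test as an accept condition (a sketch of the same logic):

/* Equivalent accept-side form of the check added above */
bool addr_ok = (op->addr.nbytes == host->address_mode) ||
	       (op->addr.nbytes == 0);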
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 73ca821763d6..5dc4ea4b4450 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1685,7 +1685,7 @@ static int spi_imx_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
goto out_bitbang_start;
}
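
dev_err_probe() logs the message together with the error code and, for -EPROBE_DEFER, demotes it to a debug message while recording the deferral reason for later inspection, which is why it is preferred in probe paths. The usual idiom returns its value directly; a sketch:

/* Common idiom (sketch); the patch keeps the existing goto flow instead */
return dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");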
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index f3a3f196e628..dc713b0c3c4d 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -137,8 +137,8 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return -ENOTSUPP;
}
-bool spi_mem_default_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+static bool spi_mem_check_buswidth(struct spi_mem *mem,
+ const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
@@ -156,13 +156,29 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
+ return true;
+}
+
+bool spi_mem_dtr_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->cmd.nbytes != 2)
+ return false;
+
+ return spi_mem_check_buswidth(mem, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
return false;
if (op->cmd.nbytes != 1)
return false;
- return true;
+ return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
@@ -354,6 +370,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ xfers[xferpos].dummy_data = 1;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
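
The spi-mem split keeps the shared buswidth validation in one place: spi_mem_default_supports_op() still rejects any DTR phase and requires a 1-byte command, while spi_mem_dtr_supports_op() allows DTR but requires a 2-byte command phase. A controller that supports only all-DTR or all-STR ops might wire it up like this hypothetical sketch:

static bool foo_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	bool all_dtr = op->cmd.dtr && op->addr.dtr &&
		       op->dummy.dtr && op->data.dtr;
	bool any_dtr = op->cmd.dtr || op->addr.dtr ||
		       op->dummy.dtr || op->data.dtr;

	if (any_dtr && !all_dtr)	/* no mixed DTR/STR (assumed HW limit) */
		return false;

	return all_dtr ? spi_mem_dtr_supports_op(mem, op)
		       : spi_mem_default_supports_op(mem, op);
}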
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index ef2f24420460..124cba7213f1 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -120,7 +120,7 @@ static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
ms->cs_change = ms->transfer->cs_change;
/* Write out the first byte */
- ms->wcol_tx_timestamp = get_tbl();
+ ms->wcol_tx_timestamp = mftb();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
@@ -221,8 +221,8 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
* but it can also be worked around simply by retrying the
* transfer which is what we do here. */
ms->wcol_count++;
- ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
- ms->wcol_tx_timestamp = get_tbl();
+ ms->wcol_ticks += mftb() - ms->wcol_tx_timestamp;
+ ms->wcol_tx_timestamp = mftb();
data = 0;
if (ms->tx_buf)
data = *(ms->tx_buf - 1);
@@ -247,14 +247,16 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
/* Is the transfer complete? */
ms->len--;
if (ms->len == 0) {
- ms->timestamp = get_tbl();
- ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
+ ms->timestamp = mftb();
+ if (ms->transfer->delay.unit == SPI_DELAY_UNIT_USECS)
+ ms->timestamp += ms->transfer->delay.value *
+ tb_ticks_per_usec;
ms->state = mpc52xx_spi_fsmstate_wait;
return FSM_CONTINUE;
}
/* Write out the next byte */
- ms->wcol_tx_timestamp = get_tbl();
+ ms->wcol_tx_timestamp = mftb();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
@@ -276,7 +278,7 @@ mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
status);
- if (((int)get_tbl()) - ms->timestamp < 0)
+ if (((int)mftb()) - ms->timestamp < 0)
return FSM_POLL;
ms->message->actual_length += ms->transfer->len;
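
Two mechanical conversions are folded into the mpc52xx hunks: the deprecated get_tbl() timebase accessor becomes mftb(), and the removed per-transfer delay_usecs field becomes the struct spi_delay API, which carries an explicit unit. The patch converts only microsecond delays; a sketch that would also cover nanosecond units (an assumption, not in the patch):

if (t->delay.unit == SPI_DELAY_UNIT_USECS)
	ticks = t->delay.value * tb_ticks_per_usec;
else if (t->delay.unit == SPI_DELAY_UNIT_NSECS)	/* hypothetical here */
	ticks = t->delay.value / 1000 * tb_ticks_per_usec;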
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5d643051bf3d..976f73b9e299 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -287,7 +287,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
static void mtk_spi_prepare_transfer(struct spi_master *master,
struct spi_transfer *xfer)
{
- u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
+ u32 spi_clk_hz, div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
spi_clk_hz = clk_get_rate(mdata->spi_clk);
@@ -297,32 +297,25 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
div = 1;
sck_time = (div + 1) / 2;
- cs_time = sck_time * 2;
if (mdata->dev_comp->enhance_timing) {
- reg_val = (((sck_time - 1) & 0xffff)
+ reg_val = readl(mdata->base + SPI_CFG2_REG);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_CFG2_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG2_REG);
- reg_val = (((cs_time - 1) & 0xffff)
- << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
- reg_val |= (((cs_time - 1) & 0xffff)
- << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG0_REG);
} else {
- reg_val = (((sck_time - 1) & 0xff)
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
- reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
- reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
writel(reg_val, mdata->base + SPI_CFG0_REG);
}
-
- reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
- reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG1_REG);
}
static void mtk_spi_setup_packet(struct spi_master *master)
@@ -513,6 +506,52 @@ static bool mtk_spi_can_dma(struct spi_master *master,
(unsigned long)xfer->rx_buf % 4 == 0);
}
+static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u16 setup_dly, hold_dly, inactive_dly;
+ u32 reg_val;
+
+ if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
+ return -EINVAL;
+ }
+
+ setup_dly = setup ? setup->value : 1;
+ hold_dly = hold ? hold->value : 1;
+ inactive_dly = inactive ? inactive->value : 1;
+
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ if (mdata->dev_comp->enhance_timing) {
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold_dly - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup_dly - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ } else {
+ reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup_dly - 1) & 0xff)
+ << SPI_CFG0_CS_SETUP_OFFSET);
+ }
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+ reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+ return 0;
+}
+
static int mtk_spi_setup(struct spi_device *spi)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
@@ -644,6 +683,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->transfer_one = mtk_spi_transfer_one;
master->can_dma = mtk_spi_can_dma;
master->setup = mtk_spi_setup;
+ master->set_cs_timing = mtk_spi_set_hw_cs_timing;
of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
if (!of_id) {
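
The mt65xx refactor does two things at once: CS setup/hold/idle programming moves out of the per-transfer path and behind the new set_cs_timing hook, and the register updates become read-modify-write so unrelated fields in CFG0/CFG1 survive. The recurring idiom, with placeholder REG/OFF/MASK names (not the driver's):

val = readl(base + REG);
val &= ~(MASK << OFF);			/* clear the old field value  */
val |= ((cycles - 1) & MASK) << OFF;	/* HW encodes N cycles as N-1 */
writel(val, base + REG);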
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b57b8b3cc26e..68ed7fd64256 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -96,10 +96,16 @@ struct orion_spi {
struct clk *clk;
struct clk *axi_clk;
const struct orion_spi_dev *devdata;
+ struct device *dev;
struct orion_child_options child[ORION_NUM_CHIPSELECTS];
};
+#ifdef CONFIG_PM
+static int orion_spi_runtime_suspend(struct device *dev);
+static int orion_spi_runtime_resume(struct device *dev);
+#endif
+
static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
{
return orion_spi->base + reg;
@@ -369,8 +375,15 @@ orion_spi_write_read_8bit(struct spi_device *spi,
{
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
+ bool cs_single_byte;
+
+ cs_single_byte = spi->mode & SPI_CS_WORD;
orion_spi = spi_master_get_devdata(spi->master);
+
+ if (cs_single_byte)
+ orion_spi_set_cs(spi, 0);
+
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
@@ -384,6 +397,11 @@ orion_spi_write_read_8bit(struct spi_device *spi,
writel(0, tx_reg);
if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+ /* Satisfy some SLIC devices' requirements */
+ udelay(4);
+ }
dev_err(&spi->dev, "TXS timed out\n");
return -1;
}
@@ -391,6 +409,12 @@ orion_spi_write_read_8bit(struct spi_device *spi,
if (rx_buf && *rx_buf)
*(*rx_buf)++ = readl(rx_reg);
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+ /* Satisfy some SLIC devices' requirements */
+ udelay(4);
+ }
+
return 1;
}
@@ -401,6 +425,11 @@ orion_spi_write_read_16bit(struct spi_device *spi,
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
+ if (spi->mode & SPI_CS_WORD) {
+ dev_err(&spi->dev, "SPI_CS_WORD is only supported for 8 bit words\n");
+ return -1;
+ }
+
orion_spi = spi_master_get_devdata(spi->master);
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
@@ -440,12 +469,13 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
orion_spi = spi_master_get_devdata(spi->master);
/*
- * Use SPI direct write mode if base address is available. Otherwise
- * fall back to PIO mode for this transfer.
+ * Use SPI direct write mode if base address is available
+ * and SPI_CS_WORD flag is not set.
+ * Otherwise fall back to PIO mode for this transfer.
*/
vaddr = orion_spi->child[cs].direct_access.vaddr;
- if (vaddr && xfer->tx_buf && word_len == 8) {
+ if (vaddr && xfer->tx_buf && word_len == 8 && (spi->mode & SPI_CS_WORD) == 0) {
unsigned int cnt = count / 4;
unsigned int rem = count % 4;
@@ -507,7 +537,21 @@ static int orion_spi_transfer_one(struct spi_master *master,
static int orion_spi_setup(struct spi_device *spi)
{
- return orion_spi_setup_transfer(spi, NULL);
+ int ret;
+#ifdef CONFIG_PM
+ struct orion_spi *orion_spi = spi_master_get_devdata(spi->master);
+ struct device *dev = orion_spi->dev;
+
+ orion_spi_runtime_resume(dev);
+#endif
+
+ ret = orion_spi_setup_transfer(spi, NULL);
+
+#ifdef CONFIG_PM
+ orion_spi_runtime_suspend(dev);
+#endif
+
+ return ret;
}
static int orion_spi_reset(struct orion_spi *orion_spi)
@@ -616,7 +660,7 @@ static int orion_spi_probe(struct platform_device *pdev)
}
/* we support all 4 SPI modes and LSB first option */
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
master->set_cs = orion_spi_set_cs;
master->transfer_one = orion_spi_transfer_one;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
@@ -630,6 +674,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
spi->master = master;
+ spi->dev = &pdev->dev;
of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
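
SPI_CS_WORD asks the controller to toggle chip select after every word; orion implements it for 8-bit words only, asserting CS around each byte and adding a 4 us hold for SLIC parts. A peripheral driver would opt in roughly like this (sketch):

spi->mode |= SPI_CS_WORD;	/* one CS pulse per word                  */
spi->bits_per_word = 8;		/* the only width orion supports here     */
ret = spi_setup(spi);		/* -EINVAL if the mode bit is unsupported */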
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index d1776fea287e..fd74ddfbb686 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2314,13 +2314,13 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
return status;
}
-static int
+static void
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
if (!pl022)
- return 0;
+ return;
/*
* undo pm_runtime_put() in probe. I assume that we're not
@@ -2335,7 +2335,6 @@ pl022_remove(struct amba_device *adev)
clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
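
This is part of the tree-wide conversion of the amba bus .remove callback to return void: the bus code ignored the return value anyway, so cleanup failures are now reported by logging rather than a dead error code. The resulting callback shape, sketched:

static void foo_remove(struct amba_device *adev)
{
	/* tear down; there is no status to return to the bus */
}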
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index f236e3034cf8..14fc41ed2361 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -21,7 +21,8 @@ enum {
PORT_BSW1,
PORT_BSW2,
PORT_CE4100,
- PORT_LPT,
+ PORT_LPT0,
+ PORT_LPT1,
};
struct pxa_spi_info {
@@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
+static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
+static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
+static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
@@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = {
.num_chipselect = 1,
.max_clk_rate = 50000000,
},
- [PORT_LPT] = {
+ [PORT_LPT0] = {
.type = LPSS_LPT_SSP,
.port_id = 0,
.setup = lpss_spi_setup,
- .tx_param = &lpt_tx_param,
- .rx_param = &lpt_rx_param,
+ .tx_param = &lpt0_tx_param,
+ .rx_param = &lpt0_rx_param,
+ },
+ [PORT_LPT1] = {
+ .type = LPSS_LPT_SSP,
+ .port_id = 1,
+ .setup = lpss_spi_setup,
+ .tx_param = &lpt1_tx_param,
+ .rx_param = &lpt1_rx_param,
},
};
@@ -285,8 +295,11 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
{ PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
- { },
+ { PCI_VDEVICE(INTEL, 0x9c65), PORT_LPT0 },
+ { PCI_VDEVICE(INTEL, 0x9c66), PORT_LPT1 },
+ { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
+ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
+ { }
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index bd2354fd438d..0cc767283674 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1492,6 +1492,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
+ /* ADL-P */
+ { PCI_VDEVICE(INTEL, 0x51aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x51ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x51fb), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 8863be370884..1dbcc410cd35 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -511,8 +511,7 @@ static int qcom_qspi_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
- ret = devm_request_irq(dev, ret, qcom_qspi_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
+ ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
if (ret) {
dev_err(dev, "Failed to request irq %d\n", ret);
return ret;
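
Passing 0 instead of IRQF_TRIGGER_HIGH lets the IRQ core keep the trigger type already described by the firmware (DT/ACPI) mapping rather than overriding it, which matters when boards describe the line differently. The call reduces to (sketch, with irq holding the platform IRQ number):

/* 0 == inherit the trigger type from the firmware-provided mapping */
ret = devm_request_irq(dev, irq, qcom_qspi_irq, 0, dev_name(dev), ctrl);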
diff --git a/drivers/spi/spi-realtek-rtl.c b/drivers/spi/spi-realtek-rtl.c
new file mode 100644
index 000000000000..866b0477dbd7
--- /dev/null
+++ b/drivers/spi/spi-realtek-rtl.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+
+struct rtspi {
+ void __iomem *base;
+};
+
+/* SPI Flash Configuration Register */
+#define RTL_SPI_SFCR 0x00
+#define RTL_SPI_SFCR_RBO BIT(28)
+#define RTL_SPI_SFCR_WBO BIT(27)
+
+/* SPI Flash Control and Status Register */
+#define RTL_SPI_SFCSR 0x08
+#define RTL_SPI_SFCSR_CSB0 BIT(31)
+#define RTL_SPI_SFCSR_CSB1 BIT(30)
+#define RTL_SPI_SFCSR_RDY BIT(27)
+#define RTL_SPI_SFCSR_CS BIT(24)
+#define RTL_SPI_SFCSR_LEN_MASK ~(0x03 << 28)
+#define RTL_SPI_SFCSR_LEN1 (0x00 << 28)
+#define RTL_SPI_SFCSR_LEN4 (0x03 << 28)
+
+/* SPI Flash Data Register */
+#define RTL_SPI_SFDR 0x0c
+
+#define REG(x) (rtspi->base + x)
+
+
+static void rt_set_cs(struct spi_device *spi, bool active)
+{
+ struct rtspi *rtspi = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ /* CS0 bit is active low */
+ value = readl(REG(RTL_SPI_SFCSR));
+ if (active)
+ value |= RTL_SPI_SFCSR_CSB0;
+ else
+ value &= ~RTL_SPI_SFCSR_CSB0;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static void set_size(struct rtspi *rtspi, int size)
+{
+ u32 value;
+
+ value = readl(REG(RTL_SPI_SFCSR));
+ value &= RTL_SPI_SFCSR_LEN_MASK;
+ if (size == 4)
+ value |= RTL_SPI_SFCSR_LEN4;
+ else if (size == 1)
+ value |= RTL_SPI_SFCSR_LEN1;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static inline void wait_ready(struct rtspi *rtspi)
+{
+ while (!(readl(REG(RTL_SPI_SFCSR)) & RTL_SPI_SFCSR_RDY))
+ cpu_relax();
+}
+
+static void send4(struct rtspi *rtspi, const u32 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 4);
+ writel(*buf, REG(RTL_SPI_SFDR));
+}
+
+static void send1(struct rtspi *rtspi, const u8 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 1);
+ writel(buf[0] << 24, REG(RTL_SPI_SFDR));
+}
+
+static void rcv4(struct rtspi *rtspi, u32 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 4);
+ *buf = readl(REG(RTL_SPI_SFDR));
+}
+
+static void rcv1(struct rtspi *rtspi, u8 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 1);
+ *buf = readl(REG(RTL_SPI_SFDR)) >> 24;
+}
+
+static int transfer_one(struct spi_controller *ctrl, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rtspi *rtspi = spi_controller_get_devdata(ctrl);
+ void *rx_buf;
+ const void *tx_buf;
+ int cnt;
+
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ cnt = xfer->len;
+ if (tx_buf) {
+ while (cnt >= 4) {
+ send4(rtspi, tx_buf);
+ tx_buf += 4;
+ cnt -= 4;
+ }
+ while (cnt) {
+ send1(rtspi, tx_buf);
+ tx_buf++;
+ cnt--;
+ }
+ } else if (rx_buf) {
+ while (cnt >= 4) {
+ rcv4(rtspi, rx_buf);
+ rx_buf += 4;
+ cnt -= 4;
+ }
+ while (cnt) {
+ rcv1(rtspi, rx_buf);
+ rx_buf++;
+ cnt--;
+ }
+ }
+
+ spi_finalize_current_transfer(ctrl);
+
+ return 0;
+}
+
+static void init_hw(struct rtspi *rtspi)
+{
+ u32 value;
+
+ /* Turn on big-endian byte ordering */
+ value = readl(REG(RTL_SPI_SFCR));
+ value |= RTL_SPI_SFCR_RBO | RTL_SPI_SFCR_WBO;
+ writel(value, REG(RTL_SPI_SFCR));
+
+ value = readl(REG(RTL_SPI_SFCSR));
+ /* Permanently disable CS1, since it's never used */
+ value |= RTL_SPI_SFCSR_CSB1;
+ /* Select CS0 for use */
+ value &= RTL_SPI_SFCSR_CS;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static int realtek_rtl_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctrl;
+ struct rtspi *rtspi;
+ int err;
+
+ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*rtspi));
+ if (!ctrl) {
+ dev_err(&pdev->dev, "Error allocating SPI controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, ctrl);
+ rtspi = spi_controller_get_devdata(ctrl);
+
+ rtspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(rtspi->base)) {
+ dev_err(&pdev->dev, "Could not map SPI register address");
+ return -ENOMEM;
+ }
+
+ init_hw(rtspi);
+
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->set_cs = rt_set_cs;
+ ctrl->transfer_one = transfer_one;
+
+ err = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (err) {
+ dev_err(&pdev->dev, "Could not register SPI controller\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+
+static const struct of_device_id realtek_rtl_spi_of_ids[] = {
+ { .compatible = "realtek,rtl8380-spi" },
+ { .compatible = "realtek,rtl8382-spi" },
+ { .compatible = "realtek,rtl8391-spi" },
+ { .compatible = "realtek,rtl8392-spi" },
+ { .compatible = "realtek,rtl8393-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, realtek_rtl_spi_of_ids);
+
+static struct platform_driver realtek_rtl_spi_driver = {
+ .probe = realtek_rtl_spi_probe,
+ .driver = {
+ .name = "realtek-rtl-spi",
+ .of_match_table = realtek_rtl_spi_of_ids,
+ },
+};
+
+module_platform_driver(realtek_rtl_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bert Vermeulen <bert@biot.com>");
+MODULE_DESCRIPTION("Realtek RTL SPI driver");
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 09d8e92400eb..936ef54e0903 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -566,7 +566,7 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
rs->slave_abort = true;
- complete(&ctlr->xfer_completion);
+ spi_finalize_current_transfer(ctlr);
return 0;
}
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 3579675485a5..c53138ce0030 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -176,15 +176,14 @@ static int rpcif_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int rpcif_spi_suspend(struct device *dev)
+static int __maybe_unused rpcif_spi_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
return spi_controller_suspend(ctlr);
}
-static int rpcif_spi_resume(struct device *dev)
+static int __maybe_unused rpcif_spi_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
@@ -192,17 +191,15 @@ static int rpcif_spi_resume(struct device *dev)
}
static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
-#define DEV_PM_OPS (&rpcif_spi_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
static struct platform_driver rpcif_spi_driver = {
.probe = rpcif_spi_probe,
.remove = rpcif_spi_remove,
.driver = {
.name = "rpc-if-spi",
- .pm = DEV_PM_OPS,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &rpcif_spi_pm_ops,
+#endif
},
};
module_platform_driver(rpcif_spi_driver);
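
The __maybe_unused conversion compiles the suspend/resume handlers unconditionally (so they cannot bitrot) and lets the compiler discard them when CONFIG_PM_SLEEP is off; only the .pm pointer stays conditional. The pattern, sketched generically:

static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
static int __maybe_unused foo_resume(struct device *dev)  { return 0; }
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);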
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index b2579af0e3eb..41ed9ff8fad0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -259,11 +259,13 @@ static const u32 sh_msiof_spi_div_array[] = {
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
- unsigned long parent_rate, u32 spi_hz)
+ struct spi_transfer *t)
{
+ unsigned long parent_rate = clk_get_rate(p->clk);
+ unsigned int div_pow = p->min_div_pow;
+ u32 spi_hz = t->speed_hz;
unsigned long div;
u32 brps, scr;
- unsigned int div_pow = p->min_div_pow;
if (!spi_hz || !parent_rate) {
WARN(1, "Invalid clock rate parameters %lu and %u\n",
@@ -292,6 +294,8 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
brps = 32;
}
+ t->effective_speed_hz = parent_rate / (brps << div_pow);
+
scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
@@ -923,7 +927,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_slave(p->ctlr))
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
+ sh_msiof_spi_set_clk_regs(p, t);
while (ctlr->dma_tx && len > 15) {
/*
@@ -1258,6 +1262,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
+ unsigned long clksrc;
int i;
int ret;
@@ -1333,6 +1338,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ clksrc = clk_get_rate(p->clk);
+ ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, 1024);
+ ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, 1 << p->min_div_pow);
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = p->info->num_chipselect;
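
Filling in t->effective_speed_hz tells the core the SCK rate actually achieved, so SCK-unit delays are computed correctly. It follows directly from the divider setup: effective_speed_hz = parent_rate / (brps << div_pow). With an assumed 48 MHz parent clock, brps = 12 and div_pow = 2: 48000000 / (12 << 2) = 1000000, i.e. a 1 MHz SCK.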
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
deleted file mode 100644
index 8419e6722e17..000000000000
--- a/drivers/spi/spi-sirf.c
+++ /dev/null
@@ -1,1236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SPI bus driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/reset.h>
-
-#define DRIVER_NAME "sirfsoc_spi"
-/* SPI CTRL register defines */
-#define SIRFSOC_SPI_SLV_MODE BIT(16)
-#define SIRFSOC_SPI_CMD_MODE BIT(17)
-#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
-#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
-#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
-#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
-#define SIRFSOC_SPI_TRAN_MSB BIT(22)
-#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
-#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
-#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
-#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
-#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
-#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
-#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
-#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
-#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
-#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
-
-/* Interrupt Enable */
-#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
-#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
-#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
-#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
-#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
-#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
-#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
-#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
-#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
-#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
-#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
-
-/* Interrupt status */
-#define SIRFSOC_SPI_RX_DONE BIT(0)
-#define SIRFSOC_SPI_TX_DONE BIT(1)
-#define SIRFSOC_SPI_RX_OFLOW BIT(2)
-#define SIRFSOC_SPI_TX_UFLOW BIT(3)
-#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
-#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
-#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
-#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
-#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
-#define SIRFSOC_SPI_FRM_END BIT(10)
-
-/* TX RX enable */
-#define SIRFSOC_SPI_RX_EN BIT(0)
-#define SIRFSOC_SPI_TX_EN BIT(1)
-#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
-
-#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
-#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
-
-/* FIFO OPs */
-#define SIRFSOC_SPI_FIFO_RESET BIT(0)
-#define SIRFSOC_SPI_FIFO_START BIT(1)
-
-/* FIFO CTRL */
-#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
-#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
-#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
-/* USP related */
-#define SIRFSOC_USP_SYNC_MODE BIT(0)
-#define SIRFSOC_USP_SLV_MODE BIT(1)
-#define SIRFSOC_USP_LSB BIT(4)
-#define SIRFSOC_USP_EN BIT(5)
-#define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6)
-#define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7)
-#define SIRFSOC_USP_CS_HIGH_VALID BIT(9)
-#define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11)
-#define SIRFSOC_USP_TFS_IO_MODE BIT(14)
-#define SIRFSOC_USP_TFS_IO_INPUT BIT(19)
-
-#define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF
-#define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF
-#define SIRFSOC_USP_RXD_DELAY_OFFSET 0
-#define SIRFSOC_USP_TXD_DELAY_OFFSET 8
-#define SIRFSOC_USP_RXD_DELAY_LEN 1
-#define SIRFSOC_USP_TXD_DELAY_LEN 1
-#define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21
-#define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF
-#define SIRFSOC_USP_CLK_10_11_MASK 0x3
-#define SIRFSOC_USP_CLK_10_11_OFFSET 30
-#define SIRFSOC_USP_CLK_12_15_MASK 0xF
-#define SIRFSOC_USP_CLK_12_15_OFFSET 24
-
-#define SIRFSOC_USP_TX_DATA_OFFSET 0
-#define SIRFSOC_USP_TX_SYNC_OFFSET 8
-#define SIRFSOC_USP_TX_FRAME_OFFSET 16
-#define SIRFSOC_USP_TX_SHIFTER_OFFSET 24
-
-#define SIRFSOC_USP_TX_DATA_MASK 0xFF
-#define SIRFSOC_USP_TX_SYNC_MASK 0xFF
-#define SIRFSOC_USP_TX_FRAME_MASK 0xFF
-#define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F
-
-#define SIRFSOC_USP_RX_DATA_OFFSET 0
-#define SIRFSOC_USP_RX_FRAME_OFFSET 8
-#define SIRFSOC_USP_RX_SHIFTER_OFFSET 16
-
-#define SIRFSOC_USP_RX_DATA_MASK 0xFF
-#define SIRFSOC_USP_RX_FRAME_MASK 0xFF
-#define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F
-#define SIRFSOC_USP_CS_HIGH_VALUE BIT(1)
-
-#define SIRFSOC_SPI_FIFO_SC_OFFSET 0
-#define SIRFSOC_SPI_FIFO_LC_OFFSET 10
-#define SIRFSOC_SPI_FIFO_HC_OFFSET 20
-
-#define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset))
-#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1))
-#define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1)
-#define SIRFSOC_SPI_FIFO_THD_OFFSET 2
-#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \
- ((val) & (s)->fifo_level_chk_mask)
-
-enum sirf_spi_type {
- SIRF_REAL_SPI,
- SIRF_USP_SPI_P2,
- SIRF_USP_SPI_A7,
-};
-
-/*
- * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
- * due to the limitation of dma controller
- */
-
-#define ALIGNED(x) (!((u32)x & 0x3))
-#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
- ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
-
-#define SIRFSOC_MAX_CMD_BYTES 4
-#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
-
-struct sirf_spi_register {
- /*SPI and USP-SPI common*/
- u32 tx_rx_en;
- u32 int_en;
- u32 int_st;
- u32 tx_dma_io_ctrl;
- u32 tx_dma_io_len;
- u32 txfifo_ctrl;
- u32 txfifo_level_chk;
- u32 txfifo_op;
- u32 txfifo_st;
- u32 txfifo_data;
- u32 rx_dma_io_ctrl;
- u32 rx_dma_io_len;
- u32 rxfifo_ctrl;
- u32 rxfifo_level_chk;
- u32 rxfifo_op;
- u32 rxfifo_st;
- u32 rxfifo_data;
- /*SPI self*/
- u32 spi_ctrl;
- u32 spi_cmd;
- u32 spi_dummy_delay_ctrl;
- /*USP-SPI self*/
- u32 usp_mode1;
- u32 usp_mode2;
- u32 usp_tx_frame_ctrl;
- u32 usp_rx_frame_ctrl;
- u32 usp_pin_io_data;
- u32 usp_risc_dsp_mode;
- u32 usp_async_param_reg;
- u32 usp_irda_x_mode_div;
- u32 usp_sm_cfg;
- u32 usp_int_en_clr;
-};
-
-static const struct sirf_spi_register real_spi_register = {
- .tx_rx_en = 0x8,
- .int_en = 0xc,
- .int_st = 0x10,
- .tx_dma_io_ctrl = 0x100,
- .tx_dma_io_len = 0x104,
- .txfifo_ctrl = 0x108,
- .txfifo_level_chk = 0x10c,
- .txfifo_op = 0x110,
- .txfifo_st = 0x114,
- .txfifo_data = 0x118,
- .rx_dma_io_ctrl = 0x120,
- .rx_dma_io_len = 0x124,
- .rxfifo_ctrl = 0x128,
- .rxfifo_level_chk = 0x12c,
- .rxfifo_op = 0x130,
- .rxfifo_st = 0x134,
- .rxfifo_data = 0x138,
- .spi_ctrl = 0x0,
- .spi_cmd = 0x4,
- .spi_dummy_delay_ctrl = 0x144,
-};
-
-static const struct sirf_spi_register usp_spi_register = {
- .tx_rx_en = 0x10,
- .int_en = 0x14,
- .int_st = 0x18,
- .tx_dma_io_ctrl = 0x100,
- .tx_dma_io_len = 0x104,
- .txfifo_ctrl = 0x108,
- .txfifo_level_chk = 0x10c,
- .txfifo_op = 0x110,
- .txfifo_st = 0x114,
- .txfifo_data = 0x118,
- .rx_dma_io_ctrl = 0x120,
- .rx_dma_io_len = 0x124,
- .rxfifo_ctrl = 0x128,
- .rxfifo_level_chk = 0x12c,
- .rxfifo_op = 0x130,
- .rxfifo_st = 0x134,
- .rxfifo_data = 0x138,
- .usp_mode1 = 0x0,
- .usp_mode2 = 0x4,
- .usp_tx_frame_ctrl = 0x8,
- .usp_rx_frame_ctrl = 0xc,
- .usp_pin_io_data = 0x1c,
- .usp_risc_dsp_mode = 0x20,
- .usp_async_param_reg = 0x24,
- .usp_irda_x_mode_div = 0x28,
- .usp_sm_cfg = 0x2c,
- .usp_int_en_clr = 0x140,
-};
-
-struct sirfsoc_spi {
- struct spi_bitbang bitbang;
- struct completion rx_done;
- struct completion tx_done;
-
- void __iomem *base;
- u32 ctrl_freq; /* SPI controller clock speed */
- struct clk *clk;
-
- /* rx & tx bufs from the spi_transfer */
- const void *tx;
- void *rx;
-
- /* place received word into rx buffer */
- void (*rx_word) (struct sirfsoc_spi *);
- /* get word from tx buffer for sending */
- void (*tx_word) (struct sirfsoc_spi *);
-
- /* number of words left to be tranmitted/received */
- unsigned int left_tx_word;
- unsigned int left_rx_word;
-
- /* rx & tx DMA channels */
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
- dma_addr_t src_start;
- dma_addr_t dst_start;
- int word_width; /* in bytes */
-
- /*
- * if tx size is not more than 4 and rx size is NULL, use
- * command model
- */
- bool tx_by_cmd;
- bool hw_cs;
- enum sirf_spi_type type;
- const struct sirf_spi_register *regs;
- unsigned int fifo_size;
- /* fifo empty offset is (fifo full offset + 1)*/
- unsigned int fifo_full_offset;
- /* fifo_level_chk_mask is (fifo_size/4 - 1) */
- unsigned int fifo_level_chk_mask;
- unsigned int dat_max_frm_len;
-};
-
-struct sirf_spi_comp_data {
- const struct sirf_spi_register *regs;
- enum sirf_spi_type type;
- unsigned int dat_max_frm_len;
- unsigned int fifo_size;
- void (*hwinit)(struct sirfsoc_spi *sspi);
-};
-
-static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
-{
- /* reset USP and let USP can operate */
- writel(readl(sspi->base + sspi->regs->usp_mode1) &
- ~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
- writel(readl(sspi->base + sspi->regs->usp_mode1) |
- SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
-}
-
-static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
-{
- u32 data;
- u8 *rx = sspi->rx;
-
- data = readl(sspi->base + sspi->regs->rxfifo_data);
-
- if (rx) {
- *rx++ = (u8) data;
- sspi->rx = rx;
- }
-
- sspi->left_rx_word--;
-}
-
-static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
-{
- u32 data = 0;
- const u8 *tx = sspi->tx;
-
- if (tx) {
- data = *tx++;
- sspi->tx = tx;
- }
- writel(data, sspi->base + sspi->regs->txfifo_data);
- sspi->left_tx_word--;
-}
-
-static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
-{
- u32 data;
- u16 *rx = sspi->rx;
-
- data = readl(sspi->base + sspi->regs->rxfifo_data);
-
- if (rx) {
- *rx++ = (u16) data;
- sspi->rx = rx;
- }
-
- sspi->left_rx_word--;
-}
-
-static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
-{
- u32 data = 0;
- const u16 *tx = sspi->tx;
-
- if (tx) {
- data = *tx++;
- sspi->tx = tx;
- }
-
- writel(data, sspi->base + sspi->regs->txfifo_data);
- sspi->left_tx_word--;
-}
-
-static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
-{
- u32 data;
- u32 *rx = sspi->rx;
-
- data = readl(sspi->base + sspi->regs->rxfifo_data);
-
- if (rx) {
- *rx++ = (u32) data;
- sspi->rx = rx;
- }
-
- sspi->left_rx_word--;
-
-}
-
-static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
-{
- u32 data = 0;
- const u32 *tx = sspi->tx;
-
- if (tx) {
- data = *tx++;
- sspi->tx = tx;
- }
-
- writel(data, sspi->base + sspi->regs->txfifo_data);
- sspi->left_tx_word--;
-}
-
-static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
-{
- struct sirfsoc_spi *sspi = dev_id;
- u32 spi_stat;
-
- spi_stat = readl(sspi->base + sspi->regs->int_st);
- if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
- && (spi_stat & SIRFSOC_SPI_FRM_END)) {
- complete(&sspi->tx_done);
- writel(0x0, sspi->base + sspi->regs->int_en);
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- return IRQ_HANDLED;
- }
- /* Error Conditions */
- if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
- spi_stat & SIRFSOC_SPI_TX_UFLOW) {
- complete(&sspi->tx_done);
- complete(&sspi->rx_done);
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- case SIRF_USP_SPI_P2:
- writel(0x0, sspi->base + sspi->regs->int_en);
- break;
- case SIRF_USP_SPI_A7:
- writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
- break;
- }
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- return IRQ_HANDLED;
- }
- if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
- complete(&sspi->tx_done);
- while (!(readl(sspi->base + sspi->regs->int_st) &
- SIRFSOC_SPI_RX_IO_DMA))
- cpu_relax();
- complete(&sspi->rx_done);
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- case SIRF_USP_SPI_P2:
- writel(0x0, sspi->base + sspi->regs->int_en);
- break;
- case SIRF_USP_SPI_A7:
- writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
- break;
- }
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
-
- return IRQ_HANDLED;
-}
-
-static void spi_sirfsoc_dma_fini_callback(void *data)
-{
- struct completion *dma_complete = data;
-
- complete(dma_complete);
-}
-
-static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct sirfsoc_spi *sspi;
- int timeout = t->len * 10;
- u32 cmd;
-
- sspi = spi_master_get_devdata(spi->master);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
- memcpy(&cmd, sspi->tx, t->len);
- if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
- cmd = cpu_to_be32(cmd) >>
- ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
- if (sspi->word_width == 2 && t->len == 4 &&
- (!(spi->mode & SPI_LSB_FIRST)))
- cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
- writel(cmd, sspi->base + sspi->regs->spi_cmd);
- writel(SIRFSOC_SPI_FRM_END_INT_EN,
- sspi->base + sspi->regs->int_en);
- writel(SIRFSOC_SPI_CMD_TX_EN,
- sspi->base + sspi->regs->tx_rx_en);
- if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
- dev_err(&spi->dev, "cmd transfer timeout\n");
- return;
- }
- sspi->left_rx_word -= t->len;
-}
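
/*
 * Illustrative sketch (editor's addition): the net effect of the
 * memcpy() + cpu_to_be32() + shift above on a little-endian CPU is a
 * command right-aligned in 32 bits with its first byte most
 * significant. Standalone, hypothetical helper and example values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_cmd_msb_first(const uint8_t *buf, unsigned int len)
{
	uint32_t cmd = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		cmd = (cmd << 8) | buf[i];
	return cmd;
}

int main(void)
{
	const uint8_t cmd[] = { 0x9f, 0x00, 0x01 };

	/* prints 0x009f0001: three command bytes, right-aligned */
	printf("0x%08x\n", (unsigned int)pack_cmd_msb_first(cmd, 3));
	return 0;
}
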
-
-static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct sirfsoc_spi *sspi;
- struct dma_async_tx_descriptor *rx_desc, *tx_desc;
- int timeout = t->len * 10;
-
- sspi = spi_master_get_devdata(spi->master);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->txfifo_op);
- writel(0, sspi->base + sspi->regs->int_en);
- break;
- case SIRF_USP_SPI_P2:
- writel(0x0, sspi->base + sspi->regs->rxfifo_op);
- writel(0x0, sspi->base + sspi->regs->txfifo_op);
- writel(0, sspi->base + sspi->regs->int_en);
- break;
- case SIRF_USP_SPI_A7:
- writel(0x0, sspi->base + sspi->regs->rxfifo_op);
- writel(0x0, sspi->base + sspi->regs->txfifo_op);
- writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
- break;
- }
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- if (sspi->left_tx_word < sspi->dat_max_frm_len) {
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- writel(readl(sspi->base + sspi->regs->spi_ctrl) |
- SIRFSOC_SPI_ENA_AUTO_CLR |
- SIRFSOC_SPI_MUL_DAT_MODE,
- sspi->base + sspi->regs->spi_ctrl);
- writel(sspi->left_tx_word - 1,
- sspi->base + sspi->regs->tx_dma_io_len);
- writel(sspi->left_tx_word - 1,
- sspi->base + sspi->regs->rx_dma_io_len);
- break;
- case SIRF_USP_SPI_P2:
- case SIRF_USP_SPI_A7:
- /* USP simulates SPI; tx/rx_dma_io_len is in bytes */
- writel(sspi->left_tx_word * sspi->word_width,
- sspi->base + sspi->regs->tx_dma_io_len);
- writel(sspi->left_tx_word * sspi->word_width,
- sspi->base + sspi->regs->rx_dma_io_len);
- break;
- }
- } else {
- if (sspi->type == SIRF_REAL_SPI)
- writel(readl(sspi->base + sspi->regs->spi_ctrl),
- sspi->base + sspi->regs->spi_ctrl);
- writel(0, sspi->base + sspi->regs->tx_dma_io_len);
- writel(0, sspi->base + sspi->regs->rx_dma_io_len);
- }
- sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
- (t->tx_buf != t->rx_buf) ?
- DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
- rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
- sspi->dst_start, t->len, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- rx_desc->callback = spi_sirfsoc_dma_fini_callback;
- rx_desc->callback_param = &sspi->rx_done;
-
- sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
- (t->tx_buf != t->rx_buf) ?
- DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
- tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
- sspi->src_start, t->len, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- tx_desc->callback = spi_sirfsoc_dma_fini_callback;
- tx_desc->callback_param = &sspi->tx_done;
-
- dmaengine_submit(tx_desc);
- dmaengine_submit(rx_desc);
- dma_async_issue_pending(sspi->tx_chan);
- dma_async_issue_pending(sspi->rx_chan);
- writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
- sspi->base + sspi->regs->tx_rx_en);
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7) {
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->txfifo_op);
- }
- if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
- dev_err(&spi->dev, "transfer timeout\n");
- dmaengine_terminate_all(sspi->rx_chan);
- } else
- sspi->left_rx_word = 0;
- /*
- * We only wait for the tx-done event when transferring by DMA.
- * For PIO, rx data is produced by writing tx data, so once rx is
- * done, tx finished earlier.
- */
- if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
- dev_err(&spi->dev, "transfer timeout\n");
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7)
- writel(0, sspi->base + sspi->regs->tx_rx_en);
- dmaengine_terminate_all(sspi->tx_chan);
- }
- dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
- dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
- /* TX, RX FIFO stop */
- writel(0, sspi->base + sspi->regs->rxfifo_op);
- writel(0, sspi->base + sspi->regs->txfifo_op);
- if (sspi->left_tx_word >= sspi->dat_max_frm_len)
- writel(0, sspi->base + sspi->regs->tx_rx_en);
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7)
- writel(0, sspi->base + sspi->regs->tx_rx_en);
-}
-
-static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct sirfsoc_spi *sspi;
- int timeout = t->len * 10;
- unsigned int data_units;
-
- sspi = spi_master_get_devdata(spi->master);
- do {
- writel(SIRFSOC_SPI_FIFO_RESET,
- sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_RESET,
- sspi->base + sspi->regs->txfifo_op);
- switch (sspi->type) {
- case SIRF_USP_SPI_P2:
- writel(0x0, sspi->base + sspi->regs->rxfifo_op);
- writel(0x0, sspi->base + sspi->regs->txfifo_op);
- writel(0, sspi->base + sspi->regs->int_en);
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- writel(min((sspi->left_tx_word * sspi->word_width),
- sspi->fifo_size),
- sspi->base + sspi->regs->tx_dma_io_len);
- writel(min((sspi->left_rx_word * sspi->word_width),
- sspi->fifo_size),
- sspi->base + sspi->regs->rx_dma_io_len);
- break;
- case SIRF_USP_SPI_A7:
- writel(0x0, sspi->base + sspi->regs->rxfifo_op);
- writel(0x0, sspi->base + sspi->regs->txfifo_op);
- writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- writel(min((sspi->left_tx_word * sspi->word_width),
- sspi->fifo_size),
- sspi->base + sspi->regs->tx_dma_io_len);
- writel(min((sspi->left_rx_word * sspi->word_width),
- sspi->fifo_size),
- sspi->base + sspi->regs->rx_dma_io_len);
- break;
- case SIRF_REAL_SPI:
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->txfifo_op);
- writel(0, sspi->base + sspi->regs->int_en);
- writel(readl(sspi->base + sspi->regs->int_st),
- sspi->base + sspi->regs->int_st);
- writel(readl(sspi->base + sspi->regs->spi_ctrl) |
- SIRFSOC_SPI_MUL_DAT_MODE |
- SIRFSOC_SPI_ENA_AUTO_CLR,
- sspi->base + sspi->regs->spi_ctrl);
- data_units = sspi->fifo_size / sspi->word_width;
- writel(min(sspi->left_tx_word, data_units) - 1,
- sspi->base + sspi->regs->tx_dma_io_len);
- writel(min(sspi->left_rx_word, data_units) - 1,
- sspi->base + sspi->regs->rx_dma_io_len);
- break;
- }
- while (!((readl(sspi->base + sspi->regs->txfifo_st)
- & SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
- sspi->left_tx_word)
- sspi->tx_word(sspi);
- writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
- SIRFSOC_SPI_TX_UFLOW_INT_EN |
- SIRFSOC_SPI_RX_OFLOW_INT_EN |
- SIRFSOC_SPI_RX_IO_DMA_INT_EN,
- sspi->base + sspi->regs->int_en);
- writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
- sspi->base + sspi->regs->tx_rx_en);
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7) {
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + sspi->regs->txfifo_op);
- }
- if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
- !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
- dev_err(&spi->dev, "transfer timeout\n");
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7)
- writel(0, sspi->base + sspi->regs->tx_rx_en);
- break;
- }
- while (!((readl(sspi->base + sspi->regs->rxfifo_st)
- & SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
- sspi->left_rx_word)
- sspi->rx_word(sspi);
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7)
- writel(0, sspi->base + sspi->regs->tx_rx_en);
- writel(0, sspi->base + sspi->regs->rxfifo_op);
- writel(0, sspi->base + sspi->regs->txfifo_op);
- } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
-}
-
-static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
-{
- struct sirfsoc_spi *sspi;
-
- sspi = spi_master_get_devdata(spi->master);
- sspi->tx = t->tx_buf;
- sspi->rx = t->rx_buf;
- sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
- reinit_completion(&sspi->rx_done);
- reinit_completion(&sspi->tx_done);
- /*
- * If the data is transferred via the command register (rx_buf is
- * NULL), just fill the command register and wait for completion.
- */
- if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
- spi_sirfsoc_cmd_transfer(spi, t);
- else if (IS_DMA_VALID(t))
- spi_sirfsoc_dma_transfer(spi, t);
- else
- spi_sirfsoc_pio_transfer(spi, t);
-
- return t->len - sspi->left_rx_word * sspi->word_width;
-}
-
-static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
-{
- struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
-
- if (sspi->hw_cs) {
- u32 regval;
-
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- regval = readl(sspi->base + sspi->regs->spi_ctrl);
- switch (value) {
- case BITBANG_CS_ACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval |= SIRFSOC_SPI_CS_IO_OUT;
- else
- regval &= ~SIRFSOC_SPI_CS_IO_OUT;
- break;
- case BITBANG_CS_INACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval &= ~SIRFSOC_SPI_CS_IO_OUT;
- else
- regval |= SIRFSOC_SPI_CS_IO_OUT;
- break;
- }
- writel(regval, sspi->base + sspi->regs->spi_ctrl);
- break;
- case SIRF_USP_SPI_P2:
- case SIRF_USP_SPI_A7:
- regval = readl(sspi->base +
- sspi->regs->usp_pin_io_data);
- switch (value) {
- case BITBANG_CS_ACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval |= SIRFSOC_USP_CS_HIGH_VALUE;
- else
- regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
- break;
- case BITBANG_CS_INACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
- else
- regval |= SIRFSOC_USP_CS_HIGH_VALUE;
- break;
- }
- writel(regval,
- sspi->base + sspi->regs->usp_pin_io_data);
- break;
- }
- } else {
- switch (value) {
- case BITBANG_CS_ACTIVE:
- gpio_direction_output(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 1 : 0);
- break;
- case BITBANG_CS_INACTIVE:
- gpio_direction_output(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- break;
- }
- }
-}
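
/*
 * Illustrative sketch (editor's addition) of the chip-select polarity
 * handling above: the GPIO level that selects the device depends only
 * on whether we are activating and on the SPI_CS_HIGH flag.
 * Hypothetical standalone helper.
 */
#include <stdbool.h>
#include <stdio.h>

static int cs_gpio_level(bool activate, bool cs_high)
{
	/* active-high CS: 1 selects; active-low (default): 0 selects */
	return activate == cs_high;
}

int main(void)
{
	/* default polarity: drive 0 to select, 1 to deselect */
	printf("%d %d\n", cs_gpio_level(true, false),
	       cs_gpio_level(false, false)); /* prints: 0 1 */
	return 0;
}
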
-
-static int spi_sirfsoc_config_mode(struct spi_device *spi)
-{
- struct sirfsoc_spi *sspi;
- u32 regval, usp_mode1;
-
- sspi = spi_master_get_devdata(spi->master);
- regval = readl(sspi->base + sspi->regs->spi_ctrl);
- usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
- if (!(spi->mode & SPI_CS_HIGH)) {
- regval |= SIRFSOC_SPI_CS_IDLE_STAT;
- usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
- } else {
- regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
- usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
- }
- if (!(spi->mode & SPI_LSB_FIRST)) {
- regval |= SIRFSOC_SPI_TRAN_MSB;
- usp_mode1 &= ~SIRFSOC_USP_LSB;
- } else {
- regval &= ~SIRFSOC_SPI_TRAN_MSB;
- usp_mode1 |= SIRFSOC_USP_LSB;
- }
- if (spi->mode & SPI_CPOL) {
- regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
- usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
- } else {
- regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
- usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
- }
- /*
- * Data should be driven at least 1/2 cycle before the fetch edge
- * to make sure that data gets stable at the fetch edge.
- */
- if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
- (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
- regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
- usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
- SIRFSOC_USP_RXD_FALLING_EDGE);
- } else {
- regval |= SIRFSOC_SPI_DRV_POS_EDGE;
- usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
- SIRFSOC_USP_TXD_FALLING_EDGE);
- }
- writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
- SIRFSOC_SPI_FIFO_SC_OFFSET) |
- (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
- SIRFSOC_SPI_FIFO_LC_OFFSET) |
- (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
- SIRFSOC_SPI_FIFO_HC_OFFSET),
- sspi->base + sspi->regs->txfifo_level_chk);
- writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
- SIRFSOC_SPI_FIFO_SC_OFFSET) |
- (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
- SIRFSOC_SPI_FIFO_LC_OFFSET) |
- (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
- SIRFSOC_SPI_FIFO_HC_OFFSET),
- sspi->base + sspi->regs->rxfifo_level_chk);
- /*
- * Never set hardware CS mode here: in hardware CS mode the CS
- * signal cannot be controlled by the driver.
- */
- switch (sspi->type) {
- case SIRF_REAL_SPI:
- regval |= SIRFSOC_SPI_CS_IO_MODE;
- writel(regval, sspi->base + sspi->regs->spi_ctrl);
- break;
- case SIRF_USP_SPI_P2:
- case SIRF_USP_SPI_A7:
- usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
- usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
- usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
- writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
- break;
- }
-
- return 0;
-}
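
/*
 * Illustrative sketch (editor's addition) of the edge selection
 * above: SPI modes 0 and 3 (CPOL == CPHA) fetch data on the rising
 * edge, so TX data must be driven on the preceding falling edge, and
 * the other way around for modes 1 and 2. Hypothetical helper.
 */
#include <stdio.h>

static int drive_on_rising_edge(int cpol, int cpha)
{
	return cpol != cpha; /* modes 1 and 2 fetch on the falling edge */
}

int main(void)
{
	printf("mode0:%d mode3:%d mode1:%d\n",
	       drive_on_rising_edge(0, 0), drive_on_rising_edge(1, 1),
	       drive_on_rising_edge(0, 1));
	return 0; /* prints: mode0:0 mode3:0 mode1:1 */
}
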
-
-static int
-spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
-{
- struct sirfsoc_spi *sspi;
- u8 bits_per_word = 0;
- int hz = 0;
- u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;
-
- sspi = spi_master_get_devdata(spi->master);
-
- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
- hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
-
- usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
- if (regval > 0xFFFF || regval < 0) {
- dev_err(&spi->dev, "Speed %d not supported\n", hz);
- return -EINVAL;
- }
- switch (bits_per_word) {
- case 8:
- regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
- sspi->rx_word = spi_sirfsoc_rx_word_u8;
- sspi->tx_word = spi_sirfsoc_tx_word_u8;
- break;
- case 12:
- case 16:
- regval |= (bits_per_word == 12) ?
- SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
- SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
- sspi->rx_word = spi_sirfsoc_rx_word_u16;
- sspi->tx_word = spi_sirfsoc_tx_word_u16;
- break;
- case 32:
- regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
- sspi->rx_word = spi_sirfsoc_rx_word_u32;
- sspi->tx_word = spi_sirfsoc_tx_word_u32;
- break;
- default:
- dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
- return -EINVAL;
- }
- sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
- txfifo_ctrl = (((sspi->fifo_size / 2) &
- SIRFSOC_SPI_FIFO_THD_MASK(sspi))
- << SIRFSOC_SPI_FIFO_THD_OFFSET) |
- (sspi->word_width >> 1);
- rxfifo_ctrl = (((sspi->fifo_size / 2) &
- SIRFSOC_SPI_FIFO_THD_MASK(sspi))
- << SIRFSOC_SPI_FIFO_THD_OFFSET) |
- (sspi->word_width >> 1);
- writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
- writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
- if (sspi->type == SIRF_USP_SPI_P2 ||
- sspi->type == SIRF_USP_SPI_A7) {
- tx_frm_ctl = 0;
- tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
- << SIRFSOC_USP_TX_DATA_OFFSET;
- tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
- - 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
- SIRFSOC_USP_TX_SYNC_OFFSET;
- tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
- + 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
- SIRFSOC_USP_TX_FRAME_OFFSET;
- tx_frm_ctl |= ((bits_per_word - 1) &
- SIRFSOC_USP_TX_SHIFTER_MASK) <<
- SIRFSOC_USP_TX_SHIFTER_OFFSET;
- rx_frm_ctl = 0;
- rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
- << SIRFSOC_USP_RX_DATA_OFFSET;
- rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
- + 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
- SIRFSOC_USP_RX_FRAME_OFFSET;
- rx_frm_ctl |= ((bits_per_word - 1)
- & SIRFSOC_USP_RX_SHIFTER_MASK) <<
- SIRFSOC_USP_RX_SHIFTER_OFFSET;
- writel(tx_frm_ctl | (((usp_mode2 >> 10) &
- SIRFSOC_USP_CLK_10_11_MASK) <<
- SIRFSOC_USP_CLK_10_11_OFFSET),
- sspi->base + sspi->regs->usp_tx_frame_ctrl);
- writel(rx_frm_ctl | (((usp_mode2 >> 12) &
- SIRFSOC_USP_CLK_12_15_MASK) <<
- SIRFSOC_USP_CLK_12_15_OFFSET),
- sspi->base + sspi->regs->usp_rx_frame_ctrl);
- writel(readl(sspi->base + sspi->regs->usp_mode2) |
- ((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
- SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
- (SIRFSOC_USP_RXD_DELAY_LEN <<
- SIRFSOC_USP_RXD_DELAY_OFFSET) |
- (SIRFSOC_USP_TXD_DELAY_LEN <<
- SIRFSOC_USP_TXD_DELAY_OFFSET),
- sspi->base + sspi->regs->usp_mode2);
- }
- if (sspi->type == SIRF_REAL_SPI)
- writel(regval, sspi->base + sspi->regs->spi_ctrl);
- spi_sirfsoc_config_mode(spi);
- if (sspi->type == SIRF_REAL_SPI) {
- if (t && t->tx_buf && !t->rx_buf &&
- (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
- sspi->tx_by_cmd = true;
- writel(readl(sspi->base + sspi->regs->spi_ctrl) |
- (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
- SIRFSOC_SPI_CMD_MODE),
- sspi->base + sspi->regs->spi_ctrl);
- } else {
- sspi->tx_by_cmd = false;
- writel(readl(sspi->base + sspi->regs->spi_ctrl) &
- ~SIRFSOC_SPI_CMD_MODE,
- sspi->base + sspi->regs->spi_ctrl);
- }
- }
- if (IS_DMA_VALID(t)) {
- /* Enable DMA mode for RX, TX */
- writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
- writel(SIRFSOC_SPI_RX_DMA_FLUSH,
- sspi->base + sspi->regs->rx_dma_io_ctrl);
- } else {
- /* Enable IO mode for RX, TX */
- writel(SIRFSOC_SPI_IO_MODE_SEL,
- sspi->base + sspi->regs->tx_dma_io_ctrl);
- writel(SIRFSOC_SPI_IO_MODE_SEL,
- sspi->base + sspi->regs->rx_dma_io_ctrl);
- }
- return 0;
-}
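
/*
 * Illustrative sketch (editor's addition) of the bit-rate divisor set
 * up above: the controller clock is divided by 2 * (div + 1), so
 * div = ctrl_freq / (2 * hz) - 1. The integer division truncates, so
 * the achieved rate can land slightly above the request. Example
 * values are assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ctrl_freq = 150000000; /* assumed controller clock */
	unsigned long hz = 10000000;         /* requested SPI clock */
	unsigned long div = ctrl_freq / (2 * hz) - 1;

	/* prints: div=6 actual=10714285 Hz */
	printf("div=%lu actual=%lu Hz\n", div, ctrl_freq / (2 * (div + 1)));
	return 0;
}
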
-
-static int spi_sirfsoc_setup(struct spi_device *spi)
-{
- struct sirfsoc_spi *sspi;
- int ret = 0;
-
- sspi = spi_master_get_devdata(spi->master);
- if (spi->cs_gpio == -ENOENT)
- sspi->hw_cs = true;
- else {
- sspi->hw_cs = false;
- if (!spi_get_ctldata(spi)) {
- void *cs = kmalloc(sizeof(int), GFP_KERNEL);
- if (!cs) {
- ret = -ENOMEM;
- goto exit;
- }
- ret = gpio_is_valid(spi->cs_gpio);
- if (!ret) {
- dev_err(&spi->dev, "no valid gpio\n");
- ret = -ENOENT;
- goto exit;
- }
- ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
- if (ret) {
- dev_err(&spi->dev, "failed to request gpio\n");
- goto exit;
- }
- spi_set_ctldata(spi, cs);
- }
- }
- spi_sirfsoc_config_mode(spi);
- spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
-exit:
- return ret;
-}
-
-static void spi_sirfsoc_cleanup(struct spi_device *spi)
-{
- if (spi_get_ctldata(spi)) {
- gpio_free(spi->cs_gpio);
- kfree(spi_get_ctldata(spi));
- }
-}
-
-static const struct sirf_spi_comp_data sirf_real_spi = {
- .regs = &real_spi_register,
- .type = SIRF_REAL_SPI,
- .dat_max_frm_len = 64 * 1024,
- .fifo_size = 256,
-};
-
-static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
- .regs = &usp_spi_register,
- .type = SIRF_USP_SPI_P2,
- .dat_max_frm_len = 1024 * 1024,
- .fifo_size = 128,
- .hwinit = sirfsoc_usp_hwinit,
-};
-
-static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
- .regs = &usp_spi_register,
- .type = SIRF_USP_SPI_A7,
- .dat_max_frm_len = 1024 * 1024,
- .fifo_size = 512,
- .hwinit = sirfsoc_usp_hwinit,
-};
-
-static const struct of_device_id spi_sirfsoc_of_match[] = {
- { .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
- { .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
- { .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
- {}
-};
-MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
-
-static int spi_sirfsoc_probe(struct platform_device *pdev)
-{
- struct sirfsoc_spi *sspi;
- struct spi_master *master;
- const struct sirf_spi_comp_data *spi_comp_data;
- int irq;
- int ret;
- const struct of_device_id *match;
-
- ret = device_reset(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "SPI reset failed!\n");
- return ret;
- }
-
- master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
- if (!master) {
- dev_err(&pdev->dev, "Unable to allocate SPI master\n");
- return -ENOMEM;
- }
- match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
- platform_set_drvdata(pdev, master);
- sspi = spi_master_get_devdata(master);
- spi_comp_data = match->data;
- sspi->regs = spi_comp_data->regs;
- sspi->type = spi_comp_data->type;
- sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
- sspi->fifo_size = spi_comp_data->fifo_size;
- sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
- sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- sspi->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(sspi->base)) {
- ret = PTR_ERR(sspi->base);
- goto free_master;
- }
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = -ENXIO;
- goto free_master;
- }
- ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
- DRIVER_NAME, sspi);
- if (ret)
- goto free_master;
-
- sspi->bitbang.master = master;
- sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
- sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
- sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
- sspi->bitbang.master->setup = spi_sirfsoc_setup;
- sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
- SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
- master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
- master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
-
- /* request DMA channels */
- sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR(sspi->rx_chan)) {
- dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = PTR_ERR(sspi->rx_chan);
- goto free_master;
- }
- sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR(sspi->tx_chan)) {
- dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = PTR_ERR(sspi->tx_chan);
- goto free_rx_dma;
- }
-
- sspi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(sspi->clk)) {
- ret = PTR_ERR(sspi->clk);
- goto free_tx_dma;
- }
- clk_prepare_enable(sspi->clk);
- if (spi_comp_data->hwinit)
- spi_comp_data->hwinit(sspi);
- sspi->ctrl_freq = clk_get_rate(sspi->clk);
-
- init_completion(&sspi->rx_done);
- init_completion(&sspi->tx_done);
-
- ret = spi_bitbang_start(&sspi->bitbang);
- if (ret)
- goto free_clk;
- dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
-
- return 0;
-free_clk:
- clk_disable_unprepare(sspi->clk);
- clk_put(sspi->clk);
-free_tx_dma:
- dma_release_channel(sspi->tx_chan);
-free_rx_dma:
- dma_release_channel(sspi->rx_chan);
-free_master:
- spi_master_put(master);
-
- return ret;
-}
-
-static int spi_sirfsoc_remove(struct platform_device *pdev)
-{
- struct spi_master *master;
- struct sirfsoc_spi *sspi;
-
- master = platform_get_drvdata(pdev);
- sspi = spi_master_get_devdata(master);
- spi_bitbang_stop(&sspi->bitbang);
- clk_disable_unprepare(sspi->clk);
- clk_put(sspi->clk);
- dma_release_channel(sspi->rx_chan);
- dma_release_channel(sspi->tx_chan);
- spi_master_put(master);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int spi_sirfsoc_suspend(struct device *dev)
-{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
- int ret;
-
- ret = spi_master_suspend(master);
- if (ret)
- return ret;
-
- clk_disable(sspi->clk);
- return 0;
-}
-
-static int spi_sirfsoc_resume(struct device *dev)
-{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
-
- clk_enable(sspi->clk);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
- spi_sirfsoc_resume);
-
-static struct platform_driver spi_sirfsoc_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .pm = &spi_sirfsoc_pm_ops,
- .of_match_table = spi_sirfsoc_of_match,
- },
- .probe = spi_sirfsoc_probe,
- .remove = spi_sirfsoc_remove,
-};
-module_platform_driver(spi_sirfsoc_driver);
-MODULE_DESCRIPTION("SiRF SoC SPI master driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
-MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
-MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..25c076461011 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -5,6 +5,7 @@
// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -94,27 +95,22 @@
#define STM32H7_SPI_CR1_SSI BIT(12)
/* STM32H7_SPI_CR2 bit fields */
-#define STM32H7_SPI_CR2_TSIZE_SHIFT 0
#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
+#define STM32H7_SPI_TSIZE_MAX GENMASK(15, 0)
/* STM32H7_SPI_CFG1 bit fields */
-#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
-#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
-#define STM32H7_SPI_CFG1_MBR_SHIFT 28
#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
+#define STM32H7_SPI_CFG1_MBR_SHIFT 28
#define STM32H7_SPI_CFG1_MBR_MIN 0
#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
/* STM32H7_SPI_CFG2 bit fields */
-#define STM32H7_SPI_CFG2_MIDI_SHIFT 4
#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
-#define STM32H7_SPI_CFG2_COMM_SHIFT 17
#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
-#define STM32H7_SPI_CFG2_SP_SHIFT 19
#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
#define STM32H7_SPI_CFG2_MASTER BIT(22)
#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
@@ -140,7 +136,6 @@
#define STM32H7_SPI_SR_OVR BIT(6)
#define STM32H7_SPI_SR_MODF BIT(9)
#define STM32H7_SPI_SR_SUSP BIT(11)
-#define STM32H7_SPI_SR_RXPLVL_SHIFT 13
#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
#define STM32H7_SPI_SR_RXWNE BIT(15)
@@ -167,8 +162,6 @@
#define SPI_3WIRE_TX 3
#define SPI_3WIRE_RX 4
-#define SPI_1HZ_NS 1000000000
-
/*
* use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
* without fifo buffers.
@@ -268,7 +261,6 @@ struct stm32_spi_cfg {
* @base: virtual memory area
* @clk: hw kernel clock feeding the SPI clock generator
* @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
- * @rst: SPI controller reset line
* @lock: prevent I/O concurrent access
* @irq: SPI controller interrupt line
* @fifo_size: size of the embedded fifo in bytes
@@ -294,7 +286,6 @@ struct stm32_spi {
void __iomem *base;
struct clk *clk;
u32 clk_rate;
- struct reset_control *rst;
spinlock_t lock; /* prevent I/O concurrent access */
int irq;
unsigned int fifo_size;
@@ -417,9 +408,7 @@ static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
- max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
- STM32H7_SPI_CFG1_DSIZE_SHIFT;
- max_bpw += 1;
+ max_bpw = FIELD_GET(STM32H7_SPI_CFG1_DSIZE, cfg1) + 1;
spin_unlock_irqrestore(&spi->lock, flags);
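
/*
 * Illustrative sketch (editor's addition) of the FIELD_GET()/
 * FIELD_PREP() conversions made throughout this patch: both derive
 * the shift from the mask itself, which is why the separate *_SHIFT
 * constants can be dropped. Portable stand-ins using a GCC/Clang
 * builtin; the kernel versions live in <linux/bitfield.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t rxplvl = GENMASK32(14, 13); /* like STM32H7_SPI_SR_RXPLVL */
	uint32_t sr = field_prep(rxplvl, 2) | 0x1;

	printf("%u\n", (unsigned int)field_get(rxplvl, sr)); /* prints 2 */
	return 0;
}
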
@@ -473,34 +462,14 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
*/
static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
{
- u32 fthlv, half_fifo, packet;
+ u32 packet, bpw;
/* data packet should not exceed 1/2 of fifo space */
- half_fifo = (spi->fifo_size / 2);
-
- /* data_packet should not exceed transfer length */
- if (half_fifo > xfer_len)
- packet = xfer_len;
- else
- packet = half_fifo;
-
- if (spi->cur_bpw <= 8)
- fthlv = packet;
- else if (spi->cur_bpw <= 16)
- fthlv = packet / 2;
- else
- fthlv = packet / 4;
+ packet = clamp(xfer_len, 1U, spi->fifo_size / 2);
/* align packet size with data registers access */
- if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
- else
- fthlv -= (fthlv % 4); /* multiple of 4 */
-
- if (!fthlv)
- fthlv = 1;
-
- return fthlv;
+ bpw = DIV_ROUND_UP(spi->cur_bpw, 8);
+ return DIV_ROUND_UP(packet, bpw);
}
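
/*
 * Illustrative sketch (editor's addition) of the simplified FIFO
 * threshold computation above: clamp the packet to half the FIFO,
 * then express it in data words of DIV_ROUND_UP(bits_per_word, 8)
 * bytes. Plain C stand-ins for the kernel clamp()/DIV_ROUND_UP()
 * macros; example values only.
 */
#include <stdio.h>

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int fifo_size = 16, cur_bpw = 16, xfer_len = 5;
	unsigned int packet = xfer_len;

	if (packet < 1)
		packet = 1;
	if (packet > fifo_size / 2)
		packet = fifo_size / 2;

	/* 5 bytes at 16 bits/word -> threshold of 3 data words */
	printf("fthlv=%u\n", div_round_up(packet, div_round_up(cur_bpw, 8)));
	return 0;
}
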
/**
@@ -607,8 +576,7 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
- u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
- STM32H7_SPI_SR_RXPLVL_SHIFT;
+ u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
while ((spi->rx_len > 0) &&
((sr & STM32H7_SPI_SR_RXP) ||
@@ -635,8 +603,7 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
}
sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
- rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
- STM32H7_SPI_SR_RXPLVL_SHIFT;
+ rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
}
dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
@@ -928,8 +895,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
mask |= STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
- dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
- sr, ier);
+ dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
}
@@ -956,15 +923,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
if (sr & STM32H7_SPI_SR_OVR) {
- dev_warn(spi->dev, "Overrun: received value discarded\n");
- if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32h7_spi_read_rxfifo(spi, false);
- /*
- * If overrun is detected while using DMA, it means that
- * something went wrong, so stop the current transfer
- */
- if (spi->cur_usedma)
- end = true;
+ dev_err(spi->dev, "Overrun: RX data lost\n");
+ end = true;
}
if (sr & STM32H7_SPI_SR_EOT) {
@@ -1028,10 +988,24 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
clrb |= spi->cfg->regs->lsb_first.mask;
dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
- spi_dev->mode & SPI_CPOL,
- spi_dev->mode & SPI_CPHA,
- spi_dev->mode & SPI_LSB_FIRST,
- spi_dev->mode & SPI_CS_HIGH);
+ !!(spi_dev->mode & SPI_CPOL),
+ !!(spi_dev->mode & SPI_CPHA),
+ !!(spi_dev->mode & SPI_LSB_FIRST),
+ !!(spi_dev->mode & SPI_CS_HIGH));
+
+ /* On STM32H7, messages should not exceed a maximum size set
+ * afterward via the set_number_of_data function. To ensure this,
+ * split large messages into several smaller ones.
+ */
+ if (spi->cfg->set_number_of_data) {
+ int ret;
+
+ ret = spi_split_transfers_maxsize(master, msg,
+ STM32H7_SPI_TSIZE_MAX,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&spi->lock, flags);
@@ -1405,15 +1379,13 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
bpw = spi->cur_bpw - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
- cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
- STM32H7_SPI_CFG1_DSIZE;
+ cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_DSIZE, bpw);
spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
fthlv = spi->cur_fthlv - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
- cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
- STM32H7_SPI_CFG1_FTHLV;
+ cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_FTHLV, fthlv);
writel_relaxed(
(readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
@@ -1431,8 +1403,7 @@ static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
u32 clrb = 0, setb = 0;
clrb |= spi->cfg->regs->br.mask;
- setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) &
- spi->cfg->regs->br.mask;
+ setb |= (mbrdiv << spi->cfg->regs->br.shift) & spi->cfg->regs->br.mask;
writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
~clrb) | setb,
@@ -1523,8 +1494,7 @@ static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
}
cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
- cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
- STM32H7_SPI_CFG2_COMM;
+ cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_COMM, mode);
writel_relaxed(
(readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
@@ -1546,15 +1516,16 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
if ((len > 1) && (spi->cur_midi > 0)) {
- u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
- u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
- (u32)STM32H7_SPI_CFG2_MIDI >>
- STM32H7_SPI_CFG2_MIDI_SHIFT);
+ u32 sck_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->cur_speed);
+ u32 midi = min_t(u32,
+ DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
+ FIELD_GET(STM32H7_SPI_CFG2_MIDI,
+ STM32H7_SPI_CFG2_MIDI));
+
dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
sck_period_ns, midi, midi * sck_period_ns);
- cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
- STM32H7_SPI_CFG2_MIDI;
+ cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_MIDI, midi);
}
writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
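
/*
 * Illustrative sketch (editor's addition) of the inter-data idleness
 * conversion above: the requested delay in nanoseconds becomes a
 * number of SCK cycles, capped by what the 4-bit MIDI field can hold
 * (15). Example values; plain C stand-ins for the kernel macros.
 */
#include <stdio.h>

int main(void)
{
	unsigned int speed_hz = 10000000;   /* 100 ns SCK period */
	unsigned int cur_midi_ns = 250;     /* requested idle time */
	unsigned int midi_max = 15;         /* 4-bit CFG2_MIDI field */
	unsigned int sck_ns = (1000000000u + speed_hz - 1) / speed_hz;
	unsigned int midi = (cur_midi_ns + sck_ns - 1) / sck_ns;

	if (midi > midi_max)
		midi = midi_max;
	/* 250 ns at 100 ns/cycle -> 3 idle cycles */
	printf("midi=%u (=%u ns)\n", midi, midi * sck_ns);
	return 0;
}
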
@@ -1569,14 +1540,8 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
- u32 cr2_clrb = 0, cr2_setb = 0;
-
- if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
- STM32H7_SPI_CR2_TSIZE_SHIFT)) {
- cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
- cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
- writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
- ~cr2_clrb) | cr2_setb,
+ if (nb_words <= STM32H7_SPI_TSIZE_MAX) {
+ writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words),
spi->base + STM32H7_SPI_CR2);
} else {
return -EMSGSIZE;
@@ -1677,6 +1642,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
struct stm32_spi *spi = spi_master_get_devdata(master);
int ret;
+ /* Don't do anything on zero-byte transfers */
+ if (transfer->len == 0)
+ return 0;
+
spi->tx_buf = transfer->tx_buf;
spi->rx_buf = transfer->rx_buf;
spi->tx_len = spi->tx_buf ? transfer->len : 0;
@@ -1831,6 +1800,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
+ struct reset_control *rst;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
@@ -1892,11 +1862,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
goto err_clk_disable;
}
- spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (!IS_ERR(spi->rst)) {
- reset_control_assert(spi->rst);
+ rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (rst) {
+ if (IS_ERR(rst)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "failed to get reset\n");
+ goto err_clk_disable;
+ }
+
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(spi->rst);
+ reset_control_deassert(rst);
}
if (spi->cfg->has_fifo)
@@ -1960,12 +1936,6 @@ static int stm32_spi_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- if (!master->cs_gpiods) {
- dev_err(&pdev->dev, "no CS gpios available\n");
- ret = -EINVAL;
- goto err_pm_disable;
- }
-
dev_info(&pdev->dev, "driver initialized\n");
return 0;
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index 8cdca6ab8098..ea706d9629cb 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
+
+ if (!enable)
+ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
+
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
}
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
new file mode 100644
index 000000000000..2f806f4b2c34
--- /dev/null
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -0,0 +1,1410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020 NVIDIA CORPORATION.
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define QSPI_COMMAND1 0x000
+#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define QSPI_PACKED BIT(5)
+#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
+#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
+#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
+#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
+#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
+#define QSPI_SDR_DDR_SEL BIT(9)
+#define QSPI_TX_EN BIT(11)
+#define QSPI_RX_EN BIT(12)
+#define QSPI_CS_SW_VAL BIT(20)
+#define QSPI_CS_SW_HW BIT(21)
+#define QSPI_CONTROL_MODE_0 (0 << 28)
+#define QSPI_CONTROL_MODE_3 (3 << 28)
+#define QSPI_CONTROL_MODE_MASK (3 << 28)
+#define QSPI_M_S BIT(30)
+#define QSPI_PIO BIT(31)
+
+#define QSPI_COMMAND2 0x004
+#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
+#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)
+
+#define QSPI_CS_TIMING1 0x008
+#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+
+#define QSPI_CS_TIMING2 0x00c
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)
+
+#define QSPI_TRANS_STATUS 0x010
+#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define QSPI_RDY BIT(30)
+
+#define QSPI_FIFO_STATUS 0x014
+#define QSPI_RX_FIFO_EMPTY BIT(0)
+#define QSPI_RX_FIFO_FULL BIT(1)
+#define QSPI_TX_FIFO_EMPTY BIT(2)
+#define QSPI_TX_FIFO_FULL BIT(3)
+#define QSPI_RX_FIFO_UNF BIT(4)
+#define QSPI_RX_FIFO_OVF BIT(5)
+#define QSPI_TX_FIFO_UNF BIT(6)
+#define QSPI_TX_FIFO_OVF BIT(7)
+#define QSPI_ERR BIT(8)
+#define QSPI_TX_FIFO_FLUSH BIT(14)
+#define QSPI_RX_FIFO_FLUSH BIT(15)
+#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
+#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)
+
+#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
+ QSPI_RX_FIFO_OVF | \
+ QSPI_TX_FIFO_UNF | \
+ QSPI_TX_FIFO_OVF)
+#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
+ QSPI_TX_FIFO_EMPTY)
+
+#define QSPI_TX_DATA 0x018
+#define QSPI_RX_DATA 0x01c
+
+#define QSPI_DMA_CTL 0x020
+#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
+#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
+#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
+#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
+#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)
+
+#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
+#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
+#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
+#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
+#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)
+
+#define QSPI_DMA_EN BIT(31)
+
+#define QSPI_DMA_BLK 0x024
+#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+
+#define QSPI_TX_FIFO 0x108
+#define QSPI_RX_FIFO 0x188
+
+#define QSPI_FIFO_DEPTH 64
+
+#define QSPI_INTR_MASK 0x18c
+#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
+#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
+#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
+#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
+#define QSPI_INTR_RDY_MASK BIT(29)
+#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
+ QSPI_INTR_RX_FIFO_OVF_MASK | \
+ QSPI_INTR_TX_FIFO_UNF_MASK | \
+ QSPI_INTR_TX_FIFO_OVF_MASK)
+
+#define QSPI_MISC_REG 0x194
+#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
+#define QSPI_DUMMY_CYCLES_MAX 0xff
+
+#define DATA_DIR_TX BIT(0)
+#define DATA_DIR_RX BIT(1)
+
+#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
+
+struct tegra_qspi_client_data {
+ int tx_clk_tap_delay;
+ int rx_clk_tap_delay;
+};
+
+struct tegra_qspi {
+ struct device *dev;
+ struct spi_master *master;
+ /* lock to protect data accessed by irq */
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned int irq;
+
+ u32 cur_speed;
+ unsigned int cur_pos;
+ unsigned int words_per_32bit;
+ unsigned int bytes_per_word;
+ unsigned int curr_dma_words;
+ unsigned int cur_direction;
+
+ unsigned int cur_rx_pos;
+ unsigned int cur_tx_pos;
+
+ unsigned int dma_buf_size;
+ unsigned int max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ bool use_dma;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+ u32 def_command2_reg;
+ u32 spi_cs_timing1;
+ u32 spi_cs_timing2;
+ u8 dummy_cycles;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
+{
+ return readl(tqspi->base + offset);
+}
+
+static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
+{
+ writel(value, tqspi->base + offset);
+
+ /* read the register back to make sure the write has completed */
+ if (offset != QSPI_TX_FIFO)
+ readl(tqspi->base + QSPI_COMMAND1);
+}
+
+static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
+{
+ u32 value;
+
+ /* write 1 to clear status register */
+ value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
+ tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
+
+ value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
+ if (!(value & QSPI_INTR_RDY_MASK)) {
+ value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
+ tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
+ }
+
+ /* clear fifo status error if any */
+ value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ if (value & QSPI_ERR)
+ tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
+}
+
+static unsigned int
+tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int max_word, max_len, total_fifo_words;
+ unsigned int remain_len = t->len - tqspi->cur_pos;
+ unsigned int bits_per_word = t->bits_per_word;
+
+ tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ /*
+ * The Tegra QSPI controller supports packed and unpacked mode
+ * transfers. Packed mode is used for transfers of 8, 16, or 32 bits
+ * per word when the transfer is at least four bytes long; all other
+ * transfers use unpacked mode.
+ */
+
+ if ((bits_per_word == 8 || bits_per_word == 16 ||
+ bits_per_word == 32) && t->len > 3) {
+ tqspi->is_packed = true;
+ tqspi->words_per_32bit = 32 / bits_per_word;
+ } else {
+ tqspi->is_packed = false;
+ tqspi->words_per_32bit = 1;
+ }
+
+ if (tqspi->is_packed) {
+ max_len = min(remain_len, tqspi->max_buf_size);
+ tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
+ max_word = min(max_word, tqspi->max_buf_size / 4);
+ tqspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+
+ return total_fifo_words;
+}
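
/*
 * Illustrative sketch (editor's addition) of the packed/unpacked
 * bookkeeping above for a concrete case: 10 bytes at 16 bits per
 * word. Packed mode fits two 16-bit words per 32-bit FIFO entry;
 * unpacked mode spends one FIFO entry per word. Hypothetical
 * standalone example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int len = 10, bits_per_word = 16;
	unsigned int bytes_per_word = (bits_per_word + 7) / 8;
	unsigned int words = len / bytes_per_word;

	/* packed: 5 words, ceil(10/4) = 3 FIFO entries */
	printf("packed: %u words, %u fifo entries\n", words, (len + 3) / 4);
	/* unpacked: 5 words, 5 FIFO entries */
	printf("unpacked: %u words, %u fifo entries\n", words, words);
	return 0;
}
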
+
+static unsigned int
+tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int written_words, fifo_words_left, count;
+ unsigned int len, tx_empty_count, max_n_32bit, i;
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ u32 fifo_status;
+
+ fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tqspi->is_packed) {
+ fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
+ written_words = min(fifo_words_left, tqspi->curr_dma_words);
+ len = written_words * tqspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(len, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; (i < 4) && len; i++, len--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
+ }
+
+ tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
+ } else {
+ unsigned int write_bytes;
+ u8 bytes_per_word = tqspi->bytes_per_word;
+
+ max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ len = written_words * tqspi->bytes_per_word;
+ if (len > t->len - tqspi->cur_pos)
+ len = t->len - tqspi->cur_pos;
+ write_bytes = len;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
+ }
+
+ tqspi->cur_tx_pos += write_bytes;
+ }
+
+ return written_words;
+}
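
/*
 * Illustrative sketch (editor's addition) of the inner packing loop
 * above: up to four bytes are placed into one 32-bit FIFO word,
 * least significant byte first. Standalone, hypothetical example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t buf[] = { 0x11, 0x22, 0x33 };
	unsigned int len = sizeof(buf), i;
	uint32_t x = 0;

	for (i = 0; i < 4 && len; i++, len--)
		x |= (uint32_t)buf[i] << (i * 8);

	printf("fifo word = 0x%08x\n", (unsigned int)x); /* 0x00332211 */
	return 0;
}
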
+
+static unsigned int
+tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
+ unsigned int len, rx_full_count, count, i;
+ unsigned int read_words = 0;
+ u32 fifo_status, x;
+
+ fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tqspi->is_packed) {
+ len = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
+
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i * 8) & 0xff;
+ }
+
+ read_words += tqspi->curr_dma_words;
+ tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tqspi->bytes_per_word;
+ unsigned int read_bytes;
+
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tqspi->cur_pos)
+ len = t->len - tqspi->cur_pos;
+ read_bytes = len;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
+
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ *rx_buf++ = (x >> (i * 8)) & 0xff;
+ }
+
+ read_words += rx_full_count;
+ tqspi->cur_rx_pos += read_bytes;
+ }
+
+ return read_words;
+}
+
+static void
+tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
+ tqspi->dma_buf_size, DMA_TO_DEVICE);
+
+ /*
+ * In packed mode, each FIFO word may contain multiple packets,
+ * depending on the bits per word, so every byte in a FIFO word is
+ * valid.
+ *
+ * In unpacked mode, each FIFO word contains a single packet;
+ * depending on the bits per word, the remaining bits in the FIFO
+ * word are invalid and ignored by the hardware.
+ */
+ if (tqspi->is_packed) {
+ tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ unsigned int i, count, consume, write_bytes;
+
+ /*
+ * Fill tx_dma_buf with a single packet per word, based on the
+ * bits per word, from the SPI core tx_buf.
+ */
+ consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ if (consume > t->len - tqspi->cur_pos)
+ consume = t->len - tqspi->cur_pos;
+ write_bytes = consume;
+ for (count = 0; count < tqspi->curr_dma_words; count++) {
+ u32 x = 0;
+
+ for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tqspi->tx_dma_buf[count] = x;
+ }
+
+ tqspi->cur_tx_pos += write_bytes;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
+ tqspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void
+tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tqspi->is_packed) {
+ tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned int i, count, consume, read_bytes;
+
+ /*
+ * Each FIFO word contains a single data packet.
+ * Skip the invalid bits in each FIFO word based on the bits per
+ * word and align the bytes while filling the SPI core rx_buf.
+ */
+ consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ if (consume > t->len - tqspi->cur_pos)
+ consume = t->len - tqspi->cur_pos;
+ read_bytes = consume;
+ for (count = 0; count < tqspi->curr_dma_words; count++) {
+ u32 x = tqspi->rx_dma_buf[count] & rx_mask;
+
+ for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
+ *rx_buf++ = (x >> (i * 8)) & 0xff;
+ }
+
+ tqspi->cur_rx_pos += read_bytes;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_qspi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
+{
+ dma_addr_t tx_dma_phys;
+
+ reinit_completion(&tqspi->tx_dma_complete);
+
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+
+ tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
+ len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tqspi->tx_dma_desc) {
+ dev_err(tqspi->dev, "Unable to get TX descriptor\n");
+ return -EIO;
+ }
+
+ tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
+ tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
+ dmaengine_submit(tqspi->tx_dma_desc);
+ dma_async_issue_pending(tqspi->tx_dma_chan);
+
+ return 0;
+}
+
+static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
+{
+ dma_addr_t rx_dma_phys;
+
+ reinit_completion(&tqspi->rx_dma_complete);
+
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+
+ tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
+ len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tqspi->rx_dma_desc) {
+ dev_err(tqspi->dev, "Unable to get RX descriptor\n");
+ return -EIO;
+ }
+
+ tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
+ tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
+ dmaengine_submit(tqspi->rx_dma_desc);
+ dma_async_issue_pending(tqspi->rx_dma_chan);
+
+ return 0;
+}
+
+static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
+{
+ void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
+ u32 val;
+
+ val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
+ return 0;
+
+ val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
+ tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
+
+ if (!atomic)
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
+ 1000, 1000000);
+
+ return readl_relaxed_poll_timeout_atomic(addr, val,
+ (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
+ 1000, 1000000);
+}
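
/*
 * Illustrative sketch (editor's addition) of the poll-until-empty
 * pattern behind readl_relaxed_poll_timeout() above: re-read a status
 * word until both FIFO-empty bits are set or the poll budget runs
 * out. Everything below (the bits, the fake draining) is
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define FIFO_EMPTY 0x3u /* assumed RX-empty | TX-empty bits */

static unsigned int polls; /* stands in for the hardware draining */

static uint32_t read_status(void)
{
	return ++polls >= 3 ? FIFO_EMPTY : 0; /* empty on the 3rd read */
}

static int poll_fifo_empty(unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++)
		if ((read_status() & FIFO_EMPTY) == FIFO_EMPTY)
			return 0; /* drained; real code also delays here */
	return -1;                /* the kernel helper returns -ETIMEDOUT */
}

int main(void)
{
	printf("%d\n", poll_fifo_empty(10)); /* prints 0 */
	return 0;
}
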
+
+static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
+{
+ u32 intr_mask;
+
+ intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
+ intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
+ tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
+}
+
+static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
+ unsigned int len;
+
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+
+ if (t->tx_buf) {
+ t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tqspi->dev, t->tx_dma))
+ return -ENOMEM;
+ }
+
+ if (t->rx_buf) {
+ t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int len;
+
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+}
+
+static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ struct dma_slave_config dma_sconfig = { 0 };
+ unsigned int len;
+ u8 dma_burst;
+ int ret = 0;
+ u32 val;
+
+ if (tqspi->is_packed) {
+ ret = tegra_qspi_dma_map_xfer(tqspi, t);
+ if (ret < 0)
+ return ret;
+ }
+
+ val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
+
+ tegra_qspi_unmask_irq(tqspi);
+
+ if (tqspi->is_packed)
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+ else
+ len = tqspi->curr_dma_words * 4;
+
+ /* set attention level based on length of transfer */
+ val = 0;
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
+ tqspi->dma_control_reg = val;
+
+ dma_sconfig.device_fc = true;
+ if (tqspi->cur_direction & DATA_DIR_TX) {
+ dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX) {
+ dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size,
+ DMA_FROM_DEVICE);
+
+ ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ return ret;
+ }
+ }
+
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+
+ tqspi->is_curr_dma_xfer = true;
+ tqspi->dma_control_reg = val;
+ val |= QSPI_DMA_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
+
+ return ret;
+}
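
/*
 * Illustrative sketch (editor's addition) of the trigger-level
 * selection above: the DMA burst follows the transfer length's
 * alignment in 32-bit words. A length that is not a multiple of
 * 16 bytes uses single-word bursts, an odd multiple of 16 uses
 * 4-word bursts, and anything else uses 8-word bursts.
 */
#include <stdio.h>

static unsigned int pick_burst(unsigned int len)
{
	if (len & 0xf)
		return 1;
	if ((len >> 4) & 0x1)
		return 4;
	return 8;
}

int main(void)
{
	printf("%u %u %u\n", pick_burst(10), pick_burst(16), pick_burst(32));
	return 0; /* prints: 1 4 8 */
}
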
+
+static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int cur_words;
+
+ if (qspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
+ else
+ cur_words = qspi->curr_dma_words;
+
+ val = QSPI_DMA_BLK_SET(cur_words - 1);
+ tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
+
+ tegra_qspi_unmask_irq(qspi);
+
+ qspi->is_curr_dma_xfer = false;
+ val = qspi->command1_reg;
+ val |= QSPI_PIO;
+ tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
+
+ return 0;
+}
+
+static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
+{
+ if (tqspi->tx_dma_buf) {
+ dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
+ tqspi->tx_dma_buf, tqspi->tx_dma_phys);
+ tqspi->tx_dma_buf = NULL;
+ }
+
+ if (tqspi->tx_dma_chan) {
+ dma_release_channel(tqspi->tx_dma_chan);
+ tqspi->tx_dma_chan = NULL;
+ }
+
+ if (tqspi->rx_dma_buf) {
+ dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
+ tqspi->rx_dma_buf, tqspi->rx_dma_phys);
+ tqspi->rx_dma_buf = NULL;
+ }
+
+ if (tqspi->rx_dma_chan) {
+ dma_release_channel(tqspi->rx_dma_chan);
+ tqspi->rx_dma_chan = NULL;
+ }
+}
+
+static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
+{
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_phys;
+ u32 *dma_buf;
+ int err;
+
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->rx_dma_chan = dma_chan;
+
+ dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ tqspi->rx_dma_buf = dma_buf;
+ tqspi->rx_dma_phys = dma_phys;
+
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+
+ dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ tqspi->tx_dma_buf = dma_buf;
+ tqspi->tx_dma_phys = dma_phys;
+ tqspi->use_dma = true;
+
+ return 0;
+
+err_out:
+ tegra_qspi_deinit_dma(tqspi);
+
+ if (err != -EPROBE_DEFER) {
+ dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
+ dev_err(tqspi->dev, "falling back to PIO\n");
+ return 0;
+ }
+
+ return err;
+}
+
+static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
+ bool is_first_of_msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi_client_data *cdata = spi->controller_data;
+ u32 command1, command2, speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ u32 tx_tap = 0, rx_tap = 0;
+ int req_mode;
+
+ if (speed != tqspi->cur_speed) {
+ clk_set_rate(tqspi->clk, speed);
+ tqspi->cur_speed = speed;
+ }
+
+ tqspi->cur_pos = 0;
+ tqspi->cur_rx_pos = 0;
+ tqspi->cur_tx_pos = 0;
+ tqspi->curr_xfer = t;
+
+ if (is_first_of_msg) {
+ tegra_qspi_mask_clear_irq(tqspi);
+
+ command1 = tqspi->def_command1_reg;
+ command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~QSPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_3)
+ command1 |= QSPI_CONTROL_MODE_3;
+ else
+ command1 |= QSPI_CONTROL_MODE_0;
+
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= QSPI_CS_SW_VAL;
+ else
+ command1 &= ~QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
+
+ if (cdata && cdata->tx_clk_tap_delay)
+ tx_tap = cdata->tx_clk_tap_delay;
+
+ if (cdata && cdata->rx_clk_tap_delay)
+ rx_tap = cdata->rx_clk_tap_delay;
+
+ command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
+ if (command2 != tqspi->def_command2_reg)
+ tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
+
+ } else {
+ command1 = tqspi->command1_reg;
+ command1 &= ~QSPI_BIT_LENGTH(~0);
+ command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ command1 &= ~QSPI_SDR_DDR_SEL;
+
+ return command1;
+}
+
+static int tegra_qspi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, u32 command1)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ unsigned int total_fifo_words;
+ u8 bus_width = 0;
+ int ret;
+
+ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+
+ command1 &= ~QSPI_PACKED;
+ if (tqspi->is_packed)
+ command1 |= QSPI_PACKED;
+ tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
+
+ tqspi->cur_direction = 0;
+
+ command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
+ if (t->rx_buf) {
+ command1 |= QSPI_RX_EN;
+ tqspi->cur_direction |= DATA_DIR_RX;
+ bus_width = t->rx_nbits;
+ }
+
+ if (t->tx_buf) {
+ command1 |= QSPI_TX_EN;
+ tqspi->cur_direction |= DATA_DIR_TX;
+ bus_width = t->tx_nbits;
+ }
+
+ command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
+
+ if (bus_width == SPI_NBITS_QUAD)
+ command1 |= QSPI_INTERFACE_WIDTH_QUAD;
+ else if (bus_width == SPI_NBITS_DUAL)
+ command1 |= QSPI_INTERFACE_WIDTH_DUAL;
+ else
+ command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
+
+ tqspi->command1_reg = command1;
+
+ tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
+
+ ret = tegra_qspi_flush_fifos(tqspi, false);
+ if (ret < 0)
+ return ret;
+
+ if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
+ ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ else
+ ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+
+ return ret;
+}
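+
+/*
+ * Note: DMA is only used when the transfer needs more than a FIFO's
+ * worth of words; anything up to QSPI_FIFO_DEPTH words goes through the
+ * PIO path even when DMA channels were successfully set up.
+ */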
+
+static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
+{
+ struct tegra_qspi_client_data *cdata;
+ struct device_node *slave_np = spi->dev.of_node;
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return NULL;
+
+ of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+ return cdata;
+}
+
+static void tegra_qspi_cleanup(struct spi_device *spi)
+{
+ struct tegra_qspi_client_data *cdata = spi->controller_data;
+
+ spi->controller_data = NULL;
+ kfree(cdata);
+}
+
+static int tegra_qspi_setup(struct spi_device *spi)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi_client_data *cdata = spi->controller_data;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(tqspi->dev);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ if (!cdata) {
+ cdata = tegra_qspi_parse_cdata_dt(spi);
+ spi->controller_data = cdata;
+ }
+
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+	/* keep the default CS state inactive */
+ val = tqspi->def_command1_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~QSPI_CS_SW_VAL;
+ else
+ val |= QSPI_CS_SW_VAL;
+
+ tqspi->def_command1_reg = val;
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+
+ pm_runtime_put(tqspi->dev);
+
+ return 0;
+}
+
+static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
+{
+ dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
+ dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_COMMAND1),
+ tegra_qspi_readl(tqspi, QSPI_COMMAND2));
+ dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
+ tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
+ dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
+ tegra_qspi_readl(tqspi, QSPI_MISC_REG));
+ dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
+ tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
+}
+
+static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
+{
+ dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
+ tegra_qspi_dump_regs(tqspi);
+ tegra_qspi_flush_fifos(tqspi, true);
+ reset_control_assert(tqspi->rst);
+ udelay(2);
+ reset_control_deassert(tqspi->rst);
+}
+
+static void tegra_qspi_transfer_end(struct spi_device *spi)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
+
+ if (cs_val)
+ tqspi->command1_reg |= QSPI_CS_SW_VAL;
+ else
+ tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+}
+
+static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *transfer;
+ bool is_first_msg = true;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ tqspi->tx_status = 0;
+ tqspi->rx_status = 0;
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ struct spi_transfer *xfer = transfer;
+ u8 dummy_bytes = 0;
+ u32 cmd1;
+
+ tqspi->dummy_cycles = 0;
+		/*
+		 * The Tegra QSPI hardware can transfer dummy bytes after the actual
+		 * data, based on the dummy clock cycles programmed in the QSPI_MISC
+		 * register. So, if the next transfer carries dummy data, program its
+		 * dummy clock cycles along with the current transfer and skip that
+		 * next transfer.
+		 */
+ if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ struct spi_transfer *next_xfer;
+
+ next_xfer = list_next_entry(xfer, transfer_list);
+ if (next_xfer->dummy_data) {
+ u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
+
+ if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
+ tqspi->dummy_cycles = dummy_cycles;
+ dummy_bytes = next_xfer->len;
+ transfer = next_xfer;
+ }
+ }
+ }
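+
+		/*
+		 * Worked example: a 4-byte dummy transfer sent at tx_nbits = 4
+		 * (quad) yields 4 * 8 / 4 = 8 dummy clock cycles, programmed
+		 * into QSPI_MISC along with the current transfer.
+		 */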
+
+ reinit_completion(&tqspi->xfer_completion);
+
+ cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
+ goto complete_xfer;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tqspi->xfer_completion,
+ QSPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tqspi->dev, "transfer timeout: %d\n", ret);
+ if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ tegra_qspi_handle_error(tqspi);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ tegra_qspi_handle_error(tqspi);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ msg->actual_length += xfer->len + dummy_bytes;
+
+complete_xfer:
+ if (ret < 0) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ goto exit;
+ }
+
+ if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ /* de-activate CS after last transfer only when cs_change is not set */
+ if (!xfer->cs_change) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ } else if (xfer->cs_change) {
+			/* de-activate CS between transfers only when cs_change is set */
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ }
+
+ ret = 0;
+exit:
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
+{
+ struct spi_transfer *t = tqspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ tegra_qspi_handle_error(tqspi);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->cur_pos = tqspi->cur_tx_pos;
+ else
+ tqspi->cur_pos = tqspi->cur_rx_pos;
+
+ if (tqspi->cur_pos == t->len) {
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+ tegra_qspi_start_cpu_based_transfer(tqspi, t);
+exit:
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
+{
+ struct spi_transfer *t = tqspi->curr_xfer;
+ unsigned int total_fifo_words;
+ unsigned long flags;
+ long wait_status;
+ int err = 0;
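+	/*
+	 * err accumulates per-direction failures (1 for the TX path, 2 for
+	 * the RX path); the error handling below only tests for non-zero.
+	 */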
+
+ if (tqspi->cur_direction & DATA_DIR_TX) {
+ if (tqspi->tx_status) {
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ dev_err(tqspi->dev, "failed TX DMA transfer\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX) {
+ if (tqspi->rx_status) {
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ dev_err(tqspi->dev, "failed RX DMA transfer\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+ if (err) {
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+ tegra_qspi_handle_error(tqspi);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->cur_pos = tqspi->cur_tx_pos;
+ else
+ tqspi->cur_pos = tqspi->cur_rx_pos;
+
+ if (tqspi->cur_pos == t->len) {
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+
+ /* continue transfer in current message */
+ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+ if (total_fifo_words > QSPI_FIFO_DEPTH)
+ err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ else
+ err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_qspi *tqspi = context_data;
+
+ tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
+
+ tegra_qspi_mask_clear_irq(tqspi);
+
+ if (!tqspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tqspi);
+
+ return handle_dma_based_xfer(tqspi);
+}
+
+static const struct of_device_id tegra_qspi_of_match[] = {
+ { .compatible = "nvidia,tegra210-qspi", },
+ { .compatible = "nvidia,tegra186-qspi", },
+ { .compatible = "nvidia,tegra194-qspi", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
+
+static int tegra_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_qspi *tqspi;
+ struct resource *r;
+ int ret, qspi_irq;
+ int bus_num;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+ tqspi = spi_master_get_devdata(master);
+
+ master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ master->setup = tegra_qspi_setup;
+ master->cleanup = tegra_qspi_cleanup;
+ master->transfer_one_message = tegra_qspi_transfer_one_message;
+ master->num_chipselect = 1;
+ master->auto_runtime_pm = true;
+
+ bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (bus_num >= 0)
+ master->bus_num = bus_num;
+
+ tqspi->master = master;
+ tqspi->dev = &pdev->dev;
+ spin_lock_init(&tqspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tqspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tqspi->base))
+ return PTR_ERR(tqspi->base);
+
+ tqspi->phys = r->start;
+ qspi_irq = platform_get_irq(pdev, 0);
+ tqspi->irq = qspi_irq;
+
+ tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
+ if (IS_ERR(tqspi->clk)) {
+ ret = PTR_ERR(tqspi->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ tqspi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(tqspi->rst)) {
+ ret = PTR_ERR(tqspi->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
+ return ret;
+ }
+
+ tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
+ tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
+
+ ret = tegra_qspi_init_dma(tqspi);
+ if (ret < 0)
+ return ret;
+
+ if (tqspi->use_dma)
+ tqspi->max_buf_size = tqspi->dma_buf_size;
+
+ init_completion(&tqspi->tx_dma_complete);
+ init_completion(&tqspi->rx_dma_complete);
+ init_completion(&tqspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ reset_control_assert(tqspi->rst);
+ udelay(2);
+ reset_control_deassert(tqspi->rst);
+
+ tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+ tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
+ tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
+ tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
+
+ pm_runtime_put(&pdev->dev);
+
+ ret = request_threaded_irq(tqspi->irq, NULL,
+ tegra_qspi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tqspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
+ goto exit_pm_disable;
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register master: %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ return 0;
+
+exit_free_irq:
+ free_irq(qspi_irq, tqspi);
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ tegra_qspi_deinit_dma(tqspi);
+ return ret;
+}
+
+static int tegra_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+ free_irq(tqspi->irq, tqspi);
+ pm_runtime_disable(&pdev->dev);
+ tegra_qspi_deinit_dma(tqspi);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_qspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int __maybe_unused tegra_qspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+ tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+
+static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+
+	/* flush all writes in the PPSB queue by reading back */
+ tegra_qspi_readl(tqspi, QSPI_COMMAND1);
+
+ clk_disable_unprepare(tqspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tqspi->clk);
+ if (ret < 0)
+ dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops tegra_qspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
+};
+
+static struct platform_driver tegra_qspi_driver = {
+ .driver = {
+ .name = "tegra-qspi",
+ .pm = &tegra_qspi_pm_ops,
+ .of_match_table = tegra_qspi_of_match,
+ },
+ .probe = tegra_qspi_probe,
+ .remove = tegra_qspi_remove,
+};
+module_platform_driver(tegra_qspi_driver);
+
+MODULE_ALIAS("platform:qspi-tegra");
+MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
+MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
deleted file mode 100644
index 3606232f190f..000000000000
--- a/drivers/spi/spi-txx9.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * TXx9 SPI controller driver.
- *
- * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- *
- * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
- */
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/gpio/machine.h>
-#include <linux/gpio/consumer.h>
-
-
-#define SPI_FIFO_SIZE 4
-#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */
-#define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */
-
-#define TXx9_SPMCR 0x00
-#define TXx9_SPCR0 0x04
-#define TXx9_SPCR1 0x08
-#define TXx9_SPFS 0x0c
-#define TXx9_SPSR 0x14
-#define TXx9_SPDR 0x18
-
-/* SPMCR : SPI Master Control */
-#define TXx9_SPMCR_OPMODE 0xc0
-#define TXx9_SPMCR_CONFIG 0x40
-#define TXx9_SPMCR_ACTIVE 0x80
-#define TXx9_SPMCR_SPSTP 0x02
-#define TXx9_SPMCR_BCLR 0x01
-
-/* SPCR0 : SPI Control 0 */
-#define TXx9_SPCR0_TXIFL_MASK 0xc000
-#define TXx9_SPCR0_RXIFL_MASK 0x3000
-#define TXx9_SPCR0_SIDIE 0x0800
-#define TXx9_SPCR0_SOEIE 0x0400
-#define TXx9_SPCR0_RBSIE 0x0200
-#define TXx9_SPCR0_TBSIE 0x0100
-#define TXx9_SPCR0_IFSPSE 0x0010
-#define TXx9_SPCR0_SBOS 0x0004
-#define TXx9_SPCR0_SPHA 0x0002
-#define TXx9_SPCR0_SPOL 0x0001
-
-/* SPSR : SPI Status */
-#define TXx9_SPSR_TBSI 0x8000
-#define TXx9_SPSR_RBSI 0x4000
-#define TXx9_SPSR_TBS_MASK 0x3800
-#define TXx9_SPSR_RBS_MASK 0x0700
-#define TXx9_SPSR_SPOE 0x0080
-#define TXx9_SPSR_IFSD 0x0008
-#define TXx9_SPSR_SIDLE 0x0004
-#define TXx9_SPSR_STRDY 0x0002
-#define TXx9_SPSR_SRRDY 0x0001
-
-
-struct txx9spi {
- struct work_struct work;
- spinlock_t lock; /* protect 'queue' */
- struct list_head queue;
- wait_queue_head_t waitq;
- void __iomem *membase;
- int baseclk;
- struct clk *clk;
- struct gpio_desc *last_chipselect;
- int last_chipselect_val;
-};
-
-static u32 txx9spi_rd(struct txx9spi *c, int reg)
-{
- return __raw_readl(c->membase + reg);
-}
-static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
-{
- __raw_writel(val, c->membase + reg);
-}
-
-static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
- int on, unsigned int cs_delay)
-{
- /*
- * The GPIO descriptor will track polarity inversion inside
- * gpiolib.
- */
- if (on) {
- /* deselect the chip with cs_change hint in last transfer */
- if (c->last_chipselect)
- gpiod_set_value(c->last_chipselect,
- !c->last_chipselect_val);
- c->last_chipselect = spi->cs_gpiod;
- c->last_chipselect_val = on;
- } else {
- c->last_chipselect = NULL;
- ndelay(cs_delay); /* CS Hold Time */
- }
- gpiod_set_value(spi->cs_gpiod, on);
- ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
-}
-
-static int txx9spi_setup(struct spi_device *spi)
-{
- struct txx9spi *c = spi_master_get_devdata(spi->master);
-
- if (!spi->max_speed_hz)
- return -EINVAL;
-
- /* deselect chip */
- spin_lock(&c->lock);
- txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
- spin_unlock(&c->lock);
-
- return 0;
-}
-
-static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
-{
- struct txx9spi *c = dev_id;
-
- /* disable rx intr */
- txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
- TXx9_SPCR0);
- wake_up(&c->waitq);
- return IRQ_HANDLED;
-}
-
-static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
-{
- struct spi_device *spi = m->spi;
- struct spi_transfer *t;
- unsigned int cs_delay;
- unsigned int cs_change = 1;
- int status = 0;
- u32 mcr;
- u32 prev_speed_hz = 0;
- u8 prev_bits_per_word = 0;
-
- /* CS setup/hold/recovery time in nsec */
- cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
-
- mcr = txx9spi_rd(c, TXx9_SPMCR);
- if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
- dev_err(&spi->dev, "Bad mode.\n");
- status = -EIO;
- goto exit;
- }
- mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
-
- /* enter config mode */
- txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
- txx9spi_wr(c, TXx9_SPCR0_SBOS
- | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
- | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
- | 0x08,
- TXx9_SPCR0);
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- const void *txbuf = t->tx_buf;
- void *rxbuf = t->rx_buf;
- u32 data;
- unsigned int len = t->len;
- unsigned int wsize;
- u32 speed_hz = t->speed_hz;
- u8 bits_per_word = t->bits_per_word;
-
- wsize = bits_per_word >> 3; /* in bytes */
-
- if (prev_speed_hz != speed_hz
- || prev_bits_per_word != bits_per_word) {
- int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
-
- n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
- /* enter config mode */
- txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
- TXx9_SPMCR);
- txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
- /* enter active mode */
- txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
-
- prev_speed_hz = speed_hz;
- prev_bits_per_word = bits_per_word;
- }
-
- if (cs_change)
- txx9spi_cs_func(spi, c, 1, cs_delay);
- cs_change = t->cs_change;
- while (len) {
- unsigned int count = SPI_FIFO_SIZE;
- int i;
- u32 cr0;
-
- if (len < count * wsize)
- count = len / wsize;
- /* now tx must be idle... */
- while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
- cpu_relax();
- cr0 = txx9spi_rd(c, TXx9_SPCR0);
- cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
- cr0 |= (count - 1) << 12;
- /* enable rx intr */
- cr0 |= TXx9_SPCR0_RBSIE;
- txx9spi_wr(c, cr0, TXx9_SPCR0);
- /* send */
- for (i = 0; i < count; i++) {
- if (txbuf) {
- data = (wsize == 1)
- ? *(const u8 *)txbuf
- : *(const u16 *)txbuf;
- txx9spi_wr(c, data, TXx9_SPDR);
- txbuf += wsize;
- } else
- txx9spi_wr(c, 0, TXx9_SPDR);
- }
- /* wait all rx data */
- wait_event(c->waitq,
- txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
- /* receive */
- for (i = 0; i < count; i++) {
- data = txx9spi_rd(c, TXx9_SPDR);
- if (rxbuf) {
- if (wsize == 1)
- *(u8 *)rxbuf = data;
- else
- *(u16 *)rxbuf = data;
- rxbuf += wsize;
- }
- }
- len -= count * wsize;
- }
- m->actual_length += t->len;
- spi_transfer_delay_exec(t);
-
- if (!cs_change)
- continue;
- if (t->transfer_list.next == &m->transfers)
- break;
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- txx9spi_cs_func(spi, c, 0, cs_delay);
- }
-
-exit:
- m->status = status;
- if (m->complete)
- m->complete(m->context);
-
- /* normally deactivate chipselect ... unless no error and
- * cs_change has hinted that the next message will probably
- * be for this chip too.
- */
- if (!(status == 0 && cs_change))
- txx9spi_cs_func(spi, c, 0, cs_delay);
-
- /* enter config mode */
- txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
-}
-
-static void txx9spi_work(struct work_struct *work)
-{
- struct txx9spi *c = container_of(work, struct txx9spi, work);
- unsigned long flags;
-
- spin_lock_irqsave(&c->lock, flags);
- while (!list_empty(&c->queue)) {
- struct spi_message *m;
-
- m = container_of(c->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irqrestore(&c->lock, flags);
-
- txx9spi_work_one(c, m);
-
- spin_lock_irqsave(&c->lock, flags);
- }
- spin_unlock_irqrestore(&c->lock, flags);
-}
-
-static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct spi_master *master = spi->master;
- struct txx9spi *c = spi_master_get_devdata(master);
- struct spi_transfer *t;
- unsigned long flags;
-
- m->actual_length = 0;
-
- /* check each transfer's parameters */
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (!t->tx_buf && !t->rx_buf && t->len)
- return -EINVAL;
- }
-
- spin_lock_irqsave(&c->lock, flags);
- list_add_tail(&m->queue, &c->queue);
- schedule_work(&c->work);
- spin_unlock_irqrestore(&c->lock, flags);
-
- return 0;
-}
-
-/*
- * Chip select uses GPIO only, further the driver is using the chip select
- * number (from the device tree "reg" property, and this can only come from
- * device tree since this is MIPS and there is no way to pass platform data) as
- * the GPIO number. As the platform has only one GPIO controller (the txx9 GPIO
- * chip) it is thus using the chip select number as an offset into that chip.
- * This chip has a maximum of 16 GPIOs 0..15 and this is what all platforms
- * register.
- *
- * We modernized this behaviour by explicitly converting that offset to an
- * offset on the GPIO chip using a GPIO descriptor machine table of the same
- * size as the txx9 GPIO chip with a 1-to-1 mapping of chip select to GPIO
- * offset.
- *
- * This is admittedly a hack, but it is countering the hack of using "reg" to
- * contain a GPIO offset when it should be using "cs-gpios" as the SPI bindings
- * state.
- */
-static struct gpiod_lookup_table txx9spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP_IDX("TXx9", 0, "cs", 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 1, "cs", 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 2, "cs", 2, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 3, "cs", 3, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 4, "cs", 4, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 5, "cs", 5, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 6, "cs", 6, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 7, "cs", 7, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 8, "cs", 8, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 9, "cs", 9, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 10, "cs", 10, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 11, "cs", 11, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 12, "cs", 12, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 13, "cs", 13, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 14, "cs", 14, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("TXx9", 15, "cs", 15, GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static int txx9spi_probe(struct platform_device *dev)
-{
- struct spi_master *master;
- struct txx9spi *c;
- struct resource *res;
- int ret = -ENODEV;
- u32 mcr;
- int irq;
-
- master = spi_alloc_master(&dev->dev, sizeof(*c));
- if (!master)
- return ret;
- c = spi_master_get_devdata(master);
- platform_set_drvdata(dev, master);
-
- INIT_WORK(&c->work, txx9spi_work);
- spin_lock_init(&c->lock);
- INIT_LIST_HEAD(&c->queue);
- init_waitqueue_head(&c->waitq);
-
- c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
- if (IS_ERR(c->clk)) {
- ret = PTR_ERR(c->clk);
- c->clk = NULL;
- goto exit;
- }
- ret = clk_prepare_enable(c->clk);
- if (ret) {
- c->clk = NULL;
- goto exit;
- }
- c->baseclk = clk_get_rate(c->clk);
- master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
- master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- c->membase = devm_ioremap_resource(&dev->dev, res);
- if (IS_ERR(c->membase))
- goto exit_busy;
-
- /* enter config mode */
- mcr = txx9spi_rd(c, TXx9_SPMCR);
- mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
- txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0)
- goto exit_busy;
- ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0,
- "spi_txx9", c);
- if (ret)
- goto exit;
-
- c->last_chipselect = NULL;
-
- dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
- (unsigned long long)res->start, irq,
- (c->baseclk + 500000) / 1000000);
-
- gpiod_add_lookup_table(&txx9spi_cs_gpio_table);
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
-
- master->bus_num = dev->id;
- master->setup = txx9spi_setup;
- master->transfer = txx9spi_transfer;
- master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->use_gpio_descriptors = true;
-
- ret = devm_spi_register_master(&dev->dev, master);
- if (ret)
- goto exit;
- return 0;
-exit_busy:
- ret = -EBUSY;
-exit:
- clk_disable_unprepare(c->clk);
- spi_master_put(master);
- return ret;
-}
-
-static int txx9spi_remove(struct platform_device *dev)
-{
- struct spi_master *master = platform_get_drvdata(dev);
- struct txx9spi *c = spi_master_get_devdata(master);
-
- flush_work(&c->work);
- clk_disable_unprepare(c->clk);
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:spi_txx9");
-
-static struct platform_driver txx9spi_driver = {
- .probe = txx9spi_probe,
- .remove = txx9spi_remove,
- .driver = {
- .name = "spi_txx9",
- },
-};
-
-static int __init txx9spi_init(void)
-{
- return platform_driver_register(&txx9spi_driver);
-}
-subsys_initcall(txx9spi_init);
-
-static void __exit txx9spi_exit(void)
-{
- platform_driver_unregister(&txx9spi_driver);
-}
-module_exit(txx9spi_exit);
-
-MODULE_DESCRIPTION("TXx9 SPI Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..b08efe88ccd6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -810,7 +810,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->controller->last_cs_enable = enable;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if (!spi->controller->set_cs_timing) {
+ if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
+ !spi->controller->set_cs_timing) {
if (enable1)
spi_delay_exec(&spi->controller->cs_setup, NULL);
else
@@ -841,7 +842,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->controller->set_cs(spi, !enable);
}
- if (!spi->controller->set_cs_timing) {
+ if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
+ !spi->controller->set_cs_timing) {
if (!enable1)
spi_delay_exec(&spi->controller->cs_inactive, NULL);
}
@@ -1108,6 +1110,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1119,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@@ -1263,7 +1269,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
ptp_read_system_prets(xfer->ptp_sts);
}
- if (xfer->tx_buf || xfer->rx_buf) {
+ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
@@ -1941,6 +1947,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_TX;
+ break;
case 1:
break;
case 2:
@@ -1962,6 +1971,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_RX;
+ break;
case 1:
break;
case 2:
@@ -2206,7 +2218,7 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
return AE_OK;
if (!lookup.max_speed_hz &&
- !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
+ ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
@@ -3329,12 +3341,16 @@ int spi_setup(struct spi_device *spi)
unsigned bad_bits, ugly_bits;
int status;
- /* check mode to prevent that DUAL and QUAD set at the same time
+ /*
+ * check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
+ * are set at the same time
*/
- if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
- ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+ if ((hweight_long(spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
+ (hweight_long(spi->mode &
+ (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
dev_err(&spi->dev,
- "setup: can not select dual and quad at the same time\n");
+ "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
return -EINVAL;
}
/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
@@ -3348,7 +3364,8 @@ int spi_setup(struct spi_device *spi)
* SPI_CS_WORD has a fallback software implementation,
* so it is ignored here.
*/
- bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
+ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
+ SPI_NO_TX | SPI_NO_RX);
/* nothing prevents from working with active-high CS in case if it
* is driven by GPIO.
*/
@@ -3378,8 +3395,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
@@ -3444,11 +3462,31 @@ EXPORT_SYMBOL_GPL(spi_setup);
int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
struct spi_delay *hold, struct spi_delay *inactive)
{
+ struct device *parent = spi->controller->dev.parent;
size_t len;
+ int status;
- if (spi->controller->set_cs_timing)
- return spi->controller->set_cs_timing(spi, setup, hold,
- inactive);
+ if (spi->controller->set_cs_timing &&
+ !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
+ if (spi->controller->auto_runtime_pm) {
+ status = pm_runtime_get_sync(parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ status = spi->controller->set_cs_timing(spi, setup,
+ hold, inactive);
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+ return status;
+ } else {
+ return spi->controller->set_cs_timing(spi, setup, hold,
+ inactive);
+ }
+ }
if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
(hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
@@ -3610,6 +3648,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* 2. check tx/rx_nbits match the mode in spi_device
*/
if (xfer->tx_buf) {
+ if (spi->mode & SPI_NO_TX)
+ return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
xfer->tx_nbits != SPI_NBITS_QUAD)
@@ -3623,6 +3663,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
/* check transfer rx_nbits */
if (xfer->rx_buf) {
+ if (spi->mode & SPI_NO_RX)
+ return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
xfer->rx_nbits != SPI_NBITS_QUAD)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 859910ec8d9f..8cb4d923aeaa 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lwn,bk4" },
{ .compatible = "dh,dhcom-board" },
{ .compatible = "menlo,m53cpld" },
+ { .compatible = "cisco,spi-petra" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index de844b412110..bbbd311eda03 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitmap.h>
#include <linux/delay.h>
@@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
unsigned int irq;
- u32 status;
- int id;
+ u32 status, id;
u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 4789d36ddfd3..d66a64e42273 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -933,7 +933,7 @@ static int __init ashmem_init(void)
ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
sizeof(struct ashmem_range),
- 0, 0, NULL);
+ 0, SLAB_RECLAIM_ACCOUNT, NULL);
if (!ashmem_range_cachep) {
pr_err("failed to create slab cache\n");
goto out_free1;
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index d0c6e42eadda..64c77970eee8 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -3,7 +3,10 @@ config STAGING_BOARD
bool "Staging Board Support"
depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
help
- Select to enable per-board staging support code.
-
- If in doubt, say N here.
+	  The staging board code supports continuous upstream in-tree
+	  development and integration of platform devices. It helps
+	  developers integrate devices as platform devices for drivers
+	  that only provide platform device bindings, which in turn
+	  allows hardware feature support and DT binding work to
+	  proceed incrementally and in parallel.
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
index ebe99db7d153..c7e1dc58dfba 100644
--- a/drivers/staging/clocking-wizard/TODO
+++ b/drivers/staging/clocking-wizard/TODO
@@ -2,7 +2,8 @@ TODO:
- support for fractional multiplier
- support for fractional divider (output 0 only)
- support for set_rate() operations (may benefit from Stephen Boyd's
- refactoring of the clk primitives: https://lkml.org/lkml/2014/9/5/766)
+ refactoring of the clk primitives:
+ https://lore.kernel.org/lkml/1409957256-23729-1-git-send-email-sboyd@codeaurora.org)
- review arithmetic
- overflow after multiplication?
- maximize accuracy before divisions
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..df77b6bf5c64 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -939,8 +939,8 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
/* fill devinfo structure */
devinfo.version_code = COMEDI_VERSION_CODE;
devinfo.n_subdevs = dev->n_subdevices;
- strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
- strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
+ strscpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
+ strscpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
s = comedi_file_read_subdevice(file);
if (s)
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index d0081897fe47..8fc45638ff59 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -19,14 +19,15 @@
* PCI-7234 (adl_pci7234), PCI-7432 (adl_pci7432), PCI-7433 (adl_pci7433),
* PCI-7434 (adl_pci7434)
* Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Thu, 02 Aug 2012 14:27:46 -0700
- * Status: untested
+ * Updated: Fri, 20 Nov 2020 14:49:36 +0000
+ * Status: works (tested on PCI-7230)
*
* One or two subdevices are setup by this driver depending on
* the number of digital inputs and/or outputs provided by the
* board. Each subdevice has a maximum of 32 channels.
*
- * PCI-7230 - 2 subdevices: 0 - 16 input, 1 - 16 output
+ * PCI-7230 - 4 subdevices: 0 - 16 input, 1 - 16 output,
+ * 2 - IRQ_IDI0, 3 - IRQ_IDI1
* PCI-7233 - 1 subdevice: 0 - 32 input
* PCI-7234 - 1 subdevice: 0 - 32 output
* PCI-7432 - 2 subdevices: 0 - 32 input, 1 - 32 output
@@ -37,8 +38,9 @@
* interrupt signals on digital input channels 0 and 1. The PCI-7233
* has dual-interrupt sources for change-of-state (COS) on any 16
* digital input channels of LSB and for COS on any 16 digital input
- * lines of MSB. Interrupts are not currently supported by this
- * driver.
+ * lines of MSB.
+ *
+ * Currently, this driver only supports interrupts for PCI-7230.
*
* Configuration Options: not applicable, uses comedi PCI auto config
*/
@@ -47,13 +49,22 @@
#include "../comedi_pci.h"
+#include "plx9052.h"
+
/*
* Register I/O map (32-bit access only)
*/
-#define PCI7X3X_DIO_REG 0x00
-#define PCI743X_DIO_REG 0x04
+#define PCI7X3X_DIO_REG 0x0000 /* in the DigIO Port area */
+#define PCI743X_DIO_REG 0x0004
+
+#define ADL_PT_CLRIRQ 0x0040 /* in the DigIO Port area */
-enum apci1516_boardid {
+#define LINTI1_EN_ACT_IDI0 (PLX9052_INTCSR_LI1ENAB | PLX9052_INTCSR_LI1STAT)
+#define LINTI2_EN_ACT_IDI1 (PLX9052_INTCSR_LI2ENAB | PLX9052_INTCSR_LI2STAT)
+#define EN_PCI_LINT2H_LINT1H \
+ (PLX9052_INTCSR_PCIENAB | PLX9052_INTCSR_LI2POL | PLX9052_INTCSR_LI1POL)
+
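+/*
+ * Note (illustrative reading): LINTI1_EN_ACT_IDI0 combines the LINTi1
+ * enable and status bits, so "(intcsr & LINTI1_EN_ACT_IDI0) ==
+ * LINTI1_EN_ACT_IDI0" in the interrupt handler is true only when that
+ * interrupt source is both enabled and currently asserted;
+ * LINTI2_EN_ACT_IDI1 works the same way.
+ */
+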
+enum adl_pci7x3x_boardid {
BOARD_PCI7230,
BOARD_PCI7233,
BOARD_PCI7234,
@@ -67,14 +78,16 @@ struct adl_pci7x3x_boardinfo {
int nsubdevs;
int di_nchan;
int do_nchan;
+ int irq_nchan;
};
static const struct adl_pci7x3x_boardinfo adl_pci7x3x_boards[] = {
[BOARD_PCI7230] = {
.name = "adl_pci7230",
- .nsubdevs = 2,
+ .nsubdevs = 4, /* IDI, IDO, IRQ_IDI0, IRQ_IDI1 */
.di_nchan = 16,
.do_nchan = 16,
+ .irq_nchan = 2,
},
[BOARD_PCI7233] = {
.name = "adl_pci7233",
@@ -104,6 +117,178 @@ static const struct adl_pci7x3x_boardinfo adl_pci7x3x_boards[] = {
}
};
+struct adl_pci7x3x_dev_private_data {
+ unsigned long lcr_io_base;
+ unsigned int int_ctrl;
+};
+
+struct adl_pci7x3x_sd_private_data {
+ spinlock_t subd_slock; /* spin-lock for cmd_running */
+ unsigned long port_offset;
+ short int cmd_running;
+};
+
+static void process_irq(struct comedi_device *dev, unsigned int subdev,
+ unsigned short intcsr)
+{
+ struct comedi_subdevice *s = &dev->subdevices[subdev];
+ struct adl_pci7x3x_sd_private_data *sd_priv = s->private;
+ unsigned long reg = sd_priv->port_offset;
+ struct comedi_async *async_p = s->async;
+
+ if (async_p) {
+ unsigned short val = inw(dev->iobase + reg);
+
+ spin_lock(&sd_priv->subd_slock);
+ if (sd_priv->cmd_running)
+ comedi_buf_write_samples(s, &val, 1);
+ spin_unlock(&sd_priv->subd_slock);
+ comedi_handle_events(dev, s);
+ }
+}
+
+static irqreturn_t adl_pci7x3x_interrupt(int irq, void *p_device)
+{
+ struct comedi_device *dev = p_device;
+ struct adl_pci7x3x_dev_private_data *dev_private = dev->private;
+ unsigned long cpu_flags;
+ unsigned int intcsr;
+ bool li1stat, li2stat;
+
+ if (!dev->attached) {
+ /* Ignore interrupt before device fully attached. */
+ /* Might not even have allocated subdevices yet! */
+ return IRQ_NONE;
+ }
+
+ /* Check if we are source of interrupt */
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ intcsr = inl(dev_private->lcr_io_base + PLX9052_INTCSR);
+ li1stat = (intcsr & LINTI1_EN_ACT_IDI0) == LINTI1_EN_ACT_IDI0;
+ li2stat = (intcsr & LINTI2_EN_ACT_IDI1) == LINTI2_EN_ACT_IDI1;
+ if (li1stat || li2stat) {
+ /* clear all current interrupt flags */
+		/* FIXME: reset both interrupt flags */
+ outb(0x00, dev->iobase + ADL_PT_CLRIRQ);
+ }
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+	/* Subdevices 2 and 3 are the isolated digital inputs on the "SCSI2" jack */
+
+ if (li1stat) /* 0x0005 LINTi1 is Enabled && IDI0 is 1 */
+ process_irq(dev, 2, intcsr);
+
+ if (li2stat) /* 0x0028 LINTi2 is Enabled && IDI1 is 1 */
+ process_irq(dev, 3, intcsr);
+
+ return IRQ_RETVAL(li1stat || li2stat);
+}
+
+static int adl_pci7x3x_asy_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+ /* Step 2b : and mutually compatible */
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
+ cmd->chanlist_len);
+ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
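+
+/*
+ * Illustrative: these checks accept exactly one command shape: start
+ * immediately (TRIG_NOW), one scan per external interrupt edge
+ * (TRIG_EXT), one conversion per scan (TRIG_FOLLOW), scan_end_arg equal
+ * to chanlist_len, and no stop condition (TRIG_NONE), i.e. run until
+ * cancelled.
+ */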
+
+static int adl_pci7x3x_asy_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct adl_pci7x3x_dev_private_data *dev_private = dev->private;
+ struct adl_pci7x3x_sd_private_data *sd_priv = s->private;
+ unsigned long cpu_flags;
+ unsigned int int_enab;
+
+ if (s->index == 2) {
+ /* enable LINTi1 == IDI sdi[0] Ch 0 IRQ ActHigh */
+ int_enab = PLX9052_INTCSR_LI1ENAB;
+ } else {
+ /* enable LINTi2 == IDI sdi[0] Ch 1 IRQ ActHigh */
+ int_enab = PLX9052_INTCSR_LI2ENAB;
+ }
+
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ dev_private->int_ctrl |= int_enab;
+ outl(dev_private->int_ctrl, dev_private->lcr_io_base + PLX9052_INTCSR);
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+ spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
+ sd_priv->cmd_running = 1;
+ spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
+
+ return 0;
+}
+
+static int adl_pci7x3x_asy_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct adl_pci7x3x_dev_private_data *dev_private = dev->private;
+ struct adl_pci7x3x_sd_private_data *sd_priv = s->private;
+ unsigned long cpu_flags;
+ unsigned int int_enab;
+
+ spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
+ sd_priv->cmd_running = 0;
+ spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
+ /* disable Interrupts */
+ if (s->index == 2)
+ int_enab = PLX9052_INTCSR_LI1ENAB;
+ else
+ int_enab = PLX9052_INTCSR_LI2ENAB;
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ dev_private->int_ctrl &= ~int_enab;
+ outl(dev_private->int_ctrl, dev_private->lcr_io_base + PLX9052_INTCSR);
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+ return 0;
+}
+
+/* same as _di_insn_bits because the IRQ-pins are the DI-ports */
+static int adl_pci7x3x_dirq_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct adl_pci7x3x_sd_private_data *sd_priv = s->private;
+ unsigned long reg = (unsigned long)sd_priv->port_offset;
+
+ data[1] = inl(dev->iobase + reg);
+
+ return insn->n;
+}
+
static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -143,15 +328,28 @@ static int adl_pci7x3x_di_insn_bits(struct comedi_device *dev,
return insn->n;
}
+static int adl_pci7x3x_reset(struct comedi_device *dev)
+{
+ struct adl_pci7x3x_dev_private_data *dev_private = dev->private;
+
+ /* disable Interrupts */
+ dev_private->int_ctrl = 0x00; /* Disable PCI + LINTi2 + LINTi1 */
+ outl(dev_private->int_ctrl, dev_private->lcr_io_base + PLX9052_INTCSR);
+
+ return 0;
+}
+
static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct adl_pci7x3x_boardinfo *board = NULL;
struct comedi_subdevice *s;
+ struct adl_pci7x3x_dev_private_data *dev_private;
int subdev;
int nchan;
int ret;
+ int ic;
if (context < ARRAY_SIZE(adl_pci7x3x_boards))
board = &adl_pci7x3x_boards[context];
@@ -160,10 +358,34 @@ static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
+ if (!dev_private)
+ return -ENOMEM;
+
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
+ dev_private->lcr_io_base = pci_resource_start(pcidev, 1);
+
+ adl_pci7x3x_reset(dev);
+
+ if (board->irq_nchan) {
+		/* discard any stale pending IRQs */
+ outb(0x00, dev->iobase + ADL_PT_CLRIRQ);
+
+ if (pcidev->irq) {
+ ret = request_irq(pcidev->irq, adl_pci7x3x_interrupt,
+ IRQF_SHARED, dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = pcidev->irq;
+ /* 0x52 PCI + IDI Ch 1 Ch 0 IRQ Off ActHigh */
+ dev_private->int_ctrl = EN_PCI_LINT2H_LINT1H;
+ outl(dev_private->int_ctrl,
+ dev_private->lcr_io_base + PLX9052_INTCSR);
+ }
+ }
+ }
ret = comedi_alloc_subdevices(dev, board->nsubdevs);
if (ret)
@@ -237,14 +459,56 @@ static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
}
}
+ for (ic = 0; ic < board->irq_nchan; ++ic) {
+ struct adl_pci7x3x_sd_private_data *sd_priv;
+
+ nchan = 1;
+
+ s = &dev->subdevices[subdev];
+ /* Isolated digital inputs 0 or 1 */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = nchan;
+ s->maxdata = 1;
+ s->insn_bits = adl_pci7x3x_dirq_insn_bits;
+ s->range_table = &range_digital;
+
+ sd_priv = comedi_alloc_spriv(s, sizeof(*sd_priv));
+ if (!sd_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&sd_priv->subd_slock);
+ sd_priv->port_offset = PCI7X3X_DIO_REG;
+ sd_priv->cmd_running = 0;
+
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmdtest = adl_pci7x3x_asy_cmdtest;
+ s->do_cmd = adl_pci7x3x_asy_cmd;
+ s->cancel = adl_pci7x3x_asy_cancel;
+ }
+
+ subdev++;
+ }
+
return 0;
}
+static void adl_pci7x3x_detach(struct comedi_device *dev)
+{
+ if (dev->iobase)
+ adl_pci7x3x_reset(dev);
+ comedi_pci_detach(dev);
+}
+
static struct comedi_driver adl_pci7x3x_driver = {
.driver_name = "adl_pci7x3x",
.module = THIS_MODULE,
.auto_attach = adl_pci7x3x_auto_attach,
- .detach = comedi_pci_detach,
+ .detach = adl_pci7x3x_detach,
};
static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 0df28ec00f37..8e222b6ff2b4 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -34,9 +34,15 @@
*/
/* PCI-1730, PCI-1733, PCI-1736 interrupt control registers */
-#define PCI173X_INT_EN_REG 0x08 /* R/W: enable/disable */
-#define PCI173X_INT_RF_REG 0x0c /* R/W: falling/rising edge */
-#define PCI173X_INT_CLR_REG 0x10 /* R/W: clear */
+#define PCI173X_INT_EN_REG 0x0008 /* R/W: enable/disable */
+#define PCI173X_INT_RF_REG 0x000c /* R/W: falling/rising edge */
+#define PCI173X_INT_FLAG_REG 0x0010 /* R: status */
+#define PCI173X_INT_CLR_REG 0x0010 /* W: clear */
+
+#define PCI173X_INT_IDI0 0x01 /* IDI0 edge occurred */
+#define PCI173X_INT_IDI1 0x02 /* IDI1 edge occurred */
+#define PCI173X_INT_DI0 0x04 /* DI0 edge occurred */
+#define PCI173X_INT_DI1 0x08 /* DI1 edge occurred */
/* PCI-1739U, PCI-1750, PCI1751 interrupt control registers */
#define PCI1750_INT_REG 0x20 /* R/W: status/control */
@@ -63,6 +69,7 @@
#define PCI_DIO_MAX_DI_SUBDEVS 2 /* 2 x 8/16/32 input channels max */
#define PCI_DIO_MAX_DO_SUBDEVS 2 /* 2 x 8/16/32 output channels max */
#define PCI_DIO_MAX_DIO_SUBDEVG 2 /* 2 x any number of 8255 devices max */
+#define PCI_DIO_MAX_IRQ_SUBDEVS 4 /* 4 x 1 input IRQ channels max */
enum pci_dio_boardid {
TYPE_PCI1730,
@@ -84,7 +91,12 @@ enum pci_dio_boardid {
struct diosubd_data {
int chans; /* num of chans or 8255 devices */
- unsigned long addr; /* PCI address ofset */
+ unsigned long addr; /* PCI address offset */
+};
+
+struct dio_irq_subd_data {
+ unsigned short int_en; /* interrupt enable/status bit */
+ unsigned long addr; /* PCI address offset */
};
struct dio_boardtype {
@@ -93,6 +105,7 @@ struct dio_boardtype {
struct diosubd_data sdi[PCI_DIO_MAX_DI_SUBDEVS];
struct diosubd_data sdo[PCI_DIO_MAX_DO_SUBDEVS];
struct diosubd_data sdio[PCI_DIO_MAX_DIO_SUBDEVG];
+ struct dio_irq_subd_data sdirq[PCI_DIO_MAX_IRQ_SUBDEVS];
unsigned long id_reg;
unsigned long timer_regbase;
unsigned int is_16bit:1;
@@ -101,12 +114,17 @@ struct dio_boardtype {
static const struct dio_boardtype boardtypes[] = {
[TYPE_PCI1730] = {
.name = "pci1730",
- .nsubdevs = 5,
+ /* DI, IDI, DO, IDO, ID, IRQ_DI0, IRQ_DI1, IRQ_IDI0, IRQ_IDI1 */
+ .nsubdevs = 9,
.sdi[0] = { 16, 0x02, }, /* DI 0-15 */
.sdi[1] = { 16, 0x00, }, /* ISO DI 0-15 */
.sdo[0] = { 16, 0x02, }, /* DO 0-15 */
.sdo[1] = { 16, 0x00, }, /* ISO DO 0-15 */
.id_reg = 0x04,
+ .sdirq[0] = { PCI173X_INT_DI0, 0x02, }, /* DI 0 */
+ .sdirq[1] = { PCI173X_INT_DI1, 0x02, }, /* DI 1 */
+ .sdirq[2] = { PCI173X_INT_IDI0, 0x00, }, /* ISO DI 0 */
+ .sdirq[3] = { PCI173X_INT_IDI1, 0x00, }, /* ISO DI 1 */
},
[TYPE_PCI1733] = {
.name = "pci1733",
@@ -205,6 +223,188 @@ static const struct dio_boardtype boardtypes[] = {
},
};
+struct pci_dio_dev_private_data {
+ int boardtype;
+ int irq_subd;
+ unsigned short int_ctrl;
+ unsigned short int_rf;
+};
+
+struct pci_dio_sd_private_data {
+ spinlock_t subd_slock; /* spin-lock for cmd_running */
+ unsigned long port_offset;
+ short int cmd_running;
+};
+
+static void process_irq(struct comedi_device *dev, unsigned int subdev,
+ unsigned char irqflags)
+{
+ struct comedi_subdevice *s = &dev->subdevices[subdev];
+ struct pci_dio_sd_private_data *sd_priv = s->private;
+ unsigned long reg = sd_priv->port_offset;
+ struct comedi_async *async_p = s->async;
+
+ if (async_p) {
+ unsigned short val = inw(dev->iobase + reg);
+
+ spin_lock(&sd_priv->subd_slock);
+ if (sd_priv->cmd_running)
+ comedi_buf_write_samples(s, &val, 1);
+ spin_unlock(&sd_priv->subd_slock);
+ comedi_handle_events(dev, s);
+ }
+}
+
+static irqreturn_t pci_dio_interrupt(int irq, void *p_device)
+{
+ struct comedi_device *dev = p_device;
+ struct pci_dio_dev_private_data *dev_private = dev->private;
+ const struct dio_boardtype *board = dev->board_ptr;
+ unsigned long cpu_flags;
+ unsigned char irqflags;
+ int i;
+
+ if (!dev->attached) {
+ /* Ignore interrupt before device fully attached. */
+ /* Might not even have allocated subdevices yet! */
+ return IRQ_NONE;
+ }
+
+ /* Check if we are source of interrupt */
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ irqflags = inb(dev->iobase + PCI173X_INT_FLAG_REG);
+ if (!(irqflags & 0x0F)) {
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+ return IRQ_NONE;
+ }
+
+ /* clear all current interrupt flags */
+ outb(irqflags, dev->iobase + PCI173X_INT_CLR_REG);
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+ /* check irq subdevice triggers */
+ for (i = 0; i < PCI_DIO_MAX_IRQ_SUBDEVS; i++) {
+ if (irqflags & board->sdirq[i].int_en)
+ process_irq(dev, dev_private->irq_subd + i, irqflags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pci_dio_asy_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+ /* Step 2b : and mutually compatible */
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ /*
+ * For scan_begin_arg, the trigger number must be 0 and the only
+ * allowed flags are CR_EDGE and CR_INVERT. CR_EDGE is ignored,
+ * CR_INVERT sets the trigger to falling edge.
+ */
+ if (cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) {
+ cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
+ err |= -EINVAL;
+ }
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
+ cmd->chanlist_len);
+ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
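The cmdtest above accepts only TRIG_NOW/TRIG_EXT/TRIG_FOLLOW/TRIG_COUNT/TRIG_NONE, with CR_INVERT in scan_begin_arg selecting the falling edge. A minimal userspace sketch of arming such a subdevice through comedilib; the device path and subdevice index are assumptions for this board, while comedi_open(), comedi_command() and the CR_*/TRIG_* constants are standard comedilib/comedi.h:

    #include <comedilib.h>

    /* arm the DI edge-interrupt subdevice; returns 0 on success */
    int arm_di_irq(const char *path, unsigned int subdev, int falling_edge)
    {
    	unsigned int chanlist[1] = { CR_PACK(0, 0, AREF_GROUND) };
    	comedi_cmd cmd = {
    		.subdev         = subdev,       /* e.g. 5 for IRQ_DI0 on a pci1730 */
    		.start_src      = TRIG_NOW,
    		.start_arg      = 0,
    		.scan_begin_src = TRIG_EXT,
    		.scan_begin_arg = falling_edge ? CR_INVERT : 0,
    		.convert_src    = TRIG_FOLLOW,
    		.convert_arg    = 0,
    		.scan_end_src   = TRIG_COUNT,
    		.scan_end_arg   = 1,
    		.stop_src       = TRIG_NONE,
    		.stop_arg       = 0,
    		.chanlist       = chanlist,
    		.chanlist_len   = 1,
    	};
    	comedi_t *dev = comedi_open(path);  /* e.g. "/dev/comedi0" */

    	if (!dev)
    		return -1;
    	return comedi_command(dev, &cmd);
    }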
+
+static int pci_dio_asy_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pci_dio_dev_private_data *dev_private = dev->private;
+ struct pci_dio_sd_private_data *sd_priv = s->private;
+ const struct dio_boardtype *board = dev->board_ptr;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long cpu_flags;
+ unsigned short int_en;
+
+ int_en = board->sdirq[s->index - dev_private->irq_subd].int_en;
+
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ if (cmd->scan_begin_arg & CR_INVERT)
+ dev_private->int_rf |= int_en; /* falling edge */
+ else
+ dev_private->int_rf &= ~int_en; /* rising edge */
+ outb(dev_private->int_rf, dev->iobase + PCI173X_INT_RF_REG);
+ dev_private->int_ctrl |= int_en; /* enable interrupt source */
+ outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+ spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
+ sd_priv->cmd_running = 1;
+ spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
+
+ return 0;
+}
+
+static int pci_dio_asy_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pci_dio_dev_private_data *dev_private = dev->private;
+ struct pci_dio_sd_private_data *sd_priv = s->private;
+ const struct dio_boardtype *board = dev->board_ptr;
+ unsigned long cpu_flags;
+ unsigned short int_en;
+
+ spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
+ sd_priv->cmd_running = 0;
+ spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
+
+ int_en = board->sdirq[s->index - dev_private->irq_subd].int_en;
+
+ spin_lock_irqsave(&dev->spinlock, cpu_flags);
+ dev_private->int_ctrl &= ~int_en;
+ outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
+ spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
+
+ return 0;
+}
+
+/* same as pci_dio_insn_bits_di_b() because the IRQ pins are the DI ports */
+static int pci_dio_insn_bits_dirq_b(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pci_dio_sd_private_data *sd_priv = s->private;
+ unsigned long reg = sd_priv->port_offset;
+ unsigned long iobase = dev->iobase + reg;
+
+ data[1] = inb(iobase);
+
+ return insn->n;
+}
+
static int pci_dio_insn_bits_di_b(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -283,6 +483,7 @@ static int pci_dio_insn_bits_do_w(struct comedi_device *dev,
static int pci_dio_reset(struct comedi_device *dev, unsigned long cardtype)
{
+ struct pci_dio_dev_private_data *dev_private = dev->private;
/* disable channel freeze function on the PCI-1752/1756 boards */
if (cardtype == TYPE_PCI1752 || cardtype == TYPE_PCI1756)
outw(0, dev->iobase + PCI1752_CFC_REG);
@@ -292,9 +493,13 @@ static int pci_dio_reset(struct comedi_device *dev, unsigned long cardtype)
case TYPE_PCI1730:
case TYPE_PCI1733:
case TYPE_PCI1736:
- outb(0, dev->iobase + PCI173X_INT_EN_REG);
+ dev_private->int_ctrl = 0x00;
+ outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
+ /* reset all 4 interrupt flags */
outb(0x0f, dev->iobase + PCI173X_INT_CLR_REG);
- outb(0, dev->iobase + PCI173X_INT_RF_REG);
+ /* default to rising-edge IRQ triggering on all 4 pins */
+ dev_private->int_rf = 0x00;
+ outb(dev_private->int_rf, dev->iobase + PCI173X_INT_RF_REG);
break;
case TYPE_PCI1739:
case TYPE_PCI1750:
@@ -346,8 +551,8 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct dio_boardtype *board = NULL;
- const struct diosubd_data *d;
struct comedi_subdevice *s;
+ struct pci_dio_dev_private_data *dev_private;
int ret, subdev, i, j;
if (context < ARRAY_SIZE(boardtypes))
@@ -357,6 +562,10 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
+ if (!dev_private)
+ return -ENOMEM;
+
ret = comedi_pci_enable(dev);
if (ret)
return ret;
@@ -365,15 +574,25 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
else
dev->iobase = pci_resource_start(pcidev, 2);
+ dev_private->boardtype = context;
pci_dio_reset(dev, context);
+ /* request IRQ if device has irq subdevices */
+ if (board->sdirq[0].int_en && pcidev->irq) {
+ ret = request_irq(pcidev->irq, pci_dio_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
ret = comedi_alloc_subdevices(dev, board->nsubdevs);
if (ret)
return ret;
subdev = 0;
for (i = 0; i < PCI_DIO_MAX_DI_SUBDEVS; i++) {
- d = &board->sdi[i];
+ const struct diosubd_data *d = &board->sdi[i];
+
if (d->chans) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DI;
@@ -385,11 +604,13 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
? pci_dio_insn_bits_di_w
: pci_dio_insn_bits_di_b;
s->private = (void *)d->addr;
}
}
for (i = 0; i < PCI_DIO_MAX_DO_SUBDEVS; i++) {
- d = &board->sdo[i];
+ const struct diosubd_data *d = &board->sdo[i];
+
if (d->chans) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DO;
@@ -420,7 +641,8 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
}
for (i = 0; i < PCI_DIO_MAX_DIO_SUBDEVG; i++) {
- d = &board->sdio[i];
+ const struct diosubd_data *d = &board->sdio[i];
+
for (j = 0; j < d->chans; j++) {
s = &dev->subdevices[subdev++];
ret = subdev_8255_init(dev, s, NULL,
@@ -454,14 +676,57 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
comedi_8254_subdevice_init(s, dev->pacer);
}
+ dev_private->irq_subd = subdev; /* first interrupt subdevice index */
+ for (i = 0; i < PCI_DIO_MAX_IRQ_SUBDEVS; ++i) {
+ struct pci_dio_sd_private_data *sd_priv = NULL;
+ const struct dio_irq_subd_data *d = &board->sdirq[i];
+
+ if (d->int_en) {
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci_dio_insn_bits_dirq_b;
+ sd_priv = comedi_alloc_spriv(s, sizeof(*sd_priv));
+ if (!sd_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&sd_priv->subd_slock);
+ sd_priv->port_offset = d->addr;
+ sd_priv->cmd_running = 0;
+
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmdtest = pci_dio_asy_cmdtest;
+ s->do_cmd = pci_dio_asy_cmd;
+ s->cancel = pci_dio_asy_cancel;
+ }
+ }
+ }
+
return 0;
}
+static void pci_dio_detach(struct comedi_device *dev)
+{
+ struct pci_dio_dev_private_data *dev_private = dev->private;
+ int boardtype = dev_private->boardtype;
+
+ if (dev->iobase)
+ pci_dio_reset(dev, boardtype);
+ comedi_pci_detach(dev);
+}
+
static struct comedi_driver adv_pci_dio_driver = {
.driver_name = "adv_pci_dio",
.module = THIS_MODULE,
.auto_attach = pci_dio_auto_attach,
- .detach = comedi_pci_detach,
+ .detach = pci_dio_detach,
};
static unsigned long pci_dio_override_cardtype(struct pci_dev *pcidev,
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index a30b4f5b199b..3536c03ff523 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -34,6 +34,9 @@
#define DRIVER_DESC "EMXX UDC driver"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+static struct gpio_desc *vbus_gpio;
+static int vbus_irq;
+
static const char driver_name[] = "emxx_udc";
static const char driver_desc[] = DRIVER_DESC;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index bca614d69aca..c9e37a1b8139 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -20,8 +20,6 @@
/* below hacked up for staging integration */
#define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */
#define INT_VBUS 0 /* IRQ for GPIO_P153 */
-struct gpio_desc *vbus_gpio;
-int vbus_irq;
/*------------ Board dependence(Wait) */
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index 3a280cc1892c..abe9395a0aef 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -7,9 +7,13 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/module.h>
+
#include <video/mipi_display.h>
#include "fbtft.h"
@@ -66,6 +70,62 @@ enum st7789v_command {
#define MADCTL_MX BIT(6) /* bitmask for column address order */
#define MADCTL_MY BIT(7) /* bitmask for page address order */
+/* one frame at 60 Hz lasts ~16.6 ms; wait for at most two frame periods */
+#define PANEL_TE_TIMEOUT_MS 33
+
+static struct completion panel_te; /* completion for panel TE line */
+static int irq_te; /* Linux IRQ for LCD TE line */
+
+static irqreturn_t panel_te_handler(int irq, void *data)
+{
+ complete(&panel_te);
+ return IRQ_HANDLED;
+}
+
+/*
+ * init_tearing_effect_line() - initialize the tearing-effect (TE) line.
+ * @par: FBTFT parameter object.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+static int init_tearing_effect_line(struct fbtft_par *par)
+{
+ struct device *dev = par->info->device;
+ struct gpio_desc *te;
+ int rc, irq;
+
+ te = gpiod_get_optional(dev, "te", GPIOD_IN);
+ if (IS_ERR(te))
+ return dev_err_probe(dev, PTR_ERR(te), "Failed to request te GPIO\n");
+
+ /* a NULL te means no TE line is configured; treat that as success */
+ if (!te) {
+ irq_te = 0;
+ return 0;
+ }
+
+ irq = gpiod_to_irq(te);
+
+ /* the GPIO is now locked as an IRQ, so we may drop the reference */
+ gpiod_put(te);
+
+ if (irq < 0)
+ return irq;
+
+ irq_te = irq;
+ init_completion(&panel_te);
+
+ /* the TE pulse is active-high and lasts no more than 1000 microseconds */
+ rc = devm_request_irq(dev, irq_te, panel_te_handler,
+ IRQF_TRIGGER_RISING, "TE_GPIO", par);
+ if (rc)
+ return dev_err_probe(dev, rc, "TE IRQ request failed.\n");
+
+ disable_irq_nosync(irq_te);
+
+ return 0;
+}
+
/**
* init_display() - initialize the display controller
*
@@ -82,6 +142,12 @@ enum st7789v_command {
*/
static int init_display(struct fbtft_par *par)
{
+ int rc;
+
+ rc = init_tearing_effect_line(par);
+ if (rc)
+ return rc;
+
/* turn off sleep mode */
write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(120);
@@ -137,6 +203,10 @@ static int init_display(struct fbtft_par *par)
*/
write_reg(par, PWCTRL1, 0xA4, 0xA1);
+ /* TE line output is off by default when powering on */
+ if (irq_te)
+ write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00);
+
write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
if (HSD20_IPS)
@@ -145,6 +215,50 @@ static int init_display(struct fbtft_par *par)
return 0;
}
+/*
+ * write_vmem() - write data to display.
+ * @par: FBTFT parameter object.
+ * @offset: offset from screen_buffer.
+ * @len: the length of data to be written.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ struct device *dev = par->info->device;
+ int ret;
+
+ if (irq_te) {
+ enable_irq(irq_te);
+ reinit_completion(&panel_te);
+ ret = wait_for_completion_timeout(&panel_te,
+ msecs_to_jiffies(PANEL_TE_TIMEOUT_MS));
+ if (ret == 0)
+ dev_err(dev, "wait panel TE timeout\n");
+
+ disable_irq(irq_te);
+ }
+
+ switch (par->pdata->display.buswidth) {
+ case 8:
+ ret = fbtft_write_vmem16_bus8(par, offset, len);
+ break;
+ case 9:
+ ret = fbtft_write_vmem16_bus9(par, offset, len);
+ break;
+ case 16:
+ ret = fbtft_write_vmem16_bus16(par, offset, len);
+ break;
+ default:
+ dev_err(dev, "Unsupported buswidth %d\n",
+ par->pdata->display.buswidth);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
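write_vmem() relies on wait_for_completion_timeout() returning 0 on timeout and the remaining jiffies otherwise; on timeout it only logs and still pushes the frame out, since a torn frame beats a stalled panel. A minimal sketch of that return convention, under no assumptions beyond the standard completion API:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* true if the TE edge arrived before timeout_ms elapsed */
    static bool te_arrived(struct completion *done, unsigned int timeout_ms)
    {
    	/* wait_for_completion_timeout(): 0 on timeout, remaining jiffies otherwise */
    	return wait_for_completion_timeout(done,
    					   msecs_to_jiffies(timeout_ms)) != 0;
    }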
+
/**
* set_var() - apply LCD properties like rotation and BGR mode
*
@@ -259,6 +373,7 @@ static struct fbtft_display display = {
.gamma = HSD20_IPS_GAMMA,
.fbtftops = {
.init_display = init_display,
+ .write_vmem = write_vmem,
.set_var = set_var,
.set_gamma = set_gamma,
.blank = blank,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index b5fded15e8a6..9af2e63050d1 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -185,7 +185,7 @@ static struct attribute *controller_attributes[] = {
NULL,
};
-static struct attribute_group controller_attribute_group = {
+static const struct attribute_group controller_attribute_group = {
.attrs = controller_attributes,
};
@@ -206,7 +206,7 @@ static int can_power_is_enabled(struct regulator_dev *rdev)
return !(readb(cd->cpld_base + CPLD_STATUS1) & CPLD_STATUS1_CAN_POWER);
}
-static struct regulator_ops can_power_ops = {
+static const struct regulator_ops can_power_ops = {
.is_enabled = can_power_is_enabled,
};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
index d7f4ed1df69d..0af2e9914ec4 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
@@ -38,19 +38,19 @@ static void dpaa2_switch_get_drvinfo(struct net_device *netdev,
u16 version_major, version_minor;
int err;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
&version_major,
&version_minor);
if (err)
- strlcpy(drvinfo->fw_version, "N/A",
+ strscpy(drvinfo->fw_version, "N/A",
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u", version_major, version_minor);
- strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
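The strlcpy() to strscpy() conversions here (and in the greybus hunks below) trade strlcpy()'s strlen(src) return value, which forces a walk of the entire source string, for -E2BIG on truncation. A minimal sketch of the difference, assuming only the standard kernel string API:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/string.h>

    static void copy_name(char *dst, size_t dst_len, const char *src)
    {
    	/*
    	 * strscpy() copies at most dst_len - 1 bytes, always NUL-terminates,
    	 * and returns -E2BIG on truncation; it never reads src beyond
    	 * dst_len bytes, unlike strlcpy() which returns strlen(src).
    	 */
    	if (strscpy(dst, src, dst_len) == -E2BIG)
    		pr_warn("name truncated to \"%s\"\n", dst);
    }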
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index d524e92051a3..703055e063ff 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -901,66 +901,64 @@ static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
}
static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
- struct switchdev_trans *trans,
u8 state)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return dpaa2_switch_port_set_stp_state(port_priv, state);
}
-static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+static int
+dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
{
- if (flags & ~(BR_LEARNING | BR_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
return -EINVAL;
return 0;
}
-static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+static int
+dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- /* Learning is enabled per switch */
- err = dpaa2_switch_set_learning(port_priv->ethsw_data,
- !!(flags & BR_LEARNING));
- if (err)
- goto exit;
+ if (flags.mask & BR_LEARNING) {
+ /* Learning is enabled per switch */
+ err = dpaa2_switch_set_learning(port_priv->ethsw_data,
+ !!(flags.val & BR_LEARNING));
+ if (err)
+ return err;
+ }
- err = dpaa2_switch_port_set_flood(port_priv, !!(flags & BR_FLOOD));
+ if (flags.mask & BR_FLOOD) {
+ err = dpaa2_switch_port_set_flood(port_priv,
+ !!(flags.val & BR_FLOOD));
+ if (err)
+ return err;
+ }
-exit:
- return err;
+ return 0;
}
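With the transactional model gone, switchdev hands drivers a struct switchdev_brport_flags whose .mask names the bits the caller changed and whose .val carries their new state, so a driver must act only on masked bits. A sketch of the decode pattern; set_learning and set_flood are hypothetical driver callbacks standing in for the dpaa2 helpers above:

    #include <linux/if_bridge.h>
    #include <net/switchdev.h>

    static int apply_brport_flags(struct switchdev_brport_flags flags,
    			      int (*set_learning)(bool on),
    			      int (*set_flood)(bool on))
    {
    	int err;

    	if (flags.mask & BR_LEARNING) {		/* caller touched learning */
    		err = set_learning(!!(flags.val & BR_LEARNING));
    		if (err)
    			return err;
    	}
    	if (flags.mask & BR_FLOOD) {		/* caller touched flooding */
    		err = set_flood(!!(flags.val & BR_FLOOD));
    		if (err)
    			return err;
    	}
    	return 0;				/* untouched bits are left alone */
    }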
static int dpaa2_switch_port_attr_set(struct net_device *netdev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = dpaa2_switch_port_attr_stp_state_set(netdev, trans,
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_pre_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
@@ -975,54 +973,49 @@ static int dpaa2_switch_port_attr_set(struct net_device *netdev,
}
static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpsw_attr *attr = &ethsw->sw_attr;
- int vid, err = 0, new_vlans = 0;
-
- if (switchdev_trans_ph_prepare(trans)) {
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid])
- new_vlans++;
-
- /* Make sure that the VLAN is not already configured
- * on the switch port
- */
- if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER)
- return -EEXIST;
- }
+ int err = 0;
- /* Check if there is space for a new VLAN */
- err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
- &ethsw->sw_attr);
- if (err) {
- netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
- return err;
- }
- if (attr->max_vlans - attr->num_vlans < new_vlans)
- return -ENOSPC;
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
- return 0;
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
}
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid]) {
- /* this is a new VLAN */
- err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vid);
- if (err)
- return err;
- port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
- }
- err = dpaa2_switch_port_add_vlan(port_priv, vid, vlan->flags);
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid);
if (err)
- break;
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
}
- return err;
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
@@ -1043,15 +1036,11 @@ static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc
}
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* Check if address is already set on this port */
if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
return -EEXIST;
@@ -1070,21 +1059,18 @@ static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
}
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dpaa2_switch_port_vlans_add(netdev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = dpaa2_switch_port_mdb_add(netdev,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1155,18 +1141,11 @@ static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int vid, err = 0;
if (netif_is_bridge_master(vlan->obj.orig_dev))
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = dpaa2_switch_port_del_vlan(port_priv, vid);
- if (err)
- break;
- }
-
- return err;
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
@@ -1216,8 +1195,7 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -1411,8 +1389,7 @@ static int dpaa2_switch_port_obj_event(unsigned long event,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index db83d34cd677..c368082aae1a 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2189,6 +2189,7 @@ static int fwserial_create(struct fw_unit *unit)
err = fw_core_add_address_handler(&port->rx_handler,
&fw_high_memory_region);
if (err) {
+ tty_port_destroy(&port->port);
kfree(port);
goto free_ports;
}
@@ -2271,6 +2272,7 @@ unregister_ttys:
free_ports:
for (--i; i >= 0; --i) {
+ fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
tty_port_destroy(&serial->ports[i]->port);
kfree(serial->ports[i]);
}
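Both fwserial hunks complete the same unwind invariant: a failing iteration must release what it has already acquired before kfree(), and the `for (--i; ...)` loop must release everything acquired by the iterations that completed. A generic sketch of the pattern; acquire_a/acquire_b and their release counterparts are hypothetical stand-ins for tty_port_init()/fw_core_add_address_handler() and friends:

    /* hypothetical per-item helpers for illustration only */
    int acquire_a(int i);
    int acquire_b(int i);
    void release_a(int i);
    void release_b(int i);

    static int create_all(int n)
    {
    	int i, err;

    	for (i = 0; i < n; i++) {
    		err = acquire_a(i);
    		if (err)
    			goto unwind;
    		err = acquire_b(i);
    		if (err) {
    			release_a(i);	/* undo this iteration before unwinding */
    			goto unwind;
    		}
    	}
    	return 0;

    unwind:
    	for (--i; i >= 0; --i) {	/* undo all completed iterations */
    		release_b(i);
    		release_a(i);
    	}
    	return err;
    }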
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index e3047d36d8db..aa65f4fbf860 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -40,10 +40,11 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
/* Read the size of the page table. */
static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
- struct gasket_page_table_ioctl __user *argp)
+ struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
struct gasket_page_table_ioctl ibuf;
+ struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
@@ -51,8 +52,8 @@ static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
- ibuf.size = gasket_page_table_num_entries(
- gasket_dev->page_table[ibuf.page_table_index]);
+ table = gasket_dev->page_table[ibuf.page_table_index];
+ ibuf.size = gasket_page_table_num_entries(table);
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
@@ -66,10 +67,11 @@ static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
/* Read the size of the simple page table. */
static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
- struct gasket_page_table_ioctl __user *argp)
+ struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
struct gasket_page_table_ioctl ibuf;
+ struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
@@ -77,8 +79,8 @@ static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
- ibuf.size =
- gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);
+ table = gasket_dev->page_table[ibuf.page_table_index];
+ ibuf.size = gasket_page_table_num_simple_entries(table);
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
@@ -92,11 +94,12 @@ static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
/* Set the boundary between the simple and extended page tables. */
static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
- struct gasket_page_table_ioctl __user *argp)
+ struct gasket_page_table_ioctl __user *argp)
{
int ret;
struct gasket_page_table_ioctl ibuf;
uint max_page_table_size;
+ struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
@@ -107,8 +110,8 @@ static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
- max_page_table_size = gasket_page_table_max_size(
- gasket_dev->page_table[ibuf.page_table_index]);
+ table = gasket_dev->page_table[ibuf.page_table_index];
+ max_page_table_size = gasket_page_table_max_size(table);
if (ibuf.size > max_page_table_size) {
dev_dbg(gasket_dev->dev,
@@ -119,8 +122,7 @@ static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
mutex_lock(&gasket_dev->mutex);
- ret = gasket_page_table_partition(
- gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
+ ret = gasket_page_table_partition(table, ibuf.size);
mutex_unlock(&gasket_dev->mutex);
return ret;
@@ -131,6 +133,7 @@ static int gasket_map_buffers(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
struct gasket_page_table_ioctl ibuf;
+ struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
@@ -142,13 +145,12 @@ static int gasket_map_buffers(struct gasket_dev *gasket_dev,
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
- if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
- ibuf.host_address,
+ table = gasket_dev->page_table[ibuf.page_table_index];
+ if (gasket_page_table_are_addrs_bad(table, ibuf.host_address,
ibuf.device_address, ibuf.size))
return -EINVAL;
- return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
- ibuf.host_address, ibuf.device_address,
+ return gasket_page_table_map(table, ibuf.host_address, ibuf.device_address,
ibuf.size / PAGE_SIZE);
}
@@ -157,6 +159,7 @@ static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
struct gasket_page_table_ioctl ibuf;
+ struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
@@ -168,12 +171,11 @@ static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
- if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
- ibuf.device_address, ibuf.size))
+ table = gasket_dev->page_table[ibuf.page_table_index];
+ if (gasket_page_table_is_dev_addr_bad(table, ibuf.device_address, ibuf.size))
return -EINVAL;
- gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
- ibuf.device_address, ibuf.size / PAGE_SIZE);
+ gasket_page_table_unmap(table, ibuf.device_address, ibuf.size / PAGE_SIZE);
return 0;
}
@@ -183,7 +185,7 @@ static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
* corresponding memory.
*/
static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
- struct gasket_coherent_alloc_config_ioctl __user *argp)
+ struct gasket_coherent_alloc_config_ioctl __user *argp)
{
int ret;
struct gasket_coherent_alloc_config_ioctl ibuf;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index dc4da66c3695..54bdb64f52e8 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
static int request_mac_address(struct lte_udev *udev)
{
- u8 buf[16] = {0,};
- struct hci_packet *hci = (struct hci_packet *)buf;
+ struct hci_packet *hci;
struct usb_device *usbdev = udev->usbdev;
int actual;
int ret = -1;
+ hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+
hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
hci->data[0] = MAC_ADDRESS;
- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
+ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
&actual, 1000);
udev->request_mac_addr = 1;
+ kfree(hci);
return ret;
}
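The kmalloc(struct_size(hci, data, 1), ...) above sizes a structure ending in a flexible array member with overflow-checked arithmetic. A sketch with a stand-in struct, since the real struct hci_packet layout is not shown in this hunk:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct pkt_example {		/* stand-in for struct hci_packet */
    	__le16 cmd_evt;
    	__le16 len;
    	u8 data[];		/* flexible array member */
    };

    static struct pkt_example *alloc_pkt(size_t n, gfp_t gfp)
    {
    	struct pkt_example *pkt;

    	/*
    	 * struct_size(pkt, data, n) == sizeof(*pkt) + n * sizeof(u8),
    	 * saturating to SIZE_MAX on overflow so kmalloc() fails cleanly.
    	 */
    	pkt = kmalloc(struct_size(pkt, data, n), gfp);
    	return pkt;
    }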
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 3011b8abce38..1ed4772d2771 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -166,7 +166,7 @@ static int gbaudio_remove_controls(struct snd_card *card, struct device *dev,
snprintf(id.name, sizeof(id.name), "%s %s", prefix,
control->name);
else
- strlcpy(id.name, control->name, sizeof(id.name));
+ strscpy(id.name, control->name, sizeof(id.name));
id.numid = 0;
id.iface = control->iface;
id.device = control->device;
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index ab882cc49b41..fcd518f9540c 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -18,8 +18,8 @@ static ssize_t manager_sysfs_add_store(struct kobject *kobj,
struct gb_audio_manager_module_descriptor desc = { {0} };
int num = sscanf(buf,
- "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
- "vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
+ "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF
+ "s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
desc.name, &desc.vid, &desc.pid, &desc.intf_id,
&desc.ip_devices, &desc.op_devices);
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index a243d60f0d56..0f9fdc077b4c 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -342,7 +342,7 @@ static int gb_audio_probe(struct gb_bundle *bundle,
/* inform above layer for uevent */
dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
/* prepare for the audio manager */
- strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
+ strscpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
desc.vid = 2; /* todo */
desc.pid = 3; /* todo */
desc.intf_id = gbmodule->dev_id;
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 662e3e8b4b63..e816e4db555e 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -200,7 +200,7 @@ static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
return -EINVAL;
name = gbaudio_map_controlid(module, data->ctl_id,
uinfo->value.enumerated.item);
- strlcpy(uinfo->value.enumerated.name, name, NAME_SIZE);
+ strscpy(uinfo->value.enumerated.name, name, NAME_SIZE);
break;
default:
dev_err(comp->dev, "Invalid type: %d for %s:kcontrol\n",
@@ -1047,7 +1047,7 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
}
/* Prefix dev_id to widget control_name */
- strlcpy(temp_name, w->name, NAME_SIZE);
+ strscpy(temp_name, w->name, NAME_SIZE);
snprintf(w->name, NAME_SIZE, "GB %d %s", module->dev_id, temp_name);
switch (w->type) {
@@ -1169,7 +1169,7 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
}
control->id = curr->id;
/* Prefix dev_id to widget_name */
- strlcpy(temp_name, curr->name, NAME_SIZE);
+ strscpy(temp_name, curr->name, NAME_SIZE);
snprintf(curr->name, NAME_SIZE, "GB %d %s", module->dev_id,
temp_name);
control->name = curr->name;
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index ed706f39e87a..adb91286803a 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -221,8 +221,8 @@ static void gb_hid_init_reports(struct gb_hid *ghid)
}
static int __gb_hid_get_raw_report(struct hid_device *hid,
- unsigned char report_number, __u8 *buf, size_t count,
- unsigned char report_type)
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
{
struct gb_hid *ghid = hid->driver_data;
int ret;
@@ -254,7 +254,7 @@ static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
if (report_id && ret >= 0)
- ret++; /* add report_id to the number of transfered bytes */
+ ret++; /* add report_id to the number of transferred bytes */
return 0;
}
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d2672b65c3f4..87d36948c610 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -290,8 +290,7 @@ static int channel_attr_groups_set(struct gb_channel *channel,
channel->attrs = kcalloc(size + 1, sizeof(*channel->attrs), GFP_KERNEL);
if (!channel->attrs)
return -ENOMEM;
- channel->attr_group = kcalloc(1, sizeof(*channel->attr_group),
- GFP_KERNEL);
+ channel->attr_group = kzalloc(sizeof(*channel->attr_group), GFP_KERNEL);
if (!channel->attr_group)
return -ENOMEM;
channel->attr_groups = kcalloc(2, sizeof(*channel->attr_groups),
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index ec96f28887f9..75cb170e05fb 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -449,7 +449,7 @@ static int __gb_power_supply_set_name(char *init_name, char *name, size_t len)
if (!strlen(init_name))
init_name = "gb_power_supply";
- strlcpy(name, init_name, len);
+ strscpy(name, init_name, len);
while ((ret < len) && (psy = power_supply_get_by_name(name))) {
power_supply_put(psy);
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index fc27c52de74a..672d540d3365 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -455,10 +455,10 @@ static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
dev_type = response.device_type;
if (dev_type == GB_SPI_SPI_DEV)
- strlcpy(spi_board.modalias, "spidev",
+ strscpy(spi_board.modalias, "spidev",
sizeof(spi_board.modalias));
else if (dev_type == GB_SPI_SPI_NOR)
- strlcpy(spi_board.modalias, "spi-nor",
+ strscpy(spi_board.modalias, "spi-nor",
sizeof(spi_board.modalias));
else if (dev_type == GB_SPI_SPI_MODALIAS)
memcpy(spi_board.modalias, response.name,
diff --git a/drivers/staging/hikey9xx/Kconfig b/drivers/staging/hikey9xx/Kconfig
index 2e48ded92a7e..82bb4a22b286 100644
--- a/drivers/staging/hikey9xx/Kconfig
+++ b/drivers/staging/hikey9xx/Kconfig
@@ -29,6 +29,7 @@ config MFD_HI6421_SPMI
depends on OF
depends on SPMI
select MFD_CORE
+ select REGMAP_SPMI
help
Add support for HiSilicon Hi6421v600 SPMI PMIC. Hi6421 includes
multi-functions, such as regulators, RTC, codec, Coulomb counter,
@@ -44,6 +45,7 @@ config REGULATOR_HI6421V600
tristate "HiSilicon Hi6421v600 PMIC voltage regulator support"
depends on MFD_HI6421_SPMI && OF
depends on REGULATOR
+ select REGMAP
help
This driver provides support for the voltage regulators on
HiSilicon Hi6421v600 PMU / Codec IC.
diff --git a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
index 4f34a5282970..4ebcfea9f3bf 100644
--- a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
+++ b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
@@ -4,149 +4,105 @@
*
* Copyright (c) 2013 Linaro Ltd.
* Copyright (c) 2011 Hisilicon.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+ * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
*/
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/hi6421-spmi-pmic.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>
-/* 8-bit register offset in PMIC */
-#define HISI_MASK_STATE 0xff
+enum hi6421_spmi_pmic_irq_list {
+ OTMP = 0,
+ VBUS_CONNECT,
+ VBUS_DISCONNECT,
+ ALARMON_R,
+ HOLD_6S,
+ HOLD_1S,
+ POWERKEY_UP,
+ POWERKEY_DOWN,
+ OCP_SCP_R,
+ COUL_R,
+ SIM0_HPD_R,
+ SIM0_HPD_F,
+ SIM1_HPD_R,
+ SIM1_HPD_F,
+ PMIC_IRQ_LIST_MAX,
+};
#define HISI_IRQ_ARRAY 2
#define HISI_IRQ_NUM (HISI_IRQ_ARRAY * 8)
-#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
-#define SOC_PMIC_IRQ0_ADDR 0x0212
-
#define HISI_IRQ_KEY_NUM 0
-#define HISI_IRQ_KEY_VALUE 0xc0
-#define HISI_IRQ_KEY_DOWN 7
-#define HISI_IRQ_KEY_UP 6
-#define HISI_MASK_FIELD 0xFF
#define HISI_BITS 8
-
-/*define the first group interrupt register number*/
-#define HISI_PMIC_FIRST_GROUP_INT_NUM 2
-
-static const struct mfd_cell hi6421v600_devs[] = {
- { .name = "hi6421v600-regulator", },
-};
+#define HISI_IRQ_KEY_VALUE (BIT(POWERKEY_DOWN) | BIT(POWERKEY_UP))
+#define HISI_MASK GENMASK(HISI_BITS - 1, 0)
/*
- * The PMIC register is only 8-bit.
- * Hisilicon SoC use hardware to map PMIC register into SoC mapping.
- * At here, we are accessing SoC register with 32-bit.
+ * The IRQs are mapped as:
+ *
+ * ====================== ============= ============ =====
+ * IRQ MASK REGISTER IRQ REGISTER BIT
+ * ====================== ============= ============ =====
+ * OTMP 0x0202 0x212 bit 0
+ * VBUS_CONNECT 0x0202 0x212 bit 1
+ * VBUS_DISCONNECT 0x0202 0x212 bit 2
+ * ALARMON_R 0x0202 0x212 bit 3
+ * HOLD_6S 0x0202 0x212 bit 4
+ * HOLD_1S 0x0202 0x212 bit 5
+ * POWERKEY_UP 0x0202 0x212 bit 6
+ * POWERKEY_DOWN 0x0202 0x212 bit 7
+ *
+ * OCP_SCP_R 0x0203 0x213 bit 0
+ * COUL_R 0x0203 0x213 bit 1
+ * SIM0_HPD_R 0x0203 0x213 bit 2
+ * SIM0_HPD_F 0x0203 0x213 bit 3
+ * SIM1_HPD_R 0x0203 0x213 bit 4
+ * SIM1_HPD_F 0x0203 0x213 bit 5
+ * ====================== ============= ============ =====
*/
-int hi6421_spmi_pmic_read(struct hi6421_spmi_pmic *pmic, int reg)
-{
- struct spmi_device *pdev;
- u8 read_value = 0;
- u32 ret;
-
- pdev = to_spmi_device(pmic->dev);
- if (!pdev) {
- pr_err("%s: pdev get failed!\n", __func__);
- return -ENODEV;
- }
-
- ret = spmi_ext_register_readl(pdev, reg, &read_value, 1);
- if (ret) {
- pr_err("%s: spmi_ext_register_readl failed!\n", __func__);
- return ret;
- }
- return read_value;
-}
-EXPORT_SYMBOL(hi6421_spmi_pmic_read);
-
-int hi6421_spmi_pmic_write(struct hi6421_spmi_pmic *pmic, int reg, u32 val)
-{
- struct spmi_device *pdev;
- u32 ret;
-
- pdev = to_spmi_device(pmic->dev);
- if (!pdev) {
- pr_err("%s: pdev get failed!\n", __func__);
- return -ENODEV;
- }
-
- ret = spmi_ext_register_writel(pdev, reg, (unsigned char *)&val, 1);
- if (ret)
- pr_err("%s: spmi_ext_register_writel failed!\n", __func__);
-
- return ret;
-}
-EXPORT_SYMBOL(hi6421_spmi_pmic_write);
-
-int hi6421_spmi_pmic_rmw(struct hi6421_spmi_pmic *pmic, int reg,
- u32 mask, u32 bits)
-{
- unsigned long flags;
- u32 data;
- int ret;
+#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
+#define SOC_PMIC_IRQ0_ADDR 0x0212
- spin_lock_irqsave(&pmic->lock, flags);
- data = hi6421_spmi_pmic_read(pmic, reg) & ~mask;
- data |= mask & bits;
- ret = hi6421_spmi_pmic_write(pmic, reg, data);
- spin_unlock_irqrestore(&pmic->lock, flags);
+#define IRQ_MASK_REGISTER(irq_data) (SOC_PMIC_IRQ_MASK_0_ADDR + \
+ (irqd_to_hwirq(irq_data) >> 3))
+#define IRQ_MASK_BIT(irq_data) BIT(irqd_to_hwirq(irq_data) & 0x07)
- return ret;
-}
-EXPORT_SYMBOL(hi6421_spmi_pmic_rmw);
+static const struct mfd_cell hi6421v600_devs[] = {
+ { .name = "hi6421v600-regulator", },
+};
-static irqreturn_t hi6421_spmi_irq_handler(int irq, void *data)
+static irqreturn_t hi6421_spmi_irq_handler(int irq, void *priv)
{
- struct hi6421_spmi_pmic *pmic = (struct hi6421_spmi_pmic *)data;
+ struct hi6421_spmi_pmic *ddata = (struct hi6421_spmi_pmic *)priv;
unsigned long pending;
+ unsigned int in;
int i, offset;
for (i = 0; i < HISI_IRQ_ARRAY; i++) {
- pending = hi6421_spmi_pmic_read(pmic, (i + SOC_PMIC_IRQ0_ADDR));
- pending &= HISI_MASK_FIELD;
- if (pending != 0)
- pr_debug("pending[%d]=0x%lx\n\r", i, pending);
-
- hi6421_spmi_pmic_write(pmic, (i + SOC_PMIC_IRQ0_ADDR), pending);
-
- /* solve powerkey order */
- if ((i == HISI_IRQ_KEY_NUM) &&
- ((pending & HISI_IRQ_KEY_VALUE) == HISI_IRQ_KEY_VALUE)) {
- generic_handle_irq(pmic->irqs[HISI_IRQ_KEY_DOWN]);
- generic_handle_irq(pmic->irqs[HISI_IRQ_KEY_UP]);
+ regmap_read(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, &in);
+ pending = HISI_MASK & in;
+ regmap_write(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, pending);
+
+ if (i == HISI_IRQ_KEY_NUM &&
+ (pending & HISI_IRQ_KEY_VALUE) == HISI_IRQ_KEY_VALUE) {
+ generic_handle_irq(ddata->irqs[POWERKEY_DOWN]);
+ generic_handle_irq(ddata->irqs[POWERKEY_UP]);
pending &= (~HISI_IRQ_KEY_VALUE);
}
- if (pending) {
- for_each_set_bit(offset, &pending, HISI_BITS)
- generic_handle_irq(pmic->irqs[offset + i * HISI_BITS]);
- }
+ if (!pending)
+ continue;
+
+ for_each_set_bit(offset, &pending, HISI_BITS)
+ generic_handle_irq(ddata->irqs[offset + i * HISI_BITS]);
}
return IRQ_HANDLED;
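The register table above is exactly what IRQ_MASK_REGISTER()/IRQ_MASK_BIT() encode: eight interrupts per 8-bit register, mask registers starting at 0x0202. A worked check of the arithmetic, using SIM0_HPD_R, which is hwirq 10 in the enum above:

    #include <linux/bits.h>
    #include <linux/types.h>

    static inline u16 irq_mask_reg(unsigned int hwirq)
    {
    	return 0x0202 + (hwirq >> 3);	/* SOC_PMIC_IRQ_MASK_0_ADDR */
    }

    static inline u8 irq_mask_bit(unsigned int hwirq)
    {
    	return BIT(hwirq & 0x07);
    }

    /* SIM0_HPD_R: hwirq 10, so reg = 0x0202 + 1 = 0x0203 and bit = BIT(2),
     * matching the table above.
     */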
@@ -154,34 +110,38 @@ static irqreturn_t hi6421_spmi_irq_handler(int irq, void *data)
static void hi6421_spmi_irq_mask(struct irq_data *d)
{
- struct hi6421_spmi_pmic *pmic = irq_data_get_irq_chip_data(d);
- u32 data, offset;
+ struct hi6421_spmi_pmic *ddata = irq_data_get_irq_chip_data(d);
unsigned long flags;
+ unsigned int data;
+ u32 offset;
- offset = (irqd_to_hwirq(d) >> 3);
- offset += SOC_PMIC_IRQ_MASK_0_ADDR;
+ offset = IRQ_MASK_REGISTER(d);
- spin_lock_irqsave(&pmic->lock, flags);
- data = hi6421_spmi_pmic_read(pmic, offset);
- data |= (1 << (irqd_to_hwirq(d) & 0x07));
- hi6421_spmi_pmic_write(pmic, offset, data);
- spin_unlock_irqrestore(&pmic->lock, flags);
+ spin_lock_irqsave(&ddata->lock, flags);
+
+ regmap_read(ddata->regmap, offset, &data);
+ data |= IRQ_MASK_BIT(d);
+ regmap_write(ddata->regmap, offset, data);
+
+ spin_unlock_irqrestore(&ddata->lock, flags);
}
static void hi6421_spmi_irq_unmask(struct irq_data *d)
{
- struct hi6421_spmi_pmic *pmic = irq_data_get_irq_chip_data(d);
+ struct hi6421_spmi_pmic *ddata = irq_data_get_irq_chip_data(d);
u32 data, offset;
unsigned long flags;
offset = (irqd_to_hwirq(d) >> 3);
offset += SOC_PMIC_IRQ_MASK_0_ADDR;
- spin_lock_irqsave(&pmic->lock, flags);
- data = hi6421_spmi_pmic_read(pmic, offset);
+ spin_lock_irqsave(&ddata->lock, flags);
+
+ regmap_read(ddata->regmap, offset, &data);
data &= ~(1 << (irqd_to_hwirq(d) & 0x07));
- hi6421_spmi_pmic_write(pmic, offset, data);
- spin_unlock_irqrestore(&pmic->lock, flags);
+ regmap_write(ddata->regmap, offset, data);
+
+ spin_unlock_irqrestore(&ddata->lock, flags);
}
static struct irq_chip hi6421_spmi_pmu_irqchip = {
@@ -195,11 +155,11 @@ static struct irq_chip hi6421_spmi_pmu_irqchip = {
static int hi6421_spmi_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
- struct hi6421_spmi_pmic *pmic = d->host_data;
+ struct hi6421_spmi_pmic *ddata = d->host_data;
irq_set_chip_and_handler_name(virq, &hi6421_spmi_pmu_irqchip,
handle_simple_irq, "hisi");
- irq_set_chip_data(virq, pmic);
+ irq_set_chip_data(virq, ddata);
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
@@ -210,118 +170,111 @@ static const struct irq_domain_ops hi6421_spmi_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static void hi6421_spmi_pmic_irq_prc(struct hi6421_spmi_pmic *pmic)
+static void hi6421_spmi_pmic_irq_init(struct hi6421_spmi_pmic *ddata)
{
- int i, pending;
+ int i;
+ unsigned int pending;
- for (i = 0 ; i < HISI_IRQ_ARRAY; i++)
- hi6421_spmi_pmic_write(pmic, SOC_PMIC_IRQ_MASK_0_ADDR + i,
- HISI_MASK_STATE);
+ for (i = 0; i < HISI_IRQ_ARRAY; i++)
+ regmap_write(ddata->regmap, SOC_PMIC_IRQ_MASK_0_ADDR + i,
+ HISI_MASK);
- for (i = 0 ; i < HISI_IRQ_ARRAY; i++) {
- pending = hi6421_spmi_pmic_read(pmic, SOC_PMIC_IRQ0_ADDR + i);
-
- pr_debug("PMU IRQ address value:irq[0x%x] = 0x%x\n",
- SOC_PMIC_IRQ0_ADDR + i, pending);
- hi6421_spmi_pmic_write(pmic, SOC_PMIC_IRQ0_ADDR + i,
- HISI_MASK_STATE);
+ for (i = 0; i < HISI_IRQ_ARRAY; i++) {
+ regmap_read(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, &pending);
+ regmap_write(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i,
+ HISI_MASK);
}
}
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = HISI_BITS,
+ .max_register = 0xffff,
+ .fast_io = true
+};
+
static int hi6421_spmi_pmic_probe(struct spmi_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct hi6421_spmi_pmic *pmic;
+ struct hi6421_spmi_pmic *ddata;
unsigned int virq;
int ret, i;
- pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
return -ENOMEM;
- spin_lock_init(&pmic->lock);
+ ddata->regmap = devm_regmap_init_spmi_ext(pdev, &regmap_config);
+ if (IS_ERR(ddata->regmap))
+ return PTR_ERR(ddata->regmap);
- pmic->dev = dev;
+ spin_lock_init(&ddata->lock);
- pmic->gpio = of_get_gpio(np, 0);
- if (pmic->gpio < 0)
- return pmic->gpio;
+ ddata->dev = dev;
- if (!gpio_is_valid(pmic->gpio))
+ ddata->gpio = of_get_gpio(np, 0);
+ if (ddata->gpio < 0)
+ return ddata->gpio;
+
+ if (!gpio_is_valid(ddata->gpio))
return -EINVAL;
- ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN, "pmic");
+ ret = devm_gpio_request_one(dev, ddata->gpio, GPIOF_IN, "pmic");
if (ret < 0) {
- dev_err(dev, "failed to request gpio%d\n", pmic->gpio);
+ dev_err(dev, "Failed to request gpio%d\n", ddata->gpio);
return ret;
}
- pmic->irq = gpio_to_irq(pmic->gpio);
+ ddata->irq = gpio_to_irq(ddata->gpio);
- hi6421_spmi_pmic_irq_prc(pmic);
+ hi6421_spmi_pmic_irq_init(ddata);
- pmic->irqs = devm_kzalloc(dev, HISI_IRQ_NUM * sizeof(int), GFP_KERNEL);
- if (!pmic->irqs) {
- ret = -ENOMEM;
- goto irq_malloc;
- }
+ ddata->irqs = devm_kzalloc(dev, HISI_IRQ_NUM * sizeof(int), GFP_KERNEL);
+ if (!ddata->irqs)
+ return -ENOMEM;
- pmic->domain = irq_domain_add_simple(np, HISI_IRQ_NUM, 0,
- &hi6421_spmi_domain_ops, pmic);
- if (!pmic->domain) {
- dev_err(dev, "failed irq domain add simple!\n");
- ret = -ENODEV;
- goto irq_malloc;
+ ddata->domain = irq_domain_add_simple(np, HISI_IRQ_NUM, 0,
+ &hi6421_spmi_domain_ops, ddata);
+ if (!ddata->domain) {
+ dev_err(dev, "Failed to create IRQ domain\n");
+ return -ENODEV;
}
for (i = 0; i < HISI_IRQ_NUM; i++) {
- virq = irq_create_mapping(pmic->domain, i);
+ virq = irq_create_mapping(ddata->domain, i);
if (!virq) {
- dev_err(dev, "Failed mapping hwirq\n");
- ret = -ENOSPC;
- goto irq_malloc;
+ dev_err(dev, "Failed to map H/W IRQ\n");
+ return -ENOSPC;
}
- pmic->irqs[i] = virq;
- dev_dbg(dev, "%s: pmic->irqs[%d] = %d\n",
- __func__, i, pmic->irqs[i]);
+ ddata->irqs[i] = virq;
}
- ret = request_threaded_irq(pmic->irq, hi6421_spmi_irq_handler, NULL,
+ ret = request_threaded_irq(ddata->irq, hi6421_spmi_irq_handler, NULL,
IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_NO_SUSPEND,
- "pmic", pmic);
+ "pmic", ddata);
if (ret < 0) {
- dev_err(dev, "could not claim pmic IRQ: error %d\n", ret);
- goto irq_malloc;
+ dev_err(dev, "Failed to start IRQ handling thread: error %d\n",
+ ret);
+ return ret;
}
- dev_set_drvdata(&pdev->dev, pmic);
+ dev_set_drvdata(&pdev->dev, ddata);
- /*
- * The logic below will rely that the pmic is already stored at
- * drvdata.
- */
- dev_dbg(&pdev->dev, "SPMI-PMIC: adding children for %pOF\n",
- pdev->dev.of_node);
ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
hi6421v600_devs, ARRAY_SIZE(hi6421v600_devs),
NULL, 0, NULL);
- if (!ret)
- return 0;
-
- dev_err(dev, "Failed to add child devices: %d\n", ret);
-
-irq_malloc:
- free_irq(pmic->irq, pmic);
+ if (ret < 0)
+ dev_err(dev, "Failed to add child devices: %d\n", ret);
return ret;
}
static void hi6421_spmi_pmic_remove(struct spmi_device *pdev)
{
- struct hi6421_spmi_pmic *pmic = dev_get_drvdata(&pdev->dev);
+ struct hi6421_spmi_pmic *ddata = dev_get_drvdata(&pdev->dev);
- free_irq(pmic->irq, pmic);
+ free_irq(ddata->irq, ddata);
}
static const struct of_device_id pmic_spmi_id_table[] = {
diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
index 614b03c9ddfb..f6a14e9c3cbf 100644
--- a/drivers/staging/hikey9xx/hi6421v600-regulator.c
+++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
@@ -1,183 +1,148 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Device driver for regulators in Hisi IC
- *
- * Copyright (c) 2013 Linaro Ltd.
- * Copyright (c) 2011 Hisilicon.
- *
- * Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+//
+// Device driver for regulators in Hisi IC
+//
+// Copyright (c) 2013 Linaro Ltd.
+// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
+//
+// Guodong Xu <guodong.xu@linaro.org>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/io.h>
#include <linux/mfd/hi6421-spmi-pmic.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/spmi.h>
-#include <linux/time.h>
-#include <linux/uaccess.h>
-
-#define rdev_dbg(rdev, fmt, arg...) \
- pr_debug("%s: %s: " fmt, (rdev)->desc->name, __func__, ##arg)
-struct hi6421v600_regulator {
- struct regulator_desc rdesc;
+struct hi6421_spmi_reg_info {
+ struct regulator_desc desc;
struct hi6421_spmi_pmic *pmic;
- u32 eco_mode_mask, eco_uA;
+ u8 eco_mode_mask;
+ u32 eco_uA;
+
+ /* Serialize regulator enable logic */
+ struct mutex enable_mutex;
};
-static DEFINE_MUTEX(enable_mutex);
+static const unsigned int ldo3_voltages[] = {
+ 1500000, 1550000, 1600000, 1650000,
+ 1700000, 1725000, 1750000, 1775000,
+ 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 2000000
+};
-/*
- * helper function to ensure when it returns it is at least 'delay_us'
- * microseconds after 'since'.
- */
+static const unsigned int ldo4_voltages[] = {
+ 1725000, 1750000, 1775000, 1800000,
+ 1825000, 1850000, 1875000, 1900000
+};
-static int hi6421_spmi_regulator_is_enabled(struct regulator_dev *rdev)
-{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 reg_val;
+static const unsigned int ldo9_voltages[] = {
+ 1750000, 1800000, 1825000, 2800000,
+ 2850000, 2950000, 3000000, 3300000
+};
- reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->enable_reg);
+static const unsigned int ldo15_voltages[] = {
+ 1800000, 1850000, 2400000, 2600000,
+ 2700000, 2850000, 2950000, 3000000
+};
- rdev_dbg(rdev,
- "enable_reg=0x%x, val= 0x%x, enable_state=%d\n",
- rdev->desc->enable_reg,
- reg_val, (reg_val & rdev->desc->enable_mask));
+static const unsigned int ldo17_voltages[] = {
+ 2500000, 2600000, 2700000, 2800000,
+ 3000000, 3100000, 3200000, 3300000
+};
- return ((reg_val & rdev->desc->enable_mask) != 0);
-}
+static const unsigned int ldo34_voltages[] = {
+ 2600000, 2700000, 2800000, 2900000,
+ 3000000, 3100000, 3200000, 3300000
+};
+
+/**
+ * HI6421V600_LDO() - specify an LDO power line
+ * @_id: LDO id name string
+ * @vtable: voltage table
+ * @ereg: enable register
+ * @emask: enable mask
+ * @vreg: voltage select register
+ * @odelay: off/on delay time in uS
+ * @etime: enable time in uS
+ * @ecomask: eco mode mask
+ * @ecoamp: eco mode load upper limit in uA
+ */
+#define HI6421V600_LDO(_id, vtable, ereg, emask, vreg, \
+ odelay, etime, ecomask, ecoamp) \
+ [HI6421V600_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &hi6421_spmi_ldo_rops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421V600_##_id, \
+ .owner = THIS_MODULE, \
+ .volt_table = vtable, \
+ .n_voltages = ARRAY_SIZE(vtable), \
+ .vsel_mask = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+ .vsel_reg = vreg, \
+ .enable_reg = ereg, \
+ .enable_mask = emask, \
+ .enable_time = etime, \
+ .ramp_delay = etime, \
+ .off_on_delay = odelay, \
+ }, \
+ .eco_mode_mask = ecomask, \
+ .eco_uA = ecoamp, \
+ }
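For orientation, a sketch of how a regulator table entry would be declared with this macro. The enum index (HI6421V600_LDO3), the enable/vsel register addresses, and the masks below are placeholders for illustration, not the real Hi6421v600 values; ldo3_voltages is the table defined above:

    static struct hi6421_spmi_reg_info regulator_info[] = {
    	/* placeholder registers: ereg 0x16, emask BIT(0), vreg 0x51 */
    	HI6421V600_LDO(LDO3, ldo3_voltages,
    		       0x16, BIT(0), 0x51,
    		       20000, 120,	/* off/on delay and enable time, uS */
    		       0, 0),		/* no eco mode on this line */
    };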
static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ int ret;
/* cannot enable more than one regulator at one time */
- mutex_lock(&enable_mutex);
- usleep_range(HISI_REGS_ENA_PROTECT_TIME,
- HISI_REGS_ENA_PROTECT_TIME + 1000);
+ mutex_lock(&sreg->enable_mutex);
- /* set enable register */
- rdev_dbg(rdev,
- "off_on_delay=%d us, enable_reg=0x%x, enable_mask=0x%x\n",
- rdev->desc->off_on_delay, rdev->desc->enable_reg,
- rdev->desc->enable_mask);
+ ret = regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ rdev->desc->enable_mask);
- hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
- rdev->desc->enable_mask,
- rdev->desc->enable_mask);
+ /* Avoid powering up multiple devices at the same time */
+ usleep_range(rdev->desc->off_on_delay, rdev->desc->off_on_delay + 60);
- mutex_unlock(&enable_mutex);
+ mutex_unlock(&sreg->enable_mutex);
- return 0;
+ return ret;
}
static int hi6421_spmi_regulator_disable(struct regulator_dev *rdev)
{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
-
- /* set enable register to 0 */
- rdev_dbg(rdev, "enable_reg=0x%x, enable_mask=0x%x\n",
- rdev->desc->enable_reg, rdev->desc->enable_mask);
-
- hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
- rdev->desc->enable_mask, 0);
-
- return 0;
-}
-
-static int hi6421_spmi_regulator_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 reg_val, selector;
-
- /* get voltage selector */
- reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->vsel_reg);
-
- selector = (reg_val & rdev->desc->vsel_mask) >> (ffs(rdev->desc->vsel_mask) - 1);
- rdev_dbg(rdev,
- "vsel_reg=0x%x, value=0x%x, entry=0x%x, voltage=%d mV\n",
- rdev->desc->vsel_reg, reg_val, selector,
- rdev->desc->ops->list_voltage(rdev, selector) / 1000);
-
- return selector;
-}
-
-static int hi6421_spmi_regulator_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 reg_val;
-
- if (unlikely(selector >= rdev->desc->n_voltages))
- return -EINVAL;
-
- reg_val = selector << (ffs(rdev->desc->vsel_mask) - 1);
-
- /* set voltage selector */
- rdev_dbg(rdev,
- "vsel_reg=0x%x, mask=0x%x, value=0x%x, voltage=%d mV\n",
- rdev->desc->vsel_reg, rdev->desc->vsel_mask, reg_val,
- rdev->desc->ops->list_voltage(rdev, selector) / 1000);
-
- hi6421_spmi_pmic_rmw(pmic, rdev->desc->vsel_reg,
- rdev->desc->vsel_mask, reg_val);
-
- return 0;
+ return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, 0);
}
static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
struct hi6421_spmi_pmic *pmic = sreg->pmic;
- unsigned int mode;
u32 reg_val;
- reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->enable_reg);
+ regmap_read(pmic->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & sreg->eco_mode_mask)
- mode = REGULATOR_MODE_IDLE;
- else
- mode = REGULATOR_MODE_NORMAL;
-
- rdev_dbg(rdev,
- "enable_reg=0x%x, eco_mode_mask=0x%x, reg_val=0x%x, %s mode\n",
- rdev->desc->enable_reg, sreg->eco_mode_mask, reg_val,
- mode == REGULATOR_MODE_IDLE ? "idle" : "normal");
+ return REGULATOR_MODE_IDLE;
- return mode;
+ return REGULATOR_MODE_NORMAL;
}
static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
struct hi6421_spmi_pmic *pmic = sreg->pmic;
u32 val;
@@ -192,14 +157,8 @@ static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
return -EINVAL;
}
- /* set mode */
- rdev_dbg(rdev, "enable_reg=0x%x, eco_mode_mask=0x%x, value=0x%x\n",
- rdev->desc->enable_reg, sreg->eco_mode_mask, val);
-
- hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
- sreg->eco_mode_mask, val);
-
- return 0;
+ return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ sreg->eco_mode_mask, val);
}
static unsigned int
@@ -207,199 +166,84 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV,
int load_uA)
{
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- if (load_uA || ((unsigned int)load_uA > sreg->eco_uA))
+ if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA))
return REGULATOR_MODE_NORMAL;
return REGULATOR_MODE_IDLE;
}
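Worked example of the fixed condition: for LDO4 below (eco_uA = 10000), a 5000 uA load now selects REGULATOR_MODE_IDLE while a 20000 uA load selects REGULATOR_MODE_NORMAL, and LDOs with eco_uA == 0 (LDO3, LDO33, LDO34) always report normal mode. The old `load_uA || ...` test made any nonzero load pick normal mode, so idle mode was effectively unreachable.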
-static int hi6421_spmi_dt_parse(struct platform_device *pdev,
- struct hi6421v600_regulator *sreg,
- struct regulator_desc *rdesc)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- unsigned int *v_table;
- int ret;
-
- ret = of_property_read_u32(np, "reg", &rdesc->enable_reg);
- if (ret) {
- dev_err(dev, "missing reg property\n");
- return ret;
- }
-
- ret = of_property_read_u32(np, "vsel-reg", &rdesc->vsel_reg);
- if (ret) {
- dev_err(dev, "missing vsel-reg property\n");
- return ret;
- }
-
- ret = of_property_read_u32(np, "enable-mask", &rdesc->enable_mask);
- if (ret) {
- dev_err(dev, "missing enable-mask property\n");
- return ret;
- }
-
- /*
- * Not all regulators work on idle mode
- */
- ret = of_property_read_u32(np, "idle-mode-mask", &sreg->eco_mode_mask);
- if (ret) {
- dev_dbg(dev, "LDO doesn't support economy mode.\n");
- sreg->eco_mode_mask = 0;
- sreg->eco_uA = 0;
- } else {
- ret = of_property_read_u32(np, "eco-microamp", &sreg->eco_uA);
- if (ret) {
- dev_err(dev, "missing eco-microamp property\n");
- return ret;
- }
- }
-
- /* parse .off-on-delay */
- ret = of_property_read_u32(np, "off-on-delay-us",
- &rdesc->off_on_delay);
- if (ret) {
- dev_err(dev, "missing off-on-delay-us property\n");
- return ret;
- }
-
- /* parse .enable_time */
- ret = of_property_read_u32(np, "startup-delay-us",
- &rdesc->enable_time);
- if (ret) {
- dev_err(dev, "missing startup-delay-us property\n");
- return ret;
- }
-
- /* FIXME: are there a better value for this? */
- rdesc->ramp_delay = rdesc->enable_time;
-
- /* parse volt_table */
-
- rdesc->n_voltages = of_property_count_u32_elems(np, "voltage-table");
-
- v_table = devm_kzalloc(dev, sizeof(unsigned int) * rdesc->n_voltages,
- GFP_KERNEL);
- if (unlikely(!v_table))
- return -ENOMEM;
- rdesc->volt_table = v_table;
-
- ret = of_property_read_u32_array(np, "voltage-table",
- v_table, rdesc->n_voltages);
- if (ret) {
- dev_err(dev, "missing voltage-table property\n");
- return ret;
- }
-
- /*
- * Instead of explicitly requiring a mask for the voltage selector,
- * as they all start from bit zero (at least on the known LDOs),
- * just use the number of voltages at the voltage table, getting the
- * minimal mask that would pick everything.
- */
- rdesc->vsel_mask = (1 << (fls(rdesc->n_voltages) - 1)) - 1;
-
- dev_dbg(dev, "voltage selector settings: reg: 0x%x, mask: 0x%x\n",
- rdesc->vsel_reg, rdesc->vsel_mask);
-
- return 0;
-}
-
static const struct regulator_ops hi6421_spmi_ldo_rops = {
- .is_enabled = hi6421_spmi_regulator_is_enabled,
+ .is_enabled = regulator_is_enabled_regmap,
.enable = hi6421_spmi_regulator_enable,
.disable = hi6421_spmi_regulator_disable,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
- .get_voltage_sel = hi6421_spmi_regulator_get_voltage_sel,
- .set_voltage_sel = hi6421_spmi_regulator_set_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = hi6421_spmi_regulator_get_mode,
.set_mode = hi6421_spmi_regulator_set_mode,
.get_optimum_mode = hi6421_spmi_regulator_get_optimum_mode,
};
-static int hi6421_spmi_regulator_probe_ldo(struct platform_device *pdev,
- struct device_node *np,
- struct hi6421_spmi_pmic *pmic)
-{
- struct regulation_constraints *constraint;
- struct regulator_init_data *initdata;
- struct regulator_config config = { };
- struct hi6421v600_regulator *sreg;
- struct device *dev = &pdev->dev;
- struct regulator_desc *rdesc;
- struct regulator_dev *rdev;
- const char *supplyname;
- int ret;
-
- initdata = of_get_regulator_init_data(dev, np, NULL);
- if (!initdata) {
- dev_err(dev, "failed to get regulator data\n");
- return -EINVAL;
- }
-
- sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
- if (!sreg)
- return -ENOMEM;
-
- sreg->pmic = pmic;
- rdesc = &sreg->rdesc;
-
- rdesc->name = initdata->constraints.name;
- rdesc->ops = &hi6421_spmi_ldo_rops;
- rdesc->type = REGULATOR_VOLTAGE;
- rdesc->min_uV = initdata->constraints.min_uV;
-
- supplyname = of_get_property(np, "supply_name", NULL);
- if (supplyname)
- initdata->supply_regulator = supplyname;
-
- /* parse device tree data for regulator specific */
- ret = hi6421_spmi_dt_parse(pdev, sreg, rdesc);
- if (ret)
- return ret;
-
- /* hisi regulator supports two modes */
- constraint = &initdata->constraints;
-
- constraint->valid_modes_mask = REGULATOR_MODE_NORMAL;
- if (sreg->eco_mode_mask) {
- constraint->valid_modes_mask |= REGULATOR_MODE_IDLE;
- constraint->valid_ops_mask |= REGULATOR_CHANGE_MODE;
- }
-
- config.dev = &pdev->dev;
- config.init_data = initdata;
- config.driver_data = sreg;
- config.of_node = pdev->dev.of_node;
-
- /* register regulator */
- rdev = regulator_register(rdesc, &config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "failed to register %s\n",
- rdesc->name);
- return PTR_ERR(rdev);
- }
-
- rdev_dbg(rdev, "valid_modes_mask: 0x%x, valid_ops_mask: 0x%x\n",
- constraint->valid_modes_mask, constraint->valid_ops_mask);
-
- dev_set_drvdata(dev, rdev);
+/* HI6421v600 regulators with known registers */
+enum hi6421_spmi_regulator_id {
+ HI6421V600_LDO3,
+ HI6421V600_LDO4,
+ HI6421V600_LDO9,
+ HI6421V600_LDO15,
+ HI6421V600_LDO16,
+ HI6421V600_LDO17,
+ HI6421V600_LDO33,
+ HI6421V600_LDO34,
+};
- return 0;
-}
+static struct hi6421_spmi_reg_info regulator_info[] = {
+ HI6421V600_LDO(LDO3, ldo3_voltages,
+ 0x16, 0x01, 0x51,
+ 20000, 120,
+ 0, 0),
+ HI6421V600_LDO(LDO4, ldo4_voltages,
+ 0x17, 0x01, 0x52,
+ 20000, 120,
+ 0x10, 10000),
+ HI6421V600_LDO(LDO9, ldo9_voltages,
+ 0x1c, 0x01, 0x57,
+ 20000, 360,
+ 0x10, 10000),
+ HI6421V600_LDO(LDO15, ldo15_voltages,
+ 0x21, 0x01, 0x5c,
+ 20000, 360,
+ 0x10, 10000),
+ HI6421V600_LDO(LDO16, ldo15_voltages,
+ 0x22, 0x01, 0x5d,
+ 20000, 360,
+ 0x10, 10000),
+ HI6421V600_LDO(LDO17, ldo17_voltages,
+ 0x23, 0x01, 0x5e,
+ 20000, 120,
+ 0x10, 10000),
+ HI6421V600_LDO(LDO33, ldo17_voltages,
+ 0x32, 0x01, 0x6d,
+ 20000, 120,
+ 0, 0),
+ HI6421V600_LDO(LDO34, ldo34_voltages,
+ 0x33, 0x01, 0x6e,
+ 20000, 120,
+ 0, 0),
+};
static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
{
struct device *pmic_dev = pdev->dev.parent;
- struct device_node *np = pmic_dev->of_node;
- struct device_node *regulators, *child;
- struct platform_device *new_pdev;
+ struct regulator_config config = { };
+ struct hi6421_spmi_reg_info *sreg;
+ struct hi6421_spmi_reg_info *info;
+ struct device *dev = &pdev->dev;
struct hi6421_spmi_pmic *pmic;
- int ret;
+ struct regulator_dev *rdev;
+ int i;
/*
* This driver is meant to be called by hi6421-spmi-core,
@@ -410,69 +254,46 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
if (WARN_ON(!pmic))
return -ENODEV;
- regulators = of_get_child_by_name(np, "regulators");
- if (!regulators) {
- dev_err(&pdev->dev, "regulator node not found\n");
- return -ENODEV;
- }
+ sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
+ if (!sreg)
+ return -ENOMEM;
- /*
- * Parse all LDO regulator nodes
- */
- for_each_child_of_node(regulators, child) {
- dev_dbg(&pdev->dev, "adding child %pOF\n", child);
+ sreg->pmic = pmic;
+ mutex_init(&sreg->enable_mutex);
- new_pdev = platform_device_alloc(child->name, -1);
- new_pdev->dev.parent = pmic_dev;
- new_pdev->dev.of_node = of_node_get(child);
+ for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
+ info = &regulator_info[i];
- ret = platform_device_add(new_pdev);
- if (ret < 0) {
- platform_device_put(new_pdev);
- continue;
- }
+ config.dev = pdev->dev.parent;
+ config.driver_data = sreg;
+ config.regmap = pmic->regmap;
- ret = hi6421_spmi_regulator_probe_ldo(new_pdev, child, pmic);
- if (ret < 0)
- platform_device_put(new_pdev);
+ rdev = devm_regulator_register(dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register %s\n",
+ info->desc.name);
+ return PTR_ERR(rdev);
+ }
}
- of_node_put(regulators);
-
- return 0;
-}
-
-static int hi6421_spmi_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = dev_get_drvdata(&pdev->dev);
- struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
-
- regulator_unregister(rdev);
-
- if (rdev->desc->volt_table)
- devm_kfree(&pdev->dev, (unsigned int *)rdev->desc->volt_table);
-
- kfree(sreg);
-
return 0;
}
-static const struct platform_device_id hi6421v600_regulator_table[] = {
+static const struct platform_device_id hi6421_spmi_regulator_table[] = {
{ .name = "hi6421v600-regulator" },
{},
};
-MODULE_DEVICE_TABLE(platform, hi6421v600_regulator_table);
+MODULE_DEVICE_TABLE(platform, hi6421_spmi_regulator_table);
-static struct platform_driver hi6421v600_regulator_driver = {
- .id_table = hi6421v600_regulator_table,
+static struct platform_driver hi6421_spmi_regulator_driver = {
+ .id_table = hi6421_spmi_regulator_table,
.driver = {
- .name = "hi6421v600-regulator",
+ .name = "hi6421v600-regulator",
},
.probe = hi6421_spmi_regulator_probe,
- .remove = hi6421_spmi_regulator_remove,
};
-module_platform_driver(hi6421v600_regulator_driver);
+module_platform_driver(hi6421_spmi_regulator_driver);
-MODULE_DESCRIPTION("Hi6421v600 regulator driver");
+MODULE_DESCRIPTION("Hi6421v600 SPMI regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
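The probe error paths above all funnel into spmi_controller_put(), pairing the controller allocation with a reference drop on failure. A minimal sketch of the pattern, assuming the usual spmi_controller_alloc() call at the top of probe (elided from the hunk); priv_size stands in for whatever private state the driver embeds:

    ctrl = spmi_controller_alloc(&pdev->dev, priv_size);
    if (!ctrl)
    	return -ENOMEM;

    /* ... setup steps that may fail jump to the label below ... */

    err_put_controller:
    	spmi_controller_put(ctrl);	/* drops the embedded device reference */
    	return ret;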
diff --git a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
index 80e74c261e05..3b23ad56b31a 100644
--- a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
+++ b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
@@ -55,52 +55,13 @@ properties:
$ref: "/schemas/regulator/regulator.yaml#"
- properties:
- reg:
- description: Enable register.
-
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
- vsel-reg:
- description: Voltage selector register.
-
- enable-mask:
- description: Bitmask used to enable the regulator.
-
- voltage-table:
- description: Table with the selector items for the voltage regulator.
- minItems: 2
- maxItems: 16
-
- off-on-delay-us:
- description: Time required for changing state to enabled in microseconds.
-
- startup-delay-us:
- description: Startup time in microseconds.
-
- idle-mode-mask:
- description: Bitmask used to put the regulator on idle mode.
-
- eco-microamp:
- description: Maximum current while on idle mode.
-
- required:
- - reg
- - vsel-reg
- - enable-mask
- - voltage-table
- - off-on-delay-us
- - startup-delay-us
-
required:
- compatible
- reg
- regulators
+additionalProperties: false
+
examples:
- |
/* pmic properties */
@@ -117,43 +78,58 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- ldo3: ldo3@16 {
- reg = <0x16>;
- vsel-reg = <0x51>;
-
+ ldo3: LDO3 {
regulator-name = "ldo3";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <2000000>;
regulator-boot-on;
-
- enable-mask = <0x01>;
-
- voltage-table = <1500000>, <1550000>, <1600000>, <1650000>,
- <1700000>, <1725000>, <1750000>, <1775000>,
- <1800000>, <1825000>, <1850000>, <1875000>,
- <1900000>, <1925000>, <1950000>, <2000000>;
- off-on-delay-us = <20000>;
- startup-delay-us = <120>;
};
- ldo4: ldo4@17 { /* 40 PIN */
- reg = <0x17>;
- vsel-reg = <0x52>;
-
+ ldo4: LDO4 {
regulator-name = "ldo4";
regulator-min-microvolt = <1725000>;
regulator-max-microvolt = <1900000>;
regulator-boot-on;
+ };
+
+ ldo9: LDO9 {
+ regulator-name = "ldo9";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ ldo15: LDO15 {
+ regulator-name = "ldo15";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+
+ ldo16: LDO16 {
+ regulator-name = "ldo16";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ };
- enable-mask = <0x01>;
- idle-mode-mask = <0x10>;
- eco-microamp = <10000>;
+ ldo17: LDO17 {
+ regulator-name = "ldo17";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo33: LDO33 {
+ regulator-name = "ldo33";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
- hi6421-vsel = <0x52 0x07>;
- voltage-table = <1725000>, <1750000>, <1775000>, <1800000>,
- <1825000>, <1850000>, <1875000>, <1900000>;
- off-on-delay-us = <20000>;
- startup-delay-us = <120>;
+ ldo34: LDO34 {
+ regulator-name = "ldo34";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
};
};
};
diff --git a/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml b/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
index f2a56fa4e78e..21f68a9c2df1 100644
--- a/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
+++ b/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
@@ -26,14 +26,22 @@ properties:
reg:
maxItems: 1
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 0
+
spmi-channel:
description: |
number of the Kirin 970 SPMI channel where the SPMI devices are connected.
required:
- - compatible
- - reg
- - spmi-channel
+ - compatible
+ - reg
+ - spmi-channel
+ - "#address-cells"
+ - "#size-cells"
patternProperties:
"^pmic@[0-9a-f]$":
@@ -43,6 +51,8 @@ patternProperties:
are documented at
drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml.
+additionalProperties: false
+
examples:
- |
bus {
@@ -51,11 +61,14 @@ examples:
spmi: spmi@fff24000 {
compatible = "hisilicon,kirin970-spmi-controller";
+ #address-cells = <2>;
+ #size-cells = <0>;
status = "ok";
reg = <0x0 0xfff24000 0x0 0x1000>;
spmi-channel = <2>;
pmic@0 {
+ reg = <0 0>;
/* pmic properties */
};
};
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.c b/drivers/staging/hikey9xx/phy-hi3670-usb3.c
index 4fc013911a78..e7e579ce0302 100644
--- a/drivers/staging/hikey9xx/phy-hi3670-usb3.c
+++ b/drivers/staging/hikey9xx/phy-hi3670-usb3.c
@@ -8,6 +8,7 @@
* Authors: Yu Chen <chenyu56@huawei.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -41,15 +42,15 @@
#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
#define USB3OTG_CTRL0 (0x00)
-#define USB3OTG_CTRL3 (0x0C)
+#define USB3OTG_CTRL3 (0x0c)
#define USB3OTG_CTRL4 (0x10)
#define USB3OTG_CTRL5 (0x14)
-#define USB3OTG_CTRL7 (0x1C)
+#define USB3OTG_CTRL7 (0x1c)
#define USB_MISC_CFG50 (0x50)
#define USB_MISC_CFG54 (0x54)
#define USB_MISC_CFG58 (0x58)
-#define USB_MISC_CFG5C (0x5C)
-#define USB_MISC_CFGA0 (0xA0)
+#define USB_MISC_CFG5C (0x5c)
+#define USB_MISC_CFGA0 (0xa0)
#define TCA_CLK_RST (0x200)
#define TCA_INTR_EN (0x204)
#define TCA_INTR_STS (0x208)
@@ -66,14 +67,14 @@
#define CTRL5_USB2_SIDDQ BIT(0)
-#define CTRL7_USB2_REFCLKSEL_MASK (3 << 3)
-#define CTRL7_USB2_REFCLKSEL_ABB (3 << 3)
-#define CTRL7_USB2_REFCLKSEL_PAD (2 << 3)
+#define CTRL7_USB2_REFCLKSEL_MASK GENMASK(4, 3)
+#define CTRL7_USB2_REFCLKSEL_ABB (BIT(4) | BIT(3))
+#define CTRL7_USB2_REFCLKSEL_PAD BIT(4)
#define CFG50_USB3_PHY_TEST_POWERDOWN BIT(23)
-#define CFG54_USB31PHY_CR_ADDR_MASK (0xFFFF)
-#define CFG54_USB31PHY_CR_ADDR_SHIFT (16)
+#define CFG54_USB31PHY_CR_ADDR_MASK GENMASK(31, 16)
+
#define CFG54_USB3PHY_REF_USE_PAD BIT(12)
#define CFG54_PHY0_PMA_PWR_STABLE BIT(11)
#define CFG54_PHY0_PCS_PWR_STABLE BIT(9)
@@ -84,8 +85,7 @@
#define CFG54_USB31PHY_CR_CLK BIT(2)
#define CFG54_USB3_PHY0_ANA_PWR_EN BIT(1)
-#define CFG58_USB31PHY_CR_DATA_MASK (0xFFFF)
-#define CFG58_USB31PHY_CR_DATA_RD_START (16)
+#define CFG58_USB31PHY_CR_DATA_MASK GENMASK(31, 16)
#define CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN BIT(1)
@@ -102,20 +102,20 @@
#define CLK_RST_SUSPEND_CLK_EN BIT(0)
#define GCFG_ROLE_HSTDEV BIT(4)
-#define GCFG_OP_MODE (3 << 0)
+#define GCFG_OP_MODE GENMASK(1, 0)
#define GCFG_OP_MODE_CTRL_SYNC_MODE BIT(0)
#define TCPC_VALID BIT(4)
#define TCPC_LOW_POWER_EN BIT(3)
-#define TCPC_MUX_CONTROL_MASK (3 << 0)
+#define TCPC_MUX_CONTROL_MASK GENMASK(1, 0)
#define TCPC_MUX_CONTROL_USB31 BIT(0)
#define SYSMODE_CFG_TYPEC_DISABLE BIT(3)
-#define VBUS_CTRL_POWERPRESENT_OVERRD (3 << 2)
-#define VBUS_CTRL_VBUSVALID_OVERRD (3 << 0)
+#define VBUS_CTRL_POWERPRESENT_OVERRD GENMASK(3, 2)
+#define VBUS_CTRL_VBUSVALID_OVERRD GENMASK(1, 0)
-#define KIRIN970_USB_DEFAULT_PHY_PARAM (0xFDFEE4)
+#define KIRIN970_USB_DEFAULT_PHY_PARAM (0xfdfee4)
#define KIRIN970_USB_DEFAULT_PHY_VBOOST (0x5)
#define TX_VBOOST_LVL_REG (0xf)
@@ -162,16 +162,14 @@ static int hi3670_phy_cr_set_sel(struct regmap *usb31misc)
static int hi3670_phy_cr_start(struct regmap *usb31misc, int direction)
{
- int ret;
+ int ret, reg;
if (direction)
- ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
- CFG54_USB31PHY_CR_WR_EN,
- CFG54_USB31PHY_CR_WR_EN);
+ reg = CFG54_USB31PHY_CR_WR_EN;
else
- ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
- CFG54_USB31PHY_CR_RD_EN,
- CFG54_USB31PHY_CR_RD_EN);
+ reg = CFG54_USB31PHY_CR_RD_EN;
+
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54, reg, reg);
if (ret)
return ret;
@@ -180,16 +178,14 @@ static int hi3670_phy_cr_start(struct regmap *usb31misc, int direction)
if (ret)
return ret;
- ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
- CFG54_USB31PHY_CR_RD_EN | CFG54_USB31PHY_CR_WR_EN, 0);
-
- return ret;
+ return regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_RD_EN | CFG54_USB31PHY_CR_WR_EN, 0);
}
static int hi3670_phy_cr_wait_ack(struct regmap *usb31misc)
{
u32 reg;
- int retry = 100000;
+ int retry = 10;
int ret;
while (retry-- > 0) {
@@ -202,6 +198,8 @@ static int hi3670_phy_cr_wait_ack(struct regmap *usb31misc)
ret = hi3670_phy_cr_clk(usb31misc);
if (ret)
return ret;
+
+ usleep_range(10, 20);
}
return -ETIMEDOUT;
@@ -216,9 +214,9 @@ static int hi3670_phy_cr_set_addr(struct regmap *usb31misc, u32 addr)
if (ret)
return ret;
- reg &= ~(CFG54_USB31PHY_CR_ADDR_MASK << CFG54_USB31PHY_CR_ADDR_SHIFT);
- reg |= ((addr & CFG54_USB31PHY_CR_ADDR_MASK) << CFG54_USB31PHY_CR_ADDR_SHIFT);
- ret = regmap_write(usb31misc, USB_MISC_CFG54, reg);
+ reg = FIELD_PREP(CFG54_USB31PHY_CR_ADDR_MASK, addr);
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_ADDR_MASK, reg);
return ret;
}
@@ -255,8 +253,7 @@ static int hi3670_phy_cr_read(struct regmap *usb31misc, u32 addr, u32 *val)
if (ret)
return ret;
- *val = (reg >> CFG58_USB31PHY_CR_DATA_RD_START) &
- CFG58_USB31PHY_CR_DATA_MASK;
+ *val = FIELD_GET(CFG58_USB31PHY_CR_DATA_MASK, reg);
return 0;
}
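The GENMASK/FIELD_PREP/FIELD_GET conversions above are behavior-preserving; a small self-contained illustration of the <linux/bitfield.h> helpers, using a mask of the same shape as CFG58_USB31PHY_CR_DATA_MASK:

    #include <linux/bits.h>
    #include <linux/bitfield.h>

    #define CR_DATA_MASK	GENMASK(31, 16)

    u32 reg = FIELD_PREP(CR_DATA_MASK, 0x1234);	/* reg == 0x12340000 */
    u32 val = FIELD_GET(CR_DATA_MASK, reg);	/* val == 0x1234 */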
@@ -281,7 +278,7 @@ static int hi3670_phy_cr_write(struct regmap *usb31misc, u32 addr, u32 val)
return ret;
ret = regmap_write(usb31misc, USB_MISC_CFG58,
- val & CFG58_USB31PHY_CR_DATA_MASK);
+ FIELD_PREP(CFG58_USB31PHY_CR_DATA_MASK, val));
if (ret)
return ret;
@@ -329,24 +326,24 @@ static int hi3670_phy_set_params(struct hi3670_priv *priv)
return ret;
}
-static int hi3670_is_abbclk_seleted(struct hi3670_priv *priv)
+static bool hi3670_is_abbclk_selected(struct hi3670_priv *priv)
{
u32 reg;
if (!priv->sctrl) {
dev_err(priv->dev, "priv->sctrl is null!\n");
- return 1;
+ return false;
}
if (regmap_read(priv->sctrl, SCTRL_SCDEEPSLEEPED, &reg)) {
dev_err(priv->dev, "SCTRL_SCDEEPSLEEPED read failed!\n");
- return 1;
+ return false;
}
if ((reg & USB_CLK_SELECTED) == 0)
- return 1;
+ return false;
- return 0;
+ return true;
}
static int hi3670_config_phy_clock(struct hi3670_priv *priv)
@@ -354,7 +351,7 @@ static int hi3670_config_phy_clock(struct hi3670_priv *priv)
u32 val, mask;
int ret;
- if (hi3670_is_abbclk_seleted(priv)) {
+ if (!hi3670_is_abbclk_selected(priv)) {
/* usb refclk iso disable */
ret = regmap_write(priv->peri_crg, PERI_CRG_ISODIS,
USB_REFCLK_ISO_EN);
@@ -571,7 +568,7 @@ static int hi3670_phy_exit(struct phy *phy)
if (ret)
goto out;
- if (hi3670_is_abbclk_seleted(priv)) {
+ if (!hi3670_is_abbclk_selected(priv)) {
/* disable usb_tcxo_en */
ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3,
USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START);
@@ -588,7 +585,7 @@ out:
return ret;
}
-static struct phy_ops hi3670_phy_ops = {
+static const struct phy_ops hi3670_phy_ops = {
.init = hi3670_phy_init,
.exit = hi3670_phy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml b/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
index 125a5d6546ae..ebd78acfe2de 100644
--- a/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
+++ b/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
@@ -8,6 +8,7 @@ title: Hisilicon Kirin970 USB PHY
maintainers:
- Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
description: |+
Bindings for USB3 PHY on HiSilicon Kirin 970.
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index e8996b1c3b35..ca59986b20f8 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -20,8 +20,6 @@ menuconfig STAGING_MEDIA
if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
-source "drivers/staging/media/allegro-dvt/Kconfig"
-
source "drivers/staging/media/atomisp/Kconfig"
source "drivers/staging/media/hantro/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 24b5873ff760..716929a1a313 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
diff --git a/drivers/staging/media/allegro-dvt/Kconfig b/drivers/staging/media/allegro-dvt/Kconfig
deleted file mode 100644
index 6b7107d9995c..000000000000
--- a/drivers/staging/media/allegro-dvt/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config VIDEO_ALLEGRO_DVT
- tristate "Allegro DVT Video IP Core"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_ZYNQMP || COMPILE_TEST
- select V4L2_MEM2MEM_DEV
- select VIDEOBUF2_DMA_CONTIG
- select REGMAP
- select REGMAP_MMIO
- help
- Support for the encoder video IP core by Allegro DVT. This core is
- found for example on the Xilinx ZynqMP SoC in the EV family and is
- called VCU in the reference manual.
-
- To compile this driver as a module, choose M here: the module
- will be called allegro.
diff --git a/drivers/staging/media/allegro-dvt/Makefile b/drivers/staging/media/allegro-dvt/Makefile
deleted file mode 100644
index 8e306dcdc55c..000000000000
--- a/drivers/staging/media/allegro-dvt/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-allegro-objs := allegro-core.o nal-h264.o allegro-mail.o
-
-obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o
diff --git a/drivers/staging/media/allegro-dvt/TODO b/drivers/staging/media/allegro-dvt/TODO
deleted file mode 100644
index 99e19be0e45a..000000000000
--- a/drivers/staging/media/allegro-dvt/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-TODO:
-
-- This driver is waiting for the stateful encoder spec and corresponding
- v4l2-compliance tests to be finalized.
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 5a5121d958ed..8c65733e0255 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -22,7 +22,6 @@
#include <asm/processor.h>
#include <linux/i2c.h>
-#include <linux/sfi.h>
#include <media/v4l2-subdev.h>
#include "atomisp.h"
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..2ef5f44e4b6b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -349,12 +349,20 @@ static int isp_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
}
-static char *atomisp_pad_str[] = { "ATOMISP_SUBDEV_PAD_SINK",
- "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
- "ATOMISP_SUBDEV_PAD_SOURCE_VF",
- "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
- "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO"
- };
+static const char *atomisp_pad_str(unsigned int pad)
+{
+ static const char *const pad_str[] = {
+ "ATOMISP_SUBDEV_PAD_SINK",
+ "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
+ "ATOMISP_SUBDEV_PAD_SOURCE_VF",
+ "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
+ "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO",
+ };
+
+ if (pad >= ARRAY_SIZE(pad_str))
+ return "ATOMISP_INVALID_PAD";
+ return pad_str[pad];
+}
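For example, atomisp_pad_str(5) now returns "ATOMISP_INVALID_PAD" instead of indexing past the five-entry table, so a bogus pad number reaching the dev_dbg() callers below can no longer read out of bounds.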
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
@@ -378,7 +386,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
dev_dbg(isp->dev,
"sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
- atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP
+ atomisp_pad_str(pad), target == V4L2_SEL_TGT_CROP
? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
r->left, r->top, r->width, r->height,
which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
@@ -612,7 +620,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
enum atomisp_input_stream_id stream_id;
dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
- atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code,
+ atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
: "V4L2_SUBDEV_FORMAT_ACTIVE");
@@ -1062,26 +1070,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index e0eaff0f8a22..6a5ee4607089 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -269,7 +269,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
hmm_set(bo->start, 0, bytes);
dev_dbg(atomisp_dev,
- "%s: pages: 0x%08x (%ld bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
+ "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
__func__, bo->start, bytes, type, from_highmem, userptr, cached);
return bo->start;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
index edab72256494..e5e2f6fb37e0 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_firmware.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
@@ -20,6 +20,7 @@
* This file contains firmware loading/unloading support functionality
*/
+#include <linux/device.h>
#include "ia_css_err.h"
#include "ia_css_env.h"
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
index b4813cd50daa..4a18da6bf0c1 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
@@ -368,6 +368,7 @@ static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
break;
case IA_CSS_CSI2_COMPRESSION_TYPE_2:
predictor = MIPI_PREDICTOR_TYPE2 - 1;
+ break;
default:
break;
}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
index 24fc497bd491..9fad28b97201 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -949,7 +949,7 @@ sh_css_set_black_frame(struct ia_css_stream *stream,
params = stream->isp_params_configs;
height = raw_black_frame->info.res.height;
- width = raw_black_frame->info.padded_width,
+ width = raw_black_frame->info.padded_width;
ptr = raw_black_frame->data
+ raw_black_frame->planes.raw.offset;
@@ -1442,8 +1442,8 @@ static int sh_css_params_default_morph_table(
IA_CSS_ENTER_PRIVATE("");
- step = (ISP_VEC_NELEMS / 16) * 128,
- width = binary->morph_tbl_width,
+ step = (ISP_VEC_NELEMS / 16) * 128;
+ width = binary->morph_tbl_width;
height = binary->morph_tbl_height;
tab = ia_css_morph_table_allocate(width, height);
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index b668a82d40ad..1bc118e375a1 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -316,7 +316,7 @@ hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
fmt->pixelformat = vpu_fmt->fourcc;
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_JPEG,
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
@@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
raw_fmt->width = encoded_fmt->width;
- raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
if (ctx->is_encoder)
hantro_set_fmt_out(ctx, raw_fmt);
else
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index f555aac8a9d5..15322dc3124a 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_IMX_MEDIA
tristate "i.MX5/6 V4L2 media core driver"
depends on ARCH_MXC || COMPILE_TEST
- depends on VIDEO_V4L2 && IMX_IPUV3_CORE
+ depends on VIDEO_V4L2
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
depends on HAS_DMA
@@ -14,21 +14,22 @@ config VIDEO_IMX_MEDIA
driver for the i.MX5/6 SOC.
if VIDEO_IMX_MEDIA
-menu "i.MX5/6/7 Media Sub devices"
+menu "i.MX5/6/7/8 Media Sub devices"
config VIDEO_IMX_CSI
tristate "i.MX5/6 Camera Sensor Interface driver"
depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
+ depends on IMX_IPUV3_CORE
default y
help
A video4linux camera sensor interface driver for i.MX5/6.
config VIDEO_IMX7_CSI
- tristate "i.MX6UL/L / i.MX7 Camera Sensor Interface driver"
+ tristate "i.MX6UL/L / i.MX7 / i.MX8M Camera Sensor Interface driver"
depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
default y
help
Enable support for video4linux camera sensor interface driver for
- i.MX6UL/L or i.MX7.
+ i.MX6UL/L, i.MX7 or i.MX8M.
endmenu
endif
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 9bd9e873ba7c..69cc5da04a2e 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -8,9 +8,9 @@ imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media.o
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index c1931eb2540e..e10ce103a5b4 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -816,14 +816,8 @@ void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
struct capture_priv *priv = to_capture_priv(vdev);
struct video_device *vfd = priv->vdev.vfd;
- mutex_lock(&priv->mutex);
-
- if (video_is_registered(vfd)) {
- video_unregister_device(vfd);
- media_entity_cleanup(&vfd->entity);
- }
-
- mutex_unlock(&priv->mutex);
+ media_entity_cleanup(&vfd->entity);
+ video_unregister_device(vfd);
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index fab1155a5958..63a0204502a8 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -869,11 +869,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
struct video_device *vfd = priv->vdev.vfd;
- mutex_lock(&priv->mutex);
-
video_unregister_device(vfd);
-
- mutex_unlock(&priv->mutex);
}
struct imx_media_video_dev *
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index db77fef07654..ef5add079774 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1922,19 +1922,13 @@ static int imx_csi_async_register(struct csi_priv *priv)
port, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- fwnode_handle_put(ep);
- return -ENOMEM;
- }
-
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &priv->notifier, ep, asd);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &priv->notifier, ep, struct v4l2_async_subdev);
fwnode_handle_put(ep);
- if (ret) {
- kfree(asd);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
/* OK if asd already exists */
if (ret != -EEXIST)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 6d2205461e56..338b8bd0bb07 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
if (IS_ERR(imxmd->m2m_vdev)) {
ret = PTR_ERR(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
goto unlock;
}
@@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+ if (imxmd->m2m_vdev) {
+ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
+ }
+
v4l2_async_notifier_unregister(&imxmd->notifier);
imx_media_unregister_ipu_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
- imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 82e13e972e23..b677cf0e0c84 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -31,7 +31,7 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
/* add CSI fwnode to async notifier */
asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
of_fwnode_handle(csi_np),
- sizeof(*asd));
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
if (ret == -EEXIST)
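The same conversion repeats in the files below: v4l2_async_notifier_add_fwnode_remote_subdev() and v4l2_async_notifier_add_fwnode_subdev() now take the asd type, allocate internally, and return either the asd or an ERR_PTR, so callers drop their own kzalloc()/kfree(). The resulting caller pattern, as a sketch:

    asd = v4l2_async_notifier_add_fwnode_remote_subdev(&notifier, ep,
    						struct v4l2_async_subdev);
    fwnode_handle_put(ep);
    if (IS_ERR(asd)) {
    	ret = PTR_ERR(asd);
    	if (ret != -EEXIST)	/* -EEXIST means it was already added */
    		return ret;
    }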
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 94d87d27d389..4f8fcc91aaae 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -42,7 +42,10 @@ struct csi2_dev {
struct clk *pllref_clk;
struct clk *pix_clk; /* what is this? */
void __iomem *base;
- struct v4l2_fwnode_bus_mipi_csi2 bus;
+
+ struct v4l2_subdev *remote;
+ unsigned int remote_pad;
+ unsigned short data_lanes;
/* lock to protect all members below */
struct mutex lock;
@@ -138,10 +141,8 @@ static void csi2_enable(struct csi2_dev *csi2, bool enable)
}
}
-static void csi2_set_lanes(struct csi2_dev *csi2)
+static void csi2_set_lanes(struct csi2_dev *csi2, unsigned int lanes)
{
- int lanes = csi2->bus.num_data_lanes;
-
writel(lanes - 1, csi2->base + CSI2_N_LANES);
}
@@ -250,13 +251,12 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
}
/* Waits for low-power LP-11 state on data and clock lanes. */
-static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2, unsigned int lanes)
{
u32 mask, reg;
int ret;
- mask = PHY_STOPSTATECLK | (((1 << csi2->bus.num_data_lanes) - 1) <<
- PHY_STOPSTATEDATA_BIT);
+ mask = PHY_STOPSTATECLK | (((1 << lanes) - 1) << PHY_STOPSTATEDATA_BIT);
ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
(reg & mask) == mask, 0, 500000);
@@ -300,8 +300,65 @@ static void csi2ipu_gasket_init(struct csi2_dev *csi2)
writel(reg, csi2->base + CSI2IPU_GASKET);
}
+static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ unsigned int num_lanes = UINT_MAX;
+ int ret;
+
+ *lanes = csi2->data_lanes;
+
+ ret = v4l2_subdev_call(csi2->remote, pad, get_mbus_config,
+ csi2->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(csi2->dev, "No remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(csi2->dev, "Failed to get remote mbus configuration\n");
+ return ret;
+ }
+
+ if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(csi2->dev, "Unsupported media bus type %u\n",
+ mbus_config.type);
+ return -EINVAL;
+ }
+
+ switch (mbus_config.flags & V4L2_MBUS_CSI2_LANES) {
+ case V4L2_MBUS_CSI2_1_LANE:
+ num_lanes = 1;
+ break;
+ case V4L2_MBUS_CSI2_2_LANE:
+ num_lanes = 2;
+ break;
+ case V4L2_MBUS_CSI2_3_LANE:
+ num_lanes = 3;
+ break;
+ case V4L2_MBUS_CSI2_4_LANE:
+ num_lanes = 4;
+ break;
+ default:
+ num_lanes = csi2->data_lanes;
+ break;
+ }
+
+ if (num_lanes > csi2->data_lanes) {
+ dev_err(csi2->dev,
+ "Unsupported mbus config: too many data lanes %u\n",
+ num_lanes);
+ return -EINVAL;
+ }
+
+ *lanes = num_lanes;
+
+ return 0;
+}
+
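Example of the negotiation: with four data lanes in the device tree (csi2->data_lanes == 4), a remote reporting V4L2_MBUS_CSI2_2_LANE streams on two lanes; a remote claiming more lanes than the DT link provides fails with -EINVAL; and a remote without get_mbus_config support falls back to the DT lane count.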
static int csi2_start(struct csi2_dev *csi2)
{
+ unsigned int lanes;
int ret;
ret = clk_prepare_enable(csi2->pix_clk);
@@ -316,12 +373,16 @@ static int csi2_start(struct csi2_dev *csi2)
if (ret)
goto err_disable_clk;
+ ret = csi2_get_active_lanes(csi2, &lanes);
+ if (ret)
+ goto err_disable_clk;
+
/* Step 4 */
- csi2_set_lanes(csi2);
+ csi2_set_lanes(csi2, lanes);
csi2_enable(csi2, true);
/* Step 5 */
- csi2_dphy_wait_stopstate(csi2);
+ csi2_dphy_wait_stopstate(csi2, lanes);
/* Step 6 */
ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
@@ -544,12 +605,35 @@ static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
{
struct csi2_dev *csi2 = notifier_to_dev(notifier);
struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&sd->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(csi2->dev, "Failed to find pad for %s\n", sd->name);
+ return pad;
+ }
+
+ csi2->remote = sd;
+ csi2->remote_pad = pad;
+
+ dev_dbg(csi2->dev, "Bound %s pad: %d\n", sd->name, pad);
return v4l2_create_fwnode_links_to_pad(sd, sink);
}
+static void csi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+
+ csi2->remote = NULL;
+}
+
static const struct v4l2_async_notifier_operations csi2_notify_ops = {
.bound = csi2_notify_bound,
+ .unbind = csi2_notify_unbind,
};
static int csi2_async_register(struct csi2_dev *csi2)
@@ -557,7 +641,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd = NULL;
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *ep;
int ret;
@@ -572,24 +656,18 @@ static int csi2_async_register(struct csi2_dev *csi2)
if (ret)
goto err_parse;
- csi2->bus = vep.bus.mipi_csi2;
-
- dev_dbg(csi2->dev, "data lanes: %d\n", csi2->bus.num_data_lanes);
- dev_dbg(csi2->dev, "flags: 0x%08x\n", csi2->bus.flags);
+ csi2->data_lanes = vep.bus.mipi_csi2.num_data_lanes;
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- ret = -ENOMEM;
- goto err_parse;
- }
-
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi2->notifier, ep, asd);
- if (ret)
- goto err_parse;
+ dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
+ dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &csi2->notifier, ep, struct v4l2_async_subdev);
fwnode_handle_put(ep);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
csi2->notifier.ops = &csi2_notify_ops;
ret = v4l2_async_subdev_notifier_register(&csi2->sd,
@@ -601,7 +679,6 @@ static int csi2_async_register(struct csi2_dev *csi2)
err_parse:
fwnode_handle_put(ep);
- kfree(asd);
return ret;
}
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index a3f3df901704..3046f880c014 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -499,6 +499,7 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sink_fmt)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct media_entity *src;
struct media_pad *pad;
int ret;
@@ -509,11 +510,21 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
if (!csi->src_sd)
return -EPIPE;
+ src = &csi->src_sd->entity;
+
/*
- * find the entity that is selected by the CSI mux. This is needed
+ * if the source is neither a CSI mux nor a CSI-2 receiver, get the
+ * entity directly upstream from this CSI
+ */
+ if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
+ src->function != MEDIA_ENT_F_VID_MUX)
+ src = &csi->sd.entity;
+
+ /*
+ * find the entity that is selected by the source. This is needed
* to distinguish between a parallel or CSI-2 pipeline.
*/
- pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true);
+ pad = imx_media_pipeline_pad(src, 0, 0, true);
if (!pad)
return -ENODEV;
@@ -1164,12 +1175,12 @@ static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
- /* The bound subdev must always be the CSI mux */
- if (WARN_ON(sd->entity.function != MEDIA_ENT_F_VID_MUX))
- return -ENXIO;
-
- /* Mark it as such via its group id */
- sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+ /*
+ * If the subdev is a video mux, it must be one of the CSI
+ * muxes. Mark it as such via its group id.
+ */
+ if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
+ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
return v4l2_create_fwnode_links_to_pad(sd, sink);
}
@@ -1180,7 +1191,7 @@ static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
static int imx7_csi_async_register(struct imx7_csi *csi)
{
- struct v4l2_async_subdev *asd = NULL;
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *ep;
int ret;
@@ -1189,19 +1200,13 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- fwnode_handle_put(ep);
- return -ENOMEM;
- }
-
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi->notifier, ep, asd);
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &csi->notifier, ep, struct v4l2_async_subdev);
fwnode_handle_put(ep);
- if (ret) {
- kfree(asd);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
/* OK if asd already exists */
if (ret != -EEXIST)
return ret;
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 7612993cc1d6..a01a7364b4b9 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -1004,7 +1004,7 @@ static int mipi_csis_async_register(struct csi_state *state)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd = NULL;
+ struct v4l2_async_subdev *asd;
struct fwnode_handle *ep;
int ret;
@@ -1024,17 +1024,13 @@ static int mipi_csis_async_register(struct csi_state *state)
dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- ret = -ENOMEM;
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &state->notifier, ep, struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
goto err_parse;
}
- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
- &state->notifier, ep, asd);
- if (ret)
- goto err_parse;
-
fwnode_handle_put(ep);
state->notifier.ops = &mipi_csis_notify_ops;
@@ -1048,7 +1044,6 @@ static int mipi_csis_async_register(struct csi_state *state)
err_parse:
fwnode_handle_put(ep);
- kfree(asd);
return ret;
}
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 4dc8d9165f63..60aa02eb7d2a 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -773,9 +773,6 @@ static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
pixm->pixelformat = fmt->fourcc;
- memset(pixm->plane_fmt[0].reserved, 0,
- sizeof(pixm->plane_fmt[0].reserved));
-
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index e06ea7ea1e50..dae9073e7d3c 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1349,4 +1349,3 @@ module_platform_driver(iss_driver);
MODULE_DESCRIPTION("TI OMAP4 ISS driver");
MODULE_AUTHOR("Sergio Aguirre <sergio.a.aguirre@gmail.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ISS_VIDEO_DRIVER_VERSION);
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 8b3dd92021e1..526281bf0051 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -18,7 +18,6 @@
#include <media/videobuf2-dma-contig.h>
#define ISS_VIDEO_DRIVER_NAME "issvideo"
-#define ISS_VIDEO_DRIVER_VERSION "0.0.2"
struct iss_device;
struct iss_video;
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index aa4f8c287618..d3eb81ee8dc2 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -143,7 +143,7 @@ static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
memset(f, 0, sizeof(*f));
f->fmt.pix_mp.pixelformat = fourcc;
f->fmt.pix_mp.field = V4L2_FIELD_NONE;
- f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709,
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index ddad5d274ee8..7bd9291c8d5f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -34,56 +34,48 @@ static const struct cedrus_control cedrus_controls[] = {
.id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
},
.codec = CEDRUS_CODEC_MPEG2,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
},
.codec = CEDRUS_CODEC_MPEG2,
- .required = false,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
},
.codec = CEDRUS_CODEC_H264,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
},
.codec = CEDRUS_CODEC_H264,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SPS,
},
.codec = CEDRUS_CODEC_H264,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_PPS,
},
.codec = CEDRUS_CODEC_H264,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
},
.codec = CEDRUS_CODEC_H264,
- .required = false,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
},
.codec = CEDRUS_CODEC_H264,
- .required = false,
},
{
.cfg = {
@@ -92,7 +84,6 @@ static const struct cedrus_control cedrus_controls[] = {
.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
},
.codec = CEDRUS_CODEC_H264,
- .required = false,
},
{
.cfg = {
@@ -101,7 +92,6 @@ static const struct cedrus_control cedrus_controls[] = {
.def = V4L2_STATELESS_H264_START_CODE_NONE,
},
.codec = CEDRUS_CODEC_H264,
- .required = false,
},
/*
* We only expose supported profiles information,
@@ -120,28 +110,24 @@ static const struct cedrus_control cedrus_controls[] = {
BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
},
.codec = CEDRUS_CODEC_H264,
- .required = false,
},
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
},
.codec = CEDRUS_CODEC_H265,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
},
.codec = CEDRUS_CODEC_H265,
- .required = true,
},
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
},
.codec = CEDRUS_CODEC_H265,
- .required = true,
},
{
.cfg = {
@@ -150,7 +136,6 @@ static const struct cedrus_control cedrus_controls[] = {
.def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
},
.codec = CEDRUS_CODEC_H265,
- .required = false,
},
{
.cfg = {
@@ -159,14 +144,12 @@ static const struct cedrus_control cedrus_controls[] = {
.def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
},
.codec = CEDRUS_CODEC_H265,
- .required = false,
},
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
},
.codec = CEDRUS_CODEC_VP8,
- .required = true,
},
};
@@ -227,12 +210,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
static int cedrus_request_validate(struct media_request *req)
{
struct media_request_object *obj;
- struct v4l2_ctrl_handler *parent_hdl, *hdl;
struct cedrus_ctx *ctx = NULL;
- struct v4l2_ctrl *ctrl_test;
unsigned int count;
- unsigned int i;
- int ret = 0;
list_for_each_entry(obj, &req->objects, list) {
struct vb2_buffer *vb;
@@ -259,34 +238,6 @@ static int cedrus_request_validate(struct media_request *req)
return -EINVAL;
}
- parent_hdl = &ctx->hdl;
-
- hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
- if (!hdl) {
- v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n");
- return -ENOENT;
- }
-
- for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
- if (cedrus_controls[i].codec != ctx->current_codec ||
- !cedrus_controls[i].required)
- continue;
-
- ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
- cedrus_controls[i].cfg.id);
- if (!ctrl_test) {
- v4l2_info(&ctx->dev->v4l2_dev,
- "Missing required codec control\n");
- ret = -ENOENT;
- break;
- }
- }
-
- v4l2_ctrl_request_hdl_put(hdl);
-
- if (ret)
- return ret;
-
return vb2_request_validate(req);
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index c96077aaef49..251a6a660351 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -56,7 +56,6 @@ enum cedrus_h264_pic_type {
struct cedrus_control {
struct v4l2_ctrl_config cfg;
enum cedrus_codec codec;
- unsigned char required:1;
};
struct cedrus_h264_run {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index 781c84a9b1b7..de7442d4834d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index a19c85c57fca..033a6935c26d 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -253,13 +253,14 @@ static unsigned int csi_get_pixel_rate(struct tegra_csi_channel *csi_chan)
}
void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 csi_port_num,
u8 *clk_settle_time,
u8 *ths_settle_time)
{
struct tegra_csi *csi = csi_chan->csi;
unsigned int cil_clk_mhz;
unsigned int pix_clk_mhz;
- int clk_idx = (csi_chan->csi_port_num >> 1) + 1;
+ int clk_idx = (csi_port_num >> 1) + 1;
cil_clk_mhz = clk_get_rate(csi->clks[clk_idx].clk) / MHZ;
pix_clk_mhz = csi_get_pixel_rate(csi_chan) / MHZ;
@@ -410,7 +411,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi,
unsigned int num_pads)
{
struct tegra_csi_channel *chan;
- int ret = 0;
+ int ret = 0, i;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
@@ -418,8 +419,21 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi,
list_add_tail(&chan->list, &csi->csi_chans);
chan->csi = csi;
- chan->csi_port_num = port_num;
- chan->numlanes = lanes;
+ /*
+ * Each CSI brick has a maximum of 4 lanes.
+ * For more than 4 lanes, gang up multiple adjacent CSI bricks.
+ */
+ if (lanes <= CSI_LANES_PER_BRICK) {
+ chan->numlanes = lanes;
+ chan->numgangports = 1;
+ } else {
+ chan->numlanes = CSI_LANES_PER_BRICK;
+ chan->numgangports = lanes / CSI_LANES_PER_BRICK;
+ }
+
+ for (i = 0; i < chan->numgangports; i++)
+ chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK;
+
chan->of_node = node;
chan->numpads = num_pads;
if (num_pads & 0x2) {
@@ -500,7 +514,14 @@ static int tegra_csi_channels_alloc(struct tegra_csi *csi)
}
lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
- if (!lanes || ((lanes & (lanes - 1)) != 0)) {
+ /*
+ * Each CSI brick has a maximum of 4 data lanes.
+ * For more than 4 lanes, the lane count must be a multiple of 4
+ * so that consecutive CSI bricks can be ganged up for streaming.
+ */
+ if (!lanes || ((lanes & (lanes - 1)) != 0) ||
+ (lanes > CSI_LANES_PER_BRICK && ((portno & 1) != 0))) {
dev_err(csi->dev, "invalid data-lanes %d for %pOF\n",
lanes, channel);
ret = -EINVAL;
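
A standalone model of the new data-lanes check, under the same constants: the count must be a non-zero power of two, and gangs wider than one brick must start on an even port so consecutive bricks can be paired.

#include <stdbool.h>
#include <stdio.h>

#define CSI_LANES_PER_BRICK 4

/* Mirror of the validation above: reject zero, non-power-of-two
 * counts, and gangs that would start on an odd port. */
static bool lanes_valid(unsigned int lanes, unsigned int portno)
{
	if (!lanes || (lanes & (lanes - 1)) != 0)
		return false;
	if (lanes > CSI_LANES_PER_BRICK && (portno & 1) != 0)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       lanes_valid(4, 1),	/* 1: single brick, any port */
	       lanes_valid(8, 0),	/* 1: x8 gang on even port   */
	       lanes_valid(8, 1),	/* 0: x8 gang on odd port    */
	       lanes_valid(3, 0));	/* 0: not a power of two     */
	return 0;
}
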
@@ -544,7 +565,7 @@ static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
subdev->dev = csi->dev;
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
- chan->csi_port_num);
+ chan->csi_port_nums[0]);
else
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
kbasename(chan->of_node->full_name));
@@ -596,7 +617,7 @@ static int tegra_csi_channels_init(struct tegra_csi *csi)
if (ret) {
dev_err(csi->dev,
"failed to initialize channel-%d: %d\n",
- chan->csi_port_num, ret);
+ chan->csi_port_nums[0], ret);
return ret;
}
}
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
index c65ff73b1cdc..386f7c664259 100644
--- a/drivers/staging/media/tegra-video/csi.h
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -17,6 +17,10 @@
* CILB.
*/
#define CSI_PORTS_PER_BRICK 2
+#define CSI_LANES_PER_BRICK 4
+
+/* Up to 2 CSI x4 ports can be ganged up for streaming */
+#define GANG_PORTS_MAX 2
/* each CSI channel can have one sink and one source pads */
#define TEGRA_CSI_PADS_NUM 2
@@ -43,8 +47,10 @@ struct tegra_csi;
* @numpads: number of pads.
* @csi: Tegra CSI device structure
* @of_node: csi device tree node
- * @numlanes: number of lanes used per port/channel
- * @csi_port_num: CSI channel port number
+ * @numgangports: number of adjacent ports ganged up to meet the
+ * channel bus width
+ * @numlanes: number of lanes used per port
+ * @csi_port_nums: CSI channel port numbers
* @pg_mode: test pattern generator mode for channel
* @format: active format of the channel
* @framerate: active framerate for TPG
@@ -60,8 +66,9 @@ struct tegra_csi_channel {
unsigned int numpads;
struct tegra_csi *csi;
struct device_node *of_node;
+ u8 numgangports;
unsigned int numlanes;
- u8 csi_port_num;
+ u8 csi_port_nums[GANG_PORTS_MAX];
u8 pg_mode;
struct v4l2_mbus_framefmt format;
unsigned int framerate;
@@ -150,6 +157,7 @@ extern const struct tegra_csi_soc tegra210_csi_soc;
void tegra_csi_error_recover(struct v4l2_subdev *subdev);
void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 csi_port_num,
u8 *clk_settle_time,
u8 *ths_settle_time);
#endif
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
index ac066c030a4f..f10a041e3e6c 100644
--- a/drivers/staging/media/tegra-video/tegra210.c
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -149,21 +149,22 @@ static u32 tegra_vi_read(struct tegra_vi_channel *chan, unsigned int addr)
}
/* Tegra210 VI_CSI registers accessors */
-static void vi_csi_write(struct tegra_vi_channel *chan, unsigned int addr,
- u32 val)
+static void vi_csi_write(struct tegra_vi_channel *chan, u8 portno,
+ unsigned int addr, u32 val)
{
void __iomem *vi_csi_base;
- vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
writel_relaxed(val, vi_csi_base + addr);
}
-static u32 vi_csi_read(struct tegra_vi_channel *chan, unsigned int addr)
+static u32 vi_csi_read(struct tegra_vi_channel *chan, u8 portno,
+ unsigned int addr)
{
void __iomem *vi_csi_base;
- vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
return readl_relaxed(vi_csi_base + addr);
}
@@ -171,27 +172,52 @@ static u32 vi_csi_read(struct tegra_vi_channel *chan, unsigned int addr)
/*
* Tegra210 VI channel capture operations
*/
-static int tegra_channel_capture_setup(struct tegra_vi_channel *chan)
+static int tegra_channel_capture_setup(struct tegra_vi_channel *chan,
+ u8 portno)
{
u32 height = chan->format.height;
u32 width = chan->format.width;
u32 format = chan->fmtinfo->img_fmt;
u32 data_type = chan->fmtinfo->img_dt;
u32 word_count = (width * chan->fmtinfo->bit_width) / 8;
+ u32 bypass_pixel_transform = BIT(BYPASS_PXL_TRANSFORM_OFFSET);
- vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
- vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DEF,
- ((chan->pg_mode ? 0 : 1) << BYPASS_PXL_TRANSFORM_OFFSET) |
+ /*
+ * The VI pixel transformation unit converts the source pixel data
+ * format into the selected destination pixel format and aligns it
+ * properly while interfacing with the memory packer.
+ * Pixel transformation should be enabled for YUV and RGB formats
+ * and bypassed for RAW formats, as RAW formats only support the
+ * direct-to-memory path.
+ */
+ if (chan->pg_mode || data_type == TEGRA_IMAGE_DT_YUV422_8 ||
+ data_type == TEGRA_IMAGE_DT_RGB888)
+ bypass_pixel_transform = 0;
+
+ /*
+ * For x8 source streaming, the source image is split across two x4
+ * ports, with the left half going to the first x4 port and the
+ * right half to the second. So, use the split width and the
+ * corresponding word count for each x4 port.
+ */
+ if (chan->numgangports > 1) {
+ width = width >> 1;
+ word_count = (width * chan->fmtinfo->bit_width) / 8;
+ }
+
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DEF,
+ bypass_pixel_transform |
(format << IMAGE_DEF_FORMAT_OFFSET) |
IMAGE_DEF_DEST_MEM);
- vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DT, data_type);
- vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
- vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE,
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DT, data_type);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE,
(height << IMAGE_SIZE_HEIGHT_OFFSET) | width);
return 0;
}
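
The per-port geometry for an x8 gang reduces to two lines of arithmetic; a small standalone sketch with an illustrative 3840-pixel, 16-bit-per-pixel stream:

#include <stdio.h>

/* Mirror of the capture-setup arithmetic for an x8 gang: each x4 port
 * receives half the image width, and the CSI word count is derived
 * from that split width and the format bit width. */
int main(void)
{
	unsigned int width = 3840, bit_width = 16, numgangports = 2;
	unsigned int word_count;

	if (numgangports > 1)
		width >>= 1;
	word_count = (width * bit_width) / 8;

	printf("per-port width=%u, word_count=%u\n", width, word_count);
	return 0;
}
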
-static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan)
+static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan,
+ u8 portno)
{
/* disable clock gating to enable continuous clock */
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, 0);
@@ -199,15 +225,16 @@ static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan)
* Soft reset memory client interface, pixel format logic, sensor
* control logic, and a shadow copy logic to bring VI to clean state.
*/
- vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0xf);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0xf);
usleep_range(100, 200);
- vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0x0);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0x0);
/* enable back VI clock gating */
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
}
-static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
+static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan,
+ u8 portno)
{
struct v4l2_subdev *subdev;
u32 val;
@@ -219,9 +246,9 @@ static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
* events which can cause CSI and VI hardware hang.
* This helps to have a clean capture for next frame.
*/
- val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
+ val = vi_csi_read(chan, portno, TEGRA_VI_CSI_ERROR_STATUS);
dev_dbg(&chan->video.dev, "TEGRA_VI_CSI_ERROR_STATUS 0x%08x\n", val);
- vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, val);
val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
dev_dbg(&chan->video.dev,
@@ -229,8 +256,8 @@ static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
/* recover VI by issuing software reset and re-setup for capture */
- tegra_channel_vi_soft_reset(chan);
- tegra_channel_capture_setup(chan);
+ tegra_channel_vi_soft_reset(chan, portno);
+ tegra_channel_capture_setup(chan, portno);
/* recover CSI block */
subdev = tegra_channel_get_remote_csi_subdev(chan);
@@ -269,67 +296,114 @@ static void release_buffer(struct tegra_vi_channel *chan,
vb2_buffer_done(&vb->vb2_buf, state);
}
-static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
- struct tegra_channel_buffer *buf)
+static void tegra_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
+ u8 portno, u32 buf_offset,
+ struct tegra_channel_buffer *buf)
{
- u32 thresh, value, frame_start, mw_ack_done;
- int bytes_per_line = chan->format.bytesperline;
- int err;
+ int bytesperline = chan->format.bytesperline;
+ u32 sizeimage = chan->format.sizeimage;
/* program buffer address by using surface 0 */
- vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
- (u64)buf->addr >> 32);
- vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB, buf->addr);
- vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_STRIDE, bytes_per_line);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
+ ((u64)buf->addr + buf_offset) >> 32);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB,
+ buf->addr + buf_offset);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_STRIDE, bytesperline);
+ if (chan->fmtinfo->fourcc != V4L2_PIX_FMT_NV16)
+ return;
/*
- * Tegra VI block interacts with host1x syncpt for synchronizing
- * programmed condition of capture state and hardware operation.
- * Frame start and Memory write acknowledge syncpts has their own
- * FIFO of depth 2.
- *
- * Syncpoint trigger conditions set through VI_INCR_SYNCPT register
- * are added to HW syncpt FIFO and when the HW triggers, syncpt
- * condition is removed from the FIFO and counter at syncpoint index
- * will be incremented by the hardware and software can wait for
- * counter to reach threshold to synchronize capturing frame with the
- * hardware capture events.
+ * Program surface 1 for the UV plane, offset by sizeimage / 2 (the Y plane size).
*/
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_MSB,
+ (((u64)buf->addr + sizeimage / 2) + buf_offset) >> 32);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_LSB,
+ buf->addr + sizeimage / 2 + buf_offset);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_STRIDE, bytesperline);
+}
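
For NV16 the offsets fall out of sizeimage having been doubled in tegra_channel_fmt_align(): surface 0 points at the Y plane and surface 1 at sizeimage / 2 above it. A hypothetical standalone sketch (the base address and resolution are made up):

#include <stdint.h>
#include <stdio.h>

/* Model of the two-plane NV16 layout programmed above: sizeimage
 * covers both planes, so the UV plane (surface 1) starts at
 * sizeimage / 2, and ganged ports add buf_offset on top. */
int main(void)
{
	uint64_t addr = 0x80000000ULL;
	uint32_t sizeimage = 3840 * 2160 * 2;	/* Y plane + UV plane */
	uint32_t buf_offset = 0;		/* per-gang-port offset */

	printf("surface0 (Y):  %#llx\n",
	       (unsigned long long)(addr + buf_offset));
	printf("surface1 (UV): %#llx\n",
	       (unsigned long long)(addr + sizeimage / 2 + buf_offset));
	return 0;
}
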
- /* increase channel syncpoint threshold for FRAME_START */
- thresh = host1x_syncpt_incr_max(chan->frame_start_sp, 1);
+static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ u32 thresh, value, frame_start, mw_ack_done;
+ u32 fs_thresh[GANG_PORTS_MAX];
+ u8 *portnos = chan->portnos;
+ int gang_bpl = (chan->format.width >> 1) * chan->fmtinfo->bpp;
+ u32 buf_offset;
+ bool capture_timedout = false;
+ int err, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+ * Lay the buffers out side by side for all consecutive x4
+ * ports in the gang, using a bytes-per-line value derived
+ * from the split source width.
+ */
+ buf_offset = i * roundup(gang_bpl, SURFACE_ALIGN_BYTES);
+ tegra_channel_vi_buffer_setup(chan, portnos[i], buf_offset,
+ buf);
- /* Program FRAME_START trigger condition syncpt request */
- frame_start = VI_CSI_PP_FRAME_START(chan->portno);
- value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
- host1x_syncpt_id(chan->frame_start_sp);
- tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+ /*
+ * The Tegra VI block interacts with host1x syncpts to
+ * synchronize the programmed condition with the hardware
+ * capture operation. The frame start and memory write
+ * acknowledge syncpts each have their own FIFO of depth 2.
+ *
+ * Syncpoint trigger conditions set through the VI_INCR_SYNCPT
+ * register are added to the HW syncpt FIFO; when the HW
+ * triggers, the condition is removed from the FIFO and the
+ * counter at the syncpoint index is incremented by the
+ * hardware. Software can wait for the counter to reach the
+ * threshold to synchronize frame capture with the hardware
+ * capture events.
+ */
- /* increase channel syncpoint threshold for MW_ACK_DONE */
- buf->mw_ack_sp_thresh = host1x_syncpt_incr_max(chan->mw_ack_sp, 1);
+ /* increase channel syncpoint threshold for FRAME_START */
+ thresh = host1x_syncpt_incr_max(chan->frame_start_sp[i], 1);
+ fs_thresh[i] = thresh;
+
+ /* Program FRAME_START trigger condition syncpt request */
+ frame_start = VI_CSI_PP_FRAME_START(portnos[i]);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
+ host1x_syncpt_id(chan->frame_start_sp[i]);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* increase channel syncpoint threshold for MW_ACK_DONE */
+ thresh = host1x_syncpt_incr_max(chan->mw_ack_sp[i], 1);
+ buf->mw_ack_sp_thresh[i] = thresh;
+
+ /* Program MW_ACK_DONE trigger condition syncpt request */
+ mw_ack_done = VI_CSI_MW_ACK_DONE(portnos[i]);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
+ host1x_syncpt_id(chan->mw_ack_sp[i]);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+ }
- /* Program MW_ACK_DONE trigger condition syncpt request */
- mw_ack_done = VI_CSI_MW_ACK_DONE(chan->portno);
- value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
- host1x_syncpt_id(chan->mw_ack_sp);
- tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+ /* enable single shot capture after all ganged ports are ready */
+ for (i = 0; i < chan->numgangports; i++)
+ vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_SINGLE_SHOT,
+ SINGLE_SHOT_CAPTURE);
- /* enable single shot capture */
- vi_csi_write(chan, TEGRA_VI_CSI_SINGLE_SHOT, SINGLE_SHOT_CAPTURE);
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+ * Wait for syncpt counter to reach frame start event threshold
+ */
+ err = host1x_syncpt_wait(chan->frame_start_sp[i], fs_thresh[i],
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (err) {
+ capture_timedout = true;
+ /* increment syncpoint counter for timed-out events */
+ host1x_syncpt_incr(chan->frame_start_sp[i]);
+ spin_lock(&chan->sp_incr_lock[i]);
+ host1x_syncpt_incr(chan->mw_ack_sp[i]);
+ spin_unlock(&chan->sp_incr_lock[i]);
+ /* clear errors and recover */
+ tegra_channel_capture_error_recover(chan, portnos[i]);
+ }
+ }
- /* wait for syncpt counter to reach frame start event threshold */
- err = host1x_syncpt_wait(chan->frame_start_sp, thresh,
- TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
- if (err) {
+ if (capture_timedout) {
dev_err_ratelimited(&chan->video.dev,
"frame start syncpt timeout: %d\n", err);
- /* increment syncpoint counter for timedout events */
- host1x_syncpt_incr(chan->frame_start_sp);
- spin_lock(&chan->sp_incr_lock);
- host1x_syncpt_incr(chan->mw_ack_sp);
- spin_unlock(&chan->sp_incr_lock);
- /* clear errors and recover */
- tegra_channel_capture_error_recover(chan);
release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
return err;
}
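
The side-by-side layout hinges on the buf_offset computation: each ganged port's half-image stride is rounded up to the 64-byte surface alignment. A userspace equivalent of that calculation (roundup_to() models the kernel's roundup()):

#include <stdio.h>

#define SURFACE_ALIGN_BYTES 64

/* Round x up to the next multiple of y, as the kernel's roundup()
 * does for the per-port buffer offsets above. */
static unsigned int roundup_to(unsigned int x, unsigned int y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned int width = 3842, bpp = 2;
	unsigned int gang_bpl = (width >> 1) * bpp;
	unsigned int i;

	/* Each ganged x4 port writes its half-image side by side. */
	for (i = 0; i < 2; i++)
		printf("port %u: buf_offset=%u\n", i,
		       i * roundup_to(gang_bpl, SURFACE_ALIGN_BYTES));
	return 0;
}
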
@@ -350,21 +424,29 @@ static void tegra_channel_capture_done(struct tegra_vi_channel *chan,
{
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
u32 value;
- int ret;
+ bool capture_timedout = false;
+ int ret, i;
- /* wait for syncpt counter to reach MW_ACK_DONE event threshold */
- ret = host1x_syncpt_wait(chan->mw_ack_sp, buf->mw_ack_sp_thresh,
- TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
- if (ret) {
- dev_err_ratelimited(&chan->video.dev,
- "MW_ACK_DONE syncpt timeout: %d\n", ret);
- state = VB2_BUF_STATE_ERROR;
- /* increment syncpoint counter for timedout event */
- spin_lock(&chan->sp_incr_lock);
- host1x_syncpt_incr(chan->mw_ack_sp);
- spin_unlock(&chan->sp_incr_lock);
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+ * Wait for syncpt counter to reach MW_ACK_DONE event threshold
+ */
+ ret = host1x_syncpt_wait(chan->mw_ack_sp[i],
+ buf->mw_ack_sp_thresh[i],
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (ret) {
+ capture_timedout = true;
+ state = VB2_BUF_STATE_ERROR;
+ /* increment syncpoint counter for the timed-out event */
+ spin_lock(&chan->sp_incr_lock[i]);
+ host1x_syncpt_incr(chan->mw_ack_sp[i]);
+ spin_unlock(&chan->sp_incr_lock[i]);
+ }
}
+ if (capture_timedout)
+ dev_err_ratelimited(&chan->video.dev,
+ "MW_ACK_DONE syncpt timeout: %d\n", ret);
release_buffer(chan, buf, state);
}
@@ -372,6 +454,7 @@ static int chan_capture_kthread_start(void *data)
{
struct tegra_vi_channel *chan = data;
struct tegra_channel_buffer *buf;
+ unsigned int retries = 0;
int err = 0;
while (1) {
@@ -401,8 +484,15 @@ static int chan_capture_kthread_start(void *data)
spin_unlock(&chan->start_lock);
err = tegra_channel_capture_frame(chan, buf);
- if (err)
+ if (!err) {
+ retries = 0;
+ continue;
+ }
+
+ if (retries++ > chan->syncpt_timeout_retry)
vb2_queue_error(&chan->queue);
+ else
+ err = 0;
}
return 0;
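
The retry logic above turns a transient syncpt timeout into a bounded number of silent retries before the vb2 queue is errored out. A hedged standalone model of that policy, with a fake capture result standing in for tegra_channel_capture_frame():

#include <stdio.h>

/* Sketch of the retry budget: success resets the counter, and only a
 * run of consecutive failures longer than the bound gives up. */
int main(void)
{
	unsigned int retries = 0, syncpt_timeout_retry = 5;
	int frame;

	for (frame = 0; frame < 10; frame++) {
		int err = (frame % 3 == 0) ? -1 : 0; /* fake capture result */

		if (!err) {
			retries = 0;	/* success resets the budget */
			continue;
		}
		if (retries++ > syncpt_timeout_retry) {
			printf("giving up at frame %d\n", frame);
			break;
		}
		/* else: swallow the error and try the next frame */
	}
	return 0;
}
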
@@ -437,14 +527,12 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
struct media_pipeline *pipe = &chan->video.pipe;
u32 val;
- int ret;
+ u8 *portnos = chan->portnos;
+ int ret, i;
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
- /* clear errors */
- val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
- vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
-
+ /* clear syncpt errors */
val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
@@ -463,7 +551,14 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
if (ret < 0)
goto error_pipeline_start;
- tegra_channel_capture_setup(chan);
+ /* clear csi errors and do capture setup for all ports in gang mode */
+ for (i = 0; i < chan->numgangports; i++) {
+ val = vi_csi_read(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS);
+ vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ tegra_channel_capture_setup(chan, portnos[i]);
+ }
+
ret = tegra_channel_set_stream(chan, true);
if (ret < 0)
goto error_set_stream;
@@ -606,19 +701,19 @@ static const struct tegra_video_format tegra210_video_formats[] = {
TEGRA210_VIDEO_FMT(RAW12, 12, SGBRG12_1X12, 2, T_R16_I, SGBRG12),
TEGRA210_VIDEO_FMT(RAW12, 12, SBGGR12_1X12, 2, T_R16_I, SBGGR12),
/* RGB888 */
- TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, RGB24),
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, XBGR32),
TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X32_PADHI, 4, T_A8B8G8R8,
- XBGR32),
+ RGBX32),
/* YUV422 */
- TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, UYVY),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, VYUY),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, YUYV),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, UYVY),
TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 1, T_Y8__V8U8_N422, NV16),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, UYVY),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, VYUY),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, YUYV),
- TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, UYVY),
};
/* Tegra210 VI operations */
@@ -717,10 +812,10 @@ static void tpg_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
/*
* Tegra210 CSI operations
*/
-static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
+static void tegra210_csi_port_recover(struct tegra_csi_channel *csi_chan,
+ u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
- unsigned int portno = csi_chan->csi_port_num;
u32 val;
/*
@@ -769,16 +864,26 @@ static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
}
}
-static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
+static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int i;
+
+ for (i = 0; i < csi_chan->numgangports; i++)
+ tegra210_csi_port_recover(csi_chan, portnos[i]);
+}
+
+static int
+tegra210_csi_port_start_streaming(struct tegra_csi_channel *csi_chan,
+ u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
- unsigned int portno = csi_chan->csi_port_num;
u8 clk_settle_time = 0;
u8 ths_settle_time = 10;
u32 val;
if (!csi_chan->pg_mode)
- tegra_csi_calc_settle_time(csi_chan, &clk_settle_time,
+ tegra_csi_calc_settle_time(csi_chan, portno, &clk_settle_time,
&ths_settle_time);
csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
@@ -877,10 +982,10 @@ static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
return 0;
}
-static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
+static void
+tegra210_csi_port_stop_streaming(struct tegra_csi_channel *csi_chan, u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
- unsigned int portno = csi_chan->csi_port_num;
u32 val;
val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
@@ -918,6 +1023,35 @@ static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
}
}
+static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int ret, i;
+
+ for (i = 0; i < csi_chan->numgangports; i++) {
+ ret = tegra210_csi_port_start_streaming(csi_chan, portnos[i]);
+ if (ret)
+ goto stream_start_fail;
+ }
+
+ return 0;
+
+stream_start_fail:
+ for (i = i - 1; i >= 0; i--)
+ tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
+
+ return ret;
+}
+
+static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int i;
+
+ for (i = 0; i < csi_chan->numgangports; i++)
+ tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
+}
+
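The start/stop pair follows the usual partial-failure unwind pattern: if port i fails to start, ports i-1 down to 0 are stopped in reverse so no brick is left streaming. A compilable miniature of the same control flow (the failing port is simulated):

#include <stdio.h>

static int start_port(int p)
{
	return p == 2 ? -1 : 0;	/* pretend the second brick fails */
}

static void stop_port(int p)
{
	printf("stopping port %d\n", p);
}

int main(void)
{
	int ports[] = { 0, 2 };
	int ret = 0, i;

	for (i = 0; i < 2; i++) {
		ret = start_port(ports[i]);
		if (ret)
			goto stream_start_fail;
	}
	return 0;

stream_start_fail:
	/* unwind only the ports that actually started */
	for (i = i - 1; i >= 0; i--)
		stop_port(ports[i]);
	return ret;
}
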
/*
* Tegra210 CSI TPG frame rate table with horizontal and vertical
* blanking intervals for corresponding format and resolution.
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 560d8b368124..7a09061cda57 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <media/v4l2-dv-timings.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-fwnode.h>
@@ -29,7 +30,6 @@
#include "vi.h"
#include "video.h"
-#define SURFACE_ALIGN_BYTES 64
#define MAX_CID_CONTROLS 1
static const struct tegra_video_format tegra_default_format = {
@@ -484,6 +484,8 @@ static void tegra_channel_fmt_align(struct tegra_vi_channel *chan,
pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
pix->sizeimage = pix->bytesperline * pix->height;
+ if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ pix->sizeimage *= 2;
}
static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
@@ -533,11 +535,18 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
fse.code = fmtinfo->code;
ret = v4l2_subdev_call(subdev, pad, enum_frame_size, pad_cfg, &fse);
if (ret) {
- ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
- if (ret)
- return -EINVAL;
- pad_cfg->try_crop.width = sdsel.r.width;
- pad_cfg->try_crop.height = sdsel.r.height;
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
+ pad_cfg->try_crop.width = 0;
+ pad_cfg->try_crop.height = 0;
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+ if (ret)
+ return -EINVAL;
+
+ pad_cfg->try_crop.width = sdsel.r.width;
+ pad_cfg->try_crop.height = sdsel.r.height;
+ }
} else {
pad_cfg->try_crop.width = fse.max_width;
pad_cfg->try_crop.height = fse.max_height;
@@ -563,6 +572,14 @@ static int tegra_channel_try_format(struct file *file, void *fh,
return __tegra_channel_try_format(chan, &format->fmt.pix);
}
+static void tegra_channel_update_gangports(struct tegra_vi_channel *chan)
+{
+ if (chan->format.width <= 1920)
+ chan->numgangports = 1;
+ else
+ chan->numgangports = chan->totalports;
+}
+
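The gang decision itself is a one-liner worth pinning down: widths up to 1920 pixels fit a single x4 port, anything wider uses every port the channel owns. A trivial standalone check:

#include <stdio.h>

/* Mirror of tegra_channel_update_gangports() above. */
static unsigned int gangports(unsigned int width, unsigned int totalports)
{
	return width <= 1920 ? 1 : totalports;
}

int main(void)
{
	printf("%u %u\n", gangports(1280, 2), gangports(3840, 2));
	return 0;
}
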
static int tegra_channel_set_format(struct file *file, void *fh,
struct v4l2_format *format)
{
@@ -596,6 +613,7 @@ static int tegra_channel_set_format(struct file *file, void *fh,
chan->format = *pix;
chan->fmtinfo = fmtinfo;
+ tegra_channel_update_gangports(chan);
return 0;
}
@@ -628,10 +646,23 @@ static int tegra_channel_set_subdev_active_fmt(struct tegra_vi_channel *chan)
chan->format.sizeimage = chan->format.bytesperline *
chan->format.height;
tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+ tegra_channel_update_gangports(chan);
return 0;
}
+static int
+tegra_channel_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 4, NULL);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
static int tegra_channel_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
@@ -711,6 +742,133 @@ static int tegra_channel_s_selection(struct file *file, void *fh,
return ret;
}
+static int tegra_channel_g_edid(struct file *file, void *fh,
+ struct v4l2_edid *edid)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, get_edid))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, get_edid, edid);
+}
+
+static int tegra_channel_s_edid(struct file *file, void *fh,
+ struct v4l2_edid *edid)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, set_edid))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, set_edid, edid);
+}
+
+static int tegra_channel_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, video, g_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ video, g_dv_timings, timings);
+}
+
+static int tegra_channel_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_bt_timings *bt = &timings->bt;
+ struct v4l2_dv_timings curr_timings;
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, video, s_dv_timings))
+ return -ENOTTY;
+
+ ret = tegra_channel_g_dv_timings(file, fh, &curr_timings);
+ if (ret)
+ return ret;
+
+ if (v4l2_match_dv_timings(timings, &curr_timings, 0, false))
+ return 0;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ ret = v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ video, s_dv_timings, timings);
+ if (ret)
+ return ret;
+
+ chan->format.width = bt->width;
+ chan->format.height = bt->height;
+ chan->format.bytesperline = bt->width * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline * bt->height;
+ tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+ tegra_channel_update_gangports(chan);
+
+ return 0;
+}
+
+static int tegra_channel_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, video, query_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ video, query_dv_timings, timings);
+}
+
+static int tegra_channel_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, enum_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
+}
+
+static int tegra_channel_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, dv_timings_cap, cap);
+}
+
+static int tegra_channel_log_status(struct file *file, void *fh)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ v4l2_device_call_all(chan->video.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
static int tegra_channel_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
@@ -723,6 +881,8 @@ static int tegra_channel_enum_input(struct file *file, void *fh,
inp->type = V4L2_INPUT_TYPE_CAMERA;
subdev = tegra_channel_get_remote_source_subdev(chan);
strscpy(inp->name, subdev->name, sizeof(inp->name));
+ if (v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
return 0;
}
@@ -766,10 +926,18 @@ static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_subscribe_event = tegra_channel_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_g_selection = tegra_channel_g_selection,
.vidioc_s_selection = tegra_channel_s_selection,
+ .vidioc_g_edid = tegra_channel_g_edid,
+ .vidioc_s_edid = tegra_channel_s_edid,
+ .vidioc_g_dv_timings = tegra_channel_g_dv_timings,
+ .vidioc_s_dv_timings = tegra_channel_s_dv_timings,
+ .vidioc_query_dv_timings = tegra_channel_query_dv_timings,
+ .vidioc_enum_dv_timings = tegra_channel_enum_dv_timings,
+ .vidioc_dv_timings_cap = tegra_channel_dv_timings_cap,
+ .vidioc_log_status = tegra_channel_log_status,
};
/*
@@ -788,7 +956,6 @@ static const struct v4l2_file_operations tegra_channel_fops = {
/*
* V4L2 control operations
*/
-#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct tegra_vi_channel *chan = container_of(ctrl->handler,
@@ -800,6 +967,9 @@ static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
/* pattern change takes effect on next stream */
chan->pg_mode = ctrl->val + 1;
break;
+ case V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY:
+ chan->syncpt_timeout_retry = ctrl->val;
+ break;
default:
return -EINVAL;
}
@@ -811,10 +981,22 @@ static const struct v4l2_ctrl_ops vi_ctrl_ops = {
.s_ctrl = vi_s_ctrl,
};
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
static const char *const vi_pattern_strings[] = {
"Black/White Direct Mode",
"Color Patch Mode",
};
+#else
+static const struct v4l2_ctrl_config syncpt_timeout_ctrl = {
+ .ops = &vi_ctrl_ops,
+ .id = V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY,
+ .name = "Syncpt timeout retry",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 10000,
+ .step = 1,
+ .def = 5,
+};
#endif
static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
@@ -836,6 +1018,16 @@ static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
#else
struct v4l2_subdev *subdev;
+ /* custom control */
+ v4l2_ctrl_new_custom(&chan->ctrl_handler, &syncpt_timeout_ctrl, NULL);
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->vi->dev, "failed to add %s ctrl handler: %d\n",
+ syncpt_timeout_ctrl.name,
+ chan->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return chan->ctrl_handler.error;
+ }
+
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!subdev)
return -ENODEV;
@@ -934,12 +1126,21 @@ static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
return 0;
}
+static void tegra_channel_host1x_syncpts_free(struct tegra_vi_channel *chan)
+{
+ int i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ host1x_syncpt_free(chan->mw_ack_sp[i]);
+ host1x_syncpt_free(chan->frame_start_sp[i]);
+ }
+}
+
static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
{
v4l2_ctrl_handler_free(&chan->ctrl_handler);
media_entity_cleanup(&chan->video.entity);
- host1x_syncpt_free(chan->mw_ack_sp);
- host1x_syncpt_free(chan->frame_start_sp);
+ tegra_channel_host1x_syncpts_free(chan);
mutex_destroy(&chan->video_lock);
}
@@ -957,11 +1158,46 @@ void tegra_channels_cleanup(struct tegra_vi *vi)
}
}
+static int tegra_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ struct host1x_syncpt *fs_sp;
+ struct host1x_syncpt *mw_sp;
+ int ret, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ fs_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!fs_sp) {
+ dev_err(vi->dev, "failed to request frame start syncpoint\n");
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ mw_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!mw_sp) {
+ dev_err(vi->dev, "failed to request memory ack syncpoint\n");
+ host1x_syncpt_free(fs_sp);
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ chan->frame_start_sp[i] = fs_sp;
+ chan->mw_ack_sp[i] = mw_sp;
+ spin_lock_init(&chan->sp_incr_lock[i]);
+ }
+
+ return 0;
+
+free_syncpts:
+ tegra_channel_host1x_syncpts_free(chan);
+ return ret;
+}
+
static int tegra_channel_init(struct tegra_vi_channel *chan)
{
struct tegra_vi *vi = chan->vi;
struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
- unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
int ret;
mutex_init(&chan->video_lock);
@@ -969,7 +1205,6 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
INIT_LIST_HEAD(&chan->done);
spin_lock_init(&chan->start_lock);
spin_lock_init(&chan->done_lock);
- spin_lock_init(&chan->sp_incr_lock);
init_waitqueue_head(&chan->start_wait);
init_waitqueue_head(&chan->done_wait);
@@ -984,18 +1219,9 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
- chan->frame_start_sp = host1x_syncpt_request(&vi->client, flags);
- if (!chan->frame_start_sp) {
- dev_err(vi->dev, "failed to request frame start syncpoint\n");
- return -ENOMEM;
- }
-
- chan->mw_ack_sp = host1x_syncpt_request(&vi->client, flags);
- if (!chan->mw_ack_sp) {
- dev_err(vi->dev, "failed to request memory ack syncpoint\n");
- ret = -ENOMEM;
- goto free_fs_syncpt;
- }
+ ret = tegra_channel_host1x_syncpt_init(chan);
+ if (ret)
+ return ret;
/* initialize the media entity */
chan->pad.flags = MEDIA_PAD_FL_SINK;
@@ -1003,7 +1229,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
if (ret < 0) {
dev_err(vi->dev,
"failed to initialize media entity: %d\n", ret);
- goto free_mw_ack_syncpt;
+ goto free_syncpts;
}
ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS);
@@ -1019,7 +1245,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
chan->video.release = video_device_release_empty;
chan->video.queue = &chan->queue;
snprintf(chan->video.name, sizeof(chan->video.name), "%s-%s-%u",
- dev_name(vi->dev), "output", chan->portno);
+ dev_name(vi->dev), "output", chan->portnos[0]);
chan->video.vfl_type = VFL_TYPE_VIDEO;
chan->video.vfl_dir = VFL_DIR_RX;
chan->video.ioctl_ops = &tegra_channel_ioctl_ops;
@@ -1055,17 +1281,16 @@ free_v4l2_ctrl_hdl:
v4l2_ctrl_handler_free(&chan->ctrl_handler);
cleanup_media:
media_entity_cleanup(&chan->video.entity);
-free_mw_ack_syncpt:
- host1x_syncpt_free(chan->mw_ack_sp);
-free_fs_syncpt:
- host1x_syncpt_free(chan->frame_start_sp);
+free_syncpts:
+ tegra_channel_host1x_syncpts_free(chan);
return ret;
}
static int tegra_vi_channel_alloc(struct tegra_vi *vi, unsigned int port_num,
- struct device_node *node)
+ struct device_node *node, unsigned int lanes)
{
struct tegra_vi_channel *chan;
+ unsigned int i;
/*
* Do not use devm_kzalloc as memory is freed immediately
@@ -1078,7 +1303,20 @@ static int tegra_vi_channel_alloc(struct tegra_vi *vi, unsigned int port_num,
return -ENOMEM;
chan->vi = vi;
- chan->portno = port_num;
+ chan->portnos[0] = port_num;
+ /*
+ * For more data lanes than the maximum CSI lanes per brick,
+ * multiple x4 ports are used simultaneously for capture.
+ */
+ if (lanes <= CSI_LANES_PER_BRICK)
+ chan->totalports = 1;
+ else
+ chan->totalports = lanes / CSI_LANES_PER_BRICK;
+ chan->numgangports = chan->totalports;
+
+ for (i = 1; i < chan->totalports; i++)
+ chan->portnos[i] = chan->portnos[0] + i * CSI_PORTS_PER_BRICK;
+
chan->of_node = node;
list_add_tail(&chan->list, &vi->vi_chans);
@@ -1092,7 +1330,8 @@ static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
int ret;
for (port_num = 0; port_num < nchannels; port_num++) {
- ret = tegra_vi_channel_alloc(vi, port_num, vi->dev->of_node);
+ ret = tegra_vi_channel_alloc(vi, port_num,
+ vi->dev->of_node, 2);
if (ret < 0)
return ret;
}
@@ -1107,6 +1346,9 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
struct device_node *ports;
struct device_node *port;
unsigned int port_num;
+ struct device_node *parent;
+ struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
+ unsigned int lanes;
int ret = 0;
ports = of_get_child_by_name(node, "ports");
@@ -1133,8 +1375,21 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
if (!ep)
continue;
+ parent = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!parent)
+ continue;
+
+ ep = of_graph_get_endpoint_by_regs(parent, 0, 0);
+ of_node_put(parent);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep),
+ &v4l2_ep);
of_node_put(ep);
- ret = tegra_vi_channel_alloc(vi, port_num, port);
+ if (ret)
+ continue;
+
+ lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ ret = tegra_vi_channel_alloc(vi, port_num, port, lanes);
if (ret < 0) {
of_node_put(port);
goto cleanup;
@@ -1156,7 +1411,7 @@ static int tegra_vi_channels_init(struct tegra_vi *vi)
if (ret < 0) {
dev_err(vi->dev,
"failed to initialize channel-%d: %d\n",
- chan->portno, ret);
+ chan->portnos[0], ret);
goto cleanup;
}
}
@@ -1478,6 +1733,9 @@ static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
v4l2_set_subdev_hostdata(subdev, chan);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ v4l2_set_subdev_hostdata(subdev, chan);
+
return 0;
unregister_video:
@@ -1530,7 +1788,7 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
struct tegra_vi *vi = chan->vi;
struct fwnode_handle *ep = NULL;
struct fwnode_handle *remote = NULL;
- struct v4l2_async_subdev *asd;
+ struct tegra_vi_graph_entity *tvge;
struct device_node *node = NULL;
int ret;
@@ -1554,10 +1812,10 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
continue;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier,
- remote, sizeof(struct tegra_vi_graph_entity));
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
+ tvge = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier,
+ remote, struct tegra_vi_graph_entity);
+ if (IS_ERR(tvge)) {
+ ret = PTR_ERR(tvge);
dev_err(vi->dev,
"failed to add subdev to notifier: %d\n", ret);
fwnode_handle_put(remote);
@@ -1600,7 +1858,8 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
* next channels.
*/
list_for_each_entry(chan, &vi->vi_chans, list) {
- remote = fwnode_graph_get_remote_node(fwnode, chan->portno, 0);
+ remote = fwnode_graph_get_remote_node(fwnode, chan->portnos[0],
+ 0);
if (!remote)
continue;
@@ -1615,7 +1874,7 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
if (ret < 0) {
dev_err(vi->dev,
"failed to register channel %d notifier: %d\n",
- chan->portno, ret);
+ chan->portnos[0], ret);
v4l2_async_notifier_cleanup(&chan->notifier);
}
}
@@ -1666,11 +1925,14 @@ static int tegra_vi_init(struct host1x_client *client)
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
ret = tegra_vi_graph_init(vi);
if (ret < 0)
- goto free_chans;
+ goto cleanup_chans;
}
return 0;
+cleanup_chans:
+ list_for_each_entry(chan, &vi->vi_chans, list)
+ tegra_channel_cleanup(chan);
free_chans:
list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
list_del(&chan->list);
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
index 7d6b7a6d0a45..a68e2c02c7b0 100644
--- a/drivers/staging/media/tegra-video/vi.h
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -21,6 +21,10 @@
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
+#include "csi.h"
+
+#define V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+
#define TEGRA_MIN_WIDTH 32U
#define TEGRA_MAX_WIDTH 32768U
#define TEGRA_MIN_HEIGHT 32U
@@ -31,6 +35,7 @@
#define TEGRA_IMAGE_FORMAT_DEF 32
#define MAX_FORMAT_NUM 64
+#define SURFACE_ALIGN_BYTES 64
enum tegra_vi_pg_mode {
TEGRA_VI_PG_DISABLED = 0,
@@ -151,10 +156,13 @@ struct tegra_vi_graph_entity {
* @done: list of capture done queued buffers
* @done_lock: protects the capture done queue list
*
- * @portno: VI channel port number
+ * @portnos: VI channel port numbers
+ * @totalports: total number of ports used for this channel
+ * @numgangports: number of ports combined together as a gang for capture
* @of_node: device node of VI channel
*
* @ctrl_handler: V4L2 control handler of this video channel
+ * @syncpt_timeout_retry: syncpt timeout retry count for the capture
* @fmts_bitmap: a bitmap for supported formats matching v4l2 subdev formats
* @tpg_fmts_bitmap: a bitmap for supported TPG formats
* @pg_mode: test pattern generator mode (disabled/direct/patch)
@@ -168,10 +176,10 @@ struct tegra_vi_channel {
struct media_pad pad;
struct tegra_vi *vi;
- struct host1x_syncpt *frame_start_sp;
- struct host1x_syncpt *mw_ack_sp;
+ struct host1x_syncpt *frame_start_sp[GANG_PORTS_MAX];
+ struct host1x_syncpt *mw_ack_sp[GANG_PORTS_MAX];
/* protects the cpu syncpoint increment */
- spinlock_t sp_incr_lock;
+ spinlock_t sp_incr_lock[GANG_PORTS_MAX];
struct task_struct *kthread_start_capture;
wait_queue_head_t start_wait;
@@ -190,10 +198,13 @@ struct tegra_vi_channel {
/* protects the capture done queue list */
spinlock_t done_lock;
- unsigned char portno;
+ unsigned char portnos[GANG_PORTS_MAX];
+ u8 totalports;
+ u8 numgangports;
struct device_node *of_node;
struct v4l2_ctrl_handler ctrl_handler;
+ unsigned int syncpt_timeout_retry;
DECLARE_BITMAP(fmts_bitmap, MAX_FORMAT_NUM);
DECLARE_BITMAP(tpg_fmts_bitmap, MAX_FORMAT_NUM);
enum tegra_vi_pg_mode pg_mode;
@@ -216,7 +227,7 @@ struct tegra_channel_buffer {
struct list_head queue;
struct tegra_vi_channel *chan;
dma_addr_t addr;
- u32 mw_ack_sp_thresh;
+ u32 mw_ack_sp_thresh[GANG_PORTS_MAX];
};
/*
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
index e50bd70575f3..d966b319553f 100644
--- a/drivers/staging/media/tegra-video/video.c
+++ b/drivers/staging/media/tegra-video/video.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+
#include "video.h"
static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
@@ -24,6 +26,21 @@ static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
kfree(vid);
}
+static void tegra_v4l2_dev_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct tegra_vi_channel *chan;
+ const struct v4l2_event *ev = arg;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ chan = v4l2_get_subdev_hostdata(sd);
+ v4l2_event_queue(&chan->video, arg);
+ if (ev->type == V4L2_EVENT_SOURCE_CHANGE && vb2_is_streaming(&chan->queue))
+ vb2_queue_error(&chan->queue);
+}
+
static int host1x_video_probe(struct host1x_device *dev)
{
struct tegra_video_device *vid;
@@ -49,6 +66,7 @@ static int host1x_video_probe(struct host1x_device *dev)
vid->v4l2_dev.mdev = &vid->media_dev;
vid->v4l2_dev.release = tegra_v4l2_dev_release;
+ vid->v4l2_dev.notify = tegra_v4l2_dev_notify;
ret = v4l2_device_register(&dev->dev, &vid->v4l2_dev);
if (ret < 0) {
dev_err(&dev->dev,
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
index d9f8b21edf6a..e8902f824d6c 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -1020,7 +1020,7 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
vq->buf_struct_size = sizeof(struct zr_buffer);
vq->ops = &zr_video_qops;
vq->mem_ops = &vb2_dma_contig_memops;
- vq->gfp_flags = GFP_DMA32,
+ vq->gfp_flags = GFP_DMA32;
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->min_buffers_needed = 9;
vq->lock = &zr->lock;
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index b6fecb06a0e6..f125bb6da406 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -68,7 +68,7 @@ struct net_dev_context {
};
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
-static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
+static DEFINE_MUTEX(probe_disc_mt); /* ch->linked = true, most_nd_open */
static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
static struct most_component comp;
@@ -520,7 +520,6 @@ static int __init most_net_init(void)
{
int err;
- mutex_init(&probe_disc_mt);
err = most_register_component(&comp);
if (err)
return err;
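
The change swaps a runtime mutex_init() call for build-time initialization, which also removes any window where the lock could be used before init runs. A userspace analogue of the same idea, with PTHREAD_MUTEX_INITIALIZER playing the role of DEFINE_MUTEX():

#include <pthread.h>
#include <stdio.h>

/* Statically initialized lock: usable from the first instruction,
 * with no init call to forget or to race against. */
static pthread_mutex_t probe_disc_mt = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&probe_disc_mt);
	printf("lock usable without any init call\n");
	pthread_mutex_unlock(&probe_disc_mt);
	return 0;
}
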
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 3a1a59058042..c87f6a037874 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -86,6 +86,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
{
unsigned int i = 0;
+ if (bytes < 2)
+ return;
while (i < bytes - 2) {
dest[i] = source[i + 2];
dest[i + 1] = source[i + 1];
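
The added guard matters because bytes is unsigned: with bytes < 2 the loop bound bytes - 2 wraps to a huge value. A standalone sketch of the fixed function (the i += 3 stepping mirrors the kernel's 24-bit sample swap; treat it as an approximation of the full routine):

#include <stdio.h>

static void swap_copy24(unsigned char *dest, const unsigned char *src,
			unsigned int bytes)
{
	unsigned int i = 0;

	if (bytes < 2)		/* prevents (bytes - 2) underflow below */
		return;

	while (i < bytes - 2) {
		dest[i] = src[i + 2];
		dest[i + 1] = src[i + 1];
		dest[i + 2] = src[i];
		i += 3;
	}
}

int main(void)
{
	unsigned char in[6] = { 1, 2, 3, 4, 5, 6 }, out[6] = { 0 };

	swap_copy24(out, in, sizeof(in));
	printf("%d %d %d\n", out[0], out[1], out[2]);	/* 3 2 1 */
	swap_copy24(out, in, 1);			/* safe no-op now */
	return 0;
}
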
@@ -158,9 +160,9 @@ static struct channel *get_channel(struct most_interface *iface,
int channel_id)
{
struct sound_adapter *adpt = iface->priv;
- struct channel *channel, *tmp;
+ struct channel *channel;
- list_for_each_entry_safe(channel, tmp, &adpt->dev_list, list) {
+ list_for_each_entry(channel, &adpt->dev_list, list) {
if ((channel->iface == iface) && (channel->id == channel_id))
return channel;
}
@@ -525,7 +527,7 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
pr_err("Incompatible channel type\n");
return -EINVAL;
}
- strlcpy(arg_list_cpy, arg_list, STRING_SIZE);
+ strscpy(arg_list_cpy, arg_list, STRING_SIZE);
ret = split_arg_list(arg_list_cpy, &ch_num, &sample_res);
if (ret < 0)
return ret;
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 829df899b993..90933d78c332 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -245,8 +245,8 @@ static int vidioc_querycap(struct file *file, void *priv,
struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- strlcpy(cap->driver, "v4l2_component", sizeof(cap->driver));
- strlcpy(cap->card, "MOST", sizeof(cap->card));
+ strscpy(cap->driver, "v4l2_component", sizeof(cap->driver));
+ strscpy(cap->card, "MOST", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"%s", mdev->iface->description);
return 0;
@@ -483,7 +483,7 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
mdev->v4l2_dev.release = comp_v4l2_dev_release;
/* Create the v4l2_device */
- strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
+ strscpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
if (ret) {
pr_err("v4l2_device_register() failed\n");
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
index 66da1bf10c32..23256d1286f3 100644
--- a/drivers/staging/mt7621-dma/Makefile
+++ b/drivers/staging/mt7621-dma/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
+obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
index d241349214e7..b0ed935de7ac 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
@@ -747,7 +749,7 @@ static struct platform_driver mtk_hsdma_driver = {
.probe = mtk_hsdma_probe,
.remove = mtk_hsdma_remove,
.driver = {
- .name = "hsdma-mt7621",
+ .name = KBUILD_MODNAME,
.of_match_table = mtk_hsdma_of_match,
},
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 5b9d3bf82cb1..16fc94f65486 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -222,89 +222,84 @@
pinctrl: pinctrl {
compatible = "ralink,rt2880-pinmux";
- pinctrl-names = "default";
- pinctrl-0 = <&state_default>;
-
- state_default: pinctrl0 {
- };
- i2c_pins: i2c0 {
- i2c0 {
+ i2c_pins: i2c0-pins {
+ pinmux {
groups = "i2c";
function = "i2c";
};
};
- spi_pins: spi0 {
- spi0 {
+ spi_pins: spi0-pins {
+ pinmux {
groups = "spi";
function = "spi";
};
};
- uart1_pins: uart1 {
- uart1 {
+ uart1_pins: uart1-pins {
+ pinmux {
groups = "uart1";
function = "uart1";
};
};
- uart2_pins: uart2 {
- uart2 {
+ uart2_pins: uart2-pins {
+ pinmux {
groups = "uart2";
function = "uart2";
};
};
- uart3_pins: uart3 {
- uart3 {
+ uart3_pins: uart3-pins {
+ pinmux {
groups = "uart3";
function = "uart3";
};
};
- rgmii1_pins: rgmii1 {
- rgmii1 {
+ rgmii1_pins: rgmii1-pins {
+ pinmux {
groups = "rgmii1";
function = "rgmii1";
};
};
- rgmii2_pins: rgmii2 {
- rgmii2 {
+ rgmii2_pins: rgmii2-pins {
+ pinmux {
groups = "rgmii2";
function = "rgmii2";
};
};
- mdio_pins: mdio0 {
- mdio0 {
+ mdio_pins: mdio0-pins {
+ pinmux {
groups = "mdio";
function = "mdio";
};
};
- pcie_pins: pcie0 {
- pcie0 {
+ pcie_pins: pcie0-pins {
+ pinmux {
groups = "pcie";
function = "gpio";
};
};
- nand_pins: nand0 {
- spi-nand {
+ nand_pins: nand0-pins {
+ spi-pinmux {
groups = "spi";
function = "nand1";
};
- sdhci-nand {
+ sdhci-pinmux {
groups = "sdhci";
function = "nand2";
};
};
- sdhci_pins: sdhci0 {
- sdhci0 {
+ sdhci_pins: sdhci0-pins {
+ pinmux {
groups = "sdhci";
function = "sdhci";
};
@@ -497,13 +492,6 @@
};
};
- gsw: gsw@1e110000 {
- compatible = "mediatek,mt7621-gsw";
- reg = <0x1e110000 0x8000>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SHARED 23 IRQ_TYPE_LEVEL_HIGH>;
- };
-
pcie: pcie@1e140000 {
compatible = "mediatek,mt7621-pci";
reg = <0x1e140000 0x100 /* host-pci bridge registers */
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 0e861c4bfcbf..b1ef196e1cfe 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -338,7 +338,7 @@ static const struct power_supply_desc nvec_psy_desc = {
};
static int counter;
-static int const bat_iter[] = {
+static const int bat_iter[] = {
SLOT_STATUS, VOLTAGE, CURRENT, CAPACITY_REMAINING,
#ifdef EC_FULL_DIAG
AVERAGE_CURRENT, TEMPERATURE, TIME_REMAINING,
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 45db29262a9c..157009015c3b 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -112,8 +112,8 @@ static int nvec_mouse_probe(struct platform_device *pdev)
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
- strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
- strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
+ strscpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
+ strscpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
ps2_dev.ser_dev = ser_dev;
ps2_dev.notifier.notifier_call = nvec_ps2_notifier;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 0bf545849b11..b3049108edc4 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -21,9 +21,9 @@
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
- strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, UTS_RELEASE, sizeof(info->version));
+ strscpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
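
The strlcpy() to strscpy() conversions in this series share one rationale: strscpy() reports truncation as -E2BIG instead of returning the would-be source length, and never reads the source past the destination size. A toy userspace model of that return-value contract (toy_strscpy() is illustrative, not the kernel implementation):

#include <stdio.h>
#include <string.h>

#define E2BIG 7		/* matches the Linux errno value */

static long toy_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {	/* source does not fit */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", toy_strscpy(buf, "short", sizeof(buf)));	/* 5  */
	printf("%ld\n", toy_strscpy(buf, "far too long", sizeof(buf)));	/* -7 */
	return 0;
}
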
static int cvm_oct_nway_reset(struct net_device *dev)
@@ -146,9 +146,8 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
goto no_phy;
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
- if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
+ if (!phy_node && of_phy_is_fixed_link(priv->of_node))
phy_node = of_node_get(priv->of_node);
- }
if (!phy_node)
goto no_phy;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index e7281212db5b..6d8e9a481786 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -576,7 +576,7 @@ static struct notifier_block dcon_panic_nb = {
static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
+ strscpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/staging/qlge/Kconfig b/drivers/staging/qlge/Kconfig
index a3cb25a3ab80..6d831ed67965 100644
--- a/drivers/staging/qlge/Kconfig
+++ b/drivers/staging/qlge/Kconfig
@@ -3,6 +3,7 @@
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on ETHERNET && PCI
+ select NET_DEVLINK
help
This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
diff --git a/drivers/staging/qlge/Makefile b/drivers/staging/qlge/Makefile
index 1dc2568e820c..07c1898a512e 100644
--- a/drivers/staging/qlge/Makefile
+++ b/drivers/staging/qlge/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_QLGE) += qlge.o
-qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o qlge_devlink.o
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
index f93f7428f5d5..c76394b9451b 100644
--- a/drivers/staging/qlge/TODO
+++ b/drivers/staging/qlge/TODO
@@ -14,12 +14,6 @@
queues" is confusing.
* struct rx_ring is used for rx and tx completions, with some members relevant
to one case only
-* there is an inordinate amount of disparate debugging code, most of which is
- of questionable value. In particular, qlge_dbg.c has hundreds of lines of
- code bitrotting away in ifdef land (doesn't compile since commit
- 18c49b91777c ("qlge: do vlan cleanup", v3.1-rc1), 8 years ago).
-* triggering an ethtool regdump will hexdump a 176k struct to dmesg depending
- on some module parameters.
* the flow control implementation in firmware is buggy (sends a flood of pause
frames, resets the link, device and driver buffer queues become
desynchronized), disable it by default
@@ -28,10 +22,6 @@
* the driver has a habit of using runtime checks where compile time checks are
possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers())
* reorder struct members to avoid holes if it doesn't impact performance
-* in terms of namespace, the driver uses either qlge_, ql_ (used by
- other qlogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with
- clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_"
- prefix.
* avoid legacy/deprecated apis (ex. replace pci_dma_*, replace pci_enable_msi,
use pci_iomap)
* some "while" loops could be rewritten with simple "for", ex.
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 0381f3f56bc7..55e0ad759250 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -1081,7 +1081,7 @@ struct tx_buf_desc {
#define OPCODE_IB_MPI_IOCB 0x21
#define OPCODE_IB_AE_IOCB 0x3f
-struct ob_mac_iocb_req {
+struct qlge_ob_mac_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_IOCB_REQ_OI 0x01
@@ -1104,7 +1104,7 @@ struct ob_mac_iocb_req {
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;
-struct ob_mac_iocb_rsp {
+struct qlge_ob_mac_iocb_rsp {
u8 opcode; /* */
u8 flags1; /* */
#define OB_MAC_IOCB_RSP_OI 0x01 /* */
@@ -1121,7 +1121,7 @@ struct ob_mac_iocb_rsp {
__le32 reserved[13];
} __packed;
-struct ob_mac_tso_iocb_req {
+struct qlge_ob_mac_tso_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_OI 0x01
@@ -1149,7 +1149,7 @@ struct ob_mac_tso_iocb_req {
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;
-struct ob_mac_tso_iocb_rsp {
+struct qlge_ob_mac_tso_iocb_rsp {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_RSP_OI 0x01
@@ -1166,7 +1166,7 @@ struct ob_mac_tso_iocb_rsp {
__le32 reserved2[13];
} __packed;
-struct ib_mac_iocb_rsp {
+struct qlge_ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
u8 flags1;
#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
@@ -1225,7 +1225,7 @@ struct ib_mac_iocb_rsp {
__le64 hdr_addr; /* */
} __packed;
-struct ib_ae_iocb_rsp {
+struct qlge_ib_ae_iocb_rsp {
u8 opcode;
u8 flags1;
#define IB_AE_IOCB_RSP_OI 0x01
@@ -1250,7 +1250,7 @@ struct ib_ae_iocb_rsp {
* These three structures are for generic
* handling of ib and ob iocbs.
*/
-struct ql_net_rsp_iocb {
+struct qlge_net_rsp_iocb {
u8 opcode;
u8 flags0;
__le16 length;
@@ -1258,7 +1258,7 @@ struct ql_net_rsp_iocb {
__le32 reserved[14];
} __packed;
-struct net_req_iocb {
+struct qlge_net_req_iocb {
u8 opcode;
u8 flags0;
__le16 flags1;
@@ -1346,7 +1346,7 @@ struct ricb {
/* SOFTWARE/DRIVER DATA STRUCTURES. */
-struct oal {
+struct qlge_oal {
struct tx_buf_desc oal[TX_DESC_PER_OAL];
};
@@ -1357,9 +1357,9 @@ struct map_list {
struct tx_ring_desc {
struct sk_buff *skb;
- struct ob_mac_iocb_req *queue_entry;
+ struct qlge_ob_mac_iocb_req *queue_entry;
u32 index;
- struct oal oal;
+ struct qlge_oal oal;
struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
@@ -1388,7 +1388,7 @@ struct tx_ring {
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
struct delayed_work tx_work;
- struct ql_adapter *qdev;
+ struct qlge_adapter *qdev;
u64 tx_packets;
u64 tx_bytes;
u64 tx_errors;
@@ -1469,7 +1469,7 @@ struct rx_ring {
dma_addr_t prod_idx_sh_reg_dma;
void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
u32 cnsmr_idx; /* current sw idx */
- struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
+ struct qlge_net_rsp_iocb *curr_entry; /* next entry on queue */
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
@@ -1487,7 +1487,7 @@ struct rx_ring {
char name[IFNAMSIZ + 5];
struct napi_struct napi;
u8 reserved;
- struct ql_adapter *qdev;
+ struct qlge_adapter *qdev;
u64 rx_packets;
u64 rx_multicast;
u64 rx_bytes;
@@ -1752,14 +1752,14 @@ enum {
#define SHADOW_OFFSET 0xb0000000
#define SHADOW_REG_SHIFT 20
-struct ql_nic_misc {
+struct qlge_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
u32 intr_count;
u32 function;
};
-struct ql_reg_dump {
+struct qlge_reg_dump {
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
@@ -1769,7 +1769,7 @@ struct ql_reg_dump {
/* segment 30 */
struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
+ struct qlge_nic_misc misc_nic_info;
/* segment 31 */
/* one interrupt state for each CQ */
@@ -1792,7 +1792,7 @@ struct ql_reg_dump {
u32 ets[8 + 2];
};
-struct ql_mpi_coredump {
+struct qlge_mpi_coredump {
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
@@ -1914,7 +1914,7 @@ struct ql_mpi_coredump {
/* segment 30 */
struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
+ struct qlge_nic_misc misc_nic_info;
/* segment 31 */
/* one interrupt state for each CQ */
@@ -1991,7 +1991,7 @@ struct ql_mpi_coredump {
* irq environment as a context to the ISR.
*/
struct intr_context {
- struct ql_adapter *qdev;
+ struct qlge_adapter *qdev;
u32 intr;
u32 irq_mask; /* Mask of which rings the vector services. */
u32 hooked;
@@ -2056,15 +2056,27 @@ enum {
};
struct nic_operations {
- int (*get_flash)(struct ql_adapter *qdev);
- int (*port_initialize)(struct ql_adapter *qdev);
+ int (*get_flash)(struct qlge_adapter *qdev);
+ int (*port_initialize)(struct qlge_adapter *qdev);
};
+struct qlge_netdev_priv {
+ struct qlge_adapter *qdev;
+ struct net_device *ndev;
+};
+
+static inline
+struct qlge_adapter *netdev_to_qdev(struct net_device *ndev)
+{
+ struct qlge_netdev_priv *ndev_priv = netdev_priv(ndev);
+
+ return ndev_priv->qdev;
+}
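
With this wrapper the netdev private area holds only the small qlge_netdev_priv, so the large adapter structure can be allocated separately (for instance, tied to the devlink instance's lifetime); callbacks recover the adapter through the accessor instead of casting netdev_priv() directly. An illustrative sketch of a callback using it (the body is a placeholder, not the driver's actual implementation):

	static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
	{
		struct qlge_adapter *qdev = netdev_to_qdev(ndev);

		netif_info(qdev, drv, ndev, "changing MTU to %d\n", new_mtu);
		/* ... reconfigure the rings for the new frame size ... */
		return 0;
	}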
/*
* The main Adapter structure definition.
* This structure has all fields relevant to the hardware.
*/
-struct ql_adapter {
+struct qlge_adapter {
struct ricb ricb;
unsigned long flags;
u32 wol;
@@ -2077,6 +2089,7 @@ struct ql_adapter {
struct pci_dev *pdev;
struct net_device *ndev; /* Parent NET device */
+ struct devlink_health_reporter *reporter;
/* Hardware information */
u32 chip_rev_id;
u32 fw_rev_id;
@@ -2139,8 +2152,7 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
- struct ql_mpi_coredump *mpi_coredump;
- u32 core_is_dumped;
+ struct qlge_mpi_coredump *mpi_coredump;
u32 link_config;
u32 led_config;
u32 max_frame_size;
@@ -2153,7 +2165,6 @@ struct ql_adapter {
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
- struct delayed_work mpi_core_to_log;
struct completion ide_completion;
const struct nic_operations *nic_ops;
u16 device_id;
@@ -2166,7 +2177,7 @@ struct ql_adapter {
/*
* Typical Register accessor for memory mapped device.
*/
-static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+static inline u32 qlge_read32(const struct qlge_adapter *qdev, int reg)
{
return readl(qdev->reg_base + reg);
}
@@ -2174,7 +2185,7 @@ static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
/*
* Typical Register accessor for memory mapped device.
*/
-static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+static inline void qlge_write32(const struct qlge_adapter *qdev, int reg, u32 val)
{
writel(val, qdev->reg_base + reg);
}
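
Both accessors are thin wrappers around readl()/writel() against the BAR mapping stashed in qdev->reg_base, so every access is an ordered PCI MMIO transaction. A usage sketch, using register names that appear later in this patch:

	u32 val = qlge_read32(qdev, RT_DATA);	/* MMIO read via readl() */
	qlge_write32(qdev, RT_IDX, RT_IDX_RS);	/* MMIO write via writel() */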
@@ -2189,7 +2200,7 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
* 1 4k chunk of memory. The lower half of the space is for outbound
* queues. The upper half is for inbound queues.
*/
-static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+static inline void qlge_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
}
@@ -2205,7 +2216,7 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
* queues. The upper half is for inbound queues.
* Caller has to guarantee ordering.
*/
-static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
+static inline void qlge_write_db_reg_relaxed(u32 val, void __iomem *addr)
{
writel_relaxed(val, addr);
}
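
The relaxed variant drops the write barrier that writel() implies, which is why the comment insists the caller guarantee ordering: descriptor memory must be globally visible before the doorbell fires. A hedged sketch of the calling pattern (field names as used elsewhere in the driver):

	/* fill the IOCB in host memory first ... */
	wmb();	/* descriptor writes must not pass the doorbell */
	qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);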
@@ -2220,7 +2231,7 @@ static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
* update the relevant index register and then copy the value to the
* shadow register in host memory.
*/
-static inline u32 ql_read_sh_reg(__le32 *addr)
+static inline u32 qlge_read_sh_reg(__le32 *addr)
{
u32 reg;
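
The hunk truncates the body, but given the comment above, the remainder is essentially a barriered little-endian load from the host-memory shadow that the chip keeps updated by DMA, sparing an MMIO read on the hot path. A sketch of the elided tail, under that assumption:

	reg = le32_to_cpu(*addr);
	rmb();	/* read the shadow before anything that depends on it */
	return reg;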
@@ -2233,132 +2244,49 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
-void ql_queue_fw_error(struct ql_adapter *qdev);
-void ql_mpi_work(struct work_struct *work);
-void ql_mpi_reset_work(struct work_struct *work);
-void ql_mpi_core_to_log(struct work_struct *work);
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
-void ql_queue_asic_error(struct ql_adapter *qdev);
-void ql_set_ethtool_ops(struct net_device *ndev);
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
-void ql_mpi_idc_work(struct work_struct *work);
-void ql_mpi_port_cfg_work(struct work_struct *work);
-int ql_mb_get_fw_state(struct ql_adapter *qdev);
-int ql_cam_route_initialize(struct ql_adapter *qdev);
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
-int ql_unpause_mpi_risc(struct ql_adapter *qdev);
-int ql_pause_mpi_risc(struct ql_adapter *qdev);
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
- int word_count);
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
-int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
-int ql_mb_get_led_cfg(struct ql_adapter *qdev);
-void ql_link_on(struct ql_adapter *qdev);
-void ql_link_off(struct ql_adapter *qdev);
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
-int ql_mb_get_port_cfg(struct ql_adapter *qdev);
-int ql_mb_set_port_cfg(struct ql_adapter *qdev);
-int ql_wait_fifo_empty(struct ql_adapter *qdev);
-void ql_get_dump(struct ql_adapter *qdev, void *buff);
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
-void ql_check_lb_frame(struct ql_adapter *qdev, struct sk_buff *skb);
-int ql_own_firmware(struct ql_adapter *qdev);
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-
-/* #define QL_ALL_DUMP */
-/* #define QL_REG_DUMP */
-/* #define QL_DEV_DUMP */
-/* #define QL_CB_DUMP */
-/* #define QL_IB_DUMP */
-/* #define QL_OB_DUMP */
-
-#ifdef QL_REG_DUMP
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-void ql_dump_routing_entries(struct ql_adapter *qdev);
-void ql_dump_regs(struct ql_adapter *qdev);
-#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
-#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
-#else
-#define QL_DUMP_REGS(qdev)
-#define QL_DUMP_ROUTE(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
-#endif
-
-#ifdef QL_STAT_DUMP
-void ql_dump_stat(struct ql_adapter *qdev);
-#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
-#else
-#define QL_DUMP_STAT(qdev)
-#endif
-
-#ifdef QL_DEV_DUMP
-void ql_dump_qdev(struct ql_adapter *qdev);
-#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
-#else
-#define QL_DUMP_QDEV(qdev)
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb);
-void ql_dump_tx_ring(struct tx_ring *tx_ring);
-void ql_dump_ricb(struct ricb *ricb);
-void ql_dump_cqicb(struct cqicb *cqicb);
-void ql_dump_rx_ring(struct rx_ring *rx_ring);
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
-#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
-#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
-#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
-#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
-#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
- ql_dump_hw_cb(qdev, size, bit, q_id)
-#else
-#define QL_DUMP_RICB(ricb)
-#define QL_DUMP_WQICB(wqicb)
-#define QL_DUMP_TX_RING(tx_ring)
-#define QL_DUMP_CQICB(cqicb)
-#define QL_DUMP_RX_RING(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd);
-void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb);
-void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp)
-#else
-#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp)
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp)
-#else
-#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp)
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev);
-#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
-#else
-#define QL_DUMP_ALL(qdev)
-#endif
+int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask);
+void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask);
+int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
+int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value);
+int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
+void qlge_queue_fw_error(struct qlge_adapter *qdev);
+void qlge_mpi_work(struct work_struct *work);
+void qlge_mpi_reset_work(struct work_struct *work);
+int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void qlge_queue_asic_error(struct qlge_adapter *qdev);
+void qlge_set_ethtool_ops(struct net_device *ndev);
+int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data);
+void qlge_mpi_idc_work(struct work_struct *work);
+void qlge_mpi_port_cfg_work(struct work_struct *work);
+int qlge_mb_get_fw_state(struct qlge_adapter *qdev);
+int qlge_cam_route_initialize(struct qlge_adapter *qdev);
+int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
+int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data);
+int qlge_unpause_mpi_risc(struct qlge_adapter *qdev);
+int qlge_pause_mpi_risc(struct qlge_adapter *qdev);
+int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev);
+int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev);
+int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump);
+int qlge_mb_about_fw(struct qlge_adapter *qdev);
+int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol);
+int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol);
+int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config);
+int qlge_mb_get_led_cfg(struct qlge_adapter *qdev);
+void qlge_link_on(struct qlge_adapter *qdev);
+void qlge_link_off(struct qlge_adapter *qdev);
+int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control);
+int qlge_mb_get_port_cfg(struct qlge_adapter *qdev);
+int qlge_mb_set_port_cfg(struct qlge_adapter *qdev);
+int qlge_wait_fifo_empty(struct qlge_adapter *qdev);
+void qlge_get_dump(struct qlge_adapter *qdev, void *buff);
+netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void qlge_check_lb_frame(struct qlge_adapter *qdev, struct sk_buff *skb);
+int qlge_own_firmware(struct qlge_adapter *qdev);
+int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
#endif /* _QLGE_H_ */
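
Seen together, the renamed IOCB structures preserve the driver's generic completion scheme: every inbound entry begins with an opcode byte, so the completion path can inspect a qlge_net_rsp_iocb first and then cast to the specific layout. A rough sketch of that dispatch (handler names assume the same qlge_ renaming applied to qlge_main.c):

	struct qlge_net_rsp_iocb *rsp = rx_ring->curr_entry;

	switch (rsp->opcode) {
	case OPCODE_OB_MAC_IOCB:	/* TX completion */
		qlge_process_mac_tx_intr(qdev,
					 (struct qlge_ob_mac_iocb_rsp *)rsp);
		break;
	case OPCODE_IB_MAC_IOCB:	/* RX completion */
		qlge_process_mac_rx_intr(qdev, rx_ring,
					 (struct qlge_ib_mac_iocb_rsp *)rsp);
		break;
	case OPCODE_IB_AE_IOCB:		/* async event */
		qlge_process_chip_ae_intr(qdev,
					  (struct qlge_ib_ae_iocb_rsp *)rsp);
		break;
	}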
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 42fd13990f3a..37e593f0fd82 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -6,8 +6,8 @@
#include "qlge.h"
/* Read a NIC register from the alternate function. */
-static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
+static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev,
+ u32 reg)
{
u32 register_to_read;
u32 reg_val;
@@ -17,7 +17,7 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
| MPI_NIC_READ
| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
| reg;
- status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ status = qlge_read_mpi_reg(qdev, register_to_read, &reg_val);
if (status != 0)
return 0xffffffff;
@@ -25,8 +25,8 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
}
/* Write a NIC register of the alternate function. */
-static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
+static int qlge_write_other_func_reg(struct qlge_adapter *qdev,
+ u32 reg, u32 reg_val)
{
u32 register_to_read;
@@ -35,17 +35,17 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev,
| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
| reg;
- return ql_write_mpi_reg(qdev, register_to_read, reg_val);
+ return qlge_write_mpi_reg(qdev, register_to_read, reg_val);
}
-static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
+static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
{
u32 temp;
int count;
for (count = 10; count; count--) {
- temp = ql_read_other_func_reg(qdev, reg);
+ temp = qlge_read_other_func_reg(qdev, reg);
/* check for errors */
if (temp & err_bit)
@@ -57,80 +57,80 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
return -1;
}
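
The hunk elides the middle of this loop; given the surviving lines it is the driver's standard bounded poll — bail on the error bit, succeed on the ready bit, delay, retry — the same shape as qlge_wait_reg_rdy() declared in qlge.h. A sketch of the elided body under that assumption (the delay constant is illustrative):

		if (temp & err_bit)
			return -1;	/* hardware flagged an error */
		if (temp & bit)
			return 0;	/* register is ready */
		mdelay(5);		/* illustrative back-off */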
-static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg,
+ u32 *data)
{
int status;
/* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
+ qlge_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
/* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* get the data */
- *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+ *data = qlge_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
exit:
return status;
}
/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+static int qlge_read_serdes_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
int status;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* set up for reg read */
- ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+ qlge_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* get the data */
- *data = ql_read32(qdev, XG_SERDES_DATA);
+ *data = qlge_read32(qdev, XG_SERDES_DATA);
exit:
return status;
}
-static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- bool direct_valid, bool indirect_valid)
+static void qlge_get_both_serdes(struct qlge_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ bool direct_valid, bool indirect_valid)
{
unsigned int status;
status = 1;
if (direct_valid)
- status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ status = qlge_read_serdes_reg(qdev, addr, direct_ptr);
/* Dead fill any failures or invalids. */
if (status)
*direct_ptr = 0xDEADBEEF;
status = 1;
if (indirect_valid)
- status = ql_read_other_func_serdes_reg(
- qdev, addr, indirect_ptr);
+ status = qlge_read_other_func_serdes_reg(qdev, addr,
+ indirect_ptr);
/* Dead fill any failures or invalids. */
if (status)
*indirect_ptr = 0xDEADBEEF;
}
-static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
+static int qlge_get_serdes_regs(struct qlge_adapter *qdev,
+ struct qlge_mpi_coredump *mpi_coredump)
{
int status;
bool xfi_direct_valid = false, xfi_indirect_valid = false;
@@ -140,9 +140,9 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
u32 *indirect_ptr;
/* The XAUI needs to be read out per port */
- status = ql_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START,
- &temp);
+ status = qlge_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START,
+ &temp);
if (status)
temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
@@ -150,7 +150,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
XG_SERDES_ADDR_XAUI_PWR_DOWN)
xaui_indirect_valid = false;
- status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ status = qlge_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
if (status)
temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
@@ -163,7 +163,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
* XFI register is shared so only need to read one
* function and then check the bits.
*/
- status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ status = qlge_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
if (status)
temp = 0;
@@ -198,8 +198,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -215,8 +215,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_XFI_AN register block. */
if (qdev->func & 1) {
@@ -228,8 +228,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_TRAIN register block. */
if (qdev->func & 1) {
@@ -243,8 +243,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -260,8 +260,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_TX register block. */
if (qdev->func & 1) {
@@ -275,8 +275,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
mpi_coredump->serdes2_xfi_hss_tx;
}
for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_RX register block. */
if (qdev->func & 1) {
@@ -291,8 +291,8 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PLL register block. */
if (qdev->func & 1) {
@@ -307,33 +307,33 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
mpi_coredump->serdes2_xfi_hss_pll;
}
for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
return 0;
}
-static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+static int qlge_read_other_func_xgmac_reg(struct qlge_adapter *qdev, u32 reg,
+ u32 *data)
{
int status = 0;
/* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* set up for reg read */
- ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+ qlge_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
/* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* get the data */
- *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+ *data = qlge_read_other_func_reg(qdev, XGMAC_DATA / 4);
exit:
return status;
}
@@ -341,8 +341,8 @@ exit:
/* Read the 400 xgmac control/statistics registers,
* skipping unused locations.
*/
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
+static int qlge_get_xgmac_regs(struct qlge_adapter *qdev, u32 *buf,
+ unsigned int other_function)
{
int status = 0;
int i;
@@ -370,9 +370,9 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
(i > 0x000005c8 && i < 0x00000600)) {
if (other_function)
status =
- ql_read_other_func_xgmac_reg(qdev, i, buf);
+ qlge_read_other_func_xgmac_reg(qdev, i, buf);
else
- status = ql_read_xgmac_reg(qdev, i, buf);
+ status = qlge_read_xgmac_reg(qdev, i, buf);
if (status)
*buf = 0xdeadbeef;
@@ -382,46 +382,46 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
return status;
}
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+static int qlge_get_ets_regs(struct qlge_adapter *qdev, u32 *buf)
{
int i;
for (i = 0; i < 8; i++, buf++) {
- ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, NIC_ETS);
+ qlge_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+ *buf = qlge_read32(qdev, NIC_ETS);
}
for (i = 0; i < 2; i++, buf++) {
- ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, CNA_ETS);
+ qlge_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+ *buf = qlge_read32(qdev, CNA_ETS);
}
return 0;
}
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+static void qlge_get_intr_states(struct qlge_adapter *qdev, u32 *buf)
{
int i;
for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
- ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
- *buf = ql_read32(qdev, INTR_EN);
+ qlge_write32(qdev, INTR_EN,
+ qdev->intr_context[i].intr_read_mask);
+ *buf = qlge_read32(qdev, INTR_EN);
}
}
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+static int qlge_get_cam_entries(struct qlge_adapter *qdev, u32 *buf)
{
int i, status;
u32 value[3];
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
for (i = 0; i < 16; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
+ status = qlge_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -432,8 +432,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
*buf++ = value[2]; /* output */
}
for (i = 0; i < 32; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ status = qlge_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC,
+ i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -443,21 +443,21 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
*buf++ = value[1]; /* upper Mcast address */
}
err:
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+static int qlge_get_routing_entries(struct qlge_adapter *qdev, u32 *buf)
{
int status;
u32 value, i;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
for (i = 0; i < 16; i++) {
- status = ql_get_routing_reg(qdev, i, &value);
+ status = qlge_get_routing_reg(qdev, i, &value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of routing index register\n");
@@ -467,23 +467,23 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
}
}
err:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+static int qlge_get_mpi_shadow_regs(struct qlge_adapter *qdev, u32 *buf)
{
u32 i;
int status;
for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = ql_write_mpi_reg(qdev,
- RISC_124,
- (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ status = qlge_write_mpi_reg(qdev,
+ RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
if (status)
goto end;
- status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ status = qlge_read_mpi_reg(qdev, RISC_127, buf);
if (status)
goto end;
}
@@ -492,13 +492,13 @@ end:
}
/* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
+static int qlge_get_mpi_regs(struct qlge_adapter *qdev, u32 *buf,
+ u32 offset, u32 count)
{
int i, status = 0;
for (i = 0; i < count; i++, buf++) {
- status = ql_read_mpi_reg(qdev, offset + i, buf);
+ status = qlge_read_mpi_reg(qdev, offset + i, buf);
if (status)
return status;
}
@@ -506,8 +506,8 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
}
/* Read the ASIC probe dump */
-static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
+static unsigned int *qlge_get_probe(struct qlge_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
{
u32 module, mux_sel, probe, lo_val, hi_val;
@@ -519,15 +519,15 @@ static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
| PRB_MX_ADDR_ARE
| mux_sel
| (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
- ql_write32(qdev, PRB_MX_ADDR, probe);
- lo_val = ql_read32(qdev, PRB_MX_DATA);
+ qlge_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = qlge_read32(qdev, PRB_MX_DATA);
if (mux_sel == 0) {
*buf = probe;
buf++;
}
probe |= PRB_MX_ADDR_UP;
- ql_write32(qdev, PRB_MX_ADDR, probe);
- hi_val = ql_read32(qdev, PRB_MX_DATA);
+ qlge_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = qlge_read32(qdev, PRB_MX_DATA);
*buf = lo_val;
buf++;
*buf = hi_val;
@@ -537,23 +537,23 @@ static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
return buf;
}
-static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+static int qlge_get_probe_dump(struct qlge_adapter *qdev, unsigned int *buf)
{
/* First we have to enable the probe mux */
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
+ qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = qlge_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = qlge_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = qlge_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = qlge_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
return 0;
}
/* Read out the routing index registers */
-static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+static int qlge_get_routing_index_registers(struct qlge_adapter *qdev, u32 *buf)
{
int status;
u32 type, index, index_max;
@@ -561,7 +561,7 @@ static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
u32 result_data;
u32 val;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
@@ -574,11 +574,11 @@ static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
val = RT_IDX_RS
| (type << RT_IDX_TYPE_SHIFT)
| (index << RT_IDX_IDX_SHIFT);
- ql_write32(qdev, RT_IDX, val);
+ qlge_write32(qdev, RT_IDX, val);
result_index = 0;
while ((result_index & RT_IDX_MR) == 0)
- result_index = ql_read32(qdev, RT_IDX);
- result_data = ql_read32(qdev, RT_DATA);
+ result_index = qlge_read32(qdev, RT_IDX);
+ result_data = qlge_read32(qdev, RT_DATA);
*buf = type;
buf++;
*buf = index;
@@ -589,12 +589,12 @@ static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
buf++;
}
}
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
/* Read out the MAC protocol registers */
-static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+static void qlge_get_mac_protocol_registers(struct qlge_adapter *qdev, u32 *buf)
{
u32 result_index, result_data;
u32 type;
@@ -657,13 +657,13 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
| (type << MAC_ADDR_TYPE_SHIFT)
| (index << MAC_ADDR_IDX_SHIFT)
| (offset);
- ql_write32(qdev, MAC_ADDR_IDX, val);
+ qlge_write32(qdev, MAC_ADDR_IDX, val);
result_index = 0;
while ((result_index & MAC_ADDR_MR) == 0) {
- result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
+ result_index = qlge_read32(qdev,
+ MAC_ADDR_IDX);
}
- result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ result_data = qlge_read32(qdev, MAC_ADDR_DATA);
*buf = result_index;
buf++;
*buf = result_data;
@@ -673,7 +673,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
}
}
-static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+static void qlge_get_sem_registers(struct qlge_adapter *qdev, u32 *buf)
{
u32 func_num, reg, reg_val;
int status;
@@ -682,7 +682,7 @@ static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
reg = MPI_NIC_REG_BLOCK
| (func_num << MPI_NIC_FUNCTION_SHIFT)
| (SEM / 4);
- status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ status = qlge_read_mpi_reg(qdev, reg, &reg_val);
*buf = reg_val;
/* if the read failed then dead fill the element. */
if (status)
@@ -692,9 +692,8 @@ static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
}
/* Create a coredump segment header */
-static void ql_build_coredump_seg_header(
- struct mpi_coredump_segment_header *seg_hdr,
- u32 seg_number, u32 seg_size, u8 *desc)
+static void qlge_build_coredump_seg_header(struct mpi_coredump_segment_header *seg_hdr,
+ u32 seg_number, u32 seg_size, u8 *desc)
{
memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
@@ -710,7 +709,7 @@ static void ql_build_coredump_seg_header(
* space for this function as well as a coredump structure that
* will contain the dump.
*/
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump)
{
int status;
int i;
@@ -724,9 +723,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
* it isn't available. If the firmware died it
* might be holding the sem.
*/
- ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- status = ql_pause_mpi_risc(qdev);
+ status = qlge_pause_mpi_risc(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed RISC pause. Status = 0x%.08x\n", status);
@@ -740,155 +739,155 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.image_size =
- sizeof(struct ql_mpi_coredump);
+ sizeof(struct qlge_mpi_coredump);
strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.id_string));
/* Get generic NIC reg dump */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
- ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
/* Get XGMac registers. (Segment 18, Rev C. step 21) */
- ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
- ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
if (qdev->func & 1) {
/* Odd means our function is NIC 2 */
for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
mpi_coredump->nic2_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
+ qlge_read32(qdev, i * sizeof(u32));
for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
mpi_coredump->nic_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+ qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
} else {
/* Even means our function is NIC 1 */
for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
mpi_coredump->nic_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
+ qlge_read32(qdev, i * sizeof(u32));
for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
mpi_coredump->nic2_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+ qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
}
/* Rev C. Step 20a */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_an),
- "XAUI AN Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
/* Rev C. Step 20b */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_hss_pcs),
- "XAUI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_an),
- "XFI AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_train),
- "XFI TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pcs),
- "XFI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_tx),
- "XFI HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_rx),
- "XFI HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pll),
- "XFI HSS PLL Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_an),
- "XAUI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
- "XAUI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_an),
- "XFI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_train),
- "XFI2 TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
- "XFI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_tx),
- "XFI2 HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_rx),
- "XFI2 HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pll),
- "XFI2 HSS PLL Registers");
-
- status = ql_get_serdes_regs(qdev, mpi_coredump);
+ qlge_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = qlge_get_serdes_regs(qdev, mpi_coredump);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of Serdes Registers. Status = 0x%.08x\n",
@@ -896,185 +895,185 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
}
- ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
- sizeof(mpi_coredump->core_regs_seg_hdr) +
- sizeof(mpi_coredump->mpi_core_regs) +
- sizeof(mpi_coredump->mpi_core_sh_regs),
- "Core Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
/* Get the MPI Core Registers */
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
- MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
if (status)
goto err;
/* Get the 16 MPI shadow registers */
- status = ql_get_mpi_shadow_regs(qdev,
- &mpi_coredump->mpi_core_sh_regs[0]);
+ status = qlge_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
if (status)
goto err;
/* Get the Test Logic Registers */
- ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->test_logic_regs),
- "Test Logic Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
- TEST_REGS_ADDR, TEST_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
if (status)
goto err;
/* Get the RMII Registers */
- ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->rmii_regs),
- "RMII Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
- RMII_REGS_ADDR, RMII_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
if (status)
goto err;
/* Get the FCMAC1 Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac1_regs),
- "FCMAC1 Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
- FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
if (status)
goto err;
/* Get the FCMAC2 Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac2_regs),
- "FCMAC2 Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
- FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
if (status)
goto err;
/* Get the FC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc1_mbx_regs),
- "FC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
- FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
if (status)
goto err;
/* Get the IDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ide_regs),
- "IDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
- IDE_REGS_ADDR, IDE_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
if (status)
goto err;
/* Get the NIC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic1_mbx_regs),
- "NIC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
- NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
if (status)
goto err;
/* Get the SMBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->smbus_regs),
- "SMBus Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
- SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
if (status)
goto err;
/* Get the FC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc2_mbx_regs),
- "FC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
- FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
if (status)
goto err;
/* Get the NIC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic2_mbx_regs),
- "NIC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
- NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
if (status)
goto err;
/* Get the I2C Registers */
- ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->i2c_regs),
- "I2C Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
- I2C_REGS_ADDR, I2C_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
if (status)
goto err;
/* Get the MEMC Registers */
- ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_regs),
- "MEMC Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
- MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
if (status)
goto err;
/* Get the PBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->pbus_regs),
- "PBUS Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
- PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
if (status)
goto err;
/* Get the MDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mde_regs),
- "MDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
- MDE_REGS_ADDR, MDE_REGS_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = qlge_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
if (status)
goto err;
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
+ qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
@@ -1082,79 +1081,79 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Segment 31 */
/* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
if (status)
goto err;
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = qlge_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
/* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
goto err;
- ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->probe_dump),
- "Probe Dump");
- ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->routing_regs),
- "Routing Regs");
- status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ qlge_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ qlge_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = qlge_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
if (status)
goto err;
- ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mac_prot_regs),
- "MAC Prot Regs");
- ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ qlge_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
/* Get the semaphore registers for all 5 functions */
- ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->sem_regs), "Sem Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
- ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+ qlge_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
/* Prevent the mpi restarting while we dump the memory.*/
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+ qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
/* clear the pause */
- status = ql_unpause_mpi_risc(qdev);
+ status = qlge_unpause_mpi_risc(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed RISC unpause. Status = 0x%.08x\n", status);
@@ -1162,20 +1161,20 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
/* Reset the RISC so we can dump RAM */
- status = ql_hard_reset_mpi_risc(qdev);
+ status = qlge_hard_reset_mpi_risc(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed RISC reset. Status = 0x%.08x\n", status);
goto err;
}
- ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->code_ram),
- "WCS RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of CODE RAM. Status = 0x%.08x\n",
@@ -1184,13 +1183,13 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
/* Insert the segment header */
- ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_ram),
- "MEMC RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ qlge_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of MEMC RAM. Status = 0x%.08x\n",
@@ -1198,13 +1197,13 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
}
err:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
return status;
}
-static void ql_get_core_dump(struct ql_adapter *qdev)
+static void qlge_get_core_dump(struct qlge_adapter *qdev)
{
- if (!ql_own_firmware(qdev)) {
+ if (!qlge_own_firmware(qdev)) {
netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
return;
}
@@ -1214,11 +1213,11 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
"Force Coredump can only be done from interface that is up\n");
return;
}
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
}
-static void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void qlge_gen_reg_dump(struct qlge_adapter *qdev,
+ struct qlge_reg_dump *mpi_coredump)
{
int i, status;
@@ -1228,71 +1227,71 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.image_size =
- sizeof(struct ql_reg_dump);
+ sizeof(struct qlge_reg_dump);
strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.id_string));
/* segment 16 */
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
+ qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
mpi_coredump->misc_nic_info.function = qdev->func;
/* Segment 16, Rev C. Step 18 */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_regs),
- "NIC Registers");
+ qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_regs),
+ "NIC Registers");
/* Get generic reg dump */
for (i = 0; i < 64; i++)
- mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+ mpi_coredump->nic_regs[i] = qlge_read32(qdev, i * sizeof(u32));
/* Segment 31 */
/* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
if (status)
return;
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = qlge_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
if (status)
return;
/* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
}
-void ql_get_dump(struct ql_adapter *qdev, void *buff)
+void qlge_get_dump(struct qlge_adapter *qdev, void *buff)
{
/*
* If the dump has already been taken and is stored
@@ -1304,711 +1303,12 @@ void ql_get_dump(struct ql_adapter *qdev, void *buff)
*/
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
- if (!ql_core_dump(qdev, buff))
- ql_soft_reset_mpi_risc(qdev);
+ if (!qlge_core_dump(qdev, buff))
+ qlge_soft_reset_mpi_risc(qdev);
else
netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
} else {
- ql_gen_reg_dump(qdev, buff);
- ql_get_core_dump(qdev);
+ qlge_gen_reg_dump(qdev, buff);
+ qlge_get_core_dump(qdev);
}
}
-
-/* Coredump to messages log file using separate worker thread */
-void ql_mpi_core_to_log(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_core_to_log.work);
-
- print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n",
- DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump,
- sizeof(*qdev->mpi_coredump), false);
-}
-
-#ifdef QL_REG_DUMP
-static void ql_dump_intr_states(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
-
- for (i = 0; i < qdev->intr_count; i++) {
- ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
- value = ql_read32(qdev, INTR_EN);
- netdev_err(qdev->ndev, "Interrupt %d is %s\n", i,
- (value & INTR_EN_EN ? "enabled" : "disabled"));
- }
-}
-
-#define DUMP_XGMAC(qdev, reg) \
-do { \
- u32 data; \
- ql_read_xgmac_reg(qdev, reg, &data); \
- netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \
-} while (0)
-
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
-{
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- netdev_err(qdev->ndev, "%s: Couldn't get xgmac sem\n",
- __func__);
- return;
- }
- DUMP_XGMAC(qdev, PAUSE_SRC_LO);
- DUMP_XGMAC(qdev, PAUSE_SRC_HI);
- DUMP_XGMAC(qdev, GLOBAL_CFG);
- DUMP_XGMAC(qdev, TX_CFG);
- DUMP_XGMAC(qdev, RX_CFG);
- DUMP_XGMAC(qdev, FLOW_CTL);
- DUMP_XGMAC(qdev, PAUSE_OPCODE);
- DUMP_XGMAC(qdev, PAUSE_TIMER);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
- DUMP_XGMAC(qdev, MAC_TX_PARAMS);
- DUMP_XGMAC(qdev, MAC_RX_PARAMS);
- DUMP_XGMAC(qdev, MAC_SYS_INT);
- DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
- DUMP_XGMAC(qdev, MAC_MGMT_INT);
- DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
- DUMP_XGMAC(qdev, EXT_ARB_MODE);
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
-}
-
-static void ql_dump_ets_regs(struct ql_adapter *qdev)
-{
-}
-
-static void ql_dump_cam_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value[3];
-
- i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (i)
- return;
- for (i = 0; i < 4; i++) {
- if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
- netdev_err(qdev->ndev,
- "%s: Failed read of mac index register\n",
- __func__);
- break;
- }
- if (value[0])
- netdev_err(qdev->ndev,
- "CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
- i, value[1], value[0], value[2]);
- }
- for (i = 0; i < 32; i++) {
- if (ql_get_mac_addr_reg
- (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
- netdev_err(qdev->ndev,
- "%s: Failed read of mac index register\n",
- __func__);
- break;
- }
- if (value[0])
- netdev_err(qdev->ndev,
- "MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
- i, value[1], value[0]);
- }
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-void ql_dump_routing_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
-
- i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (i)
- return;
- for (i = 0; i < 16; i++) {
- value = 0;
- if (ql_get_routing_reg(qdev, i, &value)) {
- netdev_err(qdev->ndev,
- "%s: Failed read of routing index register\n",
- __func__);
- break;
- }
- if (value)
- netdev_err(qdev->ndev,
- "Routing Mask %d = 0x%.08x\n",
- i, value);
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-#define DUMP_REG(qdev, reg) \
- netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
-
-void ql_dump_regs(struct ql_adapter *qdev)
-{
- netdev_err(qdev->ndev, "reg dump for function #%d\n", qdev->func);
- DUMP_REG(qdev, SYS);
- DUMP_REG(qdev, RST_FO);
- DUMP_REG(qdev, FSC);
- DUMP_REG(qdev, CSR);
- DUMP_REG(qdev, ICB_RID);
- DUMP_REG(qdev, ICB_L);
- DUMP_REG(qdev, ICB_H);
- DUMP_REG(qdev, CFG);
- DUMP_REG(qdev, BIOS_ADDR);
- DUMP_REG(qdev, STS);
- DUMP_REG(qdev, INTR_EN);
- DUMP_REG(qdev, INTR_MASK);
- DUMP_REG(qdev, ISR1);
- DUMP_REG(qdev, ISR2);
- DUMP_REG(qdev, ISR3);
- DUMP_REG(qdev, ISR4);
- DUMP_REG(qdev, REV_ID);
- DUMP_REG(qdev, FRC_ECC_ERR);
- DUMP_REG(qdev, ERR_STS);
- DUMP_REG(qdev, RAM_DBG_ADDR);
- DUMP_REG(qdev, RAM_DBG_DATA);
- DUMP_REG(qdev, ECC_ERR_CNT);
- DUMP_REG(qdev, SEM);
- DUMP_REG(qdev, GPIO_1);
- DUMP_REG(qdev, GPIO_2);
- DUMP_REG(qdev, GPIO_3);
- DUMP_REG(qdev, XGMAC_ADDR);
- DUMP_REG(qdev, XGMAC_DATA);
- DUMP_REG(qdev, NIC_ETS);
- DUMP_REG(qdev, CNA_ETS);
- DUMP_REG(qdev, FLASH_ADDR);
- DUMP_REG(qdev, FLASH_DATA);
- DUMP_REG(qdev, CQ_STOP);
- DUMP_REG(qdev, PAGE_TBL_RID);
- DUMP_REG(qdev, WQ_PAGE_TBL_LO);
- DUMP_REG(qdev, WQ_PAGE_TBL_HI);
- DUMP_REG(qdev, CQ_PAGE_TBL_LO);
- DUMP_REG(qdev, CQ_PAGE_TBL_HI);
- DUMP_REG(qdev, COS_DFLT_CQ1);
- DUMP_REG(qdev, COS_DFLT_CQ2);
- DUMP_REG(qdev, SPLT_HDR);
- DUMP_REG(qdev, FC_PAUSE_THRES);
- DUMP_REG(qdev, NIC_PAUSE_THRES);
- DUMP_REG(qdev, FC_ETHERTYPE);
- DUMP_REG(qdev, FC_RCV_CFG);
- DUMP_REG(qdev, NIC_RCV_CFG);
- DUMP_REG(qdev, FC_COS_TAGS);
- DUMP_REG(qdev, NIC_COS_TAGS);
- DUMP_REG(qdev, MGMT_RCV_CFG);
- DUMP_REG(qdev, XG_SERDES_ADDR);
- DUMP_REG(qdev, XG_SERDES_DATA);
- DUMP_REG(qdev, PRB_MX_ADDR);
- DUMP_REG(qdev, PRB_MX_DATA);
- ql_dump_intr_states(qdev);
- ql_dump_xgmac_control_regs(qdev);
- ql_dump_ets_regs(qdev);
- ql_dump_cam_entries(qdev);
- ql_dump_routing_entries(qdev);
-}
-#endif
-
-#ifdef QL_STAT_DUMP
-
-#define DUMP_STAT(qdev, stat) \
- netdev_err(qdev->ndev, "%s = %ld\n", #stat, \
- (unsigned long)(qdev)->nic_stats.stat)
-
-void ql_dump_stat(struct ql_adapter *qdev)
-{
- netdev_err(qdev->ndev, "%s: Enter\n", __func__);
- DUMP_STAT(qdev, tx_pkts);
- DUMP_STAT(qdev, tx_bytes);
- DUMP_STAT(qdev, tx_mcast_pkts);
- DUMP_STAT(qdev, tx_bcast_pkts);
- DUMP_STAT(qdev, tx_ucast_pkts);
- DUMP_STAT(qdev, tx_ctl_pkts);
- DUMP_STAT(qdev, tx_pause_pkts);
- DUMP_STAT(qdev, tx_64_pkt);
- DUMP_STAT(qdev, tx_65_to_127_pkt);
- DUMP_STAT(qdev, tx_128_to_255_pkt);
- DUMP_STAT(qdev, tx_256_511_pkt);
- DUMP_STAT(qdev, tx_512_to_1023_pkt);
- DUMP_STAT(qdev, tx_1024_to_1518_pkt);
- DUMP_STAT(qdev, tx_1519_to_max_pkt);
- DUMP_STAT(qdev, tx_undersize_pkt);
- DUMP_STAT(qdev, tx_oversize_pkt);
- DUMP_STAT(qdev, rx_bytes);
- DUMP_STAT(qdev, rx_bytes_ok);
- DUMP_STAT(qdev, rx_pkts);
- DUMP_STAT(qdev, rx_pkts_ok);
- DUMP_STAT(qdev, rx_bcast_pkts);
- DUMP_STAT(qdev, rx_mcast_pkts);
- DUMP_STAT(qdev, rx_ucast_pkts);
- DUMP_STAT(qdev, rx_undersize_pkts);
- DUMP_STAT(qdev, rx_oversize_pkts);
- DUMP_STAT(qdev, rx_jabber_pkts);
- DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
- DUMP_STAT(qdev, rx_drop_events);
- DUMP_STAT(qdev, rx_fcerr_pkts);
- DUMP_STAT(qdev, rx_align_err);
- DUMP_STAT(qdev, rx_symbol_err);
- DUMP_STAT(qdev, rx_mac_err);
- DUMP_STAT(qdev, rx_ctl_pkts);
- DUMP_STAT(qdev, rx_pause_pkts);
- DUMP_STAT(qdev, rx_64_pkts);
- DUMP_STAT(qdev, rx_65_to_127_pkts);
- DUMP_STAT(qdev, rx_128_255_pkts);
- DUMP_STAT(qdev, rx_256_511_pkts);
- DUMP_STAT(qdev, rx_512_to_1023_pkts);
- DUMP_STAT(qdev, rx_1024_to_1518_pkts);
- DUMP_STAT(qdev, rx_1519_to_max_pkts);
- DUMP_STAT(qdev, rx_len_err_pkts);
-};
-#endif
-
-#ifdef QL_DEV_DUMP
-
-#define DUMP_QDEV_FIELD(qdev, type, field) \
- netdev_err(qdev->ndev, "qdev->%-24s = " type "\n", #field, (qdev)->field)
-#define DUMP_QDEV_DMA_FIELD(qdev, field) \
- netdev_err(qdev->ndev, "qdev->%-24s = %llx\n", #field, \
- (unsigned long long)qdev->field)
-#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
- netdev_err(qdev->ndev, "%s[%d].%s = " type "\n", \
- #array, index, #field, (qdev)->array[index].field)
-void ql_dump_qdev(struct ql_adapter *qdev)
-{
- int i;
-
- DUMP_QDEV_FIELD(qdev, "%lx", flags);
- DUMP_QDEV_FIELD(qdev, "%p", pdev);
- DUMP_QDEV_FIELD(qdev, "%p", ndev);
- DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
- DUMP_QDEV_FIELD(qdev, "%p", reg_base);
- DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
- DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
- DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- if (qdev->msi_x_entry)
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
- }
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
- }
- DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
- DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
- DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
- DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
-}
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb)
-{
- struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
- struct ql_adapter *qdev = tx_ring->qdev;
-
- netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
- netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
- netdev_err(qdev->ndev, "wqicb->flags = %x\n",
- le16_to_cpu(wqicb->flags));
- netdev_err(qdev->ndev, "wqicb->cq_id_rss = %d\n",
- le16_to_cpu(wqicb->cq_id_rss));
- netdev_err(qdev->ndev, "wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
- netdev_err(qdev->ndev, "wqicb->wq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(wqicb->addr));
- netdev_err(qdev->ndev, "wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
-}
-
-void ql_dump_tx_ring(struct tx_ring *tx_ring)
-{
- struct ql_adapter *qdev = tx_ring->qdev;
-
- netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
- tx_ring->wq_id);
- netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
- netdev_err(qdev->ndev, "tx_ring->base_dma = 0x%llx\n",
- (unsigned long long)tx_ring->wq_base_dma);
- netdev_err(qdev->ndev, "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
- tx_ring->cnsmr_idx_sh_reg,
- tx_ring->cnsmr_idx_sh_reg
- ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
- netdev_err(qdev->ndev, "tx_ring->size = %d\n", tx_ring->wq_size);
- netdev_err(qdev->ndev, "tx_ring->len = %d\n", tx_ring->wq_len);
- netdev_err(qdev->ndev, "tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
- netdev_err(qdev->ndev, "tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
- netdev_err(qdev->ndev, "tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
- netdev_err(qdev->ndev, "tx_ring->cq_id = %d\n", tx_ring->cq_id);
- netdev_err(qdev->ndev, "tx_ring->wq_id = %d\n", tx_ring->wq_id);
- netdev_err(qdev->ndev, "tx_ring->q = %p\n", tx_ring->q);
- netdev_err(qdev->ndev, "tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
-}
-
-void ql_dump_ricb(struct ricb *ricb)
-{
- int i;
- struct ql_adapter *qdev =
- container_of(ricb, struct ql_adapter, ricb);
-
- netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
- netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
-
- netdev_err(qdev->ndev, "ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
- netdev_err(qdev->ndev, "ricb->flags = %s%s%s%s%s%s%s%s%s\n",
- ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
- ricb->flags & RSS_L6K ? "RSS_L6K " : "",
- ricb->flags & RSS_LI ? "RSS_LI " : "",
- ricb->flags & RSS_LB ? "RSS_LB " : "",
- ricb->flags & RSS_LM ? "RSS_LM " : "",
- ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
- ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
- ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
- ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
- netdev_err(qdev->ndev, "ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
- for (i = 0; i < 16; i++)
- netdev_err(qdev->ndev, "ricb->hash_cq_id[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->hash_cq_id[i]));
- for (i = 0; i < 10; i++)
- netdev_err(qdev->ndev, "ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv6_hash_key[i]));
- for (i = 0; i < 4; i++)
- netdev_err(qdev->ndev, "ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv4_hash_key[i]));
-}
-
-void ql_dump_cqicb(struct cqicb *cqicb)
-{
- struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
- struct ql_adapter *qdev = rx_ring->qdev;
-
- netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
-
- netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
- netdev_err(qdev->ndev, "cqicb->flags = %x\n", cqicb->flags);
- netdev_err(qdev->ndev, "cqicb->len = %d\n", le16_to_cpu(cqicb->len));
- netdev_err(qdev->ndev, "cqicb->addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->addr));
- netdev_err(qdev->ndev, "cqicb->prod_idx_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
- netdev_err(qdev->ndev, "cqicb->pkt_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->pkt_delay));
- netdev_err(qdev->ndev, "cqicb->irq_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->irq_delay));
- netdev_err(qdev->ndev, "cqicb->lbq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
- netdev_err(qdev->ndev, "cqicb->lbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_buf_size));
- netdev_err(qdev->ndev, "cqicb->lbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_len));
- netdev_err(qdev->ndev, "cqicb->sbq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
- netdev_err(qdev->ndev, "cqicb->sbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_buf_size));
- netdev_err(qdev->ndev, "cqicb->sbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_len));
-}
-
-static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
-
- if (rx_ring->cq_id < qdev->rss_ring_count)
- return "RX COMPLETION";
- else
- return "TX COMPLETION";
-};
-
-void ql_dump_rx_ring(struct rx_ring *rx_ring)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
-
- netdev_err(qdev->ndev,
- "===================== Dumping rx_ring %d ===============\n",
- rx_ring->cq_id);
- netdev_err(qdev->ndev,
- "Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
- qlge_rx_ring_type_name(rx_ring));
- netdev_err(qdev->ndev, "rx_ring->cqicb = %p\n", &rx_ring->cqicb);
- netdev_err(qdev->ndev, "rx_ring->cq_base = %p\n", rx_ring->cq_base);
- netdev_err(qdev->ndev, "rx_ring->cq_base_dma = %llx\n",
- (unsigned long long)rx_ring->cq_base_dma);
- netdev_err(qdev->ndev, "rx_ring->cq_size = %d\n", rx_ring->cq_size);
- netdev_err(qdev->ndev, "rx_ring->cq_len = %d\n", rx_ring->cq_len);
- netdev_err(qdev->ndev,
- "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
- rx_ring->prod_idx_sh_reg,
- rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
- netdev_err(qdev->ndev, "rx_ring->prod_idx_sh_reg_dma = %llx\n",
- (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
- netdev_err(qdev->ndev, "rx_ring->cnsmr_idx_db_reg = %p\n",
- rx_ring->cnsmr_idx_db_reg);
- netdev_err(qdev->ndev, "rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
- netdev_err(qdev->ndev, "rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
- netdev_err(qdev->ndev, "rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
-
- netdev_err(qdev->ndev, "rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
- netdev_err(qdev->ndev, "rx_ring->lbq.base_dma = %llx\n",
- (unsigned long long)rx_ring->lbq.base_dma);
- netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect = %p\n",
- rx_ring->lbq.base_indirect);
- netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect_dma = %llx\n",
- (unsigned long long)rx_ring->lbq.base_indirect_dma);
- netdev_err(qdev->ndev, "rx_ring->lbq = %p\n", rx_ring->lbq.queue);
- netdev_err(qdev->ndev, "rx_ring->lbq.prod_idx_db_reg = %p\n",
- rx_ring->lbq.prod_idx_db_reg);
- netdev_err(qdev->ndev, "rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
- netdev_err(qdev->ndev, "rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
-
- netdev_err(qdev->ndev, "rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
- netdev_err(qdev->ndev, "rx_ring->sbq.base_dma = %llx\n",
- (unsigned long long)rx_ring->sbq.base_dma);
- netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect = %p\n",
- rx_ring->sbq.base_indirect);
- netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect_dma = %llx\n",
- (unsigned long long)rx_ring->sbq.base_indirect_dma);
- netdev_err(qdev->ndev, "rx_ring->sbq = %p\n", rx_ring->sbq.queue);
- netdev_err(qdev->ndev, "rx_ring->sbq.prod_idx_db_reg addr = %p\n",
- rx_ring->sbq.prod_idx_db_reg);
- netdev_err(qdev->ndev, "rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
- netdev_err(qdev->ndev, "rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
- netdev_err(qdev->ndev, "rx_ring->cq_id = %d\n", rx_ring->cq_id);
- netdev_err(qdev->ndev, "rx_ring->irq = %d\n", rx_ring->irq);
- netdev_err(qdev->ndev, "rx_ring->cpu = %d\n", rx_ring->cpu);
- netdev_err(qdev->ndev, "rx_ring->qdev = %p\n", rx_ring->qdev);
-}
-
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
-{
- void *ptr;
-
- netdev_err(qdev->ndev, "%s: Enter\n", __func__);
-
- ptr = kmalloc(size, GFP_ATOMIC);
- if (!ptr)
- return;
-
- if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
- netdev_err(qdev->ndev, "%s: Failed to upload control block!\n", __func__);
- goto fail_it;
- }
- switch (bit) {
- case CFG_DRQ:
- ql_dump_wqicb((struct wqicb *)ptr);
- break;
- case CFG_DCQ:
- ql_dump_cqicb((struct cqicb *)ptr);
- break;
- case CFG_DR:
- ql_dump_ricb((struct ricb *)ptr);
- break;
- default:
- netdev_err(qdev->ndev, "%s: Invalid bit value = %x\n", __func__, bit);
- break;
- }
-fail_it:
- kfree(ptr);
-}
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
-{
- netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
- le64_to_cpu((u64)tbd->addr));
- netdev_err(qdev->ndev, "tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
- tbd++;
- netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
- le64_to_cpu((u64)tbd->addr));
- netdev_err(qdev->ndev, "tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
- tbd++;
- netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
- le64_to_cpu((u64)tbd->addr));
- netdev_err(qdev->ndev, "tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
-}
-
-void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
-{
- struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
- (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
- struct tx_buf_desc *tbd;
- u16 frame_len;
-
- netdev_err(qdev->ndev, "%s\n", __func__);
- netdev_err(qdev->ndev, "opcode = %s\n",
- (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
- netdev_err(qdev->ndev, "flags1 = %s %s %s %s %s\n",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
- netdev_err(qdev->ndev, "flags2 = %s %s %s\n",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- netdev_err(qdev->ndev, "flags3 = %s %s %s\n",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
- netdev_err(qdev->ndev, "tid = %x\n", ob_mac_iocb->tid);
- netdev_err(qdev->ndev, "txq_idx = %d\n", ob_mac_iocb->txq_idx);
- netdev_err(qdev->ndev, "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
- if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
- netdev_err(qdev->ndev, "frame_len = %d\n",
- le32_to_cpu(ob_mac_tso_iocb->frame_len));
- netdev_err(qdev->ndev, "mss = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->mss));
- netdev_err(qdev->ndev, "prot_hdr_len = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
- netdev_err(qdev->ndev, "hdr_offset = 0x%.04x\n",
- le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
- frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
- } else {
- netdev_err(qdev->ndev, "frame_len = %d\n",
- le16_to_cpu(ob_mac_iocb->frame_len));
- frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
- }
- tbd = &ob_mac_iocb->tbd[0];
- ql_dump_tx_desc(qdev, tbd);
-}
-
-void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
-{
- netdev_err(qdev->ndev, "%s\n", __func__);
- netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode);
- netdev_err(qdev->ndev, "flags = %s %s %s %s %s %s %s\n",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ?
- "OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
- ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
- netdev_err(qdev->ndev, "tid = %x\n", ob_mac_rsp->tid);
-}
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- netdev_err(qdev->ndev, "%s\n", __func__);
- netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode);
- netdev_err(qdev->ndev, "flags1 = %s%s%s%s%s%s\n",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
-
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
- netdev_err(qdev->ndev, "%s%s%s Multicast\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
-
- netdev_err(qdev->ndev, "flags2 = %s%s%s%s%s\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
-
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
- netdev_err(qdev->ndev, "%s%s%s%s%s error\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
-
- netdev_err(qdev->ndev, "flags3 = %s%s\n",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- netdev_err(qdev->ndev, "RSS flags = %s%s%s%s\n",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
-
- netdev_err(qdev->ndev, "data_len = %d\n",
- le32_to_cpu(ib_mac_rsp->data_len));
- netdev_err(qdev->ndev, "data_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- netdev_err(qdev->ndev, "rss = %x\n",
- le32_to_cpu(ib_mac_rsp->rss));
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
- netdev_err(qdev->ndev, "vlan_id = %x\n",
- le16_to_cpu(ib_mac_rsp->vlan_id));
-
- netdev_err(qdev->ndev, "flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- netdev_err(qdev->ndev, "hdr length = %d\n",
- le32_to_cpu(ib_mac_rsp->hdr_len));
- netdev_err(qdev->ndev, "hdr addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
- }
-}
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev)
-{
- int i;
-
- QL_DUMP_REGS(qdev);
- QL_DUMP_QDEV(qdev);
- for (i = 0; i < qdev->tx_ring_count; i++) {
- QL_DUMP_TX_RING(&qdev->tx_ring[i]);
- QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
- }
- for (i = 0; i < qdev->rx_ring_count; i++) {
- QL_DUMP_RX_RING(&qdev->rx_ring[i]);
- QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
- }
-}
-#endif
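A side note on the removed ql_mpi_core_to_log() above: print_hex_dump() prepends its prefix_str argument to every output line, so passing the whole sentence "Core is dumping to log file!\n" repeats it (embedded newline included) on each row of the hex dump. A conventional call, sketched here for illustration only, keeps the prefix short:

	/* Sketch: "qlge: " is printed once per line; DUMP_PREFIX_OFFSET
	 * adds a running byte offset; 32 bytes per row in groups of 4.
	 */
	print_hex_dump(KERN_DEBUG, "qlge: ", DUMP_PREFIX_OFFSET,
		       32, 4, qdev->mpi_coredump,
		       sizeof(*qdev->mpi_coredump), false);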
diff --git a/drivers/staging/qlge/qlge_devlink.c b/drivers/staging/qlge/qlge_devlink.c
new file mode 100644
index 000000000000..86834d96cebf
--- /dev/null
+++ b/drivers/staging/qlge/qlge_devlink.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "qlge.h"
+#include "qlge_devlink.h"
+
+static int qlge_fill_seg_(struct devlink_fmsg *fmsg,
+ struct mpi_coredump_segment_header *seg_header,
+ u32 *reg_data)
+{
+ int regs_num = (seg_header->seg_size
+ - sizeof(struct mpi_coredump_segment_header)) / sizeof(u32);
+ int err;
+ int i;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, seg_header->description);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "segment", seg_header->seg_num);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "values");
+ if (err)
+ return err;
+ for (i = 0; i < regs_num; i++) {
+ err = devlink_fmsg_u32_put(fmsg, *reg_data);
+ if (err)
+ return err;
+ reg_data++;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ return err;
+}
+
+#define FILL_SEG(seg_hdr, seg_regs) \
+ do { \
+ err = qlge_fill_seg_(fmsg, &dump->seg_hdr, dump->seg_regs); \
+ if (err) { \
+ kvfree(dump); \
+ return err; \
+ } \
+ } while (0)
+
+static int qlge_reporter_coredump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ struct qlge_adapter *qdev = devlink_health_reporter_priv(reporter);
+ struct qlge_mpi_coredump *dump;
+ wait_queue_head_t wait;
+
+ if (!netif_running(qdev->ndev))
+ return 0;
+
+ if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (qlge_own_firmware(qdev)) {
+ qlge_queue_fw_error(qdev);
+ init_waitqueue_head(&wait);
+ wait_event_timeout(wait, 0, 5 * HZ);
+ } else {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump failed because this NIC function doesn't own the firmware\n");
+ return -EPERM;
+ }
+ }
+
+ dump = kvmalloc(sizeof(*dump), GFP_KERNEL);
+ if (!dump)
+ return -ENOMEM;
+
+ err = qlge_core_dump(qdev, dump);
+ if (err) {
+ kvfree(dump);
+ return err;
+ }
+
+ qlge_soft_reset_mpi_risc(qdev);
+
+ FILL_SEG(core_regs_seg_hdr, mpi_core_regs);
+ FILL_SEG(test_logic_regs_seg_hdr, test_logic_regs);
+ FILL_SEG(rmii_regs_seg_hdr, rmii_regs);
+ FILL_SEG(fcmac1_regs_seg_hdr, fcmac1_regs);
+ FILL_SEG(fcmac2_regs_seg_hdr, fcmac2_regs);
+ FILL_SEG(fc1_mbx_regs_seg_hdr, fc1_mbx_regs);
+ FILL_SEG(ide_regs_seg_hdr, ide_regs);
+ FILL_SEG(nic1_mbx_regs_seg_hdr, nic1_mbx_regs);
+ FILL_SEG(smbus_regs_seg_hdr, smbus_regs);
+ FILL_SEG(fc2_mbx_regs_seg_hdr, fc2_mbx_regs);
+ FILL_SEG(nic2_mbx_regs_seg_hdr, nic2_mbx_regs);
+ FILL_SEG(i2c_regs_seg_hdr, i2c_regs);
+ FILL_SEG(memc_regs_seg_hdr, memc_regs);
+ FILL_SEG(pbus_regs_seg_hdr, pbus_regs);
+ FILL_SEG(mde_regs_seg_hdr, mde_regs);
+ FILL_SEG(nic_regs_seg_hdr, nic_regs);
+ FILL_SEG(nic2_regs_seg_hdr, nic2_regs);
+ FILL_SEG(xgmac1_seg_hdr, xgmac1);
+ FILL_SEG(xgmac2_seg_hdr, xgmac2);
+ FILL_SEG(code_ram_seg_hdr, code_ram);
+ FILL_SEG(memc_ram_seg_hdr, memc_ram);
+ FILL_SEG(xaui_an_hdr, serdes_xaui_an);
+ FILL_SEG(xaui_hss_pcs_hdr, serdes_xaui_hss_pcs);
+ FILL_SEG(xfi_an_hdr, serdes_xfi_an);
+ FILL_SEG(xfi_train_hdr, serdes_xfi_train);
+ FILL_SEG(xfi_hss_pcs_hdr, serdes_xfi_hss_pcs);
+ FILL_SEG(xfi_hss_tx_hdr, serdes_xfi_hss_tx);
+ FILL_SEG(xfi_hss_rx_hdr, serdes_xfi_hss_rx);
+ FILL_SEG(xfi_hss_pll_hdr, serdes_xfi_hss_pll);
+
+ err = qlge_fill_seg_(fmsg, &dump->misc_nic_seg_hdr,
+ (u32 *)&dump->misc_nic_info);
+ if (err) {
+ kvfree(dump);
+ return err;
+ }
+
+ FILL_SEG(intr_states_seg_hdr, intr_states);
+ FILL_SEG(cam_entries_seg_hdr, cam_entries);
+ FILL_SEG(nic_routing_words_seg_hdr, nic_routing_words);
+ FILL_SEG(ets_seg_hdr, ets);
+ FILL_SEG(probe_dump_seg_hdr, probe_dump);
+ FILL_SEG(routing_reg_seg_hdr, routing_regs);
+ FILL_SEG(mac_prot_reg_seg_hdr, mac_prot_regs);
+ FILL_SEG(xaui2_an_hdr, serdes2_xaui_an);
+ FILL_SEG(xaui2_hss_pcs_hdr, serdes2_xaui_hss_pcs);
+ FILL_SEG(xfi2_an_hdr, serdes2_xfi_an);
+ FILL_SEG(xfi2_train_hdr, serdes2_xfi_train);
+ FILL_SEG(xfi2_hss_pcs_hdr, serdes2_xfi_hss_pcs);
+ FILL_SEG(xfi2_hss_tx_hdr, serdes2_xfi_hss_tx);
+ FILL_SEG(xfi2_hss_rx_hdr, serdes2_xfi_hss_rx);
+ FILL_SEG(xfi2_hss_pll_hdr, serdes2_xfi_hss_pll);
+ FILL_SEG(sem_regs_seg_hdr, sem_regs);
+
+ kvfree(dump);
+ return err;
+}
+
+static const struct devlink_health_reporter_ops qlge_reporter_ops = {
+ .name = "coredump",
+ .dump = qlge_reporter_coredump,
+};
+
+void qlge_health_create_reporters(struct qlge_adapter *priv)
+{
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(priv);
+ priv->reporter =
+ devlink_health_reporter_create(devlink, &qlge_reporter_ops,
+ 0, priv);
+ if (IS_ERR(priv->reporter))
+ netdev_warn(priv->ndev,
+ "Failed to create reporter, err = %ld\n",
+ PTR_ERR(priv->reporter));
+}
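For illustration only (not part of the patch): after preprocessing, the first FILL_SEG() invocation above expands to the code below. It merely restates what the macro defined earlier in this file already does, with every error path releasing the kvmalloc'ed coredump buffer:

	do {
		err = qlge_fill_seg_(fmsg, &dump->core_regs_seg_hdr,
				     dump->mpi_core_regs);
		if (err) {
			kvfree(dump);	/* free the coredump on any error */
			return err;
		}
	} while (0);

Once the reporter is registered, the formatted dump can be pulled from userspace with the iproute2 devlink tool, e.g. "devlink health dump show DEV reporter coredump", where DEV is the adapter's devlink handle and "coredump" matches qlge_reporter_ops.name above.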
diff --git a/drivers/staging/qlge/qlge_devlink.h b/drivers/staging/qlge/qlge_devlink.h
new file mode 100644
index 000000000000..19078e1ac694
--- /dev/null
+++ b/drivers/staging/qlge/qlge_devlink.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef QLGE_DEVLINK_H
+#define QLGE_DEVLINK_H
+
+#include <net/devlink.h>
+
+void qlge_health_create_reporters(struct qlge_adapter *priv);
+
+#endif /* QLGE_DEVLINK_H */
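The header assumes the qlge_adapter lives in the devlink instance's private area: qlge_health_create_reporters() calls priv_to_devlink(priv), and qlge_reporter_coredump() recovers the adapter with devlink_health_reporter_priv(). A minimal sketch of the matching allocation side, assuming the two-argument devlink_alloc() of this kernel generation; the real probe wiring lands in qlge_main.c outside this hunk, and the function and ops names here are hypothetical:

	static const struct devlink_ops qlge_devlink_ops = {};

	static int qlge_devlink_probe(struct pci_dev *pdev)
	{
		struct devlink *devlink;
		struct qlge_adapter *qdev;
		int err;

		devlink = devlink_alloc(&qlge_devlink_ops, sizeof(*qdev));
		if (!devlink)
			return -ENOMEM;
		qdev = devlink_priv(devlink);	/* object priv_to_devlink() inverts */

		err = devlink_register(devlink, &pdev->dev);
		if (err) {
			devlink_free(devlink);
			return err;
		}
		qlge_health_create_reporters(qdev);
		return 0;
	}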
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index a28f0254cf60..b70570b7b467 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -34,16 +34,16 @@
#include "qlge.h"
-struct ql_stats {
+struct qlge_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
-#define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m)
-#define QL_OFF(m) offsetof(struct ql_adapter, m)
+#define QL_SIZEOF(m) sizeof_field(struct qlge_adapter, m)
+#define QL_OFF(m) offsetof(struct qlge_adapter, m)
-static const struct ql_stats ql_gstrings_stats[] = {
+static const struct qlge_stats qlge_gstrings_stats[] = {
{"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
{"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
{"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
@@ -175,15 +175,15 @@ static const struct ql_stats ql_gstrings_stats[] = {
QL_OFF(nic_stats.rx_nic_fifo_drop)},
};
-static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+static const char qlge_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
-#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
-#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_TEST_LEN (sizeof(qlge_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(qlge_gstrings_stats)
#define QLGE_RCV_MAC_ERR_STATS 7
-static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+static int qlge_update_ring_coalescing(struct qlge_adapter *qdev)
{
int i, status = 0;
struct rx_ring *rx_ring;
@@ -203,10 +203,10 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb = (struct cqicb *)rx_ring;
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
cqicb->pkt_delay =
- cpu_to_le16(qdev->tx_max_coalesced_frames);
+ cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -224,10 +224,10 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb = (struct cqicb *)rx_ring;
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay =
- cpu_to_le16(qdev->rx_max_coalesced_frames);
+ cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -239,14 +239,14 @@ exit:
return status;
}
-static void ql_update_stats(struct ql_adapter *qdev)
+static void qlge_update_stats(struct qlge_adapter *qdev)
{
u32 i;
u64 data;
u64 *iter = &qdev->nic_stats.tx_pkts;
spin_lock(&qdev->stats_lock);
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+ if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) {
netif_err(qdev, drv, qdev->ndev,
"Couldn't get xgmac sem.\n");
goto quit;
@@ -255,7 +255,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get TX statistics.
*/
for (i = 0x200; i < 0x280; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@@ -270,7 +270,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX statistics.
*/
for (i = 0x300; i < 0x3d0; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@@ -288,7 +288,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get Per-priority TX pause frame counter statistics.
*/
for (i = 0x500; i < 0x540; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@@ -303,7 +303,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get Per-priority RX pause frame counter statistics.
*/
for (i = 0x568; i < 0x5a8; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@@ -317,7 +317,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
/*
* Get RX NIC FIFO DROP statistics.
*/
- if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+ if (qlge_read_xgmac_reg64(qdev, 0x5b8, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n", i);
goto end;
@@ -325,32 +325,30 @@ static void ql_update_stats(struct ql_adapter *qdev)
*iter = data;
}
end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
+ qlge_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
spin_unlock(&qdev->stats_lock);
-
- QL_DUMP_STAT(qdev);
}
-static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+static void qlge_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
int index;
switch (stringset) {
case ETH_SS_TEST:
- memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ memcpy(buf, *qlge_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (index = 0; index < QLGE_STATS_LEN; index++) {
memcpy(buf + index * ETH_GSTRING_LEN,
- ql_gstrings_stats[index].stat_string,
+ qlge_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
break;
}
}
-static int ql_get_sset_count(struct net_device *dev, int sset)
+static int qlge_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -363,34 +361,34 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
}
static void
-ql_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+qlge_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int index, length;
length = QLGE_STATS_LEN;
- ql_update_stats(qdev);
+ qlge_update_stats(qdev);
for (index = 0; index < length; index++) {
char *p = (char *)qdev +
- ql_gstrings_stats[index].stat_offset;
- *data++ = (ql_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ qlge_gstrings_stats[index].stat_offset;
+ *data++ = (qlge_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
}
}
-static int ql_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
+static int qlge_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
u32 supported, advertising;
supported = SUPPORTED_10000baseT_Full;
advertising = ADVERTISED_10000baseT_Full;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
- STS_LINK_TYPE_10GBASET) {
+ STS_LINK_TYPE_10GBASET) {
supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->base.port = PORT_TP;
@@ -412,26 +410,26 @@ static int ql_get_link_ksettings(struct net_device *ndev,
return 0;
}
-static void ql_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *drvinfo)
+static void qlge_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, qlge_driver_version,
+ strscpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, qlge_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
}
-static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+static void qlge_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
unsigned short ssys_dev = qdev->pdev->subsystem_device;
/* WOL is only supported for mezz card. */
@@ -442,9 +440,9 @@ static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
}
-static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+static int qlge_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
unsigned short ssys_dev = qdev->pdev->subsystem_device;
/* WOL is only supported for mezz card. */
@@ -462,25 +460,25 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
-static int ql_set_phys_id(struct net_device *ndev,
- enum ethtool_phys_id_state state)
+static int qlge_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
/* Save the current LED settings */
- if (ql_mb_get_led_cfg(qdev))
+ if (qlge_mb_get_led_cfg(qdev))
return -EIO;
/* Start blinking */
- ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+ qlge_mb_set_led_cfg(qdev, QL_LED_BLINK);
return 0;
case ETHTOOL_ID_INACTIVE:
/* Restore LED settings */
- if (ql_mb_set_led_cfg(qdev, qdev->led_config))
+ if (qlge_mb_set_led_cfg(qdev, qdev->led_config))
return -EIO;
return 0;
@@ -489,7 +487,7 @@ static int ql_set_phys_id(struct net_device *ndev,
}
}
-static int ql_start_loopback(struct ql_adapter *qdev)
+static int qlge_start_loopback(struct qlge_adapter *qdev)
{
if (netif_carrier_ok(qdev->ndev)) {
set_bit(QL_LB_LINK_UP, &qdev->flags);
@@ -498,21 +496,21 @@ static int ql_start_loopback(struct ql_adapter *qdev)
clear_bit(QL_LB_LINK_UP, &qdev->flags);
}
qdev->link_config |= CFG_LOOPBACK_PCS;
- return ql_mb_set_port_cfg(qdev);
+ return qlge_mb_set_port_cfg(qdev);
}
-static void ql_stop_loopback(struct ql_adapter *qdev)
+static void qlge_stop_loopback(struct qlge_adapter *qdev)
{
qdev->link_config &= ~CFG_LOOPBACK_PCS;
- ql_mb_set_port_cfg(qdev);
+ qlge_mb_set_port_cfg(qdev);
if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
netif_carrier_on(qdev->ndev);
clear_bit(QL_LB_LINK_UP, &qdev->flags);
}
}
-static void ql_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
+static void qlge_create_lb_frame(struct sk_buff *skb,
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
@@ -521,8 +519,8 @@ static void ql_create_lb_frame(struct sk_buff *skb,
skb->data[frame_size / 2 + 12] = (unsigned char)0xAF;
}
-void ql_check_lb_frame(struct ql_adapter *qdev,
- struct sk_buff *skb)
+void qlge_check_lb_frame(struct qlge_adapter *qdev,
+ struct sk_buff *skb)
{
unsigned int frame_size = skb->len;
@@ -534,7 +532,7 @@ void ql_check_lb_frame(struct ql_adapter *qdev,
}
}
-static int ql_run_loopback_test(struct ql_adapter *qdev)
+static int qlge_run_loopback_test(struct qlge_adapter *qdev)
{
int i;
netdev_tx_t rc;
@@ -548,33 +546,33 @@ static int ql_run_loopback_test(struct ql_adapter *qdev)
skb->queue_mapping = 0;
skb_put(skb, size);
- ql_create_lb_frame(skb, size);
- rc = ql_lb_send(skb, qdev->ndev);
+ qlge_create_lb_frame(skb, size);
+ rc = qlge_lb_send(skb, qdev->ndev);
if (rc != NETDEV_TX_OK)
return -EPIPE;
atomic_inc(&qdev->lb_count);
}
/* Give queue time to settle before testing results. */
msleep(2);
- ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
+ qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
-static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+static int qlge_loopback_test(struct qlge_adapter *qdev, u64 *data)
{
- *data = ql_start_loopback(qdev);
+ *data = qlge_start_loopback(qdev);
if (*data)
goto out;
- *data = ql_run_loopback_test(qdev);
+ *data = qlge_run_loopback_test(qdev);
out:
- ql_stop_loopback(qdev);
+ qlge_stop_loopback(qdev);
return *data;
}
-static void ql_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
+static void qlge_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
@@ -582,7 +580,7 @@ static void ql_self_test(struct net_device *ndev,
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
- if (ql_loopback_test(qdev, &data[0]))
+ if (qlge_loopback_test(qdev, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
} else {
@@ -601,32 +599,31 @@ static void ql_self_test(struct net_device *ndev,
}
}
-static int ql_get_regs_len(struct net_device *ndev)
+static int qlge_get_regs_len(struct net_device *ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- return sizeof(struct ql_mpi_coredump);
+ return sizeof(struct qlge_mpi_coredump);
else
- return sizeof(struct ql_reg_dump);
+ return sizeof(struct qlge_reg_dump);
}
-static void ql_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
+static void qlge_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- ql_get_dump(qdev, p);
- qdev->core_is_dumped = 0;
+ qlge_get_dump(qdev, p);
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- regs->len = sizeof(struct ql_mpi_coredump);
+ regs->len = sizeof(struct qlge_mpi_coredump);
else
- regs->len = sizeof(struct ql_reg_dump);
+ regs->len = sizeof(struct qlge_reg_dump);
}
-static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+static int qlge_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
- struct ql_adapter *qdev = netdev_priv(dev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
@@ -647,14 +644,14 @@ static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
-static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+static int qlge_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
/* Validate user parameters. */
if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
return -EINVAL;
- /* Don't wait more than 10 usec. */
+ /* Don't wait more than 10 usec. */
if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
return -EINVAL;
if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
@@ -674,25 +671,25 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
- return ql_update_ring_coalescing(qdev);
+ return qlge_update_ring_coalescing(qdev);
}
-static void ql_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static void qlge_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
{
- struct ql_adapter *qdev = netdev_priv(netdev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- ql_mb_get_port_cfg(qdev);
+ qlge_mb_get_port_cfg(qdev);
if (qdev->link_config & CFG_PAUSE_STD) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
}
-static int ql_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static int qlge_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
{
- struct ql_adapter *qdev = netdev_priv(netdev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
if ((pause->rx_pause) && (pause->tx_pause))
qdev->link_config |= CFG_PAUSE_STD;
@@ -701,19 +698,19 @@ static int ql_set_pauseparam(struct net_device *netdev,
else
return -EINVAL;
- return ql_mb_set_port_cfg(qdev);
+ return qlge_mb_set_port_cfg(qdev);
}
-static u32 ql_get_msglevel(struct net_device *ndev)
+static u32 qlge_get_msglevel(struct net_device *ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
return qdev->msg_enable;
}
-static void ql_set_msglevel(struct net_device *ndev, u32 value)
+static void qlge_set_msglevel(struct net_device *ndev, u32 value)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
qdev->msg_enable = value;
}
@@ -721,23 +718,23 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
const struct ethtool_ops qlge_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
- .get_drvinfo = ql_get_drvinfo,
- .get_wol = ql_get_wol,
- .set_wol = ql_set_wol,
- .get_regs_len = ql_get_regs_len,
- .get_regs = ql_get_regs,
- .get_msglevel = ql_get_msglevel,
- .set_msglevel = ql_set_msglevel,
+ .get_drvinfo = qlge_get_drvinfo,
+ .get_wol = qlge_get_wol,
+ .set_wol = qlge_set_wol,
+ .get_regs_len = qlge_get_regs_len,
+ .get_regs = qlge_get_regs,
+ .get_msglevel = qlge_get_msglevel,
+ .set_msglevel = qlge_set_msglevel,
.get_link = ethtool_op_get_link,
- .set_phys_id = ql_set_phys_id,
- .self_test = ql_self_test,
- .get_pauseparam = ql_get_pauseparam,
- .set_pauseparam = ql_set_pauseparam,
- .get_coalesce = ql_get_coalesce,
- .set_coalesce = ql_set_coalesce,
- .get_sset_count = ql_get_sset_count,
- .get_strings = ql_get_strings,
- .get_ethtool_stats = ql_get_ethtool_stats,
- .get_link_ksettings = ql_get_link_ksettings,
+ .set_phys_id = qlge_set_phys_id,
+ .self_test = qlge_self_test,
+ .get_pauseparam = qlge_get_pauseparam,
+ .set_pauseparam = qlge_set_pauseparam,
+ .get_coalesce = qlge_get_coalesce,
+ .set_coalesce = qlge_set_coalesce,
+ .get_sset_count = qlge_get_sset_count,
+ .get_strings = qlge_get_strings,
+ .get_ethtool_stats = qlge_get_ethtool_stats,
+ .get_link_ksettings = qlge_get_link_ksettings,
};
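An ethtool_ops table like this only takes effect once the probe path points the net_device at it; a minimal sketch of that hookup (assumed, the hookup is not shown in this hunk):

	/* in the PCI probe path, after alloc_etherdev_mq() -- sketch */
	ndev->ethtool_ops = &qlge_ethtool_ops;
	SET_NETDEV_DEV(ndev, &pdev->dev);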
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index e6b7baa12cd6..5516be3af898 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -42,6 +42,7 @@
#include <net/ip6_checksum.h>
#include "qlge.h"
+#include "qlge_devlink.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
@@ -89,16 +90,16 @@ static const struct pci_device_id qlge_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *);
+static int qlge_wol(struct qlge_adapter *);
static void qlge_set_multicast_list(struct net_device *);
-static int ql_adapter_down(struct ql_adapter *);
-static int ql_adapter_up(struct ql_adapter *);
+static int qlge_adapter_down(struct qlge_adapter *);
+static int qlge_adapter_up(struct qlge_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
*/
-static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
{
u32 sem_bits = 0;
@@ -132,26 +133,26 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
return -EINVAL;
}
- ql_write32(qdev, SEM, sem_bits | sem_mask);
- return !(ql_read32(qdev, SEM) & sem_bits);
+ qlge_write32(qdev, SEM, sem_bits | sem_mask);
+ return !(qlge_read32(qdev, SEM) & sem_bits);
}
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
{
unsigned int wait_count = 30;
do {
- if (!ql_sem_trylock(qdev, sem_mask))
+ if (!qlge_sem_trylock(qdev, sem_mask))
return 0;
udelay(100);
} while (--wait_count);
return -ETIMEDOUT;
}
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
{
- ql_write32(qdev, SEM, sem_mask);
- ql_read32(qdev, SEM); /* flush */
+ qlge_write32(qdev, SEM, sem_mask);
+ qlge_read32(qdev, SEM); /* flush */
}
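The spinlock/unlock pair above is always used bracket-style around accesses to the shared register file; a minimal sketch of a caller, mirroring qlge_set_mac_addr() later in this patch:

	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it */
	/* ... access the shared MAC_ADDR_IDX/MAC_ADDR_DATA registers ... */
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);	/* write + read-back flush */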
/* This function waits for a specific bit to come ready
@@ -159,13 +160,13 @@ void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
* process, but is also used from kernel thread APIs such as
* netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
*/
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
u32 temp;
int count;
for (count = 0; count < UDELAY_COUNT; count++) {
- temp = ql_read32(qdev, reg);
+ temp = qlge_read32(qdev, reg);
/* check for errors */
if (temp & err_bit) {
@@ -186,13 +187,13 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
/* The CFG register is used to download TX and RX control blocks
* to the chip. This function waits for an operation to complete.
*/
-static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
{
int count;
u32 temp;
for (count = 0; count < UDELAY_COUNT; count++) {
- temp = ql_read32(qdev, CFG);
+ temp = qlge_read32(qdev, CFG);
if (temp & CFG_LE)
return -EIO;
if (!(temp & bit))
@@ -205,8 +206,8 @@ static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id)
+int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id)
{
u64 map;
int status = 0;
@@ -225,38 +226,38 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
return -ENOMEM;
}
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
if (status)
goto lock_failed;
- status = ql_wait_cfg(qdev, bit);
+ status = qlge_wait_cfg(qdev, bit);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Timed out waiting for CFG to come ready.\n");
goto exit;
}
- ql_write32(qdev, ICB_L, (u32)map);
- ql_write32(qdev, ICB_H, (u32)(map >> 32));
+ qlge_write32(qdev, ICB_L, (u32)map);
+ qlge_write32(qdev, ICB_H, (u32)(map >> 32));
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
- ql_write32(qdev, CFG, (mask | value));
+ qlge_write32(qdev, CFG, (mask | value));
/*
* Wait for the bit to clear after signaling hw.
*/
- status = ql_wait_cfg(qdev, bit);
+ status = qlge_wait_cfg(qdev, bit);
exit:
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
+ qlge_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
lock_failed:
dma_unmap_single(&qdev->pdev->dev, map, size, direction);
return status;
}
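A typical caller hands qlge_write_cfg() a DMA-able init control block plus the CFG bit selecting its type; a sketch of the completion-queue case (assumed for illustration, this hunk shows no caller):

	/* download a completion queue init control block -- illustrative */
	status = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
				CFG_LCQ, rx_ring->cq_id);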
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value)
+int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
+ u32 *value)
{
u32 offset = 0;
int status;
@@ -264,46 +265,46 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC: {
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS |
type); /* type */
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
break;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS |
type); /* type */
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
break;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index
<< MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR |
MAC_ADDR_RS | type); /* type */
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MR, 0);
if (status)
break;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
}
break;
}
@@ -320,8 +321,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
/* Set up a MAC, multicast or VLAN address for the
* inbound frame matching.
*/
-static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
- u16 index)
+static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
+ u16 index)
{
u32 offset = 0;
int status = 0;
@@ -332,22 +333,22 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ qlge_write32(qdev, MAC_ADDR_DATA, lower);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ qlge_write32(qdev, MAC_ADDR_DATA, upper);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
break;
}
case MAC_ADDR_TYPE_CAM_MAC: {
@@ -355,27 +356,27 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u32 upper = (addr[0] << 8) | addr[1];
u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ qlge_write32(qdev, MAC_ADDR_DATA, lower);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ qlge_write32(qdev, MAC_ADDR_DATA, upper);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- (offset) | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ (offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
@@ -388,7 +389,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
}
case MAC_ADDR_TYPE_VLAN: {
@@ -398,11 +399,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
- ql_write32(qdev, MAC_ADDR_IDX,
- offset | /* offset */
+ qlge_write32(qdev, MAC_ADDR_IDX,
+ offset | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type | /* type */
enable_bit); /* enable/disable */
@@ -421,7 +422,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* have to clear it to prevent wrong frame routing
* especially in a bonding environment.
*/
-static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
{
int status;
char zero_mac_addr[ETH_ALEN];
@@ -437,50 +438,50 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Clearing MAC address\n");
}
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
- MAC_ADDR_TYPE_CAM_MAC,
- qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init mac address.\n");
return status;
}
-void ql_link_on(struct ql_adapter *qdev)
+void qlge_link_on(struct qlge_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
- ql_set_mac_addr(qdev, 1);
+ qlge_set_mac_addr(qdev, 1);
}
-void ql_link_off(struct ql_adapter *qdev)
+void qlge_link_off(struct qlge_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
- ql_set_mac_addr(qdev, 0);
+ qlge_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
{
int status = 0;
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
- ql_write32(qdev, RT_IDX,
- RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
+ qlge_write32(qdev, RT_IDX,
+ RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+ status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
if (status)
goto exit;
- *value = ql_read32(qdev, RT_DATA);
+ *value = qlge_read32(qdev, RT_DATA);
exit:
return status;
}
@@ -490,8 +491,8 @@ exit:
* multicast/error frames to the default queue for slow handling,
* and CAM hit/RSS frames to the fast handling queues.
*/
-static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
- int enable)
+static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
+ int enable)
{
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
@@ -577,50 +578,50 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
}
if (value) {
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
value |= (enable ? RT_IDX_E : 0);
- ql_write32(qdev, RT_IDX, value);
- ql_write32(qdev, RT_DATA, enable ? mask : 0);
+ qlge_write32(qdev, RT_IDX, value);
+ qlge_write32(qdev, RT_DATA, enable ? mask : 0);
}
exit:
return status;
}
-static void ql_enable_interrupts(struct ql_adapter *qdev)
+static void qlge_enable_interrupts(struct qlge_adapter *qdev)
{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+ qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
-static void ql_disable_interrupts(struct ql_adapter *qdev)
+static void qlge_disable_interrupts(struct qlge_adapter *qdev)
{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+ qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
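Both writes rely on the chip's masked-write register convention: the upper 16 bits of the written value select which low bits take effect, so a bit can be set or cleared without a read-modify-write:

	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);	/* set */
	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));		/* clear */

The CFG download earlier builds its mask/value pair the same way.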
-static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
struct intr_context *ctx = &qdev->intr_context[intr];
- ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
+ qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
-static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
struct intr_context *ctx = &qdev->intr_context[intr];
- ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
+ qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}
-static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
{
int i;
for (i = 0; i < qdev->intr_count; i++)
- ql_enable_completion_interrupt(qdev, i);
+ qlge_enable_completion_interrupt(qdev, i);
}
-static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
{
int status, i;
u16 csum = 0;
@@ -642,31 +643,31 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
return csum;
}
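The elided body of qlge_validate_flash() is a simple additive checksum; a minimal sketch of the idea, assuming the flash image is an array of __le16 words that must sum to zero:

	__le16 *flash = (__le16 *)&qdev->flash;
	u16 csum = 0;
	int i;

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);
	/* nonzero csum means a corrupt image; returned to the caller as an error */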
-static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
+static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
{
int status = 0;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
- ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+ qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* This data is stored on flash as an array of
- * __le32. Since ql_read32() returns cpu endian
+ * __le32. Since qlge_read32() returns cpu endian
* we need to swap it back.
*/
- *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+ *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
exit:
return status;
}
-static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
{
u32 i, size;
int status;
@@ -682,12 +683,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
else
offset = FUNC1_FLASH_OFFSET / sizeof(u32);
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i + offset, p);
+ status = qlge_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
@@ -695,8 +696,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
}
}
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) /
+ status = qlge_validate_flash(qdev,
+ sizeof(struct flash_params_8000) /
sizeof(u16),
"8000");
if (status) {
@@ -728,11 +729,11 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ qlge_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
-static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
{
int i;
int status;
@@ -746,11 +747,11 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
if (qdev->port)
offset = size;
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i + offset, p);
+ status = qlge_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
@@ -758,10 +759,10 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
}
}
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) /
- sizeof(u16),
- "8012");
+ status = qlge_validate_flash(qdev,
+ sizeof(struct flash_params_8012) /
+ sizeof(u16),
+ "8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
@@ -778,7 +779,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ qlge_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
@@ -786,18 +787,18 @@ exit:
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
-static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
int status;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ status = qlge_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
return status;
/* write the data to the data reg */
- ql_write32(qdev, XGMAC_DATA, data);
+ qlge_write32(qdev, XGMAC_DATA, data);
/* trigger the write */
- ql_write32(qdev, XGMAC_ADDR, reg);
+ qlge_write32(qdev, XGMAC_ADDR, reg);
return status;
}
@@ -805,39 +806,39 @@ static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
int status = 0;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ status = qlge_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* set up for reg read */
- ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+ qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ status = qlge_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* get the data */
- *data = ql_read32(qdev, XGMAC_DATA);
+ *data = qlge_read32(qdev, XGMAC_DATA);
exit:
return status;
}
/* This is used for reading the 64-bit statistics regs. */
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
{
int status = 0;
u32 hi = 0;
u32 lo = 0;
- status = ql_read_xgmac_reg(qdev, reg, &lo);
+ status = qlge_read_xgmac_reg(qdev, reg, &lo);
if (status)
goto exit;
- status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+ status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
if (status)
goto exit;
@@ -847,17 +848,17 @@ exit:
return status;
}
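The tail of this helper is elided by the hunk, but given the lo/hi reads above it presumably widens and combines the two 32-bit halves along the lines of:

	*data = (u64)lo | ((u64)hi << 32);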
-static int ql_8000_port_initialize(struct ql_adapter *qdev)
+static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
{
int status;
/*
* Get MPI firmware version for driver banner
* and ethtool info.
*/
- status = ql_mb_about_fw(qdev);
+ status = qlge_mb_about_fw(qdev);
if (status)
goto exit;
- status = ql_mb_get_fw_state(qdev);
+ status = qlge_mb_get_fw_state(qdev);
if (status)
goto exit;
/* Wake up a worker to get/set the TX/RX frame sizes. */
@@ -872,18 +873,18 @@ exit:
* This functionality may be done in the MPI firmware at a
* later date.
*/
-static int ql_8012_port_initialize(struct ql_adapter *qdev)
+static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
{
int status = 0;
u32 data;
- if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+ if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
netif_info(qdev, link, qdev->ndev,
"Another function has the semaphore, so wait for the port init bit to come ready.\n");
- status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+ status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
netif_crit(qdev, link, qdev->ndev,
"Port initialize timed out.\n");
@@ -893,11 +894,11 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
/* Set the core reset. */
- status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+ status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
goto end;
data |= GLOBAL_CFG_RESET;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
@@ -906,48 +907,48 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
data |= GLOBAL_CFG_TX_STAT_EN;
data |= GLOBAL_CFG_RX_STAT_EN;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Enable the transmitter and clear its reset. */
- status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+ status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
if (status)
goto end;
data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
data |= TX_CFG_EN; /* Enable the transmitter. */
- status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+ status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
if (status)
goto end;
/* Enable the receiver and clear its reset. */
- status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+ status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
if (status)
goto end;
data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
data |= RX_CFG_EN; /* Enable the receiver. */
- status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+ status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
if (status)
goto end;
/* Turn on jumbo. */
status =
- ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+ qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
if (status)
goto end;
status =
- ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+ qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
if (status)
goto end;
/* Signal to the world that the port is enabled. */
- ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+ qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
+ qlge_sem_unlock(qdev, qdev->xg_sem_mask);
return status;
}
-static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
{
return PAGE_SIZE << qdev->lbq_buf_order;
}
@@ -962,8 +963,8 @@ static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
return bq_desc;
}
-static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring)
{
struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
@@ -971,17 +972,17 @@ static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
qdev->lbq_buf_size, DMA_FROM_DEVICE);
if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
- ql_lbq_block_size(qdev)) {
+ qlge_lbq_block_size(qdev)) {
/* last chunk of the master page */
dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
- ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
+ qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
}
return lbq_desc;
}
/* Update an rx ring index. */
-static void ql_update_cq(struct rx_ring *rx_ring)
+static void qlge_update_cq(struct rx_ring *rx_ring)
{
rx_ring->cnsmr_idx++;
rx_ring->curr_entry++;
@@ -991,9 +992,9 @@ static void ql_update_cq(struct rx_ring *rx_ring)
}
}
-static void ql_write_cq_idx(struct rx_ring *rx_ring)
+static void qlge_write_cq_idx(struct rx_ring *rx_ring)
{
- ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+ qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
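Together these two helpers implement the usual completion-queue consumer protocol used by the clean routines below: walk entries while the shadow producer index disagrees with our consumer index, then publish the new consumer index through the doorbell once per batch. Sketch:

	while (prod != rx_ring->cnsmr_idx) {
		/* ... process rx_ring->curr_entry ... */
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	qlge_write_cq_idx(rx_ring);	/* one doorbell write per batch */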
static const char * const bq_type_name[] = {
@@ -1005,7 +1006,7 @@ static const char * const bq_type_name[] = {
static int qlge_refill_sb(struct rx_ring *rx_ring,
struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct sk_buff *skb;
if (sbq_desc->p.skb)
@@ -1038,7 +1039,7 @@ static int qlge_refill_sb(struct rx_ring *rx_ring,
static int qlge_refill_lb(struct rx_ring *rx_ring,
struct qlge_bq_desc *lbq_desc, gfp_t gfp)
{
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
if (!master_chunk->page) {
@@ -1049,7 +1050,7 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
if (unlikely(!page))
return -ENOMEM;
dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
- ql_lbq_block_size(qdev),
+ qlge_lbq_block_size(qdev),
DMA_FROM_DEVICE);
if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
__free_pages(page, qdev->lbq_buf_order);
@@ -1072,7 +1073,7 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
* buffer get.
*/
master_chunk->offset += qdev->lbq_buf_size;
- if (master_chunk->offset == ql_lbq_block_size(qdev)) {
+ if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
master_chunk->page = NULL;
} else {
master_chunk->va += qdev->lbq_buf_size;
@@ -1086,7 +1087,7 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct qlge_bq_desc *bq_desc;
int refill_count;
int retval;
@@ -1132,7 +1133,7 @@ static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
"ring %u %s: updating prod idx = %d.\n",
rx_ring->cq_id, bq_type_name[bq->type],
i);
- ql_write_db_reg(i, bq->prod_idx_db_reg);
+ qlge_write_db_reg(i, bq->prod_idx_db_reg);
}
bq->next_to_use = i;
}
@@ -1140,8 +1141,8 @@ static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
return retval;
}
-static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
- unsigned long delay)
+static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
+ unsigned long delay)
{
bool sbq_fail, lbq_fail;
@@ -1172,7 +1173,7 @@ static void qlge_slow_refill(struct work_struct *work)
struct napi_struct *napi = &rx_ring->napi;
napi_disable(napi);
- ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
+ qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
napi_enable(napi);
local_bh_disable();
@@ -1187,8 +1188,8 @@ static void qlge_slow_refill(struct work_struct *work)
/* Unmaps tx buffers. Can be called from send() if a pci mapping
* fails at some stage, or from the interrupt when a tx completes.
*/
-static void ql_unmap_send(struct ql_adapter *qdev,
- struct tx_ring_desc *tx_ring_desc, int mapped)
+static void qlge_unmap_send(struct qlge_adapter *qdev,
+ struct tx_ring_desc *tx_ring_desc, int mapped)
{
int i;
@@ -1229,9 +1230,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
/* Map the buffers for this transmit. This will return
* NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
*/
-static int ql_map_send(struct ql_adapter *qdev,
- struct ob_mac_iocb_req *mac_iocb_ptr,
- struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+static int qlge_map_send(struct qlge_adapter *qdev,
+ struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
+ struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
int len = skb_headlen(skb);
dma_addr_t map;
@@ -1294,7 +1295,7 @@ static int ql_map_send(struct ql_adapter *qdev,
*/
/* Tack on the OAL in the eighth segment of IOCB. */
map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
- sizeof(struct oal),
+ sizeof(struct qlge_oal),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
@@ -1316,7 +1317,7 @@ static int ql_map_send(struct ql_adapter *qdev,
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- sizeof(struct oal));
+ sizeof(struct qlge_oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
}
@@ -1351,13 +1352,13 @@ map_error:
* we pass in the number of frags that mapped successfully
* so they can be umapped.
*/
- ql_unmap_send(qdev, tx_ring_desc, map_idx);
+ qlge_unmap_send(qdev, tx_ring_desc, map_idx);
return NETDEV_TX_BUSY;
}
/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
+static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
+ struct rx_ring *rx_ring)
{
struct nic_stats *stats = &qdev->nic_stats;
@@ -1389,12 +1390,12 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
/**
- * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * qlge_update_mac_hdr_len - helper routine to update the mac header length
* based on vlan tags if present
*/
-static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- void *page, size_t *len)
+static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
{
u16 *tags;
@@ -1412,18 +1413,18 @@ static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
+static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct sk_buff *skb;
- struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
put_page(lbq_desc->p.pg_chunk.page);
return;
}
@@ -1458,15 +1459,15 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
+static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
- struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
size_t hlen = ETH_HLEN;
@@ -1482,12 +1483,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
goto err_out;
}
/* Update the MAC header length*/
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+ qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
@@ -1521,12 +1522,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
(struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1549,10 +1550,10 @@ err_out:
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
+static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
@@ -1576,14 +1577,14 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
dev_kfree_skb_any(skb);
return;
}
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
+ qlge_check_lb_frame(qdev, skb);
dev_kfree_skb_any(skb);
return;
}
@@ -1628,12 +1629,12 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1651,7 +1652,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
netif_receive_skb(skb);
}
-static void ql_realign_skb(struct sk_buff *skb, int len)
+static void qlge_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1669,9 +1670,9 @@ static void ql_realign_skb(struct sk_buff *skb, int len)
* completion. It will be rewritten for readability in the near
* future, but for now it works well.
*/
-static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
@@ -1693,7 +1694,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb = sbq_desc->p.skb;
- ql_realign_skb(skb, hdr_len);
+ qlge_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
sbq_desc->p.skb = NULL;
}
@@ -1731,7 +1732,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length);
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
skb = sbq_desc->p.skb;
- ql_realign_skb(skb, length);
+ qlge_realign_skb(skb, length);
skb_put(skb, length);
dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
@@ -1748,7 +1749,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* chain it to the header buffer's skb and let
* it rip.
*/
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Chaining page at offset = %d, for %d bytes to skb.\n",
lbq_desc->p.pg_chunk.offset, length);
@@ -1763,7 +1764,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* copy it to a new skb and let it go. This can happen with
* jumbo mtu on a non-TCP/UDP frame.
*/
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (!skb) {
netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
@@ -1783,9 +1784,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->len += length;
skb->data_len += length;
skb->truesize += length;
- ql_update_mac_hdr_len(qdev, ib_mac_rsp,
- lbq_desc->p.pg_chunk.va,
- &hlen);
+ qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
__pskb_pull_tail(skb, hlen);
}
} else {
@@ -1823,7 +1824,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb_reserve(skb, NET_IP_ALIGN);
}
do {
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
size = min(length, qdev->lbq_buf_size);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1838,25 +1839,23 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
} while (length > 0);
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
- &hlen);
+ qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
__pskb_pull_tail(skb, hlen);
}
return skb;
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
+static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
-
- skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+ skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"No skb available, drop packet.\n");
@@ -1866,7 +1865,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
dev_kfree_skb_any(skb);
return;
}
@@ -1882,7 +1881,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
+ qlge_check_lb_frame(qdev, skb);
dev_kfree_skb_any(skb);
return;
}
@@ -1917,12 +1916,12 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -1942,67 +1941,64 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
- ((le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
- QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
*/
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The data fit in a single small buffer.
* Allocate a new skb, copy the data and
* return the buffer to the free pool.
*/
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
+ qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* TCP packet in a page chunk that's been checksummed.
* Tack it on to our GRO skb and let it go.
*/
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
+ qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
/* Non-TCP packet in a page chunk. Allocate an
* skb, tack it on frags, and send it up.
*/
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
+ qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
* can be processed correctly by the split frame logic.
*/
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
}
return (unsigned long)length;
}
/* Process an outbound completion from an rx ring. */
-static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
- struct ob_mac_iocb_rsp *mac_rsp)
+static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
+ struct qlge_ob_mac_iocb_rsp *mac_rsp)
{
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
- QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
- ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+ qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
tx_ring->tx_packets++;
dev_kfree_skb(tx_ring_desc->skb);
@@ -2033,16 +2029,16 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
}
/* Fire up a handler to reset the MPI processor. */
-void ql_queue_fw_error(struct ql_adapter *qdev)
+void qlge_queue_fw_error(struct qlge_adapter *qdev)
{
- ql_link_off(qdev);
+ qlge_link_off(qdev);
queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
-void ql_queue_asic_error(struct ql_adapter *qdev)
+void qlge_queue_asic_error(struct qlge_adapter *qdev)
{
- ql_link_off(qdev);
- ql_disable_interrupts(qdev);
+ qlge_link_off(qdev);
+ qlge_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
* process that it shouldn't kill the reset worker
* thread.
@@ -2055,47 +2051,47 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
-static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
- struct ib_ae_iocb_rsp *ib_ae_rsp)
+static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
+ struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
netif_err(qdev, rx_err, qdev->ndev,
"Management Processor Fatal Error.\n");
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
netdev_err(qdev->ndev, "This event shouldn't occur.\n");
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
netdev_err(qdev->ndev, "Soft ECC error detected.\n");
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
netdev_err(qdev->ndev,
"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
ib_ae_rsp->q_id);
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
break;
default:
netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
ib_ae_rsp->event);
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
break;
}
}
-static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ob_mac_iocb_rsp *net_rsp = NULL;
+ struct qlge_adapter *qdev = rx_ring->qdev;
+ u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
int count = 0;
struct tx_ring *tx_ring;
@@ -2105,12 +2101,12 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
"cq_id = %d, prod = %d, cnsmr = %d\n",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
- net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+ net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
rmb();
switch (net_rsp->opcode) {
case OPCODE_OB_MAC_TSO_IOCB:
case OPCODE_OB_MAC_IOCB:
- ql_process_mac_tx_intr(qdev, net_rsp);
+ qlge_process_mac_tx_intr(qdev, net_rsp);
break;
default:
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -2118,12 +2114,12 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
net_rsp->opcode);
}
count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ qlge_update_cq(rx_ring);
+ prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
if (!net_rsp)
return 0;
- ql_write_cq_idx(rx_ring);
+ qlge_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
@@ -2137,11 +2133,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
return count;
}
-static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ql_net_rsp_iocb *net_rsp;
+ struct qlge_adapter *qdev = rx_ring->qdev;
+ u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct qlge_net_rsp_iocb *net_rsp;
int count = 0;
/* While there are entries in the completion queue. */
@@ -2154,14 +2150,14 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
rmb();
switch (net_rsp->opcode) {
case OPCODE_IB_MAC_IOCB:
- ql_process_mac_rx_intr(qdev, rx_ring,
- (struct ib_mac_iocb_rsp *)
- net_rsp);
+ qlge_process_mac_rx_intr(qdev, rx_ring,
+ (struct qlge_ib_mac_iocb_rsp *)
+ net_rsp);
break;
case OPCODE_IB_AE_IOCB:
- ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
- net_rsp);
+ qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
+ net_rsp);
break;
default:
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -2170,20 +2166,20 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
break;
}
count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ qlge_update_cq(rx_ring);
+ prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
if (count == budget)
break;
}
- ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
- ql_write_cq_idx(rx_ring);
+ qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
+ qlge_write_cq_idx(rx_ring);
return count;
}
-static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
{
struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct rx_ring *trx_ring;
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
@@ -2200,42 +2196,42 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
* it's not empty then service it.
*/
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+ (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing TX completion ring %d.\n",
__func__, trx_ring->cq_id);
- ql_clean_outbound_rx_ring(trx_ring);
+ qlge_clean_outbound_rx_ring(trx_ring);
}
}
/*
* Now service the RSS ring if it's active.
*/
- if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
- rx_ring->cnsmr_idx) {
+ if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+ rx_ring->cnsmr_idx) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing RX completion ring %d.\n",
__func__, rx_ring->cq_id);
- work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+ work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
}
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ql_enable_completion_interrupt(qdev, rx_ring->irq);
+ qlge_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
}
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+ qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+ qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -2246,12 +2242,12 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
static int qlge_update_hw_vlan_features(struct net_device *ndev,
netdev_features_t features)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status = 0;
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
bool need_restart = netif_running(ndev);
+ int status = 0;
if (need_restart) {
- status = ql_adapter_down(qdev);
+ status = qlge_adapter_down(qdev);
if (status) {
netif_err(qdev, link, qdev->ndev,
"Failed to bring down the adapter\n");
@@ -2263,7 +2259,7 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
ndev->features = features;
if (need_restart) {
- status = ql_adapter_up(qdev);
+ status = qlge_adapter_up(qdev);
if (status) {
netif_err(qdev, link, qdev->ndev,
"Failed to bring up the adapter\n");
@@ -2292,13 +2288,13 @@ static int qlge_set_features(struct net_device *ndev,
return 0;
}
-static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
+ err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
@@ -2307,29 +2303,29 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int status;
int err;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return err;
}
-static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
+ err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
@@ -2338,35 +2334,35 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int status;
int err;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return err;
}
-static void qlge_restore_vlan(struct ql_adapter *qdev)
+static void qlge_restore_vlan(struct qlge_adapter *qdev)
{
int status;
u16 vid;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return;
for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
__qlge_vlan_rx_add_vid(qdev, vid);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
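All three VLAN helpers above bracket register access with the same hardware-semaphore idiom: take SEM_MAC_ADDR_MASK, touch the MAC-address registers, release. Sketched in isolation (example_vlan_op is illustrative, not driver code):

static int example_vlan_op(struct qlge_adapter *qdev, u16 vid)
{
	int status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);

	if (status)
		return status;
	status = __qlge_vlan_rx_add_vid(qdev, vid);	/* guarded access */
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}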
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
@@ -2386,7 +2382,7 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct intr_context *intr_context = &qdev->intr_context[0];
u32 var;
int work_done = 0;
@@ -2398,18 +2394,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* enable it is not effective.
*/
if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
- ql_disable_completion_interrupt(qdev, 0);
+ qlge_disable_completion_interrupt(qdev, 0);
- var = ql_read32(qdev, STS);
+ var = qlge_read32(qdev, STS);
/*
* Check for fatal error.
*/
if (var & STS_FE) {
- ql_disable_completion_interrupt(qdev, 0);
- ql_queue_asic_error(qdev);
+ qlge_disable_completion_interrupt(qdev, 0);
+ qlge_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
- var = ql_read32(qdev, ERR_STS);
+ var = qlge_read32(qdev, ERR_STS);
netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2418,14 +2414,14 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* Check MPI processor activity.
*/
if ((var & STS_PI) &&
- (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+ (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
netif_err(qdev, intr, qdev->ndev,
"Got MPI processor interrupt.\n");
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
qdev->workqueue, &qdev->mpi_work, 0);
work_done++;
@@ -2436,7 +2432,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* pass. Compare it to the queues that this irq services
* and call napi if there's a match.
*/
- var = ql_read32(qdev, ISR1);
+ var = qlge_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
netif_info(qdev, intr, qdev->ndev,
"Waking handler for rx_ring[0].\n");
@@ -2449,13 +2445,13 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* systematically re-enable the interrupt if we didn't
* schedule napi.
*/
- ql_enable_completion_interrupt(qdev, 0);
+ qlge_enable_completion_interrupt(qdev, 0);
}
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
-static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
{
if (skb_is_gso(skb)) {
int err;
@@ -2469,11 +2465,11 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb)
- << OB_MAC_TRANSPORT_HDR_SHIFT);
+ cpu_to_le16(skb_network_offset(skb) |
+ skb_transport_offset(skb)
+ << OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
if (likely(l3_proto == htons(ETH_P_IP))) {
@@ -2488,17 +2484,17 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
} else if (l3_proto == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
return 1;
}
return 0;
}
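qlge_tso() packs two skb offsets into the single 16-bit net_trans_offset field: the network-header offset in the low bits and the transport-header offset shifted up by OB_MAC_TRANSPORT_HDR_SHIFT. The same packing recurs in qlge_hw_csum_setup() below; isolated as a sketch:

static inline __le16 example_pack_offsets(const struct sk_buff *skb)
{
	/* low bits: network header offset; high bits: transport header */
	return cpu_to_le16(skb_network_offset(skb) |
			   skb_transport_offset(skb)
			   << OB_MAC_TRANSPORT_HDR_SHIFT);
}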
-static void ql_hw_csum_setup(struct sk_buff *skb,
- struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+static void qlge_hw_csum_setup(struct sk_buff *skb,
+ struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
{
int len;
struct iphdr *iph = ip_hdr(skb);
@@ -2508,7 +2504,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+ skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
len = (ntohs(iph->tot_len) - (iph->ihl << 2));
@@ -2516,14 +2512,14 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
check = &(tcp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- (tcp_hdr(skb)->doff << 2));
+ cpu_to_le16(skb_transport_offset(skb) +
+ (tcp_hdr(skb)->doff << 2));
} else {
check = &(udp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- sizeof(struct udphdr));
+ cpu_to_le16(skb_transport_offset(skb) +
+ sizeof(struct udphdr));
}
*check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, len, iph->protocol, 0);
@@ -2531,9 +2527,9 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
+ struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
struct tx_ring_desc *tx_ring_desc;
- struct ob_mac_iocb_req *mac_iocb_ptr;
- struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
u32 tx_ring_idx = (u32)skb->queue_mapping;
@@ -2571,28 +2567,28 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
}
- tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
- ql_hw_csum_setup(skb,
- (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ qlge_hw_csum_setup(skb,
+ (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
}
- if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
- NETDEV_TX_OK) {
+ if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+ NETDEV_TX_OK) {
netif_err(qdev, tx_queued, qdev->ndev,
"Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
- QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
+
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
tx_ring->prod_idx = 0;
wmb();
- ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
@@ -2611,7 +2607,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
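The tail of qlge_send() shows the classic producer-index/doorbell sequence: advance prod_idx with wraparound, issue a write barrier so the descriptor writes are ordered before the index becomes visible to the NIC, then publish the new index through the doorbell register. As a standalone sketch (example_ring_doorbell is illustrative):

static void example_ring_doorbell(struct tx_ring *tx_ring)
{
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)	/* wrap */
		tx_ring->prod_idx = 0;
	wmb();	/* descriptors must be visible before the doorbell */
	qlge_write_db_reg_relaxed(tx_ring->prod_idx,
				  tx_ring->prod_idx_db_reg);
}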
-static void ql_free_shadow_space(struct ql_adapter *qdev)
+static void qlge_free_shadow_space(struct qlge_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
dma_free_coherent(&qdev->pdev->dev,
@@ -2629,7 +2625,7 @@ static void ql_free_shadow_space(struct ql_adapter *qdev)
}
}
-static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
@@ -2658,11 +2654,11 @@ err_wqp_sh_area:
return -ENOMEM;
}
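The shadow-register area allocated above is a page of DMA-coherent memory that the NIC writes ring indices into, letting the driver poll them from RAM (see the qlge_read_sh_reg() call near the top of this diff) instead of doing an MMIO read per check. A minimal sketch of the allocation:

static void *example_alloc_shadow(struct qlge_adapter *qdev,
				  dma_addr_t *dma)
{
	/* one coherent page the NIC DMA-writes producer indices into */
	return dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE, dma,
				  GFP_ATOMIC);
}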
-static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
{
struct tx_ring_desc *tx_ring_desc;
int i;
- struct ob_mac_iocb_req *mac_iocb_ptr;
+ struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
mac_iocb_ptr = tx_ring->wq_base;
tx_ring_desc = tx_ring->q;
@@ -2676,8 +2672,8 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}
-static void ql_free_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
+static void qlge_free_tx_resources(struct qlge_adapter *qdev,
+ struct tx_ring *tx_ring)
{
if (tx_ring->wq_base) {
dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
@@ -2688,20 +2684,20 @@ static void ql_free_tx_resources(struct ql_adapter *qdev,
tx_ring->q = NULL;
}
-static int ql_alloc_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
+static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
+ struct tx_ring *tx_ring)
{
tx_ring->wq_base =
- dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
- &tx_ring->wq_base_dma, GFP_ATOMIC);
+ dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma, GFP_ATOMIC);
if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
goto pci_alloc_err;
tx_ring->q =
- kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
- GFP_KERNEL);
+ kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
+ GFP_KERNEL);
if (!tx_ring->q)
goto err;
@@ -2715,19 +2711,19 @@ pci_alloc_err:
return -ENOMEM;
}
-static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
struct qlge_bq *lbq = &rx_ring->lbq;
unsigned int last_offset;
- last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
+ last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
while (lbq->next_to_clean != lbq->next_to_use) {
struct qlge_bq_desc *lbq_desc =
&lbq->queue[lbq->next_to_clean];
if (lbq_desc->p.pg_chunk.offset == last_offset)
dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
- ql_lbq_block_size(qdev),
+ qlge_lbq_block_size(qdev),
DMA_FROM_DEVICE);
put_page(lbq_desc->p.pg_chunk.page);
@@ -2736,13 +2732,13 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
if (rx_ring->master_chunk.page) {
dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
- ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
+ qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
put_page(rx_ring->master_chunk.page);
rx_ring->master_chunk.page = NULL;
}
}
-static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
@@ -2767,7 +2763,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
/* Free all large and small rx buffers associated
* with the completion queues for this device.
*/
-static void ql_free_rx_buffers(struct ql_adapter *qdev)
+static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
{
int i;
@@ -2775,25 +2771,25 @@ static void ql_free_rx_buffers(struct ql_adapter *qdev)
struct rx_ring *rx_ring = &qdev->rx_ring[i];
if (rx_ring->lbq.queue)
- ql_free_lbq_buffers(qdev, rx_ring);
+ qlge_free_lbq_buffers(qdev, rx_ring);
if (rx_ring->sbq.queue)
- ql_free_sbq_buffers(qdev, rx_ring);
+ qlge_free_sbq_buffers(qdev, rx_ring);
}
}
-static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
{
int i;
for (i = 0; i < qdev->rss_ring_count; i++)
- ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
- HZ / 2);
+ qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
+ HZ / 2);
}
static int qlge_init_bq(struct qlge_bq *bq)
{
struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
- struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_adapter *qdev = rx_ring->qdev;
struct qlge_bq_desc *bq_desc;
__le64 *buf_ptr;
int i;
@@ -2823,8 +2819,8 @@ static int qlge_init_bq(struct qlge_bq *bq)
return 0;
}
-static void ql_free_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static void qlge_free_rx_resources(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring)
{
/* Free the small buffer queue. */
if (rx_ring->sbq.base) {
@@ -2860,15 +2856,15 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
/* Allocate queues and buffers for this completion queue based
* on the values in the parameter structure.
*/
-static int ql_alloc_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
+ struct rx_ring *rx_ring)
{
/*
* Allocate the completion queue for this rx_ring.
*/
rx_ring->cq_base =
- dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
- &rx_ring->cq_base_dma, GFP_ATOMIC);
+ dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma, GFP_ATOMIC);
if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
@@ -2877,14 +2873,14 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
if (rx_ring->cq_id < qdev->rss_ring_count &&
(qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
- ql_free_rx_resources(qdev, rx_ring);
+ qlge_free_rx_resources(qdev, rx_ring);
return -ENOMEM;
}
return 0;
}
-static void ql_tx_ring_clean(struct ql_adapter *qdev)
+static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
{
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
@@ -2903,8 +2899,8 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
"Freeing lost SKB %p, from queue %d, index %d.\n",
tx_ring_desc->skb, j,
tx_ring_desc->index);
- ql_unmap_send(qdev, tx_ring_desc,
- tx_ring_desc->map_cnt);
+ qlge_unmap_send(qdev, tx_ring_desc,
+ tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
}
@@ -2912,27 +2908,27 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
}
}
-static void ql_free_mem_resources(struct ql_adapter *qdev)
+static void qlge_free_mem_resources(struct qlge_adapter *qdev)
{
int i;
for (i = 0; i < qdev->tx_ring_count; i++)
- ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+ qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
for (i = 0; i < qdev->rx_ring_count; i++)
- ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
- ql_free_shadow_space(qdev);
+ qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
+ qlge_free_shadow_space(qdev);
}
-static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
{
int i;
/* Allocate space for our shadow registers and such. */
- if (ql_alloc_shadow_space(qdev))
+ if (qlge_alloc_shadow_space(qdev))
return -ENOMEM;
for (i = 0; i < qdev->rx_ring_count; i++) {
- if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+ if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
netif_err(qdev, ifup, qdev->ndev,
"RX resource allocation failed.\n");
goto err_mem;
@@ -2940,7 +2936,7 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
- if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+ if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
netif_err(qdev, ifup, qdev->ndev,
"TX resource allocation failed.\n");
goto err_mem;
@@ -2949,7 +2945,7 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
return 0;
err_mem:
- ql_free_mem_resources(qdev);
+ qlge_free_mem_resources(qdev);
return -ENOMEM;
}
@@ -2957,7 +2953,7 @@ err_mem:
* The control block is defined as
* "Completion Queue Initialization Control Block", or cqicb.
*/
-static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
struct cqicb *cqicb = &rx_ring->cqicb;
void *shadow_reg = qdev->rx_ring_shadow_reg_area +
@@ -2965,7 +2961,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+ qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u64 tmp;
__le64 *base_indirect_ptr;
@@ -3012,8 +3008,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
* Set up the control block load flags.
*/
cqicb->flags = FLAGS_LC | /* Load queue base address */
- FLAGS_LV | /* Load MSI-X vector */
- FLAGS_LI; /* Load irq delay values */
+ FLAGS_LV | /* Load MSI-X vector */
+ FLAGS_LI; /* Load irq delay values */
if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq.base_dma;
@@ -3043,7 +3039,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
page_entries++;
} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq.base_indirect_dma);
+ cpu_to_le64(rx_ring->sbq.base_indirect_dma);
cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
rx_ring->sbq.next_to_use = 0;
@@ -3053,7 +3049,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
- netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+ netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
@@ -3061,8 +3057,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
}
- err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+ CFG_LCQ, rx_ring->cq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
@@ -3070,15 +3066,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
return err;
}
-static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
{
struct wqicb *wqicb = (struct wqicb *)tx_ring;
void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+ qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
void *shadow_reg = qdev->tx_ring_shadow_reg_area +
- (tx_ring->wq_id * sizeof(u64));
+ (tx_ring->wq_id * sizeof(u64));
u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
- (tx_ring->wq_id * sizeof(u64));
+ (tx_ring->wq_id * sizeof(u64));
int err = 0;
/*
@@ -3105,10 +3101,10 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
- ql_init_tx_ring(qdev, tx_ring);
+ qlge_init_tx_ring(qdev, tx_ring);
- err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16)tx_ring->wq_id);
+ err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+ (u16)tx_ring->wq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
@@ -3116,7 +3112,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
return err;
}
-static void ql_disable_msix(struct ql_adapter *qdev)
+static void qlge_disable_msix(struct qlge_adapter *qdev)
{
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
pci_disable_msix(qdev->pdev);
@@ -3133,7 +3129,7 @@ static void ql_disable_msix(struct ql_adapter *qdev)
* stored in qdev->intr_count. If we don't get that
* many then we reduce the count and try again.
*/
-static void ql_enable_msix(struct ql_adapter *qdev)
+static void qlge_enable_msix(struct qlge_adapter *qdev)
{
int i, err;
@@ -3186,7 +3182,7 @@ msi:
"Running with legacy interrupts.\n");
}
-/* Each vector services 1 RSS ring and and 1 or more
+/* Each vector services 1 RSS ring and 1 or more
* TX completion rings. This function loops through
* the TX completion rings and assigns the vector that
* will service it. An example would be if there are
* 2 vectors (so 2 RSS rings) and 8 TX completion rings.
* This would mean that vector 0 would service RSS ring 0
* and TX completion rings 0,1,2 and 3. Vector 1 would
* and TX completion rings 0,1,2 and 3. Vector 1 would
* service RSS ring 1 and TX completion rings 4,5,6 and 7.
*/
-static void ql_set_tx_vect(struct ql_adapter *qdev)
+static void qlge_set_tx_vect(struct qlge_adapter *qdev)
{
int i, j, vect;
u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
@@ -3203,7 +3199,7 @@ static void ql_set_tx_vect(struct ql_adapter *qdev)
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Assign irq vectors to TX rx_rings.*/
for (vect = 0, j = 0, i = qdev->rss_ring_count;
- i < qdev->rx_ring_count; i++) {
+ i < qdev->rx_ring_count; i++) {
if (j == tx_rings_per_vector) {
vect++;
j = 0;
@@ -3225,7 +3221,7 @@ static void ql_set_tx_vect(struct ql_adapter *qdev)
* rings. This function sets up a bit mask per vector
* that indicates which rings it services.
*/
-static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
+static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
{
int j, vect = ctx->intr;
u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
@@ -3240,8 +3236,8 @@ static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
*/
for (j = 0; j < tx_rings_per_vector; j++) {
ctx->irq_mask |=
- (1 << qdev->rx_ring[qdev->rss_ring_count +
- (vect * tx_rings_per_vector) + j].cq_id);
+ (1 << qdev->rx_ring[qdev->rss_ring_count +
+ (vect * tx_rings_per_vector) + j].cq_id);
}
} else {
/* For single vector we just shift each queue's
@@ -3258,7 +3254,7 @@ static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
* The intr_context structure is used to hook each vector
* to possibly different handlers.
*/
-static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
{
int i = 0;
struct intr_context *intr_context = &qdev->intr_context[0];
@@ -3275,23 +3271,23 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
/* Set up this vector's bit-mask that indicates
* which queues it services.
*/
- ql_set_irq_mask(qdev, intr_context);
+ qlge_set_irq_mask(qdev, intr_context);
/*
* We set up each vectors enable/disable/read bits so
* there's no bit/mask calculations in the critical path.
*/
intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
- | i;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+ | i;
intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
- INTR_EN_IHD | i;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+ INTR_EN_IHD | i;
intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
- i;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+ i;
if (i == 0) {
/* The first vector/queue handles
* broadcast/multicast, fatal errors,
@@ -3322,10 +3318,10 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
* there's no bit/mask calculations in the critical path.
*/
intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE;
if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
/* Experience shows that when using INTx interrupts,
* the device does not always auto-mask INTR_EN_EN.
@@ -3337,7 +3333,7 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
intr_context->intr_dis_mask |= INTR_EN_EI << 16;
}
intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
* Single interrupt means one handler for all rings.
*/
@@ -3348,15 +3344,15 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
* a single vector so it will service all RSS and
* TX completion rings.
*/
- ql_set_irq_mask(qdev, intr_context);
+ qlge_set_irq_mask(qdev, intr_context);
}
/* Tell the TX completion rings which MSIx vector
* they will be using.
*/
- ql_set_tx_vect(qdev);
+ qlge_set_tx_vect(qdev);
}
-static void ql_free_irq(struct ql_adapter *qdev)
+static void qlge_free_irq(struct qlge_adapter *qdev)
{
int i;
struct intr_context *intr_context = &qdev->intr_context[0];
@@ -3371,17 +3367,17 @@ static void ql_free_irq(struct ql_adapter *qdev)
}
}
}
- ql_disable_msix(qdev);
+ qlge_disable_msix(qdev);
}
-static int ql_request_irq(struct ql_adapter *qdev)
+static int qlge_request_irq(struct qlge_adapter *qdev)
{
int i;
int status = 0;
struct pci_dev *pdev = qdev->pdev;
struct intr_context *intr_context = &qdev->intr_context[0];
- ql_resolve_queues_to_irqs(qdev);
+ qlge_resolve_queues_to_irqs(qdev);
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
@@ -3408,11 +3404,11 @@ static int ql_request_irq(struct ql_adapter *qdev)
"%s: dev_id = 0x%p.\n", __func__,
&qdev->rx_ring[0]);
status =
- request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED, &qdev->flags)
- ? 0
- : IRQF_SHARED,
- intr_context->name, &qdev->rx_ring[0]);
+ request_irq(pdev->irq, qlge_isr,
+ test_bit(QL_MSI_ENABLED, &qdev->flags)
+ ? 0
+ : IRQF_SHARED,
+ intr_context->name, &qdev->rx_ring[0]);
if (status)
goto err_irq;
@@ -3425,11 +3421,11 @@ static int ql_request_irq(struct ql_adapter *qdev)
return status;
err_irq:
netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
- ql_free_irq(qdev);
+ qlge_free_irq(qdev);
return status;
}
-static int ql_start_rss(struct ql_adapter *qdev)
+static int qlge_start_rss(struct qlge_adapter *qdev)
{
static const u8 init_hash_seed[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
@@ -3459,7 +3455,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+ status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
@@ -3467,55 +3463,55 @@ static int ql_start_rss(struct ql_adapter *qdev)
return status;
}
-static int ql_clear_routing_entries(struct ql_adapter *qdev)
+static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
{
int i, status = 0;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
- status = ql_set_routing_reg(qdev, i, 0, 0);
+ status = qlge_set_routing_reg(qdev, i, 0, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for CAM packets.\n");
break;
}
}
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
+static int qlge_route_initialize(struct qlge_adapter *qdev)
{
int status = 0;
/* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
+ status = qlge_clear_routing_entries(qdev);
if (status)
return status;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
- status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
+ status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
+ RT_IDX_IP_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for IP CSUM error packets.\n");
goto exit;
}
- status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
+ status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
+ RT_IDX_TU_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for TCP/UDP CSUM error packets.\n");
goto exit;
}
- status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+ status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for broadcast packets.\n");
@@ -3525,8 +3521,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
* routing block.
*/
if (qdev->rss_ring_count > 1) {
- status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
+ status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+ RT_IDX_RSS_MATCH, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for MATCH RSS packets.\n");
@@ -3534,17 +3530,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
}
}
- status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
- RT_IDX_CAM_HIT, 1);
+ status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+ RT_IDX_CAM_HIT, 1);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for CAM packets.\n");
exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
-int ql_cam_route_initialize(struct ql_adapter *qdev)
+int qlge_cam_route_initialize(struct qlge_adapter *qdev)
{
int status, set;
@@ -3552,22 +3548,22 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
* determine if we are setting or clearing
* the MAC address in the CAM.
*/
- set = ql_read32(qdev, STS);
+ set = qlge_read32(qdev, STS);
set &= qdev->port_link_up;
- status = ql_set_mac_addr(qdev, set);
+ status = qlge_set_mac_addr(qdev, set);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
- status = ql_route_initialize(qdev);
+ status = qlge_route_initialize(qdev);
if (status)
netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
-static int ql_adapter_initialize(struct ql_adapter *qdev)
+static int qlge_adapter_initialize(struct qlge_adapter *qdev)
{
u32 value, mask;
int i;
@@ -3578,7 +3574,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
*/
value = SYS_EFE | SYS_FAE;
mask = value << 16;
- ql_write32(qdev, SYS, mask | value);
+ qlge_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
value = NIC_RCV_CFG_DFQ;
@@ -3587,40 +3583,40 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
value |= NIC_RCV_CFG_RV;
mask |= (NIC_RCV_CFG_RV << 16);
}
- ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+ qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K;
+ FSC_EC | FSC_VM_PAGE_4K;
value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
- FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
- ql_write32(qdev, FSC, mask | value);
+ FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+ qlge_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_LEN);
+ qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived, in addition to the usual frame routing.
* This is helpful on bonding where both interfaces can have
* the same MAC address.
*/
- ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+ qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
/* Reroute all packets to our Interface.
* They may have been routed to MPI firmware
* due to WOL.
*/
- value = ql_read32(qdev, MGMT_RCV_CFG);
+ value = qlge_read32(qdev, MGMT_RCV_CFG);
value &= ~MGMT_RCV_CFG_RM;
mask = 0xffff0000;
/* Sticky reg needs clearing due to WOL. */
- ql_write32(qdev, MGMT_RCV_CFG, mask);
- ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+ qlge_write32(qdev, MGMT_RCV_CFG, mask);
+ qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
/* Default WOL is enabled on Mezz cards */
if (qdev->pdev->subsystem_device == 0x0068 ||
@@ -3629,7 +3625,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Start up the rx queues. */
for (i = 0; i < qdev->rx_ring_count; i++) {
- status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+ status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to start rx ring[%d].\n", i);
@@ -3641,7 +3637,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
* then download a RICB to configure RSS.
*/
if (qdev->rss_ring_count > 1) {
- status = ql_start_rss(qdev);
+ status = qlge_start_rss(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
@@ -3650,7 +3646,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Start up the tx queues. */
for (i = 0; i < qdev->tx_ring_count; i++) {
- status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+ status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to start tx ring[%d].\n", i);
@@ -3664,7 +3660,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
- status = ql_cam_route_initialize(qdev);
+ status = qlge_cam_route_initialize(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
@@ -3679,14 +3675,14 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
}
/* Issue soft reset to chip. */
-static int ql_adapter_reset(struct ql_adapter *qdev)
+static int qlge_adapter_reset(struct qlge_adapter *qdev)
{
u32 value;
int status = 0;
unsigned long end_jiffies;
/* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
+ status = qlge_clear_routing_entries(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
@@ -3697,19 +3693,19 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
*/
if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
/* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+ qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
/* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
+ qlge_wait_fifo_empty(qdev);
} else {
clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
}
- ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+ qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
end_jiffies = jiffies + usecs_to_jiffies(30);
do {
- value = ql_read32(qdev, RST_FO);
+ value = qlge_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
break;
cpu_relax();
@@ -3722,13 +3718,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
}
/* Resume management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+ qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
return status;
}
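qlge_adapter_reset() polls RST_FO until the function-reset bit clears, bounded by a jiffies deadline. The reusable shape of that loop, sketched with illustrative names:

static int example_poll_bit_clear(struct qlge_adapter *qdev, u32 reg,
				  u32 bit, unsigned long timeout_us)
{
	unsigned long end = jiffies + usecs_to_jiffies(timeout_us);

	do {
		if (!(qlge_read32(qdev, reg) & bit))
			return 0;
		cpu_relax();	/* yield between register reads */
	} while (time_before(jiffies, end));
	return -ETIMEDOUT;
}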
-static void ql_display_dev_info(struct net_device *ndev)
+static void qlge_display_dev_info(struct net_device *ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
@@ -3742,7 +3738,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
-static int ql_wol(struct ql_adapter *qdev)
+static int qlge_wol(struct qlge_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@@ -3755,7 +3751,7 @@ static int ql_wol(struct ql_adapter *qdev)
*/
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
- WAKE_MCAST | WAKE_BCAST)) {
+ WAKE_MCAST | WAKE_BCAST)) {
netif_err(qdev, ifdown, qdev->ndev,
"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
qdev->wol);
@@ -3763,7 +3759,7 @@ static int ql_wol(struct ql_adapter *qdev)
}
if (qdev->wol & WAKE_MAGIC) {
- status = ql_mb_wol_set_magic(qdev, 1);
+ status = qlge_mb_wol_set_magic(qdev, 1);
if (status) {
netif_err(qdev, ifdown, qdev->ndev,
"Failed to set magic packet on %s.\n",
@@ -3779,7 +3775,7 @@ static int ql_wol(struct ql_adapter *qdev)
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
- status = ql_mb_wol_mode(qdev, wol);
+ status = qlge_mb_wol_mode(qdev, wol);
netif_err(qdev, drv, qdev->ndev,
"WOL %s (wol code 0x%x) on %s\n",
(status == 0) ? "Successfully set" : "Failed",
@@ -3789,7 +3785,7 @@ static int ql_wol(struct ql_adapter *qdev)
return status;
}
-static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
+static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
{
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3799,58 +3795,57 @@ static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}
-static int ql_adapter_down(struct ql_adapter *qdev)
+static int qlge_adapter_down(struct qlge_adapter *qdev)
{
int i, status = 0;
- ql_link_off(qdev);
+ qlge_link_off(qdev);
- ql_cancel_all_work_sync(qdev);
+ qlge_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
napi_disable(&qdev->rx_ring[i].napi);
clear_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_disable_interrupts(qdev);
+ qlge_disable_interrupts(qdev);
- ql_tx_ring_clean(qdev);
+ qlge_tx_ring_clean(qdev);
/* Call netif_napi_del() from common point.
- */
+ */
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
- status = ql_adapter_reset(qdev);
+ status = qlge_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
- ql_free_rx_buffers(qdev);
+ qlge_free_rx_buffers(qdev);
return status;
}
-static int ql_adapter_up(struct ql_adapter *qdev)
+static int qlge_adapter_up(struct qlge_adapter *qdev)
{
int err = 0;
- err = ql_adapter_initialize(qdev);
+ err = qlge_adapter_initialize(qdev);
if (err) {
netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_alloc_rx_buffers(qdev);
+ qlge_alloc_rx_buffers(qdev);
/* If the port is initialized and the
* link is up then turn on the carrier.
*/
- if ((ql_read32(qdev, STS) & qdev->port_init) &&
- (ql_read32(qdev, STS) & qdev->port_link_up))
- ql_link_on(qdev);
+ if ((qlge_read32(qdev, STS) & qdev->port_init) &&
+ (qlge_read32(qdev, STS) & qdev->port_link_up))
+ qlge_link_on(qdev);
/* Restore rx mode. */
clear_bit(QL_ALLMULTI, &qdev->flags);
clear_bit(QL_PROMISCUOUS, &qdev->flags);
@@ -3859,34 +3854,34 @@ static int ql_adapter_up(struct ql_adapter *qdev)
/* Restore vlan setting. */
qlge_restore_vlan(qdev);
- ql_enable_interrupts(qdev);
- ql_enable_all_completion_interrupts(qdev);
+ qlge_enable_interrupts(qdev);
+ qlge_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
return 0;
err_init:
- ql_adapter_reset(qdev);
+ qlge_adapter_reset(qdev);
return err;
}
-static void ql_release_adapter_resources(struct ql_adapter *qdev)
+static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
{
- ql_free_mem_resources(qdev);
- ql_free_irq(qdev);
+ qlge_free_mem_resources(qdev);
+ qlge_free_irq(qdev);
}
-static int ql_get_adapter_resources(struct ql_adapter *qdev)
+static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
{
- if (ql_alloc_mem_resources(qdev)) {
+ if (qlge_alloc_mem_resources(qdev)) {
netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
- return ql_request_irq(qdev);
+ return qlge_request_irq(qdev);
}
static int qlge_close(struct net_device *ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int i;
/* If we hit pci_channel_io_perm_failure
@@ -3910,12 +3905,12 @@ static int qlge_close(struct net_device *ndev)
for (i = 0; i < qdev->rss_ring_count; i++)
cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
- ql_adapter_down(qdev);
- ql_release_adapter_resources(qdev);
+ qlge_adapter_down(qdev);
+ qlge_release_adapter_resources(qdev);
return 0;
}
-static void qlge_set_lb_size(struct ql_adapter *qdev)
+static void qlge_set_lb_size(struct qlge_adapter *qdev)
{
if (qdev->ndev->mtu <= 1500)
qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
@@ -3924,22 +3919,22 @@ static void qlge_set_lb_size(struct ql_adapter *qdev)
qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
}
-static int ql_configure_rings(struct ql_adapter *qdev)
+static int qlge_configure_rings(struct qlge_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
- int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+ int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
/* In a perfect world we have one RSS ring for each CPU
* and each has its own vector. To do that we ask for
- * cpu_cnt vectors. ql_enable_msix() will adjust the
+ * cpu_cnt vectors. qlge_enable_msix() will adjust the
* vector count to what we actually get. We then
* allocate an RSS ring for each.
* Essentially, we are doing min(cpu_count, msix_vector_count).
*/
qdev->intr_count = cpu_cnt;
- ql_enable_msix(qdev);
+ qlge_enable_msix(qdev);
/* Adjust the RSS ring count to the actual vector count. */
qdev->rss_ring_count = qdev->intr_count;
qdev->tx_ring_count = cpu_cnt;
@@ -3952,7 +3947,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
tx_ring->wq_id = i;
tx_ring->wq_len = qdev->tx_ring_size;
tx_ring->wq_size =
- tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+ tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
/*
* The completion queue ID for the tx rings start
@@ -3973,7 +3968,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
*/
rx_ring->cq_len = qdev->rx_ring_size;
rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
rx_ring->lbq.type = QLGE_LB;
rx_ring->sbq.type = QLGE_SB;
INIT_DELAYED_WORK(&rx_ring->refill_work,
@@ -3985,7 +3980,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
/* outbound cq is same size as tx_ring it services. */
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
}
}
return 0;
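qlge_configure_rings() lays the rx_ring array out in two bands: indices 0 .. rss_ring_count-1 are the inbound RSS rings, and rss_ring_count .. rx_ring_count-1 are the outbound completion rings, so TX ring i completes on cq_id rss_ring_count + i (matching the "completion queue ID for the tx rings" comment above). A lookup helper that encodes this layout, as an illustration only:

static struct rx_ring *example_tx_completion_ring(struct qlge_adapter *qdev,
						  unsigned int tx_idx)
{
	/* outbound completion rings sit directly after the RSS rings */
	return &qdev->rx_ring[qdev->rss_ring_count + tx_idx];
}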
@@ -3993,34 +3988,34 @@ static int ql_configure_rings(struct ql_adapter *qdev)
static int qlge_open(struct net_device *ndev)
{
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int err = 0;
- struct ql_adapter *qdev = netdev_priv(ndev);
- err = ql_adapter_reset(qdev);
+ err = qlge_adapter_reset(qdev);
if (err)
return err;
qlge_set_lb_size(qdev);
- err = ql_configure_rings(qdev);
+ err = qlge_configure_rings(qdev);
if (err)
return err;
- err = ql_get_adapter_resources(qdev);
+ err = qlge_get_adapter_resources(qdev);
if (err)
goto error_up;
- err = ql_adapter_up(qdev);
+ err = qlge_adapter_up(qdev);
if (err)
goto error_up;
return err;
error_up:
- ql_release_adapter_resources(qdev);
+ qlge_release_adapter_resources(qdev);
return err;
}
-static int ql_change_rx_buffers(struct ql_adapter *qdev)
+static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
{
int status;
@@ -4041,13 +4036,13 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
}
}
- status = ql_adapter_down(qdev);
+ status = qlge_adapter_down(qdev);
if (status)
goto error;
qlge_set_lb_size(qdev);
- status = ql_adapter_up(qdev);
+ status = qlge_adapter_up(qdev);
if (status)
goto error;
@@ -4062,7 +4057,7 @@ error:
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int status;
if (ndev->mtu == 1500 && new_mtu == 9000)
@@ -4080,7 +4075,7 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
if (!netif_running(qdev->ndev))
return 0;
- status = ql_change_rx_buffers(qdev);
+ status = qlge_change_rx_buffers(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Changing MTU failed.\n");
@@ -4092,7 +4087,7 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
static struct net_device_stats *qlge_get_stats(struct net_device
*ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
struct rx_ring *rx_ring = &qdev->rx_ring[0];
struct tx_ring *tx_ring = &qdev->tx_ring[0];
unsigned long pkts, mcast, dropped, errors, bytes;
@@ -4128,11 +4123,11 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
struct netdev_hw_addr *ha;
int i, status;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return;
/*
@@ -4141,7 +4136,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
*/
if (ndev->flags & IFF_PROMISC) {
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
+ if (qlge_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set promiscuous mode.\n");
@@ -4151,7 +4146,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
} else {
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
+ if (qlge_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear promiscuous mode.\n");
@@ -4168,7 +4163,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if ((ndev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
+ if (qlge_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set all-multi mode.\n");
@@ -4178,7 +4173,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
} else {
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
+ if (qlge_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear all-multi mode.\n");
@@ -4189,22 +4184,22 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
if (!netdev_mc_empty(ndev)) {
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
- MAC_ADDR_TYPE_MULTI_MAC, i)) {
+ if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
+ MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
i++;
}
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (ql_set_routing_reg
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (qlge_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set multicast match mode.\n");
@@ -4213,12 +4208,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
}
exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
struct sockaddr *addr = p;
int status;
@@ -4228,37 +4223,37 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
/* Update local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC,
- qdev->func * MAX_CQ);
+ status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
if (status)
netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
}
-static void ql_asic_reset_work(struct work_struct *work)
+static void qlge_asic_reset_work(struct work_struct *work)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, asic_reset_work.work);
+ struct qlge_adapter *qdev =
+ container_of(work, struct qlge_adapter, asic_reset_work.work);
int status;
rtnl_lock();
- status = ql_adapter_down(qdev);
+ status = qlge_adapter_down(qdev);
if (status)
goto error;
- status = ql_adapter_up(qdev);
+ status = qlge_adapter_up(qdev);
if (status)
goto error;
@@ -4279,13 +4274,13 @@ error:
}
static const struct nic_operations qla8012_nic_ops = {
- .get_flash = ql_get_8012_flash_params,
- .port_initialize = ql_8012_port_initialize,
+ .get_flash = qlge_get_8012_flash_params,
+ .port_initialize = qlge_8012_port_initialize,
};
static const struct nic_operations qla8000_nic_ops = {
- .get_flash = ql_get_8000_flash_params,
- .port_initialize = ql_8000_port_initialize,
+ .get_flash = qlge_get_8000_flash_params,
+ .port_initialize = qlge_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
@@ -4295,21 +4290,21 @@ static const struct nic_operations qla8000_nic_ops = {
* after a fatal firmware error, or doing a firmware
* coredump.
*/
-static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
{
int status = 0;
u32 temp;
u32 nic_func1, nic_func2;
- status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
+ status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+ &temp);
if (status)
return status;
nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
+ MPI_TEST_NIC_FUNC_MASK);
nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
+ MPI_TEST_NIC_FUNC_MASK);
if (qdev->func == nic_func1)
qdev->alt_func = nic_func2;
@@ -4321,16 +4316,16 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
return status;
}
-static int ql_get_board_info(struct ql_adapter *qdev)
+static int qlge_get_board_info(struct qlge_adapter *qdev)
{
int status;
qdev->func =
- (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+ (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
if (qdev->func > 3)
return -EIO;
- status = ql_get_alt_pcie_func(qdev);
+ status = qlge_get_alt_pcie_func(qdev);
if (status)
return status;
@@ -4348,7 +4343,7 @@ static int ql_get_board_info(struct ql_adapter *qdev)
qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
}
- qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+ qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
qdev->device_id = qdev->pdev->device;
if (qdev->device_id == QLGE_DEVICE_ID_8012)
qdev->nic_ops = &qla8012_nic_ops;
@@ -4357,10 +4352,9 @@ static int ql_get_board_info(struct ql_adapter *qdev)
return status;
}
-static void ql_release_all(struct pci_dev *pdev)
+static void qlge_release_all(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = pci_get_drvdata(pdev);
if (qdev->workqueue) {
destroy_workqueue(qdev->workqueue);
@@ -4375,34 +4369,32 @@ static void ql_release_all(struct pci_dev *pdev)
pci_release_regions(pdev);
}
-static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
- int cards_found)
+static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
+ int cards_found)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct net_device *ndev = qdev->ndev;
int err = 0;
- memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "PCI device enable failed.\n");
return err;
}
- qdev->ndev = ndev;
qdev->pdev = pdev;
- pci_set_drvdata(pdev, ndev);
+ pci_set_drvdata(pdev, qdev);
/* Set PCIe read request size */
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out1;
+ goto err_disable_pci;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "PCI region request failed.\n");
- return err;
+ goto err_disable_pci;
}
pci_set_master(pdev);
@@ -4418,7 +4410,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out2;
+ goto err_release_pci;
}
/* Set PCIe reset type for EEH to fundamental. */
@@ -4429,7 +4421,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
- goto err_out2;
+ goto err_release_pci;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4438,24 +4430,24 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
- goto err_out2;
+ goto err_iounmap_base;
}
- err = ql_get_board_info(qdev);
+ err = qlge_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
- goto err_out2;
+ goto err_iounmap_doorbell;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->stats_lock);
if (qlge_mpi_coredump) {
qdev->mpi_coredump =
- vmalloc(sizeof(struct ql_mpi_coredump));
+ vmalloc(sizeof(struct qlge_mpi_coredump));
if (!qdev->mpi_coredump) {
err = -ENOMEM;
- goto err_out2;
+ goto err_iounmap_doorbell;
}
if (qlge_force_coredump)
set_bit(QL_FRC_COREDUMP, &qdev->flags);
@@ -4464,7 +4456,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out2;
+ goto err_free_mpi_coredump;
}
/* Keep local copy of current mac address. */
@@ -4487,15 +4479,14 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
ndev->name);
if (!qdev->workqueue) {
err = -ENOMEM;
- goto err_out2;
+ goto err_free_mpi_coredump;
}
- INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
- INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
- INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
- INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
+ INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
+ INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
+ INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
init_completion(&qdev->ide_completion);
mutex_init(&qdev->mpi_mutex);
@@ -4505,10 +4496,18 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
DRV_NAME, DRV_VERSION);
}
return 0;
-err_out2:
- ql_release_all(pdev);
-err_out1:
+
+err_free_mpi_coredump:
+ vfree(qdev->mpi_coredump);
+err_iounmap_doorbell:
+ iounmap(qdev->doorbell_area);
+err_iounmap_base:
+ iounmap(qdev->reg_base);
+err_release_pci:
+ pci_release_regions(pdev);
+err_disable_pci:
pci_disable_device(pdev);
+
return err;
}
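
The rework above replaces the two numbered labels and the catch-all ql_release_all() with one label per acquired resource, unwound in reverse acquisition order, so each failure releases exactly what was taken so far and nothing more. A minimal sketch of the idiom with hypothetical resource names (not the driver's):

#include <linux/pci.h>

static int example_init(struct pci_dev *pdev)
{
	void __iomem *base;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable_pci;

	base = pci_ioremap_bar(pdev, 1);
	if (!base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	return 0;

	/* Labels name the first cleanup step, in reverse order. */
err_release_regions:
	pci_release_regions(pdev);
err_disable_pci:
	pci_disable_device(pdev);
	return err;
}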
@@ -4527,12 +4526,12 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
-static void ql_timer(struct timer_list *t)
+static void qlge_timer(struct timer_list *t)
{
- struct ql_adapter *qdev = from_timer(qdev, t, timer);
+ struct qlge_adapter *qdev = from_timer(qdev, t, timer);
u32 var = 0;
- var = ql_read32(qdev, STS);
+ var = qlge_read32(qdev, STS);
if (pci_channel_offline(qdev->pdev)) {
netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
return;
@@ -4541,36 +4540,47 @@ static void ql_timer(struct timer_list *t)
mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
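
For reference, the timer_list pattern the renamed qlge_timer() follows: timer_setup() binds the callback and from_timer() recovers the enclosing structure via container_of(). A self-contained sketch (the example_* names are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_adapter {
	struct timer_list timer;
};

static void example_timer_fn(struct timer_list *t)
{
	/* container_of() back to the structure embedding the timer */
	struct example_adapter *adap = from_timer(adap, t, timer);

	/* ... periodic work ..., then re-arm for five seconds out */
	mod_timer(&adap->timer, jiffies + 5 * HZ);
}

static void example_timer_start(struct example_adapter *adap)
{
	/* TIMER_DEFERRABLE lets an idle CPU postpone the expiry */
	timer_setup(&adap->timer, example_timer_fn, TIMER_DEFERRABLE);
	mod_timer(&adap->timer, jiffies + 5 * HZ);
}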
+static const struct devlink_ops qlge_devlink_ops;
+
static int qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
+ struct qlge_netdev_priv *ndev_priv;
+ struct qlge_adapter *qdev = NULL;
struct net_device *ndev = NULL;
- struct ql_adapter *qdev = NULL;
+ struct devlink *devlink;
static int cards_found;
int err = 0;
- ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+ devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
+ if (!devlink)
+ return -ENOMEM;
+
+ qdev = devlink_priv(devlink);
+
+ ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
min(MAX_CPUS,
netif_get_num_default_rss_queues()));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ err = -ENOMEM;
+ goto devlink_free;
+ }
- err = ql_init_device(pdev, ndev, cards_found);
- if (err < 0) {
- free_netdev(ndev);
- return err;
- }
+ ndev_priv = netdev_priv(ndev);
+ ndev_priv->qdev = qdev;
+ ndev_priv->ndev = ndev;
+ qdev->ndev = ndev;
+ err = qlge_init_device(pdev, qdev, cards_found);
+ if (err < 0)
+ goto netdev_free;
- qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXCSUM;
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
@@ -4601,51 +4611,67 @@ static int qlge_probe(struct pci_dev *pdev,
err = register_netdev(ndev);
if (err) {
dev_err(&pdev->dev, "net device registration failed.\n");
- ql_release_all(pdev);
+ qlge_release_all(pdev);
pci_disable_device(pdev);
- free_netdev(ndev);
- return err;
+ goto netdev_free;
}
+
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto netdev_free;
+
+ qlge_health_create_reporters(qdev);
/* Start up the timer to trigger EEH if
* the bus goes dead
*/
- timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
+ timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
mod_timer(&qdev->timer, jiffies + (5 * HZ));
- ql_link_off(qdev);
- ql_display_dev_info(ndev);
+ qlge_link_off(qdev);
+ qlge_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
cards_found++;
return 0;
+
+netdev_free:
+ free_netdev(ndev);
+devlink_free:
+ devlink_free(devlink);
+
+ return err;
}
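
The probe conversion inverts the old ownership: the adapter is now carved out of the devlink allocation with devlink_priv(), and the net_device's private area shrinks to a back-pointer (qlge_netdev_priv above). A sketch of that allocation pattern, using the two-argument devlink_alloc()/devlink_register() API this patch is written against (example_* names hypothetical):

#include <net/devlink.h>

struct example_adapter {
	struct net_device *ndev;
};

static const struct devlink_ops example_devlink_ops;

static int example_probe(struct pci_dev *pdev)
{
	struct example_adapter *adap;
	struct devlink *devlink;
	int err;

	/* the private struct lives inside the devlink allocation */
	devlink = devlink_alloc(&example_devlink_ops, sizeof(*adap));
	if (!devlink)
		return -ENOMEM;
	adap = devlink_priv(devlink);

	err = devlink_register(devlink, &pdev->dev);
	if (err) {
		devlink_free(devlink);
		return err;
	}
	return 0;
}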
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
return qlge_send(skb, ndev);
}
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
- return ql_clean_inbound_rx_ring(rx_ring, budget);
+ return qlge_clean_inbound_rx_ring(rx_ring, budget);
}
static void qlge_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = pci_get_drvdata(pdev);
+ struct net_device *ndev = qdev->ndev;
+ struct devlink *devlink = priv_to_devlink(qdev);
del_timer_sync(&qdev->timer);
- ql_cancel_all_work_sync(qdev);
+ qlge_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
- ql_release_all(pdev);
+ qlge_release_all(pdev);
pci_disable_device(pdev);
+ devlink_health_reporter_destroy(qdev->reporter);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
free_netdev(ndev);
}
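
Continuing that sketch, the matching teardown mirrors probe in reverse; priv_to_devlink() is the inverse of devlink_priv(), and devlink_free() comes before nothing that still touches the adapter, since it releases the adapter storage itself:

static void example_remove(struct pci_dev *pdev)
{
	struct example_adapter *adap = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(adap);
	struct net_device *ndev = adap->ndev;	/* save before free */

	unregister_netdev(ndev);
	devlink_unregister(devlink);
	devlink_free(devlink);	/* releases adap itself */
	free_netdev(ndev);
}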
/* Clean up resources without touching hardware. */
-static void ql_eeh_close(struct net_device *ndev)
+static void qlge_eeh_close(struct net_device *ndev)
{
+ struct qlge_adapter *qdev = netdev_to_qdev(ndev);
int i;
- struct ql_adapter *qdev = netdev_priv(ndev);
if (netif_carrier_ok(ndev)) {
netif_carrier_off(ndev);
@@ -4653,15 +4679,15 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- ql_cancel_all_work_sync(qdev);
+ qlge_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
clear_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_tx_ring_clean(qdev);
- ql_free_rx_buffers(qdev);
- ql_release_adapter_resources(qdev);
+ qlge_tx_ring_clean(qdev);
+ qlge_free_rx_buffers(qdev);
+ qlge_release_adapter_resources(qdev);
}
/*
@@ -4671,8 +4697,8 @@ static void ql_eeh_close(struct net_device *ndev)
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = pci_get_drvdata(pdev);
+ struct net_device *ndev = qdev->ndev;
switch (state) {
case pci_channel_io_normal:
@@ -4681,14 +4707,14 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
netif_device_detach(ndev);
del_timer_sync(&qdev->timer);
if (netif_running(ndev))
- ql_eeh_close(ndev);
+ qlge_eeh_close(ndev);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
del_timer_sync(&qdev->timer);
- ql_eeh_close(ndev);
+ qlge_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -4705,8 +4731,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = pci_get_drvdata(pdev);
pdev->error_state = pci_channel_io_normal;
@@ -4718,7 +4743,7 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
- if (ql_adapter_reset(qdev)) {
+ if (qlge_adapter_reset(qdev)) {
netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
@@ -4729,8 +4754,8 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
static void qlge_io_resume(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct qlge_adapter *qdev = pci_get_drvdata(pdev);
+ struct net_device *ndev = qdev->ndev;
int err = 0;
if (netif_running(ndev)) {
@@ -4756,36 +4781,43 @@ static const struct pci_error_handlers qlge_err_handler = {
static int __maybe_unused qlge_suspend(struct device *dev_d)
{
- struct net_device *ndev = dev_get_drvdata(dev_d);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct qlge_adapter *qdev;
+ struct net_device *ndev;
int err;
+ qdev = pci_get_drvdata(pdev);
+ ndev = qdev->ndev;
netif_device_detach(ndev);
del_timer_sync(&qdev->timer);
if (netif_running(ndev)) {
- err = ql_adapter_down(qdev);
+ err = qlge_adapter_down(qdev);
if (!err)
return err;
}
- ql_wol(qdev);
+ qlge_wol(qdev);
return 0;
}
static int __maybe_unused qlge_resume(struct device *dev_d)
{
- struct net_device *ndev = dev_get_drvdata(dev_d);
- struct ql_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct qlge_adapter *qdev;
+ struct net_device *ndev;
int err;
- pci_set_master(to_pci_dev(dev_d));
+ qdev = pci_get_drvdata(pdev);
+ ndev = qdev->ndev;
+
+ pci_set_master(pdev);
device_wakeup_disable(dev_d);
if (netif_running(ndev)) {
- err = ql_adapter_up(qdev);
+ err = qlge_adapter_up(qdev);
if (err)
return err;
}
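
With drvdata now pointing at the adapter rather than the net_device, the PM callbacks recover it via to_pci_dev() plus pci_get_drvdata(), as above. For context, the surrounding wiring is the standard dev_pm_ops shape (a sketch; names hypothetical):

#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct example_adapter *adap = pci_get_drvdata(pdev);

	netif_device_detach(adap->ndev);
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct example_adapter *adap = pci_get_drvdata(pdev);

	netif_device_attach(adap->ndev);
	return 0;
}

/* __maybe_unused avoids warnings when !CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);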
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 2ff3db2661a9..2630ebf50341 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -1,28 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
#include "qlge.h"
-int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+int qlge_unpause_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
/* Un-pause the RISC */
- tmp = ql_read32(qdev, CSR);
+ tmp = qlge_read32(qdev, CSR);
if (!(tmp & CSR_RP))
return -EIO;
- ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
return 0;
}
-int ql_pause_mpi_risc(struct ql_adapter *qdev)
+int qlge_pause_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
int count;
/* Pause the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
for (count = UDELAY_COUNT; count; count--) {
- tmp = ql_read32(qdev, CSR);
+ tmp = qlge_read32(qdev, CSR);
if (tmp & CSR_RP)
break;
mdelay(UDELAY_DELAY);
@@ -30,17 +30,17 @@ int ql_pause_mpi_risc(struct ql_adapter *qdev)
return (count == 0) ? -ETIMEDOUT : 0;
}
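
The pause/unpause helpers are open-coded bounded polls: write a command, then re-read CSR up to UDELAY_COUNT times with mdelay(UDELAY_DELAY) between reads. The same wait can be phrased with the iopoll helper; a sketch under the assumption that sleeping is allowed in this context, with illustrative 10ms/100ms bounds rather than the driver's actual constants:

#include <linux/iopoll.h>

static int example_pause_risc(struct qlge_adapter *qdev)
{
	u32 tmp;

	qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
	/* poll qlge_read32(qdev, CSR) until CSR_RP is set,
	 * sleeping ~10ms between reads, giving up after 100ms
	 */
	return read_poll_timeout(qlge_read32, tmp, tmp & CSR_RP,
				 10000, 100000, false, qdev, CSR);
}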
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
int count;
/* Reset the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ qlge_write32(qdev, CSR, CSR_CMD_SET_RST);
for (count = UDELAY_COUNT; count; count--) {
- tmp = ql_read32(qdev, CSR);
+ tmp = qlge_read32(qdev, CSR);
if (tmp & CSR_RR) {
- ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ qlge_write32(qdev, CSR, CSR_CMD_CLR_RST);
break;
}
mdelay(UDELAY_DELAY);
@@ -48,47 +48,47 @@ int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
return (count == 0) ? -ETIMEDOUT : 0;
}
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
int status;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
- ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+ qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* get the data */
- *data = ql_read32(qdev, PROC_DATA);
+ *data = qlge_read32(qdev, PROC_DATA);
exit:
return status;
}
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
int status = 0;
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* write the data to the data reg */
- ql_write32(qdev, PROC_DATA, data);
+ qlge_write32(qdev, PROC_DATA, data);
/* trigger the write */
- ql_write32(qdev, PROC_ADDR, reg);
+ qlge_write32(qdev, PROC_ADDR, reg);
/* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
exit:
return status;
}
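
The two helpers above are the whole indirect-access protocol for MPI processor registers: PROC_ADDR selects the target (with PROC_ADDR_R distinguishing reads), PROC_DATA carries the value, and PROC_ADDR_RDY gates each step. Condensed into one happy-path read, with the error handling elided purely for illustration:

static u32 example_read_proc_reg(struct qlge_adapter *qdev, u32 reg)
{
	/* 1. wait for the address/data window to be free */
	qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	/* 2. latch the target address, flagged as a read */
	qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
	/* 3. wait for the hardware to fetch it */
	qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	/* 4. the value is now in the data register */
	return qlge_read32(qdev, PROC_DATA);
}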
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev)
{
- return ql_write_mpi_reg(qdev, 0x00001010, 1);
+ return qlge_write_mpi_reg(qdev, 0x00001010, 1);
}
/* Determine if we are in charge of the firmware. We are, if
@@ -96,7 +96,7 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
* we are the higher function and the lower function
* is not enabled.
*/
-int ql_own_firmware(struct ql_adapter *qdev)
+int qlge_own_firmware(struct qlge_adapter *qdev)
{
u32 temp;
@@ -112,43 +112,43 @@ int ql_own_firmware(struct ql_adapter *qdev)
* enabled, then we are responsible for
* core dump and firmware reset after an error.
*/
- temp = ql_read32(qdev, STS);
+ temp = qlge_read32(qdev, STS);
if (!(temp & (1 << (8 + qdev->alt_func))))
return 1;
return 0;
}
-static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_get_mb_sts(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
if (status)
return -EBUSY;
for (i = 0; i < mbcp->out_count; i++) {
status =
- ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
+ qlge_read_mpi_reg(qdev, qdev->mailbox_out + i,
+ &mbcp->mbox_out[i]);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
return status;
}
/* Wait for a single mailbox command to complete.
* Returns zero on success.
*/
-static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+static int qlge_wait_mbx_cmd_cmplt(struct qlge_adapter *qdev)
{
int count;
u32 value;
for (count = 100; count; count--) {
- value = ql_read32(qdev, STS);
+ value = qlge_read32(qdev, STS);
if (value & STS_PI)
return 0;
mdelay(UDELAY_DELAY); /* 100ms */
@@ -159,7 +159,7 @@ static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
/* Execute a single mailbox command.
* Caller must hold PROC_ADDR semaphore.
*/
-static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_exec_mb_cmd(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -167,10 +167,10 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
* Make sure there's nothing pending.
* This shouldn't happen.
*/
- if (ql_read32(qdev, CSR) & CSR_HRI)
+ if (qlge_read32(qdev, CSR) & CSR_HRI)
return -EIO;
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
if (status)
return status;
@@ -178,17 +178,17 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
* Fill the outbound mailboxes.
*/
for (i = 0; i < mbcp->in_count; i++) {
- status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
+ status = qlge_write_mpi_reg(qdev, qdev->mailbox_in + i,
+ mbcp->mbox_in[i]);
if (status)
goto end;
}
/*
* Wake up the MPI firmware.
*/
- ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+ qlge_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
end:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+ qlge_sem_unlock(qdev, SEM_PROC_REG_MASK);
return status;
}
@@ -199,7 +199,7 @@ end:
* to handle processing this since a mailbox command
* will need to be sent to ACK the request.
*/
-static int ql_idc_req_aen(struct ql_adapter *qdev)
+static int qlge_idc_req_aen(struct qlge_adapter *qdev)
{
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
@@ -209,17 +209,17 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
* handle the request.
*/
mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
* we don't get another interrupt
* when we leave mpi_worker.
*/
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
}
return status;
@@ -228,17 +228,17 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
/* Process an inter-device event completion.
* If good, signal the caller's completion.
*/
-static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+static int qlge_idc_cmplt_aen(struct qlge_adapter *qdev)
{
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting RISC!\n");
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
} else {
/* Wake up the sleeping mpi_idc_work thread that is
* waiting for this event.
@@ -248,13 +248,13 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
return status;
}
-static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static void qlge_link_up(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 2;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"%s: Could not get mailbox status.\n", __func__);
@@ -268,7 +268,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* then set up the CAM and frame routing.
*/
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
- status = ql_cam_route_initialize(qdev);
+ status = qlge_cam_route_initialize(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
@@ -288,34 +288,34 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* we don't get another interrupt
* when we leave mpi_worker dpc.
*/
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 0);
}
- ql_link_on(qdev);
+ qlge_link_on(qdev);
}
-static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static void qlge_link_down(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 3;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
- ql_link_off(qdev);
+ qlge_link_off(qdev);
}
-static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_sfp_in(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 5;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
@@ -324,13 +324,13 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
-static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_sfp_out(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 1;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
@@ -339,13 +339,13 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
-static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_aen_lost(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 6;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
} else {
@@ -360,20 +360,20 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
-static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static void qlge_init_fw_done(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 2;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
- status = ql_cam_route_initialize(qdev);
+ status = qlge_cam_route_initialize(qdev);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
@@ -387,26 +387,26 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
* It also gets called when a mailbox command is polling for
* its completion.
*/
-static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_mpi_handler(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
int orig_count = mbcp->out_count;
/* Just get mailbox zero for now. */
mbcp->out_count = 1;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
+ qlge_queue_asic_error(qdev);
goto end;
}
switch (mbcp->mbox_out[0]) {
- /* This case is only active when we arrive here
- * as a result of issuing a mailbox command to
- * the firmware.
- */
+ /* This case is only active when we arrive here
+ * as a result of issuing a mailbox command to
+ * the firmware.
+ */
case MB_CMD_STS_INTRMDT:
case MB_CMD_STS_GOOD:
case MB_CMD_STS_INVLD_CMD:
@@ -421,34 +421,34 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
* command completion.
*/
mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
return status;
- /* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode.
- */
+ /* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode.
+ */
case AEN_IDC_REQ:
- status = ql_idc_req_aen(qdev);
+ status = qlge_idc_req_aen(qdev);
break;
- /* Process and inbound IDC event.
- * This will happen when we're trying to
- * change tx/rx max frame size, change pause
- * parameters or loopback mode.
- */
+ /* Process an inbound IDC event.
+ * This will happen when we're trying to
+ * change tx/rx max frame size, change pause
+ * parameters or loopback mode.
+ */
case AEN_IDC_CMPLT:
case AEN_IDC_EXT:
- status = ql_idc_cmplt_aen(qdev);
+ status = qlge_idc_cmplt_aen(qdev);
break;
case AEN_LINK_UP:
- ql_link_up(qdev, mbcp);
+ qlge_link_up(qdev, mbcp);
break;
case AEN_LINK_DOWN:
- ql_link_down(qdev, mbcp);
+ qlge_link_down(qdev, mbcp);
break;
case AEN_FW_INIT_DONE:
@@ -457,48 +457,48 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
return status;
}
- ql_init_fw_done(qdev, mbcp);
+ qlge_init_fw_done(qdev, mbcp);
break;
case AEN_AEN_SFP_IN:
- ql_sfp_in(qdev, mbcp);
+ qlge_sfp_in(qdev, mbcp);
break;
case AEN_AEN_SFP_OUT:
- ql_sfp_out(qdev, mbcp);
+ qlge_sfp_out(qdev, mbcp);
break;
- /* This event can arrive at boot time or after an
- * MPI reset if the firmware failed to initialize.
- */
+ /* This event can arrive at boot time or after an
+ * MPI reset if the firmware failed to initialize.
+ */
case AEN_FW_INIT_FAIL:
/* If we're in the process of executing the firmware,
* then convert the status to normal mailbox status.
*/
if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
+ status = qlge_get_mb_sts(qdev, mbcp);
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
netif_err(qdev, drv, qdev->ndev,
"Firmware initialization failed.\n");
status = -EIO;
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
netif_err(qdev, drv, qdev->ndev, "System Error.\n");
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
status = -EIO;
break;
case AEN_AEN_LOST:
- ql_aen_lost(qdev, mbcp);
+ qlge_aen_lost(qdev, mbcp);
break;
case AEN_DCBX_CHG:
@@ -510,7 +510,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Clear the MPI firmware status. */
}
end:
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
/* Restore the original mailbox count to
* what the caller asked for. This can get
* changed when a mailbox command is waiting
@@ -526,7 +526,7 @@ end:
* element in the array contains the value for its
* respective mailbox register.
*/
-static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int qlge_mailbox_command(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
unsigned long count;
@@ -534,10 +534,10 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
/* Load the mailbox registers and wake up MPI RISC. */
- status = ql_exec_mb_cmd(qdev, mbcp);
+ status = qlge_exec_mb_cmd(qdev, mbcp);
if (status)
goto end;
@@ -556,7 +556,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
count = jiffies + HZ * MAILBOX_TIMEOUT;
do {
/* Wait for the interrupt to come in. */
- status = ql_wait_mbx_cmd_cmplt(qdev);
+ status = qlge_wait_mbx_cmd_cmplt(qdev);
if (status)
continue;
@@ -565,7 +565,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
* will be spawned. If it's our completion
* we will catch it below.
*/
- status = ql_mpi_handler(qdev, mbcp);
+ status = qlge_mpi_handler(qdev, mbcp);
if (status)
goto end;
@@ -574,9 +574,9 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
* completion then get out.
*/
if (((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_GOOD) ||
- ((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_INTRMDT))
+ MB_CMD_STS_GOOD) ||
+ ((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_INTRMDT))
goto done;
} while (time_before(jiffies, count));
@@ -590,17 +590,17 @@ done:
/* Now we can clear the interrupt condition
* and look at our status.
*/
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
if (((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_GOOD) &&
- ((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_INTRMDT)) {
+ MB_CMD_STS_GOOD) &&
+ ((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_INTRMDT)) {
status = -EIO;
}
end:
/* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
return status;
}
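
Two conventions above are worth spelling out. The INTR_MASK writes appear to use the adapter's paired-mask register format: the high 16 bits select which low bits to update, so (INTR_MASK_PI << 16) clears PI (entering polled mode) and (INTR_MASK_PI << 16) | INTR_MASK_PI sets it again. And callers drive qlge_mailbox_command() through a stack-allocated mbox_params; a usage sketch modeled on the wrappers that follow (the register counts here are illustrative):

static int example_mb_about_fw(struct qlge_adapter *qdev, u32 *fw_rev)
{
	struct mbox_params mbc;
	int status;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;	/* mailbox registers we fill */
	mbc.out_count = 3;	/* registers to read back */
	mbc.mbox_in[0] = MB_CMD_ABOUT_FW;

	status = qlge_mailbox_command(qdev, &mbc);
	if (status)
		return status;

	*fw_rev = mbc.mbox_out[1];	/* firmware revision word */
	return 0;
}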
@@ -609,7 +609,7 @@ end:
* driver banner and for ethtool info.
* Returns zero on success.
*/
-int ql_mb_about_fw(struct ql_adapter *qdev)
+int qlge_mb_about_fw(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -622,7 +622,7 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -641,7 +641,7 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
/* Get functional state for MPI firmware.
* Returns zero on success.
*/
-int ql_mb_get_fw_state(struct ql_adapter *qdev)
+int qlge_mb_get_fw_state(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -654,7 +654,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -680,7 +680,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send an ACK mailbox command to the firmware to
* let it continue with the change.
*/
-static int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int qlge_mb_idc_ack(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -697,7 +697,7 @@ static int ql_mb_idc_ack(struct ql_adapter *qdev)
mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -712,7 +712,7 @@ static int ql_mb_idc_ack(struct ql_adapter *qdev)
* for the current port.
* Most likely will block.
*/
-int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+int qlge_mb_set_port_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -727,7 +727,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[1] = qdev->link_config;
mbcp->mbox_in[2] = qdev->max_frame_size;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -742,8 +742,8 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
-static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
+static int qlge_mb_dump_ram(struct qlge_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
{
int status = 0;
struct mbox_params mbc;
@@ -764,7 +764,7 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
mbcp->mbox_in[7] = LSW(MSD(req_dma));
mbcp->mbox_in[8] = MSW(addr);
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -776,8 +776,8 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
}
/* Issue a mailbox command to dump RISC RAM. */
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
+int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
{
int status;
char *my_buf;
@@ -789,7 +789,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
if (!my_buf)
return -EIO;
- status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ status = qlge_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
if (!status)
memcpy(buf, my_buf, word_count * sizeof(u32));
@@ -802,7 +802,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
* for the current port.
* Most likely will block.
*/
-int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+int qlge_mb_get_port_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -815,7 +815,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -832,7 +832,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
}
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -846,7 +846,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
mbcp->mbox_in[1] = wol;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -857,7 +857,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
}
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -888,7 +888,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
mbcp->mbox_in[7] = 0;
}
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -906,7 +906,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
* The firmware will complete the request if the other
* function doesn't respond.
*/
-static int ql_idc_wait(struct ql_adapter *qdev)
+static int qlge_idc_wait(struct qlge_adapter *qdev)
{
int status = -ETIMEDOUT;
struct mbox_params *mbcp = &qdev->idc_mbc;
@@ -947,7 +947,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
return status;
}
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -961,7 +961,7 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
mbcp->mbox_in[1] = led_config;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -974,7 +974,7 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
}
-int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+int qlge_mb_get_led_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -987,7 +987,7 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -1001,7 +1001,7 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
}
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
+int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -1015,7 +1015,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
mbcp->mbox_in[1] = control;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -1038,7 +1038,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
}
/* Returns a negative error code or the mailbox command status. */
-static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+static int qlge_mb_get_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 *control)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -1052,7 +1052,7 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
- status = ql_mailbox_command(qdev, mbcp);
+ status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -1073,15 +1073,15 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
return status;
}
-int ql_wait_fifo_empty(struct ql_adapter *qdev)
+int qlge_wait_fifo_empty(struct qlge_adapter *qdev)
{
int count;
u32 mgmnt_fifo_empty;
u32 nic_fifo_empty;
for (count = 6; count; count--) {
- nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
- ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+ nic_fifo_empty = qlge_read32(qdev, STS) & STS_NFE;
+ qlge_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
if (nic_fifo_empty && mgmnt_fifo_empty)
return 0;
@@ -1093,14 +1093,14 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
/* API called in work thread context to set new TX/RX
* maximum frame size values to match MTU.
*/
-static int ql_set_port_cfg(struct ql_adapter *qdev)
+static int qlge_set_port_cfg(struct qlge_adapter *qdev)
{
int status;
- status = ql_mb_set_port_cfg(qdev);
+ status = qlge_mb_set_port_cfg(qdev);
if (status)
return status;
- status = ql_idc_wait(qdev);
+ status = qlge_idc_wait(qdev);
return status;
}
@@ -1112,13 +1112,13 @@ static int ql_set_port_cfg(struct ql_adapter *qdev)
* from the firmware and, if necessary, changes them to match
* the MTU setting.
*/
-void ql_mpi_port_cfg_work(struct work_struct *work)
+void qlge_mpi_port_cfg_work(struct work_struct *work)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+ struct qlge_adapter *qdev =
+ container_of(work, struct qlge_adapter, mpi_port_cfg_work.work);
int status;
- status = ql_mb_get_port_cfg(qdev);
+ status = qlge_mb_get_port_cfg(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
@@ -1131,7 +1131,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
- status = ql_set_port_cfg(qdev);
+ status = qlge_set_port_cfg(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to set port config data.\n");
@@ -1141,7 +1141,7 @@ end:
clear_bit(QL_PORT_CFG, &qdev->flags);
return;
err:
- ql_queue_fw_error(qdev);
+ qlge_queue_fw_error(qdev);
goto end;
}
@@ -1151,10 +1151,10 @@ err:
* has been made and then send a mailbox command ACKing
* the change request.
*/
-void ql_mpi_idc_work(struct work_struct *work)
+void qlge_mpi_idc_work(struct work_struct *work)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_idc_work.work);
+ struct qlge_adapter *qdev =
+ container_of(work, struct qlge_adapter, mpi_idc_work.work);
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
u32 aen;
@@ -1170,7 +1170,7 @@ void ql_mpi_idc_work(struct work_struct *work)
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
- ql_link_off(qdev);
+ qlge_link_off(qdev);
fallthrough;
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
@@ -1180,7 +1180,7 @@ void ql_mpi_idc_work(struct work_struct *work)
set_bit(QL_CAM_RT_SET, &qdev->flags);
/* Do ACK if required */
if (timeout) {
- status = ql_mb_idc_ack(qdev);
+ status = qlge_mb_idc_ack(qdev);
if (status)
netif_err(qdev, drv, qdev->ndev,
"Bug: No pending IDC!\n");
@@ -1191,18 +1191,18 @@ void ql_mpi_idc_work(struct work_struct *work)
}
break;
- /* These sub-commands issued by another (FCoE)
- * function are requesting to do an operation
- * on the shared resource (MPI environment).
- * We currently don't issue these so we just
- * ACK the request.
- */
+ /* These sub-commands issued by another (FCoE)
+ * function are requesting to do an operation
+ * on the shared resource (MPI environment).
+ * We currently don't issue these so we just
+ * ACK the request.
+ */
case MB_CMD_IOP_RESTART_MPI:
case MB_CMD_IOP_PREP_LINK_DOWN:
/* Drop the link, reload the routing
* table when link comes up.
*/
- ql_link_off(qdev);
+ qlge_link_off(qdev);
set_bit(QL_CAM_RT_SET, &qdev->flags);
fallthrough;
case MB_CMD_IOP_DVR_START:
@@ -1213,7 +1213,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_IOP_NONE: /* an IDC without params */
/* Do ACK if required */
if (timeout) {
- status = ql_mb_idc_ack(qdev);
+ status = qlge_mb_idc_ack(qdev);
if (status)
netif_err(qdev, drv, qdev->ndev,
"Bug: No pending IDC!\n");
@@ -1226,54 +1226,48 @@ void ql_mpi_idc_work(struct work_struct *work)
}
}
-void ql_mpi_work(struct work_struct *work)
+void qlge_mpi_work(struct work_struct *work)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_work.work);
+ struct qlge_adapter *qdev =
+ container_of(work, struct qlge_adapter, mpi_work.work);
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
int err = 0;
mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- while (ql_read32(qdev, STS) & STS_PI) {
+ while (qlge_read32(qdev, STS) & STS_PI) {
memset(mbcp, 0, sizeof(struct mbox_params));
mbcp->out_count = 1;
/* Don't continue if an async event
* did not complete properly.
*/
- err = ql_mpi_handler(qdev, mbcp);
+ err = qlge_mpi_handler(qdev, mbcp);
if (err)
break;
}
/* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
}
-void ql_mpi_reset_work(struct work_struct *work)
+void qlge_mpi_reset_work(struct work_struct *work)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_reset_work.work);
+ struct qlge_adapter *qdev =
+ container_of(work, struct qlge_adapter, mpi_reset_work.work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
/* If we're not the dominant NIC function,
* then there is nothing to do.
*/
- if (!ql_own_firmware(qdev)) {
+ if (!qlge_own_firmware(qdev)) {
netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
return;
}
- if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
- netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
- qdev->core_is_dumped = 1;
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_core_to_log, 5 * HZ);
- }
- ql_soft_reset_mpi_risc(qdev);
+ qlge_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 8794907a39f4..ebd9b96a8211 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -779,7 +779,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
/* setting auth algo number */
val16 = (u16)psta->authalg;
- if (status != _STATS_SUCCESSFUL_)
+ if (status != WLAN_STATUS_SUCCESS)
val16 = 0;
if (val16) {
@@ -2675,13 +2675,13 @@ static unsigned int OnAuth(struct adapter *padapter,
DBG_88E("auth rejected due to bad alg [alg=%d, auth_mib=%d] %02X%02X%02X%02X%02X%02X\n",
algorithm, auth_mode, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
- status = _STATS_NO_SUPP_ALG_;
+ status = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
goto auth_fail;
}
if (!rtw_access_ctrl(padapter, sa)) {
- status = _STATS_UNABLE_HANDLE_STA_;
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto auth_fail;
}
@@ -2692,7 +2692,7 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat = rtw_alloc_stainfo(pstapriv, sa);
if (!pstat) {
DBG_88E(" Exceed the upper limit of supported clients...\n");
- status = _STATS_UNABLE_HANDLE_STA_;
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto auth_fail;
}
@@ -2724,7 +2724,7 @@ static unsigned int OnAuth(struct adapter *padapter,
if ((pstat->auth_seq + 1) != seq) {
DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
seq, pstat->auth_seq + 1);
- status = _STATS_OUT_OF_AUTH_SEQ_;
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
@@ -2737,7 +2737,7 @@ static unsigned int OnAuth(struct adapter *padapter,
} else {
DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
seq, pstat->auth_seq + 1);
- status = _STATS_OUT_OF_AUTH_SEQ_;
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
} else { /* shared system or auto authentication */
@@ -2757,7 +2757,7 @@ static unsigned int OnAuth(struct adapter *padapter,
if (!p || ie_len <= 0) {
DBG_88E("auth rejected because challenge failure!(1)\n");
- status = _STATS_CHALLENGE_FAIL_;
+ status = WLAN_STATUS_CHALLENGE_FAIL;
goto auth_fail;
}
@@ -2768,13 +2768,13 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->expire_to = pstapriv->assoc_to;
} else {
DBG_88E("auth rejected because challenge failure!\n");
- status = _STATS_CHALLENGE_FAIL_;
+ status = WLAN_STATUS_CHALLENGE_FAIL;
goto auth_fail;
}
} else {
DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
seq, pstat->auth_seq + 1);
- status = _STATS_OUT_OF_AUTH_SEQ_;
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
}
@@ -2782,7 +2782,7 @@ static unsigned int OnAuth(struct adapter *padapter,
/* Now, we are going to issue_auth... */
pstat->auth_seq = seq + 1;
- issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
+ issue_auth(padapter, pstat, (unsigned short)(WLAN_STATUS_SUCCESS));
if (pstat->state & WIFI_FW_AUTH_SUCCESS)
pstat->auth_seq = 0;
@@ -2892,7 +2892,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
int i, wpa_ie_len, left;
unsigned char supportRate[16];
int supportRateNum;
- unsigned short status = _STATS_SUCCESSFUL_;
+ unsigned short status = WLAN_STATUS_SUCCESS;
unsigned short frame_type, ie_offset = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -2953,7 +2953,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
!elems.ssid) {
DBG_88E("STA %pM sent invalid association request\n",
pstat->hwaddr);
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto OnAssocReqFail;
}
@@ -2964,18 +2964,18 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (!p || ie_len == 0) {
/* broadcast ssid, however it is not allowed in assocreq */
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto OnAssocReqFail;
} else {
/* check if ssid match */
if (memcmp((void *)(p + 2), cur->ssid.ssid, cur->ssid.ssid_length))
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
if (ie_len != cur->ssid.ssid_length)
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
}
- if (status != _STATS_SUCCESSFUL_)
+ if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
/* check if the supported rate is ok */
@@ -2986,7 +2986,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
/* supportRateNum = AP_BSSRATE_LEN; */
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto OnAssocReqFail;
} else {
memcpy(supportRate, p + 2, ie_len);
@@ -3066,7 +3066,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie_len = 0;
}
- if (status != _STATS_SUCCESSFUL_)
+ if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
@@ -3097,7 +3097,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (!selected_registrar) {
DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
- status = _STATS_UNABLE_HANDLE_STA_;
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto OnAssocReqFail;
}
@@ -3198,7 +3198,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
pstat->flags &= ~WLAN_STA_HT;
}
if ((!pmlmepriv->htpriv.ht_option) && (pstat->flags & WLAN_STA_HT)) {
- status = _STATS_FAILURE_;
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto OnAssocReqFail;
}
@@ -3225,7 +3225,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
else
pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
- if (status != _STATS_SUCCESSFUL_)
+ if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
/* TODO: identify_proprietary_vendor_ie(); */
@@ -3276,7 +3276,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
+ if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == WLAN_STATUS_SUCCESS)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 1895f81e09b5..1b9006879a11 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -74,20 +74,6 @@ enum WIFI_FRAME_SUBTYPE {
WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
};
-enum WIFI_STATUS_CODE {
- _STATS_SUCCESSFUL_ = 0,
- _STATS_FAILURE_ = 1,
- _STATS_CAP_FAIL_ = 10,
- _STATS_NO_ASOC_ = 11,
- _STATS_OTHER_ = 12,
- _STATS_NO_SUPP_ALG_ = 13,
- _STATS_OUT_OF_AUTH_SEQ_ = 14,
- _STATS_CHALLENGE_FAIL_ = 15,
- _STATS_AUTH_TIMEOUT_ = 16,
- _STATS_UNABLE_HANDLE_STA_ = 17,
- _STATS_RATE_FAIL_ = 18,
-};
-
enum WIFI_REG_DOMAIN {
DOMAIN_FCC = 1,
DOMAIN_IC = 2,
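
The deleted WIFI_STATUS_CODE enum duplicated the standard 802.11 status codes, so the replacements earlier in this patch are value-for-value swaps to the constants from <linux/ieee80211.h>. A compile-time cross-check of the values relied on (as defined by the 802.11 spec):

#include <linux/build_bug.h>
#include <linux/ieee80211.h>

static inline void example_status_value_check(void)
{
	BUILD_BUG_ON(WLAN_STATUS_SUCCESS != 0);
	BUILD_BUG_ON(WLAN_STATUS_UNSPECIFIED_FAILURE != 1);
	BUILD_BUG_ON(WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG != 13);
	BUILD_BUG_ON(WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION != 14);
	BUILD_BUG_ON(WLAN_STATUS_CHALLENGE_FAIL != 15);
	BUILD_BUG_ON(WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA != 17);
}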
@@ -102,73 +88,64 @@ enum WIFI_REG_DOMAIN {
DOMAIN_MAX
};
-#define _TO_DS_ BIT(8)
-#define _FROM_DS_ BIT(9)
-#define _MORE_FRAG_ BIT(10)
-#define _RETRY_ BIT(11)
-#define _PWRMGT_ BIT(12)
-#define _MORE_DATA_ BIT(13)
-#define _PRIVACY_ BIT(14)
-#define _ORDER_ BIT(15)
-
#define SetToDs(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_TODS)
-#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_TO_DS_)) != 0)
+#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0)
#define ClearToDs(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_TO_DS_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_TODS))
#define SetFrDs(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_FROM_DS_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_FROMDS)
-#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
+#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0)
#define ClearFrDs(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_FROM_DS_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_FROMDS))
#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
#define SetMFrag(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)
-#define GetMFrag(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_FRAG_)) != 0)
+#define GetMFrag(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0)
#define ClearMFrag(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_MOREFRAGS))
#define SetRetry(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_RETRY_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_RETRY)
-#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_RETRY_)) != 0)
+#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0)
#define ClearRetry(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_RETRY_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_RETRY))
#define SetPwrMgt(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_PWRMGT_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_PM)
-#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_PWRMGT_)) != 0)
+#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_PM)) != 0)
#define ClearPwrMgt(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_PWRMGT_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_PM))
#define SetMData(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_DATA_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_MOREDATA)
-#define GetMData(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_DATA_)) != 0)
+#define GetMData(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0)
#define ClearMData(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_))
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_MOREDATA))
#define SetPrivacy(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_)
+ *(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_PROTECTED)
#define GetPrivacy(pbuf) \
- (((*(__le16 *)(pbuf)) & cpu_to_le16(_PRIVACY_)) != 0)
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0)
#define GetOrder(pbuf) \
- (((*(__le16 *)(pbuf)) & cpu_to_le16(_ORDER_)) != 0)
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0)
#define GetFrameType(pbuf) \
(le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2)))
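
Likewise, the deleted _TO_DS_ through _ORDER_ defines were bit-for-bit aliases of the frame-control flags in <linux/ieee80211.h> (IEEE80211_FCTL_TODS is 0x0100, i.e. BIT(8), up through IEEE80211_FCTL_ORDER at 0x8000, i.e. BIT(15)), so the macro bodies above change name only. One of the accessors, written out directly as a sketch:

#include <linux/ieee80211.h>

static bool example_is_to_ds(const u8 *pbuf)
{
	/* frame control is the first little-endian u16 of the header */
	__le16 fc = *(const __le16 *)pbuf;

	return !!(fc & cpu_to_le16(IEEE80211_FCTL_TODS));
}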
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 6f42f13a71fa..bf22f130d3e1 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1865,7 +1865,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
goto exit;
}
- strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strscpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
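
On the strlcpy() to strscpy() swap here and below: strlcpy() returns strlen(src) and therefore always walks the entire source string, even past the copy bound, while strscpy() reads at most the bound, guarantees NUL termination, and signals truncation. A sketch of checking its result:

#include <linux/string.h>

static bool example_bounded_copy(char *dst, const char *src, size_t size)
{
	ssize_t ret = strscpy(dst, src, size);

	/* >= 0: characters copied; -E2BIG: src was truncated */
	return ret >= 0;
}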
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 43ebd11b53fe..efad43d8e465 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index c22ddeb9a56b..b0efa2eb705e 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -205,5 +205,5 @@ drop_packet:
("%s: drop, tx_drop=%d\n", __func__, (u32)pxmitpriv->tx_drop));
exit:
- return 0;
+ return NETDEV_TX_OK;
}
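
The return-value fix above is about typing rather than behavior: NETDEV_TX_OK is 0, but ndo_start_xmit() is declared to return netdev_tx_t, so the named constant keeps the signature honest. A minimal shape for such a handler (a sketch; it just consumes the skb):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	dev_kfree_skb_any(skb);	/* a real driver would queue it */
	return NETDEV_TX_OK;	/* NETDEV_TX_BUSY asks for a requeue */
}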
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 03fcc23516fd..963a2ffbc1fb 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -3,6 +3,7 @@ config RTLLIB
tristate "Support for rtllib wireless devices"
depends on WLAN && m
select LIB80211
+ select CRC32
help
If you have a wireless card that uses rtllib, say
Y. Currently the only card is the rtl8192e.
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 9f869fb3eaa8..ff843d7ec606 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -129,9 +129,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
RegRCR = rtl92e_readl(dev, RCR);
priv->ReceiveConfig = RegRCR;
- if (Type == true)
+ if (Type)
RegRCR |= (RCR_CBSSID);
- else if (Type == false)
+ else
RegRCR &= (~RCR_CBSSID);
rtl92e_writel(dev, RCR, RegRCR);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 663675efcfe4..9078fadd65f9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1389,7 +1389,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
rtl92e_dm_watchdog(dev);
- if (rtllib_act_scanning(priv->rtllib, false) == false) {
+ if (!rtllib_act_scanning(priv->rtllib, false)) {
if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
RTLLIB_NOLINK) &&
(ieee->eRFPowerState == eRfOn) && !ieee->is_set_key &&
@@ -2471,7 +2471,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
priv->ops = ops;
- if (rtl92e_check_adapter(pdev, dev) == false)
+ if (!rtl92e_check_adapter(pdev, dev))
goto err_unmap;
dev->irq = pdev->irq;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 462835684e8b..e340be3ebb97 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1765,7 +1765,7 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
- if (priv->rtllib->bCTSToSelfEnable != true) {
+ if (!priv->rtllib->bCTSToSelfEnable) {
pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
@@ -2447,7 +2447,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
unsigned int txhipower_threshold = 0;
unsigned int txlowpower_threshold = 0;
- if (priv->rtllib->bdynamic_txpower_enable != true) {
+ if (!priv->rtllib->bdynamic_txpower_enable) {
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
return;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 6ae7a67e767f..f4f7b74c8cd1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -18,9 +18,9 @@ static void _rtl92e_ethtool_get_drvinfo(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}
static u32 _rtl92e_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 9475f8c6edf7..c5e89eb40342 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -290,7 +290,7 @@ void rtl92e_leisure_ps_leave(struct net_device *dev)
if (priv->rtllib->SetFwCmdHandler)
priv->rtllib->SetFwCmdHandler(dev,
FW_CMD_LPS_LEAVE);
- }
+ }
}
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index d31b5e1c8df4..66c135321da4 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -924,7 +924,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
- if ((ieee->pHTInfo->bCurRxReorderEnable == false) ||
+ if (!ieee->pHTInfo->bCurRxReorderEnable ||
!ieee->current_network.qos_data.active ||
!IsDataFrame(skb->data) ||
IsLegacyDataFrame(skb->data)) {
@@ -1442,8 +1442,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
}
/* Indicate packets to upper layer or Rx Reorder */
- if (ieee->pHTInfo->bCurRxReorderEnable == false || pTS == NULL ||
- bToOtherSTA)
+ if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL || bToOtherSTA)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 2c752ba5a802..2d3be91b113d 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1352,9 +1352,8 @@ rtllib_association_req(struct rtllib_network *beacon,
rtllib_WMM_Info(ieee, &tag);
}
- if (wps_ie_len && ieee->wps_ie) {
+ if (wps_ie_len && ieee->wps_ie)
skb_put_data(skb, ieee->wps_ie, wps_ie_len);
- }
if (turbo_info_len) {
tag = skb_put(skb, turbo_info_len);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index e0d79daca24a..8add17752eed 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -297,7 +297,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
return;
}
- if (pTxTs->TxAdmittedBARecord.bValid == false) {
+ if (!pTxTs->TxAdmittedBARecord.bValid) {
if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
KEY_TYPE_NA)) {
;
@@ -307,7 +307,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
TsStartAddBaProcess(ieee, pTxTs);
}
goto FORCED_AGG_SETTING;
- } else if (pTxTs->bUsingBa == false) {
+ } else if (!pTxTs->bUsingBa) {
if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
(pTxTs->TxCurSeq+1)%4096))
pTxTs->bUsingBa = true;
@@ -365,9 +365,9 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
return;
}
- if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
+ else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index aa26b2fd2774..2e486ccb6432 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -341,8 +341,6 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
goto done;
}
-
-
sec.enabled = 1;
sec.flags |= SEC_ENABLED;
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index ef883d462d3d..f3b112a058ca 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -5,6 +5,7 @@ config RTL8192U
depends on m
select WIRELESS_EXT
select WEXT_PRIV
+ select CRC32
select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index f434a26cdb2f..afa92ddfa005 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -484,7 +484,7 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- strlcpy(wrqu->name, "802.11", IFNAMSIZ);
+ strscpy(wrqu->name, "802.11", IFNAMSIZ);
if (ieee->modulation & IEEE80211_CCK_MODULATION) {
strlcat(wrqu->name, "b", IFNAMSIZ);
if (ieee->modulation & IEEE80211_OFDM_MODULATION)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 63a561ab4a76..bd8914645e95 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -327,20 +327,20 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
}
FORCED_AGG_SETTING:
switch (pHTInfo->ForcedAMPDUMode) {
- case HT_AGG_AUTO:
- break;
-
- case HT_AGG_FORCE_ENABLE:
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
- tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
- break;
-
- case HT_AGG_FORCE_DISABLE:
- tcb_desc->bAMPDUEnable = false;
- tcb_desc->ampdu_density = 0;
- tcb_desc->ampdu_factor = 0;
- break;
+ case HT_AGG_AUTO:
+ break;
+
+ case HT_AGG_FORCE_ENABLE:
+ tcb_desc->bAMPDUEnable = true;
+ tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
+ tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
+ break;
+
+ case HT_AGG_FORCE_DISABLE:
+ tcb_desc->bAMPDUEnable = false;
+ tcb_desc->ampdu_density = 0;
+ tcb_desc->ampdu_factor = 0;
+ break;
}
return;
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 63e0f7b1b852..fee3bfb99075 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -58,7 +58,7 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
(enum rf90_radio_path_e)eRFPath,
0x14, bMask12Bits, 0x5ab);
} else {
- RT_TRACE(COMP_ERR, "phy_set_rf8256_bandwidth(): unknown hardware version\n");
+ RT_TRACE(COMP_ERR, "%s(): unknown hardware version\n", __func__);
}
break;
case HT_CHANNEL_WIDTH_20_40:
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 93676af98629..9fc4adc83d77 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1608,6 +1608,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
void *oldaddr, *newaddr;
priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!priv->rx_urb[16])
+ return -ENOMEM;
priv->oldaddr = kmalloc(16, GFP_KERNEL);
if (!priv->oldaddr)
return -ENOMEM;
diff --git a/drivers/staging/rtl8712/rtl871x_debug.h b/drivers/staging/rtl8712/rtl871x_debug.h
index a427547c02ba..57f2a38cb71c 100644
--- a/drivers/staging/rtl8712/rtl871x_debug.h
+++ b/drivers/staging/rtl8712/rtl871x_debug.h
@@ -17,7 +17,6 @@
#include "osdep_service.h"
#include "drv_types.h"
-
#define _drv_emerg_ 1
#define _drv_alert_ 2
#define _drv_crit_ 3
@@ -28,7 +27,6 @@
#define _drv_dump_ 8
#define _drv_debug_ 9
-
#define _module_rtl871x_xmit_c_ BIT(0)
#define _module_xmit_osdep_c_ BIT(1)
#define _module_rtl871x_recv_c_ BIT(2)
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index cbaa7a489748..81de5a9e6b67 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1784,7 +1784,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
return -ENOMEM;
param->cmd = IEEE_CMD_SET_ENCRYPTION;
eth_broadcast_addr(param->sta_addr);
- strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strscpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
param->u.crypt.set_tx = 0;
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 601d4ff607bc..1b32b3510093 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -101,20 +101,6 @@ enum WIFI_REASON_CODE {
_RSON_PMK_NOT_AVAILABLE_ = 24,
};
-enum WIFI_STATUS_CODE {
- _STATS_SUCCESSFUL_ = 0,
- _STATS_FAILURE_ = 1,
- _STATS_CAP_FAIL_ = 10,
- _STATS_NO_ASOC_ = 11,
- _STATS_OTHER_ = 12,
- _STATS_NO_SUPP_ALG_ = 13,
- _STATS_OUT_OF_AUTH_SEQ_ = 14,
- _STATS_CHALLENGE_FAIL_ = 15,
- _STATS_AUTH_TIMEOUT_ = 16,
- _STATS_UNABLE_HANDLE_STA_ = 17,
- _STATS_RATE_FAIL_ = 18,
-};
-
enum WIFI_REG_DOMAIN {
DOMAIN_FCC = 1,
DOMAIN_IC = 2,
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index a83d8f7f611c..a311595deafb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -171,9 +171,8 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
- for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
- }
p1 = (u8 *)&crc32_table[i];
p1[0] = crc32_reverseBit(p[3]);
@@ -195,9 +194,8 @@ static __le32 getcrc32(u8 *buf, sint len)
crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
- for (p = buf; len > 0; ++p, --len) {
+ for (p = buf; len > 0; ++p, --len)
crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
- }
return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
}
@@ -320,9 +318,8 @@ static u32 secmicgetuint32(u8 *p)
s32 i;
u32 res = 0;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
res |= ((u32)(*p++)) << (8*i);
- }
return res;
}
@@ -396,9 +393,8 @@ void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
rtw_secmicappendbyte(pmicdata, 0);
rtw_secmicappendbyte(pmicdata, 0);
/* and then zeroes until the length is a multiple of 4 */
- while (pmicdata->nBytesInM != 0) {
+ while (pmicdata->nBytesInM != 0)
rtw_secmicappendbyte(pmicdata, 0);
- }
/* The appendByte function has already computed the result. */
secmicputuint32(dst, pmicdata->L);
secmicputuint32(dst+4, pmicdata->R);
@@ -918,9 +914,8 @@ static void xor_128(u8 *a, u8 *b, u8 *out)
{
sint i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++)
out[i] = a[i] ^ b[i];
- }
}
@@ -928,9 +923,8 @@ static void xor_32(u8 *a, u8 *b, u8 *out)
{
sint i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
out[i] = a[i] ^ b[i];
- }
}
@@ -969,9 +963,8 @@ static void byte_sub(u8 *in, u8 *out)
{
sint i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++)
out[i] = sbox(in[i]);
- }
}
@@ -1259,9 +1252,8 @@ static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
{
sint i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++)
out[i] = ina[i] ^ inb[i];
- }
}
static sint aes_cipher(u8 *key, uint hdrlen,
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index cf23414d7224..22365926a9f8 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -20,16 +20,11 @@
#define MAX_DOZE_WAITING_TIMES_9x 64
/**
-* Function: phy_CalculateBitShift
-*
-* OverView: Get shifted position of the BitMask
-*
-* Input:
-* u32 BitMask,
-*
-* Output: none
-* Return: u32 Return the shift bit bit position of the mask
-*/
+ * phy_CalculateBitShift - Get shifted position of the BitMask.
+ * @BitMask: Bitmask.
+ *
 + * Return: The shift bit position of the mask
+ */
static u32 phy_CalculateBitShift(u32 BitMask)
{
u32 i;
@@ -43,19 +38,17 @@ static u32 phy_CalculateBitShift(u32 BitMask)
/**
-* Function: PHY_QueryBBReg
-*
-* OverView: Read "specific bits" from BB register
-*
-* Input:
-* struct adapter * Adapter,
-* u32 RegAddr, The target address to be readback
-* u32 BitMask The target bit position in the target address
-* to be readback
-* Output: None
-* Return: u32 Data The readback register value
-* Note: This function is equal to "GetRegSetting" in PHY programming guide
-*/
+ * PHY_QueryBBReg - Read "specific bits" from BB register.
+ * @Adapter:
+ * @RegAddr: The target address to be readback
+ * @BitMask: The target bit position in the target address
+ * to be readback
+ *
+ * Return: The readback register value
+ *
+ * .. Note:: This function is equal to "GetRegSetting" in PHY programming
+ * guide
+ */
u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
{
u32 OriginalValue, BitShift;
@@ -64,8 +57,6 @@ u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
return 0;
#endif
- /* RT_TRACE(COMP_RF, DBG_TRACE, ("--->PHY_QueryBBReg(): RegAddr(%#lx), BitMask(%#lx)\n", RegAddr, BitMask)); */
-
OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
@@ -75,22 +66,17 @@ u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
/**
-* Function: PHY_SetBBReg
-*
-* OverView: Write "Specific bits" to BB register (page 8~)
-*
-* Input:
-* struct adapter * Adapter,
-* u32 RegAddr, The target address to be modified
-* u32 BitMask The target bit position in the target address
-* to be modified
-* u32 Data The new register value in the target bit position
-* of the target address
-*
-* Output: None
-* Return: None
-* Note: This function is equal to "PutRegSetting" in PHY programming guide
-*/
+ * PHY_SetBBReg - Write "Specific bits" to BB register (page 8~).
+ * @Adapter:
+ * @RegAddr: The target address to be modified
+ * @BitMask: The target bit position in the target address
+ * to be modified
+ * @Data: The new register value in the target bit position
+ * of the target address
+ *
+ * .. Note:: This function is equal to "PutRegSetting" in PHY programming
+ * guide
+ */
void PHY_SetBBReg_8723B(
struct adapter *Adapter,
@@ -106,8 +92,6 @@ void PHY_SetBBReg_8723B(
return;
#endif
- /* RT_TRACE(COMP_RF, DBG_TRACE, ("--->PHY_SetBBReg(): RegAddr(%#lx), BitMask(%#lx), Data(%#lx)\n", RegAddr, BitMask, Data)); */
-
if (BitMask != bMaskDWord) { /* if not "double word" write */
OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
@@ -171,40 +155,30 @@ static u32 phy_RFSerialRead_8723B(
if (RfPiEnable) {
/* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi|MaskforPhySet, bLSSIReadBackData);
-
- /* RT_DISP(FINIT, INIT_RF, ("Readback from RF-PI : 0x%x\n", retValue)); */
} else {
/* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack|MaskforPhySet, bLSSIReadBackData);
-
- /* RT_DISP(FINIT, INIT_RF, ("Readback from RF-SI : 0x%x\n", retValue)); */
}
return retValue;
}
/**
-* Function: phy_RFSerialWrite_8723B
-*
-* OverView: Write data to RF register (page 8~)
-*
-* Input:
-* struct adapter * Adapter,
-* RF_PATH eRFPath, Radio path of A/B/C/D
-* u32 Offset, The target address to be read
-* u32 Data The new register Data in the target bit position
-* of the target to be read
-*
-* Output: None
-* Return: None
-* Note: Threre are three types of serial operations:
-* 1. Software serial write
-* 2. Hardware LSSI-Low Speed Serial Interface
-* 3. Hardware HSSI-High speed
-* serial write. Driver need to implement (1) and (2).
-* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+ * phy_RFSerialWrite_8723B - Write data to RF register (page 8~).
+ * @Adapter:
+ * @eRFPath: Radio path of A/B/C/D
+ * @Offset: The target address to be read
+ * @Data: The new register Data in the target bit position
+ * of the target to be read
+ *
 + * .. Note:: There are three types of serial operations:
+ * 1. Software serial write
+ * 2. Hardware LSSI-Low Speed Serial Interface
+ * 3. Hardware HSSI-High speed
 + * serial write. Driver needs to implement (1) and (2).
+ * This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
*
- * Note: For RF8256 only
+ * .. Note:: For RF8256 only
* The total count of RTL8256(Zebra4) register is around 36 bit it only employs
* 4-bit RF address. RTL8256 uses "register mode control bit" (Reg00[12], Reg00[10])
* to access register address bigger than 0xf. See "Appendix-4 in PHY Configuration
@@ -225,7 +199,7 @@ static u32 phy_RFSerialRead_8723B(
*
*
*
-*/
+ */
static void phy_RFSerialWrite_8723B(
struct adapter *Adapter,
enum RF_PATH eRFPath,
@@ -248,34 +222,27 @@ static void phy_RFSerialWrite_8723B(
/* */
/* Put write addr in [5:0] and write data in [31:16] */
/* */
- /* DataAndAddr = (Data<<16) | (NewOffset&0x3f); */
DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff; /* T65 RF */
-
/* */
/* Write Operation */
/* */
PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
- /* RTPRINT(FPHY, PHY_RFW, ("RFW-%d Addr[0x%lx]= 0x%lx\n", eRFPath, pPhyReg->rf3wireOffset, DataAndAddr)); */
-
}
/**
-* Function: PHY_QueryRFReg
-*
-* OverView: Query "Specific bits" to RF register (page 8~)
-*
-* Input:
-* struct adapter * Adapter,
-* RF_PATH eRFPath, Radio path of A/B/C/D
-* u32 RegAddr, The target address to be read
-* u32 BitMask The target bit position in the target address
-* to be read
-*
-* Output: None
-* Return: u32 Readback value
-* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
-*/
+ * PHY_QueryRFReg - Query "Specific bits" to RF register (page 8~).
+ * @Adapter:
+ * @eRFPath: Radio path of A/B/C/D
 + * @RegAddr: The target address to be read
+ * @BitMask: The target bit position in the target address
+ * to be read
+ *
+ * Return: Readback value
+ *
+ * .. Note:: This function is equal to "GetRFRegSetting" in PHY
+ * programming guide
+ */
u32 PHY_QueryRFReg_8723B(
struct adapter *Adapter,
u8 eRFPath,
@@ -296,23 +263,18 @@ u32 PHY_QueryRFReg_8723B(
}
/**
-* Function: PHY_SetRFReg
-*
-* OverView: Write "Specific bits" to RF register (page 8~)
-*
-* Input:
-* struct adapter * Adapter,
-* RF_PATH eRFPath, Radio path of A/B/C/D
-* u32 RegAddr, The target address to be modified
-* u32 BitMask The target bit position in the target address
-* to be modified
-* u32 Data The new register Data in the target bit position
-* of the target address
-*
-* Output: None
-* Return: None
-* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
-*/
+ * PHY_SetRFReg - Write "Specific bits" to RF register (page 8~).
+ * @Adapter:
+ * @eRFPath: Radio path of A/B/C/D
+ * @RegAddr: The target address to be modified
+ * @BitMask: The target bit position in the target address
+ * to be modified
+ * @Data: The new register Data in the target bit position
+ * of the target address
+ *
+ * .. Note:: This function is equal to "PutRFRegSetting" in PHY
+ * programming guide.
+ */
void PHY_SetRFReg_8723B(
struct adapter *Adapter,
u8 eRFPath,
@@ -344,15 +306,7 @@ void PHY_SetRFReg_8723B(
/*-----------------------------------------------------------------------------
- * Function: PHY_MACConfig8192C
- *
- * Overview: Condig MAC by header file or parameter file.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
 + * PHY_MACConfig8192C - Config MAC by header file or parameter file.
*
* Revised History:
* When Who Remark
@@ -369,17 +323,12 @@ s32 PHY_MACConfig8723B(struct adapter *Adapter)
}
/**
-* Function: phy_InitBBRFRegisterDefinition
-*
-* OverView: Initialize Register definition offset for Radio Path A/B/C/D
-*
-* Input:
-* struct adapter * Adapter,
-*
-* Output: None
-* Return: None
-* Note: The initialization value is constant and it should never be changes
-*/
+ * phy_InitBBRFRegisterDefinition - Initialize Register definition offset for
+ * Radio Path A/B/C/D
+ * @Adapter:
+ *
 + * .. Note:: The initialization value is constant and it should never be changed
+ */
static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
@@ -512,7 +461,6 @@ int PHY_RFConfig8723B(struct adapter *Adapter)
rtStatus = PHY_RF6052_Config8723B(Adapter);
phy_LCK_8723B(Adapter);
- /* PHY_BB8723B_Config_1T(Adapter); */
return rtStatus;
}
@@ -619,8 +567,6 @@ u8 PHY_GetTxPowerIndex(
s8 txPower = 0, powerDiffByRate = 0, limit = 0;
bool bIn24G = false;
- /* DBG_871X("===>%s\n", __func__); */
-
txPower = (s8) PHY_GetTxPowerIndexBase(padapter, RFPath, Rate, BandWidth, Channel, &bIn24G);
powerDiffByRate = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, ODM_RF_PATH_A, RF_1TX, Rate);
@@ -642,7 +588,6 @@ u8 PHY_GetTxPowerIndex(
if (txPower > MAX_POWER_INDEX)
txPower = MAX_POWER_INDEX;
- /* DBG_871X("Final Tx Power(RF-%c, Channel: %d) = %d(0x%X)\n", ((RFPath == 0)?'A':'B'), Channel, txPower, txPower)); */
return (u8) txPower;
}
@@ -789,8 +734,6 @@ static void phy_PostSetBwMode8723B(struct adapter *Adapter)
PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
-/* PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 1); */
-
PHY_SetBBReg(Adapter, rOFDM0_TxPseudoNoiseWgt, (BIT31|BIT30), 0x0);
break;
@@ -805,15 +748,9 @@ static void phy_PostSetBwMode8723B(struct adapter *Adapter)
PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
-/* PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 0); */
-
PHY_SetBBReg(Adapter, 0x818, (BIT26|BIT27), (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
-
break;
-
default:
- /*RT_TRACE(COMP_DBG, DBG_LOUD, ("phy_SetBWMode8723B(): unknown Bandwidth: %#X\n"\
- , pHalData->CurrentChannelBW));*/
break;
}
@@ -826,10 +763,8 @@ static void phy_SwChnl8723B(struct adapter *padapter)
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
u8 channelToSW = pHalData->CurrentChannel;
- if (pHalData->rf_chip == RF_PSEUDO_11N) {
- /* RT_TRACE(COMP_MLME, DBG_LOUD, ("phy_SwChnl8723B: return for PSEUDO\n")); */
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
return;
- }
pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff00) | channelToSW);
PHY_SetRFReg(padapter, ODM_RF_PATH_A, RF_CHNLBW, 0x3FF, pHalData->RfRegChnlVal[0]);
PHY_SetRFReg(padapter, ODM_RF_PATH_B, RF_CHNLBW, 0x3FF, pHalData->RfRegChnlVal[0]);
@@ -841,7 +776,6 @@ static void phy_SwChnlAndSetBwMode8723B(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- /* RT_TRACE(COMP_SCAN, DBG_LOUD, ("phy_SwChnlAndSetBwMode8723B(): bSwChnl %d, bSetChnlBW %d\n", pHalData->bSwChnl, pHalData->bSetChnlBW)); */
if (Adapter->bNotifyChannelChange) {
DBG_871X("[%s] bSwChnl =%d, ch =%d, bSetChnlBW =%d, bw =%d\n",
__func__,
@@ -886,8 +820,6 @@ static void PHY_HandleSwChnlAndSetBW8723B(
u8 tmpnCur80MhzPrimeSC = pHalData->nCur80MhzPrimeSC;
u8 tmpCenterFrequencyIndex1 = pHalData->CurrentCenterFrequencyIndex1;
- /* DBG_871X("=> PHY_HandleSwChnlAndSetBW8812: bSwitchChannel %d, bSetBandWidth %d\n", bSwitchChannel, bSetBandWidth); */
-
/* check is swchnl or setbw */
if (!bSwitchChannel && !bSetBandWidth) {
DBG_871X("PHY_HandleSwChnlAndSetBW8812: not switch channel and not set bandwidth\n");
@@ -896,7 +828,6 @@ static void PHY_HandleSwChnlAndSetBW8723B(
/* skip change for channel or bandwidth is the same */
if (bSwitchChannel) {
- /* if (pHalData->CurrentChannel != ChannelNum) */
{
if (HAL_IsLegalChannel(Adapter, ChannelNum))
pHalData->bSwChnl = true;
@@ -906,10 +837,8 @@ static void PHY_HandleSwChnlAndSetBW8723B(
if (bSetBandWidth)
pHalData->bSetChnlBW = true;
- if (!pHalData->bSetChnlBW && !pHalData->bSwChnl) {
- /* DBG_871X("<= PHY_HandleSwChnlAndSetBW8812: bSwChnl %d, bSetChnlBW %d\n", pHalData->bSwChnl, pHalData->bSetChnlBW); */
+ if (!pHalData->bSetChnlBW && !pHalData->bSwChnl)
return;
- }
if (pHalData->bSwChnl) {
@@ -968,9 +897,5 @@ void PHY_SetSwChnlBWMode8723B(
u8 Offset80
)
{
- /* DBG_871X("%s() ===>\n", __func__); */
-
PHY_HandleSwChnlAndSetBW8723B(Adapter, true, true, channel, Bandwidth, Offset40, Offset80, channel);
-
- /* DBG_871X("<==%s()\n", __func__); */
}
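
The rewritten comments above use kernel-doc, the format that scripts/kernel-doc and the documentation build understand: "/**" opens the block, the first line gives "name - summary", each parameter gets an "@name:" line, and "Return:" describes the result. The shape, sketched on this file's first helper (the __ffs() body is only an illustration, not the driver's actual implementation):

        #include <linux/bitops.h>

        /**
         * phy_CalculateBitShift() - get the position of the lowest set bit
         * @BitMask: mask to inspect; must be non-zero
         *
         * Return: bit index of the least significant set bit in @BitMask.
         */
        static u32 phy_CalculateBitShift(u32 BitMask)
        {
                return __ffs(BitMask);
        }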
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 1fbf89cb72d0..2d15a5f7648d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -24,7 +24,7 @@ static void update_recvframe_attrib(struct adapter *padapter,
{
struct rx_pkt_attrib *pattrib;
struct recv_stat report;
- PRXREPORT prxreport = (PRXREPORT)&report;
+ struct rxreport_8723b *prxreport = (struct rxreport_8723b *)&report;
report.rxdw0 = prxstat->rxdw0;
report.rxdw1 = prxstat->rxdw1;
diff --git a/drivers/staging/rtl8723bs/include/autoconf.h b/drivers/staging/rtl8723bs/include/autoconf.h
index 8f4c1e734473..86cf09ca5f06 100644
--- a/drivers/staging/rtl8723bs/include/autoconf.h
+++ b/drivers/staging/rtl8723bs/include/autoconf.h
@@ -5,7 +5,6 @@
*
******************************************************************************/
-
/*
* Automatically generated C config: don't edit
*/
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 1de5acaef8ff..426c8d58c444 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -257,8 +257,8 @@ struct hal_ops {
bool (*Efuse_PgPacketWrite_BT)(struct adapter *padapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
s32 (*xmit_thread_handler)(struct adapter *padapter);
- void (*hal_notch_filter)(struct adapter * adapter, bool enable);
- void (*hal_reset_security_engine)(struct adapter * adapter);
+ void (*hal_notch_filter)(struct adapter *adapter, bool enable);
+ void (*hal_reset_security_engine)(struct adapter *adapter);
s32 (*c2h_handler)(struct adapter *padapter, u8 *c2h_evt);
c2h_id_filter c2h_id_filter_ccx;
@@ -384,8 +384,8 @@ void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter);
s32 rtw_hal_xmit_thread_handler(struct adapter *padapter);
-void rtw_hal_notch_filter(struct adapter * adapter, bool enable);
-void rtw_hal_reset_security_engine(struct adapter * adapter);
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
+void rtw_hal_reset_security_engine(struct adapter *adapter);
bool rtw_hal_c2h_valid(struct adapter *adapter, u8 *buf);
s32 rtw_hal_c2h_handler(struct adapter *adapter, u8 *c2h_evt);
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index d9ff8c8e7f36..f80db2c984a4 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -667,85 +667,6 @@ struct ieee80211_header_data {
#define MFIE_TYPE_RATES_EX 50
#define MFIE_TYPE_GENERIC 221
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
-} __attribute__ ((packed));
-
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[0];
-} __attribute__ ((packed));
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __attribute__ ((packed));
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 10
-
-
-struct ieee80211_authentication {
- struct ieee80211_header_data header;
- u16 algorithm;
- u16 transaction;
- u16 status;
- /* struct ieee80211_info_element_hdr info_element; */
-} __attribute__ ((packed));
-
-
-struct ieee80211_probe_response {
- struct ieee80211_header_data header;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 capability;
- struct ieee80211_info_element info_element;
-} __attribute__ ((packed));
-
-struct ieee80211_probe_request {
- struct ieee80211_header_data header;
- /*struct ieee80211_info_element info_element;*/
-} __attribute__ ((packed));
-
-struct ieee80211_assoc_request_frame {
- struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 listen_interval;
- /* u8 current_ap[ETH_ALEN]; */
- struct ieee80211_info_element_hdr info_element;
-} __attribute__ ((packed));
-
-struct ieee80211_assoc_response_frame {
- struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 status;
- u16 aid;
-} __attribute__ ((packed));
-
-struct ieee80211_txb {
- u8 nr_frags;
- u8 encrypted;
- u16 reserved;
- u16 frag_size;
- u16 payload_size;
- struct sk_buff *fragments[0];
-};
-
-
/* SWEEP TABLE ENTRIES NUMBER*/
#define MAX_SWEEP_TAB_ENTRIES 42
#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index f36516fa84c7..8e6e972dd843 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -42,11 +42,13 @@ struct rt_firmware_hdr {
/* LONG WORD 0 ---- */
__le16 signature; /* 92C0: test chip; 92C, 88C0: test chip;
- * 88C1: MP A-cut; 92C1: MP A-cut */
+ * 88C1: MP A-cut; 92C1: MP A-cut
+ */
u8 category; /* AP/NIC and USB/PCI */
u8 function; /* Reserved for different FW function indications,
* for further use when driver needs to download
- * different FW in different conditions. */
+ * different FW in different conditions.
+ */
__le16 version; /* FW Version */
__le16 subversion; /* FW Subversion, default 0x00 */
@@ -135,7 +137,6 @@ struct rt_firmware_hdr {
#define WMM_NORMAL_PAGE_NUM_LPQ_8723B 0x20
#define WMM_NORMAL_PAGE_NUM_NPQ_8723B 0x20
-
#include "HalVerDef.h"
#include "hal_com.h"
@@ -149,7 +150,8 @@ struct rt_firmware_hdr {
#define EFUSE_MAX_SECTION_8723B 64
#define EFUSE_IC_ID_OFFSET 506 /* For some inferiority IC purpose.
- * Added by Roger, 2009.09.02. */
+ * Added by Roger, 2009.09.02.
+ */
#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN_8723B)
#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
@@ -173,7 +175,8 @@ typedef enum _C2H_EVT {
C2H_TSF = 1,
C2H_AP_RPT_RSP = 2,
C2H_CCX_TX_RPT = 3, /* The FW notify the report
- * of the specific tx packet. */
+ * of the specific tx packet.
+ */
C2H_BT_RSSI = 4,
C2H_BT_OP_MODE = 5,
C2H_EXT_RA_RPT = 6,
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_recv.h b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
index fad6749af768..60a1df703c8e 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
@@ -9,7 +9,7 @@
#include <rtl8192c_recv.h>
-typedef struct rxreport_8723b {
+struct rxreport_8723b {
/* DWORD 0 */
u32 pktlen:14;
u32 crc32:1;
@@ -79,9 +79,9 @@ typedef struct rxreport_8723b {
/* DWORD 5 */
u32 tsfl;
-} RXREPORT, *PRXREPORT;
+};
-typedef struct phystatus_8723b {
+struct phystatus_8723b {
u32 rxgain_a:7;
u32 trsw_a:1;
u32 rxgain_b:7;
@@ -123,7 +123,7 @@ typedef struct phystatus_8723b {
u32 anttrainen:1;
u32 antselb:1;
u32 antsel:1;
-} PHYSTATUS, *PPHYSTATUS;
+};
s32 rtl8723bs_init_recv_priv(struct adapter *padapter);
void rtl8723bs_free_recv_priv(struct adapter *padapter);
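
Spelling out "struct rxreport_8723b" instead of the RXREPORT/PRXREPORT typedefs follows chapter 5 of Documentation/process/coding-style.rst: typedefs for plain structs hide what the type is, and the "P"-prefixed pointer typedefs additionally hide the indirection. At the single call site in rtl8723bs_recv.c the change amounts to:

        struct rxreport_8723b *prxreport;

        /* was: PRXREPORT prxreport; -- the pointer was hidden in the typedef */
        prxreport = (struct rxreport_8723b *)&report;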
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index ea0dd156051e..d8655cb619a1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -524,18 +524,16 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY == state) {
+ if (state == _FW_UNDER_SURVEY)
pmlmepriv->bScanInProcess = true;
- }
}
static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY == state) {
+ if (state == _FW_UNDER_SURVEY)
pmlmepriv->bScanInProcess = false;
- }
}
/*
diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
index ab5a8627d371..f798b0c744a4 100644
--- a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
@@ -20,9 +20,9 @@ enum country_code_type_t {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
index ea370b2bb8db..27cd2c5d90af 100644
--- a/drivers/staging/rtl8723bs/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -68,7 +68,7 @@ struct ndis_802_11_fix_ie {
struct ndis_80211_var_ie {
u8 ElementID;
u8 Length;
- u8 data[1];
+ u8 data[];
};
/* Length is the 4 bytes multiples of the sum of
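
The one-element array idiom is replaced by a C99 flexible array member, so the compiler and the fortify checks know the struct's true size, and allocations can be sized with struct_size() from <linux/overflow.h>. A minimal sketch around this header's struct; alloc_var_ie() is a hypothetical helper, not part of the patch:

        #include <linux/overflow.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        /* hypothetical helper: allocate and fill a variable-length IE */
        static struct ndis_80211_var_ie *alloc_var_ie(u8 id, const u8 *src, u8 len)
        {
                struct ndis_80211_var_ie *ie;

                ie = kzalloc(struct_size(ie, data, len), GFP_KERNEL);
                if (!ie)
                        return NULL;
                ie->ElementID = id;
                ie->Length = len;
                memcpy(ie->data, src, len);
                return ie;
        }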
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index bf1417236161..ff164a8c8679 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2516,7 +2516,7 @@ fail:
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
@@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
}
- /* init regulary domain */
- rtw_regd_init(padapter, rtw_reg_notifier);
-
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
*((struct adapter **)wiphy_priv(wiphy)) = padapter;
rtw_cfg80211_preinit_wiphy(padapter, wiphy);
 + /* init regulatory domain */
+ rtw_regd_init(wiphy, rtw_reg_notifier);
+
ret = wiphy_register(wiphy);
if (ret < 0) {
DBG_8192C("Couldn't register wiphy device\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index fb2df871c0cb..d46c65ab384b 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -48,8 +48,9 @@ void rtw_os_indicate_connect(struct adapter *adapter)
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
rtw_cfg80211_ibss_indicate_connect(adapter);
- } else
+ } else {
rtw_cfg80211_indicate_connect(adapter);
+ }
rtw_indicate_wx_assoc_event(adapter);
netif_carrier_on(adapter->pnetdev);
@@ -163,9 +164,8 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
len = sec_ie[1] + 2;
len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < len; i++)
p += sprintf(p, "%02x", sec_ie[i]);
- }
p += sprintf(p, ")");
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index b2208e5f190a..301ffff12e82 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -339,8 +339,6 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
padapter = rtw_netdev_priv(pnetdev);
- rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
-
/* 3 3. init driver special setting, interface, OS and hardware relative */
/* 4 3.1 set hardware operation functions */
@@ -378,6 +376,8 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
goto free_hal_data;
}
+ rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
/* 3 8. get WLan MAC address */
/* set mac addr */
rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 578b9f734231..3f04b7a954ba 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -34,7 +34,7 @@
NL80211_RRF_PASSIVE_SCAN)
static const struct ieee80211_regdomain rtw_regdom_rd = {
- .n_reg_rules = 3,
+ .n_reg_rules = 2,
.alpha2 = "99",
.reg_rules = {
RTW_2GHZ_CH01_11,
@@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
_rtw_reg_apply_flags(wiphy);
}
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
- struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
- return 0;
}
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
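
rtw_regd_init() now takes the wiphy directly and is called from rtw_wdev_alloc() just before wiphy_register(), so the regulatory rules are attached before cfg80211 sees the device; the old call site also had to reach through padapter->rtw_wdev to find the wiphy. The resulting ordering in rtw_wdev_alloc(), abridged:

        rtw_cfg80211_preinit_wiphy(padapter, wiphy);
        rtw_regd_init(wiphy, rtw_reg_notifier);  /* regd in place first */
        ret = wiphy_register(wiphy);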
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 029f0d09e966..c237a8f8eb59 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -814,7 +814,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
fix->ywrapstep = crtc->ywrapstep;
fix->accel = FB_ACCEL_SMI;
- strlcpy(fix->id, fixId[index], sizeof(fix->id));
+ strscpy(fix->id, fixId[index], sizeof(fix->id));
fix->smem_start = crtc->oScreen + sm750_dev->vidmem_start;
pr_info("fix->smem_start = %lx\n", fix->smem_start);
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 7ae5306b92fe..4455d26f7c96 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -74,14 +74,10 @@ struct visorhba_devdata {
unsigned long long interrupts_notme;
unsigned long long interrupts_disabled;
u64 __iomem *flags_addr;
- atomic_t interrupt_rcvd;
- wait_queue_head_t rsp_queue;
struct visordisk_info head;
unsigned int max_buff_len;
int devnum;
- struct task_struct *thread;
- int thread_wait_ms;
-
+ struct uiscmdrsp *cmdrsp;
/*
* allows us to pass int handles back-and-forth between us and
* iovm, instead of raw pointers
@@ -97,39 +93,6 @@ struct visorhba_devices_open {
};
/*
- * visor_thread_start - Starts a thread for the device
- * @threadfn: Function the thread starts
- * @thrcontext: Context to pass to the thread, i.e. devdata
- * @name: String describing name of thread
- *
- * Starts a thread for the device.
- *
- * Return: The task_struct * denoting the thread on success,
- * or NULL on failure
- */
-static struct task_struct *visor_thread_start(int (*threadfn)(void *),
- void *thrcontext, char *name)
-{
- struct task_struct *task;
-
- task = kthread_run(threadfn, thrcontext, "%s", name);
- if (IS_ERR(task)) {
- pr_err("visorbus failed to start thread\n");
- return NULL;
- }
- return task;
-}
-
-/*
- * visor_thread_stop - Stops the thread if it is running
- * @task: Description of process to stop
- */
-static void visor_thread_stop(struct task_struct *task)
-{
- kthread_stop(task);
-}
-
-/*
* add_scsipending_entry - Save off io command that is pending in
* Service Partition
* @devdata: Pointer to devdata
@@ -730,7 +693,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
/* Stop using the IOVM response queue (queue should be drained
* by the end)
*/
- visor_thread_stop(devdata->thread);
+ visorbus_disable_channel_interrupts(devdata->dev);
/* Fail commands that weren't completed */
spin_lock_irqsave(&devdata->privlock, flags);
@@ -952,37 +915,18 @@ static void drain_queue(struct uiscmdrsp *cmdrsp,
}
/*
- * process_incoming_rsps - Process responses from IOSP
- * @v: Void pointer to visorhba_devdata
- *
- * Main function for the thread that processes the responses
- * from the IO Service Partition. When the queue is empty, wait
- * to check to see if it is full again.
- *
- * Return: 0 on success, -ENOMEM on failure
 + * This is used only when this driver is active as an HBA driver in the
 + * client guest partition. It is called periodically so we can obtain
 + * and process command responses from the IO Service Partition.
*/
-static int process_incoming_rsps(void *v)
+static void visorhba_channel_interrupt(struct visor_device *dev)
{
- struct visorhba_devdata *devdata = v;
- struct uiscmdrsp *cmdrsp = NULL;
- const int size = sizeof(*cmdrsp);
+ struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
- cmdrsp = kmalloc(size, GFP_ATOMIC);
- if (!cmdrsp)
- return -ENOMEM;
+ if (!devdata)
+ return;
- while (1) {
- if (kthread_should_stop())
- break;
- wait_event_interruptible_timeout(
- devdata->rsp_queue, (atomic_read(
- &devdata->interrupt_rcvd) == 1),
- msecs_to_jiffies(devdata->thread_wait_ms));
- /* drain queue */
- drain_queue(cmdrsp, devdata);
- }
- kfree(cmdrsp);
- return 0;
+ drain_queue(devdata->cmdrsp, devdata);
}
/*
@@ -1028,8 +972,7 @@ static int visorhba_resume(struct visor_device *dev,
if (devdata->serverdown && !devdata->serverchangingstate)
devdata->serverchangingstate = true;
- devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
- "vhba_incming");
+ visorbus_enable_channel_interrupts(dev);
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1095,7 +1038,6 @@ static int visorhba_probe(struct visor_device *dev)
goto err_debugfs_dir;
}
- init_waitqueue_head(&devdata->rsp_queue);
spin_lock_init(&devdata->privlock);
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1113,9 +1055,8 @@ static int visorhba_probe(struct visor_device *dev)
idr_init(&devdata->idr);
- devdata->thread_wait_ms = 2;
- devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
- "vhba_incoming");
+ devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
+ visorbus_enable_channel_interrupts(dev);
scsi_scan_host(scsihost);
@@ -1150,7 +1091,8 @@ static void visorhba_remove(struct visor_device *dev)
return;
scsihost = devdata->scsihost;
- visor_thread_stop(devdata->thread);
+ kfree(devdata->cmdrsp);
+ visorbus_disable_channel_interrupts(dev);
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
@@ -1173,7 +1115,7 @@ static struct visor_driver visorhba_driver = {
.remove = visorhba_remove,
.pause = visorhba_pause,
.resume = visorhba_resume,
- .channel_interrupt = NULL,
+ .channel_interrupt = visorhba_channel_interrupt,
};
/*
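
With this change the visorbus core, not a driver-private kthread, drives response processing: the bus invokes the driver's channel_interrupt callback periodically, and the driver keeps only the one preallocated cmdrsp buffer it drains into. How a visor driver wires that up, sketched (my_interrupt() and my_driver are hypothetical names):

        static void my_interrupt(struct visor_device *dev)
        {
                struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);

                if (devdata)
                        drain_queue(devdata->cmdrsp, devdata);
        }

        static struct visor_driver my_driver = {
                .probe = visorhba_probe, /* calls visorbus_enable_channel_interrupts() */
                .channel_interrupt = my_interrupt,
        };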
diff --git a/drivers/staging/vc04_services/bcm2835-audio/TODO b/drivers/staging/vc04_services/bcm2835-audio/TODO
index cb8ead3e9108..b85451255db0 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/TODO
+++ b/drivers/staging/vc04_services/bcm2835-audio/TODO
@@ -5,6 +5,6 @@
*****************************************************************************
1) Revisit multi-cards options and PCM route mixer control (as per comment
-https://lkml.org/lkml/2018/9/8/200)
+https://lore.kernel.org/lkml/s5hd0to5598.wl-tiwai@suse.de)
2) Fix the remaining checkpatch.pl errors and warnings.
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index 4c2cae99776b..3703409715da 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -224,7 +224,7 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
{
int err;
- strcpy(chip->card->mixername, "Broadcom Mixer");
+ strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
err = create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl), snd_bcm2835_ctl);
if (err < 0)
return err;
@@ -261,7 +261,7 @@ static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
{
- strcpy(chip->card->mixername, "Broadcom Mixer");
+ strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl),
snd_bcm2835_headphones_ctl);
}
@@ -295,7 +295,7 @@ static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
{
- strcpy(chip->card->mixername, "Broadcom Mixer");
+ strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi),
snd_bcm2835_hdmi);
}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index f783b632141b..542aff131d06 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -12,7 +12,7 @@
static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
@@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
@@ -334,7 +334,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
pcm->private_data = chip;
pcm->nonatomic = true;
- strcpy(pcm->name, name);
+ strscpy(pcm->name, name, sizeof(pcm->name));
if (!spdif) {
chip->dest = route;
chip->volume = 0;
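
SNDRV_PCM_INFO_BATCH declares that the hardware pointer only advances in period-sized steps, so the ALSA core stops assuming fine-grained, per-frame position reporting for these substreams. It is just one more bit in the hardware descriptor:

        .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
                SNDRV_PCM_INFO_SYNC_APPLPTR |
                SNDRV_PCM_INFO_BATCH,  /* pointer granularity is one period */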
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index cf5f80f5ca6b..c250fbef2fa3 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -185,9 +185,9 @@ static int snd_add_child_device(struct device *dev,
goto error;
}
- strcpy(card->driver, audio_driver->driver.name);
- strcpy(card->shortname, audio_driver->shortname);
- strcpy(card->longname, audio_driver->longname);
+ strscpy(card->driver, audio_driver->driver.name, sizeof(card->driver));
+ strscpy(card->shortname, audio_driver->shortname, sizeof(card->shortname));
+ strscpy(card->longname, audio_driver->longname, sizeof(card->longname));
err = audio_driver->newpcm(chip, audio_driver->shortname,
audio_driver->route,
diff --git a/drivers/staging/vc04_services/interface/TODO b/drivers/staging/vc04_services/interface/TODO
index fc2752bc95b2..0bcb8f158afc 100644
--- a/drivers/staging/vc04_services/interface/TODO
+++ b/drivers/staging/vc04_services/interface/TODO
@@ -91,3 +91,7 @@ The first thing one generally sees in a probe function is a memory allocation
for all the device specific data. This structure is then passed all over the
driver. This is good practice since it makes the driver work regardless of the
number of devices probed.
+
+14) Clean up Sparse warnings from __user annotations. See
+vchiq_irq_queue_bulk_tx_rx(). Ensure that the address of "&waiter->bulk_waiter"
+is never disclosed to userspace.
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index f500a7043805..59e45dc03a97 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -958,7 +958,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
struct vchiq_service *service;
struct bulk_waiter_node *waiter = NULL;
bool found = false;
- void *userdata = NULL;
+ void *userdata;
int status = 0;
int ret;
@@ -997,15 +997,10 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
"found bulk_waiter %pK for pid %d", waiter,
current->pid);
userdata = &waiter->bulk_waiter;
+ } else {
+ userdata = args->userdata;
}
- /*
- * FIXME address space mismatch:
- * args->data may be interpreted as a kernel pointer
- * in create_pagelist() called from vchiq_bulk_transfer(),
- * accessing kernel data instead of user space, based on the
- * address.
- */
status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
userdata, args->mode, dir);
@@ -1057,14 +1052,21 @@ static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int i
compat_uptr_t ptr32;
compat_uptr_t __user *uptr = ubuf;
ret = get_user(ptr32, uptr + index);
+ if (ret)
+ return ret;
+
*buf = compat_ptr(ptr32);
} else {
uintptr_t ptr, __user *uptr = ubuf;
ret = get_user(ptr, uptr + index);
+
+ if (ret)
+ return ret;
+
*buf = (void __user *)ptr;
}
- return ret;
+ return 0;
}
struct vchiq_completion_data32 {
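
get_user() returns 0 on success and -EFAULT when the user address faults, and on failure the destination does not hold the user's value, so the fetched pointer must not be used before the return code is checked -- which is exactly what the added early returns do. The pattern, sketched:

        compat_uptr_t ptr32;
        int ret;

        ret = get_user(ptr32, uptr + index);
        if (ret)
                return ret;            /* -EFAULT: ptr32 is not usable */
        *buf = compat_ptr(ptr32);      /* safe: the fetch succeeded */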
@@ -1715,7 +1717,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
{
struct vchiq_queue_bulk_transfer32 args32;
struct vchiq_queue_bulk_transfer args;
- enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
+ enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
if (copy_from_user(&args32, argp, sizeof(args32)))
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index f91e21a0b51b..3023fa9fdc64 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -12,13 +12,12 @@ static int g_connected;
static int g_num_deferred_callbacks;
static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
static int g_once_init;
-static struct mutex g_connected_mutex;
+static DEFINE_MUTEX(g_connected_mutex);
/* Function to initialize our lock */
static void connected_init(void)
{
if (!g_once_init) {
- mutex_init(&g_connected_mutex);
g_once_init = 1;
}
}
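
DEFINE_MUTEX() initializes the lock at compile time, which makes the lazy mutex_init() call (and the unsynchronized g_once_init guard around it) unnecessary. A minimal sketch, with my_work() as a hypothetical user:

        #include <linux/mutex.h>

        static DEFINE_MUTEX(g_lock);  /* valid from load time, no init call */

        static void my_work(void)
        {
                mutex_lock(&g_lock);
                /* protected section */
                mutex_unlock(&g_lock);
        }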
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index fd0ea4dbcb91..e3fa38bd7f12 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -175,7 +175,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- unsigned int minor = MINOR(file_inode(file)->i_rdev);
+ unsigned int minor = iminor(file_inode(file));
ssize_t retval;
size_t image_size;
@@ -218,7 +218,7 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
static ssize_t vme_user_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned int minor = MINOR(file_inode(file)->i_rdev);
+ unsigned int minor = iminor(file_inode(file));
ssize_t retval;
size_t image_size;
@@ -260,7 +260,7 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
- unsigned int minor = MINOR(file_inode(file)->i_rdev);
+ unsigned int minor = iminor(file_inode(file));
size_t image_size;
loff_t res;
@@ -294,7 +294,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
struct vme_slave slave;
struct vme_irq_id irq_req;
unsigned long copied;
- unsigned int minor = MINOR(inode->i_rdev);
+ unsigned int minor = iminor(inode);
int retval;
dma_addr_t pci_addr;
void __user *argp = (void __user *)arg;
@@ -412,7 +412,7 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
struct inode *inode = file_inode(file);
- unsigned int minor = MINOR(inode->i_rdev);
+ unsigned int minor = iminor(inode);
mutex_lock(&image[minor].mutex);
ret = vme_user_ioctl(inode, file, cmd, arg);
@@ -481,7 +481,7 @@ static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
- unsigned int minor = MINOR(file_inode(file)->i_rdev);
+ unsigned int minor = iminor(file_inode(file));
if (type[minor] == MASTER_MINOR)
return vme_user_master_mmap(minor, vma);
@@ -689,7 +689,7 @@ err_dev:
return err;
}
-static int vme_user_remove(struct vme_dev *dev)
+static void vme_user_remove(struct vme_dev *dev)
{
int i;
@@ -717,8 +717,6 @@ static int vme_user_remove(struct vme_dev *dev)
/* Unregister the major and minor device numbers */
unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
-
- return 0;
}
static struct vme_driver vme_user_driver = {
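
iminor() is the <linux/fs.h> accessor for an inode's device minor number; it performs the same MINOR(inode->i_rdev) computation but keeps i_rdev an implementation detail of the VFS. Sketched usage, with my_open() as a hypothetical file operation:

        #include <linux/errno.h>
        #include <linux/fs.h>

        static int my_open(struct inode *inode, struct file *file)
        {
                unsigned int minor = iminor(inode);  /* == MINOR(inode->i_rdev) */

                if (minor >= VME_DEVS)
                        return -ENODEV;
                return 0;
        }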
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 6b25d75d2501..1aa675241599 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1683,7 +1683,7 @@ static unsigned char byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
{0xF0, 0x00},
};
-static const unsigned short awcFrameTime[MAX_RATE] = {
+static const unsigned short awc_frame_time[MAX_RATE] = {
10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
};
@@ -1716,7 +1716,7 @@ unsigned int bb_get_frame_time(unsigned char by_preamble_type,
if (rate_idx > RATE_54M)
return 0;
- rate = (unsigned int)awcFrameTime[rate_idx];
+ rate = (unsigned int)awc_frame_time[rate_idx];
if (rate_idx <= 3) { /* CCK mode */
if (by_preamble_type == 1) /* Short */
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 464dd89078b2..e7061d383306 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -111,7 +111,7 @@ struct vnt_rts_g {
__le16 duration_bb;
u16 reserved;
struct ieee80211_rts data;
-} __packed;
+} __packed __aligned(2);
struct vnt_rts_g_fb {
struct vnt_phy_field b;
@@ -125,14 +125,14 @@ struct vnt_rts_g_fb {
__le16 rts_duration_ba_f1;
__le16 rts_duration_aa_f1;
struct ieee80211_rts data;
-} __packed;
+} __packed __aligned(2);
struct vnt_rts_ab {
struct vnt_phy_field ab;
__le16 duration;
u16 reserved;
struct ieee80211_rts data;
-} __packed;
+} __packed __aligned(2);
struct vnt_rts_a_fb {
struct vnt_phy_field a;
@@ -141,7 +141,7 @@ struct vnt_rts_a_fb {
__le16 rts_duration_f0;
__le16 rts_duration_f1;
struct ieee80211_rts data;
-} __packed;
+} __packed __aligned(2);
/* CTS buffer header */
struct vnt_cts {
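
Adding __aligned(2) next to __packed keeps the on-air layout byte-exact while restoring the two-byte minimum alignment that the embedded struct ieee80211_rts/ieee80211_cts (16-bit fields and addresses) are normally accessed with; a bare __packed would let the whole object land on an odd address and force unaligned-access handling. The attribute pair sits on the closing brace:

        struct vnt_rts_ab {
                struct vnt_phy_field ab;
                __le16 duration;
                u16 reserved;
                struct ieee80211_rts data;
        } __packed __aligned(2);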
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 5b8da06e3916..bcd4d467e03a 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -687,7 +687,7 @@ static int vnt_rf_set_txpower(struct vnt_private *priv, u8 power,
if (hw_value < ARRAY_SIZE(vt3226d0_lo_current_table)) {
ret = vnt_rf_write_embedded(priv,
- vt3226d0_lo_current_table[hw_value]);
+ vt3226d0_lo_current_table[hw_value]);
if (ret)
return ret;
}
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 6ca2ca32d036..f23440799443 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -95,7 +95,7 @@ struct vnt_rts_g {
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_g data_head;
-} __packed;
+} __packed __aligned(2);
struct vnt_rts_ab {
struct vnt_phy_field ab;
@@ -103,7 +103,7 @@ struct vnt_rts_ab {
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_ab data_head;
-} __packed;
+} __packed __aligned(2);
/* CTS buffer header */
struct vnt_cts {
@@ -113,7 +113,7 @@ struct vnt_cts {
struct ieee80211_cts data;
u16 reserved2;
struct vnt_tx_datahead_g data_head;
-} __packed;
+} __packed __aligned(2);
union vnt_tx_data_head {
/* rts g */
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index ed53d0b45592..cd6bcfdfbe9a 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -5,7 +5,6 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/gpio/consumer.h>
#include <net/mac80211.h>
#include "bh.h"
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
index 78c49329e22a..92ef3298d4ac 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/staging/wfx/bh.h
@@ -8,10 +8,6 @@
#ifndef WFX_BH_H
#define WFX_BH_H
-#include <linux/atomic.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
struct wfx_dev;
struct wfx_hif {
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
index ca04b3da6204..ea3911485307 100644
--- a/drivers/staging/wfx/bus.h
+++ b/drivers/staging/wfx/bus.h
@@ -8,9 +8,6 @@
#ifndef WFX_BUS_H
#define WFX_BUS_H
-#include <linux/mmc/sdio_func.h>
-#include <linux/spi/spi.h>
-
#define WFX_REG_CONFIG 0x0
#define WFX_REG_CONTROL 0x1
#define WFX_REG_IN_OUT_QUEUE 0x2
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index e06d7e1ebe9c..588edce44854 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -5,19 +5,13 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/module.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
-#include <linux/interrupt.h>
#include <linux/of_irq.h>
-#include <linux/irq.h>
#include "bus.h"
#include "wfx.h"
-#include "hwio.h"
-#include "main.h"
-#include "bh.h"
static const struct wfx_platform_data wfx_sdio_pdata = {
.file_fw = "wfm_wf200",
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index a99125d1a30d..f89855abe9f8 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -6,19 +6,12 @@
* Copyright (c) 2011, Sagrad Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include "bus.h"
#include "wfx.h"
-#include "hwio.h"
-#include "main.h"
-#include "bh.h"
#define SET_WRITE 0x7FFF /* usage: and operation */
#define SET_READ 0x8000 /* usage: or operation */
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index 385f2d42a0e2..2cfa16279220 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -5,13 +5,8 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
#include "data_rx.h"
#include "wfx.h"
-#include "bh.h"
-#include "sta.h"
static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
{
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 36b36ef39d05..76f26e3c4381 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -6,14 +6,9 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include "data_tx.h"
#include "wfx.h"
-#include "bh.h"
#include "sta.h"
-#include "queue.h"
-#include "debug.h"
#include "traces.h"
#include "hif_tx_mib.h"
@@ -331,6 +326,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
{
struct hif_msg *hif_msg;
struct hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -344,11 +340,14 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// From now tx_info->control is unusable
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ // Fill tx_priv
+ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+ tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
// Fill hif_msg
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
WARN(offset & 1, "attempt to transmit an unaligned frame");
- skb_put(skb, wfx_tx_get_icv_len(hw_key));
+ skb_put(skb, tx_priv->icv_size);
skb_push(skb, wmsg_len);
memset(skb->data, 0, wmsg_len);
hif_msg = (struct hif_msg *)skb->data;
@@ -484,6 +483,7 @@ static void wfx_tx_fill_rates(struct wfx_dev *wdev,
void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
{
+ const struct wfx_tx_priv *tx_priv;
struct ieee80211_tx_info *tx_info;
struct wfx_vif *wvif;
struct sk_buff *skb;
@@ -495,6 +495,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
return;
}
tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
wvif = wdev_to_wvif(wdev, ((struct hif_msg *)skb->data)->interface);
WARN_ON(!wvif);
if (!wvif)
@@ -503,6 +504,8 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
// Note that wfx_pending_get_pkt_us_delay() get data from tx_info
_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
wfx_tx_fill_rates(wdev, tx_info, arg);
+ skb_trim(skb, skb->len - tx_priv->icv_size);
+
// From now, you can touch to tx_info->status, but do not touch to
// tx_priv anymore
// FIXME: use ieee80211_tx_info_clear_status()
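Aside (not part of the patch): the data_tx.c hunks stash the ICV length in the per-skb driver scratch area (tx_info->rate_driver_data) at transmit time, so the confirm path can trim exactly the tail bytes it reserved. A minimal userspace sketch of that stash-and-trim pattern, with illustrative names and a plain cb[] array standing in for rate_driver_data:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct pkt {
	unsigned char cb[48];   /* per-packet scratch, like skb->cb / rate_driver_data */
	size_t len;
};

struct tx_priv {                /* driver-private state kept inside cb[] */
	unsigned char icv_size;
};

static void tx(struct pkt *p, unsigned char icv_len)
{
	struct tx_priv *priv = (struct tx_priv *)p->cb;

	memset(p->cb, 0, sizeof(p->cb));
	priv->icv_size = icv_len;   /* remember how much tail we reserved */
	p->len += icv_len;          /* like skb_put(skb, tx_priv->icv_size) */
}

static void tx_confirm(struct pkt *p)
{
	const struct tx_priv *priv = (const struct tx_priv *)p->cb;

	p->len -= priv->icv_size;   /* like skb_trim(skb, skb->len - icv_size) */
}

int main(void)
{
	struct pkt p = { .len = 100 };

	tx(&p, 8);
	assert(p.len == 108);
	tx_confirm(&p);
	assert(p.len == 100);
	return 0;
}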
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 46c9fff7a870..6b3020097efa 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -8,9 +8,6 @@
#ifndef WFX_DATA_TX_H
#define WFX_DATA_TX_H
-#include <linux/list.h>
-#include <net/mac80211.h>
-
#include "hif_api_cmd.h"
#include "hif_api_mib.h"
@@ -35,6 +32,7 @@ struct tx_policy_cache {
struct wfx_tx_priv {
ktime_t xmit_timestamp;
+ unsigned char icv_size;
};
void wfx_tx_policy_init(struct wfx_vif *wvif);
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index eedada78c25f..3e87d13eb358 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -5,15 +5,9 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/crc32.h>
-
#include "debug.h"
#include "wfx.h"
#include "sta.h"
-#include "main.h"
-#include "hif_tx.h"
#include "hif_tx_mib.h"
#define CREATE_TRACE_POINTS
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index 1b8aec02d169..1bb9054871c4 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -6,8 +6,6 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
#include <linux/bitfield.h>
#include "fwio.h"
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 11bc1a58edae..8b671c9ab97c 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -8,10 +8,6 @@
#ifndef WFX_HIF_API_CMD_H
#define WFX_HIF_API_CMD_H
-#include <linux/ieee80211.h>
-
-#include "hif_api_general.h"
-
enum hif_requests_ids {
HIF_REQ_ID_RESET = 0x0a,
HIF_REQ_ID_READ_MIB = 0x05,
@@ -100,7 +96,7 @@ struct hif_req_update_ie {
u8 reserved1:5;
u8 reserved2;
__le16 num_ies;
- struct element ie[];
+ u8 ie[];
} __packed;
struct hif_cnf_update_ie {
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
index 24188945718d..70b253d0265d 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -8,15 +8,6 @@
#ifndef WFX_HIF_API_GENERAL_H
#define WFX_HIF_API_GENERAL_H
-#ifdef __KERNEL__
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#else
-#include <net/ethernet.h>
-#include <stdint.h>
-#define __packed __attribute__((__packed__))
-#endif
-
#define HIF_ID_IS_INDICATION 0x80
#define HIF_COUNTER_MAX 7
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 63b437261eb7..17dc13321978 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -6,11 +6,7 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/etherdevice.h>
-
-#include "hif_tx.h"
#include "wfx.h"
-#include "bh.h"
#include "hwio.h"
#include "debug.h"
#include "sta.h"
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c
index 1926cf1b62be..6432ed86505c 100644
--- a/drivers/staging/wfx/hif_tx_mib.c
+++ b/drivers/staging/wfx/hif_tx_mib.c
@@ -6,13 +6,8 @@
* Copyright (c) 2010, ST-Ericsson
* Copyright (C) 2010, ST-Ericsson SA
*/
-
-#include <linux/etherdevice.h>
-
#include "wfx.h"
-#include "hif_tx.h"
#include "hif_tx_mib.h"
-#include "hif_api_mib.h"
int hif_set_output_power(struct wfx_vif *wvif, int val)
{
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index 36fbc5b5d64c..089bb41be149 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -5,13 +5,10 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include "hwio.h"
#include "wfx.h"
-#include "bus.h"
#include "traces.h"
/*
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index 0b8e4f7157df..8bb9bcfc3182 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -8,8 +8,6 @@
#ifndef WFX_HWIO_H
#define WFX_HWIO_H
-#include <linux/types.h>
-
struct wfx_dev;
int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len);
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
index 2ab82bed4c1b..c93d07dcdc10 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/staging/wfx/key.c
@@ -5,12 +5,10 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "key.h"
#include "wfx.h"
-#include "hif_tx_mib.h"
static int wfx_alloc_key(struct wfx_dev *wdev)
{
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
index 70a44d0ca35e..4dc9feadaba2 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/staging/wfx/key.h
@@ -8,8 +8,6 @@
#ifndef WFX_KEY_H
#define WFX_KEY_H
-#include <net/mac80211.h>
-
struct wfx_dev;
struct wfx_vif;
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index e7bc1988124a..b9ea9a93fe1a 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -10,28 +10,21 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
*/
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/gpio/consumer.h>
#include <linux/mmc/sdio_func.h>
#include <linux/spi/spi.h>
-#include <linux/etherdevice.h>
#include <linux/firmware.h>
-#include "main.h"
#include "wfx.h"
#include "fwio.h"
#include "hwio.h"
#include "bus.h"
-#include "bh.h"
#include "sta.h"
#include "key.h"
#include "scan.h"
#include "debug.h"
-#include "data_tx.h"
#include "hif_tx_mib.h"
-#include "hif_api_cmd.h"
#define WFX_PDS_MAX_SIZE 1500
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index a0db322383a3..086bcc041b90 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -10,11 +10,8 @@
#ifndef WFX_MAIN_H
#define WFX_MAIN_H
-#include <linux/device.h>
#include <linux/gpio/consumer.h>
-#include "hif_api_general.h"
-
struct wfx_dev;
struct hwbus_ops;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 31c37f69c295..3bddf282a4ce 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -5,13 +5,9 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/sched.h>
#include <net/mac80211.h>
-#include "queue.h"
#include "wfx.h"
-#include "sta.h"
-#include "data_tx.h"
#include "traces.h"
void wfx_tx_lock(struct wfx_dev *wdev)
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 80ba19455ef3..e43aa9dfbc45 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -8,9 +8,6 @@
#ifndef WFX_QUEUE_H
#define WFX_QUEUE_H
-#include <linux/skbuff.h>
-#include <linux/atomic.h>
-
struct wfx_dev;
struct wfx_vif;
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index c7496a766478..e5b7eef78858 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -8,8 +8,6 @@
#ifndef WFX_SCAN_H
#define WFX_SCAN_H
-#include <net/mac80211.h>
-
struct wfx_dev;
struct wfx_vif;
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 196779a1b89a..5585f9e876e1 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -5,17 +5,11 @@
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "sta.h"
#include "wfx.h"
-#include "fwio.h"
-#include "bh.h"
-#include "key.h"
#include "scan.h"
-#include "debug.h"
-#include "hif_tx.h"
#include "hif_tx_mib.h"
#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index d7b5df5ea4e6..a3fb9fc93fa4 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -8,8 +8,6 @@
#ifndef WFX_STA_H
#define WFX_STA_H
-#include <net/mac80211.h>
-
struct wfx_dev;
struct wfx_vif;
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index e34c7a538c65..afe1074e09b3 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -12,11 +12,8 @@
#define _WFX_TRACE_H
#include <linux/tracepoint.h>
-#include <net/mac80211.h>
#include "bus.h"
-#include "hif_api_cmd.h"
-#include "hif_api_mib.h"
/* The hell below needs some explanation. For each symbolic number, we need to
* define it with TRACE_DEFINE_ENUM() and in a list for __print_symbolic.
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 94898680ccde..a185b82795c4 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -10,9 +10,6 @@
#ifndef WFX_H
#define WFX_H
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/nospec.h>
#include <net/mac80211.h>
diff --git a/drivers/staging/wimax/i2400m/fw.c b/drivers/staging/wimax/i2400m/fw.c
index edb5eba0898b..92ea5c101e76 100644
--- a/drivers/staging/wimax/i2400m/fw.c
+++ b/drivers/staging/wimax/i2400m/fw.c
@@ -189,12 +189,17 @@ void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd)
{
if (i2400m_brh_get_use_checksum(cmd)) {
int i;
- u32 checksum = 0;
+ __le32 checksum = 0;
const u32 *checksum_ptr = (void *) cmd->payload;
- for (i = 0; i < cmd->data_size / 4; i++)
- checksum += cpu_to_le32(*checksum_ptr++);
- checksum += cmd->command + cmd->target_addr + cmd->data_size;
- cmd->block_checksum = cpu_to_le32(checksum);
+
+ for (i = 0; i < le32_to_cpu(cmd->data_size) / 4; i++)
+ le32_add_cpu(&checksum, *checksum_ptr++);
+
+ le32_add_cpu(&checksum, le32_to_cpu(cmd->command));
+ le32_add_cpu(&checksum, le32_to_cpu(cmd->target_addr));
+ le32_add_cpu(&checksum, le32_to_cpu(cmd->data_size));
+
+ cmd->block_checksum = checksum;
}
}
EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
@@ -583,7 +588,7 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
struct i2400m_bootrom_header *ack, size_t ack_size,
int flags)
{
- ssize_t result = -ENOMEM, rx_bytes;
+ ssize_t result, rx_bytes;
struct device *dev = i2400m_dev(i2400m);
int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);
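Aside (not part of the patch): the checksum fix types the accumulator as __le32 and grows it with le32_add_cpu(), so block_checksum ends up little-endian regardless of host byte order. A hedged userspace emulation of those helper semantics (put_le32/get_le32 are stand-ins, not kernel API):

#include <assert.h>
#include <stdint.h>

/* Store/read a u32 as little-endian bytes, the way a __le32 sits in memory. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* le32_add_cpu() semantics: read LE, add in CPU order, write back LE. */
static void le32_add_cpu_emul(uint8_t *le, uint32_t val)
{
	put_le32(le, get_le32(le) + val);
}

int main(void)
{
	uint8_t checksum[4] = { 0 };   /* the __le32 accumulator */
	uint8_t word[4];

	put_le32(word, 0x11223344);    /* one __le32 payload word */
	le32_add_cpu_emul(checksum, get_le32(word));
	assert(get_le32(checksum) == 0x11223344);
	return 0;
}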
diff --git a/drivers/staging/wimax/i2400m/netdev.c b/drivers/staging/wimax/i2400m/netdev.c
index 8339d600e77b..cd06eaf75e8b 100644
--- a/drivers/staging/wimax/i2400m/netdev.c
+++ b/drivers/staging/wimax/i2400m/netdev.c
@@ -561,11 +561,11 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
{
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, i2400m->fw_name ? : "",
sizeof(info->fw_version));
if (net_dev->dev.parent)
- strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
+ strscpy(info->bus_info, dev_name(net_dev->dev.parent),
sizeof(info->bus_info));
}
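Aside (not part of the patch): strlcpy() returns strlen(src), so it keeps reading the source past the destination size even after truncating, while strscpy() bounds the read, always NUL-terminates, and reports truncation as -E2BIG. A rough userspace model of the semantics the conversion relies on (strscpy_emul is illustrative, not the kernel implementation):

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define E2BIG_NEG (-7)

/* Rough model of strscpy(): always NUL-terminates, reports truncation. */
static ptrdiff_t strscpy_emul(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return E2BIG_NEG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? E2BIG_NEG : (ptrdiff_t)i;
}

int main(void)
{
	char buf[4];

	assert(strscpy_emul(buf, "ok", sizeof(buf)) == 2);
	assert(strscpy_emul(buf, "too long", sizeof(buf)) == E2BIG_NEG);
	assert(strcmp(buf, "too") == 0);   /* truncated but terminated */
	return 0;
}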
diff --git a/drivers/staging/wimax/i2400m/rx.c b/drivers/staging/wimax/i2400m/rx.c
index c9fb619a9e01..702a1e2fabcd 100644
--- a/drivers/staging/wimax/i2400m/rx.c
+++ b/drivers/staging/wimax/i2400m/rx.c
@@ -485,8 +485,7 @@ struct i2400m_roq_data {
* store the sequence number (sn) and the cs (packet type) coming from
* the RX payload header from the device.
*/
-struct i2400m_roq
-{
+struct i2400m_roq {
unsigned ws;
struct sk_buff_head queue;
struct i2400m_roq_log *log;
@@ -819,7 +818,7 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
*/
static
void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
- struct sk_buff * skb, unsigned lbn)
+ struct sk_buff *skb, unsigned lbn)
{
struct device *dev = i2400m_dev(i2400m);
unsigned nsn, len;
@@ -882,7 +881,7 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
*/
static
void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
- struct sk_buff * skb, unsigned sn)
+ struct sk_buff *skb, unsigned sn)
{
struct device *dev = i2400m_dev(i2400m);
unsigned nsn, old_ws, len;
diff --git a/drivers/staging/wimax/i2400m/tx.c b/drivers/staging/wimax/i2400m/tx.c
index e9436212fe54..8c01f42876ea 100644
--- a/drivers/staging/wimax/i2400m/tx.c
+++ b/drivers/staging/wimax/i2400m/tx.c
@@ -785,7 +785,7 @@ try_new:
d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n",
le32_to_cpu(tx_msg->pld[num_pls].val),
pl_type, buf_len);
- tx_msg->num_pls = le16_to_cpu(num_pls+1);
+ tx_msg->num_pls = cpu_to_le16(num_pls + 1);
tx_msg->size += padded_len;
d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
@@ -893,10 +893,10 @@ skip:
current->pid, (void *) tx_msg - i2400m->tx_buf,
(size_t) tx_msg->offset, (size_t) tx_msg->size,
(size_t) tx_msg_moved->size);
- tx_msg_moved->barker = le32_to_cpu(I2400M_H2D_PREVIEW_BARKER);
- tx_msg_moved->sequence = le32_to_cpu(i2400m->tx_sequence++);
+ tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
+ tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);
- pls = le32_to_cpu(tx_msg_moved->num_pls);
+ pls = le16_to_cpu(tx_msg_moved->num_pls);
i2400m->tx_pl_num += pls; /* Update stats */
if (pls > i2400m->tx_pl_max)
i2400m->tx_pl_max = pls;
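Aside (not part of the patch): the tx.c hunks fix conversion-direction bugs that are invisible on little-endian hosts: a __le16/__le32 field must be written with cpu_to_le*() and read with le*_to_cpu(), and the helper width must match the field (num_pls is 16-bit). A tiny demonstration of the pairing, with put_le16/get_le16 as illustrative stand-ins:

#include <assert.h>
#include <stdint.h>

/* cpu_to_le16 on write, le16_to_cpu on read: the directions must pair up. */
static void put_le16(uint8_t *p, uint16_t v) { p[0] = v; p[1] = v >> 8; }
static uint16_t get_le16(const uint8_t *p)  { return p[0] | (uint16_t)(p[1] << 8); }

int main(void)
{
	uint8_t num_pls[2];     /* stands in for a __le16 descriptor field */

	put_le16(num_pls, 3);               /* like cpu_to_le16(num_pls + 1) */
	assert(get_le16(num_pls) == 3);     /* like le16_to_cpu(tx_msg->num_pls) */
	assert(num_pls[0] == 3 && num_pls[1] == 0);  /* LE layout on any host */
	return 0;
}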
diff --git a/drivers/staging/wimax/i2400m/usb.c b/drivers/staging/wimax/i2400m/usb.c
index f250d03ce7c7..481b1ccde983 100644
--- a/drivers/staging/wimax/i2400m/usb.c
+++ b/drivers/staging/wimax/i2400m/usb.c
@@ -333,8 +333,8 @@ static void i2400mu_get_drvinfo(struct net_device *net_dev,
struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
struct usb_device *udev = i2400mu->usb_dev;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, i2400m->fw_name ? : "",
sizeof(info->fw_version));
usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a15abb2c8f54..6f9666dc0277 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -569,24 +569,22 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
goto bail;
}
- /* Allocate a buf of size req->len */
- msgbuf = kmalloc(req->len, GFP_KERNEL);
- if (msgbuf) {
- if (copy_from_user(msgbuf, (void __user *)req->data, req->len))
- result = -EFAULT;
- else
- result = p80211req_dorequest(wlandev, msgbuf);
+ msgbuf = memdup_user(req->data, req->len);
+ if (IS_ERR(msgbuf)) {
+ result = PTR_ERR(msgbuf);
+ goto bail;
+ }
- if (result == 0) {
- if (copy_to_user
- ((void __user *)req->data, msgbuf, req->len)) {
- result = -EFAULT;
- }
+ result = p80211req_dorequest(wlandev, msgbuf);
+
+ if (result == 0) {
+ if (copy_to_user
+ ((void __user *)req->data, msgbuf, req->len)) {
+ result = -EFAULT;
}
- kfree(msgbuf);
- } else {
- result = -ENOMEM;
}
+ kfree(msgbuf);
+
bail:
/* If allocate,copyfrom or copyto fails, return errno */
return result;
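Aside (not part of the patch): memdup_user() folds the kmalloc + copy_from_user + error-cleanup triple into one call that returns an ERR_PTR on failure. A simplified userspace model of that shape (memdup_buf and the ERR_PTR helpers are stand-ins; the real helper also returns -EFAULT when the user copy faults):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* ERR_PTR-style helpers modeled on <linux/err.h>. */
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static long ptr_err(const void *p) { return (long)(intptr_t)p; }

/* memdup_user()-like helper: allocate + copy + error handling in one call. */
static void *memdup_buf(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return err_ptr(-ENOMEM);
	memcpy(p, src, len);   /* the real helper would return -EFAULT if the
				* copy_from_user() part faulted */
	return p;
}

int main(void)
{
	const char req[] = "ioctl payload";
	char *msgbuf = memdup_buf(req, sizeof(req));

	if (is_err(msgbuf))
		return (int)-ptr_err(msgbuf);
	/* ... p80211req_dorequest(wlandev, msgbuf) would run here ... */
	free(msgbuf);
	return 0;
}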
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 9b3eb2e8c92a..b926e1d6c7b8 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
length += sizeof(struct cpl_tx_data_iso);
-#define MAX_IMM_TX_PKT_LEN 256
- return length <= MAX_IMM_TX_PKT_LEN;
+ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
/*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d1f887e3603f..5a66854def95 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -241,6 +241,7 @@ struct target_core_file_cmd {
unsigned long len;
struct se_cmd *cmd;
struct kiocb iocb;
+ struct bio_vec bvecs[];
};
static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -268,29 +269,22 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct target_core_file_cmd *aio_cmd;
struct iov_iter iter;
struct scatterlist *sg;
- struct bio_vec *bvec;
ssize_t len = 0;
int ret = 0, i;
- aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
+ aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
if (!aio_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
- if (!bvec) {
- kfree(aio_cmd);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
for_each_sg(sgl, sg, sgl_nents, i) {
- bvec[i].bv_page = sg_page(sg);
- bvec[i].bv_len = sg->length;
- bvec[i].bv_offset = sg->offset;
+ aio_cmd->bvecs[i].bv_page = sg_page(sg);
+ aio_cmd->bvecs[i].bv_len = sg->length;
+ aio_cmd->bvecs[i].bv_offset = sg->offset;
len += sg->length;
}
- iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
aio_cmd->cmd = cmd;
aio_cmd->len = len;
@@ -307,8 +301,6 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else
ret = call_read_iter(file, &aio_cmd->iocb, &iter);
- kfree(bvec);
-
if (ret != -EIOCBQUEUED)
cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
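Aside (not part of the patch): converting the separate bvec kcalloc() into a flexible array member turns two allocations and two failure paths into one. A compact userspace model of the pattern; STRUCT_SIZE is a simplified stand-in for the kernel's struct_size(), which additionally guards against multiplication overflow:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct bio_vec_like { void *page; unsigned int len, offset; };

struct aio_cmd {
	unsigned long len;
	struct bio_vec_like bvecs[];   /* flexible array replaces a second kmalloc */
};

/* Simplified stand-in for struct_size(); the kernel macro also checks
 * that the multiplication cannot overflow. */
#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + sizeof(((type *)0)->member[0]) * (size_t)(n))

int main(void)
{
	unsigned int nents = 4;
	struct aio_cmd *cmd = malloc(STRUCT_SIZE(struct aio_cmd, bvecs, nents));

	if (!cmd)
		return 1;
	for (unsigned int i = 0; i < nents; i++)
		cmd->bvecs[i].len = 512;   /* fill like the for_each_sg() loop does */
	printf("one allocation of %zu bytes\n",
	       STRUCT_SIZE(struct aio_cmd, bvecs, nents));
	free(cmd);                         /* and a single kfree on teardown */
	return 0;
}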
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8ed93fd205c7..ee3d52061281 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -315,10 +315,8 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
-
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
+ &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
@@ -638,8 +636,7 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
return -ENODEV;
}
- bip = bio_integrity_alloc(bio, GFP_NOIO,
- min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
+ bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
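Aside (not part of the patch): bio_max_segs() centralizes the min_t(..., BIO_MAX_PAGES) clamp that iblock and pscsi used to open-code. Its behavior, modeled in plain C with an illustrative constant:

#include <assert.h>

#define BIO_MAX_PAGES 256U   /* illustrative value */

/* bio_max_segs()-style helper: clamp a segment count to what a bio can hold. */
static unsigned int bio_max_segs_emul(unsigned int nr_segs)
{
	return nr_segs < BIO_MAX_PAGES ? nr_segs : BIO_MAX_PAGES;
}

int main(void)
{
	assert(bio_max_segs_emul(10) == 10);
	assert(bio_max_segs_emul(10000) == BIO_MAX_PAGES);
	return 0;
}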
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7994f27e4527..3cbc074992bc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -881,7 +881,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio) {
new_bio:
- nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+ nr_vecs = bio_max_segs(nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
@@ -1000,8 +1000,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->timeout = PS_TIMEOUT_OTHER;
scsi_req(req)->retries = PS_RETRY;
- blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
- (cmd->sam_task_attr == TCM_HEAD_TAG),
+ blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG),
pscsi_req_done);
return 0;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 44e15d7fb2f0..66d6f1d06f21 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
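Aside (not part of the patch): the XCOPY rework replaces the configfs depend/undepend dance with a percpu_ref pin on the remote LUN, taken while walking the ACL's LUN list under RCU, so a LUN being deleted refuses new copies instead of being yanked mid-I/O. A toy C11-atomics model of the tryget-live semantics (it ignores the check-vs-increment race that the real percpu_ref machinery closes; all names are illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct ref {
	atomic_int count;
	atomic_bool live;
};

static bool ref_tryget_live(struct ref *r)
{
	if (!atomic_load(&r->live))
		return false;              /* dying/dead: refuse new references */
	atomic_fetch_add(&r->count, 1);
	return true;
}

static void ref_put(struct ref *r)
{
	atomic_fetch_sub(&r->count, 1);
}

int main(void)
{
	struct ref lun_ref = { .count = 1, .live = true };

	/* lookup path: pin the LUN before using its backing device... */
	assert(ref_tryget_live(&lun_ref));
	/* ...and drop the pin when the copy finishes. */
	ref_put(&lun_ref);

	atomic_store(&lun_ref.live, false);   /* LUN removal begins */
	assert(!ref_tryget_live(&lun_ref));   /* new XCOPY I/O is refused */
	return 0;
}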
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index c56a1bde9417..e5f20005179a 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index c981757ba0d4..7a77e375b503 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -148,7 +149,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- might_sleep();
+ cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
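Aside (not part of the patch): might_sleep() is only a debug annotation (plus a voluntary-preemption point on some configs), so the RPC loop was not guaranteed to yield; cond_resched() inserts an explicit scheduling point on every iteration. A loose userspace analogy using sched_yield():

#include <sched.h>
#include <stdio.h>

/* stands in for "more RPC work pending from secure world" */
static int rpc_pending(int *budget)
{
	return (*budget)-- > 0;
}

int main(void)
{
	int budget = 3;   /* pretend three RPCs arrive back to back */

	while (rpc_pending(&budget)) {
		/* ... handle one RPC request here ... */
		sched_yield();   /* like cond_resched(): an actual scheduling point */
	}
	puts("rpc loop done");
	return 0;
}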
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 7b2d919da2ac..81ff593ac4ec 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2019, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#ifndef _OPTEE_MSG_H
#define _OPTEE_MSG_H
@@ -12,11 +12,9 @@
* This file defines the OP-TEE message protocol used to communicate
* with an instance of OP-TEE running in secure world.
*
- * This file is divided into three sections.
+ * This file is divided into two sections.
* 1. Formatting of messages.
* 2. Requests from normal world
- * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
- * tee-supplicant.
*/
/*****************************************************************************
@@ -54,8 +52,8 @@
* Every entry in buffer should point to a 4k page beginning (12 least
* significant bits must be equal to zero).
*
- * 12 least significant bints of optee_msg_param.u.tmem.buf_ptr should hold page
- * offset of the user buffer.
+ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold
+ * page offset of user buffer.
*
* So, entries should be placed like members of this structure:
*
@@ -176,17 +174,9 @@ struct optee_msg_param {
* @params: the parameters supplied to the OS Command
*
* All normal calls to Trusted OS uses this struct. If cmd requires further
- * information than what these field holds it can be passed as a parameter
+ * information than what these fields hold it can be passed as a parameter
* tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding
- * attrs field). All parameters tagged as meta has to come first.
- *
- * Temp memref parameters can be fragmented if supported by the Trusted OS
- * (when optee_smc.h is bearer of this protocol this is indicated with
- * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
- * fragmented then has all but the last fragment the
- * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented
- * it will still be presented as a single logical memref to the Trusted
- * Application.
+ * attrs field). All parameters tagged as meta have to come first.
*/
struct optee_msg_arg {
u32 cmd;
@@ -199,7 +189,7 @@ struct optee_msg_arg {
u32 num_params;
/* num_params tells the actual number of element in params */
- struct optee_msg_param params[0];
+ struct optee_msg_param params[];
};
/**
@@ -290,15 +280,12 @@ struct optee_msg_arg {
* OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
* information is passed as:
* [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
- * [| OPTEE_MSG_ATTR_FRAGMENT]
+ * [| OPTEE_MSG_ATTR_NONCONTIG]
* [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
* [in] param[0].u.tmem.size size (of first fragment)
* [in] param[0].u.tmem.shm_ref holds shared memory reference
- * ...
- * The shared memory can optionally be fragmented, temp memrefs can follow
- * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
*
- * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
* memory reference. The information is passed as:
* [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
* [in] param[0].u.rmem.shm_ref holds shared memory reference
@@ -313,131 +300,4 @@ struct optee_msg_arg {
#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
-/*****************************************************************************
- * Part 3 - Requests from secure world, RPC
- *****************************************************************************/
-
-/*
- * All RPC is done with a struct optee_msg_arg as bearer of information,
- * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below
- *
- * RPC communication with tee-supplicant is reversed compared to normal
- * client communication desribed above. The supplicant receives requests
- * and sends responses.
- */
-
-/*
- * Load a TA into memory, defined in tee-supplicant
- */
-#define OPTEE_MSG_RPC_CMD_LOAD_TA 0
-
-/*
- * Reserved
- */
-#define OPTEE_MSG_RPC_CMD_RPMB 1
-
-/*
- * File system access, defined in tee-supplicant
- */
-#define OPTEE_MSG_RPC_CMD_FS 2
-
-/*
- * Get time
- *
- * Returns number of seconds and nano seconds since the Epoch,
- * 1970-01-01 00:00:00 +0000 (UTC).
- *
- * [out] param[0].u.value.a Number of seconds
- * [out] param[0].u.value.b Number of nano seconds.
- */
-#define OPTEE_MSG_RPC_CMD_GET_TIME 3
-
-/*
- * Wait queue primitive, helper for secure world to implement a wait queue.
- *
- * If secure world need to wait for a secure world mutex it issues a sleep
- * request instead of spinning in secure world. Conversely is a wakeup
- * request issued when a secure world mutex with a thread waiting thread is
- * unlocked.
- *
- * Waiting on a key
- * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
- * [in] param[0].u.value.b wait key
- *
- * Waking up a key
- * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
- * [in] param[0].u.value.b wakeup key
- */
-#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4
-#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0
-#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1
-
-/*
- * Suspend execution
- *
- * [in] param[0].value .a number of milliseconds to suspend
- */
-#define OPTEE_MSG_RPC_CMD_SUSPEND 5
-
-/*
- * Allocate a piece of shared memory
- *
- * Shared memory can optionally be fragmented, to support that additional
- * spare param entries are allocated to make room for eventual fragments.
- * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when
- * unused. All returned temp memrefs except the last should have the
- * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
- *
- * [in] param[0].u.value.a type of memory one of
- * OPTEE_MSG_RPC_SHM_TYPE_* below
- * [in] param[0].u.value.b requested size
- * [in] param[0].u.value.c required alignment
- *
- * [out] param[0].u.tmem.buf_ptr physical address (of first fragment)
- * [out] param[0].u.tmem.size size (of first fragment)
- * [out] param[0].u.tmem.shm_ref shared memory reference
- * ...
- * [out] param[n].u.tmem.buf_ptr physical address
- * [out] param[n].u.tmem.size size
- * [out] param[n].u.tmem.shm_ref shared memory reference (same value
- * as in param[n-1].u.tmem.shm_ref)
- */
-#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6
-/* Memory that can be shared with a non-secure user space application */
-#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0
-/* Memory only shared with non-secure kernel */
-#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1
-
-/*
- * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
- *
- * [in] param[0].u.value.a type of memory one of
- * OPTEE_MSG_RPC_SHM_TYPE_* above
- * [in] param[0].u.value.b value of shared memory reference
- * returned in param[0].u.tmem.shm_ref
- * above
- */
-#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
-
-/*
- * Access a device on an i2c bus
- *
- * [in] param[0].u.value.a mode: RD(0), WR(1)
- * [in] param[0].u.value.b i2c adapter
- * [in] param[0].u.value.c i2c chip
- *
- * [in] param[1].u.value.a i2c control flags
- *
- * [in/out] memref[2] buffer to exchange the transfer data
- * with the secure world
- *
- * [out] param[3].u.value.a bytes transferred by the driver
- */
-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER 21
-/* I2C master transfer modes */
-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD 0
-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR 1
-/* I2C master control flags */
-#define OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT BIT(0)
-
#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
new file mode 100644
index 000000000000..b8275140cef8
--- /dev/null
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2016-2021, Linaro Limited
+ */
+
+#ifndef __OPTEE_RPC_CMD_H
+#define __OPTEE_RPC_CMD_H
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::arg holds values defined by OPTEE_RPC_CMD_* below.
+ * Only the commands handled by the kernel driver are defined here.
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nano seconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] value[0].a Number of seconds
+ * [out] value[0].b Number of nano seconds.
+ */
+#define OPTEE_RPC_CMD_GET_TIME 3
+
+/*
+ * Wait queue primitive, helper for secure world to implement a wait queue.
+ *
+ * If secure world needs to wait for a secure world mutex it issues a sleep
+ * request instead of spinning in secure world. Conversely, a wakeup
+ * request is issued when a secure world mutex with a waiting thread is
+ * unlocked.
+ *
+ * Waiting on a key
+ * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP
+ * [in] value[0].b Wait key
+ *
+ * Waking up a key
+ * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP
+ * [in] value[0].b Wakeup key
+ */
+#define OPTEE_RPC_CMD_WAIT_QUEUE 4
+#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0
+#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1
+
+/*
+ * Suspend execution
+ *
+ * [in] value[0].a Number of milliseconds to suspend
+ */
+#define OPTEE_RPC_CMD_SUSPEND 5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * [in] value[0].a Type of memory one of
+ * OPTEE_RPC_SHM_TYPE_* below
+ * [in] value[0].b Requested size
+ * [in] value[0].c Required alignment
+ * [out] memref[0] Buffer
+ */
+#define OPTEE_RPC_CMD_SHM_ALLOC 6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_RPC_SHM_TYPE_APPL 0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_RPC_SHM_TYPE_KERNEL 1
+
+/*
+ * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC
+ *
+ * [in] value[0].a Type of memory one of
+ * OPTEE_RPC_SHM_TYPE_* above
+ * [in] value[0].b Value of shared memory reference or cookie
+ */
+#define OPTEE_RPC_CMD_SHM_FREE 7
+
+/*
+ * Issue master requests (read and write operations) to an I2C chip.
+ *
+ * [in] value[0].a Transfer mode (OPTEE_RPC_I2C_TRANSFER_*)
+ * [in] value[0].b The I2C bus (a.k.a. adapter).
+ * 16 bit field.
+ * [in] value[0].c The I2C chip (a.k.a. address).
+ * 16 bit field (either 7 or 10 bit effective).
+ * [in] value[1].a The I2C master control flags (i.e., 10 bit address).
+ * 16 bit field.
+ * [in/out] memref[2] Buffer used for data transfers.
+ * [out] value[3].a Number of bytes transferred by the REE.
+ */
+#define OPTEE_RPC_CMD_I2C_TRANSFER 21
+
+/* I2C master transfer modes */
+#define OPTEE_RPC_I2C_TRANSFER_RD 0
+#define OPTEE_RPC_I2C_TRANSFER_WR 1
+
+/* I2C master control flags */
+#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
+
+#endif /*__OPTEE_RPC_CMD_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 777ad54d4c2c..80eb763a8a80 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2019, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H
@@ -39,10 +39,10 @@
/*
* Function specified by SMC Calling convention
*
- * Return one of the following UIDs if using API specified in this file
- * without further extentions:
- * 65cb6b93-af0c-4617-8ed6-644a8d1140f8
- * see also OPTEE_SMC_UID_* in optee_msg.h
+ * Return the following UID if using API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * see also OPTEE_MSG_UID_* in optee_msg.h
*/
#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
#define OPTEE_SMC_CALLS_UID \
@@ -53,7 +53,7 @@
/*
* Function specified by SMC Calling convention
*
- * Returns 2.0 if using API specified in this file without further extentions.
+ * Returns 2.0 if using API specified in this file without further extensions.
* see also OPTEE_MSG_REVISION_* in optee_msg.h
*/
#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
@@ -109,8 +109,8 @@ struct optee_smc_call_get_os_revision_result {
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
- * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
- * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
* a3 Cache settings, not used if physical pointer is in a predefined shared
* memory area else per OPTEE_SMC_SHM_*
* a4-6 Not used
@@ -139,7 +139,7 @@ struct optee_smc_call_get_os_revision_result {
* optee_msg_arg.
* OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
* try again later.
- * OPTEE_SMC_RETURN_EBADADDR Bad physcial pointer to struct
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
* optee_msg_arg.
* OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
* OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
@@ -214,8 +214,9 @@ struct optee_smc_get_shm_config_result {
* secure world accepts command buffers located in any parts of non-secure RAM
*/
#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
-
-/* Secure world supports Shared Memory with a NULL buffer reference */
+/* Secure world is built with virtualization support */
+#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
+/* Secure world supports Shared Memory with a NULL reference */
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
@@ -245,8 +246,8 @@ struct optee_smc_exchange_capabilities_result {
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
- * a1 Upper 32bit of a 64bit Shared memory cookie
- * a2 Lower 32bit of a 64bit Shared memory cookie
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
* a3-7 Preserved
*
* Cache empty return register usage:
@@ -294,6 +295,31 @@ struct optee_smc_disable_shm_cache_result {
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
/*
+ * Query OP-TEE about number of supported threads
+ *
+ * Normal World OS or Hypervisor issues this call to find out how many
+ * threads OP-TEE supports. That is how many standard calls can be issued
+ * in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT.
+ *
+ * Call requests usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Number of threads
+ * a2-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15
+#define OPTEE_SMC_GET_THREAD_COUNT \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
+
+/*
* Resume from RPC (for example after processing a foreign interrupt)
*
* Call register usage:
@@ -341,16 +367,16 @@ struct optee_smc_disable_shm_cache_result {
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
- * a1 Upper 32bits of 64bit physical pointer to allocated
+ * a1 Upper 32 bits of 64-bit physical pointer to allocated
* memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
* be allocated.
- * a2 Lower 32bits of 64bit physical pointer to allocated
+ * a2 Lower 32 bits of 64-bit physical pointer to allocated
* memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
* be allocated
* a3 Preserved
- * a4 Upper 32bits of 64bit Shared memory cookie used when freeing
+ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
* the memory or doing an RPC
- * a5 Lower 32bits of 64bit Shared memory cookie used when freeing
+ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
* the memory or doing an RPC
* a6-7 Preserved
*/
@@ -363,9 +389,9 @@ struct optee_smc_disable_shm_cache_result {
*
* "Call" register usage:
* a0 This value, OPTEE_SMC_RETURN_RPC_FREE
- * a1 Upper 32bits of 64bit shared memory cookie belonging to this
+ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
* argument memory
- * a2 Lower 32bits of 64bit shared memory cookie belonging to this
+ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
* argument memory
* a3-7 Resume information, must be preserved
*
@@ -379,7 +405,7 @@ struct optee_smc_disable_shm_cache_result {
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
/*
- * Deliver foreign interrupt to normal world.
+ * Deliver a foreign interrupt in normal world.
*
* "Call" register usage:
* a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
@@ -389,7 +415,7 @@ struct optee_smc_disable_shm_cache_result {
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1-7 Preserved
*/
-#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4
#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
@@ -405,10 +431,10 @@ struct optee_smc_disable_shm_cache_result {
*
* "Call" register usage:
* a0 OPTEE_SMC_RETURN_RPC_CMD
- * a1 Upper 32bit of a 64bit Shared memory cookie holding a
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
* struct optee_msg_arg, must be preserved, only the data should
* be updated
- * a2 Lower 32bit of a 64bit Shared memory cookie holding a
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
* struct optee_msg_arg, must be preserved, only the data should
* be updated
* a3-7 Resume information, must be preserved
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 1e3614e4798f..1849180b0278 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -12,6 +12,7 @@
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
struct wq_entry {
struct list_head link;
@@ -54,8 +55,9 @@ bad:
static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
struct optee_msg_arg *arg)
{
- struct i2c_client client = { 0 };
struct tee_param *params;
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg = { };
size_t i;
int ret = -EOPNOTSUPP;
u8 attr[] = {
@@ -85,48 +87,48 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
goto bad;
}
- client.adapter = i2c_get_adapter(params[0].u.value.b);
- if (!client.adapter)
+ adapter = i2c_get_adapter(params[0].u.value.b);
+ if (!adapter)
goto bad;
- if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) {
- if (!i2c_check_functionality(client.adapter,
+ if (params[1].u.value.a & OPTEE_RPC_I2C_FLAGS_TEN_BIT) {
+ if (!i2c_check_functionality(adapter,
I2C_FUNC_10BIT_ADDR)) {
- i2c_put_adapter(client.adapter);
+ i2c_put_adapter(adapter);
goto bad;
}
- client.flags = I2C_CLIENT_TEN;
+ msg.flags = I2C_M_TEN;
}
- client.addr = params[0].u.value.c;
- snprintf(client.name, I2C_NAME_SIZE, "i2c%d", client.adapter->nr);
+ msg.addr = params[0].u.value.c;
+ msg.buf = params[2].u.memref.shm->kaddr;
+ msg.len = params[2].u.memref.size;
switch (params[0].u.value.a) {
- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD:
- ret = i2c_master_recv(&client, params[2].u.memref.shm->kaddr,
- params[2].u.memref.size);
+ case OPTEE_RPC_I2C_TRANSFER_RD:
+ msg.flags |= I2C_M_RD;
break;
- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR:
- ret = i2c_master_send(&client, params[2].u.memref.shm->kaddr,
- params[2].u.memref.size);
+ case OPTEE_RPC_I2C_TRANSFER_WR:
break;
default:
- i2c_put_adapter(client.adapter);
+ i2c_put_adapter(adapter);
goto bad;
}
+ ret = i2c_transfer(adapter, &msg, 1);
+
if (ret < 0) {
arg->ret = TEEC_ERROR_COMMUNICATION;
} else {
- params[3].u.value.a = ret;
+ params[3].u.value.a = msg.len;
if (optee_to_msg_param(arg->params, arg->num_params, params))
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
else
arg->ret = TEEC_SUCCESS;
}
- i2c_put_adapter(client.adapter);
+ i2c_put_adapter(adapter);
kfree(params);
return;
bad:
@@ -194,10 +196,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
goto bad;
switch (arg->params[0].u.value.a) {
- case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
+ case OPTEE_RPC_WAIT_QUEUE_SLEEP:
wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
break;
- case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
+ case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
break;
default:
@@ -267,11 +269,11 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
struct tee_shm *shm;
param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
- param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
param.u.value.b = sz;
param.u.value.c = 0;
- ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
+ ret = optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_ALLOC, 1, &param);
if (ret)
return ERR_PTR(-ENOMEM);
@@ -308,10 +310,10 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
sz = arg->params[0].u.value.b;
switch (arg->params[0].u.value.a) {
- case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+ case OPTEE_RPC_SHM_TYPE_APPL:
shm = cmd_alloc_suppl(ctx, sz);
break;
- case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
break;
default:
@@ -383,7 +385,7 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
struct tee_param param;
param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
- param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
param.u.value.b = tee_shm_get_id(shm);
param.u.value.c = 0;
@@ -400,7 +402,7 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
*/
tee_shm_put(shm);
- optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
+ optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
}
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
@@ -418,10 +420,10 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
switch (arg->params[0].u.value.a) {
- case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+ case OPTEE_RPC_SHM_TYPE_APPL:
cmd_free_suppl(ctx, shm);
break;
- case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
tee_shm_free(shm);
break;
default:
@@ -458,23 +460,23 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
}
switch (arg->cmd) {
- case OPTEE_MSG_RPC_CMD_GET_TIME:
+ case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
break;
- case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+ case OPTEE_RPC_CMD_WAIT_QUEUE:
handle_rpc_func_cmd_wq(optee, arg);
break;
- case OPTEE_MSG_RPC_CMD_SUSPEND:
+ case OPTEE_RPC_CMD_SUSPEND:
handle_rpc_func_cmd_wait(arg);
break;
- case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+ case OPTEE_RPC_CMD_SHM_ALLOC:
free_pages_list(call_ctx);
handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
break;
- case OPTEE_MSG_RPC_CMD_SHM_FREE:
+ case OPTEE_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
break;
- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER:
+ case OPTEE_RPC_CMD_I2C_TRANSFER:
handle_rpc_func_cmd_i2c_transfer(ctx, arg);
break;
default:
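Aside (not part of the patch): dropping the faked on-stack struct i2c_client lets the RPC handler talk to the adapter directly, since one struct i2c_msg encodes address, direction flag, and buffer for either transfer direction. A condensed kernel-context sketch of the resulting call shape (rpc_i2c_xfer and its parameters are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/i2c.h>

/* One i2c_msg describes a read or a write, so both directions collapse
 * into a single i2c_transfer() call instead of i2c_master_recv()/send(). */
static int rpc_i2c_xfer(int busnum, u16 addr, u8 *buf, u16 len, bool read)
{
	struct i2c_adapter *adapter = i2c_get_adapter(busnum);
	struct i2c_msg msg = {
		.addr = addr,
		.flags = read ? I2C_M_RD : 0,
		.buf = buf,
		.len = len,
	};
	int ret;

	if (!adapter)
		return -ENODEV;
	ret = i2c_transfer(adapter, &msg, 1);   /* one message, either direction */
	i2c_put_adapter(adapter);
	return ret < 0 ? ret : 0;
}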
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 7edc8dc6bbab..d7f44deab5b1 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -450,15 +450,6 @@ depends on (ARCH_STI || ARCH_STM32) && OF
source "drivers/thermal/st/Kconfig"
endmenu
-config TANGO_THERMAL
- tristate "Tango thermal management"
- depends on ARCH_TANGO || COMPILE_TEST
- help
- Enable the Tango thermal driver, which supports the primitive
- temperature sensor embedded in Tango chips since the SMP8758.
- This sensor only generates a 1-bit signal to indicate whether
- the die temperature exceeds a programmable threshold.
-
source "drivers/thermal/tegra/Kconfig"
config GENERIC_ADC_THERMAL
@@ -476,14 +467,6 @@ depends on (ARCH_QCOM && OF) || COMPILE_TEST
source "drivers/thermal/qcom/Kconfig"
endmenu
-config ZX2967_THERMAL
- tristate "Thermal sensors on zx2967 SoC"
- depends on ARCH_ZX || COMPILE_TEST
- help
- Enable the zx2967 thermal sensors driver, which supports
- the primitive temperature sensor embedded in zx2967 SoCs.
- This sensor generates the real time die temperature.
-
config UNIPHIER_THERMAL
tristate "Socionext UniPhier thermal driver"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index b64dd50a6629..82fc3e616e54 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -42,7 +42,6 @@ obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
-obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_IMX_SC_THERMAL) += imx_sc_thermal.o
obj-$(CONFIG_IMX8MM_THERMAL) += imx8mm_thermal.o
@@ -57,7 +56,6 @@ obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
-obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 612f063c1cfc..10af3341e5ea 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -76,7 +76,9 @@ struct cpufreq_cooling_device {
struct em_perf_domain *em;
struct cpufreq_policy *policy;
struct list_head node;
+#ifndef CONFIG_SMP
struct time_in_idle *idle_time;
+#endif
struct freq_qos_request qos_req;
};
@@ -132,14 +134,25 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
}
/**
- * get_load() - get load for a cpu since last updated
- * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
- * @cpu: cpu number
- * @cpu_idx: index of the cpu in time_in_idle*
+ * get_load() - get load for a cpu
+ * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
+ * @cpu: cpu number
+ * @cpu_idx: index of the cpu in time_in_idle array
*
* Return: The average load of cpu @cpu in percentage since this
* function was last called.
*/
+#ifdef CONFIG_SMP
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
+ int cpu_idx)
+{
+ unsigned long max = arch_scale_cpu_capacity(cpu);
+ unsigned long util;
+
+ util = sched_cpu_util(cpu, max);
+ return (util * 100) / max;
+}
+#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
int cpu_idx)
{
@@ -161,6 +174,7 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
return load;
}
+#endif /* CONFIG_SMP */
/**
* get_dynamic_power() - calculate the dynamic power
@@ -346,6 +360,36 @@ static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
+#ifdef CONFIG_SMP
+static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
+{
+ return 0;
+}
+
+static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
+{
+}
+#else
+static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
+{
+ unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);
+
+ cpufreq_cdev->idle_time = kcalloc(num_cpus,
+ sizeof(*cpufreq_cdev->idle_time),
+ GFP_KERNEL);
+ if (!cpufreq_cdev->idle_time)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
+{
+ kfree(cpufreq_cdev->idle_time);
+ cpufreq_cdev->idle_time = NULL;
+}
+#endif /* CONFIG_SMP */
+
static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned long state)
{
@@ -441,7 +485,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
frequency = get_state_freq(cpufreq_cdev, state);
ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
- if (ret > 0) {
+ if (ret >= 0) {
cpufreq_cdev->cpufreq_state = state;
cpus = cpufreq_cdev->policy->cpus;
max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
@@ -485,7 +529,7 @@ __cpufreq_cooling_register(struct device_node *np,
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
char dev_name[THERMAL_NAME_LENGTH];
- unsigned int i, num_cpus;
+ unsigned int i;
struct device *dev;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
@@ -496,7 +540,6 @@ __cpufreq_cooling_register(struct device_node *np,
return ERR_PTR(-ENODEV);
}
-
if (IS_ERR_OR_NULL(policy)) {
pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
return ERR_PTR(-EINVAL);
@@ -514,12 +557,10 @@ __cpufreq_cooling_register(struct device_node *np,
return ERR_PTR(-ENOMEM);
cpufreq_cdev->policy = policy;
- num_cpus = cpumask_weight(policy->related_cpus);
- cpufreq_cdev->idle_time = kcalloc(num_cpus,
- sizeof(*cpufreq_cdev->idle_time),
- GFP_KERNEL);
- if (!cpufreq_cdev->idle_time) {
- cdev = ERR_PTR(-ENOMEM);
+
+ ret = allocate_idle_time(cpufreq_cdev);
+ if (ret) {
+ cdev = ERR_PTR(ret);
goto free_cdev;
}
@@ -579,7 +620,7 @@ remove_qos_req:
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time:
- kfree(cpufreq_cdev->idle_time);
+ free_idle_time(cpufreq_cdev);
free_cdev:
kfree(cpufreq_cdev);
return cdev;
@@ -672,7 +713,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
thermal_cooling_device_unregister(cdev);
freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
- kfree(cpufreq_cdev->idle_time);
+ free_idle_time(cpufreq_cdev);
kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 4d74994f160a..180edec34e07 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -95,7 +95,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
thermal_zone_device_update(thermal->zone,
THERMAL_EVENT_UNSPECIFIED);
- delay = msecs_to_jiffies(thermal->zone->passive_delay);
+ delay = thermal->zone->passive_delay_jiffies;
queue_delayed_work(system_freezable_wq, &thermal->work, delay);
return;
}
@@ -245,7 +245,7 @@ static int da9062_thermal_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev,
"TJUNC temperature polling period set at %d ms\n",
- thermal->zone->passive_delay);
+ jiffies_to_msecs(thermal->zone->passive_delay_jiffies));
ret = platform_get_irq_byname(pdev, "THERMAL");
if (ret < 0) {
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 7a4170a0b51f..92acae53df49 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -258,7 +258,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
* power being applied, slowing down the controller)
*/
d = mul_frac(tz->tzp->k_d, err - params->prev_err);
- d = div_frac(d, tz->passive_delay);
+ d = div_frac(d, jiffies_to_msecs(tz->passive_delay_jiffies));
params->prev_err = err;
power_range = p + i + d;
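
The derivative term is the error slope scaled by the gain and normalized by the passive polling period, which is now read back from jiffies. A standalone sketch of the fixed-point math, assuming the governor's FRAC_BITS = 10 format for mul_frac()/div_frac():

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 10	/* assumed to match the governor's fixed-point format */

static int64_t mul_frac(int64_t x, int64_t y) { return (x * y) >> FRAC_BITS; }
static int64_t div_frac(int64_t x, int64_t y) { return (x << FRAC_BITS) / y; }

int main(void)
{
	int64_t k_d = 2 << FRAC_BITS;		/* derivative gain, fixed point */
	int64_t err = 3000, prev_err = 2000;	/* mC below the setpoint */
	int passive_delay_ms = 100;		/* jiffies_to_msecs() result */

	/* d term: gain times the error slope, normalized by the period */
	int64_t d = mul_frac(k_d, err - prev_err);
	d = div_frac(d, passive_delay_ms);

	printf("d term = %lld\n", (long long)d);	/* 20480 */
	return 0;
}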
@@ -590,13 +590,42 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
}
/**
+ * check_power_actors() - Check all cooling devices and warn when they are
+ * not power actors
+ * @tz: thermal zone to operate on
+ *
+ * Check all cooling devices in @tz and warn whenever one of them is
+ * missing the power actor API. The warning should help investigate the
+ * issue, which could be e.g. a missing Energy Model for a given device.
+ *
+ * Return: 0 on success, -EINVAL if any cooling device does not implement
+ * the power actor API.
+ */
+static int check_power_actors(struct thermal_zone_device *tz)
+{
+ struct thermal_instance *instance;
+ int ret = 0;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (!cdev_is_power_actor(instance->cdev)) {
+ dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
+ instance->cdev->type);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
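
check_power_actors() leans on cdev_is_power_actor(), which simply tests that all three power-model callbacks are populated. A standalone sketch of that predicate (the ops struct here is a simplified stand-in, not the kernel's):

#include <stdio.h>

/* Simplified stand-in mirroring cdev_is_power_actor() from thermal_core.h. */
struct cooling_ops_sketch {
	int (*get_requested_power)(void *cdev, unsigned int *power);
	int (*state2power)(void *cdev, unsigned long state, unsigned int *power);
	int (*power2state)(void *cdev, unsigned int power, unsigned long *state);
};

static int is_power_actor(const struct cooling_ops_sketch *ops)
{
	/* A power actor must implement all three power-model callbacks. */
	return ops && ops->get_requested_power &&
	       ops->state2power && ops->power2state;
}

int main(void)
{
	struct cooling_ops_sketch empty = { 0 };

	printf("empty ops: %s\n",
	       is_power_actor(&empty) ? "actor" : "not an actor");
	return 0;
}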
+/**
* power_allocator_bind() - bind the power_allocator governor to a thermal zone
* @tz: thermal zone to bind it to
*
* Initialize the PID controller parameters and bind it to the thermal
* zone.
*
- * Return: 0 on success, or -ENOMEM if we ran out of memory.
+ * Return: 0 on success, or -ENOMEM if we ran out of memory, or -EINVAL
+ * when there are unsupported cooling devices in the @tz.
*/
static int power_allocator_bind(struct thermal_zone_device *tz)
{
@@ -604,6 +633,10 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
struct power_allocator_params *params;
int control_temp;
+ ret = check_power_actors(tz);
+ if (ret)
+ return ret;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index 2ae7198d3067..12acb12aac50 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -109,7 +109,7 @@ static void update_passive_instance(struct thermal_zone_device *tz,
* If value is +1, activate a passive instance.
* If value is -1, deactivate a passive instance.
*/
- if (type == THERMAL_TRIP_PASSIVE || type == THERMAL_TRIPS_NONE)
+ if (type == THERMAL_TRIP_PASSIVE)
tz->passive += value;
}
@@ -122,13 +122,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
bool throttle = false;
int old_target;
- if (trip == THERMAL_TRIPS_NONE) {
- trip_temp = tz->forced_passive;
- trip_type = THERMAL_TRIPS_NONE;
- } else {
- tz->ops->get_trip_temp(tz, trip, &trip_temp);
- tz->ops->get_trip_type(tz, trip, &trip_type);
- }
+ tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ tz->ops->get_trip_type(tz, trip, &trip_type);
trend = get_tz_trend(tz, trip);
@@ -189,9 +184,6 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip)
thermal_zone_trip_update(tz, trip);
- if (tz->forced_passive)
- thermal_zone_trip_update(tz, THERMAL_TRIPS_NONE);
-
mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index 8025b21f43fa..ce4f59213c7a 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -8,6 +8,10 @@ config INTEL_POWERCLAMP
enforce idle time which results in more package C-state residency. The
user interface is exposed via generic thermal framework.
+config X86_THERMAL_VECTOR
+ def_bool y
+ depends on X86 && CPU_SUP_INTEL && X86_LOCAL_APIC
+
config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
depends on X86_THERMAL_VECTOR
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index 0d9736ced5d4..ff2ad30ef397 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
+obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 6e479deff76b..d1248ba943a4 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -146,12 +146,18 @@ static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
return 0;
}
+static void int340x_thermal_critical(struct thermal_zone_device *zone)
+{
+ dev_dbg(&zone->device, "%s: critical temperature reached\n", zone->type);
+}
+
static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
.get_temp = int340x_thermal_get_zone_temp,
.get_trip_temp = int340x_thermal_get_trip_temp,
.get_trip_type = int340x_thermal_get_trip_type,
.set_trip_temp = int340x_thermal_set_trip_temp,
.get_trip_hyst = int340x_thermal_get_trip_hyst,
+ .critical = int340x_thermal_critical,
};
static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index 41723c6c6c0c..527c91f5960b 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -326,10 +326,16 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
return 0;
}
+static void pch_critical(struct thermal_zone_device *tzd)
+{
+ dev_dbg(&tzd->device, "%s: critical temperature reached\n", tzd->type);
+}
+
static struct thermal_zone_device_ops tzd_ops = {
.get_temp = pch_thermal_get_temp,
.get_trip_type = pch_get_trip_type,
.get_trip_temp = pch_get_trip_temp,
+ .critical = pch_critical,
};
enum board_ids {
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
new file mode 100644
index 000000000000..f8e882592ba5
--- /dev/null
+++ b/drivers/thermal/intel/therm_throt.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Thermal throttle event support code (such as syslog messaging and rate
+ * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
+ *
+ * This allows consistent reporting of CPU thermal throttle events.
+ *
+ * Maintains a counter in /sys that keeps track of the number of thermal
+ * events, such that the user knows how bad the thermal problem might be
+ * (since the logging to syslog is rate limited).
+ *
+ * Author: Dmitriy Zavin (dmitriyz@google.com)
+ *
+ * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
+ * Inspired by Ross Biro's and Al Borchers' counter code.
+ */
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <asm/processor.h>
+#include <asm/thermal.h>
+#include <asm/traps.h>
+#include <asm/apic.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+
+#include "thermal_interrupt.h"
+
+/* How long to wait between reporting thermal events */
+#define CHECK_INTERVAL (300 * HZ)
+
+#define THERMAL_THROTTLING_EVENT 0
+#define POWER_LIMIT_EVENT 1
+
+/**
+ * struct _thermal_state - Represent the current thermal event state
+ * @next_check: Stores the next timestamp at which logging
+ * the next warning message is allowed.
+ * @last_interrupt_time: Stores the timestamp for the last threshold
+ * high event.
+ * @therm_work: Delayed workqueue structure
+ * @count: Stores the current running count for thermal
+ * or power threshold interrupts.
+ * @last_count: Stores the previous running count for thermal
+ * or power threshold interrupts.
+ * @max_time_ms: Maximum time the CPU spent throttled during
+ * a single thermal threshold high-to-low
+ * excursion.
+ * @total_time_ms: Cumulative time the CPU spent in the
+ * throttled state.
+ * @rate_control_active: Set when a throttling message is logged.
+ * Used for rate control.
+ * @new_event: Stores the last high/low status of the
+ * THERM_STATUS_PROCHOT or
+ * THERM_STATUS_POWER_LIMIT.
+ * @level: Whether this _thermal_state instance is for
+ * CORE level or for PACKAGE level.
+ * @sample_index: Index for storing the next sample in the buffer
+ * temp_samples[].
+ * @sample_count: Total number of samples collected in the buffer
+ * temp_samples[].
+ * @average: The last moving average of temperature samples
+ * @baseline_temp: Temperature at which thermal threshold high
+ * interrupt was generated.
+ * @temp_samples: Storage for temperature samples to calculate
+ * moving average.
+ *
+ * This structure is used to represent data related to thermal state for a CPU.
+ * There is a separate storage for core and package level for each CPU.
+ */
+struct _thermal_state {
+ u64 next_check;
+ u64 last_interrupt_time;
+ struct delayed_work therm_work;
+ unsigned long count;
+ unsigned long last_count;
+ unsigned long max_time_ms;
+ unsigned long total_time_ms;
+ bool rate_control_active;
+ bool new_event;
+ u8 level;
+ u8 sample_index;
+ u8 sample_count;
+ u8 average;
+ u8 baseline_temp;
+ u8 temp_samples[3];
+};
+
+struct thermal_state {
+ struct _thermal_state core_throttle;
+ struct _thermal_state core_power_limit;
+ struct _thermal_state package_throttle;
+ struct _thermal_state package_power_limit;
+ struct _thermal_state core_thresh0;
+ struct _thermal_state core_thresh1;
+ struct _thermal_state pkg_thresh0;
+ struct _thermal_state pkg_thresh1;
+};
+
+/* Callback to handle core threshold interrupts */
+int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
+
+/* Callback to handle package threshold interrupts */
+int (*platform_thermal_package_notify)(__u64 msr_val);
+EXPORT_SYMBOL_GPL(platform_thermal_package_notify);
+
+/* Rate-control support callback; returns true if the
+ * callback implements its own rate control */
+bool (*platform_thermal_package_rate_control)(void);
+EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);
+
+
+static DEFINE_PER_CPU(struct thermal_state, thermal_state);
+
+static atomic_t therm_throt_en = ATOMIC_INIT(0);
+
+static u32 lvtthmr_init __read_mostly;
+
+#ifdef CONFIG_SYSFS
+#define define_therm_throt_device_one_ro(_name) \
+ static DEVICE_ATTR(_name, 0444, \
+ therm_throt_device_show_##_name, \
+ NULL)
+
+#define define_therm_throt_device_show_func(event, name) \
+ \
+static ssize_t therm_throt_device_show_##event##_##name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned int cpu = dev->id; \
+ ssize_t ret; \
+ \
+ preempt_disable(); /* CPU hotplug */ \
+ if (cpu_online(cpu)) { \
+ ret = sprintf(buf, "%lu\n", \
+ per_cpu(thermal_state, cpu).event.name); \
+ } else \
+ ret = 0; \
+ preempt_enable(); \
+ \
+ return ret; \
+}
+
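
Each show-func/one-ro pairing that follows expands to a per-field show routine plus a read-only DEVICE_ATTR. Roughly, the first pairing yields (a sketch of the preprocessor output, whitespace adjusted):

/*
 * Approximate expansion of:
 *   define_therm_throt_device_show_func(core_throttle, count);
 *   define_therm_throt_device_one_ro(core_throttle_count);
 */
static ssize_t therm_throt_device_show_core_throttle_count(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned int cpu = dev->id;
	ssize_t ret;

	preempt_disable();	/* CPU hotplug */
	if (cpu_online(cpu)) {
		ret = sprintf(buf, "%lu\n",
			      per_cpu(thermal_state, cpu).core_throttle.count);
	} else
		ret = 0;
	preempt_enable();

	return ret;
}
static DEVICE_ATTR(core_throttle_count, 0444,
		   therm_throt_device_show_core_throttle_count, NULL);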
+define_therm_throt_device_show_func(core_throttle, count);
+define_therm_throt_device_one_ro(core_throttle_count);
+
+define_therm_throt_device_show_func(core_power_limit, count);
+define_therm_throt_device_one_ro(core_power_limit_count);
+
+define_therm_throt_device_show_func(package_throttle, count);
+define_therm_throt_device_one_ro(package_throttle_count);
+
+define_therm_throt_device_show_func(package_power_limit, count);
+define_therm_throt_device_one_ro(package_power_limit_count);
+
+define_therm_throt_device_show_func(core_throttle, max_time_ms);
+define_therm_throt_device_one_ro(core_throttle_max_time_ms);
+
+define_therm_throt_device_show_func(package_throttle, max_time_ms);
+define_therm_throt_device_one_ro(package_throttle_max_time_ms);
+
+define_therm_throt_device_show_func(core_throttle, total_time_ms);
+define_therm_throt_device_one_ro(core_throttle_total_time_ms);
+
+define_therm_throt_device_show_func(package_throttle, total_time_ms);
+define_therm_throt_device_one_ro(package_throttle_total_time_ms);
+
+static struct attribute *thermal_throttle_attrs[] = {
+ &dev_attr_core_throttle_count.attr,
+ &dev_attr_core_throttle_max_time_ms.attr,
+ &dev_attr_core_throttle_total_time_ms.attr,
+ NULL
+};
+
+static const struct attribute_group thermal_attr_group = {
+ .attrs = thermal_throttle_attrs,
+ .name = "thermal_throttle"
+};
+#endif /* CONFIG_SYSFS */
+
+#define CORE_LEVEL 0
+#define PACKAGE_LEVEL 1
+
+#define THERM_THROT_POLL_INTERVAL HZ
+#define THERM_STATUS_PROCHOT_LOG BIT(1)
+
+#define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15))
+#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11))
+
+static void clear_therm_status_log(int level)
+{
+ int msr;
+ u64 mask, msr_val;
+
+ if (level == CORE_LEVEL) {
+ msr = MSR_IA32_THERM_STATUS;
+ mask = THERM_STATUS_CLEAR_CORE_MASK;
+ } else {
+ msr = MSR_IA32_PACKAGE_THERM_STATUS;
+ mask = THERM_STATUS_CLEAR_PKG_MASK;
+ }
+
+ rdmsrl(msr, msr_val);
+ msr_val &= mask;
+ wrmsrl(msr, msr_val & ~THERM_STATUS_PROCHOT_LOG);
+}
+
+static void get_therm_status(int level, bool *proc_hot, u8 *temp)
+{
+ int msr;
+ u64 msr_val;
+
+ if (level == CORE_LEVEL)
+ msr = MSR_IA32_THERM_STATUS;
+ else
+ msr = MSR_IA32_PACKAGE_THERM_STATUS;
+
+ rdmsrl(msr, msr_val);
+ if (msr_val & THERM_STATUS_PROCHOT_LOG)
+ *proc_hot = true;
+ else
+ *proc_hot = false;
+
+ *temp = (msr_val >> 16) & 0x7F;
+}
+
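
The status MSR packs the sticky PROCHOT log into bit 1 and the digital readout, in degrees below the throttle point, into bits 22:16. A standalone sketch of the decode get_therm_status() performs (bit positions as used above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROCHOT_LOG (1ULL << 1)	/* sticky "PROCHOT asserted" log bit */

/* Mirrors get_therm_status(): split one THERM_STATUS value into fields. */
static void decode_therm_status(uint64_t msr_val, bool *hot, uint8_t *temp)
{
	*hot = msr_val & PROCHOT_LOG;
	/* Digital readout, bits 22:16: degrees C below the throttle point. */
	*temp = (msr_val >> 16) & 0x7f;
}

int main(void)
{
	bool hot;
	uint8_t temp;

	decode_therm_status((25ULL << 16) | PROCHOT_LOG, &hot, &temp);
	printf("hot=%d, %u C below TjMax\n", hot, temp);	/* hot=1, 25 */
	return 0;
}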
+static void __maybe_unused throttle_active_work(struct work_struct *work)
+{
+ struct _thermal_state *state = container_of(to_delayed_work(work),
+ struct _thermal_state, therm_work);
+ unsigned int i, avg, this_cpu = smp_processor_id();
+ u64 now = get_jiffies_64();
+ bool hot;
+ u8 temp;
+
+ get_therm_status(state->level, &hot, &temp);
+ /* the readout counts degrees below the limit, so a smaller value means hotter */
+ if (!hot && temp > state->baseline_temp) {
+ if (state->rate_control_active)
+ pr_info("CPU%d: %s temperature/speed normal (total events = %lu)\n",
+ this_cpu,
+ state->level == CORE_LEVEL ? "Core" : "Package",
+ state->count);
+
+ state->rate_control_active = false;
+ return;
+ }
+
+ if (time_before64(now, state->next_check) &&
+ state->rate_control_active)
+ goto re_arm;
+
+ state->next_check = now + CHECK_INTERVAL;
+
+ if (state->count != state->last_count) {
+ /* There was one new thermal interrupt */
+ state->last_count = state->count;
+ state->average = 0;
+ state->sample_count = 0;
+ state->sample_index = 0;
+ }
+
+ state->temp_samples[state->sample_index] = temp;
+ state->sample_count++;
+ state->sample_index = (state->sample_index + 1) % ARRAY_SIZE(state->temp_samples);
+ if (state->sample_count < ARRAY_SIZE(state->temp_samples))
+ goto re_arm;
+
+ avg = 0;
+ for (i = 0; i < ARRAY_SIZE(state->temp_samples); ++i)
+ avg += state->temp_samples[i];
+
+ avg /= ARRAY_SIZE(state->temp_samples);
+
+ if (state->average > avg) {
+ pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
+ this_cpu,
+ state->level == CORE_LEVEL ? "Core" : "Package",
+ state->count);
+ state->rate_control_active = true;
+ }
+
+ state->average = avg;
+
+re_arm:
+ clear_therm_status_log(state->level);
+ schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
+}
+
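
Because the readout counts degrees of headroom, a falling three-sample average means rising temperature, and only then is a warning logged. A standalone sketch of the smoothing logic (simplified ring buffer, plain integers for the readouts):

#include <stdint.h>
#include <stdio.h>

#define NSAMPLES 3

struct avg_state {
	uint8_t samples[NSAMPLES];
	uint8_t idx, count, average;
};

/*
 * Mirrors throttle_active_work()'s smoothing: collect NSAMPLES readouts,
 * then compare successive averages. Readouts count degrees below the
 * throttle point, so a *smaller* average is *hotter*.
 */
static int feed_sample(struct avg_state *s, uint8_t temp)
{
	unsigned int i, avg = 0;

	s->samples[s->idx] = temp;
	s->idx = (s->idx + 1) % NSAMPLES;
	if (s->count < NSAMPLES) {
		s->count++;
		if (s->count < NSAMPLES)
			return 0;	/* not enough history yet */
	}

	for (i = 0; i < NSAMPLES; i++)
		avg += s->samples[i];
	avg /= NSAMPLES;

	int hotter = s->average > avg;	/* previous average was cooler */
	s->average = avg;
	return hotter;
}

int main(void)
{
	struct avg_state s = { .average = 0 };
	uint8_t trace[] = { 30, 28, 26, 20, 18, 16 };

	for (unsigned int i = 0; i < sizeof(trace); i++)
		if (feed_sample(&s, trace[i]))
			printf("sample %u: trending hotter, would warn\n", i);
	return 0;
}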
+/**
+ * therm_throt_process - Process thermal throttling event from interrupt
+ * @new_event: Whether the condition is currently asserted, since the
+ * thermal interrupt normally fires both when the thermal
+ * event begins and once it has ended.
+ * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
+ * @level: CORE_LEVEL or PACKAGE_LEVEL.
+ *
+ * This function is called by the thermal interrupt after the
+ * IRQ has been acknowledged.
+ *
+ * It takes care of rate limiting and printing messages to the syslog.
+ */
+static void therm_throt_process(bool new_event, int event, int level)
+{
+ struct _thermal_state *state;
+ unsigned int this_cpu = smp_processor_id();
+ bool old_event;
+ u64 now;
+ struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
+
+ now = get_jiffies_64();
+ if (level == CORE_LEVEL) {
+ if (event == THERMAL_THROTTLING_EVENT)
+ state = &pstate->core_throttle;
+ else if (event == POWER_LIMIT_EVENT)
+ state = &pstate->core_power_limit;
+ else
+ return;
+ } else if (level == PACKAGE_LEVEL) {
+ if (event == THERMAL_THROTTLING_EVENT)
+ state = &pstate->package_throttle;
+ else if (event == POWER_LIMIT_EVENT)
+ state = &pstate->package_power_limit;
+ else
+ return;
+ } else
+ return;
+
+ old_event = state->new_event;
+ state->new_event = new_event;
+
+ if (new_event)
+ state->count++;
+
+ if (event != THERMAL_THROTTLING_EVENT)
+ return;
+
+ if (new_event && !state->last_interrupt_time) {
+ bool hot;
+ u8 temp;
+
+ get_therm_status(state->level, &hot, &temp);
+ /*
+ * Ignore short temperature spikes: with more than 10 degrees of
+ * headroom below the throttle point the system is not close to
+ * PROCHOT, so there is no need to start the polling work.
+ */
+ if (temp > 10)
+ return;
+
+ state->baseline_temp = temp;
+ state->last_interrupt_time = now;
+ schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
+ } else if (old_event && state->last_interrupt_time) {
+ unsigned long throttle_time;
+
+ throttle_time = jiffies_delta_to_msecs(now - state->last_interrupt_time);
+ if (throttle_time > state->max_time_ms)
+ state->max_time_ms = throttle_time;
+ state->total_time_ms += throttle_time;
+ state->last_interrupt_time = 0;
+ }
+}
+
+static int thresh_event_valid(int level, int event)
+{
+ struct _thermal_state *state;
+ unsigned int this_cpu = smp_processor_id();
+ struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
+ u64 now = get_jiffies_64();
+
+ if (level == PACKAGE_LEVEL)
+ state = (event == 0) ? &pstate->pkg_thresh0 :
+ &pstate->pkg_thresh1;
+ else
+ state = (event == 0) ? &pstate->core_thresh0 :
+ &pstate->core_thresh1;
+
+ if (time_before64(now, state->next_check))
+ return 0;
+
+ state->next_check = now + CHECK_INTERVAL;
+
+ return 1;
+}
+
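
Threshold notifications are rate-limited per (level, event) pair to at most one per CHECK_INTERVAL. A standalone sketch of that gate, with plain integers standing in for jiffies and time_before64():

#include <stdint.h>
#include <stdio.h>

#define CHECK_INTERVAL 300	/* "seconds" standing in for 300 * HZ */

struct gate { uint64_t next_check; };

/* Mirrors thresh_event_valid(): pass at most once per CHECK_INTERVAL. */
static int event_valid(struct gate *g, uint64_t now)
{
	if (now < g->next_check)	/* time_before64() in the kernel */
		return 0;
	g->next_check = now + CHECK_INTERVAL;
	return 1;
}

int main(void)
{
	struct gate g = { 0 };
	uint64_t t[] = { 0, 10, 299, 300, 301, 650 };

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("t=%llu -> %s\n", (unsigned long long)t[i],
		       event_valid(&g, t[i]) ? "notify" : "suppressed");
	return 0;
}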
+static bool int_pln_enable;
+static int __init int_pln_enable_setup(char *s)
+{
+ int_pln_enable = true;
+
+ return 1;
+}
+__setup("int_pln_enable", int_pln_enable_setup);
+
+#ifdef CONFIG_SYSFS
+/* Add/Remove thermal_throttle interface for CPU device: */
+static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
+{
+ int err;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
+ if (err)
+ return err;
+
+ if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_core_power_limit_count.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+ }
+
+ if (cpu_has(c, X86_FEATURE_PTS)) {
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_count.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_max_time_ms.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_total_time_ms.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+
+ if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_power_limit_count.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+ }
+ }
+
+ return 0;
+
+del_group:
+ sysfs_remove_group(&dev->kobj, &thermal_attr_group);
+
+ return err;
+}
+
+static void thermal_throttle_remove_dev(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &thermal_attr_group);
+}
+
+/* Get notified when a cpu comes on/off. Be hotplug friendly. */
+static int thermal_throttle_online(unsigned int cpu)
+{
+ struct thermal_state *state = &per_cpu(thermal_state, cpu);
+ struct device *dev = get_cpu_device(cpu);
+ u32 l;
+
+ state->package_throttle.level = PACKAGE_LEVEL;
+ state->core_throttle.level = CORE_LEVEL;
+
+ INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
+ INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
+
+ /* Unmask the thermal vector after the above workqueues are initialized. */
+ l = apic_read(APIC_LVTTHMR);
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
+ return thermal_throttle_add_dev(dev, cpu);
+}
+
+static int thermal_throttle_offline(unsigned int cpu)
+{
+ struct thermal_state *state = &per_cpu(thermal_state, cpu);
+ struct device *dev = get_cpu_device(cpu);
+ u32 l;
+
+ /* Mask the thermal vector before draining any pending work */
+ l = apic_read(APIC_LVTTHMR);
+ apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+
+ cancel_delayed_work_sync(&state->package_throttle.therm_work);
+ cancel_delayed_work_sync(&state->core_throttle.therm_work);
+
+ state->package_throttle.rate_control_active = false;
+ state->core_throttle.rate_control_active = false;
+
+ thermal_throttle_remove_dev(dev);
+ return 0;
+}
+
+static __init int thermal_throttle_init_device(void)
+{
+ int ret;
+
+ if (!atomic_read(&therm_throt_en))
+ return 0;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
+ thermal_throttle_online,
+ thermal_throttle_offline);
+ return ret < 0 ? ret : 0;
+}
+device_initcall(thermal_throttle_init_device);
+
+#endif /* CONFIG_SYSFS */
+
+static void notify_package_thresholds(__u64 msr_val)
+{
+ bool notify_thres_0 = false;
+ bool notify_thres_1 = false;
+
+ if (!platform_thermal_package_notify)
+ return;
+
+ /* lower threshold check */
+ if (msr_val & THERM_LOG_THRESHOLD0)
+ notify_thres_0 = true;
+ /* higher threshold check */
+ if (msr_val & THERM_LOG_THRESHOLD1)
+ notify_thres_1 = true;
+
+ if (!notify_thres_0 && !notify_thres_1)
+ return;
+
+ if (platform_thermal_package_rate_control &&
+ platform_thermal_package_rate_control()) {
+ /* Rate control is implemented in callback */
+ platform_thermal_package_notify(msr_val);
+ return;
+ }
+
+ /* lower threshold reached */
+ if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
+ platform_thermal_package_notify(msr_val);
+ /* higher threshold reached */
+ if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
+ platform_thermal_package_notify(msr_val);
+}
+
+static void notify_thresholds(__u64 msr_val)
+{
+ /* check whether the interrupt handler is defined;
+ * otherwise simply return
+ */
+ if (!platform_thermal_notify)
+ return;
+
+ /* lower threshold reached */
+ if ((msr_val & THERM_LOG_THRESHOLD0) &&
+ thresh_event_valid(CORE_LEVEL, 0))
+ platform_thermal_notify(msr_val);
+ /* higher threshold reached */
+ if ((msr_val & THERM_LOG_THRESHOLD1) &&
+ thresh_event_valid(CORE_LEVEL, 1))
+ platform_thermal_notify(msr_val);
+}
+
+/* Thermal transition interrupt handler */
+void intel_thermal_interrupt(void)
+{
+ __u64 msr_val;
+
+ if (static_cpu_has(X86_FEATURE_HWP))
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+
+ rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+
+ /* Check for violation of core thermal thresholds */
+ notify_thresholds(msr_val);
+
+ therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+ CORE_LEVEL);
+
+ if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
+ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+ CORE_LEVEL);
+
+ if (this_cpu_has(X86_FEATURE_PTS)) {
+ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+ /* check violations of package thermal thresholds */
+ notify_package_thresholds(msr_val);
+ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+ PACKAGE_LEVEL);
+ if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
+ therm_throt_process(msr_val &
+ PACKAGE_THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+ PACKAGE_LEVEL);
+ }
+}
+
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+ if (!boot_cpu_has(X86_FEATURE_APIC))
+ return 0;
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ return 0;
+ return 1;
+}
+
+bool x86_thermal_enabled(void)
+{
+ return atomic_read(&therm_throt_en);
+}
+
+void intel_init_thermal(struct cpuinfo_x86 *c)
+{
+ unsigned int cpu = smp_processor_id();
+ int tm2 = 0;
+ u32 l, h;
+
+ if (!intel_thermal_supported(c))
+ return;
+
+ /* On the BSP? */
+ if (c == &boot_cpu_data)
+ lvtthmr_init = apic_read(APIC_LVTTHMR);
+
+ /*
+ * First check if it's already enabled, in which case there might
+ * be SMM code handling it; we must not install a handler then,
+ * since the interrupt might already be delivered via SMI:
+ */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ h = lvtthmr_init;
+ /*
+ * The initial value of thermal LVT entries on all APs always reads
+ * 0x10000 because APs are woken up by the BSP issuing the
+ * INIT-SIPI-SIPI sequence, and LVT registers are reset to 0s except
+ * for the mask bits, which are set to 1s when APs receive the INIT IPI.
+ * If the BIOS takes over the thermal interrupt and sets its delivery
+ * mode to SMI (not fixed), restore here the value the BIOS programmed
+ * on the AP, based on the BSP's saved copy, since the BIOS always sets
+ * the same value for all threads/cores.
+ */
+ if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
+ apic_write(APIC_LVTTHMR, lvtthmr_init);
+
+
+ if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
+ if (system_state == SYSTEM_BOOTING)
+ pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
+ return;
+ }
+
+ /* early Pentium M models use a different method to enable TM2 */
+ if (cpu_has(c, X86_FEATURE_TM2)) {
+ if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
+ rdmsr(MSR_THERM2_CTL, l, h);
+ if (l & MSR_THERM2_CTL_TM_SELECT)
+ tm2 = 1;
+ } else if (l & MSR_IA32_MISC_ENABLE_TM2)
+ tm2 = 1;
+ }
+
+ /* Mask the thermal vector in the local APIC until we're ready: */
+ h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
+ apic_write(APIC_LVTTHMR, h);
+
+ rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
+ if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
+ wrmsr(MSR_IA32_THERM_INTERRUPT,
+ (l | (THERM_INT_LOW_ENABLE
+ | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
+ else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
+ wrmsr(MSR_IA32_THERM_INTERRUPT,
+ l | (THERM_INT_LOW_ENABLE
+ | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
+ else
+ wrmsr(MSR_IA32_THERM_INTERRUPT,
+ l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
+
+ if (cpu_has(c, X86_FEATURE_PTS)) {
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ (l | (PACKAGE_THERM_INT_LOW_ENABLE
+ | PACKAGE_THERM_INT_HIGH_ENABLE))
+ & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
+ else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l | (PACKAGE_THERM_INT_LOW_ENABLE
+ | PACKAGE_THERM_INT_HIGH_ENABLE
+ | PACKAGE_THERM_INT_PLN_ENABLE), h);
+ else
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l | (PACKAGE_THERM_INT_LOW_ENABLE
+ | PACKAGE_THERM_INT_HIGH_ENABLE), h);
+ }
+
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
+
+ pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
+ tm2 ? "TM2" : "TM1");
+
+ /* enable thermal throttle processing */
+ atomic_set(&therm_throt_en, 1);
+}
diff --git a/drivers/thermal/intel/thermal_interrupt.h b/drivers/thermal/intel/thermal_interrupt.h
new file mode 100644
index 000000000000..53f427bb58dc
--- /dev/null
+++ b/drivers/thermal/intel/thermal_interrupt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_THERMAL_INTERRUPT_H
+#define _INTEL_THERMAL_INTERRUPT_H
+
+/* Interrupt Handler for package thermal thresholds */
+extern int (*platform_thermal_package_notify)(__u64 msr_val);
+
+/* Interrupt Handler for core thermal thresholds */
+extern int (*platform_thermal_notify)(__u64 msr_val);
+
+/* Callback support of rate control, return true, if
+ * callback has rate control */
+extern bool (*platform_thermal_package_rate_control)(void);
+
+#endif /* _INTEL_THERMAL_INTERRUPT_H */
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index b81c33202f41..295742e83960 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -17,8 +17,10 @@
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
+
#include <asm/cpu_device_id.h>
-#include <asm/mce.h>
+
+#include "thermal_interrupt.h"
/*
* Rate control delay: the idea is to introduce a debounce effect
diff --git a/drivers/thermal/khadas_mcu_fan.c b/drivers/thermal/khadas_mcu_fan.c
index 9eadd2d6413e..d35e5313bea4 100644
--- a/drivers/thermal/khadas_mcu_fan.c
+++ b/drivers/thermal/khadas_mcu_fan.c
@@ -100,7 +100,6 @@ static int khadas_mcu_fan_probe(struct platform_device *pdev)
return ret;
}
ctx->cdev = cdev;
- thermal_cdev_update(cdev);
return 0;
}
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index aa9c1d80fae4..8d5ac2df26dc 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -10,6 +10,17 @@ config QCOM_TSENS
Also able to set threshold temperature for both hot and cold and update
when a threshold is reached.
+config QCOM_SPMI_ADC_TM5
+ tristate "Qualcomm SPMI PMIC Thermal Monitor ADC5"
+ depends on OF && SPMI && IIO
+ select REGMAP_SPMI
+ select QCOM_VADC_COMMON
+ help
+ This enables the thermal driver for the ADC thermal monitoring
+ device. It shows up as a thermal zone with multiple trip points.
+ The thermal client sets threshold temperatures for both warm and cool
+ trips and gets notified when a threshold is reached.
+
config QCOM_SPMI_TEMP_ALARM
tristate "Qualcomm SPMI PMIC Temperature Alarm"
depends on OF && SPMI && IIO
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index ec86eef7f6a6..252ea7d9da0b 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -3,4 +3,5 @@ obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
qcom_tsens-y += tsens.o tsens-v2.o tsens-v1.o tsens-v0_1.o \
tsens-8960.o
+obj-$(CONFIG_QCOM_SPMI_ADC_TM5) += qcom-spmi-adc-tm5.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
new file mode 100644
index 000000000000..b460b56e981c
--- /dev/null
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Linaro Limited
+ *
+ * Based on original driver:
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
+#include <linux/iio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+/*
+ * The thermal monitoring block consists of 8 (ADC_TM5_NUM_CHANNELS) channels.
+ * Each channel is programmed to use one of the ADC channels for voltage
+ * comparison. Voltages are programmed as ADC codes, so we have to convert
+ * temperature to voltage and then to an ADC code value.
+ *
+ * Configuration of TM channels must match configuration of corresponding ADC
+ * channels.
+ */
+
+#define ADC5_MAX_CHANNEL 0xc0
+#define ADC_TM5_NUM_CHANNELS 8
+
+#define ADC_TM5_STATUS_LOW 0x0a
+
+#define ADC_TM5_STATUS_HIGH 0x0b
+
+#define ADC_TM5_NUM_BTM 0x0f
+
+#define ADC_TM5_ADC_DIG_PARAM 0x42
+
+#define ADC_TM5_FAST_AVG_CTL (ADC_TM5_ADC_DIG_PARAM + 1)
+#define ADC_TM5_FAST_AVG_EN BIT(7)
+
+#define ADC_TM5_MEAS_INTERVAL_CTL (ADC_TM5_ADC_DIG_PARAM + 2)
+#define ADC_TM5_TIMER1 3 /* 3.9ms */
+
+#define ADC_TM5_MEAS_INTERVAL_CTL2 (ADC_TM5_ADC_DIG_PARAM + 3)
+#define ADC_TM5_MEAS_INTERVAL_CTL2_MASK 0xf0
+#define ADC_TM5_TIMER2 10 /* 1 second */
+#define ADC_TM5_MEAS_INTERVAL_CTL3_MASK 0xf
+#define ADC_TM5_TIMER3 4 /* 4 second */
+
+#define ADC_TM_EN_CTL1 0x46
+#define ADC_TM_EN BIT(7)
+#define ADC_TM_CONV_REQ 0x47
+#define ADC_TM_CONV_REQ_EN BIT(7)
+
+#define ADC_TM5_M_CHAN_BASE 0x60
+
+#define ADC_TM5_M_ADC_CH_SEL_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 0)
+#define ADC_TM5_M_LOW_THR0(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 1)
+#define ADC_TM5_M_LOW_THR1(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 2)
+#define ADC_TM5_M_HIGH_THR0(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 3)
+#define ADC_TM5_M_HIGH_THR1(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 4)
+#define ADC_TM5_M_MEAS_INTERVAL_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 5)
+#define ADC_TM5_M_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 6)
+#define ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK 0xf
+#define ADC_TM5_M_CTL_CAL_SEL_MASK 0x30
+#define ADC_TM5_M_CTL_CAL_VAL 0x40
+#define ADC_TM5_M_EN(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 7)
+#define ADC_TM5_M_MEAS_EN BIT(7)
+#define ADC_TM5_M_HIGH_THR_INT_EN BIT(1)
+#define ADC_TM5_M_LOW_THR_INT_EN BIT(0)
+
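
Each monitored channel owns an 8-byte register window starting at ADC_TM5_M_CHAN_BASE, and the ADC_TM5_M_*(n) macros index into it. A quick check of the arithmetic, using the values from the defines above:

#include <assert.h>

#define ADC_TM5_M_CHAN_BASE		0x60
#define ADC_TM5_M_ADC_CH_SEL_CTL(n)	(ADC_TM5_M_CHAN_BASE + ((n) * 8) + 0)
#define ADC_TM5_M_EN(n)			(ADC_TM5_M_CHAN_BASE + ((n) * 8) + 7)

int main(void)
{
	/* Channel 0 window: 0x60..0x67; channel 2 window: 0x70..0x77. */
	assert(ADC_TM5_M_ADC_CH_SEL_CTL(0) == 0x60);
	assert(ADC_TM5_M_EN(0) == 0x67);
	assert(ADC_TM5_M_ADC_CH_SEL_CTL(2) == 0x70);
	assert(ADC_TM5_M_EN(2) == 0x77);
	return 0;
}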
+enum adc5_timer_select {
+ ADC5_TIMER_SEL_1 = 0,
+ ADC5_TIMER_SEL_2,
+ ADC5_TIMER_SEL_3,
+ ADC5_TIMER_SEL_NONE,
+};
+
+struct adc_tm5_data {
+ const u32 full_scale_code_volt;
+ unsigned int *decimation;
+ unsigned int *hw_settle;
+};
+
+enum adc_tm5_cal_method {
+ ADC_TM5_NO_CAL = 0,
+ ADC_TM5_RATIOMETRIC_CAL,
+ ADC_TM5_ABSOLUTE_CAL
+};
+
+struct adc_tm5_chip;
+
+/**
+ * struct adc_tm5_channel - ADC Thermal Monitoring channel data.
+ * @channel: channel number.
+ * @adc_channel: corresponding ADC channel number.
+ * @cal_method: calibration method.
+ * @prescale: channel scaling performed on the input signal.
+ * @hw_settle_time: the time between AMUX being configured and the
+ * start of conversion.
+ * @iio: IIO channel instance used by this channel.
+ * @chip: ADC TM chip instance.
+ * @tzd: thermal zone device used by this channel.
+ */
+struct adc_tm5_channel {
+ unsigned int channel;
+ unsigned int adc_channel;
+ enum adc_tm5_cal_method cal_method;
+ unsigned int prescale;
+ unsigned int hw_settle_time;
+ struct iio_channel *iio;
+ struct adc_tm5_chip *chip;
+ struct thermal_zone_device *tzd;
+};
+
+/**
+ * struct adc_tm5_chip - ADC Thermal Monitoring properties
+ * @regmap: SPMI ADC5 Thermal Monitoring peripheral register map field.
+ * @dev: SPMI ADC5 device.
+ * @data: software configuration data.
+ * @channels: array of ADC TM channel data.
+ * @nchannels: number of channels defined/allocated
+ * @decimation: sampling rate supported for the channel.
+ * @avg_samples: number of ADC measurements averaged to
+ * produce a single result.
+ * @base: base address of TM registers.
+ */
+struct adc_tm5_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ const struct adc_tm5_data *data;
+ struct adc_tm5_channel *channels;
+ unsigned int nchannels;
+ unsigned int decimation;
+ unsigned int avg_samples;
+ u16 base;
+};
+
+static const struct adc_tm5_data adc_tm5_data_pmic = {
+ .full_scale_code_volt = 0x70e4,
+ .decimation = (unsigned int []) { 250, 420, 840 },
+ .hw_settle = (unsigned int []) { 15, 100, 200, 300, 400, 500, 600, 700,
+ 1000, 2000, 4000, 8000, 16000, 32000,
+ 64000, 128000 },
+};
+
+static int adc_tm5_read(struct adc_tm5_chip *adc_tm, u16 offset, u8 *data, int len)
+{
+ return regmap_bulk_read(adc_tm->regmap, adc_tm->base + offset, data, len);
+}
+
+static int adc_tm5_write(struct adc_tm5_chip *adc_tm, u16 offset, u8 *data, int len)
+{
+ return regmap_bulk_write(adc_tm->regmap, adc_tm->base + offset, data, len);
+}
+
+static int adc_tm5_reg_update(struct adc_tm5_chip *adc_tm, u16 offset, u8 mask, u8 val)
+{
+ return regmap_write_bits(adc_tm->regmap, adc_tm->base + offset, mask, val);
+}
+
+static irqreturn_t adc_tm5_isr(int irq, void *data)
+{
+ struct adc_tm5_chip *chip = data;
+ u8 status_low, status_high, ctl;
+ int ret, i;
+
+ ret = adc_tm5_read(chip, ADC_TM5_STATUS_LOW, &status_low, sizeof(status_low));
+ if (unlikely(ret)) {
+ dev_err(chip->dev, "read status low failed: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ ret = adc_tm5_read(chip, ADC_TM5_STATUS_HIGH, &status_high, sizeof(status_high));
+ if (unlikely(ret)) {
+ dev_err(chip->dev, "read status high failed: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < chip->nchannels; i++) {
+ bool upper_set = false, lower_set = false;
+ unsigned int ch = chip->channels[i].channel;
+
+ /* No TZD; we already warned at boot time */
+ if (!chip->channels[i].tzd)
+ continue;
+
+ ret = adc_tm5_read(chip, ADC_TM5_M_EN(ch), &ctl, sizeof(ctl));
+ if (unlikely(ret)) {
+ dev_err(chip->dev, "ctl read failed: %d, channel %d\n", ret, i);
+ continue;
+ }
+
+ if (!(ctl & ADC_TM5_M_MEAS_EN))
+ continue;
+
+ lower_set = (status_low & BIT(ch)) &&
+ (ctl & ADC_TM5_M_LOW_THR_INT_EN);
+
+ upper_set = (status_high & BIT(ch)) &&
+ (ctl & ADC_TM5_M_HIGH_THR_INT_EN);
+
+ if (upper_set || lower_set)
+ thermal_zone_device_update(chip->channels[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int adc_tm5_get_temp(void *data, int *temp)
+{
+ struct adc_tm5_channel *channel = data;
+ int ret;
+
+ if (!channel || !channel->iio)
+ return -EINVAL;
+
+ ret = iio_read_channel_processed(channel->iio, temp);
+ if (ret < 0)
+ return ret;
+
+ if (ret != IIO_VAL_INT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int adc_tm5_disable_channel(struct adc_tm5_channel *channel)
+{
+ struct adc_tm5_chip *chip = channel->chip;
+ unsigned int reg = ADC_TM5_M_EN(channel->channel);
+
+ return adc_tm5_reg_update(chip, reg,
+ ADC_TM5_M_MEAS_EN |
+ ADC_TM5_M_HIGH_THR_INT_EN |
+ ADC_TM5_M_LOW_THR_INT_EN,
+ 0);
+}
+
+static int adc_tm5_enable(struct adc_tm5_chip *chip)
+{
+ int ret;
+ u8 data;
+
+ data = ADC_TM_EN;
+ ret = adc_tm5_write(chip, ADC_TM_EN_CTL1, &data, sizeof(data));
+ if (ret < 0) {
+ dev_err(chip->dev, "adc-tm enable failed\n");
+ return ret;
+ }
+
+ data = ADC_TM_CONV_REQ_EN;
+ ret = adc_tm5_write(chip, ADC_TM_CONV_REQ, &data, sizeof(data));
+ if (ret < 0) {
+ dev_err(chip->dev, "adc-tm request conversion failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adc_tm5_configure(struct adc_tm5_channel *channel, int low, int high)
+{
+ struct adc_tm5_chip *chip = channel->chip;
+ u8 buf[8];
+ u16 reg = ADC_TM5_M_ADC_CH_SEL_CTL(channel->channel);
+ int ret;
+
+ ret = adc_tm5_read(chip, reg, buf, sizeof(buf));
+ if (ret) {
+ dev_err(chip->dev, "channel %d params read failed: %d\n", channel->channel, ret);
+ return ret;
+ }
+
+ buf[0] = channel->adc_channel;
+
+ /* High temperature corresponds to low voltage threshold */
+ if (high != INT_MAX) {
+ u16 adc_code = qcom_adc_tm5_temp_volt_scale(channel->prescale,
+ chip->data->full_scale_code_volt, high);
+
+ buf[1] = adc_code & 0xff;
+ buf[2] = adc_code >> 8;
+ buf[7] |= ADC_TM5_M_LOW_THR_INT_EN;
+ } else {
+ buf[7] &= ~ADC_TM5_M_LOW_THR_INT_EN;
+ }
+
+ /* Low temperature corresponds to high voltage threshold */
+ if (low != -INT_MAX) {
+ u16 adc_code = qcom_adc_tm5_temp_volt_scale(channel->prescale,
+ chip->data->full_scale_code_volt, low);
+
+ buf[3] = adc_code & 0xff;
+ buf[4] = adc_code >> 8;
+ buf[7] |= ADC_TM5_M_HIGH_THR_INT_EN;
+ } else {
+ buf[7] &= ~ADC_TM5_M_HIGH_THR_INT_EN;
+ }
+
+ buf[5] = ADC5_TIMER_SEL_2;
+
+ /* Set calibration select, hw_settle delay */
+ buf[6] &= ~ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK;
+ buf[6] |= FIELD_PREP(ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK, channel->hw_settle_time);
+ buf[6] &= ~ADC_TM5_M_CTL_CAL_SEL_MASK;
+ buf[6] |= FIELD_PREP(ADC_TM5_M_CTL_CAL_SEL_MASK, channel->cal_method);
+
+ buf[7] |= ADC_TM5_M_MEAS_EN;
+
+ ret = adc_tm5_write(chip, reg, buf, sizeof(buf));
+ if (ret) {
+ dev_err(chip->dev, "channel %d params write failed: %d\n", channel->channel, ret);
+ return ret;
+ }
+
+ return adc_tm5_enable(chip);
+}
+
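
Because the comparator works on voltages and the typical input is an NTC thermistor, the mapping is inverted: a high temperature trip programs the low voltage threshold bytes (buf[1..2]) and vice versa. A standalone sketch of splitting the 16-bit code across the threshold bytes; temp_to_code() is a made-up stand-in for qcom_adc_tm5_temp_volt_scale(), not the real conversion:

#include <stdint.h>
#include <stdio.h>

/* Made-up NTC-like inverse slope, standing in for the real scale function. */
static uint16_t temp_to_code(int temp_mdegc)
{
	return 0x4000 - temp_mdegc / 10;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	int high_trip_mdegc = 95000;

	/* High temperature trip -> LOW voltage threshold bytes (buf[1..2]). */
	uint16_t code = temp_to_code(high_trip_mdegc);
	buf[1] = code & 0xff;
	buf[2] = code >> 8;

	printf("code=0x%04x -> thr0=0x%02x thr1=0x%02x\n", code, buf[1], buf[2]);
	return 0;
}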
+static int adc_tm5_set_trips(void *data, int low, int high)
+{
+ struct adc_tm5_channel *channel = data;
+ struct adc_tm5_chip *chip;
+ int ret;
+
+ if (!channel)
+ return -EINVAL;
+
+ chip = channel->chip;
+ dev_dbg(chip->dev, "%d:low(mdegC):%d, high(mdegC):%d\n",
+ channel->channel, low, high);
+
+ if (high == INT_MAX && low <= -INT_MAX)
+ ret = adc_tm5_disable_channel(channel);
+ else
+ ret = adc_tm5_configure(channel, low, high);
+
+ return ret;
+}
+
+static struct thermal_zone_of_device_ops adc_tm5_ops = {
+ .get_temp = adc_tm5_get_temp,
+ .set_trips = adc_tm5_set_trips,
+};
+
+static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
+{
+ unsigned int i;
+ struct thermal_zone_device *tzd;
+
+ for (i = 0; i < adc_tm->nchannels; i++) {
+ adc_tm->channels[i].chip = adc_tm;
+
+ tzd = devm_thermal_zone_of_sensor_register(adc_tm->dev,
+ adc_tm->channels[i].channel,
+ &adc_tm->channels[i],
+ &adc_tm5_ops);
+ if (IS_ERR(tzd)) {
+ dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n",
+ adc_tm->channels[i].channel, PTR_ERR(tzd));
+ return PTR_ERR(tzd);
+ }
+ adc_tm->channels[i].tzd = tzd;
+ }
+
+ return 0;
+}
+
+static int adc_tm5_init(struct adc_tm5_chip *chip)
+{
+ u8 buf[4], channels_available;
+ int ret;
+ unsigned int i;
+
+ ret = adc_tm5_read(chip, ADC_TM5_NUM_BTM,
+ &channels_available, sizeof(channels_available));
+ if (ret) {
+ dev_err(chip->dev, "read failed for BTM channels\n");
+ return ret;
+ }
+
+ for (i = 0; i < chip->nchannels; i++) {
+ if (chip->channels[i].channel >= channels_available) {
+ dev_err(chip->dev, "Invalid channel %d\n", chip->channels[i].channel);
+ return -EINVAL;
+ }
+ }
+
+ buf[0] = chip->decimation;
+ buf[1] = chip->avg_samples | ADC_TM5_FAST_AVG_EN;
+ buf[2] = ADC_TM5_TIMER1;
+ buf[3] = FIELD_PREP(ADC_TM5_MEAS_INTERVAL_CTL2_MASK, ADC_TM5_TIMER2) |
+ FIELD_PREP(ADC_TM5_MEAS_INTERVAL_CTL3_MASK, ADC_TM5_TIMER3);
+
+ ret = adc_tm5_write(chip, ADC_TM5_ADC_DIG_PARAM, buf, sizeof(buf));
+ if (ret) {
+ dev_err(chip->dev, "block write failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
+ struct adc_tm5_channel *channel,
+ struct device_node *node)
+{
+ const char *name = node->name;
+ u32 chan, value, varr[2];
+ int ret;
+ struct device *dev = adc_tm->dev;
+ struct of_phandle_args args;
+
+ ret = of_property_read_u32(node, "reg", &chan);
+ if (ret) {
+ dev_err(dev, "%s: invalid channel number %d\n", name, ret);
+ return ret;
+ }
+
+ if (chan >= ADC_TM5_NUM_CHANNELS) {
+ dev_err(dev, "%s: channel number too big: %d\n", name, chan);
+ return -EINVAL;
+ }
+
+ channel->channel = chan;
+
+ /*
+ * We are tied to the PMIC's ADC controller, which always uses a
+ * single argument for the channel number. So don't bother parsing
+ * #io-channel-cells; just enforce cell_count = 1.
+ */
+ ret = of_parse_phandle_with_fixed_args(node, "io-channels", 1, 0, &args);
+ if (ret < 0) {
+ dev_err(dev, "%s: error parsing ADC channel number %d: %d\n", name, chan, ret);
+ return ret;
+ }
+ of_node_put(args.np);
+
+ if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
+ dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
+ return -EINVAL;
+ }
+ channel->adc_channel = args.args[0];
+
+ channel->iio = devm_of_iio_channel_get_by_name(adc_tm->dev, node, NULL);
+ if (IS_ERR(channel->iio)) {
+ ret = PTR_ERR(channel->iio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: error getting channel: %d\n", name, ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ if (!ret) {
+ ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]);
+ if (ret < 0) {
+ dev_err(dev, "%s: invalid pre-scaling <%d %d>\n",
+ name, varr[0], varr[1]);
+ return ret;
+ }
+ channel->prescale = ret;
+ } else {
+ /* 1:1 prescale is index 0 */
+ channel->prescale = 0;
+ }
+
+ ret = of_property_read_u32(node, "qcom,hw-settle-time-us", &value);
+ if (!ret) {
+ ret = qcom_adc5_hw_settle_time_from_dt(value, adc_tm->data->hw_settle);
+ if (ret < 0) {
+ dev_err(dev, "%s invalid hw-settle-time-us %d us\n",
+ name, value);
+ return ret;
+ }
+ channel->hw_settle_time = ret;
+ } else {
+ channel->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
+ }
+
+ if (of_property_read_bool(node, "qcom,ratiometric"))
+ channel->cal_method = ADC_TM5_RATIOMETRIC_CAL;
+ else
+ channel->cal_method = ADC_TM5_ABSOLUTE_CAL;
+
+ return 0;
+}
+
+static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node *node)
+{
+ struct adc_tm5_channel *channels;
+ struct device_node *child;
+ u32 value;
+ int ret;
+ struct device *dev = adc_tm->dev;
+
+ adc_tm->nchannels = of_get_available_child_count(node);
+ if (!adc_tm->nchannels)
+ return -EINVAL;
+
+ adc_tm->channels = devm_kcalloc(dev, adc_tm->nchannels,
+ sizeof(*adc_tm->channels), GFP_KERNEL);
+ if (!adc_tm->channels)
+ return -ENOMEM;
+
+ channels = adc_tm->channels;
+
+ adc_tm->data = of_device_get_match_data(dev);
+ if (!adc_tm->data)
+ adc_tm->data = &adc_tm5_data_pmic;
+
+ ret = of_property_read_u32(node, "qcom,decimation", &value);
+ if (!ret) {
+ ret = qcom_adc5_decimation_from_dt(value, adc_tm->data->decimation);
+ if (ret < 0) {
+ dev_err(dev, "invalid decimation %d\n", value);
+ return ret;
+ }
+ adc_tm->decimation = ret;
+ } else {
+ adc_tm->decimation = ADC5_DECIMATION_DEFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ if (!ret) {
+ ret = qcom_adc5_avg_samples_from_dt(value);
+ if (ret < 0) {
+ dev_err(dev, "invalid avg-samples %d\n", value);
+ return ret;
+ }
+ adc_tm->avg_samples = ret;
+ } else {
+ adc_tm->avg_samples = VADC_DEF_AVG_SAMPLES;
+ }
+
+ for_each_available_child_of_node(node, child) {
+ ret = adc_tm5_get_dt_channel_data(adc_tm, channels, child);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+
+ channels++;
+ }
+
+ return 0;
+}
+
+static int adc_tm5_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct adc_tm5_chip *adc_tm;
+ struct regmap *regmap;
+ int ret, irq;
+ u32 reg;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret)
+ return ret;
+
+ adc_tm = devm_kzalloc(&pdev->dev, sizeof(*adc_tm), GFP_KERNEL);
+ if (!adc_tm)
+ return -ENOMEM;
+
+ adc_tm->regmap = regmap;
+ adc_tm->dev = dev;
+ adc_tm->base = reg;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "get_irq failed: %d\n", irq);
+ return irq;
+ }
+
+ ret = adc_tm5_get_dt_data(adc_tm, node);
+ if (ret) {
+ dev_err(dev, "get dt data failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adc_tm5_init(adc_tm);
+ if (ret) {
+ dev_err(dev, "adc-tm init failed\n");
+ return ret;
+ }
+
+ ret = adc_tm5_register_tzd(adc_tm);
+ if (ret) {
+ dev_err(dev, "tzd register failed\n");
+ return ret;
+ }
+
+ return devm_request_threaded_irq(dev, irq, NULL, adc_tm5_isr,
+ IRQF_ONESHOT, "pm-adc-tm5", adc_tm);
+}
+
+static const struct of_device_id adc_tm5_match_table[] = {
+ {
+ .compatible = "qcom,spmi-adc-tm5",
+ .data = &adc_tm5_data_pmic,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adc_tm5_match_table);
+
+static struct platform_driver adc_tm5_driver = {
+ .driver = {
+ .name = "qcom-spmi-adc-tm5",
+ .of_match_table = adc_tm5_match_table,
+ },
+ .probe = adc_tm5_probe,
+};
+module_platform_driver(adc_tm5_driver);
+
+MODULE_DESCRIPTION("SPMI PMIC Thermal Monitor ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
deleted file mode 100644
index 304b461e12aa..000000000000
--- a/drivers/thermal/tango_thermal.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/thermal.h>
-#include <linux/platform_device.h>
-
-/*
- * According to a data sheet draft, "this temperature sensor uses a bandgap
- * type of circuit to compare a voltage which has a negative temperature
- * coefficient with a voltage that is proportional to absolute temperature.
- * A resistor bank allows 41 different temperature thresholds to be selected
- * and the logic output will then indicate whether the actual die temperature
- * lies above or below the selected threshold."
- */
-
-#define TEMPSI_CMD 0
-#define TEMPSI_RES 4
-#define TEMPSI_CFG 8
-
-#define CMD_OFF 0
-#define CMD_ON 1
-#define CMD_READ 2
-
-#define IDX_MIN 15
-#define IDX_MAX 40
-
-struct tango_thermal_priv {
- void __iomem *base;
- int thresh_idx;
-};
-
-static bool temp_above_thresh(void __iomem *base, int thresh_idx)
-{
- writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
- usleep_range(10, 20);
- writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
-
- return readl(base + TEMPSI_RES);
-}
-
-static int tango_get_temp(void *arg, int *res)
-{
- struct tango_thermal_priv *priv = arg;
- int idx = priv->thresh_idx;
-
- if (temp_above_thresh(priv->base, idx)) {
- /* Search upward by incrementing thresh_idx */
- while (idx < IDX_MAX && temp_above_thresh(priv->base, ++idx))
- cpu_relax();
- idx = idx - 1; /* always return lower bound */
- } else {
- /* Search downward by decrementing thresh_idx */
- while (idx > IDX_MIN && !temp_above_thresh(priv->base, --idx))
- cpu_relax();
- }
-
- *res = (idx * 9 / 2 - 38) * 1000; /* millidegrees Celsius */
- priv->thresh_idx = idx;
-
- return 0;
-}
-
-static const struct thermal_zone_of_device_ops ops = {
- .get_temp = tango_get_temp,
-};
-
-static void tango_thermal_init(struct tango_thermal_priv *priv)
-{
- writel(0, priv->base + TEMPSI_CFG);
- writel(CMD_ON, priv->base + TEMPSI_CMD);
-}
-
-static int tango_thermal_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct tango_thermal_priv *priv;
- struct thermal_zone_device *tzdev;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- platform_set_drvdata(pdev, priv);
- priv->thresh_idx = IDX_MIN;
- tango_thermal_init(priv);
-
- tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
- return PTR_ERR_OR_ZERO(tzdev);
-}
-
-static int __maybe_unused tango_thermal_resume(struct device *dev)
-{
- tango_thermal_init(dev_get_drvdata(dev));
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(tango_thermal_pm, NULL, tango_thermal_resume);
-
-static const struct of_device_id tango_sensor_ids[] = {
- {
- .compatible = "sigma,smp8758-thermal",
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, tango_sensor_ids);
-
-static struct platform_driver tango_thermal_driver = {
- .probe = tango_thermal_probe,
- .driver = {
- .name = "tango-thermal",
- .of_match_table = tango_sensor_ids,
- .pm = &tango_thermal_pm,
- },
-};
-
-module_platform_driver(tango_thermal_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sigma Designs");
-MODULE_DESCRIPTION("Tango temperature sensor");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4a291d205d5c..996c038f83a4 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -289,16 +289,11 @@ static int __init thermal_register_governors(void)
* - Critical trip point will cause a system shutdown.
*/
static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
- int delay)
+ unsigned long delay)
{
- if (delay > 1000)
+ if (delay)
mod_delayed_work(system_freezable_power_efficient_wq,
- &tz->poll_queue,
- round_jiffies(msecs_to_jiffies(delay)));
- else if (delay)
- mod_delayed_work(system_freezable_power_efficient_wq,
- &tz->poll_queue,
- msecs_to_jiffies(delay));
+ &tz->poll_queue, delay);
else
cancel_delayed_work(&tz->poll_queue);
}
@@ -317,9 +312,9 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
mutex_lock(&tz->lock);
if (!stop && tz->passive)
- thermal_zone_device_set_polling(tz, tz->passive_delay);
- else if (!stop && tz->polling_delay)
- thermal_zone_device_set_polling(tz, tz->polling_delay);
+ thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
+ else if (!stop && tz->polling_delay_jiffies)
+ thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
else
thermal_zone_device_set_polling(tz, 0);
@@ -412,9 +407,6 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
trace_thermal_zone_trip(tz, trip, trip_type);
- if (tz->ops->notify)
- tz->ops->notify(tz, trip, trip_type);
-
if (trip_type == THERMAL_TRIP_HOT && tz->ops->hot)
tz->ops->hot(tz);
else if (trip_type == THERMAL_TRIP_CRITICAL)
@@ -486,12 +478,6 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
pos->initialized = false;
}
-static void thermal_zone_device_reset(struct thermal_zone_device *tz)
-{
- tz->passive = 0;
- thermal_zone_device_init(tz);
-}
-
static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
enum thermal_device_mode mode)
{
@@ -601,26 +587,6 @@ static void thermal_zone_device_check(struct work_struct *work)
thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
}
-void thermal_zone_device_rebind_exception(struct thermal_zone_device *tz,
- const char *cdev_type, size_t size)
-{
- struct thermal_cooling_device *cdev = NULL;
-
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(cdev, &thermal_cdev_list, node) {
- /* skip non matching cdevs */
- if (strncmp(cdev_type, cdev->type, size))
- continue;
-
- /* re binding the exception matching the type pattern */
- thermal_zone_bind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev,
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- }
- mutex_unlock(&thermal_list_lock);
-}
-
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
@@ -688,23 +654,6 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
return match;
}
-void thermal_zone_device_unbind_exception(struct thermal_zone_device *tz,
- const char *cdev_type, size_t size)
-{
- struct thermal_cooling_device *cdev = NULL;
-
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(cdev, &thermal_cdev_list, node) {
- /* skip non matching cdevs */
- if (strncmp(cdev_type, cdev->type, size))
- continue;
- /* unbinding the exception matching the type pattern */
- thermal_zone_unbind_cooling_device(tz, THERMAL_TRIPS_NONE,
- cdev);
- }
- mutex_unlock(&thermal_list_lock);
-}
-
/*
* Device management section: cooling devices, zones devices, and binding
*
@@ -750,7 +699,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
unsigned long max_state;
int result, ret;
- if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
+ if (trip >= tz->trips || trip < 0)
return -EINVAL;
list_for_each_entry(pos1, &thermal_tz_list, node) {
@@ -1352,8 +1301,9 @@ thermal_zone_device_register(const char *type, int trips, int mask,
tz->device.class = &thermal_class;
tz->devdata = devdata;
tz->trips = trips;
- tz->passive_delay = passive_delay;
- tz->polling_delay = polling_delay;
+
+ thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
+ thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
/* sys I/F */
/* Add nodes that are always present via .groups */
@@ -1407,7 +1357,7 @@ thermal_zone_device_register(const char *type, int trips, int mask,
INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
- thermal_zone_device_reset(tz);
+ thermal_zone_device_init(tz);
/* Update the new thermal zone and mark it as already updated. */
if (atomic_cmpxchg(&tz->need_update, 1, 0))
thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 8df600fa7b79..86b8cef7310e 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -65,6 +65,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
cdev->ops->power2state;
}
+void thermal_cdev_update(struct thermal_cooling_device *);
+
/**
* struct thermal_trip - representation of a point in temperature domain
* @np: pointer to struct device_node that this trip point was created from
@@ -118,15 +120,12 @@ struct thermal_instance {
int thermal_register_governor(struct thermal_governor *);
void thermal_unregister_governor(struct thermal_governor *);
-void thermal_zone_device_rebind_exception(struct thermal_zone_device *,
- const char *, size_t);
-void thermal_zone_device_unbind_exception(struct thermal_zone_device *,
- const char *, size_t);
int thermal_zone_device_set_policy(struct thermal_zone_device *, char *);
int thermal_build_list_of_policies(char *buf);
/* Helpers */
void thermal_zone_set_trips(struct thermal_zone_device *tz);
+void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index c94bc824e5d3..7f50f412e02a 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -175,6 +175,13 @@ exit:
mutex_unlock(&tz->lock);
}
+void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms)
+{
+ *delay_jiffies = msecs_to_jiffies(delay_ms);
+ if (delay_ms > 1000)
+ *delay_jiffies = round_jiffies(*delay_jiffies);
+}
+
static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
int target)
{
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 0866e949339b..345917a58f2f 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -217,49 +217,6 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-passive_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int state;
-
- if (sscanf(buf, "%d\n", &state) != 1)
- return -EINVAL;
-
- /* sanity check: values below 1000 millicelcius don't make sense
- * and can cause the system to go into a thermal heart attack
- */
- if (state && state < 1000)
- return -EINVAL;
-
- if (state && !tz->forced_passive) {
- if (!tz->passive_delay)
- tz->passive_delay = 1000;
- thermal_zone_device_rebind_exception(tz, "Processor",
- sizeof("Processor"));
- } else if (!state && tz->forced_passive) {
- tz->passive_delay = 0;
- thermal_zone_device_unbind_exception(tz, "Processor",
- sizeof("Processor"));
- }
-
- tz->forced_passive = state;
-
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-
- return count;
-}
-
-static ssize_t
-passive_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
-
- return sprintf(buf, "%d\n", tz->forced_passive);
-}
-
-static ssize_t
policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -403,7 +360,6 @@ static DEVICE_ATTR_RW(sustainable_power);
/* These thermal zone device attributes are created based on conditions */
static DEVICE_ATTR_RW(mode);
-static DEVICE_ATTR_RW(passive);
/* These attributes are unconditionally added to a thermal zone */
static struct attribute *thermal_zone_dev_attrs[] = {
@@ -438,45 +394,9 @@ static const struct attribute_group thermal_zone_mode_attribute_group = {
.attrs = thermal_zone_mode_attrs,
};
-/* We expose passive only if passive trips are present */
-static struct attribute *thermal_zone_passive_attrs[] = {
- &dev_attr_passive.attr,
- NULL,
-};
-
-static umode_t thermal_zone_passive_is_visible(struct kobject *kobj,
- struct attribute *attr,
- int attrno)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct thermal_zone_device *tz;
- enum thermal_trip_type trip_type;
- int count, passive = 0;
-
- tz = container_of(dev, struct thermal_zone_device, device);
-
- for (count = 0; count < tz->trips && !passive; count++) {
- tz->ops->get_trip_type(tz, count, &trip_type);
-
- if (trip_type == THERMAL_TRIP_PASSIVE)
- passive = 1;
- }
-
- if (!passive)
- return attr->mode;
-
- return 0;
-}
-
-static const struct attribute_group thermal_zone_passive_attribute_group = {
- .attrs = thermal_zone_passive_attrs,
- .is_visible = thermal_zone_passive_is_visible,
-};
-
static const struct attribute_group *thermal_zone_attribute_groups[] = {
&thermal_zone_attribute_group,
&thermal_zone_mode_attribute_group,
- &thermal_zone_passive_attribute_group,
/* This is not NULL terminated as we create the group dynamically */
};
@@ -955,10 +875,7 @@ trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
instance =
container_of(attr, struct thermal_instance, attr);
- if (instance->trip == THERMAL_TRIPS_NONE)
- return sprintf(buf, "-1\n");
- else
- return sprintf(buf, "%d\n", instance->trip);
+ return sprintf(buf, "%d\n", instance->trip);
}
ssize_t
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index fdb8a495ab69..b4ef7340ac9b 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -24,7 +24,7 @@ omap4430_mpu_temp_sensor_registers = {
.bgap_dtemp_mask = OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK,
.bgap_mode_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET,
- .mode_ctrl_mask = OMAP4430_SINGLE_MODE_MASK,
+ .mode_ctrl_mask = OMAP4430_CONTINUOUS_MODE_MASK,
.bgap_efuse = OMAP4430_FUSE_OPP_BGAP,
};
@@ -58,7 +58,8 @@ omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
const struct ti_bandgap_data omap4430_data = {
.features = TI_BANDGAP_FEATURE_MODE_CONFIG |
TI_BANDGAP_FEATURE_CLK_CTRL |
- TI_BANDGAP_FEATURE_POWER_SWITCH,
+ TI_BANDGAP_FEATURE_POWER_SWITCH |
+ TI_BANDGAP_FEATURE_CONT_MODE_ONLY,
.fclock_name = "bandgap_fclk",
.div_ck_name = "bandgap_fclk",
.conv_table = omap4430_adc_to_temp,
@@ -96,7 +97,7 @@ omap4460_mpu_temp_sensor_registers = {
.mask_cold_mask = OMAP4460_MASK_COLD_MASK,
.bgap_mode_ctrl = OMAP4460_BGAP_CTRL_OFFSET,
- .mode_ctrl_mask = OMAP4460_SINGLE_MODE_MASK,
+ .mode_ctrl_mask = OMAP4460_CONTINUOUS_MODE_MASK,
.bgap_counter = OMAP4460_BGAP_COUNTER_OFFSET,
.counter_mask = OMAP4460_COUNTER_MASK,
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index 9a3955c3853b..c63f439e01d6 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -40,7 +40,7 @@
/* OMAP4430.TEMP_SENSOR bits */
#define OMAP4430_BGAP_TEMPSOFF_MASK BIT(12)
#define OMAP4430_BGAP_TSHUT_MASK BIT(11)
-#define OMAP4430_SINGLE_MODE_MASK BIT(10)
+#define OMAP4430_CONTINUOUS_MODE_MASK BIT(10)
#define OMAP4430_BGAP_TEMP_SENSOR_SOC_MASK BIT(9)
#define OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK BIT(8)
#define OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK (0xff << 0)
@@ -113,7 +113,7 @@
#define OMAP4460_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
/* OMAP4460.BANDGAP_CTRL bits */
-#define OMAP4460_SINGLE_MODE_MASK BIT(31)
+#define OMAP4460_CONTINUOUS_MODE_MASK BIT(31)
#define OMAP4460_MASK_HOT_MASK BIT(1)
#define OMAP4460_MASK_COLD_MASK BIT(0)
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index dcac99f327b0..8a3646e26ddd 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -26,6 +26,7 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
@@ -602,35 +603,40 @@ void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id)
static int
ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
{
- u32 counter = 1000;
- struct temp_sensor_registers *tsr;
-
- /* Select single conversion mode */
- if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
- RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 0);
+ struct temp_sensor_registers *tsr = bgp->conf->sensors[id].registers;
+ void __iomem *temp_sensor_ctrl = bgp->base + tsr->temp_sensor_ctrl;
+ int error;
+ u32 val;
+
+ /* Select continuous or single conversion mode */
+ if (TI_BANDGAP_HAS(bgp, MODE_CONFIG)) {
+ if (TI_BANDGAP_HAS(bgp, CONT_MODE_ONLY))
+ RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 1);
+ else
+ RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 0);
+ }
- /* Start of Conversion = 1 */
- RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);
+ /* Set Start of Conversion if available */
+ if (tsr->bgap_soc_mask) {
+ RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);
- /* Wait for EOCZ going up */
- tsr = bgp->conf->sensors[id].registers;
+ /* Wait for EOCZ going up */
+ error = readl_poll_timeout_atomic(temp_sensor_ctrl, val,
+ val & tsr->bgap_eocz_mask,
+ 1, 1000);
+ if (error)
+ dev_warn(bgp->dev, "eocz timed out waiting high\n");
- while (--counter) {
- if (ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
- tsr->bgap_eocz_mask)
- break;
+ /* Clear Start of Conversion if available */
+ RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
}
- /* Start of Conversion = 0 */
- RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
-
- /* Wait for EOCZ going down */
- counter = 1000;
- while (--counter) {
- if (!(ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
- tsr->bgap_eocz_mask))
- break;
- }
+ /* Wait for EOCZ going down, always needed even if no bgap_soc_mask */
+ error = readl_poll_timeout_atomic(temp_sensor_ctrl, val,
+ !(val & tsr->bgap_eocz_mask),
+ 1, 1500);
+ if (error)
+ dev_warn(bgp->dev, "eocz timed out waiting low\n");
return 0;
}
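[Editor's note: the conversion above swaps two open-coded, iteration-bounded busy loops for readl_poll_timeout_atomic(), which bounds the wait in real time instead. The generic shape of the helper, with hypothetical register names, is:]

	#include <linux/iopoll.h>

	u32 val;
	int err;

	/*
	 * Re-read the register into @val until the condition holds,
	 * pausing 1 us between reads and giving up after 1000 us total.
	 * Returns 0 on success, -ETIMEDOUT otherwise. STATUS_REG and
	 * READY_MASK are placeholders, not bandgap registers.
	 */
	err = readl_poll_timeout_atomic(base + STATUS_REG, val,
					val & READY_MASK, 1, 1000);
	if (err)
		dev_warn(dev, "status poll timed out\n");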
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index ed0ea4b17b25..1f4bbaf31675 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -280,6 +280,7 @@ struct ti_temp_sensor {
* has Errata 814
* TI_BANDGAP_FEATURE_UNRELIABLE - used when the sensor readings are too
* inaccurate.
+ * TI_BANDGAP_FEATURE_CONT_MODE_ONLY - used when single mode hangs the sensor
* TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
* specific feature (above) or not. Return non-zero, if yes.
*/
@@ -295,6 +296,7 @@ struct ti_temp_sensor {
#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9)
#define TI_BANDGAP_FEATURE_ERRATA_814 BIT(10)
#define TI_BANDGAP_FEATURE_UNRELIABLE BIT(11)
+#define TI_BANDGAP_FEATURE_CONT_MODE_ONLY BIT(12)
#define TI_BANDGAP_HAS(b, f) \
((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 2ce4b19f312a..f84375865c97 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -166,6 +166,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
char *domain)
{
struct ti_thermal_data *data;
+ int interval;
data = ti_bandgap_get_sensor_data(bgp, id);
@@ -183,9 +184,10 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
return PTR_ERR(data->ti_thermal);
}
+ interval = jiffies_to_msecs(data->ti_thermal->polling_delay_jiffies);
+
ti_bandgap_set_sensor_data(bgp, id, data);
- ti_bandgap_write_update_interval(bgp, data->sensor_id,
- data->ti_thermal->polling_delay);
+ ti_bandgap_write_update_interval(bgp, data->sensor_id, interval);
return 0;
}
diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c
deleted file mode 100644
index 8e3a2d3c2f9a..000000000000
--- a/drivers/thermal/zx2967_thermal.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ZTE's zx2967 family thermal sensor driver
- *
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/thermal.h>
-
-/* Power Mode: 0->low 1->high */
-#define ZX2967_THERMAL_POWER_MODE 0
-#define ZX2967_POWER_MODE_LOW 0
-#define ZX2967_POWER_MODE_HIGH 1
-
-/* DCF Control Register */
-#define ZX2967_THERMAL_DCF 0x4
-#define ZX2967_DCF_EN BIT(1)
-#define ZX2967_DCF_FREEZE BIT(0)
-
-/* Selection Register */
-#define ZX2967_THERMAL_SEL 0x8
-
-/* Control Register */
-#define ZX2967_THERMAL_CTRL 0x10
-
-#define ZX2967_THERMAL_READY BIT(12)
-#define ZX2967_THERMAL_TEMP_MASK GENMASK(11, 0)
-#define ZX2967_THERMAL_ID_MASK 0x18
-#define ZX2967_THERMAL_ID 0x10
-
-#define ZX2967_GET_TEMP_TIMEOUT_US (100 * 1024)
-
-/**
- * struct zx2967_thermal_priv - zx2967 thermal sensor private structure
- * @tzd: struct thermal_zone_device where the sensor is registered
- * @lock: prevents read sensor in parallel
- * @clk_topcrm: topcrm clk structure
- * @clk_apb: apb clk structure
- * @regs: pointer to base address of the thermal sensor
- * @dev: struct device pointer
- */
-
-struct zx2967_thermal_priv {
- struct thermal_zone_device *tzd;
- struct mutex lock;
- struct clk *clk_topcrm;
- struct clk *clk_apb;
- void __iomem *regs;
- struct device *dev;
-};
-
-static int zx2967_thermal_get_temp(void *data, int *temp)
-{
- void __iomem *regs;
- struct zx2967_thermal_priv *priv = data;
- u32 val;
- int ret;
-
- if (!priv->tzd)
- return -EAGAIN;
-
- regs = priv->regs;
- mutex_lock(&priv->lock);
- writel_relaxed(ZX2967_POWER_MODE_LOW,
- regs + ZX2967_THERMAL_POWER_MODE);
- writel_relaxed(ZX2967_DCF_EN, regs + ZX2967_THERMAL_DCF);
-
- val = readl_relaxed(regs + ZX2967_THERMAL_SEL);
- val &= ~ZX2967_THERMAL_ID_MASK;
- val |= ZX2967_THERMAL_ID;
- writel_relaxed(val, regs + ZX2967_THERMAL_SEL);
-
- /*
- * Must wait for a while, surely it's a bit odd.
- * otherwise temperature value we got has a few deviation, even if
- * the THERMAL_READY bit is set.
- */
- usleep_range(100, 300);
- ret = readx_poll_timeout(readl, regs + ZX2967_THERMAL_CTRL,
- val, val & ZX2967_THERMAL_READY, 300,
- ZX2967_GET_TEMP_TIMEOUT_US);
- if (ret) {
- dev_err(priv->dev, "Thermal sensor data timeout\n");
- goto unlock;
- }
-
- writel_relaxed(ZX2967_DCF_FREEZE | ZX2967_DCF_EN,
- regs + ZX2967_THERMAL_DCF);
- val = readl_relaxed(regs + ZX2967_THERMAL_CTRL)
- & ZX2967_THERMAL_TEMP_MASK;
- writel_relaxed(ZX2967_POWER_MODE_HIGH,
- regs + ZX2967_THERMAL_POWER_MODE);
-
- /*
- * Calculate temperature
- * In dts, slope is multiplied by 1000.
- */
- *temp = DIV_ROUND_CLOSEST(((s32)val + priv->tzd->tzp->offset) * 1000,
- priv->tzd->tzp->slope);
-
-unlock:
- mutex_unlock(&priv->lock);
- return ret;
-}
-
-static const struct thermal_zone_of_device_ops zx2967_of_thermal_ops = {
- .get_temp = zx2967_thermal_get_temp,
-};
-
-static int zx2967_thermal_probe(struct platform_device *pdev)
-{
- struct zx2967_thermal_priv *priv;
- struct resource *res;
- int ret;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
-
- priv->clk_topcrm = devm_clk_get(&pdev->dev, "topcrm");
- if (IS_ERR(priv->clk_topcrm)) {
- ret = PTR_ERR(priv->clk_topcrm);
- dev_err(&pdev->dev, "failed to get topcrm clock: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(priv->clk_topcrm);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable topcrm clock: %d\n",
- ret);
- return ret;
- }
-
- priv->clk_apb = devm_clk_get(&pdev->dev, "apb");
- if (IS_ERR(priv->clk_apb)) {
- ret = PTR_ERR(priv->clk_apb);
- dev_err(&pdev->dev, "failed to get apb clock: %d\n", ret);
- goto disable_clk_topcrm;
- }
-
- ret = clk_prepare_enable(priv->clk_apb);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable apb clock: %d\n",
- ret);
- goto disable_clk_topcrm;
- }
-
- mutex_init(&priv->lock);
- priv->tzd = thermal_zone_of_sensor_register(&pdev->dev,
- 0, priv, &zx2967_of_thermal_ops);
-
- if (IS_ERR(priv->tzd)) {
- ret = PTR_ERR(priv->tzd);
- dev_err(&pdev->dev, "failed to register sensor: %d\n", ret);
- goto disable_clk_all;
- }
-
- if (priv->tzd->tzp->slope == 0) {
- thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd);
- dev_err(&pdev->dev, "coefficients of sensor is invalid\n");
- ret = -EINVAL;
- goto disable_clk_all;
- }
-
- priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
-
- return 0;
-
-disable_clk_all:
- clk_disable_unprepare(priv->clk_apb);
-disable_clk_topcrm:
- clk_disable_unprepare(priv->clk_topcrm);
- return ret;
-}
-
-static int zx2967_thermal_exit(struct platform_device *pdev)
-{
- struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev);
-
- thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd);
- clk_disable_unprepare(priv->clk_topcrm);
- clk_disable_unprepare(priv->clk_apb);
-
- return 0;
-}
-
-static const struct of_device_id zx2967_thermal_id_table[] = {
- { .compatible = "zte,zx296718-thermal" },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx2967_thermal_id_table);
-
-#ifdef CONFIG_PM_SLEEP
-static int zx2967_thermal_suspend(struct device *dev)
-{
- struct zx2967_thermal_priv *priv = dev_get_drvdata(dev);
-
- if (priv && priv->clk_topcrm)
- clk_disable_unprepare(priv->clk_topcrm);
-
- if (priv && priv->clk_apb)
- clk_disable_unprepare(priv->clk_apb);
-
- return 0;
-}
-
-static int zx2967_thermal_resume(struct device *dev)
-{
- struct zx2967_thermal_priv *priv = dev_get_drvdata(dev);
- int error;
-
- error = clk_prepare_enable(priv->clk_topcrm);
- if (error)
- return error;
-
- error = clk_prepare_enable(priv->clk_apb);
- if (error) {
- clk_disable_unprepare(priv->clk_topcrm);
- return error;
- }
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(zx2967_thermal_pm_ops,
- zx2967_thermal_suspend, zx2967_thermal_resume);
-
-static struct platform_driver zx2967_thermal_driver = {
- .probe = zx2967_thermal_probe,
- .remove = zx2967_thermal_exit,
- .driver = {
- .name = "zx2967_thermal",
- .of_match_table = zx2967_thermal_id_table,
- .pm = &zx2967_thermal_pm_ops,
- },
-};
-module_platform_driver(zx2967_thermal_driver);
-
-MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>");
-MODULE_DESCRIPTION("ZTE zx2967 thermal driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index a5f988a9f948..35fa17f7e599 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
- while (!dev_is_pci(dev))
+ while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)
@@ -115,3 +115,68 @@ void tb_acpi_add_links(struct tb_nhi *nhi)
if (ACPI_FAILURE(status))
dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
}
+
+/**
+ * tb_acpi_is_native() - Did the platform grant native TBT/USB4 control
+ *
+ * Returns %true if the platform granted OS native control over
+ * TBT/USB4. In this case software based connection manager can be used,
+ * otherwise there is firmware based connection manager running.
+ */
+bool tb_acpi_is_native(void)
+{
+ return osc_sb_native_usb4_support_confirmed &&
+ osc_sb_native_usb4_control;
+}
+
+/**
+ * tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform
+ *
+ * When software based connection manager is used, this function
+ * returns %true if platform allows native USB3 tunneling.
+ */
+bool tb_acpi_may_tunnel_usb3(void)
+{
+ if (tb_acpi_is_native())
+ return osc_sb_native_usb4_control & OSC_USB_USB3_TUNNELING;
+ return true;
+}
+
+/**
+ * tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform
+ *
+ * When software based connection manager is used, this function
+ * returns %true if platform allows native DP tunneling.
+ */
+bool tb_acpi_may_tunnel_dp(void)
+{
+ if (tb_acpi_is_native())
+ return osc_sb_native_usb4_control & OSC_USB_DP_TUNNELING;
+ return true;
+}
+
+/**
+ * tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform
+ *
+ * When software based connection manager is used, this function
+ * returns %true if platform allows native PCIe tunneling.
+ */
+bool tb_acpi_may_tunnel_pcie(void)
+{
+ if (tb_acpi_is_native())
+ return osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING;
+ return true;
+}
+
+/**
+ * tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed
+ *
+ * When software based connection manager is used, this function
+ * returns %true if platform allows XDomain connections.
+ */
+bool tb_acpi_is_xdomain_allowed(void)
+{
+ if (tb_acpi_is_native())
+ return osc_sb_native_usb4_control & OSC_USB_XDOMAIN;
+ return true;
+}
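[Editor's note: consumers gate tunnel creation on these helpers; with a firmware connection manager they all return true because the firmware enforces policy itself. A sketch of the call-site pattern — the tb_tunnel_usb3() hunk later in this patch uses exactly this shape:]

	/* Skip a tunnel type the platform did not grant to the OS. */
	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}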
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 6f571e912cf2..8ecd610c62d5 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -178,7 +178,7 @@ int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
/**
* tb_switch_find_cap() - Find switch capability
- * @sw Switch to find the capability for
+ * @sw: Switch to find the capability for
* @cap: Capability to look
*
* Returns offset to start of capability or %-ENOENT if no such
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index bac08b820015..f1aeaff9f368 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -20,7 +20,17 @@
#define TB_CTL_RETRIES 4
/**
- * struct tb_cfg - thunderbolt control channel
+ * struct tb_ctl - Thunderbolt control channel
+ * @nhi: Pointer to the NHI structure
+ * @tx: Transmit ring
+ * @rx: Receive ring
+ * @frame_pool: DMA pool for control messages
+ * @rx_packets: Received control messages
+ * @request_queue_lock: Lock protecting @request_queue
+ * @request_queue: List of outstanding requests
+ * @running: Is the control channel running at the moment
+ * @callback: Callback called when hotplug message is received
+ * @callback_data: Data passed to @callback
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -338,7 +348,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
tb_ctl_pkg_free(pkg);
}
-/**
+/*
* tb_cfg_tx() - transmit a packet on the control channel
*
* len must be a multiple of four.
@@ -375,7 +385,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
return res;
}
-/**
+/*
* tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
*/
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
@@ -602,6 +612,9 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
+ * @nhi: Pointer to NHI
+ * @cb: Callback called for plug events
+ * @cb_data: Data passed to @cb
*
* cb will be invoked once for every hot plug event.
*
@@ -649,6 +662,7 @@ err:
/**
* tb_ctl_free() - free a control channel
+ * @ctl: Control channel to free
*
* Must be called after tb_ctl_stop.
*
@@ -677,6 +691,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
/**
* tb_cfg_start() - start/resume the control channel
+ * @ctl: Control channel to start
*/
void tb_ctl_start(struct tb_ctl *ctl)
{
@@ -691,7 +706,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
}
/**
- * control() - pause the control channel
+ * tb_ctl_stop() - pause the control channel
+ * @ctl: Control channel to stop
*
* All invocations of ctl->callback will have finished after this method
* returns.
@@ -784,6 +800,9 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
/**
* tb_cfg_reset() - send a reset packet and wait for a response
+ * @ctl: Control channel pointer
+ * @route: Router string for the router to send reset
+ * @timeout_msec: Timeout in ms how long to wait for the response
*
* If the switch at route is incorrectly configured then we will not receive a
* reply (even though the switch will reset). The caller should check for
@@ -820,9 +839,17 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
}
/**
- * tb_cfg_read() - read from config space into buffer
+ * tb_cfg_read_raw() - read from config space into buffer
+ * @ctl: Pointer to the control channel
+ * @buffer: Buffer where the data is read
+ * @route: Route string of the router
+ * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
+ * @space: Config space selector
+ * @offset: Dword offset of the register to start reading
+ * @length: Number of dwords to read
+ * @timeout_msec: Timeout in ms how long to wait for the response
*
- * Offset and length are in dwords.
+ * Reads from router config space without translating the possible error.
*/
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
@@ -884,8 +911,16 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
/**
* tb_cfg_write() - write from buffer into config space
+ * @ctl: Pointer to the control channel
+ * @buffer: Data to write
+ * @route: Route string of the router
+ * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
+ * @space: Config space selector
+ * @offset: Dword offset of the register to start writing
+ * @length: Number of dwords to write
+ * @timeout_msec: Timeout in ms how long to wait for the response
*
- * Offset and length are in dwords.
+ * Writes to router config space without translating the possible error.
*/
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
@@ -1022,6 +1057,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
/**
* tb_cfg_get_upstream_port() - get upstream port number of switch at route
+ * @ctl: Pointer to the control channel
+ * @route: Route string of the router
*
* Reads the first dword from the switches TB_CFG_SWITCH config area and
* returns the port number from which the reply originated.
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index 847dd07a7b17..7288aaf01ae6 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -335,6 +335,8 @@ static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
/* Write the block to MAIL_DATA registers */
ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
index f924423fa180..6debaf5a6604 100644
--- a/drivers/thunderbolt/dma_test.c
+++ b/drivers/thunderbolt/dma_test.c
@@ -7,7 +7,6 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -299,14 +298,12 @@ static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
tf->frame.size = 0; /* means 4096 */
tf->dma_test = dt;
- tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
+ tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
if (!tf->data) {
kfree(tf);
return -ENOMEM;
}
- memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE);
-
dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
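[Editor's note: the kmemdup() change above is a pure simplification; allocate-then-copy collapses into one call with the same failure semantics. A minimal sketch, outside the patch:]

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Equivalent to kzalloc() + memcpy(): allocates @len bytes,
	 * copies @src into them, returns NULL on allocation failure. */
	static void *dup_pattern(const void *src, size_t len)
	{
		return kmemdup(src, len, GFP_KERNEL);
	}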
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index f0de94f7acbf..89ae614eaba2 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -118,6 +118,7 @@ static const char * const tb_security_names[] = {
[TB_SECURITY_SECURE] = "secure",
[TB_SECURITY_DPONLY] = "dponly",
[TB_SECURITY_USBONLY] = "usbonly",
+ [TB_SECURITY_NOPCIE] = "nopcie",
};
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
@@ -238,6 +239,22 @@ err_free_str:
}
static DEVICE_ATTR_RW(boot_acl);
+static ssize_t deauthorization_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct tb *tb = container_of(dev, struct tb, dev);
+ bool deauthorization = false;
+
+ /* Only meaningful if authorization is supported */
+ if (tb->security_level == TB_SECURITY_USER ||
+ tb->security_level == TB_SECURITY_SECURE)
+ deauthorization = !!tb->cm_ops->disapprove_switch;
+
+ return sprintf(buf, "%d\n", deauthorization);
+}
+static DEVICE_ATTR_RO(deauthorization);
+
static ssize_t iommu_dma_protection_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -267,6 +284,7 @@ static DEVICE_ATTR_RO(security);
static struct attribute *domain_attrs[] = {
&dev_attr_boot_acl.attr,
+ &dev_attr_deauthorization.attr,
&dev_attr_iommu_dma_protection.attr,
&dev_attr_security.attr,
NULL,
@@ -289,7 +307,7 @@ static umode_t domain_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group domain_attr_group = {
+static const struct attribute_group domain_attr_group = {
.is_visible = domain_attr_is_visible,
.attrs = domain_attrs,
};
@@ -394,7 +412,9 @@ static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
switch (type) {
case TB_CFG_PKG_XDOMAIN_REQ:
case TB_CFG_PKG_XDOMAIN_RESP:
- return tb_xdomain_handle_request(tb, type, buf, size);
+ if (tb_is_xdomain_enabled())
+ return tb_xdomain_handle_request(tb, type, buf, size);
+ break;
default:
tb->cm_ops->handle_event(tb, type, buf, size);
@@ -441,6 +461,9 @@ int tb_domain_add(struct tb *tb)
goto err_ctl_stop;
}
+ tb_dbg(tb, "security level set to %s\n",
+ tb_security_names[tb->security_level]);
+
ret = device_add(&tb->dev);
if (ret)
goto err_ctl_stop;
@@ -602,13 +625,30 @@ int tb_domain_runtime_resume(struct tb *tb)
}
/**
+ * tb_domain_disapprove_switch() - Disapprove switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to disapprove
+ *
+ * This will disconnect the PCIe tunnel from the parent to @sw.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
+{
+ if (!tb->cm_ops->disapprove_switch)
+ return -EPERM;
+
+ return tb->cm_ops->disapprove_switch(tb, sw);
+}
+
+/**
* tb_domain_approve_switch() - Approve switch
* @tb: Domain the switch belongs to
* @sw: Switch to approve
*
* This will approve switch by connection manager specific means. In
- * case of success the connection manager will create tunnels for all
- * supported protocols.
+ * case of success the connection manager will create PCIe tunnel from
+ * parent to @sw.
*/
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 0c8471be3e32..dd03d3096653 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include "tb.h"
-/**
+/*
* tb_eeprom_ctl_write() - write control word
*/
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
@@ -20,7 +20,7 @@ static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}
-/**
+/*
* tb_eeprom_ctl_read() - read control word
*/
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
@@ -33,7 +33,7 @@ enum tb_eeprom_transfer {
TB_EEPROM_OUT,
};
-/**
+/*
* tb_eeprom_active - enable rom access
*
* WARNING: Always disable access after usage. Otherwise the controller will
@@ -62,7 +62,7 @@ static int tb_eeprom_active(struct tb_switch *sw, bool enable)
}
}
-/**
+/*
* tb_eeprom_transfer - transfer one bit
*
* If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
@@ -90,7 +90,7 @@ static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
return tb_eeprom_ctl_write(sw, ctl);
}
-/**
+/*
* tb_eeprom_out - write one byte to the bus
*/
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
@@ -110,7 +110,7 @@ static int tb_eeprom_out(struct tb_switch *sw, u8 val)
return 0;
}
-/**
+/*
* tb_eeprom_in - read one byte from the bus
*/
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
@@ -131,7 +131,7 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
return 0;
}
-/**
+/*
* tb_eeprom_get_drom_offset - get drom offset within eeprom
*/
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
@@ -162,7 +162,7 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
return 0;
}
-/**
+/*
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
@@ -279,7 +279,9 @@ struct tb_drom_entry_port {
/**
- * tb_drom_read_uid_only - read uid directly from drom
+ * tb_drom_read_uid_only() - Read UID directly from DROM
+ * @sw: Router whose UID to read
+ * @uid: UID is placed here
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
* identity.
@@ -374,7 +376,7 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
return 0;
}
-/**
+/*
* tb_drom_parse_entries - parse the linked list of drom entries
*
* Drom must have been copied to sw->drom.
@@ -410,7 +412,7 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
return 0;
}
-/**
+/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
@@ -520,7 +522,14 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
}
/**
- * tb_drom_read - copy drom to sw->drom and parse it
+ * tb_drom_read() - Copy DROM to sw->drom and parse it
+ * @sw: Router whose DROM to read and parse
+ *
+ * This function reads router DROM and if successful parses the entries and
+ * populates the fields in @sw accordingly. Can be called for any router
+ * generation.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
*/
int tb_drom_read(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8b7f941a9bb7..f6f605d48371 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -85,8 +85,8 @@ struct usb4_switch_nvm_auth {
* @set_uuid: Set UUID for the root switch (optional)
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
- * @xdomain_connected - Handle XDomain connected ICM message
- * @xdomain_disconnected - Handle XDomain disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
* @rtd3_veto: Handle RTD3 veto notification ICM message
*/
struct icm {
@@ -1701,10 +1701,12 @@ static void icm_handle_notification(struct work_struct *work)
icm->device_disconnected(tb, n->pkg);
break;
case ICM_EVENT_XDOMAIN_CONNECTED:
- icm->xdomain_connected(tb, n->pkg);
+ if (tb_is_xdomain_enabled())
+ icm->xdomain_connected(tb, n->pkg);
break;
case ICM_EVENT_XDOMAIN_DISCONNECTED:
- icm->xdomain_disconnected(tb, n->pkg);
+ if (tb_is_xdomain_enabled())
+ icm->xdomain_disconnected(tb, n->pkg);
break;
case ICM_EVENT_RTD3_VETO:
icm->rtd3_veto(tb, n->pkg);
@@ -2316,7 +2318,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
- tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+ tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 41e6c738f6c8..bc671730a11f 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -158,6 +158,41 @@ void tb_lc_unconfigure_xdomain(struct tb_port *port)
tb_lc_set_xdomain_configured(port, false);
}
+/**
+ * tb_lc_start_lane_initialization() - Start lane initialization
+ * @port: Device router lane 0 adapter
+ *
+ * Starts lane initialization for @port after the router resumed from
+ * sleep. Should be called for those downstream lane adapters that were
+ * not connected (tb_lc_configure_port() was not called) before sleep.
+ *
+ * Returns %0 on success and negative errno in case of failure.
+ */
+int tb_lc_start_lane_initialization(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int ret, cap;
+ u32 ctrl;
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (sw->generation < 2)
+ return 0;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ ctrl |= TB_LC_SX_CTRL_SLI;
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
unsigned int flags)
{
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cfc622da4f83..a0386d1e3fc9 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -44,7 +44,7 @@ static int ring_interrupt_index(struct tb_ring *ring)
return bit;
}
-/**
+/*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
* ring->nhi->lock must be held.
@@ -105,7 +105,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
iowrite32(new, ring->nhi->iobase + reg);
}
-/**
+/*
* nhi_disable_interrupts() - disable interrupts for all rings
*
* Use only during init and shutdown.
@@ -182,7 +182,7 @@ static bool ring_empty(struct tb_ring *ring)
return ring->head == ring->tail;
}
-/**
+/*
* ring_write_descriptors() - post frames from ring->queue to the controller
*
* ring->lock is held.
@@ -212,7 +212,7 @@ static void ring_write_descriptors(struct tb_ring *ring)
}
}
-/**
+/*
* ring_work() - progress completed frames
*
* If the ring is shutting down then all frames are marked as canceled and
@@ -592,6 +592,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
/**
* tb_ring_start() - enable a ring
+ * @ring: Ring to start
*
* Must not be invoked in parallel with tb_ring_stop().
*/
@@ -667,6 +668,7 @@ EXPORT_SYMBOL_GPL(tb_ring_start);
/**
* tb_ring_stop() - shutdown a ring
+ * @ring: Ring to stop
*
* Must not be invoked from a callback.
*
@@ -754,7 +756,7 @@ void tb_ring_free(struct tb_ring *ring)
dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
ring->hop);
- /**
+ /*
* ring->work can no longer be scheduled (it is scheduled only
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
* to finish before freeing the ring.
@@ -1188,6 +1190,29 @@ static void tb_apple_add_links(struct tb_nhi *nhi)
}
}
+static struct tb *nhi_select_cm(struct tb_nhi *nhi)
+{
+ struct tb *tb;
+
+ /*
+ * USB4 case is simple. If we got control of any of the
+ * capabilities, we use software CM.
+ */
+ if (tb_acpi_is_native())
+ return tb_probe(nhi);
+
+ /*
+ * Either firmware based CM is running (we did not get control
+ * from the firmware) or this is pre-USB4 PC so try first
+ * firmware CM and then fallback to software CM.
+ */
+ tb = icm_probe(nhi);
+ if (!tb)
+ tb = tb_probe(nhi);
+
+ return tb;
+}
+
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
@@ -1256,9 +1281,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tb_apple_add_links(nhi);
tb_acpi_add_links(nhi);
- tb = icm_probe(nhi);
- if (!tb)
- tb = tb_probe(nhi);
+ tb = nhi_select_cm(nhi);
if (!tb) {
dev_err(&nhi->pdev->dev,
"failed to determine connection manager, aborting\n");
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ca7d738d66de..f63e205a35d9 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -466,6 +466,7 @@ void tb_path_deactivate(struct tb_path *path)
/**
* tb_path_activate() - activate a path
+ * @path: Path to activate
*
* Activate a path starting with the last hop and iterating backwards. The
* caller must fill path->hops before calling tb_path_activate().
@@ -561,6 +562,7 @@ err:
/**
* tb_path_is_invalid() - check whether any ports on the path are invalid
+ * @path: Path to check
*
* Return: Returns true if the path is invalid, false otherwise.
*/
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a8572f49d3ad..b63fecca6c2a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -525,6 +525,8 @@ int tb_port_state(struct tb_port *port)
/**
* tb_wait_for_port() - wait for a port to become ready
+ * @port: Port to wait
+ * @wait_if_unplugged: Wait also when port is unplugged
*
* Wait up to 1 second for a port to reach state TB_PORT_UP. If
* wait_if_unplugged is set then we also wait if the port is in state
@@ -589,6 +591,8 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
/**
* tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
+ * @port: Port to add/remove NFC credits
+ * @credits: Credits to add/remove
*
* Change the number of NFC credits allocated to @port by @credits. To remove
* NFC credits pass a negative amount of credits.
@@ -646,6 +650,8 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
+ * @port: Port whose counters to clear
+ * @counter: Counter index to clear
*
* Return: Returns 0 on success or an error code on failure.
*/
@@ -718,7 +724,7 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
-/**
+/*
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -1065,6 +1071,17 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
tb_port_set_link_width(port, 1);
}
+static int tb_port_start_lane_initialization(struct tb_port *port)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(port->sw))
+ return 0;
+
+ ret = tb_lc_start_lane_initialization(port);
+ return ret == -EINVAL ? 0 : ret;
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
@@ -1302,7 +1319,7 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
}
/**
- * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
* @sw: Switch to reset
*
* Return: Returns 0 on success or an error code on failure.
@@ -1326,7 +1343,7 @@ int tb_switch_reset(struct tb_switch *sw)
return res.err;
}
-/**
+/*
* tb_plug_events_active() - enable/disable plug events on a switch
*
* Also configures a sane plug_events_delay of 255ms.
@@ -1376,6 +1393,30 @@ static ssize_t authorized_show(struct device *dev,
return sprintf(buf, "%u\n", sw->authorized);
}
+static int disapprove_switch(struct device *dev, void *not_used)
+{
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(dev);
+ if (sw && sw->authorized) {
+ int ret;
+
+ /* First children */
+ ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
+ if (ret)
+ return ret;
+
+ ret = tb_domain_disapprove_switch(sw->tb, sw);
+ if (ret)
+ return ret;
+
+ sw->authorized = 0;
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+ }
+
+ return 0;
+}
+
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
int ret = -EINVAL;
@@ -1383,10 +1424,18 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
- if (sw->authorized)
+ if (!!sw->authorized == !!val)
goto unlock;
switch (val) {
+ /* Disapprove switch */
+ case 0:
+ if (tb_route(sw)) {
+ ret = disapprove_switch(&sw->dev, NULL);
+ goto unlock;
+ }
+ break;
+
/* Approve switch */
case 1:
if (sw->key)
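[Editor's note: with this change, writing 0 mirrors writing 1: the disapprove path walks children depth-first (device_for_each_child_reverse() above) so a PCIe tunnel is never torn down while a downstream router is still authorized. Intended use from userspace looks like this — device path illustrative, and only meaningful at the user/secure security levels:]

	# echo 0 > /sys/bus/thunderbolt/devices/0-1/authorized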
@@ -1725,7 +1774,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct tb_switch *sw = tb_to_switch(dev);
- if (attr == &dev_attr_device.attr) {
+ if (attr == &dev_attr_authorized.attr) {
+ if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
+ sw->tb->security_level == TB_SECURITY_DPONLY)
+ return 0;
+ } else if (attr == &dev_attr_device.attr) {
if (!sw->device)
return 0;
} else if (attr == &dev_attr_device_name.attr) {
@@ -1771,7 +1824,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
return sw->safe_mode ? 0 : attr->mode;
}
-static struct attribute_group switch_group = {
+static const struct attribute_group switch_group = {
.is_visible = switch_attr_is_visible,
.attrs = switch_attrs,
};
@@ -2606,6 +2659,7 @@ void tb_switch_remove(struct tb_switch *sw)
/**
* tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
+ * @sw: Router to mark unplugged
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
@@ -2694,8 +2748,22 @@ int tb_switch_resume(struct tb_switch *sw)
/* check for surviving downstream switches */
tb_switch_for_each_port(sw, port) {
- if (!tb_port_has_remote(port) && !port->xdomain)
+ if (!tb_port_has_remote(port) && !port->xdomain) {
+ /*
+ * For disconnected downstream lane adapters
+ * start lane initialization now so we detect
+ * future connects.
+ */
+ if (!tb_is_upstream_port(port) && tb_port_is_null(port))
+ tb_port_start_lane_initialization(port);
continue;
+ } else if (port->xdomain) {
+ /*
+ * Start lane initialization for XDomain so the
+ * link gets re-established.
+ */
+ tb_port_start_lane_initialization(port);
+ }
if (tb_wait_for_port(port, true) <= 0) {
tb_port_warn(port,
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 51d5b031cada..1f000ac1728b 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -179,6 +179,9 @@ static void tb_scan_xdomain(struct tb_port *port)
struct tb_xdomain *xd;
u64 route;
+ if (!tb_is_xdomain_enabled())
+ return;
+
route = tb_downstream_route(port);
xd = tb_xdomain_find_by_route(tb, route);
if (xd) {
@@ -434,6 +437,11 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
+ if (!tb_acpi_may_tunnel_usb3()) {
+ tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
+ return 0;
+ }
+
up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
if (!up)
return 0;
@@ -509,6 +517,9 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
struct tb_port *port;
int ret;
+ if (!tb_acpi_may_tunnel_usb3())
+ return 0;
+
if (tb_route(sw)) {
ret = tb_tunnel_usb3(sw->tb, sw);
if (ret)
@@ -528,7 +539,7 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
static void tb_scan_port(struct tb_port *port);
-/**
+/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
static void tb_scan_switch(struct tb_switch *sw)
@@ -544,7 +555,7 @@ static void tb_scan_switch(struct tb_switch *sw)
pm_runtime_put_autosuspend(&sw->dev);
}
-/**
+/*
* tb_scan_port() - check for and initialize switches below port
*/
static void tb_scan_port(struct tb_port *port)
@@ -704,7 +715,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
tb_tunnel_free(tunnel);
}
-/**
+/*
* tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
*/
static void tb_free_invalid_tunnels(struct tb *tb)
@@ -719,7 +730,7 @@ static void tb_free_invalid_tunnels(struct tb *tb)
}
}
-/**
+/*
* tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
@@ -838,6 +849,11 @@ static void tb_tunnel_dp(struct tb *tb)
struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel;
+ if (!tb_acpi_may_tunnel_dp()) {
+ tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+ return;
+ }
+
/*
* Find pair of inactive DP IN and DP OUT adapters and then
* establish a DP tunnel between them.
@@ -1002,6 +1018,25 @@ static void tb_disconnect_and_release_dp(struct tb *tb)
}
}
+static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *up;
+
+ up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
+ if (WARN_ON(!up))
+ return -ENODEV;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
+ if (WARN_ON(!tunnel))
+ return -ENODEV;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+ tb_tunnel_free(tunnel);
+ return 0;
+}
+
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_port *up, *down, *port;
@@ -1101,7 +1136,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
/* hotplug handling */
-/**
+/*
* tb_handle_hotplug() - handle hotplug event
*
* Executes on tb->wq.
@@ -1210,7 +1245,7 @@ out:
kfree(ev);
}
-/**
+/*
* tb_schedule_hotplug_handler() - callback function for the control channel
*
* Delegates to tb_handle_hotplug.
@@ -1512,6 +1547,7 @@ static const struct tb_cm_ops tb_cm_ops = {
.runtime_suspend = tb_runtime_suspend,
.runtime_resume = tb_runtime_resume,
.handle_event = tb_handle_event,
+ .disapprove_switch = tb_disconnect_pci,
.approve_switch = tb_tunnel_pci,
.approve_xdomain_paths = tb_approve_xdomain_paths,
.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
@@ -1526,7 +1562,11 @@ struct tb *tb_probe(struct tb_nhi *nhi)
if (!tb)
return NULL;
- tb->security_level = TB_SECURITY_USER;
+ if (tb_acpi_may_tunnel_pcie())
+ tb->security_level = TB_SECURITY_USER;
+ else
+ tb->security_level = TB_SECURITY_NOPCIE;
+
tb->cm_ops = &tb_cm_ops;
tcm = tb_priv(tb);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 554feda1e359..beea88c34c0f 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -138,6 +138,8 @@ struct tb_switch_tmu {
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
+ *
+ * In USB4 terminology this structure represents a router.
*/
struct tb_switch {
struct device dev;
@@ -196,6 +198,9 @@ struct tb_switch {
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
* @list: Used to link ports to DP resources list
+ *
+ * In USB4 terminology this structure represents an adapter (protocol or
+ * lane adapter).
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -361,6 +366,7 @@ struct tb_path {
* @handle_event: Handle thunderbolt event
* @get_boot_acl: Get boot ACL list
* @set_boot_acl: Set boot ACL list
+ * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
* @approve_switch: Approve switch
* @add_switch_key: Add key to switch
* @challenge_switch_key: Challenge switch using key
@@ -394,6 +400,7 @@ struct tb_cm_ops {
const void *buf, size_t size);
int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
+ int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
@@ -629,6 +636,7 @@ int tb_domain_thaw_noirq(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
+int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
@@ -923,6 +931,7 @@ int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
void tb_lc_unconfigure_xdomain(struct tb_port *port);
+int tb_lc_start_lane_initialization(struct tb_port *port);
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
@@ -949,6 +958,7 @@ static inline u64 tb_downstream_route(struct tb_port *port)
| ((u64) port->port << (port->sw->config.depth * 8));
}
+bool tb_is_xdomain_enabled(void);
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
@@ -1034,8 +1044,20 @@ void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI
void tb_acpi_add_links(struct tb_nhi *nhi);
+
+bool tb_acpi_is_native(void);
+bool tb_acpi_may_tunnel_usb3(void);
+bool tb_acpi_may_tunnel_dp(void);
+bool tb_acpi_may_tunnel_pcie(void);
+bool tb_acpi_is_xdomain_allowed(void);
#else
static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+
+static inline bool tb_acpi_is_native(void) { return true; }
+static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
+static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
+static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
+static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index ae427a953489..626751e06292 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -464,6 +464,7 @@ struct tb_regs_hop {
#define TB_LC_SX_CTRL_L1D BIT(17)
#define TB_LC_SX_CTRL_L2C BIT(20)
#define TB_LC_SX_CTRL_L2D BIT(21)
+#define TB_LC_SX_CTRL_SLI BIT(29)
#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
#define TB_LC_SX_CTRL_SLP BIT(31)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index dcdf9c7a9cae..6557b6e07009 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -830,7 +830,7 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
* @transmit_path: HopID used for transmitting packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %0 if RX path is not needed.
- * @reveive_path: HopID used for receiving packets
+ * @receive_path: HopID used for receiving packets
*
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
@@ -932,12 +932,14 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
+ int pcie_enabled = tb_acpi_may_tunnel_pcie();
+
/*
- * PCIe tunneling affects the USB3 bandwidth so take that it
- * into account here.
+ * PCIe tunneling, if enabled, affects the USB3 bandwidth so
+ * take that into account here.
*/
- *consumed_up = tunnel->allocated_up * (3 + 1) / 3;
- *consumed_down = tunnel->allocated_down * (3 + 1) / 3;
+ *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
+ *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
return 0;
}
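[Editor's note: a worked example of the revised formula, with hypothetical numbers:]

	/*
	 * consumed = allocated * (3 + pcie_enabled) / 3
	 *
	 * allocated_up = 900 Mb/s:
	 *   PCIe tunneling allowed:  900 * 4 / 3 = 1200 Mb/s consumed
	 *   PCIe tunneling blocked:  900 * 3 / 3 =  900 Mb/s consumed
	 *
	 * The extra third is only reserved when PCIe packets can
	 * actually share the link with the USB3 tunnel.
	 */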
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 67a2867382ed..680bc738dd66 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -331,13 +331,18 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
- if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+ if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
+ tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
val |= ROUTER_CS_5_UTO;
xhci = false;
}
- /* Only enable PCIe tunneling if the parent router supports it */
- if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+ /*
+ * Only enable PCIe tunneling if the parent router supports it
+ * and it is not disabled.
+ */
+ if (tb_acpi_may_tunnel_pcie() &&
+ tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
val |= ROUTER_CS_5_PTO;
/*
* xHCI can be enabled if PCIe tunneling is supported
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9b3a299a1202..7cf8b9c85ab7 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -30,6 +30,10 @@ struct xdomain_request_work {
struct tb *tb;
};
+static bool tb_xdomain_enabled = true;
+module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
+MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
+
/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);
@@ -47,6 +51,11 @@ static const uuid_t tb_xdp_uuid =
UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+bool tb_is_xdomain_enabled(void)
+{
+ return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
+}
+
static bool tb_xdomain_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
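[Editor's note: the parameter is read-only after load (perms 0444), so the combined check above means XDomain traffic is serviced only when both the administrator and the platform allow it. Illustrative usage:]

	# built-in driver: add to the kernel command line
	#     thunderbolt.xdomain=0
	# modular driver:
	#     modprobe thunderbolt xdomain=0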
@@ -670,7 +679,7 @@ EXPORT_SYMBOL_GPL(tb_register_service_driver);
/**
* tb_unregister_service_driver() - Unregister XDomain service driver
- * @xdrv: Driver to unregister
+ * @drv: Driver to unregister
*
* Unregisters XDomain service driver from the bus.
*/
@@ -756,7 +765,7 @@ static struct attribute *tb_service_attrs[] = {
NULL,
};
-static struct attribute_group tb_service_attr_group = {
+static const struct attribute_group tb_service_attr_group = {
.attrs = tb_service_attrs,
};
@@ -1239,7 +1248,7 @@ static struct attribute *xdomain_attrs[] = {
NULL,
};
-static struct attribute_group xdomain_attr_group = {
+static const struct attribute_group xdomain_attr_group = {
.attrs = xdomain_attrs,
};
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 47a6e42f0d04..e15cd6b5bb99 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 3c1c5a9240a7..730de6bf048b 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -2,15 +2,13 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
- n_null.o ttynull.o
+ n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
-obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
-obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
obj-$(CONFIG_R3964) += n_r3964.o
obj-y += vt/
@@ -25,6 +23,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 13f63c01c589..18b78ea110ef 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -998,7 +998,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
state->custom_divisor = ss->custom_divisor;
port->close_delay = ss->close_delay * HZ/100;
port->closing_wait = ss->closing_wait * HZ/100;
- port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (tty_port_initialized(port)) {
@@ -1386,8 +1385,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
tty->driver_data = info;
tty->port = port;
- port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-
retval = startup(tty, info);
if (retval) {
return retval;
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 509d1042825a..c90848919644 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -605,7 +605,7 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
hvcsd->todo_mask |= HVCS_QUICK_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
- /* This is synch because tty->low_latency == 1 */
+ /* This is synch -- FIXME :js: it is not! */
if(got)
tty_flip_buffer_push(&hvcsd->port);
@@ -825,9 +825,6 @@ static int hvcs_remove(struct vio_dev *dev)
unsigned long flags;
struct tty_struct *tty;
- if (!hvcsd)
- return -ENODEV;
-
/* By this time the vty-server won't be getting any more interrupts */
spin_lock_irqsave(&hvcsd->lock, flags);
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 23584769fc29..6dacbc5e286c 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -101,7 +101,6 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
tty->port.tty = linux_tty;
linux_tty->driver_data = tty;
- tty->port.low_latency = 1;
if (tty->tty_type == TTYTYPE_MODEM)
ipwireless_ppp_open(tty->network);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 3703987c4666..4203b64bccdb 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1273,7 +1273,6 @@ static int mxser_set_serial_info(struct tty_struct *tty,
(ss->flags & ASYNC_FLAGS));
port->close_delay = ss->close_delay * HZ / 100;
port->closing_wait = ss->closing_wait * HZ / 100;
- port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
(ss->baud_base != info->baud_base ||
ss->custom_divisor !=
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c676fa89ee0b..51dafc06f541 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2559,7 +2559,8 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
*/
static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr)
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 12557ee1edb6..1363e659dc1d 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -416,13 +416,19 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
* Returns the number of bytes returned or error code.
*/
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
- __u8 __user *buf, size_t nr)
+ __u8 *kbuf, size_t nr,
+ void **cookie, unsigned long offset)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int ret = 0;
struct n_hdlc_buf *rbuf;
DECLARE_WAITQUEUE(wait, current);
+ /* Is this a repeated call for an rbuf we already found earlier? */
+ rbuf = *cookie;
+ if (rbuf)
+ goto have_rbuf;
+
add_wait_queue(&tty->read_wait, &wait);
for (;;) {
@@ -436,25 +442,8 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
set_current_state(TASK_INTERRUPTIBLE);
rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
- if (rbuf) {
- if (rbuf->count > nr) {
- /* too large for caller's buffer */
- ret = -EOVERFLOW;
- } else {
- __set_current_state(TASK_RUNNING);
- if (copy_to_user(buf, rbuf->buf, rbuf->count))
- ret = -EFAULT;
- else
- ret = rbuf->count;
- }
-
- if (n_hdlc->rx_free_buf_list.count >
- DEFAULT_RX_BUF_COUNT)
- kfree(rbuf);
- else
- n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
+ if (rbuf)
break;
- }
/* no data */
if (tty_io_nonblock(tty, file)) {
@@ -473,6 +462,39 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
remove_wait_queue(&tty->read_wait, &wait);
__set_current_state(TASK_RUNNING);
+ if (!rbuf)
+ return ret;
+ *cookie = rbuf;
+
+have_rbuf:
+ /* Have we used it up entirely? */
+ if (offset >= rbuf->count)
+ goto done_with_rbuf;
+
+ /* More data to go, but can't copy any more? EOVERFLOW */
+ ret = -EOVERFLOW;
+ if (!nr)
+ goto done_with_rbuf;
+
+ /* Copy as much data as possible */
+ ret = rbuf->count - offset;
+ if (ret > nr)
+ ret = nr;
+ memcpy(kbuf, rbuf->buf+offset, ret);
+ offset += ret;
+
+ /* If we still have data left, we leave the rbuf in the cookie */
+ if (offset < rbuf->count)
+ return ret;
+
+done_with_rbuf:
+ *cookie = NULL;
+
+ if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
+ kfree(rbuf);
+ else
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
+
return ret;
} /* end of n_hdlc_tty_read() */
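
The cookie/offset pair added above turns one blocking read into a resumable sequence: the first call finds (or waits for) an rbuf and parks it in *cookie, and follow-up calls copy further slices until the frame is consumed. A simplified sketch of a caller driving that contract (illustrative only; the real caller is the tty core's read path):

/* Illustrative loop only -- not part of this patch. */
static ssize_t example_drain_frame(struct tty_struct *tty, struct file *file)
{
	unsigned char scratch[256];
	void *cookie = NULL;
	unsigned long offset = 0;
	ssize_t total = 0, ret;

	do {
		ret = n_hdlc_tty_read(tty, file, scratch, sizeof(scratch),
				      &cookie, offset);
		if (ret < 0)
			return total ? total : ret;
		/* ... hand 'ret' bytes from scratch to the consumer ... */
		total += ret;
		offset += ret;
	} while (cookie);	/* non-NULL: the same frame has more bytes */

	return total;
}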
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
index 96feabae4740..ce03ae78f5c6 100644
--- a/drivers/tty/n_null.c
+++ b/drivers/tty/n_null.c
@@ -20,7 +20,8 @@ static void n_null_close(struct tty_struct *tty)
}
static ssize_t n_null_read(struct tty_struct *tty, struct file *file,
- unsigned char __user * buf, size_t nr)
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
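
All of these conversions target the same updated line-discipline read prototype, reconstructed here from the hunks themselves: the buffer is now a kernel buffer, and the cookie/offset pair lets the core re-enter the ldisc to continue a partially consumed record.

/* Shape of the converted ->read() hook, as used by the hunks above. */
ssize_t (*read)(struct tty_struct *tty, struct file *file,
		unsigned char *buf, size_t nr,
		void **cookie, unsigned long offset);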
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 934dd2fb2ec8..3161f0a535e3 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -129,7 +129,7 @@ static void remove_client_block(struct r3964_info *pInfo,
static int r3964_open(struct tty_struct *tty);
static void r3964_close(struct tty_struct *tty);
static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
- unsigned char __user * buf, size_t nr);
+ void *cookie, unsigned char *buf, size_t nr);
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
@@ -1058,7 +1058,8 @@ static void r3964_close(struct tty_struct *tty)
}
static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
- unsigned char __user * buf, size_t nr)
+ unsigned char *kbuf, size_t nr,
+ void **cookie, unsigned long offset)
{
struct r3964_info *pInfo = tty->disc_data;
struct r3964_client_info *pClient;
@@ -1109,10 +1110,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
kfree(pMsg);
TRACE_M("r3964_read - msg kfree %p", pMsg);
- if (copy_to_user(buf, &theMsg, ret)) {
- ret = -EFAULT;
- goto unlock;
- }
+ memcpy(kbuf, &theMsg, ret);
TRACE_PS("read - return %d", ret);
goto unlock;
diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
deleted file mode 100644
index 4479af4d2fa5..000000000000
--- a/drivers/tty/n_tracerouter.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * n_tracerouter.c - Trace data router through tty space
- *
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This trace router uses the Linux line discipline framework to route
- * trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
- * The solution is not specific to a HW modem and this line disciple can
- * be used to route any stream of data in kernel space.
- * This is part of a solution for the P1149.7, compact JTAG, standard.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <linux/tty.h>
-#include <linux/tty_ldisc.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/bug.h>
-#include "n_tracesink.h"
-
-/*
- * Other ldisc drivers use 65536 which basically means,
- * 'I can always accept 64k' and flow control is off.
- * This number is deemed appropriate for this driver.
- */
-#define RECEIVE_ROOM 65536
-#define DRIVERNAME "n_tracerouter"
-
-/*
- * struct to hold private configuration data for this ldisc.
- * opencalled is used to hold if this ldisc has been opened.
- * kref_tty holds the tty reference the ldisc sits on top of.
- */
-struct tracerouter_data {
- u8 opencalled;
- struct tty_struct *kref_tty;
-};
-static struct tracerouter_data *tr_data;
-
-/* lock for when tty reference is being used */
-static DEFINE_MUTEX(routelock);
-
-/**
- * n_tracerouter_open() - Called when a tty is opened by a SW entity.
- * @tty: terminal device to the ldisc.
- *
- * Return:
- * 0 for success.
- *
- * Caveats: This should only be opened one time per SW entity.
- */
-static int n_tracerouter_open(struct tty_struct *tty)
-{
- int retval = -EEXIST;
-
- mutex_lock(&routelock);
- if (tr_data->opencalled == 0) {
-
- tr_data->kref_tty = tty_kref_get(tty);
- if (tr_data->kref_tty == NULL) {
- retval = -EFAULT;
- } else {
- tr_data->opencalled = 1;
- tty->disc_data = tr_data;
- tty->receive_room = RECEIVE_ROOM;
- tty_driver_flush_buffer(tty);
- retval = 0;
- }
- }
- mutex_unlock(&routelock);
- return retval;
-}
-
-/**
- * n_tracerouter_close() - close connection
- * @tty: terminal device to the ldisc.
- *
- * Called when a software entity wants to close a connection.
- */
-static void n_tracerouter_close(struct tty_struct *tty)
-{
- struct tracerouter_data *tptr = tty->disc_data;
-
- mutex_lock(&routelock);
- WARN_ON(tptr->kref_tty != tr_data->kref_tty);
- tty_driver_flush_buffer(tty);
- tty_kref_put(tr_data->kref_tty);
- tr_data->kref_tty = NULL;
- tr_data->opencalled = 0;
- tty->disc_data = NULL;
- mutex_unlock(&routelock);
-}
-
-/**
- * n_tracerouter_read() - read request from user space
- * @tty: terminal device passed into the ldisc.
- * @file: pointer to open file object.
- * @buf: pointer to the data buffer that gets eventually returned.
- * @nr: number of bytes of the data buffer that is returned.
- *
- * function that allows read() functionality in userspace. By default if this
- * is not implemented it returns -EIO. This module is functioning like a
- * router via n_tracerouter_receivebuf(), and there is no real requirement
- * to implement this function. However, an error return value other than
- * -EIO should be used just to show that there was an intent not to have
- * this function implemented. Return value based on read() man pages.
- *
- * Return:
- * -EINVAL
- */
-static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr) {
- return -EINVAL;
-}
-
-/**
- * n_tracerouter_write() - Function that allows write() in userspace.
- * @tty: terminal device passed into the ldisc.
- * @file: pointer to open file object.
- * @buf: pointer to the data buffer that gets eventually returned.
- * @nr: number of bytes of the data buffer that is returned.
- *
- * By default if this is not implemented, it returns -EIO.
- * This should not be implemented, ever, because
- * 1. this driver is functioning like a router via
- * n_tracerouter_receivebuf()
- * 2. No writes to HW will ever go through this line discpline driver.
- * However, an error return value other than -EIO should be used
- * just to show that there was an intent not to have this function
- * implemented. Return value based on write() man pages.
- *
- * Return:
- * -EINVAL
- */
-static ssize_t n_tracerouter_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr) {
- return -EINVAL;
-}
-
-/**
- * n_tracerouter_receivebuf() - Routing function for driver.
- * @tty: terminal device passed into the ldisc. It's assumed
- * tty will never be NULL.
- * @cp: buffer, block of characters to be eventually read by
- * someone, somewhere (user read() call or some kernel function).
- * @fp: flag buffer.
- * @count: number of characters (aka, bytes) in cp.
- *
- * This function takes the input buffer, cp, and passes it to
- * an external API function for processing.
- */
-static void n_tracerouter_receivebuf(struct tty_struct *tty,
- const unsigned char *cp,
- char *fp, int count)
-{
- mutex_lock(&routelock);
- n_tracesink_datadrain((u8 *) cp, count);
- mutex_unlock(&routelock);
-}
-
-/*
- * Flush buffer is not impelemented as the ldisc has no internal buffering
- * so the tty_driver_flush_buffer() is sufficient for this driver's needs.
- */
-
-static struct tty_ldisc_ops tty_ptirouter_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = DRIVERNAME,
- .open = n_tracerouter_open,
- .close = n_tracerouter_close,
- .read = n_tracerouter_read,
- .write = n_tracerouter_write,
- .receive_buf = n_tracerouter_receivebuf
-};
-
-/**
- * n_tracerouter_init - module initialisation
- *
- * Registers this module as a line discipline driver.
- *
- * Return:
- * 0 for success, any other value error.
- */
-static int __init n_tracerouter_init(void)
-{
- int retval;
-
- tr_data = kzalloc(sizeof(struct tracerouter_data), GFP_KERNEL);
- if (tr_data == NULL)
- return -ENOMEM;
-
-
- /* Note N_TRACEROUTER is defined in linux/tty.h */
- retval = tty_register_ldisc(N_TRACEROUTER, &tty_ptirouter_ldisc);
- if (retval < 0) {
- pr_err("%s: Registration failed: %d\n", __func__, retval);
- kfree(tr_data);
- }
- return retval;
-}
-
-/**
- * n_tracerouter_exit - module unload
- *
- * Removes this module as a line discipline driver.
- */
-static void __exit n_tracerouter_exit(void)
-{
- int retval = tty_unregister_ldisc(N_TRACEROUTER);
-
- if (retval < 0)
- pr_err("%s: Unregistration failed: %d\n", __func__, retval);
- else
- kfree(tr_data);
-}
-
-module_init(n_tracerouter_init);
-module_exit(n_tracerouter_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jay Freyensee");
-MODULE_ALIAS_LDISC(N_TRACEROUTER);
-MODULE_DESCRIPTION("Trace router ldisc driver");
diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
deleted file mode 100644
index d96ba82cc356..000000000000
--- a/drivers/tty/n_tracesink.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * n_tracesink.c - Trace data router and sink path through tty space.
- *
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The trace sink uses the Linux line discipline framework to receive
- * trace data coming from the PTI source line discipline driver
- * to a user-desired tty port, like USB.
- * This is to provide a way to extract modem trace data on
- * devices that do not have a PTI HW module, or just need modem
- * trace data to come out of a different HW output port.
- * This is part of a solution for the P1149.7, compact JTAG, standard.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <linux/tty.h>
-#include <linux/tty_ldisc.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/bug.h>
-#include "n_tracesink.h"
-
-/*
- * Other ldisc drivers use 65536 which basically means,
- * 'I can always accept 64k' and flow control is off.
- * This number is deemed appropriate for this driver.
- */
-#define RECEIVE_ROOM 65536
-#define DRIVERNAME "n_tracesink"
-
-/*
- * there is a quirk with this ldisc is he can write data
- * to a tty from anyone calling his kernel API, which
- * meets customer requirements in the drivers/misc/pti.c
- * project. So he needs to know when he can and cannot write when
- * the API is called. In theory, the API can be called
- * after an init() but before a successful open() which
- * would crash the system if tty is not checked.
- */
-static struct tty_struct *this_tty;
-static DEFINE_MUTEX(writelock);
-
-/**
- * n_tracesink_open() - Called when a tty is opened by a SW entity.
- * @tty: terminal device to the ldisc.
- *
- * Return:
- * 0 for success,
- * -EFAULT = couldn't get a tty kref n_tracesink will sit
- * on top of
- * -EEXIST = open() called successfully once and it cannot
- * be called again.
- *
- * Caveats: open() should only be successful the first time a
- * SW entity calls it.
- */
-static int n_tracesink_open(struct tty_struct *tty)
-{
- int retval = -EEXIST;
-
- mutex_lock(&writelock);
- if (this_tty == NULL) {
- this_tty = tty_kref_get(tty);
- if (this_tty == NULL) {
- retval = -EFAULT;
- } else {
- tty->disc_data = this_tty;
- tty_driver_flush_buffer(tty);
- retval = 0;
- }
- }
- mutex_unlock(&writelock);
-
- return retval;
-}
-
-/**
- * n_tracesink_close() - close connection
- * @tty: terminal device to the ldisc.
- *
- * Called when a software entity wants to close a connection.
- */
-static void n_tracesink_close(struct tty_struct *tty)
-{
- mutex_lock(&writelock);
- tty_driver_flush_buffer(tty);
- tty_kref_put(this_tty);
- this_tty = NULL;
- tty->disc_data = NULL;
- mutex_unlock(&writelock);
-}
-
-/**
- * n_tracesink_read() - read request from user space
- * @tty: terminal device passed into the ldisc.
- * @file: pointer to open file object.
- * @buf: pointer to the data buffer that gets eventually returned.
- * @nr: number of bytes of the data buffer that is returned.
- *
- * function that allows read() functionality in userspace. By default if this
- * is not implemented it returns -EIO. This module is functioning like a
- * router via n_tracesink_receivebuf(), and there is no real requirement
- * to implement this function. However, an error return value other than
- * -EIO should be used just to show that there was an intent not to have
- * this function implemented. Return value based on read() man pages.
- *
- * Return:
- * -EINVAL
- */
-static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr) {
- return -EINVAL;
-}
-
-/**
- * n_tracesink_write() - Function that allows write() in userspace.
- * @tty: terminal device passed into the ldisc.
- * @file: pointer to open file object.
- * @buf: pointer to the data buffer that gets eventually returned.
- * @nr: number of bytes of the data buffer that is returned.
- *
- * By default if this is not implemented, it returns -EIO.
- * This should not be implemented, ever, because
- * 1. this driver is functioning like a router via
- * n_tracesink_receivebuf()
- * 2. No writes to HW will ever go through this line discpline driver.
- * However, an error return value other than -EIO should be used
- * just to show that there was an intent not to have this function
- * implemented. Return value based on write() man pages.
- *
- * Return:
- * -EINVAL
- */
-static ssize_t n_tracesink_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr) {
- return -EINVAL;
-}
-
-/**
- * n_tracesink_datadrain() - Kernel API function used to route
- * trace debugging data to user-defined
- * port like USB.
- *
- * @buf: Trace debuging data buffer to write to tty target
- * port. Null value will return with no write occurring.
- * @count: Size of buf. Value of 0 or a negative number will
- * return with no write occuring.
- *
- * Caveat: If this line discipline does not set the tty it sits
- * on top of via an open() call, this API function will not
- * call the tty's write() call because it will have no pointer
- * to call the write().
- */
-void n_tracesink_datadrain(u8 *buf, int count)
-{
- mutex_lock(&writelock);
-
- if ((buf != NULL) && (count > 0) && (this_tty != NULL))
- this_tty->ops->write(this_tty, buf, count);
-
- mutex_unlock(&writelock);
-}
-EXPORT_SYMBOL_GPL(n_tracesink_datadrain);
-
-/*
- * Flush buffer is not impelemented as the ldisc has no internal buffering
- * so the tty_driver_flush_buffer() is sufficient for this driver's needs.
- */
-
-/*
- * tty_ldisc function operations for this driver.
- */
-static struct tty_ldisc_ops tty_n_tracesink = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = DRIVERNAME,
- .open = n_tracesink_open,
- .close = n_tracesink_close,
- .read = n_tracesink_read,
- .write = n_tracesink_write
-};
-
-/**
- * n_tracesink_init- module initialisation
- *
- * Registers this module as a line discipline driver.
- *
- * Return:
- * 0 for success, any other value error.
- */
-static int __init n_tracesink_init(void)
-{
- /* Note N_TRACESINK is defined in linux/tty.h */
- int retval = tty_register_ldisc(N_TRACESINK, &tty_n_tracesink);
-
- if (retval < 0)
- pr_err("%s: Registration failed: %d\n", __func__, retval);
-
- return retval;
-}
-
-/**
- * n_tracesink_exit - module unload
- *
- * Removes this module as a line discipline driver.
- */
-static void __exit n_tracesink_exit(void)
-{
- int retval = tty_unregister_ldisc(N_TRACESINK);
-
- if (retval < 0)
- pr_err("%s: Unregistration failed: %d\n", __func__, retval);
-}
-
-module_init(n_tracesink_init);
-module_exit(n_tracesink_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jay Freyensee");
-MODULE_ALIAS_LDISC(N_TRACESINK);
-MODULE_DESCRIPTION("Trace sink ldisc driver");
diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h
deleted file mode 100644
index 7031d515a700..000000000000
--- a/drivers/tty/n_tracesink.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * n_tracesink.h - Kernel driver API to route trace data in kernel space.
- *
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file is used by n_tracerouter to be able to send the
- * data of it's tty port to the tty port this module sits. This
- * mechanism can also be used independent of the PTI module.
- *
- */
-
-#ifndef N_TRACESINK_H_
-#define N_TRACESINK_H_
-
-void n_tracesink_datadrain(u8 *buf, int count);
-
-#endif
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 319d68c8a5df..87ec15dbe10d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -164,29 +164,24 @@ static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
memset(buffer, 0x00, size);
}
-static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
- size_t tail, size_t n)
+static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
{
struct n_tty_data *ldata = tty->disc_data;
size_t size = N_TTY_BUF_SIZE - tail;
void *from = read_buf_addr(ldata, tail);
- int uncopied;
if (n > size) {
tty_audit_add_data(tty, from, size);
- uncopied = copy_to_user(to, from, size);
- zero_buffer(tty, from, size - uncopied);
- if (uncopied)
- return uncopied;
+ memcpy(to, from, size);
+ zero_buffer(tty, from, size);
to += size;
n -= size;
from = ldata->read_buf;
}
tty_audit_add_data(tty, from, n);
- uncopied = copy_to_user(to, from, n);
- zero_buffer(tty, from, n - uncopied);
- return uncopied;
+ memcpy(to, from, n);
+ zero_buffer(tty, from, n);
}
/**
@@ -1894,8 +1889,10 @@ static void n_tty_close(struct tty_struct *tty)
if (tty->link)
n_tty_packet_mode_flush(tty);
+ down_write(&tty->termios_rwsem);
vfree(ldata);
tty->disc_data = NULL;
+ up_write(&tty->termios_rwsem);
}
/**
@@ -1944,42 +1941,38 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
/**
* copy_from_read_buf - copy read data directly
* @tty: terminal device
- * @b: user data
+ * @kbp: data
* @nr: size of data
*
* Helper function to speed up n_tty_read. It is only called when
- * ICANON is off; it copies characters straight from the tty queue to
- * user space directly. It can be profitably called twice; once to
- * drain the space from the tail pointer to the (physical) end of the
- * buffer, and once to drain the space from the (physical) beginning of
- * the buffer to head pointer.
+ * ICANON is off; it copies characters straight from the tty queue.
*
* Called under the ldata->atomic_read_lock sem
*
+ * Returns true if it successfully copied data, but there is still
+ * more data to be had.
+ *
* n_tty_read()/consumer path:
* caller holds non-exclusive termios_rwsem
* read_tail published
*/
-static int copy_from_read_buf(struct tty_struct *tty,
- unsigned char __user **b,
+static bool copy_from_read_buf(struct tty_struct *tty,
+ unsigned char **kbp,
size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
- int retval;
size_t n;
bool is_eof;
size_t head = smp_load_acquire(&ldata->commit_head);
size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
- retval = 0;
n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
n = min(*nr, n);
if (n) {
unsigned char *from = read_buf_addr(ldata, tail);
- retval = copy_to_user(*b, from, n);
- n -= retval;
+ memcpy(*kbp, from, n);
is_eof = n == 1 && *from == EOF_CHAR(tty);
tty_audit_add_data(tty, from, n);
zero_buffer(tty, from, n);
@@ -1987,22 +1980,25 @@ static int copy_from_read_buf(struct tty_struct *tty,
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
(head == ldata->read_tail))
- n = 0;
- *b += n;
+ return false;
+ *kbp += n;
*nr -= n;
+
+ /* If we have more to copy, let the caller know */
+ return head != ldata->read_tail;
}
- return retval;
+ return false;
}
/**
* canon_copy_from_read_buf - copy read data in canonical mode
* @tty: terminal device
- * @b: user data
+ * @kbp: data
* @nr: size of data
*
* Helper function for n_tty_read. It is only called when ICANON is on;
* it copies one line of input up to and including the line-delimiting
- * character into the user-space buffer.
+ * character into the result buffer.
*
* NB: When termios is changed from non-canonical to canonical mode and
* the read buffer contains data, n_tty_set_termios() simulates an EOF
@@ -2017,21 +2013,22 @@ static int copy_from_read_buf(struct tty_struct *tty,
* read_tail published
*/
-static int canon_copy_from_read_buf(struct tty_struct *tty,
- unsigned char __user **b,
- size_t *nr)
+static bool canon_copy_from_read_buf(struct tty_struct *tty,
+ unsigned char **kbp,
+ size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n, size, more, c;
size_t eol;
- size_t tail;
- int ret, found = 0;
+ size_t tail, canon_head;
+ int found = 0;
/* N.B. avoid overrun if nr == 0 */
if (!*nr)
- return 0;
+ return false;
- n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
+ canon_head = smp_load_acquire(&ldata->canon_head);
+ n = min(*nr + 1, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -2061,10 +2058,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
- ret = tty_copy_to_user(tty, *b, tail, n);
- if (ret)
- return -EFAULT;
- *b += n;
+ tty_copy(tty, *kbp, tail, n);
+ *kbp += n;
*nr -= n;
if (found)
@@ -2077,12 +2072,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
else
ldata->push = 0;
tty_audit_push();
+ return false;
}
- return 0;
-}
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+ /* No EOL found - do a continuation retry if there is more data */
+ return ldata->read_tail != canon_head;
+}
/**
* job_control - check job control
@@ -2105,7 +2100,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
- if (file->f_op->write == redirected_tty_write)
+ if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
@@ -2132,10 +2127,11 @@ static int job_control(struct tty_struct *tty, struct file *file)
*/
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr)
+ unsigned char *kbuf, size_t nr,
+ void **cookie, unsigned long offset)
{
struct n_tty_data *ldata = tty->disc_data;
- unsigned char __user *b = buf;
+ unsigned char *kb = kbuf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int c;
int minimum, time;
@@ -2144,6 +2140,30 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
int packet;
size_t tail;
+ /*
+ * Is this a continuation of a read started earlier?
+ *
+ * If so, we still hold the atomic_read_lock and the
+ * termios_rwsem, and can just continue to copy data.
+ */
+ if (*cookie) {
+ if (ldata->icanon && !L_EXTPROC(tty)) {
+ if (canon_copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ } else {
+ if (copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ }
+
+ /* No more data - release locks and stop retries */
+ n_tty_kick_worker(tty);
+ n_tty_check_unthrottle(tty);
+ up_read(&tty->termios_rwsem);
+ mutex_unlock(&ldata->atomic_read_lock);
+ *cookie = NULL;
+ return kb - kbuf;
+ }
+
c = job_control(tty, file);
if (c < 0)
return c;
@@ -2181,17 +2201,13 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
/* First test for status change. */
if (packet && tty->link->ctrl_status) {
unsigned char cs;
- if (b != buf)
+ if (kb != kbuf)
break;
spin_lock_irq(&tty->link->ctrl_lock);
cs = tty->link->ctrl_status;
tty->link->ctrl_status = 0;
spin_unlock_irq(&tty->link->ctrl_lock);
- if (put_user(cs, b)) {
- retval = -EFAULT;
- break;
- }
- b++;
+ *kb++ = cs;
nr--;
break;
}
@@ -2234,33 +2250,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (ldata->icanon && !L_EXTPROC(tty)) {
- retval = canon_copy_from_read_buf(tty, &b, &nr);
- if (retval)
- break;
+ if (canon_copy_from_read_buf(tty, &kb, &nr))
+ goto more_to_be_read;
} else {
- int uncopied;
-
/* Deal with packet mode. */
- if (packet && b == buf) {
- if (put_user(TIOCPKT_DATA, b)) {
- retval = -EFAULT;
- break;
- }
- b++;
+ if (packet && kb == kbuf) {
+ *kb++ = TIOCPKT_DATA;
nr--;
}
- uncopied = copy_from_read_buf(tty, &b, &nr);
- uncopied += copy_from_read_buf(tty, &b, &nr);
- if (uncopied) {
- retval = -EFAULT;
- break;
+ /*
+ * Copy data, and if there is more to be had
+ * and we have nothing more to wait for, then
+ * let's mark us for retries.
+ *
+ * NOTE! We return here with both the termios_rwsem
+ * and atomic_read_lock still held, the retries
+ * will release them when done.
+ */
+ if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) {
+more_to_be_read:
+ remove_wait_queue(&tty->read_wait, &wait);
+ *cookie = cookie;
+ return kb - kbuf;
}
}
n_tty_check_unthrottle(tty);
- if (b - buf >= minimum)
+ if (kb - kbuf >= minimum)
break;
if (time)
timeout = time;
@@ -2272,8 +2290,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
remove_wait_queue(&tty->read_wait, &wait);
mutex_unlock(&ldata->atomic_read_lock);
- if (b - buf)
- retval = b - buf;
+ if (kb - kbuf)
+ retval = kb - kbuf;
return retval;
}
@@ -2309,7 +2327,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
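
The n_tty conversion is the subtle one: a continuation return deliberately leaves the ldisc's locks held so the core can re-enter immediately. A condensed summary of the protocol implemented by the hunks above:

/*
 * Condensed life cycle of the n_tty cookie continuation:
 *
 * 1. first call: *cookie == NULL; normal setup, atomic_read_lock and
 *    termios_rwsem are taken as before.
 * 2. data is copied; if the buffer still holds more and at least
 *    'minimum' bytes were delivered, *cookie is set and the function
 *    returns WITHOUT dropping either lock.
 * 3. follow-up calls: the *cookie branch at the top copies further
 *    data directly, still under the original locks.
 * 4. once drained: kick the flip-buffer worker, unthrottle, release
 *    termios_rwsem and atomic_read_lock, and clear *cookie.
 */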
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a59f1e062bc6..8b2797b6ee44 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -45,7 +45,6 @@ static DEFINE_MUTEX(devpts_mutex);
static void pty_close(struct tty_struct *tty, struct file *filp)
{
- BUG_ON(!tty);
if (tty->driver->subtype == PTY_TYPE_MASTER)
WARN_ON(tty->count > 1);
else {
@@ -67,7 +66,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&tty->link->read_wait);
wake_up_interruptible(&tty->link->write_wait);
if (tty->driver->subtype == PTY_TYPE_MASTER) {
- set_bit(TTY_OTHER_CLOSED, &tty->flags);
+ struct file *f;
+
#ifdef CONFIG_UNIX98_PTYS
if (tty->driver == ptm_driver) {
mutex_lock(&devpts_mutex);
@@ -76,7 +76,17 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&devpts_mutex);
}
#endif
- tty_vhangup(tty->link);
+
+ /*
+ * This hack is required because a program can open a
+ * pty and redirect a console to it, but if the pty is
+ * closed and the console is not released, then the
+ * slave side will never close. So release the
+ * redirect when the master closes.
+ */
+ f = tty_release_redirect(tty->link);
+ if (f)
+ fput(f);
}
}
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
index c0ffad1572c6..e13ae18b0713 100644
--- a/drivers/tty/serial/8250/8250_tegra.c
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -26,16 +26,17 @@ static void tegra_uart_handle_break(struct uart_port *p)
{
unsigned int status, tmout = 10000;
- do {
+ while (1) {
status = p->serial_in(p, UART_LSR);
- if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
- status = p->serial_in(p, UART_RX);
- else
+ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)))
break;
+
+ p->serial_in(p, UART_RX);
+
if (--tmout == 0)
break;
udelay(1);
- } while (1);
+ }
}
static int tegra_uart_probe(struct platform_device *pdev)
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 34a2899e69c0..0c4cd4a348f4 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -276,28 +276,6 @@ config SERIAL_SAMSUNG_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
-config SERIAL_SIRFSOC
- tristate "SiRF SoC Platform Serial port support"
- depends on ARCH_SIRF
- select SERIAL_CORE
- help
- Support for the on-chip UART on the CSR SiRFprimaII series,
- providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
- provide all of these ports, depending on how the serial port
- pins are configured).
-
-config SERIAL_SIRFSOC_CONSOLE
- bool "Support for console on SiRF SoC serial port"
- depends on SERIAL_SIRFSOC=y
- select SERIAL_CORE_CONSOLE
- help
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttySiRFx". (Try "man bootparam" or see the documentation of
- your boot loader about how to pass options to the kernel at
- boot time.)
-
config SERIAL_TEGRA
tristate "NVIDIA Tegra20/30 SoC serial controller"
depends on ARCH_TEGRA && TEGRA20_APB_DMA
@@ -1204,13 +1182,6 @@ config SERIAL_ALTERA_UART_CONSOLE
help
Enable a Altera UART port to be the system console.
-config SERIAL_IFX6X60
- tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
- depends on GPIOLIB || COMPILE_TEST
- depends on SPI && HAS_DMA
- help
- Support for the IFX6x60 modem devices on Intel MID platforms.
-
config SERIAL_PCH_UART
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
@@ -1295,14 +1266,6 @@ config SERIAL_AR933X_NR_UARTS
Set this to the number of serial ports you want the driver
to support.
-config SERIAL_EFM32_UART
- tristate "EFM32 UART/USART port"
- depends on ARM && (ARCH_EFM32 || COMPILE_TEST)
- select SERIAL_CORE
- help
- This driver support the USART and UART ports on
- Energy Micro's efm32 SoCs.
-
config SERIAL_MPS2_UART_CONSOLE
bool "MPS2 UART console support"
depends on SERIAL_MPS2_UART
@@ -1316,11 +1279,6 @@ config SERIAL_MPS2_UART
help
This driver support the UART ports on ARM MPS2.
-config SERIAL_EFM32_UART_CONSOLE
- bool "EFM32 UART/USART console support"
- depends on SERIAL_EFM32_UART=y
- select SERIAL_CORE_CONSOLE
-
config SERIAL_ARC
tristate "ARC UART driver support"
select SERIAL_CORE
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index b85d53f9e9ff..7da0856cd198 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -64,16 +64,13 @@ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
-obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
-obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
-obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
obj-$(CONFIG_SERIAL_RP2) += rp2.o
obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 3284f34e9dfe..3f96edfe569c 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -754,7 +754,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
return ret;
}
-static int pl010_remove(struct amba_device *dev)
+static void pl010_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
@@ -770,8 +770,6 @@ static int pl010_remove(struct amba_device *dev)
if (!busy)
uart_unregister_driver(&amba_reg);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c255476cce28..4ead0c9048a8 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2679,13 +2679,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return pl011_register_port(uap);
}
-static int pl011_remove(struct amba_device *dev)
+static void pl011_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
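
Both the PL010 and PL011 hunks are part of the tree-wide switch of the amba_driver .remove callback from int to void; the bus core never did anything useful with the return value. A minimal skeleton of the resulting shape, with illustrative names:

/* Illustrative skeleton; see <linux/amba/bus.h> for the real types. */
static int example_probe(struct amba_device *dev, const struct amba_id *id)
{
	return 0;	/* probe keeps its int return */
}

static void example_remove(struct amba_device *dev)
{
	/* tear everything down; nothing can be reported back */
}

static struct amba_driver example_driver = {
	.drv	= { .name = "example-uart" },
	.probe	= example_probe,
	.remove	= example_remove,	/* now returns void */
};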
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 4df47d02b34b..58aaa533203b 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -499,8 +499,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
pr_debug("CPM uart[%d]:set_termios\n", port->line);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
- if (baud < HW_BUF_SPD_THRESHOLD ||
- (pinfo->port.state && pinfo->port.state->port.low_latency))
+ if (baud < HW_BUF_SPD_THRESHOLD || port->flags & UPF_LOW_LATENCY)
pinfo->rx_fifosize = 1;
else
pinfo->rx_fifosize = RX_BUF_SIZE;
@@ -1107,6 +1106,32 @@ static void cpm_put_poll_char(struct uart_port *port,
ch[0] = (char)c;
cpm_uart_early_write(pinfo, ch, 1, false);
}
+
+static struct uart_port *udbg_port;
+
+static void udbg_cpm_putc(char c)
+{
+ if (c == '\n')
+ cpm_put_poll_char(udbg_port, '\r');
+ cpm_put_poll_char(udbg_port, c);
+}
+
+static int udbg_cpm_getc_poll(void)
+{
+ int c = cpm_get_poll_char(udbg_port);
+
+ return c == NO_POLL_CHAR ? -1 : c;
+}
+
+static int udbg_cpm_getc(void)
+{
+ int c;
+
+ while ((c = udbg_cpm_getc_poll()) == -1)
+ cpu_relax();
+ return c;
+}
+
#endif /* CONFIG_CONSOLE_POLL */
static const struct uart_ops cpm_uart_pops = {
@@ -1237,7 +1262,10 @@ static int cpm_uart_init_port(struct device_node *np,
}
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
- udbg_putc = NULL;
+#ifdef CONFIG_CONSOLE_POLL
+ if (!udbg_port)
+#endif
+ udbg_putc = NULL;
#endif
return cpm_uart_request_port(&pinfo->port);
@@ -1358,6 +1386,15 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
uart_set_options(port, co, baud, parity, bits, flow);
cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX);
+#ifdef CONFIG_CONSOLE_POLL
+ if (!udbg_port) {
+ udbg_port = &pinfo->port;
+ udbg_putc = udbg_cpm_putc;
+ udbg_getc = udbg_cpm_getc;
+ udbg_getc_poll = udbg_cpm_getc_poll;
+ }
+#endif
+
return 0;
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
deleted file mode 100644
index f12f29cf4f31..000000000000
--- a/drivers/tty/serial/efm32-uart.c
+++ /dev/null
@@ -1,852 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/serial_core.h>
-#include <linux/tty_flip.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <linux/platform_data/efm32-uart.h>
-
-#define DRIVER_NAME "efm32-uart"
-#define DEV_NAME "ttyefm"
-
-#define UARTn_CTRL 0x00
-#define UARTn_CTRL_SYNC 0x0001
-#define UARTn_CTRL_TXBIL 0x1000
-
-#define UARTn_FRAME 0x04
-#define UARTn_FRAME_DATABITS__MASK 0x000f
-#define UARTn_FRAME_DATABITS(n) ((n) - 3)
-#define UARTn_FRAME_PARITY__MASK 0x0300
-#define UARTn_FRAME_PARITY_NONE 0x0000
-#define UARTn_FRAME_PARITY_EVEN 0x0200
-#define UARTn_FRAME_PARITY_ODD 0x0300
-#define UARTn_FRAME_STOPBITS_HALF 0x0000
-#define UARTn_FRAME_STOPBITS_ONE 0x1000
-#define UARTn_FRAME_STOPBITS_TWO 0x3000
-
-#define UARTn_CMD 0x0c
-#define UARTn_CMD_RXEN 0x0001
-#define UARTn_CMD_RXDIS 0x0002
-#define UARTn_CMD_TXEN 0x0004
-#define UARTn_CMD_TXDIS 0x0008
-
-#define UARTn_STATUS 0x10
-#define UARTn_STATUS_TXENS 0x0002
-#define UARTn_STATUS_TXC 0x0020
-#define UARTn_STATUS_TXBL 0x0040
-#define UARTn_STATUS_RXDATAV 0x0080
-
-#define UARTn_CLKDIV 0x14
-
-#define UARTn_RXDATAX 0x18
-#define UARTn_RXDATAX_RXDATA__MASK 0x01ff
-#define UARTn_RXDATAX_PERR 0x4000
-#define UARTn_RXDATAX_FERR 0x8000
-/*
- * This is a software only flag used for ignore_status_mask and
- * read_status_mask! It's used for breaks that the hardware doesn't report
- * explicitly.
- */
-#define SW_UARTn_RXDATAX_BERR 0x2000
-
-#define UARTn_TXDATA 0x34
-
-#define UARTn_IF 0x40
-#define UARTn_IF_TXC 0x0001
-#define UARTn_IF_TXBL 0x0002
-#define UARTn_IF_RXDATAV 0x0004
-#define UARTn_IF_RXOF 0x0010
-
-#define UARTn_IFS 0x44
-#define UARTn_IFC 0x48
-#define UARTn_IEN 0x4c
-
-#define UARTn_ROUTE 0x54
-#define UARTn_ROUTE_LOCATION__MASK 0x0700
-#define UARTn_ROUTE_LOCATION(n) (((n) << 8) & UARTn_ROUTE_LOCATION__MASK)
-#define UARTn_ROUTE_RXPEN 0x0001
-#define UARTn_ROUTE_TXPEN 0x0002
-
-struct efm32_uart_port {
- struct uart_port port;
- unsigned int txirq;
- struct clk *clk;
- struct efm32_uart_pdata pdata;
-};
-#define to_efm_port(_port) container_of(_port, struct efm32_uart_port, port)
-#define efm_debug(efm_port, format, arg...) \
- dev_dbg(efm_port->port.dev, format, ##arg)
-
-static void efm32_uart_write32(struct efm32_uart_port *efm_port,
- u32 value, unsigned offset)
-{
- writel_relaxed(value, efm_port->port.membase + offset);
-}
-
-static u32 efm32_uart_read32(struct efm32_uart_port *efm_port,
- unsigned offset)
-{
- return readl_relaxed(efm_port->port.membase + offset);
-}
-
-static unsigned int efm32_uart_tx_empty(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- u32 status = efm32_uart_read32(efm_port, UARTn_STATUS);
-
- if (status & UARTn_STATUS_TXC)
- return TIOCSER_TEMT;
- else
- return 0;
-}
-
-static void efm32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- /* sorry, neither handshaking lines nor loop functionallity */
-}
-
-static unsigned int efm32_uart_get_mctrl(struct uart_port *port)
-{
- /* sorry, no handshaking lines available */
- return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR;
-}
-
-static void efm32_uart_stop_tx(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- u32 ien = efm32_uart_read32(efm_port, UARTn_IEN);
-
- efm32_uart_write32(efm_port, UARTn_CMD_TXDIS, UARTn_CMD);
- ien &= ~(UARTn_IF_TXC | UARTn_IF_TXBL);
- efm32_uart_write32(efm_port, ien, UARTn_IEN);
-}
-
-static void efm32_uart_tx_chars(struct efm32_uart_port *efm_port)
-{
- struct uart_port *port = &efm_port->port;
- struct circ_buf *xmit = &port->state->xmit;
-
- while (efm32_uart_read32(efm_port, UARTn_STATUS) &
- UARTn_STATUS_TXBL) {
- if (port->x_char) {
- port->icount.tx++;
- efm32_uart_write32(efm_port, port->x_char,
- UARTn_TXDATA);
- port->x_char = 0;
- continue;
- }
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
- port->icount.tx++;
- efm32_uart_write32(efm_port, xmit->buf[xmit->tail],
- UARTn_TXDATA);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- } else
- break;
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (!port->x_char && uart_circ_empty(xmit) &&
- efm32_uart_read32(efm_port, UARTn_STATUS) &
- UARTn_STATUS_TXC)
- efm32_uart_stop_tx(port);
-}
-
-static void efm32_uart_start_tx(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- u32 ien;
-
- efm32_uart_write32(efm_port,
- UARTn_IF_TXBL | UARTn_IF_TXC, UARTn_IFC);
- ien = efm32_uart_read32(efm_port, UARTn_IEN);
- efm32_uart_write32(efm_port,
- ien | UARTn_IF_TXBL | UARTn_IF_TXC, UARTn_IEN);
- efm32_uart_write32(efm_port, UARTn_CMD_TXEN, UARTn_CMD);
-
- efm32_uart_tx_chars(efm_port);
-}
-
-static void efm32_uart_stop_rx(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
-
- efm32_uart_write32(efm_port, UARTn_CMD_RXDIS, UARTn_CMD);
-}
-
-static void efm32_uart_break_ctl(struct uart_port *port, int ctl)
-{
- /* not possible without fiddling with gpios */
-}
-
-static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port)
-{
- struct uart_port *port = &efm_port->port;
-
- while (efm32_uart_read32(efm_port, UARTn_STATUS) &
- UARTn_STATUS_RXDATAV) {
- u32 rxdata = efm32_uart_read32(efm_port, UARTn_RXDATAX);
- int flag = 0;
-
- /*
- * This is a reserved bit and I only saw it read as 0. But to be
- * sure not to be confused too much by new devices adhere to the
- * warning in the reference manual that reserved bits might
- * read as 1 in the future.
- */
- rxdata &= ~SW_UARTn_RXDATAX_BERR;
-
- port->icount.rx++;
-
- if ((rxdata & UARTn_RXDATAX_FERR) &&
- !(rxdata & UARTn_RXDATAX_RXDATA__MASK)) {
- rxdata |= SW_UARTn_RXDATAX_BERR;
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- } else if (rxdata & UARTn_RXDATAX_PERR)
- port->icount.parity++;
- else if (rxdata & UARTn_RXDATAX_FERR)
- port->icount.frame++;
-
- rxdata &= port->read_status_mask;
-
- if (rxdata & SW_UARTn_RXDATAX_BERR)
- flag = TTY_BREAK;
- else if (rxdata & UARTn_RXDATAX_PERR)
- flag = TTY_PARITY;
- else if (rxdata & UARTn_RXDATAX_FERR)
- flag = TTY_FRAME;
- else if (uart_handle_sysrq_char(port,
- rxdata & UARTn_RXDATAX_RXDATA__MASK))
- continue;
-
- if ((rxdata & port->ignore_status_mask) == 0)
- tty_insert_flip_char(&port->state->port,
- rxdata & UARTn_RXDATAX_RXDATA__MASK, flag);
- }
-}
-
-static irqreturn_t efm32_uart_rxirq(int irq, void *data)
-{
- struct efm32_uart_port *efm_port = data;
- u32 irqflag = efm32_uart_read32(efm_port, UARTn_IF);
- int handled = IRQ_NONE;
- struct uart_port *port = &efm_port->port;
- struct tty_port *tport = &port->state->port;
-
- spin_lock(&port->lock);
-
- if (irqflag & UARTn_IF_RXDATAV) {
- efm32_uart_write32(efm_port, UARTn_IF_RXDATAV, UARTn_IFC);
- efm32_uart_rx_chars(efm_port);
-
- handled = IRQ_HANDLED;
- }
-
- if (irqflag & UARTn_IF_RXOF) {
- efm32_uart_write32(efm_port, UARTn_IF_RXOF, UARTn_IFC);
- port->icount.overrun++;
- tty_insert_flip_char(tport, 0, TTY_OVERRUN);
-
- handled = IRQ_HANDLED;
- }
-
- spin_unlock(&port->lock);
-
- tty_flip_buffer_push(tport);
-
- return handled;
-}
-
-static irqreturn_t efm32_uart_txirq(int irq, void *data)
-{
- struct efm32_uart_port *efm_port = data;
- u32 irqflag = efm32_uart_read32(efm_port, UARTn_IF);
-
- /* TXBL doesn't need to be cleared */
- if (irqflag & UARTn_IF_TXC)
- efm32_uart_write32(efm_port, UARTn_IF_TXC, UARTn_IFC);
-
- if (irqflag & (UARTn_IF_TXC | UARTn_IF_TXBL)) {
- efm32_uart_tx_chars(efm_port);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-}
-
-static int efm32_uart_startup(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- int ret;
-
- ret = clk_enable(efm_port->clk);
- if (ret) {
- efm_debug(efm_port, "failed to enable clk\n");
- goto err_clk_enable;
- }
- port->uartclk = clk_get_rate(efm_port->clk);
-
- /* Enable pins at configured location */
- efm32_uart_write32(efm_port,
- UARTn_ROUTE_LOCATION(efm_port->pdata.location) |
- UARTn_ROUTE_RXPEN | UARTn_ROUTE_TXPEN,
- UARTn_ROUTE);
-
- ret = request_irq(port->irq, efm32_uart_rxirq, 0,
- DRIVER_NAME, efm_port);
- if (ret) {
- efm_debug(efm_port, "failed to register rxirq\n");
- goto err_request_irq_rx;
- }
-
- /* disable all irqs */
- efm32_uart_write32(efm_port, 0, UARTn_IEN);
-
- ret = request_irq(efm_port->txirq, efm32_uart_txirq, 0,
- DRIVER_NAME, efm_port);
- if (ret) {
- efm_debug(efm_port, "failed to register txirq\n");
- free_irq(port->irq, efm_port);
-err_request_irq_rx:
-
- clk_disable(efm_port->clk);
- } else {
- efm32_uart_write32(efm_port,
- UARTn_IF_RXDATAV | UARTn_IF_RXOF, UARTn_IEN);
- efm32_uart_write32(efm_port, UARTn_CMD_RXEN, UARTn_CMD);
- }
-
-err_clk_enable:
- return ret;
-}
-
-static void efm32_uart_shutdown(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
-
- efm32_uart_write32(efm_port, 0, UARTn_IEN);
- free_irq(port->irq, efm_port);
-
- clk_disable(efm_port->clk);
-}
-
-static void efm32_uart_set_termios(struct uart_port *port,
- struct ktermios *new, struct ktermios *old)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- unsigned long flags;
- unsigned baud;
- u32 clkdiv;
- u32 frame = 0;
-
- /* no modem control lines */
- new->c_cflag &= ~(CRTSCTS | CMSPAR);
-
- baud = uart_get_baud_rate(port, new, old,
- DIV_ROUND_CLOSEST(port->uartclk, 16 * 8192),
- DIV_ROUND_CLOSEST(port->uartclk, 16));
-
- switch (new->c_cflag & CSIZE) {
- case CS5:
- frame |= UARTn_FRAME_DATABITS(5);
- break;
- case CS6:
- frame |= UARTn_FRAME_DATABITS(6);
- break;
- case CS7:
- frame |= UARTn_FRAME_DATABITS(7);
- break;
- case CS8:
- frame |= UARTn_FRAME_DATABITS(8);
- break;
- }
-
- if (new->c_cflag & CSTOPB)
- /* the receiver only verifies the first stop bit */
- frame |= UARTn_FRAME_STOPBITS_TWO;
- else
- frame |= UARTn_FRAME_STOPBITS_ONE;
-
- if (new->c_cflag & PARENB) {
- if (new->c_cflag & PARODD)
- frame |= UARTn_FRAME_PARITY_ODD;
- else
- frame |= UARTn_FRAME_PARITY_EVEN;
- } else
- frame |= UARTn_FRAME_PARITY_NONE;
-
- /*
- * the 6 lowest bits of CLKDIV are dc, bit 6 has value 0.25.
- * port->uartclk <= 14e6, so 4 * port->uartclk doesn't overflow.
- */
- clkdiv = (DIV_ROUND_CLOSEST(4 * port->uartclk, 16 * baud) - 4) << 6;
-
- spin_lock_irqsave(&port->lock, flags);
-
- efm32_uart_write32(efm_port,
- UARTn_CMD_TXDIS | UARTn_CMD_RXDIS, UARTn_CMD);
-
- port->read_status_mask = UARTn_RXDATAX_RXDATA__MASK;
- if (new->c_iflag & INPCK)
- port->read_status_mask |=
- UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
- if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
-
- port->ignore_status_mask = 0;
- if (new->c_iflag & IGNPAR)
- port->ignore_status_mask |=
- UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
- if (new->c_iflag & IGNBRK)
- port->ignore_status_mask |= SW_UARTn_RXDATAX_BERR;
-
- uart_update_timeout(port, new->c_cflag, baud);
-
- efm32_uart_write32(efm_port, UARTn_CTRL_TXBIL, UARTn_CTRL);
- efm32_uart_write32(efm_port, frame, UARTn_FRAME);
- efm32_uart_write32(efm_port, clkdiv, UARTn_CLKDIV);
-
- efm32_uart_write32(efm_port, UARTn_CMD_TXEN | UARTn_CMD_RXEN,
- UARTn_CMD);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *efm32_uart_type(struct uart_port *port)
-{
- return port->type == PORT_EFMUART ? "efm32-uart" : NULL;
-}
-
-static void efm32_uart_release_port(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
-
- clk_unprepare(efm_port->clk);
- clk_put(efm_port->clk);
- iounmap(port->membase);
-}
-
-static int efm32_uart_request_port(struct uart_port *port)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- int ret;
-
- port->membase = ioremap(port->mapbase, 60);
- if (!efm_port->port.membase) {
- ret = -ENOMEM;
- efm_debug(efm_port, "failed to remap\n");
- goto err_ioremap;
- }
-
- efm_port->clk = clk_get(port->dev, NULL);
- if (IS_ERR(efm_port->clk)) {
- ret = PTR_ERR(efm_port->clk);
- efm_debug(efm_port, "failed to get clock\n");
- goto err_clk_get;
- }
-
- ret = clk_prepare(efm_port->clk);
- if (ret) {
- clk_put(efm_port->clk);
-err_clk_get:
-
- iounmap(port->membase);
-err_ioremap:
- return ret;
- }
- return 0;
-}
-
-static void efm32_uart_config_port(struct uart_port *port, int type)
-{
- if (type & UART_CONFIG_TYPE &&
- !efm32_uart_request_port(port))
- port->type = PORT_EFMUART;
-}
-
-static int efm32_uart_verify_port(struct uart_port *port,
- struct serial_struct *serinfo)
-{
- int ret = 0;
-
- if (serinfo->type != PORT_UNKNOWN && serinfo->type != PORT_EFMUART)
- ret = -EINVAL;
-
- return ret;
-}
-
-static const struct uart_ops efm32_uart_pops = {
- .tx_empty = efm32_uart_tx_empty,
- .set_mctrl = efm32_uart_set_mctrl,
- .get_mctrl = efm32_uart_get_mctrl,
- .stop_tx = efm32_uart_stop_tx,
- .start_tx = efm32_uart_start_tx,
- .stop_rx = efm32_uart_stop_rx,
- .break_ctl = efm32_uart_break_ctl,
- .startup = efm32_uart_startup,
- .shutdown = efm32_uart_shutdown,
- .set_termios = efm32_uart_set_termios,
- .type = efm32_uart_type,
- .release_port = efm32_uart_release_port,
- .request_port = efm32_uart_request_port,
- .config_port = efm32_uart_config_port,
- .verify_port = efm32_uart_verify_port,
-};
-
-static struct efm32_uart_port *efm32_uart_ports[5];
-
-#ifdef CONFIG_SERIAL_EFM32_UART_CONSOLE
-static void efm32_uart_console_putchar(struct uart_port *port, int ch)
-{
- struct efm32_uart_port *efm_port = to_efm_port(port);
- unsigned int timeout = 0x400;
- u32 status;
-
- while (1) {
- status = efm32_uart_read32(efm_port, UARTn_STATUS);
-
- if (status & UARTn_STATUS_TXBL)
- break;
- if (!timeout--)
- return;
- }
- efm32_uart_write32(efm_port, ch, UARTn_TXDATA);
-}
-
-static void efm32_uart_console_write(struct console *co, const char *s,
- unsigned int count)
-{
- struct efm32_uart_port *efm_port = efm32_uart_ports[co->index];
- u32 status = efm32_uart_read32(efm_port, UARTn_STATUS);
- unsigned int timeout = 0x400;
-
- if (!(status & UARTn_STATUS_TXENS))
- efm32_uart_write32(efm_port, UARTn_CMD_TXEN, UARTn_CMD);
-
- uart_console_write(&efm_port->port, s, count,
- efm32_uart_console_putchar);
-
- /* Wait for the transmitter to become empty */
- while (1) {
- u32 status = efm32_uart_read32(efm_port, UARTn_STATUS);
- if (status & UARTn_STATUS_TXC)
- break;
- if (!timeout--)
- break;
- }
-
- if (!(status & UARTn_STATUS_TXENS))
- efm32_uart_write32(efm_port, UARTn_CMD_TXDIS, UARTn_CMD);
-}
-
-static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
- int *baud, int *parity, int *bits)
-{
- u32 ctrl = efm32_uart_read32(efm_port, UARTn_CTRL);
- u32 route, clkdiv, frame;
-
- if (ctrl & UARTn_CTRL_SYNC)
- /* not operating in async mode */
- return;
-
- route = efm32_uart_read32(efm_port, UARTn_ROUTE);
- if (!(route & UARTn_ROUTE_TXPEN))
- /* tx pin not routed */
- return;
-
- clkdiv = efm32_uart_read32(efm_port, UARTn_CLKDIV);
-
- *baud = DIV_ROUND_CLOSEST(4 * efm_port->port.uartclk,
- 16 * (4 + (clkdiv >> 6)));
-
- frame = efm32_uart_read32(efm_port, UARTn_FRAME);
- switch (frame & UARTn_FRAME_PARITY__MASK) {
- case UARTn_FRAME_PARITY_ODD:
- *parity = 'o';
- break;
- case UARTn_FRAME_PARITY_EVEN:
- *parity = 'e';
- break;
- default:
- *parity = 'n';
- }
-
- *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
- UARTn_FRAME_DATABITS(4) + 4;
-
- efm_debug(efm_port, "get_opts: options=%d%c%d\n",
- *baud, *parity, *bits);
-}
-
-static int efm32_uart_console_setup(struct console *co, char *options)
-{
- struct efm32_uart_port *efm_port;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
- int ret;
-
- if (co->index < 0 || co->index >= ARRAY_SIZE(efm32_uart_ports)) {
- unsigned i;
- for (i = 0; i < ARRAY_SIZE(efm32_uart_ports); ++i) {
- if (efm32_uart_ports[i]) {
- pr_warn("efm32-console: fall back to console index %u (from %hhi)\n",
- i, co->index);
- co->index = i;
- break;
- }
- }
- }
-
- efm_port = efm32_uart_ports[co->index];
- if (!efm_port) {
- pr_warn("efm32-console: No port at %d\n", co->index);
- return -ENODEV;
- }
-
- ret = clk_prepare(efm_port->clk);
- if (ret) {
- dev_warn(efm_port->port.dev,
- "console: clk_prepare failed: %d\n", ret);
- return ret;
- }
-
- efm_port->port.uartclk = clk_get_rate(efm_port->clk);
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- efm32_uart_console_get_options(efm_port,
- &baud, &parity, &bits);
-
- return uart_set_options(&efm_port->port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver efm32_uart_reg;
-
-static struct console efm32_uart_console = {
- .name = DEV_NAME,
- .write = efm32_uart_console_write,
- .device = uart_console_device,
- .setup = efm32_uart_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &efm32_uart_reg,
-};
-
-#else
-#define efm32_uart_console (*(struct console *)NULL)
-#endif /* ifdef CONFIG_SERIAL_EFM32_UART_CONSOLE / else */
-
-static struct uart_driver efm32_uart_reg = {
- .owner = THIS_MODULE,
- .driver_name = DRIVER_NAME,
- .dev_name = DEV_NAME,
- .nr = ARRAY_SIZE(efm32_uart_ports),
- .cons = &efm32_uart_console,
-};
-
-static int efm32_uart_probe_dt(struct platform_device *pdev,
- struct efm32_uart_port *efm_port)
-{
- struct device_node *np = pdev->dev.of_node;
- u32 location;
- int ret;
-
- if (!np)
- return 1;
-
- ret = of_property_read_u32(np, "energymicro,location", &location);
-
- if (ret)
- /* fall back to wrongly namespaced property */
- ret = of_property_read_u32(np, "efm32,location", &location);
-
- if (ret)
- /* fall back to old and (wrongly) generic property "location" */
- ret = of_property_read_u32(np, "location", &location);
-
- if (!ret) {
- if (location > 5) {
- dev_err(&pdev->dev, "invalid location\n");
- return -EINVAL;
- }
- efm_debug(efm_port, "using location %u\n", location);
- efm_port->pdata.location = location;
- } else {
- efm_debug(efm_port, "fall back to location 0\n");
- }
-
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
- return ret;
- } else {
- efm_port->port.line = ret;
- return 0;
- }
-
-}
-
-static int efm32_uart_probe(struct platform_device *pdev)
-{
- struct efm32_uart_port *efm_port;
- struct resource *res;
- unsigned int line;
- int ret;
-
- efm_port = kzalloc(sizeof(*efm_port), GFP_KERNEL);
- if (!efm_port) {
- dev_dbg(&pdev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_dbg(&pdev->dev, "failed to determine base address\n");
- goto err_get_base;
- }
-
- if (resource_size(res) < 60) {
- ret = -EINVAL;
- dev_dbg(&pdev->dev, "memory resource too small\n");
- goto err_too_small;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- dev_dbg(&pdev->dev, "failed to get rx irq\n");
- goto err_get_rxirq;
- }
-
- efm_port->port.irq = ret;
-
- ret = platform_get_irq(pdev, 1);
- if (ret <= 0)
- ret = efm_port->port.irq + 1;
-
- efm_port->txirq = ret;
-
- efm_port->port.dev = &pdev->dev;
- efm_port->port.mapbase = res->start;
- efm_port->port.type = PORT_EFMUART;
- efm_port->port.iotype = UPIO_MEM32;
- efm_port->port.fifosize = 2;
- efm_port->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_EFM32_UART_CONSOLE);
- efm_port->port.ops = &efm32_uart_pops;
- efm_port->port.flags = UPF_BOOT_AUTOCONF;
-
- ret = efm32_uart_probe_dt(pdev, efm_port);
- if (ret > 0) {
- /* not created by device tree */
- const struct efm32_uart_pdata *pdata = dev_get_platdata(&pdev->dev);
-
- efm_port->port.line = pdev->id;
-
- if (pdata)
- efm_port->pdata = *pdata;
- } else if (ret < 0)
- goto err_probe_dt;
-
- line = efm_port->port.line;
-
- if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[line] = efm_port;
-
- ret = uart_add_one_port(&efm32_uart_reg, &efm_port->port);
- if (ret) {
- dev_dbg(&pdev->dev, "failed to add port: %d\n", ret);
-
- if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[line] = NULL;
-err_probe_dt:
-err_get_rxirq:
-err_too_small:
-err_get_base:
- kfree(efm_port);
- } else {
- platform_set_drvdata(pdev, efm_port);
- dev_dbg(&pdev->dev, "\\o/\n");
- }
-
- return ret;
-}
-
-static int efm32_uart_remove(struct platform_device *pdev)
-{
- struct efm32_uart_port *efm_port = platform_get_drvdata(pdev);
- unsigned int line = efm_port->port.line;
-
- uart_remove_one_port(&efm32_uart_reg, &efm_port->port);
-
- if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[line] = NULL;
-
- kfree(efm_port);
-
- return 0;
-}
-
-static const struct of_device_id efm32_uart_dt_ids[] = {
- {
- .compatible = "energymicro,efm32-uart",
- }, {
- /* doesn't follow the "vendor,device" scheme, don't use */
- .compatible = "efm32,uart",
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, efm32_uart_dt_ids);
-
-static struct platform_driver efm32_uart_driver = {
- .probe = efm32_uart_probe,
- .remove = efm32_uart_remove,
-
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = efm32_uart_dt_ids,
- },
-};
-
-static int __init efm32_uart_init(void)
-{
- int ret;
-
- ret = uart_register_driver(&efm32_uart_reg);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&efm32_uart_driver);
- if (ret)
- uart_unregister_driver(&efm32_uart_reg);
-
- pr_info("EFM32 UART/USART driver\n");
-
- return ret;
-}
-module_init(efm32_uart_init);
-
-static void __exit efm32_uart_exit(void)
-{
- platform_driver_unregister(&efm32_uart_driver);
- uart_unregister_driver(&efm32_uart_reg);
-}
-module_exit(efm32_uart_exit);
-
-MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
-MODULE_DESCRIPTION("EFM32 UART/USART driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
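As an aside, the baud recovery in efm32_uart_console_get_options() above is easier to sanity-check with concrete numbers. DIV_ROUND_CLOSEST(4 * uartclk, 16 * (4 + (clkdiv >> 6))) is algebraically uartclk / (4 * (4 + CLKDIV/64)). A worked example (values chosen for illustration, not taken from hardware):

    /*
     * uartclk = 14745600 Hz, CLKDIV = 0x700  ->  clkdiv >> 6 = 28
     * baud = 14745600 / (4 * (4 + 28))
     *      = 14745600 / 128
     *      = 115200
     */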
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index bd047e1f9bea..794035041744 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2580,9 +2580,7 @@ static struct uart_driver lpuart_reg = {
static int lpuart_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id = of_match_device(lpuart_dt_ids,
- &pdev->dev);
- const struct lpuart_soc_data *sdata = of_id->data;
+ const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct lpuart_port *sport;
struct resource *res;
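The lpuart change above swaps the two-step of_match_device() + ->data dereference for of_device_get_match_data(), which returns the matched of_device_id's .data pointer directly, or NULL when the device was not instantiated from a matching node. A minimal sketch of the idiom (the foo_* names are placeholders, not from this driver):

    #include <linux/of_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	const struct foo_soc_data *sdata;

    	/* .data of the matching of_device_id entry, or NULL */
    	sdata = of_device_get_match_data(&pdev->dev);
    	if (!sdata)
    		return -ENODEV;

    	/* ... use sdata ... */
    	return 0;
    }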
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 94c8281ddb5f..9a872750581c 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -118,7 +118,7 @@ MODULE_DEVICE_TABLE(pci, icom_pci_table);
static LIST_HEAD(icom_adapter_head);
/* spinlock for adapter initialization and changing adapter operations */
-static spinlock_t icom_lock;
+static DEFINE_SPINLOCK(icom_lock);
#ifdef ICOM_TRACE
static inline void trace(struct icom_port *icom_port, char *trace_pt,
@@ -1616,8 +1616,6 @@ static int __init icom_init(void)
{
int ret;
- spin_lock_init(&icom_lock);
-
ret = uart_register_driver(&icom_uart_driver);
if (ret)
return ret;
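DEFINE_SPINLOCK() both declares and initialises the lock at compile time, so the spin_lock_init() call in icom_init() becomes redundant and there is no window where the lock exists uninitialised. The pattern, sketched with placeholder names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(foo_lock);	/* statically initialised */

    static void foo_update_shared_state(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&foo_lock, flags);
    	/* ... critical section ... */
    	spin_unlock_irqrestore(&foo_lock, flags);
    }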
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
deleted file mode 100644
index 182e0ccd60b2..000000000000
--- a/drivers/tty/serial/ifx6x60.c
+++ /dev/null
@@ -1,1390 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/****************************************************************************
- *
- * Driver for the IFX 6x60 spi modem.
- *
- * Copyright (C) 2008 Option International
- * Copyright (C) 2008 Filip Aben <f.aben@option.com>
- * Denis Joseph Barrow <d.barow@option.com>
- * Jan Dumon <j.dumon@option.com>
- *
- * Copyright (C) 2009, 2010 Intel Corp
- * Russ Gorby <russ.gorby@intel.com>
- *
- * Driver modified by Intel from Option gtm501l_spi.c
- *
- * Notes
- * o The driver currently assumes a single device only. If you need to
- * change this then look for saved_ifx_dev and add a device lookup
- * o The driver is intended to be big-endian safe but has never been
- * tested that way (no suitable hardware). There are a couple of FIXME
- * notes by areas that may need addressing
- * o Some of the GPIO naming/setup assumptions may need revisiting if
- * you need to use this driver for another platform.
- *
- *****************************************************************************/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/termios.h>
-#include <linux/tty.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/kfifo.h>
-#include <linux/tty_flip.h>
-#include <linux/timer.h>
-#include <linux/serial.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/rfkill.h>
-#include <linux/fs.h>
-#include <linux/ip.h>
-#include <linux/dmapool.h>
-#include <linux/gpio/consumer.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/spi/ifx_modem.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-
-#include "ifx6x60.h"
-
-#define IFX_SPI_MORE_MASK 0x10
-#define IFX_SPI_MORE_BIT 4 /* bit position in u8 */
-#define IFX_SPI_CTS_BIT 6 /* bit position in u8 */
-#define IFX_SPI_MODE SPI_MODE_1
-#define IFX_SPI_TTY_ID 0
-#define IFX_SPI_TIMEOUT_SEC 2
-#define IFX_SPI_HEADER_0 (-1)
-#define IFX_SPI_HEADER_F (-2)
-
-#define PO_POST_DELAY 200
-
-/* forward reference */
-static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
-static int ifx_modem_reboot_callback(struct notifier_block *nfb,
- unsigned long event, void *data);
-static int ifx_modem_power_off(struct ifx_spi_device *ifx_dev);
-
-/* local variables */
-static int spi_bpw = 16; /* 8, 16 or 32 bit word length */
-static struct tty_driver *tty_drv;
-static struct ifx_spi_device *saved_ifx_dev;
-static struct lock_class_key ifx_spi_key;
-
-static struct notifier_block ifx_modem_reboot_notifier_block = {
- .notifier_call = ifx_modem_reboot_callback,
-};
-
-static int ifx_modem_power_off(struct ifx_spi_device *ifx_dev)
-{
- gpiod_set_value(ifx_dev->gpio.pmu_reset, 1);
- msleep(PO_POST_DELAY);
-
- return 0;
-}
-
-static int ifx_modem_reboot_callback(struct notifier_block *nfb,
- unsigned long event, void *data)
-{
- if (saved_ifx_dev)
- ifx_modem_power_off(saved_ifx_dev);
- else
-		pr_warn("no ifx modem active\n");
-
- return NOTIFY_OK;
-}
-
-/* GPIO/GPE settings */
-
-/**
- * mrdy_set_high - set MRDY GPIO
- * @ifx: device we are controlling
- *
- */
-static inline void mrdy_set_high(struct ifx_spi_device *ifx)
-{
- gpiod_set_value(ifx->gpio.mrdy, 1);
-}
-
-/**
- * mrdy_set_low - clear MRDY GPIO
- * @ifx: device we are controlling
- *
- */
-static inline void mrdy_set_low(struct ifx_spi_device *ifx)
-{
- gpiod_set_value(ifx->gpio.mrdy, 0);
-}
-
-/**
- * ifx_spi_power_state_set
- * @ifx_dev: our SPI device
- * @val: bits to set
- *
- * Set bit in power status and signal power system if status becomes non-0
- */
-static void
-ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ifx_dev->power_lock, flags);
-
- /*
- * if power status is already non-0, just update, else
- * tell power system
- */
- if (!ifx_dev->power_status)
- pm_runtime_get(&ifx_dev->spi_dev->dev);
- ifx_dev->power_status |= val;
-
- spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
-}
-
-/**
- * ifx_spi_power_state_clear - clear power bit
- * @ifx_dev: our SPI device
- * @val: bits to clear
- *
- * clear bit in power status and signal power system if status becomes 0
- */
-static void
-ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ifx_dev->power_lock, flags);
-
- if (ifx_dev->power_status) {
- ifx_dev->power_status &= ~val;
- if (!ifx_dev->power_status)
- pm_runtime_put(&ifx_dev->spi_dev->dev);
- }
-
- spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
-}
-
-/**
- * swap_buf_8
- * @buf: our buffer
- * @len : number of bytes (not words) in the buffer
- * @end: end of buffer
- *
- * Swap the contents of a buffer into big endian format
- */
-static inline void swap_buf_8(unsigned char *buf, int len, void *end)
-{
- /* don't swap buffer if SPI word width is 8 bits */
- return;
-}
-
-/**
- * swap_buf_16
- * @buf: our buffer
- * @len : number of bytes (not words) in the buffer
- * @end: end of buffer
- *
- * Swap the contents of a buffer into big endian format
- */
-static inline void swap_buf_16(unsigned char *buf, int len, void *end)
-{
- int n;
-
- u16 *buf_16 = (u16 *)buf;
- len = ((len + 1) >> 1);
- if ((void *)&buf_16[len] > end) {
- pr_err("swap_buf_16: swap exceeds boundary (%p > %p)!",
- &buf_16[len], end);
- return;
- }
- for (n = 0; n < len; n++) {
- *buf_16 = cpu_to_be16(*buf_16);
- buf_16++;
- }
-}
-
-/**
- * swap_buf_32
- * @buf: our buffer
- * @len : number of bytes (not words) in the buffer
- * @end: end of buffer
- *
- * Swap the contents of a buffer into big endian format
- */
-static inline void swap_buf_32(unsigned char *buf, int len, void *end)
-{
- int n;
-
- u32 *buf_32 = (u32 *)buf;
- len = (len + 3) >> 2;
-
- if ((void *)&buf_32[len] > end) {
- pr_err("swap_buf_32: swap exceeds boundary (%p > %p)!\n",
- &buf_32[len], end);
- return;
- }
- for (n = 0; n < len; n++) {
- *buf_32 = cpu_to_be32(*buf_32);
- buf_32++;
- }
-}
-
-/**
- * mrdy_assert - assert MRDY line
- * @ifx_dev: our SPI device
- *
- * Assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low
- * now.
- *
- * FIXME: Can SRDY even go high as we are running this code?
- */
-static void mrdy_assert(struct ifx_spi_device *ifx_dev)
-{
- int val = gpiod_get_value(ifx_dev->gpio.srdy);
- if (!val) {
- if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
- &ifx_dev->flags)) {
-			mod_timer(&ifx_dev->spi_timer, jiffies + IFX_SPI_TIMEOUT_SEC * HZ);
-		}
- }
- ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING);
- mrdy_set_high(ifx_dev);
-}
-
-/**
- * ifx_spi_timeout - SPI timeout
- * @t: timer in our SPI device
- *
- * The SPI has timed out: hang up the tty. Users will then see a hangup
- * and error events.
- */
-static void ifx_spi_timeout(struct timer_list *t)
-{
- struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
-
- dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
- tty_port_tty_hangup(&ifx_dev->tty_port, false);
- mrdy_set_low(ifx_dev);
- clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
-}
-
-/* char/tty operations */
-
-/**
- * ifx_spi_tiocmget - get modem lines
- * @tty: our tty device
- *
- * Map the signal state into Linux modem flags and report the value
- * in Linux terms
- */
-static int ifx_spi_tiocmget(struct tty_struct *tty)
-{
- unsigned int value;
- struct ifx_spi_device *ifx_dev = tty->driver_data;
-
- value =
- (test_bit(IFX_SPI_RTS, &ifx_dev->signal_state) ? TIOCM_RTS : 0) |
- (test_bit(IFX_SPI_DTR, &ifx_dev->signal_state) ? TIOCM_DTR : 0) |
- (test_bit(IFX_SPI_CTS, &ifx_dev->signal_state) ? TIOCM_CTS : 0) |
- (test_bit(IFX_SPI_DSR, &ifx_dev->signal_state) ? TIOCM_DSR : 0) |
- (test_bit(IFX_SPI_DCD, &ifx_dev->signal_state) ? TIOCM_CAR : 0) |
- (test_bit(IFX_SPI_RI, &ifx_dev->signal_state) ? TIOCM_RNG : 0);
- return value;
-}
-
-/**
- * ifx_spi_tiocmset - set modem bits
- * @tty: the tty structure
- * @set: bits to set
- * @clear: bits to clear
- *
- * The IFX6x60 only supports DTR and RTS. Set them accordingly
- * and flag that an update to the modem is needed.
- *
- * FIXME: do we need to kick the transfers when we do this?
- */
-static int ifx_spi_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
-
- if (set & TIOCM_RTS)
- set_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
- if (set & TIOCM_DTR)
- set_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
- if (clear & TIOCM_RTS)
- clear_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
- if (clear & TIOCM_DTR)
- clear_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
-
- set_bit(IFX_SPI_UPDATE, &ifx_dev->signal_state);
- return 0;
-}
-
-/**
- * ifx_spi_open - called on tty open
- * @tty: our tty device
- * @filp: file handle being associated with the tty
- *
- * Open the tty interface. We let the tty_port layer do all the work
- * for us.
- *
- * FIXME: Remove single device assumption and saved_ifx_dev
- */
-static int ifx_spi_open(struct tty_struct *tty, struct file *filp)
-{
- return tty_port_open(&saved_ifx_dev->tty_port, tty, filp);
-}
-
-/**
- * ifx_spi_close - called when our tty closes
- * @tty: the tty being closed
- * @filp: the file handle being closed
- *
- * Perform the close of the tty. We use the tty_port layer to do all
- * our hard work.
- */
-static void ifx_spi_close(struct tty_struct *tty, struct file *filp)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
- tty_port_close(&ifx_dev->tty_port, tty, filp);
- /* FIXME: should we do an ifx_spi_reset here ? */
-}
-
-/**
- * ifx_spi_decode_spi_header - decode received header
- * @buffer: the received data
- * @length: decoded length
- * @more: decoded more flag
- * @received_cts: status of cts we received
- *
- * Note how received_cts is handled -- if header is all F it is left
- * the same as it was; if header is all 0 it is set to 0; otherwise it is
- * taken from the incoming header.
- *
- * FIXME: endianness
- */
-static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
- unsigned char *more, unsigned char *received_cts)
-{
- u16 h1;
- u16 h2;
- u16 *in_buffer = (u16 *)buffer;
-
- h1 = *in_buffer;
- h2 = *(in_buffer+1);
-
- if (h1 == 0 && h2 == 0) {
- *received_cts = 0;
- *more = 0;
- return IFX_SPI_HEADER_0;
- } else if (h1 == 0xffff && h2 == 0xffff) {
- *more = 0;
- /* spi_slave_cts remains as it was */
- return IFX_SPI_HEADER_F;
- }
-
-	*length = h1 & 0xfff;	/* upper four bits of the word are flags */
- *more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1;
- *received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1;
- return 0;
-}
-
-/**
- * ifx_spi_setup_spi_header - set header fields
- * @txbuffer: pointer to start of SPI buffer
- * @tx_count: bytes
- * @more: indicate if more to follow
- *
- * Format up an SPI header for a transfer
- *
- * FIXME: endianness?
- */
-static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count,
- unsigned char more)
-{
- *(u16 *)(txbuffer) = tx_count;
- *(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE;
- txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK;
-}
-
-/**
- * ifx_spi_prepare_tx_buffer - prepare transmit frame
- * @ifx_dev: our SPI device
- *
- * The transmit buffer needs a header and various other bits of
- * information followed by as much data as we can pull from the FIFO
- * and transfer. This function formats up a suitable buffer in the
- * ifx_dev->tx_buffer
- *
- * FIXME: performance - should we wake the tty when the queue is half
- * empty?
- */
-static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
-{
- int temp_count;
- int queue_length;
- int tx_count;
- unsigned char *tx_buffer;
-
- tx_buffer = ifx_dev->tx_buffer;
-
- /* make room for required SPI header */
- tx_buffer += IFX_SPI_HEADER_OVERHEAD;
- tx_count = IFX_SPI_HEADER_OVERHEAD;
-
- /* clear to signal no more data if this turns out to be the
- * last buffer sent in a sequence */
- ifx_dev->spi_more = 0;
-
- /* if modem cts is set, just send empty buffer */
- if (!ifx_dev->spi_slave_cts) {
- /* see if there's tx data */
- queue_length = kfifo_len(&ifx_dev->tx_fifo);
- if (queue_length != 0) {
- /* data to mux -- see if there's room for it */
- temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE);
- temp_count = kfifo_out_locked(&ifx_dev->tx_fifo,
- tx_buffer, temp_count,
- &ifx_dev->fifo_lock);
-
- /* update buffer pointer and data count in message */
- tx_buffer += temp_count;
- tx_count += temp_count;
- if (temp_count == queue_length)
- /* poke port to get more data */
- tty_port_tty_wakeup(&ifx_dev->tty_port);
- else /* more data in port, use next SPI message */
- ifx_dev->spi_more = 1;
- }
- }
- /* have data and info for header -- set up SPI header in buffer */
- /* spi header needs payload size, not entire buffer size */
- ifx_spi_setup_spi_header(ifx_dev->tx_buffer,
- tx_count-IFX_SPI_HEADER_OVERHEAD,
- ifx_dev->spi_more);
- /* swap actual data in the buffer */
- ifx_dev->swap_buf((ifx_dev->tx_buffer), tx_count,
- &ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]);
- return tx_count;
-}
-
-/**
- * ifx_spi_write - line discipline write
- * @tty: our tty device
- * @buf: pointer to buffer to write (kernel space)
- * @count: size of buffer
- *
- * Write the characters we have been given into the FIFO. If the device
- * is not active then activate it, when the SRDY line is asserted back
- * this will commence I/O
- */
-static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
- unsigned char *tmp_buf = (unsigned char *)buf;
- unsigned long flags;
- bool is_fifo_empty;
- int tx_count;
-
- spin_lock_irqsave(&ifx_dev->fifo_lock, flags);
- is_fifo_empty = kfifo_is_empty(&ifx_dev->tx_fifo);
- tx_count = kfifo_in(&ifx_dev->tx_fifo, tmp_buf, count);
- spin_unlock_irqrestore(&ifx_dev->fifo_lock, flags);
- if (is_fifo_empty)
- mrdy_assert(ifx_dev);
-
- return tx_count;
-}
-
-/**
- * ifx_spi_write_room - line discipline helper
- * @tty: our tty device
- *
- * Report how much data we can accept before we drop bytes. As we use
- * a simple FIFO this is nice and easy.
- */
-static int ifx_spi_write_room(struct tty_struct *tty)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
- return IFX_SPI_FIFO_SIZE - kfifo_len(&ifx_dev->tx_fifo);
-}
-
-/**
- * ifx_spi_chars_in_buffer - line discipline helper
- * @tty: our tty device
- *
- * Report how many characters we have buffered. In our case this is the
- * number of bytes sitting in our transmit FIFO.
- */
-static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
- return kfifo_len(&ifx_dev->tx_fifo);
-}
-
-/**
- * ifx_spi_hangup
- * @tty: our tty
- *
- * tty port hang up. Called when tty_hangup processing is invoked either
- * by loss of carrier, or by software (eg vhangup). Serialized against
- * activate/shutdown by the tty layer.
- */
-static void ifx_spi_hangup(struct tty_struct *tty)
-{
- struct ifx_spi_device *ifx_dev = tty->driver_data;
- tty_port_hangup(&ifx_dev->tty_port);
-}
-
-/**
- * ifx_port_activate
- * @port: our tty port
- * @tty: our tty device
- *
- * tty port activate method - called for first open. Serialized
- * with hangup and shutdown by the tty layer.
- */
-static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
-{
- struct ifx_spi_device *ifx_dev =
- container_of(port, struct ifx_spi_device, tty_port);
-
- /* clear any old data; can't do this in 'close' */
- kfifo_reset(&ifx_dev->tx_fifo);
-
- /* clear any flag which may be set in port shutdown procedure */
- clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
- clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
-
- /* put port data into this tty */
- tty->driver_data = ifx_dev;
-
- /* allows flip string push from int context */
- port->low_latency = 1;
-
-	/* set flag to allow data transfer */
- set_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
-
- return 0;
-}
-
-/**
- * ifx_port_shutdown
- * @port: our tty port
- *
- * tty port shutdown method - called for last port close. Serialized
- * with hangup and activate by the tty layer.
- */
-static void ifx_port_shutdown(struct tty_port *port)
-{
- struct ifx_spi_device *ifx_dev =
- container_of(port, struct ifx_spi_device, tty_port);
-
- clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
- mrdy_set_low(ifx_dev);
- del_timer(&ifx_dev->spi_timer);
- clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
- tasklet_kill(&ifx_dev->io_work_tasklet);
-}
-
-static const struct tty_port_operations ifx_tty_port_ops = {
- .activate = ifx_port_activate,
- .shutdown = ifx_port_shutdown,
-};
-
-static const struct tty_operations ifx_spi_serial_ops = {
- .open = ifx_spi_open,
- .close = ifx_spi_close,
- .write = ifx_spi_write,
- .hangup = ifx_spi_hangup,
- .write_room = ifx_spi_write_room,
- .chars_in_buffer = ifx_spi_chars_in_buffer,
- .tiocmget = ifx_spi_tiocmget,
- .tiocmset = ifx_spi_tiocmset,
-};
-
-/**
- * ifx_spi_insert_flip_string - queue received data
- * @ifx_dev: our SPI device
- * @chars: buffer we have received
- * @size: number of chars received
- *
- * Queue bytes to the tty assuming the tty side is currently open. If
- * not, discard the data.
- */
-static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
- unsigned char *chars, size_t size)
-{
- tty_insert_flip_string(&ifx_dev->tty_port, chars, size);
- tty_flip_buffer_push(&ifx_dev->tty_port);
-}
-
-/**
- * ifx_spi_complete - SPI transfer completed
- * @ctx: our SPI device
- *
- * An SPI transfer has completed. Process any received data and kick off
- * any further transmits we can commence.
- */
-static void ifx_spi_complete(void *ctx)
-{
- struct ifx_spi_device *ifx_dev = ctx;
- int length;
- int actual_length;
- unsigned char more = 0;
- unsigned char cts;
- int local_write_pending = 0;
- int queue_length;
- int srdy;
- int decode_result;
-
- mrdy_set_low(ifx_dev);
-
- if (!ifx_dev->spi_msg.status) {
- /* check header validity, get comm flags */
- ifx_dev->swap_buf(ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD,
- &ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]);
- decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer,
- &length, &more, &cts);
- if (decode_result == IFX_SPI_HEADER_0) {
- dev_dbg(&ifx_dev->spi_dev->dev,
- "ignore input: invalid header 0");
- ifx_dev->spi_slave_cts = 0;
- goto complete_exit;
- } else if (decode_result == IFX_SPI_HEADER_F) {
- dev_dbg(&ifx_dev->spi_dev->dev,
- "ignore input: invalid header F");
- goto complete_exit;
- }
-
- ifx_dev->spi_slave_cts = cts;
-
- actual_length = min((unsigned int)length,
- ifx_dev->spi_msg.actual_length);
- ifx_dev->swap_buf(
- (ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD),
- actual_length,
- &ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]);
- ifx_spi_insert_flip_string(
- ifx_dev,
- ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD,
- (size_t)actual_length);
- } else {
- more = 0;
- dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
- ifx_dev->spi_msg.status);
- }
-
-complete_exit:
- if (ifx_dev->write_pending) {
- ifx_dev->write_pending = 0;
- local_write_pending = 1;
- }
-
- clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
-
- queue_length = kfifo_len(&ifx_dev->tx_fifo);
- srdy = gpiod_get_value(ifx_dev->gpio.srdy);
- if (!srdy)
- ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
-
- /* schedule output if there is more to do */
- if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags))
- tasklet_schedule(&ifx_dev->io_work_tasklet);
- else {
- if (more || ifx_dev->spi_more || queue_length > 0 ||
- local_write_pending) {
- if (ifx_dev->spi_slave_cts) {
- if (more)
- mrdy_assert(ifx_dev);
- } else
- mrdy_assert(ifx_dev);
- } else {
- /*
- * poke line discipline driver if any for more data
- * may or may not get more data to write
- * for now, say not busy
- */
- ifx_spi_power_state_clear(ifx_dev,
- IFX_SPI_POWER_DATA_PENDING);
- tty_port_tty_wakeup(&ifx_dev->tty_port);
- }
- }
-}
-
-/**
- * ifx_spi_io - I/O tasklet
- * @t: tasklet construct used to fetch the SPI device
- *
- * Queue data for transmission if possible and then kick off the
- * transfer.
- */
-static void ifx_spi_io(struct tasklet_struct *t)
-{
- int retval;
- struct ifx_spi_device *ifx_dev = from_tasklet(ifx_dev, t,
- io_work_tasklet);
-
- if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags) &&
- test_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags)) {
- if (ifx_dev->gpio.unack_srdy_int_nb > 0)
- ifx_dev->gpio.unack_srdy_int_nb--;
-
- ifx_spi_prepare_tx_buffer(ifx_dev);
-
- spi_message_init(&ifx_dev->spi_msg);
- INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
-
- ifx_dev->spi_msg.context = ifx_dev;
- ifx_dev->spi_msg.complete = ifx_spi_complete;
-
- /* set up our spi transfer */
- /* note len is BYTES, not transfers */
- ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE;
- ifx_dev->spi_xfer.cs_change = 0;
- ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
- /* ifx_dev->spi_xfer.speed_hz = 390625; */
- ifx_dev->spi_xfer.bits_per_word =
- ifx_dev->spi_dev->bits_per_word;
-
- ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
- ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
-
- /*
- * setup dma pointers
- */
- if (ifx_dev->use_dma) {
- ifx_dev->spi_msg.is_dma_mapped = 1;
- ifx_dev->tx_dma = ifx_dev->tx_bus;
- ifx_dev->rx_dma = ifx_dev->rx_bus;
- ifx_dev->spi_xfer.tx_dma = ifx_dev->tx_dma;
- ifx_dev->spi_xfer.rx_dma = ifx_dev->rx_dma;
- } else {
- ifx_dev->spi_msg.is_dma_mapped = 0;
- ifx_dev->tx_dma = (dma_addr_t)0;
- ifx_dev->rx_dma = (dma_addr_t)0;
- ifx_dev->spi_xfer.tx_dma = (dma_addr_t)0;
- ifx_dev->spi_xfer.rx_dma = (dma_addr_t)0;
- }
-
- spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg);
-
- /* Assert MRDY. This may have already been done by the write
- * routine.
- */
- mrdy_assert(ifx_dev);
-
- retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg);
- if (retval) {
- clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS,
- &ifx_dev->flags);
- tasklet_schedule(&ifx_dev->io_work_tasklet);
- return;
- }
- } else
- ifx_dev->write_pending = 1;
-}
-
-/**
- * ifx_spi_free_port - free up the tty side
- * @ifx_dev: IFX device going away
- *
- * Unregister and free up a port when the device goes away
- */
-static void ifx_spi_free_port(struct ifx_spi_device *ifx_dev)
-{
- if (ifx_dev->tty_dev)
- tty_unregister_device(tty_drv, ifx_dev->minor);
- tty_port_destroy(&ifx_dev->tty_port);
- kfifo_free(&ifx_dev->tx_fifo);
-}
-
-/**
- * ifx_spi_create_port - create a new port
- * @ifx_dev: our spi device
- *
- * Allocate and initialise the tty port that goes with this interface
- * and add it to the tty layer so that it can be opened.
- */
-static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev)
-{
- int ret = 0;
- struct tty_port *pport = &ifx_dev->tty_port;
-
- spin_lock_init(&ifx_dev->fifo_lock);
- lockdep_set_class_and_subclass(&ifx_dev->fifo_lock,
- &ifx_spi_key, 0);
-
- if (kfifo_alloc(&ifx_dev->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- tty_port_init(pport);
- pport->ops = &ifx_tty_port_ops;
- ifx_dev->minor = IFX_SPI_TTY_ID;
- ifx_dev->tty_dev = tty_port_register_device(pport, tty_drv,
- ifx_dev->minor, &ifx_dev->spi_dev->dev);
- if (IS_ERR(ifx_dev->tty_dev)) {
- dev_dbg(&ifx_dev->spi_dev->dev,
- "%s: registering tty device failed", __func__);
- ret = PTR_ERR(ifx_dev->tty_dev);
- goto error_port;
- }
- return 0;
-
-error_port:
- tty_port_destroy(pport);
-error_ret:
- ifx_spi_free_port(ifx_dev);
- return ret;
-}
-
-/**
- * ifx_spi_handle_srdy - handle SRDY
- * @ifx_dev: device asserting SRDY
- *
- * Check our device state and see what we need to kick off when SRDY
- * is asserted. This usually means killing the timer and firing off the
- * I/O processing.
- */
-static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev)
-{
- if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) {
- del_timer(&ifx_dev->spi_timer);
- clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
- }
-
- ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY);
-
- if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags))
- tasklet_schedule(&ifx_dev->io_work_tasklet);
- else
- set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
-}
-
-/**
- * ifx_spi_srdy_interrupt - SRDY asserted
- * @irq: our IRQ number
- * @dev: our ifx device
- *
- * The modem asserted SRDY. Handle the srdy event
- */
-static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev)
-{
- struct ifx_spi_device *ifx_dev = dev;
- ifx_dev->gpio.unack_srdy_int_nb++;
- ifx_spi_handle_srdy(ifx_dev);
- return IRQ_HANDLED;
-}
-
-/**
- * ifx_spi_reset_interrupt - Modem has changed reset state
- * @irq: interrupt number
- * @dev: our device pointer
- *
- * The modem has either entered or left reset state. Check the GPIO
- * line to see which.
- *
- * FIXME: review locking on MR_INPROGRESS versus
- * parallel unsolicited reset/solicited reset
- */
-static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev)
-{
- struct ifx_spi_device *ifx_dev = dev;
- int val = gpiod_get_value(ifx_dev->gpio.reset_out);
- int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state);
-
- if (val == 0) {
- /* entered reset */
- set_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
- if (!solreset) {
- /* unsolicited reset */
- tty_port_tty_hangup(&ifx_dev->tty_port, false);
- }
- } else {
- /* exited reset */
- clear_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
- if (solreset) {
- set_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state);
- wake_up(&ifx_dev->mdm_reset_wait);
- }
- }
- return IRQ_HANDLED;
-}
-
-/**
- * ifx_spi_free_device - free device
- * @ifx_dev: device to free
- *
- * Free the IFX device
- */
-static void ifx_spi_free_device(struct ifx_spi_device *ifx_dev)
-{
- ifx_spi_free_port(ifx_dev);
- dma_free_coherent(&ifx_dev->spi_dev->dev,
- IFX_SPI_TRANSFER_SIZE,
- ifx_dev->tx_buffer,
- ifx_dev->tx_bus);
- dma_free_coherent(&ifx_dev->spi_dev->dev,
- IFX_SPI_TRANSFER_SIZE,
- ifx_dev->rx_buffer,
- ifx_dev->rx_bus);
-}
-
-/**
- * ifx_spi_reset - reset modem
- * @ifx_dev: modem to reset
- *
- * Perform a reset on the modem
- */
-static int ifx_spi_reset(struct ifx_spi_device *ifx_dev)
-{
- int ret;
- /*
- * set up modem power, reset
- *
- * delays are required on some platforms for the modem
- * to reset properly
- */
- set_bit(MR_START, &ifx_dev->mdm_reset_state);
- gpiod_set_value(ifx_dev->gpio.po, 0);
- gpiod_set_value(ifx_dev->gpio.reset, 0);
- msleep(25);
- gpiod_set_value(ifx_dev->gpio.reset, 1);
- msleep(1);
- gpiod_set_value(ifx_dev->gpio.po, 1);
- msleep(1);
- gpiod_set_value(ifx_dev->gpio.po, 0);
- ret = wait_event_timeout(ifx_dev->mdm_reset_wait,
- test_bit(MR_COMPLETE,
- &ifx_dev->mdm_reset_state),
- IFX_RESET_TIMEOUT);
- if (!ret)
- dev_warn(&ifx_dev->spi_dev->dev, "Modem reset timeout: (state:%lx)",
- ifx_dev->mdm_reset_state);
-
- ifx_dev->mdm_reset_state = 0;
- return ret;
-}
-
-/**
- * ifx_spi_spi_probe - probe callback
- * @spi: our possible matching SPI device
- *
- * Probe for a 6x60 modem on SPI bus. Perform any needed device and
- * GPIO setup.
- *
- * FIXME:
- * - Support for multiple devices
- * - Split out MID specific GPIO handling eventually
- */
-
-static int ifx_spi_spi_probe(struct spi_device *spi)
-{
- int ret;
- int srdy;
- struct ifx_modem_platform_data *pl_data;
- struct ifx_spi_device *ifx_dev;
- struct device *dev = &spi->dev;
-
- if (saved_ifx_dev) {
- dev_dbg(dev, "ignoring subsequent detection");
- return -ENODEV;
- }
-
- pl_data = dev_get_platdata(dev);
- if (!pl_data) {
- dev_err(dev, "missing platform data!");
- return -ENODEV;
- }
-
- /* initialize structure to hold our device variables */
- ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL);
- if (!ifx_dev) {
- dev_err(dev, "spi device allocation failed");
- return -ENOMEM;
- }
- saved_ifx_dev = ifx_dev;
- ifx_dev->spi_dev = spi;
- clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
- spin_lock_init(&ifx_dev->write_lock);
- spin_lock_init(&ifx_dev->power_lock);
- ifx_dev->power_status = 0;
- timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
- ifx_dev->modem = pl_data->modem_type;
- ifx_dev->use_dma = pl_data->use_dma;
- ifx_dev->max_hz = pl_data->max_hz;
- /* initialize spi mode, etc */
- spi->max_speed_hz = ifx_dev->max_hz;
- spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode);
- spi->bits_per_word = spi_bpw;
- ret = spi_setup(spi);
- if (ret) {
- dev_err(dev, "SPI setup wasn't successful %d", ret);
- kfree(ifx_dev);
- return -ENODEV;
- }
-
- /* init swap_buf function according to word width configuration */
- if (spi->bits_per_word == 32)
- ifx_dev->swap_buf = swap_buf_32;
- else if (spi->bits_per_word == 16)
- ifx_dev->swap_buf = swap_buf_16;
- else
- ifx_dev->swap_buf = swap_buf_8;
-
- /* ensure SPI protocol flags are initialized to enable transfer */
- ifx_dev->spi_more = 0;
- ifx_dev->spi_slave_cts = 0;
-
-	/* initialize transfer and DMA buffers */
- ifx_dev->tx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
- IFX_SPI_TRANSFER_SIZE,
- &ifx_dev->tx_bus,
- GFP_KERNEL);
- if (!ifx_dev->tx_buffer) {
- dev_err(dev, "DMA-TX buffer allocation failed");
- ret = -ENOMEM;
- goto error_ret;
- }
- ifx_dev->rx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
- IFX_SPI_TRANSFER_SIZE,
- &ifx_dev->rx_bus,
- GFP_KERNEL);
- if (!ifx_dev->rx_buffer) {
- dev_err(dev, "DMA-RX buffer allocation failed");
- ret = -ENOMEM;
- goto error_ret;
- }
-
- /* initialize waitq for modem reset */
- init_waitqueue_head(&ifx_dev->mdm_reset_wait);
-
- spi_set_drvdata(spi, ifx_dev);
- tasklet_setup(&ifx_dev->io_work_tasklet, ifx_spi_io);
-
- set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
-
- /* create our tty port */
- ret = ifx_spi_create_port(ifx_dev);
- if (ret != 0) {
- dev_err(dev, "create default tty port failed");
- goto error_ret;
- }
-
- ifx_dev->gpio.reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ifx_dev->gpio.reset)) {
- dev_err(dev, "could not obtain reset GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.reset);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.reset, "ifxModem reset");
- ifx_dev->gpio.po = devm_gpiod_get(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(ifx_dev->gpio.po)) {
- dev_err(dev, "could not obtain power GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.po);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.po, "ifxModem power");
- ifx_dev->gpio.mrdy = devm_gpiod_get(dev, "mrdy", GPIOD_OUT_LOW);
- if (IS_ERR(ifx_dev->gpio.mrdy)) {
- dev_err(dev, "could not obtain mrdy GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.mrdy);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.mrdy, "ifxModem mrdy");
- ifx_dev->gpio.srdy = devm_gpiod_get(dev, "srdy", GPIOD_IN);
- if (IS_ERR(ifx_dev->gpio.srdy)) {
- dev_err(dev, "could not obtain srdy GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.srdy);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.srdy, "ifxModem srdy");
- ifx_dev->gpio.reset_out = devm_gpiod_get(dev, "rst_out", GPIOD_IN);
- if (IS_ERR(ifx_dev->gpio.reset_out)) {
- dev_err(dev, "could not obtain rst_out GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.reset_out);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.reset_out, "ifxModem reset out");
- ifx_dev->gpio.pmu_reset = devm_gpiod_get(dev, "pmu_reset", GPIOD_ASIS);
- if (IS_ERR(ifx_dev->gpio.pmu_reset)) {
- dev_err(dev, "could not obtain pmu_reset GPIO\n");
- ret = PTR_ERR(ifx_dev->gpio.pmu_reset);
- goto error_ret;
- }
- gpiod_set_consumer_name(ifx_dev->gpio.pmu_reset, "ifxModem PMU reset");
-
- ret = request_irq(gpiod_to_irq(ifx_dev->gpio.reset_out),
- ifx_spi_reset_interrupt,
- IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
- ifx_dev);
- if (ret) {
- dev_err(dev, "Unable to get irq %x\n",
- gpiod_to_irq(ifx_dev->gpio.reset_out));
- goto error_ret;
- }
-
- ret = ifx_spi_reset(ifx_dev);
-
- ret = request_irq(gpiod_to_irq(ifx_dev->gpio.srdy),
- ifx_spi_srdy_interrupt, IRQF_TRIGGER_RISING, DRVNAME,
- ifx_dev);
- if (ret) {
- dev_err(dev, "Unable to get irq %x",
- gpiod_to_irq(ifx_dev->gpio.srdy));
- goto error_ret2;
- }
-
- /* set pm runtime power state and register with power system */
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- /* handle case that modem is already signaling SRDY */
- /* no outgoing tty open at this point, this just satisfies the
- * modem's read and should reset communication properly
- */
- srdy = gpiod_get_value(ifx_dev->gpio.srdy);
-
- if (srdy) {
- mrdy_assert(ifx_dev);
- ifx_spi_handle_srdy(ifx_dev);
- } else
- mrdy_set_low(ifx_dev);
- return 0;
-
-error_ret2:
- free_irq(gpiod_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
-error_ret:
- ifx_spi_free_device(ifx_dev);
- saved_ifx_dev = NULL;
- return ret;
-}
-
-/**
- * ifx_spi_spi_remove - SPI device was removed
- * @spi: SPI device
- *
- * FIXME: We should be shutting the device down here not in
- * the module unload path.
- */
-
-static int ifx_spi_spi_remove(struct spi_device *spi)
-{
- struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
- /* stop activity */
- tasklet_kill(&ifx_dev->io_work_tasklet);
-
- pm_runtime_disable(&spi->dev);
-
- /* free irq */
- free_irq(gpiod_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
- free_irq(gpiod_to_irq(ifx_dev->gpio.srdy), ifx_dev);
-
- /* free allocations */
- ifx_spi_free_device(ifx_dev);
-
- saved_ifx_dev = NULL;
- return 0;
-}
-
-/**
- * ifx_spi_spi_shutdown - called on SPI shutdown
- * @spi: SPI device
- *
- * No action needs to be taken here
- */
-
-static void ifx_spi_spi_shutdown(struct spi_device *spi)
-{
- struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
-
- ifx_modem_power_off(ifx_dev);
-}
-
-/*
- * The various suspend and resume callbacks have nothing to do;
- * there is no hardware state to save.
- */
-
-/**
- * ifx_spi_pm_suspend - suspend modem on system suspend
- * @dev: device being suspended
- *
- * Suspend the modem. No action needed on Intel MID platforms, may
- * need extending for other systems.
- */
-static int ifx_spi_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-/**
- * ifx_spi_pm_resume - resume modem on system resume
- * @dev: device being resumed
- *
- * Allow the modem to resume. No action needed.
- *
- * FIXME: do we need to reset anything here?
- */
-static int ifx_spi_pm_resume(struct device *dev)
-{
- return 0;
-}
-
-/**
- * ifx_spi_pm_runtime_resume - resume modem
- * @dev: device being resumed
- *
- * Allow the modem to resume. No action needed.
- */
-static int ifx_spi_pm_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-/**
- * ifx_spi_pm_runtime_suspend - suspend modem
- * @dev: device being suspended
- *
- * Allow the modem to suspend, and thus allow suspend to propagate up
- * the device tree.
- */
-static int ifx_spi_pm_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-/**
- * ifx_spi_pm_runtime_idle - check if modem idle
- * @dev: our device
- *
- * Check conditions and queue runtime suspend if idle.
- */
-static int ifx_spi_pm_runtime_idle(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
-
- if (!ifx_dev->power_status)
- pm_runtime_suspend(dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ifx_spi_pm = {
- .resume = ifx_spi_pm_resume,
- .suspend = ifx_spi_pm_suspend,
- .runtime_resume = ifx_spi_pm_runtime_resume,
- .runtime_suspend = ifx_spi_pm_runtime_suspend,
- .runtime_idle = ifx_spi_pm_runtime_idle
-};
-
-static const struct spi_device_id ifx_id_table[] = {
- {"ifx6160", 0},
- {"ifx6260", 0},
- { }
-};
-MODULE_DEVICE_TABLE(spi, ifx_id_table);
-
-/* spi operations */
-static struct spi_driver ifx_spi_driver = {
- .driver = {
- .name = DRVNAME,
- .pm = &ifx_spi_pm,
- },
- .probe = ifx_spi_spi_probe,
- .shutdown = ifx_spi_spi_shutdown,
- .remove = ifx_spi_spi_remove,
- .id_table = ifx_id_table
-};
-
-/**
- * ifx_spi_exit - module exit
- *
- * Unload the module.
- */
-
-static void __exit ifx_spi_exit(void)
-{
- /* unregister */
- spi_unregister_driver(&ifx_spi_driver);
- tty_unregister_driver(tty_drv);
- put_tty_driver(tty_drv);
- unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
-}
-
-/**
- * ifx_spi_init - module entry point
- *
- * Initialise the SPI and tty interfaces for the IFX SPI driver
- * We need to initialize upper-edge spi driver after the tty
- * driver because otherwise the spi probe will race
- */
-
-static int __init ifx_spi_init(void)
-{
- int result;
-
- tty_drv = alloc_tty_driver(1);
- if (!tty_drv) {
- pr_err("%s: alloc_tty_driver failed", DRVNAME);
- return -ENOMEM;
- }
-
- tty_drv->driver_name = DRVNAME;
- tty_drv->name = TTYNAME;
- tty_drv->minor_start = IFX_SPI_TTY_ID;
- tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
- tty_drv->subtype = SERIAL_TYPE_NORMAL;
- tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- tty_drv->init_termios = tty_std_termios;
-
- tty_set_operations(tty_drv, &ifx_spi_serial_ops);
-
- result = tty_register_driver(tty_drv);
- if (result) {
- pr_err("%s: tty_register_driver failed(%d)",
- DRVNAME, result);
- goto err_free_tty;
- }
-
- result = spi_register_driver(&ifx_spi_driver);
- if (result) {
- pr_err("%s: spi_register_driver failed(%d)",
- DRVNAME, result);
- goto err_unreg_tty;
- }
-
- result = register_reboot_notifier(&ifx_modem_reboot_notifier_block);
- if (result) {
- pr_err("%s: register ifx modem reboot notifier failed(%d)",
- DRVNAME, result);
- goto err_unreg_spi;
- }
-
- return 0;
-err_unreg_spi:
- spi_unregister_driver(&ifx_spi_driver);
-err_unreg_tty:
- tty_unregister_driver(tty_drv);
-err_free_tty:
- put_tty_driver(tty_drv);
-
- return result;
-}
-
-module_init(ifx_spi_init);
-module_exit(ifx_spi_exit);
-
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("IFX6x60 spi driver");
-MODULE_LICENSE("GPL");
-MODULE_INFO(Version, "0.1-IFX6x60");
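Though the driver is gone, the wire format is easy to reconstruct from ifx_spi_decode_spi_header()/ifx_spi_setup_spi_header() above: each 2048-byte transfer starts with a 4-byte header whose first little-endian 16-bit word holds a 12-bit payload length plus a "more data follows" flag in bit 12, while the second word carries the slave CTS state in bit 6 of its high byte; all-0x00 and all-0xFF headers mean "ignore this frame". A condensed sketch derived from those helpers:

    /* byte view of the removed driver's 4-byte SPI frame header */
    static inline int ifx_hdr_len(const u8 *h)  { return h[0] | ((h[1] & 0x0f) << 8); }
    static inline int ifx_hdr_more(const u8 *h) { return (h[1] >> 4) & 1; }
    static inline int ifx_hdr_cts(const u8 *h)  { return (h[3] >> 6) & 1; }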
diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
deleted file mode 100644
index ecb841d928a7..000000000000
--- a/drivers/tty/serial/ifx6x60.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/****************************************************************************
- *
- * Driver for the IFX spi modem.
- *
- * Copyright (C) 2009, 2010 Intel Corp
- * Jim Stanley <jim.stanley@intel.com>
- *
- *****************************************************************************/
-#ifndef _IFX6X60_H
-#define _IFX6X60_H
-
-struct gpio_desc;
-
-#define DRVNAME "ifx6x60"
-#define TTYNAME "ttyIFX"
-
-#define IFX_SPI_MAX_MINORS 1
-#define IFX_SPI_TRANSFER_SIZE 2048
-#define IFX_SPI_FIFO_SIZE 4096
-
-#define IFX_SPI_HEADER_OVERHEAD 4
-#define IFX_RESET_TIMEOUT msecs_to_jiffies(50)
-
-/* device flags bitfield definitions */
-#define IFX_SPI_STATE_PRESENT 0
-#define IFX_SPI_STATE_IO_IN_PROGRESS 1
-#define IFX_SPI_STATE_IO_READY 2
-#define IFX_SPI_STATE_TIMER_PENDING 3
-#define IFX_SPI_STATE_IO_AVAILABLE 4
-
-/* flow control bitfields */
-#define IFX_SPI_DCD 0
-#define IFX_SPI_CTS 1
-#define IFX_SPI_DSR 2
-#define IFX_SPI_RI 3
-#define IFX_SPI_DTR 4
-#define IFX_SPI_RTS 5
-#define IFX_SPI_TX_FC 6
-#define IFX_SPI_RX_FC 7
-#define IFX_SPI_UPDATE 8
-
-#define IFX_SPI_PAYLOAD_SIZE (IFX_SPI_TRANSFER_SIZE - \
- IFX_SPI_HEADER_OVERHEAD)
-
-#define IFX_SPI_IRQ_TYPE DETECT_EDGE_RISING
-#define IFX_SPI_GPIO_TARGET 0
-#define IFX_SPI_GPIO0 0x105
-
-#define IFX_SPI_STATUS_TIMEOUT (2000*HZ)
-
-/* values for bits in power status byte */
-#define IFX_SPI_POWER_DATA_PENDING 1
-#define IFX_SPI_POWER_SRDY 2
-
-struct ifx_spi_device {
- /* Our SPI device */
- struct spi_device *spi_dev;
-
- /* Port specific data */
- struct kfifo tx_fifo;
- spinlock_t fifo_lock;
- unsigned long signal_state;
-
- /* TTY Layer logic */
- struct tty_port tty_port;
- struct device *tty_dev;
- int minor;
-
- /* Low level I/O work */
- struct tasklet_struct io_work_tasklet;
- unsigned long flags;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
-
- int modem; /* Modem type */
- int use_dma; /* provide dma-able addrs in SPI msg */
- long max_hz; /* max SPI frequency */
-
- spinlock_t write_lock;
- int write_pending;
- spinlock_t power_lock;
- unsigned char power_status;
-
- unsigned char *rx_buffer;
- unsigned char *tx_buffer;
- dma_addr_t rx_bus;
- dma_addr_t tx_bus;
- unsigned char spi_more;
- unsigned char spi_slave_cts;
-
- struct timer_list spi_timer;
-
- struct spi_message spi_msg;
- struct spi_transfer spi_xfer;
-
- struct {
- /* gpio lines */
- struct gpio_desc *srdy; /* slave-ready gpio */
- struct gpio_desc *mrdy; /* master-ready gpio */
- struct gpio_desc *reset; /* modem-reset gpio */
- struct gpio_desc *po; /* modem-on gpio */
- struct gpio_desc *reset_out; /* modem-in-reset gpio */
- struct gpio_desc *pmu_reset; /* PMU reset gpio */
- /* state/stats */
- int unack_srdy_int_nb;
- } gpio;
-
- /* modem reset */
- unsigned long mdm_reset_state;
-#define MR_START 0
-#define MR_INPROGRESS 1
-#define MR_COMPLETE 2
- wait_queue_head_t mdm_reset_wait;
- void (*swap_buf)(unsigned char *buf, int len, void *end);
-};
-
-#endif /* _IFX6X60_H */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 425624d794dd..8257597d034d 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2248,7 +2248,7 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
sport->port.membase = base;
- sport->port.type = PORT_IMX,
+ sport->port.type = PORT_IMX;
sport->port.iotype = UPIO_MEM;
sport->port.irq = rxirq;
sport->port.fifosize = 32;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 62813e421f12..497b334bc845 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -876,7 +876,7 @@ static int lqasc_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &lqasc_pops;
port->fifosize = 16;
- port->type = PORT_LTQ_ASC,
+ port->type = PORT_LTQ_ASC;
port->line = line;
port->dev = &pdev->dev;
/* unused, just to be backward-compatible */
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 371569a0fd00..3c92d4e01488 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -521,9 +521,6 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
MAX3100_STATUS_PE | MAX3100_STATUS_FE |
MAX3100_STATUS_OE;
- /* we are sending char from a workqueue so enable */
- s->port.state->port.low_latency = 1;
-
if (s->poll_time > 0)
del_timer_sync(&s->timer);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 118b29912289..e0c00a1b0763 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
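The distinction behind the mvebu-uart change: STAT_TX_RDY only says the TX FIFO can accept another byte, while STAT_TX_EMP says the FIFO and shift register have fully drained, so console_write now waits for true emptiness before restoring the saved interrupt mask (presumably so the tail of a message cannot be lost while bytes are still in flight). Both helpers share the readl_poll_timeout_atomic() shape, which busy-polls without sleeping, as an atomic console path requires; a sketch, where wanted_bit is a placeholder:

    u32 val;

    /* poll UART_STAT every 1 us, for at most 10 ms, until the bit sets */
    readl_poll_timeout_atomic(port->membase + UART_STAT, val,
    			      (val & wanted_bit), 1, 10000);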
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 8ecf622602cb..f414d6acad69 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -34,8 +34,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <asm/cacheflush.h>
-
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
@@ -1535,34 +1533,6 @@ disable_clk_ahb:
return err;
}
-/*
- * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
- * could successfully get all information from dt or a negative errno.
- */
-static int serial_mxs_probe_dt(struct mxs_auart_port *s,
- struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- if (!np)
- /* no device tree device */
- return 1;
-
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
- return ret;
- }
- s->port.line = ret;
-
- if (of_get_property(np, "uart-has-rtscts", NULL) ||
- of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
- set_bit(MXS_AUART_RTSCTS, &s->flags);
-
- return 0;
-}
-
static int mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
{
enum mctrl_gpio_idx i;
@@ -1631,6 +1601,7 @@ static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s)
static int mxs_auart_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct mxs_auart_port *s;
u32 version;
int ret, irq;
@@ -1643,11 +1614,17 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.dev = &pdev->dev;
s->dev = &pdev->dev;
- ret = serial_mxs_probe_dt(s, pdev);
- if (ret > 0)
- s->port.line = pdev->id < 0 ? 0 : pdev->id;
- else if (ret < 0)
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
return ret;
+ }
+ s->port.line = ret;
+
+ if (of_get_property(np, "uart-has-rtscts", NULL) ||
+ of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
+ set_bit(MXS_AUART_RTSCTS, &s->flags);
+
if (s->port.line >= ARRAY_SIZE(auart_port)) {
dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
return -EINVAL;
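The inlined lookup rests on of_alias_get_id(), which resolves the probing node against the devicetree /aliases entries so that line numbering follows the board description rather than probe order. A sketch, with the DT fragment shown as a comment (node names assumed):

    /*
     * aliases {
     *	serial0 = &auart0;
     *	serial1 = &auart1;
     * };
     */
    ret = of_alias_get_id(pdev->dev.of_node, "serial");
    if (ret < 0)
    	return ret;	/* no usable alias: give up rather than guess */
    s->port.line = ret;	/* alias index becomes the tty line number */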
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index c149f8c30007..abc6042f0378 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -12,6 +12,7 @@
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -62,6 +63,9 @@
#define OWL_UART_STAT_TRFL_MASK GENMASK(16, 11)
#define OWL_UART_STAT_UTBB BIT(17)
+#define OWL_UART_POLL_USEC 5
+#define OWL_UART_TIMEOUT_USEC 10000
+
static struct uart_driver owl_uart_driver;
struct owl_uart_info {
@@ -461,6 +465,36 @@ static void owl_uart_config_port(struct uart_port *port, int flags)
}
}
+#ifdef CONFIG_CONSOLE_POLL
+
+static int owl_uart_poll_get_char(struct uart_port *port)
+{
+ if (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_RFEM)
+ return NO_POLL_CHAR;
+
+ return owl_uart_read(port, OWL_UART_RXDAT);
+}
+
+static void owl_uart_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ u32 reg;
+ int ret;
+
+	/* Wait until the TX FIFO is no longer full, or until timeout */
+ ret = readl_poll_timeout_atomic(port->membase + OWL_UART_STAT, reg,
+ !(reg & OWL_UART_STAT_TFFU),
+ OWL_UART_POLL_USEC,
+ OWL_UART_TIMEOUT_USEC);
+ if (ret == -ETIMEDOUT) {
+ dev_err(port->dev, "Timeout waiting while UART TX FULL\n");
+ return;
+ }
+
+ owl_uart_write(port, ch, OWL_UART_TXDAT);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static const struct uart_ops owl_uart_ops = {
.set_mctrl = owl_uart_set_mctrl,
.get_mctrl = owl_uart_get_mctrl,
@@ -476,6 +510,10 @@ static const struct uart_ops owl_uart_ops = {
.request_port = owl_uart_request_port,
.release_port = owl_uart_release_port,
.verify_port = owl_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = owl_uart_poll_get_char,
+ .poll_put_char = owl_uart_poll_put_char,
+#endif
};
#ifdef CONFIG_SERIAL_OWL_CONSOLE
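The two poll hooks exist for polled debug users such as kgdboc/kdb: they must move single characters with interrupts off and may never sleep, which is why poll_get_char() returns NO_POLL_CHAR instead of blocking on an empty RX FIFO. A sketch of how a polled consumer drives them (handle_char() is hypothetical):

    for (;;) {
    	int c = port->ops->poll_get_char(port);

    	if (c == NO_POLL_CHAR)
    		continue;		/* nothing buffered: spin, never sleep */
    	if (handle_char(c))		/* hypothetical consumer */
    		break;
    }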
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 828f9ad1be49..ba31e97d3d96 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -975,7 +975,6 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
port->closing_wait = closing_wait;
if (new_info->xmit_fifo_size)
uport->fifosize = new_info->xmit_fifo_size;
- port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
check_and_exit:
retval = 0;
@@ -1795,8 +1794,6 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
if (!uport || uport->flags & UPF_DEAD)
return -ENXIO;
- port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
-
/*
* Start up the serial port.
*/
@@ -2851,6 +2848,8 @@ static const struct attribute_group tty_dev_attr_group = {
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure to use for this port.
*
+ * Context: task context, might sleep
+ *
* This allows the driver to register its own uart_port structure
* with the core driver. The main purpose is to allow the low
* level uart drivers to expand uart_port, rather than having yet
@@ -2864,8 +2863,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
struct device *tty_dev;
int num_groups;
- BUG_ON(in_interrupt());
-
if (uport->line >= drv->nr)
return -EINVAL;
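Dropping BUG_ON(in_interrupt()) in favour of a documented "Context:" line matches the kernel's broader move away from in_interrupt() checks in drivers, and away from crashing the machine to report API misuse. Where a runtime check is still wanted, might_sleep() is the usual annotation; a sketch of that alternative (not what this patch adds):

    int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
    {
    	might_sleep();	/* with CONFIG_DEBUG_ATOMIC_SLEEP, warns instead
    			 * of BUGing when called from atomic context */
    	/* ... body as before ... */
    }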
@@ -2954,6 +2951,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure for this port
*
+ * Context: task context, might sleep
+ *
* This unhooks (and hangs up) the specified port structure from the
* core driver. No further calls will be made to the low-level code
* for this port.
@@ -2966,8 +2965,6 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
struct tty_struct *tty;
int ret = 0;
- BUG_ON(in_interrupt());
-
mutex_lock(&port_mutex);
/*
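The removed BUG_ON(in_interrupt()) checks are superseded by the kerneldoc "Context: task context, might sleep" annotations added above (in_interrupt() is also an unreliable test for this purpose). A caller that wants an equivalent runtime check in debug builds can rely on might_sleep(); a minimal sketch with a hypothetical wrapper, not serial_core code:

#include <linux/kernel.h>
#include <linux/serial_core.h>

static int my_add_port_checked(struct uart_driver *drv,
			       struct uart_port *uport)
{
	/* Splats under CONFIG_DEBUG_ATOMIC_SLEEP if called atomically. */
	might_sleep();
	return uart_add_one_port(drv, uport);
}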
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 1066eebe3b28..328d5a78792f 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+ ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
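The added line seeds uartclk with the conventional 16x-oversampling value, so the serial core reports a sane clock before the divider setup in __ssp_update_div() runs. A quick host-side check of the arithmetic (assumptions: the default baud is 115200; this is illustrative C, not driver code):

#include <assert.h>

int main(void)
{
	unsigned int baud = 115200;		/* assumed default baud */
	unsigned int uartclk = baud * 16;	/* 16 samples per bit */

	assert(uartclk == 1843200);		/* the classic 1.8432 MHz */
	return 0;
}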
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
deleted file mode 100644
index 38622f2a30a9..000000000000
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ /dev/null
@@ -1,1503 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for CSR SiRFprimaII onboard UARTs.
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/sysrq.h>
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/of_gpio.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-#include "sirfsoc_uart.h"
-
-static unsigned int
-sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
-static unsigned int
-sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
-static struct uart_driver sirfsoc_uart_drv;
-
-static void sirfsoc_uart_tx_dma_complete_callback(void *param);
-static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
- {4000000, 2359296},
- {3500000, 1310721},
- {3000000, 1572865},
- {2500000, 1245186},
- {2000000, 1572866},
- {1500000, 1245188},
- {1152000, 1638404},
- {1000000, 1572869},
- {921600, 1114120},
- {576000, 1245196},
- {500000, 1245198},
- {460800, 1572876},
- {230400, 1310750},
- {115200, 1310781},
- {57600, 1310843},
- {38400, 1114328},
- {19200, 1114545},
- {9600, 1114979},
-};
-
-static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];
-
-static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
-{
- return container_of(port, struct sirfsoc_uart_port, port);
-}
-
-static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
-{
- unsigned long reg;
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
- reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
- return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
- goto cts_asserted;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
- SIRFUART_AFC_CTS_STATUS))
- goto cts_asserted;
- else
- goto cts_deasserted;
- } else {
- if (!gpio_get_value(sirfport->cts_gpio))
- goto cts_asserted;
- else
- goto cts_deasserted;
- }
-cts_deasserted:
- return TIOCM_CAR | TIOCM_DSR;
-cts_asserted:
- return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
-}
-
-static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- unsigned int assert = mctrl & TIOCM_RTS;
- unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
- unsigned int current_val;
-
- if (mctrl & TIOCM_LOOP) {
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
- wr_regl(port, ureg->sirfsoc_line_ctrl,
- rd_regl(port, ureg->sirfsoc_line_ctrl) |
- SIRFUART_LOOP_BACK);
- else
- wr_regl(port, ureg->sirfsoc_mode1,
- rd_regl(port, ureg->sirfsoc_mode1) |
- SIRFSOC_USP_LOOP_BACK_CTRL);
- } else {
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
- wr_regl(port, ureg->sirfsoc_line_ctrl,
- rd_regl(port, ureg->sirfsoc_line_ctrl) &
- ~SIRFUART_LOOP_BACK);
- else
- wr_regl(port, ureg->sirfsoc_mode1,
- rd_regl(port, ureg->sirfsoc_mode1) &
- ~SIRFSOC_USP_LOOP_BACK_CTRL);
- }
-
- if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
- return;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
- val |= current_val;
- wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
- } else {
- if (!val)
- gpio_set_value(sirfport->rts_gpio, 1);
- else
- gpio_set_value(sirfport->rts_gpio, 0);
- }
-}
-
-static void sirfsoc_uart_stop_tx(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
- if (sirfport->tx_dma_chan) {
- if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
- dmaengine_pause(sirfport->tx_dma_chan);
- sirfport->tx_dma_state = TX_DMA_PAUSE;
- } else {
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) &
- ~uint_en->sirfsoc_txfifo_empty_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_txfifo_empty_en);
- }
- } else {
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
- wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
- ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) &
- ~uint_en->sirfsoc_txfifo_empty_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_txfifo_empty_en);
- }
-}
-
-static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
-{
- struct uart_port *port = &sirfport->port;
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- struct circ_buf *xmit = &port->state->xmit;
- unsigned long tran_size;
- unsigned long tran_start;
- unsigned long pio_tx_size;
-
- tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- tran_start = (unsigned long)(xmit->buf + xmit->tail);
- if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
- !tran_size)
- return;
- if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
- dmaengine_resume(sirfport->tx_dma_chan);
- return;
- }
- if (sirfport->tx_dma_state == TX_DMA_RUNNING)
- return;
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)&
- ~(uint_en->sirfsoc_txfifo_empty_en));
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_txfifo_empty_en);
- /*
- * DMA requires both the buffer address and the buffer length to be
- * 4-byte aligned, so fall back to PIO in two cases:
- * 1. if the address is not 4-byte aligned, send the first 1~3 bytes
- * by PIO and hand the remaining 4-byte-aligned part to DMA
- * 2. if the length is not 4-byte aligned, send the aligned part by
- * DMA first, then the remaining 1~3 bytes by PIO
- * (a standalone sketch of this split follows this function)
- */
- if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
- wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
- SIRFUART_IO_MODE);
- if (BYTES_TO_ALIGN(tran_start)) {
- pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
- BYTES_TO_ALIGN(tran_start));
- tran_size -= pio_tx_size;
- }
- if (tran_size < 4)
- sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)|
- uint_en->sirfsoc_txfifo_empty_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- uint_en->sirfsoc_txfifo_empty_en);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
- } else {
- /* tx transfer mode switch into dma mode */
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
- wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
- ~SIRFUART_IO_MODE);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
- tran_size &= ~(0x3);
-
- sirfport->tx_dma_addr = dma_map_single(port->dev,
- xmit->buf + xmit->tail,
- tran_size, DMA_TO_DEVICE);
- sirfport->tx_dma_desc = dmaengine_prep_slave_single(
- sirfport->tx_dma_chan, sirfport->tx_dma_addr,
- tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!sirfport->tx_dma_desc) {
- dev_err(port->dev, "DMA prep slave single fail\n");
- return;
- }
- sirfport->tx_dma_desc->callback =
- sirfsoc_uart_tx_dma_complete_callback;
- sirfport->tx_dma_desc->callback_param = (void *)sirfport;
- sirfport->transfer_size = tran_size;
-
- dmaengine_submit(sirfport->tx_dma_desc);
- dma_async_issue_pending(sirfport->tx_dma_chan);
- sirfport->tx_dma_state = TX_DMA_RUNNING;
- }
-}
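Note that the driver above only peels the unaligned head (and sub-4-byte buffers) in one pass; any 1~3-byte tail is picked up on the next entry from the DMA completion callback. The general head/body/tail split the comment describes looks like the following sketch (assumption: plain C with hypothetical pio_tx()/dma_tx() helpers, not the driver's actual functions):

#include <stddef.h>
#include <stdint.h>

extern size_t pio_tx(const uint8_t *buf, size_t len);	/* byte-at-a-time */
extern void dma_tx(const uint8_t *buf, size_t len);	/* requires len % 4 == 0 */

static void tx_aligned_split(const uint8_t *buf, size_t len)
{
	size_t head = (4 - ((uintptr_t)buf & 3)) & 3;	/* unaligned prefix */

	if (head) {					/* 1~3 bytes by PIO */
		if (head > len)
			head = len;
		pio_tx(buf, head);
		buf += head;
		len -= head;
	}
	if (len >= 4) {					/* aligned body by DMA */
		size_t body = len & ~(size_t)3;

		dma_tx(buf, body);
		buf += body;
		len -= body;
	}
	if (len)					/* 1~3 byte tail by PIO */
		pio_tx(buf, len);
}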
-
-static void sirfsoc_uart_start_tx(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- if (sirfport->tx_dma_chan)
- sirfsoc_uart_tx_with_dma(sirfport);
- else {
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
- wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
- ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
- sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)|
- uint_en->sirfsoc_txfifo_empty_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- uint_en->sirfsoc_txfifo_empty_en);
- }
-}
-
-static void sirfsoc_uart_stop_rx(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- if (sirfport->rx_dma_chan) {
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) &
- ~(SIRFUART_RX_DMA_INT_EN(uint_en,
- sirfport->uart_reg->uart_type) |
- uint_en->sirfsoc_rx_done_en));
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- SIRFUART_RX_DMA_INT_EN(uint_en,
- sirfport->uart_reg->uart_type)|
- uint_en->sirfsoc_rx_done_en);
- dmaengine_terminate_all(sirfport->rx_dma_chan);
- } else {
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)&
- ~(SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type)));
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- }
-}
-
-static void sirfsoc_uart_disable_ms(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
- if (!sirfport->hw_flow_ctrl)
- return;
- sirfport->ms_enabled = false;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- wr_regl(port, ureg->sirfsoc_afc_ctrl,
- rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)&
- ~uint_en->sirfsoc_cts_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_cts_en);
- } else
- disable_irq(gpio_to_irq(sirfport->cts_gpio));
-}
-
-static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
-{
- struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
- struct uart_port *port = &sirfport->port;
- spin_lock(&port->lock);
- if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
- uart_handle_cts_change(port,
- !gpio_get_value(sirfport->cts_gpio));
- spin_unlock(&port->lock);
- return IRQ_HANDLED;
-}
-
-static void sirfsoc_uart_enable_ms(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
- if (!sirfport->hw_flow_ctrl)
- return;
- sirfport->ms_enabled = true;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- wr_regl(port, ureg->sirfsoc_afc_ctrl,
- rd_regl(port, ureg->sirfsoc_afc_ctrl) |
- SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN |
- SIRFUART_AFC_CTRL_RX_THD);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)
- | uint_en->sirfsoc_cts_en);
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- uint_en->sirfsoc_cts_en);
- } else
- enable_irq(gpio_to_irq(sirfport->cts_gpio));
-}
-
-static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
- if (break_state)
- ulcon |= SIRFUART_SET_BREAK;
- else
- ulcon &= ~SIRFUART_SET_BREAK;
- wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
- }
-}
-
-static unsigned int
-sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
- unsigned int ch, rx_count = 0;
- struct tty_struct *tty;
- tty = tty_port_tty_get(&port->state->port);
- if (!tty)
- return -ENODEV;
- while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- ufifo_st->ff_empty(port))) {
- ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
- SIRFUART_DUMMY_READ;
- if (unlikely(uart_handle_sysrq_char(port, ch)))
- continue;
- uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
- rx_count++;
- if (rx_count >= max_rx_count)
- break;
- }
-
- port->icount.rx += rx_count;
-
- return rx_count;
-}
-
-static unsigned int
-sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
-{
- struct uart_port *port = &sirfport->port;
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
- struct circ_buf *xmit = &port->state->xmit;
- unsigned int num_tx = 0;
- while (!uart_circ_empty(xmit) &&
- !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
- ufifo_st->ff_full(port)) &&
- count--) {
- wr_regl(port, ureg->sirfsoc_tx_fifo_data,
- xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- num_tx++;
- }
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
- return num_tx;
-}
-
-static void sirfsoc_uart_tx_dma_complete_callback(void *param)
-{
- struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
- struct uart_port *port = &sirfport->port;
- struct circ_buf *xmit = &port->state->xmit;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- xmit->tail = (xmit->tail + sirfport->transfer_size) &
- (UART_XMIT_SIZE - 1);
- port->icount.tx += sirfport->transfer_size;
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
- if (sirfport->tx_dma_addr)
- dma_unmap_single(port->dev, sirfport->tx_dma_addr,
- sirfport->transfer_size, DMA_TO_DEVICE);
- sirfport->tx_dma_state = TX_DMA_IDLE;
- sirfsoc_uart_tx_with_dma(sirfport);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
-{
- unsigned long intr_status;
- unsigned long cts_status;
- unsigned long flag = TTY_NORMAL;
- struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
- struct uart_port *port = &sirfport->port;
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
- struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- struct uart_state *state = port->state;
- struct circ_buf *xmit = &port->state->xmit;
- spin_lock(&port->lock);
- intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
- wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
- intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
- if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
- sirfport->uart_reg->uart_type)))) {
- if (intr_status & uint_st->sirfsoc_rxd_brk) {
- port->icount.brk++;
- if (uart_handle_break(port))
- goto recv_char;
- }
- if (intr_status & uint_st->sirfsoc_rx_oflow) {
- port->icount.overrun++;
- flag = TTY_OVERRUN;
- }
- if (intr_status & uint_st->sirfsoc_frm_err) {
- port->icount.frame++;
- flag = TTY_FRAME;
- }
- if (intr_status & uint_st->sirfsoc_parity_err) {
- port->icount.parity++;
- flag = TTY_PARITY;
- }
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
- intr_status &= port->read_status_mask;
- uart_insert_char(port, intr_status,
- uint_en->sirfsoc_rx_oflow_en, 0, flag);
- }
-recv_char:
- if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
- (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
- !sirfport->tx_dma_state) {
- cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
- SIRFUART_AFC_CTS_STATUS;
- cts_status = !cts_status;
- uart_handle_cts_change(port, cts_status);
- wake_up_interruptible(&state->port.delta_msr_wait);
- }
- if (!sirfport->rx_dma_chan &&
- (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
- /*
- * The chip raises RX_TIMEOUT interrupts continuously while the
- * RX FIFO is empty, and stops only once the FIFO receives data
- * in time, so relying on RX_TIMEOUT alone floods us with useless
- * interrupts. RX_DONE fires when the FIFO receives its first
- * byte. Therefore: use RX_DONE to wait for data to arrive, use
- * RX_THD/RX_FULL while data is streaming in, and use RX_TIMEOUT
- * only to drain the last remaining bytes.
- * (a sketch of this ping-pong follows this handler)
- */
- if (intr_status & uint_st->sirfsoc_rx_done) {
- if (!sirfport->is_atlas7) {
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)
- & ~(uint_en->sirfsoc_rx_done_en));
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)
- | (uint_en->sirfsoc_rx_timeout_en));
- } else {
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_rx_done_en);
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- uint_en->sirfsoc_rx_timeout_en);
- }
- } else {
- if (intr_status & uint_st->sirfsoc_rx_timeout) {
- if (!sirfport->is_atlas7) {
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)
- & ~(uint_en->sirfsoc_rx_timeout_en));
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg)
- | (uint_en->sirfsoc_rx_done_en));
- } else {
- wr_regl(port,
- ureg->sirfsoc_int_en_clr_reg,
- uint_en->sirfsoc_rx_timeout_en);
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- uint_en->sirfsoc_rx_done_en);
- }
- }
- sirfsoc_uart_pio_rx_chars(port, port->fifosize);
- }
- }
- spin_unlock(&port->lock);
- tty_flip_buffer_push(&state->port);
- spin_lock(&port->lock);
- if (intr_status & uint_st->sirfsoc_txfifo_empty) {
- if (sirfport->tx_dma_chan)
- sirfsoc_uart_tx_with_dma(sirfport);
- else {
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- spin_unlock(&port->lock);
- return IRQ_HANDLED;
- } else {
- sirfsoc_uart_pio_tx_chars(sirfport,
- port->fifosize);
- if ((uart_circ_empty(xmit)) &&
- (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
- ufifo_st->ff_empty(port)))
- sirfsoc_uart_stop_tx(port);
- }
- }
- }
- spin_unlock(&port->lock);
-
- return IRQ_HANDLED;
-}
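The RX_DONE/RX_TIMEOUT swap in the handler above is a small two-state machine. A standalone sketch (assumption: hypothetical irq_enable()/irq_disable() helpers taking an interrupt mask, not the driver's register accessors):

enum rx_irq_mode { WAIT_FIRST_BYTE, DRAIN_TAIL };

extern void irq_enable(unsigned int mask);
extern void irq_disable(unsigned int mask);

static enum rx_irq_mode mode = WAIT_FIRST_BYTE;

static void rx_irq_pingpong(unsigned int status, unsigned int rx_done,
			    unsigned int rx_timeout)
{
	if (mode == WAIT_FIRST_BYTE && (status & rx_done)) {
		/* first byte arrived: stop RX_DONE, arm RX_TIMEOUT */
		irq_disable(rx_done);
		irq_enable(rx_timeout);
		mode = DRAIN_TAIL;
	} else if (mode == DRAIN_TAIL && (status & rx_timeout)) {
		/* line went idle: drain the FIFO, re-arm RX_DONE */
		irq_disable(rx_timeout);
		irq_enable(rx_done);
		mode = WAIT_FIRST_BYTE;
	}
}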
-
-static void sirfsoc_uart_rx_dma_complete_callback(void *param)
-{
-}
-
-/* submit rx dma task into dmaengine */
-static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
- ~SIRFUART_IO_MODE);
- sirfport->rx_dma_items.xmit.tail =
- sirfport->rx_dma_items.xmit.head = 0;
- sirfport->rx_dma_items.desc =
- dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
- sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
- SIRFSOC_RX_DMA_BUF_SIZE / 2,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
- if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
- dev_err(port->dev, "DMA slave single fail\n");
- return;
- }
- sirfport->rx_dma_items.desc->callback =
- sirfsoc_uart_rx_dma_complete_callback;
- sirfport->rx_dma_items.desc->callback_param = sirfport;
- sirfport->rx_dma_items.cookie =
- dmaengine_submit(sirfport->rx_dma_items.desc);
- dma_async_issue_pending(sirfport->rx_dma_chan);
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) |
- SIRFUART_RX_DMA_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- SIRFUART_RX_DMA_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
-}
-
-static unsigned int
-sirfsoc_usp_calc_sample_div(unsigned long set_rate,
- unsigned long ioclk_rate, unsigned long *sample_reg)
-{
- unsigned long min_delta = ~0UL;
- unsigned short sample_div;
- unsigned long ioclk_div = 0;
- unsigned long temp_delta;
-
- for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
- sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
- temp_delta = ioclk_rate -
- (ioclk_rate + (set_rate * sample_div) / 2)
- / (set_rate * sample_div) * set_rate * sample_div;
-
- temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
- if (temp_delta < min_delta) {
- ioclk_div = (2 * ioclk_rate /
- (set_rate * sample_div) + 1) / 2 - 1;
- if (ioclk_div > SIRF_IOCLK_DIV_MAX)
- continue;
- min_delta = temp_delta;
- *sample_reg = sample_div;
- if (!temp_delta)
- break;
- }
- }
- return ioclk_div;
-}
-
-static unsigned int
-sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
- unsigned long ioclk_rate, unsigned long *set_baud)
-{
- unsigned long min_delta = ~0UL;
- unsigned short sample_div;
- unsigned int regv = 0;
- unsigned long ioclk_div;
- unsigned long baud_tmp;
- int temp_delta;
-
- for (sample_div = SIRF_MIN_SAMPLE_DIV;
- sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
- ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
- if (ioclk_div > SIRF_IOCLK_DIV_MAX)
- continue;
- baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
- temp_delta = baud_tmp - baud_rate;
- temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
- if (temp_delta < min_delta) {
- regv = regv & (~SIRF_IOCLK_DIV_MASK);
- regv = regv | ioclk_div;
- regv = regv & (~SIRF_SAMPLE_DIV_MASK);
- regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
- min_delta = temp_delta;
- *set_baud = baud_tmp;
- }
- }
- return regv;
-}
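For SIRF_REAL_UART at clock rates other than the table-driven 150 MHz case, sirfsoc_uart_calc_sample_div() brute-forces the (ioclk_div, sample_div) pair whose achieved baud is closest to the target. The search can be replayed on a workstation; a host-side sketch (assumption: ordinary userspace C that ignores the register packing, not kernel code):

#include <stdio.h>

#define SIRF_MIN_SAMPLE_DIV	0xf
#define SIRF_MAX_SAMPLE_DIV	0x3f
#define SIRF_IOCLK_DIV_MAX	0xffff

int main(void)
{
	unsigned long ioclk_rate = 150000000, baud_rate = 115200;
	unsigned long best_delta = ~0UL, best_baud = 0, best_id = 0;
	unsigned int sample_div, best_sd = 0;

	for (sample_div = SIRF_MIN_SAMPLE_DIV;
	     sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
		unsigned long ioclk_div =
			ioclk_rate / (baud_rate * (sample_div + 1)) - 1;
		unsigned long baud_tmp, delta;

		if (ioclk_div > SIRF_IOCLK_DIV_MAX)
			continue;
		baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
		delta = baud_tmp > baud_rate ? baud_tmp - baud_rate
					     : baud_rate - baud_tmp;
		if (delta < best_delta) {
			best_delta = delta;
			best_baud = baud_tmp;
			best_sd = sample_div;
			best_id = ioclk_div;
		}
	}
	/* for these inputs the scan converges on 62 * 21 = 1302,
	 * i.e. roughly 115207 baud */
	printf("sample_div=%u ioclk_div=%lu -> %lu baud (target %lu)\n",
	       best_sd, best_id, best_baud, baud_rate);
	return 0;
}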
-
-static void sirfsoc_uart_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- unsigned long config_reg = 0;
- unsigned long baud_rate;
- unsigned long set_baud;
- unsigned long flags;
- unsigned long ic;
- unsigned int clk_div_reg = 0;
- unsigned long txfifo_op_reg, ioclk_rate;
- unsigned long rx_time_out;
- int threshold_div;
- u32 data_bit_len, stop_bit_len, len_val;
- unsigned long sample_div_reg = 0xf;
- ioclk_rate = port->uartclk;
-
- switch (termios->c_cflag & CSIZE) {
- default:
- case CS8:
- data_bit_len = 8;
- config_reg |= SIRFUART_DATA_BIT_LEN_8;
- break;
- case CS7:
- data_bit_len = 7;
- config_reg |= SIRFUART_DATA_BIT_LEN_7;
- break;
- case CS6:
- data_bit_len = 6;
- config_reg |= SIRFUART_DATA_BIT_LEN_6;
- break;
- case CS5:
- data_bit_len = 5;
- config_reg |= SIRFUART_DATA_BIT_LEN_5;
- break;
- }
- if (termios->c_cflag & CSTOPB) {
- config_reg |= SIRFUART_STOP_BIT_LEN_2;
- stop_bit_len = 2;
- } else
- stop_bit_len = 1;
-
- spin_lock_irqsave(&port->lock, flags);
- port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
- port->ignore_status_mask = 0;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
- uint_en->sirfsoc_parity_err_en;
- } else {
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
- }
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |=
- uint_en->sirfsoc_frm_err_en |
- uint_en->sirfsoc_parity_err_en;
- if (termios->c_cflag & PARENB) {
- if (termios->c_cflag & CMSPAR) {
- if (termios->c_cflag & PARODD)
- config_reg |= SIRFUART_STICK_BIT_MARK;
- else
- config_reg |= SIRFUART_STICK_BIT_SPACE;
- } else {
- if (termios->c_cflag & PARODD)
- config_reg |= SIRFUART_STICK_BIT_ODD;
- else
- config_reg |= SIRFUART_STICK_BIT_EVEN;
- }
- }
- } else {
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |=
- uint_en->sirfsoc_frm_err_en;
- if (termios->c_cflag & PARENB)
- dev_warn(port->dev,
- "USP-UART not support parity err\n");
- }
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |=
- uint_en->sirfsoc_rxd_brk_en;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |=
- uint_en->sirfsoc_rx_oflow_en;
- }
- if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= SIRFUART_DUMMY_READ;
- /* Hardware Flow Control Settings */
- if (UART_ENABLE_MS(port, termios->c_cflag)) {
- if (!sirfport->ms_enabled)
- sirfsoc_uart_enable_ms(port);
- } else {
- if (sirfport->ms_enabled)
- sirfsoc_uart_disable_ms(port);
- }
- baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
- if (ioclk_rate == 150000000) {
- for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
- if (baud_rate == baudrate_to_regv[ic].baud_rate)
- clk_div_reg = baudrate_to_regv[ic].reg_val;
- }
- set_baud = baud_rate;
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- if (unlikely(clk_div_reg == 0))
- clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
- ioclk_rate, &set_baud);
- wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
- } else {
- clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
- ioclk_rate, &sample_div_reg);
- sample_div_reg--;
- set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
- (sample_div_reg + 1));
- /* setting usp mode 2 */
- len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
- (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
- len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
- << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
- wr_regl(port, ureg->sirfsoc_mode2, len_val);
- }
- if (tty_termios_baud_rate(termios))
- tty_termios_encode_baud_rate(termios, set_baud, set_baud);
- /* set receive timeout && data bits len */
- rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
- rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
- txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op,
- (txfifo_op_reg & ~SIRFUART_FIFO_START));
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
- config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out);
- wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
- } else {
- /* tx frame ctrl */
- len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
- len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
- SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
- len_val |= ((data_bit_len - 1) <<
- SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
- len_val |= (((clk_div_reg & 0xc00) >> 10) <<
- SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
- wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
- /* rx frame ctrl */
- len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
- len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
- SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
- len_val |= (data_bit_len - 1) <<
- SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
- len_val |= (((clk_div_reg & 0xf000) >> 12) <<
- SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
- wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
- /* async param */
- wr_regl(port, ureg->sirfsoc_async_param_reg,
- (SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) |
- (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
- SIRFSOC_USP_ASYNC_DIV2_OFFSET);
- }
- if (sirfport->tx_dma_chan)
- wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
- else
- wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
- if (sirfport->rx_dma_chan)
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
- ~SIRFUART_IO_MODE);
- else
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
- SIRFUART_IO_MODE);
- sirfport->rx_period_time = 20000000;
- /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
- if (set_baud < 1000000)
- threshold_div = 1;
- else
- threshold_div = 2;
- wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
- SIRFUART_FIFO_THD(port) / threshold_div);
- wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
- SIRFUART_FIFO_THD(port) / threshold_div);
- txfifo_op_reg |= SIRFUART_FIFO_START;
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
- uart_update_timeout(port, termios->c_cflag, set_baud);
- wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- if (!state)
- clk_prepare_enable(sirfport->clk);
- else
- clk_disable_unprepare(sirfport->clk);
-}
-
-static int sirfsoc_uart_startup(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- unsigned int index = port->line;
- int ret;
- irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
- ret = request_irq(port->irq,
- sirfsoc_uart_isr,
- 0,
- SIRFUART_PORT_NAME,
- sirfport);
- if (ret != 0) {
- dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
- index, port->irq);
- goto irq_err;
- }
- /* initial hardware settings */
- wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
- SIRFUART_IO_MODE);
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
- SIRFUART_IO_MODE);
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
- ~SIRFUART_RX_DMA_FLUSH);
- wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
- wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
- wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
- wr_regl(port, ureg->sirfsoc_mode1,
- SIRFSOC_USP_ENDIAN_CTRL_LSBF |
- SIRFSOC_USP_EN);
- wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
- wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
- if (sirfport->rx_dma_chan)
- wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
- SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
- SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
- SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
- if (sirfport->tx_dma_chan) {
- sirfport->tx_dma_state = TX_DMA_IDLE;
- wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
- SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
- SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
- SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
- }
- sirfport->ms_enabled = false;
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
- sirfport->hw_flow_ctrl) {
- irq_modify_status(gpio_to_irq(sirfport->cts_gpio),
- IRQ_NOREQUEST, IRQ_NOAUTOEN);
- ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
- sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
- if (ret != 0) {
- dev_err(port->dev, "UART-USP:request gpio irq fail\n");
- goto init_rx_err;
- }
- }
- if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
- sirfport->rx_dma_chan)
- wr_regl(port, ureg->sirfsoc_swh_dma_io,
- SIRFUART_CLEAR_RX_ADDR_EN);
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
- sirfport->rx_dma_chan)
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
- SIRFSOC_USP_FRADDR_CLR_EN);
- if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
- sirfport->is_hrt_enabled = true;
- sirfport->rx_period_time = 20000000;
- sirfport->rx_last_pos = -1;
- sirfport->pio_fetch_cnt = 0;
- sirfport->rx_dma_items.xmit.tail =
- sirfport->rx_dma_items.xmit.head = 0;
- hrtimer_start(&sirfport->hrt,
- ns_to_ktime(sirfport->rx_period_time),
- HRTIMER_MODE_REL);
- }
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
- if (sirfport->rx_dma_chan)
- sirfsoc_uart_start_next_rx_dma(port);
- else {
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) |
- SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- }
- enable_irq(port->irq);
-
- return 0;
-init_rx_err:
- free_irq(port->irq, sirfport);
-irq_err:
- return ret;
-}
-
-static void sirfsoc_uart_shutdown(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct circ_buf *xmit;
-
- xmit = &sirfport->rx_dma_items.xmit;
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
- else
- wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);
-
- free_irq(port->irq, sirfport);
- if (sirfport->ms_enabled)
- sirfsoc_uart_disable_ms(port);
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
- sirfport->hw_flow_ctrl) {
- gpio_set_value(sirfport->rts_gpio, 1);
- free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
- }
- if (sirfport->tx_dma_chan)
- sirfport->tx_dma_state = TX_DMA_IDLE;
- if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
- while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
- !CIRC_CNT(xmit->head, xmit->tail,
- SIRFSOC_RX_DMA_BUF_SIZE))
- ;
- sirfport->is_hrt_enabled = false;
- hrtimer_cancel(&sirfport->hrt);
- }
-}
-
-static const char *sirfsoc_uart_type(struct uart_port *port)
-{
- return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
-}
-
-static int sirfsoc_uart_request_port(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
- void *ret;
- ret = request_mem_region(port->mapbase,
- SIRFUART_MAP_SIZE, uart_param->port_name);
- return ret ? 0 : -EBUSY;
-}
-
-static void sirfsoc_uart_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
-}
-
-static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = SIRFSOC_PORT_TYPE;
- sirfsoc_uart_request_port(port);
- }
-}
-
-static const struct uart_ops sirfsoc_uart_ops = {
- .tx_empty = sirfsoc_uart_tx_empty,
- .get_mctrl = sirfsoc_uart_get_mctrl,
- .set_mctrl = sirfsoc_uart_set_mctrl,
- .stop_tx = sirfsoc_uart_stop_tx,
- .start_tx = sirfsoc_uart_start_tx,
- .stop_rx = sirfsoc_uart_stop_rx,
- .enable_ms = sirfsoc_uart_enable_ms,
- .break_ctl = sirfsoc_uart_break_ctl,
- .startup = sirfsoc_uart_startup,
- .shutdown = sirfsoc_uart_shutdown,
- .set_termios = sirfsoc_uart_set_termios,
- .pm = sirfsoc_uart_pm,
- .type = sirfsoc_uart_type,
- .release_port = sirfsoc_uart_release_port,
- .request_port = sirfsoc_uart_request_port,
- .config_port = sirfsoc_uart_config_port,
-};
-
-#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
-static int __init
-sirfsoc_uart_console_setup(struct console *co, char *options)
-{
- unsigned int baud = 115200;
- unsigned int bits = 8;
- unsigned int parity = 'n';
- unsigned int flow = 'n';
- struct sirfsoc_uart_port *sirfport;
- struct sirfsoc_register *ureg;
- if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
- co->index = 1;
- sirfport = sirf_ports[co->index];
- if (!sirfport)
- return -ENODEV;
- ureg = &sirfport->uart_reg->uart_reg;
- if (!sirfport->port.mapbase)
- return -ENODEV;
-
- /* enable usp in mode1 register */
- if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
- wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
- SIRFSOC_USP_ENDIAN_CTRL_LSBF);
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- sirfport->port.cons = co;
-
- /* default console tx/rx transfer using io mode */
- sirfport->rx_dma_chan = NULL;
- sirfport->tx_dma_chan = NULL;
- return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
-}
-
-static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
- while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
- ufifo_st->ff_full(port))
- cpu_relax();
- wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
-}
-
-static void sirfsoc_uart_console_write(struct console *co, const char *s,
- unsigned int count)
-{
- struct sirfsoc_uart_port *sirfport = sirf_ports[co->index];
-
- uart_console_write(&sirfport->port, s, count,
- sirfsoc_uart_console_putchar);
-}
-
-static struct console sirfsoc_uart_console = {
- .name = SIRFSOC_UART_NAME,
- .device = uart_console_device,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .write = sirfsoc_uart_console_write,
- .setup = sirfsoc_uart_console_setup,
- .data = &sirfsoc_uart_drv,
-};
-
-static int __init sirfsoc_uart_console_init(void)
-{
- register_console(&sirfsoc_uart_console);
- return 0;
-}
-console_initcall(sirfsoc_uart_console_init);
-#endif
-
-static struct uart_driver sirfsoc_uart_drv = {
- .owner = THIS_MODULE,
- .driver_name = SIRFUART_PORT_NAME,
- .nr = SIRFSOC_UART_NR,
- .dev_name = SIRFSOC_UART_NAME,
- .major = SIRFSOC_UART_MAJOR,
- .minor = SIRFSOC_UART_MINOR,
-#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
- .cons = &sirfsoc_uart_console,
-#else
- .cons = NULL,
-#endif
-};
-
-static enum hrtimer_restart
- sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
-{
- struct sirfsoc_uart_port *sirfport;
- struct uart_port *port;
- int count, inserted;
- struct dma_tx_state tx_state;
- struct tty_struct *tty;
- struct sirfsoc_register *ureg;
- struct circ_buf *xmit;
- struct sirfsoc_fifo_status *ufifo_st;
- int max_pio_cnt;
-
- sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
- port = &sirfport->port;
- inserted = 0;
- tty = port->state->port.tty;
- ureg = &sirfport->uart_reg->uart_reg;
- xmit = &sirfport->rx_dma_items.xmit;
- ufifo_st = &sirfport->uart_reg->fifo_status;
-
- dmaengine_tx_status(sirfport->rx_dma_chan,
- sirfport->rx_dma_items.cookie, &tx_state);
- if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
- sirfport->rx_last_pos) {
- xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
- sirfport->rx_last_pos = xmit->head;
- sirfport->pio_fetch_cnt = 0;
- }
- count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
- SIRFSOC_RX_DMA_BUF_SIZE);
- while (count > 0) {
- inserted = tty_insert_flip_string(tty->port,
- (const unsigned char *)&xmit->buf[xmit->tail], count);
- if (!inserted)
- goto next_hrt;
- port->icount.rx += inserted;
- xmit->tail = (xmit->tail + inserted) &
- (SIRFSOC_RX_DMA_BUF_SIZE - 1);
- count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
- SIRFSOC_RX_DMA_BUF_SIZE);
- tty_flip_buffer_push(tty->port);
- }
- /*
- * If the RX DMA buffer has been fully pushed into the tty buffer
- * and only a little data (less than one DMA transfer unit) is
- * left in the RX FIFO, fetch it in PIO mode and switch straight
- * back to DMA.
- */
- if (!inserted && !count &&
- ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
- dmaengine_pause(sirfport->rx_dma_chan);
- /* switch to pio mode */
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
- SIRFUART_IO_MODE);
- /*
- * The UART controller's SWH_DMA_IO register has a
- * CLEAR_RX_ADDR_EN bit: when it detects a switch from I/O to
- * DMA mode, it clears the two low bits of the read pointer
- * (USP has a similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL).
- * Data fetched from the RX FIFO into the DMA buffer in PIO mode
- * would be overwritten by DMA after switching back, because of a
- * hardware quirk: after the switch, the RX FIFO status still
- * reports the bytes that PIO already fetched. Record the PIO
- * fetch count to avoid fetching the same data twice.
- */
- max_pio_cnt = 3;
- while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- ufifo_st->ff_empty(port)) && max_pio_cnt--) {
- xmit->buf[xmit->head] =
- rd_regl(port, ureg->sirfsoc_rx_fifo_data);
- xmit->head = (xmit->head + 1) &
- (SIRFSOC_RX_DMA_BUF_SIZE - 1);
- sirfport->pio_fetch_cnt++;
- }
- /* switch back to dma mode */
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
- rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
- ~SIRFUART_IO_MODE);
- dmaengine_resume(sirfport->rx_dma_chan);
- }
-next_hrt:
- hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
- return HRTIMER_RESTART;
-}
-
-static const struct of_device_id sirfsoc_uart_ids[] = {
- { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
- { .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
- { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
- { .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp},
- {}
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
-
-static int sirfsoc_uart_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct sirfsoc_uart_port *sirfport;
- struct uart_port *port;
- struct resource *res;
- int ret;
- struct dma_slave_config slv_cfg = {
- .src_maxburst = 1,
- };
- struct dma_slave_config tx_slv_cfg = {
- .dst_maxburst = 2,
- };
- const struct of_device_id *match;
-
- match = of_match_node(sirfsoc_uart_ids, np);
- sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
- if (!sirfport) {
- ret = -ENOMEM;
- goto err;
- }
- sirfport->port.line = of_alias_get_id(np, "serial");
- if (sirfport->port.line >= ARRAY_SIZE(sirf_ports)) {
- dev_err(&pdev->dev, "serial%d out of range\n",
- sirfport->port.line);
- return -EINVAL;
- }
- sirf_ports[sirfport->port.line] = sirfport;
- sirfport->port.iotype = UPIO_MEM;
- sirfport->port.flags = UPF_BOOT_AUTOCONF;
- port = &sirfport->port;
- port->dev = &pdev->dev;
- port->private_data = sirfport;
- sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
-
- sirfport->hw_flow_ctrl =
- of_property_read_bool(np, "uart-has-rtscts") ||
- of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
- if (of_device_is_compatible(np, "sirf,prima2-uart") ||
- of_device_is_compatible(np, "sirf,atlas7-uart"))
- sirfport->uart_reg->uart_type = SIRF_REAL_UART;
- if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
- of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
- sirfport->uart_reg->uart_type = SIRF_USP_UART;
- if (!sirfport->hw_flow_ctrl)
- goto usp_no_flow_control;
- if (of_find_property(np, "cts-gpios", NULL))
- sirfport->cts_gpio =
- of_get_named_gpio(np, "cts-gpios", 0);
- else
- sirfport->cts_gpio = -1;
- if (of_find_property(np, "rts-gpios", NULL))
- sirfport->rts_gpio =
- of_get_named_gpio(np, "rts-gpios", 0);
- else
- sirfport->rts_gpio = -1;
-
- if ((!gpio_is_valid(sirfport->cts_gpio) ||
- !gpio_is_valid(sirfport->rts_gpio))) {
- ret = -EINVAL;
- dev_err(&pdev->dev,
- "Usp flow control must have cts and rts gpio");
- goto err;
- }
- ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
- "usp-cts-gpio");
- if (ret) {
- dev_err(&pdev->dev, "Unable request cts gpio");
- goto err;
- }
- gpio_direction_input(sirfport->cts_gpio);
- ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
- "usp-rts-gpio");
- if (ret) {
- dev_err(&pdev->dev, "Unable request rts gpio");
- goto err;
- }
- gpio_direction_output(sirfport->rts_gpio, 1);
- }
-usp_no_flow_control:
- if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
- of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
- sirfport->is_atlas7 = true;
-
- if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
- dev_err(&pdev->dev,
- "Unable to find fifosize in uart node.\n");
- ret = -EFAULT;
- goto err;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Insufficient resources.\n");
- ret = -EFAULT;
- goto err;
- }
- port->mapbase = res->start;
- port->membase = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
- if (!port->membase) {
- dev_err(&pdev->dev, "Cannot remap resource.\n");
- ret = -ENOMEM;
- goto err;
- }
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Insufficient resources.\n");
- ret = -EFAULT;
- goto err;
- }
- port->irq = res->start;
-
- sirfport->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sirfport->clk)) {
- ret = PTR_ERR(sirfport->clk);
- goto err;
- }
- port->uartclk = clk_get_rate(sirfport->clk);
-
- port->ops = &sirfsoc_uart_ops;
- spin_lock_init(&port->lock);
-
- platform_set_drvdata(pdev, sirfport);
- ret = uart_add_one_port(&sirfsoc_uart_drv, port);
- if (ret != 0) {
- dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
- goto err;
- }
-
- sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
- sirfport->rx_dma_items.xmit.buf =
- dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- &sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
- if (!sirfport->rx_dma_items.xmit.buf) {
- dev_err(port->dev, "Uart alloc bufa failed\n");
- ret = -ENOMEM;
- goto alloc_coherent_err;
- }
- sirfport->rx_dma_items.xmit.head =
- sirfport->rx_dma_items.xmit.tail = 0;
- if (sirfport->rx_dma_chan)
- dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
- sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
- if (sirfport->tx_dma_chan)
- dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
- if (sirfport->rx_dma_chan) {
- hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
- sirfport->is_hrt_enabled = false;
- }
-
- return 0;
-alloc_coherent_err:
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items.xmit.buf,
- sirfport->rx_dma_items.dma_addr);
- dma_release_channel(sirfport->rx_dma_chan);
-err:
- return ret;
-}
-
-static int sirfsoc_uart_remove(struct platform_device *pdev)
-{
- struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
- struct uart_port *port = &sirfport->port;
- uart_remove_one_port(&sirfsoc_uart_drv, port);
- if (sirfport->rx_dma_chan) {
- dmaengine_terminate_all(sirfport->rx_dma_chan);
- dma_release_channel(sirfport->rx_dma_chan);
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items.xmit.buf,
- sirfport->rx_dma_items.dma_addr);
- }
- if (sirfport->tx_dma_chan) {
- dmaengine_terminate_all(sirfport->tx_dma_chan);
- dma_release_channel(sirfport->tx_dma_chan);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int
-sirfsoc_uart_suspend(struct device *pdev)
-{
- struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
- struct uart_port *port = &sirfport->port;
- uart_suspend_port(&sirfsoc_uart_drv, port);
- return 0;
-}
-
-static int sirfsoc_uart_resume(struct device *pdev)
-{
- struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
- struct uart_port *port = &sirfport->port;
- uart_resume_port(&sirfsoc_uart_drv, port);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
-};
-
-static struct platform_driver sirfsoc_uart_driver = {
- .probe = sirfsoc_uart_probe,
- .remove = sirfsoc_uart_remove,
- .driver = {
- .name = SIRFUART_PORT_NAME,
- .of_match_table = sirfsoc_uart_ids,
- .pm = &sirfsoc_uart_pm_ops,
- },
-};
-
-static int __init sirfsoc_uart_init(void)
-{
- int ret = 0;
-
- ret = uart_register_driver(&sirfsoc_uart_drv);
- if (ret)
- goto out;
-
- ret = platform_driver_register(&sirfsoc_uart_driver);
- if (ret)
- uart_unregister_driver(&sirfsoc_uart_drv);
-out:
- return ret;
-}
-module_init(sirfsoc_uart_init);
-
-static void __exit sirfsoc_uart_exit(void)
-{
- platform_driver_unregister(&sirfsoc_uart_driver);
- uart_unregister_driver(&sirfsoc_uart_drv);
-}
-module_exit(sirfsoc_uart_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
-MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
deleted file mode 100644
index fb88ac565227..000000000000
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ /dev/null
@@ -1,447 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Drivers for CSR SiRFprimaII onboard UARTs.
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-#include <linux/bitops.h>
-#include <linux/log2.h>
-#include <linux/hrtimer.h>
-struct sirfsoc_uart_param {
- const char *uart_name;
- const char *port_name;
-};
-
-struct sirfsoc_register {
- /* hardware uart specific */
- u32 sirfsoc_line_ctrl;
- u32 sirfsoc_divisor;
- /* uart - usp common */
- u32 sirfsoc_tx_rx_en;
- u32 sirfsoc_int_en_reg;
- u32 sirfsoc_int_st_reg;
- u32 sirfsoc_int_en_clr_reg;
- u32 sirfsoc_tx_dma_io_ctrl;
- u32 sirfsoc_tx_dma_io_len;
- u32 sirfsoc_tx_fifo_ctrl;
- u32 sirfsoc_tx_fifo_level_chk;
- u32 sirfsoc_tx_fifo_op;
- u32 sirfsoc_tx_fifo_status;
- u32 sirfsoc_tx_fifo_data;
- u32 sirfsoc_rx_dma_io_ctrl;
- u32 sirfsoc_rx_dma_io_len;
- u32 sirfsoc_rx_fifo_ctrl;
- u32 sirfsoc_rx_fifo_level_chk;
- u32 sirfsoc_rx_fifo_op;
- u32 sirfsoc_rx_fifo_status;
- u32 sirfsoc_rx_fifo_data;
- u32 sirfsoc_afc_ctrl;
- u32 sirfsoc_swh_dma_io;
- /* hardware usp specific */
- u32 sirfsoc_mode1;
- u32 sirfsoc_mode2;
- u32 sirfsoc_tx_frame_ctrl;
- u32 sirfsoc_rx_frame_ctrl;
- u32 sirfsoc_async_param_reg;
-};
-
-typedef u32 (*fifo_full_mask)(struct uart_port *port);
-typedef u32 (*fifo_empty_mask)(struct uart_port *port);
-
-struct sirfsoc_fifo_status {
- fifo_full_mask ff_full;
- fifo_empty_mask ff_empty;
-};
-
-struct sirfsoc_int_en {
- u32 sirfsoc_rx_done_en;
- u32 sirfsoc_tx_done_en;
- u32 sirfsoc_rx_oflow_en;
- u32 sirfsoc_tx_allout_en;
- u32 sirfsoc_rx_io_dma_en;
- u32 sirfsoc_tx_io_dma_en;
- u32 sirfsoc_rxfifo_full_en;
- u32 sirfsoc_txfifo_empty_en;
- u32 sirfsoc_rxfifo_thd_en;
- u32 sirfsoc_txfifo_thd_en;
- u32 sirfsoc_frm_err_en;
- u32 sirfsoc_rxd_brk_en;
- u32 sirfsoc_rx_timeout_en;
- u32 sirfsoc_parity_err_en;
- u32 sirfsoc_cts_en;
- u32 sirfsoc_rts_en;
-};
-
-struct sirfsoc_int_status {
- u32 sirfsoc_rx_done;
- u32 sirfsoc_tx_done;
- u32 sirfsoc_rx_oflow;
- u32 sirfsoc_tx_allout;
- u32 sirfsoc_rx_io_dma;
- u32 sirfsoc_tx_io_dma;
- u32 sirfsoc_rxfifo_full;
- u32 sirfsoc_txfifo_empty;
- u32 sirfsoc_rxfifo_thd;
- u32 sirfsoc_txfifo_thd;
- u32 sirfsoc_frm_err;
- u32 sirfsoc_rxd_brk;
- u32 sirfsoc_rx_timeout;
- u32 sirfsoc_parity_err;
- u32 sirfsoc_cts;
- u32 sirfsoc_rts;
-};
-
-enum sirfsoc_uart_type {
- SIRF_REAL_UART,
- SIRF_USP_UART,
-};
-
-struct sirfsoc_uart_register {
- struct sirfsoc_register uart_reg;
- struct sirfsoc_int_en uart_int_en;
- struct sirfsoc_int_status uart_int_st;
- struct sirfsoc_fifo_status fifo_status;
- struct sirfsoc_uart_param uart_param;
- enum sirfsoc_uart_type uart_type;
-};
-
-static u32 uart_usp_ff_full_mask(struct uart_port *port)
-{
- u32 full_bit;
-
- full_bit = ilog2(port->fifosize);
- return (1 << full_bit);
-}
-
-static u32 uart_usp_ff_empty_mask(struct uart_port *port)
-{
- u32 empty_bit;
-
- empty_bit = ilog2(port->fifosize) + 1;
- return (1 << empty_bit);
-}
-
-static struct sirfsoc_uart_register sirfsoc_usp = {
- .uart_reg = {
- .sirfsoc_mode1 = 0x0000,
- .sirfsoc_mode2 = 0x0004,
- .sirfsoc_tx_frame_ctrl = 0x0008,
- .sirfsoc_rx_frame_ctrl = 0x000c,
- .sirfsoc_tx_rx_en = 0x0010,
- .sirfsoc_int_en_reg = 0x0014,
- .sirfsoc_int_st_reg = 0x0018,
- .sirfsoc_async_param_reg = 0x0024,
- .sirfsoc_tx_dma_io_ctrl = 0x0100,
- .sirfsoc_tx_dma_io_len = 0x0104,
- .sirfsoc_tx_fifo_ctrl = 0x0108,
- .sirfsoc_tx_fifo_level_chk = 0x010c,
- .sirfsoc_tx_fifo_op = 0x0110,
- .sirfsoc_tx_fifo_status = 0x0114,
- .sirfsoc_tx_fifo_data = 0x0118,
- .sirfsoc_rx_dma_io_ctrl = 0x0120,
- .sirfsoc_rx_dma_io_len = 0x0124,
- .sirfsoc_rx_fifo_ctrl = 0x0128,
- .sirfsoc_rx_fifo_level_chk = 0x012c,
- .sirfsoc_rx_fifo_op = 0x0130,
- .sirfsoc_rx_fifo_status = 0x0134,
- .sirfsoc_rx_fifo_data = 0x0138,
- .sirfsoc_int_en_clr_reg = 0x140,
- },
- .uart_int_en = {
- .sirfsoc_rx_done_en = BIT(0),
- .sirfsoc_tx_done_en = BIT(1),
- .sirfsoc_rx_oflow_en = BIT(2),
- .sirfsoc_tx_allout_en = BIT(3),
- .sirfsoc_rx_io_dma_en = BIT(4),
- .sirfsoc_tx_io_dma_en = BIT(5),
- .sirfsoc_rxfifo_full_en = BIT(6),
- .sirfsoc_txfifo_empty_en = BIT(7),
- .sirfsoc_rxfifo_thd_en = BIT(8),
- .sirfsoc_txfifo_thd_en = BIT(9),
- .sirfsoc_frm_err_en = BIT(10),
- .sirfsoc_rx_timeout_en = BIT(11),
- .sirfsoc_rxd_brk_en = BIT(15),
- },
- .uart_int_st = {
- .sirfsoc_rx_done = BIT(0),
- .sirfsoc_tx_done = BIT(1),
- .sirfsoc_rx_oflow = BIT(2),
- .sirfsoc_tx_allout = BIT(3),
- .sirfsoc_rx_io_dma = BIT(4),
- .sirfsoc_tx_io_dma = BIT(5),
- .sirfsoc_rxfifo_full = BIT(6),
- .sirfsoc_txfifo_empty = BIT(7),
- .sirfsoc_rxfifo_thd = BIT(8),
- .sirfsoc_txfifo_thd = BIT(9),
- .sirfsoc_frm_err = BIT(10),
- .sirfsoc_rx_timeout = BIT(11),
- .sirfsoc_rxd_brk = BIT(15),
- },
- .fifo_status = {
- .ff_full = uart_usp_ff_full_mask,
- .ff_empty = uart_usp_ff_empty_mask,
- },
- .uart_param = {
- .uart_name = "ttySiRF",
- .port_name = "sirfsoc-uart",
- },
-};
-
-static struct sirfsoc_uart_register sirfsoc_uart = {
- .uart_reg = {
- .sirfsoc_line_ctrl = 0x0040,
- .sirfsoc_tx_rx_en = 0x004c,
- .sirfsoc_divisor = 0x0050,
- .sirfsoc_int_en_reg = 0x0054,
- .sirfsoc_int_st_reg = 0x0058,
- .sirfsoc_int_en_clr_reg = 0x0060,
- .sirfsoc_tx_dma_io_ctrl = 0x0100,
- .sirfsoc_tx_dma_io_len = 0x0104,
- .sirfsoc_tx_fifo_ctrl = 0x0108,
- .sirfsoc_tx_fifo_level_chk = 0x010c,
- .sirfsoc_tx_fifo_op = 0x0110,
- .sirfsoc_tx_fifo_status = 0x0114,
- .sirfsoc_tx_fifo_data = 0x0118,
- .sirfsoc_rx_dma_io_ctrl = 0x0120,
- .sirfsoc_rx_dma_io_len = 0x0124,
- .sirfsoc_rx_fifo_ctrl = 0x0128,
- .sirfsoc_rx_fifo_level_chk = 0x012c,
- .sirfsoc_rx_fifo_op = 0x0130,
- .sirfsoc_rx_fifo_status = 0x0134,
- .sirfsoc_rx_fifo_data = 0x0138,
- .sirfsoc_afc_ctrl = 0x0140,
- .sirfsoc_swh_dma_io = 0x0148,
- },
- .uart_int_en = {
- .sirfsoc_rx_done_en = BIT(0),
- .sirfsoc_tx_done_en = BIT(1),
- .sirfsoc_rx_oflow_en = BIT(2),
- .sirfsoc_tx_allout_en = BIT(3),
- .sirfsoc_rx_io_dma_en = BIT(4),
- .sirfsoc_tx_io_dma_en = BIT(5),
- .sirfsoc_rxfifo_full_en = BIT(6),
- .sirfsoc_txfifo_empty_en = BIT(7),
- .sirfsoc_rxfifo_thd_en = BIT(8),
- .sirfsoc_txfifo_thd_en = BIT(9),
- .sirfsoc_frm_err_en = BIT(10),
- .sirfsoc_rxd_brk_en = BIT(11),
- .sirfsoc_rx_timeout_en = BIT(12),
- .sirfsoc_parity_err_en = BIT(13),
- .sirfsoc_cts_en = BIT(14),
- .sirfsoc_rts_en = BIT(15),
- },
- .uart_int_st = {
- .sirfsoc_rx_done = BIT(0),
- .sirfsoc_tx_done = BIT(1),
- .sirfsoc_rx_oflow = BIT(2),
- .sirfsoc_tx_allout = BIT(3),
- .sirfsoc_rx_io_dma = BIT(4),
- .sirfsoc_tx_io_dma = BIT(5),
- .sirfsoc_rxfifo_full = BIT(6),
- .sirfsoc_txfifo_empty = BIT(7),
- .sirfsoc_rxfifo_thd = BIT(8),
- .sirfsoc_txfifo_thd = BIT(9),
- .sirfsoc_frm_err = BIT(10),
- .sirfsoc_rxd_brk = BIT(11),
- .sirfsoc_rx_timeout = BIT(12),
- .sirfsoc_parity_err = BIT(13),
- .sirfsoc_cts = BIT(14),
- .sirfsoc_rts = BIT(15),
- },
- .fifo_status = {
- .ff_full = uart_usp_ff_full_mask,
- .ff_empty = uart_usp_ff_empty_mask,
- },
- .uart_param = {
- .uart_name = "ttySiRF",
- .port_name = "sirfsoc_uart",
- },
-};
-/* uart io ctrl */
-#define SIRFUART_DATA_BIT_LEN_MASK 0x3
-#define SIRFUART_DATA_BIT_LEN_5 0
-#define SIRFUART_DATA_BIT_LEN_6 1
-#define SIRFUART_DATA_BIT_LEN_7 2
-#define SIRFUART_DATA_BIT_LEN_8 3
-#define SIRFUART_STOP_BIT_LEN_1 0
-#define SIRFUART_STOP_BIT_LEN_2 BIT(2)
-#define SIRFUART_PARITY_EN BIT(3)
-#define SIRFUART_EVEN_BIT BIT(4)
-#define SIRFUART_STICK_BIT_MASK (7 << 3)
-#define SIRFUART_STICK_BIT_NONE (0 << 3)
-#define SIRFUART_STICK_BIT_EVEN BIT(3)
-#define SIRFUART_STICK_BIT_ODD (3 << 3)
-#define SIRFUART_STICK_BIT_MARK (5 << 3)
-#define SIRFUART_STICK_BIT_SPACE (7 << 3)
-#define SIRFUART_SET_BREAK BIT(6)
-#define SIRFUART_LOOP_BACK BIT(7)
-#define SIRFUART_PARITY_MASK (7 << 3)
-#define SIRFUART_DUMMY_READ BIT(16)
-#define SIRFUART_AFC_CTRL_RX_THD 0x70
-#define SIRFUART_AFC_RX_EN BIT(8)
-#define SIRFUART_AFC_TX_EN BIT(9)
-#define SIRFUART_AFC_CTS_CTRL BIT(10)
-#define SIRFUART_AFC_RTS_CTRL BIT(11)
-#define SIRFUART_AFC_CTS_STATUS BIT(12)
-#define SIRFUART_AFC_RTS_STATUS BIT(13)
-/* UART FIFO Register */
-#define SIRFUART_FIFO_STOP 0x0
-#define SIRFUART_FIFO_RESET BIT(0)
-#define SIRFUART_FIFO_START BIT(1)
-
-#define SIRFUART_RX_EN BIT(0)
-#define SIRFUART_TX_EN BIT(1)
-
-#define SIRFUART_IO_MODE BIT(0)
-#define SIRFUART_DMA_MODE 0x0
-#define SIRFUART_RX_DMA_FLUSH 0x4
-
-#define SIRFUART_CLEAR_RX_ADDR_EN 0x2
-/* Baud Rate Calculation */
-#define SIRF_USP_MIN_SAMPLE_DIV 0x1
-#define SIRF_MIN_SAMPLE_DIV 0xf
-#define SIRF_MAX_SAMPLE_DIV 0x3f
-#define SIRF_IOCLK_DIV_MAX 0xffff
-#define SIRF_SAMPLE_DIV_SHIFT 16
-#define SIRF_IOCLK_DIV_MASK 0xffff
-#define SIRF_SAMPLE_DIV_MASK 0x3f0000
-#define SIRF_BAUD_RATE_SUPPORT_NR 18
-
-/* USP SPEC */
-#define SIRFSOC_USP_ENDIAN_CTRL_LSBF BIT(4)
-#define SIRFSOC_USP_EN BIT(5)
-#define SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET 0
-#define SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET 8
-#define SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK 0x3ff
-#define SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET 21
-#define SIRFSOC_USP_TX_DATA_LEN_OFFSET 0
-#define SIRFSOC_USP_TX_SYNC_LEN_OFFSET 8
-#define SIRFSOC_USP_TX_FRAME_LEN_OFFSET 16
-#define SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET 24
-#define SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET 30
-#define SIRFSOC_USP_RX_DATA_LEN_OFFSET 0
-#define SIRFSOC_USP_RX_FRAME_LEN_OFFSET 8
-#define SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET 16
-#define SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET 24
-#define SIRFSOC_USP_ASYNC_DIV2_MASK 0x3f
-#define SIRFSOC_USP_ASYNC_DIV2_OFFSET 16
-#define SIRFSOC_USP_LOOP_BACK_CTRL BIT(2)
-#define SIRFSOC_USP_FRADDR_CLR_EN BIT(1)
-/* USP-UART Common */
-#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
-#define SIRFUART_RECV_TIMEOUT_VALUE(x) \
- (((x) > 0xFFFF) ? 0xFFFF : ((x) & 0xFFFF))
-#define SIRFUART_USP_RECV_TIMEOUT(x) (x & 0xFFFF)
-#define SIRFUART_UART_RECV_TIMEOUT(x) ((x & 0xFFFF) << 16)
-
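As a worked example of the timeout macros being removed above: at br = 115200 baud with to = 20 ms, SIRFSOC_UART_RX_TIMEOUT expands to (115200 * ((20 + 999) / 1000)) / 1000 = (115200 * 1) / 1000 = 115 bit periods, which SIRFUART_RECV_TIMEOUT_VALUE() would then clamp into 16 bits.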
-#define SIRFUART_FIFO_THD(port) (port->fifosize >> 1)
-#define SIRFUART_ERR_INT_STAT(uint_st, uart_type) \
- (uint_st->sirfsoc_rx_oflow | \
- uint_st->sirfsoc_frm_err | \
- uint_st->sirfsoc_rxd_brk | \
- ((uart_type != SIRF_REAL_UART) ? \
- 0 : uint_st->sirfsoc_parity_err))
-#define SIRFUART_RX_IO_INT_EN(uint_en, uart_type) \
- (uint_en->sirfsoc_rx_done_en |\
- uint_en->sirfsoc_rxfifo_thd_en |\
- uint_en->sirfsoc_rxfifo_full_en |\
- uint_en->sirfsoc_frm_err_en |\
- uint_en->sirfsoc_rx_oflow_en |\
- uint_en->sirfsoc_rxd_brk_en |\
- ((uart_type != SIRF_REAL_UART) ? \
- 0 : uint_en->sirfsoc_parity_err_en))
-#define SIRFUART_RX_IO_INT_ST(uint_st) \
- (uint_st->sirfsoc_rxfifo_thd |\
- uint_st->sirfsoc_rxfifo_full|\
- uint_st->sirfsoc_rx_done |\
- uint_st->sirfsoc_rx_timeout)
-#define SIRFUART_CTS_INT_ST(uint_st) (uint_st->sirfsoc_cts)
-#define SIRFUART_RX_DMA_INT_EN(uint_en, uart_type) \
- (uint_en->sirfsoc_frm_err_en |\
- uint_en->sirfsoc_rx_oflow_en |\
- uint_en->sirfsoc_rxd_brk_en |\
- ((uart_type != SIRF_REAL_UART) ? \
- 0 : uint_en->sirfsoc_parity_err_en))
-/* Generic Definitions */
-#define SIRFSOC_UART_NAME "ttySiRF"
-#define SIRFSOC_UART_MAJOR 0
-#define SIRFSOC_UART_MINOR 0
-#define SIRFUART_PORT_NAME "sirfsoc-uart"
-#define SIRFUART_MAP_SIZE 0x200
-#define SIRFSOC_UART_NR 11
-#define SIRFSOC_PORT_TYPE 0xa5
-
-/* Uart Common Use Macro*/
-#define SIRFSOC_RX_DMA_BUF_SIZE (1024 * 32)
-#define BYTES_TO_ALIGN(dma_addr) ((unsigned long)(dma_addr) & 0x3)
-/* Uart Fifo Level Chk */
-#define SIRFUART_TX_FIFO_SC_OFFSET 0
-#define SIRFUART_TX_FIFO_LC_OFFSET 10
-#define SIRFUART_TX_FIFO_HC_OFFSET 20
-#define SIRFUART_TX_FIFO_CHK_SC(line, value) ((((line) == 1) ? (value & 0x3) :\
- (value & 0x1f)) << SIRFUART_TX_FIFO_SC_OFFSET)
-#define SIRFUART_TX_FIFO_CHK_LC(line, value) ((((line) == 1) ? (value & 0x3) :\
- (value & 0x1f)) << SIRFUART_TX_FIFO_LC_OFFSET)
-#define SIRFUART_TX_FIFO_CHK_HC(line, value) ((((line) == 1) ? (value & 0x3) :\
- (value & 0x1f)) << SIRFUART_TX_FIFO_HC_OFFSET)
-
-#define SIRFUART_RX_FIFO_CHK_SC SIRFUART_TX_FIFO_CHK_SC
-#define SIRFUART_RX_FIFO_CHK_LC SIRFUART_TX_FIFO_CHK_LC
-#define SIRFUART_RX_FIFO_CHK_HC SIRFUART_TX_FIFO_CHK_HC
-#define SIRFUART_RX_FIFO_MASK 0x7f
-/* Indicate how many buffers are used */
-
-/* For Fast Baud Rate Calculation */
-struct sirfsoc_baudrate_to_regv {
- unsigned int baud_rate;
- unsigned int reg_val;
-};
-
-enum sirfsoc_tx_state {
- TX_DMA_IDLE,
- TX_DMA_RUNNING,
- TX_DMA_PAUSE,
-};
-
-struct sirfsoc_rx_buffer {
- struct circ_buf xmit;
- dma_cookie_t cookie;
- struct dma_async_tx_descriptor *desc;
- dma_addr_t dma_addr;
-};
-
-struct sirfsoc_uart_port {
- bool hw_flow_ctrl;
- bool ms_enabled;
-
- struct uart_port port;
- struct clk *clk;
- /* for SiRFatlas7, there are SET/CLR for UART_INT_EN */
- bool is_atlas7;
- struct sirfsoc_uart_register *uart_reg;
- struct dma_chan *rx_dma_chan;
- struct dma_chan *tx_dma_chan;
- dma_addr_t tx_dma_addr;
- struct dma_async_tx_descriptor *tx_dma_desc;
- unsigned long transfer_size;
- enum sirfsoc_tx_state tx_dma_state;
- unsigned int cts_gpio;
- unsigned int rts_gpio;
-
- struct sirfsoc_rx_buffer rx_dma_items;
- struct hrtimer hrt;
- bool is_hrt_enabled;
- unsigned long rx_period_time;
- unsigned long rx_last_pos;
- unsigned long pio_fetch_cnt;
-};
-
-/* Register Access Control */
-#define portaddr(port, reg) ((port)->membase + (reg))
-#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
-#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
-
-/* UART Port Mask */
-#define SIRFUART_FIFOLEVEL_MASK(port) ((port->fifosize - 1) & 0xFFF)
-#define SIRFUART_FIFOFULL_MASK(port) (port->fifosize & 0xFFF)
-#define SIRFUART_FIFOEMPTY_MASK(port) ((port->fifosize & 0xFFF) << 1)
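The register-access macros at the end of the removed header wrap __raw_readl()/__raw_writel() behind port-relative offsets. A minimal sketch of how the removed driver used them, assuming (as in the deleted header) that the inner offset table is the uart_reg member of struct sirfsoc_uart_register:

	/* Hedged sketch only; types and offsets come from the removed driver. */
	static void sirfsoc_uart_enable_rx_tx(struct sirfsoc_uart_port *sirfport)
	{
		struct uart_port *port = &sirfport->port;
		u32 en = rd_regl(port, sirfport->uart_reg->uart_reg.sirfsoc_tx_rx_en);

		/* read-modify-write through the portaddr()-based accessors */
		wr_regl(port, sirfport->uart_reg->uart_reg.sirfsoc_tx_rx_en,
			en | SIRFUART_RX_EN | SIRFUART_TX_EN);
	}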
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index f4de32d3f2af..b3675cf25a69 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -3,7 +3,8 @@
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
- * Gerald Baeza <gerald.baeza@st.com>
+ * Gerald Baeza <gerald.baeza@foss.st.com>
+ * Erwan Le Ray <erwan.leray@foss.st.com>
*
* Inspired by st-asc.c from STMicroelectronics (c)
*/
@@ -34,15 +35,15 @@
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
-static void stm32_stop_tx(struct uart_port *port);
-static void stm32_transmit_chars(struct uart_port *port);
+static void stm32_usart_stop_tx(struct uart_port *port);
+static void stm32_usart_transmit_chars(struct uart_port *port);
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
return container_of(port, struct stm32_port, port);
}
-static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
+static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
u32 val;
@@ -51,7 +52,7 @@ static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
writel_relaxed(val, port->membase + reg);
}
-static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
+static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
u32 val;
@@ -60,8 +61,8 @@ static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
writel_relaxed(val, port->membase + reg);
}
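The renamed helpers above are the driver's read-modify-write primitives over the relaxed MMIO accessors. A self-contained sketch of the same pattern, with hypothetical names not taken from this driver:

	#include <linux/io.h>

	/* Sketch: set or clear bits in a memory-mapped register. */
	static inline void mmio_set_bits(void __iomem *base, u32 reg, u32 bits)
	{
		u32 val = readl_relaxed(base + reg);

		writel_relaxed(val | bits, base + reg);
	}

	static inline void mmio_clr_bits(void __iomem *base, u32 reg, u32 bits)
	{
		u32 val = readl_relaxed(base + reg);

		writel_relaxed(val & ~bits, base + reg);
	}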
-static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
- u32 delay_DDE, u32 baud)
+static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
+ u32 delay_DDE, u32 baud)
{
u32 rs485_deat_dedt;
u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
@@ -95,16 +96,16 @@ static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr1 |= rs485_deat_dedt;
}
-static int stm32_config_rs485(struct uart_port *port,
- struct serial_rs485 *rs485conf)
+static int stm32_usart_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 usartdiv, baud, cr1, cr3;
bool over8;
- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
port->rs485 = *rs485conf;
@@ -122,9 +123,10 @@ static int stm32_config_rs485(struct uart_port *port,
<< USART_BRR_04_R_SHIFT;
baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
- stm32_config_reg_rs485(&cr1, &cr3,
- rs485conf->delay_rts_before_send,
- rs485conf->delay_rts_after_send, baud);
+ stm32_usart_config_reg_rs485(&cr1, &cr3,
+ rs485conf->delay_rts_before_send,
+ rs485conf->delay_rts_after_send,
+ baud);
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
cr3 &= ~USART_CR3_DEP;
@@ -137,18 +139,19 @@ static int stm32_config_rs485(struct uart_port *port,
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
} else {
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DEM | USART_CR3_DEP);
- stm32_clr_bits(port, ofs->cr1,
- USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+ stm32_usart_clr_bits(port, ofs->cr3,
+ USART_CR3_DEM | USART_CR3_DEP);
+ stm32_usart_clr_bits(port, ofs->cr1,
+ USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
}
- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
return 0;
}
-static int stm32_init_rs485(struct uart_port *port,
- struct platform_device *pdev)
+static int stm32_usart_init_rs485(struct uart_port *port,
+ struct platform_device *pdev)
{
struct serial_rs485 *rs485conf = &port->rs485;
@@ -162,11 +165,11 @@ static int stm32_init_rs485(struct uart_port *port,
return uart_get_rs485_mode(port);
}
-static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
- bool threaded)
+static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
+ int *last_res, bool threaded)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
enum dma_status status;
struct dma_tx_state state;
@@ -176,8 +179,7 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
&state);
- if ((status == DMA_IN_PROGRESS) &&
- (*last_res != state.residue))
+ if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
return 1;
else
return 0;
@@ -187,11 +189,11 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
return 0;
}
-static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
- int *last_res)
+static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
+ int *last_res)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
if (stm32_port->rx_ch) {
@@ -207,11 +209,11 @@ static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
return c;
}
-static void stm32_receive_chars(struct uart_port *port, bool threaded)
+static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
{
struct tty_port *tport = &port->state->port;
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
u32 sr;
char flag;
@@ -219,7 +221,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
- while (stm32_pending_rx(port, &sr, &stm32_port->last_res, threaded)) {
+ while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+ threaded)) {
sr |= USART_SR_DUMMY_RX;
flag = TTY_NORMAL;
@@ -238,7 +241,7 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
writel_relaxed(sr & USART_SR_ERR_MASK,
port->membase + ofs->icr);
- c = stm32_get_char(port, &sr, &stm32_port->last_res);
+ c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
port->icount.rx++;
if (sr & USART_SR_ERR_MASK) {
if (sr & USART_SR_ORE) {
@@ -278,53 +281,53 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
spin_lock(&port->lock);
}
-static void stm32_tx_dma_complete(void *arg)
+static void stm32_usart_tx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct stm32_port *stm32port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
stm32port->tx_dma_busy = false;
/* Let's see if we have pending data to send */
- stm32_transmit_chars(port);
+ stm32_usart_transmit_chars(port);
}
-static void stm32_tx_interrupt_enable(struct uart_port *port)
+static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
/*
* Enables TX FIFO threshold irq when FIFO is enabled,
* or TX empty irq when FIFO is disabled
*/
if (stm32_port->fifoen)
- stm32_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
else
- stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
-static void stm32_tx_interrupt_disable(struct uart_port *port)
+static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (stm32_port->fifoen)
- stm32_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
else
- stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
-static void stm32_transmit_chars_pio(struct uart_port *port)
+static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
if (stm32_port->tx_dma_busy) {
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
stm32_port->tx_dma_busy = false;
}
@@ -339,15 +342,15 @@ static void stm32_transmit_chars_pio(struct uart_port *port)
/* rely on TXE irq (mask or unmask) for sending remaining data */
if (uart_circ_empty(xmit))
- stm32_tx_interrupt_disable(port);
+ stm32_usart_tx_interrupt_disable(port);
else
- stm32_tx_interrupt_enable(port);
+ stm32_usart_tx_interrupt_enable(port);
}
-static void stm32_transmit_chars_dma(struct uart_port *port)
+static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
struct stm32_port *stm32port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
struct dma_async_tx_descriptor *desc = NULL;
unsigned int count, i;
@@ -383,71 +386,77 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
- if (!desc) {
- for (i = count; i > 0; i--)
- stm32_transmit_chars_pio(port);
- return;
- }
+ if (!desc)
+ goto fallback_err;
- desc->callback = stm32_tx_dma_complete;
+ desc->callback = stm32_usart_tx_dma_complete;
desc->callback_param = port;
/* Push current DMA TX transaction in the pending queue */
- dmaengine_submit(desc);
+ if (dma_submit_error(dmaengine_submit(desc))) {
+ /* DMA not yet started, safe to free resources */
+ dmaengine_terminate_async(stm32port->tx_ch);
+ goto fallback_err;
+ }
/* Issue pending DMA TX requests */
dma_async_issue_pending(stm32port->tx_ch);
- stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
port->icount.tx += count;
+ return;
+
+fallback_err:
+ for (i = count; i > 0; i--)
+ stm32_usart_transmit_chars_pio(port);
}
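The hunk above replaces a bare dmaengine_submit() with a dma_submit_error() check and a PIO fallback. The general pattern, sketched with hypothetical names (the descriptor is assumed to come from a dmaengine_prep_slave_single() call):

	#include <linux/dmaengine.h>

	/* Sketch: submit a prepared descriptor and start it, or bail out. */
	static int demo_submit_and_start(struct dma_chan *chan,
					 struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie = dmaengine_submit(desc);

		if (dma_submit_error(cookie)) {
			/* Not started yet, so async termination is safe */
			dmaengine_terminate_async(chan);
			return -EIO;
		}

		dma_async_issue_pending(chan);
		return 0;
	}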
-static void stm32_transmit_chars(struct uart_port *port)
+static void stm32_usart_transmit_chars(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
if (stm32_port->tx_dma_busy)
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
if (stm32_port->tx_dma_busy)
- stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- stm32_tx_interrupt_disable(port);
+ stm32_usart_tx_interrupt_disable(port);
return;
}
if (ofs->icr == UNDEF_REG)
- stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+ stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
else
writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
if (stm32_port->tx_ch)
- stm32_transmit_chars_dma(port);
+ stm32_usart_transmit_chars_dma(port);
else
- stm32_transmit_chars_pio(port);
+ stm32_usart_transmit_chars_pio(port);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
- stm32_tx_interrupt_disable(port);
+ stm32_usart_tx_interrupt_disable(port);
}
-static irqreturn_t stm32_interrupt(int irq, void *ptr)
+static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
spin_lock(&port->lock);
@@ -458,15 +467,15 @@ static irqreturn_t stm32_interrupt(int irq, void *ptr)
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
- if ((sr & USART_SR_WUF) && (ofs->icr != UNDEF_REG))
+ if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
writel_relaxed(USART_ICR_WUCF,
port->membase + ofs->icr);
if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
- stm32_receive_chars(port, false);
+ stm32_usart_receive_chars(port, false);
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
- stm32_transmit_chars(port);
+ stm32_usart_transmit_chars(port);
spin_unlock(&port->lock);
@@ -476,7 +485,7 @@ static irqreturn_t stm32_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
-static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
+static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -484,35 +493,35 @@ static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
spin_lock(&port->lock);
if (stm32_port->rx_ch)
- stm32_receive_chars(port, true);
+ stm32_usart_receive_chars(port, true);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
-static unsigned int stm32_tx_empty(struct uart_port *port)
+static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
}
-static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
+static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
- stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
else
- stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
mctrl_gpio_set(stm32_port->gpios, mctrl);
}
-static unsigned int stm32_get_mctrl(struct uart_port *port)
+static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
unsigned int ret;
@@ -523,23 +532,23 @@ static unsigned int stm32_get_mctrl(struct uart_port *port)
return mctrl_gpio_get(stm32_port->gpios, &ret);
}
-static void stm32_enable_ms(struct uart_port *port)
+static void stm32_usart_enable_ms(struct uart_port *port)
{
mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}
-static void stm32_disable_ms(struct uart_port *port)
+static void stm32_usart_disable_ms(struct uart_port *port)
{
mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
/* Transmit stop */
-static void stm32_stop_tx(struct uart_port *port)
+static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
- stm32_tx_interrupt_disable(port);
+ stm32_usart_tx_interrupt_disable(port);
if (rs485conf->flags & SER_RS485_ENABLED) {
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
@@ -553,7 +562,7 @@ static void stm32_stop_tx(struct uart_port *port)
}
/* There are probably characters waiting to be transmitted. */
-static void stm32_start_tx(struct uart_port *port)
+static void stm32_usart_start_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
@@ -572,73 +581,72 @@ static void stm32_start_tx(struct uart_port *port)
}
}
- stm32_transmit_chars(port);
+ stm32_usart_transmit_chars(port);
}
/* Throttle the remote when input buffer is about to overflow. */
-static void stm32_throttle(struct uart_port *port)
+static void stm32_usart_throttle(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
- stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Unthrottle the remote, the input buffer can now accept data. */
-static void stm32_unthrottle(struct uart_port *port)
+static void stm32_usart_unthrottle(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- stm32_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
- stm32_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Receive stop */
-static void stm32_stop_rx(struct uart_port *port)
+static void stm32_usart_stop_rx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
- stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
-
+ stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
/* Handle breaks - ignored by us */
-static void stm32_break_ctl(struct uart_port *port, int break_state)
+static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
-static int stm32_startup(struct uart_port *port)
+static int stm32_usart_startup(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const char *name = to_platform_device(port->dev)->name;
u32 val;
int ret;
- ret = request_threaded_irq(port->irq, stm32_interrupt,
- stm32_threaded_interrupt,
+ ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
+ stm32_usart_threaded_interrupt,
IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
- stm32_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
+ stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
/* Tx and RX FIFO configuration */
if (stm32_port->fifoen) {
@@ -653,21 +661,21 @@ static int stm32_startup(struct uart_port *port)
val = stm32_port->cr1_irq | USART_CR1_RE;
if (stm32_port->fifoen)
val |= USART_CR1_FIFOEN;
- stm32_set_bits(port, ofs->cr1, val);
+ stm32_usart_set_bits(port, ofs->cr1, val);
return 0;
}
-static void stm32_shutdown(struct uart_port *port)
+static void stm32_usart_shutdown(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 val, isr;
int ret;
/* Disable modem control interrupts */
- stm32_disable_ms(port);
+ stm32_usart_disable_ms(port);
val = USART_CR1_TXEIE | USART_CR1_TE;
val |= stm32_port->cr1_irq | USART_CR1_RE;
@@ -679,15 +687,16 @@ static void stm32_shutdown(struct uart_port *port)
isr, (isr & USART_SR_TC),
10, 100000);
+ /* Send the TC error message only when ISR_TC is not set */
if (ret)
- dev_err(port->dev, "transmission complete not set\n");
+ dev_err(port->dev, "Transmission is not complete\n");
- stm32_clr_bits(port, ofs->cr1, val);
+ stm32_usart_clr_bits(port, ofs->cr1, val);
free_irq(port->irq, port);
}
-static unsigned int stm32_get_databits(struct ktermios *termios)
+static unsigned int stm32_usart_get_databits(struct ktermios *termios)
{
unsigned int bits;
@@ -717,12 +726,13 @@ static unsigned int stm32_get_databits(struct ktermios *termios)
return bits;
}
-static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void stm32_usart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
struct serial_rs485 *rs485conf = &port->rs485;
unsigned int baud, bits;
u32 usartdiv, mantissa, fraction, oversampling;
@@ -742,8 +752,8 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
/* flush RX & TX FIFO */
if (ofs->rqr != UNDEF_REG)
- stm32_set_bits(port, ofs->rqr,
- USART_RQR_TXFRQ | USART_RQR_RXFRQ);
+ stm32_usart_set_bits(port, ofs->rqr,
+ USART_RQR_TXFRQ | USART_RQR_RXFRQ);
cr1 = USART_CR1_TE | USART_CR1_RE;
if (stm32_port->fifoen)
@@ -756,7 +766,7 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
if (cflag & CSTOPB)
cr2 |= USART_CR2_STOP_2B;
- bits = stm32_get_databits(termios);
+ bits = stm32_usart_get_databits(termios);
stm32_port->rdr_mask = (BIT(bits) - 1);
if (cflag & PARENB) {
@@ -809,9 +819,9 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
/* Handle modem control interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
- stm32_enable_ms(port);
+ stm32_usart_enable_ms(port);
else
- stm32_disable_ms(port);
+ stm32_usart_disable_ms(port);
usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
@@ -824,11 +834,11 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
if (usartdiv < 16) {
oversampling = 8;
cr1 |= USART_CR1_OVER8;
- stm32_set_bits(port, ofs->cr1, USART_CR1_OVER8);
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
} else {
oversampling = 16;
cr1 &= ~USART_CR1_OVER8;
- stm32_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
}
mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
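As a worked example of the divisor math above (values illustrative only): with uartclk = 48 MHz and baud = 115200, usartdiv = DIV_ROUND_CLOSEST(48000000, 115200) = 417. Since 417 >= 16, oversampling stays at 16, giving a mantissa of 417 / 16 = 26 (shifted by USART_BRR_DIV_M_SHIFT); the remainder, 417 % 16 = 1, presumably forms the fractional part computed just after this hunk.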
@@ -865,9 +875,10 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
cr3 |= USART_CR3_DMAR;
if (rs485conf->flags & SER_RS485_ENABLED) {
- stm32_config_reg_rs485(&cr1, &cr3,
- rs485conf->delay_rts_before_send,
- rs485conf->delay_rts_after_send, baud);
+ stm32_usart_config_reg_rs485(&cr1, &cr3,
+ rs485conf->delay_rts_before_send,
+ rs485conf->delay_rts_after_send,
+ baud);
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
cr3 &= ~USART_CR3_DEP;
rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
@@ -885,44 +896,44 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
writel_relaxed(cr2, port->membase + ofs->cr2);
writel_relaxed(cr1, port->membase + ofs->cr1);
- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
}
-static const char *stm32_type(struct uart_port *port)
+static const char *stm32_usart_type(struct uart_port *port)
{
return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}
-static void stm32_release_port(struct uart_port *port)
+static void stm32_usart_release_port(struct uart_port *port)
{
}
-static int stm32_request_port(struct uart_port *port)
+static int stm32_usart_request_port(struct uart_port *port)
{
return 0;
}
-static void stm32_config_port(struct uart_port *port, int flags)
+static void stm32_usart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_STM32;
}
static int
-stm32_verify_port(struct uart_port *port, struct serial_struct *ser)
+stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* No user changeable parameters */
return -EINVAL;
}
-static void stm32_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
+static void stm32_usart_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
{
struct stm32_port *stm32port = container_of(port,
struct stm32_port, port);
- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
- struct stm32_usart_config *cfg = &stm32port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32port->info->cfg;
unsigned long flags = 0;
switch (state) {
@@ -931,7 +942,7 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
break;
case UART_PM_STATE_OFF:
spin_lock_irqsave(&port->lock, flags);
- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
pm_runtime_put_sync(port->dev);
break;
@@ -939,33 +950,42 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
}
static const struct uart_ops stm32_uart_ops = {
- .tx_empty = stm32_tx_empty,
- .set_mctrl = stm32_set_mctrl,
- .get_mctrl = stm32_get_mctrl,
- .stop_tx = stm32_stop_tx,
- .start_tx = stm32_start_tx,
- .throttle = stm32_throttle,
- .unthrottle = stm32_unthrottle,
- .stop_rx = stm32_stop_rx,
- .enable_ms = stm32_enable_ms,
- .break_ctl = stm32_break_ctl,
- .startup = stm32_startup,
- .shutdown = stm32_shutdown,
- .set_termios = stm32_set_termios,
- .pm = stm32_pm,
- .type = stm32_type,
- .release_port = stm32_release_port,
- .request_port = stm32_request_port,
- .config_port = stm32_config_port,
- .verify_port = stm32_verify_port,
+ .tx_empty = stm32_usart_tx_empty,
+ .set_mctrl = stm32_usart_set_mctrl,
+ .get_mctrl = stm32_usart_get_mctrl,
+ .stop_tx = stm32_usart_stop_tx,
+ .start_tx = stm32_usart_start_tx,
+ .throttle = stm32_usart_throttle,
+ .unthrottle = stm32_usart_unthrottle,
+ .stop_rx = stm32_usart_stop_rx,
+ .enable_ms = stm32_usart_enable_ms,
+ .break_ctl = stm32_usart_break_ctl,
+ .startup = stm32_usart_startup,
+ .shutdown = stm32_usart_shutdown,
+ .set_termios = stm32_usart_set_termios,
+ .pm = stm32_usart_pm,
+ .type = stm32_usart_type,
+ .release_port = stm32_usart_release_port,
+ .request_port = stm32_usart_request_port,
+ .config_port = stm32_usart_config_port,
+ .verify_port = stm32_usart_verify_port,
};
-static int stm32_init_port(struct stm32_port *stm32port,
- struct platform_device *pdev)
+static void stm32_usart_deinit_port(struct stm32_port *stm32port)
+{
+ clk_disable_unprepare(stm32port->clk);
+}
+
+static int stm32_usart_init_port(struct stm32_port *stm32port,
+ struct platform_device *pdev)
{
struct uart_port *port = &stm32port->port;
struct resource *res;
- int ret;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq ? : -ENODEV;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
@@ -973,15 +993,10 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->dev = &pdev->dev;
port->fifosize = stm32port->info->cfg.fifosize;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
+ port->irq = irq;
+ port->rs485_config = stm32_usart_config_rs485;
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0)
- return ret ? : -ENODEV;
- port->irq = ret;
-
- port->rs485_config = stm32_config_rs485;
-
- ret = stm32_init_rs485(port, pdev);
+ ret = stm32_usart_init_rs485(port, pdev);
if (ret)
return ret;
@@ -1022,7 +1037,10 @@ static int stm32_init_port(struct stm32_port *stm32port,
goto err_clk;
}
- /* Both CTS/RTS gpios and "st,hw-flow-ctrl" should not be specified */
+ /*
+ * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
+ * properties should not be specified.
+ */
if (stm32port->hw_flow_control) {
if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
@@ -1040,7 +1058,7 @@ err_clk:
return ret;
}
-static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
+static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int id;
@@ -1078,10 +1096,10 @@ static const struct of_device_id stm32_match[] = {
MODULE_DEVICE_TABLE(of, stm32_match);
#endif
-static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
- struct platform_device *pdev)
+static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
+ struct platform_device *pdev)
{
- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct uart_port *port = &stm32port->port;
struct device *dev = &pdev->dev;
struct dma_slave_config config;
@@ -1095,8 +1113,8 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
return -ENODEV;
}
stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
- &stm32port->rx_dma_buf,
- GFP_KERNEL);
+ &stm32port->rx_dma_buf,
+ GFP_KERNEL);
if (!stm32port->rx_buf) {
ret = -ENOMEM;
goto alloc_err;
@@ -1130,7 +1148,11 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
desc->callback_param = NULL;
/* Push current DMA transaction in the pending queue */
- dmaengine_submit(desc);
+ ret = dma_submit_error(dmaengine_submit(desc));
+ if (ret) {
+ dmaengine_terminate_sync(stm32port->rx_ch);
+ goto config_err;
+ }
/* Issue pending DMA requests */
dma_async_issue_pending(stm32port->rx_ch);
@@ -1149,10 +1171,10 @@ alloc_err:
return ret;
}
-static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
- struct platform_device *pdev)
+static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
+ struct platform_device *pdev)
{
- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct uart_port *port = &stm32port->port;
struct device *dev = &pdev->dev;
struct dma_slave_config config;
@@ -1167,8 +1189,8 @@ static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
return -ENODEV;
}
stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
- &stm32port->tx_dma_buf,
- GFP_KERNEL);
+ &stm32port->tx_dma_buf,
+ GFP_KERNEL);
if (!stm32port->tx_buf) {
ret = -ENOMEM;
goto alloc_err;
@@ -1200,23 +1222,20 @@ alloc_err:
return ret;
}
-static int stm32_serial_probe(struct platform_device *pdev)
+static int stm32_usart_serial_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct stm32_port *stm32port;
int ret;
- stm32port = stm32_of_get_stm32_port(pdev);
+ stm32port = stm32_usart_of_get_port(pdev);
if (!stm32port)
return -ENODEV;
- match = of_match_device(stm32_match, &pdev->dev);
- if (match && match->data)
- stm32port->info = (struct stm32_usart_info *)match->data;
- else
+ stm32port->info = of_device_get_match_data(&pdev->dev);
+ if (!stm32port->info)
return -EINVAL;
- ret = stm32_init_port(stm32port, pdev);
+ ret = stm32_usart_init_port(stm32port, pdev);
if (ret)
return ret;
@@ -1237,11 +1256,11 @@ static int stm32_serial_probe(struct platform_device *pdev)
if (ret)
goto err_wirq;
- ret = stm32_of_dma_rx_probe(stm32port, pdev);
+ ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
if (ret)
dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
- ret = stm32_of_dma_tx_probe(stm32port, pdev);
+ ret = stm32_usart_of_dma_tx_probe(stm32port, pdev);
if (ret)
dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
@@ -1263,21 +1282,21 @@ err_nowup:
device_init_wakeup(&pdev->dev, false);
err_uninit:
- clk_disable_unprepare(stm32port->clk);
+ stm32_usart_deinit_port(stm32port);
return ret;
}
-static int stm32_serial_remove(struct platform_device *pdev)
+static int stm32_usart_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
int err;
pm_runtime_get_sync(&pdev->dev);
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
if (stm32_port->rx_ch)
dma_release_channel(stm32_port->rx_ch);
@@ -1287,7 +1306,7 @@ static int stm32_serial_remove(struct platform_device *pdev)
RX_BUF_L, stm32_port->rx_buf,
stm32_port->rx_dma_buf);
- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
if (stm32_port->tx_ch)
dma_release_channel(stm32_port->tx_ch);
@@ -1302,7 +1321,7 @@ static int stm32_serial_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
}
- clk_disable_unprepare(stm32_port->clk);
+ stm32_usart_deinit_port(stm32_port);
err = uart_remove_one_port(&stm32_usart_driver, port);
@@ -1312,12 +1331,11 @@ static int stm32_serial_remove(struct platform_device *pdev)
return err;
}
-
#ifdef CONFIG_SERIAL_STM32_CONSOLE
-static void stm32_console_putchar(struct uart_port *port, int ch)
+static void stm32_usart_console_putchar(struct uart_port *port, int ch)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
cpu_relax();
@@ -1325,12 +1343,13 @@ static void stm32_console_putchar(struct uart_port *port, int ch)
writel_relaxed(ch, port->membase + ofs->tdr);
}
-static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
+static void stm32_usart_console_write(struct console *co, const char *s,
+ unsigned int cnt)
{
struct uart_port *port = &stm32_ports[co->index].port;
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
unsigned long flags;
u32 old_cr1, new_cr1;
int locked = 1;
@@ -1349,7 +1368,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
writel_relaxed(new_cr1, port->membase + ofs->cr1);
- uart_console_write(port, s, cnt, stm32_console_putchar);
+ uart_console_write(port, s, cnt, stm32_usart_console_putchar);
/* Restore interrupt state */
writel_relaxed(old_cr1, port->membase + ofs->cr1);
@@ -1359,7 +1378,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
local_irq_restore(flags);
}
-static int stm32_console_setup(struct console *co, char *options)
+static int stm32_usart_console_setup(struct console *co, char *options)
{
struct stm32_port *stm32port;
int baud = 9600;
@@ -1378,7 +1397,7 @@ static int stm32_console_setup(struct console *co, char *options)
* this to be called during the uart port registration when the
* driver gets probed and the port should be mapped at that point.
*/
- if (stm32port->port.mapbase == 0 || stm32port->port.membase == NULL)
+ if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
return -ENXIO;
if (options)
@@ -1390,8 +1409,8 @@ static int stm32_console_setup(struct console *co, char *options)
static struct console stm32_console = {
.name = STM32_SERIAL_NAME,
.device = uart_console_device,
- .write = stm32_console_write,
- .setup = stm32_console_setup,
+ .write = stm32_usart_console_write,
+ .setup = stm32_usart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &stm32_usart_driver,
@@ -1412,41 +1431,41 @@ static struct uart_driver stm32_usart_driver = {
.cons = STM32_SERIAL_CONSOLE,
};
-static void __maybe_unused stm32_serial_enable_wakeup(struct uart_port *port,
- bool enable)
+static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
+ bool enable)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 val;
if (stm32_port->wakeirq <= 0)
return;
if (enable) {
- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- stm32_set_bits(port, ofs->cr1, USART_CR1_UESM);
+ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
val = readl_relaxed(port->membase + ofs->cr3);
val &= ~USART_CR3_WUS_MASK;
/* Enable Wake up interrupt from low power on start bit */
val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
writel_relaxed(val, port->membase + ofs->cr3);
- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
} else {
- stm32_clr_bits(port, ofs->cr1, USART_CR1_UESM);
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
}
}
-static int __maybe_unused stm32_serial_suspend(struct device *dev)
+static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
uart_suspend_port(&stm32_usart_driver, port);
if (device_may_wakeup(dev))
- stm32_serial_enable_wakeup(port, true);
+ stm32_usart_serial_en_wakeup(port, true);
else
- stm32_serial_enable_wakeup(port, false);
+ stm32_usart_serial_en_wakeup(port, false);
/*
* When "no_console_suspend" is enabled, keep the pinctrl default state
@@ -1464,19 +1483,19 @@ static int __maybe_unused stm32_serial_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused stm32_serial_resume(struct device *dev)
+static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
if (device_may_wakeup(dev))
- stm32_serial_enable_wakeup(port, false);
+ stm32_usart_serial_en_wakeup(port, false);
return uart_resume_port(&stm32_usart_driver, port);
}
-static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct stm32_port *stm32port = container_of(port,
@@ -1487,7 +1506,7 @@ static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
+static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct stm32_port *stm32port = container_of(port,
@@ -1497,14 +1516,15 @@ static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops stm32_serial_pm_ops = {
- SET_RUNTIME_PM_OPS(stm32_serial_runtime_suspend,
- stm32_serial_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(stm32_serial_suspend, stm32_serial_resume)
+ SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
+ stm32_usart_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
+ stm32_usart_serial_resume)
};
static struct platform_driver stm32_serial_driver = {
- .probe = stm32_serial_probe,
- .remove = stm32_serial_remove,
+ .probe = stm32_usart_serial_probe,
+ .remove = stm32_usart_serial_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_serial_pm_ops,
@@ -1512,7 +1532,7 @@ static struct platform_driver stm32_serial_driver = {
},
};
-static int __init usart_init(void)
+static int __init stm32_usart_init(void)
{
static char banner[] __initdata = "STM32 USART driver initialized";
int ret;
@@ -1530,14 +1550,14 @@ static int __init usart_init(void)
return ret;
}
-static void __exit usart_exit(void)
+static void __exit stm32_usart_exit(void)
{
platform_driver_unregister(&stm32_serial_driver);
uart_unregister_driver(&stm32_usart_driver);
}
-module_init(usart_init);
-module_exit(usart_exit);
+module_init(stm32_usart_init);
+module_exit(stm32_usart_exit);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index d4c916e78d40..cb4f327c46db 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -259,7 +259,7 @@ struct stm32_usart_info stm32h7_info = {
struct stm32_port {
struct uart_port port;
struct clk *clk;
- struct stm32_usart_info *info;
+ const struct stm32_usart_info *info;
struct dma_chan *rx_ch; /* dma rx channel */
dma_addr_t rx_dma_buf; /* dma rx buffer bus address */
unsigned char *rx_buf; /* dma rx buffer cpu address */
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index c0b384e3ed4d..644173786bf0 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -672,7 +672,6 @@ static int open(struct tty_struct *tty, struct file *filp)
DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
mutex_lock(&info->port.mutex);
- info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8034489337d7..74733ec8f565 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -142,13 +142,10 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
/* Mutex to protect creating and releasing a tty */
DEFINE_MUTEX(tty_mutex);
-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -432,14 +429,12 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
EXPORT_SYMBOL_GPL(tty_find_polling_driver);
#endif
-static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
{
return 0;
}
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
@@ -477,8 +472,10 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tty_fops = {
.llseek = no_llseek,
- .read = tty_read,
- .write = tty_write,
+ .read_iter = tty_read,
+ .write_iter = tty_write,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -490,8 +487,10 @@ static const struct file_operations tty_fops = {
static const struct file_operations console_fops = {
.llseek = no_llseek,
- .read = tty_read,
- .write = redirected_tty_write,
+ .read_iter = tty_read,
+ .write_iter = redirected_tty_write,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -502,8 +501,8 @@ static const struct file_operations console_fops = {
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
- .read = hung_up_tty_read,
- .write = hung_up_tty_write,
+ .read_iter = hung_up_tty_read,
+ .write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
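The file_operations tables above move from .read/.write to .read_iter/.write_iter, whose handlers receive a struct kiocb and an iov_iter rather than a raw user pointer and count. A minimal sketch of the new-style read signature (hypothetical driver, not tty code):

	#include <linux/fs.h>
	#include <linux/uio.h>

	/* Sketch: iov_iter-based read returning a fixed banner. */
	static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		static const char banner[] = "hello\n";
		size_t copied;

		if (iocb->ki_pos >= sizeof(banner) - 1)
			return 0;

		/* copy_to_iter() advances the iterator, returns bytes copied */
		copied = copy_to_iter(banner + iocb->ki_pos,
				      sizeof(banner) - 1 - iocb->ki_pos, to);
		iocb->ki_pos += copied;
		return copied;
	}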
@@ -541,6 +540,29 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
+ * tty_release_redirect - Release a redirect on a pty if present
+ * @tty: tty device
+ *
+ * This is available to the pty code so that, if the master closes and
+ * the slave is a redirect, it can release the redirect. It returns the
+ * filp for the redirect, which must be fput when the operations on
+ * the tty are completed.
+ */
+struct file *tty_release_redirect(struct tty_struct *tty)
+{
+ struct file *f = NULL;
+
+ spin_lock(&redirect_lock);
+ if (redirect && file_tty(redirect) == tty) {
+ f = redirect;
+ redirect = NULL;
+ }
+ spin_unlock(&redirect_lock);
+
+ return f;
+}
+
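A sketch of the caller contract spelled out in the kernel-doc above: whoever takes the redirect owns the returned filp and must fput() it, exactly as the reworked __tty_hangup() does below:

	/* Sketch only: drop a pty redirect if one is installed. */
	static void demo_drop_redirect(struct tty_struct *tty)
	{
		struct file *f = tty_release_redirect(tty);

		if (f)
			fput(f);	/* release the redirect's reference */
	}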
+/**
* __tty_hangup - actual handler for hangup events
* @tty: tty device
* @exit_session: if non-zero, signal all foreground group processes
@@ -566,7 +588,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
static void __tty_hangup(struct tty_struct *tty, int exit_session)
{
struct file *cons_filp = NULL;
- struct file *filp, *f = NULL;
+ struct file *filp, *f;
struct tty_file_private *priv;
int closecount = 0, n;
int refs;
@@ -574,13 +596,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
if (!tty)
return;
-
- spin_lock(&redirect_lock);
- if (redirect && file_tty(redirect) == tty) {
- f = redirect;
- redirect = NULL;
- }
- spin_unlock(&redirect_lock);
+ f = tty_release_redirect(tty);
tty_lock(tty);
@@ -606,9 +622,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
- if (filp->f_op->write == redirected_tty_write)
+ if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
- if (filp->f_op->write != tty_write)
+ if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
@@ -831,6 +847,72 @@ static void tty_update_time(struct timespec64 *time)
time->tv_sec = sec;
}
+/*
+ * Iterate on the ldisc ->read() function until we've gotten all
+ * the data the ldisc has for us.
+ *
+ * The "cookie" is something that the ldisc read function can fill
+ * in to let us know that there is more data to be had.
+ *
+ * We promise to continue to call the ldisc until it stops returning
+ * data or clears the cookie. The cookie may be something that the
+ * ldisc maintains state for and needs to free.
+ */
+static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+ struct file *file, struct iov_iter *to)
+{
+ int retval = 0;
+ void *cookie = NULL;
+ unsigned long offset = 0;
+ char kernel_buf[64];
+ size_t count = iov_iter_count(to);
+
+ do {
+ int size, copied;
+
+ size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
+ size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
+ if (!size)
+ break;
+
+ if (size < 0) {
+ /* Did we have an earlier error (i.e. -EFAULT)? */
+ if (retval)
+ break;
+ retval = size;
+
+ /*
+ * -EOVERFLOW means we didn't have enough space
+ * for a whole packet, and we shouldn't return
+ * a partial result.
+ */
+ if (retval == -EOVERFLOW)
+ offset = 0;
+ break;
+ }
+
+ copied = copy_to_iter(kernel_buf, size, to);
+ offset += copied;
+ count -= copied;
+
+ /*
+ * If the user copy failed, we still need to do another ->read()
+ * call if we had a cookie to let the ldisc clear up.
+ *
+ * But make sure count is zeroed so the next ->read() sees a zero size.
+ */
+ if (unlikely(copied != size)) {
+ count = 0;
+ retval = -EFAULT;
+ }
+ } while (cookie);
+
+ /* Always clear the tty buffer in case it contained passwords */
+ memzero_explicit(kernel_buf, sizeof(kernel_buf));
+ return offset ? offset : retval;
+}
+
+
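To illustrate the cookie contract described above: an ldisc ->read() with more data than fits in the caller's 64-byte chunk can stash continuation state in *cookie and will be called again, with offset advanced by the bytes already copied, until it returns 0 or clears the cookie. A hedged sketch against the six-argument signature this series introduces (demo_data and its fields are hypothetical):

	/* Sketch: drain a private buffer across multiple calls. */
	static ssize_t demo_ldisc_read(struct tty_struct *tty, struct file *file,
				       unsigned char *buf, size_t nr,
				       void **cookie, unsigned long offset)
	{
		struct demo_data *d = tty->disc_data;	/* hypothetical state */
		size_t n = min_t(size_t, d->len - offset, nr);

		memcpy(buf, d->buf + offset, n);
		/* keep the cookie set while bytes remain; clear when done */
		*cookie = (offset + n < d->len) ? d : NULL;
		return n;
	}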
/**
* tty_read - read method for tty device files
* @file: pointer to tty file
@@ -846,10 +928,10 @@ static void tty_update_time(struct timespec64 *time)
* read calls may be outstanding in parallel.
*/
-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
{
int i;
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
@@ -863,11 +945,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
situation */
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_read(file, buf, count, ppos);
+ return hung_up_tty_read(iocb, to);
+ i = -EIO;
if (ld->ops->read)
- i = ld->ops->read(tty, file, buf, count);
- else
- i = -EIO;
+ i = iterate_tty_read(ld, tty, file, to);
tty_ldisc_deref(ld);
if (i > 0)
@@ -901,9 +982,9 @@ static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
- const char __user *buf,
- size_t count)
+ struct iov_iter *from)
{
+ size_t count = iov_iter_count(from);
ssize_t ret, written = 0;
unsigned int chunk;
@@ -955,14 +1036,23 @@ static inline ssize_t do_tty_write(
size_t size = count;
if (size > chunk)
size = chunk;
+
ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
+ if (copy_from_iter(tty->write_buf, size, from) != size)
break;
+
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
+
written += ret;
- buf += ret;
+ if (ret > size)
+ break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
count -= ret;
if (!count)
break;
@@ -1022,8 +1112,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
@@ -1038,17 +1127,21 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_write(file, buf, count, ppos);
+ return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ ret = do_tty_write(ld->ops->write, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
@@ -1057,13 +1150,17 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
p = get_file(redirect);
spin_unlock(&redirect_lock);
+ /*
+ * We know the redirected tty is just another tty, so we can
+ * call file_tty_write() directly with that file pointer.
+ */
if (p) {
ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
+ res = file_tty_write(p, iocb, iter);
fput(p);
return res;
}
- return tty_write(file, buf, count, ppos);
+ return tty_write(iocb, iter);
}
/*
@@ -1875,22 +1972,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
return driver;
}
-/**
- * tty_kopen - open a tty device for kernel
- * @device: dev_t of device to open
- *
- * Opens tty exclusively for kernel. Performs the driver lookup,
- * makes sure it's not already opened and performs the first-time
- * tty initialization.
- *
- * Returns the locked initialized &tty_struct
- *
- * Claims the global tty_mutex to serialize:
- * - concurrent first-time tty initialization
- * - concurrent tty driver removal w/ lookup
- * - concurrent tty removal from driver table
- */
-struct tty_struct *tty_kopen(dev_t device)
+static struct tty_struct *tty_kopen(dev_t device, int shared)
{
struct tty_struct *tty;
struct tty_driver *driver;
@@ -1905,7 +1987,7 @@ struct tty_struct *tty_kopen(dev_t device)
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, NULL, index);
- if (IS_ERR(tty))
+ if (IS_ERR(tty) || shared)
goto out;
if (tty) {
@@ -1923,7 +2005,42 @@ out:
tty_driver_kref_put(driver);
return tty;
}
-EXPORT_SYMBOL_GPL(tty_kopen);
+
+/**
+ * tty_kopen_exclusive - open a tty device for kernel
+ * @device: dev_t of device to open
+ *
+ * Opens tty exclusively for kernel. Performs the driver lookup,
+ * makes sure it's not already opened and performs the first-time
+ * tty initialization.
+ *
+ * Returns the locked initialized &tty_struct
+ *
+ * Claims the global tty_mutex to serialize:
+ * - concurrent first-time tty initialization
+ * - concurrent tty driver removal w/ lookup
+ * - concurrent tty removal from driver table
+ */
+struct tty_struct *tty_kopen_exclusive(dev_t device)
+{
+ return tty_kopen(device, 0);
+}
+EXPORT_SYMBOL_GPL(tty_kopen_exclusive);
+
+/**
+ * tty_kopen_shared - open a tty device for shared in-kernel use
+ * @device: dev_t of device to open
+ *
+ * Opens an already existing tty for in-kernel use. Compared to
+ * tty_kopen_exclusive() above it doesn't ensure to be the only user.
+ *
+ * Locking is identical to tty_kopen() above.
+ */
+struct tty_struct *tty_kopen_shared(dev_t device)
+{
+ return tty_kopen(device, 1);
+}
+EXPORT_SYMBOL_GPL(tty_kopen_shared);
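+
+/*
+ * Editor's sketch (illustration only): an in-kernel user such as a
+ * speech-synthesizer driver would take the exclusive variant roughly
+ * like this; tty_kclose() is the matching release:
+ *
+ *	struct tty_struct *tty = tty_kopen_exclusive(dev);
+ *
+ *	if (IS_ERR(tty))
+ *		return PTR_ERR(tty);
+ *	// the tty comes back locked and initialized; after setup:
+ *	tty_unlock(tty);
+ *	// ... use the tty, then release it with tty_kclose(tty)
+ */
+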
/**
* tty_open_by_driver - open a tty device
@@ -2295,7 +2412,7 @@ static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
+ if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
@@ -2305,6 +2422,12 @@ static int tioccons(struct file *file)
fput(f);
return 0;
}
+ if (file->f_op->write_iter != tty_write)
+ return -ENOTTY;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
spin_lock(&redirect_lock);
if (redirect) {
spin_unlock(&redirect_lock);
@@ -2468,15 +2591,36 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
return tty->ops->tiocmset(tty, set, clear);
}
+/**
+ * tty_get_icount - get tty statistics
+ * @tty: tty device
+ * @icount: output parameter
+ *
+ * Gets a copy of the tty's icount statistics.
+ *
+ * Locking: none (up to the driver)
+ */
+int tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ memset(icount, 0, sizeof(*icount));
+
+ if (tty->ops->get_icount)
+ return tty->ops->get_icount(tty, icount);
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tty_get_icount);
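+
+/*
+ * Editor's sketch (illustration only): a caller outside the ioctl path
+ * can now read the counters directly, for example:
+ *
+ *	struct serial_icounter_struct icount;
+ *
+ *	if (!tty_get_icount(tty, &icount))
+ *		pr_debug("rx=%d tx=%d\n", icount.rx, icount.tx);
+ */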
+
static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
{
- int retval = -EINVAL;
struct serial_icounter_struct icount;
- memset(&icount, 0, sizeof(icount));
- if (tty->ops->get_icount)
- retval = tty->ops->get_icount(tty, &icount);
+ int retval;
+
+ retval = tty_get_icount(tty, &icount);
if (retval != 0)
return retval;
+
if (copy_to_user(arg, &icount, sizeof(icount)))
return -EFAULT;
return 0;
@@ -2867,7 +3011,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
static int this_tty(const void *t, struct file *file, unsigned fd)
{
- if (likely(file->f_op->read != tty_read))
+ if (likely(file->f_op->read_iter != tty_read))
return 0;
return file_tty(file) != t ? 0 : fd + 1;
}
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index eced70ec54e1..17f05b7eb6d3 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
-void __init register_ttynull_console(void)
-{
- if (!ttynull_driver)
- return;
-
- if (add_preferred_console(ttynull_console.name, 0, NULL))
- return;
-
- register_console(&ttynull_console);
-}
-
static int __init ttynull_init(void)
{
struct tty_driver *driver;
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 9ffd42e333b8..e2d6205f83ce 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -681,9 +681,6 @@ static int vcc_remove(struct vio_dev *vdev)
{
struct vcc_port *port = dev_get_drvdata(&vdev->dev);
- if (!port)
- return -ENODEV;
-
del_timer_sync(&port->rx_timer);
del_timer_sync(&port->tx_timer);
@@ -695,12 +692,9 @@ static int vcc_remove(struct vio_dev *vdev)
tty_vhangup(port->tty);
/* Get exclusive reference to VCC, ensures that there are no other
- * clients to this port
+ * clients to this port. This cannot fail.
*/
- port = vcc_get(port->index, true);
-
- if (WARN_ON(!port))
- return -ENODEV;
+ vcc_get(port->index, true);
tty_unregister_device(vcc_tty_driver, port->index);
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index f7d015c67963..d815ac98b39e 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -495,7 +495,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
p2[unicode & 0x3f] = fontpos;
- p->sum += (fontpos << 20) + unicode;
+ p->sum += (fontpos << 20U) + unicode;
return 0;
}
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index c7095fb7d2d1..094d95bf0005 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -6,7 +6,7 @@
#include <linux/keyboard.h>
#include <linux/kd.h>
-u_short plain_map[NR_KEYS] = {
+unsigned short plain_map[NR_KEYS] = {
0xf200, 0xf01b, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036,
0xf037, 0xf038, 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf07f, 0xf009,
0xfb71, 0xfb77, 0xfb65, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
@@ -25,7 +25,7 @@ u_short plain_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short shift_map[NR_KEYS] = {
+static unsigned short shift_map[NR_KEYS] = {
0xf200, 0xf01b, 0xf021, 0xf040, 0xf023, 0xf024, 0xf025, 0xf05e,
0xf026, 0xf02a, 0xf028, 0xf029, 0xf05f, 0xf02b, 0xf07f, 0xf009,
0xfb51, 0xfb57, 0xfb45, 0xfb52, 0xfb54, 0xfb59, 0xfb55, 0xfb49,
@@ -44,7 +44,7 @@ u_short shift_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short altgr_map[NR_KEYS] = {
+static unsigned short altgr_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf040, 0xf200, 0xf024, 0xf200, 0xf200,
0xf07b, 0xf05b, 0xf05d, 0xf07d, 0xf05c, 0xf200, 0xf200, 0xf200,
0xfb71, 0xfb77, 0xf918, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
@@ -63,7 +63,7 @@ u_short altgr_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short ctrl_map[NR_KEYS] = {
+static unsigned short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf000, 0xf01b, 0xf01c, 0xf01d, 0xf01e,
0xf01f, 0xf07f, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf008, 0xf200,
0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
@@ -82,7 +82,7 @@ u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short shift_ctrl_map[NR_KEYS] = {
+static unsigned short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf000, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf200, 0xf200,
0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
@@ -101,7 +101,7 @@ u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short alt_map[NR_KEYS] = {
+static unsigned short alt_map[NR_KEYS] = {
0xf200, 0xf81b, 0xf831, 0xf832, 0xf833, 0xf834, 0xf835, 0xf836,
0xf837, 0xf838, 0xf839, 0xf830, 0xf82d, 0xf83d, 0xf87f, 0xf809,
0xf871, 0xf877, 0xf865, 0xf872, 0xf874, 0xf879, 0xf875, 0xf869,
@@ -120,7 +120,7 @@ u_short alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-u_short ctrl_alt_map[NR_KEYS] = {
+static unsigned short ctrl_alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf811, 0xf817, 0xf805, 0xf812, 0xf814, 0xf819, 0xf815, 0xf809,
@@ -224,40 +224,40 @@ char *func_table[MAX_NR_FUNC] = {
};
struct kbdiacruc accent_table[MAX_DIACR] = {
- {'`', 'A', 0300}, {'`', 'a', 0340},
- {'\'', 'A', 0301}, {'\'', 'a', 0341},
- {'^', 'A', 0302}, {'^', 'a', 0342},
- {'~', 'A', 0303}, {'~', 'a', 0343},
- {'"', 'A', 0304}, {'"', 'a', 0344},
- {'O', 'A', 0305}, {'o', 'a', 0345},
- {'0', 'A', 0305}, {'0', 'a', 0345},
- {'A', 'A', 0305}, {'a', 'a', 0345},
- {'A', 'E', 0306}, {'a', 'e', 0346},
- {',', 'C', 0307}, {',', 'c', 0347},
- {'`', 'E', 0310}, {'`', 'e', 0350},
- {'\'', 'E', 0311}, {'\'', 'e', 0351},
- {'^', 'E', 0312}, {'^', 'e', 0352},
- {'"', 'E', 0313}, {'"', 'e', 0353},
- {'`', 'I', 0314}, {'`', 'i', 0354},
- {'\'', 'I', 0315}, {'\'', 'i', 0355},
- {'^', 'I', 0316}, {'^', 'i', 0356},
- {'"', 'I', 0317}, {'"', 'i', 0357},
- {'-', 'D', 0320}, {'-', 'd', 0360},
- {'~', 'N', 0321}, {'~', 'n', 0361},
- {'`', 'O', 0322}, {'`', 'o', 0362},
- {'\'', 'O', 0323}, {'\'', 'o', 0363},
- {'^', 'O', 0324}, {'^', 'o', 0364},
- {'~', 'O', 0325}, {'~', 'o', 0365},
- {'"', 'O', 0326}, {'"', 'o', 0366},
- {'/', 'O', 0330}, {'/', 'o', 0370},
- {'`', 'U', 0331}, {'`', 'u', 0371},
- {'\'', 'U', 0332}, {'\'', 'u', 0372},
- {'^', 'U', 0333}, {'^', 'u', 0373},
- {'"', 'U', 0334}, {'"', 'u', 0374},
- {'\'', 'Y', 0335}, {'\'', 'y', 0375},
- {'T', 'H', 0336}, {'t', 'h', 0376},
- {'s', 's', 0337}, {'"', 'y', 0377},
- {'s', 'z', 0337}, {'i', 'j', 0377},
+ {'`', 'A', 0x00c0}, {'`', 'a', 0x00e0},
+ {'\'', 'A', 0x00c1}, {'\'', 'a', 0x00e1},
+ {'^', 'A', 0x00c2}, {'^', 'a', 0x00e2},
+ {'~', 'A', 0x00c3}, {'~', 'a', 0x00e3},
+ {'"', 'A', 0x00c4}, {'"', 'a', 0x00e4},
+ {'O', 'A', 0x00c5}, {'o', 'a', 0x00e5},
+ {'0', 'A', 0x00c5}, {'0', 'a', 0x00e5},
+ {'A', 'A', 0x00c5}, {'a', 'a', 0x00e5},
+ {'A', 'E', 0x00c6}, {'a', 'e', 0x00e6},
+ {',', 'C', 0x00c7}, {',', 'c', 0x00e7},
+ {'`', 'E', 0x00c8}, {'`', 'e', 0x00e8},
+ {'\'', 'E', 0x00c9}, {'\'', 'e', 0x00e9},
+ {'^', 'E', 0x00ca}, {'^', 'e', 0x00ea},
+ {'"', 'E', 0x00cb}, {'"', 'e', 0x00eb},
+ {'`', 'I', 0x00cc}, {'`', 'i', 0x00ec},
+ {'\'', 'I', 0x00cd}, {'\'', 'i', 0x00ed},
+ {'^', 'I', 0x00ce}, {'^', 'i', 0x00ee},
+ {'"', 'I', 0x00cf}, {'"', 'i', 0x00ef},
+ {'-', 'D', 0x00d0}, {'-', 'd', 0x00f0},
+ {'~', 'N', 0x00d1}, {'~', 'n', 0x00f1},
+ {'`', 'O', 0x00d2}, {'`', 'o', 0x00f2},
+ {'\'', 'O', 0x00d3}, {'\'', 'o', 0x00f3},
+ {'^', 'O', 0x00d4}, {'^', 'o', 0x00f4},
+ {'~', 'O', 0x00d5}, {'~', 'o', 0x00f5},
+ {'"', 'O', 0x00d6}, {'"', 'o', 0x00f6},
+ {'/', 'O', 0x00d8}, {'/', 'o', 0x00f8},
+ {'`', 'U', 0x00d9}, {'`', 'u', 0x00f9},
+ {'\'', 'U', 0x00da}, {'\'', 'u', 0x00fa},
+ {'^', 'U', 0x00db}, {'^', 'u', 0x00fb},
+ {'"', 'U', 0x00dc}, {'"', 'u', 0x00fc},
+ {'\'', 'Y', 0x00dd}, {'\'', 'y', 0x00fd},
+ {'T', 'H', 0x00de}, {'t', 'h', 0x00fe},
+ {'s', 's', 0x00df}, {'"', 'y', 0x00ff},
+ {'s', 'z', 0x00df}, {'i', 'j', 0x00ff},
};
unsigned int accent_table_size = 68;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 52922d21a49f..77638629c562 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -131,6 +131,9 @@ static const unsigned char max_vals[] = {
static const int NR_TYPES = ARRAY_SIZE(max_vals);
+static void kbd_bh(struct tasklet_struct *unused);
+static DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh);
+
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
@@ -372,6 +375,12 @@ static void to_utf8(struct vc_data *vc, uint c)
}
}
+/* FIXME: review locking for vt.c callers */
+static void set_leds(void)
+{
+ tasklet_schedule(&keyboard_tasklet);
+}
+
/*
* Called after returning from RAW mode or when changing consoles - recompute
* shift_down[] and shift_state from key_down[] maybe called when keymap is
@@ -401,9 +410,12 @@ static void do_compute_shiftstate(void)
}
/* We still have to export this method to vt.c */
-void compute_shiftstate(void)
+void vt_set_leds_compute_shiftstate(void)
{
unsigned long flags;
+
+ set_leds();
+
spin_lock_irqsave(&kbd_event_lock, flags);
do_compute_shiftstate();
spin_unlock_irqrestore(&kbd_event_lock, flags);
@@ -1233,7 +1245,7 @@ void vt_kbd_con_stop(int console)
* handle the scenario when keyboard handler is not registered yet
* but we already getting updates from the VT to update led state.
*/
-static void kbd_bh(unsigned long dummy)
+static void kbd_bh(struct tasklet_struct *unused)
{
unsigned int leds;
unsigned long flags;
@@ -1249,8 +1261,6 @@ static void kbd_bh(unsigned long dummy)
}
}
-DECLARE_TASKLET_DISABLED_OLD(keyboard_tasklet, kbd_bh);
-
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index d04a162939a4..284b07224c55 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1036,8 +1036,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
}
set_cursor(vc);
if (is_switch) {
- set_leds();
- compute_shiftstate();
+ vt_set_leds_compute_shiftstate();
notify_update(vc);
}
}
@@ -4584,16 +4583,8 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
if (op->data && font.charcount > op->charcount)
rc = -ENOSPC;
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- if (font.width > op->width || font.height > op->height)
- rc = -ENOSPC;
- } else {
- if (font.width != 8)
- rc = -EIO;
- else if ((op->height && font.height > op->height) ||
- font.height > 32)
- rc = -ENOSPC;
- }
+ if (font.width > op->width || font.height > op->height)
+ rc = -ENOSPC;
if (rc)
goto out;
@@ -4621,7 +4612,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
return -EINVAL;
if (op->charcount > 512)
return -EINVAL;
- if (op->width <= 0 || op->width > 32 || op->height > 32)
+ if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32)
return -EINVAL;
size = (op->width+7)/8 * 32 * op->charcount;
if (size > max_font_size)
@@ -4631,31 +4622,6 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
if (IS_ERR(font.data))
return PTR_ERR(font.data);
- if (!op->height) { /* Need to guess font height [compat] */
- int h, i;
- u8 *charmap = font.data;
-
- /*
- * If from KDFONTOP ioctl, don't allow things which can be done
- * in userland,so that we can get rid of this soon
- */
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- kfree(font.data);
- return -EINVAL;
- }
-
- for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++)
- if (charmap[32*i+h-1])
- goto nonzero;
-
- kfree(font.data);
- return -EINVAL;
-
- nonzero:
- op->height = h;
- }
-
font.charcount = op->charcount;
font.width = op->width;
font.height = op->height;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 3813c40f1b48..89aeaf3c1bca 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -484,70 +484,6 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0;
}
-static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
- struct consolefontdesc __user *user_cfd,
- struct console_font_op *op)
-{
- struct consolefontdesc cfdarg;
- int i;
-
- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc)))
- return -EFAULT;
-
- switch (cmd) {
- case PIO_FONTX:
- op->op = KD_FONT_OP_SET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = cfdarg.chardata;
- return con_font_op(vc, op);
-
- case GIO_FONTX:
- op->op = KD_FONT_OP_GET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = cfdarg.chardata;
- i = con_font_op(vc, op);
- if (i)
- return i;
- cfdarg.charheight = op->height;
- cfdarg.charcount = op->charcount;
- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
- return -EFAULT;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
-{
- int ret;
-
- if (__is_defined(BROKEN_GRAPHICS_PROGRAMS)) {
- /*
- * With BROKEN_GRAPHICS_PROGRAMS defined, the default font is
- * not saved.
- */
- return -ENOSYS;
- }
-
- op->op = KD_FONT_OP_SET_DEFAULT;
- op->data = NULL;
- ret = con_font_op(vc, op);
- if (ret)
- return ret;
-
- console_lock();
- con_set_default_unimap(vc);
- console_unlock();
-
- return 0;
-}
-
static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
bool perm, struct vc_data *vc)
{
@@ -572,29 +508,7 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
bool perm)
{
- struct console_font_op op; /* used in multiple places here */
-
switch (cmd) {
- case PIO_FONT:
- if (!perm)
- return -EPERM;
- op.op = KD_FONT_OP_SET;
- op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
- op.width = 8;
- op.height = 0;
- op.charcount = 256;
- op.data = up;
- return con_font_op(vc, &op);
-
- case GIO_FONT:
- op.op = KD_FONT_OP_GET;
- op.flags = KD_FONT_FLAG_OLD;
- op.width = 8;
- op.height = 32;
- op.charcount = 256;
- op.data = up;
- return con_font_op(vc, &op);
-
case PIO_CMAP:
if (!perm)
return -EPERM;
@@ -603,20 +517,6 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
case GIO_CMAP:
return con_get_cmap(up);
- case PIO_FONTX:
- if (!perm)
- return -EPERM;
-
- fallthrough;
- case GIO_FONTX:
- return do_fontx_ioctl(vc, cmd, up, &op);
-
- case PIO_FONTRESET:
- if (!perm)
- return -EPERM;
-
- return vt_io_fontreset(vc, &op);
-
case PIO_SCRNMAP:
if (!perm)
return -EPERM;
@@ -1030,8 +930,7 @@ void reset_vc(struct vc_data *vc)
put_pid(vc->vt_pid);
vc->vt_pid = NULL;
vc->vt_newvt = -1;
- if (!in_interrupt()) /* Via keyboard.c:SAK() - akpm */
- reset_palette(vc);
+ reset_palette(vc);
}
void vc_SAK(struct work_struct *work)
@@ -1059,54 +958,6 @@ void vc_SAK(struct work_struct *work)
#ifdef CONFIG_COMPAT
-struct compat_consolefontdesc {
- unsigned short charcount; /* characters in font (256 or 512) */
- unsigned short charheight; /* scan lines per character (1-32) */
- compat_caddr_t chardata; /* font data in expanded form */
-};
-
-static inline int
-compat_fontx_ioctl(struct vc_data *vc, int cmd,
- struct compat_consolefontdesc __user *user_cfd,
- int perm, struct console_font_op *op)
-{
- struct compat_consolefontdesc cfdarg;
- int i;
-
- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc)))
- return -EFAULT;
-
- switch (cmd) {
- case PIO_FONTX:
- if (!perm)
- return -EPERM;
- op->op = KD_FONT_OP_SET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = compat_ptr(cfdarg.chardata);
- return con_font_op(vc, op);
-
- case GIO_FONTX:
- op->op = KD_FONT_OP_GET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = compat_ptr(cfdarg.chardata);
- i = con_font_op(vc, op);
- if (i)
- return i;
- cfdarg.charheight = op->height;
- cfdarg.charcount = op->charcount;
- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc)))
- return -EFAULT;
- return 0;
- }
- return -EINVAL;
-}
-
struct compat_console_font_op {
compat_uint_t op; /* operation code KD_FONT_OP_* */
compat_uint_t flags; /* KD_FONT_FLAG_* */
@@ -1183,9 +1034,6 @@ long vt_compat_ioctl(struct tty_struct *tty,
/*
* these need special handlers for incompatible data structures
*/
- case PIO_FONTX:
- case GIO_FONTX:
- return compat_fontx_ioctl(vc, cmd, up, perm, &op);
case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc);
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index b8e44d16279f..c7d681fef198 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -92,7 +92,7 @@ static int probe(struct pci_dev *pdev,
gdev->info.version = DRIVER_VERSION;
gdev->info.release = release;
gdev->pdev = pdev;
- if (pdev->irq) {
+ if (pdev->irq && (pdev->irq != IRQ_NOTCONNECTED)) {
gdev->info.irq = pdev->irq;
gdev->info.irq_flags = IRQF_SHARED;
gdev->info.handler = irqhandler;
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index ba5706ccc188..3e2cc95b7b0b 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -13,7 +13,9 @@ obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_ISP1760) += isp1760/
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns3/
obj-$(CONFIG_USB_CDNS3) += cdns3/
+obj-$(CONFIG_USB_CDNSP_PCI) += cdns3/
obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_USB_MTU3) += mtu3/
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index 6b6b04a3fe0f..6332a6b5dce6 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -76,7 +76,7 @@ struct c67x00_hcd {
u16 next_td_addr;
u16 next_buf_addr;
- struct tasklet_struct tasklet;
+ struct work_struct work;
struct completion endpoint_disable;
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index e65f1a0ae80b..c7d3e907be81 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1123,24 +1123,26 @@ static void c67x00_do_work(struct c67x00_hcd *c67x00)
/* -------------------------------------------------------------------------- */
-static void c67x00_sched_tasklet(struct tasklet_struct *t)
+static void c67x00_sched_work(struct work_struct *work)
{
- struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
+ struct c67x00_hcd *c67x00;
+
+ c67x00 = container_of(work, struct c67x00_hcd, work);
c67x00_do_work(c67x00);
}
void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
- tasklet_hi_schedule(&c67x00->tasklet);
+ queue_work(system_highpri_wq, &c67x00->work);
}
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
- tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
+ INIT_WORK(&c67x00->work, c67x00_sched_work);
return 0;
}
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
- tasklet_kill(&c67x00->tasklet);
+ cancel_work_sync(&c67x00->work);
}
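
/*
 * Editor's note: the tasklet-to-workqueue conversion above follows the
 * usual mapping: tasklet_setup() -> INIT_WORK(), tasklet_hi_schedule()
 * -> queue_work(system_highpri_wq, ...), and tasklet_kill() ->
 * cancel_work_sync(), which, like tasklet_kill(), waits for a running
 * handler to finish.
 */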
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 84716d216ae5..b98ca0a1352a 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -1,14 +1,28 @@
-config USB_CDNS3
- tristate "Cadence USB3 Dual-Role Controller"
+config USB_CDNS_SUPPORT
+ tristate "Cadence USB Support"
depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
select USB_XHCI_PLATFORM if USB_XHCI_HCD
select USB_ROLE_SWITCH
help
+ Say Y here if your system has a Cadence USBSS or USBSSP
+ dual-role controller.
+ It supports: dual-role switch, Host-only, and Peripheral-only.
+
+config USB_CDNS_HOST
+ bool
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNS3
+ tristate "Cadence USB3 Dual-Role Controller"
+ depends on USB_CDNS_SUPPORT
+ help
Say Y here if your system has a Cadence USB3 dual-role controller.
It supports: dual-role switch, Host-only, and Peripheral-only.
If you choose to build this driver as a dynamically linked
module, the module will be called cdns3.ko.
+endif
if USB_CDNS3
@@ -25,6 +39,7 @@ config USB_CDNS3_GADGET
config USB_CDNS3_HOST
bool "Cadence USB3 host controller"
depends on USB=y || USB=USB_CDNS3
+ select USB_CDNS_HOST
help
Say Y here to enable host controller functionality of the
Cadence driver.
@@ -64,3 +79,44 @@ config USB_CDNS3_IMX
For example, imx8qm and imx8qxp.
endif
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNSP_PCI
+ tristate "Cadence CDNSP Dual-Role Controller"
+ depends on USB_CDNS_SUPPORT && USB_PCI && ACPI
+ help
+ Say Y here if your system has a Cadence CDNSP dual-role controller.
+ It supports: dual-role switch, Host-only, and Peripheral-only.
+
+ If you choose to build this driver as a dynamically linked
+ module, the module will be called cdnsp.ko.
+endif
+
+if USB_CDNSP_PCI
+
+config USB_CDNSP_GADGET
+ bool "Cadence CDNSP device controller"
+ depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
+ help
+ Say Y here to enable device controller functionality of the
+ Cadence CDNSP-DEV driver.
+
+ The Cadence CDNSP device controller in device mode is
+ very similar to the XHCI controller, so some of its
+ algorithms have been taken from the host driver.
+ This controller supports FS, HS, SS and SSP modes;
+ it doesn't support LS.
+
+config USB_CDNSP_HOST
+ bool "Cadence CDNSP host controller"
+ depends on USB=y || USB=USB_CDNSP_PCI
+ select USB_CDNS_HOST
+ help
+ Say Y here to enable host controller functionality of the
+ Cadence driver.
+
+ The host controller is compliant with XHCI, so it uses the
+ standard XHCI driver.
+
+endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index d47e341a6f39..61edb2f89276 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -1,18 +1,43 @@
# SPDX-License-Identifier: GPL-2.0
# define_trace.h needs to know how to find our header
-CFLAGS_trace.o := -I$(src)
+CFLAGS_cdns3-trace.o := -I$(src)
+CFLAGS_cdnsp-trace.o := -I$(src)
-cdns3-y := core.o drd.o
+cdns-usb-common-y := core.o drd.o
+cdns3-y := cdns3-plat.o
-obj-$(CONFIG_USB_CDNS3) += cdns3.o
-cdns3-$(CONFIG_USB_CDNS3_GADGET) += gadget.o ep0.o
+ifeq ($(CONFIG_USB),m)
+obj-m += cdns-usb-common.o
+obj-m += cdns3.o
+else
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns-usb-common.o
+obj-$(CONFIG_USB_CDNS3) += cdns3.o
+endif
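+
+# Editor's note (assumption): when the USB core itself is modular
+# (CONFIG_USB=m), cdns-usb-common and cdns3 must be built as modules
+# too, whatever USB_CDNS_SUPPORT says; hence the explicit ifeq above.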
+
+cdns-usb-common-$(CONFIG_USB_CDNS_HOST) += host.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET) += cdns3-gadget.o cdns3-ep0.o
ifneq ($(CONFIG_USB_CDNS3_GADGET),)
-cdns3-$(CONFIG_TRACING) += trace.o
+cdns3-$(CONFIG_TRACING) += cdns3-trace.o
+endif
+
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+
+cdnsp-udc-pci-y := cdnsp-pci.o
+
+ifdef CONFIG_USB_CDNSP_PCI
+ifeq ($(CONFIG_USB),m)
+obj-m += cdnsp-udc-pci.o
+else
+obj-$(CONFIG_USB_CDNSP_PCI) += cdnsp-udc-pci.o
+endif
endif
-cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
+cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET) += cdnsp-ring.o cdnsp-gadget.o \
+ cdnsp-mem.o cdnsp-ep0.o
-obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
-obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
-obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+ifneq ($(CONFIG_USB_CDNSP_GADGET),)
+cdnsp-udc-pci-$(CONFIG_TRACING) += cdnsp-trace.o
+endif
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/cdns3-debug.h
index a5c6a29e1340..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/cdns3-debug.h
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/cdns3-ep0.c
index d3121a32cc68..9a17802275d5 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/cdns3-ep0.c
@@ -13,8 +13,8 @@
#include <linux/usb/composite.h>
#include <linux/iopoll.h>
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -364,7 +364,7 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
return -EINVAL;
- if (!(ctrl->wIndex & ~USB_DIR_IN))
+ if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
return 0;
index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
@@ -789,7 +789,7 @@ int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
return 0;
}
-const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
.enable = cdns3_gadget_ep0_enable,
.disable = cdns3_gadget_ep0_disable,
.alloc_request = cdns3_gadget_ep_alloc_request,
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 08a4e693c470..582bfeceedb4 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -63,8 +63,8 @@
#include "core.h"
#include "gadget-export.h"
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
#include "drd.h"
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
@@ -1200,7 +1200,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
td_size = DIV_ROUND_UP(request->length,
priv_ep->endpoint.maxpacket);
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
- trb->length = TRB_TDL_SS_SIZE(td_size);
+ trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
else
control |= TRB_TDL_HS_SIZE(td_size);
}
@@ -1247,10 +1247,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
priv_req->trb->control = cpu_to_le32(control);
if (sg_supported) {
- trb->control |= TRB_ISP;
+ trb->control |= cpu_to_le32(TRB_ISP);
/* Don't set chain bit for last TRB */
if (sg_iter < num_trb - 1)
- trb->control |= TRB_CHAIN;
+ trb->control |= cpu_to_le32(TRB_CHAIN);
s = sg_next(s);
}
@@ -1844,7 +1844,7 @@ __must_hold(&priv_dev->lock)
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
struct cdns3_device *priv_dev = data;
- struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
+ struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
irqreturn_t ret = IRQ_NONE;
u32 reg;
@@ -3084,7 +3084,7 @@ static void cdns3_gadget_release(struct device *dev)
kfree(priv_dev);
}
-static void cdns3_gadget_exit(struct cdns3 *cdns)
+static void cdns3_gadget_exit(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
@@ -3117,10 +3117,10 @@ static void cdns3_gadget_exit(struct cdns3 *cdns)
kfree(priv_dev->zlp_buf);
usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
- cdns3_drd_gadget_off(cdns);
+ cdns_drd_gadget_off(cdns);
}
-static int cdns3_gadget_start(struct cdns3 *cdns)
+static int cdns3_gadget_start(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
u32 max_speed;
@@ -3240,7 +3240,7 @@ err1:
return ret;
}
-static int __cdns3_gadget_init(struct cdns3 *cdns)
+static int __cdns3_gadget_init(struct cdns *cdns)
{
int ret = 0;
@@ -3251,7 +3251,7 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
return ret;
}
- cdns3_drd_gadget_on(cdns);
+ cdns_drd_gadget_on(cdns);
pm_runtime_get_sync(cdns->dev);
ret = cdns3_gadget_start(cdns);
@@ -3277,7 +3277,7 @@ err0:
return ret;
}
-static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3296,7 +3296,7 @@ __must_hold(&cdns->lock)
return 0;
}
-static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3311,13 +3311,13 @@ static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
/**
* cdns3_gadget_init - initialize device structure
*
- * @cdns: cdns3 instance
+ * @cdns: cdns instance
*
* This function initializes the gadget.
*/
-int cdns3_gadget_init(struct cdns3 *cdns)
+int cdns3_gadget_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
@@ -3327,7 +3327,7 @@ int cdns3_gadget_init(struct cdns3 *cdns)
rdrv->stop = cdns3_gadget_exit;
rdrv->suspend = cdns3_gadget_suspend;
rdrv->resume = cdns3_gadget_resume;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "gadget";
cdns->roles[USB_ROLE_DEVICE] = rdrv;
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index 21fa461c518e..21fa461c518e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 22a56c4dce67..8f88eec0b0ea 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
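+ /*
+ * Editor's note (assumption): devm_clk_bulk_get() writes the looked-up
+ * clk pointers into this array, so each device instance needs its own
+ * copy rather than the shared static template.
+ */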
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
@@ -214,20 +218,16 @@ err:
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -250,7 +250,7 @@ static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
static int cdns_imx_platform_suspend(struct device *dev,
bool suspend, bool wakeup)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct cdns *cdns = dev_get_drvdata(dev);
struct device *parent = dev->parent;
struct cdns_imx *data = dev_get_drvdata(parent);
void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
new file mode 100644
index 000000000000..4b18e1c6a4bb
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+static int set_phy_power_on(struct cdns *cdns)
+{
+ int ret;
+
+ ret = phy_power_on(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(cdns->usb3_phy);
+ if (ret)
+ phy_power_off(cdns->usb2_phy);
+
+ return ret;
+}
+
+static void set_phy_power_off(struct cdns *cdns)
+{
+ phy_power_off(cdns->usb3_phy);
+ phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_plat_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success, otherwise negative errno
+ */
+static int cdns3_plat_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cdns *cdns;
+ void __iomem *regs;
+ int ret;
+
+ cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+ if (!cdns)
+ return -ENOMEM;
+
+ cdns->dev = dev;
+ cdns->pdata = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, cdns);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
+ if (!res) {
+ dev_err(dev, "missing host IRQ\n");
+ return -ENODEV;
+ }
+
+ cdns->xhci_res[0] = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+ if (!res) {
+ dev_err(dev, "couldn't get xhci resource\n");
+ return -ENXIO;
+ }
+
+ cdns->xhci_res[1] = *res;
+
+ cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+
+ if (cdns->dev_irq < 0)
+ return cdns->dev_irq;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "dev");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ cdns->dev_regs = regs;
+
+ cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+ if (cdns->otg_irq < 0)
+ return cdns->otg_irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+ if (!res) {
+ dev_err(dev, "couldn't get otg resource\n");
+ return -ENXIO;
+ }
+
+ cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
+ cdns->otg_res = *res;
+
+ cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+ if (cdns->wakeup_irq == -EPROBE_DEFER)
+ return cdns->wakeup_irq;
+ else if (cdns->wakeup_irq == 0)
+ return -EINVAL;
+
+ if (cdns->wakeup_irq < 0) {
+ dev_dbg(dev, "couldn't get wakeup irq\n");
+ cdns->wakeup_irq = 0x0;
+ }
+
+ cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+ if (IS_ERR(cdns->usb2_phy))
+ return PTR_ERR(cdns->usb2_phy);
+
+ ret = phy_init(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+ if (IS_ERR(cdns->usb3_phy))
+ return PTR_ERR(cdns->usb3_phy);
+
+ ret = phy_init(cdns->usb3_phy);
+ if (ret)
+ goto err_phy3_init;
+
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ goto err_phy_power_on;
+
+ cdns->gadget_init = cdns3_gadget_init;
+
+ ret = cdns_init(cdns);
+ if (ret)
+ goto err_cdns_init;
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
+ pm_runtime_forbid(dev);
+
+ /*
+ * The controller needs less time between bus and controller suspend,
+ * and we also need a small delay to avoid frequently entering low
+ * power mode.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 20);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+
+err_cdns_init:
+ set_phy_power_off(cdns);
+err_phy_power_on:
+ phy_exit(cdns->usb3_phy);
+err_phy3_init:
+ phy_exit(cdns->usb2_phy);
+
+ return ret;
+}
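+
+/*
+ * Editor's note: the error unwind above is the usual probe-failure
+ * pattern, undoing in reverse order of acquisition: power the PHYs
+ * back off, then phy_exit() the usb3 PHY, then the usb2 PHY.
+ */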
+
+/**
+ * cdns3_plat_remove - unbind drd driver and clean up
+ * @pdev: Pointer to Linux platform device
+ *
+ * Returns 0 on success, otherwise negative errno
+ */
+static int cdns3_plat_remove(struct platform_device *pdev)
+{
+ struct cdns *cdns = platform_get_drvdata(pdev);
+ struct device *dev = cdns->dev;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ cdns_remove(cdns);
+ set_phy_power_off(cdns);
+ phy_exit(cdns->usb2_phy);
+ phy_exit(cdns->usb3_phy);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int cdns3_set_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (cdns->pdata && cdns->pdata->platform_suspend)
+ ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
+
+ return ret;
+}
+
+static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ bool wakeup;
+ unsigned long flags;
+
+ if (cdns->in_lpm)
+ return 0;
+
+ if (PMSG_IS_AUTO(msg))
+ wakeup = true;
+ else
+ wakeup = device_may_wakeup(dev);
+
+ cdns3_set_platform_suspend(cdns->dev, true, wakeup);
+ set_phy_power_off(cdns);
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns->in_lpm = true;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+ return 0;
+}
+
+static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ int ret;
+ unsigned long flags;
+
+ if (!cdns->in_lpm)
+ return 0;
+
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ return ret;
+
+ cdns3_set_platform_suspend(cdns->dev, false, false);
+
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns_resume(cdns, !PMSG_IS_AUTO(msg));
+ cdns->in_lpm = false;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ if (cdns->wakeup_pending) {
+ cdns->wakeup_pending = false;
+ enable_irq(cdns->wakeup_irq);
+ }
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+ return ret;
+}
+
+static int cdns3_plat_runtime_suspend(struct device *dev)
+{
+ return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int cdns3_plat_runtime_resume(struct device *dev)
+{
+ return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_plat_suspend(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+
+ cdns_suspend(cdns);
+
+ return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+}
+
+static int cdns3_plat_resume(struct device *dev)
+{
+ return cdns3_controller_resume(dev, PMSG_RESUME);
+}
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
+ SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
+ cdns3_plat_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cdns3_match[] = {
+ { .compatible = "cdns,usb3" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_cdns3_match);
+#endif
+
+static struct platform_driver cdns3_driver = {
+ .probe = cdns3_plat_probe,
+ .remove = cdns3_plat_remove,
+ .driver = {
+ .name = "cdns-usb3",
+ .of_match_table = of_match_ptr(of_cdns3_match),
+ .pm = &cdns3_pm_ops,
+ },
+};
+
+module_platform_driver(cdns3_driver);
+
+MODULE_ALIAS("platform:cdns3");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 90e246601537..eccb1c766bba 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -214,6 +214,7 @@ static int cdns_ti_remove(struct platform_device *pdev)
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
+ { .compatible = "ti,am64-usb", },
{},
};
MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
diff --git a/drivers/usb/cdns3/trace.c b/drivers/usb/cdns3/cdns3-trace.c
index 459fa72d9c74..b9858acaef02 100644
--- a/drivers/usb/cdns3/trace.c
+++ b/drivers/usb/cdns3/cdns3-trace.c
@@ -8,4 +8,4 @@
*/
#define CREATE_TRACE_POINTS
-#include "trace.h"
+#include "cdns3-trace.h"
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/cdns3-trace.h
index 0a2a3269bfac..8648c7a7a9dd 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/cdns3-trace.h
@@ -19,8 +19,8 @@
#include <asm/byteorder.h>
#include <linux/usb/ch9.h>
#include "core.h"
-#include "gadget.h"
-#include "debug.h"
+#include "cdns3-gadget.h"
+#include "cdns3-debug.h"
#define CDNS3_MSG_MAX 500
@@ -565,6 +565,6 @@ DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled,
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE cdns3-trace
#include <trace/define_trace.h>
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
new file mode 100644
index 000000000000..a8776df2d4e0
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-debug.h
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+#ifndef __LINUX_CDNSP_DEBUG
+#define __LINUX_CDNSP_DEBUG
+
+static inline const char *cdnsp_trb_comp_code_string(u8 status)
+{
+ switch (status) {
+ case COMP_INVALID:
+ return "Invalid";
+ case COMP_SUCCESS:
+ return "Success";
+ case COMP_DATA_BUFFER_ERROR:
+ return "Data Buffer Error";
+ case COMP_BABBLE_DETECTED_ERROR:
+ return "Babble Detected";
+ case COMP_TRB_ERROR:
+ return "TRB Error";
+ case COMP_RESOURCE_ERROR:
+ return "Resource Error";
+ case COMP_NO_SLOTS_AVAILABLE_ERROR:
+ return "No Slots Available Error";
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ return "Invalid Stream Type Error";
+ case COMP_SLOT_NOT_ENABLED_ERROR:
+ return "Slot Not Enabled Error";
+ case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+ return "Endpoint Not Enabled Error";
+ case COMP_SHORT_PACKET:
+ return "Short Packet";
+ case COMP_RING_UNDERRUN:
+ return "Ring Underrun";
+ case COMP_RING_OVERRUN:
+ return "Ring Overrun";
+ case COMP_VF_EVENT_RING_FULL_ERROR:
+ return "VF Event Ring Full Error";
+ case COMP_PARAMETER_ERROR:
+ return "Parameter Error";
+ case COMP_CONTEXT_STATE_ERROR:
+ return "Context State Error";
+ case COMP_EVENT_RING_FULL_ERROR:
+ return "Event Ring Full Error";
+ case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ return "Incompatible Device Error";
+ case COMP_MISSED_SERVICE_ERROR:
+ return "Missed Service Error";
+ case COMP_COMMAND_RING_STOPPED:
+ return "Command Ring Stopped";
+ case COMP_COMMAND_ABORTED:
+ return "Command Aborted";
+ case COMP_STOPPED:
+ return "Stopped";
+ case COMP_STOPPED_LENGTH_INVALID:
+ return "Stopped - Length Invalid";
+ case COMP_STOPPED_SHORT_PACKET:
+ return "Stopped - Short Packet";
+ case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+ return "Max Exit Latency Too Large Error";
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return "Isoch Buffer Overrun";
+ case COMP_EVENT_LOST_ERROR:
+ return "Event Lost Error";
+ case COMP_UNDEFINED_ERROR:
+ return "Undefined Error";
+ case COMP_INVALID_STREAM_ID_ERROR:
+ return "Invalid Stream ID Error";
+ default:
+ return "Unknown!!";
+ }
+}
+
+static inline const char *cdnsp_trb_type_string(u8 type)
+{
+ switch (type) {
+ case TRB_NORMAL:
+ return "Normal";
+ case TRB_SETUP:
+ return "Setup Stage";
+ case TRB_DATA:
+ return "Data Stage";
+ case TRB_STATUS:
+ return "Status Stage";
+ case TRB_ISOC:
+ return "Isoch";
+ case TRB_LINK:
+ return "Link";
+ case TRB_EVENT_DATA:
+ return "Event Data";
+ case TRB_TR_NOOP:
+ return "No-Op";
+ case TRB_ENABLE_SLOT:
+ return "Enable Slot Command";
+ case TRB_DISABLE_SLOT:
+ return "Disable Slot Command";
+ case TRB_ADDR_DEV:
+ return "Address Device Command";
+ case TRB_CONFIG_EP:
+ return "Configure Endpoint Command";
+ case TRB_EVAL_CONTEXT:
+ return "Evaluate Context Command";
+ case TRB_RESET_EP:
+ return "Reset Endpoint Command";
+ case TRB_STOP_RING:
+ return "Stop Ring Command";
+ case TRB_SET_DEQ:
+ return "Set TR Dequeue Pointer Command";
+ case TRB_RESET_DEV:
+ return "Reset Device Command";
+ case TRB_FORCE_HEADER:
+ return "Force Header Command";
+ case TRB_CMD_NOOP:
+ return "No-Op Command";
+ case TRB_TRANSFER:
+ return "Transfer Event";
+ case TRB_COMPLETION:
+ return "Command Completion Event";
+ case TRB_PORT_STATUS:
+ return "Port Status Change Event";
+ case TRB_HC_EVENT:
+ return "Device Controller Event";
+ case TRB_MFINDEX_WRAP:
+ return "MFINDEX Wrap Event";
+ case TRB_ENDPOINT_NRDY:
+ return "Endpoint Not ready";
+ case TRB_HALT_ENDPOINT:
+ return "Halt Endpoint";
+ case TRB_FLUSH_ENDPOINT:
+ return "FLush Endpoint";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *cdnsp_ring_type_string(enum cdnsp_ring_type type)
+{
+ switch (type) {
+ case TYPE_CTRL:
+ return "CTRL";
+ case TYPE_ISOC:
+ return "ISOC";
+ case TYPE_BULK:
+ return "BULK";
+ case TYPE_INTR:
+ return "INTR";
+ case TYPE_STREAM:
+ return "STREAM";
+ case TYPE_COMMAND:
+ return "CMD";
+ case TYPE_EVENT:
+ return "EVENT";
+ }
+
+ return "UNKNOWN";
+}
+
+static inline char *cdnsp_slot_state_string(u32 state)
+{
+ switch (state) {
+ case SLOT_STATE_ENABLED:
+ return "enabled/disabled";
+ case SLOT_STATE_DEFAULT:
+ return "default";
+ case SLOT_STATE_ADDRESSED:
+ return "addressed";
+ case SLOT_STATE_CONFIGURED:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
+static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+ u32 field1, u32 field2, u32 field3)
+{
+ int ep_id = TRB_TO_EP_INDEX(field3) - 1;
+ int type = TRB_FIELD_TO_TYPE(field3);
+ unsigned int ep_num;
+ int ret = 0;
+ u32 temp;
+
+ ep_num = DIV_ROUND_UP(ep_id, 2);
+
+ switch (type) {
+ case TRB_LINK:
+ ret += snprintf(str, size,
+ "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_TC ? 'T' : 't',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_TRANSFER:
+ case TRB_COMPLETION:
+ case TRB_PORT_STATUS:
+ case TRB_HC_EVENT:
+ ret += snprintf(str, size,
+ "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+ " len %ld slot %ld flags %c:%c",
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ cdnsp_trb_type_string(type), field1, field0,
+ cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+ EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+ field3 & EVENT_DATA ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_MFINDEX_WRAP:
+ ret += snprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_SETUP:
+ ret += snprintf(str, size,
+ "type '%s' bRequestType %02x bRequest %02x "
+ "wValue %02x%02x wIndex %02x%02x wLength %d "
+ "length %ld TD size %ld intr %ld Setup ID %ld "
+ "flags %c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field0 & 0xff,
+ (field0 & 0xff00) >> 8,
+ (field0 & 0xff000000) >> 24,
+ (field0 & 0xff0000) >> 16,
+ (field1 & 0xff00) >> 8,
+ field1 & 0xff,
+ (field1 & 0xff000000) >> 16 |
+ (field1 & 0xff0000) >> 16,
+ TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ TRB_SETUPID_TO_TYPE(field3),
+ field3 & TRB_IDT ? 'D' : 'd',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_DATA:
+ ret += snprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld TD size %ld "
+ "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_IDT ? 'D' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_STATUS:
+ ret += snprintf(str, size,
+ "Buffer %08x%08x length %ld TD size %ld intr"
+ "%ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_NORMAL:
+ case TRB_ISOC:
+ case TRB_EVENT_DATA:
+ case TRB_TR_NOOP:
+ ret += snprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld "
+ "TD size %ld intr %ld "
+ "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_BEI ? 'B' : 'b',
+ field3 & TRB_IDT ? 'T' : 't',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c',
+ !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+ break;
+ case TRB_CMD_NOOP:
+ case TRB_ENABLE_SLOT:
+ ret += snprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_DISABLE_SLOT:
+ ret += snprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_ADDR_DEV:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_BSR ? 'B' : 'b',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_CONFIG_EP:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_DC ? 'D' : 'd',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_EVAL_CONTEXT:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_RESET_EP:
+ case TRB_HALT_ENDPOINT:
+ case TRB_FLUSH_ENDPOINT:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_STOP_RING:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ TRB_TO_SLOT_ID(field3),
+ TRB_TO_SUSPEND_PORT(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_SET_DEQ:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_STREAM_ID(field2),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_RESET_DEV:
+ ret += snprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_ENDPOINT_NRDY:
+ temp = TRB_TO_HOST_STREAM(field2);
+
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), temp,
+ temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+ temp == STREAM_REJECTED ? "(REJECTED)" : "",
+ TRB_TO_DEV_STREAM(field0),
+ field3 & TRB_STAT ? 'S' : 's',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ default:
+ ret += snprintf(str, size,
+ "type '%s' -> raw %08x %08x %08x %08x",
+ cdnsp_trb_type_string(type),
+ field0, field1, field2, field3);
+ }
+
+ return str;
+}
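+
+/*
+ * Editor's sketch (illustration only): a debug print would feed the four
+ * little-endian TRB words through the decoder. The 500-byte buffer and
+ * the trb->field[] layout are assumptions borrowed from the cdns3 side:
+ *
+ *	char str[500];
+ *
+ *	pr_debug("%s\n", cdnsp_decode_trb(str, sizeof(str),
+ *					  le32_to_cpu(trb->field[0]),
+ *					  le32_to_cpu(trb->field[1]),
+ *					  le32_to_cpu(trb->field[2]),
+ *					  le32_to_cpu(trb->field[3])));
+ */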
+
+static inline const char *cdnsp_decode_slot_context(u32 info, u32 info2,
+ u32 int_target, u32 state)
+{
+ static char str[1024];
+ int ret = 0;
+ u32 speed;
+ char *s;
+
+ speed = info & DEV_SPEED;
+
+ switch (speed) {
+ case SLOT_SPEED_FS:
+ s = "full-speed";
+ break;
+ case SLOT_SPEED_HS:
+ s = "high-speed";
+ break;
+ case SLOT_SPEED_SS:
+ s = "super-speed";
+ break;
+ case SLOT_SPEED_SSP:
+ s = "super-speed plus";
+ break;
+ default:
+ s = "UNKNOWN speed";
+ }
+
+ ret = sprintf(str, "%s Ctx Entries %d",
+ s, (info & LAST_CTX_MASK) >> 27);
+
+ ret += sprintf(str + ret, " [Intr %ld] Addr %ld State %s",
+ GET_INTR_TARGET(int_target), state & DEV_ADDR_MASK,
+ cdnsp_slot_state_string(GET_SLOT_STATE(state)));
+
+ return str;
+}
+
+static inline const char *cdnsp_portsc_link_state_string(u32 portsc)
+{
+ switch (portsc & PORT_PLS_MASK) {
+ case XDEV_U0:
+ return "U0";
+ case XDEV_U1:
+ return "U1";
+ case XDEV_U2:
+ return "U2";
+ case XDEV_U3:
+ return "U3";
+ case XDEV_DISABLED:
+ return "Disabled";
+ case XDEV_RXDETECT:
+ return "RxDetect";
+ case XDEV_INACTIVE:
+ return "Inactive";
+ case XDEV_POLLING:
+ return "Polling";
+ case XDEV_RECOVERY:
+ return "Recovery";
+ case XDEV_HOT_RESET:
+ return "Hot Reset";
+ case XDEV_COMP_MODE:
+ return "Compliance mode";
+ case XDEV_TEST_MODE:
+ return "Test mode";
+ case XDEV_RESUME:
+ return "Resume";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static inline const char *cdnsp_decode_portsc(char *str, size_t size,
+ u32 portsc)
+{
+ int ret;
+
+ ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+ portsc & PORT_POWER ? "Powered" : "Powered-off",
+ portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+ portsc & PORT_PED ? "Enabled" : "Disabled",
+ cdnsp_portsc_link_state_string(portsc),
+ DEV_PORT_SPEED(portsc));
+
+ if (portsc & PORT_RESET)
+ ret += snprintf(str + ret, size - ret, "In-Reset ");
+
+ ret += snprintf(str + ret, size - ret, "Change: ");
+ if (portsc & PORT_CSC)
+ ret += snprintf(str + ret, size - ret, "CSC ");
+ if (portsc & PORT_WRC)
+ ret += snprintf(str + ret, size - ret, "WRC ");
+ if (portsc & PORT_RC)
+ ret += snprintf(str + ret, size - ret, "PRC ");
+ if (portsc & PORT_PLC)
+ ret += snprintf(str + ret, size - ret, "PLC ");
+ if (portsc & PORT_CEC)
+ ret += snprintf(str + ret, size - ret, "CEC ");
+ ret += snprintf(str + ret, size - ret, "Wake: ");
+ if (portsc & PORT_WKCONN_E)
+ ret += snprintf(str + ret, size - ret, "WCE ");
+ if (portsc & PORT_WKDISC_E)
+ ret += snprintf(str + ret, size - ret, "WDE ");
+
+ return str;
+}
+
+static inline const char *cdnsp_ep_state_string(u8 state)
+{
+ switch (state) {
+ case EP_STATE_DISABLED:
+ return "disabled";
+ case EP_STATE_RUNNING:
+ return "running";
+ case EP_STATE_HALTED:
+ return "halted";
+ case EP_STATE_STOPPED:
+ return "stopped";
+ case EP_STATE_ERROR:
+ return "error";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *cdnsp_ep_type_string(u8 type)
+{
+ switch (type) {
+ case ISOC_OUT_EP:
+ return "Isoc OUT";
+ case BULK_OUT_EP:
+ return "Bulk OUT";
+ case INT_OUT_EP:
+ return "Int OUT";
+ case CTRL_EP:
+ return "Ctrl";
+ case ISOC_IN_EP:
+ return "Isoc IN";
+ case BULK_IN_EP:
+ return "Bulk IN";
+ case INT_IN_EP:
+ return "Int IN";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
+ u32 info, u32 info2,
+ u64 deq, u32 tx_info)
+{
+ u8 max_pstr, ep_state, interval, ep_type, burst, cerr, mult;
+ bool lsa, hid;
+ u16 maxp, avg;
+ u32 esit;
+ int ret;
+
+ esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+ CTX_TO_MAX_ESIT_PAYLOAD_LO(tx_info);
+
+ ep_state = info & EP_STATE_MASK;
+ max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
+ interval = CTX_TO_EP_INTERVAL(info);
+ mult = CTX_TO_EP_MULT(info) + 1;
+ lsa = !!(info & EP_HAS_LSA);
+
+ cerr = (info2 & (3 << 1)) >> 1;
+ ep_type = CTX_TO_EP_TYPE(info2);
+ hid = !!(info2 & (1 << 7));
+ burst = CTX_TO_MAX_BURST(info2);
+ maxp = MAX_PACKET_DECODED(info2);
+
+ avg = EP_AVG_TRB_LENGTH(tx_info);
+
+ ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
+ cdnsp_ep_state_string(ep_state), mult,
+ max_pstr, lsa ? "LSA " : "");
+
+ ret += snprintf(str + ret, size - ret,
+ "interval %d us max ESIT payload %d CErr %d ",
+ (1 << interval) * 125, esit, cerr);
+
+ ret += snprintf(str + ret, size - ret,
+ "Type %s %sburst %d maxp %d deq %016llx ",
+			cdnsp_ep_type_string(ep_type), hid ? "HID " : "",
+ burst, maxp, deq);
+
+ ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+
+ return str;
+}
+
+#endif /* __LINUX_CDNSP_DEBUG */
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
new file mode 100644
index 000000000000..9b8325f82499
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/list.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
+{
+ struct cdnsp_request *preq;
+ struct cdnsp_ep *pep;
+
+ pep = &pdev->eps[0];
+ preq = next_request(&pep->pending_list);
+
+ if (pdev->three_stage_setup) {
+ cdnsp_halt_endpoint(pdev, pep, true);
+
+ if (preq)
+ cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
+ } else {
+ pep->ep_state |= EP0_HALTED_STATUS;
+
+ if (preq)
+ list_del(&preq->list);
+
+ cdnsp_status_stage(pdev);
+ }
+}
+
+static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&pdev->lock);
+ ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
+ spin_lock(&pdev->lock);
+
+ return ret;
+}
+
+static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ u32 cfg;
+ int ret;
+
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (state) {
+ case USB_STATE_ADDRESS:
+ trace_cdnsp_ep0_set_config("from Address state");
+ break;
+ case USB_STATE_CONFIGURED:
+ trace_cdnsp_ep0_set_config("from Configured state");
+ break;
+ default:
+ dev_err(pdev->dev, "Set Configuration - bad device state\n");
+ return -EINVAL;
+ }
+
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret)
+ return ret;
+
+ if (!cfg)
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+
+ return 0;
+}
+
+static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ struct cdnsp_slot_ctx *slot_ctx;
+ unsigned int slot_state;
+ int ret;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl->wValue);
+
+ if (addr > 127) {
+ dev_err(pdev->dev, "Invalid device address %d\n", addr);
+ return -EINVAL;
+ }
+
+	if (state == USB_STATE_CONFIGURED) {
+		dev_err(pdev->dev, "Can't Set Address from Configured State\n");
+		return -EINVAL;
+	}
+
+	pdev->device_address = addr;
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
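+	/* A new SET_ADDRESS while already addressed requires a device reset. */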
+ if (slot_state == SLOT_STATE_ADDRESSED)
+ cdnsp_reset_device(pdev);
+
+	/* Set device address. */
+ ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
+ if (ret)
+ return ret;
+
+ if (addr)
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+
+ return 0;
+}
+
+int cdnsp_status_stage(struct cdnsp_device *pdev)
+{
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+ pdev->ep0_preq.request.length = 0;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
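+/*
+ * Convert the wIndex from a control request into an index into the driver's
+ * endpoint array: ep0 maps to 0, epXout to (X * 2) - 1 and epXin to X * 2.
+ */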
+static int cdnsp_w_index_to_ep_index(u16 wIndex)
+{
+ if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+ return 0;
+
+ return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
+ (wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
+}
+
+static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct cdnsp_ep *pep;
+ __le16 *response;
+ int ep_sts = 0;
+ u16 status = 0;
+ u32 recipient;
+
+ recipient = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recipient) {
+ case USB_RECIP_DEVICE:
+ status = pdev->gadget.is_selfpowered;
+ status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
+ if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+ status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
+ status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ /*
+ * Function Remote Wake Capable D0
+ * Function Remote Wakeup D1
+ */
+ return cdnsp_ep0_delegate_req(pdev, ctrl);
+ case USB_RECIP_ENDPOINT:
+ ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
+ pep = &pdev->eps[ep_sts];
+ ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+ /* check if endpoint is stalled */
+ if (ep_sts == EP_STATE_HALTED)
+ status = BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ response = (__le16 *)pdev->setup_buf;
+ *response = cpu_to_le16(status);
+
+ pdev->ep0_preq.request.length = sizeof(*response);
+ pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
+{
+ u32 temp;
+
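+	/* The test mode selector occupies bits 31:28 of PORTPMSC. */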
+ temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
+ temp |= PORT_TEST_MODE(pdev->test_mode);
+ writel(temp, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+ u16 tmode;
+
+ state = pdev->gadget.state;
+ speed = pdev->gadget.speed;
+
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ pdev->may_wakeup = !!set;
+ trace_cdnsp_may_wakeup(set);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+ return -EINVAL;
+
+ pdev->u1_allowed = !!set;
+ trace_cdnsp_u1(set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+ return -EINVAL;
+
+ pdev->u2_allowed = !!set;
+ trace_cdnsp_u2(set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ return -EINVAL;
+ case USB_DEVICE_TEST_MODE:
+ if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+ return -EINVAL;
+
+ tmode = le16_to_cpu(ctrl->wIndex);
+
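+		/* Test selector is in the high byte; low byte must be zero. */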
+ if (!set || (tmode & 0xff) != 0)
+ return -EINVAL;
+
+ tmode = tmode >> 8;
+
+ if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
+ return -EINVAL;
+
+ pdev->test_mode = tmode;
+
+ /*
+ * Test mode must be set before Status Stage but controller
+ * will start testing sequence after Status Stage.
+ */
+ cdnsp_enter_test_mode(pdev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ u16 wValue, wIndex;
+ int ret;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * Remote wakeup is enabled when any function within a device
+ * is enabled for function remote wakeup.
+ */
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+ pdev->may_wakeup++;
+		else if (pdev->may_wakeup > 0)
+			pdev->may_wakeup--;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ struct cdnsp_ep *pep;
+ u16 wValue;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ if (!set && (pep->ep_state & EP_WEDGE)) {
+ /* Resets Sequence Number */
+ cdnsp_halt_endpoint(pdev, pep, 0);
+ cdnsp_halt_endpoint(pdev, pep, 1);
+ break;
+ }
+
+ return cdnsp_halt_endpoint(pdev, pep, set);
+ default:
+ dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
+ case USB_RECIP_INTERFACE:
+ return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
+ case USB_RECIP_ENDPOINT:
+ return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ u16 wLength;
+
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ wLength = le16_to_cpu(ctrl->wLength);
+
+ if (wLength != 6) {
+ dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
+ wLength);
+ return -EINVAL;
+ }
+
+ /*
+ * To handle Set SEL we need to receive 6 bytes from Host. So let's
+ * queue a usb_request for 6 bytes.
+ */
+ pdev->ep0_preq.request.length = 6;
+ pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
+ return -EINVAL;
+
+ pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
+
+ return 0;
+}
+
+static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ ret = cdnsp_ep0_handle_status(pdev, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ ret = cdnsp_ep0_set_address(pdev, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ ret = cdnsp_ep0_set_config(pdev, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ ret = cdnsp_ep0_set_sel(pdev, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+ break;
+ case USB_REQ_SET_INTERFACE:
+ /*
+ * Add request into pending list to block sending status stage
+ * by libcomposite.
+ */
+ list_add_tail(&pdev->ep0_preq.list,
+ &pdev->ep0_preq.pep->pending_list);
+
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret == -EBUSY)
+ ret = 0;
+
+ list_del(&pdev->ep0_preq.list);
+ break;
+ default:
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+{
+ struct usb_ctrlrequest *ctrl = &pdev->setup;
+ int ret = 0;
+ u16 len;
+
+ trace_cdnsp_ctrl_req(ctrl);
+
+ if (!pdev->gadget_driver)
+ goto out;
+
+ if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+ dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Restore the ep0 to Stopped/Running state. */
+ if (pdev->eps[0].ep_state & EP_HALTED) {
+ trace_cdnsp_ep0_halted("Restore to normal state");
+ cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+ }
+
+ /*
+	 * Finish the previous SETUP transfer by removing the request from
+	 * the list and informing the upper layer.
+ */
+ if (!list_empty(&pdev->eps[0].pending_list)) {
+ struct cdnsp_request *req;
+
+ trace_cdnsp_ep0_request("Remove previous");
+ req = next_request(&pdev->eps[0].pending_list);
+ cdnsp_ep_dequeue(&pdev->eps[0], req);
+ }
+
+ len = le16_to_cpu(ctrl->wLength);
+ if (!len) {
+ pdev->three_stage_setup = false;
+ pdev->ep0_expect_in = false;
+ } else {
+ pdev->three_stage_setup = true;
+ pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+ }
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = cdnsp_ep0_std_request(pdev, ctrl);
+ else
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+
+ if (!len)
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ trace_cdnsp_ep0_status_stage("delayed");
+ return;
+ }
+out:
+ if (ret < 0)
+ cdnsp_ep0_stall(pdev);
+ else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+ cdnsp_status_stage(pdev);
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
new file mode 100644
index 000000000000..f2ebbacd932e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "drd.h"
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+unsigned int cdnsp_port_speed(unsigned int port_status)
+{
+	/* Detect gadget speed based on the PORTSC register. */
+ if (DEV_SUPERSPEEDPLUS(port_status))
+ return USB_SPEED_SUPER_PLUS;
+ else if (DEV_SUPERSPEED(port_status))
+ return USB_SPEED_SUPER;
+ else if (DEV_HIGHSPEED(port_status))
+ return USB_SPEED_HIGH;
+ else if (DEV_FULLSPEED(port_status))
+ return USB_SPEED_FULL;
+
+	/* If the device is detached, the speed will be USB_SPEED_UNKNOWN. */
+ return USB_SPEED_UNKNOWN;
+}
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 cdnsp_port_state_to_neutral(u32 state)
+{
+ /* Save read-only status and port state. */
+ return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
+}
+
+/**
+ * cdnsp_find_next_ext_cap - Find the offset of the extended capability
+ *	with the given capability ID.
+ * @base: PCI MMIO registers base address.
+ * @start: Address at which to start looking (0 or HCC_PARAMS to start at
+ *	the beginning of the list).
+ * @id: Extended capability ID to search for.
+ *
+ * Returns the offset of the next matching extended capability structure.
+ * Some capabilities can occur several times,
+ * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
+ */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
+{
+ u32 offset = start;
+ u32 next;
+ u32 val;
+
+ if (!start || start == HCC_PARAMS_OFFSET) {
+ val = readl(base + HCC_PARAMS_OFFSET);
+ if (val == ~0)
+ return 0;
+
+ offset = HCC_EXT_CAPS(val) << 2;
+ if (!offset)
+ return 0;
+	}
+
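+	/* Capability list offsets are in 32-bit words, hence the << 2. */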
+ do {
+ val = readl(base + offset);
+ if (val == ~0)
+ return 0;
+
+ if (EXT_CAPS_ID(val) == id && offset != start)
+ return offset;
+
+ next = EXT_CAPS_NEXT(val);
+ offset += next << 2;
+ } while (next);
+
+ return 0;
+}
+
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs,
+ u32 link_state)
+{
+ int port_num = 0xFF;
+ u32 temp;
+
+ temp = readl(port_regs);
+ temp = cdnsp_port_state_to_neutral(temp);
+ temp |= PORT_WKCONN_E | PORT_WKDISC_E;
+ writel(temp, port_regs);
+
+ temp &= ~PORT_PLS_MASK;
+ temp |= PORT_LINK_STROBE | link_state;
+
+ if (pdev->active_port)
+ port_num = pdev->active_port->port_num;
+
+ trace_cdnsp_handle_port_status(port_num, readl(port_regs));
+ writel(temp, port_regs);
+ trace_cdnsp_link_state_changed(port_num, readl(port_regs));
+}
+
+static void cdnsp_disable_port(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs)
+{
+ u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
+
+ writel(temp | PORT_PED, port_regs);
+}
+
+static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs)
+{
+ u32 portsc = readl(port_regs);
+
+ writel(cdnsp_port_state_to_neutral(portsc) |
+ (portsc & PORT_CHANGE_BITS), port_regs);
+}
+
+static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+ bit = readl(reg) | bit;
+ writel(bit, reg);
+}
+
+static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+ bit = readl(reg) & ~bit;
+ writel(bit, reg);
+}
+
+/*
+ * Disable interrupts and begin the controller halting process.
+ */
+static void cdnsp_quiesce(struct cdnsp_device *pdev)
+{
+ u32 halted;
+ u32 mask;
+ u32 cmd;
+
+ mask = ~(u32)(CDNSP_IRQS);
+
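+	/* If not halted yet, also clear R/S and DEVEN to begin halting. */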
+ halted = readl(&pdev->op_regs->status) & STS_HALT;
+ if (!halted)
+ mask &= ~(CMD_R_S | CMD_DEVEN);
+
+ cmd = readl(&pdev->op_regs->command);
+ cmd &= mask;
+ writel(cmd, &pdev->op_regs->command);
+}
+
+/*
+ * Force controller into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * Controller will complete any current and actively pipelined transactions, and
+ * should halt within 16 ms of the run/stop bit being cleared.
+ * Read controller Halted bit in the status register to see when the
+ * controller is finished.
+ */
+int cdnsp_halt(struct cdnsp_device *pdev)
+{
+ int ret;
+ u32 val;
+
+ cdnsp_quiesce(pdev);
+
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
+ val & STS_HALT, 1,
+ CDNSP_MAX_HALT_USEC);
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Device halt failed\n");
+ return ret;
+ }
+
+ pdev->cdnsp_state |= CDNSP_STATE_HALTED;
+
+ return 0;
+}
+
+/*
+ * device controller died, register read returns 0xffffffff, or command never
+ * ends.
+ */
+void cdnsp_died(struct cdnsp_device *pdev)
+{
+ dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
+ pdev->cdnsp_state |= CDNSP_STATE_DYING;
+ cdnsp_halt(pdev);
+}
+
+/*
+ * Set the run bit and wait for the device to be running.
+ */
+static int cdnsp_start(struct cdnsp_device *pdev)
+{
+ u32 temp;
+ int ret;
+
+ temp = readl(&pdev->op_regs->command);
+ temp |= (CMD_R_S | CMD_DEVEN);
+ writel(temp, &pdev->op_regs->command);
+
+ pdev->cdnsp_state = 0;
+
+ /*
+ * Wait for the STS_HALT Status bit to be 0 to indicate the device is
+ * running.
+ */
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+ !(temp & STS_HALT), 1,
+ CDNSP_MAX_HALT_USEC);
+ if (ret) {
+ pdev->cdnsp_state = CDNSP_STATE_DYING;
+ dev_err(pdev->dev, "ERROR: Controller run failed\n");
+ }
+
+ return ret;
+}
+
+/*
+ * Reset a halted controller.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int cdnsp_reset(struct cdnsp_device *pdev)
+{
+ u32 command;
+ u32 temp;
+ int ret;
+
+ temp = readl(&pdev->op_regs->status);
+
+ if (temp == ~(u32)0) {
+ dev_err(pdev->dev, "Device not accessible, reset failed.\n");
+ return -ENODEV;
+ }
+
+ if ((temp & STS_HALT) == 0) {
+ dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
+ return -EINVAL;
+ }
+
+ command = readl(&pdev->op_regs->command);
+ command |= CMD_RESET;
+ writel(command, &pdev->op_regs->command);
+
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
+ !(temp & CMD_RESET), 1,
+ 10 * 1000);
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Controller reset failed\n");
+ return ret;
+ }
+
+ /*
+ * CDNSP cannot write any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+ !(temp & STS_CNR), 1,
+ 10 * 1000);
+
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
+ return ret;
+ }
+
+	dev_dbg(pdev->dev, "Controller ready to work\n");
+
+ return ret;
+}
+
+/*
+ * cdnsp_get_endpoint_index - Find the index for an endpoint given its
+ * descriptor. Use the return value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+static unsigned int
+cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index = (unsigned int)usb_endpoint_num(desc);
+
+ if (usb_endpoint_xfer_control(desc))
+ return index * 2;
+
+ return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+}
+
+/*
+ * Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+static unsigned int
+cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (cdnsp_get_endpoint_index(desc) + 1);
+}
+
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+ struct usb_request *request;
+ int ret;
+
+ if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
+ trace_cdnsp_request_enqueue_busy(preq);
+ return -EBUSY;
+ }
+
+ request = &preq->request;
+ request->actual = 0;
+ request->status = -EINPROGRESS;
+ preq->direction = pep->direction;
+ preq->epnum = pep->number;
+ preq->td.drbl = 0;
+
+ ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
+ if (ret) {
+ trace_cdnsp_request_enqueue_error(preq);
+ return ret;
+ }
+
+ list_add_tail(&preq->list, &pep->pending_list);
+
+ trace_cdnsp_request_enqueue(preq);
+
+ switch (usb_endpoint_type(pep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ret = cdnsp_queue_ctrl_tx(pdev, preq);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ ret = cdnsp_queue_bulk_tx(pdev, preq);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+		ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+		break;
+	}
+
+ if (ret)
+ goto unmap;
+
+ return 0;
+
+unmap:
+ usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+ pep->direction);
+ list_del(&preq->list);
+ trace_cdnsp_request_enqueue_error(preq);
+
+ return ret;
+}
+
+/*
+ * Remove the request's TD from the endpoint ring. This may cause the
+ * controller to stop USB transfers, potentially stopping in the middle of a
+ * TRB buffer. The controller should pick up where it left off in the TD,
+ * unless a Set Transfer Ring Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled request will be "removed"
+ * from the ring. Since the ring is a contiguous structure, they can't be
+ * physically removed. Instead, there are a few cases to handle:
+ *
+ * 1) If the controller is in the middle of processing the request to be
+ * canceled, we simply move the ring's dequeue pointer past those TRBs
+ * using the Set Transfer Ring Dequeue Pointer command. This will be
+ * the common case, when drivers timeout on the last submitted request
+ * and attempt to cancel.
+ *
+ * 2) If the controller is in the middle of a different TD, we turn the TRBs
+ * into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
+ *    The controller will need to invalidate any TRBs it has cached after
+ * the stop endpoint command.
+ *
+ * 3) The TD may have completed by the time the Stop Endpoint Command
+ * completes, so software needs to handle that case too.
+ */
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+ int ret;
+
+ trace_cdnsp_request_dequeue(preq);
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
+ ret = cdnsp_cmd_stop_ep(pdev, pep);
+ if (ret)
+ return ret;
+ }
+
+ return cdnsp_remove_request(pdev, preq, pep);
+}
+
+static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ struct cdnsp_ep_ctx *ep_ctx;
+ int i;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+ /*
+ * When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ ctrl_ctx->drop_flags = 0;
+ ctrl_ctx->add_flags = 0;
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+
+ /* Endpoint 0 is always valid */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+ for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
+ ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+/* Issue a configure endpoint command and wait for it to finish. */
+static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
+{
+ int ret;
+
+ cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ if (ret) {
+ dev_err(pdev->dev,
+ "ERR: unexpected command completion code 0x%x.\n", ret);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
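+/*
+ * Walk the not-yet-consumed part of the event ring and mark transfer events
+ * belonging to the given endpoint as invalidated, so they are skipped when
+ * the event ring is processed.
+ */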
+static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_segment *segment;
+ union cdnsp_trb *event;
+ u32 cycle_state;
+ u32 data;
+
+ event = pdev->event_ring->dequeue;
+ segment = pdev->event_ring->deq_seg;
+ cycle_state = pdev->event_ring->cycle_state;
+
+ while (1) {
+ data = le32_to_cpu(event->trans_event.flags);
+
+ /* Check the owner of the TRB. */
+ if ((data & TRB_CYCLE) != cycle_state)
+ break;
+
+ if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
+ TRB_TO_EP_ID(data) == (pep->idx + 1)) {
+ data |= TRB_EVENT_INVALIDATE;
+ event->trans_event.flags = cpu_to_le32(data);
+ }
+
+ if (cdnsp_last_trb_on_seg(segment, event)) {
+ cycle_state ^= 1;
+			segment = segment->next;
+ event = segment->trbs;
+ } else {
+ event++;
+ }
+ }
+}
+
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *event_deq_seg;
+ union cdnsp_trb *cmd_trb;
+ dma_addr_t cmd_deq_dma;
+ union cdnsp_trb *event;
+ u32 cycle_state;
+ int ret, val;
+ u64 cmd_dma;
+ u32 flags;
+
+ cmd_trb = pdev->cmd.command_trb;
+ pdev->cmd.status = 0;
+
+ trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);
+
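+	/* Poll until the controller clears the Command Ring Busy flag. */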
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
+ !CMD_RING_BUSY(val), 1,
+ CDNSP_CMD_TIMEOUT);
+ if (ret) {
+ dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
+ trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
+ pdev->cdnsp_state = CDNSP_STATE_DYING;
+ return -ETIMEDOUT;
+ }
+
+ event = pdev->event_ring->dequeue;
+ event_deq_seg = pdev->event_ring->deq_seg;
+ cycle_state = pdev->event_ring->cycle_state;
+
+ cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
+ if (!cmd_deq_dma)
+ return -EINVAL;
+
+ while (1) {
+ flags = le32_to_cpu(event->event_cmd.flags);
+
+ /* Check the owner of the TRB. */
+ if ((flags & TRB_CYCLE) != cycle_state)
+ return -EINVAL;
+
+ cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
+
+ /*
+ * Check whether the completion event is for last queued
+ * command.
+ */
+ if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
+ cmd_dma != (u64)cmd_deq_dma) {
+ if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+ event++;
+ continue;
+ }
+
+ if (cdnsp_last_trb_on_ring(pdev->event_ring,
+ event_deq_seg, event))
+ cycle_state ^= 1;
+
+ event_deq_seg = event_deq_seg->next;
+ event = event_deq_seg->trbs;
+ continue;
+ }
+
+ trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);
+
+ pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
+ if (pdev->cmd.status == COMP_SUCCESS)
+ return 0;
+
+ return -pdev->cmd.status;
+ }
+}
+
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ int value)
+{
+ int ret;
+
+ trace_cdnsp_ep_halt(value ? "Set" : "Clear");
+
+ if (value) {
+ ret = cdnsp_cmd_stop_ep(pdev, pep);
+ if (ret)
+ return ret;
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
+ cdnsp_queue_halt_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ }
+
+ pep->ep_state |= EP_HALTED;
+ } else {
+ /*
+ * In device mode driver can call reset endpoint command
+ * from any endpoint state.
+ */
+ cdnsp_queue_reset_ep(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);
+
+ if (ret)
+ return ret;
+
+ pep->ep_state &= ~EP_HALTED;
+
+ if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+ pep->ep_state &= ~EP_WEDGE;
+ }
+
+ return 0;
+}
+
+static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ int ret = 0;
+ u32 ep_sts;
+ int i;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+	/* Don't issue the command if there are no endpoints to update. */
+ if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
+ return 0;
+
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+ ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+ /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
+ __le32 le32 = cpu_to_le32(BIT(i));
+
+ if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
+ (ctrl_ctx->add_flags & le32) || i == 1) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
+ break;
+ }
+ }
+
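+	/*
+	 * Issue the command only when an endpoint is actually being added,
+	 * or when an already-enabled endpoint is being dropped.
+	 */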
+ ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+ if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
+ ep_sts == EP_STATE_DISABLED) ||
+ (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
+ ret = cdnsp_configure_endpoint(pdev);
+
+ trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
+ trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);
+
+ cdnsp_zero_in_ctx(pdev);
+
+ return ret;
+}
+
+/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * cdnsp_setup_device(), and then re-set up the configuration.
+ */
+int cdnsp_reset_device(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ int slot_state;
+ int ret, i;
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ slot_ctx->dev_info = 0;
+ pdev->device_address = 0;
+
+ /* If device is not setup, there is no point in resetting it. */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+ trace_cdnsp_reset_device(slot_ctx);
+
+ if (slot_state <= SLOT_STATE_DEFAULT &&
+ pdev->eps[0].ep_state & EP_HALTED) {
+ cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+ }
+
+ /*
+	 * During the Reset Device command, the controller shall transition
+	 * endpoint 0 to the Running state.
+ */
+ pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
+ pdev->eps[0].ep_state |= EP_ENABLED;
+
+ if (slot_state <= SLOT_STATE_DEFAULT)
+ return 0;
+
+ cdnsp_queue_reset_device(pdev);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ /*
+	 * After the Reset Device command, all non-default endpoints are in
+	 * the Disabled state.
+ */
+ for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
+ pdev->eps[i].ep_state |= EP_STOPPED;
+
+ trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
+
+ if (ret)
+		dev_err(pdev->dev, "Reset device failed with error code %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
+ struct cdnsp_ep_ctx *ep_ctx,
+ struct cdnsp_stream_info *stream_info)
+{
+ u32 max_primary_streams;
+
+	/*
+	 * MaxPStreams is the number of stream context array entries, not the
+ * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
+ * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
+ */
+ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+ ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+ ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+ | EP_HAS_LSA);
+ ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
+}
+
+/*
+ * Drivers use this function to prepare bulk endpoints to use streams.
+ *
+ * Don't allow the call to succeed if endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ */
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
+ unsigned int num_stream_ctxs;
+ int ret;
+
+ if (num_streams == 0)
+ return 0;
+
+ if (num_streams > STREAM_NUM_STREAMS)
+ return -EINVAL;
+
+ /*
+ * Add two to the number of streams requested to account for
+ * stream 0 that is reserved for controller usage and one additional
+ * for TASK SET FULL response.
+ */
+ num_streams += 2;
+
+ /* The stream context array size must be a power of two */
+ num_stream_ctxs = roundup_pow_of_two(num_streams);
+
+ trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);
+
+ ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
+ if (ret)
+ return ret;
+
+ cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
+
+ pep->ep_state |= EP_HAS_STREAMS;
+ pep->stream_info.td_count = 0;
+ pep->stream_info.first_prime_det = 0;
+
+ /* Subtract 1 for stream 0, which drivers can't use. */
+ return num_streams - 1;
+}
+
+int cdnsp_disable_slot(struct cdnsp_device *pdev)
+{
+ int ret;
+
+ cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ pdev->slot_id = 0;
+ pdev->active_port = NULL;
+
+ trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
+ memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
+
+ return ret;
+}
+
+int cdnsp_enable_slot(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ int slot_state;
+ int ret;
+
+	/* If the slot is already enabled, there is nothing more to do. */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+ if (slot_state != SLOT_STATE_DISABLED)
+ return 0;
+
+ cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ if (ret)
+ goto show_trace;
+
+ pdev->slot_id = 1;
+
+show_trace:
+ trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ return ret;
+}
+
+/*
+ * Issue an Address Device command with BSR=1 (block the SET_ADDRESS request)
+ * if setup is SETUP_CONTEXT_ONLY, or with BSR=0 if setup is
+ * SETUP_CONTEXT_ADDRESS.
+ */
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ int dev_state = 0;
+ int ret;
+
+ if (!pdev->slot_id) {
+ trace_cdnsp_slot_id("incorrect");
+ return -EINVAL;
+ }
+
+ if (!pdev->active_port->port_num)
+ return -EINVAL;
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+ if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
+ trace_cdnsp_slot_already_in_default(slot_ctx);
+ return 0;
+ }
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+ if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
+ ret = cdnsp_setup_addressable_priv_dev(pdev);
+ if (ret)
+ return ret;
+ }
+
+ cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
+
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
+ trace_cdnsp_setup_device_slot(slot_ctx);
+
+ cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ /* Zero the input context control for later use. */
+ ctrl_ctx->add_flags = 0;
+ ctrl_ctx->drop_flags = 0;
+
+ return ret;
+}
+
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
+ struct usb_request *req,
+ int enable)
+{
+ if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
+ return;
+
+ trace_cdnsp_lpm(enable);
+
+ if (enable)
+ writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
+ &pdev->active_port->regs->portpmsc);
+ else
+ writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_get_frame(struct cdnsp_device *pdev)
+{
+ return readl(&pdev->run_regs->microframe_index) >> 3;
+}
+
+static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ u32 added_ctxs;
+ int ret;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+ !desc->wMaxPacketSize)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
+ "%s is already enabled\n", pep->name))
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ added_ctxs = cdnsp_get_endpoint_flag(desc);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
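+	/* The interval is kept in 125 us microframe units. */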
+ if (pdev->gadget.speed == USB_SPEED_FULL) {
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
+ pep->interval = desc->bInterval << 3;
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
+ pep->interval = BIT(desc->bInterval - 1) << 3;
+ }
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
+ if (pep->interval > BIT(12)) {
+ dev_err(pdev->dev, "bInterval %d not supported\n",
+ desc->bInterval);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+ }
+
+ ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
+ if (ret)
+ goto unlock;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+ ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
+ ctrl_ctx->drop_flags = 0;
+
+ ret = cdnsp_update_eps_configuration(pdev, pep);
+ if (ret) {
+ cdnsp_free_endpoint_rings(pdev, pep);
+ goto unlock;
+ }
+
+ pep->ep_state |= EP_ENABLED;
+ pep->ep_state &= ~EP_STOPPED;
+
+unlock:
+ trace_cdnsp_ep_enable_end(pep, 0);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_request *preq;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ u32 drop_flag;
+ int ret = 0;
+
+ if (!ep)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (!(pep->ep_state & EP_ENABLED)) {
+ dev_err(pdev->dev, "%s is already disabled\n", pep->name);
+ ret = -EINVAL;
+ goto finish;
+ }
+
+ cdnsp_cmd_stop_ep(pdev, pep);
+ pep->ep_state |= EP_DIS_IN_RROGRESS;
+ cdnsp_cmd_flush_ep(pdev, pep);
+
+ /* Remove all queued USB requests. */
+ while (!list_empty(&pep->pending_list)) {
+ preq = next_request(&pep->pending_list);
+ cdnsp_ep_dequeue(pep, preq);
+ }
+
+ cdnsp_invalidate_ep_events(pdev, pep);
+
+ pep->ep_state &= ~EP_DIS_IN_RROGRESS;
+ drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+ ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
+ ctrl_ctx->add_flags = 0;
+
+ cdnsp_endpoint_zero(pdev, pep);
+
+ ret = cdnsp_update_eps_configuration(pdev, pep);
+ cdnsp_free_endpoint_rings(pdev, pep);
+
+ pep->ep_state &= ~EP_ENABLED;
+ pep->ep_state |= EP_STOPPED;
+
+finish:
+ trace_cdnsp_ep_disable_end(pep, 0);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_request *preq;
+
+ preq = kzalloc(sizeof(*preq), gfp_flags);
+ if (!preq)
+ return NULL;
+
+ preq->epnum = pep->number;
+ preq->pep = pep;
+
+ trace_cdnsp_alloc_request(preq);
+
+ return &preq->request;
+}
+
+static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdnsp_request *preq = to_cdnsp_request(request);
+
+ trace_cdnsp_free_request(preq);
+ kfree(preq);
+}
+
+static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct cdnsp_request *preq;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ int ret;
+
+ if (!request || !ep)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ if (!(pep->ep_state & EP_ENABLED)) {
+ dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
+ pep->name);
+ return -EINVAL;
+ }
+
+ preq = to_cdnsp_request(request);
+ spin_lock_irqsave(&pdev->lock, flags);
+ ret = cdnsp_ep_enqueue(pep, preq);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ unsigned long flags;
+ int ret;
+
+ if (!pep->endpoint.desc) {
+ dev_err(pdev->dev,
+			"%s: can't dequeue from a disabled endpoint\n",
+ pep->name);
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ struct cdnsp_request *preq;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ preq = next_request(&pep->pending_list);
+ if (value) {
+ if (preq) {
+ trace_cdnsp_ep_busy_try_halt_again(pep, 0);
+ ret = -EAGAIN;
+ goto done;
+ }
+ }
+
+ ret = cdnsp_halt_endpoint(pdev, pep, value);
+
+done:
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return ret;
+}
+
+static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pep->ep_state |= EP_WEDGE;
+ ret = cdnsp_halt_endpoint(pdev, pep, 1);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
+ .enable = cdnsp_gadget_ep_enable,
+ .disable = cdnsp_gadget_ep_disable,
+ .alloc_request = cdnsp_gadget_ep_alloc_request,
+ .free_request = cdnsp_gadget_ep_free_request,
+ .queue = cdnsp_gadget_ep_queue,
+ .dequeue = cdnsp_gadget_ep_dequeue,
+ .set_halt = cdnsp_gadget_ep_set_halt,
+ .set_wedge = cdnsp_gadget_ep_set_wedge,
+};
+
+static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
+ .enable = cdnsp_gadget_ep_enable,
+ .disable = cdnsp_gadget_ep_disable,
+ .alloc_request = cdnsp_gadget_ep_alloc_request,
+ .free_request = cdnsp_gadget_ep_free_request,
+ .queue = cdnsp_gadget_ep_queue,
+ .dequeue = cdnsp_gadget_ep_dequeue,
+ .set_halt = cdnsp_gadget_ep_set_halt,
+ .set_wedge = cdnsp_gadget_ep_set_wedge,
+};
+
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
+ struct cdnsp_request *preq,
+ int status)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+
+ list_del(&preq->list);
+
+ if (preq->request.status == -EINPROGRESS)
+ preq->request.status = status;
+
+ usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+ preq->direction);
+
+ trace_cdnsp_request_giveback(preq);
+
+ if (preq != &pdev->ep0_preq) {
+ spin_unlock(&pdev->lock);
+ usb_gadget_giveback_request(&pep->endpoint, &preq->request);
+ spin_lock(&pdev->lock);
+ }
+}
+
+static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+static int cdnsp_run(struct cdnsp_device *pdev,
+ enum usb_device_speed speed)
+{
+ u32 fs_speed = 0;
+ u64 temp_64;
+ u32 temp;
+ int ret;
+
+ temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+ temp_64 &= ~ERST_PTR_MASK;
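+	/* Set the default interrupt moderation interval (250 ns units). */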
+ temp = readl(&pdev->ir_set->irq_control);
+ temp &= ~IMOD_INTERVAL_MASK;
+ temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
+ writel(temp, &pdev->ir_set->irq_control);
+
+ temp = readl(&pdev->port3x_regs->mode_addr);
+
+ switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
+ temp |= CFG_3XPORT_SSP_SUPPORT;
+ break;
+ case USB_SPEED_SUPER:
+ temp &= ~CFG_3XPORT_SSP_SUPPORT;
+ break;
+ case USB_SPEED_HIGH:
+ break;
+ case USB_SPEED_FULL:
+ fs_speed = PORT_REG6_FORCE_FS;
+ break;
+ default:
+ dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
+ speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ /* Default to superspeed. */
+ speed = USB_SPEED_SUPER;
+ break;
+ }
+
+ if (speed >= USB_SPEED_SUPER) {
+ writel(temp, &pdev->port3x_regs->mode_addr);
+ cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
+ XDEV_RXDETECT);
+ } else {
+ cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+ }
+
+ cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
+ XDEV_RXDETECT);
+
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+ writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);
+
+ ret = cdnsp_start(pdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ temp = readl(&pdev->op_regs->command);
+ temp |= (CMD_INTE);
+ writel(temp, &pdev->op_regs->command);
+
+ temp = readl(&pdev->ir_set->irq_pending);
+ writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);
+
+ trace_cdnsp_init("Controller ready to work");
+ return 0;
+err:
+ cdnsp_halt(pdev);
+ return ret;
+}
+
+static int cdnsp_gadget_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+	enum usb_device_speed max_speed;
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pdev->gadget_driver = driver;
+
+ /* limit speed if necessary */
+ max_speed = min(driver->max_speed, g->max_speed);
+ ret = cdnsp_run(pdev, max_speed);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+ union cdnsp_trb *event_ring_deq,
+ u8 clear_ehb)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != pdev->event_ring->dequeue) {
+ deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue);
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C). */
+ if (clear_ehb)
+ temp_64 |= ERST_EHB;
+ else
+ temp_64 &= ~ERST_EHB;
+
+ cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
+}
+
+static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *seg;
+ u64 val_64;
+ int i;
+
+ cdnsp_initialize_ring_info(pdev->cmd_ring);
+
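+	/* Zero all TRBs while preserving each segment's trailing link TRB. */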
+ seg = pdev->cmd_ring->first_seg;
+ for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
+ memset(seg->trbs, 0,
+ sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
+ seg = seg->next;
+ }
+
+ /* Set the address in the Command Ring Control register. */
+ val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+ val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+ (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+ pdev->cmd_ring->cycle_state;
+ cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+}
+
+static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *event_deq_seg;
+ union cdnsp_trb *event_ring_deq;
+ union cdnsp_trb *event;
+ u32 cycle_bit;
+
+ event_ring_deq = pdev->event_ring->dequeue;
+ event_deq_seg = pdev->event_ring->deq_seg;
+ event = pdev->event_ring->dequeue;
+
+ /* Update ring dequeue pointer. */
+ while (1) {
+ cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);
+
+ /* Does the controller or driver own the TRB? */
+ if (cycle_bit != pdev->event_ring->cycle_state)
+ break;
+
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+ if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+ event++;
+ continue;
+ }
+
+ if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
+ event))
+ cycle_bit ^= 1;
+
+ event_deq_seg = event_deq_seg->next;
+ event = event_deq_seg->trbs;
+ }
+
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+}
+
+static void cdnsp_stop(struct cdnsp_device *pdev)
+{
+ u32 temp;
+
+ cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
+
+ /* Remove internally queued request for ep0. */
+ if (!list_empty(&pdev->eps[0].pending_list)) {
+ struct cdnsp_request *req;
+
+ req = next_request(&pdev->eps[0].pending_list);
+ if (req == &pdev->ep0_preq)
+ cdnsp_ep_dequeue(&pdev->eps[0], req);
+ }
+
+ cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
+ cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+ cdnsp_disable_slot(pdev);
+ cdnsp_halt(pdev);
+
+ temp = readl(&pdev->op_regs->status);
+ writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
+ temp = readl(&pdev->ir_set->irq_pending);
+ writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
+
+ cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
+ cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);
+
+ /* Clear interrupt line */
+ temp = readl(&pdev->ir_set->irq_pending);
+ temp |= IMAN_IP;
+ writel(temp, &pdev->ir_set->irq_pending);
+
+ cdnsp_consume_all_events(pdev);
+ cdnsp_clear_cmd_ring(pdev);
+
+ trace_cdnsp_exit("Controller stopped.");
+}
+
+/*
+ * Stop controller.
+ * This function is called by the gadget core when the driver is removed.
+ * Disable slot, disable IRQs, and quiesce the controller.
+ */
+static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdnsp_stop(pdev);
+ pdev->gadget_driver = NULL;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_get_frame(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+
+ return cdnsp_get_frame(pdev);
+}
+
+static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+ u32 portpm, portsc;
+
+ port_regs = pdev->active_port->regs;
+ portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;
+
+	/* For USB2 in U2, the host must have enabled remote wakeup. */
+ if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
+ portpm = readl(&port_regs->portpmsc);
+
+ if (!(portpm & PORT_RWE))
+ return;
+ }
+
+ if (portsc == XDEV_U3 && !pdev->may_wakeup)
+ return;
+
+ cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
+
+ pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
+}
+
+static int cdnsp_gadget_wakeup(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ __cdnsp_gadget_wakeup(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
+ int is_selfpowered)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ g->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
+ struct cdns *cdns = dev_get_drvdata(pdev->dev);
+
+ trace_cdnsp_pullup(is_on);
+
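+	/* Detach by resetting the device and clearing VBUS; set it to attach. */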
+ if (!is_on) {
+ cdnsp_reset_device(pdev);
+ cdns_clear_vbus(cdns);
+ } else {
+ cdns_set_vbus(cdns);
+ }
+ return 0;
+}
+
+static const struct usb_gadget_ops cdnsp_gadget_ops = {
+ .get_frame = cdnsp_gadget_get_frame,
+ .wakeup = cdnsp_gadget_wakeup,
+ .set_selfpowered = cdnsp_gadget_set_selfpowered,
+ .pullup = cdnsp_gadget_pullup,
+ .udc_start = cdnsp_gadget_udc_start,
+ .udc_stop = cdnsp_gadget_udc_stop,
+};
+
+static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ void __iomem *reg = &pdev->cap_regs->hc_capbase;
+ int endpoints;
+
+ reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);
+
+ if (!pep->direction) {
+ pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
+ pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
+ pep->buffering = (pep->buffering + 1) / 2;
+ pep->buffering_period = (pep->buffering_period + 1) / 2;
+ return;
+ }
+
+ endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;
+
+	/* Advance reg to the XBUF_TX_TAG_MASK_0 register. */
+ reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
+	/* Advance reg to the XBUF_TX_TAG_MASK_N register for this endpoint. */
+ reg += pep->number * sizeof(u32) * 2;
+
+ pep->buffering = (readl(reg) + 1) / 2;
+ pep->buffering_period = pep->buffering;
+}
+
+static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
+{
+ int max_streams = HCC_MAX_PSA(pdev->hcc_params);
+ struct cdnsp_ep *pep;
+ int i;
+
+ INIT_LIST_HEAD(&pdev->gadget.ep_list);
+
+ if (max_streams < STREAM_LOG_STREAMS) {
+ dev_err(pdev->dev, "Stream size %d not supported\n",
+ max_streams);
+ return -EINVAL;
+ }
+
+ max_streams = STREAM_LOG_STREAMS;
+
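+	/* eps[] layout: eps[0] is bidirectional ep0, then ep1out, ep1in, ... */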
+ for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+ bool direction = !(i & 1); /* Start from OUT endpoint. */
+ u8 epnum = ((i + 1) >> 1);
+
+ if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
+ continue;
+
+ pep = &pdev->eps[i];
+ pep->pdev = pdev;
+ pep->number = epnum;
+ pep->direction = direction; /* 0 for OUT, 1 for IN. */
+
+ /*
+ * Ep0 is bidirectional, so ep0in and ep0out are represented by
+ * pdev->eps[0]
+ */
+ if (epnum == 0) {
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+ epnum, "BiDir");
+
+ pep->idx = 0;
+ usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
+ pep->endpoint.maxburst = 1;
+ pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
+ pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
+ pep->endpoint.comp_desc = NULL;
+ pep->endpoint.caps.type_control = true;
+ pep->endpoint.caps.dir_in = true;
+ pep->endpoint.caps.dir_out = true;
+
+ pdev->ep0_preq.epnum = pep->number;
+ pdev->ep0_preq.pep = pep;
+ pdev->gadget.ep0 = &pep->endpoint;
+ } else {
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+ epnum, (pep->direction) ? "in" : "out");
+
+ pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
+ usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
+
+ pep->endpoint.max_streams = max_streams;
+ pep->endpoint.ops = &cdnsp_gadget_ep_ops;
+ list_add_tail(&pep->endpoint.ep_list,
+ &pdev->gadget.ep_list);
+
+ pep->endpoint.caps.type_iso = true;
+ pep->endpoint.caps.type_bulk = true;
+ pep->endpoint.caps.type_int = true;
+
+ pep->endpoint.caps.dir_in = direction;
+ pep->endpoint.caps.dir_out = !direction;
+ }
+
+ pep->endpoint.name = pep->name;
+ pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
+ pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
+ cdnsp_get_ep_buffering(pdev, pep);
+
+		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
+			"CTRL: %s, INT: %s, BULK: %s, ISOC: %s, "
+			"SupDir IN: %s, OUT: %s\n",
+			pep->name, pep->endpoint.maxpacket,
+ (pep->endpoint.caps.type_control) ? "yes" : "no",
+ (pep->endpoint.caps.type_int) ? "yes" : "no",
+ (pep->endpoint.caps.type_bulk) ? "yes" : "no",
+ (pep->endpoint.caps.type_iso) ? "yes" : "no",
+ (pep->endpoint.caps.dir_in) ? "yes" : "no",
+ (pep->endpoint.caps.dir_out) ? "yes" : "no");
+
+ INIT_LIST_HEAD(&pep->pending_list);
+ }
+
+ return 0;
+}
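+
+/*
+ * Illustrative index mapping for the loop above (derived purely from the
+ * epnum/direction/idx arithmetic):
+ *
+ *	i = 0  -> ep0 (BiDir), idx 0
+ *	i = 1  -> ep1out, idx 1
+ *	i = 2  -> ep1in,  idx 2
+ *	...
+ *	i = 29 -> ep15out, idx 29
+ *	i = 30 -> ep15in,  idx 30
+ */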
+
+static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
+{
+ struct cdnsp_ep *pep;
+ int i;
+
+ for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+ pep = &pdev->eps[i];
+ if (pep->number != 0 && pep->out_ctx)
+ list_del(&pep->endpoint.ep_list);
+ }
+}
+
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
+{
+ pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;
+
+ if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->disconnect(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
+
+ pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
+}
+
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
+{
+ if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->suspend(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+}
+
+void cdnsp_resume_gadget(struct cdnsp_device *pdev)
+{
+ if (pdev->gadget_driver && pdev->gadget_driver->resume) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->resume(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+}
+
+void cdnsp_irq_reset(struct cdnsp_device *pdev)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+
+ cdnsp_reset_device(pdev);
+
+ port_regs = pdev->active_port->regs;
+ pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));
+
+ spin_unlock(&pdev->lock);
+ usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
+ spin_lock(&pdev->lock);
+
+ switch (pdev->gadget.speed) {
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ pdev->gadget.ep0->maxpacket = 512;
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ pdev->gadget.ep0->maxpacket = 64;
+ break;
+ default:
+ /* Low speed is not supported. */
+ dev_err(pdev->dev, "Unknown device speed\n");
+ break;
+ }
+
+ cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+ cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+}
+
+static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
+{
+ void __iomem *reg = &pdev->cap_regs->hc_capbase;
+
+ reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
+ pdev->rev_cap = reg;
+
+ dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
+ readl(&pdev->rev_cap->ctrl_revision),
+ readl(&pdev->rev_cap->rtl_revision),
+ readl(&pdev->rev_cap->ep_supported),
+ readl(&pdev->rev_cap->rx_buff_size),
+ readl(&pdev->rev_cap->tx_buff_size));
+}
+
+static int cdnsp_gen_setup(struct cdnsp_device *pdev)
+{
+ int ret;
+ u32 reg;
+
+ pdev->cap_regs = pdev->regs;
+ pdev->op_regs = pdev->regs +
+ HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
+ pdev->run_regs = pdev->regs +
+ (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);
+
+ /* Cache read-only capability registers */
+ pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
+ pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
+ pdev->hci_version = HC_VERSION(pdev->hcc_params);
+ pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+
+ cdnsp_get_rev_cap(pdev);
+
+ /* Make sure the Device Controller is halted. */
+ ret = cdnsp_halt(pdev);
+ if (ret)
+ return ret;
+
+ /* Reset the internal controller memory state and registers. */
+ ret = cdnsp_reset(pdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Set dma_mask and coherent_dma_mask to 64-bits,
+ * if controller supports 64-bit addressing.
+ */
+ if (HCC_64BIT_ADDR(pdev->hcc_params) &&
+ !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
+ dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
+ } else {
+ /*
+ * This is to avoid error in cases where a 32-bit USB
+ * controller is used on a 64-bit capable system.
+ */
+ ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
+ dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
+ }
+
+ spin_lock_init(&pdev->lock);
+
+ ret = cdnsp_mem_init(pdev);
+ if (ret)
+ return ret;
+
+	/*
+	 * Software workaround for U1: after the transition to U1 the
+	 * controller starts gating the clock, which in some cases causes
+	 * the controller to get stuck.
+	 */
+ reg = readl(&pdev->port3x_regs->mode_2);
+ reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
+ writel(reg, &pdev->port3x_regs->mode_2);
+
+ return 0;
+}
+
+static int __cdnsp_gadget_init(struct cdns *cdns)
+{
+ struct cdnsp_device *pdev;
+ u32 max_speed;
+ int ret = -ENOMEM;
+
+ cdns_drd_gadget_on(cdns);
+
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(cdns->dev);
+
+ cdns->gadget_dev = pdev;
+ pdev->dev = cdns->dev;
+ pdev->regs = cdns->dev_regs;
+ max_speed = usb_get_maximum_speed(cdns->dev);
+
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ /* Default to SSP */
+ max_speed = USB_SPEED_SUPER_PLUS;
+ break;
+ }
+
+ pdev->gadget.ops = &cdnsp_gadget_ops;
+ pdev->gadget.name = "cdnsp-gadget";
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+ pdev->gadget.sg_supported = 1;
+ pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+ pdev->gadget.lpm_capable = 1;
+
+ pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
+ if (!pdev->setup_buf)
+ goto free_pdev;
+
+	/*
+	 * The controller supports unaligned buffers, but aligned buffers
+	 * should improve performance.
+	 */
+ pdev->gadget.quirk_ep_out_aligned_size = true;
+
+ ret = cdnsp_gen_setup(pdev);
+ if (ret) {
+ dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
+ goto free_setup;
+ }
+
+ ret = cdnsp_gadget_init_endpoints(pdev);
+ if (ret) {
+ dev_err(pdev->dev, "failed to initialize endpoints\n");
+ goto halt_pdev;
+ }
+
+ ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
+ if (ret) {
+ dev_err(pdev->dev, "failed to register udc\n");
+ goto free_endpoints;
+ }
+
+ ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
+ cdnsp_irq_handler,
+ cdnsp_thread_irq_handler, IRQF_SHARED,
+ dev_name(pdev->dev), pdev);
+ if (ret)
+ goto del_gadget;
+
+ return 0;
+
+del_gadget:
+ usb_del_gadget_udc(&pdev->gadget);
+free_endpoints:
+ cdnsp_gadget_free_endpoints(pdev);
+halt_pdev:
+ cdnsp_halt(pdev);
+ cdnsp_reset(pdev);
+ cdnsp_mem_cleanup(pdev);
+free_setup:
+ kfree(pdev->setup_buf);
+free_pdev:
+ kfree(pdev);
+
+ return ret;
+}
+
+static void cdnsp_gadget_exit(struct cdns *cdns)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+
+ devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
+ pm_runtime_mark_last_busy(cdns->dev);
+ pm_runtime_put_autosuspend(cdns->dev);
+ usb_del_gadget_udc(&pdev->gadget);
+ cdnsp_gadget_free_endpoints(pdev);
+ cdnsp_mem_cleanup(pdev);
+ kfree(pdev);
+ cdns->gadget_dev = NULL;
+ cdns_drd_gadget_off(cdns);
+}
+
+static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+ unsigned long flags;
+
+ if (pdev->link_state == XDEV_U3)
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdnsp_disconnect_gadget(pdev);
+ cdnsp_stop(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+ enum usb_device_speed max_speed;
+ unsigned long flags;
+ int ret;
+
+ if (!pdev->gadget_driver)
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ max_speed = pdev->gadget_driver->max_speed;
+
+ /* Limit speed if necessary. */
+ max_speed = min(max_speed, pdev->gadget.max_speed);
+
+ ret = cdnsp_run(pdev, max_speed);
+
+ if (pdev->link_state == XDEV_U3)
+ __cdnsp_gadget_wakeup(pdev);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * cdnsp_gadget_init - initialize device structure
+ * @cdns: cdnsp instance
+ *
+ * This function initializes the gadget.
+ */
+int cdnsp_gadget_init(struct cdns *cdns)
+{
+ struct cdns_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = __cdnsp_gadget_init;
+ rdrv->stop = cdnsp_gadget_exit;
+ rdrv->suspend = cdnsp_gadget_suspend;
+ rdrv->resume = cdnsp_gadget_resume;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
+ rdrv->name = "gadget";
+ cdns->roles[USB_ROLE_DEVICE] = rdrv;
+
+ return 0;
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
new file mode 100644
index 000000000000..6bbb26548c04
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -0,0 +1,1601 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+#ifndef __LINUX_CDNSP_GADGET_H
+#define __LINUX_CDNSP_GADGET_H
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/usb/gadget.h>
+#include <linux/irq.h>
+
+/* Max number slots - only 1 is allowed. */
+#define CDNSP_DEV_MAX_SLOTS 1
+
+#define CDNSP_EP0_SETUP_SIZE 512
+
+/* One control and 15 for in and 15 for out endpoints. */
+#define CDNSP_ENDPOINTS_NUM 31
+
+/* Best Effort Service Latency. */
+#define CDNSP_DEFAULT_BESL 0
+
+/* Device Controller command default timeout value in us */
+#define CDNSP_CMD_TIMEOUT (15 * 1000)
+
+/* Up to 16 ms to halt a device controller */
+#define CDNSP_MAX_HALT_USEC (16 * 1000)
+
+#define CDNSP_CTX_SIZE 2112
+
+/*
+ * Controller register interface.
+ */
+
+/**
+ * struct cdnsp_cap_regs - CDNSP Registers.
+ * @hc_capbase: Length of the capabilities register and controller
+ * version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ * @hcc_params2: HCCPARAMS2 Capability Parameters 2.
+ */
+struct cdnsp_cap_regs {
+ __le32 hc_capbase;
+ __le32 hcs_params1;
+ __le32 hcs_params2;
+ __le32 hcs_params3;
+ __le32 hcc_params;
+ __le32 db_off;
+ __le32 run_regs_off;
+ __le32 hcc_params2;
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks. */
+/* bits 7:0 - how long is the Capabilities register. */
+#define HC_LENGTH(p) (((p) >> 00) & GENMASK(7, 0))
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & GENMASK(15, 0))
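+
+/*
+ * Worked example (illustrative): a hc_capbase value of 0x01000020 decodes
+ * to HC_LENGTH() == 0x20 (capability region length in bytes) and
+ * HC_VERSION() == 0x0100.
+ */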
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Endpoints */
+#define HCS_ENDPOINTS_MASK GENMASK(7, 0)
+#define HCS_ENDPOINTS(p) (((p) & HCS_ENDPOINTS_MASK) >> 0)
+
+/* HCCPARAMS offset from PCI base address */
+#define HCC_PARAMS_OFFSET 0x10
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* 1: device controller can use 64-bit address pointers. */
+#define HCC_64BIT_ADDR(p) ((p) & BIT(0))
+/* 1: device controller uses 64-byte Device Context structures. */
+#define HCC_64BYTE_CONTEXT(p) ((p) & BIT(2))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */
+#define HCC_MAX_PSA(p) ((((p) >> 12) & 0xf) + 1)
+/* Extended Capabilities pointer from PCI base. */
+#define HCC_EXT_CAPS(p) (((p) & GENMASK(31, 16)) >> 16)
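+
+/*
+ * Worked example (illustrative): if bits 15:12 of HCCPARAMS read 3,
+ * HCC_MAX_PSA() evaluates to 4, i.e. primary stream arrays of up to
+ * 2^4 = 16 streams - the STREAM_LOG_STREAMS limit checked in
+ * cdnsp_gadget_init_endpoints().
+ */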
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved. */
+#define DBOFF_MASK GENMASK(31, 2)
+
+/* run_regs_off bitmask - bits 0:4 reserved. */
+#define RTSOFF_MASK GENMASK(31, 5)
+
+/**
+ * struct cdnsp_op_regs - Device Controller Operational Registers.
+ * @command: USBCMD - Controller command register.
+ * @status: USBSTS - Controller status register.
+ * @page_size: This indicates the page size that the device controller supports.
+ * If bit n is set, the controller supports a page size of 2^(n+12),
+ * up to a 128MB page size. 4K is the minimum page size.
+ * @dnctrl: DNCTRL - Device notification control register.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer.
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer.
+ * @config_reg: CONFIG - Configure Register
+ * @port_reg_base: PORTSCn - base address for Port Status and Control
+ * Each port has a Port Status and Control register,
+ * followed by a Port Power Management Status and Control
+ * register, a Port Link Info register, and a reserved
+ * register.
+ */
+struct cdnsp_op_regs {
+ __le32 command;
+ __le32 status;
+ __le32 page_size;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 dnctrl;
+ __le64 cmd_ring;
+ /* rsvd: offset 0x20-2F. */
+ __le32 reserved3[4];
+ __le64 dcbaa_ptr;
+ __le32 config_reg;
+ /* rsvd: offset 0x3C-3FF. */
+ __le32 reserved4[241];
+ /* port 1 registers, which serve as a base address for other ports. */
+ __le32 port_reg_base;
+};
+
+/* Number of registers per port. */
+#define NUM_PORT_REGS 4
+
+/**
+ * struct cdnsp_port_regs - Port Registers.
+ * @portsc: PORTSC - Port Status and Control Register.
+ * @portpmsc: PORTPMSC - Port Power Management Status and Control Register.
+ * @portli: PORTLI - Port Link Info register.
+ */
+struct cdnsp_port_regs {
+ __le32 portsc;
+ __le32 portpmsc;
+ __le32 portli;
+ __le32 reserved;
+};
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0 (connect status) and 10:13 (port speed).
+ * These bits are also sticky - meaning they're in the AUX well and they aren't
+ * changed by a hot or warm reset.
+ */
+#define CDNSP_PORT_RO (PORT_CONNECT | DEV_SPEED_MASK)
+
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8 (link state), 25:26 ("wake on" enable state)
+ */
+#define CDNSP_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E)
+
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bit 1 (port enable/disable), bit 17 (connect changed),
+ * bit 21 (port reset changed), bit 22 (port link state changed).
+ */
+#define CDNSP_PORT_RW1CS (PORT_PED | PORT_CSC | PORT_RC | PORT_PLC)
+
+/* USBCMD - USB command - bitmasks. */
+/* Run/Stop, controller execution - do not write unless controller is halted.*/
+#define CMD_R_S BIT(0)
+/*
+ * Reset device controller - resets internal controller state machine and all
+ * registers (except PCI config regs).
+ */
+#define CMD_RESET BIT(1)
+/* Event Interrupt Enable - a '1' allows interrupts from the controller. */
+#define CMD_INTE BIT(2)
+/*
+ * Device System Error Interrupt Enable - get out-of-band signal for
+ * controller errors.
+ */
+#define CMD_DSEIE BIT(3)
+/* device controller save/restore state. */
+#define CMD_CSS BIT(8)
+#define CMD_CRS BIT(9)
+/*
+ * Enable Wrap Event - '1' means device controller generates an event
+ * when MFINDEX wraps.
+ */
+#define CMD_EWE BIT(10)
+/* 1: device enabled */
+#define CMD_DEVEN BIT(17)
+/* bits 18:31 are reserved (and should be preserved on writes). */
+
+/* Command register values to disable interrupts. */
+#define CDNSP_IRQS (CMD_INTE | CMD_DSEIE | CMD_EWE)
+
+/* USBSTS - USB status - bitmasks */
+/* controller not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT BIT(0)
+/*
+ * serious error, e.g. PCI parity error. The controller will clear
+ * the run/stop bit.
+ */
+#define STS_FATAL BIT(2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set.*/
+#define STS_EINT BIT(3)
+/* port change detect */
+#define STS_PCD BIT(4)
+/* save state status - '1' means device controller is saving state. */
+#define STS_SSS BIT(8)
+/* restore state status - '1' means the controller is restoring state. */
+#define STS_RSS BIT(9)
+/* 1: save or restore error */
+#define STS_SRE BIT(10)
+/* 1: device Not Ready to accept doorbell or op reg writes after reset. */
+#define STS_CNR BIT(11)
+/* 1: internal Device Controller Error.*/
+#define STS_HCE BIT(12)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */
+/* bit 0 is the command ring cycle state. */
+#define CMD_RING_CS BIT(0)
+/* stop ring immediately - abort the currently executing command. */
+#define CMD_RING_ABORT BIT(2)
+/*
+ * Command Ring Busy.
+ * Set when the doorbell register is written with the command doorbell and
+ * cleared when the controller reaches the end of the command ring.
+ */
+#define CMD_RING_BUSY(p) ((p) & BIT(4))
+/* 1: command ring is running */
+#define CMD_RING_RUNNING BIT(3)
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS GENMASK(5, 0)
+
+/* CONFIG - Configure Register - config_reg bitmasks. */
+/* bits 0:7 - maximum number of device slots enabled. */
+#define MAX_DEVS GENMASK(7, 0)
+/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
+#define CONFIG_U3E BIT(8)
+
+/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
+/* 1: device connected. */
+#define PORT_CONNECT BIT(0)
+/* 1: port enabled. */
+#define PORT_PED BIT(1)
+/* 1: port reset signaling asserted. */
+#define PORT_RESET BIT(4)
+/*
+ * Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe sets the link state.
+ */
+#define PORT_PLS_MASK GENMASK(8, 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+/* 1: port has power. */
+#define PORT_POWER BIT(9)
+/*
+ * bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - reserved (low speed not supported)
+ * 3 - high speed
+ * 4 - super speed
+ * 5 - super speed plus
+ * 6-15 reserved
+ */
+#define DEV_SPEED_MASK GENMASK(13, 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
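+
+/*
+ * Worked example (illustrative): a PORTSC read with bits 13:10 equal to 4
+ * matches XDEV_SS, so DEV_SUPERSPEED() is true and DEV_PORT_SPEED()
+ * returns 4.
+ */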
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE BIT(16)
+/* 1: connect status change */
+#define PORT_CSC BIT(17)
+/* 1: warm reset for a USB 3.0 device is done. */
+#define PORT_WRC BIT(19)
+/* 1: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC BIT(21)
+/*
+ * port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ----------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC BIT(22)
+/* Port configure error change - port failed to configure its link partner. */
+#define PORT_CEC BIT(23)
+/* Wake on connect (enable). */
+#define PORT_WKCONN_E BIT(25)
+/* Wake on disconnect (enable). */
+#define PORT_WKDISC_E BIT(26)
+/* Indicates if Warm Reset is being received. */
+#define PORT_WR BIT(31)
+
+#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC)
+
+/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */
+/* Enables U1 entry. */
+#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0)
+#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK)
+/* Enables U2 entry. */
+#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8)
+#define PORT_U2_TIMEOUT(p) (((p) << 8) & PORT_U2_TIMEOUT_MASK)
+
+/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */
+#define PORT_L1S_MASK GENMASK(2, 0)
+#define PORT_L1S(p) ((p) & PORT_L1S_MASK)
+#define PORT_L1S_ACK PORT_L1S(1)
+#define PORT_L1S_NYET PORT_L1S(2)
+#define PORT_L1S_STALL PORT_L1S(3)
+#define PORT_L1S_TIMEOUT PORT_L1S(4)
+/* Remote Wake Enable. */
+#define PORT_RWE BIT(3)
+/* Best Effort Service Latency (BESL). */
+#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4))
+/* Hardware LPM Enable (HLE). */
+#define PORT_HLE BIT(16)
+/* Received Best Effort Service Latency (BESL). */
+#define PORT_RRBESL(p) (((p) & GENMASK(20, 17)) >> 17)
+/* Port Test Control. */
+#define PORT_TEST_MODE_MASK GENMASK(31, 28)
+#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK)
+
+/**
+ * struct cdnsp_intr_reg - Interrupt Register Set.
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ * Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The controller places events on the ring
+ * and "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The driver processes those events and
+ * updates the dequeue pointer.
+ */
+struct cdnsp_intr_reg {
+ __le32 irq_pending;
+ __le32 irq_control;
+ __le32 erst_size;
+ __le32 rsvd;
+ __le64 erst_base;
+ __le64 erst_dequeue;
+};
+
+/* IMAN - Interrupt Management Register - irq_pending bitmasks. */
+#define IMAN_IE BIT(1)
+#define IMAN_IP BIT(0)
+/* bits 2:31 need to be preserved */
+#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2)
+#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2))
+
+/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
+/*
+ * Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define IMOD_INTERVAL_MASK GENMASK(15, 0)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define IMOD_COUNTER_MASK GENMASK(31, 16)
+#define IMOD_DEFAULT_INTERVAL 0
+
+/* erst_size bitmasks. */
+/* Preserve bits 16:31 of erst_size. */
+#define ERST_SIZE_MASK GENMASK(31, 16)
+
+/* erst_dequeue bitmasks. */
+/*
+ * Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK GENMASK(2, 0)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. */
+#define ERST_EHB BIT(3)
+#define ERST_PTR_MASK GENMASK(3, 0)
+
+/**
+ * struct cdnsp_run_regs
+ * @microframe_index: MFINDEX - current microframe number.
+ * @ir_set: Array of Interrupter registers.
+ *
+ * Device Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct cdnsp_run_regs {
+ __le32 microframe_index;
+ __le32 rsvd[7];
+ struct cdnsp_intr_reg ir_set[128];
+};
+
+/**
+ * struct cdnsp_20port_cap - USB2.0 Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @port_reg1: Timer Configuration Register.
+ * @port_reg2: Timer Configuration Register.
+ * @port_reg3: Timer Configuration Register.
+ * @port_reg4: Timer Configuration Register.
+ * @port_reg5: Timer Configuration Register.
+ * @port_reg6: Chicken bits for USB20PPP.
+ */
+struct cdnsp_20port_cap {
+ __le32 ext_cap;
+ __le32 port_reg1;
+ __le32 port_reg2;
+ __le32 port_reg3;
+ __le32 port_reg4;
+ __le32 port_reg5;
+ __le32 port_reg6;
+};
+
+/* Extended capability register fields */
+#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0))
+#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0))
+/* Extended capability IDs - ID 0 reserved */
+#define EXT_CAPS_PROTOCOL 2
+
+/* USB 2.0 Port Peripheral Configuration Extended Capability */
+#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1
+/*
+ * Setting this bit to '1' enables automatic wakeup from L1 state on transfer
+ * TRB prepared when USBSSP operates in USB2.0 mode.
+ */
+#define PORT_REG6_L1_L0_HW_EN BIT(1)
+/*
+ * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0
+ * mode (disables High Speed).
+ */
+#define PORT_REG6_FORCE_FS BIT(0)
+
+/**
+ * struct cdnsp_3xport_cap - USB3.x Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @mode_addr: Miscellaneous 3xPORT operation mode configuration register.
+ * @mode_2: 3x Port Control Register 2.
+ */
+struct cdnsp_3xport_cap {
+ __le32 ext_cap;
+ __le32 mode_addr;
+ __le32 reserved[52];
+ __le32 mode_2;
+};
+
+/* Extended Capability Header for 3XPort Configuration Registers. */
+#define D_XEC_CFG_3XPORT_CAP 0xC0
+#define CFG_3XPORT_SSP_SUPPORT BIT(31)
+#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0)
+
+/* Revision Extended Capability ID */
+#define RTL_REV_CAP 0xC4
+#define RTL_REV_CAP_RX_BUFF_CMD_SIZE GENMASK(31, 24)
+#define RTL_REV_CAP_RX_BUFF_SIZE GENMASK(15, 0)
+#define RTL_REV_CAP_TX_BUFF_CMD_SIZE GENMASK(31, 24)
+#define RTL_REV_CAP_TX_BUFF_SIZE GENMASK(15, 0)
+
+#define CDNSP_VER_1 0x00000000
+#define CDNSP_VER_2 0x10000000
+
+#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) \
+ (readl(&(pdev)->rev_cap->ep_supported) & \
+ (BIT(ep_num) << ((dir) ? 0 : 16)))
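+
+/*
+ * Worked example (illustrative): CDNSP_IF_EP_EXIST(pdev, 2, 1) tests bit 2
+ * of ep_supported (IN endpoints occupy bits 15:0), while
+ * CDNSP_IF_EP_EXIST(pdev, 2, 0) tests bit 18 (OUT endpoints occupy
+ * bits 31:16).
+ */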
+
+/**
+ * struct cdnsp_rev_cap - controller capabilities.
+ * @ext_cap: Header for RTL Revision Extended Capability.
+ * @rtl_revision: RTL revision.
+ * @rx_buff_size: Rx buffer sizes.
+ * @tx_buff_size: Tx buffer sizes.
+ * @ep_supported: Supported endpoints.
+ * @ctrl_revision: Controller revision ID.
+ */
+struct cdnsp_rev_cap {
+ __le32 ext_cap;
+ __le32 rtl_revision;
+ __le32 rx_buff_size;
+ __le32 tx_buff_size;
+ __le32 ep_supported;
+ __le32 ctrl_revision;
+};
+
+/* USB2.0 Port Peripheral Configuration Registers. */
+#define D_XEC_PRE_REGS_CAP 0xC8
+#define REG_CHICKEN_BITS_2_OFFSET 0x48
+#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+
+/* XBUF Extended Capability ID. */
+#define XBUF_CAP_ID 0xCB
+#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
+#define XBUF_RX_TAG_MASK_1_OFFSET 0x24
+#define XBUF_TX_CMD_OFFSET 0x2C
+
+/**
+ * struct cdnsp_doorbell_array.
+ * @cmd_db: Command ring doorbell register.
+ * @ep_db: Endpoint ring doorbell register.
+ * Bits 0 - 7: Endpoint target.
+ * Bits 8 - 15: RsvdZ.
+ * Bits 16 - 31: Stream ID.
+ */
+struct cdnsp_doorbell_array {
+ __le32 cmd_db;
+ __le32 ep_db;
+};
+
+#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff)
+#define DB_VALUE_CMD 0x00000000
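+
+/*
+ * Worked example (illustrative): DB_VALUE(1, 0) == 0x00000002 targets the
+ * endpoint with context index 1 (ep1out in the mapping used by
+ * cdnsp_gadget_init_endpoints()) on stream 0; DB_VALUE(3, 7) == 0x00070004.
+ */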
+
+/**
+ * struct cdnsp_container_ctx.
+ * @type: Type of context. Used to calculate offsets to contained contexts.
+ * @size: Size of the context data.
+ * @ctx_size: Context data structure size - 64 or 32 bytes.
+ * @dma: dma address of the bytes.
+ * @bytes: The raw context data given to HW.
+ *
+ * Represents either a Device or Input context. Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct cdnsp_container_ctx {
+ unsigned int type;
+#define CDNSP_CTX_TYPE_DEVICE 0x1
+#define CDNSP_CTX_TYPE_INPUT 0x2
+ int size;
+ int ctx_size;
+ dma_addr_t dma;
+ u8 *bytes;
+};
+
+/**
+ * struct cdnsp_slot_ctx
+ * @dev_info: Device speed, and last valid endpoint.
+ * @dev_port: Device port number that is needed to access the USB device.
+ * @int_target: Interrupter target number.
+ * @dev_state: Slot state and device address.
+ *
+ * Slot Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the slot context for controller internal use.
+ */
+struct cdnsp_slot_ctx {
+ __le32 dev_info;
+ __le32 dev_port;
+ __le32 int_target;
+ __le32 dev_state;
+ /* offset 0x10 to 0x1f reserved for controller internal use. */
+ __le32 reserved[4];
+};
+
+/* Bits 20:23 in the Slot Context are the speed for the device. */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+
+/* dev_info bitmasks. */
+/* Device speed - values defined by PORTSC Device Speed field - 20:23. */
+#define DEV_SPEED GENMASK(23, 20)
+#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
+/* Index of the last valid endpoint context in this device context - 27:31. */
+#define LAST_CTX_MASK ((unsigned int)GENMASK(31, 27))
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG BIT(0)
+#define EP0_FLAG BIT(1)
+
+/* dev_port bitmasks */
+/* Device port number that is needed to access the USB device. */
+#define DEV_PORT(p) (((p) & 0xff) << 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the controller. */
+#define DEV_ADDR_MASK GENMASK(7, 0)
+/* Slot state */
+#define SLOT_STATE GENMASK(31, 27)
+#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27)
+
+#define SLOT_STATE_DISABLED 0
+#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
+
+/**
+ * struct cdnsp_ep_ctx.
+ * @ep_info: Endpoint state, streams, mult, and interval information.
+ * @ep_info2: Information on endpoint type, max packet size, max burst size,
+ * error count, and whether the controller will force an event for
+ * all transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each flow.
+ * @tx_info: Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the endpoint context for controller internal
+ * use.
+ */
+struct cdnsp_ep_ctx {
+ __le32 ep_info;
+ __le32 ep_info2;
+ __le64 deq;
+ __le32 tx_info;
+ /* offset 0x14 - 0x1f reserved for controller internal use. */
+ __le32 reserved[3];
+};
+
+/* ep_info bitmasks. */
+/*
+ * Endpoint State - bits 0:2:
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK GENMASK(3, 0)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
+/* Mult - Max number of burst within an interval, in EP companion desc. */
+#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8))
+#define CTX_TO_EP_MULT(p) (((p) & GENMASK(9, 8)) >> 8)
+/* bits 10:14 are Max Primary Streams. */
+/* bit 15 is Linear Stream Array. */
+/* Interval - period between requests to an endpoint - 125 us increments. */
+#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16))
+#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) & GENMASK(23, 16)) >> 16))
+#define CTX_TO_EP_INTERVAL(p) (((p) & GENMASK(23, 16)) >> 16)
+#define EP_MAXPSTREAMS_MASK GENMASK(14, 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA BIT(15)
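+
+/*
+ * Worked example (illustrative): an interval field of 3 encodes as
+ * EP_INTERVAL(3) and decodes to EP_INTERVAL_TO_UFRAMES() == 1 << 3 == 8
+ * microframes, i.e. 1 ms at 125 us per microframe; EP_MAXPSTREAMS(4)
+ * places 4 in bits 14:10.
+ */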
+
+/* ep_info2 bitmasks */
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved. */
+/* bit 7 is Device Initiate Disable - for disabling stream selection. */
+#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8))
+#define CTX_TO_MAX_BURST(p) (((p) & GENMASK(15, 8)) >> 8)
+#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16))
+#define MAX_PACKET_MASK GENMASK(31, 16)
+#define MAX_PACKET_DECODED(p) (((p) & GENMASK(31, 16)) >> 16)
+
+/* tx_info bitmasks. */
+#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0))
+#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16))
+#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24)
+#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p) (((p) & GENMASK(31, 16)) >> 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* deq bitmasks. */
+#define EP_CTX_CYCLE_MASK BIT(0)
+#define CTX_DEQ_MASK (~0xfL)
+
+/**
+ * struct cdnsp_input_control_ctx - Input control context.
+ *
+ * @drop_flags: Set the bit of the endpoint context you want to disable.
+ * @add_flags: Set the bit of the endpoint context you want to enable.
+ */
+struct cdnsp_input_control_ctx {
+ __le32 drop_flags;
+ __le32 add_flags;
+ __le32 rsvd2[6];
+};
+
+/**
+ * struct cdnsp_command - represents everything that is needed to issue a
+ * command on the command ring.
+ *
+ * @in_ctx: Pointer to input context structure.
+ * @status: Command Completion Code for last command.
+ * @command_trb: Pointer to command TRB.
+ */
+struct cdnsp_command {
+ /* Input context for changing device state. */
+ struct cdnsp_container_ctx *in_ctx;
+ u32 status;
+ union cdnsp_trb *command_trb;
+};
+
+/**
+ * struct cdnsp_stream_ctx - stream context structure.
+ *
+ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
+ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
+ */
+struct cdnsp_stream_ctx {
+ __le64 stream_ring;
+ __le32 reserved[2];
+};
+
+/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
+#define SCT_FOR_CTX(p) (((p) << 1) & GENMASK(3, 1))
+/* Secondary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_PRI_TR 1
+
+/**
+ * struct cdnsp_stream_info - everything needed to support stream capable
+ * endpoints.
+ * @stream_rings: Array of pointers containing Transfer rings for all
+ * supported streams.
+ * @num_streams: Number of streams, including stream 0.
+ * @stream_ctx_array: The stream context array may be bigger than the number
+ * of streams the driver asked for.
+ * @num_stream_ctxs: Number of contexts in the stream context array.
+ * @ctx_array_dma: Dma address of Context Stream Array.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ * @td_count: Number of TDs associated with endpoint.
+ * @first_prime_det: First PRIME packet detected.
+ * @drbls_count: Number of allowed doorbells.
+ */
+struct cdnsp_stream_info {
+ struct cdnsp_ring **stream_rings;
+ unsigned int num_streams;
+ struct cdnsp_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ struct radix_tree_root trb_address_map;
+ int td_count;
+ u8 first_prime_det;
+#define STREAM_DRBL_FIFO_DEPTH 2
+ u8 drbls_count;
+};
+
+#define STREAM_LOG_STREAMS 4
+#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
+
+#if STREAM_LOG_STREAMS > 16 || STREAM_LOG_STREAMS < 1
+#error "Unsupported stream value"
+#endif
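+
+/*
+ * With STREAM_LOG_STREAMS == 4, STREAM_NUM_STREAMS == BIT(4) == 16 streams
+ * per endpoint, including stream 0.
+ */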
+
+/**
+ * struct cdnsp_ep - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_list: List of requests queuing on transfer ring.
+ * @pdev: Device associated with this endpoint.
+ * @number: Endpoint number (1 - 15).
+ * @idx: The device context index (DCI).
+ * @interval: Interval between packets used for ISOC endpoint.
+ * @name: A human readable name e.g. ep1out.
+ * @direction: Endpoint direction.
+ * @buffering: Number of on-chip buffers related to endpoint.
+ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
+ * @in_ctx: Pointer to input endpoint context structure.
+ * @out_ctx: Pointer to output endpoint context structure.
+ * @ring: Pointer to transfer ring.
+ * @stream_info: Hold stream information.
+ * @ep_state: Current state of endpoint.
+ * @skip: Sometimes the controller cannot process the isochronous endpoint
+ *        ring quickly enough, misses some isoc TDs on the ring and
+ *        generates a Missed Service Error Event.
+ *        Set the skip flag on receiving a Missed Service Error Event and
+ *        process the missed TDs on the endpoint ring.
+ */
+struct cdnsp_ep {
+ struct usb_ep endpoint;
+ struct list_head pending_list;
+ struct cdnsp_device *pdev;
+ u8 number;
+ u8 idx;
+ u32 interval;
+ char name[20];
+ u8 direction;
+ u8 buffering;
+ u8 buffering_period;
+ struct cdnsp_ep_ctx *in_ctx;
+ struct cdnsp_ep_ctx *out_ctx;
+ struct cdnsp_ring *ring;
+ struct cdnsp_stream_info stream_info;
+ unsigned int ep_state;
+#define EP_ENABLED BIT(0)
+#define EP_DIS_IN_RROGRESS BIT(1)
+#define EP_HALTED BIT(2)
+#define EP_STOPPED BIT(3)
+#define EP_WEDGE BIT(4)
+#define EP0_HALTED_STATUS BIT(5)
+#define EP_HAS_STREAMS BIT(6)
+
+ bool skip;
+};
+
+/**
+ * struct cdnsp_device_context_array
+ * @dev_context_ptrs: Array of 64-bit DMA addresses for device contexts.
+ * @dma: DMA address for device contexts structure.
+ */
+struct cdnsp_device_context_array {
+ __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1];
+ dma_addr_t dma;
+};
+
+/**
+ * struct cdnsp_transfer_event.
+ * @buffer: 64-bit buffer address, or immediate data.
+ * @transfer_len: Data length transferred.
+ * @flags: Field is interpreted differently based on the type of TRB.
+ */
+struct cdnsp_transfer_event {
+ __le64 buffer;
+ __le32 transfer_len;
+ __le32 flags;
+};
+
+/* Invalidate event after disabling endpoint. */
+#define TRB_EVENT_INVALIDATE 8
+
+/* Transfer event TRB length bit mask. */
+/* bits 0:23 */
+#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0))
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_INVALID 0
+#define COMP_SUCCESS 1
+#define COMP_DATA_BUFFER_ERROR 2
+#define COMP_BABBLE_DETECTED_ERROR 3
+#define COMP_TRB_ERROR 5
+#define COMP_RESOURCE_ERROR 7
+#define COMP_NO_SLOTS_AVAILABLE_ERROR 9
+#define COMP_INVALID_STREAM_TYPE_ERROR 10
+#define COMP_SLOT_NOT_ENABLED_ERROR 11
+#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12
+#define COMP_SHORT_PACKET 13
+#define COMP_RING_UNDERRUN 14
+#define COMP_RING_OVERRUN 15
+#define COMP_VF_EVENT_RING_FULL_ERROR 16
+#define COMP_PARAMETER_ERROR 17
+#define COMP_CONTEXT_STATE_ERROR 19
+#define COMP_EVENT_RING_FULL_ERROR 21
+#define COMP_INCOMPATIBLE_DEVICE_ERROR 22
+#define COMP_MISSED_SERVICE_ERROR 23
+#define COMP_COMMAND_RING_STOPPED 24
+#define COMP_COMMAND_ABORTED 25
+#define COMP_STOPPED 26
+#define COMP_STOPPED_LENGTH_INVALID 27
+#define COMP_STOPPED_SHORT_PACKET 28
+#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29
+#define COMP_ISOCH_BUFFER_OVERRUN 31
+#define COMP_EVENT_LOST_ERROR 32
+#define COMP_UNDEFINED_ERROR 33
+#define COMP_INVALID_STREAM_ID_ERROR 34
+
+/* Transfer Event NRDY bit fields. */
+#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0))
+#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0))
+#define STREAM_PRIME_ACK 0xFFFE
+#define STREAM_REJECTED 0xFFFF
+
+/* Transfer Event bit fields. */
+#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16)
+
+/**
+ * struct cdnsp_link_trb
+ * @segment_ptr: 64-bit segment pointer.
+ * @intr_target: Interrupter target.
+ * @control: Flags.
+ */
+struct cdnsp_link_trb {
+ __le64 segment_ptr;
+ __le32 intr_target;
+ __le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE BIT(1)
+
+/**
+ * struct cdnsp_event_cmd - Command completion event TRB.
+ * @cmd_trb: Pointer to command TRB, or the value passed by the event data trb.
+ * @status: Command completion parameters and error code.
+ * @flags: Flags.
+ */
+struct cdnsp_event_cmd {
+ __le64 cmd_trb;
+ __le32 status;
+ __le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress. */
+#define TRB_BSR BIT(9)
+
+/* Configure Endpoint - Deconfigure. */
+#define TRB_DC BIT(9)
+
+/* Force Header */
+#define TRB_FH_TO_PACKET_TYPE(p) ((p) & GENMASK(4, 0))
+#define TRB_FH_TR_PACKET 0x4
+#define TRB_FH_TO_DEVICE_ADDRESS(p) (((p) << 25) & GENMASK(31, 25))
+#define TRB_FH_TR_PACKET_DEV_NOT 0x6
+#define TRB_FH_TO_NOT_TYPE(p) (((p) << 4) & GENMASK(7, 4))
+#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1
+#define TRB_FH_TO_INTERFACE(p) (((p) << 8) & GENMASK(15, 8))
+
+enum cdnsp_setup_dev {
+ SETUP_CONTEXT_ONLY,
+ SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 24:31 are the slot ID. */
+#define TRB_TO_SLOT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) << 24) & GENMASK(31, 24))
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
+#define TRB_TO_EP_INDEX(p) (((p) >> 16) & 0x1f)
+
+#define EP_ID_FOR_TRB(p) ((((p) + 1) << 16) & GENMASK(20, 16))
+
+#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p) (((p) >> 23) & 0x1)
+#define LAST_EP_INDEX 30
+
+/* Set TR Dequeue Pointer command TRB fields. */
+#define TRB_TO_STREAM_ID(p) ((((p) & GENMASK(31, 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16))
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
+
+/* Link TRB specific fields. */
+#define TRB_TC BIT(1)
+
+/* Port Status Change Event TRB fields. */
+/* Port ID - bits 31:24. */
+#define GET_PORT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SET_PORT_ID(p) (((p) << 24) & GENMASK(31, 24))
+#define EVENT_DATA BIT(2)
+
+/* Normal TRB fields. */
+/* transfer_len bitmasks - bits 0:16. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
+#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p) (((p) & GENMASK(21, 17)) >> 17)
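+
+/*
+ * Worked example (illustrative): TRB_TD_SIZE(5) places 5 in bits 21:17;
+ * remainders above 31 are clamped, so TRB_TD_SIZE(40) encodes 31.
+ */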
+/*
+ * Controller uses the TD_SIZE field for TBC if Extended TBC
+ * is enabled (ETE).
+ */
+#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at. */
+#define TRB_INTR_TARGET(p) (((p) << 22) & GENMASK(31, 22))
+#define GET_INTR_TARGET(p) (((p) & GENMASK(31, 22)) >> 22)
+/*
+ * Total burst count field, Rsvdz on controller with Extended TBC
+ * enabled (ETE).
+ */
+#define TRB_TBC(p) (((p) & 0x3) << 7)
+#define TRB_TLBPC(p) (((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by the controller or the driver. */
+#define TRB_CYCLE BIT(0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP BIT(2)
+/* Set PCIe no snoop attribute. */
+#define TRB_NO_SNOOP BIT(3)
+/* Chain multiple TRBs into a TD. */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC BIT(5)
+/* The buffer pointer contains immediate data. */
+#define TRB_IDT BIT(6)
+/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
+#define TRB_STAT BIT(7)
+/* Block Event Interrupt. */
+#define TRB_BEI BIT(9)
+
+/* Control transfer TRB specific fields. */
+#define TRB_DIR_IN BIT(16)
+
+/* TRB bit mask in Data Stage TRB */
+#define TRB_SETUPID_BITMASK GENMASK(9, 8)
+#define TRB_SETUPID(p) ((p) << 8)
+#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8)
+
+#define TRB_SETUP_SPEEDID_USB3 0x1
+#define TRB_SETUP_SPEEDID_USB2 0x0
+#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7))
+
+#define TRB_SETUPSTAT_ACK 0x1
+#define TRB_SETUPSTAT_STALL 0x0
+#define TRB_SETUPSTAT(p) ((p) << 6)
+
+/* Isochronous TRB specific fields */
+#define TRB_SIA BIT(31)
+#define TRB_FRAME_ID(p) (((p) << 20) & GENMASK(30, 20))
+
+struct cdnsp_generic_trb {
+ __le32 field[4];
+};
+
+union cdnsp_trb {
+ struct cdnsp_link_trb link;
+ struct cdnsp_transfer_event trans_event;
+ struct cdnsp_event_cmd event_cmd;
+ struct cdnsp_generic_trb generic;
+};
+
+/* TRB bit mask. */
+#define TRB_TYPE_BITMASK GENMASK(15, 10)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
+
+/* TRB type IDs. */
+/* bulk, interrupt, isoc scatter/gather, and control data stage. */
+#define TRB_NORMAL 1
+/* Setup Stage for control transfers. */
+#define TRB_SETUP 2
+/* Data Stage for control transfers. */
+#define TRB_DATA 3
+/* Status Stage for control transfers. */
+#define TRB_STATUS 4
+/* ISOC transfers. */
+#define TRB_ISOC 5
+/* TRB for linking ring segments. */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring). */
+#define TRB_TR_NOOP 8
+
+/* Command TRBs */
+/* Enable Slot Command. */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command. */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command. */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command. */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command. */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Endpoint Command. */
+#define TRB_RESET_EP 14
+/* Stop Transfer Ring Command. */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command. */
+#define TRB_SET_DEQ 16
+/* Reset Device Command. */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt). */
+#define TRB_FORCE_EVENT 18
+/* Force Header Command - generate a transaction or link management packet. */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings. */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved. */
+
+/* Event TRBS. */
+/* Transfer Event. */
+#define TRB_TRANSFER 32
+/* Command Completion Event. */
+#define TRB_COMPLETION 33
+/* Port Status Change Event. */
+#define TRB_PORT_STATUS 34
+/* Device Controller Event. */
+#define TRB_HC_EVENT 37
+/* MFINDEX Wrap Event - microframe counter wrapped. */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved. */
+/* Endpoint Not Ready Event. */
+#define TRB_ENDPOINT_NRDY 48
+/* TRB IDs 49-53 reserved. */
+/* Halt Endpoint Command. */
+#define TRB_HALT_ENDPOINT 54
+/* Doorbell Overflow Event. */
+#define TRB_DRB_OVERFLOW 57
+/* Flush Endpoint Command. */
+#define TRB_FLUSH_ENDPOINT 58
+
+#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4.
+ * The command ring is 64-byte aligned, so it must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_EVENT_SEGMENT 256
+#define TRBS_PER_EV_DEQ_UPDATE 100
+#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
+#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
+/* TRB buffer pointers can't cross 64KB boundaries. */
+#define TRB_MAX_BUFF_SHIFT 16
+#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
+ ((addr) & (TRB_MAX_BUFF_SIZE - 1)))
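+
+/*
+ * Worked example (illustrative): for a buffer at DMA address 0x1F400,
+ * TRB_BUFF_LEN_UP_TO_BOUNDARY() == 0x10000 - 0xF400 == 0xC00, so at most
+ * 3072 bytes fit in one TRB before the 64KB boundary.
+ */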
+
+/**
+ * struct cdnsp_segment - segment related data.
+ * @trbs: Array of Transfer Request Blocks.
+ * @next: Pointer to the next segment.
+ * @dma: DMA address of current segment.
+ * @bounce_dma: Bounce buffer DMA address.
+ * @bounce_buf: Bounce buffer virtual address.
+ * @bounce_offs: Bounce buffer offset.
+ * @bounce_len: Bounce buffer length.
+ */
+struct cdnsp_segment {
+ union cdnsp_trb *trbs;
+ struct cdnsp_segment *next;
+ dma_addr_t dma;
+	/* Max packet sized bounce buffer for td-fragment alignment. */
+ dma_addr_t bounce_dma;
+ void *bounce_buf;
+ unsigned int bounce_offs;
+ unsigned int bounce_len;
+};
+
+/**
+ * struct cdnsp_td - Transfer Descriptor object.
+ * @td_list: Used for binding TD with ep_ring->td_list.
+ * @preq: Request associated with this TD
+ * @start_seg: Segment containing the first_trb in TD.
+ * @first_trb: First TRB for this TD.
+ * @last_trb: Last TRB related with TD.
+ * @bounce_seg: Bounce segment for this TD.
+ * @request_length_set: actual_length of the request has already been set.
+ * @drbl: TD has been added to HW scheduler - only for stream capable
+ * endpoints.
+ */
+struct cdnsp_td {
+ struct list_head td_list;
+ struct cdnsp_request *preq;
+ struct cdnsp_segment *start_seg;
+ union cdnsp_trb *first_trb;
+ union cdnsp_trb *last_trb;
+ struct cdnsp_segment *bounce_seg;
+ bool request_length_set;
+ bool drbl;
+};
+
+/**
+ * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring.
+ * @new_deq_seg: New dequeue segment.
+ * @new_deq_ptr: New dequeue pointer.
+ * @new_cycle_state: New cycle state.
+ * @stream_id: stream id for which new dequeue pointer has been selected.
+ */
+struct cdnsp_dequeue_state {
+ struct cdnsp_segment *new_deq_seg;
+ union cdnsp_trb *new_deq_ptr;
+ int new_cycle_state;
+ unsigned int stream_id;
+};
+
+enum cdnsp_ring_type {
+ TYPE_CTRL = 0,
+ TYPE_ISOC,
+ TYPE_BULK,
+ TYPE_INTR,
+ TYPE_STREAM,
+ TYPE_COMMAND,
+ TYPE_EVENT,
+};
+
+/**
+ * struct cdnsp_ring - information describing transfer, command or event ring.
+ * @first_seg: First segment on transfer ring.
+ * @last_seg: Last segment on transfer ring.
+ * @enqueue: SW enqueue pointer address.
+ * @enq_seg: SW enqueue segment address.
+ * @dequeue: SW dequeue pointer address.
+ * @deq_seg: SW dequeue segment address.
+ * @td_list: transfer descriptor list associated with this ring.
+ * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle
+ * field to give ownership of the TRB to the device controller
+ * (if we are the producer) or to check if we own the TRB
+ * (if we are the consumer).
+ * @stream_id: Stream id
+ * @stream_active: Stream is active - PRIME packet has been detected.
+ * @stream_rejected: This ring has been rejected by host.
+ * @num_tds: Number of TDs associated with ring.
+ * @num_segs: Number of segments.
+ * @num_trbs_free: Number of free TRBs on the ring.
+ * @bounce_buf_len: Length of bounce buffer.
+ * @type: Ring type - event, transfer, or command ring.
+ * @last_td_was_short: Last TD on the ring was a short TD.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ */
+struct cdnsp_ring {
+ struct cdnsp_segment *first_seg;
+ struct cdnsp_segment *last_seg;
+ union cdnsp_trb *enqueue;
+ struct cdnsp_segment *enq_seg;
+ union cdnsp_trb *dequeue;
+ struct cdnsp_segment *deq_seg;
+ struct list_head td_list;
+ u32 cycle_state;
+ unsigned int stream_id;
+ unsigned int stream_active;
+ unsigned int stream_rejected;
+ int num_tds;
+ unsigned int num_segs;
+ unsigned int num_trbs_free;
+ unsigned int bounce_buf_len;
+ enum cdnsp_ring_type type;
+ bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
+};
+
+/**
+ * struct cdnsp_erst_entry - event ring segment table entry object.
+ * @seg_addr: 64-bit event ring segment address.
+ * @seg_size: Number of TRBs in the segment.
+ */
+struct cdnsp_erst_entry {
+ __le64 seg_addr;
+ __le32 seg_size;
+ /* Set to zero */
+ __le32 rsvd;
+};
+
+/**
+ * struct cdnsp_erst - event ring segment table for the event ring.
+ * @entries: Array of event ring segments.
+ * @num_entries: Number of segments in entries array.
+ * @erst_dma_addr: DMA address for entries array.
+ */
+struct cdnsp_erst {
+ struct cdnsp_erst_entry *entries;
+ unsigned int num_entries;
+ dma_addr_t erst_dma_addr;
+};
+
+/**
+ * struct cdnsp_request - extended device side representation of usb_request
+ * object.
+ * @td: Transfer descriptor associated with this request.
+ * @request: Generic usb_request object describing single I/O request.
+ * @list: Used to add the request to the endpoint's pending_list.
+ * @pep: Extended representation of usb_ep object.
+ * @epnum: Endpoint number associated with usb request.
+ * @direction: Endpoint direction for usb request.
+ */
+struct cdnsp_request {
+ struct cdnsp_td td;
+ struct usb_request request;
+ struct list_head list;
+ struct cdnsp_ep *pep;
+ u8 epnum;
+ unsigned direction:1;
+};
+
+#define ERST_NUM_SEGS 1
+
+/* Stages used during enumeration process. */
+enum cdnsp_ep0_stage {
+ CDNSP_SETUP_STAGE,
+ CDNSP_DATA_STAGE,
+ CDNSP_STATUS_STAGE,
+};
+
+/**
+ * struct cdnsp_port - holds information about detected ports.
+ * @regs: Base address of the port register set.
+ * @port_num: Port number.
+ * @exist: Indicates whether the port exists.
+ * @maj_rev: Major revision.
+ * @min_rev: Minor revision.
+ */
+struct cdnsp_port {
+ struct cdnsp_port_regs __iomem *regs;
+ u8 port_num;
+ u8 exist;
+ u8 maj_rev;
+ u8 min_rev;
+};
+
+#define CDNSP_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define CDNSP_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
+#define CDNSP_EXT_PORT_OFF(x) ((x) & 0xff)
+#define CDNSP_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+
+/**
+ * struct cdnsp_device - represent USB device.
+ * @dev: Pointer to device structure associated with this controller.
+ * @gadget: Device side representation of the peripheral controller.
+ * @gadget_driver: Pointer to the gadget driver.
+ * @irq: IRQ line number used by device side.
+ * @regs: IO device memory.
+ * @cap_regs: Capability registers.
+ * @op_regs: Operational registers.
+ * @run_regs: Runtime registers.
+ * @dba: Device base address register.
+ * @ir_set: Current interrupter register set.
+ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
+ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached copy of read-only HCSPARAMS1 register.
+ * @hcc_params: Cached copy of read-only HCCPARAMS register.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internal allocated request used during enumeration.
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @three_stage_setup: Three-stage or two-stage setup.
+ * @ep0_expect_in: Data IN expected for control transfer.
+ * @setup_id: Setup identifier.
+ * @setup_speed: Speed detected for current SETUP packet.
+ * @setup_buf: Buffer for SETUP packet.
+ * @device_address: Current device address.
+ * @may_wakeup: remote wakeup enabled/disabled.
+ * @lock: Lock used in interrupt thread context.
+ * @hci_version: device controller version.
+ * @dcbaa: Device context base address array.
+ * @cmd_ring: Command ring.
+ * @cmd: Represent all what is needed to issue command on Command Ring.
+ * @event_ring: Event ring.
+ * @erst: Event Ring Segment table
+ * @slot_id: Current Slot ID. Should be 0 or 1.
+ * @out_ctx: Output context.
+ * @in_ctx: Input context.
+ * @eps: array of endpoints object associated with device.
+ * @usb2_hw_lpm_capable: Hardware LPM is enabled.
+ * @u1_allowed: Allow device transition to U1 state.
+ * @u2_allowed: Allow device transition to U2 state.
+ * @device_pool: DMA pool for allocating input and output context.
+ * @segment_pool: DMA pool for allocating new segments.
+ * @cdnsp_state: Current state of controller.
+ * @link_state: Current link state.
+ * @usb2_port: USB 2.0 port.
+ * @usb3_port: USB 3.0 port.
+ * @active_port: Currently selected port.
+ * @test_mode: Selected test mode.
+ */
+struct cdnsp_device {
+ struct device *dev;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ unsigned int irq;
+ void __iomem *regs;
+
+ /* Registers map */
+ struct cdnsp_cap_regs __iomem *cap_regs;
+ struct cdnsp_op_regs __iomem *op_regs;
+ struct cdnsp_run_regs __iomem *run_regs;
+ struct cdnsp_doorbell_array __iomem *dba;
+ struct cdnsp_intr_reg __iomem *ir_set;
+ struct cdnsp_20port_cap __iomem *port20_regs;
+ struct cdnsp_3xport_cap __iomem *port3x_regs;
+ struct cdnsp_rev_cap __iomem *rev_cap;
+
+ /* Cached register copies of read-only CDNSP data */
+ __u32 hcs_params1;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+ /* Lock used in interrupt thread context. */
+ spinlock_t lock;
+ struct usb_ctrlrequest setup;
+ struct cdnsp_request ep0_preq;
+ enum cdnsp_ep0_stage ep0_stage;
+ u8 three_stage_setup;
+ u8 ep0_expect_in;
+ u8 setup_id;
+ u8 setup_speed;
+ void *setup_buf;
+ u8 device_address;
+ int may_wakeup;
+ u16 hci_version;
+
+ /* data structures */
+ struct cdnsp_device_context_array *dcbaa;
+ struct cdnsp_ring *cmd_ring;
+ struct cdnsp_command cmd;
+ struct cdnsp_ring *event_ring;
+ struct cdnsp_erst erst;
+ int slot_id;
+
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+ * software must allocate for the hardware.
+ */
+ struct cdnsp_container_ctx out_ctx;
+ struct cdnsp_container_ctx in_ctx;
+ struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM];
+ u8 usb2_hw_lpm_capable:1;
+ u8 u1_allowed:1;
+ u8 u2_allowed:1;
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+
+#define CDNSP_STATE_HALTED BIT(1)
+#define CDNSP_STATE_DYING BIT(2)
+#define CDNSP_STATE_DISCONNECT_PENDING BIT(3)
+#define CDNSP_WAKEUP_PENDING BIT(4)
+ unsigned int cdnsp_state;
+ unsigned int link_state;
+
+ struct cdnsp_port usb2_port;
+ struct cdnsp_port usb3_port;
+ struct cdnsp_port *active_port;
+ u16 test_mode;
+};
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Registers with 64-bit address pointers should be written to with
+ * dword accesses by writing the low dword first (ptr[0]), then the high dword
+ * (ptr[1]) second. Controller implementations that do not support 64-bit
+ * address pointers will ignore the high dword, and write order is irrelevant.
+ */
+static inline u64 cdnsp_read_64(__le64 __iomem *regs)
+{
+ return lo_hi_readq(regs);
+}
+
+static inline void cdnsp_write_64(const u64 val, __le64 __iomem *regs)
+{
+ lo_hi_writeq(val, regs);
+}
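+
+/*
+ * Example (illustrative): writing the 64-bit DMA address 0x123456000 with
+ * cdnsp_write_64() issues two MMIO writes, the low dword 0x23456000 first
+ * and the high dword 0x00000001 second. A controller that only implements
+ * 32-bit pointers simply ignores the second write.
+ */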
+
+/* CDNSP memory management functions. */
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev);
+int cdnsp_mem_init(struct cdnsp_device *pdev);
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev);
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev);
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *ep);
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ gfp_t mem_flags);
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs, gfp_t flags);
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *ep, u64 address);
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams);
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+
+/* Device controller glue. */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id);
+int cdnsp_halt(struct cdnsp_device *pdev);
+void cdnsp_died(struct cdnsp_device *pdev);
+int cdnsp_reset(struct cdnsp_device *pdev);
+irqreturn_t cdnsp_irq_handler(int irq, void *priv);
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup);
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *usbsssp_data,
+ struct usb_request *req, int enable);
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data);
+
+/* Ring, segment, TRB, and TD functions. */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+ union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ union cdnsp_trb *trb);
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev);
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+ union cdnsp_trb *event_ring_deq,
+ u8 clear_ehb);
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring);
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev);
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type);
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr,
+ enum cdnsp_setup_dev setup);
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq);
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr);
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num);
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev);
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state);
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep);
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring);
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs, u32 link_state);
+u32 cdnsp_port_state_to_neutral(u32 state);
+
+/* CDNSP device controller contexts. */
+int cdnsp_enable_slot(struct cdnsp_device *pdev);
+int cdnsp_disable_slot(struct cdnsp_device *pdev);
+struct cdnsp_input_control_ctx
+ *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+ unsigned int ep_index);
+/* CDNSP gadget interface. */
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev);
+void cdnsp_resume_gadget(struct cdnsp_device *pdev);
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev);
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep, struct cdnsp_request *preq,
+ int status);
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+unsigned int cdnsp_port_speed(unsigned int port_status);
+void cdnsp_irq_reset(struct cdnsp_device *pdev);
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep, int value);
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_setup_analyze(struct cdnsp_device *pdev);
+int cdnsp_status_stage(struct cdnsp_device *pdev);
+int cdnsp_reset_device(struct cdnsp_device *pdev);
+
+/**
+ * next_request - gets the next request on the given list
+ * @list: the request list to operate on
+ *
+ * Caller should take care of locking. This function returns NULL or the first
+ * request available on the list.
+ */
+static inline struct cdnsp_request *next_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdnsp_request, list);
+}
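+
+/*
+ * Example usage (illustrative only; assumes the caller holds pdev->lock and
+ * that pep->pending_list is the endpoint's list of queued requests):
+ *
+ *	struct cdnsp_request *preq;
+ *
+ *	preq = next_request(&pep->pending_list);
+ *	if (preq)
+ *		cdnsp_gadget_giveback(pep, preq, -ESHUTDOWN);
+ */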
+
+#define to_cdnsp_ep(ep) (container_of(ep, struct cdnsp_ep, endpoint))
+#define gadget_to_cdnsp(g) (container_of(g, struct cdnsp_device, gadget))
+#define request_to_cdnsp_request(r) (container_of(r, struct cdnsp_request, \
+ request))
+#define to_cdnsp_request(r) (container_of(r, struct cdnsp_request, request))
+int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq,
+ struct cdnsp_ep *pep);
+
+#endif /* __LINUX_CDNSP_GADGET_H */
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
new file mode 100644
index 000000000000..7a84e928710e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep);
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
+ unsigned int cycle_state,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_segment *seg;
+ dma_addr_t dma;
+ int i;
+
+ seg = kzalloc(sizeof(*seg), flags);
+ if (!seg)
+ return NULL;
+
+ seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+ return NULL;
+ }
+
+ if (max_packet) {
+ seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+ if (!seg->bounce_buf)
+ goto free_dma;
+ }
+
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
+ if (cycle_state == 0) {
+ for (i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+
+free_dma:
+ dma_pool_free(pdev->segment_pool, seg->trbs, dma);
+ kfree(seg);
+
+ return NULL;
+}
+
+static void cdnsp_segment_free(struct cdnsp_device *pdev,
+ struct cdnsp_segment *seg)
+{
+ if (seg->trbs)
+ dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
+
+ kfree(seg->bounce_buf);
+ kfree(seg);
+}
+
+static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
+ struct cdnsp_segment *first)
+{
+ struct cdnsp_segment *seg;
+
+ seg = first->next;
+
+ while (seg != first) {
+ struct cdnsp_segment *next = seg->next;
+
+ cdnsp_segment_free(pdev, seg);
+ seg = next;
+ }
+
+ cdnsp_segment_free(pdev, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void cdnsp_link_segments(struct cdnsp_device *pdev,
+ struct cdnsp_segment *prev,
+ struct cdnsp_segment *next,
+ enum cdnsp_ring_type type)
+{
+ struct cdnsp_link_trb *link;
+ u32 val;
+
+ if (!prev || !next)
+ return;
+
+ prev->next = next;
+ if (type != TYPE_EVENT) {
+ link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
+ link->segment_ptr = cpu_to_le64(next->dma);
+
+ /*
+ * Set the last TRB in the segment to have a TRB type ID
+ * of Link TRB
+ */
+ val = le32_to_cpu(link->control);
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ link->control = cpu_to_le32(val);
+ }
+}
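+
+/*
+ * For illustration, assuming the xHCI-style encoding used by these macros
+ * (TRB type in bits 15:10 of the control word, TRB_LINK == 6): the
+ * read-modify-write above clears TRB_TYPE_BITMASK and ORs in
+ * TRB_TYPE(TRB_LINK) = 6 << 10 = 0x1800, leaving the cycle and chain bits
+ * untouched.
+ */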
+
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void cdnsp_link_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *first,
+ struct cdnsp_segment *last,
+ unsigned int num_segs)
+{
+ struct cdnsp_segment *next;
+
+ if (!ring || !first || !last)
+ return;
+
+ next = ring->enq_seg->next;
+ cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
+ cdnsp_link_segments(pdev, last, next, ring->type);
+ ring->num_segs += num_segs;
+ ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
+ if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ ~cpu_to_le32(LINK_TOGGLE);
+ last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+ ring->last_seg = last;
+ }
+}
+
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the device controller won't
+ * tell us which stream ring the TRB came from. We could store the stream ID
+ * in an event data TRB, but that doesn't help us for the cancellation case,
+ * since the endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example,
+ * say I have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ gfp_t mem_flags)
+{
+ unsigned long key;
+ int ret;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+
+ /* Skip any segments that were already added. */
+ if (radix_tree_lookup(trb_address_map, key))
+ return 0;
+
+ ret = radix_tree_maybe_preload(mem_flags);
+ if (ret)
+ return ret;
+
+ ret = radix_tree_insert(trb_address_map, key, ring);
+ radix_tree_preload_end();
+
+ return ret;
+}
+
+static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_segment *seg)
+{
+ unsigned long key;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ if (radix_tree_lookup(trb_address_map, key))
+ radix_tree_delete(trb_address_map, key);
+}
+
+static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *first_seg,
+ struct cdnsp_segment *last_seg,
+ gfp_t mem_flags)
+{
+ struct cdnsp_segment *failed_seg;
+ struct cdnsp_segment *seg;
+ int ret;
+
+ seg = first_seg;
+ do {
+ ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
+ mem_flags);
+ if (ret)
+ goto remove_streams;
+ if (seg == last_seg)
+ return 0;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return 0;
+
+remove_streams:
+ failed_seg = seg;
+ seg = first_seg;
+ do {
+ cdnsp_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return ret;
+}
+
+static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
+{
+ struct cdnsp_segment *seg;
+
+ seg = ring->first_seg;
+ do {
+ cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+}
+
+static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
+{
+ return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
+ ring->first_seg, ring->last_seg, GFP_ATOMIC);
+}
+
+static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+ if (!ring)
+ return;
+
+ trace_cdnsp_ring_free(ring);
+
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ cdnsp_remove_stream_mapping(ring);
+
+ cdnsp_free_segments_for_ring(pdev, ring->first_seg);
+ }
+
+ kfree(ring);
+}
+
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
+{
+ ring->enqueue = ring->first_seg->trbs;
+ ring->enq_seg = ring->first_seg;
+ ring->dequeue = ring->enqueue;
+ ring->deq_seg = ring->first_seg;
+
+ /*
+ * The ring is initialized to 0. The producer must write 1 to the cycle
+ * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+ * compare CCS to the cycle bit to check ownership, so CCS = 1.
+ *
+ * New rings are initialized with cycle state equal to 1; if we are
+ * handling ring expansion, set the cycle state equal to the old ring.
+ */
+ ring->cycle_state = 1;
+
+ /*
+ * Each segment has a link TRB, and leave an extra TRB for SW
+ * accounting purpose
+ */
+ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+}
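+
+/*
+ * Worked example (assuming, for illustration, TRBS_PER_SEGMENT == 256): a
+ * freshly initialized two-segment ring starts with 2 * (256 - 1) - 1 = 509
+ * free TRBs, one TRB per segment being reserved for the link TRB and one
+ * more for software accounting.
+ */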
+
+/* Allocate segments and link them for a ring. */
+static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
+ struct cdnsp_segment **first,
+ struct cdnsp_segment **last,
+ unsigned int num_segs,
+ unsigned int cycle_state,
+ enum cdnsp_ring_type type,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_segment *prev;
+
+ /* Allocate first segment. */
+ prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
+ if (!prev)
+ return -ENOMEM;
+
+ num_segs--;
+ *first = prev;
+
+ /* Allocate all other segments. */
+ while (num_segs > 0) {
+ struct cdnsp_segment *next;
+
+ next = cdnsp_segment_alloc(pdev, cycle_state,
+ max_packet, flags);
+ if (!next) {
+ cdnsp_free_segments_for_ring(pdev, *first);
+ return -ENOMEM;
+ }
+
+ cdnsp_link_segments(pdev, prev, next, type);
+
+ prev = next;
+ num_segs--;
+ }
+
+ cdnsp_link_segments(pdev, prev, *first, type);
+ *last = prev;
+
+ return 0;
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ */
+static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
+ unsigned int num_segs,
+ enum cdnsp_ring_type type,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_ring *ring;
+ int ret;
+
+ ring = kzalloc(sizeof(*ring), flags);
+ if (!ring)
+ return NULL;
+
+ ring->num_segs = num_segs;
+ ring->bounce_buf_len = max_packet;
+ INIT_LIST_HEAD(&ring->td_list);
+ ring->type = type;
+
+ if (num_segs == 0)
+ return ring;
+
+ ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
+ &ring->last_seg, num_segs,
+ 1, type, max_packet, flags);
+ if (ret)
+ goto fail;
+
+ /* Only event ring does not use link TRB. */
+ if (type != TYPE_EVENT)
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+
+ cdnsp_initialize_ring_info(ring);
+ trace_cdnsp_ring_alloc(ring);
+ return ring;
+fail:
+ kfree(ring);
+ return NULL;
+}
+
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ cdnsp_ring_free(pdev, pep->ring);
+ pep->ring = NULL;
+ cdnsp_free_stream_info(pdev, pep);
+}
+
+/*
+ * Expand an existing ring.
+ * Allocate a new set of segments and splice them into the existing ring.
+ */
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs,
+ gfp_t flags)
+{
+ unsigned int num_segs_needed;
+ struct cdnsp_segment *first;
+ struct cdnsp_segment *last;
+ unsigned int num_segs;
+ int ret;
+
+ num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+ (TRBS_PER_SEGMENT - 1);
+
+ /* Allocate as many segments as needed, at least doubling the ring size. */
+ num_segs = max(ring->num_segs, num_segs_needed);
+
+ ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
+ ring->cycle_state, ring->type,
+ ring->bounce_buf_len, flags);
+ if (ret)
+ return -ENOMEM;
+
+ if (ring->type == TYPE_STREAM)
+ ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
+ ring, first,
+ last, flags);
+
+ if (ret) {
+ cdnsp_free_segments_for_ring(pdev, first);
+
+ return ret;
+ }
+
+ cdnsp_link_rings(pdev, ring, first, last, num_segs);
+ trace_cdnsp_ring_expansion(ring);
+
+ return 0;
+}
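+
+/*
+ * Worked example of the expansion math above (again assuming
+ * TRBS_PER_SEGMENT == 256, i.e. 255 usable TRBs per segment): asking for
+ * room for 600 TRBs needs (600 + 254) / 255 = 3 segments, so a two-segment
+ * ring grows by max(2, 3) = 3 segments to 5 segments in total.
+ */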
+
+static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
+{
+ int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
+
+ pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
+ pdev->out_ctx.size = size;
+ pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
+ pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->out_ctx.dma);
+
+ if (!pdev->out_ctx.bytes)
+ return -ENOMEM;
+
+ pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
+ pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
+ pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
+ pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->in_ctx.dma);
+
+ if (!pdev->in_ctx.bytes) {
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct cdnsp_input_control_ctx
+ *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type != CDNSP_CTX_TYPE_INPUT)
+ return NULL;
+
+ return (struct cdnsp_input_control_ctx *)ctx->bytes;
+}
+
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
+ return (struct cdnsp_slot_ctx *)ctx->bytes;
+
+ return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
+}
+
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+ unsigned int ep_index)
+{
+ /* Increment ep index by offset of start of ep ctx array. */
+ ep_index++;
+ if (ctx->type == CDNSP_CTX_TYPE_INPUT)
+ ep_index++;
+
+ return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
+}
+
+static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
+ pep->stream_info.ctx_array_dma);
+}
+
+/* The number of entries in the stream context array must be a power of 2. */
+static struct cdnsp_stream_ctx
+ *cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ size_t size = sizeof(struct cdnsp_stream_ctx) *
+ pep->stream_info.num_stream_ctxs;
+
+ if (size > CDNSP_CTX_SIZE)
+ return NULL;
+
+ /*
+ * The driver intentionally uses the device_pool to allocate the
+ * stream context array. Each pool entry is 2048 bytes, which gives
+ * us room for 128 stream context entries.
+ */
+ return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
+ &pep->stream_info.ctx_array_dma);
+}
+
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
+{
+ if (pep->ep_state & EP_HAS_STREAMS)
+ return radix_tree_lookup(&pep->stream_info.trb_address_map,
+ address >> TRB_SEGMENT_SHIFT);
+
+ return pep->ring;
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.
+ * The number of requested streams includes stream 0, which cannot be used by
+ * the driver.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ */
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams)
+{
+ struct cdnsp_stream_info *stream_info;
+ struct cdnsp_ring *cur_ring;
+ u32 cur_stream;
+ u64 addr;
+ int ret;
+ int mps;
+
+ stream_info = &pep->stream_info;
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kcalloc(num_streams,
+ sizeof(struct cdnsp_ring *),
+ GFP_ATOMIC);
+ if (!stream_info->stream_rings)
+ return -ENOMEM;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW. */
+ stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
+ if (!stream_info->stream_ctx_array)
+ goto cleanup_stream_rings;
+
+ memset(stream_info->stream_ctx_array, 0,
+ sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
+ INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+ mps = usb_endpoint_maxp(pep->endpoint.desc);
+
+ /*
+ * Allocate rings for all the streams that the driver will use,
+ * and add their segment DMA addresses to the radix tree.
+ * Stream 0 is reserved.
+ */
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
+ GFP_ATOMIC);
+ stream_info->stream_rings[cur_stream] = cur_ring;
+
+ if (!cur_ring)
+ goto cleanup_rings;
+
+ cur_ring->stream_id = cur_stream;
+ cur_ring->trb_address_map = &stream_info->trb_address_map;
+
+ /* Set deq ptr, cycle bit, and stream context type. */
+ addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
+ cur_ring->cycle_state;
+
+ stream_info->stream_ctx_array[cur_stream].stream_ring =
+ cpu_to_le64(addr);
+
+ trace_cdnsp_set_stream_ring(cur_ring);
+
+ ret = cdnsp_update_stream_mapping(cur_ring);
+ if (ret)
+ goto cleanup_rings;
+ }
+
+ return 0;
+
+cleanup_rings:
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ cdnsp_ring_free(pdev, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+
+cleanup_stream_rings:
+ kfree(pep->stream_info.stream_rings);
+
+ return -ENOMEM;
+}
+
+/* Frees all stream contexts associated with the endpoint. */
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_stream_info *stream_info = &pep->stream_info;
+ struct cdnsp_ring *cur_ring;
+ int cur_stream;
+
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return;
+
+ for (cur_stream = 1; cur_stream < stream_info->num_streams;
+ cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ cdnsp_ring_free(pdev, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+
+ if (stream_info->stream_ctx_array)
+ cdnsp_free_stream_ctx(pdev, pep);
+
+ kfree(stream_info->stream_rings);
+ pep->ep_state &= ~EP_HAS_STREAMS;
+}
+
+/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
+static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
+{
+ pdev->dcbaa->dev_context_ptrs[1] = 0;
+
+ cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
+
+ if (pdev->in_ctx.bytes)
+ dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+ pdev->in_ctx.dma);
+
+ if (pdev->out_ctx.bytes)
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+
+ pdev->in_ctx.bytes = NULL;
+ pdev->out_ctx.bytes = NULL;
+}
+
+static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
+{
+ int ret = -ENOMEM;
+
+ ret = cdnsp_init_device_ctx(pdev);
+ if (ret)
+ return ret;
+
+ /* Allocate endpoint 0 ring. */
+ pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
+ if (!pdev->eps[0].ring)
+ goto fail;
+
+ /* Point to output device context in dcbaa. */
+ pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
+ pdev->cmd.in_ctx = &pdev->in_ctx;
+
+ trace_cdnsp_alloc_priv_device(pdev);
+ return 0;
+fail:
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+ dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+ pdev->in_ctx.dma);
+
+ return ret;
+}
+
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
+{
+ struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
+ struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
+ dma_addr_t dma;
+
+ dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+ ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
+}
+
+/* Set up a controller private device for a Set Address command. */
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ struct cdnsp_ep_ctx *ep0_ctx;
+ u32 max_packets, port;
+
+ ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+
+ /* Only the control endpoint is valid - one endpoint context. */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+
+ switch (pdev->gadget.speed) {
+ case USB_SPEED_SUPER_PLUS:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+ max_packets = MAX_PACKET(512);
+ break;
+ case USB_SPEED_SUPER:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+ max_packets = MAX_PACKET(512);
+ break;
+ case USB_SPEED_HIGH:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+ max_packets = MAX_PACKET(64);
+ break;
+ case USB_SPEED_FULL:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+ max_packets = MAX_PACKET(64);
+ break;
+ default:
+ /* Speed was not set, this shouldn't happen. */
+ return -EINVAL;
+ }
+
+ port = DEV_PORT(pdev->active_port->port_num);
+ slot_ctx->dev_port |= cpu_to_le32(port);
+ slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
+ DEV_ADDR_MASK));
+ ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
+ ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+ max_packets);
+
+ ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
+ pdev->eps[0].ring->cycle_state);
+
+ trace_cdnsp_setup_addressable_priv_device(pdev);
+
+ return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ unsigned int interval;
+
+ interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
+ if (interval != pep->endpoint.desc->bInterval - 1)
+ dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
+ pep->name, 1 << interval,
+ g->speed == USB_SPEED_FULL ? "" : "micro");
+
+ /*
+ * Full speed isoc endpoints specify interval in frames,
+ * not microframes. We are using microframes everywhere,
+ * so adjust accordingly.
+ */
+ if (g->speed == USB_SPEED_FULL)
+ interval += 3; /* 1 frame = 2^3 uframes */
+
+ /* Controller handles only up to 512ms (2^12). */
+ if (interval > 12)
+ interval = 12;
+
+ return interval;
+}
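+
+/*
+ * Worked example: a high-speed interrupt endpoint with bInterval = 4 yields
+ * interval = 3, i.e. a service period of 2^3 = 8 microframes (1 ms). A
+ * full-speed isoc endpoint with the same bInterval gets 3 added for the
+ * frame-to-microframe conversion: 2^6 = 64 microframes (8 ms).
+ */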
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
+ struct cdnsp_ep *pep,
+ unsigned int desc_interval,
+ unsigned int min_exponent,
+ unsigned int max_exponent)
+{
+ unsigned int interval;
+
+ interval = fls(desc_interval) - 1;
+ return clamp_val(interval, min_exponent, max_exponent);
+}
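+
+/*
+ * Worked example: desc_interval = 9 microframes gives fls(9) - 1 = 3, i.e.
+ * 2^3 = 8 microframes, the nearest power of two rounded down; the result is
+ * then clamped to the [min_exponent, max_exponent] range.
+ */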
+
+/*
+ * Return the polling interval.
+ *
+ * The polling interval is expressed in "microframes". If the controller's
+ * Interval field is set to N, it will service the endpoint every 2^N * 125 us.
+ */
+static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ unsigned int interval = 0;
+
+ switch (g->speed) {
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ interval = cdnsp_parse_exponent_interval(g, pep);
+ break;
+ case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+ interval = cdnsp_parse_exponent_interval(g, pep);
+ } else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
+ interval = pep->endpoint.desc->bInterval << 3;
+ interval = cdnsp_microframes_to_exponent(g, pep,
+ interval,
+ 3, 10);
+ }
+
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return interval;
+}
+
+/*
+ * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
+{
+ if (g->speed < USB_SPEED_SUPER ||
+ !usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ return 0;
+
+ return pep->endpoint.comp_desc->bmAttributes;
+}
+
+static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ /* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
+ if (g->speed >= USB_SPEED_SUPER)
+ return pep->endpoint.comp_desc->bMaxBurst;
+
+ if (g->speed == USB_SPEED_HIGH &&
+ (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
+ usb_endpoint_xfer_int(pep->endpoint.desc)))
+ /* usb_endpoint_maxp() strips the mult bits; recover them here. */
+ return usb_endpoint_maxp_mult(pep->endpoint.desc) - 1;
+
+ return 0;
+}
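+
+/*
+ * Worked example: a high-speed isoc endpoint with wMaxPacketSize = 0x1400
+ * encodes a 1024-byte packet size and a multiplier field of 2, so
+ * usb_endpoint_maxp_mult() returns 3 and the max burst is 2, i.e. up to
+ * three transactions per microframe.
+ */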
+
+static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
+{
+ int in;
+
+ in = usb_endpoint_dir_in(desc);
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ return CTRL_EP;
+ case USB_ENDPOINT_XFER_BULK:
+ return in ? BULK_IN_EP : BULK_OUT_EP;
+ case USB_ENDPOINT_XFER_ISOC:
+ return in ? ISOC_IN_EP : ISOC_OUT_EP;
+ case USB_ENDPOINT_XFER_INT:
+ return in ? INT_IN_EP : INT_OUT_EP;
+ }
+
+ return 0;
+}
+
+/*
+ * Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ int max_packet;
+ int max_burst;
+
+ /* Only applies to interrupt or isochronous endpoints. */
+ if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
+ usb_endpoint_xfer_bulk(pep->endpoint.desc))
+ return 0;
+
+ /* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
+ if (g->speed >= USB_SPEED_SUPER_PLUS &&
+ USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
+ return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+ /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
+ else if (g->speed >= USB_SPEED_SUPER)
+ return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+
+ max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+ max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
+
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * max_burst;
+}
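+
+/*
+ * Worked example: a high-speed isoc endpoint with 1024-byte packets and a
+ * multiplier of 3 has a max ESIT payload of 1024 * 3 = 3072 bytes per
+ * service interval; for SuperSpeed(Plus) endpoints the value comes straight
+ * from wBytesPerInterval in the companion descriptor.
+ */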
+
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ gfp_t mem_flags)
+{
+ enum cdnsp_ring_type ring_type;
+ struct cdnsp_ep_ctx *ep_ctx;
+ unsigned int err_count = 0;
+ unsigned int avg_trb_len;
+ unsigned int max_packet;
+ unsigned int max_burst;
+ unsigned int interval;
+ u32 max_esit_payload;
+ unsigned int mult;
+ u32 endpoint_type;
+ int ret;
+
+ ep_ctx = pep->in_ctx;
+
+ endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
+ if (!endpoint_type)
+ return -EINVAL;
+
+ ring_type = usb_endpoint_type(pep->endpoint.desc);
+
+ /*
+ * Get values to fill the endpoint context, mostly from ep descriptor.
+ * The average TRB buffer length for bulk endpoints is unclear as we
+ * have no clue on scatter gather list entry size. For Isoc and Int,
+ * set it to max available.
+ */
+ max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
+ interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
+ mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
+ max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+ max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
+ avg_trb_len = max_esit_payload;
+
+ /* Allow 3 retries for everything but isoc, set CErr = 3. */
+ if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ err_count = 3;
+ if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+ pdev->gadget.speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
+ if (usb_endpoint_xfer_control(pep->endpoint.desc))
+ avg_trb_len = 8;
+
+ /* Set up the endpoint ring. */
+ pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+ if (!pep->ring)
+ return -ENOMEM;
+
+ pep->skip = false;
+
+ /* Fill the endpoint context */
+ ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+ EP_INTERVAL(interval) | EP_MULT(mult));
+ ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+ MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
+ ERROR_COUNT(err_count));
+ ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
+ pep->ring->cycle_state);
+
+ ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+ EP_AVG_TRB_LENGTH(avg_trb_len));
+
+ if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+ pdev->gadget.speed > USB_SPEED_HIGH) {
+ ret = cdnsp_alloc_streams(pdev, pep);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ pep->in_ctx->ep_info = 0;
+ pep->in_ctx->ep_info2 = 0;
+ pep->in_ctx->deq = 0;
+ pep->in_ctx->tx_info = 0;
+}
+
+static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
+ struct cdnsp_ring *evt_ring,
+ struct cdnsp_erst *erst)
+{
+ struct cdnsp_erst_entry *entry;
+ struct cdnsp_segment *seg;
+ unsigned int val;
+ size_t size;
+
+ size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
+ erst->entries = dma_alloc_coherent(pdev->dev, size,
+ &erst->erst_dma_addr, GFP_KERNEL);
+ if (!erst->entries)
+ return -ENOMEM;
+
+ erst->num_entries = evt_ring->num_segs;
+
+ seg = evt_ring->first_seg;
+ for (val = 0; val < evt_ring->num_segs; val++) {
+ entry = &erst->entries[val];
+ entry->seg_addr = cpu_to_le64(seg->dma);
+ entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ return 0;
+}
+
+static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
+{
+ size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
+ struct device *dev = pdev->dev;
+
+ if (erst->entries)
+ dma_free_coherent(dev, size, erst->entries,
+ erst->erst_dma_addr);
+
+ erst->entries = NULL;
+}
+
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
+{
+ struct device *dev = pdev->dev;
+
+ cdnsp_free_priv_device(pdev);
+ cdnsp_free_erst(pdev, &pdev->erst);
+
+ if (pdev->event_ring)
+ cdnsp_ring_free(pdev, pdev->event_ring);
+
+ pdev->event_ring = NULL;
+
+ if (pdev->cmd_ring)
+ cdnsp_ring_free(pdev, pdev->cmd_ring);
+
+ pdev->cmd_ring = NULL;
+
+ dma_pool_destroy(pdev->segment_pool);
+ pdev->segment_pool = NULL;
+ dma_pool_destroy(pdev->device_pool);
+ pdev->device_pool = NULL;
+
+ if (pdev->dcbaa)
+ dma_free_coherent(dev, sizeof(*pdev->dcbaa),
+ pdev->dcbaa, pdev->dcbaa->dma);
+
+ pdev->dcbaa = NULL;
+
+ pdev->usb2_port.exist = 0;
+ pdev->usb3_port.exist = 0;
+ pdev->usb2_port.port_num = 0;
+ pdev->usb3_port.port_num = 0;
+ pdev->active_port = NULL;
+}
+
+static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
+{
+ dma_addr_t deq;
+ u64 temp;
+
+ deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue);
+
+ /* Update controller event ring dequeue pointer */
+ temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+
+ /*
+ * Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+
+ cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
+ &pdev->ir_set->erst_dequeue);
+}
+
+static void cdnsp_add_in_port(struct cdnsp_device *pdev,
+ struct cdnsp_port *port,
+ __le32 __iomem *addr)
+{
+ u32 temp, port_offset, port_count;
+
+ temp = readl(addr);
+ port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
+ port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
+
+ /* Port offset and count in the third dword. */
+ temp = readl(addr + 2);
+ port_offset = CDNSP_EXT_PORT_OFF(temp);
+ port_count = CDNSP_EXT_PORT_COUNT(temp);
+
+ trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);
+
+ port->port_num = port_offset;
+ port->exist = 1;
+}
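+
+/*
+ * Worked example: a capability whose first dword reads 0x03100000 decodes
+ * (with the CDNSP_EXT_PORT_* macros) to major revision 0x03 and minor
+ * revision 0x10, and a third dword of 0x00000102 decodes to port offset 2
+ * and port count 1.
+ */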
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
+ * that specify which speeds each port supports.
+ */
+static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
+{
+ void __iomem *base;
+ u32 offset;
+ int i;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, 0,
+ EXT_CAP_CFG_DEV_20PORT_CAP_ID);
+ pdev->port20_regs = base + offset;
+
+ offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
+ pdev->port3x_regs = base + offset;
+
+ offset = 0;
+ base = &pdev->cap_regs->hc_capbase;
+
+ /* Driver expects at most two extended protocol capabilities. */
+ for (i = 0; i < 2; i++) {
+ u32 temp;
+
+ offset = cdnsp_find_next_ext_cap(base, offset,
+ EXT_CAPS_PROTOCOL);
+ temp = readl(base + offset);
+
+ if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
+ !pdev->usb3_port.port_num)
+ cdnsp_add_in_port(pdev, &pdev->usb3_port,
+ base + offset);
+
+ if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
+ !pdev->usb2_port.port_num)
+ cdnsp_add_in_port(pdev, &pdev->usb2_port,
+ base + offset);
+ }
+
+ if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
+ dev_err(pdev->dev, "Error: Only one port detected\n");
+ return -ENODEV;
+ }
+
+ trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");
+
+ pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
+ (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+ (pdev->usb2_port.port_num - 1));
+
+ pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
+ (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+ (pdev->usb3_port.port_num - 1));
+
+ return 0;
+}
+
+/*
+ * Initialize memory for CDNSP (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts, set up a command ring segment, create event
+ * ring (one for now).
+ */
+int cdnsp_mem_init(struct cdnsp_device *pdev)
+{
+ struct device *dev = pdev->dev;
+ int ret = -ENOMEM;
+ unsigned int val;
+ dma_addr_t dma;
+ u32 page_size;
+ u64 val_64;
+
+ /*
+ * Use 4K pages, since that's common and the minimum the
+ * controller supports.
+ */
+ page_size = 1 << 12;
+
+ val = readl(&pdev->op_regs->config_reg);
+ val = ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
+ writel(val, &pdev->op_regs->config_reg);
+
+ /*
+ * Device context base address array must be physically contiguous
+ * and 64-byte (cache line) aligned.
+ */
+ pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
+ &dma, GFP_KERNEL);
+ if (!pdev->dcbaa)
+ return -ENOMEM;
+
+ memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa));
+ pdev->dcbaa->dma = dma;
+
+ cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
+
+ /*
+ * Initialize the ring segment pool. The ring must be a contiguous
+ * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+ * however, the command ring segment needs 64-byte aligned segments
+ * and our use of dma addresses in the trb_address_map radix tree needs
+ * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
+ * alignment requirement.
+ */
+ pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
+ page_size);
+ if (!pdev->segment_pool)
+ goto release_dcbaa;
+
+ pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
+ CDNSP_CTX_SIZE, 64, page_size);
+ if (!pdev->device_pool)
+ goto destroy_segment_pool;
+
+ /* Set up the command ring to have one segment for now. */
+ pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
+ if (!pdev->cmd_ring)
+ goto destroy_device_pool;
+
+ /* Set the address in the Command Ring Control register */
+ val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+ val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+ (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+ pdev->cmd_ring->cycle_state;
+ cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+
+ val = readl(&pdev->cap_regs->db_off);
+ val &= DBOFF_MASK;
+ pdev->dba = (void __iomem *)pdev->cap_regs + val;
+
+ /* Set ir_set to interrupt register set 0 */
+ pdev->ir_set = &pdev->run_regs->ir_set[0];
+
+ /*
+ * Event ring setup: Allocate a normal ring, but also set up
+ * the event ring segment table (ERST).
+ */
+ pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
+ 0, GFP_KERNEL);
+ if (!pdev->event_ring)
+ goto free_cmd_ring;
+
+ ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
+ if (ret)
+ goto free_event_ring;
+
+ /* Set ERST count with the number of entries in the segment table. */
+ val = readl(&pdev->ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ writel(val, &pdev->ir_set->erst_size);
+
+ /* Set the segment table base address. */
+ val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
+ val_64 &= ERST_PTR_MASK;
+ val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
+ cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
+
+ /* Set the event ring dequeue address. */
+ cdnsp_set_event_deq(pdev);
+
+ ret = cdnsp_setup_port_arrays(pdev);
+ if (ret)
+ goto free_erst;
+
+ ret = cdnsp_alloc_priv_device(pdev);
+ if (ret) {
+ dev_err(pdev->dev,
+ "Could not allocate cdnsp_device data structures\n");
+ goto free_erst;
+ }
+
+ return 0;
+
+free_erst:
+ cdnsp_free_erst(pdev, &pdev->erst);
+free_event_ring:
+ cdnsp_ring_free(pdev, pdev->event_ring);
+free_cmd_ring:
+ cdnsp_ring_free(pdev, pdev->cmd_ring);
+destroy_device_pool:
+ dma_pool_destroy(pdev->device_pool);
+destroy_segment_pool:
+ dma_pool_destroy(pdev->segment_pool);
+release_dcbaa:
+ dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
+ pdev->dcbaa->dma);
+
+ cdnsp_reset(pdev);
+
+ return ret;
+}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
new file mode 100644
index 000000000000..fe8a114c586c
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCI Glue driver.
+ *
+ * Copyright (C) 2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+#define PCI_BAR_HOST 0
+#define PCI_BAR_OTG 0
+#define PCI_BAR_DEV 2
+
+#define PCI_DEV_FN_HOST_DEVICE 0
+#define PCI_DEV_FN_OTG 1
+
+#define PCI_DRIVER_NAME "cdns-pci-usbssp"
+#define PLAT_DRIVER_NAME "cdns-usbssp"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0x0100
+#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
+static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+{
+ struct pci_dev *func;
+
+ /*
+ * Gets the second function.
+ * It's a little tricky, but this platform has two functions.
+ * The first keeps resources for Host/Device while the second
+ * keeps resources for DRD/OTG.
+ */
+ func = pci_get_device(pdev->vendor, pdev->device, NULL);
+ if (!func)
+ return NULL;
+
+ if (func->devfn == pdev->devfn) {
+ func = pci_get_device(pdev->vendor, pdev->device, func);
+ if (!func)
+ return NULL;
+ }
+
+ return func;
+}
+
+static int cdnsp_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_dev *func;
+ struct resource *res;
+ struct cdns *cdnsp;
+ int ret;
+
+ /*
+ * For GADGET/HOST the PCI function number (devfn) is 0,
+ * for OTG the PCI function number (devfn) is 1.
+ */
+ if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ pdev->devfn != PCI_DEV_FN_OTG))
+ return -EINVAL;
+
+ func = cdnsp_get_second_fun(pdev);
+ if (!func)
+ return -EINVAL;
+
+ if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
+ pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
+ ret = -EINVAL;
+ goto put_pci;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+ goto put_pci;
+ }
+
+ pci_set_master(pdev);
+ if (pci_is_enabled(func)) {
+ cdnsp = pci_get_drvdata(func);
+ } else {
+ cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+ if (!cdnsp) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
+ }
+
+ /* For GADGET device function number is 0. */
+ if (pdev->devfn == 0) {
+ resource_size_t rsrc_start, rsrc_len;
+
+ /* Function 0: host (BAR_0) + device (BAR_2). */
+ dev_dbg(dev, "Initialize resources\n");
+ rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
+ rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
+ res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
+ if (!res) {
+ dev_dbg(dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto free_cdnsp;
+ }
+
+ cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
+ if (!cdnsp->dev_regs) {
+ dev_dbg(dev, "error mapping memory\n");
+ ret = -EFAULT;
+ goto free_cdnsp;
+ }
+
+ cdnsp->dev_irq = pdev->irq;
+ dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
+ &rsrc_start);
+
+ res = &cdnsp->xhci_res[0];
+ res->start = pci_resource_start(pdev, PCI_BAR_HOST);
+ res->end = pci_resource_end(pdev, PCI_BAR_HOST);
+ res->name = "xhci";
+ res->flags = IORESOURCE_MEM;
+ dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
+ &res->start);
+
+ /* Interrupt for XHCI. */
+ res = &cdnsp->xhci_res[1];
+ res->start = pdev->irq;
+ res->name = "host";
+ res->flags = IORESOURCE_IRQ;
+ } else {
+ res = &cdnsp->otg_res;
+ res->start = pci_resource_start(pdev, PCI_BAR_OTG);
+ res->end = pci_resource_end(pdev, PCI_BAR_OTG);
+ res->name = "otg";
+ res->flags = IORESOURCE_MEM;
+ dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
+ &res->start);
+
+ /* Interrupt for OTG/DRD. */
+ cdnsp->otg_irq = pdev->irq;
+ }
+
+ if (pci_is_enabled(func)) {
+ cdnsp->dev = dev;
+ cdnsp->gadget_init = cdnsp_gadget_init;
+
+ ret = cdns_init(cdnsp);
+ if (ret)
+ goto free_cdnsp;
+ }
+
+ pci_set_drvdata(pdev, cdnsp);
+
+ device_wakeup_enable(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+
+free_cdnsp:
+ if (!pci_is_enabled(func))
+ kfree(cdnsp);
+
+disable_pci:
+ pci_disable_device(pdev);
+
+put_pci:
+ pci_dev_put(func);
+
+ return ret;
+}
+
+static void cdnsp_pci_remove(struct pci_dev *pdev)
+{
+ struct cdns *cdnsp;
+ struct pci_dev *func;
+
+ func = cdnsp_get_second_fun(pdev);
+ cdnsp = pci_get_drvdata(pdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ if (!pci_is_enabled(func)) {
+ kfree(cdnsp);
+ goto pci_put;
+ }
+
+ cdns_remove(cdnsp);
+
+pci_put:
+ pci_dev_put(func);
+}
+
+static int __maybe_unused cdnsp_pci_suspend(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+
+ return cdns_suspend(cdns);
+}
+
+static int __maybe_unused cdnsp_pci_resume(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cdns->lock, flags);
+ ret = cdns_resume(cdns, 1);
+ spin_unlock_irqrestore(&cdns->lock, flags);
+
+ return ret;
+}
+
+static const struct dev_pm_ops cdnsp_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume)
+};
+
+static const struct pci_device_id cdnsp_pci_ids[] = {
+ { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ CDNS_DRD_IF, PCI_ANY_ID },
+ { 0, }
+};
+
+static struct pci_driver cdnsp_pci_driver = {
+ .name = "cdnsp-pci",
+ .id_table = &cdnsp_pci_ids[0],
+ .probe = cdnsp_pci_probe,
+ .remove = cdnsp_pci_remove,
+ .driver = {
+ .pm = &cdnsp_pci_pm_ops,
+ }
+};
+
+module_pci_driver(cdnsp_pci_driver);
+MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids);
+
+MODULE_ALIAS("pci:cdnsp");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence CDNSP PCI driver");
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
new file mode 100644
index 000000000000..f9170d177a89
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -0,0 +1,2438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ * Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ * least one free TRB in the ring. This is useful if you want to turn that
+ * into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ * link TRB, then load the pointer with the address in the link TRB. If the
+ * link TRB had its toggle bit set, you may need to update the ring cycle
+ * state (see cycle bit rules). You may have to do this multiple times
+ * until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ * equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ * Update enqueue pointer between each write (which may update the ring
+ * cycle state).
+ * 3. Notify consumer. If SW is producer, it rings the doorbell for command
+ *    and endpoint rings. If the controller is the producer for the event
+ *    ring, it generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ * the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ * continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ * updates event ring dequeue pointer. Controller is the consumer for the
+ * command and endpoint rings; it generates events on the event ring
+ * for these.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "cdnsp-trace.h"
+#include "cdnsp-gadget.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+ union cdnsp_trb *trb)
+{
+ unsigned long segment_offset = trb - seg->trbs;
+
+ if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
+ return 0;
+
+ return seg->dma + (segment_offset * sizeof(*trb));
+}
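+
+/*
+ * Worked example (TRBs are 16 bytes each): in a segment with
+ * seg->dma = 0x10c91000, the TRB at index 4 maps to
+ * 0x10c91000 + 4 * 16 = 0x10c91040; a TRB outside the segment returns 0.
+ */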
+
+static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
+{
+ return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
+{
+ return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
+{
+ return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ union cdnsp_trb *trb)
+{
+ return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
+{
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
+{
+ if (cdnsp_trb_is_link(trb)) {
+ /* Unchain chained link TRBs. */
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+ } else {
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB. */
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+ }
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void cdnsp_next_trb(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment **seg,
+ union cdnsp_trb **trb)
+{
+ if (cdnsp_trb_is_link(*trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+ (*trb)++;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+ /* Event ring doesn't have link TRBs; check for the last TRB. */
+ if (ring->type == TYPE_EVENT) {
+ if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+ ring->dequeue++;
+ goto out;
+ }
+
+ if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+ ring->cycle_state ^= 1;
+
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ goto out;
+ }
+
+ /* All other rings have link trbs. */
+ if (!cdnsp_trb_is_link(ring->dequeue)) {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
+ while (cdnsp_trb_is_link(ring->dequeue)) {
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ }
+out:
+ trace_cdnsp_inc_deq(ring);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell.
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ bool more_trbs_coming)
+{
+ union cdnsp_trb *next;
+ u32 chain;
+
+ chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+ /* If this is not an event ring, there is one less usable TRB. */
+ if (!cdnsp_trb_is_link(ring->enqueue))
+ ring->num_trbs_free--;
+ next = ++(ring->enqueue);
+
+ /* Update the enqueue pointer further if that was a link TRB. */
+ while (cdnsp_trb_is_link(next)) {
+ /*
+ * If the caller doesn't plan on enqueuing more TDs before
+ * ringing the doorbell, then we don't want to give the link TRB
+ * to the hardware just yet. We'll give the link TRB back in
+ * cdnsp_prepare_ring() just before we enqueue the TD at the
+ * top of the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
+
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ next->link.control |= cpu_to_le32(chain);
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (cdnsp_link_trb_toggles_cycle(next))
+ ring->cycle_state ^= 1;
+
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+
+ trace_cdnsp_inc_enq(ring);
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment.
+ */
+static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs)
+{
+ int num_trbs_in_deq_seg;
+
+ if (ring->num_trbs_free < num_trbs)
+ return false;
+
+ if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+ num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+
+ if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+ return false;
+ }
+
+ return true;
+}
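+
+/*
+ * Illustrative example of the check above (hypothetical numbers): with
+ * num_trbs_free = 70 and a dequeue pointer sitting 60 TRBs into its
+ * segment, a request for 12 TRBs passes the first test (70 >= 12) but
+ * fails the second (70 < 12 + 60), because enqueuing could otherwise
+ * advance into the dequeue segment before it has been fully recycled.
+ */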
+
+/*
+ * Workaround for L1: the controller has an issue resuming from L1 when a
+ * doorbell is rung for an endpoint while the link is in the L1 state. This
+ * function forces a resume signal in that case.
+ */
+static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
+{
+ if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
+ cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
+}
+
+/* Ring the doorbell after placing a command on the ring. */
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
+{
+ writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+}
+
+/*
+ * Ring the doorbell after placing a transfer on the ring.
+ * Returns true if doorbell was set, otherwise false.
+ */
+static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id)
+{
+ __le32 __iomem *reg_addr = &pdev->dba->ep_db;
+ unsigned int ep_state = pep->ep_state;
+ unsigned int db_value;
+
+ /*
+ * Don't ring the doorbell for this endpoint if the endpoint is halted or
+ * disabled.
+ */
+ if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
+ return false;
+
+ /* For stream-capable endpoints the driver can ring the doorbell only twice. */
+ if (pep->ep_state & EP_HAS_STREAMS) {
+ if (pep->stream_info.drbls_count >= 2)
+ return false;
+
+ pep->stream_info.drbls_count++;
+ }
+
+ pep->ep_state &= ~EP_STOPPED;
+
+ if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
+ !pdev->ep0_expect_in)
+ db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
+ else
+ db_value = DB_VALUE(pep->idx, stream_id);
+
+ trace_cdnsp_tr_drbl(pep, stream_id);
+
+ writel(db_value, reg_addr);
+
+ cdnsp_force_l0_go(pdev);
+
+ /* Doorbell was set. */
+ return true;
+}
+
+/*
+ * Get the right ring for the given pep and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id)
+{
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return pep->ring;
+
+ if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
+ dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
+ pep->name, stream_id);
+ return NULL;
+ }
+
+ return pep->stream_info.stream_rings[stream_id];
+}
+
+static struct cdnsp_ring *
+ cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ return cdnsp_get_transfer_ring(pdev, preq->pep,
+ preq->request.stream_id);
+}
+
+/* Ring the doorbell for any rings with pending requests. */
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_stream_info *stream_info;
+ unsigned int stream_id;
+ int ret;
+
+ if (pep->ep_state & EP_DIS_IN_RROGRESS)
+ return;
+
+ /* A ring has a pending request if its TD list is not empty. */
+ if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
+ if (pep->ring && !list_empty(&pep->ring->td_list))
+ cdnsp_ring_ep_doorbell(pdev, pep, 0);
+ return;
+ }
+
+ stream_info = &pep->stream_info;
+
+ for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
+ struct cdnsp_td *td, *td_temp;
+ struct cdnsp_ring *ep_ring;
+
+ if (stream_info->drbls_count >= 2)
+ return;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+ if (!ep_ring)
+ continue;
+
+ if (!ep_ring->stream_active || ep_ring->stream_rejected)
+ continue;
+
+ list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+ td_list) {
+ if (td->drbl)
+ continue;
+
+ ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+ if (ret)
+ td->drbl = 1;
+ }
+ }
+}
+
+/*
+ * Get the hw dequeue pointer controller stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct cdnsp_stream_ctx *st_ctx;
+ struct cdnsp_ep *pep;
+
+ pep = &pdev->eps[ep_index];
+
+ if (pep->ep_state & EP_HAS_STREAMS) {
+ st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+ return le64_to_cpu(st_ctx->stream_ring);
+ }
+
+ return le64_to_cpu(pep->out_ctx->deq);
+}
+
+/*
+ * Move the controller endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the controller endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the
+ * controller stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the controller new cycle state
+ * when we pass any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id,
+ struct cdnsp_td *cur_td,
+ struct cdnsp_dequeue_state *state)
+{
+ bool td_last_trb_found = false;
+ struct cdnsp_segment *new_seg;
+ struct cdnsp_ring *ep_ring;
+ union cdnsp_trb *new_deq;
+ bool cycle_found = false;
+ u64 hw_dequeue;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+ if (!ep_ring)
+ return;
+
+ /*
+ * Dig out the cycle state saved by the controller during the
+ * stop endpoint command.
+ */
+ hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
+ new_seg = ep_ring->deq_seg;
+ new_deq = ep_ring->dequeue;
+ state->new_cycle_state = hw_dequeue & 0x1;
+ state->stream_id = stream_id;
+
+ /*
+ * We want to find the pointer, segment and cycle state of the new trb
+ * (the one after current TD's last_trb). We know the cycle state at
+ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+ * found.
+ */
+ do {
+ if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
+ == (dma_addr_t)(hw_dequeue & ~0xf)) {
+ cycle_found = true;
+
+ if (td_last_trb_found)
+ break;
+ }
+
+ if (new_deq == cur_td->last_trb)
+ td_last_trb_found = true;
+
+ if (cycle_found && cdnsp_trb_is_link(new_deq) &&
+ cdnsp_link_trb_toggles_cycle(new_deq))
+ state->new_cycle_state ^= 0x1;
+
+ cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
+
+ /* Search wrapped around, bail out. */
+ if (new_deq == pep->ring->dequeue) {
+ dev_err(pdev->dev,
+ "Error: Failed finding new dequeue state\n");
+ state->new_deq_seg = NULL;
+ state->new_deq_ptr = NULL;
+ return;
+ }
+
+ } while (!cycle_found || !td_last_trb_found);
+
+ state->new_deq_seg = new_seg;
+ state->new_deq_ptr = new_deq;
+
+ trace_cdnsp_new_deq_state(state);
+}
+
+/*
+ * flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ep_ring,
+ struct cdnsp_td *td,
+ bool flip_cycle)
+{
+ struct cdnsp_segment *seg = td->start_seg;
+ union cdnsp_trb *trb = td->first_trb;
+
+ while (1) {
+ cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
+
+ /* flip cycle if asked to */
+ if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+ if (trb == td->last_trb)
+ break;
+
+ cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
+ }
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns NULL.
+ */
+static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
+ struct cdnsp_segment *start_seg,
+ union cdnsp_trb *start_trb,
+ union cdnsp_trb *end_trb,
+ dma_addr_t suspect_dma)
+{
+ struct cdnsp_segment *cur_seg;
+ union cdnsp_trb *temp_trb;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ dma_addr_t start_dma;
+
+ start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
+ cur_seg = start_seg;
+
+ do {
+ if (start_dma == 0)
+ return NULL;
+
+ temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
+
+ trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
+ end_trb_dma, cur_seg->dma,
+ end_seg_dma);
+
+ if (end_trb_dma > 0) {
+ /*
+ * The end TRB is in this segment, so suspect should
+ * be here
+ */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma &&
+ suspect_dma <= end_trb_dma) {
+ return cur_seg;
+ }
+ } else {
+ /*
+ * Case for one segment with a
+ * TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma)) {
+ return cur_seg;
+ }
+ }
+
+ return NULL;
+ }
+
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
+
+ cur_seg = cur_seg->next;
+ start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != start_seg);
+
+ return NULL;
+}
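+
+/*
+ * Example of the wrap case above (hypothetical layout): in a single-segment
+ * ring where the TD starts at TRB 60 and ends at TRB 3, start_dma is above
+ * end_trb_dma. A suspect DMA address matching TRB 62 (between the start and
+ * the segment end) or TRB 1 (between the segment base and the end TRB) both
+ * resolve to this segment.
+ */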
+
+static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_td *td)
+{
+ struct cdnsp_segment *seg = td->bounce_seg;
+ struct cdnsp_request *preq;
+ size_t len;
+
+ if (!seg)
+ return;
+
+ preq = td->preq;
+
+ trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
+ seg->bounce_dma, 0);
+
+ if (!preq->direction) {
+ dma_unmap_single(pdev->dev, seg->bounce_dma,
+ ring->bounce_buf_len, DMA_TO_DEVICE);
+ return;
+ }
+
+ dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* For IN transfers, copy the data from the bounce buffer to the sg list. */
+ len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
+ seg->bounce_buf, seg->bounce_len,
+ seg->bounce_offs);
+ if (len != seg->bounce_len)
+ dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+}
+
+static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state)
+{
+ struct cdnsp_ring *ep_ring;
+ int ret;
+
+ if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+ return 0;
+ }
+
+ cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
+ trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
+
+ /*
+ * Update the ring's dequeue segment and dequeue pointer
+ * to reflect the new position.
+ */
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
+
+ if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
+ while (ep_ring->dequeue != deq_state->new_deq_ptr) {
+ ep_ring->num_trbs_free++;
+ ep_ring->dequeue++;
+
+ if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+ if (ep_ring->dequeue == deq_state->new_deq_ptr)
+ break;
+
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+ }
+
+ /*
+ * A timeout most likely occurred while handling the Set Dequeue Pointer
+ * command. That is a critical error and the controller will be stopped.
+ */
+ if (ret)
+ return -ESHUTDOWN;
+
+ /* Restart any rings with pending requests */
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+ return 0;
+}
+
+int cdnsp_remove_request(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_dequeue_state deq_state;
+ struct cdnsp_td *cur_td = NULL;
+ struct cdnsp_ring *ep_ring;
+ struct cdnsp_segment *seg;
+ int status = -ECONNRESET;
+ int ret = 0;
+ u64 hw_deq;
+
+ memset(&deq_state, 0, sizeof(deq_state));
+
+ trace_cdnsp_remove_request(pep->out_ctx);
+ trace_cdnsp_remove_request_td(preq);
+
+ cur_td = &preq->td;
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the controller endpoint ring dequeue pointer past
+ * this TD.
+ */
+ hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
+ hw_deq &= ~0xf;
+
+ seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
+ cur_td->last_trb, hw_deq);
+
+ if (seg && (pep->ep_state & EP_ENABLED))
+ cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
+ cur_td, &deq_state);
+ else
+ cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
+
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list.
+ */
+ list_del_init(&cur_td->td_list);
+ ep_ring->num_tds--;
+ pep->stream_info.td_count--;
+
+ /*
+ * During disconnect all endpoints will be disabled, so we don't have
+ * to worry about updating the dequeue pointer; otherwise move the
+ * controller past the cancelled TD.
+ */
+ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING)
+ status = -ESHUTDOWN;
+ else
+ ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
+
+ cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
+ cdnsp_gadget_giveback(pep, cur_td->preq, status);
+
+ return ret;
+}
+
+static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
+{
+ struct cdnsp_port *port = pdev->active_port;
+ u8 old_port = 0;
+
+ if (port && port->port_num == port_id)
+ return 0;
+
+ if (port)
+ old_port = port->port_num;
+
+ if (port_id == pdev->usb2_port.port_num) {
+ port = &pdev->usb2_port;
+ } else if (port_id == pdev->usb3_port.port_num) {
+ port = &pdev->usb3_port;
+ } else {
+ dev_err(pdev->dev, "Port event with invalid port ID %d\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (port_id != old_port) {
+ cdnsp_disable_slot(pdev);
+ pdev->active_port = port;
+ cdnsp_enable_slot(pdev);
+ }
+
+ if (port_id == pdev->usb2_port.port_num)
+ cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
+ else
+ writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
+ &pdev->usb3_port.regs->portpmsc);
+
+ return 0;
+}
+
+static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
+ union cdnsp_trb *event)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+ u32 portsc, cmd_regs;
+ bool port2 = false;
+ u32 link_state;
+ u32 port_id;
+
+ /* Port status change events always have a successful completion code */
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+ dev_err(pdev->dev, "ERR: incorrect PSC event\n");
+
+ port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+
+ if (cdnsp_update_port_id(pdev, port_id))
+ goto cleanup;
+
+ port_regs = pdev->active_port->regs;
+
+ if (port_id == pdev->usb2_port.port_num)
+ port2 = true;
+
+new_event:
+ portsc = readl(&port_regs->portsc);
+ writel(cdnsp_port_state_to_neutral(portsc) |
+ (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+
+ trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
+
+ pdev->gadget.speed = cdnsp_port_speed(portsc);
+ link_state = portsc & PORT_PLS_MASK;
+
+ /* Port Link State change detected. */
+ if ((portsc & PORT_PLC)) {
+ if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+ link_state == XDEV_RESUME) {
+ cmd_regs = readl(&pdev->op_regs->command);
+ if (!(cmd_regs & CMD_R_S))
+ goto cleanup;
+
+ if (DEV_SUPERSPEED_ANY(portsc)) {
+ cdnsp_set_link_state(pdev, &port_regs->portsc,
+ XDEV_U0);
+
+ cdnsp_resume_gadget(pdev);
+ }
+ }
+
+ if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+ link_state == XDEV_U0) {
+ pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
+
+ cdnsp_force_header_wakeup(pdev, 1);
+ cdnsp_ring_cmd_db(pdev);
+ cdnsp_wait_for_cmd_compl(pdev);
+ }
+
+ if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
+ !DEV_SUPERSPEED_ANY(portsc))
+ cdnsp_resume_gadget(pdev);
+
+ if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
+ cdnsp_suspend_gadget(pdev);
+
+ pdev->link_state = link_state;
+ }
+
+ if (portsc & PORT_CSC) {
+ /* Detach device. */
+ if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
+ cdnsp_disconnect_gadget(pdev);
+
+ /* Attach device. */
+ if (portsc & PORT_CONNECT) {
+ if (!port2)
+ cdnsp_irq_reset(pdev);
+
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
+ }
+ }
+
+ /* Port reset. */
+ if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+ cdnsp_irq_reset(pdev);
+ pdev->u1_allowed = 0;
+ pdev->u2_allowed = 0;
+ pdev->may_wakeup = 0;
+ }
+
+ /* PORT_OCC is assumed here to be the over-current change bit. */
+ if (portsc & PORT_OCC)
+ dev_err(pdev->dev, "Port Over Current detected\n");
+
+ if (portsc & PORT_CEC)
+ dev_err(pdev->dev, "Port Configure Error detected\n");
+
+ if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+ goto new_event;
+
+cleanup:
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+}
+
+static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_ring *ep_ring,
+ int *status)
+{
+ struct cdnsp_request *preq = td->preq;
+
+ /* if a bounce buffer was used to align this td then unmap it */
+ cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+ /*
+ * If the controller said we transferred more data than the buffer
+ * length, play it safe and say we didn't transfer anything.
+ */
+ if (preq->request.actual > preq->request.length) {
+ preq->request.actual = 0;
+ *status = 0;
+ }
+
+ list_del_init(&td->td_list);
+ ep_ring->num_tds--;
+ preq->pep->stream_info.td_count--;
+
+ cdnsp_gadget_giveback(preq->pep, preq, *status);
+}
+
+static void cdnsp_finish_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *ep,
+ int *status)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+ /*
+ * The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ return;
+ }
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_td_cleanup(pdev, td, ep_ring, status);
+}
+
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ union cdnsp_trb *stop_trb)
+{
+ struct cdnsp_segment *seg = ring->deq_seg;
+ union cdnsp_trb *trb = ring->dequeue;
+ u32 sum;
+
+ for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
+ if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
+ sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+ }
+ return sum;
+}
+
+static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id,
+ int start_cycle,
+ struct cdnsp_generic_trb *start_trb)
+{
+ /*
+ * Pass all the TRBs to the hardware at once and make sure this write
+ * isn't reordered.
+ */
+ wmb();
+
+ if (start_cycle)
+ start_trb->field[3] |= cpu_to_le32(start_cycle);
+ else
+ start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ if ((pep->ep_state & EP_HAS_STREAMS) &&
+ !pep->stream_info.first_prime_det) {
+ trace_cdnsp_wait_for_prime(pep, stream_id);
+ return 0;
+ }
+
+ return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+}
+
+/*
+ * Process control TDs, updating the USB request status and actual_length.
+ */
+static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *event_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int *status)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 remaining;
+ u32 trb_type;
+
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ /*
+ * If on the data stage, update the actual_length of the USB request
+ * and flag it as set, so it won't be overwritten in the event for
+ * the last TRB.
+ */
+ if (trb_type == TRB_DATA) {
+ td->request_length_set = true;
+ td->preq->request.actual = td->preq->request.length - remaining;
+ }
+
+ /* At the status stage, report the whole request as transferred. */
+ if (!td->request_length_set)
+ td->preq->request.actual = td->preq->request.length;
+
+ if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
+ pdev->three_stage_setup) {
+ td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+ td_list);
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+ cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
+ &td->last_trb->generic);
+ return;
+ }
+
+ cdnsp_finish_td(pdev, td, event, pep, status);
+}
+
+/*
+ * Process isochronous TDs, updating the USB request status and actual_length.
+ */
+static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *ep_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int status)
+{
+ struct cdnsp_request *preq = td->preq;
+ u32 remaining, requested, ep_trb_len;
+ bool sum_trbs_for_length = false;
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+ u32 td_length;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+
+ requested = preq->request.length;
+
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ preq->request.status = 0;
+ break;
+ case COMP_SHORT_PACKET:
+ preq->request.status = 0;
+ sum_trbs_for_length = true;
+ break;
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ case COMP_BABBLE_DETECTED_ERROR:
+ preq->request.status = -EOVERFLOW;
+ break;
+ case COMP_STOPPED:
+ sum_trbs_for_length = true;
+ break;
+ case COMP_STOPPED_SHORT_PACKET:
+ /* field normally containing residue now contains transferred */
+ preq->request.status = 0;
+ requested = remaining;
+ break;
+ case COMP_STOPPED_LENGTH_INVALID:
+ requested = 0;
+ remaining = 0;
+ break;
+ default:
+ sum_trbs_for_length = true;
+ preq->request.status = -1;
+ break;
+ }
+
+ if (sum_trbs_for_length) {
+ td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
+ td_length += ep_trb_len - remaining;
+ } else {
+ td_length = requested;
+ }
+
+ td->preq->request.actual += td_length;
+
+ cdnsp_finish_td(pdev, td, event, pep, &status);
+}
+
+static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int status)
+{
+ struct cdnsp_ring *ep_ring;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ td->preq->request.status = -EXDEV;
+ td->preq->request.actual = 0;
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_td_cleanup(pdev, td, ep_ring, &status);
+}
+
+/*
+ * Process bulk and interrupt TDs, updating the USB request status and
+ * actual_length.
+ */
+static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *ep_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *ep,
+ int *status)
+{
+ u32 remaining, requested, ep_trb_len;
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ requested = td->preq->request.length;
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ case COMP_SHORT_PACKET:
+ *status = 0;
+ break;
+ case COMP_STOPPED_SHORT_PACKET:
+ td->preq->request.actual = remaining;
+ goto finish_td;
+ case COMP_STOPPED_LENGTH_INVALID:
+ /* Stopped on ep trb with invalid length, exclude it. */
+ ep_trb_len = 0;
+ remaining = 0;
+ break;
+ }
+
+ if (ep_trb == td->last_trb)
+ ep_trb_len = requested - remaining;
+ else
+ ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+ td->preq->request.actual = ep_trb_len;
+
+finish_td:
+ ep->stream_info.drbls_count--;
+
+ cdnsp_finish_td(pdev, td, event, ep, status);
+}
+
+static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
+ struct cdnsp_transfer_event *event)
+{
+ struct cdnsp_generic_trb *generic;
+ struct cdnsp_ring *ep_ring;
+ struct cdnsp_ep *pep;
+ int cur_stream;
+ int ep_index;
+ int host_sid;
+ int dev_sid;
+
+ generic = (struct cdnsp_generic_trb *)event;
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
+ host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
+
+ pep = &pdev->eps[ep_index];
+
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return;
+
+ if (host_sid == STREAM_PRIME_ACK) {
+ pep->stream_info.first_prime_det = 1;
+ for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
+ cur_stream++) {
+ ep_ring = pep->stream_info.stream_rings[cur_stream];
+ ep_ring->stream_active = 1;
+ ep_ring->stream_rejected = 0;
+ }
+ }
+
+ if (host_sid == STREAM_REJECTED) {
+ struct cdnsp_td *td, *td_temp;
+
+ pep->stream_info.drbls_count--;
+ ep_ring = pep->stream_info.stream_rings[dev_sid];
+ ep_ring->stream_active = 0;
+ ep_ring->stream_rejected = 1;
+
+ list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+ td_list) {
+ td->drbl = 0;
+ }
+ }
+
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted TRB DMA address, or the endpoint is disabled.
+ */
+static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
+ struct cdnsp_transfer_event *event)
+{
+ const struct usb_endpoint_descriptor *desc;
+ bool handling_skipped_tds = false;
+ struct cdnsp_segment *ep_seg;
+ struct cdnsp_ring *ep_ring;
+ int status = -EINPROGRESS;
+ union cdnsp_trb *ep_trb;
+ dma_addr_t ep_trb_dma;
+ struct cdnsp_ep *pep;
+ struct cdnsp_td *td;
+ u32 trb_comp_code;
+ int invalidate;
+ int ep_index;
+
+ invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ ep_trb_dma = le64_to_cpu(event->buffer);
+
+ pep = &pdev->eps[ep_index];
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+
+ /*
+ * If the device has disconnected, all requests will be dequeued by
+ * the upper layers as part of the disconnect sequence. Don't handle
+ * such events here, to avoid racing with that teardown.
+ */
+ if (invalidate || !pdev->gadget.connected)
+ goto cleanup;
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
+ trace_cdnsp_ep_disabled(pep->out_ctx);
+ goto err_out;
+ }
+
+ /* Some transfer events don't always point to a TRB. */
+ if (!ep_ring) {
+ switch (trb_comp_code) {
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ goto cleanup;
+ default:
+ dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
+ pep->name);
+ goto err_out;
+ }
+ }
+
+ /* Look for some error cases that need special treatment. */
+ switch (trb_comp_code) {
+ case COMP_BABBLE_DETECTED_ERROR:
+ status = -EOVERFLOW;
+ break;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ /*
+ * When the Isoch ring is empty, the controller will generate
+ * a Ring Overrun Event for IN Isoch endpoint or Ring
+ * Underrun Event for OUT Isoch endpoint.
+ */
+ goto cleanup;
+ case COMP_MISSED_SERVICE_ERROR:
+ /*
+ * When a missed service error is encountered, one or more isoc
+ * TDs may have been missed by the controller.
+ * Set the skip flag on the endpoint; complete the missed TDs as
+ * short transfers when processing the ring next time.
+ */
+ pep->skip = true;
+ break;
+ }
+
+ do {
+ /*
+ * This TRB should be in the TD at the head of this ring's TD
+ * list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if it's due to a stopped
+ * endpoint generating an extra completion event, or
+ * an event for the last TRB of a short TD we already
+ * got a short event for.
+ * The short TD is already removed from the TD list.
+ */
+ if (!(trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ ep_ring->last_td_was_short))
+ trace_cdnsp_trb_without_td(ep_ring,
+ (struct cdnsp_generic_trb *)event);
+
+ if (pep->skip) {
+ pep->skip = false;
+ trace_cdnsp_ep_list_empty_with_skip(pep, 0);
+ }
+
+ goto cleanup;
+ }
+
+ td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+ td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
+ ep_ring->dequeue, td->last_trb,
+ ep_trb_dma);
+
+ /*
+ * Skip the Force Stopped Event. The event_trb (ep_trb_dma)
+ * of an FSE is not in the current TD pointed to by
+ * ep_ring->dequeue, because the hardware dequeue pointer is
+ * still at the previous TRB of the current TD. The previous
+ * TRB may be a link TRB or the last TRB of the previous TD.
+ * The command completion handler will take care of the rest.
+ */
+ if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+ pep->skip = false;
+ goto cleanup;
+ }
+
+ desc = td->preq->pep->endpoint.desc;
+ if (!ep_seg) {
+ if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
+ /* Something is busted, give up! */
+ dev_err(pdev->dev,
+ "ERROR Transfer event TRB DMA ptr not "
+ "part of current TD ep_index %d "
+ "comp_code %u\n", ep_index,
+ trb_comp_code);
+ return -EINVAL;
+ }
+
+ cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+ goto cleanup;
+ }
+
+ ep_ring->last_td_was_short = (trb_comp_code == COMP_SHORT_PACKET);
+
+ if (pep->skip) {
+ pep->skip = false;
+ cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+ goto cleanup;
+ }
+
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+ / sizeof(*ep_trb)];
+
+ trace_cdnsp_handle_transfer(ep_ring,
+ (struct cdnsp_generic_trb *)ep_trb);
+
+ if (cdnsp_trb_is_noop(ep_trb))
+ goto cleanup;
+
+ if (usb_endpoint_xfer_control(desc))
+ cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
+ &status);
+ else if (usb_endpoint_xfer_isoc(desc))
+ cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
+ status);
+ else
+ cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
+ &status);
+cleanup:
+ handling_skipped_tds = pep->skip;
+
+ /*
+ * Do not update event ring dequeue pointer if we're in a loop
+ * processing missed tds.
+ */
+ if (!handling_skipped_tds)
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+ /*
+ * If pep->skip is set, there are missed TDs on the endpoint
+ * ring that need to be taken care of. Process them as short
+ * transfers until we reach the TD pointed to by the event.
+ */
+ } while (handling_skipped_tds);
+ return 0;
+
+err_out:
+ dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long)
+ cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ return -EINVAL;
+}
+
+/*
+ * This function handles all events on the event ring.
+ * Returns true for "possibly more events to process" (caller should call
+ * again), otherwise false if done.
+ */
+static bool cdnsp_handle_event(struct cdnsp_device *pdev)
+{
+ unsigned int comp_code;
+ union cdnsp_trb *event;
+ bool update_ptrs = true;
+ u32 cycle_bit;
+ int ret = 0;
+ u32 flags;
+
+ event = pdev->event_ring->dequeue;
+ flags = le32_to_cpu(event->event_cmd.flags);
+ cycle_bit = (flags & TRB_CYCLE);
+
+ /* Does the controller or driver own the TRB? */
+ if (cycle_bit != pdev->event_ring->cycle_state)
+ return false;
+
+ trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
+
+ /*
+ * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * reads of the event's flags/data below.
+ */
+ rmb();
+
+ switch (flags & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_COMPLETION):
+ /*
+ * Command can't be handled in interrupt context so just
+ * increment command ring dequeue pointer.
+ */
+ cdnsp_inc_deq(pdev, pdev->cmd_ring);
+ break;
+ case TRB_TYPE(TRB_PORT_STATUS):
+ cdnsp_handle_port_status(pdev, event);
+ update_ptrs = false;
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
+ if (ret >= 0)
+ update_ptrs = false;
+ break;
+ case TRB_TYPE(TRB_SETUP):
+ pdev->ep0_stage = CDNSP_SETUP_STAGE;
+ pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
+ pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
+ pdev->setup = *((struct usb_ctrlrequest *)
+ &event->trans_event.buffer);
+
+ cdnsp_setup_analyze(pdev);
+ break;
+ case TRB_TYPE(TRB_ENDPOINT_NRDY):
+ cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
+ break;
+ case TRB_TYPE(TRB_HC_EVENT): {
+ comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
+
+ switch (comp_code) {
+ case COMP_EVENT_RING_FULL_ERROR:
+ dev_err(pdev->dev, "Event Ring Full\n");
+ break;
+ default:
+ dev_err(pdev->dev, "Controller error code 0x%02x\n",
+ comp_code);
+ }
+
+ break;
+ }
+ case TRB_TYPE(TRB_MFINDEX_WRAP):
+ case TRB_TYPE(TRB_DRB_OVERFLOW):
+ break;
+ default:
+ dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
+ TRB_FIELD_TO_TYPE(flags));
+ }
+
+ if (update_ptrs)
+ /* Update SW event ring dequeue pointer. */
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+ /*
+ * Caller will call us again to check if there are more items
+ * on the event ring.
+ */
+ return true;
+}
+
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+{
+ struct cdnsp_device *pdev = (struct cdnsp_device *)data;
+ union cdnsp_trb *event_ring_deq;
+ unsigned long flags;
+ int counter = 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+ cdnsp_died(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ event_ring_deq = pdev->event_ring->dequeue;
+
+ while (cdnsp_handle_event(pdev)) {
+ if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
+ event_ring_deq = pdev->event_ring->dequeue;
+ counter = 0;
+ }
+ }
+
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cdnsp_irq_handler(int irq, void *priv)
+{
+ struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
+ u32 irq_pending;
+ u32 status;
+
+ status = readl(&pdev->op_regs->status);
+
+ if (status == ~(u32)0) {
+ cdnsp_died(pdev);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & STS_EINT))
+ return IRQ_NONE;
+
+ writel(status | STS_EINT, &pdev->op_regs->status);
+ irq_pending = readl(&pdev->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &pdev->ir_set->irq_pending);
+
+ if (status & STS_FATAL) {
+ cdnsp_died(pdev);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Generic function for queuing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before setting doorbell?
+ */
+static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
+ bool more_trbs_coming, u32 field1, u32 field2,
+ u32 field3, u32 field4)
+{
+ struct cdnsp_generic_trb *trb;
+
+ trb = &ring->enqueue->generic;
+
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
+ trb->field[3] = cpu_to_le32(field4);
+
+ trace_cdnsp_queue_trb(ring, trb);
+ cdnsp_inc_enq(pdev, ring, more_trbs_coming);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to
+ * queue num_trbs.
+ */
+static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ep_ring,
+ u32 ep_state, unsigned int num_trbs,
+ gfp_t mem_flags)
+{
+ unsigned int num_trbs_needed;
+
+ /* Make sure the endpoint has been added to controller schedule. */
+ switch (ep_state) {
+ case EP_STATE_STOPPED:
+ case EP_STATE_RUNNING:
+ case EP_STATE_HALTED:
+ break;
+ default:
+ dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
+ return -EINVAL;
+ }
+
+ while (1) {
+ if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
+ break;
+
+ trace_cdnsp_no_room_on_ring("try ring expansion");
+
+ num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+ if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
+ mem_flags)) {
+ dev_err(pdev->dev, "Ring expansion failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ while (cdnsp_trb_is_link(ep_ring->enqueue)) {
+ ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
+ /* The cycle bit must be set as the last operation. */
+ wmb();
+ ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
+ ep_ring->cycle_state ^= 1;
+ ep_ring->enq_seg = ep_ring->enq_seg->next;
+ ep_ring->enqueue = ep_ring->enq_seg->trbs;
+ }
+ return 0;
+}
+
+static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int num_trbs)
+{
+ struct cdnsp_ring *ep_ring;
+ int ret;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
+ preq->request.stream_id);
+ if (!ep_ring)
+ return -EINVAL;
+
+ ret = cdnsp_prepare_ring(pdev, ep_ring,
+ GET_EP_CTX_STATE(preq->pep->out_ctx),
+ num_trbs, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&preq->td.td_list);
+ preq->td.preq = preq;
+
+ /* Add this TD to the tail of the endpoint ring's TD list. */
+ list_add_tail(&preq->td.td_list, &ep_ring->td_list);
+ ep_ring->num_tds++;
+ preq->pep->stream_info.td_count++;
+
+ preq->td.start_seg = ep_ring->enq_seg;
+ preq->td.first_trb = ep_ring->enqueue;
+
+ return 0;
+}
+
+static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
+{
+ unsigned int num_trbs;
+
+ num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
+ num_trbs++;
+
+ return num_trbs;
+}
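+
+/*
+ * Worked example of the calculation above (hypothetical values): a 72 KB
+ * buffer whose DMA address sits 60 KB into a 64 KB region, with
+ * TRB_MAX_BUFF_SIZE = 64 KB, needs DIV_ROUND_UP(72K + 60K, 64K) = 3 TRBs:
+ * 4 KB up to the first boundary, one full 64 KB TRB, then the final 4 KB.
+ */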
+
+static unsigned int count_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
+{
+ unsigned int i, len, full_len, num_trbs = 0;
+ struct scatterlist *sg;
+
+ full_len = preq->request.length;
+
+ for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
+ len = sg_dma_len(sg);
+ num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
+ len = min(len, full_len);
+ full_len -= len;
+ if (full_len == 0)
+ break;
+ }
+
+ return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
+{
+ if (running_total != preq->request.length)
+ dev_err(preq->pep->pdev->dev,
+ "%s - Miscalculated tx length, "
+ "queued %#x, asked for %#x (%d)\n",
+ preq->pep->name, running_total,
+ preq->request.length, preq->request.actual);
+}
+
+/*
+ * TD size is the number of max packet sized packets remaining in the TD
+ * (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
+ int transferred,
+ int trb_buff_len,
+ unsigned int td_total_len,
+ struct cdnsp_request *preq,
+ bool more_trbs_coming)
+{
+ u32 maxp, total_packet_count;
+
+ /* One TRB with a zero-length data packet. */
+ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+ trb_buff_len == td_total_len)
+ return 0;
+
+ maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+ /* Queuing functions don't count the current TRB into transferred. */
+ return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
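+
+/*
+ * Worked example of the TD-size math above (hypothetical values): with
+ * wMaxPacketSize = 1024 and a 3000-byte TD queued as TRBs of 2048 and 952
+ * bytes, total_packet_count = DIV_ROUND_UP(3000, 1024) = 3. The first TRB
+ * sees transferred = 0 and trb_buff_len = 2048, so
+ * TD size = 3 - (0 + 2048) / 1024 = 1 packet remaining. The second TRB is
+ * the last, so the !more_trbs_coming early return reports the required 0.
+ */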
+
+static int cdnsp_align_td(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq, u32 enqd_len,
+ u32 *trb_buff_len, struct cdnsp_segment *seg)
+{
+ struct device *dev = pdev->dev;
+ unsigned int unalign;
+ unsigned int max_pkt;
+ u32 new_buff_len;
+
+ max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+ /* We got lucky, last normal TRB data on segment is packet aligned. */
+ if (unalign == 0)
+ return 0;
+
+ /* Can the last normal TRB be made alignable by splitting it? */
+ if (*trb_buff_len > unalign) {
+ *trb_buff_len -= unalign;
+ trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
+ enqd_len, 0, unalign);
+ return 0;
+ }
+
+ /*
+ * We want enqd_len + trb_buff_len to sum up to a number which is
+ * divisible by the endpoint's wMaxPacketSize. IOW:
+ * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+ */
+ new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+ if (new_buff_len > (preq->request.length - enqd_len))
+ new_buff_len = (preq->request.length - enqd_len);
+
+ /* Create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB. */
+ if (preq->direction) {
+ sg_pcopy_to_buffer(preq->request.sg,
+ preq->request.num_mapped_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_FROM_DEVICE);
+ }
+
+ if (dma_mapping_error(dev, seg->bounce_dma)) {
+ /* Try without aligning. */
+ dev_warn(pdev->dev,
+ "Failed mapping bounce buffer, not aligning\n");
+ return 0;
+ }
+
+ *trb_buff_len = new_buff_len;
+ seg->bounce_len = new_buff_len;
+ seg->bounce_offs = enqd_len;
+
+ trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
+ unalign);
+
+ /*
+ * The bounce buffer was successfully aligned, so seg->bounce_dma will
+ * be used in the transfer TRB as the new transfer buffer address.
+ */
+ return 1;
+}
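+
+/*
+ * Illustrative example for the bounce path above (hypothetical values):
+ * with max_pkt = 512 and enqd_len = 1000, the last normal TRB must carry
+ * new_buff_len = 512 - (1000 % 512) = 24 bytes, staged through the bounce
+ * buffer so that the data enqueued on this segment ends packet-aligned.
+ */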
+
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+ unsigned int enqd_len, block_len, trb_buff_len, full_len;
+ unsigned int start_cycle, num_sgs = 0;
+ struct cdnsp_generic_trb *start_trb;
+ u32 field, length_field, remainder;
+ struct scatterlist *sg = NULL;
+ bool more_trbs_coming = true;
+ bool need_zero_pkt = false;
+ bool zero_len_trb = false;
+ struct cdnsp_ring *ring;
+ bool first_trb = true;
+ unsigned int num_trbs;
+ struct cdnsp_ep *pep;
+ u64 addr, send_addr;
+ int sent_len, ret;
+
+ ring = cdnsp_request_to_transfer_ring(pdev, preq);
+ if (!ring)
+ return -EINVAL;
+
+ full_len = preq->request.length;
+
+ if (preq->request.num_sgs) {
+ num_sgs = preq->request.num_sgs;
+ sg = preq->request.sg;
+ addr = (u64)sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ num_trbs = count_sg_trbs_needed(preq);
+ } else {
+ num_trbs = count_trbs_needed(preq);
+ addr = (u64)preq->request.dma;
+ block_len = full_len;
+ }
+
+ pep = preq->pep;
+
+ /* Deal with request.zero - need one more td/trb. */
+ if (preq->request.zero && preq->request.length &&
+ IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
+ need_zero_pkt = true;
+ num_trbs++;
+ }
+
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ring->enqueue->generic;
+ start_cycle = ring->cycle_state;
+ send_addr = addr;
+
+ /* Queue the TRBs, even if they are zero-length */
+ for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
+ enqd_len += trb_buff_len) {
+ field = TRB_TYPE(TRB_NORMAL);
+
+ /* TRB buffer should not cross 64KB boundaries */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min(trb_buff_len, block_len);
+ if (enqd_len + trb_buff_len > full_len)
+ trb_buff_len = full_len - enqd_len;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb) {
+ first_trb = false;
+ if (start_cycle == 0)
+ field |= TRB_CYCLE;
+ } else {
+ field |= ring->cycle_state;
+ }
+
+ /*
+ * Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
+ field |= TRB_CHAIN;
+ if (cdnsp_trb_is_link(ring->enqueue + 1)) {
+ if (cdnsp_align_td(pdev, preq, enqd_len,
+ &trb_buff_len,
+ ring->enq_seg)) {
+ send_addr = ring->enq_seg->bounce_dma;
+ /* Assuming TD won't span 2 segs */
+ preq->td.bounce_seg = ring->enq_seg;
+ }
+ }
+ }
+
+ if (enqd_len + trb_buff_len >= full_len) {
+ /* Queue one extra zero-length TRB when a ZLP is required. */
+ if (need_zero_pkt && !zero_len_trb) {
+ zero_len_trb = true;
+ } else {
+ zero_len_trb = false;
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ need_zero_pkt = false;
+ preq->td.last_trb = ring->enqueue;
+ }
+ }
+
+ /* Only set interrupt on short packet for OUT endpoints. */
+ if (!preq->direction)
+ field |= TRB_ISP;
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+ remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
+ full_len, preq,
+ more_trbs_coming);
+
+ length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+ lower_32_bits(send_addr),
+ upper_32_bits(send_addr),
+ length_field,
+ field);
+
+ addr += trb_buff_len;
+ sent_len = trb_buff_len;
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
+ sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ addr += sent_len;
+ }
+ }
+ block_len -= sent_len;
+ send_addr = addr;
+ }
+
+ cdnsp_check_trb_math(preq, enqd_len);
+ ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
+ start_cycle, start_trb);
+
+ if (ret)
+ preq->td.drbl = 1;
+
+ return 0;
+}
+
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+ u32 field, length_field, remainder;
+ struct cdnsp_ep *pep = preq->pep;
+ struct cdnsp_ring *ep_ring;
+ int num_trbs;
+ int ret;
+
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+ if (!ep_ring)
+ return -EINVAL;
+
+ /* One TRB for the data stage (if any), one for the status stage. */
+ num_trbs = (pdev->three_stage_setup) ? 2 : 1;
+
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /* If there's data, queue data TRBs */
+ if (pdev->ep0_expect_in)
+ field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+ else
+ field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+
+ if (preq->request.length > 0) {
+ remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
+ preq->request.length, preq, 1);
+
+ length_field = TRB_LEN(preq->request.length) |
+ TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
+
+ if (pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ cdnsp_queue_trb(pdev, ep_ring, true,
+ lower_32_bits(preq->request.dma),
+ upper_32_bits(preq->request.dma), length_field,
+ field | ep_ring->cycle_state |
+ TRB_SETUPID(pdev->setup_id) |
+ pdev->setup_speed);
+
+ pdev->ep0_stage = CDNSP_DATA_STAGE;
+ }
+
+ /* Save the DMA address of the last TRB in the TD. */
+ preq->td.last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB. */
+ if (preq->request.length == 0)
+ field = ep_ring->cycle_state;
+ else
+ field = (ep_ring->cycle_state ^ 1);
+
+ if (preq->request.length > 0 && pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ if (pep->ep_state & EP0_HALTED_STATUS) {
+ pep->ep_state &= ~EP0_HALTED_STATUS;
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
+ } else {
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
+ field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
+ TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
+
+ cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
+
+ return 0;
+}
+
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
+ int ret = 0;
+
+ if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
+ trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
+ goto ep_stopped;
+ }
+
+ cdnsp_queue_stop_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
+
+ep_stopped:
+ pep->ep_state |= EP_STOPPED;
+ return ret;
+}
+
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ int ret;
+
+ cdnsp_queue_flush_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
+
+ return ret;
+}
+
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD. Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst. Basically, for everything but SuperSpeed devices, this field will be
+ * zero.
+ */
+static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+
+ if (pdev->gadget.speed < USB_SPEED_SUPER)
+ return 0;
+
+ max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
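+
+/*
+ * Example (hypothetical values): a SuperSpeed endpoint with bMaxBurst = 3
+ * (up to four packets per burst) moving a 6-packet TD needs
+ * DIV_ROUND_UP(6, 4) = 2 bursts, which the zero-based field encodes as 1.
+ */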
+
+/*
+ * Returns the number of packets in the last "burst" of packets. This field is
+ * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
+ */
+static unsigned int
+ cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+ unsigned int residue;
+
+ if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+ /* bMaxBurst is zero based: 0 means 1 packet per burst. */
+ max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+ residue = total_packet_count % (max_burst + 1);
+
+ /*
+ * If residue is zero, the last burst contains (max_burst + 1)
+ * number of packets, but the TLBPC field is zero-based.
+ */
+ if (residue == 0)
+ return max_burst;
+
+ return residue - 1;
+ }
+ if (total_packet_count == 0)
+ return 0;
+
+ return total_packet_count - 1;
+}
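+
+/*
+ * Continuing the example above: 6 packets with bMaxBurst = 3 leave
+ * residue = 6 % 4 = 2 packets in the last burst, encoded zero-based as 1.
+ * Had the residue been 0, the last burst would have been full and the
+ * field would be max_burst (here 3).
+ */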
+
+/* Queue function isoc transfer */
+static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ int trb_buff_len, td_len, td_remain_len, ret;
+ unsigned int burst_count, last_burst_pkt;
+ unsigned int total_pkt_count, max_pkt;
+ struct cdnsp_generic_trb *start_trb;
+ bool more_trbs_coming = true;
+ struct cdnsp_ring *ep_ring;
+ int running_total = 0;
+ u32 field, length_field;
+ int start_cycle;
+ int trbs_per_td;
+ u64 addr;
+ int i;
+
+ ep_ring = preq->pep->ring;
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+ td_len = preq->request.length;
+ addr = (u64)preq->request.dma;
+ td_remain_len = td_len;
+
+ max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_pkt_count == 0)
+ total_pkt_count++;
+
+ burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
+ last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
+ total_pkt_count);
+ trbs_per_td = count_isoc_trbs_needed(preq);
+
+ ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * Set isoc specific data for the first TRB in a TD.
+ * Prevent HW from getting the TRBs by keeping the cycle state
+ * inverted in the first TD's isoc TRB. The ternary must be
+ * parenthesized: | binds more tightly than ?:.
+ */
+ field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
+ (start_cycle ? 0 : 1) | TRB_SIA | TRB_TBC(burst_count);
+
+ /* Fill the rest of the TRB fields, and remaining normal TRBs. */
+ for (i = 0; i < trbs_per_td; i++) {
+ u32 remainder;
+
+ /* Calculate TRB length. */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ if (trb_buff_len > td_remain_len)
+ trb_buff_len = td_remain_len;
+
+ /* Set the TRB length, TD size, & interrupter fields. */
+ remainder = cdnsp_td_remainder(pdev, running_total,
+ trb_buff_len, td_len, preq,
+ more_trbs_coming);
+
+ length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+
+ /* Only first TRB is isoc, overwrite otherwise. */
+ if (i) {
+ field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
+ length_field |= TRB_TD_SIZE(remainder);
+ } else {
+ length_field |= TRB_TD_SIZE_TBC(burst_count);
+ }
+
+ /* Only set interrupt on short packet for OUT EPs. */
+ if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
+ field |= TRB_ISP;
+
+ /* Set the chain bit for all except the last TRB. */
+ if (i < trbs_per_td - 1) {
+ more_trbs_coming = true;
+ field |= TRB_CHAIN;
+ } else {
+ more_trbs_coming = false;
+ preq->td.last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
+ lower_32_bits(addr), upper_32_bits(addr),
+ length_field, field);
+
+ running_total += trb_buff_len;
+ addr += trb_buff_len;
+ td_remain_len -= trb_buff_len;
+ }
+
+ /* Check TD length */
+ if (running_total != td_len) {
+ dev_err(pdev->dev, "ISOC TD length unmatch\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
+ start_cycle, start_trb);
+
+ return 0;
+
+cleanup:
+ /* Clean up a partially enqueued isoc transfer. */
+ list_del_init(&preq->td.td_list);
+ ep_ring->num_tds--;
+
+ /*
+ * Use the first TD as a temporary variable to turn the TDs we've
+ * queued into No-ops with a software-owned cycle bit.
+ * That way the hardware won't accidentally start executing bogus TDs
+ * when we partially overwrite them.
+ * td->first_trb and td->start_seg are already set.
+ */
+ preq->td.last_trb = ep_ring->enqueue;
+ /* Every TRB except the first & last will have its cycle bit flipped. */
+ cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
+
+ /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+ ep_ring->enqueue = preq->td.first_trb;
+ ep_ring->enq_seg = preq->td.start_seg;
+ ep_ring->cycle_state = start_cycle;
+ return ret;
+}
+
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 ep_state;
+ int num_trbs;
+ int ret;
+
+ ep_ring = preq->pep->ring;
+ ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
+ num_trbs = count_isoc_trbs_needed(preq);
+
+ /*
+ * Check the ring to guarantee there is enough room for the whole
+ * request. Do not insert any TD of the USB request onto the ring
+ * if the check fails.
+ */
+ ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ return cdnsp_queue_isoc_tx(pdev, preq);
+}
+
+/**** Command Ring Operations ****/
+/*
+ * Generic function for queuing a command TRB on the command ring.
+ * The driver queues only one command on the ring at a time.
+ */
+static void cdnsp_queue_command(struct cdnsp_device *pdev,
+ u32 field1,
+ u32 field2,
+ u32 field3,
+ u32 field4)
+{
+ cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
+ GFP_ATOMIC);
+
+ pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
+
+ cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
+ field3, field4 | pdev->cmd_ring->cycle_state);
+}
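+
+/*
+ * The helpers below are thin wrappers around cdnsp_queue_command():
+ * field1-field3 carry command-specific parameters (often the low/high
+ * halves of a context pointer) and field4 holds the TRB type and flags.
+ * The cycle bit is OR'ed in by cdnsp_queue_command() itself.
+ */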
+
+/* Queue a slot enable or disable request on the command ring */
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue an address device command TRB */
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr,
+ enum cdnsp_setup_dev setup)
+{
+ cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_ADDR_DEV) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
+}
+
+/* Queue a reset device command TRB */
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr)
+{
+ cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_CONFIG_EP) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/*
+ * Queue a "Stop Endpoint" command to stop activity on an endpoint,
+ * for example before the endpoint is suspended.
+ */
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
+}
+
+/* Set Transfer Ring Dequeue Pointer command. */
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state)
+{
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
+ u32 type = TRB_TYPE(TRB_SET_DEQ);
+ u32 trb_sct = 0;
+ dma_addr_t addr;
+
+ addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+
+ if (deq_state->stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+
+ cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
+ deq_state->new_cycle_state, upper_32_bits(addr),
+ trb_stream_id, trb_slot_id |
+ EP_ID_FOR_TRB(pep->idx) | type);
+}
+
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0,
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index) |
+ TRB_TYPE(TRB_RESET_EP));
+}
+
+/*
+ * Queue a halt endpoint request on the command ring.
+ */
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index));
+}
+
+/*
+ * Queue a flush endpoint request on the command ring.
+ */
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index));
+}
+
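+/*
+ * Send a Function Wake Device Notification transaction packet to the host,
+ * which is how a USB3 function signals device-initiated remote wakeup for
+ * the given interface.
+ */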
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
+{
+ u32 lo, mid;
+
+ lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
+ TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
+ mid = TRB_FH_TR_PACKET_DEV_NOT |
+ TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
+ TRB_FH_TO_INTERFACE(intf_num);
+
+ cdnsp_queue_command(pdev, lo, mid, 0,
+ TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
+}
diff --git a/drivers/usb/cdns3/cdnsp-trace.c b/drivers/usb/cdns3/cdnsp-trace.c
new file mode 100644
index 000000000000..e50ab799ad95
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "cdnsp-trace.h"
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
new file mode 100644
index 000000000000..5aa88ca012de
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -0,0 +1,830 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ * Trace support header file
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdnsp-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdnsp_dev
+
+#if !defined(__CDNSP_DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __CDNSP_DEV_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "cdnsp-gadget.h"
+#include "cdnsp-debug.h"
+
+/*
+ * There is a limitation on the single buffer size in the TRACEPOINT
+ * subsystem. By default TRACE_BUF_SIZE is 1024, so not all data will
+ * be logged. To show more data this must be increased, though in most
+ * cases the default value is sufficient.
+ */
+#define CDNSP_MSG_MAX 500
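+
+/*
+ * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
+ *
+ * echo 1 > /sys/kernel/tracing/events/cdnsp-dev/enable
+ * cat /sys/kernel/tracing/trace
+ */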
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(unsigned int, state)
+ __field(u32, stream_id)
+ __field(u8, enabled)
+ __field(unsigned int, num_streams)
+ __field(int, td_count)
+ __field(u8, first_prime_det)
+ __field(u8, drbls_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->state = pep->ep_state;
+ __entry->stream_id = stream_id;
+ __entry->enabled = pep->ep_state & EP_HAS_STREAMS;
+ __entry->num_streams = pep->stream_info.num_streams;
+ __entry->td_count = pep->stream_info.td_count;
+ __entry->first_prime_det = pep->stream_info.first_prime_det;
+ __entry->drbls_count = pep->stream_info.drbls_count;
+ ),
+ TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d "
+ "tds %d, first prime: %d drbls %d",
+ __get_str(name), __entry->stream_id, __entry->state,
+ __entry->enabled, __entry->num_streams, __entry->td_count,
+ __entry->first_prime_det, __entry->drbls_count)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_tr_drbl,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_wait_for_prime,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_list_empty_with_skip,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_enable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_disable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_busy_try_halt_again,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_enable_disable,
+ TP_PROTO(int set),
+ TP_ARGS(set),
+ TP_STRUCT__entry(
+ __field(int, set)
+ ),
+ TP_fast_assign(
+ __entry->set = set;
+ ),
+ TP_printk("%s", __entry->set ? "enabled" : "disabled")
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_pullup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u1,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u2,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_lpm,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_may_wakeup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_simple,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(text, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(text, msg)
+ ),
+ TP_printk("%s", __get_str(text))
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_exit,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_init,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_slot_id,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_no_room_on_ring,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_status_stage,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_request,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_set_config,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_halted,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep_halt,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+TRACE_EVENT(cdnsp_looking_trb_in_td,
+ TP_PROTO(dma_addr_t suspect, dma_addr_t trb_start, dma_addr_t trb_end,
+ dma_addr_t curr_seg, dma_addr_t end_seg),
+ TP_ARGS(suspect, trb_start, trb_end, curr_seg, end_seg),
+ TP_STRUCT__entry(
+ __field(dma_addr_t, suspect)
+ __field(dma_addr_t, trb_start)
+ __field(dma_addr_t, trb_end)
+ __field(dma_addr_t, curr_seg)
+ __field(dma_addr_t, end_seg)
+ ),
+ TP_fast_assign(
+ __entry->suspect = suspect;
+ __entry->trb_start = trb_start;
+ __entry->trb_end = trb_end;
+ __entry->curr_seg = curr_seg;
+ __entry->end_seg = end_seg;
+ ),
+ TP_printk("DMA: suspect event: %pad, trb-start: %pad, trb-end %pad, "
+ "seg-start %pad, seg-end %pad",
+ &__entry->suspect, &__entry->trb_start, &__entry->trb_end,
+ &__entry->curr_seg, &__entry->end_seg)
+);
+
+TRACE_EVENT(cdnsp_port_info,
+ TP_PROTO(__le32 __iomem *addr, u32 offset, u32 count, u32 rev),
+ TP_ARGS(addr, offset, count, rev),
+ TP_STRUCT__entry(
+ __field(__le32 __iomem *, addr)
+ __field(u32, offset)
+ __field(u32, count)
+ __field(u32, rev)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->rev = rev;
+ ),
+ TP_printk("Ext Cap %p, port offset = %u, count = %u, rev = 0x%x",
+ __entry->addr, __entry->offset, __entry->count, __entry->rev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_deq_state,
+ TP_PROTO(struct cdnsp_dequeue_state *state),
+ TP_ARGS(state),
+ TP_STRUCT__entry(
+ __field(int, new_cycle_state)
+ __field(struct cdnsp_segment *, new_deq_seg)
+ __field(dma_addr_t, deq_seg_dma)
+ __field(union cdnsp_trb *, new_deq_ptr)
+ __field(dma_addr_t, deq_ptr_dma)
+ ),
+ TP_fast_assign(
+ __entry->new_cycle_state = state->new_cycle_state;
+ __entry->new_deq_seg = state->new_deq_seg;
+ __entry->deq_seg_dma = state->new_deq_seg->dma;
+ __entry->new_deq_ptr = state->new_deq_ptr;
+ __entry->deq_ptr_dma = cdnsp_trb_virt_to_dma(state->new_deq_seg,
+ state->new_deq_ptr);
+ ),
+ TP_printk("New cycle state = 0x%x, New dequeue segment = %p (0x%pad dma), "
+ "New dequeue pointer = %p (0x%pad dma)",
+ __entry->new_cycle_state, __entry->new_deq_seg,
+ &__entry->deq_seg_dma, __entry->new_deq_ptr,
+ &__entry->deq_ptr_dma
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_deq_state, cdnsp_new_deq_state,
+ TP_PROTO(struct cdnsp_dequeue_state *state),
+ TP_ARGS(state)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ctrl,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl),
+ TP_STRUCT__entry(
+ __field(u8, bRequestType)
+ __field(u8, bRequest)
+ __field(u16, wValue)
+ __field(u16, wIndex)
+ __field(u16, wLength)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = ctrl->bRequestType;
+ __entry->bRequest = ctrl->bRequest;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
+ ),
+ TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNSP_MSG_MAX,
+ __entry->bRequestType,
+ __entry->bRequest, __entry->wValue,
+ __entry->wIndex, __entry->wLength)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ctrl, cdnsp_ctrl_req,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_bounce,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign),
+ TP_STRUCT__entry(
+ __string(name, preq->pep->name)
+ __field(u32, new_buf_len)
+ __field(u32, offset)
+ __field(dma_addr_t, dma)
+ __field(unsigned int, unalign)
+ ),
+ TP_fast_assign(
+ __assign_str(name, preq->pep->name);
+ __entry->new_buf_len = new_buf_len;
+ __entry->offset = offset;
+ __entry->dma = dma;
+ __entry->unalign = unalign;
+ ),
+ TP_printk("%s buf len %d, offset %d, dma %pad, unalign %d",
+ __get_str(name), __entry->new_buf_len,
+ __entry->offset, &__entry->dma, __entry->unalign
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_align_td_split,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_map,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_unmap,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_trb,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb),
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, field0)
+ __field(u32, field1)
+ __field(u32, field2)
+ __field(u32, field3)
+ __field(union cdnsp_trb *, trb)
+ __field(dma_addr_t, trb_dma)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->type = ring->type;
+ __entry->field0 = le32_to_cpu(trb->field[0]);
+ __entry->field1 = le32_to_cpu(trb->field[1]);
+ __entry->field2 = le32_to_cpu(trb->field[2]);
+ __entry->field3 = le32_to_cpu(trb->field[3]);
+ __entry->trb = (union cdnsp_trb *)trb;
+ __entry->trb_dma = cdnsp_trb_virt_to_dma(ring->deq_seg,
+ (union cdnsp_trb *)trb);
+
+ ),
+ TP_printk("%s: %s trb: %p(%pad)", cdnsp_ring_type_string(__entry->type),
+ cdnsp_decode_trb(__get_str(str), CDNSP_MSG_MAX,
+ __entry->field0, __entry->field1,
+ __entry->field2, __entry->field3),
+ __entry->trb, &__entry->trb_dma
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_event,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_trb_without_td,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_command,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_transfer,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_queue_trb,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_wait_for_compl,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_timeout,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_defered_event,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_pdev,
+ TP_PROTO(struct cdnsp_device *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(
+ __field(struct cdnsp_device *, pdev)
+ __field(struct usb_gadget *, gadget)
+ __field(dma_addr_t, out_ctx)
+ __field(dma_addr_t, in_ctx)
+ __field(u8, port_num)
+ ),
+ TP_fast_assign(
+ __entry->pdev = pdev;
+ __entry->gadget = &pdev->gadget;
+ __entry->in_ctx = pdev->in_ctx.dma;
+ __entry->out_ctx = pdev->out_ctx.dma;
+ __entry->port_num = pdev->active_port ?
+ pdev->active_port->port_num : 0xFF;
+ ),
+ TP_printk("pdev %p gadget %p ctx %pad | %pad, port %d ",
+ __entry->pdev, __entry->gadget, &__entry->in_ctx,
+ &__entry->out_ctx, __entry->port_num
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_alloc_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_free_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_addressable_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->pep->name)
+ __field(struct usb_request *, request)
+ __field(struct cdnsp_request *, preq)
+ __field(void *, buf)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(dma_addr_t, dma)
+ __field(unsigned int, stream_id)
+ __field(unsigned int, zero)
+ __field(unsigned int, short_not_ok)
+ __field(unsigned int, no_interrupt)
+ __field(struct scatterlist*, sg)
+ __field(unsigned int, num_sgs)
+ __field(unsigned int, num_mapped_sgs)
+
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->pep->name);
+ __entry->request = &req->request;
+ __entry->preq = req;
+ __entry->buf = req->request.buf;
+ __entry->actual = req->request.actual;
+ __entry->length = req->request.length;
+ __entry->status = req->request.status;
+ __entry->dma = req->request.dma;
+ __entry->stream_id = req->request.stream_id;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ __entry->sg = req->request.sg;
+ __entry->num_sgs = req->request.num_sgs;
+ __entry->num_mapped_sgs = req->request.num_mapped_sgs;
+ ),
+ TP_printk("%s; req U:%p/P:%p, req buf %p, length %u/%u, status %d, "
+ "buf dma (%pad), SID %u, %s%s%s, sg %p, num_sg %d,"
+ " num_m_sg %d",
+ __get_str(name), __entry->request, __entry->preq,
+ __entry->buf, __entry->actual, __entry->length,
+ __entry->status, &__entry->dma,
+ __entry->stream_id, __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_busy,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_error,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_dequeue,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_giveback,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_alloc_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_free_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep_ctx,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u64, deq)
+ __field(u32, tx_info)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->ep_info);
+ __entry->info2 = le32_to_cpu(ctx->ep_info2);
+ __entry->deq = le64_to_cpu(ctx->deq);
+ __entry->tx_info = le32_to_cpu(ctx->tx_info);
+ ),
+ TP_printk("%s", cdnsp_decode_ep_context(__get_str(str), CDNSP_MSG_MAX,
+ __entry->info, __entry->info2,
+ __entry->deq, __entry->tx_info)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_disabled,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_stopped_or_disabled,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_remove_request,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_stop_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_flush_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_set_deq_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_reset_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_config_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_slot_ctx,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u32, int_target)
+ __field(u32, state)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->dev_info);
+ __entry->info2 = le32_to_cpu(ctx->dev_port);
+ __entry->int_target = le32_to_cpu(ctx->int_target);
+ __entry->state = le32_to_cpu(ctx->dev_state);
+ ),
+ TP_printk("%s", cdnsp_decode_slot_context(__entry->info,
+ __entry->info2,
+ __entry->int_target,
+ __entry->state)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_slot_already_in_default,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_enable_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_disable_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_reset_device,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_setup_device_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_addr_dev,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_reset_dev,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_set_deq,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_configure_endpoint,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_td_info,
+ TP_PROTO(struct cdnsp_request *preq),
+ TP_ARGS(preq),
+ TP_STRUCT__entry(
+ __string(name, preq->pep->name)
+ __field(struct usb_request *, request)
+ __field(struct cdnsp_request *, preq)
+ __field(union cdnsp_trb *, first_trb)
+ __field(union cdnsp_trb *, last_trb)
+ __field(dma_addr_t, trb_dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, preq->pep->name);
+ __entry->request = &preq->request;
+ __entry->preq = preq;
+ __entry->first_trb = preq->td.first_trb;
+ __entry->last_trb = preq->td.last_trb;
+ __entry->trb_dma = cdnsp_trb_virt_to_dma(preq->td.start_seg,
+ preq->td.first_trb)
+ ),
+ TP_printk("%s req/preq: %p/%p, first trb %p[vir]/%pad(dma), last trb %p",
+ __get_str(name), __entry->request, __entry->preq,
+ __entry->first_trb, &__entry->trb_dma,
+ __entry->last_trb
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_td_info, cdnsp_remove_request_td,
+ TP_PROTO(struct cdnsp_request *preq),
+ TP_ARGS(preq)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ring,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring),
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(void *, ring)
+ __field(dma_addr_t, enq)
+ __field(dma_addr_t, deq)
+ __field(dma_addr_t, enq_seg)
+ __field(dma_addr_t, deq_seg)
+ __field(unsigned int, num_segs)
+ __field(unsigned int, stream_id)
+ __field(unsigned int, cycle_state)
+ __field(unsigned int, num_trbs_free)
+ __field(unsigned int, bounce_buf_len)
+ ),
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->type = ring->type;
+ __entry->num_segs = ring->num_segs;
+ __entry->stream_id = ring->stream_id;
+ __entry->enq_seg = ring->enq_seg->dma;
+ __entry->deq_seg = ring->deq_seg->dma;
+ __entry->cycle_state = ring->cycle_state;
+ __entry->num_trbs_free = ring->num_trbs_free;
+ __entry->bounce_buf_len = ring->bounce_buf_len;
+ __entry->enq = cdnsp_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue);
+ __entry->deq = cdnsp_trb_virt_to_dma(ring->deq_seg,
+ ring->dequeue);
+ ),
+ TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d"
+ " free_trbs %d bounce %d cycle %d",
+ cdnsp_ring_type_string(__entry->type), __entry->ring,
+ &__entry->enq, &__entry->enq_seg,
+ &__entry->deq, &__entry->deq_seg,
+ __entry->num_segs,
+ __entry->stream_id,
+ __entry->num_trbs_free,
+ __entry->bounce_buf_len,
+ __entry->cycle_state
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_alloc,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_free,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_set_stream_ring,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_expansion,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_enq,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_deq,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_portsc,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc),
+ TP_STRUCT__entry(
+ __field(u32, portnum)
+ __field(u32, portsc)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->portnum = portnum;
+ __entry->portsc = portsc;
+ ),
+ TP_printk("port-%d: %s",
+ __entry->portnum,
+ cdnsp_decode_portsc(__get_str(str), CDNSP_MSG_MAX,
+ __entry->portsc)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_handle_port_status,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_link_state_changed,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+TRACE_EVENT(cdnsp_stream_number,
+ TP_PROTO(struct cdnsp_ep *pep, int num_stream_ctxs, int num_streams),
+ TP_ARGS(pep, num_stream_ctxs, num_streams),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(int, num_stream_ctxs)
+ __field(int, num_streams)
+ ),
+ TP_fast_assign(
+ __entry->num_stream_ctxs = num_stream_ctxs;
+ __entry->num_streams = num_streams;
+ ),
+ TP_printk("%s Need %u stream ctx entries for %u stream IDs.",
+ __get_str(name), __entry->num_stream_ctxs,
+ __entry->num_streams)
+);
+
+#endif /* __CDNSP_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cdnsp-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 1991cb5cf6bf..199713769289 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
*
* Copyright (C) 2018-2019 Cadence.
* Copyright (C) 2017-2018 NXP
@@ -19,15 +19,13 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
-#include "gadget.h"
#include "core.h"
#include "host-export.h"
-#include "gadget-export.h"
#include "drd.h"
-static int cdns3_idle_init(struct cdns3 *cdns);
+static int cdns_idle_init(struct cdns *cdns);
-static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
+static int cdns_role_start(struct cdns *cdns, enum usb_role role)
{
int ret;
@@ -41,47 +39,47 @@ static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
if (!cdns->roles[role])
return -ENXIO;
- if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE)
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE)
return 0;
mutex_lock(&cdns->mutex);
ret = cdns->roles[role]->start(cdns);
if (!ret)
- cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE;
+ cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE;
mutex_unlock(&cdns->mutex);
return ret;
}
-static void cdns3_role_stop(struct cdns3 *cdns)
+static void cdns_role_stop(struct cdns *cdns)
{
enum usb_role role = cdns->role;
if (WARN_ON(role > USB_ROLE_DEVICE))
return;
- if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE)
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE)
return;
mutex_lock(&cdns->mutex);
cdns->roles[role]->stop(cdns);
- cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE;
+ cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE;
mutex_unlock(&cdns->mutex);
}
-static void cdns3_exit_roles(struct cdns3 *cdns)
+static void cdns_exit_roles(struct cdns *cdns)
{
- cdns3_role_stop(cdns);
- cdns3_drd_exit(cdns);
+ cdns_role_stop(cdns);
+ cdns_drd_exit(cdns);
}
/**
- * cdns3_core_init_role - initialize role of operation
- * @cdns: Pointer to cdns3 structure
+ * cdns_core_init_role - initialize role of operation
+ * @cdns: Pointer to cdns structure
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_core_init_role(struct cdns3 *cdns)
+static int cdns_core_init_role(struct cdns *cdns)
{
struct device *dev = cdns->dev;
enum usb_dr_mode best_dr_mode;
@@ -97,13 +95,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
* can be restricted later depending on strap pin configuration.
*/
if (dr_mode == USB_DR_MODE_UNKNOWN) {
- if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
- IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_OTG;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
- dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_PERIPHERAL;
+ if (cdns->version == CDNSP_CONTROLLER_V2) {
+ if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ } else {
+ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
}
/*
@@ -112,7 +120,7 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
*/
best_dr_mode = cdns->dr_mode;
- ret = cdns3_idle_init(cdns);
+ ret = cdns_idle_init(cdns);
if (ret)
return ret;
@@ -128,7 +136,14 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
dr_mode = best_dr_mode;
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
- ret = cdns3_host_init(cdns);
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
+ (cdns->version < CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
+ ret = cdns_host_init(cdns);
+ else
+ ret = -ENXIO;
+
if (ret) {
dev_err(dev, "Host initialization failed with %d\n",
ret);
@@ -137,7 +152,11 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
- ret = cdns3_gadget_init(cdns);
+ if (cdns->gadget_init)
+ ret = cdns->gadget_init(cdns);
+ else
+ ret = -ENXIO;
+
if (ret) {
dev_err(dev, "Device initialization failed with %d\n",
ret);
@@ -147,28 +166,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
cdns->dr_mode = dr_mode;
- ret = cdns3_drd_update_mode(cdns);
+ ret = cdns_drd_update_mode(cdns);
if (ret)
goto err;
/* Initialize idle role to start with */
- ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+ ret = cdns_role_start(cdns, USB_ROLE_NONE);
if (ret)
goto err;
switch (cdns->dr_mode) {
case USB_DR_MODE_OTG:
- ret = cdns3_hw_role_switch(cdns);
+ ret = cdns_hw_role_switch(cdns);
if (ret)
goto err;
break;
case USB_DR_MODE_PERIPHERAL:
- ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+ ret = cdns_role_start(cdns, USB_ROLE_DEVICE);
if (ret)
goto err;
break;
case USB_DR_MODE_HOST:
- ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+ ret = cdns_role_start(cdns, USB_ROLE_HOST);
if (ret)
goto err;
break;
@@ -179,32 +198,32 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
return 0;
err:
- cdns3_exit_roles(cdns);
+ cdns_exit_roles(cdns);
return ret;
}
/**
- * cdns3_hw_role_state_machine - role switch state machine based on hw events.
+ * cdns_hw_role_state_machine - role switch state machine based on hw events.
* @cdns: Pointer to controller structure.
*
* Returns next role to be entered based on hw events.
*/
-static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
+static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns)
{
enum usb_role role = USB_ROLE_NONE;
int id, vbus;
if (cdns->dr_mode != USB_DR_MODE_OTG) {
- if (cdns3_is_host(cdns))
+ if (cdns_is_host(cdns))
role = USB_ROLE_HOST;
- if (cdns3_is_device(cdns))
+ if (cdns_is_device(cdns))
role = USB_ROLE_DEVICE;
return role;
}
- id = cdns3_get_id(cdns);
- vbus = cdns3_get_vbus(cdns);
+ id = cdns_get_id(cdns);
+ vbus = cdns_get_vbus(cdns);
/*
* Role change state machine
@@ -240,28 +259,28 @@ static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
return role;
}
-static int cdns3_idle_role_start(struct cdns3 *cdns)
+static int cdns_idle_role_start(struct cdns *cdns)
{
return 0;
}
-static void cdns3_idle_role_stop(struct cdns3 *cdns)
+static void cdns_idle_role_stop(struct cdns *cdns)
{
/* Program Lane swap and bring PHY out of RESET */
phy_reset(cdns->usb3_phy);
}
-static int cdns3_idle_init(struct cdns3 *cdns)
+static int cdns_idle_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
- rdrv->start = cdns3_idle_role_start;
- rdrv->stop = cdns3_idle_role_stop;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->start = cdns_idle_role_start;
+ rdrv->stop = cdns_idle_role_stop;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->suspend = NULL;
rdrv->resume = NULL;
rdrv->name = "idle";
@@ -272,10 +291,10 @@ static int cdns3_idle_init(struct cdns3 *cdns)
}
/**
- * cdns3_hw_role_switch - switch roles based on HW state
+ * cdns_hw_role_switch - switch roles based on HW state
* @cdns: controller
*/
-int cdns3_hw_role_switch(struct cdns3 *cdns)
+int cdns_hw_role_switch(struct cdns *cdns)
{
enum usb_role real_role, current_role;
int ret = 0;
@@ -287,22 +306,22 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
- real_role = cdns3_hw_role_state_machine(cdns);
+ real_role = cdns_hw_role_state_machine(cdns);
/* Do nothing if nothing changed */
if (current_role == real_role)
goto exit;
- cdns3_role_stop(cdns);
+ cdns_role_stop(cdns);
dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role);
- ret = cdns3_role_start(cdns, real_role);
+ ret = cdns_role_start(cdns, real_role);
if (ret) {
/* Back to current role */
dev_err(cdns->dev, "set %d has failed, back to %d\n",
real_role, current_role);
- ret = cdns3_role_start(cdns, current_role);
+ ret = cdns_role_start(cdns, current_role);
if (ret)
dev_err(cdns->dev, "back to %d failed too\n",
current_role);
@@ -319,15 +338,15 @@ exit:
*
* Returns role
*/
-static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
+static enum usb_role cdns_role_get(struct usb_role_switch *sw)
{
- struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
return cdns->role;
}
/**
- * cdns3_role_set - set current role of controller.
+ * cdns_role_set - set current role of controller.
*
* @sw: pointer to USB role switch structure
* @role: the previous role
@@ -335,9 +354,9 @@ static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
* - Role switch for dual-role devices
* - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices
*/
-static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
+static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
{
- struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
int ret = 0;
pm_runtime_get_sync(cdns->dev);
@@ -365,8 +384,8 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
}
}
- cdns3_role_stop(cdns);
- ret = cdns3_role_start(cdns, role);
+ cdns_role_stop(cdns);
+ ret = cdns_role_start(cdns, role);
if (ret)
dev_err(cdns->dev, "set role %d has failed\n", role);
@@ -375,37 +394,17 @@ pm_put:
return ret;
}
-static int set_phy_power_on(struct cdns3 *cdns)
-{
- int ret;
-
- ret = phy_power_on(cdns->usb2_phy);
- if (ret)
- return ret;
-
- ret = phy_power_on(cdns->usb3_phy);
- if (ret)
- phy_power_off(cdns->usb2_phy);
-
- return ret;
-}
-
-static void set_phy_power_off(struct cdns3 *cdns)
-{
- phy_power_off(cdns->usb3_phy);
- phy_power_off(cdns->usb2_phy);
-}
/**
- * cdns3_wakeup_irq - interrupt handler for wakeup events
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+ * @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
-static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
+static irqreturn_t cdns_wakeup_irq(int irq, void *data)
{
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
if (cdns->in_lpm) {
disable_irq_nosync(irq);
@@ -420,17 +419,14 @@ static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
}
/**
- * cdns3_probe - probe for cdns3 core device
- * @pdev: Pointer to cdns3 core platform device
+ * cdns_probe - probe for cdns3/cdnsp core device
+ * @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_probe(struct platform_device *pdev)
+int cdns_init(struct cdns *cdns)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct cdns3 *cdns;
- void __iomem *regs;
+ struct device *dev = cdns->dev;
int ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -439,259 +435,78 @@ static int cdns3_probe(struct platform_device *pdev)
return ret;
}
- cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
- if (!cdns)
- return -ENOMEM;
-
- cdns->dev = dev;
- cdns->pdata = dev_get_platdata(dev);
-
- platform_set_drvdata(pdev, cdns);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
- if (!res) {
- dev_err(dev, "missing host IRQ\n");
- return -ENODEV;
- }
-
- cdns->xhci_res[0] = *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
- if (!res) {
- dev_err(dev, "couldn't get xhci resource\n");
- return -ENXIO;
- }
-
- cdns->xhci_res[1] = *res;
-
- cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
- if (cdns->dev_irq < 0)
- return cdns->dev_irq;
-
- regs = devm_platform_ioremap_resource_byname(pdev, "dev");
- if (IS_ERR(regs))
- return PTR_ERR(regs);
- cdns->dev_regs = regs;
-
- cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
- if (cdns->otg_irq < 0)
- return cdns->otg_irq;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
- if (!res) {
- dev_err(dev, "couldn't get otg resource\n");
- return -ENXIO;
- }
-
- cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
-
- cdns->otg_res = *res;
-
- cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
- if (cdns->wakeup_irq == -EPROBE_DEFER)
- return cdns->wakeup_irq;
- else if (cdns->wakeup_irq == 0)
- return -EINVAL;
-
- if (cdns->wakeup_irq < 0) {
- dev_dbg(dev, "couldn't get wakeup irq\n");
- cdns->wakeup_irq = 0x0;
- }
-
mutex_init(&cdns->mutex);
- cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
- if (IS_ERR(cdns->usb2_phy))
- return PTR_ERR(cdns->usb2_phy);
-
- ret = phy_init(cdns->usb2_phy);
- if (ret)
- return ret;
-
- cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
- if (IS_ERR(cdns->usb3_phy))
- return PTR_ERR(cdns->usb3_phy);
-
- ret = phy_init(cdns->usb3_phy);
- if (ret)
- goto err1;
-
- ret = set_phy_power_on(cdns);
- if (ret)
- goto err2;
-
if (device_property_read_bool(dev, "usb-role-switch")) {
struct usb_role_switch_desc sw_desc = { };
- sw_desc.set = cdns3_role_set;
- sw_desc.get = cdns3_role_get;
+ sw_desc.set = cdns_role_set;
+ sw_desc.get = cdns_role_get;
sw_desc.allow_userspace_control = true;
sw_desc.driver_data = cdns;
sw_desc.fwnode = dev->fwnode;
cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
- ret = PTR_ERR(cdns->role_sw);
dev_warn(dev, "Unable to register Role Switch\n");
- goto err3;
+ return PTR_ERR(cdns->role_sw);
}
}
if (cdns->wakeup_irq) {
ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
- cdns3_wakeup_irq,
+ cdns_wakeup_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
- goto err4;
+ goto role_switch_unregister;
}
}
- ret = cdns3_drd_init(cdns);
+ ret = cdns_drd_init(cdns);
if (ret)
- goto err4;
+ goto init_failed;
- ret = cdns3_core_init_role(cdns);
+ ret = cdns_core_init_role(cdns);
if (ret)
- goto err4;
+ goto init_failed;
spin_lock_init(&cdns->lock);
- device_set_wakeup_capable(dev, true);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
- pm_runtime_forbid(dev);
- /*
- * The controller needs less time between bus and controller suspend,
- * and we also needs a small delay to avoid frequently entering low
- * power mode.
- */
- pm_runtime_set_autosuspend_delay(dev, 20);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_use_autosuspend(dev);
dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
return 0;
-err4:
- cdns3_drd_exit(cdns);
+init_failed:
+ cdns_drd_exit(cdns);
+role_switch_unregister:
if (cdns->role_sw)
usb_role_switch_unregister(cdns->role_sw);
-err3:
- set_phy_power_off(cdns);
-err2:
- phy_exit(cdns->usb3_phy);
-err1:
- phy_exit(cdns->usb2_phy);
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_init);
/**
- * cdns3_remove - unbind drd driver and clean up
- * @pdev: Pointer to Linux platform device
+ * cdns_remove - unbind drd driver and clean up
+ * @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_remove(struct platform_device *pdev)
+int cdns_remove(struct cdns *cdns)
{
- struct cdns3 *cdns = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
- cdns3_exit_roles(cdns);
+ cdns_exit_roles(cdns);
usb_role_switch_unregister(cdns->role_sw);
- set_phy_power_off(cdns);
- phy_exit(cdns->usb2_phy);
- phy_exit(cdns->usb3_phy);
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int cdns3_set_platform_suspend(struct device *dev,
- bool suspend, bool wakeup)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- int ret = 0;
-
- if (cdns->pdata && cdns->pdata->platform_suspend)
- ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
-
- return ret;
-}
-
-static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- bool wakeup;
- unsigned long flags;
-
- if (cdns->in_lpm)
- return 0;
-
- if (PMSG_IS_AUTO(msg))
- wakeup = true;
- else
- wakeup = device_may_wakeup(dev);
-
- cdns3_set_platform_suspend(cdns->dev, true, wakeup);
- set_phy_power_off(cdns);
- spin_lock_irqsave(&cdns->lock, flags);
- cdns->in_lpm = true;
- spin_unlock_irqrestore(&cdns->lock, flags);
- dev_dbg(cdns->dev, "%s ends\n", __func__);
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_remove);
-static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- int ret;
- unsigned long flags;
-
- if (!cdns->in_lpm)
- return 0;
-
- ret = set_phy_power_on(cdns);
- if (ret)
- return ret;
-
- cdns3_set_platform_suspend(cdns->dev, false, false);
-
- spin_lock_irqsave(&cdns->lock, flags);
- if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg))
- cdns->roles[cdns->role]->resume(cdns, false);
-
- cdns->in_lpm = false;
- spin_unlock_irqrestore(&cdns->lock, flags);
- if (cdns->wakeup_pending) {
- cdns->wakeup_pending = false;
- enable_irq(cdns->wakeup_irq);
- }
- dev_dbg(cdns->dev, "%s ends\n", __func__);
-
- return ret;
-}
-
-static int cdns3_runtime_suspend(struct device *dev)
-{
- return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
-}
-
-static int cdns3_runtime_resume(struct device *dev)
-{
- return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
-}
#ifdef CONFIG_PM_SLEEP
-
-static int cdns3_suspend(struct device *dev)
+int cdns_suspend(struct cdns *cdns)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct device *dev = cdns->dev;
unsigned long flags;
if (pm_runtime_status_suspended(dev))
@@ -703,52 +518,30 @@ static int cdns3_suspend(struct device *dev)
spin_unlock_irqrestore(&cdns->lock, flags);
}
- return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+ return 0;
}
+EXPORT_SYMBOL_GPL(cdns_suspend);
-static int cdns3_resume(struct device *dev)
+int cdns_resume(struct cdns *cdns, u8 set_active)
{
- int ret;
+ struct device *dev = cdns->dev;
- ret = cdns3_controller_resume(dev, PMSG_RESUME);
- if (ret)
- return ret;
+ if (cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, false);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (set_active) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
- return ret;
+ return 0;
}
+EXPORT_SYMBOL_GPL(cdns_resume);
#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops cdns3_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume)
- SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL)
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_cdns3_match[] = {
- { .compatible = "cdns,usb3" },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_cdns3_match);
-#endif
-
-static struct platform_driver cdns3_driver = {
- .probe = cdns3_probe,
- .remove = cdns3_remove,
- .driver = {
- .name = "cdns-usb3",
- .of_match_table = of_match_ptr(of_cdns3_match),
- .pm = &cdns3_pm_ops,
- },
-};
-
-module_platform_driver(cdns3_driver);
-
-MODULE_ALIAS("platform:cdns3");
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 3176f924293a..ab0cb68acd23 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Header File.
+ * Cadence USBSS and USBSSP DRD Header File.
*
* Copyright (C) 2017-2018 NXP
* Copyright (C) 2018-2019 Cadence.
@@ -14,10 +14,10 @@
#ifndef __LINUX_CDNS3_CORE_H
#define __LINUX_CDNS3_CORE_H
-struct cdns3;
+struct cdns;
/**
- * struct cdns3_role_driver - host/gadget role driver
+ * struct cdns_role_driver - host/gadget role driver
* @start: start this role
* @stop: stop this role
* @suspend: suspend callback for this role
@@ -26,18 +26,18 @@ struct cdns3;
* @name: role name string (host/gadget)
* @state: current state
*/
-struct cdns3_role_driver {
- int (*start)(struct cdns3 *cdns);
- void (*stop)(struct cdns3 *cdns);
- int (*suspend)(struct cdns3 *cdns, bool do_wakeup);
- int (*resume)(struct cdns3 *cdns, bool hibernated);
+struct cdns_role_driver {
+ int (*start)(struct cdns *cdns);
+ void (*stop)(struct cdns *cdns);
+ int (*suspend)(struct cdns *cdns, bool do_wakeup);
+ int (*resume)(struct cdns *cdns, bool hibernated);
const char *name;
-#define CDNS3_ROLE_STATE_INACTIVE 0
-#define CDNS3_ROLE_STATE_ACTIVE 1
+#define CDNS_ROLE_STATE_INACTIVE 0
+#define CDNS_ROLE_STATE_ACTIVE 1
int state;
};
-#define CDNS3_XHCI_RESOURCES_NUM 2
+#define CDNS_XHCI_RESOURCES_NUM 2
struct cdns3_platform_data {
int (*platform_suspend)(struct device *dev,
@@ -47,7 +47,7 @@ struct cdns3_platform_data {
};
/**
- * struct cdns3 - Representation of Cadence USB3 DRD controller.
+ * struct cdns - Representation of Cadence USB3 DRD controller.
* @dev: pointer to Cadence device struct
* @xhci_regs: pointer to base of xhci registers
* @xhci_res: the resource for xhci
@@ -55,14 +55,16 @@ struct cdns3_platform_data {
* @otg_res: the resource for otg
* @otg_v0_regs: pointer to base of v0 otg registers
* @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
* @otg_regs: pointer to base of otg registers
+ * @otg_irq_regs: pointer to interrupt registers
* @otg_irq: irq number for otg controller
* @dev_irq: irq number for device controller
* @wakeup_irq: irq number for wakeup event, it is optional
* @roles: array of supported roles for this controller
* @role: current role
- * @host_dev: the child host device pointer for cdns3 core
- * @gadget_dev: the child gadget device pointer for cdns3 core
+ * @host_dev: the child host device pointer for cdns core
+ * @gadget_dev: the child gadget device pointer
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
* @mutex: the mutex for concurrent code at driver
@@ -76,29 +78,33 @@ struct cdns3_platform_data {
* @pdata: platform data from glue layer
* @lock: spinlock structure
* @xhci_plat_data: xhci private data structure pointer
+ * @gadget_init: pointer to gadget initialization function
*/
-struct cdns3 {
+struct cdns {
struct device *dev;
void __iomem *xhci_regs;
- struct resource xhci_res[CDNS3_XHCI_RESOURCES_NUM];
+ struct resource xhci_res[CDNS_XHCI_RESOURCES_NUM];
struct cdns3_usb_regs __iomem *dev_regs;
- struct resource otg_res;
- struct cdns3_otg_legacy_regs *otg_v0_regs;
- struct cdns3_otg_regs *otg_v1_regs;
- struct cdns3_otg_common_regs *otg_regs;
+ struct resource otg_res;
+ struct cdns3_otg_legacy_regs __iomem *otg_v0_regs;
+ struct cdns3_otg_regs __iomem *otg_v1_regs;
+ struct cdnsp_otg_regs __iomem *otg_cdnsp_regs;
+ struct cdns_otg_common_regs __iomem *otg_regs;
+ struct cdns_otg_irq_regs __iomem *otg_irq_regs;
#define CDNS3_CONTROLLER_V0 0
#define CDNS3_CONTROLLER_V1 1
+#define CDNSP_CONTROLLER_V2 2
u32 version;
bool phyrst_a_enable;
int otg_irq;
int dev_irq;
int wakeup_irq;
- struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1];
+ struct cdns_role_driver *roles[USB_ROLE_DEVICE + 1];
enum usb_role role;
struct platform_device *host_dev;
- struct cdns3_device *gadget_dev;
+ void *gadget_dev;
struct phy *usb2_phy;
struct phy *usb3_phy;
/* mutext used in workqueue*/
@@ -110,8 +116,21 @@ struct cdns3 {
struct cdns3_platform_data *pdata;
spinlock_t lock;
struct xhci_plat_priv *xhci_plat_data;
+
+ int (*gadget_init)(struct cdns *cdns);
};
-int cdns3_hw_role_switch(struct cdns3 *cdns);
+int cdns_hw_role_switch(struct cdns *cdns);
+int cdns_init(struct cdns *cdns);
+int cdns_remove(struct cdns *cdns);
+#ifdef CONFIG_PM_SLEEP
+int cdns_resume(struct cdns *cdns, u8 set_active);
+int cdns_suspend(struct cdns *cdns);
+#else /* CONFIG_PM_SLEEP */
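+/* No-op stubs keep glue drivers building when PM sleep support is off. */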
+static inline int cdns_resume(struct cdns *cdns, u8 set_active)
+{ return 0; }
+static inline int cdns_suspend(struct cdns *cdns)
+{ return 0; }
+#endif /* CONFIG_PM_SLEEP */
#endif /* __LINUX_CDNS3_CORE_H */
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 38ccd29e4cde..fa5318ade3e1 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -1,35 +1,33 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
* Copyright (C) 2019 Texas Instruments
*
* Author: Pawel Laszczak <pawell@cadence.com>
* Roger Quadros <rogerq@ti.com>
*
- *
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
-#include "gadget.h"
#include "drd.h"
#include "core.h"
/**
- * cdns3_set_mode - change mode of OTG Core
+ * cdns_set_mode - change mode of OTG Core
* @cdns: pointer to context structure
* @mode: selected mode from cdns_role
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode)
{
+ void __iomem *override_reg;
u32 reg;
switch (mode) {
@@ -39,11 +37,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
break;
case USB_DR_MODE_OTG:
dev_dbg(cdns->dev, "Set controller to OTG mode\n");
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- reg = readl(&cdns->otg_v1_regs->override);
+
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ override_reg = &cdns->otg_cdnsp_regs->override;
+ else if (cdns->version == CDNS3_CONTROLLER_V1)
+ override_reg = &cdns->otg_v1_regs->override;
+ else
+ override_reg = &cdns->otg_v0_regs->ctrl1;
+
+ reg = readl(override_reg);
+
+ if (cdns->version != CDNS3_CONTROLLER_V0)
reg |= OVERRIDE_IDPULLUP;
- writel(reg, &cdns->otg_v1_regs->override);
+ else
+ reg |= OVERRIDE_IDPULLUP_V0;
+
+ writel(reg, override_reg);
+ if (cdns->version == CDNS3_CONTROLLER_V1) {
/*
* Enable work around feature built into the
* controller to address issue with RX Sensitivity
@@ -55,10 +66,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
reg |= PHYRST_CFG_PHYRST_A_ENABLE;
writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
}
- } else {
- reg = readl(&cdns->otg_v0_regs->ctrl1);
- reg |= OVERRIDE_IDPULLUP_V0;
- writel(reg, &cdns->otg_v0_regs->ctrl1);
}
/*
@@ -76,7 +83,7 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
return 0;
}
-int cdns3_get_id(struct cdns3 *cdns)
+int cdns_get_id(struct cdns *cdns)
{
int id;
@@ -86,7 +93,7 @@ int cdns3_get_id(struct cdns3 *cdns)
return id;
}
-int cdns3_get_vbus(struct cdns3 *cdns)
+int cdns_get_vbus(struct cdns *cdns)
{
int vbus;
@@ -96,64 +103,95 @@ int cdns3_get_vbus(struct cdns3 *cdns)
return vbus;
}
-bool cdns3_is_host(struct cdns3 *cdns)
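+/*
+ * On CDNSP controllers the OTG override register provides a session-valid
+ * select bit; cdns_clear_vbus()/cdns_set_vbus() toggle it so the controller
+ * ignores or honours the sensed VBUS level.
+ */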
+void cdns_clear_vbus(struct cdns *cdns)
+{
+ u32 reg;
+
+ if (cdns->version != CDNSP_CONTROLLER_V2)
+ return;
+
+ reg = readl(&cdns->otg_cdnsp_regs->override);
+ reg |= OVERRIDE_SESS_VLD_SEL;
+ writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_clear_vbus);
+
+void cdns_set_vbus(struct cdns *cdns)
+{
+ u32 reg;
+
+ if (cdns->version != CDNSP_CONTROLLER_V2)
+ return;
+
+ reg = readl(&cdns->otg_cdnsp_regs->override);
+ reg &= ~OVERRIDE_SESS_VLD_SEL;
+ writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_set_vbus);
+
+bool cdns_is_host(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_HOST)
return true;
- else if (cdns3_get_id(cdns) == CDNS3_ID_HOST)
+ else if (cdns_get_id(cdns) == CDNS3_ID_HOST)
return true;
return false;
}
-bool cdns3_is_device(struct cdns3 *cdns)
+bool cdns_is_device(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL)
return true;
else if (cdns->dr_mode == USB_DR_MODE_OTG)
- if (cdns3_get_id(cdns) == CDNS3_ID_PERIPHERAL)
+ if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL)
return true;
return false;
}
/**
- * cdns3_otg_disable_irq - Disable all OTG interrupts
+ * cdns_otg_disable_irq - Disable all OTG interrupts
* @cdns: Pointer to controller context structure
*/
-static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_regs->ien);
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
- * cdns3_otg_enable_irq - enable id and sess_valid interrupts
+ * cdns_otg_enable_irq - enable id and sess_valid interrupts
* @cdns: Pointer to controller context structure
*/
-static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+static void cdns_otg_enable_irq(struct cdns *cdns)
{
writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
- OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
+ OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
}
/**
- * cdns3_drd_host_on - start host.
+ * cdns_drd_host_on - start host.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno.
*/
-int cdns3_drd_host_on(struct cdns3 *cdns)
+int cdns_drd_host_on(struct cdns *cdns)
{
- u32 val;
+ u32 val, ready_bit;
int ret;
/* Enable host mode. */
writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
&cdns->otg_regs->cmd);
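+	/* CDNS3 and CDNSP report xHCI readiness in different OTGSTS bits. */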
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_XHCI_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_XHCI_READY;
+
dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_XHCI_READY, 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
@@ -163,10 +201,10 @@ int cdns3_drd_host_on(struct cdns3 *cdns)
}
/**
- * cdns3_drd_host_off - stop host.
+ * cdns_drd_host_off - stop host.
* @cdns: Pointer to controller context structure.
*/
-void cdns3_drd_host_off(struct cdns3 *cdns)
+void cdns_drd_host_off(struct cdns *cdns)
{
u32 val;
@@ -182,24 +220,29 @@ void cdns3_drd_host_off(struct cdns3 *cdns)
}
/**
- * cdns3_drd_gadget_on - start gadget.
+ * cdns_drd_gadget_on - start gadget.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_drd_gadget_on(struct cdns3 *cdns)
+int cdns_drd_gadget_on(struct cdns *cdns)
{
- int ret, val;
u32 reg = OTGCMD_OTG_DIS;
+ u32 ready_bit;
+ int ret, val;
/* switch OTG core */
writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_DEV_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_DEV_READY;
+
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_DEV_READY,
- 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret) {
dev_err(cdns->dev, "timeout waiting for dev_ready\n");
return ret;
@@ -208,12 +251,13 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns)
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_on);
/**
- * cdns3_drd_gadget_off - stop gadget.
+ * cdns_drd_gadget_off - stop gadget.
* @cdns: Pointer to controller context structure.
*/
-void cdns3_drd_gadget_off(struct cdns3 *cdns)
+void cdns_drd_gadget_off(struct cdns *cdns)
{
u32 val;
@@ -231,49 +275,50 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns)
1, 2000000);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_off);
/**
- * cdns3_init_otg_mode - initialize drd controller
+ * cdns_init_otg_mode - initialize drd controller
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_init_otg_mode(struct cdns3 *cdns)
+static int cdns_init_otg_mode(struct cdns *cdns)
{
int ret;
- cdns3_otg_disable_irq(cdns);
+ cdns_otg_disable_irq(cdns);
/* clear all interrupts */
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
- ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_OTG);
if (ret)
return ret;
- cdns3_otg_enable_irq(cdns);
+ cdns_otg_enable_irq(cdns);
return 0;
}
/**
- * cdns3_drd_update_mode - initialize mode of operation
+ * cdns_drd_update_mode - initialize mode of operation
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_drd_update_mode(struct cdns3 *cdns)
+int cdns_drd_update_mode(struct cdns *cdns)
{
int ret;
switch (cdns->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
break;
case USB_DR_MODE_HOST:
- ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_HOST);
break;
case USB_DR_MODE_OTG:
- ret = cdns3_init_otg_mode(cdns);
+ ret = cdns_init_otg_mode(cdns);
break;
default:
dev_err(cdns->dev, "Unsupported mode of operation %d\n",
@@ -284,27 +329,27 @@ int cdns3_drd_update_mode(struct cdns3 *cdns)
return ret;
}
-static irqreturn_t cdns3_drd_thread_irq(int irq, void *data)
+static irqreturn_t cdns_drd_thread_irq(int irq, void *data)
{
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
- cdns3_hw_role_switch(cdns);
+ cdns_hw_role_switch(cdns);
return IRQ_HANDLED;
}
/**
- * cdns3_drd_irq - interrupt handler for OTG events
+ * cdns_drd_irq - interrupt handler for OTG events
*
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * @irq: irq number for cdns core device
+ * @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
-static irqreturn_t cdns3_drd_irq(int irq, void *data)
+static irqreturn_t cdns_drd_irq(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
u32 reg;
if (cdns->dr_mode != USB_DR_MODE_OTG)
@@ -313,30 +358,30 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
if (cdns->in_lpm)
return ret;
- reg = readl(&cdns->otg_regs->ivect);
+ reg = readl(&cdns->otg_irq_regs->ivect);
if (!reg)
return IRQ_NONE;
if (reg & OTGIEN_ID_CHANGE_INT) {
dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n",
- cdns3_get_id(cdns));
+ cdns_get_id(cdns));
ret = IRQ_WAKE_THREAD;
}
if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) {
dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n",
- cdns3_get_vbus(cdns));
+ cdns_get_vbus(cdns));
ret = IRQ_WAKE_THREAD;
}
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
return ret;
}
-int cdns3_drd_init(struct cdns3 *cdns)
+int cdns_drd_init(struct cdns *cdns)
{
void __iomem *regs;
u32 state;
@@ -347,28 +392,43 @@ int cdns3_drd_init(struct cdns3 *cdns)
return PTR_ERR(regs);
/* Detection of DRD version. Controller has been released
- * in two versions. Both are similar, but they have same changes
- * in register maps.
- * The first register in old version is command register and it's read
- * only, so driver should read 0 from it. On the other hand, in v1
- * the first register contains device ID number which is not set to 0.
- * Driver uses this fact to detect the proper version of
+	 * in three versions. All are very similar and are software compatible,
+	 * but they have some changes in their register maps.
+	 * The first register in the oldest version is the command register and
+	 * it's read only, so the driver should read 0 from it. On the other
+	 * hand, in v1 and v2 the first register contains the device ID number,
+	 * which is never 0. The driver uses this fact to detect the proper
+	 * version of the controller.
*/
cdns->otg_v0_regs = regs;
if (!readl(&cdns->otg_v0_regs->cmd)) {
cdns->version = CDNS3_CONTROLLER_V0;
cdns->otg_v1_regs = NULL;
+ cdns->otg_cdnsp_regs = NULL;
cdns->otg_regs = regs;
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v0_regs->ien;
writel(1, &cdns->otg_v0_regs->simulate);
dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
cdns->otg_v1_regs = regs;
- cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
- cdns->version = CDNS3_CONTROLLER_V1;
- writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->otg_cdnsp_regs = regs;
+
+ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+
+ if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+ } else {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
+ }
+
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
@@ -378,17 +438,24 @@ int cdns3_drd_init(struct cdns3 *cdns)
/* Update dr_mode according to STRAP configuration. */
cdns->dr_mode = USB_DR_MODE_OTG;
- if (state == OTGSTS_STRAP_HOST) {
+
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_HOST) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_HOST)) {
dev_dbg(cdns->dev, "Controller strapped to HOST\n");
cdns->dr_mode = USB_DR_MODE_HOST;
- } else if (state == OTGSTS_STRAP_GADGET) {
+ } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_GADGET) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_GADGET)) {
dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
}
ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
- cdns3_drd_irq,
- cdns3_drd_thread_irq,
+ cdns_drd_irq,
+ cdns_drd_thread_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
@@ -405,8 +472,9 @@ int cdns3_drd_init(struct cdns3 *cdns)
return 0;
}
-int cdns3_drd_exit(struct cdns3 *cdns)
+int cdns_drd_exit(struct cdns *cdns)
{
- cdns3_otg_disable_irq(cdns);
+ cdns_otg_disable_irq(cdns);
+
return 0;
}
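
The bring-up paths above (cdns_drd_host_on()/cdns_drd_gadget_on()) all follow the same shape: pick a version-dependent ready bit, then busy-poll the common status register. A minimal sketch of that pattern, assuming only the <linux/iopoll.h> semantics used in the diff (poll every 1 us, give up after 100 ms); wait_for_ready() itself is illustrative, not part of the patch:

    /* Sketch: poll the OTG status register for a version-specific
     * ready bit. The ready_bit selection mirrors cdns_drd_host_on().
     */
    static int wait_for_ready(struct cdns *cdns, u32 ready_bit)
    {
            u32 val;

            /* 1 us poll interval, 100 ms timeout, safe in atomic context */
            return readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
                                             val & ready_bit, 1, 100000);
    }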
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index f1ccae285a16..9724acdecbbb 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USB3 DRD header file.
+ * Cadence USB3 and USBSSP DRD header file.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
*
* Author: Pawel Laszczak <pawell@cadence.com>
*/
@@ -10,10 +10,9 @@
#define __LINUX_CDNS3_DRD
#include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
#include "core.h"
-/* DRD register interface for version v1. */
+/* DRD register interface for version v1 of cdns3 driver. */
struct cdns3_otg_regs {
__le32 did;
__le32 rid;
@@ -38,7 +37,7 @@ struct cdns3_otg_regs {
__le32 ctrl2;
};
-/* DRD register interface for version v0. */
+/* DRD register interface for version v0 of cdns3 driver. */
struct cdns3_otg_legacy_regs {
__le32 cmd;
__le32 sts;
@@ -57,14 +56,45 @@ struct cdns3_otg_legacy_regs {
__le32 ctrl1;
};
+/* DRD register interface for cdnsp driver */
+struct cdnsp_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 cfgs1;
+ __le32 cfgs2;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 ien;
+ __le32 ivect;
+ __le32 tmr;
+ __le32 simulate;
+ __le32 adpbc_sts;
+ __le32 adp_ramp_time;
+ __le32 adpbc_ctrl1;
+ __le32 adpbc_ctrl2;
+ __le32 override;
+ __le32 vbusvalid_dbnc_cfg;
+ __le32 sessvalid_dbnc_cfg;
+ __le32 susp_timing_ctrl;
+};
+
+#define OTG_CDNSP_DID 0x0004034E
+
/*
- * Common registers interface for both version of DRD.
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
*/
-struct cdns3_otg_common_regs {
+struct cdns_otg_common_regs {
__le32 cmd;
__le32 sts;
__le32 state;
- __le32 different1;
+};
+
+/*
+ * Interrupt-related registers. These registers are mapped at a
+ * different location on the CDNSP controller.
+ */
+struct cdns_otg_irq_regs {
__le32 ien;
__le32 ivect;
};
@@ -92,9 +122,9 @@ struct cdns3_otg_common_regs {
#define OTGCMD_DEV_BUS_DROP BIT(8)
/* Drop the bus for Host mode*/
#define OTGCMD_HOST_BUS_DROP BIT(9)
-/* Power Down USBSS-DEV. */
+/* Power Down USBSS-DEV - only for CDNS3. */
#define OTGCMD_DEV_POWER_OFF BIT(11)
-/* Power Down CDNSXHCI. */
+/* Power Down CDNSXHCI - only for CDNS3. */
#define OTGCMD_HOST_POWER_OFF BIT(12)
/* OTGIEN - bitmasks */
@@ -123,20 +153,31 @@ struct cdns3_otg_common_regs {
#define OTGSTS_OTG_NRDY_MASK BIT(11)
#define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK)
/*
- * Value of the strap pins.
+ * Value of the strap pins for:
+ * CDNS3:
* 000 - no default configuration
 * 010 - Controller initially configured as Host
* 100 - Controller initially configured as Device
+ * CDNSP:
+ * 000 - No default configuration.
+ * 010 - Controller initially configured as Host.
+ * 100 - Controller initially configured as Device.
*/
#define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12)
#define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00
#define OTGSTS_STRAP_HOST_OTG 0x01
#define OTGSTS_STRAP_HOST 0x02
#define OTGSTS_STRAP_GADGET 0x04
+#define OTGSTS_CDNSP_STRAP_HOST 0x01
+#define OTGSTS_CDNSP_STRAP_GADGET 0x02
+
/* Host mode is turned on. */
-#define OTGSTS_XHCI_READY BIT(26)
+#define OTGSTS_CDNS3_XHCI_READY BIT(26)
+#define OTGSTS_CDNSP_XHCI_READY BIT(27)
+
/* "Device mode is turned on .*/
-#define OTGSTS_DEV_READY BIT(27)
+#define OTGSTS_CDNS3_DEV_READY BIT(27)
+#define OTGSTS_CDNSP_DEV_READY BIT(26)
/* OTGSTATE- bitmasks */
#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0)
@@ -152,6 +193,8 @@ struct cdns3_otg_common_regs {
#define OVERRIDE_IDPULLUP BIT(0)
/* Only for CDNS3_CONTROLLER_V0 version */
#define OVERRIDE_IDPULLUP_V0 BIT(24)
+/* Vbusvalid/Sessvalid override select. */
+#define OVERRIDE_SESS_VLD_SEL BIT(10)
/* PHYRST_CFG - bitmasks */
#define PHYRST_CFG_PHYRST_A_ENABLE BIT(0)
@@ -159,17 +202,18 @@ struct cdns3_otg_common_regs {
#define CDNS3_ID_PERIPHERAL 1
#define CDNS3_ID_HOST 0
-bool cdns3_is_host(struct cdns3 *cdns);
-bool cdns3_is_device(struct cdns3 *cdns);
-int cdns3_get_id(struct cdns3 *cdns);
-int cdns3_get_vbus(struct cdns3 *cdns);
-int cdns3_drd_init(struct cdns3 *cdns);
-int cdns3_drd_exit(struct cdns3 *cdns);
-int cdns3_drd_update_mode(struct cdns3 *cdns);
-int cdns3_drd_gadget_on(struct cdns3 *cdns);
-void cdns3_drd_gadget_off(struct cdns3 *cdns);
-int cdns3_drd_host_on(struct cdns3 *cdns);
-void cdns3_drd_host_off(struct cdns3 *cdns);
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
+bool cdns_is_host(struct cdns *cdns);
+bool cdns_is_device(struct cdns *cdns);
+int cdns_get_id(struct cdns *cdns);
+int cdns_get_vbus(struct cdns *cdns);
+void cdns_clear_vbus(struct cdns *cdns);
+void cdns_set_vbus(struct cdns *cdns);
+int cdns_drd_init(struct cdns *cdns);
+int cdns_drd_exit(struct cdns *cdns);
+int cdns_drd_update_mode(struct cdns *cdns);
+int cdns_drd_gadget_on(struct cdns *cdns);
+void cdns_drd_gadget_off(struct cdns *cdns);
+int cdns_drd_host_on(struct cdns *cdns);
+void cdns_drd_host_off(struct cdns *cdns);
#endif /* __LINUX_CDNS3_DRD */
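
The strap handling in cdns_drd_init() boils down to extracting bits 14:12 of the status register with OTGSTS_STRAP() and comparing the result against per-family values, since CDNS3 and CDNSP encode the host/gadget straps differently. A hedged sketch of that decode (strap_to_dr_mode() is illustrative; the constants are the ones defined above):

    /* Map the 3-bit strap field to a dr_mode for either controller
     * family, mirroring the checks added to cdns_drd_init().
     */
    static enum usb_dr_mode strap_to_dr_mode(struct cdns *cdns, u32 state)
    {
            bool cdnsp = cdns->version == CDNSP_CONTROLLER_V2;

            if (state == (cdnsp ? OTGSTS_CDNSP_STRAP_HOST :
                                  OTGSTS_STRAP_HOST))
                    return USB_DR_MODE_HOST;
            if (state == (cdnsp ? OTGSTS_CDNSP_STRAP_GADGET :
                                  OTGSTS_STRAP_GADGET))
                    return USB_DR_MODE_PERIPHERAL;

            return USB_DR_MODE_OTG; /* no default configuration strapped */
    }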
diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h
index 702c5a267a92..c37b6269b001 100644
--- a/drivers/usb/cdns3/gadget-export.h
+++ b/drivers/usb/cdns3/gadget-export.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Driver - Gadget Export APIs.
+ * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs.
*
* Copyright (C) 2017 NXP
* Copyright (C) 2017-2018 NXP
@@ -10,16 +10,28 @@
#ifndef __LINUX_CDNS3_GADGET_EXPORT
#define __LINUX_CDNS3_GADGET_EXPORT
-#ifdef CONFIG_USB_CDNS3_GADGET
+#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
-int cdns3_gadget_init(struct cdns3 *cdns);
+int cdnsp_gadget_init(struct cdns *cdns);
#else
-static inline int cdns3_gadget_init(struct cdns3 *cdns)
+static inline int cdnsp_gadget_init(struct cdns *cdns)
{
return -ENXIO;
}
-#endif
+#endif /* CONFIG_USB_CDNSP_GADGET */
+
+#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET)
+
+int cdns3_gadget_init(struct cdns *cdns);
+#else
+
+static inline int cdns3_gadget_init(struct cdns *cdns)
+{
+ return -ENXIO;
+}
+
+#endif /* CONFIG_USB_CDNS3_GADGET */
#endif /* __LINUX_CDNS3_GADGET_EXPORT */
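
Both export headers now use the same kernel idiom: IS_ENABLED(CONFIG_X) is true for built-in (=y) and modular (=m) builds, and a static inline stub returning -ENXIO stands in when the feature is compiled out, so callers need no #ifdefs of their own. The generic shape, with CONFIG_FOO/foo_init() as placeholders:

    #if IS_ENABLED(CONFIG_FOO)              /* =y or =m */
    int foo_init(struct cdns *cdns);
    #else
    static inline int foo_init(struct cdns *cdns)
    {
            return -ENXIO;                  /* feature compiled out */
    }
    #endif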
diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
index 26041718a086..cf92173ecf00 100644
--- a/drivers/usb/cdns3/host-export.h
+++ b/drivers/usb/cdns3/host-export.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Driver - Host Export APIs
+ * Cadence USBSS and USBSSP DRD Driver - Host Export APIs
*
* Copyright (C) 2017-2018 NXP
*
@@ -9,25 +9,19 @@
#ifndef __LINUX_CDNS3_HOST_EXPORT
#define __LINUX_CDNS3_HOST_EXPORT
-struct usb_hcd;
-#ifdef CONFIG_USB_CDNS3_HOST
+#if IS_ENABLED(CONFIG_USB_CDNS_HOST)
-int cdns3_host_init(struct cdns3 *cdns);
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+int cdns_host_init(struct cdns *cdns);
#else
-static inline int cdns3_host_init(struct cdns3 *cdns)
+static inline int cdns_host_init(struct cdns *cdns)
{
return -ENXIO;
}
-static inline void cdns3_host_exit(struct cdns3 *cdns) { }
-static inline int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
+static inline void cdns_host_exit(struct cdns *cdns) { }
-#endif /* CONFIG_USB_CDNS3_HOST */
+#endif /* USB_CDNS_HOST */
#endif /* __LINUX_CDNS3_HOST_EXPORT */
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index ec89f2e5430f..84dadfa726aa 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver - host side
+ * Cadence USBSS and USBSSP DRD Driver - host side
*
* Copyright (C) 2018-2019 Cadence Design Systems.
* Copyright (C) 2017-2018 NXP
@@ -23,18 +23,20 @@
#define CFG_RXDET_P3_EN BIT(15)
#define LPM_2_STB_SWITCH_EN BIT(25)
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+
static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
.suspend_quirk = xhci_cdns3_suspend_quirk,
};
-static int __cdns3_host_init(struct cdns3 *cdns)
+static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
int ret;
struct usb_hcd *hcd;
- cdns3_drd_host_on(cdns);
+ cdns_drd_host_on(cdns);
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -46,7 +48,7 @@ static int __cdns3_host_init(struct cdns3 *cdns)
cdns->host_dev = xhci;
ret = platform_device_add_resources(xhci, cdns->xhci_res,
- CDNS3_XHCI_RESOURCES_NUM);
+ CDNS_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
goto err1;
@@ -87,7 +89,7 @@ err1:
return ret;
}
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 value;
@@ -113,25 +115,25 @@ int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
return 0;
}
-static void cdns3_host_exit(struct cdns3 *cdns)
+static void cdns_host_exit(struct cdns *cdns)
{
kfree(cdns->xhci_plat_data);
platform_device_unregister(cdns->host_dev);
cdns->host_dev = NULL;
- cdns3_drd_host_off(cdns);
+ cdns_drd_host_off(cdns);
}
-int cdns3_host_init(struct cdns3 *cdns)
+int cdns_host_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
- rdrv->start = __cdns3_host_init;
- rdrv->stop = cdns3_host_exit;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->start = __cdns_host_init;
+ rdrv->stop = cdns_host_exit;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "host";
cdns->roles[USB_ROLE_HOST] = rdrv;
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 8bafcfc6080d..661818e8fed6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -53,9 +53,8 @@ config USB_CHIPIDEA_GENERIC
default USB_CHIPIDEA
config USB_CHIPIDEA_TEGRA
- tristate "Enable Tegra UDC glue driver" if EMBEDDED
+ tristate "Enable Tegra USB glue driver" if EMBEDDED
depends on OF
- depends on USB_CHIPIDEA_UDC
default USB_CHIPIDEA
endif
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 7455df0ede49..90f2a8b786be 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -4,57 +4,278 @@
*/
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/reset.h>
+#include <linux/usb.h>
#include <linux/usb/chipidea.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/of.h>
+#include <linux/usb/phy.h>
+
+#include "../host/ehci.h"
#include "ci.h"
-struct tegra_udc {
+struct tegra_usb {
struct ci_hdrc_platform_data data;
struct platform_device *dev;
+ const struct tegra_usb_soc_info *soc;
struct usb_phy *phy;
struct clk *clk;
+
+ bool needs_double_reset;
};
-struct tegra_udc_soc_info {
+struct tegra_usb_soc_info {
unsigned long flags;
+ unsigned int txfifothresh;
+ enum usb_dr_mode dr_mode;
};
-static const struct tegra_udc_soc_info tegra_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
+static const struct tegra_usb_soc_info tegra20_ehci_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_HOST,
+ .txfifothresh = 10,
};
-static const struct of_device_id tegra_udc_of_match[] = {
+static const struct tegra_usb_soc_info tegra30_ehci_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_HOST,
+ .txfifothresh = 16,
+};
+
+static const struct tegra_usb_soc_info tegra20_udc_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_UNKNOWN,
+ .txfifothresh = 10,
+};
+
+static const struct tegra_usb_soc_info tegra30_udc_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_UNKNOWN,
+ .txfifothresh = 16,
+};
+
+static const struct of_device_id tegra_usb_of_match[] = {
{
+ .compatible = "nvidia,tegra20-ehci",
+ .data = &tegra20_ehci_soc_info,
+ }, {
+ .compatible = "nvidia,tegra30-ehci",
+ .data = &tegra30_ehci_soc_info,
+ }, {
.compatible = "nvidia,tegra20-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra20_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
/* sentinel */
}
};
-MODULE_DEVICE_TABLE(of, tegra_udc_of_match);
+MODULE_DEVICE_TABLE(of, tegra_usb_of_match);
+
+static int tegra_usb_reset_controller(struct device *dev)
+{
+ struct reset_control *rst, *rst_utmi;
+ struct device_node *phy_np;
+ int err;
+
+ rst = devm_reset_control_get_shared(dev, "usb");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "can't get ehci reset: %pe\n", rst);
+ return PTR_ERR(rst);
+ }
+
+ phy_np = of_parse_phandle(dev->of_node, "nvidia,phy", 0);
+ if (!phy_np)
+ return -ENOENT;
+
+ /*
+ * The 1st USB controller contains some UTMI pad registers that are
+ * global for all the controllers on the chip. Those registers are
+ * also cleared when reset is asserted to the 1st controller.
+ */
+ rst_utmi = of_reset_control_get_shared(phy_np, "utmi-pads");
+ if (IS_ERR(rst_utmi)) {
+ dev_warn(dev, "can't get utmi-pads reset from the PHY\n");
+ dev_warn(dev, "continuing, but please update your DT\n");
+ } else {
+ /*
+		 * The PHY driver performs the UTMI-pads reset in the
+		 * case of a non-legacy DT.
+ */
+ reset_control_put(rst_utmi);
+ }
+
+ of_node_put(phy_np);
+
+ /* reset control is shared, hence initialize it first */
+ err = reset_control_deassert(rst);
+ if (err)
+ return err;
+
+ err = reset_control_assert(rst);
+ if (err)
+ return err;
+
+ udelay(1);
+
+ err = reset_control_deassert(rst);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_usb_notify_event(struct ci_hdrc *ci, unsigned int event)
+{
+ struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
+ struct ehci_hcd *ehci;
+
+ switch (event) {
+ case CI_HDRC_CONTROLLER_RESET_EVENT:
+ if (ci->hcd) {
+ ehci = hcd_to_ehci(ci->hcd);
+ ehci->has_tdi_phy_lpm = false;
+ ehci_writel(ehci, usb->soc->txfifothresh << 16,
+ &ehci->regs->txfill_tuning);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int tegra_usb_internal_port_reset(struct ehci_hcd *ehci,
+ u32 __iomem *portsc_reg,
+ unsigned long *flags)
+{
+ u32 saved_usbintr, temp;
+ unsigned int i, tries;
+ int retval = 0;
+
+ saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
+ /* disable USB interrupt */
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ spin_unlock_irqrestore(&ehci->lock, *flags);
+
+ /*
+	 * Here we may have to issue Port Reset up to
+	 * twice for the Port Enable bit to become set.
+ */
+ for (i = 0; i < 2; i++) {
+ temp = ehci_readl(ehci, portsc_reg);
+ temp |= PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ fsleep(10000);
+ temp &= ~PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ fsleep(1000);
+ tries = 100;
+ do {
+ fsleep(1000);
+ /*
+			 * The Port Enable bit is expected to be
+			 * set within 2 ms of waiting. USB1 usually
+			 * takes an extra 45 ms; for safety, we use
+			 * a 100 ms timeout.
+ */
+ temp = ehci_readl(ehci, portsc_reg);
+ } while (!(temp & PORT_PE) && tries--);
+ if (temp & PORT_PE)
+ break;
+ }
+ if (i == 2)
+ retval = -ETIMEDOUT;
+
+ /*
+ * Clear Connect Status Change bit if it's set.
+ * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
+ */
+ if (temp & PORT_CSC)
+ ehci_writel(ehci, PORT_CSC, portsc_reg);
+
+ /*
+ * Write to clear any interrupt status bits that might be set
+ * during port reset.
+ */
+ temp = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, temp, &ehci->regs->status);
+
+ /* restore original interrupt-enable bits */
+ spin_lock_irqsave(&ehci->lock, *flags);
+ ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
+
+ return retval;
+}
+
+static int tegra_ehci_hub_control(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength,
+ bool *done, unsigned long *flags)
+{
+ struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
+ struct ehci_hcd *ehci = hcd_to_ehci(ci->hcd);
+ u32 __iomem *status_reg;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue != USB_PORT_FEAT_RESET || !usb->needs_double_reset)
+ break;
+
+ /* for USB1 port we need to issue Port Reset twice internally */
+ retval = tegra_usb_internal_port_reset(ehci, status_reg, flags);
+ *done = true;
+ break;
+ }
+
+ return retval;
+}
-static int tegra_udc_probe(struct platform_device *pdev)
+static void tegra_usb_enter_lpm(struct ci_hdrc *ci, bool enable)
{
- const struct tegra_udc_soc_info *soc;
- struct tegra_udc *udc;
+ /*
+	 * Touching any register which belongs to the AHB clock domain
+	 * will hang the CPU if the USB controller is put into low power
+	 * mode, because the AHB USB clock is gated on Tegra in LPM.
+	 *
+	 * The Tegra PHY has a separate register for checking the clock
+	 * status, and usb_phy_set_suspend() takes care of gating/ungating
+	 * the clocks and restoring the PHY state on Tegra. Hence the
+	 * DEVLC/PORTSC registers shouldn't be touched directly by the
+	 * CI driver.
+ */
+ usb_phy_set_suspend(ci->usb_phy, enable);
+}
+
+static int tegra_usb_probe(struct platform_device *pdev)
+{
+ const struct tegra_usb_soc_info *soc;
+ struct tegra_usb *usb;
int err;
- udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
- if (!udc)
+ usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
+ if (!usb)
return -ENOMEM;
soc = of_device_get_match_data(&pdev->dev);
@@ -63,70 +284,99 @@ static int tegra_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- udc->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
- if (IS_ERR(udc->phy)) {
- err = PTR_ERR(udc->phy);
+ usb->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
+ if (IS_ERR(usb->phy)) {
+ err = PTR_ERR(usb->phy);
dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
return err;
}
- udc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(udc->clk)) {
- err = PTR_ERR(udc->clk);
+ usb->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usb->clk)) {
+ err = PTR_ERR(usb->clk);
dev_err(&pdev->dev, "failed to get clock: %d\n", err);
return err;
}
- err = clk_prepare_enable(udc->clk);
+ err = clk_prepare_enable(usb->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
return err;
}
- /* setup and register ChipIdea HDRC device */
- udc->data.name = "tegra-udc";
- udc->data.flags = soc->flags;
- udc->data.usb_phy = udc->phy;
- udc->data.capoffset = DEF_CAPOFFSET;
-
- udc->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
- pdev->num_resources, &udc->data);
- if (IS_ERR(udc->dev)) {
- err = PTR_ERR(udc->dev);
- dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
+ if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
+ usb->needs_double_reset = true;
+
+ err = tegra_usb_reset_controller(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to reset controller: %d\n", err);
goto fail_power_off;
}
- platform_set_drvdata(pdev, udc);
+ /*
+	 * USB controller registers shouldn't be touched before the PHY
+	 * is initialized, otherwise the CPU will hang because the clocks
+	 * are gated. The PHY driver controls gating of the internal USB
+	 * clocks on Tegra.
+ */
+ err = usb_phy_init(usb->phy);
+ if (err)
+ goto fail_power_off;
+
+ platform_set_drvdata(pdev, usb);
+
+ /* setup and register ChipIdea HDRC device */
+ usb->soc = soc;
+ usb->data.name = "tegra-usb";
+ usb->data.flags = soc->flags;
+ usb->data.usb_phy = usb->phy;
+ usb->data.dr_mode = soc->dr_mode;
+ usb->data.capoffset = DEF_CAPOFFSET;
+ usb->data.enter_lpm = tegra_usb_enter_lpm;
+ usb->data.hub_control = tegra_ehci_hub_control;
+ usb->data.notify_event = tegra_usb_notify_event;
+
+ /* Tegra PHY driver currently doesn't support LPM for ULPI */
+ if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_ULPI)
+ usb->data.flags &= ~CI_HDRC_SUPPORTS_RUNTIME_PM;
+
+ usb->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
+ pdev->num_resources, &usb->data);
+ if (IS_ERR(usb->dev)) {
+ err = PTR_ERR(usb->dev);
+ dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
+ goto phy_shutdown;
+ }
return 0;
+phy_shutdown:
+ usb_phy_shutdown(usb->phy);
fail_power_off:
- clk_disable_unprepare(udc->clk);
+ clk_disable_unprepare(usb->clk);
return err;
}
-static int tegra_udc_remove(struct platform_device *pdev)
+static int tegra_usb_remove(struct platform_device *pdev)
{
- struct tegra_udc *udc = platform_get_drvdata(pdev);
+ struct tegra_usb *usb = platform_get_drvdata(pdev);
- ci_hdrc_remove_device(udc->dev);
- clk_disable_unprepare(udc->clk);
+ ci_hdrc_remove_device(usb->dev);
+ usb_phy_shutdown(usb->phy);
+ clk_disable_unprepare(usb->clk);
return 0;
}
-static struct platform_driver tegra_udc_driver = {
+static struct platform_driver tegra_usb_driver = {
.driver = {
- .name = "tegra-udc",
- .of_match_table = tegra_udc_of_match,
+ .name = "tegra-usb",
+ .of_match_table = tegra_usb_of_match,
},
- .probe = tegra_udc_probe,
- .remove = tegra_udc_remove,
+ .probe = tegra_usb_probe,
+ .remove = tegra_usb_remove,
};
-module_platform_driver(tegra_udc_driver);
+module_platform_driver(tegra_usb_driver);
-MODULE_DESCRIPTION("NVIDIA Tegra USB device mode driver");
+MODULE_DESCRIPTION("NVIDIA Tegra USB driver");
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_ALIAS("platform:tegra-udc");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index aa40e510b806..3f6c21406dbd 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -195,7 +195,7 @@ static void hw_wait_phy_stable(void)
}
/* The PHY enters/leaves low power mode */
-static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+static void ci_hdrc_enter_lpm_common(struct ci_hdrc *ci, bool enable)
{
enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
@@ -208,6 +208,11 @@ static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
0);
}
+static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+{
+ return ci->platdata->enter_lpm(ci, enable);
+}
+
static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
{
u32 reg;
@@ -790,6 +795,9 @@ static int ci_get_platdata(struct device *dev,
platdata->pins_device = p;
}
+ if (!platdata->enter_lpm)
+ platdata->enter_lpm = ci_hdrc_enter_lpm_common;
+
return 0;
}
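
With this hook in place, core.c dispatches low-power entry through platdata->enter_lpm and falls back to the common PORTSC/DEVLC implementation only when the glue driver leaves the callback NULL. A sketch of a glue-side override in the spirit of the Tegra code above (my_enter_lpm() and pdata are illustrative):

    /* Keep the CI core away from AHB-domain registers and let the PHY
     * driver gate/ungate the clocks instead.
     */
    static void my_enter_lpm(struct ci_hdrc *ci, bool enable)
    {
            usb_phy_set_suspend(ci->usb_phy, enable);
    }

    /* in the glue driver's probe: */
    pdata.enter_lpm = my_enter_lpm;     /* NULL selects the common helper */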
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 48e4a5ca1835..67247d2ac07a 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -29,6 +29,12 @@ struct ehci_ci_priv {
bool enabled;
};
+struct ci_hdrc_dma_aligned_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -160,14 +166,15 @@ static int host_start(struct ci_hdrc *ci)
pinctrl_select_state(ci->platdata->pctl,
ci->platdata->pins_host);
+ ci->hcd = hcd;
+
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
+ ci->hcd = NULL;
goto disable_reg;
} else {
struct usb_otg *otg = &ci->otg;
- ci->hcd = hcd;
-
if (ci_otg_is_fsm_mode(ci)) {
otg->host = &hcd->self;
hcd->self.otg_port = 1;
@@ -237,6 +244,7 @@ static int ci_ehci_hub_control(
u32 temp;
unsigned long flags;
int retval = 0;
+ bool done = false;
struct device *dev = hcd->self.controller;
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -244,6 +252,13 @@ static int ci_ehci_hub_control(
spin_lock_irqsave(&ehci->lock, flags);
+ if (ci->platdata->hub_control) {
+ retval = ci->platdata->hub_control(ci, typeReq, wValue, wIndex,
+ buf, wLength, &done, &flags);
+ if (done)
+ goto done;
+ }
+
if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
temp = ehci_readl(ehci, status_reg);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
@@ -349,6 +364,86 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
+{
+ struct ci_hdrc_dma_aligned_buffer *temp;
+ size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer,
+ struct ci_hdrc_dma_aligned_buffer, data);
+
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(temp->old_xfer_buffer, temp->data, length);
+ }
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+ const unsigned int ci_hdrc_usb_dma_align = 32;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
+ return 0;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct ci_hdrc_dma_aligned_buffer) +
+ ci_hdrc_usb_dma_align - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct dma_aligned_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = ci_hdrc_alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ ci_hdrc_free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ ci_hdrc_free_dma_aligned_buffer(urb);
+}
+
int ci_hdrc_host_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
@@ -366,6 +461,11 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
+ if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA) {
+ ci_ehci_hc_driver.map_urb_for_dma = ci_hdrc_map_urb_for_dma;
+ ci_ehci_hc_driver.unmap_urb_for_dma = ci_hdrc_unmap_urb_for_dma;
+ }
+
return 0;
}
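
The bounce-buffer helpers above depend on PTR_ALIGN() placing the header so that its trailing data[] member lands on a 32-byte boundary: align the address just past the header up, then step back one header. A self-contained user-space sketch of the same arithmetic (the names and the ALIGN_UP() macro are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct hdr {
            void *kmalloc_ptr;
            void *old_xfer_buffer;
            uint8_t data[];
    };

    #define ALIGN_UP(x, a) (((uintptr_t)(x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    /* 'raw' must be at least payload + sizeof(struct hdr) + align - 1
     * bytes, exactly as ci_hdrc_alloc_dma_aligned_buffer() allocates.
     */
    static struct hdr *place(void *raw, size_t align)
    {
            return (struct hdr *)ALIGN_UP((struct hdr *)raw + 1, align) - 1;
    }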
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..37f824b59daa 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,16 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
+#endif
+
+#if IS_ENABLED(CONFIG_USB_SERIAL_XR)
+ { USB_DEVICE(0x04e2, 0x1410), /* Ignore XR21V141X USB to Serial converter */
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..c9f6e9758288 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
/*
* See the description for usblp_select_alts() below for the usage
@@ -1312,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
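
The macro-to-function conversion above exists because buffers handed to usb_control_msg() must be DMA-capable, which rules out the on-stack byte the old macro let callers pass; the status byte is now read into a kmalloc'd buffer and copied out. The general shape of the rule, as a sketch (request/value/index and the 5000 ms timeout are illustrative):

    u8 *buf = kmalloc(1, GFP_KERNEL);       /* heap memory is DMA-safe */

    if (!buf)
            return -ENOMEM;
    ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                          USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE,
                          value, index, buf, 1, 5000);
    if (ret >= 0)
            *out = buf[0];                  /* copy out before freeing */
    kfree(buf);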
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b222b777e6a4..74d5a9c5238a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -25,7 +25,7 @@
/* Increment API VERSION when changing tmc.h with new flags or ioctls
* or when changing a significant behavior of the driver.
*/
-#define USBTMC_API_VERSION (2)
+#define USBTMC_API_VERSION (3)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
@@ -475,33 +475,17 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
}
-static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
- void __user *arg)
+static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
{
struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
- int srq_asserted = 0;
u8 *buffer;
u8 tag;
- __u8 stb;
int rv;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
- spin_lock_irq(&data->dev_lock);
- srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
- if (srq_asserted) {
- /* a STB with SRQ is already received */
- stb = file_data->srq_byte;
- spin_unlock_irq(&data->dev_lock);
- rv = put_user(stb, (__u8 __user *)arg);
- dev_dbg(dev, "stb:0x%02x with srq received %d\n",
- (unsigned int)stb, rv);
- return rv;
- }
- spin_unlock_irq(&data->dev_lock);
-
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -548,13 +532,12 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
data->iin_bTag, tag);
}
- stb = data->bNotify2;
+ *stb = data->bNotify2;
} else {
- stb = buffer[2];
+ *stb = buffer[2];
}
- rv = put_user(stb, (__u8 __user *)arg);
- dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)stb, rv);
+ dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
exit:
/* bump interrupt bTag */
@@ -567,6 +550,53 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
return rv;
}
+static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ int srq_asserted = 0;
+ __u8 stb;
+ int rv;
+
+ rv = usbtmc_get_stb(file_data, &stb);
+
+ if (rv > 0) {
+ srq_asserted = atomic_xchg(&file_data->srq_asserted,
+ srq_asserted);
+ if (srq_asserted)
+ stb |= 0x40; /* Set RQS bit */
+
+ rv = put_user(stb, (__u8 __user *)arg);
+ }
+ return rv;
+
+}
+
+static int usbtmc_ioctl_get_srq_stb(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ int srq_asserted = 0;
+ __u8 stb = 0;
+ int rv;
+
+ spin_lock_irq(&data->dev_lock);
+ srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
+
+ if (srq_asserted) {
+ stb = file_data->srq_byte;
+ spin_unlock_irq(&data->dev_lock);
+ rv = put_user(stb, (__u8 __user *)arg);
+ } else {
+ spin_unlock_irq(&data->dev_lock);
+ rv = -ENOMSG;
+ }
+
+ dev_dbg(dev, "stb:0x%02x with srq received %d\n", (unsigned int)stb, rv);
+
+ return rv;
+}
+
static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
__u32 __user *arg)
{
@@ -2145,6 +2175,17 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
file_data->auto_abort = !!tmp_byte;
break;
+ case USBTMC_IOCTL_GET_STB:
+ retval = usbtmc_get_stb(file_data, &tmp_byte);
+ if (retval > 0)
+ retval = put_user(tmp_byte, (__u8 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_GET_SRQ_STB:
+ retval = usbtmc_ioctl_get_srq_stb(file_data,
+ (void __user *)arg);
+ break;
+
case USBTMC_IOCTL_CANCEL_IO:
retval = usbtmc_ioctl_cancel_io(file_data);
break;
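
From user space, the two new requests split the old read_stb semantics: USBTMC_IOCTL_GET_STB always performs a fresh status-byte query on the bus, while USBTMC_IOCTL_GET_SRQ_STB only returns a byte cached by a previous SRQ notification and fails with ENOMSG otherwise. A usage sketch (the device path is illustrative; the ioctl names come from <linux/usb/tmc.h> as extended here):

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/usb/tmc.h>

    int main(void)
    {
            __u8 stb;
            int fd = open("/dev/usbtmc0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, USBTMC_IOCTL_GET_STB, &stb) >= 0)
                    printf("STB: 0x%02x\n", stb);
            /* Succeeds only if an SRQ arrived since the last call. */
            if (ioctl(fd, USBTMC_IOCTL_GET_SRQ_STB, &stb) >= 0)
                    printf("SRQ STB: 0x%02x\n", stb);
            return 0;
    }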
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 1433260d99b4..fc21cf2d36f6 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -69,6 +69,13 @@ static const char *const speed_names[] = {
[USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
+static const char *const ssp_rate[] = {
+ [USB_SSP_GEN_UNKNOWN] = "UNKNOWN",
+ [USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1",
+ [USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2",
+ [USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2",
+};
+
const char *usb_speed_string(enum usb_device_speed speed)
{
if (speed < 0 || speed >= ARRAY_SIZE(speed_names))
@@ -86,12 +93,29 @@ enum usb_device_speed usb_get_maximum_speed(struct device *dev)
if (ret < 0)
return USB_SPEED_UNKNOWN;
- ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ if (ret > 0)
+ return USB_SPEED_SUPER_PLUS;
+ ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
+enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev)
+{
+ const char *maximum_speed;
+ int ret;
+
+ ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
+ if (ret < 0)
+ return USB_SSP_GEN_UNKNOWN;
+
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ return (ret < 0) ? USB_SSP_GEN_UNKNOWN : ret;
+}
+EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate);
+
const char *usb_state_string(enum usb_device_state state)
{
static const char *const names[] = {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..3f0381344221 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -111,8 +111,8 @@ DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
*/
/*-------------------------------------------------------------------------*/
-#define KERNEL_REL bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
-#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
+#define KERNEL_REL bin2bcd(LINUX_VERSION_MAJOR)
+#define KERNEL_VER bin2bcd(LINUX_VERSION_PATCHLEVEL)
/* usb 3.1 root hub device descriptor */
static const u8 usb31_rh_dev_descriptor[18] = {
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 1b4eb7046b07..6ade3daf7858 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -391,6 +391,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ /* ELMO L-12F document camera */
+ { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG },
+
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -415,6 +418,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* novation SoundControl XL */
+ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
@@ -495,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
- /* novation SoundControl XL */
- { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
-
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0a0d11151cfb..ad4c94366dad 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e9ac215b9663..fc3269f5faf1 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
if (num_packets > max_hc_pkt_count) {
num_packets = max_hc_pkt_count;
chan->xfer_len = num_packets * chan->max_packet;
+ } else if (chan->ep_is_in) {
+ /*
+ * Always program an integral # of max packets
+ * for IN transfers.
+ * Note: This assumes that the input buffer is
+ * aligned and sized accordingly.
+ */
+ chan->xfer_len = num_packets * chan->max_packet;
}
} else {
/* Need 1 packet for transfer length of 0 */
num_packets = 1;
}
- if (chan->ep_is_in)
- /*
- * Always program an integral # of max packets for IN
- * transfers
- */
- chan->xfer_len = num_packets * chan->max_packet;
-
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index a052d39b4375..d5f4ec1b73b1 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
&short_read);
if (urb->actual_length + xfer_length > urb->length) {
- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
xfer_length = urb->length - urb->actual_length;
}
@@ -1977,6 +1977,18 @@ error:
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_XACT_ERR);
+ /*
+ * We can get here after a completed transaction
+ * (urb->actual_length >= urb->length) which was not reported
+ * as completed. If that is the case, and we do not abort
+ * the transfer, a transfer of size 0 will be enqueued
+ * subsequently. If urb->actual_length is not DMA-aligned,
+ * the buffer will then point to an unaligned address, and
+ * the resulting behavior is undefined. Bail out in that
+ * situation.
+ */
+ if (qtd->urb->actual_length >= qtd->urb->length)
+ qtd->error_count = 3;
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 267543c3dc38..92df3d620f7d 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -177,7 +177,10 @@ static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
p->i2c_enable = false;
p->activate_stm_fs_transceiver = true;
p->activate_stm_id_vb_detection = true;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->host_support_fs_ls_low_power = true;
+ p->host_ls_low_power_phy_clk = true;
}
static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
@@ -189,7 +192,12 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
p->host_rx_fifo_size = 440;
p->host_nperio_tx_fifo_size = 256;
p->host_perio_tx_fifo_size = 256;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
}
const struct of_device_id dwc2_of_match_table[] = {
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 7afc10872f1f..0000151e3ca9 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -63,20 +63,6 @@ struct dwc2_pci_glue {
struct platform_device *phy;
};
-static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
-{
- if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
- pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
- struct property_entry properties[] = {
- { },
- };
-
- return platform_device_add_properties(dwc2, properties);
- }
-
- return 0;
-}
-
/**
* dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI
* driver
@@ -143,10 +129,6 @@ static int dwc2_pci_probe(struct pci_dev *pci,
dwc2->dev.parent = dev;
- ret = dwc2_pci_quirks(pci, dwc2);
- if (ret)
- goto err;
-
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
ret = -ENOMEM;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 7a2304565a73..2133acf8ee69 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -139,4 +139,14 @@ config USB_DWC3_QCOM
for peripheral mode support.
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_IMX8MP
+ tristate "NXP iMX8MP Platform"
+ depends on OF && COMMON_CLK
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ default USB_DWC3
+ help
+	 NXP iMX8M Plus SoC uses the DesignWare Core IP for USB2/3
+ functionality.
+ Say 'Y' or 'M' if you have one such device.
+
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..2259f8876fb2 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -51,3 +51,4 @@ obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 841daec70b6e..f2448d0a9d39 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1126,11 +1126,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb2_phy);
if (ret == -ENXIO || ret == -ENODEV) {
dwc->usb2_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb2 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
}
}
@@ -1138,11 +1135,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb3_phy);
if (ret == -ENXIO || ret == -ENODEV) {
dwc->usb3_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb3 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
}
}
@@ -1151,11 +1145,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb2_generic_phy);
if (ret == -ENOSYS || ret == -ENODEV) {
dwc->usb2_generic_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb2 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
}
}
@@ -1164,11 +1155,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb3_generic_phy);
if (ret == -ENOSYS || ret == -ENODEV) {
dwc->usb3_generic_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb3 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
}
}
@@ -1190,11 +1178,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize gadget\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
@@ -1205,20 +1190,14 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
ret = dwc3_host_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
break;
case USB_DR_MODE_OTG:
INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
ret = dwc3_drd_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize dual-role\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -1273,6 +1252,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
hird_threshold = 12;
dwc->maximum_speed = usb_get_maximum_speed(dev);
+ dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
@@ -1444,6 +1424,42 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
break;
}
+
+ /*
+ * Currently the controller does not have visibility into the HW
+ * parameter to determine the maximum number of lanes the HW supports.
+ * If the number of lanes is not specified in the device property, then
+ * set the default to support dual-lane for DWC_usb32 and single-lane
+ * for DWC_usb31 for super-speed-plus.
+ */
+ if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
+ switch (dwc->max_ssp_rate) {
+ case USB_SSP_GEN_2x1:
+ if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
+ dev_warn(dev, "UDC only supports Gen 1\n");
+ break;
+ case USB_SSP_GEN_1x2:
+ case USB_SSP_GEN_2x2:
+ if (DWC3_IP_IS(DWC31))
+ dev_warn(dev, "UDC only supports single lane\n");
+ break;
+ case USB_SSP_GEN_UNKNOWN:
+ default:
+ switch (hwparam_gen) {
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->max_ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_1x2;
+ break;
+ }
+ break;
+ }
+ }
}
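A worked example of the defaulting above, read off the nested switch: with
dwc->max_ssp_rate left at USB_SSP_GEN_UNKNOWN by the firmware, the result
is (not part of the diff):

	/*
	 *   IP          GHWPARAMS3 SSPHY_IFC   resulting max_ssp_rate
	 *   DWC_usb32   GEN2                   USB_SSP_GEN_2x2 (10 Gbps x 2)
	 *   DWC_usb31   GEN2                   USB_SSP_GEN_2x1 (10 Gbps x 1)
	 *   DWC_usb32   GEN1                   USB_SSP_GEN_1x2 ( 5 Gbps x 2)
	 *   DWC_usb31   GEN1                   unchanged (stays USB_SSP_GEN_UNKNOWN)
	 */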
static int dwc3_probe(struct platform_device *pdev)
@@ -1490,7 +1506,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- dwc->reset = devm_reset_control_array_get(dev, true, true);
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
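The switch to the optional getter matters because resets are not mandatory
for every dwc3 integration. A minimal sketch of the convention, assuming
the usual reset_control semantics (not part of the diff):

	struct reset_control *rstc;

	/*
	 * The optional variant returns NULL, not an error, when the node
	 * has no "resets" property, and NULL is accepted as a no-op
	 * handle by the rest of the API, so only real errors reach the
	 * IS_ERR() branch.
	 */
	rstc = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	reset_control_deassert(rstc);	/* no-op when rstc == NULL */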
@@ -1555,8 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
ret = dwc3_core_init(dwc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize core: %d\n", ret);
+ dev_err_probe(dev, ret, "failed to initialize core\n");
goto err4;
}
@@ -1758,7 +1773,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..052b20d52651 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
@@ -385,6 +386,8 @@
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
/* Device Configuration Register */
+#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */
+
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -458,6 +461,8 @@
#define DWC3_DEVTEN_USBRSTEN BIT(1)
#define DWC3_DEVTEN_DISCONNEVTEN BIT(0)
+#define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */
+
/* Device Status Register */
#define DWC3_DSTS_DCNRD BIT(29)
@@ -963,6 +968,10 @@ struct dwc3_scratchpad_array {
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count
+ * @gadget_max_speed: maximum gadget speed requested
+ * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling
+ * rate and lane count.
* @ip: controller's ID
* @revision: controller's version of an IP
* @version_type: VERSIONTYPE register contents, a sub release of a revision
@@ -1125,6 +1134,9 @@ struct dwc3 {
u32 nr_scratch;
u32 u1u2;
u32 maximum_speed;
+ u32 gadget_max_speed;
+ enum usb_ssp_rate max_ssp_rate;
+ enum usb_ssp_rate gadget_ssp_rate;
u32 ip;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 3e1c1aacf002..e2b68bb770d1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -441,8 +441,8 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
- struct device_node *np_phy, *np_conn;
- struct extcon_dev *edev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
const char *name;
if (device_property_read_bool(dev, "extcon"))
@@ -462,15 +462,22 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
return edev;
}
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- else
- edev = NULL;
-
- of_node_put(np_conn);
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
of_node_put(np_phy);
return edev;
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
index 55b4a901168e..f6e3817fa7af 100644
--- a/drivers/usb/dwc3/dwc3-haps.c
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -33,6 +33,10 @@ static const struct property_entry initial_properties[] = {
{ },
};
+static const struct software_node dwc3_haps_swnode = {
+ .properties = initial_properties,
+};
+
static int dwc3_haps_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -77,7 +81,7 @@ static int dwc3_haps_probe(struct pci_dev *pci,
dwc->pci = pci;
dwc->dwc3->dev.parent = dev;
- ret = platform_device_add_properties(dwc->dwc3, initial_properties);
+ ret = device_add_software_node(&dwc->dwc3->dev, &dwc3_haps_swnode);
if (ret)
goto err;
@@ -91,6 +95,7 @@ static int dwc3_haps_probe(struct pci_dev *pci,
return 0;
err:
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_put(dwc->dwc3);
return ret;
}
@@ -99,6 +104,7 @@ static void dwc3_haps_remove(struct pci_dev *pci)
{
struct dwc3_haps *dwc = pci_get_drvdata(pci);
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_unregister(dwc->dwc3);
}
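The conversion above (and the dwc3-pci.c one later in this patch) follows
the same software-node pairing. A minimal sketch of the lifetime rule;
names are illustrative (not part of the diff):

	static const struct property_entry props[] = {
		PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
		{ }
	};

	static const struct software_node swnode = {
		.properties = props,
	};

	ret = device_add_software_node(&child->dev, &swnode);
	if (ret)
		return ret;

	/*
	 * The node stays tied to the device, so every exit path must
	 * drop it -- hence the new device_remove_software_node() calls
	 * in both the probe error path and remove().
	 */
	device_remove_software_node(&child->dev);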
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
new file mode 100644
index 000000000000..75f0042b998b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-imx8mp.c - NXP imx8mp specific glue layer
+ *
+ * Copyright (c) 2020 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+
+/* USB wakeup registers */
+#define USB_WAKEUP_CTRL 0x00
+
+/* Global wakeup interrupt enable, also used to clear interrupt */
+#define USB_WAKEUP_EN BIT(31)
+/* Wakeup from connect or disconnect, SuperSpeed only */
+#define USB_WAKEUP_SS_CONN BIT(5)
+/* 0 selects vbus_valid, 1 selects sessvld */
+#define USB_WAKEUP_VBUS_SRC_SESS_VAL BIT(4)
+/* Enable signal for wakeup from U3 state */
+#define USB_WAKEUP_U3_EN BIT(3)
+/* Enable signal for wake up from id change */
+#define USB_WAKEUP_ID_EN BIT(2)
+/* Enable signal for wake up from vbus change */
+#define USB_WAKEUP_VBUS_EN BIT(1)
+/* Enable signal for wake up from dp/dm change */
+#define USB_WAKEUP_DPDM_EN BIT(0)
+
+#define USB_WAKEUP_EN_MASK GENMASK(5, 0)
+
+struct dwc3_imx8mp {
+ struct device *dev;
+ struct platform_device *dwc3;
+ void __iomem *glue_base;
+ struct clk *hsio_clk;
+ struct clk *suspend_clk;
+ int irq;
+ bool pm_suspended;
+ bool wakeup_pending;
+};
+
+static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
+{
+ struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
+ u32 val;
+
+ if (!dwc3)
+ return;
+
+ val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+
+ if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
+ USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
+ else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
+ USB_WAKEUP_VBUS_SRC_SESS_VAL;
+
+ writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+}
+
+static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
+{
+ u32 val;
+
+ val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+ val &= ~(USB_WAKEUP_EN | USB_WAKEUP_EN_MASK);
+ writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+}
+
+static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
+{
+ struct dwc3_imx8mp *dwc3_imx = _dwc3_imx;
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+
+ if (!dwc3_imx->pm_suspended)
+ return IRQ_HANDLED;
+
+ disable_irq_nosync(dwc3_imx->irq);
+ dwc3_imx->wakeup_pending = true;
+
+ if ((dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc->xhci)
+ pm_runtime_resume(&dwc->xhci->dev);
+ else if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ pm_runtime_get(dwc->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int dwc3_imx8mp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dwc3_np, *node = dev->of_node;
+ struct dwc3_imx8mp *dwc3_imx;
+ int err, irq;
+
+ if (!node) {
+ dev_err(dev, "device node not found\n");
+ return -EINVAL;
+ }
+
+ dwc3_imx = devm_kzalloc(dev, sizeof(*dwc3_imx), GFP_KERNEL);
+ if (!dwc3_imx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dwc3_imx);
+
+ dwc3_imx->dev = dev;
+
+ dwc3_imx->glue_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dwc3_imx->glue_base))
+ return PTR_ERR(dwc3_imx->glue_base);
+
+ dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
+ if (IS_ERR(dwc3_imx->hsio_clk)) {
+ err = PTR_ERR(dwc3_imx->hsio_clk);
+ dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
+ if (IS_ERR(dwc3_imx->suspend_clk)) {
+ err = PTR_ERR(dwc3_imx->suspend_clk);
+ dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto disable_clks;
+ }
+ dwc3_imx->irq = irq;
+
+ err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
+ IRQF_ONESHOT, dev_name(dev), dwc3_imx);
+ if (err) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
+ goto disable_clks;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0)
+ goto disable_rpm;
+
+ dwc3_np = of_get_child_by_name(node, "dwc3");
+ if (!dwc3_np) {
+ err = -ENODEV;
+ dev_err(dev, "failed to find dwc3 core child\n");
+ goto disable_rpm;
+ }
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err_node_put;
+ }
+
+ dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
+ if (!dwc3_imx->dwc3) {
+ dev_err(dev, "failed to get dwc3 platform device\n");
+ err = -ENODEV;
+ goto depopulate;
+ }
+ of_node_put(dwc3_np);
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_put(dev);
+
+ return 0;
+
+depopulate:
+ of_platform_depopulate(dev);
+err_node_put:
+ of_node_put(dwc3_np);
+disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+disable_clks:
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+disable_hsio_clk:
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ return err;
+}
+
+static int dwc3_imx8mp_remove(struct platform_device *pdev)
+{
+ struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ if (dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup enable */
+ if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev))
+ dwc3_imx8mp_wakeup_enable(dwc3_imx);
+
+ dwc3_imx->pm_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+ int ret = 0;
+
+ if (!dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup disable */
+ dwc3_imx8mp_wakeup_disable(dwc3_imx);
+ dwc3_imx->pm_suspended = false;
+
+ if (dwc3_imx->wakeup_pending) {
+ dwc3_imx->wakeup_pending = false;
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) {
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
+ } else {
+ /*
+ * Wait for the xHCI controller to switch from the suspend
+ * clock to the normal clock, so that it can detect the
+ * connection.
+ */
+ usleep_range(9000, 10000);
+ }
+ enable_irq(dwc3_imx->irq);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_imx8mp_suspend(dwc3_imx, PMSG_SUSPEND);
+
+ if (device_may_wakeup(dwc3_imx->dev))
+ enable_irq_wake(dwc3_imx->irq);
+ else
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+ dev_dbg(dev, "dwc3 imx8mp pm suspend.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ if (device_may_wakeup(dwc3_imx->dev)) {
+ disable_irq_wake(dwc3_imx->irq);
+ } else {
+ ret = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (ret)
+ return ret;
+
+ ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp pm resume.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime suspend.\n");
+
+ return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND);
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime resume.\n");
+
+ return dwc3_imx8mp_resume(dwc3_imx, PMSG_AUTO_RESUME);
+}
+
+static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
+ dwc3_imx8mp_runtime_resume, NULL)
+};
+
+static const struct of_device_id dwc3_imx8mp_of_match[] = {
+ { .compatible = "fsl,imx8mp-dwc3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match);
+
+static struct platform_driver dwc3_imx8mp_driver = {
+ .probe = dwc3_imx8mp_probe,
+ .remove = dwc3_imx8mp_remove,
+ .driver = {
+ .name = "imx8mp-dwc3",
+ .pm = &dwc3_imx8mp_dev_pm_ops,
+ .of_match_table = dwc3_imx8mp_of_match,
+ },
+};
+
+module_platform_driver(dwc3_imx8mp_driver);
+
+MODULE_ALIAS("platform:imx8mp-dwc3");
+MODULE_AUTHOR("jun.li@nxp.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 imx8mp Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 9a99253d5ba3..057056c0975e 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -99,13 +99,8 @@ static int kdwc3_probe(struct platform_device *pdev)
/* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
- if (IS_ERR(kdwc->usb3_phy)) {
- error = PTR_ERR(kdwc->usb3_phy);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "couldn't get usb3 phy: %d\n", error);
-
- return error;
- }
+ if (IS_ERR(kdwc->usb3_phy))
+ return dev_err_probe(dev, PTR_ERR(kdwc->usb3_phy), "couldn't get usb3 phy\n");
phy_pm_runtime_get_sync(kdwc->usb3_phy);
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index bae6a70664c8..3d3918a8d5fb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,7 +40,9 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -142,6 +144,18 @@ static const struct property_entry dwc3_pci_amd_properties[] = {
{}
};
+static const struct software_node dwc3_pci_intel_swnode = {
+ .properties = dwc3_pci_intel_properties,
+};
+
+static const struct software_node dwc3_pci_intel_mrfld_swnode = {
+ .properties = dwc3_pci_mrfld_properties,
+};
+
+static const struct software_node dwc3_pci_amd_swnode = {
+ .properties = dwc3_pci_amd_properties,
+};
+
static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
struct pci_dev *pdev = dwc->pci;
@@ -222,7 +236,6 @@ static void dwc3_pci_resume_work(struct work_struct *work)
static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
- struct property_entry *p = (struct property_entry *)id->driver_data;
struct dwc3_pci *dwc;
struct resource res[2];
int ret;
@@ -265,7 +278,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
dwc->dwc3->dev.parent = dev;
ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
- ret = platform_device_add_properties(dwc->dwc3, p);
+ ret = device_add_software_node(&dwc->dwc3->dev, (void *)id->driver_data);
if (ret < 0)
goto err;
@@ -288,6 +301,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
err:
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_put(dwc->dwc3);
return ret;
}
@@ -304,75 +318,82 @@ static void dwc3_pci_remove(struct pci_dev *pci)
#endif
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BSW),
- (kernel_ulong_t) &dwc3_pci_intel_properties },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
- (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT_M),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_APL),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_KBP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GLK),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
- (kernel_ulong_t) &dwc3_pci_amd_properties, },
+ (kernel_ulong_t) &dwc3_pci_amd_swnode, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c703d552bbcf..846a47be6df7 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -60,12 +60,14 @@ struct dwc3_acpi_pdata {
int dp_hs_phy_irq_index;
int dm_hs_phy_irq_index;
int ss_phy_irq_index;
+ bool is_urs;
};
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
+ struct platform_device *urs_usb;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
@@ -429,13 +431,15 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
static int dwc3_qcom_get_irq(struct platform_device *pdev,
const char *name, int num)
{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
struct device_node *np = pdev->dev.of_node;
int ret;
if (np)
- ret = platform_get_irq_byname(pdev, name);
+ ret = platform_get_irq_byname(pdev_irq, name);
else
- ret = platform_get_irq(pdev, num);
+ ret = platform_get_irq(pdev_irq, num);
return ret;
}
@@ -563,11 +567,17 @@ static const struct property_entry dwc3_qcom_acpi_properties[] = {
{}
};
+static const struct software_node dwc3_qcom_swnode = {
+ .properties = dwc3_qcom_acpi_properties,
+};
+
static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct resource *res, *child_res = NULL;
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
+ pdev;
int irq;
int ret;
@@ -597,7 +607,7 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
child_res[0].end = child_res[0].start +
qcom->acpi_pdata->dwc3_core_base_size;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq(pdev_irq, 0);
child_res[1].flags = IORESOURCE_IRQ;
child_res[1].start = child_res[1].end = irq;
@@ -607,16 +617,17 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
goto out;
}
- ret = platform_device_add_properties(qcom->dwc3,
- dwc3_qcom_acpi_properties);
+ ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add properties\n");
goto out;
}
ret = platform_device_add(qcom->dwc3);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "failed to add device\n");
+ device_remove_software_node(&qcom->dwc3->dev);
+ }
out:
kfree(child_res);
@@ -651,6 +662,33 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return 0;
}
+static struct platform_device *
+dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+{
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+ int ret;
+ int id;
+
+ /* Figure out device id */
+ ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
+ if (ret != 1)
+ return NULL;
+
+ /* Find the child using name */
+ snprintf(name, sizeof(name), "USB%d", id);
+ fwh = fwnode_get_named_child_node(dev->fwnode, name);
+ if (!fwh)
+ return NULL;
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+ return NULL;
+
+ return acpi_create_platform_device(adev, NULL);
+}
+
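A worked example of the lookup above, for a hypothetical ACPI namespace in
which the controller sits behind a URS (role switch) device (not part of
the diff):

	/*
	 *   Device (URS0)      <- dev->fwnode; sscanf("URS%d") yields 0
	 *       Device (USB0)  <- fwnode_get_named_child_node(.., "USB0")
	 *
	 * The helper creates and returns a platform device for the USB0
	 * child; dwc3_qcom_get_irq() then resolves interrupts against
	 * that device instead of the URS parent.
	 */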
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -715,6 +753,14 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->acpi_pdata->qscratch_base_offset;
parent_res->end = parent_res->start +
qcom->acpi_pdata->qscratch_base_size;
+
+ if (qcom->acpi_pdata->is_urs) {
+ qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
+ if (!qcom->urs_usb) {
+ dev_err(dev, "failed to create URS USB platdev\n");
+ return -ENODEV;
+ }
+ }
}
qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
@@ -796,6 +842,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
+ device_remove_software_node(&qcom->dwc3->dev);
of_platform_depopulate(dev);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
@@ -877,8 +924,20 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
.ss_phy_irq_index = 2
};
+static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
+ .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
+ .qscratch_base_size = SDM845_QSCRATCH_SIZE,
+ .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
+ .hs_phy_irq_index = 1,
+ .dp_hs_phy_irq_index = 4,
+ .dm_hs_phy_irq_index = 3,
+ .ss_phy_irq_index = 2,
+ .is_urs = true,
+};
+
static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
{ "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
+ { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index e733be840545..b06b7092b1a2 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -274,7 +274,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
of_node_put(child);
- of_dev_put(child_pdev);
+ platform_device_put(child_pdev);
/*
* Configure the USB port as device or host according to the static
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..aebcf8ec0716 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -605,8 +605,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
if (desc->bInterval) {
- params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
- dep->interval = 1 << (desc->bInterval - 1);
+ u8 bInterval_m1;
+
+ /*
+ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
+ * must be set to 0 when the controller operates in full-speed.
+ */
+ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
+ if (dwc->gadget->speed == USB_SPEED_FULL)
+ bInterval_m1 = 0;
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ dwc->gadget->speed == USB_SPEED_FULL)
+ dep->interval = desc->bInterval;
+ else
+ dep->interval = 1 << (desc->bInterval - 1);
+
+ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
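A worked example of the clamping above, with descriptor values chosen for
illustration (not part of the diff):

	/*
	 * Interrupt endpoint with desc->bInterval = 16:
	 *   bInterval_m1 = min(16 - 1, 13) = 13, since DEPCFG only
	 *   accepts 0..13.
	 *
	 * On a full-speed link bInterval_m1 is forced to 0, and a
	 * full-speed interrupt endpoint expresses its interval in
	 * frames, so dep->interval = 16 rather than 1 << 15.
	 */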
@@ -1763,6 +1778,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2036,6 +2053,102 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
}
}
+static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
+{
+ enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
+ u32 reg;
+
+ if (ssp_rate == USB_SSP_GEN_UNKNOWN)
+ ssp_rate = dwc->max_ssp_rate;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_SPEED_MASK;
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ if (ssp_rate == USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+
+ if (ssp_rate != USB_SSP_GEN_2x1 &&
+ dwc->max_ssp_rate != USB_SSP_GEN_2x1)
+ reg |= DWC3_DCFG_NUMLANES(1);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
+{
+ enum usb_device_speed speed;
+ u32 reg;
+
+ speed = dwc->gadget_max_speed;
+ if (speed > dwc->maximum_speed)
+ speed = dwc->maximum_speed;
+
+ if (speed == USB_SPEED_SUPER_PLUS &&
+ DWC3_IP_IS(DWC32)) {
+ __dwc3_gadget_set_ssp_rate(dwc);
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_SPEED_MASK);
+
+ /*
+ * WORKAROUND: DWC3 revision < 2.20a have an issue
+ * which would cause metastability state on Run/Stop
+ * bit if we try to force the IP to USB2-only mode.
+ *
+ * Because of that, we cannot configure the IP to any
+ * speed other than the SuperSpeed
+ *
+ * Refers to:
+ *
+ * STAR#9000525659: Clock Domain Crossing on DCTL in
+ * USB 2.0 Mode
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
+ !dwc->dis_metastability_quirk) {
+ reg |= DWC3_DCFG_SUPERSPEED;
+ } else {
+ switch (speed) {
+ case USB_SPEED_LOW:
+ reg |= DWC3_DCFG_LOWSPEED;
+ break;
+ case USB_SPEED_FULL:
+ reg |= DWC3_DCFG_FULLSPEED;
+ break;
+ case USB_SPEED_HIGH:
+ reg |= DWC3_DCFG_HIGHSPEED;
+ break;
+ case USB_SPEED_SUPER:
+ reg |= DWC3_DCFG_SUPERSPEED;
+ break;
+ case USB_SPEED_SUPER_PLUS:
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ break;
+ default:
+ dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ }
+ }
+
+ if (DWC3_IP_IS(DWC32) &&
+ speed > USB_SPEED_UNKNOWN &&
+ speed < USB_SPEED_SUPER_PLUS)
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
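A worked example of the DCFG programming in the two helpers above, for a
DWC_usb32 controller; NUMLANES appears to encode lane count minus one,
mirroring the DWC3_DSTS_CONNLANES() + 1 decode later in this patch (not
part of the diff):

	/*
	 * gadget_ssp_rate == USB_SSP_GEN_1x2, max_ssp_rate == USB_SSP_GEN_2x2:
	 *   reg |= DWC3_DCFG_SUPERSPEED;       gen1 signaling, 5 Gbps
	 *   reg |= DWC3_DCFG_NUMLANES(1);      two lanes
	 *
	 * gadget_ssp_rate == USB_SSP_GEN_2x1:
	 *   reg |= DWC3_DCFG_SUPERSPEED_PLUS;  gen2 signaling, 10 Gbps
	 *   NUMLANES field left cleared;       single lane
	 */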
@@ -2058,6 +2171,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (dwc->has_hibernation)
reg |= DWC3_DCTL_KEEP_CONNECT;
+ __dwc3_gadget_set_speed(dwc);
dwc->pullups_connected = true;
} else {
reg &= ~DWC3_DCTL_RUN_STOP;
@@ -2083,6 +2197,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2108,6 +2223,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
+ * pm_runtime_get_sync() returns a negative errno on failure, 0 if
+ * the device had to be resumed, and 1 if it was already active. For
+ * a successful resume (0), the DWC3 runtime PM resume routine will
+ * handle the run/stop sequence, so avoid duplicate operations here;
+ * only fall through when the device was already active.
+ */
+ ret = pm_runtime_get_sync(dwc->dev);
+ if (ret <= 0) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
+ /*
* Synchronize any pending event handling before executing the controller
* halt routine.
*/
@@ -2145,10 +2271,14 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ dwc->connected = false;
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put(dwc->dev);
return ret;
}
@@ -2158,8 +2288,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
u32 reg;
/* Enable all but Start and End of Frame IRQs */
- reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
- DWC3_DEVTEN_EVNTOVERFLOWEN |
+ reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
@@ -2297,7 +2426,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
- int ret = 0;
+ int ret;
int irq;
irq = dwc->irq_gadget;
@@ -2306,33 +2435,14 @@ static int dwc3_gadget_start(struct usb_gadget *g,
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
- goto err0;
+ return ret;
}
spin_lock_irqsave(&dwc->lock, flags);
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget->name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
- }
-
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
-
-err1:
- spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
-
-err0:
- return ret;
}
static void __dwc3_gadget_stop(struct dwc3 *dwc)
@@ -2348,13 +2458,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2407,62 +2510,33 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
- u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- reg &= ~(DWC3_DCFG_SPEED_MASK);
-
- /*
- * WORKAROUND: DWC3 revision < 2.20a have an issue
- * which would cause metastability state on Run/Stop
- * bit if we try to force the IP to USB2-only mode.
- *
- * Because of that, we cannot configure the IP to any
- * speed other than the SuperSpeed
- *
- * Refers to:
- *
- * STAR#9000525659: Clock Domain Crossing on DCTL in
- * USB 2.0 Mode
- */
- if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
- !dwc->dis_metastability_quirk) {
- reg |= DWC3_DCFG_SUPERSPEED;
- } else {
- switch (speed) {
- case USB_SPEED_LOW:
- reg |= DWC3_DCFG_LOWSPEED;
- break;
- case USB_SPEED_FULL:
- reg |= DWC3_DCFG_FULLSPEED;
- break;
- case USB_SPEED_HIGH:
- reg |= DWC3_DCFG_HIGHSPEED;
- break;
- case USB_SPEED_SUPER:
- reg |= DWC3_DCFG_SUPERSPEED;
- break;
- case USB_SPEED_SUPER_PLUS:
- if (DWC3_IP_IS(DWC3))
- reg |= DWC3_DCFG_SUPERSPEED;
- else
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- break;
- default:
- dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+ dwc->gadget_max_speed = speed;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
- if (DWC3_IP_IS(DWC3))
- reg |= DWC3_DCFG_SUPERSPEED;
- else
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- }
- }
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
+ enum usb_ssp_rate rate)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_ssp_rate = rate;
spin_unlock_irqrestore(&dwc->lock, flags);
}
+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ if (dwc->usb2_phy)
+ return usb_phy_set_power(dwc->usb2_phy, mA);
+
+ return 0;
+}
+
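This new op closes the loop with the composite_reset() change later in
this patch: on bus reset the composite layer calls
usb_gadget_vbus_draw(gadget, 100), which lands here and is forwarded to
the legacy USB2 PHY when one is present. The call chain, sketched (not
part of the diff):

	/*
	 * usb_gadget_vbus_draw(gadget, 100)            composite core (mA)
	 *   -> gadget->ops->vbus_draw(gadget, 100)     this callback
	 *     -> usb_phy_set_power(dwc->usb2_phy, 100)
	 */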
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
@@ -2471,7 +2545,9 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
.udc_start = dwc3_gadget_start,
.udc_stop = dwc3_gadget_stop,
.udc_set_speed = dwc3_gadget_set_speed,
+ .udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
.get_config_params = dwc3_gadget_config_params,
+ .vbus_draw = dwc3_gadget_vbus_draw,
};
/* -------------------------------------------------------------------------- */
@@ -3304,12 +3380,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
struct dwc3_ep *dep;
int ret;
u32 reg;
+ u8 lanes = 1;
u8 speed;
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
+ if (DWC3_IP_IS(DWC32))
+ lanes = DWC3_DSTS_CONNLANES(reg) + 1;
+
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
+
/*
* RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
* each time on Connect Done.
@@ -3324,6 +3406,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget->ep0->maxpacket = 512;
dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+
+ if (lanes > 1)
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
break;
case DWC3_DSTS_SUPERSPEED:
/*
@@ -3345,6 +3432,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget->ep0->maxpacket = 512;
dwc->gadget->speed = USB_SPEED_SUPER;
+
+ if (lanes > 1) {
+ dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
+ }
break;
case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
@@ -3839,6 +3931,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dev->platform_data = dwc;
dwc->gadget->ops = &dwc3_gadget_ops;
dwc->gadget->speed = USB_SPEED_UNKNOWN;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
dwc->gadget->sg_supported = true;
dwc->gadget->name = "dwc3-gadget";
dwc->gadget->lpm_capable = true;
@@ -3865,6 +3958,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->revision);
dwc->gadget->max_speed = dwc->maximum_speed;
+ dwc->gadget->max_ssp_rate = dwc->max_ssp_rate;
/*
* REVISIT: Here we should clear all pending IRQs to be
@@ -3881,7 +3975,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err5;
}
- dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+ if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
+ dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
+ else
+ dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
return 0;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e195176580de..f29a264635aa 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -108,7 +108,7 @@ int dwc3_host_init(struct dwc3 *dwc)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
- ret = platform_device_add_properties(xhci, props);
+ ret = device_create_managed_software_node(&xhci->dev, props, NULL);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
goto err;
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
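The arithmetic behind the new delay, worked through for the 60 MHz ULPI
clock the macro assumes (not part of the diff):

	/*
	 * DWC3_ULPI_BASE_DELAY = DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
	 *                      = DIV_ROUND_UP(1000000000, 60000000)
	 *                      = 17 ns, one ULPI clock cycle rounded up.
	 *
	 * A basic access takes about five cycles (ns = 5 * 17 = 85 ns
	 * per poll iteration); an extended-address access adds one cycle
	 * and a read adds one more, so a non-extended read polls every
	 * ~102 ns.
	 */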
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..72a9797dbbae 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/utsname.h>
+#include <linux/bitfield.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
@@ -392,8 +393,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +428,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
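The reshuffling above exists because usb_gadget_deactivate() and
usb_gadget_activate() may reach UDC code that sleeps, so they cannot run
under the cdev->lock spinlock; the lock is dropped around the call and
retaken for the reference-count update. The shape of the pattern, with
placeholder names (not part of the diff):

	spin_lock_irqsave(&lock, flags);
	if (need_gadget_call) {
		spin_unlock_irqrestore(&lock, flags);
		status = sleeping_call();		/* may sleep */
		spin_lock_irqsave(&lock, flags);	/* retake for count */
	}
	if (status == 0)
		refcount++;
	spin_unlock_irqrestore(&lock, flags);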
@@ -728,47 +735,77 @@ static int bos_desc(struct usb_composite_dev *cdev)
/* The SuperSpeedPlus USB Device Capability descriptor */
if (gadget_is_superspeed_plus(cdev->gadget)) {
struct usb_ssp_cap_descriptor *ssp_cap;
+ u8 ssac = 1;
+ u8 ssic;
+ int i;
- ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
- bos->bNumDeviceCaps++;
+ if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x2)
+ ssac = 3;
/*
- * Report typical values.
+ * Paired RX and TX sublink speed attributes share
+ * the same SSID.
*/
+ ssic = (ssac + 1) / 2 - 1;
+
+ ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
- le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1));
- ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(ssac));
+ ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac);
ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
ssp_cap->bReserved = 0;
ssp_cap->wReserved = 0;
- /* SSAC = 1 (2 attributes) */
- ssp_cap->bmAttributes = cpu_to_le32(1);
+ ssp_cap->bmAttributes =
+ cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic));
- /* Min RX/TX Lane Count = 1 */
ssp_cap->wFunctionalitySupport =
- cpu_to_le16((1 << 8) | (1 << 12));
+ cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID, 0) |
+ FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) |
+ FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1));
/*
- * bmSublinkSpeedAttr[0]:
- * ST = Symmetric, RX
- * LSE = 3 (Gbps)
- * LP = 1 (SuperSpeedPlus)
- * LSM = 10 (10 Gbps)
+ * Use 1 SSID if the gadget supports up to gen2x1 or the rate is
+ * unspecified:
+ * - SSID 0 for symmetric RX/TX sublink speed of 10 Gbps.
+ *
+ * Use 1 SSID if the gadget supports up to gen1x2:
+ * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
+ *
+ * Use 2 SSIDs if the gadget supports up to gen2x2:
+ * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
+ * - SSID 1 for symmetric RX/TX sublink speed of 10 Gbps.
*/
- ssp_cap->bmSublinkSpeedAttr[0] =
- cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
- /*
- * bmSublinkSpeedAttr[1] =
- * ST = Symmetric, TX
- * LSE = 3 (Gbps)
- * LP = 1 (SuperSpeedPlus)
- * LSM = 10 (10 Gbps)
- */
- ssp_cap->bmSublinkSpeedAttr[1] =
- cpu_to_le32((3 << 4) | (1 << 14) |
- (0xa << 16) | (1 << 7));
+ for (i = 0; i < ssac + 1; i++) {
+ u8 ssid;
+ u8 mantissa;
+ u8 type;
+
+ ssid = i >> 1;
+
+ if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x1 ||
+ cdev->gadget->max_ssp_rate == USB_SSP_GEN_UNKNOWN)
+ mantissa = 10;
+ else
+ mantissa = 5 << ssid;
+
+ if (i % 2)
+ type = USB_SSP_SUBLINK_SPEED_ST_SYM_TX;
+ else
+ type = USB_SSP_SUBLINK_SPEED_ST_SYM_RX;
+
+ ssp_cap->bmSublinkSpeedAttr[i] =
+ cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE,
+ USB_SSP_SUBLINK_SPEED_LSE_GBPS) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST, type) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP,
+ USB_SSP_SUBLINK_SPEED_LP_SSP) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, mantissa));
+ }
}
return le16_to_cpu(bos->wTotalLength);
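A worked example of what the loop above emits for a gen2x2-capable gadget
(ssac = 3, ssic = 1, four sublink speed attributes; not part of the diff):

	/*
	 *   i   ssid   type                              LSM mantissa
	 *   0   0      USB_SSP_SUBLINK_SPEED_ST_SYM_RX    5 Gbps
	 *   1   0      USB_SSP_SUBLINK_SPEED_ST_SYM_TX    5 Gbps
	 *   2   1      USB_SSP_SUBLINK_SPEED_ST_SYM_RX   10 Gbps
	 *   3   1      USB_SSP_SUBLINK_SPEED_ST_SYM_TX   10 Gbps
	 *
	 * For gen2x1, or when the rate is unknown, ssac stays 1 and the
	 * single SSID advertises the symmetric 10 Gbps RX/TX pair this
	 * function previously hard-coded.
	 */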
@@ -2036,7 +2073,7 @@ done:
return value;
}
-void composite_disconnect(struct usb_gadget *gadget)
+static void __composite_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
unsigned long flags;
@@ -2053,6 +2090,23 @@ void composite_disconnect(struct usb_gadget *gadget)
spin_unlock_irqrestore(&cdev->lock, flags);
}
+void composite_disconnect(struct usb_gadget *gadget)
+{
+ usb_gadget_vbus_draw(gadget, 0);
+ __composite_disconnect(gadget);
+}
+
+void composite_reset(struct usb_gadget *gadget)
+{
+ /*
+ * Section 1.4.13 Standard Downstream Port of the USB battery charging
+ * specification v1.2 states that a device connected to an SDP shall
+ * draw at most 100 mA while connected but not yet configured.
+ */
+ usb_gadget_vbus_draw(gadget, 100);
+ __composite_disconnect(gadget);
+}
+
/*-------------------------------------------------------------------------*/
static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
@@ -2373,7 +2427,7 @@ static const struct usb_gadget_driver composite_driver_template = {
.unbind = composite_unbind,
.setup = composite_setup,
- .reset = composite_disconnect,
+ .reset = composite_reset,
.disconnect = composite_disconnect,
.suspend = composite_suspend,
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..0d56f33d63c2 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
- return sprintf(page, "%s\n", udc_name ?: "");
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
+
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1481,6 +1488,28 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget)
spin_unlock_irqrestore(&gi->spinlock, flags);
}
+static void configfs_composite_reset(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_reset(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
static void configfs_composite_suspend(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
@@ -1530,13 +1559,13 @@ static const struct usb_gadget_driver configfs_driver_template = {
.unbind = configfs_composite_unbind,
.setup = configfs_composite_setup,
- .reset = configfs_composite_disconnect,
+ .reset = configfs_composite_reset,
.disconnect = configfs_composite_disconnect,
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1605,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 8fff995b8dd5..71a1a26e85c7 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -87,7 +87,7 @@ struct f_midi {
struct snd_rawmidi_substream *out_substream[MAX_PORTS];
unsigned long out_triggered;
- struct tasklet_struct tasklet;
+ struct work_struct work;
unsigned int in_ports;
unsigned int out_ports;
int index;
@@ -698,9 +698,11 @@ drop_out:
f_midi_drop_out_substreams(midi);
}
-static void f_midi_in_tasklet(struct tasklet_struct *t)
+static void f_midi_in_work(struct work_struct *work)
{
- struct f_midi *midi = from_tasklet(midi, t, tasklet);
+ struct f_midi *midi;
+
+ midi = container_of(work, struct f_midi, work);
f_midi_transmit(midi);
}
@@ -737,7 +739,7 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
VDBG(midi, "%s() %d\n", __func__, up);
midi->in_ports_array[substream->number].active = up;
if (up)
- tasklet_hi_schedule(&midi->tasklet);
+ queue_work(system_highpri_wq, &midi->work);
}
static int f_midi_out_open(struct snd_rawmidi_substream *substream)
@@ -875,7 +877,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0;
midi->gadget = cdev->gadget;
- tasklet_setup(&midi->tasklet, f_midi_in_tasklet);
+ INIT_WORK(&midi->work, f_midi_in_work);
status = f_midi_register_card(midi);
if (status < 0)
goto fail_register;
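The conversion above follows the tree-wide move away from tasklets;
queueing on system_highpri_wq keeps latency close to tasklet_hi_schedule()
while running the handler in process context. The converted pattern,
reduced to a skeleton (not part of the diff):

	struct foo {
		struct work_struct work;	/* was: struct tasklet_struct */
	};

	static void foo_work(struct work_struct *work)
	{
		struct foo *foo = container_of(work, struct foo, work);
		/* process context: may sleep if that is ever needed */
	}

	INIT_WORK(&foo->work, foo_work);		/* at bind time */
	queue_work(system_highpri_wq, &foo->work);	/* at trigger time */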
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..61ce8e68f7a3 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -51,6 +51,8 @@
#define GET_PORT_STATUS 1
#define SOFT_RESET 2
+#define DEFAULT_Q_LEN 10 /* same as legacy g_printer gadget */
+
static int major, minors;
static struct class *usb_gadget_class;
static DEFINE_IDA(printer_ida);
@@ -1162,6 +1164,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
@@ -1363,6 +1366,9 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
opts->func_inst.free_func_inst = gprinter_free_inst;
ret = &opts->func_inst;
+ /* Make sure q_len is initialized, otherwise the bound device can't support read/write! */
+ opts->q_len = DEFAULT_Q_LEN;
+
mutex_lock(&printer_ida_lock);
if (ida_is_empty(&printer_ida)) {
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
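A worked example of the reworked computation, assuming stereo 16-bit
48 kHz audio (chmask = 0x3, ssize = 2, srate = 48000; not part of the
diff):

	/*
	 * High speed (factor = 8000, bInterval = 4):
	 *   intervals/s    = 8000 / (1 << (4 - 1)) = 1000
	 *   max_size_bw    = 2 ch * 2 bytes * DIV_ROUND_UP(48000, 1000)
	 *                  = 2 * 2 * 48 = 192 bytes
	 *   wMaxPacketSize = min(192, 1024) = 192
	 *
	 * Full speed (factor = 1000, bInterval = 1) yields the same
	 * 192 bytes, capped at 1023 instead of 1024.
	 */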
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index e6d32c536781..265c4d805f81 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -23,11 +23,6 @@
#define PRD_SIZE_MAX PAGE_SIZE
#define MIN_PERIODS 4
-struct uac_req {
- struct uac_rtd_params *pp; /* parent param */
- struct usb_request *req;
-};
-
/* Runtime data params for one stream */
struct uac_rtd_params {
struct snd_uac_chip *uac; /* parent chip */
@@ -41,9 +36,8 @@ struct uac_rtd_params {
void *rbuf;
unsigned int max_psize; /* MaxPacketSize of endpoint */
- struct uac_req *ureq;
- spinlock_t lock;
+ struct usb_request **reqs;
};
struct snd_uac_chip {
@@ -79,17 +73,20 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned int pending;
- unsigned long flags, flags2;
unsigned int hw_ptr;
int status = req->status;
- struct uac_req *ur = req->context;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- struct uac_rtd_params *prm = ur->pp;
+ struct uac_rtd_params *prm = req->context;
struct snd_uac_chip *uac = prm->uac;
/* i/f shutting down */
- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+ if (!prm->ep_enabled) {
+ usb_ep_free_request(ep, req);
+ return;
+ }
+
+ if (req->status == -ESHUTDOWN)
return;
/*
@@ -106,16 +103,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
if (!substream)
goto exit;
- snd_pcm_stream_lock_irqsave(substream, flags2);
+ snd_pcm_stream_lock(substream);
runtime = substream->runtime;
if (!runtime || !snd_pcm_running(substream)) {
- snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ snd_pcm_stream_unlock(substream);
goto exit;
}
- spin_lock_irqsave(&prm->lock, flags);
-
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
* For each IN packet, take the quotient of the current data
@@ -142,8 +137,6 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
hw_ptr = prm->hw_ptr;
- spin_unlock_irqrestore(&prm->lock, flags);
-
/* Pack USB load in ALSA ring buffer */
pending = runtime->dma_bytes - hw_ptr;
@@ -167,12 +160,10 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
}
}
- spin_lock_irqsave(&prm->lock, flags);
/* update hw_ptr after data is copied to memory */
prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
hw_ptr = prm->hw_ptr;
- spin_unlock_irqrestore(&prm->lock, flags);
- snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ snd_pcm_stream_unlock(substream);
if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
snd_pcm_period_elapsed(substream);
@@ -188,7 +179,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct uac_rtd_params *prm;
struct g_audio *audio_dev;
struct uac_params *params;
- unsigned long flags;
int err = 0;
audio_dev = uac->audio_dev;
@@ -199,8 +189,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
else
prm = &uac->c_prm;
- spin_lock_irqsave(&prm->lock, flags);
-
/* Reset */
prm->hw_ptr = 0;
@@ -217,8 +205,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
err = -EINVAL;
}
- spin_unlock_irqrestore(&prm->lock, flags);
-
/* Clear buffer after Play stops */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
memset(prm->rbuf, 0, prm->max_psize * params->req_number);
@@ -239,6 +225,25 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
+static u64 uac_ssize_to_fmt(int ssize)
+{
+ u64 ret;
+
+ switch (ssize) {
+ case 3:
+ ret = SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ ret = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ ret = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ }
+
+ return ret;
+}
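The helper above folds the two identical switch statements further down into one place: it maps the UAC2 slot size in bytes to an ALSA format bitmask (3 selects packed 24-bit S24_3LE, 4 selects S32_LE, anything else falls back to S16_LE). The u64 return type matches snd_pcm_hardware.formats, which is a 64-bit bitmask, so callers can assign the result directly to runtime->hw.formats.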
+
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -262,36 +267,14 @@ static int uac_pcm_open(struct snd_pcm_substream *substream)
runtime->hw = uac_pcm_hardware;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- spin_lock_init(&uac->p_prm.lock);
runtime->hw.rate_min = p_srate;
- switch (p_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
+ runtime->hw.formats = uac_ssize_to_fmt(p_ssize);
runtime->hw.channels_min = num_channels(p_chmask);
runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize
/ runtime->hw.periods_min;
} else {
- spin_lock_init(&uac->c_prm.lock);
runtime->hw.rate_min = c_srate;
- switch (c_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
+ runtime->hw.formats = uac_ssize_to_fmt(c_ssize);
runtime->hw.channels_min = num_channels(c_chmask);
runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize
/ runtime->hw.periods_min;
@@ -335,10 +318,16 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
params = &audio_dev->params;
for (i = 0; i < params->req_number; i++) {
- if (prm->ureq[i].req) {
- usb_ep_dequeue(ep, prm->ureq[i].req);
- usb_ep_free_request(ep, prm->ureq[i].req);
- prm->ureq[i].req = NULL;
+ if (prm->reqs[i]) {
+ if (usb_ep_dequeue(ep, prm->reqs[i]))
+ usb_ep_free_request(ep, prm->reqs[i]);
+ /*
+ * If usb_ep_dequeue() cannot successfully dequeue the
+ * request, the request will be freed by the completion
+ * callback.
+ */
+
+ prm->reqs[i] = NULL;
}
}
@@ -367,22 +356,21 @@ int u_audio_start_capture(struct g_audio *audio_dev)
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
- if (!prm->ureq[i].req) {
+ if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
+ prm->reqs[i] = req;
req->zero = 0;
- req->context = &prm->ureq[i];
+ req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
@@ -445,22 +433,21 @@ int u_audio_start_playback(struct g_audio *audio_dev)
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
- if (!prm->ureq[i].req) {
+ if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
+ prm->reqs[i] = req;
req->zero = 0;
- req->context = &prm->ureq[i];
+ req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
@@ -505,9 +492,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
uac->c_prm.uac = uac;
prm->max_psize = g_audio->out_ep_maxpsize;
- prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
- GFP_KERNEL);
- if (!prm->ureq) {
+ prm->reqs = kcalloc(params->req_number,
+ sizeof(struct usb_request *),
+ GFP_KERNEL);
+ if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
@@ -527,9 +515,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
uac->p_prm.uac = uac;
prm->max_psize = g_audio->in_ep_maxpsize;
- prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
- GFP_KERNEL);
- if (!prm->ureq) {
+ prm->reqs = kcalloc(params->req_number,
+ sizeof(struct usb_request *),
+ GFP_KERNEL);
+ if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
@@ -582,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
snd_fail:
snd_card_free(card);
fail:
- kfree(uac->p_prm.ureq);
- kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.reqs);
+ kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
@@ -605,8 +594,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
if (card)
snd_card_free(card);
- kfree(uac->p_prm.ureq);
- kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.reqs);
+ kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..d1d044d9f859 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
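With ETH_HLEN = 14 the derived constant works out to 15412 + 14 = 15426 bytes for a full frame, still under the 16k allocation boundary the comment is worried about. The point of the split, presumably, is that max_mtu below should advertise the payload limit (15412) rather than the frame length; before this change an interface configured at the maximum MTU could emit frames 14 bytes larger than the stated budget.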
struct eth_dev {
/* lock is held while accessing port_usb
@@ -79,6 +80,7 @@ struct eth_dev {
bool zlp;
bool no_skb_reserve;
+ bool ifname_set;
u8 host_mac[ETH_ALEN];
u8 dev_mac[ETH_ALEN];
};
@@ -786,7 +788,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
@@ -1003,15 +1005,45 @@ EXPORT_SYMBOL_GPL(gether_get_qmult);
int gether_get_ifname(struct net_device *net, char *name, int len)
{
+ struct eth_dev *dev = netdev_priv(net);
int ret;
rtnl_lock();
- ret = scnprintf(name, len, "%s\n", netdev_name(net));
+ ret = scnprintf(name, len, "%s\n",
+ dev->ifname_set ? net->name : netdev_name(net));
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);
+int gether_set_ifname(struct net_device *net, const char *name, int len)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ char tmp[IFNAMSIZ];
+ const char *p;
+
+ if (name[len - 1] == '\n')
+ len--;
+
+ if (len >= sizeof(tmp))
+ return -E2BIG;
+
+ strscpy(tmp, name, len + 1);
+ if (!dev_valid_name(tmp))
+ return -EINVAL;
+
+ /* Require exactly one %d, so binding will not fail with EEXIST. */
+ p = strchr(name, '%');
+ if (!p || p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
+ strncpy(net->name, tmp, sizeof(net->name));
+ dev->ifname_set = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gether_set_ifname);
+
/*
* gether_cleanup - remove Ethernet-over-USB device
* Context: may sleep
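Worked examples of the validation above (illustrative): "usb%d" passes; "usb0" and "usb%s" fail (no %d conversion); "usb%d%d" fails (more than one conversion). Requiring exactly one %d means the name is always a template, so register_netdev() can pick a free unit number and a later bind cannot collide with an existing interface. As a usage sketch, assuming a configfs gadget with an ECM function at functions/ecm.usb0: writing the template into its ifname attribute before the function is bound, e.g. echo usb%d > functions/ecm.usb0/ifname, controls the name of the link that appears on enumeration.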
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 10dd640684e2..40144546d1b0 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -244,6 +244,18 @@ unsigned gether_get_qmult(struct net_device *net);
*/
int gether_get_ifname(struct net_device *net, char *name, int len);
+/**
+ * gether_set_ifname - set an ethernet-over-usb link interface name
+ * @net: device representing this link
+ * @name: new interface name
+ * @len: length of @name
+ *
+ * This sets the interface name of this ethernet-over-usb link.
+ * A single terminating newline, if any, is ignored.
+ * Returns zero on success, else negative errno.
+ */
+int gether_set_ifname(struct net_device *net, const char *name, int len);
+
void gether_cleanup(struct eth_dev *dev);
/* connect/disconnect is handled by individual functions */
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index bd92b5703013..3dfb460908fa 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -148,7 +148,20 @@ out: \
return ret; \
} \
\
- CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
+ static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
+ const char *page, size_t len)\
+ { \
+ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
+ int ret = -EBUSY; \
+ \
+ mutex_lock(&opts->lock); \
+ if (!opts->refcnt) \
+ ret = gether_set_ifname(opts->net, page, len); \
+ mutex_unlock(&opts->lock); \
+ return ret ?: len; \
+ } \
+ \
+ CONFIGFS_ATTR(_f_##_opts_, ifname)
#define USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(_f_, _n_) \
static ssize_t _f_##_opts_##_n_##_show(struct config_item *item,\
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 2caccbb6e014..1e59204ec7aa 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -258,9 +258,7 @@ __acquires(&port->port_lock)
list_del(&req->list);
req->zero = kfifo_is_empty(&port->port_write_buf);
- pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
- port->port_num, len, *((u8 *)req->buf),
- *((u8 *)req->buf+1), *((u8 *)req->buf+2));
+	pr_vdebug("ttyGS%d: tx len=%d, %3ph ...\n", port->port_num, len, req->buf);
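%3ph is the kernel's hex-dump printf extension (see printk-formats.rst): %<N>ph prints the first N bytes of the buffer as space-separated hex, e.g. "1b 5b 41", replacing the three hand-rolled 0x%02x conversions with a single specifier.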
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
@@ -346,7 +344,7 @@ __acquires(&port->port_lock)
}
/*
- * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * RX work takes data out of the RX queue and hands it up to the TTY
* layer until it refuses to take any more data (or is throttled back).
* Then it issues reads for any further data.
*
@@ -709,7 +707,7 @@ raced_with_open:
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
- * let the push tasklet fire again until we're re-opened.
+ * let the push async work fire again until we're re-opened.
*/
if (gser == NULL)
kfifo_free(&port->port_write_buf);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index f02c38b32a2b..11dd6e8adc8a 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -515,10 +515,15 @@ config USB_G_WEBCAM
config USB_RAW_GADGET
tristate "USB Raw Gadget"
help
- USB Raw Gadget is a kernel module that provides a userspace interface
- for the USB Gadget subsystem. Essentially it allows to emulate USB
- devices from userspace. See Documentation/usb/raw-gadget.rst for
- details.
+ USB Raw Gadget is a gadget driver that gives userspace low-level
+ control over the gadget's communication process.
+
+ Like any other gadget driver, Raw Gadget implements USB devices via
+ the USB gadget API. Unlike most gadget drivers, Raw Gadget does not
+ implement any concrete USB functions itself but requires userspace
+ to do that.
+
+ See Documentation/usb/raw-gadget.rst for details.
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "raw_gadget".
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
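A plausible reading of this fix (and the identical one in ether.c below): status still holds the 0 left by the preceding successful call, so when usb_otg_descriptor_alloc() failed the bind routine would take the error path yet report success to its caller; assigning -ENOMEM before the goto makes the failure visible.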
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 30313b233680..99c7fc0d1d59 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 062dfac30399..c5a2c734234a 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -3,7 +3,8 @@
* USB Raw Gadget driver.
* See Documentation/usb/raw-gadget.rst for more details.
*
- * Andrey Konovalov <andreyknvl@gmail.com>
+ * Copyright (c) 2020 Google, Inc.
+ * Author: Andrey Konovalov <andreyknvl@gmail.com>
*/
#include <linux/compiler.h>
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 0bd6b20435b8..02d8bfae58fb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 6497185ec4e7..5c7dea5e0ff1 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -46,8 +46,8 @@
* - Make vid/did overridable
* - make it look like usb1 if usb1 mode forced
*/
-#define KERNEL_REL bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
-#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
+#define KERNEL_REL bin2bcd(LINUX_VERSION_MAJOR)
+#define KERNEL_VER bin2bcd(LINUX_VERSION_PATCHLEVEL)
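bin2bcd() turns a two-digit decimal into binary-coded decimal: bin2bcd(5) = 0x05 and bin2bcd(11) = 0x11, so a v5.11 kernel advertises bcdDevice 0x0511. Taking the fields from LINUX_VERSION_MAJOR and LINUX_VERSION_PATCHLEVEL avoids open-coding the byte layout of LINUX_VERSION_CODE, which this driver has no business depending on.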
enum {
AST_VHUB_STR_INDEX_MAX = 4,
@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 3e88c7670b2e..8bedb7f64eba 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -11,14 +11,3 @@ config USB_BDC_UDC
Say "y" here to link the driver statically, or "m" to build a dynamically
linked module called "bdc".
-
-if USB_BDC_UDC
-
-comment "Platform Support"
-config USB_BDC_PCI
- tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
- default USB_BDC_UDC
- help
- Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
-endif
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
index 52cb5ea48bbe..1b21c9518efc 100644
--- a/drivers/usb/gadget/udc/bdc/Makefile
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -5,5 +5,3 @@ bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
ifneq ($(CONFIG_USB_GADGET_VERBOSE),)
bdc-y += bdc_dbg.o
endif
-
-obj-$(CONFIG_USB_BDC_PCI) += bdc_pci.o
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index ac75e25c3b6a..8d00b1239f21 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc.h - header for the BRCM BDC USB3.0 device controller
*
@@ -35,7 +35,7 @@
/*
* Maximum size of ep0 response buffer for ch9 requests,
* the set_sel request uses 6 so far, the max.
-*/
+ */
#define EP0_RESPONSE_BUFF 6
/* Start with SS as default */
#define EP0_MAX_PKT_SIZE 512
@@ -86,23 +86,23 @@
#define BDC_EPSTS5 0x74
#define BDC_EPSTS6 0x78
#define BDC_EPSTS7 0x7c
-#define BDC_SRRBAL(n) (0x200 + (n * 0x10))
-#define BDC_SRRBAH(n) (0x204 + (n * 0x10))
-#define BDC_SRRINT(n) (0x208 + (n * 0x10))
-#define BDC_INTCTLS(n) (0x20c + (n * 0x10))
+#define BDC_SRRBAL(n) (0x200 + ((n) * 0x10))
+#define BDC_SRRBAH(n) (0x204 + ((n) * 0x10))
+#define BDC_SRRINT(n) (0x208 + ((n) * 0x10))
+#define BDC_INTCTLS(n) (0x20c + ((n) * 0x10))
/* Extended capability regs */
#define BDC_FSCNOC 0xcd4
#define BDC_FSCNIC 0xce4
-#define NUM_NCS(p) (p >> 28)
+#define NUM_NCS(p) ((p) >> 28)
/* Register bit fields and Masks */
/* BDC Configuration 0 */
#define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8)
-#define BDC_SPB(p) (p & 0x7)
+#define BDC_SPB(p) ((p) & 0x7)
/* BDC Capability1 */
-#define BDC_P64 (1 << 0)
+#define BDC_P64 BIT(0)
/* BDC Command register */
#define BDC_CMD_FH 0xe
@@ -111,9 +111,9 @@
#define BDC_CMD_BLA 0x3
#define BDC_CMD_EPC 0x2
#define BDC_CMD_DVC 0x1
-#define BDC_CMD_CWS (0x1 << 5)
+#define BDC_CMD_CWS BIT(5)
#define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6)
-#define BDC_CMD_EPN(p) ((p & 0x1f) << 10)
+#define BDC_CMD_EPN(p) (((p) & 0x1f) << 10)
#define BDC_SUB_CMD_ADD (0x1 << 17)
#define BDC_SUB_CMD_FWK (0x4 << 17)
/* Reset sequence number */
@@ -124,7 +124,7 @@
#define BDC_SUB_CMD_EP_STP (0x2 << 17)
#define BDC_SUB_CMD_EP_STL (0x4 << 17)
#define BDC_SUB_CMD_EP_RST (0x1 << 17)
-#define BDC_CMD_SRD (1 << 27)
+#define BDC_CMD_SRD BIT(27)
/* CMD completion status */
#define BDC_CMDS_SUCC 0x1
@@ -141,19 +141,19 @@
#define EPM_SHIFT 4
/* BDC USPSC */
-#define BDC_VBC (1 << 31)
-#define BDC_PRC (1 << 30)
-#define BDC_PCE (1 << 29)
-#define BDC_CFC (1 << 28)
-#define BDC_PCC (1 << 27)
-#define BDC_PSC (1 << 26)
-#define BDC_VBS (1 << 25)
-#define BDC_PRS (1 << 24)
-#define BDC_PCS (1 << 23)
+#define BDC_VBC BIT(31)
+#define BDC_PRC BIT(30)
+#define BDC_PCE BIT(29)
+#define BDC_CFC BIT(28)
+#define BDC_PCC BIT(27)
+#define BDC_PSC BIT(26)
+#define BDC_VBS BIT(25)
+#define BDC_PRS BIT(24)
+#define BDC_PCS BIT(23)
#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
-#define BDC_SCN (1 << 8)
-#define BDC_SDC (1 << 7)
-#define BDC_SWS (1 << 4)
+#define BDC_SCN BIT(8)
+#define BDC_SDC BIT(7)
+#define BDC_SWS BIT(4)
#define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf)
#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
@@ -163,21 +163,21 @@
#define BDC_SPEED_HS 0x3
#define BDC_SPEED_SS 0x4
-#define BDC_PST(p) (p & 0xf)
+#define BDC_PST(p) ((p) & 0xf)
#define BDC_PST_MASK 0xf
/* USPPMS */
-#define BDC_U2E (0x1 << 31)
-#define BDC_U1E (0x1 << 30)
-#define BDC_U2A (0x1 << 29)
-#define BDC_PORT_W1S (0x1 << 17)
+#define BDC_U2E BIT(31)
+#define BDC_U1E BIT(30)
+#define BDC_U2A BIT(29)
+#define BDC_PORT_W1S BIT(17)
#define BDC_U1T(p) ((p) & 0xff)
#define BDC_U2T(p) (((p) & 0xff) << 8)
#define BDC_U1T_MASK 0xff
/* USBPM2 */
/* Hardware LPM Enable */
-#define BDC_HLE (1 << 16)
+#define BDC_HLE BIT(16)
/* BDC Status and Control */
#define BDC_COP_RST (1 << 29)
@@ -186,11 +186,11 @@
#define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP)
-#define BDC_COS (1 << 28)
+#define BDC_COS BIT(28)
#define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20)
-#define BDC_MASK_MCW (1 << 7)
-#define BDC_GIE (1 << 1)
-#define BDC_GIP (1 << 0)
+#define BDC_MASK_MCW BIT(7)
+#define BDC_GIE BIT(1)
+#define BDC_GIP BIT(0)
#define BDC_HLT 1
#define BDC_NOR 2
@@ -201,19 +201,19 @@
#define BD_CHAIN 0xf
#define BD_TFS_SHIFT 4
-#define BD_SOT (1 << 26)
-#define BD_EOT (1 << 27)
-#define BD_ISP (1 << 29)
-#define BD_IOC (1 << 30)
-#define BD_SBF (1 << 31)
+#define BD_SOT BIT(26)
+#define BD_EOT BIT(27)
+#define BD_ISP BIT(29)
+#define BD_IOC BIT(30)
+#define BD_SBF BIT(31)
#define BD_INTR_TARGET(p) (((p) & 0x1f) << 27)
-#define BDC_SRR_RWS (1 << 4)
-#define BDC_SRR_RST (1 << 3)
-#define BDC_SRR_ISR (1 << 2)
-#define BDC_SRR_IE (1 << 1)
-#define BDC_SRR_IP (1 << 0)
+#define BDC_SRR_RWS BIT(4)
+#define BDC_SRR_RST BIT(3)
+#define BDC_SRR_ISR BIT(2)
+#define BDC_SRR_IE BIT(1)
+#define BDC_SRR_IP BIT(0)
#define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24)
#define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
#define BDC_SRR_DPI_MASK 0x00ff0000
@@ -221,14 +221,14 @@
#define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT)
/* Control transfer BD specific fields */
-#define BD_DIR_IN (1 << 25)
+#define BD_DIR_IN BIT(25)
#define BDC_PTC_MASK 0xf0000000
/* status report defines */
#define SR_XSF 0
#define SR_USPC 4
-#define SR_BD_LEN(p) (p & 0xffffff)
+#define SR_BD_LEN(p) ((p) & 0xffffff)
#define XSF_SUCC 0x1
#define XSF_SHORT 0x3
@@ -241,13 +241,13 @@
/* Transfer BD fields */
#define BD_LEN(p) ((p) & 0x1ffff)
-#define BD_LTF (1 << 25)
+#define BD_LTF BIT(25)
#define BD_TYPE_DS 0x1
#define BD_TYPE_SS 0x2
-#define BDC_EP_ENABLED (1 << 0)
-#define BDC_EP_STALL (1 << 1)
-#define BDC_EP_STOP (1 << 2)
+#define BDC_EP_ENABLED BIT(0)
+#define BDC_EP_STALL BIT(1)
+#define BDC_EP_STOP BIT(2)
/* One BD can transfer max 65536 bytes */
#define BD_MAX_BUFF_SIZE (1 << 16)
@@ -266,9 +266,9 @@
/* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */
#define BDC_TNOTIFY 2500 /*in ms*/
/* Devstatus bitfields */
-#define REMOTE_WAKEUP_ISSUED (1 << 16)
-#define DEVICE_SUSPENDED (1 << 17)
-#define FUNC_WAKE_ISSUED (1 << 18)
+#define REMOTE_WAKEUP_ISSUED BIT(16)
+#define DEVICE_SUSPENDED BIT(17)
+#define FUNC_WAKE_ISSUED BIT(18)
#define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP)
/* On disconnect, preserve these bits and clear rest */
@@ -466,24 +466,24 @@ static inline void bdc_writel(void __iomem *base, u32 offset, u32 value)
}
/* Buffer descriptor list operations */
-void bdc_notify_xfr(struct bdc *, u32);
-void bdc_softconn(struct bdc *);
-void bdc_softdisconn(struct bdc *);
-int bdc_run(struct bdc *);
-int bdc_stop(struct bdc *);
-int bdc_reset(struct bdc *);
-int bdc_udc_init(struct bdc *);
-void bdc_udc_exit(struct bdc *);
-int bdc_reinit(struct bdc *);
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum);
+void bdc_softconn(struct bdc *bdc);
+void bdc_softdisconn(struct bdc *bdc);
+int bdc_run(struct bdc *bdc);
+int bdc_stop(struct bdc *bdc);
+int bdc_reset(struct bdc *bdc);
+int bdc_udc_init(struct bdc *bdc);
+void bdc_udc_exit(struct bdc *bdc);
+int bdc_reinit(struct bdc *bdc);
/* Status report handlers */
/* Upstream port status change sr */
-void bdc_sr_uspc(struct bdc *, struct bdc_sr *);
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport);
/* transfer sr */
-void bdc_sr_xsf(struct bdc *, struct bdc_sr *);
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport);
/* EP0 XSF handlers */
-void bdc_xsf_ep0_setup_recv(struct bdc *, struct bdc_sr *);
-void bdc_xsf_ep0_data_start(struct bdc *, struct bdc_sr *);
-void bdc_xsf_ep0_status_start(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport);
#endif /* __LINUX_BDC_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 44c2a5eef785..995f79c79f96 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -163,7 +163,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
usb_endpoint_xfer_isoc(desc)) {
param2 |= si;
if (usb_endpoint_xfer_isoc(desc) && comp_desc)
- mul = comp_desc->bmAttributes;
+ mul = comp_desc->bmAttributes;
}
param2 |= mul << EPM_SHIFT;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
index 29cc988a671a..533ad52fa6d0 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_cmd.h - header for the BDC debug functions
*
@@ -10,15 +10,14 @@
#define __LINUX_BDC_CMD_H__
/* Command operations */
-int bdc_address_device(struct bdc *, u32);
-int bdc_config_ep(struct bdc *, struct bdc_ep *);
-int bdc_dconfig_ep(struct bdc *, struct bdc_ep *);
-int bdc_stop_ep(struct bdc *, int);
-int bdc_ep_set_stall(struct bdc *, int);
-int bdc_ep_clear_stall(struct bdc *, int);
-int bdc_ep_set_halt(struct bdc_ep *, u32 , int);
-int bdc_ep_bla(struct bdc *, struct bdc_ep *, dma_addr_t);
-int bdc_function_wake(struct bdc*, u8);
-int bdc_function_wake_fh(struct bdc*, u8);
+int bdc_address_device(struct bdc *bdc, u32 add);
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_stop_ep(struct bdc *bdc, int epnum);
+int bdc_ep_set_stall(struct bdc *bdc, int epnum);
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum);
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr);
+int bdc_function_wake(struct bdc *bdc, u8 intf);
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf);
#endif /* __LINUX_BDC_CMD_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
index 7ba7448ad743..9c03e13308ca 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -68,7 +68,7 @@ void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
sr = bdc->srr.sr_bds;
addr = bdc->srr.dma_addr;
- dev_vdbg(bdc->dev, "bdc_dbg_srr sr:%p dqp_index:%d\n",
+ dev_vdbg(bdc->dev, "%s sr:%p dqp_index:%d\n", __func__,
sr, bdc->srr.dqp_index);
for (i = 0; i < NUM_SR_ENTRIES; i++) {
sr = &bdc->srr.sr_bds[i];
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
index 373d5abffbb8..acd8332f8584 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_dbg.h - header for the BDC debug functions
*
@@ -12,10 +12,10 @@
#include "bdc.h"
#ifdef CONFIG_USB_GADGET_VERBOSE
-void bdc_dbg_bd_list(struct bdc *, struct bdc_ep*);
-void bdc_dbg_srr(struct bdc *, u32);
-void bdc_dbg_regs(struct bdc *);
-void bdc_dump_epsts(struct bdc *);
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep);
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num);
+void bdc_dbg_regs(struct bdc *bdc);
+void bdc_dump_epsts(struct bdc *bdc);
#else
static inline void bdc_dbg_regs(struct bdc *bdc)
{ }
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index fafdc9fdb4a5..8e2f20b12519 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -68,7 +68,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
* check if the bd_table struct is allocated ?
* if yes, then check if bd memory has been allocated, then
* free the dma_pool and also the bd_table struct memory
- */
+ */
bd_table = bd_list->bd_table_array[index];
dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
if (!bd_table) {
@@ -147,7 +147,7 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
/* Allocate memory for each table */
for (index = 0; index < num_tabs; index++) {
/* Allocate memory for bd_table structure */
- bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
+ bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC);
if (!bd_table)
goto fail;
@@ -275,7 +275,7 @@ static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
end_bdi = next_hwd_bdi - 1;
if (end_bdi < 0)
end_bdi = ep->bd_list.max_bdi - 1;
- else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
+ else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
end_bdi--;
return end_bdi;
@@ -756,7 +756,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
__func__, ep->name, start_bdi, end_bdi);
- dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
+ dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
ep, (void *)ep->usb_ep.desc);
/* if still connected, stop the ep to see where the HW is ? */
if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
@@ -795,7 +795,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
start_pending = true;
end_pending = true;
} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
- end_pending = true;
+ end_pending = true;
}
} else {
if (start_bdi >= curr_hw_dqpi) {
@@ -1405,7 +1405,7 @@ static int ep0_set_sel(struct bdc *bdc,
}
/*
- * Queue a 0 byte bd only if wLength is more than the length and and length is
+ * Queue a 0 byte bd only if wLength is more than the length and length is
* a multiple of MaxPacket then queue 0 byte BD
*/
static int ep0_queue_zlp(struct bdc *bdc)
@@ -1858,12 +1858,12 @@ static int bdc_gadget_ep_enable(struct usb_ep *_ep,
int ret;
if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
- pr_debug("bdc_gadget_ep_enable invalid parameters\n");
+ pr_debug("%s invalid parameters\n", __func__);
return -EINVAL;
}
if (!desc->wMaxPacketSize) {
- pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
+ pr_debug("%s missing wMaxPacketSize\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
index a37ff8033b4f..4d3affd07a65 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_ep.h - header for the BDC debug functions
*
@@ -9,9 +9,9 @@
#ifndef __LINUX_BDC_EP_H__
#define __LINUX_BDC_EP_H__
-int bdc_init_ep(struct bdc *);
-int bdc_ep_disable(struct bdc_ep *);
-int bdc_ep_enable(struct bdc_ep *);
-void bdc_free_ep(struct bdc *);
+int bdc_init_ep(struct bdc *bdc);
+int bdc_ep_disable(struct bdc_ep *ep);
+int bdc_ep_enable(struct bdc_ep *ep);
+void bdc_free_ep(struct bdc *bdc);
#endif /* __LINUX_BDC_EP_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
deleted file mode 100644
index 6dbc489513cd..000000000000
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
- *
- * Copyright (C) 2014 Broadcom Corporation
- *
- * Author: Ashwini Pahuja
- *
- * Based on drivers under drivers/usb/
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/platform_device.h>
-
-#include "bdc.h"
-
-#define BDC_PCI_PID 0x1570
-
-struct bdc_pci {
- struct device *dev;
- struct platform_device *bdc;
-};
-
-static int bdc_setup_msi(struct pci_dev *pci)
-{
- int ret;
-
- ret = pci_enable_msi(pci);
- if (ret) {
- pr_err("failed to allocate MSI entry\n");
- return ret;
- }
-
- return ret;
-}
-
-static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
-{
- struct resource res[2];
- struct platform_device *bdc;
- struct bdc_pci *glue;
- int ret = -ENOMEM;
-
- glue = devm_kzalloc(&pci->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
- glue->dev = &pci->dev;
- ret = pci_enable_device(pci);
- if (ret) {
- dev_err(&pci->dev, "failed to enable pci device\n");
- return -ENODEV;
- }
- pci_set_master(pci);
-
- bdc = platform_device_alloc(BRCM_BDC_NAME, PLATFORM_DEVID_AUTO);
- if (!bdc)
- return -ENOMEM;
-
- memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
- bdc_setup_msi(pci);
-
- res[0].start = pci_resource_start(pci, 0);
- res[0].end = pci_resource_end(pci, 0);
- res[0].name = BRCM_BDC_NAME;
- res[0].flags = IORESOURCE_MEM;
-
- res[1].start = pci->irq;
- res[1].name = BRCM_BDC_NAME;
- res[1].flags = IORESOURCE_IRQ;
-
- ret = platform_device_add_resources(bdc, res, ARRAY_SIZE(res));
- if (ret) {
- dev_err(&pci->dev,
- "couldn't add resources to bdc device\n");
- platform_device_put(bdc);
- return ret;
- }
-
- pci_set_drvdata(pci, glue);
-
- dma_set_coherent_mask(&bdc->dev, pci->dev.coherent_dma_mask);
-
- bdc->dev.dma_mask = pci->dev.dma_mask;
- bdc->dev.dma_parms = pci->dev.dma_parms;
- bdc->dev.parent = &pci->dev;
- glue->bdc = bdc;
-
- ret = platform_device_add(bdc);
- if (ret) {
- dev_err(&pci->dev, "failed to register bdc device\n");
- platform_device_put(bdc);
- return ret;
- }
-
- return 0;
-}
-
-static void bdc_pci_remove(struct pci_dev *pci)
-{
- struct bdc_pci *glue = pci_get_drvdata(pci);
-
- platform_device_unregister(glue->bdc);
- pci_disable_msi(pci);
-}
-
-static struct pci_device_id bdc_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BDC_PCI_PID), },
- {} /* Terminating Entry */
-};
-
-MODULE_DEVICE_TABLE(pci, bdc_pci_id_table);
-
-static struct pci_driver bdc_pci_driver = {
- .name = "bdc-pci",
- .id_table = bdc_pci_id_table,
- .probe = bdc_pci_probe,
- .remove = bdc_pci_remove,
-};
-
-MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("BRCM BDC USB3 PCI Glue layer");
-module_pci_driver(bdc_pci_driver);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 248426a3e88a..5ac0ef88334e 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -164,7 +164,7 @@ static void bdc_func_wake_timer(struct work_struct *work)
/*
* Check if host has started transferring on endpoints
* FUNC_WAKE_ISSUED is cleared when transfer has started after resume
- */
+ */
if (bdc->devstatus & FUNC_WAKE_ISSUED) {
dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
/* flag is still set, so again send func wake */
@@ -205,7 +205,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
* if not then send function wake again every
* TNotification secs until host initiates
* transfer to BDC, USB3 spec Table 8.13
- */
+ */
schedule_delayed_work(
&bdc->func_wake_notify,
msecs_to_jiffies(BDC_TNOTIFY));
@@ -379,7 +379,7 @@ static int bdc_udc_start(struct usb_gadget *gadget,
* Run the controller from here and when BDC is connected to
* Host then driver will receive a USPC SR with VBUS present
* and then driver will do a softconnect.
- */
+ */
ret = bdc_run(bdc);
if (ret) {
dev_err(bdc->dev, "%s bdc run fail\n", __func__);
@@ -530,7 +530,7 @@ int bdc_udc_init(struct bdc *bdc)
bdc->gadget.name = BRCM_BDC_NAME;
ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
- IRQF_SHARED , BRCM_BDC_NAME, bdc);
+ IRQF_SHARED, BRCM_BDC_NAME, bdc);
if (ret) {
dev_err(bdc->dev,
"failed to request irq #%d %d\n",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..493ff93f7dda 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -29,6 +29,7 @@
* @list: for use by the udc class driver
* @vbus: for udcs who care about vbus status, this value is real vbus status;
* for udcs who do not care about vbus status, this value is always true
+ * @started: the UDC's started state. True if the UDC has been started.
*
* This represents the internal data structure which is used by the UDC-class
* to hold information about udc driver and gadget together.
@@ -39,6 +40,7 @@ struct usb_udc {
struct device dev;
struct list_head list;
bool vbus;
+ bool started;
};
static struct class *udc_class;
@@ -659,8 +661,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
@@ -1083,7 +1084,18 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
*/
static inline int usb_gadget_udc_start(struct usb_udc *udc)
{
- return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+ int ret;
+
+ if (udc->started) {
+		dev_err(&udc->dev, "UDC is already started\n");
+ return -EBUSY;
+ }
+
+ ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+ if (!ret)
+ udc->started = true;
+
+ return ret;
}
/**
@@ -1099,7 +1111,13 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
*/
static inline void usb_gadget_udc_stop(struct usb_udc *udc)
{
+ if (!udc->started) {
+		dev_err(&udc->dev, "UDC is already stopped\n");
+ return;
+ }
+
udc->gadget->ops->udc_stop(udc->gadget);
+ udc->started = false;
}
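The new started flag makes the start/stop wrappers idempotent: a second start fails with -EBUSY instead of invoking ->udc_start() twice, and an unmatched stop is rejected with an error message rather than calling ->udc_stop(). The soft_connect store below, which drives these same wrappers from sysfs alongside the normal bind/unbind path, looks like the intended beneficiary (an inference; the hunk itself does not say).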
/**
@@ -1115,12 +1133,18 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
enum usb_device_speed speed)
{
- if (udc->gadget->ops->udc_set_speed) {
- enum usb_device_speed s;
+ struct usb_gadget *gadget = udc->gadget;
+ enum usb_device_speed s;
- s = min(speed, udc->gadget->max_speed);
- udc->gadget->ops->udc_set_speed(udc->gadget, s);
- }
+ if (speed == USB_SPEED_UNKNOWN)
+ s = gadget->max_speed;
+ else
+ s = min(speed, gadget->max_speed);
+
+ if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
+ gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
+ else if (gadget->ops->udc_set_speed)
+ gadget->ops->udc_set_speed(gadget, s);
}
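Two behavioural changes are folded in here: USB_SPEED_UNKNOWN now explicitly selects the gadget's maximum speed (previously min() quietly passed the UNKNOWN value through to ->udc_set_speed()), and a SuperSpeed Plus gadget whose UDC implements udc_set_ssp_rate() has its maximum SSP signaling rate (gadget->max_ssp_rate, the Gen/lane selection) programmed instead of a bare speed enum.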
/**
@@ -1223,6 +1247,8 @@ int usb_add_gadget(struct usb_gadget *gadget)
udc->gadget = gadget;
gadget->udc = udc;
+ udc->started = false;
+
mutex_lock(&udc_lock);
list_add_tail(&udc->list, &udc_list);
@@ -1530,10 +1556,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1573,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
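For reference, soft_connect is the write-only attribute at /sys/class/udc/<name>/soft_connect; writing connect or disconnect to it drives the pullup by hand. Taking udc_lock across the whole store closes the window in which a concurrent gadget unbind could clear udc->driver between the check at the top and the usb_gadget_udc_start()/connect calls that follow.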
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..57067763b100 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
- default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 6c726d2e1788..d046c09fa566 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -36,7 +36,6 @@
#include <asm/unaligned.h>
#include "amd5536udc.h"
-static void udc_tasklet_disconnect(unsigned long);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
@@ -95,9 +94,6 @@ static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
-/* tasklet for usb disconnect */
-static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect);
-
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
@@ -1637,6 +1633,8 @@ static void usb_connect(struct udc *dev)
*/
static void usb_disconnect(struct udc *dev)
{
+ u32 tmp;
+
/* Return if already disconnected */
if (!dev->connected)
return;
@@ -1648,23 +1646,6 @@ static void usb_disconnect(struct udc *dev)
/* mask interrupts */
udc_mask_unused_interrupts(dev);
- /* REVISIT there doesn't seem to be a point to having this
- * talk to a tasklet ... do it directly, we already hold
- * the spinlock needed to process the disconnect.
- */
-
- tasklet_schedule(&disconnect_tasklet);
-}
-
-/* Tasklet for disconnect to be outside of interrupt context */
-static void udc_tasklet_disconnect(unsigned long par)
-{
- struct udc *dev = udc;
- u32 tmp;
-
- DBG(dev, "Tasklet disconnect\n");
- spin_lock_irq(&dev->lock);
-
if (dev->driver) {
spin_unlock(&dev->lock);
dev->driver->disconnect(&dev->gadget);
@@ -1673,13 +1654,10 @@ static void udc_tasklet_disconnect(unsigned long par)
/* empty queues */
for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
empty_req_queue(&dev->ep[tmp]);
-
}
/* disable ep0 */
- ep_init(dev->regs,
- &dev->ep[UDC_EP0IN_IX]);
-
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
if (!soft_reset_occured) {
/* init controller by soft reset */
@@ -1695,8 +1673,6 @@ static void udc_tasklet_disconnect(unsigned long par)
tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
writel(tmp, &dev->regs->cfg);
}
-
- spin_unlock_irq(&dev->lock);
}
/* Reset the UDC core */
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index d5e9d20c097d..72f2ea062d55 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -843,8 +843,8 @@ static int __xudc_ep_enable(struct xusb_ep *ep,
break;
}
- ep->buffer0ready = 0;
- ep->buffer1ready = 0;
+ ep->buffer0ready = false;
+ ep->buffer1ready = false;
ep->curbufnum = 0;
ep->rambase = rambase[ep->epnumber];
xudc_epconfig(ep, udc);
@@ -868,11 +868,11 @@ static int __xudc_ep_enable(struct xusb_ep *ep,
if (ep->epnumber && !ep->is_in) {
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
1 << ep->epnumber);
- ep->buffer0ready = 1;
+ ep->buffer0ready = true;
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
(1 << (ep->epnumber +
XUSB_STATUS_EP_BUFF2_SHIFT)));
- ep->buffer1ready = 1;
+ ep->buffer1ready = true;
}
return 0;
@@ -1954,7 +1954,7 @@ static void xudc_nonctrl_ep_handler(struct xusb_udc *udc, u8 epnum,
if (intrstatus & (XUSB_STATUS_EP0_BUFF1_COMP_MASK << epnum))
ep->buffer0ready = 0;
if (intrstatus & (XUSB_STATUS_EP0_BUFF2_COMP_MASK << epnum))
- ep->buffer1ready = 0;
+ ep->buffer1ready = false;
if (list_empty(&ep->queue))
return;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 31e59309da1f..b94f2a070c05 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -269,10 +269,14 @@ config USB_EHCI_HCD_AT91
config USB_EHCI_TEGRA
tristate "NVIDIA Tegra HCD support"
depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
+ select USB_CHIPIDEA
+ select USB_CHIPIDEA_HOST
+ select USB_CHIPIDEA_TEGRA
help
- This driver enables support for the internal USB Host Controllers
+	  This option is deprecated; the driver has been removed. Use
+	  USB_CHIPIDEA_TEGRA instead.
+
+ Enable support for the internal USB Host Controllers
found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index c1b08703af10..3e4d298d851f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
-obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
obj-$(CONFIG_USB_EHCI_BRCMSTB) += ehci-brcm.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e358ae17d51e..1926b328b6aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+	/* Wait until HC becomes operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
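ehci_handshake() polls the given register until (value & mask) == done or the microsecond timeout expires, returning 0 or -ETIMEDOUT. With mask = STS_HALT and done = 0 this waits up to 100 ms for the halted bit to clear after CF is set, so a controller that never starts running now fails ehci_run() cleanly instead of being reported as up.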
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 087402aec5cb..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
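USBSTS status bits, STS_IAA included, are write-one-to-clear, so the added write simply acknowledges any stale Interrupt-on-Async-Advance status before the IAA cycle is wound down. The likely failure mode on the affected Synopsys-derived controllers (inferred from the comment, not stated) is a leftover IAA bit firing a spurious interrupt on resume.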
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
deleted file mode 100644
index 869d9c4de5fc..000000000000
--- a/drivers/usb/host/ehci-tegra.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
- *
- * Copyright (C) 2010 Google, Inc.
- * Copyright (C) 2009 - 2013 NVIDIA Corporation
- */
-
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/usb/ehci_def.h>
-#include <linux/usb/tegra_usb_phy.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/otg.h>
-
-#include "ehci.h"
-
-#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-
-#define TEGRA_USB_DMA_ALIGN 32
-
-#define DRIVER_DESC "Tegra EHCI driver"
-#define DRV_NAME "tegra-ehci"
-
-static struct hc_driver __read_mostly tegra_ehci_hc_driver;
-
-struct tegra_ehci_soc_config {
- bool has_hostpc;
-};
-
-struct tegra_ehci_hcd {
- struct clk *clk;
- struct reset_control *rst;
- int port_resuming;
- bool needs_double_reset;
-};
-
-static int tegra_reset_usb_controller(struct platform_device *pdev)
-{
- struct device_node *phy_np;
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tegra_ehci_hcd *tegra =
- (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
- struct reset_control *rst;
- int err;
-
- phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
- if (!phy_np)
- return -ENOENT;
-
- /*
- * The 1st USB controller contains some UTMI pad registers that are
- * global for all the controllers on the chip. Those registers are
- * also cleared when reset is asserted to the 1st controller.
- */
- rst = of_reset_control_get_shared(phy_np, "utmi-pads");
- if (IS_ERR(rst)) {
- dev_warn(&pdev->dev,
- "can't get utmi-pads reset from the PHY\n");
- dev_warn(&pdev->dev,
- "continuing, but please update your DT\n");
- } else {
- /*
- * PHY driver performs UTMI-pads reset in a case of
- * non-legacy DT.
- */
- reset_control_put(rst);
- }
-
- of_node_put(phy_np);
-
- /* reset control is shared, hence initialize it first */
- err = reset_control_deassert(tegra->rst);
- if (err)
- return err;
-
- err = reset_control_assert(tegra->rst);
- if (err)
- return err;
-
- udelay(1);
-
- err = reset_control_deassert(tegra->rst);
- if (err)
- return err;
-
- return 0;
-}
-
-static int tegra_ehci_internal_port_reset(
- struct ehci_hcd *ehci,
- u32 __iomem *portsc_reg
-)
-{
- u32 temp;
- unsigned long flags;
- int retval = 0;
- int i, tries;
- u32 saved_usbintr;
-
- spin_lock_irqsave(&ehci->lock, flags);
- saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
- /* disable USB interrupt */
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- /*
- * Here we have to do Port Reset at most twice for
- * Port Enable bit to be set.
- */
- for (i = 0; i < 2; i++) {
- temp = ehci_readl(ehci, portsc_reg);
- temp |= PORT_RESET;
- ehci_writel(ehci, temp, portsc_reg);
- mdelay(10);
- temp &= ~PORT_RESET;
- ehci_writel(ehci, temp, portsc_reg);
- mdelay(1);
- tries = 100;
- do {
- mdelay(1);
- /*
- * Up to this point, Port Enable bit is
- * expected to be set after 2 ms waiting.
- * USB1 usually takes extra 45 ms, for safety,
- * we take 100 ms as timeout.
- */
- temp = ehci_readl(ehci, portsc_reg);
- } while (!(temp & PORT_PE) && tries--);
- if (temp & PORT_PE)
- break;
- }
- if (i == 2)
- retval = -ETIMEDOUT;
-
- /*
- * Clear Connect Status Change bit if it's set.
- * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
- */
- if (temp & PORT_CSC)
- ehci_writel(ehci, PORT_CSC, portsc_reg);
-
- /*
- * Write to clear any interrupt status bits that might be set
- * during port reset.
- */
- temp = ehci_readl(ehci, &ehci->regs->status);
- ehci_writel(ehci, temp, &ehci->regs->status);
-
- /* restore original interrupt enable bits */
- ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
- return retval;
-}
-
-static int tegra_ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
- u32 __iomem *status_reg;
- u32 temp;
- unsigned long flags;
- int retval = 0;
-
- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- if (typeReq == GetPortStatus) {
- temp = ehci_readl(ehci, status_reg);
- if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
- /* Resume completed, re-enable disconnect detection */
- tegra->port_resuming = 0;
- tegra_usb_phy_postresume(hcd->usb_phy);
- }
- }
-
- else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
- temp = ehci_readl(ehci, status_reg);
- if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
- retval = -EPIPE;
- goto done;
- }
-
- temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
- temp |= PORT_WKDISC_E | PORT_WKOC_E;
- ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
-
- /*
- * If a transaction is in progress, there may be a delay in
- * suspending the port. Poll until the port is suspended.
- */
- if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
- PORT_SUSPEND, 5000))
- pr_err("%s: timeout waiting for SUSPEND\n", __func__);
-
- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
- goto done;
- }
-
- /* For USB1 port we need to issue Port Reset twice internally */
- if (tegra->needs_double_reset &&
- (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
- spin_unlock_irqrestore(&ehci->lock, flags);
- return tegra_ehci_internal_port_reset(ehci, status_reg);
- }
-
- /*
- * Tegra host controller will time the resume operation to clear the bit
- * when the port control state switches to HS or FS Idle. This behavior
- * is different from EHCI where the host controller driver is required
- * to set this bit to a zero after the resume duration is timed in the
- * driver.
- */
- else if (typeReq == ClearPortFeature &&
- wValue == USB_PORT_FEAT_SUSPEND) {
- temp = ehci_readl(ehci, status_reg);
- if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
- retval = -EPIPE;
- goto done;
- }
-
- if (!(temp & PORT_SUSPEND))
- goto done;
-
- /* Disable disconnect detection during port resume */
- tegra_usb_phy_preresume(hcd->usb_phy);
-
- ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
-
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- /* start resume signalling */
- ehci_writel(ehci, temp | PORT_RESUME, status_reg);
- set_bit(wIndex-1, &ehci->resuming_ports);
-
- spin_unlock_irqrestore(&ehci->lock, flags);
- msleep(20);
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* Poll until the controller clears RESUME and SUSPEND */
- if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
- pr_err("%s: timeout waiting for RESUME\n", __func__);
- if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
- pr_err("%s: timeout waiting for SUSPEND\n", __func__);
-
- ehci->reset_done[wIndex-1] = 0;
- clear_bit(wIndex-1, &ehci->resuming_ports);
-
- tegra->port_resuming = 1;
- goto done;
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- /* Handle the hub control events here */
- return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-
-done:
- spin_unlock_irqrestore(&ehci->lock, flags);
- return retval;
-}
-
-struct dma_aligned_buffer {
- void *kmalloc_ptr;
- void *old_xfer_buffer;
- u8 data[];
-};
-
-static void free_dma_aligned_buffer(struct urb *urb)
-{
- struct dma_aligned_buffer *temp;
- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-
- temp = container_of(urb->transfer_buffer,
- struct dma_aligned_buffer, data);
-
- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
- memcpy(temp->old_xfer_buffer, temp->data, length);
- }
- urb->transfer_buffer = temp->old_xfer_buffer;
- kfree(temp->kmalloc_ptr);
-
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
-}
-
-static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
-{
- struct dma_aligned_buffer *temp, *kmalloc_ptr;
- size_t kmalloc_size;
-
- if (urb->num_sgs || urb->sg ||
- urb->transfer_buffer_length == 0 ||
- !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
- return 0;
-
- /* Allocate a buffer with enough padding for alignment */
- kmalloc_size = urb->transfer_buffer_length +
- sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1;
-
- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
- if (!kmalloc_ptr)
- return -ENOMEM;
-
- /* Position our struct dma_aligned_buffer such that data is aligned */
- temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
- temp->kmalloc_ptr = kmalloc_ptr;
- temp->old_xfer_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
-
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-}
-
-static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- int ret;
-
- ret = alloc_dma_aligned_buffer(urb, mem_flags);
- if (ret)
- return ret;
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
- free_dma_aligned_buffer(urb);
-
- return ret;
-}
-
-static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- usb_hcd_unmap_urb_for_dma(hcd, urb);
- free_dma_aligned_buffer(urb);
-}
-
-static const struct tegra_ehci_soc_config tegra30_soc_config = {
- .has_hostpc = true,
-};
-
-static const struct tegra_ehci_soc_config tegra20_soc_config = {
- .has_hostpc = false,
-};
-
-static const struct of_device_id tegra_ehci_of_match[] = {
- { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
- { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
- { },
-};
-
-static int tegra_ehci_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- const struct tegra_ehci_soc_config *soc_config;
- struct resource *res;
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- struct tegra_ehci_hcd *tegra;
- int err = 0;
- int irq;
- struct usb_phy *u_phy;
-
- match = of_match_device(tegra_ehci_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- soc_config = match->data;
-
- /* Right now device-tree probed devices don't get dma_mask set.
- * Since shared usb code relies on it, set it here for now.
- * Once we have dma capability bindings this can go away.
- */
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return err;
-
- hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "Unable to create HCD\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, hcd);
- ehci = hcd_to_ehci(hcd);
- tegra = (struct tegra_ehci_hcd *)ehci->priv;
-
- hcd->has_tt = 1;
-
- tegra->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tegra->clk)) {
- dev_err(&pdev->dev, "Can't get ehci clock\n");
- err = PTR_ERR(tegra->clk);
- goto cleanup_hcd_create;
- }
-
- tegra->rst = devm_reset_control_get_shared(&pdev->dev, "usb");
- if (IS_ERR(tegra->rst)) {
- dev_err(&pdev->dev, "Can't get ehci reset\n");
- err = PTR_ERR(tegra->rst);
- goto cleanup_hcd_create;
- }
-
- err = clk_prepare_enable(tegra->clk);
- if (err)
- goto cleanup_hcd_create;
-
- err = tegra_reset_usb_controller(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to reset controller\n");
- goto cleanup_clk_en;
- }
-
- u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
- if (IS_ERR(u_phy)) {
- err = -EPROBE_DEFER;
- goto cleanup_clk_en;
- }
- hcd->usb_phy = u_phy;
- hcd->skip_phy_initialization = 1;
-
- tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
- "nvidia,needs-double-reset");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- err = PTR_ERR(hcd->regs);
- goto cleanup_clk_en;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- ehci->caps = hcd->regs + 0x100;
- ehci->has_hostpc = soc_config->has_hostpc;
-
- err = usb_phy_init(hcd->usb_phy);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize phy\n");
- goto cleanup_clk_en;
- }
-
- u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
- GFP_KERNEL);
- if (!u_phy->otg) {
- err = -ENOMEM;
- goto cleanup_phy;
- }
- u_phy->otg->host = hcd_to_bus(hcd);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto cleanup_phy;
- }
-
- otg_set_host(u_phy->otg, &hcd->self);
-
- err = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (err) {
- dev_err(&pdev->dev, "Failed to add USB HCD\n");
- goto cleanup_otg_set_host;
- }
- device_wakeup_enable(hcd->self.controller);
-
- return err;
-
-cleanup_otg_set_host:
- otg_set_host(u_phy->otg, NULL);
-cleanup_phy:
- usb_phy_shutdown(hcd->usb_phy);
-cleanup_clk_en:
- clk_disable_unprepare(tegra->clk);
-cleanup_hcd_create:
- usb_put_hcd(hcd);
- return err;
-}
-
-static int tegra_ehci_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tegra_ehci_hcd *tegra =
- (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
-
- usb_remove_hcd(hcd);
- otg_set_host(hcd->usb_phy->otg, NULL);
- usb_phy_shutdown(hcd->usb_phy);
- clk_disable_unprepare(tegra->clk);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-static struct platform_driver tegra_ehci_driver = {
- .probe = tegra_ehci_probe,
- .remove = tegra_ehci_remove,
- .shutdown = tegra_ehci_hcd_shutdown,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = tegra_ehci_of_match,
- }
-};
-
-static int tegra_ehci_reset(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
- int txfifothresh;
-
- retval = ehci_setup(hcd);
- if (retval)
- return retval;
-
- /*
- * We should really pull this value out of tegra_ehci_soc_config, but
- * to avoid needing access to it, make use of the fact that Tegra20 is
- * the only one so far that needs a value of 10, and Tegra20 is the
- * only one which doesn't set has_hostpc.
- */
- txfifothresh = ehci->has_hostpc ? 0x10 : 10;
- ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
-
- return 0;
-}
-
-static const struct ehci_driver_overrides tegra_overrides __initconst = {
- .extra_priv_size = sizeof(struct tegra_ehci_hcd),
- .reset = tegra_ehci_reset,
-};
-
-static int __init ehci_tegra_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- pr_info(DRV_NAME ": " DRIVER_DESC "\n");
-
- ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
-
- /*
- * The Tegra HW has some unusual quirks, which require Tegra-specific
- * workarounds. We override certain hc_driver functions here to
- * achieve that. We explicitly do not enhance ehci_driver_overrides to
- * allow this more easily, since this is an unusual case, and we don't
- * want to encourage others to override these functions by making it
- * too easy.
- */
-
- tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
- tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
- tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
-
- return platform_driver_register(&tegra_ehci_driver);
-}
-module_init(ehci_tegra_init);
-
-static void __exit ehci_tegra_cleanup(void)
-{
- platform_driver_unregister(&tegra_ehci_driver);
-}
-module_exit(ehci_tegra_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
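A note on the alloc_dma_aligned_buffer()/free_dma_aligned_buffer() pair removed above: it implements a common trick for satisfying a controller's DMA alignment requirement without an aligned allocator, by over-allocating and placing a header struct so that its trailing flexible array lands on the boundary. A minimal userspace sketch of the same arithmetic, with hypothetical names (alloc_aligned, struct hdr) standing in for the kernel helpers and malloc standing in for kmalloc:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGN_N 32			/* mirrors TEGRA_USB_DMA_ALIGN */

struct hdr {
	void *raw;			/* original allocation, for free() */
	unsigned char data[];		/* must start ALIGN_N-aligned */
};

static struct hdr *alloc_aligned(size_t len)
{
	/* worst case: header + payload + (ALIGN_N - 1) slack */
	void *raw = malloc(sizeof(struct hdr) + len + ALIGN_N - 1);
	uintptr_t p;
	struct hdr *h;

	if (!raw)
		return NULL;

	/* round the data address up, then step back over the header */
	p = (uintptr_t)raw + sizeof(struct hdr);
	p = (p + ALIGN_N - 1) & ~(uintptr_t)(ALIGN_N - 1);
	h = (struct hdr *)(p - sizeof(struct hdr));
	h->raw = raw;
	return h;
}

int main(void)
{
	struct hdr *h = alloc_aligned(100);

	assert(h && ((uintptr_t)h->data & (ALIGN_N - 1)) == 0);
	free(h->raw);
	return 0;
}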
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 8e19a5eb5b62..feca826d3f6a 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -236,7 +236,7 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
* Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
* the HCD's stop() method.
*/
-static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
+static void ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
@@ -244,8 +244,6 @@ static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
-
- return 0;
}
static void ohci_hcd_sa1111_shutdown(struct device *_dev)
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
index 3351d07c431f..7a4c2c4ad50e 100644
--- a/drivers/usb/host/xhci-ext-caps.c
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -54,7 +54,8 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
}
if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
- ret = platform_device_add_properties(pdev, role_switch_props);
+ ret = device_create_managed_software_node(&pdev->dev, role_switch_props,
+ NULL);
if (ret) {
dev_err(dev, "failed to register device properties\n");
platform_device_put(pdev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3589b49b6c8b..f2c4ee7c4786 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -592,23 +592,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
return ep->ring;
}
-struct xhci_ring *xhci_stream_id_to_ring(
- struct xhci_virt_device *dev,
- unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep = &dev->eps[ep_index];
-
- if (stream_id == 0)
- return ep->ring;
- if (!ep->stream_info)
- return NULL;
-
- if (stream_id >= ep->stream_info->num_streams)
- return NULL;
- return ep->stream_info->stream_rings[stream_id];
-}
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -994,6 +977,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
if (!dev)
return 0;
+ dev->slot_id = slot_id;
+
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
@@ -1012,6 +997,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
/* Initialize the cancellation list and watchdog timers for each ep */
for (i = 0; i < 31; i++) {
+ dev->eps[i].ep_index = i;
+ dev->eps[i].vdev = dev;
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 45c54d56ecbd..b45e5bf08997 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->endpoint);
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
sch_ep->bw_budget_table[j];
}
}
+ sch_ep->allocated = used;
}
static int check_sch_tt(struct usb_device *udev,
@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
return 0;
}
+static void destroy_sch_ep(struct usb_device *udev,
+ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+ /* only release ep bw check passed by check_sch_bw() */
+ if (sch_ep->allocated)
+ update_bus_bw(sch_bw, sch_ep, 0);
+
+ list_del(&sch_ep->endpoint);
+
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
+}
+
static bool need_bw_sch(struct usb_host_endpoint *ep,
enum usb_device_speed speed, int has_tt)
{
@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
-
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
-
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
return 0;
}
@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
+ destroy_sch_ep(udev, sch_bw, sch_ep);
break;
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+ destroy_sch_ep(udev, sch_bw, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
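The rework above splits admission into two phases: xhci_mtk_add_ep_quirk() now only parks endpoints on bw_ep_chk_list, and xhci_mtk_check_bandwidth() admits the whole set or rejects it, which xhci_mtk_reset_bandwidth() can then unwind via destroy_sch_ep(). A compilable toy model of that check-then-commit shape, with hypothetical names (add_ep, BUDGET) and a singly linked list standing in for the driver's list_head machinery:

#include <stdbool.h>
#include <stddef.h>

#define BUDGET 100			/* stand-in for bus bandwidth */

struct ep {
	int cost;
	bool allocated;			/* mirrors sch_ep->allocated */
	struct ep *next;
};

static struct ep *pending;		/* like mtk->bw_ep_chk_list */
static int used;

void add_ep(struct ep *e)		/* phase 1: defer, no checks yet */
{
	e->next = pending;
	pending = e;
}

int check_bandwidth(void)		/* phase 2: admit all or none */
{
	struct ep *e;
	int want = 0;

	for (e = pending; e; e = e->next)
		want += e->cost;
	if (used + want > BUDGET)
		return -1;		/* nothing committed; caller resets */

	for (e = pending; e; e = e->next) {
		e->allocated = true;
		used += e->cost;
	}
	pending = NULL;
	return 0;
}

void reset_bandwidth(void)		/* drop endpoints still pending */
{
	struct ep *e;

	for (e = pending; e; e = e->next)
		e->allocated = false;
	pending = NULL;
}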
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 8f321f39ab96..fe010cc61f19 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index a93cfe817904..cbb09dfea62e 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
* @repeat: the time gap between two uframes that transfers are
@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
u32 ep_type;
u32 maxpkt;
void *ep;
+ bool allocated;
/*
* mtk xHCI scheduling information put into reserved DWs
* in ep context
@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
struct device *dev;
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
+ struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
bool has_ippc;
int num_u2_ports;
@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
#else
static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
{
}
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+}
#endif
#endif /* _XHCI_MTK_H_ */
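The header above follows the usual pattern for optional kernel features: real prototypes live behind IS_ENABLED(CONFIG_...), and empty static inline stubs take their place otherwise, so call sites compile unchanged either way. The generic shape, with a hypothetical CONFIG_FOO and foo_check() for illustration:

#if IS_ENABLED(CONFIG_FOO)
int foo_check(struct device *dev);
#else
static inline int foo_check(struct device *dev)
{
	return 0;	/* feature compiled out: succeed, do nothing */
}
#endif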
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 60651a50770f..8ca1a235d164 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -8,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct phy *phy;
+ int ret;
+
+ /* Old bindings miss the PHY handle */
+ phy = of_phy_get(dev->of_node, "usb3-phy");
+ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(phy))
+ goto phy_out;
+
+ ret = phy_init(phy);
+ if (ret)
+ goto phy_put;
+
+ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+ if (ret)
+ goto phy_exit;
+
+ ret = phy_power_on(phy);
+ if (ret == -EOPNOTSUPP) {
+ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+ dev_warn(dev, "PHY unsupported by firmware\n");
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+ }
+ if (ret)
+ goto phy_exit;
+
+ phy_power_off(phy);
+phy_exit:
+ phy_exit(phy);
+phy_put:
+ of_phy_put(phy);
+phy_out:
+
+ return 0;
+}
+
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
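The new xhci_mvebu_a3700_plat_setup() above is a textbook goto unwind ladder: phy_init(), phy_set_mode() and phy_power_on() each bail to a label that releases exactly what the earlier steps acquired. A self-contained sketch of that control flow, with dummy step functions standing in for the PHY calls:

#include <stdio.h>

/* dummies standing in for phy_init()/phy_set_mode()/phy_power_on() */
static int step_init(void)     { return 0; }
static int step_set_mode(void) { return 0; }
static int step_power_on(void) { return 0; }
static void undo_init(void)    { puts("undo init"); }

int setup(void)
{
	int ret;

	ret = step_init();
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = step_set_mode();
	if (ret)
		goto err_init;		/* only step_init() to undo */

	ret = step_power_on();
	if (ret)
		goto err_init;

	return 0;

err_init:
	undo_init();
	return ret;
}

int main(void)
{
	return setup();
}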
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..01bf3fcb3eca 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,6 +12,7 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
return 0;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 4d34f6005381..c1edcc9b13ce 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
priv->plat_start(hcd);
}
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->plat_setup)
+ return 0;
+
+ return priv->plat_setup(hcd);
+}
+
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .plat_setup = xhci_mvebu_a3700_plat_setup,
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+ if (priv) {
+ ret = xhci_priv_plat_setup(hcd);
+ if (ret)
+ goto disable_usb_phy;
+ }
+
+ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
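The xhci_priv_plat_setup() wrapper added above is the standard optional-hook dispatch: a NULL plat_setup pointer simply means there is no platform work to do. Reduced to its essentials, with a hypothetical ops struct in place of xhci_plat_priv:

#include <stddef.h>

struct plat_ops {
	int (*setup)(void *ctx);	/* optional; may be NULL */
};

int plat_setup(const struct plat_ops *ops, void *ctx)
{
	if (!ops || !ops->setup)
		return 0;		/* absent hook is a successful no-op */
	return ops->setup(ctx);
}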
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 1fb149d1fbce..561d0b7bce09 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..5e548a1c93ab 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -59,6 +59,10 @@
#include "xhci-trace.h"
#include "xhci-mtk.h"
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed);
+
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
@@ -151,10 +155,11 @@ static void next_trb(struct xhci_hcd *xhci,
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
- * Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
+ unsigned int link_trb_count = 0;
+
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
@@ -170,14 +175,23 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
/* All other rings have link trbs */
if (!trb_is_link(ring->dequeue)) {
- ring->dequeue++;
- ring->num_trbs_free++;
+ if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+ xhci_warn(xhci, "Missing link TRB at end of segment\n");
+ } else {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
}
+
while (trb_is_link(ring->dequeue)) {
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- }
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "Ring is an endless link TRB loop\n");
+ break;
+ }
+ }
out:
trace_xhci_inc_deq(ring);
@@ -186,7 +200,6 @@ out:
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
- * Don't make a ring full of link TRBs. That would be dumb and this would loop.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
* chain bit is set), then set the chain bit in all the following link TRBs.
@@ -206,11 +219,18 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
{
u32 chain;
union xhci_trb *next;
+ unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
if (!trb_is_link(ring->enqueue))
ring->num_trbs_free--;
+
+ if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
+ xhci_err(xhci, "Tried to move enqueue past ring segment\n");
+ return;
+ }
+
next = ++(ring->enqueue);
/* Update the dequeue pointer further if that was a link TRB */
@@ -247,6 +267,11 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
+
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
+ break;
+ }
}
trace_xhci_inc_enq(ring);
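The link_trb_count guards added to inc_deq() and inc_enq() above bound a pointer walk through memory the hardware shares: if the ring's link TRBs ever form a loop, the walk stops after num_segs hops instead of hanging the CPU. The same idea in isolation, with hypothetical segment and function names:

#include <stdio.h>

struct seg {
	struct seg *next;
};

/* returns 0 if the walk comes back to head within num_segs hops */
int walk_ring(struct seg *head, unsigned int num_segs)
{
	struct seg *s = head;
	unsigned int hops = 0;

	do {
		s = s->next;
		if (hops++ > num_segs) {
			fprintf(stderr, "endless link loop\n");
			return -1;	/* corrupted ring: bail out */
		}
	} while (s != head);

	return 0;
}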
@@ -408,9 +433,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
writel(DB_VALUE(ep_index, stream_id), db_addr);
- /* The CPU has better things to do at this point than wait for a
- * write-posting flush. It'll get there soon enough.
- */
+ /* flush the write */
+ readl(db_addr);
}
/* Ring the doorbell for any rings with pending URBs */
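The readl() added after the doorbell write above is the usual fix for posted MMIO: the write may linger in a bridge's posting buffer, and a read from the same device forces it out before the CPU continues. A kernel-style sketch of the pattern (not a standalone program; db is assumed to be a mapped doorbell register):

#include <linux/io.h>
#include <linux/types.h>

static void ring_doorbell_flushed(void __iomem *db, u32 val)
{
	writel(val, db);	/* posted: may sit in a write buffer */
	readl(db);		/* read-back forces the write to the device */
}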
@@ -446,6 +470,46 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
+static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
+ xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+ return NULL;
+ }
+ if (ep_index >= EP_CTX_PER_DEV) {
+ xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
+ return NULL;
+ }
+ if (!xhci->devs[slot_id]) {
+ xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
+ return NULL;
+ }
+
+ return &xhci->devs[slot_id]->eps[ep_index];
+}
+
+static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep,
+ unsigned int stream_id)
+{
+ /* common case, no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (!ep->stream_info)
+ return NULL;
+
+ if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
+ xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
+ stream_id, ep->vdev->slot_id, ep->ep_index);
+ return NULL;
+ }
+
+ return ep->stream_info->stream_rings[stream_id];
+}
+
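xhci_get_virt_ep() and xhci_virt_ep_to_ring() above centralize index validation that used to be scattered or missing: every lookup checks the slot number, endpoint index and slot existence before dereferencing. The generic shape of such a guarded lookup, with hypothetical table sizes and names:

#include <stddef.h>

#define MAX_SLOTS	256
#define EPS_PER_SLOT	31

struct vep { int state; };
struct vdev { struct vep eps[EPS_PER_SLOT]; };

static struct vdev *devs[MAX_SLOTS];

struct vep *get_ep(unsigned int slot, unsigned int ep)
{
	if (slot == 0 || slot >= MAX_SLOTS)
		return NULL;		/* slot 0 is reserved, as in xHCI */
	if (ep >= EPS_PER_SLOT)
		return NULL;
	if (!devs[slot])
		return NULL;		/* slot not enabled */
	return &devs[slot]->eps[ep];
}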
/* Get the right ring for the given slot_id, ep_index and stream_id.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -456,30 +520,11 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep;
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
return NULL;
- }
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
+ return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}
@@ -506,73 +551,55 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
return le64_to_cpu(ep_ctx->deq);
}
-/*
- * Move the xHC's endpoint ring dequeue pointer past cur_td.
- * Record the new state of the xHC's endpoint ring dequeue segment,
- * dequeue pointer, stream id, and new consumer cycle state in state.
- * Update our internal representation of the ring's dequeue pointer.
- *
- * We do this in three jumps:
- * - First we update our new ring state to be the same as when the xHC stopped.
- * - Then we traverse the ring to find the segment that contains
- * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
- * any link TRBs with the toggle cycle bit set.
- * - Finally we move the dequeue state one TRB further, toggling the cycle bit
- * if we've moved it past a link TRB with the toggle cycle bit set.
- *
- * Some of the uses of xhci_generic_trb are grotty, but if they're done
- * with correct __le32 accesses they should work fine. Only users of this are
- * in here.
- */
-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *cur_td,
- struct xhci_dequeue_state *state)
+static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id, struct xhci_td *td)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
+ struct xhci_command *cmd;
struct xhci_segment *new_seg;
union xhci_trb *new_deq;
+ int new_cycle;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
bool td_last_trb_found = false;
+ u32 trb_sct = 0;
+ int ret;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN can't find new dequeue state "
- "for invalid stream ID %u.\n",
- stream_id);
- return;
+ xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
+ stream_id);
+ return -ENODEV;
}
/*
* A cancelled TD can complete with a stall if HW cached the trb.
- * In this case driver can't find cur_td, but if the ring is empty we
+ * In this case driver can't find td, but if the ring is empty we
* can move the dequeue pointer to the current enqueue position.
+ * We shouldn't hit this anymore, as cached cancelled TRBs are given back
+ * after clearing the cache, but stay on the safe side and keep it anyway.
*/
- if (!cur_td) {
+ if (!td) {
if (list_empty(&ep_ring->td_list)) {
- state->new_deq_seg = ep_ring->enq_seg;
- state->new_deq_ptr = ep_ring->enqueue;
- state->new_cycle_state = ep_ring->cycle_state;
- goto done;
+ new_seg = ep_ring->enq_seg;
+ new_deq = ep_ring->enqueue;
+ new_cycle = ep_ring->cycle_state;
+ xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
+ goto deq_found;
} else {
- xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
- return;
+ xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
+ return -EINVAL;
}
}
- /* Dig out the cycle state saved by the xHC during the stop ep cmd */
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Finding endpoint context");
-
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
- state->new_cycle_state = hw_dequeue & 0x1;
- state->stream_id = stream_id;
+ new_cycle = hw_dequeue & 0x1;
/*
* We want to find the pointer, segment and cycle state of the new trb
@@ -587,40 +614,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (td_last_trb_found)
break;
}
- if (new_deq == cur_td->last_trb)
+ if (new_deq == td->last_trb)
td_last_trb_found = true;
if (cycle_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
- state->new_cycle_state ^= 0x1;
+ new_cycle ^= 0x1;
next_trb(xhci, ep_ring, &new_seg, &new_deq);
/* Search wrapped around, bail out */
if (new_deq == ep->ring->dequeue) {
xhci_err(xhci, "Error: Failed finding new dequeue state\n");
- state->new_deq_seg = NULL;
- state->new_deq_ptr = NULL;
- return;
+ return -EINVAL;
}
} while (!cycle_found || !td_last_trb_found);
- state->new_deq_seg = new_seg;
- state->new_deq_ptr = new_deq;
+deq_found:
-done:
/* Don't update the ring cycle state for the producer (us). */
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cycle state = 0x%x", state->new_cycle_state);
+ addr = xhci_trb_virt_to_dma(new_seg, new_deq);
+ if (addr == 0) {
+ xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
+ xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
+ return -EINVAL;
+ }
+
+ if ((ep->ep_state & SET_DEQ_PENDING)) {
+ xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
+ &addr);
+ return -EBUSY;
+ }
+
+ /* This function gets called from contexts where it cannot sleep */
+ cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!cmd) {
+ xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
+ return -ENOMEM;
+ }
+
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ ret = queue_command(xhci, cmd,
+ lower_32_bits(addr) | trb_sct | new_cycle,
+ upper_32_bits(addr),
+ STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
+ EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
+ if (ret < 0) {
+ xhci_free_command(xhci, cmd);
+ return ret;
+ }
+ ep->queued_deq_seg = new_seg;
+ ep->queued_deq_ptr = new_deq;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "New dequeue segment = %p (virtual)",
- state->new_deq_seg);
- addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "New dequeue pointer = 0x%llx (DMA)",
- (unsigned long long) addr);
+ "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
+
+ /* Stop the TD queueing code from ringing the doorbell until
+ * this command completes. The HC won't set the dequeue pointer
+ * if the ring is running, and ringing the doorbell starts the
+ * ring running.
+ */
+ ep->ep_state |= SET_DEQ_PENDING;
+ xhci_ring_cmd_db(xhci);
+ return 0;
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
@@ -699,159 +757,334 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for in transfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
-/*
- * When we get a command completion for a Stop Endpoint Command, we need to
- * unlink any cancelled TDs from the ring. There are two ways to do that:
- *
- * 1. If the HW was in the middle of processing the TD that needs to be
- * cancelled, then we must move the ring's dequeue pointer past the last TRB
- * in the TD with a Set Dequeue Pointer Command.
- * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
- * bit cleared) so that the HW will skip over them.
- */
-static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
- union xhci_trb *trb, struct xhci_event_cmd *event)
+static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_ring *ep_ring, int status)
{
- unsigned int ep_index;
- struct xhci_ring *ep_ring;
- struct xhci_virt_ep *ep;
- struct xhci_td *cur_td = NULL;
- struct xhci_td *last_unlinked_td;
- struct xhci_ep_ctx *ep_ctx;
- struct xhci_virt_device *vdev;
- u64 hw_deq;
- struct xhci_dequeue_state deq_state;
+ struct urb *urb = NULL;
- if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
- if (!xhci->devs[slot_id])
- xhci_warn(xhci, "Stop endpoint command "
- "completion for disabled slot %u\n",
- slot_id);
- return;
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+
+ /* if a bounce buffer was used to align this td then unmap it */
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than the buffer
+ * length, urb->actual_length will be a very big number (since it's
+ * unsigned). Play it safe and say we didn't transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+ urb->transfer_buffer_length, urb->actual_length);
+ urb->actual_length = 0;
+ status = 0;
}
+ /* TD might be removed from td_list if we are giving back a cancelled URB */
+ if (!list_empty(&td->td_list))
+ list_del_init(&td->td_list);
+ /* Giving back a cancelled URB, or if a slated TD completed anyway */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
- memset(&deq_state, 0, sizeof(deq_state));
- ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ inc_td_cnt(urb);
+ /* Giveback the urb when all the tds are completed */
+ if (last_td_in_urb(td)) {
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+ (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length, status);
- vdev = xhci->devs[slot_id];
- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
- trace_xhci_handle_cmd_stop_ep(ep_ctx);
+ /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ status = 0;
+ xhci_giveback_urb_in_irq(xhci, td, status);
+ }
- ep = &xhci->devs[slot_id]->eps[ep_index];
- last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
- struct xhci_td, cancelled_td_list);
+ return 0;
+}
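xhci_td_cleanup() above gives the URB back only once its last TD has finished, using inc_td_cnt()/last_td_in_urb() to count completions. The counting rule on its own, with a hypothetical per-URB counter standing in for the urb private data:

#include <stdbool.h>

struct xurb {
	int num_tds;	/* TDs the URB was split into */
	int tds_done;	/* TDs completed or cancelled so far */
};

/* true once every TD is accounted for and giveback is safe */
bool td_done(struct xurb *u)
{
	return ++u->tds_done == u->num_tds;
}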
- if (list_empty(&ep->cancelled_td_list)) {
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- return;
+
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+ struct xhci_ring *ring;
+ struct xhci_td *td, *tmp_td;
+
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+ cancelled_td_list) {
+
+ /*
+ * Doesn't matter what we pass for status, since the core will
+ * just overwrite it (because the URB has been unlinked).
+ */
+ ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+ if (td->cancel_status == TD_CLEARED)
+ xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+ if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+ return;
+ }
+}
+
+static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, enum xhci_ep_reset_type reset_type)
+{
+ struct xhci_command *command;
+ int ret = 0;
+
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command) {
+ ret = -ENOMEM;
+ goto done;
}
- /* Fix up the ep ring first, so HW stops executing cancelled TDs.
- * We have the xHCI lock, so nothing can modify this list until we drop
- * it. We're also in the event handler, so we can't get re-interrupted
- * if another Stop Endpoint command completes
+ ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
+done:
+ if (ret)
+ xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
+ slot_id, ep_index, ret);
+ return ret;
+}
+
+static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep, unsigned int stream_id,
+ struct xhci_td *td,
+ enum xhci_ep_reset_type reset_type)
+{
+ unsigned int slot_id = ep->vdev->slot_id;
+ int err;
+
+ /*
+ * Avoid resetting endpoint if link is inactive. Can cause host hang.
+ * Device will be reset soon to recover the link so don't do anything
*/
- list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
+ if (ep->vdev->flags & VDEV_PORT_ERROR)
+ return;
+
+ /* add td to cancelled list and let reset ep handler take care of it */
+ if (reset_type == EP_HARD_RESET) {
+ ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+ if (td && list_empty(&td->cancelled_td_list)) {
+ list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ td->cancel_status = TD_HALTED;
+ }
+ }
+
+ if (ep->ep_state & EP_HALTED) {
+ xhci_dbg(xhci, "Reset ep command already pending\n");
+ return;
+ }
+
+ err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
+ if (err)
+ return;
+
+ ep->ep_state |= EP_HALTED;
+
+ xhci_ring_cmd_db(xhci);
+}
+
+/*
+ * Fix up the ep ring first, so HW stops executing cancelled TDs.
+ * We have the xHCI lock, so nothing can modify this list until we drop it.
+ * We're also in the event handler, so we can't get re-interrupted if another
+ * Stop Endpoint command completes.
+ *
+ * Only call this when the ring is not in a running state.
+ */
+
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_td *td = NULL;
+ struct xhci_td *tmp_td = NULL;
+ struct xhci_td *cached_td = NULL;
+ struct xhci_ring *ring;
+ u64 hw_deq;
+ unsigned int slot_id = ep->vdev->slot_id;
+ int err;
+
+ xhci = ep->xhci;
+
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Removing canceled TD starting at 0x%llx (dma).",
(unsigned long long)xhci_trb_virt_to_dma(
- cur_td->start_seg, cur_td->first_trb));
- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
- if (!ep_ring) {
- /* This shouldn't happen unless a driver is mucking
- * with the stream ID after submission. This will
- * leave the TD on the hardware ring, and the hardware
- * will try to execute it, and may access a buffer
- * that has already been freed. In the best case, the
- * hardware will execute it, and the event handler will
- * ignore the completion event for that TD, since it was
- * removed from the td_list for that endpoint. In
- * short, don't muck with the stream ID after
- * submission.
- */
- xhci_warn(xhci, "WARN Cancelled URB %p "
- "has invalid stream ID %u.\n",
- cur_td->urb,
- cur_td->urb->stream_id);
- goto remove_finished_td;
+ td->start_seg, td->first_trb));
+ list_del_init(&td->td_list);
+ ring = xhci_urb_to_transfer_ring(xhci, td->urb);
+ if (!ring) {
+ xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
+ td->urb, td->urb->stream_id);
+ continue;
}
/*
- * If we stopped on the TD we need to cancel, then we have to
+ * If ring stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
- hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
- cur_td->urb->stream_id);
+ hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
+ td->urb->stream_id);
hw_deq &= ~0xf;
- if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
- cur_td->last_trb, hw_deq, false)) {
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
- cur_td->urb->stream_id,
- cur_td, &deq_state);
+ if (trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, hw_deq, false)) {
+ switch (td->cancel_status) {
+ case TD_CLEARED: /* TD is already no-op */
+ case TD_CLEARING_CACHE: /* set TR deq command already queued */
+ break;
+ case TD_DIRTY: /* TD is cached, clear it */
+ case TD_HALTED:
+ /* FIXME stream case, several stopped rings */
+ cached_td = td;
+ break;
+ }
} else {
- td_to_noop(xhci, ep_ring, cur_td, false);
+ td_to_noop(xhci, ring, td, false);
+ td->cancel_status = TD_CLEARED;
+ }
+ }
+ if (cached_td) {
+ cached_td->cancel_status = TD_CLEARING_CACHE;
+
+ err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
+ cached_td->urb->stream_id,
+ cached_td);
+ /* Failed to move past cached td, try just setting it noop */
+ if (err) {
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
}
+ cached_td = NULL;
+ }
+ return 0;
+}
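xhci_invalidate_cancelled_tds() above drives a small per-TD state machine (td->cancel_status): TDs the hardware may have cached get a Set TR Deq command and wait in TD_CLEARING_CACHE, while everything else is turned into no-ops and marked TD_CLEARED immediately. A simplified model of that decision (the enum names mirror the patch; the helper is hypothetical):

#include <stdbool.h>

enum td_cancel_status {
	TD_DIRTY = 0,		/* cancelled, may still be in the HW cache */
	TD_HALTED,		/* the endpoint halted on this TD */
	TD_CLEARING_CACHE,	/* Set TR Deq command queued to skip it */
	TD_CLEARED,		/* TRBs no-oped; safe to give back */
};

/* what the invalidate pass decides for one cancelled TD */
enum td_cancel_status invalidate_one(bool hw_stopped_on_td)
{
	if (hw_stopped_on_td)
		return TD_CLEARING_CACHE;	/* move dequeue past the TD */
	return TD_CLEARED;			/* just no-op its TRBs */
}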
-remove_finished_td:
- /*
- * The event handler won't see a completion for this TD anymore,
- * so remove it from the endpoint ring's TD list. Keep it in
- * the cancelled TD list for URB completion later.
- */
- list_del_init(&cur_td->td_list);
+/*
+ * Returns the TD the endpoint ring halted on.
+ * Only call for non-running rings without streams.
+ */
+static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
+{
+ struct xhci_td *td;
+ u64 hw_deq;
+
+ if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
+ hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
+ hw_deq &= ~0xf;
+ td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
+ if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
+ td->last_trb, hw_deq, false))
+ return td;
}
+ return NULL;
+}
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring. There are two ways to do that:
+ *
+ * 1. If the HW was in the middle of processing the TD that needs to be
+ * cancelled, then we must move the ring's dequeue pointer past the last TRB
+ * in the TD with a Set Dequeue Pointer Command.
+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ * bit cleared) so that the HW will skip over them.
+ */
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 comp_code)
+{
+ unsigned int ep_index;
+ struct xhci_virt_ep *ep;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_td *td = NULL;
+ enum xhci_ep_reset_type reset_type;
+ struct xhci_command *command;
- /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
- if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
- &deq_state);
- xhci_ring_cmd_db(xhci);
- } else {
- /* Otherwise ring the doorbell(s) to restart queued transfers */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+ if (!xhci->devs[slot_id])
+ xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+ slot_id);
+ return;
}
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
+
+ trace_xhci_handle_cmd_stop_ep(ep_ctx);
+
+ if (comp_code == COMP_CONTEXT_STATE_ERROR) {
/*
- * Drop the lock and complete the URBs in the cancelled TD list.
- * New TDs to be cancelled might be added to the end of the list before
- * we can complete all the URBs for the TDs we already unlinked.
- * So stop when we've completed the URB for the last TD we unlinked.
+ * If stop endpoint command raced with a halting endpoint we need to
+ * reset the host side endpoint first.
+ * If the TD we halted on isn't cancelled the TD should be given back
+ * with a proper error code, and the ring dequeue moved past the TD.
+ * In the streams case we can't find hw_deq or the TD we halted on, so
+ * do a soft reset.
+ *
+ * The proper error code is unknown here; it would be -EPIPE if the
+ * device side of the endpoint halted (aka STALL), and -EPROTO if not
+ * (transaction error). We use -EPROTO; if the device is stalled it
+ * should return a stall error on the next transfer, which then returns
+ * -EPIPE, and the device-side stall is noted and cleared by the class
+ * driver.
*/
- do {
- cur_td = list_first_entry(&ep->cancelled_td_list,
- struct xhci_td, cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
+ switch (GET_EP_CTX_STATE(ep_ctx)) {
+ case EP_STATE_HALTED:
+ xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ reset_type = EP_SOFT_RESET;
+ } else {
+ reset_type = EP_HARD_RESET;
+ td = find_halted_td(ep);
+ if (td)
+ td->status = -EPROTO;
+ }
+ /* reset ep, reset handler cleans up cancelled tds */
+ xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ return;
+ case EP_STATE_RUNNING:
+ /* Race, HW handled stop ep cmd before ep was running */
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command)
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
- /* Clean up the cancelled URB */
- /* Doesn't matter what we pass for status, since the core will
- * just overwrite it (because the URB has been unlinked).
- */
- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
- xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
- inc_td_cnt(cur_td->urb);
- if (last_td_in_urb(cur_td))
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ mod_timer(&ep->stop_cmd_timer,
+ jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
+ xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
+ xhci_ring_cmd_db(xhci);
- /* Stop processing the cancelled list if the watchdog timer is
- * running.
- */
- if (xhci->xhc_state & XHCI_STATE_DYING)
return;
- } while (cur_td != last_unlinked_td);
+ default:
+ break;
+ }
+ }
+ /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+ xhci_invalidate_cancelled_tds(ep);
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
- /* Return to the event handler with xhci->lock re-acquired */
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ xhci_giveback_invalidated_tds(ep);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
@@ -1064,17 +1297,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
- struct xhci_virt_device *dev;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_td *td, *tmp_td;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
- dev = xhci->devs[slot_id];
- ep = &dev->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
- ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+ ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
@@ -1082,8 +1316,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
goto cleanup;
}
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
+ slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
trace_xhci_handle_cmd_set_deq(slot_ctx);
trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
@@ -1136,7 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- update_ring_for_set_deq_completion(xhci, dev,
+ update_ring_for_set_deq_completion(xhci, ep->vdev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
@@ -1144,11 +1378,19 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
-
+ /* HW cached TDs cleared from cache, give them back */
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+ cancelled_td_list) {
+ ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+ if (td->cancel_status == TD_CLEARING_CACHE) {
+ td->cancel_status = TD_CLEARED;
+ xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ }
+ }
cleanup:
- dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
- dev->eps[ep_index].queued_deq_seg = NULL;
- dev->eps[ep_index].queued_deq_ptr = NULL;
+ ep->ep_state &= ~SET_DEQ_PENDING;
+ ep->queued_deq_seg = NULL;
+ ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
@@ -1156,13 +1398,16 @@ cleanup:
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
- struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
- vdev = xhci->devs[slot_id];
- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_reset_ep(ep_ctx);
/* This command will only fail if the endpoint wasn't halted,
@@ -1171,27 +1416,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Ignoring reset ep completion code of %u", cmd_comp_code);
- /* HW with the reset endpoint quirk needs to have a configure endpoint
- * command complete before the endpoint can be used. Queue that here
- * because the HW can't handle two commands being queued in a row.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
- struct xhci_command *command;
+	/* Clean up cancelled TDs as the ep is stopped. May queue a Set TR Deq cmd */
+ xhci_invalidate_cancelled_tds(ep);
- command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- return;
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+ xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+ /* Clear our internal halted state */
+ ep->ep_state &= ~EP_HALTED;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Queueing configure endpoint command");
- xhci_queue_configure_endpoint(xhci, command,
- xhci->devs[slot_id]->in_ctx->dma, slot_id,
- false);
- xhci_ring_cmd_db(xhci);
- } else {
- /* Clear our internal halted state */
- xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
- }
+ xhci_giveback_invalidated_tds(ep);
/* if this was a soft reset, then restart */
if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -1226,7 +1459,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
- struct xhci_event_cmd *event, u32 cmd_comp_code)
+ u32 cmd_comp_code)
{
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1244,6 +1477,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
* is not waiting on the configure endpoint command.
*/
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1288,24 +1523,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_addr_dev(slot_ctx);
}
-static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
- struct xhci_event_cmd *event)
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
+ if (!vdev) {
+ xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
+ slot_id);
+ return;
+ }
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_reset_dev(slot_ctx);
xhci_dbg(xhci, "Completed reset device command.\n");
- if (!xhci->devs[slot_id])
- xhci_warn(xhci, "Reset device command completion "
- "for disabled slot %u\n", slot_id);
}
static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
@@ -1398,7 +1636,7 @@ time_out_completed:
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
- int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
@@ -1406,6 +1644,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_command *cmd;
u32 cmd_type;
+ if (slot_id >= MAX_HC_SLOTS) {
+ xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+ return;
+ }
+
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_trb = xhci->cmd_ring->dequeue;
@@ -1466,8 +1709,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
case TRB_CONFIG_EP:
if (!cmd->completion)
- xhci_handle_cmd_config_ep(xhci, slot_id, event,
- cmd_comp_code);
+ xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
break;
case TRB_EVAL_CONTEXT:
break;
@@ -1478,7 +1720,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
if (!cmd->completion)
- xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
+ xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
+ cmd_comp_code);
break;
case TRB_SET_DEQ:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1501,7 +1744,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
*/
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3]));
- xhci_handle_cmd_reset_dev(xhci, slot_id, event);
+ xhci_handle_cmd_reset_dev(xhci, slot_id);
break;
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
@@ -1528,11 +1771,8 @@ event_handled:
}
static void handle_vendor_event(struct xhci_hcd *xhci,
- union xhci_trb *event)
+ union xhci_trb *event, u32 trb_type)
{
- u32 trb_type;
-
- trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd);
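The slot_id taken from the event TRB in handle_cmd_completion() above is hardware-supplied, so it is now range-checked before it can index xhci->devs[]. A minimal sketch of that validation pattern, assuming the driver's usual MAX_HC_SLOTS of 256 and that slot 0 stays reserved, mirroring the xhci_get_virt_ep() helper this series introduces:

#include <stdio.h>

#define MAX_HC_SLOTS 256	/* assumption: 255 usable slots plus reserved slot 0 */

struct virt_dev { int configured; };
static struct virt_dev *devs[MAX_HC_SLOTS];

/* Validate an index parsed from a hardware event before any array access. */
static struct virt_dev *get_virt_dev(unsigned int slot_id)
{
	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
		fprintf(stderr, "Invalid slot_id %u\n", slot_id);
		return NULL;
	}
	return devs[slot_id];	/* may still be NULL for a disabled slot */
}

int main(void)
{
	return get_virt_dev(512) == NULL ? 0 : 1;	/* out of range, rejected */
}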
@@ -1849,37 +2089,6 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
}
}
-static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *td,
- enum xhci_ep_reset_type reset_type)
-{
- struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
- struct xhci_command *command;
-
- /*
- * Avoid resetting endpoint if link is inactive. Can cause host hang.
- * Device will be reset soon to recover the link so don't do anything
- */
- if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
- return;
-
- command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- return;
-
- ep->ep_state |= EP_HALTED;
-
- xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
-
- if (reset_type == EP_HARD_RESET) {
- ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
- xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
- td);
- }
- xhci_ring_cmd_db(xhci);
-}
-
/* Check if an error has halted the endpoint ring. The class driver will
* cleanup the halt for a non-default control endpoint if we indicate a stall.
* However, a babble and other errors also halt the endpoint ring, and the class
@@ -1920,82 +2129,63 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
return 0;
}
-static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_ring *ep_ring, int *status)
-{
- struct urb *urb = NULL;
-
- /* Clean up the endpoint's TD list */
- urb = td->urb;
-
- /* if a bounce buffer was used to align this td then unmap it */
- xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
-
- /* Do one last check of the actual transfer length.
- * If the host controller said we transferred more data than the buffer
- * length, urb->actual_length will be a very big number (since it's
- * unsigned). Play it safe and say we didn't transfer anything.
- */
- if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
- urb->transfer_buffer_length, urb->actual_length);
- urb->actual_length = 0;
- *status = 0;
- }
- list_del_init(&td->td_list);
- /* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list))
- list_del_init(&td->cancelled_td_list);
-
- inc_td_cnt(urb);
- /* Giveback the urb when all the tds are completed */
- if (last_td_in_urb(td)) {
- if ((urb->actual_length != urb->transfer_buffer_length &&
- (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
- (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
- xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
- urb, urb->actual_length,
- urb->transfer_buffer_length, *status);
-
- /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
- *status = 0;
- xhci_giveback_urb_in_irq(xhci, td, *status);
- }
-
- return 0;
-}
-
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
{
- struct xhci_virt_device *xdev;
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
- unsigned int slot_id;
u32 trb_comp_code;
- int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
- trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
- /* The Endpoint Stop Command completion will take care of any
- * stopped TDs. A stopped TD may be restarted, so don't update
+ switch (trb_comp_code) {
+ case COMP_STOPPED_LENGTH_INVALID:
+ case COMP_STOPPED_SHORT_PACKET:
+ case COMP_STOPPED:
+ /*
+ * The "Stop Endpoint" completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
return 0;
- }
- if (trb_comp_code == COMP_STALL_ERROR ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code)) {
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_SPLIT_TRANSACTION_ERROR:
+ /*
+ * If endpoint context state is not halted we might be
+	 * racing with a reset endpoint command issued by an unsuccessful
+ * stop endpoint completion (context error). In that case the
+ * td should be on the cancelled list, and EP_HALTED flag set.
+ *
+	 * Alternatively, it may not be halted: the 0.95 spec states that a
+ * babbling control endpoint should not halt. The 0.96 spec
+ * again says it should. Some HW claims to be 0.95 compliant,
+ * but it halts the control endpoint anyway.
+ */
+ if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
+ /*
+	 * If EP_HALTED is set and the TD is on the cancelled list,
+	 * the TD and dequeue pointer will be handled by the reset
+	 * endpoint command completion
+ */
+ if ((ep->ep_state & EP_HALTED) &&
+ !list_empty(&td->cancelled_td_list)) {
+ xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ td->start_seg, td->first_trb));
+ return 0;
+ }
+ /* endpoint not halted, don't reset it */
+ break;
+ }
+ /* Almost same procedure as for STALL_ERROR below */
+ xhci_clear_hub_tt_buffer(xhci, td, ep);
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_HARD_RESET);
+ return 0;
+ case COMP_STALL_ERROR:
/*
* xhci internal endpoint state will go to a "halt" state for
* any stall, including default control pipe protocol stall.
@@ -2006,18 +2196,24 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* stall later. Hub TT buffer should only be cleared for FS/LS
* devices behind HS hubs for functional stalls.
*/
- if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
+ if (ep->ep_index != 0)
xhci_clear_hub_tt_buffer(xhci, td, ep);
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, EP_HARD_RESET);
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring);
- inc_deq(xhci, ep_ring);
+
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_HARD_RESET);
+
+ return 0; /* xhci_handle_halted_endpoint marked td cancelled */
+ default:
+ break;
}
- return xhci_td_cleanup(xhci, td, ep_ring, status);
+ /* Update ring dequeue pointer */
+ ep_ring->dequeue = td->last_trb;
+ ep_ring->deq_seg = td->last_trb_seg;
+ ep_ring->num_trbs_free += td->num_trbs - 1;
+ inc_deq(xhci, ep_ring);
+
+ return xhci_td_cleanup(xhci, td, ep_ring, td->status);
}
/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
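The dequeue update at the end of finish_td() above replaces the old TRB-by-TRB walk to td->last_trb with an O(1) jump using the last_trb_seg and num_trbs fields cached when the TD was queued. A small model of the accounting, with inc_deq() reduced to a unit step for illustration:

#include <stdio.h>

struct seg { int id; };
struct ring { struct seg *deq_seg; int dequeue; unsigned int num_trbs_free; };
struct td { struct seg *last_trb_seg; int last_trb; unsigned int num_trbs; };

static void advance_past_td(struct ring *r, const struct td *td)
{
	r->dequeue = td->last_trb;		/* land on the TD's final TRB */
	r->deq_seg = td->last_trb_seg;
	r->num_trbs_free += td->num_trbs - 1;	/* all but that final TRB */
	r->dequeue++;				/* stand-in for inc_deq() */
	r->num_trbs_free++;			/* inc_deq() frees the last one */
}

int main(void)
{
	struct seg s = { 0 };
	struct ring r = { &s, 2, 10 };
	struct td td = { &s, 6, 5 };	/* TD spans TRBs 2..6 */

	advance_past_td(&r, &td);
	printf("dequeue=%d free=%u\n", r.dequeue, r.num_trbs_free);	/* 7, 15 */
	return 0;
}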
@@ -2040,21 +2236,15 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
- struct xhci_virt_device *xdev;
- unsigned int slot_id;
- int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
u32 remaining, requested;
u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
requested = td->urb->transfer_buffer_length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2064,13 +2254,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (trb_type != TRB_STATUS) {
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
(trb_type == TRB_DATA) ? "data" : "setup");
- *status = -ESHUTDOWN;
+ td->status = -ESHUTDOWN;
break;
}
- *status = 0;
+ td->status = 0;
break;
case COMP_SHORT_PACKET:
- *status = 0;
+ td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
@@ -2102,7 +2292,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
- trb_comp_code, ep_index);
+ trb_comp_code, ep->ep_index);
fallthrough;
case COMP_STALL_ERROR:
/* Did we transfer part of the data (middle) phase? */
@@ -2134,7 +2324,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length = requested;
finish_td:
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
/*
@@ -2142,9 +2332,8 @@ finish_td:
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
- struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
int idx;
struct usb_iso_packet_descriptor *frame;
@@ -2153,7 +2342,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
u32 remaining, requested, ep_trb_len;
int short_framestatus;
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
@@ -2214,26 +2402,23 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
if (sum_trbs_for_length)
- frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
else
frame->actual_length = requested;
td->urb->actual_length += frame->actual_length;
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep, int status)
{
- struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
int idx;
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
frame = &td->urb->iso_frame_desc[idx];
@@ -2245,11 +2430,12 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
frame->actual_length = 0;
/* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring);
- inc_deq(xhci, ep_ring);
+ ep->ring->dequeue = td->last_trb;
+ ep->ring->deq_seg = td->last_trb_seg;
+ ep->ring->num_trbs_free += td->num_trbs - 1;
+ inc_deq(xhci, ep->ring);
- return xhci_td_cleanup(xhci, td, ep_ring, status);
+ return xhci_td_cleanup(xhci, td, ep->ring, status);
}
/*
@@ -2257,18 +2443,14 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
struct xhci_slot_ctx *slot_ctx;
struct xhci_ring *ep_ring;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
- unsigned int slot_id;
- int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2285,13 +2467,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
}
- *status = 0;
+ td->status = 0;
break;
case COMP_SHORT_PACKET:
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
- *status = 0;
+ td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
td->urb->actual_length = remaining;
@@ -2305,9 +2487,11 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
break;
- *status = 0;
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, EP_SOFT_RESET);
+
+ td->status = 0;
+
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_SOFT_RESET);
return 0;
default:
/* do nothing */
@@ -2326,7 +2510,7 @@ finish_td:
remaining);
td->urb->actual_length = 0;
}
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
/*
@@ -2337,7 +2521,6 @@ finish_td:
static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
{
- struct xhci_virt_device *xdev;
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
unsigned int slot_id;
@@ -2358,16 +2541,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
- xdev = xhci->devs[slot_id];
- if (!xdev) {
- xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
- slot_id);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep) {
+ xhci_err(xhci, "ERROR Invalid Transfer event\n");
goto err_out;
}
- ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci,
@@ -2383,8 +2564,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_USB_TRANSACTION_ERROR:
case COMP_INVALID_STREAM_TYPE_ERROR:
case COMP_INVALID_STREAM_ID_ERROR:
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
- NULL, EP_SOFT_RESET);
+ xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+ EP_SOFT_RESET);
goto cleanup;
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
@@ -2440,7 +2621,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_STALL_ERROR:
xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
ep_index);
- ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_SPLIT_TRANSACTION_ERROR:
@@ -2568,11 +2748,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code)) {
- xhci_cleanup_halted_endpoint(xhci, slot_id,
- ep_index,
- ep_ring->stream_id,
- NULL,
- EP_HARD_RESET);
+ xhci_handle_halted_endpoint(xhci, ep,
+ ep_ring->stream_id,
+ NULL,
+ EP_HARD_RESET);
}
goto cleanup;
}
@@ -2631,7 +2810,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return -ESHUTDOWN;
}
- skip_isoc_td(xhci, td, event, ep, &status);
+ skip_isoc_td(xhci, td, ep, status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
@@ -2659,25 +2838,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* endpoint. Otherwise, the endpoint remains stalled
* indefinitely.
*/
+
if (trb_is_noop(ep_trb)) {
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code))
- xhci_cleanup_halted_endpoint(xhci, slot_id,
- ep_index,
- ep_ring->stream_id,
- td, EP_HARD_RESET);
+ xhci_handle_halted_endpoint(xhci, ep,
+ ep_ring->stream_id,
+ td, EP_HARD_RESET);
goto cleanup;
}
+ td->status = status;
+
/* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
+ process_ctrl_td(xhci, td, ep_trb, event, ep);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- process_isoc_td(xhci, td, ep_trb, event, ep, &status);
+ process_isoc_td(xhci, td, ep_trb, event, ep);
else
- process_bulk_intr_td(xhci, td, ep_trb, event, ep,
- &status);
+ process_bulk_intr_td(xhci, td, ep_trb, event, ep);
cleanup:
handling_skipped_tds = ep->skip &&
trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
@@ -2722,6 +2902,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
+ u32 trb_type;
int ret;
/* Event ring hasn't been allocated yet. */
@@ -2743,31 +2924,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
* speculative reads of the event's flags/data below.
*/
rmb();
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
/* FIXME: Handle more event types. */
- switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
- case TRB_TYPE(TRB_COMPLETION):
+
+ switch (trb_type) {
+ case TRB_COMPLETION:
handle_cmd_completion(xhci, &event->event_cmd);
break;
- case TRB_TYPE(TRB_PORT_STATUS):
+ case TRB_PORT_STATUS:
handle_port_status(xhci, event);
update_ptrs = 0;
break;
- case TRB_TYPE(TRB_TRANSFER):
+ case TRB_TRANSFER:
ret = handle_tx_event(xhci, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
- case TRB_TYPE(TRB_DEV_NOTE):
+ case TRB_DEV_NOTE:
handle_device_notification(xhci, event);
break;
default:
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
- TRB_TYPE(48))
- handle_vendor_event(xhci, event);
+ if (trb_type >= TRB_VENDOR_DEFINED_LOW)
+ handle_vendor_event(xhci, event, trb_type);
else
- xhci_warn(xhci, "ERROR unknown event type %d\n",
- TRB_FIELD_TO_TYPE(
- le32_to_cpu(event->event_cmd.flags)));
+ xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
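The reworked event switch above extracts the raw TRB type once with TRB_FIELD_TO_TYPE() instead of masking inside every case label, which also lets the vendor-defined range be tested numerically against TRB_VENDOR_DEFINED_LOW. A sketch of the decode, assuming the standard xHCI layout with the type in bits 15:10 of the control word:

#include <stdint.h>
#include <stdio.h>

#define TRB_TYPE_BITMASK	(0xfc00U)
#define TRB_TYPE(t)		((uint32_t)(t) << 10)
#define TRB_FIELD_TO_TYPE(f)	(((f) & TRB_TYPE_BITMASK) >> 10)

#define TRB_COMPLETION		33	/* Command Completion Event */
#define TRB_VENDOR_DEFINED_LOW	48

int main(void)
{
	uint32_t flags = TRB_TYPE(TRB_COMPLETION) | 0x1;	/* cycle bit set */
	uint32_t trb_type = TRB_FIELD_TO_TYPE(flags);

	/* comparing raw types avoids re-masking in every case label */
	printf("type=%u vendor=%d\n", trb_type,
	       trb_type >= TRB_VENDOR_DEFINED_LOW);
	return 0;
}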
@@ -2931,6 +3111,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
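The wmb() added to queue_trb() above guarantees that fields 0-2 of the TRB are visible to the controller before field 3, whose cycle bit hands ownership of the TRB to the hardware. A userspace analogue of that ordering, with a C11 release fence standing in for the kernel's wmb():

#include <stdatomic.h>
#include <stdint.h>

struct trb { uint32_t field[4]; };

static void queue_trb(volatile struct trb *trb, uint32_t f0, uint32_t f1,
		      uint32_t f2, uint32_t f3_with_cycle)
{
	trb->field[0] = f0;
	trb->field[1] = f1;
	trb->field[2] = f2;
	atomic_thread_fence(memory_order_release);	/* wmb() stand-in */
	trb->field[3] = f3_with_cycle;	/* HW may consume the TRB from here on */
}

int main(void)
{
	struct trb t;

	queue_trb(&t, 1, 2, 3, 0x1 /* cycle bit */);
	return 0;
}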
@@ -2946,6 +3128,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int num_trbs_needed;
+ unsigned int link_trb_count = 0;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
@@ -3017,7 +3200,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
ep_ring->enq_seg = ep_ring->enq_seg->next;
ep_ring->enqueue = ep_ring->enq_seg->trbs;
+
+ /* prevent infinite loop if all first trbs are link trbs */
+ if (link_trb_count++ > ep_ring->num_segs) {
+ xhci_warn(xhci, "Ring is an endless link TRB loop\n");
+ return -EINVAL;
+ }
}
+
+ if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
+ xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
+ return -EINVAL;
+ }
+
return 0;
}
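The prepare_ring() guard above bounds the link-TRB skip loop by the segment count, so a corrupted ring in which every segment begins with a link TRB can no longer spin forever. A compilable model of the bounded walk:

#include <stdio.h>

struct seg { struct seg *next; int is_link; };

static int skip_link_trbs(struct seg **cur, unsigned int num_segs)
{
	unsigned int link_trb_count = 0;

	while ((*cur)->is_link) {
		*cur = (*cur)->next;
		/* prevent infinite loop if all first trbs are link trbs */
		if (link_trb_count++ > num_segs) {
			fprintf(stderr, "Ring is an endless link TRB loop\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct seg a = { .is_link = 1 }, b = { .is_link = 1 };
	struct seg *cur = &a;

	a.next = &b;
	b.next = &a;	/* corrupted ring: every segment starts with a link */
	return skip_link_trbs(&cur, 2) ? 0 : 1;	/* expect the guard to fire */
}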
@@ -3036,7 +3231,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+ ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
+ stream_id);
if (!ep_ring) {
xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
stream_id);
@@ -3275,12 +3471,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
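The xhci_align_td() hunk above fixes bounce-buffer filling for URBs that carry a plain linear buffer rather than a scatter-gather list: sg_pcopy_to_buffer() only works for the latter, so the linear case needs a direct memcpy() from transfer_buffer + enqd_len. A minimal model of that dispatch, with the sg path stubbed out:

#include <string.h>
#include <stdio.h>

struct urb_model {
	unsigned int num_sgs;
	const char *transfer_buffer;
};

static void fill_bounce(char *bounce, const struct urb_model *urb,
			size_t new_buff_len, size_t enqd_len)
{
	if (urb->num_sgs) {
		/* sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ...) in the driver */
		puts("copy from scatter-gather list");
	} else {
		memcpy(bounce, urb->transfer_buffer + enqd_len, new_buff_len);
	}
}

int main(void)
{
	char bounce[8];
	struct urb_model urb = { 0, "abcdefgh" };

	fill_bounce(bounce, &urb, 4, 2);	/* copies "cdef" */
	printf("%.4s\n", bounce);
	return 0;
}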
@@ -3401,7 +3601,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_IOC;
more_trbs_coming = false;
td->last_trb = ring->enqueue;
-
+ td->last_trb_seg = ring->enq_seg;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
@@ -3427,7 +3627,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
upper_32_bits(send_addr),
length_field,
field);
-
+ td->num_trbs++;
addr += trb_buff_len;
sent_len = trb_buff_len;
@@ -3451,8 +3651,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
urb_priv->td[1].last_trb = ring->enqueue;
+ urb_priv->td[1].last_trb_seg = ring->enq_seg;
field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+ urb_priv->td[1].num_trbs++;
}
check_trb_math(urb, enqd_len);
@@ -3503,6 +3705,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb_priv = urb->hcpriv;
td = &urb_priv->td[0];
+ td->num_trbs = num_trbs;
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3575,6 +3778,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Save the DMA address of the last TRB in the TD */
td->last_trb = ep_ring->enqueue;
+ td->last_trb_seg = ep_ring->enq_seg;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
@@ -3819,7 +4023,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
goto cleanup;
}
td = &urb_priv->td[i];
-
+ td->num_trbs = trbs_per_td;
/* use SIA as default, if frame id is used overwrite it */
sia_frame_id = TRB_SIA;
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -3862,6 +4066,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} else {
more_trbs_coming = false;
td->last_trb = ep_ring->enqueue;
+ td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
if (trb_block_event_intr(xhci, num_tds, i))
field |= TRB_BEI;
@@ -4145,71 +4350,6 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
-/* Set Transfer Ring Dequeue Pointer command */
-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state)
-{
- dma_addr_t addr;
- u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
- u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
- u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
- u32 trb_sct = 0;
- u32 type = TRB_TYPE(TRB_SET_DEQ);
- struct xhci_virt_ep *ep;
- struct xhci_command *cmd;
- int ret;
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
- deq_state->new_deq_seg,
- (unsigned long long)deq_state->new_deq_seg->dma,
- deq_state->new_deq_ptr,
- (unsigned long long)xhci_trb_virt_to_dma(
- deq_state->new_deq_seg, deq_state->new_deq_ptr),
- deq_state->new_cycle_state);
-
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- if (addr == 0) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
- deq_state->new_deq_seg, deq_state->new_deq_ptr);
- return;
- }
- ep = &xhci->devs[slot_id]->eps[ep_index];
- if ((ep->ep_state & SET_DEQ_PENDING)) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
- return;
- }
-
- /* This function gets called from contexts where it cannot sleep */
- cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!cmd)
- return;
-
- ep->queued_deq_seg = deq_state->new_deq_seg;
- ep->queued_deq_ptr = deq_state->new_deq_ptr;
- if (deq_state->stream_id)
- trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
- ret = queue_command(xhci, cmd,
- lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
- upper_32_bits(addr), trb_stream_id,
- trb_slot_id | trb_ep_index | type, false);
- if (ret < 0) {
- xhci_free_command(xhci, cmd);
- return;
- }
-
- /* Stop the TD queueing code from ringing the doorbell until
- * this command completes. The HC won't set the dequeue pointer
- * if the ring is running, and ringing the doorbell starts the
- * ring running.
- */
- ep->ep_state |= SET_DEQ_PENDING;
-}
-
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index,
enum xhci_ep_reset_type reset_type)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 934be1686352..50bb91b6a4b8 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..bd27bd670104 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1440,15 +1440,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
-/* Find the flag for this endpoint (for use in the control context). Use the
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
- * bit 1, etc.
- */
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
-{
- return 1 << (ep_index + 1);
-}
-
/* Compute the last valid endpoint context index. Basically, this is the
* endpoint index plus one. For slot contexts with more than one valid endpoint,
* we find the most significant bit set in the added contexts flags.
@@ -1810,7 +1801,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ /* TD can already be on cancelled list if ep halted on it */
+ if (list_empty(&td->cancelled_td_list)) {
+ td->cancel_status = TD_DIRTY;
+ list_add_tail(&td->cancelled_td_list,
+ &ep->cancelled_td_list);
+ }
}
/* Queue a stop endpoint command, but only if this is
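The dequeue fix above relies on the kernel list convention that a node which was never inserted, or was removed with list_del_init(), points at itself, so list_empty() on the node itself tells whether the TD is already on the cancelled list. A self-contained model with minimal list helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Insert only if this TD node is not already queued somewhere. */
static void cancel_td(struct list_head *td_node, struct list_head *cancelled)
{
	if (list_empty(td_node))
		list_add_tail(td_node, cancelled);
}

int main(void)
{
	struct list_head cancelled, td;

	INIT_LIST_HEAD(&cancelled);
	INIT_LIST_HEAD(&td);
	cancel_td(&td, &cancelled);
	cancel_td(&td, &cancelled);	/* second call is a no-op */
	printf("queued once: %d\n", !list_empty(&cancelled) &&
	       cancelled.next == cancelled.prev);
	return 0;
}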
@@ -2985,7 +2981,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
@@ -3083,7 +3079,7 @@ command_cleanup:
return ret;
}
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
@@ -3119,84 +3115,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
}
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state)
-{
- struct xhci_input_control_ctx *ctrl_ctx;
- struct xhci_container_ctx *in_ctx;
- struct xhci_ep_ctx *ep_ctx;
- u32 added_ctxs;
- dma_addr_t addr;
-
- in_ctx = xhci->devs[slot_id]->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
- if (!ctrl_ctx) {
- xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
- __func__);
- return;
- }
-
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ep_index);
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- if (addr == 0) {
- xhci_warn(xhci, "WARN Cannot submit config ep after "
- "reset ep command\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
- deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- return;
- }
- ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
-
- added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
- xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ctrl_ctx,
- added_ctxs, added_ctxs);
-}
-
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_td *td)
-{
- struct xhci_dequeue_state deq_state;
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Cleaning up stalled endpoint ring");
- /* We need to move the HW's dequeue pointer past this TD,
- * or it will attempt to resend it on the next doorbell ring.
- */
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
- &deq_state);
-
- if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
- return;
-
- /* HW with the reset endpoint quirk will use the saved dequeue state to
- * issue a configure endpoint command later.
- */
- if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Queueing new dequeue state");
- xhci_queue_new_dequeue_state(xhci, slot_id,
- ep_index, &deq_state);
- } else {
- /* Better hope no one uses the input context between now and the
- * reset endpoint completion!
- * XXX: No idea how this hardware will react when stream rings
- * are enabled.
- */
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Setting up input context for "
- "configure endpoint command");
- xhci_setup_input_ctx_for_quirk(xhci, slot_id,
- ep_index, &deq_state);
- }
-}
-
static void xhci_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *host_ep)
{
@@ -4770,19 +4688,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
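The reordering above means the service-interval check now compares the ESIT against the U1 timeout that will actually be programmed (which differs from udev->u1_params.sel on Intel hosts) rather than against the exit latency alone. A numeric sketch with made-up values:

#include <stdio.h>

int main(void)
{
	/* hypothetical endpoint: 250 us service interval, 300 us U1 timeout */
	unsigned long long timeout_ns = 300 * 1000;	/* u1_params.sel analogue */
	unsigned long long esit_ns = 250 * 1000;

	/* link could never stay in U1 for the full timeout between intervals */
	if (esit_ns <= timeout_ns) {
		puts("Disable U1, ESIT shorter than exit latency");
		return 0;
	}
	printf("U1 timeout: %llu us\n", (timeout_ns + 999) / 1000);
	return 0;
}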
@@ -4834,19 +4752,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
@@ -5510,6 +5428,10 @@ void xhci_init_driver(struct hc_driver *drv,
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
+ if (over->check_bandwidth)
+ drv->check_bandwidth = over->check_bandwidth;
+ if (over->reset_bandwidth)
+ drv->reset_bandwidth = over->reset_bandwidth;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
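xhci_init_driver() above copies a glue driver's hooks only when they are non-NULL, so platforms that do not override check_bandwidth/reset_bandwidth keep the generic implementations. A sketch of that override pattern:

#include <stdio.h>

struct hc_ops {
	int (*check_bandwidth)(void);
	void (*reset_bandwidth)(void);
};

static void init_driver(struct hc_ops *drv, const struct hc_ops *over)
{
	if (!over)
		return;
	if (over->check_bandwidth)
		drv->check_bandwidth = over->check_bandwidth;
	if (over->reset_bandwidth)
		drv->reset_bandwidth = over->reset_bandwidth;
}

static int my_check(void) { puts("glue check_bandwidth"); return 0; }

int main(void)
{
	struct hc_ops drv = { 0 };
	struct hc_ops over = { .check_bandwidth = my_check };

	init_driver(&drv, &over);
	if (drv.check_bandwidth)
		drv.check_bandwidth();
	/* reset_bandwidth stays NULL: the generic path is kept */
	return drv.reset_bandwidth != NULL;
}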
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 25e57bc9c3cc..d41de5dc0452 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -918,6 +918,8 @@ struct xhci_bw_info {
#define SS_BW_RESERVED 10
struct xhci_virt_ep {
+ struct xhci_virt_device *vdev; /* parent */
+ unsigned int ep_index;
struct xhci_ring *ring;
/* Related to endpoints that are configured to use stream IDs only */
struct xhci_stream_info *stream_info;
@@ -993,8 +995,10 @@ struct xhci_interval_bw_table {
unsigned int ss_bw_out;
};
+#define EP_CTX_PER_DEV 31
struct xhci_virt_device {
+ int slot_id;
struct usb_device *udev;
/*
* Commands to the hardware are passed an "input context" that
@@ -1007,7 +1011,7 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
- struct xhci_virt_ep eps[31];
+ struct xhci_virt_ep eps[EP_CTX_PER_DEV];
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;
@@ -1415,7 +1419,7 @@ union xhci_trb {
/* MFINDEX Wrap Event - microframe counter wrapped */
#define TRB_MFINDEX_WRAP 39
/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
-
+#define TRB_VENDOR_DEFINED_LOW 48
/* Nec vendor-specific command completion event. */
#define TRB_NEC_CMD_COMP 48
/* Get NEC firmware revision. */
@@ -1535,16 +1539,27 @@ struct xhci_segment {
unsigned int bounce_len;
};
+enum xhci_cancelled_td_status {
+ TD_DIRTY = 0,
+ TD_HALTED,
+ TD_CLEARING_CACHE,
+ TD_CLEARED,
+};
+
struct xhci_td {
struct list_head td_list;
struct list_head cancelled_td_list;
+ int status;
+ enum xhci_cancelled_td_status cancel_status;
struct urb *urb;
struct xhci_segment *start_seg;
union xhci_trb *first_trb;
union xhci_trb *last_trb;
+ struct xhci_segment *last_trb_seg;
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
+ unsigned int num_trbs;
};
/* xHCI command default timeout value */
@@ -1556,13 +1571,6 @@ struct xhci_cd {
union xhci_trb *cmd_trb;
};
-struct xhci_dequeue_state {
- struct xhci_segment *new_deq_seg;
- union xhci_trb *new_deq_ptr;
- int new_cycle_state;
- unsigned int stream_id;
-};
-
enum xhci_ring_type {
TYPE_CTRL = 0,
TYPE_ISOC,
@@ -1920,6 +1928,8 @@ struct xhci_driver_overrides {
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
int (*start)(struct usb_hcd *hcd);
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
};
#define XHCI_CFC_DELAY 10
@@ -2046,10 +2056,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
-struct xhci_ring *xhci_stream_id_to_ring(
- struct xhci_virt_device *dev,
- unsigned int ep_index,
- unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags);
struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
@@ -2074,6 +2080,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
@@ -2121,13 +2129,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *cur_td,
- struct xhci_dequeue_state *state);
-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_td *td);
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 29fe5771c21b..507deef1f709 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -396,7 +396,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
}
static int usb251xb_get_ofdata(struct usb251xb *hub,
- struct usb251xb_data *data)
+ const struct usb251xb_data *data)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
@@ -630,7 +630,7 @@ static const struct of_device_id usb251xb_of_match[] = {
MODULE_DEVICE_TABLE(of, usb251xb_of_match);
#else /* CONFIG_OF */
static int usb251xb_get_ofdata(struct usb251xb *hub,
- struct usb251xb_data *data)
+ const struct usb251xb_data *data)
{
return 0;
}
@@ -647,13 +647,11 @@ static int usb251xb_probe(struct usb251xb *hub)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id = of_match_device(usb251xb_of_match,
- dev);
+ const struct usb251xb_data *usb_data = of_device_get_match_data(dev);
int err;
- if (np && of_id) {
- err = usb251xb_get_ofdata(hub,
- (struct usb251xb_data *)of_id->data);
+ if (np && usb_data) {
+ err = usb251xb_get_ofdata(hub, usb_data);
if (err) {
dev_err(dev, "failed to get ofdata: %d\n", err);
return err;
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 48099c6bf04c..330f494cd158 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -409,13 +409,18 @@ static int __init usb3503_init(void)
int err;
err = i2c_add_driver(&usb3503_i2c_driver);
- if (err != 0)
+ if (err) {
pr_err("usb3503: Failed to register I2C driver: %d\n", err);
+ return err;
+ }
err = platform_driver_register(&usb3503_platform_driver);
- if (err != 0)
+ if (err) {
pr_err("usb3503: Failed to register platform driver: %d\n",
err);
+ i2c_del_driver(&usb3503_i2c_driver);
+ return err;
+ }
return 0;
}
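The usb3503_init() fix above makes both registration failures propagate and unwinds the I2C driver registration when the platform driver fails to register, instead of falling through to "return 0" regardless. The shape of that pattern, with stubs in place of the driver calls:

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { puts("unregister A"); }
static int register_b(void) { return -1; }	/* simulate failure */

static int init(void)
{
	int err;

	err = register_a();
	if (err) {
		fprintf(stderr, "failed to register A: %d\n", err);
		return err;
	}

	err = register_b();
	if (err) {
		fprintf(stderr, "failed to register B: %d\n", err);
		unregister_a();		/* unwind the earlier registration */
		return err;
	}

	return 0;
}

int main(void) { return init() ? 1 : 0; }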
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index c4fe1f4cd17a..5b7d576bf6ee 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -116,13 +116,13 @@ static int jz4740_musb_init(struct musb *musb)
if (IS_ERR(musb->xceiv)) {
err = PTR_ERR(musb->xceiv);
if (err != -EPROBE_DEFER)
- dev_err(dev, "No transceiver configured: %d", err);
+ dev_err(dev, "No transceiver configured: %d\n", err);
return err;
}
glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
if (IS_ERR(glue->role_sw)) {
- dev_err(dev, "Failed to register USB role switch");
+ dev_err(dev, "Failed to register USB role switch\n");
return PTR_ERR(glue->role_sw);
}
@@ -205,26 +205,26 @@ static int jz4740_probe(struct platform_device *pdev)
pdata = of_device_get_match_data(dev);
if (!pdata) {
- dev_err(dev, "missing platform data");
+ dev_err(dev, "missing platform data\n");
return -EINVAL;
}
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
- dev_err(dev, "failed to allocate musb device");
+ dev_err(dev, "failed to allocate musb device\n");
return -ENOMEM;
}
clk = devm_clk_get(dev, "udc");
if (IS_ERR(clk)) {
- dev_err(dev, "failed to get clock");
+ dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(clk);
goto err_platform_device_put;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable clock");
+ dev_err(dev, "failed to enable clock\n");
goto err_platform_device_put;
}
@@ -240,19 +240,19 @@ static int jz4740_probe(struct platform_device *pdev)
ret = platform_device_add_resources(musb, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(dev, "failed to add resources");
+ dev_err(dev, "failed to add resources\n");
goto err_clk_disable;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
- dev_err(dev, "failed to add platform_data");
+ dev_err(dev, "failed to add platform_data\n");
goto err_clk_disable;
}
ret = platform_device_add(musb);
if (ret) {
- dev_err(dev, "failed to register musb device");
+ dev_err(dev, "failed to register musb device\n");
goto err_clk_disable;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 849e0b770130..1cd87729ba60 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2240,32 +2240,35 @@ int musb_queue_resume_work(struct musb *musb,
{
struct musb_pending_work *w;
unsigned long flags;
+ bool is_suspended;
int error;
if (WARN_ON(!callback))
return -EINVAL;
- if (pm_runtime_active(musb->controller))
- return callback(musb, data);
+ spin_lock_irqsave(&musb->list_lock, flags);
+ is_suspended = musb->is_runtime_suspended;
+
+ if (is_suspended) {
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
- w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
- if (!w)
- return -ENOMEM;
+ w->callback = callback;
+ w->data = data;
- w->callback = callback;
- w->data = data;
- spin_lock_irqsave(&musb->list_lock, flags);
- if (musb->is_runtime_suspended) {
list_add_tail(&w->node, &musb->pending_list);
error = 0;
- } else {
- dev_err(musb->controller, "could not add resume work %p\n",
- callback);
- devm_kfree(musb->controller, w);
- error = -EINPROGRESS;
}
+
+out_unlock:
spin_unlock_irqrestore(&musb->list_lock, flags);
+ if (!is_suspended)
+ error = callback(musb, data);
+
return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
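The musb_queue_resume_work() rework above samples is_runtime_suspended under the same spinlock that guards the pending list, closing the window in which the runtime-PM state could flip between the check and the list insertion; the immediate callback still runs outside the lock. A pthread-based model of that shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int is_runtime_suspended;

static int queue_resume_work(int (*callback)(void *), void *data)
{
	int suspended;
	int error = 0;

	pthread_mutex_lock(&lock);
	suspended = is_runtime_suspended;
	if (suspended) {
		/* allocate and append the work item under the lock */
		puts("queued for resume");
	}
	pthread_mutex_unlock(&lock);

	if (!suspended)
		error = callback(data);	/* run now, without holding the lock */

	return error;
}

static int cb(void *data) { (void)data; puts("ran immediately"); return 0; }

int main(void)
{
	queue_resume_work(cb, NULL);
	is_runtime_suspended = 1;
	queue_resume_work(cb, NULL);
	return 0;
}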
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f62ffaede1ab..ef374d4dd94a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -451,7 +451,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
return;
}
- if (request) {
+ if (req) {
trace_musb_req_tx(req);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 0aacfc8be5a1..7acd1635850d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -321,8 +321,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data)
musb_channel->channel.status =
MUSB_DMA_STATUS_BUS_ABORT;
} else {
- u8 devctl;
-
addr = musb_read_hsdma_addr(mbase,
bchannel);
channel->actual_len = addr
@@ -336,8 +334,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data)
< musb_channel->len) ?
"=> reconfig 0" : "=> complete");
- devctl = musb_readb(mbase, MUSB_DEVCTL);
-
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 67b39dc62b37..8a262c5a0408 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -714,14 +714,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
struct clk *clk;
struct mxs_phy *mxs_phy;
int ret;
- const struct of_device_id *of_id;
struct device_node *np = pdev->dev.of_node;
u32 val;
- of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -797,7 +792,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.charger_detect = mxs_phy_charger_detect;
mxs_phy->clk = clk;
- mxs_phy->data = of_id->data;
+ mxs_phy->data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mxs_phy);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 03a333797382..a48452a6172b 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -45,6 +45,7 @@
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_RESUME_EN BIT(2)
#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
#define USB_SUSP_CLR BIT(5)
@@ -56,6 +57,15 @@
#define USB_SUSP_SET BIT(14)
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+#define USB_PHY_VBUS_SENSORS 0x404
+#define B_SESS_VLD_WAKEUP_EN BIT(6)
+#define B_VBUS_VLD_WAKEUP_EN BIT(14)
+#define A_SESS_VLD_WAKEUP_EN BIT(22)
+#define A_VBUS_VLD_WAKEUP_EN BIT(30)
+
+#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define VBUS_WAKEUP_WAKEUP_EN BIT(30)
+
#define USB1_LEGACY_CTRL 0x410
#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
@@ -334,6 +344,11 @@ static int utmip_pad_power_on(struct tegra_usb_phy *phy)
writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
+ if (phy->pad_wakeup) {
+ phy->pad_wakeup = false;
+ utmip_pad_count--;
+ }
+
spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
@@ -359,6 +374,17 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
goto ulock;
}
+ /*
+	 * In accordance with the TRM, the OTG and bias pad circuits could be
+	 * turned off to save power if wake is enabled, but the VBUS-change
+	 * detection method is board-specific and these circuits may need to be
+	 * enabled to generate the wakeup event, hence we just keep them both enabled.
+ */
+ if (phy->wakeup_enabled) {
+ phy->pad_wakeup = true;
+ utmip_pad_count++;
+ }
+
if (--utmip_pad_count == 0) {
val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
@@ -503,11 +529,24 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
+ val = readl_relaxed(base + USB_SUSP_CTRL);
+ val &= ~USB_WAKE_ON_RESUME_EN;
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
writel_relaxed(val, base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val &= ~VBUS_WAKEUP_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
+ val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
+ val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN);
+ writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
+
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
@@ -605,31 +644,51 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
utmi_phy_clk_disable(phy);
- if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ /* PHY won't resume if reset is asserted */
+ if (!phy->wakeup_enabled) {
val = readl_relaxed(base + USB_SUSP_CTRL);
- val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
- val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
+ val |= UTMIP_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl_relaxed(base + USB_SUSP_CTRL);
- val |= UTMIP_RESET;
- writel_relaxed(val, base + USB_SUSP_CTRL);
-
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
- val = readl_relaxed(base + UTMIP_XCVR_CFG0);
- val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
- UTMIP_FORCE_PDZI_POWERDOWN;
- writel_relaxed(val, base + UTMIP_XCVR_CFG0);
+ if (!phy->wakeup_enabled) {
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
+ val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
+ UTMIP_FORCE_PDZI_POWERDOWN;
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
+ }
val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
writel_relaxed(val, base + UTMIP_XCVR_CFG1);
+ if (phy->wakeup_enabled) {
+ val = readl_relaxed(base + USB_SUSP_CTRL);
+ val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
+ val |= USB_WAKEUP_DEBOUNCE_COUNT(5);
+ val |= USB_WAKE_ON_RESUME_EN;
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+
+ /*
+	 * Ask the VBUS sensor to generate a wake event once a cable
+	 * is connected.
+ */
+ if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val |= VBUS_WAKEUP_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
+ val |= A_VBUS_VLD_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
+ }
+ }
+
return utmip_pad_power_off(phy);
}
@@ -765,6 +824,15 @@ static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
usleep_range(5000, 6000);
clk_disable_unprepare(phy->clk);
+ /*
+	 * Wakeup is currently unimplemented for ULPI, so the PHY needs to
+	 * be force-resumed.
+ */
+ if (WARN_ON_ONCE(phy->wakeup_enabled)) {
+ ulpi_phy_power_on(phy);
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -784,6 +852,9 @@ static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
phy->powered_on = true;
+ /* Let PHY settle down */
+ usleep_range(2000, 2500);
+
return 0;
}
@@ -824,6 +895,15 @@ static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
phy->freq = NULL;
}
+static int tegra_usb_phy_set_wakeup(struct usb_phy *u_phy, bool enable)
+{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+ phy->wakeup_enabled = enable;
+
+ return 0;
+}
+
static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
@@ -1195,6 +1275,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->u_phy.dev = &pdev->dev;
tegra_phy->u_phy.init = tegra_usb_phy_init;
tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+ tegra_phy->u_phy.set_wakeup = tegra_usb_phy_set_wakeup;
tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ac9a81ae8216..e6fa13701808 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a21ff5ab6df9..de5c01257060 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -633,6 +633,15 @@ config USB_SERIAL_UPD78F0730
To compile this driver as a module, choose M here: the
module will be called upd78f0730.
+config USB_SERIAL_XR
+ tristate "USB MaxLinear/Exar USB to Serial driver"
+ help
+ Say Y here if you want to use MaxLinear/Exar USB to Serial converter
+ devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xr_serial.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 50c53aed787a..c7bb1a88173e 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -61,4 +61,5 @@ obj-$(CONFIG_USB_SERIAL_UPD78F0730) += upd78f0730.o
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WISHBONE) += wishbone-serial.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
+obj-$(CONFIG_USB_SERIAL_XR) += xr_serial.o
obj-$(CONFIG_USB_SERIAL_XSENS_MT) += xsens_mt.o
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 71a9206ea1e2..b9bedfe9bd09 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -178,15 +178,13 @@ static int ark3116_port_probe(struct usb_serial_port *port)
return 0;
}
-static int ark3116_port_remove(struct usb_serial_port *port)
+static void ark3116_port_remove(struct usb_serial_port *port)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* device is closed, so URBs and DMA should be down */
mutex_destroy(&priv->hw_lock);
kfree(priv);
-
- return 0;
}
static void ark3116_set_termios(struct tty_struct *tty,
@@ -719,9 +717,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
* hardware bug or something.
*
* According to a patch provided here
- * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
- * as an IrDA dongle. Since I do not have such a thing, I could not
- * investigate that aspect. However, I can speculate ;-).
+ * https://lore.kernel.org/lkml/200907261419.50702.linux@rainbow-software.org
+ * the ARK3116 can also be used as an IrDA dongle. Since I do not have
+ * such a thing, I could not investigate that aspect. However, I can
+ * speculate ;-).
*
* - IrDA encodes data differently than RS232. Most likely, one of
* the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 9bb123ab9bc9..ed9193f3bb1a 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -37,7 +37,7 @@
/* function prototypes for a Belkin USB Serial Adapter F5U103 */
static int belkin_sa_port_probe(struct usb_serial_port *port);
-static int belkin_sa_port_remove(struct usb_serial_port *port);
+static void belkin_sa_port_remove(struct usb_serial_port *port);
static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
@@ -134,14 +134,12 @@ static int belkin_sa_port_probe(struct usb_serial_port *port)
return 0;
}
-static int belkin_sa_port_remove(struct usb_serial_port *port)
+static void belkin_sa_port_remove(struct usb_serial_port *port)
{
struct belkin_sa_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int belkin_sa_open(struct tty_struct *tty,
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index eb0195cf37dd..7133818a58b9 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -16,19 +16,13 @@
static int usb_serial_device_match(struct device *dev,
struct device_driver *drv)
{
- struct usb_serial_driver *driver;
- const struct usb_serial_port *port;
+ const struct usb_serial_port *port = to_usb_serial_port(dev);
+ struct usb_serial_driver *driver = to_usb_serial_driver(drv);
/*
* drivers are already assigned to ports in serial_probe so it's
* a simple check here.
*/
- port = to_usb_serial_port(dev);
- if (!port)
- return 0;
-
- driver = to_usb_serial_driver(drv);
-
if (driver == port->serial->type)
return 1;
@@ -37,16 +31,12 @@ static int usb_serial_device_match(struct device *dev,
static int usb_serial_device_probe(struct device *dev)
{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver;
- struct usb_serial_port *port;
struct device *tty_dev;
int retval = 0;
int minor;
- port = to_usb_serial_port(dev);
- if (!port)
- return -ENODEV;
-
/* make sure suspend/resume doesn't race against port_probe */
retval = usb_autopm_get_interface(port->serial->interface);
if (retval)
@@ -86,16 +76,11 @@ err_autopm_put:
static int usb_serial_device_remove(struct device *dev)
{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver;
- struct usb_serial_port *port;
- int retval = 0;
int minor;
int autopm_err;
- port = to_usb_serial_port(dev);
- if (!port)
- return -ENODEV;
-
/*
* Make sure suspend/resume doesn't race against port_remove.
*
@@ -109,7 +94,7 @@ static int usb_serial_device_remove(struct device *dev)
driver = port->serial->type;
if (driver->port_remove)
- retval = driver->port_remove(port);
+ driver->port_remove(port);
dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
driver->description, minor);
@@ -117,7 +102,7 @@ static int usb_serial_device_remove(struct device *dev)
if (!autopm_err)
usb_autopm_put_interface(port->serial->interface);
- return retval;
+ return 0;
}
static ssize_t new_id_store(struct device_driver *driver,
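
With the return value now ignored by the bus code, port_remove is a pure cleanup hook, and every driver conversion below follows the same shape. A representative sketch, with example_private standing in for a driver's hypothetical per-port struct:

    static void example_port_remove(struct usb_serial_port *port)
    {
            struct example_private *priv = usb_get_serial_port_data(port);

            /* The port is already closed here, so its data is idle. */
            kfree(priv);
    }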
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 28deaaec581f..8d997b71056f 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -419,14 +419,12 @@ error: kfree(priv);
return r;
}
-static int ch341_port_remove(struct usb_serial_port *port)
+static void ch341_port_remove(struct usb_serial_port *port)
{
struct ch341_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int ch341_carrier_raised(struct usb_serial_port *port)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbb10dfc56e3..9e1c609792fb 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -3,6 +3,7 @@
* Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
+ * Copyright (C) 2010-2021 Johan Hovold (johan@kernel.org)
*
* Support to set flow control line levels using TIOCMGET and TIOCMSET
* thanks to Karl Hiramoto karl@hiramoto.org. RTSCTS hardware flow
@@ -16,9 +17,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/usb.h>
-#include <linux/uaccess.h>
#include <linux/usb/serial.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
@@ -45,7 +44,7 @@ static int cp210x_attach(struct usb_serial *);
static void cp210x_disconnect(struct usb_serial *);
static void cp210x_release(struct usb_serial *);
static int cp210x_port_probe(struct usb_serial_port *);
-static int cp210x_port_remove(struct usb_serial_port *);
+static void cp210x_port_remove(struct usb_serial_port *);
static void cp210x_dtr_rts(struct usb_serial_port *port, int on);
static void cp210x_process_read_urb(struct urb *urb);
static void cp210x_enable_event_mode(struct usb_serial_port *port);
@@ -61,6 +60,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +201,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
@@ -266,7 +267,12 @@ struct cp210x_port_private {
u8 bInterfaceNumber;
bool event_mode;
enum cp210x_event_state event_state;
- u8 lsr;
+ u8 lsr;
+
+ struct mutex mutex;
+ bool crtscts;
+ bool dtr;
+ bool rts;
};
static struct usb_serial_driver cp210x_device = {
@@ -377,6 +383,16 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CONTROL_WRITE_DTR 0x0100
#define CONTROL_WRITE_RTS 0x0200
+/* CP210X_(GET|SET)_CHARS */
+struct cp210x_special_chars {
+ u8 bEofChar;
+ u8 bErrorChar;
+ u8 bBreakChar;
+ u8 bEventChar;
+ u8 bXonChar;
+ u8 bXoffChar;
+};
+
/* CP210X_VENDOR_SPECIFIC values */
#define CP210X_READ_2NCONFIG 0x000E
#define CP210X_READ_LATCH 0x00C2
@@ -435,17 +451,14 @@ struct cp210x_flow_ctl {
/* cp210x_flow_ctl::ulControlHandshake */
#define CP210X_SERIAL_DTR_MASK GENMASK(1, 0)
-#define CP210X_SERIAL_DTR_SHIFT(_mode) (_mode)
+#define CP210X_SERIAL_DTR_INACTIVE (0 << 0)
+#define CP210X_SERIAL_DTR_ACTIVE (1 << 0)
+#define CP210X_SERIAL_DTR_FLOW_CTL (2 << 0)
#define CP210X_SERIAL_CTS_HANDSHAKE BIT(3)
#define CP210X_SERIAL_DSR_HANDSHAKE BIT(4)
#define CP210X_SERIAL_DCD_HANDSHAKE BIT(5)
#define CP210X_SERIAL_DSR_SENSITIVITY BIT(6)
-/* values for cp210x_flow_ctl::ulControlHandshake::CP210X_SERIAL_DTR_MASK */
-#define CP210X_SERIAL_DTR_INACTIVE 0
-#define CP210X_SERIAL_DTR_ACTIVE 1
-#define CP210X_SERIAL_DTR_FLOW_CTL 2
-
/* cp210x_flow_ctl::ulFlowReplace */
#define CP210X_SERIAL_AUTO_TRANSMIT BIT(0)
#define CP210X_SERIAL_AUTO_RECEIVE BIT(1)
@@ -453,14 +466,11 @@ struct cp210x_flow_ctl {
#define CP210X_SERIAL_NULL_STRIPPING BIT(3)
#define CP210X_SERIAL_BREAK_CHAR BIT(4)
#define CP210X_SERIAL_RTS_MASK GENMASK(7, 6)
-#define CP210X_SERIAL_RTS_SHIFT(_mode) (_mode << 6)
+#define CP210X_SERIAL_RTS_INACTIVE (0 << 6)
+#define CP210X_SERIAL_RTS_ACTIVE (1 << 6)
+#define CP210X_SERIAL_RTS_FLOW_CTL (2 << 6)
#define CP210X_SERIAL_XOFF_CONTINUE BIT(31)
-/* values for cp210x_flow_ctl::ulFlowReplace::CP210X_SERIAL_RTS_MASK */
-#define CP210X_SERIAL_RTS_INACTIVE 0
-#define CP210X_SERIAL_RTS_ACTIVE 1
-#define CP210X_SERIAL_RTS_FLOW_CTL 2
-
/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */
struct cp210x_pin_mode {
u8 eci;
@@ -664,16 +674,13 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
kfree(dmabuf);
- if (result == bufsize) {
- result = 0;
- } else {
+ if (result < 0) {
dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
req, bufsize, result);
- if (result >= 0)
- result = -EIO;
+ return result;
}
- return result;
+ return 0;
}
/*
@@ -710,17 +717,14 @@ static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
kfree(dmabuf);
- if (result == bufsize) {
- result = 0;
- } else {
+ if (result < 0) {
dev_err(&serial->interface->dev,
"failed to set vendor val 0x%04x size %d: %d\n", val,
bufsize, result);
- if (result >= 0)
- result = -EIO;
+ return result;
}
- return result;
+ return 0;
}
#endif
@@ -1074,30 +1078,80 @@ static void cp210x_disable_event_mode(struct usb_serial_port *port)
port_priv->event_mode = false;
}
+static int cp210x_set_chars(struct usb_serial_port *port,
+ struct cp210x_special_chars *chars)
+{
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ void *dmabuf;
+ int result;
+
+ dmabuf = kmemdup(chars, sizeof(*chars), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ CP210X_SET_CHARS, REQTYPE_HOST_TO_INTERFACE, 0,
+ port_priv->bInterfaceNumber,
+ dmabuf, sizeof(*chars), USB_CTRL_SET_TIMEOUT);
+
+ kfree(dmabuf);
+
+ if (result < 0) {
+ dev_err(&port->dev, "failed to set special chars: %d\n", result);
+ return result;
+ }
+
+ return 0;
+}
+
static bool cp210x_termios_change(const struct ktermios *a, const struct ktermios *b)
{
- bool iflag_change;
+ bool iflag_change, cc_change;
- iflag_change = ((a->c_iflag ^ b->c_iflag) & INPCK);
+ iflag_change = ((a->c_iflag ^ b->c_iflag) & (INPCK | IXON | IXOFF));
+ cc_change = a->c_cc[VSTART] != b->c_cc[VSTART] ||
+ a->c_cc[VSTOP] != b->c_cc[VSTOP];
- return tty_termios_hw_change(a, b) || iflag_change;
+ return tty_termios_hw_change(a, b) || iflag_change || cc_change;
}
static void cp210x_set_flow_control(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ struct cp210x_special_chars chars;
struct cp210x_flow_ctl flow_ctl;
u32 flow_repl;
u32 ctl_hs;
int ret;
- if (old_termios && C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS))
+ if (old_termios &&
+ C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
+ I_IXON(tty) == (old_termios->c_iflag & IXON) &&
+ I_IXOFF(tty) == (old_termios->c_iflag & IXOFF) &&
+ START_CHAR(tty) == old_termios->c_cc[VSTART] &&
+ STOP_CHAR(tty) == old_termios->c_cc[VSTOP]) {
return;
+ }
+
+ if (I_IXON(tty) || I_IXOFF(tty)) {
+ memset(&chars, 0, sizeof(chars));
+
+ chars.bXonChar = START_CHAR(tty);
+ chars.bXoffChar = STOP_CHAR(tty);
+
+ ret = cp210x_set_chars(port, &chars);
+ if (ret)
+ return;
+ }
+
+ mutex_lock(&port_priv->mutex);
ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
sizeof(flow_ctl));
if (ret)
- return;
+ goto out_unlock;
ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
@@ -1106,26 +1160,51 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
ctl_hs &= ~CP210X_SERIAL_DCD_HANDSHAKE;
ctl_hs &= ~CP210X_SERIAL_DSR_SENSITIVITY;
ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
- ctl_hs |= CP210X_SERIAL_DTR_SHIFT(CP210X_SERIAL_DTR_ACTIVE);
+ if (port_priv->dtr)
+ ctl_hs |= CP210X_SERIAL_DTR_ACTIVE;
+ else
+ ctl_hs |= CP210X_SERIAL_DTR_INACTIVE;
+ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
if (C_CRTSCTS(tty)) {
ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE;
- flow_repl &= ~CP210X_SERIAL_RTS_MASK;
- flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);
+ if (port_priv->rts)
+ flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;
+ else
+ flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
+ port_priv->crtscts = true;
} else {
ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE;
- flow_repl &= ~CP210X_SERIAL_RTS_MASK;
- flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_ACTIVE);
+ if (port_priv->rts)
+ flow_repl |= CP210X_SERIAL_RTS_ACTIVE;
+ else
+ flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
+ port_priv->crtscts = false;
}
- dev_dbg(&port->dev, "%s - ulControlHandshake=0x%08x, ulFlowReplace=0x%08x\n",
- __func__, ctl_hs, flow_repl);
+ if (I_IXOFF(tty))
+ flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
+ else
+ flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+
+ if (I_IXON(tty))
+ flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
+ else
+ flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
+
+ flow_ctl.ulXonLimit = cpu_to_le32(128);
+ flow_ctl.ulXoffLimit = cpu_to_le32(128);
+
+ dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
+ ctl_hs, flow_repl);
flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
sizeof(flow_ctl));
+out_unlock:
+ mutex_unlock(&port_priv->mutex);
}
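
The new CP210X_SET_CHARS path is reached through standard termios settings. A userspace sketch (device node and helper name illustrative) that enables software flow control with the conventional DC1/DC3 characters:

    #include <fcntl.h>
    #include <termios.h>

    int enable_sw_flow_control(const char *node)
    {
            struct termios tio;
            int fd = open(node, O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return -1;
            tcgetattr(fd, &tio);
            tio.c_iflag |= IXON | IXOFF;    /* XON/XOFF in both directions */
            tio.c_cc[VSTART] = 0x11;        /* DC1, conventional XON */
            tio.c_cc[VSTOP] = 0x13;         /* DC3, conventional XOFF */
            tcsetattr(fd, TCSANOW, &tio);
            return fd;
    }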
static void cp210x_set_termios(struct tty_struct *tty,
@@ -1210,28 +1289,77 @@ static int cp210x_tiocmset(struct tty_struct *tty,
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int set, unsigned int clear)
{
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ struct cp210x_flow_ctl flow_ctl;
+ u32 ctl_hs, flow_repl;
u16 control = 0;
+ int ret;
+
+ mutex_lock(&port_priv->mutex);
if (set & TIOCM_RTS) {
+ port_priv->rts = true;
control |= CONTROL_RTS;
control |= CONTROL_WRITE_RTS;
}
if (set & TIOCM_DTR) {
+ port_priv->dtr = true;
control |= CONTROL_DTR;
control |= CONTROL_WRITE_DTR;
}
if (clear & TIOCM_RTS) {
+ port_priv->rts = false;
control &= ~CONTROL_RTS;
control |= CONTROL_WRITE_RTS;
}
if (clear & TIOCM_DTR) {
+ port_priv->dtr = false;
control &= ~CONTROL_DTR;
control |= CONTROL_WRITE_DTR;
}
- dev_dbg(&port->dev, "%s - control = 0x%.4x\n", __func__, control);
+ /*
+ * Use SET_FLOW to set DTR and enable/disable auto-RTS when hardware
+ * flow control is enabled.
+ */
+ if (port_priv->crtscts && control & CONTROL_WRITE_RTS) {
+ ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
+ sizeof(flow_ctl));
+ if (ret)
+ goto out_unlock;
+
+ ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
+ flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
+
+ ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
+ if (port_priv->dtr)
+ ctl_hs |= CP210X_SERIAL_DTR_ACTIVE;
+ else
+ ctl_hs |= CP210X_SERIAL_DTR_INACTIVE;
- return cp210x_write_u16_reg(port, CP210X_SET_MHS, control);
+ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
+ if (port_priv->rts)
+ flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;
+ else
+ flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
+
+ flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
+ flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
+
+ dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n",
+ __func__, ctl_hs, flow_repl);
+
+ ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
+ sizeof(flow_ctl));
+ } else {
+ dev_dbg(&port->dev, "%s - control = 0x%04x\n", __func__, control);
+
+ ret = cp210x_write_u16_reg(port, CP210X_SET_MHS, control);
+ }
+out_unlock:
+ mutex_unlock(&port_priv->mutex);
+
+ return ret;
}
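
As the comment in the branch above notes, RTS is managed through SET_FLOW rather than SET_MHS whenever auto-RTS is active. From userspace this code is reached via the standard modem-control ioctls; a minimal sketch:

    #include <sys/ioctl.h>
    #include <termios.h>

    /* Assert DTR and RTS on an already-open cp210x port. */
    int assert_dtr_rts(int fd)
    {
            int bits = TIOCM_DTR | TIOCM_RTS;

            return ioctl(fd, TIOCMBIS, &bits);
    }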
static void cp210x_dtr_rts(struct usb_serial_port *port, int on)
@@ -1259,7 +1387,7 @@ static int cp210x_tiocmget(struct tty_struct *tty)
|((control & CONTROL_RING)? TIOCM_RI : 0)
|((control & CONTROL_DCD) ? TIOCM_CD : 0);
- dev_dbg(&port->dev, "%s - control = 0x%.2x\n", __func__, control);
+ dev_dbg(&port->dev, "%s - control = 0x%02x\n", __func__, control);
return result;
}
@@ -1708,20 +1836,19 @@ static int cp210x_port_probe(struct usb_serial_port *port)
return -ENOMEM;
port_priv->bInterfaceNumber = cp210x_interface_num(serial);
+ mutex_init(&port_priv->mutex);
usb_set_serial_port_data(port, port_priv);
return 0;
}
-static int cp210x_port_remove(struct usb_serial_port *port)
+static void cp210x_port_remove(struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv;
port_priv = usb_get_serial_port_data(port);
kfree(port_priv);
-
- return 0;
}
static void cp210x_init_max_speed(struct usb_serial *serial)
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 2e40908963da..cf389224d528 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -47,7 +47,7 @@
/* Function prototypes */
static int cyberjack_port_probe(struct usb_serial_port *port);
-static int cyberjack_port_remove(struct usb_serial_port *port);
+static void cyberjack_port_remove(struct usb_serial_port *port);
static int cyberjack_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void cyberjack_close(struct usb_serial_port *port);
@@ -120,7 +120,7 @@ static int cyberjack_port_probe(struct usb_serial_port *port)
return 0;
}
-static int cyberjack_port_remove(struct usb_serial_port *port)
+static void cyberjack_port_remove(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
@@ -128,8 +128,6 @@ static int cyberjack_port_remove(struct usb_serial_port *port)
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int cyberjack_open(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index cc028601c388..166ee2286fda 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -115,7 +115,7 @@ struct cypress_private {
static int cypress_earthmate_port_probe(struct usb_serial_port *port);
static int cypress_hidcom_port_probe(struct usb_serial_port *port);
static int cypress_ca42v2_port_probe(struct usb_serial_port *port);
-static int cypress_port_remove(struct usb_serial_port *port);
+static void cypress_port_remove(struct usb_serial_port *port);
static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port);
static void cypress_close(struct usb_serial_port *port);
static void cypress_dtr_rts(struct usb_serial_port *port, int on);
@@ -564,7 +564,7 @@ static int cypress_ca42v2_port_probe(struct usb_serial_port *port)
return 0;
}
-static int cypress_port_remove(struct usb_serial_port *port)
+static void cypress_port_remove(struct usb_serial_port *port)
{
struct cypress_private *priv;
@@ -572,8 +572,6 @@ static int cypress_port_remove(struct usb_serial_port *port)
kfifo_free(&priv->write_fifo);
kfree(priv);
-
- return 0;
}
static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 0ecd5316d85f..8b2f06539f2c 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -233,7 +233,7 @@ static int digi_startup(struct usb_serial *serial);
static void digi_disconnect(struct usb_serial *serial);
static void digi_release(struct usb_serial *serial);
static int digi_port_probe(struct usb_serial_port *port);
-static int digi_port_remove(struct usb_serial_port *port);
+static void digi_port_remove(struct usb_serial_port *port);
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
@@ -1281,14 +1281,12 @@ static int digi_port_probe(struct usb_serial_port *port)
return digi_port_init(port, port->port_number);
}
-static int digi_port_remove(struct usb_serial_port *port)
+static void digi_port_remove(struct usb_serial_port *port)
{
struct digi_port *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static void digi_read_bulk_callback(struct urb *urb)
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 0c7eacc630e0..6a8f39147d8e 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -192,13 +192,9 @@ static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val)
tmp,
sizeof(val),
USB_CTRL_SET_TIMEOUT);
- if (status != sizeof(val)) {
+ if (status < 0) {
dev_err(&port->dev, "%s failed status: %d\n", __func__, status);
-
- if (status < 0)
- status = usb_translate_errors(status);
- else
- status = -EIO;
+ status = usb_translate_errors(status);
} else {
status = 0;
}
@@ -886,10 +882,6 @@ static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
status = usb_translate_errors(status);
if (status == -EIO)
continue;
- } else if (status != size) {
- /* Retry on short transfers */
- status = -EIO;
- continue;
} else {
status = 0;
}
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 5661fd03e545..a763b362f081 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -235,11 +235,9 @@ static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data)
USB_TYPE_VENDOR | USB_DIR_OUT,
reg, 0, tmp, sizeof(u8),
F81534_USB_TIMEOUT);
- if (status > 0) {
+ if (status == sizeof(u8)) {
status = 0;
break;
- } else if (status == 0) {
- status = -EIO;
}
}
@@ -1432,12 +1430,11 @@ static int f81534_port_probe(struct usb_serial_port *port)
return f81534_set_port_output_pin(port);
}
-static int f81534_port_remove(struct usb_serial_port *port)
+static void f81534_port_remove(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
flush_work(&port_priv->lsr_work);
- return 0;
}
static int f81534_tiocmget(struct tty_struct *tty)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 94398f89e600..c867592477c9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1069,7 +1069,7 @@ static const char *ftdi_chip_name[] = {
static int ftdi_sio_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int ftdi_sio_port_probe(struct usb_serial_port *port);
-static int ftdi_sio_port_remove(struct usb_serial_port *port);
+static void ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
static void ftdi_process_read_urb(struct urb *urb);
@@ -1153,13 +1153,13 @@ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base)
divisor = divisor3 >> 3;
divisor3 &= 0x7;
if (divisor3 == 1)
- divisor |= 0xc000;
+ divisor |= 0xc000; /* +0.125 */
else if (divisor3 >= 4)
- divisor |= 0x4000;
+ divisor |= 0x4000; /* +0.5 */
else if (divisor3 != 0)
- divisor |= 0x8000;
+ divisor |= 0x8000; /* +0.25 */
else if (divisor == 1)
- divisor = 0; /* special case for maximum baud rate */
+ divisor = 0; /* special case for maximum baud rate */
return divisor;
}
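
The fraction comments added above can be sanity-checked with a worked example, assuming the 48 MHz baud base these parts use and the divisor3 = base / (2 * baud) scaling from earlier in this function:

    requested baud:  38400
    divisor3      =  48000000 / (2 * 38400) = 625    (divisor scaled by 8)
    integer part  =  625 >> 3 = 78
    fraction bits =  625 & 0x7 = 1  ->  +0.125  (encoded as 0xc000)
    result        =  48000000 / (16 * 78.125) = 38400, exact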
@@ -1177,9 +1177,9 @@ static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base)
divisor = divisor3 >> 3;
divisor |= (u32)divfrac[divisor3 & 0x7] << 14;
/* Deal with special cases for highest baud rates. */
- if (divisor == 1)
+ if (divisor == 1) /* 1.0 */
divisor = 0;
- else if (divisor == 0x4001)
+ else if (divisor == 0x4001) /* 1.5 */
divisor = 1;
return divisor;
}
@@ -1201,9 +1201,9 @@ static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
divisor = divisor3 >> 3;
divisor |= (u32)divfrac[divisor3 & 0x7] << 14;
/* Deal with special cases for highest baud rates. */
- if (divisor == 1)
+ if (divisor == 1) /* 1.0 */
divisor = 0;
- else if (divisor == 0x4001)
+ else if (divisor == 0x4001) /* 1.5 */
divisor = 1;
/*
* Set this bit to turn off a divide by 2.5 on baud rate generator
@@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
index_value = get_ftdi_divisor(tty, port);
value = (u16)index_value;
index = (u16)(index_value >> 16);
- if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
- (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
+ if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H || priv->chip_type == FT232H ||
+ priv->chip_type == FTX) {
/* Probably the BM type needs the MSB of the encoded fractional
* divider also moved like for the chips above. Any infos? */
index = (u16)((index << 8) | priv->interface);
@@ -2399,7 +2400,7 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
return 0;
}
-static int ftdi_sio_port_remove(struct usb_serial_port *port)
+static void ftdi_sio_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
@@ -2408,8 +2409,6 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
remove_sysfs_attrs(port);
kfree(priv);
-
- return 0;
}
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index c02c19bb1183..50e8bdc77e71 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1401,7 +1401,7 @@ err_free:
}
-static int garmin_port_remove(struct usb_serial_port *port)
+static void garmin_port_remove(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1409,7 +1409,6 @@ static int garmin_port_remove(struct usb_serial_port *port)
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
kfree(garmin_data_p);
- return 0;
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index ba5d8df69518..a493670c06e6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -293,7 +293,7 @@ static int edge_startup(struct usb_serial *serial);
static void edge_disconnect(struct usb_serial *serial);
static void edge_release(struct usb_serial *serial);
static int edge_port_probe(struct usb_serial_port *port);
-static int edge_port_remove(struct usb_serial_port *port);
+static void edge_port_remove(struct usb_serial_port *port);
/* function prototypes for all of our local functions */
@@ -3078,14 +3078,12 @@ static int edge_port_probe(struct usb_serial_port *port)
return 0;
}
-static int edge_port_remove(struct usb_serial_port *port)
+static void edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
kfree(edge_port);
-
- return 0;
}
static struct usb_serial_driver edgeport_2port_device = {
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c327d4cf7928..e800547be9e0 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -266,7 +266,7 @@ static int ti_vread_sync(struct usb_device *dev, __u8 request,
if (status < 0)
return status;
if (status != size) {
- dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n",
+ dev_dbg(&dev->dev, "%s - wanted to read %d, but only read %d\n",
__func__, size, status);
return -ECOMM;
}
@@ -283,11 +283,7 @@ static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value,
value, index, data, size, timeout);
if (status < 0)
return status;
- if (status != size) {
- dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n",
- __func__, size, status);
- return -ECOMM;
- }
+
return 0;
}
@@ -2629,15 +2625,13 @@ err:
return ret;
}
-static int edge_port_remove(struct usb_serial_port *port)
+static void edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
edge_remove_sysfs_attrs(port);
kfree(edge_port);
-
- return 0;
}
/* Sysfs Attributes */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..093afd67a664 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -100,7 +100,7 @@ static int iuu_port_probe(struct usb_serial_port *port)
return 0;
}
-static int iuu_port_remove(struct usb_serial_port *port)
+static void iuu_port_remove(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
@@ -108,8 +108,6 @@ static int iuu_port_remove(struct usb_serial_port *port)
kfree(priv->writebuf);
kfree(priv->buf);
kfree(priv);
-
- return 0;
}
static int iuu_tiocmset(struct tty_struct *tty,
@@ -532,23 +530,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +560,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
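
The switch from an on-stack rxcmd to kmalloc() follows the general USB rule that message buffers must be DMA-able, which stack memory is not. The same pattern in isolation, using the core usb_bulk_msg() rather than this driver's bulk_immediate() wrapper (pipe and timeout values illustrative):

    u8 *buf;
    int ret, actual;

    buf = kmalloc(1, GFP_KERNEL);   /* kmalloc memory is DMA-safe */
    if (!buf)
            return -ENOMEM;
    buf[0] = IUU_UART_RX;
    ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 1), buf, 1,
                       &actual, 1000);
    kfree(buf);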
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index aa3dbce22cfb..622077dcc344 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -49,7 +49,7 @@ static int keyspan_startup(struct usb_serial *serial);
static void keyspan_disconnect(struct usb_serial *serial);
static void keyspan_release(struct usb_serial *serial);
static int keyspan_port_probe(struct usb_serial_port *port);
-static int keyspan_port_remove(struct usb_serial_port *port);
+static void keyspan_port_remove(struct usb_serial_port *port);
static int keyspan_write_room(struct tty_struct *tty);
static int keyspan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
@@ -2985,7 +2985,7 @@ err_in_buffer:
return -ENOMEM;
}
-static int keyspan_port_remove(struct usb_serial_port *port)
+static void keyspan_port_remove(struct usb_serial_port *port)
{
struct keyspan_port_private *p_priv;
int i;
@@ -3014,8 +3014,6 @@ static int keyspan_port_remove(struct usb_serial_port *port)
kfree(p_priv->in_buffer[i]);
kfree(p_priv);
-
- return 0;
}
/* Structs for the devices, pre and post renumeration. */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e6f933e8d25f..39b0f5f344c2 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -672,14 +672,12 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port)
return 0;
}
-static int keyspan_pda_port_remove(struct usb_serial_port *port)
+static void keyspan_pda_port_remove(struct usb_serial_port *port)
{
struct keyspan_pda_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static struct usb_serial_driver keyspan_pda_fake_device = {
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5f6b82ebccc5..f1e9628a9907 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -52,7 +52,7 @@
* Function prototypes
*/
static int klsi_105_port_probe(struct usb_serial_port *port);
-static int klsi_105_port_remove(struct usb_serial_port *port);
+static void klsi_105_port_remove(struct usb_serial_port *port);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
static void klsi_105_set_termios(struct tty_struct *tty,
@@ -231,14 +231,12 @@ static int klsi_105_port_probe(struct usb_serial_port *port)
return 0;
}
-static int klsi_105_port_remove(struct usb_serial_port *port)
+static void klsi_105_port_remove(struct usb_serial_port *port)
{
struct klsi_105_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 49aacb0a327c..a9bc546626ab 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -48,7 +48,7 @@
/* Function prototypes */
static int kobil_port_probe(struct usb_serial_port *probe);
-static int kobil_port_remove(struct usb_serial_port *probe);
+static void kobil_port_remove(struct usb_serial_port *probe);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -143,14 +143,12 @@ static int kobil_port_probe(struct usb_serial_port *port)
}
-static int kobil_port_remove(struct usb_serial_port *port)
+static void kobil_port_remove(struct usb_serial_port *port)
{
struct kobil_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static void kobil_init_termios(struct tty_struct *tty)
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 7887c312d9a9..ecd5b921e374 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -39,7 +39,7 @@
* Function prototypes
*/
static int mct_u232_port_probe(struct usb_serial_port *port);
-static int mct_u232_port_remove(struct usb_serial_port *remove);
+static void mct_u232_port_remove(struct usb_serial_port *remove);
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port);
static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
@@ -400,14 +400,12 @@ static int mct_u232_port_probe(struct usb_serial_port *port)
return 0;
}
-static int mct_u232_port_remove(struct usb_serial_port *port)
+static void mct_u232_port_remove(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index e63cea02cfd8..0bfe4459c37f 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -256,14 +256,12 @@ static int metrousb_port_probe(struct usb_serial_port *port)
return 0;
}
-static int metrousb_port_remove(struct usb_serial_port *port)
+static void metrousb_port_remove(struct usb_serial_port *port)
{
struct metrousb_private *metro_priv;
metro_priv = usb_get_serial_port_data(port);
kfree(metro_priv);
-
- return 0;
}
static void metrousb_throttle(struct tty_struct *tty)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 41ee2984a0df..701dfb32b129 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -215,8 +215,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
int status;
buf = kmalloc(1, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ *data = 0;
return -ENOMEM;
+ }
status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, buf, 1, MOS_WDR_TIMEOUT);
@@ -1092,8 +1094,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
- if (!urb->transfer_buffer)
+ if (!urb->transfer_buffer) {
+ bytes_sent = -ENOMEM;
goto exit;
+ }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
@@ -1756,14 +1760,12 @@ static int mos7720_port_probe(struct usb_serial_port *port)
return 0;
}
-static int mos7720_port_remove(struct usb_serial_port *port)
+static void mos7720_port_remove(struct usb_serial_port *port)
{
struct moschip_port *mos7720_port;
mos7720_port = usb_get_serial_port_data(port);
kfree(mos7720_port);
-
- return 0;
}
static struct usb_serial_driver moschip7720_2port_driver = {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 23f91d658cb4..1bf0d066f55a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -883,8 +883,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
- if (!urb->transfer_buffer)
+ if (!urb->transfer_buffer) {
+ bytes_sent = -ENOMEM;
goto exit;
+ }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
@@ -1743,7 +1745,7 @@ error:
return status;
}
-static int mos7840_port_remove(struct usb_serial_port *port)
+static void mos7840_port_remove(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
@@ -1760,8 +1762,6 @@ static int mos7840_port_remove(struct usb_serial_port *port)
}
kfree(mos7840_port);
-
- return 0;
}
static struct usb_serial_driver moschip7840_4port_device = {
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 5d38c2a0f590..eb45a9b0005c 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -261,13 +261,6 @@ static int mxuport_send_ctrl_data_urb(struct usb_serial *serial,
return status;
}
- if (status != size) {
- dev_err(&serial->interface->dev,
- "%s - short write (%d / %zd)\n",
- __func__, status, size);
- return -EIO;
- }
-
return 0;
}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 5b6e982a9376..83c62f920c50 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -36,7 +36,7 @@ static int omninet_prepare_write_buffer(struct usb_serial_port *port,
static int omninet_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds);
static int omninet_port_probe(struct usb_serial_port *port);
-static int omninet_port_remove(struct usb_serial_port *port);
+static void omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
@@ -121,14 +121,12 @@ static int omninet_port_probe(struct usb_serial_port *port)
return 0;
}
-static int omninet_port_remove(struct usb_serial_port *port)
+static void omninet_port_remove(struct usb_serial_port *port)
{
struct omninet_data *od;
od = usb_get_serial_port_data(port);
kfree(od);
-
- return 0;
}
#define OMNINET_HEADERLEN 4
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 0af76800bd78..eecb72aef83e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -385,13 +385,11 @@ static int opticon_port_probe(struct usb_serial_port *port)
return 0;
}
-static int opticon_port_remove(struct usb_serial_port *port)
+static void opticon_port_remove(struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static struct usb_serial_driver opticon_device = {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..c6969ca72839 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1117,6 +1119,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -1565,7 +1569,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
@@ -1912,6 +1917,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2057,6 +2066,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
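
The RSVD() annotations in this table keep option from binding interfaces that belong to another driver, typically the modem's network function. The general shape of such an entry, with hypothetical IDs:

    /* Bind the vendor-specific interfaces except 3 and 4, which are
     * reserved for a net driver such as qmi_wwan.
     */
    { USB_DEVICE(0x1234, 0x5678),
      .driver_info = RSVD(3) | RSVD(4) },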
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 8151dd7a45e8..65cd0341fa78 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -132,7 +132,7 @@ static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int oti6858_port_probe(struct usb_serial_port *port);
-static int oti6858_port_remove(struct usb_serial_port *port);
+static void oti6858_port_remove(struct usb_serial_port *port);
/* device info */
static struct usb_serial_driver oti6858_device = {
@@ -344,14 +344,12 @@ static int oti6858_port_probe(struct usb_serial_port *port)
return 0;
}
-static int oti6858_port_remove(struct usb_serial_port *port)
+static void oti6858_port_remove(struct usb_serial_port *port)
{
struct oti6858_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index be8067017eaa..eed9acd1ae08 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -183,6 +183,7 @@ struct pl2303_type_data {
speed_t max_baud_rate;
unsigned long quirks;
unsigned int no_autoxonxoff:1;
+ unsigned int no_divisors:1;
};
struct pl2303_serial_private {
@@ -209,6 +210,7 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
},
[TYPE_HXN] = {
.max_baud_rate = 12000000,
+ .no_divisors = true,
},
};
@@ -448,13 +450,11 @@ static int pl2303_port_probe(struct usb_serial_port *port)
return 0;
}
-static int pl2303_port_remove(struct usb_serial_port *port)
+static void pl2303_port_remove(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
@@ -571,8 +571,12 @@ static void pl2303_encode_baud_rate(struct tty_struct *tty,
baud = min_t(speed_t, baud, spriv->type->max_baud_rate);
/*
* Use direct method for supported baud rates, otherwise use divisors.
+ * Newer chip types do not support divisor encoding.
*/
- baud_sup = pl2303_get_supported_baud_rate(baud);
+ if (spriv->type->no_divisors)
+ baud_sup = baud;
+ else
+ baud_sup = pl2303_get_supported_baud_rate(baud);
if (baud == baud_sup)
baud = pl2303_encode_baud_rate_direct(buf, baud);
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 872d1bc86ab4..599dcb2e374d 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -727,7 +727,7 @@ err_buf:
return -ENOMEM;
}
-static int qt2_port_remove(struct usb_serial_port *port)
+static void qt2_port_remove(struct usb_serial_port *port)
{
struct qt2_port_private *port_priv;
@@ -735,8 +735,6 @@ static int qt2_port_remove(struct usb_serial_port *port)
usb_free_urb(port_priv->write_urb);
kfree(port_priv->write_buffer);
kfree(port_priv);
-
- return 0;
}
static int qt2_tiocmget(struct tty_struct *tty)
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 57fc3c31712e..54e16ffc30a0 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -901,15 +901,13 @@ static int sierra_port_probe(struct usb_serial_port *port)
return 0;
}
-static int sierra_port_remove(struct usb_serial_port *port)
+static void sierra_port_remove(struct usb_serial_port *port)
{
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
usb_set_serial_port_data(port, NULL);
kfree(portdata);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 3bac55bd9bd9..7039dc918827 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -169,14 +169,12 @@ static int spcp8x5_port_probe(struct usb_serial_port *port)
return 0;
}
-static int spcp8x5_port_remove(struct usb_serial_port *port)
+static void spcp8x5_port_remove(struct usb_serial_port *port)
{
struct spcp8x5_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int spcp8x5_set_ctrl_line(struct usb_serial_port *port, u8 mcr)
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 7d39d35e52a1..89fdc5c19285 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -366,14 +366,12 @@ static int ssu100_port_probe(struct usb_serial_port *port)
return 0;
}
-static int ssu100_port_remove(struct usb_serial_port *port)
+static void ssu100_port_remove(struct usb_serial_port *port)
{
struct ssu100_port_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static int ssu100_tiocmget(struct tty_struct *tty)
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 6ca24e86f686..d7f73ad6e778 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -160,13 +160,11 @@ static int symbol_port_probe(struct usb_serial_port *port)
return 0;
}
-static int symbol_port_remove(struct usb_serial_port *port)
+static void symbol_port_remove(struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_port_data(port);
kfree(priv);
-
- return 0;
}
static struct usb_serial_driver symbol_device = {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 73075b9351c5..7252b0ce75a6 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -303,7 +303,7 @@ struct ti_device {
static int ti_startup(struct usb_serial *serial);
static void ti_release(struct usb_serial *serial);
static int ti_port_probe(struct usb_serial_port *port);
-static int ti_port_remove(struct usb_serial_port *port);
+static void ti_port_remove(struct usb_serial_port *port);
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ti_close(struct usb_serial_port *port);
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -629,14 +629,12 @@ static int ti_port_probe(struct usb_serial_port *port)
return 0;
}
-static int ti_port_remove(struct usb_serial_port *port)
+static void ti_port_remove(struct usb_serial_port *port)
{
struct ti_port *tport;
tport = usb_get_serial_port_data(port);
kfree(tport);
-
- return 0;
}
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 0a2268c479af..26d7b003b7e3 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -145,14 +145,11 @@ static int upd78f0730_send_ctl(struct usb_serial_port *port,
kfree(buf);
- if (res != size) {
+ if (res < 0) {
struct device *dev = &port->dev;
dev_err(dev, "failed to send control request %02x: %d\n",
*(u8 *)data, res);
- /* The maximum expected length of a transfer is 6 bytes */
- if (res >= 0)
- res = -EIO;
return res;
}
@@ -174,15 +171,13 @@ static int upd78f0730_port_probe(struct usb_serial_port *port)
return 0;
}
-static int upd78f0730_port_remove(struct usb_serial_port *port)
+static void upd78f0730_port_remove(struct usb_serial_port *port)
{
struct upd78f0730_port_private *private;
private = usb_get_serial_port_data(port);
mutex_destroy(&private->lock);
kfree(private);
-
- return 0;
}
static int upd78f0730_tiocmget(struct tty_struct *tty)
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 934e9361cf6b..79dafd98e0a1 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -10,7 +10,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
extern void usb_wwan_close(struct usb_serial_port *port);
extern int usb_wwan_port_probe(struct usb_serial_port *port);
-extern int usb_wwan_port_remove(struct usb_serial_port *port);
+extern void usb_wwan_port_remove(struct usb_serial_port *port);
extern int usb_wwan_write_room(struct tty_struct *tty);
extern int usb_wwan_tiocmget(struct tty_struct *tty);
extern int usb_wwan_tiocmset(struct tty_struct *tty,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 4b9845807bee..46d46a4f99c9 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -544,7 +544,7 @@ bail_out_error:
}
EXPORT_SYMBOL_GPL(usb_wwan_port_probe);
-int usb_wwan_port_remove(struct usb_serial_port *port)
+void usb_wwan_port_remove(struct usb_serial_port *port)
{
int i;
struct usb_wwan_port_private *portdata;
@@ -562,8 +562,6 @@ int usb_wwan_port_remove(struct usb_serial_port *port)
}
kfree(portdata);
-
- return 0;
}
EXPORT_SYMBOL(usb_wwan_port_remove);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index ca3bd58f2025..ccfd5ed652cd 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -79,7 +79,7 @@ static int whiteheat_firmware_attach(struct usb_serial *serial);
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_port_probe(struct usb_serial_port *port);
-static int whiteheat_port_remove(struct usb_serial_port *port);
+static void whiteheat_port_remove(struct usb_serial_port *port);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void whiteheat_close(struct usb_serial_port *port);
@@ -345,14 +345,12 @@ static int whiteheat_port_probe(struct usb_serial_port *port)
return 0;
}
-static int whiteheat_port_remove(struct usb_serial_port *port)
+static void whiteheat_port_remove(struct usb_serial_port *port)
{
struct whiteheat_private *info;
info = usb_get_serial_port_data(port);
kfree(info);
-
- return 0;
}
static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
new file mode 100644
index 000000000000..483d07dee19d
--- /dev/null
+++ b/drivers/usb/serial/xr_serial.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MaxLinear/Exar USB to Serial driver
+ *
+ * Copyright (c) 2020 Manivannan Sadhasivam <mani@kernel.org>
+ *
+ * Based on the initial driver written by Patong Yang:
+ *
+ * https://lore.kernel.org/r/20180404070634.nhspvmxcjwfgjkcv@advantechmxl-desktop
+ *
+ * Copyright (c) 2018 Patong Yang <patong.mxl@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+struct xr_txrx_clk_mask {
+ u16 tx;
+ u16 rx0;
+ u16 rx1;
+};
+
+#define XR_INT_OSC_HZ 48000000U
+#define XR21V141X_MIN_SPEED 46U
+#define XR21V141X_MAX_SPEED XR_INT_OSC_HZ
+
+/* USB Requests */
+#define XR21V141X_SET_REQ 0
+#define XR21V141X_GET_REQ 1
+
+#define XR21V141X_CLOCK_DIVISOR_0 0x04
+#define XR21V141X_CLOCK_DIVISOR_1 0x05
+#define XR21V141X_CLOCK_DIVISOR_2 0x06
+#define XR21V141X_TX_CLOCK_MASK_0 0x07
+#define XR21V141X_TX_CLOCK_MASK_1 0x08
+#define XR21V141X_RX_CLOCK_MASK_0 0x09
+#define XR21V141X_RX_CLOCK_MASK_1 0x0a
+
+/* XR21V141X register blocks */
+#define XR21V141X_UART_REG_BLOCK 0
+#define XR21V141X_UM_REG_BLOCK 4
+#define XR21V141X_UART_CUSTOM_BLOCK 0x66
+
+/* XR21V141X UART Manager Registers */
+#define XR21V141X_UM_FIFO_ENABLE_REG 0x10
+#define XR21V141X_UM_ENABLE_TX_FIFO 0x01
+#define XR21V141X_UM_ENABLE_RX_FIFO 0x02
+
+#define XR21V141X_UM_RX_FIFO_RESET 0x18
+#define XR21V141X_UM_TX_FIFO_RESET 0x1c
+
+#define XR21V141X_UART_ENABLE_TX 0x1
+#define XR21V141X_UART_ENABLE_RX 0x2
+
+#define XR21V141X_UART_MODE_RI BIT(0)
+#define XR21V141X_UART_MODE_CD BIT(1)
+#define XR21V141X_UART_MODE_DSR BIT(2)
+#define XR21V141X_UART_MODE_DTR BIT(3)
+#define XR21V141X_UART_MODE_CTS BIT(4)
+#define XR21V141X_UART_MODE_RTS BIT(5)
+
+#define XR21V141X_UART_BREAK_ON 0xff
+#define XR21V141X_UART_BREAK_OFF 0
+
+#define XR21V141X_UART_DATA_MASK GENMASK(3, 0)
+#define XR21V141X_UART_DATA_7 0x7
+#define XR21V141X_UART_DATA_8 0x8
+
+#define XR21V141X_UART_PARITY_MASK GENMASK(6, 4)
+#define XR21V141X_UART_PARITY_SHIFT 4
+#define XR21V141X_UART_PARITY_NONE (0x0 << XR21V141X_UART_PARITY_SHIFT)
+#define XR21V141X_UART_PARITY_ODD (0x1 << XR21V141X_UART_PARITY_SHIFT)
+#define XR21V141X_UART_PARITY_EVEN (0x2 << XR21V141X_UART_PARITY_SHIFT)
+#define XR21V141X_UART_PARITY_MARK (0x3 << XR21V141X_UART_PARITY_SHIFT)
+#define XR21V141X_UART_PARITY_SPACE (0x4 << XR21V141X_UART_PARITY_SHIFT)
+
+#define XR21V141X_UART_STOP_MASK BIT(7)
+#define XR21V141X_UART_STOP_SHIFT 7
+#define XR21V141X_UART_STOP_1 (0x0 << XR21V141X_UART_STOP_SHIFT)
+#define XR21V141X_UART_STOP_2 (0x1 << XR21V141X_UART_STOP_SHIFT)
+
+#define XR21V141X_UART_FLOW_MODE_NONE 0x0
+#define XR21V141X_UART_FLOW_MODE_HW 0x1
+#define XR21V141X_UART_FLOW_MODE_SW 0x2
+
+#define XR21V141X_UART_MODE_GPIO_MASK GENMASK(2, 0)
+#define XR21V141X_UART_MODE_RTS_CTS 0x1
+#define XR21V141X_UART_MODE_DTR_DSR 0x2
+#define XR21V141X_UART_MODE_RS485 0x3
+#define XR21V141X_UART_MODE_RS485_ADDR 0x4
+
+#define XR21V141X_REG_ENABLE 0x03
+#define XR21V141X_REG_FORMAT 0x0b
+#define XR21V141X_REG_FLOW_CTRL 0x0c
+#define XR21V141X_REG_XON_CHAR 0x10
+#define XR21V141X_REG_XOFF_CHAR 0x11
+#define XR21V141X_REG_LOOPBACK 0x12
+#define XR21V141X_REG_TX_BREAK 0x14
+#define XR21V141X_REG_RS485_DELAY		0x15
+#define XR21V141X_REG_GPIO_MODE 0x1a
+#define XR21V141X_REG_GPIO_DIR 0x1b
+#define XR21V141X_REG_GPIO_INT_MASK 0x1c
+#define XR21V141X_REG_GPIO_SET 0x1d
+#define XR21V141X_REG_GPIO_CLR 0x1e
+#define XR21V141X_REG_GPIO_STATUS 0x1f
+
+static int xr_set_reg(struct usb_serial_port *port, u8 block, u8 reg, u8 val)
+{
+ struct usb_serial *serial = port->serial;
+ int ret;
+
+ ret = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ XR21V141X_SET_REQ,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ val, reg | (block << 8), NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&port->dev, "Failed to set reg 0x%02x: %d\n", reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xr_get_reg(struct usb_serial_port *port, u8 block, u8 reg, u8 *val)
+{
+ struct usb_serial *serial = port->serial;
+ u8 *dmabuf;
+ int ret;
+
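+	/*
+	 * usb_control_msg() needs a DMA-able transfer buffer, so use a
+	 * one-byte heap allocation rather than a stack variable.
+	 */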
+ dmabuf = kmalloc(1, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ XR21V141X_GET_REQ,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg | (block << 8), dmabuf, 1,
+ USB_CTRL_GET_TIMEOUT);
+ if (ret == 1) {
+ *val = *dmabuf;
+ ret = 0;
+ } else {
+ dev_err(&port->dev, "Failed to get reg 0x%02x: %d\n", reg, ret);
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ kfree(dmabuf);
+
+ return ret;
+}
+
+static int xr_set_reg_uart(struct usb_serial_port *port, u8 reg, u8 val)
+{
+ return xr_set_reg(port, XR21V141X_UART_REG_BLOCK, reg, val);
+}
+
+static int xr_get_reg_uart(struct usb_serial_port *port, u8 reg, u8 *val)
+{
+ return xr_get_reg(port, XR21V141X_UART_REG_BLOCK, reg, val);
+}
+
+static int xr_set_reg_um(struct usb_serial_port *port, u8 reg, u8 val)
+{
+ return xr_set_reg(port, XR21V141X_UM_REG_BLOCK, reg, val);
+}
+
+/*
+ * According to the datasheet, the recommended sequence for enabling the
+ * UART module in the XR21V141X is:
+ *
+ * Enable Tx FIFO
+ * Enable Tx and Rx
+ * Enable Rx FIFO
+ */
+static int xr_uart_enable(struct usb_serial_port *port)
+{
+ int ret;
+
+ ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG,
+ XR21V141X_UM_ENABLE_TX_FIFO);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_REG_ENABLE,
+ XR21V141X_UART_ENABLE_TX | XR21V141X_UART_ENABLE_RX);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG,
+ XR21V141X_UM_ENABLE_TX_FIFO | XR21V141X_UM_ENABLE_RX_FIFO);
+
+ if (ret)
+ xr_set_reg_uart(port, XR21V141X_REG_ENABLE, 0);
+
+ return ret;
+}
+
+static int xr_uart_disable(struct usb_serial_port *port)
+{
+ int ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_REG_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, 0);
+
+ return ret;
+}
+
+static int xr_tiocmget(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ u8 status;
+ int ret;
+
+ ret = xr_get_reg_uart(port, XR21V141X_REG_GPIO_STATUS, &status);
+ if (ret)
+ return ret;
+
+ /*
+	 * Modem control pins are active low, so reading '0' means the signal
+	 * is active and '1' means it is inactive.
+ */
+ ret = ((status & XR21V141X_UART_MODE_DTR) ? 0 : TIOCM_DTR) |
+ ((status & XR21V141X_UART_MODE_RTS) ? 0 : TIOCM_RTS) |
+ ((status & XR21V141X_UART_MODE_CTS) ? 0 : TIOCM_CTS) |
+ ((status & XR21V141X_UART_MODE_DSR) ? 0 : TIOCM_DSR) |
+ ((status & XR21V141X_UART_MODE_RI) ? 0 : TIOCM_RI) |
+ ((status & XR21V141X_UART_MODE_CD) ? 0 : TIOCM_CD);
+
+ return ret;
+}
+
+static int xr_tiocmset_port(struct usb_serial_port *port,
+ unsigned int set, unsigned int clear)
+{
+ u8 gpio_set = 0;
+ u8 gpio_clr = 0;
+ int ret = 0;
+
+ /* Modem control pins are active low, so set & clr are swapped */
+ if (set & TIOCM_RTS)
+ gpio_clr |= XR21V141X_UART_MODE_RTS;
+ if (set & TIOCM_DTR)
+ gpio_clr |= XR21V141X_UART_MODE_DTR;
+ if (clear & TIOCM_RTS)
+ gpio_set |= XR21V141X_UART_MODE_RTS;
+ if (clear & TIOCM_DTR)
+ gpio_set |= XR21V141X_UART_MODE_DTR;
+
+	/* Writing '0' to gpio_{set/clr} bits has no effect, so skip zero masks */
+ if (gpio_clr)
+ ret = xr_set_reg_uart(port, XR21V141X_REG_GPIO_CLR, gpio_clr);
+
+ if (gpio_set)
+ ret = xr_set_reg_uart(port, XR21V141X_REG_GPIO_SET, gpio_set);
+
+ return ret;
+}
+
+static int xr_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ return xr_tiocmset_port(port, set, clear);
+}
+
+static void xr_dtr_rts(struct usb_serial_port *port, int on)
+{
+ if (on)
+ xr_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0);
+ else
+ xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS);
+}
+
+static void xr_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ u8 state;
+
+ if (break_state == 0)
+ state = XR21V141X_UART_BREAK_OFF;
+ else
+ state = XR21V141X_UART_BREAK_ON;
+
+ dev_dbg(&port->dev, "Turning break %s\n",
+ state == XR21V141X_UART_BREAK_OFF ? "off" : "on");
+ xr_set_reg_uart(port, XR21V141X_REG_TX_BREAK, state);
+}
+
+/* Tx and Rx clock mask values obtained from section 3.3.4 of the datasheet */
+static const struct xr_txrx_clk_mask xr21v141x_txrx_clk_masks[] = {
+ { 0x000, 0x000, 0x000 },
+ { 0x000, 0x000, 0x000 },
+ { 0x100, 0x000, 0x100 },
+ { 0x020, 0x400, 0x020 },
+ { 0x010, 0x100, 0x010 },
+ { 0x208, 0x040, 0x208 },
+ { 0x104, 0x820, 0x108 },
+ { 0x844, 0x210, 0x884 },
+ { 0x444, 0x110, 0x444 },
+ { 0x122, 0x888, 0x224 },
+ { 0x912, 0x448, 0x924 },
+ { 0x492, 0x248, 0x492 },
+ { 0x252, 0x928, 0x292 },
+ { 0x94a, 0x4a4, 0xa52 },
+ { 0x52a, 0xaa4, 0x54a },
+ { 0xaaa, 0x954, 0x4aa },
+ { 0xaaa, 0x554, 0xaaa },
+ { 0x555, 0xad4, 0x5aa },
+ { 0xb55, 0xab4, 0x55a },
+ { 0x6b5, 0x5ac, 0xb56 },
+ { 0x5b5, 0xd6c, 0x6d6 },
+ { 0xb6d, 0xb6a, 0xdb6 },
+ { 0x76d, 0x6da, 0xbb6 },
+ { 0xedd, 0xdda, 0x76e },
+ { 0xddd, 0xbba, 0xeee },
+ { 0x7bb, 0xf7a, 0xdde },
+ { 0xf7b, 0xef6, 0x7de },
+ { 0xdf7, 0xbf6, 0xf7e },
+ { 0x7f7, 0xfee, 0xefe },
+ { 0xfdf, 0xfbe, 0x7fe },
+ { 0xf7f, 0xefe, 0xffe },
+ { 0xfff, 0xffe, 0xffd },
+};
+
+static int xr_set_baudrate(struct tty_struct *tty,
+ struct usb_serial_port *port)
+{
+ u32 divisor, baud, idx;
+ u16 tx_mask, rx_mask;
+ int ret;
+
+ baud = tty->termios.c_ospeed;
+ if (!baud)
+ return 0;
+
+ baud = clamp(baud, XR21V141X_MIN_SPEED, XR21V141X_MAX_SPEED);
+ divisor = XR_INT_OSC_HZ / baud;
+ idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f;
+ tx_mask = xr21v141x_txrx_clk_masks[idx].tx;
+
+ if (divisor & 0x01)
+ rx_mask = xr21v141x_txrx_clk_masks[idx].rx1;
+ else
+ rx_mask = xr21v141x_txrx_clk_masks[idx].rx0;
+
+ dev_dbg(&port->dev, "Setting baud rate: %u\n", baud);
+ /*
+	 * The XR21V141X uses a fractional baud rate generator with a 48 MHz
+	 * internal oscillator and a 19-bit programmable divisor, so it can
+	 * theoretically generate most commonly used baud rates with high
+	 * accuracy.
+ */
+ ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_0,
+ divisor & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_1,
+ (divisor >> 8) & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_2,
+ (divisor >> 16) & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_0,
+ tx_mask & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_1,
+ (tx_mask >> 8) & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_0,
+ rx_mask & 0xff);
+ if (ret)
+ return ret;
+
+ ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_1,
+ (rx_mask >> 8) & 0xff);
+ if (ret)
+ return ret;
+
+ tty_encode_baud_rate(tty, baud, baud);
+
+ return 0;
+}
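As a quick sanity check of the divisor arithmetic above, here is a stand-alone sketch (an editorial illustration, not part of the patch and not taken from the datasheet). For 115200 baud it yields divisor 416 and table index 21; 416 is even, so by my reading of the table above the rx0 mask (0xb6a) would be paired with tx mask 0xb6d.

#include <stdio.h>

#define XR_INT_OSC_HZ	48000000U	/* same constant as the driver */

int main(void)
{
	unsigned int baud = 115200;
	/* The integer part programs CLOCK_DIVISOR_0..2. */
	unsigned int divisor = XR_INT_OSC_HZ / baud;			/* 416 */
	/* The low 5 bits of 32x the ratio index the clock-mask table. */
	unsigned int idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f;	/* 21 */

	printf("divisor=%u idx=%u rx-mask=%s\n",
	       divisor, idx, (divisor & 1) ? "rx1" : "rx0");
	return 0;
}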
+
+static void xr_set_flow_mode(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ u8 flow, gpio_mode;
+ int ret;
+
+ ret = xr_get_reg_uart(port, XR21V141X_REG_GPIO_MODE, &gpio_mode);
+ if (ret)
+ return;
+
+ /* Set GPIO mode for controlling the pins manually by default. */
+ gpio_mode &= ~XR21V141X_UART_MODE_GPIO_MASK;
+
+ if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) {
+ dev_dbg(&port->dev, "Enabling hardware flow ctrl\n");
+ gpio_mode |= XR21V141X_UART_MODE_RTS_CTS;
+ flow = XR21V141X_UART_FLOW_MODE_HW;
+ } else if (I_IXON(tty)) {
+ u8 start_char = START_CHAR(tty);
+ u8 stop_char = STOP_CHAR(tty);
+
+ dev_dbg(&port->dev, "Enabling sw flow ctrl\n");
+ flow = XR21V141X_UART_FLOW_MODE_SW;
+
+ xr_set_reg_uart(port, XR21V141X_REG_XON_CHAR, start_char);
+ xr_set_reg_uart(port, XR21V141X_REG_XOFF_CHAR, stop_char);
+ } else {
+ dev_dbg(&port->dev, "Disabling flow ctrl\n");
+ flow = XR21V141X_UART_FLOW_MODE_NONE;
+ }
+
+ /*
+	 * As per the datasheet, the UART needs to be disabled while writing to
+	 * the FLOW_CONTROL register.
+ */
+ xr_uart_disable(port);
+ xr_set_reg_uart(port, XR21V141X_REG_FLOW_CTRL, flow);
+ xr_uart_enable(port);
+
+ xr_set_reg_uart(port, XR21V141X_REG_GPIO_MODE, gpio_mode);
+
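+	/* B0 means hang up: drop DTR/RTS, and raise them again when leaving B0 */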
+ if (C_BAUD(tty) == B0)
+ xr_dtr_rts(port, 0);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ xr_dtr_rts(port, 1);
+}
+
+static void xr_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct ktermios *termios = &tty->termios;
+ u8 bits = 0;
+ int ret;
+
+ if (!old_termios || (tty->termios.c_ospeed != old_termios->c_ospeed))
+ xr_set_baudrate(tty, port);
+
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ case CS6:
+		/* CS5 and CS6 are not supported, so just restore the old setting */
+ termios->c_cflag &= ~CSIZE;
+ if (old_termios)
+ termios->c_cflag |= old_termios->c_cflag & CSIZE;
+ else
+ bits |= XR21V141X_UART_DATA_8;
+ break;
+ case CS7:
+ bits |= XR21V141X_UART_DATA_7;
+ break;
+ case CS8:
+ default:
+ bits |= XR21V141X_UART_DATA_8;
+ break;
+ }
+
+ if (C_PARENB(tty)) {
+ if (C_CMSPAR(tty)) {
+ if (C_PARODD(tty))
+ bits |= XR21V141X_UART_PARITY_MARK;
+ else
+ bits |= XR21V141X_UART_PARITY_SPACE;
+ } else {
+ if (C_PARODD(tty))
+ bits |= XR21V141X_UART_PARITY_ODD;
+ else
+ bits |= XR21V141X_UART_PARITY_EVEN;
+ }
+ }
+
+ if (C_CSTOPB(tty))
+ bits |= XR21V141X_UART_STOP_2;
+ else
+ bits |= XR21V141X_UART_STOP_1;
+
+ ret = xr_set_reg_uart(port, XR21V141X_REG_FORMAT, bits);
+ if (ret)
+ return;
+
+ xr_set_flow_mode(tty, port, old_termios);
+}
+
+static int xr_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ u8 gpio_dir;
+ int ret;
+
+ ret = xr_uart_enable(port);
+ if (ret) {
+ dev_err(&port->dev, "Failed to enable UART\n");
+ return ret;
+ }
+
+ /*
+ * Configure DTR and RTS as outputs and RI, CD, DSR and CTS as
+ * inputs.
+ */
+ gpio_dir = XR21V141X_UART_MODE_DTR | XR21V141X_UART_MODE_RTS;
+ xr_set_reg_uart(port, XR21V141X_REG_GPIO_DIR, gpio_dir);
+
+ /* Setup termios */
+ if (tty)
+ xr_set_termios(tty, port, NULL);
+
+ ret = usb_serial_generic_open(tty, port);
+ if (ret) {
+ xr_uart_disable(port);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xr_close(struct usb_serial_port *port)
+{
+ usb_serial_generic_close(port);
+
+ xr_uart_disable(port);
+}
+
+static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id)
+{
+ struct usb_driver *driver = serial->type->usb_driver;
+ struct usb_interface *control_interface;
+ int ret;
+
+ /* Don't bind to control interface */
+ if (serial->interface->cur_altsetting->desc.bInterfaceNumber == 0)
+ return -ENODEV;
+
+ /* But claim the control interface during data interface probe */
+ control_interface = usb_ifnum_to_if(serial->dev, 0);
+ if (!control_interface)
+ return -ENODEV;
+
+ ret = usb_driver_claim_interface(driver, control_interface, NULL);
+ if (ret) {
+ dev_err(&serial->interface->dev, "Failed to claim control interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xr_disconnect(struct usb_serial *serial)
+{
+ struct usb_driver *driver = serial->type->usb_driver;
+ struct usb_interface *control_interface;
+
+ control_interface = usb_ifnum_to_if(serial->dev, 0);
+ usb_driver_release_interface(driver, control_interface);
+}
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x04e2, 0x1410) }, /* XR21V141X */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_serial_driver xr_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xr_serial",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+ .probe = xr_probe,
+ .disconnect = xr_disconnect,
+ .open = xr_open,
+ .close = xr_close,
+ .break_ctl = xr_break_ctl,
+ .set_termios = xr_set_termios,
+ .tiocmget = xr_tiocmget,
+ .tiocmset = xr_tiocmset,
+ .dtr_rts = xr_dtr_rts
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+ &xr_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, id_table);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <mani@kernel.org>");
+MODULE_DESCRIPTION("MaxLinear/Exar USB to Serial driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index e62e5e3da01e..b7f094435b00 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -15,8 +15,8 @@
#include <linux/usb/typec_dp.h>
#include "displayport.h"
-#define DP_HEADER(_dp, cmd) (VDO((_dp)->alt->svid, 1, cmd) | \
- VDO_OPOS(USB_TYPEC_DP_MODE))
+#define DP_HEADER(_dp, ver, cmd) (VDO((_dp)->alt->svid, 1, ver, cmd) \
+ | VDO_OPOS(USB_TYPEC_DP_MODE))
enum {
DP_CONF_USB,
@@ -156,9 +156,14 @@ static int dp_altmode_configured(struct dp_altmode *dp)
static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
{
- u32 header = DP_HEADER(dp, DP_CMD_CONFIGURE);
+ int svdm_version = typec_altmode_get_svdm_version(dp->alt);
+ u32 header;
int ret;
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);
ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data);
if (ret) {
dev_err(&dp->alt->dev,
@@ -181,6 +186,7 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
static void dp_altmode_work(struct work_struct *work)
{
struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
+ int svdm_version;
u32 header;
u32 vdo;
int ret;
@@ -194,7 +200,10 @@ static void dp_altmode_work(struct work_struct *work)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
case DP_STATE_UPDATE:
- header = DP_HEADER(dp, DP_CMD_STATUS_UPDATE);
+ svdm_version = typec_altmode_get_svdm_version(dp->alt);
+ if (svdm_version < 0)
+ break;
+ header = DP_HEADER(dp, svdm_version, DP_CMD_STATUS_UPDATE);
vdo = 1;
ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
if (ret)
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..45f0bf65e9ab 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -27,6 +27,7 @@ struct typec_cable {
enum typec_plug_type type;
struct usb_pd_identity *identity;
unsigned int active:1;
+ u16 pd_revision; /* 0300H = "3.0" */
};
struct typec_partner {
@@ -36,6 +37,8 @@ struct typec_partner {
enum typec_accessory accessory;
struct ida mode_ids;
int num_altmodes;
+ u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
};
struct typec_port {
@@ -86,7 +89,7 @@ static const char * const typec_accessory_modes[] = {
/* Product types defined in USB PD Specification R3.0 V2.0 */
static const char * const product_type_ufp[8] = {
- [IDH_PTYPE_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_UFP] = "not_ufp",
[IDH_PTYPE_HUB] = "hub",
[IDH_PTYPE_PERIPH] = "peripheral",
[IDH_PTYPE_PSD] = "psd",
@@ -94,17 +97,17 @@ static const char * const product_type_ufp[8] = {
};
static const char * const product_type_dfp[8] = {
- [IDH_PTYPE_DFP_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_DFP] = "not_dfp",
[IDH_PTYPE_DFP_HUB] = "hub",
[IDH_PTYPE_DFP_HOST] = "host",
[IDH_PTYPE_DFP_PB] = "power_brick",
- [IDH_PTYPE_DFP_AMC] = "amc",
};
static const char * const product_type_cable[8] = {
- [IDH_PTYPE_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_CABLE] = "not_cable",
[IDH_PTYPE_PCABLE] = "passive",
[IDH_PTYPE_ACABLE] = "active",
+ [IDH_PTYPE_VPD] = "vpd",
};
static struct usb_pd_identity *get_pd_identity(struct device *dev)
@@ -264,6 +267,11 @@ type_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(type);
+static ssize_t usb_power_delivery_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static DEVICE_ATTR_RO(usb_power_delivery_revision);
+
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
@@ -680,6 +688,7 @@ static struct attribute *typec_partner_attrs[] = {
&dev_attr_supports_usb_power_delivery.attr,
&dev_attr_number_of_alternate_modes.attr,
&dev_attr_type.attr,
+ &dev_attr_usb_power_delivery_revision.attr,
NULL
};
@@ -741,6 +750,30 @@ int typec_partner_set_identity(struct typec_partner *partner)
EXPORT_SYMBOL_GPL(typec_partner_set_identity);
/**
+ * typec_partner_set_pd_revision - Set the PD revision supported by the partner
+ * @partner: The partner to be updated.
+ * @pd_revision: USB Power Delivery Specification Revision supported by partner
+ *
+ * This routine is used to report that the PD revision of the port partner has
+ * become available.
+ */
+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision)
+{
+ if (partner->pd_revision == pd_revision)
+ return;
+
+ partner->pd_revision = pd_revision;
+ sysfs_notify(&partner->dev.kobj, NULL, "usb_power_delivery_revision");
+ if (pd_revision != 0 && !partner->usb_pd) {
+ partner->usb_pd = 1;
+ sysfs_notify(&partner->dev.kobj, NULL,
+ "supports_usb_power_delivery");
+ }
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision);
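A hedged usage sketch (hypothetical caller, not part of the patch); the revision is BCD-encoded, as in the struct comments above, e.g. 0300H for PD 3.0:

	/* 'partner' as previously returned by typec_register_partner() */
	typec_partner_set_pd_revision(partner, 0x0300);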
+
+/**
* typec_partner_set_num_altmodes - Set the number of available partner altmodes
* @partner: The partner to be updated.
* @num_altmodes: The number of altmodes we want to specify as available.
@@ -766,6 +799,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -792,6 +826,20 @@ typec_partner_register_altmode(struct typec_partner *partner,
EXPORT_SYMBOL_GPL(typec_partner_register_altmode);
/**
+ * typec_partner_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
+ * @partner: USB Type-C Partner that supports SVDM
+ * @svdm_version: Negotiated SVDM Version
+ *
+ * This routine is used to save the negotiated SVDM Version.
+ */
+void typec_partner_set_svdm_version(struct typec_partner *partner,
+ enum usb_pd_svdm_ver svdm_version)
+{
+ partner->svdm_version = svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_svdm_version);
+
+/**
* typec_register_partner - Register a USB Type-C Partner
* @port: The USB Type-C Port the partner is connected to
* @desc: Description of the partner
@@ -814,6 +862,8 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
partner->usb_pd = desc->usb_pd;
partner->accessory = desc->accessory;
partner->num_altmodes = -1;
+ partner->pd_revision = desc->pd_revision;
+ partner->svdm_version = port->cap->svdm_version;
if (desc->identity) {
/*
@@ -923,6 +973,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -1026,6 +1077,7 @@ static DEVICE_ATTR_RO(plug_type);
static struct attribute *typec_cable_attrs[] = {
&dev_attr_type.attr,
&dev_attr_plug_type.attr,
+ &dev_attr_usb_power_delivery_revision.attr,
NULL
};
ATTRIBUTE_GROUPS(typec_cable);
@@ -1128,6 +1180,7 @@ struct typec_cable *typec_register_cable(struct typec_port *port,
cable->type = desc->type;
cable->active = desc->active;
+ cable->pd_revision = desc->pd_revision;
if (desc->identity) {
/*
@@ -1497,11 +1550,23 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct typec_port *p = to_typec_port(dev);
+ u16 rev = 0;
- return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff);
+ if (is_typec_partner(dev)) {
+ struct typec_partner *partner = to_typec_partner(dev);
+
+ rev = partner->pd_revision;
+ } else if (is_typec_cable(dev)) {
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ rev = cable->pd_revision;
+ } else if (is_typec_port(dev)) {
+ struct typec_port *p = to_typec_port(dev);
+
+ rev = p->cap->pd_revision;
+ }
+ return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
}
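The attribute now decodes the BCD-encoded revision into "major.minor" rather than printing only the major byte. A throwaway user-space sketch of the same decoding (editorial illustration with hypothetical values, not part of the patch):

#include <stdio.h>

static void show(unsigned short rev)
{
	/* Mirrors the sysfs_emit() format string above. */
	printf("0x%04x -> %d.%d\n", rev, (rev >> 8) & 0xff, (rev >> 4) & 0xf);
}

int main(void)
{
	show(0x0200);	/* prints "0x0200 -> 2.0" */
	show(0x0300);	/* prints "0x0300 -> 3.0" */
	show(0x0310);	/* prints "0x0310 -> 3.1" */
	return 0;
}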
-static DEVICE_ATTR_RO(usb_power_delivery_revision);
static ssize_t orientation_show(struct device *dev,
struct device_attribute *attr,
@@ -1846,6 +1911,33 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
+ * typec_get_negotiated_svdm_version - Get negotiated SVDM Version
+ * @port: USB Type-C Port.
+ *
+ * Get the negotiated SVDM Version. The Version is set to the port default
+ * value stored in typec_capability on partner registration, and updated after
+ * a successful Discover Identity if the negotiated value is less than the
+ * default value.
+ *
+ * Returns the negotiated enum usb_pd_svdm_ver value if the partner has been
+ * registered, otherwise -ENODEV.
+ */
+int typec_get_negotiated_svdm_version(struct typec_port *port)
+{
+ enum usb_pd_svdm_ver svdm_version;
+ struct device *partner_dev;
+
+ partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ if (!partner_dev)
+ return -ENODEV;
+
+ svdm_version = to_typec_partner(partner_dev)->svdm_version;
+ put_device(partner_dev);
+
+ return svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
+
+/**
* typec_get_drvdata - Return private driver data pointer
* @port: USB Type-C port
*/
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+	/* Configure HPD first if HPD and IRQ come together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f676abab044b..a27deb0b5f03 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -255,6 +255,14 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
TCPC_TCPC_CTRL_ORIENTATION : 0);
}
+static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ if (tcpci->data->set_partner_usb_comm_capable)
+ tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable);
+}
+
static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -720,6 +728,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
tcpci->tcpc.enable_frs = tcpci_enable_frs;
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
+ tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
if (tcpci->data->auto_discharge_disconnect) {
tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index c3c7d07d9b4e..57b6e24e0a0c 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -161,6 +161,10 @@ struct tcpci;
* Optional; Enables TCPC to autonomously discharge vbus on disconnect.
* @vbus_vsafe0v:
* Optional; Set when TCPC can detect whether vbus is at VSAFE0V.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
*/
struct tcpci_data {
struct regmap *regmap;
@@ -175,6 +179,8 @@ struct tcpci_data {
enum typec_cc_status cc);
int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
+ void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable);
};
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 319266329b42..041a1c393594 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -5,13 +5,10 @@
* MAXIM TCPCI based TCPC driver
*/
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
@@ -22,6 +19,9 @@
#define PD_ACTIVITY_TIMEOUT_MS 10000
#define TCPC_VENDOR_ALERT 0x80
+#define TCPC_VENDOR_USBSW_CTRL 0x93
+#define TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA 0x9
+#define TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA 0
#define TCPC_RECEIVE_BUFFER_COUNT_OFFSET 0
#define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET 1
@@ -158,7 +158,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
*/
ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2);
if (ret < 0) {
- dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d", ret);
+ dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d\n", ret);
return;
}
@@ -167,13 +167,13 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
- dev_err(chip->dev, "%s", count == 0 ? "error: count is 0" :
+ dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" :
"error frame_type is not SOP");
return;
}
if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
- dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d", count);
+ dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count);
return;
}
@@ -184,7 +184,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
count += 1;
ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count);
if (ret < 0) {
- dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d", ret);
+ dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d\n", ret);
return;
}
@@ -277,6 +277,21 @@ static void process_tx(struct max_tcpci_chip *chip, u16 status)
max_tcpci_init_regs(chip);
}
+/* Enable the USB switches when the partner is USB communications capable */
+static void max_tcpci_set_partner_usb_comm_capable(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(data);
+ int ret;
+
+ ret = max_tcpci_write8(chip, TCPC_VENDOR_USBSW_CTRL, capable ?
+ TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA :
+ TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA);
+
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to enable USB switches");
+}
+
static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
{
u16 mask;
@@ -317,7 +332,7 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
return ret;
if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) {
- dev_info(chip->dev, "FRS Signal");
+ dev_info(chip->dev, "FRS Signal\n");
tcpm_sink_frs(chip->port);
}
}
@@ -456,12 +471,12 @@ static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id
chip->data.frs_sourcing_vbus = max_tcpci_frs_sourcing_vbus;
chip->data.auto_discharge_disconnect = true;
chip->data.vbus_vsafe0v = true;
+ chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
if (IS_ERR(chip->tcpci)) {
- dev_err(&client->dev, "TCPCI port registration failed");
- ret = PTR_ERR(chip->tcpci);
+ dev_err(&client->dev, "TCPCI port registration failed\n");
return PTR_ERR(chip->tcpci);
}
chip->port = tcpci_get_tcpm_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 22a85b396f69..be0b6469dd3d 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -76,6 +76,8 @@
S(SNK_HARD_RESET_SINK_ON), \
\
S(SOFT_RESET), \
+ S(SRC_SOFT_RESET_WAIT_SNK_TX), \
+ S(SNK_SOFT_RESET), \
S(SOFT_RESET_SEND), \
\
S(DR_SWAP_ACCEPT), \
@@ -139,7 +141,46 @@
\
S(ERROR_RECOVERY), \
S(PORT_RESET), \
- S(PORT_RESET_WAIT_OFF)
+ S(PORT_RESET_WAIT_OFF), \
+ \
+ S(AMS_START), \
+ S(CHUNK_NOT_SUPP)
+
+#define FOREACH_AMS(S) \
+ S(NONE_AMS), \
+ S(POWER_NEGOTIATION), \
+ S(GOTOMIN), \
+ S(SOFT_RESET_AMS), \
+ S(HARD_RESET), \
+ S(CABLE_RESET), \
+ S(GET_SOURCE_CAPABILITIES), \
+ S(GET_SINK_CAPABILITIES), \
+ S(POWER_ROLE_SWAP), \
+ S(FAST_ROLE_SWAP), \
+ S(DATA_ROLE_SWAP), \
+ S(VCONN_SWAP), \
+ S(SOURCE_ALERT), \
+ S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
+ S(GETTING_SOURCE_SINK_STATUS), \
+ S(GETTING_BATTERY_CAPABILITIES), \
+ S(GETTING_BATTERY_STATUS), \
+ S(GETTING_MANUFACTURER_INFORMATION), \
+ S(SECURITY), \
+ S(FIRMWARE_UPDATE), \
+ S(DISCOVER_IDENTITY), \
+ S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
+ S(DISCOVER_SVIDS), \
+ S(DISCOVER_MODES), \
+ S(DFP_TO_UFP_ENTER_MODE), \
+ S(DFP_TO_UFP_EXIT_MODE), \
+ S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
+ S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
+ S(ATTENTION), \
+ S(BIST), \
+ S(UNSTRUCTURED_VDMS), \
+ S(STRUCTURED_VDMS), \
+ S(COUNTRY_INFO), \
+ S(COUNTRY_CODES)
#define GENERATE_ENUM(e) e
#define GENERATE_STRING(s) #s
@@ -152,6 +193,14 @@ static const char * const tcpm_states[] = {
FOREACH_STATE(GENERATE_STRING)
};
+enum tcpm_ams {
+ FOREACH_AMS(GENERATE_ENUM)
+};
+
+static const char * const tcpm_ams_str[] = {
+ FOREACH_AMS(GENERATE_STRING)
+};
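FOREACH_AMS() with GENERATE_ENUM/GENERATE_STRING is the classic X-macro idiom: a single list expands into both the enum and the matching string table, so the two cannot drift apart. A minimal stand-alone illustration of the same idiom (editorial, not part of the patch):

#include <stdio.h>

#define FOREACH_COLOR(S)	\
	S(RED),			\
	S(GREEN),		\
	S(BLUE)

#define GENERATE_ENUM(e)	e
#define GENERATE_STRING(s)	#s

enum color { FOREACH_COLOR(GENERATE_ENUM) };

static const char * const color_str[] = {
	FOREACH_COLOR(GENERATE_STRING)
};

int main(void)
{
	printf("%s\n", color_str[GREEN]);	/* prints "GREEN" */
	return 0;
}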
+
enum vdm_states {
VDM_STATE_ERR_BUSY = -3,
VDM_STATE_ERR_SEND = -2,
@@ -161,6 +210,7 @@ enum vdm_states {
VDM_STATE_READY = 1,
VDM_STATE_BUSY = 2,
VDM_STATE_WAIT_RSP_BUSY = 3,
+ VDM_STATE_SEND_MESSAGE = 4,
};
enum pd_msg_request {
@@ -302,6 +352,7 @@ struct tcpm_port {
struct hrtimer enable_frs_timer;
struct kthread_work enable_frs;
bool state_machine_running;
+ bool vdm_sm_running;
struct completion tx_complete;
enum tcpm_transmit_status tx_status;
@@ -381,6 +432,12 @@ struct tcpm_port {
/* Sink caps have been queried */
bool sink_cap_done;
+ /* Collision Avoidance and Atomic Message Sequence */
+ enum tcpm_state upcoming_state;
+ enum tcpm_ams ams;
+ enum tcpm_ams next_ams;
+ bool in_ams;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -396,6 +453,12 @@ struct pd_rx_event {
struct pd_message msg;
};
+static const char * const pd_rev[] = {
+ [PD_REV10] = "rev1",
+ [PD_REV20] = "rev2",
+ [PD_REV30] = "rev3",
+};
+
#define tcpm_cc_is_sink(cc) \
((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
(cc) == TYPEC_CC_RP_3_0)
@@ -440,6 +503,10 @@ struct pd_rx_event {
((port)->typec_caps.data == TYPEC_PORT_DFP ? \
TYPEC_HOST : TYPEC_DEVICE)
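+/*
+ * PD 3.0 collision avoidance: a sink may initiate an AMS only while the
+ * source advertises SinkTxOk, i.e. pulls CC to the Rp 3.0 A level.
+ */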
+#define tcpm_sink_tx_ok(port) \
+ (tcpm_port_is_sink(port) && \
+ ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
+
static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
{
if (port->port_type == TYPEC_PORT_DRP) {
@@ -666,6 +733,67 @@ static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
#endif
+static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
+{
+ tcpm_log(port, "cc:=%d", cc);
+ port->cc_req = cc;
+ port->tcpc->set_cc(port->tcpc, cc);
+}
+
+/*
+ * Determine RP value to set based on maximum current supported
+ * by a port if configured as source.
+ * Returns CC value to report to link partner.
+ */
+static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
+{
+ const u32 *src_pdo = port->src_pdo;
+ int nr_pdo = port->nr_src_pdo;
+ int i;
+
+ /*
+ * Search for first entry with matching voltage.
+ * It should report the maximum supported current.
+ */
+ for (i = 0; i < nr_pdo; i++) {
+ const u32 pdo = src_pdo[i];
+
+ if (pdo_type(pdo) == PDO_TYPE_FIXED &&
+ pdo_fixed_voltage(pdo) == 5000) {
+ unsigned int curr = pdo_max_current(pdo);
+
+ if (curr >= 3000)
+ return TYPEC_CC_RP_3_0;
+ else if (curr >= 1500)
+ return TYPEC_CC_RP_1_5;
+ return TYPEC_CC_RP_DEF;
+ }
+ }
+
+ return TYPEC_CC_RP_DEF;
+}
+
+static int tcpm_ams_finish(struct tcpm_port *port)
+{
+ int ret = 0;
+
+ tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
+
+ if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_OK);
+ else
+ tcpm_set_cc(port, SINK_TX_NG);
+ } else if (port->pwr_role == TYPEC_SOURCE) {
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ }
+
+ port->in_ams = false;
+ port->ams = NONE_AMS;
+
+ return ret;
+}
+
static int tcpm_pd_transmit(struct tcpm_port *port,
enum tcpm_transmit_type type,
const struct pd_message *msg)
@@ -693,13 +821,30 @@ static int tcpm_pd_transmit(struct tcpm_port *port,
switch (port->tx_status) {
case TCPC_TX_SUCCESS:
port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
- return 0;
+ /*
+ * USB PD rev 2.0, 8.3.2.2.1:
+ * USB PD rev 3.0, 8.3.2.1.3:
+ * "... Note that every AMS is Interruptible until the first
+ * Message in the sequence has been successfully sent (GoodCRC
+ * Message received)."
+ */
+ if (port->ams != NONE_AMS)
+ port->in_ams = true;
+ break;
case TCPC_TX_DISCARDED:
- return -EAGAIN;
+ ret = -EAGAIN;
+ break;
case TCPC_TX_FAILED:
default:
- return -EIO;
+ ret = -EIO;
+ break;
}
+
+	/* Some AMSs don't expect a response; finish them here. */
+ if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
+ tcpm_ams_finish(port);
+
+ return ret;
}
void tcpm_pd_transmit_complete(struct tcpm_port *port,
@@ -804,39 +949,6 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
return ret;
}
-/*
- * Determine RP value to set based on maximum current supported
- * by a port if configured as source.
- * Returns CC value to report to link partner.
- */
-static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
-{
- const u32 *src_pdo = port->src_pdo;
- int nr_pdo = port->nr_src_pdo;
- int i;
-
- /*
- * Search for first entry with matching voltage.
- * It should report the maximum supported current.
- */
- for (i = 0; i < nr_pdo; i++) {
- const u32 pdo = src_pdo[i];
-
- if (pdo_type(pdo) == PDO_TYPE_FIXED &&
- pdo_fixed_voltage(pdo) == 5000) {
- unsigned int curr = pdo_max_current(pdo);
-
- if (curr >= 3000)
- return TYPEC_CC_RP_3_0;
- else if (curr >= 1500)
- return TYPEC_CC_RP_1_5;
- return TYPEC_CC_RP_DEF;
- }
- }
-
- return TYPEC_CC_RP_DEF;
-}
-
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
@@ -911,13 +1023,47 @@ static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
return 0;
}
+/*
+ * Transform the PDO to be compliant with PD rev2.0.
+ * Return 0 if the PDO type is not defined in PD rev2.0.
+ * Otherwise, return the converted PDO.
+ */
+static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
+{
+ switch (pdo_type(pdo)) {
+ case PDO_TYPE_FIXED:
+ if (role == TYPEC_SINK)
+ return pdo & ~PDO_FIXED_FRS_CURR_MASK;
+ else
+ return pdo & ~PDO_FIXED_UNCHUNK_EXT;
+ case PDO_TYPE_VAR:
+ case PDO_TYPE_BATT:
+ return pdo;
+ case PDO_TYPE_APDO:
+ default:
+ return 0;
+ }
+}
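An illustrative sketch of the helper's effect (editorial, not part of the patch; PDO_FIXED() and the field masks are the ones defined in include/linux/usb/pd.h):

	u32 snk_pdo = PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE);
	u32 legacy;

	/* For a sink, the Fast Role Swap field (absent in rev2.0) is cleared. */
	legacy = tcpm_forge_legacy_pdo(port, snk_pdo, TYPEC_SINK);
	/* legacy == (snk_pdo & ~PDO_FIXED_FRS_CURR_MASK); an APDO would yield 0. */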
+
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
struct pd_message msg;
- int i;
+ u32 pdo;
+ unsigned int i, nr_pdo = 0;
memset(&msg, 0, sizeof(msg));
- if (!port->nr_src_pdo) {
+
+ for (i = 0; i < port->nr_src_pdo; i++) {
+ if (port->negotiated_rev >= PD_REV30) {
+ msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
+ } else {
+ pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
+ if (pdo)
+ msg.payload[nr_pdo++] = cpu_to_le32(pdo);
+ }
+ }
+
+ if (!nr_pdo) {
/* No source capabilities defined, sink only */
msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
port->pwr_role,
@@ -930,10 +1076,8 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port)
port->data_role,
port->negotiated_rev,
port->message_id,
- port->nr_src_pdo);
+ nr_pdo);
}
- for (i = 0; i < port->nr_src_pdo; i++)
- msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
@@ -941,10 +1085,22 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port)
static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
struct pd_message msg;
- int i;
+ u32 pdo;
+ unsigned int i, nr_pdo = 0;
memset(&msg, 0, sizeof(msg));
- if (!port->nr_snk_pdo) {
+
+ for (i = 0; i < port->nr_snk_pdo; i++) {
+ if (port->negotiated_rev >= PD_REV30) {
+ msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
+ } else {
+ pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
+ if (pdo)
+ msg.payload[nr_pdo++] = cpu_to_le32(pdo);
+ }
+ }
+
+ if (!nr_pdo) {
/* No sink capabilities defined, source only */
msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
port->pwr_role,
@@ -957,10 +1113,8 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
port->data_role,
port->negotiated_rev,
port->message_id,
- port->nr_snk_pdo);
+ nr_pdo);
}
- for (i = 0; i < port->nr_snk_pdo; i++)
- msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
@@ -1000,16 +1154,17 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
unsigned int delay_ms)
{
if (delay_ms) {
- tcpm_log(port, "pending state change %s -> %s @ %u ms",
- tcpm_states[port->state], tcpm_states[state],
- delay_ms);
+ tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
+ tcpm_states[port->state], tcpm_states[state], delay_ms,
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
port->delayed_state = state;
mod_tcpm_delayed_work(port, delay_ms);
port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
port->delay_ms = delay_ms;
} else {
- tcpm_log(port, "state change %s -> %s",
- tcpm_states[port->state], tcpm_states[state]);
+ tcpm_log(port, "state change %s -> %s [%s %s]",
+ tcpm_states[port->state], tcpm_states[state],
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
port->delayed_state = INVALID_STATE;
port->prev_state = port->state;
port->state = state;
@@ -1031,10 +1186,11 @@ static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
tcpm_set_state(port, state, delay_ms);
else
tcpm_log(port,
- "skipped %sstate change %s -> %s [%u ms], context state %s",
+ "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
delay_ms ? "delayed " : "",
tcpm_states[port->state], tcpm_states[state],
- delay_ms, tcpm_states[port->enter_state]);
+ delay_ms, tcpm_states[port->enter_state],
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
}
static void tcpm_queue_message(struct tcpm_port *port,
@@ -1044,6 +1200,149 @@ static void tcpm_queue_message(struct tcpm_port *port,
mod_tcpm_delayed_work(port, 0);
}
+static bool tcpm_vdm_ams(struct tcpm_port *port)
+{
+ switch (port->ams) {
+ case DISCOVER_IDENTITY:
+ case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
+ case DISCOVER_SVIDS:
+ case DISCOVER_MODES:
+ case DFP_TO_UFP_ENTER_MODE:
+ case DFP_TO_UFP_EXIT_MODE:
+ case DFP_TO_CABLE_PLUG_ENTER_MODE:
+ case DFP_TO_CABLE_PLUG_EXIT_MODE:
+ case ATTENTION:
+ case UNSTRUCTURED_VDMS:
+ case STRUCTURED_VDMS:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool tcpm_ams_interruptible(struct tcpm_port *port)
+{
+ switch (port->ams) {
+ /* Interruptible AMS */
+ case NONE_AMS:
+ case SECURITY:
+ case FIRMWARE_UPDATE:
+ case DISCOVER_IDENTITY:
+ case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
+ case DISCOVER_SVIDS:
+ case DISCOVER_MODES:
+ case DFP_TO_UFP_ENTER_MODE:
+ case DFP_TO_UFP_EXIT_MODE:
+ case DFP_TO_CABLE_PLUG_ENTER_MODE:
+ case DFP_TO_CABLE_PLUG_EXIT_MODE:
+ case UNSTRUCTURED_VDMS:
+ case STRUCTURED_VDMS:
+ case COUNTRY_INFO:
+ case COUNTRY_CODES:
+ break;
+ /* Non-Interruptible AMS */
+ default:
+ if (port->in_ams)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
+{
+ int ret = 0;
+
+ tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
+
+ if (!tcpm_ams_interruptible(port) &&
+ !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
+ port->upcoming_state = INVALID_STATE;
+ tcpm_log(port, "AMS %s not interruptible, aborting",
+ tcpm_ams_str[port->ams]);
+ return -EAGAIN;
+ }
+
+ if (port->pwr_role == TYPEC_SOURCE) {
+ enum typec_cc_status cc_req = port->cc_req;
+
+ port->ams = ams;
+
+ if (ams == HARD_RESET) {
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
+ tcpm_set_state(port, HARD_RESET_START, 0);
+ return ret;
+ } else if (ams == SOFT_RESET_AMS) {
+ if (!port->explicit_contract)
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_state(port, SOFT_RESET_SEND, 0);
+ return ret;
+ } else if (tcpm_vdm_ams(port)) {
+ /* tSinkTx is enforced in vdm_run_state_machine */
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_NG);
+ return ret;
+ }
+
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_NG);
+
+ switch (port->state) {
+ case SRC_READY:
+ case SRC_STARTUP:
+ case SRC_SOFT_RESET_WAIT_SNK_TX:
+ case SOFT_RESET:
+ case SOFT_RESET_SEND:
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_state(port, AMS_START,
+ cc_req == SINK_TX_OK ?
+ PD_T_SINK_TX : 0);
+ else
+ tcpm_set_state(port, AMS_START, 0);
+ break;
+ default:
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_state(port, SRC_READY,
+ cc_req == SINK_TX_OK ?
+ PD_T_SINK_TX : 0);
+ else
+ tcpm_set_state(port, SRC_READY, 0);
+ break;
+ }
+ } else {
+ if (port->negotiated_rev >= PD_REV30 &&
+ !tcpm_sink_tx_ok(port) &&
+ ams != SOFT_RESET_AMS &&
+ ams != HARD_RESET) {
+ port->upcoming_state = INVALID_STATE;
+ tcpm_log(port, "Sink TX No Go");
+ return -EAGAIN;
+ }
+
+ port->ams = ams;
+
+ if (ams == HARD_RESET) {
+ tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
+ tcpm_set_state(port, HARD_RESET_START, 0);
+ return ret;
+ } else if (tcpm_vdm_ams(port)) {
+ return ret;
+ }
+
+ if (port->state == SNK_READY ||
+ port->state == SNK_SOFT_RESET)
+ tcpm_set_state(port, AMS_START, 0);
+ else
+ tcpm_set_state(port, SNK_READY, 0);
+ }
+
+ return ret;
+}
+
/*
* VDM/VDO handling functions
*/
@@ -1176,8 +1475,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
const u32 *p, int cnt, u32 *response,
enum adev_actions *adev_action)
{
+ struct typec_port *typec = port->typec_port;
struct typec_altmode *pdev;
struct pd_mode_data *modep;
+ int svdm_version;
int rlen = 0;
int cmd_type;
int cmd;
@@ -1194,14 +1495,33 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+
switch (cmd_type) {
case CMDT_INIT:
switch (cmd) {
case CMD_DISCOVER_IDENT:
+ if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ break;
+
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
/* 6.4.4.3.1: Only respond as UFP (device) */
if (port->data_role == TYPEC_DEVICE &&
port->nr_snk_vdo) {
- for (i = 0; i < port->nr_snk_vdo; i++)
+ /*
+ * Product Type DFP and Connector Type are not defined in SVDM
+ * version 1.0 and shall be set to zero.
+ */
+ if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
+ response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
+ & ~IDH_CONN_MASK;
+ else
+ response[1] = port->snk_vdo[0];
+ for (i = 1; i < port->nr_snk_vdo; i++)
response[i + 1] = port->snk_vdo[i];
rlen = port->nr_snk_vdo + 1;
}
@@ -1230,27 +1550,34 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
rlen = 1;
}
+ response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
+ (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
break;
case CMDT_RSP_ACK:
/* silently drop message if we are not connected */
if (IS_ERR_OR_NULL(port->partner))
break;
+ tcpm_ams_finish(port);
+
switch (cmd) {
case CMD_DISCOVER_IDENT:
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
/* 6.4.4.3.1 */
svdm_consume_identity(port, p, cnt);
- response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
+ response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
rlen = 1;
break;
case CMD_DISCOVER_SVID:
/* 6.4.4.3.2 */
if (svdm_consume_svids(port, p, cnt)) {
- response[0] = VDO(USB_SID_PD, 1,
- CMD_DISCOVER_SVID);
+ response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
rlen = 1;
} else if (modep->nsvids && supports_modal(port)) {
- response[0] = VDO(modep->svids[0], 1,
+ response[0] = VDO(modep->svids[0], 1, svdm_version,
CMD_DISCOVER_MODES);
rlen = 1;
}
@@ -1261,10 +1588,11 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
modep->svid_index++;
if (modep->svid_index < modep->nsvids) {
u16 svid = modep->svids[modep->svid_index];
- response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
+ response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
rlen = 1;
} else {
tcpm_register_partner_altmodes(port);
+ port->vdm_sm_running = false;
}
break;
case CMD_ENTER_MODE:
@@ -1281,21 +1609,45 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
return 0;
}
break;
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ break;
default:
+ /* Unrecognized SVDM */
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
+ (VDO_SVDM_VERS(svdm_version));
break;
}
break;
case CMDT_RSP_NAK:
+ tcpm_ams_finish(port);
switch (cmd) {
+ case CMD_DISCOVER_IDENT:
+ case CMD_DISCOVER_SVID:
+ case CMD_DISCOVER_MODES:
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ break;
case CMD_ENTER_MODE:
/* Back to USB Operation */
*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
return 0;
default:
+ /* Unrecognized SVDM */
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
+ (VDO_SVDM_VERS(svdm_version));
break;
}
+ port->vdm_sm_running = false;
break;
default:
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
+ (VDO_SVDM_VERS(svdm_version));
+ port->vdm_sm_running = false;
break;
}
@@ -1331,8 +1683,12 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_DONE;
}
- if (PD_VDO_SVDM(p[0]))
+ if (PD_VDO_SVDM(p[0])) {
rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ } else {
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ }
/*
* We are done with any state stored in the port struct now, except
@@ -1368,7 +1724,13 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
break;
case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
+ int svdm_version = typec_get_negotiated_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
response[0] |= VDO_OPOS(adev->mode);
rlen = 1;
}
@@ -1395,14 +1757,19 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
const u32 *data, int count)
{
+ int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
u32 header;
+ if (svdm_version < 0)
+ return;
+
if (WARN_ON(count > VDO_MAX_SIZE - 1))
count = VDO_MAX_SIZE - 1;
/* set VDM header with VID & CMD */
header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
- 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
+ 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
+ svdm_version, cmd);
tcpm_queue_vdm(port, header, data, count);
}
@@ -1435,7 +1802,8 @@ static unsigned int vdm_ready_timeout(u32 vdm_hdr)
static void vdm_run_state_machine(struct tcpm_port *port)
{
struct pd_message msg;
- int i, res;
+ int i, res = 0;
+ u32 vdo_hdr = port->vdo_data[0];
switch (port->vdm_state) {
case VDM_STATE_READY:
@@ -1452,26 +1820,49 @@ static void vdm_run_state_machine(struct tcpm_port *port)
if (port->state != SRC_READY && port->state != SNK_READY)
break;
- /* Prepare and send VDM */
- memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
- port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, port->vdo_count);
- for (i = 0; i < port->vdo_count; i++)
- msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
- res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
- if (res < 0) {
- port->vdm_state = VDM_STATE_ERR_SEND;
- } else {
- unsigned long timeout;
+ /* TODO: AMS operation for Unstructured VDM */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
+ switch (PD_VDO_CMD(vdo_hdr)) {
+ case CMD_DISCOVER_IDENT:
+ res = tcpm_ams_start(port, DISCOVER_IDENTITY);
+ if (res == 0)
+ port->send_discover = false;
+ break;
+ case CMD_DISCOVER_SVID:
+ res = tcpm_ams_start(port, DISCOVER_SVIDS);
+ break;
+ case CMD_DISCOVER_MODES:
+ res = tcpm_ams_start(port, DISCOVER_MODES);
+ break;
+ case CMD_ENTER_MODE:
+ res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+ break;
+ case CMD_EXIT_MODE:
+ res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+ break;
+ case CMD_ATTENTION:
+ res = tcpm_ams_start(port, ATTENTION);
+ break;
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ res = tcpm_ams_start(port, STRUCTURED_VDMS);
+ break;
+ default:
+ res = -EOPNOTSUPP;
+ break;
+ }
- port->vdm_retries = 0;
- port->vdm_state = VDM_STATE_BUSY;
- timeout = vdm_ready_timeout(port->vdo_data[0]);
- mod_vdm_delayed_work(port, timeout);
+ if (res < 0) {
+ port->vdm_sm_running = false;
+ return;
+ }
}
+
+ port->vdm_state = VDM_STATE_SEND_MESSAGE;
+ mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
+ port->pwr_role == TYPEC_SOURCE &&
+ PD_VDO_SVDM(vdo_hdr) &&
+ PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
+ PD_T_SINK_TX : 0);
break;
case VDM_STATE_WAIT_RSP_BUSY:
port->vdo_data[0] = port->vdo_retry;
@@ -1480,6 +1871,8 @@ static void vdm_run_state_machine(struct tcpm_port *port)
break;
case VDM_STATE_BUSY:
port->vdm_state = VDM_STATE_ERR_TMOUT;
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
break;
case VDM_STATE_ERR_SEND:
/*
@@ -1492,6 +1885,29 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ tcpm_ams_finish(port);
+ }
+ break;
+ case VDM_STATE_SEND_MESSAGE:
+ /* Prepare and send VDM */
+ memset(&msg, 0, sizeof(msg));
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id, port->vdo_count);
+ for (i = 0; i < port->vdo_count; i++)
+ msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
+ res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ if (res < 0) {
+ port->vdm_state = VDM_STATE_ERR_SEND;
+ } else {
+ unsigned long timeout;
+
+ port->vdm_retries = 0;
+ port->vdm_state = VDM_STATE_BUSY;
+ timeout = vdm_ready_timeout(vdo_hdr);
+ mod_vdm_delayed_work(port, timeout);
}
break;
default:
@@ -1514,7 +1930,11 @@ static void vdm_state_machine_work(struct kthread_work *work)
prev_state = port->vdm_state;
vdm_run_state_machine(port);
} while (port->vdm_state != prev_state &&
- port->vdm_state != VDM_STATE_BUSY);
+ port->vdm_state != VDM_STATE_BUSY &&
+ port->vdm_state != VDM_STATE_SEND_MESSAGE);
+
+ if (port->vdm_state == VDM_STATE_ERR_TMOUT)
+ port->vdm_sm_running = false;
mutex_unlock(&port->lock);
}
@@ -1642,9 +2062,14 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
u32 header;
- header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
@@ -1654,9 +2079,14 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
static int tcpm_altmode_exit(struct typec_altmode *altmode)
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
u32 header;
- header = VDO(altmode->svid, 1, CMD_EXIT_MODE);
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
tcpm_queue_vdm_unlocked(port, header, NULL, 0);
@@ -1736,6 +2166,71 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
return ret;
}
+static void tcpm_pd_handle_state(struct tcpm_port *port,
+ enum tcpm_state state,
+ enum tcpm_ams ams,
+ unsigned int delay_ms)
+{
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ port->ams = ams;
+ tcpm_set_state(port, state, delay_ms);
+ break;
+ /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ case SRC_TRANSITION_SUPPLY:
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ break;
+ default:
+ if (!tcpm_ams_interruptible(port)) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ 0);
+ } else {
+ /* process the Message 6.8.1 */
+ port->upcoming_state = state;
+ port->next_ams = ams;
+ tcpm_set_state(port, ready_state(port), delay_ms);
+ }
+ break;
+ }
+}
+
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+ enum pd_msg_request message,
+ enum tcpm_ams ams)
+{
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ port->ams = ams;
+ tcpm_queue_message(port, message);
+ break;
+ /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ case SRC_TRANSITION_SUPPLY:
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ break;
+ default:
+ if (!tcpm_ams_interruptible(port)) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ 0);
+ } else {
+ port->next_ams = ams;
+ tcpm_set_state(port, ready_state(port), 0);
+ /* 6.8.1 process the Message */
+ tcpm_queue_message(port, message);
+ }
+ break;
+ }
+}
+
static void tcpm_pd_data_request(struct tcpm_port *port,
const struct pd_message *msg)
{
@@ -1749,9 +2244,6 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
switch (type) {
case PD_DATA_SOURCE_CAP:
- if (port->pwr_role != TYPEC_SINK)
- break;
-
for (i = 0; i < cnt; i++)
port->source_caps[i] = le32_to_cpu(msg->payload[i]);
@@ -1767,12 +2259,26 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
* to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
* support Rev 1.0 so just do nothing in that scenario.
*/
- if (rev == PD_REV10)
+ if (rev == PD_REV10) {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_ams_finish(port);
break;
+ }
if (rev < PD_MAX_REV)
port->negotiated_rev = rev;
+ if (port->pwr_role == TYPEC_SOURCE) {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
+ /* Unexpected Source Capabilities */
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else if (port->state == SNK_WAIT_CAPABILITIES) {
/*
* This message may be received even if VBUS is not
* present. This is quite unexpected; see USB PD
@@ -1786,30 +2292,55 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
* but be prepared to keep waiting for VBUS after it was
* handled.
*/
- tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ port->ams = POWER_NEGOTIATION;
+ port->in_ams = true;
+ tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ } else {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_ams_finish(port);
+ tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
+ POWER_NEGOTIATION, 0);
+ }
break;
case PD_DATA_REQUEST:
- if (port->pwr_role != TYPEC_SOURCE ||
- cnt != 1) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
-
/*
* Adjust revision in subsequent message headers, as required,
* to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
* support Rev 1.0 so just reject in that scenario.
*/
if (rev == PD_REV10) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
}
if (rev < PD_MAX_REV)
port->negotiated_rev = rev;
+ if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ break;
+ }
+
port->sink_request = le32_to_cpu(msg->payload[0]);
- tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
+
+ if (port->vdm_sm_running && port->explicit_contract) {
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
+ break;
+ }
+
+ if (port->state == SRC_SEND_CAPABILITIES)
+ tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
+ else
+ tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
+ POWER_NEGOTIATION, 0);
break;
case PD_DATA_SINK_CAP:
/* We don't do anything with this at the moment... */
@@ -1830,16 +2361,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
- tcpm_set_state(port, SNK_READY, 0);
+ if (port->ams == GET_SINK_CAPABILITIES)
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ /* Unexpected Sink Capabilities */
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
tcpm_handle_vdm_request(port, msg->payload, cnt);
break;
case PD_DATA_BIST:
- if (port->state == SRC_READY || port->state == SNK_READY) {
- port->bist_request = le32_to_cpu(msg->payload[0]);
- tcpm_set_state(port, BIST_RX, 0);
- }
+ port->bist_request = le32_to_cpu(msg->payload[0]);
+ tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
tcpm_handle_alert(port, msg->payload, cnt);
@@ -1847,10 +2384,17 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
/* Currently unsupported */
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled data message type %#x", type);
+ tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ tcpm_log(port, "Unrecognized data message type %#x", type);
break;
}
}
@@ -1875,26 +2419,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case PD_CTRL_PING:
break;
case PD_CTRL_GET_SOURCE_CAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
break;
case PD_CTRL_GET_SINK_CAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
break;
case PD_CTRL_GOTO_MIN:
break;
@@ -1910,6 +2438,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
+ /* Set VDM running flag ASAP */
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
} else {
/*
@@ -1933,6 +2465,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
@@ -1942,10 +2479,14 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
switch (port->state) {
case SNK_NEGOTIATE_CAPABILITIES:
/* USB PD specification, Figure 8-43 */
- if (port->explicit_contract)
+ if (port->explicit_contract) {
next_state = SNK_READY;
- else
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+ } else {
next_state = SNK_WAIT_CAPABILITIES;
+ }
tcpm_set_state(port, next_state, 0);
break;
case SNK_NEGOTIATE_PPS_CAPABILITIES:
@@ -1954,6 +2495,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->pps_data.op_curr = port->current_limit;
port->pps_status = (type == PD_CTRL_WAIT ?
-EAGAIN : -EOPNOTSUPP);
+
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+
tcpm_set_state(port, SNK_READY, 0);
break;
case DR_SWAP_SEND:
@@ -1979,6 +2525,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, ready_state(port), 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
@@ -1995,15 +2546,20 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
break;
case SOFT_RESET_SEND:
- port->message_id = 0;
- port->rx_msgid = -1;
- if (port->pwr_role == TYPEC_SOURCE)
- next_state = SRC_SEND_CAPABILITIES;
- else
- next_state = SNK_WAIT_CAPABILITIES;
- tcpm_set_state(port, next_state, 0);
+ if (port->ams == SOFT_RESET_AMS)
+ tcpm_ams_finish(port);
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
+ } else {
+ tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ }
break;
case DR_SWAP_SEND:
+ if (port->data_role == TYPEC_DEVICE &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+
tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
break;
case PR_SWAP_SEND:
@@ -2016,57 +2572,62 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
case PD_CTRL_SOFT_RESET:
+ port->ams = SOFT_RESET_AMS;
tcpm_set_state(port, SOFT_RESET, 0);
break;
case PD_CTRL_DR_SWAP:
- if (port->typec_caps.data != TYPEC_PORT_DRD) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
/*
* XXX
* 6.3.9: If an alternate mode is active, a request to swap
* alternate modes shall trigger a port reset.
*/
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
+ if (port->typec_caps.data != TYPEC_PORT_DRD) {
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+ if (port->vdm_sm_running) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+
+ tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
}
break;
case PD_CTRL_PR_SWAP:
if (port->port_type != TYPEC_PORT_DRP) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+ if (port->vdm_sm_running) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+
+ tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
}
break;
case PD_CTRL_VCONN_SWAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
- break;
- default:
+ if (port->vdm_sm_running) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
+
+ tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
break;
case PD_CTRL_GET_SOURCE_CAP_EXT:
case PD_CTRL_GET_STATUS:
@@ -2074,10 +2635,19 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case PD_CTRL_GET_PPS_STATUS:
case PD_CTRL_GET_COUNTRY_CODES:
/* Currently not supported */
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled ctrl message type %#x", type);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ tcpm_log(port, "Unrecognized ctrl message type %#x", type);
break;
}
}
@@ -2089,11 +2659,13 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
tcpm_log(port, "Unchunked extended messages unsupported");
return;
}
if (data_size > PD_EXT_MAX_CHUNK_DATA) {
+ tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
tcpm_log(port, "Chunk handling not yet supported");
return;
}
@@ -2106,16 +2678,18 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
*/
if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_set_state(port, GET_PPS_STATUS_SEND, 0);
+ tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
+ GETTING_SOURCE_SINK_STATUS, 0);
+
else
- tcpm_set_state(port, ready_state(port), 0);
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
break;
case PD_EXT_PPS_STATUS:
/*
* For now the PPS status message is used to clear events
* and nothing more.
*/
- tcpm_set_state(port, ready_state(port), 0);
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
@@ -2129,10 +2703,11 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
case PD_EXT_FW_UPDATE_RESPONSE:
case PD_EXT_COUNTRY_INFO:
case PD_EXT_COUNTRY_CODES:
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled extended message type %#x", type);
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+ tcpm_log(port, "Unrecognized extended message type %#x", type);
break;
}
}
@@ -2245,7 +2820,12 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
break;
case PD_MSG_DATA_SINK_CAP:
- tcpm_pd_send_sink_caps(port);
+ ret = tcpm_pd_send_sink_caps(port);
+ if (ret < 0) {
+ tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
+ tcpm_set_state(port, SNK_SOFT_RESET, 0);
+ }
+ tcpm_ams_finish(port);
break;
case PD_MSG_DATA_SOURCE_CAP:
ret = tcpm_pd_send_source_caps(port);
@@ -2255,8 +2835,11 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
ret);
tcpm_set_state(port, SOFT_RESET_SEND, 0);
} else if (port->pwr_role == TYPEC_SOURCE) {
+ tcpm_ams_finish(port);
tcpm_set_state(port, HARD_RESET_SEND,
PD_T_SENDER_RESPONSE);
+ } else {
+ tcpm_ams_finish(port);
}
break;
default:
@@ -2776,13 +3359,6 @@ static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
return ret == 0;
}
-static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
-{
- tcpm_log(port, "cc:=%d", cc);
- port->cc_req = cc;
- port->tcpc->set_cc(port->tcpc, cc);
-}
-
static int tcpm_init_vbus(struct tcpm_port *port)
{
int ret;
@@ -2904,6 +3480,14 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port)
memset(modep, 0, sizeof(*modep));
}
+static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
+{
+ tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
+
+ if (port->tcpc->set_partner_usb_comm_capable)
+ port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
+}
+
static void tcpm_reset_port(struct tcpm_port *port)
{
int ret;
@@ -2912,11 +3496,15 @@ static void tcpm_reset_port(struct tcpm_port *port)
ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
}
+ port->in_ams = false;
+ port->ams = NONE_AMS;
+ port->vdm_sm_running = false;
tcpm_unregister_altmodes(port);
tcpm_typec_disconnect(port);
port->attached = false;
port->pd_capable = false;
port->pps_data.supported = false;
+ tcpm_set_partner_usb_comm_capable(port, false);
/*
* First Rx ID should be 0; set this to a sentinel of -1 so that
@@ -3090,6 +3678,7 @@ static void run_state_machine(struct tcpm_port *port)
int ret;
enum typec_pwr_opmode opmode;
unsigned int msecs;
+ enum tcpm_state upcoming_state;
port->enter_state = port->state;
switch (port->state) {
@@ -3190,7 +3779,12 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
- tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
+ if (port->ams == POWER_ROLE_SWAP ||
+ port->ams == FAST_ROLE_SWAP)
+ tcpm_ams_finish(port);
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
break;
case SRC_SEND_CAPABILITIES:
port->caps_count++;
@@ -3251,6 +3845,8 @@ static void run_state_machine(struct tcpm_port *port)
}
} else {
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_partner_usb_comm_capable(port,
+ !!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
PD_T_SRC_TRANSITION);
}
@@ -3272,6 +3868,24 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ if (port->next_ams != NONE_AMS) {
+ port->ams = port->next_ams;
+ port->next_ams = NONE_AMS;
+ }
+
+ /*
+ * If the previous AMS was interrupted, switch to the
+ * upcoming state.
+ */
+ if (port->upcoming_state != INVALID_STATE) {
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+ }
+
tcpm_check_send_discover(port);
/*
* 6.3.5
@@ -3389,6 +4003,12 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
+
+ if (port->ams == POWER_ROLE_SWAP ||
+ port->ams == FAST_ROLE_SWAP)
+ /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
+ tcpm_ams_finish(port);
+
tcpm_set_state(port, SNK_DISCOVERY, 0);
break;
case SNK_DISCOVERY:
@@ -3437,7 +4057,7 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SOFT_RESET_SEND,
+ tcpm_set_state(port, SNK_SOFT_RESET,
PD_T_SINK_WAIT_CAP);
} else {
tcpm_set_state(port, hard_reset_state(port),
@@ -3446,6 +4066,8 @@ static void run_state_machine(struct tcpm_port *port)
break;
case SNK_NEGOTIATE_CAPABILITIES:
port->pd_capable = true;
+ tcpm_set_partner_usb_comm_capable(port,
+ !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
port->hard_reset_count = 0;
ret = tcpm_pd_send_request(port);
if (ret < 0) {
@@ -3490,9 +4112,28 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
- tcpm_check_send_discover(port);
mod_enable_frs_delayed_work(port, 0);
tcpm_pps_complete(port, port->pps_status);
+
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ if (port->next_ams != NONE_AMS) {
+ port->ams = port->next_ams;
+ port->next_ams = NONE_AMS;
+ }
+
+ /*
+ * If the previous AMS was interrupted, switch to the
+ * upcoming state.
+ */
+ if (port->upcoming_state != INVALID_STATE) {
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+ }
+
+ tcpm_check_send_discover(port);
power_supply_changed(port->psy);
break;
@@ -3513,8 +4154,14 @@ static void run_state_machine(struct tcpm_port *port)
/* Hard_Reset states */
case HARD_RESET_SEND:
- tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
- tcpm_set_state(port, HARD_RESET_START, 0);
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ /*
+ * The state machine will be directed to HARD_RESET_START,
+ * so set upcoming_state to INVALID_STATE.
+ */
+ port->upcoming_state = INVALID_STATE;
+ tcpm_ams_start(port, HARD_RESET);
break;
case HARD_RESET_START:
port->sink_cap_done = false;
@@ -3558,6 +4205,8 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_HARD_RESET_VBUS_ON:
tcpm_set_vconn(port, true);
tcpm_set_vbus(port, true);
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
port->tcpc->set_pd_rx(port->tcpc, true);
tcpm_set_attached_state(port, true);
tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
@@ -3579,6 +4228,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
break;
case SNK_HARD_RESET_WAIT_VBUS:
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
/* Assume we're disconnected if VBUS doesn't come back. */
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
@@ -3606,6 +4257,8 @@ static void run_state_machine(struct tcpm_port *port)
5000);
tcpm_set_charge(port, true);
}
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
tcpm_set_attached_state(port, true);
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
@@ -3616,10 +4269,20 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
- if (port->pwr_role == TYPEC_SOURCE)
- tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
- else
+ tcpm_ams_finish(port);
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
+ } else {
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ }
+ break;
+ case SRC_SOFT_RESET_WAIT_SNK_TX:
+ case SNK_SOFT_RESET:
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ port->upcoming_state = SOFT_RESET_SEND;
+ tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
port->message_id = 0;
@@ -3639,10 +4302,14 @@ static void run_state_machine(struct tcpm_port *port)
break;
case DR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ /* Set VDM state machine running flag ASAP */
+ if (port->data_role == TYPEC_DEVICE && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
+ tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
case DR_SWAP_CHANGE_DR:
@@ -3655,6 +4322,7 @@ static void run_state_machine(struct tcpm_port *port)
TYPEC_HOST);
port->send_discover = true;
}
+ tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -3782,6 +4450,7 @@ static void run_state_machine(struct tcpm_port *port)
case VCONN_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
@@ -3791,6 +4460,8 @@ static void run_state_machine(struct tcpm_port *port)
break;
case VCONN_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_START:
@@ -3806,10 +4477,14 @@ static void run_state_machine(struct tcpm_port *port)
case VCONN_SWAP_TURN_ON_VCONN:
tcpm_set_vconn(port, true);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -3817,6 +4492,8 @@ static void run_state_machine(struct tcpm_port *port)
case PR_SWAP_CANCEL:
case VCONN_SWAP_CANCEL:
tcpm_swap_complete(port, port->swap_status);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_READY, 0);
else
@@ -3886,6 +4563,25 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_default_state(port),
port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
break;
+
+ /* AMS intermediate state */
+ case AMS_START:
+ if (port->upcoming_state == INVALID_STATE) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_READY : SNK_READY, 0);
+ break;
+ }
+
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+
+ /* Chunk state */
+ case CHUNK_NOT_SUPP:
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
+ break;
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
@@ -4127,6 +4823,9 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
switch (port->state) {
case SNK_TRANSITION_SINK_VBUS:
port->explicit_contract = true;
+ /* Set the VDM flag ASAP */
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
break;
case SNK_DISCOVERY:
@@ -4262,6 +4961,17 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, waiting for sink detection */
break;
+ case SRC_STARTUP:
+ case SRC_SEND_CAPABILITIES:
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ case SRC_NEGOTIATE_CAPABILITIES:
+ case SRC_TRANSITION_SUPPLY:
+ case SRC_READY:
+ case SRC_WAIT_NEW_CAPABILITIES:
+ /* Force the port to the unattached state to re-initiate the connection */
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ break;
+
case PORT_RESET:
/*
* State set back to default mode once the timer completes.
@@ -4313,6 +5023,10 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
port->tcpc->set_bist_data(port->tcpc, false);
+ if (port->ams != NONE_AMS)
+ port->ams = NONE_AMS;
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+ port->ams = HARD_RESET;
/*
* If we keep receiving hard reset requests, executing the hard reset
* must have failed. Revert to error recovery if that happens.
@@ -4363,10 +5077,16 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
_tcpm_cc_change(port, cc1, cc2);
}
if (events & TCPM_FRS_EVENT) {
- if (port->state == SNK_READY)
- tcpm_set_state(port, FR_SWAP_SEND, 0);
- else
+ if (port->state == SNK_READY) {
+ int ret;
+
+ port->upcoming_state = FR_SWAP_SEND;
+ ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
+ if (ret == -EAGAIN)
+ port->upcoming_state = INVALID_STATE;
+ } else {
tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
+ }
}
if (events & TCPM_SOURCING_VBUS) {
tcpm_log(port, "sourcing vbus");
@@ -4435,6 +5155,7 @@ EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
static void tcpm_enable_frs_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
+ int ret;
mutex_lock(&port->lock);
/* Not FRS capable */
@@ -4449,9 +5170,14 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)
goto resched;
- tcpm_set_state(port, GET_SINK_CAP, 0);
- port->sink_cap_done = true;
-
+ port->upcoming_state = GET_SINK_CAP;
+ ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ } else {
+ port->sink_cap_done = true;
+ goto unlock;
+ }
resched:
mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
@@ -4501,7 +5227,12 @@ static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
port->non_pd_role_swap = true;
tcpm_set_state(port, PORT_RESET, 0);
} else {
- tcpm_set_state(port, DR_SWAP_SEND, 0);
+ port->upcoming_state = DR_SWAP_SEND;
+ ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
}
port->swap_status = 0;
@@ -4547,10 +5278,16 @@ static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
goto port_unlock;
}
+ port->upcoming_state = PR_SWAP_SEND;
+ ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
port->swap_status = 0;
port->swap_pending = true;
reinit_completion(&port->swap_complete);
- tcpm_set_state(port, PR_SWAP_SEND, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->swap_complete,
@@ -4586,10 +5323,16 @@ static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
goto port_unlock;
}
+ port->upcoming_state = VCONN_SWAP_SEND;
+ ret = tcpm_ams_start(port, VCONN_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
port->swap_status = 0;
port->swap_pending = true;
reinit_completion(&port->swap_complete);
- tcpm_set_state(port, VCONN_SWAP_SEND, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->swap_complete,
@@ -4654,6 +5397,13 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
goto port_unlock;
}
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
/* Round down operating current to align with PPS valid steps */
op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
@@ -4661,7 +5411,6 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
port->pps_data.op_curr = op_curr;
port->pps_status = 0;
port->pps_pending = true;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->pps_complete,
@@ -4710,6 +5459,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
goto port_unlock;
}
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
/* Round down output voltage to align with PPS valid steps */
out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
@@ -4717,7 +5473,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
port->pps_data.out_volt = out_volt;
port->pps_status = 0;
port->pps_pending = true;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->pps_complete,
@@ -4757,6 +5512,16 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
goto port_unlock;
}
+ if (activate)
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ else
+ port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
reinit_completion(&port->pps_complete);
port->pps_status = 0;
port->pps_pending = true;
@@ -4765,9 +5530,6 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
if (activate) {
port->pps_data.out_volt = port->supply_voltage;
port->pps_data.op_curr = port->current_limit;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
- } else {
- tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
}
mutex_unlock(&port->lock);
@@ -4960,6 +5722,20 @@ sink:
port->new_source_frs_current = frs_current;
}
+ /* sink-vdos is optional */
+ ret = fwnode_property_count_u32(fwnode, "sink-vdos");
+ if (ret < 0)
+ ret = 0;
+
+ port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
+ if (port->nr_snk_vdo) {
+ ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
+ port->snk_vdo,
+ port->nr_snk_vdo);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -5265,6 +6041,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
+ port->typec_caps.svdm_version = SVDM_VER_2_0;
port->typec_caps.driver_data = port;
port->typec_caps.ops = &tcpm_ops;
port->typec_caps.orientation_aware = 1;
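/*
 * Editor's note (illustration, not part of the patch): every VDM header in
 * the reworked tcpm.c is now built with the negotiated SVDM version, which
 * must be fetched and error-checked first. A hedged sketch of the pattern,
 * using a hypothetical Attention sender:
 */
static int example_send_attention(struct tcpm_port *port,
				  struct typec_altmode *alt)
{
	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
	u32 header;

	if (svdm_version < 0)
		return svdm_version;	/* no PD contract yet: propagate */

	header = VDO(alt->svid, 1, svdm_version, CMD_ATTENTION);
	header |= VDO_OPOS(alt->mode);
	tcpm_queue_vdm_unlocked(port, header, NULL, 0);
	return 0;
}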
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 2192d7c4fec7..5e9b37b3f25e 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -3,6 +3,7 @@
config TYPEC_UCSI
tristate "USB Type-C Connector System Software Interface driver"
depends on !CPU_BIG_ENDIAN
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
USB Type-C Connector System Software Interface (UCSI) is a
specification for an interface that allows the operating system to
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 261131c9e37c..73cd5bf35047 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -49,6 +49,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
struct ucsi *ucsi = dp->con->ucsi;
+ int svdm_version;
u64 command;
u8 cur = 0;
int ret;
@@ -83,7 +84,13 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
* mode, and letting the alt mode driver continue.
*/
- dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_ENTER_MODE);
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0) {
+ ret = svdm_version;
+ goto err_unlock;
+ }
+
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_ENTER_MODE);
dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
dp->header |= VDO_CMDT(CMDT_RSP_ACK);
@@ -101,6 +108,7 @@ err_unlock:
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
+ int svdm_version;
u64 command;
int ret = 0;
@@ -120,7 +128,13 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
if (ret < 0)
goto out_unlock;
- dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_EXIT_MODE);
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0) {
+ ret = svdm_version;
+ goto out_unlock;
+ }
+
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_EXIT_MODE);
dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
dp->header |= VDO_CMDT(CMDT_RSP_ACK);
@@ -186,6 +200,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
int cmd_type = PD_VDO_CMDT(header);
int cmd = PD_VDO_CMD(header);
+ int svdm_version;
mutex_lock(&dp->con->lock);
@@ -198,9 +213,20 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
return -EOPNOTSUPP;
}
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0) {
+ mutex_unlock(&dp->con->lock);
+ return svdm_version;
+ }
+
switch (cmd_type) {
case CMDT_INIT:
- dp->header = VDO(USB_TYPEC_DP_SID, 1, cmd);
+ if (PD_VDO_SVDM_VER(header) < svdm_version) {
+ typec_partner_set_svdm_version(dp->con->partner, PD_VDO_SVDM_VER(header));
+ svdm_version = PD_VDO_SVDM_VER(header);
+ }
+
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, cmd);
dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
switch (cmd) {
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f02958927cbd..244270755ae6 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,6 +588,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
static void ucsi_partner_change(struct ucsi_connector *con)
{
+ enum usb_role u_role = USB_ROLE_NONE;
int ret;
if (!con->partner)
@@ -595,11 +596,14 @@ static void ucsi_partner_change(struct ucsi_connector *con)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -610,6 +614,15 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!completion_done(&con->complete))
complete(&con->complete);
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret)
+ dev_err(con->ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
+
/* Can't rely on Partner Flags field. Always checking the alt modes. */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret)
@@ -628,6 +641,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
struct ucsi_connector_status pre_ack_status;
struct ucsi_connector_status post_ack_status;
enum typec_role role;
+ enum usb_role u_role = USB_ROLE_NONE;
u16 inferred_changes;
u16 changed_flags;
u64 command;
@@ -753,11 +767,14 @@ static void ucsi_handle_connector_change(struct work_struct *work)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -770,6 +787,16 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_unregister_partner(con);
ucsi_port_psy_changed(con);
+
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) &
+ UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret)
+ dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
@@ -988,6 +1015,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
+ enum usb_role u_role = USB_ROLE_NONE;
u64 command;
int ret;
@@ -1024,6 +1052,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
cap->revision = ucsi->cap.typec_version;
cap->pd_revision = ucsi->cap.pd_version;
+ cap->svdm_version = SVDM_VER_2_0;
cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY)
@@ -1066,11 +1095,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -1086,6 +1118,24 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ucsi_port_psy_changed(con);
}
+ con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
+ if (IS_ERR(con->usb_role_sw)) {
+ dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
+ con->num);
+ con->usb_role_sw = NULL;
+ }
+
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret) {
+ dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
+ ret = 0;
+ }
+
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret) {
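/*
 * Editor's note (summary, not part of the patch): the partner-type handling
 * repeated in the three ucsi.c hunks above maps UCSI partner types to USB
 * roles as
 *
 *   UFP, CABLE_AND_UFP -> USB_ROLE_HOST   (we act as host)
 *   DFP                -> USB_ROLE_DEVICE (we act as device)
 *   bare CABLE, other  -> USB_ROLE_NONE
 *
 * and the role is forced back to USB_ROLE_NONE whenever the partner does not
 * advertise UCSI_CONSTAT_PARTNER_FLAG_USB, so usb_role_switch_set_role() is
 * only asked to enable data for USB-capable partners.
 */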
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index dd9ba60ab4a3..3920e20a9e9e 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -8,6 +8,7 @@
#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/role.h>
/* -------------------------------------------------------------------------- */
@@ -331,6 +332,8 @@ struct ucsi_connector {
u32 rdo;
u32 src_pdos[UCSI_MAX_PDOS];
int num_pdos;
+
+ struct usb_role_switch *usb_role_sw;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index c1c0bbc9f8b1..77a5b3f8736a 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -23,7 +23,7 @@ struct kmem_cache *stub_priv_cache;
*/
#define MAX_BUSID 16
static struct bus_id_priv busid_table[MAX_BUSID];
-static spinlock_t busid_table_lock;
+static DEFINE_SPINLOCK(busid_table_lock);
static void init_busid_table(void)
{
@@ -35,8 +35,6 @@ static void init_busid_table(void)
*/
memset(busid_table, 0, sizeof(busid_table));
- spin_lock_init(&busid_table_lock);
-
for (i = 0; i < MAX_BUSID; i++)
spin_lock_init(&busid_table[i].busid_lock);
}
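/*
 * Editor's note (sketch, not part of the patch): DEFINE_SPINLOCK() performs
 * the initialization at compile time, which makes the runtime
 * spin_lock_init() removed above redundant and closes the window where the
 * lock could be taken before init_busid_table() ran. The general pattern:
 *
 *   static DEFINE_SPINLOCK(my_lock);   // statically initialized
 *
 *   spin_lock(&my_lock);               // safe even before any init code
 *   ...
 *   spin_unlock(&my_lock);
 */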
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 8be857a4fa13..d60ce17d3dd2 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -277,6 +277,10 @@ struct usbip_device {
void (*reset)(struct usbip_device *);
void (*unusable)(struct usbip_device *);
} eh_ops;
+
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
};
#define kthread_get_run(threadfn, data, namefmt, ...) \
@@ -337,4 +341,29 @@ static inline int interface_to_devnum(struct usb_interface *interface)
return udev->devnum;
}
+#ifdef CONFIG_KCOV
+
+static inline void usbip_kcov_handle_init(struct usbip_device *ud)
+{
+ ud->kcov_handle = kcov_common_handle();
+}
+
+static inline void usbip_kcov_remote_start(struct usbip_device *ud)
+{
+ kcov_remote_start_common(ud->kcov_handle);
+}
+
+static inline void usbip_kcov_remote_stop(void)
+{
+ kcov_remote_stop();
+}
+
+#else /* CONFIG_KCOV */
+
+static inline void usbip_kcov_handle_init(struct usbip_device *ud) { }
+static inline void usbip_kcov_remote_start(struct usbip_device *ud) { }
+static inline void usbip_kcov_remote_stop(void) { }
+
+#endif /* CONFIG_KCOV */
+
#endif /* __USBIP_COMMON_H */
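/*
 * Editor's note (hedged sketch, not part of the patch): the KCOV helpers
 * above let a fuzzer attribute coverage collected in a usbip kthread to the
 * process that attached the device; the handle is captured at attach time
 * and the remotely executed work is bracketed in the receive loop, as
 * vhci_rx.c does below. The loop body here is illustrative only.
 */
static int example_rx_loop(struct usbip_device *ud)
{
	while (!kthread_should_stop()) {
		usbip_kcov_remote_start(ud);	/* credit coverage to attacher */
		/* ... receive and handle one PDU ... */
		usbip_kcov_remote_stop();
	}
	return 0;
}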
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
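/*
 * Editor's note (not part of the patch): port_status[] entries are 32-bit,
 * so "1 << wValue" with wValue >= 32 is an undefined shift; the new bounds
 * check rejects such ClearPortFeature requests before the shift is evaluated.
 */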
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 266024cbb64f..7f2d1c241559 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -261,7 +261,9 @@ int vhci_rx_loop(void *data)
if (usbip_event_happened(ud))
break;
+ usbip_kcov_remote_start(ud);
vhci_rx_pdu(ud);
+ usbip_kcov_remote_stop();
}
return 0;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index be37aec250c2..96e5371dc335 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -383,6 +383,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
+ usbip_kcov_handle_init(&vdev->ud);
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 92a6396f8a73..ffd1e098bfd2 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig VDPA
tristate "vDPA drivers"
+ depends on NET
help
Enable this module to support vDPA device that uses a
datapath which complies with virtio specifications with
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index fa1af301cf55..7c8bbfcf6c3e 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -432,7 +432,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops,
- IFCVF_MAX_QUEUE_PAIRS * 2);
+ IFCVF_MAX_QUEUE_PAIRS * 2, NULL);
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 5c92a576edae..08f742fd2409 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
struct sg_table sg_head;
int log_size;
int nsg;
+ int nent;
struct list_head list;
u64 offset;
};
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 4b6195666c58..d300f799efcd 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
return (npages + 1) / 2;
}
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
- struct scatterlist *sg;
- __be64 *pas;
- int i;
-
- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- (*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
+ int nsg = mr->nsg;
+ u64 dma_addr;
+ u64 dma_len;
+ int j = 0;
int i;
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- mtt[i] = cpu_to_be64(sg_dma_address(sg));
+ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+ nsg && dma_len;
+ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+ mtt[j++] = cpu_to_be64(dma_addr);
+ }
}
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
- fill_sg(mr, in);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!err)
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!mr->nent)
goto err_map;
err = create_direct_mr(mvdev, mr);
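/*
 * Editor's note (worked example, not part of the patch): dma_map_sg_attrs()
 * may coalesce scatterlist elements, so the number of mapped entries (nent)
 * can be smaller than the number of expected MTT pages (nsg). With
 * log_size = 12 (4 KiB pages) and one coalesced 16 KiB segment
 * (nent = 1, nsg = 4), the rewritten populate_mtts() emits
 *
 *   mtt[0] = dma_addr
 *   mtt[1] = dma_addr + 0x1000
 *   mtt[2] = dma_addr + 0x2000
 *   mtt[3] = dma_addr + 0x3000
 *
 * whereas the removed fill_sg() and the old loop assumed one entry per
 * scatterlist element and would have programmed too few pages.
 */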
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 88dde3455bfd..10e9b09932eb 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -87,6 +87,7 @@ struct mlx5_vq_restore_info {
u64 device_addr;
u64 driver_addr;
u16 avail_index;
+ u16 used_index;
bool ready;
struct vdpa_callback cb;
bool restore;
@@ -121,6 +122,7 @@ struct mlx5_vdpa_virtqueue {
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
+ u16 used_idx;
int fw_state;
/* keep last in the struct */
@@ -804,6 +806,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
get_features_12_3(ndev->mvdev.actual_features));
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
@@ -1022,6 +1025,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
+ u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
@@ -1052,6 +1056,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
@@ -1535,6 +1540,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ ndev->vqs[i].avail_idx = 0;
+ ndev->vqs[i].used_idx = 0;
+ }
+}
+
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
@@ -1610,6 +1625,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
return err;
ri->avail_index = attr.available_index;
+ ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
@@ -1654,6 +1670,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
continue;
mvq->avail_idx = ri->avail_index;
+ mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
@@ -1768,6 +1785,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
@@ -1802,7 +1820,7 @@ static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- if (offset + len < sizeof(struct virtio_net_config))
+ if (offset + len <= sizeof(struct virtio_net_config))
memcpy(buf, (u8 *)&ndev->config + offset, len);
}
@@ -1964,7 +1982,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 2 * mlx5_vdpa_max_qps(max_vqs));
+ 2 * mlx5_vdpa_max_qps(max_vqs), NULL);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index c0825650c055..da67f07e24fd 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -11,9 +11,17 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
+#include <uapi/linux/vdpa.h>
+#include <net/genetlink.h>
+#include <linux/mod_devicetable.h>
+static LIST_HEAD(mdev_head);
+/* A global mutex that protects vdpa management device and device-level operations. */
+static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);
+static struct genl_family vdpa_nl_family;
+
static int vdpa_dev_probe(struct device *d)
{
struct vdpa_device *vdev = dev_to_vdpa(d);
@@ -63,6 +71,7 @@ static void vdpa_release_dev(struct device *d)
* @config: the bus operations that is supported by this device
* @nvqs: number of virtqueues supported by this device
* @size: size of the parent structure that contains private data
+ * @name: name of the vdpa device; optional.
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
@@ -72,8 +81,7 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs,
- size_t size)
+ int nvqs, size_t size, const char *name)
{
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -101,7 +109,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->features_valid = false;
vdev->nvqs = nvqs;
- err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
+ if (name)
+ err = dev_set_name(&vdev->dev, "%s", name);
+ else
+ err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
goto err_name;
@@ -118,6 +129,44 @@ err:
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
+static int vdpa_name_match(struct device *dev, const void *data)
+{
+ struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
+
+ return (strcmp(dev_name(&vdev->dev), data) == 0);
+}
+
+static int __vdpa_register_device(struct vdpa_device *vdev)
+{
+ struct device *dev;
+
+ lockdep_assert_held(&vdpa_dev_mutex);
+ dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
+ if (dev) {
+ put_device(dev);
+ return -EEXIST;
+ }
+ return device_add(&vdev->dev);
+}
+
+/**
+ * _vdpa_register_device - register a vDPA device with vdpa lock held
+ * Caller must have made a successful call to vdpa_alloc_device() beforehand.
+ * Caller must invoke this routine in the management device dev_add()
+ * callback after setting up a valid mgmtdev for this vdpa device.
+ * @vdev: the vdpa device to be registered to the vDPA bus
+ *
+ * Returns an error when it fails to add the device to the vDPA bus
+ */
+int _vdpa_register_device(struct vdpa_device *vdev)
+{
+ if (!vdev->mdev)
+ return -EINVAL;
+
+ return __vdpa_register_device(vdev);
+}
+EXPORT_SYMBOL_GPL(_vdpa_register_device);
+
/**
* vdpa_register_device - register a vDPA device
* Callers must have made a successful call to vdpa_alloc_device() beforehand.
@@ -127,17 +176,38 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
*/
int vdpa_register_device(struct vdpa_device *vdev)
{
- return device_add(&vdev->dev);
+ int err;
+
+ mutex_lock(&vdpa_dev_mutex);
+ err = __vdpa_register_device(vdev);
+ mutex_unlock(&vdpa_dev_mutex);
+ return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
/**
+ * _vdpa_unregister_device - unregister a vDPA device
+ * Caller must invoke this routine as part of the management device dev_del()
+ * callback.
+ * @vdev: the vdpa device to be unregistered from the vDPA bus
+ */
+void _vdpa_unregister_device(struct vdpa_device *vdev)
+{
+ lockdep_assert_held(&vdpa_dev_mutex);
+ WARN_ON(!vdev->mdev);
+ device_unregister(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
+
+/**
* vdpa_unregister_device - unregister a vDPA device
* @vdev: the vdpa device to be unregistered from the vDPA bus
*/
void vdpa_unregister_device(struct vdpa_device *vdev)
{
+ mutex_lock(&vdpa_dev_mutex);
device_unregister(&vdev->dev);
+ mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
@@ -167,13 +237,436 @@ void vdpa_unregister_driver(struct vdpa_driver *drv)
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
+/**
+ * vdpa_mgmtdev_register - register a vdpa management device
+ *
+ * @mdev: Pointer to vdpa management device
+ * vdpa_mgmtdev_register() registers a vdpa management device which supports
+ * vdpa device management.
+ */
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
+{
+ if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&mdev->list);
+ mutex_lock(&vdpa_dev_mutex);
+ list_add_tail(&mdev->list, &mdev_head);
+ mutex_unlock(&vdpa_dev_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
+
+static int vdpa_match_remove(struct device *dev, void *data)
+{
+ struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
+ struct vdpa_mgmt_dev *mdev = vdev->mdev;
+
+ if (mdev == data)
+ mdev->ops->dev_del(mdev, vdev);
+ return 0;
+}
+
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
+{
+ mutex_lock(&vdpa_dev_mutex);
+
+ list_del(&mdev->list);
+
+ /* Filter out all the entries belonging to this management device and delete them. */
+ bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
+
+ mutex_unlock(&vdpa_dev_mutex);
+}
+EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
+
+static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
+ const char *busname, const char *devname)
+{
+ /* Bus name is optional for a simulated management device, so a device
+ * matches only when the bus attribute and the device's bus are either
+ * both present or both absent.
+ */
+ if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
+ return false;
+
+ if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
+ return true;
+
+ if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
+ (strcmp(dev_name(mdev->device), devname) == 0))
+ return true;
+
+ return false;
+}
+
+static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
+{
+ struct vdpa_mgmt_dev *mdev;
+ const char *busname = NULL;
+ const char *devname;
+
+ if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
+ return ERR_PTR(-EINVAL);
+ devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
+ if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
+ busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
+
+ list_for_each_entry(mdev, &mdev_head, list) {
+ if (mgmtdev_handle_match(mdev, busname, devname))
+ return mdev;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
+{
+ if (mdev->device->bus &&
+ nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
+ u32 portid, u32 seq, int flags)
+{
+ u64 supported_classes = 0;
+ void *hdr;
+ int i = 0;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
+ if (!hdr)
+ return -EMSGSIZE;
+ err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
+ if (err)
+ goto msg_err;
+
+ while (mdev->id_table[i].device) {
+ supported_classes |= BIT(mdev->id_table[i].device);
+ i++;
+ }
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
+ supported_classes, VDPA_ATTR_UNSPEC)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+msg_err:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vdpa_mgmt_dev *mdev;
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&vdpa_dev_mutex);
+ mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
+ if (IS_ERR(mdev)) {
+ mutex_unlock(&vdpa_dev_mutex);
+ NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
+ err = PTR_ERR(mdev);
+ goto out;
+ }
+
+ err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
+ mutex_unlock(&vdpa_dev_mutex);
+ if (err)
+ goto out;
+ err = genlmsg_reply(msg, info);
+ return err;
+
+out:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int
+vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct vdpa_mgmt_dev *mdev;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&vdpa_dev_mutex);
+ list_for_each_entry(mdev, &mdev_head, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ if (err)
+ goto out;
+ idx++;
+ }
+out:
+ mutex_unlock(&vdpa_dev_mutex);
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vdpa_mgmt_dev *mdev;
+ const char *name;
+ int err = 0;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+
+ mutex_lock(&vdpa_dev_mutex);
+ mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
+ if (IS_ERR(mdev)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
+ err = PTR_ERR(mdev);
+ goto err;
+ }
+
+ err = mdev->ops->dev_add(mdev, name);
+err:
+ mutex_unlock(&vdpa_dev_mutex);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vdpa_mgmt_dev *mdev;
+ struct vdpa_device *vdev;
+ struct device *dev;
+ const char *name;
+ int err = 0;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+ name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+
+ mutex_lock(&vdpa_dev_mutex);
+ dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Only a user-created device can be deleted by the user");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ mdev = vdev->mdev;
+ mdev->ops->dev_del(mdev, vdev);
+mdev_err:
+ put_device(dev);
+dev_err:
+ mutex_unlock(&vdpa_dev_mutex);
+ return err;
+}
+
+static int
+vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
+ int flags, struct netlink_ext_ack *extack)
+{
+ u16 max_vq_size;
+ u32 device_id;
+ u32 vendor_id;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
+ if (err)
+ goto msg_err;
+
+ device_id = vdev->config->get_device_id(vdev);
+ vendor_id = vdev->config->get_vendor_id(vdev);
+ max_vq_size = vdev->config->get_vq_num_max(vdev);
+
+ err = -EMSGSIZE;
+ if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
+ goto msg_err;
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
+ goto msg_err;
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
+ goto msg_err;
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
+ goto msg_err;
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
+ goto msg_err;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+msg_err:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct sk_buff *msg;
+ const char *devname;
+ struct device *dev;
+ int err;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+ devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&vdpa_dev_mutex);
+ dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
+ if (!err)
+ err = genlmsg_reply(msg, info);
+mdev_err:
+ put_device(dev);
+err:
+ mutex_unlock(&vdpa_dev_mutex);
+ if (err)
+ nlmsg_free(msg);
+ return err;
+}
+
+struct vdpa_dev_dump_info {
+ struct sk_buff *msg;
+ struct netlink_callback *cb;
+ int start_idx;
+ int idx;
+};
+
+static int vdpa_dev_dump(struct device *dev, void *data)
+{
+ struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
+ struct vdpa_dev_dump_info *info = data;
+ int err;
+
+ if (!vdev->mdev)
+ return 0;
+ if (info->idx < info->start_idx) {
+ info->idx++;
+ return 0;
+ }
+ err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
+ info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
+ if (err)
+ return err;
+
+ info->idx++;
+ return 0;
+}
+
+static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct vdpa_dev_dump_info info;
+
+ info.msg = msg;
+ info.cb = cb;
+ info.start_idx = cb->args[0];
+ info.idx = 0;
+
+ mutex_lock(&vdpa_dev_mutex);
+ bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
+ mutex_unlock(&vdpa_dev_mutex);
+ cb->args[0] = info.idx;
+ return msg->len;
+}
+
+static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
+ [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
+ [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
+};
+
+static const struct genl_ops vdpa_nl_ops[] = {
+ {
+ .cmd = VDPA_CMD_MGMTDEV_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_mgmtdev_get_doit,
+ .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_add_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_del_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_get_dumpit,
+ },
+};
+
+static struct genl_family vdpa_nl_family __ro_after_init = {
+ .name = VDPA_GENL_NAME,
+ .version = VDPA_GENL_VERSION,
+ .maxattr = VDPA_ATTR_MAX,
+ .policy = vdpa_nl_policy,
+ .netnsok = false,
+ .module = THIS_MODULE,
+ .ops = vdpa_nl_ops,
+ .n_ops = ARRAY_SIZE(vdpa_nl_ops),
+};
+
static int vdpa_init(void)
{
- return bus_register(&vdpa_bus);
+ int err;
+
+ err = bus_register(&vdpa_bus);
+ if (err)
+ return err;
+ err = genl_register_family(&vdpa_nl_family);
+ if (err)
+ goto err;
+ return 0;
+
+err:
+ bus_unregister(&vdpa_bus);
+ return err;
}
static void __exit vdpa_exit(void)
{
+ genl_unregister_family(&vdpa_nl_family);
bus_unregister(&vdpa_bus);
ida_destroy(&vdpa_index_ida);
}
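
The netlink plumbing above is driven from userspace over the "vdpa" generic netlink family. Purely as an illustration (not part of this patch), a minimal libnl-genl client issuing VDPA_CMD_MGMTDEV_GET for one management device could look like the sketch below; it assumes libnl-3/libnl-genl-3 and the UAPI definitions from <linux/vdpa.h> added by this series, and omits reply parsing.

/* Illustrative sketch only: query one vdpa management device. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/vdpa.h>

static int query_mgmtdev(const char *devname)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, err = -1;

	if (!sk)
		return -1;
	if (genl_connect(sk))
		goto out;
	family = genl_ctrl_resolve(sk, VDPA_GENL_NAME);
	if (family < 0)
		goto out;
	msg = nlmsg_alloc();
	if (!msg)
		goto out;
	/* Build VDPA_CMD_MGMTDEV_GET keyed by the mgmtdev name handle. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    VDPA_CMD_MGMTDEV_GET, VDPA_GENL_VERSION);
	nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, devname);
	err = nl_send_auto(sk, msg) < 0 ? -1 : 0;
out:
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}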
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index b3fcc67bfdf0..d5942842432d 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
- dev_attr->nvqs);
+ dev_attr->nvqs, dev_attr->name);
if (!vdpasim)
goto err_alloc;
@@ -249,6 +249,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
goto err_iommu;
set_dma_ops(dev, &vdpasim_dma_ops);
+ vdpasim->vdpa.mdev = dev_attr->mgmt_dev;
vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
if (!vdpasim->config)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index b02142293d5b..6d75444f9948 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -33,6 +33,8 @@ struct vdpasim_virtqueue {
};
struct vdpasim_dev_attr {
+ struct vdpa_mgmt_dev *mgmt_dev;
+ const char *name;
u64 supported_features;
size_t config_size;
size_t buffer_size;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index c10b6981fdab..d344c5b7c914 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -33,9 +33,7 @@ static char *macaddr;
module_param(macaddr, charp, 0);
MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
-u8 macaddr_buf[ETH_ALEN];
-
-static struct vdpasim *vdpasim_net_dev;
+static u8 macaddr_buf[ETH_ALEN];
static void vdpasim_net_work(struct work_struct *work)
{
@@ -120,21 +118,23 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
}
-static int __init vdpasim_net_init(void)
+static void vdpasim_net_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vdpasim_net_mgmtdev = {
+ .init_name = "vdpasim_net",
+ .release = vdpasim_net_mgmtdev_release,
+};
+
+static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
struct vdpasim_dev_attr dev_attr = {};
+ struct vdpasim *simdev;
int ret;
- if (macaddr) {
- mac_pton(macaddr, macaddr_buf);
- if (!is_valid_ether_addr(macaddr_buf)) {
- ret = -EADDRNOTAVAIL;
- goto out;
- }
- } else {
- eth_random_addr(macaddr_buf);
- }
-
+ dev_attr.mgmt_dev = mdev;
+ dev_attr.name = name;
dev_attr.id = VIRTIO_ID_NET;
dev_attr.supported_features = VDPASIM_NET_FEATURES;
dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
@@ -143,29 +143,75 @@ static int __init vdpasim_net_init(void)
dev_attr.work_fn = vdpasim_net_work;
dev_attr.buffer_size = PAGE_SIZE;
- vdpasim_net_dev = vdpasim_create(&dev_attr);
- if (IS_ERR(vdpasim_net_dev)) {
- ret = PTR_ERR(vdpasim_net_dev);
- goto out;
+ simdev = vdpasim_create(&dev_attr);
+ if (IS_ERR(simdev))
+ return PTR_ERR(simdev);
+
+ ret = _vdpa_register_device(&simdev->vdpa);
+ if (ret)
+ goto reg_err;
+
+ return 0;
+
+reg_err:
+ put_device(&simdev->vdpa.dev);
+ return ret;
+}
+
+static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
+ struct vdpa_device *dev)
+{
+ struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);
+
+ _vdpa_unregister_device(&simdev->vdpa);
+}
+
+static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
+ .dev_add = vdpasim_net_dev_add,
+ .dev_del = vdpasim_net_dev_del
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vdpasim_net_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpasim_net_mgmtdev_ops,
+};
+
+static int __init vdpasim_net_init(void)
+{
+ int ret;
+
+ if (macaddr) {
+ mac_pton(macaddr, macaddr_buf);
+ if (!is_valid_ether_addr(macaddr_buf))
+ return -EADDRNOTAVAIL;
+ } else {
+ eth_random_addr(macaddr_buf);
}
- ret = vdpa_register_device(&vdpasim_net_dev->vdpa);
+ ret = device_register(&vdpasim_net_mgmtdev);
if (ret)
- goto put_dev;
+ return ret;
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto parent_err;
return 0;
-put_dev:
- put_device(&vdpasim_net_dev->vdpa.dev);
-out:
+parent_err:
+ device_unregister(&vdpasim_net_mgmtdev);
return ret;
}
static void __exit vdpasim_net_exit(void)
{
- struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa;
-
- vdpa_unregister_device(vdpa);
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vdpasim_net_mgmtdev);
}
module_init(vdpasim_net_init);
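
The vdpasim_net conversion above is the pattern other vendor drivers are expected to follow. A condensed, hypothetical sketch of the contract (all foo_* names are illustrative, not from this patch): dev_add/dev_del run with the core's vdpa_dev_mutex already held, which is why the unlocked _vdpa_register_device()/_vdpa_unregister_device() variants must be used rather than the locked ones.

/* Hypothetical vendor driver: the minimal management-ops contract. */
static int foo_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
	struct foo_vdpa *foo;
	int err;

	/* Allocate the device with the user-chosen name... */
	foo = vdpa_alloc_device(struct foo_vdpa, vdpa, NULL,
				&foo_config_ops, FOO_VQ_NUM, name);
	if (!foo)
		return -ENOMEM;
	foo->vdpa.mdev = mdev;
	/* ...and register unlocked: the core holds vdpa_dev_mutex. */
	err = _vdpa_register_device(&foo->vdpa);
	if (err)
		put_device(&foo->vdpa.dev);
	return err;
}

static void foo_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops foo_mgmtdev_ops = {
	.dev_add = foo_dev_add,
	.dev_del = foo_dev_del,
};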
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 40a223381ab6..ac3c1dd3edef 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -45,15 +45,3 @@ config VFIO_PCI_NVLINK2
depends on VFIO_PCI && PPC_POWERNV
help
VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
-
-config VFIO_PCI_ZDEV
- bool "VFIO PCI ZPCI device CLP support"
- depends on VFIO_PCI && S390
- default y
- help
- Enabling this option exposes VFIO capabilities containing hardware
- configuration for zPCI devices. This enables userspace (e.g. QEMU)
- to supply proper configuration values instead of hard-coded defaults
- for zPCI devices passed through via VFIO on s390.
-
- Say Y here.
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 781e0809d6ee..eff97a7cd9f1 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -3,6 +3,6 @@
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
-vfio-pci-$(CONFIG_VFIO_PCI_ZDEV) += vfio_pci_zdev.o
+vfio-pci-$(CONFIG_S390) += vfio_pci_zdev.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 706de3ef94bb..65e7e6b44578 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -807,6 +807,7 @@ static long vfio_pci_ioctl(void *device_data,
struct vfio_device_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
unsigned long capsz;
+ int ret;
minsz = offsetofend(struct vfio_device_info, num_irqs);
@@ -832,13 +833,10 @@ static long vfio_pci_ioctl(void *device_data,
info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
- if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV)) {
- int ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
-
- if (ret && ret != -ENODEV) {
- pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
- return ret;
- }
+ ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
+ return ret;
}
if (caps.size) {
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 53d97f459252..e66dfb0178ed 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -127,7 +127,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
ret = pci_user_read_config_byte(pdev, pos, &val);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
if (copy_to_user(buf + count - size, &val, 1))
return -EFAULT;
@@ -141,7 +141,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
val = cpu_to_le16(val);
if (copy_to_user(buf + count - size, &val, 2))
@@ -156,7 +156,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
ret = pci_user_read_config_dword(pdev, pos, &val);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
val = cpu_to_le32(val);
if (copy_to_user(buf + count - size, &val, 4))
@@ -171,7 +171,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
val = cpu_to_le16(val);
if (copy_to_user(buf + count - size, &val, 2))
@@ -186,7 +186,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
ret = pci_user_read_config_byte(pdev, pos, &val);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
if (copy_to_user(buf + count - size, &val, 1))
return -EFAULT;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 5c90e560c5c7..9cd1882a05af 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -214,7 +214,7 @@ static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
}
#endif
-#ifdef CONFIG_VFIO_PCI_ZDEV
+#ifdef CONFIG_S390
extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps);
#else
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index 229685634031..7b011b62c766 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -24,8 +24,7 @@
/*
* Add the Base PCI Function information to the device info region.
*/
-static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
+static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_base cap = {
.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_BASE,
@@ -45,8 +44,7 @@ static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
/*
* Add the Base PCI Function Group information to the device info region.
*/
-static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
+static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_group cap = {
.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_GROUP,
@@ -66,14 +64,15 @@ static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
/*
* Add the device utility string to the device info region.
*/
-static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
+static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_util *cap;
int cap_size = sizeof(*cap) + CLP_UTIL_STR_LEN;
int ret;
cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
cap->header.version = 1;
@@ -90,14 +89,15 @@ static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
/*
* Add the function path string to the device info region.
*/
-static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
+static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_pfip *cap;
int cap_size = sizeof(*cap) + CLP_PFIP_NR_SEGMENTS;
int ret;
cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
cap->header.version = 1;
@@ -123,21 +123,21 @@ int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
if (!zdev)
return -ENODEV;
- ret = zpci_base_cap(zdev, vdev, caps);
+ ret = zpci_base_cap(zdev, caps);
if (ret)
return ret;
- ret = zpci_group_cap(zdev, vdev, caps);
+ ret = zpci_group_cap(zdev, caps);
if (ret)
return ret;
if (zdev->util_str_avail) {
- ret = zpci_util_cap(zdev, vdev, caps);
+ ret = zpci_util_cap(zdev, caps);
if (ret)
return ret;
}
- ret = zpci_pfip_cap(zdev, vdev, caps);
+ ret = zpci_pfip_cap(zdev, caps);
return ret;
}
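
On the consuming side, these zPCI capabilities land in the VFIO_DEVICE_GET_INFO capability chain, which userspace walks via cap_offset the same way as region and IRQ caps. A hedged sketch of that walk (illustrative only; assumes an info buffer sized to argsz), e.g. find_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE):

/* Illustrative sketch: locate a zPCI capability in the info cap chain. */
#include <linux/vfio.h>

static struct vfio_info_cap_header *
find_cap(struct vfio_device_info *info, __u16 id)
{
	struct vfio_info_cap_header *hdr;
	__u32 off = info->cap_offset;

	if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS))
		return NULL;
	while (off) {
		hdr = (void *)info + off;
		if (hdr->id == id)
			return hdr;
		off = hdr->next;
	}
	return NULL;
}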
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 9636a2afaecd..3626c2150101 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -71,18 +71,13 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
}
-static int vfio_amba_remove(struct amba_device *adev)
+static void vfio_amba_remove(struct amba_device *adev)
{
- struct vfio_platform_device *vdev;
-
- vdev = vfio_platform_remove_common(&adev->dev);
- if (vdev) {
- kfree(vdev->name);
- kfree(vdev);
- return 0;
- }
+ struct vfio_platform_device *vdev =
+ vfio_platform_remove_common(&adev->dev);
- return -EINVAL;
+ kfree(vdev->name);
+ kfree(vdev);
}
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 4ad8a35667a7..38779e6fd80c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1220,6 +1220,11 @@ static int vfio_fops_open(struct inode *inode, struct file *filep)
static int vfio_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (driver && driver->ops->notify)
+ driver->ops->notify(container->iommu_data,
+ VFIO_IOMMU_CONTAINER_CLOSE);
filep->private_data = NULL;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4dedaa9128..4bb162c1d649 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -69,11 +70,15 @@ struct vfio_iommu {
struct rb_root dma_list;
struct blocking_notifier_head notifier;
unsigned int dma_avail;
+ unsigned int vaddr_invalid_count;
uint64_t pgsize_bitmap;
+ uint64_t num_non_pinned_groups;
+ wait_queue_head_t vaddr_wait;
bool v2;
bool nesting;
bool dirty_page_tracking;
bool pinned_page_dirty_scope;
+ bool container_open;
};
struct vfio_domain {
@@ -92,11 +97,20 @@ struct vfio_dma {
int prot; /* IOMMU_READ/WRITE */
bool iommu_mapped;
bool lock_cap; /* capable(CAP_IPC_LOCK) */
+ bool vaddr_invalid;
struct task_struct *task;
struct rb_root pfn_list; /* Ex-user pinned pfn list */
unsigned long *bitmap;
};
+struct vfio_batch {
+ struct page **pages; /* for pin_user_pages_remote */
+ struct page *fallback_page; /* if pages alloc fails */
+ int capacity; /* length of pages array */
+ int size; /* of batch currently */
+ int offset; /* of next entry in pages */
+};
+
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
@@ -143,12 +157,13 @@ struct vfio_regions {
#define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
#define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
+#define WAITED 1
+
static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group);
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -173,6 +188,31 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
return NULL;
}
+static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
+ dma_addr_t start, size_t size)
+{
+ struct rb_node *res = NULL;
+ struct rb_node *node = iommu->dma_list.rb_node;
+ struct vfio_dma *dma_res = NULL;
+
+ while (node) {
+ struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
+
+ if (start < dma->iova + dma->size) {
+ res = node;
+ dma_res = dma;
+ if (start >= dma->iova)
+ break;
+ node = node->rb_left;
+ } else {
+ node = node->rb_right;
+ }
+ }
+ if (res && size && dma_res->iova >= start + size)
+ res = NULL;
+ return res;
+}
+
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
@@ -236,6 +276,18 @@ static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
}
}
+static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
+{
+ struct rb_node *n;
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+ bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
+ }
+}
+
static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
{
struct rb_node *n;
@@ -415,13 +467,54 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
+#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
+
+static void vfio_batch_init(struct vfio_batch *batch)
+{
+ batch->size = 0;
+ batch->offset = 0;
+
+ if (unlikely(disable_hugepages))
+ goto fallback;
+
+ batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!batch->pages)
+ goto fallback;
+
+ batch->capacity = VFIO_BATCH_MAX_CAPACITY;
+ return;
+
+fallback:
+ batch->pages = &batch->fallback_page;
+ batch->capacity = 1;
+}
+
+static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma)
+{
+ while (batch->size) {
+ unsigned long pfn = page_to_pfn(batch->pages[batch->offset]);
+
+ put_pfn(pfn, dma->prot);
+ batch->offset++;
+ batch->size--;
+ }
+}
+
+static void vfio_batch_fini(struct vfio_batch *batch)
+{
+ if (batch->capacity == VFIO_BATCH_MAX_CAPACITY)
+ free_page((unsigned long)batch->pages);
+}
+
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
bool write_fault)
{
+ pte_t *ptep;
+ spinlock_t *ptl;
int ret;
- ret = follow_pfn(vma, vaddr, pfn);
+ ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
if (ret) {
bool unlocked = false;
@@ -435,16 +528,28 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
if (ret)
return ret;
- ret = follow_pfn(vma, vaddr, pfn);
+ ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+ if (ret)
+ return ret;
}
+ if (write_fault && !pte_write(*ptep))
+ ret = -EFAULT;
+ else
+ *pfn = pte_pfn(*ptep);
+
+ pte_unmap_unlock(ptep, ptl);
return ret;
}
-static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
- int prot, unsigned long *pfn)
+/*
+ * Returns the positive number of pfns successfully obtained or a negative
+ * error code.
+ */
+static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
+ long npages, int prot, unsigned long *pfn,
+ struct page **pages)
{
- struct page *page[1];
struct vm_area_struct *vma;
unsigned int flags = 0;
int ret;
@@ -453,11 +558,10 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
flags |= FOLL_WRITE;
mmap_read_lock(mm);
- ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
- page, NULL, NULL);
- if (ret == 1) {
- *pfn = page_to_pfn(page[0]);
- ret = 0;
+ ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
+ pages, NULL, NULL);
+ if (ret > 0) {
+ *pfn = page_to_pfn(pages[0]);
goto done;
}
@@ -471,14 +575,73 @@ retry:
if (ret == -EAGAIN)
goto retry;
- if (!ret && !is_invalid_reserved_pfn(*pfn))
- ret = -EFAULT;
+ if (!ret) {
+ if (is_invalid_reserved_pfn(*pfn))
+ ret = 1;
+ else
+ ret = -EFAULT;
+ }
}
done:
mmap_read_unlock(mm);
return ret;
}
+static int vfio_wait(struct vfio_iommu *iommu)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&iommu->vaddr_wait, &wait, TASK_KILLABLE);
+ mutex_unlock(&iommu->lock);
+ schedule();
+ mutex_lock(&iommu->lock);
+ finish_wait(&iommu->vaddr_wait, &wait);
+ if (kthread_should_stop() || !iommu->container_open ||
+ fatal_signal_pending(current)) {
+ return -EFAULT;
+ }
+ return WAITED;
+}
+
+/*
+ * Find dma struct and wait for its vaddr to be valid. iommu lock is dropped
+ * if the task waits, but is re-locked on return. Return result in *dma_p.
+ * Return 0 on success with no waiting, WAITED on success if waited, and -errno
+ * on error.
+ */
+static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start,
+ size_t size, struct vfio_dma **dma_p)
+{
+ int ret;
+
+ do {
+ *dma_p = vfio_find_dma(iommu, start, size);
+ if (!*dma_p)
+ ret = -EINVAL;
+ else if (!(*dma_p)->vaddr_invalid)
+ ret = 0;
+ else
+ ret = vfio_wait(iommu);
+ } while (ret > 0);
+
+ return ret;
+}
+
+/*
+ * Wait for all vaddr in the dma_list to become valid. iommu lock is dropped
+ * if the task waits, but is re-locked on return. Return 0 on success with no
+ * waiting, WAITED on success if waited, and -errno on error.
+ */
+static int vfio_wait_all_valid(struct vfio_iommu *iommu)
+{
+ int ret = 0;
+
+ while (iommu->vaddr_invalid_count && ret >= 0)
+ ret = vfio_wait(iommu);
+
+ return ret;
+}
+
/*
* Attempt to pin pages. We really don't want to track all the pfns and
* the iommu can only map chunks of consecutive pfns anyway, so get the
@@ -486,76 +649,102 @@ done:
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base,
- unsigned long limit)
+ unsigned long limit, struct vfio_batch *batch)
{
- unsigned long pfn = 0;
+ unsigned long pfn;
+ struct mm_struct *mm = current->mm;
long ret, pinned = 0, lock_acct = 0;
bool rsvd;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
/* This code path is only user initiated */
- if (!current->mm)
+ if (!mm)
return -ENODEV;
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
- if (ret)
- return ret;
-
- pinned++;
- rsvd = is_invalid_reserved_pfn(*pfn_base);
-
- /*
- * Reserved pages aren't counted against the user, externally pinned
- * pages are already counted against the user.
- */
- if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
- put_pfn(*pfn_base, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
- limit << PAGE_SHIFT);
- return -ENOMEM;
- }
- lock_acct++;
+ if (batch->size) {
+ /* Leftover pages in batch from an earlier call. */
+ *pfn_base = page_to_pfn(batch->pages[batch->offset]);
+ pfn = *pfn_base;
+ rsvd = is_invalid_reserved_pfn(*pfn_base);
+ } else {
+ *pfn_base = 0;
}
- if (unlikely(disable_hugepages))
- goto out;
+ while (npage) {
+ if (!batch->size) {
+ /* Empty batch, so refill it. */
+ long req_pages = min_t(long, npage, batch->capacity);
- /* Lock all the consecutive pages from pfn_base */
- for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
- pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
- if (ret)
- break;
+ ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
+ &pfn, batch->pages);
+ if (ret < 0)
+ goto unpin_out;
- if (pfn != *pfn_base + pinned ||
- rsvd != is_invalid_reserved_pfn(pfn)) {
- put_pfn(pfn, dma->prot);
- break;
+ batch->size = ret;
+ batch->offset = 0;
+
+ if (!*pfn_base) {
+ *pfn_base = pfn;
+ rsvd = is_invalid_reserved_pfn(*pfn_base);
+ }
}
- if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!dma->lock_cap &&
- current->mm->locked_vm + lock_acct + 1 > limit) {
- put_pfn(pfn, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
- __func__, limit << PAGE_SHIFT);
- ret = -ENOMEM;
- goto unpin_out;
+ /*
+ * pfn is preset for the first iteration of this inner loop and
+ * updated at the end to handle a VM_PFNMAP pfn. In that case,
+ * batch->pages isn't valid (there's no struct page), so allow
+ * batch->pages to be touched only when there's more than one
+ * pfn to check, which guarantees the pfns are from a
+ * !VM_PFNMAP vma.
+ */
+ while (true) {
+ if (pfn != *pfn_base + pinned ||
+ rsvd != is_invalid_reserved_pfn(pfn))
+ goto out;
+
+ /*
+ * Reserved pages aren't counted against the user,
+ * externally pinned pages are already counted against
+ * the user.
+ */
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!dma->lock_cap &&
+ mm->locked_vm + lock_acct + 1 > limit) {
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, limit << PAGE_SHIFT);
+ ret = -ENOMEM;
+ goto unpin_out;
+ }
+ lock_acct++;
}
- lock_acct++;
+
+ pinned++;
+ npage--;
+ vaddr += PAGE_SIZE;
+ iova += PAGE_SIZE;
+ batch->offset++;
+ batch->size--;
+
+ if (!batch->size)
+ break;
+
+ pfn = page_to_pfn(batch->pages[batch->offset]);
}
+
+ if (unlikely(disable_hugepages))
+ break;
}
out:
ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out:
- if (ret) {
- if (!rsvd) {
+ if (ret < 0) {
+ if (pinned && !rsvd) {
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
put_pfn(pfn, dma->prot);
}
+ vfio_batch_unpin(batch, dma);
return ret;
}
@@ -587,6 +776,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
+ struct page *pages[1];
struct mm_struct *mm;
int ret;
@@ -594,8 +784,8 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
if (!mm)
return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
- if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+ ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+ if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
ret = vfio_lock_acct(dma, 1, true);
if (ret) {
put_pfn(*pfn_base, dma->prot);
@@ -640,6 +830,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
unsigned long remote_vaddr;
struct vfio_dma *dma;
bool do_accounting;
+ dma_addr_t iova;
if (!iommu || !user_pfn || !phys_pfn)
return -EINVAL;
@@ -650,6 +841,22 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
+ /*
+ * Wait for all necessary vaddrs to be valid so they can be used in
+ * the main loop without dropping the lock, to avoid racing vs unmap.
+ */
+again:
+ if (iommu->vaddr_invalid_count) {
+ for (i = 0; i < npage; i++) {
+ iova = user_pfn[i] << PAGE_SHIFT;
+ ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
+ if (ret < 0)
+ goto pin_done;
+ if (ret == WAITED)
+ goto again;
+ }
+ }
+
/* Fail if notifier list is empty */
if (!iommu->notifier.head) {
ret = -EINVAL;
@@ -664,7 +871,6 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
for (i = 0; i < npage; i++) {
- dma_addr_t iova;
struct vfio_pfn *vpfn;
iova = user_pfn[i] << PAGE_SHIFT;
@@ -714,7 +920,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
if (!group->pinned_page_dirty_scope) {
group->pinned_page_dirty_scope = true;
- update_pinned_page_dirty_scope(iommu);
+ iommu->num_non_pinned_groups--;
}
goto pin_done;
@@ -945,10 +1151,15 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
+ WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
vfio_unmap_unpin(iommu, dma, true);
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
vfio_dma_bitmap_free(dma);
+ if (dma->vaddr_invalid) {
+ iommu->vaddr_invalid_count--;
+ wake_up_all(&iommu->vaddr_wait);
+ }
kfree(dma);
iommu->dma_avail++;
}
@@ -991,7 +1202,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
* mark all pages dirty if any IOMMU capable device is not able
* to report dirty pages and all pages are pinned and mapped.
*/
- if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+ if (iommu->num_non_pinned_groups && dma->iommu_mapped)
bitmap_set(dma->bitmap, 0, nbits);
if (shift) {
@@ -1074,34 +1285,36 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
{
struct vfio_dma *dma, *dma_last = NULL;
size_t unmapped = 0, pgsize;
- int ret = 0, retries = 0;
+ int ret = -EINVAL, retries = 0;
unsigned long pgshift;
+ dma_addr_t iova = unmap->iova;
+ unsigned long size = unmap->size;
+ bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
+ bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
+ struct rb_node *n, *first_n;
mutex_lock(&iommu->lock);
pgshift = __ffs(iommu->pgsize_bitmap);
pgsize = (size_t)1 << pgshift;
- if (unmap->iova & (pgsize - 1)) {
- ret = -EINVAL;
+ if (iova & (pgsize - 1))
goto unlock;
- }
- if (!unmap->size || unmap->size & (pgsize - 1)) {
- ret = -EINVAL;
+ if (unmap_all) {
+ if (iova || size)
+ goto unlock;
+ size = SIZE_MAX;
+ } else if (!size || size & (pgsize - 1)) {
goto unlock;
}
- if (unmap->iova + unmap->size - 1 < unmap->iova ||
- unmap->size > SIZE_MAX) {
- ret = -EINVAL;
+ if (iova + size - 1 < iova || size > SIZE_MAX)
goto unlock;
- }
/* When dirty tracking is enabled, allow only min supported pgsize */
if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
(!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
- ret = -EINVAL;
goto unlock;
}
@@ -1138,21 +1351,25 @@ again:
* will only return success and a size of zero if there were no
* mappings within the range.
*/
- if (iommu->v2) {
- dma = vfio_find_dma(iommu, unmap->iova, 1);
- if (dma && dma->iova != unmap->iova) {
- ret = -EINVAL;
+ if (iommu->v2 && !unmap_all) {
+ dma = vfio_find_dma(iommu, iova, 1);
+ if (dma && dma->iova != iova)
goto unlock;
- }
- dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
- if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
- ret = -EINVAL;
+
+ dma = vfio_find_dma(iommu, iova + size - 1, 0);
+ if (dma && dma->iova + dma->size != iova + size)
goto unlock;
- }
}
- while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
- if (!iommu->v2 && unmap->iova > dma->iova)
+ ret = 0;
+ n = first_n = vfio_find_dma_first_node(iommu, iova, size);
+
+ while (n) {
+ dma = rb_entry(n, struct vfio_dma, node);
+ if (dma->iova >= iova + size)
+ break;
+
+ if (!iommu->v2 && iova > dma->iova)
break;
/*
* Task with same address space who mapped this iova range is
@@ -1161,6 +1378,27 @@ again:
if (dma->task->mm != current->mm)
break;
+ if (invalidate_vaddr) {
+ if (dma->vaddr_invalid) {
+ struct rb_node *last_n = n;
+
+ for (n = first_n; n != last_n; n = rb_next(n)) {
+ dma = rb_entry(n,
+ struct vfio_dma, node);
+ dma->vaddr_invalid = false;
+ iommu->vaddr_invalid_count--;
+ }
+ ret = -EINVAL;
+ unmapped = 0;
+ break;
+ }
+ dma->vaddr_invalid = true;
+ iommu->vaddr_invalid_count++;
+ unmapped += dma->size;
+ n = rb_next(n);
+ continue;
+ }
+
if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
struct vfio_iommu_type1_dma_unmap nb_unmap;
@@ -1190,12 +1428,13 @@ again:
if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
ret = update_user_bitmap(bitmap->data, iommu, dma,
- unmap->iova, pgsize);
+ iova, pgsize);
if (ret)
break;
}
unmapped += dma->size;
+ n = rb_next(n);
vfio_remove_dma(iommu, dma);
}
@@ -1239,15 +1478,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
{
dma_addr_t iova = dma->iova;
unsigned long vaddr = dma->vaddr;
+ struct vfio_batch batch;
size_t size = map_size;
long npage;
unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret = 0;
+ vfio_batch_init(&batch);
+
while (size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
- size >> PAGE_SHIFT, &pfn, limit);
+ size >> PAGE_SHIFT, &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1260,6 +1503,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
if (ret) {
vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
npage, true);
+ vfio_batch_unpin(&batch, dma);
break;
}
@@ -1267,6 +1511,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->size += npage << PAGE_SHIFT;
}
+ vfio_batch_fini(&batch);
dma->iommu_mapped = true;
if (ret)
@@ -1299,6 +1544,7 @@ static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_map *map)
{
+ bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
dma_addr_t iova = map->iova;
unsigned long vaddr = map->vaddr;
size_t size = map->size;
@@ -1316,13 +1562,16 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
prot |= IOMMU_READ;
+ if ((prot && set_vaddr) || (!prot && !set_vaddr))
+ return -EINVAL;
+
mutex_lock(&iommu->lock);
pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
WARN_ON((pgsize - 1) & PAGE_MASK);
- if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
+ if (!size || (size | iova | vaddr) & (pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1333,7 +1582,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
- if (vfio_find_dma(iommu, iova, size)) {
+ dma = vfio_find_dma(iommu, iova, size);
+ if (set_vaddr) {
+ if (!dma) {
+ ret = -ENOENT;
+ } else if (!dma->vaddr_invalid || dma->iova != iova ||
+ dma->size != size) {
+ ret = -EINVAL;
+ } else {
+ dma->vaddr = vaddr;
+ dma->vaddr_invalid = false;
+ iommu->vaddr_invalid_count--;
+ wake_up_all(&iommu->vaddr_wait);
+ }
+ goto out_unlock;
+ } else if (dma) {
ret = -EEXIST;
goto out_unlock;
}
@@ -1425,16 +1688,23 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
+ struct vfio_batch batch;
struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret;
+ ret = vfio_wait_all_valid(iommu);
+ if (ret < 0)
+ return ret;
+
/* Arbitrarily pick the first domain in the list for lookups */
if (!list_empty(&iommu->domain_list))
d = list_first_entry(&iommu->domain_list,
struct vfio_domain, next);
+ vfio_batch_init(&batch);
+
n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) {
@@ -1482,7 +1752,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
npage = vfio_pin_pages_remote(dma, vaddr,
n >> PAGE_SHIFT,
- &pfn, limit);
+ &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1496,11 +1767,13 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
if (ret) {
- if (!dma->iommu_mapped)
+ if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
phys >> PAGE_SHIFT,
size >> PAGE_SHIFT,
true);
+ vfio_batch_unpin(&batch, dma);
+ }
goto unwind;
}
@@ -1515,6 +1788,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
dma->iommu_mapped = true;
}
+ vfio_batch_fini(&batch);
return 0;
unwind:
@@ -1555,6 +1829,7 @@ unwind:
}
}
+ vfio_batch_fini(&batch);
return ret;
}
@@ -1622,33 +1897,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
- struct vfio_domain *domain;
- struct vfio_group *group;
-
- list_for_each_entry(domain, &iommu->domain_list, next) {
- list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
- return;
- }
- }
- }
-
- if (iommu->external_domain) {
- domain = iommu->external_domain;
- list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
- return;
- }
- }
- }
-
- iommu->pinned_page_dirty_scope = true;
-}
-
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
phys_addr_t *base)
{
@@ -2057,8 +2305,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
* addition of a dirty tracking group.
*/
group->pinned_page_dirty_scope = true;
- if (!iommu->pinned_page_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
return 0;
@@ -2188,7 +2434,7 @@ done:
* demotes the iommu scope until it declares itself dirty tracking
* capable via the page pinning interface.
*/
- iommu->pinned_page_dirty_scope = false;
+ iommu->num_non_pinned_groups++;
mutex_unlock(&iommu->lock);
vfio_iommu_resv_free(&group_resv_regions);
@@ -2238,23 +2484,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
}
}
-static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
-{
- struct rb_node *n;
-
- n = rb_first(&iommu->dma_list);
- for (; n; n = rb_next(n)) {
- struct vfio_dma *dma;
-
- dma = rb_entry(n, struct vfio_dma, node);
-
- if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
- break;
- }
- /* mdev vendor driver must unregister notifier */
- WARN_ON(iommu->notifier.head);
-}
-
/*
* Called when a domain is removed in detach. It is possible that
* the removed domain decided the iova aperture window. Modify the
@@ -2354,10 +2583,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
kfree(group);
if (list_empty(&iommu->external_domain->group_list)) {
- vfio_sanity_check_pfn_list(iommu);
-
- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
+ WARN_ON(iommu->notifier.head);
vfio_iommu_unmap_unpin_all(iommu);
+ }
kfree(iommu->external_domain);
iommu->external_domain = NULL;
@@ -2391,10 +2620,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
*/
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
- if (!iommu->external_domain)
+ if (!iommu->external_domain) {
+ WARN_ON(iommu->notifier.head);
vfio_iommu_unmap_unpin_all(iommu);
- else
+ } else {
vfio_iommu_unmap_unpin_reaccount(iommu);
+ }
}
iommu_domain_free(domain->domain);
list_del(&domain->next);
@@ -2415,8 +2646,11 @@ detach_group_done:
* Removal of a group without dirty tracking may allow the iommu scope
* to be promoted.
*/
- if (update_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ if (update_dirty_scope) {
+ iommu->num_non_pinned_groups--;
+ if (iommu->dirty_page_tracking)
+ vfio_iommu_populate_bitmap_full(iommu);
+ }
mutex_unlock(&iommu->lock);
}
@@ -2446,8 +2680,10 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->iova_list);
iommu->dma_list = RB_ROOT;
iommu->dma_avail = dma_entry_limit;
+ iommu->container_open = true;
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+ init_waitqueue_head(&iommu->vaddr_wait);
return iommu;
}
@@ -2475,7 +2711,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
if (iommu->external_domain) {
vfio_release_domain(iommu->external_domain, true);
- vfio_sanity_check_pfn_list(iommu);
kfree(iommu->external_domain);
}
@@ -2517,6 +2752,8 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
case VFIO_TYPE1_IOMMU:
case VFIO_TYPE1v2_IOMMU:
case VFIO_TYPE1_NESTING_IOMMU:
+ case VFIO_UNMAP_ALL:
+ case VFIO_UPDATE_VADDR:
return 1;
case VFIO_DMA_CC_IOMMU:
if (!iommu)
@@ -2688,7 +2925,8 @@ static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
{
struct vfio_iommu_type1_dma_map map;
unsigned long minsz;
- uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+ uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE |
+ VFIO_DMA_MAP_FLAG_VADDR;
minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
@@ -2706,6 +2944,9 @@ static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
{
struct vfio_iommu_type1_dma_unmap unmap;
struct vfio_bitmap bitmap = { 0 };
+ uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP |
+ VFIO_DMA_UNMAP_FLAG_VADDR |
+ VFIO_DMA_UNMAP_FLAG_ALL;
unsigned long minsz;
int ret;
@@ -2714,8 +2955,12 @@ static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
if (copy_from_user(&unmap, (void __user *)arg, minsz))
return -EFAULT;
- if (unmap.argsz < minsz ||
- unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
+ if (unmap.argsz < minsz || unmap.flags & ~mask)
+ return -EINVAL;
+
+ if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+ (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL |
+ VFIO_DMA_UNMAP_FLAG_VADDR)))
return -EINVAL;
if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
@@ -2906,12 +3151,13 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
struct vfio_dma *dma;
bool kthread = current->mm == NULL;
size_t offset;
+ int ret;
*copied = 0;
- dma = vfio_find_dma(iommu, user_iova, 1);
- if (!dma)
- return -EINVAL;
+ ret = vfio_find_dma_valid(iommu, user_iova, 1, &dma);
+ if (ret < 0)
+ return ret;
if ((write && !(dma->prot & IOMMU_WRITE)) ||
!(dma->prot & IOMMU_READ))
@@ -3003,6 +3249,19 @@ vfio_iommu_type1_group_iommu_domain(void *iommu_data,
return domain;
}
+static void vfio_iommu_type1_notify(void *iommu_data,
+ enum vfio_iommu_notify_type event)
+{
+ struct vfio_iommu *iommu = iommu_data;
+
+ if (event != VFIO_IOMMU_CONTAINER_CLOSE)
+ return;
+ mutex_lock(&iommu->lock);
+ iommu->container_open = false;
+ mutex_unlock(&iommu->lock);
+ wake_up_all(&iommu->vaddr_wait);
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -3017,6 +3276,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.unregister_notifier = vfio_iommu_type1_unregister_notifier,
.dma_rw = vfio_iommu_type1_dma_rw,
.group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
+ .notify = vfio_iommu_type1_notify,
};
static int __init vfio_iommu_type1_init(void)
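
Taken together, the VFIO_DMA_UNMAP_FLAG_VADDR / VFIO_DMA_MAP_FLAG_VADDR pair lets a userspace process (e.g. a live-updating VMM) invalidate the vaddr backing an existing mapping and later supply a new one, while the kernel keeps the pages pinned and mapped. A sketch of the intended ioctl sequence, assuming a container fd and an existing mapping at iova/size (illustrative only; error handling omitted):

/* Illustrative sketch: the update-vaddr handshake across exec(). */
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void update_vaddr(int container, __u64 iova, __u64 size, __u64 new_vaddr)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.flags = VFIO_DMA_UNMAP_FLAG_VADDR,
		.iova = iova,
		.size = size,
	};
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_VADDR,
		.vaddr = new_vaddr,
		.iova = iova,
		.size = size,
	};

	/* Mark the vaddr invalid; pages stay pinned and mapped. */
	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);

	/* ...exec() into the new binary and re-establish the memory... */

	/* Provide the new vaddr; tasks blocked in vfio_wait() wake up. */
	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}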
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..df82b124170e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -381,7 +381,8 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
}
-static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+static void vhost_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
@@ -827,14 +828,15 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
done:
@@ -863,6 +865,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,14 +898,13 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
+ ubuf->flags = SKBFL_ZEROCOPY_FRAG;
refcount_set(&ubuf->refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
@@ -923,19 +925,21 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
if (!zcopy_used)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4ce9f00ae10e..5de21ad4bd05 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1814,12 +1814,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i;
- vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
- if (!vs) {
- vs = vzalloc(sizeof(*vs));
- if (!vs)
- goto err_vs;
- }
+ vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ goto err_vs;
vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
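
With VIRTIO_F_ACCESS_PLATFORM and VHOST_BACKEND_F_IOTLB_MSG_V2 now advertised by vhost-vsock, userspace negotiates them through the generic vhost ioctls, as vhost-net already does. A hedged sketch of that negotiation (assumes an open vhost-vsock fd; illustrative only):

/* Illustrative sketch: negotiating the new vsock backend features. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int enable_iotlb(int vhost_fd)
{
	__u64 features;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features))
		return -1;
	if (!(features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)))
		return -1;	/* kernel lacks IOTLB_MSG_V2 support */
	/* Ack only the bits we understand. */
	features = 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
	return ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);
}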
diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
index e3fee3f1f582..d7b287cffd5c 100644
--- a/drivers/video/backlight/ktd253-backlight.c
+++ b/drivers/video/backlight/ktd253-backlight.c
@@ -137,15 +137,7 @@ static int ktd253_backlight_probe(struct platform_device *pdev)
brightness = max_brightness;
}
- if (brightness)
- /* This will be the default ratio when the KTD253 is enabled */
- ktd253->ratio = KTD253_MAX_RATIO;
- else
- ktd253->ratio = 0;
-
- ktd253->gpiod = devm_gpiod_get(dev, "enable",
- brightness ? GPIOD_OUT_HIGH :
- GPIOD_OUT_LOW);
+ ktd253->gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ktd253->gpiod)) {
ret = PTR_ERR(ktd253->gpiod);
if (ret != -EPROBE_DEFER)
@@ -153,6 +145,8 @@ static int ktd253_backlight_probe(struct platform_device *pdev)
return ret;
}
gpiod_set_consumer_name(ktd253->gpiod, dev_name(dev));
+ /* Bring backlight to a known off state */
+ msleep(KTD253_T_OFF_MS);
bl = devm_backlight_device_register(dev, dev_name(dev), dev, ktd253,
&ktd253_backlight_ops, NULL);
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 0e45685bcc1c..36856962ed83 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -9,16 +9,16 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
#include <linux/spi/spi.h>
-#include <linux/spi/lms283gf05.h>
#include <linux/module.h>
struct lms283gf05_state {
struct spi_device *spi;
struct lcd_device *ld;
+ struct gpio_desc *reset;
};
struct lms283gf05_seq {
@@ -90,13 +90,13 @@ static const struct lms283gf05_seq disp_pdwnseq[] = {
};
-static void lms283gf05_reset(unsigned long gpio, bool inverted)
+static void lms283gf05_reset(struct gpio_desc *gpiod)
{
- gpio_set_value(gpio, !inverted);
+ gpiod_set_value(gpiod, 0); /* De-asserted */
mdelay(100);
- gpio_set_value(gpio, inverted);
+ gpiod_set_value(gpiod, 1); /* Asserted */
mdelay(20);
- gpio_set_value(gpio, !inverted);
+ gpiod_set_value(gpiod, 0); /* De-asserted */
mdelay(20);
}
@@ -125,18 +125,15 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
{
struct lms283gf05_state *st = lcd_get_data(ld);
struct spi_device *spi = st->spi;
- struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
if (power <= FB_BLANK_NORMAL) {
- if (pdata)
- lms283gf05_reset(pdata->reset_gpio,
- pdata->reset_inverted);
+ if (st->reset)
+ lms283gf05_reset(st->reset);
lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
} else {
lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
- if (pdata)
- gpio_set_value(pdata->reset_gpio,
- pdata->reset_inverted);
+ if (st->reset)
+ gpiod_set_value(st->reset, 1); /* Asserted */
}
return 0;
@@ -150,24 +147,18 @@ static struct lcd_ops lms_ops = {
static int lms283gf05_probe(struct spi_device *spi)
{
struct lms283gf05_state *st;
- struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
struct lcd_device *ld;
- int ret = 0;
-
- if (pdata != NULL) {
- ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
- GPIOF_DIR_OUT | (!pdata->reset_inverted ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- "LMS283GF05 RESET");
- if (ret)
- return ret;
- }
st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
+ st->reset = gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset))
+ return PTR_ERR(st->reset);
+ gpiod_set_consumer_name(st->reset, "LMS283GF05 RESET");
+
ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st,
&lms_ops);
if (IS_ERR(ld))
@@ -179,8 +170,8 @@ static int lms283gf05_probe(struct spi_device *spi)
spi_set_drvdata(spi, st);
/* kick in the LCD */
- if (pdata)
- lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
+ if (st->reset)
+ lms283gf05_reset(st->reset);
lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
return 0;
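The descriptor-based GPIO API makes the old reset_inverted platform-data flag unnecessary: logical values passed to gpiod_set_value() (1 means "assert reset") are translated to the physical level using the polarity recorded in the machine description. A minimal sketch of the optional-lookup pattern used above; the helper name is made up for illustration:

#include <linux/gpio/consumer.h>

static int example_get_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *reset;

	/* Returns NULL (not an error) when no "reset-gpios" mapping exists. */
	reset = gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	*out = reset;	/* may be NULL; callers must check before use */
	return 0;
}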
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 297ee2e1ab0b..0468ea82159f 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -208,7 +208,7 @@ static int locomolcd_probe(struct locomo_dev *ldev)
return 0;
}
-static int locomolcd_remove(struct locomo_dev *dev)
+static void locomolcd_remove(struct locomo_dev *dev)
{
unsigned long flags;
@@ -220,7 +220,6 @@ static int locomolcd_remove(struct locomo_dev *dev)
local_irq_save(flags);
locomolcd_dev = NULL;
local_irq_restore(flags);
- return 0;
}
static struct locomo_driver poodle_lcd_driver = {
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 3bc7800eb0a9..091f07e7c145 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1692,7 +1692,7 @@ static int wled_probe(struct platform_device *pdev)
static int wled_remove(struct platform_device *pdev)
{
- struct wled *wled = dev_get_drvdata(&pdev->dev);
+ struct wled *wled = platform_get_drvdata(pdev);
mutex_destroy(&wled->lock);
cancel_delayed_work_sync(&wled->ovp_work);
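This is a cosmetic but idiomatic change: for a platform device, the bus-level helpers are preferred over poking at the embedded struct device directly. They are thin wrappers, essentially (paraphrased from <linux/platform_device.h>):

static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void platform_set_drvdata(struct platform_device *pdev,
					void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}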
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 8268ac43d54f..c95e0de7f4e7 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -291,7 +291,7 @@ static int sky81452_bl_probe(struct platform_device *pdev)
}
memset(&props, 0, sizeof(props));
- props.max_brightness = SKY81452_MAX_BRIGHTNESS,
+ props.max_brightness = SKY81452_MAX_BRIGHTNESS;
name = pdata->name ? pdata->name : SKY81452_DEFAULT_NAME;
bd = devm_backlight_device_register(dev, name, dev, regmap,
&sky81452_bl_ops, &props);
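The one-character fix above deserves a note: the original line ended in a comma rather than a semicolon, yet compiled cleanly, because the comma operator merged it with the next statement into a single expression. The behaviour happened to be identical here, but the pattern is fragile. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int max_brightness;
	const char *name;

	/* Comma operator: both assignments run; the value of the whole
	 * expression is the right operand. Easy to miss in review. */
	max_brightness = 100, name = "sky81452-backlight";

	printf("%d %s\n", max_brightness, name);
	return 0;
}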
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 17876f0179b5..962c12be9774 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -90,7 +90,6 @@ static unsigned int vga_video_num_lines; /* Number of text lines */
static bool vga_can_do_color; /* Do we support colors? */
static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */
static unsigned char vga_video_type __read_mostly; /* Card type */
-static bool vga_font_is_default = true;
static int vga_vesa_blanked;
static bool vga_palette_blanked;
static bool vga_is_gfx;
@@ -878,7 +877,6 @@ static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
beg = 0x0a;
}
-#ifdef BROKEN_GRAPHICS_PROGRAMS
/*
* All fonts are loaded in slot 0 (0:1 for 512 ch)
*/
@@ -886,24 +884,7 @@ static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
if (!arg)
return -EINVAL; /* Return to default font not supported */
- vga_font_is_default = false;
font_select = ch512 ? 0x04 : 0x00;
-#else
- /*
- * The default font is kept in slot 0 and is never touched.
- * A custom font is loaded in slot 2 (256 ch) or 2:3 (512 ch)
- */
-
- if (set) {
- vga_font_is_default = !arg;
- if (!arg)
- ch512 = false; /* Default font is always 256 */
- font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00;
- }
-
- if (!vga_font_is_default)
- charmap += 4 * cmapsz;
-#endif
raw_spin_lock_irq(&vga_lock);
/* First, the Sequencer */
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index cfb7f5612ef0..4f02db65dede 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1269,6 +1269,7 @@ config FB_ATY
select FB_CFB_IMAGEBLIT
select FB_BACKLIGHT if FB_ATY_BACKLIGHT
select FB_MACMODES if PPC
+ select FB_ATY_CT if SPARC64 && PCI
help
This driver supports graphics boards with the ATI Mach64 chips.
Say Y if you have such a graphics board.
@@ -1279,7 +1280,6 @@ config FB_ATY
config FB_ATY_CT
bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
depends on PCI && FB_ATY
- default y if SPARC64 && PCI
help
Say Y here to support use of ATI's 64-bit Rage boards (or other
boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index bcc92aecf666..1b72edc01cfb 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -921,40 +921,6 @@ static int acornfb_detect_monitortype(void)
return 4;
}
-/*
- * This enables the unused memory to be freed on older Acorn machines.
- * We are freeing memory on behalf of the architecture initialisation
- * code here.
- */
-static inline void
-free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
-{
- int mb_freed = 0;
-
- /*
- * Align addresses
- */
- virtual_start = PAGE_ALIGN(virtual_start);
- virtual_end = PAGE_ALIGN(virtual_end);
-
- while (virtual_start < virtual_end) {
- struct page *page;
-
- /*
- * Clear page reserved bit,
- * set count to 1, and free
- * the page.
- */
- page = virt_to_page(virtual_start);
- __free_reserved_page(page);
-
- virtual_start += PAGE_SIZE;
- mb_freed += PAGE_SIZE / 1024;
- }
-
- printk("acornfb: freed %dK memory\n", mb_freed);
-}
-
static int acornfb_probe(struct platform_device *dev)
{
unsigned long size;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index b7682de412d8..33595cc4778e 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -925,7 +925,7 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
return ret;
}
-static int clcdfb_remove(struct amba_device *dev)
+static void clcdfb_remove(struct amba_device *dev)
{
struct clcd_fb *fb = amba_get_drvdata(dev);
@@ -942,8 +942,6 @@ static int clcdfb_remove(struct amba_device *dev)
kfree(fb);
amba_release_regions(dev);
-
- return 0;
}
static const struct amba_id clcdfb_id_table[] = {
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 226682550b4b..6e07a97bbd31 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -3736,7 +3736,7 @@ default_chipset:
if (err)
goto free_irq;
- dev_set_drvdata(&pdev->dev, info);
+ platform_set_drvdata(pdev, info);
err = register_framebuffer(info);
if (err)
@@ -3764,7 +3764,7 @@ release:
static int __exit amifb_remove(struct platform_device *pdev)
{
- struct fb_info *info = dev_get_drvdata(&pdev->dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index c8feff0ee8da..e946903a86c2 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -175,6 +175,15 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
return aty_ld_le32(LCD_DATA, par);
}
}
+#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || \
+	  defined(CONFIG_FB_ATY_GENERIC_LCD) */
+void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
+{ }
+
+u32 aty_ld_lcd(int index, const struct atyfb_par *par)
+{
+ return 0;
+}
#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
@@ -2353,6 +2362,9 @@ static int aty_init(struct fb_info *info)
int gtb_memsize, has_var = 0;
struct fb_var_screeninfo var;
int ret;
+#ifdef CONFIG_ATARI
+ u8 dac_type;
+#endif
init_waitqueue_head(&par->vblank.wait);
spin_lock_init(&par->int_lock);
@@ -2360,13 +2372,12 @@ static int aty_init(struct fb_info *info)
#ifdef CONFIG_FB_ATY_GX
if (!M64_HAS(INTEGRATED)) {
u32 stat0;
- u8 dac_type, dac_subtype, clk_type;
+ u8 dac_subtype, clk_type;
stat0 = aty_ld_le32(CNFG_STAT0, par);
par->bus_type = (stat0 >> 0) & 0x07;
par->ram_type = (stat0 >> 3) & 0x07;
ramname = aty_gx_ram[par->ram_type];
/* FIXME: clockchip/RAMDAC probing? */
- dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
#ifdef CONFIG_ATARI
clk_type = CLK_ATI18818_1;
dac_type = (stat0 >> 9) & 0x07;
@@ -2375,7 +2386,6 @@ static int aty_init(struct fb_info *info)
else
dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
#else
- dac_type = DAC_IBMRGB514;
dac_subtype = DAC_IBMRGB514;
clk_type = CLK_IBMRGB514;
#endif
@@ -3062,7 +3072,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
if (dp == of_console_device) {
struct fb_var_screeninfo *var = &default_var;
unsigned int N, P, Q, M, T, R;
- u32 v_total, h_total;
struct crtc crtc;
u8 pll_regs[16];
u8 clock_cntl;
@@ -3078,9 +3087,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
aty_crtc_to_var(&crtc, var);
- h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
- v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
-
/*
* Read the PLL to figure actual Refresh Rate.
*/
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index f87cc81f4fa2..011b07e44e0d 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -281,10 +281,13 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
- u32 crtc_gen_cntl, lcd_gen_cntrl;
+ u32 crtc_gen_cntl;
u8 tmp, tmp2;
- lcd_gen_cntrl = 0;
+#ifdef CONFIG_FB_ATY_GENERIC_LCD
+ u32 lcd_gen_cntrl = 0;
+#endif
+
#ifdef DEBUG
printk("atyfb(%s): about to program:\n"
"pll_ext_cntl=0x%02x pll_gen_cntl=0x%02x pll_vclk_cntl=0x%02x\n",
@@ -402,7 +405,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
struct atyfb_par *par = (struct atyfb_par *) info->par;
u8 mpost_div, xpost_div, sclk_post_div_real;
u32 q, memcntl, trp;
- u32 dsp_config, dsp_on_off, vga_dsp_config, vga_dsp_on_off;
+ u32 dsp_config;
#ifdef DEBUG
int pllmclk, pllsclk;
#endif
@@ -488,9 +491,9 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
/* Allow BIOS to override */
dsp_config = aty_ld_le32(DSP_CONFIG, par);
- dsp_on_off = aty_ld_le32(DSP_ON_OFF, par);
- vga_dsp_config = aty_ld_le32(VGA_DSP_CONFIG, par);
- vga_dsp_on_off = aty_ld_le32(VGA_DSP_ON_OFF, par);
+ aty_ld_le32(DSP_ON_OFF, par);
+ aty_ld_le32(VGA_DSP_CONFIG, par);
+ aty_ld_le32(VGA_DSP_ON_OFF, par);
if (dsp_config)
pll->ct.dsp_loop_latency = (dsp_config & DSP_LOOP_LATENCY) >> 16;
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index 9966c58aa26c..df55e23b7a5a 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -488,12 +488,10 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
#if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
{
u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
- int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
int i;
for (i = 0; i < 4; i++)
- mon_types[i] = radeon_probe_i2c_connector(rinfo,
- i+1, &EDIDs[i]);
+ radeon_probe_i2c_connector(rinfo, i + 1, &EDIDs[i]);
}
#endif /* DEBUG */
/*
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 0d9a6bb57a09..e7702fe1fe7d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -116,7 +116,7 @@ struct bw2_par {
/**
* bw2_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 77f6470ce665..bdcc3f6ab666 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -179,7 +179,7 @@ static int cg3_setcolreg(unsigned regno,
/**
* cg3_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int cg3_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index a1c68cd48d7e..97ef43c25974 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -511,7 +511,7 @@ static int cg6_setcolreg(unsigned regno,
/**
* cg6_blank - Blanks the display.
*
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int cg6_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index e9027172c0f5..93802abbbc72 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2463,8 +2463,6 @@ static void AttrOn(const struct cirrusfb_info *cinfo)
*/
static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
{
- unsigned char dummy;
-
if (is_laguna(cinfo))
return;
if (cinfo->btype == BT_PICASSO) {
@@ -2473,18 +2471,18 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
WGen(cinfo, VGA_PEL_MSK, 0x00);
udelay(200);
/* next read dummy from pixel address (3c8) */
- dummy = RGen(cinfo, VGA_PEL_IW);
+ RGen(cinfo, VGA_PEL_IW);
udelay(200);
}
/* now do the usual stuff to access the HDR */
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
WGen(cinfo, VGA_PEL_MSK, val);
@@ -2492,7 +2490,7 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
if (cinfo->btype == BT_PICASSO) {
/* now first reset HDR access counter */
- dummy = RGen(cinfo, VGA_PEL_IW);
+ RGen(cinfo, VGA_PEL_IW);
udelay(200);
/* and at the end, restore the mask value */
@@ -2800,9 +2798,9 @@ static void bestclock(long freq, int *nom, int *den, int *div)
#ifdef CIRRUSFB_DEBUG
-/**
+/*
* cirrusfb_dbg_print_regs
- * @base: If using newmmio, the newmmio base address, otherwise %NULL
+ * @regbase: If using newmmio, the newmmio base address, otherwise %NULL
* @reg_class: type of registers to read: %CRT, or %SEQ
*
* DESCRIPTION:
@@ -2847,7 +2845,7 @@ static void cirrusfb_dbg_print_regs(struct fb_info *info,
va_end(list);
}
-/**
+/*
* cirrusfb_dbg_reg_dump
* @base: If using newmmio, the newmmio base address, otherwise %NULL
*
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 2df56bd303d2..509311471d51 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -64,9 +64,9 @@
#undef in_le32
#undef out_le32
#define in_8(addr) 0
-#define out_8(addr, val)
+#define out_8(addr, val) (void)(val)
#define in_le32(addr) 0
-#define out_le32(addr, val)
+#define out_le32(addr, val) (void)(val)
#define pgprot_cached_wthru(prot) (prot)
#else
static void invalid_vram_cache(void __force *addr)
diff --git a/drivers/video/fbdev/core/fb_notify.c b/drivers/video/fbdev/core/fb_notify.c
index 74c2da528884..10e3b9a74adc 100644
--- a/drivers/video/fbdev/core/fb_notify.c
+++ b/drivers/video/fbdev/core/fb_notify.c
@@ -19,6 +19,8 @@ static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
/**
* fb_register_client - register a client notifier
* @nb: notifier block to callback on events
+ *
+ * Return: 0 on success, negative error code on failure.
*/
int fb_register_client(struct notifier_block *nb)
{
@@ -29,6 +31,8 @@ EXPORT_SYMBOL(fb_register_client);
/**
* fb_unregister_client - unregister a client notifier
* @nb: notifier block to callback on events
+ *
+ * Return: 0 on success, negative error code on failure.
*/
int fb_unregister_client(struct notifier_block *nb)
{
@@ -38,7 +42,10 @@ EXPORT_SYMBOL(fb_unregister_client);
/**
* fb_notifier_call_chain - notify clients of fb_events
+ * @val: value passed to callback
+ * @v: pointer passed to callback
*
+ * Return: The return value of the last notifier function
*/
int fb_notifier_call_chain(unsigned long val, void *v)
{
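For context on the Return: additions: clients of this chain embed a struct notifier_block and pass it to fb_register_client(). A minimal hedged sketch (the callback name is made up for illustration):

#include <linux/fb.h>
#include <linux/notifier.h>

static int example_fb_event(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	/* data points to a struct fb_event; action is an FB_EVENT_* value. */
	return NOTIFY_OK;
}

static struct notifier_block example_fb_nb = {
	.notifier_call = example_fb_event,
};

/* Register with fb_register_client(&example_fb_nb) and
 * unregister with fb_unregister_client(&example_fb_nb). */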
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bf61598bf1c3..44a5cd2f54cc 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -56,8 +56,6 @@
* more details.
*/
-#undef FBCONDEBUG
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
@@ -82,12 +80,6 @@
#include "fbcon.h"
-#ifdef FBCONDEBUG
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
-
/*
* FIXME: Locking
*
@@ -1015,11 +1007,11 @@ static const char *fbcon_startup(void)
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
- DPRINTK("mode: %s\n", info->fix.id);
- DPRINTK("visual: %d\n", info->fix.visual);
- DPRINTK("res: %dx%d-%d\n", info->var.xres,
- info->var.yres,
- info->var.bits_per_pixel);
+ pr_debug("mode: %s\n", info->fix.id);
+ pr_debug("visual: %d\n", info->fix.visual);
+ pr_debug("res: %dx%d-%d\n", info->var.xres,
+ info->var.yres,
+ info->var.bits_per_pixel);
fbcon_add_cursor_timer(info);
return display_desc;
@@ -2013,7 +2005,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
y_diff < 0 || y_diff > virt_fh) {
const struct fb_videomode *mode;
- DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+ pr_debug("attempting resize %ix%i\n", var.xres, var.yres);
mode = fb_find_best_mode(&var, &info->modelist);
if (mode == NULL)
return -EINVAL;
@@ -2023,7 +2015,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
if (virt_w > var.xres/virt_fw || virt_h > var.yres/virt_fh)
return -EINVAL;
- DPRINTK("resize now %ix%i\n", var.xres, var.yres);
+ pr_debug("resize now %ix%i\n", var.xres, var.yres);
if (con_is_visible(vc)) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
@@ -3299,8 +3291,7 @@ static void fbcon_exit(void)
if (info->queue.func)
pending = cancel_work_sync(&info->queue);
- DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
- "no"));
+ pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
for (j = first_fb_vc; j <= last_fb_vc; j++) {
if (con2fb_map[j] == i) {
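Unlike the removed DPRINTK(), which required rebuilding with FBCONDEBUG, pr_debug() compiles to nothing unless DEBUG is defined, and with CONFIG_DYNAMIC_DEBUG individual call sites can be enabled at run time. The usual pattern, sketched:

/* Prefix every pr_debug() in this file; must precede the includes. */
#define pr_fmt(fmt) "fbcon: " fmt

#include <linux/printk.h>

static void example(void)
{
	/* Emitted only when built with -DDEBUG or enabled via the
	 * dynamic debug control file. */
	pr_debug("resize now %ix%i\n", 1024, 768);
}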
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 1bf82dbc9e3c..b0e690f41025 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -605,6 +605,7 @@ static void get_detailed_timing(unsigned char *block,
* fb_create_modedb - create video mode database
* @edid: EDID data
* @dbsize: database size
+ * @specs: monitor specifications, may be NULL
*
* RETURNS: struct fb_videomode, @dbsize contains length of database
*
@@ -1100,7 +1101,6 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
* 2 * M
* M = 300;
* C = 30;
-
*/
static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
{
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index e38c0e3f9c61..005ac3c17aa1 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1066,7 +1066,7 @@ static void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
static int fb_remove(struct platform_device *dev)
{
- struct fb_info *info = dev_get_drvdata(&dev->dev);
+ struct fb_info *info = platform_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
int ret;
@@ -1482,7 +1482,7 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
fb_set_var(da8xx_fb_info, &da8xx_fb_var);
- dev_set_drvdata(&device->dev, da8xx_fb_info);
+ platform_set_drvdata(device, da8xx_fb_info);
/* initialize the vsync wait queue */
init_waitqueue_head(&par->vsync_wait);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index e57c00824965..b80ba3d2a9b8 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -139,7 +139,7 @@ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
static void efifb_show_boot_graphics(struct fb_info *info)
{
- u32 bmp_width, bmp_height, bmp_pitch, screen_pitch, dst_x, y, src_y;
+ u32 bmp_width, bmp_height, bmp_pitch, dst_x, y, src_y;
struct screen_info *si = &screen_info;
struct bmp_file_header *file_header;
struct bmp_dib_header *dib_header;
@@ -193,7 +193,6 @@ static void efifb_show_boot_graphics(struct fb_info *info)
bmp_width = dib_header->width;
bmp_height = abs(dib_header->height);
bmp_pitch = round_up(3 * bmp_width, 4);
- screen_pitch = si->lfb_linelength;
if ((file_header->bitmap_offset + bmp_pitch * bmp_height) >
bgrt_image_size)
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 948b73184433..b3d580e57221 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -667,7 +667,7 @@ static int ffb_setcolreg(unsigned regno,
/**
* ffb_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int ffb_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 31270a8986e8..c5b99a4861e8 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -198,7 +198,7 @@ static void gbe_reset(void)
static void gbe_turn_off(void)
{
int i;
- unsigned int val, x, y, vpixen_off;
+ unsigned int val, y, vpixen_off;
gbe_turned_on = 0;
@@ -249,7 +249,6 @@ static void gbe_turn_off(void)
for (i = 0; i < 100000; i++) {
val = gbe->vt_xy;
- x = GET_GBE_FIELD(VT_XY, X, val);
y = GET_GBE_FIELD(VT_XY, Y, val);
if (y < vpixen_off)
break;
@@ -260,7 +259,6 @@ static void gbe_turn_off(void)
"gbefb: wait for vpixen_off timed out\n");
for (i = 0; i < 10000; i++) {
val = gbe->vt_xy;
- x = GET_GBE_FIELD(VT_XY, X, val);
y = GET_GBE_FIELD(VT_XY, Y, val);
if (y > vpixen_off)
break;
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 9c83ec3f8e1f..2b885cd046fe 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -305,11 +305,13 @@ static const struct of_device_id goldfish_fb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id goldfish_fb_acpi_match[] = {
{ "GFSH0004", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
+#endif
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index a45fcff1461f..8bbac7182ad3 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -357,8 +357,8 @@ error:
/**
* hgafb_open - open the framebuffer device
- * @info:pointer to fb_info object containing info for current hga board
- * @int:open by console system or userland.
+ * @info: pointer to fb_info object containing info for current hga board
+ * @init: open by console system or userland.
*/
static int hgafb_open(struct fb_info *info, int init)
@@ -370,9 +370,9 @@ static int hgafb_open(struct fb_info *info, int init)
}
/**
- * hgafb_open - open the framebuffer device
- * @info:pointer to fb_info object containing info for current hga board
- * @int:open by console system or userland.
+ * hgafb_release - release the framebuffer device
+ * @info: pointer to fb_info object containing info for current hga board
+ * @init: release by console system or userland.
*/
static int hgafb_release(struct fb_info *info, int init)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 884b16efa7e8..7f8debd2da06 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -657,7 +657,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct fb_info *info = dev_get_drvdata(&pdev->dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 40b11cce0ad6..3eb0f3583f4f 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -308,7 +308,7 @@ static int leo_setcolreg(unsigned regno,
/**
* leo_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int leo_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index 1911a47769b6..16401eb95c6c 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -17,8 +17,8 @@
/**
* spi_write - write command to the SPI port
+ * @spi: the SPI device.
* @data: can be 8/16/32-bit, MSB justified data to write.
- * @len: data length.
*
* Waits for the bus transfer complete IRQ.
* The caller is expected to perform the necessary locking.
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 894617ddabcb..fabb271337ed 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -445,7 +445,6 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
{
struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- uint32_t enabled;
unsigned long flags;
if (mx3_fbi->txd == NULL)
@@ -453,7 +452,7 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
spin_lock_irqsave(&mx3fb->lock, flags);
- enabled = sdc_fb_uninit(mx3_fbi);
+ sdc_fb_uninit(mx3_fbi);
spin_unlock_irqrestore(&mx3fb->lock, flags);
@@ -732,7 +731,7 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
* mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
- * @info: framebuffer information pointer
+ * @fbi: framebuffer information pointer
* @return: 0 on success or negative error code on failure.
*/
static int mx3fb_set_fix(struct fb_info *fbi)
@@ -740,7 +739,7 @@ static int mx3fb_set_fix(struct fb_info *fbi)
struct fb_fix_screeninfo *fix = &fbi->fix;
struct fb_var_screeninfo *var = &fbi->var;
- strncpy(fix->id, "DISP3 BG", 8);
+ memcpy(fix->id, "DISP3 BG", 8);
fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
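fix->id is a fixed-size identifier field and "DISP3 BG" is exactly 8 bytes, so strncpy() neither added a terminator nor did anything memcpy() does not; the memcpy() spelling states the fixed-length intent and silences GCC's -Wstringop-truncation warning. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char id[16] = "";	/* zero-filled, like a fresh fb_fix_screeninfo */

	/*
	 * Copying exactly strlen("DISP3 BG") bytes: strncpy() and memcpy()
	 * behave identically here (neither writes a terminator), but
	 * memcpy() says so explicitly.
	 */
	memcpy(id, "DISP3 BG", 8);
	printf("%s\n", id);	/* terminated by the zero-initialised tail */
	return 0;
}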
@@ -1105,6 +1104,8 @@ static void __blank(int blank, struct fb_info *fbi)
/**
* mx3fb_blank() - blank the display.
+ * @blank: blank value for the panel
+ * @fbi: framebuffer information pointer
*/
static int mx3fb_blank(int blank, struct fb_info *fbi)
{
@@ -1126,7 +1127,7 @@ static int mx3fb_blank(int blank, struct fb_info *fbi)
/**
* mx3fb_pan_display() - pan or wrap the display
* @var: variable screen buffer information.
- * @info: framebuffer information pointer.
+ * @fbi: framebuffer information pointer.
*
* We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
@@ -1387,6 +1388,8 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
/**
* mx3fb_init_fbinfo() - initialize framebuffer information object.
+ * @dev: the device
+ * @ops: framebuffer device operations
* @return: initialized framebuffer structure.
*/
static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 09a20d4ab35f..c0f4f402da3f 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1843,7 +1843,6 @@ static int neo_init_hw(struct fb_info *info)
struct neofb_par *par = info->par;
int videoRam = 896;
int maxClock = 65000;
- int CursorMem = 1024;
int CursorOff = 0x100;
DBG("neo_init_hw");
@@ -1895,19 +1894,16 @@ static int neo_init_hw(struct fb_info *info)
case FB_ACCEL_NEOMAGIC_NM2070:
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
- CursorMem = 2048;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
- CursorMem = 1024;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
- CursorMem = 1024;
CursorOff = 0x1000;
par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
diff --git a/drivers/video/fbdev/nvidia/nv_setup.c b/drivers/video/fbdev/nvidia/nv_setup.c
index 2fa68669613a..5404017e6957 100644
--- a/drivers/video/fbdev/nvidia/nv_setup.c
+++ b/drivers/video/fbdev/nvidia/nv_setup.c
@@ -89,9 +89,8 @@ u8 NVReadSeq(struct nvidia_par *par, u8 index)
}
void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
{
- volatile u8 tmp;
- tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+ VGA_RD08(par->PCIO, par->IOBase + 0x0a);
if (par->paletteEnabled)
index &= ~0x20;
else
@@ -101,9 +100,7 @@ void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
}
u8 NVReadAttr(struct nvidia_par *par, u8 index)
{
- volatile u8 tmp;
-
- tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+ VGA_RD08(par->PCIO, par->IOBase + 0x0a);
if (par->paletteEnabled)
index &= ~0x20;
else
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 744416dc530e..3ca1bd7bb92f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -43,6 +43,7 @@ config FB_OMAP2_PANEL_DPI
config FB_OMAP2_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_PANEL_DSI_CM = n
help
Driver for generic DSI command mode panels.
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 0f93a260e432..1bec7a4422e8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -239,7 +239,7 @@ static struct omap_dss_driver lb035q02_ops = {
static int lb035q02_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct panel_drv_data *ddata = spi_get_drvdata(spi);
struct omap_dss_device *in;
struct gpio_desc *gpio;
@@ -277,7 +277,7 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (ddata == NULL)
return -ENOMEM;
- dev_set_drvdata(&spi->dev, ddata);
+ spi_set_drvdata(spi, ddata);
ddata->spi = spi;
@@ -318,7 +318,7 @@ err_gpio:
static int lb035q02_panel_spi_remove(struct spi_device *spi)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct panel_drv_data *ddata = spi_get_drvdata(spi);
struct omap_dss_device *dssdev = &ddata->dssdev;
struct omap_dss_device *in = ddata->in;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 3417618310ff..cc2ad787d493 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -75,7 +75,7 @@ static void dispc_dump_irqs(struct seq_file *s)
seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1])
PIS(FRAMEDONE);
PIS(VSYNC);
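Removing the semicolon baked into the PIS() macro (here and in the matching dsi.c hunk below) is macro hygiene: a statement-like macro that supplies its own semicolon expands to two statements at the call site, which breaks un-braced if/else bodies. A standalone demonstration of why the trailing semicolon belongs at the call site:

#include <stdio.h>

#define BAD(x)	printf("%d\n", (x));	/* trailing ';' baked in */
#define GOOD(x)	printf("%d\n", (x))

int main(void)
{
	if (1)
		GOOD(1);
	else		/* with BAD(1) above, this 'else' would not parse */
		GOOD(2);
	return 0;
}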
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index e2e7fe6f89ee..99ce6e955a46 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -55,7 +55,7 @@ static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
/* only used in non-DT mode */
static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
{
- return dev_get_drvdata(&pdev->dev);
+ return platform_get_drvdata(pdev);
}
static struct dss_pll *dpi_get_pll(enum omap_channel channel)
@@ -784,7 +784,7 @@ static int dpi_bind(struct device *dev, struct device *master, void *data)
dpi->pdev = pdev;
- dev_set_drvdata(&pdev->dev, dpi);
+ platform_set_drvdata(pdev, dpi);
mutex_init(&dpi->lock);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 6f9c25fec994..daa313f14335 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -399,7 +399,7 @@ module_param(dsi_perf, bool, 0644);
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
- return dev_get_drvdata(&dsidev->dev);
+ return platform_get_drvdata(dsidev);
}
static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
@@ -1178,13 +1178,12 @@ static int dsi_regulator_init(struct platform_device *dsidev)
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
- u32 l;
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
b0 = 28;
@@ -1554,7 +1553,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -3627,7 +3626,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
static void dsi_proto_timings(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned tlpx, tclk_zero, tclk_prepare;
unsigned tclk_pre, tclk_post;
unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned ths_trail, ths_exit;
@@ -3646,7 +3645,6 @@ static void dsi_proto_timings(struct platform_device *dsidev)
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
@@ -4040,7 +4038,6 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- u16 dw, dh;
dsi_perf_mark_setup(dsidev);
@@ -4049,11 +4046,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
-
#ifdef DSI_PERF_MEASURE
- dsi->update_bytes = dw * dh *
+ dsi->update_bytes = dsi->timings.x_res * dsi->timings.y_res *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsidev);
@@ -5272,7 +5266,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
dsi->pdev = dsidev;
- dev_set_drvdata(&dsidev->dev, dsi);
+ platform_set_drvdata(dsidev, dsi);
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 496b43bdad21..800bd108e834 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -672,7 +672,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
int irq;
hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ platform_set_drvdata(pdev, &hdmi);
mutex_init(&hdmi.lock);
spin_lock_init(&hdmi.audio_playing_lock);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index 726c190862d4..e6363a420933 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -679,7 +679,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -741,7 +741,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index e3d441ade241..2c03608addcd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -713,7 +713,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
int irq;
hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ platform_set_drvdata(pdev, &hdmi);
mutex_init(&hdmi.lock);
spin_lock_init(&hdmi.audio_playing_lock);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index eda29d3032e1..cb63bc0e92ca 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -790,7 +790,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -833,7 +833,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 6da672e92643..4e88a0a195ad 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -175,7 +175,7 @@ static int p9100_setcolreg(unsigned regno,
/**
* p9100_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 27893fa139b0..c68725eebee3 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1508,8 +1508,8 @@ static const struct fb_ops pm2fb_ops = {
*
* Initialise and allocate resource for PCI device.
*
- * @param pdev PCI device.
- * @param id PCI device ID.
+ * @pdev: PCI device.
+ * @id: PCI device ID.
*/
static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1715,7 +1715,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
*
* Release all device resources.
*
- * @param pdev PCI device to clean up.
+ * @pdev: PCI device to clean up.
*/
static void pm2fb_remove(struct pci_dev *pdev)
{
@@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
#ifndef MODULE
-/**
+/*
* Parse user specified options.
*
* That is, comma-separated options following `video=pm2fb:'.
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index ce55b9d2e862..55554b0433cb 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -464,7 +464,7 @@ static inline void reverse_order(u32 *l)
/**
* rivafb_load_cursor_image - load cursor image to hardware
- * @data: address to monochrome bitmap (1 = foreground color, 0 = background)
+ * @data8: address to monochrome bitmap (1 = foreground color, 0 = background)
* @par: pointer to private data
* @w: width of cursor image in pixels
* @h: height of cursor image in scanlines
@@ -843,9 +843,9 @@ static void riva_update_var(struct fb_var_screeninfo *var,
/**
* rivafb_do_maximize -
* @info: pointer to fb_info object containing info for current riva board
- * @var:
- * @nom:
- * @den:
+ * @var: standard kernel fb changeable data
+ * @nom: numerator of the bytes-per-pixel fraction @nom/@den
+ * @den: denominator of the bytes-per-pixel fraction
*
* DESCRIPTION:
* .
@@ -1214,7 +1214,6 @@ out:
/**
* rivafb_pan_display
* @var: standard kernel fb changeable data
- * @con: TODO
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index bcf9c4b4de31..8b829b720064 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -836,17 +836,17 @@ static void nv10CalcArbitration
nv10_sim_state *arb
)
{
- int data, pagemiss, cas,width, video_enable, bpp;
- int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
- int nvclk_fill, us_extra;
+ int data, pagemiss, width, video_enable, bpp;
+ int nvclks, mclks, pclks, vpagemiss, crtpagemiss;
+ int nvclk_fill;
int found, mclk_extra, mclk_loop, cbs, m1;
int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
- int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate;
- int vus_m, vus_n, vus_p;
- int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm;
+ int us_m, us_m_min, us_n, us_p, crtc_drain_rate;
+ int vus_m;
+ int vpm_us, us_video, cpm_us, us_crt,clwm;
int clwm_rnd_down;
- int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2;
- int pclks_2_top_fifo, min_mclk_extra;
+ int m2us, us_pipe_min, p1clk, p2;
+ int min_mclk_extra;
int us_min_mclk_extra;
fifo->valid = 1;
@@ -854,16 +854,13 @@ static void nv10CalcArbitration
mclk_freq = arb->mclk_khz;
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
- cas = arb->mem_latency;
width = arb->memory_width/64;
video_enable = arb->enable_video;
bpp = arb->pix_bpp;
mp_enable = arb->enable_mp;
clwm = 0;
- vlwm = 1024;
cbs = 512;
- vbs = 512;
pclks = 4; /* lwm detect. */
@@ -924,17 +921,11 @@ static void nv10CalcArbitration
us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq;
us_n = nvclks*1000*1000 / nvclk_freq;/* nvclk latency in us */
us_p = pclks*1000*1000 / pclk_freq;/* nvclk latency in us */
- us_pipe = us_m + us_n + us_p;
us_pipe_min = us_m_min + us_n + us_p;
- us_extra = 0;
vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
- vus_n = (4)*1000*1000 / nvclk_freq;/* nvclk latency in us */
- vus_p = 0*1000*1000 / pclk_freq;/* pclk latency in us */
- vus_pipe = vus_m + vus_n + vus_p;
if(video_enable) {
- video_drain_rate = pclk_freq * 4; /* MB/s */
crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */
vpagemiss = 1; /* self generating page miss */
@@ -993,7 +984,6 @@ static void nv10CalcArbitration
else if(crtc_drain_rate * 100 >= nvclk_fill * 98) {
clwm = 1024;
cbs = 512;
- us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
}
}
}
@@ -1010,7 +1000,6 @@ static void nv10CalcArbitration
m1 = clwm + cbs - 1024; /* Amount of overfill */
m2us = us_pipe_min + us_min_mclk_extra;
- pclks_2_top_fifo = (1024-clwm)/(8*width);
/* pclk cycles to drain */
p1clk = m2us * pclk_freq/(1000*1000);
@@ -1038,7 +1027,6 @@ static void nv10CalcArbitration
min_mclk_extra--;
}
}
- craw = clwm;
if(clwm < (1024-cbs+8)) clwm = 1024-cbs+8;
data = (int)(clwm);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 4541afcf9386..d1b5f965bc96 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -45,7 +45,7 @@
#if 0
#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
#else
-#define dbg(fmt, args...) do { } while (0)
+#define dbg(fmt, args...) do { no_printk(KERN_INFO fmt, ## args); } while (0)
#endif
/*
@@ -512,7 +512,6 @@ s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
}
/**
- *
* s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
* @info : framebuffer structure
* @rect : fb_fillrect structure
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index ba316bd56efd..3b134e1bbc38 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -75,6 +75,7 @@ struct s3c_fb;
* @buf_size: Offset of buffer size registers.
* @buf_end: Offset of buffer end registers.
* @osd: The base for the OSD registers.
+ * @osd_stride: stride of osd
* @palette: Address of palette memory, or 0 if none.
* @has_prtcon: Set if has PRTCON register.
* @has_shadowcon: Set if has SHADOWCON register.
@@ -155,7 +156,7 @@ struct s3c_fb_palette {
* @windata: The platform data supplied for the window configuration.
* @parent: The hardware that this window is part of.
* @fbinfo: Pointer pack to the framebuffer info for this window.
- * @varint: The variant information for this window.
+ * @variant: The variant information for this window.
* @palette_buffer: Buffer/cache to hold palette entries.
* @pseudo_palette: For use in TRUECOLOUR modes for entries 0..15.
* @index: The window number of this window.
@@ -336,7 +337,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
* @sfb: The hardware state.
- * @pixclock: The pixel clock wanted, in picoseconds.
+ * @pixclk: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
@@ -733,7 +734,7 @@ static inline unsigned int chan_to_field(unsigned int chan,
* @red: The red field for the palette data.
* @green: The green field for the palette data.
* @blue: The blue field for the palette data.
- * @trans: The transparency (alpha) field for the palette data.
+ * @transp: The transparency (alpha) field for the palette data.
* @info: The framebuffer being changed.
*/
static int s3c_fb_setcolreg(unsigned regno,
@@ -1133,6 +1134,7 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
/**
* s3c_fb_release_win() - release resources for a framebuffer window.
+ * @sfb: The base resources for the hardware.
* @win: The window to cleanup the resources for.
*
* Release the resources that where claimed for the hardware window,
@@ -1160,6 +1162,7 @@ static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
/**
* s3c_fb_probe_win() - register an hardware window
* @sfb: The base resources for the hardware
+ * @win_no: The window number
* @variant: The variant information for this window.
* @res: Pointer to where to place the resultant window.
*
@@ -1170,7 +1173,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
struct s3c_fb_win_variant *variant,
struct s3c_fb_win **res)
{
- struct fb_var_screeninfo *var;
struct fb_videomode initmode;
struct s3c_fb_pd_win *windata;
struct s3c_fb_win *win;
@@ -1198,7 +1200,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
win = fbinfo->par;
*res = win;
- var = &fbinfo->var;
win->variant = *variant;
win->fbinfo = fbinfo;
win->parent = sfb;
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index fde27feae5d0..b568c646a76c 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2648,7 +2648,7 @@ static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
unsigned short ModeIdIndex, unsigned short RRTI)
{
- unsigned short data, infoflag = 0, modeflag, resindex;
+ unsigned short data, infoflag = 0, modeflag;
#ifdef CONFIG_FB_SIS_315
unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
unsigned short data2, data3;
@@ -2659,7 +2659,6 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
if(SiS_Pr->UseCustomMode) {
infoflag = SiS_Pr->CInfoFlag;
} else {
- resindex = SiS_GetResInfo(SiS_Pr, ModeNo, ModeIdIndex);
if(ModeNo > 0x13) {
infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
}
@@ -3538,17 +3537,13 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
struct fb_var_screeninfo *var, bool writeres
)
{
- unsigned short HRE, HBE, HRS, HBS, HDE, HT;
- unsigned short VRE, VBE, VRS, VBS, VDE, VT;
- unsigned char sr_data, cr_data, cr_data2;
- int A, B, C, D, E, F, temp;
+ unsigned short HRE, HBE, HRS, HDE;
+ unsigned short VRE, VBE, VRS, VDE;
+ unsigned char sr_data, cr_data;
+ int B, C, D, E, F, temp;
sr_data = crdata[14];
- /* Horizontal total */
- HT = crdata[0] | ((unsigned short)(sr_data & 0x03) << 8);
- A = HT + 5;
-
/* Horizontal display enable end */
HDE = crdata[1] | ((unsigned short)(sr_data & 0x0C) << 6);
E = HDE + 1;
@@ -3557,9 +3552,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
HRS = crdata[4] | ((unsigned short)(sr_data & 0xC0) << 2);
F = HRS - E - 3;
- /* Horizontal blank start */
- HBS = crdata[2] | ((unsigned short)(sr_data & 0x30) << 4);
-
sr_data = crdata[15];
cr_data = crdata[5];
@@ -3588,13 +3580,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
sr_data = crdata[13];
cr_data = crdata[7];
- /* Vertical total */
- VT = crdata[6] |
- ((unsigned short)(cr_data & 0x01) << 8) |
- ((unsigned short)(cr_data & 0x20) << 4) |
- ((unsigned short)(sr_data & 0x01) << 10);
- A = VT + 2;
-
/* Vertical display enable end */
VDE = crdata[10] |
((unsigned short)(cr_data & 0x02) << 7) |
@@ -3609,14 +3594,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
((unsigned short)(sr_data & 0x08) << 7);
F = VRS + 1 - E;
- cr_data2 = (crdata[16] & 0x01) << 5;
-
- /* Vertical blank start */
- VBS = crdata[11] |
- ((unsigned short)(cr_data & 0x08) << 5) |
- ((unsigned short)(cr_data2 & 0x20) << 4) |
- ((unsigned short)(sr_data & 0x04) << 8);
-
/* Vertical blank end */
VBE = crdata[12] | ((unsigned short)(sr_data & 0x10) << 4);
temp = VBE - ((E - 1) & 511);
diff --git a/drivers/video/fbdev/sis/oem310.h b/drivers/video/fbdev/sis/oem310.h
index 8fce56e4482c..ed28755715ce 100644
--- a/drivers/video/fbdev/sis/oem310.h
+++ b/drivers/video/fbdev/sis/oem310.h
@@ -200,6 +200,7 @@ static const unsigned char SiS310_TVDelayCompensation_651302LV[] = /* M650, 651,
0x33,0x33
};
+#if 0 /* Not used */
static const unsigned char SiS_TVDelay661_301[] = /* 661, 301 */
{
0x44,0x44,
@@ -219,6 +220,7 @@ static const unsigned char SiS_TVDelay661_301B[] = /* 661, 301B et al */
0x44,0x44,
0x44,0x44
};
+#endif
static const unsigned char SiS310_TVDelayCompensation_LVDS[] = /* LVDS */
{
diff --git a/drivers/video/fbdev/sis/sis.h b/drivers/video/fbdev/sis/sis.h
index 9f4c3093ccb3..d632f096083b 100644
--- a/drivers/video/fbdev/sis/sis.h
+++ b/drivers/video/fbdev/sis/sis.h
@@ -15,7 +15,6 @@
#include "vgatypes.h"
#include "vstruct.h"
-#include "init.h"
#define VER_MAJOR 1
#define VER_MINOR 8
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 03c736f6f3d0..266a5582f94d 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -5029,7 +5029,6 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
static const u8 cs168[8] = {
0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
};
- u8 reg;
u8 v1;
u8 v2;
u8 v3;
@@ -5037,9 +5036,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
SiS_SetReg(SISCR, 0xb0, 0x80); /* DDR2 dual frequency mode */
SiS_SetReg(SISCR, 0x82, 0x77);
SiS_SetReg(SISCR, 0x86, 0x00);
- reg = SiS_GetReg(SISCR, 0x86);
+ SiS_GetReg(SISCR, 0x86);
SiS_SetReg(SISCR, 0x86, 0x88);
- reg = SiS_GetReg(SISCR, 0x86);
+ SiS_GetReg(SISCR, 0x86);
v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
if (ivideo->haveXGIROM) {
v1 = bios[regb + 0x168];
@@ -5049,9 +5048,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
SiS_SetReg(SISCR, 0x86, v1);
SiS_SetReg(SISCR, 0x82, 0x77);
SiS_SetReg(SISCR, 0x85, 0x00);
- reg = SiS_GetReg(SISCR, 0x85);
+ SiS_GetReg(SISCR, 0x85);
SiS_SetReg(SISCR, 0x85, 0x88);
- reg = SiS_GetReg(SISCR, 0x85);
+ SiS_GetReg(SISCR, 0x85);
SiS_SetReg(SISCR, 0x85, v2);
SiS_SetReg(SISCR, 0x82, v3);
SiS_SetReg(SISCR, 0x98, 0x01);
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index c05cdabeb11c..27d4b0ace2d6 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1390,7 +1390,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fix->smem_start, info->screen_base,
fix->smem_len >> 20);
- f_ddprintk("regbase_virt: %#lx\n", par->mmio_vbase);
+ f_ddprintk("regbase_virt: %p\n", par->mmio_vbase);
f_ddprintk("membase_phys: %#lx\n", fix->smem_start);
f_ddprintk("fbbase_virt: %p\n", info->screen_base);
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 34b2e5b6e84a..1638a40fed22 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -196,7 +196,7 @@ static int tcx_setcolreg(unsigned regno,
/**
* tcx_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index f056d80f6359..67e37a62b07c 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -206,9 +206,7 @@ static inline u8 crt_inb(struct tdfx_par *par, u32 idx)
static inline void att_outb(struct tdfx_par *par, u32 idx, u8 val)
{
- unsigned char tmp;
-
- tmp = vga_inb(par, IS1_R);
+ vga_inb(par, IS1_R);
vga_outb(par, ATT_IW, idx);
vga_outb(par, ATT_IW, val);
}
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 666fbe2f671c..ae0cf5540636 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -555,7 +555,7 @@ tgafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
/**
* tgafb_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
@@ -837,7 +837,7 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
u32 *palette = ((u32 *)info->pseudo_palette);
unsigned long pos, line_length, i, j;
const unsigned char *data;
- void __iomem *regs_base, *fb_base;
+ void __iomem *fb_base;
dx = image->dx;
dy = image->dy;
@@ -855,7 +855,6 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
if (dy + height > vyres)
height = vyres - dy;
- regs_base = par->tga_regs_base;
fb_base = par->tga_fb_base;
pos = dy * line_length + (dx * 4);
@@ -1034,7 +1033,7 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
regs_base + TGA_MODE_REG);
}
-/**
+/*
* tgafb_copyarea - REQUIRED function. Can use generic routines if
 * non accelerated hardware and packed pixel based.
 * Copies one area of the screen to another area.
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index f9b3c1cb9530..b9cdd02c1000 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1017,6 +1017,7 @@ static void dlfb_ops_destroy(struct fb_info *info)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
+ dlfb_free_urb_list(dlfb);
usb_put_dev(dlfb->udev);
kfree(dlfb);
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index def14ac0ebe1..4df6772802d7 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -423,7 +423,7 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
task->t.flags = TF_VBEIB;
task->t.buf_len = sizeof(struct vbe_ib);
task->buf = &par->vbe_ib;
- strncpy(par->vbe_ib.vbe_signature, "VBE2", 4);
+ memcpy(par->vbe_ib.vbe_signature, "VBE2", 4);
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
@@ -560,6 +560,8 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
task->t.regs.eax = 0x4f0a;
task->t.regs.ebx = 0x0;
err = uvesafb_exec(task);
+ if (err)
+ return err;
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
par->pmi_setpal = par->ypan = 0;
@@ -1871,7 +1873,7 @@ static ssize_t v86d_show(struct device_driver *dev, char *buf)
static ssize_t v86d_store(struct device_driver *dev, const char *buf,
size_t count)
{
- strncpy(v86d_path, buf, PATH_MAX);
+ strncpy(v86d_path, buf, PATH_MAX - 1);
return count;
}
static DRIVER_ATTR_RW(v86d);
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 4a869402d120..088b962076b5 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -537,11 +537,9 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
u32 clock;
struct via_display_timing timing;
struct fb_var_screeninfo panel_var;
- const struct fb_videomode *mode_crt_table, *panel_crt_table;
+ const struct fb_videomode *panel_crt_table;
DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n");
- /* Get mode table */
- mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60);
/* Get panel table Pointer */
panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
viafb_fill_var_timing_info(&panel_var, panel_crt_table);
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 2445cfe617a9..42255d27a1db 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -11,6 +11,7 @@
#include <linux/fb.h>
#include <linux/platform_device.h>
#include "core/fb_draw.h"
+#include "wmt_ge_rops.h"
#define GE_COMMAND_OFF 0x00
#define GE_DEPTH_OFF 0x04
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index ca4ff658cad0..ffbf900648d9 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -472,7 +472,7 @@ static int xilinxfb_of_probe(struct platform_device *pdev)
if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
- dev_set_drvdata(&pdev->dev, drvdata);
+ platform_set_drvdata(pdev, drvdata);
return xilinxfb_assign(pdev, drvdata, &pdata);
}
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index abc9ada798ee..f93b6abbe258 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -52,6 +52,7 @@ static int parse_timing_property(const struct device_node *np, const char *name,
/**
* of_parse_display_timing - parse display_timing entry from device_node
* @np: device_node with the properties
+ * @dt: display_timing that contains the result. It may be partially written in case of errors
**/
static int of_parse_display_timing(const struct device_node *np,
struct display_timing *dt)
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 67aff2421c29..e7d10ffd3b66 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -14,9 +14,9 @@
/**
* of_get_videomode - get the videomode #<index> from devicetree
- * @np - devicenode with the display_timings
- * @vm - set to return value
- * @index - index into list of display_timings
+ * @np: devicenode with the display_timings
+ * @vm: set to return value
+ * @index: index into list of display_timings
* (Set this to OF_USE_NATIVE_MODE to use whatever mode is
* specified as native mode in the DT.)
*
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 80c5f9c16ec1..8061e8ef449f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -34,4 +34,6 @@ config FSL_HV_MANAGER
source "drivers/virt/vboxguest/Kconfig"
source "drivers/virt/nitro_enclaves/Kconfig"
+
+source "drivers/virt/acrn/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index f28425ce4b39..3e272ea60cd9 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
+obj-$(CONFIG_ACRN_HSM) += acrn/
diff --git a/drivers/virt/acrn/Kconfig b/drivers/virt/acrn/Kconfig
new file mode 100644
index 000000000000..3e1a61c9d8d8
--- /dev/null
+++ b/drivers/virt/acrn/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config ACRN_HSM
+ tristate "ACRN Hypervisor Service Module"
+ depends on ACRN_GUEST
+ select EVENTFD
+ help
+ ACRN Hypervisor Service Module (HSM) is a kernel module which
+ communicates with ACRN userspace through ioctls and talks to
+ the ACRN Hypervisor through hypercalls. HSM will only run in
+ a privileged management VM, called Service VM, to manage User
+ VMs and do I/O emulation. Not required for simply running
+ under ACRN as a User VM.
+
+	  To compile this driver as a module, choose M here; the module will
+	  be called acrn. If unsure, say N.
diff --git a/drivers/virt/acrn/Makefile b/drivers/virt/acrn/Makefile
new file mode 100644
index 000000000000..08ce641dcfa1
--- /dev/null
+++ b/drivers/virt/acrn/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ACRN_HSM) := acrn.o
+acrn-y := hsm.o vm.o mm.o ioreq.o ioeventfd.o irqfd.o
diff --git a/drivers/virt/acrn/acrn_drv.h b/drivers/virt/acrn/acrn_drv.h
new file mode 100644
index 000000000000..1be54efa666c
--- /dev/null
+++ b/drivers/virt/acrn/acrn_drv.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ACRN_HSM_DRV_H
+#define __ACRN_HSM_DRV_H
+
+#include <linux/acrn.h>
+#include <linux/dev_printk.h>
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+
+#include "hypercall.h"
+
+extern struct miscdevice acrn_dev;
+
+#define ACRN_NAME_LEN 16
+#define ACRN_MEM_MAPPING_MAX 256
+
+#define ACRN_MEM_REGION_ADD 0
+#define ACRN_MEM_REGION_DEL 2
+
+struct acrn_vm;
+struct acrn_ioreq_client;
+
+/**
+ * struct vm_memory_region_op - Hypervisor memory operation
+ * @type: Operation type (ACRN_MEM_REGION_*)
+ * @attr: Memory attribute (ACRN_MEM_TYPE_* | ACRN_MEM_ACCESS_*)
+ * @user_vm_pa: Physical address of User VM to be mapped.
+ * @service_vm_pa: Physical address of Service VM to be mapped.
+ * @size: Size of this region.
+ *
+ * Structure containing needed information that is provided to ACRN Hypervisor
+ * to manage the EPT mappings of a single memory region of the User VM. Several
+ * &struct vm_memory_region_op can be batched to ACRN Hypervisor, see &struct
+ * vm_memory_region_batch.
+ */
+struct vm_memory_region_op {
+ u32 type;
+ u32 attr;
+ u64 user_vm_pa;
+ u64 service_vm_pa;
+ u64 size;
+};
+
+/**
+ * struct vm_memory_region_batch - A batch of vm_memory_region_op.
+ * @vmid: A User VM ID.
+ * @reserved: Reserved.
+ * @regions_num: The number of vm_memory_region_op.
+ * @regions_gpa: Physical address of a vm_memory_region_op array.
+ *
+ * HC_VM_SET_MEMORY_REGIONS uses this structure to manage EPT mappings of
+ * multiple memory regions of a User VM. A &struct vm_memory_region_batch
+ * contains multiple &struct vm_memory_region_op for batch processing in the
+ * ACRN Hypervisor.
+ */
+struct vm_memory_region_batch {
+ u16 vmid;
+ u16 reserved[3];
+ u32 regions_num;
+ u64 regions_gpa;
+};
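
To make the batching flow concrete, here is a minimal sketch (illustrative, not part of this patch) of submitting a single ADD operation through these two structures. The helper name is hypothetical, the vmid is passed directly since struct acrn_vm is only forward-declared at this point, and the usual slab/virt_to_phys includes are assumed:

	/* Hypothetical helper, for illustration only. */
	static int example_add_one_region(u16 vmid, u64 user_gpa,
					  u64 service_gpa, u64 size, u32 attr)
	{
		struct vm_memory_region_batch *batch;
		struct vm_memory_region_op *op;
		int ret;

		/* Allocate the single region op right behind the batch header. */
		batch = kzalloc(sizeof(*batch) + sizeof(*op), GFP_KERNEL);
		if (!batch)
			return -ENOMEM;

		op = (struct vm_memory_region_op *)(batch + 1);
		op->type = ACRN_MEM_REGION_ADD;
		op->attr = attr;		/* ACRN_MEM_TYPE_* | ACRN_MEM_ACCESS_* */
		op->user_vm_pa = user_gpa;
		op->service_vm_pa = service_gpa;
		op->size = size;

		batch->vmid = vmid;
		batch->regions_num = 1;
		batch->regions_gpa = virt_to_phys(op);	/* hypervisor reads the ops here */

		ret = hcall_set_memory_regions(virt_to_phys(batch));
		kfree(batch);
		return ret;
	}

The real flow lives in mm.c; this sketch only shows how a batch and its ops relate.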
+
+/**
+ * struct vm_memory_mapping - Memory map between a User VM and the Service VM
+ * @pages: Pages in Service VM kernel.
+ * @npages: Number of pages.
+ * @service_vm_va: Virtual address in Service VM kernel.
+ * @user_vm_pa: Physical address in User VM.
+ * @size: Size of this memory region.
+ *
+ * HSM maintains memory mappings between a User VM GPA and the Service VM
+ * kernel VA for accelerating the User VM GPA translation.
+ */
+struct vm_memory_mapping {
+ struct page **pages;
+ int npages;
+ void *service_vm_va;
+ u64 user_vm_pa;
+ size_t size;
+};
+
+/**
+ * struct acrn_ioreq_buffer - Data for setting the ioreq buffer of User VM
+ * @ioreq_buf: The GPA of the IO request shared buffer of a VM
+ *
+ * The parameter for the HC_SET_IOREQ_BUFFER hypercall used to set up
+ * the shared I/O request buffer between Service VM and ACRN hypervisor.
+ */
+struct acrn_ioreq_buffer {
+ u64 ioreq_buf;
+};
+
+struct acrn_ioreq_range {
+ struct list_head list;
+ u32 type;
+ u64 start;
+ u64 end;
+};
+
+#define ACRN_IOREQ_CLIENT_DESTROYING 0U
+typedef int (*ioreq_handler_t)(struct acrn_ioreq_client *client,
+ struct acrn_io_request *req);
+/**
+ * struct acrn_ioreq_client - Structure of I/O client.
+ * @name: Client name
+ * @vm: The VM that the client belongs to
+ * @list: List node for this acrn_ioreq_client
+ * @is_default: If this client is the default one
+ * @flags: Flags (ACRN_IOREQ_CLIENT_*)
+ * @range_list: I/O ranges
+ * @range_lock: Lock to protect range_list
+ * @ioreqs_map: The pending I/O requests bitmap.
+ * @handler: I/O requests handler of this client
+ * @thread: The thread which executes the handler
+ * @wq: The wait queue for the handler thread parking
+ * @priv: Data for the thread
+ */
+struct acrn_ioreq_client {
+ char name[ACRN_NAME_LEN];
+ struct acrn_vm *vm;
+ struct list_head list;
+ bool is_default;
+ unsigned long flags;
+ struct list_head range_list;
+ rwlock_t range_lock;
+ DECLARE_BITMAP(ioreqs_map, ACRN_IO_REQUEST_MAX);
+ ioreq_handler_t handler;
+ struct task_struct *thread;
+ wait_queue_head_t wq;
+ void *priv;
+};
+
+#define ACRN_INVALID_VMID (0xffffU)
+
+#define ACRN_VM_FLAG_DESTROYED 0U
+#define ACRN_VM_FLAG_CLEARING_IOREQ 1U
+extern struct list_head acrn_vm_list;
+extern rwlock_t acrn_vm_list_lock;
+/**
+ * struct acrn_vm - Properties of ACRN User VM.
+ * @list: Entry within global list of all VMs.
+ * @vmid: User VM ID.
+ * @vcpu_num: Number of virtual CPUs in the VM.
+ * @flags: Flags (ACRN_VM_FLAG_*) of the VM. These flags
+ * are managed by HSM and are distinct from
+ * &acrn_vm_creation.vm_flag.
+ * @regions_mapping_lock: Lock to protect &acrn_vm.regions_mapping and
+ * &acrn_vm.regions_mapping_count.
+ * @regions_mapping: Memory mappings of this VM.
+ * @regions_mapping_count: Number of memory mappings of this VM.
+ * @ioreq_clients_lock: Lock to protect ioreq_clients and default_client
+ * @ioreq_clients: The I/O request clients list of this VM
+ * @default_client: The default I/O request client
+ * @ioreq_buf: I/O request shared buffer
+ * @ioreq_page: The page of the I/O request shared buffer
+ * @pci_conf_addr: Address of a PCI configuration access emulation
+ * @monitor_page: Page of interrupt statistics of User VM
+ * @ioeventfds_lock: Lock to protect ioeventfds list
+ * @ioeventfds: List to link all hsm_ioeventfd
+ * @ioeventfd_client: I/O client for ioeventfds of the VM
+ * @irqfds_lock: Lock to protect irqfds list
+ * @irqfds: List to link all hsm_irqfd
+ * @irqfd_wq: Workqueue for irqfd async shutdown
+ */
+struct acrn_vm {
+ struct list_head list;
+ u16 vmid;
+ int vcpu_num;
+ unsigned long flags;
+ struct mutex regions_mapping_lock;
+ struct vm_memory_mapping regions_mapping[ACRN_MEM_MAPPING_MAX];
+ int regions_mapping_count;
+ spinlock_t ioreq_clients_lock;
+ struct list_head ioreq_clients;
+ struct acrn_ioreq_client *default_client;
+ struct acrn_io_request_buffer *ioreq_buf;
+ struct page *ioreq_page;
+ u32 pci_conf_addr;
+ struct page *monitor_page;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds;
+ struct acrn_ioreq_client *ioeventfd_client;
+ struct mutex irqfds_lock;
+ struct list_head irqfds;
+ struct workqueue_struct *irqfd_wq;
+};
+
+struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
+ struct acrn_vm_creation *vm_param);
+int acrn_vm_destroy(struct acrn_vm *vm);
+int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
+ u64 size, u32 mem_type, u32 mem_access_right);
+int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size);
+int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+void acrn_vm_all_ram_unmap(struct acrn_vm *vm);
+
+int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma);
+void acrn_ioreq_deinit(struct acrn_vm *vm);
+int acrn_ioreq_intr_setup(void);
+void acrn_ioreq_intr_remove(void);
+void acrn_ioreq_request_clear(struct acrn_vm *vm);
+int acrn_ioreq_client_wait(struct acrn_ioreq_client *client);
+int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu);
+struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
+ ioreq_handler_t handler,
+ void *data, bool is_default,
+ const char *name);
+void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client);
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
+
+int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data);
+
+int acrn_ioeventfd_init(struct acrn_vm *vm);
+int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args);
+void acrn_ioeventfd_deinit(struct acrn_vm *vm);
+
+int acrn_irqfd_init(struct acrn_vm *vm);
+int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args);
+void acrn_irqfd_deinit(struct acrn_vm *vm);
+
+#endif /* __ACRN_HSM_DRV_H */
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
new file mode 100644
index 000000000000..1f6b7c54a1a4
--- /dev/null
+++ b/drivers/virt/acrn/hsm.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN Hypervisor Service Module (HSM)
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Fengwei Yin <fengwei.yin@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/acrn.h>
+#include <asm/hypervisor.h>
+
+#include "acrn_drv.h"
+
+/*
+ * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
+ * represent a VM instance and continues to be associated with the opened file
+ * descriptor. All ioctl operations on this file descriptor will be targeted to
+ * the VM instance. Release of this file descriptor will destroy the object.
+ */
+static int acrn_dev_open(struct inode *inode, struct file *filp)
+{
+ struct acrn_vm *vm;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ vm->vmid = ACRN_INVALID_VMID;
+ filp->private_data = vm;
+ return 0;
+}
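
A minimal userspace sketch of that life cycle (illustrative only; it assumes the ACRN_IOCTL_* definitions from the <linux/acrn.h> uapi header that accompanies this driver, and elides the creation parameters a real device model must fill in):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/acrn.h>

	int main(void)
	{
		struct acrn_vm_creation create;
		int fd = open("/dev/acrn_hsm", O_RDWR);	/* one open fd == one VM */

		if (fd < 0)
			return 1;

		memset(&create, 0, sizeof(create));
		/* ... fill in the creation parameters defined in <linux/acrn.h> ... */
		if (ioctl(fd, ACRN_IOCTL_CREATE_VM, &create) < 0)
			return 1;

		ioctl(fd, ACRN_IOCTL_START_VM);	/* targets the VM bound to this fd */
		/* closing the fd later destroys the VM via acrn_dev_release() */
		return 0;
	}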
+
+static int pmcmd_ioctl(u64 cmd, void __user *uptr)
+{
+ struct acrn_pstate_data *px_data;
+ struct acrn_cstate_data *cx_data;
+ u64 *pm_info;
+ int ret = 0;
+
+ switch (cmd & PMCMD_TYPE_MASK) {
+ case ACRN_PMCMD_GET_PX_CNT:
+ case ACRN_PMCMD_GET_CX_CNT:
+ pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
+ if (ret < 0) {
+ kfree(pm_info);
+ break;
+ }
+
+ if (copy_to_user(uptr, pm_info, sizeof(u64)))
+ ret = -EFAULT;
+ kfree(pm_info);
+ break;
+ case ACRN_PMCMD_GET_PX_DATA:
+ px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ if (!px_data)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
+ if (ret < 0) {
+ kfree(px_data);
+ break;
+ }
+
+ if (copy_to_user(uptr, px_data, sizeof(*px_data)))
+ ret = -EFAULT;
+ kfree(px_data);
+ break;
+ case ACRN_PMCMD_GET_CX_DATA:
+ cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ if (!cx_data)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
+ if (ret < 0) {
+ kfree(cx_data);
+ break;
+ }
+
+ if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
+ ret = -EFAULT;
+ kfree(cx_data);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * HSM relies on the hypercall layer of the ACRN hypervisor to sanity-check
+ * the input parameters.
+ */
+static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long ioctl_param)
+{
+ struct acrn_vm *vm = filp->private_data;
+ struct acrn_vm_creation *vm_param;
+ struct acrn_vcpu_regs *cpu_regs;
+ struct acrn_ioreq_notify notify;
+ struct acrn_ptdev_irq *irq_info;
+ struct acrn_ioeventfd ioeventfd;
+ struct acrn_vm_memmap memmap;
+ struct acrn_msi_entry *msi;
+ struct acrn_pcidev *pcidev;
+ struct acrn_irqfd irqfd;
+ struct page *page;
+ u64 cstate_cmd;
+ int i, ret = 0;
+
+ if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
+ dev_dbg(acrn_dev.this_device,
+ "ioctl 0x%x: Invalid VM state!\n", cmd);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case ACRN_IOCTL_CREATE_VM:
+ vm_param = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_vm_creation));
+ if (IS_ERR(vm_param))
+ return PTR_ERR(vm_param);
+
+ if ((vm_param->reserved0 | vm_param->reserved1) != 0)
+ return -EINVAL;
+
+ vm = acrn_vm_create(vm, vm_param);
+ if (!vm) {
+ ret = -EINVAL;
+ kfree(vm_param);
+ break;
+ }
+
+ if (copy_to_user((void __user *)ioctl_param, vm_param,
+ sizeof(struct acrn_vm_creation))) {
+ acrn_vm_destroy(vm);
+ ret = -EFAULT;
+ }
+
+ kfree(vm_param);
+ break;
+ case ACRN_IOCTL_START_VM:
+ ret = hcall_start_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to start VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_PAUSE_VM:
+ ret = hcall_pause_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to pause VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_RESET_VM:
+ ret = hcall_reset_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to restart VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_DESTROY_VM:
+ ret = acrn_vm_destroy(vm);
+ break;
+ case ACRN_IOCTL_SET_VCPU_REGS:
+ cpu_regs = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_vcpu_regs));
+ if (IS_ERR(cpu_regs))
+ return PTR_ERR(cpu_regs);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
+ if (cpu_regs->reserved[i])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
+ if (cpu_regs->vcpu_regs.reserved_32[i])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
+ if (cpu_regs->vcpu_regs.reserved_64[i])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
+ if (cpu_regs->vcpu_regs.gdt.reserved[i] |
+ cpu_regs->vcpu_regs.idt.reserved[i])
+ return -EINVAL;
+
+ ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set regs state of VM%u!\n",
+ vm->vmid);
+ kfree(cpu_regs);
+ break;
+ case ACRN_IOCTL_SET_MEMSEG:
+ if (copy_from_user(&memmap, (void __user *)ioctl_param,
+ sizeof(memmap)))
+ return -EFAULT;
+
+ ret = acrn_vm_memseg_map(vm, &memmap);
+ break;
+ case ACRN_IOCTL_UNSET_MEMSEG:
+ if (copy_from_user(&memmap, (void __user *)ioctl_param,
+ sizeof(memmap)))
+ return -EFAULT;
+
+ ret = acrn_vm_memseg_unmap(vm, &memmap);
+ break;
+ case ACRN_IOCTL_ASSIGN_PCIDEV:
+ pcidev = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_pcidev));
+ if (IS_ERR(pcidev))
+ return PTR_ERR(pcidev);
+
+ ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to assign pci device!\n");
+ kfree(pcidev);
+ break;
+ case ACRN_IOCTL_DEASSIGN_PCIDEV:
+ pcidev = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_pcidev));
+ if (IS_ERR(pcidev))
+ return PTR_ERR(pcidev);
+
+ ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to deassign pci device!\n");
+ kfree(pcidev);
+ break;
+ case ACRN_IOCTL_SET_PTDEV_INTR:
+ irq_info = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_ptdev_irq));
+ if (IS_ERR(irq_info))
+ return PTR_ERR(irq_info);
+
+ ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to configure intr for ptdev!\n");
+ kfree(irq_info);
+ break;
+ case ACRN_IOCTL_RESET_PTDEV_INTR:
+ irq_info = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_ptdev_irq));
+ if (IS_ERR(irq_info))
+ return PTR_ERR(irq_info);
+
+ ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to reset intr for ptdev!\n");
+ kfree(irq_info);
+ break;
+ case ACRN_IOCTL_SET_IRQLINE:
+ ret = hcall_set_irqline(vm->vmid, ioctl_param);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set interrupt line!\n");
+ break;
+ case ACRN_IOCTL_INJECT_MSI:
+ msi = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_msi_entry));
+ if (IS_ERR(msi))
+ return PTR_ERR(msi);
+
+ ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to inject MSI!\n");
+ kfree(msi);
+ break;
+ case ACRN_IOCTL_VM_INTR_MONITOR:
+ ret = pin_user_pages_fast(ioctl_param, 1,
+ FOLL_WRITE | FOLL_LONGTERM, &page);
+ if (unlikely(ret != 1)) {
+ dev_dbg(acrn_dev.this_device,
+ "Failed to pin intr hdr buffer!\n");
+ return -EFAULT;
+ }
+
+ ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
+ if (ret < 0) {
+ unpin_user_page(page);
+ dev_dbg(acrn_dev.this_device,
+ "Failed to monitor intr data!\n");
+ return ret;
+ }
+ if (vm->monitor_page)
+ unpin_user_page(vm->monitor_page);
+ vm->monitor_page = page;
+ break;
+ case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
+ if (vm->default_client)
+ return -EEXIST;
+ if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
+ ret = -EINVAL;
+ break;
+ case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
+ if (vm->default_client)
+ acrn_ioreq_client_destroy(vm->default_client);
+ break;
+ case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
+ if (vm->default_client)
+ ret = acrn_ioreq_client_wait(vm->default_client);
+ else
+ ret = -ENODEV;
+ break;
+ case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
+ if (copy_from_user(&notify, (void __user *)ioctl_param,
+ sizeof(struct acrn_ioreq_notify)))
+ return -EFAULT;
+
+ if (notify.reserved != 0)
+ return -EINVAL;
+
+ ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
+ break;
+ case ACRN_IOCTL_CLEAR_VM_IOREQ:
+ acrn_ioreq_request_clear(vm);
+ break;
+ case ACRN_IOCTL_PM_GET_CPU_STATE:
+ if (copy_from_user(&cstate_cmd, (void *)ioctl_param,
+ sizeof(cstate_cmd)))
+ return -EFAULT;
+
+ ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
+ break;
+ case ACRN_IOCTL_IOEVENTFD:
+ if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
+ sizeof(ioeventfd)))
+ return -EFAULT;
+
+ if (ioeventfd.reserved != 0)
+ return -EINVAL;
+
+ ret = acrn_ioeventfd_config(vm, &ioeventfd);
+ break;
+ case ACRN_IOCTL_IRQFD:
+ if (copy_from_user(&irqfd, (void __user *)ioctl_param,
+ sizeof(irqfd)))
+ return -EFAULT;
+ ret = acrn_irqfd_config(vm, &irqfd);
+ break;
+ default:
+ dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static int acrn_dev_release(struct inode *inode, struct file *filp)
+{
+ struct acrn_vm *vm = filp->private_data;
+
+ acrn_vm_destroy(vm);
+ kfree(vm);
+ return 0;
+}
+
+static ssize_t remove_cpu_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 cpu, lapicid;
+ int ret;
+
+ if (kstrtoull(buf, 0, &cpu) < 0)
+ return -EINVAL;
+
+ if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
+ return -EINVAL;
+
+ if (cpu_online(cpu))
+ remove_cpu(cpu);
+
+ lapicid = cpu_data(cpu).apicid;
+ dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
+ ret = hcall_sos_remove_cpu(lapicid);
+ if (ret < 0) {
+ dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
+ goto fail_remove;
+ }
+
+ return count;
+
+fail_remove:
+ add_cpu(cpu);
+ return ret;
+}
+static DEVICE_ATTR_WO(remove_cpu);
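
For illustration, a userspace snippet that exercises this attribute; the sysfs path is the conventional one for a misc device with attribute groups (an assumption, not stated by the patch):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Offline vCPU 3 of the Service VM through remove_cpu_store(). */
		int fd = open("/sys/class/misc/acrn_hsm/remove_cpu", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "3", 1) != 1)	/* the value is parsed by kstrtoull() */
			return 1;
		close(fd);
		return 0;
	}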
+
+static struct attribute *acrn_attrs[] = {
+ &dev_attr_remove_cpu.attr,
+ NULL
+};
+
+static struct attribute_group acrn_attr_group = {
+ .attrs = acrn_attrs,
+};
+
+static const struct attribute_group *acrn_attr_groups[] = {
+ &acrn_attr_group,
+ NULL
+};
+
+static const struct file_operations acrn_fops = {
+ .owner = THIS_MODULE,
+ .open = acrn_dev_open,
+ .release = acrn_dev_release,
+ .unlocked_ioctl = acrn_dev_ioctl,
+};
+
+struct miscdevice acrn_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "acrn_hsm",
+ .fops = &acrn_fops,
+ .groups = acrn_attr_groups,
+};
+
+static int __init hsm_init(void)
+{
+ int ret;
+
+ if (x86_hyper_type != X86_HYPER_ACRN)
+ return -ENODEV;
+
+ if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
+ return -EPERM;
+
+ ret = misc_register(&acrn_dev);
+ if (ret) {
+ pr_err("Create misc dev failed!\n");
+ return ret;
+ }
+
+ ret = acrn_ioreq_intr_setup();
+ if (ret) {
+ pr_err("Setup I/O request handler failed!\n");
+ misc_deregister(&acrn_dev);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit hsm_exit(void)
+{
+ acrn_ioreq_intr_remove();
+ misc_deregister(&acrn_dev);
+}
+module_init(hsm_init);
+module_exit(hsm_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");
diff --git a/drivers/virt/acrn/hypercall.h b/drivers/virt/acrn/hypercall.h
new file mode 100644
index 000000000000..0cfad05bd1a9
--- /dev/null
+++ b/drivers/virt/acrn/hypercall.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ACRN HSM: hypercalls of ACRN Hypervisor
+ */
+#ifndef __ACRN_HSM_HYPERCALL_H
+#define __ACRN_HSM_HYPERCALL_H
+#include <asm/acrn.h>
+
+/*
+ * Hypercall IDs of the ACRN Hypervisor
+ */
+#define _HC_ID(x, y) (((x) << 24) | (y))
+
+#define HC_ID 0x80UL
+
+#define HC_ID_GEN_BASE 0x0UL
+#define HC_SOS_REMOVE_CPU _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01)
+
+#define HC_ID_VM_BASE 0x10UL
+#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00)
+#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01)
+#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02)
+#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03)
+#define HC_RESET_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05)
+#define HC_SET_VCPU_REGS _HC_ID(HC_ID, HC_ID_VM_BASE + 0x06)
+
+#define HC_ID_IRQ_BASE 0x20UL
+#define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03)
+#define HC_VM_INTR_MONITOR _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x04)
+#define HC_SET_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x05)
+
+#define HC_ID_IOREQ_BASE 0x30UL
+#define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00)
+#define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01)
+
+#define HC_ID_MEM_BASE 0x40UL
+#define HC_VM_SET_MEMORY_REGIONS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02)
+
+#define HC_ID_PCI_BASE 0x50UL
+#define HC_SET_PTDEV_INTR _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03)
+#define HC_RESET_PTDEV_INTR _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04)
+#define HC_ASSIGN_PCIDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x05)
+#define HC_DEASSIGN_PCIDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x06)
+
+#define HC_ID_PM_BASE 0x80UL
+#define HC_PM_GET_CPU_STATE _HC_ID(HC_ID, HC_ID_PM_BASE + 0x00)
+
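A worked expansion of one of these IDs, as a standalone sketch (values copied from the macros above):

	#include <stdio.h>

	int main(void)
	{
		/* HC_START_VM == _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) */
		unsigned long id = (0x80UL << 24) | (0x10UL + 0x02);

		printf("HC_START_VM = 0x%lx\n", id);	/* prints 0x80000012 */
		return 0;
	}
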
+/**
+ * hcall_sos_remove_cpu() - Remove a vCPU of Service VM
+ * @cpu: The vCPU to be removed
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_sos_remove_cpu(u64 cpu)
+{
+ return acrn_hypercall1(HC_SOS_REMOVE_CPU, cpu);
+}
+
+/**
+ * hcall_create_vm() - Create a User VM
+ * @vminfo: Service VM GPA of the User VM creation info
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_create_vm(u64 vminfo)
+{
+ return acrn_hypercall1(HC_CREATE_VM, vminfo);
+}
+
+/**
+ * hcall_start_vm() - Start a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_start_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_START_VM, vmid);
+}
+
+/**
+ * hcall_pause_vm() - Pause a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_pause_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_PAUSE_VM, vmid);
+}
+
+/**
+ * hcall_destroy_vm() - Destroy a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_destroy_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_DESTROY_VM, vmid);
+}
+
+/**
+ * hcall_reset_vm() - Reset a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_reset_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_RESET_VM, vmid);
+}
+
+/**
+ * hcall_set_vcpu_regs() - Set up registers of virtual CPU of a User VM
+ * @vmid: User VM ID
+ * @regs_state: Service VM GPA of registers state
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_vcpu_regs(u64 vmid, u64 regs_state)
+{
+ return acrn_hypercall2(HC_SET_VCPU_REGS, vmid, regs_state);
+}
+
+/**
+ * hcall_inject_msi() - Deliver an MSI interrupt to a User VM
+ * @vmid: User VM ID
+ * @msi: Service VM GPA of MSI message
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_inject_msi(u64 vmid, u64 msi)
+{
+ return acrn_hypercall2(HC_INJECT_MSI, vmid, msi);
+}
+
+/**
+ * hcall_vm_intr_monitor() - Set a shared page for User VM interrupt statistics
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the shared page
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_vm_intr_monitor(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_VM_INTR_MONITOR, vmid, addr);
+}
+
+/**
+ * hcall_set_irqline() - Set or clear an interrupt line
+ * @vmid: User VM ID
+ * @op: Service VM GPA of interrupt line operations
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_irqline(u64 vmid, u64 op)
+{
+ return acrn_hypercall2(HC_SET_IRQLINE, vmid, op);
+}
+
+/**
+ * hcall_set_ioreq_buffer() - Set up the shared buffer for I/O Requests.
+ * @vmid: User VM ID
+ * @buffer: Service VM GPA of the shared buffer
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_ioreq_buffer(u64 vmid, u64 buffer)
+{
+ return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer);
+}
+
+/**
+ * hcall_notify_req_finish() - Notify ACRN Hypervisor of I/O request completion.
+ * @vmid: User VM ID
+ * @vcpu: The vCPU which initiated the I/O request
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_notify_req_finish(u64 vmid, u64 vcpu)
+{
+ return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu);
+}
+
+/**
+ * hcall_set_memory_regions() - Inform the hypervisor to set up EPT mappings
+ * @regions_pa: Service VM GPA of &struct vm_memory_region_batch
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_memory_regions(u64 regions_pa)
+{
+ return acrn_hypercall1(HC_VM_SET_MEMORY_REGIONS, regions_pa);
+}
+
+/**
+ * hcall_assign_pcidev() - Assign a PCI device to a User VM
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the &struct acrn_pcidev
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_assign_pcidev(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_ASSIGN_PCIDEV, vmid, addr);
+}
+
+/**
+ * hcall_deassign_pcidev() - De-assign a PCI device from a User VM
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the &struct acrn_pcidev
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_deassign_pcidev(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_DEASSIGN_PCIDEV, vmid, addr);
+}
+
+/**
+ * hcall_set_ptdev_intr() - Configure an interrupt for an assigned PCI device.
+ * @vmid: User VM ID
+ * @irq: Service VM GPA of the &struct acrn_ptdev_irq
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_ptdev_intr(u64 vmid, u64 irq)
+{
+ return acrn_hypercall2(HC_SET_PTDEV_INTR, vmid, irq);
+}
+
+/**
+ * hcall_reset_ptdev_intr() - Reset an interrupt for an assigned PCI device.
+ * @vmid: User VM ID
+ * @irq: Service VM GPA of the &struct acrn_ptdev_irq
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_reset_ptdev_intr(u64 vmid, u64 irq)
+{
+ return acrn_hypercall2(HC_RESET_PTDEV_INTR, vmid, irq);
+}
+
+/**
+ * hcall_get_cpu_state() - Get P-states and C-states info from the hypervisor
+ * @cmd: Command of getting the CPU state (an ACRN_PMCMD_* request)
+ * @state: Service VM GPA of buffer of P-states and C-states
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_get_cpu_state(u64 cmd, u64 state)
+{
+ return acrn_hypercall2(HC_PM_GET_CPU_STATE, cmd, state);
+}
+
+#endif /* __ACRN_HSM_HYPERCALL_H */
diff --git a/drivers/virt/acrn/ioeventfd.c b/drivers/virt/acrn/ioeventfd.c
new file mode 100644
index 000000000000..ac4037e9f947
--- /dev/null
+++ b/drivers/virt/acrn/ioeventfd.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN HSM eventfd - use eventfd objects to signal expected I/O requests
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Shuo Liu <shuo.a.liu@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/eventfd.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+/**
+ * struct hsm_ioeventfd - Properties of HSM ioeventfd
+ * @list: Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
+ * @eventfd: Eventfd of the HSM ioeventfd
+ * @addr: Address of I/O range
+ * @data: Data for matching
+ * @length: Length of I/O range
+ * @type: Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
+ * @wildcard: Data matching or not
+ */
+struct hsm_ioeventfd {
+ struct list_head list;
+ struct eventfd_ctx *eventfd;
+ u64 addr;
+ u64 data;
+ int length;
+ int type;
+ bool wildcard;
+};
+
+static inline int ioreq_type_from_flags(int flags)
+{
+ return flags & ACRN_IOEVENTFD_FLAG_PIO ?
+ ACRN_IOREQ_TYPE_PORTIO : ACRN_IOREQ_TYPE_MMIO;
+}
+
+static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
+{
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ eventfd_ctx_put(p->eventfd);
+ list_del(&p->list);
+ kfree(p);
+}
+
+static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
+ struct hsm_ioeventfd *ioeventfd)
+{
+ struct hsm_ioeventfd *p;
+
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ /* If either one is a wildcard, the data matching will be skipped. */
+ list_for_each_entry(p, &vm->ioeventfds, list)
+ if (p->eventfd == ioeventfd->eventfd &&
+ p->addr == ioeventfd->addr &&
+ p->type == ioeventfd->type &&
+ (p->wildcard || ioeventfd->wildcard ||
+ p->data == ioeventfd->data))
+ return true;
+
+ return false;
+}
+
+/*
+ * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
+ * eventfd. The properties of the HSM ioeventfd are built from a &struct
+ * acrn_ioeventfd.
+ */
+static int acrn_ioeventfd_assign(struct acrn_vm *vm,
+ struct acrn_ioeventfd *args)
+{
+ struct eventfd_ctx *eventfd;
+ struct hsm_ioeventfd *p;
+ int ret;
+
+ /* Check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /*
+ * Currently, acrn_ioeventfd is used to support vhost. Accesses of 1, 2,
+ * 4 or 8 bytes in width cover vhost's requirements.
+ */
+ if (!(args->len == 1 || args->len == 2 ||
+ args->len == 4 || args->len == 8))
+ return -EINVAL;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&p->list);
+ p->addr = args->addr;
+ p->length = args->len;
+ p->eventfd = eventfd;
+ p->type = ioreq_type_from_flags(args->flags);
+
+ /*
+ * The ACRN_IOEVENTFD_FLAG_DATAMATCH flag is set in virtio 1.0
+ * support, where the writing of the notification register of each
+ * virtqueue may trigger the notification. Otherwise there is no
+ * data matching requirement and the ioeventfd acts as a wildcard.
+ */
+ if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
+ p->data = args->data;
+ else
+ p->wildcard = true;
+
+ mutex_lock(&vm->ioeventfds_lock);
+
+ if (hsm_ioeventfd_is_conflict(vm, p)) {
+ ret = -EEXIST;
+ goto unlock_fail;
+ }
+
+ /* register the I/O range into ioreq client */
+ ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
+ p->addr, p->addr + p->length - 1);
+ if (ret < 0)
+ goto unlock_fail;
+
+ list_add_tail(&p->list, &vm->ioeventfds);
+ mutex_unlock(&vm->ioeventfds_lock);
+
+ return 0;
+
+unlock_fail:
+ mutex_unlock(&vm->ioeventfds_lock);
+ kfree(p);
+fail:
+ eventfd_ctx_put(eventfd);
+ return ret;
+}
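
A minimal userspace sketch of registering such an ioeventfd (illustrative; it relies on struct acrn_ioeventfd and the flag/ioctl names from <linux/acrn.h>, and the port number is an arbitrary example):

	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/acrn.h>

	static int add_ioeventfd(int hsm_fd)
	{
		struct acrn_ioeventfd args;

		memset(&args, 0, sizeof(args));
		args.fd = eventfd(0, EFD_NONBLOCK);
		args.flags = ACRN_IOEVENTFD_FLAG_PIO;	/* port I/O, wildcard match */
		args.addr = 0x6000;			/* arbitrary example port */
		args.len = 2;				/* must be 1, 2, 4 or 8 */
		return ioctl(hsm_fd, ACRN_IOCTL_IOEVENTFD, &args);
	}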
+
+static int acrn_ioeventfd_deassign(struct acrn_vm *vm,
+ struct acrn_ioeventfd *args)
+{
+ struct hsm_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&vm->ioeventfds_lock);
+ list_for_each_entry(p, &vm->ioeventfds, list) {
+ if (p->eventfd != eventfd)
+ continue;
+
+ acrn_ioreq_range_del(vm->ioeventfd_client, p->type,
+ p->addr, p->addr + p->length - 1);
+ acrn_ioeventfd_shutdown(vm, p);
+ break;
+ }
+ mutex_unlock(&vm->ioeventfds_lock);
+
+ eventfd_ctx_put(eventfd);
+ return 0;
+}
+
+static struct hsm_ioeventfd *hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr,
+ u64 data, int len, int type)
+{
+ struct hsm_ioeventfd *p = NULL;
+
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ list_for_each_entry(p, &vm->ioeventfds, list) {
+ if (p->type == type && p->addr == addr && p->length >= len &&
+ (p->wildcard || p->data == data))
+ return p;
+ }
+
+ return NULL;
+}
+
+static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
+ struct acrn_io_request *req)
+{
+ struct hsm_ioeventfd *p;
+ u64 addr, val;
+ int size;
+
+ if (req->type == ACRN_IOREQ_TYPE_MMIO) {
+ /*
+ * I/O requests are dispatched by range check only, so an
+ * acrn_ioreq_client needs to process both READ and WRITE
+ * accesses of the same range. READ accesses are safe to be
+ * ignored here because virtio PCI devices write the notify
+ * registers for notification.
+ */
+ if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
+ /* reading does nothing and returns 0 */
+ req->reqs.mmio_request.value = 0;
+ return 0;
+ }
+ addr = req->reqs.mmio_request.address;
+ size = req->reqs.mmio_request.size;
+ val = req->reqs.mmio_request.value;
+ } else {
+ if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) {
+ /* reading does nothing and returns 0 */
+ req->reqs.pio_request.value = 0;
+ return 0;
+ }
+ addr = req->reqs.pio_request.address;
+ size = req->reqs.pio_request.size;
+ val = req->reqs.pio_request.value;
+ }
+
+ mutex_lock(&client->vm->ioeventfds_lock);
+ p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
+ if (p)
+ eventfd_signal(p->eventfd, 1);
+ mutex_unlock(&client->vm->ioeventfds_lock);
+
+ return 0;
+}
+
+int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
+{
+ int ret;
+
+ if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
+ ret = acrn_ioeventfd_deassign(vm, args);
+ else
+ ret = acrn_ioeventfd_assign(vm, args);
+
+ return ret;
+}
+
+int acrn_ioeventfd_init(struct acrn_vm *vm)
+{
+ char name[ACRN_NAME_LEN];
+
+ mutex_init(&vm->ioeventfds_lock);
+ INIT_LIST_HEAD(&vm->ioeventfds);
+ snprintf(name, sizeof(name), "ioeventfd-%u", vm->vmid);
+ vm->ioeventfd_client = acrn_ioreq_client_create(vm,
+ acrn_ioeventfd_handler,
+ NULL, false, name);
+ if (!vm->ioeventfd_client) {
+ dev_err(acrn_dev.this_device, "Failed to create ioeventfd ioreq client!\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(acrn_dev.this_device, "VM %u ioeventfd init.\n", vm->vmid);
+ return 0;
+}
+
+void acrn_ioeventfd_deinit(struct acrn_vm *vm)
+{
+ struct hsm_ioeventfd *p, *next;
+
+ dev_dbg(acrn_dev.this_device, "VM %u ioeventfd deinit.\n", vm->vmid);
+ acrn_ioreq_client_destroy(vm->ioeventfd_client);
+ mutex_lock(&vm->ioeventfds_lock);
+ list_for_each_entry_safe(p, next, &vm->ioeventfds, list)
+ acrn_ioeventfd_shutdown(vm, p);
+ mutex_unlock(&vm->ioeventfds_lock);
+}
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
new file mode 100644
index 000000000000..80b2e3f0e276
--- /dev/null
+++ b/drivers/virt/acrn/ioreq.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN_HSM: Handle I/O requests
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Jason Chen CJ <jason.cj.chen@intel.com>
+ * Fengwei Yin <fengwei.yin@intel.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/acrn.h>
+
+#include "acrn_drv.h"
+
+static void ioreq_pause(void);
+static void ioreq_resume(void);
+
+static void ioreq_dispatcher(struct work_struct *work);
+static struct workqueue_struct *ioreq_wq;
+static DECLARE_WORK(ioreq_work, ioreq_dispatcher);
+
+static inline bool has_pending_request(struct acrn_ioreq_client *client)
+{
+ return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
+}
+
+static inline bool is_destroying(struct acrn_ioreq_client *client)
+{
+ return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
+}
+
+static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
+ struct acrn_io_request *acrn_req)
+{
+ bool polling_mode;
+ int ret = 0;
+
+ polling_mode = acrn_req->completion_polling;
+ /* Add barrier() to make sure the writes are done before completion */
+ smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);
+
+ /*
+ * To fulfill the requirement of real-time in several industry
+ * scenarios, like automotive, ACRN can run under the partition mode,
+ * in which User VMs and Service VM are bound to dedicated CPU cores.
+ * Polling mode of handling the I/O request is introduced to achieve a
+ * faster I/O request handling. In polling mode, the hypervisor polls
+ * I/O request's completion. Once an I/O request is marked as
+ * ACRN_IOREQ_STATE_COMPLETE, the hypervisor resumes from the polling
+ * point to continue the I/O request flow. Thus, the completion
+ * notification of the I/O request from HSM is not needed. Please note
+ * that completion_polling needs to be read before the I/O request is
+ * marked as ACRN_IOREQ_STATE_COMPLETE to avoid racing with the
+ * hypervisor.
+ */
+ if (!polling_mode) {
+ ret = hcall_notify_req_finish(vm->vmid, vcpu);
+ if (ret < 0)
+ dev_err(acrn_dev.this_device,
+ "Notify I/O request finished failed!\n");
+ }
+
+ return ret;
+}
+
+static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
+ u16 vcpu,
+ struct acrn_io_request *acrn_req)
+{
+ int ret;
+
+ if (vcpu >= client->vm->vcpu_num)
+ return -EINVAL;
+
+ clear_bit(vcpu, client->ioreqs_map);
+ if (!acrn_req) {
+ acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
+ acrn_req += vcpu;
+ }
+
+ ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
+
+ return ret;
+}
+
+int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
+{
+ int ret = 0;
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (vm->default_client)
+ ret = acrn_ioreq_complete_request(vm->default_client,
+ vcpu, NULL);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ return ret;
+}
+
+/**
+ * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ *
+ * Return: 0 on success, <0 on error
+ */
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ if (end < start) {
+ dev_err(acrn_dev.this_device,
+ "Invalid IO range [0x%llx,0x%llx]\n", start, end);
+ return -EINVAL;
+ }
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->type = type;
+ range->start = start;
+ range->end = end;
+
+ write_lock_bh(&client->range_lock);
+ list_add(&range->list, &client->range_list);
+ write_unlock_bh(&client->range_lock);
+
+ return 0;
+}
+
+/**
+ * acrn_ioreq_range_del() - Del an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ */
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ write_lock_bh(&client->range_lock);
+ list_for_each_entry(range, &client->range_list, list) {
+ if (type == range->type &&
+ start == range->start &&
+ end == range->end) {
+ list_del(&range->list);
+ kfree(range);
+ break;
+ }
+ }
+ write_unlock_bh(&client->range_lock);
+}
+
+/*
+ * ioreq_task() is the execution entity of handler thread of an I/O client.
+ * The handler callback of the I/O client is called within the handler thread.
+ */
+static int ioreq_task(void *data)
+{
+ struct acrn_ioreq_client *client = data;
+ struct acrn_io_request *req;
+ unsigned long *ioreqs_map;
+ int vcpu, ret;
+
+ /*
+ * Lockless access to ioreqs_map is safe, because
+ * 1) set_bit() and clear_bit() are atomic operations.
+ * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
+ * set_bit() - in ioreq_work handler
+ * Handler callback handles corresponding I/O request
+ * clear_bit() - in handler thread (including ACRN userspace)
+ * Mark corresponding I/O request completed
+ * Loop again if a new I/O request occurs
+ */
+ ioreqs_map = client->ioreqs_map;
+ while (!kthread_should_stop()) {
+ acrn_ioreq_client_wait(client);
+ while (has_pending_request(client)) {
+ vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
+ req = client->vm->ioreq_buf->req_slot + vcpu;
+ ret = client->handler(client, req);
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device,
+ "IO handle failure: %d\n", ret);
+ break;
+ }
+ acrn_ioreq_complete_request(client, vcpu, req);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For the non-default I/O clients, give them a chance to complete their
+ * current I/O requests if there are any. For the default I/O client, it is
+ * safe to clear all pending I/O requests because the clearing request comes
+ * from ACRN userspace.
+ */
+void acrn_ioreq_request_clear(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client;
+ bool has_pending = false;
+ unsigned long vcpu;
+ int retry = 10;
+
+ /*
+ * I/O requests of this VM will be completed directly in
+ * acrn_ioreq_dispatch if the ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
+ */
+ set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
+
+ /*
+ * acrn_ioreq_request_clear is only called in the VM reset case. Simply
+ * wait 100ms in total for the I/O requests' completion.
+ */
+ do {
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ list_for_each_entry(client, &vm->ioreq_clients, list) {
+ has_pending = has_pending_request(client);
+ if (has_pending)
+ break;
+ }
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ if (has_pending)
+ schedule_timeout_interruptible(HZ / 100);
+ } while (has_pending && --retry > 0);
+ if (retry == 0)
+ dev_warn(acrn_dev.this_device,
+ "%s cannot flush pending request!\n", client->name);
+
+ /* Clear all ioreqs belonging to the default client */
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ client = vm->default_client;
+ if (client) {
+ vcpu = find_next_bit(client->ioreqs_map,
+ ACRN_IO_REQUEST_MAX, 0);
+ while (vcpu < ACRN_IO_REQUEST_MAX) {
+ acrn_ioreq_complete_request(client, vcpu, NULL);
+ vcpu = find_next_bit(client->ioreqs_map,
+ ACRN_IO_REQUEST_MAX, vcpu + 1);
+ }
+ }
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ /* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
+ clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
+}
+
+int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
+{
+ if (client->is_default) {
+ /*
+ * In the default client, a user space thread waits on the
+ * waitqueue. The is_destroying() check is used to notify user
+ * space the client is going to be destroyed.
+ */
+ wait_event_interruptible(client->wq,
+ has_pending_request(client) ||
+ is_destroying(client));
+ if (is_destroying(client))
+ return -ENODEV;
+ } else {
+ wait_event_interruptible(client->wq,
+ has_pending_request(client) ||
+ kthread_should_stop());
+ }
+
+ return 0;
+}
+
+static bool is_cfg_addr(struct acrn_io_request *req)
+{
+ return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
+ (req->reqs.pio_request.address == 0xcf8));
+}
+
+static bool is_cfg_data(struct acrn_io_request *req)
+{
+ return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
+ ((req->reqs.pio_request.address >= 0xcfc) &&
+ (req->reqs.pio_request.address < (0xcfc + 4))));
+}
+
+/* The low 8 bits of a supported pci_reg address */
+#define PCI_LOWREG_MASK 0xFC
+/* The high 4 bits of a supported pci_reg address */
+#define PCI_HIGHREG_MASK 0xF00
+/* Max number of supported functions */
+#define PCI_FUNCMAX 7
+/* Max number of supported slots */
+#define PCI_SLOTMAX 31
+/* Max number of supported buses */
+#define PCI_BUSMAX 255
+#define CONF1_ENABLE 0x80000000UL
+/*
+ * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally has the
+ * following two steps:
+ * 1) write the address into port 0xCF8
+ * 2) access the data in/from port 0xCFC
+ * This function combines such paired PCI configuration space I/O requests into
+ * one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the processing.
+ */
+static bool handle_cf8cfc(struct acrn_vm *vm,
+ struct acrn_io_request *req, u16 vcpu)
+{
+ int offset, pci_cfg_addr, pci_reg;
+ bool is_handled = false;
+
+ if (is_cfg_addr(req)) {
+ WARN_ON(req->reqs.pio_request.size != 4);
+ if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
+ vm->pci_conf_addr = req->reqs.pio_request.value;
+ else
+ req->reqs.pio_request.value = vm->pci_conf_addr;
+ is_handled = true;
+ } else if (is_cfg_data(req)) {
+ if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
+ if (req->reqs.pio_request.direction ==
+ ACRN_IOREQ_DIR_READ)
+ req->reqs.pio_request.value = 0xffffffff;
+ is_handled = true;
+ } else {
+ offset = req->reqs.pio_request.address - 0xcfc;
+
+ req->type = ACRN_IOREQ_TYPE_PCICFG;
+ pci_cfg_addr = vm->pci_conf_addr;
+ req->reqs.pci_request.bus =
+ (pci_cfg_addr >> 16) & PCI_BUSMAX;
+ req->reqs.pci_request.dev =
+ (pci_cfg_addr >> 11) & PCI_SLOTMAX;
+ req->reqs.pci_request.func =
+ (pci_cfg_addr >> 8) & PCI_FUNCMAX;
+ pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
+ ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
+ req->reqs.pci_request.reg = pci_reg + offset;
+ }
+ }
+
+ if (is_handled)
+ ioreq_complete_request(vm, vcpu, req);
+
+ return is_handled;
+}
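
A worked decode of one CF8 value, following the masks above (standalone sketch; the address is an arbitrary example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cf8 = 0x80001810;	/* CONF1_ENABLE | dev 3 | reg 0x10 */
		unsigned int bus  = (cf8 >> 16) & 0xff;	/* PCI_BUSMAX */
		unsigned int dev  = (cf8 >> 11) & 0x1f;	/* PCI_SLOTMAX */
		unsigned int func = (cf8 >> 8)  & 0x07;	/* PCI_FUNCMAX */
		unsigned int reg  = (cf8 & 0xfc) | ((cf8 >> 16) & 0xf00);

		/* prints "00:03.0 reg 0x10" */
		printf("%02x:%02x.%x reg 0x%02x\n", bus, dev, func, reg);
		return 0;
	}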
+
+static bool in_range(struct acrn_ioreq_range *range,
+ struct acrn_io_request *req)
+{
+ bool ret = false;
+
+ if (range->type == req->type) {
+ switch (req->type) {
+ case ACRN_IOREQ_TYPE_MMIO:
+ if (req->reqs.mmio_request.address >= range->start &&
+ (req->reqs.mmio_request.address +
+ req->reqs.mmio_request.size - 1) <= range->end)
+ ret = true;
+ break;
+ case ACRN_IOREQ_TYPE_PORTIO:
+ if (req->reqs.pio_request.address >= range->start &&
+ (req->reqs.pio_request.address +
+ req->reqs.pio_request.size - 1) <= range->end)
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
+ struct acrn_io_request *req)
+{
+ struct acrn_ioreq_client *client, *found = NULL;
+ struct acrn_ioreq_range *range;
+
+ lockdep_assert_held(&vm->ioreq_clients_lock);
+
+ list_for_each_entry(client, &vm->ioreq_clients, list) {
+ read_lock_bh(&client->range_lock);
+ list_for_each_entry(range, &client->range_list, list) {
+ if (in_range(range, req)) {
+ found = client;
+ break;
+ }
+ }
+ read_unlock_bh(&client->range_lock);
+ if (found)
+ break;
+ }
+ return found ? found : vm->default_client;
+}
+
+/**
+ * acrn_ioreq_client_create() - Create an ioreq client
+ * @vm: The VM that this client belongs to
+ * @handler: The ioreq_handler of the ioreq client. acrn_hsm will create a
+ * kernel thread and call the handler to handle I/O requests.
+ * @priv: Private data for the handler
+ * @is_default: If it is the default client
+ * @name: The name of ioreq client
+ *
+ * Return: acrn_ioreq_client pointer on success, NULL on error
+ */
+struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
+ ioreq_handler_t handler,
+ void *priv, bool is_default,
+ const char *name)
+{
+ struct acrn_ioreq_client *client;
+
+ if (!handler && !is_default) {
+ dev_dbg(acrn_dev.this_device,
+ "Cannot create non-default client w/o handler!\n");
+ return NULL;
+ }
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->handler = handler;
+ client->vm = vm;
+ client->priv = priv;
+ client->is_default = is_default;
+ if (name)
+ strncpy(client->name, name, sizeof(client->name) - 1);
+ rwlock_init(&client->range_lock);
+ INIT_LIST_HEAD(&client->range_list);
+ init_waitqueue_head(&client->wq);
+
+ if (client->handler) {
+ client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
+ client->vm->vmid, client->name);
+ if (IS_ERR(client->thread)) {
+ kfree(client);
+ return NULL;
+ }
+ }
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (is_default)
+ vm->default_client = client;
+ else
+ list_add(&client->list, &vm->ioreq_clients);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
+ return client;
+}
+
+/**
+ * acrn_ioreq_client_destroy() - Destroy an ioreq client
+ * @client: The ioreq client
+ */
+void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
+{
+ struct acrn_ioreq_range *range, *next;
+ struct acrn_vm *vm = client->vm;
+
+ dev_dbg(acrn_dev.this_device,
+ "Destroy ioreq client %s.\n", client->name);
+ ioreq_pause();
+ set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
+ if (client->is_default)
+ wake_up_interruptible(&client->wq);
+ else
+ kthread_stop(client->thread);
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (client->is_default)
+ vm->default_client = NULL;
+ else
+ list_del(&client->list);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ write_lock_bh(&client->range_lock);
+ list_for_each_entry_safe(range, next, &client->range_list, list) {
+ list_del(&range->list);
+ kfree(range);
+ }
+ write_unlock_bh(&client->range_lock);
+ kfree(client);
+
+ ioreq_resume();
+}
+
+static int acrn_ioreq_dispatch(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client;
+ struct acrn_io_request *req;
+ int i;
+
+ for (i = 0; i < vm->vcpu_num; i++) {
+ req = vm->ioreq_buf->req_slot + i;
+
+ /* barrier the read of processed of acrn_io_request */
+ if (smp_load_acquire(&req->processed) ==
+ ACRN_IOREQ_STATE_PENDING) {
+ /* Complete the IO request directly in clearing stage */
+ if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
+ ioreq_complete_request(vm, i, req);
+ continue;
+ }
+ if (handle_cf8cfc(vm, req, i))
+ continue;
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ client = find_ioreq_client(vm, req);
+ if (!client) {
+ dev_err(acrn_dev.this_device,
+ "Failed to find ioreq client!\n");
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+ return -EINVAL;
+ }
+ if (!client->is_default)
+ req->kernel_handled = 1;
+ else
+ req->kernel_handled = 0;
+ /*
+ * Add barrier() to make sure the writes are done
+ * before setting ACRN_IOREQ_STATE_PROCESSING
+ */
+ smp_store_release(&req->processed,
+ ACRN_IOREQ_STATE_PROCESSING);
+ set_bit(i, client->ioreqs_map);
+ wake_up_interruptible(&client->wq);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void ioreq_dispatcher(struct work_struct *work)
+{
+ struct acrn_vm *vm;
+
+ read_lock(&acrn_vm_list_lock);
+ list_for_each_entry(vm, &acrn_vm_list, list) {
+ if (!vm->ioreq_buf)
+ break;
+ acrn_ioreq_dispatch(vm);
+ }
+ read_unlock(&acrn_vm_list_lock);
+}
+
+static void ioreq_intr_handler(void)
+{
+ queue_work(ioreq_wq, &ioreq_work);
+}
+
+static void ioreq_pause(void)
+{
+ /* Flush and unarm the handler to ensure no I/O requests pending */
+ acrn_remove_intr_handler();
+ drain_workqueue(ioreq_wq);
+}
+
+static void ioreq_resume(void)
+{
+ /* Schedule after enabling in case an interrupt was missed meanwhile */
+ acrn_setup_intr_handler(ioreq_intr_handler);
+ queue_work(ioreq_wq, &ioreq_work);
+}
+
+int acrn_ioreq_intr_setup(void)
+{
+ acrn_setup_intr_handler(ioreq_intr_handler);
+ ioreq_wq = alloc_workqueue("ioreq_wq",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ioreq_wq) {
+ dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
+ acrn_remove_intr_handler();
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void acrn_ioreq_intr_remove(void)
+{
+ if (ioreq_wq)
+ destroy_workqueue(ioreq_wq);
+ acrn_remove_intr_handler();
+}
+
+int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
+{
+ struct acrn_ioreq_buffer *set_buffer;
+ struct page *page;
+ int ret;
+
+ if (vm->ioreq_buf)
+ return -EEXIST;
+
+ set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
+ if (!set_buffer)
+ return -ENOMEM;
+
+ ret = pin_user_pages_fast(buf_vma, 1,
+ FOLL_WRITE | FOLL_LONGTERM, &page);
+ if (unlikely(ret != 1) || !page) {
+ dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ vm->ioreq_buf = page_address(page);
+ vm->ioreq_page = page;
+ set_buffer->ioreq_buf = page_to_phys(page);
+ ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
+ unpin_user_page(page);
+ vm->ioreq_buf = NULL;
+ goto free_buf;
+ }
+
+ dev_dbg(acrn_dev.this_device,
+ "Init ioreq buffer %pK!\n", vm->ioreq_buf);
+ ret = 0;
+free_buf:
+ kfree(set_buffer);
+ return ret;
+}
+
+void acrn_ioreq_deinit(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client, *next;
+
+ dev_dbg(acrn_dev.this_device,
+ "Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
+ /* Destroy all clients belonging to this VM */
+ list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
+ acrn_ioreq_client_destroy(client);
+ if (vm->default_client)
+ acrn_ioreq_client_destroy(vm->default_client);
+
+ if (vm->ioreq_buf && vm->ioreq_page) {
+ unpin_user_page(vm->ioreq_page);
+ vm->ioreq_buf = NULL;
+ }
+}
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
new file mode 100644
index 000000000000..a8766d528e29
--- /dev/null
+++ b/drivers/virt/acrn/irqfd.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Shuo Liu <shuo.a.liu@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+static LIST_HEAD(acrn_irqfd_clients);
+static DEFINE_MUTEX(acrn_irqfds_mutex);
+
+/**
+ * struct hsm_irqfd - Properties of HSM irqfd
+ * @vm: Associated VM pointer
+ * @wait: Entry of wait-queue
+ * @shutdown: Async shutdown work
+ * @eventfd: Associated eventfd
+ * @list: Entry within &acrn_vm.irqfds of irqfds of a VM
+ * @pt: Structure for select/poll on the associated eventfd
+ * @msi: MSI data
+ */
+struct hsm_irqfd {
+ struct acrn_vm *vm;
+ wait_queue_entry_t wait;
+ struct work_struct shutdown;
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ poll_table pt;
+ struct acrn_msi_entry msi;
+};
+
+static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
+{
+ struct acrn_vm *vm = irqfd->vm;
+
+ acrn_msi_inject(vm, irqfd->msi.msi_addr,
+ irqfd->msi.msi_data);
+}
+
+static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
+{
+ u64 cnt;
+
+ lockdep_assert_held(&irqfd->vm->irqfds_lock);
+
+ /* remove from wait queue */
+ list_del_init(&irqfd->list);
+ eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
+ eventfd_ctx_put(irqfd->eventfd);
+ kfree(irqfd);
+}
+
+static void hsm_irqfd_shutdown_work(struct work_struct *work)
+{
+ struct hsm_irqfd *irqfd;
+ struct acrn_vm *vm;
+
+ irqfd = container_of(work, struct hsm_irqfd, shutdown);
+ vm = irqfd->vm;
+ mutex_lock(&vm->irqfds_lock);
+ if (!list_empty(&irqfd->list))
+ hsm_irqfd_shutdown(irqfd);
+ mutex_unlock(&vm->irqfds_lock);
+}
+
+/* Called with wqh->lock held and interrupts disabled */
+static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ unsigned long poll_bits = (unsigned long)key;
+ struct hsm_irqfd *irqfd;
+ struct acrn_vm *vm;
+
+ irqfd = container_of(wait, struct hsm_irqfd, wait);
+ vm = irqfd->vm;
+ if (poll_bits & POLLIN)
+ /* An event has been signaled, inject an interrupt */
+ acrn_irqfd_inject(irqfd);
+
+ if (poll_bits & POLLHUP)
+ /*
+ * Defer shutdown to a work thread; wqh->lock is held here and
+ * the teardown path needs to take it again.
+ */
+ queue_work(vm->irqfd_wq, &irqfd->shutdown);
+
+ return 0;
+}
+
+static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct hsm_irqfd *irqfd;
+
+ irqfd = container_of(pt, struct hsm_irqfd, pt);
+ add_wait_queue(wqh, &irqfd->wait);
+}
+
+/*
+ * Assign an eventfd to a VM and create an HSM irqfd associated with the
+ * eventfd. The properties of the HSM irqfd are built from a &struct
+ * acrn_irqfd.
+ */
+static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
+{
+ struct eventfd_ctx *eventfd = NULL;
+ struct hsm_irqfd *irqfd, *tmp;
+ unsigned int events;
+ struct fd f;
+ int ret = 0;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->vm = vm;
+ memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
+ INIT_LIST_HEAD(&irqfd->list);
+ INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
+
+ f = fdget(args->fd);
+ if (!f.file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ eventfd = eventfd_ctx_fileget(f.file);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->eventfd = eventfd;
+
+ /*
+ * Install custom wake-up handling to be notified whenever underlying
+ * eventfd is signaled.
+ */
+ init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
+ init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);
+
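+ /* Only one irqfd per eventfd is allowed for a VM */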
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry(tmp, &vm->irqfds, list) {
+ if (irqfd->eventfd != tmp->eventfd)
+ continue;
+ ret = -EBUSY;
+ mutex_unlock(&vm->irqfds_lock);
+ goto fail;
+ }
+ list_add_tail(&irqfd->list, &vm->irqfds);
+ mutex_unlock(&vm->irqfds_lock);
+
+ /* Poll the eventfd: this registers our wait entry and reports any pending event */
+ events = f.file->f_op->poll(f.file, &irqfd->pt);
+
+ if (events & POLLIN)
+ acrn_irqfd_inject(irqfd);
+
+ fdput(f);
+ return 0;
+fail:
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ fdput(f);
+out:
+ kfree(irqfd);
+ return ret;
+}
+
+static int acrn_irqfd_deassign(struct acrn_vm *vm,
+ struct acrn_irqfd *args)
+{
+ struct hsm_irqfd *irqfd, *tmp;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
+ if (irqfd->eventfd == eventfd) {
+ hsm_irqfd_shutdown(irqfd);
+ break;
+ }
+ }
+ mutex_unlock(&vm->irqfds_lock);
+ eventfd_ctx_put(eventfd);
+
+ return 0;
+}
+
+int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
+{
+ int ret;
+
+ if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
+ ret = acrn_irqfd_deassign(vm, args);
+ else
+ ret = acrn_irqfd_assign(vm, args);
+
+ return ret;
+}
+
+int acrn_irqfd_init(struct acrn_vm *vm)
+{
+ INIT_LIST_HEAD(&vm->irqfds);
+ mutex_init(&vm->irqfds_lock);
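+ /* Per-VM workqueue used to defer irqfd shutdown out of the wakeup path */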
+ vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
+ if (!vm->irqfd_wq)
+ return -ENOMEM;
+
+ dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
+ return 0;
+}
+
+void acrn_irqfd_deinit(struct acrn_vm *vm)
+{
+ struct hsm_irqfd *irqfd, *next;
+
+ dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
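+ /* destroy_workqueue() drains any pending shutdown work first */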
+ destroy_workqueue(vm->irqfd_wq);
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
+ hsm_irqfd_shutdown(irqfd);
+ mutex_unlock(&vm->irqfds_lock);
+}
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
new file mode 100644
index 000000000000..c4f2e15c8a2b
--- /dev/null
+++ b/drivers/virt/acrn/mm.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN: Memory mapping management
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Fei Li <lei1.li@intel.com>
+ * Shuo Liu <shuo.a.liu@intel.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
+{
+ struct vm_memory_region_batch *regions;
+ int ret;
+
+ regions = kzalloc(sizeof(*regions), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
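+ /* The hypercall consumes physical addresses rather than kernel VAs */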
+ regions->vmid = vm->vmid;
+ regions->regions_num = 1;
+ regions->regions_gpa = virt_to_phys(region);
+
+ ret = hcall_set_memory_regions(virt_to_phys(regions));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set memory region for VM[%u]!\n", vm->vmid);
+
+ kfree(regions);
+ return ret;
+}
+
+/**
+ * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
+ * @vm: User VM.
+ * @user_gpa: A GPA of User VM.
+ * @service_gpa: A GPA of Service VM.
+ * @size: Size of the region.
+ * @mem_type: Combination of ACRN_MEM_TYPE_*.
+ * @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
+ u64 size, u32 mem_type, u32 mem_access_right)
+{
+ struct vm_memory_region_op *region;
+ int ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->type = ACRN_MEM_REGION_ADD;
+ region->user_vm_pa = user_gpa;
+ region->service_vm_pa = service_gpa;
+ region->size = size;
+ region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
+ (mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
+ ret = modify_region(vm, region);
+
+ dev_dbg(acrn_dev.this_device,
+ "%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
+ __func__, (void *)user_gpa, (void *)service_gpa, size);
+ kfree(region);
+ return ret;
+}
+
+/**
+ * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
+ * @vm: User VM.
+ * @user_gpa: A GPA of the User VM.
+ * @size: Size of the region.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
+{
+ struct vm_memory_region_op *region;
+ int ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->type = ACRN_MEM_REGION_DEL;
+ region->user_vm_pa = user_gpa;
+ region->service_vm_pa = 0UL;
+ region->size = size;
+ region->attr = 0U;
+
+ ret = modify_region(vm, region);
+
+ dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
+ __func__, (void *)user_gpa, size);
+ kfree(region);
+ return ret;
+}
+
+int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ int ret;
+
+ if (memmap->type == ACRN_MEMMAP_RAM)
+ return acrn_vm_ram_map(vm, memmap);
+
+ if (memmap->type != ACRN_MEMMAP_MMIO) {
+ dev_dbg(acrn_dev.this_device,
+ "Invalid memmap type: %u\n", memmap->type);
+ return -EINVAL;
+ }
+
+ ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
+ memmap->service_vm_pa, memmap->len,
+ ACRN_MEM_TYPE_UC, memmap->attr);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Add memory region failed, VM[%u]!\n", vm->vmid);
+
+ return ret;
+}
+
+int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ int ret;
+
+ if (memmap->type != ACRN_MEMMAP_MMIO) {
+ dev_dbg(acrn_dev.this_device,
+ "Invalid memmap type: %u\n", memmap->type);
+ return -EINVAL;
+ }
+
+ ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Del memory region failed, VM[%u]!\n", vm->vmid);
+
+ return ret;
+}
+
+/**
+ * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM.
+ * @vm: The User VM pointer
+ * @memmap: Info of the EPT mapping
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ struct vm_memory_region_batch *regions_info;
+ int nr_pages, i = 0, order, nr_regions = 0;
+ struct vm_memory_mapping *region_mapping;
+ struct vm_memory_region_op *vm_region;
+ struct page **pages = NULL, *page;
+ void *remap_vaddr;
+ int ret, pinned;
+ u64 user_vm_pa;
+
+ if (!vm || !memmap)
+ return -EINVAL;
+
+ /* Get the number of pages in the map region */
+ nr_pages = memmap->len >> PAGE_SHIFT;
+ pages = vzalloc(nr_pages * sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+
+ /* Pin the pages of the user memory map region */
+ pinned = pin_user_pages_fast(memmap->vma_base,
+ nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto free_pages;
+ } else if (pinned != nr_pages) {
+ ret = -EFAULT;
+ goto put_pages;
+ }
+
+ /* Create a kernel map for the map region */
+ remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!remap_vaddr) {
+ ret = -ENOMEM;
+ goto put_pages;
+ }
+
+ /* Record Service VM va <-> User VM pa mapping */
+ mutex_lock(&vm->regions_mapping_lock);
+ region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
+ if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
+ region_mapping->pages = pages;
+ region_mapping->npages = nr_pages;
+ region_mapping->size = memmap->len;
+ region_mapping->service_vm_va = remap_vaddr;
+ region_mapping->user_vm_pa = memmap->user_vm_pa;
+ vm->regions_mapping_count++;
+ } else {
+ dev_warn(acrn_dev.this_device,
+ "Run out of memory mapping slots!\n");
+ ret = -ENOMEM;
+ mutex_unlock(&vm->regions_mapping_lock);
+ goto unmap_no_count;
+ }
+ mutex_unlock(&vm->regions_mapping_lock);
+
+ /* Count vm_memory_region_op entries: one per (possibly compound) page */
+ while (i < nr_pages) {
+ page = pages[i];
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ order = compound_order(page);
+ nr_regions++;
+ i += 1 << order;
+ }
+
+ /* Prepare the vm_memory_region_batch */
+ regions_info = kzalloc(sizeof(*regions_info) +
+ sizeof(*vm_region) * nr_regions,
+ GFP_KERNEL);
+ if (!regions_info) {
+ ret = -ENOMEM;
+ goto unmap_kernel_map;
+ }
+
+ /* Fill each vm_memory_region_op */
+ vm_region = (struct vm_memory_region_op *)(regions_info + 1);
+ regions_info->vmid = vm->vmid;
+ regions_info->regions_num = nr_regions;
+ regions_info->regions_gpa = virt_to_phys(vm_region);
+ user_vm_pa = memmap->user_vm_pa;
+ i = 0;
+ while (i < nr_pages) {
+ u32 region_size;
+
+ page = pages[i];
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ order = compound_order(page);
+ region_size = PAGE_SIZE << order;
+ vm_region->type = ACRN_MEM_REGION_ADD;
+ vm_region->user_vm_pa = user_vm_pa;
+ vm_region->service_vm_pa = page_to_phys(page);
+ vm_region->size = region_size;
+ vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
+ (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);
+
+ vm_region++;
+ user_vm_pa += region_size;
+ i += 1 << order;
+ }
+
+ /* Inform the ACRN Hypervisor to set up EPT mappings */
+ ret = hcall_set_memory_regions(virt_to_phys(regions_info));
+ if (ret < 0) {
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set regions, VM[%u]!\n", vm->vmid);
+ goto unset_region;
+ }
+ kfree(regions_info);
+
+ dev_dbg(acrn_dev.this_device,
+ "%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
+ __func__, vm->vmid,
+ remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
+ return ret;
+
+unset_region:
+ kfree(regions_info);
+unmap_kernel_map:
+ mutex_lock(&vm->regions_mapping_lock);
+ vm->regions_mapping_count--;
+ mutex_unlock(&vm->regions_mapping_lock);
+unmap_no_count:
+ vunmap(remap_vaddr);
+put_pages:
+ for (i = 0; i < pinned; i++)
+ unpin_user_page(pages[i]);
+free_pages:
+ vfree(pages);
+ return ret;
+}
+
+/**
+ * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
+ * @vm: The User VM
+ */
+void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
+{
+ struct vm_memory_mapping *region_mapping;
+ int i, j;
+
+ mutex_lock(&vm->regions_mapping_lock);
+ for (i = 0; i < vm->regions_mapping_count; i++) {
+ region_mapping = &vm->regions_mapping[i];
+ vunmap(region_mapping->service_vm_va);
+ for (j = 0; j < region_mapping->npages; j++)
+ unpin_user_page(region_mapping->pages[j]);
+ vfree(region_mapping->pages);
+ }
+ mutex_unlock(&vm->regions_mapping_lock);
+}
diff --git a/drivers/virt/acrn/vm.c b/drivers/virt/acrn/vm.c
new file mode 100644
index 000000000000..7804a2492ad7
--- /dev/null
+++ b/drivers/virt/acrn/vm.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN_HSM: Virtual Machine management
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Jason Chen CJ <jason.cj.chen@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+/* List of VMs */
+LIST_HEAD(acrn_vm_list);
+/*
+ * acrn_vm_list is read in a worker thread which dispatches I/O requests and
+ * is written in the VM creation ioctl. Use the rwlock mechanism to protect it.
+ */
+DEFINE_RWLOCK(acrn_vm_list_lock);
+
+struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
+ struct acrn_vm_creation *vm_param)
+{
+ int ret;
+
+ ret = hcall_create_vm(virt_to_phys(vm_param));
+ if (ret < 0 || vm_param->vmid == ACRN_INVALID_VMID) {
+ dev_err(acrn_dev.this_device,
+ "Failed to create VM! Error: %d\n", ret);
+ return NULL;
+ }
+
+ mutex_init(&vm->regions_mapping_lock);
+ INIT_LIST_HEAD(&vm->ioreq_clients);
+ spin_lock_init(&vm->ioreq_clients_lock);
+ vm->vmid = vm_param->vmid;
+ vm->vcpu_num = vm_param->vcpu_num;
+
+ if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
+ hcall_destroy_vm(vm_param->vmid);
+ vm->vmid = ACRN_INVALID_VMID;
+ return NULL;
+ }
+
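+ /* Publish the VM; the I/O request dispatcher walks this list under the read lock */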
+ write_lock_bh(&acrn_vm_list_lock);
+ list_add(&vm->list, &acrn_vm_list);
+ write_unlock_bh(&acrn_vm_list_lock);
+
+ acrn_ioeventfd_init(vm);
+ acrn_irqfd_init(vm);
+ dev_dbg(acrn_dev.this_device, "VM %u created.\n", vm->vmid);
+ return vm;
+}
+
+int acrn_vm_destroy(struct acrn_vm *vm)
+{
+ int ret;
+
+ if (vm->vmid == ACRN_INVALID_VMID ||
+ test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
+ return 0;
+
+ /* Remove from global VM list */
+ write_lock_bh(&acrn_vm_list_lock);
+ list_del_init(&vm->list);
+ write_unlock_bh(&acrn_vm_list_lock);
+
+ acrn_ioeventfd_deinit(vm);
+ acrn_irqfd_deinit(vm);
+ acrn_ioreq_deinit(vm);
+
+ if (vm->monitor_page) {
+ put_page(vm->monitor_page);
+ vm->monitor_page = NULL;
+ }
+
+ ret = hcall_destroy_vm(vm->vmid);
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device,
+ "Failed to destroy VM %u\n", vm->vmid);
+ clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+ return ret;
+ }
+
+ acrn_vm_all_ram_unmap(vm);
+
+ dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
+ vm->vmid = ACRN_INVALID_VMID;
+ return 0;
+}
+
+/**
+ * acrn_msi_inject() - Inject an MSI interrupt into a User VM
+ * @vm: User VM
+ * @msi_addr: The MSI address
+ * @msi_data: The MSI data
+ *
+ * Return: 0 on success, <0 on error
+ */
+int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data)
+{
+ struct acrn_msi_entry *msi;
+ int ret;
+
+ /* might be used in interrupt context, so use GFP_ATOMIC */
+ msi = kzalloc(sizeof(*msi), GFP_ATOMIC);
+ if (!msi)
+ return -ENOMEM;
+
+ /*
+ * msi_addr: addr[19:12] with dest vcpu id
+ * msi_data: data[7:0] with vector
+ */
+ msi->msi_addr = msi_addr;
+ msi->msi_data = msi_data;
+ ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
+ if (ret < 0)
+ dev_err(acrn_dev.this_device,
+ "Failed to inject MSI to VM %u!\n", vm->vmid);
+ kfree(msi);
+ return ret;
+}
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index ea05af41ec69..8d195e3f8301 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -468,7 +468,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
* Cancellation fun.
*/
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
- u32 timeout_ms, bool *leak_it)
+ u32 timeout_ms, bool interruptible, bool *leak_it)
{
int rc, cancel_rc, ret;
long timeout;
@@ -495,10 +495,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
else
timeout = msecs_to_jiffies(timeout_ms);
- timeout = wait_event_interruptible_timeout(
- gdev->hgcm_wq,
- hgcm_req_done(gdev, &call->header),
- timeout);
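+ /* Only requests originating from user mode may be interrupted */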
+ if (interruptible) {
+ timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+ } else {
+ timeout = wait_event_timeout(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+ }
/* timeout > 0 means hgcm_req_done has returned true, so success */
if (timeout > 0)
@@ -631,7 +636,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
hgcm_call_init_call(call, client_id, function, parms, parm_count,
bounce_bufs);
- ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
+ ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
+ requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
if (ret == 0) {
*vbox_status = call->header.result;
ret = hgcm_call_copy_back_result(call, parms, parm_count,
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 7b41130d3f35..ce1b3f6ec325 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
This option is selected if the architecture may need to enforce
VIRTIO_F_ACCESS_PLATFORM
+config VIRTIO_PCI_LIB
+ tristate
+ help
+ Modern PCI device implementation. This module implements the
+ basic probe and control for devices which are based on the
+ modern PCI device layout, with possible vendor specific
+ extensions. Any module that selects this module must depend
+ on PCI.
+
menuconfig VIRTIO_MENU
bool "Virtio drivers"
default y
@@ -21,6 +29,7 @@ if VIRTIO_MENU
config VIRTIO_PCI
tristate "PCI driver for virtio devices"
depends on PCI
+ select VIRTIO_PCI_LIB
select VIRTIO
help
This driver provides support for virtio based paravirtual device
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 591e6f72aa54..699bbea0465f 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index f1f6208edcf5..ce51ae165943 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -7,6 +7,7 @@
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_input.h>
+#include <linux/input/mt.h>
struct virtio_input {
struct virtio_device *vdev;
@@ -64,6 +65,21 @@ static int virtinput_send_status(struct virtio_input *vi,
unsigned long flags;
int rc;
+ /*
+ * Since 29cc309d8bf1 (HID: hid-multitouch: forward MSC_TIMESTAMP),
+ * EV_MSC/MSC_TIMESTAMP is added before each EV_SYN event.
+ * EV_MSC is configured as INPUT_PASS_TO_ALL.
+ * In case of a touch device:
+ *   BE passes EV_MSC/MSC_TIMESTAMP to FE on receiving an event from evdev.
+ *   FE passes EV_MSC/MSC_TIMESTAMP back to BE.
+ *   BE writes EV_MSC/MSC_TIMESTAMP to evdev due to INPUT_PASS_TO_ALL.
+ *   BE receives the extra EV_MSC/MSC_TIMESTAMP and passes it to FE.
+ *   >>> Each new frame becomes larger and larger.
+ * Disable EV_MSC/MSC_TIMESTAMP forwarding for MT devices.
+ */
+ if (vi->idev->mt && type == EV_MSC && code == MSC_TIMESTAMP)
+ return 0;
+
stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
if (!stsbuf)
return -ENOMEM;
@@ -204,7 +220,7 @@ static int virtinput_probe(struct virtio_device *vdev)
struct virtio_input *vi;
unsigned long flags;
size_t size;
- int abs, err;
+ int abs, err, nslots;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -289,6 +305,13 @@ static int virtinput_probe(struct virtio_device *vdev)
continue;
virtinput_cfg_abs(vi, abs);
}
+
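+ /* ABS_MT_SLOT implies a multi-touch device: allocate its MT slots */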
+ if (test_bit(ABS_MT_SLOT, vi->idev->absbit)) {
+ nslots = input_abs_get_max(vi->idev, ABS_MT_SLOT) + 1;
+ err = input_mt_init_slots(vi->idev, nslots, 0);
+ if (err)
+ goto err_mt_init_slots;
+ }
}
virtio_device_ready(vdev);
@@ -304,6 +327,7 @@ err_input_register:
spin_lock_irqsave(&vi->lock, flags);
vi->ready = false;
spin_unlock_irqrestore(&vi->lock, flags);
+err_mt_init_slots:
input_free_device(vi->idev);
err_input_alloc:
vdev->config->del_vqs(vdev);
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 9fc9ec4a25f5..10ec60d81e84 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -623,7 +623,7 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
/* Memory might get onlined immediately. */
atomic64_add(size, &vm->offline_size);
rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
- MEMHP_MERGE_RESOURCE);
+ MHP_MERGE_RESOURCE);
if (rc) {
atomic64_sub(size, &vm->offline_size);
dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
@@ -2222,7 +2222,7 @@ static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
*/
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
- const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ const struct range pluggable_range = mhp_get_pluggable_range(true);
uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
@@ -2234,15 +2234,25 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
usable_region_size, &usable_region_size);
- end_addr = vm->addr + usable_region_size;
- end_addr = min(end_addr, phys_limit);
+ end_addr = min(vm->addr + usable_region_size - 1,
+ pluggable_range.end);
- if (vm->in_sbm)
- vm->sbm.last_usable_mb_id =
- virtio_mem_phys_to_mb_id(end_addr) - 1;
- else
- vm->bbm.last_usable_bb_id =
- virtio_mem_phys_to_bb_id(vm, end_addr) - 1;
+ if (vm->in_sbm) {
+ vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
+ if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
+ vm->sbm.last_usable_mb_id--;
+ } else {
+ vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
+ end_addr);
+ if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
+ vm->bbm.last_usable_bb_id--;
+ }
+ /*
+ * If we cannot plug any of our device memory (e.g., nothing in the
+ * usable region is addressable), the last usable memory block id will
+ * be smaller than the first usable memory block id. We'll stop
+ * attempting to add memory with -ENOSPC from our main loop.
+ */
/* see if there is a request to change the size */
virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
@@ -2364,7 +2374,7 @@ static int virtio_mem_init_vq(struct virtio_mem *vm)
static int virtio_mem_init(struct virtio_mem *vm)
{
- const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ const struct range pluggable_range = mhp_get_pluggable_range(true);
uint64_t sb_size, addr;
uint16_t node_id;
@@ -2405,9 +2415,10 @@ static int virtio_mem_init(struct virtio_mem *vm)
if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
dev_warn(&vm->vdev->dev,
"The alignment of the physical end address can make some memory unusable.\n");
- if (vm->addr + vm->region_size > phys_limit)
+ if (vm->addr < pluggable_range.start ||
+ vm->addr + vm->region_size - 1 > pluggable_range.end)
dev_warn(&vm->vdev->dev,
- "Some memory is not addressable. This can make some memory unusable.\n");
+ "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
/*
* We want subblocks to span at least MAX_ORDER_NR_PAGES and
@@ -2429,7 +2440,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
vm->sbm.sb_size;
/* Round up to the next full memory block */
- addr = vm->addr + memory_block_size_bytes() - 1;
+ addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
+ memory_block_size_bytes() - 1;
vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
vm->sbm.next_mb_id = vm->sbm.first_mb_id;
} else {
@@ -2450,7 +2462,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
}
/* Round up to the next aligned big block */
- addr = vm->addr + vm->bbm.bb_size - 1;
+ addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
+ vm->bbm.bb_size - 1;
vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
vm->bbm.next_bb_id = vm->bbm.first_bb_id;
}
@@ -2577,7 +2590,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
* actually in use (e.g., trying to reload the driver).
*/
if (vm->plugged_size) {
- vm->unplug_all_required = 1;
+ vm->unplug_all_required = true;
dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 238383ff1064..a286d22b6551 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -126,7 +126,7 @@ static int vm_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- /* Make sure there is are no mixed devices */
+ /* Make sure there are no mixed devices */
if (vm_dev->version == 2 &&
!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b2f0eb4067cb..beec047a8f8d 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -25,6 +25,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
@@ -43,31 +44,12 @@ struct virtio_pci_vq_info {
struct virtio_pci_device {
struct virtio_device vdev;
struct pci_dev *pci_dev;
+ struct virtio_pci_modern_device mdev;
/* In legacy mode, these two point to within ->legacy. */
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* Modern only fields */
- /* The IO mapping for the PCI config space (non-legacy mode) */
- struct virtio_pci_common_cfg __iomem *common;
- /* Device-specific data (non-legacy mode) */
- void __iomem *device;
- /* Base of vq notifications (non-legacy mode). */
- void __iomem *notify_base;
-
- /* So we can sanity-check accesses. */
- size_t notify_len;
- size_t device_len;
-
- /* Capability for when we need to map notifications per-vq. */
- int notify_map_cap;
-
- /* Multiply queue_notify_off by this value. (non-legacy mode). */
- u32 notify_offset_multiplier;
-
- int modern_bars;
-
/* Legacy only field */
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 3d6ae5a5e252..fbd4ebc00eb6 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -19,136 +19,11 @@
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
-/*
- * Type-safe wrappers for io accesses.
- * Use these to enforce at compile time the following spec requirement:
- *
- * The driver MUST access each field using the "natural" access
- * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
- * for 16-bit fields and 8-bit accesses for 8-bit fields.
- */
-static inline u8 vp_ioread8(const u8 __iomem *addr)
-{
- return ioread8(addr);
-}
-static inline u16 vp_ioread16 (const __le16 __iomem *addr)
-{
- return ioread16(addr);
-}
-
-static inline u32 vp_ioread32(const __le32 __iomem *addr)
-{
- return ioread32(addr);
-}
-
-static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
-{
- iowrite8(value, addr);
-}
-
-static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
-{
- iowrite16(value, addr);
-}
-
-static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
-{
- iowrite32(value, addr);
-}
-
-static void vp_iowrite64_twopart(u64 val,
- __le32 __iomem *lo, __le32 __iomem *hi)
-{
- vp_iowrite32((u32)val, lo);
- vp_iowrite32(val >> 32, hi);
-}
-
-static void __iomem *map_capability(struct pci_dev *dev, int off,
- size_t minlen,
- u32 align,
- u32 start, u32 size,
- size_t *len)
-{
- u8 bar;
- u32 offset, length;
- void __iomem *p;
-
- pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
- bar),
- &bar);
- pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
- &offset);
- pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
- &length);
-
- if (length <= start) {
- dev_err(&dev->dev,
- "virtio_pci: bad capability len %u (>%u expected)\n",
- length, start);
- return NULL;
- }
-
- if (length - start < minlen) {
- dev_err(&dev->dev,
- "virtio_pci: bad capability len %u (>=%zu expected)\n",
- length, minlen);
- return NULL;
- }
-
- length -= start;
-
- if (start + offset < offset) {
- dev_err(&dev->dev,
- "virtio_pci: map wrap-around %u+%u\n",
- start, offset);
- return NULL;
- }
-
- offset += start;
-
- if (offset & (align - 1)) {
- dev_err(&dev->dev,
- "virtio_pci: offset %u not aligned to %u\n",
- offset, align);
- return NULL;
- }
-
- if (length > size)
- length = size;
-
- if (len)
- *len = length;
-
- if (minlen + offset < minlen ||
- minlen + offset > pci_resource_len(dev, bar)) {
- dev_err(&dev->dev,
- "virtio_pci: map virtio %zu@%u "
- "out of range on bar %i length %lu\n",
- minlen, offset,
- bar, (unsigned long)pci_resource_len(dev, bar));
- return NULL;
- }
-
- p = pci_iomap_range(dev, bar, offset, length);
- if (!p)
- dev_err(&dev->dev,
- "virtio_pci: unable to map virtio %u@%u on bar %i\n",
- length, offset, bar);
- return p;
-}
-
-/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- u64 features;
-
- vp_iowrite32(0, &vp_dev->common->device_feature_select);
- features = vp_ioread32(&vp_dev->common->device_feature);
- vp_iowrite32(1, &vp_dev->common->device_feature_select);
- features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
- return features;
+ return vp_modern_get_features(&vp_dev->mdev);
}
static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -179,10 +54,7 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- vp_iowrite32(0, &vp_dev->common->guest_feature_select);
- vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
- vp_iowrite32(1, &vp_dev->common->guest_feature_select);
- vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+ vp_modern_set_features(&vp_dev->mdev, vdev->features);
return 0;
}
@@ -192,29 +64,31 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ void __iomem *device = mdev->device;
u8 b;
__le16 w;
__le32 l;
- BUG_ON(offset + len > vp_dev->device_len);
+ BUG_ON(offset + len > mdev->device_len);
switch (len) {
case 1:
- b = ioread8(vp_dev->device + offset);
+ b = ioread8(device + offset);
memcpy(buf, &b, sizeof b);
break;
case 2:
- w = cpu_to_le16(ioread16(vp_dev->device + offset));
+ w = cpu_to_le16(ioread16(device + offset));
memcpy(buf, &w, sizeof w);
break;
case 4:
- l = cpu_to_le32(ioread32(vp_dev->device + offset));
+ l = cpu_to_le32(ioread32(device + offset));
memcpy(buf, &l, sizeof l);
break;
case 8:
- l = cpu_to_le32(ioread32(vp_dev->device + offset));
+ l = cpu_to_le32(ioread32(device + offset));
memcpy(buf, &l, sizeof l);
- l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
+ l = cpu_to_le32(ioread32(device + offset + sizeof l));
memcpy(buf + sizeof l, &l, sizeof l);
break;
default:
@@ -228,30 +102,32 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ void __iomem *device = mdev->device;
u8 b;
__le16 w;
__le32 l;
- BUG_ON(offset + len > vp_dev->device_len);
+ BUG_ON(offset + len > mdev->device_len);
switch (len) {
case 1:
memcpy(&b, buf, sizeof b);
- iowrite8(b, vp_dev->device + offset);
+ iowrite8(b, device + offset);
break;
case 2:
memcpy(&w, buf, sizeof w);
- iowrite16(le16_to_cpu(w), vp_dev->device + offset);
+ iowrite16(le16_to_cpu(w), device + offset);
break;
case 4:
memcpy(&l, buf, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+ iowrite32(le32_to_cpu(l), device + offset);
break;
case 8:
memcpy(&l, buf, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+ iowrite32(le32_to_cpu(l), device + offset);
memcpy(&l, buf + sizeof l, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
+ iowrite32(le32_to_cpu(l), device + offset + sizeof l);
break;
default:
BUG();
@@ -261,35 +137,40 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return vp_ioread8(&vp_dev->common->config_generation);
+
+ return vp_modern_generation(&vp_dev->mdev);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return vp_ioread8(&vp_dev->common->device_status);
+
+ return vp_modern_get_status(&vp_dev->mdev);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- vp_iowrite8(status, &vp_dev->common->device_status);
+ vp_modern_set_status(&vp_dev->mdev, status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+
/* 0 status means a reset. */
- vp_iowrite8(0, &vp_dev->common->device_status);
+ vp_modern_set_status(mdev, 0);
/* After writing 0 to device_status, the driver MUST wait for a read of
* device_status to return 0 before reinitializing the device.
* This will flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any.
*/
- while (vp_ioread8(&vp_dev->common->device_status))
+ while (vp_modern_get_status(mdev))
msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
@@ -297,11 +178,7 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
- /* Setup the vector used for configuration events */
- vp_iowrite16(vector, &vp_dev->common->msix_config);
- /* Verify we had enough resources to assign the vector */
- /* Will also flush the write out to device */
- return vp_ioread16(&vp_dev->common->msix_config);
+ return vp_modern_config_vector(&vp_dev->mdev, vector);
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
@@ -312,20 +189,18 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
bool ctx,
u16 msix_vec)
{
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtqueue *vq;
u16 num, off;
int err;
- if (index >= vp_ioread16(&cfg->num_queues))
+ if (index >= vp_modern_get_num_queues(mdev))
return ERR_PTR(-ENOENT);
- /* Select the queue we're interested in */
- vp_iowrite16(index, &cfg->queue_select);
-
/* Check if queue is either not available or already active. */
- num = vp_ioread16(&cfg->queue_size);
- if (!num || vp_ioread16(&cfg->queue_enable))
+ num = vp_modern_get_queue_size(mdev, index);
+ if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
@@ -334,7 +209,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* get offset of notification word for this vq */
- off = vp_ioread16(&cfg->queue_notify_off);
+ off = vp_modern_get_queue_notify_off(mdev, index);
info->msix_vector = msix_vec;
@@ -347,33 +222,30 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return ERR_PTR(-ENOMEM);
/* activate the queue */
- vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
- vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
- &cfg->queue_desc_lo, &cfg->queue_desc_hi);
- vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
- &cfg->queue_avail_lo, &cfg->queue_avail_hi);
- vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
- &cfg->queue_used_lo, &cfg->queue_used_hi);
-
- if (vp_dev->notify_base) {
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (mdev->notify_base) {
/* offset should not wrap */
- if ((u64)off * vp_dev->notify_offset_multiplier + 2
- > vp_dev->notify_len) {
- dev_warn(&vp_dev->pci_dev->dev,
+ if ((u64)off * mdev->notify_offset_multiplier + 2
+ > mdev->notify_len) {
+ dev_warn(&mdev->pci_dev->dev,
"bad notification offset %u (x %u) "
"for queue %u > %zd",
- off, vp_dev->notify_offset_multiplier,
- index, vp_dev->notify_len);
+ off, mdev->notify_offset_multiplier,
+ index, mdev->notify_len);
err = -EINVAL;
goto err_map_notify;
}
- vq->priv = (void __force *)vp_dev->notify_base +
- off * vp_dev->notify_offset_multiplier;
+ vq->priv = (void __force *)mdev->notify_base +
+ off * mdev->notify_offset_multiplier;
} else {
- vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
- vp_dev->notify_map_cap, 2, 2,
- off * vp_dev->notify_offset_multiplier, 2,
- NULL);
+ vq->priv = (void __force *)vp_modern_map_capability(mdev,
+ mdev->notify_map_cap, 2, 2,
+ off * mdev->notify_offset_multiplier, 2,
+ NULL);
}
if (!vq->priv) {
@@ -382,8 +254,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
- msix_vec = vp_ioread16(&cfg->queue_msix_vector);
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
@@ -393,8 +264,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return vq;
err_assign_vector:
- if (!vp_dev->notify_base)
- pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
+ if (!mdev->notify_base)
+ pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
vring_del_virtqueue(vq);
return ERR_PTR(err);
@@ -416,10 +287,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* Select and activate all queues. Has to be done last: once we do
* this, there's no way to go back except reset.
*/
- list_for_each_entry(vq, &vdev->vqs, list) {
- vp_iowrite16(vq->index, &vp_dev->common->queue_select);
- vp_iowrite16(1, &vp_dev->common->queue_enable);
- }
+ list_for_each_entry(vq, &vdev->vqs, list)
+ vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
return 0;
}
@@ -428,18 +297,14 @@ static void del_vq(struct virtio_pci_vq_info *info)
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- vp_iowrite16(vq->index, &vp_dev->common->queue_select);
-
- if (vp_dev->msix_enabled) {
- vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
- &vp_dev->common->queue_msix_vector);
- /* Flush the write out to device */
- vp_ioread16(&vp_dev->common->queue_msix_vector);
- }
+ if (vp_dev->msix_enabled)
+ vp_modern_queue_vector(mdev, vq->index,
+ VIRTIO_MSI_NO_VECTOR);
- if (!vp_dev->notify_base)
- pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
+ if (!mdev->notify_base)
+ pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);
vring_del_virtqueue(vq);
}
@@ -571,261 +436,36 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
};
-/**
- * virtio_pci_find_capability - walk capabilities to find device info.
- * @dev: the pci device
- * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
- * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
- * @bars: the bitmask of BARs
- *
- * Returns offset of the capability, or 0.
- */
-static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
- u32 ioresource_types, int *bars)
-{
- int pos;
-
- for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
- pos > 0;
- pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
- u8 type, bar;
- pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
- cfg_type),
- &type);
- pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
- bar),
- &bar);
-
- /* Ignore structures with reserved BAR values */
- if (bar > 0x5)
- continue;
-
- if (type == cfg_type) {
- if (pci_resource_len(dev, bar) &&
- pci_resource_flags(dev, bar) & ioresource_types) {
- *bars |= (1 << bar);
- return pos;
- }
- }
- }
- return 0;
-}
-
-/* This is part of the ABI. Don't screw with it. */
-static inline void check_offsets(void)
-{
- /* Note: disk space was harmed in compilation of this function. */
- BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
- offsetof(struct virtio_pci_cap, cap_vndr));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
- offsetof(struct virtio_pci_cap, cap_next));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
- offsetof(struct virtio_pci_cap, cap_len));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
- offsetof(struct virtio_pci_cap, cfg_type));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
- offsetof(struct virtio_pci_cap, bar));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
- offsetof(struct virtio_pci_cap, offset));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
- offsetof(struct virtio_pci_cap, length));
- BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
- offsetof(struct virtio_pci_notify_cap,
- notify_off_multiplier));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
- offsetof(struct virtio_pci_common_cfg,
- device_feature_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
- offsetof(struct virtio_pci_common_cfg, device_feature));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
- offsetof(struct virtio_pci_common_cfg,
- guest_feature_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
- offsetof(struct virtio_pci_common_cfg, guest_feature));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
- offsetof(struct virtio_pci_common_cfg, msix_config));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
- offsetof(struct virtio_pci_common_cfg, num_queues));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
- offsetof(struct virtio_pci_common_cfg, device_status));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
- offsetof(struct virtio_pci_common_cfg, config_generation));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
- offsetof(struct virtio_pci_common_cfg, queue_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
- offsetof(struct virtio_pci_common_cfg, queue_size));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
- offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
- offsetof(struct virtio_pci_common_cfg, queue_enable));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
- offsetof(struct virtio_pci_common_cfg, queue_notify_off));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
- offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
- offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
- offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
- offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
- offsetof(struct virtio_pci_common_cfg, queue_used_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
- offsetof(struct virtio_pci_common_cfg, queue_used_hi));
-}
-
/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct pci_dev *pci_dev = vp_dev->pci_dev;
- int err, common, isr, notify, device;
- u32 notify_length;
- u32 notify_offset;
-
- check_offsets();
-
- /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
- return -ENODEV;
-
- if (pci_dev->device < 0x1040) {
- /* Transitional devices: use the PCI subsystem device id as
- * virtio device id, same as legacy driver always did.
- */
- vp_dev->vdev.id.device = pci_dev->subsystem_device;
- } else {
- /* Modern devices: simply use PCI device id, but start from 0x1040. */
- vp_dev->vdev.id.device = pci_dev->device - 0x1040;
- }
- vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
-
- /* check for a common config: if not, use legacy mode (bar 0). */
- common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
- if (!common) {
- dev_info(&pci_dev->dev,
- "virtio_pci: leaving for legacy driver\n");
- return -ENODEV;
- }
-
- /* If common is there, these should be too... */
- isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
- notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
- if (!isr || !notify) {
- dev_err(&pci_dev->dev,
- "virtio_pci: missing capabilities %i/%i/%i\n",
- common, isr, notify);
- return -EINVAL;
- }
-
- err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
- if (err)
- dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+ int err;
- /* Device capability is only mandatory for devices that have
- * device-specific configuration.
- */
- device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
+ mdev->pci_dev = pci_dev;
- err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
- "virtio-pci-modern");
+ err = vp_modern_probe(mdev);
if (err)
return err;
- err = -EINVAL;
- vp_dev->common = map_capability(pci_dev, common,
- sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
- NULL);
- if (!vp_dev->common)
- goto err_map_common;
- vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
- 0, 1,
- NULL);
- if (!vp_dev->isr)
- goto err_map_isr;
-
- /* Read notify_off_multiplier from config space. */
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- notify_off_multiplier),
- &vp_dev->notify_offset_multiplier);
- /* Read notify length and offset from config space. */
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- cap.length),
- &notify_length);
-
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- cap.offset),
- &notify_offset);
-
- /* We don't know how many VQs we'll map, ahead of the time.
- * If notify length is small, map it all now.
- * Otherwise, map each VQ individually later.
- */
- if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
- vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
- 0, notify_length,
- &vp_dev->notify_len);
- if (!vp_dev->notify_base)
- goto err_map_notify;
- } else {
- vp_dev->notify_map_cap = notify;
- }
-
- /* Again, we don't know how much we should map, but PAGE_SIZE
- * is more than enough for all existing devices.
- */
- if (device) {
- vp_dev->device = map_capability(pci_dev, device, 0, 4,
- 0, PAGE_SIZE,
- &vp_dev->device_len);
- if (!vp_dev->device)
- goto err_map_device;
-
+ if (mdev->device)
vp_dev->vdev.config = &virtio_pci_config_ops;
- } else {
+ else
vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
- }
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->isr = mdev->isr;
+ vp_dev->vdev.id = mdev->id;
return 0;
-
-err_map_device:
- if (vp_dev->notify_base)
- pci_iounmap(pci_dev, vp_dev->notify_base);
-err_map_notify:
- pci_iounmap(pci_dev, vp_dev->isr);
-err_map_isr:
- pci_iounmap(pci_dev, vp_dev->common);
-err_map_common:
- return err;
}
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
- struct pci_dev *pci_dev = vp_dev->pci_dev;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- if (vp_dev->device)
- pci_iounmap(pci_dev, vp_dev->device);
- if (vp_dev->notify_base)
- pci_iounmap(pci_dev, vp_dev->notify_base);
- pci_iounmap(pci_dev, vp_dev->isr);
- pci_iounmap(pci_dev, vp_dev->common);
- pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
+ vp_modern_remove(mdev);
}
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
new file mode 100644
index 000000000000..cbd667496bb1
--- /dev/null
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/virtio_pci_modern.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/*
+ * vp_modern_map_capability - map a part of virtio pci capability
+ * @mdev: the modern virtio-pci device
+ * @off: offset of the capability
+ * @minlen: minimal length of the capability
+ * @align: align requirement
+ * @start: start from the capability
+ * @size: map size
+ * @len: the length that is actually mapped
+ *
+ * Returns the io address for the part of the capability
+ */
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen,
+ u32 align,
+ u32 start, u32 size,
+ size_t *len)
+{
+ struct pci_dev *dev = mdev->pci_dev;
+ u8 bar;
+ u32 offset, length;
+ void __iomem *p;
+
+ pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
+ bar),
+ &bar);
+ pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
+ &offset);
+ pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
+ &length);
+
+ if (length <= start) {
+ dev_err(&dev->dev,
+ "virtio_pci: bad capability len %u (>%u expected)\n",
+ length, start);
+ return NULL;
+ }
+
+ if (length - start < minlen) {
+ dev_err(&dev->dev,
+ "virtio_pci: bad capability len %u (>=%zu expected)\n",
+ length, minlen);
+ return NULL;
+ }
+
+ length -= start;
+
+ if (start + offset < offset) {
+ dev_err(&dev->dev,
+ "virtio_pci: map wrap-around %u+%u\n",
+ start, offset);
+ return NULL;
+ }
+
+ offset += start;
+
+ if (offset & (align - 1)) {
+ dev_err(&dev->dev,
+ "virtio_pci: offset %u not aligned to %u\n",
+ offset, align);
+ return NULL;
+ }
+
+ if (length > size)
+ length = size;
+
+ if (len)
+ *len = length;
+
+ if (minlen + offset < minlen ||
+ minlen + offset > pci_resource_len(dev, bar)) {
+ dev_err(&dev->dev,
+ "virtio_pci: map virtio %zu@%u "
+ "out of range on bar %i length %lu\n",
+ minlen, offset,
+ bar, (unsigned long)pci_resource_len(dev, bar));
+ return NULL;
+ }
+
+ p = pci_iomap_range(dev, bar, offset, length);
+ if (!p)
+ dev_err(&dev->dev,
+ "virtio_pci: unable to map virtio %u@%u on bar %i\n",
+ length, offset, bar);
+ return p;
+}
+EXPORT_SYMBOL_GPL(vp_modern_map_capability);
+
+/**
+ * virtio_pci_find_capability - walk capabilities to find device info.
+ * @dev: the pci device
+ * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
+ * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ * @bars: the bitmask of BARs
+ *
+ * Returns offset of the capability, or 0.
+ */
+static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
+ u32 ioresource_types, int *bars)
+{
+ int pos;
+
+ for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ pos > 0;
+ pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
+ u8 type, bar;
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ cfg_type),
+ &type);
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ bar),
+ &bar);
+
+ /* Ignore structures with reserved BAR values */
+ if (bar > 0x5)
+ continue;
+
+ if (type == cfg_type) {
+ if (pci_resource_len(dev, bar) &&
+ pci_resource_flags(dev, bar) & ioresource_types) {
+ *bars |= (1 << bar);
+ return pos;
+ }
+ }
+ }
+ return 0;
+}
+
+/* This is part of the ABI. Don't screw with it. */
+static inline void check_offsets(void)
+{
+ /* Note: disk space was harmed in compilation of this function. */
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
+ offsetof(struct virtio_pci_cap, cap_vndr));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
+ offsetof(struct virtio_pci_cap, cap_next));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
+ offsetof(struct virtio_pci_cap, cap_len));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
+ offsetof(struct virtio_pci_cap, cfg_type));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
+ offsetof(struct virtio_pci_cap, bar));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
+ offsetof(struct virtio_pci_cap, offset));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
+ offsetof(struct virtio_pci_cap, length));
+ BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
+ offsetof(struct virtio_pci_notify_cap,
+ notify_off_multiplier));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
+ offsetof(struct virtio_pci_common_cfg,
+ device_feature_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
+ offsetof(struct virtio_pci_common_cfg, device_feature));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
+ offsetof(struct virtio_pci_common_cfg,
+ guest_feature_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
+ offsetof(struct virtio_pci_common_cfg, guest_feature));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
+ offsetof(struct virtio_pci_common_cfg, msix_config));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
+ offsetof(struct virtio_pci_common_cfg, num_queues));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
+ offsetof(struct virtio_pci_common_cfg, device_status));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
+ offsetof(struct virtio_pci_common_cfg, config_generation));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
+ offsetof(struct virtio_pci_common_cfg, queue_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
+ offsetof(struct virtio_pci_common_cfg, queue_size));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
+ offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
+ offsetof(struct virtio_pci_common_cfg, queue_enable));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
+ offsetof(struct virtio_pci_common_cfg, queue_notify_off));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_used_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+}
+
+/*
+ * vp_modern_probe: probe the modern virtio pci device; note that the
+ * caller is required to enable the PCI device before calling this function.
+ * @mdev: the modern virtio-pci device
+ *
+ * Return 0 on success, otherwise a negative error code
+ */
+int vp_modern_probe(struct virtio_pci_modern_device *mdev)
+{
+ struct pci_dev *pci_dev = mdev->pci_dev;
+ int err, common, isr, notify, device;
+ u32 notify_length;
+ u32 notify_offset;
+
+ check_offsets();
+
+
+ /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
+ return -ENODEV;
+
+ if (pci_dev->device < 0x1040) {
+ /* Transitional devices: use the PCI subsystem device id as
+ * virtio device id, same as legacy driver always did.
+ */
+ mdev->id.device = pci_dev->subsystem_device;
+ } else {
+ /* Modern devices: simply use PCI device id, but start from 0x1040. */
+ mdev->id.device = pci_dev->device - 0x1040;
+ }
+ mdev->id.vendor = pci_dev->subsystem_vendor;
+
+ /* check for a common config: if not, use legacy mode (bar 0). */
+ common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ if (!common) {
+ dev_info(&pci_dev->dev,
+ "virtio_pci: leaving for legacy driver\n");
+ return -ENODEV;
+ }
+
+ /* If common is there, these should be too... */
+ isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ if (!isr || !notify) {
+ dev_err(&pci_dev->dev,
+ "virtio_pci: missing capabilities %i/%i/%i\n",
+ common, isr, notify);
+ return -EINVAL;
+ }
+
+ err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
+ /* Device capability is only mandatory for devices that have
+ * device-specific configuration.
+ */
+ device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+
+ err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
+ "virtio-pci-modern");
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ mdev->common = vp_modern_map_capability(mdev, common,
+ sizeof(struct virtio_pci_common_cfg), 4,
+ 0, sizeof(struct virtio_pci_common_cfg),
+ NULL);
+ if (!mdev->common)
+ goto err_map_common;
+ mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
+ 0, 1,
+ NULL);
+ if (!mdev->isr)
+ goto err_map_isr;
+
+ /* Read notify_off_multiplier from config space. */
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ notify_off_multiplier),
+ &mdev->notify_offset_multiplier);
+ /* Read notify length and offset from config space. */
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ cap.length),
+ &notify_length);
+
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ cap.offset),
+ &notify_offset);
+
+	/* We don't know ahead of time how many VQs we'll map.
+ * If notify length is small, map it all now.
+ * Otherwise, map each VQ individually later.
+ */
+ if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
+ mdev->notify_base = vp_modern_map_capability(mdev, notify,
+ 2, 2,
+ 0, notify_length,
+ &mdev->notify_len);
+ if (!mdev->notify_base)
+ goto err_map_notify;
+ } else {
+ mdev->notify_map_cap = notify;
+ }
+
+ /* Again, we don't know how much we should map, but PAGE_SIZE
+ * is more than enough for all existing devices.
+ */
+ if (device) {
+ mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
+ 0, PAGE_SIZE,
+ &mdev->device_len);
+ if (!mdev->device)
+ goto err_map_device;
+ }
+
+ return 0;
+
+err_map_device:
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
+err_map_notify:
+ pci_iounmap(pci_dev, mdev->isr);
+err_map_isr:
+ pci_iounmap(pci_dev, mdev->common);
+err_map_common:
+ return err;
+}
+EXPORT_SYMBOL_GPL(vp_modern_probe);
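+
+/*
+ * Usage sketch (illustrative only, not part of this file): a PCI driver
+ * is expected to enable the device before probing and to pair probe with
+ * remove on the teardown path:
+ *
+ *	struct virtio_pci_modern_device mdev = { .pci_dev = pci_dev };
+ *	int rc = pci_enable_device(pci_dev);
+ *
+ *	if (!rc)
+ *		rc = vp_modern_probe(&mdev);
+ *	...
+ *	vp_modern_remove(&mdev);
+ *	pci_disable_device(pci_dev);
+ */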
+
+/*
+ * vp_modern_remove - remove and clean up the modern virtio pci device
+ * @mdev: the modern virtio-pci device
+ */
+void vp_modern_remove(struct virtio_pci_modern_device *mdev)
+{
+ struct pci_dev *pci_dev = mdev->pci_dev;
+
+ if (mdev->device)
+ pci_iounmap(pci_dev, mdev->device);
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
+ pci_iounmap(pci_dev, mdev->isr);
+ pci_iounmap(pci_dev, mdev->common);
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
+}
+EXPORT_SYMBOL_GPL(vp_modern_remove);
+
+/*
+ * vp_modern_get_features - get features from device
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the features read from the device
+ */
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ u64 features;
+
+ vp_iowrite32(0, &cfg->device_feature_select);
+ features = vp_ioread32(&cfg->device_feature);
+ vp_iowrite32(1, &cfg->device_feature_select);
+ features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
+
+ return features;
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_features);
+
+/*
+ * vp_modern_set_features - set features to the device
+ * @mdev: the modern virtio-pci device
+ * @features: the features to set on the device
+ */
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ vp_iowrite32((u32)features, &cfg->guest_feature);
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ vp_iowrite32(features >> 32, &cfg->guest_feature);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_features);
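+
+/*
+ * Negotiation sketch (illustrative; driver_supported is a placeholder
+ * for the driver's own feature mask): the device features are read,
+ * masked, and written back as the guest features:
+ *
+ *	u64 features = vp_modern_get_features(mdev) & driver_supported;
+ *
+ *	vp_modern_set_features(mdev, features);
+ */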
+
+/*
+ * vp_modern_generation - get the device generation
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the generation read from device
+ */
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ return vp_ioread8(&cfg->config_generation);
+}
+EXPORT_SYMBOL_GPL(vp_modern_generation);
+
+/*
+ * vp_modern_get_status - get the device status
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the status read from device
+ */
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ return vp_ioread8(&cfg->device_status);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_status);
+
+/*
+ * vp_modern_set_status - set the device status
+ * @mdev: the modern virtio-pci device
+ * @status: the status to set on the device
+ */
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite8(status, &cfg->device_status);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_status);
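+
+/*
+ * Status sketch (illustrative, following the standard virtio handshake;
+ * status is a placeholder for the bits accumulated so far): writing 0
+ * resets the device, and the driver re-reads the status to confirm that
+ * bits such as VIRTIO_CONFIG_S_FEATURES_OK actually stuck:
+ *
+ *	vp_modern_set_status(mdev, 0);
+ *	vp_modern_set_status(mdev, status | VIRTIO_CONFIG_S_FEATURES_OK);
+ *	if (!(vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK))
+ *		... negotiation failed ...
+ */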
+
+/*
+ * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ * @vector: the MSI-X vector to use for the virtqueue
+ *
+ * Returns the vector read back from the device
+ */
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(vector, &cfg->queue_msix_vector);
+ /* Flush the write out to device */
+ return vp_ioread16(&cfg->queue_msix_vector);
+}
+EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
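+
+/*
+ * Sketch: the caller can detect MSI-X vector exhaustion by comparing the
+ * value read back against the standard VIRTIO_MSI_NO_VECTOR (0xffff):
+ *
+ *	if (vp_modern_queue_vector(mdev, idx, vec) == VIRTIO_MSI_NO_VECTOR)
+ *		... fall back to shared interrupts ...
+ */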
+
+/*
+ * vp_modern_config_vector - set the vector for config interrupt
+ * @mdev: the modern virtio-pci device
+ * @vector: the config vector
+ *
+ * Returns the config vector read from the device
+ */
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ /* Setup the vector used for configuration events */
+ vp_iowrite16(vector, &cfg->msix_config);
+ /* Verify we had enough resources to assign the vector */
+ /* Will also flush the write out to device */
+ return vp_ioread16(&cfg->msix_config);
+}
+EXPORT_SYMBOL_GPL(vp_modern_config_vector);
+
+/*
+ * vp_modern_queue_address - set the virtqueue address
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @desc_addr: address of the descriptor area
+ * @driver_addr: address of the driver area
+ * @device_addr: address of the device area
+ */
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+
+ vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+}
+EXPORT_SYMBOL_GPL(vp_modern_queue_address);
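+
+/*
+ * Per-queue setup sketch (illustrative; idx, num, desc_pa, avail_pa,
+ * used_pa and msix_vec are placeholders, and the virtio core normally
+ * drives this sequence) using the helpers in this file:
+ *
+ *	vp_modern_set_queue_size(mdev, idx, num);
+ *	vp_modern_queue_address(mdev, idx, desc_pa, avail_pa, used_pa);
+ *	vp_modern_queue_vector(mdev, idx, msix_vec);
+ *	vp_modern_set_queue_enable(mdev, idx, true);
+ */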
+
+/*
+ * vp_modern_set_queue_enable - enable a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @enable: whether to enable the virtqueue
+ */
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index, bool enable)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(enable, &mdev->common->queue_enable);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
+
+/*
+ * vp_modern_get_queue_enable - get the enable status of a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns whether a virtqueue is enabled or not
+ */
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_enable);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
+
+/*
+ * vp_modern_set_queue_size - set size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @size: the size of the virtqueue
+ */
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 size)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(size, &mdev->common->queue_size);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
+
+/*
+ * vp_modern_get_queue_size - get size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the size of the virtqueue
+ */
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_size);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
+
+/*
+ * vp_modern_get_num_queues - get the number of virtqueues
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the number of virtqueues
+ */
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
+{
+ return vp_ioread16(&mdev->common->num_queues);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
+
+/*
+ * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the notification offset for a virtqueue
+ */
+u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_notify_off);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
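+
+/*
+ * Sketch: the doorbell address for a queue is derived from its notify
+ * offset and the multiplier read at probe time (assuming the notify area
+ * was mapped as a whole into mdev->notify_base):
+ *
+ *	u16 off = vp_modern_get_queue_notify_off(mdev, idx);
+ *	void __iomem *db = mdev->notify_base +
+ *			   off * mdev->notify_offset_multiplier;
+ */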
+
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Modern Virtio PCI Device");
+MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 4a9ddb44b2a7..e28acf482e0c 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vd_dev->lock, flags);
- /* Select and deactivate the queue */
+ /* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
- WARN_ON(ops->get_vq_ready(vdpa, index));
vring_del_virtqueue(vq);
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 54d7963c1078..1b15afea28ee 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1997,9 +1997,9 @@ static int vme_bus_remove(struct device *dev)
driver = dev->platform_data;
if (driver->remove)
- return driver->remove(vdev);
+ driver->remove(vdev);
- return -ENODEV;
+ return 0;
}
struct bus_type vme_bus_type = {
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index e17c8f70dcd0..cd8821580f71 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -688,12 +688,22 @@ static void ds9490r_search(void *data, struct w1_master *master,
* packet size.
*/
const size_t bufsize = 2 * 64;
- u64 *buf;
+ u64 *buf, *found_ids;
buf = kmalloc(bufsize, GFP_KERNEL);
if (!buf)
return;
+ /*
+ * We are holding the bus mutex during the scan, but adding devices via the
+ * callback needs the bus to be unlocked. So we queue up found ids here.
+ */
+ found_ids = kmalloc_array(master->max_slave_count, sizeof(u64), GFP_KERNEL);
+ if (!found_ids) {
+ kfree(buf);
+ return;
+ }
+
mutex_lock(&master->bus_mutex);
/* address to start searching at */
@@ -729,13 +739,13 @@ static void ds9490r_search(void *data, struct w1_master *master,
if (err < 0)
break;
for (i = 0; i < err/8; ++i) {
- ++found;
- if (found <= search_limit)
- callback(master, buf[i]);
+ found_ids[found++] = buf[i];
/* can't know if there will be a discrepancy
* value after until the next id */
- if (found == search_limit)
+ if (found == search_limit) {
master->search_id = buf[i];
+ break;
+ }
}
}
@@ -759,9 +769,14 @@ static void ds9490r_search(void *data, struct w1_master *master,
master->max_slave_count);
set_bit(W1_WARN_MAX_COUNT, &master->flags);
}
+
search_out:
mutex_unlock(&master->bus_mutex);
kfree(buf);
+
+ for (i = 0; i < found; i++) /* run callback for all queued up IDs */
+ callback(master, found_ids[i]);
+ kfree(found_ids);
}
#if 0
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 3712b1e6dc71..976eea28f268 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -667,28 +667,24 @@ static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
*/
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
- int t;
- u32 bv;
+ u16 bv;
+ s16 t;
+
+ /* Signed 16-bit value to unsigned, cpu order */
+ bv = le16_to_cpup((__le16 *)rom);
/* Config register bit R2 = 1 - GX20MH01 in 13 or 14 bit resolution mode */
if (rom[4] & 0x80) {
- /* Signed 16-bit value to unsigned, cpu order */
- bv = le16_to_cpup((__le16 *)rom);
-
/* Insert two temperature bits from config register */
/* Avoid arithmetic shift of signed value */
bv = (bv << 2) | (rom[4] & 3);
-
- t = (int) sign_extend32(bv, 17); /* Degrees, lowest bit is 2^-6 */
- return (t*1000)/64; /* Millidegrees */
+ t = (s16) bv; /* Degrees, lowest bit is 2^-6 */
+ return (int)t * 1000 / 64; /* Sign-extend to int; millidegrees */
}
-
- t = (int)le16_to_cpup((__le16 *)rom);
- return t*1000/16;
+ t = (s16)bv; /* Degrees, lowest bit is 2^-4 */
+ return (int)t * 1000 / 16; /* Sign-extend to int; millidegrees */
}
-
-
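+
+/*
+ * Worked example (values are illustrative): a DS18B20 reading of 0x0191
+ * (rom[0] = 0x91, rom[1] = 0x01) gives t = 401 and
+ * 401 * 1000 / 16 = 25062 millidegrees (+25.0625 C); a negative reading
+ * of 0xff5e gives t = -162 and -162 * 1000 / 16 = -10125 millidegrees.
+ */
+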
/**
* w1_DS18S20_convert_temp() - temperature computation for DS18S20
* @rom: data read from device RAM (8 data bytes + 1 CRC byte)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 15a2ee32f116..f2ae2e563dc5 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -25,6 +25,8 @@
#include "w1_netlink.h"
#define W1_FAMILY_DEFAULT 0
+#define W1_FAMILY_DS28E04 0x1C /* for crc quirk */
+
static int w1_timeout = 10;
module_param_named(timeout, w1_timeout, int, 0);
@@ -913,11 +915,44 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
mutex_unlock(&w1_mlock);
}
+static int w1_addr_crc_is_valid(struct w1_master *dev, u64 rn)
+{
+ u64 rn_le = cpu_to_le64(rn);
+ struct w1_reg_num *tmp = (struct w1_reg_num *)&rn;
+ u8 crc;
+
+ crc = w1_calc_crc8((u8 *)&rn_le, 7);
+
+	/* quirk:
+	 * The DS28E04 (1-wire EEPROM) has strapping pins to change its
+	 * address, but does not update the crc accordingly, so the normal
+	 * rule of consistent w1 addresses is violated. We retry the test
+	 * with the 7 LSBs of the address forced high.
+	 *
+	 * (char *)&rn_le = { family, addr_lsb, ..., addr_msb, crc }.
+	 */
+ if (crc != tmp->crc && tmp->family == W1_FAMILY_DS28E04) {
+ u64 corr_le = rn_le;
+
+ ((u8 *)&corr_le)[1] |= 0x7f;
+ crc = w1_calc_crc8((u8 *)&corr_le, 7);
+
+ dev_info(&dev->dev, "DS28E04 crc workaround on %02x.%012llx.%02x\n",
+ tmp->family, (unsigned long long)tmp->id, tmp->crc);
+ }
+
+ if (crc != tmp->crc) {
+ dev_dbg(&dev->dev, "w1 addr crc mismatch: %02x.%012llx.%02x != 0x%02x.\n",
+ tmp->family, (unsigned long long)tmp->id, tmp->crc, crc);
+ return 0;
+ }
+ return 1;
+}
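+
+/*
+ * Illustration: two DS28E04 parts strapped to addresses differing only in
+ * the low 7 bits of the first address byte carry the same factory crc;
+ * forcing those bits high makes the crc computation independent of the
+ * strap setting, so both pass the retried check above.
+ */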
+
void w1_slave_found(struct w1_master *dev, u64 rn)
{
struct w1_slave *sl;
struct w1_reg_num *tmp;
- u64 rn_le = cpu_to_le64(rn);
atomic_inc(&dev->refcnt);
@@ -927,7 +962,7 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
if (sl) {
set_bit(W1_SLAVE_ACTIVE, &sl->flags);
} else {
- if (rn && tmp->crc == w1_calc_crc8((u8 *)&rn_le, 7))
+ if (rn && w1_addr_crc_is_valid(dev, rn))
w1_attach_slave_device(dev, tmp);
}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7ff941e71b79..1fe0042a48d2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -254,17 +254,6 @@ config MENZ069_WATCHDOG
This driver can also be built as a module. If so the module
will be called menz069_wdt.
-config TANGOX_WATCHDOG
- tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
- select WATCHDOG_CORE
- depends on ARCH_TANGO || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Support for the watchdog in Sigma Designs SMP86xx (tango3)
- and SMP87xx (tango4) family chips.
-
- This driver can be built as a module. The module name is tangox_wdt.
-
config WDAT_WDT
tristate "ACPI Watchdog Action Table (WDAT)"
depends on ACPI
@@ -630,17 +619,6 @@ config SUNXI_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called sunxi_wdt.
-config COH901327_WATCHDOG
- bool "ST-Ericsson COH 901 327 watchdog"
- depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
- default y if MACH_U300
- select WATCHDOG_CORE
- help
- Say Y here to include Watchdog timer support for the
- watchdog embedded into the ST-Ericsson U300 series platforms.
- This watchdog is used to reset the system and thus cannot be
- compiled as a module.
-
config NPCM7XX_WATCHDOG
tristate "Nuvoton NPCM750 watchdog"
depends on ARCH_NPCM || COMPILE_TEST
@@ -788,16 +766,6 @@ config MOXART_WDT
To compile this driver as a module, choose M here: the
module will be called moxart_wdt.
-config SIRFSOC_WATCHDOG
- tristate "SiRFSOC watchdog"
- depends on HAS_IOMEM
- depends on ARCH_SIRF || COMPILE_TEST
- select WATCHDOG_CORE
- default y
- help
- Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When
- the watchdog triggers the system will be reset.
-
config ST_LPC_WATCHDOG
tristate "STMicroelectronics LPC Watchdog"
depends on ARCH_STI || COMPILE_TEST
@@ -900,16 +868,6 @@ config LPC18XX_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called lpc18xx_wdt.
-config ATLAS7_WATCHDOG
- tristate "CSRatlas7 watchdog"
- depends on ARCH_ATLAS7 || COMPILE_TEST
- help
- Say Y here to include Watchdog timer support for the watchdog
- existing on the CSRatlas7 series platforms.
-
- To compile this driver as a module, choose M here: the
- module will be called atlas7_wdt.
-
config RENESAS_WDT
tristate "Renesas WDT Watchdog"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -939,16 +897,6 @@ config ASPEED_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called aspeed_wdt.
-config ZX2967_WATCHDOG
- tristate "ZTE zx2967 SoCs watchdog support"
- depends on ARCH_ZX
- select WATCHDOG_CORE
- help
- Say Y here to include support for the watchdog timer
- in ZTE zx2967 SoCs.
- To compile this driver as a module, choose M here: the
- module will be called zx2967_wdt.
-
config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
depends on ARCH_STM32
@@ -1219,15 +1167,6 @@ config IE6XX_WDT
To compile this driver as a module, choose M here: the
module will be called ie6xx_wdt.
-config INTEL_SCU_WATCHDOG
- bool "Intel SCU Watchdog for Mobile Platforms"
- depends on X86_INTEL_MID
- help
- Hardware driver for the watchdog time built into the Intel SCU
- for Intel Mobile Platforms.
-
- To compile this driver as a module, choose M here.
-
config INTEL_MID_WATCHDOG
tristate "Intel MID Watchdog Timer"
depends on X86_INTEL_MID
@@ -2155,4 +2094,17 @@ config USBPCWATCHDOG
Most people will say N.
+config KEEMBAY_WATCHDOG
+ tristate "Intel Keem Bay SoC non-secure watchdog"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ select WATCHDOG_CORE
+ help
+	  This option enables support for the non-secure watchdog timer driver
+	  on the Intel Keem Bay SoC. This WDT has a 32-bit timer that decrements
+	  every count unit. An interrupt is triggered when the count crosses
+	  the threshold configured in the register.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keembay_wdt.
+
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 5c74ee19d441..f3a6540e725e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_K3_RTI_WATCHDOG) += rti_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
-obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_NPCM7XX_WATCHDOG) += npcm_wdt.o
obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
@@ -73,7 +72,6 @@ obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
-obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
obj-$(CONFIG_ST_LPC_WATCHDOG) += st_lpc_wdt.o
obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
@@ -84,11 +82,9 @@ obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
-obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
-obj-$(CONFIG_ZX2967_WATCHDOG) += zx2967_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
@@ -140,12 +136,12 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
-obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
obj-$(CONFIG_MLX_WDT) += mlx_wdt.o
+obj-$(CONFIG_KEEMBAY_WATCHDOG) += keembay_wdt.o
# M68K Architecture
obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
@@ -213,7 +209,6 @@ obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o
obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o
-obj-$(CONFIG_TANGOX_WATCHDOG) += tangox_wdt.o
obj-$(CONFIG_WDAT_WDT) += wdat_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/atlas7_wdt.c b/drivers/watchdog/atlas7_wdt.c
deleted file mode 100644
index 9bfe650d802f..000000000000
--- a/drivers/watchdog/atlas7_wdt.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Watchdog driver for CSR Atlas7
- *
- * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-
-#define ATLAS7_TIMER_WDT_INDEX 5
-#define ATLAS7_WDT_DEFAULT_TIMEOUT 20
-
-#define ATLAS7_WDT_CNT_CTRL (0 + 4 * ATLAS7_TIMER_WDT_INDEX)
-#define ATLAS7_WDT_CNT_MATCH (0x18 + 4 * ATLAS7_TIMER_WDT_INDEX)
-#define ATLAS7_WDT_CNT (0x48 + 4 * ATLAS7_TIMER_WDT_INDEX)
-#define ATLAS7_WDT_CNT_EN (BIT(0) | BIT(1))
-#define ATLAS7_WDT_EN 0x64
-
-static unsigned int timeout = ATLAS7_WDT_DEFAULT_TIMEOUT;
-static bool nowayout = WATCHDOG_NOWAYOUT;
-
-module_param(timeout, uint, 0);
-module_param(nowayout, bool, 0);
-
-MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-struct atlas7_wdog {
- struct device *dev;
- void __iomem *base;
- unsigned long tick_rate;
- struct clk *clk;
-};
-
-static unsigned int atlas7_wdt_gettimeleft(struct watchdog_device *wdd)
-{
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
- u32 counter, match, delta;
-
- counter = readl(wdt->base + ATLAS7_WDT_CNT);
- match = readl(wdt->base + ATLAS7_WDT_CNT_MATCH);
- delta = match - counter;
-
- return delta / wdt->tick_rate;
-}
-
-static int atlas7_wdt_ping(struct watchdog_device *wdd)
-{
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
- u32 counter, match, delta;
-
- counter = readl(wdt->base + ATLAS7_WDT_CNT);
- delta = wdd->timeout * wdt->tick_rate;
- match = counter + delta;
-
- writel(match, wdt->base + ATLAS7_WDT_CNT_MATCH);
-
- return 0;
-}
-
-static int atlas7_wdt_enable(struct watchdog_device *wdd)
-{
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- atlas7_wdt_ping(wdd);
-
- writel(readl(wdt->base + ATLAS7_WDT_CNT_CTRL) | ATLAS7_WDT_CNT_EN,
- wdt->base + ATLAS7_WDT_CNT_CTRL);
- writel(1, wdt->base + ATLAS7_WDT_EN);
-
- return 0;
-}
-
-static int atlas7_wdt_disable(struct watchdog_device *wdd)
-{
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- writel(0, wdt->base + ATLAS7_WDT_EN);
- writel(readl(wdt->base + ATLAS7_WDT_CNT_CTRL) & ~ATLAS7_WDT_CNT_EN,
- wdt->base + ATLAS7_WDT_CNT_CTRL);
-
- return 0;
-}
-
-static int atlas7_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
-{
- wdd->timeout = to;
-
- return 0;
-}
-
-#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
-
-static const struct watchdog_info atlas7_wdt_ident = {
- .options = OPTIONS,
- .firmware_version = 0,
- .identity = "atlas7 Watchdog",
-};
-
-static const struct watchdog_ops atlas7_wdt_ops = {
- .owner = THIS_MODULE,
- .start = atlas7_wdt_enable,
- .stop = atlas7_wdt_disable,
- .get_timeleft = atlas7_wdt_gettimeleft,
- .ping = atlas7_wdt_ping,
- .set_timeout = atlas7_wdt_settimeout,
-};
-
-static struct watchdog_device atlas7_wdd = {
- .info = &atlas7_wdt_ident,
- .ops = &atlas7_wdt_ops,
- .timeout = ATLAS7_WDT_DEFAULT_TIMEOUT,
-};
-
-static const struct of_device_id atlas7_wdt_ids[] = {
- { .compatible = "sirf,atlas7-tick"},
- {}
-};
-
-static void atlas7_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int atlas7_wdt_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct atlas7_wdog *wdt;
- struct clk *clk;
- int ret;
-
- wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
- return -ENOMEM;
- wdt->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(wdt->base))
- return PTR_ERR(wdt->base);
-
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "clk enable failed\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, atlas7_clk_disable_unprepare, clk);
- if (ret)
- return ret;
-
- /* disable watchdog hardware */
- writel(0, wdt->base + ATLAS7_WDT_CNT_CTRL);
-
- wdt->tick_rate = clk_get_rate(clk);
- if (!wdt->tick_rate)
- return -EINVAL;
-
- wdt->clk = clk;
- atlas7_wdd.min_timeout = 1;
- atlas7_wdd.max_timeout = UINT_MAX / wdt->tick_rate;
-
- watchdog_init_timeout(&atlas7_wdd, 0, dev);
- watchdog_set_nowayout(&atlas7_wdd, nowayout);
-
- watchdog_set_drvdata(&atlas7_wdd, wdt);
- platform_set_drvdata(pdev, &atlas7_wdd);
-
- watchdog_stop_on_reboot(&atlas7_wdd);
- watchdog_stop_on_unregister(&atlas7_wdd);
- return devm_watchdog_register_device(dev, &atlas7_wdd);
-}
-
-static int __maybe_unused atlas7_wdt_suspend(struct device *dev)
-{
- /*
- * NOTE:timer controller registers settings are saved
- * and restored back by the timer-atlas7.c
- */
- return 0;
-}
-
-static int __maybe_unused atlas7_wdt_resume(struct device *dev)
-{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
-
- /*
- * NOTE: Since timer controller registers settings are saved
- * and restored back by the timer-atlas7.c, so we need not
- * update WD settings except refreshing timeout.
- */
- atlas7_wdt_ping(wdd);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(atlas7_wdt_pm_ops,
- atlas7_wdt_suspend, atlas7_wdt_resume);
-
-MODULE_DEVICE_TABLE(of, atlas7_wdt_ids);
-
-static struct platform_driver atlas7_wdt_driver = {
- .driver = {
- .name = "atlas7-wdt",
- .pm = &atlas7_wdt_pm_ops,
- .of_match_table = atlas7_wdt_ids,
- },
- .probe = atlas7_wdt_probe,
-};
-module_platform_driver(atlas7_wdt_driver);
-
-MODULE_DESCRIPTION("CSRatlas7 watchdog driver");
-MODULE_AUTHOR("Guo Zeng <Guo.Zeng@csr.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:atlas7-wdt");
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
deleted file mode 100644
index 260c50b08483..000000000000
--- a/drivers/watchdog/coh901327_wdt.c
+++ /dev/null
@@ -1,408 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * coh901327_wdt.c
- *
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/types.h>
-#include <linux/watchdog.h>
-#include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#define DRV_NAME "WDOG COH 901 327"
-
-/*
- * COH 901 327 register definitions
- */
-
-/* WDOG_FEED Register 32bit (-/W) */
-#define U300_WDOG_FR 0x00
-#define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU
-/* WDOG_TIMEOUT Register 32bit (R/W) */
-#define U300_WDOG_TR 0x04
-#define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU
-/* WDOG_DISABLE1 Register 32bit (-/W) */
-#define U300_WDOG_D1R 0x08
-#define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU
-/* WDOG_DISABLE2 Register 32bit (R/W) */
-#define U300_WDOG_D2R 0x0C
-#define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU
-#define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU
-#define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U
-/* WDOG_STATUS Register 32bit (R/W) */
-#define U300_WDOG_SR 0x10
-#define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U
-#define U300_WDOG_SR_STATUS_NORMAL 0x0000U
-#define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U
-/* WDOG_COUNT Register 32bit (R/-) */
-#define U300_WDOG_CR 0x14
-#define U300_WDOG_CR_VALID_IND 0x8000U
-#define U300_WDOG_CR_VALID_STABLE 0x0000U
-#define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU
-/* WDOG_JTAGOVR Register 32bit (R/W) */
-#define U300_WDOG_JOR 0x18
-#define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U
-#define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U
-/* WDOG_RESTART Register 32bit (-/W) */
-#define U300_WDOG_RR 0x1C
-#define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU
-/* WDOG_IRQ_EVENT Register 32bit (R/W) */
-#define U300_WDOG_IER 0x20
-#define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U
-#define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U
-/* WDOG_IRQ_MASK Register 32bit (R/W) */
-#define U300_WDOG_IMR 0x24
-#define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U
-/* WDOG_IRQ_FORCE Register 32bit (R/W) */
-#define U300_WDOG_IFR 0x28
-#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
-
-/* Default timeout in seconds = 1 minute */
-#define U300_WDOG_DEFAULT_TIMEOUT 60
-
-static unsigned int margin;
-static int irq;
-static void __iomem *virtbase;
-static struct device *parent;
-
-static struct clk *clk;
-
-/*
- * Enabling and disabling functions.
- */
-static void coh901327_enable(u16 timeout)
-{
- u16 val;
- unsigned long freq;
- unsigned long delay_ns;
-
- /* Restart timer if it is disabled */
- val = readw(virtbase + U300_WDOG_D2R);
- if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
- writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
- virtbase + U300_WDOG_RR);
- /* Acknowledge any pending interrupt so it doesn't just fire off */
- writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
- virtbase + U300_WDOG_IER);
- /*
- * The interrupt is cleared in the 32 kHz clock domain.
- * Wait 3 32 kHz cycles for it to take effect
- */
- freq = clk_get_rate(clk);
- delay_ns = DIV_ROUND_UP(1000000000, freq); /* Freq to ns and round up */
- delay_ns = 3 * delay_ns; /* Wait 3 cycles */
- ndelay(delay_ns);
- /* Enable the watchdog interrupt */
- writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
- /* Activate the watchdog timer */
- writew(timeout, virtbase + U300_WDOG_TR);
- /* Start the watchdog timer */
- writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR);
- /*
- * Extra read so that this change propagate in the watchdog.
- */
- (void) readw(virtbase + U300_WDOG_CR);
- val = readw(virtbase + U300_WDOG_D2R);
- if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
- dev_err(parent,
- "%s(): watchdog not enabled! D2R value %04x\n",
- __func__, val);
-}
-
-static void coh901327_disable(void)
-{
- u16 val;
-
- /* Disable the watchdog interrupt if it is active */
- writew(0x0000U, virtbase + U300_WDOG_IMR);
- /* If the watchdog is currently enabled, attempt to disable it */
- val = readw(virtbase + U300_WDOG_D2R);
- if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) {
- writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER,
- virtbase + U300_WDOG_D1R);
- writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
- virtbase + U300_WDOG_D2R);
- /* Write this twice (else problems occur) */
- writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
- virtbase + U300_WDOG_D2R);
- }
- val = readw(virtbase + U300_WDOG_D2R);
- if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
- dev_err(parent,
- "%s(): watchdog not disabled! D2R value %04x\n",
- __func__, val);
-}
-
-static int coh901327_start(struct watchdog_device *wdt_dev)
-{
- coh901327_enable(wdt_dev->timeout * 100);
- return 0;
-}
-
-static int coh901327_stop(struct watchdog_device *wdt_dev)
-{
- coh901327_disable();
- return 0;
-}
-
-static int coh901327_ping(struct watchdog_device *wdd)
-{
- /* Feed the watchdog */
- writew(U300_WDOG_FR_FEED_RESTART_TIMER,
- virtbase + U300_WDOG_FR);
- return 0;
-}
-
-static int coh901327_settimeout(struct watchdog_device *wdt_dev,
- unsigned int time)
-{
- wdt_dev->timeout = time;
- /* Set new timeout value */
- writew(time * 100, virtbase + U300_WDOG_TR);
- /* Feed the dog */
- writew(U300_WDOG_FR_FEED_RESTART_TIMER,
- virtbase + U300_WDOG_FR);
- return 0;
-}
-
-static unsigned int coh901327_gettimeleft(struct watchdog_device *wdt_dev)
-{
- u16 val;
-
- /* Read repeatedly until the value is stable! */
- val = readw(virtbase + U300_WDOG_CR);
- while (val & U300_WDOG_CR_VALID_IND)
- val = readw(virtbase + U300_WDOG_CR);
- val &= U300_WDOG_CR_COUNT_VALUE_MASK;
- if (val != 0)
- val /= 100;
-
- return val;
-}
-
-/*
- * This interrupt occurs 10 ms before the watchdog WILL bark.
- */
-static irqreturn_t coh901327_interrupt(int irq, void *data)
-{
- u16 val;
-
- /*
- * Ack IRQ? If this occurs we're FUBAR anyway, so
- * just acknowledge, disable the interrupt and await the imminent end.
- * If you at some point need a host of callbacks to be called
- * when the system is about to watchdog-reset, add them here!
- *
- * NOTE: on future versions of this IP-block, it will be possible
- * to prevent a watchdog reset by feeding the watchdog at this
- * point.
- */
- val = readw(virtbase + U300_WDOG_IER);
- if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND)
- writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
- virtbase + U300_WDOG_IER);
- writew(0x0000U, virtbase + U300_WDOG_IMR);
- dev_crit(parent, "watchdog is barking!\n");
- return IRQ_HANDLED;
-}
-
-static const struct watchdog_info coh901327_ident = {
- .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = DRV_NAME,
-};
-
-static const struct watchdog_ops coh901327_ops = {
- .owner = THIS_MODULE,
- .start = coh901327_start,
- .stop = coh901327_stop,
- .ping = coh901327_ping,
- .set_timeout = coh901327_settimeout,
- .get_timeleft = coh901327_gettimeleft,
-};
-
-static struct watchdog_device coh901327_wdt = {
- .info = &coh901327_ident,
- .ops = &coh901327_ops,
- /*
- * Max timeout is 327 since the 10ms
- * timeout register is max
- * 0x7FFF = 327670ms ~= 327s.
- */
- .min_timeout = 1,
- .max_timeout = 327,
- .timeout = U300_WDOG_DEFAULT_TIMEOUT,
-};
-
-static int __init coh901327_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- u16 val;
-
- parent = dev;
-
- virtbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(virtbase))
- return PTR_ERR(virtbase);
-
- clk = clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "could not get clock\n");
- return ret;
- }
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "could not prepare and enable clock\n");
- goto out_no_clk_enable;
- }
-
- val = readw(virtbase + U300_WDOG_SR);
- switch (val) {
- case U300_WDOG_SR_STATUS_TIMED_OUT:
- dev_info(dev, "watchdog timed out since last chip reset!\n");
- coh901327_wdt.bootstatus |= WDIOF_CARDRESET;
- /* Status will be cleared below */
- break;
- case U300_WDOG_SR_STATUS_NORMAL:
- dev_info(dev, "in normal status, no timeouts have occurred.\n");
- break;
- default:
- dev_info(dev, "contains an illegal status code (%08x)\n", val);
- break;
- }
-
- val = readw(virtbase + U300_WDOG_D2R);
- switch (val) {
- case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
- dev_info(dev, "currently disabled.\n");
- break;
- case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
- dev_info(dev, "currently enabled! (disabling it now)\n");
- coh901327_disable();
- break;
- default:
- dev_err(dev, "contains an illegal enable/disable code (%08x)\n",
- val);
- break;
- }
-
- /* Reset the watchdog */
- writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
-
- irq = platform_get_irq(pdev, 0);
- if (request_irq(irq, coh901327_interrupt, 0,
- DRV_NAME " Bark", pdev)) {
- ret = -EIO;
- goto out_no_irq;
- }
-
- watchdog_init_timeout(&coh901327_wdt, margin, dev);
-
- coh901327_wdt.parent = dev;
- ret = watchdog_register_device(&coh901327_wdt);
- if (ret)
- goto out_no_wdog;
-
- dev_info(dev, "initialized. (timeout=%d sec)\n",
- coh901327_wdt.timeout);
- return 0;
-
-out_no_wdog:
- free_irq(irq, pdev);
-out_no_irq:
- clk_disable_unprepare(clk);
-out_no_clk_enable:
- clk_put(clk);
- return ret;
-}
-
-#ifdef CONFIG_PM
-
-static u16 wdogenablestore;
-static u16 irqmaskstore;
-
-static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
-{
- irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
- wdogenablestore = readw(virtbase + U300_WDOG_D2R);
- /* If watchdog is on, disable it here and now */
- if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
- coh901327_disable();
- return 0;
-}
-
-static int coh901327_resume(struct platform_device *pdev)
-{
- /* Restore the watchdog interrupt */
- writew(irqmaskstore, virtbase + U300_WDOG_IMR);
- if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) {
- /* Restart the watchdog timer */
- writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
- virtbase + U300_WDOG_RR);
- writew(U300_WDOG_FR_FEED_RESTART_TIMER,
- virtbase + U300_WDOG_FR);
- }
- return 0;
-}
-#else
-#define coh901327_suspend NULL
-#define coh901327_resume NULL
-#endif
-
-/*
- * Mistreating the watchdog is the only way to perform a software reset of the
- * system on EMP platforms. So we implement this and export a symbol for it.
- */
-void coh901327_watchdog_reset(void)
-{
- /* Enable even if on JTAG too */
- writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE,
- virtbase + U300_WDOG_JOR);
- /*
- * Timeout = 5s, we have to wait for the watchdog reset to
- * actually take place: the watchdog will be reloaded with the
- * default value immediately, so we HAVE to reboot and get back
- * into the kernel in 30s, or the device will reboot again!
- * The boot loader will typically deactivate the watchdog, so we
- * need time enough for the boot loader to get to the point of
- * deactivating the watchdog before it is shut down by it.
- *
- * NOTE: on future versions of the watchdog, this restriction is
- * gone: the watchdog will be reloaded with a default value (1 min)
- * instead of last value, and you can conveniently set the watchdog
- * timeout to 10ms (value = 1) without any problems.
- */
- coh901327_enable(500);
- /* Return and await doom */
-}
-
-static const struct of_device_id coh901327_dt_match[] = {
- { .compatible = "stericsson,coh901327" },
- {},
-};
-
-static struct platform_driver coh901327_driver = {
- .driver = {
- .name = "coh901327_wdog",
- .of_match_table = coh901327_dt_match,
- .suppress_bind_attrs = true,
- },
- .suspend = coh901327_suspend,
- .resume = coh901327_resume,
-};
-builtin_platform_driver_probe(coh901327_driver, coh901327_probe);
-
-/* not really modular, but ... */
-module_param(margin, uint, 0);
-MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index cbd1498ff015..22ddba3802ef 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -123,7 +123,7 @@ static int hpwdt_settimeout(struct watchdog_device *wdd, unsigned int val)
if (val <= wdd->pretimeout) {
dev_dbg(wdd->parent, "pretimeout < timeout. Setting to zero\n");
wdd->pretimeout = 0;
- pretimeout = 0;
+ pretimeout = false;
if (watchdog_active(wdd))
hpwdt_start(wdd);
}
@@ -336,13 +336,13 @@ static int hpwdt_init_one(struct pci_dev *dev,
watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);
if (is_kdump_kernel()) {
- pretimeout = 0;
+ pretimeout = false;
kdumptimeout = 0;
}
if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
- pretimeout = 0;
+ pretimeout = false;
}
hpwdt_dev.pretimeout = pretimeout ? PRETIMEOUT_SEC : 0;
kdumptimeout = min(kdumptimeout, HPWDT_MAX_TIMER);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 1ae03b64ef8b..9b2173f765c8 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -154,6 +154,10 @@ static int mid_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdt_dev, mid);
+ mid->scu = devm_intel_scu_ipc_dev_get(dev);
+ if (!mid->scu)
+ return -EPROBE_DEFER;
+
ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
wdt_dev);
@@ -162,10 +166,6 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
- mid->scu = devm_intel_scu_ipc_dev_get(dev);
- if (!mid->scu)
- return -EPROBE_DEFER;
-
/*
* The firmware followed by U-Boot leaves the watchdog running
* with the default threshold which may vary. When we get here
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
deleted file mode 100644
index 804e35940983..000000000000
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ /dev/null
@@ -1,533 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
- * for Intel part #(s):
- * - AF82MP20 PCH
- *
- * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/sfi.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/intel-mid.h>
-
-#include "intel_scu_watchdog.h"
-
-/* Bounds number of times we will retry loading time count */
-/* This retry is a work around for a silicon bug. */
-#define MAX_RETRY 16
-
-#define IPC_SET_WATCHDOG_TIMER 0xF8
-
-static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
-module_param(timer_margin, int, 0);
-MODULE_PARM_DESC(timer_margin,
- "Watchdog timer margin"
- "Time between interrupt and resetting the system"
- "The range is from 1 to 160"
- "This is the time for all keep alives to arrive");
-
-static int timer_set = DEFAULT_TIME;
-module_param(timer_set, int, 0);
-MODULE_PARM_DESC(timer_set,
- "Default Watchdog timer setting"
- "Complete cycle time"
- "The range is from 1 to 170"
- "This is the time for all keep alives to arrive");
-
-/* After watchdog device is closed, check force_boot. If:
- * force_boot == 0, then force boot on next watchdog interrupt after close,
- * force_boot == 1, then force boot immediately when device is closed.
- */
-static int force_boot;
-module_param(force_boot, int, 0);
-MODULE_PARM_DESC(force_boot,
- "A value of 1 means that the driver will reboot"
- "the system immediately if the /dev/watchdog device is closed"
- "A value of 0 means that when /dev/watchdog device is closed"
- "the watchdog timer will be refreshed for one more interval"
- "of length: timer_set. At the end of this interval, the"
- "watchdog timer will reset the system."
- );
-
-/* there is only one device in the system now; this can be made into
- * an array in the future if we have more than one device */
-
-static struct intel_scu_watchdog_dev watchdog_device;
-
-/* Forces restart, if force_reboot is set */
-static void watchdog_fire(void)
-{
- if (force_boot) {
- pr_crit("Initiating system reboot\n");
- emergency_restart();
- pr_crit("Reboot didn't ?????\n");
- }
-
- else {
- pr_crit("Immediate Reboot Disabled\n");
- pr_crit("System will reset when watchdog timer times out!\n");
- }
-}
-
-static int check_timer_margin(int new_margin)
-{
- if ((new_margin < MIN_TIME_CYCLE) ||
- (new_margin > MAX_TIME - timer_set)) {
- pr_debug("value of new_margin %d is out of the range %d to %d\n",
- new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * IPC operations
- */
-static int watchdog_set_ipc(int soft_threshold, int threshold)
-{
- u32 *ipc_wbuf;
- u8 cbuf[16] = { '\0' };
- int ipc_ret = 0;
-
- ipc_wbuf = (u32 *)&cbuf;
- ipc_wbuf[0] = soft_threshold;
- ipc_wbuf[1] = threshold;
-
- ipc_ret = intel_scu_ipc_command(
- IPC_SET_WATCHDOG_TIMER,
- 0,
- ipc_wbuf,
- 2,
- NULL,
- 0);
-
- if (ipc_ret != 0)
- pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
-
- return ipc_ret;
-};
-
-/*
- * Intel_SCU operations
- */
-
-/* timer interrupt handler */
-static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
-{
- int int_status;
- int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
-
- pr_debug("irq, int_status: %x\n", int_status);
-
- if (int_status != 0)
- return IRQ_NONE;
-
- /* has the timer been started? If not, then this is spurious */
- if (watchdog_device.timer_started == 0) {
- pr_debug("spurious interrupt received\n");
- return IRQ_HANDLED;
- }
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* set the timer to the threshold */
- iowrite32(watchdog_device.threshold,
- watchdog_device.timer_load_count_addr);
-
- /* allow the timer to run */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- return IRQ_HANDLED;
-}
-
-static int intel_scu_keepalive(void)
-{
-
- /* read eoi register - clears interrupt */
- ioread32(watchdog_device.timer_clear_interrupt_addr);
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* set the timer to the soft_threshold */
- iowrite32(watchdog_device.soft_threshold,
- watchdog_device.timer_load_count_addr);
-
- /* allow the timer to run */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- return 0;
-}
-
-static int intel_scu_stop(void)
-{
- iowrite32(0, watchdog_device.timer_control_addr);
- return 0;
-}
-
-static int intel_scu_set_heartbeat(u32 t)
-{
- int ipc_ret;
- int retry_count;
- u32 soft_value;
- u32 hw_value;
-
- watchdog_device.timer_set = t;
- watchdog_device.threshold =
- timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
- watchdog_device.soft_threshold =
- (watchdog_device.timer_set - timer_margin)
- * watchdog_device.timer_tbl_ptr->freq_hz;
-
- pr_debug("set_heartbeat: timer freq is %d\n",
- watchdog_device.timer_tbl_ptr->freq_hz);
- pr_debug("set_heartbeat: timer_set is %x (hex)\n",
- watchdog_device.timer_set);
- pr_debug("set_heartbeat: timer_margin is %x (hex)\n", timer_margin);
- pr_debug("set_heartbeat: threshold is %x (hex)\n",
- watchdog_device.threshold);
- pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
- watchdog_device.soft_threshold);
-
- /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
- /* watchdog timing come out right. */
- watchdog_device.threshold =
- watchdog_device.threshold / FREQ_ADJUSTMENT;
- watchdog_device.soft_threshold =
- watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* send the threshold and soft_threshold via IPC to the processor */
- ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
- watchdog_device.threshold);
-
- if (ipc_ret != 0) {
- /* Make sure the watchdog timer is stopped */
- intel_scu_stop();
- return ipc_ret;
- }
-
- /* Soft Threshold set loop. Early versions of silicon did */
- /* not always set this count correctly. This loop checks */
- /* the value and retries if it was not set correctly. */
-
- retry_count = 0;
- soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
- do {
-
- /* Make sure timer is stopped */
- intel_scu_stop();
-
- if (MAX_RETRY < retry_count++) {
- /* Unable to set timer value */
- pr_err("Unable to set timer\n");
- return -ENODEV;
- }
-
- /* set the timer to the soft threshold */
- iowrite32(watchdog_device.soft_threshold,
- watchdog_device.timer_load_count_addr);
-
- /* read count value before starting timer */
- ioread32(watchdog_device.timer_load_count_addr);
-
- /* Start the timer */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- /* read the value the time loaded into its count reg */
- hw_value = ioread32(watchdog_device.timer_load_count_addr);
- hw_value = hw_value & 0xFFFF0000;
-
-
- } while (soft_value != hw_value);
-
- watchdog_device.timer_started = 1;
-
- return 0;
-}
-
-/*
- * /dev/watchdog handling
- */
-
-static int intel_scu_open(struct inode *inode, struct file *file)
-{
-
- /* Set flag to indicate that watchdog device is open */
- if (test_and_set_bit(0, &watchdog_device.driver_open))
- return -EBUSY;
-
- /* Check for reopen of driver. Reopens are not allowed */
- if (watchdog_device.driver_closed)
- return -EPERM;
-
- return stream_open(inode, file);
-}
-
-static int intel_scu_release(struct inode *inode, struct file *file)
-{
- /*
- * This watchdog should not be closed, after the timer
- * is started with the WDIPC_SETTIMEOUT ioctl
- * If force_boot is set watchdog_fire() will cause an
- * immediate reset. If force_boot is not set, the watchdog
- * timer is refreshed for one more interval. At the end
- * of that interval, the watchdog timer will reset the system.
- */
-
- if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
- pr_debug("intel_scu_release, without open\n");
- return -ENOTTY;
- }
-
- if (!watchdog_device.timer_started) {
- /* Just close, since timer has not been started */
- pr_debug("closed, without starting timer\n");
- return 0;
- }
-
- pr_crit("Unexpected close of /dev/watchdog!\n");
-
- /* Since the timer was started, prevent future reopens */
- watchdog_device.driver_closed = 1;
-
- /* Refresh the timer for one more interval */
- intel_scu_keepalive();
-
- /* Reboot system (if force_boot is set) */
- watchdog_fire();
-
- /* We should only reach this point if force_boot is not set */
- return 0;
-}
-
-static ssize_t intel_scu_write(struct file *file,
- char const *data,
- size_t len,
- loff_t *ppos)
-{
-
- if (watchdog_device.timer_started)
- /* Watchdog already started, keep it alive */
- intel_scu_keepalive();
- else
- /* Start watchdog with timer value set by init */
- intel_scu_set_heartbeat(watchdog_device.timer_set);
-
- return len;
-}
-
-static long intel_scu_ioctl(struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- u32 __user *p = argp;
- u32 new_margin;
-
-
- static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT
- | WDIOF_KEEPALIVEPING,
- .firmware_version = 0, /* @todo Get from SCU via
- ipc_get_scu_fw_version()? */
- .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp,
- &ident,
- sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- intel_scu_keepalive();
-
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, p))
- return -EFAULT;
-
- if (check_timer_margin(new_margin))
- return -EINVAL;
-
- if (intel_scu_set_heartbeat(new_margin))
- return -EINVAL;
- return 0;
- case WDIOC_GETTIMEOUT:
- return put_user(watchdog_device.soft_threshold, p);
-
- default:
- return -ENOTTY;
- }
-}
-
-/*
- * Notifier for system down
- */
-static int intel_scu_notify_sys(struct notifier_block *this,
- unsigned long code,
- void *another_unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- /* Turn off the watchdog timer. */
- intel_scu_stop();
- return NOTIFY_DONE;
-}
-
-/*
- * Kernel Interfaces
- */
-static const struct file_operations intel_scu_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = intel_scu_write,
- .unlocked_ioctl = intel_scu_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = intel_scu_open,
- .release = intel_scu_release,
-};
-
-static int __init intel_scu_watchdog_init(void)
-{
- int ret;
- u32 __iomem *tmp_addr;
-
- /*
- * We don't really need to check this as the SFI timer get will fail
- * but if we do so we can exit with a clearer reason and no noise.
- *
- * If it isn't an intel MID device then it doesn't have this watchdog
- */
- if (!intel_mid_identify_cpu())
- return -ENODEV;
-
- /* Check boot parameters to verify that their initial values */
- /* are in range. */
- /* Check value of timer_set boot parameter */
- if ((timer_set < MIN_TIME_CYCLE) ||
- (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
- pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
- timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
- return -EINVAL;
- }
-
- /* Check value of timer_margin boot parameter */
- if (check_timer_margin(timer_margin))
- return -EINVAL;
-
- watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
-
- if (watchdog_device.timer_tbl_ptr == NULL) {
- pr_debug("timer is not available\n");
- return -ENODEV;
- }
- /* make sure the timer exists */
- if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
- pr_debug("timer %d does not have valid physical memory\n",
- sfi_mtimer_num);
- return -ENODEV;
- }
-
- if (watchdog_device.timer_tbl_ptr->irq == 0) {
- pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
- return -ENODEV;
- }
-
- tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr,
- 20);
-
- if (tmp_addr == NULL) {
- pr_debug("timer unable to ioremap\n");
- return -ENOMEM;
- }
-
- watchdog_device.timer_load_count_addr = tmp_addr++;
- watchdog_device.timer_current_value_addr = tmp_addr++;
- watchdog_device.timer_control_addr = tmp_addr++;
- watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
- watchdog_device.timer_interrupt_status_addr = tmp_addr++;
-
- /* Set the default time values in device structure */
-
- watchdog_device.timer_set = timer_set;
- watchdog_device.threshold =
- timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
- watchdog_device.soft_threshold =
- (watchdog_device.timer_set - timer_margin)
- * watchdog_device.timer_tbl_ptr->freq_hz;
-
-
- watchdog_device.intel_scu_notifier.notifier_call =
- intel_scu_notify_sys;
-
- ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
- if (ret) {
- pr_err("cannot register notifier %d)\n", ret);
- goto register_reboot_error;
- }
-
- watchdog_device.miscdev.minor = WATCHDOG_MINOR;
- watchdog_device.miscdev.name = "watchdog";
- watchdog_device.miscdev.fops = &intel_scu_fops;
-
- ret = misc_register(&watchdog_device.miscdev);
- if (ret) {
- pr_err("cannot register miscdev %d err =%d\n",
- WATCHDOG_MINOR, ret);
- goto misc_register_error;
- }
-
- ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
- watchdog_timer_interrupt,
- IRQF_SHARED, "watchdog",
- &watchdog_device.timer_load_count_addr);
- if (ret) {
- pr_err("error requesting irq %d\n", ret);
- goto request_irq_error;
- }
- /* Make sure timer is disabled before returning */
- intel_scu_stop();
- return 0;
-
-/* error cleanup */
-
-request_irq_error:
- misc_deregister(&watchdog_device.miscdev);
-misc_register_error:
- unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
-register_reboot_error:
- intel_scu_stop();
- iounmap(watchdog_device.timer_load_count_addr);
- return ret;
-}
-late_initcall(intel_scu_watchdog_init);
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
deleted file mode 100644
index fb12a25ee417..000000000000
--- a/drivers/watchdog/intel_scu_watchdog.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
- * for Intel part #(s):
- * - AF82MP20 PCH
- *
- * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
- */
-
-#ifndef __INTEL_SCU_WATCHDOG_H
-#define __INTEL_SCU_WATCHDOG_H
-
-#define WDT_VER "0.3"
-
-/* minimum time between interrupts */
-#define MIN_TIME_CYCLE 1
-
-/* Time from warning to reboot is 2 seconds */
-#define DEFAULT_SOFT_TO_HARD_MARGIN 2
-
-#define MAX_TIME 170
-
-#define DEFAULT_TIME 5
-
-#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
-
-/* Ajustment to clock tick frequency to make timing come out right */
-#define FREQ_ADJUSTMENT 8
-
-struct intel_scu_watchdog_dev {
- ulong driver_open;
- ulong driver_closed;
- u32 timer_started;
- u32 timer_set;
- u32 threshold;
- u32 soft_threshold;
- u32 __iomem *timer_load_count_addr;
- u32 __iomem *timer_current_value_addr;
- u32 __iomem *timer_control_addr;
- u32 __iomem *timer_clear_interrupt_addr;
- u32 __iomem *timer_interrupt_status_addr;
- struct sfi_timer_table_entry *timer_tbl_ptr;
- struct notifier_block intel_scu_notifier;
- struct miscdevice miscdev;
-};
-
-extern int sfi_mtimer_num;
-
-/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
-#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 9b89d2f09568..3ce6a58bd81e 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -31,7 +31,6 @@
#include <linux/io.h>
#include <linux/ioport.h>
-#define DEBUG
#define NAME "it8712f_wdt"
MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>");
diff --git a/drivers/watchdog/keembay_wdt.c b/drivers/watchdog/keembay_wdt.c
new file mode 100644
index 000000000000..547d3fea33ff
--- /dev/null
+++ b/drivers/watchdog/keembay_wdt.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Watchdog driver for Intel Keem Bay non-secure watchdog.
+ *
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+/* Non-secure watchdog register offsets */
+#define TIM_WATCHDOG 0x0
+#define TIM_WATCHDOG_INT_THRES 0x4
+#define TIM_WDOG_EN 0x8
+#define TIM_SAFE 0xc
+
+#define WDT_ISR_MASK GENMASK(9, 8)
+#define WDT_ISR_CLEAR 0x8200ff18
+#define WDT_UNLOCK 0xf1d0dead
+#define WDT_LOAD_MAX U32_MAX
+#define WDT_LOAD_MIN 1
+#define WDT_TIMEOUT 5
+
+static unsigned int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout period in seconds (default = "
+ __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = "
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct keembay_wdt {
+ struct watchdog_device wdd;
+ struct clk *clk;
+ unsigned int rate;
+ int to_irq;
+ int th_irq;
+ void __iomem *base;
+};
+
+static inline u32 keembay_wdt_readl(struct keembay_wdt *wdt, u32 offset)
+{
+ return readl(wdt->base + offset);
+}
+
+static inline void keembay_wdt_writel(struct keembay_wdt *wdt, u32 offset, u32 val)
+{
+ writel(WDT_UNLOCK, wdt->base + TIM_SAFE);
+ writel(val, wdt->base + offset);
+}
+
+static void keembay_wdt_set_timeout_reg(struct watchdog_device *wdog)
+{
+ struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ keembay_wdt_writel(wdt, TIM_WATCHDOG, wdog->timeout * wdt->rate);
+}
+
+static void keembay_wdt_set_pretimeout_reg(struct watchdog_device *wdog)
+{
+ struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+ u32 th_val = 0;
+
+ if (wdog->pretimeout)
+ th_val = wdog->timeout - wdog->pretimeout;
+
+ keembay_wdt_writel(wdt, TIM_WATCHDOG_INT_THRES, th_val * wdt->rate);
+}
+
+static int keembay_wdt_start(struct watchdog_device *wdog)
+{
+ struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ keembay_wdt_set_timeout_reg(wdog);
+ keembay_wdt_writel(wdt, TIM_WDOG_EN, 1);
+
+ return 0;
+}
+
+static int keembay_wdt_stop(struct watchdog_device *wdog)
+{
+ struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ keembay_wdt_writel(wdt, TIM_WDOG_EN, 0);
+
+ return 0;
+}
+
+static int keembay_wdt_ping(struct watchdog_device *wdog)
+{
+ keembay_wdt_set_timeout_reg(wdog);
+
+ return 0;
+}
+
+static int keembay_wdt_set_timeout(struct watchdog_device *wdog, u32 t)
+{
+ wdog->timeout = t;
+ keembay_wdt_set_timeout_reg(wdog);
+
+ return 0;
+}
+
+static int keembay_wdt_set_pretimeout(struct watchdog_device *wdog, u32 t)
+{
+ if (t > wdog->timeout)
+ return -EINVAL;
+
+ wdog->pretimeout = t;
+ keembay_wdt_set_pretimeout_reg(wdog);
+
+ return 0;
+}
+
+static unsigned int keembay_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ return keembay_wdt_readl(wdt, TIM_WATCHDOG) / wdt->rate;
+}
+
+/*
+ * SMC call is used to clear the interrupt bits, because the TIM_GEN_CONFIG
+ * register is in the secure bank.
+ */
+static irqreturn_t keembay_wdt_to_isr(int irq, void *dev_id)
+{
+ struct keembay_wdt *wdt = dev_id;
+ struct arm_smccc_res res;
+
+ keembay_wdt_writel(wdt, TIM_WATCHDOG, 1);
+ arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
+ dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt timeout.\n");
+ emergency_restart();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t keembay_wdt_th_isr(int irq, void *dev_id)
+{
+ struct keembay_wdt *wdt = dev_id;
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
+ dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt pre-timeout.\n");
+ watchdog_notify_pretimeout(&wdt->wdd);
+
+ return IRQ_HANDLED;
+}
+
+static const struct watchdog_info keembay_wdt_info = {
+ .identity = "Intel Keem Bay Watchdog Timer",
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_PRETIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops keembay_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = keembay_wdt_start,
+ .stop = keembay_wdt_stop,
+ .ping = keembay_wdt_ping,
+ .set_timeout = keembay_wdt_set_timeout,
+ .set_pretimeout = keembay_wdt_set_pretimeout,
+ .get_timeleft = keembay_wdt_get_timeleft,
+};
+
+static int keembay_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keembay_wdt *wdt;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ /* we do not need to enable the clock as it is enabled by default */
+ wdt->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(wdt->clk))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk), "Failed to get clock\n");
+
+ wdt->rate = clk_get_rate(wdt->clk);
+ if (!wdt->rate)
+ return dev_err_probe(dev, -EINVAL, "Failed to get clock rate\n");
+
+ wdt->th_irq = platform_get_irq_byname(pdev, "threshold");
+ if (wdt->th_irq < 0)
+ return dev_err_probe(dev, wdt->th_irq, "Failed to get IRQ for threshold\n");
+
+ ret = devm_request_irq(dev, wdt->th_irq, keembay_wdt_th_isr, 0,
+ "keembay-wdt", wdt);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ for threshold\n");
+
+ wdt->to_irq = platform_get_irq_byname(pdev, "timeout");
+ if (wdt->to_irq < 0)
+ return dev_err_probe(dev, wdt->to_irq, "Failed to get IRQ for timeout\n");
+
+ ret = devm_request_irq(dev, wdt->to_irq, keembay_wdt_to_isr, 0,
+ "keembay-wdt", wdt);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ for timeout\n");
+
+ wdt->wdd.parent = dev;
+ wdt->wdd.info = &keembay_wdt_info;
+ wdt->wdd.ops = &keembay_wdt_ops;
+ wdt->wdd.min_timeout = WDT_LOAD_MIN;
+ wdt->wdd.max_timeout = WDT_LOAD_MAX / wdt->rate;
+ wdt->wdd.timeout = WDT_TIMEOUT;
+
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
+ watchdog_init_timeout(&wdt->wdd, timeout, dev);
+ keembay_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register watchdog device.\n");
+
+ platform_set_drvdata(pdev, wdt);
+ dev_info(dev, "Initial timeout %d sec%s.\n",
+ wdt->wdd.timeout, nowayout ? ", nowayout" : "");
+
+ return 0;
+}
+
+static int __maybe_unused keembay_wdt_suspend(struct device *dev)
+{
+ struct keembay_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return keembay_wdt_stop(&wdt->wdd);
+
+ return 0;
+}
+
+static int __maybe_unused keembay_wdt_resume(struct device *dev)
+{
+ struct keembay_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return keembay_wdt_start(&wdt->wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(keembay_wdt_pm_ops, keembay_wdt_suspend,
+ keembay_wdt_resume);
+
+static const struct of_device_id keembay_wdt_match[] = {
+ { .compatible = "intel,keembay-wdt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, keembay_wdt_match);
+
+static struct platform_driver keembay_wdt_driver = {
+ .probe = keembay_wdt_probe,
+ .driver = {
+ .name = "keembay_wdt",
+ .of_match_table = keembay_wdt_match,
+ .pm = &keembay_wdt_pm_ops,
+ },
+};
+
+module_platform_driver(keembay_wdt_driver);
+
+MODULE_DESCRIPTION("Intel Keem Bay SoC watchdog driver");
+MODULE_AUTHOR("Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>");
+MODULE_LICENSE("GPL v2");
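
[Editor's note: for context on how a driver like this is exercised, here is a minimal user-space sketch against the standard /dev/watchdog character-device interface (WDIOC_SETTIMEOUT and WDIOC_KEEPALIVE from linux/watchdog.h). The device path and the 10-second timeout are illustrative choices, not taken from the patch.]

/* Minimal sketch: drive a watchdog from user space (illustrative only). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 10;	/* seconds; arbitrary example value */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* reaches keembay_wdt_set_timeout() */
	for (int i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* reaches keembay_wdt_ping() */
		sleep(timeout / 2);
	}
	write(fd, "V", 1);	/* magic close; allows stop when nowayout=0 */
	close(fd);
	return 0;
}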
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 5391bf3e6b11..e023d7d90d66 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_stop_on_reboot(&wdt->wdd);
+ watchdog_stop_on_unregister(&wdt->wdd);
ret = watchdog_register_device(&wdt->wdd);
if (ret)
@@ -619,7 +620,7 @@ err_out:
return ret;
}
-static int mei_wdt_remove(struct mei_cl_device *cldev)
+static void mei_wdt_remove(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
@@ -636,8 +637,6 @@ static int mei_wdt_remove(struct mei_cl_device *cldev)
dbgfs_unregister(wdt);
kfree(wdt);
-
- return 0;
}
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index d6a6393f609d..97ca993bd009 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -11,6 +11,7 @@
#include <dt-bindings/reset-controller/mt2712-resets.h>
#include <dt-bindings/reset-controller/mt8183-resets.h>
+#include <dt-bindings/reset-controller/mt8192-resets.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -76,6 +77,10 @@ static const struct mtk_wdt_data mt8183_data = {
.toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
};
+static const struct mtk_wdt_data mt8192_data = {
+ .toprgu_sw_rst_num = MT8192_TOPRGU_SW_RST_NUM,
+};
+
static int toprgu_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
@@ -195,6 +200,19 @@ static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
return 0;
}
+static void mtk_wdt_init(struct watchdog_device *wdt_dev)
+{
+ struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+ void __iomem *wdt_base;
+
+ wdt_base = mtk_wdt->wdt_base;
+
+ if (readl(wdt_base + WDT_MODE) & WDT_MODE_EN) {
+ set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
+ mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+ }
+}
+
static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
{
struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
@@ -264,7 +282,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt->wdt_dev.info = &mtk_wdt_info;
mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
- mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
+ mtk_wdt->wdt_dev.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT * 1000;
mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
mtk_wdt->wdt_dev.parent = dev;
@@ -274,7 +292,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt);
- mtk_wdt_stop(&mtk_wdt->wdt_dev);
+ mtk_wdt_init(&mtk_wdt->wdt_dev);
watchdog_stop_on_reboot(&mtk_wdt->wdt_dev);
err = devm_watchdog_register_device(dev, &mtk_wdt->wdt_dev);
@@ -322,6 +340,7 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
{ .compatible = "mediatek,mt6589-wdt" },
{ .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
+ { .compatible = "mediatek,mt8192-wdt", .data = &mt8192_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
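
[Editor's note: two details in this hunk are worth unpacking. Switching from max_timeout to max_hw_heartbeat_ms declares the hardware limit while still letting user space request longer timeouts (the core re-pings the hardware as needed), and mtk_wdt_init() relies on the WDOG_HW_RUNNING contract: if that bit is set at registration time, the core keeps the already-running watchdog fed until user space opens the device. A minimal sketch of the detection pattern, with a placeholder register layout rather than MediaTek's:]

/* Generic sketch of the "already running at boot" pattern. */
static void example_wdt_init(struct watchdog_device *wdd)
{
	struct example_wdt *wdt = watchdog_get_drvdata(wdd);

	/* EXAMPLE_MODE/EXAMPLE_EN are placeholders, not real registers. */
	if (readl(wdt->base + EXAMPLE_MODE) & EXAMPLE_EN) {
		/*
		 * Tell the core the hardware is live; it will ping the
		 * watchdog until user space takes over, instead of letting
		 * the machine reset during early boot.
		 */
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}
}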
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index e86fa7f8351d..a793b03a785d 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -951,14 +951,11 @@ error_request_region:
return ret;
}
-static int pcwd_isa_remove(struct device *dev, unsigned int id)
+static void pcwd_isa_remove(struct device *dev, unsigned int id)
{
if (debug >= DEBUG)
pr_debug("pcwd_isa_remove id=%d\n", id);
- if (!pcwd_private.io_addr)
- return 1;
-
/* Disable the board */
if (!nowayout)
pcwd_stop();
@@ -971,8 +968,6 @@ static int pcwd_isa_remove(struct device *dev, unsigned int id)
(pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4);
pcwd_private.io_addr = 0x0000;
cards_found--;
-
- return 0;
}
static void pcwd_isa_shutdown(struct device *dev, unsigned int id)
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 7cf0f2ec649b..e38a87ffe5f5 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -22,7 +22,6 @@ enum wdt_reg {
};
#define QCOM_WDT_ENABLE BIT(0)
-#define QCOM_WDT_ENABLE_IRQ BIT(1)
static const u32 reg_offset_data_apcs_tmr[] = {
[WDT_RST] = 0x38,
@@ -63,16 +62,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
return container_of(wdd, struct qcom_wdt, wdd);
}
-static inline int qcom_get_enable(struct watchdog_device *wdd)
-{
- int enable = QCOM_WDT_ENABLE;
-
- if (wdd->pretimeout)
- enable |= QCOM_WDT_ENABLE_IRQ;
-
- return enable;
-}
-
static irqreturn_t qcom_wdt_isr(int irq, void *arg)
{
struct watchdog_device *wdd = arg;
@@ -91,7 +80,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
writel(1, wdt_addr(wdt, WDT_RST));
writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
- writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
+ writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
return 0;
}
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 47fce4de0110..5791198960e6 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,6 +51,7 @@ struct rwdt_priv {
struct watchdog_device wdev;
unsigned long clk_rate;
u8 cks;
+ struct clk *clk;
};
static void rwdt_write(struct rwdt_priv *priv, u32 val, unsigned int reg)
@@ -125,13 +127,33 @@ static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
return DIV_BY_CLKS_PER_SEC(priv, 65536 - val);
}
+/* needs to be atomic - no RPM, no usleep_range, no scheduling! */
static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
void *data)
{
struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+ u8 val;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Stop the timer before we modify any register */
+ val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME;
+ rwdt_write(priv, val, RWTCSRA);
+ /* Delay 2 cycles before setting watchdog counter */
+ udelay(DIV_ROUND_UP(2 * 1000000, priv->clk_rate));
- rwdt_start(wdev);
rwdt_write(priv, 0xffff, RWTCNT);
+ /* smallest divider to reboot soon */
+ rwdt_write(priv, 0, RWTCSRA);
+
+ readb_poll_timeout_atomic(priv->base + RWTCSRA, val,
+ !(val & RWTCSRA_WRFLG), 1, 100);
+
+ rwdt_write(priv, RWTCSRA_TME, RWTCSRA);
+
+ /* wait 2 cycles, so watchdog will trigger */
+ udelay(DIV_ROUND_UP(2 * 1000000, priv->clk_rate));
+
return 0;
}
@@ -191,7 +213,6 @@ static int rwdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rwdt_priv *priv;
- struct clk *clk;
unsigned long clks_per_sec;
int ret, i;
u8 csra;
@@ -207,13 +228,13 @@ static int rwdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- priv->clk_rate = clk_get_rate(clk);
+ priv->clk_rate = clk_get_rate(priv->clk);
csra = readb_relaxed(priv->base + RWTCSRA);
priv->wdev.bootstatus = csra & RWTCSRA_WOVF ? WDIOF_CARDRESET : 0;
pm_runtime_put(dev);
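
[Editor's note: the new rwdt_restart() uses readb_poll_timeout_atomic() from linux/iopoll.h, which busy-waits without sleeping (hence safe in the atomic restart path) until a condition on a register read becomes true or a timeout expires. A minimal sketch of the call shape, with a placeholder register and flag:]

#include <linux/iopoll.h>

/*
 * Sketch only: poll an 8-bit status register until a hypothetical
 * EXAMPLE_BUSY flag clears, checking every 1 us for at most 100 us.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int example_wait_idle(void __iomem *status_reg)
{
	u8 val;

	return readb_poll_timeout_atomic(status_reg, val,
					 !(val & EXAMPLE_BUSY), 1, 100);
}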
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
deleted file mode 100644
index 734cf2966ecb..000000000000
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Watchdog driver for CSR SiRFprimaII and SiRFatlasVI
- *
- * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/watchdog.h>
-#include <linux/platform_device.h>
-#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#define CLOCK_FREQ 1000000
-
-#define SIRFSOC_TIMER_COUNTER_LO 0x0000
-#define SIRFSOC_TIMER_MATCH_0 0x0008
-#define SIRFSOC_TIMER_INT_EN 0x0024
-#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
-#define SIRFSOC_TIMER_LATCH 0x0030
-#define SIRFSOC_TIMER_LATCHED_LO 0x0034
-
-#define SIRFSOC_TIMER_WDT_INDEX 5
-
-#define SIRFSOC_WDT_MIN_TIMEOUT 30 /* 30 secs */
-#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */
-#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */
-
-static unsigned int timeout;
-static bool nowayout = WATCHDOG_NOWAYOUT;
-
-module_param(timeout, uint, 0);
-module_param(nowayout, bool, 0);
-
-MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-static void __iomem *sirfsoc_wdt_base(struct watchdog_device *wdd)
-{
- return (void __iomem __force *)watchdog_get_drvdata(wdd);
-}
-
-static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd)
-{
- u32 counter, match;
- void __iomem *wdt_base;
- int time_left;
-
- wdt_base = sirfsoc_wdt_base(wdd);
- counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO);
- match = readl(wdt_base +
- SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
-
- time_left = match - counter;
-
- return time_left / CLOCK_FREQ;
-}
-
-static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
-{
- u32 counter, timeout_ticks;
- void __iomem *wdt_base;
-
- timeout_ticks = wdd->timeout * CLOCK_FREQ;
- wdt_base = sirfsoc_wdt_base(wdd);
-
- /* Enable the latch before reading the LATCH_LO register */
- writel(1, wdt_base + SIRFSOC_TIMER_LATCH);
-
- /* Set the TO value */
- counter = readl(wdt_base + SIRFSOC_TIMER_LATCHED_LO);
-
- counter += timeout_ticks;
-
- writel(counter, wdt_base +
- SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
-
- return 0;
-}
-
-static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
-{
- void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
- sirfsoc_wdt_updatetimeout(wdd);
-
- /*
- * NOTE: If interrupt is not enabled
- * then WD-Reset doesn't get generated at all.
- */
- writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
- | (1 << SIRFSOC_TIMER_WDT_INDEX),
- wdt_base + SIRFSOC_TIMER_INT_EN);
- writel(1, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
-
- return 0;
-}
-
-static int sirfsoc_wdt_disable(struct watchdog_device *wdd)
-{
- void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
-
- writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
- writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
- & (~(1 << SIRFSOC_TIMER_WDT_INDEX)),
- wdt_base + SIRFSOC_TIMER_INT_EN);
-
- return 0;
-}
-
-static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
-{
- wdd->timeout = to;
- sirfsoc_wdt_updatetimeout(wdd);
-
- return 0;
-}
-
-#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
-
-static const struct watchdog_info sirfsoc_wdt_ident = {
- .options = OPTIONS,
- .firmware_version = 0,
- .identity = "SiRFSOC Watchdog",
-};
-
-static const struct watchdog_ops sirfsoc_wdt_ops = {
- .owner = THIS_MODULE,
- .start = sirfsoc_wdt_enable,
- .stop = sirfsoc_wdt_disable,
- .get_timeleft = sirfsoc_wdt_gettimeleft,
- .ping = sirfsoc_wdt_updatetimeout,
- .set_timeout = sirfsoc_wdt_settimeout,
-};
-
-static struct watchdog_device sirfsoc_wdd = {
- .info = &sirfsoc_wdt_ident,
- .ops = &sirfsoc_wdt_ops,
- .timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT,
- .min_timeout = SIRFSOC_WDT_MIN_TIMEOUT,
- .max_timeout = SIRFSOC_WDT_MAX_TIMEOUT,
-};
-
-static int sirfsoc_wdt_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- void __iomem *base;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);
-
- watchdog_init_timeout(&sirfsoc_wdd, timeout, dev);
- watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
- sirfsoc_wdd.parent = dev;
-
- watchdog_stop_on_reboot(&sirfsoc_wdd);
- watchdog_stop_on_unregister(&sirfsoc_wdd);
- ret = devm_watchdog_register_device(dev, &sirfsoc_wdd);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, &sirfsoc_wdd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_wdt_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int sirfsoc_wdt_resume(struct device *dev)
-{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
-
- /*
- * NOTE: Since timer controller registers settings are saved
- * and restored back by the timer-prima2.c, so we need not
- * update WD settings except refreshing timeout.
- */
- sirfsoc_wdt_updatetimeout(wdd);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(sirfsoc_wdt_pm_ops,
- sirfsoc_wdt_suspend, sirfsoc_wdt_resume);
-
-static const struct of_device_id sirfsoc_wdt_of_match[] = {
- { .compatible = "sirf,prima2-tick"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match);
-
-static struct platform_driver sirfsoc_wdt_driver = {
- .driver = {
- .name = "sirfsoc-wdt",
- .pm = &sirfsoc_wdt_pm_ops,
- .of_match_table = sirfsoc_wdt_of_match,
- },
- .probe = sirfsoc_wdt_probe,
-};
-module_platform_driver(sirfsoc_wdt_driver);
-
-MODULE_DESCRIPTION("SiRF SoC watchdog driver");
-MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sirfsoc-wdt");
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 958dc32a708f..58a00e1ab23b 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -305,14 +305,12 @@ err:
return ret;
}
-static int sp805_wdt_remove(struct amba_device *adev)
+static void sp805_wdt_remove(struct amba_device *adev)
{
struct sp805_wdt *wdt = amba_get_drvdata(adev);
watchdog_unregister_device(&wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, NULL);
-
- return 0;
}
static int __maybe_unused sp805_wdt_suspend(struct device *dev)
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
deleted file mode 100644
index 1afb0e9d808c..000000000000
--- a/drivers/watchdog/tangox_wdt.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
- * SMP86xx/SMP87xx Watchdog driver
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-
-#define DEFAULT_TIMEOUT 30
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-static unsigned int timeout;
-module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout");
-
-/*
- * Counter counts down from programmed value. Reset asserts when
- * the counter reaches 1.
- */
-#define WD_COUNTER 0
-
-#define WD_CONFIG 4
-#define WD_CONFIG_XTAL_IN BIT(0)
-#define WD_CONFIG_DISABLE BIT(31)
-
-struct tangox_wdt_device {
- struct watchdog_device wdt;
- void __iomem *base;
- unsigned long clk_rate;
- struct clk *clk;
-};
-
-static int tangox_wdt_set_timeout(struct watchdog_device *wdt,
- unsigned int new_timeout)
-{
- wdt->timeout = new_timeout;
-
- return 0;
-}
-
-static int tangox_wdt_start(struct watchdog_device *wdt)
-{
- struct tangox_wdt_device *dev = watchdog_get_drvdata(wdt);
- u32 ticks;
-
- ticks = 1 + wdt->timeout * dev->clk_rate;
- writel(ticks, dev->base + WD_COUNTER);
-
- return 0;
-}
-
-static int tangox_wdt_stop(struct watchdog_device *wdt)
-{
- struct tangox_wdt_device *dev = watchdog_get_drvdata(wdt);
-
- writel(0, dev->base + WD_COUNTER);
-
- return 0;
-}
-
-static unsigned int tangox_wdt_get_timeleft(struct watchdog_device *wdt)
-{
- struct tangox_wdt_device *dev = watchdog_get_drvdata(wdt);
- u32 count;
-
- count = readl(dev->base + WD_COUNTER);
-
- if (!count)
- return 0;
-
- return (count - 1) / dev->clk_rate;
-}
-
-static const struct watchdog_info tangox_wdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
- .identity = "tangox watchdog",
-};
-
-static int tangox_wdt_restart(struct watchdog_device *wdt,
- unsigned long action, void *data)
-{
- struct tangox_wdt_device *dev = watchdog_get_drvdata(wdt);
-
- writel(1, dev->base + WD_COUNTER);
-
- return 0;
-}
-
-static const struct watchdog_ops tangox_wdt_ops = {
- .start = tangox_wdt_start,
- .stop = tangox_wdt_stop,
- .set_timeout = tangox_wdt_set_timeout,
- .get_timeleft = tangox_wdt_get_timeleft,
- .restart = tangox_wdt_restart,
-};
-
-static void tangox_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int tangox_wdt_probe(struct platform_device *pdev)
-{
- struct tangox_wdt_device *dev;
- u32 config;
- int err;
-
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
-
- err = clk_prepare_enable(dev->clk);
- if (err)
- return err;
- err = devm_add_action_or_reset(&pdev->dev,
- tangox_clk_disable_unprepare, dev->clk);
- if (err)
- return err;
-
- dev->clk_rate = clk_get_rate(dev->clk);
- if (!dev->clk_rate)
- return -EINVAL;
-
- dev->wdt.parent = &pdev->dev;
- dev->wdt.info = &tangox_wdt_info;
- dev->wdt.ops = &tangox_wdt_ops;
- dev->wdt.timeout = DEFAULT_TIMEOUT;
- dev->wdt.min_timeout = 1;
- dev->wdt.max_hw_heartbeat_ms = (U32_MAX - 1) / dev->clk_rate;
-
- watchdog_init_timeout(&dev->wdt, timeout, &pdev->dev);
- watchdog_set_nowayout(&dev->wdt, nowayout);
- watchdog_set_drvdata(&dev->wdt, dev);
-
- /*
- * Deactivate counter if disable bit is set to avoid
- * accidental reset.
- */
- config = readl(dev->base + WD_CONFIG);
- if (config & WD_CONFIG_DISABLE)
- writel(0, dev->base + WD_COUNTER);
-
- writel(WD_CONFIG_XTAL_IN, dev->base + WD_CONFIG);
-
- /*
- * Mark as active and restart with configured timeout if
- * already running.
- */
- if (readl(dev->base + WD_COUNTER)) {
- set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
- tangox_wdt_start(&dev->wdt);
- }
-
- watchdog_set_restart_priority(&dev->wdt, 128);
-
- watchdog_stop_on_unregister(&dev->wdt);
- err = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
- if (err)
- return err;
-
- platform_set_drvdata(pdev, dev);
-
- dev_info(&pdev->dev, "SMP86xx/SMP87xx watchdog registered\n");
-
- return 0;
-}
-
-static const struct of_device_id tangox_wdt_dt_ids[] = {
- { .compatible = "sigma,smp8642-wdt" },
- { .compatible = "sigma,smp8759-wdt" },
- { }
-};
-MODULE_DEVICE_TABLE(of, tangox_wdt_dt_ids);
-
-static struct platform_driver tangox_wdt_driver = {
- .probe = tangox_wdt_probe,
- .driver = {
- .name = "tangox-wdt",
- .of_match_table = tangox_wdt_dt_ids,
- },
-};
-
-module_platform_driver(tangox_wdt_driver);
-
-MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
-MODULE_DESCRIPTION("SMP86xx/SMP87xx Watchdog driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 0e9a99559609..5df0a22e2cb4 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -158,7 +158,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
wdd = container_of(nb, struct watchdog_device, reboot_nb);
if (code == SYS_DOWN || code == SYS_HALT) {
- if (watchdog_active(wdd)) {
+ if (watchdog_active(wdd) || watchdog_hw_running(wdd)) {
int ret;
ret = wdd->ops->stop(wdd);
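
[Editor's note: this fix extends the core's reboot notifier so a watchdog the core is feeding on behalf of the hardware (watchdog_hw_running()) is stopped at shutdown too, not only one opened by user space. For reference, the notifier registration itself is the stock kernel pattern; a minimal sketch with an illustrative callback name:]

#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_reboot_handler(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT) {
		/* quiesce hardware here */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_handler,
};

/* in probe: register_reboot_notifier(&example_reboot_nb); */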
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index cab86a08456b..4297280807ca 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/watchdog.h>
#include <asm/unaligned.h>
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
deleted file mode 100644
index bf183e73671a..000000000000
--- a/drivers/watchdog/zx2967_wdt.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * watchdog driver for ZTE's zx2967 family
- *
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/watchdog.h>
-
-#define ZX2967_WDT_CFG_REG 0x4
-#define ZX2967_WDT_LOAD_REG 0x8
-#define ZX2967_WDT_REFRESH_REG 0x18
-#define ZX2967_WDT_START_REG 0x1c
-
-#define ZX2967_WDT_REFRESH_MASK GENMASK(5, 0)
-
-#define ZX2967_WDT_CFG_DIV(n) ((((n) & 0xff) - 1) << 8)
-#define ZX2967_WDT_START_EN 0x1
-
-/*
- * Hardware magic number.
- * When watchdog reg is written, the lowest 16 bits are valid, but
- * the highest 16 bits should be always this number.
- */
-#define ZX2967_WDT_WRITEKEY (0x1234 << 16)
-#define ZX2967_WDT_VAL_MASK GENMASK(15, 0)
-
-#define ZX2967_WDT_DIV_DEFAULT 16
-#define ZX2967_WDT_DEFAULT_TIMEOUT 32
-#define ZX2967_WDT_MIN_TIMEOUT 1
-#define ZX2967_WDT_MAX_TIMEOUT 524
-#define ZX2967_WDT_MAX_COUNT 0xffff
-
-#define ZX2967_WDT_CLK_FREQ 0x8000
-
-#define ZX2967_WDT_FLAG_REBOOT_MON BIT(0)
-
-struct zx2967_wdt {
- struct watchdog_device wdt_device;
- void __iomem *reg_base;
- struct clk *clock;
-};
-
-static inline u32 zx2967_wdt_readl(struct zx2967_wdt *wdt, u16 reg)
-{
- return readl_relaxed(wdt->reg_base + reg);
-}
-
-static inline void zx2967_wdt_writel(struct zx2967_wdt *wdt, u16 reg, u32 val)
-{
- writel_relaxed(val | ZX2967_WDT_WRITEKEY, wdt->reg_base + reg);
-}
-
-static void zx2967_wdt_refresh(struct zx2967_wdt *wdt)
-{
- u32 val;
-
- val = zx2967_wdt_readl(wdt, ZX2967_WDT_REFRESH_REG);
- /*
- * Bit 4-5, 1 and 2: refresh config info
- * Bit 2-3, 1 and 2: refresh counter
- * Bit 0-1, 1 and 2: refresh int-value
- * we shift each group value between 1 and 2 to refresh all data.
- */
- val ^= ZX2967_WDT_REFRESH_MASK;
- zx2967_wdt_writel(wdt, ZX2967_WDT_REFRESH_REG,
- val & ZX2967_WDT_VAL_MASK);
-}
-
-static int
-zx2967_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
-{
- struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd);
- unsigned int divisor = ZX2967_WDT_DIV_DEFAULT;
- u32 count;
-
- count = timeout * ZX2967_WDT_CLK_FREQ;
- if (count > divisor * ZX2967_WDT_MAX_COUNT)
- divisor = DIV_ROUND_UP(count, ZX2967_WDT_MAX_COUNT);
- count = DIV_ROUND_UP(count, divisor);
- zx2967_wdt_writel(wdt, ZX2967_WDT_CFG_REG,
- ZX2967_WDT_CFG_DIV(divisor) & ZX2967_WDT_VAL_MASK);
- zx2967_wdt_writel(wdt, ZX2967_WDT_LOAD_REG,
- count & ZX2967_WDT_VAL_MASK);
- zx2967_wdt_refresh(wdt);
- wdd->timeout = (count * divisor) / ZX2967_WDT_CLK_FREQ;
-
- return 0;
-}
-
-static void __zx2967_wdt_start(struct zx2967_wdt *wdt)
-{
- u32 val;
-
- val = zx2967_wdt_readl(wdt, ZX2967_WDT_START_REG);
- val |= ZX2967_WDT_START_EN;
- zx2967_wdt_writel(wdt, ZX2967_WDT_START_REG,
- val & ZX2967_WDT_VAL_MASK);
-}
-
-static void __zx2967_wdt_stop(struct zx2967_wdt *wdt)
-{
- u32 val;
-
- val = zx2967_wdt_readl(wdt, ZX2967_WDT_START_REG);
- val &= ~ZX2967_WDT_START_EN;
- zx2967_wdt_writel(wdt, ZX2967_WDT_START_REG,
- val & ZX2967_WDT_VAL_MASK);
-}
-
-static int zx2967_wdt_start(struct watchdog_device *wdd)
-{
- struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd);
-
- zx2967_wdt_set_timeout(wdd, wdd->timeout);
- __zx2967_wdt_start(wdt);
-
- return 0;
-}
-
-static int zx2967_wdt_stop(struct watchdog_device *wdd)
-{
- struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd);
-
- __zx2967_wdt_stop(wdt);
-
- return 0;
-}
-
-static int zx2967_wdt_keepalive(struct watchdog_device *wdd)
-{
- struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd);
-
- zx2967_wdt_refresh(wdt);
-
- return 0;
-}
-
-#define ZX2967_WDT_OPTIONS \
- (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
-static const struct watchdog_info zx2967_wdt_ident = {
- .options = ZX2967_WDT_OPTIONS,
- .identity = "zx2967 watchdog",
-};
-
-static const struct watchdog_ops zx2967_wdt_ops = {
- .owner = THIS_MODULE,
- .start = zx2967_wdt_start,
- .stop = zx2967_wdt_stop,
- .ping = zx2967_wdt_keepalive,
- .set_timeout = zx2967_wdt_set_timeout,
-};
-
-static void zx2967_wdt_reset_sysctrl(struct device *dev)
-{
- int ret;
- void __iomem *regmap;
- unsigned int offset, mask, config;
- struct of_phandle_args out_args;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,wdt-reset-sysctrl", 3, 0, &out_args);
- if (ret)
- return;
-
- offset = out_args.args[0];
- config = out_args.args[1];
- mask = out_args.args[2];
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- of_node_put(out_args.np);
- return;
- }
-
- regmap_update_bits(regmap, offset, mask, config);
- of_node_put(out_args.np);
-}
-
-static void zx2967_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int zx2967_wdt_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct zx2967_wdt *wdt;
- int ret;
- struct reset_control *rstc;
-
- wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, wdt);
-
- wdt->wdt_device.info = &zx2967_wdt_ident;
- wdt->wdt_device.ops = &zx2967_wdt_ops;
- wdt->wdt_device.timeout = ZX2967_WDT_DEFAULT_TIMEOUT;
- wdt->wdt_device.max_timeout = ZX2967_WDT_MAX_TIMEOUT;
- wdt->wdt_device.min_timeout = ZX2967_WDT_MIN_TIMEOUT;
- wdt->wdt_device.parent = dev;
-
- wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(wdt->reg_base))
- return PTR_ERR(wdt->reg_base);
-
- zx2967_wdt_reset_sysctrl(dev);
-
- wdt->clock = devm_clk_get(dev, NULL);
- if (IS_ERR(wdt->clock)) {
- dev_err(dev, "failed to find watchdog clock source\n");
- return PTR_ERR(wdt->clock);
- }
-
- ret = clk_prepare_enable(wdt->clock);
- if (ret < 0) {
- dev_err(dev, "failed to enable clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, zx2967_clk_disable_unprepare,
- wdt->clock);
- if (ret)
- return ret;
- clk_set_rate(wdt->clock, ZX2967_WDT_CLK_FREQ);
-
- rstc = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(rstc)) {
- dev_err(dev, "failed to get rstc");
- return PTR_ERR(rstc);
- }
-
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
-
- watchdog_set_drvdata(&wdt->wdt_device, wdt);
- watchdog_init_timeout(&wdt->wdt_device,
- ZX2967_WDT_DEFAULT_TIMEOUT, dev);
- watchdog_set_nowayout(&wdt->wdt_device, WATCHDOG_NOWAYOUT);
-
- ret = devm_watchdog_register_device(dev, &wdt->wdt_device);
- if (ret)
- return ret;
-
- dev_info(dev, "watchdog enabled (timeout=%d sec, nowayout=%d)",
- wdt->wdt_device.timeout, WATCHDOG_NOWAYOUT);
-
- return 0;
-}
-
-static const struct of_device_id zx2967_wdt_match[] = {
- { .compatible = "zte,zx296718-wdt", },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx2967_wdt_match);
-
-static struct platform_driver zx2967_wdt_driver = {
- .probe = zx2967_wdt_probe,
- .driver = {
- .name = "zx2967-wdt",
- .of_match_table = of_match_ptr(zx2967_wdt_match),
- },
-};
-module_platform_driver(zx2967_wdt_driver);
-
-MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>");
-MODULE_DESCRIPTION("ZTE zx2967 Watchdog Device Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b57b2067ecbf..671c71245a7b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -331,7 +331,7 @@ static enum bp_state reserve_additional_memory(void)
mutex_unlock(&balloon_mutex);
/* add_memory_resource() requires the device_hotplug lock */
lock_device_hotplug();
- rc = add_memory_resource(nid, resource, MEMHP_MERGE_RESOURCE);
+ rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
unlock_device_hotplug();
mutex_lock(&balloon_mutex);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a8030332a191..adb7260e94b2 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -63,6 +63,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
+#include <xen/xenbus.h>
#include <asm/hw_irq.h>
#include "events_internal.h"
@@ -115,6 +116,7 @@ struct irq_info {
unsigned char flags;
uint16_t domid;
} pirq;
+ struct xenbus_device *interdomain;
} u;
};
@@ -313,11 +315,18 @@ static int xen_irq_info_common_setup(struct irq_info *info,
}
static int xen_irq_info_evtchn_setup(unsigned irq,
- evtchn_port_t evtchn)
+ evtchn_port_t evtchn,
+ struct xenbus_device *dev)
{
struct irq_info *info = info_for_irq(irq);
+ int ret;
+
+ ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+ info->u.interdomain = dev;
+ if (dev)
+ atomic_inc(&dev->event_channels);
- return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+ return ret;
}
static int xen_irq_info_ipi_setup(unsigned cpu,
@@ -561,18 +570,28 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
return;
if (spurious) {
+ struct xenbus_device *dev = info->u.interdomain;
+ unsigned int threshold = 1;
+
+ if (dev && dev->spurious_threshold)
+ threshold = dev->spurious_threshold;
+
if ((1 << info->spurious_cnt) < (HZ << 2)) {
if (info->spurious_cnt != 0xFF)
info->spurious_cnt++;
}
- if (info->spurious_cnt > 1) {
- delay = 1 << (info->spurious_cnt - 2);
+ if (info->spurious_cnt > threshold) {
+ delay = 1 << (info->spurious_cnt - 1 - threshold);
if (delay > HZ)
delay = HZ;
if (!info->eoi_time)
info->eoi_cpu = smp_processor_id();
info->eoi_time = get_jiffies_64() + delay;
+ if (dev)
+ atomic_add(delay, &dev->jiffies_eoi_delayed);
}
+ if (dev)
+ atomic_inc(&dev->spurious_events);
} else {
info->spurious_cnt = 0;
}
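
[Editor's note: to make the new delay arithmetic concrete: with the default threshold of 1, the first spurious event on a port is tolerated, and from the second one onwards the EOI is delayed by 1 << (spurious_cnt - 1 - threshold) jiffies, capped at HZ. A small worked loop, assuming threshold = 1 and HZ = 250:]

/* Worked example of the spurious-event backoff (threshold = 1, HZ = 250). */
unsigned int spurious_cnt, threshold = 1, delay;

for (spurious_cnt = 2; spurious_cnt <= 10; spurious_cnt++) {
	delay = 1 << (spurious_cnt - 1 - threshold);	/* 1, 2, 4, ..., 256 */
	if (delay > 250 /* HZ */)
		delay = 250;
	/* spurious_cnt=2 -> 1 jiffy, ..., spurious_cnt=10 -> capped at HZ */
}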
@@ -901,6 +920,7 @@ static void __unbind_from_irq(unsigned int irq)
if (VALID_EVTCHN(evtchn)) {
unsigned int cpu = cpu_from_irq(irq);
+ struct xenbus_device *dev;
xen_evtchn_close(evtchn);
@@ -911,6 +931,11 @@ static void __unbind_from_irq(unsigned int irq)
case IRQT_IPI:
per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
break;
+ case IRQT_EVTCHN:
+ dev = info->u.interdomain;
+ if (dev)
+ atomic_dec(&dev->event_channels);
+ break;
default:
break;
}
@@ -1116,7 +1141,8 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
-static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
+static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
+ struct xenbus_device *dev)
{
int irq;
int ret;
@@ -1136,7 +1162,7 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
irq_set_chip_and_handler_name(irq, chip,
handle_edge_irq, "event");
- ret = xen_irq_info_evtchn_setup(irq, evtchn);
+ ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
if (ret < 0) {
__unbind_from_irq(irq);
irq = ret;
@@ -1163,7 +1189,7 @@ out:
int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
@@ -1212,27 +1238,27 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
-static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
+static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
evtchn_port_t remote_port,
struct irq_chip *chip)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
- bind_interdomain.remote_dom = remote_domain;
+ bind_interdomain.remote_dom = dev->otherend_id;
bind_interdomain.remote_port = remote_port;
err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
&bind_interdomain);
return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
- chip);
+ chip, dev);
}
-int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
evtchn_port_t remote_port)
{
- return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
&xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
@@ -1345,7 +1371,7 @@ static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
{
int irq, retval;
- irq = bind_evtchn_to_irq_chip(evtchn, chip);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1380,14 +1406,13 @@ int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
static int bind_interdomain_evtchn_to_irqhandler_chip(
- unsigned int remote_domain, evtchn_port_t remote_port,
+ struct xenbus_device *dev, evtchn_port_t remote_port,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id, struct irq_chip *chip)
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
- chip);
+ irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
if (irq < 0)
return irq;
@@ -1400,14 +1425,14 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
return irq;
}
-int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id)
{
- return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+ return bind_interdomain_evtchn_to_irqhandler_chip(dev,
remote_port, handler, irqflags, devname,
dev_id, &xen_lateeoi_chip);
}
@@ -1574,6 +1599,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
int irq;
struct irq_info *info;
+ struct xenbus_device *dev;
irq = get_evtchn_to_irq(port);
if (irq == -1)
@@ -1603,6 +1629,10 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
info = info_for_irq(irq);
+ dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
+ if (dev)
+ atomic_inc(&dev->events);
+
if (ctrl->defer_eoi) {
info->eoi_cpu = smp_processor_id();
info->irq_epoch = __this_cpu_read(irq_epoch);
@@ -1679,7 +1709,7 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- (void)xen_irq_info_evtchn_setup(irq, evtchn);
+ (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
mutex_unlock(&irq_mapping_update_lock);
@@ -2060,16 +2090,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
-int xen_set_callback_via(uint64_t via)
-{
- struct xen_hvm_param a;
- a.domid = DOMID_SELF;
- a.index = HVM_PARAM_CALLBACK_IRQ;
- a.value = via;
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index a7a85719a8c8..c99415a70051 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -162,6 +162,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
{
struct user_evtchn *evtchn = data;
struct per_user_data *u = evtchn->user;
+ unsigned int prod, cons;
WARN(!evtchn->enabled,
"Interrupt for port %u, but apparently not enabled; per-user %p\n",
@@ -171,10 +172,14 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
spin_lock(&u->ring_prod_lock);
- if ((u->ring_prod - u->ring_cons) < u->ring_size) {
- *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
- wmb(); /* Ensure ring contents visible */
- if (u->ring_cons == u->ring_prod++) {
+ prod = READ_ONCE(u->ring_prod);
+ cons = READ_ONCE(u->ring_cons);
+
+ if ((prod - cons) < u->ring_size) {
+ *evtchn_ring_entry(u, prod) = evtchn->port;
+ smp_wmb(); /* Ensure ring contents visible */
+ WRITE_ONCE(u->ring_prod, prod + 1);
+ if (cons == prod) {
wake_up_interruptible(&u->evtchn_wait);
kill_fasync(&u->evtchn_async_queue,
SIGIO, POLL_IN);
@@ -210,8 +215,8 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
if (u->ring_overflow)
goto unlock_out;
- c = u->ring_cons;
- p = u->ring_prod;
+ c = READ_ONCE(u->ring_cons);
+ p = READ_ONCE(u->ring_prod);
if (c != p)
break;
@@ -221,7 +226,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
return -EAGAIN;
rc = wait_event_interruptible(u->evtchn_wait,
- u->ring_cons != u->ring_prod);
+ READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod));
if (rc)
return rc;
}
@@ -245,13 +250,13 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
}
rc = -EFAULT;
- rmb(); /* Ensure that we see the port before we copy it. */
+ smp_rmb(); /* Ensure that we see the port before we copy it. */
if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
((bytes2 != 0) &&
copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
goto unlock_out;
- u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
+ WRITE_ONCE(u->ring_cons, c + (bytes1 + bytes2) / sizeof(evtchn_port_t));
rc = bytes1 + bytes2;
unlock_out:
@@ -552,7 +557,9 @@ static long evtchn_ioctl(struct file *file,
/* Initialise the ring to empty. Clear errors. */
mutex_lock(&u->ring_cons_mutex);
spin_lock_irq(&u->ring_prod_lock);
- u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ WRITE_ONCE(u->ring_cons, 0);
+ WRITE_ONCE(u->ring_prod, 0);
+ u->ring_overflow = 0;
spin_unlock_irq(&u->ring_prod_lock);
mutex_unlock(&u->ring_cons_mutex);
rc = 0;
@@ -595,7 +602,7 @@ static __poll_t evtchn_poll(struct file *file, poll_table *wait)
struct per_user_data *u = file->private_data;
poll_wait(file, &u->evtchn_wait, wait);
- if (u->ring_cons != u->ring_prod)
+ if (READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod))
mask |= EPOLLIN | EPOLLRDNORM;
if (u->ring_overflow)
mask = EPOLLERR;
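
[Editor's note: the evtchn ring changes above are the classic single-producer/single-consumer discipline: the producer publishes the slot contents with smp_wmb() before advancing ring_prod, the consumer pairs that with smp_rmb() after observing ring_prod, and READ_ONCE()/WRITE_ONCE() keep the compiler from tearing or caching the indices. A stripped-down sketch of the pairing; the ring type and field names are placeholders:]

/* Producer side (serialised by the producer lock). */
prod = READ_ONCE(ring->prod);
cons = READ_ONCE(ring->cons);
if (prod - cons < ring->size) {
	ring->slot[prod & (ring->size - 1)] = item;
	smp_wmb();			/* publish slot before index */
	WRITE_ONCE(ring->prod, prod + 1);
}

/* Consumer side. */
cons = READ_ONCE(ring->cons);
prod = READ_ONCE(ring->prod);
if (cons != prod) {
	smp_rmb();			/* see index before slot contents */
	item = ring->slot[cons & (ring->size - 1)];
	WRITE_ONCE(ring->cons, cons + 1);
}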
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a36b71286bcf..5447c5156b2e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -309,44 +309,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
* to the kernel linear addresses of the struct pages.
* These ptes are completely different from the user ptes dealt
* with find_grant_ptes.
+ * Note that GNTMAP_device_map isn't needed here: The
+ * dev_bus_addr output field gets consumed only from ->map_ops,
+ * and by not requesting it when mapping we also avoid needing
+ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
+ * reference to the page in the hypervisor).
*/
+ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
+ GNTMAP_host_map;
+
for (i = 0; i < map->count; i++) {
unsigned long address = (unsigned long)
pfn_to_kaddr(page_to_pfn(map->pages[i]));
BUG_ON(PageHighMem(map->pages[i]));
- gnttab_set_map_op(&map->kmap_ops[i], address,
- map->flags | GNTMAP_host_map,
+ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
map->grants[i].ref,
map->grants[i].domid);
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
- map->flags | GNTMAP_host_map, -1);
+ flags, -1);
}
}
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
map->pages, map->count);
- if (err)
- return err;
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status) {
+ if (map->map_ops[i].status == GNTST_okay)
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else if (!err)
err = -EINVAL;
- continue;
- }
- map->unmap_ops[i].handle = map->map_ops[i].handle;
- if (use_ptemod)
- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
- else if (map->dma_vaddr) {
- unsigned long bfn;
+ if (map->flags & GNTMAP_device_map)
+ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
- bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
- map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+ if (use_ptemod) {
+ if (map->kmap_ops[i].status == GNTST_okay)
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+ else if (!err)
+ err = -EINVAL;
}
-#endif
}
return err;
}
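
[Editor's note: this gntdev change and the scsiback and xen-front-pgdir-shbuf hunks further down converge on the same idiom: a batched grant-map hypercall may partially succeed, so each per-op status is inspected, the first failure is latched into err without being overwritten, and the loop keeps going so every successful handle is still recorded for a later unmap. A distilled, schematic sketch of the idiom (ops/unmap/count are placeholders):]

/* Distilled "latch first error, keep walking the batch" idiom. */
int err = 0;

for (i = 0; i < count; i++) {
	if (ops[i].status == GNTST_okay)
		unmap[i].handle = ops[i].handle;	/* usable later */
	else if (!err)
		err = -EINVAL;	/* remember only the first failure */
}
return err;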
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index dd911e1ff782..18f0ed8b1f93 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
goto out;
}
+ /*
+ * It doesn't strictly *have* to run on CPU0 but it sure
+ * as hell better process the event channel ports delivered
+ * to CPU0.
+ */
+ irq_set_affinity(pdev->irq, cpumask_of(0));
+
callback_via = get_callback_via(pdev);
ret = xen_set_callback_via(callback_via);
if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
ret = gnttab_init();
if (ret)
goto grant_out;
- xenbus_probe(NULL);
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b0c73c58f987..720a7b7abd46 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+ struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
- struct xen_mem_acquire_resource xdata;
+ struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
+ /* Both fields must be set or unset */
+ if (!!kdata.addr != !!kdata.num)
+ return -EINVAL;
+
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+
+ if (!kdata.addr && !kdata.num) {
+ /* Query the size of the resource. */
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ if (rc)
+ return rc;
+ return __put_user(xdata.nr_frames, &udata->num);
+ }
+
mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
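
[Editor's note: the `!!kdata.addr != !!kdata.num` test above is a compact both-set-or-both-clear check: `!!` normalises each field to 0 or 1, so the comparison fails exactly when one field is set and the other is not. The equivalent spelled-out form:]

/* Equivalent long-hand form of the validation. */
if ((kdata.addr && !kdata.num) || (!kdata.addr && kdata.num))
	return -EINVAL;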
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
- memset(&xdata, 0, sizeof(xdata));
- xdata.domid = kdata.dom;
- xdata.type = kdata.type;
- xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index a7d293fa8d14..b47fd8435061 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -348,7 +348,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
map->bytes = page;
ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
- fedata->dev->otherend_id, evtchn,
+ fedata->dev, evtchn,
pvcalls_back_conn_event, 0, "pvcalls-backend", map);
if (ret < 0)
goto out;
@@ -948,7 +948,7 @@ static int backend_connect(struct xenbus_device *dev)
goto error;
}
- err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
if (err < 0)
goto error;
fedata->irq = err;
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index ce8ffb595a46..df7cab870be5 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -3,7 +3,8 @@
* Copyright 2012 by Oracle Inc
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*
- * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
+ * This code borrows ideas from
+ * https://lore.kernel.org/lkml/1322673664-14642-6-git-send-email-konrad.wilk@oracle.com
* so many thanks go to Kevin Tian <kevin.tian@intel.com>
* and Yu Ke <ke.yu@intel.com>.
*/
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index 48a658dc7ccf..81b6e13fa5ec 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -305,11 +305,18 @@ static int backend_map(struct xen_front_pgdir_shbuf *buf)
/* Save handles even if error, so we can unmap. */
for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
- buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
- if (unlikely(map_ops[cur_page].status != GNTST_okay))
+ if (likely(map_ops[cur_page].status == GNTST_okay)) {
+ buf->backend_map_handles[cur_page] =
+ map_ops[cur_page].handle;
+ } else {
+ buf->backend_map_handles[cur_page] =
+ INVALID_GRANT_HANDLE;
+ if (!ret)
+ ret = -ENXIO;
dev_err(&buf->xb_dev->dev,
"Failed to map page %d: %d\n",
cur_page, map_ops[cur_page].status);
+ }
}
if (ret) {
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index e7c692cfb2cf..5188f02e75fb 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -124,7 +124,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
pdev->sh_info = vaddr;
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
- pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
+ pdev->xdev, remote_evtchn, xen_pcibk_handle_event,
0, DRV_NAME, pdev);
if (err < 0) {
xenbus_dev_fatal(pdev->xdev, err,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 862162dca33c..f3024942fbdf 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -386,12 +386,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
return 0;
err = gnttab_map_refs(map, NULL, pg, cnt);
- BUG_ON(err);
for (i = 0; i < cnt; i++) {
if (unlikely(map[i].status != GNTST_okay)) {
pr_err("invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
- err = -ENOMEM;
+ if (!err)
+ err = -ENOMEM;
} else {
get_page(pg[i]);
}
@@ -799,7 +799,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
sring = (struct vscsiif_sring *)area;
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(info->dev, evtchn);
if (err < 0)
goto unmap_page;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index eb5151fc8efa..e5fda0256feb 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
static struct task_struct *xenbus_task;
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
static irqreturn_t wake_waiting(int irq, void *unused)
{
- if (unlikely(xenstored_ready == 0)) {
- xenstored_ready = 1;
- schedule_work(&probe_work);
- }
-
wake_up(&xb_waitq);
return IRQ_HANDLED;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 44634d970a5c..97f0d234482d 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -206,6 +206,65 @@ void xenbus_otherend_changed(struct xenbus_watch *watch,
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
+#define XENBUS_SHOW_STAT(name) \
+static ssize_t show_##name(struct device *_dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct xenbus_device *dev = to_xenbus_device(_dev); \
+ \
+ return sprintf(buf, "%d\n", atomic_read(&dev->name)); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+XENBUS_SHOW_STAT(event_channels);
+XENBUS_SHOW_STAT(events);
+XENBUS_SHOW_STAT(spurious_events);
+XENBUS_SHOW_STAT(jiffies_eoi_delayed);
+
+static ssize_t show_spurious_threshold(struct device *_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+
+ return sprintf(buf, "%d\n", dev->spurious_threshold);
+}
+
+static ssize_t set_spurious_threshold(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ unsigned int val;
+ ssize_t ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ dev->spurious_threshold = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(spurious_threshold, 0644, show_spurious_threshold,
+ set_spurious_threshold);
+
+static struct attribute *xenbus_attrs[] = {
+ &dev_attr_event_channels.attr,
+ &dev_attr_events.attr,
+ &dev_attr_spurious_events.attr,
+ &dev_attr_jiffies_eoi_delayed.attr,
+ &dev_attr_spurious_threshold.attr,
+ NULL
+};
+
+static const struct attribute_group xenbus_group = {
+ .name = "xenbus",
+ .attrs = xenbus_attrs,
+};
+
int xenbus_dev_probe(struct device *_dev)
{
struct xenbus_device *dev = to_xenbus_device(_dev);
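
XENBUS_SHOW_STAT stamps out one read-only (0444) attribute per counter; the macro argument doubles as the sysfs file name and as the name of an atomic_t field in struct xenbus_device. spurious_threshold is the lone writable knob (0644), parsed with kstrtouint(). With the group named "xenbus", the files should surface under each device's sysfs directory. A hypothetical userspace sketch (device name and path layout assumed, not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
            /* raise the tolerated spurious-event count for one device;
             * behaviour inferred from the counter names above */
            FILE *f = fopen("/sys/bus/xen/devices/vif-1/xenbus/spurious_threshold",
                            "w");

            if (!f)
                    return 1;
            fprintf(f, "4\n");
            return fclose(f) ? 1 : 0;
    }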
@@ -253,6 +312,11 @@ int xenbus_dev_probe(struct device *_dev)
return err;
}
+ dev->spurious_threshold = 1;
+ if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
+ dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
+ dev->nodename);
+
return 0;
fail_put:
module_put(drv->driver.owner);
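
Registration is deliberately non-fatal: the attributes are diagnostics, so a sysfs_create_group() failure only earns a dev_warn() and the device probes on regardless. The threshold defaults to 1, meaning a single spurious event is tolerated before any consequence. The matching sysfs_remove_group() in xenbus_dev_remove() (next hunk) keeps setup and teardown symmetric.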
@@ -269,6 +333,8 @@ int xenbus_dev_remove(struct device *_dev)
DPRINTK("%s", dev->nodename);
+ sysfs_remove_group(&dev->dev.kobj, &xenbus_group);
+
free_otherend_watch(dev);
if (drv->remove) {
@@ -683,29 +749,107 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+static void xenbus_probe(void)
{
xenstored_ready = 1;
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+ * xs_init() in case callbacks were not operational yet.
+ * So do it now.
+ */
+ if (xen_store_domain_type == XS_HVM)
+ xs_init();
+
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
-EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
{
- if (!xen_domain())
- return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+ return xen_store_domain_type == XS_HVM &&
+ !xen_have_vector_callback;
+#else
+ return false;
+#endif
+}
- if (xen_initial_domain() || xen_hvm_domain())
- return 0;
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
- xenbus_probe(NULL);
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
return 0;
}
+static int __init xenbus_probe_initcall(void)
+{
+ /*
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ * need to wait for the platform PCI device to come up.
+ */
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback()))
+ xenbus_probe();
+
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered by the first wakeup of xb_waitq, i.e. when
+ * communication starts happening.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
+ return 0;
+}
device_initcall(xenbus_probe_initcall);
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+ if (ret)
+ return ret;
+
+ /*
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
+ * due to the callback not functioning yet, we can do it now.
+ */
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+ xenbus_probe();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
/* Set up event channel for xenstored which is run as a local process
* (this is normally used only in dom0)
*/
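
Taken together, the rewrite replaces the old schedule-from-irq probe with an explicit dispatch on xen_store_domain_type: PV probes straight from the initcall; HVM probes there too unless the event-channel callback is not yet usable, in which case xen_set_callback_via() performs the deferred probe once HVM_PARAM_CALLBACK_IRQ is programmed; and XS_LOCAL hands off to a kthread that probes on the first xb_waitq wakeup. Condensed from the hunk above, error handling elided:

    /* sketch summarising who calls xenbus_probe() and when */
    switch (xen_store_domain_type) {
    case XS_PV:
            xenbus_probe();                 /* callbacks already work */
            break;
    case XS_HVM:
            if (xs_hvm_defer_init_for_callback())
                    break;                  /* xen_set_callback_via() probes later */
            xenbus_probe();
            break;
    case XS_LOCAL:
            /* probe once xenstored produces the first xb_waitq wakeup */
            kthread_run(xenbus_probe_thread, NULL, "xenbus_probe");
            break;
    }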
@@ -818,11 +962,17 @@ static int __init xenbus_init(void)
break;
}
- /* Initialize the interface to xenstore. */
- err = xs_init();
- if (err) {
- pr_warn("Error initializing xenstore comms: %i\n", err);
- goto out_error;
+ /*
+ * HVM domains may not have a functional callback yet. In that
+ * case let xs_init() be called from xenbus_probe(), which will
+ * get invoked at an appropriate time.
+ */
+ if (xen_store_domain_type != XS_HVM) {
+ err = xs_init();
+ if (err) {
+ pr_warn("Error initializing xenstore comms: %i\n", err);
+ goto out_error;
+ }
}
if ((xen_store_domain_type != XS_LOCAL) &&
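
This final hunk is the other half of the HVM deferral: xenbus_init() now skips xs_init() for XS_HVM and leaves it to xenbus_probe(), which runs it only once the event-channel callback is known to work; all other store types still initialise xenstore comms immediately. The xen_store_domain_type == XS_HVM guard inside xenbus_probe() is what prevents a second xs_init() on the non-HVM paths.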